Diffstat (limited to 'init')
-rw-r--r--   init/Kconfig   9
-rw-r--r--   init/main.c    2
2 files changed, 6 insertions, 5 deletions
diff --git a/init/Kconfig b/init/Kconfig
index 9082ed33a9cd..d6a0b31b13dc 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1913,9 +1913,8 @@ config SLAB_MERGE_DEFAULT
 	  command line.
 
 config SLAB_FREELIST_RANDOM
-	default n
+	bool "Randomize slab freelist"
 	depends on SLAB || SLUB
-	bool "SLAB freelist randomization"
 	help
 	  Randomizes the freelist order used on creating new pages. This
 	  security feature reduces the predictability of the kernel slab
@@ -1923,12 +1922,14 @@ config SLAB_FREELIST_RANDOM
 
 config SLAB_FREELIST_HARDENED
 	bool "Harden slab freelist metadata"
-	depends on SLUB
+	depends on SLAB || SLUB
 	help
 	  Many kernel heap attacks try to target slab cache metadata and
 	  other infrastructure. This options makes minor performance
 	  sacrifices to harden the kernel slab allocator against common
-	  freelist exploit methods.
+	  freelist exploit methods. Some slab implementations have more
+	  sanity-checking than others. This option is most effective with
+	  CONFIG_SLUB.
 
 config SHUFFLE_PAGE_ALLOCATOR
 	bool "Page allocator randomization"
diff --git a/init/main.c b/init/main.c
index 276ca3160c8c..de2f9fa6bb4a 100644
--- a/init/main.c
+++ b/init/main.c
@@ -830,7 +830,7 @@ void __init __weak arch_call_rest_init(void)
 	rest_init();
 }
 
-asmlinkage __visible void __init start_kernel(void)
+asmlinkage __visible void __init __no_sanitize_address start_kernel(void)
 {
 	char *command_line;
 	char *after_dashes;
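
The SLAB_FREELIST_HARDENED help text above describes obfuscating slab freelist metadata so that overwriting a free object's "next" pointer is not enough to redirect allocations. As background, the sketch below is a standalone userspace illustration of that idea, in the spirit of the XOR-with-a-per-cache-secret scheme SLUB uses when this option is enabled; the names (kmem_cache_sketch, freelist_ptr_encode/decode) are hypothetical and simplified, not the kernel's actual code.

/*
 * Illustrative sketch only: the kind of freelist-pointer obfuscation
 * CONFIG_SLAB_FREELIST_HARDENED enables.  A per-cache random secret is
 * XORed into the stored next-free pointer, so corrupting slab metadata
 * does not let an attacker forge a useful freelist entry without also
 * knowing the secret.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

struct kmem_cache_sketch {
	unsigned long random;	/* per-cache secret, chosen at cache creation */
};

/* Obfuscate a freelist pointer before storing it inside a free object. */
static void *freelist_ptr_encode(const struct kmem_cache_sketch *s, void *next_free)
{
	return (void *)((unsigned long)next_free ^ s->random);
}

/* Recover the real pointer when walking the freelist on allocation. */
static void *freelist_ptr_decode(const struct kmem_cache_sketch *s, void *stored)
{
	return (void *)((unsigned long)stored ^ s->random);
}

int main(void)
{
	struct kmem_cache_sketch cache;
	void *obj = malloc(64);		/* stand-in for a free slab object */

	srand((unsigned)time(NULL));
	cache.random = ((unsigned long)rand() << 16) ^ (unsigned long)rand();

	void *stored = freelist_ptr_encode(&cache, obj);
	printf("real next-free pointer : %p\n", obj);
	printf("stored (obfuscated)    : %p\n", stored);
	printf("decoded on allocation  : %p\n", freelist_ptr_decode(&cache, stored));

	free(obj);
	return 0;
}

The related SLAB_FREELIST_RANDOM option is complementary: rather than hiding the stored pointers, it shuffles the order in which free objects are handed out on a new page, making the heap layout harder to predict.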