author	Linus Torvalds <torvalds@linux-foundation.org>	2021-04-26 10:02:09 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2021-04-26 10:02:09 -0700
commit	eea2647e74cd7bd5d04861ce55fa502de165de14 (patch)
tree	6d8fc43f3b8762651f825e74fc20f79d20f054a9 /mm
parent	6f78c2a7b7219bc2e455250365f438621e5819d0 (diff)
parent	70918779aec9bd01d16f4e6e800ffe423d196021 (diff)
Merge tag 'x86-entry-2021-04-26' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull entry code update from Thomas Gleixner:
 "Provide support for randomized stack offsets per syscall to make
  stack-based attacks that rely on the deterministic stack layout
  harder.

  The feature is based on the original idea of PaX's RANDSTACK
  feature, but uses a significantly different implementation.

  The offset does not affect the pt_regs location on the task stack,
  as that was agreed to be of dubious value. The offset is applied
  before the actual syscall is invoked.

  The offset is stored per CPU, and the randomization happens at the
  end of the syscall, which is less predictable than randomizing on
  syscall entry.

  The mechanism to apply the offset is alloca(), i.e. abusing the
  despised VLAs. This comes with the drawback that stack-clash
  protection has to be disabled for the affected compilation units,
  and there is also a negative interaction with the stack protector.

  Those downsides are traded for the advantage that this approach
  requires no intrusive changes to the low-level assembly entry code,
  does not affect the unwinder, and correct stack alignment is handled
  automatically by the compiler.

  The feature is guarded with a static branch, which avoids the
  overhead when disabled.

  Currently this is supported for x86 and arm64"

* tag 'x86-entry-2021-04-26' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  arm64: entry: Enable random_kstack_offset support
  lkdtm: Add REPORT_STACK for checking stack offsets
  x86/entry: Enable random_kstack_offset support
  stack: Optionally randomize kernel stack offset each syscall
  init_on_alloc: Optimize static branches
  jump_label: Provide CONFIG-driven build state defaults
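For readers unfamiliar with the mechanism, the following user-space sketch illustrates the alloca()-based offset described in the merge message: the stack pointer is moved down by a small, per-CPU random amount before the handler runs, and the amount is re-chosen at "syscall exit". It is only an illustration of the idea; the names (demo_syscall_entry, percpu_offset, the 0x3ff mask) are invented for the example and are not the kernel's interfaces.

/*
 * Build with: cc -O2 demo.c -o demo
 * Each "syscall" sees its stack frame at a slightly different address.
 */
#include <alloca.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint32_t percpu_offset;	/* stands in for the kernel's per-CPU offset */

static void demo_syscall_handler(void)
{
	int local;
	printf("handler stack frame at %p\n", (void *)&local);
}

static void demo_syscall_entry(void)
{
	/* Apply the previously chosen offset, capped to a few hundred bytes. */
	uint32_t offset = percpu_offset & 0x3ff;
	volatile char *pad = alloca(offset + 1);

	pad[0] = 0;			/* keep the allocation alive */

	demo_syscall_handler();

	/* Re-randomize at "syscall exit", as the kernel does. */
	percpu_offset = (uint32_t)rand();
}

int main(void)
{
	srand(42);
	for (int i = 0; i < 4; i++)
		demo_syscall_entry();
	return 0;
}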
Diffstat (limited to 'mm')
-rw-r--r--	mm/page_alloc.c	4
-rw-r--r--	mm/slab.h	6
2 files changed, 6 insertions(+), 4 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index cfc72873961d..e2f19bf948db 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -167,10 +167,10 @@ unsigned long totalcma_pages __read_mostly;
int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
-DEFINE_STATIC_KEY_FALSE(init_on_alloc);
+DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
EXPORT_SYMBOL(init_on_alloc);
-DEFINE_STATIC_KEY_FALSE(init_on_free);
+DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
EXPORT_SYMBOL(init_on_free);
static bool _init_on_alloc_enabled_early __read_mostly
diff --git a/mm/slab.h b/mm/slab.h
index 076582f58f68..774c7221efdc 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -601,7 +601,8 @@ static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
- if (static_branch_unlikely(&init_on_alloc)) {
+ if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
+ &init_on_alloc)) {
if (c->ctor)
return false;
if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
@@ -613,7 +614,8 @@ static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
- if (static_branch_unlikely(&init_on_free))
+ if (static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
+ &init_on_free))
return !(c->ctor ||
(c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
return false;
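The mm changes above rely on two of the commits pulled in by this merge ("jump_label: Provide CONFIG-driven build state defaults" and "init_on_alloc: Optimize static branches"). In rough outline, and as a sketch of the helpers rather than a verbatim copy of include/linux/jump_label.h, the new macros derive both the static key's initial state and the branch hint from the Kconfig default, so a kernel built with CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y compiles the init-on-alloc path as the likely, enabled-by-default branch instead of always treating it as unlikely:

/*
 * Sketch of the CONFIG-driven static key helpers this diff uses.
 * IS_ENABLED(config) evaluates to 1 or 0 at compile time, so the key is
 * defined TRUE (enabled by default) when the Kconfig option defaults the
 * feature to on, and FALSE otherwise; static_branch_maybe() likewise
 * selects the likely or unlikely branch hint.
 */
#define _DEFINE_STATIC_KEY_1(name)	DEFINE_STATIC_KEY_TRUE(name)
#define _DEFINE_STATIC_KEY_0(name)	DEFINE_STATIC_KEY_FALSE(name)
#define DEFINE_STATIC_KEY_MAYBE(config, name)			\
	__PASTE(_DEFINE_STATIC_KEY_, IS_ENABLED(config))(name)

#define static_branch_maybe(config, x)				\
	(IS_ENABLED(config) ? static_branch_likely(x)		\
			    : static_branch_unlikely(x))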