author    Michael Ellerman <mpe@ellerman.id.au>  2019-11-27 18:41:26 +1100
committer Michael Ellerman <mpe@ellerman.id.au>  2019-11-27 18:41:26 +1100
commit    6f07048c00fd100ed8cab66c225c157e0b6c0a50 (patch)
tree      051d6c22eea45269d5f65e1c00fd14bd60b79f7c /arch/powerpc
parent    8dcd71b45df34d9b903450fab147ee8c1e6c16b5 (diff)
powerpc: Define arch_is_kernel_initmem_freed() for lockdep
Under certain circumstances, we hit a warning in lockdep_register_key:

	if (WARN_ON_ONCE(static_obj(key)))
		return;

This occurs when the key falls into initmem that has since been freed
and can now be reused. This has been observed on boot, and under memory
pressure.

Define arch_is_kernel_initmem_freed(), which allows lockdep to correctly
identify this memory as dynamic.

This fixes a bug picked up by the powerpc64 syzkaller instance where we
hit the WARN via alloc_netdev_mqs.

Reported-by: Qian Cai <cai@lca.pw>
Reported-by: ppc syzbot c/o Andrew Donnellan <ajd@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Signed-off-by: Daniel Axtens <dja@axtens.net>
Link: https://lore.kernel.org/r/87lfs4f7d6.fsf@dja-thinkpad.axtens.net
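For context, lockdep's static_obj() is what triggers the WARN above: it decides
whether a lock key lives in static kernel memory. With this hook defined,
addresses inside already-freed initmem are no longer treated as static. A
simplified sketch of where the check slots in (not the exact code in
kernel/locking/lockdep.c; the real function also handles per-CPU and module
per-CPU addresses):

	/* Simplified sketch of lockdep's static_obj(). */
	static int static_obj(const void *obj)
	{
		unsigned long addr = (unsigned long) obj;

		/* Freed initmem can be reused for dynamic allocations, so not static. */
		if (arch_is_kernel_initmem_freed(addr))
			return 0;

		/* Anything still inside the kernel image counts as static. */
		if (addr >= (unsigned long) &_stext && addr < (unsigned long) &_end)
			return 1;

		/* Module static data (per-CPU handling omitted in this sketch). */
		return is_module_address(addr);
	}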
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/include/asm/sections.h | 14
1 file changed, 14 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
index 5a9b6eb651b6..d19871763ed4 100644
--- a/arch/powerpc/include/asm/sections.h
+++ b/arch/powerpc/include/asm/sections.h
@@ -5,8 +5,22 @@
 #include <linux/elf.h>
 #include <linux/uaccess.h>
+
+#define arch_is_kernel_initmem_freed arch_is_kernel_initmem_freed
+
 #include <asm-generic/sections.h>
 
+extern bool init_mem_is_free;
+
+static inline int arch_is_kernel_initmem_freed(unsigned long addr)
+{
+	if (!init_mem_is_free)
+		return 0;
+
+	return addr >= (unsigned long)__init_begin &&
+	       addr < (unsigned long)__init_end;
+}
+
 extern char __head_end[];
 
 #ifdef __powerpc64__
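A note on why the #define is placed before the #include of
asm-generic/sections.h: the generic header only supplies a no-op fallback when
the architecture has not already claimed the hook, so the macro must be visible
first. A sketch of that fallback pattern (based on include/asm-generic/sections.h
of this era; simplified, not the verbatim upstream code):

	/* Generic no-op fallback, used only if the arch did not define the hook. */
	#ifndef arch_is_kernel_initmem_freed
	static inline int arch_is_kernel_initmem_freed(unsigned long addr)
	{
		return 0;
	}
	#endif

On powerpc, init_mem_is_free is an existing flag that is set once init memory
has been freed, so the new helper only reports __init addresses as freed
initmem after that point.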