path: root/mm/kmsan/hooks.c
author    Alexander Potapenko <glider@google.com>    2024-01-24 18:31:34 +0100
committer Andrew Morton <akpm@linux-foundation.org>  2024-02-22 10:24:41 -0800
commit    d749cc7547bb46be73da848576ea155daf8186f3 (patch)
tree      000dac1d786fef35069720ce4d55f23d65e97b99 /mm/kmsan/hooks.c
parent    9af47276ed83cc346263e56243756543a2a33c9d (diff)
mm: kmsan: remove runtime checks from kmsan_unpoison_memory()
Similarly to what was done in commit 85716a80c16d ("kmsan: allow using __msan_instrument_asm_store() inside runtime"), it should be safe to call kmsan_unpoison_memory() from within the runtime, as it does not allocate memory or take locks. Remove the redundant runtime checks.

This should fix false positives seen with CONFIG_DEBUG_LIST=y, where the non-instrumented lib/stackdepot.c failed to unpoison the memory chunks later checked by the instrumented lib/list_debug.c.

Also replace the implementation of kmsan_unpoison_entry_regs() with a call to kmsan_unpoison_memory().

Link: https://lkml.kernel.org/r/20240124173134.1165747-1-glider@google.com
Fixes: f80be4571b19 ("kmsan: add KMSAN runtime core")
Signed-off-by: Alexander Potapenko <glider@google.com>
Tested-by: Marco Elver <elver@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Ilya Leoshkevich <iii@linux.ibm.com>
Cc: Nicholas Miehlbradt <nicholas@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
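For illustration only (not part of the patch), a minimal sketch of the scenario the commit message describes: a file built without KMSAN instrumentation (e.g. with KMSAN_SANITIZE := n, as lib/stackdepot.c is) initializes memory that instrumented code such as lib/list_debug.c later reads, so it must unpoison that memory explicitly. The helper fill_record() below is hypothetical; kmsan_unpoison_memory() is the real API this patch changes.

#include <linux/kmsan-checks.h>
#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical helper in a file compiled with KMSAN_SANITIZE := n. */
static void fill_record(void *buf, size_t len)
{
	memset(buf, 0, len);
	/*
	 * KMSAN cannot see stores made by uninstrumented code, so the shadow
	 * for buf has to be cleared by hand. Before this patch the call below
	 * was silently skipped whenever kmsan_in_runtime() was true (e.g.
	 * when reached from inside the runtime via the stack depot), leaving
	 * stale shadow and triggering false positives in instrumented callers
	 * with CONFIG_DEBUG_LIST=y. With the runtime check removed, the
	 * unpoisoning always takes effect.
	 */
	kmsan_unpoison_memory(buf, len);
}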
Diffstat (limited to 'mm/kmsan/hooks.c')
-rw-r--r--  mm/kmsan/hooks.c | 36
1 file changed, 13 insertions(+), 23 deletions(-)
diff --git a/mm/kmsan/hooks.c b/mm/kmsan/hooks.c
index 5d6e2dee5692..0b09daa188ef 100644
--- a/mm/kmsan/hooks.c
+++ b/mm/kmsan/hooks.c
@@ -359,6 +359,12 @@ void kmsan_handle_dma_sg(struct scatterlist *sg, int nents,
}
/* Functions from kmsan-checks.h follow. */
+
+/*
+ * To create an origin, kmsan_poison_memory() unwinds the stacks and stores it
+ * into the stack depot. This may cause deadlocks if done from within KMSAN
+ * runtime, therefore we bail out if kmsan_in_runtime().
+ */
void kmsan_poison_memory(const void *address, size_t size, gfp_t flags)
{
if (!kmsan_enabled || kmsan_in_runtime())
@@ -371,47 +377,31 @@ void kmsan_poison_memory(const void *address, size_t size, gfp_t flags)
}
EXPORT_SYMBOL(kmsan_poison_memory);
+/*
+ * Unlike kmsan_poison_memory(), this function can be used from within KMSAN
+ * runtime, because it does not trigger allocations or call instrumented code.
+ */
void kmsan_unpoison_memory(const void *address, size_t size)
{
unsigned long ua_flags;
- if (!kmsan_enabled || kmsan_in_runtime())
+ if (!kmsan_enabled)
return;
ua_flags = user_access_save();
- kmsan_enter_runtime();
/* The users may want to poison/unpoison random memory. */
kmsan_internal_unpoison_memory((void *)address, size,
KMSAN_POISON_NOCHECK);
- kmsan_leave_runtime();
user_access_restore(ua_flags);
}
EXPORT_SYMBOL(kmsan_unpoison_memory);
/*
- * Version of kmsan_unpoison_memory() that can be called from within the KMSAN
- * runtime.
- *
- * Non-instrumented IRQ entry functions receive struct pt_regs from assembly
- * code. Those regs need to be unpoisoned, otherwise using them will result in
- * false positives.
- * Using kmsan_unpoison_memory() is not an option in entry code, because the
- * return value of in_task() is inconsistent - as a result, certain calls to
- * kmsan_unpoison_memory() are ignored. kmsan_unpoison_entry_regs() ensures that
- * the registers are unpoisoned even if kmsan_in_runtime() is true in the early
- * entry code.
+ * Version of kmsan_unpoison_memory() called from IRQ entry functions.
*/
void kmsan_unpoison_entry_regs(const struct pt_regs *regs)
{
- unsigned long ua_flags;
-
- if (!kmsan_enabled)
- return;
-
- ua_flags = user_access_save();
- kmsan_internal_unpoison_memory((void *)regs, sizeof(*regs),
- KMSAN_POISON_NOCHECK);
- user_access_restore(ua_flags);
+ kmsan_unpoison_memory((void *)regs, sizeof(*regs));
}
void kmsan_check_memory(const void *addr, size_t size)