| author | Andrey Konovalov <andreyknvl@google.com> | 2023-12-19 23:28:57 +0100 |
|---|---|---|
| committer | Andrew Morton <akpm@linux-foundation.org> | 2023-12-29 11:58:39 -0800 |
| commit | 7d4847ded24775a01cbe1e1a5292f132d27f158b | |
| tree | 10fca233301685bde4d2d8ec138132317cff8abc /mm/mempool.c | |
| parent | 29d7355a9d05de9a6e38cc4d1146fb96c43853fb | |
mempool: skip slub_debug poisoning when KASAN is enabled
With the changes in the following patch, KASAN starts saving its metadata
within freed mempool elements.
Thus, skip slub_debug poisoning and checking of mempool elements when
KASAN is enabled. Corruptions of freed mempool elements will be detected
by KASAN anyway.
Link: https://lkml.kernel.org/r/98a4b1617e8ceeb266ef9a46f5e8c7f67a563ad2.1703024586.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Cc: Alexander Lobakin <alobakin@pm.me>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Breno Leitao <leitao@debian.org>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Marco Elver <elver@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
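For context, the slub_debug poisoning being skipped here is the mempool-level debug poisoning in mm/mempool.c: when slab debugging is enabled, a freed element is filled with a poison pattern, and the pattern is verified when the element is handed out again. Below is a simplified, hedged sketch of that pattern; the sketch_* names are hypothetical and only approximate the real __poison_element()/__check_element() helpers (poison byte values as in include/linux/poison.h).

#include <linux/poison.h>
#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical sketch: fill a freed element with the free-poison pattern. */
static void sketch_poison_element(void *element, size_t size)
{
	u8 *obj = element;

	memset(obj, POISON_FREE, size - 1);
	obj[size - 1] = POISON_END;
}

/* Hypothetical sketch: verify the pattern survived while the element sat in the pool. */
static bool sketch_check_element(void *element, size_t size)
{
	u8 *obj = element;
	size_t i;

	for (i = 0; i < size; i++) {
		u8 expect = (i < size - 1) ? POISON_FREE : POISON_END;

		if (obj[i] != expect)
			return false;	/* freed element was corrupted */
	}
	return true;
}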
Diffstat (limited to 'mm/mempool.c')
-rw-r--r-- | mm/mempool.c | 8 |
1 file changed, 8 insertions, 0 deletions
diff --git a/mm/mempool.c b/mm/mempool.c
index 7e1c729f292b..1fd39478c85e 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -56,6 +56,10 @@ static void __check_element(mempool_t *pool, void *element, size_t size)
 
 static void check_element(mempool_t *pool, void *element)
 {
+	/* Skip checking: KASAN might save its metadata in the element. */
+	if (kasan_enabled())
+		return;
+
 	/* Mempools backed by slab allocator */
 	if (pool->free == mempool_kfree) {
 		__check_element(pool, element, (size_t)pool->pool_data);
@@ -81,6 +85,10 @@ static void __poison_element(void *element, size_t size)
 
 static void poison_element(mempool_t *pool, void *element)
 {
+	/* Skip poisoning: KASAN might save its metadata in the element. */
+	if (kasan_enabled())
+		return;
+
 	/* Mempools backed by slab allocator */
 	if (pool->alloc == mempool_kmalloc) {
 		__poison_element(element, (size_t)pool->pool_data);
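The early returns added above hinge on kasan_enabled(). Roughly, this helper is defined in include/linux/kasan-enabled.h along the following lines (a paraphrased sketch, not verbatim): with CONFIG_KASAN_HW_TAGS it is a static key flipped at boot, and otherwise it collapses to a compile-time IS_ENABLED() check, so the new branch costs essentially nothing when KASAN is not in use.

#include <linux/static_key.h>

#ifdef CONFIG_KASAN_HW_TAGS
/* Hardware tag-based KASAN can be toggled at boot, so gate on a static key. */
DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);

static __always_inline bool kasan_enabled(void)
{
	return static_branch_likely(&kasan_flag_enabled);
}
#else
/* Generic and software tag-based KASAN are a compile-time decision. */
static inline bool kasan_enabled(void)
{
	return IS_ENABLED(CONFIG_KASAN);
}
#endif

With KASAN compiled out, check_element() and poison_element() behave exactly as before. With KASAN enabled, skipping the poison/check path matters because the follow-up patch mentioned in the commit message makes KASAN store its own metadata inside freed mempool elements; the slub_debug-style pattern would clash with that metadata, while genuine corruption of freed elements is still caught by KASAN itself.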