author     Feng Tang <feng.tang@intel.com>   2022-10-21 11:24:04 +0800
committer  Vlastimil Babka <vbabka@suse.cz>  2022-11-10 16:27:46 +0100
commit     5d1ba31087627423dfb2bd87badd62361701997b (patch)
tree       30115b55a761164a8b64a1e1fdbdef8842ef8ada
parent     9ce67395f5a0cdec6ce152d26bfda13b98b25c01 (diff)
mm: kasan: Extend kasan_metadata_size() to also cover in-object size
When kasan is enabled for slab/slub, it may save its free_meta
data in the first part of the slab object's data area on the
object's free path, which works fine.
There is an ongoing effort to extend slub's debug functionality
to redzone the latter part of the kmalloc object area. When both
debug features are enabled, they can conflict, especially when
the kmalloc object is small, as caught by the 0Day bot [1].
To solve it, the slub code needs to know the size of kasan's
in-object metadata. The existing kasan_metadata_size() returns
the size of kasan's metadata inside slub's metadata area, so
extend it to also cover the in-object metadata size by adding
a boolean flag 'in_object'.
There is no functional change to the existing code logic.
[1]. https://lore.kernel.org/lkml/YuYm3dWwpZwH58Hu@xsang-OptiPlex-9020/
Reported-by: kernel test robot <oliver.sang@intel.com>
Suggested-by: Andrey Konovalov <andreyknvl@gmail.com>
Signed-off-by: Feng Tang <feng.tang@intel.com>
Reviewed-by: Andrey Konovalov <andreyknvl@gmail.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
-rw-r--r--  include/linux/kasan.h |  5
-rw-r--r--  mm/kasan/generic.c    | 19
-rw-r--r--  mm/slub.c             |  4
3 files changed, 18 insertions, 10 deletions
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index d811b3d7d2a1..96c9d56e5510 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -302,7 +302,7 @@ static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
 
 #ifdef CONFIG_KASAN_GENERIC
 
-size_t kasan_metadata_size(struct kmem_cache *cache);
+size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object);
 slab_flags_t kasan_never_merge(void);
 void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
 			slab_flags_t *flags);
@@ -315,7 +315,8 @@ void kasan_record_aux_stack_noalloc(void *ptr);
 #else /* CONFIG_KASAN_GENERIC */
 
 /* Tag-based KASAN modes do not use per-object metadata. */
-static inline size_t kasan_metadata_size(struct kmem_cache *cache)
+static inline size_t kasan_metadata_size(struct kmem_cache *cache,
+					 bool in_object)
 {
 	return 0;
 }
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index d8b5590f9484..b076f597a378 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -450,15 +450,22 @@ void kasan_init_object_meta(struct kmem_cache *cache, const void *object)
 		__memset(alloc_meta, 0, sizeof(*alloc_meta));
 }
 
-size_t kasan_metadata_size(struct kmem_cache *cache)
+size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object)
 {
+	struct kasan_cache *info = &cache->kasan_info;
+
 	if (!kasan_requires_meta())
 		return 0;
-	return (cache->kasan_info.alloc_meta_offset ?
-		sizeof(struct kasan_alloc_meta) : 0) +
-		((cache->kasan_info.free_meta_offset &&
-		  cache->kasan_info.free_meta_offset != KASAN_NO_FREE_META) ?
-		sizeof(struct kasan_free_meta) : 0);
+
+	if (in_object)
+		return (info->free_meta_offset ?
+			0 : sizeof(struct kasan_free_meta));
+	else
+		return (info->alloc_meta_offset ?
+			sizeof(struct kasan_alloc_meta) : 0) +
+			((info->free_meta_offset &&
+			  info->free_meta_offset != KASAN_NO_FREE_META) ?
+			sizeof(struct kasan_free_meta) : 0);
 }
 
 static void __kasan_record_aux_stack(void *addr, bool can_alloc)
diff --git a/mm/slub.c b/mm/slub.c
index ecc44067625c..b81a4bba1b73 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -910,7 +910,7 @@ static void print_trailer(struct kmem_cache *s, struct slab *slab, u8 *p)
 	if (slub_debug_orig_size(s))
 		off += sizeof(unsigned int);
 
-	off += kasan_metadata_size(s);
+	off += kasan_metadata_size(s, false);
 
 	if (off != size_from_object(s))
 		/* Beginning of the filler is the free pointer */
@@ -1070,7 +1070,7 @@ static int check_pad_bytes(struct kmem_cache *s, struct slab *slab, u8 *p)
 			off += sizeof(unsigned int);
 	}
 
-	off += kasan_metadata_size(s);
+	off += kasan_metadata_size(s, false);
 
 	if (size_from_object(s) == off)
 		return 1;
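As an illustration of how the extended API is meant to be consumed (a minimal sketch, not part of this patch): a slub debug path that redzones the tail of a kmalloc object could query the in-object metadata size to keep the redzone clear of kasan's free_meta. The helper name redzone_start_for() below is hypothetical; only kasan_metadata_size() and its 'in_object' flag come from this commit.

/*
 * Hypothetical caller sketch: pick the offset at which slub may start
 * redzoning the unused tail of a kmalloc object, leaving room for
 * kasan's in-object free_meta stored at the start of the data area.
 * Assumes the usual slab internals headers (e.g. linux/kasan.h,
 * linux/minmax.h) are already included.
 */
static unsigned int redzone_start_for(struct kmem_cache *s,
				      unsigned int orig_size)
{
	/* size of kasan's metadata stored inside the object (generic mode) */
	unsigned int kasan_in_object = kasan_metadata_size(s, true);

	/* redzone only past both the requested size and kasan's meta */
	return max(orig_size, kasan_in_object);
}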