Diffstat (limited to 'mm/kasan/common.c')
-rw-r--r--	mm/kasan/common.c	93
1 file changed, 62 insertions(+), 31 deletions(-)
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index d8d83ca56fe2..218b23a5a597 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -278,22 +278,12 @@ void __kasan_poison_object_data(struct kmem_cache *cache, void *object)
* based on objects indexes, so that objects that are next to each other
* get different tags.
*/
-static u8 assign_tag(struct kmem_cache *cache, const void *object,
- bool init, bool keep_tag)
+static u8 assign_tag(struct kmem_cache *cache, const void *object, bool init)
{
if (IS_ENABLED(CONFIG_KASAN_GENERIC))
return 0xff;
/*
- * 1. When an object is kmalloc()'ed, two hooks are called:
- * kasan_slab_alloc() and kasan_kmalloc(). We assign the
- * tag only in the first one.
- * 2. We reuse the same tag for krealloc'ed objects.
- */
- if (keep_tag)
- return get_tag(object);
-
- /*
* If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
* set, assign a tag when the object is being allocated (init == false).
*/
@@ -325,7 +315,7 @@ void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
}
/* Tag is ignored in set_tag() without CONFIG_KASAN_SW/HW_TAGS */
- object = set_tag(object, assign_tag(cache, object, true, false));
+ object = set_tag(object, assign_tag(cache, object, true));
return (void *)object;
}
@@ -413,12 +403,46 @@ static void set_alloc_info(struct kmem_cache *cache, void *object,
kasan_set_track(&alloc_meta->alloc_track, flags);
}
+void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
+ void *object, gfp_t flags)
+{
+ u8 tag;
+ void *tagged_object;
+
+ if (gfpflags_allow_blocking(flags))
+ kasan_quarantine_reduce();
+
+ if (unlikely(object == NULL))
+ return NULL;
+
+ if (is_kfence_address(object))
+ return (void *)object;
+
+ /*
+ * Generate and assign random tag for tag-based modes.
+ * Tag is ignored in set_tag() for the generic mode.
+ */
+ tag = assign_tag(cache, object, false);
+ tagged_object = set_tag(object, tag);
+
+ /*
+ * Unpoison the whole object.
+ * For kmalloc() allocations, kasan_kmalloc() will do precise poisoning.
+ */
+ kasan_unpoison(tagged_object, cache->object_size);
+
+ /* Save alloc info (if possible) for non-kmalloc() allocations. */
+ if (kasan_stack_collection_enabled())
+ set_alloc_info(cache, (void *)object, flags, false);
+
+ return tagged_object;
+}
+
static void *____kasan_kmalloc(struct kmem_cache *cache, const void *object,
- size_t size, gfp_t flags, bool is_kmalloc)
+ size_t size, gfp_t flags)
{
unsigned long redzone_start;
unsigned long redzone_end;
- u8 tag;
if (gfpflags_allow_blocking(flags))
kasan_quarantine_reduce();
@@ -429,33 +453,41 @@ static void *____kasan_kmalloc(struct kmem_cache *cache, const void *object,
if (is_kfence_address(kasan_reset_tag(object)))
return (void *)object;
+ /*
+ * The object has already been unpoisoned by kasan_slab_alloc() for
+ * kmalloc() or by ksize() for krealloc().
+ */
+
+ /*
+ * The redzone has byte-level precision for the generic mode.
+ * Partially poison the last object granule to cover the unaligned
+ * part of the redzone.
+ */
+ if (IS_ENABLED(CONFIG_KASAN_GENERIC))
+ kasan_poison_last_granule((void *)object, size);
+
+ /* Poison the aligned part of the redzone. */
redzone_start = round_up((unsigned long)(object + size),
KASAN_GRANULE_SIZE);
- redzone_end = round_up((unsigned long)object + cache->object_size,
- KASAN_GRANULE_SIZE);
- tag = assign_tag(cache, object, false, is_kmalloc);
-
- /* Tag is ignored in set_tag without CONFIG_KASAN_SW/HW_TAGS */
- kasan_unpoison(set_tag(object, tag), size);
+ redzone_end = (unsigned long)object + cache->object_size;
kasan_poison((void *)redzone_start, redzone_end - redzone_start,
KASAN_KMALLOC_REDZONE);
+ /*
+ * Save alloc info (if possible) for kmalloc() allocations.
+ * This also rewrites the alloc info when called from kasan_krealloc().
+ */
if (kasan_stack_collection_enabled())
- set_alloc_info(cache, (void *)object, flags, is_kmalloc);
+ set_alloc_info(cache, (void *)object, flags, true);
- return set_tag(object, tag);
-}
-
-void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
- void *object, gfp_t flags)
-{
- return ____kasan_kmalloc(cache, object, cache->object_size, flags, false);
+ /* Keep the tag that was set by kasan_slab_alloc(). */
+ return (void *)object;
}
void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object,
size_t size, gfp_t flags)
{
- return ____kasan_kmalloc(cache, object, size, flags, true);
+ return ____kasan_kmalloc(cache, object, size, flags);
}
EXPORT_SYMBOL(__kasan_kmalloc);
@@ -496,8 +528,7 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag
if (unlikely(!PageSlab(page)))
return __kasan_kmalloc_large(object, size, flags);
else
- return ____kasan_kmalloc(page->slab_cache, object, size,
- flags, true);
+ return ____kasan_kmalloc(page->slab_cache, object, size, flags);
}
void __kasan_kfree_large(void *ptr, unsigned long ip)