author    Oliver Glitta <glittao@gmail.com>    2021-06-28 19:34:33 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2021-06-29 10:53:46 -0700
commit    1f9f78b1b376f82cdd8ed73cc0abdb74d0453d43 (patch)
tree      1b2cbf976a0b729a4b66e38c37403a39f0d538af /mm
parent    26c6cb7cf830349c6518a7efe1c32ac796cd192e (diff)
mm/slub, kunit: add a KUnit test for SLUB debugging functionality
SLUB has a resiliency_test() function which is hidden behind an #ifdef SLUB_RESILIENCY_TEST that is not part of Kconfig, so nobody runs it. KUnit should be a proper replacement for it.

Try changing a byte in the redzone after allocation and changing the pointer to the next free node, the first byte, the 50th byte and a redzone byte. Check if validation finds the errors.

There are several differences from the original resiliency test: the tests create their own caches with known state instead of corrupting shared kmalloc caches. The corruption of the freepointer uses the correct offset; the original resiliency test got broken with the freepointer changes. The test that changed a random byte is dropped, because it has no meaning in this form, where we need deterministic results.

Add a new option, CONFIG_SLUB_KUNIT_TEST, in Kconfig. The tests next_pointer, first_word and clobber_50th_byte do not run with the KASAN option on, because the tests deliberately modify non-allocated objects.

Use kunit_resource to count errors in the cache and silence bug reports. Count an error whenever slab_bug() or slab_fix() is called, or when the count of pages is wrong.

[glittao@gmail.com: remove unused function test_exit(), from SLUB KUnit test]
Link: https://lkml.kernel.org/r/20210512140656.12083-1-glittao@gmail.com
[akpm@linux-foundation.org: export kasan_enable/disable_current to modules]
Link: https://lkml.kernel.org/r/20210511150734.3492-2-glittao@gmail.com

Signed-off-by: Oliver Glitta <glittao@gmail.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Daniel Latypov <dlatypov@google.com>
Acked-by: Marco Elver <elver@google.com>
Cc: Brendan Higgins <brendanhiggins@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
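[Editor's note: the test itself lands in lib/slub_kunit.c and is outside this 'mm'-only diffstat. As a rough sketch of the pattern the message describes — a named "slab_errors" resource registered in the suite's init so that slab_add_kunit_errors() in the diff below can find and bump it, then a deliberate redzone corruption checked via validate_slab_cache() — a test case might look like this; the cache name, object size and expected error count are illustrative assumptions, not copied from the actual test file.]

#include <kunit/test.h>
#include <linux/slab.h>
#include <linux/kasan.h>
#include "../mm/slab.h"		/* for validate_slab_cache() */

static struct kunit_resource resource;
static int slab_errors;

/* Register the named "slab_errors" counter that slab_add_kunit_errors()
 * looks up via kunit_find_named_resource(). */
static int test_init(struct kunit *test)
{
	slab_errors = 0;
	kunit_add_named_resource(test, NULL, NULL, &resource,
				 "slab_errors", &slab_errors);
	return 0;
}

/* Overwrite the first redzone byte after the object and expect
 * validate_slab_cache() to report it through the counter. */
static void test_clobber_redzone(struct kunit *test)
{
	struct kmem_cache *s = kmem_cache_create("TestSlub_RZ", 64, 0,
						 SLAB_RED_ZONE, NULL);
	u8 *p = kmem_cache_alloc(s, GFP_KERNEL);

	kasan_disable_current();	/* silence KASAN for the deliberate OOB write */
	p[64] = 0x12;			/* corrupt the redzone */

	validate_slab_cache(s);
	/* assumed count: one error from slab_bug(), one from slab_fix() */
	KUNIT_EXPECT_EQ(test, 2, slab_errors);

	kasan_enable_current();
	kmem_cache_free(s, p);
	kmem_cache_destroy(s);
}

static struct kunit_case test_cases[] = {
	KUNIT_CASE(test_clobber_redzone),
	{}
};

static struct kunit_suite test_suite = {
	.name = "slub_test",
	.init = test_init,
	.test_cases = test_cases,
};
kunit_test_suite(test_suite);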
Diffstat (limited to 'mm')
-rw-r--r--    mm/kasan/common.c    3
-rw-r--r--    mm/slab.h            1
-rw-r--r--    mm/slub.c            46
3 files changed, 47 insertions(+), 3 deletions(-)
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 6bb87f2acd4e..2586d3718600 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -51,11 +51,14 @@ void kasan_enable_current(void)
{
current->kasan_depth++;
}
+EXPORT_SYMBOL(kasan_enable_current);
void kasan_disable_current(void)
{
current->kasan_depth--;
}
+EXPORT_SYMBOL(kasan_disable_current);
+
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
void __kasan_unpoison_range(const void *address, size_t size)
diff --git a/mm/slab.h b/mm/slab.h
index 18c1927cd196..9b690fa44cae 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -215,6 +215,7 @@ DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
+long validate_slab_cache(struct kmem_cache *s);
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
diff --git a/mm/slub.c b/mm/slub.c
index 61bd40e3eb9a..290444ab932b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -36,6 +36,7 @@
#include <linux/prefetch.h>
#include <linux/memcontrol.h>
#include <linux/random.h>
+#include <kunit/test.h>
#include <trace/events/kmem.h>
@@ -449,6 +450,26 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
static DEFINE_SPINLOCK(object_map_lock);
+#if IS_ENABLED(CONFIG_KUNIT)
+static bool slab_add_kunit_errors(void)
+{
+ struct kunit_resource *resource;
+
+ if (likely(!current->kunit_test))
+ return false;
+
+ resource = kunit_find_named_resource(current->kunit_test, "slab_errors");
+ if (!resource)
+ return false;
+
+ (*(int *)resource->data)++;
+ kunit_put_resource(resource);
+ return true;
+}
+#else
+static inline bool slab_add_kunit_errors(void) { return false; }
+#endif
+
/*
* Determine a map of object in use on a page.
*
@@ -679,6 +700,9 @@ static void slab_fix(struct kmem_cache *s, char *fmt, ...)
struct va_format vaf;
va_list args;
+ if (slab_add_kunit_errors())
+ return;
+
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
@@ -742,6 +766,9 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
void object_err(struct kmem_cache *s, struct page *page,
u8 *object, char *reason)
{
+ if (slab_add_kunit_errors())
+ return;
+
slab_bug(s, "%s", reason);
print_trailer(s, page, object);
}
@@ -752,6 +779,9 @@ static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page,
va_list args;
char buf[100];
+ if (slab_add_kunit_errors())
+ return;
+
va_start(args, fmt);
vsnprintf(buf, sizeof(buf), fmt, args);
va_end(args);
@@ -801,12 +831,16 @@ static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
while (end > fault && end[-1] == value)
end--;
+ if (slab_add_kunit_errors())
+ goto skip_bug_print;
+
slab_bug(s, "%s overwritten", what);
pr_err("0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n",
fault, end - 1, fault - addr,
fault[0], value);
print_trailer(s, page, object);
+skip_bug_print:
restore_bytes(s, what, value, fault, end);
return 0;
}
@@ -4649,9 +4683,11 @@ static int validate_slab_node(struct kmem_cache *s,
validate_slab(s, page);
count++;
}
- if (count != n->nr_partial)
+ if (count != n->nr_partial) {
pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n",
s->name, count, n->nr_partial);
+ slab_add_kunit_errors();
+ }
if (!(s->flags & SLAB_STORE_USER))
goto out;
@@ -4660,16 +4696,18 @@ static int validate_slab_node(struct kmem_cache *s,
validate_slab(s, page);
count++;
}
- if (count != atomic_long_read(&n->nr_slabs))
+ if (count != atomic_long_read(&n->nr_slabs)) {
pr_err("SLUB: %s %ld slabs counted but counter=%ld\n",
s->name, count, atomic_long_read(&n->nr_slabs));
+ slab_add_kunit_errors();
+ }
out:
spin_unlock_irqrestore(&n->list_lock, flags);
return count;
}
-static long validate_slab_cache(struct kmem_cache *s)
+long validate_slab_cache(struct kmem_cache *s)
{
int node;
unsigned long count = 0;
@@ -4681,6 +4719,8 @@ static long validate_slab_cache(struct kmem_cache *s)
return count;
}
+EXPORT_SYMBOL(validate_slab_cache);
+
/*
* Generate lists of code addresses where slabcache objects are allocated
* and freed.
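[Editor's note: to exercise the new CONFIG_SLUB_KUNIT_TEST option mentioned in the commit message, a config fragment along these lines should work with the standard KUnit wrapper; the exact minimal set of options is an assumption.]

# .kunitconfig fragment (sketch)
CONFIG_KUNIT=y
CONFIG_SLUB=y
CONFIG_SLUB_DEBUG=y
CONFIG_SLUB_KUNIT_TEST=y

$ ./tools/testing/kunit/kunit.py run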