author	Sidhartha Kumar <sidhartha.kumar@oracle.com>	2024-08-12 15:05:43 -0400
committer	Andrew Morton <akpm@linux-foundation.org>	2024-09-01 20:26:11 -0700
commit	617f8e4d76b81361884db852005f519012ddd244
tree	009ff217bf01d80a76fe89f8cf3bb24d9bcaa469 /tools/testing
parent	e1b8b883bb838339eec728de5d02e2f252a7d3d9
maple_tree: add test to replicate low memory race conditions
Add new callback fields to the userspace implementation of struct kmem_cache. This allows callback functions to be executed in order to further test low memory scenarios where node allocation is retried. The callback can help test race conditions by calling a function whenever a simulated low memory event occurs.

Link: https://lkml.kernel.org/r/20240812190543.71967-2-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
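As a quick illustration of the intended use (a minimal sketch, not part of this patch: the cache pointer is assumed to come from the test harness, and racing_writer()/setup_low_memory_race() are illustrative names), a test can wire up the hooks so that a callback runs when a simulated low memory allocation fails and is retried:

/*
 * Minimal sketch, not part of this patch.  kmem_cache_set_callback(),
 * kmem_cache_set_private() and kmem_cache_set_non_kernel() are the userspace
 * test-harness hooks touched by this change; racing_writer() and
 * setup_low_memory_race() are illustrative names only.
 */
static void racing_writer(void *private)
{
	/* mutate the shared structure while the primary writer retries */
}

static void setup_low_memory_race(struct kmem_cache *cachep, void *data)
{
	kmem_cache_set_private(cachep, data);		/* argument passed to the callback */
	kmem_cache_set_callback(cachep, racing_writer);	/* runs on the retried allocation */
	kmem_cache_set_non_kernel(cachep, 0);		/* next non-sleeping allocation fails */
}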
Diffstat (limited to 'tools/testing')
-rw-r--r--	tools/testing/radix-tree/maple.c	63
-rw-r--r--	tools/testing/shared/linux.c	26
2 files changed, 88 insertions, 1 deletion
diff --git a/tools/testing/radix-tree/maple.c b/tools/testing/radix-tree/maple.c
index cd1cf05503b4..ef5b83cf94ea 100644
--- a/tools/testing/radix-tree/maple.c
+++ b/tools/testing/radix-tree/maple.c
@@ -36224,6 +36224,65 @@ static noinline void __init check_mtree_dup(struct maple_tree *mt)
extern void test_kmem_cache_bulk(void);
+/* callback function used for check_nomem_writer_race() */
+static void writer2(void *maple_tree)
+{
+	struct maple_tree *mt = (struct maple_tree *)maple_tree;
+	MA_STATE(mas, mt, 6, 10);
+
+	mtree_lock(mas.tree);
+	mas_store(&mas, xa_mk_value(0xC));
+	mas_destroy(&mas);
+	mtree_unlock(mas.tree);
+}
+
+/*
+ * check_nomem_writer_race() - test a possible race in the mas_nomem() path
+ * @mt: The tree to build.
+ *
+ * There is a possible race condition in low memory conditions when mas_nomem()
+ * gives up its lock. A second writer can change the entry that the primary
+ * writer executing the mas_nomem() path is modifying. This test recreates this
+ * scenario to ensure we are handling it correctly.
+ */
+static void check_nomem_writer_race(struct maple_tree *mt)
+{
+	MA_STATE(mas, mt, 0, 5);
+
+	mt_set_non_kernel(0);
+	/* setup root with 2 values with NULL in between */
+	mtree_store_range(mt, 0, 5, xa_mk_value(0xA), GFP_KERNEL);
+	mtree_store_range(mt, 6, 10, NULL, GFP_KERNEL);
+	mtree_store_range(mt, 11, 15, xa_mk_value(0xB), GFP_KERNEL);
+
+	/* setup writer 2 that will trigger the race condition */
+	mt_set_private(mt);
+	mt_set_callback(writer2);
+
+	mtree_lock(mt);
+	/* erase 0-5 */
+	mas_erase(&mas);
+
+	/* index 6-10 should retain the value from writer 2 */
+	check_load(mt, 6, xa_mk_value(0xC));
+	mtree_unlock(mt);
+
+	/* test for the same race but with mas_store_gfp() */
+	mtree_store_range(mt, 0, 5, xa_mk_value(0xA), GFP_KERNEL);
+	mtree_store_range(mt, 6, 10, NULL, GFP_KERNEL);
+
+	mas_set_range(&mas, 0, 5);
+	mtree_lock(mt);
+	mas_store_gfp(&mas, NULL, GFP_KERNEL);
+
+	/* ensure write made by writer 2 is retained */
+	check_load(mt, 6, xa_mk_value(0xC));
+
+	mt_set_private(NULL);
+	mt_set_callback(NULL);
+	mtree_unlock(mt);
+}
+
void farmer_tests(void)
{
	struct maple_node *node;
@@ -36257,6 +36316,10 @@ void farmer_tests(void)
	check_dfs_preorder(&tree);
	mtree_destroy(&tree);
+	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_USE_RCU);
+	check_nomem_writer_race(&tree);
+	mtree_destroy(&tree);
+
	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
	check_prealloc(&tree);
	mtree_destroy(&tree);
diff --git a/tools/testing/shared/linux.c b/tools/testing/shared/linux.c
index 4eb442206d01..17263696b5d8 100644
--- a/tools/testing/shared/linux.c
+++ b/tools/testing/shared/linux.c
@@ -26,8 +26,21 @@ struct kmem_cache {
	unsigned int non_kernel;
	unsigned long nr_allocated;
	unsigned long nr_tallocated;
+	bool exec_callback;
+	void (*callback)(void *);
+	void *private;
};
+void kmem_cache_set_callback(struct kmem_cache *cachep, void (*callback)(void *))
+{
+	cachep->callback = callback;
+}
+
+void kmem_cache_set_private(struct kmem_cache *cachep, void *private)
+{
+	cachep->private = private;
+}
+
void kmem_cache_set_non_kernel(struct kmem_cache *cachep, unsigned int val)
{
	cachep->non_kernel = val;
@@ -58,9 +71,17 @@ void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
{
	void *p;
+	if (cachep->exec_callback) {
+		if (cachep->callback)
+			cachep->callback(cachep->private);
+		cachep->exec_callback = false;
+	}
+
	if (!(gfp & __GFP_DIRECT_RECLAIM)) {
-		if (!cachep->non_kernel)
+		if (!cachep->non_kernel) {
+			cachep->exec_callback = true;
			return NULL;
+		}
		cachep->non_kernel--;
	}
@@ -223,6 +244,9 @@ kmem_cache_create(const char *name, unsigned int size, unsigned int align,
	ret->objs = NULL;
	ret->ctor = ctor;
	ret->non_kernel = 0;
+	ret->exec_callback = false;
+	ret->callback = NULL;
+	ret->private = NULL;
	return ret;
}
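A note on how the new fields in kmem_cache_alloc_lru() interact: a failed non-sleeping allocation arms exec_callback, and the callback (the simulated second writer) then runs exactly once at the start of the next allocation attempt, after which the flag is cleared. The following self-contained sketch demonstrates that pattern with illustrative names rather than the real harness types:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for the harness cache; not the real struct kmem_cache. */
struct fake_cache {
	bool exec_callback;
	void (*callback)(void *);
	void *private;
	unsigned int non_kernel;	/* budget of non-sleeping allocations allowed to succeed */
};

static void *fake_alloc(struct fake_cache *c, bool can_sleep)
{
	/* run an armed callback once, at the start of the retried attempt */
	if (c->exec_callback) {
		if (c->callback)
			c->callback(c->private);
		c->exec_callback = false;
	}

	if (!can_sleep) {
		if (!c->non_kernel) {
			c->exec_callback = true;	/* arm the callback for the retry */
			return NULL;
		}
		c->non_kernel--;
	}
	return malloc(1);
}

static void second_writer(void *private)
{
	puts("second writer ran while the primary writer was retrying");
}

int main(void)
{
	struct fake_cache c = { .callback = second_writer, .non_kernel = 0 };
	void *p = fake_alloc(&c, false);	/* fails and arms the callback */

	if (!p)
		p = fake_alloc(&c, true);	/* callback fires first, then the allocation succeeds */
	free(p);
	return 0;
}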