 include/linux/xarray.h                | 18 ++++++++++++++++++
 lib/idr.c                             |  2 +-
 lib/xarray.c                          |  8 +++++++-
 tools/testing/radix-tree/multiorder.c | 68 ++++++++++++++++++++++++++++++++--
 4 files changed, 92 insertions(+), 4 deletions(-)
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 741703b45f61..cb571dfcf4b1 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -856,6 +856,9 @@ static inline int __must_check xa_insert_irq(struct xarray *xa,
  * stores the index into the @id pointer, then stores the entry at
  * that index. A concurrent lookup will not see an uninitialised @id.
  *
+ * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
+ * in xa_init_flags().
+ *
  * Context: Any context. Takes and releases the xa_lock. May sleep if
  * the @gfp flags permit.
  * Return: 0 on success, -ENOMEM if memory could not be allocated or
@@ -886,6 +889,9 @@ static inline __must_check int xa_alloc(struct xarray *xa, u32 *id,
  * stores the index into the @id pointer, then stores the entry at
  * that index. A concurrent lookup will not see an uninitialised @id.
  *
+ * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
+ * in xa_init_flags().
+ *
  * Context: Any context. Takes and releases the xa_lock while
  * disabling softirqs. May sleep if the @gfp flags permit.
  * Return: 0 on success, -ENOMEM if memory could not be allocated or
@@ -916,6 +922,9 @@ static inline int __must_check xa_alloc_bh(struct xarray *xa, u32 *id,
  * stores the index into the @id pointer, then stores the entry at
  * that index. A concurrent lookup will not see an uninitialised @id.
  *
+ * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
+ * in xa_init_flags().
+ *
  * Context: Process context. Takes and releases the xa_lock while
  * disabling interrupts. May sleep if the @gfp flags permit.
  * Return: 0 on success, -ENOMEM if memory could not be allocated or
@@ -949,6 +958,9 @@ static inline int __must_check xa_alloc_irq(struct xarray *xa, u32 *id,
  * The search for an empty entry will start at @next and will wrap
  * around if necessary.
  *
+ * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
+ * in xa_init_flags().
+ *
  * Context: Any context. Takes and releases the xa_lock. May sleep if
  * the @gfp flags permit.
  * Return: 0 if the allocation succeeded without wrapping. 1 if the
@@ -983,6 +995,9 @@ static inline int xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
  * The search for an empty entry will start at @next and will wrap
  * around if necessary.
  *
+ * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
+ * in xa_init_flags().
+ *
  * Context: Any context. Takes and releases the xa_lock while
  * disabling softirqs. May sleep if the @gfp flags permit.
  * Return: 0 if the allocation succeeded without wrapping. 1 if the
@@ -1017,6 +1032,9 @@ static inline int xa_alloc_cyclic_bh(struct xarray *xa, u32 *id, void *entry,
  * The search for an empty entry will start at @next and will wrap
  * around if necessary.
  *
+ * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
+ * in xa_init_flags().
+ *
  * Context: Process context. Takes and releases the xa_lock while
  * disabling interrupts. May sleep if the @gfp flags permit.
  * Return: 0 if the allocation succeeded without wrapping. 1 if the
diff --git a/lib/idr.c b/lib/idr.c
index 7ecdfdb5309e..13f2758c2377 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -100,7 +100,7 @@ EXPORT_SYMBOL_GPL(idr_alloc);
  * @end: The maximum ID (exclusive).
  * @gfp: Memory allocation flags.
  *
- * Allocates an unused ID in the range specified by @nextid and @end. If
+ * Allocates an unused ID in the range specified by @start and @end. If
  * @end is <= 0, it is treated as one larger than %INT_MAX. This allows
  * callers to use @start + N as @end as long as N is within integer range.
  * The search for an unused ID will start at the last ID allocated and will
diff --git a/lib/xarray.c b/lib/xarray.c
index 2071a3718f4e..39f07bfc4dcc 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -206,7 +206,7 @@ static void *xas_descend(struct xa_state *xas, struct xa_node *node)
 	void *entry = xa_entry(xas->xa, node, offset);
 
 	xas->xa_node = node;
-	if (xa_is_sibling(entry)) {
+	while (xa_is_sibling(entry)) {
 		offset = xa_to_sibling(entry);
 		entry = xa_entry(xas->xa, node, offset);
 		if (node->shift && xa_is_node(entry))
@@ -1802,6 +1802,9 @@ EXPORT_SYMBOL(xa_get_order);
 * stores the index into the @id pointer, then stores the entry at
 * that index. A concurrent lookup will not see an uninitialised @id.
 *
+ * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
+ * in xa_init_flags().
+ *
 * Context: Any context. Expects xa_lock to be held on entry. May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: 0 on success, -ENOMEM if memory could not be allocated or
@@ -1850,6 +1853,9 @@ EXPORT_SYMBOL(__xa_alloc);
 * The search for an empty entry will start at @next and will wrap
 * around if necessary.
 *
+ * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
+ * in xa_init_flags().
+ *
 * Context: Any context. Expects xa_lock to be held on entry. May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: 0 if the allocation succeeded without wrapping. 1 if the
diff --git a/tools/testing/radix-tree/multiorder.c b/tools/testing/radix-tree/multiorder.c
index e00520cc6349..cffaf2245d4f 100644
--- a/tools/testing/radix-tree/multiorder.c
+++ b/tools/testing/radix-tree/multiorder.c
@@ -159,7 +159,7 @@ void multiorder_tagged_iteration(struct xarray *xa)
 	item_kill_tree(xa);
 }
 
-bool stop_iteration = false;
+bool stop_iteration;
 
 static void *creator_func(void *ptr)
 {
@@ -201,6 +201,7 @@ static void multiorder_iteration_race(struct xarray *xa)
 	pthread_t worker_thread[num_threads];
 	int i;
 
+	stop_iteration = false;
 	pthread_create(&worker_thread[0], NULL, &creator_func, xa);
 	for (i = 1; i < num_threads; i++)
 		pthread_create(&worker_thread[i], NULL, &iterator_func, xa);
@@ -211,6 +212,61 @@ static void multiorder_iteration_race(struct xarray *xa)
 	item_kill_tree(xa);
 }
 
+static void *load_creator(void *ptr)
+{
+	/* 'order' is set up to ensure we have sibling entries */
+	unsigned int order;
+	struct radix_tree_root *tree = ptr;
+	int i;
+
+	rcu_register_thread();
+	item_insert_order(tree, 3 << RADIX_TREE_MAP_SHIFT, 0);
+	item_insert_order(tree, 2 << RADIX_TREE_MAP_SHIFT, 0);
+	for (i = 0; i < 10000; i++) {
+		for (order = 1; order < RADIX_TREE_MAP_SHIFT; order++) {
+			unsigned long index = (3 << RADIX_TREE_MAP_SHIFT) -
+						(1 << order);
+			item_insert_order(tree, index, order);
+			item_delete_rcu(tree, index);
+		}
+	}
+	rcu_unregister_thread();
+
+	stop_iteration = true;
+	return NULL;
+}
+
+static void *load_worker(void *ptr)
+{
+	unsigned long index = (3 << RADIX_TREE_MAP_SHIFT) - 1;
+
+	rcu_register_thread();
+	while (!stop_iteration) {
+		struct item *item = xa_load(ptr, index);
+		assert(!xa_is_internal(item));
+	}
+	rcu_unregister_thread();
+
+	return NULL;
+}
+
+static void load_race(struct xarray *xa)
+{
+	const int num_threads = sysconf(_SC_NPROCESSORS_ONLN) * 4;
+	pthread_t worker_thread[num_threads];
+	int i;
+
+	stop_iteration = false;
+	pthread_create(&worker_thread[0], NULL, &load_creator, xa);
+	for (i = 1; i < num_threads; i++)
+		pthread_create(&worker_thread[i], NULL, &load_worker, xa);
+
+	for (i = 0; i < num_threads; i++)
+		pthread_join(worker_thread[i], NULL);
+
+	item_kill_tree(xa);
+}
+
 static DEFINE_XARRAY(array);
 
 void multiorder_checks(void)
@@ -218,12 +274,20 @@ void multiorder_checks(void)
 	multiorder_iteration(&array);
 	multiorder_tagged_iteration(&array);
 	multiorder_iteration_race(&array);
+	load_race(&array);
 
 	radix_tree_cpu_dead(0);
 }
 
-int __weak main(void)
+int __weak main(int argc, char **argv)
 {
+	int opt;
+
+	while ((opt = getopt(argc, argv, "ls:v")) != -1) {
+		if (opt == 'v')
+			test_verbose++;
+	}
+
 	rcu_register_thread();
 	radix_tree_init();
 	multiorder_checks();
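
A note on the xarray.h documentation added above: the allocating API has always required XA_FLAGS_ALLOC, and these comments now say so explicitly. A minimal sketch of correct usage follows; the names my_objects and track_object are hypothetical, while DEFINE_XARRAY_ALLOC(), xa_init_flags(), xa_alloc() and xa_limit_32b are the real API:

#include <linux/xarray.h>

/*
 * DEFINE_XARRAY_ALLOC() sets XA_FLAGS_ALLOC at definition time, the
 * static equivalent of calling xa_init_flags(&my_objects, XA_FLAGS_ALLOC)
 * at runtime.
 */
static DEFINE_XARRAY_ALLOC(my_objects);

/* Store @obj at the lowest free index and report that index via @id. */
static int track_object(void *obj, u32 *id)
{
	return xa_alloc(&my_objects, id, obj, xa_limit_32b, GFP_KERNEL);
}

Calling xa_alloc() or xa_alloc_cyclic() on an array initialized with plain xa_init() or DEFINE_XARRAY() is a bug; that is the mistake the six new kernel-doc paragraphs warn against.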
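For background on the one-line xas_descend() change in lib/xarray.c: a multi-order entry occupies several slots, and all but the canonical slot hold sibling entries that point back at it. While such an entry is being replaced, a lookup landing on a sibling slot can be redirected to a slot that momentarily holds another sibling entry, and the old single `if` would then let xa_load() return an internal sibling entry to the caller. Here is the function with the change applied, reconstructed for readability; the lines outside the quoted hunk follow the surrounding upstream file, and the comments are added here:

static void *xas_descend(struct xa_state *xas, struct xa_node *node)
{
	unsigned int offset = get_offset(xas->xa_index, node);
	void *entry = xa_entry(xas->xa, node, offset);

	xas->xa_node = node;
	/*
	 * Chase sibling entries until a real entry is reached.  Under
	 * concurrent modification the slot a sibling points at may itself
	 * momentarily hold a sibling entry, so a single hop is not enough.
	 */
	while (xa_is_sibling(entry)) {
		offset = xa_to_sibling(entry);
		entry = xa_entry(xas->xa, node, offset);
		/* Raced with a split of the entry: have the caller retry. */
		if (node->shift && xa_is_node(entry))
			entry = XA_RETRY_ENTRY;
	}

	xas->xa_offset = offset;
	return entry;
}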
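The new load_race() test drives exactly that window: load_creator() repeatedly inserts and deletes multi-order entries whose sibling slots cover index (3 << RADIX_TREE_MAP_SHIFT) - 1, while the worker threads spin on xa_load() at that index and assert that no internal entry ever escapes. Two smaller details of the test changes are worth spelling out. First, stop_iteration is now reset at the start of each racing test, because load_race() reuses the flag that multiorder_iteration_race()'s creator leaves set to true. A sketch of the shared shape, using a hypothetical helper run_race_test() that is not part of the patch:

#include <pthread.h>
#include <stdbool.h>

static bool stop_iteration;

/*
 * Run one creator thread against nr_workers reader threads, as both
 * multiorder race tests do.  Resetting stop_iteration first matters:
 * the previous test's creator left it set to true, and without the
 * reset every worker of the next test would exit immediately and
 * nothing would actually be raced.
 */
static void run_race_test(void *(*creator)(void *), void *(*worker)(void *),
			  void *arg, int nr_workers)
{
	pthread_t threads[nr_workers + 1];
	int i;

	stop_iteration = false;
	pthread_create(&threads[0], NULL, creator, arg);
	for (i = 1; i <= nr_workers; i++)
		pthread_create(&threads[i], NULL, worker, arg);
	for (i = 0; i <= nr_workers; i++)
		pthread_join(threads[i], NULL);
}

Second, main() now takes argc/argv and parses the "ls:v" option string, presumably to match the options accepted elsewhere in the radix-tree test suite; only -v is acted on in this binary, bumping test_verbose, and the other letters are accepted but ignored.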