 include/linux/radix-tree.h                  |  3
 include/linux/xarray.h                      | 34
 lib/idr.c                                   |  1
 lib/radix-tree.c                            |  1
 lib/test_xarray.c                           | 32
 lib/xarray.c                                | 25
 mm/workingset.c                             | 13
 tools/testing/radix-tree/idr-test.c         | 29
 tools/testing/radix-tree/linux/kernel.h     |  1
 tools/testing/radix-tree/linux/local_lock.h |  8
 tools/testing/radix-tree/test.h             |  4
 11 files changed, 116 insertions(+), 35 deletions(-)
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index c2a9f7c90727..64ad900ac742 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -11,6 +11,7 @@
 #include <linux/bitops.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
+#include <linux/percpu.h>
 #include <linux/preempt.h>
 #include <linux/rcupdate.h>
 #include <linux/spinlock.h>
@@ -376,7 +377,7 @@ radix_tree_chunk_size(struct radix_tree_iter *iter)
  * radix_tree_next_slot - find next slot in chunk
  *
  * @slot: pointer to current slot
- * @iter: pointer to interator state
+ * @iter: pointer to iterator state
  * @flags: RADIX_TREE_ITER_*, should be constant
  * Returns: pointer to next slot, or NULL if there no more left
  *
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 5cdf441f6377..92c0160b3352 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -1286,6 +1286,8 @@ static inline bool xa_is_advanced(const void *entry)
  */
 typedef void (*xa_update_node_t)(struct xa_node *node);
 
+void xa_delete_node(struct xa_node *, xa_update_node_t);
+
 /*
  * The xa_state is opaque to its users. It contains various different pieces
  * of state involved in the current operation on the XArray. It should be
@@ -1544,10 +1546,21 @@ static inline void xas_split_alloc(struct xa_state *xas, void *entry,
 static inline void *xas_reload(struct xa_state *xas)
 {
 	struct xa_node *node = xas->xa_node;
-
-	if (node)
-		return xa_entry(xas->xa, node, xas->xa_offset);
-	return xa_head(xas->xa);
+	void *entry;
+	char offset;
+
+	if (!node)
+		return xa_head(xas->xa);
+	if (IS_ENABLED(CONFIG_XARRAY_MULTI)) {
+		offset = (xas->xa_index >> node->shift) & XA_CHUNK_MASK;
+		entry = xa_entry(xas->xa, node, offset);
+		if (!xa_is_sibling(entry))
+			return entry;
+		offset = xa_to_sibling(entry);
+	} else {
+		offset = xas->xa_offset;
+	}
+	return xa_entry(xas->xa, node, offset);
 }
 
 /**
@@ -1736,13 +1749,12 @@ enum {
 * @xas: XArray operation state.
 * @entry: Entry retrieved from the array.
 *
- * The loop body will be executed for each entry in the XArray that lies
- * within the range specified by @xas. If the loop completes successfully,
- * any entries that lie in this range will be replaced by @entry. The caller
- * may break out of the loop; if they do so, the contents of the XArray will
- * be unchanged. The operation may fail due to an out of memory condition.
- * The caller may also call xa_set_err() to exit the loop while setting an
- * error to record the reason.
+ * The loop body will be executed for each entry in the XArray that
+ * lies within the range specified by @xas. If the loop terminates
+ * normally, @entry will be %NULL. The user may break out of the loop,
+ * which will leave @entry set to the conflicting entry. The caller
+ * may also call xa_set_err() to exit the loop while setting an error
+ * to record the reason.
 */
 #define xas_for_each_conflict(xas, entry) \
	while ((entry = xas_find_conflict(xas)))
diff --git a/lib/idr.c b/lib/idr.c
index 3fa8be43696f..f4ab4f4aa3c7 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -471,6 +471,7 @@ alloc:
 		goto retry;
 nospc:
 	xas_unlock_irqrestore(&xas, flags);
+	kfree(alloc);
 	return -ENOSPC;
 }
 EXPORT_SYMBOL(ida_alloc_range);
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 005ced92a4a9..3a4da11b804d 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -20,7 +20,6 @@
 #include <linux/kernel.h>
 #include <linux/kmemleak.h>
 #include <linux/percpu.h>
-#include <linux/local_lock.h>
 #include <linux/preempt.h>		/* in_interrupt() */
 #include <linux/radix-tree.h>
 #include <linux/rcupdate.h>
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index 8262c3f05a5d..8294f43f4981 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -289,6 +289,27 @@ static noinline void check_xa_mark_2(struct xarray *xa)
 	xa_destroy(xa);
 }
 
+static noinline void check_xa_mark_3(struct xarray *xa)
+{
+#ifdef CONFIG_XARRAY_MULTI
+	XA_STATE(xas, xa, 0x41);
+	void *entry;
+	int count = 0;
+
+	xa_store_order(xa, 0x40, 2, xa_mk_index(0x40), GFP_KERNEL);
+	xa_set_mark(xa, 0x41, XA_MARK_0);
+
+	rcu_read_lock();
+	xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0) {
+		count++;
+		XA_BUG_ON(xa, entry != xa_mk_index(0x40));
+	}
+	XA_BUG_ON(xa, count != 1);
+	rcu_read_unlock();
+	xa_destroy(xa);
+#endif
+}
+
 static noinline void check_xa_mark(struct xarray *xa)
 {
 	unsigned long index;
@@ -297,6 +318,7 @@ static noinline void check_xa_mark(struct xarray *xa)
 		check_xa_mark_1(xa, index);
 
 	check_xa_mark_2(xa);
+	check_xa_mark_3(xa);
 }
 
 static noinline void check_xa_shrink(struct xarray *xa)
@@ -393,6 +415,9 @@ static noinline void check_cmpxchg(struct xarray *xa)
 	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, FIVE, LOTS, GFP_KERNEL) != FIVE);
 	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != NULL);
 	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, NULL, FIVE, GFP_KERNEL) != NULL);
+	XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) != -EBUSY);
+	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != FIVE);
+	XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) == -EBUSY);
 	xa_erase_index(xa, 12345678);
 	xa_erase_index(xa, 5);
 	XA_BUG_ON(xa, !xa_empty(xa));
@@ -1618,14 +1643,9 @@ static noinline void shadow_remove(struct xarray *xa)
 	xa_lock(xa);
 	while ((node = list_first_entry_or_null(&shadow_nodes,
 					struct xa_node, private_list))) {
-		XA_STATE(xas, node->array, 0);
 		XA_BUG_ON(xa, node->array != xa);
 		list_del_init(&node->private_list);
-		xas.xa_node = xa_parent_locked(node->array, node);
-		xas.xa_offset = node->offset;
-		xas.xa_shift = node->shift + XA_CHUNK_SHIFT;
-		xas_set_update(&xas, test_update_node);
-		xas_store(&xas, NULL);
+		xa_delete_node(node, test_update_node);
 	}
 	xa_unlock(xa);
 }
diff --git a/lib/xarray.c b/lib/xarray.c
index b76eea7b314c..5fa51614802a 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -706,7 +706,7 @@ void xas_create_range(struct xa_state *xas)
 	unsigned char shift = xas->xa_shift;
 	unsigned char sibs = xas->xa_sibs;
 
-	xas->xa_index |= ((sibs + 1) << shift) - 1;
+	xas->xa_index |= ((sibs + 1UL) << shift) - 1;
 	if (xas_is_node(xas) && xas->xa_node->shift == xas->xa_shift)
 		xas->xa_offset |= sibs;
 	xas->xa_shift = 0;
@@ -2164,6 +2164,29 @@ unsigned int xa_extract(struct xarray *xa, void **dst, unsigned long start,
 EXPORT_SYMBOL(xa_extract);
 
 /**
+ * xa_delete_node() - Private interface for workingset code.
+ * @node: Node to be removed from the tree.
+ * @update: Function to call to update ancestor nodes.
+ *
+ * Context: xa_lock must be held on entry and will not be released.
+ */
+void xa_delete_node(struct xa_node *node, xa_update_node_t update)
+{
+	struct xa_state xas = {
+		.xa = node->array,
+		.xa_index = (unsigned long)node->offset <<
+				(node->shift + XA_CHUNK_SHIFT),
+		.xa_shift = node->shift + XA_CHUNK_SHIFT,
+		.xa_offset = node->offset,
+		.xa_node = xa_parent_locked(node->array, node),
+		.xa_update = update,
+	};
+
+	xas_store(&xas, NULL);
+}
+EXPORT_SYMBOL_GPL(xa_delete_node);	/* For the benefit of the test suite */
+
+/**
 * xa_destroy() - Free all internal data structures.
 * @xa: XArray.
 *
diff --git a/mm/workingset.c b/mm/workingset.c
index 8ed8e6296d8c..975a4d2dd02e 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -519,12 +519,11 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
 					  void *arg) __must_hold(lru_lock)
 {
 	struct xa_node *node = container_of(item, struct xa_node, private_list);
-	XA_STATE(xas, node->array, 0);
 	struct address_space *mapping;
 	int ret;
 
 	/*
-	 * Page cache insertions and deletions synchroneously maintain
+	 * Page cache insertions and deletions synchronously maintain
 	 * the shadow node LRU under the i_pages lock and the
 	 * lru_lock. Because the page cache tree is emptied before
 	 * the inode can be destroyed, holding the lru_lock pins any
@@ -559,15 +558,7 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
 	if (WARN_ON_ONCE(node->count != node->nr_values))
 		goto out_invalid;
 	mapping->nrexceptional -= node->nr_values;
-	xas.xa_node = xa_parent_locked(&mapping->i_pages, node);
-	xas.xa_offset = node->offset;
-	xas.xa_shift = node->shift + XA_CHUNK_SHIFT;
-	xas_set_update(&xas, workingset_update_node);
-	/*
-	 * We could store a shadow entry here which was the minimum of the
-	 * shadow entries we were tracking ...
-	 */
-	xas_store(&xas, NULL);
+	xa_delete_node(node, workingset_update_node);
 	__inc_lruvec_slab_state(node, WORKINGSET_NODERECLAIM);
 
 out_invalid:
diff --git a/tools/testing/radix-tree/idr-test.c b/tools/testing/radix-tree/idr-test.c
index 8995092d541e..3b796dd5e577 100644
--- a/tools/testing/radix-tree/idr-test.c
+++ b/tools/testing/radix-tree/idr-test.c
@@ -523,8 +523,27 @@ static void *ida_random_fn(void *arg)
 	return NULL;
 }
 
+static void *ida_leak_fn(void *arg)
+{
+	struct ida *ida = arg;
+	time_t s = time(NULL);
+	int i, ret;
+
+	rcu_register_thread();
+
+	do for (i = 0; i < 1000; i++) {
+		ret = ida_alloc_range(ida, 128, 128, GFP_KERNEL);
+		if (ret >= 0)
+			ida_free(ida, 128);
+	} while (time(NULL) < s + 2);
+
+	rcu_unregister_thread();
+	return NULL;
+}
+
 void ida_thread_tests(void)
 {
+	DEFINE_IDA(ida);
 	pthread_t threads[20];
 	int i;
 
@@ -536,6 +555,16 @@ void ida_thread_tests(void)
 
 	while (i--)
 		pthread_join(threads[i], NULL);
+
+	for (i = 0; i < ARRAY_SIZE(threads); i++)
+		if (pthread_create(&threads[i], NULL, ida_leak_fn, &ida)) {
+			perror("creating ida thread");
+			exit(1);
+		}
+
+	while (i--)
+		pthread_join(threads[i], NULL);
+	assert(ida_is_empty(&ida));
 }
 
 void ida_tests(void)
diff --git a/tools/testing/radix-tree/linux/kernel.h b/tools/testing/radix-tree/linux/kernel.h
index 4568248222ae..39867fd80c8f 100644
--- a/tools/testing/radix-tree/linux/kernel.h
+++ b/tools/testing/radix-tree/linux/kernel.h
@@ -22,4 +22,5 @@
 #define __releases(x)
 #define __must_hold(x)
 
+#define EXPORT_PER_CPU_SYMBOL_GPL(x)
 #endif /* _KERNEL_H */
diff --git a/tools/testing/radix-tree/linux/local_lock.h b/tools/testing/radix-tree/linux/local_lock.h
new file mode 100644
index 000000000000..b3cf8b233ca4
--- /dev/null
+++ b/tools/testing/radix-tree/linux/local_lock.h
@@ -0,0 +1,8 @@
+#ifndef _LINUX_LOCAL_LOCK
+#define _LINUX_LOCAL_LOCK
+typedef struct { } local_lock_t;
+
+static inline void local_lock(local_lock_t *lock) { }
+static inline void local_unlock(local_lock_t *lock) { }
+#define INIT_LOCAL_LOCK(x) { }
+#endif
diff --git a/tools/testing/radix-tree/test.h b/tools/testing/radix-tree/test.h
index 34dab4d18744..7ef7067e942c 100644
--- a/tools/testing/radix-tree/test.h
+++ b/tools/testing/radix-tree/test.h
@@ -56,8 +56,4 @@ int root_tag_get(struct radix_tree_root *root, unsigned int tag);
 unsigned long node_maxindex(struct radix_tree_node *);
 unsigned long shift_maxindex(unsigned int shift);
 int radix_tree_cpu_dead(unsigned int cpu);
-struct radix_tree_preload {
-	unsigned nr;
-	struct radix_tree_node *nodes;
-};
 extern struct radix_tree_preload radix_tree_preloads;
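
Note on the new helper: xa_delete_node() packages up the xa_state setup that lib/test_xarray.c and mm/workingset.c previously open-coded before calling xas_store(..., NULL). A minimal sketch of a caller follows, assuming xa_lock is already held as the kernel-doc requires; my_update_node and my_reclaim_node are hypothetical names used only for illustration, not part of this patch:

	/* Sketch only: delete a node once all of its entries are gone.
	 * Ancestor nodes are updated through the callback, just as the
	 * workingset shadow-node reclaim path does with
	 * workingset_update_node.
	 */
	static void my_update_node(struct xa_node *node)
	{
		/* e.g. unlink the node from a shadow-node LRU list */
	}

	static void my_reclaim_node(struct xa_node *node)
	{
		xa_delete_node(node, my_update_node);	/* caller holds xa_lock */
	}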
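
The xas_for_each_conflict() documentation fix is also worth illustrating: the macro loops while xas_find_conflict() returns entries, so @entry is %NULL after normal termination and non-NULL only if the body breaks out. A hedged sketch of that contract (my_xa, my_item, index and order are placeholders; xas_nomem()/error handling omitted):

	XA_STATE_ORDER(xas, &my_xa, index, order);
	void *entry;

	xas_lock(&xas);
	xas_for_each_conflict(&xas, entry) {
		if (!xa_is_value(entry))
			break;		/* leave @entry pointing at the clash */
	}
	if (!entry)			/* loop finished: every conflict was visited */
		xas_store(&xas, my_item);
	xas_unlock(&xas);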