Diffstat (limited to 'lib')
-rw-r--r-- | lib/Kconfig               |   5
-rw-r--r-- | lib/Makefile              |   2
-rw-r--r-- | lib/argv_split.c          |   4
-rw-r--r-- | lib/iov_iter.c            | 437
-rw-r--r-- | lib/kunit/executor.c      |  48
-rw-r--r-- | lib/kunit/executor_test.c |  13
-rw-r--r-- | lib/kunit/test.c          |   3
-rw-r--r-- | lib/llist.c               |  28
-rw-r--r-- | lib/lwq.c                 | 158
-rw-r--r-- | lib/maple_tree.c          | 223
-rw-r--r-- | lib/scatterlist.c         |   4
-rw-r--r-- | lib/test_maple_tree.c     | 122
12 files changed, 636 insertions, 411 deletions
diff --git a/lib/Kconfig b/lib/Kconfig index 263aa6ae8d7c..2d90935d5a21 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -732,6 +732,11 @@ config PARMAN config OBJAGG tristate "objagg" if COMPILE_TEST +config LWQ_TEST + bool "Boot-time test for lwq queuing" + help + Run boot-time test of light-weight queuing. + endmenu config GENERIC_IOREMAP diff --git a/lib/Makefile b/lib/Makefile index 57d394575919..1b311c7fd32b 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -45,7 +45,7 @@ obj-y += lockref.o obj-y += bcd.o sort.o parser.o debug_locks.o random32.o \ bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \ list_sort.o uuid.o iov_iter.o clz_ctz.o \ - bsearch.o find_bit.o llist.o memweight.o kfifo.o \ + bsearch.o find_bit.o llist.o lwq.o memweight.o kfifo.o \ percpu-refcount.o rhashtable.o base64.o \ once.o refcount.o rcuref.o usercopy.o errseq.o bucket_locks.o \ generic-radix-tree.o diff --git a/lib/argv_split.c b/lib/argv_split.c index 1a19a0a93dc1..e28db8e3b58c 100644 --- a/lib/argv_split.c +++ b/lib/argv_split.c @@ -28,7 +28,7 @@ static int count_argc(const char *str) /** * argv_free - free an argv - * @argv - the argument vector to be freed + * @argv: the argument vector to be freed * * Frees an argv and the strings it points to. */ @@ -46,7 +46,7 @@ EXPORT_SYMBOL(argv_free); * @str: the string to be split * @argcp: returned argument count * - * Returns an array of pointers to strings which are split out from + * Returns: an array of pointers to strings which are split out from * @str. This is performed by strictly splitting on white-space; no * quote processing is performed. Multiple whitespace characters are * considered to be a single argument separator. The returned array diff --git a/lib/iov_iter.c b/lib/iov_iter.c index 27234a820eeb..de7d11cf4c63 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -1,5 +1,4 @@ // SPDX-License-Identifier: GPL-2.0-only -#include <crypto/hash.h> #include <linux/export.h> #include <linux/bvec.h> #include <linux/fault-inject-usercopy.h> @@ -10,192 +9,71 @@ #include <linux/vmalloc.h> #include <linux/splice.h> #include <linux/compat.h> -#include <net/checksum.h> #include <linux/scatterlist.h> #include <linux/instrumented.h> +#include <linux/iov_iter.h> -/* covers ubuf and kbuf alike */ -#define iterate_buf(i, n, base, len, off, __p, STEP) { \ - size_t __maybe_unused off = 0; \ - len = n; \ - base = __p + i->iov_offset; \ - len -= (STEP); \ - i->iov_offset += len; \ - n = len; \ -} - -/* covers iovec and kvec alike */ -#define iterate_iovec(i, n, base, len, off, __p, STEP) { \ - size_t off = 0; \ - size_t skip = i->iov_offset; \ - do { \ - len = min(n, __p->iov_len - skip); \ - if (likely(len)) { \ - base = __p->iov_base + skip; \ - len -= (STEP); \ - off += len; \ - skip += len; \ - n -= len; \ - if (skip < __p->iov_len) \ - break; \ - } \ - __p++; \ - skip = 0; \ - } while (n); \ - i->iov_offset = skip; \ - n = off; \ -} - -#define iterate_bvec(i, n, base, len, off, p, STEP) { \ - size_t off = 0; \ - unsigned skip = i->iov_offset; \ - while (n) { \ - unsigned offset = p->bv_offset + skip; \ - unsigned left; \ - void *kaddr = kmap_local_page(p->bv_page + \ - offset / PAGE_SIZE); \ - base = kaddr + offset % PAGE_SIZE; \ - len = min(min(n, (size_t)(p->bv_len - skip)), \ - (size_t)(PAGE_SIZE - offset % PAGE_SIZE)); \ - left = (STEP); \ - kunmap_local(kaddr); \ - len -= left; \ - off += len; \ - skip += len; \ - if (skip == p->bv_len) { \ - skip = 0; \ - p++; \ - } \ - n -= len; \ - if (left) \ - break; \ - } \ - i->iov_offset = skip; \ - n = off; \ -} - -#define 
iterate_xarray(i, n, base, len, __off, STEP) { \ - __label__ __out; \ - size_t __off = 0; \ - struct folio *folio; \ - loff_t start = i->xarray_start + i->iov_offset; \ - pgoff_t index = start / PAGE_SIZE; \ - XA_STATE(xas, i->xarray, index); \ - \ - len = PAGE_SIZE - offset_in_page(start); \ - rcu_read_lock(); \ - xas_for_each(&xas, folio, ULONG_MAX) { \ - unsigned left; \ - size_t offset; \ - if (xas_retry(&xas, folio)) \ - continue; \ - if (WARN_ON(xa_is_value(folio))) \ - break; \ - if (WARN_ON(folio_test_hugetlb(folio))) \ - break; \ - offset = offset_in_folio(folio, start + __off); \ - while (offset < folio_size(folio)) { \ - base = kmap_local_folio(folio, offset); \ - len = min(n, len); \ - left = (STEP); \ - kunmap_local(base); \ - len -= left; \ - __off += len; \ - n -= len; \ - if (left || n == 0) \ - goto __out; \ - offset += len; \ - len = PAGE_SIZE; \ - } \ - } \ -__out: \ - rcu_read_unlock(); \ - i->iov_offset += __off; \ - n = __off; \ -} - -#define __iterate_and_advance(i, n, base, len, off, I, K) { \ - if (unlikely(i->count < n)) \ - n = i->count; \ - if (likely(n)) { \ - if (likely(iter_is_ubuf(i))) { \ - void __user *base; \ - size_t len; \ - iterate_buf(i, n, base, len, off, \ - i->ubuf, (I)) \ - } else if (likely(iter_is_iovec(i))) { \ - const struct iovec *iov = iter_iov(i); \ - void __user *base; \ - size_t len; \ - iterate_iovec(i, n, base, len, off, \ - iov, (I)) \ - i->nr_segs -= iov - iter_iov(i); \ - i->__iov = iov; \ - } else if (iov_iter_is_bvec(i)) { \ - const struct bio_vec *bvec = i->bvec; \ - void *base; \ - size_t len; \ - iterate_bvec(i, n, base, len, off, \ - bvec, (K)) \ - i->nr_segs -= bvec - i->bvec; \ - i->bvec = bvec; \ - } else if (iov_iter_is_kvec(i)) { \ - const struct kvec *kvec = i->kvec; \ - void *base; \ - size_t len; \ - iterate_iovec(i, n, base, len, off, \ - kvec, (K)) \ - i->nr_segs -= kvec - i->kvec; \ - i->kvec = kvec; \ - } else if (iov_iter_is_xarray(i)) { \ - void *base; \ - size_t len; \ - iterate_xarray(i, n, base, len, off, \ - (K)) \ - } \ - i->count -= n; \ - } \ -} -#define iterate_and_advance(i, n, base, len, off, I, K) \ - __iterate_and_advance(i, n, base, len, off, I, ((void)(K),0)) - -static int copyout(void __user *to, const void *from, size_t n) +static __always_inline +size_t copy_to_user_iter(void __user *iter_to, size_t progress, + size_t len, void *from, void *priv2) { if (should_fail_usercopy()) - return n; - if (access_ok(to, n)) { - instrument_copy_to_user(to, from, n); - n = raw_copy_to_user(to, from, n); + return len; + if (access_ok(iter_to, len)) { + from += progress; + instrument_copy_to_user(iter_to, from, len); + len = raw_copy_to_user(iter_to, from, len); } - return n; + return len; } -static int copyout_nofault(void __user *to, const void *from, size_t n) +static __always_inline +size_t copy_to_user_iter_nofault(void __user *iter_to, size_t progress, + size_t len, void *from, void *priv2) { - long res; + ssize_t res; if (should_fail_usercopy()) - return n; - - res = copy_to_user_nofault(to, from, n); + return len; - return res < 0 ? n : res; + from += progress; + res = copy_to_user_nofault(iter_to, from, len); + return res < 0 ? 
len : res; } -static int copyin(void *to, const void __user *from, size_t n) +static __always_inline +size_t copy_from_user_iter(void __user *iter_from, size_t progress, + size_t len, void *to, void *priv2) { - size_t res = n; + size_t res = len; if (should_fail_usercopy()) - return n; - if (access_ok(from, n)) { - instrument_copy_from_user_before(to, from, n); - res = raw_copy_from_user(to, from, n); - instrument_copy_from_user_after(to, from, n, res); + return len; + if (access_ok(iter_from, len)) { + to += progress; + instrument_copy_from_user_before(to, iter_from, len); + res = raw_copy_from_user(to, iter_from, len); + instrument_copy_from_user_after(to, iter_from, len, res); } return res; } +static __always_inline +size_t memcpy_to_iter(void *iter_to, size_t progress, + size_t len, void *from, void *priv2) +{ + memcpy(iter_to, from + progress, len); + return 0; +} + +static __always_inline +size_t memcpy_from_iter(void *iter_from, size_t progress, + size_t len, void *to, void *priv2) +{ + memcpy(to + progress, iter_from, len); + return 0; +} + /* * fault_in_iov_iter_readable - fault in iov iterator for reading * @i: iterator @@ -290,7 +168,6 @@ void iov_iter_init(struct iov_iter *i, unsigned int direction, .iter_type = ITER_IOVEC, .copy_mc = false, .nofault = false, - .user_backed = true, .data_source = direction, .__iov = iov, .nr_segs = nr_segs, @@ -300,36 +177,35 @@ void iov_iter_init(struct iov_iter *i, unsigned int direction, } EXPORT_SYMBOL(iov_iter_init); -static __wsum csum_and_memcpy(void *to, const void *from, size_t len, - __wsum sum, size_t off) -{ - __wsum next = csum_partial_copy_nocheck(from, to, len); - return csum_block_add(sum, next, off); -} - size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i) { if (WARN_ON_ONCE(i->data_source)) return 0; if (user_backed_iter(i)) might_fault(); - iterate_and_advance(i, bytes, base, len, off, - copyout(base, addr + off, len), - memcpy(base, addr + off, len) - ) - - return bytes; + return iterate_and_advance(i, bytes, (void *)addr, + copy_to_user_iter, memcpy_to_iter); } EXPORT_SYMBOL(_copy_to_iter); #ifdef CONFIG_ARCH_HAS_COPY_MC -static int copyout_mc(void __user *to, const void *from, size_t n) -{ - if (access_ok(to, n)) { - instrument_copy_to_user(to, from, n); - n = copy_mc_to_user((__force void *) to, from, n); +static __always_inline +size_t copy_to_user_iter_mc(void __user *iter_to, size_t progress, + size_t len, void *from, void *priv2) +{ + if (access_ok(iter_to, len)) { + from += progress; + instrument_copy_to_user(iter_to, from, len); + len = copy_mc_to_user(iter_to, from, len); } - return n; + return len; +} + +static __always_inline +size_t memcpy_to_iter_mc(void *iter_to, size_t progress, + size_t len, void *from, void *priv2) +{ + return copy_mc_to_kernel(iter_to, from + progress, len); } /** @@ -362,22 +238,35 @@ size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i) return 0; if (user_backed_iter(i)) might_fault(); - __iterate_and_advance(i, bytes, base, len, off, - copyout_mc(base, addr + off, len), - copy_mc_to_kernel(base, addr + off, len) - ) - - return bytes; + return iterate_and_advance(i, bytes, (void *)addr, + copy_to_user_iter_mc, memcpy_to_iter_mc); } EXPORT_SYMBOL_GPL(_copy_mc_to_iter); #endif /* CONFIG_ARCH_HAS_COPY_MC */ -static void *memcpy_from_iter(struct iov_iter *i, void *to, const void *from, - size_t size) +static __always_inline +size_t memcpy_from_iter_mc(void *iter_from, size_t progress, + size_t len, void *to, void *priv2) +{ + return 
copy_mc_to_kernel(to + progress, iter_from, len); +} + +static size_t __copy_from_iter_mc(void *addr, size_t bytes, struct iov_iter *i) +{ + if (unlikely(i->count < bytes)) + bytes = i->count; + if (unlikely(!bytes)) + return 0; + return iterate_bvec(i, bytes, addr, NULL, memcpy_from_iter_mc); +} + +static __always_inline +size_t __copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) { - if (iov_iter_is_copy_mc(i)) - return (void *)copy_mc_to_kernel(to, from, size); - return memcpy(to, from, size); + if (unlikely(iov_iter_is_copy_mc(i))) + return __copy_from_iter_mc(addr, bytes, i); + return iterate_and_advance(i, bytes, addr, + copy_from_user_iter, memcpy_from_iter); } size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) @@ -387,30 +276,44 @@ size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) if (user_backed_iter(i)) might_fault(); - iterate_and_advance(i, bytes, base, len, off, - copyin(addr + off, base, len), - memcpy_from_iter(i, addr + off, base, len) - ) - - return bytes; + return __copy_from_iter(addr, bytes, i); } EXPORT_SYMBOL(_copy_from_iter); +static __always_inline +size_t copy_from_user_iter_nocache(void __user *iter_from, size_t progress, + size_t len, void *to, void *priv2) +{ + return __copy_from_user_inatomic_nocache(to + progress, iter_from, len); +} + size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i) { if (WARN_ON_ONCE(!i->data_source)) return 0; - iterate_and_advance(i, bytes, base, len, off, - __copy_from_user_inatomic_nocache(addr + off, base, len), - memcpy(addr + off, base, len) - ) - - return bytes; + return iterate_and_advance(i, bytes, addr, + copy_from_user_iter_nocache, + memcpy_from_iter); } EXPORT_SYMBOL(_copy_from_iter_nocache); #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE +static __always_inline +size_t copy_from_user_iter_flushcache(void __user *iter_from, size_t progress, + size_t len, void *to, void *priv2) +{ + return __copy_from_user_flushcache(to + progress, iter_from, len); +} + +static __always_inline +size_t memcpy_from_iter_flushcache(void *iter_from, size_t progress, + size_t len, void *to, void *priv2) +{ + memcpy_flushcache(to + progress, iter_from, len); + return 0; +} + /** * _copy_from_iter_flushcache - write destination through cpu cache * @addr: destination kernel address @@ -432,12 +335,9 @@ size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i) if (WARN_ON_ONCE(!i->data_source)) return 0; - iterate_and_advance(i, bytes, base, len, off, - __copy_from_user_flushcache(addr + off, base, len), - memcpy_flushcache(addr + off, base, len) - ) - - return bytes; + return iterate_and_advance(i, bytes, addr, + copy_from_user_iter_flushcache, + memcpy_from_iter_flushcache); } EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache); #endif @@ -509,10 +409,9 @@ size_t copy_page_to_iter_nofault(struct page *page, unsigned offset, size_t byte void *kaddr = kmap_local_page(page); size_t n = min(bytes, (size_t)PAGE_SIZE - offset); - iterate_and_advance(i, n, base, len, off, - copyout_nofault(base, kaddr + offset + off, len), - memcpy(base, kaddr + offset + off, len) - ) + n = iterate_and_advance(i, bytes, kaddr, + copy_to_user_iter_nofault, + memcpy_to_iter); kunmap_local(kaddr); res += n; bytes -= n; @@ -555,14 +454,25 @@ size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes, } EXPORT_SYMBOL(copy_page_from_iter); -size_t iov_iter_zero(size_t bytes, struct iov_iter *i) +static __always_inline +size_t zero_to_user_iter(void __user *iter_to, size_t progress, 
+ size_t len, void *priv, void *priv2) { - iterate_and_advance(i, bytes, base, len, count, - clear_user(base, len), - memset(base, 0, len) - ) + return clear_user(iter_to, len); +} + +static __always_inline +size_t zero_to_iter(void *iter_to, size_t progress, + size_t len, void *priv, void *priv2) +{ + memset(iter_to, 0, len); + return 0; +} - return bytes; +size_t iov_iter_zero(size_t bytes, struct iov_iter *i) +{ + return iterate_and_advance(i, bytes, NULL, + zero_to_user_iter, zero_to_iter); } EXPORT_SYMBOL(iov_iter_zero); @@ -587,10 +497,7 @@ size_t copy_page_from_iter_atomic(struct page *page, size_t offset, } p = kmap_atomic(page) + offset; - iterate_and_advance(i, n, base, len, off, - copyin(p + off, base, len), - memcpy_from_iter(i, p + off, base, len) - ) + n = __copy_from_iter(p, n, i); kunmap_atomic(p); copied += n; offset += n; @@ -1181,78 +1088,6 @@ ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i, } EXPORT_SYMBOL(iov_iter_get_pages_alloc2); -size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, - struct iov_iter *i) -{ - __wsum sum, next; - sum = *csum; - if (WARN_ON_ONCE(!i->data_source)) - return 0; - - iterate_and_advance(i, bytes, base, len, off, ({ - next = csum_and_copy_from_user(base, addr + off, len); - sum = csum_block_add(sum, next, off); - next ? 0 : len; - }), ({ - sum = csum_and_memcpy(addr + off, base, len, sum, off); - }) - ) - *csum = sum; - return bytes; -} -EXPORT_SYMBOL(csum_and_copy_from_iter); - -size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate, - struct iov_iter *i) -{ - struct csum_state *csstate = _csstate; - __wsum sum, next; - - if (WARN_ON_ONCE(i->data_source)) - return 0; - if (unlikely(iov_iter_is_discard(i))) { - // can't use csum_memcpy() for that one - data is not copied - csstate->csum = csum_block_add(csstate->csum, - csum_partial(addr, bytes, 0), - csstate->off); - csstate->off += bytes; - return bytes; - } - - sum = csum_shift(csstate->csum, csstate->off); - iterate_and_advance(i, bytes, base, len, off, ({ - next = csum_and_copy_to_user(addr + off, base, len); - sum = csum_block_add(sum, next, off); - next ? 0 : len; - }), ({ - sum = csum_and_memcpy(base, addr + off, len, sum, off); - }) - ) - csstate->csum = csum_shift(sum, csstate->off); - csstate->off += bytes; - return bytes; -} -EXPORT_SYMBOL(csum_and_copy_to_iter); - -size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp, - struct iov_iter *i) -{ -#ifdef CONFIG_CRYPTO_HASH - struct ahash_request *hash = hashp; - struct scatterlist sg; - size_t copied; - - copied = copy_to_iter(addr, bytes, i); - sg_init_one(&sg, addr, copied); - ahash_request_set_crypt(hash, &sg, NULL, copied); - crypto_ahash_update(hash); - return copied; -#else - return 0; -#endif -} -EXPORT_SYMBOL(hash_and_copy_to_iter); - static int iov_npages(const struct iov_iter *i, int maxpages) { size_t skip = i->iov_offset, size = i->count; diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c index 5181aa2e760b..a6348489d45f 100644 --- a/lib/kunit/executor.c +++ b/lib/kunit/executor.c @@ -65,7 +65,7 @@ struct kunit_glob_filter { }; /* Split "suite_glob.test_glob" into two. Assumes filter_glob is not empty. 
*/ -static void kunit_parse_glob_filter(struct kunit_glob_filter *parsed, +static int kunit_parse_glob_filter(struct kunit_glob_filter *parsed, const char *filter_glob) { const int len = strlen(filter_glob); @@ -73,16 +73,28 @@ static void kunit_parse_glob_filter(struct kunit_glob_filter *parsed, if (!period) { parsed->suite_glob = kzalloc(len + 1, GFP_KERNEL); + if (!parsed->suite_glob) + return -ENOMEM; + parsed->test_glob = NULL; strcpy(parsed->suite_glob, filter_glob); - return; + return 0; } parsed->suite_glob = kzalloc(period - filter_glob + 1, GFP_KERNEL); + if (!parsed->suite_glob) + return -ENOMEM; + parsed->test_glob = kzalloc(len - (period - filter_glob) + 1, GFP_KERNEL); + if (!parsed->test_glob) { + kfree(parsed->suite_glob); + return -ENOMEM; + } strncpy(parsed->suite_glob, filter_glob, period - filter_glob); strncpy(parsed->test_glob, period + 1, len - (period - filter_glob)); + + return 0; } /* Create a copy of suite with only tests that match test_glob. */ @@ -152,21 +164,24 @@ kunit_filter_suites(const struct kunit_suite_set *suite_set, } copy_start = copy; - if (filter_glob) - kunit_parse_glob_filter(&parsed_glob, filter_glob); + if (filter_glob) { + *err = kunit_parse_glob_filter(&parsed_glob, filter_glob); + if (*err) + goto free_copy; + } /* Parse attribute filters */ if (filters) { filter_count = kunit_get_filter_count(filters); parsed_filters = kcalloc(filter_count, sizeof(*parsed_filters), GFP_KERNEL); if (!parsed_filters) { - kfree(copy); - return filtered; + *err = -ENOMEM; + goto free_parsed_glob; } for (j = 0; j < filter_count; j++) parsed_filters[j] = kunit_next_attr_filter(&filters, err); if (*err) - goto err; + goto free_parsed_filters; } for (i = 0; &suite_set->start[i] != suite_set->end; i++) { @@ -178,7 +193,7 @@ kunit_filter_suites(const struct kunit_suite_set *suite_set, parsed_glob.test_glob); if (IS_ERR(filtered_suite)) { *err = PTR_ERR(filtered_suite); - goto err; + goto free_parsed_filters; } } if (filter_count > 0 && parsed_filters != NULL) { @@ -195,10 +210,11 @@ kunit_filter_suites(const struct kunit_suite_set *suite_set, filtered_suite = new_filtered_suite; if (*err) - goto err; + goto free_parsed_filters; + if (IS_ERR(filtered_suite)) { *err = PTR_ERR(filtered_suite); - goto err; + goto free_parsed_filters; } if (!filtered_suite) break; @@ -213,17 +229,19 @@ kunit_filter_suites(const struct kunit_suite_set *suite_set, filtered.start = copy_start; filtered.end = copy; -err: - if (*err) - kfree(copy); +free_parsed_filters: + if (filter_count) + kfree(parsed_filters); +free_parsed_glob: if (filter_glob) { kfree(parsed_glob.suite_glob); kfree(parsed_glob.test_glob); } - if (filter_count) - kfree(parsed_filters); +free_copy: + if (*err) + kfree(copy); return filtered; } diff --git a/lib/kunit/executor_test.c b/lib/kunit/executor_test.c index 4084071d0eb5..b4f6f96b2844 100644 --- a/lib/kunit/executor_test.c +++ b/lib/kunit/executor_test.c @@ -119,7 +119,7 @@ static void parse_filter_attr_test(struct kunit *test) { int j, filter_count; struct kunit_attr_filter *parsed_filters; - char *filters = "speed>slow, module!=example"; + char filters[] = "speed>slow, module!=example", *filter = filters; int err = 0; filter_count = kunit_get_filter_count(filters); @@ -128,7 +128,7 @@ static void parse_filter_attr_test(struct kunit *test) parsed_filters = kunit_kcalloc(test, filter_count, sizeof(*parsed_filters), GFP_KERNEL); for (j = 0; j < filter_count; j++) { - parsed_filters[j] = kunit_next_attr_filter(&filters, &err); + parsed_filters[j] = 
kunit_next_attr_filter(&filter, &err); KUNIT_ASSERT_EQ_MSG(test, err, 0, "failed to parse filter '%s'", filters[j]); } @@ -154,6 +154,7 @@ static void filter_attr_test(struct kunit *test) .start = subsuite, .end = &subsuite[2], }; struct kunit_suite_set got; + char filter[] = "speed>slow"; int err = 0; subsuite[0] = alloc_fake_suite(test, "normal_suite", dummy_attr_test_cases); @@ -168,7 +169,7 @@ static void filter_attr_test(struct kunit *test) * attribute is unset and thus, the filtering is based on the parent attribute * of slow. */ - got = kunit_filter_suites(&suite_set, NULL, "speed>slow", NULL, &err); + got = kunit_filter_suites(&suite_set, NULL, filter, NULL, &err); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start); KUNIT_ASSERT_EQ(test, err, 0); kfree_at_end(test, got.start); @@ -191,12 +192,13 @@ static void filter_attr_empty_test(struct kunit *test) .start = subsuite, .end = &subsuite[2], }; struct kunit_suite_set got; + char filter[] = "module!=dummy"; int err = 0; subsuite[0] = alloc_fake_suite(test, "suite1", dummy_attr_test_cases); subsuite[1] = alloc_fake_suite(test, "suite2", dummy_attr_test_cases); - got = kunit_filter_suites(&suite_set, NULL, "module!=dummy", NULL, &err); + got = kunit_filter_suites(&suite_set, NULL, filter, NULL, &err); KUNIT_ASSERT_EQ(test, err, 0); kfree_at_end(test, got.start); /* just in case */ @@ -211,12 +213,13 @@ static void filter_attr_skip_test(struct kunit *test) .start = subsuite, .end = &subsuite[1], }; struct kunit_suite_set got; + char filter[] = "speed>slow"; int err = 0; subsuite[0] = alloc_fake_suite(test, "suite", dummy_attr_test_cases); /* Want: suite(slow, normal), NULL -> suite(slow with SKIP, normal), NULL */ - got = kunit_filter_suites(&suite_set, NULL, "speed>slow", "skip", &err); + got = kunit_filter_suites(&suite_set, NULL, filter, "skip", &err); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start); KUNIT_ASSERT_EQ(test, err, 0); kfree_at_end(test, got.start); diff --git a/lib/kunit/test.c b/lib/kunit/test.c index 49698a168437..421f13981412 100644 --- a/lib/kunit/test.c +++ b/lib/kunit/test.c @@ -784,12 +784,13 @@ static int kunit_module_notify(struct notifier_block *nb, unsigned long val, switch (val) { case MODULE_STATE_LIVE: - kunit_module_init(mod); break; case MODULE_STATE_GOING: kunit_module_exit(mod); break; case MODULE_STATE_COMING: + kunit_module_init(mod); + break; case MODULE_STATE_UNFORMED: break; } diff --git a/lib/llist.c b/lib/llist.c index 6e668fa5a2c6..f21d0cfbbaaa 100644 --- a/lib/llist.c +++ b/lib/llist.c @@ -66,6 +66,34 @@ struct llist_node *llist_del_first(struct llist_head *head) EXPORT_SYMBOL_GPL(llist_del_first); /** + * llist_del_first_this - delete given entry of lock-less list if it is first + * @head: the head for your lock-less list + * @this: a list entry. + * + * If head of the list is given entry, delete and return %true else + * return %false. + * + * Multiple callers can safely call this concurrently with multiple + * llist_add() callers, providing all the callers offer a different @this. 
+ */ +bool llist_del_first_this(struct llist_head *head, + struct llist_node *this) +{ + struct llist_node *entry, *next; + + /* acquire ensures orderig wrt try_cmpxchg() is llist_del_first() */ + entry = smp_load_acquire(&head->first); + do { + if (entry != this) + return false; + next = READ_ONCE(entry->next); + } while (!try_cmpxchg(&head->first, &entry, next)); + + return true; +} +EXPORT_SYMBOL_GPL(llist_del_first_this); + +/** * llist_reverse_order - reverse order of a llist chain * @head: first item of the list to be reversed * diff --git a/lib/lwq.c b/lib/lwq.c new file mode 100644 index 000000000000..57d080a4d53d --- /dev/null +++ b/lib/lwq.c @@ -0,0 +1,158 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Light-weight single-linked queue. + * + * Entries are enqueued to the head of an llist, with no blocking. + * This can happen in any context. + * + * Entries are dequeued using a spinlock to protect against multiple + * access. The llist is staged in reverse order, and refreshed + * from the llist when it exhausts. + * + * This is particularly suitable when work items are queued in BH or + * IRQ context, and where work items are handled one at a time by + * dedicated threads. + */ +#include <linux/rcupdate.h> +#include <linux/lwq.h> + +struct llist_node *__lwq_dequeue(struct lwq *q) +{ + struct llist_node *this; + + if (lwq_empty(q)) + return NULL; + spin_lock(&q->lock); + this = q->ready; + if (!this && !llist_empty(&q->new)) { + /* ensure queue doesn't appear transiently lwq_empty */ + smp_store_release(&q->ready, (void *)1); + this = llist_reverse_order(llist_del_all(&q->new)); + if (!this) + q->ready = NULL; + } + if (this) + q->ready = llist_next(this); + spin_unlock(&q->lock); + return this; +} +EXPORT_SYMBOL_GPL(__lwq_dequeue); + +/** + * lwq_dequeue_all - dequeue all currently enqueued objects + * @q: the queue to dequeue from + * + * Remove and return a linked list of llist_nodes of all the objects that were + * in the queue. The first on the list will be the object that was least + * recently enqueued. 
+ */ +struct llist_node *lwq_dequeue_all(struct lwq *q) +{ + struct llist_node *r, *t, **ep; + + if (lwq_empty(q)) + return NULL; + + spin_lock(&q->lock); + r = q->ready; + q->ready = NULL; + t = llist_del_all(&q->new); + spin_unlock(&q->lock); + ep = &r; + while (*ep) + ep = &(*ep)->next; + *ep = llist_reverse_order(t); + return r; +} +EXPORT_SYMBOL_GPL(lwq_dequeue_all); + +#if IS_ENABLED(CONFIG_LWQ_TEST) + +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/wait_bit.h> +#include <linux/kthread.h> +#include <linux/delay.h> +struct tnode { + struct lwq_node n; + int i; + int c; +}; + +static int lwq_exercise(void *qv) +{ + struct lwq *q = qv; + int cnt; + struct tnode *t; + + for (cnt = 0; cnt < 10000; cnt++) { + wait_var_event(q, (t = lwq_dequeue(q, struct tnode, n)) != NULL); + t->c++; + if (lwq_enqueue(&t->n, q)) + wake_up_var(q); + } + while (!kthread_should_stop()) + schedule_timeout_idle(1); + return 0; +} + +static int lwq_test(void) +{ + int i; + struct lwq q; + struct llist_node *l, **t1, *t2; + struct tnode *t; + struct task_struct *threads[8]; + + printk(KERN_INFO "testing lwq....\n"); + lwq_init(&q); + printk(KERN_INFO " lwq: run some threads\n"); + for (i = 0; i < ARRAY_SIZE(threads); i++) + threads[i] = kthread_run(lwq_exercise, &q, "lwq-test-%d", i); + for (i = 0; i < 100; i++) { + t = kmalloc(sizeof(*t), GFP_KERNEL); + if (!t) + break; + t->i = i; + t->c = 0; + if (lwq_enqueue(&t->n, &q)) + wake_up_var(&q); + } + /* wait for threads to exit */ + for (i = 0; i < ARRAY_SIZE(threads); i++) + if (!IS_ERR_OR_NULL(threads[i])) + kthread_stop(threads[i]); + printk(KERN_INFO " lwq: dequeue first 50:"); + for (i = 0; i < 50 ; i++) { + if (i && (i % 10) == 0) { + printk(KERN_CONT "\n"); + printk(KERN_INFO " lwq: ... "); + } + t = lwq_dequeue(&q, struct tnode, n); + if (t) + printk(KERN_CONT " %d(%d)", t->i, t->c); + kfree(t); + } + printk(KERN_CONT "\n"); + l = lwq_dequeue_all(&q); + printk(KERN_INFO " lwq: delete the multiples of 3 (test lwq_for_each_safe())\n"); + lwq_for_each_safe(t, t1, t2, &l, n) { + if ((t->i % 3) == 0) { + t->i = -1; + kfree(t); + t = NULL; + } + } + if (l) + lwq_enqueue_batch(l, &q); + printk(KERN_INFO " lwq: dequeue remaining:"); + while ((t = lwq_dequeue(&q, struct tnode, n)) != NULL) { + printk(KERN_CONT " %d", t->i); + kfree(t); + } + printk(KERN_CONT "\n"); + return 0; +} + +module_init(lwq_test); +#endif /* CONFIG_LWQ_TEST*/ diff --git a/lib/maple_tree.c b/lib/maple_tree.c index ee1ff0c59fd7..bb24d84a4922 100644 --- a/lib/maple_tree.c +++ b/lib/maple_tree.c @@ -256,6 +256,22 @@ bool mas_is_err(struct ma_state *mas) return xa_is_err(mas->node); } +static __always_inline bool mas_is_overflow(struct ma_state *mas) +{ + if (unlikely(mas->node == MAS_OVERFLOW)) + return true; + + return false; +} + +static __always_inline bool mas_is_underflow(struct ma_state *mas) +{ + if (unlikely(mas->node == MAS_UNDERFLOW)) + return true; + + return false; +} + static inline bool mas_searchable(struct ma_state *mas) { if (mas_is_none(mas)) @@ -4415,10 +4431,13 @@ no_entry: * * @mas: The maple state * @max: The minimum starting range + * @empty: Can be empty + * @set_underflow: Set the @mas->node to underflow state on limit. 
* * Return: The entry in the previous slot which is possibly NULL */ -static void *mas_prev_slot(struct ma_state *mas, unsigned long min, bool empty) +static void *mas_prev_slot(struct ma_state *mas, unsigned long min, bool empty, + bool set_underflow) { void *entry; void __rcu **slots; @@ -4435,7 +4454,6 @@ retry: if (unlikely(mas_rewalk_if_dead(mas, node, save_point))) goto retry; -again: if (mas->min <= min) { pivot = mas_safe_min(mas, pivots, mas->offset); @@ -4443,9 +4461,10 @@ again: goto retry; if (pivot <= min) - return NULL; + goto underflow; } +again: if (likely(mas->offset)) { mas->offset--; mas->last = mas->index - 1; @@ -4457,7 +4476,7 @@ again: } if (mas_is_none(mas)) - return NULL; + goto underflow; mas->last = mas->max; node = mas_mn(mas); @@ -4474,10 +4493,19 @@ again: if (likely(entry)) return entry; - if (!empty) + if (!empty) { + if (mas->index <= min) + goto underflow; + goto again; + } return entry; + +underflow: + if (set_underflow) + mas->node = MAS_UNDERFLOW; + return NULL; } /* @@ -4567,10 +4595,13 @@ no_entry: * @mas: The maple state * @max: The maximum starting range * @empty: Can be empty + * @set_overflow: Should @mas->node be set to overflow when the limit is + * reached. * * Return: The entry in the next slot which is possibly NULL */ -static void *mas_next_slot(struct ma_state *mas, unsigned long max, bool empty) +static void *mas_next_slot(struct ma_state *mas, unsigned long max, bool empty, + bool set_overflow) { void __rcu **slots; unsigned long *pivots; @@ -4589,22 +4620,22 @@ retry: if (unlikely(mas_rewalk_if_dead(mas, node, save_point))) goto retry; -again: if (mas->max >= max) { if (likely(mas->offset < data_end)) pivot = pivots[mas->offset]; else - return NULL; /* must be mas->max */ + goto overflow; if (unlikely(mas_rewalk_if_dead(mas, node, save_point))) goto retry; if (pivot >= max) - return NULL; + goto overflow; } if (likely(mas->offset < data_end)) { mas->index = pivots[mas->offset] + 1; +again: mas->offset++; if (likely(mas->offset < data_end)) mas->last = pivots[mas->offset]; @@ -4616,8 +4647,11 @@ again: goto retry; } - if (mas_is_none(mas)) + if (WARN_ON_ONCE(mas_is_none(mas))) { + mas->node = MAS_OVERFLOW; return NULL; + goto overflow; + } mas->offset = 0; mas->index = mas->min; @@ -4636,12 +4670,20 @@ again: return entry; if (!empty) { - if (!mas->offset) - data_end = 2; + if (mas->last >= max) + goto overflow; + + mas->index = mas->last + 1; + /* Node cannot end on NULL, so it's safe to short-cut here */ goto again; } return entry; + +overflow: + if (set_overflow) + mas->node = MAS_OVERFLOW; + return NULL; } /* @@ -4651,17 +4693,20 @@ again: * * Set the @mas->node to the next entry and the range_start to * the beginning value for the entry. Does not check beyond @limit. - * Sets @mas->index and @mas->last to the limit if it is hit. + * Sets @mas->index and @mas->last to the range, Does not update @mas->index and + * @mas->last on overflow. * Restarts on dead nodes. * * Return: the next entry or %NULL. 
*/ static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit) { - if (mas->last >= limit) + if (mas->last >= limit) { + mas->node = MAS_OVERFLOW; return NULL; + } - return mas_next_slot(mas, limit, false); + return mas_next_slot(mas, limit, false, true); } /* @@ -4837,7 +4882,7 @@ void *mas_walk(struct ma_state *mas) { void *entry; - if (mas_is_none(mas) || mas_is_paused(mas) || mas_is_ptr(mas)) + if (!mas_is_active(mas) || !mas_is_start(mas)) mas->node = MAS_START; retry: entry = mas_state_walk(mas); @@ -5294,14 +5339,22 @@ static inline void mte_destroy_walk(struct maple_enode *enode, static void mas_wr_store_setup(struct ma_wr_state *wr_mas) { - if (mas_is_start(wr_mas->mas)) - return; + if (!mas_is_active(wr_mas->mas)) { + if (mas_is_start(wr_mas->mas)) + return; - if (unlikely(mas_is_paused(wr_mas->mas))) - goto reset; + if (unlikely(mas_is_paused(wr_mas->mas))) + goto reset; - if (unlikely(mas_is_none(wr_mas->mas))) - goto reset; + if (unlikely(mas_is_none(wr_mas->mas))) + goto reset; + + if (unlikely(mas_is_overflow(wr_mas->mas))) + goto reset; + + if (unlikely(mas_is_underflow(wr_mas->mas))) + goto reset; + } /* * A less strict version of mas_is_span_wr() where we allow spanning @@ -5574,7 +5627,7 @@ int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries) /* Internal nodes */ nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap); /* Add working room for split (2 nodes) + new parents */ - mas_node_count(mas, nr_nodes + 3); + mas_node_count_gfp(mas, nr_nodes + 3, GFP_KERNEL); /* Detect if allocations run out */ mas->mas_flags |= MA_STATE_PREALLOC; @@ -5595,8 +5648,25 @@ static inline bool mas_next_setup(struct ma_state *mas, unsigned long max, { bool was_none = mas_is_none(mas); - if (mas_is_none(mas) || mas_is_paused(mas)) + if (unlikely(mas->last >= max)) { + mas->node = MAS_OVERFLOW; + return true; + } + + if (mas_is_active(mas)) + return false; + + if (mas_is_none(mas) || mas_is_paused(mas)) { + mas->node = MAS_START; + } else if (mas_is_overflow(mas)) { + /* Overflowed before, but the max changed */ mas->node = MAS_START; + } else if (mas_is_underflow(mas)) { + mas->node = MAS_START; + *entry = mas_walk(mas); + if (*entry) + return true; + } if (mas_is_start(mas)) *entry = mas_walk(mas); /* Retries on dead nodes handled by mas_walk */ @@ -5615,6 +5685,7 @@ static inline bool mas_next_setup(struct ma_state *mas, unsigned long max, if (mas_is_none(mas)) return true; + return false; } @@ -5637,7 +5708,7 @@ void *mas_next(struct ma_state *mas, unsigned long max) return entry; /* Retries on dead nodes handled by mas_next_slot */ - return mas_next_slot(mas, max, false); + return mas_next_slot(mas, max, false, true); } EXPORT_SYMBOL_GPL(mas_next); @@ -5660,7 +5731,7 @@ void *mas_next_range(struct ma_state *mas, unsigned long max) return entry; /* Retries on dead nodes handled by mas_next_slot */ - return mas_next_slot(mas, max, true); + return mas_next_slot(mas, max, true, true); } EXPORT_SYMBOL_GPL(mas_next_range); @@ -5691,18 +5762,31 @@ EXPORT_SYMBOL_GPL(mt_next); static inline bool mas_prev_setup(struct ma_state *mas, unsigned long min, void **entry) { - if (mas->index <= min) - goto none; + if (unlikely(mas->index <= min)) { + mas->node = MAS_UNDERFLOW; + return true; + } - if (mas_is_none(mas) || mas_is_paused(mas)) + if (mas_is_active(mas)) + return false; + + if (mas_is_overflow(mas)) { mas->node = MAS_START; + *entry = mas_walk(mas); + if (*entry) + return true; + } - if (mas_is_start(mas)) { - mas_walk(mas); - if (!mas->index) - goto none; + if 
(mas_is_none(mas) || mas_is_paused(mas)) { + mas->node = MAS_START; + } else if (mas_is_underflow(mas)) { + /* underflowed before but the min changed */ + mas->node = MAS_START; } + if (mas_is_start(mas)) + mas_walk(mas); + if (unlikely(mas_is_ptr(mas))) { if (!mas->index) goto none; @@ -5747,7 +5831,7 @@ void *mas_prev(struct ma_state *mas, unsigned long min) if (mas_prev_setup(mas, min, &entry)) return entry; - return mas_prev_slot(mas, min, false); + return mas_prev_slot(mas, min, false, true); } EXPORT_SYMBOL_GPL(mas_prev); @@ -5770,7 +5854,7 @@ void *mas_prev_range(struct ma_state *mas, unsigned long min) if (mas_prev_setup(mas, min, &entry)) return entry; - return mas_prev_slot(mas, min, true); + return mas_prev_slot(mas, min, true, true); } EXPORT_SYMBOL_GPL(mas_prev_range); @@ -5828,24 +5912,35 @@ EXPORT_SYMBOL_GPL(mas_pause); static inline bool mas_find_setup(struct ma_state *mas, unsigned long max, void **entry) { - *entry = NULL; + if (mas_is_active(mas)) { + if (mas->last < max) + return false; - if (unlikely(mas_is_none(mas))) { + return true; + } + + if (mas_is_paused(mas)) { if (unlikely(mas->last >= max)) return true; - mas->index = mas->last; + mas->index = ++mas->last; mas->node = MAS_START; - } else if (unlikely(mas_is_paused(mas))) { + } else if (mas_is_none(mas)) { if (unlikely(mas->last >= max)) return true; + mas->index = mas->last; mas->node = MAS_START; - mas->index = ++mas->last; - } else if (unlikely(mas_is_ptr(mas))) - goto ptr_out_of_range; + } else if (mas_is_overflow(mas) || mas_is_underflow(mas)) { + if (mas->index > max) { + mas->node = MAS_OVERFLOW; + return true; + } + + mas->node = MAS_START; + } - if (unlikely(mas_is_start(mas))) { + if (mas_is_start(mas)) { /* First run or continue */ if (mas->index > max) return true; @@ -5895,7 +5990,7 @@ void *mas_find(struct ma_state *mas, unsigned long max) return entry; /* Retries on dead nodes handled by mas_next_slot */ - return mas_next_slot(mas, max, false); + return mas_next_slot(mas, max, false, false); } EXPORT_SYMBOL_GPL(mas_find); @@ -5913,13 +6008,13 @@ EXPORT_SYMBOL_GPL(mas_find); */ void *mas_find_range(struct ma_state *mas, unsigned long max) { - void *entry; + void *entry = NULL; if (mas_find_setup(mas, max, &entry)) return entry; /* Retries on dead nodes handled by mas_next_slot */ - return mas_next_slot(mas, max, true); + return mas_next_slot(mas, max, true, false); } EXPORT_SYMBOL_GPL(mas_find_range); @@ -5934,26 +6029,36 @@ EXPORT_SYMBOL_GPL(mas_find_range); static inline bool mas_find_rev_setup(struct ma_state *mas, unsigned long min, void **entry) { - *entry = NULL; - - if (unlikely(mas_is_none(mas))) { - if (mas->index <= min) - goto none; + if (mas_is_active(mas)) { + if (mas->index > min) + return false; - mas->last = mas->index; - mas->node = MAS_START; + return true; } - if (unlikely(mas_is_paused(mas))) { + if (mas_is_paused(mas)) { if (unlikely(mas->index <= min)) { mas->node = MAS_NONE; return true; } mas->node = MAS_START; mas->last = --mas->index; + } else if (mas_is_none(mas)) { + if (mas->index <= min) + goto none; + + mas->last = mas->index; + mas->node = MAS_START; + } else if (mas_is_underflow(mas) || mas_is_overflow(mas)) { + if (mas->last <= min) { + mas->node = MAS_UNDERFLOW; + return true; + } + + mas->node = MAS_START; } - if (unlikely(mas_is_start(mas))) { + if (mas_is_start(mas)) { /* First run or continue */ if (mas->index < min) return true; @@ -6004,13 +6109,13 @@ none: */ void *mas_find_rev(struct ma_state *mas, unsigned long min) { - void *entry; + void *entry = 
NULL; if (mas_find_rev_setup(mas, min, &entry)) return entry; /* Retries on dead nodes handled by mas_prev_slot */ - return mas_prev_slot(mas, min, false); + return mas_prev_slot(mas, min, false, false); } EXPORT_SYMBOL_GPL(mas_find_rev); @@ -6030,13 +6135,13 @@ EXPORT_SYMBOL_GPL(mas_find_rev); */ void *mas_find_range_rev(struct ma_state *mas, unsigned long min) { - void *entry; + void *entry = NULL; if (mas_find_rev_setup(mas, min, &entry)) return entry; /* Retries on dead nodes handled by mas_prev_slot */ - return mas_prev_slot(mas, min, true); + return mas_prev_slot(mas, min, true, false); } EXPORT_SYMBOL_GPL(mas_find_range_rev); diff --git a/lib/scatterlist.c b/lib/scatterlist.c index c65566b4dc66..68b45c82c37a 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c @@ -265,7 +265,8 @@ EXPORT_SYMBOL(sg_free_table); * @table: The sg table header to use * @nents: Number of entries in sg list * @max_ents: The maximum number of entries the allocator returns per call - * @nents_first_chunk: Number of entries int the (preallocated) first + * @first_chunk: first SGL if preallocated (may be %NULL) + * @nents_first_chunk: Number of entries in the (preallocated) first * scatterlist chunk, 0 means no such preallocated chunk provided by user * @gfp_mask: GFP allocation mask * @alloc_fn: Allocator to use @@ -788,6 +789,7 @@ EXPORT_SYMBOL(__sg_page_iter_dma_next); * @miter: sg mapping iter to be started * @sgl: sg list to iterate over * @nents: number of sg entries + * @flags: sg iterator flags * * Description: * Starts mapping iterator @miter. diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c index 0674aebd4423..464eeb90d5ad 100644 --- a/lib/test_maple_tree.c +++ b/lib/test_maple_tree.c @@ -9,6 +9,7 @@ #include <linux/maple_tree.h> #include <linux/module.h> +#include <linux/rwsem.h> #define MTREE_ALLOC_MAX 0x2000000000000Ul #define CONFIG_MAPLE_SEARCH @@ -1841,17 +1842,21 @@ static noinline void __init check_forking(struct maple_tree *mt) void *val; MA_STATE(mas, mt, 0, 0); MA_STATE(newmas, mt, 0, 0); + struct rw_semaphore newmt_lock; + + init_rwsem(&newmt_lock); for (i = 0; i <= nr_entries; i++) mtree_store_range(mt, i*10, i*10 + 5, xa_mk_value(i), GFP_KERNEL); mt_set_non_kernel(99999); - mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE); + mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN); + mt_set_external_lock(&newmt, &newmt_lock); newmas.tree = &newmt; mas_reset(&newmas); mas_reset(&mas); - mas_lock(&newmas); + down_write(&newmt_lock); mas.index = 0; mas.last = 0; if (mas_expected_entries(&newmas, nr_entries)) { @@ -1866,10 +1871,10 @@ static noinline void __init check_forking(struct maple_tree *mt) } rcu_read_unlock(); mas_destroy(&newmas); - mas_unlock(&newmas); mt_validate(&newmt); mt_set_non_kernel(0); - mtree_destroy(&newmt); + __mt_destroy(&newmt); + up_write(&newmt_lock); } static noinline void __init check_iteration(struct maple_tree *mt) @@ -1980,6 +1985,10 @@ static noinline void __init bench_forking(struct maple_tree *mt) void *val; MA_STATE(mas, mt, 0, 0); MA_STATE(newmas, mt, 0, 0); + struct rw_semaphore newmt_lock; + + init_rwsem(&newmt_lock); + mt_set_external_lock(&newmt, &newmt_lock); for (i = 0; i <= nr_entries; i++) mtree_store_range(mt, i*10, i*10 + 5, @@ -1994,7 +2003,7 @@ static noinline void __init bench_forking(struct maple_tree *mt) mas.index = 0; mas.last = 0; rcu_read_lock(); - mas_lock(&newmas); + down_write(&newmt_lock); if (mas_expected_entries(&newmas, nr_entries)) { printk("OOM!"); BUG_ON(1); @@ -2005,11 +2014,11 @@ static noinline void __init 
bench_forking(struct maple_tree *mt) mas_store(&newmas, val); } mas_destroy(&newmas); - mas_unlock(&newmas); rcu_read_unlock(); mt_validate(&newmt); mt_set_non_kernel(0); - mtree_destroy(&newmt); + __mt_destroy(&newmt); + up_write(&newmt_lock); } } #endif @@ -2166,7 +2175,7 @@ static noinline void __init next_prev_test(struct maple_tree *mt) MT_BUG_ON(mt, val != NULL); MT_BUG_ON(mt, mas.index != 0); MT_BUG_ON(mt, mas.last != 5); - MT_BUG_ON(mt, mas.node != MAS_NONE); + MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW); mas.index = 0; mas.last = 5; @@ -2616,6 +2625,10 @@ static noinline void __init check_dup_gaps(struct maple_tree *mt, void *tmp; MA_STATE(mas, mt, 0, 0); MA_STATE(newmas, &newmt, 0, 0); + struct rw_semaphore newmt_lock; + + init_rwsem(&newmt_lock); + mt_set_external_lock(&newmt, &newmt_lock); if (!zero_start) i = 1; @@ -2625,9 +2638,9 @@ static noinline void __init check_dup_gaps(struct maple_tree *mt, mtree_store_range(mt, i*10, (i+1)*10 - gap, xa_mk_value(i), GFP_KERNEL); - mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE); + mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN); mt_set_non_kernel(99999); - mas_lock(&newmas); + down_write(&newmt_lock); ret = mas_expected_entries(&newmas, nr_entries); mt_set_non_kernel(0); MT_BUG_ON(mt, ret != 0); @@ -2640,9 +2653,9 @@ static noinline void __init check_dup_gaps(struct maple_tree *mt, } rcu_read_unlock(); mas_destroy(&newmas); - mas_unlock(&newmas); - mtree_destroy(&newmt); + __mt_destroy(&newmt); + up_write(&newmt_lock); } /* Duplicate many sizes of trees. Mainly to test expected entry values */ @@ -2917,6 +2930,7 @@ static noinline void __init check_empty_area_fill(struct maple_tree *mt) * exists MAS_NONE active range * exists active active range * DNE active active set to last range + * ERANGE active MAS_OVERFLOW last range * * Function ENTRY Start Result index & last * mas_prev() @@ -2945,6 +2959,7 @@ static noinline void __init check_empty_area_fill(struct maple_tree *mt) * any MAS_ROOT MAS_NONE 0 * exists active active range * DNE active active last range + * ERANGE active MAS_UNDERFLOW last range * * Function ENTRY Start Result index & last * mas_find() @@ -2955,7 +2970,7 @@ static noinline void __init check_empty_area_fill(struct maple_tree *mt) * DNE MAS_START MAS_NONE 0 * DNE MAS_PAUSE MAS_NONE 0 * DNE MAS_ROOT MAS_NONE 0 - * DNE MAS_NONE MAS_NONE 0 + * DNE MAS_NONE MAS_NONE 1 * if index == 0 * exists MAS_START MAS_ROOT 0 * exists MAS_PAUSE MAS_ROOT 0 @@ -2967,7 +2982,7 @@ static noinline void __init check_empty_area_fill(struct maple_tree *mt) * DNE MAS_START active set to max * exists MAS_PAUSE active range * DNE MAS_PAUSE active set to max - * exists MAS_NONE active range + * exists MAS_NONE active range (start at last) * exists active active range * DNE active active last range (max < last) * @@ -2992,7 +3007,7 @@ static noinline void __init check_empty_area_fill(struct maple_tree *mt) * DNE MAS_START active set to min * exists MAS_PAUSE active range * DNE MAS_PAUSE active set to min - * exists MAS_NONE active range + * exists MAS_NONE active range (start at index) * exists active active range * DNE active active last range (min > index) * @@ -3039,10 +3054,10 @@ static noinline void __init check_state_handling(struct maple_tree *mt) mtree_store_range(mt, 0, 0, ptr, GFP_KERNEL); mas_lock(&mas); - /* prev: Start -> none */ + /* prev: Start -> underflow*/ entry = mas_prev(&mas, 0); MT_BUG_ON(mt, entry != NULL); - MT_BUG_ON(mt, mas.node != MAS_NONE); + MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW); /* prev: Start -> root 
*/ mas_set(&mas, 10); @@ -3069,7 +3084,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt) MT_BUG_ON(mt, entry != NULL); MT_BUG_ON(mt, mas.node != MAS_NONE); - /* next: start -> none */ + /* next: start -> none*/ mas_set(&mas, 10); entry = mas_next(&mas, ULONG_MAX); MT_BUG_ON(mt, mas.index != 1); @@ -3268,27 +3283,48 @@ static noinline void __init check_state_handling(struct maple_tree *mt) MT_BUG_ON(mt, mas.last != 0x2500); MT_BUG_ON(mt, !mas_active(mas)); - /* next:active -> active out of range*/ + /* next:active -> active beyond data */ entry = mas_next(&mas, 0x2999); MT_BUG_ON(mt, entry != NULL); MT_BUG_ON(mt, mas.index != 0x2501); MT_BUG_ON(mt, mas.last != 0x2fff); MT_BUG_ON(mt, !mas_active(mas)); - /* Continue after out of range*/ + /* Continue after last range ends after max */ entry = mas_next(&mas, ULONG_MAX); MT_BUG_ON(mt, entry != ptr3); MT_BUG_ON(mt, mas.index != 0x3000); MT_BUG_ON(mt, mas.last != 0x3500); MT_BUG_ON(mt, !mas_active(mas)); - /* next:active -> active out of range*/ + /* next:active -> active continued */ entry = mas_next(&mas, ULONG_MAX); MT_BUG_ON(mt, entry != NULL); MT_BUG_ON(mt, mas.index != 0x3501); MT_BUG_ON(mt, mas.last != ULONG_MAX); MT_BUG_ON(mt, !mas_active(mas)); + /* next:active -> overflow */ + entry = mas_next(&mas, ULONG_MAX); + MT_BUG_ON(mt, entry != NULL); + MT_BUG_ON(mt, mas.index != 0x3501); + MT_BUG_ON(mt, mas.last != ULONG_MAX); + MT_BUG_ON(mt, mas.node != MAS_OVERFLOW); + + /* next:overflow -> overflow */ + entry = mas_next(&mas, ULONG_MAX); + MT_BUG_ON(mt, entry != NULL); + MT_BUG_ON(mt, mas.index != 0x3501); + MT_BUG_ON(mt, mas.last != ULONG_MAX); + MT_BUG_ON(mt, mas.node != MAS_OVERFLOW); + + /* prev:overflow -> active */ + entry = mas_prev(&mas, 0); + MT_BUG_ON(mt, entry != ptr3); + MT_BUG_ON(mt, mas.index != 0x3000); + MT_BUG_ON(mt, mas.last != 0x3500); + MT_BUG_ON(mt, !mas_active(mas)); + /* next: none -> active, skip value at location */ mas_set(&mas, 0); entry = mas_next(&mas, ULONG_MAX); @@ -3307,11 +3343,46 @@ static noinline void __init check_state_handling(struct maple_tree *mt) MT_BUG_ON(mt, mas.last != 0x1500); MT_BUG_ON(mt, !mas_active(mas)); - /* prev:active -> active out of range*/ + /* prev:active -> active spanning end range */ + entry = mas_prev(&mas, 0x0100); + MT_BUG_ON(mt, entry != NULL); + MT_BUG_ON(mt, mas.index != 0); + MT_BUG_ON(mt, mas.last != 0x0FFF); + MT_BUG_ON(mt, !mas_active(mas)); + + /* prev:active -> underflow */ entry = mas_prev(&mas, 0); MT_BUG_ON(mt, entry != NULL); MT_BUG_ON(mt, mas.index != 0); MT_BUG_ON(mt, mas.last != 0x0FFF); + MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW); + + /* prev:underflow -> underflow */ + entry = mas_prev(&mas, 0); + MT_BUG_ON(mt, entry != NULL); + MT_BUG_ON(mt, mas.index != 0); + MT_BUG_ON(mt, mas.last != 0x0FFF); + MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW); + + /* next:underflow -> active */ + entry = mas_next(&mas, ULONG_MAX); + MT_BUG_ON(mt, entry != ptr); + MT_BUG_ON(mt, mas.index != 0x1000); + MT_BUG_ON(mt, mas.last != 0x1500); + MT_BUG_ON(mt, !mas_active(mas)); + + /* prev:first value -> underflow */ + entry = mas_prev(&mas, 0x1000); + MT_BUG_ON(mt, entry != NULL); + MT_BUG_ON(mt, mas.index != 0x1000); + MT_BUG_ON(mt, mas.last != 0x1500); + MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW); + + /* find:underflow -> first value */ + entry = mas_find(&mas, ULONG_MAX); + MT_BUG_ON(mt, entry != ptr); + MT_BUG_ON(mt, mas.index != 0x1000); + MT_BUG_ON(mt, mas.last != 0x1500); MT_BUG_ON(mt, !mas_active(mas)); /* prev: pause ->active */ @@ -3325,14 +3396,14 @@ 
static noinline void __init check_state_handling(struct maple_tree *mt) MT_BUG_ON(mt, mas.last != 0x2500); MT_BUG_ON(mt, !mas_active(mas)); - /* prev:active -> active out of range*/ + /* prev:active -> active spanning min */ entry = mas_prev(&mas, 0x1600); MT_BUG_ON(mt, entry != NULL); MT_BUG_ON(mt, mas.index != 0x1501); MT_BUG_ON(mt, mas.last != 0x1FFF); MT_BUG_ON(mt, !mas_active(mas)); - /* prev: active ->active, continue*/ + /* prev: active ->active, continue */ entry = mas_prev(&mas, 0); MT_BUG_ON(mt, entry != ptr); MT_BUG_ON(mt, mas.index != 0x1000); @@ -3379,7 +3450,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt) MT_BUG_ON(mt, mas.last != 0x2FFF); MT_BUG_ON(mt, !mas_active(mas)); - /* find: none ->active */ + /* find: overflow ->active */ entry = mas_find(&mas, 0x5000); MT_BUG_ON(mt, entry != ptr3); MT_BUG_ON(mt, mas.index != 0x3000); @@ -3778,7 +3849,6 @@ static int __init maple_tree_seed(void) check_empty_area_fill(&tree); mtree_destroy(&tree); - mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); check_state_handling(&tree); mtree_destroy(&tree); |
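
For reference, the light-weight queue added in lib/lwq.c above is intended to be used roughly the way its boot-time test (lwq_test) uses it: producers call lwq_enqueue() from any context, including BH or IRQ, and wake a consumer when the queue transitions from empty, while a dedicated thread dequeues one entry at a time. The following is a minimal sketch modeled on that test code; struct my_work, my_producer() and my_consumer() are hypothetical names introduced here for illustration and are not part of the patch, while the lwq_*() calls are the ones added by this series.

/* Hypothetical producer/consumer sketch based on the lwq_test code above. */
#include <linux/lwq.h>
#include <linux/slab.h>
#include <linux/wait_bit.h>

struct my_work {
	struct lwq_node node;	/* embedded queue linkage */
	int payload;
};

static struct lwq my_queue;

static void my_setup(void)
{
	lwq_init(&my_queue);
}

/* May be called from BH/IRQ context: enqueueing never blocks. */
static void my_producer(int payload)
{
	struct my_work *w = kmalloc(sizeof(*w), GFP_ATOMIC);

	if (!w)
		return;
	w->payload = payload;
	/*
	 * lwq_enqueue() returns true when the queue was previously empty,
	 * which is the hint to wake the consumer thread.
	 */
	if (lwq_enqueue(&w->node, &my_queue))
		wake_up_var(&my_queue);
}

/* Dedicated consumer thread: handle one work item at a time. */
static int my_consumer(void *unused)
{
	struct my_work *w;

	for (;;) {	/* termination handling omitted in this sketch */
		wait_var_event(&my_queue,
			       (w = lwq_dequeue(&my_queue, struct my_work, node)) != NULL);
		/* ... process w->payload ... */
		kfree(w);
	}
	return 0;
}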