author		Linus Torvalds <torvalds@linux-foundation.org>	2017-09-09 10:30:07 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-09-09 10:30:07 -0700
commit		fbf4432ff71b7a25bef993a5312906946d27f446 (patch)
tree		cf3e0024af4b8f9376eff75743f1fa1526e40900 /lib
parent		c054be10ffdbd5507a1fd738067d76acfb4808fd (diff)
parent		0cfb6aee70bddbef6ec796b255f588ce0e126766 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge more updates from Andrew Morton:
- most of the rest of MM
- a small number of misc things
- lib/ updates
- checkpatch
- autofs updates
- ipc/ updates
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (126 commits)
ipc: optimize semget/shmget/msgget for lots of keys
ipc/sem: play nicer with large nsops allocations
ipc/sem: drop sem_checkid helper
ipc: convert kern_ipc_perm.refcount from atomic_t to refcount_t
ipc: convert sem_undo_list.refcnt from atomic_t to refcount_t
ipc: convert ipc_namespace.count from atomic_t to refcount_t
kcov: support compat processes
sh: defconfig: cleanup from old Kconfig options
mn10300: defconfig: cleanup from old Kconfig options
m32r: defconfig: cleanup from old Kconfig options
drivers/pps: use surrounding "if PPS" to remove numerous dependency checks
drivers/pps: aesthetic tweaks to PPS-related content
cpumask: make cpumask_next() out-of-line
kmod: move #ifdef CONFIG_MODULES wrapper to Makefile
kmod: split off umh headers into its own file
MAINTAINERS: clarify kmod is just a kernel module loader
kmod: split out umh code into its own file
test_kmod: flip INT checks to be consistent
test_kmod: remove paranoid UINT_MAX check on uint range processing
vfat: deduplicate hex2bin()
...
Diffstat (limited to 'lib')
-rw-r--r--	lib/Kconfig			3
-rw-r--r--	lib/Kconfig.debug		11
-rw-r--r--	lib/Makefile			1
-rw-r--r--	lib/bitmap.c			18
-rw-r--r--	lib/cmdline.c			1
-rw-r--r--	lib/cpumask.c			16
-rw-r--r--	lib/hexdump.c			5
-rw-r--r--	lib/interval_tree_test.c	4
-rw-r--r--	lib/oid_registry.c		4
-rw-r--r--	lib/radix-tree.c		9
-rw-r--r--	lib/rbtree.c			65
-rw-r--r--	lib/rbtree_test.c		230
-rw-r--r--	lib/string.c			207
-rw-r--r--	lib/test_bitmap.c		91
-rw-r--r--	lib/test_debug_virtual.c	49
-rw-r--r--	lib/test_kmod.c			4
16 files changed, 639 insertions, 79 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 6762529ad9e4..40b114a11d7c 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -575,4 +575,7 @@ config PARMAN
 config PRIME_NUMBERS
 	tristate
 
+config STRING_SELFTEST
+	bool "Test string functions"
+
 endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 7396f5044397..b19c491cbc4e 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1930,6 +1930,17 @@ config TEST_KMOD
 
 	  If unsure, say N.
 
+config TEST_DEBUG_VIRTUAL
+	tristate "Test CONFIG_DEBUG_VIRTUAL feature"
+	depends on DEBUG_VIRTUAL
+	help
+	  Test the kernel's ability to detect incorrect calls to
+	  virt_to_phys() done against the non-linear part of the
+	  kernel's virtual address map.
+
+	  If unsure, say N.
+
+
 source "samples/Kconfig"
 
 source "lib/Kconfig.kgdb"
diff --git a/lib/Makefile b/lib/Makefile
index 40c18372b301..469ce5e24e4f 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -62,6 +62,7 @@ obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o
 obj-$(CONFIG_TEST_UUID) += test_uuid.o
 obj-$(CONFIG_TEST_PARMAN) += test_parman.o
 obj-$(CONFIG_TEST_KMOD) += test_kmod.o
+obj-$(CONFIG_TEST_DEBUG_VIRTUAL) += test_debug_virtual.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
 CFLAGS_kobject.o += -DDEBUG
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 9a532805364b..c82c61b66e16 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -513,7 +513,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
 		int nmaskbits)
 {
 	unsigned int a, b, old_a, old_b;
-	unsigned int group_size, used_size;
+	unsigned int group_size, used_size, off;
 	int c, old_c, totaldigits, ndigits;
 	const char __user __force *ubuf = (const char __user __force *)buf;
 	int at_start, in_range, in_partial_range;
@@ -599,6 +599,8 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
 			a = old_a;
 			b = old_b;
 			old_a = old_b = 0;
+		} else {
+			used_size = group_size = b - a + 1;
 		}
 		/* if no digit is after '-', it's wrong*/
 		if (at_start && in_range)
@@ -608,17 +610,9 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
 		if (b >= nmaskbits)
 			return -ERANGE;
 		while (a <= b) {
-			if (in_partial_range) {
-				static int pos_in_group = 1;
-
-				if (pos_in_group <= used_size)
-					set_bit(a, maskp);
-
-				if (a == b || ++pos_in_group > group_size)
-					pos_in_group = 1;
-			} else
-				set_bit(a, maskp);
-			a++;
+			off = min(b - a + 1, used_size);
+			bitmap_set(maskp, a, off);
+			a += group_size;
 		}
 	} while (buflen && c == ',');
 	return 0;
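The bitmap.c change above drops the stateful per-bit bookkeeping and sets each group with one bitmap_set() call when the extended "first-last:used/group" syntax is parsed. A minimal sketch of a caller exercising that syntax; the demo module and its names are invented for illustration, not part of the patch:

	#include <linux/bitmap.h>
	#include <linux/bitops.h>
	#include <linux/module.h>

	static int __init parselist_demo_init(void)
	{
		DECLARE_BITMAP(mask, 32);
		int err;

		/* "0-31:1/4" = within 0..31, use 1 bit out of each group of 4 */
		err = bitmap_parselist("0-31:1/4", mask, 32);
		if (err)
			return err;	/* -EINVAL or -ERANGE on bad input */

		/* bits 0, 4, 8, ..., 28 are now set */
		pr_info("first set bit: %lu\n", find_first_bit(mask, 32));
		return 0;
	}
	module_init(parselist_demo_init);
	MODULE_LICENSE("GPL");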
diff --git a/lib/cmdline.c b/lib/cmdline.c
index 4c0888c4a68d..171c19b6888e 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
@@ -244,5 +244,4 @@ char *next_arg(char *args, char **param, char **val)
 
 	/* Chew up trailing spaces. */
 	return skip_spaces(next);
-	//return next;
 }
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 4731a0895760..8b1a1bd77539 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -6,6 +6,22 @@
 #include <linux/bootmem.h>
 
 /**
+ * cpumask_next - get the next cpu in a cpumask
+ * @n: the cpu prior to the place to search (ie. return will be > @n)
+ * @srcp: the cpumask pointer
+ *
+ * Returns >= nr_cpu_ids if no further cpus set.
+ */
+unsigned int cpumask_next(int n, const struct cpumask *srcp)
+{
+	/* -1 is a legal arg here. */
+	if (n != -1)
+		cpumask_check(n);
+	return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n + 1);
+}
+EXPORT_SYMBOL(cpumask_next);
+
+/**
  * cpumask_next_and - get the next cpu in *src1p & *src2p
  * @n: the cpu prior to the place to search (ie. return will be > @n)
  * @src1p: the first cpumask pointer
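For reference, a minimal sketch of walking a mask with the now out-of-line cpumask_next(); the counting helper is invented for illustration. Passing -1 starts the walk at CPU 0, matching the "-1 is a legal arg" comment above:

	#include <linux/cpumask.h>

	static unsigned int count_set_cpus(const struct cpumask *mask)
	{
		unsigned int n = 0;
		int cpu;

		for (cpu = cpumask_next(-1, mask); cpu < nr_cpu_ids;
		     cpu = cpumask_next(cpu, mask))
			n++;	/* loop ends once cpumask_next() returns >= nr_cpu_ids */
		return n;
	}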
diff --git a/lib/hexdump.c b/lib/hexdump.c
index 992457b1284c..81b70ed37209 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -9,6 +9,7 @@
 
 #include <linux/types.h>
 #include <linux/ctype.h>
+#include <linux/errno.h>
 #include <linux/kernel.h>
 #include <linux/export.h>
 #include <asm/unaligned.h>
@@ -42,7 +43,7 @@ EXPORT_SYMBOL(hex_to_bin);
  * @src: ascii hexadecimal string
  * @count: result length
  *
- * Return 0 on success, -1 in case of bad input.
+ * Return 0 on success, -EINVAL in case of bad input.
  */
 int hex2bin(u8 *dst, const char *src, size_t count)
 {
@@ -51,7 +52,7 @@ int hex2bin(u8 *dst, const char *src, size_t count)
 		int lo = hex_to_bin(*src++);
 
 		if ((hi < 0) || (lo < 0))
-			return -1;
+			return -EINVAL;
 
 		*dst++ = (hi << 4) | lo;
 	}
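Since hex2bin() now returns an errno-style value rather than a bare -1, callers can propagate its result directly. A small sketch; the parse_mac() wrapper is invented for illustration:

	#include <linux/kernel.h>
	#include <linux/types.h>

	static int parse_mac(u8 mac[6], const char *hex)
	{
		/* expects 12 contiguous hex digits; 0 on success, -EINVAL on a bad digit */
		return hex2bin(mac, hex, 6);
	}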
diff --git a/lib/interval_tree_test.c b/lib/interval_tree_test.c
index df495fe81421..0e343fd29570 100644
--- a/lib/interval_tree_test.c
+++ b/lib/interval_tree_test.c
@@ -19,14 +19,14 @@ __param(bool, search_all, false, "Searches will iterate all nodes in the tree");
 
 __param(uint, max_endpoint, ~0, "Largest value for the interval's endpoint");
 
-static struct rb_root root = RB_ROOT;
+static struct rb_root_cached root = RB_ROOT_CACHED;
 static struct interval_tree_node *nodes = NULL;
 static u32 *queries = NULL;
 
 static struct rnd_state rnd;
 
 static inline unsigned long
-search(struct rb_root *root, unsigned long start, unsigned long last)
+search(struct rb_root_cached *root, unsigned long start, unsigned long last)
 {
 	struct interval_tree_node *node;
 	unsigned long results = 0;
diff --git a/lib/oid_registry.c b/lib/oid_registry.c
index 318f382a010d..41b9e50711a7 100644
--- a/lib/oid_registry.c
+++ b/lib/oid_registry.c
@@ -142,9 +142,9 @@ int sprint_oid(const void *data, size_t datasize, char *buffer, size_t bufsize)
 		}
 		ret += count = snprintf(buffer, bufsize, ".%lu", num);
 		buffer += count;
-		bufsize -= count;
-		if (bufsize == 0)
+		if (bufsize <= count)
 			return -ENOBUFS;
+		bufsize -= count;
 	}
 
 	return ret;
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 9717e2a50374..8b1feca1230a 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -463,7 +463,7 @@ radix_tree_node_free(struct radix_tree_node *node)
  * To make use of this facility, the radix tree must be initialised without
  * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
  */
-static int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
+static __must_check int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
 {
 	struct radix_tree_preload *rtp;
 	struct radix_tree_node *node;
@@ -2104,7 +2104,8 @@ EXPORT_SYMBOL(radix_tree_tagged);
  */
 void idr_preload(gfp_t gfp_mask)
 {
-	__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE);
+	if (__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE))
+		preempt_disable();
 }
 EXPORT_SYMBOL(idr_preload);
 
@@ -2118,13 +2119,13 @@ EXPORT_SYMBOL(idr_preload);
  */
 int ida_pre_get(struct ida *ida, gfp_t gfp)
 {
-	__radix_tree_preload(gfp, IDA_PRELOAD_SIZE);
 	/*
 	 * The IDA API has no preload_end() equivalent. Instead,
 	 * ida_get_new() can return -EAGAIN, prompting the caller
 	 * to return to the ida_pre_get() step.
 	 */
-	preempt_enable();
+	if (!__radix_tree_preload(gfp, IDA_PRELOAD_SIZE))
+		preempt_enable();
 
 	if (!this_cpu_read(ida_bitmap)) {
 		struct ida_bitmap *bitmap = kmalloc(sizeof(*bitmap), gfp);
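The radix-tree fix keeps preemption accounting balanced even when preloading fails: on success __radix_tree_preload() returns with preemption disabled, and on failure idr_preload() now disables it explicitly, so the usual calling pattern stays valid either way. A sketch of that pattern; the idr, lock, and helper names are placeholders:

	#include <linux/idr.h>
	#include <linux/gfp.h>
	#include <linux/spinlock.h>

	static DEFINE_IDR(demo_idr);
	static DEFINE_SPINLOCK(demo_lock);

	static int demo_alloc_id(void *ptr)
	{
		int id;

		idr_preload(GFP_KERNEL);	/* preemption is disabled on return, success or not */
		spin_lock(&demo_lock);
		id = idr_alloc(&demo_idr, ptr, 0, 0, GFP_NOWAIT);
		spin_unlock(&demo_lock);
		idr_preload_end();		/* always balanced with idr_preload() */

		return id;
	}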
diff --git a/lib/rbtree.c b/lib/rbtree.c
index 4ba2828a67c0..ba4a9d165f1b 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -95,22 +95,35 @@ __rb_rotate_set_parents(struct rb_node *old, struct rb_node *new,
 
 static __always_inline void
 __rb_insert(struct rb_node *node, struct rb_root *root,
+	    bool newleft, struct rb_node **leftmost,
 	    void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
 {
 	struct rb_node *parent = rb_red_parent(node), *gparent, *tmp;
 
+	if (newleft)
+		*leftmost = node;
+
 	while (true) {
 		/*
-		 * Loop invariant: node is red
-		 *
-		 * If there is a black parent, we are done.
-		 * Otherwise, take some corrective action as we don't
-		 * want a red root or two consecutive red nodes.
+		 * Loop invariant: node is red.
 		 */
-		if (!parent) {
+		if (unlikely(!parent)) {
+			/*
+			 * The inserted node is root. Either this is the
+			 * first node, or we recursed at Case 1 below and
+			 * are no longer violating 4).
+			 */
 			rb_set_parent_color(node, NULL, RB_BLACK);
 			break;
-		} else if (rb_is_black(parent))
+		}
+
+		/*
+		 * If there is a black parent, we are done.
+		 * Otherwise, take some corrective action as,
+		 * per 4), we don't want a red root or two
+		 * consecutive red nodes.
+		 */
+		if(rb_is_black(parent))
 			break;
 
 		gparent = rb_red_parent(parent);
@@ -119,7 +132,7 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
 		if (parent != tmp) {	/* parent == gparent->rb_left */
 			if (tmp && rb_is_red(tmp)) {
 				/*
-				 * Case 1 - color flips
+				 * Case 1 - node's uncle is red (color flips).
 				 *
 				 *       G            g
 				 *      / \          / \
@@ -142,7 +155,8 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
 			tmp = parent->rb_right;
 			if (node == tmp) {
 				/*
-				 * Case 2 - left rotate at parent
+				 * Case 2 - node's uncle is black and node is
+				 * the parent's right child (left rotate at parent).
 				 *
 				 *      G             G
 				 *     / \           / \
@@ -166,7 +180,8 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
 			}
 
 			/*
-			 * Case 3 - right rotate at gparent
+			 * Case 3 - node's uncle is black and node is
+			 * the parent's left child (right rotate at gparent).
 			 *
 			 *        G           P
 			 *       / \         / \
@@ -434,19 +449,38 @@ static const struct rb_augment_callbacks dummy_callbacks = {
 
 void rb_insert_color(struct rb_node *node, struct rb_root *root)
 {
-	__rb_insert(node, root, dummy_rotate);
+	__rb_insert(node, root, false, NULL, dummy_rotate);
 }
 EXPORT_SYMBOL(rb_insert_color);
 
 void rb_erase(struct rb_node *node, struct rb_root *root)
 {
 	struct rb_node *rebalance;
-	rebalance = __rb_erase_augmented(node, root, &dummy_callbacks);
+	rebalance = __rb_erase_augmented(node, root,
+					 NULL, &dummy_callbacks);
 	if (rebalance)
 		____rb_erase_color(rebalance, root, dummy_rotate);
 }
 EXPORT_SYMBOL(rb_erase);
 
+void rb_insert_color_cached(struct rb_node *node,
+			    struct rb_root_cached *root, bool leftmost)
+{
+	__rb_insert(node, &root->rb_root, leftmost,
+		    &root->rb_leftmost, dummy_rotate);
+}
+EXPORT_SYMBOL(rb_insert_color_cached);
+
+void rb_erase_cached(struct rb_node *node, struct rb_root_cached *root)
+{
+	struct rb_node *rebalance;
+	rebalance = __rb_erase_augmented(node, &root->rb_root,
+					 &root->rb_leftmost, &dummy_callbacks);
+	if (rebalance)
+		____rb_erase_color(rebalance, &root->rb_root, dummy_rotate);
+}
+EXPORT_SYMBOL(rb_erase_cached);
+
 /*
  * Augmented rbtree manipulation functions.
  *
@@ -455,9 +489,10 @@ EXPORT_SYMBOL(rb_erase);
  */
 
 void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
+			   bool newleft, struct rb_node **leftmost,
 			   void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
 {
-	__rb_insert(node, root, augment_rotate);
+	__rb_insert(node, root, newleft, leftmost, augment_rotate);
 }
 EXPORT_SYMBOL(__rb_insert_augmented);
 
@@ -502,7 +537,7 @@ struct rb_node *rb_next(const struct rb_node *node)
 	 * as we can.
 	 */
 	if (node->rb_right) {
-		node = node->rb_right; 
+		node = node->rb_right;
 		while (node->rb_left)
 			node=node->rb_left;
 		return (struct rb_node *)node;
@@ -534,7 +569,7 @@ struct rb_node *rb_prev(const struct rb_node *node)
 	 * as we can.
 	 */
 	if (node->rb_left) {
-		node = node->rb_left; 
+		node = node->rb_left;
 		while (node->rb_right)
 			node=node->rb_right;
 		return (struct rb_node *)node;
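For context, rb_root_cached keeps a pointer to the leftmost node so the smallest entry can be fetched without descending the tree; that is what the rbtree_test.c benchmarks below measure. A minimal sketch of the caller side, with a made-up item type; the caller tracks whether insertion only ever went left:

	#include <linux/rbtree.h>
	#include <linux/types.h>

	struct item {
		struct rb_node node;
		u32 key;
	};

	static struct rb_root_cached demo_tree = RB_ROOT_CACHED;

	static void item_insert(struct item *it)
	{
		struct rb_node **link = &demo_tree.rb_root.rb_node, *parent = NULL;
		bool leftmost = true;

		while (*link) {
			parent = *link;
			if (it->key < rb_entry(parent, struct item, node)->key)
				link = &parent->rb_left;
			else {
				link = &parent->rb_right;
				leftmost = false;	/* went right at least once */
			}
		}
		rb_link_node(&it->node, parent, link);
		rb_insert_color_cached(&it->node, &demo_tree, leftmost);
	}

	static struct item *item_first(void)
	{
		struct rb_node *n = rb_first_cached(&demo_tree);	/* O(1) */

		return n ? rb_entry(n, struct item, node) : NULL;
	}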
diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c
index 8b3c9dc88262..191a238e5a9d 100644
--- a/lib/rbtree_test.c
+++ b/lib/rbtree_test.c
@@ -1,11 +1,18 @@
 #include <linux/module.h>
+#include <linux/moduleparam.h>
 #include <linux/rbtree_augmented.h>
 #include <linux/random.h>
+#include <linux/slab.h>
 #include <asm/timex.h>
 
-#define NODES       100
-#define PERF_LOOPS  100000
-#define CHECK_LOOPS 100
+#define __param(type, name, init, msg)		\
+	static type name = init;		\
+	module_param(name, type, 0444);		\
+	MODULE_PARM_DESC(name, msg);
+
+__param(int, nnodes, 100, "Number of nodes in the rb-tree");
+__param(int, perf_loops, 100000, "Number of iterations modifying the rb-tree");
+__param(int, check_loops, 100, "Number of iterations modifying and verifying the rb-tree");
 
 struct test_node {
 	u32 key;
@@ -16,14 +23,14 @@ struct test_node {
 	u32 augmented;
 };
 
-static struct rb_root root = RB_ROOT;
-static struct test_node nodes[NODES];
+static struct rb_root_cached root = RB_ROOT_CACHED;
+static struct test_node *nodes = NULL;
 
 static struct rnd_state rnd;
 
-static void insert(struct test_node *node, struct rb_root *root)
+static void insert(struct test_node *node, struct rb_root_cached *root)
 {
-	struct rb_node **new = &root->rb_node, *parent = NULL;
+	struct rb_node **new = &root->rb_root.rb_node, *parent = NULL;
 	u32 key = node->key;
 
 	while (*new) {
@@ -35,14 +42,40 @@ static void insert(struct test_node *node, struct rb_root *root)
 	}
 
 	rb_link_node(&node->rb, parent, new);
-	rb_insert_color(&node->rb, root);
+	rb_insert_color(&node->rb, &root->rb_root);
+}
+
+static void insert_cached(struct test_node *node, struct rb_root_cached *root)
+{
+	struct rb_node **new = &root->rb_root.rb_node, *parent = NULL;
+	u32 key = node->key;
+	bool leftmost = true;
+
+	while (*new) {
+		parent = *new;
+		if (key < rb_entry(parent, struct test_node, rb)->key)
+			new = &parent->rb_left;
+		else {
+			new = &parent->rb_right;
+			leftmost = false;
+		}
+	}
+
+	rb_link_node(&node->rb, parent, new);
+	rb_insert_color_cached(&node->rb, root, leftmost);
 }
 
-static inline void erase(struct test_node *node, struct rb_root *root)
+static inline void erase(struct test_node *node, struct rb_root_cached *root)
 {
-	rb_erase(&node->rb, root);
+	rb_erase(&node->rb, &root->rb_root);
 }
 
+static inline void erase_cached(struct test_node *node, struct rb_root_cached *root)
+{
+	rb_erase_cached(&node->rb, root);
+}
+
+
 static inline u32 augment_recompute(struct test_node *node)
 {
 	u32 max = node->val, child_augmented;
@@ -64,9 +97,10 @@ static inline u32 augment_recompute(struct test_node *node)
 RB_DECLARE_CALLBACKS(static, augment_callbacks, struct test_node, rb,
 		     u32, augmented, augment_recompute)
 
-static void insert_augmented(struct test_node *node, struct rb_root *root)
+static void insert_augmented(struct test_node *node,
+			     struct rb_root_cached *root)
 {
-	struct rb_node **new = &root->rb_node, *rb_parent = NULL;
+	struct rb_node **new = &root->rb_root.rb_node, *rb_parent = NULL;
 	u32 key = node->key;
 	u32 val = node->val;
 	struct test_node *parent;
@@ -84,18 +118,53 @@ static void insert_augmented(struct test_node *node, struct rb_root *root)
 
 	node->augmented = val;
 	rb_link_node(&node->rb, rb_parent, new);
-	rb_insert_augmented(&node->rb, root, &augment_callbacks);
+	rb_insert_augmented(&node->rb, &root->rb_root, &augment_callbacks);
+}
+
+static void insert_augmented_cached(struct test_node *node,
+				    struct rb_root_cached *root)
+{
+	struct rb_node **new = &root->rb_root.rb_node, *rb_parent = NULL;
+	u32 key = node->key;
+	u32 val = node->val;
+	struct test_node *parent;
+	bool leftmost = true;
+
+	while (*new) {
+		rb_parent = *new;
+		parent = rb_entry(rb_parent, struct test_node, rb);
+		if (parent->augmented < val)
+			parent->augmented = val;
+		if (key < parent->key)
+			new = &parent->rb.rb_left;
+		else {
+			new = &parent->rb.rb_right;
+			leftmost = false;
+		}
+	}
+
+	node->augmented = val;
+	rb_link_node(&node->rb, rb_parent, new);
+	rb_insert_augmented_cached(&node->rb, root,
+				   leftmost, &augment_callbacks);
+}
+
+
+static void erase_augmented(struct test_node *node, struct rb_root_cached *root)
+{
+	rb_erase_augmented(&node->rb, &root->rb_root, &augment_callbacks);
 }
 
-static void erase_augmented(struct test_node *node, struct rb_root *root)
+static void erase_augmented_cached(struct test_node *node,
+				   struct rb_root_cached *root)
 {
-	rb_erase_augmented(&node->rb, root, &augment_callbacks);
+	rb_erase_augmented_cached(&node->rb, root, &augment_callbacks);
 }
 
 static void init(void)
 {
 	int i;
-	for (i = 0; i < NODES; i++) {
+	for (i = 0; i < nnodes; i++) {
 		nodes[i].key = prandom_u32_state(&rnd);
 		nodes[i].val = prandom_u32_state(&rnd);
 	}
@@ -118,7 +187,7 @@ static void check_postorder_foreach(int nr_nodes)
 {
 	struct test_node *cur, *n;
 	int count = 0;
-	rbtree_postorder_for_each_entry_safe(cur, n, &root, rb)
+	rbtree_postorder_for_each_entry_safe(cur, n, &root.rb_root, rb)
 		count++;
 
 	WARN_ON_ONCE(count != nr_nodes);
@@ -128,7 +197,7 @@ static void check_postorder(int nr_nodes)
 {
 	struct rb_node *rb;
 	int count = 0;
-	for (rb = rb_first_postorder(&root); rb; rb = rb_next_postorder(rb))
+	for (rb = rb_first_postorder(&root.rb_root); rb; rb = rb_next_postorder(rb))
 		count++;
 
 	WARN_ON_ONCE(count != nr_nodes);
@@ -140,7 +209,7 @@ static void check(int nr_nodes)
 	int count = 0, blacks = 0;
 	u32 prev_key = 0;
 
-	for (rb = rb_first(&root); rb; rb = rb_next(rb)) {
+	for (rb = rb_first(&root.rb_root); rb; rb = rb_next(rb)) {
 		struct test_node *node = rb_entry(rb, struct test_node, rb);
 		WARN_ON_ONCE(node->key < prev_key);
 		WARN_ON_ONCE(is_red(rb) &&
@@ -155,7 +224,7 @@ static void check(int nr_nodes)
 	}
 
 	WARN_ON_ONCE(count != nr_nodes);
-	WARN_ON_ONCE(count < (1 << black_path_count(rb_last(&root))) - 1);
+	WARN_ON_ONCE(count < (1 << black_path_count(rb_last(&root.rb_root))) - 1);
 
 	check_postorder(nr_nodes);
 	check_postorder_foreach(nr_nodes);
@@ -166,7 +235,7 @@ static void check_augmented(int nr_nodes)
 	struct rb_node *rb;
 
 	check(nr_nodes);
-	for (rb = rb_first(&root); rb; rb = rb_next(rb)) {
+	for (rb = rb_first(&root.rb_root); rb; rb = rb_next(rb)) {
 		struct test_node *node = rb_entry(rb, struct test_node, rb);
 		WARN_ON_ONCE(node->augmented != augment_recompute(node));
 	}
@@ -176,6 +245,11 @@ static int __init rbtree_test_init(void)
 {
 	int i, j;
 	cycles_t time1, time2, time;
+	struct rb_node *node;
+
+	nodes = kmalloc(nnodes * sizeof(*nodes), GFP_KERNEL);
+	if (!nodes)
+		return -ENOMEM;
 
 	printk(KERN_ALERT "rbtree testing");
 
@@ -184,27 +258,88 @@ static int __init rbtree_test_init(void)
 
 	time1 = get_cycles();
 
-	for (i = 0; i < PERF_LOOPS; i++) {
-		for (j = 0; j < NODES; j++)
+	for (i = 0; i < perf_loops; i++) {
+		for (j = 0; j < nnodes; j++)
 			insert(nodes + j, &root);
-		for (j = 0; j < NODES; j++)
+		for (j = 0; j < nnodes; j++)
 			erase(nodes + j, &root);
 	}
 
 	time2 = get_cycles();
 	time = time2 - time1;
 
-	time = div_u64(time, PERF_LOOPS);
-	printk(" -> %llu cycles\n", (unsigned long long)time);
+	time = div_u64(time, perf_loops);
+	printk(" -> test 1 (latency of nnodes insert+delete): %llu cycles\n",
+	       (unsigned long long)time);
+
+	time1 = get_cycles();
+
+	for (i = 0; i < perf_loops; i++) {
+		for (j = 0; j < nnodes; j++)
+			insert_cached(nodes + j, &root);
+		for (j = 0; j < nnodes; j++)
+			erase_cached(nodes + j, &root);
+	}
+
+	time2 = get_cycles();
+	time = time2 - time1;
+
+	time = div_u64(time, perf_loops);
+	printk(" -> test 2 (latency of nnodes cached insert+delete): %llu cycles\n",
+	       (unsigned long long)time);
+
+	for (i = 0; i < nnodes; i++)
+		insert(nodes + i, &root);
+
+	time1 = get_cycles();
+
+	for (i = 0; i < perf_loops; i++) {
+		for (node = rb_first(&root.rb_root); node; node = rb_next(node))
+			;
+	}
+
+	time2 = get_cycles();
+	time = time2 - time1;
+
+	time = div_u64(time, perf_loops);
+	printk(" -> test 3 (latency of inorder traversal): %llu cycles\n",
+	       (unsigned long long)time);
+
+	time1 = get_cycles();
+
+	for (i = 0; i < perf_loops; i++)
+		node = rb_first(&root.rb_root);
+
+	time2 = get_cycles();
+	time = time2 - time1;
+
+	time = div_u64(time, perf_loops);
+	printk(" -> test 4 (latency to fetch first node)\n");
+	printk("        non-cached: %llu cycles\n", (unsigned long long)time);
+
+	time1 = get_cycles();
+
+	for (i = 0; i < perf_loops; i++)
+		node = rb_first_cached(&root);
+
+	time2 = get_cycles();
+	time = time2 - time1;
+
+	time = div_u64(time, perf_loops);
+	printk("        cached: %llu cycles\n", (unsigned long long)time);
 
-	for (i = 0; i < CHECK_LOOPS; i++) {
+	for (i = 0; i < nnodes; i++)
+		erase(nodes + i, &root);
+
+	/* run checks */
+	for (i = 0; i < check_loops; i++) {
 		init();
-		for (j = 0; j < NODES; j++) {
+		for (j = 0; j < nnodes; j++) {
 			check(j);
 			insert(nodes + j, &root);
 		}
-		for (j = 0; j < NODES; j++) {
-			check(NODES - j);
+		for (j = 0; j < nnodes; j++) {
+			check(nnodes - j);
 			erase(nodes + j, &root);
 		}
 		check(0);
@@ -216,32 +351,49 @@ static int __init rbtree_test_init(void)
 
 	time1 = get_cycles();
 
-	for (i = 0; i < PERF_LOOPS; i++) {
-		for (j = 0; j < NODES; j++)
+	for (i = 0; i < perf_loops; i++) {
+		for (j = 0; j < nnodes; j++)
 			insert_augmented(nodes + j, &root);
-		for (j = 0; j < NODES; j++)
+		for (j = 0; j < nnodes; j++)
 			erase_augmented(nodes + j, &root);
 	}
 
 	time2 = get_cycles();
 	time = time2 - time1;
 
-	time = div_u64(time, PERF_LOOPS);
-	printk(" -> %llu cycles\n", (unsigned long long)time);
+	time = div_u64(time, perf_loops);
+	printk(" -> test 1 (latency of nnodes insert+delete): %llu cycles\n", (unsigned long long)time);
+
+	time1 = get_cycles();
+
+	for (i = 0; i < perf_loops; i++) {
+		for (j = 0; j < nnodes; j++)
+			insert_augmented_cached(nodes + j, &root);
+		for (j = 0; j < nnodes; j++)
+			erase_augmented_cached(nodes + j, &root);
+	}
+
+	time2 = get_cycles();
+	time = time2 - time1;
+
+	time = div_u64(time, perf_loops);
+	printk(" -> test 2 (latency of nnodes cached insert+delete): %llu cycles\n", (unsigned long long)time);
 
-	for (i = 0; i < CHECK_LOOPS; i++) {
+	for (i = 0; i < check_loops; i++) {
 		init();
-		for (j = 0; j < NODES; j++) {
+		for (j = 0; j < nnodes; j++) {
 			check_augmented(j);
 			insert_augmented(nodes + j, &root);
 		}
-		for (j = 0; j < NODES; j++) {
-			check_augmented(NODES - j);
+		for (j = 0; j < nnodes; j++) {
+			check_augmented(nnodes - j);
 			erase_augmented(nodes + j, &root);
 		}
 		check_augmented(0);
 	}
 
+	kfree(nodes);
+
 	return -EAGAIN; /* Fail will directly unload the module */
 }
diff --git a/lib/string.c b/lib/string.c
index ebbb99c775bd..9921dc202db4 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -723,6 +723,72 @@ void memzero_explicit(void *s, size_t count)
 }
 EXPORT_SYMBOL(memzero_explicit);
 
+#ifndef __HAVE_ARCH_MEMSET16
+/**
+ * memset16() - Fill a memory area with a uint16_t
+ * @s: Pointer to the start of the area.
+ * @v: The value to fill the area with
+ * @count: The number of values to store
+ *
+ * Differs from memset() in that it fills with a uint16_t instead
+ * of a byte.  Remember that @count is the number of uint16_ts to
+ * store, not the number of bytes.
+ */
+void *memset16(uint16_t *s, uint16_t v, size_t count)
+{
+	uint16_t *xs = s;
+
+	while (count--)
+		*xs++ = v;
+	return s;
+}
+EXPORT_SYMBOL(memset16);
+#endif
+
+#ifndef __HAVE_ARCH_MEMSET32
+/**
+ * memset32() - Fill a memory area with a uint32_t
+ * @s: Pointer to the start of the area.
+ * @v: The value to fill the area with
+ * @count: The number of values to store
+ *
+ * Differs from memset() in that it fills with a uint32_t instead
+ * of a byte.  Remember that @count is the number of uint32_ts to
+ * store, not the number of bytes.
+ */
+void *memset32(uint32_t *s, uint32_t v, size_t count)
+{
+	uint32_t *xs = s;
+
+	while (count--)
+		*xs++ = v;
+	return s;
+}
+EXPORT_SYMBOL(memset32);
+#endif
+
+#ifndef __HAVE_ARCH_MEMSET64
+/**
+ * memset64() - Fill a memory area with a uint64_t
+ * @s: Pointer to the start of the area.
+ * @v: The value to fill the area with
+ * @count: The number of values to store
+ *
+ * Differs from memset() in that it fills with a uint64_t instead
+ * of a byte.  Remember that @count is the number of uint64_ts to
+ * store, not the number of bytes.
+ */
+void *memset64(uint64_t *s, uint64_t v, size_t count)
+{
+	uint64_t *xs = s;
+
+	while (count--)
+		*xs++ = v;
+	return s;
+}
+EXPORT_SYMBOL(memset64);
+#endif
+
 #ifndef __HAVE_ARCH_MEMCPY
 /**
  * memcpy - Copy one area of memory to another
@@ -985,3 +1051,144 @@ void fortify_panic(const char *name)
 	BUG();
 }
 EXPORT_SYMBOL(fortify_panic);
+
+#ifdef CONFIG_STRING_SELFTEST
+#include <linux/slab.h>
+#include <linux/module.h>
+
+static __init int memset16_selftest(void)
+{
+	unsigned i, j, k;
+	u16 v, *p;
+
+	p = kmalloc(256 * 2 * 2, GFP_KERNEL);
+	if (!p)
+		return -1;
+
+	for (i = 0; i < 256; i++) {
+		for (j = 0; j < 256; j++) {
+			memset(p, 0xa1, 256 * 2 * sizeof(v));
+			memset16(p + i, 0xb1b2, j);
+			for (k = 0; k < 512; k++) {
+				v = p[k];
+				if (k < i) {
+					if (v != 0xa1a1)
+						goto fail;
+				} else if (k < i + j) {
+					if (v != 0xb1b2)
+						goto fail;
+				} else {
+					if (v != 0xa1a1)
+						goto fail;
+				}
+			}
+		}
+	}
+
+fail:
+	kfree(p);
+	if (i < 256)
+		return (i << 24) | (j << 16) | k;
+	return 0;
+}
+
+static __init int memset32_selftest(void)
+{
+	unsigned i, j, k;
+	u32 v, *p;
+
+	p = kmalloc(256 * 2 * 4, GFP_KERNEL);
+	if (!p)
+		return -1;
+
+	for (i = 0; i < 256; i++) {
+		for (j = 0; j < 256; j++) {
+			memset(p, 0xa1, 256 * 2 * sizeof(v));
+			memset32(p + i, 0xb1b2b3b4, j);
+			for (k = 0; k < 512; k++) {
+				v = p[k];
+				if (k < i) {
+					if (v != 0xa1a1a1a1)
+						goto fail;
+				} else if (k < i + j) {
+					if (v != 0xb1b2b3b4)
+						goto fail;
+				} else {
+					if (v != 0xa1a1a1a1)
+						goto fail;
+				}
+			}
+		}
+	}
+
+fail:
+	kfree(p);
+	if (i < 256)
+		return (i << 24) | (j << 16) | k;
+	return 0;
+}
+
+static __init int memset64_selftest(void)
+{
+	unsigned i, j, k;
+	u64 v, *p;
+
+	p = kmalloc(256 * 2 * 8, GFP_KERNEL);
+	if (!p)
+		return -1;
+
+	for (i = 0; i < 256; i++) {
+		for (j = 0; j < 256; j++) {
+			memset(p, 0xa1, 256 * 2 * sizeof(v));
+			memset64(p + i, 0xb1b2b3b4b5b6b7b8ULL, j);
+			for (k = 0; k < 512; k++) {
+				v = p[k];
+				if (k < i) {
+					if (v != 0xa1a1a1a1a1a1a1a1ULL)
+						goto fail;
+				} else if (k < i + j) {
+					if (v != 0xb1b2b3b4b5b6b7b8ULL)
+						goto fail;
+				} else {
+					if (v != 0xa1a1a1a1a1a1a1a1ULL)
+						goto fail;
+				}
+			}
+		}
+	}
+
+fail:
+	kfree(p);
+	if (i < 256)
+		return (i << 24) | (j << 16) | k;
+	return 0;
+}
+
+static __init int string_selftest_init(void)
+{
+	int test, subtest;
+
+	test = 1;
+	subtest = memset16_selftest();
+	if (subtest)
+		goto fail;
+
+	test = 2;
+	subtest = memset32_selftest();
+	if (subtest)
+		goto fail;
+
+	test = 3;
+	subtest = memset64_selftest();
+	if (subtest)
+		goto fail;
+
+	pr_info("String selftests succeeded\n");
+	return 0;
+fail:
+	pr_crit("String selftest failure %d.%08x\n", test, subtest);
+	return 0;
+}
+
+module_init(string_selftest_init);
+#endif /* CONFIG_STRING_SELFTEST */
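The new memsetXX() helpers fill in element-sized units rather than bytes, so @count is a number of elements. A sketch of typical use; the fill_line() wrapper is invented for illustration (e.g. painting one scanline of a 32bpp framebuffer):

	#include <linux/string.h>
	#include <linux/types.h>

	static void fill_line(u32 *line, u32 pixel, size_t width)
	{
		/* @width counts u32 pixels, not bytes */
		memset32(line, pixel, width);
	}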
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index 2526a2975c51..599c6713f2a2 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -165,6 +165,96 @@ static void __init test_zero_fill_copy(void)
 	expect_eq_pbl("128-1023", bmap2, 1024);
 }
 
+#define PARSE_TIME 0x1
+
+struct test_bitmap_parselist{
+	const int errno;
+	const char *in;
+	const unsigned long *expected;
+	const int nbits;
+	const int flags;
+};
+
+static const unsigned long exp[] __initconst = {
+	BITMAP_FROM_U64(1),
+	BITMAP_FROM_U64(2),
+	BITMAP_FROM_U64(0x0000ffff),
+	BITMAP_FROM_U64(0xffff0000),
+	BITMAP_FROM_U64(0x55555555),
+	BITMAP_FROM_U64(0xaaaaaaaa),
+	BITMAP_FROM_U64(0x11111111),
+	BITMAP_FROM_U64(0x22222222),
+	BITMAP_FROM_U64(0xffffffff),
+	BITMAP_FROM_U64(0xfffffffe),
+	BITMAP_FROM_U64(0x3333333311111111),
+	BITMAP_FROM_U64(0xffffffff77777777)
+};
+
+static const unsigned long exp2[] __initconst = {
+	BITMAP_FROM_U64(0x3333333311111111),
+	BITMAP_FROM_U64(0xffffffff77777777)
+};
+
+static const struct test_bitmap_parselist parselist_tests[] __initconst = {
+#define step (sizeof(u64) / sizeof(unsigned long))
+
+	{0, "0",			&exp[0], 8, 0},
+	{0, "1",			&exp[1 * step], 8, 0},
+	{0, "0-15",			&exp[2 * step], 32, 0},
+	{0, "16-31",			&exp[3 * step], 32, 0},
+	{0, "0-31:1/2",			&exp[4 * step], 32, 0},
+	{0, "1-31:1/2",			&exp[5 * step], 32, 0},
+	{0, "0-31:1/4",			&exp[6 * step], 32, 0},
+	{0, "1-31:1/4",			&exp[7 * step], 32, 0},
+	{0, "0-31:4/4",			&exp[8 * step], 32, 0},
+	{0, "1-31:4/4",			&exp[9 * step], 32, 0},
+	{0, "0-31:1/4,32-63:2/4",	&exp[10 * step], 64, 0},
+	{0, "0-31:3/4,32-63:4/4",	&exp[11 * step], 64, 0},
+
+	{0, "0-31:1/4,32-63:2/4,64-95:3/4,96-127:4/4",	exp2, 128, 0},
+
+	{0, "0-2047:128/256", NULL, 2048, PARSE_TIME},
+
+	{-EINVAL, "-1",	NULL, 8, 0},
+	{-EINVAL, "-0",	NULL, 8, 0},
+	{-EINVAL, "10-1", NULL, 8, 0},
+	{-EINVAL, "0-31:10/1", NULL, 8, 0},
+};
+
+static void __init test_bitmap_parselist(void)
+{
+	int i;
+	int err;
+	cycles_t cycles;
+	DECLARE_BITMAP(bmap, 2048);
+
+	for (i = 0; i < ARRAY_SIZE(parselist_tests); i++) {
+#define ptest parselist_tests[i]
+
+		cycles = get_cycles();
+		err = bitmap_parselist(ptest.in, bmap, ptest.nbits);
+		cycles = get_cycles() - cycles;
+
+		if (err != ptest.errno) {
+			pr_err("test %d: input is %s, errno is %d, expected %d\n",
+					i, ptest.in, err, ptest.errno);
+			continue;
+		}
+
+		if (!err && ptest.expected
+			 && !__bitmap_equal(bmap, ptest.expected, ptest.nbits)) {
+			pr_err("test %d: input is %s, result is 0x%lx, expected 0x%lx\n",
+					i, ptest.in, bmap[0], *ptest.expected);
+			continue;
+		}
+
+		if (ptest.flags & PARSE_TIME)
+			pr_err("test %d: input is '%s' OK, Time: %llu\n",
+					i, ptest.in,
+					(unsigned long long)cycles);
+	}
+}
+
 static void __init test_bitmap_u32_array_conversions(void)
 {
 	DECLARE_BITMAP(bmap1, 1024);
@@ -365,6 +455,7 @@ static int __init test_bitmap_init(void)
 {
 	test_zero_fill_copy();
 	test_bitmap_u32_array_conversions();
+	test_bitmap_parselist();
 	test_mem_optimisations();
 
 	if (failed_tests == 0)
diff --git a/lib/test_debug_virtual.c b/lib/test_debug_virtual.c
new file mode 100644
index 000000000000..b9cdeecc19dc
--- /dev/null
+++ b/lib/test_debug_virtual.c
@@ -0,0 +1,49 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/sizes.h>
+
+#include <asm/page.h>
+#ifdef CONFIG_MIPS
+#include <asm/bootinfo.h>
+#endif
+
+struct foo {
+	unsigned int bar;
+};
+
+struct foo *foo;
+
+static int __init test_debug_virtual_init(void)
+{
+	phys_addr_t pa;
+	void *va;
+
+	va = (void *)VMALLOC_START;
+	pa = virt_to_phys(va);
+
+	pr_info("PA: %pa for VA: 0x%lx\n", &pa, (unsigned long)va);
+
+	foo = kzalloc(sizeof(*foo), GFP_KERNEL);
+	if (!foo)
+		return -ENOMEM;
+
+	pa = virt_to_phys(foo);
+	va = foo;
+	pr_info("PA: %pa for VA: 0x%lx\n", &pa, (unsigned long)va);
+
+	return 0;
+}
+module_init(test_debug_virtual_init);
+
+static void __exit test_debug_virtual_exit(void)
+{
+	kfree(foo);
+}
+module_exit(test_debug_virtual_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Test module for CONFIG_DEBUG_VIRTUAL");
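With CONFIG_DEBUG_VIRTUAL enabled, the first virt_to_phys() call above, on a vmalloc-range address, should trigger a warning: virt_to_phys() is only valid for the kernel's linear map. For contrast, a sketch of translating a vmalloc address through its page tables instead; the helper name is invented, and the exact header providing page_to_phys() is arch-specific (asm/io.h on x86):

	#include <linux/vmalloc.h>
	#include <linux/mm.h>
	#include <asm/io.h>

	static phys_addr_t vmalloc_pa(void *va)
	{
		/* vmalloc memory is not linearly mapped; look up its backing page */
		struct page *page = vmalloc_to_page(va);

		return page_to_phys(page) + offset_in_page(va);
	}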
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index ff9148969b92..fba78d25e825 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -924,7 +924,7 @@ static int test_dev_config_update_uint_range(struct kmod_test_device *test_dev,
 	if (ret)
 		return ret;
 
-	if (new < min || new > max || new > UINT_MAX)
+	if (new < min || new > max)
 		return -EINVAL;
 
 	mutex_lock(&test_dev->config_mutex);
@@ -946,7 +946,7 @@ static int test_dev_config_update_int(struct kmod_test_device *test_dev,
 	if (ret)
 		return ret;
 
-	if (new > INT_MAX || new < INT_MIN)
+	if (new < INT_MIN || new > INT_MAX)
 		return -EINVAL;
 
 	mutex_lock(&test_dev->config_mutex);