Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug             |  59
-rw-r--r--  lib/Makefile                  |   7
-rw-r--r--  lib/bcd.c                     |   8
-rw-r--r--  lib/crc32.c                   |   9
-rw-r--r--  lib/decompress.c              |   9
-rw-r--r--  lib/dma-debug.c               |   5
-rw-r--r--  lib/dynamic_debug.c           |  56
-rw-r--r--  lib/flex_proportions.c        |   2
-rw-r--r--  lib/gcd.c                     |   3
-rw-r--r--  lib/gen_crc32table.c          |   6
-rw-r--r--  lib/genalloc.c                |  88
-rw-r--r--  lib/idr.c                     |  32
-rw-r--r--  lib/interval_tree.c           |  10
-rw-r--r--  lib/interval_tree_test_main.c | 105
-rw-r--r--  lib/kasprintf.c               |   2
-rw-r--r--  lib/kobject_uevent.c          |   5
-rw-r--r--  lib/nlattr.c                  |   4
-rw-r--r--  lib/parser.c                  |  10
-rw-r--r--  lib/plist.c                   |   4
-rw-r--r--  lib/prio_tree.c               | 466
-rw-r--r--  lib/rbtree.c                  | 656
-rw-r--r--  lib/rbtree_test.c             | 234
-rw-r--r--  lib/scatterlist.c             |  35
-rw-r--r--  lib/spinlock_debug.c          |  32
-rw-r--r--  lib/swiotlb.c                 |  33
-rw-r--r--  lib/vsprintf.c                | 139

26 files changed, 1073 insertions(+), 946 deletions(-)
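The centerpiece of this series is the removal of lib/prio_tree.c in favor of a generic interval tree built on the rewritten rbtree code. For orientation before the patches themselves, here is a minimal sketch of how a client instantiates its own interval tree with the INTERVAL_TREE_DEFINE template, mirroring the new lib/interval_tree.c below; the struct name and fields here are hypothetical, only the template arguments follow the new API.

#include <linux/rbtree.h>
#include <linux/interval_tree_generic.h>

/* Hypothetical client type; any struct embedding an rb_node plus
 * start/last endpoints and a subtree-max field will do. */
struct foo_interval {
	struct rb_node rb;
	unsigned long start;		/* first location in the interval */
	unsigned long last;		/* last location, inclusive */
	unsigned long __subtree_last;	/* maintained by the generated code */
};

#define START(n) ((n)->start)
#define LAST(n)  ((n)->last)

/* Expands to foo_it_insert(), foo_it_remove(), foo_it_iter_first()
 * and foo_it_iter_next(), all static. */
INTERVAL_TREE_DEFINE(struct foo_interval, rb, unsigned long, __subtree_last,
		     START, LAST, static, foo_it)

Stabbing queries (the job the old prio_tree did for vmas) then iterate with foo_it_iter_first()/foo_it_iter_next() over a [start, last] range, as the new test module further down demonstrates.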
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 2403a63b5da5..28e9d6c98941 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -196,12 +196,13 @@ config LOCKUP_DETECTOR
 	  thresholds can be controlled through the sysctl watchdog_thresh.
 
 config HARDLOCKUP_DETECTOR
-	def_bool LOCKUP_DETECTOR && PERF_EVENTS && HAVE_PERF_EVENTS_NMI && \
-		 !HAVE_NMI_WATCHDOG
+	def_bool y
+	depends on LOCKUP_DETECTOR && !HAVE_NMI_WATCHDOG
+	depends on PERF_EVENTS && HAVE_PERF_EVENTS_NMI
 
 config BOOTPARAM_HARDLOCKUP_PANIC
 	bool "Panic (Reboot) On Hard Lockups"
-	depends on LOCKUP_DETECTOR
+	depends on HARDLOCKUP_DETECTOR
 	help
 	  Say Y here to enable the kernel to panic on "hard lockups",
 	  which are bugs that cause the kernel to loop in kernel
@@ -212,7 +213,7 @@ config BOOTPARAM_HARDLOCKUP_PANIC
 
 config BOOTPARAM_HARDLOCKUP_PANIC_VALUE
 	int
-	depends on LOCKUP_DETECTOR
+	depends on HARDLOCKUP_DETECTOR
 	range 0 1
 	default 0 if !BOOTPARAM_HARDLOCKUP_PANIC
 	default 1 if BOOTPARAM_HARDLOCKUP_PANIC
@@ -449,11 +450,12 @@ config SLUB_STATS
 	  out which slabs are relevant to a particular load.
 	  Try running: slabinfo -DA
 
+config HAVE_DEBUG_KMEMLEAK
+	bool
+
 config DEBUG_KMEMLEAK
 	bool "Kernel memory leak detector"
-	depends on DEBUG_KERNEL && EXPERIMENTAL && \
-		(X86 || ARM || PPC || MIPS || S390 || SPARC64 || SUPERH || MICROBLAZE || TILE)
-
+	depends on DEBUG_KERNEL && EXPERIMENTAL && HAVE_DEBUG_KMEMLEAK
 	select DEBUG_FS
 	select STACKTRACE if STACKTRACE_SUPPORT
 	select KALLSYMS
@@ -629,6 +631,20 @@ config PROVE_RCU_REPEATEDLY
 
 	 Say N if you are unsure.
 
+config PROVE_RCU_DELAY
+	bool "RCU debugging: preemptible RCU race provocation"
+	depends on DEBUG_KERNEL && PREEMPT_RCU
+	default n
+	help
+	 There is a class of races that involve an unlikely preemption
+	 of __rcu_read_unlock() just after ->rcu_read_lock_nesting has
+	 been set to INT_MIN.  This feature inserts a delay at that
+	 point to increase the probability of these races.
+
+	 Say Y to increase probability of preemption of __rcu_read_unlock().
+
+	 Say N if you are unsure.
+
 config SPARSE_RCU_POINTER
 	bool "RCU debugging: sparse-based checks for pointer usage"
 	default n
@@ -735,11 +751,12 @@ config DEBUG_HIGHMEM
 	  This options enables addition error checking for high memory
 	  systems.  Disable for production systems.
 
+config HAVE_DEBUG_BUGVERBOSE
+	bool
+
 config DEBUG_BUGVERBOSE
 	bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EXPERT
-	depends on BUG
-	depends on ARM || AVR32 || M32R || M68K || SPARC32 || SPARC64 || \
-		   FRV || SUPERH || GENERIC_BUG || BLACKFIN || MN10300 || TILE
+	depends on BUG && (GENERIC_BUG || HAVE_DEBUG_BUGVERBOSE)
 	default y
 	help
 	  Say Y here to make BUG() panics output the file name and line number
@@ -781,6 +798,15 @@ config DEBUG_VM
 
 	  If unsure, say N.
 
+config DEBUG_VM_RB
+	bool "Debug VM red-black trees"
+	depends on DEBUG_VM
+	help
+	  Enable this to turn on more extended checks in the virtual-memory
+	  system that may impact performance.
+
+	  If unsure, say N.
+
 config DEBUG_VIRTUAL
 	bool "Debug VM translations"
 	depends on DEBUG_KERNEL && X86
@@ -1265,6 +1291,19 @@ config LATENCYTOP
 source mm/Kconfig.debug
 source kernel/trace/Kconfig
 
+config RBTREE_TEST
+	tristate "Red-Black tree test"
+	depends on m && DEBUG_KERNEL
+	help
+	  A benchmark measuring the performance of the rbtree library.
+	  Also includes rbtree invariant checks.
+
+config INTERVAL_TREE_TEST
+	tristate "Interval tree test"
+	depends on m && DEBUG_KERNEL
+	help
+	  A benchmark measuring the performance of the interval tree library
+
 config PROVIDE_OHCI1394_DMA_INIT
 	bool "Remote debugging over FireWire early on boot"
 	depends on PCI && X86
diff --git a/lib/Makefile b/lib/Makefile
index ca856b69a21d..821a16229111 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -9,7 +9,7 @@ endif
 
 lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 rbtree.o radix-tree.o dump_stack.o timerqueue.o\
-	 idr.o int_sqrt.o extable.o prio_tree.o \
+	 idr.o int_sqrt.o extable.o \
 	 sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \
 	 proportions.o flex_proportions.o prio_heap.o ratelimit.o show_mem.o \
 	 is_single_threaded.o plist.o decompress.o
@@ -140,6 +140,11 @@ $(foreach file, $(libfdt_files), \
 	$(eval CFLAGS_$(file) = -I$(src)/../scripts/dtc/libfdt))
 lib-$(CONFIG_LIBFDT) += $(libfdt_files)
 
+obj-$(CONFIG_RBTREE_TEST) += rbtree_test.o
+obj-$(CONFIG_INTERVAL_TREE_TEST) += interval_tree_test.o
+
+interval_tree_test-objs := interval_tree_test_main.o interval_tree.o
+
 obj-$(CONFIG_ASN1) += asn1_decoder.o
 
 hostprogs-y	:= gen_crc32table
diff --git a/lib/bcd.c b/lib/bcd.c
index 55efaf742346..40d304efe272 100644
--- a/lib/bcd.c
+++ b/lib/bcd.c
@@ -1,14 +1,14 @@
 #include <linux/bcd.h>
 #include <linux/export.h>
 
-unsigned bcd2bin(unsigned char val)
+unsigned _bcd2bin(unsigned char val)
 {
 	return (val & 0x0f) + (val >> 4) * 10;
 }
-EXPORT_SYMBOL(bcd2bin);
+EXPORT_SYMBOL(_bcd2bin);
 
-unsigned char bin2bcd(unsigned val)
+unsigned char _bin2bcd(unsigned val)
 {
 	return ((val / 10) << 4) + val % 10;
 }
-EXPORT_SYMBOL(bin2bcd);
+EXPORT_SYMBOL(_bin2bcd);
diff --git a/lib/crc32.c b/lib/crc32.c
index 61774b8db4de..072fbd8234d5 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -188,11 +188,13 @@ u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
 #else
 u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
 {
-	return crc32_le_generic(crc, p, len, crc32table_le, CRCPOLY_LE);
+	return crc32_le_generic(crc, p, len,
+			(const u32 (*)[256])crc32table_le, CRCPOLY_LE);
 }
 u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
 {
-	return crc32_le_generic(crc, p, len, crc32ctable_le, CRC32C_POLY_LE);
+	return crc32_le_generic(crc, p, len,
+			(const u32 (*)[256])crc32ctable_le, CRC32C_POLY_LE);
 }
 #endif
 EXPORT_SYMBOL(crc32_le);
@@ -253,7 +255,8 @@ u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
 #else
 u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
 {
-	return crc32_be_generic(crc, p, len, crc32table_be, CRCPOLY_BE);
+	return crc32_be_generic(crc, p, len,
+			(const u32 (*)[256])crc32table_be, CRCPOLY_BE);
 }
 #endif
 EXPORT_SYMBOL(crc32_be);
diff --git a/lib/decompress.c b/lib/decompress.c
index 3d766b7f60ab..31a804277282 100644
--- a/lib/decompress.c
+++ b/lib/decompress.c
@@ -14,6 +14,7 @@
 
 #include <linux/types.h>
 #include <linux/string.h>
+#include <linux/init.h>
 
 #ifndef CONFIG_DECOMPRESS_GZIP
 # define gunzip NULL
@@ -31,11 +32,13 @@
 # define unlzo NULL
 #endif
 
-static const struct compress_format {
+struct compress_format {
 	unsigned char magic[2];
 	const char *name;
 	decompress_fn decompressor;
-} compressed_formats[] = {
+};
+
+static const struct compress_format compressed_formats[] __initdata = {
 	{ {037, 0213}, "gzip", gunzip },
 	{ {037, 0236}, "gzip", gunzip },
 	{ {0x42, 0x5a}, "bzip2", bunzip2 },
@@ -45,7 +48,7 @@ static const struct compress_format {
 	{ {0, 0}, NULL, NULL }
 };
 
-decompress_fn decompress_method(const unsigned char *inbuf, int len,
+decompress_fn __init decompress_method(const unsigned char *inbuf, int len,
 				const char **name)
 {
 	const struct compress_format *cf;
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 66ce41489133..b9087bff008b 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -120,11 +120,6 @@ static const char *type2name[4] = { "single", "page",
 static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
 				   "DMA_FROM_DEVICE", "DMA_NONE" };
 
-/* little merge helper - remove it after the merge window */
-#ifndef BUS_NOTIFY_UNBOUND_DRIVER
-#define BUS_NOTIFY_UNBOUND_DRIVER 0x0005
-#endif
-
 /*
  * The access to some variables in this macro is racy. We can't use atomic_t
  * here because all these variables are exported to debugfs. Some of them even
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 7ca29a0a3019..e7f7d993357a 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -521,25 +521,25 @@ static char *dynamic_emit_prefix(const struct _ddebug *desc, char *buf)
 	int pos_after_tid;
 	int pos = 0;
 
-	pos += snprintf(buf + pos, remaining(pos), "%s", KERN_DEBUG);
+	*buf = '\0';
+
 	if (desc->flags & _DPRINTK_FLAGS_INCL_TID) {
 		if (in_interrupt())
-			pos += snprintf(buf + pos, remaining(pos), "%s ",
-						"<intr>");
+			pos += snprintf(buf + pos, remaining(pos), "<intr> ");
 		else
 			pos += snprintf(buf + pos, remaining(pos), "[%d] ",
-						task_pid_vnr(current));
+					task_pid_vnr(current));
 	}
 	pos_after_tid = pos;
 	if (desc->flags & _DPRINTK_FLAGS_INCL_MODNAME)
 		pos += snprintf(buf + pos, remaining(pos), "%s:",
-					desc->modname);
+				desc->modname);
 	if (desc->flags & _DPRINTK_FLAGS_INCL_FUNCNAME)
 		pos += snprintf(buf + pos, remaining(pos), "%s:",
-					desc->function);
+				desc->function);
 	if (desc->flags & _DPRINTK_FLAGS_INCL_LINENO)
 		pos += snprintf(buf + pos, remaining(pos), "%d:",
-					desc->lineno);
+				desc->lineno);
 	if (pos - pos_after_tid)
 		pos += snprintf(buf + pos, remaining(pos), " ");
 	if (pos >= PREFIX_SIZE)
@@ -559,9 +559,13 @@ int __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...)
 	BUG_ON(!fmt);
 
 	va_start(args, fmt);
+
 	vaf.fmt = fmt;
 	vaf.va = &args;
-	res = printk("%s%pV", dynamic_emit_prefix(descriptor, buf), &vaf);
+
+	res = printk(KERN_DEBUG "%s%pV",
+		     dynamic_emit_prefix(descriptor, buf), &vaf);
+
 	va_end(args);
 
 	return res;
@@ -574,15 +578,26 @@ int __dynamic_dev_dbg(struct _ddebug *descriptor,
 	struct va_format vaf;
 	va_list args;
 	int res;
-	char buf[PREFIX_SIZE];
 
 	BUG_ON(!descriptor);
 	BUG_ON(!fmt);
 
 	va_start(args, fmt);
+
 	vaf.fmt = fmt;
 	vaf.va = &args;
-	res = __dev_printk(dynamic_emit_prefix(descriptor, buf), dev, &vaf);
+
+	if (!dev) {
+		res = printk(KERN_DEBUG "(NULL device *): %pV", &vaf);
+	} else {
+		char buf[PREFIX_SIZE];
+
+		res = dev_printk_emit(7, dev, "%s%s %s: %pV",
+				      dynamic_emit_prefix(descriptor, buf),
+				      dev_driver_string(dev), dev_name(dev),
+				      &vaf);
+	}
+
 	va_end(args);
 
 	return res;
@@ -592,20 +607,35 @@ EXPORT_SYMBOL(__dynamic_dev_dbg);
 #ifdef CONFIG_NET
 
 int __dynamic_netdev_dbg(struct _ddebug *descriptor,
-		      const struct net_device *dev, const char *fmt, ...)
+			 const struct net_device *dev, const char *fmt, ...)
 {
 	struct va_format vaf;
 	va_list args;
 	int res;
-	char buf[PREFIX_SIZE];
 
 	BUG_ON(!descriptor);
 	BUG_ON(!fmt);
 
 	va_start(args, fmt);
+
 	vaf.fmt = fmt;
 	vaf.va = &args;
-	res = __netdev_printk(dynamic_emit_prefix(descriptor, buf), dev, &vaf);
+
+	if (dev && dev->dev.parent) {
+		char buf[PREFIX_SIZE];
+
+		res = dev_printk_emit(7, dev->dev.parent,
+				      "%s%s %s %s: %pV",
+				      dynamic_emit_prefix(descriptor, buf),
+				      dev_driver_string(dev->dev.parent),
+				      dev_name(dev->dev.parent),
+				      netdev_name(dev), &vaf);
+	} else if (dev) {
+		res = printk(KERN_DEBUG "%s: %pV", netdev_name(dev), &vaf);
+	} else {
+		res = printk(KERN_DEBUG "(NULL net_device): %pV", &vaf);
+	}
+
 	va_end(args);
 
 	return res;
diff --git a/lib/flex_proportions.c b/lib/flex_proportions.c
index c785554f9523..ebf3bac460b0 100644
--- a/lib/flex_proportions.c
+++ b/lib/flex_proportions.c
@@ -62,7 +62,7 @@ void fprop_global_destroy(struct fprop_global *p)
  */
 bool fprop_new_period(struct fprop_global *p, int periods)
 {
-	u64 events;
+	s64 events;
 	unsigned long flags;
 
 	local_irq_save(flags);
diff --git a/lib/gcd.c b/lib/gcd.c
index cce4f3cd14b3..3657f129d7b8 100644
--- a/lib/gcd.c
+++ b/lib/gcd.c
@@ -9,6 +9,9 @@ unsigned long gcd(unsigned long a, unsigned long b)
 
 	if (a < b)
 		swap(a, b);
+
+	if (!b)
+		return a;
 	while ((r = a % b) != 0) {
 		a = b;
 		b = r;
diff --git a/lib/gen_crc32table.c b/lib/gen_crc32table.c
index 8f8d5439e2d9..71fcfcd96410 100644
--- a/lib/gen_crc32table.c
+++ b/lib/gen_crc32table.c
@@ -109,7 +109,7 @@ int main(int argc, char** argv)
 
 	if (CRC_LE_BITS > 1) {
 		crc32init_le();
-		printf("static const u32 __cacheline_aligned "
+		printf("static u32 __cacheline_aligned "
 		       "crc32table_le[%d][%d] = {",
 		       LE_TABLE_ROWS, LE_TABLE_SIZE);
 		output_table(crc32table_le, LE_TABLE_ROWS,
@@ -119,7 +119,7 @@ int main(int argc, char** argv)
 
 	if (CRC_BE_BITS > 1) {
 		crc32init_be();
-		printf("static const u32 __cacheline_aligned "
+		printf("static u32 __cacheline_aligned "
 		       "crc32table_be[%d][%d] = {",
 		       BE_TABLE_ROWS, BE_TABLE_SIZE);
 		output_table(crc32table_be, LE_TABLE_ROWS,
@@ -128,7 +128,7 @@ int main(int argc, char** argv)
 	}
 	if (CRC_LE_BITS > 1) {
 		crc32cinit_le();
-		printf("static const u32 __cacheline_aligned "
+		printf("static u32 __cacheline_aligned "
 		       "crc32ctable_le[%d][%d] = {",
 		       LE_TABLE_ROWS, LE_TABLE_SIZE);
 		output_table(crc32ctable_le, LE_TABLE_ROWS,
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 6bc04aab6ec7..ca208a92628c 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -152,6 +152,8 @@ struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
 		spin_lock_init(&pool->lock);
 		INIT_LIST_HEAD(&pool->chunks);
 		pool->min_alloc_order = min_alloc_order;
+		pool->algo = gen_pool_first_fit;
+		pool->data = NULL;
 	}
 	return pool;
 }
@@ -255,8 +257,9 @@ EXPORT_SYMBOL(gen_pool_destroy);
  * @size: number of bytes to allocate from the pool
 
 *
 * Allocate the requested number of bytes from the specified pool.
- * Uses a first-fit algorithm. Can not be used in NMI handler on
- * architectures without NMI-safe cmpxchg implementation.
+ * Uses the pool allocation function (with first-fit algorithm by default).
+ * Can not be used in NMI handler on architectures without
+ * NMI-safe cmpxchg implementation.
 */
 unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
 {
@@ -280,8 +283,8 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
 		end_bit = (chunk->end_addr - chunk->start_addr) >> order;
 
 retry:
-		start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit,
-						       start_bit, nbits, 0);
+		start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits,
+				       pool->data);
 		if (start_bit >= end_bit)
 			continue;
 		remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
@@ -400,3 +403,80 @@ size_t gen_pool_size(struct gen_pool *pool)
 	return size;
 }
 EXPORT_SYMBOL_GPL(gen_pool_size);
+
+/**
+ * gen_pool_set_algo - set the allocation algorithm
+ * @pool: pool to change allocation algorithm
+ * @algo: custom algorithm function
+ * @data: additional data used by @algo
+ *
+ * Call @algo for each memory allocation in the pool.
+ * If @algo is NULL use gen_pool_first_fit as default
+ * memory allocation function.
+ */
+void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
+{
+	rcu_read_lock();
+
+	pool->algo = algo;
+	if (!pool->algo)
+		pool->algo = gen_pool_first_fit;
+
+	pool->data = data;
+
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL(gen_pool_set_algo);
+
+/**
+ * gen_pool_first_fit - find the first available region
+ * of memory matching the size requirement (no alignment constraint)
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ * @data: additional data - unused
+ */
+unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
+		unsigned long start, unsigned int nr, void *data)
+{
+	return bitmap_find_next_zero_area(map, size, start, nr, 0);
+}
+EXPORT_SYMBOL(gen_pool_first_fit);
+
+/**
+ * gen_pool_best_fit - find the best fitting region of memory
+ * macthing the size requirement (no alignment constraint)
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ * @data: additional data - unused
+ *
+ * Iterate over the bitmap to find the smallest free region
+ * which we can allocate the memory.
+ */
+unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
+		unsigned long start, unsigned int nr, void *data)
+{
+	unsigned long start_bit = size;
+	unsigned long len = size + 1;
+	unsigned long index;
+
+	index = bitmap_find_next_zero_area(map, size, start, nr, 0);
+
+	while (index < size) {
+		int next_bit = find_next_bit(map, size, index + nr);
+		if ((next_bit - index) < len) {
+			len = next_bit - index;
+			start_bit = index;
+			if (len == nr)
+				return start_bit;
+		}
+		index = bitmap_find_next_zero_area(map, size,
+						   next_bit + 1, nr, 0);
+	}
+
+	return start_bit;
+}
+EXPORT_SYMBOL(gen_pool_best_fit);
diff --git a/lib/idr.c b/lib/idr.c
index 4046e29c0a99..648239079dd2 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -20,7 +20,7 @@
  * that id to this code and it returns your pointer.
 
 * You can release ids at any time. When all ids are released, most of
- * the memory is returned (we keep IDR_FREE_MAX) in a local pool so we
+ * the memory is returned (we keep MAX_IDR_FREE) in a local pool so we
 * don't need to go to the memory "store" during an id allocate, just
 * so you don't need to be too concerned about locking and conflicts
 * with the slab allocator.
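Before continuing with the idr.c renames, here is a usage sketch for the gen_pool algorithm hook added a few hunks above. The addresses and sizes are invented for illustration, but gen_pool_create(), gen_pool_add(), gen_pool_destroy(), gen_pool_alloc(), gen_pool_set_algo() and gen_pool_best_fit are the interfaces touched or added by this patch.

#include <linux/genalloc.h>

static struct gen_pool *demo_pool;	/* hypothetical driver pool */

static int demo_pool_init(void)
{
	int ret;

	/* 2^6 = 64-byte allocation granularity, no NUMA preference */
	demo_pool = gen_pool_create(6, -1);
	if (!demo_pool)
		return -ENOMEM;

	/* Hand the pool one megabyte of (made-up) bus address space. */
	ret = gen_pool_add(demo_pool, 0x60000000, 1024 * 1024, -1);
	if (ret) {
		gen_pool_destroy(demo_pool);
		return ret;
	}

	/* Without this call the pool keeps the default gen_pool_first_fit. */
	gen_pool_set_algo(demo_pool, gen_pool_best_fit, NULL);

	return 0;
}

static unsigned long demo_alloc(size_t size)
{
	return gen_pool_alloc(demo_pool, size);	/* returns 0 on failure */
}

Best-fit trades a longer bitmap scan for less fragmentation, which is why it is opt-in per pool rather than the default.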
@@ -122,7 +122,7 @@ static void idr_mark_full(struct idr_layer **pa, int id)
  */
 int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 {
-	while (idp->id_free_cnt < IDR_FREE_MAX) {
+	while (idp->id_free_cnt < MAX_IDR_FREE) {
 		struct idr_layer *new;
 		new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
 		if (new == NULL)
@@ -179,7 +179,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
 			sh = IDR_BITS*l;
 			id = ((id >> sh) ^ n ^ m) << sh;
 		}
-		if ((id >= MAX_ID_BIT) || (id < 0))
+		if ((id >= MAX_IDR_BIT) || (id < 0))
 			return IDR_NOMORE_SPACE;
 		if (l == 0)
 			break;
@@ -223,7 +223,7 @@ build_up:
 	 * Add a new layer to the top of the tree if the requested
 	 * id is larger than the currently allocated space.
 	 */
-	while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
+	while ((layers < (MAX_IDR_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
 		layers++;
 		if (!p->count) {
 			/* special case: if the tree is currently empty,
@@ -265,7 +265,7 @@ build_up:
 
 static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
 {
-	struct idr_layer *pa[MAX_LEVEL];
+	struct idr_layer *pa[MAX_IDR_LEVEL];
 	int id;
 
 	id = idr_get_empty_slot(idp, starting_id, pa);
@@ -357,7 +357,7 @@ static void idr_remove_warning(int id)
 static void sub_remove(struct idr *idp, int shift, int id)
 {
 	struct idr_layer *p = idp->top;
-	struct idr_layer **pa[MAX_LEVEL];
+	struct idr_layer **pa[MAX_IDR_LEVEL];
 	struct idr_layer ***paa = &pa[0];
 	struct idr_layer *to_free;
 	int n;
@@ -402,7 +402,7 @@ void idr_remove(struct idr *idp, int id)
 	struct idr_layer *to_free;
 
 	/* Mask off upper bits we don't use for the search. */
-	id &= MAX_ID_MASK;
+	id &= MAX_IDR_MASK;
 
 	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
 	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
@@ -420,7 +420,7 @@ void idr_remove(struct idr *idp, int id)
 		to_free->bitmap = to_free->count = 0;
 		free_layer(to_free);
 	}
-	while (idp->id_free_cnt >= IDR_FREE_MAX) {
+	while (idp->id_free_cnt >= MAX_IDR_FREE) {
 		p = get_from_free_list(idp);
 		/*
 		 * Note: we don't call the rcu callback here, since the only
@@ -451,7 +451,7 @@ void idr_remove_all(struct idr *idp)
 	int n, id, max;
 	int bt_mask;
 	struct idr_layer *p;
-	struct idr_layer *pa[MAX_LEVEL];
+	struct idr_layer *pa[MAX_IDR_LEVEL];
 	struct idr_layer **paa = &pa[0];
 
 	n = idp->layers * IDR_BITS;
@@ -517,7 +517,7 @@ void *idr_find(struct idr *idp, int id)
 	n = (p->layer+1) * IDR_BITS;
 
 	/* Mask off upper bits we don't use for the search. */
-	id &= MAX_ID_MASK;
+	id &= MAX_IDR_MASK;
 
 	if (id >= (1 << n))
 		return NULL;
@@ -555,7 +555,7 @@ int idr_for_each(struct idr *idp,
 {
 	int n, id, max, error = 0;
 	struct idr_layer *p;
-	struct idr_layer *pa[MAX_LEVEL];
+	struct idr_layer *pa[MAX_IDR_LEVEL];
 	struct idr_layer **paa = &pa[0];
 
 	n = idp->layers * IDR_BITS;
@@ -601,7 +601,7 @@ EXPORT_SYMBOL(idr_for_each);
  */
 void *idr_get_next(struct idr *idp, int *nextidp)
 {
-	struct idr_layer *p, *pa[MAX_LEVEL];
+	struct idr_layer *p, *pa[MAX_IDR_LEVEL];
 	struct idr_layer **paa = &pa[0];
 	int id = *nextidp;
 	int n, max;
@@ -659,7 +659,7 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
 
 	n = (p->layer+1) * IDR_BITS;
 
-	id &= MAX_ID_MASK;
+	id &= MAX_IDR_MASK;
 
 	if (id >= (1 << n))
 		return ERR_PTR(-EINVAL);
@@ -780,7 +780,7 @@ EXPORT_SYMBOL(ida_pre_get);
 */
 int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
 {
-	struct idr_layer *pa[MAX_LEVEL];
+	struct idr_layer *pa[MAX_IDR_LEVEL];
 	struct ida_bitmap *bitmap;
 	unsigned long flags;
 	int idr_id = starting_id / IDA_BITMAP_BITS;
@@ -793,7 +793,7 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
 	if (t < 0)
 		return _idr_rc_to_errno(t);
 
-	if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
+	if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT)
 		return -ENOSPC;
 
 	if (t != idr_id)
@@ -827,7 +827,7 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
 	}
 
 	id = idr_id * IDA_BITMAP_BITS + t;
-	if (id >= MAX_ID_BIT)
+	if (id >= MAX_IDR_BIT)
 		return -ENOSPC;
 
 	__set_bit(t, bitmap->bitmap);
diff --git a/lib/interval_tree.c b/lib/interval_tree.c
new file mode 100644
index 000000000000..e6eb406f2d65
--- /dev/null
+++ b/lib/interval_tree.c
@@ -0,0 +1,10 @@
+#include <linux/init.h>
+#include <linux/interval_tree.h>
+#include <linux/interval_tree_generic.h>
+
+#define START(node) ((node)->start)
+#define LAST(node)  ((node)->last)
+
+INTERVAL_TREE_DEFINE(struct interval_tree_node, rb,
+		     unsigned long, __subtree_last,
+		     START, LAST,, interval_tree)
diff --git a/lib/interval_tree_test_main.c b/lib/interval_tree_test_main.c
new file mode 100644
index 000000000000..b25903987f7a
--- /dev/null
+++ b/lib/interval_tree_test_main.c
@@ -0,0 +1,105 @@
+#include <linux/module.h>
+#include <linux/interval_tree.h>
+#include <linux/random.h>
+#include <asm/timex.h>
+
+#define NODES        100
+#define PERF_LOOPS   100000
+#define SEARCHES     100
+#define SEARCH_LOOPS 10000
+
+static struct rb_root root = RB_ROOT;
+static struct interval_tree_node nodes[NODES];
+static u32 queries[SEARCHES];
+
+static struct rnd_state rnd;
+
+static inline unsigned long
+search(unsigned long query, struct rb_root *root)
+{
+	struct interval_tree_node *node;
+	unsigned long results = 0;
+
+	for (node = interval_tree_iter_first(root, query, query); node;
+	     node = interval_tree_iter_next(node, query, query))
+		results++;
+	return results;
+}
+
+static void init(void)
+{
+	int i;
+	for (i = 0; i < NODES; i++) {
+		u32 a = prandom32(&rnd), b = prandom32(&rnd);
+		if (a <= b) {
+			nodes[i].start = a;
+			nodes[i].last = b;
+		} else {
+			nodes[i].start = b;
+			nodes[i].last = a;
+		}
+	}
+	for (i = 0; i < SEARCHES; i++)
+		queries[i] = prandom32(&rnd);
+}
+
+static int interval_tree_test_init(void)
+{
+	int i, j;
+	unsigned long results;
+	cycles_t time1, time2, time;
+
+	printk(KERN_ALERT "interval tree insert/remove");
+
+	prandom32_seed(&rnd, 3141592653589793238ULL);
+	init();
+
+	time1 = get_cycles();
+
+	for (i = 0; i < PERF_LOOPS; i++) {
+		for (j = 0; j < NODES; j++)
+			interval_tree_insert(nodes + j, &root);
+		for (j = 0; j < NODES; j++)
+			interval_tree_remove(nodes + j, &root);
+	}
+
+	time2 = get_cycles();
+	time = time2 - time1;
+
+	time = div_u64(time, PERF_LOOPS);
+	printk(" -> %llu cycles\n", (unsigned long long)time);
+
+	printk(KERN_ALERT "interval tree search");
+
+	for (j = 0; j < NODES; j++)
+		interval_tree_insert(nodes + j, &root);
+
+	time1 = get_cycles();
+
+	results = 0;
+	for (i = 0; i < SEARCH_LOOPS; i++)
+		for (j = 0; j < SEARCHES; j++)
+			results += search(queries[j], &root);
+
+	time2 = get_cycles();
+	time = time2 - time1;
+
+	time = div_u64(time, SEARCH_LOOPS);
+	results = div_u64(results, SEARCH_LOOPS);
+	printk(" -> %llu cycles (%lu results)\n",
+	       (unsigned long long)time, results);
+
+	return -EAGAIN; /* Fail will directly unload the module */
+}
+
+static void interval_tree_test_exit(void)
+{
+	printk(KERN_ALERT "test exit\n");
+}
+
+module_init(interval_tree_test_init)
+module_exit(interval_tree_test_exit)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Michel Lespinasse");
+MODULE_DESCRIPTION("Interval Tree test");
diff --git a/lib/kasprintf.c b/lib/kasprintf.c
index ae0de80c1c88..32f12150fc4f 100644
--- a/lib/kasprintf.c
+++ b/lib/kasprintf.c
@@ -21,7 +21,7 @@ char *kvasprintf(gfp_t gfp, const char *fmt, va_list ap)
 	len = vsnprintf(NULL, 0, fmt, aq);
 	va_end(aq);
 
-	p = kmalloc(len+1, gfp);
+	p = kmalloc_track_caller(len+1, gfp);
 	if (!p)
 		return NULL;
 
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 0401d2916d9f..52e5abbc41db 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -375,14 +375,14 @@ static int uevent_net_init(struct net *net)
 	struct uevent_sock *ue_sk;
 	struct netlink_kernel_cfg cfg = {
 		.groups	= 1,
+		.flags	= NL_CFG_F_NONROOT_RECV,
 	};
 
 	ue_sk = kzalloc(sizeof(*ue_sk), GFP_KERNEL);
 	if (!ue_sk)
 		return -ENOMEM;
 
-	ue_sk->sk = netlink_kernel_create(net, NETLINK_KOBJECT_UEVENT,
-					  THIS_MODULE, &cfg);
+	ue_sk->sk = netlink_kernel_create(net, NETLINK_KOBJECT_UEVENT, &cfg);
 	if (!ue_sk->sk) {
 		printk(KERN_ERR
 		       "kobject_uevent: unable to create netlink socket!\n");
@@ -422,7 +422,6 @@ static struct pernet_operations uevent_net_ops = {
 
 static int __init kobject_uevent_init(void)
 {
-	netlink_set_nonroot(NETLINK_KOBJECT_UEVENT, NL_NONROOT_RECV);
 	return register_pernet_subsys(&uevent_net_ops);
 }
 
diff --git a/lib/nlattr.c b/lib/nlattr.c
index 4226dfeb5178..18eca7809b08 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -22,6 +22,10 @@ static const u16 nla_attr_minlen[NLA_TYPE_MAX+1] = {
 	[NLA_U64]	= sizeof(u64),
 	[NLA_MSECS]	= sizeof(u64),
 	[NLA_NESTED]	= NLA_HDRLEN,
+	[NLA_S8]	= sizeof(s8),
+	[NLA_S16]	= sizeof(s16),
+	[NLA_S32]	= sizeof(s32),
+	[NLA_S64]	= sizeof(s64),
 };
 
 static int validate_nla(const struct nlattr *nla, int maxtype,
diff --git a/lib/parser.c b/lib/parser.c
index c43410084838..52cfa69f73df 100644
--- a/lib/parser.c
+++ b/lib/parser.c
@@ -122,13 +122,14 @@ int match_token(char *s, const match_table_t table, substring_t args[])
 *
 * Description: Given a &substring_t and a base, attempts to parse the substring
 * as a number in that base. On success, sets @result to the integer represented
- * by the string and returns 0. Returns either -ENOMEM or -EINVAL on failure.
+ * by the string and returns 0. Returns -ENOMEM, -EINVAL, or -ERANGE on failure.
 */
 static int match_number(substring_t *s, int *result, int base)
 {
 	char *endp;
 	char *buf;
 	int ret;
+	long val;
 	size_t len = s->to - s->from;
 
 	buf = kmalloc(len + 1, GFP_KERNEL);
@@ -136,10 +137,15 @@ static int match_number(substring_t *s, int *result, int base)
 		return -ENOMEM;
 	memcpy(buf, s->from, len);
 	buf[len] = '\0';
-	*result = simple_strtol(buf, &endp, base);
+
+	ret = 0;
+	val = simple_strtol(buf, &endp, base);
 	if (endp == buf)
 		ret = -EINVAL;
+	else if (val < (long)INT_MIN || val > (long)INT_MAX)
+		ret = -ERANGE;
+	else
+		*result = (int) val;
 	kfree(buf);
 	return ret;
 }
diff --git a/lib/plist.c b/lib/plist.c
index 6ab0e521c48b..1ebc95f7a46f 100644
--- a/lib/plist.c
+++ b/lib/plist.c
@@ -175,7 +175,7 @@ static int __init plist_test(void)
 	int nr_expect = 0, i, loop;
 	unsigned int r = local_clock();
 
-	printk(KERN_INFO "start plist test\n");
+	pr_debug("start plist test\n");
 	plist_head_init(&test_head);
 	for (i = 0; i < ARRAY_SIZE(test_node); i++)
 		plist_node_init(test_node + i, 0);
@@ -203,7 +203,7 @@ static int __init plist_test(void)
 		plist_test_check(nr_expect);
 	}
 
-	printk(KERN_INFO "end plist test\n");
+	pr_debug("end plist test\n");
 	return 0;
 }
 
diff --git a/lib/prio_tree.c b/lib/prio_tree.c
deleted file mode 100644
index 8d443af03b4c..000000000000
--- a/lib/prio_tree.c
+++ /dev/null
@@ -1,466 +0,0 @@
-/*
- * lib/prio_tree.c - priority search tree
- *
- * Copyright (C) 2004, Rajesh Venkatasubramanian <vrajesh@umich.edu>
- *
- * This file is released under the GPL v2.
- *
- * Based on the radix priority search tree proposed by Edward M. McCreight
- * SIAM Journal of Computing, vol. 14, no.2, pages 257-276, May 1985
- *
- * 02Feb2004 Initial version
- */
-
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/prio_tree.h>
-
-/*
- * A clever mix of heap and radix trees forms a radix priority search tree (PST)
- * which is useful for storing intervals, e.g, we can consider a vma as a closed
- * interval of file pages [offset_begin, offset_end], and store all vmas that
- * map a file in a PST. Then, using the PST, we can answer a stabbing query,
- * i.e., selecting a set of stored intervals (vmas) that overlap with (map) a
- * given input interval X (a set of consecutive file pages), in "O(log n + m)"
- * time where 'log n' is the height of the PST, and 'm' is the number of stored
- * intervals (vmas) that overlap (map) with the input interval X (the set of
- * consecutive file pages).
- *
- * In our implementation, we store closed intervals of the form [radix_index,
- * heap_index]. We assume that always radix_index <= heap_index. McCreight's PST
- * is designed for storing intervals with unique radix indices, i.e., each
- * interval have different radix_index. However, this limitation can be easily
- * overcome by using the size, i.e., heap_index - radix_index, as part of the
- * index, so we index the tree using [(radix_index,size), heap_index].
- *
- * When the above-mentioned indexing scheme is used, theoretically, in a 32 bit
- * machine, the maximum height of a PST can be 64. We can use a balanced version
- * of the priority search tree to optimize the tree height, but the balanced
- * tree proposed by McCreight is too complex and memory-hungry for our purpose.
- */
-
-/*
- * The following macros are used for implementing prio_tree for i_mmap
- */
-
-#define RADIX_INDEX(vma)  ((vma)->vm_pgoff)
-#define VMA_SIZE(vma)	  (((vma)->vm_end - (vma)->vm_start) >> PAGE_SHIFT)
-/* avoid overflow */
-#define HEAP_INDEX(vma)	  ((vma)->vm_pgoff + (VMA_SIZE(vma) - 1))
-
-
-static void get_index(const struct prio_tree_root *root,
-    const struct prio_tree_node *node,
-    unsigned long *radix, unsigned long *heap)
-{
-	if (root->raw) {
-		struct vm_area_struct *vma = prio_tree_entry(
-		    node, struct vm_area_struct, shared.prio_tree_node);
-
-		*radix = RADIX_INDEX(vma);
-		*heap = HEAP_INDEX(vma);
-	}
-	else {
-		*radix = node->start;
-		*heap = node->last;
-	}
-}
-
-static unsigned long index_bits_to_maxindex[BITS_PER_LONG];
-
-void __init prio_tree_init(void)
-{
-	unsigned int i;
-
-	for (i = 0; i < ARRAY_SIZE(index_bits_to_maxindex) - 1; i++)
-		index_bits_to_maxindex[i] = (1UL << (i + 1)) - 1;
-	index_bits_to_maxindex[ARRAY_SIZE(index_bits_to_maxindex) - 1] = ~0UL;
-}
-
-/*
- * Maximum heap_index that can be stored in a PST with index_bits bits
- */
-static inline unsigned long prio_tree_maxindex(unsigned int bits)
-{
-	return index_bits_to_maxindex[bits - 1];
-}
-
-static void prio_set_parent(struct prio_tree_node *parent,
-			    struct prio_tree_node *child, bool left)
-{
-	if (left)
-		parent->left = child;
-	else
-		parent->right = child;
-
-	child->parent = parent;
-}
-
-/*
- * Extend a priority search tree so that it can store a node with heap_index
- * max_heap_index. In the worst case, this algorithm takes O((log n)^2).
- * However, this function is used rarely and the common case performance is
- * not bad.
- */
-static struct prio_tree_node *prio_tree_expand(struct prio_tree_root *root,
-	struct prio_tree_node *node, unsigned long max_heap_index)
-{
-	struct prio_tree_node *prev;
-
-	if (max_heap_index > prio_tree_maxindex(root->index_bits))
-		root->index_bits++;
-
-	prev = node;
-	INIT_PRIO_TREE_NODE(node);
-
-	while (max_heap_index > prio_tree_maxindex(root->index_bits)) {
-		struct prio_tree_node *tmp = root->prio_tree_node;
-
-		root->index_bits++;
-
-		if (prio_tree_empty(root))
-			continue;
-
-		prio_tree_remove(root, root->prio_tree_node);
-		INIT_PRIO_TREE_NODE(tmp);
-
-		prio_set_parent(prev, tmp, true);
-		prev = tmp;
-	}
-
-	if (!prio_tree_empty(root))
-		prio_set_parent(prev, root->prio_tree_node, true);
-
-	root->prio_tree_node = node;
-	return node;
-}
-
-/*
- * Replace a prio_tree_node with a new node and return the old node
- */
-struct prio_tree_node *prio_tree_replace(struct prio_tree_root *root,
-		struct prio_tree_node *old, struct prio_tree_node *node)
-{
-	INIT_PRIO_TREE_NODE(node);
-
-	if (prio_tree_root(old)) {
-		BUG_ON(root->prio_tree_node != old);
-		/*
-		 * We can reduce root->index_bits here. However, it is complex
-		 * and does not help much to improve performance (IMO).
-		 */
-		root->prio_tree_node = node;
-	} else
-		prio_set_parent(old->parent, node, old->parent->left == old);
-
-	if (!prio_tree_left_empty(old))
-		prio_set_parent(node, old->left, true);
-
-	if (!prio_tree_right_empty(old))
-		prio_set_parent(node, old->right, false);
-
-	return old;
-}
-
-/*
- * Insert a prio_tree_node @node into a radix priority search tree @root. The
- * algorithm typically takes O(log n) time where 'log n' is the number of bits
- * required to represent the maximum heap_index. In the worst case, the algo
- * can take O((log n)^2) - check prio_tree_expand.
- *
- * If a prior node with same radix_index and heap_index is already found in
- * the tree, then returns the address of the prior node. Otherwise, inserts
- * @node into the tree and returns @node.
- */
-struct prio_tree_node *prio_tree_insert(struct prio_tree_root *root,
-		struct prio_tree_node *node)
-{
-	struct prio_tree_node *cur, *res = node;
-	unsigned long radix_index, heap_index;
-	unsigned long r_index, h_index, index, mask;
-	int size_flag = 0;
-
-	get_index(root, node, &radix_index, &heap_index);
-
-	if (prio_tree_empty(root) ||
-			heap_index > prio_tree_maxindex(root->index_bits))
-		return prio_tree_expand(root, node, heap_index);
-
-	cur = root->prio_tree_node;
-	mask = 1UL << (root->index_bits - 1);
-
-	while (mask) {
-		get_index(root, cur, &r_index, &h_index);
-
-		if (r_index == radix_index && h_index == heap_index)
-			return cur;
-
-		if (h_index < heap_index ||
-		    (h_index == heap_index && r_index > radix_index)) {
-			struct prio_tree_node *tmp = node;
-			node = prio_tree_replace(root, cur, node);
-			cur = tmp;
-			/* swap indices */
-			index = r_index;
-			r_index = radix_index;
-			radix_index = index;
-			index = h_index;
-			h_index = heap_index;
-			heap_index = index;
-		}
-
-		if (size_flag)
-			index = heap_index - radix_index;
-		else
-			index = radix_index;
-
-		if (index & mask) {
-			if (prio_tree_right_empty(cur)) {
-				INIT_PRIO_TREE_NODE(node);
-				prio_set_parent(cur, node, false);
-				return res;
-			} else
-				cur = cur->right;
-		} else {
-			if (prio_tree_left_empty(cur)) {
-				INIT_PRIO_TREE_NODE(node);
-				prio_set_parent(cur, node, true);
-				return res;
-			} else
-				cur = cur->left;
-		}
-
-		mask >>= 1;
-
-		if (!mask) {
-			mask = 1UL << (BITS_PER_LONG - 1);
-			size_flag = 1;
-		}
-	}
-	/* Should not reach here */
-	BUG();
-	return NULL;
-}
-
-/*
- * Remove a prio_tree_node @node from a radix priority search tree @root. The
- * algorithm takes O(log n) time where 'log n' is the number of bits required
- * to represent the maximum heap_index.
- */
-void prio_tree_remove(struct prio_tree_root *root, struct prio_tree_node *node)
-{
-	struct prio_tree_node *cur;
-	unsigned long r_index, h_index_right, h_index_left;
-
-	cur = node;
-
-	while (!prio_tree_left_empty(cur) || !prio_tree_right_empty(cur)) {
-		if (!prio_tree_left_empty(cur))
-			get_index(root, cur->left, &r_index, &h_index_left);
-		else {
-			cur = cur->right;
-			continue;
-		}
-
-		if (!prio_tree_right_empty(cur))
-			get_index(root, cur->right, &r_index, &h_index_right);
-		else {
-			cur = cur->left;
-			continue;
-		}
-
-		/* both h_index_left and h_index_right cannot be 0 */
-		if (h_index_left >= h_index_right)
-			cur = cur->left;
-		else
-			cur = cur->right;
-	}
-
-	if (prio_tree_root(cur)) {
-		BUG_ON(root->prio_tree_node != cur);
-		__INIT_PRIO_TREE_ROOT(root, root->raw);
-		return;
-	}
-
-	if (cur->parent->right == cur)
-		cur->parent->right = cur->parent;
-	else
-		cur->parent->left = cur->parent;
-
-	while (cur != node)
-		cur = prio_tree_replace(root, cur->parent, cur);
-}
-
-static void iter_walk_down(struct prio_tree_iter *iter)
-{
-	iter->mask >>= 1;
-	if (iter->mask) {
-		if (iter->size_level)
-			iter->size_level++;
-		return;
-	}
-
-	if (iter->size_level) {
-		BUG_ON(!prio_tree_left_empty(iter->cur));
-		BUG_ON(!prio_tree_right_empty(iter->cur));
-		iter->size_level++;
-		iter->mask = ULONG_MAX;
-	} else {
-		iter->size_level = 1;
-		iter->mask = 1UL << (BITS_PER_LONG - 1);
-	}
-}
-
-static void iter_walk_up(struct prio_tree_iter *iter)
-{
-	if (iter->mask == ULONG_MAX)
-		iter->mask = 1UL;
-	else if (iter->size_level == 1)
-		iter->mask = 1UL;
-	else
-		iter->mask <<= 1;
-	if (iter->size_level)
-		iter->size_level--;
-	if (!iter->size_level && (iter->value & iter->mask))
-		iter->value ^= iter->mask;
-}
-
-/*
- * Following functions help to enumerate all prio_tree_nodes in the tree that
- * overlap with the input interval X [radix_index, heap_index]. The enumeration
- * takes O(log n + m) time where 'log n' is the height of the tree (which is
- * proportional to # of bits required to represent the maximum heap_index) and
- * 'm' is the number of prio_tree_nodes that overlap the interval X.
- */
-
-static struct prio_tree_node *prio_tree_left(struct prio_tree_iter *iter,
-		unsigned long *r_index, unsigned long *h_index)
-{
-	if (prio_tree_left_empty(iter->cur))
-		return NULL;
-
-	get_index(iter->root, iter->cur->left, r_index, h_index);
-
-	if (iter->r_index <= *h_index) {
-		iter->cur = iter->cur->left;
-		iter_walk_down(iter);
-		return iter->cur;
-	}
-
-	return NULL;
-}
-
-static struct prio_tree_node *prio_tree_right(struct prio_tree_iter *iter,
-		unsigned long *r_index, unsigned long *h_index)
-{
-	unsigned long value;
-
-	if (prio_tree_right_empty(iter->cur))
-		return NULL;
-
-	if (iter->size_level)
-		value = iter->value;
-	else
-		value = iter->value | iter->mask;
-
-	if (iter->h_index < value)
-		return NULL;
-
-	get_index(iter->root, iter->cur->right, r_index, h_index);
-
-	if (iter->r_index <= *h_index) {
-		iter->cur = iter->cur->right;
-		iter_walk_down(iter);
-		return iter->cur;
-	}
-
-	return NULL;
-}
-
-static struct prio_tree_node *prio_tree_parent(struct prio_tree_iter *iter)
-{
-	iter->cur = iter->cur->parent;
-	iter_walk_up(iter);
-	return iter->cur;
-}
-
-static inline int overlap(struct prio_tree_iter *iter,
-		unsigned long r_index, unsigned long h_index)
-{
-	return iter->h_index >= r_index && iter->r_index <= h_index;
-}
-
-/*
- * prio_tree_first:
- *
- * Get the first prio_tree_node that overlaps with the interval [radix_index,
- * heap_index]. Note that always radix_index <= heap_index. We do a pre-order
- * traversal of the tree.
- */
-static struct prio_tree_node *prio_tree_first(struct prio_tree_iter *iter)
-{
-	struct prio_tree_root *root;
-	unsigned long r_index, h_index;
-
-	INIT_PRIO_TREE_ITER(iter);
-
-	root = iter->root;
-	if (prio_tree_empty(root))
-		return NULL;
-
-	get_index(root, root->prio_tree_node, &r_index, &h_index);
-
-	if (iter->r_index > h_index)
-		return NULL;
-
-	iter->mask = 1UL << (root->index_bits - 1);
-	iter->cur = root->prio_tree_node;
-
-	while (1) {
-		if (overlap(iter, r_index, h_index))
-			return iter->cur;
-
-		if (prio_tree_left(iter, &r_index, &h_index))
-			continue;
-
-		if (prio_tree_right(iter, &r_index, &h_index))
-			continue;
-
-		break;
-	}
-	return NULL;
-}
-
-/*
- * prio_tree_next:
- *
- * Get the next prio_tree_node that overlaps with the input interval in iter
- */
-struct prio_tree_node *prio_tree_next(struct prio_tree_iter *iter)
-{
-	unsigned long r_index, h_index;
-
-	if (iter->cur == NULL)
-		return prio_tree_first(iter);
-
-repeat:
-	while (prio_tree_left(iter, &r_index, &h_index))
-		if (overlap(iter, r_index, h_index))
-			return iter->cur;
-
-	while (!prio_tree_right(iter, &r_index, &h_index)) {
-		while (!prio_tree_root(iter->cur) &&
-				iter->cur->parent->right == iter->cur)
-			prio_tree_parent(iter);
-
-		if (prio_tree_root(iter->cur))
-			return NULL;
-
-		prio_tree_parent(iter);
-	}
-
-	if (overlap(iter, r_index, h_index))
-		return iter->cur;
-
-	goto repeat;
-}
diff --git a/lib/rbtree.c b/lib/rbtree.c
index d4175565dc2c..4f56a11d67fa 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -2,7 +2,8 @@
   Red Black Trees
   (C) 1999  Andrea Arcangeli <andrea@suse.de>
   (C) 2002  David Woodhouse <dwmw2@infradead.org>
-
+  (C) 2012  Michel Lespinasse <walken@google.com>
+
   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
@@ -20,339 +21,382 @@
   linux/lib/rbtree.c
 */
 
-#include <linux/rbtree.h>
+#include <linux/rbtree_augmented.h>
 #include <linux/export.h>
 
-static void __rb_rotate_left(struct rb_node *node, struct rb_root *root)
-{
-	struct rb_node *right = node->rb_right;
-	struct rb_node *parent = rb_parent(node);
-
-	if ((node->rb_right = right->rb_left))
-		rb_set_parent(right->rb_left, node);
-	right->rb_left = node;
-
-	rb_set_parent(right, parent);
+/*
+ * red-black trees properties:  http://en.wikipedia.org/wiki/Rbtree
+ *
+ *  1) A node is either red or black
+ *  2) The root is black
+ *  3) All leaves (NULL) are black
+ *  4) Both children of every red node are black
+ *  5) Every simple path from root to leaves contains the same number
+ *     of black nodes.
+ *
+ *  4 and 5 give the O(log n) guarantee, since 4 implies you cannot have two
+ *  consecutive red nodes in a path and every red node is therefore followed by
+ *  a black. So if B is the number of black nodes on every simple path (as per
+ *  5), then the longest possible path due to 4 is 2B.
+ *
+ *  We shall indicate color with case, where black nodes are uppercase and red
+ *  nodes will be lowercase. Unknown color nodes shall be drawn as red within
+ *  parentheses and have some accompanying text comment.
+ */
 
-	if (parent)
-	{
-		if (node == parent->rb_left)
-			parent->rb_left = right;
-		else
-			parent->rb_right = right;
-	}
-	else
-		root->rb_node = right;
-	rb_set_parent(node, right);
+static inline void rb_set_black(struct rb_node *rb)
+{
+	rb->__rb_parent_color |= RB_BLACK;
 }
 
-static void __rb_rotate_right(struct rb_node *node, struct rb_root *root)
+static inline struct rb_node *rb_red_parent(struct rb_node *red)
 {
-	struct rb_node *left = node->rb_left;
-	struct rb_node *parent = rb_parent(node);
-
-	if ((node->rb_left = left->rb_right))
-		rb_set_parent(left->rb_right, node);
-	left->rb_right = node;
-
-	rb_set_parent(left, parent);
+	return (struct rb_node *)red->__rb_parent_color;
+}
 
-	if (parent)
-	{
-		if (node == parent->rb_right)
-			parent->rb_right = left;
-		else
-			parent->rb_left = left;
-	}
-	else
-		root->rb_node = left;
-	rb_set_parent(node, left);
+/*
+ * Helper function for rotations:
+ * - old's parent and color get assigned to new
+ * - old gets assigned new as a parent and 'color' as a color.
+ */
+static inline void
+__rb_rotate_set_parents(struct rb_node *old, struct rb_node *new,
+			struct rb_root *root, int color)
+{
+	struct rb_node *parent = rb_parent(old);
+	new->__rb_parent_color = old->__rb_parent_color;
+	rb_set_parent_color(old, new, color);
+	__rb_change_child(old, new, parent, root);
 }
 
-void rb_insert_color(struct rb_node *node, struct rb_root *root)
+static __always_inline void
+__rb_insert(struct rb_node *node, struct rb_root *root,
+	    void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
 {
-	struct rb_node *parent, *gparent;
-
-	while ((parent = rb_parent(node)) && rb_is_red(parent))
-	{
-		gparent = rb_parent(parent);
-
-		if (parent == gparent->rb_left)
-		{
-			{
-				register struct rb_node *uncle = gparent->rb_right;
-				if (uncle && rb_is_red(uncle))
-				{
-					rb_set_black(uncle);
-					rb_set_black(parent);
-					rb_set_red(gparent);
-					node = gparent;
-					continue;
-				}
+	struct rb_node *parent = rb_red_parent(node), *gparent, *tmp;
+
+	while (true) {
+		/*
+		 * Loop invariant: node is red
+		 *
+		 * If there is a black parent, we are done.
+		 * Otherwise, take some corrective action as we don't
+		 * want a red root or two consecutive red nodes.
+		 */
+		if (!parent) {
+			rb_set_parent_color(node, NULL, RB_BLACK);
+			break;
+		} else if (rb_is_black(parent))
+			break;
+
+		gparent = rb_red_parent(parent);
+
+		tmp = gparent->rb_right;
+		if (parent != tmp) {	/* parent == gparent->rb_left */
+			if (tmp && rb_is_red(tmp)) {
+				/*
+				 * Case 1 - color flips
+				 *
+				 *       G            g
+				 *      / \          / \
+				 *     p   u  -->   P   U
+				 *    /            /
+				 *   n            N
+				 *
+				 * However, since g's parent might be red, and
+				 * 4) does not allow this, we need to recurse
+				 * at g.
+				 */
+				rb_set_parent_color(tmp, gparent, RB_BLACK);
+				rb_set_parent_color(parent, gparent, RB_BLACK);
+				node = gparent;
+				parent = rb_parent(node);
+				rb_set_parent_color(node, parent, RB_RED);
+				continue;
 			}
 
-			if (parent->rb_right == node)
-			{
-				register struct rb_node *tmp;
-				__rb_rotate_left(parent, root);
-				tmp = parent;
+			tmp = parent->rb_right;
+			if (node == tmp) {
+				/*
+				 * Case 2 - left rotate at parent
+				 *
+				 *      G             G
+				 *     / \           / \
+				 *    p   U  -->    n   U
+				 *     \           /
+				 *      n         p
+				 *
+				 * This still leaves us in violation of 4), the
+				 * continuation into Case 3 will fix that.
+				 */
+				parent->rb_right = tmp = node->rb_left;
+				node->rb_left = parent;
+				if (tmp)
+					rb_set_parent_color(tmp, parent,
+							    RB_BLACK);
+				rb_set_parent_color(parent, node, RB_RED);
+				augment_rotate(parent, node);
 				parent = node;
-				node = tmp;
+				tmp = node->rb_right;
 			}
 
-			rb_set_black(parent);
-			rb_set_red(gparent);
-			__rb_rotate_right(gparent, root);
+			/*
+			 * Case 3 - right rotate at gparent
+			 *
+			 *        G           P
+			 *       / \         / \
+			 *      p   U  -->  n   g
+			 *     /                 \
+			 *    n                   U
+			 */
+			gparent->rb_left = tmp;  /* == parent->rb_right */
+			parent->rb_right = gparent;
+			if (tmp)
+				rb_set_parent_color(tmp, gparent, RB_BLACK);
+			__rb_rotate_set_parents(gparent, parent, root, RB_RED);
+			augment_rotate(gparent, parent);
+			break;
 		} else {
-			{
-				register struct rb_node *uncle = gparent->rb_left;
-				if (uncle && rb_is_red(uncle))
-				{
-					rb_set_black(uncle);
-					rb_set_black(parent);
-					rb_set_red(gparent);
-					node = gparent;
-					continue;
-				}
+			tmp = gparent->rb_left;
+			if (tmp && rb_is_red(tmp)) {
+				/* Case 1 - color flips */
+				rb_set_parent_color(tmp, gparent, RB_BLACK);
+				rb_set_parent_color(parent, gparent, RB_BLACK);
+				node = gparent;
+				parent = rb_parent(node);
+				rb_set_parent_color(node, parent, RB_RED);
+				continue;
 			}
 
-			if (parent->rb_left == node)
-			{
-				register struct rb_node *tmp;
-				__rb_rotate_right(parent, root);
-				tmp = parent;
+			tmp = parent->rb_left;
+			if (node == tmp) {
+				/* Case 2 - right rotate at parent */
+				parent->rb_left = tmp = node->rb_right;
+				node->rb_right = parent;
+				if (tmp)
+					rb_set_parent_color(tmp, parent,
+							    RB_BLACK);
+				rb_set_parent_color(parent, node, RB_RED);
+				augment_rotate(parent, node);
 				parent = node;
-				node = tmp;
+				tmp = node->rb_left;
 			}
 
-			rb_set_black(parent);
-			rb_set_red(gparent);
-			__rb_rotate_left(gparent, root);
+			/* Case 3 - left rotate at gparent */
+			gparent->rb_right = tmp;  /* == parent->rb_left */
+			parent->rb_left = gparent;
+			if (tmp)
+				rb_set_parent_color(tmp, gparent, RB_BLACK);
+			__rb_rotate_set_parents(gparent, parent, root, RB_RED);
+			augment_rotate(gparent, parent);
+			break;
 		}
 	}
-
-	rb_set_black(root->rb_node);
 }
-EXPORT_SYMBOL(rb_insert_color);
 
-static void __rb_erase_color(struct rb_node *node, struct rb_node *parent,
-			     struct rb_root *root)
+__always_inline void
+__rb_erase_color(struct rb_node *parent, struct rb_root *root,
+	void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
 {
-	struct rb_node *other;
-
-	while ((!node || rb_is_black(node)) && node != root->rb_node)
-	{
-		if (parent->rb_left == node)
-		{
-			other = parent->rb_right;
-			if (rb_is_red(other))
-			{
-				rb_set_black(other);
-				rb_set_red(parent);
-				__rb_rotate_left(parent, root);
-				other = parent->rb_right;
+	struct rb_node *node = NULL, *sibling, *tmp1, *tmp2;
+
+	while (true) {
+		/*
+		 * Loop invariants:
+		 * - node is black (or NULL on first iteration)
+		 * - node is not the root (parent is not NULL)
+		 * - All leaf paths going through parent and node have a
+		 *   black node count that is 1 lower than other leaf paths.
+		 */
+		sibling = parent->rb_right;
+		if (node != sibling) {	/* node == parent->rb_left */
+			if (rb_is_red(sibling)) {
+				/*
+				 * Case 1 - left rotate at parent
+				 *
+				 *     P               S
+				 *    / \             / \
+				 *   N   s    -->    p   Sr
+				 *      / \         / \
+				 *     Sl  Sr      N   Sl
+				 */
+				parent->rb_right = tmp1 = sibling->rb_left;
+				sibling->rb_left = parent;
+				rb_set_parent_color(tmp1, parent, RB_BLACK);
+				__rb_rotate_set_parents(parent, sibling, root,
+							RB_RED);
+				augment_rotate(parent, sibling);
+				sibling = tmp1;
 			}
-			if ((!other->rb_left || rb_is_black(other->rb_left)) &&
-			    (!other->rb_right || rb_is_black(other->rb_right)))
-			{
-				rb_set_red(other);
-				node = parent;
-				parent = rb_parent(node);
-			}
-			else
-			{
-				if (!other->rb_right || rb_is_black(other->rb_right))
-				{
-					rb_set_black(other->rb_left);
-					rb_set_red(other);
-					__rb_rotate_right(other, root);
-					other = parent->rb_right;
+			tmp1 = sibling->rb_right;
+			if (!tmp1 || rb_is_black(tmp1)) {
+				tmp2 = sibling->rb_left;
+				if (!tmp2 || rb_is_black(tmp2)) {
+					/*
+					 * Case 2 - sibling color flip
+					 * (p could be either color here)
+					 *
+					 *    (p)           (p)
+					 *    / \           / \
+					 *   N   S    -->  N   s
+					 *      / \           / \
+					 *     Sl  Sr        Sl  Sr
+					 *
+					 * This leaves us violating 5) which
+					 * can be fixed by flipping p to black
+					 * if it was red, or by recursing at p.
+					 * p is red when coming from Case 1.
+					 */
+					rb_set_parent_color(sibling, parent,
+							    RB_RED);
+					if (rb_is_red(parent))
+						rb_set_black(parent);
+					else {
+						node = parent;
+						parent = rb_parent(node);
+						if (parent)
+							continue;
+					}
+					break;
 				}
-				rb_set_color(other, rb_color(parent));
-				rb_set_black(parent);
-				rb_set_black(other->rb_right);
-				__rb_rotate_left(parent, root);
-				node = root->rb_node;
-				break;
-			}
-		}
-		else
-		{
-			other = parent->rb_left;
-			if (rb_is_red(other))
-			{
-				rb_set_black(other);
-				rb_set_red(parent);
-				__rb_rotate_right(parent, root);
-				other = parent->rb_left;
+				/*
+				 * Case 3 - right rotate at sibling
+				 * (p could be either color here)
+				 *
+				 *   (p)           (p)
+				 *   / \           / \
+				 *  N   S    -->  N   Sl
+				 *     / \             \
+				 *    sl  Sr            s
+				 *                       \
+				 *                        Sr
+				 */
+				sibling->rb_left = tmp1 = tmp2->rb_right;
+				tmp2->rb_right = sibling;
+				parent->rb_right = tmp2;
+				if (tmp1)
+					rb_set_parent_color(tmp1, sibling,
+							    RB_BLACK);
+				augment_rotate(sibling, tmp2);
+				tmp1 = sibling;
+				sibling = tmp2;
 			}
-			if ((!other->rb_left || rb_is_black(other->rb_left)) &&
-			    (!other->rb_right || rb_is_black(other->rb_right)))
-			{
-				rb_set_red(other);
-				node = parent;
-				parent = rb_parent(node);
+			/*
+			 * Case 4 - left rotate at parent + color flips
+			 * (p and sl could be either color here.
+			 *  After rotation, p becomes black, s acquires
+			 *  p's color, and sl keeps its color)
+			 *
+			 *      (p)             (s)
+			 *      / \             / \
+			 *     N   S     -->   P   Sr
+			 *        / \         / \
+			 *      (sl) sr      N  (sl)
+			 */
+			parent->rb_right = tmp2 = sibling->rb_left;
+			sibling->rb_left = parent;
+			rb_set_parent_color(tmp1, sibling, RB_BLACK);
+			if (tmp2)
+				rb_set_parent(tmp2, parent);
+			__rb_rotate_set_parents(parent, sibling, root,
+						RB_BLACK);
+			augment_rotate(parent, sibling);
+			break;
+		} else {
+			sibling = parent->rb_left;
+			if (rb_is_red(sibling)) {
+				/* Case 1 - right rotate at parent */
+				parent->rb_left = tmp1 = sibling->rb_right;
+				sibling->rb_right = parent;
+				rb_set_parent_color(tmp1, parent, RB_BLACK);
+				__rb_rotate_set_parents(parent, sibling, root,
+							RB_RED);
+				augment_rotate(parent, sibling);
+				sibling = tmp1;
 			}
-			else
-			{
-				if (!other->rb_left || rb_is_black(other->rb_left))
-				{
-					rb_set_black(other->rb_right);
-					rb_set_red(other);
-					__rb_rotate_left(other, root);
-					other = parent->rb_left;
+			tmp1 = sibling->rb_left;
+			if (!tmp1 || rb_is_black(tmp1)) {
+				tmp2 = sibling->rb_right;
+				if (!tmp2 || rb_is_black(tmp2)) {
+					/* Case 2 - sibling color flip */
+					rb_set_parent_color(sibling, parent,
+							    RB_RED);
+					if (rb_is_red(parent))
+						rb_set_black(parent);
+					else {
+						node = parent;
+						parent = rb_parent(node);
+						if (parent)
+							continue;
+					}
+					break;
 				}
-				rb_set_color(other, rb_color(parent));
-				rb_set_black(parent);
-				rb_set_black(other->rb_left);
-				__rb_rotate_right(parent, root);
-				node = root->rb_node;
-				break;
+				/* Case 3 - right rotate at sibling */
+				sibling->rb_right = tmp1 = tmp2->rb_left;
+				tmp2->rb_left = sibling;
+				parent->rb_left = tmp2;
+				if (tmp1)
+					rb_set_parent_color(tmp1, sibling,
+							    RB_BLACK);
+				augment_rotate(sibling, tmp2);
+				tmp1 = sibling;
+				sibling = tmp2;
 			}
+			/* Case 4 - left rotate at parent + color flips */
+			parent->rb_left = tmp2 = sibling->rb_right;
+			sibling->rb_right = parent;
+			rb_set_parent_color(tmp1, sibling, RB_BLACK);
+			if (tmp2)
+				rb_set_parent(tmp2, parent);
+			__rb_rotate_set_parents(parent, sibling, root,
+						RB_BLACK);
+			augment_rotate(parent, sibling);
+			break;
 		}
 	}
-	if (node)
-		rb_set_black(node);
 }
+EXPORT_SYMBOL(__rb_erase_color);
 
-void rb_erase(struct rb_node *node, struct rb_root *root)
-{
-	struct rb_node *child, *parent;
-	int color;
-
-	if (!node->rb_left)
-		child = node->rb_right;
-	else if (!node->rb_right)
-		child = node->rb_left;
-	else
-	{
-		struct rb_node *old = node, *left;
-
-		node = node->rb_right;
-		while ((left = node->rb_left) != NULL)
-			node = left;
-
-		if (rb_parent(old)) {
-			if (rb_parent(old)->rb_left == old)
-				rb_parent(old)->rb_left = node;
-			else
-				rb_parent(old)->rb_right = node;
-		} else
-			root->rb_node = node;
-
-		child = node->rb_right;
-		parent = rb_parent(node);
-		color = rb_color(node);
-
-		if (parent == old) {
-			parent = node;
-		} else {
-			if (child)
-				rb_set_parent(child, parent);
-			parent->rb_left = child;
-
-			node->rb_right = old->rb_right;
-			rb_set_parent(old->rb_right, node);
-		}
-
-		node->rb_parent_color = old->rb_parent_color;
-		node->rb_left = old->rb_left;
-		rb_set_parent(old->rb_left, node);
+/*
+ * Non-augmented rbtree manipulation functions.
+ *
+ * We use dummy augmented callbacks here, and have the compiler optimize them
+ * out of the rb_insert_color() and rb_erase() function definitions.
+ */
 
-		goto color;
-	}
+static inline void dummy_propagate(struct rb_node *node, struct rb_node *stop) {}
+static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
+static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
 
-	parent = rb_parent(node);
-	color = rb_color(node);
-
-	if (child)
-		rb_set_parent(child, parent);
-	if (parent)
-	{
-		if (parent->rb_left == node)
-			parent->rb_left = child;
-		else
-			parent->rb_right = child;
-	}
-	else
-		root->rb_node = child;
+static const struct rb_augment_callbacks dummy_callbacks = {
+	dummy_propagate, dummy_copy, dummy_rotate
+};
 
- color:
-	if (color == RB_BLACK)
-		__rb_erase_color(child, parent, root);
-}
-EXPORT_SYMBOL(rb_erase);
-
-static void rb_augment_path(struct rb_node *node, rb_augment_f func, void *data)
+void rb_insert_color(struct rb_node *node, struct rb_root *root)
 {
-	struct rb_node *parent;
-
-up:
-	func(node, data);
-	parent = rb_parent(node);
-	if (!parent)
-		return;
-
-	if (node == parent->rb_left && parent->rb_right)
-		func(parent->rb_right, data);
-	else if (parent->rb_left)
-		func(parent->rb_left, data);
-
-	node = parent;
-	goto up;
+	__rb_insert(node, root, dummy_rotate);
 }
+EXPORT_SYMBOL(rb_insert_color);
 
-/*
- * after inserting @node into the tree, update the tree to account for
- * both the new entry and any damage done by rebalance
- */
-void rb_augment_insert(struct rb_node *node, rb_augment_f func, void *data)
+void rb_erase(struct rb_node *node, struct rb_root *root)
 {
-	if (node->rb_left)
-		node = node->rb_left;
-	else if (node->rb_right)
-		node = node->rb_right;
-
-	rb_augment_path(node, func, data);
+	rb_erase_augmented(node, root, &dummy_callbacks);
 }
-EXPORT_SYMBOL(rb_augment_insert);
+EXPORT_SYMBOL(rb_erase);
 
 /*
- * before removing the node, find the deepest node on the rebalance path
- * that will still be there after @node gets removed
+ * Augmented rbtree manipulation functions.
+ *
+ * This instantiates the same __always_inline functions as in the non-augmented
+ * case, but this time with user-defined callbacks.
  */
-struct rb_node *rb_augment_erase_begin(struct rb_node *node)
-{
-	struct rb_node *deepest;
-
-	if (!node->rb_right && !node->rb_left)
-		deepest = rb_parent(node);
-	else if (!node->rb_right)
-		deepest = node->rb_left;
-	else if (!node->rb_left)
-		deepest = node->rb_right;
-	else {
-		deepest = rb_next(node);
-		if (deepest->rb_right)
-			deepest = deepest->rb_right;
-		else if (rb_parent(deepest) != node)
-			deepest = rb_parent(deepest);
-	}
-
-	return deepest;
-}
-EXPORT_SYMBOL(rb_augment_erase_begin);
 
-/*
- * after removal, update the tree to account for the removed entry
- * and any rebalance damage.
- */
-void rb_augment_erase_end(struct rb_node *node, rb_augment_f func, void *data)
+void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
+	void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
 {
-	if (node)
-		rb_augment_path(node, func, data);
+	__rb_insert(node, root, augment_rotate);
 }
-EXPORT_SYMBOL(rb_augment_erase_end);
+EXPORT_SYMBOL(__rb_insert_augmented);
 
 /*
  * This function returns the first node (in sort order) of the tree.
@@ -387,11 +431,13 @@ struct rb_node *rb_next(const struct rb_node *node)
 {
 	struct rb_node *parent;
 
-	if (rb_parent(node) == node)
+	if (RB_EMPTY_NODE(node))
 		return NULL;
 
-	/* If we have a right-hand child, go down and then left as far
-	   as we can. */
+	/*
+	 * If we have a right-hand child, go down and then left as far
+	 * as we can.
+ */ if (node->rb_right) { node = node->rb_right; while (node->rb_left) @@ -399,12 +445,13 @@ struct rb_node *rb_next(const struct rb_node *node) return (struct rb_node *)node; } - /* No right-hand children. Everything down and left is - smaller than us, so any 'next' node must be in the general - direction of our parent. Go up the tree; any time the - ancestor is a right-hand child of its parent, keep going - up. First time it's a left-hand child of its parent, said - parent is our 'next' node. */ + /* + * No right-hand children. Everything down and left is smaller than us, + * so any 'next' node must be in the general direction of our parent. + * Go up the tree; any time the ancestor is a right-hand child of its + * parent, keep going up. First time it's a left-hand child of its + * parent, said parent is our 'next' node. + */ while ((parent = rb_parent(node)) && node == parent->rb_right) node = parent; @@ -416,11 +463,13 @@ struct rb_node *rb_prev(const struct rb_node *node) { struct rb_node *parent; - if (rb_parent(node) == node) + if (RB_EMPTY_NODE(node)) return NULL; - /* If we have a left-hand child, go down and then right as far - as we can. */ + /* + * If we have a left-hand child, go down and then right as far + * as we can. + */ if (node->rb_left) { node = node->rb_left; while (node->rb_right) @@ -428,8 +477,10 @@ struct rb_node *rb_prev(const struct rb_node *node) return (struct rb_node *)node; } - /* No left-hand children. Go up till we find an ancestor which - is a right-hand child of its parent */ + /* + * No left-hand children. Go up till we find an ancestor which + * is a right-hand child of its parent. + */ while ((parent = rb_parent(node)) && node == parent->rb_left) node = parent; @@ -443,14 +494,7 @@ void rb_replace_node(struct rb_node *victim, struct rb_node *new, struct rb_node *parent = rb_parent(victim); /* Set the surrounding nodes to point to the replacement */ - if (parent) { - if (victim == parent->rb_left) - parent->rb_left = new; - else - parent->rb_right = new; - } else { - root->rb_node = new; - } + __rb_change_child(victim, new, parent, root); if (victim->rb_left) rb_set_parent(victim->rb_left, new); if (victim->rb_right) diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c new file mode 100644 index 000000000000..268b23951fec --- /dev/null +++ b/lib/rbtree_test.c @@ -0,0 +1,234 @@ +#include <linux/module.h> +#include <linux/rbtree_augmented.h> +#include <linux/random.h> +#include <asm/timex.h> + +#define NODES 100 +#define PERF_LOOPS 100000 +#define CHECK_LOOPS 100 + +struct test_node { + struct rb_node rb; + u32 key; + + /* following fields used for testing augmented rbtree functionality */ + u32 val; + u32 augmented; +}; + +static struct rb_root root = RB_ROOT; +static struct test_node nodes[NODES]; + +static struct rnd_state rnd; + +static void insert(struct test_node *node, struct rb_root *root) +{ + struct rb_node **new = &root->rb_node, *parent = NULL; + u32 key = node->key; + + while (*new) { + parent = *new; + if (key < rb_entry(parent, struct test_node, rb)->key) + new = &parent->rb_left; + else + new = &parent->rb_right; + } + + rb_link_node(&node->rb, parent, new); + rb_insert_color(&node->rb, root); +} + +static inline void erase(struct test_node *node, struct rb_root *root) +{ + rb_erase(&node->rb, root); +} + +static inline u32 augment_recompute(struct test_node *node) +{ + u32 max = node->val, child_augmented; + if (node->rb.rb_left) { + child_augmented = rb_entry(node->rb.rb_left, struct test_node, + rb)->augmented; + if (max < 
child_augmented) + max = child_augmented; + } + if (node->rb.rb_right) { + child_augmented = rb_entry(node->rb.rb_right, struct test_node, + rb)->augmented; + if (max < child_augmented) + max = child_augmented; + } + return max; +} + +RB_DECLARE_CALLBACKS(static, augment_callbacks, struct test_node, rb, + u32, augmented, augment_recompute) + +static void insert_augmented(struct test_node *node, struct rb_root *root) +{ + struct rb_node **new = &root->rb_node, *rb_parent = NULL; + u32 key = node->key; + u32 val = node->val; + struct test_node *parent; + + while (*new) { + rb_parent = *new; + parent = rb_entry(rb_parent, struct test_node, rb); + if (parent->augmented < val) + parent->augmented = val; + if (key < parent->key) + new = &parent->rb.rb_left; + else + new = &parent->rb.rb_right; + } + + node->augmented = val; + rb_link_node(&node->rb, rb_parent, new); + rb_insert_augmented(&node->rb, root, &augment_callbacks); +} + +static void erase_augmented(struct test_node *node, struct rb_root *root) +{ + rb_erase_augmented(&node->rb, root, &augment_callbacks); +} + +static void init(void) +{ + int i; + for (i = 0; i < NODES; i++) { + nodes[i].key = prandom32(&rnd); + nodes[i].val = prandom32(&rnd); + } +} + +static bool is_red(struct rb_node *rb) +{ + return !(rb->__rb_parent_color & 1); +} + +static int black_path_count(struct rb_node *rb) +{ + int count; + for (count = 0; rb; rb = rb_parent(rb)) + count += !is_red(rb); + return count; +} + +static void check(int nr_nodes) +{ + struct rb_node *rb; + int count = 0; + int blacks; + u32 prev_key = 0; + + for (rb = rb_first(&root); rb; rb = rb_next(rb)) { + struct test_node *node = rb_entry(rb, struct test_node, rb); + WARN_ON_ONCE(node->key < prev_key); + WARN_ON_ONCE(is_red(rb) && + (!rb_parent(rb) || is_red(rb_parent(rb)))); + if (!count) + blacks = black_path_count(rb); + else + WARN_ON_ONCE((!rb->rb_left || !rb->rb_right) && + blacks != black_path_count(rb)); + prev_key = node->key; + count++; + } + WARN_ON_ONCE(count != nr_nodes); +} + +static void check_augmented(int nr_nodes) +{ + struct rb_node *rb; + + check(nr_nodes); + for (rb = rb_first(&root); rb; rb = rb_next(rb)) { + struct test_node *node = rb_entry(rb, struct test_node, rb); + WARN_ON_ONCE(node->augmented != augment_recompute(node)); + } +} + +static int rbtree_test_init(void) +{ + int i, j; + cycles_t time1, time2, time; + + printk(KERN_ALERT "rbtree testing"); + + prandom32_seed(&rnd, 3141592653589793238ULL); + init(); + + time1 = get_cycles(); + + for (i = 0; i < PERF_LOOPS; i++) { + for (j = 0; j < NODES; j++) + insert(nodes + j, &root); + for (j = 0; j < NODES; j++) + erase(nodes + j, &root); + } + + time2 = get_cycles(); + time = time2 - time1; + + time = div_u64(time, PERF_LOOPS); + printk(" -> %llu cycles\n", (unsigned long long)time); + + for (i = 0; i < CHECK_LOOPS; i++) { + init(); + for (j = 0; j < NODES; j++) { + check(j); + insert(nodes + j, &root); + } + for (j = 0; j < NODES; j++) { + check(NODES - j); + erase(nodes + j, &root); + } + check(0); + } + + printk(KERN_ALERT "augmented rbtree testing"); + + init(); + + time1 = get_cycles(); + + for (i = 0; i < PERF_LOOPS; i++) { + for (j = 0; j < NODES; j++) + insert_augmented(nodes + j, &root); + for (j = 0; j < NODES; j++) + erase_augmented(nodes + j, &root); + } + + time2 = get_cycles(); + time = time2 - time1; + + time = div_u64(time, PERF_LOOPS); + printk(" -> %llu cycles\n", (unsigned long long)time); + + for (i = 0; i < CHECK_LOOPS; i++) { + init(); + for (j = 0; j < NODES; j++) { + check_augmented(j); + 
insert_augmented(nodes + j, &root);
+ }
+ for (j = 0; j < NODES; j++) {
+ check_augmented(NODES - j);
+ erase_augmented(nodes + j, &root);
+ }
+ check_augmented(0);
+ }
+
+ return -EAGAIN; /* Failing will directly unload the module */
+}
+
+static void rbtree_test_exit(void)
+{
+ printk(KERN_ALERT "test exit\n");
+}
+
+module_init(rbtree_test_init)
+module_exit(rbtree_test_exit)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Michel Lespinasse");
+MODULE_DESCRIPTION("Red Black Tree test");
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index fadae774a20c..3675452b23ca 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -39,6 +39,25 @@ struct scatterlist *sg_next(struct scatterlist *sg)
 EXPORT_SYMBOL(sg_next);
 
 /**
+ * sg_nents - return total count of entries in scatterlist
+ * @sg: The scatterlist
+ *
+ * Description:
+ * Returns the total number of entries in @sg, taking chaining into
+ * account.
+ *
+ **/
+int sg_nents(struct scatterlist *sg)
+{
+ int nents;
+ for (nents = 0; sg; sg = sg_next(sg))
+ nents++;
+ return nents;
+}
+EXPORT_SYMBOL(sg_nents);
+
+
+/**
 * sg_last - return the last scatterlist entry in a list
 * @sgl: First entry in the scatterlist
 * @nents: Number of entries in the scatterlist
@@ -404,14 +423,13 @@ EXPORT_SYMBOL(sg_miter_start);
 * @miter: sg mapping iter to proceed
 *
 * Description:
- * Proceeds @miter@ to the next mapping. @miter@ should have been
- * started using sg_miter_start(). On successful return,
- * @miter@->page, @miter@->addr and @miter@->length point to the
- * current mapping.
+ * Proceeds @miter to the next mapping. @miter should have been started
+ * using sg_miter_start(). On successful return, @miter->page,
+ * @miter->addr and @miter->length point to the current mapping.
 *
 * Context:
- * IRQ disabled if SG_MITER_ATOMIC. IRQ must stay disabled till
- * @miter@ is stopped. May sleep if !SG_MITER_ATOMIC.
+ * Preemption disabled if SG_MITER_ATOMIC. Preemption must stay disabled
+ * till @miter is stopped. May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 * true if @miter contains the next mapping. false if end of sg
@@ -465,7 +483,8 @@ EXPORT_SYMBOL(sg_miter_next);
 * resources (kmap) need to be released during iteration.
 *
 * Context:
- * IRQ disabled if the SG_MITER_ATOMIC is set. Don't care otherwise.
+ * Preemption disabled if the SG_MITER_ATOMIC is set. Don't care
+ * otherwise.
 */
 void sg_miter_stop(struct sg_mapping_iter *miter)
 {
@@ -479,7 +498,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
 flush_kernel_dcache_page(miter->page);
 
 if (miter->__flags & SG_MITER_ATOMIC) {
- WARN_ON(!irqs_disabled());
+ WARN_ON_ONCE(preemptible());
 kunmap_atomic(miter->addr);
 } else
 kunmap(miter->page);
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index eb10578ae055..0374a596cffa 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -107,23 +107,27 @@ static void __spin_lock_debug(raw_spinlock_t *lock)
 {
 u64 i;
 u64 loops = loops_per_jiffy * HZ;
- int print_once = 1;
 
- for (;;) {
- for (i = 0; i < loops; i++) {
- if (arch_spin_trylock(&lock->raw_lock))
- return;
- __delay(1);
- }
- /* lockup suspected: */
- if (print_once) {
- print_once = 0;
- spin_dump(lock, "lockup suspected");
+ for (i = 0; i < loops; i++) {
+ if (arch_spin_trylock(&lock->raw_lock))
+ return;
+ __delay(1);
+ }
+ /* lockup suspected: */
+ spin_dump(lock, "lockup suspected");
 #ifdef CONFIG_SMP
- trigger_all_cpu_backtrace();
+ trigger_all_cpu_backtrace();
 #endif
- }
- }
+
+ /*
+ * The trylock above was causing a livelock.
Give the lower level arch + * specific lock code a chance to acquire the lock. We have already + * printed a warning/backtrace at this point. The non-debug arch + * specific code might actually succeed in acquiring the lock. If it is + * not successful, the end-result is the same - there is no forward + * progress. + */ + arch_spin_lock(&lock->raw_lock); } void do_raw_spin_lock(raw_spinlock_t *lock) diff --git a/lib/swiotlb.c b/lib/swiotlb.c index 45bc1f83a5ad..f114bf6a8e13 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -170,7 +170,7 @@ void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) * Statically reserve bounce buffer space and initialize bounce buffer data * structures for the software IO TLB used to implement the DMA API. */ -void __init +static void __init swiotlb_init_with_default_size(size_t default_size, int verbose) { unsigned long bytes; @@ -206,8 +206,9 @@ swiotlb_init(int verbose) int swiotlb_late_init_with_default_size(size_t default_size) { - unsigned long i, bytes, req_nslabs = io_tlb_nslabs; + unsigned long bytes, req_nslabs = io_tlb_nslabs; unsigned int order; + int rc = 0; if (!io_tlb_nslabs) { io_tlb_nslabs = (default_size >> IO_TLB_SHIFT); @@ -229,16 +230,32 @@ swiotlb_late_init_with_default_size(size_t default_size) order--; } - if (!io_tlb_start) - goto cleanup1; - + if (!io_tlb_start) { + io_tlb_nslabs = req_nslabs; + return -ENOMEM; + } if (order != get_order(bytes)) { printk(KERN_WARNING "Warning: only able to allocate %ld MB " "for software IO TLB\n", (PAGE_SIZE << order) >> 20); io_tlb_nslabs = SLABS_PER_PAGE << order; - bytes = io_tlb_nslabs << IO_TLB_SHIFT; } + rc = swiotlb_late_init_with_tbl(io_tlb_start, io_tlb_nslabs); + if (rc) + free_pages((unsigned long)io_tlb_start, order); + return rc; +} + +int +swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs) +{ + unsigned long i, bytes; + + bytes = nslabs << IO_TLB_SHIFT; + + io_tlb_nslabs = nslabs; + io_tlb_start = tlb; io_tlb_end = io_tlb_start + bytes; + memset(io_tlb_start, 0, bytes); /* @@ -288,10 +305,8 @@ cleanup3: io_tlb_list = NULL; cleanup2: io_tlb_end = NULL; - free_pages((unsigned long)io_tlb_start, order); io_tlb_start = NULL; -cleanup1: - io_tlb_nslabs = req_nslabs; + io_tlb_nslabs = 0; return -ENOMEM; } diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 0e337541f005..39c99fea7c03 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -174,35 +174,25 @@ char *put_dec_trunc8(char *buf, unsigned r) unsigned q; /* Copy of previous function's body with added early returns */ - q = (r * (uint64_t)0x1999999a) >> 32; - *buf++ = (r - 10 * q) + '0'; /* 2 */ - if (q == 0) - return buf; - r = (q * (uint64_t)0x1999999a) >> 32; - *buf++ = (q - 10 * r) + '0'; /* 3 */ - if (r == 0) - return buf; - q = (r * (uint64_t)0x1999999a) >> 32; - *buf++ = (r - 10 * q) + '0'; /* 4 */ - if (q == 0) - return buf; - r = (q * (uint64_t)0x1999999a) >> 32; - *buf++ = (q - 10 * r) + '0'; /* 5 */ - if (r == 0) - return buf; - q = (r * 0x199a) >> 16; - *buf++ = (r - 10 * q) + '0'; /* 6 */ + while (r >= 10000) { + q = r + '0'; + r = (r * (uint64_t)0x1999999a) >> 32; + *buf++ = q - 10*r; + } + + q = (r * 0x199a) >> 16; /* r <= 9999 */ + *buf++ = (r - 10 * q) + '0'; if (q == 0) return buf; - r = (q * 0xcd) >> 11; - *buf++ = (q - 10 * r) + '0'; /* 7 */ + r = (q * 0xcd) >> 11; /* q <= 999 */ + *buf++ = (q - 10 * r) + '0'; if (r == 0) return buf; - q = (r * 0xcd) >> 11; - *buf++ = (r - 10 * q) + '0'; /* 8 */ + q = (r * 0xcd) >> 11; /* r <= 99 */ + *buf++ = (r - 10 * q) + '0'; if (q == 0) return buf; - 
*buf++ = q + '0'; /* 9 */ + *buf++ = q + '0'; /* q <= 9 */ return buf; } @@ -243,18 +233,34 @@ char *put_dec(char *buf, unsigned long long n) /* Second algorithm: valid only for 64-bit long longs */ +/* See comment in put_dec_full9 for choice of constants */ static noinline_for_stack -char *put_dec_full4(char *buf, unsigned q) +void put_dec_full4(char *buf, unsigned q) { unsigned r; - r = (q * 0xcccd) >> 19; - *buf++ = (q - 10 * r) + '0'; - q = (r * 0x199a) >> 16; - *buf++ = (r - 10 * q) + '0'; + r = (q * 0xccd) >> 15; + buf[0] = (q - 10 * r) + '0'; + q = (r * 0xcd) >> 11; + buf[1] = (r - 10 * q) + '0'; r = (q * 0xcd) >> 11; - *buf++ = (q - 10 * r) + '0'; - *buf++ = r + '0'; - return buf; + buf[2] = (q - 10 * r) + '0'; + buf[3] = r + '0'; +} + +/* + * Call put_dec_full4 on x % 10000, return x / 10000. + * The approximation x/10000 == (x * 0x346DC5D7) >> 43 + * holds for all x < 1,128,869,999. The largest value this + * helper will ever be asked to convert is 1,125,520,955. + * (d1 in the put_dec code, assuming n is all-ones). + */ +static +unsigned put_dec_helper4(char *buf, unsigned x) +{ + uint32_t q = (x * (uint64_t)0x346DC5D7) >> 43; + + put_dec_full4(buf, x - q * 10000); + return q; } /* Based on code by Douglas W. Jones found at @@ -276,28 +282,19 @@ char *put_dec(char *buf, unsigned long long n) d3 = (h >> 16); /* implicit "& 0xffff" */ q = 656 * d3 + 7296 * d2 + 5536 * d1 + ((uint32_t)n & 0xffff); + q = put_dec_helper4(buf, q); + + q += 7671 * d3 + 9496 * d2 + 6 * d1; + q = put_dec_helper4(buf+4, q); + + q += 4749 * d3 + 42 * d2; + q = put_dec_helper4(buf+8, q); - buf = put_dec_full4(buf, q % 10000); - q = q / 10000; - - d1 = q + 7671 * d3 + 9496 * d2 + 6 * d1; - buf = put_dec_full4(buf, d1 % 10000); - q = d1 / 10000; - - d2 = q + 4749 * d3 + 42 * d2; - buf = put_dec_full4(buf, d2 % 10000); - q = d2 / 10000; - - d3 = q + 281 * d3; - if (!d3) - goto done; - buf = put_dec_full4(buf, d3 % 10000); - q = d3 / 10000; - if (!q) - goto done; - buf = put_dec_full4(buf, q); - done: - while (buf[-1] == '0') + q += 281 * d3; + buf += 12; + if (q) + buf = put_dec_trunc8(buf, q); + else while (buf[-1] == '0') --buf; return buf; @@ -990,7 +987,7 @@ int kptr_restrict __read_mostly; * - 'm' For a 6-byte MAC address, it prints the hex address without colons * - 'MF' For a 6-byte MAC FDDI address, it prints the address * with a dash-separated hex notation - * - '[mM]R For a 6-byte MAC address, Reverse order (Bluetooth) + * - '[mM]R' For a 6-byte MAC address, Reverse order (Bluetooth) * - 'I' [46] for IPv4/IPv6 addresses printed in the usual way * IPv4 uses dot-separated decimal without leading 0's (1.2.3.4) * IPv6 uses colon separated network-order 16 bit hex with leading 0's @@ -1341,7 +1338,10 @@ qualifier: * %pR output the address range in a struct resource with decoded flags * %pr output the address range in a struct resource with raw flags * %pM output a 6-byte MAC address with colons + * %pMR output a 6-byte MAC address with colons in reversed order + * %pMF output a 6-byte MAC address with dashes * %pm output a 6-byte MAC address without colons + * %pmR output a 6-byte MAC address without colons in reversed order * %pI4 print an IPv4 address without leading zeros * %pi4 print an IPv4 address with leading zeros * %pI6 print an IPv6 address with colons @@ -2017,7 +2017,7 @@ int vsscanf(const char *buf, const char *fmt, va_list args) s16 field_width; bool is_sign; - while (*fmt && *str) { + while (*fmt) { /* skip any white space in format */ /* white space in format matchs any amount of * white 
space, including none, in the input.
@@ -2042,6 +2042,8 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
 * advance both strings to next white space
 */
 if (*fmt == '*') {
+ if (!*str)
+ break;
 while (!isspace(*fmt) && *fmt != '%' && *fmt)
 fmt++;
 while (!isspace(*str) && *str)
@@ -2070,7 +2072,17 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
 }
 }
 
- if (!*fmt || !*str)
+ if (!*fmt)
+ break;
+
+ if (*fmt == 'n') {
+ /* return number of characters read so far */
+ *va_arg(args, int *) = str - buf;
+ ++fmt;
+ continue;
+ }
+
+ if (!*str)
 break;
 
 base = 10;
@@ -2103,13 +2115,6 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
 num++;
 }
 continue;
- case 'n':
- /* return number of characters read so far */
- {
- int *i = (int *)va_arg(args, int*);
- *i = str - buf;
- }
- continue;
 case 'o':
 base = 8;
 break;
@@ -2210,16 +2215,6 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
 str = next;
 }
 
- /*
- * Now we've come all the way through so either the input string or the
- * format ended. In the former case, there can be a %n at the current
- * position in the format that needs to be filled.
- */
- if (*fmt == '%' && *(fmt + 1) == 'n') {
- int *p = (int *)va_arg(args, int *);
- *p = str - buf;
- }
-
 return num;
 }
 EXPORT_SYMBOL(vsscanf);
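
A note on the vsscanf() rework above: handling the 'n' conversion inside
the main loop, ahead of the !*str check, means %n is now honoured even
after the input string has been exhausted, and even when format white
space precedes it; the removed tail special-case fired only when fmt
pointed directly at "%n". A minimal sketch of calling code (hypothetical
variable names, not part of the patch) where only the new logic fills in
the count:

	int val, chars = -1;

	/*
	 * Old code: the loop exited once *str hit '\0' while fmt still
	 * pointed at " %n", and the tail special-case required a literal
	 * leading '%', so chars stayed -1. New code: the format white
	 * space is skipped, the 'n' conversion runs, and chars == 2
	 * (with val == 42 and a return value of 1 in both versions).
	 */
	sscanf("42", "%d %n", &val, &chars);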
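Likewise, the reciprocal constant used by put_dec_helper4() is easy to
sanity-check: 0x346DC5D7 is 879609303, i.e. 2^43/10000 rounded up, and
the stated bound is tight (the approximation first fails at
x == 1,128,869,999). A small user-space harness (a sketch, not part of
the patch) verifies the claim exhaustively in a few seconds of CPU time:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t x;

		/* x/10000 == (x * 0x346DC5D7) >> 43 over the claimed range */
		for (x = 0; x < 1128869999u; x++)
			assert((uint32_t)(((uint64_t)x * 0x346DC5D7u) >> 43)
			       == x / 10000);
		return 0;
	}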