Diffstat (limited to 'lib')
-rw-r--r-- | lib/Kconfig                |  10
-rw-r--r-- | lib/Makefile               |   4
-rw-r--r-- | lib/btree.c                |   1
-rw-r--r-- | lib/cordic.c               |   2
-rw-r--r-- | lib/crc32.c                |  21
-rw-r--r-- | lib/debugobjects.c         |  54
-rw-r--r-- | lib/decompress_bunzip2.c   |   5
-rw-r--r-- | lib/decompress_unlzma.c    |   2
-rw-r--r-- | lib/devres.c               |  55
-rw-r--r-- | lib/dma-debug.c            |   2
-rw-r--r-- | lib/dynamic_queue_limits.c | 133
-rw-r--r-- | lib/fault-inject.c         |   8
-rw-r--r-- | lib/kobject.c              |  37
-rw-r--r-- | lib/kobject_uevent.c       |   3
-rw-r--r-- | lib/kref.c                 |  97
-rw-r--r-- | lib/reciprocal_div.c       |   2
-rw-r--r-- | lib/swiotlb.c              |   5
-rw-r--r-- | lib/vsprintf.c             |  19
18 files changed, 293 insertions, 167 deletions
diff --git a/lib/Kconfig b/lib/Kconfig index 36884b409e37..7f6b8bca8c25 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -251,6 +251,9 @@ config CPU_RMAP bool depends on SMP +config DQL + bool + # # Netlink attribute parsing support is select'ed if needed # @@ -277,10 +280,9 @@ config AVERAGE If unsure, say N. config CORDIC - tristate "Cordic function" + tristate "CORDIC algorithm" help - The option provides arithmetic function using cordic algorithm - so its calculations are in fixed point. Modules can select this - when they require this function. Module will be called cordic. + This option provides an implementation of the CORDIC algorithm; + calculations are in fixed point. Module will be called cordic. endmenu diff --git a/lib/Makefile b/lib/Makefile index 609b2adc604c..884ed376164d 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -17,7 +17,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \ lib-$(CONFIG_MMU) += ioremap.o lib-$(CONFIG_SMP) += cpumask.o -lib-y += kobject.o kref.o klist.o +lib-y += kobject.o klist.o obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ @@ -116,6 +116,8 @@ obj-$(CONFIG_CPU_RMAP) += cpu_rmap.o obj-$(CONFIG_CORDIC) += cordic.o +obj-$(CONFIG_DQL) += dynamic_queue_limits.o + hostprogs-y := gen_crc32table clean-files := crc32table.h diff --git a/lib/btree.c b/lib/btree.c index 2a34392bcecc..e5ec1e9c1aa5 100644 --- a/lib/btree.c +++ b/lib/btree.c @@ -357,6 +357,7 @@ miss: } return NULL; } +EXPORT_SYMBOL_GPL(btree_get_prev); static int getpos(struct btree_geo *geo, unsigned long *node, unsigned long *key) diff --git a/lib/cordic.c b/lib/cordic.c index aa27a88d7e04..6cf477839ebd 100644 --- a/lib/cordic.c +++ b/lib/cordic.c @@ -96,6 +96,6 @@ struct cordic_iq cordic_calc_iq(s32 theta) } EXPORT_SYMBOL(cordic_calc_iq); -MODULE_DESCRIPTION("Cordic functions"); +MODULE_DESCRIPTION("CORDIC algorithm"); MODULE_AUTHOR("Broadcom Corporation"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/lib/crc32.c b/lib/crc32.c index a6e633a48cea..4b35d2b4437c 100644 --- a/lib/crc32.c +++ b/lib/crc32.c @@ -51,20 +51,21 @@ static inline u32 crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256]) { # ifdef __LITTLE_ENDIAN -# define DO_CRC(x) crc = tab[0][(crc ^ (x)) & 255] ^ (crc >> 8) -# define DO_CRC4 crc = tab[3][(crc) & 255] ^ \ - tab[2][(crc >> 8) & 255] ^ \ - tab[1][(crc >> 16) & 255] ^ \ - tab[0][(crc >> 24) & 255] +# define DO_CRC(x) crc = t0[(crc ^ (x)) & 255] ^ (crc >> 8) +# define DO_CRC4 crc = t3[(crc) & 255] ^ \ + t2[(crc >> 8) & 255] ^ \ + t1[(crc >> 16) & 255] ^ \ + t0[(crc >> 24) & 255] # else -# define DO_CRC(x) crc = tab[0][((crc >> 24) ^ (x)) & 255] ^ (crc << 8) -# define DO_CRC4 crc = tab[0][(crc) & 255] ^ \ - tab[1][(crc >> 8) & 255] ^ \ - tab[2][(crc >> 16) & 255] ^ \ - tab[3][(crc >> 24) & 255] +# define DO_CRC(x) crc = t0[((crc >> 24) ^ (x)) & 255] ^ (crc << 8) +# define DO_CRC4 crc = t0[(crc) & 255] ^ \ + t1[(crc >> 8) & 255] ^ \ + t2[(crc >> 16) & 255] ^ \ + t3[(crc >> 24) & 255] # endif const u32 *b; size_t rem_len; + const u32 *t0=tab[0], *t1=tab[1], *t2=tab[2], *t3=tab[3]; /* Align it */ if (unlikely((long)buf & 3 && len)) { diff --git a/lib/debugobjects.c b/lib/debugobjects.c index a78b7c6e042c..77cb245f8e7b 100644 --- a/lib/debugobjects.c +++ b/lib/debugobjects.c @@ -268,12 +268,16 @@ static void debug_print_object(struct debug_obj *obj, char *msg) * Try to repair the damage, so we have a better chance to get useful * debug output. 
*/ -static void +static int debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state), void * addr, enum debug_obj_state state) { + int fixed = 0; + if (fixup) - debug_objects_fixups += fixup(addr, state); + fixed = fixup(addr, state); + debug_objects_fixups += fixed; + return fixed; } static void debug_object_is_on_stack(void *addr, int onstack) @@ -386,6 +390,9 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr) struct debug_bucket *db; struct debug_obj *obj; unsigned long flags; + struct debug_obj o = { .object = addr, + .state = ODEBUG_STATE_NOTAVAILABLE, + .descr = descr }; if (!debug_objects_enabled) return; @@ -425,8 +432,9 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr) * let the type specific code decide whether this is * true or not. */ - debug_object_fixup(descr->fixup_activate, addr, - ODEBUG_STATE_NOTAVAILABLE); + if (debug_object_fixup(descr->fixup_activate, addr, + ODEBUG_STATE_NOTAVAILABLE)) + debug_print_object(&o, "activate"); } /** @@ -563,6 +571,44 @@ out_unlock: } /** + * debug_object_assert_init - debug checks when object should be init-ed + * @addr: address of the object + * @descr: pointer to an object specific debug description structure + */ +void debug_object_assert_init(void *addr, struct debug_obj_descr *descr) +{ + struct debug_bucket *db; + struct debug_obj *obj; + unsigned long flags; + + if (!debug_objects_enabled) + return; + + db = get_bucket((unsigned long) addr); + + raw_spin_lock_irqsave(&db->lock, flags); + + obj = lookup_object(addr, db); + if (!obj) { + struct debug_obj o = { .object = addr, + .state = ODEBUG_STATE_NOTAVAILABLE, + .descr = descr }; + + raw_spin_unlock_irqrestore(&db->lock, flags); + /* + * Maybe the object is static. Let the type specific + * code decide what to do. + */ + if (debug_object_fixup(descr->fixup_assert_init, addr, + ODEBUG_STATE_NOTAVAILABLE)) + debug_print_object(&o, "assert_init"); + return; + } + + raw_spin_unlock_irqrestore(&db->lock, flags); +} + +/** * debug_object_active_state - debug checks object usage state machine * @addr: address of the object * @descr: pointer to an object specific debug description structure diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c index a7b80c1d6a0d..31c5f7675fbf 100644 --- a/lib/decompress_bunzip2.c +++ b/lib/decompress_bunzip2.c @@ -1,4 +1,3 @@ -/* vi: set sw = 4 ts = 4: */ /* Small bzip2 deflate implementation, by Rob Landley (rob@landley.net). 
Based on bzip2 decompression code by Julian R Seward (jseward@acm.org), @@ -691,7 +690,7 @@ STATIC int INIT bunzip2(unsigned char *buf, int len, outbuf = malloc(BZIP2_IOBUF_SIZE); if (!outbuf) { - error("Could not allocate output bufer"); + error("Could not allocate output buffer"); return RETVAL_OUT_OF_MEMORY; } if (buf) @@ -699,7 +698,7 @@ STATIC int INIT bunzip2(unsigned char *buf, int len, else inbuf = malloc(BZIP2_IOBUF_SIZE); if (!inbuf) { - error("Could not allocate input bufer"); + error("Could not allocate input buffer"); i = RETVAL_OUT_OF_MEMORY; goto exit_0; } diff --git a/lib/decompress_unlzma.c b/lib/decompress_unlzma.c index 476c65af9709..32adb73a9038 100644 --- a/lib/decompress_unlzma.c +++ b/lib/decompress_unlzma.c @@ -562,7 +562,7 @@ STATIC inline int INIT unlzma(unsigned char *buf, int in_len, else inbuf = malloc(LZMA_IOBUF_SIZE); if (!inbuf) { - error("Could not allocate input bufer"); + error("Could not allocate input buffer"); goto exit_0; } diff --git a/lib/devres.c b/lib/devres.c index 7c0e953a7486..4fbc09e6e9e6 100644 --- a/lib/devres.c +++ b/lib/devres.c @@ -85,6 +85,57 @@ void devm_iounmap(struct device *dev, void __iomem *addr) } EXPORT_SYMBOL(devm_iounmap); +/** + * devm_request_and_ioremap() - Check, request region, and ioremap resource + * @dev: Generic device to handle the resource for + * @res: resource to be handled + * + * Takes all necessary steps to ioremap a mem resource. Uses managed device, so + * everything is undone on driver detach. Checks arguments, so you can feed + * it the result from e.g. platform_get_resource() directly. Returns the + * remapped pointer or NULL on error. Usage example: + * + * res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + * base = devm_request_and_ioremap(&pdev->dev, res); + * if (!base) + * return -EADDRNOTAVAIL; + */ +void __iomem *devm_request_and_ioremap(struct device *dev, + struct resource *res) +{ + resource_size_t size; + const char *name; + void __iomem *dest_ptr; + + BUG_ON(!dev); + + if (!res || resource_type(res) != IORESOURCE_MEM) { + dev_err(dev, "invalid resource\n"); + return NULL; + } + + size = resource_size(res); + name = res->name ?: dev_name(dev); + + if (!devm_request_mem_region(dev, res->start, size, name)) { + dev_err(dev, "can't request region for resource %pR\n", res); + return NULL; + } + + if (res->flags & IORESOURCE_CACHEABLE) + dest_ptr = devm_ioremap(dev, res->start, size); + else + dest_ptr = devm_ioremap_nocache(dev, res->start, size); + + if (!dest_ptr) { + dev_err(dev, "ioremap failed for resource %pR\n", res); + devm_release_mem_region(dev, res->start, size); + } + + return dest_ptr; +} +EXPORT_SYMBOL(devm_request_and_ioremap); + #ifdef CONFIG_HAS_IOPORT /* * Generic iomap devres @@ -348,5 +399,5 @@ void pcim_iounmap_regions(struct pci_dev *pdev, u16 mask) } } EXPORT_SYMBOL(pcim_iounmap_regions); -#endif -#endif +#endif /* CONFIG_PCI */ +#endif /* CONFIG_HAS_IOPORT */ diff --git a/lib/dma-debug.c b/lib/dma-debug.c index 74c6c7fce749..fea790a2b176 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c @@ -245,7 +245,7 @@ static void put_hash_bucket(struct hash_bucket *bucket, static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b) { - return ((a->dev_addr == a->dev_addr) && + return ((a->dev_addr == b->dev_addr) && (a->dev == b->dev)) ? 
true : false; } diff --git a/lib/dynamic_queue_limits.c b/lib/dynamic_queue_limits.c new file mode 100644 index 000000000000..3d1bdcdd7db4 --- /dev/null +++ b/lib/dynamic_queue_limits.c @@ -0,0 +1,133 @@ +/* + * Dynamic byte queue limits. See include/linux/dynamic_queue_limits.h + * + * Copyright (c) 2011, Tom Herbert <therbert@google.com> + */ +#include <linux/module.h> +#include <linux/types.h> +#include <linux/ctype.h> +#include <linux/kernel.h> +#include <linux/dynamic_queue_limits.h> + +#define POSDIFF(A, B) ((A) > (B) ? (A) - (B) : 0) + +/* Records completed count and recalculates the queue limit */ +void dql_completed(struct dql *dql, unsigned int count) +{ + unsigned int inprogress, prev_inprogress, limit; + unsigned int ovlimit, all_prev_completed, completed; + + /* Can't complete more than what's in queue */ + BUG_ON(count > dql->num_queued - dql->num_completed); + + completed = dql->num_completed + count; + limit = dql->limit; + ovlimit = POSDIFF(dql->num_queued - dql->num_completed, limit); + inprogress = dql->num_queued - completed; + prev_inprogress = dql->prev_num_queued - dql->num_completed; + all_prev_completed = POSDIFF(completed, dql->prev_num_queued); + + if ((ovlimit && !inprogress) || + (dql->prev_ovlimit && all_prev_completed)) { + /* + * Queue considered starved if: + * - The queue was over-limit in the last interval, + * and there is no more data in the queue. + * OR + * - The queue was over-limit in the previous interval and + * when enqueuing it was possible that all queued data + * had been consumed. This covers the case when queue + * may have becomes starved between completion processing + * running and next time enqueue was scheduled. + * + * When queue is starved increase the limit by the amount + * of bytes both sent and completed in the last interval, + * plus any previous over-limit. + */ + limit += POSDIFF(completed, dql->prev_num_queued) + + dql->prev_ovlimit; + dql->slack_start_time = jiffies; + dql->lowest_slack = UINT_MAX; + } else if (inprogress && prev_inprogress && !all_prev_completed) { + /* + * Queue was not starved, check if the limit can be decreased. + * A decrease is only considered if the queue has been busy in + * the whole interval (the check above). + * + * If there is slack, the amount of execess data queued above + * the the amount needed to prevent starvation, the queue limit + * can be decreased. To avoid hysteresis we consider the + * minimum amount of slack found over several iterations of the + * completion routine. + */ + unsigned int slack, slack_last_objs; + + /* + * Slack is the maximum of + * - The queue limit plus previous over-limit minus twice + * the number of objects completed. Note that two times + * number of completed bytes is a basis for an upper bound + * of the limit. + * - Portion of objects in the last queuing operation that + * was not part of non-zero previous over-limit. That is + * "round down" by non-overlimit portion of the last + * queueing operation. + */ + slack = POSDIFF(limit + dql->prev_ovlimit, + 2 * (completed - dql->num_completed)); + slack_last_objs = dql->prev_ovlimit ? 
+ POSDIFF(dql->prev_last_obj_cnt, dql->prev_ovlimit) : 0; + + slack = max(slack, slack_last_objs); + + if (slack < dql->lowest_slack) + dql->lowest_slack = slack; + + if (time_after(jiffies, + dql->slack_start_time + dql->slack_hold_time)) { + limit = POSDIFF(limit, dql->lowest_slack); + dql->slack_start_time = jiffies; + dql->lowest_slack = UINT_MAX; + } + } + + /* Enforce bounds on limit */ + limit = clamp(limit, dql->min_limit, dql->max_limit); + + if (limit != dql->limit) { + dql->limit = limit; + ovlimit = 0; + } + + dql->adj_limit = limit + completed; + dql->prev_ovlimit = ovlimit; + dql->prev_last_obj_cnt = dql->last_obj_cnt; + dql->num_completed = completed; + dql->prev_num_queued = dql->num_queued; +} +EXPORT_SYMBOL(dql_completed); + +void dql_reset(struct dql *dql) +{ + /* Reset all dynamic values */ + dql->limit = 0; + dql->num_queued = 0; + dql->num_completed = 0; + dql->last_obj_cnt = 0; + dql->prev_num_queued = 0; + dql->prev_last_obj_cnt = 0; + dql->prev_ovlimit = 0; + dql->lowest_slack = UINT_MAX; + dql->slack_start_time = jiffies; +} +EXPORT_SYMBOL(dql_reset); + +int dql_init(struct dql *dql, unsigned hold_time) +{ + dql->max_limit = DQL_MAX_LIMIT; + dql->min_limit = 0; + dql->slack_hold_time = hold_time; + dql_reset(dql); + return 0; +} +EXPORT_SYMBOL(dql_init); diff --git a/lib/fault-inject.c b/lib/fault-inject.c index 4f7554025e30..b4801f51b607 100644 --- a/lib/fault-inject.c +++ b/lib/fault-inject.c @@ -149,7 +149,7 @@ static int debugfs_ul_get(void *data, u64 *val) DEFINE_SIMPLE_ATTRIBUTE(fops_ul, debugfs_ul_get, debugfs_ul_set, "%llu\n"); -static struct dentry *debugfs_create_ul(const char *name, mode_t mode, +static struct dentry *debugfs_create_ul(const char *name, umode_t mode, struct dentry *parent, unsigned long *value) { return debugfs_create_file(name, mode, parent, value, &fops_ul); @@ -169,7 +169,7 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_stacktrace_depth, debugfs_ul_get, debugfs_stacktrace_depth_set, "%llu\n"); static struct dentry *debugfs_create_stacktrace_depth( - const char *name, mode_t mode, + const char *name, umode_t mode, struct dentry *parent, unsigned long *value) { return debugfs_create_file(name, mode, parent, value, @@ -193,7 +193,7 @@ static int debugfs_atomic_t_get(void *data, u64 *val) DEFINE_SIMPLE_ATTRIBUTE(fops_atomic_t, debugfs_atomic_t_get, debugfs_atomic_t_set, "%lld\n"); -static struct dentry *debugfs_create_atomic_t(const char *name, mode_t mode, +static struct dentry *debugfs_create_atomic_t(const char *name, umode_t mode, struct dentry *parent, atomic_t *value) { return debugfs_create_file(name, mode, parent, value, &fops_atomic_t); @@ -202,7 +202,7 @@ static struct dentry *debugfs_create_atomic_t(const char *name, mode_t mode, struct dentry *fault_create_debugfs_attr(const char *name, struct dentry *parent, struct fault_attr *attr) { - mode_t mode = S_IFREG | S_IRUSR | S_IWUSR; + umode_t mode = S_IFREG | S_IRUSR | S_IWUSR; struct dentry *dir; dir = debugfs_create_dir(name, parent); diff --git a/lib/kobject.c b/lib/kobject.c index 640bd98a4c8a..c33d7a18d635 100644 --- a/lib/kobject.c +++ b/lib/kobject.c @@ -746,43 +746,11 @@ void kset_unregister(struct kset *k) */ struct kobject *kset_find_obj(struct kset *kset, const char *name) { - return kset_find_obj_hinted(kset, name, NULL); -} - -/** - * kset_find_obj_hinted - search for object in kset given a predecessor hint. - * @kset: kset we're looking in. - * @name: object's name. - * @hint: hint to possible object's predecessor. 
- * - * Check the hint's next object and if it is a match return it directly, - * otherwise, fall back to the behavior of kset_find_obj(). Either way - * a reference for the returned object is held and the reference on the - * hinted object is released. - */ -struct kobject *kset_find_obj_hinted(struct kset *kset, const char *name, - struct kobject *hint) -{ struct kobject *k; struct kobject *ret = NULL; spin_lock(&kset->list_lock); - if (!hint) - goto slow_search; - - /* end of list detection */ - if (hint->entry.next == kset->list.next) - goto slow_search; - - k = container_of(hint->entry.next, struct kobject, entry); - if (!kobject_name(k) || strcmp(kobject_name(k), name)) - goto slow_search; - - ret = kobject_get(k); - goto unlock_exit; - -slow_search: list_for_each_entry(k, &kset->list, entry) { if (kobject_name(k) && !strcmp(kobject_name(k), name)) { ret = kobject_get(k); @@ -790,12 +758,7 @@ slow_search: } } -unlock_exit: spin_unlock(&kset->list_lock); - - if (hint) - kobject_put(hint); - return ret; } diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index ad72a03ce5e9..e66e9b632617 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c @@ -259,6 +259,9 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, struct sk_buff *skb; size_t len; + if (!netlink_has_listeners(uevent_sock, 1)) + continue; + /* allocate message with the maximum possible size */ len = strlen(action_string) + strlen(devpath) + 2; skb = alloc_skb(len + env->buflen, GFP_KERNEL); diff --git a/lib/kref.c b/lib/kref.c deleted file mode 100644 index 3efb882b11db..000000000000 --- a/lib/kref.c +++ /dev/null @@ -1,97 +0,0 @@ -/* - * kref.c - library routines for handling generic reference counted objects - * - * Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com> - * Copyright (C) 2004 IBM Corp. - * - * based on lib/kobject.c which was: - * Copyright (C) 2002-2003 Patrick Mochel <mochel@osdl.org> - * - * This file is released under the GPLv2. - * - */ - -#include <linux/kref.h> -#include <linux/module.h> -#include <linux/slab.h> - -/** - * kref_init - initialize object. - * @kref: object in question. - */ -void kref_init(struct kref *kref) -{ - atomic_set(&kref->refcount, 1); - smp_mb(); -} - -/** - * kref_get - increment refcount for object. - * @kref: object. - */ -void kref_get(struct kref *kref) -{ - WARN_ON(!atomic_read(&kref->refcount)); - atomic_inc(&kref->refcount); - smp_mb__after_atomic_inc(); -} - -/** - * kref_put - decrement refcount for object. - * @kref: object. - * @release: pointer to the function that will clean up the object when the - * last reference to the object is released. - * This pointer is required, and it is not acceptable to pass kfree - * in as this function. - * - * Decrement the refcount, and if 0, call release(). - * Return 1 if the object was removed, otherwise return 0. Beware, if this - * function returns 0, you still can not count on the kref from remaining in - * memory. Only use the return value if you want to see if the kref is now - * gone, not present. - */ -int kref_put(struct kref *kref, void (*release)(struct kref *kref)) -{ - WARN_ON(release == NULL); - WARN_ON(release == (void (*)(struct kref *))kfree); - - if (atomic_dec_and_test(&kref->refcount)) { - release(kref); - return 1; - } - return 0; -} - - -/** - * kref_sub - subtract a number of refcounts for object. - * @kref: object. - * @count: Number of recounts to subtract. 
- * @release: pointer to the function that will clean up the object when the - * last reference to the object is released. - * This pointer is required, and it is not acceptable to pass kfree - * in as this function. - * - * Subtract @count from the refcount, and if 0, call release(). - * Return 1 if the object was removed, otherwise return 0. Beware, if this - * function returns 0, you still can not count on the kref from remaining in - * memory. Only use the return value if you want to see if the kref is now - * gone, not present. - */ -int kref_sub(struct kref *kref, unsigned int count, - void (*release)(struct kref *kref)) -{ - WARN_ON(release == NULL); - WARN_ON(release == (void (*)(struct kref *))kfree); - - if (atomic_sub_and_test((int) count, &kref->refcount)) { - release(kref); - return 1; - } - return 0; -} - -EXPORT_SYMBOL(kref_init); -EXPORT_SYMBOL(kref_get); -EXPORT_SYMBOL(kref_put); -EXPORT_SYMBOL(kref_sub); diff --git a/lib/reciprocal_div.c b/lib/reciprocal_div.c index 6a3bd48fa2a0..75510e94f7d0 100644 --- a/lib/reciprocal_div.c +++ b/lib/reciprocal_div.c @@ -1,5 +1,6 @@ #include <asm/div64.h> #include <linux/reciprocal_div.h> +#include <linux/export.h> u32 reciprocal_value(u32 k) { @@ -7,3 +8,4 @@ u32 reciprocal_value(u32 k) do_div(val, k); return (u32)val; } +EXPORT_SYMBOL(reciprocal_value); diff --git a/lib/swiotlb.c b/lib/swiotlb.c index 99093b396145..058935ef3975 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -110,11 +110,11 @@ setup_io_tlb_npages(char *str) __setup("swiotlb=", setup_io_tlb_npages); /* make io_tlb_overflow tunable too? */ -unsigned long swioltb_nr_tbl(void) +unsigned long swiotlb_nr_tbl(void) { return io_tlb_nslabs; } - +EXPORT_SYMBOL_GPL(swiotlb_nr_tbl); /* Note that this doesn't work with highmem page */ static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev, volatile void *address) @@ -321,6 +321,7 @@ void __init swiotlb_free(void) free_bootmem_late(__pa(io_tlb_start), PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); } + io_tlb_nslabs = 0; } static int is_swiotlb_buffer(phys_addr_t paddr) diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 993599e66e5a..8e75003d62f6 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -777,6 +777,18 @@ char *uuid_string(char *buf, char *end, const u8 *addr, return string(buf, end, uuid, spec); } +static +char *netdev_feature_string(char *buf, char *end, const u8 *addr, + struct printf_spec spec) +{ + spec.flags |= SPECIAL | SMALL | ZEROPAD; + if (spec.field_width == -1) + spec.field_width = 2 + 2 * sizeof(netdev_features_t); + spec.base = 16; + + return number(buf, end, *(const netdev_features_t *)addr, spec); +} + int kptr_restrict __read_mostly; /* @@ -824,6 +836,7 @@ int kptr_restrict __read_mostly; * Do not use this feature without some mechanism to verify the * correctness of the format string and va_list arguments. * - 'K' For a kernel pointer that should be hidden from unprivileged users + * - 'NF' For a netdev_features_t * * Note: The difference between 'S' and 'F' is that on ia64 and ppc64 * function pointers are really function descriptors, which contain a @@ -896,6 +909,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr, has_capability_noaudit(current, CAP_SYSLOG)))) ptr = NULL; break; + case 'N': + switch (fmt[1]) { + case 'F': + return netdev_feature_string(buf, end, ptr, spec); + } + break; } spec.flags |= SMALL; if (spec.field_width == -1) { |
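
The new devm_request_and_ioremap() folds the usual devm_request_mem_region()/devm_ioremap() pair into one call, as its kernel-doc example above shows. A slightly fuller, hypothetical platform-driver probe using it (the driver name and register are invented; only the helper itself comes from this diff):

	#include <linux/io.h>
	#include <linux/module.h>
	#include <linux/platform_device.h>

	static int __devinit foo_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *base;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		base = devm_request_and_ioremap(&pdev->dev, res);
		if (!base)
			return -EADDRNOTAVAIL;	/* the helper already logged why */

		/*
		 * Both the region and the mapping are released automatically
		 * on probe failure or driver detach; no unwinding is needed.
		 */
		writel(0x1, base);		/* hypothetical "enable" register */

		return 0;
	}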
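
The new dynamic queue limits code is meant to be driven from a driver's transmit and completion paths: bytes are accounted when handed to the hardware, and dql_completed() above recalculates the limit as completions come back. A minimal sketch of that flow, assuming the inline helpers dql_queued() and dql_avail() from include/linux/dynamic_queue_limits.h (not shown in this diff) and an invented ring structure:

	#include <linux/types.h>
	#include <linux/dynamic_queue_limits.h>

	/* Hypothetical driver ring; only the dql member matters here. */
	struct foo_tx_ring {
		struct dql dql;
		/* ... descriptor bookkeeping ... */
	};

	/*
	 * Transmit path: account the bytes just posted to the hardware and
	 * tell the caller whether it may keep the queue running.
	 */
	static bool foo_tx_queued(struct foo_tx_ring *ring, unsigned int bytes)
	{
		dql_queued(&ring->dql, bytes);
		return dql_avail(&ring->dql) >= 0;	/* false => stop the queue */
	}

	/*
	 * Completion path: report what the hardware finished; dql_completed()
	 * grows the limit on starvation and shrinks it when slack builds up.
	 */
	static bool foo_tx_cleaned(struct foo_tx_ring *ring, unsigned int bytes)
	{
		dql_completed(&ring->dql, bytes);
		return dql_avail(&ring->dql) >= 0;	/* true => safe to wake the queue */
	}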
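
lib/kref.c is removed here and kref.o dropped from the Makefile; the kref routines now live as static inlines in <linux/kref.h> (that change is outside this lib diff), so callers do not change. A small reminder of the pattern the removed kernel-doc described, using an invented object type:

	#include <linux/kernel.h>
	#include <linux/kref.h>
	#include <linux/slab.h>

	/* Illustrative refcounted object. */
	struct widget {
		struct kref kref;
		/* ... payload ... */
	};

	/* Runs automatically when the last reference is dropped. */
	static void widget_release(struct kref *kref)
	{
		struct widget *w = container_of(kref, struct widget, kref);

		kfree(w);
	}

	static struct widget *widget_alloc(void)
	{
		struct widget *w = kzalloc(sizeof(*w), GFP_KERNEL);

		if (w)
			kref_init(&w->kref);	/* refcount starts at 1 */
		return w;
	}

	static void widget_put(struct widget *w)
	{
		/*
		 * Returns 1 (after calling widget_release()) when this was the
		 * last reference; if it returns 0 the object may still vanish
		 * at any time, so it must not be touched again.
		 */
		kref_put(&w->kref, widget_release);
	}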
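
reciprocal_value() is now exported so modules can use reciprocal division: the real divide is done once per divisor, and each later division becomes a multiply and shift via reciprocal_divide() from <linux/reciprocal_div.h> (the inline helper is assumed from that header, not shown in this diff). A sketch with an arbitrary divisor:

	#include <linux/types.h>
	#include <linux/reciprocal_div.h>

	static u32 slot_count = 12;	/* arbitrary example divisor */
	static u32 slot_recip;		/* precomputed reciprocal of slot_count */

	static void slots_init(void)
	{
		slot_recip = reciprocal_value(slot_count);	/* one real divide */
	}

	/* Hot path: x / slot_count computed without a hardware divide. */
	static u32 slot_for(u32 x)
	{
		return reciprocal_divide(x, slot_recip);
	}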
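
Like the other %p extensions, the new 'NF' specifier takes a pointer to the netdev_features_t rather than the value itself. An illustrative caller (the helper function is made up):

	#include <linux/netdevice.h>
	#include <linux/printk.h>

	/* Dump a device's feature mask using the new specifier. */
	static void foo_dump_features(const struct net_device *dev)
	{
		/* "%pNF" wants &dev->features, not dev->features. */
		pr_info("%s: features = %pNF\n", dev->name, &dev->features);
	}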