| field | value | date |
|---|---|---|
| author | Christoph Lameter <clameter@sgi.com> | 2007-02-10 01:42:55 -0800 |
| committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-02-11 10:51:17 -0800 |
| commit | 7c5cae368a6c44eac0e817ae130301b65ff446dc (patch) | |
| tree | dccede940e8ef4b341af03a8a0ac9da951e74616 | |
| parent | 8c8cc2c10c21943dd3499a2df7aac835093b37f3 (diff) | |
[PATCH] slab: use parameter passed to cache_reap to determine pointer to work structure
Use the pointer passed to cache_reap to determine the work pointer and
consolidate exit paths.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- | mm/slab.c | 15 |
1 file changed, 7 insertions(+), 8 deletions(-)
```diff
diff --git a/mm/slab.c b/mm/slab.c
index b595323e24a2..caa8f87e04eb 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -4029,18 +4029,17 @@ void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
  * If we cannot acquire the cache chain mutex then just give up - we'll try
  * again on the next iteration.
  */
-static void cache_reap(struct work_struct *unused)
+static void cache_reap(struct work_struct *w)
 {
 	struct kmem_cache *searchp;
 	struct kmem_list3 *l3;
 	int node = numa_node_id();
+	struct delayed_work *work =
+		container_of(w, struct delayed_work, work);
 
-	if (!mutex_trylock(&cache_chain_mutex)) {
+	if (!mutex_trylock(&cache_chain_mutex))
 		/* Give up. Setup the next iteration. */
-		schedule_delayed_work(&__get_cpu_var(reap_work),
-				round_jiffies_relative(REAPTIMEOUT_CPUC));
-		return;
-	}
+		goto out;
 
 	list_for_each_entry(searchp, &cache_chain, next) {
 		check_irq_on();
@@ -4083,9 +4082,9 @@ next:
 	mutex_unlock(&cache_chain_mutex);
 	next_reap_node();
 	refresh_cpu_vm_stats(smp_processor_id());
+out:
 	/* Set up the next iteration */
-	schedule_delayed_work(&__get_cpu_var(reap_work),
-			round_jiffies_relative(REAPTIMEOUT_CPUC));
+	schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_CPUC));
 }
 
 #ifdef CONFIG_PROC_FS
```
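For readers unfamiliar with the pattern, the sketch below shows in plain userspace C how `container_of()` lets a callback that only receives a pointer to an embedded `work_struct` recover the enclosing `delayed_work`; this is what allows `cache_reap()` to reschedule itself without reaching for the per-CPU `reap_work` variable. The struct layouts and the `reap()` function here are simplified stand-ins for illustration, not the kernel's actual definitions.

```c
/*
 * Minimal userspace sketch of the container_of pattern the patch relies on:
 * given a pointer to an embedded member, recover the enclosing structure.
 * The types and names below are illustrative only; the kernel's real
 * definitions live in <linux/kernel.h> and <linux/workqueue.h>.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct {			/* stand-in for the kernel's work_struct */
	void (*func)(struct work_struct *w);
};

struct delayed_work {			/* stand-in for the kernel's delayed_work */
	struct work_struct work;	/* embedded member, as in the kernel */
	long timeout;
};

/* The callback is handed only the inner work_struct pointer ... */
static void reap(struct work_struct *w)
{
	/* ... but can recover its own delayed_work and use it to
	 * reschedule itself, so no global/per-CPU handle is needed. */
	struct delayed_work *dw = container_of(w, struct delayed_work, work);

	printf("recovered delayed_work, timeout=%ld\n", dw->timeout);
}

int main(void)
{
	struct delayed_work dw = { .work = { .func = reap }, .timeout = 42 };

	dw.work.func(&dw.work);		/* roughly what the workqueue core does */
	return 0;
}
```

The arithmetic is the same as the kernel's `container_of()`: subtract `offsetof(type, member)` from the member's address to get the address of the containing structure (the in-kernel macro adds extra type checking on top).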