author		Jesper Dangaard Brouer <brouer@redhat.com>	2016-03-15 14:53:50 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-03-15 16:55:16 -0700
commit		2a777eac173a53b33f2f7dbed2b61a1f5eb0b531 (patch)
tree		c499f583786db5cd2ae9da5130be1051539eb586
parent		d5e3ed66d6f260b3bb68cb5cf0fe79777e8febf0 (diff)
slab: implement bulk alloc in SLAB allocator
This patch implements the alloc side of the bulk API for the SLAB allocator.

Further optimizations are still possible, e.g. by changing the call to __do_cache_alloc() into something that can return multiple objects at once. That optimization is left for later, given the end results already show a speedup in the area of 80%.

Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Vladimir Davydov <vdavydov@virtuozzo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
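For reference, a minimal sketch of how a caller might use the bulk API. This is not part of the patch: the helper name my_fill_batch(), the MY_BATCH size and the error handling are made up for illustration, while kmem_cache_alloc_bulk(), kmem_cache_free_bulk() and GFP_KERNEL are the real kernel interfaces touched by this series:

#include <linux/errno.h>
#include <linux/slab.h>

#define MY_BATCH 16	/* illustrative batch size */

static int my_fill_batch(struct kmem_cache *cache)
{
	void *batch[MY_BATCH];
	int n, i;

	/*
	 * As implemented here, the return value is the number of objects
	 * placed in batch[]: either the full request or 0 on failure,
	 * never a partial batch.
	 */
	n = kmem_cache_alloc_bulk(cache, GFP_KERNEL, MY_BATCH, batch);
	if (!n)
		return -ENOMEM;

	for (i = 0; i < n; i++) {
		/* ... initialize and use batch[i] ... */
	}

	/* Hand the whole batch back in a single call. */
	kmem_cache_free_bulk(cache, n, batch);
	return 0;
}

The point of the bulk calls is to take the IRQ-disable/enable and pre/post hook overhead once per batch instead of once per object, as the hunk below shows.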
-rw-r--r--	mm/slab.c | 37 +++++++++++++++++++++++++++++++++--
1 file changed, 35 insertions(+), 2 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index f872208a0912..0c2e2cafbc83 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3392,9 +3392,42 @@ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
EXPORT_SYMBOL(kmem_cache_free_bulk);
int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
-			  void **p)
+			  void **p)
{
-	return __kmem_cache_alloc_bulk(s, flags, size, p);
+	size_t i;
+
+	s = slab_pre_alloc_hook(s, flags);
+	if (!s)
+		return 0;
+
+	cache_alloc_debugcheck_before(s, flags);
+
+	local_irq_disable();
+	for (i = 0; i < size; i++) {
+		void *objp = __do_cache_alloc(s, flags);
+
+		/* this call could be done outside IRQ disabled section */
+		objp = cache_alloc_debugcheck_after(s, flags, objp, _RET_IP_);
+
+		if (unlikely(!objp))
+			goto error;
+		p[i] = objp;
+	}
+	local_irq_enable();
+
+	/* Clear memory outside IRQ disabled section */
+	if (unlikely(flags & __GFP_ZERO))
+		for (i = 0; i < size; i++)
+			memset(p[i], 0, s->object_size);
+
+	slab_post_alloc_hook(s, flags, size, p);
+	/* FIXME: Trace call missing. Christoph would like a bulk variant */
+	return size;
+error:
+	local_irq_enable();
+	slab_post_alloc_hook(s, flags, i, p);
+	__kmem_cache_free_bulk(s, i, p);
+	return 0;
}
EXPORT_SYMBOL(kmem_cache_alloc_bulk);