From f007a3d66c5480c8dae3fa20a89a06861ef1f5db Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Tue, 2 Feb 2021 18:19:22 +0100
Subject: block: streamline bvec_alloc

Avoid the pointless goto by trying the slab allocation first and
falling through to the mempool.

Signed-off-by: Christoph Hellwig
Signed-off-by: Jens Axboe
---
 block/bio.c | 26 ++++++++++----------------
 1 file changed, 10 insertions(+), 16 deletions(-)

diff --git a/block/bio.c b/block/bio.c
index c2152c4bf8a3..321b3479a154 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -172,8 +172,6 @@ static inline gfp_t bvec_alloc_gfp(gfp_t gfp)
 struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
 			   mempool_t *pool)
 {
-	struct bio_vec *bvl;
-
 	/*
 	 * see comment near bvec_array define!
 	 */
@@ -201,28 +199,24 @@ struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
 	}
 
 	/*
-	 * idx now points to the pool we want to allocate from. only the
-	 * 1-vec entry pool is mempool backed.
+	 * Try a slab allocation first for all smaller allocations. If that
+	 * fails and __GFP_DIRECT_RECLAIM is set retry with the mempool.
+	 * The mempool is sized to handle up to BIO_MAX_PAGES entries.
 	 */
-	if (*idx == BVEC_POOL_MAX) {
-fallback:
-		bvl = mempool_alloc(pool, gfp_mask);
-	} else {
+	if (*idx < BVEC_POOL_MAX) {
 		struct biovec_slab *bvs = bvec_slabs + *idx;
+		struct bio_vec *bvl;
 
-		/*
-		 * Try a slab allocation. If this fails and __GFP_DIRECT_RECLAIM
-		 * is set, retry with the 1-entry mempool
-		 */
 		bvl = kmem_cache_alloc(bvs->slab, bvec_alloc_gfp(gfp_mask));
-		if (unlikely(!bvl && (gfp_mask & __GFP_DIRECT_RECLAIM))) {
-			*idx = BVEC_POOL_MAX;
-			goto fallback;
+		if (likely(bvl) || !(gfp_mask & __GFP_DIRECT_RECLAIM)) {
+			(*idx)++;
+			return bvl;
 		}
+		*idx = BVEC_POOL_MAX;
 	}
 
 	(*idx)++;
-	return bvl;
+	return mempool_alloc(pool, gfp_mask);
 }
 
 void bio_uninit(struct bio *bio)
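
The point of the patch is a pure control-flow cleanup: rather than jumping to a
fallback: label when the per-size slab allocation fails, bvec_alloc now returns
early on slab success (or when a blocking fallback is not allowed) and falls
through to a single mempool_alloc() at the end of the function. The standalone
C sketch below mirrors that shape outside the kernel; try_slab(), try_mempool(),
and may_block are hypothetical stand-ins for kmem_cache_alloc(), mempool_alloc(),
and the __GFP_DIRECT_RECLAIM check, not actual kernel APIs.

	#include <stdio.h>
	#include <stdlib.h>
	#include <stdbool.h>

	/* Hypothetical stand-in for kmem_cache_alloc(): may fail. */
	static void *try_slab(size_t size, bool fail)
	{
		return fail ? NULL : malloc(size);
	}

	/* Hypothetical stand-in for mempool_alloc(): the blocking fallback. */
	static void *try_mempool(size_t size)
	{
		return malloc(size);
	}

	/*
	 * Post-patch shape: try the slab first, return early on success or
	 * when a blocking fallback is not allowed, otherwise fall through to
	 * the mempool. No goto/fallback label is needed.
	 */
	static void *alloc_vec(size_t size, bool slab_fails, bool may_block)
	{
		void *p = try_slab(size, slab_fails);

		if (p || !may_block)
			return p;
		return try_mempool(size);
	}

	int main(void)
	{
		void *a = alloc_vec(64, false, true);	/* slab path */
		void *b = alloc_vec(64, true, true);	/* mempool fallback */
		void *c = alloc_vec(64, true, false);	/* no fallback -> NULL */

		printf("slab=%p fallback=%p nonblocking=%p\n", a, b, c);
		free(a);
		free(b);
		free(c);
		return 0;
	}

Keeping the single fallback allocation at the bottom of the function lets the
success path return immediately, which is what removes the backwards goto from
the original code.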