| author | Christoph Hellwig <hch@lst.de> | 2017-01-27 09:51:45 -0700 |
|---|---|---|
| committer | Jens Axboe <axboe@fb.com> | 2017-01-27 15:08:35 -0700 |
| commit | 6d247d7f71d1fa4b66a5f4da7b1daa21510d529b | |
| tree | 449921f154b9bd0176453478fdd7a8c6911be5ad | |
| parent | 5ea708d15a928f7a479987704203616d3274c03b | |
block: allow specifying size for extra command data
This mirrors the blk-mq capability to allocate extra driver-specific data
behind struct request by setting a cmd_size field, as well as having a
constructor / destructor for it.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
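To make the new hooks concrete, here is a minimal driver-side sketch for a legacy (non-blk-mq) queue. The driver name, payload struct, and buffer size are invented for illustration; only cmd_size, init_rq_fn, and exit_rq_fn come from this patch:

```c
#include <linux/blkdev.h>
#include <linux/slab.h>

struct mydrv_cmd {			/* hypothetical per-request payload */
	void *sense_buffer;
};

/*
 * The payload lives directly behind struct request: alloc_request_size()
 * kmallocs sizeof(struct request) + q->cmd_size in one chunk, and
 * blk_alloc_flush_queue() lays the flush request out the same way.
 */
static inline struct mydrv_cmd *mydrv_rq_to_pdu(struct request *rq)
{
	return (struct mydrv_cmd *)(rq + 1);
}

/* Constructor: runs for each mempool element and for the flush request. */
static int mydrv_init_rq(struct request_queue *q, struct request *rq,
			 gfp_t gfp)
{
	struct mydrv_cmd *cmd = mydrv_rq_to_pdu(rq);

	cmd->sense_buffer = kzalloc(96, gfp);	/* size is made up */
	return cmd->sense_buffer ? 0 : -ENOMEM;
}

/* Destructor: undoes the constructor when a request is freed. */
static void mydrv_exit_rq(struct request_queue *q, struct request *rq)
{
	struct mydrv_cmd *cmd = mydrv_rq_to_pdu(rq);

	kfree(cmd->sense_buffer);
}
```

With this in place, every request drawn from the queue's mempool, as well as the preallocated flush request, carries q->cmd_size bytes of driver data directly behind struct request, mirroring what blk_mq_rq_to_pdu() provides on the blk-mq side.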
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | block/blk-core.c | 59 |
| -rw-r--r-- | block/blk-flush.c | 5 |
| -rw-r--r-- | block/blk-sysfs.c | 7 |
| -rw-r--r-- | include/linux/blkdev.h | 7 |

4 files changed, 61 insertions, 17 deletions
```diff
diff --git a/block/blk-core.c b/block/blk-core.c
index 09819d24d385..0a485ad802c9 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -606,17 +606,41 @@ void blk_cleanup_queue(struct request_queue *q)
 EXPORT_SYMBOL(blk_cleanup_queue);
 
 /* Allocate memory local to the request queue */
-static void *alloc_request_struct(gfp_t gfp_mask, void *data)
+static void *alloc_request_simple(gfp_t gfp_mask, void *data)
 {
-	int nid = (int)(long)data;
-	return kmem_cache_alloc_node(request_cachep, gfp_mask, nid);
+	struct request_queue *q = data;
+
+	return kmem_cache_alloc_node(request_cachep, gfp_mask, q->node);
 }
 
-static void free_request_struct(void *element, void *unused)
+static void free_request_simple(void *element, void *data)
 {
 	kmem_cache_free(request_cachep, element);
 }
 
+static void *alloc_request_size(gfp_t gfp_mask, void *data)
+{
+	struct request_queue *q = data;
+	struct request *rq;
+
+	rq = kmalloc_node(sizeof(struct request) + q->cmd_size, gfp_mask,
+			q->node);
+	if (rq && q->init_rq_fn && q->init_rq_fn(q, rq, gfp_mask) < 0) {
+		kfree(rq);
+		rq = NULL;
+	}
+	return rq;
+}
+
+static void free_request_size(void *element, void *data)
+{
+	struct request_queue *q = data;
+
+	if (q->exit_rq_fn)
+		q->exit_rq_fn(q, element);
+	kfree(element);
+}
+
 int blk_init_rl(struct request_list *rl, struct request_queue *q,
 		gfp_t gfp_mask)
 {
@@ -629,10 +653,15 @@ int blk_init_rl(struct request_list *rl, struct request_queue *q,
 	init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
 	init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);
 
-	rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, alloc_request_struct,
-					  free_request_struct,
-					  (void *)(long)q->node, gfp_mask,
-					  q->node);
+	if (q->cmd_size) {
+		rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ,
+				alloc_request_size, free_request_size,
+				q, gfp_mask, q->node);
+	} else {
+		rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ,
+				alloc_request_simple, free_request_simple,
+				q, gfp_mask, q->node);
+	}
 	if (!rl->rq_pool)
 		return -ENOMEM;
 
@@ -846,12 +875,15 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio);
 
 int blk_init_allocated_queue(struct request_queue *q)
 {
-	q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, 0);
+	q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size);
 	if (!q->fq)
 		return -ENOMEM;
 
+	if (q->init_rq_fn && q->init_rq_fn(q, q->fq->flush_rq, GFP_KERNEL))
+		goto out_free_flush_queue;
+
 	if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
-		goto fail;
+		goto out_exit_flush_rq;
 
 	INIT_WORK(&q->timeout_work, blk_timeout_work);
 	q->queue_flags		|= QUEUE_FLAG_DEFAULT;
@@ -869,13 +901,16 @@ int blk_init_allocated_queue(struct request_queue *q)
 	/* init elevator */
 	if (elevator_init(q, NULL)) {
 		mutex_unlock(&q->sysfs_lock);
-		goto fail;
+		goto out_exit_flush_rq;
 	}
 
 	mutex_unlock(&q->sysfs_lock);
 	return 0;
 
-fail:
+out_exit_flush_rq:
+	if (q->exit_rq_fn)
+		q->exit_rq_fn(q, q->fq->flush_rq);
+out_free_flush_queue:
 	blk_free_flush_queue(q->fq);
 	wbt_exit(q);
 	return -ENOMEM;
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 4427896641ac..0a0358e48b76 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -547,11 +547,10 @@ struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
 	if (!fq)
 		goto fail;
 
-	if (q->mq_ops) {
+	if (q->mq_ops)
 		spin_lock_init(&fq->mq_flush_lock);
-		rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
-	}
+	rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
 
 	fq->flush_rq = kzalloc_node(rq_sz, GFP_KERNEL, node);
 	if (!fq->flush_rq)
 		goto fail_rq;
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 1dbce057592d..894f77342fd4 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -814,10 +814,13 @@ static void blk_release_queue(struct kobject *kobj)
 	if (q->queue_tags)
 		__blk_queue_free_tags(q);
 
-	if (!q->mq_ops)
+	if (!q->mq_ops) {
+		if (q->exit_rq_fn)
+			q->exit_rq_fn(q, q->fq->flush_rq);
 		blk_free_flush_queue(q->fq);
-	else
+	} else {
 		blk_mq_release(q);
+	}
 
 	blk_trace_shutdown(q);
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 6b1efc5760ea..461b7cf6af1d 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -273,6 +273,8 @@ typedef void (softirq_done_fn)(struct request *);
 typedef int (dma_drain_needed_fn)(struct request *);
 typedef int (lld_busy_fn) (struct request_queue *q);
 typedef int (bsg_job_fn) (struct bsg_job *);
+typedef int (init_rq_fn)(struct request_queue *, struct request *, gfp_t);
+typedef void (exit_rq_fn)(struct request_queue *, struct request *);
 
 enum blk_eh_timer_return {
 	BLK_EH_NOT_HANDLED,
@@ -408,6 +410,8 @@ struct request_queue {
 	rq_timed_out_fn		*rq_timed_out_fn;
 	dma_drain_needed_fn	*dma_drain_needed;
 	lld_busy_fn		*lld_busy_fn;
+	init_rq_fn		*init_rq_fn;
+	exit_rq_fn		*exit_rq_fn;
 
 	const struct blk_mq_ops	*mq_ops;
 
@@ -577,6 +581,9 @@ struct request_queue {
 #endif
 
 	bool			mq_sysfs_init_done;
+
+	size_t			cmd_size;
+	void			*rq_alloc_data;
 };
 
 #define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
```
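Continuing the hypothetical driver above, the new fields have to be populated before blk_init_allocated_queue() is called, since that is where the flush request is constructed and the request mempool is sized. A sketch of the setup order (the mydrv_* names are again invented):

```c
/* Hypothetical request handler; real dispatch logic omitted. */
static void mydrv_request_fn(struct request_queue *q)
{
}

static struct request_queue *mydrv_create_queue(void)
{
	struct request_queue *q;

	q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
	if (!q)
		return NULL;

	q->request_fn = mydrv_request_fn;
	/*
	 * Must be set before blk_init_allocated_queue(): it sizes the
	 * flush request and the request mempool from these fields.
	 */
	q->cmd_size = sizeof(struct mydrv_cmd);
	q->init_rq_fn = mydrv_init_rq;
	q->exit_rq_fn = mydrv_exit_rq;

	if (blk_init_allocated_queue(q) < 0) {
		blk_cleanup_queue(q);
		return NULL;
	}
	return q;
}
```

On teardown, the blk-sysfs.c hunk above makes blk_release_queue() call exit_rq_fn on the flush request for legacy queues, matching the constructor call that blk_init_allocated_queue() now performs, so the driver only frees what it allocated in its own callbacks.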