author		Jens Axboe <axboe@kernel.dk>	2018-10-29 13:13:29 -0600
committer	Jens Axboe <axboe@kernel.dk>	2018-11-07 13:44:59 -0700
commit		f31967f0e455d08d3ea1d2f849bf62dafc92dbf4
tree		22fbc79b5ba286e2add8f995a8cfa9f211200192 /block/blk-mq.c
parent		f9afca4d367b8c915f28d29fcaba7460640403ff
blk-mq: allow software queue to map to multiple hardware queues
The mapping used to be dependent on just the CPU location, but
now it's a tuple of (type, cpu) instead. This is a prep patch
for allowing a single software queue to map to multiple hardware
queues. No functional changes in this patch.
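To picture the per-type indirection, here is a minimal userspace sketch; MAX_TYPES, sw_ctx, hw_ctx, and ctx_index are hypothetical names chosen for illustration, not the kernel's identifiers:

#include <stdio.h>

/* Illustrative stand-ins only, not the kernel's definitions. */
#define MAX_TYPES 3

struct sw_ctx {
	/* One hctx index per queue type instead of a single scalar. */
	unsigned short index_hw[MAX_TYPES];
};

struct hw_ctx {
	unsigned int type;	/* which map this hardware queue serves */
};

/*
 * The old scalar lookup ctx->index_hw becomes a (type, cpu) tuple:
 * the cpu picks the sw_ctx, the hctx's type picks the array entry.
 */
static unsigned short ctx_index(const struct hw_ctx *hctx,
				const struct sw_ctx *ctx)
{
	return ctx->index_hw[hctx->type];
}

int main(void)
{
	struct sw_ctx ctx = { .index_hw = { 0, 4, 2 } };
	struct hw_ctx hctx = { .type = 1 };

	/* Same software queue, different index under each type's map. */
	printf("index under type-1 map: %u\n", ctx_index(&hctx, &ctx));
	return 0;
}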
This changes the software queue count to an unsigned short
to save a bit of space. We can still support 64K-1 CPUs,
which should be enough. Add a check to catch a wrap.
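A small sketch of why the added BUG_ON(!hctx->nr_ctx) catches the wrap; the kernel names in the comments are from the patch below, the program itself is only an illustration:

#include <stdio.h>

int main(void)
{
	/*
	 * nr_ctx is an unsigned short: it wraps to 0 past 65535,
	 * which is why 64K-1 software queues is the hard ceiling.
	 */
	unsigned short nr_ctx = 65535;

	nr_ctx++;	/* mirrors hctx->ctxs[hctx->nr_ctx++] = ctx; */

	/*
	 * The patch's BUG_ON(!hctx->nr_ctx) encodes exactly this test:
	 * a counter that was just incremented can only be zero on wrap.
	 */
	if (!nr_ctx)
		printf("wrap detected\n");
	return 0;
}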
Reviewed-by: Hannes Reinecke <hare@suse.com>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--	block/blk-mq.c	22
1 file changed, 16 insertions(+), 6 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 67dec64440dd..31976bff8ad2 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -75,14 +75,18 @@ static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
 				     struct blk_mq_ctx *ctx)
 {
-	if (!sbitmap_test_bit(&hctx->ctx_map, ctx->index_hw))
-		sbitmap_set_bit(&hctx->ctx_map, ctx->index_hw);
+	const int bit = ctx->index_hw[hctx->type];
+
+	if (!sbitmap_test_bit(&hctx->ctx_map, bit))
+		sbitmap_set_bit(&hctx->ctx_map, bit);
 }
 
 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
 				      struct blk_mq_ctx *ctx)
 {
-	sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw);
+	const int bit = ctx->index_hw[hctx->type];
+
+	sbitmap_clear_bit(&hctx->ctx_map, bit);
 }
 
 struct mq_inflight {
@@ -955,7 +959,7 @@ static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
 					struct blk_mq_ctx *start)
 {
-	unsigned off = start ? start->index_hw : 0;
+	unsigned off = start ? start->index_hw[hctx->type] : 0;
 	struct dispatch_rq_data data = {
 		.hctx = hctx,
 		.rq = NULL,
@@ -2343,10 +2347,16 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 
 		ctx = per_cpu_ptr(q->queue_ctx, i);
 		hctx = blk_mq_map_queue_type(q, 0, i);
-
+		hctx->type = 0;
 		cpumask_set_cpu(i, hctx->cpumask);
-		ctx->index_hw = hctx->nr_ctx;
+		ctx->index_hw[hctx->type] = hctx->nr_ctx;
 		hctx->ctxs[hctx->nr_ctx++] = ctx;
+
+		/*
+		 * If the nr_ctx type overflows, we have exceeded the
+		 * amount of sw queues we can support.
+		 */
+		BUG_ON(!hctx->nr_ctx);
 	}
 
 	mutex_unlock(&q->sysfs_lock);
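For reference, a hedged userspace analogue of the mark/clear pair in the first hunk, with a plain unsigned long standing in for struct sbitmap (all names illustrative, not the kernel's API):

#include <stdio.h>

/* A plain word stands in for the kernel's struct sbitmap ctx_map. */
static unsigned long ctx_map;

/*
 * Test before set, as blk_mq_hctx_mark_pending() does, so a bit that
 * is already set does not dirty the cacheline again.
 */
static void mark_pending(unsigned int bit)
{
	if (!(ctx_map & (1UL << bit)))
		ctx_map |= 1UL << bit;
}

static void clear_pending(unsigned int bit)
{
	ctx_map &= ~(1UL << bit);
}

int main(void)
{
	/* After this patch the bit is derived per type:
	 * bit = ctx->index_hw[hctx->type]. */
	unsigned int bit = 2;

	mark_pending(bit);
	printf("pending: %d\n", !!(ctx_map & (1UL << bit)));
	clear_pending(bit);
	printf("pending: %d\n", !!(ctx_map & (1UL << bit)));
	return 0;
}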