author     Ming Lei <ming.lei@redhat.com>     2018-04-25 04:01:44 +0800
committer  Jens Axboe <axboe@kernel.dk>       2018-04-25 09:49:22 -0600
commit     4412efecf7fda3b8f9f18feed7938f2281f5ccbc
tree       911c447444e015012fa54ea15915f530f6bcef39 /block
parent     fe644072dfee069d97a66ea9a80f4bc461499e6a
Revert "blk-mq: remove code for dealing with remapping queue"
This reverts commit 37c7c6c76d431dd7ef9c29d95f6052bd425f004c.
It turns out that some drivers (most of them FC drivers) may not use
managed IRQ affinity and at the same time provide their own customized
.map_queues, so keep this code to avoid a regression.
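For context, here is a minimal sketch of what such a customized .map_queues might look like against the blk-mq API of this era (where blk_mq_tag_set still carries a single mq_map array). It is not taken from any of the FC drivers referred to above; the name example_map_queues and the round-robin policy are illustrative assumptions only. The point is that a driver-defined mapping built from possible CPUs, without consulting managed IRQ affinity, can leave an hctx mapped only to CPUs that are not online yet, which is exactly the situation the remapping code restored by this revert has to handle.

#include <linux/blk-mq.h>
#include <linux/cpumask.h>

/*
 * Illustrative only: a driver-defined queue mapping that ignores
 * managed IRQ affinity and simply spreads all possible CPUs across
 * the hardware queues round-robin.
 */
static int example_map_queues(struct blk_mq_tag_set *set)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		set->mq_map[cpu] = cpu % set->nr_hw_queues;

	return 0;
}

static const struct blk_mq_ops example_mq_ops = {
	/* .queue_rq and the other mandatory callbacks omitted */
	.map_queues	= example_map_queues,
};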
Reported-by: Laurence Oberman <loberman@redhat.com>
Tested-by: Laurence Oberman <loberman@redhat.com>
Tested-by: Christian Borntraeger <borntraeger@de.ibm.com>
Tested-by: Stefan Haberland <sth@linux.vnet.ibm.com>
Cc: Ewan Milne <emilne@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block')
-rw-r--r--   block/blk-mq.c   34
1 file changed, 31 insertions(+), 3 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index e4aa36817367..c3621453ad87 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2336,7 +2336,7 @@ static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
 
 static void blk_mq_map_swqueue(struct request_queue *q)
 {
-	unsigned int i;
+	unsigned int i, hctx_idx;
 	struct blk_mq_hw_ctx *hctx;
 	struct blk_mq_ctx *ctx;
 	struct blk_mq_tag_set *set = q->tag_set;
@@ -2353,8 +2353,23 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 
 	/*
 	 * Map software to hardware queues.
+	 *
+	 * If the cpu isn't present, the cpu is mapped to first hctx.
 	 */
 	for_each_possible_cpu(i) {
+		hctx_idx = q->mq_map[i];
+		/* unmapped hw queue can be remapped after CPU topo changed */
+		if (!set->tags[hctx_idx] &&
+		    !__blk_mq_alloc_rq_map(set, hctx_idx)) {
+			/*
+			 * If tags initialization fail for some hctx,
+			 * that hctx won't be brought online.  In this
+			 * case, remap the current ctx to hctx[0] which
+			 * is guaranteed to always have tags allocated
+			 */
+			q->mq_map[i] = 0;
+		}
+
 		ctx = per_cpu_ptr(q->queue_ctx, i);
 		hctx = blk_mq_map_queue(q, i);
 
@@ -2366,8 +2381,21 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 	mutex_unlock(&q->sysfs_lock);
 
 	queue_for_each_hw_ctx(q, hctx, i) {
-		/* every hctx should get mapped by at least one CPU */
-		WARN_ON(!hctx->nr_ctx);
+		/*
+		 * If no software queues are mapped to this hardware queue,
+		 * disable it and free the request entries.
+		 */
+		if (!hctx->nr_ctx) {
+			/* Never unmap queue 0.  We need it as a
+			 * fallback in case of a new remap fails
+			 * allocation
+			 */
+			if (i && set->tags[i])
+				blk_mq_free_map_and_requests(set, i);
+
+			hctx->tags = NULL;
+			continue;
+		}
 
 		hctx->tags = set->tags[i];
 		WARN_ON(!hctx->tags);