author	Ming Lei <ming.lei@redhat.com>	2020-09-11 18:41:14 +0800
committer	Jens Axboe <axboe@kernel.dk>	2020-09-11 05:26:19 -0600
commit	285008501c65a3fcee05d2c2c26cbf629ceff2f0 (patch)
tree	c93edb67935d4dcde37c42660ea74eadce19fda5
parent	84ed2573c5427de47c71a69d8b0a18020d5f6dd5 (diff)
blk-mq: always allow reserved allocation in hctx_may_queue
NVMe shares a tagset between the fabrics queue and the admin queue, or between connect_q and the NS queue, so hctx_may_queue() can be called when allocating requests for these queues. Tags can be reserved in these tagsets.

Before error recovery there are often lots of in-flight requests which can't be completed, and a new reserved request may be needed on the error recovery path. However, hctx_may_queue() can keep returning false because of the many in-flight requests which can't be completed during error handling, so nothing can make progress.

Fix this issue by always allowing reserved tag allocation in hctx_may_queue(). This is reasonable because reserved tags are supposed to always be available.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Cc: David Milburn <dmilburn@redhat.com>
Cc: Ewan D. Milne <emilne@redhat.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
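For context, the fair-share check the message refers to works roughly like the simplified sketch below. This is not the verbatim kernel code, and exact flag and field names vary between kernel versions; it only illustrates why a queue whose share of a shared tagset is already occupied by un-completable requests keeps being refused.

/*
 * Simplified sketch of hctx_may_queue()'s fair-share logic, for context only.
 * Flag and field names are approximate (e.g. BLK_MQ_F_TAG_QUEUE_SHARED was
 * called BLK_MQ_F_TAG_SHARED on older kernels).
 */
static bool hctx_may_queue_sketch(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int depth, users;

	/* No limit unless the tagset is shared between request queues */
	if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
		return true;

	users = atomic_read(&hctx->tags->active_queues);
	if (!users)
		return true;

	/* Give each active queue roughly an equal share, but at least 4 tags */
	depth = max((bt->sb.depth + users - 1) / users, 4U);

	/*
	 * Before this patch, reserved allocations were also subject to this
	 * check; with the queue full of un-completable requests, nr_active
	 * never drops below depth and the allocation keeps failing.
	 */
	return atomic_read(&hctx->nr_active) < depth;
}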
-rw-r--r--	block/blk-mq-tag.c	3
-rw-r--r--	block/blk-mq.c	5
2 files changed, 5 insertions, 3 deletions
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index c31c4a0478a5..aacf10decdbd 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -76,7 +76,8 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
 static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
 			    struct sbitmap_queue *bt)
 {
-	if (!data->q->elevator && !hctx_may_queue(data->hctx, bt))
+	if (!data->q->elevator && !(data->flags & BLK_MQ_REQ_RESERVED) &&
+	    !hctx_may_queue(data->hctx, bt))
 		return BLK_MQ_NO_TAG;
 
 	if (data->shallow_depth)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 4abb71459f94..e04b759add75 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1105,10 +1105,11 @@ static bool __blk_mq_get_driver_tag(struct request *rq)
 	if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) {
 		bt = rq->mq_hctx->tags->breserved_tags;
 		tag_offset = 0;
+	} else {
+		if (!hctx_may_queue(rq->mq_hctx, bt))
+			return false;
 	}
 
-	if (!hctx_may_queue(rq->mq_hctx, bt))
-		return false;
 	tag = __sbitmap_queue_get(bt);
 	if (tag == BLK_MQ_NO_TAG)
 		return false;
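As a usage illustration only: a hypothetical caller (not part of this patch) on an error-recovery path would obtain one of the reserved tags through the regular request-allocation API, and with this change that allocation is no longer throttled by the fair-share check even while the normal tag space is saturated with un-completable requests.

/*
 * Hypothetical caller sketch, not part of this patch: allocate a request
 * from the reserved tag pool on an error-recovery path.  BLK_MQ_REQ_RESERVED
 * selects the reserved tags; BLK_MQ_REQ_NOWAIT avoids sleeping in recovery.
 * The caller must check the return value with IS_ERR().
 */
#include <linux/blk-mq.h>

static struct request *alloc_recovery_request(struct request_queue *q)
{
	return blk_mq_alloc_request(q, REQ_OP_DRV_OUT,
				    BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
}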