author | Omar Sandoval <osandov@fb.com> | 2017-04-14 01:00:01 -0700
committer | Jens Axboe <axboe@fb.com> | 2017-04-14 14:06:57 -0600
commit | c05f8525f67b7d6489b0502211d4ed35622d9beb (patch)
tree | fe474e51721e497374bbf6e674f883e54138e699 /block
parent | 5b72727299307e53888277729f980ab03264dac8 (diff)
blk-mq-sched: make completed_request() callback more useful
Currently, this callback is called right after put_request() and has no
distinguishable purpose. Instead, let's call it before put_request() as
soon as I/O has completed on the request, before we account it in
blk-stat. With this, Kyber can enable stats when it sees a latency
outlier and make sure the outlier gets accounted.
Signed-off-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
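The ordering change can be illustrated outside the kernel. The sketch below is a minimal user-space C model, not kernel code: the names (model_request, kyber_like_completed_request, model_stat_add) and the 10 ms outlier threshold are illustrative assumptions, not anything from the patch. It models why running the scheduler's completed_request() hook before the request is accounted lets a Kyber-style policy that enables stats on a latency outlier still have that outlier counted.

```c
/*
 * Minimal user-space model of the ordering this patch establishes (not
 * kernel code): the scheduler's completed_request() hook runs before the
 * request is accounted, so a hook that enables stats on a latency outlier
 * still sees that outlier counted. All names here are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

struct model_request {
	unsigned long long latency_ns;
};

static bool stats_enabled;
static unsigned long long accounted_samples;

/* Stand-in for an elevator's ->completed_request() callback. */
static void kyber_like_completed_request(struct model_request *rq)
{
	/* Enable stats as soon as a latency outlier is observed. */
	if (!stats_enabled && rq->latency_ns > 10ULL * 1000 * 1000) {
		stats_enabled = true;
		printf("outlier seen (%llu ns), enabling stats\n",
		       rq->latency_ns);
	}
}

/* Stand-in for accounting a completed request in blk-stat. */
static void model_stat_add(struct model_request *rq)
{
	(void)rq;
	if (stats_enabled)
		accounted_samples++;
}

/* Completion path after the patch: callback first, then accounting. */
static void model_complete_request(struct model_request *rq)
{
	kyber_like_completed_request(rq);	/* runs before accounting */
	model_stat_add(rq);			/* the outlier is included */
}

int main(void)
{
	struct model_request fast = { .latency_ns = 100 * 1000 };
	struct model_request slow = { .latency_ns = 50ULL * 1000 * 1000 };

	model_complete_request(&fast);	/* stats still off, not counted */
	model_complete_request(&slow);	/* enables stats and is counted */

	printf("accounted samples: %llu\n", accounted_samples);
	return 0;
}
```

With the pre-patch ordering, where the callback ran only after the request had been put (and after accounting), the outlier that should trigger stats collection would itself be missed.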
Diffstat (limited to 'block')
-rw-r--r-- | block/blk-mq-sched.h | 11
-rw-r--r-- | block/blk-mq.c | 5
2 files changed, 7 insertions, 9 deletions
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index f4bc186c3440..120c6abc37cc 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -82,17 +82,12 @@ blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
 	return true;
 }
 
-static inline void
-blk_mq_sched_completed_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
+static inline void blk_mq_sched_completed_request(struct request *rq)
 {
-	struct elevator_queue *e = hctx->queue->elevator;
+	struct elevator_queue *e = rq->q->elevator;
 
 	if (e && e->type->ops.mq.completed_request)
-		e->type->ops.mq.completed_request(hctx, rq);
-
-	BUG_ON(rq->internal_tag == -1);
-
-	blk_mq_put_tag(hctx, hctx->sched_tags, rq->mq_ctx, rq->internal_tag);
+		e->type->ops.mq.completed_request(rq);
 }
 
 static inline void blk_mq_sched_started_request(struct request *rq)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 7138cd98146e..e2ef7b460924 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -350,7 +350,7 @@ void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
 	if (rq->tag != -1)
 		blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
 	if (sched_tag != -1)
-		blk_mq_sched_completed_request(hctx, rq);
+		blk_mq_put_tag(hctx, hctx->sched_tags, ctx, sched_tag);
 	blk_mq_sched_restart(hctx);
 	blk_queue_exit(q);
 }
@@ -444,6 +444,9 @@ static void __blk_mq_complete_request(struct request *rq)
 {
 	struct request_queue *q = rq->q;
 
+	if (rq->internal_tag != -1)
+		blk_mq_sched_completed_request(rq);
+
 	blk_mq_stat_add(rq);
 
 	if (!q->softirq_done_fn)