author | Jens Axboe <axboe@kernel.dk> | 2018-10-29 15:06:13 -0600
---|---|---
committer | Jens Axboe <axboe@kernel.dk> | 2018-11-07 13:44:59 -0700
commit | ea4f995ee8b8f0578b3319949f2edd5d812fdb0a (patch) |
tree | f7516777fbd8b2fc16cf75b846792981c3a07434 /block/blk-mq.h |
parent | 392546aed22009060911f76b6ea24520e2f8b50f (diff) |
blk-mq: cache request hardware queue mapping
We call blk_mq_map_queue() a lot, at least two times for each
request per IO, sometimes more. Since we now have an indirect
call as well in that function, cache the mapping so we don't
have to re-call blk_mq_map_queue() for the same request
multiple times.
Reviewed-by: Keith Busch <keith.busch@intel.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-mq.h')
-rw-r--r-- | block/blk-mq.h | 5
1 file changed, 1 insertion, 4 deletions
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 053862270125..facb6e9ddce4 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -223,13 +223,10 @@ static inline void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
 
 static inline void blk_mq_put_driver_tag(struct request *rq)
 {
-	struct blk_mq_hw_ctx *hctx;
-
 	if (rq->tag == -1 || rq->internal_tag == -1)
 		return;
 
-	hctx = blk_mq_map_queue(rq->q, rq->cmd_flags, rq->mq_ctx->cpu);
-	__blk_mq_put_driver_tag(hctx, rq);
+	__blk_mq_put_driver_tag(rq->mq_hctx, rq);
 }
 
 static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
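Since this page is limited to block/blk-mq.h, the hunk above only shows the consumer side: blk_mq_put_driver_tag() can dereference rq->mq_hctx because the mapping has already been resolved and stored on the request elsewhere in the commit. As a rough, self-contained C sketch of the caching pattern the commit message describes (the structs and helpers below are simplified stand-ins, not the kernel's real definitions), the mapping lookup happens once when the request is set up and every later fast-path helper reuses the cached pointer:

```c
#include <stdio.h>

/* Simplified stand-ins for the kernel structures involved; not the real layouts. */
struct hw_ctx {
	int queue_num;
};

struct queue {
	struct hw_ctx *hctx_table;	/* one hardware context per CPU, for the sketch */
};

struct request {
	struct queue *q;
	unsigned int cmd_flags;
	int cpu;
	struct hw_ctx *mq_hctx;		/* cached hardware queue mapping, resolved once */
};

/*
 * Stand-in for blk_mq_map_queue(): in the kernel this now involves an
 * indirect call, which is why repeating it per request is worth avoiding.
 */
static struct hw_ctx *map_queue(struct queue *q, unsigned int cmd_flags, int cpu)
{
	(void)cmd_flags;
	return &q->hctx_table[cpu];	/* trivial 1:1 CPU-to-queue mapping */
}

/* Resolve the mapping once when the request is set up and cache it. */
static void rq_init(struct request *rq, struct queue *q, unsigned int flags, int cpu)
{
	rq->q = q;
	rq->cmd_flags = flags;
	rq->cpu = cpu;
	rq->mq_hctx = map_queue(q, flags, cpu);
}

/* Fast-path helper: uses the cached pointer instead of re-mapping. */
static void put_driver_tag(struct request *rq)
{
	printf("releasing tag on hw queue %d\n", rq->mq_hctx->queue_num);
}

int main(void)
{
	struct hw_ctx table[2] = { { .queue_num = 0 }, { .queue_num = 1 } };
	struct queue q = { .hctx_table = table };
	struct request rq;

	rq_init(&rq, &q, 0, 1);	/* mapping resolved exactly once here */
	put_driver_tag(&rq);	/* ...and reused by every later helper */
	return 0;
}
```

The trade-off is one extra pointer stored per request in exchange for dropping the repeated, potentially indirect mapping lookups on the hot path.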