author    | Bart Van Assche <bvanassche@acm.org> | 2021-09-27 15:03:25 -0700
committer | Jens Axboe <axboe@kernel.dk> | 2021-10-18 06:17:02 -0600
commit    | e2c7275dc0fe3cba6a3de5a5f4c98f923e3d9d14 (patch)
tree      | a1e775a7dbdfe1b11947fbb8cf8029c8d1495a56 /block/mq-deadline.c
parent    | 24b83deb29b7f06a5573b65f2ce96f5482755d43 (diff)
block/mq-deadline: Improve request accounting further
The scheduler .insert_requests() callback is called when a request is
queued for the first time and also when it is requeued. Only count a
request the first time it is queued. Additionally, since the mq-deadline
scheduler only performs zone locking for requests that have been
inserted, skip the zone unlock code for requests that have not been
inserted into the mq-deadline scheduler.
Fixes: 38ba64d12d4c ("block/mq-deadline: Track I/O statistics")
Reviewed-by: Damien Le Moal <damien.lemoal@wdc.com>
Reviewed-by: Niklas Cassel <Niklas.Cassel@wdc.com>
Cc: Hannes Reinecke <hare@suse.de>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Link: https://lore.kernel.org/r/20210927220328.1410161-2-bvanassche@acm.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/mq-deadline.c')
-rw-r--r-- | block/mq-deadline.c | 17
1 file changed, 10 insertions(+), 7 deletions(-)
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index 47f042fa6a68..c27b4347ca91 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -677,8 +677,10 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
         blk_req_zone_write_unlock(rq);
 
         prio = ioprio_class_to_prio[ioprio_class];
-        dd_count(dd, inserted, prio);
-        rq->elv.priv[0] = (void *)(uintptr_t)1;
+        if (!rq->elv.priv[0]) {
+                dd_count(dd, inserted, prio);
+                rq->elv.priv[0] = (void *)(uintptr_t)1;
+        }
 
         if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
                 blk_mq_free_requests(&free);
@@ -759,12 +761,13 @@ static void dd_finish_request(struct request *rq)
 
         /*
          * The block layer core may call dd_finish_request() without having
-         * called dd_insert_requests(). Hence only update statistics for
-         * requests for which dd_insert_requests() has been called. See also
-         * blk_mq_request_bypass_insert().
+         * called dd_insert_requests(). Skip requests that bypassed I/O
+         * scheduling. See also blk_mq_request_bypass_insert().
          */
-        if (rq->elv.priv[0])
-                dd_count(dd, completed, prio);
+        if (!rq->elv.priv[0])
+                return;
+
+        dd_count(dd, completed, prio);
 
         if (blk_queue_is_zoned(q)) {
                 unsigned long flags;
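For readers who want to experiment with the accounting pattern outside the kernel, below is a minimal, userspace-only C sketch of the same idea: a per-request flag (playing the role of rq->elv.priv[0]) ensures a request is counted as inserted only once even if it is requeued, and completion accounting is skipped for requests that bypassed the scheduler. The names req, sched_stats, insert_request() and finish_request() are illustrative stand-ins, not kernel APIs.

/*
 * Simplified, userspace-only sketch (not kernel code) of the accounting
 * pattern used in the patch above: count a request as "inserted" only the
 * first time it is queued, and skip completion accounting for requests
 * that were never inserted into the scheduler.
 */
#include <stdbool.h>
#include <stdio.h>

struct sched_stats {
        unsigned long inserted;
        unsigned long completed;
};

struct req {
        bool counted;   /* plays the role of rq->elv.priv[0] */
};

static void insert_request(struct sched_stats *stats, struct req *rq)
{
        /* A requeue re-enters this path but must not be counted again. */
        if (!rq->counted) {
                stats->inserted++;
                rq->counted = true;
        }
}

static void finish_request(struct sched_stats *stats, struct req *rq)
{
        /*
         * Requests that bypassed the scheduler were never counted as
         * inserted, so skip completion accounting (and, in the real
         * scheduler, the zone unlock work that only applies to inserted
         * requests).
         */
        if (!rq->counted)
                return;

        stats->completed++;
}

int main(void)
{
        struct sched_stats stats = { 0 };
        struct req a = { 0 }, b = { 0 };

        insert_request(&stats, &a);
        insert_request(&stats, &a);     /* requeue: not counted twice */
        finish_request(&stats, &a);

        finish_request(&stats, &b);     /* bypassed insert: skipped */

        /* prints inserted=1 completed=1 */
        printf("inserted=%lu completed=%lu\n", stats.inserted, stats.completed);
        return 0;
}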