author     Linus Torvalds <torvalds@linux-foundation.org>  2022-06-03 10:21:43 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-06-03 10:21:43 -0700
commit     72fbbc3d0e3e3117c29a73d0b4d928dc00ed99ce
tree       7b786dde386ed70c3930de6356e6fd406f2db36e /block
parent     34845d92bca527b5c2cf8b2293b71b9c746c79ca
parent     e2e530867245d051dc7800b0d07193b3e581f5b9
Merge tag 'for-5.19/block-exec-2022-06-02' of git://git.kernel.dk/linux-block
Pull block request execute cleanups from Jens Axboe:
"This change was advertised in the initial core block pull request, but
didn't actually make that branch as we deferred it to a post-merge
pull request to avoid a bunch of cross branch issues.
This series cleans up the block execute path quite nicely"
* tag 'for-5.19/block-exec-2022-06-02' of git://git.kernel.dk/linux-block:
blk-mq: remove the done argument to blk_execute_rq_nowait
blk-mq: avoid a mess of casts for blk_end_sync_rq
blk-mq: remove __blk_execute_rq_nowait
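For callers of the passthrough-request API, the visible effect of the first commit is that the completion handler moves from a function argument onto the request itself. A schematic before/after sketch of a hypothetical caller (my_end_io and my_ctx are illustrative names, not kernel API):

	/* Before this series: the handler was passed as an argument. */
	rq->end_io_data = my_ctx;
	blk_execute_rq_nowait(rq, false, my_end_io);

	/* After: the caller assigns ->end_io on the request directly. */
	rq->end_io_data = my_ctx;
	rq->end_io = my_end_io;
	blk_execute_rq_nowait(rq, false);

Either way the handler runs on completion; the new form drops a parameter that already had a natural home on the request.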
Diffstat (limited to 'block')
-rw-r--r--  block/blk-mq.c  113
1 file changed, 49 insertions(+), 64 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 30e4bdcd8d7f..e9bf950983c7 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1152,24 +1152,6 @@ void blk_mq_start_request(struct request *rq)
 }
 EXPORT_SYMBOL(blk_mq_start_request);
 
-/**
- * blk_end_sync_rq - executes a completion event on a request
- * @rq: request to complete
- * @error: end I/O status of the request
- */
-static void blk_end_sync_rq(struct request *rq, blk_status_t error)
-{
-	struct completion *waiting = rq->end_io_data;
-
-	rq->end_io_data = (void *)(uintptr_t)error;
-
-	/*
-	 * complete last, if this is a stack request the process (and thus
-	 * the rq pointer) could be invalid right after this complete()
-	 */
-	complete(waiting);
-}
-
 /*
  * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
  * queues. This is important for md arrays to benefit from merging
@@ -1204,33 +1186,10 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
 	plug->rq_count++;
 }
 
-static void __blk_execute_rq_nowait(struct request *rq, bool at_head,
-		rq_end_io_fn *done, bool use_plug)
-{
-	WARN_ON(irqs_disabled());
-	WARN_ON(!blk_rq_is_passthrough(rq));
-
-	rq->end_io = done;
-
-	blk_account_io_start(rq);
-
-	if (use_plug && current->plug) {
-		blk_add_rq_to_plug(current->plug, rq);
-		return;
-	}
-	/*
-	 * don't check dying flag for MQ because the request won't
-	 * be reused after dying flag is set
-	 */
-	blk_mq_sched_insert_request(rq, at_head, true, false);
-}
-
-
 /**
  * blk_execute_rq_nowait - insert a request to I/O scheduler for execution
  * @rq: request to insert
  * @at_head: insert request at head or tail of queue
- * @done: I/O completion handler
  *
  * Description:
  *    Insert a fully prepared request at the back of the I/O scheduler queue
@@ -1239,13 +1198,32 @@ static void __blk_execute_rq_nowait(struct request *rq, bool at_head,
  * Note:
  *    This function will invoke @done directly if the queue is dead.
  */
-void blk_execute_rq_nowait(struct request *rq, bool at_head, rq_end_io_fn *done)
+void blk_execute_rq_nowait(struct request *rq, bool at_head)
 {
-	__blk_execute_rq_nowait(rq, at_head, done, true);
+	WARN_ON(irqs_disabled());
+	WARN_ON(!blk_rq_is_passthrough(rq));
+
+	blk_account_io_start(rq);
+	if (current->plug)
+		blk_add_rq_to_plug(current->plug, rq);
+	else
+		blk_mq_sched_insert_request(rq, at_head, true, false);
 }
 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
 
+struct blk_rq_wait {
+	struct completion done;
+	blk_status_t ret;
+};
+
+static void blk_end_sync_rq(struct request *rq, blk_status_t ret)
+{
+	struct blk_rq_wait *wait = rq->end_io_data;
+
+	wait->ret = ret;
+	complete(&wait->done);
+}
+
 static bool blk_rq_is_poll(struct request *rq)
 {
 	if (!rq->mq_hctx)
@@ -1277,30 +1255,37 @@ static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
  */
 blk_status_t blk_execute_rq(struct request *rq, bool at_head)
 {
-	DECLARE_COMPLETION_ONSTACK(wait);
-	unsigned long hang_check;
+	struct blk_rq_wait wait = {
+		.done = COMPLETION_INITIALIZER_ONSTACK(wait.done),
+	};
+
+	WARN_ON(irqs_disabled());
+	WARN_ON(!blk_rq_is_passthrough(rq));
 
-	/*
-	 * iopoll requires request to be submitted to driver, so can't
-	 * use plug
-	 */
 	rq->end_io_data = &wait;
-	__blk_execute_rq_nowait(rq, at_head, blk_end_sync_rq,
-			!blk_rq_is_poll(rq));
-
-	/* Prevent hang_check timer from firing at us during very long I/O */
-	hang_check = sysctl_hung_task_timeout_secs;
-
-	if (blk_rq_is_poll(rq))
-		blk_rq_poll_completion(rq, &wait);
-	else if (hang_check)
-		while (!wait_for_completion_io_timeout(&wait,
-				hang_check * (HZ/2)))
-			;
-	else
-		wait_for_completion_io(&wait);
+	rq->end_io = blk_end_sync_rq;
+
+	blk_account_io_start(rq);
+	blk_mq_sched_insert_request(rq, at_head, true, false);
+
+	if (blk_rq_is_poll(rq)) {
+		blk_rq_poll_completion(rq, &wait.done);
+	} else {
+		/*
+		 * Prevent hang_check timer from firing at us during very long
+		 * I/O
+		 */
+		unsigned long hang_check = sysctl_hung_task_timeout_secs;
+
+		if (hang_check)
+			while (!wait_for_completion_io_timeout(&wait.done,
+					hang_check * (HZ/2)))
+				;
+		else
+			wait_for_completion_io(&wait.done);
+	}
 
-	return (blk_status_t)(uintptr_t)rq->end_io_data;
+	return wait.ret;
 }
 EXPORT_SYMBOL(blk_execute_rq);
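The struct blk_rq_wait introduced here replaces the old trick of casting the status through the void *end_io_data pointer: the synchronous waiter keeps both the completion and the result in one on-stack struct, and the callback fills in the result before signalling. Below is a minimal user-space analogue of that pattern, with a pthread mutex/condvar standing in for the kernel's struct completion; all names are illustrative, not kernel API:

	#include <pthread.h>
	#include <stdio.h>

	/* Analogue of struct blk_rq_wait: completion plus result,
	 * living on the waiter's stack. */
	struct rq_wait {
		pthread_mutex_t lock;
		pthread_cond_t cond;
		int done;
		int ret;	/* plays the role of blk_status_t */
	};

	/* Analogue of blk_end_sync_rq(): record the status first and
	 * signal last, since after the wakeup the waiter's stack frame
	 * (and thus 'w') may no longer be valid. */
	static void end_sync_rq(struct rq_wait *w, int status)
	{
		pthread_mutex_lock(&w->lock);
		w->ret = status;
		w->done = 1;
		pthread_cond_signal(&w->cond);
		pthread_mutex_unlock(&w->lock);
	}

	static void *io_thread(void *arg)
	{
		end_sync_rq(arg, 0);	/* pretend the request succeeded */
		return NULL;
	}

	int main(void)
	{
		struct rq_wait wait = {
			.lock = PTHREAD_MUTEX_INITIALIZER,
			.cond = PTHREAD_COND_INITIALIZER,
		};
		pthread_t t;

		pthread_create(&t, NULL, io_thread, &wait);

		/* Analogue of the wait_for_completion_io() path. */
		pthread_mutex_lock(&wait.lock);
		while (!wait.done)
			pthread_cond_wait(&wait.cond, &wait.lock);
		pthread_mutex_unlock(&wait.lock);
		pthread_join(t, NULL);

		printf("request completed, status %d\n", wait.ret);
		return 0;
	}

The design point carried over from the kernel code: the result lives in memory the waiter owns, so no cast is needed to squeeze a status code into a pointer, and the signal happens strictly after the result is written.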