| author | Linus Torvalds <torvalds@linux-foundation.org> | 2022-06-03 10:21:43 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2022-06-03 10:21:43 -0700 |
| commit | 72fbbc3d0e3e3117c29a73d0b4d928dc00ed99ce | |
| tree | 7b786dde386ed70c3930de6356e6fd406f2db36e | |
| parent | 34845d92bca527b5c2cf8b2293b71b9c746c79ca | |
| parent | e2e530867245d051dc7800b0d07193b3e581f5b9 | |
Merge tag 'for-5.19/block-exec-2022-06-02' of git://git.kernel.dk/linux-block
Pull block request execute cleanups from Jens Axboe:
"This change was advertised in the initial core block pull request, but
didn't actually make that branch as we deferred it to a post-merge
pull request to avoid a bunch of cross branch issues.
This series cleans up the block execute path quite nicely"
* tag 'for-5.19/block-exec-2022-06-02' of git://git.kernel.dk/linux-block:
blk-mq: remove the done argument to blk_execute_rq_nowait
blk-mq: avoid a mess of casts for blk_end_sync_rq
blk-mq: remove __blk_execute_rq_nowait
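The caller-visible change from the first commit in the list above is that blk_execute_rq_nowait() no longer takes a completion callback; callers now assign rq->end_io (alongside the existing rq->end_io_data) on the request before submitting it. A minimal before/after sketch of a caller, modelled on the nvme keep-alive hunk in the diff further down (fragment only, not a standalone build):

```c
/* Before this series: the completion handler was passed as an argument. */
rq->end_io_data = ctrl;
blk_execute_rq_nowait(rq, false, nvme_keep_alive_end_io);

/* After this series: the handler is set on the request first. */
rq->end_io = nvme_keep_alive_end_io;
rq->end_io_data = ctrl;
blk_execute_rq_nowait(rq, false);
```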
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | block/blk-mq.c | 113 |
| -rw-r--r-- | drivers/block/sx8.c | 4 |
| -rw-r--r-- | drivers/nvme/host/core.c | 3 |
| -rw-r--r-- | drivers/nvme/host/ioctl.c | 3 |
| -rw-r--r-- | drivers/nvme/host/pci.c | 10 |
| -rw-r--r-- | drivers/nvme/target/passthru.c | 3 |
| -rw-r--r-- | drivers/scsi/scsi_error.c | 5 |
| -rw-r--r-- | drivers/scsi/sg.c | 3 |
| -rw-r--r-- | drivers/scsi/st.c | 3 |
| -rw-r--r-- | drivers/scsi/ufs/ufshpb.c | 6 |
| -rw-r--r-- | drivers/target/target_core_pscsi.c | 3 |
| -rw-r--r-- | include/linux/blk-mq.h | 3 |

12 files changed, 77 insertions(+), 82 deletions(-)
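The synchronous path gets the same treatment through a small on-stack wait structure. For readability, here is that helper outside diff notation, taken from the block/blk-mq.c hunks below (kernel-internal code, reproduced as a reading aid rather than a standalone example):

```c
/* On-stack state for a synchronous passthrough request: the completion
 * and the resulting status live together, so no casting through
 * rq->end_io_data is needed any more.
 */
struct blk_rq_wait {
        struct completion done;
        blk_status_t ret;
};

static void blk_end_sync_rq(struct request *rq, blk_status_t ret)
{
        struct blk_rq_wait *wait = rq->end_io_data;

        wait->ret = ret;
        complete(&wait->done);
}
```

blk_execute_rq() places a struct blk_rq_wait on its stack, points rq->end_io_data at it, sets rq->end_io = blk_end_sync_rq, and returns wait.ret once the completion fires.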
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 30e4bdcd8d7f..e9bf950983c7 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1152,24 +1152,6 @@ void blk_mq_start_request(struct request *rq)
 }
 EXPORT_SYMBOL(blk_mq_start_request);
 
-/**
- * blk_end_sync_rq - executes a completion event on a request
- * @rq: request to complete
- * @error: end I/O status of the request
- */
-static void blk_end_sync_rq(struct request *rq, blk_status_t error)
-{
-        struct completion *waiting = rq->end_io_data;
-
-        rq->end_io_data = (void *)(uintptr_t)error;
-
-        /*
-         * complete last, if this is a stack request the process (and thus
-         * the rq pointer) could be invalid right after this complete()
-         */
-        complete(waiting);
-}
-
 /*
  * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
  * queues. This is important for md arrays to benefit from merging
@@ -1204,33 +1186,10 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
         plug->rq_count++;
 }
 
-static void __blk_execute_rq_nowait(struct request *rq, bool at_head,
-                rq_end_io_fn *done, bool use_plug)
-{
-        WARN_ON(irqs_disabled());
-        WARN_ON(!blk_rq_is_passthrough(rq));
-
-        rq->end_io = done;
-
-        blk_account_io_start(rq);
-
-        if (use_plug && current->plug) {
-                blk_add_rq_to_plug(current->plug, rq);
-                return;
-        }
-        /*
-         * don't check dying flag for MQ because the request won't
-         * be reused after dying flag is set
-         */
-        blk_mq_sched_insert_request(rq, at_head, true, false);
-}
-
-
 /**
  * blk_execute_rq_nowait - insert a request to I/O scheduler for execution
  * @rq: request to insert
  * @at_head: insert request at head or tail of queue
- * @done: I/O completion handler
  *
  * Description:
  *    Insert a fully prepared request at the back of the I/O scheduler queue
@@ -1239,13 +1198,32 @@ static void __blk_execute_rq_nowait(struct request *rq, bool at_head,
  * Note:
  *    This function will invoke @done directly if the queue is dead.
  */
-void blk_execute_rq_nowait(struct request *rq, bool at_head, rq_end_io_fn *done)
+void blk_execute_rq_nowait(struct request *rq, bool at_head)
 {
-        __blk_execute_rq_nowait(rq, at_head, done, true);
+        WARN_ON(irqs_disabled());
+        WARN_ON(!blk_rq_is_passthrough(rq));
+        blk_account_io_start(rq);
+        if (current->plug)
+                blk_add_rq_to_plug(current->plug, rq);
+        else
+                blk_mq_sched_insert_request(rq, at_head, true, false);
 }
 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
 
+struct blk_rq_wait {
+        struct completion done;
+        blk_status_t ret;
+};
+
+static void blk_end_sync_rq(struct request *rq, blk_status_t ret)
+{
+        struct blk_rq_wait *wait = rq->end_io_data;
+
+        wait->ret = ret;
+        complete(&wait->done);
+}
+
 static bool blk_rq_is_poll(struct request *rq)
 {
         if (!rq->mq_hctx)
@@ -1277,30 +1255,37 @@ static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
  */
 blk_status_t blk_execute_rq(struct request *rq, bool at_head)
 {
-        DECLARE_COMPLETION_ONSTACK(wait);
-        unsigned long hang_check;
+        struct blk_rq_wait wait = {
+                .done = COMPLETION_INITIALIZER_ONSTACK(wait.done),
+        };
+
+        WARN_ON(irqs_disabled());
+        WARN_ON(!blk_rq_is_passthrough(rq));
 
-        /*
-         * iopoll requires request to be submitted to driver, so can't
-         * use plug
-         */
         rq->end_io_data = &wait;
-        __blk_execute_rq_nowait(rq, at_head, blk_end_sync_rq,
-                        !blk_rq_is_poll(rq));
-
-        /* Prevent hang_check timer from firing at us during very long I/O */
-        hang_check = sysctl_hung_task_timeout_secs;
-
-        if (blk_rq_is_poll(rq))
-                blk_rq_poll_completion(rq, &wait);
-        else if (hang_check)
-                while (!wait_for_completion_io_timeout(&wait,
-                                hang_check * (HZ/2)))
-                        ;
-        else
-                wait_for_completion_io(&wait);
+        rq->end_io = blk_end_sync_rq;
+
+        blk_account_io_start(rq);
+        blk_mq_sched_insert_request(rq, at_head, true, false);
+
+        if (blk_rq_is_poll(rq)) {
+                blk_rq_poll_completion(rq, &wait.done);
+        } else {
+                /*
+                 * Prevent hang_check timer from firing at us during very long
+                 * I/O
+                 */
+                unsigned long hang_check = sysctl_hung_task_timeout_secs;
+
+                if (hang_check)
+                        while (!wait_for_completion_io_timeout(&wait.done,
+                                        hang_check * (HZ/2)))
+                                ;
+                else
+                        wait_for_completion_io(&wait.done);
+        }
 
-        return (blk_status_t)(uintptr_t)rq->end_io_data;
+        return wait.ret;
 }
 EXPORT_SYMBOL(blk_execute_rq);
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index b361583944b9..63b4f6431d2e 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -540,7 +540,7 @@ static int carm_array_info (struct carm_host *host, unsigned int array_idx)
         spin_unlock_irq(&host->lock);
 
         DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag);
-        blk_execute_rq_nowait(rq, true, NULL);
+        blk_execute_rq_nowait(rq, true);
 
         return 0;
 
@@ -579,7 +579,7 @@ static int carm_send_special (struct carm_host *host, carm_sspc_t func)
         crq->msg_bucket = (u32) rc;
 
         DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag);
-        blk_execute_rq_nowait(rq, true, NULL);
+        blk_execute_rq_nowait(rq, true);
 
         return 0;
 }
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 72f7c955c707..727c12cbe327 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1206,9 +1206,10 @@ static void nvme_keep_alive_work(struct work_struct *work)
         nvme_init_request(rq, &ctrl->ka_cmd);
 
         rq->timeout = ctrl->kato * HZ;
+        rq->end_io = nvme_keep_alive_end_io;
         rq->end_io_data = ctrl;
         rq->rq_flags |= RQF_QUIET;
-        blk_execute_rq_nowait(rq, false, nvme_keep_alive_end_io);
+        blk_execute_rq_nowait(rq, false);
 }
 
 static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 096b1b47d750..a2e89db1cd63 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -453,6 +453,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
                         blk_flags);
         if (IS_ERR(req))
                 return PTR_ERR(req);
+        req->end_io = nvme_uring_cmd_end_io;
         req->end_io_data = ioucmd;
 
         /* to free bio on completion, as req->bio will be null at that time */
@@ -461,7 +462,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
         pdu->meta_buffer = nvme_to_user_ptr(d.metadata);
         pdu->meta_len = d.metadata_len;
 
-        blk_execute_rq_nowait(req, 0, nvme_uring_cmd_end_io);
+        blk_execute_rq_nowait(req, false);
         return -EIOCBQUEUED;
 }
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 5a98a7de0964..0403b6d10bb4 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1438,9 +1438,10 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
         }
         nvme_init_request(abort_req, &cmd);
 
+        abort_req->end_io = abort_endio;
         abort_req->end_io_data = NULL;
         abort_req->rq_flags |= RQF_QUIET;
-        blk_execute_rq_nowait(abort_req, false, abort_endio);
+        blk_execute_rq_nowait(abort_req, false);
 
         /*
          * The aborted req will be completed on receiving the abort req.
@@ -2485,12 +2486,15 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
                 return PTR_ERR(req);
         nvme_init_request(req, &cmd);
 
+        if (opcode == nvme_admin_delete_cq)
+                req->end_io = nvme_del_cq_end;
+        else
+                req->end_io = nvme_del_queue_end;
         req->end_io_data = nvmeq;
 
         init_completion(&nvmeq->delete_done);
         req->rq_flags |= RQF_QUIET;
-        blk_execute_rq_nowait(req, false, opcode == nvme_admin_delete_cq ?
-                        nvme_del_cq_end : nvme_del_queue_end);
+        blk_execute_rq_nowait(req, false);
         return 0;
 }
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 5247c24538eb..3cc4d6709c93 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -285,8 +285,9 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
                 req->p.rq = rq;
                 queue_work(nvmet_wq, &req->p.work);
         } else {
+                rq->end_io = nvmet_passthru_req_done;
                 rq->end_io_data = req;
-                blk_execute_rq_nowait(rq, false, nvmet_passthru_req_done);
+                blk_execute_rq_nowait(rq, false);
         }
 
         if (ns)
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index cdaca13ac1f1..49ef864df581 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -2039,12 +2039,13 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
         scmd->cmnd[4] = SCSI_REMOVAL_PREVENT;
         scmd->cmnd[5] = 0;
         scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
+        scmd->allowed = 5;
 
         req->rq_flags |= RQF_QUIET;
         req->timeout = 10 * HZ;
-        scmd->allowed = 5;
+        req->end_io = eh_lock_door_done;
 
-        blk_execute_rq_nowait(req, true, eh_lock_door_done);
+        blk_execute_rq_nowait(req, true);
 }
 
 /**
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index cbffa712b9f3..118c7b4a8af2 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -831,7 +831,8 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
 
         srp->rq->timeout = timeout;
         kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). */
-        blk_execute_rq_nowait(srp->rq, at_head, sg_rq_end_io);
+        srp->rq->end_io = sg_rq_end_io;
+        blk_execute_rq_nowait(srp->rq, at_head);
         return 0;
 }
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 56a093a90b92..850172a2b8f1 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -579,9 +579,10 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
         memcpy(scmd->cmnd, cmd, scmd->cmd_len);
         req->timeout = timeout;
         scmd->allowed = retries;
+        req->end_io = st_scsi_execute_end;
         req->end_io_data = SRpnt;
 
-        blk_execute_rq_nowait(req, true, st_scsi_execute_end);
+        blk_execute_rq_nowait(req, true);
         return 0;
 }
diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c
index 8882b47f76d3..002c19c2b31f 100644
--- a/drivers/scsi/ufs/ufshpb.c
+++ b/drivers/scsi/ufs/ufshpb.c
@@ -671,11 +671,12 @@ static void ufshpb_execute_umap_req(struct ufshpb_lu *hpb,
 
         req->timeout = 0;
         req->end_io_data = umap_req;
+        req->end_io = ufshpb_umap_req_compl_fn;
 
         ufshpb_set_unmap_cmd(scmd->cmnd, rgn);
         scmd->cmd_len = HPB_WRITE_BUFFER_CMD_LENGTH;
 
-        blk_execute_rq_nowait(req, true, ufshpb_umap_req_compl_fn);
+        blk_execute_rq_nowait(req, true);
 
         hpb->stats.umap_req_cnt++;
 }
@@ -707,6 +708,7 @@ static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
         blk_rq_append_bio(req, map_req->bio);
 
         req->end_io_data = map_req;
+        req->end_io = ufshpb_map_req_compl_fn;
 
         if (unlikely(last))
                 mem_size = hpb->last_srgn_entries * HPB_ENTRY_SIZE;
@@ -716,7 +718,7 @@ static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
                         map_req->rb.srgn_idx, mem_size);
         scmd->cmd_len = HPB_READ_BUFFER_CMD_LENGTH;
 
-        blk_execute_rq_nowait(req, true, ufshpb_map_req_compl_fn);
+        blk_execute_rq_nowait(req, true);
 
         hpb->stats.map_req_cnt++;
         return 0;
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index bb3fb18b2316..e6a967ddc08c 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -972,8 +972,7 @@ pscsi_execute_cmd(struct se_cmd *cmd)
 
         cmd->priv = scmd->cmnd;
 
-        blk_execute_rq_nowait(req, cmd->sam_task_attr == TCM_HEAD_TAG,
-                        pscsi_req_done);
+        blk_execute_rq_nowait(req, cmd->sam_task_attr == TCM_HEAD_TAG);
 
         return 0;
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 9f07061418db..e2d9daf7e8dd 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -969,8 +969,7 @@ int blk_rq_unmap_user(struct bio *);
 int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
 int blk_rq_append_bio(struct request *rq, struct bio *bio);
-void blk_execute_rq_nowait(struct request *rq, bool at_head,
-                rq_end_io_fn *end_io);
+void blk_execute_rq_nowait(struct request *rq, bool at_head);
 blk_status_t blk_execute_rq(struct request *rq, bool at_head);
 
 struct req_iterator {