author     Pavel Begunkov <asml.silence@gmail.com>    2022-11-23 11:33:37 +0000
committer  Jens Axboe <axboe@kernel.dk>               2022-11-23 10:44:00 -0700
commit     e276ae344a770f91912a81c6a338d92efd319be2
tree       362f0c0671ea88f02b9a863f1983744e6c836fe9   /io_uring/io_uring.c
parent     2ccc92f4effcfa1c51c4fcf1e34d769099d3cad4
io_uring: hold locks for io_req_complete_failed
A preparation patch: make sure we always hold uring_lock around
io_req_complete_failed(). The only place deviating from the rule
is io_cancel_defer_files(); queue a task_work item there instead.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/70760344eadaecf2939287084b9d4ba5c05a6984.1669203009.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'io_uring/io_uring.c')
-rw-r--r--   io_uring/io_uring.c   6
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 2260fb7aa7f2..4d16f3b1ee11 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -862,9 +862,12 @@ inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags)
 }
 
 void io_req_complete_failed(struct io_kiocb *req, s32 res)
+	__must_hold(&ctx->uring_lock)
 {
 	const struct io_op_def *def = &io_op_defs[req->opcode];
 
+	lockdep_assert_held(&req->ctx->uring_lock);
+
 	req_set_fail(req);
 	io_req_set_res(req, res, io_put_kbuf(req, IO_URING_F_UNLOCKED));
 	if (def->fail)
@@ -1615,6 +1618,7 @@ static u32 io_get_sequence(struct io_kiocb *req)
 }
 
 static __cold void io_drain_req(struct io_kiocb *req)
+	__must_hold(&ctx->uring_lock)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_defer_entry *de;
@@ -2849,7 +2853,7 @@ static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx,
 	while (!list_empty(&list)) {
 		de = list_first_entry(&list, struct io_defer_entry, list);
 		list_del_init(&de->list);
-		io_req_complete_failed(de->req, -ECANCELED);
+		io_req_task_queue_fail(de->req, -ECANCELED);
 		kfree(de);
 	}
 	return true;
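
To illustrate the rule the patch enforces, here is a minimal userspace C sketch (not kernel code): a completion helper that asserts the ring lock is held, in the spirit of the lockdep_assert_held() added above, and a cancellation path that queues the failure for later instead of completing inline, mirroring the switch to io_req_task_queue_fail(). Every name in the sketch (ring_ctx, pending_req, complete_failed, cancel_defer, run_deferred) is a hypothetical stand-in, not an io_uring API.

/*
 * Minimal userspace sketch (not kernel code) of the locking rule the patch
 * enforces.  All names here (ring_ctx, pending_req, complete_failed,
 * cancel_defer, run_deferred) are hypothetical stand-ins, not io_uring APIs.
 */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

struct pending_req {
	int res;
};

struct ring_ctx {
	pthread_mutex_t uring_lock;
	struct pending_req *deferred;	/* one-slot "task work" queue */
};

/*
 * In the spirit of lockdep_assert_held(): complain if the lock is free.
 * Unlike real lockdep this does not track ownership, only that the mutex
 * is currently locked.
 */
static void assert_lock_held(struct ring_ctx *ctx)
{
	if (pthread_mutex_trylock(&ctx->uring_lock) == 0) {
		pthread_mutex_unlock(&ctx->uring_lock);
		assert(!"uring_lock not held");
	}
}

/* must run with ctx->uring_lock held, like io_req_complete_failed() */
static void complete_failed(struct ring_ctx *ctx, struct pending_req *req, int res)
{
	assert_lock_held(ctx);
	req->res = res;
	printf("completed with res=%d under the lock\n", req->res);
}

/* cancellation path without the lock: queue the failure instead of completing */
static void cancel_defer(struct ring_ctx *ctx, struct pending_req *req)
{
	ctx->deferred = req;	/* stands in for io_req_task_queue_fail() */
}

/* later runs with the lock held and performs the deferred completion */
static void run_deferred(struct ring_ctx *ctx)
{
	pthread_mutex_lock(&ctx->uring_lock);
	if (ctx->deferred) {
		complete_failed(ctx, ctx->deferred, -125 /* -ECANCELED on Linux */);
		ctx->deferred = NULL;
	}
	pthread_mutex_unlock(&ctx->uring_lock);
}

int main(void)
{
	struct ring_ctx ctx = { .uring_lock = PTHREAD_MUTEX_INITIALIZER };
	struct pending_req req = { 0 };

	cancel_defer(&ctx, &req);	/* lock not held here */
	run_deferred(&ctx);		/* completion happens under the lock */
	return 0;
}

The design point is the same as in the patch: the lock-requiring helper is never called from a context that does not hold the lock, and a path that cannot complete inline defers the failure to a context that does.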