| author | Jens Axboe <axboe@kernel.dk> | 2020-01-29 13:46:44 -0700 |
|---|---|---|
| committer | Jens Axboe <axboe@kernel.dk> | 2020-01-29 13:46:44 -0700 |
| commit | f86cd20c9454847a524ddbdcdec32c0380ed7c9b | |
| tree | c6f996fd423f2793fe0aba28e1a0b7b4f98dc3aa (/fs/io_uring.c) | |
| parent | 75c6a03904e0dd414a4d99a3072075cb5117e5bc | |
io_uring: fix linked command file table usage
We're not consistent in how the file table is grabbed and assigned when a linked command requires it.

Add ->file_table to the io_op_defs[] array, and use that to determine when to grab the table, instead of having the individual handlers set a work flag when they need to defer. This also means we can kill the IO_WQ_WORK_NEEDS_FILES flag: work->files is always initialized, so io-wq can simply check whether it is set.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
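To make the shape of the change easier to see outside the kernel tree, here is a minimal, self-contained C sketch of the same pattern. All names (demo_op_defs, demo_req, demo_grab_files, demo_prep) are invented for illustration and are not io_uring's real symbols; the point is the declarative per-opcode flag plus an idempotent grab step, in the spirit of io_op_defs[].file_table and io_grab_files().

```c
#include <stdio.h>

/* Illustrative opcodes; the real IORING_OP_* values live in the uapi header. */
enum demo_opcode {
	DEMO_OP_NOP,
	DEMO_OP_OPENAT,
	DEMO_OP_CLOSE,
	DEMO_OP_FILES_UPDATE,
	DEMO_OP_LAST,
};

/*
 * Per-opcode properties: whether an opcode needs the file table is declared
 * once here, instead of each handler setting a work flag when it defers.
 */
struct demo_op_def {
	unsigned file_table : 1;
};

static const struct demo_op_def demo_op_defs[DEMO_OP_LAST] = {
	[DEMO_OP_NOP]          = { .file_table = 0 },
	[DEMO_OP_OPENAT]       = { .file_table = 1 },
	[DEMO_OP_CLOSE]        = { .file_table = 1 },
	[DEMO_OP_FILES_UPDATE] = { .file_table = 1 },
};

/* Stand-in for the request/work item; files stays NULL until grabbed. */
struct demo_req {
	enum demo_opcode opcode;
	void *files;
};

/*
 * Idempotent grab: if the request already holds the file table, return
 * success immediately, mirroring the new req->work.files check the patch
 * adds to io_grab_files().
 */
static int demo_grab_files(struct demo_req *req)
{
	static int dummy_file_table;

	if (req->files)
		return 0;
	req->files = &dummy_file_table;
	return 0;
}

/* Central prep step: consult the table rather than per-handler flags. */
static int demo_prep(struct demo_req *req)
{
	if (demo_op_defs[req->opcode].file_table)
		return demo_grab_files(req);
	return 0;
}

int main(void)
{
	struct demo_req req = { .opcode = DEMO_OP_CLOSE };

	if (demo_prep(&req) == 0)
		printf("file table grabbed: %s\n", req.files ? "yes" : "no");
	return 0;
}
```

Because the grab is idempotent, the same prep call can run on both the inline submission path and the deferred path without double-grabbing, which is the consistency the commit message is after.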
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r-- | fs/io_uring.c | 31 |
1 file changed, 20 insertions(+), 11 deletions(-)
```diff
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 8bcf0538e2e1..0d8d0e217847 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -603,6 +603,8 @@ struct io_op_def {
 	unsigned unbound_nonreg_file : 1;
 	/* opcode is not supported by this kernel */
 	unsigned not_supported : 1;
+	/* needs file table */
+	unsigned file_table : 1;
 };
 
 static const struct io_op_def io_op_defs[] = {
@@ -661,6 +663,7 @@ static const struct io_op_def io_op_defs[] = {
 		.needs_mm = 1,
 		.needs_file = 1,
 		.unbound_nonreg_file = 1,
+		.file_table = 1,
 	},
 	[IORING_OP_ASYNC_CANCEL] = {},
 	[IORING_OP_LINK_TIMEOUT] = {
@@ -679,12 +682,15 @@ static const struct io_op_def io_op_defs[] = {
 	[IORING_OP_OPENAT] = {
 		.needs_file = 1,
 		.fd_non_neg = 1,
+		.file_table = 1,
 	},
 	[IORING_OP_CLOSE] = {
 		.needs_file = 1,
+		.file_table = 1,
 	},
 	[IORING_OP_FILES_UPDATE] = {
 		.needs_mm = 1,
+		.file_table = 1,
 	},
 	[IORING_OP_STATX] = {
 		.needs_mm = 1,
@@ -720,6 +726,7 @@ static const struct io_op_def io_op_defs[] = {
 	[IORING_OP_OPENAT2] = {
 		.needs_file = 1,
 		.fd_non_neg = 1,
+		.file_table = 1,
 	},
 };
 
@@ -732,6 +739,7 @@ static void io_queue_linked_timeout(struct io_kiocb *req);
 static int __io_sqe_files_update(struct io_ring_ctx *ctx,
 				 struct io_uring_files_update *ip,
 				 unsigned nr_args);
+static int io_grab_files(struct io_kiocb *req);
 
 static struct kmem_cache *req_cachep;
 
@@ -2568,10 +2576,8 @@ static int io_openat2(struct io_kiocb *req, struct io_kiocb **nxt,
 	struct file *file;
 	int ret;
 
-	if (force_nonblock) {
-		req->work.flags |= IO_WQ_WORK_NEEDS_FILES;
+	if (force_nonblock)
 		return -EAGAIN;
-	}
 
 	ret = build_open_flags(&req->open.how, &op);
 	if (ret)
@@ -2797,10 +2803,8 @@ static int io_close(struct io_kiocb *req, struct io_kiocb **nxt,
 		return ret;
 
 	/* if the file has a flush method, be safe and punt to async */
-	if (req->close.put_file->f_op->flush && !io_wq_current_is_worker()) {
-		req->work.flags |= IO_WQ_WORK_NEEDS_FILES;
+	if (req->close.put_file->f_op->flush && !io_wq_current_is_worker())
 		goto eagain;
-	}
 
 	/*
 	 * No ->flush(), safely close from here and just punt the
@@ -3244,7 +3248,6 @@ static int io_accept(struct io_kiocb *req, struct io_kiocb **nxt,
 	ret = __io_accept(req, nxt, force_nonblock);
 	if (ret == -EAGAIN && force_nonblock) {
 		req->work.func = io_accept_finish;
-		req->work.flags |= IO_WQ_WORK_NEEDS_FILES;
 		io_put_req(req);
 		return -EAGAIN;
 	}
@@ -3967,10 +3970,8 @@ static int io_files_update(struct io_kiocb *req, bool force_nonblock)
 	struct io_uring_files_update up;
 	int ret;
 
-	if (force_nonblock) {
-		req->work.flags |= IO_WQ_WORK_NEEDS_FILES;
+	if (force_nonblock)
 		return -EAGAIN;
-	}
 
 	up.offset = req->files_update.offset;
 	up.fds = req->files_update.arg;
@@ -3991,6 +3992,12 @@ static int io_req_defer_prep(struct io_kiocb *req,
 {
 	ssize_t ret = 0;
 
+	if (io_op_defs[req->opcode].file_table) {
+		ret = io_grab_files(req);
+		if (unlikely(ret))
+			return ret;
+	}
+
 	io_req_work_grab_env(req, &io_op_defs[req->opcode]);
 
 	switch (req->opcode) {
@@ -4424,6 +4431,8 @@ static int io_grab_files(struct io_kiocb *req)
 	int ret = -EBADF;
 	struct io_ring_ctx *ctx = req->ctx;
 
+	if (req->work.files)
+		return 0;
 	if (!ctx->ring_file)
 		return -EBADF;
 
@@ -4542,7 +4551,7 @@ again:
 	if (ret == -EAGAIN && (!(req->flags & REQ_F_NOWAIT) ||
 	    (req->flags & REQ_F_MUST_PUNT))) {
punt:
-		if (req->work.flags & IO_WQ_WORK_NEEDS_FILES) {
+		if (io_op_defs[req->opcode].file_table) {
 			ret = io_grab_files(req);
 			if (ret)
 				goto err;
```
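The io-wq side of the cleanup is not in this file's diff, but the commit message spells it out: since work->files is always initialized, the worker can test the pointer instead of a separate IO_WQ_WORK_NEEDS_FILES flag. A minimal sketch of that idea, again with invented demo_* names rather than the real io-wq structures:

```c
#include <stddef.h>
#include <stdio.h>

/*
 * Stand-in for a queued work item; files is always initialized by the
 * submitter, either to the caller's file table or to NULL.
 */
struct demo_work {
	void *files;
};

/*
 * Worker-side check: with files always initialized, testing the pointer
 * replaces a dedicated "needs files" work flag.
 */
static void demo_worker_run(const struct demo_work *work)
{
	if (work->files)
		printf("worker: install caller's file table before running\n");
	else
		printf("worker: run without a file table\n");
}

int main(void)
{
	int caller_files;
	struct demo_work with_files = { .files = &caller_files };
	struct demo_work without_files = { .files = NULL };

	demo_worker_run(&with_files);
	demo_worker_run(&without_files);
	return 0;
}
```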