| author | Pavel Begunkov <asml.silence@gmail.com> | 2021-03-06 11:02:12 +0000 |
|---|---|---|
| committer | Jens Axboe <axboe@kernel.dk> | 2021-03-07 14:12:43 -0700 |
| commit | 13bf43f5f4739739751c0049a1582610c283bdde | |
| tree | 8871494c81857a87106bea42e0a636f947101a7f /fs | |
| parent | 2941267bd3dad018de1d51fe2cd996b7bc1e5a5d | |
io_uring: introduce ctx to tctx back map
For each tctx-ctx pair, create an object and chain it into the ctx, so we
have a way to traverse all tctx that are using the current ctx. This is a
preparation patch; the mapping will be used by later patches.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'fs')
-rw-r--r-- | fs/io_uring.c | 58 |
1 file changed, 44 insertions, 14 deletions
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 00a736867b76..9a2cff0662e0 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -454,6 +454,7 @@ struct io_ring_ctx {
 
         /* Keep this last, we don't need it for the fast path */
         struct work_struct      exit_work;
+        struct list_head        tctx_list;
 };
 
 /*
@@ -805,6 +806,13 @@ struct io_kiocb {
         struct io_wq_work       work;
 };
 
+struct io_tctx_node {
+        struct list_head        ctx_node;
+        struct task_struct      *task;
+        struct file             *file;
+        struct io_ring_ctx      *ctx;
+};
+
 struct io_defer_entry {
         struct list_head        list;
         struct io_kiocb         *req;
@@ -1144,6 +1152,7 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
         INIT_LIST_HEAD(&ctx->rsrc_ref_list);
         INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work);
         init_llist_head(&ctx->rsrc_put_llist);
+        INIT_LIST_HEAD(&ctx->tctx_list);
         INIT_LIST_HEAD(&ctx->submit_state.comp.free_list);
         INIT_LIST_HEAD(&ctx->submit_state.comp.locked_free_list);
         return ctx;
@@ -8748,6 +8757,7 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
 static int io_uring_add_task_file(struct io_ring_ctx *ctx, struct file *file)
 {
         struct io_uring_task *tctx = current->io_uring;
+        struct io_tctx_node *node;
         int ret;
 
         if (unlikely(!tctx)) {
@@ -8760,13 +8770,25 @@ static int io_uring_add_task_file(struct io_ring_ctx *ctx, struct file *file)
                 void *old = xa_load(&tctx->xa, (unsigned long)file);
 
                 if (!old) {
+                        node = kmalloc(sizeof(*node), GFP_KERNEL);
+                        if (!node)
+                                return -ENOMEM;
+                        node->ctx = ctx;
+                        node->file = file;
+                        node->task = current;
+
                         get_file(file);
                         ret = xa_err(xa_store(&tctx->xa, (unsigned long)file,
-                                              file, GFP_KERNEL));
+                                              node, GFP_KERNEL));
                         if (ret) {
                                 fput(file);
+                                kfree(node);
                                 return ret;
                         }
+
+                        mutex_lock(&ctx->uring_lock);
+                        list_add(&node->ctx_node, &ctx->tctx_list);
+                        mutex_unlock(&ctx->uring_lock);
                 }
                 tctx->last = file;
         }
@@ -8788,23 +8810,31 @@ static int io_uring_add_task_file(struct io_ring_ctx *ctx, struct file *file)
 static void io_uring_del_task_file(unsigned long index)
 {
         struct io_uring_task *tctx = current->io_uring;
-        struct file *file;
+        struct io_tctx_node *node;
 
-        file = xa_erase(&tctx->xa, index);
-        if (!file)
+        node = xa_erase(&tctx->xa, index);
+        if (!node)
                 return;
 
-        if (tctx->last == file)
+        WARN_ON_ONCE(current != node->task);
+        WARN_ON_ONCE(list_empty(&node->ctx_node));
+
+        mutex_lock(&node->ctx->uring_lock);
+        list_del(&node->ctx_node);
+        mutex_unlock(&node->ctx->uring_lock);
+
+        if (tctx->last == node->file)
                 tctx->last = NULL;
-        fput(file);
+        fput(node->file);
+        kfree(node);
 }
 
 static void io_uring_clean_tctx(struct io_uring_task *tctx)
 {
-        struct file *file;
+        struct io_tctx_node *node;
         unsigned long index;
 
-        xa_for_each(&tctx->xa, index, file)
+        xa_for_each(&tctx->xa, index, node)
                 io_uring_del_task_file(index);
         if (tctx->io_wq) {
                 io_wq_put_and_exit(tctx->io_wq);
@@ -8815,13 +8845,13 @@ static void io_uring_clean_tctx(struct io_uring_task *tctx)
 void __io_uring_files_cancel(struct files_struct *files)
 {
         struct io_uring_task *tctx = current->io_uring;
-        struct file *file;
+        struct io_tctx_node *node;
         unsigned long index;
 
         /* make sure overflow events are dropped */
         atomic_inc(&tctx->in_idle);
-        xa_for_each(&tctx->xa, index, file)
-                io_uring_cancel_task_requests(file->private_data, files);
+        xa_for_each(&tctx->xa, index, node)
+                io_uring_cancel_task_requests(node->ctx, files);
         atomic_dec(&tctx->in_idle);
 
         if (files)
@@ -8884,11 +8914,11 @@ void __io_uring_task_cancel(void)
         atomic_inc(&tctx->in_idle);
 
         if (tctx->sqpoll) {
-                struct file *file;
+                struct io_tctx_node *node;
                 unsigned long index;
 
-                xa_for_each(&tctx->xa, index, file)
-                        io_uring_cancel_sqpoll(file->private_data);
+                xa_for_each(&tctx->xa, index, node)
+                        io_uring_cancel_sqpoll(node->ctx);
         }
 
         do {
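The core idea of the patch, a small per-(task, ctx) node chained into the ctx so the ctx can later walk every task context registered with it, can be illustrated with a self-contained userspace sketch. This is not kernel code and not part of the patch; the demo_* names are hypothetical stand-ins for io_ring_ctx, io_uring_task and io_tctx_node, and a plain singly-linked list stands in for the kernel's list_head.

/*
 * Userspace sketch only (hypothetical demo_* names): one node per
 * (task, ctx) pair, chained into the ctx, so all tasks using a ctx
 * can be traversed -- the "back map" this patch introduces.
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_node {
        struct demo_node *next;         /* analogue of io_tctx_node.ctx_node */
        int task_id;                    /* stands in for struct task_struct * */
};

struct demo_ctx {
        struct demo_node *nodes;        /* analogue of ctx->tctx_list */
};

/* analogue of io_uring_add_task_file(): register a task with a ctx */
static int demo_add_task(struct demo_ctx *ctx, int task_id)
{
        struct demo_node *node = malloc(sizeof(*node));

        if (!node)
                return -1;
        node->task_id = task_id;
        node->next = ctx->nodes;        /* list_add() equivalent */
        ctx->nodes = node;
        return 0;
}

/* the traversal the back map enables: visit every task using this ctx */
static void demo_for_each_task(struct demo_ctx *ctx)
{
        struct demo_node *node;

        for (node = ctx->nodes; node; node = node->next)
                printf("ctx %p used by task %d\n", (void *)ctx, node->task_id);
}

/* analogue of io_uring_clean_tctx(): unchain and free every node */
static void demo_clean_ctx(struct demo_ctx *ctx)
{
        struct demo_node *node;

        while ((node = ctx->nodes)) {
                ctx->nodes = node->next;        /* list_del() equivalent */
                free(node);                     /* kfree() equivalent */
        }
}

int main(void)
{
        struct demo_ctx ctx = { .nodes = NULL };

        demo_add_task(&ctx, 100);
        demo_add_task(&ctx, 101);
        demo_for_each_task(&ctx);
        demo_clean_ctx(&ctx);
        return 0;
}

In the patch itself the node additionally records the owning task and file, and list_add()/list_del() on ctx->tctx_list are serialized by ctx->uring_lock; the sketch omits that locking.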