author    Jens Axboe <axboe@kernel.dk>    2024-02-02 10:06:38 -0700
committer Jens Axboe <axboe@kernel.dk>    2024-02-08 13:27:06 -0700
commit    42c0905f0cac9a86d2cb8138665a6d62ea607078 (patch)
tree      35bc5e66e87e7ba8bbfffbb2ee2d9ee77b3c6c16 /io_uring
parent    3cdc4be114a9be61b7041a53e44aa71718a7cf28 (diff)
io_uring: cleanup handle_tw_list() calling convention
Now that we don't loop around task_work anymore, there's no point in maintaining the ring and locked state outside of handle_tw_list(). Get rid of passing in those pointers (and pointers to pointers) and just do the management internally in handle_tw_list().

Signed-off-by: Jens Axboe <axboe@kernel.dk>
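For context, a minimal userspace analogue of the cleanup described above (a sketch only: names such as handle_list, flush_and_put, struct tw_state, and struct work are invented for illustration and are not the kernel code). The shape mirrors the patch: the list walker owns the per-ring context and lock state locally and performs the final flush itself, so the caller no longer passes in pointers (or pointers to pointers).

/*
 * Hypothetical userspace sketch of the same calling-convention cleanup.
 * The handler owns ctx/ts and flushes before returning; the caller just
 * hands it the list head.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct ctx { int id; };

struct work {
	struct ctx *ctx;
	struct work *next;
};

struct tw_state { bool locked; };

/* Stand-in for ctx_flush_and_put(): drop per-ctx state, if any. */
static void flush_and_put(struct ctx *ctx, struct tw_state *ts)
{
	if (!ctx)
		return;
	if (ts->locked) {
		ts->locked = false;
		printf("unlock ctx %d\n", ctx->id);
	}
	printf("flush ctx %d\n", ctx->id);
}

/* After the cleanup: ctx and ts are local, final flush happens here. */
static unsigned int handle_list(struct work *node)
{
	struct ctx *ctx = NULL;
	struct tw_state ts = { };
	unsigned int count = 0;

	while (node) {
		struct work *next = node->next;

		if (node->ctx != ctx) {
			flush_and_put(ctx, &ts);
			ctx = node->ctx;
			ts.locked = true;	/* pretend the trylock succeeded */
		}
		printf("run work on ctx %d\n", ctx->id);
		node = next;
		count++;
	}
	flush_and_put(ctx, &ts);
	return count;
}

int main(void)
{
	struct ctx a = { .id = 1 }, b = { .id = 2 };
	struct work w3 = { .ctx = &b, .next = NULL };
	struct work w2 = { .ctx = &a, .next = &w3 };
	struct work w1 = { .ctx = &a, .next = &w2 };

	/* The caller is now just a plain call; no state is passed in. */
	printf("handled %u items\n", handle_list(&w1));
	return 0;
}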
Diffstat (limited to 'io_uring')
-rw-r--r--    io_uring/io_uring.c    29
1 file changed, 13 insertions(+), 16 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index d31e8b110de9..0b1a065a21c1 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1173,10 +1173,10 @@ static void ctx_flush_and_put(struct io_ring_ctx *ctx, struct io_tw_state *ts)
percpu_ref_put(&ctx->refs);
}
-static unsigned int handle_tw_list(struct llist_node *node,
- struct io_ring_ctx **ctx,
- struct io_tw_state *ts)
+static unsigned int handle_tw_list(struct llist_node *node)
{
+ struct io_ring_ctx *ctx = NULL;
+ struct io_tw_state ts = { };
unsigned int count = 0;
do {
@@ -1184,25 +1184,26 @@ static unsigned int handle_tw_list(struct llist_node *node,
struct io_kiocb *req = container_of(node, struct io_kiocb,
io_task_work.node);
- if (req->ctx != *ctx) {
- ctx_flush_and_put(*ctx, ts);
- *ctx = req->ctx;
+ if (req->ctx != ctx) {
+ ctx_flush_and_put(ctx, &ts);
+ ctx = req->ctx;
/* if not contended, grab and improve batching */
- ts->locked = mutex_trylock(&(*ctx)->uring_lock);
- percpu_ref_get(&(*ctx)->refs);
+ ts.locked = mutex_trylock(&ctx->uring_lock);
+ percpu_ref_get(&ctx->refs);
}
INDIRECT_CALL_2(req->io_task_work.func,
io_poll_task_func, io_req_rw_complete,
- req, ts);
+ req, &ts);
node = next;
count++;
if (unlikely(need_resched())) {
- ctx_flush_and_put(*ctx, ts);
- *ctx = NULL;
+ ctx_flush_and_put(ctx, &ts);
+ ctx = NULL;
cond_resched();
}
} while (node);
+ ctx_flush_and_put(ctx, &ts);
return count;
}
@@ -1250,8 +1251,6 @@ static __cold void io_fallback_tw(struct io_uring_task *tctx, bool sync)
void tctx_task_work(struct callback_head *cb)
{
- struct io_tw_state ts = {};
- struct io_ring_ctx *ctx = NULL;
struct io_uring_task *tctx = container_of(cb, struct io_uring_task,
task_work);
struct llist_node *node;
@@ -1264,9 +1263,7 @@ void tctx_task_work(struct callback_head *cb)
node = llist_del_all(&tctx->task_list);
if (node)
- count = handle_tw_list(llist_reverse_order(node), &ctx, &ts);
-
- ctx_flush_and_put(ctx, &ts);
+ count = handle_tw_list(llist_reverse_order(node));
/* relaxed read is enough as only the task itself sets ->in_cancel */
if (unlikely(atomic_read(&tctx->in_cancel)))