author      Jens Axboe <axboe@kernel.dk>    2024-01-29 20:59:18 -0700
committer   Jens Axboe <axboe@kernel.dk>    2024-02-08 13:27:06 -0700
commit      8435c6f380d622639d8acbc0af585d941396fa57 (patch)
tree        fd6ef35de4c6faf1fead3ba35d31027643f54e95 /io_uring/kbuf.c
parent      949249e25f1098315a971b70b893c1a2e2e4a819 (diff)
io_uring/kbuf: cleanup passing back cflags
We have various functions calculating the CQE cflags we need to pass
back, but it's all the same everywhere. Make a number of the putting
functions void, and just have the two main helpers for this,
io_put_kbuf() and io_put_kbuf_comp(), calculate the actual mask and
pass it back.

While at it, clean up how we put REQ_F_BUFFER_RING buffers. Before
this change, we would call into __io_put_kbuf() only to go right back
into the header defined functions. As clearing this type of buffer is
just re-assigning the buf_index and incrementing the head, this is
very wasteful.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
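[Editor's note] The mask the two helpers now compute is the standard CQE
buffer encoding: the IORING_CQE_F_BUFFER bit plus the buffer id shifted by
IORING_CQE_BUFFER_SHIFT. A minimal sketch of what the header-side
io_put_kbuf() could look like after this change — illustrative only, not
the verbatim patch; __io_put_kbuf_ring() is an assumed name for the inline
ring fast path sketched after the diff below:

/* Sketch, not the verbatim patch: compute the CQE cflags in one place,
 * then dispatch to the (now void) putting helpers.
 */
static inline unsigned int io_put_kbuf(struct io_kiocb *req,
				       unsigned issue_flags)
{
	unsigned int cflags;

	if (!(req->flags & (REQ_F_BUFFER_SELECTED | REQ_F_BUFFER_RING)))
		return 0;

	/* the buffer bit plus the buffer id in the upper bits */
	cflags = IORING_CQE_F_BUFFER |
		 (req->buf_index << IORING_CQE_BUFFER_SHIFT);

	if (req->flags & REQ_F_BUFFER_RING)
		__io_put_kbuf_ring(req);	 /* assumed inline fast path */
	else
		__io_put_kbuf(req, issue_flags); /* now returns void */
	return cflags;
}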
Diffstat (limited to 'io_uring/kbuf.c')
-rw-r--r--    io_uring/kbuf.c    14
1 file changed, 4 insertions(+), 10 deletions(-)
diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index 71880615bb78..ee866d646997 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -102,10 +102,8 @@ bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
 	return true;
 }
 
-unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
+void __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
 {
-	unsigned int cflags;
-
 	/*
 	 * We can add this buffer back to two lists:
 	 *
@@ -118,21 +116,17 @@ unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
 	 * We migrate buffers from the comp_list to the issue cache list
 	 * when we need one.
 	 */
-	if (req->flags & REQ_F_BUFFER_RING) {
-		/* no buffers to recycle for this case */
-		cflags = __io_put_kbuf_list(req, NULL);
-	} else if (issue_flags & IO_URING_F_UNLOCKED) {
+	if (issue_flags & IO_URING_F_UNLOCKED) {
 		struct io_ring_ctx *ctx = req->ctx;
 
 		spin_lock(&ctx->completion_lock);
-		cflags = __io_put_kbuf_list(req, &ctx->io_buffers_comp);
+		__io_put_kbuf_list(req, &ctx->io_buffers_comp);
 		spin_unlock(&ctx->completion_lock);
 	} else {
 		lockdep_assert_held(&req->ctx->uring_lock);
 
-		cflags = __io_put_kbuf_list(req, &req->ctx->io_buffers_cache);
+		__io_put_kbuf_list(req, &req->ctx->io_buffers_cache);
 	}
-	return cflags;
 }
 
 static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
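
[Editor's note] The "very wasteful" path the commit message refers to is
the REQ_F_BUFFER_RING case, which previously bounced from the header into
__io_put_kbuf() only to come right back. A minimal sketch of the inline
fast path the message describes, assuming req->buf_list carries the ring's
bgid and head fields — names are assumptions, not copied from the patch:

/* Sketch under assumed field names: putting a ring-provided buffer is
 * just restoring the buffer group id and consuming one ring entry; no
 * list manipulation or locking is needed.
 */
static inline void __io_put_kbuf_ring(struct io_kiocb *req)
{
	if (req->buf_list) {
		req->buf_index = req->buf_list->bgid;
		req->buf_list->head++;
	}
	req->flags &= ~REQ_F_BUFFER_RING;
}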