author | Pavel Begunkov <asml.silence@gmail.com> | 2023-08-24 23:53:26 +0100 |
---|---|---|
committer | Jens Axboe <axboe@kernel.dk> | 2023-08-24 17:16:19 -0600 |
commit | 20d6b633870495fda1d92d283ebf890d80f68ecd (patch) |
tree | 7524f936651157f0f4df43444f89f5f718e6004d /io_uring | |
parent | b24c5d752962fa0970cd7e3d74b1cd0e843358de (diff) |
io_uring: refactor __io_get_cqe()
Make __io_get_cqe simpler by not grabbing the cqe from the refilled cache,
but letting io_get_cqe() do it for us. That's cleaner and removes some
duplication.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/74dc8fdf2657e438b2e05e1d478a3596924604e9.1692916914.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'io_uring')
-rw-r--r-- | io_uring/io_uring.c | 13 |
-rw-r--r-- | io_uring/io_uring.h | 23 |
2 files changed, 16 insertions, 20 deletions
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 0aeb33256a6d..de05831eeca7 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -818,7 +818,7 @@ void io_req_cqe_overflow(struct io_kiocb *req)
  * control dependency is enough as we're using WRITE_ONCE to
  * fill the cq entry
  */
-struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow)
+bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow)
 {
 	struct io_rings *rings = ctx->rings;
 	unsigned int off = ctx->cached_cq_tail & (ctx->cq_entries - 1);
@@ -830,7 +830,7 @@ struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow)
 	 * Force overflow the completion.
 	 */
 	if (!overflow && (ctx->check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT)))
-		return NULL;
+		return false;
 
 	/* userspace may cheat modifying the tail, be safe and do min */
 	queued = min(__io_cqring_events(ctx), ctx->cq_entries);
@@ -838,7 +838,7 @@ struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow)
 	/* we need a contiguous range, limit based on the current array offset */
 	len = min(free, ctx->cq_entries - off);
 	if (!len)
-		return NULL;
+		return false;
 
 	if (ctx->flags & IORING_SETUP_CQE32) {
 		off <<= 1;
@@ -847,12 +847,7 @@ struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow)
 
 	ctx->cqe_cached = &rings->cqes[off];
 	ctx->cqe_sentinel = ctx->cqe_cached + len;
-
-	ctx->cached_cq_tail++;
-	ctx->cqe_cached++;
-	if (ctx->flags & IORING_SETUP_CQE32)
-		ctx->cqe_cached++;
-	return &rings->cqes[off];
+	return true;
 }
 
 static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 9b5dfb6ef484..9c80d20fe18f 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -38,7 +38,7 @@ enum {
 	IOU_STOP_MULTISHOT	= -ECANCELED,
 };
 
-struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow);
+bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow);
 void io_req_cqe_overflow(struct io_kiocb *req);
 int io_run_task_work_sig(struct io_ring_ctx *ctx);
 void io_req_defer_failed(struct io_kiocb *req, s32 res);
@@ -112,19 +112,20 @@ static inline void io_req_task_work_add(struct io_kiocb *req)
 static inline struct io_uring_cqe *io_get_cqe_overflow(struct io_ring_ctx *ctx,
 						       bool overflow)
 {
-	io_lockdep_assert_cq_locked(ctx);
+	struct io_uring_cqe *cqe;
 
-	if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
-		struct io_uring_cqe *cqe = ctx->cqe_cached;
+	io_lockdep_assert_cq_locked(ctx);
 
-		ctx->cached_cq_tail++;
-		ctx->cqe_cached++;
-		if (ctx->flags & IORING_SETUP_CQE32)
-			ctx->cqe_cached++;
-		return cqe;
+	if (unlikely(ctx->cqe_cached >= ctx->cqe_sentinel)) {
+		if (unlikely(!io_cqe_cache_refill(ctx, overflow)))
+			return NULL;
 	}
-
-	return __io_get_cqe(ctx, overflow);
+	cqe = ctx->cqe_cached;
+	ctx->cached_cq_tail++;
+	ctx->cqe_cached++;
+	if (ctx->flags & IORING_SETUP_CQE32)
+		ctx->cqe_cached++;
+	return cqe;
 }
 
 static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
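With the two io_uring.h hunks applied, the CQE fast path ends up looking roughly as follows (a reconstruction from the diff above; the comments are annotations added here, not part of the patch):

/* Post-patch io_uring.h fast path, reconstructed from the hunks above. */
static inline struct io_uring_cqe *io_get_cqe_overflow(struct io_ring_ctx *ctx,
						       bool overflow)
{
	struct io_uring_cqe *cqe;

	io_lockdep_assert_cq_locked(ctx);

	/* Slow path: cache exhausted, ask io_cqe_cache_refill() for a new range. */
	if (unlikely(ctx->cqe_cached >= ctx->cqe_sentinel)) {
		if (unlikely(!io_cqe_cache_refill(ctx, overflow)))
			return NULL;
	}
	/* Fast path: hand out the next cached CQE and advance the cache. */
	cqe = ctx->cqe_cached;
	ctx->cached_cq_tail++;
	ctx->cqe_cached++;
	if (ctx->flags & IORING_SETUP_CQE32)
		ctx->cqe_cached++;	/* 32-byte CQEs occupy two slots */
	return cqe;
}

The cqe-grabbing and tail/cache advancement now live only here, while io_cqe_cache_refill() in io_uring.c is reduced to refilling cqe_cached/cqe_sentinel and reporting success or failure.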