author    Jens Axboe <axboe@kernel.dk>	2021-01-24 16:58:56 -0700
committer Jens Axboe <axboe@kernel.dk>	2021-01-24 18:13:56 -0700
commit    b18032bb0a883cd7edd22a7fe6c57e1059b81ed0
tree      c42ad53c60d1696372582e41fd7354d867ff4188
parent    84965ff8a84f0368b154c9b367b62e59c1193f30
io_uring: only call io_cqring_ev_posted() if events were posted
This normally doesn't cause any extra harm, but it does mean that we'll
increment the eventfd notification count, if one has been registered
with the ring. This can confuse applications, when they see more
notifications on the eventfd side than are available in the ring.

Do the nice thing and only increment this count, if we actually posted
(or even overflowed) events.

Reported-and-tested-by: Dan Melnic <dmm@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
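To make the application-visible symptom concrete, here is a minimal userspace
sketch, assuming liburing is available; the eventfd registration, the no-op
request and the printed comparison are illustrative and not part of this patch.
An application that pairs a registered eventfd with CQ reaping expects the
eventfd count and the number of ready CQEs to move together; before this
change, an overflow flush that posted nothing could still signal the eventfd,
so the count could run ahead of the CQEs actually available.

/*
 * Sketch: register an eventfd with the ring and compare the eventfd
 * counter against the CQEs that are actually ready. Built on liburing;
 * error handling is abbreviated.
 */
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	uint64_t notifications;
	int efd;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	efd = eventfd(0, 0);
	if (efd < 0)
		return 1;
	/* the kernel signals efd whenever it posts CQEs to this ring */
	io_uring_register_eventfd(&ring, efd);

	/* queue a single no-op so exactly one CQE should be posted */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_nop(sqe);
	io_uring_submit(&ring);

	/* blocks until the ring has signalled the eventfd at least once */
	read(efd, &notifications, sizeof(notifications));

	/* an application expects these two views to stay consistent */
	printf("eventfd count %llu, CQEs ready %u\n",
	       (unsigned long long) notifications, io_uring_cq_ready(&ring));

	io_uring_queue_exit(&ring);
	close(efd);
	return 0;
}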
 fs/io_uring.c | 10
 1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 695fe00bafdc..2166c469789d 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1779,12 +1779,13 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
 	struct io_kiocb *req, *tmp;
 	struct io_uring_cqe *cqe;
 	unsigned long flags;
-	bool all_flushed;
+	bool all_flushed, posted;
 	LIST_HEAD(list);
 
 	if (!force && __io_cqring_events(ctx) == rings->cq_ring_entries)
 		return false;
 
+	posted = false;
 	spin_lock_irqsave(&ctx->completion_lock, flags);
 	list_for_each_entry_safe(req, tmp, &ctx->cq_overflow_list, compl.list) {
 		if (!io_match_task(req, tsk, files))
@@ -1804,6 +1805,7 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
 			WRITE_ONCE(ctx->rings->cq_overflow,
 				   ctx->cached_cq_overflow);
 		}
+		posted = true;
 	}
 
 	all_flushed = list_empty(&ctx->cq_overflow_list);
@@ -1813,9 +1815,11 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
 		ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW;
 	}
 
-	io_commit_cqring(ctx);
+	if (posted)
+		io_commit_cqring(ctx);
 	spin_unlock_irqrestore(&ctx->completion_lock, flags);
-	io_cqring_ev_posted(ctx);
+	if (posted)
+		io_cqring_ev_posted(ctx);
 
 	while (!list_empty(&list)) {
 		req = list_first_entry(&list, struct io_kiocb, compl.list);