author		Linus Torvalds <torvalds@linux-foundation.org>	2024-02-02 10:45:17 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2024-02-02 10:45:17 -0800
commit		717ca0b8e55eea49c5d71c026eafbe1e64d4b556 (patch)
tree		40889fa3e19f9626e5fe9184abef01ec3163ebb8
parent		ec86369c88f6794a6cfa0383f715f276305399ed (diff)
parent		72bd80252feeb3bef8724230ee15d9f7ab541c6e (diff)
Merge tag 'io_uring-6.8-2024-02-01' of git://git.kernel.dk/linux
Pull io_uring fixes from Jens Axboe:

 - Fix for missing retry for read multishot. If we trigger the
   execution of it and there's more than one buffer to be read, then we
   don't always read more than the first one. As it's edge triggered,
   this can lead to stalls.

 - Limit inline receive multishot retries for fairness reasons. If we
   have a very bursty socket receiving data, we still need to ensure we
   process other requests as well. This is really two minor cleanups,
   then adding a way for poll reissue to trigger a requeue, and then
   finally having multishot receive utilize that.

 - Fix for a weird corner case for non-multishot receive with
   MSG_WAITALL, using provided buffers, and setting the length to zero
   (to let the buffer dictate the receive size).

* tag 'io_uring-6.8-2024-02-01' of git://git.kernel.dk/linux:
  io_uring/net: fix sr->len for IORING_OP_RECV with MSG_WAITALL and buffers
  io_uring/net: limit inline multishot retries
  io_uring/poll: add requeue return code from poll multishot handling
  io_uring/net: un-indent mshot retry path in io_recv_finish()
  io_uring/poll: move poll execution helpers higher up
  io_uring/rw: ensure poll based multishot read retries appropriately
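For background, a minimal consumer of multishot receive might look like the
sketch below. It assumes liburing, a connected socket fd, and a provided-buffer
ring registered as group 0 elsewhere; it is illustrative only and not part of
this pull.

/*
 * Sketch only: a multishot receive consumer using liburing. Error
 * handling and buffer recycling are abbreviated.
 */
#include <liburing.h>
#include <stdio.h>

static int recv_multishot(struct io_uring *ring, int fd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;

	/* len == 0 lets the provided buffer dictate the receive size,
	 * the MSG_WAITALL corner case fixed in this pull. */
	io_uring_prep_recv_multishot(sqe, fd, NULL, 0, 0);
	sqe->flags |= IOSQE_BUFFER_SELECT;
	sqe->buf_group = 0;		/* assumed buffer group id */
	io_uring_submit(ring);

	for (;;) {
		int ret = io_uring_wait_cqe(ring, &cqe);

		if (ret < 0)
			return ret;
		if (cqe->res > 0)
			printf("received %d bytes\n", cqe->res);
		/*
		 * Without IORING_CQE_F_MORE the kernel has stopped this
		 * multishot request; userspace must re-arm it.
		 */
		if (!(cqe->flags & IORING_CQE_F_MORE)) {
			ret = cqe->res < 0 ? cqe->res : 0;
			io_uring_cqe_seen(ring, cqe);
			return ret;
		}
		io_uring_cqe_seen(ring, cqe);
	}
}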
Diffstat:
 io_uring/io_uring.h |  8 +++++++-
 io_uring/net.c      | 50 ++++++++++++++++++++++++++++++++++++------------------
 io_uring/poll.c     | 49 ++++++++++++++++++++++++++++---------------------
 io_uring/poll.h     |  9 +++++++++
 io_uring/rw.c       | 10 +++++++++-
 5 files changed, 89 insertions(+), 37 deletions(-)
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 04e33f25919c..d5495710c178 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -15,12 +15,18 @@
 #include <trace/events/io_uring.h>
 #endif
 
-
 enum {
 	IOU_OK				= 0,
 	IOU_ISSUE_SKIP_COMPLETE		= -EIOCBQUEUED,
 
 	/*
+	 * Requeue the task_work to restart operations on this request. The
+	 * actual value isn't important, should just be not an otherwise
+	 * valid error code, yet less than -MAX_ERRNO and valid internally.
+	 */
+	IOU_REQUEUE			= -3072,
+
+	/*
 	 * Intended only when both IO_URING_F_MULTISHOT is passed
 	 * to indicate to the poll runner that multishot should be
 	 * removed and the result is set on req->cqe.res.
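IOU_REQUEUE is an internal sentinel rather than an errno: callers such as the
poll code below compare against it explicitly before falling back to generic
error handling. A hedged userspace model of that dispatch order (the constants
mirror the hunk above; the program itself is illustrative, not kernel code):

#include <stdio.h>

#define MAX_ERRNO	4095
#define IOU_REQUEUE	(-3072)		/* sentinel from the hunk above */

static void dispatch(int ret)
{
	/* Sentinels are checked first, so they only need to avoid the
	 * -errno values this operation can actually return. */
	if (ret == IOU_REQUEUE) {
		puts("requeue task_work, retry from there");
		return;
	}
	if (ret < 0 && ret >= -MAX_ERRNO) {
		printf("plain error: %d\n", ret);
		return;
	}
	puts("completed");
}

int main(void)
{
	dispatch(IOU_REQUEUE);	/* requeue */
	dispatch(-11);		/* -EAGAIN on Linux: plain error */
	dispatch(0);		/* completed */
	return 0;
}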
diff --git a/io_uring/net.c b/io_uring/net.c
index 75d494dad7e2..43bc9a5f96f9 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -60,6 +60,7 @@ struct io_sr_msg {
 	unsigned			len;
 	unsigned			done_io;
 	unsigned			msg_flags;
+	unsigned			nr_multishot_loops;
 	u16				flags;
 	/* initialised and used only by !msg send variants */
 	u16				addr_len;
@@ -70,6 +71,13 @@ struct io_sr_msg {
 	struct io_kiocb 		*notif;
 };
 
+/*
+ * Number of times we'll try and do receives if there's more data. If we
+ * exceed this limit, then add us to the back of the queue and retry from
+ * there. This helps fairness between flooding clients.
+ */
+#define MULTISHOT_MAX_RETRY	32
+
 static inline bool io_check_multishot(struct io_kiocb *req,
 				      unsigned int issue_flags)
 {
@@ -611,6 +619,7 @@ int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 		sr->msg_flags |= MSG_CMSG_COMPAT;
 #endif
 	sr->done_io = 0;
+	sr->nr_multishot_loops = 0;
 	return 0;
 }
 
@@ -645,23 +654,35 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
 		return true;
 	}
 
-	if (!mshot_finished) {
-		if (io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
-					*ret, cflags | IORING_CQE_F_MORE)) {
-			io_recv_prep_retry(req);
-			/* Known not-empty or unknown state, retry */
-			if (cflags & IORING_CQE_F_SOCK_NONEMPTY ||
-			    msg->msg_inq == -1)
+	if (mshot_finished)
+		goto finish;
+
+	/*
+	 * Fill CQE for this receive and see if we should keep trying to
+	 * receive from this socket.
+	 */
+	if (io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
+				*ret, cflags | IORING_CQE_F_MORE)) {
+		struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+		int mshot_retry_ret = IOU_ISSUE_SKIP_COMPLETE;
+
+		io_recv_prep_retry(req);
+		/* Known not-empty or unknown state, retry */
+		if (cflags & IORING_CQE_F_SOCK_NONEMPTY || msg->msg_inq == -1) {
+			if (sr->nr_multishot_loops++ < MULTISHOT_MAX_RETRY)
 				return false;
-			if (issue_flags & IO_URING_F_MULTISHOT)
-				*ret = IOU_ISSUE_SKIP_COMPLETE;
-			else
-				*ret = -EAGAIN;
-			return true;
+			/* mshot retries exceeded, force a requeue */
+			sr->nr_multishot_loops = 0;
+			mshot_retry_ret = IOU_REQUEUE;
 		}
-		/* Otherwise stop multishot but use the current result. */
+		if (issue_flags & IO_URING_F_MULTISHOT)
+			*ret = mshot_retry_ret;
+		else
+			*ret = -EAGAIN;
+		return true;
 	}
-
+	/* Otherwise stop multishot but use the current result. */
+finish:
 	io_req_set_res(req, *ret, cflags);
 
 	if (issue_flags & IO_URING_F_MULTISHOT)
@@ -902,6 +923,7 @@ retry_multishot:
 		if (!buf)
 			return -ENOBUFS;
 		sr->buf = buf;
+		sr->len = len;
 	}
 
 	ret = import_ubuf(ITER_DEST, sr->buf, len, &msg.msg_iter);
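The MULTISHOT_MAX_RETRY cap above bounds how long one bursty socket can be
served inline before its request is pushed to the back of the queue. The same
bounded-drain pattern in isolation, as a self-contained sketch (struct job,
CAP and the helper names are illustrative, not kernel API):

#include <stdbool.h>
#include <stdio.h>

#define CAP 32

struct job { int pending; int inline_loops; };

static bool try_consume_one(struct job *j)
{
	if (!j->pending)
		return false;
	j->pending--;
	return true;
}

/* Returns true if the job hit the cap and must be requeued. */
static bool run_once(struct job *j)
{
	while (try_consume_one(j)) {
		if (++j->inline_loops < CAP)
			continue;
		/* Cap hit: yield so other jobs get a turn. */
		j->inline_loops = 0;
		return true;
	}
	j->inline_loops = 0;	/* drained; reset for the next burst */
	return false;
}

int main(void)
{
	struct job j = { .pending = 100 };
	int passes = 0;

	do
		passes++;
	while (run_once(&j));
	printf("drained in %d passes\n", passes);	/* 4 with CAP=32 */
	return 0;
}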
diff --git a/io_uring/poll.c b/io_uring/poll.c
index d59b74a99d4e..7513afc7b702 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -226,8 +226,29 @@ enum {
 	IOU_POLL_NO_ACTION = 1,
 	IOU_POLL_REMOVE_POLL_USE_RES = 2,
 	IOU_POLL_REISSUE = 3,
+	IOU_POLL_REQUEUE = 4,
 };
 
+static void __io_poll_execute(struct io_kiocb *req, int mask)
+{
+	unsigned flags = 0;
+
+	io_req_set_res(req, mask, 0);
+	req->io_task_work.func = io_poll_task_func;
+
+	trace_io_uring_task_add(req, mask);
+
+	if (!(req->flags & REQ_F_POLL_NO_LAZY))
+		flags = IOU_F_TWQ_LAZY_WAKE;
+	__io_req_task_work_add(req, flags);
+}
+
+static inline void io_poll_execute(struct io_kiocb *req, int res)
+{
+	if (io_poll_get_ownership(req))
+		__io_poll_execute(req, res);
+}
+
 /*
  * All poll tw should go through this. Checks for poll events, manages
  * references, does rewait, etc.
@@ -309,6 +330,8 @@ static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
 			int ret = io_poll_issue(req, ts);
 			if (ret == IOU_STOP_MULTISHOT)
 				return IOU_POLL_REMOVE_POLL_USE_RES;
+			else if (ret == IOU_REQUEUE)
+				return IOU_POLL_REQUEUE;
 			if (ret < 0)
 				return ret;
 		}
@@ -331,8 +354,12 @@ void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts)
 	int ret;
 
 	ret = io_poll_check_events(req, ts);
-	if (ret == IOU_POLL_NO_ACTION)
+	if (ret == IOU_POLL_NO_ACTION) {
+		return;
+	} else if (ret == IOU_POLL_REQUEUE) {
+		__io_poll_execute(req, 0);
 		return;
+	}
 
 	io_poll_remove_entries(req);
 	io_poll_tw_hash_eject(req, ts);
@@ -364,26 +391,6 @@ void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts)
 	}
 }
 
-static void __io_poll_execute(struct io_kiocb *req, int mask)
-{
-	unsigned flags = 0;
-
-	io_req_set_res(req, mask, 0);
-	req->io_task_work.func = io_poll_task_func;
-
-	trace_io_uring_task_add(req, mask);
-
-	if (!(req->flags & REQ_F_POLL_NO_LAZY))
-		flags = IOU_F_TWQ_LAZY_WAKE;
-	__io_req_task_work_add(req, flags);
-}
-
-static inline void io_poll_execute(struct io_kiocb *req, int res)
-{
-	if (io_poll_get_ownership(req))
-		__io_poll_execute(req, res);
-}
-
 static void io_poll_cancel_req(struct io_kiocb *req)
 {
 	io_poll_mark_cancelled(req);
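io_poll_execute() only runs the request when io_poll_get_ownership() succeeds,
i.e. when this caller bumped poll_refs from zero. A simplified, single-word
model of that ownership scheme using C11 atomics (the kernel additionally
packs flag bits into poll_refs; this sketch does not):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int refs;

static bool get_ownership(void)
{
	/* A previous value of 0 means nobody else is executing. */
	return atomic_fetch_add(&refs, 1) == 0;
}

static void execute(void)
{
	int v;

	do {
		puts("run one poll pass");
		v = atomic_load(&refs);
		/*
		 * Drop the references we have seen; if another bump (like
		 * io_poll_multishot_retry() below) raced in after the
		 * load, the remainder is nonzero and the owner loops to
		 * serve it too.
		 */
	} while (atomic_fetch_sub(&refs, v) != v);
}

int main(void)
{
	if (get_ownership())
		execute();
	return 0;
}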
diff --git a/io_uring/poll.h b/io_uring/poll.h
index ff4d5d753387..1dacae9e816c 100644
--- a/io_uring/poll.h
+++ b/io_uring/poll.h
@@ -24,6 +24,15 @@ struct async_poll {
 	struct io_poll		*double_poll;
 };
 
+/*
+ * Must only be called inside issue_flags & IO_URING_F_MULTISHOT, or
+ * potentially other cases where we already "own" this poll request.
+ */
+static inline void io_poll_multishot_retry(struct io_kiocb *req)
+{
+	atomic_inc(&req->poll_refs);
+}
+
 int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 int io_poll_add(struct io_kiocb *req, unsigned int issue_flags);
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 118cc9f1cf16..d5e79d9bdc71 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -18,6 +18,7 @@
 #include "opdef.h"
 #include "kbuf.h"
 #include "rsrc.h"
+#include "poll.h"
 #include "rw.h"
 
 struct io_rw {
@@ -962,8 +963,15 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
 		if (io_fill_cqe_req_aux(req,
 					issue_flags & IO_URING_F_COMPLETE_DEFER,
 					ret, cflags | IORING_CQE_F_MORE)) {
-			if (issue_flags & IO_URING_F_MULTISHOT)
+			if (issue_flags & IO_URING_F_MULTISHOT) {
+				/*
+				 * Force retry, as we might have more data to
+				 * be read and otherwise it won't get retried
+				 * until (if ever) another poll is triggered.
+				 */
+				io_poll_multishot_retry(req);
 				return IOU_ISSUE_SKIP_COMPLETE;
+			}
 			return -EAGAIN;
 		}
 	}
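Taken together with the poll.h helper, this closes the stall described in the
pull message: read multishot is driven by edge-triggered poll, so completing
one buffer without holding an extra poll reference could leave already-readable
data stranded until another poll event that may never arrive. Bumping
poll_refs before returning IOU_ISSUE_SKIP_COMPLETE guarantees the poll handler
makes at least one more pass and reissues the read.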