author    Jens Axboe <axboe@kernel.dk>  2024-03-05 15:39:16 -0700
committer Jens Axboe <axboe@kernel.dk>  2024-04-15 08:10:25 -0600
commit    4a3223f7bfda14c532856152b12aace525cf8079 (patch)
tree      aa7f270f10943eddfee32e4bda12e6e8c1b5a2c4 /io_uring/net.c
parent    54cdcca05abde32acc3233950ddc79d8be25515f (diff)
io_uring/net: switch io_recv() to using io_async_msghdr
No functional changes in this patch, just preparation for carrying more state than is available now, if necessary.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
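As a hedged orientation sketch (plain userspace C, not kernel code; names like get_state(), setup_async(), and struct request are illustrative stand-ins for the kernel's io_kiocb/io_async_msghdr machinery, not its actual API), the pattern this patch moves io_recv() toward looks roughly like this: use cheap stack state on the first issue, and persist that state to heap-allocated async data only when the operation must be retried, so progress survives across attempts.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct async_state {                    /* stands in for io_async_msghdr */
	size_t consumed;                /* progress to preserve on retry */
	char *free_iov;                 /* heap resources freed on completion */
};

struct request {                        /* stands in for io_kiocb */
	struct async_state *async_data; /* non-NULL once a retry was armed */
};

/* First issue uses cheap stack state; retries reuse the persisted copy. */
static struct async_state *get_state(struct request *req,
				     struct async_state *stack_state)
{
	if (req->async_data)
		return req->async_data;
	memset(stack_state, 0, sizeof(*stack_state));
	return stack_state;
}

/*
 * On a would-block result, copy the stack state to the heap so a retry
 * resumes where this attempt left off (the role io_setup_async_msg()
 * plays in the patch below).
 */
static int setup_async(struct request *req, struct async_state *st)
{
	if (req->async_data)
		return -EAGAIN;         /* already persisted, just retry */
	req->async_data = malloc(sizeof(*st));
	if (!req->async_data)
		return -ENOMEM;
	memcpy(req->async_data, st, sizeof(*st));
	return -EAGAIN;
}

int main(void)
{
	struct request req = { .async_data = NULL };
	struct async_state stack;
	struct async_state *st = get_state(&req, &stack);

	st->consumed = 128;             /* pretend a short, partial receive */
	printf("first pass -> %d\n", setup_async(&req, st));
	st = get_state(&req, &stack);   /* retry resumes from the heap copy */
	printf("resumed at %zu bytes\n", st->consumed);
	free(req.async_data);
	return 0;
}

Nothing is allocated until a retry is actually needed, which is why the fast path stays cheap even though io_async_msghdr carries more state than a bare struct msghdr.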
Diffstat (limited to 'io_uring/net.c')
-rw-r--r--  io_uring/net.c | 75
1 file changed, 47 insertions(+), 28 deletions(-)
diff --git a/io_uring/net.c b/io_uring/net.c
index 240a77976fda..2be4919f99c6 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -320,7 +320,7 @@ static int io_sendmsg_copy_hdr(struct io_kiocb *req,
return ret;
}
-int io_send_prep_async(struct io_kiocb *req)
+int io_sendrecv_prep_async(struct io_kiocb *req)
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
struct io_async_msghdr *io;
@@ -703,13 +703,13 @@ static inline void io_recv_prep_retry(struct io_kiocb *req)
* again (for multishot).
*/
static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
- struct msghdr *msg, bool mshot_finished,
- unsigned issue_flags)
+ struct io_async_msghdr *kmsg,
+ bool mshot_finished, unsigned issue_flags)
{
unsigned int cflags;
cflags = io_put_kbuf(req, issue_flags);
- if (msg->msg_inq > 0)
+ if (kmsg->msg.msg_inq > 0)
cflags |= IORING_CQE_F_SOCK_NONEMPTY;
/*
@@ -723,7 +723,7 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
io_recv_prep_retry(req);
/* Known not-empty or unknown state, retry */
- if (cflags & IORING_CQE_F_SOCK_NONEMPTY || msg->msg_inq < 0) {
+ if (cflags & IORING_CQE_F_SOCK_NONEMPTY || kmsg->msg.msg_inq < 0) {
if (sr->nr_multishot_loops++ < MULTISHOT_MAX_RETRY)
return false;
/* mshot retries exceeded, force a requeue */
@@ -924,7 +924,7 @@ retry_multishot:
else
io_kbuf_recycle(req, issue_flags);
- if (!io_recv_finish(req, &ret, &kmsg->msg, mshot_finished, issue_flags))
+ if (!io_recv_finish(req, &ret, kmsg, mshot_finished, issue_flags))
goto retry_multishot;
if (mshot_finished)
@@ -938,29 +938,42 @@ retry_multishot:
int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
- struct msghdr msg;
+ struct io_async_msghdr iomsg, *kmsg;
struct socket *sock;
unsigned flags;
int ret, min_ret = 0;
bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
size_t len = sr->len;
+ if (req_has_async_data(req)) {
+ kmsg = req->async_data;
+ } else {
+ kmsg = &iomsg;
+ kmsg->free_iov = NULL;
+ kmsg->msg.msg_name = NULL;
+ kmsg->msg.msg_namelen = 0;
+ kmsg->msg.msg_control = NULL;
+ kmsg->msg.msg_get_inq = 1;
+ kmsg->msg.msg_controllen = 0;
+ kmsg->msg.msg_iocb = NULL;
+ kmsg->msg.msg_ubuf = NULL;
+
+ if (!io_do_buffer_select(req)) {
+ ret = import_ubuf(ITER_DEST, sr->buf, sr->len,
+ &kmsg->msg.msg_iter);
+ if (unlikely(ret))
+ return ret;
+ }
+ }
+
if (!(req->flags & REQ_F_POLLED) &&
(sr->flags & IORING_RECVSEND_POLL_FIRST))
- return -EAGAIN;
+ return io_setup_async_msg(req, kmsg, issue_flags);
sock = sock_from_file(req->file);
if (unlikely(!sock))
return -ENOTSOCK;
- msg.msg_name = NULL;
- msg.msg_namelen = 0;
- msg.msg_control = NULL;
- msg.msg_get_inq = 1;
- msg.msg_controllen = 0;
- msg.msg_iocb = NULL;
- msg.msg_ubuf = NULL;
-
flags = sr->msg_flags;
if (force_nonblock)
flags |= MSG_DONTWAIT;
@@ -974,22 +987,23 @@ retry_multishot:
return -ENOBUFS;
sr->buf = buf;
sr->len = len;
+ ret = import_ubuf(ITER_DEST, sr->buf, sr->len,
+ &kmsg->msg.msg_iter);
+ if (unlikely(ret))
+ goto out_free;
}
- ret = import_ubuf(ITER_DEST, sr->buf, len, &msg.msg_iter);
- if (unlikely(ret))
- goto out_free;
-
- msg.msg_inq = -1;
- msg.msg_flags = 0;
+ kmsg->msg.msg_inq = -1;
+ kmsg->msg.msg_flags = 0;
if (flags & MSG_WAITALL)
- min_ret = iov_iter_count(&msg.msg_iter);
+ min_ret = iov_iter_count(&kmsg->msg.msg_iter);
- ret = sock_recvmsg(sock, &msg, flags);
+ ret = sock_recvmsg(sock, &kmsg->msg, flags);
if (ret < min_ret) {
if (ret == -EAGAIN && force_nonblock) {
- if (issue_flags & IO_URING_F_MULTISHOT) {
+ ret = io_setup_async_msg(req, kmsg, issue_flags);
+ if (ret == -EAGAIN && issue_flags & IO_URING_F_MULTISHOT) {
io_kbuf_recycle(req, issue_flags);
return IOU_ISSUE_SKIP_COMPLETE;
}
@@ -1001,12 +1015,12 @@ retry_multishot:
sr->buf += ret;
sr->done_io += ret;
req->flags |= REQ_F_BL_NO_RECYCLE;
- return -EAGAIN;
+ return io_setup_async_msg(req, kmsg, issue_flags);
}
if (ret == -ERESTARTSYS)
ret = -EINTR;
req_set_fail(req);
- } else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
+ } else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
out_free:
req_set_fail(req);
}
@@ -1018,9 +1032,14 @@ out_free:
else
io_kbuf_recycle(req, issue_flags);
- if (!io_recv_finish(req, &ret, &msg, ret <= 0, issue_flags))
+ if (!io_recv_finish(req, &ret, kmsg, ret <= 0, issue_flags))
goto retry_multishot;
+ if (ret == -EAGAIN)
+ return io_setup_async_msg(req, kmsg, issue_flags);
+ else if (ret != IOU_OK && ret != IOU_STOP_MULTISHOT)
+ io_req_msg_cleanup(req, kmsg, issue_flags);
+
return ret;
}
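For context on the io_recv_finish() hunk above: the multishot retry decision is unchanged by this patch, only its argument type is. Below is a minimal compilable sketch of that decision, with an assumed MULTISHOT_MAX_RETRY value and illustrative names; msg_inq mirrors kmsg->msg.msg_inq, where a positive value means the socket is known non-empty and -1 means unknown.

#include <stdbool.h>
#include <stdio.h>

#define MULTISHOT_MAX_RETRY 32  /* assumed cap for this sketch */

struct sr_state {
	int nr_multishot_loops;
};

/*
 * Returns true when the completion is final, false to receive again
 * inline. A positive msg_inq would also set IORING_CQE_F_SOCK_NONEMPTY
 * on the CQE in the real code.
 */
static bool recv_finish(struct sr_state *sr, int msg_inq)
{
	bool nonempty = msg_inq > 0;

	if (nonempty || msg_inq < 0) {
		if (sr->nr_multishot_loops++ < MULTISHOT_MAX_RETRY)
			return false;   /* retry the receive inline */
		/* retries exceeded: the real code forces a requeue here */
	}
	return true;
}

int main(void)
{
	struct sr_state sr = { 0 };
	int loops = 0;

	while (!recv_finish(&sr, -1))   /* unknown state keeps retrying */
		loops++;
	printf("gave up inline after %d loops\n", loops);
	return 0;
}

Capping inline retries keeps one busy socket from starving other completions on the ring; past the cap, the request is requeued rather than looped on.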