Diffstat (limited to 'io_uring')
-rw-r--r--	io_uring/cancel.c	|   2
-rw-r--r--	io_uring/io_uring.c	|  32
-rw-r--r--	io_uring/kbuf.h		|   8
-rw-r--r--	io_uring/msg_ring.c	|   3
-rw-r--r--	io_uring/net.c		| 138
-rw-r--r--	io_uring/net.h		|   2
-rw-r--r--	io_uring/notif.c	|  93
-rw-r--r--	io_uring/notif.h	|  54
-rw-r--r--	io_uring/opdef.c	|  18
-rw-r--r--	io_uring/opdef.h	|   2
-rw-r--r--	io_uring/poll.c		|   2
-rw-r--r--	io_uring/rsrc.c		|  55
-rw-r--r--	io_uring/rsrc.h		|   4
-rw-r--r--	io_uring/rw.c		|  30
-rw-r--r--	io_uring/uring_cmd.c	|   7
15 files changed, 170 insertions, 280 deletions
diff --git a/io_uring/cancel.c b/io_uring/cancel.c
index e4e1dc0325f0..5fc5d3e80fcb 100644
--- a/io_uring/cancel.c
+++ b/io_uring/cancel.c
@@ -218,7 +218,7 @@ static int __io_sync_cancel(struct io_uring_task *tctx,
 	    (cd->flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
 		unsigned long file_ptr;
 
-		if (unlikely(fd > ctx->nr_user_files))
+		if (unlikely(fd >= ctx->nr_user_files))
 			return -EBADF;
 		fd = array_index_nospec(fd, ctx->nr_user_files);
 		file_ptr = io_fixed_file_slot(&ctx->file_table, fd)->file_ptr;
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index ebfdb2212ec2..242d896c00f3 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1450,9 +1450,10 @@ int io_req_prep_async(struct io_kiocb *req)
 		return 0;
 	if (WARN_ON_ONCE(req_has_async_data(req)))
 		return -EFAULT;
-	if (io_alloc_async_data(req))
-		return -EAGAIN;
-
+	if (!io_op_defs[req->opcode].manual_alloc) {
+		if (io_alloc_async_data(req))
+			return -EAGAIN;
+	}
 	return def->prep_async(req);
 }
 
@@ -1727,6 +1728,7 @@ static void io_queue_async(struct io_kiocb *req, int ret)
 
 	switch (io_arm_poll_handler(req, 0)) {
 	case IO_APOLL_READY:
+		io_kbuf_recycle(req, 0);
 		io_req_task_queue(req);
 		break;
 	case IO_APOLL_ABORTED:
@@ -2639,7 +2641,6 @@ static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
 			io_unregister_personality(ctx, index);
 	if (ctx->rings)
 		io_poll_remove_all(ctx, NULL, true);
-	io_notif_unregister(ctx);
 	mutex_unlock(&ctx->uring_lock);
 
 	/* failed during ring init, it couldn't have issued any requests */
@@ -2647,6 +2648,9 @@ static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
 		io_kill_timeouts(ctx, NULL, true);
 		/* if we failed setting up the ctx, we might not have any rings */
 		io_iopoll_try_reap_events(ctx);
+		/* drop cached put refs after potentially doing completions */
+		if (current->io_uring)
+			io_uring_drop_tctx_refs(current);
 	}
 
 	INIT_WORK(&ctx->exit_work, io_ring_exit_work);
@@ -3353,6 +3357,10 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
 		goto err;
 	}
 
+	if (ctx->flags & IORING_SETUP_SINGLE_ISSUER
+	    && !(ctx->flags & IORING_SETUP_R_DISABLED))
+		ctx->submitter_task = get_task_struct(current);
+
 	file = io_uring_get_file(ctx);
 	if (IS_ERR(file)) {
 		ret = PTR_ERR(file);
@@ -3544,6 +3552,9 @@ static int io_register_enable_rings(struct io_ring_ctx *ctx)
 	if (!(ctx->flags & IORING_SETUP_R_DISABLED))
 		return -EBADFD;
 
+	if (ctx->flags & IORING_SETUP_SINGLE_ISSUER && !ctx->submitter_task)
+		ctx->submitter_task = get_task_struct(current);
+
 	if (ctx->restrictions.registered)
 		ctx->restricted = 1;
 
@@ -3838,15 +3849,6 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
 			break;
 		ret = io_register_file_alloc_range(ctx, arg);
 		break;
-	case IORING_REGISTER_NOTIFIERS:
-		ret = io_notif_register(ctx, arg, nr_args);
-		break;
-	case IORING_UNREGISTER_NOTIFIERS:
-		ret = -EINVAL;
-		if (arg || nr_args)
-			break;
-		ret = io_notif_unregister(ctx);
-		break;
 	default:
 		ret = -EINVAL;
 		break;
@@ -3932,8 +3934,8 @@ static int __init io_uring_init(void)
 	BUILD_BUG_SQE_ELEM(42, __u16,  personality);
 	BUILD_BUG_SQE_ELEM(44, __s32,  splice_fd_in);
 	BUILD_BUG_SQE_ELEM(44, __u32,  file_index);
-	BUILD_BUG_SQE_ELEM(44, __u16,  notification_idx);
-	BUILD_BUG_SQE_ELEM(46, __u16,  addr_len);
+	BUILD_BUG_SQE_ELEM(44, __u16,  addr_len);
+	BUILD_BUG_SQE_ELEM(46, __u16,  __pad3[0]);
 	BUILD_BUG_SQE_ELEM(48, __u64,  addr3);
 	BUILD_BUG_SQE_ELEM_SIZE(48, 0, cmd);
 	BUILD_BUG_SQE_ELEM(56, __u64,  __pad2);
diff --git a/io_uring/kbuf.h b/io_uring/kbuf.h
index d6af208d109f..746fbf31a703 100644
--- a/io_uring/kbuf.h
+++ b/io_uring/kbuf.h
@@ -91,9 +91,13 @@ static inline void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
 	 * buffer data. However if that buffer is recycled the original request
 	 * data stored in addr is lost. Therefore forbid recycling for now.
 	 */
-	if (req->opcode == IORING_OP_READV)
+	if (req->opcode == IORING_OP_READV) {
+		if ((req->flags & REQ_F_BUFFER_RING) && req->buf_list) {
+			req->buf_list->head++;
+			req->buf_list = NULL;
+		}
 		return;
-
+	}
 	if (req->flags & REQ_F_BUFFER_SELECTED)
 		io_kbuf_recycle_legacy(req, issue_flags);
 	if (req->flags & REQ_F_BUFFER_RING)
diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c
index 976c4ba68ee7..4a7e5d030c78 100644
--- a/io_uring/msg_ring.c
+++ b/io_uring/msg_ring.c
@@ -165,7 +165,8 @@ done:
 		req_set_fail(req);
 	io_req_set_res(req, ret, 0);
 	/* put file to avoid an attempt to IOPOLL the req */
-	io_put_file(req->file);
+	if (!(req->flags & REQ_F_FIXED_FILE))
+		io_put_file(req->file);
 	req->file = NULL;
 	return IOU_OK;
 }
diff --git a/io_uring/net.c b/io_uring/net.c
index 6d71748e2c5a..60e392f7f2dc 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -65,12 +65,12 @@ struct io_sendzc {
 	struct file			*file;
 	void __user			*buf;
 	size_t				len;
-	u16				slot_idx;
 	unsigned			msg_flags;
 	unsigned			flags;
 	unsigned			addr_len;
 	void __user			*addr;
 	size_t				done_io;
+	struct io_kiocb			*notif;
 };
 
 #define IO_APOLL_MULTI_POLLED (REQ_F_APOLL_MULTISHOT | REQ_F_POLLED)
@@ -116,7 +116,7 @@ static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_async_msghdr *hdr = req->async_data;
 
-	if (!hdr || issue_flags & IO_URING_F_UNLOCKED)
+	if (!req_has_async_data(req) || issue_flags & IO_URING_F_UNLOCKED)
 		return;
 
 	/* Let normal cleanup path reap it if we fail adding to the cache */
@@ -152,9 +152,9 @@ static int io_setup_async_msg(struct io_kiocb *req,
 			       struct io_async_msghdr *kmsg,
 			       unsigned int issue_flags)
 {
-	struct io_async_msghdr *async_msg = req->async_data;
+	struct io_async_msghdr *async_msg;
 
-	if (async_msg)
+	if (req_has_async_data(req))
 		return -EAGAIN;
 	async_msg = io_recvmsg_alloc_async(req, issue_flags);
 	if (!async_msg) {
@@ -182,6 +182,37 @@ static int io_sendmsg_copy_hdr(struct io_kiocb *req,
 					&iomsg->free_iov);
 }
 
+int io_sendzc_prep_async(struct io_kiocb *req)
+{
+	struct io_sendzc *zc = io_kiocb_to_cmd(req, struct io_sendzc);
+	struct io_async_msghdr *io;
+	int ret;
+
+	if (!zc->addr || req_has_async_data(req))
+		return 0;
+	if (io_alloc_async_data(req))
+		return -ENOMEM;
+
+	io = req->async_data;
+	ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
+	return ret;
+}
+
+static int io_setup_async_addr(struct io_kiocb *req,
+			       struct sockaddr_storage *addr,
+			       unsigned int issue_flags)
+{
+	struct io_async_msghdr *io;
+
+	if (!addr || req_has_async_data(req))
+		return -EAGAIN;
+	if (io_alloc_async_data(req))
+		return -ENOMEM;
+	io = req->async_data;
+	memcpy(&io->addr, addr, sizeof(io->addr));
+	return -EAGAIN;
+}
+
 int io_sendmsg_prep_async(struct io_kiocb *req)
 {
 	int ret;
@@ -848,18 +879,39 @@ out_free:
 	return ret;
 }
 
+void io_sendzc_cleanup(struct io_kiocb *req)
+{
+	struct io_sendzc *zc = io_kiocb_to_cmd(req, struct io_sendzc);
+
+	zc->notif->flags |= REQ_F_CQE_SKIP;
+	io_notif_flush(zc->notif);
+	zc->notif = NULL;
+}
+
 int io_sendzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	struct io_sendzc *zc = io_kiocb_to_cmd(req, struct io_sendzc);
 	struct io_ring_ctx *ctx = req->ctx;
+	struct io_kiocb *notif;
 
-	if (READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3))
+	if (READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3) ||
+	    READ_ONCE(sqe->__pad3[0]))
+		return -EINVAL;
+	/* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
+	if (req->flags & REQ_F_CQE_SKIP)
 		return -EINVAL;
 
 	zc->flags = READ_ONCE(sqe->ioprio);
 	if (zc->flags & ~(IORING_RECVSEND_POLL_FIRST |
-			  IORING_RECVSEND_FIXED_BUF | IORING_RECVSEND_NOTIF_FLUSH))
+			  IORING_RECVSEND_FIXED_BUF))
 		return -EINVAL;
+	notif = zc->notif = io_alloc_notif(ctx);
+	if (!notif)
+		return -ENOMEM;
+	notif->cqe.user_data = req->cqe.user_data;
+	notif->cqe.res = 0;
+	notif->cqe.flags = IORING_CQE_F_NOTIF;
+	req->flags |= REQ_F_NEED_CLEANUP;
 	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
 		unsigned idx = READ_ONCE(sqe->buf_index);
 
@@ -867,13 +919,12 @@ int io_sendzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 			return -EFAULT;
 		idx = array_index_nospec(idx, ctx->nr_user_bufs);
 		req->imu = READ_ONCE(ctx->user_bufs[idx]);
-		io_req_set_rsrc_node(req, ctx, 0);
+		io_req_set_rsrc_node(notif, ctx, 0);
 	}
 
 	zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
 	zc->len = READ_ONCE(sqe->len);
 	zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
-	zc->slot_idx = READ_ONCE(sqe->notification_idx);
 	if (zc->msg_flags & MSG_DONTWAIT)
 		req->flags |= REQ_F_NOWAIT;
@@ -925,7 +976,7 @@ static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
 	shinfo->nr_frags = frag;
 	from->bvec += bi.bi_idx;
 	from->nr_segs -= bi.bi_idx;
-	from->count = bi.bi_size;
+	from->count -= copied;
 	from->iov_offset = bi.bi_bvec_done;
 
 	skb->data_len += copied;
@@ -944,62 +995,57 @@ static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
 
 int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
 {
-	struct sockaddr_storage address;
-	struct io_ring_ctx *ctx = req->ctx;
+	struct sockaddr_storage __address, *addr = NULL;
 	struct io_sendzc *zc = io_kiocb_to_cmd(req, struct io_sendzc);
-	struct io_notif_slot *notif_slot;
-	struct io_kiocb *notif;
 	struct msghdr msg;
 	struct iovec iov;
 	struct socket *sock;
-	unsigned msg_flags;
+	unsigned msg_flags, cflags;
 	int ret, min_ret = 0;
 
-	if (!(req->flags & REQ_F_POLLED) &&
-	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
-		return -EAGAIN;
-
-	if (issue_flags & IO_URING_F_UNLOCKED)
-		return -EAGAIN;
 	sock = sock_from_file(req->file);
 	if (unlikely(!sock))
 		return -ENOTSOCK;
 
-	notif_slot = io_get_notif_slot(ctx, zc->slot_idx);
-	if (!notif_slot)
-		return -EINVAL;
-	notif = io_get_notif(ctx, notif_slot);
-	if (!notif)
-		return -ENOMEM;
-
 	msg.msg_name = NULL;
 	msg.msg_control = NULL;
 	msg.msg_controllen = 0;
 	msg.msg_namelen = 0;
 
+	if (zc->addr) {
+		if (req_has_async_data(req)) {
+			struct io_async_msghdr *io = req->async_data;
+
+			msg.msg_name = addr = &io->addr;
+		} else {
+			ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
+			if (unlikely(ret < 0))
+				return ret;
+			msg.msg_name = (struct sockaddr *)&__address;
+			addr = &__address;
+		}
+		msg.msg_namelen = zc->addr_len;
+	}
+
+	if (!(req->flags & REQ_F_POLLED) &&
+	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
+		return io_setup_async_addr(req, addr, issue_flags);
+
 	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
 		ret = io_import_fixed(WRITE, &msg.msg_iter, req->imu,
 					(u64)(uintptr_t)zc->buf, zc->len);
 		if (unlikely(ret))
-				return ret;
+			return ret;
 	} else {
 		ret = import_single_range(WRITE, zc->buf, zc->len, &iov,
 					  &msg.msg_iter);
 		if (unlikely(ret))
 			return ret;
-		ret = io_notif_account_mem(notif, zc->len);
+		ret = io_notif_account_mem(zc->notif, zc->len);
 		if (unlikely(ret))
 			return ret;
 	}
 
-	if (zc->addr) {
-		ret = move_addr_to_kernel(zc->addr, zc->addr_len, &address);
-		if (unlikely(ret < 0))
-			return ret;
-		msg.msg_name = (struct sockaddr *)&address;
-		msg.msg_namelen = zc->addr_len;
-	}
-
 	msg_flags = zc->msg_flags | MSG_ZEROCOPY;
 	if (issue_flags & IO_URING_F_NONBLOCK)
 		msg_flags |= MSG_DONTWAIT;
@@ -1007,31 +1053,37 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
 		min_ret = iov_iter_count(&msg.msg_iter);
 
 	msg.msg_flags = msg_flags;
-	msg.msg_ubuf = &io_notif_to_data(notif)->uarg;
+	msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
 	msg.sg_from_iter = io_sg_from_iter;
 	ret = sock_sendmsg(sock, &msg);
 
 	if (unlikely(ret < min_ret)) {
 		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
-			return -EAGAIN;
+			return io_setup_async_addr(req, addr, issue_flags);
+
 		if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
 			zc->len -= ret;
 			zc->buf += ret;
 			zc->done_io += ret;
 			req->flags |= REQ_F_PARTIAL_IO;
-			return -EAGAIN;
+			return io_setup_async_addr(req, addr, issue_flags);
 		}
+		if (ret < 0 && !zc->done_io)
+			zc->notif->flags |= REQ_F_CQE_SKIP;
 		if (ret == -ERESTARTSYS)
 			ret = -EINTR;
-	} else if (zc->flags & IORING_RECVSEND_NOTIF_FLUSH) {
-		io_notif_slot_flush_submit(notif_slot, 0);
+		req_set_fail(req);
 	}
 
 	if (ret >= 0)
 		ret += zc->done_io;
 	else if (zc->done_io)
 		ret = zc->done_io;
-	io_req_set_res(req, ret, 0);
+
+	io_notif_flush(zc->notif);
+	req->flags &= ~REQ_F_NEED_CLEANUP;
+	cflags = ret >= 0 ? IORING_CQE_F_MORE : 0;
+	io_req_set_res(req, ret, cflags);
 	return IOU_OK;
 }
diff --git a/io_uring/net.h b/io_uring/net.h
index 7c438d39c089..d744a0a874e7 100644
--- a/io_uring/net.h
+++ b/io_uring/net.h
@@ -31,6 +31,7 @@ struct io_async_connect {
 int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 int io_shutdown(struct io_kiocb *req, unsigned int issue_flags);
 
+int io_sendzc_prep_async(struct io_kiocb *req);
 int io_sendmsg_prep_async(struct io_kiocb *req);
 void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req);
 int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
@@ -54,6 +55,7 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags);
 
 int io_sendzc(struct io_kiocb *req, unsigned int issue_flags);
 int io_sendzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
+void io_sendzc_cleanup(struct io_kiocb *req);
 
 void io_netmsg_cache_free(struct io_cache_entry *entry);
 #else
diff --git a/io_uring/notif.c b/io_uring/notif.c
index 977736e82c1a..e37c6569d82e 100644
--- a/io_uring/notif.c
+++ b/io_uring/notif.c
@@ -21,14 +21,6 @@ static void __io_notif_complete_tw(struct io_kiocb *notif, bool *locked)
 	io_req_task_complete(notif, locked);
 }
 
-static inline void io_notif_complete(struct io_kiocb *notif)
-	__must_hold(&notif->ctx->uring_lock)
-{
-	bool locked = true;
-
-	__io_notif_complete_tw(notif, &locked);
-}
-
 static void io_uring_tx_zerocopy_callback(struct sk_buff *skb,
 					  struct ubuf_info *uarg,
 					  bool success)
@@ -42,8 +34,7 @@ static void io_uring_tx_zerocopy_callback(struct sk_buff *skb,
 	}
 }
 
-struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx,
-				struct io_notif_slot *slot)
+struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx)
 	__must_hold(&ctx->uring_lock)
 {
 	struct io_kiocb *notif;
@@ -59,99 +50,23 @@ struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx,
 	io_get_task_refs(1);
 	notif->rsrc_node = NULL;
 	io_req_set_rsrc_node(notif, ctx, 0);
-	notif->cqe.user_data = slot->tag;
-	notif->cqe.flags = slot->seq++;
-	notif->cqe.res = 0;
 
 	nd = io_notif_to_data(notif);
 	nd->account_pages = 0;
 	nd->uarg.flags = SKBFL_ZEROCOPY_FRAG | SKBFL_DONT_ORPHAN;
 	nd->uarg.callback = io_uring_tx_zerocopy_callback;
-	/* master ref owned by io_notif_slot, will be dropped on flush */
 	refcount_set(&nd->uarg.refcnt, 1);
 	return notif;
 }
 
-void io_notif_slot_flush(struct io_notif_slot *slot)
-	__must_hold(&ctx->uring_lock)
+void io_notif_flush(struct io_kiocb *notif)
+	__must_hold(&slot->notif->ctx->uring_lock)
 {
-	struct io_kiocb *notif = slot->notif;
 	struct io_notif_data *nd = io_notif_to_data(notif);
 
-	slot->notif = NULL;
-
-	/* drop slot's master ref */
-	if (refcount_dec_and_test(&nd->uarg.refcnt))
-		io_notif_complete(notif);
-}
-
-__cold int io_notif_unregister(struct io_ring_ctx *ctx)
-	__must_hold(&ctx->uring_lock)
-{
-	int i;
-
-	if (!ctx->notif_slots)
-		return -ENXIO;
-
-	for (i = 0; i < ctx->nr_notif_slots; i++) {
-		struct io_notif_slot *slot = &ctx->notif_slots[i];
-		struct io_kiocb *notif = slot->notif;
-		struct io_notif_data *nd;
-
-		if (!notif)
-			continue;
-		nd = io_notif_to_data(notif);
-		slot->notif = NULL;
-		if (!refcount_dec_and_test(&nd->uarg.refcnt))
-			continue;
+	/* drop slot's master ref */
+	if (refcount_dec_and_test(&nd->uarg.refcnt)) {
 		notif->io_task_work.func = __io_notif_complete_tw;
 		io_req_task_work_add(notif);
 	}
-
-	kvfree(ctx->notif_slots);
-	ctx->notif_slots = NULL;
-	ctx->nr_notif_slots = 0;
-	return 0;
-}
-
-__cold int io_notif_register(struct io_ring_ctx *ctx,
-			     void __user *arg, unsigned int size)
-	__must_hold(&ctx->uring_lock)
-{
-	struct io_uring_notification_slot __user *slots;
-	struct io_uring_notification_slot slot;
-	struct io_uring_notification_register reg;
-	unsigned i;
-
-	if (ctx->nr_notif_slots)
-		return -EBUSY;
-	if (size != sizeof(reg))
-		return -EINVAL;
-	if (copy_from_user(&reg, arg, sizeof(reg)))
-		return -EFAULT;
-	if (!reg.nr_slots || reg.nr_slots > IORING_MAX_NOTIF_SLOTS)
-		return -EINVAL;
-	if (reg.resv || reg.resv2 || reg.resv3)
-		return -EINVAL;
-
-	slots = u64_to_user_ptr(reg.data);
-	ctx->notif_slots = kvcalloc(reg.nr_slots, sizeof(ctx->notif_slots[0]),
-				    GFP_KERNEL_ACCOUNT);
-	if (!ctx->notif_slots)
-		return -ENOMEM;
-
-	for (i = 0; i < reg.nr_slots; i++, ctx->nr_notif_slots++) {
-		struct io_notif_slot *notif_slot = &ctx->notif_slots[i];
-
-		if (copy_from_user(&slot, &slots[i], sizeof(slot))) {
-			io_notif_unregister(ctx);
-			return -EFAULT;
-		}
-		if (slot.resv[0] | slot.resv[1] | slot.resv[2]) {
-			io_notif_unregister(ctx);
-			return -EINVAL;
-		}
-		notif_slot->tag = slot.tag;
-	}
-	return 0;
 }
diff --git a/io_uring/notif.h b/io_uring/notif.h
index 65f0b42f2555..5b4d710c8ca5 100644
--- a/io_uring/notif.h
+++ b/io_uring/notif.h
@@ -8,7 +8,6 @@
 #include "rsrc.h"
 
 #define IO_NOTIF_SPLICE_BATCH	32
-#define IORING_MAX_NOTIF_SLOTS	(1U << 10)
 
 struct io_notif_data {
 	struct file		*file;
@@ -16,63 +15,14 @@ struct io_notif_data {
 	unsigned long		account_pages;
 };
 
-struct io_notif_slot {
-	/*
-	 * Current/active notifier. A slot holds only one active notifier at a
-	 * time and keeps one reference to it. Flush releases the reference and
-	 * lazily replaces it with a new notifier.
-	 */
-	struct io_kiocb		*notif;
-
-	/*
-	 * Default ->user_data for this slot notifiers CQEs
-	 */
-	u64			tag;
-	/*
-	 * Notifiers of a slot live in generations, we create a new notifier
-	 * only after flushing the previous one. Track the sequential number
-	 * for all notifiers and copy it into notifiers's cqe->cflags
-	 */
-	u32			seq;
-};
-
-int io_notif_register(struct io_ring_ctx *ctx,
-		      void __user *arg, unsigned int size);
-int io_notif_unregister(struct io_ring_ctx *ctx);
-
-void io_notif_slot_flush(struct io_notif_slot *slot);
-struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx,
-				struct io_notif_slot *slot);
+void io_notif_flush(struct io_kiocb *notif);
+struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx);
 
 static inline struct io_notif_data *io_notif_to_data(struct io_kiocb *notif)
 {
 	return io_kiocb_to_cmd(notif, struct io_notif_data);
 }
 
-static inline struct io_kiocb *io_get_notif(struct io_ring_ctx *ctx,
-					    struct io_notif_slot *slot)
-{
-	if (!slot->notif)
-		slot->notif = io_alloc_notif(ctx, slot);
-	return slot->notif;
-}
-
-static inline struct io_notif_slot *io_get_notif_slot(struct io_ring_ctx *ctx,
-						      unsigned idx)
-	__must_hold(&ctx->uring_lock)
-{
-	if (idx >= ctx->nr_notif_slots)
-		return NULL;
-	idx = array_index_nospec(idx, ctx->nr_notif_slots);
-	return &ctx->notif_slots[idx];
-}
-
-static inline void io_notif_slot_flush_submit(struct io_notif_slot *slot,
-					      unsigned int issue_flags)
-{
-	io_notif_slot_flush(slot);
-}
-
 static inline int io_notif_account_mem(struct io_kiocb *notif, unsigned len)
 {
 	struct io_ring_ctx *ctx = notif->ctx;
diff --git a/io_uring/opdef.c b/io_uring/opdef.c
index 72dd2b2d8a9d..c4dddd0fd709 100644
--- a/io_uring/opdef.c
+++ b/io_uring/opdef.c
@@ -246,13 +246,12 @@ const struct io_op_def io_op_defs[] = {
 		.prep			= io_close_prep,
 		.issue			= io_close,
 	},
-	[IORING_OP_RSRC_UPDATE] = {
+	[IORING_OP_FILES_UPDATE] = {
 		.audit_skip		= 1,
 		.iopoll			= 1,
-		.name			= "RSRC_UPDATE",
-		.prep			= io_rsrc_update_prep,
-		.issue			= io_rsrc_update,
-		.ioprio			= 1,
+		.name			= "FILES_UPDATE",
+		.prep			= io_files_update_prep,
+		.issue			= io_files_update,
 	},
 	[IORING_OP_STATX] = {
 		.audit_skip		= 1,
@@ -471,20 +470,23 @@ const struct io_op_def io_op_defs[] = {
 		.issue			= io_uring_cmd,
 		.prep_async		= io_uring_cmd_prep_async,
 	},
-	[IORING_OP_SENDZC_NOTIF] = {
-		.name			= "SENDZC_NOTIF",
+	[IORING_OP_SEND_ZC] = {
+		.name			= "SEND_ZC",
 		.needs_file		= 1,
 		.unbound_nonreg_file	= 1,
 		.pollout		= 1,
 		.audit_skip		= 1,
 		.ioprio			= 1,
+		.manual_alloc		= 1,
 #if defined(CONFIG_NET)
+		.async_size		= sizeof(struct io_async_msghdr),
 		.prep			= io_sendzc_prep,
 		.issue			= io_sendzc,
+		.prep_async		= io_sendzc_prep_async,
+		.cleanup		= io_sendzc_cleanup,
 #else
 		.prep			= io_eopnotsupp_prep,
 #endif
-
 	},
 };
diff --git a/io_uring/opdef.h b/io_uring/opdef.h
index ece8ed4f96c4..763c6e54e2ee 100644
--- a/io_uring/opdef.h
+++ b/io_uring/opdef.h
@@ -25,6 +25,8 @@ struct io_op_def {
 	unsigned		ioprio : 1;
 	/* supports iopoll */
 	unsigned		iopoll : 1;
+	/* opcode specific path will handle ->async_data allocation if needed */
+	unsigned		manual_alloc : 1;
 	/* size of async data needed, if any */
 	unsigned short		async_size;
 
diff --git a/io_uring/poll.c b/io_uring/poll.c
index d5bad0bea6e4..0d9f49c575e0 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -857,7 +857,7 @@ int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	if (sqe->buf_index || sqe->off || sqe->addr)
 		return -EINVAL;
 	flags = READ_ONCE(sqe->len);
-	if (flags & ~(IORING_POLL_ADD_MULTI|IORING_POLL_ADD_LEVEL))
+	if (flags & ~IORING_POLL_ADD_MULTI)
 		return -EINVAL;
 	if ((flags & IORING_POLL_ADD_MULTI) && (req->flags & REQ_F_CQE_SKIP))
 		return -EINVAL;
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 71359a4d0bd4..cf3272113214 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -15,14 +15,12 @@
 #include "io_uring.h"
 #include "openclose.h"
 #include "rsrc.h"
-#include "notif.h"
 
 struct io_rsrc_update {
 	struct file			*file;
 	u64				arg;
 	u32				nr_args;
 	u32				offset;
-	int				type;
 };
 
 static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
@@ -655,7 +653,7 @@ __cold int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
 	return -EINVAL;
 }
 
-int io_rsrc_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
 
@@ -669,7 +667,6 @@ int io_rsrc_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	if (!up->nr_args)
 		return -EINVAL;
 	up->arg = READ_ONCE(sqe->addr);
-	up->type = READ_ONCE(sqe->ioprio);
 	return 0;
 }
 
@@ -712,7 +709,7 @@ static int io_files_update_with_index_alloc(struct io_kiocb *req,
 	return ret;
 }
 
-static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
+int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
 	struct io_ring_ctx *ctx = req->ctx;
@@ -741,54 +738,6 @@ static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
 	return IOU_OK;
 }
 
-static int io_notif_update(struct io_kiocb *req, unsigned int issue_flags)
-{
-	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
-	struct io_ring_ctx *ctx = req->ctx;
-	unsigned len = up->nr_args;
-	unsigned idx_end, idx = up->offset;
-	int ret = 0;
-
-	io_ring_submit_lock(ctx, issue_flags);
-	if (unlikely(check_add_overflow(idx, len, &idx_end))) {
-		ret = -EOVERFLOW;
-		goto out;
-	}
-	if (unlikely(idx_end > ctx->nr_notif_slots)) {
-		ret = -EINVAL;
-		goto out;
-	}
-
-	for (; idx < idx_end; idx++) {
-		struct io_notif_slot *slot = &ctx->notif_slots[idx];
-
-		if (!slot->notif)
-			continue;
-		if (up->arg)
-			slot->tag = up->arg;
-		io_notif_slot_flush_submit(slot, issue_flags);
-	}
-out:
-	io_ring_submit_unlock(ctx, issue_flags);
-	if (ret < 0)
-		req_set_fail(req);
-	io_req_set_res(req, ret, 0);
-	return IOU_OK;
-}
-
-int io_rsrc_update(struct io_kiocb *req, unsigned int issue_flags)
-{
-	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
-
-	switch (up->type) {
-	case IORING_RSRC_UPDATE_FILES:
-		return io_files_update(req, issue_flags);
-	case IORING_RSRC_UPDATE_NOTIF:
-		return io_notif_update(req, issue_flags);
-	}
-	return -EINVAL;
-}
-
 int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
 			  struct io_rsrc_node *node, void *rsrc)
 {
diff --git a/io_uring/rsrc.h b/io_uring/rsrc.h
index f3a9a177941f..9bce15665444 100644
--- a/io_uring/rsrc.h
+++ b/io_uring/rsrc.h
@@ -167,8 +167,8 @@ static inline u64 *io_get_tag_slot(struct io_rsrc_data *data, unsigned int idx)
 	return &data->tags[table_idx][off];
 }
 
-int io_rsrc_update(struct io_kiocb *req, unsigned int issue_flags);
-int io_rsrc_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
+int io_files_update(struct io_kiocb *req, unsigned int issue_flags);
+int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 
 int __io_account_mem(struct user_struct *user, unsigned long nr_pages);
 
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 1babd77da79c..76ebcfebc9a6 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -206,6 +206,20 @@ static bool __io_complete_rw_common(struct io_kiocb *req, long res)
 	return false;
 }
 
+static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
+{
+	struct io_async_rw *io = req->async_data;
+
+	/* add previously done IO, if any */
+	if (req_has_async_data(req) && io->bytes_done > 0) {
+		if (res < 0)
+			res = io->bytes_done;
+		else
+			res += io->bytes_done;
+	}
+	return res;
+}
+
 static void io_complete_rw(struct kiocb *kiocb, long res)
 {
 	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
@@ -213,7 +227,7 @@ static void io_complete_rw(struct kiocb *kiocb, long res)
 
 	if (__io_complete_rw_common(req, res))
 		return;
-	io_req_set_res(req, res, 0);
+	io_req_set_res(req, io_fixup_rw_res(req, res), 0);
 	req->io_task_work.func = io_req_task_complete;
 	io_req_task_work_add(req);
 }
@@ -240,22 +254,14 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
 static int kiocb_done(struct io_kiocb *req, ssize_t ret,
 		      unsigned int issue_flags)
 {
-	struct io_async_rw *io = req->async_data;
 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
-
-	/* add previously done IO, if any */
-	if (req_has_async_data(req) && io->bytes_done > 0) {
-		if (ret < 0)
-			ret = io->bytes_done;
-		else
-			ret += io->bytes_done;
-	}
+	unsigned final_ret = io_fixup_rw_res(req, ret);
 
 	if (req->flags & REQ_F_CUR_POS)
 		req->file->f_pos = rw->kiocb.ki_pos;
 	if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) {
 		if (!__io_complete_rw_common(req, ret)) {
-			io_req_set_res(req, req->cqe.res,
+			io_req_set_res(req, final_ret,
 				       io_put_kbuf(req, issue_flags));
 			return IOU_OK;
 		}
@@ -268,7 +274,7 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret,
 		if (io_resubmit_prep(req))
 			io_req_task_queue_reissue(req);
 		else
-			io_req_task_queue_fail(req, ret);
+			io_req_task_queue_fail(req, final_ret);
 	}
 	return IOU_ISSUE_SKIP_COMPLETE;
 }
diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
index 8e0cc2d9205e..e78b6f980d77 100644
--- a/io_uring/uring_cmd.c
+++ b/io_uring/uring_cmd.c
@@ -3,6 +3,7 @@
 #include <linux/errno.h>
 #include <linux/file.h>
 #include <linux/io_uring.h>
+#include <linux/security.h>
 
 #include <uapi/linux/io_uring.h>
 
@@ -88,6 +89,10 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
 	if (!req->file->f_op->uring_cmd)
 		return -EOPNOTSUPP;
 
+	ret = security_uring_cmd(ioucmd);
+	if (ret)
+		return ret;
+
 	if (ctx->flags & IORING_SETUP_SQE128)
 		issue_flags |= IO_URING_F_SQE128;
 	if (ctx->flags & IORING_SETUP_CQE32)
@@ -112,7 +117,7 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
 		if (ret < 0)
 			req_set_fail(req);
 		io_req_set_res(req, ret, 0);
-		return IOU_OK;
+		return ret;
 	}
 
 	return IOU_ISSUE_SKIP_COMPLETE;
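Usage note: after this series, zero-copy send no longer uses registered notification slots; each IORING_OP_SEND_ZC request owns its notification. On success the request posts a CQE with IORING_CQE_F_MORE set, followed later by a notification CQE carrying IORING_CQE_F_NOTIF and the same user_data once the kernel has dropped its references to the buffer; on failure the notification CQE is skipped. Below is a minimal userspace sketch of that flow, assuming a liburing recent enough to provide io_uring_prep_send_zc() (the helper name send_zc_once is made up for illustration):

/* Sketch only: one zero-copy send, reaping both completions. */
#include <liburing.h>
#include <stdbool.h>
#include <stdio.h>

static int send_zc_once(struct io_uring *ring, int sockfd,
			const void *buf, size_t len)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int ret, res;
	bool more;

	io_uring_prep_send_zc(sqe, sockfd, buf, len, 0, 0);
	sqe->user_data = 42;	/* arbitrary tag, repeated in both CQEs */

	ret = io_uring_submit(ring);
	if (ret < 0)
		return ret;

	/* first CQE: the send result itself */
	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret < 0)
		return ret;
	res = cqe->res;
	more = cqe->flags & IORING_CQE_F_MORE;
	io_uring_cqe_seen(ring, cqe);

	/* second CQE only arrives if F_MORE was set; once it does,
	 * the buffer may safely be reused */
	if (more) {
		ret = io_uring_wait_cqe(ring, &cqe);
		if (ret < 0)
			return ret;
		if (cqe->flags & IORING_CQE_F_NOTIF)
			printf("notif for %llu\n",
			       (unsigned long long)cqe->user_data);
		io_uring_cqe_seen(ring, cqe);
	}
	return res;
}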