author     Linus Torvalds <torvalds@linux-foundation.org>   2023-03-17 11:12:07 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2023-03-17 11:12:07 -0700
commit     b7966a5a5cd009a682ccb0823e89f1e9fb719f27 (patch)
tree       1fcdb7af812f338553061b14e46799ae307e87b9
parent     02ef7d39fd552cf9e1de58a3003f77e743d1fb6b (diff)
parent     d2acf789088bb562cea342b6a24e646df4d47839 (diff)
Merge tag 'io_uring-6.3-2023-03-16' of git://git.kernel.dk/linux
Pull io_uring fixes from Jens Axboe:
- When PF_NO_SETAFFINITY was removed for io-wq threads, we kind of
  forgot about the SQPOLL thread. Remove it there as well; there's even
  less of a reason to set it there (Michal). See the first sketch after
  this list.
- Fixup a confusing 'ret' setting (Li)
- When MSG_RING is used to send a direct descriptor to another ring,
  it's possible to have the target ring allocate the index rather than
  provide a specific one. If that is done, return the chosen index in
  the CQE, like we would've done locally (Pavel). See the MSG_RING
  sketch after this list.
- Fix a regression in this series on huge page bvec collapsing (Pavel).
  See the buffer-registration sketch after this list.
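
For context on the PF_NO_SETAFFINITY change, here is a minimal liburing sketch
(not part of this series) that creates an SQPOLL ring with the poller thread
pinned at setup time; with the flag no longer set on that thread, its affinity
can also be adjusted afterwards, e.g. with taskset or cpusets. The function
name setup_sqpoll_ring() and the choice of CPU 0 are illustrative assumptions.

/* Sketch, not from this series: create an SQPOLL ring whose
 * submission-queue poller thread starts out pinned to CPU 0.  With
 * PF_NO_SETAFFINITY no longer set on that thread, its affinity can
 * also be changed later from userspace (e.g. taskset on its tid).
 */
#include <liburing.h>
#include <string.h>

static int setup_sqpoll_ring(struct io_uring *ring)
{
	struct io_uring_params p;

	memset(&p, 0, sizeof(p));
	p.flags = IORING_SETUP_SQPOLL | IORING_SETUP_SQ_AFF;
	p.sq_thread_cpu = 0;		/* assumption: CPU 0 is online */
	p.sq_thread_idle = 1000;	/* poller sleeps after 1s of inactivity */

	/* May require CAP_SYS_NICE on older kernels. */
	return io_uring_queue_init_params(8, ring, &p);
}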
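
To illustrate the MSG_RING fix, a hedged sketch assuming liburing provides
io_uring_prep_msg_ring_fd_alloc() (check your liburing headers). The sender
passes one of its direct descriptors to a target ring and lets the target pick
a free slot; with the fix, that slot index shows up in the target ring's CQE
res field. send_direct_fd() and the 0x42 tag are illustrative, not from the
patch.

/* Sketch: pass a direct descriptor to another ring and let the target
 * allocate the slot.  Assumes liburing's io_uring_prep_msg_ring_fd_alloc()
 * helper; src_idx is an index into the sender's registered file table.
 */
#include <liburing.h>
#include <errno.h>

static int send_direct_fd(struct io_uring *src, struct io_uring *dst, int src_idx)
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int ret;

	sqe = io_uring_get_sqe(src);
	if (!sqe)
		return -EBUSY;

	/* 0x42 is an arbitrary tag the target sees as cqe->user_data. */
	io_uring_prep_msg_ring_fd_alloc(sqe, dst->ring_fd, src_idx, 0x42, 0);

	ret = io_uring_submit(src);
	if (ret < 0)
		return ret;

	/* Sender-side completion of the MSG_RING request itself. */
	ret = io_uring_wait_cqe(src, &cqe);
	if (ret < 0)
		return ret;
	ret = cqe->res;
	io_uring_cqe_seen(src, cqe);

	/*
	 * The target ring gets its own CQE: user_data == 0x42 and, with this
	 * fix, res holds the index allocated for the descriptor.
	 */
	return ret;
}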
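
The folio accounting fix is internal to io_sqe_buffer_register(), but the
affected path is easy to hit from userspace: registering a fixed buffer backed
by a huge page, which io_uring collapses into a single bvec. A sketch using
standard liburing and mmap calls; the 2 MiB size assumes hugetlb pages are
reserved, and register_huge_buffer() is an illustrative name.

/* Sketch of the path the folio fix covers: register a fixed buffer backed
 * by one 2 MiB huge page.  Nothing here is taken from the patch itself.
 */
#include <liburing.h>
#include <sys/mman.h>
#include <sys/uio.h>
#include <errno.h>

static int register_huge_buffer(struct io_uring *ring)
{
	size_t len = 2UL * 1024 * 1024;
	struct iovec iov;
	void *buf;

	buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (buf == MAP_FAILED)
		return -ENOMEM;	/* no hugetlb pages reserved */

	iov.iov_base = buf;
	iov.iov_len = len;

	/* The kernel pins the pages, notices they share one folio and keeps
	 * a single bvec entry; the fix keeps the page refcounts balanced. */
	return io_uring_register_buffers(ring, &iov, 1);
}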
* tag 'io_uring-6.3-2023-03-16' of git://git.kernel.dk/linux:
io_uring/rsrc: fix folio accounting
io_uring/msg_ring: let target know allocated index
io_uring: rsrc: Optimize return value variable 'ret'
io_uring/sqpoll: Do not set PF_NO_SETAFFINITY on sqpoll threads
-rw-r--r--   io_uring/msg_ring.c    4
-rw-r--r--   io_uring/rsrc.c       10
-rw-r--r--   io_uring/sqpoll.c      1
3 files changed, 11 insertions, 4 deletions
diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c
index 8803c0979e2a..85fd7ce5f05b 100644
--- a/io_uring/msg_ring.c
+++ b/io_uring/msg_ring.c
@@ -202,7 +202,7 @@ static int io_msg_install_complete(struct io_kiocb *req, unsigned int issue_flag
 	 * completes with -EOVERFLOW, then the sender must ensure that a
 	 * later IORING_OP_MSG_RING delivers the message.
 	 */
-	if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
+	if (!io_post_aux_cqe(target_ctx, msg->user_data, ret, 0))
 		ret = -EOVERFLOW;
 out_unlock:
 	io_double_unlock_ctx(target_ctx);
@@ -229,6 +229,8 @@ static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
 	struct io_ring_ctx *ctx = req->ctx;
 	struct file *src_file = msg->src_file;
 
+	if (msg->len)
+		return -EINVAL;
 	if (target_ctx == ctx)
 		return -EINVAL;
 	if (target_ctx->flags & IORING_SETUP_R_DISABLED)
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 056f40946ff6..e2bac9f89902 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -410,7 +410,7 @@ __cold static int io_rsrc_data_alloc(struct io_ring_ctx *ctx, unsigned nr,
 				     struct io_rsrc_data **pdata)
 {
 	struct io_rsrc_data *data;
-	int ret = -ENOMEM;
+	int ret = 0;
 	unsigned i;
 
 	data = kzalloc(sizeof(*data), GFP_KERNEL);
@@ -1235,7 +1235,13 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
 			}
 		}
 		if (folio) {
-			folio_put_refs(folio, nr_pages - 1);
+			/*
+			 * The pages are bound to the folio, it doesn't
+			 * actually unpin them but drops all but one reference,
+			 * which is usually put down by io_buffer_unmap().
+			 * Note, needs a better helper.
+			 */
+			unpin_user_pages(&pages[1], nr_pages - 1);
 			nr_pages = 1;
 		}
 	}
diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c
index 0119d3f1a556..9db4bc1f521a 100644
--- a/io_uring/sqpoll.c
+++ b/io_uring/sqpoll.c
@@ -233,7 +233,6 @@ static int io_sq_thread(void *data)
 		set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
 	else
 		set_cpus_allowed_ptr(current, cpu_online_mask);
-	current->flags |= PF_NO_SETAFFINITY;
 
 	mutex_lock(&sqd->lock);
 	while (1) {