From 5a978dcfc0f054e4f6983a0a26355a65e34708cb Mon Sep 17 00:00:00 2001
From: Pavel Begunkov
Date: Sat, 27 Mar 2021 09:59:30 +0000
Subject: io_uring: always go for cancellation spin on exec

Always try to do cancellation in __io_uring_task_cancel() at least once,
so that it actually goes and cleans up its sqpoll tasks (i.e. via
io_sqpoll_cancel_sync()); otherwise the sqpoll task may submit new
requests after cancellation, which is racy for many reasons.

Fixes: 521d6a737a31c ("io_uring: cancel sqpoll via task_work")
Signed-off-by: Pavel Begunkov
Link: https://lore.kernel.org/r/0a21bd6d794bb1629bc906dd57a57b2c2985a8ac.1616839147.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe
---
 fs/io_uring.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 1949b80677e7..a4a944da95a0 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -9002,6 +9002,8 @@ void __io_uring_task_cancel(void)
 	/* make sure overflow events are dropped */
 	atomic_inc(&tctx->in_idle);
 
+	__io_uring_files_cancel(NULL);
+
 	do {
 		/* read completions before cancelations */
 		inflight = tctx_inflight(tctx);
--
cgit v1.2.3-58-ga151

From 51520426f4bc3e61cbbf7a39ccf4e411b665002d Mon Sep 17 00:00:00 2001
From: Pavel Begunkov
Date: Mon, 29 Mar 2021 11:39:29 +0100
Subject: io_uring: handle setup-failed ctx in kill_timeouts

general protection fault, probably for non-canonical address
	0xdffffc0000000018: 0000 [#1]
KASAN: null-ptr-deref in range [0x00000000000000c0-0x00000000000000c7]
RIP: 0010:io_commit_cqring+0x37f/0xc10 fs/io_uring.c:1318
Call Trace:
 io_kill_timeouts+0x2b5/0x320 fs/io_uring.c:8606
 io_ring_ctx_wait_and_kill+0x1da/0x400 fs/io_uring.c:8629
 io_uring_create fs/io_uring.c:9572 [inline]
 io_uring_setup+0x10da/0x2ae0 fs/io_uring.c:9599
 do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46
 entry_SYSCALL_64_after_hwframe+0x44/0xae

io_uring_setup() can get into io_ring_ctx_wait_and_kill() before
ctx->rings has been set up, and hence io_commit_cqring() crashes on a
NULL ring. Mimic poll cancellation and commit only when we actually
cancelled events; there can't be any requests to cancel if setup failed
before initialising the rings.

Fixes: 80c4cbdb5ee60 ("io_uring: do post-completion chore on t-out cancel")
Reported-by: syzbot+0e905eb8228070c457a0@syzkaller.appspotmail.com
Signed-off-by: Pavel Begunkov
Link: https://lore.kernel.org/r/660261a48f0e7abf260c8e43c87edab3c16736fa.1617014345.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe
---
 fs/io_uring.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index a4a944da95a0..088a9d3c420a 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -8603,9 +8603,9 @@ static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
 			canceled++;
 		}
 	}
-	io_commit_cqring(ctx);
+	if (canceled != 0)
+		io_commit_cqring(ctx);
 	spin_unlock_irq(&ctx->completion_lock);
-
 	if (canceled != 0)
 		io_cqring_ev_posted(ctx);
 	return canceled != 0;
--
cgit v1.2.3-58-ga151

From 82734c5b1b24c020d701cf90ccb075e43a5ccb07 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Mon, 29 Mar 2021 06:52:44 -0600
Subject: io_uring: drop sqd lock before handling signals for SQPOLL

Don't call into get_signal() with the sqd mutex held; it'll fail if
we're freezing the task, and we'll get complaints about locks still
being held:

====================================
WARNING: iou-sqp-8386/8387 still has locks held!
5.12.0-rc4-syzkaller #0 Not tainted
------------------------------------
1 lock held by iou-sqp-8386/8387:
 #0: ffff88801e1d2470 (&sqd->lock){+.+.}-{3:3}, at: io_sq_thread+0x24c/0x13a0 fs/io_uring.c:6731

stack backtrace:
CPU: 1 PID: 8387 Comm: iou-sqp-8386 Not tainted 5.12.0-rc4-syzkaller #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
Call Trace:
 __dump_stack lib/dump_stack.c:79 [inline]
 dump_stack+0x141/0x1d7 lib/dump_stack.c:120
 try_to_freeze include/linux/freezer.h:66 [inline]
 get_signal+0x171a/0x2150 kernel/signal.c:2576
 io_sq_thread+0x8d2/0x13a0 fs/io_uring.c:6748

Fold the get_signal() case in with the parking checks, as we need to
drop the lock in both cases, and we need to check for parking when
juggling the lock anyway.

Reported-by: syzbot+796d767eb376810256f5@syzkaller.appspotmail.com
Fixes: dbe1bdbb39db ("io_uring: handle signals for IO threads like a normal thread")
Signed-off-by: Jens Axboe
---
 fs/io_uring.c | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 088a9d3c420a..6d7a1b69712b 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -6733,22 +6733,25 @@ static int io_sq_thread(void *data)
 		int ret;
 		bool cap_entries, sqt_spin, needs_sched;
 
-		if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state)) {
+		if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) ||
+		    signal_pending(current)) {
+			bool did_sig = false;
+
 			mutex_unlock(&sqd->lock);
+			if (signal_pending(current)) {
+				struct ksignal ksig;
+
+				did_sig = get_signal(&ksig);
+			}
 			cond_resched();
 			mutex_lock(&sqd->lock);
+			if (did_sig)
+				break;
 			io_run_task_work();
 			io_run_task_work_head(&sqd->park_task_work);
 			timeout = jiffies + sqd->sq_thread_idle;
 			continue;
 		}
-		if (signal_pending(current)) {
-			struct ksignal ksig;
-
-			if (!get_signal(&ksig))
-				continue;
-			break;
-		}
 		sqt_spin = false;
 		cap_entries = !list_is_singular(&sqd->ctx_list);
 		list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
--
cgit v1.2.3-58-ga151

From 4b982bd0f383db9132e892c0c5144117359a6289 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Thu, 1 Apr 2021 08:38:34 -0600
Subject: io_uring: don't mark S_ISBLK async work as unbounded

S_ISBLK is marked as unbounded work for async preparation, because it
doesn't match S_ISREG. That is incorrect, as any read/write to a block
device is also a bounded operation. Fix it up and ensure that S_ISBLK
isn't marked unbounded.

Signed-off-by: Jens Axboe
---
 fs/io_uring.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 6d7a1b69712b..a16b7df934d1 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1213,7 +1213,7 @@ static void io_prep_async_work(struct io_kiocb *req)
 	if (req->flags & REQ_F_ISREG) {
 		if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
 			io_wq_hash_work(&req->work, file_inode(req->file));
-	} else {
+	} else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
 		if (def->unbound_nonreg_file)
 			req->work.flags |= IO_WQ_WORK_UNBOUND;
 	}
--
cgit v1.2.3-58-ga151

From 696ee88a7c50f96573f98aa76cc74286033140c1 Mon Sep 17 00:00:00 2001
From: Pavel Begunkov
Date: Thu, 1 Apr 2021 09:55:04 +0100
Subject: io_uring/io-wq: protect against sprintf overflow

task_pid may be large enough not to fit into the space left in a
TASK_COMM_LEN-sized buffer, making sprintf() overflow it. We don't care
much about uniqueness here, so replace it with the safer snprintf().
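As a minimal userspace sketch of the failure mode (assuming
TASK_COMM_LEN is 16 bytes, its current kernel value; the pid below is
only an example):

	#include <stdio.h>

	#define TASK_COMM_LEN 16

	int main(void)
	{
		char buf[TASK_COMM_LEN];
		int task_pid = 4194304;	/* PID_MAX_LIMIT: 7 digits */

		/*
		 * "iou-wrk-" (8 bytes) + 7 digits + NUL fills all 16 bytes;
		 * a wider pid string would make sprintf() write past the
		 * end of buf, while snprintf() truncates to the buffer
		 * size and always NUL-terminates.
		 */
		snprintf(buf, sizeof(buf), "iou-wrk-%d", task_pid);
		printf("%s\n", buf);	/* "iou-wrk-4194304" */
		return 0;
	}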
Reported-by: Alexey Dobriyan
Signed-off-by: Pavel Begunkov
Link: https://lore.kernel.org/r/1702c6145d7e1c46fbc382f28334c02e1a3d3994.1617267273.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe
---
 fs/io-wq.c    | 4 ++--
 fs/io_uring.c | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/fs/io-wq.c b/fs/io-wq.c
index 7434eb40ca8c..433c4d3c3c1c 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -484,7 +484,7 @@ static int io_wqe_worker(void *data)
 	worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
 	io_wqe_inc_running(worker);
 
-	sprintf(buf, "iou-wrk-%d", wq->task_pid);
+	snprintf(buf, sizeof(buf), "iou-wrk-%d", wq->task_pid);
 	set_task_comm(current, buf);
 
 	while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
@@ -711,7 +711,7 @@ static int io_wq_manager(void *data)
 	char buf[TASK_COMM_LEN];
 	int node;
 
-	sprintf(buf, "iou-mgr-%d", wq->task_pid);
+	snprintf(buf, sizeof(buf), "iou-mgr-%d", wq->task_pid);
 	set_task_comm(current, buf);
 
 	do {
diff --git a/fs/io_uring.c b/fs/io_uring.c
index a16b7df934d1..4a6701b5065e 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -6718,7 +6718,7 @@ static int io_sq_thread(void *data)
 	char buf[TASK_COMM_LEN];
 	DEFINE_WAIT(wait);
 
-	sprintf(buf, "iou-sqp-%d", sqd->task_pid);
+	snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
 	set_task_comm(current, buf);
 
 	current->pf_io_worker = NULL;
--
cgit v1.2.3-58-ga151

From 07204f21577a1d882f0259590c3553fe6a476381 Mon Sep 17 00:00:00 2001
From: Pavel Begunkov
Date: Thu, 1 Apr 2021 12:18:48 +0100
Subject: io_uring: fix EIOCBQUEUED iter revert

iov_iter_revert() is done in completion handlers that happen before
read/write returns -EIOCBQUEUED, so there is no need to repeat the
revert afterwards. Moreover, even though it may appear to be just a
no-op, it actually races with 1) the user forging a new iovec of a
different size, and 2) a reissue, which is done via io-wq and continues
completely asynchronously.

Fixes: 3e6a0d3c7571c ("io_uring: fix -EAGAIN retry with IOPOLL")
Signed-off-by: Pavel Begunkov
Signed-off-by: Jens Axboe
---
 fs/io_uring.c | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 4a6701b5065e..717942474fa9 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -3284,8 +3284,6 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
 	ret = io_iter_do_read(req, iter);
 
 	if (ret == -EIOCBQUEUED) {
-		if (req->async_data)
-			iov_iter_revert(iter, io_size - iov_iter_count(iter));
 		goto out_free;
 	} else if (ret == -EAGAIN) {
 		/* IOPOLL retry should happen for io-wq threads */
@@ -3418,8 +3416,6 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
 	/* no retry on NONBLOCK nor RWF_NOWAIT */
 	if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
 		goto done;
-	if (ret2 == -EIOCBQUEUED && req->async_data)
-		iov_iter_revert(iter, io_size - iov_iter_count(iter));
 	if (!force_nonblock || ret2 != -EAGAIN) {
 		/* IOPOLL retry should happen for io-wq threads */
 		if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
--
cgit v1.2.3-58-ga151

From 230d50d448acb6639991440913299e50cacf1daf Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Thu, 1 Apr 2021 20:41:15 -0600
Subject: io_uring: move reissue into regular IO path

It's non-obvious how retry is done for block-backed files, when it
happens off the kiocb done path. It also makes it tricky to deal with
the iov_iter handling. Just mark the req as needing a reissue, and
handle it from the submission path instead.
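In outline, the new flow looks roughly like the following userspace
sketch (hypothetical names, heavily simplified; the real flag and call
sites are in the diff below):

	#include <errno.h>
	#include <stdio.h>

	#define REQ_F_REISSUE	(1U << 0)

	struct io_req {
		unsigned int flags;
		int result;
	};

	/* completion side: don't retry from here, just mark the request */
	static void complete_rw(struct io_req *req, int res)
	{
		if (res == -EAGAIN) {
			req->flags |= REQ_F_REISSUE;
			return;
		}
		req->result = res;
	}

	/* submission side: the flag collapses into the usual -EAGAIN retry */
	static int issue_rw(struct io_req *req, int simulated_res)
	{
		/* the "completion" runs inline only to keep this runnable */
		complete_rw(req, simulated_res);
		if (req->flags & REQ_F_REISSUE) {
			req->flags &= ~REQ_F_REISSUE;
			return -EAGAIN;	/* caller retries from submit context */
		}
		return req->result;
	}

	int main(void)
	{
		struct io_req req = { 0, 0 };

		printf("first attempt: %d\n", issue_rw(&req, -EAGAIN));
		printf("retry:         %d\n", issue_rw(&req, 4096));
		return 0;
	}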
This makes it directly obvious that we're not re-importing the iovec
from userspace past the submit point, and it means that we can just
reuse our usual -EAGAIN retry path from the read/write handling.

At some point in the future, we'll gain the ability to always reliably
return -EAGAIN through the stack. A previous attempt on the block side
didn't pan out and got reverted, hence the need to check for this
information out-of-band right now.

Signed-off-by: Jens Axboe
---
 fs/io_uring.c | 17 +++++++++++----
 1 file changed, 13 insertions(+), 4 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 717942474fa9..8be542050648 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -697,6 +697,7 @@ enum {
 	REQ_F_NO_FILE_TABLE_BIT,
 	REQ_F_LTIMEOUT_ACTIVE_BIT,
 	REQ_F_COMPLETE_INLINE_BIT,
+	REQ_F_REISSUE_BIT,
 
 	/* not a real bit, just to check we're not overflowing the space */
 	__REQ_F_LAST_BIT,
@@ -740,6 +741,8 @@ enum {
 	REQ_F_LTIMEOUT_ACTIVE	= BIT(REQ_F_LTIMEOUT_ACTIVE_BIT),
 	/* completion is deferred through io_comp_state */
 	REQ_F_COMPLETE_INLINE	= BIT(REQ_F_COMPLETE_INLINE_BIT),
+	/* caller should reissue async */
+	REQ_F_REISSUE		= BIT(REQ_F_REISSUE_BIT),
 };
 
 struct async_poll {
@@ -2503,8 +2506,10 @@ static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
 	if (req->rw.kiocb.ki_flags & IOCB_WRITE)
 		kiocb_end_write(req);
 
-	if ((res == -EAGAIN || res == -EOPNOTSUPP) && io_rw_reissue(req))
+	if ((res == -EAGAIN || res == -EOPNOTSUPP) && io_rw_should_reissue(req)) {
+		req->flags |= REQ_F_REISSUE;
 		return;
+	}
 	if (res != req->result)
 		req_set_fail_links(req);
 	if (req->flags & REQ_F_BUFFER_SELECTED)
@@ -3283,9 +3288,7 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
 
 	ret = io_iter_do_read(req, iter);
 
-	if (ret == -EIOCBQUEUED) {
-		goto out_free;
-	} else if (ret == -EAGAIN) {
+	if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
 		/* IOPOLL retry should happen for io-wq threads */
 		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
 			goto done;
@@ -3295,6 +3298,8 @@
 		/* some cases will consume bytes even on error returns */
 		iov_iter_revert(iter, io_size - iov_iter_count(iter));
 		ret = 0;
+	} else if (ret == -EIOCBQUEUED) {
+		goto out_free;
 	} else if (ret <= 0 || ret == io_size || !force_nonblock ||
 		   (req->flags & REQ_F_NOWAIT) || !(req->flags & REQ_F_ISREG)) {
 		/* read all, failed, already did sync or don't want to retry */
@@ -3407,6 +3412,9 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
 	else
 		ret2 = -EINVAL;
+	if (req->flags & REQ_F_REISSUE)
+		ret2 = -EAGAIN;
+
 	/*
 	 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
 	 * retry them without IOCB_NOWAIT.
 	 */
@@ -6160,6 +6168,7 @@ static void io_wq_submit_work(struct io_wq_work *work)
 		ret = -ECANCELED;
 
 	if (!ret) {
+		req->flags &= ~REQ_F_REISSUE;
 		do {
 			ret = io_issue_sqe(req, 0);
 			/*
--
cgit v1.2.3-58-ga151