author    Linus Torvalds <torvalds@linux-foundation.org>    2020-10-23 10:06:38 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2020-10-23 10:06:38 -0700
commit    4a22709e21c2b1bedf90f68c823daf65d8e6b491 (patch)
tree      b480c70465c11cb6b4d9e4f47b8e1824df2d5eea    /fs/io_uring.c
parent    0a14d7649872be966d12bc6c3056bb37c27b94bd (diff)
parent    91989c707884ecc7cd537281ab1a4b8fb7219da3 (diff)
Merge tag 'arch-cleanup-2020-10-22' of git://git.kernel.dk/linux-block
Pull arch task_work cleanups from Jens Axboe:
 "Two cleanups that don't fit other categories:

   - Finally get the task_work_add() cleanup done properly, so we don't
     have random 0/1/false/true/TWA_SIGNAL confusing use cases. Updates
     all callers, and also fixes up the documentation for
     task_work_add().

   - While working on some TIF related changes for 5.11, this
     TIF_NOTIFY_RESUME cleanup fell out of that. Remove some arch
     duplication for how that is handled"

* tag 'arch-cleanup-2020-10-22' of git://git.kernel.dk/linux-block:
  task_work: cleanup notification modes
  tracehook: clear TIF_NOTIFY_RESUME in tracehook_notify_resume()
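For readers unfamiliar with the new convention: after this series, task_work_add() takes an explicit enum task_work_notify_mode (TWA_NONE, TWA_RESUME, TWA_SIGNAL) instead of the old ad-hoc int/bool notify argument. Below is a minimal sketch of a caller written against that convention; the queue_work_for_task() helper is hypothetical and exists only to illustrate the call signature used throughout the diff in this file.

/* Hypothetical helper; shows the task_work_add() signature used after
 * this series (declared in linux/task_work.h). */
#include <linux/sched.h>
#include <linux/task_work.h>

static int queue_work_for_task(struct task_struct *tsk,
			       struct callback_head *work,
			       bool want_signal)
{
	/* TWA_NONE:   queue only, no notification
	 * TWA_RESUME: notify via TIF_NOTIFY_RESUME on return to userspace
	 * TWA_SIGNAL: notify the task as if a signal were delivered */
	enum task_work_notify_mode notify = want_signal ? TWA_SIGNAL : TWA_NONE;

	return task_work_add(tsk, work, notify);
}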
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--    fs/io_uring.c    | 13
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 02dc81622081..626a9d111744 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1976,7 +1976,8 @@ static int io_req_task_work_add(struct io_kiocb *req, bool twa_signal_ok)
{
struct task_struct *tsk = req->task;
struct io_ring_ctx *ctx = req->ctx;
- int ret, notify;
+ enum task_work_notify_mode notify;
+ int ret;
if (tsk->flags & PF_EXITING)
return -ESRCH;
@@ -1987,7 +1988,7 @@ static int io_req_task_work_add(struct io_kiocb *req, bool twa_signal_ok)
* processing task_work. There's no reliable way to tell if TWA_RESUME
* will do the job.
*/
- notify = 0;
+ notify = TWA_NONE;
if (!(ctx->flags & IORING_SETUP_SQPOLL) && twa_signal_ok)
notify = TWA_SIGNAL;
@@ -2056,7 +2057,7 @@ static void io_req_task_queue(struct io_kiocb *req)
init_task_work(&req->task_work, io_req_task_cancel);
tsk = io_wq_get_task(req->ctx->io_wq);
- task_work_add(tsk, &req->task_work, 0);
+ task_work_add(tsk, &req->task_work, TWA_NONE);
wake_up_process(tsk);
}
}
@@ -2177,7 +2178,7 @@ static void io_free_req_deferred(struct io_kiocb *req)
struct task_struct *tsk;
tsk = io_wq_get_task(req->ctx->io_wq);
- task_work_add(tsk, &req->task_work, 0);
+ task_work_add(tsk, &req->task_work, TWA_NONE);
wake_up_process(tsk);
}
}
@@ -3291,7 +3292,7 @@ static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
/* queue just for cancelation */
init_task_work(&req->task_work, io_req_task_cancel);
tsk = io_wq_get_task(req->ctx->io_wq);
- task_work_add(tsk, &req->task_work, 0);
+ task_work_add(tsk, &req->task_work, TWA_NONE);
wake_up_process(tsk);
}
return 1;
@@ -4857,7 +4858,7 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
WRITE_ONCE(poll->canceled, true);
tsk = io_wq_get_task(req->ctx->io_wq);
- task_work_add(tsk, &req->task_work, 0);
+ task_work_add(tsk, &req->task_work, TWA_NONE);
wake_up_process(tsk);
}
return 1;