author    Lai Jiangshan <jiangshan.ljs@antgroup.com>  2024-06-21 15:32:23 +0800
committer Tejun Heo <tj@kernel.org>                   2024-06-21 12:34:02 -1000
commit    f45b1c3c33373c8c29a95a5188165d6eb634823a (patch)
tree      b5ea82bc491212a899d5b736c9d018ab093049f5 /kernel/workqueue.c
parent    68f83057b913467a999e1bf9e0da6a119668f769 (diff)
workqueue: Don't bind the rescuer in the last working cpu
So that when the rescuer is woken up next time, it will not interrupt the last working CPU, which might be busy with other crucial work that has nothing to do with the rescuer's incoming work.

Cc: Valentin Schneider <vschneid@redhat.com>
Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
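For readability, here is a rough sketch of how the touched region of kernel/workqueue.c reads once the patch is applied, reconstructed from the hunks below; lines outside the hunks are elided as "...", so this is an illustration rather than a full listing:

static void unbind_worker(struct worker *worker)
{
	lockdep_assert_held(&wq_pool_attach_mutex);

	/* drop the kthread's per-cpu binding */
	kthread_set_per_cpu(worker->task, -1);
	/* prefer wq_unbound_cpumask; fall back if it has no active CPU */
	if (cpumask_intersects(wq_unbound_cpumask, cpu_active_mask))
		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, wq_unbound_cpumask) < 0);
	else
		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
}

static void worker_detach_from_pool(struct worker *worker)
{
	...
	mutex_lock(&wq_pool_attach_mutex);
	unbind_worker(worker);	/* was: kthread_set_per_cpu(worker->task, -1); */
	list_del(&worker->node);
	worker->pool = NULL;
	...
}

Since the rescuer detaches from a pool via worker_detach_from_pool() once its work there is done, it now also has its CPU affinity reset at detach time instead of remaining bound to the pool's CPU until its next wakeup.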
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--   kernel/workqueue.c   24
1 file changed, 12 insertions, 12 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 9c5df3985435..cd1895630145 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2684,6 +2684,17 @@ static void worker_attach_to_pool(struct worker *worker,
	mutex_unlock(&wq_pool_attach_mutex);
}
+static void unbind_worker(struct worker *worker)
+{
+	lockdep_assert_held(&wq_pool_attach_mutex);
+
+	kthread_set_per_cpu(worker->task, -1);
+	if (cpumask_intersects(wq_unbound_cpumask, cpu_active_mask))
+		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, wq_unbound_cpumask) < 0);
+	else
+		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
+}
+
/**
* worker_detach_from_pool() - detach a worker from its pool
* @worker: worker which is attached to its pool
@@ -2701,7 +2712,7 @@ static void worker_detach_from_pool(struct worker *worker)
	mutex_lock(&wq_pool_attach_mutex);
-	kthread_set_per_cpu(worker->task, -1);
+	unbind_worker(worker);
	list_del(&worker->node);
	worker->pool = NULL;
@@ -2796,17 +2807,6 @@ fail:
	return NULL;
}
-static void unbind_worker(struct worker *worker)
-{
-	lockdep_assert_held(&wq_pool_attach_mutex);
-
-	kthread_set_per_cpu(worker->task, -1);
-	if (cpumask_intersects(wq_unbound_cpumask, cpu_active_mask))
-		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, wq_unbound_cpumask) < 0);
-	else
-		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
-}
-
static void wake_dying_workers(struct list_head *cull_list)
{
	struct worker *worker;