author	Lai Jiangshan <jiangshan.ljs@antgroup.com>	2024-06-21 15:32:24 +0800
committer	Tejun Heo <tj@kernel.org>	2024-06-21 12:34:02 -1000
commit	f4b7b53c94afdec6dd0f1f834cfcc40595ddc916 (patch)
tree	8cf61aacbd5124f8425aa40bf3a9aa36b829af3a /kernel
parent	f45b1c3c33373c8c29a95a5188165d6eb634823a (diff)
workqueue: Detach workers directly in idle_cull_fn()
The code to kick off the destruction of workers is now in a process
context (idle_cull_fn()), and the detaching of a worker is no longer
required to happen inside the worker thread itself, so just do the
detaching directly in idle_cull_fn().

wake_dying_workers() is renamed to detach_dying_workers() and the
unneeded wakeup in wake_dying_workers() is also removed.

Cc: Valentin Schneider <vschneid@redhat.com>
Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
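In outline, the patch factors the detach sequence into one helper and
calls it from both paths. A condensed sketch of the resulting code,
mirroring the hunks below (surrounding declarations from
kernel/workqueue.c are assumed):

static void detach_worker(struct worker *worker)
{
	lockdep_assert_held(&wq_pool_attach_mutex);

	unbind_worker(worker);		/* let the task run on cpu_possible_mask again */
	list_del(&worker->node);	/* unlink from the pool's worker list */
	worker->pool = NULL;
}

/* worker_detach_from_pool() is kept for its remaining callers. */
static void worker_detach_from_pool(struct worker *worker)
{
	mutex_lock(&wq_pool_attach_mutex);
	detach_worker(worker);
	mutex_unlock(&wq_pool_attach_mutex);
}

/* Cull path: the culling context now detaches dying workers itself. */
static void detach_dying_workers(struct list_head *cull_list)
{
	struct worker *worker;

	list_for_each_entry(worker, cull_list, entry)
		detach_worker(worker);
}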
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/workqueue.c	45
1 file changed, 19 insertions, 26 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index cd1895630145..04168972814b 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2695,6 +2695,16 @@ static void unbind_worker(struct worker *worker)
 	WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
 }
+
+static void detach_worker(struct worker *worker)
+{
+	lockdep_assert_held(&wq_pool_attach_mutex);
+
+	unbind_worker(worker);
+	list_del(&worker->node);
+	worker->pool = NULL;
+}
+
 /**
  * worker_detach_from_pool() - detach a worker from its pool
  * @worker: worker which is attached to its pool
@@ -2711,11 +2721,7 @@ static void worker_detach_from_pool(struct worker *worker)
 	WARN_ON_ONCE(pool->flags & POOL_BH);
 	mutex_lock(&wq_pool_attach_mutex);
-
-	unbind_worker(worker);
-	list_del(&worker->node);
-	worker->pool = NULL;
-
+	detach_worker(worker);
 	mutex_unlock(&wq_pool_attach_mutex);
 	/* clear leftover flags without pool->lock after it is detached */
@@ -2807,24 +2813,12 @@ fail:
 	return NULL;
 }
-static void wake_dying_workers(struct list_head *cull_list)
+static void detach_dying_workers(struct list_head *cull_list)
 {
 	struct worker *worker;
-	list_for_each_entry(worker, cull_list, entry) {
-		unbind_worker(worker);
-		/*
-		 * If the worker was somehow already running, then it had to be
-		 * in pool->idle_list when set_worker_dying() happened or we
-		 * wouldn't have gotten here.
-		 *
-		 * Thus, the worker must either have observed the WORKER_DIE
-		 * flag, or have set its state to TASK_IDLE. Either way, the
-		 * below will be observed by the worker and is safe to do
-		 * outside of pool->lock.
-		 */
-		wake_up_process(worker->task);
-	}
+	list_for_each_entry(worker, cull_list, entry)
+		detach_worker(worker);
 }
 static void reap_dying_workers(struct list_head *cull_list)
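The comment removed above existed to justify the wake_up_process()
call: a dying worker had to run so it could observe WORKER_DIE and
detach itself. With the detach now done on the worker's behalf, that
wakeup is redundant. A minimal before/after sketch of the loop,
assuming that reap_dying_workers(), not shown in full in this excerpt,
stops each kthread via kthread_stop(), which itself wakes the target
task:

	/* before: hand the work off to the dying worker */
	list_for_each_entry(worker, cull_list, entry) {
		unbind_worker(worker);
		wake_up_process(worker->task);	/* worker observes WORKER_DIE and exits */
	}

	/* after: detach in the culling context; no wakeup needed here */
	list_for_each_entry(worker, cull_list, entry)
		detach_worker(worker);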
@@ -2930,9 +2924,9 @@ static void idle_cull_fn(struct work_struct *work)
 	/*
 	 * Grabbing wq_pool_attach_mutex here ensures an already-running worker
-	 * cannot proceed beyond worker_detach_from_pool() in its self-destruct
-	 * path. This is required as a previously-preempted worker could run after
-	 * set_worker_dying() has happened but before wake_dying_workers() did.
+	 * cannot proceed beyond set_pf_worker() in its self-destruct path.
+	 * This is required as a previously-preempted worker could run after
+	 * set_worker_dying() has happened but before detach_dying_workers() did.
 	 */
 	mutex_lock(&wq_pool_attach_mutex);
 	raw_spin_lock_irq(&pool->lock);
@@ -2953,7 +2947,7 @@ static void idle_cull_fn(struct work_struct *work)
 	}
 	raw_spin_unlock_irq(&pool->lock);
-	wake_dying_workers(&cull_list);
+	detach_dying_workers(&cull_list);
 	mutex_unlock(&wq_pool_attach_mutex);
 	reap_dying_workers(&cull_list);
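Taken together, the hunks above give idle_cull_fn() the following
shape: the detach happens while wq_pool_attach_mutex is still held,
closing the window the comment describes, and the reap runs outside
all locks. A simplified sketch (the worker-selection loop is elided,
and the idle_cull_work field name is an assumption based on the usual
container_of() idiom, not shown in this excerpt):

static void idle_cull_fn(struct work_struct *work)
{
	struct worker_pool *pool = container_of(work, struct worker_pool, idle_cull_work);
	LIST_HEAD(cull_list);

	mutex_lock(&wq_pool_attach_mutex);	/* blocks a racing self-destructing worker */
	raw_spin_lock_irq(&pool->lock);
	/* ... move too-idle workers onto cull_list via set_worker_dying() ... */
	raw_spin_unlock_irq(&pool->lock);

	detach_dying_workers(&cull_list);	/* detach while the mutex is still held */
	mutex_unlock(&wq_pool_attach_mutex);

	reap_dying_workers(&cull_list);		/* final teardown, outside all locks */
}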
@@ -3336,7 +3330,6 @@ woke_up:
 		set_task_comm(worker->task, "kworker/dying");
 		ida_free(&pool->worker_ida, worker->id);
-		worker_detach_from_pool(worker);
 		WARN_ON_ONCE(!list_empty(&worker->entry));
 		return 0;
 	}
@@ -4921,7 +4914,7 @@ static void put_unbound_pool(struct worker_pool *pool)
 	WARN_ON(pool->nr_workers || pool->nr_idle);
 	raw_spin_unlock_irq(&pool->lock);
-	wake_dying_workers(&cull_list);
+	detach_dying_workers(&cull_list);
 	mutex_unlock(&wq_pool_attach_mutex);