author | Lai Jiangshan <laijs@cn.fujitsu.com> | 2014-05-20 17:46:35 +0800
---|---|---
committer | Tejun Heo <tj@kernel.org> | 2014-05-20 10:59:32 -0400
commit | 4736cbf7a4b2a6bbb50a809a6933fb7eb29dc38f (patch) |
tree | 0e362a58838340c4b172e090f89a120b8b80c5d2 /kernel/workqueue.c |
parent | 92f9c5c40cc67ffcc5ac7f55fdbd6ae8afc7e0b4 (diff) |
workqueue: separate pool-attaching code out from create_worker()
Currently, the code that attaches a new worker to its pool is embedded in
create_worker(). Separating it out makes the code clearer and will let
rescuers share the same attach path later (see the sketch below).
tj: Description and comment updates.
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
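
As the description notes, the split is aimed at letting rescuers reuse the
attach path instead of open-coding it. The following is only an illustrative
sketch of how such a caller could bracket its work with the new helpers; the
rescuer conversion is not part of this patch, and the function name and
surrounding details here are hypothetical.

```c
/*
 * Illustrative sketch only (not from this patch): a hypothetical rescuer
 * helper that reuses worker_attach_to_pool()/worker_detach_from_pool()
 * rather than duplicating the attach logic.
 */
static void rescuer_serve_pool(struct worker *rescuer,
			       struct worker_pool *pool)
{
	/* bind the rescuer's cpumask and WORKER_UNBOUND state to @pool */
	worker_attach_to_pool(rescuer, pool);

	/* ... process the mayday'd work items queued on @pool ... */

	/*
	 * Detach again; per the helper's comment, the rescuer must not
	 * touch @pool afterwards unless it holds its own reference.
	 */
	worker_detach_from_pool(rescuer, pool);
}
```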
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r-- | kernel/workqueue.c | 61
1 file changed, 38 insertions(+), 23 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 38b9ea7c204c..b1de6ac4a0e3 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -66,7 +66,7 @@ enum {
	 *
	 * Note that DISASSOCIATED should be flipped only while holding
	 * attach_mutex to avoid changing binding state while
-	 * create_worker() is in progress.
+	 * worker_attach_to_pool() is in progress.
	 */
	POOL_DISASSOCIATED	= 1 << 2,	/* cpu can't serve workers */
	POOL_FREEZING		= 1 << 3,	/* freeze in progress */
@@ -1683,13 +1683,46 @@ static struct worker *alloc_worker(void)
 }
 
 /**
+ * worker_attach_to_pool() - attach a worker to a pool
+ * @worker: worker to be attached
+ * @pool: the target pool
+ *
+ * Attach @worker to @pool.  Once attached, the %WORKER_UNBOUND flag and
+ * cpu-binding of @worker are kept coordinated with the pool across
+ * cpu-[un]hotplugs.
+ */
+static void worker_attach_to_pool(struct worker *worker,
+				  struct worker_pool *pool)
+{
+	mutex_lock(&pool->attach_mutex);
+
+	/*
+	 * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
+	 * online CPUs.  It'll be re-applied when any of the CPUs come up.
+	 */
+	set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
+
+	/*
+	 * The pool->attach_mutex ensures %POOL_DISASSOCIATED remains
+	 * stable across this function.  See the comments above the
+	 * flag definition for details.
+	 */
+	if (pool->flags & POOL_DISASSOCIATED)
+		worker->flags |= WORKER_UNBOUND;
+
+	list_add_tail(&worker->node, &pool->workers);
+
+	mutex_unlock(&pool->attach_mutex);
+}
+
+/**
  * worker_detach_from_pool() - detach a worker from its pool
  * @worker: worker which is attached to its pool
  * @pool: the pool @worker is attached to
  *
- * Undo the attaching which had been done in create_worker().  The caller
- * worker shouldn't access to the pool after detached except it has other
- * reference to the pool.
+ * Undo the attaching which had been done in worker_attach_to_pool().  The
+ * caller worker shouldn't access to the pool after detached except it has
+ * other reference to the pool.
  */
 static void worker_detach_from_pool(struct worker *worker,
				    struct worker_pool *pool)
@@ -1753,26 +1786,8 @@ static struct worker *create_worker(struct worker_pool *pool)
	/* prevent userland from meddling with cpumask of workqueue workers */
	worker->task->flags |= PF_NO_SETAFFINITY;
 
-	mutex_lock(&pool->attach_mutex);
-
-	/*
-	 * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
-	 * online CPUs.  It'll be re-applied when any of the CPUs come up.
-	 */
-	set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
-
-	/*
-	 * The pool->attach_mutex ensures %POOL_DISASSOCIATED
-	 * remains stable across this function.  See the comments above the
-	 * flag definition for details.
-	 */
-	if (pool->flags & POOL_DISASSOCIATED)
-		worker->flags |= WORKER_UNBOUND;
-	/* successful, attach the worker to the pool */
-	list_add_tail(&worker->node, &pool->workers);
-
-	mutex_unlock(&pool->attach_mutex);
+	worker_attach_to_pool(worker, pool);
 
	return worker;
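
For context on why worker_attach_to_pool() may test %POOL_DISASSOCIATED under
pool->attach_mutex: the CPU-hotplug side flips that flag while holding the
same mutex, so an attaching worker always observes a consistent binding
state. Below is a simplified sketch of that other side, loosely modelled on
the unbind path in kernel/workqueue.c of this era; the function name is
hypothetical and the body is abbreviated, not the verbatim source.

```c
/*
 * Simplified sketch (abbreviated, not the verbatim kernel source): the
 * DISASSOCIATED flag is only flipped with attach_mutex held, which is
 * what lets worker_attach_to_pool() test it safely.
 */
static void unbind_pool_workers(struct worker_pool *pool)
{
	struct worker *worker;

	mutex_lock(&pool->attach_mutex);	/* exclude attachers */
	spin_lock_irq(&pool->lock);

	/* existing workers lose their CPU binding... */
	list_for_each_entry(worker, &pool->workers, node)
		worker->flags |= WORKER_UNBOUND;

	/* ...and future attachers see the pool as disassociated */
	pool->flags |= POOL_DISASSOCIATED;

	spin_unlock_irq(&pool->lock);
	mutex_unlock(&pool->attach_mutex);
}
```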