| author | Daniel Jordan <daniel.m.jordan@oracle.com> | 2019-09-05 21:40:23 -0400 |
|---|---|---|
| committer | Herbert Xu <herbert@gondor.apana.org.au> | 2019-09-13 21:15:40 +1000 |
| commit | 509b3204890ab31c3e652c26424a0706bb809933 (patch) | |
| tree | 90af2e93c5e654af3fbc5696a93a133333b80ec7 /kernel | |
| parent | 513c98d08682957cc9eba20e7e4bb349970711f3 (diff) | |
workqueue: require CPU hotplug read exclusion for apply_workqueue_attrs
Change the calling convention for apply_workqueue_attrs to require CPU
hotplug read exclusion.
This avoids lockdep complaints about nested calls to get_online_cpus() in a
future patch, where padata will call apply_workqueue_attrs() while changing
other CPU-hotplug-sensitive data structures with the CPU read lock already
held.
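
A minimal caller-side sketch of the new convention (illustrative only; `example_apply_attrs` and the wq/attrs handed to it are placeholders, not code from this patch): the caller, rather than apply_workqueue_attrs() itself, now takes the CPU hotplug read lock, which is what lets a caller that already holds it, such as the padata code mentioned above, avoid a nested get_online_cpus().

```c
#include <linux/cpu.h>		/* get_online_cpus()/put_online_cpus() */
#include <linux/workqueue.h>	/* apply_workqueue_attrs() */

/*
 * Hypothetical caller, not part of this patch: under the new calling
 * convention the caller holds CPU hotplug read exclusion across the
 * call to apply_workqueue_attrs().
 */
static int example_apply_attrs(struct workqueue_struct *wq,
			       const struct workqueue_attrs *attrs)
{
	int ret;

	get_online_cpus();	/* CPU hotplug read exclusion held across the call */
	ret = apply_workqueue_attrs(wq, attrs);
	put_online_cpus();

	return ret;
}
```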
Signed-off-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Acked-by: Tejun Heo <tj@kernel.org>
Acked-by: Steffen Klassert <steffen.klassert@secunet.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Lai Jiangshan <jiangshanlai@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: linux-crypto@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/workqueue.c | 19 |
1 file changed, 14 insertions(+), 5 deletions(-)
```diff
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index f53705ff3ff1..bc2e09a8ea61 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -4030,6 +4030,8 @@ static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
  *
  * Performs GFP_KERNEL allocations.
  *
+ * Assumes caller has CPU hotplug read exclusion, i.e. get_online_cpus().
+ *
  * Return: 0 on success and -errno on failure.
  */
 int apply_workqueue_attrs(struct workqueue_struct *wq,
@@ -4037,9 +4039,11 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
 {
 	int ret;
 
-	apply_wqattrs_lock();
+	lockdep_assert_cpus_held();
+
+	mutex_lock(&wq_pool_mutex);
 	ret = apply_workqueue_attrs_locked(wq, attrs);
-	apply_wqattrs_unlock();
+	mutex_unlock(&wq_pool_mutex);
 
 	return ret;
 }
@@ -4152,16 +4156,21 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
 			mutex_unlock(&wq->mutex);
 		}
 		return 0;
-	} else if (wq->flags & __WQ_ORDERED) {
+	}
+
+	get_online_cpus();
+	if (wq->flags & __WQ_ORDERED) {
 		ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
 		/* there should only be single pwq for ordering guarantee */
 		WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
 			      wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
 		     "ordering guarantee broken for workqueue %s\n", wq->name);
-		return ret;
 	} else {
-		return apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
+		ret = apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
 	}
+	put_online_cpus();
+
+	return ret;
 }
 
 static int wq_clamp_max_active(int max_active, unsigned int flags,
```
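
Ordinary workqueue users are unaffected by the change: as the alloc_and_link_pwqs() hunk above shows, the hotplug read lock is now taken internally around the apply_workqueue_attrs() calls made during workqueue creation. A hypothetical driver snippet (function and workqueue names are placeholders) still creates an unbound workqueue exactly as before:

```c
#include <linux/workqueue.h>

/*
 * Hypothetical driver code, unchanged by this patch: alloc_workqueue()
 * reaches apply_workqueue_attrs() via alloc_and_link_pwqs(), which now
 * takes get_online_cpus()/put_online_cpus() on the caller's behalf.
 */
static struct workqueue_struct *example_create_unbound_wq(void)
{
	return alloc_workqueue("example_wq", WQ_UNBOUND, 0);
}
```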