workqueue: require CPU hotplug read exclusion for apply_workqueue_attrs

Change the calling convention for apply_workqueue_attrs to require CPU
hotplug read exclusion.

Avoids lockdep complaints about nested calls to get_online_cpus in a
future patch where padata calls apply_workqueue_attrs when changing
other CPU-hotplug-sensitive data structures with the CPU read lock
already held.

Signed-off-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Acked-by: Tejun Heo <tj@kernel.org>
Acked-by: Steffen Klassert <steffen.klassert@secunet.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Lai Jiangshan <jiangshanlai@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: linux-crypto@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
This commit is contained in:
Daniel Jordan <daniel.m.jordan@oracle.com>, 2019-09-05 21:40:23 -04:00, committed by Herbert Xu
parent 513c98d086
commit 509b320489
1 changed file with 14 additions and 5 deletions

View File

@@ -4030,6 +4030,8 @@ static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
  *
  * Performs GFP_KERNEL allocations.
  *
+ * Assumes caller has CPU hotplug read exclusion, i.e. get_online_cpus().
+ *
  * Return: 0 on success and -errno on failure.
  */
 int apply_workqueue_attrs(struct workqueue_struct *wq,
@@ -4037,9 +4039,11 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
 {
 	int ret;
 
-	apply_wqattrs_lock();
+	lockdep_assert_cpus_held();
+
+	mutex_lock(&wq_pool_mutex);
 	ret = apply_workqueue_attrs_locked(wq, attrs);
-	apply_wqattrs_unlock();
+	mutex_unlock(&wq_pool_mutex);
 
 	return ret;
 }
@@ -4152,16 +4156,21 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
 			mutex_unlock(&wq->mutex);
 		}
 		return 0;
-	} else if (wq->flags & __WQ_ORDERED) {
+	}
+
+	get_online_cpus();
+	if (wq->flags & __WQ_ORDERED) {
 		ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
 		/* there should only be single pwq for ordering guarantee */
 		WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
 			      wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
 		     "ordering guarantee broken for workqueue %s\n", wq->name);
-		return ret;
 	} else {
-		return apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
+		ret = apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
 	}
+	put_online_cpus();
+
+	return ret;
 }
static int wq_clamp_max_active(int max_active, unsigned int flags, static int wq_clamp_max_active(int max_active, unsigned int flags,