padata: make padata_do_parallel find alternate callback CPU
padata_do_parallel currently returns -EINVAL if the callback CPU isn't in
the callback cpumask.

pcrypt tries to prevent this situation by keeping its own callback cpumask
in sync with padata's and checks that the callback CPU it passes to padata
is valid. Make padata handle this instead. padata_do_parallel now takes a
pointer to the callback CPU and updates it for the caller if an alternate
CPU is used. Overall behavior in terms of which callback CPUs are chosen
stays the same.

Prepares for removal of the padata cpumask notifier in pcrypt, which will
fix a lockdep complaint about nested acquisition of the CPU hotplug lock
later in the series.

Signed-off-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Acked-by: Steffen Klassert <steffen.klassert@secunet.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Lai Jiangshan <jiangshanlai@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: linux-crypto@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 509b320489
commit e6ce0e0807
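From a caller's perspective, the new API works as in the sketch below. Only
padata_do_parallel() and the padata types come from the patched interface;
the wrapper function itself is hypothetical and just mirrors what the pcrypt
call sites in this patch do.

/*
 * Hypothetical caller sketch (not part of this patch).  It mirrors the
 * pcrypt call sites below: pass a pointer to the preferred callback CPU
 * and let padata rewrite it if a fallback CPU had to be chosen.
 */
static int example_submit(struct padata_instance *pinst,
			  struct padata_priv *padata, int *cb_cpu)
{
	int err;

	err = padata_do_parallel(pinst, padata, cb_cpu);
	if (!err)
		return -EINPROGRESS;	/* queued; *cb_cpu may have been updated */

	return err;	/* e.g. -EINVAL if the callback cpumask is empty */
}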
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -57,35 +57,6 @@ struct pcrypt_aead_ctx {
 	unsigned int cb_cpu;
 };
 
-static int pcrypt_do_parallel(struct padata_priv *padata, unsigned int *cb_cpu,
-			      struct padata_pcrypt *pcrypt)
-{
-	unsigned int cpu_index, cpu, i;
-	struct pcrypt_cpumask *cpumask;
-
-	cpu = *cb_cpu;
-
-	rcu_read_lock_bh();
-	cpumask = rcu_dereference_bh(pcrypt->cb_cpumask);
-	if (cpumask_test_cpu(cpu, cpumask->mask))
-		goto out;
-
-	if (!cpumask_weight(cpumask->mask))
-		goto out;
-
-	cpu_index = cpu % cpumask_weight(cpumask->mask);
-
-	cpu = cpumask_first(cpumask->mask);
-	for (i = 0; i < cpu_index; i++)
-		cpu = cpumask_next(cpu, cpumask->mask);
-
-	*cb_cpu = cpu;
-
-out:
-	rcu_read_unlock_bh();
-	return padata_do_parallel(pcrypt->pinst, padata, cpu);
-}
-
 static int pcrypt_aead_setkey(struct crypto_aead *parent,
 			      const u8 *key, unsigned int keylen)
 {
@@ -157,7 +128,7 @@ static int pcrypt_aead_encrypt(struct aead_request *req)
 			       req->cryptlen, req->iv);
 	aead_request_set_ad(creq, req->assoclen);
 
-	err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt);
+	err = padata_do_parallel(pencrypt.pinst, padata, &ctx->cb_cpu);
 	if (!err)
 		return -EINPROGRESS;
 
@@ -199,7 +170,7 @@ static int pcrypt_aead_decrypt(struct aead_request *req)
 			       req->cryptlen, req->iv);
 	aead_request_set_ad(creq, req->assoclen);
 
-	err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pdecrypt);
+	err = padata_do_parallel(pdecrypt.pinst, padata, &ctx->cb_cpu);
 	if (!err)
 		return -EINPROGRESS;
 
--- a/include/linux/padata.h
+++ b/include/linux/padata.h
@@ -154,7 +154,7 @@ struct padata_instance {
 extern struct padata_instance *padata_alloc_possible(const char *name);
 extern void padata_free(struct padata_instance *pinst);
 extern int padata_do_parallel(struct padata_instance *pinst,
-			      struct padata_priv *padata, int cb_cpu);
+			      struct padata_priv *padata, int *cb_cpu);
 extern void padata_do_serial(struct padata_priv *padata);
 extern int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
 			      cpumask_var_t cpumask);
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -94,17 +94,19 @@ static void padata_parallel_worker(struct work_struct *parallel_work)
  *
  * @pinst: padata instance
  * @padata: object to be parallelized
- * @cb_cpu: cpu the serialization callback function will run on,
- *          must be in the serial cpumask of padata(i.e. cpumask.cbcpu).
+ * @cb_cpu: pointer to the CPU that the serialization callback function should
+ *          run on.  If it's not in the serial cpumask of @pinst
+ *          (i.e. cpumask.cbcpu), this function selects a fallback CPU and if
+ *          none found, returns -EINVAL.
  *
  * The parallelization callback function will run with BHs off.
  * Note: Every object which is parallelized by padata_do_parallel
  * must be seen by padata_do_serial.
  */
 int padata_do_parallel(struct padata_instance *pinst,
-		       struct padata_priv *padata, int cb_cpu)
+		       struct padata_priv *padata, int *cb_cpu)
 {
-	int target_cpu, err;
+	int i, cpu, cpu_index, target_cpu, err;
 	struct padata_parallel_queue *queue;
 	struct parallel_data *pd;
 
@@ -116,9 +118,20 @@ int padata_do_parallel(struct padata_instance *pinst,
 	if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
 		goto out;
 
-	if (!cpumask_test_cpu(cb_cpu, pd->cpumask.cbcpu))
-		goto out;
+	if (!cpumask_test_cpu(*cb_cpu, pd->cpumask.cbcpu)) {
+		if (!cpumask_weight(pd->cpumask.cbcpu))
+			goto out;
+
+		/* Select an alternate fallback CPU and notify the caller. */
+		cpu_index = *cb_cpu % cpumask_weight(pd->cpumask.cbcpu);
+
+		cpu = cpumask_first(pd->cpumask.cbcpu);
+		for (i = 0; i < cpu_index; i++)
+			cpu = cpumask_next(cpu, pd->cpumask.cbcpu);
+
+		*cb_cpu = cpu;
+	}
 
 	err = -EBUSY;
 	if ((pinst->flags & PADATA_RESET))
 		goto out;
@@ -129,7 +142,7 @@ int padata_do_parallel(struct padata_instance *pinst,
 	err = 0;
 	atomic_inc(&pd->refcnt);
 	padata->pd = pd;
-	padata->cb_cpu = cb_cpu;
+	padata->cb_cpu = *cb_cpu;
 
 	target_cpu = padata_cpu_hash(pd);
 	padata->cpu = target_cpu;
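The fallback selection in padata_do_parallel() above is a simple modulo
index into the callback cpumask: the requested CPU modulo the mask's weight
picks the Nth allowed CPU, so a given requested CPU always maps to the same
fallback for a given mask. A small userspace model of that selection, for
illustration only (the kernel walks a real cpumask with
cpumask_first()/cpumask_next()):

#include <stdio.h>

/*
 * Userspace model of the fallback pick; "allowed" stands in for the
 * callback cpumask.  Returns -1 where the kernel would return -EINVAL.
 */
static int pick_fallback_cpu(const int *allowed, int count, int requested)
{
	if (count == 0)
		return -1;
	return allowed[requested % count];
}

int main(void)
{
	int allowed[] = { 2, 5, 7 };	/* example callback cpumask */

	/* Requested CPU 4 is not in the mask: 4 % 3 == 1 -> CPU 5. */
	printf("fallback CPU: %d\n", pick_fallback_cpu(allowed, 3, 4));
	return 0;
}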