rcutorture: Dynamically allocate rcu_fwds structure

This commit switches from a static structure to dynamic allocation
for rcu_fwds as another step towards providing multiple call_rcu()
forward-progress kthreads.

Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Paul E. McKenney 2019-11-06 08:35:08 -08:00
parent 6764100bd2
commit 5155be9994
1 changed file with 8 additions and 5 deletions

@@ -1686,7 +1686,7 @@ struct rcu_fwd {
 	unsigned long rcu_launder_gp_seq_start;
 };
 
-struct rcu_fwd rcu_fwds;
+struct rcu_fwd *rcu_fwds;
 bool rcu_fwd_emergency_stop;
 
 static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp)
@@ -1952,7 +1952,7 @@ static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
 static int rcutorture_oom_notify(struct notifier_block *self,
 				 unsigned long notused, void *nfreed)
 {
-	struct rcu_fwd *rfp = &rcu_fwds;
+	struct rcu_fwd *rfp = rcu_fwds;
 
 	WARN(1, "%s invoked upon OOM during forward-progress testing.\n",
 	     __func__);
@@ -2010,7 +2010,7 @@ static int rcu_torture_fwd_prog(void *args)
 /* If forward-progress checking is requested and feasible, spawn the thread. */
 static int __init rcu_torture_fwd_prog_init(void)
 {
-	struct rcu_fwd *rfp = &rcu_fwds;
+	struct rcu_fwd *rfp;
 
 	if (!fwd_progress)
 		return 0; /* Not requested, so don't do it. */
@@ -2026,12 +2026,15 @@ static int __init rcu_torture_fwd_prog_init(void)
 		WARN_ON(1); /* Make sure rcutorture notices conflict. */
 		return 0;
 	}
-	spin_lock_init(&rfp->rcu_fwd_lock);
-	rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
 	if (fwd_progress_holdoff <= 0)
 		fwd_progress_holdoff = 1;
 	if (fwd_progress_div <= 0)
 		fwd_progress_div = 4;
+	rfp = kzalloc(sizeof(*rfp), GFP_KERNEL);
+	if (!rfp)
+		return -ENOMEM;
+	spin_lock_init(&rfp->rcu_fwd_lock);
+	rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
 	return torture_create_kthread(rcu_torture_fwd_prog, rfp, fwd_prog_task);
 }
 
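
For readers unfamiliar with the pattern, below is a minimal userspace sketch of
the static-to-dynamic conversion this commit performs.  It is not rcutorture
code: the names (fwd_state, fwd_worker, fwd_init) are made up for illustration,
and calloc()/free() stand in for kzalloc()/kfree() since kernel APIs are not
available in a standalone program.

	#include <stdio.h>
	#include <stdlib.h>

	struct fwd_state {
		unsigned long n_launders;
	};

	/* Before the conversion this was "struct fwd_state fwd_state;". */
	static struct fwd_state *fwd_state;

	static int fwd_worker(struct fwd_state *fsp)
	{
		/* Worker touches only the pointer it was handed. */
		fsp->n_launders++;
		printf("launders: %lu\n", fsp->n_launders);
		return 0;
	}

	static int fwd_init(void)
	{
		struct fwd_state *fsp;

		fsp = calloc(1, sizeof(*fsp));	/* stands in for kzalloc(..., GFP_KERNEL) */
		if (!fsp)
			return -1;		/* stands in for -ENOMEM */
		fwd_state = fsp;		/* publish via a global so other paths can find it */
		return fwd_worker(fsp);		/* stands in for torture_create_kthread() */
	}

	int main(void)
	{
		int ret = fwd_init();

		free(fwd_state);
		return ret;
	}

Because the worker receives its state through a pointer argument rather than a
file-scope instance, adding more workers later only requires allocating more
instances, which is the point of this step in the series.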