rcu: Add ACCESS_ONCE() to ->n_force_qs_lh accesses
The ->n_force_qs_lh field is accessed without the benefit of any
synchronization, so this commit adds the needed ACCESS_ONCE() wrappers.
Yes, increments to ->n_force_qs_lh can be lost, but contention should be
low and the field is strictly statistical in nature, so this is not a
problem.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
commit 3660c2813f
parent 6d0abeca32
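For context, here is a minimal sketch of the pattern the diff below relies on. The struct and function names are made up for illustration; the macro itself has historically lived in include/linux/compiler.h and is essentially a volatile cast of this shape. The point is that ACCESS_ONCE(x)++ forces exactly one load and one store of the field while still not being atomic.

/*
 * Illustrative only -- not kernel code.  The kernel's ACCESS_ONCE() has
 * historically been a volatile cast along these lines.
 */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

struct fqs_stats {
        unsigned long n_force_qs_lh;    /* bumped with no lock held */
};

static void note_fqs_contention(struct fqs_stats *sp)
{
        /*
         * One volatile load, an increment, one volatile store: the compiler
         * may not fuse, tear, or re-fetch the access, but the sequence is
         * still not atomic, so a concurrent increment can be lost.
         */
        ACCESS_ONCE(sp->n_force_qs_lh)++;
}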
@@ -2304,7 +2304,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
                 if (rnp_old != NULL)
                         raw_spin_unlock(&rnp_old->fqslock);
                 if (ret) {
-                        rsp->n_force_qs_lh++;
+                        ACCESS_ONCE(rsp->n_force_qs_lh)++;
                         return;
                 }
                 rnp_old = rnp;
@@ -2316,7 +2316,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
         smp_mb__after_unlock_lock();
         raw_spin_unlock(&rnp_old->fqslock);
         if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
-                rsp->n_force_qs_lh++;
+                ACCESS_ONCE(rsp->n_force_qs_lh)++;
                 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
                 return; /* Someone beat us to it. */
         }
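The two hunks above are the update side: ->n_force_qs_lh is bumped while no lock protecting it is held, so two CPUs racing here can each read the same old value and one increment disappears. The standalone user-space sketch below (hypothetical names, built with gcc -pthread, not kernel code) makes that loss visible, which is exactly the tradeoff the commit message declares acceptable for a purely statistical counter.

#include <pthread.h>
#include <stdio.h>

#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
#define N_INCS 1000000UL

static unsigned long counter;   /* stands in for ->n_force_qs_lh */

static void *bump(void *arg)
{
        unsigned long i;

        (void)arg;
        for (i = 0; i < N_INCS; i++)
                ACCESS_ONCE(counter)++; /* single load + store, not atomic */
        return NULL;
}

int main(void)
{
        pthread_t t1, t2;

        pthread_create(&t1, NULL, bump, NULL);
        pthread_create(&t2, NULL, bump, NULL);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);

        /* Typically prints something less than the ideal 2000000. */
        printf("counter = %lu (ideal %lu)\n", counter, 2 * N_INCS);
        return 0;
}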
@@ -273,7 +273,7 @@ static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
         seq_printf(m, "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu oqlen=%ld/%ld\n",
                    rsp->n_force_qs, rsp->n_force_qs_ngp,
                    rsp->n_force_qs - rsp->n_force_qs_ngp,
-                   rsp->n_force_qs_lh, rsp->qlen_lazy, rsp->qlen);
+                   ACCESS_ONCE(rsp->n_force_qs_lh), rsp->qlen_lazy, rsp->qlen);
         for (rnp = &rsp->node[0]; rnp - &rsp->node[0] < rcu_num_nodes; rnp++) {
                 if (rnp->level != level) {
                         seq_puts(m, "\n");
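The last hunk is the read side: the statistics dump in print_one_rcu_state() now snapshots the counter through ACCESS_ONCE() as well, so the compiler must load the racy field exactly once rather than possibly re-reading or tearing it while the line is being formatted. A user-space sketch of that reader discipline, again with made-up names rather than the kernel's seq_file code:

#include <stdio.h>

#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

struct fake_rcu_state {
        unsigned long n_force_qs_lh;    /* updated concurrently, lock-free */
};

static void print_fqs_stats(struct fake_rcu_state *rsp)
{
        /* Exactly one load of the racy field before it is formatted. */
        unsigned long fqlh = ACCESS_ONCE(rsp->n_force_qs_lh);

        printf("fqlh=%lu\n", fqlh);
}

int main(void)
{
        struct fake_rcu_state s = { .n_force_qs_lh = 42 };

        print_fqs_stats(&s);
        return 0;
}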