| author | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2013-12-03 09:24:02 -0800 |
|---|---|---|
| committer | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2014-02-17 15:01:10 -0800 |
| commit | 3660c2813fb6d0ba48ee44bcbf9feddf7218c11d (patch) | |
| tree | db632e8ff7302aae520dfab42e6b2fce4615fb7f /kernel/rcu/tree.c | |
| parent | 6d0abeca3242a88cab8232e4acd7e2bf088f3bc2 (diff) | |
rcu: Add ACCESS_ONCE() to ->n_force_qs_lh accesses
The ->n_force_qs_lh field is accessed without the benefit of any
synchronization, so this commit adds the needed ACCESS_ONCE() wrappers.
Yes, increments to ->n_force_qs_lh can be lost, but contention should
be low and the field is strictly statistical in nature, so this is not
a problem.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
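The wrapper in question is a pure compiler constraint, not a lock or an atomic. Below is a minimal userspace sketch of the idea: the macro body matches the ACCESS_ONCE() definition from include/linux/compiler.h of this era, while the standalone counter and the functions around it are illustrative stand-ins for rsp->n_force_qs_lh, not part of the patch.

```c
/*
 * Minimal userspace sketch of ACCESS_ONCE(). The macro body matches
 * include/linux/compiler.h of this era; the counter and functions
 * below are hypothetical stand-ins for rsp->n_force_qs_lh.
 */
#include <stdio.h>

#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

static unsigned long n_force_qs_lh;	/* stand-in for rsp->n_force_qs_lh */

static void note_fqs_lock_contention(void)
{
	/*
	 * Expands to one volatile load plus one volatile store: the
	 * compiler may not tear, refetch, or fuse the access, but the
	 * read-modify-write as a whole is not atomic, so a concurrent
	 * increment can be lost -- acceptable for a purely
	 * statistical field.
	 */
	ACCESS_ONCE(n_force_qs_lh)++;
}

int main(void)
{
	note_fqs_lock_contention();
	printf("n_force_qs_lh = %lu\n", ACCESS_ONCE(n_force_qs_lh));
	return 0;
}
```

Because the macro merely forces the access through a volatile-qualified lvalue, ACCESS_ONCE(x)++ compiles to exactly one load and one store. That rules out the compiler mangling the access, but it does not make the increment atomic, which is why the commit message concedes that increments can be lost.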
Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r-- | kernel/rcu/tree.c | 4
1 file changed, 2 insertions, 2 deletions
```diff
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index b3d116cd072..e6415779862 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2304,7 +2304,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
 		if (rnp_old != NULL)
 			raw_spin_unlock(&rnp_old->fqslock);
 		if (ret) {
-			rsp->n_force_qs_lh++;
+			ACCESS_ONCE(rsp->n_force_qs_lh)++;
 			return;
 		}
 		rnp_old = rnp;
@@ -2316,7 +2316,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
 	smp_mb__after_unlock_lock();
 	raw_spin_unlock(&rnp_old->fqslock);
 	if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
-		rsp->n_force_qs_lh++;
+		ACCESS_ONCE(rsp->n_force_qs_lh)++;
 		raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
 		return; /* Someone beat us to it. */
 	}
```
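For readers following this code forward: later kernels retired ACCESS_ONCE() in favor of READ_ONCE() and WRITE_ONCE(), which make the single load and single store explicit at the call site. The sketch below shows the same increment in that newer idiom; the simplified macro bodies and the local counter are stand-ins for illustration, not the kernel's exact definitions.

```c
/*
 * Userspace sketch of the READ_ONCE()/WRITE_ONCE() idiom that later
 * kernels use where this patch uses ACCESS_ONCE(). The macro bodies
 * are simplified stand-ins, and n_force_qs_lh is a local counter
 * standing in for the rcu_state field.
 */
#include <stdio.h>

#define READ_ONCE(x)		(*(const volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, val)	(*(volatile typeof(x) *)&(x) = (val))

static unsigned long n_force_qs_lh;

int main(void)
{
	/*
	 * Equivalent of ACCESS_ONCE(n_force_qs_lh)++ in the newer
	 * idiom: one once-only load, one once-only store. The
	 * increment is still not atomic, which remains acceptable
	 * for a strictly statistical counter.
	 */
	WRITE_ONCE(n_force_qs_lh, READ_ONCE(n_force_qs_lh) + 1);
	printf("n_force_qs_lh = %lu\n", READ_ONCE(n_force_qs_lh));
	return 0;
}
```

Splitting the wrapper into an explicit load and store makes the one-load/one-store shape of the increment visible where it happens, rather than hiding it behind the ++ operator.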