Diffstat (limited to 'kernel/rcu')
-rw-r--r--   kernel/rcu/torture.c     | 8 +++++---
-rw-r--r--   kernel/rcu/tree.c        | 3 +++
-rw-r--r--   kernel/rcu/tree_plugin.h | 6 +++++-
3 files changed, 13 insertions, 4 deletions
diff --git a/kernel/rcu/torture.c b/kernel/rcu/torture.c
index 3929cd45151..69a4ec80a78 100644
--- a/kernel/rcu/torture.c
+++ b/kernel/rcu/torture.c
@@ -1578,6 +1578,7 @@ static int rcu_torture_barrier_cbs(void *arg)
 {
 	long myid = (long)arg;
 	bool lastphase = 0;
+	bool newphase;
 	struct rcu_head rcu;
 
 	init_rcu_head_on_stack(&rcu);
@@ -1585,10 +1586,11 @@ static int rcu_torture_barrier_cbs(void *arg)
 	set_user_nice(current, 19);
 	do {
 		wait_event(barrier_cbs_wq[myid],
-			   barrier_phase != lastphase ||
+			   (newphase =
+			    ACCESS_ONCE(barrier_phase)) != lastphase ||
 			   kthread_should_stop() ||
 			   fullstop != FULLSTOP_DONTSTOP);
-		lastphase = barrier_phase;
+		lastphase = newphase;
 		smp_mb(); /* ensure barrier_phase load before ->call(). */
 		if (kthread_should_stop() || fullstop != FULLSTOP_DONTSTOP)
 			break;
@@ -1625,7 +1627,7 @@ static int rcu_torture_barrier(void *arg)
 		if (kthread_should_stop() || fullstop != FULLSTOP_DONTSTOP)
 			break;
 		n_barrier_attempts++;
-		cur_ops->cb_barrier();
+		cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
 		if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
 			n_rcu_torture_barrier_error++;
 			WARN_ON_ONCE(1);
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 5243ebea0fc..abef9c358d4 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1533,6 +1533,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 		rdp = this_cpu_ptr(rsp->rda);
 		if (rnp == rdp->mynode)
 			__note_gp_changes(rsp, rnp, rdp);
+		/* smp_mb() provided by prior unlock-lock pair. */
 		nocb += rcu_future_gp_cleanup(rsp, rnp);
 		raw_spin_unlock_irq(&rnp->lock);
 		cond_resched();
@@ -1577,6 +1578,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
 			wait_event_interruptible(rsp->gp_wq,
 						 ACCESS_ONCE(rsp->gp_flags) &
 						 RCU_GP_FLAG_INIT);
+			/* Locking provides needed memory barrier. */
 			if (rcu_gp_init(rsp))
 				break;
 			cond_resched();
@@ -1606,6 +1608,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
 					(!ACCESS_ONCE(rnp->qsmask) &&
 					 !rcu_preempt_blocked_readers_cgp(rnp)),
 					j);
+			/* Locking provides needed memory barriers. */
 			/* If grace period done, leave loop. */
 			if (!ACCESS_ONCE(rnp->qsmask) &&
 			    !rcu_preempt_blocked_readers_cgp(rnp))
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 6abb03dff5c..b023e540711 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -779,8 +779,10 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
 		}
 		if (rnp->parent == NULL) {
 			raw_spin_unlock_irqrestore(&rnp->lock, flags);
-			if (wake)
+			if (wake) {
+				smp_mb(); /* EGP done before wake_up(). */
 				wake_up(&sync_rcu_preempt_exp_wq);
+			}
 			break;
 		}
 		mask = rnp->grpmask;
@@ -1852,6 +1854,7 @@ static int rcu_oom_notify(struct notifier_block *self,
 
 	/* Wait for callbacks from earlier instance to complete. */
 	wait_event(oom_callback_wq, atomic_read(&oom_callback_count) == 0);
+	smp_mb(); /* Ensure callback reuse happens after callback invocation. */
 
 	/*
 	 * Prevent premature wakeup: ensure that all increments happen
@@ -2250,6 +2253,7 @@ static int rcu_nocb_kthread(void *arg)
 			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
 					    TPS("Sleep"));
 			wait_event_interruptible(rdp->nocb_wq, rdp->nocb_head);
+			/* Memory barrier provide by xchg() below. */
 		} else if (firsttime) {
 			firsttime = 0;
 			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
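
The common thread in these hunks is that wait_event()/wake_up() by themselves do not guarantee full memory ordering, so each site either relies on a barrier implied by something else (locking, xchg(), cb_barrier()) or adds an explicit smp_mb(). As a rough illustration only, the C11/pthreads sketch below mimics the pattern with userspace stand-ins; the names (phase, payload, waker, waiter) and the fence placement are assumptions of the sketch, not kernel API. The waker publishes its update before signalling, and the waiter orders its read of the condition before consuming the published data.

/*
 * Minimal userspace sketch (NOT kernel code) of the ordering rule the
 * patch documents around wait_event()/wake_up():
 *   - the waker orders its updates before the wakeup (the role played
 *     in the kernel by cb_barrier()'s implied smp_mb() or the added
 *     smp_mb() before wake_up());
 *   - the waiter orders its read of the wakeup condition before any
 *     later use of the published data (the role of ACCESS_ONCE() plus
 *     the existing smp_mb()).
 * All names and the pthread/C11 mapping are illustrative only.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int phase;	/* stands in for barrier_phase */
static int payload;		/* data published before the wakeup */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wq = PTHREAD_COND_INITIALIZER;

static void *waker(void *arg)
{
	(void)arg;
	payload = 42;					/* update state... */
	atomic_thread_fence(memory_order_release);	/* ...order it before... */
	atomic_store_explicit(&phase, 1, memory_order_relaxed);
	pthread_mutex_lock(&lock);			/* ...the wakeup itself. */
	pthread_cond_broadcast(&wq);
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void *waiter(void *arg)
{
	int newphase;

	(void)arg;
	pthread_mutex_lock(&lock);
	while ((newphase = atomic_load_explicit(&phase, memory_order_relaxed)) == 0)
		pthread_cond_wait(&wq, &lock);
	pthread_mutex_unlock(&lock);
	atomic_thread_fence(memory_order_acquire);	/* order phase load before payload use */
	printf("phase=%d payload=%d\n", newphase, payload);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, waiter, NULL);
	pthread_create(&t2, NULL, waker, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}

Build with -pthread. Without the two fences, the fast path (condition already true on first check) would have no ordering between the payload write and its later use, which is the class of problem the added barriers and comments address in the kernel's primitives.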