author     Ingo Molnar <mingo@elte.hu>    2008-07-16 13:11:29 +0200
committer  Ingo Molnar <mingo@elte.hu>    2008-07-16 13:11:29 +0200
commit     77e442461c74068217b59b356cf18992c78ed896 (patch)
tree       70f62a16bc6a81b63768279c9b40645d8f4dd4ff /kernel/rcuclassic.c
parent     d54191b85e294c46f05a2249b1f55ae54930bcc7 (diff)
parent     45158894d4d6704afbb4cefe55e5f6ca279fe12a (diff)
Merge branch 'linus' into x86/kprobes
Diffstat (limited to 'kernel/rcuclassic.c')
-rw-r--r--  kernel/rcuclassic.c  34
1 file changed, 33 insertions(+), 1 deletion(-)
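The first hunk below folds the offlined CPU's pending-callback count (qlen) into the surviving CPU's rcu_data, with local interrupts disabled so the update cannot race with callbacks being queued on this CPU. A minimal userspace sketch of that migrate-the-counter-under-local-exclusion pattern is shown here; the struct and function names are illustrative, not from the patch, and a mutex stands in for local_irq_disable()/local_irq_enable():

/*
 * Userspace analogue of the qlen migration in the first hunk: fold a dead
 * CPU's pending-callback count into the survivor's.  A mutex stands in for
 * local_irq_disable(); the names here are illustrative, not from the patch.
 */
#include <pthread.h>
#include <stdio.h>

struct cpu_state {
	pthread_mutex_t lock;	/* the kernel disables local IRQs instead */
	long qlen;		/* callbacks still queued on this "CPU" */
};

static void migrate_qlen(struct cpu_state *self, struct cpu_state *dead)
{
	/* Exclude concurrent qlen updates while the count moves over. */
	pthread_mutex_lock(&self->lock);
	self->qlen += dead->qlen;
	dead->qlen = 0;
	pthread_mutex_unlock(&self->lock);
}

int main(void)
{
	struct cpu_state self = { PTHREAD_MUTEX_INITIALIZER, 3 };
	struct cpu_state dead = { PTHREAD_MUTEX_INITIALIZER, 5 };

	migrate_qlen(&self, &dead);
	printf("survivor qlen = %ld\n", self.qlen);	/* prints 8 */
	return 0;
}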
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
index 65c0906080e..16eeeaa9d61 100644
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
@@ -387,6 +387,10 @@ static void __rcu_offline_cpu(struct rcu_data *this_rdp,
rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail);
rcu_move_batch(this_rdp, rdp->curlist, rdp->curtail);
rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail);
+
+ local_irq_disable();
+ this_rdp->qlen += rdp->qlen;
+ local_irq_enable();
}
static void rcu_offline_cpu(int cpu)
@@ -516,10 +520,38 @@ void rcu_check_callbacks(int cpu, int user)
if (user ||
(idle_cpu(cpu) && !in_softirq() &&
hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
+
+ /*
+ * Get here if this CPU took its interrupt from user
+ * mode or from the idle loop, and if this is not a
+ * nested interrupt. In this case, the CPU is in
+ * a quiescent state, so count it.
+ *
+ * Also do a memory barrier. This is needed to handle
+ * the case where writes from a preempt-disable section
+ * of code get reordered into schedule() by this CPU's
+ * write buffer. The memory barrier makes sure that
+ * the rcu_qsctr_inc() and rcu_bh_qsctr_inc() are seen
+ * by other CPUs to happen after any such write.
+ */
+
+ smp_mb(); /* See above block comment. */
rcu_qsctr_inc(cpu);
rcu_bh_qsctr_inc(cpu);
- } else if (!in_softirq())
+
+ } else if (!in_softirq()) {
+
+ /*
+ * Get here if this CPU did not take its interrupt from
+ * softirq, in other words, if it is not interrupting
+ * a rcu_bh read-side critical section. This is a _bh
+ * critical section, so count it. The memory barrier
+ * is needed for the same reason as is the above one.
+ */
+
+ smp_mb(); /* See above block comment. */
rcu_bh_qsctr_inc(cpu);
+ }
raise_rcu_softirq();
}
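The block comments added in the second hunk describe an ordering requirement: stores performed before the quiescent state is reported (for example inside a preempt-disabled region) must be visible to other CPUs no later than the counter increment itself, which is why an smp_mb() is placed ahead of rcu_qsctr_inc()/rcu_bh_qsctr_inc(). A minimal userspace sketch of that publish-after-a-full-barrier pattern, using C11 atomics rather than the kernel primitives (the variable and function names below are hypothetical, not from the patch):

/*
 * Userspace sketch of the ordering described in the block comments above:
 * a full fence keeps every earlier store ahead of the quiescent-state
 * counter increment, mirroring smp_mb() before rcu_qsctr_inc().  A reader
 * that observes the incremented counter with an acquire load is then
 * guaranteed to also observe the earlier store to shared_data.
 */
#include <stdatomic.h>
#include <stdio.h>

static int shared_data;			/* plain store from a "critical" region */
static atomic_long qs_counter;		/* stands in for the per-CPU rcu_qsctr */

static void report_quiescent_state(void)
{
	/*
	 * Full fence: all stores issued before this point are ordered
	 * before the increment below, so other threads cannot see the
	 * new count without also seeing the earlier writes.
	 */
	atomic_thread_fence(memory_order_seq_cst);
	atomic_fetch_add_explicit(&qs_counter, 1, memory_order_relaxed);
}

int main(void)
{
	shared_data = 42;	/* write that must not be reordered past the count */
	report_quiescent_state();
	printf("data=%d qs=%ld\n", shared_data,
	       atomic_load_explicit(&qs_counter, memory_order_relaxed));
	return 0;
}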