author    Colin Cross <ccross@android.com>  2012-05-07 17:57:42 -0700
committer Len Brown <len.brown@intel.com>   2012-06-02 00:49:36 -0400
commit    20ff51a36b2cd25ee7eb3216b6d02b68935435ba (patch)
tree      3c50651ca3cdc64c409afe9f867c13a8d50200e0 /drivers/cpuidle/coupled.c
parent    4126c0197bc8c58a0bb7fcda07b01b596b6fb4c5 (diff)
cpuidle: coupled: add parallel barrier function
Adds cpuidle_coupled_parallel_barrier, which can be used by coupled
cpuidle state enter functions to handle resynchronization after
determining if any cpu needs to abort.  The normal use case will be:

static bool abort_flag;
static atomic_t abort_barrier;

int arch_cpuidle_enter(struct cpuidle_device *dev, ...)
{
	if (arch_turn_off_irq_controller()) {
		/* returns an error if an irq is pending and would be lost
		   if idle continued and turned off power */
		abort_flag = true;
	}

	cpuidle_coupled_parallel_barrier(dev, &abort_barrier);

	if (abort_flag) {
		/* One of the cpus didn't turn off its irq controller */
		arch_turn_on_irq_controller();
		return -EINTR;
	}

	/* continue with idle */
	...
}

This will cause all cpus to abort idle together if one of them needs
to abort.

Reviewed-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Tested-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Reviewed-by: Kevin Hilman <khilman@ti.com>
Tested-by: Kevin Hilman <khilman@ti.com>
Signed-off-by: Colin Cross <ccross@android.com>
Signed-off-by: Len Brown <len.brown@intel.com>
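For readers puzzling over how a single atomic can serve as a reusable
barrier, here is a minimal user-space sketch of the same two-phase
algorithm as the function added below.  It is an illustration under
stated assumptions, not part of the patch: parallel_barrier_model and
its n parameter are hypothetical stand-ins (n plays the role of
dev->coupled->online_count), sched_yield() stands in for cpu_relax(),
and C11 seq_cst atomics supply the full-barrier semantics the kernel
code requests explicitly.

#include <stdatomic.h>
#include <sched.h>

void parallel_barrier_model(atomic_int *a, int n)
{
	/* Phase 1: count up to n; no thread proceeds until all n arrive. */
	atomic_fetch_add(a, 1);
	while (atomic_load(a) < n)
		sched_yield();

	/* Phase 2: count up to 2n; the last arrival resets the counter,
	 * making the same atomic immediately reusable as a fresh barrier. */
	if (atomic_fetch_add(a, 1) + 1 == 2 * n) {
		atomic_store(a, 0);
		return;
	}

	/* Everyone else waits for that reset before leaving. */
	while (atomic_load(a) > n)
		sched_yield();
}

The second counting phase is what makes the barrier reusable without a
second variable: a thread that re-enters the barrier early can only
drive the counter back up to n - 1 until the last straggler of the
previous round has left, so the spin conditions of the two rounds never
interfere.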
Diffstat (limited to 'drivers/cpuidle/coupled.c')
-rw-r--r--  drivers/cpuidle/coupled.c | 37 +++++++++++++++++++++++++++++++++++++
1 file changed, 37 insertions(+), 0 deletions(-)
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
index aab6bba8dae..2c9bf269223 100644
--- a/drivers/cpuidle/coupled.c
+++ b/drivers/cpuidle/coupled.c
@@ -130,6 +130,43 @@ static DEFINE_PER_CPU(struct call_single_data, cpuidle_coupled_poke_cb);
static cpumask_t cpuidle_coupled_poked_mask;
/**
+ * cpuidle_coupled_parallel_barrier - synchronize all online coupled cpus
+ * @dev: cpuidle_device of the calling cpu
+ * @a: atomic variable to hold the barrier
+ *
+ * No caller will return from this function until all online cpus in the
+ * same coupled group have called this function.  Once any caller has
+ * returned, the barrier is immediately available for reuse.
+ *
+ * The atomic variable a must be initialized to 0 before any cpu calls
+ * this function, and will be reset to 0 before any cpu returns from it.
+ *
+ * Must only be called from within a coupled idle state handler
+ * (state.enter when state.flags has CPUIDLE_FLAG_COUPLED set).
+ *
+ * Provides full smp barrier semantics before and after calling.
+ */
+void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a)
+{
+ int n = dev->coupled->online_count;
+
+ smp_mb__before_atomic_inc();
+ atomic_inc(a);
+
+ while (atomic_read(a) < n)
+ cpu_relax();
+
+ if (atomic_inc_return(a) == n * 2) {
+ atomic_set(a, 0);
+ return;
+ }
+
+ while (atomic_read(a) > n)
+ cpu_relax();
+}
+
+/**
* cpuidle_state_is_coupled - check if a state is part of a coupled set
* @dev: struct cpuidle_device for the current cpu
* @drv: struct cpuidle_driver for the platform