Diffstat (limited to 'drivers/cpuidle/governors/menu.c')
 drivers/cpuidle/governors/menu.c | 42 ++++++++++++++++++++++++++++--------------
 1 file changed, 28 insertions(+), 14 deletions(-)
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 78d77c5dc35..8d7cf3f3145 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -34,21 +34,28 @@ static DEFINE_PER_CPU(struct menu_device, menu_devices);
static int menu_select(struct cpuidle_device *dev)
{
struct menu_device *data = &__get_cpu_var(menu_devices);
+ int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY);
int i;

+ /* Special case when user has set very strict latency requirement */
+ if (unlikely(latency_req == 0)) {
+ data->last_state_idx = 0;
+ return 0;
+ }
+
/* determine the expected residency time */
data->expected_us =
(u32) ktime_to_ns(tick_nohz_get_sleep_length()) / 1000;

/* find the deepest idle state that satisfies our constraints */
- for (i = 1; i < dev->state_count; i++) {
+ for (i = CPUIDLE_DRIVER_STATE_START + 1; i < dev->state_count; i++) {
struct cpuidle_state *s = &dev->states[i];

if (s->target_residency > data->expected_us)
break;
if (s->target_residency > data->predicted_us)
break;
- if (s->exit_latency > pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY))
+ if (s->exit_latency > latency_req)
break;
}
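
The new menu_select() flow above is: honor a zero PM QoS latency bound by staying in the shallowest state, then walk the states from shallowest to deepest and stop at the first one whose target residency exceeds the expected or predicted sleep time, or whose exit latency exceeds the QoS bound. A minimal userspace sketch of that walk, with pick_state() and struct idle_state as hypothetical stand-ins for the kernel structures and index 0 playing the role of CPUIDLE_DRIVER_STATE_START:

#include <stdio.h>

/* Stand-in for struct cpuidle_state: only the two fields the
 * selection loop actually consults. */
struct idle_state {
	unsigned int exit_latency;	/* worst-case wakeup latency (us) */
	unsigned int target_residency;	/* minimum useful sleep time (us) */
};

/* Same walk as menu_select() above, in miniature. */
static int pick_state(const struct idle_state *states, int count,
		      unsigned int expected_us, unsigned int predicted_us,
		      int latency_req)
{
	int i;

	/* strict PM QoS bound: never leave the shallowest state */
	if (latency_req == 0)
		return 0;

	/* stop at the first state that violates a constraint */
	for (i = 1; i < count; i++) {
		if (states[i].target_residency > expected_us)
			break;
		if (states[i].target_residency > predicted_us)
			break;
		if (states[i].exit_latency > (unsigned int)latency_req)
			break;
	}
	return i - 1;	/* deepest state that passed every check */
}

int main(void)
{
	/* made-up latency/residency numbers for three states */
	struct idle_state states[] = {
		{ .exit_latency = 1,   .target_residency = 1 },
		{ .exit_latency = 20,  .target_residency = 80 },
		{ .exit_latency = 200, .target_residency = 800 },
	};

	printf("%d\n", pick_state(states, 3, 5000, 5000, 1000)); /* 2 */
	printf("%d\n", pick_state(states, 3, 5000, 5000, 50));   /* 1 */
	printf("%d\n", pick_state(states, 3, 5000, 5000, 0));    /* 0 */
	return 0;
}

Since the loop breaks on the first violated constraint, i - 1 is always the deepest state that passed all three checks. Hoisting pm_qos_requirement() into latency_req also means the QoS value is read once per invocation rather than once per candidate state.
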
@@ -67,9 +74,9 @@ static void menu_reflect(struct cpuidle_device *dev)
{
struct menu_device *data = &__get_cpu_var(menu_devices);
int last_idx = data->last_state_idx;
- unsigned int measured_us =
- cpuidle_get_last_residency(dev) + data->elapsed_us;
+ unsigned int last_idle_us = cpuidle_get_last_residency(dev);
struct cpuidle_state *target = &dev->states[last_idx];
+ unsigned int measured_us;

/*
* Ugh, this idle state doesn't support residency measurements, so we
@@ -77,20 +84,27 @@ static void menu_reflect(struct cpuidle_device *dev)
* for one full standard timer tick. However, be aware that this
* could potentially result in a suboptimal state transition.
*/
- if (!(target->flags & CPUIDLE_FLAG_TIME_VALID))
- measured_us = USEC_PER_SEC / HZ;
+ if (unlikely(!(target->flags & CPUIDLE_FLAG_TIME_VALID)))
+ last_idle_us = USEC_PER_SEC / HZ;
+
+ /*
+ * measured_us and elapsed_us are the cumulative idle time since the
+ * last time we were woken out of idle by an interrupt.
+ */
+ if (data->elapsed_us <= data->elapsed_us + last_idle_us)
+ measured_us = data->elapsed_us + last_idle_us;
+ else
+ measured_us = -1;
+
+ /* Predict time until next break event */
+ data->predicted_us = max(measured_us, data->last_measured_us);

- /* Predict time remaining until next break event */
- if (measured_us + BREAK_FUZZ < data->expected_us - target->exit_latency) {
- data->predicted_us = max(measured_us, data->last_measured_us);
+ if (last_idle_us + BREAK_FUZZ <
+ data->expected_us - target->exit_latency) {
data->last_measured_us = measured_us;
data->elapsed_us = 0;
} else {
- if (data->elapsed_us < data->elapsed_us + measured_us)
- data->elapsed_us = measured_us;
- else
- data->elapsed_us = -1;
- data->predicted_us = max(measured_us, data->last_measured_us);
+ data->elapsed_us = measured_us;
}
}
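
Taken together, the menu_reflect() changes make elapsed_us a running total of consecutive sleeps that were not cut short, guard the unsigned accumulation against wraparound by saturating at -1 (UINT_MAX), and compute predicted_us on every path from the larger of the running total and the last recorded measurement. A minimal userspace sketch of that bookkeeping, with reflect() and struct reflect_state as hypothetical stand-ins and BREAK_FUZZ mirroring the governor's 4 us fuzz constant:

#include <stdio.h>
#include <limits.h>

#define BREAK_FUZZ	4	/* governor's fuzz value (us) */
#define max(a, b)	((a) > (b) ? (a) : (b))

/* per-CPU bookkeeping, mirroring the struct menu_device fields
 * touched in menu_reflect() */
struct reflect_state {
	unsigned int elapsed_us;	/* idle time accumulated across wakeups */
	unsigned int last_measured_us;
	unsigned int expected_us;	/* set before sleeping, fixed here */
	unsigned int predicted_us;
};

static void reflect(struct reflect_state *d, unsigned int last_idle_us,
		    unsigned int exit_latency)
{
	unsigned int measured_us;

	/* saturating add: "a <= a + b" fails only if a + b wrapped */
	if (d->elapsed_us <= d->elapsed_us + last_idle_us)
		measured_us = d->elapsed_us + last_idle_us;
	else
		measured_us = UINT_MAX;	/* the kernel writes -1 here */

	d->predicted_us = max(measured_us, d->last_measured_us);

	if (last_idle_us + BREAK_FUZZ < d->expected_us - exit_latency) {
		/* woke early: treat as a real break event and restart */
		d->last_measured_us = measured_us;
		d->elapsed_us = 0;
	} else {
		/* slept (nearly) the expected time: keep accumulating */
		d->elapsed_us = measured_us;
	}
}

int main(void)
{
	struct reflect_state d = { .expected_us = 10000 };

	reflect(&d, 9998, 5);	/* ran nearly the full interval: accumulate */
	reflect(&d, 120, 5);	/* early wakeup: record total, then reset */
	printf("predicted=%u elapsed=%u\n", d.predicted_us, d.elapsed_us);
	return 0;
}

The wrap check relies on unsigned arithmetic being modulo 2^32: a <= a + b can only be false when a + b wrapped past UINT_MAX, so a failed comparison pins the total at UINT_MAX instead of letting it restart from a small value and skew the prediction.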