author		Lachlan McIlroy <lachlan@redback.melbourne.sgi.com>	2008-02-18 13:51:42 +1100
committer	Lachlan McIlroy <lachlan@redback.melbourne.sgi.com>	2008-02-18 13:51:42 +1100
commit		c58310bf4933986513020fa90b4190c7492995ae
tree		143f2c7578d02ebef5db8fc57ae69e951ae0e2ee	/drivers/acpi/processor_idle.c
parent		269cdfaf769f5cd831284cc831790c7c5038040f
parent		1309d4e68497184d2fd87e892ddf14076c2bda98
Merge git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6 into for-linus
Diffstat (limited to 'drivers/acpi/processor_idle.c')
-rw-r--r--	drivers/acpi/processor_idle.c	84
1 file changed, 66 insertions(+), 18 deletions(-)
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index eb1f82f7915..980e1c33e6c 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -38,7 +38,7 @@
 #include <linux/dmi.h>
 #include <linux/moduleparam.h>
 #include <linux/sched.h>	/* need_resched() */
-#include <linux/latency.h>
+#include <linux/pm_qos_params.h>
 #include <linux/clockchips.h>
 #include <linux/cpuidle.h>
 
@@ -98,6 +98,9 @@ module_param(bm_history, uint, 0644);
 
 static int acpi_processor_set_power_policy(struct acpi_processor *pr);
 
+#else	/* CONFIG_CPU_IDLE */
+static unsigned int latency_factor __read_mostly = 2;
+module_param(latency_factor, uint, 0644);
 #endif
 
 /*
@@ -201,6 +204,10 @@ static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2)
 		return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2);
 }
 
+/*
+ * Callers should disable interrupts before the call and enable
+ * interrupts after return.
+ */
 static void acpi_safe_halt(void)
 {
 	current_thread_info()->status &= ~TS_POLLING;
@@ -261,7 +268,7 @@ static atomic_t c3_cpu_count;
 /* Common C-state entry for C2, C3, .. */
 static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
 {
-	if (cstate->space_id == ACPI_CSTATE_FFH) {
+	if (cstate->entry_method == ACPI_CSTATE_FFH) {
 		/* Call into architectural FFH based C-state */
 		acpi_processor_ffh_cstate_enter(cstate);
 	} else {
@@ -413,6 +420,8 @@ static void acpi_processor_idle(void)
 			pm_idle_save();
 		else
 			acpi_safe_halt();
+
+		local_irq_enable();
 		return;
 	}
 
@@ -521,6 +530,7 @@ static void acpi_processor_idle(void)
 		 * skew otherwise.
 		 */
 		sleep_ticks = 0xFFFFFFFF;
+		local_irq_enable();
 		break;
 
 	case ACPI_STATE_C2:
@@ -648,7 +658,8 @@ static void acpi_processor_idle(void)
 	if (cx->promotion.state &&
 	    ((cx->promotion.state - pr->power.states) <= max_cstate)) {
 		if (sleep_ticks > cx->promotion.threshold.ticks &&
-		    cx->promotion.state->latency <= system_latency_constraint()) {
+		    cx->promotion.state->latency <=
+				pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
 			cx->promotion.count++;
 			cx->demotion.count = 0;
 			if (cx->promotion.count >=
@@ -692,7 +703,8 @@ static void acpi_processor_idle(void)
 	 * or if the latency of the current state is unacceptable
 	 */
 	if ((pr->power.state - pr->power.states) > max_cstate ||
-	    pr->power.state->latency > system_latency_constraint()) {
+	    pr->power.state->latency >
+				pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
 		if (cx->demotion.state)
 			next_state = cx->demotion.state;
 	}
@@ -920,24 +932,29 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
 		cx.address = reg->address;
 		cx.index = current_count + 1;
 
-		cx.space_id = ACPI_CSTATE_SYSTEMIO;
+		cx.entry_method = ACPI_CSTATE_SYSTEMIO;
 		if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
 			if (acpi_processor_ffh_cstate_probe
 					(pr->id, &cx, reg) == 0) {
-				cx.space_id = ACPI_CSTATE_FFH;
-			} else if (cx.type != ACPI_STATE_C1) {
+				cx.entry_method = ACPI_CSTATE_FFH;
+			} else if (cx.type == ACPI_STATE_C1) {
 				/*
 				 * C1 is a special case where FIXED_HARDWARE
 				 * can be handled in non-MWAIT way as well.
 				 * In that case, save this _CST entry info.
-				 * That is, we retain space_id of SYSTEM_IO for
-				 * halt based C1.
 				 * Otherwise, ignore this info and continue.
 				 */
+				cx.entry_method = ACPI_CSTATE_HALT;
+				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
+			} else {
 				continue;
 			}
+		} else {
+			snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
+				 cx.address);
 		}
 
+
 		obj = &(element->package.elements[2]);
 		if (obj->type != ACPI_TYPE_INTEGER)
 			continue;
@@ -1200,7 +1217,7 @@ static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
 		   "maximum allowed latency: %d usec\n",
 		   pr->power.state ? pr->power.state - pr->power.states : 0,
 		   max_cstate, (unsigned)pr->power.bm_activity,
-		   system_latency_constraint());
+		   pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY));
 
 	seq_puts(seq, "states:\n");
 
@@ -1367,12 +1384,16 @@ static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr,
 /**
  * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
  * @cx: cstate data
+ *
+ * Caller disables interrupt before call and enables interrupt after return.
  */
 static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
 {
-	if (cx->space_id == ACPI_CSTATE_FFH) {
+	if (cx->entry_method == ACPI_CSTATE_FFH) {
 		/* Call into architectural FFH based C-state */
 		acpi_processor_ffh_cstate_enter(cx);
+	} else if (cx->entry_method == ACPI_CSTATE_HALT) {
+		acpi_safe_halt();
 	} else {
 		int unused;
 		/* IO port based C-state */
@@ -1394,21 +1415,35 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
 static int acpi_idle_enter_c1(struct cpuidle_device *dev,
 			      struct cpuidle_state *state)
 {
+	u32 t1, t2;
 	struct acpi_processor *pr;
 	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
+
 	pr = processors[smp_processor_id()];
 
 	if (unlikely(!pr))
 		return 0;
 
+	local_irq_disable();
+
+	/* Do not access any ACPI IO ports in suspend path */
+	if (acpi_idle_suspend) {
+		acpi_safe_halt();
+		local_irq_enable();
+		return 0;
+	}
+
 	if (pr->flags.bm_check)
 		acpi_idle_update_bm_rld(pr, cx);
 
-	acpi_safe_halt();
+	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+	acpi_idle_do_entry(cx);
+	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+	local_irq_enable();
 
 	cx->usage++;
 
-	return 0;
+	return ticks_elapsed_in_us(t1, t2);
 }
 
 /**
@@ -1515,7 +1550,9 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 		if (dev->safe_state) {
 			return dev->safe_state->enter(dev, dev->safe_state);
 		} else {
+			local_irq_disable();
 			acpi_safe_halt();
+			local_irq_enable();
 			return 0;
 		}
 	}
@@ -1607,7 +1644,7 @@ struct cpuidle_driver acpi_idle_driver = {
  */
 static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
 {
-	int i, count = 0;
+	int i, count = CPUIDLE_DRIVER_STATE_START;
 	struct acpi_processor_cx *cx;
 	struct cpuidle_state *state;
 	struct cpuidle_device *dev = &pr->power.dev;
@@ -1619,6 +1656,11 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
 		return -EINVAL;
 	}
 
+	for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
+		dev->states[i].name[0] = '\0';
+		dev->states[i].desc[0] = '\0';
+	}
+
 	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
 		cx = &pr->power.states[i];
 		state = &dev->states[count];
@@ -1635,14 +1677,16 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
 		cpuidle_set_statedata(state, cx);
 
 		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
+		strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
 		state->exit_latency = cx->latency;
-		state->target_residency = cx->latency * 6;
+		state->target_residency = cx->latency * latency_factor;
 		state->power_usage = cx->power;
 		state->flags = 0;
 		switch (cx->type) {
 			case ACPI_STATE_C1:
 			state->flags |= CPUIDLE_FLAG_SHALLOW;
+			state->flags |= CPUIDLE_FLAG_TIME_VALID;
 			state->enter = acpi_idle_enter_c1;
 			dev->safe_state = state;
 			break;
 
@@ -1665,6 +1709,8 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
 		}
 
 		count++;
+		if (count == CPUIDLE_STATE_MAX)
+			break;
 	}
 
 	dev->state_count = count;
@@ -1718,8 +1764,9 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
 			       "ACPI: processor limited to max C-state %d\n",
 			       max_cstate);
 		first_run++;
-#if !defined (CONFIG_CPU_IDLE) && defined (CONFIG_SMP)
-		register_latency_notifier(&acpi_processor_latency_notifier);
+#if !defined(CONFIG_CPU_IDLE) && defined(CONFIG_SMP)
+		pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY,
+				    &acpi_processor_latency_notifier);
 #endif
 	}
 
@@ -1806,7 +1853,8 @@ int acpi_processor_power_exit(struct acpi_processor *pr,
 		 */
 		cpu_idle_wait();
 #ifdef CONFIG_SMP
-		unregister_latency_notifier(&acpi_processor_latency_notifier);
+		pm_qos_remove_notifier(PM_QOS_CPU_DMA_LATENCY,
+				       &acpi_processor_latency_notifier);
 #endif
 	}
 #endif
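
For context on the interface this merge moves to: the hunks above replace system_latency_constraint() and register/unregister_latency_notifier() with pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY) and pm_qos_add_notifier()/pm_qos_remove_notifier() from <linux/pm_qos_params.h>. The sketch below is a minimal consumer of that interface in the same pattern as acpi_processor_latency_notifier, assuming the 2.6.25-era pm_qos_params API; the example_* names are hypothetical, not part of this commit.

/*
 * Minimal sketch (not from the commit above) of a pm_qos consumer.
 * Assumes the 2.6.25-era <linux/pm_qos_params.h> interface.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/pm_qos_params.h>

/*
 * Called whenever the aggregate CPU/DMA latency requirement changes;
 * the new constraint (in usec) arrives as the notifier's unsigned long
 * argument, and pm_qos_requirement() reads the current aggregate value.
 */
static int example_latency_notify(struct notifier_block *nb,
				  unsigned long new_latency_us, void *data)
{
	printk(KERN_INFO "cpu_dma_latency changed to %lu usec (now: %d)\n",
	       new_latency_us,
	       pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY));
	return NOTIFY_OK;
}

static struct notifier_block example_latency_nb = {
	.notifier_call = example_latency_notify,
};

static int __init example_init(void)
{
	/* Mirrors the pm_qos_add_notifier() call in the diff above. */
	return pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY,
				   &example_latency_nb);
}

static void __exit example_exit(void)
{
	/* Mirrors the pm_qos_remove_notifier() call in the diff above. */
	pm_qos_remove_notifier(PM_QOS_CPU_DMA_LATENCY,
			       &example_latency_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");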