From dc7fd275ae60ef8edf952aff2a62462f5d892fd4 Mon Sep 17 00:00:00 2001
From: ShuoX Liu
Date: Tue, 3 Jul 2012 19:05:31 +0200
Subject: cpuidle: move field disable from per-driver to per-cpu

Andrew J. Schorr raised a question: when he changes the disable setting
on a single CPU, it affects all the other CPUs.  The reason is that the
disable field is currently per-driver rather than per-CPU, so all the
C-states of the same driver are shared by all CPUs in the same machine.

This patch changes the `disable' field to per-cpu, so it can be set
separately for each CPU.

Signed-off-by: ShuoX Liu
Reported-by: Andrew J. Schorr
Reviewed-by: Yanmin Zhang
Signed-off-by: Andrew Morton
Signed-off-by: Rafael J. Wysocki
---
 include/linux/cpuidle.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'include/linux/cpuidle.h')

diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 6c26a3da0e0..8570012a535 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -34,6 +34,7 @@ struct cpuidle_driver;
 
 struct cpuidle_state_usage {
 	void *driver_data;
+	unsigned long long disable;
 	unsigned long long usage;
 	unsigned long long time; /* in US */
 };
@@ -46,7 +47,6 @@ struct cpuidle_state {
 	unsigned int	exit_latency; /* in US */
 	int		power_usage; /* in mW */
 	unsigned int	target_residency; /* in US */
-	unsigned int    disable;
 
 	int (*enter)	(struct cpuidle_device *dev,
 			struct cpuidle_driver *drv,
--
cgit v1.2.3-70-g09d2
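As an aside on how the relocated field is meant to be consulted: a governor now
has to look at dev->states_usage[i].disable for the CPU at hand rather than at a
field on the shared driver.  A minimal governor-style sketch (not part of the
patch; pick_deepest_state() is a made-up helper) could look like this:

/* Sketch only -- not from the patch series. */
#include <linux/cpuidle.h>

static int pick_deepest_state(struct cpuidle_driver *drv,
			      struct cpuidle_device *dev,
			      unsigned int predicted_us)
{
	int i, idx = CPUIDLE_DRIVER_STATE_START;

	for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];
		struct cpuidle_state_usage *su = &dev->states_usage[i];

		/* the disable knob is now per-cpu, reached via the device */
		if (su->disable)
			continue;
		if (s->target_residency > predicted_us)
			continue;
		idx = i;	/* deepest state still allowed on this CPU */
	}
	return idx;
}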
From 6e797a078824b30afbfae6cc4b1c2b21c51761ef Mon Sep 17 00:00:00 2001
From: "Rafael J. Wysocki"
Date: Sat, 16 Jun 2012 15:20:11 +0200
Subject: PM / cpuidle: Add driver reference counter

Add a reference counter for the cpuidle driver, so that it can't be
unregistered when it is in use.

Signed-off-by: Rafael J. Wysocki
---
 drivers/cpuidle/driver.c | 29 ++++++++++++++++++++++++++++-
 include/linux/cpuidle.h  |  6 +++++-
 2 files changed, 33 insertions(+), 2 deletions(-)

(limited to 'include/linux/cpuidle.h')

diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 40cd3f3024d..58bf3b1ac9c 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -16,6 +16,7 @@
 
 static struct cpuidle_driver *cpuidle_curr_driver;
 DEFINE_SPINLOCK(cpuidle_driver_lock);
+int cpuidle_driver_refcount;
 
 static void __cpuidle_register_driver(struct cpuidle_driver *drv)
 {
@@ -89,8 +90,34 @@ void cpuidle_unregister_driver(struct cpuidle_driver *drv)
 	}
 
 	spin_lock(&cpuidle_driver_lock);
-	cpuidle_curr_driver = NULL;
+
+	if (!WARN_ON(cpuidle_driver_refcount > 0))
+		cpuidle_curr_driver = NULL;
+
 	spin_unlock(&cpuidle_driver_lock);
 }
 
 EXPORT_SYMBOL_GPL(cpuidle_unregister_driver);
+
+struct cpuidle_driver *cpuidle_driver_ref(void)
+{
+	struct cpuidle_driver *drv;
+
+	spin_lock(&cpuidle_driver_lock);
+
+	drv = cpuidle_curr_driver;
+	cpuidle_driver_refcount++;
+
+	spin_unlock(&cpuidle_driver_lock);
+	return drv;
+}
+
+void cpuidle_driver_unref(void)
+{
+	spin_lock(&cpuidle_driver_lock);
+
+	if (!WARN_ON(cpuidle_driver_refcount <= 0))
+		cpuidle_driver_refcount--;
+
+	spin_unlock(&cpuidle_driver_lock);
+}
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 8570012a535..27cfced7b57 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -136,7 +136,9 @@ struct cpuidle_driver {
 extern void disable_cpuidle(void);
 extern int cpuidle_idle_call(void);
 extern int cpuidle_register_driver(struct cpuidle_driver *drv);
-struct cpuidle_driver *cpuidle_get_driver(void);
+extern struct cpuidle_driver *cpuidle_get_driver(void);
+extern struct cpuidle_driver *cpuidle_driver_ref(void);
+extern void cpuidle_driver_unref(void);
 extern void cpuidle_unregister_driver(struct cpuidle_driver *drv);
 extern int cpuidle_register_device(struct cpuidle_device *dev);
 extern void cpuidle_unregister_device(struct cpuidle_device *dev);
@@ -157,6 +159,8 @@ static inline int cpuidle_idle_call(void) { return -ENODEV; }
 static inline int cpuidle_register_driver(struct cpuidle_driver *drv)
 {return -ENODEV; }
 static inline struct cpuidle_driver *cpuidle_get_driver(void) {return NULL; }
+static inline struct cpuidle_driver *cpuidle_driver_ref(void) {return NULL; }
+static inline void cpuidle_driver_unref(void) {}
 static inline void cpuidle_unregister_driver(struct cpuidle_driver *drv) { }
 static inline int cpuidle_register_device(struct cpuidle_device *dev)
 {return -ENODEV; }
--
cgit v1.2.3-70-g09d2
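For illustration, a minimal sketch of how a client of this API is expected to
use the new reference counter: it pins the current driver for the duration of
its work and releases it afterwards.  This is the pattern the PM domains code
in the next patch follows; use_state() here is a hypothetical placeholder.

/* Sketch only -- use_state() is a made-up consumer of the state. */
#include <linux/cpuidle.h>
#include <linux/errno.h>

static void use_state(struct cpuidle_state *s) { /* hypothetical work */ }

static int client_use_cpuidle_state(int state)
{
	struct cpuidle_driver *drv;
	int ret = 0;

	drv = cpuidle_driver_ref();	/* pin the driver; may be NULL */
	if (!drv)
		return -ENODEV;

	if (state >= drv->state_count) {
		ret = -EINVAL;
		goto out;
	}

	use_state(&drv->states[state]);

 out:
	cpuidle_driver_unref();		/* allow unregistration again */
	return ret;
}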
From cbc9ef0287ab764d3da0129efa673808df641fe3 Mon Sep 17 00:00:00 2001
From: "Rafael J. Wysocki"
Date: Tue, 3 Jul 2012 19:07:42 +0200
Subject: PM / Domains: Add preliminary support for cpuidle, v2

On some systems there are CPU cores located in the same power domains
as I/O devices.  Then, power can only be removed from the domain if all
I/O devices in it are not in use and the CPU core is idle.  Add
preliminary support for that to the generic PM domains framework.

First, the platform is expected to provide a cpuidle driver with one
extra state designated for use with the generic PM domains code.  This
state should be initially disabled and its exit_latency value should be
set to whatever time is needed to bring up the CPU core itself after
restoring power to it, not including the domain's power on latency.
Its .enter() callback should point to a procedure that will remove
power from the domain containing the CPU core at the end of the CPU
power transition.  The remaining characteristics of the extra cpuidle
state, referred to as the "domain" cpuidle state below (e.g. power
usage, target residency), should be populated in accordance with the
properties of the hardware.

Next, the platform should execute genpd_attach_cpuidle() on the PM
domain containing the CPU core.  That will cause the generic PM domains
framework to treat that domain in a special way such that:

* When all devices in the domain have been suspended and it is about to
  be turned off, the states of the devices will be saved, but power
  will not be removed from the domain.  Instead, the "domain" cpuidle
  state will be enabled so that power can be removed from the domain
  when the CPU core is idle and the state has been chosen as the target
  by the cpuidle governor.

* When the first I/O device in the domain is resumed and
  __pm_genpd_poweron() is called for the first time after power has
  been removed from the domain, the "domain" cpuidle state will be
  disabled to avoid subsequent surprise power removals via cpuidle.

The effective exit_latency value of the "domain" cpuidle state depends
on the time needed to bring up the CPU core itself after restoring
power to it as well as on the power on latency of the domain containing
the CPU core.  Thus the "domain" cpuidle state's exit_latency has to be
recomputed every time the domain's power on latency is updated, which
may happen every time power is restored to the domain, if the measured
power on latency is greater than the latency stored in the
corresponding generic_pm_domain structure.

Signed-off-by: Rafael J. Wysocki
Reviewed-by: Kevin Hilman
---
 drivers/base/power/domain.c      | 117 +++++++++++++++++++++++++++++++++++++++
 drivers/cpuidle/cpuidle.c        |   1 +
 drivers/cpuidle/governors/menu.c |   3 +-
 include/linux/cpuidle.h          |   1 +
 include/linux/pm_domain.h        |  17 ++++++
 5 files changed, 138 insertions(+), 1 deletion(-)

(limited to 'include/linux/cpuidle.h')

diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index da1d52576ec..4b5f090fccb 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -139,6 +139,19 @@ static void genpd_set_active(struct generic_pm_domain *genpd)
 	genpd->status = GPD_STATE_ACTIVE;
 }
 
+static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd)
+{
+	s64 usecs64;
+
+	if (!genpd->cpu_data)
+		return;
+
+	usecs64 = genpd->power_on_latency_ns;
+	do_div(usecs64, NSEC_PER_USEC);
+	usecs64 += genpd->cpu_data->saved_exit_latency;
+	genpd->cpu_data->idle_state->exit_latency = usecs64;
+}
+
 /**
  * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
  * @genpd: PM domain to power up.
@@ -176,6 +189,13 @@ int __pm_genpd_poweron(struct generic_pm_domain *genpd)
 		return 0;
 	}
 
+	if (genpd->cpu_data) {
+		cpuidle_pause_and_lock();
+		genpd->cpu_data->idle_state->disabled = true;
+		cpuidle_resume_and_unlock();
+		goto out;
+	}
+
 	/*
 	 * The list is guaranteed not to change while the loop below is being
 	 * executed, unless one of the masters' .power_on() callbacks fiddles
@@ -215,6 +235,7 @@ int __pm_genpd_poweron(struct generic_pm_domain *genpd)
 		if (elapsed_ns > genpd->power_on_latency_ns) {
 			genpd->power_on_latency_ns = elapsed_ns;
 			genpd->max_off_time_changed = true;
+			genpd_recalc_cpu_exit_latency(genpd);
 			if (genpd->name)
 				pr_warning("%s: Power-on latency exceeded, "
 					"new value %lld ns\n", genpd->name,
@@ -222,6 +243,7 @@ int __pm_genpd_poweron(struct generic_pm_domain *genpd)
 		}
 	}
 
+ out:
 	genpd_set_active(genpd);
 
 	return 0;
@@ -455,6 +477,21 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
 		}
 	}
 
+	if (genpd->cpu_data) {
+		/*
+		 * If cpu_data is set, cpuidle should turn the domain off when
+		 * the CPU in it is idle.  In that case we don't decrement the
+		 * subdomain counts of the master domains, so that power is not
+		 * removed from the current domain prematurely as a result of
+		 * cutting off the masters' power.
+		 */
+		genpd->status = GPD_STATE_POWER_OFF;
+		cpuidle_pause_and_lock();
+		genpd->cpu_data->idle_state->disabled = false;
+		cpuidle_resume_and_unlock();
+		goto out;
+	}
+
 	if (genpd->power_off) {
 		ktime_t time_start;
 		s64 elapsed_ns;
@@ -1600,6 +1637,86 @@ int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
 }
 EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
 
+int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
+{
+	struct cpuidle_driver *cpuidle_drv;
+	struct gpd_cpu_data *cpu_data;
+	struct cpuidle_state *idle_state;
+	int ret = 0;
+
+	if (IS_ERR_OR_NULL(genpd) || state < 0)
+		return -EINVAL;
+
+	genpd_acquire_lock(genpd);
+
+	if (genpd->cpu_data) {
+		ret = -EEXIST;
+		goto out;
+	}
+	cpu_data = kzalloc(sizeof(*cpu_data), GFP_KERNEL);
+	if (!cpu_data) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	cpuidle_drv = cpuidle_driver_ref();
+	if (!cpuidle_drv) {
+		ret = -ENODEV;
+		goto out;
+	}
+	if (cpuidle_drv->state_count <= state) {
+		ret = -EINVAL;
+		goto err;
+	}
+	idle_state = &cpuidle_drv->states[state];
+	if (!idle_state->disabled) {
+		ret = -EAGAIN;
+		goto err;
+	}
+	cpu_data->idle_state = idle_state;
+	cpu_data->saved_exit_latency = idle_state->exit_latency;
+	genpd->cpu_data = cpu_data;
+	genpd_recalc_cpu_exit_latency(genpd);
+
+ out:
+	genpd_release_lock(genpd);
+	return ret;
+
+ err:
+	cpuidle_driver_unref();
+	goto out;
+}
+
+int genpd_detach_cpuidle(struct generic_pm_domain *genpd)
+{
+	struct gpd_cpu_data *cpu_data;
+	struct cpuidle_state *idle_state;
+	int ret = 0;
+
+	if (IS_ERR_OR_NULL(genpd))
+		return -EINVAL;
+
+	genpd_acquire_lock(genpd);
+
+	cpu_data = genpd->cpu_data;
+	if (!cpu_data) {
+		ret = -ENODEV;
+		goto out;
+	}
+	idle_state = cpu_data->idle_state;
+	if (!idle_state->disabled) {
+		ret = -EAGAIN;
+		goto out;
+	}
+	idle_state->exit_latency = cpu_data->saved_exit_latency;
+	cpuidle_driver_unref();
+	genpd->cpu_data = NULL;
+	kfree(cpu_data);
+
+ out:
+	genpd_release_lock(genpd);
+	return ret;
+}
+
 /* Default device callbacks for generic PM domains. */
 
 /**
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 04e4b7674a4..0132706251d 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -265,6 +265,7 @@ static void poll_idle_init(struct cpuidle_driver *drv)
 	state->power_usage = -1;
 	state->flags = 0;
 	state->enter = poll_idle;
+	state->disabled = false;
 }
 #else
 static void poll_idle_init(struct cpuidle_driver *drv) {}
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 8391d93f57d..5b1f2c372c1 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -281,6 +281,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 	 * unless the timer is happening really really soon.
 	 */
 	if (data->expected_us > 5 &&
+	    !drv->states[CPUIDLE_DRIVER_STATE_START].disabled &&
 		dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable == 0)
 		data->last_state_idx = CPUIDLE_DRIVER_STATE_START;
 
@@ -292,7 +293,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 		struct cpuidle_state *s = &drv->states[i];
 		struct cpuidle_state_usage *su = &dev->states_usage[i];
 
-		if (su->disable)
+		if (s->disabled || su->disable)
 			continue;
 		if (s->target_residency > data->predicted_us)
 			continue;
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 27cfced7b57..8684a0d07b8 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -47,6 +47,7 @@ struct cpuidle_state {
 	unsigned int	exit_latency; /* in US */
 	int		power_usage; /* in mW */
 	unsigned int	target_residency; /* in US */
+	bool		disabled; /* disabled on all CPUs */
 
 	int (*enter)	(struct cpuidle_device *dev,
 			struct cpuidle_driver *drv,
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 30f794eb382..2febe31d267 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -15,6 +15,7 @@
 #include <linux/err.h>
 #include <linux/of.h>
 #include <linux/notifier.h>
+#include <linux/cpuidle.h>
 
 enum gpd_status {
 	GPD_STATE_ACTIVE = 0,	/* PM domain is active */
@@ -45,6 +46,11 @@ struct gpd_dev_ops {
 	bool (*active_wakeup)(struct device *dev);
 };
 
+struct gpd_cpu_data {
+	unsigned int saved_exit_latency;
+	struct cpuidle_state *idle_state;
+};
+
 struct generic_pm_domain {
 	struct dev_pm_domain domain;	/* PM domain operations */
 	struct list_head gpd_list_node;	/* Node in the global PM domains list */
@@ -75,6 +81,7 @@ struct generic_pm_domain {
 	bool max_off_time_changed;
 	bool cached_power_down_ok;
 	struct device_node *of_node; /* Node in device tree */
+	struct gpd_cpu_data *cpu_data;
 };
 
 static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
@@ -155,6 +162,8 @@ extern int pm_genpd_add_callbacks(struct device *dev,
 				  struct gpd_dev_ops *ops,
 				  struct gpd_timing_data *td);
 extern int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td);
+extern int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state);
+extern int genpd_detach_cpuidle(struct generic_pm_domain *genpd);
 extern void pm_genpd_init(struct generic_pm_domain *genpd,
 			  struct dev_power_governor *gov, bool is_off);
 
@@ -211,6 +220,14 @@ static inline int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
 {
 	return -ENOSYS;
 }
+static inline int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int st)
+{
+	return -ENOSYS;
+}
+static inline int genpd_detach_cpuidle(struct generic_pm_domain *genpd)
+{
+	return -ENOSYS;
+}
 static inline void pm_genpd_init(struct generic_pm_domain *genpd,
 				 struct dev_power_governor *gov, bool is_off)
 {
--
cgit v1.2.3-70-g09d2
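For illustration, a sketch of the platform-side setup the changelog describes,
wiring a CPU power domain to the designated cpuidle state.  The domain name,
state index and init function (a3sp_domain, STATE_A3SP_OFF, a3sp_pm_init) are
made-up placeholders, not taken from any real platform.

/* Sketch only -- platform code assumed, all names are placeholders. */
#include <linux/pm_domain.h>
#include <linux/printk.h>

#define STATE_A3SP_OFF	2	/* the extra, initially disabled cpuidle state */

static struct generic_pm_domain a3sp_domain = {
	.name = "A3SP",
};

static int __init a3sp_pm_init(void)
{
	int ret;

	pm_genpd_init(&a3sp_domain, NULL, false);

	/* hand the "domain" cpuidle state over to genpd */
	ret = genpd_attach_cpuidle(&a3sp_domain, STATE_A3SP_OFF);
	if (ret)
		pr_warn("A3SP: genpd_attach_cpuidle() failed: %d\n", ret);

	return ret;
}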
From 25ac77613aa8fca131599705e3d7da2a0eaa06a0 Mon Sep 17 00:00:00 2001
From: Daniel Lezcano
Date: Thu, 5 Jul 2012 15:23:25 +0200
Subject: ACPI: intel_idle : break dependency between modules

When the system is booted with some CPUs offline, the idle driver is
not initialized for them.  When such a CPU is later set online, the
ACPI code calls the intel_idle init function.  Unfortunately, that
introduces a dependency between intel_idle and ACPI.

This patch removes the dependency by using intel_idle's own CPU hotplug
notifier instead.  It also has the benefit of encapsulating the
intel_idle driver and removing some exported functions.

Signed-off-by: Daniel Lezcano
Acked-by: Srivatsa S. Bhat
Signed-off-by: Rafael J. Wysocki
---
 drivers/acpi/processor_driver.c |  7 -------
 drivers/idle/intel_idle.c       | 41 +++++++++++++++++++++++++++--------------
 include/linux/cpuidle.h         |  7 -------
 3 files changed, 27 insertions(+), 28 deletions(-)

(limited to 'include/linux/cpuidle.h')

diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 0734086537b..8648b29f6ee 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -427,18 +427,11 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
 	 * Initialize missing things
 	 */
 	if (pr->flags.need_hotplug_init) {
-		struct cpuidle_driver *idle_driver =
-			cpuidle_get_driver();
-
 		printk(KERN_INFO "Will online and init hotplugged "
 		       "CPU: %d\n", pr->id);
 		WARN(acpi_processor_start(pr), "Failed to start CPU:"
 		     " %d\n", pr->id);
 		pr->flags.need_hotplug_init = 0;
-		if (idle_driver && !strcmp(idle_driver->name,
-					   "intel_idle")) {
-			intel_idle_cpu_init(pr->id);
-		}
 		/* Normal CPU soft online event */
 	} else {
 		acpi_processor_ppc_has_changed(pr, 0);
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index d0f59c3f87e..fe95d5464a0 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -96,6 +96,7 @@ static const struct idle_cpu *icpu;
 static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
 static int intel_idle(struct cpuidle_device *dev,
 			struct cpuidle_driver *drv, int index);
+static int intel_idle_cpu_init(int cpu);
 
 static struct cpuidle_state *cpuidle_state_table;
 
@@ -302,22 +303,35 @@ static void __setup_broadcast_timer(void *arg)
 	clockevents_notify(reason, &cpu);
 }
 
-static int setup_broadcast_cpuhp_notify(struct notifier_block *n,
-		unsigned long action, void *hcpu)
+static int cpu_hotplug_notify(struct notifier_block *n,
+			      unsigned long action, void *hcpu)
 {
 	int hotcpu = (unsigned long)hcpu;
+	struct cpuidle_device *dev;
 
 	switch (action & 0xf) {
 	case CPU_ONLINE:
-		smp_call_function_single(hotcpu, __setup_broadcast_timer,
-					 (void *)true, 1);
+
+		if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE)
+			smp_call_function_single(hotcpu, __setup_broadcast_timer,
+						 (void *)true, 1);
+
+		/*
+		 * Some systems can hotplug a cpu at runtime after
+		 * the kernel has booted, we have to initialize the
+		 * driver in this case
+		 */
+		dev = per_cpu_ptr(intel_idle_cpuidle_devices, hotcpu);
+		if (!dev->registered)
+			intel_idle_cpu_init(hotcpu);
+
 		break;
 	}
 	return NOTIFY_OK;
 }
 
-static struct notifier_block setup_broadcast_notifier = {
-	.notifier_call = setup_broadcast_cpuhp_notify,
+static struct notifier_block cpu_hotplug_notifier = {
+	.notifier_call = cpu_hotplug_notify,
 };
 
 static void auto_demotion_disable(void *dummy)
@@ -405,10 +419,10 @@ static int intel_idle_probe(void)
 
 	if (boot_cpu_has(X86_FEATURE_ARAT))	/* Always Reliable APIC Timer */
 		lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
-	else {
+	else
 		on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
-		register_cpu_notifier(&setup_broadcast_notifier);
-	}
+
+	register_cpu_notifier(&cpu_hotplug_notifier);
 
 	pr_debug(PREFIX "v" INTEL_IDLE_VERSION
 		" model 0x%X\n", boot_cpu_data.x86_model);
@@ -494,7 +508,7 @@ static int intel_idle_cpuidle_driver_init(void)
  * allocate, initialize, register cpuidle_devices
  * @cpu: cpu/core to initialize
  */
-int intel_idle_cpu_init(int cpu)
+static int intel_idle_cpu_init(int cpu)
 {
 	int cstate;
 	struct cpuidle_device *dev;
@@ -539,7 +553,6 @@ int intel_idle_cpu_init(int cpu)
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(intel_idle_cpu_init);
 
 static int __init intel_idle_init(void)
 {
@@ -581,10 +594,10 @@ static void __exit intel_idle_exit(void)
 	intel_idle_cpuidle_devices_uninit();
 	cpuidle_unregister_driver(&intel_idle_driver);
 
-	if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE) {
+
+	if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE)
 		on_each_cpu(__setup_broadcast_timer, (void *)false, 1);
-		unregister_cpu_notifier(&setup_broadcast_notifier);
-	}
+	unregister_cpu_notifier(&cpu_hotplug_notifier);
 
 	return;
 }
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 27cfced7b57..524bb6f3b6c 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -206,14 +206,7 @@ struct cpuidle_governor {
 
 extern int cpuidle_register_governor(struct cpuidle_governor *gov);
 extern void cpuidle_unregister_governor(struct cpuidle_governor *gov);
-#ifdef CONFIG_INTEL_IDLE
-extern int intel_idle_cpu_init(int cpu);
 #else
-static inline int intel_idle_cpu_init(int cpu) { return -1; }
-#endif
-
-#else
-static inline int intel_idle_cpu_init(int cpu) { return -1; }
 static inline int cpuidle_register_governor(struct cpuidle_governor *gov)
 {return 0;}
--
cgit v1.2.3-70-g09d2
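For illustration, the general pattern this patch applies, as a standalone
sketch outside intel_idle: the idle driver registers its own CPU hotplug
notifier and lazily initializes the per-cpu cpuidle device the first time a
CPU comes online, so no other subsystem has to call into it.  The my_* symbols
are hypothetical placeholders.

/* Sketch only -- not intel_idle code; my_* names are made up. */
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/notifier.h>
#include <linux/percpu.h>

static struct cpuidle_device __percpu *my_cpuidle_devices;

static int my_idle_cpu_init(int cpu);	/* hypothetical per-cpu setup */

static int my_cpu_hotplug_notify(struct notifier_block *n,
				 unsigned long action, void *hcpu)
{
	int cpu = (unsigned long)hcpu;
	struct cpuidle_device *dev;

	if ((action & 0xf) == CPU_ONLINE) {
		dev = per_cpu_ptr(my_cpuidle_devices, cpu);
		if (!dev->registered)	/* CPU came up after the driver probed */
			my_idle_cpu_init(cpu);
	}
	return NOTIFY_OK;
}

static struct notifier_block my_cpu_hotplug_notifier = {
	.notifier_call = my_cpu_hotplug_notify,
};

static int __init my_idle_probe(void)
{
	/* keep hotplug handling inside the idle driver itself */
	return register_cpu_notifier(&my_cpu_hotplug_notifier);
}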
From 8651f97bd951d0bb1c10fa24e3fa3455193f3548 Mon Sep 17 00:00:00 2001
From: Preeti U Murthy
Date: Mon, 9 Jul 2012 10:12:56 +0200
Subject: PM / cpuidle: System resume hang fix with cpuidle

On certain BIOSes, resume hangs if CPUs are allowed to enter idle
states during suspend [1].  This was fixed in the ACPI idle driver [2],
but the intel_idle driver does not have this fix.  Thus, instead of
replicating the fix in both idle drivers (or in more platform-specific
idle drivers, if needed), the more general cpuidle infrastructure can
handle this.

A suspend callback in cpuidle_driver could handle this fix, but a
cpuidle_driver provides only basic functionality, such as platform idle
state detection and the mechanisms for entering and exiting CPU idle
states.  All other cpuidle functions live in the generic cpuidle
infrastructure, for the good reason that all cpuidle drivers,
irrespective of their platforms, support them.

One option therefore would be to register a suspend callback in cpuidle
that handles this fix, called through a PM_SUSPEND_PREPARE notifier.
But that is too generic a notifier for a driver to handle.  Also,
ideally it is not cpuidle's job to handle the side effects of suspend;
it should expose the interfaces that "handle cpuidle during suspend"
(or any other operation), which the subsystems call during the
respective operation.

The fix demands that, during suspend, no CPUs be allowed to enter deep
C-states.  The cpuidle interface cpuidle_uninstall_idle_handler()
ensures exactly that.  Not only that, it also kicks all the CPUs that
are already idle out of their idle states, which was previously done
during CPU hotplug through the CPU_DYING_FROZEN callbacks.

Now the question arises of when, during suspend,
cpuidle_uninstall_idle_handler() should be called.  Since we are
dealing with drivers, it seems best to call it during dpm_suspend().
Delaying the call until dpm_suspend_noirq() does no harm, as long as it
happens before cpu_hotplug_begin(), to avoid race conditions with CPU
hotplug operations.  In dpm_suspend_noirq(), it is wise to place this
call before suspend_device_irqs() to avoid ugly interactions with it.
Analogously during resume.

References:
[1] https://bugs.launchpad.net/ubuntu/+source/linux/+bug/674075
[2] http://marc.info/?l=linux-pm&m=133958534231884&w=2

Reported-and-tested-by: Dave Hansen
Signed-off-by: Preeti U Murthy
Reviewed-by: Srivatsa S. Bhat
Signed-off-by: Rafael J. Wysocki
---
 drivers/acpi/processor_idle.c | 30 +-----------------------------
 drivers/base/power/main.c     |  4 +++-
 drivers/cpuidle/cpuidle.c     | 16 ++++++++++++++++
 include/linux/cpuidle.h       |  4 ++++
 4 files changed, 24 insertions(+), 30 deletions(-)

(limited to 'include/linux/cpuidle.h')

diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 47a8caa89db..d8366ee7571 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -221,10 +221,6 @@ static void lapic_timer_state_broadcast(struct acpi_processor *pr,
 
 #endif
 
-/*
- * Suspend / resume control
- */
-static int acpi_idle_suspend;
 static u32 saved_bm_rld;
 
 static void acpi_idle_bm_rld_save(void)
@@ -243,21 +239,13 @@ static void acpi_idle_bm_rld_restore(void)
 
 int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
 {
-	if (acpi_idle_suspend == 1)
-		return 0;
-
 	acpi_idle_bm_rld_save();
-	acpi_idle_suspend = 1;
 	return 0;
 }
 
 int acpi_processor_resume(struct acpi_device * device)
 {
-	if (acpi_idle_suspend == 0)
-		return 0;
-
 	acpi_idle_bm_rld_restore();
-	acpi_idle_suspend = 0;
 	return 0;
 }
 
@@ -763,11 +751,6 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
 
 	local_irq_disable();
 
-	if (acpi_idle_suspend) {
-		local_irq_enable();
-		cpu_relax();
-		return -EBUSY;
-	}
 	lapic_timer_state_broadcast(pr, cx, 1);
 
 	kt1 = ktime_get_real();
@@ -838,11 +821,6 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 
 	local_irq_disable();
 
-	if (acpi_idle_suspend) {
-		local_irq_enable();
-		cpu_relax();
-		return -EBUSY;
-	}
 
 	if (cx->entry_method != ACPI_CSTATE_FFH) {
 		current_thread_info()->status &= ~TS_POLLING;
@@ -928,8 +906,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 				drv, drv->safe_state_index);
 		} else {
 			local_irq_disable();
-			if (!acpi_idle_suspend)
-				acpi_safe_halt();
+			acpi_safe_halt();
 			local_irq_enable();
 			return -EBUSY;
 		}
@@ -937,11 +914,6 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 
 	local_irq_disable();
 
-	if (acpi_idle_suspend) {
-		local_irq_enable();
-		cpu_relax();
-		return -EBUSY;
-	}
 
 	if (cx->entry_method != ACPI_CSTATE_FFH) {
 		current_thread_info()->status &= ~TS_POLLING;
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 9cb845e4933..63048f79de5 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -28,7 +28,7 @@
 #include <linux/sched.h>
 #include <linux/async.h>
 #include <linux/suspend.h>
-
+#include <linux/cpuidle.h>
 #include "../base.h"
 #include "power.h"
 
@@ -467,6 +467,7 @@ static void dpm_resume_noirq(pm_message_t state)
 	mutex_unlock(&dpm_list_mtx);
 	dpm_show_time(starttime, state, "noirq");
 	resume_device_irqs();
+	cpuidle_resume();
 }
 
 /**
@@ -867,6 +868,7 @@ static int dpm_suspend_noirq(pm_message_t state)
 	ktime_t starttime = ktime_get();
 	int error = 0;
 
+	cpuidle_pause();
 	suspend_device_irqs();
 	mutex_lock(&dpm_list_mtx);
 	while (!list_empty(&dpm_late_early_list)) {
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 04e4b7674a4..efa9a2ca30e 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -201,6 +201,22 @@ void cpuidle_resume_and_unlock(void)
 
 EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);
 
+/* Currently used in suspend/resume path to suspend cpuidle */
+void cpuidle_pause(void)
+{
+	mutex_lock(&cpuidle_lock);
+	cpuidle_uninstall_idle_handler();
+	mutex_unlock(&cpuidle_lock);
+}
+
+/* Currently used in suspend/resume path to resume cpuidle */
+void cpuidle_resume(void)
+{
+	mutex_lock(&cpuidle_lock);
+	cpuidle_install_idle_handler();
+	mutex_unlock(&cpuidle_lock);
+}
+
 /**
  * cpuidle_wrap_enter - performs timekeeping and irqen around enter function
  * @dev: pointer to a valid cpuidle_device object
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 524bb6f3b6c..ca6cdf55eb1 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -145,6 +145,8 @@ extern void cpuidle_unregister_device(struct cpuidle_device *dev);
 
 extern void cpuidle_pause_and_lock(void);
 extern void cpuidle_resume_and_unlock(void);
+extern void cpuidle_pause(void);
+extern void cpuidle_resume(void);
 extern int cpuidle_enable_device(struct cpuidle_device *dev);
 extern void cpuidle_disable_device(struct cpuidle_device *dev);
 extern int cpuidle_wrap_enter(struct cpuidle_device *dev,
@@ -168,6 +170,8 @@ static inline void cpuidle_unregister_device(struct cpuidle_device *dev) { }
 
 static inline void cpuidle_pause_and_lock(void) { }
 static inline void cpuidle_resume_and_unlock(void) { }
+static inline void cpuidle_pause(void) { }
+static inline void cpuidle_resume(void) { }
 static inline int cpuidle_enable_device(struct cpuidle_device *dev)
 {return -ENODEV; }
 static inline void cpuidle_disable_device(struct cpuidle_device *dev) { }
--
cgit v1.2.3-70-g09d2
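For illustration, a condensed sketch of where the new calls sit in the
suspend/resume path, mirroring the drivers/base/power/main.c hunks above.  The
my_dpm_* wrappers are hypothetical; what matters is the ordering relative to
suspend_device_irqs()/resume_device_irqs().

/* Sketch only -- the my_dpm_* wrappers are illustrative, not kernel API. */
#include <linux/cpuidle.h>
#include <linux/interrupt.h>

static void my_dpm_suspend_noirq_prologue(void)
{
	cpuidle_pause();		/* kick CPUs out of idle and keep them out */
	suspend_device_irqs();		/* then quiesce device interrupts */
}

static void my_dpm_resume_noirq_epilogue(void)
{
	resume_device_irqs();		/* device interrupts come back first */
	cpuidle_resume();		/* then let CPUs use idle states again */
}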