From 4bfc8288bc4a64529c5547d17349a2a1f4675507 Mon Sep 17 00:00:00 2001
From: Len Brown
Date: Wed, 30 Mar 2011 23:52:29 -0400
Subject: x86 idle: move mwait_idle_with_hints() to where it is used

...and make it static

no functional change

cc: x86@kernel.org
Acked-by: H. Peter Anvin
Signed-off-by: Len Brown
---
 arch/x86/kernel/acpi/cstate.c | 23 +++++++++++++++++++++++
 arch/x86/kernel/process.c     | 23 -----------------------
 2 files changed, 23 insertions(+), 23 deletions(-)

(limited to 'arch/x86/kernel')

diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index 5812404a0d4..f50e7fb2a20 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -149,6 +149,29 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
 }
 EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);
 
+/*
+ * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
+ * which can obviate IPI to trigger checking of need_resched.
+ * We execute MONITOR against need_resched and enter optimized wait state
+ * through MWAIT. Whenever someone changes need_resched, we would be woken
+ * up from MWAIT (without an IPI).
+ *
+ * New with Core Duo processors, MWAIT can take some hints based on CPU
+ * capability.
+ */
+void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
+{
+	if (!need_resched()) {
+		if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
+			clflush((void *)&current_thread_info()->flags);
+
+		__monitor((void *)&current_thread_info()->flags, 0, 0);
+		smp_mb();
+		if (!need_resched())
+			__mwait(ax, cx);
+	}
+}
+
 void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)
 {
 	unsigned int cpu = smp_processor_id();
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index e1ba8cb24e4..e7e3b019c43 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -438,29 +438,6 @@ void cpu_idle_wait(void)
 }
 EXPORT_SYMBOL_GPL(cpu_idle_wait);
 
-/*
- * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
- * which can obviate IPI to trigger checking of need_resched.
- * We execute MONITOR against need_resched and enter optimized wait state
- * through MWAIT. Whenever someone changes need_resched, we would be woken
- * up from MWAIT (without an IPI).
- *
- * New with Core Duo processors, MWAIT can take some hints based on CPU
- * capability.
- */
-void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
-{
-	if (!need_resched()) {
-		if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
-			clflush((void *)&current_thread_info()->flags);
-
-		__monitor((void *)&current_thread_info()->flags, 0, 0);
-		smp_mb();
-		if (!need_resched())
-			__mwait(ax, cx);
-	}
-}
-
 /* Default MONITOR/MWAIT with no hints, used for default C1 state */
 static void mwait_idle(void)
 {
--
cgit v1.2.3-70-g09d2
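
Aside: the comment block moved above notes that MWAIT "can take some hints based on CPU capability". A minimal sketch of how such a hint for mwait_idle_with_hints(ax, cx) is typically encoded (per the Intel SDM) follows; the EX_-prefixed constants and the ex_mwait_hint() helper are illustrative names invented for this example, not code from the patch or the kernel.

/*
 * Illustrative sketch only (not kernel code): encoding an MWAIT hint for
 * mwait_idle_with_hints(ax, cx), per the Intel SDM.  EAX bits 7:4 select
 * the target C-state (0 => C1, 1 => C2, ...) and bits 3:0 a sub-state;
 * ECX bit 0 requests wakeup on an interrupt even while interrupts are
 * masked.  All EX_-prefixed names are local to this sketch.
 */
#define EX_MWAIT_SUBSTATE_MASK		0xf
#define EX_MWAIT_CSTATE_SHIFT		4
#define EX_MWAIT_ECX_INTERRUPT_BREAK	0x1

static unsigned long ex_mwait_hint(unsigned int cstate, unsigned int substate)
{
	/* C1 => 0x00, C2 => 0x10, C3 => 0x20, ... plus the sub-state bits */
	return ((unsigned long)(cstate - 1) << EX_MWAIT_CSTATE_SHIFT) |
	       (substate & EX_MWAIT_SUBSTATE_MASK);
}

/*
 * e.g. request a C2-class state, waking on interrupts even if masked:
 *	mwait_idle_with_hints(ex_mwait_hint(2, 0), EX_MWAIT_ECX_INTERRUPT_BREAK);
 */
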
From a0bfa1373859e9d11dc92561a8667588803e42d8 Mon Sep 17 00:00:00 2001
From: Len Brown
Date: Fri, 1 Apr 2011 19:34:59 -0400
Subject: cpuidle: stop depending on pm_idle

cpuidle users should call cpuidle_call_idle() directly
rather than via (pm_idle)() function pointer.

Architecture may choose to continue using (pm_idle)(),
but cpuidle need not depend on it:

	my_arch_cpu_idle()
		...
		if(cpuidle_call_idle())
			pm_idle();

cc: Kevin Hilman
cc: Paul Mundt
cc: x86@kernel.org
Acked-by: H. Peter Anvin
Signed-off-by: Len Brown
---
 arch/arm/kernel/process.c    |  4 +++-
 arch/sh/kernel/idle.c        |  6 ++++--
 arch/x86/kernel/process_32.c |  4 +++-
 arch/x86/kernel/process_64.c |  4 +++-
 drivers/cpuidle/cpuidle.c    | 38 ++++++++++++++++++--------------------
 include/linux/cpuidle.h      |  2 ++
 6 files changed, 33 insertions(+), 25 deletions(-)

(limited to 'arch/x86/kernel')

diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 5e1e5419722..d7ee0d4c072 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -30,6 +30,7 @@
 #include
 #include
 #include
+#include <linux/cpuidle.h>
 
 #include
 #include
@@ -196,7 +197,8 @@ void cpu_idle(void)
 				cpu_relax();
 			} else {
 				stop_critical_timings();
-				pm_idle();
+				if (cpuidle_call_idle())
+					pm_idle();
 				start_critical_timings();
 				/*
 				 * This will eventually be removed - pm_idle
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
index 425d604e3a2..9c7099ebfe1 100644
--- a/arch/sh/kernel/idle.c
+++ b/arch/sh/kernel/idle.c
@@ -16,12 +16,13 @@
 #include
 #include
 #include
+#include <linux/cpuidle.h>
 #include
 #include
 
 #include
 #include
 
-void (*pm_idle)(void) = NULL;
+static void (*pm_idle)(void);
 
 static int hlt_counter;
@@ -100,7 +101,8 @@ void cpu_idle(void)
 			local_irq_disable();
 			/* Don't trace irqs off for idle */
 			stop_critical_timings();
-			pm_idle();
+			if (cpuidle_call_idle())
+				pm_idle();
 			/*
 			 * Sanity check to ensure that pm_idle() returns
 			 * with IRQs enabled
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index a3d0dc59067..7a3b65107a2 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -38,6 +38,7 @@
 #include
 #include
 #include
+#include <linux/cpuidle.h>
 
 #include
 #include
@@ -109,7 +110,8 @@ void cpu_idle(void)
 			local_irq_disable();
 			/* Don't trace irqs off for idle */
 			stop_critical_timings();
-			pm_idle();
+			if (cpuidle_idle_call())
+				pm_idle();
 			start_critical_timings();
 		}
 		tick_nohz_restart_sched_tick();
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index ca6f7ab8df3..f693e44e1bf 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -37,6 +37,7 @@
 #include
 #include
 #include
+#include <linux/cpuidle.h>
 
 #include
 #include
@@ -136,7 +137,8 @@ void cpu_idle(void)
 			enter_idle();
 			/* Don't trace irqs off for idle */
 			stop_critical_timings();
-			pm_idle();
+			if (cpuidle_idle_call())
+				pm_idle();
 			start_critical_timings();
 
 			/* In many cases the interrupt that ended idle
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 041df0b056b..d4c54237288 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -25,10 +25,10 @@ DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
 
 DEFINE_MUTEX(cpuidle_lock);
 LIST_HEAD(cpuidle_detected_devices);
-static void (*pm_idle_old)(void);
 
 static int enabled_devices;
 static int off __read_mostly;
+static int initialized __read_mostly;
 
 int cpuidle_disabled(void)
 {
@@ -56,25 +56,23 @@ static int __cpuidle_register_device(struct cpuidle_device *dev);
  * cpuidle_idle_call - the main idle loop
  *
  * NOTE: no locks or semaphores should be used here
+ * return non-zero on failure
  */
-static void cpuidle_idle_call(void)
+int cpuidle_idle_call(void)
 {
 	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
 	struct cpuidle_state *target_state;
 	int next_state;
 
+	if (off)
+		return -ENODEV;
+
+	if (!initialized)
+		return -ENODEV;
+
 	/* check if the device is ready */
-	if (!dev || !dev->enabled) {
-		if (pm_idle_old)
-			pm_idle_old();
-		else
-#if defined(CONFIG_ARCH_HAS_DEFAULT_IDLE)
-			default_idle();
-#else
-			local_irq_enable();
-#endif
-		return;
-	}
+	if (!dev || !dev->enabled)
+		return -EBUSY;
 
 #if 0
 	/* shows regressions, re-enable for 2.6.29 */
@@ -99,7 +97,7 @@ static void cpuidle_idle_call(void)
 	next_state = cpuidle_curr_governor->select(dev);
 	if (need_resched()) {
 		local_irq_enable();
-		return;
+		return 0;
 	}
 
 	target_state = &dev->states[next_state];
@@ -124,6 +122,8 @@ static void cpuidle_idle_call(void)
 	/* give the governor an opportunity to reflect on the outcome */
 	if (cpuidle_curr_governor->reflect)
 		cpuidle_curr_governor->reflect(dev);
+
+	return 0;
 }
 
 /**
@@ -131,10 +131,10 @@ static void cpuidle_idle_call(void)
  */
 void cpuidle_install_idle_handler(void)
 {
-	if (enabled_devices && (pm_idle != cpuidle_idle_call)) {
+	if (enabled_devices) {
 		/* Make sure all changes finished before we switch to new idle */
 		smp_wmb();
-		pm_idle = cpuidle_idle_call;
+		initialized = 1;
 	}
 }
 
@@ -143,8 +143,8 @@ void cpuidle_install_idle_handler(void)
  */
 void cpuidle_uninstall_idle_handler(void)
 {
-	if (enabled_devices && pm_idle_old && (pm_idle != pm_idle_old)) {
-		pm_idle = pm_idle_old;
+	if (enabled_devices) {
+		initialized = 0;
 		cpuidle_kick_cpus();
 	}
 }
@@ -440,8 +440,6 @@ static int __init cpuidle_init(void)
 	if (cpuidle_disabled())
 		return -ENODEV;
 
-	pm_idle_old = pm_idle;
-
 	ret = cpuidle_add_class_sysfs(&cpu_sysdev_class);
 	if (ret)
 		return ret;
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index b89f67da919..b51629e15cf 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -123,6 +123,7 @@ struct cpuidle_driver {
 #ifdef CONFIG_CPU_IDLE
 
 extern void disable_cpuidle(void);
+extern int cpuidle_idle_call(void);
 
 extern int cpuidle_register_driver(struct cpuidle_driver *drv);
 struct cpuidle_driver *cpuidle_get_driver(void);
@@ -137,6 +138,7 @@ extern void cpuidle_disable_device(struct cpuidle_device *dev);
 
 #else
 static inline void disable_cpuidle(void) { }
+static inline int cpuidle_idle_call(void) { return -ENODEV; }
 static inline int cpuidle_register_driver(struct cpuidle_driver *drv)
 {return -ENODEV; }
--
cgit v1.2.3-70-g09d2
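
Aside: a minimal sketch of the arch-side pattern this second patch describes, assuming only the cpuidle_idle_call() declaration it adds to include/linux/cpuidle.h. example_cpu_idle() and the extern pm_idle declaration are illustrative, not code from the patch; when CONFIG_CPU_IDLE is disabled, the static inline stub above returns -ENODEV, so the fallback to pm_idle() is always taken.

/*
 * Illustrative sketch of the fallback pattern from the commit message,
 * not actual kernel code.  cpuidle_idle_call() returns 0 when it entered
 * an idle state, and non-zero (-ENODEV/-EBUSY) when cpuidle is off, not
 * yet initialized, or has no enabled device for this CPU.
 */
#include <linux/cpuidle.h>
#include <linux/sched.h>

extern void (*pm_idle)(void);		/* legacy per-arch idle hook */

static void example_cpu_idle(void)	/* hypothetical arch idle loop */
{
	while (!need_resched()) {
		if (cpuidle_idle_call())	/* non-zero: cpuidle unavailable */
			pm_idle();		/* fall back to the old pointer */
	}
}
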