From ee761f629d598579594d7e1eb8c552f3c5f71e4d Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Thu, 21 Mar 2013 22:49:32 +0100
Subject: arch: Consolidate tsk_is_polling()

Move it to a common place. Preparatory patch for implementing
set/clear for the idle need_resched poll implementation.

Signed-off-by: Thomas Gleixner
Cc: Linus Torvalds
Cc: Rusty Russell
Cc: Paul McKenney
Cc: Peter Zijlstra
Reviewed-by: Cc: Srivatsa S. Bhat
Cc: Magnus Damm
Link: http://lkml.kernel.org/r/20130321215233.446034505@linutronix.de
Signed-off-by: Thomas Gleixner
---
 include/linux/sched.h | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

(limited to 'include')

diff --git a/include/linux/sched.h b/include/linux/sched.h
index d35d2b6ddbf..6709a5813f2 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2621,6 +2621,26 @@ static inline int spin_needbreak(spinlock_t *lock)
 #endif
 }
 
+/*
+ * Idle thread specific functions to determine the need_resched
+ * polling state. We have two versions, one based on TS_POLLING in
+ * thread_info.status and one based on TIF_POLLING_NRFLAG in
+ * thread_info.flags
+ */
+#ifdef TS_POLLING
+static inline int tsk_is_polling(struct task_struct *p)
+{
+	return task_thread_info(p)->status & TS_POLLING;
+}
+#elif defined(TIF_POLLING_NRFLAG)
+static inline int tsk_is_polling(struct task_struct *p)
+{
+	return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
+}
+#else
+static inline int tsk_is_polling(struct task_struct *p) { return 0; }
+#endif
+
 /*
  * Thread group CPU time accounting.
  */
--
cgit v1.2.3-70-g09d2
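The consolidated helper is what the scheduler's cross-CPU wakeup path consults to decide whether a reschedule IPI is needed at all: if the target CPU's idle task polls need_resched, setting the flag is sufficient. The sketch below is illustrative only, under the assumption of a simplified caller; kick_remote_idle_cpu() is a hypothetical name, the in-tree user of tsk_is_polling() is resched_task() in kernel/sched/core.c.

#include <linux/sched.h>
#include <linux/smp.h>

/* Illustrative sketch: wake the idle task running on a remote cpu. */
static void kick_remote_idle_cpu(struct task_struct *idle, int cpu)
{
	set_tsk_need_resched(idle);	/* mark work pending for the idle task */
	smp_mb();			/* order the flag write against the poll check */
	if (!tsk_is_polling(idle))
		smp_send_reschedule(cpu);	/* not polling, so an IPI is required */
}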
From 3a98f871ecaf44806e188184332c3fec27c8f08c Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Thu, 21 Mar 2013 22:49:33 +0100
Subject: idle: Implement set/clr functions for need_resched poll

Implement set/clear functions for the idle need_resched poll
implementation.

Signed-off-by: Thomas Gleixner
Cc: Linus Torvalds
Cc: Rusty Russell
Cc: Paul McKenney
Cc: Peter Zijlstra
Reviewed-by: Cc: Srivatsa S. Bhat
Cc: Magnus Damm
Link: http://lkml.kernel.org/r/20130321215233.518839807@linutronix.de
Signed-off-by: Thomas Gleixner
---
 include/linux/sched.h | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

(limited to 'include')

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6709a5813f2..21fe9a142e5 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2632,13 +2632,34 @@ static inline int tsk_is_polling(struct task_struct *p)
 {
 	return task_thread_info(p)->status & TS_POLLING;
 }
+static inline void current_set_polling(void)
+{
+	current_thread_info()->status |= TS_POLLING;
+}
+
+static inline void current_clr_polling(void)
+{
+	current_thread_info()->status &= ~TS_POLLING;
+	smp_mb__after_clear_bit();
+}
 #elif defined(TIF_POLLING_NRFLAG)
 static inline int tsk_is_polling(struct task_struct *p)
 {
 	return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
 }
+static inline void current_set_polling(void)
+{
+	set_thread_flag(TIF_POLLING_NRFLAG);
+}
+
+static inline void current_clr_polling(void)
+{
+	clear_thread_flag(TIF_POLLING_NRFLAG);
+}
 #else
 static inline int tsk_is_polling(struct task_struct *p) { return 0; }
+static inline void current_set_polling(void) { }
+static inline void current_clr_polling(void) { }
 #endif
 
 /*
--
cgit v1.2.3-70-g09d2
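A minimal usage sketch for the new helpers, not taken from the patch: the idle task drops the polling state around a non-polling low-power wait, so remote wakers know whether writing need_resched is enough or an IPI is required. low_power_wait() is a hypothetical stand-in for an architecture's halt/mwait primitive; the real consumer is the generic idle loop added later in this series.

#include <linux/sched.h>

/* Hypothetical stand-in for a real halt/mwait style primitive. */
static void low_power_wait(void)
{
}

static void idle_enter_sketch(void)
{
	current_clr_polling();		/* from here on, wakers must send an IPI */
	if (!need_resched())		/* re-check after announcing the state change */
		low_power_wait();
	current_set_polling();		/* polling again: need_resched alone wakes us */
}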
From a1a04ec3c7c27a682473fd9beb2c996316a64649 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Thu, 21 Mar 2013 22:49:34 +0100
Subject: idle: Provide a generic entry point for the idle code

For now this calls cpu_idle(), but in the long run we want to move the
cpu bringup code to the core and therefore we add a state argument.

Signed-off-by: Thomas Gleixner
Cc: Linus Torvalds
Cc: Rusty Russell
Cc: Paul McKenney
Cc: Peter Zijlstra
Reviewed-by: Cc: Srivatsa S. Bhat
Cc: Magnus Damm
Link: http://lkml.kernel.org/r/20130321215233.583190032@linutronix.de
Signed-off-by: Thomas Gleixner
---
 include/linux/cpu.h |  8 ++++++++
 init/main.c         |  2 +-
 kernel/Makefile     |  1 +
 kernel/cpu/Makefile |  1 +
 kernel/cpu/idle.c   | 10 ++++++++++
 5 files changed, 21 insertions(+), 1 deletion(-)
 create mode 100644 kernel/cpu/Makefile
 create mode 100644 kernel/cpu/idle.c

(limited to 'include')

diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index ce7a074f251..7419e30c55f 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -212,4 +212,12 @@ static inline int disable_nonboot_cpus(void) { return 0; }
 static inline void enable_nonboot_cpus(void) {}
 #endif /* !CONFIG_PM_SLEEP_SMP */
 
+enum cpuhp_state {
+	CPUHP_OFFLINE,
+	CPUHP_ONLINE,
+};
+
+void cpu_startup_entry(enum cpuhp_state state);
+void cpu_idle(void);
+
 #endif /* _LINUX_CPU_H_ */
diff --git a/init/main.c b/init/main.c
index 63534a141b4..adb179d3e0f 100644
--- a/init/main.c
+++ b/init/main.c
@@ -384,7 +384,7 @@ static noinline void __init_refok rest_init(void)
 	init_idle_bootup_task(current);
 	schedule_preempt_disabled();
 	/* Call into cpu_idle with preempt disabled */
-	cpu_idle();
+	cpu_startup_entry(CPUHP_ONLINE);
 }
 
 /* Check for early params. */
diff --git a/kernel/Makefile b/kernel/Makefile
index bbde5f1a448..d1574d47cf2 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -24,6 +24,7 @@ endif
 
 obj-y += sched/
 obj-y += power/
+obj-y += cpu/
 
 obj-$(CONFIG_CHECKPOINT_RESTORE) += kcmp.o
 obj-$(CONFIG_FREEZER) += freezer.o
diff --git a/kernel/cpu/Makefile b/kernel/cpu/Makefile
new file mode 100644
index 00000000000..59ab052ef7a
--- /dev/null
+++ b/kernel/cpu/Makefile
@@ -0,0 +1 @@
+obj-y = idle.o
diff --git a/kernel/cpu/idle.c b/kernel/cpu/idle.c
new file mode 100644
index 00000000000..1908f00e0e9
--- /dev/null
+++ b/kernel/cpu/idle.c
@@ -0,0 +1,10 @@
+/*
+ * Generic entry point for the idle threads
+ */
+#include <linux/sched.h>
+#include <linux/cpu.h>
+
+void cpu_startup_entry(enum cpuhp_state state)
+{
+	cpu_idle();
+}
--
cgit v1.2.3-70-g09d2
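The new entry point is what each architecture's CPU bringup is expected to call instead of cpu_idle() once it is converted, exactly as rest_init() does above for the boot CPU. A sketch of that conversion for a secondary CPU, assuming a hypothetical start_secondary(); the real function names and bringup steps differ per architecture.

#include <linux/cpu.h>
#include <linux/init.h>

/* Hypothetical secondary CPU bringup; arch code looks roughly like this. */
static void __cpuinit start_secondary(void)
{
	/* ... arch specific init: MMU, per-cpu state, mark the cpu online ... */

	/* Enter the core idle code; this call does not return. */
	cpu_startup_entry(CPUHP_ONLINE);
}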
From d166991234347215dc23fc9dc15a63a83a1a54e1 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Thu, 21 Mar 2013 22:49:35 +0100
Subject: idle: Implement generic idle function

All idle functions in arch/* are more or less the same, plus minus a
few bugs and extra instrumentation, tickless support and other
optional items.

Implement a generic idle function which resembles the functionality
found in arch/. Provide weak arch_cpu_idle_* functions which can be
overridden by the architecture code if needed.

Signed-off-by: Thomas Gleixner
Cc: Linus Torvalds
Cc: Rusty Russell
Cc: Paul McKenney
Cc: Peter Zijlstra
Reviewed-by: Cc: Srivatsa S. Bhat
Cc: Magnus Damm
Link: http://lkml.kernel.org/r/20130321215233.646635455@linutronix.de
Signed-off-by: Thomas Gleixner
---
 arch/Kconfig        |   3 ++
 include/linux/cpu.h |   8 ++++
 kernel/cpu/idle.c   | 105 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 116 insertions(+)

(limited to 'include')

diff --git a/arch/Kconfig b/arch/Kconfig
index 1455579791e..a699f3767be 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -216,6 +216,9 @@ config USE_GENERIC_SMP_HELPERS
 config GENERIC_SMP_IDLE_THREAD
 	bool
 
+config GENERIC_IDLE_LOOP
+	bool
+
 # Select if arch init_task initializer is different to init/init_task.c
 config ARCH_INIT_TASK
 	bool
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 7419e30c55f..c6f6e0839b6 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -220,4 +220,12 @@ enum cpuhp_state {
 void cpu_startup_entry(enum cpuhp_state state);
 void cpu_idle(void);
 
+void cpu_idle_poll_ctrl(bool enable);
+
+void arch_cpu_idle(void);
+void arch_cpu_idle_prepare(void);
+void arch_cpu_idle_enter(void);
+void arch_cpu_idle_exit(void);
+void arch_cpu_idle_dead(void);
+
 #endif /* _LINUX_CPU_H_ */
diff --git a/kernel/cpu/idle.c b/kernel/cpu/idle.c
index 1908f00e0e9..54c32038393 100644
--- a/kernel/cpu/idle.c
+++ b/kernel/cpu/idle.c
@@ -3,8 +3,113 @@
  */
 #include <linux/sched.h>
 #include <linux/cpu.h>
+#include <linux/tick.h>
+#include <linux/mm.h>
+#include <linux/stackprotector.h>
+
+#include <trace/events/power.h>
+
+#ifndef CONFIG_GENERIC_IDLE_LOOP
 
 void cpu_startup_entry(enum cpuhp_state state)
 {
 	cpu_idle();
 }
+#else
+
+static int __read_mostly cpu_idle_force_poll;
+
+void cpu_idle_poll_ctrl(bool enable)
+{
+	if (enable) {
+		cpu_idle_force_poll++;
+	} else {
+		cpu_idle_force_poll--;
+		WARN_ON_ONCE(cpu_idle_force_poll < 0);
+	}
+}
+
+#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
+static int __init cpu_idle_poll_setup(char *__unused)
+{
+	cpu_idle_force_poll = 1;
+	return 1;
+}
+__setup("nohlt", cpu_idle_poll_setup);
+
+static int __init cpu_idle_nopoll_setup(char *__unused)
+{
+	cpu_idle_force_poll = 0;
+	return 1;
+}
+__setup("hlt", cpu_idle_nopoll_setup);
+#endif
+
+static inline int cpu_idle_poll(void)
+{
+	trace_cpu_idle_rcuidle(0, smp_processor_id());
+	local_irq_enable();
+	while (!need_resched())
+		cpu_relax();
+	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
+	return 1;
+}
+
+/* Weak implementations for optional arch specific functions */
+void __weak arch_cpu_idle_prepare(void) { }
+void __weak arch_cpu_idle_enter(void) { }
+void __weak arch_cpu_idle_exit(void) { }
+void __weak arch_cpu_idle_dead(void) { }
+void __weak arch_cpu_idle(void)
+{
+	cpu_idle_force_poll = 1;
+}
+
+/*
+ * Generic idle loop implementation
+ */
+static void cpu_idle_loop(void)
+{
+	while (1) {
+		tick_nohz_idle_enter();
+
+		while (!need_resched()) {
+			check_pgt_cache();
+			rmb();
+
+			if (cpu_is_offline(smp_processor_id()))
+				arch_cpu_idle_dead();
+
+			local_irq_disable();
+			arch_cpu_idle_enter();
+
+			if (cpu_idle_force_poll) {
+				cpu_idle_poll();
+			} else {
+				current_clr_polling();
+				if (!need_resched()) {
+					stop_critical_timings();
+					rcu_idle_enter();
+					arch_cpu_idle();
+					WARN_ON_ONCE(irqs_disabled());
+					rcu_idle_exit();
+					start_critical_timings();
+				} else {
+					local_irq_enable();
+				}
+				current_set_polling();
+			}
+			arch_cpu_idle_exit();
+		}
+		tick_nohz_idle_exit();
+		schedule_preempt_disabled();
+	}
+}
+
+void cpu_startup_entry(enum cpuhp_state state)
+{
+	current_set_polling();
+	arch_cpu_idle_prepare();
+	cpu_idle_loop();
+}
+#endif
--
cgit v1.2.3-70-g09d2
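Two usage notes, both illustrative rather than taken from this series. An architecture opts in by selecting GENERIC_IDLE_LOOP and overriding whichever weak hooks it needs; the contract visible in the loop above is that arch_cpu_idle() is entered with interrupts disabled and must return with them enabled (hence the WARN_ON_ONCE(irqs_disabled()) check). A sketch for a hypothetical architecture whose wait-for-interrupt instruction wakes on a pending IRQ even while interrupts are masked:

#include <linux/cpu.h>
#include <linux/irqflags.h>

/* Hypothetical low-power wait, e.g. a "wfi"-style instruction in real arch code. */
static inline void wait_for_interrupt(void)
{
}

void arch_cpu_idle(void)
{
	wait_for_interrupt();	/* entered with IRQs off, wakes on a pending IRQ */
	local_irq_enable();	/* the generic loop expects IRQs enabled on return */
}

Separately, cpu_idle_poll_ctrl(true)/cpu_idle_poll_ctrl(false) gives callers a refcounted way to force the polling variant cpu_idle_poll() temporarily, for example while a halt instruction is unsafe or wakeup latency matters; the "hlt"/"nohlt" command line options provide the same switch on architectures that select GENERIC_IDLE_POLL_SETUP.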