Diffstat (limited to 'arch/sparc64/kernel/process.c')

 -rw-r--r--  arch/sparc64/kernel/process.c | 24 ++++++++++++++++++------
 1 file changed, 18 insertions(+), 6 deletions(-)
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
index 7d10b039709..02f9dec1d45 100644
--- a/arch/sparc64/kernel/process.c
+++ b/arch/sparc64/kernel/process.c
@@ -74,7 +74,9 @@ void cpu_idle(void)
 		while (!need_resched())
 			barrier();
 
+		preempt_enable_no_resched();
 		schedule();
+		preempt_disable();
 		check_pgt_cache();
 	}
 }
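Note on the pattern added above (and repeated in the SMP loop in the next hunk): preemption is re-enabled only around the explicit schedule() call, using preempt_enable_no_resched() so that enabling preemption does not itself trigger a reschedule, and preempt_disable() restores the non-preemptible state as soon as schedule() returns. As a reading aid, the uniprocessor cpu_idle() would look roughly like the sketch below after this hunk is applied; it is reconstructed only from the context lines visible above, so the loop construct and the comments are assumptions rather than quotes from the file:

	void cpu_idle(void)
	{
		for (;;) {
			/* Busy-wait until the scheduler wants this CPU back. */
			while (!need_resched())
				barrier();

			/* Let schedule() run, then go back to idling non-preemptibly. */
			preempt_enable_no_resched();
			schedule();
			preempt_disable();
			check_pgt_cache();
		}
	}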
@@ -83,21 +85,31 @@ void cpu_idle(void)
 
 /*
  * the idle loop on a UltraMultiPenguin...
+ *
+ * TIF_POLLING_NRFLAG is set because we do not sleep the cpu
+ * inside of the idler task, so an interrupt is not needed
+ * to get a clean fast response.
+ *
+ * XXX Reverify this assumption... -DaveM
+ *
+ * Addendum: We do want it to do something for the signal
+ *           delivery case, we detect that by just seeing
+ *           if we are trying to send this to an idler or not.
  */
-#define idle_me_harder()	(cpu_data(smp_processor_id()).idle_volume += 1)
-#define unidle_me()		(cpu_data(smp_processor_id()).idle_volume = 0)
 void cpu_idle(void)
 {
+	cpuinfo_sparc *cpuinfo = &local_cpu_data();
 	set_thread_flag(TIF_POLLING_NRFLAG);
+
 	while(1) {
 		if (need_resched()) {
-			unidle_me();
-			clear_thread_flag(TIF_POLLING_NRFLAG);
+			cpuinfo->idle_volume = 0;
+			preempt_enable_no_resched();
 			schedule();
-			set_thread_flag(TIF_POLLING_NRFLAG);
+			preempt_disable();
 			check_pgt_cache();
 		}
-		idle_me_harder();
+		cpuinfo->idle_volume++;
 
 		/* The store ordering is so that IRQ handlers on
 		 * other cpus see our increasing idleness for the buddy