author     Linus Torvalds <torvalds@linux-foundation.org>  2012-07-10 13:06:05 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-07-10 13:06:05 -0700
commit     310959e831783d220d1fcaaca90e17c923830ed6
tree       fe4a5a23270ed540e3469bf4ea7459e6a7a88296
parent     bc51b0c22cebf5c311a6f1895fcca9f78efd0478
parent     50fb31cfed9218b439360caf7c0399b00042da15
Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
Pull powerpc fixes from Benjamin Herrenschmidt:
"It looks like my rewrite of our lazy irq scheme is still exposing
"interesting" issues left and right. The previous fixes are now
causing an occasional BUG_ON to trigger (which this patch turns into a
WARN_ON while at it), due to another issue of disconnect of the lazy
irq state vs the processor state in the idle loop on pseries and
cell.
This should fix it properly once for all moving the nasty code to a
common helper function.
There's also couple more fixes for some debug stuff that didn't build
(and helped resolving those problems so it's worth having), along with
a compile fix for newer gcc's."
* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc:
tty/hvc_opal: Fix debug function name
powerpc/numa: Avoid stupid uninitialized warning from gcc
powerpc: Fix build of some debug irq code
powerpc: More fixes for lazy IRQ vs. idle
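To make the lazy-irq bookkeeping that the new common helper centralizes easier to follow, below is a minimal, self-contained userspace sketch of its decision logic. The PACA fields and the hard_irq_disable()/lazy_irq_pending() helpers here are mocked-up stand-ins, and the lockdep notification and MSR manipulation are omitted; the real implementation is the arch/powerpc/kernel/irq.c hunk in the diff below.

#include <stdbool.h>
#include <stdio.h>

/* Mocked-up stand-ins for the kernel's per-CPU PACA state. */
#define PACA_IRQ_HARD_DIS	0x01
static unsigned char irq_happened = PACA_IRQ_HARD_DIS;
static unsigned char soft_enabled;

static void hard_irq_disable(void)
{
	irq_happened |= PACA_IRQ_HARD_DIS;
}

/* Any bit besides the hard-disable marker is a lazily latched interrupt. */
static bool lazy_irq_pending(void)
{
	return irq_happened & ~PACA_IRQ_HARD_DIS;
}

/* Mirrors the check-then-commit logic of prep_irq_for_idle() below. */
static bool prep_irq_for_idle(void)
{
	hard_irq_disable();
	if (lazy_irq_pending())
		return false;		/* an interrupt is pending: do not sleep */

	/* We are about to hard-enable as a side effect of the sleep entry */
	irq_happened &= ~PACA_IRQ_HARD_DIS;
	soft_enabled = 1;
	return true;			/* safe to enter the low power state */
}

int main(void)
{
	printf("idle allowed: %s\n", prep_irq_for_idle() ? "yes" : "no");
	irq_happened |= 0x02;	/* pretend a decrementer irq was latched lazily */
	printf("idle allowed: %s\n", prep_irq_for_idle() ? "yes" : "no");
	return 0;
}

The same check-then-commit shape is what lets the cell and pseries idle loops in this pull share one helper instead of open-coding the hard-disable and pending-check sequence.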
-rw-r--r--  arch/powerpc/include/asm/hw_irq.h                 6
-rw-r--r--  arch/powerpc/kernel/irq.c                        48
-rw-r--r--  arch/powerpc/mm/numa.c                            2
-rw-r--r--  arch/powerpc/platforms/cell/pervasive.c          11
-rw-r--r--  arch/powerpc/platforms/pseries/processor_idle.c  17
-rw-r--r--  drivers/tty/hvc/hvc_opal.c                        2
6 files changed, 69 insertions, 17 deletions
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 6eb75b80488..0554ab062bd 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -86,8 +86,8 @@ static inline bool arch_irqs_disabled(void)
 }
 
 #ifdef CONFIG_PPC_BOOK3E
-#define __hard_irq_enable()	asm volatile("wrteei 1" : : : "memory");
-#define __hard_irq_disable()	asm volatile("wrteei 0" : : : "memory");
+#define __hard_irq_enable()	asm volatile("wrteei 1" : : : "memory")
+#define __hard_irq_disable()	asm volatile("wrteei 0" : : : "memory")
 #else
 #define __hard_irq_enable()	__mtmsrd(local_paca->kernel_msr | MSR_EE, 1)
 #define __hard_irq_disable()	__mtmsrd(local_paca->kernel_msr, 1)
@@ -125,6 +125,8 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
 	return !regs->softe;
 }
 
+extern bool prep_irq_for_idle(void);
+
 #else /* CONFIG_PPC64 */
 
 #define SET_MSR_EE(x)	mtmsr(x)
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 1b415027ec0..1f017bb7a7c 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -229,7 +229,7 @@ notrace void arch_local_irq_restore(unsigned long en)
 	 */
 	if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
 		__hard_irq_disable();
-#ifdef CONFIG_TRACE_IRQFLAG
+#ifdef CONFIG_TRACE_IRQFLAGS
 	else {
 		/*
 		 * We should already be hard disabled here. We had bugs
@@ -286,6 +286,52 @@ void notrace restore_interrupts(void)
 		__hard_irq_enable();
 }
 
+/*
+ * This is a helper to use when about to go into idle low-power
+ * when the latter has the side effect of re-enabling interrupts
+ * (such as calling H_CEDE under pHyp).
+ *
+ * You call this function with interrupts soft-disabled (this is
+ * already the case when ppc_md.power_save is called). The function
+ * will return whether to enter power save or just return.
+ *
+ * In the former case, it will have notified lockdep of interrupts
+ * being re-enabled and generally sanitized the lazy irq state,
+ * and in the latter case it will leave with interrupts hard
+ * disabled and marked as such, so the local_irq_enable() call
+ * in cpu_idle() will properly re-enable everything.
+ */
+bool prep_irq_for_idle(void)
+{
+	/*
+	 * First we need to hard disable to ensure no interrupt
+	 * occurs before we effectively enter the low power state
+	 */
+	hard_irq_disable();
+
+	/*
+	 * If anything happened while we were soft-disabled,
+	 * we return now and do not enter the low power state.
+	 */
+	if (lazy_irq_pending())
+		return false;
+
+	/* Tell lockdep we are about to re-enable */
+	trace_hardirqs_on();
+
+	/*
+	 * Mark interrupts as soft-enabled and clear the
+	 * PACA_IRQ_HARD_DIS from the pending mask since we
+	 * are about to hard enable as well as a side effect
+	 * of entering the low power state.
+	 */
+	local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
+	local_paca->soft_enabled = 1;
+
+	/* Tell the caller to enter the low power state */
+	return true;
+}
+
 #endif /* CONFIG_PPC64 */
 
 int arch_show_interrupts(struct seq_file *p, int prec)
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 6e8f677f564..1e95556dc69 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -639,7 +639,7 @@ static void __init parse_drconf_memory(struct device_node *memory)
 	unsigned int n, rc, ranges, is_kexec_kdump = 0;
 	unsigned long lmb_size, base, size, sz;
 	int nid;
-	struct assoc_arrays aa;
+	struct assoc_arrays aa = { .arrays = NULL };
 
 	n = of_get_drconf_memory(memory, &dm);
 	if (!n)
diff --git a/arch/powerpc/platforms/cell/pervasive.c b/arch/powerpc/platforms/cell/pervasive.c
index efdacc82957..d17e98bc0c1 100644
--- a/arch/powerpc/platforms/cell/pervasive.c
+++ b/arch/powerpc/platforms/cell/pervasive.c
@@ -42,11 +42,9 @@ static void cbe_power_save(void)
 {
 	unsigned long ctrl, thread_switch_control;
 
-	/*
-	 * We need to hard disable interrupts, the local_irq_enable() done by
-	 * our caller upon return will hard re-enable.
-	 */
-	hard_irq_disable();
+	/* Ensure our interrupt state is properly tracked */
+	if (!prep_irq_for_idle())
+		return;
 
 	ctrl = mfspr(SPRN_CTRLF);
 
@@ -81,6 +79,9 @@ static void cbe_power_save(void)
 	 */
 	ctrl &= ~(CTRL_RUNLATCH | CTRL_TE);
 	mtspr(SPRN_CTRLT, ctrl);
+
+	/* Re-enable interrupts in MSR */
+	__hard_irq_enable();
 }
 
 static int cbe_system_reset_exception(struct pt_regs *regs)
diff --git a/arch/powerpc/platforms/pseries/processor_idle.c b/arch/powerpc/platforms/pseries/processor_idle.c
index e61483e8e96..c71be66bd5d 100644
--- a/arch/powerpc/platforms/pseries/processor_idle.c
+++ b/arch/powerpc/platforms/pseries/processor_idle.c
@@ -99,15 +99,18 @@ out:
 static void check_and_cede_processor(void)
 {
 	/*
-	 * Interrupts are soft-disabled at this point,
-	 * but not hard disabled. So an interrupt might have
-	 * occurred before entering NAP, and would be potentially
-	 * lost (edge events, decrementer events, etc...) unless
-	 * we first hard disable then check.
+	 * Ensure our interrupt state is properly tracked,
+	 * also checks if no interrupt has occurred while we
+	 * were soft-disabled
 	 */
-	hard_irq_disable();
-	if (!lazy_irq_pending())
+	if (prep_irq_for_idle()) {
 		cede_processor();
+#ifdef CONFIG_TRACE_IRQFLAGS
+		/* Ensure that H_CEDE returns with IRQs on */
+		if (WARN_ON(!(mfmsr() & MSR_EE)))
+			__hard_irq_enable();
+#endif
+	}
 }
 
 static int dedicated_cede_loop(struct cpuidle_device *dev,
diff --git a/drivers/tty/hvc/hvc_opal.c b/drivers/tty/hvc/hvc_opal.c
index ced26c8ccd5..0d2ea0c224c 100644
--- a/drivers/tty/hvc/hvc_opal.c
+++ b/drivers/tty/hvc/hvc_opal.c
@@ -401,7 +401,7 @@ out:
 }
 
 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL_RAW
-void __init udbg_init_debug_opal(void)
+void __init udbg_init_debug_opal_raw(void)
 {
 	u32 index = CONFIG_PPC_EARLY_DEBUG_OPAL_VTERMNO;
 	hvc_opal_privs[index] = &hvc_opal_boot_priv;