Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/include/asm/atomic.h                    |  2
-rw-r--r--  arch/arm/include/asm/domain.h                    | 18
-rw-r--r--  arch/arm/include/asm/thread_info.h               |  5
-rw-r--r--  arch/arm/kernel/kprobes-test-arm.c               |  4
-rw-r--r--  arch/arm/kernel/perf_event.c                     |  2
-rw-r--r--  arch/arm/kernel/ptrace.c                         |  3
-rw-r--r--  arch/arm/kernel/signal.c                         | 46
-rw-r--r--  arch/arm/kernel/signal.h                         |  2
-rw-r--r--  arch/arm/kernel/traps.c                          |  2
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S                    |  2
-rw-r--r--  arch/arm/mach-versatile/pci.c                    |  1
-rw-r--r--  arch/arm/mm/mm.h                                 |  2
-rw-r--r--  arch/arm/mm/mmu.c                                | 74
-rw-r--r--  arch/mips/pci/pci-lantiq.c                       |  4
-rw-r--r--  arch/mn10300/include/asm/thread_info.h           |  2
-rw-r--r--  arch/powerpc/include/asm/hw_irq.h                |  6
-rw-r--r--  arch/powerpc/kernel/irq.c                        | 48
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S          |  2
-rw-r--r--  arch/powerpc/mm/numa.c                           |  2
-rw-r--r--  arch/powerpc/platforms/cell/pervasive.c          | 11
-rw-r--r--  arch/powerpc/platforms/pseries/processor_idle.c  | 17
-rw-r--r--  arch/powerpc/xmon/xmon.c                         |  2
-rw-r--r--  arch/x86/kvm/mmu.c                               |  3
23 files changed, 211 insertions(+), 49 deletions(-)
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 68374ba6a94..c79f61faa3a 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -243,7 +243,7 @@ typedef struct {
 
 #define ATOMIC64_INIT(i) { (i) }
 
-static inline u64 atomic64_read(atomic64_t *v)
+static inline u64 atomic64_read(const atomic64_t *v)
 {
 	u64 result;
 
diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index 3d2220498ab..6ddbe446425 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -60,13 +60,13 @@
 #ifndef __ASSEMBLY__
 
 #ifdef CONFIG_CPU_USE_DOMAINS
-#define set_domain(x)					\
-	do {						\
-	__asm__ __volatile__(				\
-	"mcr	p15, 0, %0, c3, c0	@ set domain"	\
-	  : : "r" (x));					\
-	isb();						\
-	} while (0)
+static inline void set_domain(unsigned val)
+{
+	asm volatile(
+	"mcr	p15, 0, %0, c3, c0	@ set domain"
+	  : : "r" (val));
+	isb();
+}
 
 #define modify_domain(dom,type)					\
 	do {							\
@@ -78,8 +78,8 @@
 	} while (0)
 
 #else
-#define set_domain(x)		do { } while (0)
-#define modify_domain(dom,type)	do { } while (0)
+static inline void set_domain(unsigned val)	{ }
+static inline void modify_domain(unsigned dom, unsigned type)	{ }
 #endif
 
 /*
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index b79f8e97f77..af7b0bda335 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -148,7 +148,6 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
 #define TIF_NOTIFY_RESUME	2	/* callback before returning to user */
 #define TIF_SYSCALL_TRACE	8
 #define TIF_SYSCALL_AUDIT	9
-#define TIF_SYSCALL_RESTARTSYS	10
 #define TIF_POLLING_NRFLAG	16
 #define TIF_USING_IWMMXT	17
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
@@ -164,11 +163,9 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
 #define _TIF_USING_IWMMXT	(1 << TIF_USING_IWMMXT)
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
-#define _TIF_SYSCALL_RESTARTSYS	(1 << TIF_SYSCALL_RESTARTSYS)
 
 /* Checks for any syscall work in entry-common.S */
-#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
-			   _TIF_SYSCALL_RESTARTSYS)
+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
 
 /*
  * Change these and you break ASM code in entry-common.S
diff --git a/arch/arm/kernel/kprobes-test-arm.c b/arch/arm/kernel/kprobes-test-arm.c
index ba32b393b3f..38c1a3b103a 100644
--- a/arch/arm/kernel/kprobes-test-arm.c
+++ b/arch/arm/kernel/kprobes-test-arm.c
@@ -187,8 +187,8 @@ void kprobe_arm_test_cases(void)
 	TEST_BF_R ("mov	pc, r",0,2f,"")
 	TEST_BF_RR("mov	pc, r",0,2f,", asl r",1,0,"")
 	TEST_BB(   "sub	pc, pc, #1b-2b+8")
-#if __LINUX_ARM_ARCH__ >= 6
-	TEST_BB(   "sub	pc, pc, #1b-2b+8-2") /* UNPREDICTABLE before ARMv6 */
+#if __LINUX_ARM_ARCH__ == 6 && !defined(CONFIG_CPU_V7)
+	TEST_BB(   "sub	pc, pc, #1b-2b+8-2") /* UNPREDICTABLE before and after ARMv6 */
 #endif
 	TEST_BB_R( "sub	pc, pc, r",14, 1f-2f+8,"")
 	TEST_BB_R( "rsb	pc, r",14,1f-2f+8,", pc")
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 186c8cb982c..a02eada3aa5 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -503,7 +503,7 @@ __hw_perf_event_init(struct perf_event *event)
 	    event_requires_mode_exclusion(&event->attr)) {
 		pr_debug("ARM performance counters do not support "
 			 "mode exclusion\n");
-		return -EPERM;
+		return -EOPNOTSUPP;
 	}
 
 	/*
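
A side note on the domain.h hunk above: replacing the empty do-while stubs
with empty static inlines means arguments are still type-checked when
CONFIG_CPU_USE_DOMAINS is off, so a bad caller breaks every build rather
than only domain-enabled ones. A minimal standalone sketch of the
difference (hypothetical identifiers, not from the patch):

    /*
     * The macro stub discards its argument unevaluated, so an undeclared
     * identifier slips through; the inline stub still checks it.
     */
    #include <stdio.h>

    #define set_domain_macro(x) do { } while (0)    /* old-style stub */

    static inline void set_domain_inline(unsigned val) { } /* new-style stub */

    int main(void)
    {
        set_domain_macro(no_such_identifier);  /* compiles: arg is discarded */
        set_domain_inline(3);                  /* type-checked in every config */
        /* set_domain_inline(no_such_identifier); would fail in every config */
        puts("macro stub silently accepted an undeclared identifier");
        return 0;
    }
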
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 5700a7ae7f0..14e38261cd3 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -25,7 +25,6 @@
 #include <linux/regset.h>
 #include <linux/audit.h>
 #include <linux/tracehook.h>
-#include <linux/unistd.h>
 
 #include <asm/pgtable.h>
 #include <asm/traps.h>
@@ -918,8 +917,6 @@ asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
 		audit_syscall_entry(AUDIT_ARCH_ARM, scno, regs->ARM_r0,
 				    regs->ARM_r1, regs->ARM_r2, regs->ARM_r3);
 
-	if (why == 0 && test_and_clear_thread_flag(TIF_SYSCALL_RESTARTSYS))
-		scno = __NR_restart_syscall - __NR_SYSCALL_BASE;
 	if (!test_thread_flag(TIF_SYSCALL_TRACE))
 		return scno;
 
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index fd2392a17ac..536c5d6b340 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -27,6 +27,7 @@
  */
 #define SWI_SYS_SIGRETURN	(0xef000000|(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE))
 #define SWI_SYS_RT_SIGRETURN	(0xef000000|(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE))
+#define SWI_SYS_RESTART		(0xef000000|__NR_restart_syscall|__NR_OABI_SYSCALL_BASE)
 
 /*
  * With EABI, the syscall number has to be loaded into r7.
@@ -47,6 +48,18 @@ const unsigned long sigreturn_codes[7] = {
 };
 
 /*
+ * Either we support OABI only, or we have EABI with the OABI
+ * compat layer enabled.  In the later case we don't know if
+ * user space is EABI or not, and if not we must not clobber r7.
+ * Always using the OABI syscall solves that issue and works for
+ * all those cases.
+ */
+const unsigned long syscall_restart_code[2] = {
+	SWI_SYS_RESTART,	/* swi	__NR_restart_syscall */
+	0xe49df004,		/* ldr	pc, [sp], #4 */
+};
+
+/*
  * atomically swap in the new signal mask, and wait for a signal.
  */
 asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask)
@@ -592,10 +605,12 @@ static void do_signal(struct pt_regs *regs, int syscall)
 		case -ERESTARTNOHAND:
 		case -ERESTARTSYS:
 		case -ERESTARTNOINTR:
-		case -ERESTART_RESTARTBLOCK:
 			regs->ARM_r0 = regs->ARM_ORIG_r0;
 			regs->ARM_pc = restart_addr;
 			break;
+		case -ERESTART_RESTARTBLOCK:
+			regs->ARM_r0 = -EINTR;
+			break;
 		}
 	}
 
@@ -611,14 +626,12 @@ static void do_signal(struct pt_regs *regs, int syscall)
 		 * debugger has chosen to restart at a different PC.
 		 */
 		if (regs->ARM_pc == restart_addr) {
-			if (retval == -ERESTARTNOHAND ||
-			    retval == -ERESTART_RESTARTBLOCK
+			if (retval == -ERESTARTNOHAND
 			    || (retval == -ERESTARTSYS
 				&& !(ka.sa.sa_flags & SA_RESTART))) {
 				regs->ARM_r0 = -EINTR;
 				regs->ARM_pc = continue_addr;
 			}
-			clear_thread_flag(TIF_SYSCALL_RESTARTSYS);
 		}
 
 		handle_signal(signr, &ka, &info, regs);
@@ -632,8 +645,29 @@ static void do_signal(struct pt_regs *regs, int syscall)
 	 * ignore the restart.
 	 */
 	if (retval == -ERESTART_RESTARTBLOCK
-	    && regs->ARM_pc == restart_addr)
-		set_thread_flag(TIF_SYSCALL_RESTARTSYS);
+	    && regs->ARM_pc == continue_addr) {
+		if (thumb_mode(regs)) {
+			regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE;
+			regs->ARM_pc -= 2;
+		} else {
+#if defined(CONFIG_AEABI) && !defined(CONFIG_OABI_COMPAT)
+			regs->ARM_r7 = __NR_restart_syscall;
+			regs->ARM_pc -= 4;
+#else
+			u32 __user *usp;
+
+			regs->ARM_sp -= 4;
+			usp = (u32 __user *)regs->ARM_sp;
+
+			if (put_user(regs->ARM_pc, usp) == 0) {
+				regs->ARM_pc = KERN_RESTART_CODE;
+			} else {
+				regs->ARM_sp += 4;
+				force_sigsegv(0, current);
+			}
+#endif
+		}
+	}
 
 	restore_saved_sigmask();
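
The signal.c change above restores the pre-3.5 restart semantics: a syscall
that returned -ERESTART_RESTARTBLOCK is transparently restarted via
sys_restart_syscall only when no handler runs (through r7 for EABI/Thumb,
or the KERN_RESTART_CODE stub otherwise); once a handler is delivered it
must fail with -EINTR. The user-visible half can be observed from
userspace; a small hypothetical demo, assuming Linux, where nanosleep() is
a restart-block syscall that SA_RESTART does not restart:

    /* Hypothetical demo, not part of the patch. */
    #include <errno.h>
    #include <signal.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    static void on_alarm(int sig) { (void)sig; }

    int main(void)
    {
        struct sigaction sa = { .sa_handler = on_alarm, .sa_flags = SA_RESTART };
        struct timespec req = { .tv_sec = 5 }, rem;

        sigaction(SIGALRM, &sa, NULL);
        alarm(1);
        if (nanosleep(&req, &rem) < 0 && errno == EINTR)
            /* Even with SA_RESTART, the handler ends the sleep early. */
            printf("EINTR with ~%lds left: restart-block syscalls are "
                   "not auto-restarted across handlers\n", (long)rem.tv_sec);
        return 0;
    }
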
 */
diff --git a/arch/arm/kernel/signal.h b/arch/arm/kernel/signal.h
index 5ff067b7c75..6fcfe8398aa 100644
--- a/arch/arm/kernel/signal.h
+++ b/arch/arm/kernel/signal.h
@@ -8,5 +8,7 @@
  * published by the Free Software Foundation.
  */
 #define KERN_SIGRETURN_CODE	(CONFIG_VECTORS_BASE + 0x00000500)
+#define KERN_RESTART_CODE	(KERN_SIGRETURN_CODE + sizeof(sigreturn_codes))
 
 extern const unsigned long sigreturn_codes[7];
+extern const unsigned long syscall_restart_code[2];
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 4928d89758f..3647170e9a1 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -820,6 +820,8 @@ void __init early_trap_init(void *vectors_base)
 	 */
 	memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
 	       sigreturn_codes, sizeof(sigreturn_codes));
+	memcpy((void *)(vectors + KERN_RESTART_CODE - CONFIG_VECTORS_BASE),
+	       syscall_restart_code, sizeof(syscall_restart_code));
 
 	flush_icache_range(vectors, vectors + PAGE_SIZE);
 	modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 43a31fb0631..36ff15bbfdd 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -183,7 +183,9 @@ SECTIONS
 	}
 #endif
 
+#ifdef CONFIG_SMP
 	PERCPU_SECTION(L1_CACHE_BYTES)
+#endif
 
 #ifdef CONFIG_XIP_KERNEL
 	__data_loc = ALIGN(4);		/* location in binary */
diff --git a/arch/arm/mach-versatile/pci.c b/arch/arm/mach-versatile/pci.c
index bec933b04ef..e95bf84cc83 100644
--- a/arch/arm/mach-versatile/pci.c
+++ b/arch/arm/mach-versatile/pci.c
@@ -339,7 +339,6 @@ void __init pci_versatile_preinit(void)
 static int __init versatile_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
 	int irq;
-	int devslot = PCI_SLOT(dev->devfn);
 
 	/* slot,  pin,	irq
 	 *  24     1     27
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index c471436c795..2e8a1efdf7b 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -64,7 +64,7 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
 #ifdef CONFIG_ZONE_DMA
 extern phys_addr_t arm_dma_limit;
 #else
-#define arm_dma_limit ((u32)~0)
+#define arm_dma_limit ((phys_addr_t)~0)
 #endif
 
 extern phys_addr_t arm_lowmem_limit;
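
The one-line mm.h fix above matters on LPAE, where phys_addr_t is 64-bit:
a limit of ((u32)~0) silently caps address comparisons at 4GB - 1 and
wrongly rejects perfectly DMA-able memory above that. A standalone sketch
of the truncation (not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t phys_addr_t;   /* what CONFIG_ARM_LPAE effectively uses */

    int main(void)
    {
        phys_addr_t page = 0x100000000ULL;       /* 4GB: reachable with LPAE */
        phys_addr_t old_limit = (uint32_t)~0;    /* 0x00000000ffffffff */
        phys_addr_t new_limit = (phys_addr_t)~0; /* 0xffffffffffffffff */

        printf("old limit: page %s DMA-able\n", page <= old_limit ? "is" : "is NOT");
        printf("new limit: page %s DMA-able\n", page <= new_limit ? "is" : "is NOT");
        return 0;
    }
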
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index e5dad60b558..cf4528d5177 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -791,6 +791,79 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 	}
 }
 
+#ifndef CONFIG_ARM_LPAE
+
+/*
+ * The Linux PMD is made of two consecutive section entries covering 2MB
+ * (see definition in include/asm/pgtable-2level.h).  However a call to
+ * create_mapping() may optimize static mappings by using individual
+ * 1MB section mappings.  This leaves the actual PMD potentially half
+ * initialized if the top or bottom section entry isn't used, leaving it
+ * open to problems if a subsequent ioremap() or vmalloc() tries to use
+ * the virtual space left free by that unused section entry.
+ *
+ * Let's avoid the issue by inserting dummy vm entries covering the unused
+ * PMD halves once the static mappings are in place.
+ */
+
+static void __init pmd_empty_section_gap(unsigned long addr)
+{
+	struct vm_struct *vm;
+
+	vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
+	vm->addr = (void *)addr;
+	vm->size = SECTION_SIZE;
+	vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
+	vm->caller = pmd_empty_section_gap;
+	vm_area_add_early(vm);
+}
+
+static void __init fill_pmd_gaps(void)
+{
+	struct vm_struct *vm;
+	unsigned long addr, next = 0;
+	pmd_t *pmd;
+
+	/* we're still single threaded hence no lock needed here */
+	for (vm = vmlist; vm; vm = vm->next) {
+		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+			continue;
+		addr = (unsigned long)vm->addr;
+		if (addr < next)
+			continue;
+
+		/*
+		 * Check if this vm starts on an odd section boundary.
+		 * If so and the first section entry for this PMD is free
+		 * then we block the corresponding virtual address.
+		 */
+		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
+			pmd = pmd_off_k(addr);
+			if (pmd_none(*pmd))
+				pmd_empty_section_gap(addr & PMD_MASK);
+		}
+
+		/*
+		 * Then check if this vm ends on an odd section boundary.
+		 * If so and the second section entry for this PMD is empty
+		 * then we block the corresponding virtual address.
+		 */
+		addr += vm->size;
+		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
+			pmd = pmd_off_k(addr) + 1;
+			if (pmd_none(*pmd))
+				pmd_empty_section_gap(addr);
+		}
+
+		/* no need to look at any vm entry until we hit the next PMD */
+		next = (addr + PMD_SIZE - 1) & PMD_MASK;
+	}
+}
+
+#else
+#define fill_pmd_gaps() do { } while (0)
+#endif
+
 static void * __initdata vmalloc_min =
 	(void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
 
@@ -1072,6 +1145,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	 */
 	if (mdesc->map_io)
 		mdesc->map_io();
+	fill_pmd_gaps();
 
 	/*
 	 * Finally flush the caches and tlb to ensure that we're in a
diff --git a/arch/mips/pci/pci-lantiq.c b/arch/mips/pci/pci-lantiq.c
index ea453532a33..075d87acd12 100644
--- a/arch/mips/pci/pci-lantiq.c
+++ b/arch/mips/pci/pci-lantiq.c
@@ -129,7 +129,7 @@ static int __devinit ltq_pci_startup(struct platform_device *pdev)
 
 	/* setup reset gpio used by pci */
 	reset_gpio = of_get_named_gpio(node, "gpio-reset", 0);
-	if (reset_gpio > 0)
+	if (gpio_is_valid(reset_gpio))
 		devm_gpio_request(&pdev->dev, reset_gpio, "pci-reset");
 
 	/* enable auto-switching between PCI and EBU */
@@ -192,7 +192,7 @@ static int __devinit ltq_pci_startup(struct platform_device *pdev)
 	ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_IEN) | 0x10, LTQ_EBU_PCC_IEN);
 
 	/* toggle reset pin */
-	if (reset_gpio > 0) {
+	if (gpio_is_valid(reset_gpio)) {
 		__gpio_set_value(reset_gpio, 0);
 		wmb();
 		mdelay(1);
diff --git a/arch/mn10300/include/asm/thread_info.h b/arch/mn10300/include/asm/thread_info.h
index 08251d6f6b1..ac519bbd42f 100644
--- a/arch/mn10300/include/asm/thread_info.h
+++ b/arch/mn10300/include/asm/thread_info.h
@@ -123,7 +123,7 @@ static inline unsigned long current_stack_pointer(void)
 }
 
 #ifndef CONFIG_KGDB
-void arch_release_thread_info(struct thread_info *ti)
+void arch_release_thread_info(struct thread_info *ti);
 #endif
 #define get_thread_info(ti)	get_task_struct((ti)->task)
 #define put_thread_info(ti)	put_task_struct((ti)->task)
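
The core of fill_pmd_gaps() above is the test
(addr & ~PMD_MASK) == SECTION_SIZE, which detects a mapping edge sitting on
the odd 1MB half of a 2MB PMD. A standalone sketch with the 2-level
constants spelled out (not kernel code):

    #include <stdio.h>

    #define SECTION_SIZE    (1UL << 20)             /* one 1MB section */
    #define PMD_SIZE        (2 * SECTION_SIZE)      /* one PMD = two sections */
    #define PMD_MASK        (~(PMD_SIZE - 1))

    static int on_odd_section(unsigned long addr)
    {
        return (addr & ~PMD_MASK) == SECTION_SIZE;
    }

    int main(void)
    {
        /* A 1MB static mapping at 0xe0100000 fills only the second half
         * of the PMD at 0xe0000000, so 0xe0000000..0xe00fffff is the gap
         * a dummy vm entry must block. */
        unsigned long addr = 0xe0100000UL;

        if (on_odd_section(addr))
            printf("reserve unused PMD half at %#lx\n", addr & PMD_MASK);
        return 0;
    }
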
0" : : : "memory"); +#define __hard_irq_enable() asm volatile("wrteei 1" : : : "memory") +#define __hard_irq_disable() asm volatile("wrteei 0" : : : "memory") #else #define __hard_irq_enable() __mtmsrd(local_paca->kernel_msr | MSR_EE, 1) #define __hard_irq_disable() __mtmsrd(local_paca->kernel_msr, 1) @@ -125,6 +125,8 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs) return !regs->softe; } +extern bool prep_irq_for_idle(void); + #else /* CONFIG_PPC64 */ #define SET_MSR_EE(x) mtmsr(x) diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 1b415027ec0..1f017bb7a7c 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -229,7 +229,7 @@ notrace void arch_local_irq_restore(unsigned long en) */ if (unlikely(irq_happened != PACA_IRQ_HARD_DIS)) __hard_irq_disable(); -#ifdef CONFIG_TRACE_IRQFLAG +#ifdef CONFIG_TRACE_IRQFLAGS else { /* * We should already be hard disabled here. We had bugs @@ -286,6 +286,52 @@ void notrace restore_interrupts(void) __hard_irq_enable(); } +/* + * This is a helper to use when about to go into idle low-power + * when the latter has the side effect of re-enabling interrupts + * (such as calling H_CEDE under pHyp). + * + * You call this function with interrupts soft-disabled (this is + * already the case when ppc_md.power_save is called). The function + * will return whether to enter power save or just return. + * + * In the former case, it will have notified lockdep of interrupts + * being re-enabled and generally sanitized the lazy irq state, + * and in the latter case it will leave with interrupts hard + * disabled and marked as such, so the local_irq_enable() call + * in cpu_idle() will properly re-enable everything. + */ +bool prep_irq_for_idle(void) +{ + /* + * First we need to hard disable to ensure no interrupt + * occurs before we effectively enter the low power state + */ + hard_irq_disable(); + + /* + * If anything happened while we were soft-disabled, + * we return now and do not enter the low power state. + */ + if (lazy_irq_pending()) + return false; + + /* Tell lockdep we are about to re-enable */ + trace_hardirqs_on(); + + /* + * Mark interrupts as soft-enabled and clear the + * PACA_IRQ_HARD_DIS from the pending mask since we + * are about to hard enable as well as a side effect + * of entering the low power state. + */ + local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS; + local_paca->soft_enabled = 1; + + /* Tell the caller to enter the low power state */ + return true; +} + #endif /* CONFIG_PPC64 */ int arch_show_interrupts(struct seq_file *p, int prec) diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index a84aafce2a1..a1044f43bec 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -810,7 +810,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) lwz r3,VCORE_NAPPING_THREADS(r5) lwz r4,VCPU_PTID(r9) li r0,1 - sldi r0,r0,r4 + sld r0,r0,r4 andc. 
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index a84aafce2a1..a1044f43bec 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -810,7 +810,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	lwz	r3,VCORE_NAPPING_THREADS(r5)
 	lwz	r4,VCPU_PTID(r9)
 	li	r0,1
-	sldi	r0,r0,r4
+	sld	r0,r0,r4
 	andc.	r3,r3,r0	/* no sense IPI'ing ourselves */
 	beq	43f
 	mulli	r4,r4,PACA_SIZE		/* get paca for thread 0 */
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 6e8f677f564..1e95556dc69 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -639,7 +639,7 @@ static void __init parse_drconf_memory(struct device_node *memory)
 	unsigned int n, rc, ranges, is_kexec_kdump = 0;
 	unsigned long lmb_size, base, size, sz;
 	int nid;
-	struct assoc_arrays aa;
+	struct assoc_arrays aa = { .arrays = NULL };
 
 	n = of_get_drconf_memory(memory, &dm);
 	if (!n)
diff --git a/arch/powerpc/platforms/cell/pervasive.c b/arch/powerpc/platforms/cell/pervasive.c
index efdacc82957..d17e98bc0c1 100644
--- a/arch/powerpc/platforms/cell/pervasive.c
+++ b/arch/powerpc/platforms/cell/pervasive.c
@@ -42,11 +42,9 @@ static void cbe_power_save(void)
 {
 	unsigned long ctrl, thread_switch_control;
 
-	/*
-	 * We need to hard disable interrupts, the local_irq_enable() done by
-	 * our caller upon return will hard re-enable.
-	 */
-	hard_irq_disable();
+	/* Ensure our interrupt state is properly tracked */
+	if (!prep_irq_for_idle())
+		return;
 
 	ctrl = mfspr(SPRN_CTRLF);
 
@@ -81,6 +79,9 @@ static void cbe_power_save(void)
 	 */
 	ctrl &= ~(CTRL_RUNLATCH | CTRL_TE);
 	mtspr(SPRN_CTRLT, ctrl);
+
+	/* Re-enable interrupts in MSR */
+	__hard_irq_enable();
 }
 
 static int cbe_system_reset_exception(struct pt_regs *regs)
diff --git a/arch/powerpc/platforms/pseries/processor_idle.c b/arch/powerpc/platforms/pseries/processor_idle.c
index e61483e8e96..c71be66bd5d 100644
--- a/arch/powerpc/platforms/pseries/processor_idle.c
+++ b/arch/powerpc/platforms/pseries/processor_idle.c
@@ -99,15 +99,18 @@ out:
 static void check_and_cede_processor(void)
 {
 	/*
-	 * Interrupts are soft-disabled at this point,
-	 * but not hard disabled. So an interrupt might have
-	 * occurred before entering NAP, and would be potentially
-	 * lost (edge events, decrementer events, etc...) unless
-	 * we first hard disable then check.
+	 * Ensure our interrupt state is properly tracked,
+	 * also checks if no interrupt has occurred while we
+	 * were soft-disabled
 	 */
-	hard_irq_disable();
-	if (!lazy_irq_pending())
+	if (prep_irq_for_idle()) {
 		cede_processor();
+#ifdef CONFIG_TRACE_IRQFLAGS
+		/* Ensure that H_CEDE returns with IRQs on */
+		if (WARN_ON(!(mfmsr() & MSR_EE)))
+			__hard_irq_enable();
+#endif
+	}
 }
 
 static int dedicated_cede_loop(struct cpuidle_device *dev,
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 0f3ab06d222..eab3492a45c 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -971,7 +971,7 @@ static int cpu_cmd(void)
 	/* print cpus waiting or in xmon */
 	printf("cpus stopped:");
 	count = 0;
-	for (cpu = 0; cpu < NR_CPUS; ++cpu) {
+	for_each_possible_cpu(cpu) {
 		if (cpumask_test_cpu(cpu, &cpus_in_xmon)) {
 			if (count == 0)
 				printf(" %x", cpu);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index be3cea4407f..57e168e27b5 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3934,6 +3934,9 @@ static void kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm,
 {
 	struct kvm_mmu_page *page;
 
+	if (list_empty(&kvm->arch.active_mmu_pages))
+		return;
+
 	page = container_of(kvm->arch.active_mmu_pages.prev,
 			    struct kvm_mmu_page, link);
 	kvm_mmu_prepare_zap_page(kvm, page, invalid_list);
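
The one-word rmhandlers.S fix above is easy to misread: sldi takes an
immediate shift count, so writing r4 in that slot assembles as the constant
4, and the napping-threads mask was always 1 << 4 regardless of the thread
id held in r4; sld shifts by the register's value. A C analogy of the bug
(not the actual assembly):

    #include <stdio.h>

    int main(void)
    {
        unsigned long ptid = 2;              /* thread id, as held in r4 */

        unsigned long buggy = 1UL << 4;      /* sldi: shift amount is fixed */
        unsigned long fixed = 1UL << ptid;   /* sld: shift amount from reg  */

        printf("buggy mask %#lx, fixed mask %#lx\n", buggy, fixed);
        return 0;
    }
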