Diffstat (limited to 'arch')
119 files changed, 1263 insertions, 835 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 49d993cee51..2651b1da1c5 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1189,6 +1189,16 @@ config PL310_ERRATA_588369 is not correctly implemented in PL310 as clean lines are not invalidated as a result of these operations. +config ARM_ERRATA_643719 + bool "ARM errata: LoUIS bit field in CLIDR register is incorrect" + depends on CPU_V7 && SMP + help + This option enables the workaround for the 643719 Cortex-A9 (prior to + r1p0) erratum. On affected cores the LoUIS bit field of the CLIDR + register returns zero when it should return one. The workaround + corrects this value, ensuring cache maintenance operations which use + it behave as intended and avoiding data corruption. + config ARM_ERRATA_720789 bool "ARM errata: TLBIASIDIS and TLBIMVAIS operations can broadcast a faulty ASID" depends on CPU_V7 @@ -2006,7 +2016,7 @@ config XIP_PHYS_ADDR config KEXEC bool "Kexec system call (EXPERIMENTAL)" - depends on (!SMP || HOTPLUG_CPU) + depends on (!SMP || PM_SLEEP_SMP) help kexec is a system call that implements the ability to shutdown your current kernel, and to start another kernel. It is like a reboot diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile index 3580d57ea21..120b83bfde2 100644 --- a/arch/arm/boot/compressed/Makefile +++ b/arch/arm/boot/compressed/Makefile @@ -116,7 +116,8 @@ targets := vmlinux vmlinux.lds \ # Make sure files are removed during clean extra-y += piggy.gzip piggy.lzo piggy.lzma piggy.xzkern \ - lib1funcs.S ashldi3.S $(libfdt) $(libfdt_hdrs) + lib1funcs.S ashldi3.S $(libfdt) $(libfdt_hdrs) \ + hyp-stub.S ifeq ($(CONFIG_FUNCTION_TRACER),y) ORIG_CFLAGS := $(KBUILD_CFLAGS) @@ -124,7 +125,7 @@ KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS)) endif ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj) -asflags-y := -Wa,-march=all -DZIMAGE +asflags-y := -DZIMAGE # Supply kernel BSS size to the decompressor via a linker symbol. KBSS_SZ = $(shell $(CROSS_COMPILE)size $(obj)/../../../../vmlinux | \ diff --git a/arch/arm/boot/compressed/debug.S b/arch/arm/boot/compressed/debug.S index 6e8382d5b7a..5392ee63338 100644 --- a/arch/arm/boot/compressed/debug.S +++ b/arch/arm/boot/compressed/debug.S @@ -1,6 +1,8 @@ #include <linux/linkage.h> #include <asm/assembler.h> +#ifndef CONFIG_DEBUG_SEMIHOSTING + #include CONFIG_DEBUG_LL_INCLUDE ENTRY(putc) @@ -10,3 +12,29 @@ ENTRY(putc) busyuart r3, r1 mov pc, lr ENDPROC(putc) + +#else + +ENTRY(putc) + adr r1, 1f + ldmia r1, {r2, r3} + add r2, r2, r1 + ldr r1, [r2, r3] + strb r0, [r1] + mov r0, #0x03 @ SYS_WRITEC + ARM( svc #0x123456 ) + THUMB( svc #0xab ) + mov pc, lr + .align 2 +1: .word _GLOBAL_OFFSET_TABLE_ - . 
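As an aside, the semihosting path added here is easy to exercise from C. A minimal sketch, assuming an ARM-state target with a semihosting-capable debugger attached; semihost_putc is a hypothetical helper, not part of this patch, though the SVC immediate 0x123456 and operation number 0x03 (SYS_WRITEC) are the standard semihosting ABI the assembly above relies on:

    /* Write one character to the debugger console via semihosting. */
    static void semihost_putc(char c)
    {
            register unsigned long op asm("r0") = 0x03;     /* SYS_WRITEC */
            register char *arg asm("r1") = &c;              /* pointer to the char */

            asm volatile("svc #0x123456"    /* ARM-state semihosting trap */
                         : "+r" (op)
                         : "r" (arg)
                         : "memory");
    }

The assembly version has to reach its one-byte buffer through a GOT-relative load because the decompressor is position independent; a C compiler handles that addressing automatically.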
+ .word semi_writec_buf(GOT) +ENDPROC(putc) + + .bss + .global semi_writec_buf + .type semi_writec_buf, %object +semi_writec_buf: + .space 4 + .size semi_writec_buf, 4 + +#endif diff --git a/arch/arm/boot/compressed/head-sa1100.S b/arch/arm/boot/compressed/head-sa1100.S index 6179d94dd5c..3115e313d9f 100644 --- a/arch/arm/boot/compressed/head-sa1100.S +++ b/arch/arm/boot/compressed/head-sa1100.S @@ -11,6 +11,7 @@ #include <asm/mach-types.h> .section ".start", "ax" + .arch armv4 __SA1100_start: diff --git a/arch/arm/boot/compressed/head-shark.S b/arch/arm/boot/compressed/head-shark.S index 089c560e07f..92b56897ed6 100644 --- a/arch/arm/boot/compressed/head-shark.S +++ b/arch/arm/boot/compressed/head-shark.S @@ -18,6 +18,7 @@ .section ".start", "ax" + .arch armv4 b __beginning __ofw_data: .long 0 @ the number of memory blocks diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index fe4d9c3ad76..032a8d98714 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S @@ -11,6 +11,7 @@ #include <linux/linkage.h> #include <asm/assembler.h> + .arch armv7-a /* * Debugging stuff * @@ -805,8 +806,8 @@ call_cache_fn: adr r12, proc_types .align 2 .type proc_types,#object proc_types: - .word 0x00000000 @ old ARM ID - .word 0x0000f000 + .word 0x41000000 @ old ARM ID + .word 0xff00f000 mov pc, lr THUMB( nop ) mov pc, lr diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index bff71388e72..17d0ae8672f 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -320,9 +320,7 @@ static inline void flush_anon_page(struct vm_area_struct *vma, } #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE -static inline void flush_kernel_dcache_page(struct page *page) -{ -} +extern void flush_kernel_dcache_page(struct page *); #define flush_dcache_mmap_lock(mapping) \ spin_lock_irq(&(mapping)->tree_lock) diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h index 968c0a14e0a..209e6504922 100644 --- a/arch/arm/include/asm/percpu.h +++ b/arch/arm/include/asm/percpu.h @@ -30,8 +30,15 @@ static inline void set_my_cpu_offset(unsigned long off) static inline unsigned long __my_cpu_offset(void) { unsigned long off; - /* Read TPIDRPRW */ - asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : : "memory"); + register unsigned long *sp asm ("sp"); + + /* + * Read TPIDRPRW. + * We want to allow caching the value, so avoid using volatile and + * instead use a fake stack read to hazard against barrier(). + */ + asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : "Q" (*sp)); + return off; } #define __my_cpu_offset __my_cpu_offset() diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h index 99a19512ee2..bdf2b8458ec 100644 --- a/arch/arm/include/asm/tlb.h +++ b/arch/arm/include/asm/tlb.h @@ -33,18 +33,6 @@ #include <asm/pgalloc.h> #include <asm/tlbflush.h> -/* - * We need to delay page freeing for SMP as other CPUs can access pages - * which have been removed but not yet had their TLB entries invalidated. - * Also, as ARMv7 speculative prefetch can drag new entries into the TLB, - * we need to apply this same delaying tactic to ensure correct operation. 
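(That deleted comment describes exactly the ordering the now-unconditional batching guarantees: pages removed from the page tables are queued, the TLB is flushed, and only then are the pages freed.) A minimal sketch of the invariant, using stand-in names rather than the real mmu_gather API:

    extern void flush_tlb_stub(void);           /* placeholder */
    extern void free_page_stub(void *page);     /* placeholder */

    #define BATCH 8

    struct gather {
            void *pages[BATCH];
            unsigned int nr;
    };

    /* Queue a page for freeing; returns remaining capacity,
     * 0 meaning the caller must flush before queueing more. */
    static int gather_page(struct gather *g, void *page)
    {
            g->pages[g->nr++] = page;           /* never freed immediately */
            return BATCH - g->nr;
    }

    static void gather_flush(struct gather *g)
    {
            flush_tlb_stub();                   /* 1: kill stale TLB entries */
            while (g->nr)
                    free_page_stub(g->pages[--g->nr]);  /* 2: only then free */
    }

No page may be reused before every CPU's stale TLB entry for it is gone; dropping tlb_fast_mode simply makes the safe path the only path.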
- */ -#if defined(CONFIG_SMP) || defined(CONFIG_CPU_32v7) -#define tlb_fast_mode(tlb) 0 -#else -#define tlb_fast_mode(tlb) 1 -#endif - #define MMU_GATHER_BUNDLE 8 /* @@ -112,12 +100,10 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb) static inline void tlb_flush_mmu(struct mmu_gather *tlb) { tlb_flush(tlb); - if (!tlb_fast_mode(tlb)) { - free_pages_and_swap_cache(tlb->pages, tlb->nr); - tlb->nr = 0; - if (tlb->pages == tlb->local) - __tlb_alloc_page(tlb); - } + free_pages_and_swap_cache(tlb->pages, tlb->nr); + tlb->nr = 0; + if (tlb->pages == tlb->local) + __tlb_alloc_page(tlb); } static inline void @@ -178,11 +164,6 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page) { - if (tlb_fast_mode(tlb)) { - free_page_and_swap_cache(page); - return 1; /* avoid calling tlb_flush_mmu */ - } - tlb->pages[tlb->nr++] = page; VM_BUG_ON(tlb->nr > tlb->max); return tlb->max - tlb->nr; diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c index 8ef8c933780..4fb074c446b 100644 --- a/arch/arm/kernel/machine_kexec.c +++ b/arch/arm/kernel/machine_kexec.c @@ -134,6 +134,10 @@ void machine_kexec(struct kimage *image) unsigned long reboot_code_buffer_phys; void *reboot_code_buffer; + if (num_online_cpus() > 1) { + pr_err("kexec: error: multiple CPUs still online\n"); + return; + } page_list = image->head & PAGE_MASK; diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 282de4826ab..6e8931ccf13 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -184,30 +184,61 @@ int __init reboot_setup(char *str) __setup("reboot=", reboot_setup); +/* + * Called by kexec, immediately prior to machine_kexec(). + * + * This must completely disable all secondary CPUs; simply causing those CPUs + * to execute e.g. a RAM-based pin loop is not sufficient. This allows the + * kexec'd kernel to use any and all RAM as it sees fit, without having to + * avoid any code or data used by any SW CPU pin loop. The CPU hotplug + * functionality embodied in disable_nonboot_cpus() to achieve this. + */ void machine_shutdown(void) { -#ifdef CONFIG_SMP - smp_send_stop(); -#endif + disable_nonboot_cpus(); } +/* + * Halting simply requires that the secondary CPUs stop performing any + * activity (executing tasks, handling interrupts). smp_send_stop() + * achieves this. + */ void machine_halt(void) { - machine_shutdown(); + smp_send_stop(); + local_irq_disable(); while (1); } +/* + * Power-off simply requires that the secondary CPUs stop performing any + * activity (executing tasks, handling interrupts). smp_send_stop() + * achieves this. When the system power is turned off, it will take all CPUs + * with it. + */ void machine_power_off(void) { - machine_shutdown(); + smp_send_stop(); + if (pm_power_off) pm_power_off(); } +/* + * Restart requires that the secondary CPUs stop performing any activity + * while the primary CPU resets the system. Systems with a single CPU can + * use soft_restart() as their machine descriptor's .restart hook, since that + * will cause the only available CPU to reset. Systems with multiple CPUs must + * provide a HW restart implementation, to ensure that all CPUs reset at once. + * This is required so that any code running after reset on the primary CPU + * doesn't have to co-ordinate with other CPUs to ensure they aren't still + * executing pre-reset code, and using RAM that the primary CPU's code wishes + * to use. 
Implementing such co-ordination would be essentially impossible. + */ void machine_restart(char *cmd) { - machine_shutdown(); + smp_send_stop(); arm_pm_restart(reboot_mode, cmd); diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 550d63cef68..5919eb451bb 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -651,17 +651,6 @@ void smp_send_reschedule(int cpu) smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE); } -#ifdef CONFIG_HOTPLUG_CPU -static void smp_kill_cpus(cpumask_t *mask) -{ - unsigned int cpu; - for_each_cpu(cpu, mask) - platform_cpu_kill(cpu); -} -#else -static void smp_kill_cpus(cpumask_t *mask) { } -#endif - void smp_send_stop(void) { unsigned long timeout; @@ -679,8 +668,6 @@ void smp_send_stop(void) if (num_online_cpus() > 1) pr_warning("SMP: failed to stop secondary CPUs\n"); - - smp_kill_cpus(&mask); } /* diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c index f10316b4ecd..c5a59546a25 100644 --- a/arch/arm/kernel/topology.c +++ b/arch/arm/kernel/topology.c @@ -13,6 +13,7 @@ #include <linux/cpu.h> #include <linux/cpumask.h> +#include <linux/export.h> #include <linux/init.h> #include <linux/percpu.h> #include <linux/node.h> @@ -200,6 +201,7 @@ static inline void update_cpu_power(unsigned int cpuid, unsigned int mpidr) {} * cpu topology table */ struct cputopo_arm cpu_topology[NR_CPUS]; +EXPORT_SYMBOL_GPL(cpu_topology); const struct cpumask *cpu_coregroup_mask(int cpu) { diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 37d216d814c..ef1703b9587 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -492,6 +492,11 @@ static void vcpu_pause(struct kvm_vcpu *vcpu) wait_event_interruptible(*wq, !vcpu->arch.pause); } +static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.target >= 0; +} + /** * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code * @vcpu: The VCPU pointer @@ -508,8 +513,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) int ret; sigset_t sigsaved; - /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */ - if (unlikely(vcpu->arch.target < 0)) + if (unlikely(!kvm_vcpu_initialized(vcpu))) return -ENOEXEC; ret = kvm_vcpu_first_run_init(vcpu); @@ -710,6 +714,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_SET_ONE_REG: case KVM_GET_ONE_REG: { struct kvm_one_reg reg; + + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(®, argp, sizeof(reg))) return -EFAULT; if (ioctl == KVM_SET_ONE_REG) @@ -722,6 +730,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_reg_list reg_list; unsigned n; + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(®_list, user_list, sizeof(reg_list))) return -EFAULT; n = reg_list.n; diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 965706578f1..84ba67b982c 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -43,7 +43,14 @@ static phys_addr_t hyp_idmap_vector; static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) { - kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa); + /* + * This function also gets called when dealing with HYP page + * tables. As HYP doesn't have an associated struct kvm (and + * the HYP page tables are fairly static), we don't do + * anything there. 
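From userspace, the effect of these kvm_vcpu_initialized() checks is that register access fails cleanly until KVM_ARM_VCPU_INIT has been issued. A hedged sketch of the calling side (struct kvm_one_reg and the ioctl numbers are the stock KVM ABI; error handling trimmed):

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* Returns 0 on success; -1 with errno == ENOEXEC if the vcpu has
     * not yet been initialized via KVM_ARM_VCPU_INIT. */
    static int read_one_reg(int vcpu_fd, __u64 id, __u64 *val)
    {
            struct kvm_one_reg reg = {
                    .id   = id,
                    .addr = (__u64)(unsigned long)val,
            };

            return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
    }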
+ */ + if (kvm) + kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa); } static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, @@ -78,18 +85,20 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc) return p; } -static void clear_pud_entry(pud_t *pud) +static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr) { pmd_t *pmd_table = pmd_offset(pud, 0); pud_clear(pud); + kvm_tlb_flush_vmid_ipa(kvm, addr); pmd_free(NULL, pmd_table); put_page(virt_to_page(pud)); } -static void clear_pmd_entry(pmd_t *pmd) +static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr) { pte_t *pte_table = pte_offset_kernel(pmd, 0); pmd_clear(pmd); + kvm_tlb_flush_vmid_ipa(kvm, addr); pte_free_kernel(NULL, pte_table); put_page(virt_to_page(pmd)); } @@ -100,11 +109,12 @@ static bool pmd_empty(pmd_t *pmd) return page_count(pmd_page) == 1; } -static void clear_pte_entry(pte_t *pte) +static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr) { if (pte_present(*pte)) { kvm_set_pte(pte, __pte(0)); put_page(virt_to_page(pte)); + kvm_tlb_flush_vmid_ipa(kvm, addr); } } @@ -114,7 +124,8 @@ static bool pte_empty(pte_t *pte) return page_count(pte_page) == 1; } -static void unmap_range(pgd_t *pgdp, unsigned long long start, u64 size) +static void unmap_range(struct kvm *kvm, pgd_t *pgdp, + unsigned long long start, u64 size) { pgd_t *pgd; pud_t *pud; @@ -138,15 +149,15 @@ static void unmap_range(pgd_t *pgdp, unsigned long long start, u64 size) } pte = pte_offset_kernel(pmd, addr); - clear_pte_entry(pte); + clear_pte_entry(kvm, pte, addr); range = PAGE_SIZE; /* If we emptied the pte, walk back up the ladder */ if (pte_empty(pte)) { - clear_pmd_entry(pmd); + clear_pmd_entry(kvm, pmd, addr); range = PMD_SIZE; if (pmd_empty(pmd)) { - clear_pud_entry(pud); + clear_pud_entry(kvm, pud, addr); range = PUD_SIZE; } } @@ -165,14 +176,14 @@ void free_boot_hyp_pgd(void) mutex_lock(&kvm_hyp_pgd_mutex); if (boot_hyp_pgd) { - unmap_range(boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE); - unmap_range(boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE); + unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE); + unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE); kfree(boot_hyp_pgd); boot_hyp_pgd = NULL; } if (hyp_pgd) - unmap_range(hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE); + unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE); kfree(init_bounce_page); init_bounce_page = NULL; @@ -200,9 +211,10 @@ void free_hyp_pgds(void) if (hyp_pgd) { for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE) - unmap_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE); + unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE); for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE) - unmap_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE); + unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE); + kfree(hyp_pgd); hyp_pgd = NULL; } @@ -393,7 +405,7 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm) */ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) { - unmap_range(kvm->arch.pgd, start, size); + unmap_range(kvm, kvm->arch.pgd, start, size); } /** @@ -675,7 +687,6 @@ static void handle_hva_to_gpa(struct kvm *kvm, static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data) { unmap_stage2_range(kvm, gpa, PAGE_SIZE); - kvm_tlb_flush_vmid_ipa(kvm, gpa); } int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S index 15451ee4acc..515b00064da 100644 --- a/arch/arm/mm/cache-v7.S +++ 
b/arch/arm/mm/cache-v7.S @@ -92,6 +92,14 @@ ENTRY(v7_flush_dcache_louis) mrc p15, 1, r0, c0, c0, 1 @ read clidr, r0 = clidr ALT_SMP(ands r3, r0, #(7 << 21)) @ extract LoUIS from clidr ALT_UP(ands r3, r0, #(7 << 27)) @ extract LoUU from clidr +#ifdef CONFIG_ARM_ERRATA_643719 + ALT_SMP(mrceq p15, 0, r2, c0, c0, 0) @ read main ID register + ALT_UP(moveq pc, lr) @ LoUU is zero, so nothing to do + ldreq r1, =0x410fc090 @ ID of ARM Cortex A9 r0p? + biceq r2, r2, #0x0000000f @ clear minor revision number + teqeq r2, r1 @ test for errata affected core and if so... + orreqs r3, #(1 << 21) @ fix LoUIS value (and set flags state to 'ne') +#endif ALT_SMP(mov r3, r3, lsr #20) @ r3 = LoUIS * 2 ALT_UP(mov r3, r3, lsr #26) @ r3 = LoUU * 2 moveq pc, lr @ return if level == 0 diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index 0d473cce501..32aa5861119 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c @@ -301,6 +301,39 @@ void flush_dcache_page(struct page *page) EXPORT_SYMBOL(flush_dcache_page); /* + * Ensure cache coherency for the kernel mapping of this page. We can + * assume that the page is pinned via kmap. + * + * If the page only exists in the page cache and there are no user + * space mappings, this is a no-op since the page was already marked + * dirty at creation. Otherwise, we need to flush the dirty kernel + * cache lines directly. + */ +void flush_kernel_dcache_page(struct page *page) +{ + if (cache_is_vivt() || cache_is_vipt_aliasing()) { + struct address_space *mapping; + + mapping = page_mapping(page); + + if (!mapping || mapping_mapped(mapping)) { + void *addr; + + addr = page_address(page); + /* + * kmap_atomic() doesn't set the page virtual + * address for highmem pages, and + * kunmap_atomic() takes care of cache + * flushing already. + */ + if (!IS_ENABLED(CONFIG_HIGHMEM) || addr) + __cpuc_flush_dcache_area(addr, PAGE_SIZE); + } + } +} +EXPORT_SYMBOL(flush_kernel_dcache_page); + +/* * Flush an anonymous page so that users of get_user_pages() * can safely access the data. 
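The erratum workaround above is easier to follow in C. A sketch under the same assumptions as the assembly (CLIDR is CP15 c0/c0/1 with LoUIS in bits [23:21]; 0x410fc090 is the Cortex-A9 MIDR with the minor revision masked off, mirroring the ldreq/biceq pair):

    /* Return the LoUIS cache level, with the 643719 fixup applied. */
    static unsigned int clidr_louis(void)
    {
            unsigned int clidr, midr;

            asm("mrc p15, 1, %0, c0, c0, 1" : "=r" (clidr));    /* CLIDR */
            asm("mrc p15, 0, %0, c0, c0, 0" : "=r" (midr));     /* MIDR  */

            if (((clidr >> 21) & 7) == 0 && (midr & ~0xfU) == 0x410fc090)
                    return 1;   /* affected A9 (< r1p0) reports 0, should be 1 */

            return (clidr >> 21) & 7;
    }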
The expected sequence is: * diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index e0d8565671a..4d409e6a552 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -616,10 +616,12 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr, } while (pte++, addr += PAGE_SIZE, addr != end); } -static void __init map_init_section(pmd_t *pmd, unsigned long addr, +static void __init __map_init_section(pmd_t *pmd, unsigned long addr, unsigned long end, phys_addr_t phys, const struct mem_type *type) { + pmd_t *p = pmd; + #ifndef CONFIG_ARM_LPAE /* * In classic MMU format, puds and pmds are folded in to @@ -638,7 +640,7 @@ static void __init map_init_section(pmd_t *pmd, unsigned long addr, phys += SECTION_SIZE; } while (pmd++, addr += SECTION_SIZE, addr != end); - flush_pmd_entry(pmd); + flush_pmd_entry(p); } static void __init alloc_init_pmd(pud_t *pud, unsigned long addr, @@ -661,7 +663,7 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr, */ if (type->prot_sect && ((addr | next | phys) & ~SECTION_MASK) == 0) { - map_init_section(pmd, addr, next, phys, type); + __map_init_section(pmd, addr, next, phys, type); } else { alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys), type); diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 2c73a7301ff..4c8c9c10a38 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -409,8 +409,8 @@ __v7_ca9mp_proc_info: */ .type __v7_pj4b_proc_info, #object __v7_pj4b_proc_info: - .long 0x562f5840 - .long 0xfffffff0 + .long 0x560f5800 + .long 0xff0fff00 __v7_proc __v7_pj4b_setup .size __v7_pj4b_proc_info, . - __v7_pj4b_proc_info diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 1e49e5eb81e..9ba33c40cdf 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -1336,6 +1336,7 @@ void perf_callchain_user(struct perf_callchain_entry *entry, return; } + perf_callchain_store(entry, regs->pc); tail = (struct frame_tail __user *)regs->regs[29]; while (entry->nr < PERF_MAX_STACK_DEPTH && diff --git a/arch/ia64/include/asm/irqflags.h b/arch/ia64/include/asm/irqflags.h index 1bf2cf2f4ab..cec6c06b52c 100644 --- a/arch/ia64/include/asm/irqflags.h +++ b/arch/ia64/include/asm/irqflags.h @@ -11,6 +11,7 @@ #define _ASM_IA64_IRQFLAGS_H #include <asm/pal.h> +#include <asm/kregs.h> #ifdef CONFIG_IA64_DEBUG_IRQ extern unsigned long last_cli_ip; diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h index c3ffe3e54ed..ef3a9de0195 100644 --- a/arch/ia64/include/asm/tlb.h +++ b/arch/ia64/include/asm/tlb.h @@ -46,12 +46,6 @@ #include <asm/tlbflush.h> #include <asm/machvec.h> -#ifdef CONFIG_SMP -# define tlb_fast_mode(tlb) ((tlb)->nr == ~0U) -#else -# define tlb_fast_mode(tlb) (1) -#endif - /* * If we can't allocate a page to make a big batch of page pointers * to work on, then just handle a few from the on-stack structure. @@ -60,7 +54,7 @@ struct mmu_gather { struct mm_struct *mm; - unsigned int nr; /* == ~0U => fast mode */ + unsigned int nr; unsigned int max; unsigned char fullmm; /* non-zero means full mm flush */ unsigned char need_flush; /* really unmapped some PTEs? 
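The one-line perf_callchain_user fix above matters because the interrupted (leaf) function has no frame record of its own, so without storing regs->pc first the sample's innermost entry was simply lost. A sketch of the resulting walk under the AArch64 frame-pointer convention, with the real walker's user-memory fault handling and stack-bounds checks elided:

    struct frame_tail {
            unsigned long fp;   /* the caller's frame record (x29 chain) */
            unsigned long lr;   /* return address into the caller */
    };

    /* Record the interrupted pc first, then follow the x29 chain. */
    static int unwind_user(unsigned long pc, const struct frame_tail *tail,
                           unsigned long *ips, int max)
    {
            int n = 0;

            ips[n++] = pc;      /* the entry the fix adds */
            while (tail && n < max) {
                    ips[n++] = tail->lr;
                    tail = (const struct frame_tail *)tail->fp;
            }
            return n;
    }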
*/ @@ -103,6 +97,7 @@ extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS]; static inline void ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end) { + unsigned long i; unsigned int nr; if (!tlb->need_flush) @@ -141,13 +136,11 @@ ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long e /* lastly, release the freed pages */ nr = tlb->nr; - if (!tlb_fast_mode(tlb)) { - unsigned long i; - tlb->nr = 0; - tlb->start_addr = ~0UL; - for (i = 0; i < nr; ++i) - free_page_and_swap_cache(tlb->pages[i]); - } + + tlb->nr = 0; + tlb->start_addr = ~0UL; + for (i = 0; i < nr; ++i) + free_page_and_swap_cache(tlb->pages[i]); } static inline void __tlb_alloc_page(struct mmu_gather *tlb) @@ -167,20 +160,7 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_m tlb->mm = mm; tlb->max = ARRAY_SIZE(tlb->local); tlb->pages = tlb->local; - /* - * Use fast mode if only 1 CPU is online. - * - * It would be tempting to turn on fast-mode for full_mm_flush as well. But this - * doesn't work because of speculative accesses and software prefetching: the page - * table of "mm" may (and usually is) the currently active page table and even - * though the kernel won't do any user-space accesses during the TLB shoot down, a - * compiler might use speculation or lfetch.fault on what happens to be a valid - * user-space address. This in turn could trigger a TLB miss fault (or a VHPT - * walk) and re-insert a TLB entry we just removed. Slow mode avoids such - * problems. (We could make fast-mode work by switching the current task to a - * different "mm" during the shootdown.) --davidm 08/02/2002 - */ - tlb->nr = (num_online_cpus() == 1) ? ~0U : 0; + tlb->nr = 0; tlb->fullmm = full_mm_flush; tlb->start_addr = ~0UL; } @@ -214,11 +194,6 @@ static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page) { tlb->need_flush = 1; - if (tlb_fast_mode(tlb)) { - free_page_and_swap_cache(page); - return 1; /* avoid calling tlb_flush_mmu */ - } - if (!tlb->nr && tlb->pages == tlb->local) __tlb_alloc_page(tlb); diff --git a/arch/m68k/include/asm/gpio.h b/arch/m68k/include/asm/gpio.h index 8cc83431805..2f6eec1e34b 100644 --- a/arch/m68k/include/asm/gpio.h +++ b/arch/m68k/include/asm/gpio.h @@ -86,6 +86,7 @@ static inline int gpio_cansleep(unsigned gpio) return gpio < MCFGPIO_PIN_MAX ? 0 : __gpio_cansleep(gpio); } +#ifndef CONFIG_GPIOLIB static inline int gpio_request_one(unsigned gpio, unsigned long flags, const char *label) { int err; @@ -105,5 +106,5 @@ static inline int gpio_request_one(unsigned gpio, unsigned long flags, const cha return err; } - +#endif /* !CONFIG_GPIOLIB */ #endif diff --git a/arch/m68k/kernel/head.S b/arch/m68k/kernel/head.S index d197e7ff62c..ac85f16534a 100644 --- a/arch/m68k/kernel/head.S +++ b/arch/m68k/kernel/head.S @@ -2752,11 +2752,9 @@ func_return get_new_page #ifdef CONFIG_MAC L(scc_initable_mac): - .byte 9,12 /* Reset */ .byte 4,0x44 /* x16, 1 stopbit, no parity */ .byte 3,0xc0 /* receiver: 8 bpc */ .byte 5,0xe2 /* transmitter: 8 bpc, assert dtr/rts */ - .byte 9,0 /* no interrupts */ .byte 10,0 /* NRZ */ .byte 11,0x50 /* use baud rate generator */ .byte 12,1,13,0 /* 38400 baud */ @@ -2899,6 +2897,7 @@ func_start serial_init,%d0/%d1/%a0/%a1 is_not_mac(L(serial_init_not_mac)) #ifdef SERIAL_DEBUG + /* You may define either or both of these. 
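(For the SCC reset sequence below, the delay constant follows from the timing in its comment: the part needs roughly 5 PCLK cycles after a register-9 reset before the next access, and 5 / 3.6864 MHz ≈ 1.36 µs, which is ≈ 68 cycles at a 50 MHz CPU clock; at roughly two cycles per subq/jne iteration that yields the movel #35,%d0 loop count. The per-iteration cost varies across 68k models, so the constant presumably errs on the generous side.)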
*/ #define MAC_USE_SCC_A /* Modem port */ #define MAC_USE_SCC_B /* Printer port */ @@ -2908,9 +2907,21 @@ func_start serial_init,%d0/%d1/%a0/%a1 #define mac_scc_cha_b_data_offset 0x4 #define mac_scc_cha_a_data_offset 0x6 +#if defined(MAC_USE_SCC_A) || defined(MAC_USE_SCC_B) + movel %pc@(L(mac_sccbase)),%a0 + /* Reset SCC device */ + moveb #9,%a0@(mac_scc_cha_a_ctrl_offset) + moveb #0xc0,%a0@(mac_scc_cha_a_ctrl_offset) + /* Wait for 5 PCLK cycles, which is about 68 CPU cycles */ + /* 5 / 3.6864 MHz = approx. 1.36 us = 68 / 50 MHz */ + movel #35,%d0 +5: + subq #1,%d0 + jne 5b +#endif + #ifdef MAC_USE_SCC_A /* Initialize channel A */ - movel %pc@(L(mac_sccbase)),%a0 lea %pc@(L(scc_initable_mac)),%a1 5: moveb %a1@+,%d0 jmi 6f @@ -2922,9 +2933,6 @@ func_start serial_init,%d0/%d1/%a0/%a1 #ifdef MAC_USE_SCC_B /* Initialize channel B */ -#ifndef MAC_USE_SCC_A /* Load mac_sccbase only if needed */ - movel %pc@(L(mac_sccbase)),%a0 -#endif /* MAC_USE_SCC_A */ lea %pc@(L(scc_initable_mac)),%a1 7: moveb %a1@+,%d0 jmi 8f @@ -2933,6 +2941,7 @@ func_start serial_init,%d0/%d1/%a0/%a1 jra 7b 8: #endif /* MAC_USE_SCC_B */ + #endif /* SERIAL_DEBUG */ jra L(serial_init_done) @@ -3006,17 +3015,17 @@ func_start serial_putc,%d0/%d1/%a0/%a1 #ifdef SERIAL_DEBUG -#ifdef MAC_USE_SCC_A +#if defined(MAC_USE_SCC_A) || defined(MAC_USE_SCC_B) movel %pc@(L(mac_sccbase)),%a1 +#endif + +#ifdef MAC_USE_SCC_A 3: btst #2,%a1@(mac_scc_cha_a_ctrl_offset) jeq 3b moveb %d0,%a1@(mac_scc_cha_a_data_offset) #endif /* MAC_USE_SCC_A */ #ifdef MAC_USE_SCC_B -#ifndef MAC_USE_SCC_A /* Load mac_sccbase only if needed */ - movel %pc@(L(mac_sccbase)),%a1 -#endif /* MAC_USE_SCC_A */ 4: btst #2,%a1@(mac_scc_cha_b_ctrl_offset) jeq 4b moveb %d0,%a1@(mac_scc_cha_b_data_offset) diff --git a/arch/metag/include/asm/hugetlb.h b/arch/metag/include/asm/hugetlb.h index f545477e61f..471f481e67f 100644 --- a/arch/metag/include/asm/hugetlb.h +++ b/arch/metag/include/asm/hugetlb.h @@ -2,6 +2,7 @@ #define _ASM_METAG_HUGETLB_H #include <asm/page.h> +#include <asm-generic/hugetlb.h> static inline int is_hugepage_only_range(struct mm_struct *mm, diff --git a/arch/microblaze/include/asm/cacheflush.h b/arch/microblaze/include/asm/cacheflush.h index 0f553bc009a..ffea82a16d2 100644 --- a/arch/microblaze/include/asm/cacheflush.h +++ b/arch/microblaze/include/asm/cacheflush.h @@ -102,21 +102,23 @@ do { \ #define flush_cache_range(vma, start, len) do { } while (0) -#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ -do { \ - u32 addr = virt_to_phys(dst); \ - memcpy((dst), (src), (len)); \ - if (vma->vm_flags & VM_EXEC) { \ - invalidate_icache_range((unsigned) (addr), \ - (unsigned) (addr) + PAGE_SIZE); \ - flush_dcache_range((unsigned) (addr), \ - (unsigned) (addr) + PAGE_SIZE); \ - } \ -} while (0) - -#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ -do { \ - memcpy((dst), (src), (len)); \ -} while (0) +static inline void copy_to_user_page(struct vm_area_struct *vma, + struct page *page, unsigned long vaddr, + void *dst, void *src, int len) +{ + u32 addr = virt_to_phys(dst); + memcpy(dst, src, len); + if (vma->vm_flags & VM_EXEC) { + invalidate_icache_range(addr, addr + PAGE_SIZE); + flush_dcache_range(addr, addr + PAGE_SIZE); + } +} + +static inline void copy_from_user_page(struct vm_area_struct *vma, + struct page *page, unsigned long vaddr, + void *dst, void *src, int len) +{ + memcpy(dst, src, len); +} #endif /* _ASM_MICROBLAZE_CACHEFLUSH_H */ diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h index 
efe59d88178..04e49553bdf 100644 --- a/arch/microblaze/include/asm/uaccess.h +++ b/arch/microblaze/include/asm/uaccess.h @@ -99,13 +99,13 @@ static inline int access_ok(int type, const void __user *addr, if ((get_fs().seg < ((unsigned long)addr)) || (get_fs().seg < ((unsigned long)addr + size - 1))) { pr_debug("ACCESS fail: %s at 0x%08x (size 0x%x), seg 0x%08x\n", - type ? "WRITE" : "READ ", (u32)addr, (u32)size, + type ? "WRITE" : "READ ", (__force u32)addr, (u32)size, (u32)get_fs().seg); return 0; } ok: pr_debug("ACCESS OK: %s at 0x%08x (size 0x%x), seg 0x%08x\n", - type ? "WRITE" : "READ ", (u32)addr, (u32)size, + type ? "WRITE" : "READ ", (__force u32)addr, (u32)size, (u32)get_fs().seg); return 1; } diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c index b0baa299f89..01b1b3f94fe 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c @@ -428,13 +428,16 @@ static void octeon_restart(char *command) */ static void octeon_kill_core(void *arg) { - mb(); - if (octeon_is_simulation()) { - /* The simulator needs the watchdog to stop for dead cores */ - cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0); + if (octeon_is_simulation()) /* A break instruction causes the simulator stop a core */ - asm volatile ("sync\nbreak"); - } + asm volatile ("break" ::: "memory"); + + local_irq_disable(); + /* Disable watchdog on this core. */ + cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0); + /* Spin in a low power mode. */ + while (true) + asm volatile ("wait" ::: "memory"); } diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index 143875c6c95..4d6fa0bf130 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -496,10 +496,6 @@ struct kvm_mips_callbacks { uint32_t cause); int (*irq_clear) (struct kvm_vcpu *vcpu, unsigned int priority, uint32_t cause); - int (*vcpu_ioctl_get_regs) (struct kvm_vcpu *vcpu, - struct kvm_regs *regs); - int (*vcpu_ioctl_set_regs) (struct kvm_vcpu *vcpu, - struct kvm_regs *regs); }; extern struct kvm_mips_callbacks *kvm_mips_callbacks; int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks); diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h index 820116067c1..516e6e9a559 100644 --- a/arch/mips/include/asm/mmu_context.h +++ b/arch/mips/include/asm/mmu_context.h @@ -117,7 +117,7 @@ get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) if (! ((asid += ASID_INC) & ASID_MASK) ) { if (cpu_has_vtag_icache) flush_icache_all(); -#ifdef CONFIG_VIRTUALIZATION +#ifdef CONFIG_KVM kvm_local_flush_tlb_all(); /* start new asid cycle */ #else local_flush_tlb_all(); /* start new asid cycle */ diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h index a3186f2bb8a..5e6cd094739 100644 --- a/arch/mips/include/asm/ptrace.h +++ b/arch/mips/include/asm/ptrace.h @@ -16,6 +16,38 @@ #include <asm/isadep.h> #include <uapi/asm/ptrace.h> +/* + * This struct defines the way the registers are stored on the stack during a + * system call/exception. As usual the registers k0/k1 aren't being saved. + */ +struct pt_regs { +#ifdef CONFIG_32BIT + /* Pad bytes for argument save space on the stack. */ + unsigned long pad0[6]; +#endif + + /* Saved main processor registers. */ + unsigned long regs[32]; + + /* Saved special registers. 
*/ + unsigned long cp0_status; + unsigned long hi; + unsigned long lo; +#ifdef CONFIG_CPU_HAS_SMARTMIPS + unsigned long acx; +#endif + unsigned long cp0_badvaddr; + unsigned long cp0_cause; + unsigned long cp0_epc; +#ifdef CONFIG_MIPS_MT_SMTC + unsigned long cp0_tcstatus; +#endif /* CONFIG_MIPS_MT_SMTC */ +#ifdef CONFIG_CPU_CAVIUM_OCTEON + unsigned long long mpl[3]; /* MTM{0,1,2} */ + unsigned long long mtp[3]; /* MTP{0,1,2} */ +#endif +} __aligned(8); + struct task_struct; extern int ptrace_getregs(struct task_struct *child, __s64 __user *data); diff --git a/arch/mips/include/uapi/asm/kvm.h b/arch/mips/include/uapi/asm/kvm.h index 85789eacbf1..f09ff5ae205 100644 --- a/arch/mips/include/uapi/asm/kvm.h +++ b/arch/mips/include/uapi/asm/kvm.h @@ -1,55 +1,135 @@ /* -* This file is subject to the terms and conditions of the GNU General Public -* License. See the file "COPYING" in the main directory of this archive -* for more details. -* -* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. -* Authors: Sanjay Lal <sanjayl@kymasys.com> -*/ + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. + * Copyright (C) 2013 Cavium, Inc. + * Authors: Sanjay Lal <sanjayl@kymasys.com> + */ #ifndef __LINUX_KVM_MIPS_H #define __LINUX_KVM_MIPS_H #include <linux/types.h> -#define __KVM_MIPS - -#define N_MIPS_COPROC_REGS 32 -#define N_MIPS_COPROC_SEL 8 +/* + * KVM MIPS specific structures and definitions. + * + * Some parts derived from the x86 version of this file. + */ -/* for KVM_GET_REGS and KVM_SET_REGS */ +/* + * for KVM_GET_REGS and KVM_SET_REGS + * + * If Config[AT] is zero (32-bit CPU), the register contents are + * stored in the lower 32-bits of the struct kvm_regs fields and sign + * extended to 64-bits. + */ struct kvm_regs { - __u32 gprs[32]; - __u32 hi; - __u32 lo; - __u32 pc; - - __u32 cp0reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL]; -}; - -/* for KVM_GET_SREGS and KVM_SET_SREGS */ -struct kvm_sregs { + /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */ + __u64 gpr[32]; + __u64 hi; + __u64 lo; + __u64 pc; }; -/* for KVM_GET_FPU and KVM_SET_FPU */ +/* + * for KVM_GET_FPU and KVM_SET_FPU + * + * If Status[FR] is zero (32-bit FPU), the upper 32-bits of the FPRs + * are zero filled. + */ struct kvm_fpu { + __u64 fpr[32]; + __u32 fir; + __u32 fccr; + __u32 fexr; + __u32 fenr; + __u32 fcsr; + __u32 pad; }; + +/* + * For MIPS, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access CP0 + * registers. The id field is broken down as follows: + * + * bits[2..0] - Register 'sel' index. + * bits[7..3] - Register 'rd' index. + * bits[15..8] - Must be zero. + * bits[31..16] - 1 -> CP0 registers. + * bits[51..32] - Must be zero. + * bits[63..52] - As per linux/kvm.h + * + * Other sets registers may be added in the future. Each set would + * have its own identifier in bits[31..16]. + * + * The registers defined in struct kvm_regs are also accessible, the + * id values for these are below. 
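A sketch of composing an id under this layout (KVM_REG_MIPS and the KVM_REG_SIZE_* constants come from <linux/kvm.h>; the 0x10000 CP0 tag in bits [31:16] matches the MIPS_CP0_32/MIPS_CP0_64 macros added to kvm_mips.c further down in this patch):

    #include <linux/kvm.h>

    /* rd in bits [7:3], sel in bits [2:0], 0x10000 marks a CP0 register. */
    #define MIPS_CP0_ID32(rd, sel) \
            (KVM_REG_MIPS | KVM_REG_SIZE_U32 | 0x10000 | (8 * (rd) + (sel)))

    /* e.g. Status is CP0 register 12, select 0, so
     * MIPS_CP0_ID32(12, 0) == KVM_REG_MIPS_CP0_STATUS below. */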
+ */ + +#define KVM_REG_MIPS_R0 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0) +#define KVM_REG_MIPS_R1 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 1) +#define KVM_REG_MIPS_R2 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 2) +#define KVM_REG_MIPS_R3 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 3) +#define KVM_REG_MIPS_R4 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 4) +#define KVM_REG_MIPS_R5 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 5) +#define KVM_REG_MIPS_R6 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 6) +#define KVM_REG_MIPS_R7 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 7) +#define KVM_REG_MIPS_R8 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 8) +#define KVM_REG_MIPS_R9 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 9) +#define KVM_REG_MIPS_R10 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 10) +#define KVM_REG_MIPS_R11 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 11) +#define KVM_REG_MIPS_R12 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 12) +#define KVM_REG_MIPS_R13 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 13) +#define KVM_REG_MIPS_R14 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 14) +#define KVM_REG_MIPS_R15 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 15) +#define KVM_REG_MIPS_R16 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 16) +#define KVM_REG_MIPS_R17 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 17) +#define KVM_REG_MIPS_R18 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 18) +#define KVM_REG_MIPS_R19 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 19) +#define KVM_REG_MIPS_R20 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 20) +#define KVM_REG_MIPS_R21 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 21) +#define KVM_REG_MIPS_R22 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 22) +#define KVM_REG_MIPS_R23 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 23) +#define KVM_REG_MIPS_R24 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 24) +#define KVM_REG_MIPS_R25 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 25) +#define KVM_REG_MIPS_R26 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 26) +#define KVM_REG_MIPS_R27 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 27) +#define KVM_REG_MIPS_R28 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 28) +#define KVM_REG_MIPS_R29 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 29) +#define KVM_REG_MIPS_R30 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 30) +#define KVM_REG_MIPS_R31 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 31) + +#define KVM_REG_MIPS_HI (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 32) +#define KVM_REG_MIPS_LO (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 33) +#define KVM_REG_MIPS_PC (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 34) + +/* + * KVM MIPS specific structures and definitions + * + */ struct kvm_debug_exit_arch { + __u64 epc; }; /* for KVM_SET_GUEST_DEBUG */ struct kvm_guest_debug_arch { }; +/* definition of registers in kvm_run */ +struct kvm_sync_regs { +}; + +/* dummy definition */ +struct kvm_sregs { +}; + struct kvm_mips_interrupt { /* in */ __u32 cpu; __u32 irq; }; -/* definition of registers in kvm_run */ -struct kvm_sync_regs { -}; - #endif /* __LINUX_KVM_MIPS_H */ diff --git a/arch/mips/include/uapi/asm/ptrace.h b/arch/mips/include/uapi/asm/ptrace.h index 4d58d846870..b26f7e31727 100644 --- a/arch/mips/include/uapi/asm/ptrace.h +++ b/arch/mips/include/uapi/asm/ptrace.h @@ -22,16 +22,12 @@ #define DSP_CONTROL 77 #define ACX 78 +#ifndef __KERNEL__ /* * This struct defines the way the registers are stored on the stack during a * system call/exception. As usual the registers k0/k1 aren't being saved. */ struct pt_regs { -#ifdef CONFIG_32BIT - /* Pad bytes for argument save space on the stack. */ - unsigned long pad0[6]; -#endif - /* Saved main processor registers. 
*/ unsigned long regs[32]; @@ -39,20 +35,11 @@ struct pt_regs { unsigned long cp0_status; unsigned long hi; unsigned long lo; -#ifdef CONFIG_CPU_HAS_SMARTMIPS - unsigned long acx; -#endif unsigned long cp0_badvaddr; unsigned long cp0_cause; unsigned long cp0_epc; -#ifdef CONFIG_MIPS_MT_SMTC - unsigned long cp0_tcstatus; -#endif /* CONFIG_MIPS_MT_SMTC */ -#ifdef CONFIG_CPU_CAVIUM_OCTEON - unsigned long long mpl[3]; /* MTM{0,1,2} */ - unsigned long long mtp[3]; /* MTP{0,1,2} */ -#endif } __attribute__ ((aligned (8))); +#endif /* __KERNEL__ */ /* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */ #define PTRACE_GETREGS 12 diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c index e06f777e9c4..1188e00bb12 100644 --- a/arch/mips/kernel/binfmt_elfn32.c +++ b/arch/mips/kernel/binfmt_elfn32.c @@ -119,4 +119,15 @@ MODULE_AUTHOR("Ralf Baechle (ralf@linux-mips.org)"); #undef TASK_SIZE #define TASK_SIZE TASK_SIZE32 +#undef cputime_to_timeval +#define cputime_to_timeval cputime_to_compat_timeval +static __inline__ void +cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value) +{ + unsigned long jiffies = cputime_to_jiffies(cputime); + + value->tv_usec = (jiffies % HZ) * (1000000L / HZ); + value->tv_sec = jiffies / HZ; +} + #include "../../../fs/binfmt_elf.c" diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c index 97c5a1668e5..202e581e609 100644 --- a/arch/mips/kernel/binfmt_elfo32.c +++ b/arch/mips/kernel/binfmt_elfo32.c @@ -162,4 +162,15 @@ MODULE_AUTHOR("Ralf Baechle (ralf@linux-mips.org)"); #undef TASK_SIZE #define TASK_SIZE TASK_SIZE32 +#undef cputime_to_timeval +#define cputime_to_timeval cputime_to_compat_timeval +static __inline__ void +cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value) +{ + unsigned long jiffies = cputime_to_jiffies(cputime); + + value->tv_usec = (jiffies % HZ) * (1000000L / HZ); + value->tv_sec = jiffies / HZ; +} + #include "../../../fs/binfmt_elf.c" diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c index cf5509f13dd..dba90ec0dc3 100644 --- a/arch/mips/kernel/ftrace.c +++ b/arch/mips/kernel/ftrace.c @@ -25,12 +25,16 @@ #define MCOUNT_OFFSET_INSNS 4 #endif +#ifdef CONFIG_DYNAMIC_FTRACE + /* Arch override because MIPS doesn't need to run this from stop_machine() */ void arch_ftrace_update_code(int command) { ftrace_modify_all_code(command); } +#endif + /* * Check if the address is in kernel space * diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c index 3b09b888afa..0c655deeea4 100644 --- a/arch/mips/kernel/idle.c +++ b/arch/mips/kernel/idle.c @@ -93,26 +93,27 @@ static void rm7k_wait_irqoff(void) } /* - * The Au1xxx wait is available only if using 32khz counter or - * external timer source, but specifically not CP0 Counter. - * alchemy/common/time.c may override cpu_wait! + * Au1 'wait' is only useful when the 32kHz counter is used as timer, + * since coreclock (and the cp0 counter) stops upon executing it. Only an + * interrupt can wake it, so they must be enabled before entering idle modes. 
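The rewritten au1k_wait below moves the Status write into the asm block so that enabling interrupts and entering wait are back-to-back; thanks to the short CP0 write hazard, wait is underway before the enable takes effect, closing the window in which a wakeup interrupt could be taken early and leave the core asleep with nothing left to wake it. A stripped-down C sketch of the idiom (read_c0_status() is the stock MIPS accessor; the cacheline prefetch and padding nops of the real routine are dropped):

    static void wait_with_irqs_on(void)
    {
            unsigned long st = read_c0_status() | 1;    /* Status.IE */

            __asm__ __volatile__(
            "       .set    mips3           \n"
            "       mtc0    %0, $12         \n"     /* write c0_status */
            "       wait                    \n"
            "       .set    mips0           \n"
            : : "r" (st));
    }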
*/ static void au1k_wait(void) { + unsigned long c0status = read_c0_status() | 1; /* irqs on */ + __asm__( " .set mips3 \n" " cache 0x14, 0(%0) \n" " cache 0x14, 32(%0) \n" " sync \n" - " nop \n" + " mtc0 %1, $12 \n" /* wr c0status */ " wait \n" " nop \n" " nop \n" " nop \n" " nop \n" " .set mips0 \n" - : : "r" (au1k_wait)); - local_irq_enable(); + : : "r" (au1k_wait), "r" (c0status)); } static int __initdata nowait; diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c index 93c070b41b0..6fa198db899 100644 --- a/arch/mips/kernel/rtlx.c +++ b/arch/mips/kernel/rtlx.c @@ -40,6 +40,7 @@ #include <asm/processor.h> #include <asm/vpe.h> #include <asm/rtlx.h> +#include <asm/setup.h> static struct rtlx_info *rtlx; static int major; diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index e3be67012d7..a75ae40184a 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -897,22 +897,24 @@ out_sigsegv: asmlinkage void do_tr(struct pt_regs *regs) { - unsigned int opcode, tcode = 0; + u32 opcode, tcode = 0; u16 instr[2]; - unsigned long epc = exception_epc(regs); + unsigned long epc = msk_isa16_mode(exception_epc(regs)); - if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc))) || - (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2)))) + if (get_isa16_mode(regs->cp0_epc)) { + if (__get_user(instr[0], (u16 __user *)(epc + 0)) || + __get_user(instr[1], (u16 __user *)(epc + 2))) goto out_sigsegv; - opcode = (instr[0] << 16) | instr[1]; - - /* Immediate versions don't provide a code. */ - if (!(opcode & OPCODE)) { - if (get_isa16_mode(regs->cp0_epc)) - /* microMIPS */ - tcode = (opcode >> 12) & 0x1f; - else - tcode = ((opcode >> 6) & ((1 << 10) - 1)); + opcode = (instr[0] << 16) | instr[1]; + /* Immediate versions don't provide a code. */ + if (!(opcode & OPCODE)) + tcode = (opcode >> 12) & ((1 << 4) - 1); + } else { + if (__get_user(opcode, (u32 __user *)epc)) + goto out_sigsegv; + /* Immediate versions don't provide a code. 
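The two encodings do_tr now separates, as a small C sketch (for microMIPS, opcode is the two halfwords packed as (instr[0] << 16) | instr[1], so the 4-bit code field lands at bits [15:12] of the packed word; the classic 32-bit encoding carries a 10-bit code at bits [15:6]):

    /* Extract the optional code field of a register-form trap
     * instruction (TEQ, TLT, ...). */
    static unsigned int trap_code(unsigned int opcode, int micromips)
    {
            return micromips ? (opcode >> 12) & 0xf
                             : (opcode >> 6) & 0x3ff;
    }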
*/ + if (!(opcode & OPCODE)) + tcode = (opcode >> 6) & ((1 << 10) - 1); } do_trap_or_bp(regs, tcode, "Trap"); diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c index e0dad028979..dd203e59e6f 100644 --- a/arch/mips/kvm/kvm_mips.c +++ b/arch/mips/kvm/kvm_mips.c @@ -195,7 +195,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm) long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { - return -EINVAL; + return -ENOIOCTLCMD; } void kvm_arch_free_memslot(struct kvm_memory_slot *free, @@ -401,7 +401,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg) { - return -EINVAL; + return -ENOIOCTLCMD; } int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) @@ -475,14 +475,248 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { - return -EINVAL; + return -ENOIOCTLCMD; } int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { - return -EINVAL; + return -ENOIOCTLCMD; +} + +#define MIPS_CP0_32(_R, _S) \ + (KVM_REG_MIPS | KVM_REG_SIZE_U32 | 0x10000 | (8 * (_R) + (_S))) + +#define MIPS_CP0_64(_R, _S) \ + (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0x10000 | (8 * (_R) + (_S))) + +#define KVM_REG_MIPS_CP0_INDEX MIPS_CP0_32(0, 0) +#define KVM_REG_MIPS_CP0_ENTRYLO0 MIPS_CP0_64(2, 0) +#define KVM_REG_MIPS_CP0_ENTRYLO1 MIPS_CP0_64(3, 0) +#define KVM_REG_MIPS_CP0_CONTEXT MIPS_CP0_64(4, 0) +#define KVM_REG_MIPS_CP0_USERLOCAL MIPS_CP0_64(4, 2) +#define KVM_REG_MIPS_CP0_PAGEMASK MIPS_CP0_32(5, 0) +#define KVM_REG_MIPS_CP0_PAGEGRAIN MIPS_CP0_32(5, 1) +#define KVM_REG_MIPS_CP0_WIRED MIPS_CP0_32(6, 0) +#define KVM_REG_MIPS_CP0_HWRENA MIPS_CP0_32(7, 0) +#define KVM_REG_MIPS_CP0_BADVADDR MIPS_CP0_64(8, 0) +#define KVM_REG_MIPS_CP0_COUNT MIPS_CP0_32(9, 0) +#define KVM_REG_MIPS_CP0_ENTRYHI MIPS_CP0_64(10, 0) +#define KVM_REG_MIPS_CP0_COMPARE MIPS_CP0_32(11, 0) +#define KVM_REG_MIPS_CP0_STATUS MIPS_CP0_32(12, 0) +#define KVM_REG_MIPS_CP0_CAUSE MIPS_CP0_32(13, 0) +#define KVM_REG_MIPS_CP0_EBASE MIPS_CP0_64(15, 1) +#define KVM_REG_MIPS_CP0_CONFIG MIPS_CP0_32(16, 0) +#define KVM_REG_MIPS_CP0_CONFIG1 MIPS_CP0_32(16, 1) +#define KVM_REG_MIPS_CP0_CONFIG2 MIPS_CP0_32(16, 2) +#define KVM_REG_MIPS_CP0_CONFIG3 MIPS_CP0_32(16, 3) +#define KVM_REG_MIPS_CP0_CONFIG7 MIPS_CP0_32(16, 7) +#define KVM_REG_MIPS_CP0_XCONTEXT MIPS_CP0_64(20, 0) +#define KVM_REG_MIPS_CP0_ERROREPC MIPS_CP0_64(30, 0) + +static u64 kvm_mips_get_one_regs[] = { + KVM_REG_MIPS_R0, + KVM_REG_MIPS_R1, + KVM_REG_MIPS_R2, + KVM_REG_MIPS_R3, + KVM_REG_MIPS_R4, + KVM_REG_MIPS_R5, + KVM_REG_MIPS_R6, + KVM_REG_MIPS_R7, + KVM_REG_MIPS_R8, + KVM_REG_MIPS_R9, + KVM_REG_MIPS_R10, + KVM_REG_MIPS_R11, + KVM_REG_MIPS_R12, + KVM_REG_MIPS_R13, + KVM_REG_MIPS_R14, + KVM_REG_MIPS_R15, + KVM_REG_MIPS_R16, + KVM_REG_MIPS_R17, + KVM_REG_MIPS_R18, + KVM_REG_MIPS_R19, + KVM_REG_MIPS_R20, + KVM_REG_MIPS_R21, + KVM_REG_MIPS_R22, + KVM_REG_MIPS_R23, + KVM_REG_MIPS_R24, + KVM_REG_MIPS_R25, + KVM_REG_MIPS_R26, + KVM_REG_MIPS_R27, + KVM_REG_MIPS_R28, + KVM_REG_MIPS_R29, + KVM_REG_MIPS_R30, + KVM_REG_MIPS_R31, + + KVM_REG_MIPS_HI, + KVM_REG_MIPS_LO, + KVM_REG_MIPS_PC, + + KVM_REG_MIPS_CP0_INDEX, + KVM_REG_MIPS_CP0_CONTEXT, + KVM_REG_MIPS_CP0_PAGEMASK, + KVM_REG_MIPS_CP0_WIRED, + KVM_REG_MIPS_CP0_BADVADDR, + KVM_REG_MIPS_CP0_ENTRYHI, + KVM_REG_MIPS_CP0_STATUS, + KVM_REG_MIPS_CP0_CAUSE, + /* EPC set via kvm_regs, et al. 
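The KVM_GET_REG_LIST handler added further below implements the usual two-call handshake. A hedged userspace sketch (struct kvm_reg_list is the stock <linux/kvm.h> type with a flexible reg[] array; error paths mostly trimmed):

    #include <linux/kvm.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>

    static struct kvm_reg_list *fetch_reg_list(int vcpu_fd)
    {
            struct kvm_reg_list probe = { .n = 0 }, *list;

            /* First call: the kernel rewrites n with the real count and
             * fails with E2BIG because our buffer holds zero ids. */
            ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);

            list = malloc(sizeof(*list) + probe.n * sizeof(__u64));
            if (!list)
                    return NULL;
            list->n = probe.n;
            if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) < 0) {
                    free(list);
                    return NULL;
            }
            return list;
    }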
*/ + KVM_REG_MIPS_CP0_CONFIG, + KVM_REG_MIPS_CP0_CONFIG1, + KVM_REG_MIPS_CP0_CONFIG2, + KVM_REG_MIPS_CP0_CONFIG3, + KVM_REG_MIPS_CP0_CONFIG7, + KVM_REG_MIPS_CP0_ERROREPC +}; + +static int kvm_mips_get_reg(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + s64 v; + + switch (reg->id) { + case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31: + v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0]; + break; + case KVM_REG_MIPS_HI: + v = (long)vcpu->arch.hi; + break; + case KVM_REG_MIPS_LO: + v = (long)vcpu->arch.lo; + break; + case KVM_REG_MIPS_PC: + v = (long)vcpu->arch.pc; + break; + + case KVM_REG_MIPS_CP0_INDEX: + v = (long)kvm_read_c0_guest_index(cop0); + break; + case KVM_REG_MIPS_CP0_CONTEXT: + v = (long)kvm_read_c0_guest_context(cop0); + break; + case KVM_REG_MIPS_CP0_PAGEMASK: + v = (long)kvm_read_c0_guest_pagemask(cop0); + break; + case KVM_REG_MIPS_CP0_WIRED: + v = (long)kvm_read_c0_guest_wired(cop0); + break; + case KVM_REG_MIPS_CP0_BADVADDR: + v = (long)kvm_read_c0_guest_badvaddr(cop0); + break; + case KVM_REG_MIPS_CP0_ENTRYHI: + v = (long)kvm_read_c0_guest_entryhi(cop0); + break; + case KVM_REG_MIPS_CP0_STATUS: + v = (long)kvm_read_c0_guest_status(cop0); + break; + case KVM_REG_MIPS_CP0_CAUSE: + v = (long)kvm_read_c0_guest_cause(cop0); + break; + case KVM_REG_MIPS_CP0_ERROREPC: + v = (long)kvm_read_c0_guest_errorepc(cop0); + break; + case KVM_REG_MIPS_CP0_CONFIG: + v = (long)kvm_read_c0_guest_config(cop0); + break; + case KVM_REG_MIPS_CP0_CONFIG1: + v = (long)kvm_read_c0_guest_config1(cop0); + break; + case KVM_REG_MIPS_CP0_CONFIG2: + v = (long)kvm_read_c0_guest_config2(cop0); + break; + case KVM_REG_MIPS_CP0_CONFIG3: + v = (long)kvm_read_c0_guest_config3(cop0); + break; + case KVM_REG_MIPS_CP0_CONFIG7: + v = (long)kvm_read_c0_guest_config7(cop0); + break; + default: + return -EINVAL; + } + if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) { + u64 __user *uaddr64 = (u64 __user *)(long)reg->addr; + return put_user(v, uaddr64); + } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) { + u32 __user *uaddr32 = (u32 __user *)(long)reg->addr; + u32 v32 = (u32)v; + return put_user(v32, uaddr32); + } else { + return -EINVAL; + } +} + +static int kvm_mips_set_reg(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + u64 v; + + if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) { + u64 __user *uaddr64 = (u64 __user *)(long)reg->addr; + + if (get_user(v, uaddr64) != 0) + return -EFAULT; + } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) { + u32 __user *uaddr32 = (u32 __user *)(long)reg->addr; + s32 v32; + + if (get_user(v32, uaddr32) != 0) + return -EFAULT; + v = (s64)v32; + } else { + return -EINVAL; + } + + switch (reg->id) { + case KVM_REG_MIPS_R0: + /* Silently ignore requests to set $0 */ + break; + case KVM_REG_MIPS_R1 ... 
KVM_REG_MIPS_R31: + vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v; + break; + case KVM_REG_MIPS_HI: + vcpu->arch.hi = v; + break; + case KVM_REG_MIPS_LO: + vcpu->arch.lo = v; + break; + case KVM_REG_MIPS_PC: + vcpu->arch.pc = v; + break; + + case KVM_REG_MIPS_CP0_INDEX: + kvm_write_c0_guest_index(cop0, v); + break; + case KVM_REG_MIPS_CP0_CONTEXT: + kvm_write_c0_guest_context(cop0, v); + break; + case KVM_REG_MIPS_CP0_PAGEMASK: + kvm_write_c0_guest_pagemask(cop0, v); + break; + case KVM_REG_MIPS_CP0_WIRED: + kvm_write_c0_guest_wired(cop0, v); + break; + case KVM_REG_MIPS_CP0_BADVADDR: + kvm_write_c0_guest_badvaddr(cop0, v); + break; + case KVM_REG_MIPS_CP0_ENTRYHI: + kvm_write_c0_guest_entryhi(cop0, v); + break; + case KVM_REG_MIPS_CP0_STATUS: + kvm_write_c0_guest_status(cop0, v); + break; + case KVM_REG_MIPS_CP0_CAUSE: + kvm_write_c0_guest_cause(cop0, v); + break; + case KVM_REG_MIPS_CP0_ERROREPC: + kvm_write_c0_guest_errorepc(cop0, v); + break; + default: + return -EINVAL; + } + return 0; } long @@ -491,9 +725,38 @@ kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) struct kvm_vcpu *vcpu = filp->private_data; void __user *argp = (void __user *)arg; long r; - int intr; switch (ioctl) { + case KVM_SET_ONE_REG: + case KVM_GET_ONE_REG: { + struct kvm_one_reg reg; + if (copy_from_user(®, argp, sizeof(reg))) + return -EFAULT; + if (ioctl == KVM_SET_ONE_REG) + return kvm_mips_set_reg(vcpu, ®); + else + return kvm_mips_get_reg(vcpu, ®); + } + case KVM_GET_REG_LIST: { + struct kvm_reg_list __user *user_list = argp; + u64 __user *reg_dest; + struct kvm_reg_list reg_list; + unsigned n; + + if (copy_from_user(®_list, user_list, sizeof(reg_list))) + return -EFAULT; + n = reg_list.n; + reg_list.n = ARRAY_SIZE(kvm_mips_get_one_regs); + if (copy_to_user(user_list, ®_list, sizeof(reg_list))) + return -EFAULT; + if (n < reg_list.n) + return -E2BIG; + reg_dest = user_list->reg; + if (copy_to_user(reg_dest, kvm_mips_get_one_regs, + sizeof(kvm_mips_get_one_regs))) + return -EFAULT; + return 0; + } case KVM_NMI: /* Treat the NMI as a CPU reset */ r = kvm_mips_reset_vcpu(vcpu); @@ -505,8 +768,6 @@ kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) if (copy_from_user(&irq, argp, sizeof(irq))) goto out; - intr = (int)irq.irq; - kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, irq.irq); @@ -514,7 +775,7 @@ kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) break; } default: - r = -EINVAL; + r = -ENOIOCTLCMD; } out: @@ -565,7 +826,7 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) switch (ioctl) { default: - r = -EINVAL; + r = -ENOIOCTLCMD; } return r; @@ -593,13 +854,13 @@ void kvm_arch_exit(void) int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { - return -ENOTSUPP; + return -ENOIOCTLCMD; } int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { - return -ENOTSUPP; + return -ENOIOCTLCMD; } int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) @@ -609,12 +870,12 @@ int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { - return -ENOTSUPP; + return -ENOIOCTLCMD; } int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { - return -ENOTSUPP; + return -ENOIOCTLCMD; } int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) @@ -627,6 +888,9 @@ int kvm_dev_ioctl_check_extension(long ext) int r; switch (ext) { + case KVM_CAP_ONE_REG: 
+ r = 1; + break; case KVM_CAP_COALESCED_MMIO: r = KVM_COALESCED_MMIO_PAGE_OFFSET; break; @@ -635,7 +899,6 @@ int kvm_dev_ioctl_check_extension(long ext) break; } return r; - } int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) @@ -677,28 +940,28 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { int i; - for (i = 0; i < 32; i++) - vcpu->arch.gprs[i] = regs->gprs[i]; - + for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++) + vcpu->arch.gprs[i] = regs->gpr[i]; + vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */ vcpu->arch.hi = regs->hi; vcpu->arch.lo = regs->lo; vcpu->arch.pc = regs->pc; - return kvm_mips_callbacks->vcpu_ioctl_set_regs(vcpu, regs); + return 0; } int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { int i; - for (i = 0; i < 32; i++) - regs->gprs[i] = vcpu->arch.gprs[i]; + for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++) + regs->gpr[i] = vcpu->arch.gprs[i]; regs->hi = vcpu->arch.hi; regs->lo = vcpu->arch.lo; regs->pc = vcpu->arch.pc; - return kvm_mips_callbacks->vcpu_ioctl_get_regs(vcpu, regs); + return 0; } void kvm_mips_comparecount_func(unsigned long data) diff --git a/arch/mips/kvm/kvm_trap_emul.c b/arch/mips/kvm/kvm_trap_emul.c index 466aeef044b..30d725321db 100644 --- a/arch/mips/kvm/kvm_trap_emul.c +++ b/arch/mips/kvm/kvm_trap_emul.c @@ -345,54 +345,6 @@ static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu) return ret; } -static int -kvm_trap_emul_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - - kvm_write_c0_guest_index(cop0, regs->cp0reg[MIPS_CP0_TLB_INDEX][0]); - kvm_write_c0_guest_context(cop0, regs->cp0reg[MIPS_CP0_TLB_CONTEXT][0]); - kvm_write_c0_guest_badvaddr(cop0, regs->cp0reg[MIPS_CP0_BAD_VADDR][0]); - kvm_write_c0_guest_entryhi(cop0, regs->cp0reg[MIPS_CP0_TLB_HI][0]); - kvm_write_c0_guest_epc(cop0, regs->cp0reg[MIPS_CP0_EXC_PC][0]); - - kvm_write_c0_guest_status(cop0, regs->cp0reg[MIPS_CP0_STATUS][0]); - kvm_write_c0_guest_cause(cop0, regs->cp0reg[MIPS_CP0_CAUSE][0]); - kvm_write_c0_guest_pagemask(cop0, - regs->cp0reg[MIPS_CP0_TLB_PG_MASK][0]); - kvm_write_c0_guest_wired(cop0, regs->cp0reg[MIPS_CP0_TLB_WIRED][0]); - kvm_write_c0_guest_errorepc(cop0, regs->cp0reg[MIPS_CP0_ERROR_PC][0]); - - return 0; -} - -static int -kvm_trap_emul_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - - regs->cp0reg[MIPS_CP0_TLB_INDEX][0] = kvm_read_c0_guest_index(cop0); - regs->cp0reg[MIPS_CP0_TLB_CONTEXT][0] = kvm_read_c0_guest_context(cop0); - regs->cp0reg[MIPS_CP0_BAD_VADDR][0] = kvm_read_c0_guest_badvaddr(cop0); - regs->cp0reg[MIPS_CP0_TLB_HI][0] = kvm_read_c0_guest_entryhi(cop0); - regs->cp0reg[MIPS_CP0_EXC_PC][0] = kvm_read_c0_guest_epc(cop0); - - regs->cp0reg[MIPS_CP0_STATUS][0] = kvm_read_c0_guest_status(cop0); - regs->cp0reg[MIPS_CP0_CAUSE][0] = kvm_read_c0_guest_cause(cop0); - regs->cp0reg[MIPS_CP0_TLB_PG_MASK][0] = - kvm_read_c0_guest_pagemask(cop0); - regs->cp0reg[MIPS_CP0_TLB_WIRED][0] = kvm_read_c0_guest_wired(cop0); - regs->cp0reg[MIPS_CP0_ERROR_PC][0] = kvm_read_c0_guest_errorepc(cop0); - - regs->cp0reg[MIPS_CP0_CONFIG][0] = kvm_read_c0_guest_config(cop0); - regs->cp0reg[MIPS_CP0_CONFIG][1] = kvm_read_c0_guest_config1(cop0); - regs->cp0reg[MIPS_CP0_CONFIG][2] = kvm_read_c0_guest_config2(cop0); - regs->cp0reg[MIPS_CP0_CONFIG][3] = kvm_read_c0_guest_config3(cop0); - regs->cp0reg[MIPS_CP0_CONFIG][7] = kvm_read_c0_guest_config7(cop0); - - return 0; -} - static 
int kvm_trap_emul_vm_init(struct kvm *kvm) { return 0; @@ -471,8 +423,6 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = { .dequeue_io_int = kvm_mips_dequeue_io_int_cb, .irq_deliver = kvm_mips_irq_deliver_cb, .irq_clear = kvm_mips_irq_clear_cb, - .vcpu_ioctl_get_regs = kvm_trap_emul_ioctl_get_regs, - .vcpu_ioctl_set_regs = kvm_trap_emul_ioctl_set_regs, }; int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks) diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index ce9818eef7d..afeef93f81a 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -301,10 +301,6 @@ static u32 tlb_handler[128] __cpuinitdata; static struct uasm_label labels[128] __cpuinitdata; static struct uasm_reloc relocs[128] __cpuinitdata; -#ifdef CONFIG_64BIT -static int check_for_high_segbits __cpuinitdata; -#endif - static int check_for_high_segbits __cpuinitdata; static unsigned int kscratch_used_mask __cpuinitdata; diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c index fb1569580de..6b5f3406f41 100644 --- a/arch/mips/ralink/of.c +++ b/arch/mips/ralink/of.c @@ -88,7 +88,7 @@ void __init plat_mem_setup(void) __dt_setup_arch(&__dtb_start); if (soc_info.mem_size) - add_memory_region(soc_info.mem_base, soc_info.mem_size, + add_memory_region(soc_info.mem_base, soc_info.mem_size * SZ_1M, BOOT_MEM_RAM); else detect_memory_region(soc_info.mem_base, diff --git a/arch/mn10300/include/asm/irqflags.h b/arch/mn10300/include/asm/irqflags.h index 678f68d5f37..8730c0a3c37 100644 --- a/arch/mn10300/include/asm/irqflags.h +++ b/arch/mn10300/include/asm/irqflags.h @@ -13,9 +13,8 @@ #define _ASM_IRQFLAGS_H #include <asm/cpu-regs.h> -#ifndef __ASSEMBLY__ -#include <linux/smp.h> -#endif +/* linux/smp.h <- linux/irqflags.h needs asm/smp.h first */ +#include <asm/smp.h> /* * interrupt control diff --git a/arch/mn10300/include/asm/smp.h b/arch/mn10300/include/asm/smp.h index 6745dbe6494..56c42417d42 100644 --- a/arch/mn10300/include/asm/smp.h +++ b/arch/mn10300/include/asm/smp.h @@ -24,6 +24,7 @@ #ifndef __ASSEMBLY__ #include <linux/threads.h> #include <linux/cpumask.h> +#include <linux/thread_info.h> #endif #ifdef CONFIG_SMP @@ -85,7 +86,7 @@ extern cpumask_t cpu_boot_map; extern void smp_init_cpus(void); extern void smp_cache_interrupt(void); extern void send_IPI_allbutself(int irq); -extern int smp_nmi_call_function(smp_call_func_t func, void *info, int wait); +extern int smp_nmi_call_function(void (*func)(void *), void *info, int wait); extern void arch_send_call_function_single_ipi(int cpu); extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); @@ -100,6 +101,7 @@ extern void __cpu_die(unsigned int cpu); #ifndef __ASSEMBLY__ static inline void smp_init_cpus(void) {} +#define raw_smp_processor_id() 0 #endif /* __ASSEMBLY__ */ #endif /* CONFIG_SMP */ diff --git a/arch/parisc/include/asm/mmzone.h b/arch/parisc/include/asm/mmzone.h index cc50d33b7b8..b6b34a0987e 100644 --- a/arch/parisc/include/asm/mmzone.h +++ b/arch/parisc/include/asm/mmzone.h @@ -27,7 +27,7 @@ extern struct node_map_data node_data[]; #define PFNNID_SHIFT (30 - PAGE_SHIFT) #define PFNNID_MAP_MAX 512 /* support 512GB */ -extern unsigned char pfnnid_map[PFNNID_MAP_MAX]; +extern signed char pfnnid_map[PFNNID_MAP_MAX]; #ifndef CONFIG_64BIT #define pfn_is_io(pfn) ((pfn & (0xf0000000UL >> PAGE_SHIFT)) == (0xf0000000UL >> PAGE_SHIFT)) @@ -46,7 +46,7 @@ static inline int pfn_to_nid(unsigned long pfn) i = pfn >> PFNNID_SHIFT; BUG_ON(i >= ARRAY_SIZE(pfnnid_map)); - return (int)pfnnid_map[i]; + return 
pfnnid_map[i]; } static inline int pfn_valid(int pfn) diff --git a/arch/parisc/include/asm/pci.h b/arch/parisc/include/asm/pci.h index 3234f492d57..465154076d2 100644 --- a/arch/parisc/include/asm/pci.h +++ b/arch/parisc/include/asm/pci.h @@ -225,4 +225,9 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) return channel ? 15 : 14; } +#define HAVE_PCI_MMAP + +extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, + enum pci_mmap_state mmap_state, int write_combine); + #endif /* __ASM_PARISC_PCI_H */ diff --git a/arch/parisc/kernel/hardware.c b/arch/parisc/kernel/hardware.c index 9e2d2e40852..872275659d9 100644 --- a/arch/parisc/kernel/hardware.c +++ b/arch/parisc/kernel/hardware.c @@ -1205,6 +1205,7 @@ static struct hp_hardware hp_hardware_list[] = { {HPHW_FIO, 0x004, 0x00320, 0x0, "Metheus Frame Buffer"}, {HPHW_FIO, 0x004, 0x00340, 0x0, "BARCO CX4500 VME Grphx Cnsl"}, {HPHW_FIO, 0x004, 0x00360, 0x0, "Hughes TOG VME FDDI"}, + {HPHW_FIO, 0x076, 0x000AD, 0x00, "Crestone Peak RS-232"}, {HPHW_IOA, 0x185, 0x0000B, 0x00, "Java BC Summit Port"}, {HPHW_IOA, 0x1FF, 0x0000B, 0x00, "Hitachi Ghostview Summit Port"}, {HPHW_IOA, 0x580, 0x0000B, 0x10, "U2-IOA BC Runway Port"}, diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S index 36d7f402e48..b743a80eaba 100644 --- a/arch/parisc/kernel/pacache.S +++ b/arch/parisc/kernel/pacache.S @@ -860,7 +860,7 @@ ENTRY(flush_dcache_page_asm) #endif ldil L%dcache_stride, %r1 - ldw R%dcache_stride(%r1), %r1 + ldw R%dcache_stride(%r1), r31 #ifdef CONFIG_64BIT depdi,z 1, 63-PAGE_SHIFT,1, %r25 @@ -868,26 +868,26 @@ ENTRY(flush_dcache_page_asm) depwi,z 1, 31-PAGE_SHIFT,1, %r25 #endif add %r28, %r25, %r25 - sub %r25, %r1, %r25 - - -1: fdc,m %r1(%r28) - fdc,m %r1(%r28) - fdc,m %r1(%r28) - fdc,m %r1(%r28) - fdc,m %r1(%r28) - fdc,m %r1(%r28) - fdc,m %r1(%r28) - fdc,m %r1(%r28) - fdc,m %r1(%r28) - fdc,m %r1(%r28) - fdc,m %r1(%r28) - fdc,m %r1(%r28) - fdc,m %r1(%r28) - fdc,m %r1(%r28) - fdc,m %r1(%r28) + sub %r25, r31, %r25 + + +1: fdc,m r31(%r28) + fdc,m r31(%r28) + fdc,m r31(%r28) + fdc,m r31(%r28) + fdc,m r31(%r28) + fdc,m r31(%r28) + fdc,m r31(%r28) + fdc,m r31(%r28) + fdc,m r31(%r28) + fdc,m r31(%r28) + fdc,m r31(%r28) + fdc,m r31(%r28) + fdc,m r31(%r28) + fdc,m r31(%r28) + fdc,m r31(%r28) cmpb,COND(<<) %r28, %r25,1b - fdc,m %r1(%r28) + fdc,m r31(%r28) sync @@ -936,7 +936,7 @@ ENTRY(flush_icache_page_asm) #endif ldil L%icache_stride, %r1 - ldw R%icache_stride(%r1), %r1 + ldw R%icache_stride(%r1), %r31 #ifdef CONFIG_64BIT depdi,z 1, 63-PAGE_SHIFT,1, %r25 @@ -944,28 +944,28 @@ ENTRY(flush_icache_page_asm) depwi,z 1, 31-PAGE_SHIFT,1, %r25 #endif add %r28, %r25, %r25 - sub %r25, %r1, %r25 + sub %r25, %r31, %r25 /* fic only has the type 26 form on PA1.1, requiring an * explicit space specification, so use %sr4 */ -1: fic,m %r1(%sr4,%r28) - fic,m %r1(%sr4,%r28) - fic,m %r1(%sr4,%r28) - fic,m %r1(%sr4,%r28) - fic,m %r1(%sr4,%r28) - fic,m %r1(%sr4,%r28) - fic,m %r1(%sr4,%r28) - fic,m %r1(%sr4,%r28) - fic,m %r1(%sr4,%r28) - fic,m %r1(%sr4,%r28) - fic,m %r1(%sr4,%r28) - fic,m %r1(%sr4,%r28) - fic,m %r1(%sr4,%r28) - fic,m %r1(%sr4,%r28) - fic,m %r1(%sr4,%r28) +1: fic,m %r31(%sr4,%r28) + fic,m %r31(%sr4,%r28) + fic,m %r31(%sr4,%r28) + fic,m %r31(%sr4,%r28) + fic,m %r31(%sr4,%r28) + fic,m %r31(%sr4,%r28) + fic,m %r31(%sr4,%r28) + fic,m %r31(%sr4,%r28) + fic,m %r31(%sr4,%r28) + fic,m %r31(%sr4,%r28) + fic,m %r31(%sr4,%r28) + fic,m %r31(%sr4,%r28) + fic,m %r31(%sr4,%r28) + fic,m %r31(%sr4,%r28) + fic,m 
%r31(%sr4,%r28) cmpb,COND(<<) %r28, %r25,1b - fic,m %r1(%sr4,%r28) + fic,m %r31(%sr4,%r28) sync diff --git a/arch/parisc/kernel/pci.c b/arch/parisc/kernel/pci.c index 60309051875..64f2764a8ce 100644 --- a/arch/parisc/kernel/pci.c +++ b/arch/parisc/kernel/pci.c @@ -220,6 +220,33 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res, } +int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, + enum pci_mmap_state mmap_state, int write_combine) +{ + unsigned long prot; + + /* + * I/O space can be accessed via normal processor loads and stores on + * this platform but for now we elect not to do this and portable + * drivers should not do this anyway. + */ + if (mmap_state == pci_mmap_io) + return -EINVAL; + + if (write_combine) + return -EINVAL; + + /* + * Ignore write-combine; for now only return uncached mappings. + */ + prot = pgprot_val(vma->vm_page_prot); + prot |= _PAGE_NO_CACHE; + vma->vm_page_prot = __pgprot(prot); + + return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, + vma->vm_end - vma->vm_start, vma->vm_page_prot); +} + /* * A driver is enabling the device. We make sure that all the appropriate * bits are set to allow the device to operate as the driver is expecting. diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index 1c965642068..505b56c6b9b 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -47,7 +47,7 @@ pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pt #ifdef CONFIG_DISCONTIGMEM struct node_map_data node_data[MAX_NUMNODES] __read_mostly; -unsigned char pfnnid_map[PFNNID_MAP_MAX] __read_mostly; +signed char pfnnid_map[PFNNID_MAP_MAX] __read_mostly; #endif static struct resource data_resource = { diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index 26807e5aff5..6f3887d884d 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -176,6 +176,7 @@ extern const char *powerpc_base_platform; #define CPU_FTR_CFAR LONG_ASM_CONST(0x0100000000000000) #define CPU_FTR_HAS_PPR LONG_ASM_CONST(0x0200000000000000) #define CPU_FTR_DAWR LONG_ASM_CONST(0x0400000000000000) +#define CPU_FTR_DABRX LONG_ASM_CONST(0x0800000000000000) #ifndef __ASSEMBLY__ @@ -394,19 +395,20 @@ extern const char *powerpc_base_platform; CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_201 | \ CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA | \ CPU_FTR_CP_USE_DCBTZ | CPU_FTR_STCX_CHECKS_ADDRESS | \ - CPU_FTR_HVMODE) + CPU_FTR_HVMODE | CPU_FTR_DABRX) #define CPU_FTRS_POWER5 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ CPU_FTR_MMCRA | CPU_FTR_SMT | \ CPU_FTR_COHERENT_ICACHE | CPU_FTR_PURR | \ - CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB) + CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_DABRX) #define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ CPU_FTR_MMCRA | CPU_FTR_SMT | \ CPU_FTR_COHERENT_ICACHE | \ CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD | \ - CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_CFAR) + CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_CFAR | \ + CPU_FTR_DABRX) #define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\ CPU_FTR_MMCRA | CPU_FTR_SMT | \ @@ -415,7 +417,7 @@ extern const char *powerpc_base_platform; CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \ CPU_FTR_STCX_CHECKS_ADDRESS | 
CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | \ - CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR) + CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR | CPU_FTR_DABRX) #define CPU_FTRS_POWER8 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\ CPU_FTR_MMCRA | CPU_FTR_SMT | \ @@ -430,14 +432,15 @@ extern const char *powerpc_base_platform; CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \ CPU_FTR_PAUSE_ZERO | CPU_FTR_CELL_TB_BUG | CPU_FTR_CP_USE_DCBTZ | \ - CPU_FTR_UNALIGNED_LD_STD) + CPU_FTR_UNALIGNED_LD_STD | CPU_FTR_DABRX) #define CPU_FTRS_PA6T (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP | \ - CPU_FTR_PURR | CPU_FTR_REAL_LE) + CPU_FTR_PURR | CPU_FTR_REAL_LE | CPU_FTR_DABRX) #define CPU_FTRS_COMPATIBLE (CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2) #define CPU_FTRS_A2 (CPU_FTR_USE_TB | CPU_FTR_SMT | CPU_FTR_DBELL | \ - CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN | CPU_FTR_ICSWX) + CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN | \ + CPU_FTR_ICSWX | CPU_FTR_DABRX ) #ifdef __powerpc64__ #ifdef CONFIG_PPC_BOOK3E diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h index 8e5fae8beaf..46793b58a76 100644 --- a/arch/powerpc/include/asm/exception-64s.h +++ b/arch/powerpc/include/asm/exception-64s.h @@ -513,7 +513,7 @@ label##_common: \ */ #define STD_EXCEPTION_COMMON_ASYNC(trap, label, hdlr) \ EXCEPTION_COMMON(trap, label, hdlr, ret_from_except_lite, \ - FINISH_NAP;RUNLATCH_ON;DISABLE_INTS) + FINISH_NAP;DISABLE_INTS;RUNLATCH_ON) /* * When the idle code in power4_idle puts the CPU into NAP mode, diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h index b9dd382cb34..851bac7afa4 100644 --- a/arch/powerpc/include/asm/kvm_asm.h +++ b/arch/powerpc/include/asm/kvm_asm.h @@ -54,8 +54,16 @@ #define BOOKE_INTERRUPT_DEBUG 15 /* E500 */ -#define BOOKE_INTERRUPT_SPE_UNAVAIL 32 -#define BOOKE_INTERRUPT_SPE_FP_DATA 33 +#define BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL 32 +#define BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST 33 +/* + * TODO: Unify 32-bit and 64-bit kernel exception handlers to use same defines + */ +#define BOOKE_INTERRUPT_SPE_UNAVAIL BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL +#define BOOKE_INTERRUPT_SPE_FP_DATA BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST +#define BOOKE_INTERRUPT_ALTIVEC_UNAVAIL BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL +#define BOOKE_INTERRUPT_ALTIVEC_ASSIST \ + BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST #define BOOKE_INTERRUPT_SPE_FP_ROUND 34 #define BOOKE_INTERRUPT_PERFORMANCE_MONITOR 35 #define BOOKE_INTERRUPT_DOORBELL 36 @@ -67,10 +75,6 @@ #define BOOKE_INTERRUPT_HV_SYSCALL 40 #define BOOKE_INTERRUPT_HV_PRIV 41 -/* altivec */ -#define BOOKE_INTERRUPT_ALTIVEC_UNAVAIL 42 -#define BOOKE_INTERRUPT_ALTIVEC_ASSIST 43 - /* book3s */ #define BOOK3S_INTERRUPT_SYSTEM_RESET 0x100 diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 1f0937d7d4b..2a45d0f0438 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -452,8 +452,8 @@ static struct cpu_spec __initdata cpu_specs[] = { .mmu_features = MMU_FTRS_POWER8, .icache_bsize = 128, .dcache_bsize = 128, - .oprofile_type = PPC_OPROFILE_POWER4, - .oprofile_cpu_type = 0, + .oprofile_type = PPC_OPROFILE_INVALID, + .oprofile_cpu_type = "ppc64/ibm-compat-v1", .cpu_setup = __setup_cpu_power8, .cpu_restore = __restore_cpu_power8, .platform = "power8", @@ -506,8 +506,8 @@ static struct cpu_spec __initdata 
cpu_specs[] = { .dcache_bsize = 128, .num_pmcs = 6, .pmc_type = PPC_PMC_IBM, - .oprofile_cpu_type = 0, - .oprofile_type = PPC_OPROFILE_POWER4, + .oprofile_cpu_type = "ppc64/power8", + .oprofile_type = PPC_OPROFILE_INVALID, .cpu_setup = __setup_cpu_power8, .cpu_restore = __restore_cpu_power8, .platform = "power8", diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 246b11c4fe7..8741c854e03 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -465,20 +465,6 @@ BEGIN_FTR_SECTION std r0, THREAD_EBBHR(r3) mfspr r0, SPRN_EBBRR std r0, THREAD_EBBRR(r3) - - /* PMU registers made user read/(write) by EBB */ - mfspr r0, SPRN_SIAR - std r0, THREAD_SIAR(r3) - mfspr r0, SPRN_SDAR - std r0, THREAD_SDAR(r3) - mfspr r0, SPRN_SIER - std r0, THREAD_SIER(r3) - mfspr r0, SPRN_MMCR0 - std r0, THREAD_MMCR0(r3) - mfspr r0, SPRN_MMCR2 - std r0, THREAD_MMCR2(r3) - mfspr r0, SPRN_MMCRA - std r0, THREAD_MMCRA(r3) END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) #endif @@ -581,20 +567,6 @@ BEGIN_FTR_SECTION ld r0, THREAD_EBBRR(r4) mtspr SPRN_EBBRR, r0 - /* PMU registers made user read/(write) by EBB */ - ld r0, THREAD_SIAR(r4) - mtspr SPRN_SIAR, r0 - ld r0, THREAD_SDAR(r4) - mtspr SPRN_SDAR, r0 - ld r0, THREAD_SIER(r4) - mtspr SPRN_SIER, r0 - ld r0, THREAD_MMCR0(r4) - mtspr SPRN_MMCR0, r0 - ld r0, THREAD_MMCR2(r4) - mtspr SPRN_MMCR2, r0 - ld r0, THREAD_MMCRA(r4) - mtspr SPRN_MMCRA, r0 - ld r0,THREAD_TAR(r4) mtspr SPRN_TAR,r0 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index e6eba1bf61a..40e4a17c8ba 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -454,38 +454,14 @@ BEGIN_FTR_SECTION xori r10,r10,(MSR_FE0|MSR_FE1) mtmsrd r10 sync - fmr 0,0 - fmr 1,1 - fmr 2,2 - fmr 3,3 - fmr 4,4 - fmr 5,5 - fmr 6,6 - fmr 7,7 - fmr 8,8 - fmr 9,9 - fmr 10,10 - fmr 11,11 - fmr 12,12 - fmr 13,13 - fmr 14,14 - fmr 15,15 - fmr 16,16 - fmr 17,17 - fmr 18,18 - fmr 19,19 - fmr 20,20 - fmr 21,21 - fmr 22,22 - fmr 23,23 - fmr 24,24 - fmr 25,25 - fmr 26,26 - fmr 27,27 - fmr 28,28 - fmr 29,29 - fmr 30,30 - fmr 31,31 + +#define FMR2(n) fmr (n), (n) ; fmr n+1, n+1 +#define FMR4(n) FMR2(n) ; FMR2(n+2) +#define FMR8(n) FMR4(n) ; FMR4(n+4) +#define FMR16(n) FMR8(n) ; FMR8(n+8) +#define FMR32(n) FMR16(n) ; FMR16(n+16) + FMR32(0) + FTR_SECTION_ELSE /* * To denormalise we need to move a copy of the register to itself. 
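The FMR32(0) block above (and the XVCPSGNDP32 form in the next hunk) replaces 32 hand-written instructions with preprocessor doubling: each macro level emits two copies of the level below it, so five levels cover registers 0 through 31. A minimal self-contained C sketch of the same idiom; touch_reg() is a hypothetical stand-in for the fmr self-move, since the real macros expand to assembler text rather than calls:

#include <stdio.h>

/* Doubling macros: TOUCH32(0) expands to touch_reg(0) ... touch_reg(31). */
#define TOUCH2(n)	touch_reg(n); touch_reg((n) + 1)
#define TOUCH4(n)	TOUCH2(n); TOUCH2((n) + 2)
#define TOUCH8(n)	TOUCH4(n); TOUCH4((n) + 4)
#define TOUCH16(n)	TOUCH8(n); TOUCH8((n) + 8)
#define TOUCH32(n)	TOUCH16(n); TOUCH16((n) + 16)

static void touch_reg(int n)
{
	printf("fmr %d,%d\n", n, n);	/* one self-move per register */
}

int main(void)
{
	TOUCH32(0);	/* prints the 32 operations the macros generate */
	return 0;
}

Running it prints fmr 0,0 through fmr 31,31 in the order the assembler would see them, which makes it easy to verify the expansion is complete before trusting it in the exception path.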
@@ -495,39 +471,25 @@ FTR_SECTION_ELSE oris r10,r10,MSR_VSX@h mtmsrd r10 sync - XVCPSGNDP(0,0,0) - XVCPSGNDP(1,1,1) - XVCPSGNDP(2,2,2) - XVCPSGNDP(3,3,3) - XVCPSGNDP(4,4,4) - XVCPSGNDP(5,5,5) - XVCPSGNDP(6,6,6) - XVCPSGNDP(7,7,7) - XVCPSGNDP(8,8,8) - XVCPSGNDP(9,9,9) - XVCPSGNDP(10,10,10) - XVCPSGNDP(11,11,11) - XVCPSGNDP(12,12,12) - XVCPSGNDP(13,13,13) - XVCPSGNDP(14,14,14) - XVCPSGNDP(15,15,15) - XVCPSGNDP(16,16,16) - XVCPSGNDP(17,17,17) - XVCPSGNDP(18,18,18) - XVCPSGNDP(19,19,19) - XVCPSGNDP(20,20,20) - XVCPSGNDP(21,21,21) - XVCPSGNDP(22,22,22) - XVCPSGNDP(23,23,23) - XVCPSGNDP(24,24,24) - XVCPSGNDP(25,25,25) - XVCPSGNDP(26,26,26) - XVCPSGNDP(27,27,27) - XVCPSGNDP(28,28,28) - XVCPSGNDP(29,29,29) - XVCPSGNDP(30,30,30) - XVCPSGNDP(31,31,31) + +#define XVCPSGNDP2(n) XVCPSGNDP(n,n,n) ; XVCPSGNDP(n+1,n+1,n+1) +#define XVCPSGNDP4(n) XVCPSGNDP2(n) ; XVCPSGNDP2(n+2) +#define XVCPSGNDP8(n) XVCPSGNDP4(n) ; XVCPSGNDP4(n+4) +#define XVCPSGNDP16(n) XVCPSGNDP8(n) ; XVCPSGNDP8(n+8) +#define XVCPSGNDP32(n) XVCPSGNDP16(n) ; XVCPSGNDP16(n+16) + XVCPSGNDP32(0) + ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206) + +BEGIN_FTR_SECTION + b denorm_done +END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) +/* + * To denormalise we need to move a copy of the register to itself. + * For POWER8 we need to do that for all 64 VSX registers + */ + XVCPSGNDP32(32) +denorm_done: mtspr SPRN_HSRR0,r11 mtcrf 0x80,r9 ld r9,PACA_EXGEN+EX_R9(r13) @@ -721,7 +683,7 @@ machine_check_common: STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception) STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception) STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception) - STD_EXCEPTION_COMMON(0xe40, emulation_assist, .program_check_exception) + STD_EXCEPTION_COMMON(0xe40, emulation_assist, .emulation_assist_interrupt) STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception) #ifdef CONFIG_PPC_DOORBELL STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .doorbell_exception) diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 5cbcf4d5a80..ea185e0b3ca 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -162,7 +162,7 @@ notrace unsigned int __check_irq_replay(void) * in case we also had a rollover while hard disabled */ local_paca->irq_happened &= ~PACA_IRQ_DEC; - if (decrementer_check_overflow()) + if ((happened & PACA_IRQ_DEC) || decrementer_check_overflow()) return 0x900; /* Finally check if an external interrupt happened */ diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index 7f2273cc3c7..eabeec99101 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c @@ -827,6 +827,7 @@ static void pcibios_fixup_resources(struct pci_dev *dev) } for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { struct resource *res = dev->resource + i; + struct pci_bus_region reg; if (!res->flags) continue; @@ -835,8 +836,9 @@ static void pcibios_fixup_resources(struct pci_dev *dev) * at 0 as unset as well, except if PCI_PROBE_ONLY is also set * since in that case, we don't want to re-assign anything */ + pcibios_resource_to_bus(dev, ®, res); if (pci_has_flag(PCI_REASSIGN_ALL_RSRC) || - (res->start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) { + (reg.start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) { /* Only print message if not re-assigning */ if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC)) pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] " diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index a902723fdc6..076d1242507 100644 --- a/arch/powerpc/kernel/process.c 
+++ b/arch/powerpc/kernel/process.c @@ -399,7 +399,8 @@ static inline int __set_dabr(unsigned long dabr, unsigned long dabrx) static inline int __set_dabr(unsigned long dabr, unsigned long dabrx) { mtspr(SPRN_DABR, dabr); - mtspr(SPRN_DABRX, dabrx); + if (cpu_has_feature(CPU_FTR_DABRX)) + mtspr(SPRN_DABRX, dabrx); return 0; } #else @@ -1368,7 +1369,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack) #ifdef CONFIG_PPC64 /* Called with hard IRQs off */ -void __ppc64_runlatch_on(void) +void notrace __ppc64_runlatch_on(void) { struct thread_info *ti = current_thread_info(); unsigned long ctrl; @@ -1381,7 +1382,7 @@ void __ppc64_runlatch_on(void) } /* Called with hard IRQs off */ -void __ppc64_runlatch_off(void) +void notrace __ppc64_runlatch_off(void) { struct thread_info *ti = current_thread_info(); unsigned long ctrl; diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index f18c79c324e..c0e5caf8ccc 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -1165,6 +1165,16 @@ bail: exception_exit(prev_state); } +/* + * This occurs when running in hypervisor mode on POWER6 or later + * and an illegal instruction is encountered. + */ +void __kprobes emulation_assist_interrupt(struct pt_regs *regs) +{ + regs->msr |= REASON_ILLEGAL; + program_check_exception(regs); +} + void alignment_exception(struct pt_regs *regs) { enum ctx_state prev_state = exception_enter(); diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c index 5dd3ab46997..ed038544814 100644 --- a/arch/powerpc/kvm/44x_tlb.c +++ b/arch/powerpc/kvm/44x_tlb.c @@ -441,6 +441,7 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws) struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); struct kvmppc_44x_tlbe *tlbe; unsigned int gtlb_index; + int idx; gtlb_index = kvmppc_get_gpr(vcpu, ra); if (gtlb_index >= KVM44x_GUEST_TLB_SIZE) { @@ -473,6 +474,8 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws) return EMULATE_FAIL; } + idx = srcu_read_lock(&vcpu->kvm->srcu); + if (tlbe_is_host_safe(vcpu, tlbe)) { gva_t eaddr; gpa_t gpaddr; @@ -489,6 +492,8 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws) kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index); } + srcu_read_unlock(&vcpu->kvm->srcu, idx); + trace_kvm_gtlb_write(gtlb_index, tlbe->tid, tlbe->word0, tlbe->word1, tlbe->word2); diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 1020119226d..1a1b5118977 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -673,7 +673,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) ret = s; goto out; } - kvmppc_lazy_ee_enable(); kvm_guest_enter(); @@ -699,6 +698,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) kvmppc_load_guest_fp(vcpu); #endif + kvmppc_lazy_ee_enable(); + ret = __kvmppc_vcpu_run(kvm_run, vcpu); /* No need for kvm_guest_exit. It's done in handle_exit. @@ -832,6 +833,18 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, { int r = RESUME_HOST; int s; + int idx; + +#ifdef CONFIG_PPC64 + WARN_ON(local_paca->irq_happened != 0); +#endif + + /* + * We enter with interrupts disabled in hardware, but + * we need to call hard_irq_disable anyway to ensure that + * the software state is kept in sync. 
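+ * (On 64-bit this pairs with the lazy-EE scheme: EE is already off in
+ * hardware, as the irq_happened WARN_ON above asserts, so the point of
+ * the call is to mark the PACA soft-disabled state, e.g. setting
+ * PACA_IRQ_HARD_DIS, so that later code sees a consistent picture.)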
+ */ + hard_irq_disable(); /* update before a new last_exit_type is rewritten */ kvmppc_update_timing_stats(vcpu); @@ -1053,6 +1066,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, break; } + idx = srcu_read_lock(&vcpu->kvm->srcu); + gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr); gfn = gpaddr >> PAGE_SHIFT; @@ -1075,6 +1090,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, kvmppc_account_exit(vcpu, MMIO_EXITS); } + srcu_read_unlock(&vcpu->kvm->srcu, idx); break; } @@ -1098,6 +1114,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS); + idx = srcu_read_lock(&vcpu->kvm->srcu); + gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr); gfn = gpaddr >> PAGE_SHIFT; @@ -1114,6 +1132,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK); } + srcu_read_unlock(&vcpu->kvm->srcu, idx); break; } diff --git a/arch/powerpc/kvm/e500_mmu.c b/arch/powerpc/kvm/e500_mmu.c index c41a5a96b55..6d6f153b6c1 100644 --- a/arch/powerpc/kvm/e500_mmu.c +++ b/arch/powerpc/kvm/e500_mmu.c @@ -396,6 +396,7 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu) struct kvm_book3e_206_tlb_entry *gtlbe; int tlbsel, esel; int recal = 0; + int idx; tlbsel = get_tlb_tlbsel(vcpu); esel = get_tlb_esel(vcpu, tlbsel); @@ -430,6 +431,8 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu) kvmppc_set_tlb1map_range(vcpu, gtlbe); } + idx = srcu_read_lock(&vcpu->kvm->srcu); + /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */ if (tlbe_is_host_safe(vcpu, gtlbe)) { u64 eaddr = get_tlb_eaddr(gtlbe); @@ -444,6 +447,8 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu) kvmppc_mmu_map(vcpu, eaddr, raddr, index_of(tlbsel, esel)); } + srcu_read_unlock(&vcpu->kvm->srcu, idx); + kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS); return EMULATE_DONE; } diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c index 753cc99eff2..19c8379575f 100644 --- a/arch/powerpc/kvm/e500mc.c +++ b/arch/powerpc/kvm/e500mc.c @@ -177,8 +177,6 @@ int kvmppc_core_check_processor_compat(void) r = 0; else if (strcmp(cur_cpu_spec->cpu_name, "e5500") == 0) r = 0; - else if (strcmp(cur_cpu_spec->cpu_name, "e6500") == 0) - r = 0; else r = -ENOTSUPP; diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 237c8e5f264..77fdd2cef33 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -592,8 +592,14 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud, do { pmd = pmd_offset(pud, addr); next = pmd_addr_end(addr, end); - if (pmd_none_or_clear_bad(pmd)) + if (!is_hugepd(pmd)) { + /* + * if it is not hugepd pointer, we should already find + * it cleared. 
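+ * (In other words, normal page-table teardown has already run here, so
+ * a pmd entry that is neither a hugepd nor clear indicates a bug; the
+ * WARN_ON below reports it instead of silently skipping it.)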
+ */ + WARN_ON(!pmd_none_or_clear_bad(pmd)); continue; + } #ifdef CONFIG_PPC_FSL_BOOK3E /* * Increment next by the size of the huge mapping since diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 845c867444e..29c6482890c 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -1758,7 +1758,7 @@ static void perf_event_interrupt(struct pt_regs *regs) } } } - if ((!found) && printk_ratelimit()) + if (!found && !nmi && printk_ratelimit()) printk(KERN_WARNING "Can't find PMC that caused IRQ\n"); /* diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c index 19506f93573..b456b157d33 100644 --- a/arch/powerpc/platforms/pseries/eeh_pseries.c +++ b/arch/powerpc/platforms/pseries/eeh_pseries.c @@ -83,7 +83,11 @@ static int pseries_eeh_init(void) ibm_configure_pe = rtas_token("ibm,configure-pe"); ibm_configure_bridge = rtas_token("ibm,configure-bridge"); - /* necessary sanity check */ + /* + * Necessary sanity check. We needn't check "get-config-addr-info" + * and its variant since the old firmware probably support address + * of domain/bus/slot/function for EEH RTAS operations. + */ if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE) { pr_warning("%s: RTAS service <ibm,set-eeh-option> invalid\n", __func__); @@ -102,12 +106,6 @@ static int pseries_eeh_init(void) pr_warning("%s: RTAS service <ibm,slot-error-detail> invalid\n", __func__); return -EINVAL; - } else if (ibm_get_config_addr_info2 == RTAS_UNKNOWN_SERVICE && - ibm_get_config_addr_info == RTAS_UNKNOWN_SERVICE) { - pr_warning("%s: RTAS service <ibm,get-config-addr-info2> and " - "<ibm,get-config-addr-info> invalid\n", - __func__); - return -EINVAL; } else if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE && ibm_configure_bridge == RTAS_UNKNOWN_SERVICE) { pr_warning("%s: RTAS service <ibm,configure-pe> and " diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c index bae0f402bf2..87a22092b68 100644 --- a/arch/s390/appldata/appldata_base.c +++ b/arch/s390/appldata/appldata_base.c @@ -212,7 +212,9 @@ appldata_timer_handler(ctl_table *ctl, int write, return 0; } if (!write) { - len = sprintf(buf, appldata_timer_active ? "1\n" : "0\n"); + strncpy(buf, appldata_timer_active ? "1\n" : "0\n", + ARRAY_SIZE(buf)); + len = strnlen(buf, ARRAY_SIZE(buf)); if (len > *lenp) len = *lenp; if (copy_to_user(buffer, buf, len)) @@ -317,7 +319,8 @@ appldata_generic_handler(ctl_table *ctl, int write, return 0; } if (!write) { - len = sprintf(buf, ops->active ? "1\n" : "0\n"); + strncpy(buf, ops->active ? 
"1\n" : "0\n", ARRAY_SIZE(buf)); + len = strnlen(buf, ARRAY_SIZE(buf)); if (len > *lenp) len = *lenp; if (copy_to_user(buffer, buf, len)) { diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h index 9411db653ba..886ac7d4937 100644 --- a/arch/s390/include/asm/dma-mapping.h +++ b/arch/s390/include/asm/dma-mapping.h @@ -71,8 +71,8 @@ static inline void dma_free_coherent(struct device *dev, size_t size, { struct dma_map_ops *dma_ops = get_dma_ops(dev); - dma_ops->free(dev, size, cpu_addr, dma_handle, NULL); debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); + dma_ops->free(dev, size, cpu_addr, dma_handle, NULL); } #endif /* _ASM_S390_DMA_MAPPING_H */ diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h index 379d96e2105..fd9be010f9b 100644 --- a/arch/s390/include/asm/io.h +++ b/arch/s390/include/asm/io.h @@ -36,6 +36,7 @@ static inline void * phys_to_virt(unsigned long address) } void *xlate_dev_mem_ptr(unsigned long phys); +#define xlate_dev_mem_ptr xlate_dev_mem_ptr void unxlate_dev_mem_ptr(unsigned long phys, void *addr); /* diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 0f0de30e3e3..e8b6e5b8932 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -623,7 +623,7 @@ static inline pgste_t pgste_get_lock(pte_t *ptep) " csg %0,%1,%2\n" " jl 0b\n" : "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE]) - : "Q" (ptep[PTRS_PER_PTE]) : "cc"); + : "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory"); #endif return __pgste(new); } @@ -635,18 +635,26 @@ static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste) " nihh %1,0xff7f\n" /* clear RCP_PCL_BIT */ " stg %1,%0\n" : "=Q" (ptep[PTRS_PER_PTE]) - : "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE]) : "cc"); + : "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE]) + : "cc", "memory"); preempt_enable(); #endif } +static inline void pgste_set(pte_t *ptep, pgste_t pgste) +{ +#ifdef CONFIG_PGSTE + *(pgste_t *)(ptep + PTRS_PER_PTE) = pgste; +#endif +} + static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste) { #ifdef CONFIG_PGSTE unsigned long address, bits; unsigned char skey; - if (!pte_present(*ptep)) + if (pte_val(*ptep) & _PAGE_INVALID) return pgste; address = pte_val(*ptep) & PAGE_MASK; skey = page_get_storage_key(address); @@ -680,7 +688,7 @@ static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste) #ifdef CONFIG_PGSTE int young; - if (!pte_present(*ptep)) + if (pte_val(*ptep) & _PAGE_INVALID) return pgste; /* Get referenced bit from storage key */ young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK); @@ -704,17 +712,19 @@ static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry) { #ifdef CONFIG_PGSTE unsigned long address; - unsigned long okey, nkey; + unsigned long nkey; - if (!pte_present(entry)) + if (pte_val(entry) & _PAGE_INVALID) return; + VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID)); address = pte_val(entry) & PAGE_MASK; - okey = nkey = page_get_storage_key(address); - nkey &= ~(_PAGE_ACC_BITS | _PAGE_FP_BIT); - /* Set page access key and fetch protection bit from pgste */ - nkey |= (pgste_val(pgste) & (RCP_ACC_BITS | RCP_FP_BIT)) >> 56; - if (okey != nkey) - page_set_storage_key(address, nkey, 0); + /* + * Set page access key and fetch protection bit from pgste. + * The guest C/R information is still in the PGSTE, set real + * key C/R to 0. 
+ */ + nkey = (pgste_val(pgste) & (RCP_ACC_BITS | RCP_FP_BIT)) >> 56; + page_set_storage_key(address, nkey, 0); #endif } @@ -1098,6 +1108,11 @@ static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, pte = *ptep; if (!mm_exclusive(mm)) __ptep_ipte(address, ptep); + + if (mm_has_pgste(mm)) { + pgste = pgste_update_all(&pte, pgste); + pgste_set(ptep, pgste); + } return pte; } @@ -1105,9 +1120,13 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long address, pte_t *ptep, pte_t pte) { + pgste_t pgste; + if (mm_has_pgste(mm)) { + pgste = *(pgste_t *)(ptep + PTRS_PER_PTE); + pgste_set_key(ptep, pgste, pte); pgste_set_pte(ptep, pte); - pgste_set_unlock(ptep, *(pgste_t *)(ptep + PTRS_PER_PTE)); + pgste_set_unlock(ptep, pgste); } else *ptep = pte; } diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c index 29829747725..87acc38f73c 100644 --- a/arch/s390/kernel/dumpstack.c +++ b/arch/s390/kernel/dumpstack.c @@ -74,6 +74,8 @@ __show_trace(unsigned long sp, unsigned long low, unsigned long high) static void show_trace(struct task_struct *task, unsigned long *stack) { + const unsigned long frame_size = + STACK_FRAME_OVERHEAD + sizeof(struct pt_regs); register unsigned long __r15 asm ("15"); unsigned long sp; @@ -82,11 +84,13 @@ static void show_trace(struct task_struct *task, unsigned long *stack) sp = task ? task->thread.ksp : __r15; printk("Call Trace:\n"); #ifdef CONFIG_CHECK_STACK - sp = __show_trace(sp, S390_lowcore.panic_stack - 4096, - S390_lowcore.panic_stack); + sp = __show_trace(sp, + S390_lowcore.panic_stack + frame_size - 4096, + S390_lowcore.panic_stack + frame_size); #endif - sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE, - S390_lowcore.async_stack); + sp = __show_trace(sp, + S390_lowcore.async_stack + frame_size - ASYNC_SIZE, + S390_lowcore.async_stack + frame_size); if (task) __show_trace(sp, (unsigned long) task_stack_page(task), (unsigned long) task_stack_page(task) + THREAD_SIZE); diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c index f7fb58903f6..408e866ae54 100644 --- a/arch/s390/kernel/irq.c +++ b/arch/s390/kernel/irq.c @@ -311,3 +311,67 @@ void measurement_alert_subclass_unregister(void) spin_unlock(&ma_subclass_lock); } EXPORT_SYMBOL(measurement_alert_subclass_unregister); + +void synchronize_irq(unsigned int irq) +{ + /* + * Not needed, the handler is protected by a lock and IRQs that occur + * after the handler is deleted are just NOPs. 
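+ * (In practice this means a driver that calls synchronize_irq() during
+ * teardown needs no cross-CPU wait on this architecture.)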
+ */ +} +EXPORT_SYMBOL_GPL(synchronize_irq); + +#ifndef CONFIG_PCI + +/* Only PCI devices have dynamically-defined IRQ handlers */ + +int request_irq(unsigned int irq, irq_handler_t handler, + unsigned long irqflags, const char *devname, void *dev_id) +{ + return -EINVAL; +} +EXPORT_SYMBOL_GPL(request_irq); + +void free_irq(unsigned int irq, void *dev_id) +{ + WARN_ON(1); +} +EXPORT_SYMBOL_GPL(free_irq); + +void enable_irq(unsigned int irq) +{ + WARN_ON(1); +} +EXPORT_SYMBOL_GPL(enable_irq); + +void disable_irq(unsigned int irq) +{ + WARN_ON(1); +} +EXPORT_SYMBOL_GPL(disable_irq); + +#endif /* !CONFIG_PCI */ + +void disable_irq_nosync(unsigned int irq) +{ + disable_irq(irq); +} +EXPORT_SYMBOL_GPL(disable_irq_nosync); + +unsigned long probe_irq_on(void) +{ + return 0; +} +EXPORT_SYMBOL_GPL(probe_irq_on); + +int probe_irq_off(unsigned long val) +{ + return 0; +} +EXPORT_SYMBOL_GPL(probe_irq_off); + +unsigned int probe_irq_mask(unsigned long val) +{ + return val; +} +EXPORT_SYMBOL_GPL(probe_irq_mask); diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S index b6506ee32a3..29bd7bec417 100644 --- a/arch/s390/kernel/sclp.S +++ b/arch/s390/kernel/sclp.S @@ -225,7 +225,7 @@ _sclp_print: ahi %r2,1 ltr %r0,%r0 # end of string? jz .LfinalizemtoS4 - chi %r0,0x15 # end of line (NL)? + chi %r0,0x0a # end of line (NL)? jz .LfinalizemtoS4 stc %r0,0(%r6,%r7) # copy to mto la %r11,0(%r6,%r7) diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 05674b66900..4f977d0d25c 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -428,34 +428,27 @@ void smp_stop_cpu(void) * This is the main routine where commands issued by other * cpus are handled. */ -static void do_ext_call_interrupt(struct ext_code ext_code, - unsigned int param32, unsigned long param64) +static void smp_handle_ext_call(void) { unsigned long bits; - int cpu; - - cpu = smp_processor_id(); - if (ext_code.code == 0x1202) - inc_irq_stat(IRQEXT_EXC); - else - inc_irq_stat(IRQEXT_EMS); - /* - * handle bit signal external calls - */ - bits = xchg(&pcpu_devices[cpu].ec_mask, 0); + /* handle bit signal external calls */ + bits = xchg(&pcpu_devices[smp_processor_id()].ec_mask, 0); if (test_bit(ec_stop_cpu, &bits)) smp_stop_cpu(); - if (test_bit(ec_schedule, &bits)) scheduler_ipi(); - if (test_bit(ec_call_function, &bits)) generic_smp_call_function_interrupt(); - if (test_bit(ec_call_function_single, &bits)) generic_smp_call_function_single_interrupt(); +} +static void do_ext_call_interrupt(struct ext_code ext_code, + unsigned int param32, unsigned long param64) +{ + inc_irq_stat(ext_code.code == 0x1202 ? IRQEXT_EXC : IRQEXT_EMS); + smp_handle_ext_call(); } void arch_send_call_function_ipi_mask(const struct cpumask *mask) @@ -760,6 +753,8 @@ int __cpu_disable(void) { unsigned long cregs[16]; + /* Handle possible pending IPIs */ + smp_handle_ext_call(); set_cpu_online(smp_processor_id(), false); /* Disable pseudo page faults on this cpu. 
*/ pfault_fini(); diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 18dc417aaf7..a938b548f07 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -492,7 +492,7 @@ static int gmap_connect_pgtable(unsigned long address, unsigned long segment, mp = (struct gmap_pgtable *) page->index; rmap->gmap = gmap; rmap->entry = segment_ptr; - rmap->vmaddr = address; + rmap->vmaddr = address & PMD_MASK; spin_lock(&mm->page_table_lock); if (*segment_ptr == segment) { list_add(&rmap->list, &mp->mapper); diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index e6f15b5d8b7..f1e5be85d59 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -302,15 +302,6 @@ static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len) return rc; } -void synchronize_irq(unsigned int irq) -{ - /* - * Not needed, the handler is protected by a lock and IRQs that occur - * after the handler is deleted are just NOPs. - */ -} -EXPORT_SYMBOL_GPL(synchronize_irq); - void enable_irq(unsigned int irq) { struct msi_desc *msi = irq_get_msi_desc(irq); @@ -327,30 +318,6 @@ void disable_irq(unsigned int irq) } EXPORT_SYMBOL_GPL(disable_irq); -void disable_irq_nosync(unsigned int irq) -{ - disable_irq(irq); -} -EXPORT_SYMBOL_GPL(disable_irq_nosync); - -unsigned long probe_irq_on(void) -{ - return 0; -} -EXPORT_SYMBOL_GPL(probe_irq_on); - -int probe_irq_off(unsigned long val) -{ - return 0; -} -EXPORT_SYMBOL_GPL(probe_irq_off); - -unsigned int probe_irq_mask(unsigned long val) -{ - return val; -} -EXPORT_SYMBOL_GPL(probe_irq_mask); - void pcibios_fixup_bus(struct pci_bus *bus) { } diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild index ff18e3cfb6b..7e4a97fbded 100644 --- a/arch/sparc/include/asm/Kbuild +++ b/arch/sparc/include/asm/Kbuild @@ -6,6 +6,7 @@ generic-y += cputime.h generic-y += div64.h generic-y += emergency-restart.h generic-y += exec.h +generic-y += linkage.h generic-y += local64.h generic-y += mutex.h generic-y += irq_regs.h diff --git a/arch/sparc/include/asm/leon.h b/arch/sparc/include/asm/leon.h index 15a716934e4..b836e9297f2 100644 --- a/arch/sparc/include/asm/leon.h +++ b/arch/sparc/include/asm/leon.h @@ -135,7 +135,7 @@ static inline int sparc_leon3_cpuid(void) #ifdef CONFIG_SMP # define LEON3_IRQ_IPI_DEFAULT 13 -# define LEON3_IRQ_TICKER (leon3_ticker_irq) +# define LEON3_IRQ_TICKER (leon3_gptimer_irq) # define LEON3_IRQ_CROSS_CALL 15 #endif diff --git a/arch/sparc/include/asm/leon_amba.h b/arch/sparc/include/asm/leon_amba.h index f3034eddf46..24ec48c3ff9 100644 --- a/arch/sparc/include/asm/leon_amba.h +++ b/arch/sparc/include/asm/leon_amba.h @@ -47,6 +47,7 @@ struct amba_prom_registers { #define LEON3_GPTIMER_LD 4 #define LEON3_GPTIMER_IRQEN 8 #define LEON3_GPTIMER_SEPIRQ 8 +#define LEON3_GPTIMER_TIMERS 0x7 #define LEON23_REG_TIMER_CONTROL_EN 0x00000001 /* 1 = enable counting */ /* 0 = hold scalar and counter */ diff --git a/arch/sparc/include/asm/linkage.h b/arch/sparc/include/asm/linkage.h deleted file mode 100644 index 291c2d01c44..00000000000 --- a/arch/sparc/include/asm/linkage.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ASM_LINKAGE_H -#define __ASM_LINKAGE_H - -/* Nothing to see here... 
*/ - -#endif diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c index 75bb608c423..5ef48dab563 100644 --- a/arch/sparc/kernel/ds.c +++ b/arch/sparc/kernel/ds.c @@ -843,7 +843,8 @@ void ldom_reboot(const char *boot_command) unsigned long len; strcpy(full_boot_str, "boot "); - strcpy(full_boot_str + strlen("boot "), boot_command); + strlcpy(full_boot_str + strlen("boot "), boot_command, + sizeof(full_boot_str) - strlen("boot ")); len = strlen(full_boot_str); if (reboot_data_supported) { diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c index 7c0231dabe4..b7c68976cbc 100644 --- a/arch/sparc/kernel/leon_kernel.c +++ b/arch/sparc/kernel/leon_kernel.c @@ -38,7 +38,6 @@ static DEFINE_SPINLOCK(leon_irq_lock); unsigned long leon3_gptimer_irq; /* interrupt controller irq number */ unsigned long leon3_gptimer_idx; /* Timer Index (0..6) within Timer Core */ -int leon3_ticker_irq; /* Timer ticker IRQ */ unsigned int sparc_leon_eirq; #define LEON_IMASK(cpu) (&leon3_irqctrl_regs->mask[cpu]) #define LEON_IACK (&leon3_irqctrl_regs->iclear) @@ -278,6 +277,9 @@ irqreturn_t leon_percpu_timer_ce_interrupt(int irq, void *unused) leon_clear_profile_irq(cpu); + if (cpu == boot_cpu_id) + timer_interrupt(irq, NULL); + ce = &per_cpu(sparc32_clockevent, cpu); irq_enter(); @@ -299,6 +301,7 @@ void __init leon_init_timers(void) int icsel; int ampopts; int err; + u32 config; sparc_config.get_cycles_offset = leon_cycles_offset; sparc_config.cs_period = 1000000 / HZ; @@ -377,23 +380,6 @@ void __init leon_init_timers(void) LEON3_BYPASS_STORE_PA( &leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, 0); -#ifdef CONFIG_SMP - leon3_ticker_irq = leon3_gptimer_irq + 1 + leon3_gptimer_idx; - - if (!(LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->config) & - (1<<LEON3_GPTIMER_SEPIRQ))) { - printk(KERN_ERR "timer not configured with separate irqs\n"); - BUG(); - } - - LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].val, - 0); - LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].rld, - (((1000000/HZ) - 1))); - LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].ctrl, - 0); -#endif - /* * The IRQ controller may (if implemented) consist of multiple * IRQ controllers, each mapped on a 4Kb boundary. 
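The leon_init_timers() hunk that follows derives the timer IRQ from the GPTIMER core's configuration register instead of assuming separate per-timer IRQs. A rough sketch of that decode in isolation, using the LEON3_GPTIMER_SEPIRQ bit and the new LEON3_GPTIMER_TIMERS field mask from above; gptimer_irq_for() is a hypothetical helper, not kernel API:

#include <stdio.h>

#define LEON3_GPTIMER_SEPIRQ	8	/* config bit: separate IRQ per timer */
#define LEON3_GPTIMER_TIMERS	0x7	/* config field: number of timers */

static unsigned int gptimer_irq_for(unsigned int config,
				    unsigned int base_irq,
				    unsigned int timer_idx)
{
	if (config & (1 << LEON3_GPTIMER_SEPIRQ))
		return base_irq + timer_idx;	/* timer N raises base_irq + N */
	if ((config & LEON3_GPTIMER_TIMERS) > 1)
		fprintf(stderr, "GPTIMER uses shared irqs\n");
	return base_irq;			/* all timers share one IRQ */
}

int main(void)
{
	/* e.g. a 2-timer core with SEPIRQ set and its first IRQ at 8 */
	printf("timer 1 -> irq %u\n", gptimer_irq_for((1 << 8) | 2, 8, 1));
	return 0;
}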
@@ -416,13 +402,6 @@ void __init leon_init_timers(void) if (eirq != 0) leon_eirq_setup(eirq); - irq = _leon_build_device_irq(NULL, leon3_gptimer_irq+leon3_gptimer_idx); - err = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL); - if (err) { - printk(KERN_ERR "unable to attach timer IRQ%d\n", irq); - prom_halt(); - } - #ifdef CONFIG_SMP { unsigned long flags; @@ -439,30 +418,31 @@ void __init leon_init_timers(void) } #endif - LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, - LEON3_GPTIMER_EN | - LEON3_GPTIMER_RL | - LEON3_GPTIMER_LD | - LEON3_GPTIMER_IRQEN); + config = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->config); + if (config & (1 << LEON3_GPTIMER_SEPIRQ)) + leon3_gptimer_irq += leon3_gptimer_idx; + else if ((config & LEON3_GPTIMER_TIMERS) > 1) + pr_warn("GPTIMER uses shared irqs, using other timers of the same core will fail.\n"); #ifdef CONFIG_SMP /* Install per-cpu IRQ handler for broadcasted ticker */ - irq = leon_build_device_irq(leon3_ticker_irq, handle_percpu_irq, + irq = leon_build_device_irq(leon3_gptimer_irq, handle_percpu_irq, "per-cpu", 0); err = request_irq(irq, leon_percpu_timer_ce_interrupt, - IRQF_PERCPU | IRQF_TIMER, "ticker", - NULL); + IRQF_PERCPU | IRQF_TIMER, "timer", NULL); +#else + irq = _leon_build_device_irq(NULL, leon3_gptimer_irq); + err = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL); +#endif if (err) { - printk(KERN_ERR "unable to attach ticker IRQ%d\n", irq); + pr_err("Unable to attach timer IRQ%d\n", irq); prom_halt(); } - - LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].ctrl, + LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, LEON3_GPTIMER_EN | LEON3_GPTIMER_RL | LEON3_GPTIMER_LD | LEON3_GPTIMER_IRQEN); -#endif return; bad: printk(KERN_ERR "No Timer/irqctrl found\n"); diff --git a/arch/sparc/kernel/leon_pci_grpci1.c b/arch/sparc/kernel/leon_pci_grpci1.c index 7739a54315e..6df26e37f87 100644 --- a/arch/sparc/kernel/leon_pci_grpci1.c +++ b/arch/sparc/kernel/leon_pci_grpci1.c @@ -536,11 +536,9 @@ static int grpci1_of_probe(struct platform_device *ofdev) /* find device register base address */ res = platform_get_resource(ofdev, IORESOURCE_MEM, 0); - regs = devm_request_and_ioremap(&ofdev->dev, res); - if (!regs) { - dev_err(&ofdev->dev, "io-regs mapping failed\n"); - return -EADDRNOTAVAIL; - } + regs = devm_ioremap_resource(&ofdev->dev, res); + if (IS_ERR(regs)) + return PTR_ERR(regs); /* * check that we're in Host Slot and that we can act as a Host Bridge diff --git a/arch/sparc/kernel/leon_pmc.c b/arch/sparc/kernel/leon_pmc.c index bdf53d9a8d4..b0b3967a2dd 100644 --- a/arch/sparc/kernel/leon_pmc.c +++ b/arch/sparc/kernel/leon_pmc.c @@ -47,6 +47,10 @@ void pmc_leon_idle_fixup(void) * MMU does not get a TLB miss here by using the MMU BYPASS ASI. 
*/ register unsigned int address = (unsigned int)leon3_irqctrl_regs; + + /* Interrupts need to be enabled to not hang the CPU */ + local_irq_enable(); + __asm__ __volatile__ ( "wr %%g0, %%asr19\n" "lda [%0] %1, %%g0\n" @@ -60,6 +64,9 @@ void pmc_leon_idle_fixup(void) */ void pmc_leon_idle(void) { + /* Interrupts need to be enabled to not hang the CPU */ + local_irq_enable(); + /* For systems without power-down, this will be no-op */ __asm__ __volatile__ ("wr %g0, %asr19\n\t"); } diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c index 9f20566b077..79cc0d1a477 100644 --- a/arch/sparc/kernel/prom_common.c +++ b/arch/sparc/kernel/prom_common.c @@ -54,6 +54,7 @@ EXPORT_SYMBOL(of_set_property_mutex); int of_set_property(struct device_node *dp, const char *name, void *val, int len) { struct property **prevp; + unsigned long flags; void *new_val; int err; @@ -64,7 +65,7 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len err = -ENODEV; mutex_lock(&of_set_property_mutex); - raw_spin_lock(&devtree_lock); + raw_spin_lock_irqsave(&devtree_lock, flags); prevp = &dp->properties; while (*prevp) { struct property *prop = *prevp; @@ -91,7 +92,7 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len } prevp = &(*prevp)->next; } - raw_spin_unlock(&devtree_lock); + raw_spin_unlock_irqrestore(&devtree_lock, flags); mutex_unlock(&of_set_property_mutex); /* XXX Update procfs if necessary... */ diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c index 38bf80a22f0..1434526970a 100644 --- a/arch/sparc/kernel/setup_32.c +++ b/arch/sparc/kernel/setup_32.c @@ -304,7 +304,7 @@ void __init setup_arch(char **cmdline_p) /* Initialize PROM console and command line. */ *cmdline_p = prom_getbootargs(); - strcpy(boot_command_line, *cmdline_p); + strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE); parse_early_param(); boot_flags_init(*cmdline_p); diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c index 88a127b9c69..13785547e43 100644 --- a/arch/sparc/kernel/setup_64.c +++ b/arch/sparc/kernel/setup_64.c @@ -555,7 +555,7 @@ void __init setup_arch(char **cmdline_p) { /* Initialize PROM console and command line. */ *cmdline_p = prom_getbootargs(); - strcpy(boot_command_line, *cmdline_p); + strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE); parse_early_param(); boot_flags_init(*cmdline_p); diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index a7171997adf..04fd55a6e46 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -1098,7 +1098,14 @@ static int __init grab_mblocks(struct mdesc_handle *md) m->size = *val; val = mdesc_get_property(md, node, "address-congruence-offset", NULL); - m->offset = *val; + + /* The address-congruence-offset property is optional. + * Explicitly zero it to identify this. 
+ */ + if (val) + m->offset = *val; + else + m->offset = 0UL; numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n", count - 1, m->base, m->size, m->offset); diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c index 83d89bcb44a..37e7bc4c95b 100644 --- a/arch/sparc/mm/tlb.c +++ b/arch/sparc/mm/tlb.c @@ -85,8 +85,8 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr, } if (!tb->active) { - global_flush_tlb_page(mm, vaddr); flush_tsb_user_page(mm, vaddr); + global_flush_tlb_page(mm, vaddr); goto out; } diff --git a/arch/sparc/prom/bootstr_32.c b/arch/sparc/prom/bootstr_32.c index f5ec32e0d41..d2b49d2365e 100644 --- a/arch/sparc/prom/bootstr_32.c +++ b/arch/sparc/prom/bootstr_32.c @@ -23,23 +23,25 @@ prom_getbootargs(void) return barg_buf; } - switch(prom_vers) { + switch (prom_vers) { case PROM_V0: cp = barg_buf; /* Start from 1 and go over fd(0,0,0)kernel */ - for(iter = 1; iter < 8; iter++) { + for (iter = 1; iter < 8; iter++) { arg = (*(romvec->pv_v0bootargs))->argv[iter]; if (arg == NULL) break; - while(*arg != 0) { + while (*arg != 0) { /* Leave place for space and null. */ - if(cp >= barg_buf + BARG_LEN-2){ + if (cp >= barg_buf + BARG_LEN - 2) /* We might issue a warning here. */ break; - } *cp++ = *arg++; } *cp++ = ' '; + if (cp >= barg_buf + BARG_LEN - 1) + /* We might issue a warning here. */ + break; } *cp = 0; break; diff --git a/arch/sparc/prom/tree_64.c b/arch/sparc/prom/tree_64.c index 92204c3800b..bd1b2a3ac34 100644 --- a/arch/sparc/prom/tree_64.c +++ b/arch/sparc/prom/tree_64.c @@ -39,7 +39,7 @@ inline phandle __prom_getchild(phandle node) return prom_node_to_node("child", node); } -inline phandle prom_getchild(phandle node) +phandle prom_getchild(phandle node) { phandle cnode; @@ -72,7 +72,7 @@ inline phandle __prom_getsibling(phandle node) return prom_node_to_node(prom_peer_name, node); } -inline phandle prom_getsibling(phandle node) +phandle prom_getsibling(phandle node) { phandle sibnode; @@ -89,7 +89,7 @@ EXPORT_SYMBOL(prom_getsibling); /* Return the length in bytes of property 'prop' at node 'node'. * Return -1 on error. */ -inline int prom_getproplen(phandle node, const char *prop) +int prom_getproplen(phandle node, const char *prop) { unsigned long args[6]; @@ -113,8 +113,8 @@ EXPORT_SYMBOL(prom_getproplen); * 'buffer' which has a size of 'bufsize'. If the acquisition * was successful the length will be returned, else -1 is returned. */ -inline int prom_getproperty(phandle node, const char *prop, - char *buffer, int bufsize) +int prom_getproperty(phandle node, const char *prop, + char *buffer, int bufsize) { unsigned long args[8]; int plen; @@ -141,7 +141,7 @@ EXPORT_SYMBOL(prom_getproperty); /* Acquire an integer property and return its value. Returns -1 * on failure. */ -inline int prom_getint(phandle node, const char *prop) +int prom_getint(phandle node, const char *prop) { int intprop; @@ -235,7 +235,7 @@ static const char *prom_nextprop_name = "nextprop"; /* Return the first property type for node 'node'. * buffer should be at least 32B in length */ -inline char *prom_firstprop(phandle node, char *buffer) +char *prom_firstprop(phandle node, char *buffer) { unsigned long args[7]; @@ -261,7 +261,7 @@ EXPORT_SYMBOL(prom_firstprop); * at node 'node' . Returns NULL string if no more * property types for this node. 
*/ -inline char *prom_nextprop(phandle node, const char *oprop, char *buffer) +char *prom_nextprop(phandle node, const char *oprop, char *buffer) { unsigned long args[7]; char buf[32]; diff --git a/arch/tile/lib/exports.c b/arch/tile/lib/exports.c index 4385cb6fa00..a93b02a2522 100644 --- a/arch/tile/lib/exports.c +++ b/arch/tile/lib/exports.c @@ -84,4 +84,6 @@ uint64_t __ashrdi3(uint64_t, unsigned int); EXPORT_SYMBOL(__ashrdi3); uint64_t __ashldi3(uint64_t, unsigned int); EXPORT_SYMBOL(__ashldi3); +int __ffsdi2(uint64_t); +EXPORT_SYMBOL(__ffsdi2); #endif diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c index d7d21851e60..3df3bd54449 100644 --- a/arch/um/drivers/mconsole_kern.c +++ b/arch/um/drivers/mconsole_kern.c @@ -147,7 +147,7 @@ void mconsole_proc(struct mc_request *req) } do { - loff_t pos; + loff_t pos = file->f_pos; mm_segment_t old_fs = get_fs(); set_fs(KERNEL_DS); len = vfs_read(file, buf, PAGE_SIZE - 1, &pos); diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 685692c94f0..fe120da2562 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -2265,6 +2265,7 @@ source "fs/Kconfig.binfmt" config IA32_EMULATION bool "IA32 Emulation" depends on X86_64 + select BINFMT_ELF select COMPAT_BINFMT_ELF select HAVE_UID16 ---help--- diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index 35ee62fccf9..c205035a6b9 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c @@ -251,51 +251,6 @@ static void find_bits(unsigned long mask, u8 *pos, u8 *size) *size = len; } -static efi_status_t setup_efi_vars(struct boot_params *params) -{ - struct setup_data *data; - struct efi_var_bootdata *efidata; - u64 store_size, remaining_size, var_size; - efi_status_t status; - - if (sys_table->runtime->hdr.revision < EFI_2_00_SYSTEM_TABLE_REVISION) - return EFI_UNSUPPORTED; - - data = (struct setup_data *)(unsigned long)params->hdr.setup_data; - - while (data && data->next) - data = (struct setup_data *)(unsigned long)data->next; - - status = efi_call_phys4((void *)sys_table->runtime->query_variable_info, - EFI_VARIABLE_NON_VOLATILE | - EFI_VARIABLE_BOOTSERVICE_ACCESS | - EFI_VARIABLE_RUNTIME_ACCESS, &store_size, - &remaining_size, &var_size); - - if (status != EFI_SUCCESS) - return status; - - status = efi_call_phys3(sys_table->boottime->allocate_pool, - EFI_LOADER_DATA, sizeof(*efidata), &efidata); - - if (status != EFI_SUCCESS) - return status; - - efidata->data.type = SETUP_EFI_VARS; - efidata->data.len = sizeof(struct efi_var_bootdata) - - sizeof(struct setup_data); - efidata->data.next = 0; - efidata->store_size = store_size; - efidata->remaining_size = remaining_size; - efidata->max_var_size = var_size; - - if (data) - data->next = (unsigned long)efidata; - else - params->hdr.setup_data = (unsigned long)efidata; - -} - static efi_status_t setup_efi_pci(struct boot_params *params) { efi_pci_io_protocol *pci; @@ -1202,8 +1157,6 @@ struct boot_params *efi_main(void *handle, efi_system_table_t *_table, setup_graphics(boot_params); - setup_efi_vars(boot_params); - setup_efi_pci(boot_params); status = efi_call_phys3(sys_table->boottime->allocate_pool, diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S index 62fe22cd4cb..477e9d75149 100644 --- a/arch/x86/crypto/aesni-intel_asm.S +++ b/arch/x86/crypto/aesni-intel_asm.S @@ -2681,56 +2681,68 @@ ENTRY(aesni_xts_crypt8) addq %rcx, KEYP movdqa IV, STATE1 - pxor 0x00(INP), STATE1 + movdqu 0x00(INP), INC + pxor INC, STATE1 movdqu IV, 0x00(OUTP) 
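+ /* Rationale, presumably: legacy SSE pxor with a memory operand requires
+ * 16-byte alignment and faults when INP/OUTP are unaligned; movdqu has
+ * no alignment requirement, hence the load-then-pxor sequence. */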
_aesni_gf128mul_x_ble() movdqa IV, STATE2 - pxor 0x10(INP), STATE2 + movdqu 0x10(INP), INC + pxor INC, STATE2 movdqu IV, 0x10(OUTP) _aesni_gf128mul_x_ble() movdqa IV, STATE3 - pxor 0x20(INP), STATE3 + movdqu 0x20(INP), INC + pxor INC, STATE3 movdqu IV, 0x20(OUTP) _aesni_gf128mul_x_ble() movdqa IV, STATE4 - pxor 0x30(INP), STATE4 + movdqu 0x30(INP), INC + pxor INC, STATE4 movdqu IV, 0x30(OUTP) call *%r11 - pxor 0x00(OUTP), STATE1 + movdqu 0x00(OUTP), INC + pxor INC, STATE1 movdqu STATE1, 0x00(OUTP) _aesni_gf128mul_x_ble() movdqa IV, STATE1 - pxor 0x40(INP), STATE1 + movdqu 0x40(INP), INC + pxor INC, STATE1 movdqu IV, 0x40(OUTP) - pxor 0x10(OUTP), STATE2 + movdqu 0x10(OUTP), INC + pxor INC, STATE2 movdqu STATE2, 0x10(OUTP) _aesni_gf128mul_x_ble() movdqa IV, STATE2 - pxor 0x50(INP), STATE2 + movdqu 0x50(INP), INC + pxor INC, STATE2 movdqu IV, 0x50(OUTP) - pxor 0x20(OUTP), STATE3 + movdqu 0x20(OUTP), INC + pxor INC, STATE3 movdqu STATE3, 0x20(OUTP) _aesni_gf128mul_x_ble() movdqa IV, STATE3 - pxor 0x60(INP), STATE3 + movdqu 0x60(INP), INC + pxor INC, STATE3 movdqu IV, 0x60(OUTP) - pxor 0x30(OUTP), STATE4 + movdqu 0x30(OUTP), INC + pxor INC, STATE4 movdqu STATE4, 0x30(OUTP) _aesni_gf128mul_x_ble() movdqa IV, STATE4 - pxor 0x70(INP), STATE4 + movdqu 0x70(INP), INC + pxor INC, STATE4 movdqu IV, 0x70(OUTP) _aesni_gf128mul_x_ble() @@ -2738,16 +2750,20 @@ ENTRY(aesni_xts_crypt8) call *%r11 - pxor 0x40(OUTP), STATE1 + movdqu 0x40(OUTP), INC + pxor INC, STATE1 movdqu STATE1, 0x40(OUTP) - pxor 0x50(OUTP), STATE2 + movdqu 0x50(OUTP), INC + pxor INC, STATE2 movdqu STATE2, 0x50(OUTP) - pxor 0x60(OUTP), STATE3 + movdqu 0x60(OUTP), INC + pxor INC, STATE3 movdqu STATE3, 0x60(OUTP) - pxor 0x70(OUTP), STATE4 + movdqu 0x70(OUTP), INC + pxor INC, STATE4 movdqu STATE4, 0x70(OUTP) ret diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c index 805078e0801..52ff81cce00 100644 --- a/arch/x86/ia32/ia32_aout.c +++ b/arch/x86/ia32/ia32_aout.c @@ -192,7 +192,7 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, /* struct user */ DUMP_WRITE(&dump, sizeof(dump)); /* Now dump all of the user data. 
		   Include malloced stuff as well */
-	DUMP_SEEK(PAGE_SIZE);
+	DUMP_SEEK(PAGE_SIZE - sizeof(dump));
 
 	/* now we start writing out the user space info */
 	set_fs(USER_DS);
 
 	/* Dump the data area */
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 2fb5d5884e2..60c89f30c72 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -102,13 +102,6 @@ extern void efi_call_phys_epilog(void);
 extern void efi_unmap_memmap(void);
 extern void efi_memory_uc(u64 addr, unsigned long size);
 
-struct efi_var_bootdata {
-	struct setup_data data;
-	u64 store_size;
-	u64 remaining_size;
-	u64 max_var_size;
-};
-
 #ifdef CONFIG_EFI
 
 static inline bool efi_is_native(void)
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index ba870bb6dd8..57873beb329 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -41,4 +41,9 @@ extern int vector_used_by_percpu_irq(unsigned int vector);
 
 extern void init_ISA_irqs(void);
 
+#ifdef CONFIG_X86_LOCAL_APIC
+void arch_trigger_all_cpu_backtrace(void);
+#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
+#endif
+
 #endif /* _ASM_X86_IRQ_H */
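The pair of lines added to irq.h above uses a stock kernel idiom worth calling out: `#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace` defines a macro that expands to its own name, so generic code can probe for the arch override with `#ifdef` and fall back when it is absent. A minimal stand-alone illustration of the idiom (hypothetical user-space code, not part of the patch):

#include <stdio.h>

/* Arch-specific override, made #ifdef-detectable by the self-named macro. */
void arch_trigger_all_cpu_backtrace(void)
{
	puts("arch-specific backtrace");
}
#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace

int main(void)
{
#ifdef arch_trigger_all_cpu_backtrace
	arch_trigger_all_cpu_backtrace();	/* override present */
#else
	puts("generic fallback");		/* taken if the arch defines nothing */
#endif
	return 0;
}

The nmi.h hunk further down is the other half of the same cleanup: the declaration moves out of nmi.h and into irq.h under the same CONFIG_X86_LOCAL_APIC guard.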
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index 6825e2efd1b..6bc3985ee47 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -60,11 +60,11 @@ static inline void __exit exit_amd_microcode(void) {}
 #ifdef CONFIG_MICROCODE_EARLY
 #define MAX_UCODE_COUNT 128
 extern void __init load_ucode_bsp(void);
-extern __init void load_ucode_ap(void);
+extern void __cpuinit load_ucode_ap(void);
 extern int __init save_microcode_in_initrd(void);
 #else
 static inline void __init load_ucode_bsp(void) {}
-static inline __init void load_ucode_ap(void) {}
+static inline void __cpuinit load_ucode_ap(void) {}
 static inline int __init save_microcode_in_initrd(void)
 {
 	return 0;
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
index c0fa356e90d..86f9301903c 100644
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -18,9 +18,7 @@ extern int proc_nmi_enabled(struct ctl_table *, int ,
 			void __user *, size_t *, loff_t *);
 extern int unknown_nmi_panic;
 
-void arch_trigger_all_cpu_backtrace(void);
-#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
-#endif
+#endif /* CONFIG_X86_LOCAL_APIC */
 
 #define NMI_FLAG_FIRST	1
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h
index 08744242b8d..c15ddaf9071 100644
--- a/arch/x86/include/uapi/asm/bootparam.h
+++ b/arch/x86/include/uapi/asm/bootparam.h
@@ -6,7 +6,6 @@
 #define SETUP_E820_EXT			1
 #define SETUP_DTB			2
 #define SETUP_PCI			3
-#define SETUP_EFI_VARS			4
 
 /* ram_size flags */
 #define RAMDISK_IMAGE_START_MASK	0x07FF
diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c
index 31cb9ae992b..a698d7165c9 100644
--- a/arch/x86/kernel/apic/hw_nmi.c
+++ b/arch/x86/kernel/apic/hw_nmi.c
@@ -9,6 +9,7 @@
  *
  */
 #include <asm/apic.h>
+#include <asm/nmi.h>
 
 #include <linux/cpumask.h>
 #include <linux/kdebug.h>
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c
index 35ffda5d072..5f90b85ff22 100644
--- a/arch/x86/kernel/cpu/mtrr/cleanup.c
+++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
@@ -714,15 +714,15 @@ int __init mtrr_cleanup(unsigned address_bits)
 	if (mtrr_tom2)
 		x_remove_size = (mtrr_tom2 >> PAGE_SHIFT) - x_remove_base;
 
-	nr_range = x86_get_mtrr_mem_range(range, 0, x_remove_base, x_remove_size);
 	/*
 	 * [0, 1M) should always be covered by var mtrr with WB
 	 * and fixed mtrrs should take effect before var mtrr for it:
 	 */
-	nr_range = add_range_with_merge(range, RANGE_NUM, nr_range, 0,
+	nr_range = add_range_with_merge(range, RANGE_NUM, 0, 0,
 					1ULL<<(20 - PAGE_SHIFT));
-	/* Sort the ranges: */
-	sort_range(range, nr_range);
+	/* add from var mtrr at last */
+	nr_range = x86_get_mtrr_mem_range(range, nr_range,
+					  x_remove_base, x_remove_size);
 
 	range_sums = sum_ranges(range, nr_range);
 	printk(KERN_INFO "total RAM covered: %ldM\n",
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index f60d41ff9a9..a9e22073bd5 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -165,13 +165,13 @@ static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
 	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
 	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
 	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
-	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
 	EVENT_EXTRA_END
 };
 
 static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
 	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
 	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
+	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
 	EVENT_EXTRA_END
 };
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index d2c381280e3..3dd37ebd591 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -242,6 +242,7 @@ void __init kvmclock_init(void)
 	if (!mem)
 		return;
 	hv_clock = __va(mem);
+	memset(hv_clock, 0, size);
 
 	if (kvm_register_clock("boot clock")) {
 		hv_clock = NULL;
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 4e7a37ff03a..81a5f5e8f14 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -277,18 +277,6 @@ void exit_idle(void)
 }
 #endif
 
-void arch_cpu_idle_prepare(void)
-{
-	/*
-	 * If we're the non-boot CPU, nothing set the stack canary up
-	 * for us. CPU0 already has it initialized but no harm in
-	 * doing it again. This is a good place for updating it, as
-	 * we wont ever return from this function (so the invalid
-	 * canaries already on the stack wont ever trigger).
-	 */
-	boot_init_stack_canary();
-}
-
 void arch_cpu_idle_enter(void)
 {
 	local_touch_nmi();
diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
index 7a6f3b3be3c..f2bb9c96720 100644
--- a/arch/x86/kernel/relocate_kernel_64.S
+++ b/arch/x86/kernel/relocate_kernel_64.S
@@ -160,7 +160,7 @@ identity_mapped:
 	xorq	%rbp, %rbp
 	xorq	%r8,  %r8
 	xorq	%r9,  %r9
-	xorq	%r10, %r9
+	xorq	%r10, %r10
 	xorq	%r11, %r11
 	xorq	%r12, %r12
 	xorq	%r13, %r13
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 9c73b51817e..bfd348e9936 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -372,15 +372,15 @@ static bool __cpuinit match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 
 void __cpuinit set_cpu_sibling_map(int cpu)
 {
-	bool has_mc = boot_cpu_data.x86_max_cores > 1;
 	bool has_smt = smp_num_siblings > 1;
+	bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1;
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 	struct cpuinfo_x86 *o;
 	int i;
 
 	cpumask_set_cpu(cpu, cpu_sibling_setup_mask);
 
-	if (!has_smt && !has_mc) {
+	if (!has_mp) {
 		cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
 		cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
 		cpumask_set_cpu(cpu, cpu_core_mask(cpu));
@@ -394,7 +394,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 		if ((i == cpu) || (has_smt && match_smt(c, o)))
 			link_mask(sibling, cpu, i);
 
-		if ((i == cpu) || (has_mc && match_llc(c, o)))
+		if ((i == cpu) || (has_mp && match_llc(c, o)))
 			link_mask(llc_shared, cpu, i);
 	}
 
@@ -406,7 +406,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 	for_each_cpu(i, cpu_sibling_setup_mask) {
 		o = &cpu_data(i);
 
-		if ((i == cpu) || (has_mc && match_mc(c, o))) {
+		if ((i == cpu) || (has_mp && match_mc(c, o))) {
 			link_mask(core, cpu, i);
 
 			/*
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 8db0010ed15..5953dcea752 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -1240,9 +1240,12 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
 	ctxt->modrm_seg = VCPU_SREG_DS;
 
 	if (ctxt->modrm_mod == 3) {
+		int highbyte_regs = ctxt->rex_prefix == 0;
+
 		op->type = OP_REG;
 		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
-		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm, ctxt->d & ByteOp);
+		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
+					       highbyte_regs && (ctxt->d & ByteOp));
 		if (ctxt->d & Sse) {
 			op->type = OP_XMM;
 			op->bytes = 16;
@@ -3997,7 +4000,8 @@ static const struct opcode twobyte_table[256] = {
 	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
 	N, D(ImplicitOps | ModRM), N, N,
 	/* 0x10 - 0x1F */
-	N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N,
+	N, N, N, N, N, N, N, N,
+	D(ImplicitOps | ModRM), N, N, N, N, N, N, D(ImplicitOps | ModRM),
 	/* 0x20 - 0x2F */
 	DIP(ModRM | DstMem | Priv | Op3264, cr_read, check_cr_read),
 	DIP(ModRM | DstMem | Priv | Op3264, dr_read, check_dr_read),
@@ -4836,6 +4840,7 @@ twobyte_insn:
 	case 0x08:		/* invd */
 	case 0x0d:		/* GrpP (prefetch) */
 	case 0x18:		/* Grp16 (prefetch/nop) */
+	case 0x1f:		/* nop */
 		break;
 	case 0x20: /* mov cr, reg */
 		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index e1adbb4aca7..0eee2c8b64d 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1861,11 +1861,14 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
 {
 	struct kvm_lapic *apic = vcpu->arch.apic;
 	unsigned int sipi_vector;
+	unsigned long pe;
 
-	if (!kvm_vcpu_has_lapic(vcpu))
+	if (!kvm_vcpu_has_lapic(vcpu) || !apic->pending_events)
 		return;
 
-	if (test_and_clear_bit(KVM_APIC_INIT, &apic->pending_events)) {
+	pe = xchg(&apic->pending_events, 0);
+
+	if (test_bit(KVM_APIC_INIT, &pe)) {
 		kvm_lapic_reset(vcpu);
 		kvm_vcpu_reset(vcpu);
 		if (kvm_vcpu_is_bsp(apic->vcpu))
@@ -1873,7 +1876,7 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
 	else
 		vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
 	}
-	if (test_and_clear_bit(KVM_APIC_SIPI, &apic->pending_events) &&
+	if (test_bit(KVM_APIC_SIPI, &pe) &&
 	    vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
 		/* evaluate pending_events before reading the vector */
 		smp_rmb();
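The lapic.c hunk above swaps two separate test_and_clear_bit() calls for one xchg() of the whole pending_events word: the handler takes a single atomic snapshot, then tests plain bits in its private copy, so an INIT and a SIPI raised together are always judged against the same consistent state. A rough user-space sketch of that pattern using C11 atomics (the names here are illustrative, not kernel API):

#include <stdatomic.h>
#include <stdio.h>

#define EV_INIT 0
#define EV_SIPI 1

static _Atomic unsigned long pending_events;

static void raise_event(int bit)
{
	atomic_fetch_or(&pending_events, 1UL << bit);	/* producer side */
}

static void accept_events(void)
{
	/* One snapshot-and-clear; later producers must set bits afresh. */
	unsigned long pe = atomic_exchange(&pending_events, 0);

	if (pe & (1UL << EV_INIT))
		puts("handle INIT");
	if (pe & (1UL << EV_SIPI))
		puts("handle SIPI");
}

int main(void)
{
	raise_event(EV_INIT);
	raise_event(EV_SIPI);
	accept_events();	/* both decided from one snapshot */
	return 0;
}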
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 094b5d96ab1..e8ba99c3418 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -582,8 +582,6 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 	if (index != XCR_XFEATURE_ENABLED_MASK)
 		return 1;
 	xcr0 = xcr;
-	if (kvm_x86_ops->get_cpl(vcpu) != 0)
-		return 1;
 	if (!(xcr0 & XSTATE_FP))
 		return 1;
 	if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
@@ -597,7 +595,8 @@
 
 int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 {
-	if (__kvm_set_xcr(vcpu, index, xcr)) {
+	if (kvm_x86_ops->get_cpl(vcpu) != 0 ||
+	    __kvm_set_xcr(vcpu, index, xcr)) {
 		kvm_inject_gp(vcpu, 0);
 		return 1;
 	}
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index eaac1743def..1f34e921977 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -277,6 +277,9 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range,
 	end_pfn = limit_pfn;
 	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
 
+	if (!after_bootmem)
+		adjust_range_page_size_mask(mr, nr_range);
+
 	/* try to merge same page size and continuous */
 	for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
 		unsigned long old_start;
@@ -291,9 +294,6 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range,
 		nr_range--;
 	}
 
-	if (!after_bootmem)
-		adjust_range_page_size_mask(mr, nr_range);
-
 	for (i = 0; i < nr_range; i++)
 		printk(KERN_DEBUG " [mem %#010lx-%#010lx] page %s\n",
 				mr[i].start, mr[i].end - 1,
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 305c68b8d53..981c2dbd72c 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -628,7 +628,9 @@ int pcibios_add_device(struct pci_dev *dev)
 
 	pa_data = boot_params.hdr.setup_data;
 	while (pa_data) {
-		data = phys_to_virt(pa_data);
+		data = ioremap(pa_data, sizeof(*rom));
+		if (!data)
+			return -ENOMEM;
 
 		if (data->type == SETUP_PCI) {
 			rom = (struct pci_setup_rom *)data;
@@ -645,6 +647,7 @@ int pcibios_add_device(struct pci_dev *dev)
 			}
 		}
 		pa_data = data->next;
+		iounmap(data);
 	}
 	return 0;
 }
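The pci/common.c fix above replaces a bare phys_to_virt() with an ioremap()/iounmap() pair while following the setup_data chain, since those blobs need not sit in the kernel's linear mapping. The traversal shape is: map a node, read its type and the physical pointer to the next node, unmap, repeat. A toy stand-alone model of that walk (the map/unmap helpers below are mocks over an ordinary array, not the kernel API):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct setup_data {
	uint64_t next;	/* physical address of the next node, 0 terminates */
	uint32_t type;
	uint32_t len;
};

static uint8_t phys_mem[4096];	/* pretend physical memory */

static void *mock_ioremap(uint64_t pa, size_t len)
{
	(void)len;
	return pa ? phys_mem + pa : NULL;
}

static void mock_iounmap(void *va)
{
	(void)va;	/* nothing to undo in the mock */
}

int main(void)
{
	struct setup_data node2 = { 0, 3 /* SETUP_PCI */, 0 };
	struct setup_data node1 = { 512, 2 /* SETUP_DTB */, 0 };
	uint64_t pa = 128;

	memcpy(phys_mem + 128, &node1, sizeof(node1));
	memcpy(phys_mem + 512, &node2, sizeof(node2));

	while (pa) {
		struct setup_data *data = mock_ioremap(pa, sizeof(*data));

		if (!data)
			return 1;
		printf("setup_data type %u at pa %llu\n",
		       data->type, (unsigned long long)pa);
		pa = data->next;	/* read before unmapping */
		mock_iounmap(data);
	}
	return 0;
}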
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 82089d8b195..d2fbcedcf6e 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -42,7 +42,6 @@
 #include <linux/io.h>
 #include <linux/reboot.h>
 #include <linux/bcd.h>
-#include <linux/ucs2_string.h>
 
 #include <asm/setup.h>
 #include <asm/efi.h>
@@ -54,12 +53,12 @@
 
 #define EFI_DEBUG	1
 
-/*
- * There's some additional metadata associated with each
- * variable. Intel's reference implementation is 60 bytes - bump that
- * to account for potential alignment constraints
- */
-#define VAR_METADATA_SIZE 64
+#define EFI_MIN_RESERVE 5120
+
+#define EFI_DUMMY_GUID \
+	EFI_GUID(0x4424ac57, 0xbe4b, 0x47dd, 0x9e, 0x97, 0xed, 0x50, 0xf0, 0x9f, 0x92, 0xa9)
+
+static efi_char16_t efi_dummy_name[6] = { 'D', 'U', 'M', 'M', 'Y', 0 };
 
 struct efi __read_mostly efi = {
 	.mps        = EFI_INVALID_TABLE_ADDR,
@@ -79,13 +78,6 @@ struct efi_memory_map memmap;
 static struct efi efi_phys __initdata;
 static efi_system_table_t efi_systab __initdata;
 
-static u64 efi_var_store_size;
-static u64 efi_var_remaining_size;
-static u64 efi_var_max_var_size;
-static u64 boot_used_size;
-static u64 boot_var_size;
-static u64 active_size;
-
 unsigned long x86_efi_facility;
 
 /*
@@ -188,53 +180,8 @@ static efi_status_t virt_efi_get_next_variable(unsigned long *name_size,
 					       efi_char16_t *name,
 					       efi_guid_t *vendor)
 {
-	efi_status_t status;
-	static bool finished = false;
-	static u64 var_size;
-
-	status = efi_call_virt3(get_next_variable,
-				name_size, name, vendor);
-
-	if (status == EFI_NOT_FOUND) {
-		finished = true;
-		if (var_size < boot_used_size) {
-			boot_var_size = boot_used_size - var_size;
-			active_size += boot_var_size;
-		} else {
-			printk(KERN_WARNING FW_BUG "efi: Inconsistent initial sizes\n");
-		}
-	}
-
-	if (boot_used_size && !finished) {
-		unsigned long size = 0;
-		u32 attr;
-		efi_status_t s;
-		void *tmp;
-
-		s = virt_efi_get_variable(name, vendor, &attr, &size, NULL);
-
-		if (s != EFI_BUFFER_TOO_SMALL || !size)
-			return status;
-
-		tmp = kmalloc(size, GFP_ATOMIC);
-
-		if (!tmp)
-			return status;
-
-		s = virt_efi_get_variable(name, vendor, &attr, &size, tmp);
-
-		if (s == EFI_SUCCESS && (attr & EFI_VARIABLE_NON_VOLATILE)) {
-			var_size += size;
-			var_size += ucs2_strsize(name, 1024);
-			active_size += size;
-			active_size += VAR_METADATA_SIZE;
-			active_size += ucs2_strsize(name, 1024);
-		}
-
-		kfree(tmp);
-	}
-
-	return status;
+	return efi_call_virt3(get_next_variable,
+			      name_size, name, vendor);
 }
 
 static efi_status_t virt_efi_set_variable(efi_char16_t *name,
@@ -243,34 +190,9 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name,
 					  efi_guid_t *vendor,
 					  u32 attr,
 					  unsigned long data_size,
 					  void *data)
 {
-	efi_status_t status;
-	u32 orig_attr = 0;
-	unsigned long orig_size = 0;
-
-	status = virt_efi_get_variable(name, vendor, &orig_attr, &orig_size,
-				       NULL);
-
-	if (status != EFI_BUFFER_TOO_SMALL)
-		orig_size = 0;
-
-	status = efi_call_virt5(set_variable,
-				name, vendor, attr,
-				data_size, data);
-
-	if (status == EFI_SUCCESS) {
-		if (orig_size) {
-			active_size -= orig_size;
-			active_size -= ucs2_strsize(name, 1024);
-			active_size -= VAR_METADATA_SIZE;
-		}
-		if (data_size) {
-			active_size += data_size;
-			active_size += ucs2_strsize(name, 1024);
-			active_size += VAR_METADATA_SIZE;
-		}
-	}
-
-	return status;
+	return efi_call_virt5(set_variable,
+			      name, vendor, attr,
+			      data_size, data);
 }
 
 static efi_status_t virt_efi_query_variable_info(u32 attr,
@@ -786,9 +708,6 @@ void __init efi_init(void)
 	char vendor[100] = "unknown";
 	int i = 0;
 	void *tmp;
-	struct setup_data *data;
-	struct efi_var_bootdata *efi_var_data;
-	u64 pa_data;
 
 #ifdef CONFIG_X86_32
 	if (boot_params.efi_info.efi_systab_hi ||
@@ -806,22 +725,6 @@ void __init efi_init(void)
 	if (efi_systab_init(efi_phys.systab))
 		return;
 
-	pa_data = boot_params.hdr.setup_data;
-	while (pa_data) {
-		data = early_ioremap(pa_data, sizeof(*efi_var_data));
-		if (data->type == SETUP_EFI_VARS) {
-			efi_var_data = (struct efi_var_bootdata *)data;
-
-			efi_var_store_size = efi_var_data->store_size;
-			efi_var_remaining_size = efi_var_data->remaining_size;
-			efi_var_max_var_size = efi_var_data->max_var_size;
-		}
-		pa_data = data->next;
-		early_iounmap(data, sizeof(*efi_var_data));
-	}
-
-	boot_used_size = efi_var_store_size - efi_var_remaining_size;
-
 	set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility);
 
 	/*
@@ -1085,6 +988,13 @@ void __init efi_enter_virtual_mode(void)
 		runtime_code_page_mkexec();
 
 	kfree(new_memmap);
+
+	/* clean DUMMY object */
+	efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID,
+			 EFI_VARIABLE_NON_VOLATILE |
+			 EFI_VARIABLE_BOOTSERVICE_ACCESS |
+			 EFI_VARIABLE_RUNTIME_ACCESS,
+			 0, NULL);
 }
 
 /*
@@ -1136,33 +1046,70 @@ efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
 	efi_status_t status;
 	u64 storage_size, remaining_size, max_size;
 
+	if (!(attributes & EFI_VARIABLE_NON_VOLATILE))
+		return 0;
+
 	status = efi.query_variable_info(attributes, &storage_size,
 					 &remaining_size, &max_size);
 	if (status != EFI_SUCCESS)
 		return status;
 
-	if (!max_size && remaining_size > size)
-		printk_once(KERN_ERR FW_BUG "Broken EFI implementation"
-			    " is returning MaxVariableSize=0\n");
 	/*
 	 * Some firmware implementations refuse to boot if there's insufficient
 	 * space in the variable store. We account for that by refusing the
 	 * write if permitting it would reduce the available space to under
-	 * 50%. However, some firmware won't reclaim variable space until
-	 * after the used (not merely the actively used) space drops below
-	 * a threshold. We can approximate that case with the value calculated
-	 * above. If both the firmware and our calculations indicate that the
-	 * available space would drop below 50%, refuse the write.
+	 * 5KB. This figure was provided by Samsung, so should be safe.
 	 */
+	if ((remaining_size - size < EFI_MIN_RESERVE) &&
+	    !efi_no_storage_paranoia) {
+
+		/*
+		 * Triggering garbage collection may require that the firmware
+		 * generate a real EFI_OUT_OF_RESOURCES error. We can force
+		 * that by attempting to use more space than is available.
+		 */
+		unsigned long dummy_size = remaining_size + 1024;
+		void *dummy = kzalloc(dummy_size, GFP_ATOMIC);
+
+		if (!dummy)
+			return EFI_OUT_OF_RESOURCES;
+
+		status = efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID,
+					  EFI_VARIABLE_NON_VOLATILE |
+					  EFI_VARIABLE_BOOTSERVICE_ACCESS |
+					  EFI_VARIABLE_RUNTIME_ACCESS,
+					  dummy_size, dummy);
+
+		if (status == EFI_SUCCESS) {
+			/*
+			 * This should have failed, so if it didn't make sure
+			 * that we delete it...
+			 */
+			efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID,
+					 EFI_VARIABLE_NON_VOLATILE |
+					 EFI_VARIABLE_BOOTSERVICE_ACCESS |
+					 EFI_VARIABLE_RUNTIME_ACCESS,
+					 0, dummy);
+		}
+
+		kfree(dummy);
 
-	if (!storage_size || size > remaining_size ||
-	    (max_size && size > max_size))
-		return EFI_OUT_OF_RESOURCES;
+		/*
+		 * The runtime code may now have triggered a garbage collection
+		 * run, so check the variable info again
+		 */
+		status = efi.query_variable_info(attributes, &storage_size,
+						 &remaining_size, &max_size);
 
-	if (!efi_no_storage_paranoia &&
-	    ((active_size + size + VAR_METADATA_SIZE > storage_size / 2) &&
-	    (remaining_size - size < storage_size / 2)))
-		return EFI_OUT_OF_RESOURCES;
+		if (status != EFI_SUCCESS)
+			return status;
+
+		/*
+		 * There still isn't enough room, so return an error
+		 */
+		if (remaining_size - size < EFI_MIN_RESERVE)
+			return EFI_OUT_OF_RESOURCES;
+	}
 
 	return EFI_SUCCESS;
 }
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index 590be109089..f7bab68a4b8 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -42,9 +42,6 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = {
 	"^(xen_irq_disable_direct_reloc$|"
 	"xen_save_fl_direct_reloc$|"
 	"VDSO|"
-#if ELF_BITS == 64
-	"__vvar_page|"
-#endif
 	"__crc_)",
 
 /*
@@ -72,6 +69,7 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = {
 	"__per_cpu_load|"
 	"init_per_cpu__.*|"
 	"__end_rodata_hpage_align|"
+	"__vvar_page|"
 #endif
 	"_end)$"
 };
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index fb44426fe93..d99cae8147d 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -17,6 +17,7 @@
 #include <linux/slab.h>
 #include <linux/smp.h>
 #include <linux/irq_work.h>
+#include <linux/tick.h>
 
 #include <asm/paravirt.h>
 #include <asm/desc.h>
@@ -447,6 +448,13 @@ static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */
 	play_dead_common();
 	HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
 	cpu_bringup();
+	/*
+	 * commit 4b0c0f294 (tick: Cleanup NOHZ per cpu data on cpu down)
+	 * clears certain data that the cpu_idle loop (which called us
+	 * and that we return from) expects. The only way to get that
+	 * data back is to call:
+	 */
+	tick_nohz_idle_enter();
 }
 
 #else /* !CONFIG_HOTPLUG_CPU */
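Stepping back, the efi_query_variable_store() rewrite in the efi.c diff above reduces to a small algorithm: insist on EFI_MIN_RESERVE bytes staying free; when a write would dip below that, deliberately attempt an oversized dummy write so the firmware returns a genuine EFI_OUT_OF_RESOURCES and, on some implementations, garbage-collects deleted-variable space on that failure path; then re-query and decide. A toy model of the flow under assumed numbers (remaining()/set_variable() below are mocks, not the EFI interfaces):

#include <stdbool.h>
#include <stdio.h>

#define MIN_RESERVE 5120UL	/* mirrors EFI_MIN_RESERVE above */

/* Mock variable store: "dead" space is reclaimed only when a write fails. */
static unsigned long total = 65536, live = 30000, dead = 31000;

static unsigned long remaining(void)
{
	return total - live - dead;
}

static int set_variable(unsigned long size)
{
	if (size > remaining()) {
		dead = 0;	/* firmware garbage-collects on the failure path */
		return -1;	/* stand-in for EFI_OUT_OF_RESOURCES */
	}
	live += size;
	return 0;
}

static bool write_permitted(unsigned long size)
{
	if (remaining() - size >= MIN_RESERVE)
		return true;

	/* Ask for more than is free to provoke a failing, GC-triggering write. */
	unsigned long dummy_size = remaining() + 1024;

	if (set_variable(dummy_size) == 0)
		live -= dummy_size;	/* unexpectedly fit: delete the dummy again */

	return remaining() - size >= MIN_RESERVE;	/* re-check after reclaim */
}

int main(void)
{
	printf("free before: %lu\n", remaining());
	printf("4KB write permitted: %s\n", write_permitted(4096) ? "yes" : "no");
	printf("free after: %lu\n", remaining());
	return 0;
}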