author     James Bottomley <JBottomley@Parallels.com>   2013-06-26 23:08:22 -0700
committer  James Bottomley <JBottomley@Parallels.com>   2013-06-26 23:08:22 -0700
commit     a9e94ec3504ead4b87a929bc57f5aa99a6590437 (patch)
tree       b6e7aaec0e844dc066e93296a85e5769542ef525 /arch/mips
parent     36a279686bc02d340eb213a9f2a0b67e50de64b4 (diff)
parent     2884d4230867c8a46cf701214051e923301e7429 (diff)
Merge tag 'fcoe1' into fixes
This patch fixes a critical bug introduced in 3.9 related to VLAN
tagging of FCoE frames.
Diffstat (limited to 'arch/mips')
59 files changed, 988 insertions, 622 deletions
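
Among the hunks below, the arch/mips/include/asm/mmu_context.h change replaces the run-time-patched ASID_INC/ASID_MASK macros with plain per-CPU constants and derives ASID_VERSION_MASK and ASID_FIRST_VERSION from ASID_MASK. As a sanity check of that arithmetic, here is a minimal standalone sketch (not part of the patch) that evaluates the new expressions for the ASID_MASK values the hunk defines; it reproduces the per-family ASID_FIRST_VERSION_* constants (0x1000 and 0x100) that the patch removes.

/* Standalone sketch, not part of the patch: evaluates the new ASID
 * macros from the mmu_context.h hunk below for the ASID_MASK values
 * it defines, reproducing the removed ASID_FIRST_VERSION_* constants. */
#include <stdio.h>

static void show(const char *cpu, unsigned long asid_mask)
{
	/* "All unused by hardware upper bits will be considered
	 *  as a software asid extension." */
	unsigned long version_mask = ~(asid_mask | (asid_mask - 1));
	unsigned long first_version = ~version_mask + 1;

	printf("%-12s ASID_MASK=%#lx  ASID_VERSION_MASK=%#lx  ASID_FIRST_VERSION=%#lx\n",
	       cpu, asid_mask, version_mask, first_version);
}

int main(void)
{
	show("R3000/TX39", 0xfc0);  /* first version 0x1000 */
	show("R8000",      0xff0);  /* first version 0x1000 */
	show("default",    0xff);   /* first version 0x100  */
	return 0;
}
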
diff --git a/arch/mips/alchemy/board-gpr.c b/arch/mips/alchemy/board-gpr.c index cb0f6afb738..9edc35ff8cf 100644 --- a/arch/mips/alchemy/board-gpr.c +++ b/arch/mips/alchemy/board-gpr.c @@ -31,6 +31,7 @@ #include <linux/i2c.h> #include <linux/i2c-gpio.h> #include <asm/bootinfo.h> +#include <asm/idle.h> #include <asm/reboot.h> #include <asm/mach-au1x00/au1000.h> #include <prom.h> diff --git a/arch/mips/alchemy/common/time.c b/arch/mips/alchemy/common/time.c index 38afb11ba2c..93fa586d52e 100644 --- a/arch/mips/alchemy/common/time.c +++ b/arch/mips/alchemy/common/time.c @@ -36,6 +36,7 @@ #include <linux/interrupt.h> #include <linux/spinlock.h> +#include <asm/idle.h> #include <asm/processor.h> #include <asm/time.h> #include <asm/mach-au1x00/au1000.h> diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c index a0233a2c198..8be4e856b8b 100644 --- a/arch/mips/ath79/setup.c +++ b/arch/mips/ath79/setup.c @@ -19,6 +19,7 @@ #include <linux/clk.h> #include <asm/bootinfo.h> +#include <asm/idle.h> #include <asm/time.h> /* for mips_hpt_frequency */ #include <asm/reboot.h> /* for _machine_{restart,halt} */ #include <asm/mips_machine.h> diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c index b0baa299f89..01b1b3f94fe 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c @@ -428,13 +428,16 @@ static void octeon_restart(char *command) */ static void octeon_kill_core(void *arg) { - mb(); - if (octeon_is_simulation()) { - /* The simulator needs the watchdog to stop for dead cores */ - cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0); + if (octeon_is_simulation()) /* A break instruction causes the simulator stop a core */ - asm volatile ("sync\nbreak"); - } + asm volatile ("break" ::: "memory"); + + local_irq_disable(); + /* Disable watchdog on this core. */ + cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0); + /* Spin in a low power mode. 
*/ + while (true) + asm volatile ("wait" ::: "memory"); } diff --git a/arch/mips/cobalt/reset.c b/arch/mips/cobalt/reset.c index 516b4428df4..4eedd481dd0 100644 --- a/arch/mips/cobalt/reset.c +++ b/arch/mips/cobalt/reset.c @@ -12,6 +12,7 @@ #include <linux/io.h> #include <linux/leds.h> +#include <asm/idle.h> #include <asm/processor.h> #include <cobalt.h> diff --git a/arch/mips/configs/db1000_defconfig b/arch/mips/configs/db1000_defconfig index face9d26e6d..bac26b971c5 100644 --- a/arch/mips/configs/db1000_defconfig +++ b/arch/mips/configs/db1000_defconfig @@ -228,7 +228,6 @@ CONFIG_HIDRAW=y CONFIG_USB_HID=y CONFIG_USB_SUPPORT=y CONFIG_USB=y -CONFIG_USB_SUSPEND=y CONFIG_USB_EHCI_HCD=y CONFIG_USB_EHCI_ROOT_HUB_TT=y CONFIG_USB_EHCI_TT_NEWSCHED=y diff --git a/arch/mips/configs/db1235_defconfig b/arch/mips/configs/db1235_defconfig index 14752dde754..e2b4ad55462 100644 --- a/arch/mips/configs/db1235_defconfig +++ b/arch/mips/configs/db1235_defconfig @@ -344,7 +344,6 @@ CONFIG_UHID=y CONFIG_USB_HIDDEV=y CONFIG_USB=y CONFIG_USB_DYNAMIC_MINORS=y -CONFIG_USB_SUSPEND=y CONFIG_USB_EHCI_HCD=y CONFIG_USB_EHCI_HCD_PLATFORM=y CONFIG_USB_EHCI_ROOT_HUB_TT=y diff --git a/arch/mips/configs/lemote2f_defconfig b/arch/mips/configs/lemote2f_defconfig index b6acd2f256b..343bebc4b63 100644 --- a/arch/mips/configs/lemote2f_defconfig +++ b/arch/mips/configs/lemote2f_defconfig @@ -300,7 +300,6 @@ CONFIG_USB=y CONFIG_USB_DEVICEFS=y # CONFIG_USB_DEVICE_CLASS is not set CONFIG_USB_DYNAMIC_MINORS=y -CONFIG_USB_SUSPEND=y CONFIG_USB_OTG_WHITELIST=y CONFIG_USB_MON=y CONFIG_USB_EHCI_HCD=y diff --git a/arch/mips/include/asm/clock.h b/arch/mips/include/asm/clock.h index c9456e7a728..778e32d817b 100644 --- a/arch/mips/include/asm/clock.h +++ b/arch/mips/include/asm/clock.h @@ -6,8 +6,6 @@ #include <linux/seq_file.h> #include <linux/clk.h> -extern void (*cpu_wait) (void); - struct clk; struct clk_ops { diff --git a/arch/mips/include/asm/idle.h b/arch/mips/include/asm/idle.h new file mode 100644 index 00000000000..d192158886b --- /dev/null +++ b/arch/mips/include/asm/idle.h @@ -0,0 +1,23 @@ +#ifndef __ASM_IDLE_H +#define __ASM_IDLE_H + +#include <linux/linkage.h> + +extern void (*cpu_wait)(void); +extern void r4k_wait(void); +extern asmlinkage void __r4k_wait(void); +extern void r4k_wait_irqoff(void); +extern void __pastwait(void); + +static inline int using_rollback_handler(void) +{ + return cpu_wait == r4k_wait; +} + +static inline int address_is_in_r4k_wait_irqoff(unsigned long addr) +{ + return addr >= (unsigned long)r4k_wait_irqoff && + addr < (unsigned long)__pastwait; +} + +#endif /* __ASM_IDLE_H */ diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h index 1be13727323..b7e59853fd3 100644 --- a/arch/mips/include/asm/io.h +++ b/arch/mips/include/asm/io.h @@ -118,7 +118,7 @@ static inline void set_io_port_base(unsigned long base) */ static inline unsigned long virt_to_phys(volatile const void *address) { - return (unsigned long)address - PAGE_OFFSET + PHYS_OFFSET; + return __pa(address); } /* diff --git a/arch/mips/include/asm/kvm.h b/arch/mips/include/asm/kvm.h deleted file mode 100644 index 85789eacbf1..00000000000 --- a/arch/mips/include/asm/kvm.h +++ /dev/null @@ -1,55 +0,0 @@ -/* -* This file is subject to the terms and conditions of the GNU General Public -* License. See the file "COPYING" in the main directory of this archive -* for more details. -* -* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 
-* Authors: Sanjay Lal <sanjayl@kymasys.com> -*/ - -#ifndef __LINUX_KVM_MIPS_H -#define __LINUX_KVM_MIPS_H - -#include <linux/types.h> - -#define __KVM_MIPS - -#define N_MIPS_COPROC_REGS 32 -#define N_MIPS_COPROC_SEL 8 - -/* for KVM_GET_REGS and KVM_SET_REGS */ -struct kvm_regs { - __u32 gprs[32]; - __u32 hi; - __u32 lo; - __u32 pc; - - __u32 cp0reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL]; -}; - -/* for KVM_GET_SREGS and KVM_SET_SREGS */ -struct kvm_sregs { -}; - -/* for KVM_GET_FPU and KVM_SET_FPU */ -struct kvm_fpu { -}; - -struct kvm_debug_exit_arch { -}; - -/* for KVM_SET_GUEST_DEBUG */ -struct kvm_guest_debug_arch { -}; - -struct kvm_mips_interrupt { - /* in */ - __u32 cpu; - __u32 irq; -}; - -/* definition of registers in kvm_run */ -struct kvm_sync_regs { -}; - -#endif /* __LINUX_KVM_MIPS_H */ diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index e68781e1838..4d6fa0bf130 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -336,7 +336,7 @@ enum emulation_result { #define VPN2_MASK 0xffffe000 #define TLB_IS_GLOBAL(x) (((x).tlb_lo0 & MIPS3_PG_G) && ((x).tlb_lo1 & MIPS3_PG_G)) #define TLB_VPN2(x) ((x).tlb_hi & VPN2_MASK) -#define TLB_ASID(x) (ASID_MASK((x).tlb_hi)) +#define TLB_ASID(x) ((x).tlb_hi & ASID_MASK) #define TLB_IS_VALID(x, va) (((va) & (1 << PAGE_SHIFT)) ? ((x).tlb_lo1 & MIPS3_PG_V) : ((x).tlb_lo0 & MIPS3_PG_V)) struct kvm_mips_tlb { @@ -496,10 +496,6 @@ struct kvm_mips_callbacks { uint32_t cause); int (*irq_clear) (struct kvm_vcpu *vcpu, unsigned int priority, uint32_t cause); - int (*vcpu_ioctl_get_regs) (struct kvm_vcpu *vcpu, - struct kvm_regs *regs); - int (*vcpu_ioctl_set_regs) (struct kvm_vcpu *vcpu, - struct kvm_regs *regs); }; extern struct kvm_mips_callbacks *kvm_mips_callbacks; int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks); diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h index 1554721e480..516e6e9a559 100644 --- a/arch/mips/include/asm/mmu_context.h +++ b/arch/mips/include/asm/mmu_context.h @@ -67,68 +67,45 @@ extern unsigned long pgd_current[]; TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir) #endif #endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/ +#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) -#define ASID_INC(asid) \ -({ \ - unsigned long __asid = asid; \ - __asm__("1:\taddiu\t%0,1\t\t\t\t# patched\n\t" \ - ".section\t__asid_inc,\"a\"\n\t" \ - ".word\t1b\n\t" \ - ".previous" \ - :"=r" (__asid) \ - :"0" (__asid)); \ - __asid; \ -}) -#define ASID_MASK(asid) \ -({ \ - unsigned long __asid = asid; \ - __asm__("1:\tandi\t%0,%1,0xfc0\t\t\t# patched\n\t" \ - ".section\t__asid_mask,\"a\"\n\t" \ - ".word\t1b\n\t" \ - ".previous" \ - :"=r" (__asid) \ - :"r" (__asid)); \ - __asid; \ -}) -#define ASID_VERSION_MASK \ -({ \ - unsigned long __asid; \ - __asm__("1:\taddiu\t%0,$0,0xff00\t\t\t\t# patched\n\t" \ - ".section\t__asid_version_mask,\"a\"\n\t" \ - ".word\t1b\n\t" \ - ".previous" \ - :"=r" (__asid)); \ - __asid; \ -}) -#define ASID_FIRST_VERSION \ -({ \ - unsigned long __asid = asid; \ - __asm__("1:\tli\t%0,0x100\t\t\t\t# patched\n\t" \ - ".section\t__asid_first_version,\"a\"\n\t" \ - ".word\t1b\n\t" \ - ".previous" \ - :"=r" (__asid)); \ - __asid; \ -}) - -#define ASID_FIRST_VERSION_R3000 0x1000 -#define ASID_FIRST_VERSION_R4000 0x100 -#define ASID_FIRST_VERSION_R8000 0x1000 -#define ASID_FIRST_VERSION_RM9000 0x1000 +#define ASID_INC 0x40 +#define ASID_MASK 0xfc0 + +#elif defined(CONFIG_CPU_R8000) + +#define ASID_INC 0x10 +#define 
ASID_MASK 0xff0 + +#elif defined(CONFIG_MIPS_MT_SMTC) + +#define ASID_INC 0x1 +extern unsigned long smtc_asid_mask; +#define ASID_MASK (smtc_asid_mask) +#define HW_ASID_MASK 0xff +/* End SMTC/34K debug hack */ +#else /* FIXME: not correct for R6000 */ + +#define ASID_INC 0x1 +#define ASID_MASK 0xff -#ifdef CONFIG_MIPS_MT_SMTC -#define SMTC_HW_ASID_MASK 0xff -extern unsigned int smtc_asid_mask; #endif #define cpu_context(cpu, mm) ((mm)->context.asid[cpu]) -#define cpu_asid(cpu, mm) ASID_MASK(cpu_context((cpu), (mm))) +#define cpu_asid(cpu, mm) (cpu_context((cpu), (mm)) & ASID_MASK) #define asid_cache(cpu) (cpu_data[cpu].asid_cache) static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) { } +/* + * All unused by hardware upper bits will be considered + * as a software asid extension. + */ +#define ASID_VERSION_MASK ((unsigned long)~(ASID_MASK|(ASID_MASK-1))) +#define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1) + #ifndef CONFIG_MIPS_MT_SMTC /* Normal, classic MIPS get_new_mmu_context */ static inline void @@ -137,10 +114,10 @@ get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) extern void kvm_local_flush_tlb_all(void); unsigned long asid = asid_cache(cpu); - if (!ASID_MASK((asid = ASID_INC(asid)))) { + if (! ((asid += ASID_INC) & ASID_MASK) ) { if (cpu_has_vtag_icache) flush_icache_all(); -#ifdef CONFIG_VIRTUALIZATION +#ifdef CONFIG_KVM kvm_local_flush_tlb_all(); /* start new asid cycle */ #else local_flush_tlb_all(); /* start new asid cycle */ @@ -200,7 +177,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, * free up the ASID value for use and flush any old * instances of it from the TLB. */ - oldasid = ASID_MASK(read_c0_entryhi()); + oldasid = (read_c0_entryhi() & ASID_MASK); if(smtc_live_asid[mytlb][oldasid]) { smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); if(smtc_live_asid[mytlb][oldasid] == 0) @@ -211,7 +188,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, * having ASID_MASK smaller than the hardware maximum, * make sure no "soft" bits become "hard"... 
*/ - write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK) | + write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) | cpu_asid(cpu, next)); ehb(); /* Make sure it propagates to TCStatus */ evpe(mtflags); @@ -264,15 +241,15 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next) #ifdef CONFIG_MIPS_MT_SMTC /* See comments for similar code above */ mtflags = dvpe(); - oldasid = ASID_MASK(read_c0_entryhi()); + oldasid = read_c0_entryhi() & ASID_MASK; if(smtc_live_asid[mytlb][oldasid]) { smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); if(smtc_live_asid[mytlb][oldasid] == 0) smtc_flush_tlb_asid(oldasid); } /* See comments for similar code above */ - write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK) | - cpu_asid(cpu, next)); + write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) | + cpu_asid(cpu, next)); ehb(); /* Make sure it propagates to TCStatus */ evpe(mtflags); #else @@ -309,14 +286,14 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu) #ifdef CONFIG_MIPS_MT_SMTC /* See comments for similar code above */ prevvpe = dvpe(); - oldasid = ASID_MASK(read_c0_entryhi()); + oldasid = (read_c0_entryhi() & ASID_MASK); if (smtc_live_asid[mytlb][oldasid]) { smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); if(smtc_live_asid[mytlb][oldasid] == 0) smtc_flush_tlb_asid(oldasid); } /* See comments for similar code above */ - write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK) + write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) | cpu_asid(cpu, mm)); ehb(); /* Make sure it propagates to TCStatus */ evpe(prevvpe); diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h index eab99e536b5..f59552fae91 100644 --- a/arch/mips/include/asm/page.h +++ b/arch/mips/include/asm/page.h @@ -46,7 +46,6 @@ #endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */ #include <linux/pfn.h> -#include <asm/io.h> extern void build_clear_page(void); extern void build_copy_page(void); @@ -151,6 +150,7 @@ typedef struct { unsigned long pgprot; } pgprot_t; ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET) #endif #define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET)) +#include <asm/io.h> /* * RELOC_HIDE was originally added by 6007b903dfe5f1d13e0c711ac2894bdd4a61b1ad @@ -171,14 +171,13 @@ typedef struct { unsigned long pgprot; } pgprot_t; #ifdef CONFIG_FLATMEM -#define pfn_valid(pfn) \ -({ \ - unsigned long __pfn = (pfn); \ - /* avoid <linux/bootmem.h> include hell */ \ - extern unsigned long min_low_pfn; \ - \ - __pfn >= min_low_pfn && __pfn < max_mapnr; \ -}) +static inline int pfn_valid(unsigned long pfn) +{ + /* avoid <linux/mm.h> include hell */ + extern unsigned long max_mapnr; + + return pfn >= ARCH_PFN_OFFSET && pfn < max_mapnr; +} #elif defined(CONFIG_SPARSEMEM) diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h index 71686c897de..1470b7b68b0 100644 --- a/arch/mips/include/asm/processor.h +++ b/arch/mips/include/asm/processor.h @@ -28,7 +28,6 @@ /* * System setup and hardware flags.. */ -extern void (*cpu_wait)(void); extern unsigned int vced_count, vcei_count; diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h index a3186f2bb8a..5e6cd094739 100644 --- a/arch/mips/include/asm/ptrace.h +++ b/arch/mips/include/asm/ptrace.h @@ -16,6 +16,38 @@ #include <asm/isadep.h> #include <uapi/asm/ptrace.h> +/* + * This struct defines the way the registers are stored on the stack during a + * system call/exception. As usual the registers k0/k1 aren't being saved. 
+ */ +struct pt_regs { +#ifdef CONFIG_32BIT + /* Pad bytes for argument save space on the stack. */ + unsigned long pad0[6]; +#endif + + /* Saved main processor registers. */ + unsigned long regs[32]; + + /* Saved special registers. */ + unsigned long cp0_status; + unsigned long hi; + unsigned long lo; +#ifdef CONFIG_CPU_HAS_SMARTMIPS + unsigned long acx; +#endif + unsigned long cp0_badvaddr; + unsigned long cp0_cause; + unsigned long cp0_epc; +#ifdef CONFIG_MIPS_MT_SMTC + unsigned long cp0_tcstatus; +#endif /* CONFIG_MIPS_MT_SMTC */ +#ifdef CONFIG_CPU_CAVIUM_OCTEON + unsigned long long mpl[3]; /* MTM{0,1,2} */ + unsigned long long mtp[3]; /* MTP{0,1,2} */ +#endif +} __aligned(8); + struct task_struct; extern int ptrace_getregs(struct task_struct *child, __s64 __user *data); diff --git a/arch/mips/include/uapi/asm/kvm.h b/arch/mips/include/uapi/asm/kvm.h new file mode 100644 index 00000000000..f09ff5ae205 --- /dev/null +++ b/arch/mips/include/uapi/asm/kvm.h @@ -0,0 +1,135 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. + * Copyright (C) 2013 Cavium, Inc. + * Authors: Sanjay Lal <sanjayl@kymasys.com> + */ + +#ifndef __LINUX_KVM_MIPS_H +#define __LINUX_KVM_MIPS_H + +#include <linux/types.h> + +/* + * KVM MIPS specific structures and definitions. + * + * Some parts derived from the x86 version of this file. + */ + +/* + * for KVM_GET_REGS and KVM_SET_REGS + * + * If Config[AT] is zero (32-bit CPU), the register contents are + * stored in the lower 32-bits of the struct kvm_regs fields and sign + * extended to 64-bits. + */ +struct kvm_regs { + /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */ + __u64 gpr[32]; + __u64 hi; + __u64 lo; + __u64 pc; +}; + +/* + * for KVM_GET_FPU and KVM_SET_FPU + * + * If Status[FR] is zero (32-bit FPU), the upper 32-bits of the FPRs + * are zero filled. + */ +struct kvm_fpu { + __u64 fpr[32]; + __u32 fir; + __u32 fccr; + __u32 fexr; + __u32 fenr; + __u32 fcsr; + __u32 pad; +}; + + +/* + * For MIPS, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access CP0 + * registers. The id field is broken down as follows: + * + * bits[2..0] - Register 'sel' index. + * bits[7..3] - Register 'rd' index. + * bits[15..8] - Must be zero. + * bits[31..16] - 1 -> CP0 registers. + * bits[51..32] - Must be zero. + * bits[63..52] - As per linux/kvm.h + * + * Other sets registers may be added in the future. Each set would + * have its own identifier in bits[31..16]. + * + * The registers defined in struct kvm_regs are also accessible, the + * id values for these are below. 
+ */ + +#define KVM_REG_MIPS_R0 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0) +#define KVM_REG_MIPS_R1 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 1) +#define KVM_REG_MIPS_R2 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 2) +#define KVM_REG_MIPS_R3 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 3) +#define KVM_REG_MIPS_R4 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 4) +#define KVM_REG_MIPS_R5 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 5) +#define KVM_REG_MIPS_R6 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 6) +#define KVM_REG_MIPS_R7 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 7) +#define KVM_REG_MIPS_R8 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 8) +#define KVM_REG_MIPS_R9 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 9) +#define KVM_REG_MIPS_R10 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 10) +#define KVM_REG_MIPS_R11 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 11) +#define KVM_REG_MIPS_R12 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 12) +#define KVM_REG_MIPS_R13 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 13) +#define KVM_REG_MIPS_R14 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 14) +#define KVM_REG_MIPS_R15 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 15) +#define KVM_REG_MIPS_R16 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 16) +#define KVM_REG_MIPS_R17 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 17) +#define KVM_REG_MIPS_R18 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 18) +#define KVM_REG_MIPS_R19 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 19) +#define KVM_REG_MIPS_R20 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 20) +#define KVM_REG_MIPS_R21 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 21) +#define KVM_REG_MIPS_R22 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 22) +#define KVM_REG_MIPS_R23 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 23) +#define KVM_REG_MIPS_R24 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 24) +#define KVM_REG_MIPS_R25 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 25) +#define KVM_REG_MIPS_R26 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 26) +#define KVM_REG_MIPS_R27 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 27) +#define KVM_REG_MIPS_R28 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 28) +#define KVM_REG_MIPS_R29 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 29) +#define KVM_REG_MIPS_R30 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 30) +#define KVM_REG_MIPS_R31 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 31) + +#define KVM_REG_MIPS_HI (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 32) +#define KVM_REG_MIPS_LO (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 33) +#define KVM_REG_MIPS_PC (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 34) + +/* + * KVM MIPS specific structures and definitions + * + */ +struct kvm_debug_exit_arch { + __u64 epc; +}; + +/* for KVM_SET_GUEST_DEBUG */ +struct kvm_guest_debug_arch { +}; + +/* definition of registers in kvm_run */ +struct kvm_sync_regs { +}; + +/* dummy definition */ +struct kvm_sregs { +}; + +struct kvm_mips_interrupt { + /* in */ + __u32 cpu; + __u32 irq; +}; + +#endif /* __LINUX_KVM_MIPS_H */ diff --git a/arch/mips/include/uapi/asm/ptrace.h b/arch/mips/include/uapi/asm/ptrace.h index 4d58d846870..b26f7e31727 100644 --- a/arch/mips/include/uapi/asm/ptrace.h +++ b/arch/mips/include/uapi/asm/ptrace.h @@ -22,16 +22,12 @@ #define DSP_CONTROL 77 #define ACX 78 +#ifndef __KERNEL__ /* * This struct defines the way the registers are stored on the stack during a * system call/exception. As usual the registers k0/k1 aren't being saved. */ struct pt_regs { -#ifdef CONFIG_32BIT - /* Pad bytes for argument save space on the stack. */ - unsigned long pad0[6]; -#endif - /* Saved main processor registers. 
*/ unsigned long regs[32]; @@ -39,20 +35,11 @@ struct pt_regs { unsigned long cp0_status; unsigned long hi; unsigned long lo; -#ifdef CONFIG_CPU_HAS_SMARTMIPS - unsigned long acx; -#endif unsigned long cp0_badvaddr; unsigned long cp0_cause; unsigned long cp0_epc; -#ifdef CONFIG_MIPS_MT_SMTC - unsigned long cp0_tcstatus; -#endif /* CONFIG_MIPS_MT_SMTC */ -#ifdef CONFIG_CPU_CAVIUM_OCTEON - unsigned long long mpl[3]; /* MTM{0,1,2} */ - unsigned long long mtp[3]; /* MTP{0,1,2} */ -#endif } __attribute__ ((aligned (8))); +#endif /* __KERNEL__ */ /* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */ #define PTRACE_GETREGS 12 diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h index 16338b84fa7..1dee279f966 100644 --- a/arch/mips/include/uapi/asm/unistd.h +++ b/arch/mips/include/uapi/asm/unistd.h @@ -694,16 +694,17 @@ #define __NR_process_vm_writev (__NR_Linux + 305) #define __NR_kcmp (__NR_Linux + 306) #define __NR_finit_module (__NR_Linux + 307) +#define __NR_getdents64 (__NR_Linux + 308) /* * Offset of the last Linux 64-bit flavoured syscall */ -#define __NR_Linux_syscalls 307 +#define __NR_Linux_syscalls 308 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ #define __NR_64_Linux 5000 -#define __NR_64_Linux_syscalls 307 +#define __NR_64_Linux_syscalls 308 #if _MIPS_SIM == _MIPS_SIM_NABI32 diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index 6ad9e04bdf6..423d871a946 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile @@ -4,7 +4,7 @@ extra-y := head.o vmlinux.lds -obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \ +obj-y += cpu-probe.o branch.o entry.o genex.o idle.o irq.o process.o \ prom.o ptrace.o reset.o setup.o signal.o syscall.o \ time.o topology.o traps.o unaligned.o watch.o vdso.o diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c index e06f777e9c4..1188e00bb12 100644 --- a/arch/mips/kernel/binfmt_elfn32.c +++ b/arch/mips/kernel/binfmt_elfn32.c @@ -119,4 +119,15 @@ MODULE_AUTHOR("Ralf Baechle (ralf@linux-mips.org)"); #undef TASK_SIZE #define TASK_SIZE TASK_SIZE32 +#undef cputime_to_timeval +#define cputime_to_timeval cputime_to_compat_timeval +static __inline__ void +cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value) +{ + unsigned long jiffies = cputime_to_jiffies(cputime); + + value->tv_usec = (jiffies % HZ) * (1000000L / HZ); + value->tv_sec = jiffies / HZ; +} + #include "../../../fs/binfmt_elf.c" diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c index 97c5a1668e5..202e581e609 100644 --- a/arch/mips/kernel/binfmt_elfo32.c +++ b/arch/mips/kernel/binfmt_elfo32.c @@ -162,4 +162,15 @@ MODULE_AUTHOR("Ralf Baechle (ralf@linux-mips.org)"); #undef TASK_SIZE #define TASK_SIZE TASK_SIZE32 +#undef cputime_to_timeval +#define cputime_to_timeval cputime_to_compat_timeval +static __inline__ void +cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value) +{ + unsigned long jiffies = cputime_to_jiffies(cputime); + + value->tv_usec = (jiffies % HZ) * (1000000L / HZ); + value->tv_sec = jiffies / HZ; +} + #include "../../../fs/binfmt_elf.c" diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 4bbffdb9024..c6568bf4b1b 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -27,105 +27,6 @@ #include <asm/spram.h> #include <asm/uaccess.h> -/* - * Not all of the MIPS CPUs have the "wait" instruction available. 
Moreover, - * the implementation of the "wait" feature differs between CPU families. This - * points to the function that implements CPU specific wait. - * The wait instruction stops the pipeline and reduces the power consumption of - * the CPU very much. - */ -void (*cpu_wait)(void); -EXPORT_SYMBOL(cpu_wait); - -static void r3081_wait(void) -{ - unsigned long cfg = read_c0_conf(); - write_c0_conf(cfg | R30XX_CONF_HALT); -} - -static void r39xx_wait(void) -{ - local_irq_disable(); - if (!need_resched()) - write_c0_conf(read_c0_conf() | TX39_CONF_HALT); - local_irq_enable(); -} - -extern void r4k_wait(void); - -/* - * This variant is preferable as it allows testing need_resched and going to - * sleep depending on the outcome atomically. Unfortunately the "It is - * implementation-dependent whether the pipeline restarts when a non-enabled - * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes - * using this version a gamble. - */ -void r4k_wait_irqoff(void) -{ - local_irq_disable(); - if (!need_resched()) - __asm__(" .set push \n" - " .set mips3 \n" - " wait \n" - " .set pop \n"); - local_irq_enable(); - __asm__(" .globl __pastwait \n" - "__pastwait: \n"); -} - -/* - * The RM7000 variant has to handle erratum 38. The workaround is to not - * have any pending stores when the WAIT instruction is executed. - */ -static void rm7k_wait_irqoff(void) -{ - local_irq_disable(); - if (!need_resched()) - __asm__( - " .set push \n" - " .set mips3 \n" - " .set noat \n" - " mfc0 $1, $12 \n" - " sync \n" - " mtc0 $1, $12 # stalls until W stage \n" - " wait \n" - " mtc0 $1, $12 # stalls until W stage \n" - " .set pop \n"); - local_irq_enable(); -} - -/* - * The Au1xxx wait is available only if using 32khz counter or - * external timer source, but specifically not CP0 Counter. - * alchemy/common/time.c may override cpu_wait! 
- */ -static void au1k_wait(void) -{ - __asm__(" .set mips3 \n" - " cache 0x14, 0(%0) \n" - " cache 0x14, 32(%0) \n" - " sync \n" - " nop \n" - " wait \n" - " nop \n" - " nop \n" - " nop \n" - " nop \n" - " .set mips0 \n" - : : "r" (au1k_wait)); -} - -static int __initdata nowait; - -static int __init wait_disable(char *s) -{ - nowait = 1; - - return 1; -} - -__setup("nowait", wait_disable); - static int __cpuinitdata mips_fpu_disabled; static int __init fpu_disable(char *s) @@ -150,105 +51,6 @@ static int __init dsp_disable(char *s) __setup("nodsp", dsp_disable); -void __init check_wait(void) -{ - struct cpuinfo_mips *c = ¤t_cpu_data; - - if (nowait) { - printk("Wait instruction disabled.\n"); - return; - } - - switch (c->cputype) { - case CPU_R3081: - case CPU_R3081E: - cpu_wait = r3081_wait; - break; - case CPU_TX3927: - cpu_wait = r39xx_wait; - break; - case CPU_R4200: -/* case CPU_R4300: */ - case CPU_R4600: - case CPU_R4640: - case CPU_R4650: - case CPU_R4700: - case CPU_R5000: - case CPU_R5500: - case CPU_NEVADA: - case CPU_4KC: - case CPU_4KEC: - case CPU_4KSC: - case CPU_5KC: - case CPU_25KF: - case CPU_PR4450: - case CPU_BMIPS3300: - case CPU_BMIPS4350: - case CPU_BMIPS4380: - case CPU_BMIPS5000: - case CPU_CAVIUM_OCTEON: - case CPU_CAVIUM_OCTEON_PLUS: - case CPU_CAVIUM_OCTEON2: - case CPU_JZRISC: - case CPU_LOONGSON1: - case CPU_XLR: - case CPU_XLP: - cpu_wait = r4k_wait; - break; - - case CPU_RM7000: - cpu_wait = rm7k_wait_irqoff; - break; - - case CPU_M14KC: - case CPU_M14KEC: - case CPU_24K: - case CPU_34K: - case CPU_1004K: - cpu_wait = r4k_wait; - if (read_c0_config7() & MIPS_CONF7_WII) - cpu_wait = r4k_wait_irqoff; - break; - - case CPU_74K: - cpu_wait = r4k_wait; - if ((c->processor_id & 0xff) >= PRID_REV_ENCODE_332(2, 1, 0)) - cpu_wait = r4k_wait_irqoff; - break; - - case CPU_TX49XX: - cpu_wait = r4k_wait_irqoff; - break; - case CPU_ALCHEMY: - cpu_wait = au1k_wait; - break; - case CPU_20KC: - /* - * WAIT on Rev1.0 has E1, E2, E3 and E16. - * WAIT on Rev2.0 and Rev3.0 has E16. - * Rev3.1 WAIT is nop, why bother - */ - if ((c->processor_id & 0xff) <= 0x64) - break; - - /* - * Another rev is incremeting c0_count at a reduced clock - * rate while in WAIT mode. So we basically have the choice - * between using the cp0 timer as clocksource or avoiding - * the WAIT instruction. Until more details are known, - * disable the use of WAIT for 20Kc entirely. 
- cpu_wait = r4k_wait; - */ - break; - case CPU_RM9000: - if ((c->processor_id & 0x00ff) >= 0x40) - cpu_wait = r4k_wait; - break; - default: - break; - } -} - static inline void check_errata(void) { struct cpuinfo_mips *c = ¤t_cpu_data; diff --git a/arch/mips/kernel/crash_dump.c b/arch/mips/kernel/crash_dump.c index 35bed0d2342..3be9e7bb30f 100644 --- a/arch/mips/kernel/crash_dump.c +++ b/arch/mips/kernel/crash_dump.c @@ -2,6 +2,7 @@ #include <linux/bootmem.h> #include <linux/crash_dump.h> #include <asm/uaccess.h> +#include <linux/slab.h> static int __init parse_savemaxmem(char *p) { diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c index cf5509f13dd..dba90ec0dc3 100644 --- a/arch/mips/kernel/ftrace.c +++ b/arch/mips/kernel/ftrace.c @@ -25,12 +25,16 @@ #define MCOUNT_OFFSET_INSNS 4 #endif +#ifdef CONFIG_DYNAMIC_FTRACE + /* Arch override because MIPS doesn't need to run this from stop_machine() */ void arch_ftrace_update_code(int command) { ftrace_modify_all_code(command); } +#endif + /* * Check if the address is in kernel space * diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S index 5c2ba9f08a8..31fa856829c 100644 --- a/arch/mips/kernel/genex.S +++ b/arch/mips/kernel/genex.S @@ -122,7 +122,7 @@ handle_vcei: __FINIT .align 5 /* 32 byte rollback region */ -LEAF(r4k_wait) +LEAF(__r4k_wait) .set push .set noreorder /* start of rollback region */ @@ -146,14 +146,14 @@ LEAF(r4k_wait) jr ra nop .set pop - END(r4k_wait) + END(__r4k_wait) .macro BUILD_ROLLBACK_PROLOGUE handler FEXPORT(rollback_\handler) .set push .set noat MFC0 k0, CP0_EPC - PTR_LA k1, r4k_wait + PTR_LA k1, __r4k_wait ori k0, 0x1f /* 32 byte rollback region */ xori k0, 0x1f bne k0, k1, 9f @@ -493,7 +493,7 @@ NESTED(nmi_handler, PT_SIZE, sp) .set noreorder /* check if TLB contains a entry for EPC */ MFC0 k1, CP0_ENTRYHI - andi k1, 0xff /* ASID_MASK patched at run-time!! */ + andi k1, 0xff /* ASID_MASK */ MFC0 k0, CP0_EPC PTR_SRL k0, _PAGE_SHIFT + 1 PTR_SLL k0, _PAGE_SHIFT + 1 diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c new file mode 100644 index 00000000000..0c655deeea4 --- /dev/null +++ b/arch/mips/kernel/idle.c @@ -0,0 +1,245 @@ +/* + * MIPS idle loop and WAIT instruction support. + * + * Copyright (C) xxxx the Anonymous + * Copyright (C) 1994 - 2006 Ralf Baechle + * Copyright (C) 2003, 2004 Maciej W. Rozycki + * Copyright (C) 2001, 2004, 2011, 2012 MIPS Technologies, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#include <linux/export.h> +#include <linux/init.h> +#include <linux/irqflags.h> +#include <linux/printk.h> +#include <linux/sched.h> +#include <asm/cpu.h> +#include <asm/cpu-info.h> +#include <asm/idle.h> +#include <asm/mipsregs.h> + +/* + * Not all of the MIPS CPUs have the "wait" instruction available. Moreover, + * the implementation of the "wait" feature differs between CPU families. This + * points to the function that implements CPU specific wait. + * The wait instruction stops the pipeline and reduces the power consumption of + * the CPU very much. 
+ */ +void (*cpu_wait)(void); +EXPORT_SYMBOL(cpu_wait); + +static void r3081_wait(void) +{ + unsigned long cfg = read_c0_conf(); + write_c0_conf(cfg | R30XX_CONF_HALT); + local_irq_enable(); +} + +static void r39xx_wait(void) +{ + if (!need_resched()) + write_c0_conf(read_c0_conf() | TX39_CONF_HALT); + local_irq_enable(); +} + +void r4k_wait(void) +{ + local_irq_enable(); + __r4k_wait(); +} + +/* + * This variant is preferable as it allows testing need_resched and going to + * sleep depending on the outcome atomically. Unfortunately the "It is + * implementation-dependent whether the pipeline restarts when a non-enabled + * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes + * using this version a gamble. + */ +void r4k_wait_irqoff(void) +{ + if (!need_resched()) + __asm__( + " .set push \n" + " .set mips3 \n" + " wait \n" + " .set pop \n"); + local_irq_enable(); + __asm__( + " .globl __pastwait \n" + "__pastwait: \n"); +} + +/* + * The RM7000 variant has to handle erratum 38. The workaround is to not + * have any pending stores when the WAIT instruction is executed. + */ +static void rm7k_wait_irqoff(void) +{ + if (!need_resched()) + __asm__( + " .set push \n" + " .set mips3 \n" + " .set noat \n" + " mfc0 $1, $12 \n" + " sync \n" + " mtc0 $1, $12 # stalls until W stage \n" + " wait \n" + " mtc0 $1, $12 # stalls until W stage \n" + " .set pop \n"); + local_irq_enable(); +} + +/* + * Au1 'wait' is only useful when the 32kHz counter is used as timer, + * since coreclock (and the cp0 counter) stops upon executing it. Only an + * interrupt can wake it, so they must be enabled before entering idle modes. + */ +static void au1k_wait(void) +{ + unsigned long c0status = read_c0_status() | 1; /* irqs on */ + + __asm__( + " .set mips3 \n" + " cache 0x14, 0(%0) \n" + " cache 0x14, 32(%0) \n" + " sync \n" + " mtc0 %1, $12 \n" /* wr c0status */ + " wait \n" + " nop \n" + " nop \n" + " nop \n" + " nop \n" + " .set mips0 \n" + : : "r" (au1k_wait), "r" (c0status)); +} + +static int __initdata nowait; + +static int __init wait_disable(char *s) +{ + nowait = 1; + + return 1; +} + +__setup("nowait", wait_disable); + +void __init check_wait(void) +{ + struct cpuinfo_mips *c = ¤t_cpu_data; + + if (nowait) { + printk("Wait instruction disabled.\n"); + return; + } + + switch (c->cputype) { + case CPU_R3081: + case CPU_R3081E: + cpu_wait = r3081_wait; + break; + case CPU_TX3927: + cpu_wait = r39xx_wait; + break; + case CPU_R4200: +/* case CPU_R4300: */ + case CPU_R4600: + case CPU_R4640: + case CPU_R4650: + case CPU_R4700: + case CPU_R5000: + case CPU_R5500: + case CPU_NEVADA: + case CPU_4KC: + case CPU_4KEC: + case CPU_4KSC: + case CPU_5KC: + case CPU_25KF: + case CPU_PR4450: + case CPU_BMIPS3300: + case CPU_BMIPS4350: + case CPU_BMIPS4380: + case CPU_BMIPS5000: + case CPU_CAVIUM_OCTEON: + case CPU_CAVIUM_OCTEON_PLUS: + case CPU_CAVIUM_OCTEON2: + case CPU_JZRISC: + case CPU_LOONGSON1: + case CPU_XLR: + case CPU_XLP: + cpu_wait = r4k_wait; + break; + + case CPU_RM7000: + cpu_wait = rm7k_wait_irqoff; + break; + + case CPU_M14KC: + case CPU_M14KEC: + case CPU_24K: + case CPU_34K: + case CPU_1004K: + cpu_wait = r4k_wait; + if (read_c0_config7() & MIPS_CONF7_WII) + cpu_wait = r4k_wait_irqoff; + break; + + case CPU_74K: + cpu_wait = r4k_wait; + if ((c->processor_id & 0xff) >= PRID_REV_ENCODE_332(2, 1, 0)) + cpu_wait = r4k_wait_irqoff; + break; + + case CPU_TX49XX: + cpu_wait = r4k_wait_irqoff; + break; + case CPU_ALCHEMY: + cpu_wait = au1k_wait; + break; + case CPU_20KC: + /* + * WAIT on 
Rev1.0 has E1, E2, E3 and E16. + * WAIT on Rev2.0 and Rev3.0 has E16. + * Rev3.1 WAIT is nop, why bother + */ + if ((c->processor_id & 0xff) <= 0x64) + break; + + /* + * Another rev is incremeting c0_count at a reduced clock + * rate while in WAIT mode. So we basically have the choice + * between using the cp0 timer as clocksource or avoiding + * the WAIT instruction. Until more details are known, + * disable the use of WAIT for 20Kc entirely. + cpu_wait = r4k_wait; + */ + break; + case CPU_RM9000: + if ((c->processor_id & 0x00ff) >= 0x40) + cpu_wait = r4k_wait; + break; + default: + break; + } +} + +static void smtc_idle_hook(void) +{ +#ifdef CONFIG_MIPS_MT_SMTC + void smtc_idle_loop_hook(void); + + smtc_idle_loop_hook(); +#endif +} + +void arch_cpu_idle(void) +{ + smtc_idle_hook(); + if (cpu_wait) + cpu_wait(); + else + local_irq_enable(); +} diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c index 12bc4ebdf55..1f8187ab099 100644 --- a/arch/mips/kernel/kprobes.c +++ b/arch/mips/kernel/kprobes.c @@ -207,7 +207,10 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p) void __kprobes arch_remove_kprobe(struct kprobe *p) { - free_insn_slot(p->ainsn.insn, 0); + if (p->ainsn.insn) { + free_insn_slot(p->ainsn.insn, 0); + p->ainsn.insn = NULL; + } } static void save_previous_kprobe(struct kprobe_ctlblk *kcb) diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c index a3e461408b7..acb34373679 100644 --- a/arch/mips/kernel/proc.c +++ b/arch/mips/kernel/proc.c @@ -10,6 +10,7 @@ #include <asm/bootinfo.h> #include <asm/cpu.h> #include <asm/cpu-features.h> +#include <asm/idle.h> #include <asm/mipsregs.h> #include <asm/processor.h> #include <asm/prom.h> diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index eb902c1f0ca..c6a041d9d05 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -51,19 +51,6 @@ void arch_cpu_idle_dead(void) } #endif -void arch_cpu_idle(void) -{ -#ifdef CONFIG_MIPS_MT_SMTC - extern void smtc_idle_loop_hook(void); - - smtc_idle_loop_hook(); -#endif - if (cpu_wait) - (*cpu_wait)(); - else - local_irq_enable(); -} - asmlinkage void ret_from_fork(void); asmlinkage void ret_from_kernel_thread(void); @@ -224,6 +211,9 @@ struct mips_frame_info { int pc_offset; }; +#define J_TARGET(pc,target) \ + (((unsigned long)(pc) & 0xf0000000) | ((target) << 2)) + static inline int is_ra_save_ins(union mips_instruction *ip) { #ifdef CONFIG_CPU_MICROMIPS @@ -264,7 +254,7 @@ static inline int is_ra_save_ins(union mips_instruction *ip) #endif } -static inline int is_jal_jalr_jr_ins(union mips_instruction *ip) +static inline int is_jump_ins(union mips_instruction *ip) { #ifdef CONFIG_CPU_MICROMIPS /* @@ -288,6 +278,8 @@ static inline int is_jal_jalr_jr_ins(union mips_instruction *ip) return 0; return (((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op); #else + if (ip->j_format.opcode == j_op) + return 1; if (ip->j_format.opcode == jal_op) return 1; if (ip->r_format.opcode != spec_op) @@ -350,7 +342,7 @@ static int get_frame_info(struct mips_frame_info *info) for (i = 0; i < max_insns; i++, ip++) { - if (is_jal_jalr_jr_ins(ip)) + if (is_jump_ins(ip)) break; if (!info->frame_size) { if (is_sp_move_ins(ip)) @@ -393,15 +385,42 @@ err: static struct mips_frame_info schedule_mfi __read_mostly; +#ifdef CONFIG_KALLSYMS +static unsigned long get___schedule_addr(void) +{ + return kallsyms_lookup_name("__schedule"); +} +#else +static unsigned long get___schedule_addr(void) +{ + union mips_instruction *ip = (void *)schedule; + int 
max_insns = 8; + int i; + + for (i = 0; i < max_insns; i++, ip++) { + if (ip->j_format.opcode == j_op) + return J_TARGET(ip, ip->j_format.target); + } + return 0; +} +#endif + static int __init frame_info_init(void) { unsigned long size = 0; #ifdef CONFIG_KALLSYMS unsigned long ofs; +#endif + unsigned long addr; + + addr = get___schedule_addr(); + if (!addr) + addr = (unsigned long)schedule; - kallsyms_lookup_size_offset((unsigned long)schedule, &size, &ofs); +#ifdef CONFIG_KALLSYMS + kallsyms_lookup_size_offset(addr, &size, &ofs); #endif - schedule_mfi.func = schedule; + schedule_mfi.func = (void *)addr; schedule_mfi.func_size = size; get_frame_info(&schedule_mfi); diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c index 93c070b41b0..6fa198db899 100644 --- a/arch/mips/kernel/rtlx.c +++ b/arch/mips/kernel/rtlx.c @@ -40,6 +40,7 @@ #include <asm/processor.h> #include <asm/vpe.h> #include <asm/rtlx.h> +#include <asm/setup.h> static struct rtlx_info *rtlx; static int major; diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index 36cfd4060e1..97a5909a61c 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S @@ -423,4 +423,5 @@ sys_call_table: PTR sys_process_vm_writev /* 5305 */ PTR sys_kcmp PTR sys_finit_module + PTR sys_getdents64 .size sys_call_table,.-sys_call_table diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index c17619fe18e..6e7862ab46c 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -37,6 +37,7 @@ #include <linux/atomic.h> #include <asm/cpu.h> #include <asm/processor.h> +#include <asm/idle.h> #include <asm/r4k-timer.h> #include <asm/mmu_context.h> #include <asm/time.h> diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c index 31d22f3121c..75a4fd70984 100644 --- a/arch/mips/kernel/smtc.c +++ b/arch/mips/kernel/smtc.c @@ -34,6 +34,7 @@ #include <asm/hardirq.h> #include <asm/hazards.h> #include <asm/irq.h> +#include <asm/idle.h> #include <asm/mmu_context.h> #include <asm/mipsregs.h> #include <asm/cacheflush.h> @@ -111,7 +112,7 @@ static int vpe0limit; static int ipibuffers; static int nostlb; static int asidmask; -unsigned int smtc_asid_mask = 0xff; +unsigned long smtc_asid_mask = 0xff; static int __init vpe0tcs(char *str) { @@ -858,7 +859,6 @@ void smtc_send_ipi(int cpu, int type, unsigned int action) unsigned long flags; int mtflags; unsigned long tcrestart; - extern void r4k_wait_irqoff(void), __pastwait(void); int set_resched_flag = (type == LINUX_SMP_IPI && action == SMP_RESCHEDULE_YOURSELF); @@ -914,8 +914,7 @@ void smtc_send_ipi(int cpu, int type, unsigned int action) */ if (cpu_wait == r4k_wait_irqoff) { tcrestart = read_tc_c0_tcrestart(); - if (tcrestart >= (unsigned long)r4k_wait_irqoff - && tcrestart < (unsigned long)__pastwait) { + if (address_is_in_r4k_wait_irqoff(tcrestart)) { write_tc_c0_tcrestart(__pastwait); tcstatus &= ~TCSTATUS_IXMT; write_tc_c0_tcstatus(tcstatus); @@ -1395,7 +1394,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) asid = asid_cache(cpu); do { - if (!ASID_MASK(ASID_INC(asid))) { + if (!((asid += ASID_INC) & ASID_MASK) ) { if (cpu_has_vtag_icache) flush_icache_all(); /* Traverse all online CPUs (hack requires contiguous range) */ @@ -1414,7 +1413,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) mips_ihb(); } tcstat = read_tc_c0_tcstatus(); - smtc_live_asid[tlb][ASID_MASK(tcstat)] |= (asiduse)(0x1 << i); + smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i); if (!prevhalt) 
write_tc_c0_tchalt(0); } @@ -1423,7 +1422,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) asid = ASID_FIRST_VERSION; local_flush_tlb_all(); /* start new asid cycle */ } - } while (smtc_live_asid[tlb][ASID_MASK(asid)]); + } while (smtc_live_asid[tlb][(asid & ASID_MASK)]); /* * SMTC shares the TLB within VPEs and possibly across all VPEs. @@ -1461,7 +1460,7 @@ void smtc_flush_tlb_asid(unsigned long asid) tlb_read(); ehb(); ehi = read_c0_entryhi(); - if (ASID_MASK(ehi) == asid) { + if ((ehi & ASID_MASK) == asid) { /* * Invalidate only entries with specified ASID, * makiing sure all entries differ. diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 77cff1f6d05..a75ae40184a 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -41,6 +41,7 @@ #include <asm/dsp.h> #include <asm/fpu.h> #include <asm/fpu_emulator.h> +#include <asm/idle.h> #include <asm/mipsregs.h> #include <asm/mipsmtregs.h> #include <asm/module.h> @@ -57,7 +58,6 @@ #include <asm/uasm.h> extern void check_wait(void); -extern asmlinkage void r4k_wait(void); extern asmlinkage void rollback_handle_int(void); extern asmlinkage void handle_int(void); extern u32 handle_tlbl[]; @@ -897,22 +897,24 @@ out_sigsegv: asmlinkage void do_tr(struct pt_regs *regs) { - unsigned int opcode, tcode = 0; + u32 opcode, tcode = 0; u16 instr[2]; - unsigned long epc = exception_epc(regs); + unsigned long epc = msk_isa16_mode(exception_epc(regs)); - if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc))) || - (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2)))) + if (get_isa16_mode(regs->cp0_epc)) { + if (__get_user(instr[0], (u16 __user *)(epc + 0)) || + __get_user(instr[1], (u16 __user *)(epc + 2))) goto out_sigsegv; - opcode = (instr[0] << 16) | instr[1]; - - /* Immediate versions don't provide a code. */ - if (!(opcode & OPCODE)) { - if (get_isa16_mode(regs->cp0_epc)) - /* microMIPS */ - tcode = (opcode >> 12) & 0x1f; - else - tcode = ((opcode >> 6) & ((1 << 10) - 1)); + opcode = (instr[0] << 16) | instr[1]; + /* Immediate versions don't provide a code. */ + if (!(opcode & OPCODE)) + tcode = (opcode >> 12) & ((1 << 4) - 1); + } else { + if (__get_user(opcode, (u32 __user *)epc)) + goto out_sigsegv; + /* Immediate versions don't provide a code. */ + if (!(opcode & OPCODE)) + tcode = (opcode >> 6) & ((1 << 10) - 1); } do_trap_or_bp(regs, tcode, "Trap"); @@ -1542,7 +1544,7 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) extern char except_vec_vi, except_vec_vi_lui; extern char except_vec_vi_ori, except_vec_vi_end; extern char rollback_except_vec_vi; - char *vec_start = (cpu_wait == r4k_wait) ? + char *vec_start = using_rollback_handler() ? 
&rollback_except_vec_vi : &except_vec_vi; #ifdef CONFIG_MIPS_MT_SMTC /* @@ -1656,7 +1658,6 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu) unsigned int cpu = smp_processor_id(); unsigned int status_set = ST0_CU0; unsigned int hwrena = cpu_hwrena_impl_bits; - unsigned long asid = 0; #ifdef CONFIG_MIPS_MT_SMTC int secondaryTC = 0; int bootTC = (cpu == 0); @@ -1740,9 +1741,8 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu) } #endif /* CONFIG_MIPS_MT_SMTC */ - asid = ASID_FIRST_VERSION; - cpu_data[cpu].asid_cache = asid; - TLBMISS_HANDLER_SETUP(); + if (!cpu_data[cpu].asid_cache) + cpu_data[cpu].asid_cache = ASID_FIRST_VERSION; atomic_inc(&init_mm.mm_count); current->active_mm = &init_mm; @@ -1814,10 +1814,8 @@ void __init trap_init(void) extern char except_vec4; extern char except_vec3_r4000; unsigned long i; - int rollback; check_wait(); - rollback = (cpu_wait == r4k_wait); #if defined(CONFIG_KGDB) if (kgdb_early_setup) @@ -1894,7 +1892,8 @@ void __init trap_init(void) if (board_be_init) board_be_init(); - set_except_vector(0, rollback ? rollback_handle_int : handle_int); + set_except_vector(0, using_rollback_handler() ? rollback_handle_int + : handle_int); set_except_vector(1, handle_tlbm); set_except_vector(2, handle_tlbl); set_except_vector(3, handle_tlbs); diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c index e0dad028979..dd203e59e6f 100644 --- a/arch/mips/kvm/kvm_mips.c +++ b/arch/mips/kvm/kvm_mips.c @@ -195,7 +195,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm) long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { - return -EINVAL; + return -ENOIOCTLCMD; } void kvm_arch_free_memslot(struct kvm_memory_slot *free, @@ -401,7 +401,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg) { - return -EINVAL; + return -ENOIOCTLCMD; } int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) @@ -475,14 +475,248 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { - return -EINVAL; + return -ENOIOCTLCMD; } int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { - return -EINVAL; + return -ENOIOCTLCMD; +} + +#define MIPS_CP0_32(_R, _S) \ + (KVM_REG_MIPS | KVM_REG_SIZE_U32 | 0x10000 | (8 * (_R) + (_S))) + +#define MIPS_CP0_64(_R, _S) \ + (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0x10000 | (8 * (_R) + (_S))) + +#define KVM_REG_MIPS_CP0_INDEX MIPS_CP0_32(0, 0) +#define KVM_REG_MIPS_CP0_ENTRYLO0 MIPS_CP0_64(2, 0) +#define KVM_REG_MIPS_CP0_ENTRYLO1 MIPS_CP0_64(3, 0) +#define KVM_REG_MIPS_CP0_CONTEXT MIPS_CP0_64(4, 0) +#define KVM_REG_MIPS_CP0_USERLOCAL MIPS_CP0_64(4, 2) +#define KVM_REG_MIPS_CP0_PAGEMASK MIPS_CP0_32(5, 0) +#define KVM_REG_MIPS_CP0_PAGEGRAIN MIPS_CP0_32(5, 1) +#define KVM_REG_MIPS_CP0_WIRED MIPS_CP0_32(6, 0) +#define KVM_REG_MIPS_CP0_HWRENA MIPS_CP0_32(7, 0) +#define KVM_REG_MIPS_CP0_BADVADDR MIPS_CP0_64(8, 0) +#define KVM_REG_MIPS_CP0_COUNT MIPS_CP0_32(9, 0) +#define KVM_REG_MIPS_CP0_ENTRYHI MIPS_CP0_64(10, 0) +#define KVM_REG_MIPS_CP0_COMPARE MIPS_CP0_32(11, 0) +#define KVM_REG_MIPS_CP0_STATUS MIPS_CP0_32(12, 0) +#define KVM_REG_MIPS_CP0_CAUSE MIPS_CP0_32(13, 0) +#define KVM_REG_MIPS_CP0_EBASE MIPS_CP0_64(15, 1) +#define KVM_REG_MIPS_CP0_CONFIG MIPS_CP0_32(16, 0) +#define KVM_REG_MIPS_CP0_CONFIG1 MIPS_CP0_32(16, 1) +#define KVM_REG_MIPS_CP0_CONFIG2 MIPS_CP0_32(16, 2) +#define KVM_REG_MIPS_CP0_CONFIG3 MIPS_CP0_32(16, 3) +#define KVM_REG_MIPS_CP0_CONFIG7 MIPS_CP0_32(16, 7) +#define 
KVM_REG_MIPS_CP0_XCONTEXT MIPS_CP0_64(20, 0) +#define KVM_REG_MIPS_CP0_ERROREPC MIPS_CP0_64(30, 0) + +static u64 kvm_mips_get_one_regs[] = { + KVM_REG_MIPS_R0, + KVM_REG_MIPS_R1, + KVM_REG_MIPS_R2, + KVM_REG_MIPS_R3, + KVM_REG_MIPS_R4, + KVM_REG_MIPS_R5, + KVM_REG_MIPS_R6, + KVM_REG_MIPS_R7, + KVM_REG_MIPS_R8, + KVM_REG_MIPS_R9, + KVM_REG_MIPS_R10, + KVM_REG_MIPS_R11, + KVM_REG_MIPS_R12, + KVM_REG_MIPS_R13, + KVM_REG_MIPS_R14, + KVM_REG_MIPS_R15, + KVM_REG_MIPS_R16, + KVM_REG_MIPS_R17, + KVM_REG_MIPS_R18, + KVM_REG_MIPS_R19, + KVM_REG_MIPS_R20, + KVM_REG_MIPS_R21, + KVM_REG_MIPS_R22, + KVM_REG_MIPS_R23, + KVM_REG_MIPS_R24, + KVM_REG_MIPS_R25, + KVM_REG_MIPS_R26, + KVM_REG_MIPS_R27, + KVM_REG_MIPS_R28, + KVM_REG_MIPS_R29, + KVM_REG_MIPS_R30, + KVM_REG_MIPS_R31, + + KVM_REG_MIPS_HI, + KVM_REG_MIPS_LO, + KVM_REG_MIPS_PC, + + KVM_REG_MIPS_CP0_INDEX, + KVM_REG_MIPS_CP0_CONTEXT, + KVM_REG_MIPS_CP0_PAGEMASK, + KVM_REG_MIPS_CP0_WIRED, + KVM_REG_MIPS_CP0_BADVADDR, + KVM_REG_MIPS_CP0_ENTRYHI, + KVM_REG_MIPS_CP0_STATUS, + KVM_REG_MIPS_CP0_CAUSE, + /* EPC set via kvm_regs, et al. */ + KVM_REG_MIPS_CP0_CONFIG, + KVM_REG_MIPS_CP0_CONFIG1, + KVM_REG_MIPS_CP0_CONFIG2, + KVM_REG_MIPS_CP0_CONFIG3, + KVM_REG_MIPS_CP0_CONFIG7, + KVM_REG_MIPS_CP0_ERROREPC +}; + +static int kvm_mips_get_reg(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + s64 v; + + switch (reg->id) { + case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31: + v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0]; + break; + case KVM_REG_MIPS_HI: + v = (long)vcpu->arch.hi; + break; + case KVM_REG_MIPS_LO: + v = (long)vcpu->arch.lo; + break; + case KVM_REG_MIPS_PC: + v = (long)vcpu->arch.pc; + break; + + case KVM_REG_MIPS_CP0_INDEX: + v = (long)kvm_read_c0_guest_index(cop0); + break; + case KVM_REG_MIPS_CP0_CONTEXT: + v = (long)kvm_read_c0_guest_context(cop0); + break; + case KVM_REG_MIPS_CP0_PAGEMASK: + v = (long)kvm_read_c0_guest_pagemask(cop0); + break; + case KVM_REG_MIPS_CP0_WIRED: + v = (long)kvm_read_c0_guest_wired(cop0); + break; + case KVM_REG_MIPS_CP0_BADVADDR: + v = (long)kvm_read_c0_guest_badvaddr(cop0); + break; + case KVM_REG_MIPS_CP0_ENTRYHI: + v = (long)kvm_read_c0_guest_entryhi(cop0); + break; + case KVM_REG_MIPS_CP0_STATUS: + v = (long)kvm_read_c0_guest_status(cop0); + break; + case KVM_REG_MIPS_CP0_CAUSE: + v = (long)kvm_read_c0_guest_cause(cop0); + break; + case KVM_REG_MIPS_CP0_ERROREPC: + v = (long)kvm_read_c0_guest_errorepc(cop0); + break; + case KVM_REG_MIPS_CP0_CONFIG: + v = (long)kvm_read_c0_guest_config(cop0); + break; + case KVM_REG_MIPS_CP0_CONFIG1: + v = (long)kvm_read_c0_guest_config1(cop0); + break; + case KVM_REG_MIPS_CP0_CONFIG2: + v = (long)kvm_read_c0_guest_config2(cop0); + break; + case KVM_REG_MIPS_CP0_CONFIG3: + v = (long)kvm_read_c0_guest_config3(cop0); + break; + case KVM_REG_MIPS_CP0_CONFIG7: + v = (long)kvm_read_c0_guest_config7(cop0); + break; + default: + return -EINVAL; + } + if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) { + u64 __user *uaddr64 = (u64 __user *)(long)reg->addr; + return put_user(v, uaddr64); + } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) { + u32 __user *uaddr32 = (u32 __user *)(long)reg->addr; + u32 v32 = (u32)v; + return put_user(v32, uaddr32); + } else { + return -EINVAL; + } +} + +static int kvm_mips_set_reg(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + u64 v; + + if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) { + u64 __user *uaddr64 = (u64 
__user *)(long)reg->addr; + + if (get_user(v, uaddr64) != 0) + return -EFAULT; + } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) { + u32 __user *uaddr32 = (u32 __user *)(long)reg->addr; + s32 v32; + + if (get_user(v32, uaddr32) != 0) + return -EFAULT; + v = (s64)v32; + } else { + return -EINVAL; + } + + switch (reg->id) { + case KVM_REG_MIPS_R0: + /* Silently ignore requests to set $0 */ + break; + case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31: + vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v; + break; + case KVM_REG_MIPS_HI: + vcpu->arch.hi = v; + break; + case KVM_REG_MIPS_LO: + vcpu->arch.lo = v; + break; + case KVM_REG_MIPS_PC: + vcpu->arch.pc = v; + break; + + case KVM_REG_MIPS_CP0_INDEX: + kvm_write_c0_guest_index(cop0, v); + break; + case KVM_REG_MIPS_CP0_CONTEXT: + kvm_write_c0_guest_context(cop0, v); + break; + case KVM_REG_MIPS_CP0_PAGEMASK: + kvm_write_c0_guest_pagemask(cop0, v); + break; + case KVM_REG_MIPS_CP0_WIRED: + kvm_write_c0_guest_wired(cop0, v); + break; + case KVM_REG_MIPS_CP0_BADVADDR: + kvm_write_c0_guest_badvaddr(cop0, v); + break; + case KVM_REG_MIPS_CP0_ENTRYHI: + kvm_write_c0_guest_entryhi(cop0, v); + break; + case KVM_REG_MIPS_CP0_STATUS: + kvm_write_c0_guest_status(cop0, v); + break; + case KVM_REG_MIPS_CP0_CAUSE: + kvm_write_c0_guest_cause(cop0, v); + break; + case KVM_REG_MIPS_CP0_ERROREPC: + kvm_write_c0_guest_errorepc(cop0, v); + break; + default: + return -EINVAL; + } + return 0; } long @@ -491,9 +725,38 @@ kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) struct kvm_vcpu *vcpu = filp->private_data; void __user *argp = (void __user *)arg; long r; - int intr; switch (ioctl) { + case KVM_SET_ONE_REG: + case KVM_GET_ONE_REG: { + struct kvm_one_reg reg; + if (copy_from_user(®, argp, sizeof(reg))) + return -EFAULT; + if (ioctl == KVM_SET_ONE_REG) + return kvm_mips_set_reg(vcpu, ®); + else + return kvm_mips_get_reg(vcpu, ®); + } + case KVM_GET_REG_LIST: { + struct kvm_reg_list __user *user_list = argp; + u64 __user *reg_dest; + struct kvm_reg_list reg_list; + unsigned n; + + if (copy_from_user(®_list, user_list, sizeof(reg_list))) + return -EFAULT; + n = reg_list.n; + reg_list.n = ARRAY_SIZE(kvm_mips_get_one_regs); + if (copy_to_user(user_list, ®_list, sizeof(reg_list))) + return -EFAULT; + if (n < reg_list.n) + return -E2BIG; + reg_dest = user_list->reg; + if (copy_to_user(reg_dest, kvm_mips_get_one_regs, + sizeof(kvm_mips_get_one_regs))) + return -EFAULT; + return 0; + } case KVM_NMI: /* Treat the NMI as a CPU reset */ r = kvm_mips_reset_vcpu(vcpu); @@ -505,8 +768,6 @@ kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) if (copy_from_user(&irq, argp, sizeof(irq))) goto out; - intr = (int)irq.irq; - kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, irq.irq); @@ -514,7 +775,7 @@ kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) break; } default: - r = -EINVAL; + r = -ENOIOCTLCMD; } out: @@ -565,7 +826,7 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) switch (ioctl) { default: - r = -EINVAL; + r = -ENOIOCTLCMD; } return r; @@ -593,13 +854,13 @@ void kvm_arch_exit(void) int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { - return -ENOTSUPP; + return -ENOIOCTLCMD; } int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { - return -ENOTSUPP; + return -ENOIOCTLCMD; } int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) @@ -609,12 +870,12 @@ int kvm_arch_vcpu_postcreate(struct 
kvm_vcpu *vcpu) int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { - return -ENOTSUPP; + return -ENOIOCTLCMD; } int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { - return -ENOTSUPP; + return -ENOIOCTLCMD; } int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) @@ -627,6 +888,9 @@ int kvm_dev_ioctl_check_extension(long ext) int r; switch (ext) { + case KVM_CAP_ONE_REG: + r = 1; + break; case KVM_CAP_COALESCED_MMIO: r = KVM_COALESCED_MMIO_PAGE_OFFSET; break; @@ -635,7 +899,6 @@ int kvm_dev_ioctl_check_extension(long ext) break; } return r; - } int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) @@ -677,28 +940,28 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { int i; - for (i = 0; i < 32; i++) - vcpu->arch.gprs[i] = regs->gprs[i]; - + for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++) + vcpu->arch.gprs[i] = regs->gpr[i]; + vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */ vcpu->arch.hi = regs->hi; vcpu->arch.lo = regs->lo; vcpu->arch.pc = regs->pc; - return kvm_mips_callbacks->vcpu_ioctl_set_regs(vcpu, regs); + return 0; } int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { int i; - for (i = 0; i < 32; i++) - regs->gprs[i] = vcpu->arch.gprs[i]; + for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++) + regs->gpr[i] = vcpu->arch.gprs[i]; regs->hi = vcpu->arch.hi; regs->lo = vcpu->arch.lo; regs->pc = vcpu->arch.pc; - return kvm_mips_callbacks->vcpu_ioctl_get_regs(vcpu, regs); + return 0; } void kvm_mips_comparecount_func(unsigned long data) diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/kvm_mips_emul.c index 2b2bac9a40a..4b6274b47f3 100644 --- a/arch/mips/kvm/kvm_mips_emul.c +++ b/arch/mips/kvm/kvm_mips_emul.c @@ -525,16 +525,18 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, printk("MTCz, cop0->reg[EBASE]: %#lx\n", kvm_read_c0_guest_ebase(cop0)); } else if (rd == MIPS_CP0_TLB_HI && sel == 0) { - uint32_t nasid = ASID_MASK(vcpu->arch.gprs[rt]); + uint32_t nasid = + vcpu->arch.gprs[rt] & ASID_MASK; if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) && - (ASID_MASK(kvm_read_c0_guest_entryhi(cop0)) - != nasid)) { + ((kvm_read_c0_guest_entryhi(cop0) & + ASID_MASK) != nasid)) { kvm_debug ("MTCz, change ASID from %#lx to %#lx\n", - ASID_MASK(kvm_read_c0_guest_entryhi(cop0)), - ASID_MASK(vcpu->arch.gprs[rt])); + kvm_read_c0_guest_entryhi(cop0) & + ASID_MASK, + vcpu->arch.gprs[rt] & ASID_MASK); /* Blow away the shadow host TLBs */ kvm_mips_flush_host_tlb(1); @@ -986,7 +988,8 @@ kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause, * resulting handler will do the right thing */ index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) | - ASID_MASK(kvm_read_c0_guest_entryhi(cop0))); + (kvm_read_c0_guest_entryhi + (cop0) & ASID_MASK)); if (index < 0) { vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK); @@ -1151,7 +1154,7 @@ kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc, struct kvm_vcpu_arch *arch = &vcpu->arch; enum emulation_result er = EMULATE_DONE; unsigned long entryhi = (vcpu->arch. 
host_cp0_badvaddr & VPN2_MASK) | - ASID_MASK(kvm_read_c0_guest_entryhi(cop0)); + (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { /* save old pc */ @@ -1198,7 +1201,7 @@ kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc, enum emulation_result er = EMULATE_DONE; unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | - ASID_MASK(kvm_read_c0_guest_entryhi(cop0)); + (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { /* save old pc */ @@ -1243,7 +1246,7 @@ kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc, struct kvm_vcpu_arch *arch = &vcpu->arch; enum emulation_result er = EMULATE_DONE; unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | - ASID_MASK(kvm_read_c0_guest_entryhi(cop0)); + (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { /* save old pc */ @@ -1287,7 +1290,7 @@ kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc, struct kvm_vcpu_arch *arch = &vcpu->arch; enum emulation_result er = EMULATE_DONE; unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | - ASID_MASK(kvm_read_c0_guest_entryhi(cop0)); + (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { /* save old pc */ @@ -1356,7 +1359,7 @@ kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc, { struct mips_coproc *cop0 = vcpu->arch.cop0; unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | - ASID_MASK(kvm_read_c0_guest_entryhi(cop0)); + (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); struct kvm_vcpu_arch *arch = &vcpu->arch; enum emulation_result er = EMULATE_DONE; @@ -1783,8 +1786,8 @@ kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc, */ index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) | - ASID_MASK(kvm_read_c0_guest_entryhi - (vcpu->arch.cop0))); + (kvm_read_c0_guest_entryhi + (vcpu->arch.cop0) & ASID_MASK)); if (index < 0) { if (exccode == T_TLB_LD_MISS) { er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu); diff --git a/arch/mips/kvm/kvm_tlb.c b/arch/mips/kvm/kvm_tlb.c index 89511a9258d..c777dd36d4a 100644 --- a/arch/mips/kvm/kvm_tlb.c +++ b/arch/mips/kvm/kvm_tlb.c @@ -17,6 +17,8 @@ #include <linux/delay.h> #include <linux/module.h> #include <linux/kvm_host.h> +#include <linux/srcu.h> + #include <asm/cpu.h> #include <asm/bootinfo.h> @@ -51,13 +53,13 @@ EXPORT_SYMBOL(kvm_mips_is_error_pfn); uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu) { - return ASID_MASK(vcpu->arch.guest_kernel_asid[smp_processor_id()]); + return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK; } uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu) { - return ASID_MASK(vcpu->arch.guest_user_asid[smp_processor_id()]); + return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK; } inline uint32_t kvm_mips_get_commpage_asid (struct kvm_vcpu *vcpu) @@ -84,7 +86,7 @@ void kvm_mips_dump_host_tlbs(void) old_pagemask = read_c0_pagemask(); printk("HOST TLBs:\n"); - printk("ASID: %#lx\n", ASID_MASK(read_c0_entryhi())); + printk("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK); for (i = 0; i < current_cpu_data.tlbsize; i++) { write_c0_index(i); @@ -169,21 +171,27 @@ void kvm_mips_dump_shadow_tlbs(struct kvm_vcpu *vcpu) } } -static void kvm_mips_map_page(struct kvm *kvm, gfn_t gfn) +static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn) { + int srcu_idx, err = 0; pfn_t pfn; if (kvm->arch.guest_pmap[gfn] != 
KVM_INVALID_PAGE) - return; + return 0; + srcu_idx = srcu_read_lock(&kvm->srcu); pfn = kvm_mips_gfn_to_pfn(kvm, gfn); if (kvm_mips_is_error_pfn(pfn)) { - panic("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn); + kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn); + err = -EFAULT; + goto out; } kvm->arch.guest_pmap[gfn] = pfn; - return; +out: + srcu_read_unlock(&kvm->srcu, srcu_idx); + return err; } /* Translate guest KSEG0 addresses to Host PA */ @@ -207,7 +215,10 @@ unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu, gva); return KVM_INVALID_PAGE; } - kvm_mips_map_page(vcpu->kvm, gfn); + + if (kvm_mips_map_page(vcpu->kvm, gfn) < 0) + return KVM_INVALID_ADDR; + return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset; } @@ -310,8 +321,11 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr, even = !(gfn & 0x1); vaddr = badvaddr & (PAGE_MASK << 1); - kvm_mips_map_page(vcpu->kvm, gfn); - kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1); + if (kvm_mips_map_page(vcpu->kvm, gfn) < 0) + return -1; + + if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0) + return -1; if (even) { pfn0 = kvm->arch.guest_pmap[gfn]; @@ -389,8 +403,11 @@ kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu, pfn0 = 0; pfn1 = 0; } else { - kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT); - kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT); + if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT) < 0) + return -1; + + if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT) < 0) + return -1; pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT]; pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT]; @@ -428,7 +445,7 @@ int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi) for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) { if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) == ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) && - (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == ASID_MASK(entryhi)))) { + (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == (entryhi & ASID_MASK)))) { index = i; break; } @@ -626,7 +643,7 @@ kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu, { unsigned long asid = asid_cache(cpu); - if (!(ASID_MASK(ASID_INC(asid)))) { + if (!((asid += ASID_INC) & ASID_MASK)) { if (cpu_has_vtag_icache) { flush_icache_all(); } @@ -804,7 +821,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) if (!newasid) { /* If we preempted while the guest was executing, then reload the pre-empted ASID */ if (current->flags & PF_VCPU) { - write_c0_entryhi(ASID_MASK(vcpu->arch.preempt_entryhi)); + write_c0_entryhi(vcpu->arch. + preempt_entryhi & ASID_MASK); ehb(); } } else { @@ -816,11 +834,13 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) */ if (current->flags & PF_VCPU) { if (KVM_GUEST_KERNEL_MODE(vcpu)) - write_c0_entryhi(ASID_MASK(vcpu->arch. - guest_kernel_asid[cpu])); + write_c0_entryhi(vcpu->arch. + guest_kernel_asid[cpu] & + ASID_MASK); else - write_c0_entryhi(ASID_MASK(vcpu->arch. - guest_user_asid[cpu])); + write_c0_entryhi(vcpu->arch. 
+ guest_user_asid[cpu] & + ASID_MASK); ehb(); } } @@ -879,7 +899,8 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu) kvm_mips_guest_tlb_lookup(vcpu, ((unsigned long) opc & VPN2_MASK) | - ASID_MASK(kvm_read_c0_guest_entryhi(cop0))); + (kvm_read_c0_guest_entryhi + (cop0) & ASID_MASK)); if (index < 0) { kvm_err ("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n", diff --git a/arch/mips/kvm/kvm_trap_emul.c b/arch/mips/kvm/kvm_trap_emul.c index 466aeef044b..30d725321db 100644 --- a/arch/mips/kvm/kvm_trap_emul.c +++ b/arch/mips/kvm/kvm_trap_emul.c @@ -345,54 +345,6 @@ static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu) return ret; } -static int -kvm_trap_emul_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - - kvm_write_c0_guest_index(cop0, regs->cp0reg[MIPS_CP0_TLB_INDEX][0]); - kvm_write_c0_guest_context(cop0, regs->cp0reg[MIPS_CP0_TLB_CONTEXT][0]); - kvm_write_c0_guest_badvaddr(cop0, regs->cp0reg[MIPS_CP0_BAD_VADDR][0]); - kvm_write_c0_guest_entryhi(cop0, regs->cp0reg[MIPS_CP0_TLB_HI][0]); - kvm_write_c0_guest_epc(cop0, regs->cp0reg[MIPS_CP0_EXC_PC][0]); - - kvm_write_c0_guest_status(cop0, regs->cp0reg[MIPS_CP0_STATUS][0]); - kvm_write_c0_guest_cause(cop0, regs->cp0reg[MIPS_CP0_CAUSE][0]); - kvm_write_c0_guest_pagemask(cop0, - regs->cp0reg[MIPS_CP0_TLB_PG_MASK][0]); - kvm_write_c0_guest_wired(cop0, regs->cp0reg[MIPS_CP0_TLB_WIRED][0]); - kvm_write_c0_guest_errorepc(cop0, regs->cp0reg[MIPS_CP0_ERROR_PC][0]); - - return 0; -} - -static int -kvm_trap_emul_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - - regs->cp0reg[MIPS_CP0_TLB_INDEX][0] = kvm_read_c0_guest_index(cop0); - regs->cp0reg[MIPS_CP0_TLB_CONTEXT][0] = kvm_read_c0_guest_context(cop0); - regs->cp0reg[MIPS_CP0_BAD_VADDR][0] = kvm_read_c0_guest_badvaddr(cop0); - regs->cp0reg[MIPS_CP0_TLB_HI][0] = kvm_read_c0_guest_entryhi(cop0); - regs->cp0reg[MIPS_CP0_EXC_PC][0] = kvm_read_c0_guest_epc(cop0); - - regs->cp0reg[MIPS_CP0_STATUS][0] = kvm_read_c0_guest_status(cop0); - regs->cp0reg[MIPS_CP0_CAUSE][0] = kvm_read_c0_guest_cause(cop0); - regs->cp0reg[MIPS_CP0_TLB_PG_MASK][0] = - kvm_read_c0_guest_pagemask(cop0); - regs->cp0reg[MIPS_CP0_TLB_WIRED][0] = kvm_read_c0_guest_wired(cop0); - regs->cp0reg[MIPS_CP0_ERROR_PC][0] = kvm_read_c0_guest_errorepc(cop0); - - regs->cp0reg[MIPS_CP0_CONFIG][0] = kvm_read_c0_guest_config(cop0); - regs->cp0reg[MIPS_CP0_CONFIG][1] = kvm_read_c0_guest_config1(cop0); - regs->cp0reg[MIPS_CP0_CONFIG][2] = kvm_read_c0_guest_config2(cop0); - regs->cp0reg[MIPS_CP0_CONFIG][3] = kvm_read_c0_guest_config3(cop0); - regs->cp0reg[MIPS_CP0_CONFIG][7] = kvm_read_c0_guest_config7(cop0); - - return 0; -} - static int kvm_trap_emul_vm_init(struct kvm *kvm) { return 0; @@ -471,8 +423,6 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = { .dequeue_io_int = kvm_mips_dequeue_io_int_cb, .irq_deliver = kvm_mips_irq_deliver_cb, .irq_clear = kvm_mips_irq_clear_cb, - .vcpu_ioctl_get_regs = kvm_trap_emul_ioctl_get_regs, - .vcpu_ioctl_set_regs = kvm_trap_emul_ioctl_set_regs, }; int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks) diff --git a/arch/mips/lantiq/xway/gptu.c b/arch/mips/lantiq/xway/gptu.c index 9861c8669fa..850821df924 100644 --- a/arch/mips/lantiq/xway/gptu.c +++ b/arch/mips/lantiq/xway/gptu.c @@ -144,10 +144,6 @@ static int gptu_probe(struct platform_device *pdev) } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - 
dev_err(&pdev->dev, "Failed to get resource\n"); - return -ENOMEM; - } /* remap gptu register range */ gptu_membase = devm_ioremap_resource(&pdev->dev, res); @@ -169,6 +165,8 @@ static int gptu_probe(struct platform_device *pdev) if (((gptu_r32(GPTU_ID) >> 8) & 0xff) != GPTU_MAGIC) { dev_err(&pdev->dev, "Failed to find magic\n"); gptu_hwexit(); + clk_disable(clk); + clk_put(clk); return -ENAVAIL; } diff --git a/arch/mips/lib/dump_tlb.c b/arch/mips/lib/dump_tlb.c index 8a12d00908e..32b9f21bfd8 100644 --- a/arch/mips/lib/dump_tlb.c +++ b/arch/mips/lib/dump_tlb.c @@ -11,7 +11,6 @@ #include <asm/page.h> #include <asm/pgtable.h> #include <asm/tlbdebug.h> -#include <asm/mmu_context.h> static inline const char *msk2str(unsigned int mask) { @@ -56,7 +55,7 @@ static void dump_tlb(int first, int last) s_pagemask = read_c0_pagemask(); s_entryhi = read_c0_entryhi(); s_index = read_c0_index(); - asid = ASID_MASK(s_entryhi); + asid = s_entryhi & 0xff; for (i = first; i <= last; i++) { write_c0_index(i); @@ -86,7 +85,7 @@ static void dump_tlb(int first, int last) printk("va=%0*lx asid=%02lx\n", width, (entryhi & ~0x1fffUL), - ASID_MASK(entryhi)); + entryhi & 0xff); printk("\t[pa=%0*llx c=%d d=%d v=%d g=%d] ", width, (entrylo0 << 6) & PAGE_MASK, c0, diff --git a/arch/mips/lib/r3k_dump_tlb.c b/arch/mips/lib/r3k_dump_tlb.c index 8327698b993..91615c2ef0c 100644 --- a/arch/mips/lib/r3k_dump_tlb.c +++ b/arch/mips/lib/r3k_dump_tlb.c @@ -9,7 +9,6 @@ #include <linux/mm.h> #include <asm/mipsregs.h> -#include <asm/mmu_context.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/tlbdebug.h> @@ -22,7 +21,7 @@ static void dump_tlb(int first, int last) unsigned int asid; unsigned long entryhi, entrylo0; - asid = ASID_MASK(read_c0_entryhi()); + asid = read_c0_entryhi() & 0xfc0; for (i = first; i <= last; i++) { write_c0_index(i<<8); @@ -36,7 +35,7 @@ static void dump_tlb(int first, int last) /* Unused entries have a virtual address of KSEG0. */ if ((entryhi & 0xffffe000) != 0x80000000 - && (ASID_MASK(entryhi) == asid)) { + && (entryhi & 0xfc0) == asid) { /* * Only print entries in use */ @@ -45,7 +44,7 @@ static void dump_tlb(int first, int last) printk("va=%08lx asid=%08lx" " [pa=%06lx n=%d d=%d v=%d g=%d]", (entryhi & 0xffffe000), - ASID_MASK(entryhi), + entryhi & 0xfc0, entrylo0 & PAGE_MASK, (entrylo0 & (1 << 11)) ? 1 : 0, (entrylo0 & (1 << 10)) ? 1 : 0, diff --git a/arch/mips/loongson/common/reset.c b/arch/mips/loongson/common/reset.c index 35c8c646849..65bfbb5d06f 100644 --- a/arch/mips/loongson/common/reset.c +++ b/arch/mips/loongson/common/reset.c @@ -12,6 +12,7 @@ #include <linux/init.h> #include <linux/pm.h> +#include <asm/idle.h> #include <asm/reboot.h> #include <loongson.h> diff --git a/arch/mips/loongson1/common/reset.c b/arch/mips/loongson1/common/reset.c index d4f610f9604..547f34b69e4 100644 --- a/arch/mips/loongson1/common/reset.c +++ b/arch/mips/loongson1/common/reset.c @@ -9,6 +9,7 @@ #include <linux/io.h> #include <linux/pm.h> +#include <asm/idle.h> #include <asm/reboot.h> #include <loongson1.h> diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c index 4a13c150f31..a63d1ed0827 100644 --- a/arch/mips/mm/tlb-r3k.c +++ b/arch/mips/mm/tlb-r3k.c @@ -51,7 +51,7 @@ void local_flush_tlb_all(void) #endif local_irq_save(flags); - old_ctx = ASID_MASK(read_c0_entryhi()); + old_ctx = read_c0_entryhi() & ASID_MASK; write_c0_entrylo0(0); entry = r3k_have_wired_reg ? 
read_c0_wired() : 8; for (; entry < current_cpu_data.tlbsize; entry++) { @@ -87,13 +87,13 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, #ifdef DEBUG_TLB printk("[tlbrange<%lu,0x%08lx,0x%08lx>]", - ASID_MASK(cpu_context(cpu, mm)), start, end); + cpu_context(cpu, mm) & ASID_MASK, start, end); #endif local_irq_save(flags); size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; if (size <= current_cpu_data.tlbsize) { - int oldpid = ASID_MASK(read_c0_entryhi()); - int newpid = ASID_MASK(cpu_context(cpu, mm)); + int oldpid = read_c0_entryhi() & ASID_MASK; + int newpid = cpu_context(cpu, mm) & ASID_MASK; start &= PAGE_MASK; end += PAGE_SIZE - 1; @@ -166,10 +166,10 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) #ifdef DEBUG_TLB printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page); #endif - newpid = ASID_MASK(cpu_context(cpu, vma->vm_mm)); + newpid = cpu_context(cpu, vma->vm_mm) & ASID_MASK; page &= PAGE_MASK; local_irq_save(flags); - oldpid = ASID_MASK(read_c0_entryhi()); + oldpid = read_c0_entryhi() & ASID_MASK; write_c0_entryhi(page | newpid); BARRIER; tlb_probe(); @@ -197,10 +197,10 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) if (current->active_mm != vma->vm_mm) return; - pid = ASID_MASK(read_c0_entryhi()); + pid = read_c0_entryhi() & ASID_MASK; #ifdef DEBUG_TLB - if ((pid != ASID_MASK(cpu_context(cpu, vma->vm_mm))) || (cpu_context(cpu, vma->vm_mm) == 0)) { + if ((pid != (cpu_context(cpu, vma->vm_mm) & ASID_MASK)) || (cpu_context(cpu, vma->vm_mm) == 0)) { printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%lu tlbpid=%d\n", (cpu_context(cpu, vma->vm_mm)), pid); } @@ -241,7 +241,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, local_irq_save(flags); /* Save old context and create impossible VPN2 value */ - old_ctx = ASID_MASK(read_c0_entryhi()); + old_ctx = read_c0_entryhi() & ASID_MASK; old_pagemask = read_c0_pagemask(); w = read_c0_wired(); write_c0_wired(w + 1); @@ -264,7 +264,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, #endif local_irq_save(flags); - old_ctx = ASID_MASK(read_c0_entryhi()); + old_ctx = read_c0_entryhi() & ASID_MASK; write_c0_entrylo0(entrylo0); write_c0_entryhi(entryhi); write_c0_index(wired); diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c index 09653b290d5..c643de4c473 100644 --- a/arch/mips/mm/tlb-r4k.c +++ b/arch/mips/mm/tlb-r4k.c @@ -287,7 +287,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) ENTER_CRITICAL(flags); - pid = ASID_MASK(read_c0_entryhi()); + pid = read_c0_entryhi() & ASID_MASK; address &= (PAGE_MASK << 1); write_c0_entryhi(address | pid); pgdp = pgd_offset(vma->vm_mm, address); diff --git a/arch/mips/mm/tlb-r8k.c b/arch/mips/mm/tlb-r8k.c index 122f9207f49..91c2499f806 100644 --- a/arch/mips/mm/tlb-r8k.c +++ b/arch/mips/mm/tlb-r8k.c @@ -195,7 +195,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) if (current->active_mm != vma->vm_mm) return; - pid = ASID_MASK(read_c0_entryhi()); + pid = read_c0_entryhi() & ASID_MASK; local_irq_save(flags); address &= PAGE_MASK; diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 4d46d378757..afeef93f81a 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -29,7 +29,6 @@ #include <linux/init.h> #include <linux/cache.h> -#include <asm/mmu_context.h> #include <asm/cacheflush.h> #include <asm/pgtable.h> #include <asm/war.h> @@ -302,82 +301,6 @@ static 
u32 tlb_handler[128] __cpuinitdata; static struct uasm_label labels[128] __cpuinitdata; static struct uasm_reloc relocs[128] __cpuinitdata; -#ifdef CONFIG_64BIT -static int check_for_high_segbits __cpuinitdata; -#endif - -static void __cpuinit insn_fixup(unsigned int **start, unsigned int **stop, - unsigned int i_const) -{ - unsigned int **p; - - for (p = start; p < stop; p++) { -#ifndef CONFIG_CPU_MICROMIPS - unsigned int *ip; - - ip = *p; - *ip = (*ip & 0xffff0000) | i_const; -#else - unsigned short *ip; - - ip = ((unsigned short *)((unsigned int)*p - 1)); - if ((*ip & 0xf000) == 0x4000) { - *ip &= 0xfff1; - *ip |= (i_const << 1); - } else if ((*ip & 0xf000) == 0x6000) { - *ip &= 0xfff1; - *ip |= ((i_const >> 2) << 1); - } else { - ip++; - *ip = i_const; - } -#endif - local_flush_icache_range((unsigned long)ip, - (unsigned long)ip + sizeof(*ip)); - } -} - -#define asid_insn_fixup(section, const) \ -do { \ - extern unsigned int *__start_ ## section; \ - extern unsigned int *__stop_ ## section; \ - insn_fixup(&__start_ ## section, &__stop_ ## section, const); \ -} while(0) - -/* - * Caller is assumed to flush the caches before the first context switch. - */ -static void __cpuinit setup_asid(unsigned int inc, unsigned int mask, - unsigned int version_mask, - unsigned int first_version) -{ - extern asmlinkage void handle_ri_rdhwr_vivt(void); - unsigned long *vivt_exc; - -#ifdef CONFIG_CPU_MICROMIPS - /* - * Worst case optimised microMIPS addiu instructions support - * only a 3-bit immediate value. - */ - if(inc > 7) - panic("Invalid ASID increment value!"); -#endif - asid_insn_fixup(__asid_inc, inc); - asid_insn_fixup(__asid_mask, mask); - asid_insn_fixup(__asid_version_mask, version_mask); - asid_insn_fixup(__asid_first_version, first_version); - - /* Patch up the 'handle_ri_rdhwr_vivt' handler. 
*/ - vivt_exc = (unsigned long *) &handle_ri_rdhwr_vivt; -#ifdef CONFIG_CPU_MICROMIPS - vivt_exc = (unsigned long *)((unsigned long) vivt_exc - 1); -#endif - vivt_exc++; - *vivt_exc = (*vivt_exc & ~mask) | mask; - - current_cpu_data.asid_cache = first_version; -} - static int check_for_high_segbits __cpuinitdata; static unsigned int kscratch_used_mask __cpuinitdata; @@ -2256,7 +2179,6 @@ void __cpuinit build_tlb_refill_handler(void) case CPU_TX3922: case CPU_TX3927: #ifndef CONFIG_MIPS_PGD_C0_CONTEXT - setup_asid(0x40, 0xfc0, 0xf000, ASID_FIRST_VERSION_R3000); if (cpu_has_local_ebase) build_r3000_tlb_refill_handler(); if (!run_once) { @@ -2282,11 +2204,6 @@ void __cpuinit build_tlb_refill_handler(void) break; default: -#ifndef CONFIG_MIPS_MT_SMTC - setup_asid(0x1, 0xff, 0xff00, ASID_FIRST_VERSION_R4000); -#else - setup_asid(0x1, smtc_asid_mask, 0xff00, ASID_FIRST_VERSION_R4000); -#endif if (!run_once) { scratch_reg = allocate_kscratch(); #ifdef CONFIG_MIPS_PGD_C0_CONTEXT diff --git a/arch/mips/netlogic/xlp/setup.c b/arch/mips/netlogic/xlp/setup.c index af319143b59..eaa99d28cb8 100644 --- a/arch/mips/netlogic/xlp/setup.c +++ b/arch/mips/netlogic/xlp/setup.c @@ -37,6 +37,7 @@ #include <linux/pm.h> #include <linux/bootmem.h> +#include <asm/idle.h> #include <asm/reboot.h> #include <asm/time.h> #include <asm/bootinfo.h> diff --git a/arch/mips/netlogic/xlr/setup.c b/arch/mips/netlogic/xlr/setup.c index e3e094100e3..89c8c106663 100644 --- a/arch/mips/netlogic/xlr/setup.c +++ b/arch/mips/netlogic/xlr/setup.c @@ -36,6 +36,7 @@ #include <linux/serial_8250.h> #include <linux/pm.h> +#include <asm/idle.h> #include <asm/reboot.h> #include <asm/time.h> #include <asm/bootinfo.h> diff --git a/arch/mips/pmcs-msp71xx/msp_prom.c b/arch/mips/pmcs-msp71xx/msp_prom.c index 0edb89a6351..1c989753166 100644 --- a/arch/mips/pmcs-msp71xx/msp_prom.c +++ b/arch/mips/pmcs-msp71xx/msp_prom.c @@ -83,7 +83,7 @@ static inline unsigned char str2hexnum(unsigned char c) return 0; /* foo */ } -static inline int str2eaddr(unsigned char *ea, unsigned char *str) +int str2eaddr(unsigned char *ea, unsigned char *str) { int index = 0; unsigned char num = 0; diff --git a/arch/mips/pmcs-msp71xx/msp_setup.c b/arch/mips/pmcs-msp71xx/msp_setup.c index 1651cfdbfe7..396b2967ad8 100644 --- a/arch/mips/pmcs-msp71xx/msp_setup.c +++ b/arch/mips/pmcs-msp71xx/msp_setup.c @@ -12,6 +12,7 @@ #include <asm/bootinfo.h> #include <asm/cacheflush.h> +#include <asm/idle.h> #include <asm/r4kcache.h> #include <asm/reboot.h> #include <asm/smp-ops.h> diff --git a/arch/mips/ralink/dts/rt3050.dtsi b/arch/mips/ralink/dts/rt3050.dtsi index ef7da1e227e..e3203d414fe 100644 --- a/arch/mips/ralink/dts/rt3050.dtsi +++ b/arch/mips/ralink/dts/rt3050.dtsi @@ -55,4 +55,14 @@ reg-shift = <2>; }; }; + + usb@101c0000 { + compatible = "ralink,rt3050-usb", "snps,dwc2"; + reg = <0x101c0000 40000>; + + interrupt-parent = <&intc>; + interrupts = <18>; + + status = "disabled"; + }; }; diff --git a/arch/mips/ralink/dts/rt3052_eval.dts b/arch/mips/ralink/dts/rt3052_eval.dts index c18c9a84f4c..0ac73ea2819 100644 --- a/arch/mips/ralink/dts/rt3052_eval.dts +++ b/arch/mips/ralink/dts/rt3052_eval.dts @@ -43,4 +43,8 @@ reg = <0x50000 0x7b0000>; }; }; + + usb@101c0000 { + status = "ok"; + }; }; diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c index fb1569580de..6b5f3406f41 100644 --- a/arch/mips/ralink/of.c +++ b/arch/mips/ralink/of.c @@ -88,7 +88,7 @@ void __init plat_mem_setup(void) __dt_setup_arch(&__dtb_start); if (soc_info.mem_size) - add_memory_region(soc_info.mem_base, 
soc_info.mem_size, + add_memory_region(soc_info.mem_base, soc_info.mem_size * SZ_1M, BOOT_MEM_RAM); else detect_memory_region(soc_info.mem_base, diff --git a/arch/mips/txx9/generic/setup.c b/arch/mips/txx9/generic/setup.c index 5364aabc210..681e7f86c08 100644 --- a/arch/mips/txx9/generic/setup.c +++ b/arch/mips/txx9/generic/setup.c @@ -26,6 +26,7 @@ #include <linux/slab.h> #include <linux/irq.h> #include <asm/bootinfo.h> +#include <asm/idle.h> #include <asm/time.h> #include <asm/reboot.h> #include <asm/r4kcache.h> diff --git a/arch/mips/vr41xx/common/pmu.c b/arch/mips/vr41xx/common/pmu.c index 70a3f90131d..d7f755833c3 100644 --- a/arch/mips/vr41xx/common/pmu.c +++ b/arch/mips/vr41xx/common/pmu.c @@ -27,6 +27,7 @@ #include <asm/cacheflush.h> #include <asm/cpu.h> +#include <asm/idle.h> #include <asm/io.h> #include <asm/processor.h> #include <asm/reboot.h> diff --git a/arch/mips/wrppmc/reset.c b/arch/mips/wrppmc/reset.c index cc5474b24f0..80beb188ed4 100644 --- a/arch/mips/wrppmc/reset.c +++ b/arch/mips/wrppmc/reset.c @@ -9,6 +9,7 @@ #include <linux/kernel.h> #include <asm/cacheflush.h> +#include <asm/idle.h> #include <asm/mipsregs.h> #include <asm/processor.h>
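
The KVM hunks above drop the old vcpu_ioctl_{get,set}_regs callbacks in favour of the generic ONE_REG ioctls (KVM_GET_ONE_REG, KVM_SET_ONE_REG, KVM_GET_REG_LIST). As a rough userspace sketch only, not part of this merge, the snippet below shows how a VMM might read the guest program counter through that interface; the vcpu_fd is assumed to come from the usual KVM_CREATE_VM / KVM_CREATE_VCPU sequence, and the fallback definition of KVM_REG_MIPS_PC is an assumption for toolchains whose headers do not yet export the MIPS register IDs.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#ifndef KVM_REG_MIPS_PC
/* Assumed encoding (64-bit MIPS register, index 34 = PC); adjust to match
 * the headers shipped with your kernel. */
#define KVM_REG_MIPS_PC	(KVM_REG_MIPS | KVM_REG_SIZE_U64 | 34)
#endif

/* Read the guest PC of one vCPU. Returns 0 on success, -1 with errno set
 * on failure, mirroring the -EFAULT/-EINVAL paths in kvm_arch_vcpu_ioctl()
 * shown in the diff above. */
static int read_guest_pc(int vcpu_fd, uint64_t *pc)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_MIPS_PC,
		.addr = (uintptr_t)pc,	/* kernel writes the 64-bit value here */
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}

A KVM_SET_ONE_REG call is symmetric: fill the same structure and the kernel copies the value from addr into the vCPU state instead, which is the kvm_mips_set_reg() path in the hunk above.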