Diffstat (limited to 'arch/mips')
51 files changed, 991 insertions, 424 deletions
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 5cd695f905a..5e0014e864f 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1756,14 +1756,14 @@ config KVM_GUEST
 	help
 	  Select this option if building a guest kernel for KVM (Trap & Emulate) mode
 
-config KVM_HOST_FREQ
-	int "KVM Host Processor Frequency (MHz)"
+config KVM_GUEST_TIMER_FREQ
+	int "Count/Compare Timer Frequency (MHz)"
 	depends on KVM_GUEST
-	default 500
+	default 100
 	help
-	  Select this option if building a guest kernel for KVM to skip
-	  RTC emulation when determining guest CPU Frequency. Instead, the guest
-	  processor frequency is automatically derived from the host frequency.
+	  Set this to non-zero if building a guest kernel for KVM to skip RTC
+	  emulation when determining guest CPU Frequency. Instead, the guest's
+	  timer frequency is specified directly.
 
 choice
 	prompt "Kernel page size"
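As a rough illustration (assumed, not shown in this diff), a guest kernel would consume the renamed option when probing its timer rather than emulating an RTC; the option is in MHz, so the surrounding code would scale it to Hz:

    /* Hypothetical sketch; the code consuming the option is assumed,
     * only CONFIG_KVM_GUEST_TIMER_FREQ comes from the Kconfig above. */
    #ifdef CONFIG_KVM_GUEST
    	/* Count/Compare ticks at the directly configured rate. */
    	mips_hpt_frequency = CONFIG_KVM_GUEST_TIMER_FREQ * 1000000;
    #endif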
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 1a5b4032cb6..60a359cfa32 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -151,7 +151,7 @@ cflags-$(CONFIG_CPU_NEVADA)	+= $(call cc-option,-march=rm5200,-march=r5000) \
 			-Wa,--trap
 cflags-$(CONFIG_CPU_RM7000)	+= $(call cc-option,-march=rm7000,-march=r5000) \
 			-Wa,--trap
-cflags-$(CONFIG_CPU_SB1)	+= $(call cc-option,-march=sb1,-march=r5000) \
+cflags-$(CONFIG_CPU_SB1)	+= $(call cc-option,-march=sb1 -mno-mdmx -mno-mips3d,-march=r5000) \
 			-Wa,--trap
 cflags-$(CONFIG_CPU_R8000)	+= -march=r8000 -Wa,--trap
 cflags-$(CONFIG_CPU_R10000)	+= $(call cc-option,-march=r10000,-march=r8000) \
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index 331b837cec5..f1bec00d5a8 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -1053,36 +1053,26 @@ void prom_free_prom_memory(void)
 int octeon_prune_device_tree(void);
 
 extern const char __dtb_octeon_3xxx_begin;
-extern const char __dtb_octeon_3xxx_end;
 extern const char __dtb_octeon_68xx_begin;
-extern const char __dtb_octeon_68xx_end;
 
 void __init device_tree_init(void)
 {
-	int dt_size;
-	struct boot_param_header *fdt;
+	const void *fdt;
 	bool do_prune;
 
 	if (octeon_bootinfo->minor_version >= 3 && octeon_bootinfo->fdt_addr) {
 		fdt = phys_to_virt(octeon_bootinfo->fdt_addr);
 		if (fdt_check_header(fdt))
 			panic("Corrupt Device Tree passed to kernel.");
-		dt_size = be32_to_cpu(fdt->totalsize);
 		do_prune = false;
 	} else if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
-		fdt = (struct boot_param_header *)&__dtb_octeon_68xx_begin;
-		dt_size = &__dtb_octeon_68xx_end - &__dtb_octeon_68xx_begin;
+		fdt = &__dtb_octeon_68xx_begin;
 		do_prune = true;
 	} else {
-		fdt = (struct boot_param_header *)&__dtb_octeon_3xxx_begin;
-		dt_size = &__dtb_octeon_3xxx_end - &__dtb_octeon_3xxx_begin;
+		fdt = &__dtb_octeon_3xxx_begin;
 		do_prune = true;
 	}
 
-	/* Copy the default tree from init memory. */
-	initial_boot_params = early_init_dt_alloc_memory_arch(dt_size, 8);
-	if (initial_boot_params == NULL)
-		panic("Could not allocate initial_boot_params");
-	memcpy(initial_boot_params, fdt, dt_size);
+	initial_boot_params = (void *)fdt;
 
 	if (do_prune) {
 		octeon_prune_device_tree();
@@ -1090,7 +1080,7 @@ void __init device_tree_init(void)
 	} else {
 		pr_info("Using passed Device Tree.\n");
 	}
-	unflatten_device_tree();
+	unflatten_and_copy_device_tree();
 }
 
 static int __initdata disable_octeon_edac_p;
diff --git a/arch/mips/configs/fuloong2e_defconfig b/arch/mips/configs/fuloong2e_defconfig
index e5b73de08fc..002680648dc 100644
--- a/arch/mips/configs/fuloong2e_defconfig
+++ b/arch/mips/configs/fuloong2e_defconfig
@@ -188,7 +188,6 @@ CONFIG_USB_KBD=y
 CONFIG_USB_MOUSE=y
 CONFIG_USB=y
 CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
-CONFIG_USB_DEVICEFS=y
 # CONFIG_USB_DEVICE_CLASS is not set
 CONFIG_USB_OTG_WHITELIST=y
 CONFIG_USB_WUSB_CBAF=m
diff --git a/arch/mips/configs/lemote2f_defconfig b/arch/mips/configs/lemote2f_defconfig
index 343bebc4b63..227a9de3224 100644
--- a/arch/mips/configs/lemote2f_defconfig
+++ b/arch/mips/configs/lemote2f_defconfig
@@ -297,7 +297,6 @@ CONFIG_HID_WACOM=m
 CONFIG_HID_ZEROPLUS=m
 CONFIG_ZEROPLUS_FF=y
 CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
 # CONFIG_USB_DEVICE_CLASS is not set
 CONFIG_USB_DYNAMIC_MINORS=y
 CONFIG_USB_OTG_WHITELIST=y
diff --git a/arch/mips/configs/mpc30x_defconfig b/arch/mips/configs/mpc30x_defconfig
index c16de981292..7a346605c49 100644
--- a/arch/mips/configs/mpc30x_defconfig
+++ b/arch/mips/configs/mpc30x_defconfig
@@ -47,7 +47,6 @@ CONFIG_GPIO_VR41XX=y
 # CONFIG_VGA_CONSOLE is not set
 # CONFIG_HID_SUPPORT is not set
 CONFIG_USB=m
-CONFIG_USB_DEVICEFS=y
 CONFIG_USB_OHCI_HCD=m
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_VR41XX=y
diff --git a/arch/mips/configs/msp71xx_defconfig b/arch/mips/configs/msp71xx_defconfig
index d1142e9cd9a..201edfb2637 100644
--- a/arch/mips/configs/msp71xx_defconfig
+++ b/arch/mips/configs/msp71xx_defconfig
@@ -67,7 +67,6 @@ CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_PMCMSP=y
 # CONFIG_USB_HID is not set
 CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
 CONFIG_USB_MON=y
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_ROOT_HUB_TT=y
diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig
index 593946afc48..d269a5326a3 100644
--- a/arch/mips/configs/mtx1_defconfig
+++ b/arch/mips/configs/mtx1_defconfig
@@ -575,7 +575,6 @@ CONFIG_USB_HIDDEV=y
 CONFIG_USB_KBD=m
 CONFIG_USB_MOUSE=m
 CONFIG_USB=m
-CONFIG_USB_DEVICEFS=y
 CONFIG_USB_MON=m
 CONFIG_USB_EHCI_HCD=m
 CONFIG_USB_EHCI_ROOT_HUB_TT=y
diff --git a/arch/mips/configs/rm200_defconfig b/arch/mips/configs/rm200_defconfig
index 59d9d2fdcd4..73e7bf49461 100644
--- a/arch/mips/configs/rm200_defconfig
+++ b/arch/mips/configs/rm200_defconfig
@@ -301,7 +301,6 @@ CONFIG_USB_HIDDEV=y
 CONFIG_USB_KBD=m
 CONFIG_USB_MOUSE=m
 CONFIG_USB=m
-CONFIG_USB_DEVICEFS=y
 CONFIG_USB_MON=m
 CONFIG_USB_EHCI_HCD=m
 # CONFIG_USB_EHCI_TT_NEWSCHED is not set
diff --git a/arch/mips/configs/sb1250_swarm_defconfig b/arch/mips/configs/sb1250_swarm_defconfig
index 5b0463ef938..51bab13ef6f 100644
--- a/arch/mips/configs/sb1250_swarm_defconfig
+++ b/arch/mips/configs/sb1250_swarm_defconfig
@@ -72,7 +72,6 @@ CONFIG_SERIO_RAW=m
 # CONFIG_HW_RANDOM is not set
 # CONFIG_HWMON is not set
 CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
 CONFIG_USB_MON=y
 CONFIG_USB_OHCI_HCD=y
 CONFIG_EXT2_FS=y
diff --git a/arch/mips/configs/tb0219_defconfig b/arch/mips/configs/tb0219_defconfig
index 30036b4cbeb..11f51505d56 100644
--- a/arch/mips/configs/tb0219_defconfig
+++ b/arch/mips/configs/tb0219_defconfig
@@ -72,7 +72,6 @@ CONFIG_GPIO_TB0219=y
 # CONFIG_VGA_CONSOLE is not set
 # CONFIG_HID_SUPPORT is not set
 CONFIG_USB=m
-CONFIG_USB_DEVICEFS=y
 CONFIG_USB_MON=m
 CONFIG_USB_EHCI_HCD=m
 # CONFIG_USB_EHCI_TT_NEWSCHED is not set
diff --git a/arch/mips/configs/tb0226_defconfig b/arch/mips/configs/tb0226_defconfig
index 81bfa1d4d8e..d99b1905a1b 100644
--- a/arch/mips/configs/tb0226_defconfig
+++ b/arch/mips/configs/tb0226_defconfig
@@ -69,7 +69,6 @@ CONFIG_SERIAL_VR41XX_CONSOLE=y
 # CONFIG_VGA_CONSOLE is not set
 # CONFIG_HID_SUPPORT is not set
 CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
 CONFIG_USB_EHCI_HCD=y
 # CONFIG_USB_EHCI_TT_NEWSCHED is not set
 CONFIG_USB_OHCI_HCD=y
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index e8eb3d53a24..37b2befe651 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -761,13 +761,4 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
 
 #endif /* CONFIG_64BIT */
 
-/*
- * atomic*_return operations are serializing but not the non-*_return
- * versions.
- */
-#define smp_mb__before_atomic_dec()	smp_mb__before_llsc()
-#define smp_mb__after_atomic_dec()	smp_llsc_mb()
-#define smp_mb__before_atomic_inc()	smp_mb__before_llsc()
-#define smp_mb__after_atomic_inc()	smp_llsc_mb()
-
 #endif /* _ASM_ATOMIC_H */
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
index e1aa4e4c298..d0101dd0575 100644
--- a/arch/mips/include/asm/barrier.h
+++ b/arch/mips/include/asm/barrier.h
@@ -195,4 +195,7 @@ do {									\
 	___p1;								\
 })
 
+#define smp_mb__before_atomic()	smp_mb__before_llsc()
+#define smp_mb__after_atomic()	smp_llsc_mb()
+
 #endif /* __ASM_BARRIER_H */
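The two new barrier names pair with atomic RMW operations and bitops that do not order memory by themselves, replacing the old smp_mb__{before,after}_clear_bit() and smp_mb__{before,after}_atomic_{inc,dec}() variants removed above. A minimal sketch of the lock-release idiom that the bitops.h comment below describes (the bit index and flag word are assumed for illustration):

    static unsigned long busy_flags;

    static void release_resource(void)
    {
    	/* order earlier stores before the atomic clear */
    	smp_mb__before_atomic();
    	clear_bit(0, &busy_flags);	/* atomic, but unordered on its own */
    	/* make the clear visible before subsequent accesses */
    	smp_mb__after_atomic();
    }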
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index 6a65d49e2c0..7c8816f7b7c 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -38,13 +38,6 @@
 #endif
 
 /*
- * clear_bit() doesn't provide any barrier for the compiler.
- */
-#define smp_mb__before_clear_bit()	smp_mb__before_llsc()
-#define smp_mb__after_clear_bit()	smp_llsc_mb()
-
-
-/*
  * These are the "slower" versions of the functions and are in bitops.c.
  * These functions call raw_local_irq_{save,restore}().
  */
@@ -120,7 +113,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
  *
  * clear_bit() is atomic and may not be reordered.  However, it does
  * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
  * in order to ensure changes are visible on other processors.
  */
 static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
@@ -175,7 +168,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
  */
 static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
 {
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	clear_bit(nr, addr);
 }
diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
index dc2135be2a3..ff2707ab329 100644
--- a/arch/mips/include/asm/cpu-info.h
+++ b/arch/mips/include/asm/cpu-info.h
@@ -39,14 +39,14 @@ struct cache_desc {
 #define MIPS_CACHE_PINDEX	0x00000020	/* Physically indexed cache */
 
 struct cpuinfo_mips {
-	unsigned int		udelay_val;
-	unsigned int		asid_cache;
+	unsigned long		asid_cache;
 
 	/*
 	 * Capability and feature descriptor structure for MIPS CPU
 	 */
 	unsigned long		options;
 	unsigned long		ases;
+	unsigned int		udelay_val;
 	unsigned int		processor_id;
 	unsigned int		fpu_id;
 	unsigned int		msa_id;
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 060aaa6348d..b0aa9556575 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -19,6 +19,38 @@
 #include <linux/threads.h>
 #include <linux/spinlock.h>
 
+/* MIPS KVM register ids */
+#define MIPS_CP0_32(_R, _S)					\
+	(KVM_REG_MIPS | KVM_REG_SIZE_U32 | 0x10000 | (8 * (_R) + (_S)))
+
+#define MIPS_CP0_64(_R, _S)					\
+	(KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0x10000 | (8 * (_R) + (_S)))
+
+#define KVM_REG_MIPS_CP0_INDEX		MIPS_CP0_32(0, 0)
+#define KVM_REG_MIPS_CP0_ENTRYLO0	MIPS_CP0_64(2, 0)
+#define KVM_REG_MIPS_CP0_ENTRYLO1	MIPS_CP0_64(3, 0)
+#define KVM_REG_MIPS_CP0_CONTEXT	MIPS_CP0_64(4, 0)
+#define KVM_REG_MIPS_CP0_USERLOCAL	MIPS_CP0_64(4, 2)
+#define KVM_REG_MIPS_CP0_PAGEMASK	MIPS_CP0_32(5, 0)
+#define KVM_REG_MIPS_CP0_PAGEGRAIN	MIPS_CP0_32(5, 1)
+#define KVM_REG_MIPS_CP0_WIRED		MIPS_CP0_32(6, 0)
+#define KVM_REG_MIPS_CP0_HWRENA		MIPS_CP0_32(7, 0)
+#define KVM_REG_MIPS_CP0_BADVADDR	MIPS_CP0_64(8, 0)
+#define KVM_REG_MIPS_CP0_COUNT		MIPS_CP0_32(9, 0)
+#define KVM_REG_MIPS_CP0_ENTRYHI	MIPS_CP0_64(10, 0)
+#define KVM_REG_MIPS_CP0_COMPARE	MIPS_CP0_32(11, 0)
+#define KVM_REG_MIPS_CP0_STATUS		MIPS_CP0_32(12, 0)
+#define KVM_REG_MIPS_CP0_CAUSE		MIPS_CP0_32(13, 0)
+#define KVM_REG_MIPS_CP0_EPC		MIPS_CP0_64(14, 0)
+#define KVM_REG_MIPS_CP0_EBASE		MIPS_CP0_64(15, 1)
+#define KVM_REG_MIPS_CP0_CONFIG		MIPS_CP0_32(16, 0)
+#define KVM_REG_MIPS_CP0_CONFIG1	MIPS_CP0_32(16, 1)
+#define KVM_REG_MIPS_CP0_CONFIG2	MIPS_CP0_32(16, 2)
+#define KVM_REG_MIPS_CP0_CONFIG3	MIPS_CP0_32(16, 3)
+#define KVM_REG_MIPS_CP0_CONFIG7	MIPS_CP0_32(16, 7)
+#define KVM_REG_MIPS_CP0_XCONTEXT	MIPS_CP0_64(20, 0)
+#define KVM_REG_MIPS_CP0_ERROREPC	MIPS_CP0_64(30, 0)
+
 #define KVM_MAX_VCPUS	1
 #define KVM_USER_MEM_SLOTS 8
@@ -372,8 +404,19 @@ struct kvm_vcpu_arch {
 
 	u32 io_gpr;		/* GPR used as IO source/target */
 
-	/* Used to calibrate the virutal count register for the guest */
-	int32_t host_cp0_count;
+	struct hrtimer comparecount_timer;
+	/* Count timer control KVM register */
+	uint32_t count_ctl;
+	/* Count bias from the raw time */
+	uint32_t count_bias;
+	/* Frequency of timer in Hz */
+	uint32_t count_hz;
+	/* Dynamic nanosecond bias (multiple of count_period) to avoid overflow */
+	s64 count_dyn_bias;
+	/* Resume time */
+	ktime_t count_resume;
+	/* Period of timer tick in ns */
+	u64 count_period;
 
 	/* Bitmask of exceptions that are pending */
 	unsigned long pending_exceptions;
@@ -394,8 +437,6 @@ struct kvm_vcpu_arch {
 	uint32_t guest_kernel_asid[NR_CPUS];
 	struct mm_struct guest_kernel_mm, guest_user_mm;
 
-	struct hrtimer comparecount_timer;
-
 	int last_sched_cpu;
 
 	/* WAIT executed */
@@ -410,6 +451,7 @@ struct kvm_vcpu_arch {
 #define kvm_read_c0_guest_context(cop0) (cop0->reg[MIPS_CP0_TLB_CONTEXT][0])
 #define kvm_write_c0_guest_context(cop0, val) (cop0->reg[MIPS_CP0_TLB_CONTEXT][0] = (val))
 #define kvm_read_c0_guest_userlocal(cop0) (cop0->reg[MIPS_CP0_TLB_CONTEXT][2])
+#define kvm_write_c0_guest_userlocal(cop0, val) (cop0->reg[MIPS_CP0_TLB_CONTEXT][2] = (val))
 #define kvm_read_c0_guest_pagemask(cop0) (cop0->reg[MIPS_CP0_TLB_PG_MASK][0])
 #define kvm_write_c0_guest_pagemask(cop0, val) (cop0->reg[MIPS_CP0_TLB_PG_MASK][0] = (val))
 #define kvm_read_c0_guest_wired(cop0) (cop0->reg[MIPS_CP0_TLB_WIRED][0])
@@ -449,15 +491,74 @@ struct kvm_vcpu_arch {
 #define kvm_read_c0_guest_errorepc(cop0) (cop0->reg[MIPS_CP0_ERROR_PC][0])
 #define kvm_write_c0_guest_errorepc(cop0, val) (cop0->reg[MIPS_CP0_ERROR_PC][0] = (val))
 
+/*
+ * Some of the guest registers may be modified asynchronously (e.g. from a
+ * hrtimer callback in hard irq context) and therefore need stronger atomicity
+ * guarantees than other registers.
+ */
+
+static inline void _kvm_atomic_set_c0_guest_reg(unsigned long *reg,
+						unsigned long val)
+{
+	unsigned long temp;
+	do {
+		__asm__ __volatile__(
+		"	.set	mips3				\n"
+		"	" __LL "%0, %1				\n"
+		"	or	%0, %2				\n"
+		"	" __SC	"%0, %1				\n"
+		"	.set	mips0				\n"
+		: "=&r" (temp), "+m" (*reg)
+		: "r" (val));
+	} while (unlikely(!temp));
+}
+
+static inline void _kvm_atomic_clear_c0_guest_reg(unsigned long *reg,
+						  unsigned long val)
+{
+	unsigned long temp;
+	do {
+		__asm__ __volatile__(
+		"	.set	mips3				\n"
+		"	" __LL "%0, %1				\n"
+		"	and	%0, %2				\n"
+		"	" __SC	"%0, %1				\n"
+		"	.set	mips0				\n"
+		: "=&r" (temp), "+m" (*reg)
+		: "r" (~val));
+	} while (unlikely(!temp));
+}
+
+static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
+						   unsigned long change,
+						   unsigned long val)
+{
+	unsigned long temp;
+	do {
+		__asm__ __volatile__(
+		"	.set	mips3				\n"
+		"	" __LL "%0, %1				\n"
+		"	and	%0, %2				\n"
+		"	or	%0, %3				\n"
+		"	" __SC	"%0, %1				\n"
+		"	.set	mips0				\n"
+		: "=&r" (temp), "+m" (*reg)
+		: "r" (~change), "r" (val & change));
+	} while (unlikely(!temp));
+}
+
 #define kvm_set_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] |= (val))
 #define kvm_clear_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] &= ~(val))
-#define kvm_set_c0_guest_cause(cop0, val) (cop0->reg[MIPS_CP0_CAUSE][0] |= (val))
-#define kvm_clear_c0_guest_cause(cop0, val) (cop0->reg[MIPS_CP0_CAUSE][0] &= ~(val))
+
+/* Cause can be modified asynchronously from hardirq hrtimer callback */
+#define kvm_set_c0_guest_cause(cop0, val)				\
+	_kvm_atomic_set_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], val)
+#define kvm_clear_c0_guest_cause(cop0, val)				\
+	_kvm_atomic_clear_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], val)
 #define kvm_change_c0_guest_cause(cop0, change, val)			\
-{									\
-	kvm_clear_c0_guest_cause(cop0, change);				\
-	kvm_set_c0_guest_cause(cop0, ((val) & (change)));		\
-}
+	_kvm_atomic_change_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0],	\
+					change, val)
+
 #define kvm_set_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] |= (val))
 #define kvm_clear_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] &= ~(val))
 #define kvm_change_c0_guest_ebase(cop0, change, val)			\
@@ -468,29 +569,33 @@ struct kvm_vcpu_arch {
 
 struct kvm_mips_callbacks {
-	int (*handle_cop_unusable) (struct kvm_vcpu *vcpu);
-	int (*handle_tlb_mod) (struct kvm_vcpu *vcpu);
-	int (*handle_tlb_ld_miss) (struct kvm_vcpu *vcpu);
-	int (*handle_tlb_st_miss) (struct kvm_vcpu *vcpu);
-	int (*handle_addr_err_st) (struct kvm_vcpu *vcpu);
-	int (*handle_addr_err_ld) (struct kvm_vcpu *vcpu);
-	int (*handle_syscall) (struct kvm_vcpu *vcpu);
-	int (*handle_res_inst) (struct kvm_vcpu *vcpu);
-	int (*handle_break) (struct kvm_vcpu *vcpu);
-	int (*vm_init) (struct kvm *kvm);
-	int (*vcpu_init) (struct kvm_vcpu *vcpu);
-	int (*vcpu_setup) (struct kvm_vcpu *vcpu);
-	gpa_t(*gva_to_gpa) (gva_t gva);
-	void (*queue_timer_int) (struct kvm_vcpu *vcpu);
-	void (*dequeue_timer_int) (struct kvm_vcpu *vcpu);
-	void (*queue_io_int) (struct kvm_vcpu *vcpu,
-			      struct kvm_mips_interrupt *irq);
-	void (*dequeue_io_int) (struct kvm_vcpu *vcpu,
-				struct kvm_mips_interrupt *irq);
-	int (*irq_deliver) (struct kvm_vcpu *vcpu, unsigned int priority,
-			    uint32_t cause);
-	int (*irq_clear) (struct kvm_vcpu *vcpu, unsigned int priority,
-			  uint32_t cause);
+	int (*handle_cop_unusable)(struct kvm_vcpu *vcpu);
+	int (*handle_tlb_mod)(struct kvm_vcpu *vcpu);
+	int (*handle_tlb_ld_miss)(struct kvm_vcpu *vcpu);
+	int (*handle_tlb_st_miss)(struct kvm_vcpu *vcpu);
+	int (*handle_addr_err_st)(struct kvm_vcpu *vcpu);
+	int (*handle_addr_err_ld)(struct kvm_vcpu *vcpu);
+	int (*handle_syscall)(struct kvm_vcpu *vcpu);
+	int (*handle_res_inst)(struct kvm_vcpu *vcpu);
+	int (*handle_break)(struct kvm_vcpu *vcpu);
+	int (*vm_init)(struct kvm *kvm);
+	int (*vcpu_init)(struct kvm_vcpu *vcpu);
+	int (*vcpu_setup)(struct kvm_vcpu *vcpu);
+	gpa_t (*gva_to_gpa)(gva_t gva);
+	void (*queue_timer_int)(struct kvm_vcpu *vcpu);
+	void (*dequeue_timer_int)(struct kvm_vcpu *vcpu);
+	void (*queue_io_int)(struct kvm_vcpu *vcpu,
+			     struct kvm_mips_interrupt *irq);
+	void (*dequeue_io_int)(struct kvm_vcpu *vcpu,
+			       struct kvm_mips_interrupt *irq);
+	int (*irq_deliver)(struct kvm_vcpu *vcpu, unsigned int priority,
+			   uint32_t cause);
+	int (*irq_clear)(struct kvm_vcpu *vcpu, unsigned int priority,
+			 uint32_t cause);
+	int (*get_one_reg)(struct kvm_vcpu *vcpu,
+			   const struct kvm_one_reg *reg, s64 *v);
+	int (*set_one_reg)(struct kvm_vcpu *vcpu,
+			   const struct kvm_one_reg *reg, s64 v);
 };
 extern struct kvm_mips_callbacks *kvm_mips_callbacks;
 int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
@@ -609,7 +714,16 @@ extern enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
 extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
 							 struct kvm_run *run);
 
-enum emulation_result kvm_mips_emulate_count(struct kvm_vcpu *vcpu);
+uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu);
+void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count);
+void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare);
+void kvm_mips_init_count(struct kvm_vcpu *vcpu);
+int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl);
+int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume);
+int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz);
+void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu);
+void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu);
+enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu);
 
 enum emulation_result kvm_mips_check_privilege(unsigned long cause,
 					       uint32_t *opc,
@@ -646,7 +760,6 @@ extern int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc,
 			       struct kvm_vcpu *vcpu);
 
 /* Misc */
-extern void mips32_SyncICache(unsigned long addr, unsigned long size);
 extern int kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
 extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);
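The _kvm_atomic_*_c0_guest_reg() helpers above rely on MIPS load-linked/store-conditional: the SC writes back only if nothing else touched the location since the LL, and leaves 0 in the destination register on failure, so the loop retries until the read-modify-write really was atomic. A portable C11 illustration of the same retry pattern (illustration only, not kernel code):

    #include <stdatomic.h>

    static void atomic_set_bits(_Atomic unsigned long *reg, unsigned long val)
    {
    	unsigned long old = atomic_load(reg);

    	/* retry until no other thread modified *reg in between,
    	 * mirroring the LL/SC loop above */
    	while (!atomic_compare_exchange_weak(reg, &old, old | val))
    		;
    }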
diff --git a/arch/mips/include/asm/mach-jz4740/dma.h b/arch/mips/include/asm/mach-jz4740/dma.h
index 509cd582804..14ecc5313d2 100644
--- a/arch/mips/include/asm/mach-jz4740/dma.h
+++ b/arch/mips/include/asm/mach-jz4740/dma.h
@@ -22,8 +22,6 @@ enum jz4740_dma_request_type {
 	JZ4740_DMA_TYPE_UART_RECEIVE = 21,
 	JZ4740_DMA_TYPE_SPI_TRANSMIT = 22,
 	JZ4740_DMA_TYPE_SPI_RECEIVE = 23,
-	JZ4740_DMA_TYPE_AIC_TRANSMIT = 24,
-	JZ4740_DMA_TYPE_AIC_RECEIVE = 25,
 	JZ4740_DMA_TYPE_MMC_TRANSMIT = 26,
 	JZ4740_DMA_TYPE_MMC_RECEIVE = 27,
 	JZ4740_DMA_TYPE_TCU = 28,
diff --git a/arch/mips/include/asm/mips-boards/generic.h b/arch/mips/include/asm/mips-boards/generic.h
index 48616816bcb..c904c24550f 100644
--- a/arch/mips/include/asm/mips-boards/generic.h
+++ b/arch/mips/include/asm/mips-boards/generic.h
@@ -67,10 +67,6 @@
 
 extern int mips_revision_sconid;
 
-#ifdef CONFIG_OF
-extern struct boot_param_header __dtb_start;
-#endif
-
 #ifdef CONFIG_PCI
 extern void mips_pcibios_init(void);
 #else
diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h
index 12d6842962b..974b0e30896 100644
--- a/arch/mips/include/asm/pci.h
+++ b/arch/mips/include/asm/pci.h
@@ -73,11 +73,6 @@ extern unsigned long PCIBIOS_MIN_MEM;
 
 extern void pcibios_set_master(struct pci_dev *dev);
 
-static inline void pcibios_penalize_isa_irq(int irq, int active)
-{
-	/* We don't do dynamic PCI IRQ allocation */
-}
-
 #define HAVE_PCI_MMAP
 
 extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
diff --git a/arch/mips/include/asm/prom.h b/arch/mips/include/asm/prom.h
index ccd2b75f152..a9494c0141f 100644
--- a/arch/mips/include/asm/prom.h
+++ b/arch/mips/include/asm/prom.h
@@ -21,13 +21,13 @@ extern void device_tree_init(void);
 
 struct boot_param_header;
 
-extern void __dt_setup_arch(struct boot_param_header *bph);
+extern void __dt_setup_arch(void *bph);
 
 #define dt_setup_arch(sym)						\
 ({									\
-	extern struct boot_param_header __dtb_##sym##_begin;		\
+	extern char __dtb_##sym##_begin[];				\
 									\
-	__dt_setup_arch(&__dtb_##sym##_begin);				\
+	__dt_setup_arch(__dtb_##sym##_begin);				\
 })
 
 #else /* CONFIG_OF */
diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h
index 413d6c612be..e55813029d5 100644
--- a/arch/mips/include/asm/unistd.h
+++ b/arch/mips/include/asm/unistd.h
@@ -29,7 +29,6 @@
 #define __ARCH_WANT_SYS_GETHOSTNAME
 #define __ARCH_WANT_SYS_IPC
 #define __ARCH_WANT_SYS_PAUSE
-#define __ARCH_WANT_SYS_SGETMASK
 #define __ARCH_WANT_SYS_UTIME
 #define __ARCH_WANT_SYS_WAITPID
 #define __ARCH_WANT_SYS_SOCKETCALL
diff --git a/arch/mips/include/uapi/asm/kvm.h b/arch/mips/include/uapi/asm/kvm.h
index f09ff5ae205..2c04b6d9ff8 100644
--- a/arch/mips/include/uapi/asm/kvm.h
+++ b/arch/mips/include/uapi/asm/kvm.h
@@ -106,6 +106,41 @@ struct kvm_fpu {
 #define KVM_REG_MIPS_LO (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 33)
 #define KVM_REG_MIPS_PC (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 34)
 
+/* KVM specific control registers */
+
+/*
+ * CP0_Count control
+ * DC:    Set 0: Master disable CP0_Count and set COUNT_RESUME to now
+ *        Set 1: Master re-enable CP0_Count with unchanged bias, handling timer
+ *               interrupts since COUNT_RESUME
+ *        This can be used to freeze the timer to get a consistent snapshot of
+ *        the CP0_Count and timer interrupt pending state, while also resuming
+ *        safely without losing time or guest timer interrupts.
+ * Other: Reserved, do not change.
+ */
+#define KVM_REG_MIPS_COUNT_CTL		(KVM_REG_MIPS | KVM_REG_SIZE_U64 | \
+					 0x20000 | 0)
+#define KVM_REG_MIPS_COUNT_CTL_DC	0x00000001
+
+/*
+ * CP0_Count resume monotonic nanoseconds
+ * The monotonic nanosecond time of the last set of COUNT_CTL.DC (master
+ * disable). Any reads and writes of Count related registers while
+ * COUNT_CTL.DC=1 will appear to occur at this time. When COUNT_CTL.DC is
+ * cleared again (master enable) any timer interrupts since this time will be
+ * emulated.
+ * Modifications to times in the future are rejected.
+ */
+#define KVM_REG_MIPS_COUNT_RESUME	(KVM_REG_MIPS | KVM_REG_SIZE_U64 | \
+					 0x20000 | 1)
+/*
+ * CP0_Count rate in Hz
+ * Specifies the rate of the CP0_Count timer in Hz. Modifications occur without
+ * discontinuities in CP0_Count.
+ */
+#define KVM_REG_MIPS_COUNT_HZ		(KVM_REG_MIPS | KVM_REG_SIZE_U64 | \
+					 0x20000 | 2)
+
 /*
  * KVM MIPS specific structures and definitions
  *
diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h
index 2692abb28e3..5805414777e 100644
--- a/arch/mips/include/uapi/asm/unistd.h
+++ b/arch/mips/include/uapi/asm/unistd.h
@@ -381,7 +381,7 @@
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
 
 #define __NR_O32_Linux			4000
-#define __NR_O32_Linux_syscalls		350
+#define __NR_O32_Linux_syscalls		351
 
 #if _MIPS_SIM == _MIPS_SIM_ABI64
 
@@ -710,7 +710,7 @@
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
 
 #define __NR_64_Linux			5000
-#define __NR_64_Linux_syscalls		310
+#define __NR_64_Linux_syscalls		311
 
 #if _MIPS_SIM == _MIPS_SIM_NABI32
 
@@ -1043,6 +1043,6 @@
 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
 
 #define __NR_N32_Linux			6000
-#define __NR_N32_Linux_syscalls		314
+#define __NR_N32_Linux_syscalls		315
 
 #endif /* _UAPI_ASM_UNISTD_H */
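The KVM_REG_MIPS_COUNT_* ids added to uapi/asm/kvm.h above live in the same KVM_GET_ONE_REG/KVM_SET_ONE_REG namespace as the CP0 registers (0x20000 separates the KVM-specific controls from the 0x10000 CP0 block). A plausible userspace use, freezing the count before taking a migration snapshot (error handling elided; vcpu_fd is assumed to be an open VCPU file descriptor):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int freeze_count(int vcpu_fd)
    {
    	uint64_t ctl = KVM_REG_MIPS_COUNT_CTL_DC;	/* master disable */
    	struct kvm_one_reg reg = {
    		.id   = KVM_REG_MIPS_COUNT_CTL,
    		.addr = (uint64_t)(uintptr_t)&ctl,
    	};

    	/* afterwards CP0_Count and the pending timer interrupt state
    	 * are stable and can be read consistently */
    	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
    }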
diff --git a/arch/mips/jz4740/board-qi_lb60.c b/arch/mips/jz4740/board-qi_lb60.c
index c01900e5d07..088e92a79ae 100644
--- a/arch/mips/jz4740/board-qi_lb60.c
+++ b/arch/mips/jz4740/board-qi_lb60.c
@@ -425,6 +425,15 @@ static struct platform_device qi_lb60_audio_device = {
 	.id = -1,
 };
 
+static struct gpiod_lookup_table qi_lb60_audio_gpio_table = {
+	.dev_id = "qi-lb60-audio",
+	.table = {
+		GPIO_LOOKUP("Bank B", 29, "snd", 0),
+		GPIO_LOOKUP("Bank D", 4, "amp", 0),
+		{ },
+	},
+};
+
 static struct platform_device *jz_platform_devices[] __initdata = {
 	&jz4740_udc_device,
 	&jz4740_udc_xceiv_device,
@@ -461,6 +470,8 @@ static int __init qi_lb60_init_platform_devices(void)
 	jz4740_adc_device.dev.platform_data = &qi_lb60_battery_pdata;
 	jz4740_mmc_device.dev.platform_data = &qi_lb60_mmc_pdata;
 
+	gpiod_add_lookup_table(&qi_lb60_audio_gpio_table);
+
 	jz4740_serial_device_register();
 
 	spi_register_board_info(qi_lb60_spi_board_info,
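On the consumer side, a lookup table like this lets the "qi-lb60-audio" driver request its lines by function name instead of by global GPIO number, roughly as follows (a sketch; exact gpiod API details vary by kernel version):

    #include <linux/gpio/consumer.h>

    /* in the probe() of the device matching .dev_id above */
    struct gpio_desc *amp = devm_gpiod_get(dev, "amp");

    if (IS_ERR(amp))
    	return PTR_ERR(amp);
    gpiod_direction_output(amp, 1);	/* enable the amplifier */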
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index 4d78bf445a9..76122ff5cb5 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -317,7 +317,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 		if (regs->regs[insn.i_format.rs] ==
 		    regs->regs[insn.i_format.rt]) {
 			epc = epc + 4 + (insn.i_format.simmediate << 2);
-			if (insn.i_format.rt == beql_op)
+			if (insn.i_format.opcode == beql_op)
 				ret = BRANCH_LIKELY_TAKEN;
 		} else
 			epc += 8;
@@ -329,7 +329,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 		if (regs->regs[insn.i_format.rs] !=
 		    regs->regs[insn.i_format.rt]) {
 			epc = epc + 4 + (insn.i_format.simmediate << 2);
-			if (insn.i_format.rt == bnel_op)
+			if (insn.i_format.opcode == bnel_op)
 				ret = BRANCH_LIKELY_TAKEN;
 		} else
 			epc += 8;
@@ -341,7 +341,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 		/* rt field assumed to be zero */
 		if ((long)regs->regs[insn.i_format.rs] <= 0) {
 			epc = epc + 4 + (insn.i_format.simmediate << 2);
-			if (insn.i_format.rt == bnel_op)
+			if (insn.i_format.opcode == blezl_op)
 				ret = BRANCH_LIKELY_TAKEN;
 		} else
 			epc += 8;
@@ -353,7 +353,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 		/* rt field assumed to be zero */
 		if ((long)regs->regs[insn.i_format.rs] > 0) {
 			epc = epc + 4 + (insn.i_format.simmediate << 2);
-			if (insn.i_format.rt == bnel_op)
+			if (insn.i_format.opcode == bgtzl_op)
 				ret = BRANCH_LIKELY_TAKEN;
 		} else
 			epc += 8;
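The fix matters because i_format splits a branch word into opcode (bits 31:26), rs, rt and a 16-bit immediate; the old code compared the rt field against an opcode constant, so BRANCH_LIKELY_TAKEN could only be reported by coincidence. A worked example (not part of the patch):

    /*
     * beql $1, $2, offset encodes as:
     *   opcode = 0x14 (beql_op), rs = 1, rt = 2, simmediate = offset
     * so insn.i_format.rt here is 2, and the old "rt == beql_op" test
     * compared 2 with 0x14; the opcode field is what identifies the
     * branch-likely variant.
     */
    if (insn.i_format.opcode == beql_op)
    	ret = BRANCH_LIKELY_TAKEN;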
*/ - for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) { + for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) { if (lt[i] & 7) watch_active = 1; child->thread.watch.mips3264.watchlo[i] = lt[i]; diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 074e857ced2..8119ac2fdfc 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -1545,7 +1545,7 @@ asmlinkage void cache_parity_error(void) reg_val & (1<<30) ? "secondary" : "primary", reg_val & (1<<31) ? "data" : "insn"); if (cpu_has_mips_r2 && - ((current_cpu_data.processor_id && 0xff0000) == PRID_COMP_MIPS)) { + ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) { pr_err("Error bits: %s%s%s%s%s%s%s%s\n", reg_val & (1<<29) ? "ED " : "", reg_val & (1<<28) ? "ET " : "", @@ -1585,7 +1585,7 @@ asmlinkage void do_ftlb(void) /* For the moment, report the problem and hang. */ if (cpu_has_mips_r2 && - ((current_cpu_data.processor_id && 0xff0000) == PRID_COMP_MIPS)) { + ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) { pr_err("FTLB error exception, cp0_ecc=0x%08x:\n", read_c0_ecc()); pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc()); diff --git a/arch/mips/kvm/kvm_locore.S b/arch/mips/kvm/kvm_locore.S index bbace092ad0..033ac343e72 100644 --- a/arch/mips/kvm/kvm_locore.S +++ b/arch/mips/kvm/kvm_locore.S @@ -611,35 +611,3 @@ MIPSX(exceptions): .word _C_LABEL(MIPSX(GuestException)) # 29 .word _C_LABEL(MIPSX(GuestException)) # 30 .word _C_LABEL(MIPSX(GuestException)) # 31 - - -/* This routine makes changes to the instruction stream effective to the hardware. - * It should be called after the instruction stream is written. - * On return, the new instructions are effective. - * Inputs: - * a0 = Start address of new instruction stream - * a1 = Size, in bytes, of new instruction stream - */ - -#define HW_SYNCI_Step $1 -LEAF(MIPSX(SyncICache)) - .set push - .set mips32r2 - beq a1, zero, 20f - nop - REG_ADDU a1, a0, a1 - rdhwr v0, HW_SYNCI_Step - beq v0, zero, 20f - nop -10: - synci 0(a0) - REG_ADDU a0, a0, v0 - sltu v1, a0, a1 - bne v1, zero, 10b - nop - sync -20: - jr.hb ra - nop - .set pop -END(MIPSX(SyncICache)) diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c index da5186fbd77..cd5e4f56843 100644 --- a/arch/mips/kvm/kvm_mips.c +++ b/arch/mips/kvm/kvm_mips.c @@ -61,11 +61,6 @@ static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu) return 0; } -gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn) -{ - return gfn; -} - /* XXXKYMA: We are simulatoring a processor that has the WII bit set in Config7, so we * are "runnable" if interrupts are pending */ @@ -130,8 +125,8 @@ static void kvm_mips_init_vm_percpu(void *arg) int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) { if (atomic_inc_return(&kvm_mips_instance) == 1) { - kvm_info("%s: 1st KVM instance, setup host TLB parameters\n", - __func__); + kvm_debug("%s: 1st KVM instance, setup host TLB parameters\n", + __func__); on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1); } @@ -149,9 +144,7 @@ void kvm_mips_free_vcpus(struct kvm *kvm) if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE) kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]); } - - if (kvm->arch.guest_pmap) - kfree(kvm->arch.guest_pmap); + kfree(kvm->arch.guest_pmap); kvm_for_each_vcpu(i, vcpu, kvm) { kvm_arch_vcpu_free(vcpu); @@ -186,8 +179,8 @@ void kvm_arch_destroy_vm(struct kvm *kvm) /* If this is the last instance, restore wired count */ if (atomic_dec_return(&kvm_mips_instance) == 0) { - kvm_info("%s: last KVM instance, restoring TLB parameters\n", - 
__func__); + kvm_debug("%s: last KVM instance, restoring TLB parameters\n", + __func__); on_each_cpu(kvm_mips_uninit_tlbs, NULL, 1); } } @@ -249,9 +242,8 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, goto out; } - kvm_info - ("Allocated space for Guest PMAP Table (%ld pages) @ %p\n", - npages, kvm->arch.guest_pmap); + kvm_debug("Allocated space for Guest PMAP Table (%ld pages) @ %p\n", + npages, kvm->arch.guest_pmap); /* Now setup the page table */ for (i = 0; i < npages; i++) { @@ -296,7 +288,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) if (err) goto out_free_cpu; - kvm_info("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu); + kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu); /* Allocate space for host mode exception handlers that handle * guest mode exits @@ -304,7 +296,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) if (cpu_has_veic || cpu_has_vint) { size = 0x200 + VECTORSPACING * 64; } else { - size = 0x200; + size = 0x4000; } /* Save Linux EBASE */ @@ -316,8 +308,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) err = -ENOMEM; goto out_free_cpu; } - kvm_info("Allocated %d bytes for KVM Exception Handlers @ %p\n", - ALIGN(size, PAGE_SIZE), gebase); + kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n", + ALIGN(size, PAGE_SIZE), gebase); /* Save new ebase */ vcpu->arch.guest_ebase = gebase; @@ -342,15 +334,16 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) /* General handler, relocate to unmapped space for sanity's sake */ offset = 0x2000; - kvm_info("Installing KVM Exception handlers @ %p, %#x bytes\n", - gebase + offset, - mips32_GuestExceptionEnd - mips32_GuestException); + kvm_debug("Installing KVM Exception handlers @ %p, %#x bytes\n", + gebase + offset, + mips32_GuestExceptionEnd - mips32_GuestException); memcpy(gebase + offset, mips32_GuestException, mips32_GuestExceptionEnd - mips32_GuestException); /* Invalidate the icache for these ranges */ - mips32_SyncICache((unsigned long) gebase, ALIGN(size, PAGE_SIZE)); + local_flush_icache_range((unsigned long)gebase, + (unsigned long)gebase + ALIGN(size, PAGE_SIZE)); /* Allocate comm page for guest kernel, a TLB will be reserved for mapping GVA @ 0xFFFF8000 to this page */ vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL); @@ -360,14 +353,14 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) goto out_free_gebase; } - kvm_info("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage); + kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage); kvm_mips_commpage_init(vcpu); /* Init */ vcpu->arch.last_sched_cpu = -1; /* Start off the timer */ - kvm_mips_emulate_count(vcpu); + kvm_mips_init_count(vcpu); return vcpu; @@ -389,12 +382,8 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) kvm_mips_dump_stats(vcpu); - if (vcpu->arch.guest_ebase) - kfree(vcpu->arch.guest_ebase); - - if (vcpu->arch.kseg0_commpage) - kfree(vcpu->arch.kseg0_commpage); - + kfree(vcpu->arch.guest_ebase); + kfree(vcpu->arch.kseg0_commpage); } void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) @@ -423,11 +412,11 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) vcpu->mmio_needed = 0; } + local_irq_disable(); /* Check if we have any exceptions/interrupts pending */ kvm_mips_deliver_interrupts(vcpu, kvm_read_c0_guest_cause(vcpu->arch.cop0)); - local_irq_disable(); kvm_guest_enter(); r = __kvm_mips_vcpu_run(run, vcpu); @@ -490,36 +479,6 @@ 
kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, return -ENOIOCTLCMD; } -#define MIPS_CP0_32(_R, _S) \ - (KVM_REG_MIPS | KVM_REG_SIZE_U32 | 0x10000 | (8 * (_R) + (_S))) - -#define MIPS_CP0_64(_R, _S) \ - (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0x10000 | (8 * (_R) + (_S))) - -#define KVM_REG_MIPS_CP0_INDEX MIPS_CP0_32(0, 0) -#define KVM_REG_MIPS_CP0_ENTRYLO0 MIPS_CP0_64(2, 0) -#define KVM_REG_MIPS_CP0_ENTRYLO1 MIPS_CP0_64(3, 0) -#define KVM_REG_MIPS_CP0_CONTEXT MIPS_CP0_64(4, 0) -#define KVM_REG_MIPS_CP0_USERLOCAL MIPS_CP0_64(4, 2) -#define KVM_REG_MIPS_CP0_PAGEMASK MIPS_CP0_32(5, 0) -#define KVM_REG_MIPS_CP0_PAGEGRAIN MIPS_CP0_32(5, 1) -#define KVM_REG_MIPS_CP0_WIRED MIPS_CP0_32(6, 0) -#define KVM_REG_MIPS_CP0_HWRENA MIPS_CP0_32(7, 0) -#define KVM_REG_MIPS_CP0_BADVADDR MIPS_CP0_64(8, 0) -#define KVM_REG_MIPS_CP0_COUNT MIPS_CP0_32(9, 0) -#define KVM_REG_MIPS_CP0_ENTRYHI MIPS_CP0_64(10, 0) -#define KVM_REG_MIPS_CP0_COMPARE MIPS_CP0_32(11, 0) -#define KVM_REG_MIPS_CP0_STATUS MIPS_CP0_32(12, 0) -#define KVM_REG_MIPS_CP0_CAUSE MIPS_CP0_32(13, 0) -#define KVM_REG_MIPS_CP0_EBASE MIPS_CP0_64(15, 1) -#define KVM_REG_MIPS_CP0_CONFIG MIPS_CP0_32(16, 0) -#define KVM_REG_MIPS_CP0_CONFIG1 MIPS_CP0_32(16, 1) -#define KVM_REG_MIPS_CP0_CONFIG2 MIPS_CP0_32(16, 2) -#define KVM_REG_MIPS_CP0_CONFIG3 MIPS_CP0_32(16, 3) -#define KVM_REG_MIPS_CP0_CONFIG7 MIPS_CP0_32(16, 7) -#define KVM_REG_MIPS_CP0_XCONTEXT MIPS_CP0_64(20, 0) -#define KVM_REG_MIPS_CP0_ERROREPC MIPS_CP0_64(30, 0) - static u64 kvm_mips_get_one_regs[] = { KVM_REG_MIPS_R0, KVM_REG_MIPS_R1, @@ -560,25 +519,34 @@ static u64 kvm_mips_get_one_regs[] = { KVM_REG_MIPS_CP0_INDEX, KVM_REG_MIPS_CP0_CONTEXT, + KVM_REG_MIPS_CP0_USERLOCAL, KVM_REG_MIPS_CP0_PAGEMASK, KVM_REG_MIPS_CP0_WIRED, + KVM_REG_MIPS_CP0_HWRENA, KVM_REG_MIPS_CP0_BADVADDR, + KVM_REG_MIPS_CP0_COUNT, KVM_REG_MIPS_CP0_ENTRYHI, + KVM_REG_MIPS_CP0_COMPARE, KVM_REG_MIPS_CP0_STATUS, KVM_REG_MIPS_CP0_CAUSE, - /* EPC set via kvm_regs, et al. 
*/ + KVM_REG_MIPS_CP0_EPC, KVM_REG_MIPS_CP0_CONFIG, KVM_REG_MIPS_CP0_CONFIG1, KVM_REG_MIPS_CP0_CONFIG2, KVM_REG_MIPS_CP0_CONFIG3, KVM_REG_MIPS_CP0_CONFIG7, - KVM_REG_MIPS_CP0_ERROREPC + KVM_REG_MIPS_CP0_ERROREPC, + + KVM_REG_MIPS_COUNT_CTL, + KVM_REG_MIPS_COUNT_RESUME, + KVM_REG_MIPS_COUNT_HZ, }; static int kvm_mips_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) { struct mips_coproc *cop0 = vcpu->arch.cop0; + int ret; s64 v; switch (reg->id) { @@ -601,24 +569,36 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu, case KVM_REG_MIPS_CP0_CONTEXT: v = (long)kvm_read_c0_guest_context(cop0); break; + case KVM_REG_MIPS_CP0_USERLOCAL: + v = (long)kvm_read_c0_guest_userlocal(cop0); + break; case KVM_REG_MIPS_CP0_PAGEMASK: v = (long)kvm_read_c0_guest_pagemask(cop0); break; case KVM_REG_MIPS_CP0_WIRED: v = (long)kvm_read_c0_guest_wired(cop0); break; + case KVM_REG_MIPS_CP0_HWRENA: + v = (long)kvm_read_c0_guest_hwrena(cop0); + break; case KVM_REG_MIPS_CP0_BADVADDR: v = (long)kvm_read_c0_guest_badvaddr(cop0); break; case KVM_REG_MIPS_CP0_ENTRYHI: v = (long)kvm_read_c0_guest_entryhi(cop0); break; + case KVM_REG_MIPS_CP0_COMPARE: + v = (long)kvm_read_c0_guest_compare(cop0); + break; case KVM_REG_MIPS_CP0_STATUS: v = (long)kvm_read_c0_guest_status(cop0); break; case KVM_REG_MIPS_CP0_CAUSE: v = (long)kvm_read_c0_guest_cause(cop0); break; + case KVM_REG_MIPS_CP0_EPC: + v = (long)kvm_read_c0_guest_epc(cop0); + break; case KVM_REG_MIPS_CP0_ERROREPC: v = (long)kvm_read_c0_guest_errorepc(cop0); break; @@ -637,6 +617,15 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu, case KVM_REG_MIPS_CP0_CONFIG7: v = (long)kvm_read_c0_guest_config7(cop0); break; + /* registers to be handled specially */ + case KVM_REG_MIPS_CP0_COUNT: + case KVM_REG_MIPS_COUNT_CTL: + case KVM_REG_MIPS_COUNT_RESUME: + case KVM_REG_MIPS_COUNT_HZ: + ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v); + if (ret) + return ret; + break; default: return -EINVAL; } @@ -697,12 +686,18 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu, case KVM_REG_MIPS_CP0_CONTEXT: kvm_write_c0_guest_context(cop0, v); break; + case KVM_REG_MIPS_CP0_USERLOCAL: + kvm_write_c0_guest_userlocal(cop0, v); + break; case KVM_REG_MIPS_CP0_PAGEMASK: kvm_write_c0_guest_pagemask(cop0, v); break; case KVM_REG_MIPS_CP0_WIRED: kvm_write_c0_guest_wired(cop0, v); break; + case KVM_REG_MIPS_CP0_HWRENA: + kvm_write_c0_guest_hwrena(cop0, v); + break; case KVM_REG_MIPS_CP0_BADVADDR: kvm_write_c0_guest_badvaddr(cop0, v); break; @@ -712,12 +707,20 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu, case KVM_REG_MIPS_CP0_STATUS: kvm_write_c0_guest_status(cop0, v); break; - case KVM_REG_MIPS_CP0_CAUSE: - kvm_write_c0_guest_cause(cop0, v); + case KVM_REG_MIPS_CP0_EPC: + kvm_write_c0_guest_epc(cop0, v); break; case KVM_REG_MIPS_CP0_ERROREPC: kvm_write_c0_guest_errorepc(cop0, v); break; + /* registers to be handled specially */ + case KVM_REG_MIPS_CP0_COUNT: + case KVM_REG_MIPS_CP0_COMPARE: + case KVM_REG_MIPS_CP0_CAUSE: + case KVM_REG_MIPS_COUNT_CTL: + case KVM_REG_MIPS_COUNT_RESUME: + case KVM_REG_MIPS_COUNT_HZ: + return kvm_mips_callbacks->set_one_reg(vcpu, reg, v); default: return -EINVAL; } @@ -920,7 +923,7 @@ int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu) return -1; printk("VCPU Register Dump:\n"); - printk("\tpc = 0x%08lx\n", vcpu->arch.pc);; + printk("\tpc = 0x%08lx\n", vcpu->arch.pc); printk("\texceptions: %08lx\n", vcpu->arch.pending_exceptions); for (i = 0; i < 32; i += 4) { @@ -969,7 +972,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, 
struct kvm_regs *regs) return 0; } -void kvm_mips_comparecount_func(unsigned long data) +static void kvm_mips_comparecount_func(unsigned long data) { struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data; @@ -984,15 +987,13 @@ void kvm_mips_comparecount_func(unsigned long data) /* * low level hrtimer wake routine. */ -enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer) +static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer) { struct kvm_vcpu *vcpu; vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer); kvm_mips_comparecount_func((unsigned long) vcpu); - hrtimer_forward_now(&vcpu->arch.comparecount_timer, - ktime_set(0, MS_TO_NS(10))); - return HRTIMER_RESTART; + return kvm_mips_count_timeout(vcpu); } int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) diff --git a/arch/mips/kvm/kvm_mips_dyntrans.c b/arch/mips/kvm/kvm_mips_dyntrans.c index 96528e2d1ea..b80e41d858f 100644 --- a/arch/mips/kvm/kvm_mips_dyntrans.c +++ b/arch/mips/kvm/kvm_mips_dyntrans.c @@ -16,6 +16,7 @@ #include <linux/vmalloc.h> #include <linux/fs.h> #include <linux/bootmem.h> +#include <asm/cacheflush.h> #include "kvm_mips_comm.h" @@ -40,7 +41,7 @@ kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc, CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa (vcpu, (unsigned long) opc)); memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t)); - mips32_SyncICache(kseg0_opc, 32); + local_flush_icache_range(kseg0_opc, kseg0_opc + 32); return result; } @@ -66,7 +67,7 @@ kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc, CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa (vcpu, (unsigned long) opc)); memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t)); - mips32_SyncICache(kseg0_opc, 32); + local_flush_icache_range(kseg0_opc, kseg0_opc + 32); return result; } @@ -99,11 +100,12 @@ kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu) CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa (vcpu, (unsigned long) opc)); memcpy((void *)kseg0_opc, (void *)&mfc0_inst, sizeof(uint32_t)); - mips32_SyncICache(kseg0_opc, 32); + local_flush_icache_range(kseg0_opc, kseg0_opc + 32); } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) { local_irq_save(flags); memcpy((void *)opc, (void *)&mfc0_inst, sizeof(uint32_t)); - mips32_SyncICache((unsigned long) opc, 32); + local_flush_icache_range((unsigned long)opc, + (unsigned long)opc + 32); local_irq_restore(flags); } else { kvm_err("%s: Invalid address: %p\n", __func__, opc); @@ -134,11 +136,12 @@ kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu) CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa (vcpu, (unsigned long) opc)); memcpy((void *)kseg0_opc, (void *)&mtc0_inst, sizeof(uint32_t)); - mips32_SyncICache(kseg0_opc, 32); + local_flush_icache_range(kseg0_opc, kseg0_opc + 32); } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) { local_irq_save(flags); memcpy((void *)opc, (void *)&mtc0_inst, sizeof(uint32_t)); - mips32_SyncICache((unsigned long) opc, 32); + local_flush_icache_range((unsigned long)opc, + (unsigned long)opc + 32); local_irq_restore(flags); } else { kvm_err("%s: Invalid address: %p\n", __func__, opc); diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/kvm_mips_emul.c index e3fec99941a..8d484009008 100644 --- a/arch/mips/kvm/kvm_mips_emul.c +++ b/arch/mips/kvm/kvm_mips_emul.c @@ -11,6 +11,7 @@ #include <linux/errno.h> #include <linux/err.h> +#include <linux/ktime.h> #include <linux/kvm_host.h> #include <linux/module.h> #include 
<linux/vmalloc.h> @@ -228,25 +229,520 @@ enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause) return er; } -/* Everytime the compare register is written to, we need to decide when to fire - * the timer that represents timer ticks to the GUEST. +/** + * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled. + * @vcpu: Virtual CPU. * + * Returns: 1 if the CP0_Count timer is disabled by either the guest + * CP0_Cause.DC bit or the count_ctl.DC bit. + * 0 otherwise (in which case CP0_Count timer is running). */ -enum emulation_result kvm_mips_emulate_count(struct kvm_vcpu *vcpu) +static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu) { struct mips_coproc *cop0 = vcpu->arch.cop0; - enum emulation_result er = EMULATE_DONE; + return (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) || + (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC); +} + +/** + * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count. + * + * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias. + * + * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running). + */ +static uint32_t kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now) +{ + s64 now_ns, periods; + u64 delta; + + now_ns = ktime_to_ns(now); + delta = now_ns + vcpu->arch.count_dyn_bias; + + if (delta >= vcpu->arch.count_period) { + /* If delta is out of safe range the bias needs adjusting */ + periods = div64_s64(now_ns, vcpu->arch.count_period); + vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period; + /* Recalculate delta with new bias */ + delta = now_ns + vcpu->arch.count_dyn_bias; + } + + /* + * We've ensured that: + * delta < count_period + * + * Therefore the intermediate delta*count_hz will never overflow since + * at the boundary condition: + * delta = count_period + * delta = NSEC_PER_SEC * 2^32 / count_hz + * delta * count_hz = NSEC_PER_SEC * 2^32 + */ + return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC); +} + +/** + * kvm_mips_count_time() - Get effective current time. + * @vcpu: Virtual CPU. + * + * Get effective monotonic ktime. This is usually a straightforward ktime_get(), + * except when the master disable bit is set in count_ctl, in which case it is + * count_resume, i.e. the time that the count was disabled. + * + * Returns: Effective monotonic ktime for CP0_Count. + */ +static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu) +{ + if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) + return vcpu->arch.count_resume; + + return ktime_get(); +} + +/** + * kvm_mips_read_count_running() - Read the current count value as if running. + * @vcpu: Virtual CPU. + * @now: Kernel time to read CP0_Count at. + * + * Returns the current guest CP0_Count register at time @now and handles if the + * timer interrupt is pending and hasn't been handled yet. + * + * Returns: The current value of the guest CP0_Count register. + */ +static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now) +{ + ktime_t expires; + int running; + + /* Is the hrtimer pending? */ + expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer); + if (ktime_compare(now, expires) >= 0) { + /* + * Cancel it while we handle it so there's no chance of + * interference with the timeout handler. 
+ */ + running = hrtimer_cancel(&vcpu->arch.comparecount_timer); + + /* Nothing should be waiting on the timeout */ + kvm_mips_callbacks->queue_timer_int(vcpu); + + /* + * Restart the timer if it was running based on the expiry time + * we read, so that we don't push it back 2 periods. + */ + if (running) { + expires = ktime_add_ns(expires, + vcpu->arch.count_period); + hrtimer_start(&vcpu->arch.comparecount_timer, expires, + HRTIMER_MODE_ABS); + } + } + + /* Return the biased and scaled guest CP0_Count */ + return vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now); +} + +/** + * kvm_mips_read_count() - Read the current count value. + * @vcpu: Virtual CPU. + * + * Read the current guest CP0_Count value, taking into account whether the timer + * is stopped. + * + * Returns: The current guest CP0_Count value. + */ +uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + + /* If count disabled just read static copy of count */ + if (kvm_mips_count_disabled(vcpu)) + return kvm_read_c0_guest_count(cop0); + + return kvm_mips_read_count_running(vcpu, ktime_get()); +} + +/** + * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer. + * @vcpu: Virtual CPU. + * @count: Output pointer for CP0_Count value at point of freeze. + * + * Freeze the hrtimer safely and return both the ktime and the CP0_Count value + * at the point it was frozen. It is guaranteed that any pending interrupts at + * the point it was frozen are handled, and none after that point. + * + * This is useful where the time/CP0_Count is needed in the calculation of the + * new parameters. + * + * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running). + * + * Returns: The ktime at the point of freeze. + */ +static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, + uint32_t *count) +{ + ktime_t now; + + /* stop hrtimer before finding time */ + hrtimer_cancel(&vcpu->arch.comparecount_timer); + now = ktime_get(); + + /* find count at this point and handle pending hrtimer */ + *count = kvm_mips_read_count_running(vcpu, now); + + return now; +} + - /* If COUNT is enabled */ - if (!(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC)) { - hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer); - hrtimer_start(&vcpu->arch.comparecount_timer, - ktime_set(0, MS_TO_NS(10)), HRTIMER_MODE_REL); +/** + * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry. + * @vcpu: Virtual CPU. + * @now: ktime at point of resume. + * @count: CP0_Count at point of resume. + * + * Resumes the timer and updates the timer expiry based on @now and @count. + * This can be used in conjunction with kvm_mips_freeze_timer() when timer + * parameters need to be changed. + * + * It is guaranteed that a timer interrupt immediately after resume will be + * handled, but not if CP_Compare is exactly at @count. That case is already + * handled by kvm_mips_freeze_timer(). + * + * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running). 
+ */ +static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu, + ktime_t now, uint32_t count) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + uint32_t compare; + u64 delta; + ktime_t expire; + + /* Calculate timeout (wrap 0 to 2^32) */ + compare = kvm_read_c0_guest_compare(cop0); + delta = (u64)(uint32_t)(compare - count - 1) + 1; + delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz); + expire = ktime_add_ns(now, delta); + + /* Update hrtimer to use new timeout */ + hrtimer_cancel(&vcpu->arch.comparecount_timer); + hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS); +} + +/** + * kvm_mips_update_hrtimer() - Update next expiry time of hrtimer. + * @vcpu: Virtual CPU. + * + * Recalculates and updates the expiry time of the hrtimer. This can be used + * after timer parameters have been altered which do not depend on the time that + * the change occurs (in those cases kvm_mips_freeze_hrtimer() and + * kvm_mips_resume_hrtimer() are used directly). + * + * It is guaranteed that no timer interrupts will be lost in the process. + * + * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running). + */ +static void kvm_mips_update_hrtimer(struct kvm_vcpu *vcpu) +{ + ktime_t now; + uint32_t count; + + /* + * freeze_hrtimer takes care of a timer interrupts <= count, and + * resume_hrtimer the hrtimer takes care of a timer interrupts > count. + */ + now = kvm_mips_freeze_hrtimer(vcpu, &count); + kvm_mips_resume_hrtimer(vcpu, now, count); +} + +/** + * kvm_mips_write_count() - Modify the count and update timer. + * @vcpu: Virtual CPU. + * @count: Guest CP0_Count value to set. + * + * Sets the CP0_Count value and updates the timer accordingly. + */ +void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + ktime_t now; + + /* Calculate bias */ + now = kvm_mips_count_time(vcpu); + vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now); + + if (kvm_mips_count_disabled(vcpu)) + /* The timer's disabled, adjust the static count */ + kvm_write_c0_guest_count(cop0, count); + else + /* Update timeout */ + kvm_mips_resume_hrtimer(vcpu, now, count); +} + +/** + * kvm_mips_init_count() - Initialise timer. + * @vcpu: Virtual CPU. + * + * Initialise the timer to a sensible frequency, namely 100MHz, zero it, and set + * it going if it's enabled. + */ +void kvm_mips_init_count(struct kvm_vcpu *vcpu) +{ + /* 100 MHz */ + vcpu->arch.count_hz = 100*1000*1000; + vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, + vcpu->arch.count_hz); + vcpu->arch.count_dyn_bias = 0; + + /* Starting at 0 */ + kvm_mips_write_count(vcpu, 0); +} + +/** + * kvm_mips_set_count_hz() - Update the frequency of the timer. + * @vcpu: Virtual CPU. + * @count_hz: Frequency of CP0_Count timer in Hz. + * + * Change the frequency of the CP0_Count timer. This is done atomically so that + * CP0_Count is continuous and no timer interrupt is lost. + * + * Returns: -EINVAL if @count_hz is out of range. + * 0 on success. + */ +int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + int dc; + ktime_t now; + u32 count; + + /* ensure the frequency is in a sensible range... */ + if (count_hz <= 0 || count_hz > NSEC_PER_SEC) + return -EINVAL; + /* ... 
and has actually changed */ + if (vcpu->arch.count_hz == count_hz) + return 0; + + /* Safely freeze timer so we can keep it continuous */ + dc = kvm_mips_count_disabled(vcpu); + if (dc) { + now = kvm_mips_count_time(vcpu); + count = kvm_read_c0_guest_count(cop0); } else { - hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer); + now = kvm_mips_freeze_hrtimer(vcpu, &count); } - return er; + /* Update the frequency */ + vcpu->arch.count_hz = count_hz; + vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz); + vcpu->arch.count_dyn_bias = 0; + + /* Calculate adjusted bias so dynamic count is unchanged */ + vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now); + + /* Update and resume hrtimer */ + if (!dc) + kvm_mips_resume_hrtimer(vcpu, now, count); + return 0; +} + +/** + * kvm_mips_write_compare() - Modify compare and update timer. + * @vcpu: Virtual CPU. + * @compare: New CP0_Compare value. + * + * Update CP0_Compare to a new value and update the timeout. + */ +void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + + /* if unchanged, must just be an ack */ + if (kvm_read_c0_guest_compare(cop0) == compare) + return; + + /* Update compare */ + kvm_write_c0_guest_compare(cop0, compare); + + /* Update timeout if count enabled */ + if (!kvm_mips_count_disabled(vcpu)) + kvm_mips_update_hrtimer(vcpu); +} + +/** + * kvm_mips_count_disable() - Disable count. + * @vcpu: Virtual CPU. + * + * Disable the CP0_Count timer. A timer interrupt on or before the final stop + * time will be handled but not after. + * + * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or + * count_ctl.DC has been set (count disabled). + * + * Returns: The time that the timer was stopped. + */ +static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + uint32_t count; + ktime_t now; + + /* Stop hrtimer */ + hrtimer_cancel(&vcpu->arch.comparecount_timer); + + /* Set the static count from the dynamic count, handling pending TI */ + now = ktime_get(); + count = kvm_mips_read_count_running(vcpu, now); + kvm_write_c0_guest_count(cop0, count); + + return now; +} + +/** + * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC. + * @vcpu: Virtual CPU. + * + * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or + * before the final stop time will be handled if the timer isn't disabled by + * count_ctl.DC, but not after. + * + * Assumes CP0_Cause.DC is clear (count enabled). + */ +void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + + kvm_set_c0_guest_cause(cop0, CAUSEF_DC); + if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) + kvm_mips_count_disable(vcpu); +} + +/** + * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC. + * @vcpu: Virtual CPU. + * + * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after + * the start time will be handled if the timer isn't disabled by count_ctl.DC, + * potentially before even returning, so the caller should be careful with + * ordering of CP0_Cause modifications so as not to lose it. + * + * Assumes CP0_Cause.DC is set (count disabled). + */ +void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + uint32_t count; + + kvm_clear_c0_guest_cause(cop0, CAUSEF_DC); + + /* + * Set the dynamic count to match the static count. 
+	 * This starts the hrtimer if count_ctl.DC allows it.
+	 * Otherwise it conveniently updates the biases.
+	 */
+	count = kvm_read_c0_guest_count(cop0);
+	kvm_mips_write_count(vcpu, count);
+}
+
+/**
+ * kvm_mips_set_count_ctl() - Update the count control KVM register.
+ * @vcpu:	Virtual CPU.
+ * @count_ctl:	Count control register new value.
+ *
+ * Set the count control KVM register. The timer is updated accordingly.
+ *
+ * Returns:	-EINVAL if reserved bits are set.
+ *		0 on success.
+ */
+int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	s64 changed = count_ctl ^ vcpu->arch.count_ctl;
+	s64 delta;
+	ktime_t expire, now;
+	uint32_t count, compare;
+
+	/* Only allow defined bits to be changed */
+	if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
+		return -EINVAL;
+
+	/* Apply new value */
+	vcpu->arch.count_ctl = count_ctl;
+
+	/* Master CP0_Count disable */
+	if (changed & KVM_REG_MIPS_COUNT_CTL_DC) {
+		/* Is CP0_Cause.DC already disabling CP0_Count? */
+		if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) {
+			if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)
+				/* Just record the current time */
+				vcpu->arch.count_resume = ktime_get();
+		} else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) {
+			/* disable timer and record current time */
+			vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
+		} else {
+			/*
+			 * Calculate timeout relative to static count at resume
+			 * time (wrap 0 to 2^32).
+			 */
+			count = kvm_read_c0_guest_count(cop0);
+			compare = kvm_read_c0_guest_compare(cop0);
+			delta = (u64)(uint32_t)(compare - count - 1) + 1;
+			delta = div_u64(delta * NSEC_PER_SEC,
+					vcpu->arch.count_hz);
+			expire = ktime_add_ns(vcpu->arch.count_resume, delta);
+
+			/* Handle pending interrupt */
+			now = ktime_get();
+			if (ktime_compare(now, expire) >= 0)
+				/* Nothing should be waiting on the timeout */
+				kvm_mips_callbacks->queue_timer_int(vcpu);
+
+			/* Resume hrtimer without changing bias */
+			count = kvm_mips_read_count_running(vcpu, now);
+			kvm_mips_resume_hrtimer(vcpu, now, count);
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * kvm_mips_set_count_resume() - Update the count resume KVM register.
+ * @vcpu:		Virtual CPU.
+ * @count_resume:	Count resume register new value.
+ *
+ * Set the count resume KVM register.
+ *
+ * Returns:	-EINVAL if out of valid range (0..now).
+ *		0 on success.
+ */
+int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume)
+{
+	/*
+	 * It doesn't make sense for the resume time to be in the future, as it
+	 * would be possible for the next interrupt to be more than a full
+	 * period in the future.
+	 */
+	if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get()))
+		return -EINVAL;
+
+	vcpu->arch.count_resume = ns_to_ktime(count_resume);
+	return 0;
+}
+
+/**
+ * kvm_mips_count_timeout() - Push timer forward on timeout.
+ * @vcpu:	Virtual CPU.
+ *
+ * Handle an hrtimer event by pushing the hrtimer forward a period.
+ *
+ * Returns:	The hrtimer_restart value to return to the hrtimer subsystem.
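+ *
+ * The expiry simply advances by one full wrap of CP0_Count: count_period
+ * holds the duration of 2^32 Count cycles in nanoseconds, precomputed as
+ * ((u64)NSEC_PER_SEC << 32) / count_hz whenever the frequency is set.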
+ */ +enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu) +{ + /* Add the Count period to the current expiry time */ + hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer, + vcpu->arch.count_period); + return HRTIMER_RESTART; } enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu) @@ -471,8 +967,7 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, #endif /* Get reg */ if ((rd == MIPS_CP0_COUNT) && (sel == 0)) { - /* XXXKYMA: Run the Guest count register @ 1/4 the rate of the host */ - vcpu->arch.gprs[rt] = (read_c0_count() >> 2); + vcpu->arch.gprs[rt] = kvm_mips_read_count(vcpu); } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) { vcpu->arch.gprs[rt] = 0x0; #ifdef CONFIG_KVM_MIPS_DYN_TRANS @@ -539,10 +1034,7 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, } /* Are we writing to COUNT */ else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) { - /* Linux doesn't seem to write into COUNT, we throw an error - * if we notice a write to COUNT - */ - /*er = EMULATE_FAIL; */ + kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]); goto done; } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) { kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n", @@ -552,8 +1044,8 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, /* If we are writing to COMPARE */ /* Clear pending timer interrupt, if any */ kvm_mips_callbacks->dequeue_timer_int(vcpu); - kvm_write_c0_guest_compare(cop0, - vcpu->arch.gprs[rt]); + kvm_mips_write_compare(vcpu, + vcpu->arch.gprs[rt]); } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) { kvm_write_c0_guest_status(cop0, vcpu->arch.gprs[rt]); @@ -564,6 +1056,20 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, #ifdef CONFIG_KVM_MIPS_DYN_TRANS kvm_mips_trans_mtc0(inst, opc, vcpu); #endif + } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) { + uint32_t old_cause, new_cause; + old_cause = kvm_read_c0_guest_cause(cop0); + new_cause = vcpu->arch.gprs[rt]; + /* Update R/W bits */ + kvm_change_c0_guest_cause(cop0, 0x08800300, + new_cause); + /* DC bit enabling/disabling timer? 
*/ + if ((old_cause ^ new_cause) & CAUSEF_DC) { + if (new_cause & CAUSEF_DC) + kvm_mips_count_disable_cause(vcpu); + else + kvm_mips_count_enable_cause(vcpu); + } } else { cop0->reg[rd][sel] = vcpu->arch.gprs[rt]; #ifdef CONFIG_KVM_MIPS_DYN_TRANS @@ -887,7 +1393,7 @@ int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu) printk("%s: va: %#lx, unmapped: %#x\n", __func__, va, CKSEG0ADDR(pa)); - mips32_SyncICache(CKSEG0ADDR(pa), 32); + local_flush_icache_range(CKSEG0ADDR(pa), 32); return 0; } @@ -1325,8 +1831,12 @@ kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc, struct kvm_run *run, struct kvm_vcpu *vcpu) { enum emulation_result er = EMULATE_DONE; - #ifdef DEBUG + struct mips_coproc *cop0 = vcpu->arch.cop0; + unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | + (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); + int index; + /* * If address not in the guest TLB, then we are in trouble */ @@ -1553,8 +2063,7 @@ kvm_mips_handle_ri(unsigned long cause, uint32_t *opc, current_cpu_data.icache.linesz); break; case 2: /* Read count register */ - printk("RDHWR: Cont register\n"); - arch->gprs[rt] = kvm_read_c0_guest_count(cop0); + arch->gprs[rt] = kvm_mips_read_count(vcpu); break; case 3: /* Count register resolution */ switch (current_cpu_data.cputype) { @@ -1810,11 +2319,9 @@ kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc, er = EMULATE_FAIL; } } else { -#ifdef DEBUG kvm_debug ("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n", tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1); -#endif /* OK we have a Guest TLB entry, now inject it into the shadow host TLB */ kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL, NULL); diff --git a/arch/mips/kvm/kvm_tlb.c b/arch/mips/kvm/kvm_tlb.c index 50ab9c4d4a5..8a5a700ad8d 100644 --- a/arch/mips/kvm/kvm_tlb.c +++ b/arch/mips/kvm/kvm_tlb.c @@ -222,26 +222,19 @@ kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi, return -1; } - if (idx < 0) { - idx = read_c0_random() % current_cpu_data.tlbsize; - write_c0_index(idx); - mtc0_tlbw_hazard(); - } write_c0_entrylo0(entrylo0); write_c0_entrylo1(entrylo1); mtc0_tlbw_hazard(); - tlb_write_indexed(); + if (idx < 0) + tlb_write_random(); + else + tlb_write_indexed(); tlbw_use_hazard(); -#ifdef DEBUG - if (debug) { - kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] " - "entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n", - vcpu->arch.pc, idx, read_c0_entryhi(), - read_c0_entrylo0(), read_c0_entrylo1()); - } -#endif + kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n", + vcpu->arch.pc, idx, read_c0_entryhi(), + read_c0_entrylo0(), read_c0_entrylo1()); /* Flush D-cache */ if (flush_dcache_mask) { @@ -348,11 +341,9 @@ int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr, mtc0_tlbw_hazard(); tlbw_use_hazard(); -#ifdef DEBUG kvm_debug ("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n", vcpu->arch.pc, read_c0_index(), read_c0_entryhi(), read_c0_entrylo0(), read_c0_entrylo1()); -#endif /* Restore old ASID */ write_c0_entryhi(old_entryhi); @@ -400,10 +391,8 @@ kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu, entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) | (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V); -#ifdef DEBUG kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc, tlb->tlb_lo0, tlb->tlb_lo1); -#endif return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1, tlb->tlb_mask); @@ -424,10 +413,8 @@ int 
kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
 		}
 	}
 
-#ifdef DEBUG
 	kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
 		  __func__, entryhi, index, tlb[i].tlb_lo0, tlb[i].tlb_lo1);
-#endif
 
 	return index;
 }
@@ -461,9 +448,7 @@ int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
 
 	local_irq_restore(flags);
 
-#ifdef DEBUG
 	kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);
-#endif
 
 	return idx;
 }
@@ -508,12 +493,9 @@ int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
 
 	local_irq_restore(flags);
 
-#ifdef DEBUG
-	if (idx > 0) {
+	if (idx > 0)
 		kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
-			  (va & VPN2_MASK) | (vcpu->arch.asid_map[va & ASID_MASK] & ASID_MASK), idx);
-	}
-#endif
+			  (va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu), idx);
 
 	return 0;
 }
@@ -658,15 +640,30 @@ void kvm_local_flush_tlb_all(void)
 	local_irq_restore(flags);
 }
 
+/**
+ * kvm_mips_migrate_count() - Migrate timer.
+ * @vcpu:	Virtual CPU.
+ *
+ * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
+ * if it was running prior to being cancelled.
+ *
+ * Must be called when the VCPU is migrated to a different CPU to ensure that
+ * timer expiry during guest execution interrupts the guest and causes the
+ * interrupt to be delivered in a timely manner.
+ */
+static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
+{
+	if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
+		hrtimer_restart(&vcpu->arch.comparecount_timer);
+}
+
 /* Restore ASID once we are scheduled back after preemption */
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	unsigned long flags;
 	int newasid = 0;
 
-#ifdef DEBUG
 	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
-#endif
 
 	/* Allocate new kernel and user ASIDs if needed */
 
@@ -682,17 +679,23 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 			vcpu->arch.guest_user_mm.context.asid[cpu];
 		newasid++;
 
-		kvm_info("[%d]: cpu_context: %#lx\n", cpu,
-			 cpu_context(cpu, current->mm));
-		kvm_info("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
-			 cpu, vcpu->arch.guest_kernel_asid[cpu]);
-		kvm_info("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
-			 vcpu->arch.guest_user_asid[cpu]);
+		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
+			  cpu_context(cpu, current->mm));
+		kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
+			  cpu, vcpu->arch.guest_kernel_asid[cpu]);
+		kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
+			  vcpu->arch.guest_user_asid[cpu]);
 	}
 
 	if (vcpu->arch.last_sched_cpu != cpu) {
-		kvm_info("[%d->%d]KVM VCPU[%d] switch\n",
-			 vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
+		kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
+			  vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
+		/*
+		 * Migrate the timer interrupt to the current CPU so that it
+		 * always interrupts the guest and synchronously triggers a
+		 * guest timer interrupt.
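+		 * kvm_mips_migrate_count() cancels the hrtimer here and, if
+		 * it was pending, restarts it so that it is queued on this
+		 * CPU.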
+ */ + kvm_mips_migrate_count(vcpu); } if (!newasid) { diff --git a/arch/mips/kvm/kvm_trap_emul.c b/arch/mips/kvm/kvm_trap_emul.c index 30d725321db..693f952b2fb 100644 --- a/arch/mips/kvm/kvm_trap_emul.c +++ b/arch/mips/kvm/kvm_trap_emul.c @@ -32,9 +32,7 @@ static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva) gpa = KVM_INVALID_ADDR; } -#ifdef DEBUG kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa); -#endif return gpa; } @@ -85,11 +83,9 @@ static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu) if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0 || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) { -#ifdef DEBUG kvm_debug ("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n", cause, opc, badvaddr); -#endif er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu); if (er == EMULATE_DONE) @@ -138,11 +134,9 @@ static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu) } } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0 || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) { -#ifdef DEBUG kvm_debug ("USER ADDR TLB LD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n", cause, opc, badvaddr); -#endif er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu); if (er == EMULATE_DONE) ret = RESUME_GUEST; @@ -188,10 +182,8 @@ static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu) } } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0 || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) { -#ifdef DEBUG kvm_debug("USER ADDR TLB ST fault: PC: %#lx, BadVaddr: %#lx\n", vcpu->arch.pc, badvaddr); -#endif /* User Address (UA) fault, this could happen if * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this @@ -236,9 +228,7 @@ static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu) if (KVM_GUEST_KERNEL_MODE(vcpu) && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) { -#ifdef DEBUG kvm_debug("Emulate Store to MMIO space\n"); -#endif er = kvm_mips_emulate_inst(cause, opc, run, vcpu); if (er == EMULATE_FAIL) { printk("Emulate Store to MMIO space failed\n"); @@ -268,9 +258,7 @@ static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu) int ret = RESUME_GUEST; if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) { -#ifdef DEBUG kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr); -#endif er = kvm_mips_emulate_inst(cause, opc, run, vcpu); if (er == EMULATE_FAIL) { printk("Emulate Load from MMIO space failed\n"); @@ -401,6 +389,78 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu) return 0; } +static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg, + s64 *v) +{ + switch (reg->id) { + case KVM_REG_MIPS_CP0_COUNT: + *v = kvm_mips_read_count(vcpu); + break; + case KVM_REG_MIPS_COUNT_CTL: + *v = vcpu->arch.count_ctl; + break; + case KVM_REG_MIPS_COUNT_RESUME: + *v = ktime_to_ns(vcpu->arch.count_resume); + break; + case KVM_REG_MIPS_COUNT_HZ: + *v = vcpu->arch.count_hz; + break; + default: + return -EINVAL; + } + return 0; +} + +static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg, + s64 v) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + int ret = 0; + + switch (reg->id) { + case KVM_REG_MIPS_CP0_COUNT: + kvm_mips_write_count(vcpu, v); + break; + case KVM_REG_MIPS_CP0_COMPARE: + kvm_mips_write_compare(vcpu, v); + break; + case KVM_REG_MIPS_CP0_CAUSE: + /* + * If the timer is stopped or started (DC bit) it must look + * atomic with changes to the interrupt pending bits (TI, IRQ5). + * A timer interrupt should not happen in between. 
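+		 * Hence the timer is disabled first when DC is being set, and
+		 * only re-enabled (last) when DC is being cleared.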
+ */ + if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) { + if (v & CAUSEF_DC) { + /* disable timer first */ + kvm_mips_count_disable_cause(vcpu); + kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v); + } else { + /* enable timer last */ + kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v); + kvm_mips_count_enable_cause(vcpu); + } + } else { + kvm_write_c0_guest_cause(cop0, v); + } + break; + case KVM_REG_MIPS_COUNT_CTL: + ret = kvm_mips_set_count_ctl(vcpu, v); + break; + case KVM_REG_MIPS_COUNT_RESUME: + ret = kvm_mips_set_count_resume(vcpu, v); + break; + case KVM_REG_MIPS_COUNT_HZ: + ret = kvm_mips_set_count_hz(vcpu, v); + break; + default: + return -EINVAL; + } + return ret; +} + static struct kvm_mips_callbacks kvm_trap_emul_callbacks = { /* exit handlers */ .handle_cop_unusable = kvm_trap_emul_handle_cop_unusable, @@ -423,6 +483,8 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = { .dequeue_io_int = kvm_mips_dequeue_io_int_cb, .irq_deliver = kvm_mips_irq_deliver_cb, .irq_clear = kvm_mips_irq_clear_cb, + .get_one_reg = kvm_trap_emul_get_one_reg, + .set_one_reg = kvm_trap_emul_set_one_reg, }; int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks) diff --git a/arch/mips/lantiq/prom.c b/arch/mips/lantiq/prom.c index 19686c5bc5e..7447d322d14 100644 --- a/arch/mips/lantiq/prom.c +++ b/arch/mips/lantiq/prom.c @@ -71,23 +71,12 @@ void __init plat_mem_setup(void) * Load the builtin devicetree. This causes the chosen node to be * parsed resulting in our memory appearing */ - __dt_setup_arch(&__dtb_start); + __dt_setup_arch(__dtb_start); } void __init device_tree_init(void) { - unsigned long base, size; - - if (!initial_boot_params) - return; - - base = virt_to_phys((void *)initial_boot_params); - size = be32_to_cpu(initial_boot_params->totalsize); - - /* Before we do anything, lets reserve the dt blob */ - reserve_bootmem(base, size, BOOTMEM_DEFAULT); - - unflatten_device_tree(); + unflatten_and_copy_device_tree(); } void __init prom_init(void) diff --git a/arch/mips/lantiq/prom.h b/arch/mips/lantiq/prom.h index 8e07b5f28ef..bfd2d58c1d6 100644 --- a/arch/mips/lantiq/prom.h +++ b/arch/mips/lantiq/prom.h @@ -26,6 +26,4 @@ struct ltq_soc_info { extern void ltq_soc_detect(struct ltq_soc_info *i); extern void ltq_soc_init(void); -extern struct boot_param_header __dtb_start; - #endif diff --git a/arch/mips/loongson/common/cs5536/cs5536_mfgpt.c b/arch/mips/loongson/common/cs5536/cs5536_mfgpt.c index c639b9db001..12c75db2342 100644 --- a/arch/mips/loongson/common/cs5536/cs5536_mfgpt.c +++ b/arch/mips/loongson/common/cs5536/cs5536_mfgpt.c @@ -27,8 +27,7 @@ #include <cs5536/cs5536_mfgpt.h> -DEFINE_SPINLOCK(mfgpt_lock); -EXPORT_SYMBOL(mfgpt_lock); +static DEFINE_RAW_SPINLOCK(mfgpt_lock); static u32 mfgpt_base; @@ -55,7 +54,7 @@ EXPORT_SYMBOL(enable_mfgpt0_counter); static void init_mfgpt_timer(enum clock_event_mode mode, struct clock_event_device *evt) { - spin_lock(&mfgpt_lock); + raw_spin_lock(&mfgpt_lock); switch (mode) { case CLOCK_EVT_MODE_PERIODIC: @@ -79,7 +78,7 @@ static void init_mfgpt_timer(enum clock_event_mode mode, /* Nothing to do here */ break; } - spin_unlock(&mfgpt_lock); + raw_spin_unlock(&mfgpt_lock); } static struct clock_event_device mfgpt_clockevent = { @@ -157,7 +156,7 @@ static cycle_t mfgpt_read(struct clocksource *cs) static int old_count; static u32 old_jifs; - spin_lock_irqsave(&mfgpt_lock, flags); + raw_spin_lock_irqsave(&mfgpt_lock, flags); /* * Although our caller may have the read side of xtime_lock, * this is now a seqlock, and we are cheating 
in this routine @@ -191,7 +190,7 @@ static cycle_t mfgpt_read(struct clocksource *cs) old_count = count; old_jifs = jifs; - spin_unlock_irqrestore(&mfgpt_lock, flags); + raw_spin_unlock_irqrestore(&mfgpt_lock, flags); return (cycle_t) (jifs * COMPARE) + count; } diff --git a/arch/mips/loongson/lemote-2f/clock.c b/arch/mips/loongson/lemote-2f/clock.c index 67dd94ef28e..1eed38e28b1 100644 --- a/arch/mips/loongson/lemote-2f/clock.c +++ b/arch/mips/loongson/lemote-2f/clock.c @@ -91,10 +91,9 @@ EXPORT_SYMBOL(clk_put); int clk_set_rate(struct clk *clk, unsigned long rate) { - unsigned int rate_khz = rate / 1000; + struct cpufreq_frequency_table *pos; int ret = 0; int regval; - int i; if (likely(clk->ops && clk->ops->set_rate)) { unsigned long flags; @@ -107,22 +106,16 @@ int clk_set_rate(struct clk *clk, unsigned long rate) if (unlikely(clk->flags & CLK_RATE_PROPAGATES)) propagate_rate(clk); - for (i = 0; loongson2_clockmod_table[i].frequency != CPUFREQ_TABLE_END; - i++) { - if (loongson2_clockmod_table[i].frequency == - CPUFREQ_ENTRY_INVALID) - continue; - if (rate_khz == loongson2_clockmod_table[i].frequency) + cpufreq_for_each_valid_entry(pos, loongson2_clockmod_table) + if (rate == pos->frequency) break; - } - if (rate_khz != loongson2_clockmod_table[i].frequency) + if (rate != pos->frequency) return -ENOTSUPP; clk->rate = rate; regval = LOONGSON_CHIPCFG0; - regval = (regval & ~0x7) | - (loongson2_clockmod_table[i].driver_data - 1); + regval = (regval & ~0x7) | (pos->driver_data - 1); LOONGSON_CHIPCFG0 = regval; return ret; diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c index 9e67cdea3c7..f7b91d3a371 100644 --- a/arch/mips/mm/cache.c +++ b/arch/mips/mm/cache.c @@ -31,6 +31,7 @@ void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, void (*flush_icache_range)(unsigned long start, unsigned long end); EXPORT_SYMBOL_GPL(flush_icache_range); void (*local_flush_icache_range)(unsigned long start, unsigned long end); +EXPORT_SYMBOL_GPL(local_flush_icache_range); void (*__flush_cache_vmap)(void); void (*__flush_cache_vunmap)(void); diff --git a/arch/mips/mm/hugetlbpage.c b/arch/mips/mm/hugetlbpage.c index 77e0ae036e7..4ec8ee10d37 100644 --- a/arch/mips/mm/hugetlbpage.c +++ b/arch/mips/mm/hugetlbpage.c @@ -84,11 +84,6 @@ int pud_huge(pud_t pud) return (pud_val(pud) & _PAGE_HUGE) != 0; } -int pmd_huge_support(void) -{ - return 1; -} - struct page * follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write) diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c index 58033c44690..b611102e23b 100644 --- a/arch/mips/mm/page.c +++ b/arch/mips/mm/page.c @@ -273,7 +273,7 @@ void build_clear_page(void) uasm_i_ori(&buf, A2, A0, off); if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x()) - uasm_i_lui(&buf, AT, 0xa000); + uasm_i_lui(&buf, AT, uasm_rel_hi(0xa0000000)); off = cache_line_size ? min(8, pref_bias_clear_store / cache_line_size) * cache_line_size : 0; @@ -424,7 +424,7 @@ void build_copy_page(void) uasm_i_ori(&buf, A2, A0, off); if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x()) - uasm_i_lui(&buf, AT, 0xa000); + uasm_i_lui(&buf, AT, uasm_rel_hi(0xa0000000)); off = cache_line_size ? 
min(8, pref_bias_copy_load / cache_line_size) * cache_line_size : 0; diff --git a/arch/mips/mti-malta/malta-memory.c b/arch/mips/mti-malta/malta-memory.c index 6d0f4ab3632..f2364e41968 100644 --- a/arch/mips/mti-malta/malta-memory.c +++ b/arch/mips/mti-malta/malta-memory.c @@ -27,7 +27,7 @@ unsigned long physical_memsize = 0L; fw_memblock_t * __init fw_getmdesc(int eva) { char *memsize_str, *ememsize_str __maybe_unused = NULL, *ptr; - unsigned long memsize, ememsize __maybe_unused = 0; + unsigned long memsize = 0, ememsize __maybe_unused = 0; static char cmdline[COMMAND_LINE_SIZE] __initdata; int tmp; diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c index 31900991214..3778a359f3a 100644 --- a/arch/mips/mti-malta/malta-time.c +++ b/arch/mips/mti-malta/malta-time.c @@ -74,18 +74,8 @@ static void __init estimate_frequencies(void) unsigned int giccount = 0, gicstart = 0; #endif -#if defined (CONFIG_KVM_GUEST) && defined (CONFIG_KVM_HOST_FREQ) - unsigned int prid = read_c0_prid() & (PRID_COMP_MASK | PRID_IMP_MASK); - - /* - * XXXKYMA: hardwire the CPU frequency to Host Freq/4 - */ - count = (CONFIG_KVM_HOST_FREQ * 1000000) >> 3; - if ((prid != (PRID_COMP_MIPS | PRID_IMP_20KC)) && - (prid != (PRID_COMP_MIPS | PRID_IMP_25KF))) - count *= 2; - - mips_hpt_frequency = count; +#if defined(CONFIG_KVM_GUEST) && CONFIG_KVM_GUEST_TIMER_FREQ + mips_hpt_frequency = CONFIG_KVM_GUEST_TIMER_FREQ * 1000000; return; #endif diff --git a/arch/mips/mti-sead3/sead3-setup.c b/arch/mips/mti-sead3/sead3-setup.c index bf7fe48bf2f..e43f4801a24 100644 --- a/arch/mips/mti-sead3/sead3-setup.c +++ b/arch/mips/mti-sead3/sead3-setup.c @@ -69,17 +69,17 @@ static void __init parse_memsize_param(void) if (!memsize) return; - offset = fdt_path_offset(&__dtb_start, "/memory"); + offset = fdt_path_offset(__dtb_start, "/memory"); if (offset > 0) { uint64_t new_value; /* * reg contains 2 32-bits BE values, offset and size. We just * want to replace the size value without affecting the offset */ - prop_value = fdt_getprop(&__dtb_start, offset, "reg", &prop_len); + prop_value = fdt_getprop(__dtb_start, offset, "reg", &prop_len); new_value = be64_to_cpu(*prop_value); new_value = (new_value & ~0xffffffffllu) | memsize; - fdt_setprop_inplace_u64(&__dtb_start, offset, "reg", new_value); + fdt_setprop_inplace_u64(__dtb_start, offset, "reg", new_value); } } @@ -92,7 +92,7 @@ void __init plat_mem_setup(void) * Load the builtin devicetree. 
This causes the chosen node to be * parsed resulting in our memory appearing */ - __dt_setup_arch(&__dtb_start); + __dt_setup_arch(__dtb_start); } void __init device_tree_init(void) diff --git a/arch/mips/netlogic/xlp/dt.c b/arch/mips/netlogic/xlp/dt.c index 5754097b9cd..bdde33147bc 100644 --- a/arch/mips/netlogic/xlp/dt.c +++ b/arch/mips/netlogic/xlp/dt.c @@ -42,7 +42,7 @@ #include <asm/prom.h> extern u32 __dtb_xlp_evp_begin[], __dtb_xlp_svp_begin[], - __dtb_xlp_fvp_begin[], __dtb_xlp_gvp_begin[], __dtb_start[]; + __dtb_xlp_fvp_begin[], __dtb_xlp_gvp_begin[]; static void *xlp_fdt_blob; void __init *xlp_dt_init(void *fdtp) @@ -87,22 +87,7 @@ void __init xlp_early_init_devtree(void) void __init device_tree_init(void) { - unsigned long base, size; - struct boot_param_header *fdtp = xlp_fdt_blob; - - if (!fdtp) - return; - - base = virt_to_phys(fdtp); - size = be32_to_cpu(fdtp->totalsize); - - /* Before we do anything, lets reserve the dt blob */ - reserve_bootmem(base, size, BOOTMEM_DEFAULT); - - unflatten_device_tree(); - - /* free the space reserved for the dt blob */ - free_bootmem(base, size); + unflatten_and_copy_device_tree(); } static struct of_device_id __initdata xlp_ids[] = { diff --git a/arch/mips/pci/msi-xlp.c b/arch/mips/pci/msi-xlp.c index afd8405e018..3249685e03a 100644 --- a/arch/mips/pci/msi-xlp.c +++ b/arch/mips/pci/msi-xlp.c @@ -206,14 +206,8 @@ static struct irq_chip xlp_msix_chip = { .irq_unmask = unmask_msi_irq, }; -void destroy_irq(unsigned int irq) -{ - /* nothing to do yet */ -} - void arch_teardown_msi_irq(unsigned int irq) { - destroy_irq(irq); } /* @@ -298,10 +292,8 @@ static int xlp_setup_msi(uint64_t lnkbase, int node, int link, xirq = xirq + msivec; /* msi mapped to global irq space */ ret = irq_set_msi_desc(xirq, desc); - if (ret < 0) { - destroy_irq(xirq); + if (ret < 0) return ret; - } write_msi_msg(xirq, &msg); return 0; diff --git a/arch/mips/pci/pci-rc32434.c b/arch/mips/pci/pci-rc32434.c index b128cb973eb..7f6ce6d734c 100644 --- a/arch/mips/pci/pci-rc32434.c +++ b/arch/mips/pci/pci-rc32434.c @@ -53,7 +53,6 @@ static struct resource rc32434_res_pci_mem1 = { .start = 0x50000000, .end = 0x5FFFFFFF, .flags = IORESOURCE_MEM, - .parent = &rc32434_res_pci_mem1, .sibling = NULL, .child = &rc32434_res_pci_mem2 }; diff --git a/arch/mips/pci/pci-xlr.c b/arch/mips/pci/pci-xlr.c index 4427abbd48b..0dde80332d3 100644 --- a/arch/mips/pci/pci-xlr.c +++ b/arch/mips/pci/pci-xlr.c @@ -214,14 +214,8 @@ static int get_irq_vector(const struct pci_dev *dev) } #ifdef CONFIG_PCI_MSI -void destroy_irq(unsigned int irq) -{ - /* nothing to do yet */ -} - void arch_teardown_msi_irq(unsigned int irq) { - destroy_irq(irq); } int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc) @@ -263,10 +257,8 @@ int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc) MSI_DATA_DELIVERY_FIXED; ret = irq_set_msi_desc(irq, desc); - if (ret < 0) { - destroy_irq(irq); + if (ret < 0) return ret; - } write_msi_msg(irq, &msg); return 0; diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c index eccc5526155..251395210e2 100644 --- a/arch/mips/ralink/of.c +++ b/arch/mips/ralink/of.c @@ -28,8 +28,6 @@ __iomem void *rt_sysc_membase; __iomem void *rt_memc_membase; -extern struct boot_param_header __dtb_start; - __iomem void *plat_of_remap_node(const char *node) { struct resource res; @@ -52,30 +50,7 @@ __iomem void *plat_of_remap_node(const char *node) void __init device_tree_init(void) { - unsigned long base, size; - void *fdt_copy; - - if (!initial_boot_params) - return; - - base 
= virt_to_phys((void *)initial_boot_params);
-	size = be32_to_cpu(initial_boot_params->totalsize);
-
-	/* Before we do anything, lets reserve the dt blob */
-	reserve_bootmem(base, size, BOOTMEM_DEFAULT);
-
-	/* The strings in the flattened tree are referenced directly by the
-	 * device tree, so copy the flattened device tree from init memory
-	 * to regular memory.
-	 */
-	fdt_copy = alloc_bootmem(size);
-	memcpy(fdt_copy, initial_boot_params, size);
-	initial_boot_params = fdt_copy;
-
-	unflatten_device_tree();
-
-	/* free the space reserved for the dt blob */
-	free_bootmem(base, size);
+	unflatten_and_copy_device_tree();
 }
 
 void __init plat_mem_setup(void)
@@ -86,7 +61,7 @@ void __init plat_mem_setup(void)
 	 * Load the builtin devicetree. This causes the chosen node to be
 	 * parsed resulting in our memory appearing
 	 */
-	__dt_setup_arch(&__dtb_start);
+	__dt_setup_arch(__dtb_start);
 
 	if (soc_info.mem_size)
 		add_memory_region(soc_info.mem_base, soc_info.mem_size * SZ_1M,
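For reference: the timer state added above (CP0_Count/CP0_Compare plus the
COUNT_CTL/COUNT_RESUME/COUNT_HZ controls) is reachable from userspace through
KVM's one_reg interface wired up in kvm_trap_emul_{get,set}_one_reg(). A
minimal sketch of driving it from a VMM follows; it assumes the
KVM_REG_MIPS_COUNT_* IDs from this series are visible via the uapi headers,
and the helper name is illustrative:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>	/* struct kvm_one_reg, KVM_SET_ONE_REG */

    /* Illustrative helper: write a 64-bit one_reg value to a vcpu fd
     * obtained from KVM_CREATE_VCPU. */
    static int vcpu_set_u64_reg(int vcpu_fd, uint64_t id, uint64_t value)
    {
    	struct kvm_one_reg reg = {
    		.id   = id,
    		.addr = (uintptr_t)&value,
    	};

    	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
    }

    /* e.g. run the guest CP0_Count at 200 MHz instead of the default: */
    /*	vcpu_set_u64_reg(vcpu_fd, KVM_REG_MIPS_COUNT_HZ, 200 * 1000 * 1000); */

Per kvm_mips_set_count_hz() above, such a call fails with -EINVAL for any
frequency outside (0, NSEC_PER_SEC], i.e. non-positive or above 1 GHz.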