From 3fd28fce765632d0fe46b31f63c0e7a7ec6c6b79 Mon Sep 17 00:00:00 2001 From: Eddie Dong Date: Thu, 19 Nov 2009 17:54:07 +0200 Subject: KVM: x86: make double/triple fault promotion generic to all exceptions Move Double-Fault generation logic out of page fault exception generating function to cover more generic case. Signed-off-by: Eddie Dong Signed-off-by: Gleb Natapov Signed-off-by: Marcelo Tosatti --- arch/x86/kvm/x86.c | 89 +++++++++++++++++++++++++++++++++++++----------------- 1 file changed, 61 insertions(+), 28 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index a1e1bc9d412..8d860e0301a 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -257,12 +257,68 @@ void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data) } EXPORT_SYMBOL_GPL(kvm_set_apic_base); +#define EXCPT_BENIGN 0 +#define EXCPT_CONTRIBUTORY 1 +#define EXCPT_PF 2 + +static int exception_class(int vector) +{ + switch (vector) { + case PF_VECTOR: + return EXCPT_PF; + case DE_VECTOR: + case TS_VECTOR: + case NP_VECTOR: + case SS_VECTOR: + case GP_VECTOR: + return EXCPT_CONTRIBUTORY; + default: + break; + } + return EXCPT_BENIGN; +} + +static void kvm_multiple_exception(struct kvm_vcpu *vcpu, + unsigned nr, bool has_error, u32 error_code) +{ + u32 prev_nr; + int class1, class2; + + if (!vcpu->arch.exception.pending) { + queue: + vcpu->arch.exception.pending = true; + vcpu->arch.exception.has_error_code = has_error; + vcpu->arch.exception.nr = nr; + vcpu->arch.exception.error_code = error_code; + return; + } + + /* to check exception */ + prev_nr = vcpu->arch.exception.nr; + if (prev_nr == DF_VECTOR) { + /* triple fault -> shutdown */ + set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests); + return; + } + class1 = exception_class(prev_nr); + class2 = exception_class(nr); + if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY) + || (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) { + /* generate double fault per SDM Table 5-5 */ + vcpu->arch.exception.pending = true; + vcpu->arch.exception.has_error_code = true; + vcpu->arch.exception.nr = DF_VECTOR; + vcpu->arch.exception.error_code = 0; + } else + /* replace previous exception with a new one in a hope + that instruction re-execution will regenerate lost + exception */ + goto queue; +} + void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr) { - WARN_ON(vcpu->arch.exception.pending); - vcpu->arch.exception.pending = true; - vcpu->arch.exception.has_error_code = false; - vcpu->arch.exception.nr = nr; + kvm_multiple_exception(vcpu, nr, false, 0); } EXPORT_SYMBOL_GPL(kvm_queue_exception); @@ -270,25 +326,6 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr, u32 error_code) { ++vcpu->stat.pf_guest; - - if (vcpu->arch.exception.pending) { - switch(vcpu->arch.exception.nr) { - case DF_VECTOR: - /* triple fault -> shutdown */ - set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests); - return; - case PF_VECTOR: - vcpu->arch.exception.nr = DF_VECTOR; - vcpu->arch.exception.error_code = 0; - return; - default: - /* replace previous exception with a new one in a hope - that instruction re-execution will regenerate lost - exception */ - vcpu->arch.exception.pending = false; - break; - } - } vcpu->arch.cr2 = addr; kvm_queue_exception_e(vcpu, PF_VECTOR, error_code); } @@ -301,11 +338,7 @@ EXPORT_SYMBOL_GPL(kvm_inject_nmi); void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code) { - WARN_ON(vcpu->arch.exception.pending); - vcpu->arch.exception.pending = true; - vcpu->arch.exception.has_error_code = 
true; - vcpu->arch.exception.nr = nr; - vcpu->arch.exception.error_code = error_code; + kvm_multiple_exception(vcpu, nr, true, error_code); } EXPORT_SYMBOL_GPL(kvm_queue_exception_e); -- cgit v1.2.3-70-g09d2 From cb84b55f6cde26c7c17beaf87da08645ae6ccbf9 Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Wed, 11 Nov 2009 17:29:49 -0200 Subject: KVM: x86: raise TSS exception for NULL CS and SS segments Windows 2003 uses task switch to triple fault and reboot (the other exception being reserved pdptrs bits). Signed-off-by: Marcelo Tosatti --- arch/x86/kvm/x86.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'arch') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 8d860e0301a..27931867791 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4410,6 +4410,15 @@ static int is_vm86_segment(struct kvm_vcpu *vcpu, int seg) (kvm_get_rflags(vcpu) & X86_EFLAGS_VM); } +static void kvm_check_segment_descriptor(struct kvm_vcpu *vcpu, int seg, + u16 selector) +{ + /* NULL selector is not valid for CS and SS */ + if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS) + if (!selector) + kvm_queue_exception_e(vcpu, TS_VECTOR, selector >> 3); +} + int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int type_bits, int seg) { @@ -4419,6 +4428,8 @@ int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, return kvm_load_realmode_segment(vcpu, selector, seg); if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg)) return 1; + + kvm_check_segment_descriptor(vcpu, seg, selector); kvm_seg.type |= type_bits; if (seg != VCPU_SREG_SS && seg != VCPU_SREG_CS && -- cgit v1.2.3-70-g09d2 From 186a3e526ac1b4063a723f90ae4893beedb24fc6 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Wed, 2 Dec 2009 15:17:00 +0200 Subject: KVM: MMU: Report spte not found in rmap before BUG() In the past we've had errors of single-bit in the other two cases; the printk() may confirm it for the third case (many->many). Signed-off-by: Avi Kivity --- arch/x86/kvm/mmu.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 89a49fb46a2..4f499d7f710 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -662,6 +662,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte) prev_desc = desc; desc = desc->more; } + pr_err("rmap_remove: %p %llx many->many\n", spte, *spte); BUG(); } } -- cgit v1.2.3-70-g09d2 From 59708670b639bff00f92e519df1ae14da240e919 Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Tue, 15 Dec 2009 13:29:54 +0800 Subject: KVM: VMX: Trap and invalid MWAIT/MONITOR instruction We don't support these instructions, but guest can execute them even if the feature('monitor') haven't been exposed in CPUID. So we would trap and inject a #UD if guest try this way. 
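Purely illustrative, not part of the patch: the guest-visible contract here is that MONITOR/MWAIT availability is advertised by CPUID.01H:ECX bit 3, so a guest that checks the flag before using the instructions is unaffected, while one that skips the check now takes #UD instead of silently reaching the host. A minimal userspace-style sketch of that check, assuming GCC's <cpuid.h>:

#include <cpuid.h>
#include <stdbool.h>

/* CPUID leaf 1, ECX bit 3 == MONITOR/MWAIT supported. */
static bool cpu_has_monitor_mwait(void)
{
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
                return false;
        return (ecx >> 3) & 1;
}
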
Cc: stable@kernel.org Signed-off-by: Sheng Yang Signed-off-by: Avi Kivity --- arch/x86/include/asm/vmx.h | 1 + arch/x86/kvm/vmx.c | 10 ++++++++++ 2 files changed, 11 insertions(+) (limited to 'arch') diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index 2b4945419a8..8f6b0111446 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -251,6 +251,7 @@ enum vmcs_field { #define EXIT_REASON_MSR_READ 31 #define EXIT_REASON_MSR_WRITE 32 #define EXIT_REASON_MWAIT_INSTRUCTION 36 +#define EXIT_REASON_MONITOR_INSTRUCTION 39 #define EXIT_REASON_PAUSE_INSTRUCTION 40 #define EXIT_REASON_MCE_DURING_VMENTRY 41 #define EXIT_REASON_TPR_BELOW_THRESHOLD 43 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index d4918d6fc92..8a8e1396507 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -1224,6 +1224,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf) CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MOV_DR_EXITING | CPU_BASED_USE_TSC_OFFSETING | + CPU_BASED_MWAIT_EXITING | + CPU_BASED_MONITOR_EXITING | CPU_BASED_INVLPG_EXITING; opt = CPU_BASED_TPR_SHADOW | CPU_BASED_USE_MSR_BITMAPS | @@ -3416,6 +3418,12 @@ static int handle_pause(struct kvm_vcpu *vcpu) return 1; } +static int handle_invalid_op(struct kvm_vcpu *vcpu) +{ + kvm_queue_exception(vcpu, UD_VECTOR); + return 1; +} + /* * The exit handlers return 1 if the exit was handled fully and guest execution * may resume. Otherwise they set the kvm_run parameter to indicate what needs @@ -3453,6 +3461,8 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = { [EXIT_REASON_EPT_VIOLATION] = handle_ept_violation, [EXIT_REASON_EPT_MISCONFIG] = handle_ept_misconfig, [EXIT_REASON_PAUSE_INSTRUCTION] = handle_pause, + [EXIT_REASON_MWAIT_INSTRUCTION] = handle_invalid_op, + [EXIT_REASON_MONITOR_INSTRUCTION] = handle_invalid_op, }; static const int kvm_vmx_max_exit_handlers = -- cgit v1.2.3-70-g09d2 From cdc0e24456bf5678f63497569c3676c9019f82c1 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sun, 6 Dec 2009 17:21:14 +0200 Subject: KVM: VMX: Move some cr[04] related constants to vmx.c They have no place in common code. 
Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_host.h | 13 ------------- arch/x86/kvm/vmx.c | 13 +++++++++++++ 2 files changed, 13 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 4f865e8b854..da6dee86276 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -38,19 +38,6 @@ #define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS | \ 0xFFFFFF0000000000ULL) -#define KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST \ - (X86_CR0_WP | X86_CR0_NE | X86_CR0_NW | X86_CR0_CD) -#define KVM_GUEST_CR0_MASK \ - (KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE) -#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST \ - (X86_CR0_WP | X86_CR0_NE | X86_CR0_TS | X86_CR0_MP) -#define KVM_VM_CR0_ALWAYS_ON \ - (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE) -#define KVM_GUEST_CR4_MASK \ - (X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE) -#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE) -#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE) - #define INVALID_PAGE (~(hpa_t)0) #define UNMAPPED_GVA (~(gpa_t)0) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 8a8e1396507..efbb614ccd3 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -61,6 +61,19 @@ module_param_named(unrestricted_guest, static int __read_mostly emulate_invalid_guest_state = 0; module_param(emulate_invalid_guest_state, bool, S_IRUGO); +#define KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST \ + (X86_CR0_WP | X86_CR0_NE | X86_CR0_NW | X86_CR0_CD) +#define KVM_GUEST_CR0_MASK \ + (KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE) +#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST \ + (X86_CR0_WP | X86_CR0_NE | X86_CR0_TS | X86_CR0_MP) +#define KVM_VM_CR0_ALWAYS_ON \ + (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE) +#define KVM_GUEST_CR4_MASK \ + (X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE) +#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE) +#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE) + /* * These 2 parameters are used to config the controls for Pause-Loop Exiting: * ple_gap: upper bound on the amount of time between two successive -- cgit v1.2.3-70-g09d2 From fc78f51938e1ea866daa2045851b2e5681371668 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Mon, 7 Dec 2009 12:16:48 +0200 Subject: KVM: Add accessor for reading cr4 (or some bits of cr4) Some bits of cr4 can be owned by the guest on vmx, so when we read them, we copy them to the vcpu structure. In preparation for making the set of guest-owned bits dynamic, use helpers to access these bits so we don't need to know where the bit resides. No changes to svm since all bits are host-owned there. 
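Purely illustrative, not part of the patch: the pattern being introduced is a read-through accessor with lazy synchronization; a masked read only calls back into vendor code (->decache_cr4_guest_bits) when the requested bits intersect the guest-owned set. A sketch of how call sites are expected to look after the conversion (the two helper names below are hypothetical; the real conversions are in the hunks that follow):

static inline bool guest_cr4_has(struct kvm_vcpu *vcpu, unsigned long bit)
{
        /* Syncs from hardware only if 'bit' is currently guest-owned. */
        return kvm_read_cr4_bits(vcpu, bit) != 0;
}

static inline unsigned long guest_cr4(struct kvm_vcpu *vcpu)
{
        /* Full read: always refreshes any guest-owned bits first. */
        return kvm_read_cr4(vcpu);
}
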
Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_host.h | 1 + arch/x86/kvm/kvm_cache_regs.h | 12 ++++++++++++ arch/x86/kvm/mmu.h | 5 +++-- arch/x86/kvm/vmx.c | 13 ++++++++----- arch/x86/kvm/x86.c | 16 ++++++---------- 5 files changed, 30 insertions(+), 17 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index da6dee86276..e9f4f12ec3c 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -272,6 +272,7 @@ struct kvm_vcpu_arch { unsigned long cr2; unsigned long cr3; unsigned long cr4; + unsigned long cr4_guest_owned_bits; unsigned long cr8; u32 hflags; u64 pdptrs[4]; /* pae */ diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h index 7bcc5b6a440..35acc36e178 100644 --- a/arch/x86/kvm/kvm_cache_regs.h +++ b/arch/x86/kvm/kvm_cache_regs.h @@ -38,4 +38,16 @@ static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index) return vcpu->arch.pdptrs[index]; } +static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask) +{ + if (mask & vcpu->arch.cr4_guest_owned_bits) + kvm_x86_ops->decache_cr4_guest_bits(vcpu); + return vcpu->arch.cr4 & mask; +} + +static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu) +{ + return kvm_read_cr4_bits(vcpu, ~0UL); +} + #endif diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index 61a1b3884b4..4567d8042b2 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -2,6 +2,7 @@ #define __KVM_X86_MMU_H #include +#include "kvm_cache_regs.h" #define PT64_PT_BITS 9 #define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS) @@ -64,12 +65,12 @@ static inline int is_long_mode(struct kvm_vcpu *vcpu) static inline int is_pae(struct kvm_vcpu *vcpu) { - return vcpu->arch.cr4 & X86_CR4_PAE; + return kvm_read_cr4_bits(vcpu, X86_CR4_PAE); } static inline int is_pse(struct kvm_vcpu *vcpu) { - return vcpu->arch.cr4 & X86_CR4_PSE; + return kvm_read_cr4_bits(vcpu, X86_CR4_PSE); } static inline int is_paging(struct kvm_vcpu *vcpu) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index efbb614ccd3..284e905c59d 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -1615,8 +1615,10 @@ static void vmx_flush_tlb(struct kvm_vcpu *vcpu) static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu) { - vcpu->arch.cr4 &= KVM_GUEST_CR4_MASK; - vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK; + ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits; + + vcpu->arch.cr4 &= ~cr4_guest_owned_bits; + vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & cr4_guest_owned_bits; } static void ept_load_pdptrs(struct kvm_vcpu *vcpu) @@ -1661,7 +1663,7 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0, (CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING)); vcpu->arch.cr0 = cr0; - vmx_set_cr4(vcpu, vcpu->arch.cr4); + vmx_set_cr4(vcpu, kvm_read_cr4(vcpu)); } else if (!is_paging(vcpu)) { /* From nonpaging to paging */ vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, @@ -1669,7 +1671,7 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0, ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING)); vcpu->arch.cr0 = cr0; - vmx_set_cr4(vcpu, vcpu->arch.cr4); + vmx_set_cr4(vcpu, kvm_read_cr4(vcpu)); } if (!(cr0 & X86_CR0_WP)) @@ -2420,6 +2422,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL); vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK); + vmx->vcpu.arch.cr4_guest_owned_bits = ~KVM_GUEST_CR4_MASK; tsc_base = vmx->vcpu.kvm->arch.vm_init_tsc; rdtscll(tsc_this); @@ -3050,7 +3053,7 @@ static int 
handle_dr(struct kvm_vcpu *vcpu) vcpu->arch.eff_db[dr] = val; break; case 4 ... 5: - if (vcpu->arch.cr4 & X86_CR4_DE) + if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) kvm_queue_exception(vcpu, UD_VECTOR); break; case 6: diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 27931867791..84dd33e717f 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -482,7 +482,7 @@ EXPORT_SYMBOL_GPL(kvm_lmsw); void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) { - unsigned long old_cr4 = vcpu->arch.cr4; + unsigned long old_cr4 = kvm_read_cr4(vcpu); unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE; if (cr4 & CR4_RESERVED_BITS) { @@ -1899,7 +1899,7 @@ static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu, return 0; if (mce->status & MCI_STATUS_UC) { if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) || - !(vcpu->arch.cr4 & X86_CR4_MCE)) { + !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) { printk(KERN_DEBUG "kvm: set_mce: " "injects mce exception while " "previous one is in progress!\n"); @@ -3616,7 +3616,6 @@ unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr) { unsigned long value; - kvm_x86_ops->decache_cr4_guest_bits(vcpu); switch (cr) { case 0: value = vcpu->arch.cr0; @@ -3628,7 +3627,7 @@ unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr) value = vcpu->arch.cr3; break; case 4: - value = vcpu->arch.cr4; + value = kvm_read_cr4(vcpu); break; case 8: value = kvm_get_cr8(vcpu); @@ -3656,7 +3655,7 @@ void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val, kvm_set_cr3(vcpu, val); break; case 4: - kvm_set_cr4(vcpu, mk_cr_64(vcpu->arch.cr4, val)); + kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val)); break; case 8: kvm_set_cr8(vcpu, val & 0xfUL); @@ -4237,11 +4236,10 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, sregs->gdt.limit = dt.limit; sregs->gdt.base = dt.base; - kvm_x86_ops->decache_cr4_guest_bits(vcpu); sregs->cr0 = vcpu->arch.cr0; sregs->cr2 = vcpu->arch.cr2; sregs->cr3 = vcpu->arch.cr3; - sregs->cr4 = vcpu->arch.cr4; + sregs->cr4 = kvm_read_cr4(vcpu); sregs->cr8 = kvm_get_cr8(vcpu); sregs->efer = vcpu->arch.shadow_efer; sregs->apic_base = kvm_get_apic_base(vcpu); @@ -4737,13 +4735,11 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, kvm_x86_ops->set_efer(vcpu, sregs->efer); kvm_set_apic_base(vcpu, sregs->apic_base); - kvm_x86_ops->decache_cr4_guest_bits(vcpu); - mmu_reset_needed |= vcpu->arch.cr0 != sregs->cr0; kvm_x86_ops->set_cr0(vcpu, sregs->cr0); vcpu->arch.cr0 = sregs->cr0; - mmu_reset_needed |= vcpu->arch.cr4 != sregs->cr4; + mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; kvm_x86_ops->set_cr4(vcpu, sregs->cr4); if (!is_long_mode(vcpu) && is_pae(vcpu)) { load_pdptrs(vcpu, vcpu->arch.cr3); -- cgit v1.2.3-70-g09d2 From 4c38609ac569483152f9cb7e5a66f17355e563b2 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Mon, 7 Dec 2009 12:26:18 +0200 Subject: KVM: VMX: Make guest cr4 mask more conservative Instead of specifying the bits which we want to trap on, specify the bits which we allow the guest to change transparently. This is safer wrt future changes to cr4. 
Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 284e905c59d..755811a564b 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -69,8 +69,10 @@ module_param(emulate_invalid_guest_state, bool, S_IRUGO); (X86_CR0_WP | X86_CR0_NE | X86_CR0_TS | X86_CR0_MP) #define KVM_VM_CR0_ALWAYS_ON \ (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE) -#define KVM_GUEST_CR4_MASK \ - (X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE) +#define KVM_CR4_GUEST_OWNED_BITS \ + (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \ + | X86_CR4_OSXMMEXCPT) + #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE) #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE) @@ -2421,8 +2423,8 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl); vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL); - vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK); - vmx->vcpu.arch.cr4_guest_owned_bits = ~KVM_GUEST_CR4_MASK; + vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS; + vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits); tsc_base = vmx->vcpu.kvm->arch.vm_init_tsc; rdtscll(tsc_this); -- cgit v1.2.3-70-g09d2 From ce03e4f21a33b97c5d20ce597f64c361bb247904 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Mon, 7 Dec 2009 12:29:14 +0200 Subject: KVM: VMX: When using ept, allow the guest to own cr4.pge We make no use of cr4.pge if ept is enabled, but the guest does (to flush global mappings, as with vmap()), so give the guest ownership of this bit. Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 755811a564b..a7ebaa5f2b6 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2424,6 +2424,8 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL); vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS; + if (enable_ept) + vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE; vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits); tsc_base = vmx->vcpu.kvm->arch.vm_init_tsc; -- cgit v1.2.3-70-g09d2 From bc23008b610dda1b0e69cd473b31c1391d6873f2 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Tue, 8 Dec 2009 12:14:42 +0200 Subject: KVM: VMX: Fold ept_update_paging_mode_cr4() into its caller ept_update_paging_mode_cr4() accesses vcpu->arch.cr4 directly, which usually needs to be accessed via kvm_read_cr4(). In this case, we can't, since cr4 is in the process of being updated. Instead of adding inane comments, fold the function into its caller (vmx_set_cr4), so it can use the not-yet-committed cr4 directly. 
Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index a7ebaa5f2b6..694baed9bc8 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -1680,16 +1680,6 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0, *hw_cr0 &= ~X86_CR0_WP; } -static void ept_update_paging_mode_cr4(unsigned long *hw_cr4, - struct kvm_vcpu *vcpu) -{ - if (!is_paging(vcpu)) { - *hw_cr4 &= ~X86_CR4_PAE; - *hw_cr4 |= X86_CR4_PSE; - } else if (!(vcpu->arch.cr4 & X86_CR4_PAE)) - *hw_cr4 &= ~X86_CR4_PAE; -} - static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) { struct vcpu_vmx *vmx = to_vmx(vcpu); @@ -1767,8 +1757,14 @@ static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON); vcpu->arch.cr4 = cr4; - if (enable_ept) - ept_update_paging_mode_cr4(&hw_cr4, vcpu); + if (enable_ept) { + if (!is_paging(vcpu)) { + hw_cr4 &= ~X86_CR4_PAE; + hw_cr4 |= X86_CR4_PSE; + } else if (!(cr4 & X86_CR4_PAE)) { + hw_cr4 &= ~X86_CR4_PAE; + } + } vmcs_writel(CR4_READ_SHADOW, cr4); vmcs_writel(GUEST_CR4, hw_cr4); -- cgit v1.2.3-70-g09d2 From 8a7e3f01e692cd202fb7c042cf2be9ff8c599a1e Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Fri, 18 Dec 2009 16:48:42 +0800 Subject: KVM: VMX: Remove redundant variable It's no longer necessary. Signed-off-by: Sheng Yang Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 694baed9bc8..f8f2fdc2689 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2399,14 +2399,12 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) for (i = 0; i < NR_VMX_MSR; ++i) { u32 index = vmx_msr_index[i]; u32 data_low, data_high; - u64 data; int j = vmx->nmsrs; if (rdmsr_safe(index, &data_low, &data_high) < 0) continue; if (wrmsr_safe(index, data_low, data_high) < 0) continue; - data = data_low | ((u64)data_high << 32); vmx->guest_msrs[j].index = i; vmx->guest_msrs[j].data = 0; vmx->guest_msrs[j].mask = -1ull; -- cgit v1.2.3-70-g09d2 From 2bf78fa7b9b0d2917fd6587eadb3c0f6bbaf1718 Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Fri, 18 Dec 2009 16:48:44 +0800 Subject: KVM: Extended shared_msr_global to per CPU shared_msr_global saved host value of relevant MSRs, but it have an assumption that all MSRs it tracked shared the value across the different CPUs. It's not true with some MSRs, e.g. MSR_TSC_AUX. Extend it to per CPU to provide the support of MSR_TSC_AUX, and more alike MSRs. Notice now the shared_msr_global still have one assumption: it can only deal with the MSRs that won't change in host after KVM module loaded. 
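Purely illustrative, not part of the patch: after this change each CPU carries its own host/curr pair per tracked MSR, so the expected usage from vendor code looks roughly as follows (the slot number and the choice of MSR_EFER are hypothetical; real users are the existing vmx/svm MSR switching paths):

static void example_define_msrs(void)
{
        /* Once, at module load: map slot 0 to an MSR to be tracked.
         * Each CPU later snapshots its own host value through
         * kvm_shared_msr_cpu_online() -> shared_msr_update(). */
        kvm_define_shared_msr(0, MSR_EFER);
}

static void example_enter_guest(u64 guest_value)
{
        /* Writes the MSR only if it differs from this CPU's cached .curr,
         * and arms the user-return notifier that restores .host later. */
        kvm_set_shared_msr(0, guest_value, -1ull);
}
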
Signed-off-by: Sheng Yang Signed-off-by: Avi Kivity --- arch/x86/kvm/x86.c | 55 ++++++++++++++++++++++++++++++++---------------------- 1 file changed, 33 insertions(+), 22 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 84dd33e717f..4e7bbc49b7e 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -93,16 +93,16 @@ module_param_named(ignore_msrs, ignore_msrs, bool, S_IRUGO | S_IWUSR); struct kvm_shared_msrs_global { int nr; - struct kvm_shared_msr { - u32 msr; - u64 value; - } msrs[KVM_NR_SHARED_MSRS]; + u32 msrs[KVM_NR_SHARED_MSRS]; }; struct kvm_shared_msrs { struct user_return_notifier urn; bool registered; - u64 current_value[KVM_NR_SHARED_MSRS]; + struct kvm_shared_msr_values { + u64 host; + u64 curr; + } values[KVM_NR_SHARED_MSRS]; }; static struct kvm_shared_msrs_global __read_mostly shared_msrs_global; @@ -147,53 +147,64 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { static void kvm_on_user_return(struct user_return_notifier *urn) { unsigned slot; - struct kvm_shared_msr *global; struct kvm_shared_msrs *locals = container_of(urn, struct kvm_shared_msrs, urn); + struct kvm_shared_msr_values *values; for (slot = 0; slot < shared_msrs_global.nr; ++slot) { - global = &shared_msrs_global.msrs[slot]; - if (global->value != locals->current_value[slot]) { - wrmsrl(global->msr, global->value); - locals->current_value[slot] = global->value; + values = &locals->values[slot]; + if (values->host != values->curr) { + wrmsrl(shared_msrs_global.msrs[slot], values->host); + values->curr = values->host; } } locals->registered = false; user_return_notifier_unregister(urn); } -void kvm_define_shared_msr(unsigned slot, u32 msr) +static void shared_msr_update(unsigned slot, u32 msr) { - int cpu; + struct kvm_shared_msrs *smsr; u64 value; + smsr = &__get_cpu_var(shared_msrs); + /* only read, and nobody should modify it at this time, + * so don't need lock */ + if (slot >= shared_msrs_global.nr) { + printk(KERN_ERR "kvm: invalid MSR slot!"); + return; + } + rdmsrl_safe(msr, &value); + smsr->values[slot].host = value; + smsr->values[slot].curr = value; +} + +void kvm_define_shared_msr(unsigned slot, u32 msr) +{ if (slot >= shared_msrs_global.nr) shared_msrs_global.nr = slot + 1; - shared_msrs_global.msrs[slot].msr = msr; - rdmsrl_safe(msr, &value); - shared_msrs_global.msrs[slot].value = value; - for_each_online_cpu(cpu) - per_cpu(shared_msrs, cpu).current_value[slot] = value; + shared_msrs_global.msrs[slot] = msr; + /* we need ensured the shared_msr_global have been updated */ + smp_wmb(); } EXPORT_SYMBOL_GPL(kvm_define_shared_msr); static void kvm_shared_msr_cpu_online(void) { unsigned i; - struct kvm_shared_msrs *locals = &__get_cpu_var(shared_msrs); for (i = 0; i < shared_msrs_global.nr; ++i) - locals->current_value[i] = shared_msrs_global.msrs[i].value; + shared_msr_update(i, shared_msrs_global.msrs[i]); } void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask) { struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs); - if (((value ^ smsr->current_value[slot]) & mask) == 0) + if (((value ^ smsr->values[slot].curr) & mask) == 0) return; - smsr->current_value[slot] = value; - wrmsrl(shared_msrs_global.msrs[slot].msr, value); + smsr->values[slot].curr = value; + wrmsrl(shared_msrs_global.msrs[slot], value); if (!smsr->registered) { smsr->urn.on_user_return = kvm_on_user_return; user_return_notifier_register(&smsr->urn); -- cgit v1.2.3-70-g09d2 From be43f83dada2cf0e9e01c9a0ba42977c5bd70f9d Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Fri, 
18 Dec 2009 16:48:45 +0800 Subject: x86: Raise vsyscall priority on hotplug notifier chain KVM need vsyscall_init() to initialize MSR_TSC_AUX before it read the value. Per Avi's suggestion, this patch raised vsyscall priority on hotplug notifier chain, to 30. CC: Ingo Molnar CC: linux-kernel@vger.kernel.org Signed-off-by: Sheng Yang Signed-off-by: Avi Kivity --- arch/x86/kernel/vsyscall_64.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c index 9055e5872ff..1c0c6ab9c60 100644 --- a/arch/x86/kernel/vsyscall_64.c +++ b/arch/x86/kernel/vsyscall_64.c @@ -301,7 +301,8 @@ static int __init vsyscall_init(void) register_sysctl_table(kernel_root_table2); #endif on_each_cpu(cpu_vsyscall_init, NULL, 1); - hotcpu_notifier(cpu_vsyscall_notifier, 0); + /* notifier priority > KVM */ + hotcpu_notifier(cpu_vsyscall_notifier, 30); return 0; } -- cgit v1.2.3-70-g09d2 From 0e85188049afacdfce9c026144142264981bbabb Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Fri, 18 Dec 2009 16:48:46 +0800 Subject: KVM: Add cpuid_update() callback to kvm_x86_ops Sometime, we need to adjust some state in order to reflect guest CPUID setting, e.g. if we don't expose rdtscp to guest, we won't want to enable it on hardware. cpuid_update() is introduced for this purpose. Also export kvm_find_cpuid_entry() for later use. Signed-off-by: Sheng Yang Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_host.h | 1 + arch/x86/kvm/svm.c | 6 ++++++ arch/x86/kvm/vmx.c | 6 ++++++ arch/x86/kvm/x86.c | 3 +++ 4 files changed, 16 insertions(+) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index e9f4f12ec3c..7ff0ea371e3 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -459,6 +459,7 @@ struct kvm_x86_ops { int (*hardware_setup)(void); /* __init */ void (*hardware_unsetup)(void); /* __exit */ bool (*cpu_has_accelerated_tpr)(void); + void (*cpuid_update)(struct kvm_vcpu *vcpu); /* Create, but do not attach this VCPU */ struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id); diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 1d9b33843c8..41777e6d976 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -2852,6 +2852,10 @@ static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) return 0; } +static void svm_cpuid_update(struct kvm_vcpu *vcpu) +{ +} + static const struct trace_print_flags svm_exit_reasons_str[] = { { SVM_EXIT_READ_CR0, "read_cr0" }, { SVM_EXIT_READ_CR3, "read_cr3" }, @@ -2976,6 +2980,8 @@ static struct kvm_x86_ops svm_x86_ops = { .exit_reasons_str = svm_exit_reasons_str, .gb_page_enable = svm_gb_page_enable, + + .cpuid_update = svm_cpuid_update, }; static int __init svm_init(void) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index f8f2fdc2689..75e8931e96c 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -3988,6 +3988,10 @@ static bool vmx_gb_page_enable(void) return false; } +static void vmx_cpuid_update(struct kvm_vcpu *vcpu) +{ +} + static struct kvm_x86_ops vmx_x86_ops = { .cpu_has_kvm_support = cpu_has_kvm_support, .disabled_by_bios = vmx_disabled_by_bios, @@ -4052,6 +4056,8 @@ static struct kvm_x86_ops vmx_x86_ops = { .exit_reasons_str = vmx_exit_reasons_str, .gb_page_enable = vmx_gb_page_enable, + + .cpuid_update = vmx_cpuid_update, }; static int __init vmx_init(void) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 4e7bbc49b7e..e5ac21f992f 100644 --- 
a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1574,6 +1574,7 @@ static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu, cpuid_fix_nx_cap(vcpu); r = 0; kvm_apic_set_version(vcpu); + kvm_x86_ops->cpuid_update(vcpu); out_free: vfree(cpuid_entries); @@ -1596,6 +1597,7 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu, goto out; vcpu->arch.cpuid_nent = cpuid->nent; kvm_apic_set_version(vcpu); + kvm_x86_ops->cpuid_update(vcpu); return 0; out: @@ -3733,6 +3735,7 @@ struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu, } return best; } +EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry); int cpuid_maxphyaddr(struct kvm_vcpu *vcpu) { -- cgit v1.2.3-70-g09d2 From 4e47c7a6d714cf352b719db92a924b6ec487acc5 Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Fri, 18 Dec 2009 16:48:47 +0800 Subject: KVM: VMX: Add instruction rdtscp support for guest Before enabling, execution of "rdtscp" in guest would result in #UD. Signed-off-by: Sheng Yang Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_host.h | 1 + arch/x86/include/asm/vmx.h | 1 + arch/x86/kvm/svm.c | 7 +++++ arch/x86/kvm/vmx.c | 60 ++++++++++++++++++++++++++++++++++++++--- arch/x86/kvm/x86.c | 3 ++- 5 files changed, 68 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 7ff0ea371e3..fe4df464fb3 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -521,6 +521,7 @@ struct kvm_x86_ops { int (*get_tdp_level)(void); u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio); bool (*gb_page_enable)(void); + bool (*rdtscp_supported)(void); const struct trace_print_flags *exit_reasons_str; }; diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index 8f6b0111446..713ed9a5b1d 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -53,6 +53,7 @@ */ #define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001 #define SECONDARY_EXEC_ENABLE_EPT 0x00000002 +#define SECONDARY_EXEC_RDTSCP 0x00000008 #define SECONDARY_EXEC_ENABLE_VPID 0x00000020 #define SECONDARY_EXEC_WBINVD_EXITING 0x00000040 #define SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 41777e6d976..7f4e225feeb 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -2914,6 +2914,11 @@ static bool svm_gb_page_enable(void) return true; } +static bool svm_rdtscp_supported(void) +{ + return false; +} + static struct kvm_x86_ops svm_x86_ops = { .cpu_has_kvm_support = has_svm, .disabled_by_bios = is_disabled, @@ -2982,6 +2987,8 @@ static struct kvm_x86_ops svm_x86_ops = { .gb_page_enable = svm_gb_page_enable, .cpuid_update = svm_cpuid_update, + + .rdtscp_supported = svm_rdtscp_supported, }; static int __init svm_init(void) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 75e8931e96c..74a66f0c00b 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -151,6 +151,8 @@ struct vcpu_vmx { ktime_t entry_time; s64 vnmi_blocked_time; u32 exit_reason; + + bool rdtscp_enabled; }; static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu) @@ -225,7 +227,7 @@ static const u32 vmx_msr_index[] = { #ifdef CONFIG_X86_64 MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, #endif - MSR_EFER, MSR_K6_STAR, + MSR_EFER, MSR_TSC_AUX, MSR_K6_STAR, }; #define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index) @@ -362,6 +364,12 @@ static inline int cpu_has_vmx_vpid(void) SECONDARY_EXEC_ENABLE_VPID; } +static inline int cpu_has_vmx_rdtscp(void) +{ + return vmcs_config.cpu_based_2nd_exec_ctrl & + 
SECONDARY_EXEC_RDTSCP; +} + static inline int cpu_has_virtual_nmis(void) { return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS; @@ -893,6 +901,11 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr, vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info); } +static bool vmx_rdtscp_supported(void) +{ + return cpu_has_vmx_rdtscp(); +} + /* * Swap MSR entry in host/guest MSR entry array. */ @@ -928,6 +941,9 @@ static void setup_msrs(struct vcpu_vmx *vmx) index = __find_msr_index(vmx, MSR_CSTAR); if (index >= 0) move_msr_up(vmx, index, save_nmsrs++); + index = __find_msr_index(vmx, MSR_TSC_AUX); + if (index >= 0 && vmx->rdtscp_enabled) + move_msr_up(vmx, index, save_nmsrs++); /* * MSR_K6_STAR is only needed on long mode guests, and only * if efer.sce is enabled. @@ -1017,6 +1033,10 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) case MSR_IA32_SYSENTER_ESP: data = vmcs_readl(GUEST_SYSENTER_ESP); break; + case MSR_TSC_AUX: + if (!to_vmx(vcpu)->rdtscp_enabled) + return 1; + /* Otherwise falls through */ default: vmx_load_host_state(to_vmx(vcpu)); msr = find_msr_entry(to_vmx(vcpu), msr_index); @@ -1080,7 +1100,15 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) vcpu->arch.pat = data; break; } - /* Otherwise falls through to kvm_set_msr_common */ + ret = kvm_set_msr_common(vcpu, msr_index, data); + break; + case MSR_TSC_AUX: + if (!vmx->rdtscp_enabled) + return 1; + /* Check reserved bit, higher 32 bits should be zero */ + if ((data >> 32) != 0) + return 1; + /* Otherwise falls through */ default: msr = find_msr_entry(vmx, msr_index); if (msr) { @@ -1260,7 +1288,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf) SECONDARY_EXEC_ENABLE_VPID | SECONDARY_EXEC_ENABLE_EPT | SECONDARY_EXEC_UNRESTRICTED_GUEST | - SECONDARY_EXEC_PAUSE_LOOP_EXITING; + SECONDARY_EXEC_PAUSE_LOOP_EXITING | + SECONDARY_EXEC_RDTSCP; if (adjust_vmx_controls(min2, opt2, MSR_IA32_VMX_PROCBASED_CTLS2, &_cpu_based_2nd_exec_control) < 0) @@ -3988,8 +4017,31 @@ static bool vmx_gb_page_enable(void) return false; } +static inline u32 bit(int bitno) +{ + return 1 << (bitno & 31); +} + static void vmx_cpuid_update(struct kvm_vcpu *vcpu) { + struct kvm_cpuid_entry2 *best; + struct vcpu_vmx *vmx = to_vmx(vcpu); + u32 exec_control; + + vmx->rdtscp_enabled = false; + if (vmx_rdtscp_supported()) { + exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); + if (exec_control & SECONDARY_EXEC_RDTSCP) { + best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); + if (best && (best->edx & bit(X86_FEATURE_RDTSCP))) + vmx->rdtscp_enabled = true; + else { + exec_control &= ~SECONDARY_EXEC_RDTSCP; + vmcs_write32(SECONDARY_VM_EXEC_CONTROL, + exec_control); + } + } + } } static struct kvm_x86_ops vmx_x86_ops = { @@ -4058,6 +4110,8 @@ static struct kvm_x86_ops vmx_x86_ops = { .gb_page_enable = vmx_gb_page_enable, .cpuid_update = vmx_cpuid_update, + + .rdtscp_supported = vmx_rdtscp_supported, }; static int __init vmx_init(void) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index e5ac21f992f..8798504ace1 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1646,6 +1646,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, #else unsigned f_lm = 0; #endif + unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? 
F(RDTSCP) : 0; /* cpuid 1.edx */ const u32 kvm_supported_word0_x86_features = @@ -1665,7 +1666,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, F(MTRR) | F(PGE) | F(MCA) | F(CMOV) | F(PAT) | F(PSE36) | 0 /* Reserved */ | f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) | - F(FXSR) | F(FXSR_OPT) | f_gbpages | 0 /* RDTSCP */ | + F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp | 0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW); /* cpuid 1.ecx */ const u32 kvm_supported_word4_x86_features = -- cgit v1.2.3-70-g09d2 From 953899b659adce62cbe83d6a7527550ab8797c48 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 14 Dec 2009 12:22:20 +0100 Subject: KVM: SVM: Adjust tsc_offset only if tsc_unstable The tsc_offset adjustment in svm_vcpu_load is executed unconditionally even if Linux considers the host tsc as stable. This causes a Linux guest detecting an unstable tsc in any case. This patch removes the tsc_offset adjustment if the host tsc is stable. The guest will now get the benefit of a stable tsc too. Signed-off-by: Joerg Roedel Signed-off-by: Avi Kivity --- arch/x86/kvm/svm.c | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 7f4e225feeb..b373ae6fb97 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -765,14 +765,16 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) if (unlikely(cpu != vcpu->cpu)) { u64 delta; - /* - * Make sure that the guest sees a monotonically - * increasing TSC. - */ - delta = vcpu->arch.host_tsc - native_read_tsc(); - svm->vmcb->control.tsc_offset += delta; - if (is_nested(svm)) - svm->nested.hsave->control.tsc_offset += delta; + if (check_tsc_unstable()) { + /* + * Make sure that the guest sees a monotonically + * increasing TSC. + */ + delta = vcpu->arch.host_tsc - native_read_tsc(); + svm->vmcb->control.tsc_offset += delta; + if (is_nested(svm)) + svm->nested.hsave->control.tsc_offset += delta; + } vcpu->cpu = cpu; kvm_migrate_timers(vcpu); svm->asid_generation = 0; -- cgit v1.2.3-70-g09d2 From 50eb2a3cd0f50d912b26d0b79b7f443344608390 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sun, 20 Dec 2009 15:00:10 +0200 Subject: KVM: Add KVM_MMIO kconfig item s390 doesn't have mmio, this will simplify ifdefing it out. Signed-off-by: Avi Kivity --- arch/ia64/kvm/Kconfig | 1 + arch/powerpc/kvm/Kconfig | 1 + arch/x86/kvm/Kconfig | 1 + virt/kvm/Kconfig | 3 +++ 4 files changed, 6 insertions(+) (limited to 'arch') diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig index ef3e7be29ca..bf82e47c98b 100644 --- a/arch/ia64/kvm/Kconfig +++ b/arch/ia64/kvm/Kconfig @@ -26,6 +26,7 @@ config KVM select ANON_INODES select HAVE_KVM_IRQCHIP select KVM_APIC_ARCHITECTURE + select KVM_MMIO ---help--- Support hosting fully virtualized guest machines using hardware virtualization extensions. 
You will need a fairly recent diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig index 6fb6e8aa389..be28968c794 100644 --- a/arch/powerpc/kvm/Kconfig +++ b/arch/powerpc/kvm/Kconfig @@ -20,6 +20,7 @@ config KVM bool select PREEMPT_NOTIFIERS select ANON_INODES + select KVM_MMIO config KVM_BOOK3S_64_HANDLER bool diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index 4cd49833246..06871111bf5 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig @@ -29,6 +29,7 @@ config KVM select HAVE_KVM_EVENTFD select KVM_APIC_ARCHITECTURE select USER_RETURN_NOTIFIER + select KVM_MMIO ---help--- Support hosting fully virtualized guest machines using hardware virtualization extensions. You will need a fairly recent diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig index daece36c0a5..7f1178f6b83 100644 --- a/virt/kvm/Kconfig +++ b/virt/kvm/Kconfig @@ -12,3 +12,6 @@ config HAVE_KVM_EVENTFD config KVM_APIC_ARCHITECTURE bool + +config KVM_MMIO + bool -- cgit v1.2.3-70-g09d2 From 583617b7866cfcc0b3830a4b7ab20f218d180f67 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Mon, 21 Dec 2009 20:21:23 +0100 Subject: KVM: powerpc: Move vector to irqprio resolving to separate function We're using a switch table to find the irqprio that belongs to a specific interrupt vector. This table is part of the interrupt inject logic. Since we'll add a new function to stop interrupts, let's move this table out of the injection logic into a separate function. Signed-off-by: Alexander Graf Acked-by: Acked-by: Hollis Blanchard Signed-off-by: Avi Kivity --- arch/powerpc/kvm/book3s.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 3e294bd9b8c..241795bdf34 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -125,11 +125,10 @@ void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags) vcpu->arch.mmu.reset_msr(vcpu); } -void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec) +static int kvmppc_book3s_vec2irqprio(unsigned int vec) { unsigned int prio; - vcpu->stat.queue_intr++; switch (vec) { case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET; break; case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK; break; @@ -149,7 +148,15 @@ void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec) default: prio = BOOK3S_IRQPRIO_MAX; break; } - set_bit(prio, &vcpu->arch.pending_exceptions); + return prio; +} + +void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec) +{ + vcpu->stat.queue_intr++; + + set_bit(kvmppc_book3s_vec2irqprio(vec), + &vcpu->arch.pending_exceptions); #ifdef EXIT_DEBUG printk(KERN_INFO "Queueing interrupt %x\n", vec); #endif -- cgit v1.2.3-70-g09d2 From 7706664d39a8eb8555408a24b1f17bd2086189c6 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Mon, 21 Dec 2009 20:21:24 +0100 Subject: KVM: powerpc: Improve DEC handling We treated the DEC interrupt like an edge based one. This is not true for Book3s. The DEC keeps firing until mtdec is issued again and thus clears the interrupt line. So let's implement this logic in KVM too. This patch moves the line clearing from the firing of the interrupt to the mtdec emulation. This makes PPC64 guests work without AGGRESSIVE_DEC defined. 
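Purely illustrative, not part of the patch: in level-triggered terms, the pending DEC priority bit now acts as a latch that only the guest's own mtdec clears. A sketch of the resulting mtdec path, using the helpers added in the diff below; the negative-value re-queue is assumed from the POWER4+ comment in the emulate.c hunk rather than shown verbatim there:

/* Sketch only; the real logic lives in kvmppc_emulate_dec(). */
static void example_mtdec(struct kvm_vcpu *vcpu, u32 val)
{
        vcpu->arch.dec = val;
        kvmppc_core_dequeue_dec(vcpu);          /* mtdec lowers the line */
        if (val & 0x80000000)                   /* POWER4+: DEC < 0 ...  */
                kvmppc_core_queue_dec(vcpu);    /* ... re-raises it      */
}
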
Signed-off-by: Alexander Graf Acked-by: Acked-by: Hollis Blanchard Signed-off-by: Avi Kivity --- arch/powerpc/include/asm/kvm_ppc.h | 1 + arch/powerpc/kvm/book3s.c | 16 +++++++++++++++- arch/powerpc/kvm/booke.c | 5 +++++ arch/powerpc/kvm/emulate.c | 3 +++ 4 files changed, 24 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 269ee46ab02..abfd0c4d567 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -82,6 +82,7 @@ extern void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu); extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu); extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu); extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu); +extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu); extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq); diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 241795bdf34..fd3ad6c7f35 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -151,6 +151,13 @@ static int kvmppc_book3s_vec2irqprio(unsigned int vec) return prio; } +static void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu, + unsigned int vec) +{ + clear_bit(kvmppc_book3s_vec2irqprio(vec), + &vcpu->arch.pending_exceptions); +} + void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec) { vcpu->stat.queue_intr++; @@ -178,6 +185,11 @@ int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu) return test_bit(BOOK3S_INTERRUPT_DECREMENTER >> 7, &vcpu->arch.pending_exceptions); } +void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu) +{ + kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER); +} + void kvmppc_core_queue_external(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) { @@ -275,7 +287,9 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu) #endif priority = __ffs(*pending); while (priority <= (sizeof(unsigned int) * 8)) { - if (kvmppc_book3s_irqprio_deliver(vcpu, priority)) { + if (kvmppc_book3s_irqprio_deliver(vcpu, priority) && + (priority != BOOK3S_IRQPRIO_DECREMENTER)) { + /* DEC interrupts get cleared by mtdec */ clear_bit(priority, &vcpu->arch.pending_exceptions); break; } diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 06f5a9ecc42..d8b63420acf 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -97,6 +97,11 @@ int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu) return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions); } +void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu) +{ + clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions); +} + void kvmppc_core_queue_external(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) { diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c index 4a9ac6640fa..303457b2f52 100644 --- a/arch/powerpc/kvm/emulate.c +++ b/arch/powerpc/kvm/emulate.c @@ -83,6 +83,9 @@ void kvmppc_emulate_dec(struct kvm_vcpu *vcpu) pr_debug("mtDEC: %x\n", vcpu->arch.dec); #ifdef CONFIG_PPC64 + /* mtdec lowers the interrupt line when positive. 
*/ + kvmppc_core_dequeue_dec(vcpu); + /* POWER4+ triggers a dec interrupt if the value is < 0 */ if (vcpu->arch.dec & 0x80000000) { hrtimer_try_to_cancel(&vcpu->arch.dec_timer); -- cgit v1.2.3-70-g09d2 From 0bb1fb7178c55f3e295b2ffe0433c330f960458e Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Mon, 21 Dec 2009 20:21:25 +0100 Subject: KVM: powerpc: Remove AGGRESSIVE_DEC Because we now emulate the DEC interrupt according to real life behavior, there's no need to keep the AGGRESSIVE_DEC hack around. Let's just remove it. Signed-off-by: Alexander Graf Acked-by: Acked-by: Hollis Blanchard Signed-off-by: Avi Kivity --- arch/powerpc/kvm/book3s.c | 16 +--------------- 1 file changed, 1 insertion(+), 15 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index fd3ad6c7f35..803505d3e45 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -34,12 +34,6 @@ /* #define EXIT_DEBUG */ /* #define EXIT_DEBUG_SIMPLE */ -/* Without AGGRESSIVE_DEC we only fire off a DEC interrupt when DEC turns 0. - * When set, we retrigger a DEC interrupt after that if DEC <= 0. - * PPC32 Linux runs faster without AGGRESSIVE_DEC, PPC64 Linux requires it. */ - -/* #define AGGRESSIVE_DEC */ - struct kvm_stats_debugfs_item debugfs_entries[] = { { "exits", VCPU_STAT(sum_exits) }, { "mmio", VCPU_STAT(mmio_exits) }, @@ -81,7 +75,7 @@ void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) to_book3s(vcpu)->slb_shadow_max = get_paca()->kvm_slb_max; } -#if defined(AGGRESSIVE_DEC) || defined(EXIT_DEBUG) +#if defined(EXIT_DEBUG) static u32 kvmppc_get_dec(struct kvm_vcpu *vcpu) { u64 jd = mftb() - vcpu->arch.dec_jiffies; @@ -273,14 +267,6 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu) unsigned long *pending = &vcpu->arch.pending_exceptions; unsigned int priority; - /* XXX be more clever here - no need to mftb() on every entry */ - /* Issue DEC again if it's still active */ -#ifdef AGGRESSIVE_DEC - if (vcpu->arch.msr & MSR_EE) - if (kvmppc_get_dec(vcpu) & 0x80000000) - kvmppc_core_queue_dec(vcpu); -#endif - #ifdef EXIT_DEBUG if (vcpu->arch.pending_exceptions) printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions); -- cgit v1.2.3-70-g09d2 From 46a26bf55714c1e2f17e34683292a389acb8e601 Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Wed, 23 Dec 2009 14:35:16 -0200 Subject: KVM: modify memslots layout in struct kvm Have a pointer to an allocated region inside struct kvm. 
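Purely illustrative, not part of the patch: the mechanical change for callers is to go through the new kvm->memslots pointer instead of the embedded array, for example:

/* Sketch of the converted access pattern (mirrors the mmu.c hunk below). */
static unsigned long example_total_guest_pages(struct kvm *kvm)
{
        struct kvm_memslots *slots = kvm->memslots;     /* now a pointer */
        unsigned long npages = 0;
        int i;

        for (i = 0; i < slots->nmemslots; i++)          /* was kvm->nmemslots */
                npages += slots->memslots[i].npages;
        return npages;
}
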
[alex: fix ppc book 3s] Signed-off-by: Alexander Graf Signed-off-by: Marcelo Tosatti --- arch/ia64/kvm/kvm-ia64.c | 10 ++++++---- arch/powerpc/kvm/book3s.c | 2 +- arch/x86/kvm/mmu.c | 11 ++++++----- arch/x86/kvm/vmx.c | 4 ++-- arch/x86/kvm/x86.c | 4 ++-- include/linux/kvm_host.h | 12 ++++++++---- virt/kvm/iommu.c | 18 ++++++++++++------ virt/kvm/kvm_main.c | 36 +++++++++++++++++++++++------------- 8 files changed, 60 insertions(+), 37 deletions(-) (limited to 'arch') diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index 5fdeec5fddc..1ca1dbf4811 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -1377,12 +1377,14 @@ static void free_kvm(struct kvm *kvm) static void kvm_release_vm_pages(struct kvm *kvm) { + struct kvm_memslots *slots; struct kvm_memory_slot *memslot; int i, j; unsigned long base_gfn; - for (i = 0; i < kvm->nmemslots; i++) { - memslot = &kvm->memslots[i]; + slots = kvm->memslots; + for (i = 0; i < slots->nmemslots; i++) { + memslot = &slots->memslots[i]; base_gfn = memslot->base_gfn; for (j = 0; j < memslot->npages; j++) { @@ -1802,7 +1804,7 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm, if (log->slot >= KVM_MEMORY_SLOTS) goto out; - memslot = &kvm->memslots[log->slot]; + memslot = &kvm->memslots->memslots[log->slot]; r = -ENOENT; if (!memslot->dirty_bitmap) goto out; @@ -1840,7 +1842,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, /* If nothing is dirty, don't bother messing with page tables. */ if (is_dirty) { kvm_flush_remote_tlbs(kvm); - memslot = &kvm->memslots[log->slot]; + memslot = &kvm->memslots->memslots[log->slot]; n = ALIGN(memslot->npages, BITS_PER_LONG) / 8; memset(memslot->dirty_bitmap, 0, n); } diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 803505d3e45..bb8873dcb20 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -865,7 +865,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, /* If nothing is dirty, don't bother messing with page tables. */ if (is_dirty) { - memslot = &kvm->memslots[log->slot]; + memslot = &kvm->memslots->memslots[log->slot]; ga = memslot->base_gfn << PAGE_SHIFT; ga_end = ga + (memslot->npages << PAGE_SHIFT); diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 4f499d7f710..81f84d326a8 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -807,13 +807,14 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, { int i, j; int retval = 0; + struct kvm_memslots *slots = kvm->memslots; /* * If mmap_sem isn't taken, we can look the memslots with only * the mmu_lock by skipping over the slots with userspace_addr == 0. 
*/ - for (i = 0; i < kvm->nmemslots; i++) { - struct kvm_memory_slot *memslot = &kvm->memslots[i]; + for (i = 0; i < slots->nmemslots; i++) { + struct kvm_memory_slot *memslot = &slots->memslots[i]; unsigned long start = memslot->userspace_addr; unsigned long end; @@ -3021,8 +3022,8 @@ unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm) unsigned int nr_mmu_pages; unsigned int nr_pages = 0; - for (i = 0; i < kvm->nmemslots; i++) - nr_pages += kvm->memslots[i].npages; + for (i = 0; i < kvm->memslots->nmemslots; i++) + nr_pages += kvm->memslots->memslots[i].npages; nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000; nr_mmu_pages = max(nr_mmu_pages, @@ -3295,7 +3296,7 @@ static int count_rmaps(struct kvm_vcpu *vcpu) int i, j, k; for (i = 0; i < KVM_MEMORY_SLOTS; ++i) { - struct kvm_memory_slot *m = &vcpu->kvm->memslots[i]; + struct kvm_memory_slot *m = &vcpu->kvm->memslots->memslots[i]; struct kvm_rmap_desc *d; for (j = 0; j < m->npages; ++j) { diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 74a66f0c00b..18698799e36 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -1503,8 +1503,8 @@ static void enter_pmode(struct kvm_vcpu *vcpu) static gva_t rmode_tss_base(struct kvm *kvm) { if (!kvm->arch.tss_addr) { - gfn_t base_gfn = kvm->memslots[0].base_gfn + - kvm->memslots[0].npages - 3; + gfn_t base_gfn = kvm->memslots->memslots[0].base_gfn + + kvm->memslots->memslots[0].npages - 3; return base_gfn << PAGE_SHIFT; } return kvm->arch.tss_addr; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 8798504ace1..3b81cb9da8b 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2427,7 +2427,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, spin_lock(&kvm->mmu_lock); kvm_mmu_slot_remove_write_access(kvm, log->slot); spin_unlock(&kvm->mmu_lock); - memslot = &kvm->memslots[log->slot]; + memslot = &kvm->memslots->memslots[log->slot]; n = ALIGN(memslot->npages, BITS_PER_LONG) / 8; memset(memslot->dirty_bitmap, 0, n); } @@ -5223,7 +5223,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm, int user_alloc) { int npages = mem->memory_size >> PAGE_SHIFT; - struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot]; + struct kvm_memory_slot *memslot = &kvm->memslots->memslots[mem->slot]; /*To keep backward compatibility with older userspace, *x86 needs to hanlde !user_alloc case. 
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index bd5a616d937..782bfb185f8 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -150,14 +150,18 @@ struct kvm_irq_routing_table {}; #endif +struct kvm_memslots { + int nmemslots; + struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS + + KVM_PRIVATE_MEM_SLOTS]; +}; + struct kvm { spinlock_t mmu_lock; spinlock_t requests_lock; struct rw_semaphore slots_lock; struct mm_struct *mm; /* userspace tied to this vm */ - int nmemslots; - struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS + - KVM_PRIVATE_MEM_SLOTS]; + struct kvm_memslots *memslots; #ifdef CONFIG_KVM_APIC_ARCHITECTURE u32 bsp_vcpu_id; struct kvm_vcpu *bsp_vcpu; @@ -482,7 +486,7 @@ static inline void kvm_guest_exit(void) static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot) { - return slot - kvm->memslots; + return slot - kvm->memslots->memslots; } static inline gpa_t gfn_to_gpa(gfn_t gfn) diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c index 15147583abd..bc697a66a88 100644 --- a/virt/kvm/iommu.c +++ b/virt/kvm/iommu.c @@ -76,10 +76,13 @@ unmap_pages: static int kvm_iommu_map_memslots(struct kvm *kvm) { int i, r = 0; + struct kvm_memslots *slots; - for (i = 0; i < kvm->nmemslots; i++) { - r = kvm_iommu_map_pages(kvm, kvm->memslots[i].base_gfn, - kvm->memslots[i].npages); + slots = kvm->memslots; + + for (i = 0; i < slots->nmemslots; i++) { + r = kvm_iommu_map_pages(kvm, slots->memslots[i].base_gfn, + slots->memslots[i].npages); if (r) break; } @@ -210,10 +213,13 @@ static void kvm_iommu_put_pages(struct kvm *kvm, static int kvm_iommu_unmap_memslots(struct kvm *kvm) { int i; + struct kvm_memslots *slots; + + slots = kvm->memslots; - for (i = 0; i < kvm->nmemslots; i++) { - kvm_iommu_put_pages(kvm, kvm->memslots[i].base_gfn, - kvm->memslots[i].npages); + for (i = 0; i < slots->nmemslots; i++) { + kvm_iommu_put_pages(kvm, slots->memslots[i].base_gfn, + slots->memslots[i].npages); } return 0; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index bc23b8e0609..86dd8f3d29c 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -375,12 +375,16 @@ static struct kvm *kvm_create_vm(void) INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list); #endif + r = -ENOMEM; + kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL); + if (!kvm->memslots) + goto out_err; + #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET page = alloc_page(GFP_KERNEL | __GFP_ZERO); - if (!page) { - r = -ENOMEM; + if (!page) goto out_err; - } + kvm->coalesced_mmio_ring = (struct kvm_coalesced_mmio_ring *)page_address(page); #endif @@ -416,6 +420,7 @@ out: out_err: hardware_disable_all(); out_err_nodisable: + kfree(kvm->memslots); kfree(kvm); return ERR_PTR(r); } @@ -450,9 +455,12 @@ static void kvm_free_physmem_slot(struct kvm_memory_slot *free, void kvm_free_physmem(struct kvm *kvm) { int i; + struct kvm_memslots *slots = kvm->memslots; + + for (i = 0; i < slots->nmemslots; ++i) + kvm_free_physmem_slot(&slots->memslots[i], NULL); - for (i = 0; i < kvm->nmemslots; ++i) - kvm_free_physmem_slot(&kvm->memslots[i], NULL); + kfree(kvm->memslots); } static void kvm_destroy_vm(struct kvm *kvm) @@ -533,7 +541,7 @@ int __kvm_set_memory_region(struct kvm *kvm, if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr) goto out; - memslot = &kvm->memslots[mem->slot]; + memslot = &kvm->memslots->memslots[mem->slot]; base_gfn = mem->guest_phys_addr >> PAGE_SHIFT; npages = mem->memory_size >> PAGE_SHIFT; @@ -554,7 +562,7 @@ int __kvm_set_memory_region(struct kvm *kvm, /* 
Check for overlaps */ r = -EEXIST; for (i = 0; i < KVM_MEMORY_SLOTS; ++i) { - struct kvm_memory_slot *s = &kvm->memslots[i]; + struct kvm_memory_slot *s = &kvm->memslots->memslots[i]; if (s == memslot || !s->npages) continue; @@ -656,8 +664,8 @@ skip_lpage: kvm_arch_flush_shadow(kvm); spin_lock(&kvm->mmu_lock); - if (mem->slot >= kvm->nmemslots) - kvm->nmemslots = mem->slot + 1; + if (mem->slot >= kvm->memslots->nmemslots) + kvm->memslots->nmemslots = mem->slot + 1; *memslot = new; spin_unlock(&kvm->mmu_lock); @@ -727,7 +735,7 @@ int kvm_get_dirty_log(struct kvm *kvm, if (log->slot >= KVM_MEMORY_SLOTS) goto out; - memslot = &kvm->memslots[log->slot]; + memslot = &kvm->memslots->memslots[log->slot]; r = -ENOENT; if (!memslot->dirty_bitmap) goto out; @@ -781,9 +789,10 @@ EXPORT_SYMBOL_GPL(kvm_is_error_hva); struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn) { int i; + struct kvm_memslots *slots = kvm->memslots; - for (i = 0; i < kvm->nmemslots; ++i) { - struct kvm_memory_slot *memslot = &kvm->memslots[i]; + for (i = 0; i < slots->nmemslots; ++i) { + struct kvm_memory_slot *memslot = &slots->memslots[i]; if (gfn >= memslot->base_gfn && gfn < memslot->base_gfn + memslot->npages) @@ -802,10 +811,11 @@ struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) { int i; + struct kvm_memslots *slots = kvm->memslots; gfn = unalias_gfn(kvm, gfn); for (i = 0; i < KVM_MEMORY_SLOTS; ++i) { - struct kvm_memory_slot *memslot = &kvm->memslots[i]; + struct kvm_memory_slot *memslot = &slots->memslots[i]; if (gfn >= memslot->base_gfn && gfn < memslot->base_gfn + memslot->npages) -- cgit v1.2.3-70-g09d2 From fef9cce0eb28a67e688a411cc30b73625e49002b Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Wed, 23 Dec 2009 14:35:17 -0200 Subject: KVM: modify alias layout in x86s struct kvm_arch Have a pointer to an allocated region inside x86's kvm_arch. 
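As a rough stand-alone sketch of the layout change this patch makes (plain userspace C, not the kernel code; struct kvm_arch_sketch, main(), and the calloc()/free() calls are invented stand-ins for the kzalloc()/kfree() paths in the diff below): the fixed-size alias table leaves the containing struct and is reached through a single pointer, which is what lets a later patch replace the whole table at once instead of mutating it in place.

    #include <stdlib.h>

    #define KVM_ALIAS_SLOTS 4

    struct kvm_mem_alias {
            unsigned long base_gfn;
            unsigned long npages;
            unsigned long target_gfn;
    };

    struct kvm_mem_aliases {
            struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS];
            int naliases;
    };

    struct kvm_arch_sketch {
            struct kvm_mem_aliases *aliases; /* was: embedded array + naliases */
    };

    int main(void)
    {
            struct kvm_arch_sketch arch;

            /* kernel: kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL) */
            arch.aliases = calloc(1, sizeof(*arch.aliases));
            if (!arch.aliases)
                    return 1;           /* kernel: return ERR_PTR(-ENOMEM) */

            /* ... accesses go through arch.aliases->aliases[i] ... */

            free(arch.aliases);         /* kernel: kfree() in kvm_arch_destroy_vm */
            return 0;
    }

Once the table is only reachable through this pointer, a copy of it can be built off to the side and published with a single pointer assignment, which the SRCU conversion later in the series relies on.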
Signed-off-by: Marcelo Tosatti --- arch/x86/include/asm/kvm_host.h | 8 ++++++-- arch/x86/kvm/x86.c | 21 ++++++++++++++++----- 2 files changed, 22 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index fe4df464fb3..7cdcb3d0f77 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -370,9 +370,13 @@ struct kvm_mem_alias { gfn_t target_gfn; }; -struct kvm_arch{ - int naliases; +struct kvm_mem_aliases { struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS]; + int naliases; +}; + +struct kvm_arch { + struct kvm_mem_aliases *aliases; unsigned int n_free_mmu_pages; unsigned int n_requested_mmu_pages; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 3b81cb9da8b..1ce83319143 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2227,9 +2227,10 @@ gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn) { int i; struct kvm_mem_alias *alias; + struct kvm_mem_aliases *aliases = kvm->arch.aliases; - for (i = 0; i < kvm->arch.naliases; ++i) { - alias = &kvm->arch.aliases[i]; + for (i = 0; i < aliases->naliases; ++i) { + alias = &aliases->aliases[i]; if (gfn >= alias->base_gfn && gfn < alias->base_gfn + alias->npages) return alias->target_gfn + gfn - alias->base_gfn; @@ -2247,6 +2248,7 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm, { int r, n; struct kvm_mem_alias *p; + struct kvm_mem_aliases *aliases; r = -EINVAL; /* General sanity checks */ @@ -2266,15 +2268,17 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm, down_write(&kvm->slots_lock); spin_lock(&kvm->mmu_lock); - p = &kvm->arch.aliases[alias->slot]; + aliases = kvm->arch.aliases; + + p = &aliases->aliases[alias->slot]; p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT; p->npages = alias->memory_size >> PAGE_SHIFT; p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT; for (n = KVM_ALIAS_SLOTS; n > 0; --n) - if (kvm->arch.aliases[n - 1].npages) + if (aliases->aliases[n - 1].npages) break; - kvm->arch.naliases = n; + aliases->naliases = n; spin_unlock(&kvm->mmu_lock); kvm_mmu_zap_all(kvm); @@ -5158,6 +5162,12 @@ struct kvm *kvm_arch_create_vm(void) if (!kvm) return ERR_PTR(-ENOMEM); + kvm->arch.aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL); + if (!kvm->arch.aliases) { + kfree(kvm); + return ERR_PTR(-ENOMEM); + } + INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); @@ -5214,6 +5224,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm) put_page(kvm->arch.apic_access_page); if (kvm->arch.ept_identity_pagetable) put_page(kvm->arch.ept_identity_pagetable); + kfree(kvm->arch.aliases); kfree(kvm); } -- cgit v1.2.3-70-g09d2 From f7784b8ec9b6a041fa828cfbe9012fe51933f5ac Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Wed, 23 Dec 2009 14:35:18 -0200 Subject: KVM: split kvm_arch_set_memory_region into prepare and commit Required for SRCU convertion later. 
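The ordering that the split enables looks roughly like the stand-alone sketch below (plain userspace C with invented stand-ins such as arch_prepare, arch_commit and set_memory_region; the printf in the middle stands in for rcu_assign_pointer() plus synchronize_srcu_expedited() in the real code): prepare may still fail while nothing has been published, whereas commit runs only after the new memslots are visible and therefore has no error path.

    #include <stdio.h>

    struct mem_slot { int id; };

    /* may fail; nothing has been published yet, so failure is safe */
    static int arch_prepare(struct mem_slot *new)
    {
            (void)new;
            return 0;
    }

    /* runs after the new slot array is visible; no error path */
    static void arch_commit(struct mem_slot *new)
    {
            printf("slot %d committed\n", new->id);
    }

    static int set_memory_region(struct mem_slot *new)
    {
            if (arch_prepare(new))
                    return -1;

            /* kernel: rcu_assign_pointer(kvm->memslots, slots);
             *         synchronize_srcu_expedited(&kvm->srcu);   */
            printf("slot %d published\n", new->id);

            arch_commit(new);
            return 0;
    }

    int main(void)
    {
            struct mem_slot s = { .id = 0 };
            return set_memory_region(&s) ? 1 : 0;
    }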
Signed-off-by: Marcelo Tosatti --- arch/ia64/kvm/kvm-ia64.c | 16 +++++++++++---- arch/powerpc/kvm/powerpc.c | 18 ++++++++++++---- arch/s390/kvm/kvm-s390.c | 25 +++++++++++++++-------- arch/x86/kvm/x86.c | 51 ++++++++++++++++++++++++++-------------------- include/linux/kvm_host.h | 7 ++++++- virt/kvm/kvm_main.c | 12 +++++------ 6 files changed, 82 insertions(+), 47 deletions(-) (limited to 'arch') diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index 1ca1dbf4811..0757c702798 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -1578,15 +1578,15 @@ out: return r; } -int kvm_arch_set_memory_region(struct kvm *kvm, - struct kvm_userspace_memory_region *mem, +int kvm_arch_prepare_memory_region(struct kvm *kvm, + struct kvm_memory_slot *memslot, struct kvm_memory_slot old, + struct kvm_userspace_memory_region *mem, int user_alloc) { unsigned long i; unsigned long pfn; - int npages = mem->memory_size >> PAGE_SHIFT; - struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot]; + int npages = memslot->npages; unsigned long base_gfn = memslot->base_gfn; if (base_gfn + npages > (KVM_MAX_MEM_SIZE >> PAGE_SHIFT)) @@ -1610,6 +1610,14 @@ int kvm_arch_set_memory_region(struct kvm *kvm, return 0; } +void kvm_arch_commit_memory_region(struct kvm *kvm, + struct kvm_userspace_memory_region *mem, + struct kvm_memory_slot old, + int user_alloc) +{ + return; +} + void kvm_arch_flush_shadow(struct kvm *kvm) { kvm_flush_remote_tlbs(kvm); diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index f06cf93b178..4633e7850dd 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -165,14 +165,24 @@ long kvm_arch_dev_ioctl(struct file *filp, return -EINVAL; } -int kvm_arch_set_memory_region(struct kvm *kvm, - struct kvm_userspace_memory_region *mem, - struct kvm_memory_slot old, - int user_alloc) +int kvm_arch_prepare_memory_region(struct kvm *kvm, + struct kvm_memory_slot *memslot, + struct kvm_memory_slot old, + struct kvm_userspace_memory_region *mem, + int user_alloc) { return 0; } +void kvm_arch_commit_memory_region(struct kvm *kvm, + struct kvm_userspace_memory_region *mem, + struct kvm_memory_slot old, + int user_alloc) +{ + return; +} + + void kvm_arch_flush_shadow(struct kvm *kvm) { } diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 3fa0a10e466..c8002193d9d 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -690,14 +690,12 @@ long kvm_arch_vcpu_ioctl(struct file *filp, } /* Section: memory related */ -int kvm_arch_set_memory_region(struct kvm *kvm, - struct kvm_userspace_memory_region *mem, - struct kvm_memory_slot old, - int user_alloc) +int kvm_arch_prepare_memory_region(struct kvm *kvm, + struct kvm_memory_slot *memslot, + struct kvm_memory_slot old, + struct kvm_userspace_memory_region *mem, + int user_alloc) { - int i; - struct kvm_vcpu *vcpu; - /* A few sanity checks. We can have exactly one memory slot which has to start at guest virtual zero and which has to be located at a page boundary in userland and which has to end at a page boundary. 
@@ -720,14 +718,23 @@ int kvm_arch_set_memory_region(struct kvm *kvm, if (!user_alloc) return -EINVAL; + return 0; +} + +void kvm_arch_commit_memory_region(struct kvm *kvm, + struct kvm_userspace_memory_region *mem, + struct kvm_memory_slot old, + int user_alloc) +{ + int i; + struct kvm_vcpu *vcpu; + /* request update of sie control block for all available vcpus */ kvm_for_each_vcpu(i, vcpu, kvm) { if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests)) continue; kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP); } - - return 0; } void kvm_arch_flush_shadow(struct kvm *kvm) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 1ce83319143..43da65feed4 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -5228,13 +5228,13 @@ void kvm_arch_destroy_vm(struct kvm *kvm) kfree(kvm); } -int kvm_arch_set_memory_region(struct kvm *kvm, - struct kvm_userspace_memory_region *mem, +int kvm_arch_prepare_memory_region(struct kvm *kvm, + struct kvm_memory_slot *memslot, struct kvm_memory_slot old, + struct kvm_userspace_memory_region *mem, int user_alloc) { - int npages = mem->memory_size >> PAGE_SHIFT; - struct kvm_memory_slot *memslot = &kvm->memslots->memslots[mem->slot]; + int npages = memslot->npages; /*To keep backward compatibility with older userspace, *x86 needs to hanlde !user_alloc case. @@ -5254,26 +5254,35 @@ int kvm_arch_set_memory_region(struct kvm *kvm, if (IS_ERR((void *)userspace_addr)) return PTR_ERR((void *)userspace_addr); - /* set userspace_addr atomically for kvm_hva_to_rmapp */ - spin_lock(&kvm->mmu_lock); memslot->userspace_addr = userspace_addr; - spin_unlock(&kvm->mmu_lock); - } else { - if (!old.user_alloc && old.rmap) { - int ret; - - down_write(¤t->mm->mmap_sem); - ret = do_munmap(current->mm, old.userspace_addr, - old.npages * PAGE_SIZE); - up_write(¤t->mm->mmap_sem); - if (ret < 0) - printk(KERN_WARNING - "kvm_vm_ioctl_set_memory_region: " - "failed to munmap memory\n"); - } } } + + return 0; +} + +void kvm_arch_commit_memory_region(struct kvm *kvm, + struct kvm_userspace_memory_region *mem, + struct kvm_memory_slot old, + int user_alloc) +{ + + int npages = mem->memory_size >> PAGE_SHIFT; + + if (!user_alloc && !old.user_alloc && old.rmap && !npages) { + int ret; + + down_write(¤t->mm->mmap_sem); + ret = do_munmap(current->mm, old.userspace_addr, + old.npages * PAGE_SIZE); + up_write(¤t->mm->mmap_sem); + if (ret < 0) + printk(KERN_WARNING + "kvm_vm_ioctl_set_memory_region: " + "failed to munmap memory\n"); + } + spin_lock(&kvm->mmu_lock); if (!kvm->arch.n_requested_mmu_pages) { unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm); @@ -5282,8 +5291,6 @@ int kvm_arch_set_memory_region(struct kvm *kvm, kvm_mmu_slot_remove_write_access(kvm, mem->slot); spin_unlock(&kvm->mmu_lock); - - return 0; } void kvm_arch_flush_shadow(struct kvm *kvm) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 782bfb185f8..3c44687b342 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -253,7 +253,12 @@ int kvm_set_memory_region(struct kvm *kvm, int __kvm_set_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem, int user_alloc); -int kvm_arch_set_memory_region(struct kvm *kvm, +int kvm_arch_prepare_memory_region(struct kvm *kvm, + struct kvm_memory_slot *memslot, + struct kvm_memory_slot old, + struct kvm_userspace_memory_region *mem, + int user_alloc); +void kvm_arch_commit_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem, struct kvm_memory_slot old, int user_alloc); diff --git 
a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 86dd8f3d29c..c9f6cfe8312 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -663,6 +663,10 @@ skip_lpage: if (!npages) kvm_arch_flush_shadow(kvm); + r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc); + if (r) + goto out_free; + spin_lock(&kvm->mmu_lock); if (mem->slot >= kvm->memslots->nmemslots) kvm->memslots->nmemslots = mem->slot + 1; @@ -670,13 +674,7 @@ skip_lpage: *memslot = new; spin_unlock(&kvm->mmu_lock); - r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc); - if (r) { - spin_lock(&kvm->mmu_lock); - *memslot = old; - spin_unlock(&kvm->mmu_lock); - goto out_free; - } + kvm_arch_commit_memory_region(kvm, mem, old, user_alloc); kvm_free_physmem_slot(&old, npages ? &new : NULL); /* Slot deletion case: we have to update the current slot */ -- cgit v1.2.3-70-g09d2 From bc6678a33d9b952981a8e44a4f876c3ad64ca4d8 Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Wed, 23 Dec 2009 14:35:21 -0200 Subject: KVM: introduce kvm->srcu and convert kvm_set_memory_region to SRCU update Use two steps for memslot deletion: mark the slot invalid (which stops instantiation of new shadow pages for that slot, but allows destruction), then instantiate the new empty slot. Also simplifies kvm_handle_hva locking. Signed-off-by: Marcelo Tosatti --- arch/ia64/kvm/kvm-ia64.c | 4 +- arch/x86/kvm/mmu.c | 28 +++++----- arch/x86/kvm/vmx.c | 6 +- include/linux/kvm.h | 2 +- include/linux/kvm_host.h | 7 +-- virt/kvm/assigned-dev.c | 8 +-- virt/kvm/iommu.c | 4 +- virt/kvm/kvm_main.c | 141 +++++++++++++++++++++++++++++++++++------------ 8 files changed, 136 insertions(+), 64 deletions(-) (limited to 'arch') diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index 0757c702798..b2e4d16dd39 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -1382,7 +1382,7 @@ static void kvm_release_vm_pages(struct kvm *kvm) int i, j; unsigned long base_gfn; - slots = kvm->memslots; + slots = rcu_dereference(kvm->memslots); for (i = 0; i < slots->nmemslots; i++) { memslot = &slots->memslots[i]; base_gfn = memslot->base_gfn; @@ -1837,6 +1837,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot; int is_dirty = 0; + down_write(&kvm->slots_lock); spin_lock(&kvm->arch.dirty_log_lock); r = kvm_ia64_sync_dirty_log(kvm, log); @@ -1856,6 +1857,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, } r = 0; out: + up_write(&kvm->slots_lock); spin_unlock(&kvm->arch.dirty_log_lock); return r; } diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 81f84d326a8..f8bf42a2599 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include @@ -807,21 +808,15 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, { int i, j; int retval = 0; - struct kvm_memslots *slots = kvm->memslots; + struct kvm_memslots *slots; + + slots = rcu_dereference(kvm->memslots); - /* - * If mmap_sem isn't taken, we can look the memslots with only - * the mmu_lock by skipping over the slots with userspace_addr == 0. 
- */ for (i = 0; i < slots->nmemslots; i++) { struct kvm_memory_slot *memslot = &slots->memslots[i]; unsigned long start = memslot->userspace_addr; unsigned long end; - /* mmu_lock protects userspace_addr */ - if (!start) - continue; - end = start + (memslot->npages << PAGE_SHIFT); if (hva >= start && hva < end) { gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT; @@ -1617,7 +1612,7 @@ static void mmu_unshadow(struct kvm *kvm, gfn_t gfn) static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn) { - int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn)); + int slot = memslot_id(kvm, gfn); struct kvm_mmu_page *sp = page_header(__pa(pte)); __set_bit(slot, sp->slot_bitmap); @@ -3021,9 +3016,11 @@ unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm) int i; unsigned int nr_mmu_pages; unsigned int nr_pages = 0; + struct kvm_memslots *slots; - for (i = 0; i < kvm->memslots->nmemslots; i++) - nr_pages += kvm->memslots->memslots[i].npages; + slots = rcu_dereference(kvm->memslots); + for (i = 0; i < slots->nmemslots; i++) + nr_pages += slots->memslots[i].npages; nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000; nr_mmu_pages = max(nr_mmu_pages, @@ -3293,10 +3290,12 @@ static void audit_mappings(struct kvm_vcpu *vcpu) static int count_rmaps(struct kvm_vcpu *vcpu) { int nmaps = 0; - int i, j, k; + int i, j, k, idx; + idx = srcu_read_lock(&kvm->srcu); + slots = rcu_dereference(kvm->memslots); for (i = 0; i < KVM_MEMORY_SLOTS; ++i) { - struct kvm_memory_slot *m = &vcpu->kvm->memslots->memslots[i]; + struct kvm_memory_slot *m = &slots->memslots[i]; struct kvm_rmap_desc *d; for (j = 0; j < m->npages; ++j) { @@ -3319,6 +3318,7 @@ static int count_rmaps(struct kvm_vcpu *vcpu) } } } + srcu_read_unlock(&kvm->srcu, idx); return nmaps; } diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 18698799e36..f1cae7d6113 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -1503,7 +1503,11 @@ static void enter_pmode(struct kvm_vcpu *vcpu) static gva_t rmode_tss_base(struct kvm *kvm) { if (!kvm->arch.tss_addr) { - gfn_t base_gfn = kvm->memslots->memslots[0].base_gfn + + struct kvm_memslots *slots; + gfn_t base_gfn; + + slots = rcu_dereference(kvm->memslots); + base_gfn = kvm->memslots->memslots[0].base_gfn + kvm->memslots->memslots[0].npages - 3; return base_gfn << PAGE_SHIFT; } diff --git a/include/linux/kvm.h b/include/linux/kvm.h index a24de0b1858..f2feef68ffd 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h @@ -103,7 +103,7 @@ struct kvm_userspace_memory_region { /* for kvm_memory_region::flags */ #define KVM_MEM_LOG_DIRTY_PAGES 1UL - +#define KVM_MEMSLOT_INVALID (1UL << 1) /* for KVM_IRQ_LINE */ struct kvm_irq_level { diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 9af240387fe..93bd30701ca 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -162,6 +162,7 @@ struct kvm { struct rw_semaphore slots_lock; struct mm_struct *mm; /* userspace tied to this vm */ struct kvm_memslots *memslots; + struct srcu_struct srcu; #ifdef CONFIG_KVM_APIC_ARCHITECTURE u32 bsp_vcpu_id; struct kvm_vcpu *bsp_vcpu; @@ -275,6 +276,7 @@ void kvm_set_page_accessed(struct page *page); pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn); pfn_t gfn_to_pfn_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn); +int memslot_id(struct kvm *kvm, gfn_t gfn); void kvm_release_pfn_dirty(pfn_t); void kvm_release_pfn_clean(pfn_t pfn); void kvm_set_pfn_dirty(pfn_t pfn); @@ -490,11 +492,6 @@ static inline void kvm_guest_exit(void) current->flags &= ~PF_VCPU; } 
-static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot) -{ - return slot - kvm->memslots->memslots; -} - static inline gpa_t gfn_to_gpa(gfn_t gfn) { return (gpa_t)gfn << PAGE_SHIFT; diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c index f73de631e3e..f51e684dd23 100644 --- a/virt/kvm/assigned-dev.c +++ b/virt/kvm/assigned-dev.c @@ -504,12 +504,12 @@ out: static int kvm_vm_ioctl_assign_device(struct kvm *kvm, struct kvm_assigned_pci_dev *assigned_dev) { - int r = 0; + int r = 0, idx; struct kvm_assigned_dev_kernel *match; struct pci_dev *dev; mutex_lock(&kvm->lock); - down_read(&kvm->slots_lock); + idx = srcu_read_lock(&kvm->srcu); match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head, assigned_dev->assigned_dev_id); @@ -573,7 +573,7 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm, } out: - up_read(&kvm->slots_lock); + srcu_read_unlock(&kvm->srcu, idx); mutex_unlock(&kvm->lock); return r; out_list_del: @@ -585,7 +585,7 @@ out_put: pci_dev_put(dev); out_free: kfree(match); - up_read(&kvm->slots_lock); + srcu_read_unlock(&kvm->srcu, idx); mutex_unlock(&kvm->lock); return r; } diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c index cf567d8033d..65a51432c8e 100644 --- a/virt/kvm/iommu.c +++ b/virt/kvm/iommu.c @@ -78,7 +78,7 @@ static int kvm_iommu_map_memslots(struct kvm *kvm) int i, r = 0; struct kvm_memslots *slots; - slots = kvm->memslots; + slots = rcu_dereference(kvm->memslots); for (i = 0; i < slots->nmemslots; i++) { r = kvm_iommu_map_pages(kvm, &slots->memslots[i]); @@ -214,7 +214,7 @@ static int kvm_iommu_unmap_memslots(struct kvm *kvm) int i; struct kvm_memslots *slots; - slots = kvm->memslots; + slots = rcu_dereference(kvm->memslots); for (i = 0; i < slots->nmemslots; i++) { kvm_iommu_put_pages(kvm, slots->memslots[i].base_gfn, diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 87d296d8b27..2bb24a814fd 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -44,6 +44,7 @@ #include #include #include +#include #include #include @@ -213,7 +214,7 @@ static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn, unsigned long address) { struct kvm *kvm = mmu_notifier_to_kvm(mn); - int need_tlb_flush; + int need_tlb_flush, idx; /* * When ->invalidate_page runs, the linux pte has been zapped @@ -233,10 +234,12 @@ static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn, * pte after kvm_unmap_hva returned, without noticing the page * is going to be freed. 
*/ + idx = srcu_read_lock(&kvm->srcu); spin_lock(&kvm->mmu_lock); kvm->mmu_notifier_seq++; need_tlb_flush = kvm_unmap_hva(kvm, address); spin_unlock(&kvm->mmu_lock); + srcu_read_unlock(&kvm->srcu, idx); /* we've to flush the tlb before the pages can be freed */ if (need_tlb_flush) @@ -250,11 +253,14 @@ static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn, pte_t pte) { struct kvm *kvm = mmu_notifier_to_kvm(mn); + int idx; + idx = srcu_read_lock(&kvm->srcu); spin_lock(&kvm->mmu_lock); kvm->mmu_notifier_seq++; kvm_set_spte_hva(kvm, address, pte); spin_unlock(&kvm->mmu_lock); + srcu_read_unlock(&kvm->srcu, idx); } static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, @@ -263,8 +269,9 @@ static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, unsigned long end) { struct kvm *kvm = mmu_notifier_to_kvm(mn); - int need_tlb_flush = 0; + int need_tlb_flush = 0, idx; + idx = srcu_read_lock(&kvm->srcu); spin_lock(&kvm->mmu_lock); /* * The count increase must become visible at unlock time as no @@ -275,6 +282,7 @@ static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, for (; start < end; start += PAGE_SIZE) need_tlb_flush |= kvm_unmap_hva(kvm, start); spin_unlock(&kvm->mmu_lock); + srcu_read_unlock(&kvm->srcu, idx); /* we've to flush the tlb before the pages can be freed */ if (need_tlb_flush) @@ -312,11 +320,13 @@ static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn, unsigned long address) { struct kvm *kvm = mmu_notifier_to_kvm(mn); - int young; + int young, idx; + idx = srcu_read_lock(&kvm->srcu); spin_lock(&kvm->mmu_lock); young = kvm_age_hva(kvm, address); spin_unlock(&kvm->mmu_lock); + srcu_read_unlock(&kvm->srcu, idx); if (young) kvm_flush_remote_tlbs(kvm); @@ -379,11 +389,15 @@ static struct kvm *kvm_create_vm(void) kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL); if (!kvm->memslots) goto out_err; + if (init_srcu_struct(&kvm->srcu)) + goto out_err; #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET page = alloc_page(GFP_KERNEL | __GFP_ZERO); - if (!page) + if (!page) { + cleanup_srcu_struct(&kvm->srcu); goto out_err; + } kvm->coalesced_mmio_ring = (struct kvm_coalesced_mmio_ring *)page_address(page); @@ -391,6 +405,7 @@ static struct kvm *kvm_create_vm(void) r = kvm_init_mmu_notifier(kvm); if (r) { + cleanup_srcu_struct(&kvm->srcu); #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET put_page(page); #endif @@ -480,6 +495,7 @@ static void kvm_destroy_vm(struct kvm *kvm) #else kvm_arch_flush_shadow(kvm); #endif + cleanup_srcu_struct(&kvm->srcu); kvm_arch_destroy_vm(kvm); hardware_disable_all(); mmdrop(mm); @@ -521,12 +537,13 @@ int __kvm_set_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem, int user_alloc) { - int r; + int r, flush_shadow = 0; gfn_t base_gfn; unsigned long npages; unsigned long i; struct kvm_memory_slot *memslot; struct kvm_memory_slot old, new; + struct kvm_memslots *slots, *old_memslots; r = -EINVAL; /* General sanity checks */ @@ -588,15 +605,7 @@ int __kvm_set_memory_region(struct kvm *kvm, memset(new.rmap, 0, npages * sizeof(*new.rmap)); new.user_alloc = user_alloc; - /* - * hva_to_rmmap() serialzies with the mmu_lock and to be - * safe it has to ignore memslots with !user_alloc && - * !userspace_addr. 
- */ - if (user_alloc) - new.userspace_addr = mem->userspace_addr; - else - new.userspace_addr = 0; + new.userspace_addr = mem->userspace_addr; } if (!npages) goto skip_lpage; @@ -651,8 +660,9 @@ skip_lpage: if (!new.dirty_bitmap) goto out_free; memset(new.dirty_bitmap, 0, dirty_bytes); + /* destroy any largepage mappings for dirty tracking */ if (old.npages) - kvm_arch_flush_shadow(kvm); + flush_shadow = 1; } #else /* not defined CONFIG_S390 */ new.user_alloc = user_alloc; @@ -660,34 +670,72 @@ skip_lpage: new.userspace_addr = mem->userspace_addr; #endif /* not defined CONFIG_S390 */ - if (!npages) + if (!npages) { + r = -ENOMEM; + slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL); + if (!slots) + goto out_free; + memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots)); + if (mem->slot >= slots->nmemslots) + slots->nmemslots = mem->slot + 1; + slots->memslots[mem->slot].flags |= KVM_MEMSLOT_INVALID; + + old_memslots = kvm->memslots; + rcu_assign_pointer(kvm->memslots, slots); + synchronize_srcu_expedited(&kvm->srcu); + /* From this point no new shadow pages pointing to a deleted + * memslot will be created. + * + * validation of sp->gfn happens in: + * - gfn_to_hva (kvm_read_guest, gfn_to_pfn) + * - kvm_is_visible_gfn (mmu_check_roots) + */ kvm_arch_flush_shadow(kvm); + kfree(old_memslots); + } r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc); if (r) goto out_free; - spin_lock(&kvm->mmu_lock); - if (mem->slot >= kvm->memslots->nmemslots) - kvm->memslots->nmemslots = mem->slot + 1; +#ifdef CONFIG_DMAR + /* map the pages in iommu page table */ + if (npages) { + r = kvm_iommu_map_pages(kvm, &new); + if (r) + goto out_free; + } +#endif - *memslot = new; - spin_unlock(&kvm->mmu_lock); + r = -ENOMEM; + slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL); + if (!slots) + goto out_free; + memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots)); + if (mem->slot >= slots->nmemslots) + slots->nmemslots = mem->slot + 1; + + /* actual memory is freed via old in kvm_free_physmem_slot below */ + if (!npages) { + new.rmap = NULL; + new.dirty_bitmap = NULL; + for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) + new.lpage_info[i] = NULL; + } + + slots->memslots[mem->slot] = new; + old_memslots = kvm->memslots; + rcu_assign_pointer(kvm->memslots, slots); + synchronize_srcu_expedited(&kvm->srcu); kvm_arch_commit_memory_region(kvm, mem, old, user_alloc); - kvm_free_physmem_slot(&old, npages ? 
&new : NULL); - /* Slot deletion case: we have to update the current slot */ - spin_lock(&kvm->mmu_lock); - if (!npages) - *memslot = old; - spin_unlock(&kvm->mmu_lock); -#ifdef CONFIG_DMAR - /* map the pages in iommu page table */ - r = kvm_iommu_map_pages(kvm, memslot); - if (r) - goto out; -#endif + kvm_free_physmem_slot(&old, &new); + kfree(old_memslots); + + if (flush_shadow) + kvm_arch_flush_shadow(kvm); + return 0; out_free: @@ -787,7 +835,7 @@ EXPORT_SYMBOL_GPL(kvm_is_error_hva); struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn) { int i; - struct kvm_memslots *slots = kvm->memslots; + struct kvm_memslots *slots = rcu_dereference(kvm->memslots); for (i = 0; i < slots->nmemslots; ++i) { struct kvm_memory_slot *memslot = &slots->memslots[i]; @@ -809,12 +857,15 @@ struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) { int i; - struct kvm_memslots *slots = kvm->memslots; + struct kvm_memslots *slots = rcu_dereference(kvm->memslots); gfn = unalias_gfn(kvm, gfn); for (i = 0; i < KVM_MEMORY_SLOTS; ++i) { struct kvm_memory_slot *memslot = &slots->memslots[i]; + if (memslot->flags & KVM_MEMSLOT_INVALID) + continue; + if (gfn >= memslot->base_gfn && gfn < memslot->base_gfn + memslot->npages) return 1; @@ -823,13 +874,31 @@ int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) } EXPORT_SYMBOL_GPL(kvm_is_visible_gfn); +int memslot_id(struct kvm *kvm, gfn_t gfn) +{ + int i; + struct kvm_memslots *slots = rcu_dereference(kvm->memslots); + struct kvm_memory_slot *memslot = NULL; + + gfn = unalias_gfn(kvm, gfn); + for (i = 0; i < slots->nmemslots; ++i) { + memslot = &slots->memslots[i]; + + if (gfn >= memslot->base_gfn + && gfn < memslot->base_gfn + memslot->npages) + break; + } + + return memslot - slots->memslots; +} + unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn) { struct kvm_memory_slot *slot; gfn = unalias_gfn(kvm, gfn); slot = gfn_to_memslot_unaliased(kvm, gfn); - if (!slot) + if (!slot || slot->flags & KVM_MEMSLOT_INVALID) return bad_hva(); return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE); } -- cgit v1.2.3-70-g09d2 From b050b015abbef8225826eecb6f6b4d4a6dea7b79 Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Wed, 23 Dec 2009 14:35:22 -0200 Subject: KVM: use SRCU for dirty log Signed-off-by: Marcelo Tosatti --- arch/x86/kvm/x86.c | 49 +++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 41 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 43da65feed4..e7488350ca1 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2415,27 +2415,60 @@ static int kvm_vm_ioctl_reinject(struct kvm *kvm, int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) { - int r; - int n; + int r, n, i; struct kvm_memory_slot *memslot; - int is_dirty = 0; + unsigned long is_dirty = 0; + unsigned long *dirty_bitmap = NULL; down_write(&kvm->slots_lock); - r = kvm_get_dirty_log(kvm, log, &is_dirty); - if (r) + r = -EINVAL; + if (log->slot >= KVM_MEMORY_SLOTS) + goto out; + + memslot = &kvm->memslots->memslots[log->slot]; + r = -ENOENT; + if (!memslot->dirty_bitmap) + goto out; + + n = ALIGN(memslot->npages, BITS_PER_LONG) / 8; + + r = -ENOMEM; + dirty_bitmap = vmalloc(n); + if (!dirty_bitmap) goto out; + memset(dirty_bitmap, 0, n); + + for (i = 0; !is_dirty && i < n/sizeof(long); i++) + is_dirty = memslot->dirty_bitmap[i]; /* If nothing is dirty, don't bother messing with page tables. 
*/ if (is_dirty) { + struct kvm_memslots *slots, *old_slots; + spin_lock(&kvm->mmu_lock); kvm_mmu_slot_remove_write_access(kvm, log->slot); spin_unlock(&kvm->mmu_lock); - memslot = &kvm->memslots->memslots[log->slot]; - n = ALIGN(memslot->npages, BITS_PER_LONG) / 8; - memset(memslot->dirty_bitmap, 0, n); + + slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL); + if (!slots) + goto out_free; + + memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots)); + slots->memslots[log->slot].dirty_bitmap = dirty_bitmap; + + old_slots = kvm->memslots; + rcu_assign_pointer(kvm->memslots, slots); + synchronize_srcu_expedited(&kvm->srcu); + dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap; + kfree(old_slots); } + r = 0; + if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) + r = -EFAULT; +out_free: + vfree(dirty_bitmap); out: up_write(&kvm->slots_lock); return r; -- cgit v1.2.3-70-g09d2 From a983fb238728e1123177e8058d4f644b949a7d05 Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Wed, 23 Dec 2009 14:35:23 -0200 Subject: KVM: x86: switch kvm_set_memory_alias to SRCU update Using a similar two-step procedure as for memslots. Signed-off-by: Marcelo Tosatti --- arch/x86/include/asm/kvm_host.h | 4 +++ arch/x86/kvm/x86.c | 60 ++++++++++++++++++++++++++++++++++------- include/linux/kvm_host.h | 6 +++++ virt/kvm/kvm_main.c | 4 +-- 4 files changed, 63 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 7cdcb3d0f77..6c8c7c578c4 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -368,8 +368,12 @@ struct kvm_mem_alias { gfn_t base_gfn; unsigned long npages; gfn_t target_gfn; +#define KVM_ALIAS_INVALID 1UL + unsigned long flags; }; +#define KVM_ARCH_HAS_UNALIAS_INSTANTIATION + struct kvm_mem_aliases { struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS]; int naliases; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index e7488350ca1..28127c936c3 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -38,6 +38,7 @@ #include #include #include +#include #include #undef TRACE_INCLUDE_FILE #define CREATE_TRACE_POINTS @@ -2223,11 +2224,32 @@ static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm) return kvm->arch.n_alloc_mmu_pages; } +gfn_t unalias_gfn_instantiation(struct kvm *kvm, gfn_t gfn) +{ + int i; + struct kvm_mem_alias *alias; + struct kvm_mem_aliases *aliases; + + aliases = rcu_dereference(kvm->arch.aliases); + + for (i = 0; i < aliases->naliases; ++i) { + alias = &aliases->aliases[i]; + if (alias->flags & KVM_ALIAS_INVALID) + continue; + if (gfn >= alias->base_gfn + && gfn < alias->base_gfn + alias->npages) + return alias->target_gfn + gfn - alias->base_gfn; + } + return gfn; +} + gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn) { int i; struct kvm_mem_alias *alias; - struct kvm_mem_aliases *aliases = kvm->arch.aliases; + struct kvm_mem_aliases *aliases; + + aliases = rcu_dereference(kvm->arch.aliases); for (i = 0; i < aliases->naliases; ++i) { alias = &aliases->aliases[i]; @@ -2248,7 +2270,7 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm, { int r, n; struct kvm_mem_alias *p; - struct kvm_mem_aliases *aliases; + struct kvm_mem_aliases *aliases, *old_aliases; r = -EINVAL; /* General sanity checks */ @@ -2265,28 +2287,48 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm, < alias->target_phys_addr) goto out; + r = -ENOMEM; + aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL); + if (!aliases) + goto out; + down_write(&kvm->slots_lock); - 
spin_lock(&kvm->mmu_lock); - aliases = kvm->arch.aliases; + /* invalidate any gfn reference in case of deletion/shrinking */ + memcpy(aliases, kvm->arch.aliases, sizeof(struct kvm_mem_aliases)); + aliases->aliases[alias->slot].flags |= KVM_ALIAS_INVALID; + old_aliases = kvm->arch.aliases; + rcu_assign_pointer(kvm->arch.aliases, aliases); + synchronize_srcu_expedited(&kvm->srcu); + kvm_mmu_zap_all(kvm); + kfree(old_aliases); + + r = -ENOMEM; + aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL); + if (!aliases) + goto out_unlock; + + memcpy(aliases, kvm->arch.aliases, sizeof(struct kvm_mem_aliases)); p = &aliases->aliases[alias->slot]; p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT; p->npages = alias->memory_size >> PAGE_SHIFT; p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT; + p->flags &= ~(KVM_ALIAS_INVALID); for (n = KVM_ALIAS_SLOTS; n > 0; --n) if (aliases->aliases[n - 1].npages) break; aliases->naliases = n; - spin_unlock(&kvm->mmu_lock); - kvm_mmu_zap_all(kvm); + old_aliases = kvm->arch.aliases; + rcu_assign_pointer(kvm->arch.aliases, aliases); + synchronize_srcu_expedited(&kvm->srcu); + kfree(old_aliases); + r = 0; +out_unlock: up_write(&kvm->slots_lock); - - return 0; - out: return r; } diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 93bd30701ca..20941c0f404 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -266,6 +266,8 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, void kvm_disable_largepages(void); void kvm_arch_flush_shadow(struct kvm *kvm); gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn); +gfn_t unalias_gfn_instantiation(struct kvm *kvm, gfn_t gfn); + struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn); unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn); void kvm_release_page_clean(struct page *page); @@ -539,6 +541,10 @@ static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_se } #endif +#ifndef KVM_ARCH_HAS_UNALIAS_INSTANTIATION +#define unalias_gfn_instantiation unalias_gfn +#endif + #ifdef CONFIG_HAVE_KVM_IRQCHIP #define KVM_MAX_IRQ_ROUTES 1024 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 2bb24a814fd..c680f7b64c6 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -859,7 +859,7 @@ int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) int i; struct kvm_memslots *slots = rcu_dereference(kvm->memslots); - gfn = unalias_gfn(kvm, gfn); + gfn = unalias_gfn_instantiation(kvm, gfn); for (i = 0; i < KVM_MEMORY_SLOTS; ++i) { struct kvm_memory_slot *memslot = &slots->memslots[i]; @@ -896,7 +896,7 @@ unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn) { struct kvm_memory_slot *slot; - gfn = unalias_gfn(kvm, gfn); + gfn = unalias_gfn_instantiation(kvm, gfn); slot = gfn_to_memslot_unaliased(kvm, gfn); if (!slot || slot->flags & KVM_MEMSLOT_INVALID) return bad_hva(); -- cgit v1.2.3-70-g09d2 From e93f8a0f821e290ac5149830110a5f704db7a1fc Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Wed, 23 Dec 2009 14:35:24 -0200 Subject: KVM: convert io_bus to SRCU Signed-off-by: Marcelo Tosatti --- arch/ia64/kvm/kvm-ia64.c | 4 +- arch/x86/kvm/i8254.c | 6 +-- arch/x86/kvm/i8259.c | 4 +- arch/x86/kvm/x86.c | 13 +++--- include/linux/kvm_host.h | 27 ++++++------ virt/kvm/coalesced_mmio.c | 4 +- virt/kvm/eventfd.c | 8 ++-- virt/kvm/ioapic.c | 4 +- virt/kvm/kvm_main.c | 106 +++++++++++++++++++++++++++------------------- 9 files changed, 101 insertions(+), 75 deletions(-) (limited to 'arch') diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index b2e4d16dd39..d0ad538f008 
100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -241,10 +241,10 @@ static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) return 0; mmio: if (p->dir) - r = kvm_io_bus_read(&vcpu->kvm->mmio_bus, p->addr, + r = kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, p->addr, p->size, &p->data); else - r = kvm_io_bus_write(&vcpu->kvm->mmio_bus, p->addr, + r = kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, p->addr, p->size, &p->data); if (r) printk(KERN_ERR"kvm: No iodevice found! addr:%lx\n", p->addr); diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index 15578f180e5..4b433de02e5 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c @@ -645,13 +645,13 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags) kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier); kvm_iodevice_init(&pit->dev, &pit_dev_ops); - ret = __kvm_io_bus_register_dev(&kvm->pio_bus, &pit->dev); + ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, &pit->dev); if (ret < 0) goto fail; if (flags & KVM_PIT_SPEAKER_DUMMY) { kvm_iodevice_init(&pit->speaker_dev, &speaker_dev_ops); - ret = __kvm_io_bus_register_dev(&kvm->pio_bus, + ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, &pit->speaker_dev); if (ret < 0) goto fail_unregister; @@ -660,7 +660,7 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags) return pit; fail_unregister: - __kvm_io_bus_unregister_dev(&kvm->pio_bus, &pit->dev); + kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->dev); fail: if (pit->irq_source_id >= 0) diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c index d057c0cbd24..b7d145b2095 100644 --- a/arch/x86/kvm/i8259.c +++ b/arch/x86/kvm/i8259.c @@ -533,7 +533,9 @@ struct kvm_pic *kvm_create_pic(struct kvm *kvm) * Initialize PIO device */ kvm_iodevice_init(&s->dev, &picdev_ops); - ret = kvm_io_bus_register_dev(kvm, &kvm->pio_bus, &s->dev); + down_write(&kvm->slots_lock); + ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, &s->dev); + up_write(&kvm->slots_lock); if (ret < 0) { kfree(s); return NULL; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 28127c936c3..9b42673df4a 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2851,7 +2851,7 @@ static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len, !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, len, v)) return 0; - return kvm_io_bus_write(&vcpu->kvm->mmio_bus, addr, len, v); + return kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, len, v); } static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v) @@ -2860,7 +2860,7 @@ static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v) !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, len, v)) return 0; - return kvm_io_bus_read(&vcpu->kvm->mmio_bus, addr, len, v); + return kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, len, v); } static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes, @@ -3345,11 +3345,12 @@ static int kernel_pio(struct kvm_vcpu *vcpu, void *pd) int r; if (vcpu->arch.pio.in) - r = kvm_io_bus_read(&vcpu->kvm->pio_bus, vcpu->arch.pio.port, + r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port, vcpu->arch.pio.size, pd); else - r = kvm_io_bus_write(&vcpu->kvm->pio_bus, vcpu->arch.pio.port, - vcpu->arch.pio.size, pd); + r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS, + vcpu->arch.pio.port, vcpu->arch.pio.size, + pd); return r; } @@ -3360,7 +3361,7 @@ static int pio_string_write(struct kvm_vcpu *vcpu) int i, r = 0; for (i = 0; i < io->cur_count; i++) { - if (kvm_io_bus_write(&vcpu->kvm->pio_bus, + if 
(kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS, io->port, io->size, pd)) { r = -EOPNOTSUPP; break; diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 20941c0f404..5e9cb902550 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -57,20 +57,20 @@ struct kvm_io_bus { struct kvm_io_device *devs[NR_IOBUS_DEVS]; }; -void kvm_io_bus_init(struct kvm_io_bus *bus); -void kvm_io_bus_destroy(struct kvm_io_bus *bus); -int kvm_io_bus_write(struct kvm_io_bus *bus, gpa_t addr, int len, - const void *val); -int kvm_io_bus_read(struct kvm_io_bus *bus, gpa_t addr, int len, +enum kvm_bus { + KVM_MMIO_BUS, + KVM_PIO_BUS, + KVM_NR_BUSES +}; + +int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, + int len, const void *val); +int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len, void *val); -int __kvm_io_bus_register_dev(struct kvm_io_bus *bus, - struct kvm_io_device *dev); -int kvm_io_bus_register_dev(struct kvm *kvm, struct kvm_io_bus *bus, +int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, struct kvm_io_device *dev); -void __kvm_io_bus_unregister_dev(struct kvm_io_bus *bus, - struct kvm_io_device *dev); -void kvm_io_bus_unregister_dev(struct kvm *kvm, struct kvm_io_bus *bus, - struct kvm_io_device *dev); +int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, + struct kvm_io_device *dev); struct kvm_vcpu { struct kvm *kvm; @@ -171,8 +171,7 @@ struct kvm { atomic_t online_vcpus; struct list_head vm_list; struct mutex lock; - struct kvm_io_bus mmio_bus; - struct kvm_io_bus pio_bus; + struct kvm_io_bus *buses[KVM_NR_BUSES]; #ifdef CONFIG_HAVE_KVM_EVENTFD struct { spinlock_t lock; diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c index d68e6c68e0f..a736a93ca7b 100644 --- a/virt/kvm/coalesced_mmio.c +++ b/virt/kvm/coalesced_mmio.c @@ -110,7 +110,9 @@ int kvm_coalesced_mmio_init(struct kvm *kvm) dev->kvm = kvm; kvm->coalesced_mmio_dev = dev; - ret = kvm_io_bus_register_dev(kvm, &kvm->mmio_bus, &dev->dev); + down_write(&kvm->slots_lock); + ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &dev->dev); + up_write(&kvm->slots_lock); if (ret < 0) goto out_free_dev; diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c index a9d3fc6c681..315a586ec4d 100644 --- a/virt/kvm/eventfd.c +++ b/virt/kvm/eventfd.c @@ -463,7 +463,7 @@ static int kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) { int pio = args->flags & KVM_IOEVENTFD_FLAG_PIO; - struct kvm_io_bus *bus = pio ? &kvm->pio_bus : &kvm->mmio_bus; + enum kvm_bus bus_idx = pio ? KVM_PIO_BUS : KVM_MMIO_BUS; struct _ioeventfd *p; struct eventfd_ctx *eventfd; int ret; @@ -518,7 +518,7 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) kvm_iodevice_init(&p->dev, &ioeventfd_ops); - ret = __kvm_io_bus_register_dev(bus, &p->dev); + ret = kvm_io_bus_register_dev(kvm, bus_idx, &p->dev); if (ret < 0) goto unlock_fail; @@ -542,7 +542,7 @@ static int kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) { int pio = args->flags & KVM_IOEVENTFD_FLAG_PIO; - struct kvm_io_bus *bus = pio ? &kvm->pio_bus : &kvm->mmio_bus; + enum kvm_bus bus_idx = pio ? 
KVM_PIO_BUS : KVM_MMIO_BUS; struct _ioeventfd *p, *tmp; struct eventfd_ctx *eventfd; int ret = -ENOENT; @@ -565,7 +565,7 @@ kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) if (!p->wildcard && p->datamatch != args->datamatch) continue; - __kvm_io_bus_unregister_dev(bus, &p->dev); + kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev); ioeventfd_release(p); ret = 0; break; diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c index 38a2d20b89d..f326a6f301c 100644 --- a/virt/kvm/ioapic.c +++ b/virt/kvm/ioapic.c @@ -372,7 +372,9 @@ int kvm_ioapic_init(struct kvm *kvm) kvm_ioapic_reset(ioapic); kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops); ioapic->kvm = kvm; - ret = kvm_io_bus_register_dev(kvm, &kvm->mmio_bus, &ioapic->dev); + down_write(&kvm->slots_lock); + ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &ioapic->dev); + up_write(&kvm->slots_lock); if (ret < 0) kfree(ioapic); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index c680f7b64c6..659bc12ad16 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -85,6 +85,8 @@ static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl, static int hardware_enable_all(void); static void hardware_disable_all(void); +static void kvm_io_bus_destroy(struct kvm_io_bus *bus); + static bool kvm_rebooting; static bool largepages_enabled = true; @@ -367,7 +369,7 @@ static int kvm_init_mmu_notifier(struct kvm *kvm) static struct kvm *kvm_create_vm(void) { - int r = 0; + int r = 0, i; struct kvm *kvm = kvm_arch_create_vm(); #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET struct page *page; @@ -391,6 +393,14 @@ static struct kvm *kvm_create_vm(void) goto out_err; if (init_srcu_struct(&kvm->srcu)) goto out_err; + for (i = 0; i < KVM_NR_BUSES; i++) { + kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus), + GFP_KERNEL); + if (!kvm->buses[i]) { + cleanup_srcu_struct(&kvm->srcu); + goto out_err; + } + } #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET page = alloc_page(GFP_KERNEL | __GFP_ZERO); @@ -416,11 +426,9 @@ static struct kvm *kvm_create_vm(void) atomic_inc(&kvm->mm->mm_count); spin_lock_init(&kvm->mmu_lock); spin_lock_init(&kvm->requests_lock); - kvm_io_bus_init(&kvm->pio_bus); kvm_eventfd_init(kvm); mutex_init(&kvm->lock); mutex_init(&kvm->irq_lock); - kvm_io_bus_init(&kvm->mmio_bus); init_rwsem(&kvm->slots_lock); atomic_set(&kvm->users_count, 1); spin_lock(&kvm_lock); @@ -435,6 +443,8 @@ out: out_err: hardware_disable_all(); out_err_nodisable: + for (i = 0; i < KVM_NR_BUSES; i++) + kfree(kvm->buses[i]); kfree(kvm->memslots); kfree(kvm); return ERR_PTR(r); @@ -480,6 +490,7 @@ void kvm_free_physmem(struct kvm *kvm) static void kvm_destroy_vm(struct kvm *kvm) { + int i; struct mm_struct *mm = kvm->mm; kvm_arch_sync_events(kvm); @@ -487,8 +498,8 @@ static void kvm_destroy_vm(struct kvm *kvm) list_del(&kvm->vm_list); spin_unlock(&kvm_lock); kvm_free_irq_routing(kvm); - kvm_io_bus_destroy(&kvm->pio_bus); - kvm_io_bus_destroy(&kvm->mmio_bus); + for (i = 0; i < KVM_NR_BUSES; i++) + kvm_io_bus_destroy(kvm->buses[i]); kvm_coalesced_mmio_free(kvm); #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm); @@ -1949,12 +1960,7 @@ static struct notifier_block kvm_reboot_notifier = { .priority = 0, }; -void kvm_io_bus_init(struct kvm_io_bus *bus) -{ - memset(bus, 0, sizeof(*bus)); -} - -void kvm_io_bus_destroy(struct kvm_io_bus *bus) +static void kvm_io_bus_destroy(struct kvm_io_bus *bus) { int i; @@ -1963,13 +1969,15 @@ void kvm_io_bus_destroy(struct kvm_io_bus *bus) 
kvm_iodevice_destructor(pos); } + kfree(bus); } /* kvm_io_bus_write - called under kvm->slots_lock */ -int kvm_io_bus_write(struct kvm_io_bus *bus, gpa_t addr, +int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len, const void *val) { int i; + struct kvm_io_bus *bus = rcu_dereference(kvm->buses[bus_idx]); for (i = 0; i < bus->dev_count; i++) if (!kvm_iodevice_write(bus->devs[i], addr, len, val)) return 0; @@ -1977,59 +1985,71 @@ int kvm_io_bus_write(struct kvm_io_bus *bus, gpa_t addr, } /* kvm_io_bus_read - called under kvm->slots_lock */ -int kvm_io_bus_read(struct kvm_io_bus *bus, gpa_t addr, int len, void *val) +int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, + int len, void *val) { int i; + struct kvm_io_bus *bus = rcu_dereference(kvm->buses[bus_idx]); + for (i = 0; i < bus->dev_count; i++) if (!kvm_iodevice_read(bus->devs[i], addr, len, val)) return 0; return -EOPNOTSUPP; } -int kvm_io_bus_register_dev(struct kvm *kvm, struct kvm_io_bus *bus, - struct kvm_io_device *dev) +/* Caller must have write lock on slots_lock. */ +int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, + struct kvm_io_device *dev) { - int ret; - - down_write(&kvm->slots_lock); - ret = __kvm_io_bus_register_dev(bus, dev); - up_write(&kvm->slots_lock); + struct kvm_io_bus *new_bus, *bus; - return ret; -} - -/* An unlocked version. Caller must have write lock on slots_lock. */ -int __kvm_io_bus_register_dev(struct kvm_io_bus *bus, - struct kvm_io_device *dev) -{ + bus = kvm->buses[bus_idx]; if (bus->dev_count > NR_IOBUS_DEVS-1) return -ENOSPC; - bus->devs[bus->dev_count++] = dev; + new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL); + if (!new_bus) + return -ENOMEM; + memcpy(new_bus, bus, sizeof(struct kvm_io_bus)); + new_bus->devs[new_bus->dev_count++] = dev; + rcu_assign_pointer(kvm->buses[bus_idx], new_bus); + synchronize_srcu_expedited(&kvm->srcu); + kfree(bus); return 0; } -void kvm_io_bus_unregister_dev(struct kvm *kvm, - struct kvm_io_bus *bus, - struct kvm_io_device *dev) +/* Caller must have write lock on slots_lock. */ +int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, + struct kvm_io_device *dev) { - down_write(&kvm->slots_lock); - __kvm_io_bus_unregister_dev(bus, dev); - up_write(&kvm->slots_lock); -} + int i, r; + struct kvm_io_bus *new_bus, *bus; -/* An unlocked version. Caller must have write lock on slots_lock. 
*/ -void __kvm_io_bus_unregister_dev(struct kvm_io_bus *bus, - struct kvm_io_device *dev) -{ - int i; + new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL); + if (!new_bus) + return -ENOMEM; - for (i = 0; i < bus->dev_count; i++) - if (bus->devs[i] == dev) { - bus->devs[i] = bus->devs[--bus->dev_count]; + bus = kvm->buses[bus_idx]; + memcpy(new_bus, bus, sizeof(struct kvm_io_bus)); + + r = -ENOENT; + for (i = 0; i < new_bus->dev_count; i++) + if (new_bus->devs[i] == dev) { + r = 0; + new_bus->devs[i] = new_bus->devs[--new_bus->dev_count]; break; } + + if (r) { + kfree(new_bus); + return r; + } + + rcu_assign_pointer(kvm->buses[bus_idx], new_bus); + synchronize_srcu_expedited(&kvm->srcu); + kfree(bus); + return r; } static struct notifier_block kvm_cpu_notifier = { -- cgit v1.2.3-70-g09d2 From f656ce0185cabbbb0cf96877306879661297c7ad Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Wed, 23 Dec 2009 14:35:25 -0200 Subject: KVM: switch vcpu context to use SRCU Signed-off-by: Marcelo Tosatti --- arch/ia64/kvm/kvm-ia64.c | 15 ++++++--------- arch/s390/kvm/kvm-s390.h | 10 +++++++--- arch/x86/kvm/mmu.c | 7 +++---- arch/x86/kvm/vmx.c | 6 +++--- arch/x86/kvm/x86.c | 43 ++++++++++++++++++++++++------------------- include/linux/kvm_host.h | 2 ++ 6 files changed, 45 insertions(+), 38 deletions(-) (limited to 'arch') diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index d0ad538f008..d5e38464127 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -636,12 +636,9 @@ static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu) static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { union context *host_ctx, *guest_ctx; - int r; + int r, idx; - /* - * down_read() may sleep and return with interrupts enabled - */ - down_read(&vcpu->kvm->slots_lock); + idx = srcu_read_lock(&vcpu->kvm->srcu); again: if (signal_pending(current)) { @@ -663,7 +660,7 @@ again: if (r < 0) goto vcpu_run_fail; - up_read(&vcpu->kvm->slots_lock); + srcu_read_unlock(&vcpu->kvm->srcu, idx); kvm_guest_enter(); /* @@ -687,7 +684,7 @@ again: kvm_guest_exit(); preempt_enable(); - down_read(&vcpu->kvm->slots_lock); + idx = srcu_read_lock(&vcpu->kvm->srcu); r = kvm_handle_exit(kvm_run, vcpu); @@ -697,10 +694,10 @@ again: } out: - up_read(&vcpu->kvm->slots_lock); + srcu_read_unlock(&vcpu->kvm->srcu, idx); if (r > 0) { kvm_resched(vcpu); - down_read(&vcpu->kvm->slots_lock); + idx = srcu_read_lock(&vcpu->kvm->srcu); goto again; } diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index 06cce8285ba..60f09ab3672 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -67,10 +67,14 @@ static inline long kvm_s390_vcpu_get_memsize(struct kvm_vcpu *vcpu) static inline void kvm_s390_vcpu_set_mem(struct kvm_vcpu *vcpu) { + int idx; struct kvm_memory_slot *mem; + struct kvm_memslots *memslots; - down_read(&vcpu->kvm->slots_lock); - mem = &vcpu->kvm->memslots[0]; + idx = srcu_read_lock(&vcpu->kvm->srcu); + memslots = rcu_dereference(vcpu->kvm->memslots); + + mem = &memslots->memslots[0]; vcpu->arch.sie_block->gmsor = mem->userspace_addr; vcpu->arch.sie_block->gmslm = @@ -78,7 +82,7 @@ static inline void kvm_s390_vcpu_set_mem(struct kvm_vcpu *vcpu) (mem->npages << PAGE_SHIFT) + VIRTIODESCSPACE - 1ul; - up_read(&vcpu->kvm->slots_lock); + srcu_read_unlock(&vcpu->kvm->srcu, idx); } /* implemented in priv.c */ diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index f8bf42a2599..25aabd00aa0 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -2933,10 +2933,9 @@ 
static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask) spin_lock(&kvm_lock); list_for_each_entry(kvm, &vm_list, vm_list) { - int npages; + int npages, idx; - if (!down_read_trylock(&kvm->slots_lock)) - continue; + idx = srcu_read_lock(&kvm->srcu); spin_lock(&kvm->mmu_lock); npages = kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages; @@ -2949,7 +2948,7 @@ static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask) nr_to_scan--; spin_unlock(&kvm->mmu_lock); - up_read(&kvm->slots_lock); + srcu_read_unlock(&kvm->srcu, idx); } if (kvm_freed) list_move_tail(&kvm_freed->vm_list, &vm_list); diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index f1cae7d6113..22ab7137d1d 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2478,10 +2478,10 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); u64 msr; - int ret; + int ret, idx; vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)); - down_read(&vcpu->kvm->slots_lock); + idx = srcu_read_lock(&vcpu->kvm->srcu); if (!init_rmode(vmx->vcpu.kvm)) { ret = -ENOMEM; goto out; @@ -2589,7 +2589,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu) vmx->emulation_required = 0; out: - up_read(&vcpu->kvm->slots_lock); + srcu_read_unlock(&vcpu->kvm->srcu, idx); return ret; } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 9b42673df4a..53bc06a6810 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1306,15 +1306,15 @@ static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs, int (*do_msr)(struct kvm_vcpu *vcpu, unsigned index, u64 *data)) { - int i; + int i, idx; vcpu_load(vcpu); - down_read(&vcpu->kvm->slots_lock); + idx = srcu_read_lock(&vcpu->kvm->srcu); for (i = 0; i < msrs->nmsrs; ++i) if (do_msr(vcpu, entries[i].index, &entries[i].data)) break; - up_read(&vcpu->kvm->slots_lock); + srcu_read_unlock(&vcpu->kvm->srcu, idx); vcpu_put(vcpu); @@ -3900,14 +3900,15 @@ static void vapic_enter(struct kvm_vcpu *vcpu) static void vapic_exit(struct kvm_vcpu *vcpu) { struct kvm_lapic *apic = vcpu->arch.apic; + int idx; if (!apic || !apic->vapic_addr) return; - down_read(&vcpu->kvm->slots_lock); + idx = srcu_read_lock(&vcpu->kvm->srcu); kvm_release_page_dirty(apic->vapic_page); mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - up_read(&vcpu->kvm->slots_lock); + srcu_read_unlock(&vcpu->kvm->srcu, idx); } static void update_cr8_intercept(struct kvm_vcpu *vcpu) @@ -4036,7 +4037,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) kvm_lapic_sync_to_vapic(vcpu); } - up_read(&vcpu->kvm->slots_lock); + srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); kvm_guest_enter(); @@ -4078,7 +4079,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) preempt_enable(); - down_read(&vcpu->kvm->slots_lock); + vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); /* * Profile KVM exit RIPs: @@ -4100,6 +4101,7 @@ out: static int __vcpu_run(struct kvm_vcpu *vcpu) { int r; + struct kvm *kvm = vcpu->kvm; if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) { pr_debug("vcpu %d received sipi with vector # %x\n", @@ -4111,7 +4113,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; } - down_read(&vcpu->kvm->slots_lock); + vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); vapic_enter(vcpu); r = 1; @@ -4119,9 +4121,9 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE) r = vcpu_enter_guest(vcpu); else { - up_read(&vcpu->kvm->slots_lock); + srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); 
kvm_vcpu_block(vcpu); - down_read(&vcpu->kvm->slots_lock); + vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests)) { switch(vcpu->arch.mp_state) { @@ -4156,13 +4158,13 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) ++vcpu->stat.signal_exits; } if (need_resched()) { - up_read(&vcpu->kvm->slots_lock); + srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); kvm_resched(vcpu); - down_read(&vcpu->kvm->slots_lock); + vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); } } - up_read(&vcpu->kvm->slots_lock); + srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); post_kvm_run_save(vcpu); vapic_exit(vcpu); @@ -4201,10 +4203,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) vcpu->mmio_read_completed = 1; vcpu->mmio_needed = 0; - down_read(&vcpu->kvm->slots_lock); + vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); r = emulate_instruction(vcpu, vcpu->arch.mmio_fault_cr2, 0, EMULTYPE_NO_DECODE); - up_read(&vcpu->kvm->slots_lock); + srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); if (r == EMULATE_DO_MMIO) { /* * Read-modify-write. Back to userspace. @@ -4967,11 +4969,12 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, { unsigned long vaddr = tr->linear_address; gpa_t gpa; + int idx; vcpu_load(vcpu); - down_read(&vcpu->kvm->slots_lock); + idx = srcu_read_lock(&vcpu->kvm->srcu); gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr); - up_read(&vcpu->kvm->slots_lock); + srcu_read_unlock(&vcpu->kvm->srcu, idx); tr->physical_address = gpa; tr->valid = gpa != UNMAPPED_GVA; tr->writeable = 1; @@ -5223,11 +5226,13 @@ fail: void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) { + int idx; + kfree(vcpu->arch.mce_banks); kvm_free_lapic(vcpu); - down_read(&vcpu->kvm->slots_lock); + idx = srcu_read_lock(&vcpu->kvm->srcu); kvm_mmu_destroy(vcpu); - up_read(&vcpu->kvm->slots_lock); + srcu_read_unlock(&vcpu->kvm->srcu, idx); free_page((unsigned long)vcpu->arch.pio_data); } diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 5e9cb902550..0bb9aa295e6 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -83,6 +83,8 @@ struct kvm_vcpu { struct kvm_run *run; unsigned long requests; unsigned long guest_debug; + int srcu_idx; + int fpu_active; int guest_fpu_loaded; wait_queue_head_t wq; -- cgit v1.2.3-70-g09d2 From 79fac95ecfa3969aab8119d37ccd7226165f933a Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Wed, 23 Dec 2009 14:35:26 -0200 Subject: KVM: convert slots_lock to a mutex Signed-off-by: Marcelo Tosatti --- arch/ia64/kvm/kvm-ia64.c | 4 ++-- arch/powerpc/kvm/book3s.c | 4 ++-- arch/x86/kvm/i8254.c | 2 +- arch/x86/kvm/i8259.c | 4 ++-- arch/x86/kvm/vmx.c | 8 ++++---- arch/x86/kvm/x86.c | 16 ++++++++-------- include/linux/kvm_host.h | 2 +- virt/kvm/coalesced_mmio.c | 14 +++++++------- virt/kvm/eventfd.c | 10 +++++----- virt/kvm/ioapic.c | 4 ++-- virt/kvm/kvm_main.c | 10 +++++----- 11 files changed, 39 insertions(+), 39 deletions(-) (limited to 'arch') diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index d5e38464127..e6ac549f8d5 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -1834,7 +1834,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot; int is_dirty = 0; - down_write(&kvm->slots_lock); + mutex_lock(&kvm->slots_lock); spin_lock(&kvm->arch.dirty_log_lock); r = kvm_ia64_sync_dirty_log(kvm, log); @@ -1854,7 +1854,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, } r = 0; out: - up_write(&kvm->slots_lock); + mutex_unlock(&kvm->slots_lock); 
spin_unlock(&kvm->arch.dirty_log_lock); return r; } diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index bb8873dcb20..492dcc198dd 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -857,7 +857,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, int is_dirty = 0; int r, n; - down_write(&kvm->slots_lock); + mutex_lock(&kvm->slots_lock); r = kvm_get_dirty_log(kvm, log, &is_dirty); if (r) @@ -879,7 +879,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, r = 0; out: - up_write(&kvm->slots_lock); + mutex_unlock(&kvm->slots_lock); return r; } diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index 4b433de02e5..6a74246f80c 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c @@ -605,7 +605,7 @@ static const struct kvm_io_device_ops speaker_dev_ops = { .write = speaker_ioport_write, }; -/* Caller must have writers lock on slots_lock */ +/* Caller must hold slots_lock */ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags) { struct kvm_pit *pit; diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c index b7d145b2095..d5753a75d58 100644 --- a/arch/x86/kvm/i8259.c +++ b/arch/x86/kvm/i8259.c @@ -533,9 +533,9 @@ struct kvm_pic *kvm_create_pic(struct kvm *kvm) * Initialize PIO device */ kvm_iodevice_init(&s->dev, &picdev_ops); - down_write(&kvm->slots_lock); + mutex_lock(&kvm->slots_lock); ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, &s->dev); - up_write(&kvm->slots_lock); + mutex_unlock(&kvm->slots_lock); if (ret < 0) { kfree(s); return NULL; diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 22ab7137d1d..f04e2ff2138 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2223,7 +2223,7 @@ static int alloc_apic_access_page(struct kvm *kvm) struct kvm_userspace_memory_region kvm_userspace_mem; int r = 0; - down_write(&kvm->slots_lock); + mutex_lock(&kvm->slots_lock); if (kvm->arch.apic_access_page) goto out; kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT; @@ -2236,7 +2236,7 @@ static int alloc_apic_access_page(struct kvm *kvm) kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00); out: - up_write(&kvm->slots_lock); + mutex_unlock(&kvm->slots_lock); return r; } @@ -2245,7 +2245,7 @@ static int alloc_identity_pagetable(struct kvm *kvm) struct kvm_userspace_memory_region kvm_userspace_mem; int r = 0; - down_write(&kvm->slots_lock); + mutex_lock(&kvm->slots_lock); if (kvm->arch.ept_identity_pagetable) goto out; kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT; @@ -2260,7 +2260,7 @@ static int alloc_identity_pagetable(struct kvm *kvm) kvm->arch.ept_identity_pagetable = gfn_to_page(kvm, kvm->arch.ept_identity_map_addr >> PAGE_SHIFT); out: - up_write(&kvm->slots_lock); + mutex_unlock(&kvm->slots_lock); return r; } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 53bc06a6810..aff3479867a 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2208,14 +2208,14 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm, if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES) return -EINVAL; - down_write(&kvm->slots_lock); + mutex_lock(&kvm->slots_lock); spin_lock(&kvm->mmu_lock); kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages); kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages; spin_unlock(&kvm->mmu_lock); - up_write(&kvm->slots_lock); + mutex_unlock(&kvm->slots_lock); return 0; } @@ -2292,7 +2292,7 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm, if (!aliases) goto out; - down_write(&kvm->slots_lock); + mutex_lock(&kvm->slots_lock); /* invalidate any gfn reference in case of 
deletion/shrinking */ memcpy(aliases, kvm->arch.aliases, sizeof(struct kvm_mem_aliases)); @@ -2328,7 +2328,7 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm, r = 0; out_unlock: - up_write(&kvm->slots_lock); + mutex_unlock(&kvm->slots_lock); out: return r; } @@ -2462,7 +2462,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, unsigned long is_dirty = 0; unsigned long *dirty_bitmap = NULL; - down_write(&kvm->slots_lock); + mutex_lock(&kvm->slots_lock); r = -EINVAL; if (log->slot >= KVM_MEMORY_SLOTS) @@ -2512,7 +2512,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, out_free: vfree(dirty_bitmap); out: - up_write(&kvm->slots_lock); + mutex_unlock(&kvm->slots_lock); return r; } @@ -2625,7 +2625,7 @@ long kvm_arch_vm_ioctl(struct file *filp, sizeof(struct kvm_pit_config))) goto out; create_pit: - down_write(&kvm->slots_lock); + mutex_lock(&kvm->slots_lock); r = -EEXIST; if (kvm->arch.vpit) goto create_pit_unlock; @@ -2634,7 +2634,7 @@ long kvm_arch_vm_ioctl(struct file *filp, if (kvm->arch.vpit) r = 0; create_pit_unlock: - up_write(&kvm->slots_lock); + mutex_unlock(&kvm->slots_lock); break; case KVM_IRQ_LINE_STATUS: case KVM_IRQ_LINE: { diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 0bb9aa295e6..bb0314ea926 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -161,7 +161,7 @@ struct kvm_memslots { struct kvm { spinlock_t mmu_lock; spinlock_t requests_lock; - struct rw_semaphore slots_lock; + struct mutex slots_lock; struct mm_struct *mm; /* userspace tied to this vm */ struct kvm_memslots *memslots; struct srcu_struct srcu; diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c index a736a93ca7b..5de6594260c 100644 --- a/virt/kvm/coalesced_mmio.c +++ b/virt/kvm/coalesced_mmio.c @@ -110,9 +110,9 @@ int kvm_coalesced_mmio_init(struct kvm *kvm) dev->kvm = kvm; kvm->coalesced_mmio_dev = dev; - down_write(&kvm->slots_lock); + mutex_lock(&kvm->slots_lock); ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &dev->dev); - up_write(&kvm->slots_lock); + mutex_unlock(&kvm->slots_lock); if (ret < 0) goto out_free_dev; @@ -140,16 +140,16 @@ int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm, if (dev == NULL) return -EINVAL; - down_write(&kvm->slots_lock); + mutex_lock(&kvm->slots_lock); if (dev->nb_zones >= KVM_COALESCED_MMIO_ZONE_MAX) { - up_write(&kvm->slots_lock); + mutex_unlock(&kvm->slots_lock); return -ENOBUFS; } dev->zone[dev->nb_zones] = *zone; dev->nb_zones++; - up_write(&kvm->slots_lock); + mutex_unlock(&kvm->slots_lock); return 0; } @@ -163,7 +163,7 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm, if (dev == NULL) return -EINVAL; - down_write(&kvm->slots_lock); + mutex_lock(&kvm->slots_lock); i = dev->nb_zones; while(i) { @@ -181,7 +181,7 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm, i--; } - up_write(&kvm->slots_lock); + mutex_unlock(&kvm->slots_lock); return 0; } diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c index 315a586ec4d..486c604365d 100644 --- a/virt/kvm/eventfd.c +++ b/virt/kvm/eventfd.c @@ -508,7 +508,7 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) else p->wildcard = true; - down_write(&kvm->slots_lock); + mutex_lock(&kvm->slots_lock); /* Verify that there isnt a match already */ if (ioeventfd_check_collision(kvm, p)) { @@ -524,12 +524,12 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) list_add_tail(&p->list, &kvm->ioeventfds); - up_write(&kvm->slots_lock); + mutex_unlock(&kvm->slots_lock); return 0; unlock_fail: - 
up_write(&kvm->slots_lock); + mutex_unlock(&kvm->slots_lock); fail: kfree(p); @@ -551,7 +551,7 @@ kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) if (IS_ERR(eventfd)) return PTR_ERR(eventfd); - down_write(&kvm->slots_lock); + mutex_lock(&kvm->slots_lock); list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) { bool wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH); @@ -571,7 +571,7 @@ kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) break; } - up_write(&kvm->slots_lock); + mutex_unlock(&kvm->slots_lock); eventfd_ctx_put(eventfd); diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c index f326a6f301c..f01392f51e8 100644 --- a/virt/kvm/ioapic.c +++ b/virt/kvm/ioapic.c @@ -372,9 +372,9 @@ int kvm_ioapic_init(struct kvm *kvm) kvm_ioapic_reset(ioapic); kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops); ioapic->kvm = kvm; - down_write(&kvm->slots_lock); + mutex_lock(&kvm->slots_lock); ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &ioapic->dev); - up_write(&kvm->slots_lock); + mutex_unlock(&kvm->slots_lock); if (ret < 0) kfree(ioapic); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 659bc12ad16..2b7cd6c0d9c 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -429,7 +429,7 @@ static struct kvm *kvm_create_vm(void) kvm_eventfd_init(kvm); mutex_init(&kvm->lock); mutex_init(&kvm->irq_lock); - init_rwsem(&kvm->slots_lock); + mutex_init(&kvm->slots_lock); atomic_set(&kvm->users_count, 1); spin_lock(&kvm_lock); list_add(&kvm->vm_list, &vm_list); @@ -763,9 +763,9 @@ int kvm_set_memory_region(struct kvm *kvm, { int r; - down_write(&kvm->slots_lock); + mutex_lock(&kvm->slots_lock); r = __kvm_set_memory_region(kvm, mem, user_alloc); - up_write(&kvm->slots_lock); + mutex_unlock(&kvm->slots_lock); return r; } EXPORT_SYMBOL_GPL(kvm_set_memory_region); @@ -1997,7 +1997,7 @@ int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, return -EOPNOTSUPP; } -/* Caller must have write lock on slots_lock. */ +/* Caller must hold slots_lock. */ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, struct kvm_io_device *dev) { @@ -2019,7 +2019,7 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, return 0; } -/* Caller must have write lock on slots_lock. */ +/* Caller must hold slots_lock. */ int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, struct kvm_io_device *dev) { -- cgit v1.2.3-70-g09d2 From 0680fe52753381cb7154beeb01ef3e48f2cdeec6 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sun, 27 Dec 2009 17:00:46 +0200 Subject: KVM: Bump maximum vcpu count to 64 With slots_lock converted to rcu, the entire kvm hotpath on modern processors (with npt or ept) now scales beautifully. Increase the maximum vcpu count to 64 to reflect this. 
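The read-side pattern that the conversions above adopt is the standard SRCU idiom: the index returned by srcu_read_lock() is handed back to the matching srcu_read_unlock(), while writers publish a new memslot array and then wait out existing readers with synchronize_srcu(). A minimal illustration of that idiom, assuming only the kvm->srcu and kvm->memslots fields added by these patches (the function name and the body of the critical section are placeholders, not kernel code):

    static void example_slots_reader(struct kvm *kvm)
    {
            int idx;

            idx = srcu_read_lock(&kvm->srcu);
            /* safe to dereference kvm->memslots until the unlock below */
            srcu_read_unlock(&kvm->srcu, idx);
    }

The same shape appears with vcpu->srcu_idx in the vcpu run loop above, where the read side has to be dropped around blocking points such as kvm_vcpu_block(), kvm_resched() and guest entry, and re-acquired afterwards.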
Signed-off-by: Avi Kivity Signed-off-by: Marcelo Tosatti --- arch/x86/include/asm/kvm_host.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 6c8c7c578c4..741b8972a3a 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -25,7 +25,7 @@ #include #include -#define KVM_MAX_VCPUS 16 +#define KVM_MAX_VCPUS 64 #define KVM_MEMORY_SLOTS 32 /* memory slots that does not exposed to userspace */ #define KVM_PRIVATE_MEM_SLOTS 4 -- cgit v1.2.3-70-g09d2 From f4c9e87c83a9f5bc1800db27dbb39e5cd1254c0a Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Mon, 28 Dec 2009 16:06:35 +0200 Subject: KVM: Fill out ftrace exit reason strings Some exit reasons missed their strings; fill out the table. Signed-off-by: Avi Kivity Signed-off-by: Marcelo Tosatti --- arch/x86/kvm/vmx.c | 58 ++++++++++++++++++++++++++++++++++++------------------ 1 file changed, 39 insertions(+), 19 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index f04e2ff2138..f4486f46027 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -3993,29 +3993,49 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) return ret; } +#define _ER(x) { EXIT_REASON_##x, #x } + static const struct trace_print_flags vmx_exit_reasons_str[] = { - { EXIT_REASON_EXCEPTION_NMI, "exception" }, - { EXIT_REASON_EXTERNAL_INTERRUPT, "ext_irq" }, - { EXIT_REASON_TRIPLE_FAULT, "triple_fault" }, - { EXIT_REASON_NMI_WINDOW, "nmi_window" }, - { EXIT_REASON_IO_INSTRUCTION, "io_instruction" }, - { EXIT_REASON_CR_ACCESS, "cr_access" }, - { EXIT_REASON_DR_ACCESS, "dr_access" }, - { EXIT_REASON_CPUID, "cpuid" }, - { EXIT_REASON_MSR_READ, "rdmsr" }, - { EXIT_REASON_MSR_WRITE, "wrmsr" }, - { EXIT_REASON_PENDING_INTERRUPT, "interrupt_window" }, - { EXIT_REASON_HLT, "halt" }, - { EXIT_REASON_INVLPG, "invlpg" }, - { EXIT_REASON_VMCALL, "hypercall" }, - { EXIT_REASON_TPR_BELOW_THRESHOLD, "tpr_below_thres" }, - { EXIT_REASON_APIC_ACCESS, "apic_access" }, - { EXIT_REASON_WBINVD, "wbinvd" }, - { EXIT_REASON_TASK_SWITCH, "task_switch" }, - { EXIT_REASON_EPT_VIOLATION, "ept_violation" }, + _ER(EXCEPTION_NMI), + _ER(EXTERNAL_INTERRUPT), + _ER(TRIPLE_FAULT), + _ER(PENDING_INTERRUPT), + _ER(NMI_WINDOW), + _ER(TASK_SWITCH), + _ER(CPUID), + _ER(HLT), + _ER(INVLPG), + _ER(RDPMC), + _ER(RDTSC), + _ER(VMCALL), + _ER(VMCLEAR), + _ER(VMLAUNCH), + _ER(VMPTRLD), + _ER(VMPTRST), + _ER(VMREAD), + _ER(VMRESUME), + _ER(VMWRITE), + _ER(VMOFF), + _ER(VMON), + _ER(CR_ACCESS), + _ER(DR_ACCESS), + _ER(IO_INSTRUCTION), + _ER(MSR_READ), + _ER(MSR_WRITE), + _ER(MWAIT_INSTRUCTION), + _ER(MONITOR_INSTRUCTION), + _ER(PAUSE_INSTRUCTION), + _ER(MCE_DURING_VMENTRY), + _ER(TPR_BELOW_THRESHOLD), + _ER(APIC_ACCESS), + _ER(EPT_VIOLATION), + _ER(EPT_MISCONFIG), + _ER(WBINVD), { -1, NULL } }; +#undef _ER + static bool vmx_gb_page_enable(void) { return false; -- cgit v1.2.3-70-g09d2 From b480f780f071a068810ccd0e49c1daa210bfbeab Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Mon, 4 Jan 2010 22:19:22 +0100 Subject: KVM: PPC: Fix typo in rebolting code When we're loading bolted entries into the SLB again, we're checking if an entry is in use and only slbmte it when it is. Unfortunately, the check always goes to the skip label of the first entry, resulting in an endless loop when it actually gets triggered. 
Signed-off-by: Alexander Graf Signed-off-by: Marcelo Tosatti --- arch/powerpc/kvm/book3s_64_slb.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/kvm/book3s_64_slb.S b/arch/powerpc/kvm/book3s_64_slb.S index ecd237a03fd..8e447886666 100644 --- a/arch/powerpc/kvm/book3s_64_slb.S +++ b/arch/powerpc/kvm/book3s_64_slb.S @@ -31,7 +31,7 @@ #define REBOLT_SLB_ENTRY(num) \ ld r10, SHADOW_SLB_ESID(num)(r11); \ cmpdi r10, 0; \ - beq slb_exit_skip_1; \ + beq slb_exit_skip_ ## num; \ oris r10, r10, SLB_ESID_V@h; \ ld r9, SHADOW_SLB_VSID(num)(r11); \ slbmte r9, r10; \ -- cgit v1.2.3-70-g09d2 From 97c4cfbe890a4ad82dde8660008d42b7b05dc488 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Mon, 4 Jan 2010 22:19:25 +0100 Subject: KVM: PPC: Enable lightweight exits again The PowerPC C ABI defines that registers r14-r31 need to be preserved across function calls. Since our exit handler is written in C, we can make use of that and don't need to reload r14-r31 on every entry/exit cycle. This technique is also used in the BookE code and is called "lightweight exits" there. To follow the tradition, it's called the same in Book3S. So far this optimization was disabled though, as the code didn't do what it was expected to do, but failed to work. This patch fixes and enables lightweight exits again. Signed-off-by: Alexander Graf Signed-off-by: Marcelo Tosatti --- arch/powerpc/kvm/book3s.c | 4 +- arch/powerpc/kvm/book3s_64_interrupts.S | 106 +++++++++++++++++--------------- 2 files changed, 57 insertions(+), 53 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 492dcc198dd..fd2a4d53158 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -539,8 +539,6 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, r = kvmppc_emulate_mmio(run, vcpu); if ( r == RESUME_HOST_NV ) r = RESUME_HOST; - if ( r == RESUME_GUEST_NV ) - r = RESUME_GUEST; } return r; @@ -645,7 +643,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, er = kvmppc_emulate_instruction(run, vcpu); switch (er) { case EMULATE_DONE: - r = RESUME_GUEST; + r = RESUME_GUEST_NV; break; case EMULATE_FAIL: printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n", diff --git a/arch/powerpc/kvm/book3s_64_interrupts.S b/arch/powerpc/kvm/book3s_64_interrupts.S index 7b55d8094c8..d95d0d967d5 100644 --- a/arch/powerpc/kvm/book3s_64_interrupts.S +++ b/arch/powerpc/kvm/book3s_64_interrupts.S @@ -40,6 +40,26 @@ mtmsrd r0,1 .endm +#define VCPU_LOAD_NVGPRS(vcpu) \ + ld r14, VCPU_GPR(r14)(vcpu); \ + ld r15, VCPU_GPR(r15)(vcpu); \ + ld r16, VCPU_GPR(r16)(vcpu); \ + ld r17, VCPU_GPR(r17)(vcpu); \ + ld r18, VCPU_GPR(r18)(vcpu); \ + ld r19, VCPU_GPR(r19)(vcpu); \ + ld r20, VCPU_GPR(r20)(vcpu); \ + ld r21, VCPU_GPR(r21)(vcpu); \ + ld r22, VCPU_GPR(r22)(vcpu); \ + ld r23, VCPU_GPR(r23)(vcpu); \ + ld r24, VCPU_GPR(r24)(vcpu); \ + ld r25, VCPU_GPR(r25)(vcpu); \ + ld r26, VCPU_GPR(r26)(vcpu); \ + ld r27, VCPU_GPR(r27)(vcpu); \ + ld r28, VCPU_GPR(r28)(vcpu); \ + ld r29, VCPU_GPR(r29)(vcpu); \ + ld r30, VCPU_GPR(r30)(vcpu); \ + ld r31, VCPU_GPR(r31)(vcpu); \ + /***************************************************************************** * * * Guest entry / exit code that is in kernel module memory (highmem) * @@ -67,12 +87,16 @@ kvm_start_entry: SAVE_NVGPRS(r1) /* Save LR */ - mflr r14 - std r14, _LINK(r1) + std r0, _LINK(r1) + + /* Load non-volatile guest state from the vcpu */ + VCPU_LOAD_NVGPRS(r4) -/* XXX optimize 
non-volatile loading away */ kvm_start_lightweight: + ld r9, VCPU_PC(r4) /* r9 = vcpu->arch.pc */ + ld r10, VCPU_SHADOW_MSR(r4) /* r10 = vcpu->arch.shadow_msr */ + DISABLE_INTERRUPTS /* Save R1/R2 in the PACA */ @@ -81,29 +105,6 @@ kvm_start_lightweight: ld r3, VCPU_HIGHMEM_HANDLER(r4) std r3, PACASAVEDMSR(r13) - /* Load non-volatile guest state from the vcpu */ - ld r14, VCPU_GPR(r14)(r4) - ld r15, VCPU_GPR(r15)(r4) - ld r16, VCPU_GPR(r16)(r4) - ld r17, VCPU_GPR(r17)(r4) - ld r18, VCPU_GPR(r18)(r4) - ld r19, VCPU_GPR(r19)(r4) - ld r20, VCPU_GPR(r20)(r4) - ld r21, VCPU_GPR(r21)(r4) - ld r22, VCPU_GPR(r22)(r4) - ld r23, VCPU_GPR(r23)(r4) - ld r24, VCPU_GPR(r24)(r4) - ld r25, VCPU_GPR(r25)(r4) - ld r26, VCPU_GPR(r26)(r4) - ld r27, VCPU_GPR(r27)(r4) - ld r28, VCPU_GPR(r28)(r4) - ld r29, VCPU_GPR(r29)(r4) - ld r30, VCPU_GPR(r30)(r4) - ld r31, VCPU_GPR(r31)(r4) - - ld r9, VCPU_PC(r4) /* r9 = vcpu->arch.pc */ - ld r10, VCPU_SHADOW_MSR(r4) /* r10 = vcpu->arch.shadow_msr */ - ld r3, VCPU_TRAMPOLINE_ENTER(r4) mtsrr0 r3 @@ -247,7 +248,6 @@ kvmppc_handler_highmem: no_dcbz32_off: - /* XXX maybe skip on lightweight? */ std r14, VCPU_GPR(r14)(r12) std r15, VCPU_GPR(r15)(r12) std r16, VCPU_GPR(r16)(r12) @@ -267,9 +267,6 @@ no_dcbz32_off: std r30, VCPU_GPR(r30)(r12) std r31, VCPU_GPR(r31)(r12) - /* Restore non-volatile host registers (r14 - r31) */ - REST_NVGPRS(r1) - /* Save guest PC (R10) */ std r10, VCPU_PC(r12) @@ -351,42 +348,51 @@ kvm_return_point: /* Jump back to lightweight entry if we're supposed to */ /* go back into the guest */ + + /* Pass the exit number as 3rd argument to kvmppc_handle_exit */ mr r5, r3 + /* Restore r3 (kvm_run) and r4 (vcpu) */ REST_2GPRS(3, r1) bl KVMPPC_HANDLE_EXIT -#if 0 /* XXX get lightweight exits back */ + /* If RESUME_GUEST, get back in the loop */ cmpwi r3, RESUME_GUEST - bne kvm_exit_heavyweight + beq kvm_loop_lightweight - /* put VCPU and KVM_RUN back into place and roll again! */ - REST_2GPRS(3, r1) - b kvm_start_lightweight + cmpwi r3, RESUME_GUEST_NV + beq kvm_loop_heavyweight -kvm_exit_heavyweight: - /* Restore non-volatile host registers */ - ld r14, _LINK(r1) - mtlr r14 - REST_NVGPRS(r1) +kvm_exit_loop: - addi r1, r1, SWITCH_FRAME_SIZE -#else ld r4, _LINK(r1) mtlr r4 - cmpwi r3, RESUME_GUEST - bne kvm_exit_heavyweight + /* Restore non-volatile host registers (r14 - r31) */ + REST_NVGPRS(r1) + + addi r1, r1, SWITCH_FRAME_SIZE + blr + +kvm_loop_heavyweight: + ld r4, _LINK(r1) + std r4, (16 + SWITCH_FRAME_SIZE)(r1) + + /* Load vcpu and cpu_run */ REST_2GPRS(3, r1) - addi r1, r1, SWITCH_FRAME_SIZE + /* Load non-volatile guest state from the vcpu */ + VCPU_LOAD_NVGPRS(r4) - b kvm_start_entry + /* Jump back into the beginning of this function */ + b kvm_start_lightweight -kvm_exit_heavyweight: +kvm_loop_lightweight: - addi r1, r1, SWITCH_FRAME_SIZE -#endif + /* We'll need the vcpu pointer */ + REST_GPR(4, r1) + + /* Jump back into the beginning of this function */ + b kvm_start_lightweight - blr -- cgit v1.2.3-70-g09d2 From c9c5417455b0c2e3c164883354b5480e5aee3b36 Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Tue, 5 Jan 2010 19:02:26 +0800 Subject: KVM: x86: Moving PT_*_LEVEL to mmu.h We can use them in x86.c and vmx.c now... 
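For orientation in the hunks that follow: the three constants name x86 paging levels, with PT_PAGE_TABLE_LEVEL, PT_DIRECTORY_LEVEL and PT_PDPE_LEVEL corresponding to 4 KiB, 2 MiB and 1 GiB mappings, and the next patches clamp the level used for a guest mapping to whatever the vendor module reports. A simplified stand-alone sketch of that clamping, with made-up helpers standing in for kvm_x86_ops->get_lpage_level() and the host-side limit (only the PT_*_LEVEL values are taken from the patch):

    #define PT_PAGE_TABLE_LEVEL 1   /* 4 KiB pages */
    #define PT_DIRECTORY_LEVEL  2   /* 2 MiB pages */
    #define PT_PDPE_LEVEL       3   /* 1 GiB pages */

    /* Stand-ins for the vendor callback and the host backing limit. */
    static int vendor_max_lpage_level(void) { return PT_DIRECTORY_LEVEL; }
    static int host_backing_level(void)     { return PT_PDPE_LEVEL; }

    /* Never map a guest page larger than either side supports. */
    static int pick_mapping_level(void)
    {
            int host  = host_backing_level();
            int limit = vendor_max_lpage_level();

            return host < limit ? host : limit;
    }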
Signed-off-by: Sheng Yang Signed-off-by: Marcelo Tosatti --- arch/x86/kvm/mmu.c | 4 ---- arch/x86/kvm/mmu.h | 4 ++++ 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 25aabd00aa0..12ccf14f853 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -143,10 +143,6 @@ module_param(oos_shadow, bool, 0644); #define PFERR_RSVD_MASK (1U << 3) #define PFERR_FETCH_MASK (1U << 4) -#define PT_PDPE_LEVEL 3 -#define PT_DIRECTORY_LEVEL 2 -#define PT_PAGE_TABLE_LEVEL 1 - #define RMAP_EXT 4 #define ACC_EXEC_MASK 1 diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index 4567d8042b2..ff583423968 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -38,6 +38,10 @@ #define PT32_ROOT_LEVEL 2 #define PT32E_ROOT_LEVEL 3 +#define PT_PDPE_LEVEL 3 +#define PT_DIRECTORY_LEVEL 2 +#define PT_PAGE_TABLE_LEVEL 1 + int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]); static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu) -- cgit v1.2.3-70-g09d2 From 17cc393596823f4bbab81e68a9e23e7beadbcfca Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Tue, 5 Jan 2010 19:02:27 +0800 Subject: KVM: x86: Rename gb_page_enable() to get_lpage_level() in kvm_x86_ops Then the callback can provide the maximum supported large page level, which is more flexible. Also move the gb page support into x86_64 specific. Signed-off-by: Sheng Yang Signed-off-by: Marcelo Tosatti --- arch/x86/include/asm/kvm_host.h | 2 +- arch/x86/kvm/svm.c | 6 +++--- arch/x86/kvm/vmx.c | 6 +++--- arch/x86/kvm/x86.c | 4 +++- 4 files changed, 10 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 741b8972a3a..a4de557ad73 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -528,7 +528,7 @@ struct kvm_x86_ops { int (*set_tss_addr)(struct kvm *kvm, unsigned int addr); int (*get_tdp_level)(void); u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio); - bool (*gb_page_enable)(void); + int (*get_lpage_level)(void); bool (*rdtscp_supported)(void); const struct trace_print_flags *exit_reasons_str; diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index b373ae6fb97..cf64fc026e3 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -2911,9 +2911,9 @@ static const struct trace_print_flags svm_exit_reasons_str[] = { { -1, NULL } }; -static bool svm_gb_page_enable(void) +static int svm_get_lpage_level(void) { - return true; + return PT_PDPE_LEVEL; } static bool svm_rdtscp_supported(void) @@ -2986,7 +2986,7 @@ static struct kvm_x86_ops svm_x86_ops = { .get_mt_mask = svm_get_mt_mask, .exit_reasons_str = svm_exit_reasons_str, - .gb_page_enable = svm_gb_page_enable, + .get_lpage_level = svm_get_lpage_level, .cpuid_update = svm_cpuid_update, diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index f4486f46027..0fd0892553e 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -4036,9 +4036,9 @@ static const struct trace_print_flags vmx_exit_reasons_str[] = { #undef _ER -static bool vmx_gb_page_enable(void) +static int vmx_get_lpage_level(void) { - return false; + return PT_DIRECTORY_LEVEL; } static inline u32 bit(int bitno) @@ -4131,7 +4131,7 @@ static struct kvm_x86_ops vmx_x86_ops = { .get_mt_mask = vmx_get_mt_mask, .exit_reasons_str = vmx_exit_reasons_str, - .gb_page_enable = vmx_gb_page_enable, + .get_lpage_level = vmx_get_lpage_level, .cpuid_update = vmx_cpuid_update, diff --git a/arch/x86/kvm/x86.c 
b/arch/x86/kvm/x86.c index aff3479867a..c990424d86d 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1641,10 +1641,12 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, u32 index, int *nent, int maxnent) { unsigned f_nx = is_efer_nx() ? F(NX) : 0; - unsigned f_gbpages = kvm_x86_ops->gb_page_enable() ? F(GBPAGES) : 0; #ifdef CONFIG_X86_64 + unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL) + ? F(GBPAGES) : 0; unsigned f_lm = F(LM); #else + unsigned f_gbpages = 0; unsigned f_lm = 0; #endif unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0; -- cgit v1.2.3-70-g09d2 From 878403b788bff1af9c7f1a61e104f0c77115af29 Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Tue, 5 Jan 2010 19:02:29 +0800 Subject: KVM: VMX: Enable EPT 1GB page support Signed-off-by: Sheng Yang Signed-off-by: Marcelo Tosatti --- arch/x86/include/asm/vmx.h | 1 + arch/x86/kvm/mmu.c | 8 +++++--- arch/x86/kvm/vmx.c | 11 ++++++++++- 3 files changed, 16 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index 713ed9a5b1d..43f1e9b4591 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -364,6 +364,7 @@ enum vmcs_field { #define VMX_EPTP_UC_BIT (1ull << 8) #define VMX_EPTP_WB_BIT (1ull << 14) #define VMX_EPT_2MB_PAGE_BIT (1ull << 16) +#define VMX_EPT_1GB_PAGE_BIT (1ull << 17) #define VMX_EPT_EXTENT_INDIVIDUAL_BIT (1ull << 24) #define VMX_EPT_EXTENT_CONTEXT_BIT (1ull << 25) #define VMX_EPT_EXTENT_GLOBAL_BIT (1ull << 26) diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 12ccf14f853..4f5508c3510 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -500,8 +500,7 @@ out: static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn) { struct kvm_memory_slot *slot; - int host_level; - int level = PT_PAGE_TABLE_LEVEL; + int host_level, level, max_level; slot = gfn_to_memslot(vcpu->kvm, large_gfn); if (slot && slot->dirty_bitmap) @@ -512,7 +511,10 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn) if (host_level == PT_PAGE_TABLE_LEVEL) return host_level; - for (level = PT_DIRECTORY_LEVEL; level <= host_level; ++level) + max_level = kvm_x86_ops->get_lpage_level() < host_level ? 
+ kvm_x86_ops->get_lpage_level() : host_level; + + for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level) if (has_wrprotected_page(vcpu->kvm, large_gfn, level)) break; diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 0fd0892553e..9b197b25b66 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -318,6 +318,11 @@ static inline bool cpu_has_vmx_ept_2m_page(void) return !!(vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT); } +static inline bool cpu_has_vmx_ept_1g_page(void) +{ + return !!(vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT); +} + static inline int cpu_has_vmx_invept_individual_addr(void) { return !!(vmx_capability.ept & VMX_EPT_EXTENT_INDIVIDUAL_BIT); @@ -4038,7 +4043,11 @@ static const struct trace_print_flags vmx_exit_reasons_str[] = { static int vmx_get_lpage_level(void) { - return PT_DIRECTORY_LEVEL; + if (enable_ept && !cpu_has_vmx_ept_1g_page()) + return PT_DIRECTORY_LEVEL; + else + /* For shadow and EPT supported 1GB page */ + return PT_PDPE_LEVEL; } static inline u32 bit(int bitno) -- cgit v1.2.3-70-g09d2 From 0d178975d0a5afe5e0fd3211bd1397905b225be5 Mon Sep 17 00:00:00 2001 From: Takuya Yoshikawa Date: Wed, 6 Jan 2010 17:55:23 +0900 Subject: KVM: Fix the explanation of write_emulated The explanation of write_emulated is confused with that of read_emulated. This patch fixes it. Signed-off-by: Takuya Yoshikawa Signed-off-by: Marcelo Tosatti --- arch/x86/include/asm/kvm_emulate.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h index 7c18e1230f5..9b697c2735d 100644 --- a/arch/x86/include/asm/kvm_emulate.h +++ b/arch/x86/include/asm/kvm_emulate.h @@ -74,7 +74,7 @@ struct x86_emulate_ops { struct kvm_vcpu *vcpu); /* - * write_emulated: Read bytes from emulated/special memory area. + * write_emulated: Write bytes to emulated/special memory area. * @addr: [IN ] Linear address to which to write. * @val: [IN ] Value to write to memory (low-order bytes used as * required). -- cgit v1.2.3-70-g09d2 From 8e5b26b55a8b6aee2c789b1d20ec715f9e4bea5c Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Fri, 8 Jan 2010 02:58:01 +0100 Subject: KVM: PPC: Use accessor functions for GPR access All code in PPC KVM currently accesses gprs in the vcpu struct directly. While there's nothing wrong with that wrt the current way gprs are stored and loaded, it doesn't suffice for the PACA acceleration that will follow in this patchset. So let's just create little wrapper inline functions that we call whenever a GPR needs to be read from or written to. The compiled code shouldn't really change at all for now.
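The conversion below is mechanical: every direct vcpu->arch.gpr[...] access becomes a call through the new inline helpers, so the storage behind the registers can change later without touching the emulation code again. A toy stand-alone model of the idea, assuming nothing from the kernel beyond the helper names introduced in this patch (the struct here is a placeholder, not the real vcpu):

    #include <stdio.h>

    struct toy_vcpu {
            unsigned long gpr[32];
    };

    /* Same shape as the wrappers added below: today they just index the
     * array, but call sites no longer care where the registers live.
     */
    static inline void kvmppc_set_gpr(struct toy_vcpu *vcpu, int num, unsigned long val)
    {
            vcpu->gpr[num] = val;
    }

    static inline unsigned long kvmppc_get_gpr(struct toy_vcpu *vcpu, int num)
    {
            return vcpu->gpr[num];
    }

    int main(void)
    {
            struct toy_vcpu vcpu = { { 0 } };

            kvmppc_set_gpr(&vcpu, 3, 0x1234);   /* was: vcpu.gpr[3] = 0x1234; */
            printf("r3 = %#lx\n", kvmppc_get_gpr(&vcpu, 3));
            return 0;
    }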
Signed-off-by: Alexander Graf Signed-off-by: Avi Kivity --- arch/powerpc/include/asm/kvm_ppc.h | 26 +++++++++ arch/powerpc/kvm/44x_emulate.c | 25 ++++---- arch/powerpc/kvm/44x_tlb.c | 14 ++--- arch/powerpc/kvm/book3s.c | 8 +-- arch/powerpc/kvm/book3s_64_emulate.c | 77 +++++++++++++------------ arch/powerpc/kvm/booke.c | 16 +++--- arch/powerpc/kvm/booke_emulate.c | 107 ++++++++++++++++++----------------- arch/powerpc/kvm/e500_emulate.c | 95 +++++++++++++++++-------------- arch/powerpc/kvm/e500_tlb.c | 4 +- arch/powerpc/kvm/emulate.c | 106 ++++++++++++++++++---------------- arch/powerpc/kvm/powerpc.c | 21 +++---- 11 files changed, 274 insertions(+), 225 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index abfd0c4d567..ba01b9c1d38 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -96,4 +96,30 @@ extern void kvmppc_booke_exit(void); extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu); +#ifdef CONFIG_PPC_BOOK3S + +static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val) +{ + vcpu->arch.gpr[num] = val; +} + +static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num) +{ + return vcpu->arch.gpr[num]; +} + +#else + +static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val) +{ + vcpu->arch.gpr[num] = val; +} + +static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num) +{ + return vcpu->arch.gpr[num]; +} + +#endif + #endif /* __POWERPC_KVM_PPC_H__ */ diff --git a/arch/powerpc/kvm/44x_emulate.c b/arch/powerpc/kvm/44x_emulate.c index 61af58fcece..65ea083a5b2 100644 --- a/arch/powerpc/kvm/44x_emulate.c +++ b/arch/powerpc/kvm/44x_emulate.c @@ -65,13 +65,14 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, */ switch (dcrn) { case DCRN_CPR0_CONFIG_ADDR: - vcpu->arch.gpr[rt] = vcpu->arch.cpr0_cfgaddr; + kvmppc_set_gpr(vcpu, rt, vcpu->arch.cpr0_cfgaddr); break; case DCRN_CPR0_CONFIG_DATA: local_irq_disable(); mtdcr(DCRN_CPR0_CONFIG_ADDR, vcpu->arch.cpr0_cfgaddr); - vcpu->arch.gpr[rt] = mfdcr(DCRN_CPR0_CONFIG_DATA); + kvmppc_set_gpr(vcpu, rt, + mfdcr(DCRN_CPR0_CONFIG_DATA)); local_irq_enable(); break; default: @@ -93,11 +94,11 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, /* emulate some access in kernel */ switch (dcrn) { case DCRN_CPR0_CONFIG_ADDR: - vcpu->arch.cpr0_cfgaddr = vcpu->arch.gpr[rs]; + vcpu->arch.cpr0_cfgaddr = kvmppc_get_gpr(vcpu, rs); break; default: run->dcr.dcrn = dcrn; - run->dcr.data = vcpu->arch.gpr[rs]; + run->dcr.data = kvmppc_get_gpr(vcpu, rs); run->dcr.is_write = 1; vcpu->arch.dcr_needed = 1; kvmppc_account_exit(vcpu, DCR_EXITS); @@ -146,13 +147,13 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) switch (sprn) { case SPRN_PID: - kvmppc_set_pid(vcpu, vcpu->arch.gpr[rs]); break; + kvmppc_set_pid(vcpu, kvmppc_get_gpr(vcpu, rs)); break; case SPRN_MMUCR: - vcpu->arch.mmucr = vcpu->arch.gpr[rs]; break; + vcpu->arch.mmucr = kvmppc_get_gpr(vcpu, rs); break; case SPRN_CCR0: - vcpu->arch.ccr0 = vcpu->arch.gpr[rs]; break; + vcpu->arch.ccr0 = kvmppc_get_gpr(vcpu, rs); break; case SPRN_CCR1: - vcpu->arch.ccr1 = vcpu->arch.gpr[rs]; break; + vcpu->arch.ccr1 = kvmppc_get_gpr(vcpu, rs); break; default: emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, rs); } @@ -167,13 +168,13 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) switch (sprn) { case SPRN_PID: - vcpu->arch.gpr[rt] = vcpu->arch.pid; break; + kvmppc_set_gpr(vcpu, rt, 
vcpu->arch.pid); break; case SPRN_MMUCR: - vcpu->arch.gpr[rt] = vcpu->arch.mmucr; break; + kvmppc_set_gpr(vcpu, rt, vcpu->arch.mmucr); break; case SPRN_CCR0: - vcpu->arch.gpr[rt] = vcpu->arch.ccr0; break; + kvmppc_set_gpr(vcpu, rt, vcpu->arch.ccr0); break; case SPRN_CCR1: - vcpu->arch.gpr[rt] = vcpu->arch.ccr1; break; + kvmppc_set_gpr(vcpu, rt, vcpu->arch.ccr1); break; default: emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt); } diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c index ff3cb63b811..8b3773669c9 100644 --- a/arch/powerpc/kvm/44x_tlb.c +++ b/arch/powerpc/kvm/44x_tlb.c @@ -439,7 +439,7 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws) struct kvmppc_44x_tlbe *tlbe; unsigned int gtlb_index; - gtlb_index = vcpu->arch.gpr[ra]; + gtlb_index = kvmppc_get_gpr(vcpu, ra); if (gtlb_index > KVM44x_GUEST_TLB_SIZE) { printk("%s: index %d\n", __func__, gtlb_index); kvmppc_dump_vcpu(vcpu); @@ -455,15 +455,15 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws) switch (ws) { case PPC44x_TLB_PAGEID: tlbe->tid = get_mmucr_stid(vcpu); - tlbe->word0 = vcpu->arch.gpr[rs]; + tlbe->word0 = kvmppc_get_gpr(vcpu, rs); break; case PPC44x_TLB_XLAT: - tlbe->word1 = vcpu->arch.gpr[rs]; + tlbe->word1 = kvmppc_get_gpr(vcpu, rs); break; case PPC44x_TLB_ATTRIB: - tlbe->word2 = vcpu->arch.gpr[rs]; + tlbe->word2 = kvmppc_get_gpr(vcpu, rs); break; default: @@ -500,9 +500,9 @@ int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, u8 rc) unsigned int as = get_mmucr_sts(vcpu); unsigned int pid = get_mmucr_stid(vcpu); - ea = vcpu->arch.gpr[rb]; + ea = kvmppc_get_gpr(vcpu, rb); if (ra) - ea += vcpu->arch.gpr[ra]; + ea += kvmppc_get_gpr(vcpu, ra); gtlb_index = kvmppc_44x_tlb_index(vcpu, ea, pid, as); if (rc) { @@ -511,7 +511,7 @@ int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, u8 rc) else vcpu->arch.cr |= 0x20000000; } - vcpu->arch.gpr[rt] = gtlb_index; + kvmppc_set_gpr(vcpu, rt, gtlb_index); kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS); return EMULATE_DONE; diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index fd2a4d53158..574b24fb6e0 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -658,7 +658,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, } case BOOK3S_INTERRUPT_SYSCALL: #ifdef EXIT_DEBUG - printk(KERN_INFO "Syscall Nr %d\n", (int)vcpu->arch.gpr[0]); + printk(KERN_INFO "Syscall Nr %d\n", (int)kvmppc_get_gpr(vcpu, 0)); #endif vcpu->stat.syscall_exits++; kvmppc_book3s_queue_irqprio(vcpu, exit_nr); @@ -734,7 +734,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) regs->sprg7 = vcpu->arch.sprg6; for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) - regs->gpr[i] = vcpu->arch.gpr[i]; + regs->gpr[i] = kvmppc_get_gpr(vcpu, i); return 0; } @@ -759,8 +759,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) vcpu->arch.sprg6 = regs->sprg5; vcpu->arch.sprg7 = regs->sprg6; - for (i = 0; i < ARRAY_SIZE(vcpu->arch.gpr); i++) - vcpu->arch.gpr[i] = regs->gpr[i]; + for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) + kvmppc_set_gpr(vcpu, i, regs->gpr[i]); return 0; } diff --git a/arch/powerpc/kvm/book3s_64_emulate.c b/arch/powerpc/kvm/book3s_64_emulate.c index 1027eac6d47..2b0ee7e040c 100644 --- a/arch/powerpc/kvm/book3s_64_emulate.c +++ b/arch/powerpc/kvm/book3s_64_emulate.c @@ -65,11 +65,11 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, case 31: switch (get_xop(inst)) { case 
OP_31_XOP_MFMSR: - vcpu->arch.gpr[get_rt(inst)] = vcpu->arch.msr; + kvmppc_set_gpr(vcpu, get_rt(inst), vcpu->arch.msr); break; case OP_31_XOP_MTMSRD: { - ulong rs = vcpu->arch.gpr[get_rs(inst)]; + ulong rs = kvmppc_get_gpr(vcpu, get_rs(inst)); if (inst & 0x10000) { vcpu->arch.msr &= ~(MSR_RI | MSR_EE); vcpu->arch.msr |= rs & (MSR_RI | MSR_EE); @@ -78,30 +78,30 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, break; } case OP_31_XOP_MTMSR: - kvmppc_set_msr(vcpu, vcpu->arch.gpr[get_rs(inst)]); + kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, get_rs(inst))); break; case OP_31_XOP_MFSRIN: { int srnum; - srnum = (vcpu->arch.gpr[get_rb(inst)] >> 28) & 0xf; + srnum = (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf; if (vcpu->arch.mmu.mfsrin) { u32 sr; sr = vcpu->arch.mmu.mfsrin(vcpu, srnum); - vcpu->arch.gpr[get_rt(inst)] = sr; + kvmppc_set_gpr(vcpu, get_rt(inst), sr); } break; } case OP_31_XOP_MTSRIN: vcpu->arch.mmu.mtsrin(vcpu, - (vcpu->arch.gpr[get_rb(inst)] >> 28) & 0xf, - vcpu->arch.gpr[get_rs(inst)]); + (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf, + kvmppc_get_gpr(vcpu, get_rs(inst))); break; case OP_31_XOP_TLBIE: case OP_31_XOP_TLBIEL: { bool large = (inst & 0x00200000) ? true : false; - ulong addr = vcpu->arch.gpr[get_rb(inst)]; + ulong addr = kvmppc_get_gpr(vcpu, get_rb(inst)); vcpu->arch.mmu.tlbie(vcpu, addr, large); break; } @@ -111,14 +111,16 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, if (!vcpu->arch.mmu.slbmte) return EMULATE_FAIL; - vcpu->arch.mmu.slbmte(vcpu, vcpu->arch.gpr[get_rs(inst)], - vcpu->arch.gpr[get_rb(inst)]); + vcpu->arch.mmu.slbmte(vcpu, + kvmppc_get_gpr(vcpu, get_rs(inst)), + kvmppc_get_gpr(vcpu, get_rb(inst))); break; case OP_31_XOP_SLBIE: if (!vcpu->arch.mmu.slbie) return EMULATE_FAIL; - vcpu->arch.mmu.slbie(vcpu, vcpu->arch.gpr[get_rb(inst)]); + vcpu->arch.mmu.slbie(vcpu, + kvmppc_get_gpr(vcpu, get_rb(inst))); break; case OP_31_XOP_SLBIA: if (!vcpu->arch.mmu.slbia) @@ -132,9 +134,9 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, } else { ulong t, rb; - rb = vcpu->arch.gpr[get_rb(inst)]; + rb = kvmppc_get_gpr(vcpu, get_rb(inst)); t = vcpu->arch.mmu.slbmfee(vcpu, rb); - vcpu->arch.gpr[get_rt(inst)] = t; + kvmppc_set_gpr(vcpu, get_rt(inst), t); } break; case OP_31_XOP_SLBMFEV: @@ -143,20 +145,20 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, } else { ulong t, rb; - rb = vcpu->arch.gpr[get_rb(inst)]; + rb = kvmppc_get_gpr(vcpu, get_rb(inst)); t = vcpu->arch.mmu.slbmfev(vcpu, rb); - vcpu->arch.gpr[get_rt(inst)] = t; + kvmppc_set_gpr(vcpu, get_rt(inst), t); } break; case OP_31_XOP_DCBZ: { - ulong rb = vcpu->arch.gpr[get_rb(inst)]; + ulong rb = kvmppc_get_gpr(vcpu, get_rb(inst)); ulong ra = 0; ulong addr; u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 }; if (get_ra(inst)) - ra = vcpu->arch.gpr[get_ra(inst)]; + ra = kvmppc_get_gpr(vcpu, get_ra(inst)); addr = (ra + rb) & ~31ULL; if (!(vcpu->arch.msr & MSR_SF)) @@ -233,43 +235,44 @@ static void kvmppc_write_bat(struct kvm_vcpu *vcpu, int sprn, u32 val) int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) { int emulated = EMULATE_DONE; + ulong spr_val = kvmppc_get_gpr(vcpu, rs); switch (sprn) { case SPRN_SDR1: - to_book3s(vcpu)->sdr1 = vcpu->arch.gpr[rs]; + to_book3s(vcpu)->sdr1 = spr_val; break; case SPRN_DSISR: - to_book3s(vcpu)->dsisr = vcpu->arch.gpr[rs]; + to_book3s(vcpu)->dsisr = spr_val; break; case SPRN_DAR: - vcpu->arch.dear = vcpu->arch.gpr[rs]; + vcpu->arch.dear = spr_val; break; case SPRN_HIOR: 
- to_book3s(vcpu)->hior = vcpu->arch.gpr[rs]; + to_book3s(vcpu)->hior = spr_val; break; case SPRN_IBAT0U ... SPRN_IBAT3L: case SPRN_IBAT4U ... SPRN_IBAT7L: case SPRN_DBAT0U ... SPRN_DBAT3L: case SPRN_DBAT4U ... SPRN_DBAT7L: - kvmppc_write_bat(vcpu, sprn, (u32)vcpu->arch.gpr[rs]); + kvmppc_write_bat(vcpu, sprn, (u32)spr_val); /* BAT writes happen so rarely that we're ok to flush * everything here */ kvmppc_mmu_pte_flush(vcpu, 0, 0); break; case SPRN_HID0: - to_book3s(vcpu)->hid[0] = vcpu->arch.gpr[rs]; + to_book3s(vcpu)->hid[0] = spr_val; break; case SPRN_HID1: - to_book3s(vcpu)->hid[1] = vcpu->arch.gpr[rs]; + to_book3s(vcpu)->hid[1] = spr_val; break; case SPRN_HID2: - to_book3s(vcpu)->hid[2] = vcpu->arch.gpr[rs]; + to_book3s(vcpu)->hid[2] = spr_val; break; case SPRN_HID4: - to_book3s(vcpu)->hid[4] = vcpu->arch.gpr[rs]; + to_book3s(vcpu)->hid[4] = spr_val; break; case SPRN_HID5: - to_book3s(vcpu)->hid[5] = vcpu->arch.gpr[rs]; + to_book3s(vcpu)->hid[5] = spr_val; /* guest HID5 set can change is_dcbz32 */ if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV)) @@ -299,38 +302,38 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) switch (sprn) { case SPRN_SDR1: - vcpu->arch.gpr[rt] = to_book3s(vcpu)->sdr1; + kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->sdr1); break; case SPRN_DSISR: - vcpu->arch.gpr[rt] = to_book3s(vcpu)->dsisr; + kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->dsisr); break; case SPRN_DAR: - vcpu->arch.gpr[rt] = vcpu->arch.dear; + kvmppc_set_gpr(vcpu, rt, vcpu->arch.dear); break; case SPRN_HIOR: - vcpu->arch.gpr[rt] = to_book3s(vcpu)->hior; + kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hior); break; case SPRN_HID0: - vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[0]; + kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[0]); break; case SPRN_HID1: - vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[1]; + kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[1]); break; case SPRN_HID2: - vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[2]; + kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[2]); break; case SPRN_HID4: - vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[4]; + kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[4]); break; case SPRN_HID5: - vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[5]; + kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[5]); break; case SPRN_THRM1: case SPRN_THRM2: case SPRN_THRM3: case SPRN_CTRLF: case SPRN_CTRLT: - vcpu->arch.gpr[rt] = 0; + kvmppc_set_gpr(vcpu, rt, 0); break; default: printk(KERN_INFO "KVM: invalid SPR read: %d\n", sprn); diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index d8b63420acf..49af80e4a6e 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -69,10 +69,10 @@ void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu) for (i = 0; i < 32; i += 4) { printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i, - vcpu->arch.gpr[i], - vcpu->arch.gpr[i+1], - vcpu->arch.gpr[i+2], - vcpu->arch.gpr[i+3]); + kvmppc_get_gpr(vcpu, i), + kvmppc_get_gpr(vcpu, i+1), + kvmppc_get_gpr(vcpu, i+2), + kvmppc_get_gpr(vcpu, i+3)); } } @@ -431,7 +431,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) { vcpu->arch.pc = 0; vcpu->arch.msr = 0; - vcpu->arch.gpr[1] = (16<<20) - 8; /* -8 for the callee-save LR slot */ + kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */ vcpu->arch.shadow_pid = 1; @@ -466,7 +466,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) regs->sprg7 = vcpu->arch.sprg6; for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) - regs->gpr[i] = vcpu->arch.gpr[i]; + regs->gpr[i] = kvmppc_get_gpr(vcpu, i); return 0; 
} @@ -491,8 +491,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) vcpu->arch.sprg6 = regs->sprg5; vcpu->arch.sprg7 = regs->sprg6; - for (i = 0; i < ARRAY_SIZE(vcpu->arch.gpr); i++) - vcpu->arch.gpr[i] = regs->gpr[i]; + for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) + kvmppc_set_gpr(vcpu, i, regs->gpr[i]); return 0; } diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c index aebc65e93f4..cbc790ee192 100644 --- a/arch/powerpc/kvm/booke_emulate.c +++ b/arch/powerpc/kvm/booke_emulate.c @@ -62,20 +62,20 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, case OP_31_XOP_MFMSR: rt = get_rt(inst); - vcpu->arch.gpr[rt] = vcpu->arch.msr; + kvmppc_set_gpr(vcpu, rt, vcpu->arch.msr); kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS); break; case OP_31_XOP_MTMSR: rs = get_rs(inst); kvmppc_set_exit_type(vcpu, EMULATED_MTMSR_EXITS); - kvmppc_set_msr(vcpu, vcpu->arch.gpr[rs]); + kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs)); break; case OP_31_XOP_WRTEE: rs = get_rs(inst); vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE) - | (vcpu->arch.gpr[rs] & MSR_EE); + | (kvmppc_get_gpr(vcpu, rs) & MSR_EE); kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS); break; @@ -101,22 +101,23 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) { int emulated = EMULATE_DONE; + ulong spr_val = kvmppc_get_gpr(vcpu, rs); switch (sprn) { case SPRN_DEAR: - vcpu->arch.dear = vcpu->arch.gpr[rs]; break; + vcpu->arch.dear = spr_val; break; case SPRN_ESR: - vcpu->arch.esr = vcpu->arch.gpr[rs]; break; + vcpu->arch.esr = spr_val; break; case SPRN_DBCR0: - vcpu->arch.dbcr0 = vcpu->arch.gpr[rs]; break; + vcpu->arch.dbcr0 = spr_val; break; case SPRN_DBCR1: - vcpu->arch.dbcr1 = vcpu->arch.gpr[rs]; break; + vcpu->arch.dbcr1 = spr_val; break; case SPRN_DBSR: - vcpu->arch.dbsr &= ~vcpu->arch.gpr[rs]; break; + vcpu->arch.dbsr &= ~spr_val; break; case SPRN_TSR: - vcpu->arch.tsr &= ~vcpu->arch.gpr[rs]; break; + vcpu->arch.tsr &= ~spr_val; break; case SPRN_TCR: - vcpu->arch.tcr = vcpu->arch.gpr[rs]; + vcpu->arch.tcr = spr_val; kvmppc_emulate_dec(vcpu); break; @@ -124,64 +125,64 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) * loaded into the real SPRGs when resuming the * guest. 
*/ case SPRN_SPRG4: - vcpu->arch.sprg4 = vcpu->arch.gpr[rs]; break; + vcpu->arch.sprg4 = spr_val; break; case SPRN_SPRG5: - vcpu->arch.sprg5 = vcpu->arch.gpr[rs]; break; + vcpu->arch.sprg5 = spr_val; break; case SPRN_SPRG6: - vcpu->arch.sprg6 = vcpu->arch.gpr[rs]; break; + vcpu->arch.sprg6 = spr_val; break; case SPRN_SPRG7: - vcpu->arch.sprg7 = vcpu->arch.gpr[rs]; break; + vcpu->arch.sprg7 = spr_val; break; case SPRN_IVPR: - vcpu->arch.ivpr = vcpu->arch.gpr[rs]; + vcpu->arch.ivpr = spr_val; break; case SPRN_IVOR0: - vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = vcpu->arch.gpr[rs]; + vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = spr_val; break; case SPRN_IVOR1: - vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = vcpu->arch.gpr[rs]; + vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = spr_val; break; case SPRN_IVOR2: - vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = vcpu->arch.gpr[rs]; + vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = spr_val; break; case SPRN_IVOR3: - vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = vcpu->arch.gpr[rs]; + vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = spr_val; break; case SPRN_IVOR4: - vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = vcpu->arch.gpr[rs]; + vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = spr_val; break; case SPRN_IVOR5: - vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = vcpu->arch.gpr[rs]; + vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = spr_val; break; case SPRN_IVOR6: - vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = vcpu->arch.gpr[rs]; + vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = spr_val; break; case SPRN_IVOR7: - vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = vcpu->arch.gpr[rs]; + vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = spr_val; break; case SPRN_IVOR8: - vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = vcpu->arch.gpr[rs]; + vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = spr_val; break; case SPRN_IVOR9: - vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = vcpu->arch.gpr[rs]; + vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = spr_val; break; case SPRN_IVOR10: - vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = vcpu->arch.gpr[rs]; + vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = spr_val; break; case SPRN_IVOR11: - vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = vcpu->arch.gpr[rs]; + vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = spr_val; break; case SPRN_IVOR12: - vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = vcpu->arch.gpr[rs]; + vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = spr_val; break; case SPRN_IVOR13: - vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = vcpu->arch.gpr[rs]; + vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = spr_val; break; case SPRN_IVOR14: - vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = vcpu->arch.gpr[rs]; + vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = spr_val; break; case SPRN_IVOR15: - vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = vcpu->arch.gpr[rs]; + vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = spr_val; break; default: @@ -197,65 +198,65 @@ int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) switch (sprn) { case SPRN_IVPR: - vcpu->arch.gpr[rt] = vcpu->arch.ivpr; break; + kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivpr); break; case SPRN_DEAR: - vcpu->arch.gpr[rt] = vcpu->arch.dear; break; + kvmppc_set_gpr(vcpu, rt, vcpu->arch.dear); break; case SPRN_ESR: - vcpu->arch.gpr[rt] = vcpu->arch.esr; break; + kvmppc_set_gpr(vcpu, rt, vcpu->arch.esr); break; case SPRN_DBCR0: - vcpu->arch.gpr[rt] = vcpu->arch.dbcr0; break; + kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr0); break; case SPRN_DBCR1: - vcpu->arch.gpr[rt] = vcpu->arch.dbcr1; break; + kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr1); break; case SPRN_DBSR: - vcpu->arch.gpr[rt] = vcpu->arch.dbsr; break; + 
kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbsr); break; case SPRN_IVOR0: - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL]; + kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL]); break; case SPRN_IVOR1: - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK]; + kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK]); break; case SPRN_IVOR2: - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]; + kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]); break; case SPRN_IVOR3: - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE]; + kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE]); break; case SPRN_IVOR4: - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL]; + kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL]); break; case SPRN_IVOR5: - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT]; + kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT]); break; case SPRN_IVOR6: - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM]; + kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM]); break; case SPRN_IVOR7: - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL]; + kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL]); break; case SPRN_IVOR8: - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]; + kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]); break; case SPRN_IVOR9: - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL]; + kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL]); break; case SPRN_IVOR10: - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER]; + kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER]); break; case SPRN_IVOR11: - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT]; + kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_FIT]); break; case SPRN_IVOR12: - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG]; + kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG]); break; case SPRN_IVOR13: - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS]; + kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS]); break; case SPRN_IVOR14: - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS]; + kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS]); break; case SPRN_IVOR15: - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]; + kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]); break; default: diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c index be95b8d8e3b..7644f7a9bac 100644 --- a/arch/powerpc/kvm/e500_emulate.c +++ b/arch/powerpc/kvm/e500_emulate.c @@ -74,54 +74,55 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); int emulated = EMULATE_DONE; + ulong spr_val = kvmppc_get_gpr(vcpu, rs); switch (sprn) { case SPRN_PID: vcpu_e500->pid[0] = vcpu->arch.shadow_pid = - vcpu->arch.pid = vcpu->arch.gpr[rs]; + vcpu->arch.pid = spr_val; break; case SPRN_PID1: - vcpu_e500->pid[1] = vcpu->arch.gpr[rs]; break; + vcpu_e500->pid[1] = spr_val; break; case SPRN_PID2: - vcpu_e500->pid[2] = vcpu->arch.gpr[rs]; break; + vcpu_e500->pid[2] = spr_val; break; case SPRN_MAS0: - vcpu_e500->mas0 = vcpu->arch.gpr[rs]; break; + vcpu_e500->mas0 = spr_val; break; case SPRN_MAS1: - vcpu_e500->mas1 = vcpu->arch.gpr[rs]; break; + vcpu_e500->mas1 = 
spr_val; break; case SPRN_MAS2: - vcpu_e500->mas2 = vcpu->arch.gpr[rs]; break; + vcpu_e500->mas2 = spr_val; break; case SPRN_MAS3: - vcpu_e500->mas3 = vcpu->arch.gpr[rs]; break; + vcpu_e500->mas3 = spr_val; break; case SPRN_MAS4: - vcpu_e500->mas4 = vcpu->arch.gpr[rs]; break; + vcpu_e500->mas4 = spr_val; break; case SPRN_MAS6: - vcpu_e500->mas6 = vcpu->arch.gpr[rs]; break; + vcpu_e500->mas6 = spr_val; break; case SPRN_MAS7: - vcpu_e500->mas7 = vcpu->arch.gpr[rs]; break; + vcpu_e500->mas7 = spr_val; break; case SPRN_L1CSR1: - vcpu_e500->l1csr1 = vcpu->arch.gpr[rs]; break; + vcpu_e500->l1csr1 = spr_val; break; case SPRN_HID0: - vcpu_e500->hid0 = vcpu->arch.gpr[rs]; break; + vcpu_e500->hid0 = spr_val; break; case SPRN_HID1: - vcpu_e500->hid1 = vcpu->arch.gpr[rs]; break; + vcpu_e500->hid1 = spr_val; break; case SPRN_MMUCSR0: emulated = kvmppc_e500_emul_mt_mmucsr0(vcpu_e500, - vcpu->arch.gpr[rs]); + spr_val); break; /* extra exceptions */ case SPRN_IVOR32: - vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = vcpu->arch.gpr[rs]; + vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = spr_val; break; case SPRN_IVOR33: - vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] = vcpu->arch.gpr[rs]; + vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] = spr_val; break; case SPRN_IVOR34: - vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] = vcpu->arch.gpr[rs]; + vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] = spr_val; break; case SPRN_IVOR35: - vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = vcpu->arch.gpr[rs]; + vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = spr_val; break; default: @@ -138,63 +139,71 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) switch (sprn) { case SPRN_PID: - vcpu->arch.gpr[rt] = vcpu_e500->pid[0]; break; + kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[0]); break; case SPRN_PID1: - vcpu->arch.gpr[rt] = vcpu_e500->pid[1]; break; + kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[1]); break; case SPRN_PID2: - vcpu->arch.gpr[rt] = vcpu_e500->pid[2]; break; + kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[2]); break; case SPRN_MAS0: - vcpu->arch.gpr[rt] = vcpu_e500->mas0; break; + kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas0); break; case SPRN_MAS1: - vcpu->arch.gpr[rt] = vcpu_e500->mas1; break; + kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas1); break; case SPRN_MAS2: - vcpu->arch.gpr[rt] = vcpu_e500->mas2; break; + kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas2); break; case SPRN_MAS3: - vcpu->arch.gpr[rt] = vcpu_e500->mas3; break; + kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas3); break; case SPRN_MAS4: - vcpu->arch.gpr[rt] = vcpu_e500->mas4; break; + kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas4); break; case SPRN_MAS6: - vcpu->arch.gpr[rt] = vcpu_e500->mas6; break; + kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas6); break; case SPRN_MAS7: - vcpu->arch.gpr[rt] = vcpu_e500->mas7; break; + kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas7); break; case SPRN_TLB0CFG: - vcpu->arch.gpr[rt] = mfspr(SPRN_TLB0CFG); - vcpu->arch.gpr[rt] &= ~0xfffUL; - vcpu->arch.gpr[rt] |= vcpu_e500->guest_tlb_size[0]; + { + ulong tmp = SPRN_TLB0CFG; + + tmp &= ~0xfffUL; + tmp |= vcpu_e500->guest_tlb_size[0]; + kvmppc_set_gpr(vcpu, rt, tmp); break; + } case SPRN_TLB1CFG: - vcpu->arch.gpr[rt] = mfspr(SPRN_TLB1CFG); - vcpu->arch.gpr[rt] &= ~0xfffUL; - vcpu->arch.gpr[rt] |= vcpu_e500->guest_tlb_size[1]; + { + ulong tmp = SPRN_TLB1CFG; + + tmp &= ~0xfffUL; + tmp |= vcpu_e500->guest_tlb_size[1]; + kvmppc_set_gpr(vcpu, rt, tmp); break; + } case SPRN_L1CSR1: - vcpu->arch.gpr[rt] = vcpu_e500->l1csr1; break; + kvmppc_set_gpr(vcpu, rt, vcpu_e500->l1csr1); break; case SPRN_HID0: - 
vcpu->arch.gpr[rt] = vcpu_e500->hid0; break; + kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid0); break; case SPRN_HID1: - vcpu->arch.gpr[rt] = vcpu_e500->hid1; break; + kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid1); break; case SPRN_MMUCSR0: - vcpu->arch.gpr[rt] = 0; break; + kvmppc_set_gpr(vcpu, rt, 0); break; case SPRN_MMUCFG: - vcpu->arch.gpr[rt] = mfspr(SPRN_MMUCFG); break; + kvmppc_set_gpr(vcpu, rt, mfspr(SPRN_MMUCFG)); break; /* extra exceptions */ case SPRN_IVOR32: - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]; + kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]); break; case SPRN_IVOR33: - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA]; + kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA]); break; case SPRN_IVOR34: - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND]; + kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND]); break; case SPRN_IVOR35: - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR]; + kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR]); break; default: emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt); diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c index fb1e1dc11ba..6a7fc012b93 100644 --- a/arch/powerpc/kvm/e500_tlb.c +++ b/arch/powerpc/kvm/e500_tlb.c @@ -417,7 +417,7 @@ int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb) int esel, tlbsel; gva_t ea; - ea = ((ra) ? vcpu->arch.gpr[ra] : 0) + vcpu->arch.gpr[rb]; + ea = ((ra) ? kvmppc_get_gpr(vcpu, ra) : 0) + kvmppc_get_gpr(vcpu, rb); ia = (ea >> 2) & 0x1; @@ -470,7 +470,7 @@ int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb) struct tlbe *gtlbe = NULL; gva_t ea; - ea = vcpu->arch.gpr[rb]; + ea = kvmppc_get_gpr(vcpu, rb); for (tlbsel = 0; tlbsel < 2; tlbsel++) { esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as); diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c index 303457b2f52..38219af0cd0 100644 --- a/arch/powerpc/kvm/emulate.c +++ b/arch/powerpc/kvm/emulate.c @@ -170,14 +170,14 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) case OP_31_XOP_STWX: rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, - vcpu->arch.gpr[rs], + kvmppc_get_gpr(vcpu, rs), 4, 1); break; case OP_31_XOP_STBX: rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, - vcpu->arch.gpr[rs], + kvmppc_get_gpr(vcpu, rs), 1, 1); break; @@ -186,14 +186,14 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) ra = get_ra(inst); rb = get_rb(inst); - ea = vcpu->arch.gpr[rb]; + ea = kvmppc_get_gpr(vcpu, rb); if (ra) - ea += vcpu->arch.gpr[ra]; + ea += kvmppc_get_gpr(vcpu, ra); emulated = kvmppc_handle_store(run, vcpu, - vcpu->arch.gpr[rs], + kvmppc_get_gpr(vcpu, rs), 1, 1); - vcpu->arch.gpr[rs] = ea; + kvmppc_set_gpr(vcpu, rs, ea); break; case OP_31_XOP_LHZX: @@ -206,12 +206,12 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) ra = get_ra(inst); rb = get_rb(inst); - ea = vcpu->arch.gpr[rb]; + ea = kvmppc_get_gpr(vcpu, rb); if (ra) - ea += vcpu->arch.gpr[ra]; + ea += kvmppc_get_gpr(vcpu, ra); emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); - vcpu->arch.gpr[ra] = ea; + kvmppc_set_gpr(vcpu, ra, ea); break; case OP_31_XOP_MFSPR: @@ -220,47 +220,49 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) switch (sprn) { case SPRN_SRR0: - vcpu->arch.gpr[rt] = vcpu->arch.srr0; break; + kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr0); 
break; case SPRN_SRR1: - vcpu->arch.gpr[rt] = vcpu->arch.srr1; break; + kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr1); break; case SPRN_PVR: - vcpu->arch.gpr[rt] = vcpu->arch.pvr; break; + kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr); break; case SPRN_PIR: - vcpu->arch.gpr[rt] = vcpu->vcpu_id; break; + kvmppc_set_gpr(vcpu, rt, vcpu->vcpu_id); break; case SPRN_MSSSR0: - vcpu->arch.gpr[rt] = 0; break; + kvmppc_set_gpr(vcpu, rt, 0); break; /* Note: mftb and TBRL/TBWL are user-accessible, so * the guest can always access the real TB anyways. * In fact, we probably will never see these traps. */ case SPRN_TBWL: - vcpu->arch.gpr[rt] = get_tb() >> 32; break; + kvmppc_set_gpr(vcpu, rt, get_tb() >> 32); break; case SPRN_TBWU: - vcpu->arch.gpr[rt] = get_tb(); break; + kvmppc_set_gpr(vcpu, rt, get_tb()); break; case SPRN_SPRG0: - vcpu->arch.gpr[rt] = vcpu->arch.sprg0; break; + kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg0); break; case SPRN_SPRG1: - vcpu->arch.gpr[rt] = vcpu->arch.sprg1; break; + kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg1); break; case SPRN_SPRG2: - vcpu->arch.gpr[rt] = vcpu->arch.sprg2; break; + kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg2); break; case SPRN_SPRG3: - vcpu->arch.gpr[rt] = vcpu->arch.sprg3; break; + kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg3); break; /* Note: SPRG4-7 are user-readable, so we don't get * a trap. */ case SPRN_DEC: { u64 jd = get_tb() - vcpu->arch.dec_jiffies; - vcpu->arch.gpr[rt] = vcpu->arch.dec - jd; - pr_debug(KERN_INFO "mfDEC: %x - %llx = %lx\n", vcpu->arch.dec, jd, vcpu->arch.gpr[rt]); + kvmppc_set_gpr(vcpu, rt, vcpu->arch.dec - jd); + pr_debug(KERN_INFO "mfDEC: %x - %llx = %lx\n", + vcpu->arch.dec, jd, + kvmppc_get_gpr(vcpu, rt)); break; } default: emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt); if (emulated == EMULATE_FAIL) { printk("mfspr: unknown spr %x\n", sprn); - vcpu->arch.gpr[rt] = 0; + kvmppc_set_gpr(vcpu, rt, 0); } break; } @@ -272,7 +274,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) rb = get_rb(inst); emulated = kvmppc_handle_store(run, vcpu, - vcpu->arch.gpr[rs], + kvmppc_get_gpr(vcpu, rs), 2, 1); break; @@ -281,14 +283,14 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) ra = get_ra(inst); rb = get_rb(inst); - ea = vcpu->arch.gpr[rb]; + ea = kvmppc_get_gpr(vcpu, rb); if (ra) - ea += vcpu->arch.gpr[ra]; + ea += kvmppc_get_gpr(vcpu, ra); emulated = kvmppc_handle_store(run, vcpu, - vcpu->arch.gpr[rs], + kvmppc_get_gpr(vcpu, rs), 2, 1); - vcpu->arch.gpr[ra] = ea; + kvmppc_set_gpr(vcpu, ra, ea); break; case OP_31_XOP_MTSPR: @@ -296,9 +298,9 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) rs = get_rs(inst); switch (sprn) { case SPRN_SRR0: - vcpu->arch.srr0 = vcpu->arch.gpr[rs]; break; + vcpu->arch.srr0 = kvmppc_get_gpr(vcpu, rs); break; case SPRN_SRR1: - vcpu->arch.srr1 = vcpu->arch.gpr[rs]; break; + vcpu->arch.srr1 = kvmppc_get_gpr(vcpu, rs); break; /* XXX We need to context-switch the timebase for * watchdog and FIT. 
*/ @@ -308,18 +310,18 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) case SPRN_MSSSR0: break; case SPRN_DEC: - vcpu->arch.dec = vcpu->arch.gpr[rs]; + vcpu->arch.dec = kvmppc_get_gpr(vcpu, rs); kvmppc_emulate_dec(vcpu); break; case SPRN_SPRG0: - vcpu->arch.sprg0 = vcpu->arch.gpr[rs]; break; + vcpu->arch.sprg0 = kvmppc_get_gpr(vcpu, rs); break; case SPRN_SPRG1: - vcpu->arch.sprg1 = vcpu->arch.gpr[rs]; break; + vcpu->arch.sprg1 = kvmppc_get_gpr(vcpu, rs); break; case SPRN_SPRG2: - vcpu->arch.sprg2 = vcpu->arch.gpr[rs]; break; + vcpu->arch.sprg2 = kvmppc_get_gpr(vcpu, rs); break; case SPRN_SPRG3: - vcpu->arch.sprg3 = vcpu->arch.gpr[rs]; break; + vcpu->arch.sprg3 = kvmppc_get_gpr(vcpu, rs); break; default: emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs); @@ -351,7 +353,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) rb = get_rb(inst); emulated = kvmppc_handle_store(run, vcpu, - vcpu->arch.gpr[rs], + kvmppc_get_gpr(vcpu, rs), 4, 0); break; @@ -366,7 +368,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) rb = get_rb(inst); emulated = kvmppc_handle_store(run, vcpu, - vcpu->arch.gpr[rs], + kvmppc_get_gpr(vcpu, rs), 2, 0); break; @@ -385,7 +387,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) ra = get_ra(inst); rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); - vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; + kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); break; case OP_LBZ: @@ -397,35 +399,39 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) ra = get_ra(inst); rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); - vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; + kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); break; case OP_STW: rs = get_rs(inst); - emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], + emulated = kvmppc_handle_store(run, vcpu, + kvmppc_get_gpr(vcpu, rs), 4, 1); break; case OP_STWU: ra = get_ra(inst); rs = get_rs(inst); - emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], + emulated = kvmppc_handle_store(run, vcpu, + kvmppc_get_gpr(vcpu, rs), 4, 1); - vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; + kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); break; case OP_STB: rs = get_rs(inst); - emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], + emulated = kvmppc_handle_store(run, vcpu, + kvmppc_get_gpr(vcpu, rs), 1, 1); break; case OP_STBU: ra = get_ra(inst); rs = get_rs(inst); - emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], + emulated = kvmppc_handle_store(run, vcpu, + kvmppc_get_gpr(vcpu, rs), 1, 1); - vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; + kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); break; case OP_LHZ: @@ -437,21 +443,23 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) ra = get_ra(inst); rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); - vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; + kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); break; case OP_STH: rs = get_rs(inst); - emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], + emulated = kvmppc_handle_store(run, vcpu, + kvmppc_get_gpr(vcpu, rs), 2, 1); break; case OP_STHU: ra = get_ra(inst); rs = get_rs(inst); - emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], + emulated = kvmppc_handle_store(run, vcpu, + kvmppc_get_gpr(vcpu, rs), 2, 1); - vcpu->arch.gpr[ra] = 
vcpu->arch.paddr_accessed; + kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); break; default: diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 4633e7850dd..2c291161df8 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -270,34 +270,35 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu, struct kvm_run *run) { - ulong *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr]; - *gpr = run->dcr.data; + kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data); } static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run) { - ulong *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr]; + ulong gpr; - if (run->mmio.len > sizeof(*gpr)) { + if (run->mmio.len > sizeof(gpr)) { printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len); return; } if (vcpu->arch.mmio_is_bigendian) { switch (run->mmio.len) { - case 4: *gpr = *(u32 *)run->mmio.data; break; - case 2: *gpr = *(u16 *)run->mmio.data; break; - case 1: *gpr = *(u8 *)run->mmio.data; break; + case 4: gpr = *(u32 *)run->mmio.data; break; + case 2: gpr = *(u16 *)run->mmio.data; break; + case 1: gpr = *(u8 *)run->mmio.data; break; } } else { /* Convert BE data from userland back to LE. */ switch (run->mmio.len) { - case 4: *gpr = ld_le32((u32 *)run->mmio.data); break; - case 2: *gpr = ld_le16((u16 *)run->mmio.data); break; - case 1: *gpr = *(u8 *)run->mmio.data; break; + case 4: gpr = ld_le32((u32 *)run->mmio.data); break; + case 2: gpr = ld_le16((u16 *)run->mmio.data); break; + case 1: gpr = *(u8 *)run->mmio.data; break; } } + + kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); } int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, -- cgit v1.2.3-70-g09d2 From 992b5b29b5ae254c416c62faf98d59a6cf970027 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Fri, 8 Jan 2010 02:58:02 +0100 Subject: KVM: PPC: Add helpers for CR, XER We now have helpers for the GPRs, so let's also add some for CR and XER. Having them in the PACA simplifies code a lot, as we don't need to care about where to store CC or not to overflow any integers. 
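As a rough sketch (not part of the diff), this is the call-site pattern the new helpers enable; it mirrors the 44x tlbsx change further down, where gtlb_index is the result of the guest TLB search:

	u32 cr = kvmppc_get_cr(vcpu);

	if (gtlb_index < 0)
		cr &= ~0x20000000;	/* search missed: clear the CR result bit */
	else
		cr |= 0x20000000;	/* search hit: set it */
	kvmppc_set_cr(vcpu, cr);

The emulation code no longer needs to know whether CR actually lives in the vcpu struct or somewhere else entirely.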
Signed-off-by: Alexander Graf Signed-off-by: Avi Kivity --- arch/powerpc/include/asm/kvm_ppc.h | 40 ++++++++++++++++++++++++++++++++++++++ arch/powerpc/kvm/44x_tlb.c | 6 ++++-- arch/powerpc/kvm/book3s.c | 8 ++++---- arch/powerpc/kvm/booke.c | 8 ++++---- 4 files changed, 52 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index ba01b9c1d38..d60b2f0cdcf 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -108,6 +108,26 @@ static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num) return vcpu->arch.gpr[num]; } +static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val) +{ + vcpu->arch.cr = val; +} + +static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.cr; +} + +static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val) +{ + vcpu->arch.xer = val; +} + +static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.xer; +} + #else static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val) @@ -120,6 +140,26 @@ static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num) return vcpu->arch.gpr[num]; } +static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val) +{ + vcpu->arch.cr = val; +} + +static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.cr; +} + +static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val) +{ + vcpu->arch.xer = val; +} + +static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.xer; +} + #endif #endif /* __POWERPC_KVM_PPC_H__ */ diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c index 8b3773669c9..2570fcc7665 100644 --- a/arch/powerpc/kvm/44x_tlb.c +++ b/arch/powerpc/kvm/44x_tlb.c @@ -506,10 +506,12 @@ int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, u8 rc) gtlb_index = kvmppc_44x_tlb_index(vcpu, ea, pid, as); if (rc) { + u32 cr = kvmppc_get_cr(vcpu); + if (gtlb_index < 0) - vcpu->arch.cr &= ~0x20000000; + kvmppc_set_cr(vcpu, cr & ~0x20000000); else - vcpu->arch.cr |= 0x20000000; + kvmppc_set_cr(vcpu, cr | 0x20000000); } kvmppc_set_gpr(vcpu, rt, gtlb_index); diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 574b24fb6e0..09ba8dbaaba 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -717,10 +717,10 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) int i; regs->pc = vcpu->arch.pc; - regs->cr = vcpu->arch.cr; + regs->cr = kvmppc_get_cr(vcpu); regs->ctr = vcpu->arch.ctr; regs->lr = vcpu->arch.lr; - regs->xer = vcpu->arch.xer; + regs->xer = kvmppc_get_xer(vcpu); regs->msr = vcpu->arch.msr; regs->srr0 = vcpu->arch.srr0; regs->srr1 = vcpu->arch.srr1; @@ -744,10 +744,10 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) int i; vcpu->arch.pc = regs->pc; - vcpu->arch.cr = regs->cr; + kvmppc_set_cr(vcpu, regs->cr); vcpu->arch.ctr = regs->ctr; vcpu->arch.lr = regs->lr; - vcpu->arch.xer = regs->xer; + kvmppc_set_xer(vcpu, regs->xer); kvmppc_set_msr(vcpu, regs->msr); vcpu->arch.srr0 = regs->srr0; vcpu->arch.srr1 = regs->srr1; diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 49af80e4a6e..338baf9f6b2 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -449,10 +449,10 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) int i; regs->pc = vcpu->arch.pc; - regs->cr = vcpu->arch.cr; + regs->cr = 
kvmppc_get_cr(vcpu); regs->ctr = vcpu->arch.ctr; regs->lr = vcpu->arch.lr; - regs->xer = vcpu->arch.xer; + regs->xer = kvmppc_get_xer(vcpu); regs->msr = vcpu->arch.msr; regs->srr0 = vcpu->arch.srr0; regs->srr1 = vcpu->arch.srr1; @@ -476,10 +476,10 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) int i; vcpu->arch.pc = regs->pc; - vcpu->arch.cr = regs->cr; + kvmppc_set_cr(vcpu, regs->cr); vcpu->arch.ctr = regs->ctr; vcpu->arch.lr = regs->lr; - vcpu->arch.xer = regs->xer; + kvmppc_set_xer(vcpu, regs->xer); kvmppc_set_msr(vcpu, regs->msr); vcpu->arch.srr0 = regs->srr0; vcpu->arch.srr1 = regs->srr1; -- cgit v1.2.3-70-g09d2 From 7e57cba06074da84d7c24d8c3f44040d2d8c88ac Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Fri, 8 Jan 2010 02:58:03 +0100 Subject: KVM: PPC: Use PACA backed shadow vcpu We're being horribly racy right now. All the entry and exit code hijacks random fields from the PACA that could easily be used by different code in case we get interrupted, for example by a #MC or even page fault. After discussing this with Ben, we figured it's best to reserve some more space in the PACA and just shove off some vcpu state to there. That way we can drastically improve the readability of the code, make it less racy and less complex. Signed-off-by: Alexander Graf Signed-off-by: Avi Kivity --- arch/powerpc/include/asm/kvm_book3s.h | 2 + arch/powerpc/include/asm/kvm_book3s_64_asm.h | 19 +++ arch/powerpc/include/asm/kvm_host.h | 5 +- arch/powerpc/include/asm/kvm_ppc.h | 20 ++- arch/powerpc/include/asm/paca.h | 5 + arch/powerpc/kernel/asm-offsets.c | 33 +++- arch/powerpc/kvm/book3s.c | 4 + arch/powerpc/kvm/book3s_64_interrupts.S | 216 +++++++++------------------ arch/powerpc/kvm/book3s_64_rmhandlers.S | 32 +--- arch/powerpc/kvm/book3s_64_slb.S | 150 +++++++++++-------- 10 files changed, 249 insertions(+), 237 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index 74b7369770d..f192017d799 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -23,6 +23,7 @@ #include #include #include +#include struct kvmppc_slb { u64 esid; @@ -69,6 +70,7 @@ struct kvmppc_sid_map { struct kvmppc_vcpu_book3s { struct kvm_vcpu vcpu; + struct kvmppc_book3s_shadow_vcpu shadow_vcpu; struct kvmppc_sid_map sid_map[SID_MAP_NUM]; struct kvmppc_slb slb[64]; struct { diff --git a/arch/powerpc/include/asm/kvm_book3s_64_asm.h b/arch/powerpc/include/asm/kvm_book3s_64_asm.h index 2e06ee8184e..fca9404c1a7 100644 --- a/arch/powerpc/include/asm/kvm_book3s_64_asm.h +++ b/arch/powerpc/include/asm/kvm_book3s_64_asm.h @@ -20,6 +20,8 @@ #ifndef __ASM_KVM_BOOK3S_ASM_H__ #define __ASM_KVM_BOOK3S_ASM_H__ +#ifdef __ASSEMBLY__ + #ifdef CONFIG_KVM_BOOK3S_64_HANDLER #include @@ -55,4 +57,21 @@ kvmppc_resume_\intno: #endif /* CONFIG_KVM_BOOK3S_64_HANDLER */ +#else /*__ASSEMBLY__ */ + +struct kvmppc_book3s_shadow_vcpu { + ulong gpr[14]; + u32 cr; + u32 xer; + ulong host_r1; + ulong host_r2; + ulong handler; + ulong scratch0; + ulong scratch1; + ulong vmhandler; + ulong rmhandler; +}; + +#endif /*__ASSEMBLY__ */ + #endif /* __ASM_KVM_BOOK3S_ASM_H__ */ diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 1201f62d0d7..d615fa8a141 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -175,10 +175,13 @@ struct kvm_vcpu_arch { ulong gpr[32]; ulong pc; - u32 cr; ulong ctr; ulong lr; + +#ifdef CONFIG_BOOKE ulong xer; + u32 
cr; +#endif ulong msr; #ifdef CONFIG_PPC64 diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index d60b2f0cdcf..89c5d79c347 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -98,34 +98,42 @@ extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu); #ifdef CONFIG_PPC_BOOK3S +/* We assume we're always acting on the current vcpu */ + static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val) { - vcpu->arch.gpr[num] = val; + if ( num < 14 ) + get_paca()->shadow_vcpu.gpr[num] = val; + else + vcpu->arch.gpr[num] = val; } static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num) { - return vcpu->arch.gpr[num]; + if ( num < 14 ) + return get_paca()->shadow_vcpu.gpr[num]; + else + return vcpu->arch.gpr[num]; } static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val) { - vcpu->arch.cr = val; + get_paca()->shadow_vcpu.cr = val; } static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu) { - return vcpu->arch.cr; + return get_paca()->shadow_vcpu.cr; } static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val) { - vcpu->arch.xer = val; + get_paca()->shadow_vcpu.xer = val; } static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu) { - return vcpu->arch.xer; + return get_paca()->shadow_vcpu.xer; } #else diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index 5e9b4ef7141..d8a693109c8 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h @@ -19,6 +19,9 @@ #include #include #include +#ifdef CONFIG_KVM_BOOK3S_64_HANDLER +#include +#endif register struct paca_struct *local_paca asm("r13"); @@ -135,6 +138,8 @@ struct paca_struct { u64 esid; u64 vsid; } kvm_slb[64]; /* guest SLB */ + /* We use this to store guest state in */ + struct kvmppc_book3s_shadow_vcpu shadow_vcpu; u8 kvm_slb_max; /* highest used guest slb entry */ u8 kvm_in_guest; /* are we inside the guest? 
*/ #endif diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index a6c2b63227b..1501e77c980 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -194,6 +194,32 @@ int main(void) DEFINE(PACA_KVM_IN_GUEST, offsetof(struct paca_struct, kvm_in_guest)); DEFINE(PACA_KVM_SLB, offsetof(struct paca_struct, kvm_slb)); DEFINE(PACA_KVM_SLB_MAX, offsetof(struct paca_struct, kvm_slb_max)); + DEFINE(PACA_KVM_CR, offsetof(struct paca_struct, shadow_vcpu.cr)); + DEFINE(PACA_KVM_XER, offsetof(struct paca_struct, shadow_vcpu.xer)); + DEFINE(PACA_KVM_R0, offsetof(struct paca_struct, shadow_vcpu.gpr[0])); + DEFINE(PACA_KVM_R1, offsetof(struct paca_struct, shadow_vcpu.gpr[1])); + DEFINE(PACA_KVM_R2, offsetof(struct paca_struct, shadow_vcpu.gpr[2])); + DEFINE(PACA_KVM_R3, offsetof(struct paca_struct, shadow_vcpu.gpr[3])); + DEFINE(PACA_KVM_R4, offsetof(struct paca_struct, shadow_vcpu.gpr[4])); + DEFINE(PACA_KVM_R5, offsetof(struct paca_struct, shadow_vcpu.gpr[5])); + DEFINE(PACA_KVM_R6, offsetof(struct paca_struct, shadow_vcpu.gpr[6])); + DEFINE(PACA_KVM_R7, offsetof(struct paca_struct, shadow_vcpu.gpr[7])); + DEFINE(PACA_KVM_R8, offsetof(struct paca_struct, shadow_vcpu.gpr[8])); + DEFINE(PACA_KVM_R9, offsetof(struct paca_struct, shadow_vcpu.gpr[9])); + DEFINE(PACA_KVM_R10, offsetof(struct paca_struct, shadow_vcpu.gpr[10])); + DEFINE(PACA_KVM_R11, offsetof(struct paca_struct, shadow_vcpu.gpr[11])); + DEFINE(PACA_KVM_R12, offsetof(struct paca_struct, shadow_vcpu.gpr[12])); + DEFINE(PACA_KVM_R13, offsetof(struct paca_struct, shadow_vcpu.gpr[13])); + DEFINE(PACA_KVM_HOST_R1, offsetof(struct paca_struct, shadow_vcpu.host_r1)); + DEFINE(PACA_KVM_HOST_R2, offsetof(struct paca_struct, shadow_vcpu.host_r2)); + DEFINE(PACA_KVM_VMHANDLER, offsetof(struct paca_struct, + shadow_vcpu.vmhandler)); + DEFINE(PACA_KVM_RMHANDLER, offsetof(struct paca_struct, + shadow_vcpu.rmhandler)); + DEFINE(PACA_KVM_SCRATCH0, offsetof(struct paca_struct, + shadow_vcpu.scratch0)); + DEFINE(PACA_KVM_SCRATCH1, offsetof(struct paca_struct, + shadow_vcpu.scratch1)); #endif #endif /* CONFIG_PPC64 */ @@ -389,8 +415,6 @@ int main(void) DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid)); DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr)); DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr)); - DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr)); - DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer)); DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr)); DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc)); DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.msr)); @@ -415,7 +439,10 @@ int main(void) DEFINE(VCPU_TRAMPOLINE_ENTER, offsetof(struct kvm_vcpu, arch.trampoline_enter)); DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler)); DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags)); -#endif +#else + DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr)); + DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer)); +#endif /* CONFIG_PPC64 */ #endif #ifdef CONFIG_44x DEFINE(PGD_T_LOG2, PGD_T_LOG2); diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 09ba8dbaaba..3e06eae3f2c 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -66,12 +66,16 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu) void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { memcpy(get_paca()->kvm_slb, to_book3s(vcpu)->slb_shadow, sizeof(get_paca()->kvm_slb)); + memcpy(&get_paca()->shadow_vcpu, 
&to_book3s(vcpu)->shadow_vcpu, + sizeof(get_paca()->shadow_vcpu)); get_paca()->kvm_slb_max = to_book3s(vcpu)->slb_shadow_max; } void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) { memcpy(to_book3s(vcpu)->slb_shadow, get_paca()->kvm_slb, sizeof(get_paca()->kvm_slb)); + memcpy(&to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu, + sizeof(get_paca()->shadow_vcpu)); to_book3s(vcpu)->slb_shadow_max = get_paca()->kvm_slb_max; } diff --git a/arch/powerpc/kvm/book3s_64_interrupts.S b/arch/powerpc/kvm/book3s_64_interrupts.S index d95d0d967d5..66e3b1179b3 100644 --- a/arch/powerpc/kvm/book3s_64_interrupts.S +++ b/arch/powerpc/kvm/book3s_64_interrupts.S @@ -28,11 +28,6 @@ #define ULONG_SIZE 8 #define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE)) -.macro mfpaca tmp_reg, src_reg, offset, vcpu_reg - ld \tmp_reg, (PACA_EXMC+\offset)(r13) - std \tmp_reg, VCPU_GPR(\src_reg)(\vcpu_reg) -.endm - .macro DISABLE_INTERRUPTS mfmsr r0 rldicl r0,r0,48,1 @@ -92,37 +87,30 @@ kvm_start_entry: /* Load non-volatile guest state from the vcpu */ VCPU_LOAD_NVGPRS(r4) -kvm_start_lightweight: - - ld r9, VCPU_PC(r4) /* r9 = vcpu->arch.pc */ - ld r10, VCPU_SHADOW_MSR(r4) /* r10 = vcpu->arch.shadow_msr */ - - DISABLE_INTERRUPTS - /* Save R1/R2 in the PACA */ - std r1, PACAR1(r13) - std r2, (PACA_EXMC+EX_SRR0)(r13) + std r1, PACA_KVM_HOST_R1(r13) + std r2, PACA_KVM_HOST_R2(r13) + + /* XXX swap in/out on load? */ ld r3, VCPU_HIGHMEM_HANDLER(r4) - std r3, PACASAVEDMSR(r13) + std r3, PACA_KVM_VMHANDLER(r13) ld r3, VCPU_TRAMPOLINE_ENTER(r4) - mtsrr0 r3 + std r3, PACA_KVM_RMHANDLER(r13) - LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR)) - mtsrr1 r3 +kvm_start_lightweight: - /* Load guest state in the respective registers */ - lwz r3, VCPU_CR(r4) /* r3 = vcpu->arch.cr */ - stw r3, (PACA_EXMC + EX_CCR)(r13) + ld r9, VCPU_PC(r4) /* r9 = vcpu->arch.pc */ + ld r10, VCPU_SHADOW_MSR(r4) /* r10 = vcpu->arch.shadow_msr */ + /* Load some guest state in the respective registers */ ld r3, VCPU_CTR(r4) /* r3 = vcpu->arch.ctr */ mtctr r3 /* CTR = r3 */ ld r3, VCPU_LR(r4) /* r3 = vcpu->arch.lr */ mtlr r3 /* LR = r3 */ - ld r3, VCPU_XER(r4) /* r3 = vcpu->arch.xer */ - std r3, (PACA_EXMC + EX_R3)(r13) + DISABLE_INTERRUPTS /* Some guests may need to have dcbz set to 32 byte length. 
* @@ -142,34 +130,21 @@ kvm_start_lightweight: mtspr SPRN_HID5,r3 no_dcbz32_on: - /* Load guest GPRs */ - - ld r3, VCPU_GPR(r9)(r4) - std r3, (PACA_EXMC + EX_R9)(r13) - ld r3, VCPU_GPR(r10)(r4) - std r3, (PACA_EXMC + EX_R10)(r13) - ld r3, VCPU_GPR(r11)(r4) - std r3, (PACA_EXMC + EX_R11)(r13) - ld r3, VCPU_GPR(r12)(r4) - std r3, (PACA_EXMC + EX_R12)(r13) - ld r3, VCPU_GPR(r13)(r4) - std r3, (PACA_EXMC + EX_R13)(r13) - - ld r0, VCPU_GPR(r0)(r4) - ld r1, VCPU_GPR(r1)(r4) - ld r2, VCPU_GPR(r2)(r4) - ld r3, VCPU_GPR(r3)(r4) - ld r5, VCPU_GPR(r5)(r4) - ld r6, VCPU_GPR(r6)(r4) - ld r7, VCPU_GPR(r7)(r4) - ld r8, VCPU_GPR(r8)(r4) - ld r4, VCPU_GPR(r4)(r4) /* This sets the Magic value for the trampoline */ + /* XXX this needs to move into a safe function, so we can + be sure we don't get any interrupts */ + li r11, 1 stb r11, PACA_KVM_IN_GUEST(r13) + ld r3, PACA_KVM_RMHANDLER(r13) + mtsrr0 r3 + + LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR)) + mtsrr1 r3 + /* Jump to SLB patching handlder and into our guest */ RFI @@ -185,60 +160,31 @@ kvmppc_handler_highmem: /* * Register usage at this point: * - * R00 = guest R13 - * R01 = host R1 - * R02 = host R2 - * R10 = guest PC - * R11 = guest MSR - * R12 = exit handler id - * R13 = PACA - * PACA.exmc.R9 = guest R1 - * PACA.exmc.R10 = guest R10 - * PACA.exmc.R11 = guest R11 - * PACA.exmc.R12 = guest R12 - * PACA.exmc.R13 = guest R2 - * PACA.exmc.DAR = guest DAR - * PACA.exmc.DSISR = guest DSISR - * PACA.exmc.LR = guest instruction - * PACA.exmc.CCR = guest CR - * PACA.exmc.SRR0 = guest R0 + * R0 = guest last inst + * R1 = host R1 + * R2 = host R2 + * R3 = guest PC + * R4 = guest MSR + * R5 = guest DAR + * R6 = guest DSISR + * R13 = PACA + * PACA.KVM.* = guest * * */ - std r3, (PACA_EXMC+EX_R3)(r13) + /* R7 = vcpu */ + ld r7, GPR4(r1) - /* save the exit id in R3 */ - mr r3, r12 + /* Now save the guest state */ - /* R12 = vcpu */ - ld r12, GPR4(r1) + stw r0, VCPU_LAST_INST(r7) - /* Now save the guest state */ + std r3, VCPU_PC(r7) + std r4, VCPU_SHADOW_MSR(r7) + std r5, VCPU_FAULT_DEAR(r7) + std r6, VCPU_FAULT_DSISR(r7) - std r0, VCPU_GPR(r13)(r12) - std r4, VCPU_GPR(r4)(r12) - std r5, VCPU_GPR(r5)(r12) - std r6, VCPU_GPR(r6)(r12) - std r7, VCPU_GPR(r7)(r12) - std r8, VCPU_GPR(r8)(r12) - std r9, VCPU_GPR(r9)(r12) - - /* get registers from PACA */ - mfpaca r5, r0, EX_SRR0, r12 - mfpaca r5, r3, EX_R3, r12 - mfpaca r5, r1, EX_R9, r12 - mfpaca r5, r10, EX_R10, r12 - mfpaca r5, r11, EX_R11, r12 - mfpaca r5, r12, EX_R12, r12 - mfpaca r5, r2, EX_R13, r12 - - lwz r5, (PACA_EXMC+EX_LR)(r13) - stw r5, VCPU_LAST_INST(r12) - - lwz r5, (PACA_EXMC+EX_CCR)(r13) - stw r5, VCPU_CR(r12) - - ld r5, VCPU_HFLAGS(r12) + ld r5, VCPU_HFLAGS(r7) rldicl. 
r5, r5, 0, 63 /* CR = ((r5 & 1) == 0) */ beq no_dcbz32_off @@ -248,58 +194,42 @@ kvmppc_handler_highmem: no_dcbz32_off: - std r14, VCPU_GPR(r14)(r12) - std r15, VCPU_GPR(r15)(r12) - std r16, VCPU_GPR(r16)(r12) - std r17, VCPU_GPR(r17)(r12) - std r18, VCPU_GPR(r18)(r12) - std r19, VCPU_GPR(r19)(r12) - std r20, VCPU_GPR(r20)(r12) - std r21, VCPU_GPR(r21)(r12) - std r22, VCPU_GPR(r22)(r12) - std r23, VCPU_GPR(r23)(r12) - std r24, VCPU_GPR(r24)(r12) - std r25, VCPU_GPR(r25)(r12) - std r26, VCPU_GPR(r26)(r12) - std r27, VCPU_GPR(r27)(r12) - std r28, VCPU_GPR(r28)(r12) - std r29, VCPU_GPR(r29)(r12) - std r30, VCPU_GPR(r30)(r12) - std r31, VCPU_GPR(r31)(r12) - - /* Save guest PC (R10) */ - std r10, VCPU_PC(r12) - - /* Save guest msr (R11) */ - std r11, VCPU_SHADOW_MSR(r12) - - /* Save guest CTR (in R12) */ + std r14, VCPU_GPR(r14)(r7) + std r15, VCPU_GPR(r15)(r7) + std r16, VCPU_GPR(r16)(r7) + std r17, VCPU_GPR(r17)(r7) + std r18, VCPU_GPR(r18)(r7) + std r19, VCPU_GPR(r19)(r7) + std r20, VCPU_GPR(r20)(r7) + std r21, VCPU_GPR(r21)(r7) + std r22, VCPU_GPR(r22)(r7) + std r23, VCPU_GPR(r23)(r7) + std r24, VCPU_GPR(r24)(r7) + std r25, VCPU_GPR(r25)(r7) + std r26, VCPU_GPR(r26)(r7) + std r27, VCPU_GPR(r27)(r7) + std r28, VCPU_GPR(r28)(r7) + std r29, VCPU_GPR(r29)(r7) + std r30, VCPU_GPR(r30)(r7) + std r31, VCPU_GPR(r31)(r7) + + /* Save guest CTR */ mfctr r5 - std r5, VCPU_CTR(r12) + std r5, VCPU_CTR(r7) /* Save guest LR */ mflr r5 - std r5, VCPU_LR(r12) - - /* Save guest XER */ - mfxer r5 - std r5, VCPU_XER(r12) + std r5, VCPU_LR(r7) - /* Save guest DAR */ - ld r5, (PACA_EXMC+EX_DAR)(r13) - std r5, VCPU_FAULT_DEAR(r12) - - /* Save guest DSISR */ - lwz r5, (PACA_EXMC+EX_DSISR)(r13) - std r5, VCPU_FAULT_DSISR(r12) + /* XXX convert to safe function call */ /* Restore host msr -> SRR1 */ - ld r7, VCPU_HOST_MSR(r12) - mtsrr1 r7 + ld r6, VCPU_HOST_MSR(r7) + mtsrr1 r6 /* Restore host IP -> SRR0 */ - ld r6, VCPU_HOST_RETIP(r12) - mtsrr0 r6 + ld r5, VCPU_HOST_RETIP(r7) + mtsrr0 r5 /* * For some interrupts, we need to call the real Linux @@ -311,9 +241,9 @@ no_dcbz32_off: * r3 = address of interrupt handler (exit reason) */ - cmpwi r3, BOOK3S_INTERRUPT_EXTERNAL + cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL beq call_linux_handler - cmpwi r3, BOOK3S_INTERRUPT_DECREMENTER + cmpwi r12, BOOK3S_INTERRUPT_DECREMENTER beq call_linux_handler /* Back to Interruptable Mode! 
(goto kvm_return_point) */ @@ -334,12 +264,12 @@ call_linux_handler: * R7 VCPU_HOST_MSR */ - mtlr r3 + mtlr r12 - ld r5, VCPU_TRAMPOLINE_LOWMEM(r12) - mtsrr0 r5 - LOAD_REG_IMMEDIATE(r5, MSR_KERNEL & ~(MSR_IR | MSR_DR)) - mtsrr1 r5 + ld r4, VCPU_TRAMPOLINE_LOWMEM(r7) + mtsrr0 r4 + LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR)) + mtsrr1 r3 RFI @@ -350,7 +280,7 @@ kvm_return_point: /* go back into the guest */ /* Pass the exit number as 3rd argument to kvmppc_handle_exit */ - mr r5, r3 + mr r5, r12 /* Restore r3 (kvm_run) and r4 (vcpu) */ REST_2GPRS(3, r1) diff --git a/arch/powerpc/kvm/book3s_64_rmhandlers.S b/arch/powerpc/kvm/book3s_64_rmhandlers.S index fb7dd2e9ac8..cd9f0b609e4 100644 --- a/arch/powerpc/kvm/book3s_64_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_64_rmhandlers.S @@ -45,37 +45,21 @@ kvmppc_trampoline_\intno: * To distinguish, we check a magic byte in the PACA */ mfspr r13, SPRN_SPRG_PACA /* r13 = PACA */ - std r12, (PACA_EXMC + EX_R12)(r13) + std r12, PACA_KVM_SCRATCH0(r13) mfcr r12 - stw r12, (PACA_EXMC + EX_CCR)(r13) + stw r12, PACA_KVM_SCRATCH1(r13) lbz r12, PACA_KVM_IN_GUEST(r13) cmpwi r12, 0 bne ..kvmppc_handler_hasmagic_\intno /* No KVM guest? Then jump back to the Linux handler! */ - lwz r12, (PACA_EXMC + EX_CCR)(r13) + lwz r12, PACA_KVM_SCRATCH1(r13) mtcr r12 - ld r12, (PACA_EXMC + EX_R12)(r13) + ld r12, PACA_KVM_SCRATCH0(r13) mfspr r13, SPRN_SPRG_SCRATCH0 /* r13 = original r13 */ b kvmppc_resume_\intno /* Get back original handler */ /* Now we know we're handling a KVM guest */ ..kvmppc_handler_hasmagic_\intno: - /* Unset guest state */ - li r12, 0 - stb r12, PACA_KVM_IN_GUEST(r13) - - std r1, (PACA_EXMC+EX_R9)(r13) - std r10, (PACA_EXMC+EX_R10)(r13) - std r11, (PACA_EXMC+EX_R11)(r13) - std r2, (PACA_EXMC+EX_R13)(r13) - - mfsrr0 r10 - mfsrr1 r11 - - /* Restore R1/R2 so we can handle faults */ - ld r1, PACAR1(r13) - ld r2, (PACA_EXMC+EX_SRR0)(r13) - /* Let's store which interrupt we're handling */ li r12, \intno @@ -106,16 +90,16 @@ INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_VSX * * Input Registers: * - * R6 = SRR0 - * R7 = SRR1 + * R5 = SRR0 + * R6 = SRR1 * LR = real-mode IP * */ .global kvmppc_handler_lowmem_trampoline kvmppc_handler_lowmem_trampoline: - mtsrr0 r6 - mtsrr1 r7 + mtsrr0 r5 + mtsrr1 r6 blr kvmppc_handler_lowmem_trampoline_end: diff --git a/arch/powerpc/kvm/book3s_64_slb.S b/arch/powerpc/kvm/book3s_64_slb.S index 8e447886666..7188c11ed7d 100644 --- a/arch/powerpc/kvm/book3s_64_slb.S +++ b/arch/powerpc/kvm/book3s_64_slb.S @@ -51,24 +51,18 @@ kvmppc_handler_trampoline_enter: * * MSR = ~IR|DR * R13 = PACA + * R1 = host R1 + * R2 = host R2 * R9 = guest IP * R10 = guest MSR - * R11 = free - * R12 = free - * PACA[PACA_EXMC + EX_R9] = guest R9 - * PACA[PACA_EXMC + EX_R10] = guest R10 - * PACA[PACA_EXMC + EX_R11] = guest R11 - * PACA[PACA_EXMC + EX_R12] = guest R12 - * PACA[PACA_EXMC + EX_R13] = guest R13 - * PACA[PACA_EXMC + EX_CCR] = guest CR - * PACA[PACA_EXMC + EX_R3] = guest XER + * all other GPRS = free + * PACA[KVM_CR] = guest CR + * PACA[KVM_XER] = guest XER */ mtsrr0 r9 mtsrr1 r10 - mtspr SPRN_SPRG_SCRATCH0, r0 - /* Remove LPAR shadow entries */ #if SLB_NUM_BOLTED == 3 @@ -131,20 +125,27 @@ slb_do_enter: /* Enter guest */ - mfspr r0, SPRN_SPRG_SCRATCH0 - - ld r9, (PACA_EXMC+EX_R9)(r13) - ld r10, (PACA_EXMC+EX_R10)(r13) - ld r12, (PACA_EXMC+EX_R12)(r13) - - lwz r11, (PACA_EXMC+EX_CCR)(r13) + ld r0, (PACA_KVM_R0)(r13) + ld r1, (PACA_KVM_R1)(r13) + ld r2, (PACA_KVM_R2)(r13) + ld r3, (PACA_KVM_R3)(r13) + ld r4, (PACA_KVM_R4)(r13) + ld r5, 
(PACA_KVM_R5)(r13) + ld r6, (PACA_KVM_R6)(r13) + ld r7, (PACA_KVM_R7)(r13) + ld r8, (PACA_KVM_R8)(r13) + ld r9, (PACA_KVM_R9)(r13) + ld r10, (PACA_KVM_R10)(r13) + ld r12, (PACA_KVM_R12)(r13) + + lwz r11, (PACA_KVM_CR)(r13) mtcr r11 - ld r11, (PACA_EXMC+EX_R3)(r13) + ld r11, (PACA_KVM_XER)(r13) mtxer r11 - ld r11, (PACA_EXMC+EX_R11)(r13) - ld r13, (PACA_EXMC+EX_R13)(r13) + ld r11, (PACA_KVM_R11)(r13) + ld r13, (PACA_KVM_R13)(r13) RFI kvmppc_handler_trampoline_enter_end: @@ -162,28 +163,58 @@ kvmppc_handler_trampoline_exit: /* Register usage at this point: * - * SPRG_SCRATCH0 = guest R13 - * R01 = host R1 - * R02 = host R2 - * R10 = guest PC - * R11 = guest MSR - * R12 = exit handler id - * R13 = PACA - * PACA.exmc.CCR = guest CR - * PACA.exmc.R9 = guest R1 - * PACA.exmc.R10 = guest R10 - * PACA.exmc.R11 = guest R11 - * PACA.exmc.R12 = guest R12 - * PACA.exmc.R13 = guest R2 + * SPRG_SCRATCH0 = guest R13 + * R12 = exit handler id + * R13 = PACA + * PACA.KVM.SCRATCH0 = guest R12 + * PACA.KVM.SCRATCH1 = guest CR * */ /* Save registers */ - std r0, (PACA_EXMC+EX_SRR0)(r13) - std r9, (PACA_EXMC+EX_R3)(r13) - std r10, (PACA_EXMC+EX_LR)(r13) - std r11, (PACA_EXMC+EX_DAR)(r13) + std r0, PACA_KVM_R0(r13) + std r1, PACA_KVM_R1(r13) + std r2, PACA_KVM_R2(r13) + std r3, PACA_KVM_R3(r13) + std r4, PACA_KVM_R4(r13) + std r5, PACA_KVM_R5(r13) + std r6, PACA_KVM_R6(r13) + std r7, PACA_KVM_R7(r13) + std r8, PACA_KVM_R8(r13) + std r9, PACA_KVM_R9(r13) + std r10, PACA_KVM_R10(r13) + std r11, PACA_KVM_R11(r13) + + /* Restore R1/R2 so we can handle faults */ + ld r1, PACA_KVM_HOST_R1(r13) + ld r2, PACA_KVM_HOST_R2(r13) + + /* Save guest PC and MSR in GPRs */ + mfsrr0 r3 + mfsrr1 r4 + + /* Get scratch'ed off registers */ + mfspr r9, SPRN_SPRG_SCRATCH0 + std r9, PACA_KVM_R13(r13) + + ld r8, PACA_KVM_SCRATCH0(r13) + std r8, PACA_KVM_R12(r13) + + lwz r7, PACA_KVM_SCRATCH1(r13) + stw r7, PACA_KVM_CR(r13) + + /* Save more register state */ + + mfxer r6 + stw r6, PACA_KVM_XER(r13) + + mfdar r5 + mfdsisr r6 + + /* Unset guest state */ + li r9, 0 + stb r9, PACA_KVM_IN_GUEST(r13) /* * In order for us to easily get the last instruction, @@ -207,7 +238,8 @@ ld_last_inst: ori r11, r9, MSR_DR /* Enable paging for data */ mtmsr r11 /* 2) fetch the instruction */ - lwz r0, 0(r10) + /* XXX implement PACA_KVM_IN_GUEST=2 path to safely jump over this */ + lwz r0, 0(r3) /* 3) disable paging again */ mtmsr r9 @@ -233,29 +265,27 @@ no_ld_last_inst: slb_do_exit: - /* Restore registers */ - - ld r11, (PACA_EXMC+EX_DAR)(r13) - ld r10, (PACA_EXMC+EX_LR)(r13) - ld r9, (PACA_EXMC+EX_R3)(r13) - - /* Save last inst */ - stw r0, (PACA_EXMC+EX_LR)(r13) - - /* Save DAR and DSISR before going to paged mode */ - mfdar r0 - std r0, (PACA_EXMC+EX_DAR)(r13) - mfdsisr r0 - stw r0, (PACA_EXMC+EX_DSISR)(r13) + /* Register usage at this point: + * + * R0 = guest last inst + * R1 = host R1 + * R2 = host R2 + * R3 = guest PC + * R4 = guest MSR + * R5 = guest DAR + * R6 = guest DSISR + * R12 = exit handler id + * R13 = PACA + * PACA.KVM.* = guest * + * + */ /* RFI into the highmem handler */ - mfmsr r0 - ori r0, r0, MSR_IR|MSR_DR|MSR_RI /* Enable paging */ - mtsrr1 r0 - ld r0, PACASAVEDMSR(r13) /* Highmem handler address */ - mtsrr0 r0 - - mfspr r0, SPRN_SPRG_SCRATCH0 + mfmsr r7 + ori r7, r7, MSR_IR|MSR_DR|MSR_RI /* Enable paging */ + mtsrr1 r7 + ld r8, PACA_KVM_VMHANDLER(r13) /* Highmem handler address */ + mtsrr0 r8 RFI kvmppc_handler_trampoline_exit_end: -- cgit v1.2.3-70-g09d2 From b4433a7cceed59714b0778e1ace624befdd15ded Mon Sep 17 00:00:00 2001 From: 
Alexander Graf Date: Fri, 8 Jan 2010 02:58:04 +0100 Subject: KVM: PPC: Implement 'skip instruction' mode To fetch the last instruction we were interrupted on, we enable DR in early exit code, where we are still in a very transitional phase between guest and host state. Most of the time this seemed to work, but another CPU can easily flush our TLB and HTAB which makes us go in the Linux page fault handler which totally breaks because we still use the guest's SLB entries. To work around that, let's introduce a second KVM guest mode that defines that whenever we get a trap, we don't call the Linux handler or go into the KVM exit code, but just jump over the faulting instruction. That way a potentially bad lwz doesn't trigger any faults and we can later on interpret the invalid instruction we fetched as "fetch didn't work". Signed-off-by: Alexander Graf Signed-off-by: Avi Kivity --- arch/powerpc/include/asm/kvm_asm.h | 6 +++++ arch/powerpc/kvm/book3s_64_rmhandlers.S | 39 ++++++++++++++++++++++++++++++++- arch/powerpc/kvm/book3s_64_slb.S | 16 +++++++++----- arch/powerpc/kvm/emulate.c | 4 ++++ 4 files changed, 59 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h index af2abe74f54..aadf2dd6f84 100644 --- a/arch/powerpc/include/asm/kvm_asm.h +++ b/arch/powerpc/include/asm/kvm_asm.h @@ -97,4 +97,10 @@ #define RESUME_HOST RESUME_FLAG_HOST #define RESUME_HOST_NV (RESUME_FLAG_HOST|RESUME_FLAG_NV) +#define KVM_GUEST_MODE_NONE 0 +#define KVM_GUEST_MODE_GUEST 1 +#define KVM_GUEST_MODE_SKIP 2 + +#define KVM_INST_FETCH_FAILED -1 + #endif /* __POWERPC_KVM_ASM_H__ */ diff --git a/arch/powerpc/kvm/book3s_64_rmhandlers.S b/arch/powerpc/kvm/book3s_64_rmhandlers.S index cd9f0b609e4..9ad1c2645d6 100644 --- a/arch/powerpc/kvm/book3s_64_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_64_rmhandlers.S @@ -49,7 +49,7 @@ kvmppc_trampoline_\intno: mfcr r12 stw r12, PACA_KVM_SCRATCH1(r13) lbz r12, PACA_KVM_IN_GUEST(r13) - cmpwi r12, 0 + cmpwi r12, KVM_GUEST_MODE_NONE bne ..kvmppc_handler_hasmagic_\intno /* No KVM guest? Then jump back to the Linux handler! */ lwz r12, PACA_KVM_SCRATCH1(r13) @@ -60,6 +60,11 @@ kvmppc_trampoline_\intno: /* Now we know we're handling a KVM guest */ ..kvmppc_handler_hasmagic_\intno: + + /* Should we just skip the faulting instruction? */ + cmpwi r12, KVM_GUEST_MODE_SKIP + beq kvmppc_handler_skip_ins + /* Let's store which interrupt we're handling */ li r12, \intno @@ -85,6 +90,38 @@ INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_PERFMON INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_ALTIVEC INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_VSX +/* + * Bring us back to the faulting code, but skip the + * faulting instruction. + * + * This is a generic exit path from the interrupt + * trampolines above. 
+ * + * Input Registers: + * + * R12 = free + * R13 = PACA + * PACA.KVM.SCRATCH0 = guest R12 + * PACA.KVM.SCRATCH1 = guest CR + * SPRG_SCRATCH0 = guest R13 + * + */ +kvmppc_handler_skip_ins: + + /* Patch the IP to the next instruction */ + mfsrr0 r12 + addi r12, r12, 4 + mtsrr0 r12 + + /* Clean up all state */ + lwz r12, PACA_KVM_SCRATCH1(r13) + mtcr r12 + ld r12, PACA_KVM_SCRATCH0(r13) + mfspr r13, SPRN_SPRG_SCRATCH0 + + /* And get back into the code */ + RFI + /* * This trampoline brings us back to a real mode handler * diff --git a/arch/powerpc/kvm/book3s_64_slb.S b/arch/powerpc/kvm/book3s_64_slb.S index 7188c11ed7d..d07b88617b2 100644 --- a/arch/powerpc/kvm/book3s_64_slb.S +++ b/arch/powerpc/kvm/book3s_64_slb.S @@ -212,10 +212,6 @@ kvmppc_handler_trampoline_exit: mfdar r5 mfdsisr r6 - /* Unset guest state */ - li r9, 0 - stb r9, PACA_KVM_IN_GUEST(r13) - /* * In order for us to easily get the last instruction, * we got the #vmexit at, we exploit the fact that the @@ -233,18 +229,28 @@ kvmppc_handler_trampoline_exit: ld_last_inst: /* Save off the guest instruction we're at */ + + /* Set guest mode to 'jump over instruction' so if lwz faults + * we'll just continue at the next IP. */ + li r9, KVM_GUEST_MODE_SKIP + stb r9, PACA_KVM_IN_GUEST(r13) + /* 1) enable paging for data */ mfmsr r9 ori r11, r9, MSR_DR /* Enable paging for data */ mtmsr r11 /* 2) fetch the instruction */ - /* XXX implement PACA_KVM_IN_GUEST=2 path to safely jump over this */ + li r0, KVM_INST_FETCH_FAILED /* In case lwz faults */ lwz r0, 0(r3) /* 3) disable paging again */ mtmsr r9 no_ld_last_inst: + /* Unset guest mode */ + li r9, KVM_GUEST_MODE_NONE + stb r9, PACA_KVM_IN_GUEST(r13) + /* Restore bolted entries from the shadow and fix it along the way */ /* We don't store anything in entry 0, so we don't need to take care of it */ diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c index 38219af0cd0..04e317c1bbe 100644 --- a/arch/powerpc/kvm/emulate.c +++ b/arch/powerpc/kvm/emulate.c @@ -143,6 +143,10 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) pr_debug(KERN_INFO "Emulating opcode %d / %d\n", get_op(inst), get_xop(inst)); + /* Try again next time */ + if (inst == KVM_INST_FETCH_FAILED) + return EMULATE_DONE; + switch (get_op(inst)) { case OP_TRAP: #ifdef CONFIG_PPC64 -- cgit v1.2.3-70-g09d2 From bc90923e27908ef65aa8aaad2f234e18b5273c78 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Fri, 8 Jan 2010 02:58:05 +0100 Subject: KVM: PPC: Get rid of unnecessary RFI Using an RFI in IR=1 is dangerous. We need to set two SRRs and then do an RFI without getting interrupted at all, because every interrupt could potentially overwrite the SRR values. Fortunately, we don't need to RFI in at least this particular case of the code, so we can just replace it with an mtmsr and b. 
Signed-off-by: Alexander Graf Signed-off-by: Avi Kivity --- arch/powerpc/kvm/book3s_64_interrupts.S | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/kvm/book3s_64_interrupts.S b/arch/powerpc/kvm/book3s_64_interrupts.S index 66e3b1179b3..3c0ba551307 100644 --- a/arch/powerpc/kvm/book3s_64_interrupts.S +++ b/arch/powerpc/kvm/book3s_64_interrupts.S @@ -221,15 +221,8 @@ no_dcbz32_off: mflr r5 std r5, VCPU_LR(r7) - /* XXX convert to safe function call */ - /* Restore host msr -> SRR1 */ ld r6, VCPU_HOST_MSR(r7) - mtsrr1 r6 - - /* Restore host IP -> SRR0 */ - ld r5, VCPU_HOST_RETIP(r7) - mtsrr0 r5 /* * For some interrupts, we need to call the real Linux @@ -246,8 +239,9 @@ no_dcbz32_off: cmpwi r12, BOOK3S_INTERRUPT_DECREMENTER beq call_linux_handler - /* Back to Interruptable Mode! (goto kvm_return_point) */ - RFI + /* Back to EE=1 */ + mtmsr r6 + b kvm_return_point call_linux_handler: @@ -260,10 +254,16 @@ call_linux_handler: * interrupt handler! * * R3 still contains the exit code, - * R6 VCPU_HOST_RETIP and - * R7 VCPU_HOST_MSR + * R5 VCPU_HOST_RETIP and + * R6 VCPU_HOST_MSR */ + /* Restore host IP -> SRR0 */ + ld r5, VCPU_HOST_RETIP(r7) + + /* XXX Better move to a safe function? + * What if we get an HTAB flush in between mtsrr0 and mtsrr1? */ + mtlr r12 ld r4, VCPU_TRAMPOLINE_LOWMEM(r7) -- cgit v1.2.3-70-g09d2 From 021ec9c69f8b7b20f46296cc76cc4cb341b25191 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Fri, 8 Jan 2010 02:58:06 +0100 Subject: KVM: PPC: Call SLB patching code in interrupt safe manner Currently we're racy when doing the transition from IR=1 to IR=0, from the module memory entry code to the real mode SLB switching code. To work around that I took a look at the RTAS entry code which is faced with a similar problem and did the same thing: A small helper in linear mapped memory that does mtmsr with IR=0 and then RFIs info the actual handler. Thanks to that trick we can safely take page faults in the entry code and only need to be really wary of what to do as of the SLB switching part. 
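On the C side the hook-up is small; a sketch of the setup half, taken from the book3s.c hunk below (the helper itself is the kvmppc_rmcall assembly stub):

	/* Record the linear-mapped helper once at vcpu creation time.
	 * kvmppc_rmcall is a ppc64 function descriptor, so dereference it
	 * to get the actual text address the entry code will branch to. */
	vcpu->arch.rmcall = *(ulong *)kvmppc_rmcall;

The entry path then loads this address into CTR and branches to it, passing the trampoline entry point and an MSR with IR/DR cleared; the helper disables relocation before touching SRR0/SRR1, so a stray page fault can no longer clobber them.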
Signed-off-by: Alexander Graf Signed-off-by: Avi Kivity --- arch/powerpc/include/asm/kvm_book3s.h | 1 + arch/powerpc/include/asm/kvm_book3s_64_asm.h | 1 - arch/powerpc/include/asm/kvm_host.h | 1 + arch/powerpc/kernel/asm-offsets.c | 3 +-- arch/powerpc/kvm/book3s.c | 1 + arch/powerpc/kvm/book3s_64_exports.c | 1 + arch/powerpc/kvm/book3s_64_interrupts.S | 25 +++++++------------------ arch/powerpc/kvm/book3s_64_rmhandlers.S | 18 ++++++++++++++++++ arch/powerpc/kvm/book3s_64_slb.S | 4 ++++ 9 files changed, 34 insertions(+), 21 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index f192017d799..c91be0ff023 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -121,6 +121,7 @@ extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, extern u32 kvmppc_trampoline_lowmem; extern u32 kvmppc_trampoline_enter; +extern void kvmppc_rmcall(ulong srr0, ulong srr1); static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu) { diff --git a/arch/powerpc/include/asm/kvm_book3s_64_asm.h b/arch/powerpc/include/asm/kvm_book3s_64_asm.h index fca9404c1a7..183461b4840 100644 --- a/arch/powerpc/include/asm/kvm_book3s_64_asm.h +++ b/arch/powerpc/include/asm/kvm_book3s_64_asm.h @@ -69,7 +69,6 @@ struct kvmppc_book3s_shadow_vcpu { ulong scratch0; ulong scratch1; ulong vmhandler; - ulong rmhandler; }; #endif /*__ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index d615fa8a141..f7215e622df 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -167,6 +167,7 @@ struct kvm_vcpu_arch { ulong trampoline_lowmem; ulong trampoline_enter; ulong highmem_handler; + ulong rmcall; ulong host_paca_phys; struct kvmppc_mmu mmu; #endif diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 1501e77c980..ee9935442f0 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -214,8 +214,6 @@ int main(void) DEFINE(PACA_KVM_HOST_R2, offsetof(struct paca_struct, shadow_vcpu.host_r2)); DEFINE(PACA_KVM_VMHANDLER, offsetof(struct paca_struct, shadow_vcpu.vmhandler)); - DEFINE(PACA_KVM_RMHANDLER, offsetof(struct paca_struct, - shadow_vcpu.rmhandler)); DEFINE(PACA_KVM_SCRATCH0, offsetof(struct paca_struct, shadow_vcpu.scratch0)); DEFINE(PACA_KVM_SCRATCH1, offsetof(struct paca_struct, @@ -438,6 +436,7 @@ int main(void) DEFINE(VCPU_TRAMPOLINE_LOWMEM, offsetof(struct kvm_vcpu, arch.trampoline_lowmem)); DEFINE(VCPU_TRAMPOLINE_ENTER, offsetof(struct kvm_vcpu, arch.trampoline_enter)); DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler)); + DEFINE(VCPU_RMCALL, offsetof(struct kvm_vcpu, arch.rmcall)); DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags)); #else DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr)); diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 3e06eae3f2c..13173922b67 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -919,6 +919,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) vcpu->arch.trampoline_lowmem = kvmppc_trampoline_lowmem; vcpu->arch.trampoline_enter = kvmppc_trampoline_enter; vcpu->arch.highmem_handler = (ulong)kvmppc_handler_highmem; + vcpu->arch.rmcall = *(ulong*)kvmppc_rmcall; vcpu->arch.shadow_msr = MSR_USER64; diff --git a/arch/powerpc/kvm/book3s_64_exports.c b/arch/powerpc/kvm/book3s_64_exports.c index 
5b2db38ed86..99b07125c52 100644 --- a/arch/powerpc/kvm/book3s_64_exports.c +++ b/arch/powerpc/kvm/book3s_64_exports.c @@ -22,3 +22,4 @@ EXPORT_SYMBOL_GPL(kvmppc_trampoline_enter); EXPORT_SYMBOL_GPL(kvmppc_trampoline_lowmem); +EXPORT_SYMBOL_GPL(kvmppc_rmcall); diff --git a/arch/powerpc/kvm/book3s_64_interrupts.S b/arch/powerpc/kvm/book3s_64_interrupts.S index 3c0ba551307..33aef5345f6 100644 --- a/arch/powerpc/kvm/book3s_64_interrupts.S +++ b/arch/powerpc/kvm/book3s_64_interrupts.S @@ -95,17 +95,14 @@ kvm_start_entry: ld r3, VCPU_HIGHMEM_HANDLER(r4) std r3, PACA_KVM_VMHANDLER(r13) - ld r3, VCPU_TRAMPOLINE_ENTER(r4) - std r3, PACA_KVM_RMHANDLER(r13) - kvm_start_lightweight: ld r9, VCPU_PC(r4) /* r9 = vcpu->arch.pc */ ld r10, VCPU_SHADOW_MSR(r4) /* r10 = vcpu->arch.shadow_msr */ /* Load some guest state in the respective registers */ - ld r3, VCPU_CTR(r4) /* r3 = vcpu->arch.ctr */ - mtctr r3 /* CTR = r3 */ + ld r5, VCPU_CTR(r4) /* r5 = vcpu->arch.ctr */ + /* will be swapped in by rmcall */ ld r3, VCPU_LR(r4) /* r3 = vcpu->arch.lr */ mtlr r3 /* LR = r3 */ @@ -131,22 +128,14 @@ kvm_start_lightweight: no_dcbz32_on: - /* This sets the Magic value for the trampoline */ - - /* XXX this needs to move into a safe function, so we can - be sure we don't get any interrupts */ - - li r11, 1 - stb r11, PACA_KVM_IN_GUEST(r13) - - ld r3, PACA_KVM_RMHANDLER(r13) - mtsrr0 r3 + ld r6, VCPU_RMCALL(r4) + mtctr r6 - LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR)) - mtsrr1 r3 + ld r3, VCPU_TRAMPOLINE_ENTER(r4) + LOAD_REG_IMMEDIATE(r4, MSR_KERNEL & ~(MSR_IR | MSR_DR)) /* Jump to SLB patching handlder and into our guest */ - RFI + bctr /* * This is the handler in module memory. It gets jumped at from the diff --git a/arch/powerpc/kvm/book3s_64_rmhandlers.S b/arch/powerpc/kvm/book3s_64_rmhandlers.S index 9ad1c2645d6..e7091c9459a 100644 --- a/arch/powerpc/kvm/book3s_64_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_64_rmhandlers.S @@ -140,6 +140,24 @@ kvmppc_handler_lowmem_trampoline: blr kvmppc_handler_lowmem_trampoline_end: +/* + * Call a function in real mode + * + * Input Registers: + * + * R3 = function + * R4 = MSR + * R5 = CTR + * + */ +_GLOBAL(kvmppc_rmcall) + mtmsr r4 /* Disable relocation, so mtsrr + doesn't get interrupted */ + mtctr r5 + mtsrr0 r3 + mtsrr1 r4 + RFI + .global kvmppc_trampoline_lowmem kvmppc_trampoline_lowmem: .long kvmppc_handler_lowmem_trampoline - _stext diff --git a/arch/powerpc/kvm/book3s_64_slb.S b/arch/powerpc/kvm/book3s_64_slb.S index d07b88617b2..35b76272218 100644 --- a/arch/powerpc/kvm/book3s_64_slb.S +++ b/arch/powerpc/kvm/book3s_64_slb.S @@ -63,6 +63,10 @@ kvmppc_handler_trampoline_enter: mtsrr0 r9 mtsrr1 r10 + /* Activate guest mode, so faults get handled by KVM */ + li r11, KVM_GUEST_MODE_GUEST + stb r11, PACA_KVM_IN_GUEST(r13) + /* Remove LPAR shadow entries */ #if SLB_NUM_BOLTED == 3 -- cgit v1.2.3-70-g09d2 From 25a8a02d26a71c28e26417a3520c653c2d40af6b Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Fri, 8 Jan 2010 02:58:07 +0100 Subject: KVM: PPC: Emulate trap SRR1 flags properly Book3S needs some flags in SRR1 to get to know details about an interrupt. One such example is the trap instruction. It tells the guest kernel that a program interrupt is due to a trap using a bit in SRR1. This patch implements above behavior, making WARN_ON behave like WARN_ON. 
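A sketch of the resulting flow, using the names the patch introduces (kvmppc_core_queue_program() grows a flags argument; Book3S remembers it in prog_flags and folds it into the interrupt it finally delivers):

	/* emulate.c: a guest trap instruction queues a program interrupt
	 * together with the reason bit the guest expects to find in SRR1 */
	kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);

	/* book3s.c: stash the flags and deliver them with the interrupt, so
	 * the guest's program check handler sees SRR1_PROGTRAP set.
	 * BookE ignores the argument, since it reports the reason in ESR. */

Without the flag, the guest kernel cannot tell a trap apart from other program checks, which is why its WARN_ON() handling went wrong before.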
Signed-off-by: Alexander Graf Signed-off-by: Avi Kivity --- arch/powerpc/include/asm/kvm_book3s.h | 1 + arch/powerpc/include/asm/kvm_ppc.h | 2 +- arch/powerpc/include/asm/reg.h | 4 ++++ arch/powerpc/kvm/book3s.c | 7 +++++-- arch/powerpc/kvm/booke.c | 3 ++- arch/powerpc/kvm/emulate.c | 2 +- 6 files changed, 14 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index c91be0ff023..79ab8faf18e 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -91,6 +91,7 @@ struct kvmppc_vcpu_book3s { u64 vsid_next; u64 vsid_max; int context_id; + ulong prog_flags; /* flags to inject when giving a 700 trap */ }; #define CONTEXT_HOST 0 diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 89c5d79c347..09816da9e95 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -80,7 +80,7 @@ extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu); extern void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu); extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu); -extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu); +extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags); extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu); extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu); extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu, diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index bc8dd53f718..5572e86223f 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -426,6 +426,10 @@ #define SRR1_WAKEMT 0x00280000 /* mtctrl */ #define SRR1_WAKEDEC 0x00180000 /* Decrementer interrupt */ #define SRR1_WAKETHERM 0x00100000 /* Thermal management interrupt */ +#define SRR1_PROGFPE 0x00100000 /* Floating Point Enabled */ +#define SRR1_PROGPRIV 0x00040000 /* Privileged instruction */ +#define SRR1_PROGTRAP 0x00020000 /* Trap */ +#define SRR1_PROGADDR 0x00010000 /* SRR0 contains subsequent addr */ #define SPRN_HSRR0 0x13A /* Save/Restore Register 0 */ #define SPRN_HSRR1 0x13B /* Save/Restore Register 1 */ diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 13173922b67..66b5924e174 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -168,8 +168,9 @@ void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec) } -void kvmppc_core_queue_program(struct kvm_vcpu *vcpu) +void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags) { + to_book3s(vcpu)->prog_flags = flags; kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_PROGRAM); } @@ -198,6 +199,7 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority) { int deliver = 1; int vec = 0; + ulong flags = 0ULL; switch (priority) { case BOOK3S_IRQPRIO_DECREMENTER: @@ -231,6 +233,7 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority) break; case BOOK3S_IRQPRIO_PROGRAM: vec = BOOK3S_INTERRUPT_PROGRAM; + flags = to_book3s(vcpu)->prog_flags; break; case BOOK3S_IRQPRIO_VSX: vec = BOOK3S_INTERRUPT_VSX; @@ -261,7 +264,7 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority) #endif if (deliver) - kvmppc_inject_interrupt(vcpu, vec, 0ULL); + kvmppc_inject_interrupt(vcpu, vec, flags); return deliver; } diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 338baf9f6b2..e283e44e9f1 100644 --- a/arch/powerpc/kvm/booke.c +++ 
b/arch/powerpc/kvm/booke.c @@ -82,8 +82,9 @@ static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu, set_bit(priority, &vcpu->arch.pending_exceptions); } -void kvmppc_core_queue_program(struct kvm_vcpu *vcpu) +void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags) { + /* BookE does flags in ESR, so ignore those we get here */ kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM); } diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c index 04e317c1bbe..8b0ba0b69c2 100644 --- a/arch/powerpc/kvm/emulate.c +++ b/arch/powerpc/kvm/emulate.c @@ -154,7 +154,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) #else vcpu->arch.esr |= ESR_PTR; #endif - kvmppc_core_queue_program(vcpu); + kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP); advance = 0; break; -- cgit v1.2.3-70-g09d2 From d35feb26ef7e9fbef7643051be5f0773662b1f91 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Fri, 8 Jan 2010 02:58:08 +0100 Subject: KVM: PPC: Fix HID5 setting code The code to unset HID5.dcbz32 is broken. This patch makes it do the right rotate magic. Signed-off-by: Alexander Graf Reported-by: Benjamin Herrenschmidt Signed-off-by: Avi Kivity --- arch/powerpc/kvm/book3s_64_interrupts.S | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/kvm/book3s_64_interrupts.S b/arch/powerpc/kvm/book3s_64_interrupts.S index 33aef5345f6..2ff0b2137e6 100644 --- a/arch/powerpc/kvm/book3s_64_interrupts.S +++ b/arch/powerpc/kvm/book3s_64_interrupts.S @@ -177,8 +177,9 @@ kvmppc_handler_highmem: rldicl. r5, r5, 0, 63 /* CR = ((r5 & 1) == 0) */ beq no_dcbz32_off + li r4, 0 mfspr r5,SPRN_HID5 - rldimi r5,r5,6,56 + rldimi r5,r4,6,56 mtspr SPRN_HID5,r5 no_dcbz32_off: -- cgit v1.2.3-70-g09d2 From ff1ca3f983c4180018dbf5e02f2af06f86ef6d03 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Fri, 8 Jan 2010 02:58:09 +0100 Subject: KVM: PPC: Pass program interrupt flags to the guest When we need to reinject a program interrupt into the guest, we also need to reinject the corresponding flags into the guest. 
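Sketch of the fixed reinjection path (names from the diff below; at exit time vcpu->arch.shadow_msr holds the SRR1 value the interrupt gave us, and 0x1f0000 masks out its program-interrupt reason bits):

	/* why did the program interrupt fire? (trap, privileged insn,
	 * FP unavailable, ...) -- keep those bits around */
	ulong flags = vcpu->arch.shadow_msr & 0x1f0000ull;

	/* and pass them back in when we bounce the interrupt into the
	 * guest, instead of queueing a flag-less program interrupt */
	kvmppc_core_queue_program(vcpu, flags);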
Signed-off-by: Alexander Graf Reported-by: Benjamin Herrenschmidt Signed-off-by: Avi Kivity --- arch/powerpc/kvm/book3s.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 66b5924e174..02861fda73d 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -633,6 +633,9 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, case BOOK3S_INTERRUPT_PROGRAM: { enum emulation_result er; + ulong flags; + + flags = (vcpu->arch.shadow_msr & 0x1f0000ull); if (vcpu->arch.msr & MSR_PR) { #ifdef EXIT_DEBUG @@ -640,7 +643,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, #endif if ((vcpu->arch.last_inst & 0xff0007ff) != (INS_DCBZ & 0xfffffff7)) { - kvmppc_book3s_queue_irqprio(vcpu, exit_nr); + kvmppc_core_queue_program(vcpu, flags); r = RESUME_GUEST; break; } @@ -655,7 +658,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, case EMULATE_FAIL: printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n", __func__, vcpu->arch.pc, vcpu->arch.last_inst); - kvmppc_book3s_queue_irqprio(vcpu, exit_nr); + kvmppc_core_queue_program(vcpu, flags); r = RESUME_GUEST; break; default: -- cgit v1.2.3-70-g09d2 From 5f2b105a1d6a137c8cfb2792b79128db965880a8 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Sun, 10 Jan 2010 03:27:32 +0100 Subject: KVM: PPC: Pass through program interrupts When we get a program interrupt in guest kernel mode, we try to emulate the instruction. If that doesn't fail, we report to the user and try again - at the exact same instruction pointer. So if the guest kernel really does trigger an invalid instruction, we loop forever. So let's better go and forward program exceptions to the guest when we don't know the instruction we're supposed to emulate. Signed-off-by: Alexander Graf Signed-off-by: Avi Kivity --- arch/powerpc/kvm/emulate.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c index 8b0ba0b69c2..b905623735b 100644 --- a/arch/powerpc/kvm/emulate.c +++ b/arch/powerpc/kvm/emulate.c @@ -476,6 +476,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) advance = 0; printk(KERN_ERR "Couldn't emulate instruction 0x%08x " "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst)); + kvmppc_core_queue_program(vcpu, 0); } } -- cgit v1.2.3-70-g09d2 From 4b5c9b7f9bdd76a3c860731db08bfc6758e96e29 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Sun, 10 Jan 2010 03:27:47 +0100 Subject: KVM: PPC: Make large pages work An SLB entry contains two pieces of information related to size: 1) PTE size 2) SLB size The L bit defines the PTE be "large" (usually means 16MB), SLB_VSID_B_1T defines that the SLB should span 1 GB instead of the default 256MB. Apparently I messed things up and just put those two in one box, shaked it heavily and came up with the current code which handles large pages incorrectly, because it also treats large page SLB entries as "1TB" segment entries. This patch splits those two features apart, making Linux guests boot even when they have > 256MB. 
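The essence of the fix, pulled out of the slbmte hunk below: the two properties are now decoded independently instead of sharing one flag:

	slbe->large = (rs & SLB_VSID_L) ? 1 : 0;	/* 16MB PTEs in this segment */
	slbe->tb    = (rs & SLB_VSID_B_1T) ? 1 : 0;	/* 1TB rather than 256MB segment */
	slbe->esid  = slbe->tb ? esid_1t : esid;	/* ESID width follows segment
							   size, not page size */

SLB lookups and the effective-to-virtual translation key off slbe->tb, while slbe->large only affects the page size used for the mapping.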
Signed-off-by: Alexander Graf Signed-off-by: Avi Kivity --- arch/powerpc/include/asm/kvm_book3s.h | 3 ++- arch/powerpc/kvm/book3s_64_mmu.c | 10 ++++++---- 2 files changed, 8 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index 79ab8faf18e..c7db69f1e77 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -34,7 +34,8 @@ struct kvmppc_slb { bool Ks; bool Kp; bool nx; - bool large; + bool large; /* PTEs are 16MB */ + bool tb; /* 1TB segment */ bool class; }; diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c index e4beeb371a7..512dcff7755 100644 --- a/arch/powerpc/kvm/book3s_64_mmu.c +++ b/arch/powerpc/kvm/book3s_64_mmu.c @@ -54,7 +54,7 @@ static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe( if (!vcpu_book3s->slb[i].valid) continue; - if (vcpu_book3s->slb[i].large) + if (vcpu_book3s->slb[i].tb) cmp_esid = esid_1t; if (vcpu_book3s->slb[i].esid == cmp_esid) @@ -65,9 +65,10 @@ static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe( eaddr, esid, esid_1t); for (i = 0; i < vcpu_book3s->slb_nr; i++) { if (vcpu_book3s->slb[i].vsid) - dprintk(" %d: %c%c %llx %llx\n", i, + dprintk(" %d: %c%c%c %llx %llx\n", i, vcpu_book3s->slb[i].valid ? 'v' : ' ', vcpu_book3s->slb[i].large ? 'l' : ' ', + vcpu_book3s->slb[i].tb ? 't' : ' ', vcpu_book3s->slb[i].esid, vcpu_book3s->slb[i].vsid); } @@ -84,7 +85,7 @@ static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr, if (!slb) return 0; - if (slb->large) + if (slb->tb) return (((u64)eaddr >> 12) & 0xfffffff) | (((u64)slb->vsid) << 28); @@ -309,7 +310,8 @@ static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb) slbe = &vcpu_book3s->slb[slb_nr]; slbe->large = (rs & SLB_VSID_L) ? 1 : 0; - slbe->esid = slbe->large ? esid_1t : esid; + slbe->tb = (rs & SLB_VSID_B_1T) ? 1 : 0; + slbe->esid = slbe->tb ? esid_1t : esid; slbe->vsid = rs >> 12; slbe->valid = (rb & SLB_ESID_V) ? 1 : 0; slbe->Ks = (rs & SLB_VSID_KS) ? 1 : 0; -- cgit v1.2.3-70-g09d2 From a1f83a74feaa9718a5c61587256ea6cc1b993d16 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Tue, 29 Dec 2009 17:33:58 +0200 Subject: KVM: VMX: trace clts and lmsw instructions as cr accesses clts writes cr0.ts; lmsw writes cr0[0:15] - record that in ftrace. 
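A minimal sketch of how the traced lmsw value is composed (illustrative only; it simply mirrors the 0x0f/~0xf masks visible in the diff below and is not the kernel function):

static unsigned long lmsw_traced_cr0(unsigned long old_cr0, unsigned long msw)
{
    /* lmsw replaces only the low cr0 bits, so the trace records the
     * old upper bits combined with the new low bits. */
    return (old_cr0 & ~0xfUL) | (msw & 0x0fUL);
}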
Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 9b197b25b66..7c7b2eeea5d 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2997,6 +2997,7 @@ static int handle_cr(struct kvm_vcpu *vcpu) vmx_fpu_deactivate(vcpu); vcpu->arch.cr0 &= ~X86_CR0_TS; vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0); + trace_kvm_cr_write(0, vcpu->arch.cr0); vmx_fpu_activate(vcpu); skip_emulated_instruction(vcpu); return 1; @@ -3016,7 +3017,9 @@ static int handle_cr(struct kvm_vcpu *vcpu) } break; case 3: /* lmsw */ - kvm_lmsw(vcpu, (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f); + val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f; + trace_kvm_cr_write(0, (vcpu->arch.cr0 & ~0xful) | val); + kvm_lmsw(vcpu, val); skip_emulated_instruction(vcpu); return 1; -- cgit v1.2.3-70-g09d2 From 4d4ec0874583b127caac1d0f84033c8971b2fd2a Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Tue, 29 Dec 2009 18:07:30 +0200 Subject: KVM: Replace read accesses of vcpu->arch.cr0 by an accessor Since we'd like to allow the guest to own a few bits of cr0 at times, we need to know when we access those bits. Signed-off-by: Avi Kivity --- arch/x86/kvm/emulate.c | 6 +++--- arch/x86/kvm/kvm_cache_regs.h | 10 ++++++++++ arch/x86/kvm/mmu.c | 2 +- arch/x86/kvm/mmu.h | 2 +- arch/x86/kvm/svm.c | 9 +++++---- arch/x86/kvm/vmx.c | 16 ++++++++-------- arch/x86/kvm/x86.c | 20 ++++++++++---------- 7 files changed, 38 insertions(+), 27 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 7e8faea4651..0f89e320bc9 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -1515,7 +1515,7 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt) /* syscall is not available in real mode */ if (c->lock_prefix || ctxt->mode == X86EMUL_MODE_REAL - || !(ctxt->vcpu->arch.cr0 & X86_CR0_PE)) + || !kvm_read_cr0_bits(ctxt->vcpu, X86_CR0_PE)) return -1; setup_syscalls_segments(ctxt, &cs, &ss); @@ -1569,7 +1569,7 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt) /* inject #GP if in real mode or paging is disabled */ if (ctxt->mode == X86EMUL_MODE_REAL || - !(ctxt->vcpu->arch.cr0 & X86_CR0_PE)) { + !kvm_read_cr0_bits(ctxt->vcpu, X86_CR0_PE)) { kvm_inject_gp(ctxt->vcpu, 0); return -1; } @@ -1635,7 +1635,7 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt) /* inject #GP if in real mode or paging is disabled */ if (ctxt->mode == X86EMUL_MODE_REAL - || !(ctxt->vcpu->arch.cr0 & X86_CR0_PE)) { + || !kvm_read_cr0_bits(ctxt->vcpu, X86_CR0_PE)) { kvm_inject_gp(ctxt->vcpu, 0); return -1; } diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h index 35acc36e178..f46859751b3 100644 --- a/arch/x86/kvm/kvm_cache_regs.h +++ b/arch/x86/kvm/kvm_cache_regs.h @@ -38,6 +38,16 @@ static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index) return vcpu->arch.pdptrs[index]; } +static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask) +{ + return vcpu->arch.cr0 & mask; +} + +static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu) +{ + return kvm_read_cr0_bits(vcpu, ~0UL); +} + static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask) { if (mask & vcpu->arch.cr4_guest_owned_bits) diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 4f5508c3510..276bf7497c3 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -226,7 +226,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes); static int is_write_protection(struct kvm_vcpu *vcpu) { - 
return vcpu->arch.cr0 & X86_CR0_WP; + return kvm_read_cr0_bits(vcpu, X86_CR0_WP); } static int is_cpuid_PSE36(void) diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index ff583423968..599159f728b 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -79,7 +79,7 @@ static inline int is_pse(struct kvm_vcpu *vcpu) static inline int is_paging(struct kvm_vcpu *vcpu) { - return vcpu->arch.cr0 & X86_CR0_PG; + return kvm_read_cr0_bits(vcpu, X86_CR0_PG); } static inline int is_present_gpte(unsigned long pte) diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index cf64fc026e3..d3246ce70ae 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -980,7 +980,7 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) if (npt_enabled) goto set; - if ((vcpu->arch.cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) { + if (kvm_read_cr0_bits(vcpu, X86_CR0_TS) && !(cr0 & X86_CR0_TS)) { svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR); vcpu->fpu_active = 1; } @@ -1244,7 +1244,7 @@ static int ud_interception(struct vcpu_svm *svm) static int nm_interception(struct vcpu_svm *svm) { svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR); - if (!(svm->vcpu.arch.cr0 & X86_CR0_TS)) + if (!kvm_read_cr0_bits(&svm->vcpu, X86_CR0_TS)) svm->vmcb->save.cr0 &= ~X86_CR0_TS; svm->vcpu.fpu_active = 1; @@ -1743,7 +1743,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm) hsave->save.gdtr = vmcb->save.gdtr; hsave->save.idtr = vmcb->save.idtr; hsave->save.efer = svm->vcpu.arch.shadow_efer; - hsave->save.cr0 = svm->vcpu.arch.cr0; + hsave->save.cr0 = kvm_read_cr0(&svm->vcpu); hsave->save.cr4 = svm->vcpu.arch.cr4; hsave->save.rflags = vmcb->save.rflags; hsave->save.rip = svm->next_rip; @@ -2387,7 +2387,8 @@ static int handle_exit(struct kvm_vcpu *vcpu) if (npt_enabled) { int mmu_reload = 0; - if ((vcpu->arch.cr0 ^ svm->vmcb->save.cr0) & X86_CR0_PG) { + if ((kvm_read_cr0_bits(vcpu, X86_CR0_PG) ^ svm->vmcb->save.cr0) + & X86_CR0_PG) { svm_set_cr0(vcpu, svm->vmcb->save.cr0); mmu_reload = 1; } diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 7c7b2eeea5d..4c7177c489a 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -799,7 +799,7 @@ static void vmx_fpu_activate(struct kvm_vcpu *vcpu) return; vcpu->fpu_active = 1; vmcs_clear_bits(GUEST_CR0, X86_CR0_TS); - if (vcpu->arch.cr0 & X86_CR0_TS) + if (kvm_read_cr0_bits(vcpu, X86_CR0_TS)) vmcs_set_bits(GUEST_CR0, X86_CR0_TS); update_exception_bitmap(vcpu); } @@ -1785,7 +1785,7 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) vmx_flush_tlb(vcpu); vmcs_writel(GUEST_CR3, guest_cr3); - if (vcpu->arch.cr0 & X86_CR0_PE) + if (kvm_read_cr0_bits(vcpu, X86_CR0_PE)) vmx_fpu_deactivate(vcpu); } @@ -1840,7 +1840,7 @@ static void vmx_get_segment(struct kvm_vcpu *vcpu, static int vmx_get_cpl(struct kvm_vcpu *vcpu) { - if (!(vcpu->arch.cr0 & X86_CR0_PE)) /* if real mode */ + if (!kvm_read_cr0_bits(vcpu, X86_CR0_PE)) /* if real mode */ return 0; if (vmx_get_rflags(vcpu) & X86_EFLAGS_VM) /* if virtual 8086 */ @@ -2095,7 +2095,7 @@ static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu) static bool guest_state_valid(struct kvm_vcpu *vcpu) { /* real mode guest state checks */ - if (!(vcpu->arch.cr0 & X86_CR0_PE)) { + if (!kvm_read_cr0_bits(vcpu, X86_CR0_PE)) { if (!rmode_segment_valid(vcpu, VCPU_SREG_CS)) return false; if (!rmode_segment_valid(vcpu, VCPU_SREG_SS)) @@ -2580,7 +2580,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu) vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); vmx->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET; - 
vmx_set_cr0(&vmx->vcpu, vmx->vcpu.arch.cr0); /* enter rmode */ + vmx_set_cr0(&vmx->vcpu, kvm_read_cr0(vcpu)); /* enter rmode */ vmx_set_cr4(&vmx->vcpu, 0); vmx_set_efer(&vmx->vcpu, 0); vmx_fpu_activate(&vmx->vcpu); @@ -2996,8 +2996,8 @@ static int handle_cr(struct kvm_vcpu *vcpu) case 2: /* clts */ vmx_fpu_deactivate(vcpu); vcpu->arch.cr0 &= ~X86_CR0_TS; - vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0); - trace_kvm_cr_write(0, vcpu->arch.cr0); + vmcs_writel(CR0_READ_SHADOW, kvm_read_cr0(vcpu)); + trace_kvm_cr_write(0, kvm_read_cr0(vcpu)); vmx_fpu_activate(vcpu); skip_emulated_instruction(vcpu); return 1; @@ -3018,7 +3018,7 @@ static int handle_cr(struct kvm_vcpu *vcpu) break; case 3: /* lmsw */ val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f; - trace_kvm_cr_write(0, (vcpu->arch.cr0 & ~0xful) | val); + trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val); kvm_lmsw(vcpu, val); skip_emulated_instruction(vcpu); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index c990424d86d..748b15d8e46 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -430,7 +430,7 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) { if (cr0 & CR0_RESERVED_BITS) { printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n", - cr0, vcpu->arch.cr0); + cr0, kvm_read_cr0(vcpu)); kvm_inject_gp(vcpu, 0); return; } @@ -488,7 +488,7 @@ EXPORT_SYMBOL_GPL(kvm_set_cr0); void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw) { - kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f)); + kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0ful) | (msw & 0x0f)); } EXPORT_SYMBOL_GPL(kvm_lmsw); @@ -3095,7 +3095,7 @@ int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address) int emulate_clts(struct kvm_vcpu *vcpu) { - kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 & ~X86_CR0_TS); + kvm_x86_ops->set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS)); return X86EMUL_CONTINUE; } @@ -3714,7 +3714,7 @@ unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr) switch (cr) { case 0: - value = vcpu->arch.cr0; + value = kvm_read_cr0(vcpu); break; case 2: value = vcpu->arch.cr2; @@ -3741,7 +3741,7 @@ void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val, { switch (cr) { case 0: - kvm_set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val)); + kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val)); *rflags = kvm_get_rflags(vcpu); break; case 2: @@ -4335,7 +4335,7 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, sregs->gdt.limit = dt.limit; sregs->gdt.base = dt.base; - sregs->cr0 = vcpu->arch.cr0; + sregs->cr0 = kvm_read_cr0(vcpu); sregs->cr2 = vcpu->arch.cr2; sregs->cr3 = vcpu->arch.cr3; sregs->cr4 = kvm_read_cr4(vcpu); @@ -4521,7 +4521,7 @@ int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, { struct kvm_segment kvm_seg; - if (is_vm86_segment(vcpu, seg) || !(vcpu->arch.cr0 & X86_CR0_PE)) + if (is_vm86_segment(vcpu, seg) || !(kvm_read_cr0_bits(vcpu, X86_CR0_PE))) return kvm_load_realmode_segment(vcpu, selector, seg); if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg)) return 1; @@ -4799,7 +4799,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason) &nseg_desc); } - kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 | X86_CR0_TS); + kvm_x86_ops->set_cr0(vcpu, kvm_read_cr0(vcpu) | X86_CR0_TS); seg_desct_to_kvm_desct(&nseg_desc, tss_selector, &tr_seg); tr_seg.type = 11; kvm_set_segment(vcpu, &tr_seg, VCPU_SREG_TR); @@ -4834,7 +4834,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, kvm_x86_ops->set_efer(vcpu, sregs->efer); kvm_set_apic_base(vcpu, 
sregs->apic_base); - mmu_reset_needed |= vcpu->arch.cr0 != sregs->cr0; + mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0; kvm_x86_ops->set_cr0(vcpu, sregs->cr0); vcpu->arch.cr0 = sregs->cr0; @@ -4873,7 +4873,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, /* Older userspace won't unhalt the vcpu on reset. */ if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 && sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 && - !(vcpu->arch.cr0 & X86_CR0_PE)) + !(kvm_read_cr0_bits(vcpu, X86_CR0_PE))) vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; vcpu_put(vcpu); -- cgit v1.2.3-70-g09d2 From e8467fda83cdc9de53972fee0cd2e6916cf66f41 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Tue, 29 Dec 2009 18:43:06 +0200 Subject: KVM: VMX: Allow the guest to own some cr0 bits We will use this later to give the guest ownership of cr0.ts. Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_host.h | 2 ++ arch/x86/kvm/kvm_cache_regs.h | 2 ++ arch/x86/kvm/svm.c | 5 +++++ arch/x86/kvm/vmx.c | 9 +++++++++ 4 files changed, 18 insertions(+) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index a4de557ad73..693046a7a12 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -269,6 +269,7 @@ struct kvm_vcpu_arch { u32 regs_dirty; unsigned long cr0; + unsigned long cr0_guest_owned_bits; unsigned long cr2; unsigned long cr3; unsigned long cr4; @@ -489,6 +490,7 @@ struct kvm_x86_ops { void (*set_segment)(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l); + void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu); void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu); void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0); void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3); diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h index f46859751b3..6b419a36cbd 100644 --- a/arch/x86/kvm/kvm_cache_regs.h +++ b/arch/x86/kvm/kvm_cache_regs.h @@ -40,6 +40,8 @@ static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index) static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask) { + if (mask & vcpu->arch.cr0_guest_owned_bits) + kvm_x86_ops->decache_cr0_guest_bits(vcpu); return vcpu->arch.cr0 & mask; } diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index d3246ce70ae..3899c2d1983 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -956,6 +956,10 @@ static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt) svm->vmcb->save.gdtr.base = dt->base ; } +static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu) +{ +} + static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu) { } @@ -2948,6 +2952,7 @@ static struct kvm_x86_ops svm_x86_ops = { .set_segment = svm_set_segment, .get_cpl = svm_get_cpl, .get_cs_db_l_bits = kvm_get_cs_db_l_bits, + .decache_cr0_guest_bits = svm_decache_cr0_guest_bits, .decache_cr4_guest_bits = svm_decache_cr4_guest_bits, .set_cr0 = svm_set_cr0, .set_cr3 = svm_set_cr3, diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 4c7177c489a..dbcdb55094f 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -1653,6 +1653,14 @@ static void vmx_flush_tlb(struct kvm_vcpu *vcpu) ept_sync_context(construct_eptp(vcpu->arch.mmu.root_hpa)); } +static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu) +{ + ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits; + + vcpu->arch.cr0 &= ~cr0_guest_owned_bits; + vcpu->arch.cr0 |= 
vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits; +} + static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu) { ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits; @@ -4106,6 +4114,7 @@ static struct kvm_x86_ops vmx_x86_ops = { .set_segment = vmx_set_segment, .get_cpl = vmx_get_cpl, .get_cs_db_l_bits = vmx_get_cs_db_l_bits, + .decache_cr0_guest_bits = vmx_decache_cr0_guest_bits, .decache_cr4_guest_bits = vmx_decache_cr4_guest_bits, .set_cr0 = vmx_set_cr0, .set_cr3 = vmx_set_cr3, -- cgit v1.2.3-70-g09d2 From 02daab21d94dc4cf01b2fd09863d59a436900322 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Wed, 30 Dec 2009 12:40:26 +0200 Subject: KVM: Lazify fpu activation and deactivation Defer fpu deactivation as much as possible - if the guest fpu is loaded, keep it loaded until the next heavyweight exit (where we are forced to unload it). This reduces unnecessary exits. We also defer fpu activation on clts; while clts signals the intent to use the fpu, we can't be sure the guest will actually use it. Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_host.h | 1 + arch/x86/kvm/svm.c | 35 +++++++++++++++++++++-------------- arch/x86/kvm/vmx.c | 25 +++++++++---------------- arch/x86/kvm/x86.c | 7 ++++++- include/linux/kvm_host.h | 1 + 5 files changed, 38 insertions(+), 31 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 693046a7a12..93bee7abb71 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -506,6 +506,7 @@ struct kvm_x86_ops { void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg); unsigned long (*get_rflags)(struct kvm_vcpu *vcpu); void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags); + void (*fpu_deactivate)(struct kvm_vcpu *vcpu); void (*tlb_flush)(struct kvm_vcpu *vcpu); diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 3899c2d1983..5b336a80f31 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -984,17 +984,11 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) if (npt_enabled) goto set; - if (kvm_read_cr0_bits(vcpu, X86_CR0_TS) && !(cr0 & X86_CR0_TS)) { - svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR); - vcpu->fpu_active = 1; - } - vcpu->arch.cr0 = cr0; cr0 |= X86_CR0_PG | X86_CR0_WP; - if (!vcpu->fpu_active) { - svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR); + + if (!vcpu->fpu_active) cr0 |= X86_CR0_TS; - } set: /* * re-enable caching here because the QEMU bios @@ -1250,6 +1244,8 @@ static int nm_interception(struct vcpu_svm *svm) svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR); if (!kvm_read_cr0_bits(&svm->vcpu, X86_CR0_TS)) svm->vmcb->save.cr0 &= ~X86_CR0_TS; + else + svm->vmcb->save.cr0 |= X86_CR0_TS; svm->vcpu.fpu_active = 1; return 1; @@ -2586,6 +2582,8 @@ static void svm_flush_tlb(struct kvm_vcpu *vcpu) static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu) { + if (npt_enabled) + vcpu->fpu_active = 1; } static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu) @@ -2805,12 +2803,6 @@ static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root) svm->vmcb->save.cr3 = root; force_new_asid(vcpu); - - if (vcpu->fpu_active) { - svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR); - svm->vmcb->save.cr0 |= X86_CR0_TS; - vcpu->fpu_active = 0; - } } static int is_disabled(void) @@ -2926,6 +2918,20 @@ static bool svm_rdtscp_supported(void) return false; } +static void svm_fpu_deactivate(struct kvm_vcpu *vcpu) +{ + struct vcpu_svm *svm = to_svm(vcpu); + + if 
(npt_enabled) { + /* hack: npt requires active fpu at this time */ + vcpu->fpu_active = 1; + return; + } + + svm->vmcb->control.intercept_exceptions |= 1 << NM_VECTOR; + svm->vmcb->save.cr0 |= X86_CR0_TS; +} + static struct kvm_x86_ops svm_x86_ops = { .cpu_has_kvm_support = has_svm, .disabled_by_bios = is_disabled, @@ -2967,6 +2973,7 @@ static struct kvm_x86_ops svm_x86_ops = { .cache_reg = svm_cache_reg, .get_rflags = svm_get_rflags, .set_rflags = svm_set_rflags, + .fpu_deactivate = svm_fpu_deactivate, .tlb_flush = svm_flush_tlb, diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index dbcdb55094f..d11be3fb7c8 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -66,7 +66,7 @@ module_param(emulate_invalid_guest_state, bool, S_IRUGO); #define KVM_GUEST_CR0_MASK \ (KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE) #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST \ - (X86_CR0_WP | X86_CR0_NE | X86_CR0_TS | X86_CR0_MP) + (X86_CR0_WP | X86_CR0_NE | X86_CR0_MP) #define KVM_VM_CR0_ALWAYS_ON \ (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE) #define KVM_CR4_GUEST_OWNED_BITS \ @@ -579,9 +579,8 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu) { u32 eb; - eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR); - if (!vcpu->fpu_active) - eb |= 1u << NM_VECTOR; + eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) + | (1u << NM_VECTOR); /* * Unconditionally intercept #DB so we can maintain dr6 without * reading it every exit. @@ -595,6 +594,8 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu) eb = ~0; if (enable_ept) eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */ + if (vcpu->fpu_active) + eb &= ~(1u << NM_VECTOR); vmcs_write32(EXCEPTION_BITMAP, eb); } @@ -806,9 +807,6 @@ static void vmx_fpu_activate(struct kvm_vcpu *vcpu) static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu) { - if (!vcpu->fpu_active) - return; - vcpu->fpu_active = 0; vmcs_set_bits(GUEST_CR0, X86_CR0_TS); update_exception_bitmap(vcpu); } @@ -1737,8 +1735,6 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) else hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON; - vmx_fpu_deactivate(vcpu); - if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE)) enter_pmode(vcpu); @@ -1757,12 +1753,12 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) if (enable_ept) ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu); + if (!vcpu->fpu_active) + hw_cr0 |= X86_CR0_TS; + vmcs_writel(CR0_READ_SHADOW, cr0); vmcs_writel(GUEST_CR0, hw_cr0); vcpu->arch.cr0 = cr0; - - if (!(cr0 & X86_CR0_TS) || !(cr0 & X86_CR0_PE)) - vmx_fpu_activate(vcpu); } static u64 construct_eptp(unsigned long root_hpa) @@ -1793,8 +1789,6 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) vmx_flush_tlb(vcpu); vmcs_writel(GUEST_CR3, guest_cr3); - if (kvm_read_cr0_bits(vcpu, X86_CR0_PE)) - vmx_fpu_deactivate(vcpu); } static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) @@ -3002,11 +2996,9 @@ static int handle_cr(struct kvm_vcpu *vcpu) }; break; case 2: /* clts */ - vmx_fpu_deactivate(vcpu); vcpu->arch.cr0 &= ~X86_CR0_TS; vmcs_writel(CR0_READ_SHADOW, kvm_read_cr0(vcpu)); trace_kvm_cr_write(0, kvm_read_cr0(vcpu)); - vmx_fpu_activate(vcpu); skip_emulated_instruction(vcpu); return 1; case 1: /*mov from cr*/ @@ -4127,6 +4119,7 @@ static struct kvm_x86_ops vmx_x86_ops = { .cache_reg = vmx_cache_reg, .get_rflags = vmx_get_rflags, .set_rflags = vmx_set_rflags, + .fpu_deactivate = vmx_fpu_deactivate, .tlb_flush = vmx_flush_tlb, diff --git 
a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 748b15d8e46..1de2ad7a004 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1509,8 +1509,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { - kvm_x86_ops->vcpu_put(vcpu); kvm_put_guest_fpu(vcpu); + kvm_x86_ops->vcpu_put(vcpu); } static int is_efer_nx(void) @@ -4006,6 +4006,10 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) r = 0; goto out; } + if (test_and_clear_bit(KVM_REQ_DEACTIVATE_FPU, &vcpu->requests)) { + vcpu->fpu_active = 0; + kvm_x86_ops->fpu_deactivate(vcpu); + } } preempt_disable(); @@ -5075,6 +5079,7 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) kvm_fx_save(&vcpu->arch.guest_fx_image); kvm_fx_restore(&vcpu->arch.host_fx_image); ++vcpu->stat.fpu_reload; + set_bit(KVM_REQ_DEACTIVATE_FPU, &vcpu->requests); } EXPORT_SYMBOL_GPL(kvm_put_guest_fpu); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index bb0314ea926..dfde04b0d45 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -38,6 +38,7 @@ #define KVM_REQ_MMU_SYNC 7 #define KVM_REQ_KVMCLOCK_UPDATE 8 #define KVM_REQ_KICK 9 +#define KVM_REQ_DEACTIVATE_FPU 10 #define KVM_USERSPACE_IRQ_SOURCE_ID 0 -- cgit v1.2.3-70-g09d2 From edcafe3c5a06f46407c3f60145a36f269e56ff7f Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Wed, 30 Dec 2009 18:07:40 +0200 Subject: KVM: VMX: Give the guest ownership of cr0.ts when the fpu is active If the guest fpu is loaded, there is nothing interesing about cr0.ts; let the guest play with it as it will. This makes context switches between fpu intensive guest processes faster, as we won't trap the clts and cr0 write instructions. [marcelo: fix cr0 read shadow update on fpu deactivation; kills F8 install] Signed-off-by: Avi Kivity Signed-off-by: Marcelo Tosatti --- arch/x86/kvm/vmx.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index d11be3fb7c8..fc1964d5e97 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -803,12 +803,20 @@ static void vmx_fpu_activate(struct kvm_vcpu *vcpu) if (kvm_read_cr0_bits(vcpu, X86_CR0_TS)) vmcs_set_bits(GUEST_CR0, X86_CR0_TS); update_exception_bitmap(vcpu); + vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS; + vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); } +static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu); + static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu) { + vmx_decache_cr0_guest_bits(vcpu); vmcs_set_bits(GUEST_CR0, X86_CR0_TS); update_exception_bitmap(vcpu); + vcpu->arch.cr0_guest_owned_bits = 0; + vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); + vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0); } static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu) @@ -2996,8 +3004,7 @@ static int handle_cr(struct kvm_vcpu *vcpu) }; break; case 2: /* clts */ - vcpu->arch.cr0 &= ~X86_CR0_TS; - vmcs_writel(CR0_READ_SHADOW, kvm_read_cr0(vcpu)); + vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS)); trace_kvm_cr_write(0, kvm_read_cr0(vcpu)); skip_emulated_instruction(vcpu); return 1; -- cgit v1.2.3-70-g09d2 From f9a48e6a18c210c4bf34769fa05ede250575c6a1 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Wed, 6 Jan 2010 19:10:22 +0200 Subject: KVM: Set cr0.et when the guest writes cr0 Follow the hardware. 
Signed-off-by: Avi Kivity --- arch/x86/kvm/x86.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 1de2ad7a004..1ad34d185da 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -428,6 +428,8 @@ out: void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) { + cr0 |= X86_CR0_ET; + if (cr0 & CR0_RESERVED_BITS) { printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n", cr0, kvm_read_cr0(vcpu)); -- cgit v1.2.3-70-g09d2 From dc77270f960a8b3cc39a6349a9fad58cc6053d53 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Wed, 6 Jan 2010 13:13:01 +0200 Subject: KVM: SVM: Fix SVM_CR0_SELECTIVE_MASK Instead of selecting TS and MP as the comments say, the macro included TS and PE. Luckily the macro is unused now, but fix in order to save a few hours of debugging from anyone who attempts to use it. Acked-by: Joerg Roedel Signed-off-by: Avi Kivity --- arch/x86/include/asm/svm.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h index 1fecb7e6113..38638cd2fa4 100644 --- a/arch/x86/include/asm/svm.h +++ b/arch/x86/include/asm/svm.h @@ -313,7 +313,7 @@ struct __attribute__ ((__packed__)) vmcb { #define SVM_EXIT_ERR -1 -#define SVM_CR0_SELECTIVE_MASK (1 << 3 | 1) /* TS and MP */ +#define SVM_CR0_SELECTIVE_MASK (X86_CR0_TS | X86_CR0_MP) #define SVM_VMLOAD ".byte 0x0f, 0x01, 0xda" #define SVM_VMRUN ".byte 0x0f, 0x01, 0xd8" -- cgit v1.2.3-70-g09d2 From bff7827479ed004c0394e6e7b35ae601bb1a97ad Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Thu, 7 Jan 2010 13:16:08 +0200 Subject: KVM: SVM: Initialize fpu_active in init_vmcb() init_vmcb() sets up the intercepts as if the fpu is active, so initialize it there. This avoids an INIT from setting up intercepts inconsistent with fpu_active. Acked-by: Joerg Roedel Signed-off-by: Avi Kivity --- arch/x86/kvm/svm.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 5b336a80f31..949990e4a22 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -540,6 +540,8 @@ static void init_vmcb(struct vcpu_svm *svm) struct vmcb_control_area *control = &svm->vmcb->control; struct vmcb_save_area *save = &svm->vmcb->save; + svm->vcpu.fpu_active = 1; + control->intercept_cr_read = INTERCEPT_CR0_MASK | INTERCEPT_CR3_MASK | INTERCEPT_CR4_MASK; @@ -730,7 +732,6 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id) init_vmcb(svm); fx_init(&svm->vcpu); - svm->vcpu.fpu_active = 1; svm->vcpu.arch.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE; if (kvm_vcpu_is_bsp(&svm->vcpu)) svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP; -- cgit v1.2.3-70-g09d2 From 888f9f3e0cfa32baf05b3840f0248f5502292a0f Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sun, 10 Jan 2010 12:14:04 +0200 Subject: KVM: SVM: Restore unconditional cr0 intercept under npt Currently we don't intercept cr0 at all when npt is enabled. This improves performance but requires us to activate the fpu at all times. Remove this behaviour in preparation for adding selective cr0 intercepts. 
Acked-by: Joerg Roedel Signed-off-by: Avi Kivity --- arch/x86/kvm/svm.c | 29 +++++++---------------------- 1 file changed, 7 insertions(+), 22 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 949990e4a22..27273ed24c4 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -643,10 +643,8 @@ static void init_vmcb(struct vcpu_svm *svm) control->intercept &= ~((1ULL << INTERCEPT_TASK_SWITCH) | (1ULL << INTERCEPT_INVLPG)); control->intercept_exceptions &= ~(1 << PF_VECTOR); - control->intercept_cr_read &= ~(INTERCEPT_CR0_MASK| - INTERCEPT_CR3_MASK); - control->intercept_cr_write &= ~(INTERCEPT_CR0_MASK| - INTERCEPT_CR3_MASK); + control->intercept_cr_read &= ~INTERCEPT_CR3_MASK; + control->intercept_cr_write &= ~INTERCEPT_CR3_MASK; save->g_pat = 0x0007040600070406ULL; save->cr3 = 0; save->cr4 = 0; @@ -982,15 +980,13 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) } } #endif - if (npt_enabled) - goto set; - vcpu->arch.cr0 = cr0; - cr0 |= X86_CR0_PG | X86_CR0_WP; + + if (!npt_enabled) + cr0 |= X86_CR0_PG | X86_CR0_WP; if (!vcpu->fpu_active) cr0 |= X86_CR0_TS; -set: /* * re-enable caching here because the QEMU bios * does not do it - this results in some delay at @@ -2386,21 +2382,10 @@ static int handle_exit(struct kvm_vcpu *vcpu) svm_complete_interrupts(svm); - if (npt_enabled) { - int mmu_reload = 0; - if ((kvm_read_cr0_bits(vcpu, X86_CR0_PG) ^ svm->vmcb->save.cr0) - & X86_CR0_PG) { - svm_set_cr0(vcpu, svm->vmcb->save.cr0); - mmu_reload = 1; - } + if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR0_MASK)) vcpu->arch.cr0 = svm->vmcb->save.cr0; + if (npt_enabled) vcpu->arch.cr3 = svm->vmcb->save.cr3; - if (mmu_reload) { - kvm_mmu_reset_context(vcpu); - kvm_mmu_load(vcpu); - } - } - if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) { kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; -- cgit v1.2.3-70-g09d2 From d225157bc6a442b1214882635fbf287d7d0e8133 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Wed, 6 Jan 2010 10:55:27 +0200 Subject: KVM: SVM: Selective cr0 intercept If two conditions apply: - no bits outside TS and EM differ between the host and guest cr0 - the fpu is active then we can activate the selective cr0 write intercept and drop the unconditional cr0 read and write intercept, and allow the guest to run with the host fpu state. This reduces cr0 exits due to guest fpu management while the guest fpu is loaded. 
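Roughly, the intercept decision described above can be sketched as follows (illustrative only; selective_mask stands in for the cr0 bits the guest may toggle freely, and the real logic is update_cr0_intercept() in the diff that follows):

static int need_full_cr0_intercept(unsigned long guest_cr0,
                                   unsigned long host_cr0,
                                   unsigned long selective_mask,
                                   int fpu_active)
{
    /* Drop the unconditional cr0 intercepts only when the fpu is live
     * and host and guest agree on every bit outside the mask. */
    return !fpu_active ||
           ((guest_cr0 & ~selective_mask) != (host_cr0 & ~selective_mask));
}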
Acked-by: Joerg Roedel Signed-off-by: Avi Kivity --- arch/x86/kvm/svm.c | 32 ++++++++++++++++++++++++++------ 1 file changed, 26 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 27273ed24c4..83c7ab1bdad 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -571,6 +571,7 @@ static void init_vmcb(struct vcpu_svm *svm) control->intercept = (1ULL << INTERCEPT_INTR) | (1ULL << INTERCEPT_NMI) | (1ULL << INTERCEPT_SMI) | + (1ULL << INTERCEPT_SELECTIVE_CR0) | (1ULL << INTERCEPT_CPUID) | (1ULL << INTERCEPT_INVD) | (1ULL << INTERCEPT_HLT) | @@ -963,6 +964,27 @@ static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu) { } +static void update_cr0_intercept(struct vcpu_svm *svm) +{ + ulong gcr0 = svm->vcpu.arch.cr0; + u64 *hcr0 = &svm->vmcb->save.cr0; + + if (!svm->vcpu.fpu_active) + *hcr0 |= SVM_CR0_SELECTIVE_MASK; + else + *hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK) + | (gcr0 & SVM_CR0_SELECTIVE_MASK); + + + if (gcr0 == *hcr0 && svm->vcpu.fpu_active) { + svm->vmcb->control.intercept_cr_read &= ~INTERCEPT_CR0_MASK; + svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR0_MASK; + } else { + svm->vmcb->control.intercept_cr_read |= INTERCEPT_CR0_MASK; + svm->vmcb->control.intercept_cr_write |= INTERCEPT_CR0_MASK; + } +} + static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) { struct vcpu_svm *svm = to_svm(vcpu); @@ -994,6 +1016,7 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) */ cr0 &= ~(X86_CR0_CD | X86_CR0_NW); svm->vmcb->save.cr0 = cr0; + update_cr0_intercept(svm); } static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) @@ -1239,11 +1262,8 @@ static int ud_interception(struct vcpu_svm *svm) static int nm_interception(struct vcpu_svm *svm) { svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR); - if (!kvm_read_cr0_bits(&svm->vcpu, X86_CR0_TS)) - svm->vmcb->save.cr0 &= ~X86_CR0_TS; - else - svm->vmcb->save.cr0 |= X86_CR0_TS; svm->vcpu.fpu_active = 1; + update_cr0_intercept(svm); return 1; } @@ -2296,7 +2316,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = { [SVM_EXIT_READ_CR3] = emulate_on_interception, [SVM_EXIT_READ_CR4] = emulate_on_interception, [SVM_EXIT_READ_CR8] = emulate_on_interception, - /* for now: */ + [SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception, [SVM_EXIT_WRITE_CR0] = emulate_on_interception, [SVM_EXIT_WRITE_CR3] = emulate_on_interception, [SVM_EXIT_WRITE_CR4] = emulate_on_interception, @@ -2914,8 +2934,8 @@ static void svm_fpu_deactivate(struct kvm_vcpu *vcpu) return; } + update_cr0_intercept(svm); svm->vmcb->control.intercept_exceptions |= 1 << NM_VECTOR; - svm->vmcb->save.cr0 |= X86_CR0_TS; } static struct kvm_x86_ops svm_x86_ops = { -- cgit v1.2.3-70-g09d2 From 4610c83cdc8bff04f2f22883749f716b1ccc502f Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sun, 10 Jan 2010 12:19:20 +0200 Subject: KVM: SVM: Lazy fpu with npt Now that we can allow the guest to play with cr0 when the fpu is loaded, we can enable lazy fpu when npt is in use. 
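The deferral pattern behind these fpu patches can be shown in isolation (illustrative only; the names here are invented, while the kernel version uses vcpu->requests and KVM_REQ_DEACTIVATE_FPU as introduced in the lazy fpu patch above):

struct vcpu_model {
    unsigned long requests;
    int fpu_active;
};

#define REQ_DEACTIVATE_FPU (1UL << 10)

static void put_guest_fpu(struct vcpu_model *v)
{
    /* Heavyweight exit: only record the intent, do nothing yet. */
    v->requests |= REQ_DEACTIVATE_FPU;
}

static void before_guest_entry(struct vcpu_model *v)
{
    /* Act on the request lazily, right before re-entering the guest;
     * this is the point where the vendor fpu_deactivate hook arms the
     * NM intercept or sets cr0.ts in the diffs above. */
    if (v->requests & REQ_DEACTIVATE_FPU) {
        v->requests &= ~REQ_DEACTIVATE_FPU;
        v->fpu_active = 0;
    }
}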
Acked-by: Joerg Roedel Signed-off-by: Avi Kivity --- arch/x86/kvm/svm.c | 8 -------- 1 file changed, 8 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 83c7ab1bdad..8d7cb62ebef 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -2588,8 +2588,6 @@ static void svm_flush_tlb(struct kvm_vcpu *vcpu) static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu) { - if (npt_enabled) - vcpu->fpu_active = 1; } static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu) @@ -2928,12 +2926,6 @@ static void svm_fpu_deactivate(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); - if (npt_enabled) { - /* hack: npt requires active fpu at this time */ - vcpu->fpu_active = 1; - return; - } - update_cr0_intercept(svm); svm->vmcb->control.intercept_exceptions |= 1 << NM_VECTOR; } -- cgit v1.2.3-70-g09d2 From 0f0412c1a734700fb8ccccb1c09371642e029e2e Mon Sep 17 00:00:00 2001 From: Roel Kluin Date: Thu, 14 Jan 2010 18:05:58 +0100 Subject: KVM: ia64: remove redundant kvm_get_exit_data() NULL tests kvm_get_exit_data() cannot return a NULL pointer. Signed-off-by: Roel Kluin Signed-off-by: Avi Kivity --- arch/ia64/kvm/kvm_fw.c | 28 +++++++++++++--------------- 1 file changed, 13 insertions(+), 15 deletions(-) (limited to 'arch') diff --git a/arch/ia64/kvm/kvm_fw.c b/arch/ia64/kvm/kvm_fw.c index e4b82319881..cb548ee9fca 100644 --- a/arch/ia64/kvm/kvm_fw.c +++ b/arch/ia64/kvm/kvm_fw.c @@ -75,7 +75,7 @@ static void set_pal_result(struct kvm_vcpu *vcpu, struct exit_ctl_data *p; p = kvm_get_exit_data(vcpu); - if (p && p->exit_reason == EXIT_REASON_PAL_CALL) { + if (p->exit_reason == EXIT_REASON_PAL_CALL) { p->u.pal_data.ret = result; return ; } @@ -87,7 +87,7 @@ static void set_sal_result(struct kvm_vcpu *vcpu, struct exit_ctl_data *p; p = kvm_get_exit_data(vcpu); - if (p && p->exit_reason == EXIT_REASON_SAL_CALL) { + if (p->exit_reason == EXIT_REASON_SAL_CALL) { p->u.sal_data.ret = result; return ; } @@ -322,7 +322,7 @@ static u64 kvm_get_pal_call_index(struct kvm_vcpu *vcpu) struct exit_ctl_data *p; p = kvm_get_exit_data(vcpu); - if (p && (p->exit_reason == EXIT_REASON_PAL_CALL)) + if (p->exit_reason == EXIT_REASON_PAL_CALL) index = p->u.pal_data.gr28; return index; @@ -646,18 +646,16 @@ static void kvm_get_sal_call_data(struct kvm_vcpu *vcpu, u64 *in0, u64 *in1, p = kvm_get_exit_data(vcpu); - if (p) { - if (p->exit_reason == EXIT_REASON_SAL_CALL) { - *in0 = p->u.sal_data.in0; - *in1 = p->u.sal_data.in1; - *in2 = p->u.sal_data.in2; - *in3 = p->u.sal_data.in3; - *in4 = p->u.sal_data.in4; - *in5 = p->u.sal_data.in5; - *in6 = p->u.sal_data.in6; - *in7 = p->u.sal_data.in7; - return ; - } + if (p->exit_reason == EXIT_REASON_SAL_CALL) { + *in0 = p->u.sal_data.in0; + *in1 = p->u.sal_data.in1; + *in2 = p->u.sal_data.in2; + *in3 = p->u.sal_data.in3; + *in4 = p->u.sal_data.in4; + *in5 = p->u.sal_data.in5; + *in6 = p->u.sal_data.in6; + *in7 = p->u.sal_data.in7; + return ; } *in0 = 0; } -- cgit v1.2.3-70-g09d2 From fbad5f1dfdd097ca282ee3a796986e11eb5a971f Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Fri, 15 Jan 2010 14:49:09 +0100 Subject: KVM: PPC: Export __giveup_vsx We need to explicitly only giveup VSX in KVM, so let's export that specific function to module space. 
Signed-off-by: Alexander Graf Signed-off-by: Avi Kivity --- arch/powerpc/kernel/ppc_ksyms.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c index 425451453e9..ab3e392ac63 100644 --- a/arch/powerpc/kernel/ppc_ksyms.c +++ b/arch/powerpc/kernel/ppc_ksyms.c @@ -107,6 +107,7 @@ EXPORT_SYMBOL(giveup_altivec); #endif /* CONFIG_ALTIVEC */ #ifdef CONFIG_VSX EXPORT_SYMBOL(giveup_vsx); +EXPORT_SYMBOL_GPL(__giveup_vsx); #endif /* CONFIG_VSX */ #ifdef CONFIG_SPE EXPORT_SYMBOL(giveup_spe); -- cgit v1.2.3-70-g09d2 From d5e528136cda31a32ff7d1eaa8d06220eb443781 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Fri, 15 Jan 2010 14:49:10 +0100 Subject: KVM: PPC: Add helper functions to call real mode loaders Linux contains quite some bits of code to load FPU, Altivec and VSX lazily for a task. It calls those bits in real mode, coming from an interrupt handler. For KVM we better reuse those, so let's wrap a bit of trampoline magic around them and then we can call them from normal module code. Signed-off-by: Alexander Graf Signed-off-by: Avi Kivity --- arch/powerpc/include/asm/kvm_book3s.h | 3 +++ arch/powerpc/kvm/book3s_64_exports.c | 7 +++++++ arch/powerpc/kvm/book3s_64_rmhandlers.S | 34 +++++++++++++++++++++++++++++++++ 3 files changed, 44 insertions(+) (limited to 'arch') diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index c7db69f1e77..e1b441cce41 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -124,6 +124,9 @@ extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, extern u32 kvmppc_trampoline_lowmem; extern u32 kvmppc_trampoline_enter; extern void kvmppc_rmcall(ulong srr0, ulong srr1); +extern void kvmppc_load_up_fpu(void); +extern void kvmppc_load_up_altivec(void); +extern void kvmppc_load_up_vsx(void); static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu) { diff --git a/arch/powerpc/kvm/book3s_64_exports.c b/arch/powerpc/kvm/book3s_64_exports.c index 99b07125c52..1dd5a1ddfd0 100644 --- a/arch/powerpc/kvm/book3s_64_exports.c +++ b/arch/powerpc/kvm/book3s_64_exports.c @@ -23,3 +23,10 @@ EXPORT_SYMBOL_GPL(kvmppc_trampoline_enter); EXPORT_SYMBOL_GPL(kvmppc_trampoline_lowmem); EXPORT_SYMBOL_GPL(kvmppc_rmcall); +EXPORT_SYMBOL_GPL(kvmppc_load_up_fpu); +#ifdef CONFIG_ALTIVEC +EXPORT_SYMBOL_GPL(kvmppc_load_up_altivec); +#endif +#ifdef CONFIG_VSX +EXPORT_SYMBOL_GPL(kvmppc_load_up_vsx); +#endif diff --git a/arch/powerpc/kvm/book3s_64_rmhandlers.S b/arch/powerpc/kvm/book3s_64_rmhandlers.S index e7091c9459a..c83c60ad96c 100644 --- a/arch/powerpc/kvm/book3s_64_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_64_rmhandlers.S @@ -158,6 +158,40 @@ _GLOBAL(kvmppc_rmcall) mtsrr1 r4 RFI +/* + * Activate current's external feature (FPU/Altivec/VSX) + */ +#define define_load_up(what) \ + \ +_GLOBAL(kvmppc_load_up_ ## what); \ + subi r1, r1, INT_FRAME_SIZE; \ + mflr r3; \ + std r3, _LINK(r1); \ + mfmsr r4; \ + std r31, GPR3(r1); \ + mr r31, r4; \ + li r5, MSR_DR; \ + oris r5, r5, MSR_EE@h; \ + andc r4, r4, r5; \ + mtmsr r4; \ + \ + bl .load_up_ ## what; \ + \ + mtmsr r31; \ + ld r3, _LINK(r1); \ + ld r31, GPR3(r1); \ + addi r1, r1, INT_FRAME_SIZE; \ + mtlr r3; \ + blr + +define_load_up(fpu) +#ifdef CONFIG_ALTIVEC +define_load_up(altivec) +#endif +#ifdef CONFIG_VSX +define_load_up(vsx) +#endif + .global kvmppc_trampoline_lowmem kvmppc_trampoline_lowmem: .long kvmppc_handler_lowmem_trampoline - _stext -- cgit 
v1.2.3-70-g09d2 From 180a34d2d3fda0151154f9cead4aab9dddd3d0c1 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Fri, 15 Jan 2010 14:49:11 +0100 Subject: KVM: PPC: Add support for FPU/Altivec/VSX When our guest starts using either the FPU, Altivec or VSX we need to make sure Linux knows about it and sneak into its process switching code accordingly. This patch makes accesses to the above parts of the system work inside the VM. Signed-off-by: Alexander Graf Signed-off-by: Avi Kivity --- arch/powerpc/include/asm/kvm_host.h | 14 ++- arch/powerpc/kvm/book3s.c | 193 +++++++++++++++++++++++++++++++++++- 2 files changed, 201 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index f7215e622df..c30a70c6b93 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -172,9 +172,20 @@ struct kvm_vcpu_arch { struct kvmppc_mmu mmu; #endif - u64 fpr[32]; ulong gpr[32]; + u64 fpr[32]; + u32 fpscr; + +#ifdef CONFIG_ALTIVEC + vector128 vr[32]; + vector128 vscr; +#endif + +#ifdef CONFIG_VSX + u64 vsr[32]; +#endif + ulong pc; ulong ctr; ulong lr; @@ -188,6 +199,7 @@ struct kvm_vcpu_arch { #ifdef CONFIG_PPC64 ulong shadow_msr; ulong hflags; + ulong guest_owned_ext; #endif u32 mmucr; ulong sprg0; diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 02861fda73d..2cb181396f8 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -33,6 +33,9 @@ /* #define EXIT_DEBUG */ /* #define EXIT_DEBUG_SIMPLE */ +/* #define DEBUG_EXT */ + +static void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr); struct kvm_stats_debugfs_item debugfs_entries[] = { { "exits", VCPU_STAT(sum_exits) }, @@ -77,6 +80,10 @@ void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) memcpy(&to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu, sizeof(get_paca()->shadow_vcpu)); to_book3s(vcpu)->slb_shadow_max = get_paca()->kvm_slb_max; + + kvmppc_giveup_ext(vcpu, MSR_FP); + kvmppc_giveup_ext(vcpu, MSR_VEC); + kvmppc_giveup_ext(vcpu, MSR_VSX); } #if defined(EXIT_DEBUG) @@ -97,9 +104,9 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr) msr &= to_book3s(vcpu)->msr_mask; vcpu->arch.msr = msr; vcpu->arch.shadow_msr = msr | MSR_USER32; - vcpu->arch.shadow_msr &= ( MSR_VEC | MSR_VSX | MSR_FP | MSR_FE0 | - MSR_USER64 | MSR_SE | MSR_BE | MSR_DE | - MSR_FE1); + vcpu->arch.shadow_msr &= (MSR_FE0 | MSR_USER64 | MSR_SE | MSR_BE | + MSR_DE | MSR_FE1); + vcpu->arch.shadow_msr |= (msr & vcpu->arch.guest_owned_ext); if (msr & (MSR_WE|MSR_POW)) { if (!vcpu->arch.pending_exceptions) { @@ -551,6 +558,117 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, return r; } +static inline int get_fpr_index(int i) +{ +#ifdef CONFIG_VSX + i *= 2; +#endif + return i; +} + +/* Give up external provider (FPU, Altivec, VSX) */ +static void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr) +{ + struct thread_struct *t = ¤t->thread; + u64 *vcpu_fpr = vcpu->arch.fpr; + u64 *vcpu_vsx = vcpu->arch.vsr; + u64 *thread_fpr = (u64*)t->fpr; + int i; + + if (!(vcpu->arch.guest_owned_ext & msr)) + return; + +#ifdef DEBUG_EXT + printk(KERN_INFO "Giving up ext 0x%lx\n", msr); +#endif + + switch (msr) { + case MSR_FP: + giveup_fpu(current); + for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) + vcpu_fpr[i] = thread_fpr[get_fpr_index(i)]; + + vcpu->arch.fpscr = t->fpscr.val; + break; + case MSR_VEC: +#ifdef CONFIG_ALTIVEC + giveup_altivec(current); + memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr)); + 
vcpu->arch.vscr = t->vscr; +#endif + break; + case MSR_VSX: +#ifdef CONFIG_VSX + __giveup_vsx(current); + for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++) + vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1]; +#endif + break; + default: + BUG(); + } + + vcpu->arch.guest_owned_ext &= ~msr; + current->thread.regs->msr &= ~msr; + kvmppc_set_msr(vcpu, vcpu->arch.msr); +} + +/* Handle external providers (FPU, Altivec, VSX) */ +static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, + ulong msr) +{ + struct thread_struct *t = ¤t->thread; + u64 *vcpu_fpr = vcpu->arch.fpr; + u64 *vcpu_vsx = vcpu->arch.vsr; + u64 *thread_fpr = (u64*)t->fpr; + int i; + + if (!(vcpu->arch.msr & msr)) { + kvmppc_book3s_queue_irqprio(vcpu, exit_nr); + return RESUME_GUEST; + } + +#ifdef DEBUG_EXT + printk(KERN_INFO "Loading up ext 0x%lx\n", msr); +#endif + + current->thread.regs->msr |= msr; + + switch (msr) { + case MSR_FP: + for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) + thread_fpr[get_fpr_index(i)] = vcpu_fpr[i]; + + t->fpscr.val = vcpu->arch.fpscr; + t->fpexc_mode = 0; + kvmppc_load_up_fpu(); + break; + case MSR_VEC: +#ifdef CONFIG_ALTIVEC + memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr)); + t->vscr = vcpu->arch.vscr; + t->vrsave = -1; + kvmppc_load_up_altivec(); +#endif + break; + case MSR_VSX: +#ifdef CONFIG_VSX + for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++) + thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i]; + kvmppc_load_up_vsx(); +#endif + break; + default: + BUG(); + } + + vcpu->arch.guest_owned_ext |= msr; + + kvmppc_set_msr(vcpu, vcpu->arch.msr); + + return RESUME_GUEST; +} + int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, unsigned int exit_nr) { @@ -674,11 +792,17 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, kvmppc_book3s_queue_irqprio(vcpu, exit_nr); r = RESUME_GUEST; break; - case BOOK3S_INTERRUPT_MACHINE_CHECK: case BOOK3S_INTERRUPT_FP_UNAVAIL: - case BOOK3S_INTERRUPT_TRACE: + r = kvmppc_handle_ext(vcpu, exit_nr, MSR_FP); + break; case BOOK3S_INTERRUPT_ALTIVEC: + r = kvmppc_handle_ext(vcpu, exit_nr, MSR_VEC); + break; case BOOK3S_INTERRUPT_VSX: + r = kvmppc_handle_ext(vcpu, exit_nr, MSR_VSX); + break; + case BOOK3S_INTERRUPT_MACHINE_CHECK: + case BOOK3S_INTERRUPT_TRACE: kvmppc_book3s_queue_irqprio(vcpu, exit_nr); r = RESUME_GUEST; break; @@ -959,6 +1083,10 @@ extern int __kvmppc_vcpu_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) { int ret; + struct thread_struct ext_bkp; + bool save_vec = current->thread.used_vr; + bool save_vsx = current->thread.used_vsr; + ulong ext_msr; /* No need to go into the guest when all we do is going out */ if (signal_pending(current)) { @@ -966,6 +1094,35 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) return -EINTR; } + /* Save FPU state in stack */ + if (current->thread.regs->msr & MSR_FP) + giveup_fpu(current); + memcpy(ext_bkp.fpr, current->thread.fpr, sizeof(current->thread.fpr)); + ext_bkp.fpscr = current->thread.fpscr; + ext_bkp.fpexc_mode = current->thread.fpexc_mode; + +#ifdef CONFIG_ALTIVEC + /* Save Altivec state in stack */ + if (save_vec) { + if (current->thread.regs->msr & MSR_VEC) + giveup_altivec(current); + memcpy(ext_bkp.vr, current->thread.vr, sizeof(ext_bkp.vr)); + ext_bkp.vscr = current->thread.vscr; + ext_bkp.vrsave = current->thread.vrsave; + } + ext_bkp.used_vr = current->thread.used_vr; +#endif + +#ifdef CONFIG_VSX + /* Save VSX state in stack */ + if (save_vsx && 
(current->thread.regs->msr & MSR_VSX)) + __giveup_vsx(current); + ext_bkp.used_vsr = current->thread.used_vsr; +#endif + + /* Remember the MSR with disabled extensions */ + ext_msr = current->thread.regs->msr; + /* XXX we get called with irq disabled - change that! */ local_irq_enable(); @@ -973,6 +1130,32 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) local_irq_disable(); + current->thread.regs->msr = ext_msr; + + /* Make sure we save the guest FPU/Altivec/VSX state */ + kvmppc_giveup_ext(vcpu, MSR_FP); + kvmppc_giveup_ext(vcpu, MSR_VEC); + kvmppc_giveup_ext(vcpu, MSR_VSX); + + /* Restore FPU state from stack */ + memcpy(current->thread.fpr, ext_bkp.fpr, sizeof(ext_bkp.fpr)); + current->thread.fpscr = ext_bkp.fpscr; + current->thread.fpexc_mode = ext_bkp.fpexc_mode; + +#ifdef CONFIG_ALTIVEC + /* Restore Altivec state from stack */ + if (save_vec && current->thread.used_vr) { + memcpy(current->thread.vr, ext_bkp.vr, sizeof(ext_bkp.vr)); + current->thread.vscr = ext_bkp.vscr; + current->thread.vrsave= ext_bkp.vrsave; + } + current->thread.used_vr = ext_bkp.used_vr; +#endif + +#ifdef CONFIG_VSX + current->thread.used_vsr = ext_bkp.used_vsr; +#endif + return ret; } -- cgit v1.2.3-70-g09d2 From 1c0006d8d131585095c4a27dbfcfb3970807a35e Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Fri, 15 Jan 2010 14:49:12 +0100 Subject: KVM: PPC: Fix initial GPR settings Commit 7d01b4c3ed2bb33ceaf2d270cb4831a67a76b51b introduced PACA backed vcpu values. With this patch, when a userspace app was setting GPRs before it was actually first loaded, the set values get discarded. This is because vcpu_load loads them from the vcpu backing store that we use whenever we're not owning the PACA. That behavior is not really a major problem, because we don't need it for qemu. Other users (like kvmctl) do have problems with it though, so let's better do it right. 
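The idea of the fix reduces to a small sketch (illustrative only; the real accessors are the kvmppc_set_gpr/set_cr/set_xer inlines in the diff below):

struct gpr_copies {
    unsigned long shadow[14];   /* fast copy kept in the PACA */
    unsigned long backing[32];  /* copy that vcpu_load restores from */
};

static void set_gpr(struct gpr_copies *r, int num, unsigned long val)
{
    if (num < 14) {
        /* Keep the backing store in sync as well, so a value written
         * before the first vcpu_load is not overwritten on load. */
        r->shadow[num] = val;
        r->backing[num] = val;
    } else {
        r->backing[num] = val;
    }
}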
Signed-off-by: Alexander Graf Signed-off-by: Avi Kivity --- arch/powerpc/include/asm/kvm_book3s.h | 1 - arch/powerpc/include/asm/kvm_ppc.h | 10 ++++++++-- 2 files changed, 8 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index e1b441cce41..db7db0a9696 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -22,7 +22,6 @@ #include #include -#include #include struct kvmppc_slb { diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 09816da9e95..e2642829e43 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -28,6 +28,9 @@ #include #include #include +#ifdef CONFIG_PPC_BOOK3S +#include +#endif enum emulation_result { EMULATE_DONE, /* no further processing */ @@ -102,9 +105,10 @@ extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu); static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val) { - if ( num < 14 ) + if ( num < 14 ) { get_paca()->shadow_vcpu.gpr[num] = val; - else + to_book3s(vcpu)->shadow_vcpu.gpr[num] = val; + } else vcpu->arch.gpr[num] = val; } @@ -119,6 +123,7 @@ static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num) static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val) { get_paca()->shadow_vcpu.cr = val; + to_book3s(vcpu)->shadow_vcpu.cr = val; } static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu) @@ -129,6 +134,7 @@ static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu) static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val) { get_paca()->shadow_vcpu.xer = val; + to_book3s(vcpu)->shadow_vcpu.xer = val; } static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu) -- cgit v1.2.3-70-g09d2 From f7adbba1e5d464b0d449adac1eb2519be6be9728 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Fri, 15 Jan 2010 14:49:13 +0100 Subject: KVM: PPC: Keep SRR1 flags around in shadow_msr SRR1 stores more information that just the MSR value. It also stores valuable information about the type of interrupt we received, for example whether the storage interrupt we just got was because of a missing htab entry or not. We use that information to speed up the exit path. Now if we get preempted before we can interpret the shadow_msr values, we get into vcpu_put which then calls the MSR handler, which then sets all the SRR1 information bits in shadow_msr to 0. Great. So let's preserve the SRR1 specific bits in shadow_msr whenever we set the MSR. They don't hurt. 
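For illustration (not kernel code), this is the kind of test the exit path performs on those extra bits; the 0x40000000 mask is the one visible in the book3s.c hunk below for instruction storage interrupts:

static int isi_was_missing_hpte(unsigned long srr1)
{
    /* SRR1 reason bit: the instruction storage interrupt was caused by
     * a missing hash table entry (PTEG not found) rather than by a
     * protection fault. */
    return (srr1 & 0x40000000UL) != 0;
}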
Signed-off-by: Alexander Graf Signed-off-by: Avi Kivity --- arch/powerpc/include/asm/kvm_host.h | 1 + arch/powerpc/kernel/asm-offsets.c | 1 + arch/powerpc/kvm/book3s.c | 13 +++++++------ arch/powerpc/kvm/book3s_64_interrupts.S | 2 +- 4 files changed, 10 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index c30a70c6b93..715aa6baf6f 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -198,6 +198,7 @@ struct kvm_vcpu_arch { ulong msr; #ifdef CONFIG_PPC64 ulong shadow_msr; + ulong shadow_srr1; ulong hflags; ulong guest_owned_ext; #endif diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index ee9935442f0..957ceb7059c 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -433,6 +433,7 @@ int main(void) DEFINE(VCPU_HOST_R2, offsetof(struct kvm_vcpu, arch.host_r2)); DEFINE(VCPU_HOST_MSR, offsetof(struct kvm_vcpu, arch.host_msr)); DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr)); + DEFINE(VCPU_SHADOW_SRR1, offsetof(struct kvm_vcpu, arch.shadow_srr1)); DEFINE(VCPU_TRAMPOLINE_LOWMEM, offsetof(struct kvm_vcpu, arch.trampoline_lowmem)); DEFINE(VCPU_TRAMPOLINE_ENTER, offsetof(struct kvm_vcpu, arch.trampoline_enter)); DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler)); diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 2cb181396f8..58f5200fc09 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -524,14 +524,14 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, /* Page not found in guest PTE entries */ vcpu->arch.dear = vcpu->arch.fault_dear; to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr; - vcpu->arch.msr |= (vcpu->arch.shadow_msr & 0x00000000f8000000ULL); + vcpu->arch.msr |= (vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL); kvmppc_book3s_queue_irqprio(vcpu, vec); } else if (page_found == -EPERM) { /* Storage protection */ vcpu->arch.dear = vcpu->arch.fault_dear; to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE; to_book3s(vcpu)->dsisr |= DSISR_PROTFAULT; - vcpu->arch.msr |= (vcpu->arch.shadow_msr & 0x00000000f8000000ULL); + vcpu->arch.msr |= (vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL); kvmppc_book3s_queue_irqprio(vcpu, vec); } else if (page_found == -EINVAL) { /* Page not found in guest SLB */ @@ -693,7 +693,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, case BOOK3S_INTERRUPT_INST_STORAGE: vcpu->stat.pf_instruc++; /* only care about PTEG not found errors, but leave NX alone */ - if (vcpu->arch.shadow_msr & 0x40000000) { + if (vcpu->arch.shadow_srr1 & 0x40000000) { r = kvmppc_handle_pagefault(run, vcpu, vcpu->arch.pc, exit_nr); vcpu->stat.sp_instruc++; } else if (vcpu->arch.mmu.is_dcbz32(vcpu) && @@ -705,7 +705,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, */ kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL); } else { - vcpu->arch.msr |= (vcpu->arch.shadow_msr & 0x58000000); + vcpu->arch.msr |= vcpu->arch.shadow_srr1 & 0x58000000; kvmppc_book3s_queue_irqprio(vcpu, exit_nr); kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL); r = RESUME_GUEST; @@ -753,7 +753,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, enum emulation_result er; ulong flags; - flags = (vcpu->arch.shadow_msr & 0x1f0000ull); + flags = vcpu->arch.shadow_srr1 & 0x1f0000ull; if (vcpu->arch.msr & MSR_PR) { #ifdef EXIT_DEBUG @@ -808,7 
+808,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, break; default: /* Ugh - bork here! What did we get? */ - printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n", exit_nr, vcpu->arch.pc, vcpu->arch.shadow_msr); + printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n", + exit_nr, vcpu->arch.pc, vcpu->arch.shadow_srr1); r = RESUME_HOST; BUG(); break; diff --git a/arch/powerpc/kvm/book3s_64_interrupts.S b/arch/powerpc/kvm/book3s_64_interrupts.S index 2ff0b2137e6..c1584d0cbce 100644 --- a/arch/powerpc/kvm/book3s_64_interrupts.S +++ b/arch/powerpc/kvm/book3s_64_interrupts.S @@ -169,7 +169,7 @@ kvmppc_handler_highmem: stw r0, VCPU_LAST_INST(r7) std r3, VCPU_PC(r7) - std r4, VCPU_SHADOW_MSR(r7) + std r4, VCPU_SHADOW_SRR1(r7) std r5, VCPU_FAULT_DEAR(r7) std r6, VCPU_FAULT_DSISR(r7) -- cgit v1.2.3-70-g09d2 From a76f8497fd475028869f4b45087d80df14d74a50 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Fri, 15 Jan 2010 14:49:14 +0100 Subject: KVM: PPC: Move Shadow MSR calculation to function We keep a copy of the MSR around that we use when we go into the guest context. That copy is basically the normal process MSR flags OR some allowed guest specified MSR flags. We also AND the external providers into this, so we get traps on FPU usage when we haven't activated it on the host yet. Currently this calculation is part of the set_msr function that we use whenever we set the guest MSR value. With the external providers, we also have the case that we don't modify the guest's MSR, but only want to update the shadow MSR. So let's move the shadow MSR parts to a separate function that we then use whenever we only need to update it. That way we don't accidently kvm_vcpu_block within a preempt notifier context. Signed-off-by: Alexander Graf Signed-off-by: Avi Kivity --- arch/powerpc/kvm/book3s.c | 27 +++++++++++++++++++++------ 1 file changed, 21 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 58f5200fc09..9a271f0929c 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -94,6 +94,23 @@ static u32 kvmppc_get_dec(struct kvm_vcpu *vcpu) } #endif +static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu) +{ + vcpu->arch.shadow_msr = vcpu->arch.msr; + /* Guest MSR values */ + vcpu->arch.shadow_msr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | + MSR_BE | MSR_DE; + /* Process MSR values */ + vcpu->arch.shadow_msr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | + MSR_EE; + /* External providers the guest reserved */ + vcpu->arch.shadow_msr |= (vcpu->arch.msr & vcpu->arch.guest_owned_ext); + /* 64-bit Process MSR values */ +#ifdef CONFIG_PPC_BOOK3S_64 + vcpu->arch.shadow_msr |= MSR_ISF | MSR_HV; +#endif +} + void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr) { ulong old_msr = vcpu->arch.msr; @@ -101,12 +118,10 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr) #ifdef EXIT_DEBUG printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr); #endif + msr &= to_book3s(vcpu)->msr_mask; vcpu->arch.msr = msr; - vcpu->arch.shadow_msr = msr | MSR_USER32; - vcpu->arch.shadow_msr &= (MSR_FE0 | MSR_USER64 | MSR_SE | MSR_BE | - MSR_DE | MSR_FE1); - vcpu->arch.shadow_msr |= (msr & vcpu->arch.guest_owned_ext); + kvmppc_recalc_shadow_msr(vcpu); if (msr & (MSR_WE|MSR_POW)) { if (!vcpu->arch.pending_exceptions) { @@ -610,7 +625,7 @@ static void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr) vcpu->arch.guest_owned_ext &= ~msr; current->thread.regs->msr &= ~msr; - kvmppc_set_msr(vcpu, vcpu->arch.msr); + 
kvmppc_recalc_shadow_msr(vcpu); } /* Handle external providers (FPU, Altivec, VSX) */ @@ -664,7 +679,7 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, vcpu->arch.guest_owned_ext |= msr; - kvmppc_set_msr(vcpu, vcpu->arch.msr); + kvmppc_recalc_shadow_msr(vcpu); return RESUME_GUEST; } -- cgit v1.2.3-70-g09d2 From 1d5103c11e32b5028262c073d56375691d51a886 Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Sun, 17 Jan 2010 15:51:21 +0200 Subject: KVM: Add HYPER-V header file Provide HYPER-V related defines that will be used by following patches. Signed-off-by: Gleb Natapov Signed-off-by: Vadim Rozenfeld Signed-off-by: Avi Kivity --- arch/x86/include/asm/hyperv.h | 186 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 186 insertions(+) create mode 100644 arch/x86/include/asm/hyperv.h (limited to 'arch') diff --git a/arch/x86/include/asm/hyperv.h b/arch/x86/include/asm/hyperv.h new file mode 100644 index 00000000000..e153a2b3889 --- /dev/null +++ b/arch/x86/include/asm/hyperv.h @@ -0,0 +1,186 @@ +#ifndef _ASM_X86_KVM_HYPERV_H +#define _ASM_X86_KVM_HYPERV_H + +#include + +/* + * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent + * is set by CPUID(HvCpuIdFunctionVersionAndFeatures). + */ +#define HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS 0x40000000 +#define HYPERV_CPUID_INTERFACE 0x40000001 +#define HYPERV_CPUID_VERSION 0x40000002 +#define HYPERV_CPUID_FEATURES 0x40000003 +#define HYPERV_CPUID_ENLIGHTMENT_INFO 0x40000004 +#define HYPERV_CPUID_IMPLEMENT_LIMITS 0x40000005 + +/* + * Feature identification. EAX indicates which features are available + * to the partition based upon the current partition privileges. + */ + +/* VP Runtime (HV_X64_MSR_VP_RUNTIME) available */ +#define HV_X64_MSR_VP_RUNTIME_AVAILABLE (1 << 0) +/* Partition Reference Counter (HV_X64_MSR_TIME_REF_COUNT) available*/ +#define HV_X64_MSR_TIME_REF_COUNT_AVAILABLE (1 << 1) +/* + * Basic SynIC MSRs (HV_X64_MSR_SCONTROL through HV_X64_MSR_EOM + * and HV_X64_MSR_SINT0 through HV_X64_MSR_SINT15) available + */ +#define HV_X64_MSR_SYNIC_AVAILABLE (1 << 2) +/* + * Synthetic Timer MSRs (HV_X64_MSR_STIMER0_CONFIG through + * HV_X64_MSR_STIMER3_COUNT) available + */ +#define HV_X64_MSR_SYNTIMER_AVAILABLE (1 << 3) +/* + * APIC access MSRs (HV_X64_MSR_EOI, HV_X64_MSR_ICR and HV_X64_MSR_TPR) + * are available + */ +#define HV_X64_MSR_APIC_ACCESS_AVAILABLE (1 << 4) +/* Hypercall MSRs (HV_X64_MSR_GUEST_OS_ID and HV_X64_MSR_HYPERCALL) available*/ +#define HV_X64_MSR_HYPERCALL_AVAILABLE (1 << 5) +/* Access virtual processor index MSR (HV_X64_MSR_VP_INDEX) available*/ +#define HV_X64_MSR_VP_INDEX_AVAILABLE (1 << 6) +/* Virtual system reset MSR (HV_X64_MSR_RESET) is available*/ +#define HV_X64_MSR_RESET_AVAILABLE (1 << 7) + /* + * Access statistics pages MSRs (HV_X64_MSR_STATS_PARTITION_RETAIL_PAGE, + * HV_X64_MSR_STATS_PARTITION_INTERNAL_PAGE, HV_X64_MSR_STATS_VP_RETAIL_PAGE, + * HV_X64_MSR_STATS_VP_INTERNAL_PAGE) available + */ +#define HV_X64_MSR_STAT_PAGES_AVAILABLE (1 << 8) + +/* + * Feature identification: EBX indicates which flags were specified at + * partition creation. The format is the same as the partition creation + * flag structure defined in section Partition Creation Flags. 
+ */ +#define HV_X64_CREATE_PARTITIONS (1 << 0) +#define HV_X64_ACCESS_PARTITION_ID (1 << 1) +#define HV_X64_ACCESS_MEMORY_POOL (1 << 2) +#define HV_X64_ADJUST_MESSAGE_BUFFERS (1 << 3) +#define HV_X64_POST_MESSAGES (1 << 4) +#define HV_X64_SIGNAL_EVENTS (1 << 5) +#define HV_X64_CREATE_PORT (1 << 6) +#define HV_X64_CONNECT_PORT (1 << 7) +#define HV_X64_ACCESS_STATS (1 << 8) +#define HV_X64_DEBUGGING (1 << 11) +#define HV_X64_CPU_POWER_MANAGEMENT (1 << 12) +#define HV_X64_CONFIGURE_PROFILER (1 << 13) + +/* + * Feature identification. EDX indicates which miscellaneous features + * are available to the partition. + */ +/* The MWAIT instruction is available (per section MONITOR / MWAIT) */ +#define HV_X64_MWAIT_AVAILABLE (1 << 0) +/* Guest debugging support is available */ +#define HV_X64_GUEST_DEBUGGING_AVAILABLE (1 << 1) +/* Performance Monitor support is available*/ +#define HV_X64_PERF_MONITOR_AVAILABLE (1 << 2) +/* Support for physical CPU dynamic partitioning events is available*/ +#define HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE (1 << 3) +/* + * Support for passing hypercall input parameter block via XMM + * registers is available + */ +#define HV_X64_HYPERCALL_PARAMS_XMM_AVAILABLE (1 << 4) +/* Support for a virtual guest idle state is available */ +#define HV_X64_GUEST_IDLE_STATE_AVAILABLE (1 << 5) + +/* + * Implementation recommendations. Indicates which behaviors the hypervisor + * recommends the OS implement for optimal performance. + */ + /* + * Recommend using hypercall for address space switches rather + * than MOV to CR3 instruction + */ +#define HV_X64_MWAIT_RECOMMENDED (1 << 0) +/* Recommend using hypercall for local TLB flushes rather + * than INVLPG or MOV to CR3 instructions */ +#define HV_X64_LOCAL_TLB_FLUSH_RECOMMENDED (1 << 1) +/* + * Recommend using hypercall for remote TLB flushes rather + * than inter-processor interrupts + */ +#define HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED (1 << 2) +/* + * Recommend using MSRs for accessing APIC registers + * EOI, ICR and TPR rather than their memory-mapped counterparts + */ +#define HV_X64_APIC_ACCESS_RECOMMENDED (1 << 3) +/* Recommend using the hypervisor-provided MSR to initiate a system RESET */ +#define HV_X64_SYSTEM_RESET_RECOMMENDED (1 << 4) +/* + * Recommend using relaxed timing for this partition. If used, + * the VM should disable any watchdog timeouts that rely on the + * timely delivery of external interrupts + */ +#define HV_X64_RELAXED_TIMING_RECOMMENDED (1 << 5) + +/* MSR used to identify the guest OS. */ +#define HV_X64_MSR_GUEST_OS_ID 0x40000000 + +/* MSR used to setup pages used to communicate with the hypervisor. */ +#define HV_X64_MSR_HYPERCALL 0x40000001 + +/* MSR used to provide vcpu index */ +#define HV_X64_MSR_VP_INDEX 0x40000002 + +/* Define the virtual APIC registers */ +#define HV_X64_MSR_EOI 0x40000070 +#define HV_X64_MSR_ICR 0x40000071 +#define HV_X64_MSR_TPR 0x40000072 +#define HV_X64_MSR_APIC_ASSIST_PAGE 0x40000073 + +/* Define synthetic interrupt controller model specific registers. 
*/ +#define HV_X64_MSR_SCONTROL 0x40000080 +#define HV_X64_MSR_SVERSION 0x40000081 +#define HV_X64_MSR_SIEFP 0x40000082 +#define HV_X64_MSR_SIMP 0x40000083 +#define HV_X64_MSR_EOM 0x40000084 +#define HV_X64_MSR_SINT0 0x40000090 +#define HV_X64_MSR_SINT1 0x40000091 +#define HV_X64_MSR_SINT2 0x40000092 +#define HV_X64_MSR_SINT3 0x40000093 +#define HV_X64_MSR_SINT4 0x40000094 +#define HV_X64_MSR_SINT5 0x40000095 +#define HV_X64_MSR_SINT6 0x40000096 +#define HV_X64_MSR_SINT7 0x40000097 +#define HV_X64_MSR_SINT8 0x40000098 +#define HV_X64_MSR_SINT9 0x40000099 +#define HV_X64_MSR_SINT10 0x4000009A +#define HV_X64_MSR_SINT11 0x4000009B +#define HV_X64_MSR_SINT12 0x4000009C +#define HV_X64_MSR_SINT13 0x4000009D +#define HV_X64_MSR_SINT14 0x4000009E +#define HV_X64_MSR_SINT15 0x4000009F + + +#define HV_X64_MSR_HYPERCALL_ENABLE 0x00000001 +#define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT 12 +#define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK \ + (~((1ull << HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT) - 1)) + +/* Declare the various hypercall operations. */ +#define HV_X64_HV_NOTIFY_LONG_SPIN_WAIT 0x0008 + +#define HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE 0x00000001 +#define HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT 12 +#define HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_MASK \ + (~((1ull << HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT) - 1)) + +#define HV_PROCESSOR_POWER_STATE_C0 0 +#define HV_PROCESSOR_POWER_STATE_C1 1 +#define HV_PROCESSOR_POWER_STATE_C2 2 +#define HV_PROCESSOR_POWER_STATE_C3 3 + +/* hypercall status code */ +#define HV_STATUS_SUCCESS 0 +#define HV_STATUS_INVALID_HYPERCALL_CODE 2 +#define HV_STATUS_INVALID_HYPERCALL_INPUT 3 +#define HV_STATUS_INVALID_ALIGNMENT 4 + +#endif -- cgit v1.2.3-70-g09d2 From 55cd8e5a4edb8e235163ffe8264b9aaa8d7c050f Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Sun, 17 Jan 2010 15:51:22 +0200 Subject: KVM: Implement bare minimum of HYPER-V MSRs Minimum HYPER-V implementation should have GUEST_OS_ID, HYPERCALL and VP_INDEX MSRs. [avi: fix build on i386] Signed-off-by: Gleb Natapov Signed-off-by: Vadim Rozenfeld Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_host.h | 4 + arch/x86/include/asm/kvm_para.h | 1 + arch/x86/kvm/trace.h | 32 +++++++ arch/x86/kvm/x86.c | 193 +++++++++++++++++++++++++++++++++++++++- include/linux/kvm.h | 1 + 5 files changed, 230 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 93bee7abb71..67d19e42200 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -413,6 +413,10 @@ struct kvm_arch { s64 kvmclock_offset; struct kvm_xen_hvm_config xen_hvm_config; + + /* fields used by HYPER-V emulation */ + u64 hv_guest_os_id; + u64 hv_hypercall; }; struct kvm_vm_stat { diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h index c584076a47f..ffae1420e7d 100644 --- a/arch/x86/include/asm/kvm_para.h +++ b/arch/x86/include/asm/kvm_para.h @@ -2,6 +2,7 @@ #define _ASM_X86_KVM_PARA_H #include +#include /* This CPUID returns the signature 'KVMKVMKVM' in ebx, ecx, and edx. It * should be used to determine that a VM is running under KVM. diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h index 816e0449db0..1cb3d0e990f 100644 --- a/arch/x86/kvm/trace.h +++ b/arch/x86/kvm/trace.h @@ -55,6 +55,38 @@ TRACE_EVENT(kvm_hypercall, __entry->a3) ); +/* + * Tracepoint for hypercall. 
+ */ +TRACE_EVENT(kvm_hv_hypercall, + TP_PROTO(__u16 code, bool fast, __u16 rep_cnt, __u16 rep_idx, + __u64 ingpa, __u64 outgpa), + TP_ARGS(code, fast, rep_cnt, rep_idx, ingpa, outgpa), + + TP_STRUCT__entry( + __field( __u16, code ) + __field( bool, fast ) + __field( __u16, rep_cnt ) + __field( __u16, rep_idx ) + __field( __u64, ingpa ) + __field( __u64, outgpa ) + ), + + TP_fast_assign( + __entry->code = code; + __entry->fast = fast; + __entry->rep_cnt = rep_cnt; + __entry->rep_idx = rep_idx; + __entry->ingpa = ingpa; + __entry->outgpa = outgpa; + ), + + TP_printk("code 0x%x %s cnt 0x%x idx 0x%x in 0x%llx out 0x%llx", + __entry->code, __entry->fast ? "fast" : "slow", + __entry->rep_cnt, __entry->rep_idx, __entry->ingpa, + __entry->outgpa) +); + /* * Tracepoint for PIO. */ diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 1ad34d185da..480137db477 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -622,9 +622,10 @@ static inline u32 bit(int bitno) * kvm-specific. Those are put in the beginning of the list. */ -#define KVM_SAVE_MSRS_BEGIN 2 +#define KVM_SAVE_MSRS_BEGIN 4 static u32 msrs_to_save[] = { MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK, + HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL, MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP, MSR_K6_STAR, #ifdef CONFIG_X86_64 @@ -1004,6 +1005,74 @@ out: return r; } +static bool kvm_hv_hypercall_enabled(struct kvm *kvm) +{ + return kvm->arch.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE; +} + +static bool kvm_hv_msr_partition_wide(u32 msr) +{ + bool r = false; + switch (msr) { + case HV_X64_MSR_GUEST_OS_ID: + case HV_X64_MSR_HYPERCALL: + r = true; + break; + } + + return r; +} + +static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data) +{ + struct kvm *kvm = vcpu->kvm; + + switch (msr) { + case HV_X64_MSR_GUEST_OS_ID: + kvm->arch.hv_guest_os_id = data; + /* setting guest os id to zero disables hypercall page */ + if (!kvm->arch.hv_guest_os_id) + kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE; + break; + case HV_X64_MSR_HYPERCALL: { + u64 gfn; + unsigned long addr; + u8 instructions[4]; + + /* if guest os id is not set hypercall should remain disabled */ + if (!kvm->arch.hv_guest_os_id) + break; + if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) { + kvm->arch.hv_hypercall = data; + break; + } + gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT; + addr = gfn_to_hva(kvm, gfn); + if (kvm_is_error_hva(addr)) + return 1; + kvm_x86_ops->patch_hypercall(vcpu, instructions); + ((unsigned char *)instructions)[3] = 0xc3; /* ret */ + if (copy_to_user((void __user *)addr, instructions, 4)) + return 1; + kvm->arch.hv_hypercall = data; + break; + } + default: + pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x " + "data 0x%llx\n", msr, data); + return 1; + } + return 0; +} + +static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data) +{ + pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x data 0x%llx\n", + msr, data); + + return 1; +} + int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) { switch (msr) { @@ -1118,6 +1187,16 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) pr_unimpl(vcpu, "unimplemented perfctr wrmsr: " "0x%x data 0x%llx\n", msr, data); break; + case HV_X64_MSR_GUEST_OS_ID ... 
HV_X64_MSR_SINT15: + if (kvm_hv_msr_partition_wide(msr)) { + int r; + mutex_lock(&vcpu->kvm->lock); + r = set_msr_hyperv_pw(vcpu, msr, data); + mutex_unlock(&vcpu->kvm->lock); + return r; + } else + return set_msr_hyperv(vcpu, msr, data); + break; default: if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr)) return xen_hvm_config(vcpu, data); @@ -1217,6 +1296,48 @@ static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) return 0; } +static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) +{ + u64 data = 0; + struct kvm *kvm = vcpu->kvm; + + switch (msr) { + case HV_X64_MSR_GUEST_OS_ID: + data = kvm->arch.hv_guest_os_id; + break; + case HV_X64_MSR_HYPERCALL: + data = kvm->arch.hv_hypercall; + break; + default: + pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); + return 1; + } + + *pdata = data; + return 0; +} + +static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) +{ + u64 data = 0; + + switch (msr) { + case HV_X64_MSR_VP_INDEX: { + int r; + struct kvm_vcpu *v; + kvm_for_each_vcpu(r, v, vcpu->kvm) + if (v == vcpu) + data = r; + break; + } + default: + pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); + return 1; + } + *pdata = data; + return 0; +} + int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) { u64 data; @@ -1283,6 +1404,16 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) case MSR_IA32_MCG_STATUS: case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1: return get_msr_mce(vcpu, msr, pdata); + case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15: + if (kvm_hv_msr_partition_wide(msr)) { + int r; + mutex_lock(&vcpu->kvm->lock); + r = get_msr_hyperv_pw(vcpu, msr, pdata); + mutex_unlock(&vcpu->kvm->lock); + return r; + } else + return get_msr_hyperv(vcpu, msr, pdata); + break; default: if (!ignore_msrs) { pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr); @@ -1398,6 +1529,7 @@ int kvm_dev_ioctl_check_extension(long ext) case KVM_CAP_XEN_HVM: case KVM_CAP_ADJUST_CLOCK: case KVM_CAP_VCPU_EVENTS: + case KVM_CAP_HYPERV: r = 1; break; case KVM_CAP_COALESCED_MMIO: @@ -3618,11 +3750,70 @@ static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0, return a0 | ((gpa_t)a1 << 32); } +int kvm_hv_hypercall(struct kvm_vcpu *vcpu) +{ + u64 param, ingpa, outgpa, ret; + uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0; + bool fast, longmode; + int cs_db, cs_l; + + /* + * hypercall generates UD from non zero cpl and real mode + * per HYPER-V spec + */ + if (kvm_x86_ops->get_cpl(vcpu) != 0 || + !kvm_read_cr0_bits(vcpu, X86_CR0_PE)) { + kvm_queue_exception(vcpu, UD_VECTOR); + return 0; + } + + kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); + longmode = is_long_mode(vcpu) && cs_l == 1; + + if (!longmode) { + param = (kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) | + (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffff); + ingpa = (kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) | + (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffff); + outgpa = (kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) | + (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffff); + } +#ifdef CONFIG_X86_64 + else { + param = kvm_register_read(vcpu, VCPU_REGS_RCX); + ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX); + outgpa = kvm_register_read(vcpu, VCPU_REGS_R8); + } +#endif + + code = param & 0xffff; + fast = (param >> 16) & 0x1; + rep_cnt = (param >> 32) & 0xfff; + rep_idx = (param >> 48) & 0xfff; + + trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa); + + res = 
HV_STATUS_INVALID_HYPERCALL_CODE; + + ret = res | (((u64)rep_done & 0xfff) << 32); + if (longmode) { + kvm_register_write(vcpu, VCPU_REGS_RAX, ret); + } else { + kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32); + kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff); + } + + return 1; +} + int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) { unsigned long nr, a0, a1, a2, a3, ret; int r = 1; + if (kvm_hv_hypercall_enabled(vcpu->kvm)) + return kvm_hv_hypercall(vcpu); + nr = kvm_register_read(vcpu, VCPU_REGS_RAX); a0 = kvm_register_read(vcpu, VCPU_REGS_RBX); a1 = kvm_register_read(vcpu, VCPU_REGS_RCX); diff --git a/include/linux/kvm.h b/include/linux/kvm.h index f2feef68ffd..e227cbae70a 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h @@ -497,6 +497,7 @@ struct kvm_ioeventfd { #endif #define KVM_CAP_S390_PSW 42 #define KVM_CAP_PPC_SEGSTATE 43 +#define KVM_CAP_HYPERV 44 #ifdef KVM_CAP_IRQ_ROUTING -- cgit v1.2.3-70-g09d2 From 10388a07164c1512b3a3d0273b9adc230f82790e Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Sun, 17 Jan 2010 15:51:23 +0200 Subject: KVM: Add HYPER-V apic access MSRs Implement HYPER-V apic MSRs. Spec defines three MSRs that speed-up access to EOI/TPR/ICR apic registers for PV guests. Signed-off-by: Gleb Natapov Signed-off-by: Vadim Rozenfeld Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_host.h | 2 ++ arch/x86/kvm/lapic.c | 31 ++++++++++++++++++++++++++++++ arch/x86/kvm/lapic.h | 8 ++++++++ arch/x86/kvm/x86.c | 42 +++++++++++++++++++++++++++++++++++++---- include/linux/kvm.h | 1 + 5 files changed, 80 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 67d19e42200..a1f0b5dd7d7 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -363,6 +363,8 @@ struct kvm_vcpu_arch { /* used for guest single stepping over the given code position */ u16 singlestep_cs; unsigned long singlestep_rip; + /* fields used by HYPER-V emulation */ + u64 hv_vapic; }; struct kvm_mem_alias { diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index ba8c045da78..4b224f90087 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -1246,3 +1246,34 @@ int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data) return 0; } + +int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data) +{ + struct kvm_lapic *apic = vcpu->arch.apic; + + if (!irqchip_in_kernel(vcpu->kvm)) + return 1; + + /* if this is ICR write vector before command */ + if (reg == APIC_ICR) + apic_reg_write(apic, APIC_ICR2, (u32)(data >> 32)); + return apic_reg_write(apic, reg, (u32)data); +} + +int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data) +{ + struct kvm_lapic *apic = vcpu->arch.apic; + u32 low, high = 0; + + if (!irqchip_in_kernel(vcpu->kvm)) + return 1; + + if (apic_reg_read(apic, reg, 4, &low)) + return 1; + if (reg == APIC_ICR) + apic_reg_read(apic, APIC_ICR2, 4, &high); + + *data = (((u64)high) << 32) | low; + + return 0; +} diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h index 40010b09c4a..f5fe32c5eda 100644 --- a/arch/x86/kvm/lapic.h +++ b/arch/x86/kvm/lapic.h @@ -48,4 +48,12 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu); int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data); int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data); + +int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data); +int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data); + +static inline bool 
kvm_hv_vapic_assist_page_enabled(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.hv_vapic & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE; +} #endif diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 480137db477..552be51e4d8 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -622,10 +622,11 @@ static inline u32 bit(int bitno) * kvm-specific. Those are put in the beginning of the list. */ -#define KVM_SAVE_MSRS_BEGIN 4 +#define KVM_SAVE_MSRS_BEGIN 5 static u32 msrs_to_save[] = { MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK, HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL, + HV_X64_MSR_APIC_ASSIST_PAGE, MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP, MSR_K6_STAR, #ifdef CONFIG_X86_64 @@ -1067,10 +1068,36 @@ static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data) static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data) { - pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x data 0x%llx\n", - msr, data); + switch (msr) { + case HV_X64_MSR_APIC_ASSIST_PAGE: { + unsigned long addr; - return 1; + if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) { + vcpu->arch.hv_vapic = data; + break; + } + addr = gfn_to_hva(vcpu->kvm, data >> + HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT); + if (kvm_is_error_hva(addr)) + return 1; + if (clear_user((void __user *)addr, PAGE_SIZE)) + return 1; + vcpu->arch.hv_vapic = data; + break; + } + case HV_X64_MSR_EOI: + return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data); + case HV_X64_MSR_ICR: + return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data); + case HV_X64_MSR_TPR: + return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data); + default: + pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x " + "data 0x%llx\n", msr, data); + return 1; + } + + return 0; } int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) @@ -1330,6 +1357,12 @@ static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) data = r; break; } + case HV_X64_MSR_EOI: + return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata); + case HV_X64_MSR_ICR: + return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata); + case HV_X64_MSR_TPR: + return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata); default: pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); return 1; @@ -1530,6 +1563,7 @@ int kvm_dev_ioctl_check_extension(long ext) case KVM_CAP_ADJUST_CLOCK: case KVM_CAP_VCPU_EVENTS: case KVM_CAP_HYPERV: + case KVM_CAP_HYPERV_VAPIC: r = 1; break; case KVM_CAP_COALESCED_MMIO: diff --git a/include/linux/kvm.h b/include/linux/kvm.h index e227cbae70a..5ce61738dc3 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h @@ -498,6 +498,7 @@ struct kvm_ioeventfd { #define KVM_CAP_S390_PSW 42 #define KVM_CAP_PPC_SEGSTATE 43 #define KVM_CAP_HYPERV 44 +#define KVM_CAP_HYPERV_VAPIC 45 #ifdef KVM_CAP_IRQ_ROUTING -- cgit v1.2.3-70-g09d2 From c25bc1638a1211f57cccbabdd8b732813b852340 Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Sun, 17 Jan 2010 15:51:24 +0200 Subject: KVM: Implement NotifyLongSpinWait HYPER-V hypercall Windows issues this hypercall after guest was spinning on a spinlock for too many iterations. 
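For orientation only, and as a hypothetical guest-side helper that is not part of this patch: given the input layout that kvm_hv_hypercall() already parses (call code in bits 0-15, the fast flag in bit 16, rep count in bits 32-43, rep start index in bits 48-59), the guest forms the hypercall input roughly like this:

static u64 hv_hypercall_input(u16 code, bool fast, u16 rep_cnt, u16 rep_idx)
{
        u64 param = code;                       /* HV_X64_HV_NOTIFY_LONG_SPIN_WAIT is 0x0008 */

        param |= (u64)!!fast << 16;             /* register-based (fast) calling convention */
        param |= ((u64)rep_cnt & 0xfff) << 32;  /* rep count, unused for this call */
        param |= ((u64)rep_idx & 0xfff) << 48;  /* rep start index, unused for this call */
        return param;
}

On the host side, the new switch below simply maps that code to kvm_vcpu_on_spin(), so a long-spinning vcpu yields instead of burning its timeslice.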
Signed-off-by: Gleb Natapov Signed-off-by: Vadim Rozenfeld Signed-off-by: Avi Kivity --- arch/x86/kvm/x86.c | 10 +++++++++- include/linux/kvm.h | 1 + 2 files changed, 10 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 552be51e4d8..9f72a443455 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1564,6 +1564,7 @@ int kvm_dev_ioctl_check_extension(long ext) case KVM_CAP_VCPU_EVENTS: case KVM_CAP_HYPERV: case KVM_CAP_HYPERV_VAPIC: + case KVM_CAP_HYPERV_SPIN: r = 1; break; case KVM_CAP_COALESCED_MMIO: @@ -3827,7 +3828,14 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu) trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa); - res = HV_STATUS_INVALID_HYPERCALL_CODE; + switch (code) { + case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT: + kvm_vcpu_on_spin(vcpu); + break; + default: + res = HV_STATUS_INVALID_HYPERCALL_CODE; + break; + } ret = res | (((u64)rep_done & 0xfff) << 32); if (longmode) { diff --git a/include/linux/kvm.h b/include/linux/kvm.h index 5ce61738dc3..4c4937e7f65 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h @@ -499,6 +499,7 @@ struct kvm_ioeventfd { #define KVM_CAP_PPC_SEGSTATE 43 #define KVM_CAP_HYPERV 44 #define KVM_CAP_HYPERV_VAPIC 45 +#define KVM_CAP_HYPERV_SPIN 46 #ifdef KVM_CAP_IRQ_ROUTING -- cgit v1.2.3-70-g09d2 From 8dae444529230301bc85fc86033aa06a734c1a29 Mon Sep 17 00:00:00 2001 From: Takuya Yoshikawa Date: Mon, 18 Jan 2010 18:45:10 +0900 Subject: KVM: rename is_writeble_pte() to is_writable_pte() There are two spellings of "writable" in arch/x86/kvm/mmu.c and paging_tmpl.h . This patch renames is_writeble_pte() to is_writable_pte() and makes grepping easy. New name is consistent with the definition of itself: return pte & PT_WRITABLE_MASK; Signed-off-by: Takuya Yoshikawa Signed-off-by: Avi Kivity --- arch/x86/kvm/mmu.c | 18 +++++++++--------- arch/x86/kvm/paging_tmpl.h | 2 +- 2 files changed, 10 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 276bf7497c3..ff2b2e8d72e 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -250,7 +250,7 @@ static int is_large_pte(u64 pte) return pte & PT_PAGE_SIZE_MASK; } -static int is_writeble_pte(unsigned long pte) +static int is_writable_pte(unsigned long pte) { return pte & PT_WRITABLE_MASK; } @@ -632,7 +632,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte) pfn = spte_to_pfn(*spte); if (*spte & shadow_accessed_mask) kvm_set_pfn_accessed(pfn); - if (is_writeble_pte(*spte)) + if (is_writable_pte(*spte)) kvm_set_pfn_dirty(pfn); rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], sp->role.level); if (!*rmapp) { @@ -708,7 +708,7 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn) BUG_ON(!spte); BUG_ON(!(*spte & PT_PRESENT_MASK)); rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte); - if (is_writeble_pte(*spte)) { + if (is_writable_pte(*spte)) { __set_spte(spte, *spte & ~PT_WRITABLE_MASK); write_protected = 1; } @@ -732,7 +732,7 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn) BUG_ON(!(*spte & PT_PRESENT_MASK)); BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)); pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn); - if (is_writeble_pte(*spte)) { + if (is_writable_pte(*spte)) { rmap_remove(kvm, spte); --kvm->stat.lpages; __set_spte(spte, shadow_trap_nonpresent_pte); @@ -787,7 +787,7 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp, new_spte &= ~PT_WRITABLE_MASK; new_spte &= 
~SPTE_HOST_WRITEABLE; - if (is_writeble_pte(*spte)) + if (is_writable_pte(*spte)) kvm_set_pfn_dirty(spte_to_pfn(*spte)); __set_spte(spte, new_spte); spte = rmap_next(kvm, rmapp, spte); @@ -1847,7 +1847,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, * is responsibility of mmu_get_page / kvm_sync_page. * Same reasoning can be applied to dirty page accounting. */ - if (!can_unsync && is_writeble_pte(*sptep)) + if (!can_unsync && is_writable_pte(*sptep)) goto set_pte; if (mmu_need_write_protect(vcpu, gfn, can_unsync)) { @@ -1855,7 +1855,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, __func__, gfn); ret = 1; pte_access &= ~ACC_WRITE_MASK; - if (is_writeble_pte(spte)) + if (is_writable_pte(spte)) spte &= ~PT_WRITABLE_MASK; } } @@ -1876,7 +1876,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, bool reset_host_protection) { int was_rmapped = 0; - int was_writeble = is_writeble_pte(*sptep); + int was_writable = is_writable_pte(*sptep); int rmap_count; pgprintk("%s: spte %llx access %x write_fault %d" @@ -1927,7 +1927,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, if (rmap_count > RMAP_RECYCLE_THRESHOLD) rmap_recycle(vcpu, sptep, gfn); } else { - if (was_writeble) + if (was_writable) kvm_release_pfn_dirty(pfn); else kvm_release_pfn_clean(pfn); diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index ede2131a922..df15a5307d2 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -162,7 +162,7 @@ walk: if (rsvd_fault) goto access_error; - if (write_fault && !is_writeble_pte(pte)) + if (write_fault && !is_writable_pte(pte)) if (user_fault || is_write_protection(vcpu)) goto access_error; -- cgit v1.2.3-70-g09d2 From 94718da12741ef44e1eb2bfe3ca37db92115a3d3 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Mon, 18 Jan 2010 13:26:34 +0200 Subject: KVM: export Needed by . Signed-off-by: Avi Kivity --- arch/x86/include/asm/Kbuild | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild index 9f828f87ca3..493092efaa3 100644 --- a/arch/x86/include/asm/Kbuild +++ b/arch/x86/include/asm/Kbuild @@ -11,6 +11,7 @@ header-y += sigcontext32.h header-y += ucontext.h header-y += processor-flags.h header-y += hw_breakpoint.h +header-y += hyperv.h unifdef-y += e820.h unifdef-y += ist.h -- cgit v1.2.3-70-g09d2 From 7062dcaa369cae40c1a59949a5654985d0ae26ea Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Tue, 19 Jan 2010 17:43:21 +0800 Subject: KVM: VMX: Remove emulation failure report As Avi noted: >There are two problems with the kernel failure report. First, it >doesn't report enough data - registers, surrounding instructions, etc. >that are needed to explain what is going on. Second, it can flood >dmesg, which is a pretty bad thing to do. So we remove the emulation failure report in handle_invalid_guest_state(), and will inspect the guest using a userspace tool in the future.
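A hypothetical userspace-side check, to illustrate what takes the place of the removed printk (the helper name is an assumption; the KVM_EXIT_* constants and kvm_run fields are the ones the hunk below already fills in):

#include <stdbool.h>
#include <linux/kvm.h>

static bool vmm_saw_emulation_failure(const struct kvm_run *run)
{
        /* handle_invalid_guest_state() still reports the condition here */
        return run->exit_reason == KVM_EXIT_INTERNAL_ERROR &&
               run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION;
}

A VMM that sees this exit can dump registers and the surrounding instructions itself, which addresses both of the problems quoted above.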
Signed-off-by: Sheng Yang Signed-off-by: Marcelo Tosatti --- arch/x86/kvm/vmx.c | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index fc1964d5e97..516084f3c6d 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -3449,7 +3449,6 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) } if (err != EMULATE_DONE) { - kvm_report_emulation_failure(vcpu, "emulation failure"); vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; vcpu->run->internal.ndata = 0; -- cgit v1.2.3-70-g09d2 From ccd469362e826261ccc261c4c36fb0a346338222 Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Tue, 19 Jan 2010 15:06:38 +0200 Subject: KVM: fix Hyper-V hypercall warnings and wrong mask value Fix compilation warnings and wrong mask value. Signed-off-by: Gleb Natapov Signed-off-by: Marcelo Tosatti --- arch/x86/kvm/x86.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 9f72a443455..9b0758b4d55 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3806,12 +3806,12 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu) longmode = is_long_mode(vcpu) && cs_l == 1; if (!longmode) { - param = (kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) | - (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffff); - ingpa = (kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) | - (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffff); - outgpa = (kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) | - (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffff); + param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) | + (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff); + ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) | + (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff); + outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) | + (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff); } #ifdef CONFIG_X86_64 else { -- cgit v1.2.3-70-g09d2 From 647492047763c3ee8fe51ecf9a04f39040aa495b Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Tue, 19 Jan 2010 12:45:23 -0200 Subject: KVM: fix cleanup_srcu_struct on vm destruction cleanup_srcu_struct on VM destruction remains broken: BUG: unable to handle kernel paging request at ffffffffffffffff IP: [] srcu_read_lock+0x16/0x21 RIP: 0010:[] [] srcu_read_lock+0x16/0x21 Call Trace: [] kvm_arch_vcpu_uninit+0x1b/0x48 [kvm] [] kvm_vcpu_uninit+0x9/0x15 [kvm] [] vmx_free_vcpu+0x7f/0x8f [kvm_intel] [] kvm_arch_destroy_vm+0x78/0x111 [kvm] [] kvm_put_kvm+0xd4/0xfe [kvm] Move it to kvm_arch_destroy_vm. 
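The ordering constraint the fix restores, sketched per arch with helper names simplified (this is an illustration of the shape of the change, not the literal diff):

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        /* vcpu teardown may still do srcu_read_lock(&kvm->srcu), see the
         * kvm_arch_vcpu_uninit() frame in the oops above */
        kvm_free_vcpus(kvm);
        kvm_free_physmem(kvm);
        cleanup_srcu_struct(&kvm->srcu);        /* only after the last reader is gone */
        kfree(kvm);
}

Doing the cleanup in the generic kvm_destroy_vm(), before kvm_arch_destroy_vm() runs, is what produced the fault above.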
Signed-off-by: Marcelo Tosatti Reported-by: Jan Kiszka --- arch/ia64/kvm/kvm-ia64.c | 1 + arch/powerpc/kvm/powerpc.c | 1 + arch/s390/kvm/kvm-s390.c | 1 + arch/x86/kvm/x86.c | 1 + virt/kvm/kvm_main.c | 1 - 5 files changed, 4 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index e6ac549f8d5..06188988ed2 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -1404,6 +1404,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm) kfree(kvm->arch.vioapic); kvm_release_vm_pages(kvm); kvm_free_physmem(kvm); + cleanup_srcu_struct(&kvm->srcu); free_kvm(kvm); } diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 2c291161df8..51aedd7f16b 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -137,6 +137,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm) { kvmppc_free_vcpus(kvm); kvm_free_physmem(kvm); + cleanup_srcu_struct(&kvm->srcu); kfree(kvm); } diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index c8002193d9d..49292869a5c 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -242,6 +242,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm) kvm_free_physmem(kvm); free_page((unsigned long)(kvm->arch.sca)); debug_unregister(kvm->arch.dbf); + cleanup_srcu_struct(&kvm->srcu); kfree(kvm); } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 9b0758b4d55..322c2c5f9bc 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -5547,6 +5547,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm) put_page(kvm->arch.apic_access_page); if (kvm->arch.ept_identity_pagetable) put_page(kvm->arch.ept_identity_pagetable); + cleanup_srcu_struct(&kvm->srcu); kfree(kvm->arch.aliases); kfree(kvm); } diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 2b7cd6c0d9c..7c5c873812f 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -506,7 +506,6 @@ static void kvm_destroy_vm(struct kvm *kvm) #else kvm_arch_flush_shadow(kvm); #endif - cleanup_srcu_struct(&kvm->srcu); kvm_arch_destroy_vm(kvm); hardware_disable_all(); mmdrop(mm); -- cgit v1.2.3-70-g09d2 From b60d513c32e2ddc8b3e9e1465b94913d44d19810 Mon Sep 17 00:00:00 2001 From: Takuya Yoshikawa Date: Wed, 20 Jan 2010 16:47:21 +0900 Subject: KVM: x86: Use macros for x86_emulate_ops to avoid future mistakes The return values from x86_emulate_ops are defined in kvm_emulate.h as macros X86EMUL_*. But in emulate.c, we are comparing the return values from these ops with 0 to check if they're X86EMUL_CONTINUE or not: X86EMUL_CONTINUE is defined as 0 now. To avoid possible mistakes in the future, this patch substitutes "X86EMUL_CONTINUE" for "0" that are being compared with the return values from x86_emulate_ops. We think that there are more places we should use these macros, but the meanings of rc values in x86_emulate_insn() were not so clear at a glance. If we use proper macros in this function, we would be able to follow the flow of each emulation more easily and, maybe, more securely. 
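In isolation, the pattern the patch converts to looks like this (hypothetical helper for illustration only; the read_emulated signature matches the call sites below):

static int emulate_read_step(struct x86_emulate_ctxt *ctxt,
                             struct x86_emulate_ops *ops,
                             unsigned long addr, void *val, unsigned int bytes)
{
        int rc = ops->read_emulated(addr, val, bytes, ctxt->vcpu);

        if (rc != X86EMUL_CONTINUE)     /* was: if (rc != 0) */
                return rc;
        /* ... carry on with the rest of the emulation step ... */
        return X86EMUL_CONTINUE;
}

The comparison keeps working because X86EMUL_CONTINUE happens to be 0 today, but the intent is now visible at the call site.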
Signed-off-by: Takuya Yoshikawa Signed-off-by: Marcelo Tosatti --- arch/x86/kvm/emulate.c | 65 ++++++++++++++++++++++++++++---------------------- 1 file changed, 36 insertions(+), 29 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 0f89e320bc9..48c7f9f8a08 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -1196,7 +1196,7 @@ static int emulate_pop(struct x86_emulate_ctxt *ctxt, rc = ops->read_emulated(register_address(c, ss_base(ctxt), c->regs[VCPU_REGS_RSP]), dest, len, ctxt->vcpu); - if (rc != 0) + if (rc != X86EMUL_CONTINUE) return rc; register_address_increment(c, &c->regs[VCPU_REGS_RSP], len); @@ -1370,7 +1370,7 @@ static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt, int rc; rc = ops->read_emulated(memop, &old, 8, ctxt->vcpu); - if (rc != 0) + if (rc != X86EMUL_CONTINUE) return rc; if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) || @@ -1385,7 +1385,7 @@ static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt, (u32) c->regs[VCPU_REGS_RBX]; rc = ops->cmpxchg_emulated(memop, &old, &new, 8, ctxt->vcpu); - if (rc != 0) + if (rc != X86EMUL_CONTINUE) return rc; ctxt->eflags |= EFLG_ZF; } @@ -1451,7 +1451,7 @@ static inline int writeback(struct x86_emulate_ctxt *ctxt, &c->dst.val, c->dst.bytes, ctxt->vcpu); - if (rc != 0) + if (rc != X86EMUL_CONTINUE) return rc; break; case OP_NONE: @@ -1749,7 +1749,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) &c->src.val, c->src.bytes, ctxt->vcpu); - if (rc != 0) + if (rc != X86EMUL_CONTINUE) goto done; c->src.orig_val = c->src.val; } @@ -1768,12 +1768,15 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) c->dst.ptr = (void *)c->dst.ptr + (c->src.val & mask) / 8; } - if (!(c->d & Mov) && - /* optimisation - avoid slow emulated read */ - ((rc = ops->read_emulated((unsigned long)c->dst.ptr, - &c->dst.val, - c->dst.bytes, ctxt->vcpu)) != 0)) - goto done; + if (!(c->d & Mov)) { + /* optimisation - avoid slow emulated read */ + rc = ops->read_emulated((unsigned long)c->dst.ptr, + &c->dst.val, + c->dst.bytes, + ctxt->vcpu); + if (rc != X86EMUL_CONTINUE) + goto done; + } } c->dst.orig_val = c->dst.val; @@ -2039,11 +2042,12 @@ special_insn: c->dst.ptr = (unsigned long *)register_address(c, es_base(ctxt), c->regs[VCPU_REGS_RDI]); - if ((rc = ops->read_emulated(register_address(c, - seg_override_base(ctxt, c), - c->regs[VCPU_REGS_RSI]), + rc = ops->read_emulated(register_address(c, + seg_override_base(ctxt, c), + c->regs[VCPU_REGS_RSI]), &c->dst.val, - c->dst.bytes, ctxt->vcpu)) != 0) + c->dst.bytes, ctxt->vcpu); + if (rc != X86EMUL_CONTINUE) goto done; register_address_increment(c, &c->regs[VCPU_REGS_RSI], (ctxt->eflags & EFLG_DF) ? -c->dst.bytes @@ -2058,10 +2062,11 @@ special_insn: c->src.ptr = (unsigned long *)register_address(c, seg_override_base(ctxt, c), c->regs[VCPU_REGS_RSI]); - if ((rc = ops->read_emulated((unsigned long)c->src.ptr, - &c->src.val, - c->src.bytes, - ctxt->vcpu)) != 0) + rc = ops->read_emulated((unsigned long)c->src.ptr, + &c->src.val, + c->src.bytes, + ctxt->vcpu); + if (rc != X86EMUL_CONTINUE) goto done; c->dst.type = OP_NONE; /* Disable writeback. 
*/ @@ -2069,10 +2074,11 @@ special_insn: c->dst.ptr = (unsigned long *)register_address(c, es_base(ctxt), c->regs[VCPU_REGS_RDI]); - if ((rc = ops->read_emulated((unsigned long)c->dst.ptr, - &c->dst.val, - c->dst.bytes, - ctxt->vcpu)) != 0) + rc = ops->read_emulated((unsigned long)c->dst.ptr, + &c->dst.val, + c->dst.bytes, + ctxt->vcpu); + if (rc != X86EMUL_CONTINUE) goto done; DPRINTF("cmps: mem1=0x%p mem2=0x%p\n", c->src.ptr, c->dst.ptr); @@ -2102,12 +2108,13 @@ special_insn: c->dst.type = OP_REG; c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes; c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX]; - if ((rc = ops->read_emulated(register_address(c, - seg_override_base(ctxt, c), - c->regs[VCPU_REGS_RSI]), - &c->dst.val, - c->dst.bytes, - ctxt->vcpu)) != 0) + rc = ops->read_emulated(register_address(c, + seg_override_base(ctxt, c), + c->regs[VCPU_REGS_RSI]), + &c->dst.val, + c->dst.bytes, + ctxt->vcpu); + if (rc != X86EMUL_CONTINUE) goto done; register_address_increment(c, &c->regs[VCPU_REGS_RSI], (ctxt->eflags & EFLG_DF) ? -c->dst.bytes -- cgit v1.2.3-70-g09d2 From f2483415293b180945da707e7dbe74e5daa72651 Mon Sep 17 00:00:00 2001 From: Jan Kiszka Date: Wed, 20 Jan 2010 18:20:20 +0100 Subject: KVM: VMX: Fix exceptions of mov to dr Injecting GP without an error code is a bad idea (causes unhandled guest exits). Moreover, we must not skip the instruction if we injected an exception. Signed-off-by: Jan Kiszka Signed-off-by: Marcelo Tosatti --- arch/x86/kvm/vmx.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 516084f3c6d..9727773f24b 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -3045,6 +3045,7 @@ static int handle_dr(struct kvm_vcpu *vcpu) unsigned long val; int dr, reg; + /* Do not handle if the CPL > 0, will trigger GP on re-entry */ if (!kvm_require_cpl(vcpu, 0)) return 1; dr = vmcs_readl(GUEST_DR7); @@ -3099,20 +3100,22 @@ static int handle_dr(struct kvm_vcpu *vcpu) vcpu->arch.eff_db[dr] = val; break; case 4 ... 5: - if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) + if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) { kvm_queue_exception(vcpu, UD_VECTOR); + return 1; + } break; case 6: if (val & 0xffffffff00000000ULL) { - kvm_queue_exception(vcpu, GP_VECTOR); - break; + kvm_inject_gp(vcpu, 0); + return 1; } vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1; break; case 7: if (val & 0xffffffff00000000ULL) { - kvm_queue_exception(vcpu, GP_VECTOR); - break; + kvm_inject_gp(vcpu, 0); + return 1; } vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1; if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) { -- cgit v1.2.3-70-g09d2 From 138ac8d88f91e2a6a278aa5cee9120c714c4ce2d Mon Sep 17 00:00:00 2001 From: Jan Kiszka Date: Wed, 20 Jan 2010 18:20:20 +0100 Subject: KVM: VMX: Fix emulation of DR4 and DR5 Make sure DR4 and DR5 are aliased to DR6 and DR7, respectively, if CR4.DE is not set. 
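The architectural rule being implemented can be summarized by a small helper (hypothetical, for illustration; the patch itself open-codes it with check_dr_alias() and fall-through cases):

static int dr_effective_index(struct kvm_vcpu *vcpu, int dr)
{
        /* with CR4.DE clear, DR4/DR5 are legacy aliases of DR6/DR7;
         * with CR4.DE set, accessing them must raise #UD instead */
        if ((dr == 4 || dr == 5) && !kvm_read_cr4_bits(vcpu, X86_CR4_DE))
                return dr + 2;
        return dr;
}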
Signed-off-by: Jan Kiszka Signed-off-by: Marcelo Tosatti --- arch/x86/kvm/vmx.c | 35 ++++++++++++++++++++++++++--------- 1 file changed, 26 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 9727773f24b..c7b99e1f8aa 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -3039,6 +3039,15 @@ static int handle_cr(struct kvm_vcpu *vcpu) return 0; } +static int check_dr_alias(struct kvm_vcpu *vcpu) +{ + if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) { + kvm_queue_exception(vcpu, UD_VECTOR); + return -1; + } + return 0; +} + static int handle_dr(struct kvm_vcpu *vcpu) { unsigned long exit_qualification; @@ -3081,14 +3090,20 @@ static int handle_dr(struct kvm_vcpu *vcpu) case 0 ... 3: val = vcpu->arch.db[dr]; break; + case 4: + if (check_dr_alias(vcpu) < 0) + return 1; + /* fall through */ case 6: val = vcpu->arch.dr6; break; - case 7: + case 5: + if (check_dr_alias(vcpu) < 0) + return 1; + /* fall through */ + default: /* 7 */ val = vcpu->arch.dr7; break; - default: - val = 0; } kvm_register_write(vcpu, reg, val); } else { @@ -3099,12 +3114,10 @@ static int handle_dr(struct kvm_vcpu *vcpu) if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) vcpu->arch.eff_db[dr] = val; break; - case 4 ... 5: - if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) { - kvm_queue_exception(vcpu, UD_VECTOR); + case 4: + if (check_dr_alias(vcpu) < 0) return 1; - } - break; + /* fall through */ case 6: if (val & 0xffffffff00000000ULL) { kvm_inject_gp(vcpu, 0); @@ -3112,7 +3125,11 @@ static int handle_dr(struct kvm_vcpu *vcpu) } vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1; break; - case 7: + case 5: + if (check_dr_alias(vcpu) < 0) + return 1; + /* fall through */ + default: /* 7 */ if (val & 0xffffffff00000000ULL) { kvm_inject_gp(vcpu, 0); return 1; -- cgit v1.2.3-70-g09d2 From fd7373cce767a8803e79f51bd3fc5f531234657f Mon Sep 17 00:00:00 2001 From: Jan Kiszka Date: Wed, 20 Jan 2010 18:20:20 +0100 Subject: KVM: VMX: Clean up DR6 emulation As we trap all debug register accesses, we do not need to switch real DR6 at all. Clean up update_exception_bitmap at this chance, too. Signed-off-by: Jan Kiszka Signed-off-by: Marcelo Tosatti --- arch/x86/kvm/vmx.c | 23 ++++++----------------- 1 file changed, 6 insertions(+), 17 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index c7b99e1f8aa..b7e812e9c29 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -579,17 +579,12 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu) { u32 eb; - eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) - | (1u << NM_VECTOR); - /* - * Unconditionally intercept #DB so we can maintain dr6 without - * reading it every exit. 
- */ - eb |= 1u << DB_VECTOR; - if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) { - if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) - eb |= 1u << BP_VECTOR; - } + eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) | + (1u << NM_VECTOR) | (1u << DB_VECTOR); + if ((vcpu->guest_debug & + (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) == + (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) + eb |= 1u << BP_VECTOR; if (to_vmx(vcpu)->rmode.vm86_active) eb = ~0; if (enable_ept) @@ -3777,9 +3772,6 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu) */ vmcs_writel(HOST_CR0, read_cr0()); - if (vcpu->arch.switch_db_regs) - set_debugreg(vcpu->arch.dr6, 6); - asm( /* Store host registers */ "push %%"R"dx; push %%"R"bp;" @@ -3880,9 +3872,6 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu) | (1 << VCPU_EXREG_PDPTR)); vcpu->arch.regs_dirty = 0; - if (vcpu->arch.switch_db_regs) - get_debugreg(vcpu->arch.dr6, 6); - vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); if (vmx->rmode.irq.pending) fixup_rmode_irq(vmx); -- cgit v1.2.3-70-g09d2 From c76de350c8a3ba770becc17eaa744dc3c7642295 Mon Sep 17 00:00:00 2001 From: Jan Kiszka Date: Wed, 20 Jan 2010 18:20:20 +0100 Subject: KVM: SVM: Clean up and enhance mov dr emulation Enhance mov dr instruction emulation used by SVM so that it properly handles dr4/5: alias to dr6/7 if cr4.de is cleared. Otherwise return EMULATE_FAIL which will let our only possible caller in that scenario, ud_interception, re-inject UD. We do not need to inject faults, SVM does this for us (exceptions take precedence over instruction interceptions). For the same reason, the value overflow checks can be removed. Signed-off-by: Jan Kiszka Signed-off-by: Marcelo Tosatti --- arch/x86/include/asm/kvm_host.h | 5 ++-- arch/x86/kvm/svm.c | 64 +++++++++++++++++++---------------------- arch/x86/kvm/x86.c | 19 ++---------- 3 files changed, 33 insertions(+), 55 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index a1f0b5dd7d7..d73ed48587e 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -506,9 +506,8 @@ struct kvm_x86_ops { void (*set_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt); void (*get_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt); void (*set_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt); - unsigned long (*get_dr)(struct kvm_vcpu *vcpu, int dr); - void (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value, - int *exception); + int (*get_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long *dest); + int (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value); void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg); unsigned long (*get_rflags)(struct kvm_vcpu *vcpu); void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags); diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 8d7cb62ebef..4295dfcc603 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -1122,76 +1122,70 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd) svm->vmcb->control.asid = sd->next_asid++; } -static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr) +static int svm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *dest) { struct vcpu_svm *svm = to_svm(vcpu); - unsigned long val; switch (dr) { case 0 ... 
3: - val = vcpu->arch.db[dr]; + *dest = vcpu->arch.db[dr]; break; + case 4: + if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) + return EMULATE_FAIL; /* will re-inject UD */ + /* fall through */ case 6: if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) - val = vcpu->arch.dr6; + *dest = vcpu->arch.dr6; else - val = svm->vmcb->save.dr6; + *dest = svm->vmcb->save.dr6; break; + case 5: + if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) + return EMULATE_FAIL; /* will re-inject UD */ + /* fall through */ case 7: if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) - val = vcpu->arch.dr7; + *dest = vcpu->arch.dr7; else - val = svm->vmcb->save.dr7; + *dest = svm->vmcb->save.dr7; break; - default: - val = 0; } - return val; + return EMULATE_DONE; } -static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value, - int *exception) +static int svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value) { struct vcpu_svm *svm = to_svm(vcpu); - *exception = 0; - switch (dr) { case 0 ... 3: vcpu->arch.db[dr] = value; if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) vcpu->arch.eff_db[dr] = value; - return; - case 4 ... 5: - if (vcpu->arch.cr4 & X86_CR4_DE) - *exception = UD_VECTOR; - return; + break; + case 4: + if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) + return EMULATE_FAIL; /* will re-inject UD */ + /* fall through */ case 6: - if (value & 0xffffffff00000000ULL) { - *exception = GP_VECTOR; - return; - } vcpu->arch.dr6 = (value & DR6_VOLATILE) | DR6_FIXED_1; - return; + break; + case 5: + if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) + return EMULATE_FAIL; /* will re-inject UD */ + /* fall through */ case 7: - if (value & 0xffffffff00000000ULL) { - *exception = GP_VECTOR; - return; - } vcpu->arch.dr7 = (value & DR7_VOLATILE) | DR7_FIXED_1; if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) { svm->vmcb->save.dr7 = vcpu->arch.dr7; vcpu->arch.switch_db_regs = (value & DR7_BP_EN_MASK); } - return; - default: - /* FIXME: Possible case? */ - printk(KERN_DEBUG "%s: unexpected dr %u\n", - __func__, dr); - *exception = UD_VECTOR; - return; + break; } + + return EMULATE_DONE; } static int pf_interception(struct vcpu_svm *svm) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 322c2c5f9bc..fd5101b57fa 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3270,29 +3270,14 @@ int emulate_clts(struct kvm_vcpu *vcpu) int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest) { - struct kvm_vcpu *vcpu = ctxt->vcpu; - - switch (dr) { - case 0 ... 3: - *dest = kvm_x86_ops->get_dr(vcpu, dr); - return X86EMUL_CONTINUE; - default: - pr_unimpl(vcpu, "%s: unexpected dr %u\n", __func__, dr); - return X86EMUL_UNHANDLEABLE; - } + return kvm_x86_ops->get_dr(ctxt->vcpu, dr, dest); } int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value) { unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U; - int exception; - kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception); - if (exception) { - /* FIXME: better handling */ - return X86EMUL_UNHANDLEABLE; - } - return X86EMUL_CONTINUE; + return kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask); } void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context) -- cgit v1.2.3-70-g09d2 From 727f5a23e2080b19da392f17fa3158a0941f81f5 Mon Sep 17 00:00:00 2001 From: Jan Kiszka Date: Wed, 20 Jan 2010 18:20:20 +0100 Subject: KVM: SVM: Trap all debug register accesses To enable proper debug register emulation under all conditions, trap access to all DR0..7. This may be optimized later on. 
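The same intercept setup could also be written as a loop; this is a sketch only and assumes the INTERCEPT_DR0_MASK..INTERCEPT_DR7_MASK bits are laid out consecutively, one per register:

static void svm_intercept_all_drs(struct vmcb_control_area *control)
{
        u32 mask = 0;
        int i;

        for (i = 0; i < 8; i++)
                mask |= INTERCEPT_DR0_MASK << i;

        control->intercept_dr_read = mask;
        control->intercept_dr_write = mask;
}

The patch keeps the explicit per-register list below, which is easier to trim again once the accesses are optimized.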
Signed-off-by: Jan Kiszka Signed-off-by: Marcelo Tosatti --- arch/x86/kvm/svm.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 4295dfcc603..a281368c3b9 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -554,13 +554,19 @@ static void init_vmcb(struct vcpu_svm *svm) control->intercept_dr_read = INTERCEPT_DR0_MASK | INTERCEPT_DR1_MASK | INTERCEPT_DR2_MASK | - INTERCEPT_DR3_MASK; + INTERCEPT_DR3_MASK | + INTERCEPT_DR4_MASK | + INTERCEPT_DR5_MASK | + INTERCEPT_DR6_MASK | + INTERCEPT_DR7_MASK; control->intercept_dr_write = INTERCEPT_DR0_MASK | INTERCEPT_DR1_MASK | INTERCEPT_DR2_MASK | INTERCEPT_DR3_MASK | + INTERCEPT_DR4_MASK | INTERCEPT_DR5_MASK | + INTERCEPT_DR6_MASK | INTERCEPT_DR7_MASK; control->intercept_exceptions = (1 << PF_VECTOR) | @@ -2319,11 +2325,17 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = { [SVM_EXIT_READ_DR1] = emulate_on_interception, [SVM_EXIT_READ_DR2] = emulate_on_interception, [SVM_EXIT_READ_DR3] = emulate_on_interception, + [SVM_EXIT_READ_DR4] = emulate_on_interception, + [SVM_EXIT_READ_DR5] = emulate_on_interception, + [SVM_EXIT_READ_DR6] = emulate_on_interception, + [SVM_EXIT_READ_DR7] = emulate_on_interception, [SVM_EXIT_WRITE_DR0] = emulate_on_interception, [SVM_EXIT_WRITE_DR1] = emulate_on_interception, [SVM_EXIT_WRITE_DR2] = emulate_on_interception, [SVM_EXIT_WRITE_DR3] = emulate_on_interception, + [SVM_EXIT_WRITE_DR4] = emulate_on_interception, [SVM_EXIT_WRITE_DR5] = emulate_on_interception, + [SVM_EXIT_WRITE_DR6] = emulate_on_interception, [SVM_EXIT_WRITE_DR7] = emulate_on_interception, [SVM_EXIT_EXCP_BASE + DB_VECTOR] = db_interception, [SVM_EXIT_EXCP_BASE + BP_VECTOR] = bp_interception, -- cgit v1.2.3-70-g09d2 From ab344828ebe729e52949d64046adaa196f6b9dbe Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Thu, 21 Jan 2010 15:28:46 +0200 Subject: KVM: x86: fix checking of cr0 validity Move to/from Control Registers chapter of Intel SDM says. "Reserved bits in CR0 remain clear after any load of those registers; attempts to set them have no impact". Control Register chapter says "Bits 63:32 of CR0 are reserved and must be written with zeros. Writing a nonzero value to any of the upper 32 bits results in a general-protection exception, #GP(0)." This patch tries to implement this twisted logic. Signed-off-by: Gleb Natapov Reported-by: Lorenzo Martignoni Signed-off-by: Marcelo Tosatti --- arch/x86/kvm/x86.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index fd5101b57fa..ce267d9f030 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -430,12 +430,16 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) { cr0 |= X86_CR0_ET; - if (cr0 & CR0_RESERVED_BITS) { +#ifdef CONFIG_X86_64 + if (cr0 & 0xffffffff00000000UL) { printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n", cr0, kvm_read_cr0(vcpu)); kvm_inject_gp(vcpu, 0); return; } +#endif + + cr0 &= ~CR0_RESERVED_BITS; if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) { printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n"); -- cgit v1.2.3-70-g09d2 From 2608d7a12fd4badfe2eac534150f0715a7fd3ff3 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Thu, 21 Jan 2010 15:31:45 +0200 Subject: KVM: Allow kvm_load_guest_fpu() even when !vcpu->fpu_active This allows accessing the guest fpu from the instruction emulator, as well as being symmetric with kvm_put_guest_fpu(). 
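A hypothetical use this enables, not part of this series: emulator code can now bracket FPU-touching emulation symmetrically, in the spirit of the existing kvm_put_guest_fpu() pairing:

static void emulate_with_guest_fpu(struct kvm_vcpu *vcpu,
                                   void (*emulate)(struct kvm_vcpu *vcpu))
{
        kvm_load_guest_fpu(vcpu);       /* legal now even if !vcpu->fpu_active */
        emulate(vcpu);
        kvm_put_guest_fpu(vcpu);        /* unchanged counterpart */
}

The fpu_active check does not disappear; it simply moves to the one caller that wants it, vcpu_enter_guest(), as the hunk below shows.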
Signed-off-by: Avi Kivity Signed-off-by: Marcelo Tosatti --- arch/x86/kvm/x86.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index ce267d9f030..4cf4eac03bb 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4239,7 +4239,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) preempt_disable(); kvm_x86_ops->prepare_guest_switch(vcpu); - kvm_load_guest_fpu(vcpu); + if (vcpu->fpu_active) + kvm_load_guest_fpu(vcpu); local_irq_disable(); @@ -5285,7 +5286,7 @@ EXPORT_SYMBOL_GPL(fx_init); void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) { - if (!vcpu->fpu_active || vcpu->guest_fpu_loaded) + if (vcpu->guest_fpu_loaded) return; vcpu->guest_fpu_loaded = 1; -- cgit v1.2.3-70-g09d2 From e5bb40251a920cdd9d12c569c6aab0bdd0279e4e Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Thu, 21 Jan 2010 15:31:46 +0200 Subject: KVM: Drop kvm_{load,put}_guest_fpu() exports Not used anymore. Signed-off-by: Avi Kivity Signed-off-by: Marcelo Tosatti --- arch/x86/kvm/x86.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 4cf4eac03bb..c61ec9c6926 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -5293,7 +5293,6 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) kvm_fx_save(&vcpu->arch.host_fx_image); kvm_fx_restore(&vcpu->arch.guest_fx_image); } -EXPORT_SYMBOL_GPL(kvm_load_guest_fpu); void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) { @@ -5306,7 +5305,6 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) ++vcpu->stat.fpu_reload; set_bit(KVM_REQ_DEACTIVATE_FPU, &vcpu->requests); } -EXPORT_SYMBOL_GPL(kvm_put_guest_fpu); void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) { -- cgit v1.2.3-70-g09d2 From 6b52d18605f580bdffaffd48c8da228c3e848deb Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Thu, 21 Jan 2010 15:31:47 +0200 Subject: KVM: Activate fpu on clts Assume that if the guest executes clts, it knows what it's doing, and load the guest fpu to prevent an #NM exception. 
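A minimal model of that policy, with invented names rather than the kernel's: CLTS clears CR0.TS, and since a guest clearing TS is about to use the FPU, the FPU is activated eagerly instead of waiting for the #NM it would otherwise take.

#include <stdbool.h>
#include <stdint.h>

#define CR0_TS (1u << 3)        /* task switched */

struct toy_vcpu {
        uint32_t cr0;
        bool fpu_active;
};

static void emulate_clts(struct toy_vcpu *v)
{
        v->cr0 &= ~CR0_TS;
        if (!v->fpu_active)
                v->fpu_active = true;   /* stands in for ->fpu_activate() */
}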
Signed-off-by: Avi Kivity Signed-off-by: Marcelo Tosatti --- arch/x86/include/asm/kvm_host.h | 1 + arch/x86/kvm/svm.c | 8 +++++++- arch/x86/kvm/vmx.c | 1 + arch/x86/kvm/x86.c | 1 + 4 files changed, 10 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index d73ed48587e..7ebf9fe670c 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -511,6 +511,7 @@ struct kvm_x86_ops { void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg); unsigned long (*get_rflags)(struct kvm_vcpu *vcpu); void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags); + void (*fpu_activate)(struct kvm_vcpu *vcpu); void (*fpu_deactivate)(struct kvm_vcpu *vcpu); void (*tlb_flush)(struct kvm_vcpu *vcpu); diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index a281368c3b9..800208a60a5 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -1259,12 +1259,17 @@ static int ud_interception(struct vcpu_svm *svm) return 1; } -static int nm_interception(struct vcpu_svm *svm) +static void svm_fpu_activate(struct kvm_vcpu *vcpu) { + struct vcpu_svm *svm = to_svm(vcpu); svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR); svm->vcpu.fpu_active = 1; update_cr0_intercept(svm); +} +static int nm_interception(struct vcpu_svm *svm) +{ + svm_fpu_activate(&svm->vcpu); return 1; } @@ -2977,6 +2982,7 @@ static struct kvm_x86_ops svm_x86_ops = { .cache_reg = svm_cache_reg, .get_rflags = svm_get_rflags, .set_rflags = svm_set_rflags, + .fpu_activate = svm_fpu_activate, .fpu_deactivate = svm_fpu_deactivate, .tlb_flush = svm_flush_tlb, diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index b7e812e9c29..fad871cbed1 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -3002,6 +3002,7 @@ static int handle_cr(struct kvm_vcpu *vcpu) vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS)); trace_kvm_cr_write(0, kvm_read_cr0(vcpu)); skip_emulated_instruction(vcpu); + vmx_fpu_activate(vcpu); return 1; case 1: /*mov from cr*/ switch (cr) { diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index c61ec9c6926..4db0c8a9082 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3269,6 +3269,7 @@ int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address) int emulate_clts(struct kvm_vcpu *vcpu) { kvm_x86_ops->set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS)); + kvm_x86_ops->fpu_activate(vcpu); return X86EMUL_CONTINUE; } -- cgit v1.2.3-70-g09d2 From 3eeb3288bcbf64da90afc26389b8844df7c34912 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Thu, 21 Jan 2010 15:31:48 +0200 Subject: KVM: Add a helper for checking if the guest is in protected mode Signed-off-by: Avi Kivity Signed-off-by: Marcelo Tosatti --- arch/x86/kvm/emulate.c | 9 ++++----- arch/x86/kvm/vmx.c | 4 ++-- arch/x86/kvm/x86.c | 7 +++---- arch/x86/kvm/x86.h | 6 ++++++ 4 files changed, 15 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 48c7f9f8a08..6a429eefc53 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -32,6 +32,7 @@ #include #include +#include "x86.h" #include "mmu.h" /* for is_long_mode() */ /* @@ -1515,7 +1516,7 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt) /* syscall is not available in real mode */ if (c->lock_prefix || ctxt->mode == X86EMUL_MODE_REAL - || !kvm_read_cr0_bits(ctxt->vcpu, X86_CR0_PE)) + || !is_protmode(ctxt->vcpu)) return -1; setup_syscalls_segments(ctxt, &cs, &ss); @@ -1568,8 +1569,7 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt) return 
-1; /* inject #GP if in real mode or paging is disabled */ - if (ctxt->mode == X86EMUL_MODE_REAL || - !kvm_read_cr0_bits(ctxt->vcpu, X86_CR0_PE)) { + if (ctxt->mode == X86EMUL_MODE_REAL || !is_protmode(ctxt->vcpu)) { kvm_inject_gp(ctxt->vcpu, 0); return -1; } @@ -1634,8 +1634,7 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt) return -1; /* inject #GP if in real mode or paging is disabled */ - if (ctxt->mode == X86EMUL_MODE_REAL - || !kvm_read_cr0_bits(ctxt->vcpu, X86_CR0_PE)) { + if (ctxt->mode == X86EMUL_MODE_REAL || !is_protmode(ctxt->vcpu)) { kvm_inject_gp(ctxt->vcpu, 0); return -1; } diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index fad871cbed1..2e894954069 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -1845,7 +1845,7 @@ static void vmx_get_segment(struct kvm_vcpu *vcpu, static int vmx_get_cpl(struct kvm_vcpu *vcpu) { - if (!kvm_read_cr0_bits(vcpu, X86_CR0_PE)) /* if real mode */ + if (!is_protmode(vcpu)) return 0; if (vmx_get_rflags(vcpu) & X86_EFLAGS_VM) /* if virtual 8086 */ @@ -2100,7 +2100,7 @@ static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu) static bool guest_state_valid(struct kvm_vcpu *vcpu) { /* real mode guest state checks */ - if (!kvm_read_cr0_bits(vcpu, X86_CR0_PE)) { + if (!is_protmode(vcpu)) { if (!rmode_segment_valid(vcpu, VCPU_SREG_CS)) return false; if (!rmode_segment_valid(vcpu, VCPU_SREG_SS)) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 4db0c8a9082..a4a7d1892f7 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3786,8 +3786,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu) * hypercall generates UD from non zero cpl and real mode * per HYPER-V spec */ - if (kvm_x86_ops->get_cpl(vcpu) != 0 || - !kvm_read_cr0_bits(vcpu, X86_CR0_PE)) { + if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) { kvm_queue_exception(vcpu, UD_VECTOR); return 0; } @@ -4751,7 +4750,7 @@ int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, { struct kvm_segment kvm_seg; - if (is_vm86_segment(vcpu, seg) || !(kvm_read_cr0_bits(vcpu, X86_CR0_PE))) + if (is_vm86_segment(vcpu, seg) || !is_protmode(vcpu)) return kvm_load_realmode_segment(vcpu, selector, seg); if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg)) return 1; @@ -5103,7 +5102,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, /* Older userspace won't unhalt the vcpu on reset. */ if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 && sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 && - !(kvm_read_cr0_bits(vcpu, X86_CR0_PE))) + !is_protmode(vcpu)) vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; vcpu_put(vcpu); diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index 5eadea585d2..f783d8fe0d1 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -2,6 +2,7 @@ #define ARCH_X86_KVM_X86_H #include +#include "kvm_cache_regs.h" static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu) { @@ -35,4 +36,9 @@ static inline bool kvm_exception_is_soft(unsigned int nr) struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu, u32 function, u32 index); +static inline bool is_protmode(struct kvm_vcpu *vcpu) +{ + return kvm_read_cr0_bits(vcpu, X86_CR0_PE); +} + #endif -- cgit v1.2.3-70-g09d2 From 836a1b3c3456042704c86aaa3d837b976de9343b Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Thu, 21 Jan 2010 15:31:49 +0200 Subject: KVM: Move cr0/cr4/efer related helpers to x86.h They have more general scope than the mmu. 
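One reason these predicates belong in a shared header is that code far from the mmu combines them; for example, whether PAE PDPTEs are in use depends on CR0.PG, CR4.PAE and EFER.LMA together. A freestanding illustration (bit positions per the SDM, the helper name is invented here):

#include <stdbool.h>
#include <stdint.h>

#define CR0_PG   (1u << 31)
#define CR4_PAE  (1u << 5)
#define EFER_LMA (1u << 10)

static bool pdptrs_in_use(uint32_t cr0, uint32_t cr4, uint64_t efer)
{
        /* The four PDPTE registers are consulted only for PAE paging
         * outside of long mode. */
        return (cr0 & CR0_PG) && (cr4 & CR4_PAE) && !(efer & EFER_LMA);
}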
Signed-off-by: Avi Kivity Signed-off-by: Marcelo Tosatti --- arch/x86/kvm/emulate.c | 1 - arch/x86/kvm/mmu.c | 1 + arch/x86/kvm/mmu.h | 24 ------------------------ arch/x86/kvm/x86.h | 24 ++++++++++++++++++++++++ 4 files changed, 25 insertions(+), 25 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 6a429eefc53..645b245a3c2 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -33,7 +33,6 @@ #include #include "x86.h" -#include "mmu.h" /* for is_long_mode() */ /* * Opcode effective-address decode tables. diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index ff2b2e8d72e..6f7158f4fbf 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -18,6 +18,7 @@ */ #include "mmu.h" +#include "x86.h" #include "kvm_cache_regs.h" #include diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index 599159f728b..61ef5a65b7d 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -58,30 +58,6 @@ static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu) return kvm_mmu_load(vcpu); } -static inline int is_long_mode(struct kvm_vcpu *vcpu) -{ -#ifdef CONFIG_X86_64 - return vcpu->arch.shadow_efer & EFER_LMA; -#else - return 0; -#endif -} - -static inline int is_pae(struct kvm_vcpu *vcpu) -{ - return kvm_read_cr4_bits(vcpu, X86_CR4_PAE); -} - -static inline int is_pse(struct kvm_vcpu *vcpu) -{ - return kvm_read_cr4_bits(vcpu, X86_CR4_PSE); -} - -static inline int is_paging(struct kvm_vcpu *vcpu) -{ - return kvm_read_cr0_bits(vcpu, X86_CR0_PG); -} - static inline int is_present_gpte(unsigned long pte) { return pte & PT_PRESENT_MASK; diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index f783d8fe0d1..2dc24a755b6 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -41,4 +41,28 @@ static inline bool is_protmode(struct kvm_vcpu *vcpu) return kvm_read_cr0_bits(vcpu, X86_CR0_PE); } +static inline int is_long_mode(struct kvm_vcpu *vcpu) +{ +#ifdef CONFIG_X86_64 + return vcpu->arch.shadow_efer & EFER_LMA; +#else + return 0; +#endif +} + +static inline int is_pae(struct kvm_vcpu *vcpu) +{ + return kvm_read_cr4_bits(vcpu, X86_CR4_PAE); +} + +static inline int is_pse(struct kvm_vcpu *vcpu) +{ + return kvm_read_cr4_bits(vcpu, X86_CR4_PSE); +} + +static inline int is_paging(struct kvm_vcpu *vcpu) +{ + return kvm_read_cr0_bits(vcpu, X86_CR0_PG); +} + #endif -- cgit v1.2.3-70-g09d2 From f6801dff23bd1902473902194667f4ac1eb6ea26 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Thu, 21 Jan 2010 15:31:50 +0200 Subject: KVM: Rename vcpu->shadow_efer to efer None of the other registers have the shadow_ prefix. 
Signed-off-by: Avi Kivity Signed-off-by: Marcelo Tosatti --- arch/x86/include/asm/kvm_host.h | 2 +- arch/x86/kvm/mmu.c | 2 +- arch/x86/kvm/svm.c | 12 ++++++------ arch/x86/kvm/vmx.c | 14 +++++++------- arch/x86/kvm/x86.c | 14 +++++++------- arch/x86/kvm/x86.h | 2 +- 6 files changed, 23 insertions(+), 23 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 7ebf9fe670c..15223372384 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -277,7 +277,7 @@ struct kvm_vcpu_arch { unsigned long cr8; u32 hflags; u64 pdptrs[4]; /* pae */ - u64 shadow_efer; + u64 efer; u64 apic_base; struct kvm_lapic *apic; /* kernel irqchip context */ int32_t apic_arb_prio; diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 6f7158f4fbf..599c422c390 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -237,7 +237,7 @@ static int is_cpuid_PSE36(void) static int is_nx(struct kvm_vcpu *vcpu) { - return vcpu->arch.shadow_efer & EFER_NX; + return vcpu->arch.efer & EFER_NX; } static int is_shadow_present_pte(u64 pte) diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 800208a60a5..9596cc86d6d 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -231,7 +231,7 @@ static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer) efer &= ~EFER_LME; to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME; - vcpu->arch.shadow_efer = efer; + vcpu->arch.efer = efer; } static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr, @@ -996,14 +996,14 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) struct vcpu_svm *svm = to_svm(vcpu); #ifdef CONFIG_X86_64 - if (vcpu->arch.shadow_efer & EFER_LME) { + if (vcpu->arch.efer & EFER_LME) { if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) { - vcpu->arch.shadow_efer |= EFER_LMA; + vcpu->arch.efer |= EFER_LMA; svm->vmcb->save.efer |= EFER_LMA | EFER_LME; } if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) { - vcpu->arch.shadow_efer &= ~EFER_LMA; + vcpu->arch.efer &= ~EFER_LMA; svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME); } } @@ -1361,7 +1361,7 @@ static int vmmcall_interception(struct vcpu_svm *svm) static int nested_svm_check_permissions(struct vcpu_svm *svm) { - if (!(svm->vcpu.arch.shadow_efer & EFER_SVME) + if (!(svm->vcpu.arch.efer & EFER_SVME) || !is_paging(&svm->vcpu)) { kvm_queue_exception(&svm->vcpu, UD_VECTOR); return 1; @@ -1764,7 +1764,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm) hsave->save.ds = vmcb->save.ds; hsave->save.gdtr = vmcb->save.gdtr; hsave->save.idtr = vmcb->save.idtr; - hsave->save.efer = svm->vcpu.arch.shadow_efer; + hsave->save.efer = svm->vcpu.arch.efer; hsave->save.cr0 = kvm_read_cr0(&svm->vcpu); hsave->save.cr4 = svm->vcpu.arch.cr4; hsave->save.rflags = vmcb->save.rflags; diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 2e894954069..a680d939546 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -613,7 +613,7 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset) u64 guest_efer; u64 ignore_bits; - guest_efer = vmx->vcpu.arch.shadow_efer; + guest_efer = vmx->vcpu.arch.efer; /* * NX is emulated; LMA and LME handled by hardware; SCE meaninless @@ -955,7 +955,7 @@ static void setup_msrs(struct vcpu_vmx *vmx) * if efer.sce is enabled. 
*/ index = __find_msr_index(vmx, MSR_K6_STAR); - if ((index >= 0) && (vmx->vcpu.arch.shadow_efer & EFER_SCE)) + if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE)) move_msr_up(vmx, index, save_nmsrs++); } #endif @@ -1600,7 +1600,7 @@ static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer) * of this msr depends on is_long_mode(). */ vmx_load_host_state(to_vmx(vcpu)); - vcpu->arch.shadow_efer = efer; + vcpu->arch.efer = efer; if (!msr) return; if (efer & EFER_LMA) { @@ -1632,13 +1632,13 @@ static void enter_lmode(struct kvm_vcpu *vcpu) (guest_tr_ar & ~AR_TYPE_MASK) | AR_TYPE_BUSY_64_TSS); } - vcpu->arch.shadow_efer |= EFER_LMA; - vmx_set_efer(vcpu, vcpu->arch.shadow_efer); + vcpu->arch.efer |= EFER_LMA; + vmx_set_efer(vcpu, vcpu->arch.efer); } static void exit_lmode(struct kvm_vcpu *vcpu) { - vcpu->arch.shadow_efer &= ~EFER_LMA; + vcpu->arch.efer &= ~EFER_LMA; vmcs_write32(VM_ENTRY_CONTROLS, vmcs_read32(VM_ENTRY_CONTROLS) @@ -1745,7 +1745,7 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) enter_rmode(vcpu); #ifdef CONFIG_X86_64 - if (vcpu->arch.shadow_efer & EFER_LME) { + if (vcpu->arch.efer & EFER_LME) { if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) enter_lmode(vcpu); if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index a4a7d1892f7..27af6e353b0 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -456,7 +456,7 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) { #ifdef CONFIG_X86_64 - if ((vcpu->arch.shadow_efer & EFER_LME)) { + if ((vcpu->arch.efer & EFER_LME)) { int cs_db, cs_l; if (!is_pae(vcpu)) { @@ -655,7 +655,7 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer) } if (is_paging(vcpu) - && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) { + && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) { printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n"); kvm_inject_gp(vcpu, 0); return; @@ -686,9 +686,9 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer) kvm_x86_ops->set_efer(vcpu, efer); efer &= ~EFER_LMA; - efer |= vcpu->arch.shadow_efer & EFER_LMA; + efer |= vcpu->arch.efer & EFER_LMA; - vcpu->arch.shadow_efer = efer; + vcpu->arch.efer = efer; vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled; kvm_mmu_reset_context(vcpu); @@ -1426,7 +1426,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) data |= (((uint64_t)4ULL) << 40); break; case MSR_EFER: - data = vcpu->arch.shadow_efer; + data = vcpu->arch.efer; break; case MSR_KVM_WALL_CLOCK: data = vcpu->kvm->arch.wall_clock; @@ -4569,7 +4569,7 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, sregs->cr3 = vcpu->arch.cr3; sregs->cr4 = kvm_read_cr4(vcpu); sregs->cr8 = kvm_get_cr8(vcpu); - sregs->efer = vcpu->arch.shadow_efer; + sregs->efer = vcpu->arch.efer; sregs->apic_base = kvm_get_apic_base(vcpu); memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap); @@ -5059,7 +5059,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, kvm_set_cr8(vcpu, sregs->cr8); - mmu_reset_needed |= vcpu->arch.shadow_efer != sregs->efer; + mmu_reset_needed |= vcpu->arch.efer != sregs->efer; kvm_x86_ops->set_efer(vcpu, sregs->efer); kvm_set_apic_base(vcpu, sregs->apic_base); diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index 2dc24a755b6..2d101639bd8 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -44,7 +44,7 @@ static inline bool is_protmode(struct kvm_vcpu *vcpu) static inline int is_long_mode(struct kvm_vcpu *vcpu) { #ifdef 
CONFIG_X86_64 - return vcpu->arch.shadow_efer & EFER_LMA; + return vcpu->arch.efer & EFER_LMA; #else return 0; #endif -- cgit v1.2.3-70-g09d2 From 8ae099127668de80e4babba73bb492740ce3a1be Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Thu, 21 Jan 2010 15:31:51 +0200 Subject: KVM: Optimize kvm_read_cr[04]_bits() 'mask' is always a constant, so we can check whether it includes a bit that might be owned by the guest very cheaply, and avoid the decache call. Saves a few hundred bytes of module text. Signed-off-by: Avi Kivity Signed-off-by: Marcelo Tosatti --- arch/x86/kvm/kvm_cache_regs.h | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h index 6b419a36cbd..cff851cf532 100644 --- a/arch/x86/kvm/kvm_cache_regs.h +++ b/arch/x86/kvm/kvm_cache_regs.h @@ -1,6 +1,11 @@ #ifndef ASM_KVM_CACHE_REGS_H #define ASM_KVM_CACHE_REGS_H +#define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS +#define KVM_POSSIBLE_CR4_GUEST_BITS \ + (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \ + | X86_CR4_OSXMMEXCPT | X86_CR4_PGE) + static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, enum kvm_reg reg) { @@ -40,7 +45,8 @@ static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index) static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask) { - if (mask & vcpu->arch.cr0_guest_owned_bits) + ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS; + if (tmask & vcpu->arch.cr0_guest_owned_bits) kvm_x86_ops->decache_cr0_guest_bits(vcpu); return vcpu->arch.cr0 & mask; } @@ -52,7 +58,8 @@ static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu) static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask) { - if (mask & vcpu->arch.cr4_guest_owned_bits) + ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS; + if (tmask & vcpu->arch.cr4_guest_owned_bits) kvm_x86_ops->decache_cr4_guest_bits(vcpu); return vcpu->arch.cr4 & mask; } -- cgit v1.2.3-70-g09d2 From 0c04851c0c093ce98ab4ca69556480d779292418 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Thu, 21 Jan 2010 15:31:52 +0200 Subject: KVM: trace guest fpu loads and unloads Signed-off-by: Avi Kivity Signed-off-by: Marcelo Tosatti --- arch/x86/kvm/x86.c | 2 ++ include/trace/events/kvm.h | 19 +++++++++++++++++++ 2 files changed, 21 insertions(+) (limited to 'arch') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 27af6e353b0..3b90298fb98 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -5292,6 +5292,7 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) vcpu->guest_fpu_loaded = 1; kvm_fx_save(&vcpu->arch.host_fx_image); kvm_fx_restore(&vcpu->arch.guest_fx_image); + trace_kvm_fpu(1); } void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) @@ -5304,6 +5305,7 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) kvm_fx_restore(&vcpu->arch.host_fx_image); ++vcpu->stat.fpu_reload; set_bit(KVM_REQ_DEACTIVATE_FPU, &vcpu->requests); + trace_kvm_fpu(0); } void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h index dbe10845527..8abdc123014 100644 --- a/include/trace/events/kvm.h +++ b/include/trace/events/kvm.h @@ -145,6 +145,25 @@ TRACE_EVENT(kvm_mmio, __entry->len, __entry->gpa, __entry->val) ); +#define kvm_fpu_load_symbol \ + {0, "unload"}, \ + {1, "load"} + +TRACE_EVENT(kvm_fpu, + TP_PROTO(int load), + TP_ARGS(load), + + TP_STRUCT__entry( + __field( u32, load ) + ), + + TP_fast_assign( + __entry->load = load; + ), + + TP_printk("%s", __print_symbolic(__entry->load, 
kvm_fpu_load_symbol)) +); + #endif /* _TRACE_KVM_MAIN_H */ /* This part must be outside protection */ -- cgit v1.2.3-70-g09d2 From d7fa6ab217aeed26293e01d7b64f79a1ac57e823 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Fri, 22 Jan 2010 16:55:05 +0800 Subject: KVM: MMU: Remove some useless code from alloc_mmu_pages() If we fail to alloc a page for vcpu->arch.mmu.pae_root, the call to free_mmu_pages() is unnecessary, since it would only free the page allocated for vcpu->arch.mmu.pae_root. Signed-off-by: Wei Yongjun Signed-off-by: Marcelo Tosatti --- arch/x86/kvm/mmu.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 599c422c390..dc4d954efac 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -2843,16 +2843,13 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu) */ page = alloc_page(GFP_KERNEL | __GFP_DMA32); if (!page) - goto error_1; + return -ENOMEM; + vcpu->arch.mmu.pae_root = page_address(page); for (i = 0; i < 4; ++i) vcpu->arch.mmu.pae_root[i] = INVALID_PAGE; return 0; - -error_1: - free_mmu_pages(vcpu); - return -ENOMEM; } int kvm_mmu_create(struct kvm_vcpu *vcpu) -- cgit v1.2.3-70-g09d2 From d86be077a4d62e5615e97674dab8d894ce582b09 Mon Sep 17 00:00:00 2001 From: Liu Yu Date: Fri, 22 Jan 2010 18:50:29 +0800 Subject: KVM: PPC E500: Add register l1csr0 emulation Recent kernels have started to access l1csr0 to control the L1 cache. We just tell the guest that no operation is ongoing. Signed-off-by: Liu Yu Acked-by: Alexander Graf Signed-off-by: Marcelo Tosatti --- arch/powerpc/include/asm/kvm_e500.h | 1 + arch/powerpc/kvm/e500_emulate.c | 6 ++++++ 2 files changed, 7 insertions(+) (limited to 'arch') diff --git a/arch/powerpc/include/asm/kvm_e500.h b/arch/powerpc/include/asm/kvm_e500.h index 9d497ce4972..569dfd31ee7 100644 --- a/arch/powerpc/include/asm/kvm_e500.h +++ b/arch/powerpc/include/asm/kvm_e500.h @@ -52,6 +52,7 @@ struct kvmppc_vcpu_e500 { u32 mas5; u32 mas6; u32 mas7; + u32 l1csr0; u32 l1csr1; u32 hid0; u32 hid1; diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c index 7644f7a9bac..95f8ec8fdc7 100644 --- a/arch/powerpc/kvm/e500_emulate.c +++ b/arch/powerpc/kvm/e500_emulate.c @@ -99,6 +99,10 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) vcpu_e500->mas6 = spr_val; break; case SPRN_MAS7: vcpu_e500->mas7 = spr_val; break; + case SPRN_L1CSR0: + vcpu_e500->l1csr0 = spr_val; + vcpu_e500->l1csr0 &= ~(L1CSR0_DCFI | L1CSR0_CLFC); + break; case SPRN_L1CSR1: vcpu_e500->l1csr1 = spr_val; break; case SPRN_HID0: @@ -179,6 +183,8 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) break; } + case SPRN_L1CSR0: + kvmppc_set_gpr(vcpu, rt, vcpu_e500->l1csr0); break; case SPRN_L1CSR1: kvmppc_set_gpr(vcpu, rt, vcpu_e500->l1csr1); break; case SPRN_HID0: -- cgit v1.2.3-70-g09d2 From a9040f274285c3c962a08b177ac5764b4a93df07 Mon Sep 17 00:00:00 2001 From: Liu Yu Date: Fri, 22 Jan 2010 18:50:30 +0800 Subject: KVM: PPC: Add PVR/PIR init for E500 Commit 513579e3a391a3874c478a8493080822069976e8 changed the way we emulate PVR/PIR, which left PVR/PIR uninitialized on E500 and confused the guest.
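The L1CSR0 emulation added above works because the guest polls the flash-invalidate/flash-clear bits until they read back as zero; clearing them on every write makes the requested cache operation appear to complete immediately. A sketch with illustrative bit values (the real L1CSR0_DCFI and L1CSR0_CLFC definitions live in the e500 headers):

#include <stdint.h>

#define L1CSR0_DCFI (1u << 1)   /* illustrative: data cache flash invalidate */
#define L1CSR0_CLFC (1u << 8)   /* illustrative: cache lock bits flash clear */

static uint32_t emulate_l1csr0_store(uint32_t spr_val)
{
        /* Accept the write, but report any requested operation as done. */
        return spr_val & ~(L1CSR0_DCFI | L1CSR0_CLFC);
}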
Signed-off-by: Liu Yu Acked-by: Alexander Graf Signed-off-by: Marcelo Tosatti --- arch/powerpc/kvm/e500.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'arch') diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c index 64949eef43f..efa1198940a 100644 --- a/arch/powerpc/kvm/e500.c +++ b/arch/powerpc/kvm/e500.c @@ -60,6 +60,12 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu) kvmppc_e500_tlb_setup(vcpu_e500); + /* Registers init */ + vcpu->arch.pvr = mfspr(SPRN_PVR); + + /* Since booke kvm only support one core, update all vcpus' PIR to 0 */ + vcpu->vcpu_id = 0; + return 0; } -- cgit v1.2.3-70-g09d2 From da15bf436bc9586603b47b39244157431fa38b56 Mon Sep 17 00:00:00 2001 From: Liu Yu Date: Fri, 22 Jan 2010 19:36:53 +0800 Subject: KVM: PPC E500: fix tlbcfg emulation commit 55fb1027c1cf9797dbdeab48180da530e81b1c39 doesn't update tlbcfg correctly. Fix it. And since guest OS likes 'fixed' hardware, initialize tlbcfg everytime when guest access is useless. So move this part to init code. Signed-off-by: Liu Yu Acked-by: Alexander Graf Signed-off-by: Marcelo Tosatti --- arch/powerpc/include/asm/kvm_e500.h | 2 ++ arch/powerpc/kvm/e500_emulate.c | 20 ++------------------ arch/powerpc/kvm/e500_tlb.c | 6 ++++++ 3 files changed, 10 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/kvm_e500.h b/arch/powerpc/include/asm/kvm_e500.h index 569dfd31ee7..7fea26fffb2 100644 --- a/arch/powerpc/include/asm/kvm_e500.h +++ b/arch/powerpc/include/asm/kvm_e500.h @@ -56,6 +56,8 @@ struct kvmppc_vcpu_e500 { u32 l1csr1; u32 hid0; u32 hid1; + u32 tlb0cfg; + u32 tlb1cfg; struct kvm_vcpu vcpu; }; diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c index 95f8ec8fdc7..8e3edfbc963 100644 --- a/arch/powerpc/kvm/e500_emulate.c +++ b/arch/powerpc/kvm/e500_emulate.c @@ -164,25 +164,9 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas7); break; case SPRN_TLB0CFG: - { - ulong tmp = SPRN_TLB0CFG; - - tmp &= ~0xfffUL; - tmp |= vcpu_e500->guest_tlb_size[0]; - kvmppc_set_gpr(vcpu, rt, tmp); - break; - } - + kvmppc_set_gpr(vcpu, rt, vcpu_e500->tlb0cfg); break; case SPRN_TLB1CFG: - { - ulong tmp = SPRN_TLB1CFG; - - tmp &= ~0xfffUL; - tmp |= vcpu_e500->guest_tlb_size[1]; - kvmppc_set_gpr(vcpu, rt, tmp); - break; - } - + kvmppc_set_gpr(vcpu, rt, vcpu_e500->tlb1cfg); break; case SPRN_L1CSR0: kvmppc_set_gpr(vcpu, rt, vcpu_e500->l1csr0); break; case SPRN_L1CSR1: diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c index 6a7fc012b93..0d772e6b631 100644 --- a/arch/powerpc/kvm/e500_tlb.c +++ b/arch/powerpc/kvm/e500_tlb.c @@ -728,6 +728,12 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500) if (vcpu_e500->shadow_pages[1] == NULL) goto err_out_page0; + /* Init TLB configuration register */ + vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) & ~0xfffUL; + vcpu_e500->tlb0cfg |= vcpu_e500->guest_tlb_size[0]; + vcpu_e500->tlb1cfg = mfspr(SPRN_TLB1CFG) & ~0xfffUL; + vcpu_e500->tlb1cfg |= vcpu_e500->guest_tlb_size[1]; + return 0; err_out_page0: -- cgit v1.2.3-70-g09d2 From 81231c698a71af6e1815df72c06685d295e1cc1d Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sun, 24 Jan 2010 16:26:40 +0200 Subject: KVM: VMX: Pass cr0.mp through to the guest when the fpu is active When cr0.mp is clear, the guest doesn't expect a #NM in response to a WAIT instruction. Because we always keep cr0.mp set, it will get a #NM, and potentially be confused. 
Fix by keeping cr0.mp set only when the fpu is inactive, and passing it through when active. Reported-by: Lorenzo Martignoni Analyzed-by: Gleb Natapov Signed-off-by: Avi Kivity Signed-off-by: Marcelo Tosatti --- arch/x86/kvm/vmx.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index a680d939546..7a56879a058 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -66,7 +66,7 @@ module_param(emulate_invalid_guest_state, bool, S_IRUGO); #define KVM_GUEST_CR0_MASK \ (KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE) #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST \ - (X86_CR0_WP | X86_CR0_NE | X86_CR0_MP) + (X86_CR0_WP | X86_CR0_NE) #define KVM_VM_CR0_ALWAYS_ON \ (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE) #define KVM_CR4_GUEST_OWNED_BITS \ @@ -791,12 +791,15 @@ static void vmx_vcpu_put(struct kvm_vcpu *vcpu) static void vmx_fpu_activate(struct kvm_vcpu *vcpu) { + ulong cr0; + if (vcpu->fpu_active) return; vcpu->fpu_active = 1; - vmcs_clear_bits(GUEST_CR0, X86_CR0_TS); - if (kvm_read_cr0_bits(vcpu, X86_CR0_TS)) - vmcs_set_bits(GUEST_CR0, X86_CR0_TS); + cr0 = vmcs_readl(GUEST_CR0); + cr0 &= ~(X86_CR0_TS | X86_CR0_MP); + cr0 |= kvm_read_cr0_bits(vcpu, X86_CR0_TS | X86_CR0_MP); + vmcs_writel(GUEST_CR0, cr0); update_exception_bitmap(vcpu); vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS; vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); @@ -807,7 +810,7 @@ static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu); static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu) { vmx_decache_cr0_guest_bits(vcpu); - vmcs_set_bits(GUEST_CR0, X86_CR0_TS); + vmcs_set_bits(GUEST_CR0, X86_CR0_TS | X86_CR0_MP); update_exception_bitmap(vcpu); vcpu->arch.cr0_guest_owned_bits = 0; vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); @@ -1757,7 +1760,7 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu); if (!vcpu->fpu_active) - hw_cr0 |= X86_CR0_TS; + hw_cr0 |= X86_CR0_TS | X86_CR0_MP; vmcs_writel(CR0_READ_SHADOW, cr0); vmcs_writel(GUEST_CR0, hw_cr0); -- cgit v1.2.3-70-g09d2 From e01c2426149d70dc6dd46ad0453195656b6eeaa4 Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Mon, 25 Jan 2010 12:01:04 +0200 Subject: KVM: mark segments accessed on HW task switch On HW task switch newly loaded segments should be marked as accessed.
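The accessed flag that the change below sets is bit 0 of a code/data descriptor's type field; system descriptors (S = 0) have no such bit, which is why the segment kind is checked first. In miniature:

#include <stdint.h>

struct toy_desc {
        unsigned int type : 4;  /* descriptor type field */
        unsigned int s    : 1;  /* 1 = code/data, 0 = system */
};

static void mark_accessed(struct toy_desc *d)
{
        if (d->s)
                d->type |= 1;   /* accessed bit is type bit 0 */
}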
Reported-by: Lorenzo Martignoni Signed-off-by: Gleb Natapov Signed-off-by: Marcelo Tosatti --- arch/x86/kvm/x86.c | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 3b90298fb98..d47ceda7a92 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4697,18 +4697,6 @@ static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg) return kvm_seg.selector; } -static int load_segment_descriptor_to_kvm_desct(struct kvm_vcpu *vcpu, - u16 selector, - struct kvm_segment *kvm_seg) -{ - struct desc_struct seg_desc; - - if (load_guest_segment_descriptor(vcpu, selector, &seg_desc)) - return 1; - seg_desct_to_kvm_desct(&seg_desc, selector, kvm_seg); - return 0; -} - static int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int seg) { struct kvm_segment segvar = { @@ -4749,11 +4737,14 @@ int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int type_bits, int seg) { struct kvm_segment kvm_seg; + struct desc_struct seg_desc; if (is_vm86_segment(vcpu, seg) || !is_protmode(vcpu)) return kvm_load_realmode_segment(vcpu, selector, seg); - if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg)) + + if (load_guest_segment_descriptor(vcpu, selector, &seg_desc)) return 1; + seg_desct_to_kvm_desct(&seg_desc, selector, &kvm_seg); kvm_check_segment_descriptor(vcpu, seg, selector); kvm_seg.type |= type_bits; @@ -4764,6 +4755,11 @@ int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, kvm_seg.unusable = 1; kvm_set_segment(vcpu, &kvm_seg, seg); + if (selector && !kvm_seg.unusable && kvm_seg.s) { + /* mark segment as accessed */ + seg_desc.type |= 1; + save_guest_segment_descriptor(vcpu, selector, &seg_desc); + } return 0; } -- cgit v1.2.3-70-g09d2 From 6e7d15296773f7a479072ec7f3248f51d01e1fad Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Mon, 25 Jan 2010 19:36:03 +0200 Subject: KVM: Fix msr trace - data is 64 bits wide, not unsigned long - rw is confusingly named Signed-off-by: Avi Kivity Signed-off-by: Marcelo Tosatti --- arch/x86/kvm/trace.h | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h index 1cb3d0e990f..45903a92637 100644 --- a/arch/x86/kvm/trace.h +++ b/arch/x86/kvm/trace.h @@ -246,23 +246,23 @@ TRACE_EVENT(kvm_page_fault, * Tracepoint for guest MSR access. */ TRACE_EVENT(kvm_msr, - TP_PROTO(unsigned int rw, unsigned int ecx, unsigned long data), - TP_ARGS(rw, ecx, data), + TP_PROTO(unsigned write, u32 ecx, u64 data), + TP_ARGS(write, ecx, data), TP_STRUCT__entry( - __field( unsigned int, rw ) - __field( unsigned int, ecx ) - __field( unsigned long, data ) + __field( unsigned, write ) + __field( u32, ecx ) + __field( u64, data ) ), TP_fast_assign( - __entry->rw = rw; + __entry->write = write; __entry->ecx = ecx; __entry->data = data; ), - TP_printk("msr_%s %x = 0x%lx", - __entry->rw ? "write" : "read", + TP_printk("msr_%s %x = 0x%llx", + __entry->write ? "write" : "read", __entry->ecx, __entry->data) ); -- cgit v1.2.3-70-g09d2 From 59200273c4d07ddf99dd3c8d91b90bb170457fc3 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Mon, 25 Jan 2010 19:47:02 +0200 Subject: KVM: Trace failed msr reads and writes Record failed msrs reads and writes, and the fact that they failed as well. 
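The "(... & -1u) | ((u64)(... & -1u) << 32)" expressions in the wrmsr handlers below simply recombine EDX:EAX into one 64-bit MSR value; the "& -1u" truncates each register to its low 32 bits. Equivalently, as a freestanding helper:

#include <stdint.h>

static uint64_t wrmsr_operand(uint64_t rax, uint64_t rdx)
{
        return (uint32_t)rax | ((uint64_t)(uint32_t)rdx << 32);
}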
Signed-off-by: Avi Kivity Signed-off-by: Marcelo Tosatti --- arch/x86/kvm/svm.c | 13 ++++++++----- arch/x86/kvm/trace.h | 17 +++++++++++------ arch/x86/kvm/vmx.c | 5 +++-- 3 files changed, 22 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 9596cc86d6d..52f78dd0301 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -2177,9 +2177,10 @@ static int rdmsr_interception(struct vcpu_svm *svm) u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX]; u64 data; - if (svm_get_msr(&svm->vcpu, ecx, &data)) + if (svm_get_msr(&svm->vcpu, ecx, &data)) { + trace_kvm_msr_read_ex(ecx); kvm_inject_gp(&svm->vcpu, 0); - else { + } else { trace_kvm_msr_read(ecx, data); svm->vcpu.arch.regs[VCPU_REGS_RAX] = data & 0xffffffff; @@ -2271,13 +2272,15 @@ static int wrmsr_interception(struct vcpu_svm *svm) u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u) | ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32); - trace_kvm_msr_write(ecx, data); svm->next_rip = kvm_rip_read(&svm->vcpu) + 2; - if (svm_set_msr(&svm->vcpu, ecx, data)) + if (svm_set_msr(&svm->vcpu, ecx, data)) { + trace_kvm_msr_write_ex(ecx, data); kvm_inject_gp(&svm->vcpu, 0); - else + } else { + trace_kvm_msr_write(ecx, data); skip_emulated_instruction(&svm->vcpu); + } return 1; } diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h index 45903a92637..6ad30a29f04 100644 --- a/arch/x86/kvm/trace.h +++ b/arch/x86/kvm/trace.h @@ -246,28 +246,33 @@ TRACE_EVENT(kvm_page_fault, * Tracepoint for guest MSR access. */ TRACE_EVENT(kvm_msr, - TP_PROTO(unsigned write, u32 ecx, u64 data), - TP_ARGS(write, ecx, data), + TP_PROTO(unsigned write, u32 ecx, u64 data, bool exception), + TP_ARGS(write, ecx, data, exception), TP_STRUCT__entry( __field( unsigned, write ) __field( u32, ecx ) __field( u64, data ) + __field( u8, exception ) ), TP_fast_assign( __entry->write = write; __entry->ecx = ecx; __entry->data = data; + __entry->exception = exception; ), - TP_printk("msr_%s %x = 0x%llx", + TP_printk("msr_%s %x = 0x%llx%s", __entry->write ? "write" : "read", - __entry->ecx, __entry->data) + __entry->ecx, __entry->data, + __entry->exception ? " (#GP)" : "") ); -#define trace_kvm_msr_read(ecx, data) trace_kvm_msr(0, ecx, data) -#define trace_kvm_msr_write(ecx, data) trace_kvm_msr(1, ecx, data) +#define trace_kvm_msr_read(ecx, data) trace_kvm_msr(0, ecx, data, false) +#define trace_kvm_msr_write(ecx, data) trace_kvm_msr(1, ecx, data, false) +#define trace_kvm_msr_read_ex(ecx) trace_kvm_msr(0, ecx, 0, true) +#define trace_kvm_msr_write_ex(ecx, data) trace_kvm_msr(1, ecx, data, true) /* * Tracepoint for guest CR access. 
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 7a56879a058..334b016267a 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -3158,6 +3158,7 @@ static int handle_rdmsr(struct kvm_vcpu *vcpu) u64 data; if (vmx_get_msr(vcpu, ecx, &data)) { + trace_kvm_msr_read_ex(ecx); kvm_inject_gp(vcpu, 0); return 1; } @@ -3177,13 +3178,13 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu) u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u) | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32); - trace_kvm_msr_write(ecx, data); - if (vmx_set_msr(vcpu, ecx, data) != 0) { + trace_kvm_msr_write_ex(ecx, data); kvm_inject_gp(vcpu, 0); return 1; } + trace_kvm_msr_write(ecx, data); skip_emulated_instruction(vcpu); return 1; } -- cgit v1.2.3-70-g09d2 From 6d3e435e708fa32f99b780caf6c5508a606a513b Mon Sep 17 00:00:00 2001 From: Gui Jianfeng Date: Fri, 29 Jan 2010 15:36:59 +0800 Subject: KVM: VMX: Remove redundant check in vm_need_virtualize_apic_accesses() flexpriority_enabled implies cpu_has_vmx_virtualize_apic_accesses() returning true, so we don't need this check here. Signed-off-by: Gui Jianfeng Signed-off-by: Marcelo Tosatti --- arch/x86/kvm/vmx.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 334b016267a..f083b64814d 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -358,9 +358,7 @@ static inline int cpu_has_vmx_ple(void) static inline int vm_need_virtualize_apic_accesses(struct kvm *kvm) { - return flexpriority_enabled && - (cpu_has_vmx_virtualize_apic_accesses()) && - (irqchip_in_kernel(kvm)); + return flexpriority_enabled && irqchip_in_kernel(kvm); } static inline int cpu_has_vmx_vpid(void) -- cgit v1.2.3-70-g09d2 From ab9f4ecbb6d39a18e300a0d10a4968c37404aa76 Mon Sep 17 00:00:00 2001 From: "Zhai, Edwin" Date: Fri, 29 Jan 2010 14:38:44 +0800 Subject: KVM: enable PCI multiple-segments for pass-through device Enable optional parameter (default 0) - PCI segment (or domain) besides BDF, when assigning PCI device to guest. 
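With the segment (domain) number carried alongside the BDF, a fully qualified device address prints as segment:bus:device.function, the slot and function being packed into a single devfn byte. A small freestanding example (the zero-padded format here is the conventional lspci style, not necessarily the exact format used by the messages in this patch):

#include <stdint.h>
#include <stdio.h>

static void print_sbdf(uint16_t segnr, uint8_t busnr, uint8_t devfn)
{
        /* devfn packs the slot in bits 7:3 and the function in bits 2:0 */
        printf("%04x:%02x:%02x.%x\n", segnr, busnr, devfn >> 3, devfn & 0x7);
}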
Signed-off-by: Zhai Edwin Acked-by: Chris Wright Signed-off-by: Marcelo Tosatti --- arch/x86/kvm/x86.c | 1 + include/linux/kvm.h | 4 +++- include/linux/kvm_host.h | 1 + virt/kvm/assigned-dev.c | 4 +++- virt/kvm/iommu.c | 9 ++++++--- 5 files changed, 14 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index d47ceda7a92..0bf3df527af 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1569,6 +1569,7 @@ int kvm_dev_ioctl_check_extension(long ext) case KVM_CAP_HYPERV: case KVM_CAP_HYPERV_VAPIC: case KVM_CAP_HYPERV_SPIN: + case KVM_CAP_PCI_SEGMENT: r = 1; break; case KVM_CAP_COALESCED_MMIO: diff --git a/include/linux/kvm.h b/include/linux/kvm.h index 4c4937e7f65..dfa54be881f 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h @@ -500,6 +500,7 @@ struct kvm_ioeventfd { #define KVM_CAP_HYPERV 44 #define KVM_CAP_HYPERV_VAPIC 45 #define KVM_CAP_HYPERV_SPIN 46 +#define KVM_CAP_PCI_SEGMENT 47 #ifdef KVM_CAP_IRQ_ROUTING @@ -694,8 +695,9 @@ struct kvm_assigned_pci_dev { __u32 busnr; __u32 devfn; __u32 flags; + __u32 segnr; union { - __u32 reserved[12]; + __u32 reserved[11]; }; }; diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index dfde04b0d45..665c37063f3 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -400,6 +400,7 @@ struct kvm_assigned_dev_kernel { struct work_struct interrupt_work; struct list_head list; int assigned_dev_id; + int host_segnr; int host_busnr; int host_devfn; unsigned int entries_nr; diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c index f51e684dd23..057e2cca6af 100644 --- a/virt/kvm/assigned-dev.c +++ b/virt/kvm/assigned-dev.c @@ -526,7 +526,8 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm, r = -ENOMEM; goto out; } - dev = pci_get_bus_and_slot(assigned_dev->busnr, + dev = pci_get_domain_bus_and_slot(assigned_dev->segnr, + assigned_dev->busnr, assigned_dev->devfn); if (!dev) { printk(KERN_INFO "%s: host device not found\n", __func__); @@ -548,6 +549,7 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm, pci_reset_function(dev); match->assigned_dev_id = assigned_dev->assigned_dev_id; + match->host_segnr = assigned_dev->segnr; match->host_busnr = assigned_dev->busnr; match->host_devfn = assigned_dev->devfn; match->flags = assigned_dev->flags; diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c index 65a51432c8e..80fd3ad3b2d 100644 --- a/virt/kvm/iommu.c +++ b/virt/kvm/iommu.c @@ -106,7 +106,8 @@ int kvm_assign_device(struct kvm *kvm, r = iommu_attach_device(domain, &pdev->dev); if (r) { - printk(KERN_ERR "assign device %x:%x.%x failed", + printk(KERN_ERR "assign device %x:%x:%x.%x failed", + pci_domain_nr(pdev->bus), pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); @@ -127,7 +128,8 @@ int kvm_assign_device(struct kvm *kvm, goto out_unmap; } - printk(KERN_DEBUG "assign device: host bdf = %x:%x:%x\n", + printk(KERN_DEBUG "assign device %x:%x:%x.%x\n", + assigned_dev->host_segnr, assigned_dev->host_busnr, PCI_SLOT(assigned_dev->host_devfn), PCI_FUNC(assigned_dev->host_devfn)); @@ -154,7 +156,8 @@ int kvm_deassign_device(struct kvm *kvm, iommu_detach_device(domain, &pdev->dev); - printk(KERN_DEBUG "deassign device: host bdf = %x:%x:%x\n", + printk(KERN_DEBUG "deassign device %x:%x:%x.%x\n", + assigned_dev->host_segnr, assigned_dev->host_busnr, PCI_SLOT(assigned_dev->host_devfn), PCI_FUNC(assigned_dev->host_devfn)); -- cgit v1.2.3-70-g09d2 From c125c607326928001983f51109c3d0c190f0a8bb Mon Sep 17 00:00:00 2001 From: Takuya Yoshikawa Date: 
Mon, 1 Feb 2010 22:11:04 +0900 Subject: KVM: fix load_guest_segment_descriptor() to return X86EMUL_* This patch fixes load_guest_segment_descriptor() to return X86EMUL_PROPAGATE_FAULT when it tries to access the descriptor table beyond the limit of it: suggested by Marcelo. I have checked current callers of this helper function, - kvm_load_segment_descriptor() - kvm_task_switch() and confirmed that this patch will change nothing in the upper layers if we do not change the handling of this return value from load_guest_segment_descriptor(). Next step: Although fixing the kvm_task_switch() to handle the propagated faults properly seems difficult, and maybe not worth it because TSS is not used commonly these days, we can fix kvm_load_segment_descriptor(). By doing so, the injected #GP becomes possible to be handled by the guest. The only problem for this is how to differentiate this fault from the page faults generated by kvm_read_guest_virt(). We may have to split this function to achive this goal. Signed-off-by: Takuya Yoshikawa Signed-off-by: Marcelo Tosatti --- arch/x86/kvm/x86.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 0bf3df527af..01f0b037092 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4663,7 +4663,7 @@ static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, if (dtable.limit < index * 8 + 7) { kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc); - return 1; + return X86EMUL_PROPAGATE_FAULT; } return kvm_read_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu); } -- cgit v1.2.3-70-g09d2 From 7edcface95c6e593faa40c70e0464500515db573 Mon Sep 17 00:00:00 2001 From: Takuya Yoshikawa Date: Mon, 1 Feb 2010 22:11:52 +0900 Subject: KVM: fix kvm_fix_hypercall() to return X86EMUL_* This patch fixes kvm_fix_hypercall() to propagate X86EMUL_* info generated by emulator_write_emulated() to its callers: suggested by Marcelo. The effect of this is x86_emulate_insn() will begin to handle the page faults which occur in emulator_write_emulated(): this should be OK because emulator_write_emulated_onepage() always injects page fault when emulator_write_emulated() returns X86EMUL_PROPAGATE_FAULT. 
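The limit check that now propagates X86EMUL_PROPAGATE_FAULT (earlier above) is the usual descriptor-table bounds test: entries are 8 bytes, so the last byte of the selected entry must not exceed the table limit. A standalone sketch:

#include <stdbool.h>
#include <stdint.h>

static bool descriptor_within_limit(uint16_t selector, uint32_t table_limit)
{
        uint32_t index = selector >> 3;         /* drop RPL and TI bits */

        return index * 8u + 7u <= table_limit;
}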
Signed-off-by: Takuya Yoshikawa Signed-off-by: Marcelo Tosatti --- arch/x86/kvm/x86.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 01f0b037092..c91007f8166 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3888,10 +3888,8 @@ EXPORT_SYMBOL_GPL(kvm_emulate_hypercall); int kvm_fix_hypercall(struct kvm_vcpu *vcpu) { char instruction[3]; - int ret = 0; unsigned long rip = kvm_rip_read(vcpu); - /* * Blow out the MMU to ensure that no other VCPU has an active mapping * to ensure that the updated hypercall appears atomically across all @@ -3900,11 +3898,8 @@ int kvm_fix_hypercall(struct kvm_vcpu *vcpu) kvm_mmu_zap_all(vcpu->kvm); kvm_x86_ops->patch_hypercall(vcpu, instruction); - if (emulator_write_emulated(rip, instruction, 3, vcpu) - != X86EMUL_CONTINUE) - ret = -EFAULT; - return ret; + return emulator_write_emulated(rip, instruction, 3, vcpu); } static u64 mk_cr_64(u64 curr_cr, u32 new_val) -- cgit v1.2.3-70-g09d2 From ebcbab4c034db2ec25abe702d788936b29a49b24 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sun, 7 Feb 2010 11:56:52 +0200 Subject: KVM: VMX: Wire up .fpu_activate() callback Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index f083b64814d..82fb810afd3 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -4137,6 +4137,7 @@ static struct kvm_x86_ops vmx_x86_ops = { .cache_reg = vmx_cache_reg, .get_rflags = vmx_get_rflags, .set_rflags = vmx_set_rflags, + .fpu_activate = vmx_fpu_activate, .fpu_deactivate = vmx_fpu_deactivate, .tlb_flush = vmx_flush_tlb, -- cgit v1.2.3-70-g09d2 From 16fbb5eecc4ab8d4eb4d2a683d7fd74ccacc890b Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Mon, 1 Feb 2010 23:22:07 -0800 Subject: KVM: ia64: Fix string literal continuation lines String constants that are continued on subsequent lines with \ are not good. Signed-off-by: Joe Perches Signed-off-by: Avi Kivity --- arch/ia64/kvm/mmio.c | 4 ++-- arch/ia64/kvm/vcpu.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/ia64/kvm/mmio.c b/arch/ia64/kvm/mmio.c index 9bf55afd08d..fb8f9f59a1e 100644 --- a/arch/ia64/kvm/mmio.c +++ b/arch/ia64/kvm/mmio.c @@ -316,8 +316,8 @@ void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma) return; } else { inst_type = -1; - panic_vm(vcpu, "Unsupported MMIO access instruction! \ - Bunld[0]=0x%lx, Bundle[1]=0x%lx\n", + panic_vm(vcpu, "Unsupported MMIO access instruction! " + "Bunld[0]=0x%lx, Bundle[1]=0x%lx\n", bundle.i64[0], bundle.i64[1]); } diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c index dce75b70cdd..958815c9787 100644 --- a/arch/ia64/kvm/vcpu.c +++ b/arch/ia64/kvm/vcpu.c @@ -1639,8 +1639,8 @@ void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val) * Otherwise panic */ if (val & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM)) - panic_vm(vcpu, "Only support guests with vpsr.pk =0 \ - & vpsr.is=0\n"); + panic_vm(vcpu, "Only support guests with vpsr.pk =0 " + "& vpsr.is=0\n"); /* * For those IA64_PSR bits: id/da/dd/ss/ed/ia -- cgit v1.2.3-70-g09d2 From c45b4fd416f5497b6b38dd70acc0e5b01399e5c9 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Sat, 6 Feb 2010 09:43:03 +0100 Subject: KVM: VMX: Remove redundant test in vmx_set_efer() msr was tested above, so the second test is not needed. 
A simplified version of the semantic match that finds this problem is as follows: (http://coccinelle.lip6.fr/) // @r@ expression *x; expression e; identifier l; @@ if (x == NULL || ...) { ... when forall return ...; } ... when != goto l; when != x = e when != &x *x == NULL // Signed-off-by: Julia Lawall Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 82fb810afd3..b400be06c8c 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -1602,8 +1602,6 @@ static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer) */ vmx_load_host_state(to_vmx(vcpu)); vcpu->arch.efer = efer; - if (!msr) - return; if (efer & EFER_LMA) { vmcs_write32(VM_ENTRY_CONTROLS, vmcs_read32(VM_ENTRY_CONTROLS) | -- cgit v1.2.3-70-g09d2 From 8f0b1ab6fb045a1324d9435ba00c2940783b0041 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Thu, 28 Jan 2010 12:37:56 +0100 Subject: KVM: Introduce kvm_host_page_size This patch introduces a generic function to find out the host page size for a given gfn. This function is needed by the kvm iommu code. This patch also simplifies the x86 host_mapping_level function. Signed-off-by: Joerg Roedel Signed-off-by: Avi Kivity --- arch/x86/kvm/mmu.c | 18 ++---------------- include/linux/kvm_host.h | 1 + virt/kvm/kvm_main.c | 25 +++++++++++++++++++++++++ 3 files changed, 28 insertions(+), 16 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index dc4d954efac..913ef4b7939 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -468,24 +468,10 @@ static int has_wrprotected_page(struct kvm *kvm, static int host_mapping_level(struct kvm *kvm, gfn_t gfn) { - unsigned long page_size = PAGE_SIZE; - struct vm_area_struct *vma; - unsigned long addr; + unsigned long page_size; int i, ret = 0; - addr = gfn_to_hva(kvm, gfn); - if (kvm_is_error_hva(addr)) - return PT_PAGE_TABLE_LEVEL; - - down_read(¤t->mm->mmap_sem); - vma = find_vma(current->mm, addr); - if (!vma) - goto out; - - page_size = vma_kernel_pagesize(vma); - -out: - up_read(¤t->mm->mmap_sem); + page_size = kvm_host_page_size(kvm, gfn); for (i = PT_PAGE_TABLE_LEVEL; i < (PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES); ++i) { diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 665c37063f3..3145b281de9 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -300,6 +300,7 @@ int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len); int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len); struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn); int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn); +unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn); void mark_page_dirty(struct kvm *kvm, gfn_t gfn); void kvm_vcpu_block(struct kvm_vcpu *vcpu); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 2b0974a1483..0a360c26cc3 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -45,6 +45,7 @@ #include #include #include +#include #include #include @@ -867,6 +868,30 @@ int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) } EXPORT_SYMBOL_GPL(kvm_is_visible_gfn); +unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn) +{ + struct vm_area_struct *vma; + unsigned long addr, size; + + size = PAGE_SIZE; + + addr = gfn_to_hva(kvm, gfn); + if (kvm_is_error_hva(addr)) + return PAGE_SIZE; + + down_read(¤t->mm->mmap_sem); + vma = find_vma(current->mm, addr); + if (!vma) + goto out; + + size = vma_kernel_pagesize(vma); + +out: 
+ up_read(¤t->mm->mmap_sem); + + return size; +} + int memslot_id(struct kvm *kvm, gfn_t gfn) { int i; -- cgit v1.2.3-70-g09d2 From 6316e1c8c6af6ccb55ff8564231710660608f46c Mon Sep 17 00:00:00 2001 From: Rik van Riel Date: Wed, 3 Feb 2010 16:11:03 -0500 Subject: KVM: VMX: emulate accessed bit for EPT Currently KVM pretends that pages with EPT mappings never got accessed. This has some side effects in the VM, like swapping out actively used guest pages and needlessly breaking up actively used hugepages. We can avoid those very costly side effects by emulating the accessed bit for EPT PTEs, which should only be slightly costly because pages pass through page_referenced infrequently. TLB flushing is taken care of by kvm_mmu_notifier_clear_flush_young(). This seems to help prevent KVM guests from being swapped out when they should not on my system. Signed-off-by: Rik van Riel Signed-off-by: Avi Kivity --- arch/x86/kvm/mmu.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 913ef4b7939..b8da6715d08 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -838,9 +838,15 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp, u64 *spte; int young = 0; - /* always return old for EPT */ + /* + * Emulate the accessed bit for EPT, by checking if this page has + * an EPT mapping, and clearing it if it does. On the next access, + * a new EPT mapping will be established. + * This has some overhead, but not as much as the cost of swapping + * out actively used pages or breaking up actively used hugepages. + */ if (!shadow_accessed_mask) - return 0; + return kvm_unmap_rmapp(kvm, rmapp, data); spte = rmap_next(kvm, rmapp, NULL); while (spte) { -- cgit v1.2.3-70-g09d2 From 1976d2d2c91246a37fcb8246b811de735aa6e9a4 Mon Sep 17 00:00:00 2001 From: Takuya Yoshikawa Date: Fri, 5 Feb 2010 17:52:46 +0900 Subject: KVM: Remove redundant reading of rax on OUT instructions kvm_emulate_pio() and complete_pio() both read out the RAX register value and copy it to a place into which the value read out from the port will be copied later. This patch removes this redundancy. /*** snippet from arch/x86/kvm/x86.c ***/ int complete_pio(struct kvm_vcpu *vcpu) { ... if (!io->string) { if (io->in) { val = kvm_register_read(vcpu, VCPU_REGS_RAX); memcpy(&val, vcpu->arch.pio_data, io->size); kvm_register_write(vcpu, VCPU_REGS_RAX, val); } ... 
Signed-off-by: Takuya Yoshikawa Signed-off-by: Avi Kivity --- arch/x86/kvm/x86.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index c91007f8166..bd3161c6dae 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3551,8 +3551,10 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, int in, int size, unsigned port) trace_kvm_pio(vcpu->run->io.direction == KVM_EXIT_IO_OUT, port, size, 1); - val = kvm_register_read(vcpu, VCPU_REGS_RAX); - memcpy(vcpu->arch.pio_data, &val, 4); + if (!vcpu->arch.pio.in) { + val = kvm_register_read(vcpu, VCPU_REGS_RAX); + memcpy(vcpu->arch.pio_data, &val, 4); + } if (!kernel_pio(vcpu, vcpu->arch.pio_data)) { complete_pio(vcpu); -- cgit v1.2.3-70-g09d2 From 90bb6fc556ab255abd798bcf4ff5769690ab2eea Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Thu, 31 Dec 2009 12:10:16 +0200 Subject: KVM: MMU: Add tracepoint for guest page aging Signed-off-by: Avi Kivity --- arch/x86/kvm/mmu.c | 11 ++++++++--- include/trace/events/kvm.h | 22 ++++++++++++++++++++++ 2 files changed, 30 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index b8da6715d08..739793240d1 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -151,6 +151,9 @@ module_param(oos_shadow, bool, 0644); #define ACC_USER_MASK PT_USER_MASK #define ACC_ALL (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK) +#include + +#undef TRACE_INCLUDE_FILE #define CREATE_TRACE_POINTS #include "mmutrace.h" @@ -792,6 +795,7 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, unsigned long data)) { int i, j; + int ret; int retval = 0; struct kvm_memslots *slots; @@ -806,16 +810,17 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, if (hva >= start && hva < end) { gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT; - retval |= handler(kvm, &memslot->rmap[gfn_offset], - data); + ret = handler(kvm, &memslot->rmap[gfn_offset], data); for (j = 0; j < KVM_NR_PAGE_SIZES - 1; ++j) { int idx = gfn_offset; idx /= KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL + j); - retval |= handler(kvm, + ret |= handler(kvm, &memslot->lpage_info[j][idx].rmap_pde, data); } + trace_kvm_age_page(hva, memslot, ret); + retval |= ret; } } diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h index 8abdc123014..b17d49dfc3e 100644 --- a/include/trace/events/kvm.h +++ b/include/trace/events/kvm.h @@ -164,6 +164,28 @@ TRACE_EVENT(kvm_fpu, TP_printk("%s", __print_symbolic(__entry->load, kvm_fpu_load_symbol)) ); +TRACE_EVENT(kvm_age_page, + TP_PROTO(ulong hva, struct kvm_memory_slot *slot, int ref), + TP_ARGS(hva, slot, ref), + + TP_STRUCT__entry( + __field( u64, hva ) + __field( u64, gfn ) + __field( u8, referenced ) + ), + + TP_fast_assign( + __entry->hva = hva; + __entry->gfn = + slot->base_gfn + ((hva - slot->userspace_addr) >> PAGE_SHIFT); + __entry->referenced = ref; + ), + + TP_printk("hva %llx gfn %llx %s", + __entry->hva, __entry->gfn, + __entry->referenced ? "YOUNG" : "OLD") +); + #endif /* _TRACE_KVM_MAIN_H */ /* This part must be outside protection */ -- cgit v1.2.3-70-g09d2 From a19a6d1131c77822ae55f84a1bd6d62327e99287 Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Tue, 9 Feb 2010 16:41:53 +0800 Subject: KVM: VMX: Rename VMX_EPT_IGMT_BIT to VMX_EPT_IPAT_BIT Following the new SDM. Now the bit is named "Ignore PAT memory type". 
Signed-off-by: Sheng Yang Signed-off-by: Avi Kivity --- arch/x86/include/asm/vmx.h | 2 +- arch/x86/kvm/vmx.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index 43f1e9b4591..fb9a080740e 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -377,7 +377,7 @@ enum vmcs_field { #define VMX_EPT_READABLE_MASK 0x1ull #define VMX_EPT_WRITABLE_MASK 0x2ull #define VMX_EPT_EXECUTABLE_MASK 0x4ull -#define VMX_EPT_IGMT_BIT (1ull << 6) +#define VMX_EPT_IPAT_BIT (1ull << 6) #define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index b400be06c8c..f82b0723afa 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -4001,7 +4001,7 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) * b. VT-d with snooping control feature: snooping control feature of * VT-d engine can guarantee the cache correctness. Just set it * to WB to keep consistent with host. So the same as item 3. - * 3. EPT without VT-d: always map as WB and set IGMT=1 to keep + * 3. EPT without VT-d: always map as WB and set IPAT=1 to keep * consistent with host MTRR */ if (is_mmio) @@ -4012,7 +4012,7 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) VMX_EPT_MT_EPTE_SHIFT; else ret = (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT) - | VMX_EPT_IGMT_BIT; + | VMX_EPT_IPAT_BIT; return ret; } -- cgit v1.2.3-70-g09d2 From d225f53b76f4287365efd381b9e3a732e6bad27e Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Mon, 8 Feb 2010 17:03:51 +0800 Subject: KVM: PIT: unregister kvm irq notifier if fail to create pit If fail to create pit, we should unregister kvm irq notifier which register in kvm_create_pit(). Signed-off-by: Wei Yongjun Acked-by: Marcelo Tosatti Signed-off-by: Avi Kivity --- arch/x86/kvm/i8254.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index 6a74246f80c..c9569f20dfb 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c @@ -663,8 +663,9 @@ fail_unregister: kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->dev); fail: - if (pit->irq_source_id >= 0) - kvm_free_irq_source_id(kvm, pit->irq_source_id); + kvm_unregister_irq_mask_notifier(kvm, 0, &pit->mask_notifier); + kvm_unregister_irq_ack_notifier(kvm, &pit_state->irq_ack_notifier); + kvm_free_irq_source_id(kvm, pit->irq_source_id); kfree(pit); return NULL; -- cgit v1.2.3-70-g09d2 From 72bb2fcd23afe8db53b47e8f9edd736c517ba532 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Tue, 9 Feb 2010 10:33:03 +0800 Subject: KVM: cleanup the failure path of KVM_CREATE_IRQCHIP ioctrl If we fail to init ioapic device or the fail to setup the default irq routing, the device register by kvm_create_pic() and kvm_ioapic_init() remain unregister. This patch fixed to do this. 
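To make the intent easier to follow than the flattened diff below, here is a condensed sketch of the failure ordering this change establishes. It reuses names from the patch (kvm_io_bus_unregister_dev(), kvm_ioapic_destroy(), kvm_destroy_pic()) but omits the ioctl plumbing and irq_lock handling, so it is illustrative only, not the literal hunk.

/* condensed sketch of the KVM_CREATE_IRQCHIP failure path after this patch */
vpic = kvm_create_pic(kvm);
if (vpic) {
	r = kvm_ioapic_init(kvm);
	if (r) {
		/* undo the io-bus registration done inside kvm_create_pic() */
		kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &vpic->dev);
		kfree(vpic);
		goto create_irqchip_unlock;
	}
}
r = kvm_setup_default_irq_routing(kvm);
if (r) {
	/* both devices are fully registered here, so use the
	 * unregister-and-free helpers instead of bare kfree() */
	kvm_ioapic_destroy(kvm);
	kvm_destroy_pic(kvm);
}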
Signed-off-by: Wei Yongjun Signed-off-by: Avi Kivity --- arch/x86/kvm/i8259.c | 11 +++++++++++ arch/x86/kvm/irq.h | 1 + arch/x86/kvm/x86.c | 8 ++++---- virt/kvm/ioapic.c | 11 +++++++++++ virt/kvm/ioapic.h | 1 + 5 files changed, 28 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c index d5753a75d58..a3711f9e580 100644 --- a/arch/x86/kvm/i8259.c +++ b/arch/x86/kvm/i8259.c @@ -543,3 +543,14 @@ struct kvm_pic *kvm_create_pic(struct kvm *kvm) return s; } + +void kvm_destroy_pic(struct kvm *kvm) +{ + struct kvm_pic *vpic = kvm->arch.vpic; + + if (vpic) { + kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &vpic->dev); + kvm->arch.vpic = NULL; + kfree(vpic); + } +} diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h index be399e207d5..0b71d480ebf 100644 --- a/arch/x86/kvm/irq.h +++ b/arch/x86/kvm/irq.h @@ -75,6 +75,7 @@ struct kvm_pic { }; struct kvm_pic *kvm_create_pic(struct kvm *kvm); +void kvm_destroy_pic(struct kvm *kvm); int kvm_pic_read_irq(struct kvm *kvm); void kvm_pic_update_irq(struct kvm_pic *s); void kvm_pic_clear_isr_ack(struct kvm *kvm); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index bd3161c6dae..b2f91b9af00 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2771,6 +2771,8 @@ long kvm_arch_vm_ioctl(struct file *filp, if (vpic) { r = kvm_ioapic_init(kvm); if (r) { + kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, + &vpic->dev); kfree(vpic); goto create_irqchip_unlock; } @@ -2782,10 +2784,8 @@ long kvm_arch_vm_ioctl(struct file *filp, r = kvm_setup_default_irq_routing(kvm); if (r) { mutex_lock(&kvm->irq_lock); - kfree(kvm->arch.vpic); - kfree(kvm->arch.vioapic); - kvm->arch.vpic = NULL; - kvm->arch.vioapic = NULL; + kvm_ioapic_destroy(kvm); + kvm_destroy_pic(kvm); mutex_unlock(&kvm->irq_lock); } create_irqchip_unlock: diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c index f3d06934ae6..3db15a807f8 100644 --- a/virt/kvm/ioapic.c +++ b/virt/kvm/ioapic.c @@ -401,6 +401,17 @@ int kvm_ioapic_init(struct kvm *kvm) return ret; } +void kvm_ioapic_destroy(struct kvm *kvm) +{ + struct kvm_ioapic *ioapic = kvm->arch.vioapic; + + if (ioapic) { + kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev); + kvm->arch.vioapic = NULL; + kfree(ioapic); + } +} + int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state) { struct kvm_ioapic *ioapic = ioapic_irqchip(kvm); diff --git a/virt/kvm/ioapic.h b/virt/kvm/ioapic.h index a505ce9054f..8a751b78a43 100644 --- a/virt/kvm/ioapic.h +++ b/virt/kvm/ioapic.h @@ -72,6 +72,7 @@ int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source, int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2); void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode); int kvm_ioapic_init(struct kvm *kvm); +void kvm_ioapic_destroy(struct kvm *kvm); int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level); void kvm_ioapic_reset(struct kvm_ioapic *ioapic); int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src, -- cgit v1.2.3-70-g09d2 From 4b7bb9210047fe880bb71e6273c3a4526799dbd7 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Tue, 9 Feb 2010 10:41:56 +0800 Subject: KVM: ia64: destroy ioapic device if fail to setup default irq routing If KVM_CREATE_IRQCHIP fail due to kvm_setup_default_irq_routing(), ioapic device is not destroyed and kvm->arch.vioapic is not set to NULL, this may cause KVM_GET_IRQCHIP and KVM_SET_IRQCHIP access to unexcepted memory. 
Signed-off-by: Wei Yongjun Signed-off-by: Avi Kivity --- arch/ia64/kvm/kvm-ia64.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index 06188988ed2..26e0e089bfe 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -968,7 +968,7 @@ long kvm_arch_vm_ioctl(struct file *filp, goto out; r = kvm_setup_default_irq_routing(kvm); if (r) { - kfree(kvm->arch.vioapic); + kvm_ioapic_destroy(kvm); goto out; } break; -- cgit v1.2.3-70-g09d2 From daf5e27109c8c16c987e955cc6abbbc0af050edd Mon Sep 17 00:00:00 2001 From: Liu Yu Date: Tue, 2 Feb 2010 19:44:35 +0800 Subject: KVM: ppc/booke: Set ESR and DEAR when inject interrupt to guest Old method prematurely sets ESR and DEAR. Move this part after we decide to inject interrupt, which is more like hardware behave. Signed-off-by: Liu Yu Acked-by: Hollis Blanchard Acked-by: Alexander Graf Signed-off-by: Avi Kivity --- arch/powerpc/include/asm/kvm_host.h | 2 ++ arch/powerpc/kvm/booke.c | 59 +++++++++++++++++++++++++++---------- arch/powerpc/kvm/emulate.c | 4 +-- 3 files changed, 48 insertions(+), 17 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 715aa6baf6f..5e5bae7e152 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -259,6 +259,8 @@ struct kvm_vcpu_arch { #endif ulong fault_dear; ulong fault_esr; + ulong queued_dear; + ulong queued_esr; gpa_t paddr_accessed; u8 io_gpr; /* GPR used as IO source/target */ diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index e283e44e9f1..4d686cc6b26 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -82,9 +82,32 @@ static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu, set_bit(priority, &vcpu->arch.pending_exceptions); } -void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags) +static void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, + ulong dear_flags, ulong esr_flags) { - /* BookE does flags in ESR, so ignore those we get here */ + vcpu->arch.queued_dear = dear_flags; + vcpu->arch.queued_esr = esr_flags; + kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS); +} + +static void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, + ulong dear_flags, ulong esr_flags) +{ + vcpu->arch.queued_dear = dear_flags; + vcpu->arch.queued_esr = esr_flags; + kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE); +} + +static void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, + ulong esr_flags) +{ + vcpu->arch.queued_esr = esr_flags; + kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE); +} + +void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags) +{ + vcpu->arch.queued_esr = esr_flags; kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM); } @@ -115,14 +138,19 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, { int allowed = 0; ulong msr_mask; + bool update_esr = false, update_dear = false; switch (priority) { - case BOOKE_IRQPRIO_PROGRAM: case BOOKE_IRQPRIO_DTLB_MISS: - case BOOKE_IRQPRIO_ITLB_MISS: - case BOOKE_IRQPRIO_SYSCALL: case BOOKE_IRQPRIO_DATA_STORAGE: + update_dear = true; + /* fall through */ case BOOKE_IRQPRIO_INST_STORAGE: + case BOOKE_IRQPRIO_PROGRAM: + update_esr = true; + /* fall through */ + case BOOKE_IRQPRIO_ITLB_MISS: + case BOOKE_IRQPRIO_SYSCALL: case BOOKE_IRQPRIO_FP_UNAVAIL: case BOOKE_IRQPRIO_SPE_UNAVAIL: case BOOKE_IRQPRIO_SPE_FP_DATA: @@ -157,6 +185,10 @@ static 
int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, vcpu->arch.srr0 = vcpu->arch.pc; vcpu->arch.srr1 = vcpu->arch.msr; vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority]; + if (update_esr == true) + vcpu->arch.esr = vcpu->arch.queued_esr; + if (update_dear == true) + vcpu->arch.dear = vcpu->arch.queued_dear; kvmppc_set_msr(vcpu, vcpu->arch.msr & msr_mask); clear_bit(priority, &vcpu->arch.pending_exceptions); @@ -229,8 +261,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, if (vcpu->arch.msr & MSR_PR) { /* Program traps generated by user-level software must be handled * by the guest kernel. */ - vcpu->arch.esr = vcpu->arch.fault_esr; - kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM); + kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr); r = RESUME_GUEST; kvmppc_account_exit(vcpu, USR_PR_INST); break; @@ -286,16 +317,14 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, break; case BOOKE_INTERRUPT_DATA_STORAGE: - vcpu->arch.dear = vcpu->arch.fault_dear; - vcpu->arch.esr = vcpu->arch.fault_esr; - kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE); + kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear, + vcpu->arch.fault_esr); kvmppc_account_exit(vcpu, DSI_EXITS); r = RESUME_GUEST; break; case BOOKE_INTERRUPT_INST_STORAGE: - vcpu->arch.esr = vcpu->arch.fault_esr; - kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE); + kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr); kvmppc_account_exit(vcpu, ISI_EXITS); r = RESUME_GUEST; break; @@ -316,9 +345,9 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr); if (gtlb_index < 0) { /* The guest didn't have a mapping for it. */ - kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS); - vcpu->arch.dear = vcpu->arch.fault_dear; - vcpu->arch.esr = vcpu->arch.fault_esr; + kvmppc_core_queue_dtlb_miss(vcpu, + vcpu->arch.fault_dear, + vcpu->arch.fault_esr); kvmppc_mmu_dtlb_miss(vcpu); kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS); r = RESUME_GUEST; diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c index b905623735b..cb72a65f4ec 100644 --- a/arch/powerpc/kvm/emulate.c +++ b/arch/powerpc/kvm/emulate.c @@ -151,10 +151,10 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) case OP_TRAP: #ifdef CONFIG_PPC64 case OP_TRAP_64: + kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP); #else - vcpu->arch.esr |= ESR_PTR; + kvmppc_core_queue_program(vcpu, vcpu->arch.esr | ESR_PTR); #endif - kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP); advance = 0; break; -- cgit v1.2.3-70-g09d2 From 2db2c2eb6226e30f8059b82512a1364db98da8e3 Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Wed, 10 Feb 2010 14:21:29 +0200 Subject: KVM: x86 emulator: Add group8 instruction decoding Use groups mechanism to decode 0F BA instructions. 
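As background (not part of the patch): when an opcode's decode flags contain Group, the emulator uses the ModRM reg field to index an 8-entry slice of group_table[]. Roughly, and omitting the GroupDual/register-form path and the eip bookkeeping, the lookup in x86_decode_insn() behaves like this sketch:

/* simplified sketch of the decoder's group lookup */
if (c->d & Group) {
	int group = c->d & GroupMask;                  /* e.g. Group8 for opcode 0F BA */
	c->modrm = insn_fetch(u8, 1, c->eip);          /* ModRM byte follows the opcode */
	group = (group << 3) + ((c->modrm >> 3) & 7);  /* 8 entries per group, indexed by /reg */
	c->d = group_table[group];                     /* final decode flags for this form */
}

So marking 0F BA as a Group8 instruction lets /4 through /7 (BT, BTS, BTR, BTC) decode as DstMem | SrcImmByte | ModRM while /0 through /3 remain invalid.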
Signed-off-by: Gleb Natapov Cc: stable@kernel.org Signed-off-by: Avi Kivity --- arch/x86/kvm/emulate.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 645b245a3c2..435b1e4e8c9 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -88,6 +88,7 @@ enum { Group1_80, Group1_81, Group1_82, Group1_83, Group1A, Group3_Byte, Group3, Group4, Group5, Group7, + Group8, }; static u32 opcode_table[256] = { @@ -267,7 +268,7 @@ static u32 twobyte_table[256] = { 0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem16 | ModRM | Mov, /* 0xB8 - 0xBF */ - 0, 0, DstMem | SrcImmByte | ModRM, DstMem | SrcReg | ModRM | BitOp, + 0, 0, Group | Group8, DstMem | SrcReg | ModRM | BitOp, 0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem16 | ModRM | Mov, /* 0xC0 - 0xCF */ @@ -323,6 +324,10 @@ static u32 group_table[] = { 0, 0, ModRM | SrcMem, ModRM | SrcMem, SrcNone | ModRM | DstMem | Mov, 0, SrcMem16 | ModRM | Mov, SrcMem | ModRM | ByteOp, + [Group8*8] = + 0, 0, 0, 0, + DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM, + DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM, }; static u32 group2_table[] = { -- cgit v1.2.3-70-g09d2 From 60a29d4ea4e7b6b95d9391ebc8625b0426f3a363 Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Wed, 10 Feb 2010 14:21:30 +0200 Subject: KVM: x86 emulator: Add group9 instruction decoding Use groups mechanism to decode 0F C7 instructions. Signed-off-by: Gleb Natapov Cc: stable@kernel.org Signed-off-by: Avi Kivity --- arch/x86/kvm/emulate.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 435b1e4e8c9..45a4f7c1bb0 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -88,7 +88,7 @@ enum { Group1_80, Group1_81, Group1_82, Group1_83, Group1A, Group3_Byte, Group3, Group4, Group5, Group7, - Group8, + Group8, Group9, }; static u32 opcode_table[256] = { @@ -272,7 +272,8 @@ static u32 twobyte_table[256] = { 0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem16 | ModRM | Mov, /* 0xC0 - 0xCF */ - 0, 0, 0, DstMem | SrcReg | ModRM | Mov, 0, 0, 0, ImplicitOps | ModRM, + 0, 0, 0, DstMem | SrcReg | ModRM | Mov, + 0, 0, 0, Group | GroupDual | Group9, 0, 0, 0, 0, 0, 0, 0, 0, /* 0xD0 - 0xDF */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -328,6 +329,8 @@ static u32 group_table[] = { 0, 0, 0, 0, DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM, + [Group9*8] = + 0, ImplicitOps | ModRM, 0, 0, 0, 0, 0, 0, }; static u32 group2_table[] = { @@ -335,6 +338,8 @@ static u32 group2_table[] = { SrcNone | ModRM, 0, 0, SrcNone | ModRM, SrcNone | ModRM | DstMem | Mov, 0, SrcMem16 | ModRM | Mov, 0, + [Group9*8] = + 0, 0, 0, 0, 0, 0, 0, 0, }; /* EFLAGS bit definitions. */ -- cgit v1.2.3-70-g09d2 From a0044755679f3e761b8b95995e5f2db2b7efd0f6 Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Wed, 10 Feb 2010 14:21:31 +0200 Subject: KVM: x86 emulator: Add Virtual-8086 mode of emulation For some instructions CPU behaves differently for real-mode and virtual 8086. Let emulator know which mode cpu is in, so it will not poke into vcpu state directly. 
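The mode is recomputed from guest state before each emulation pass; the selection added to emulate_instruction() (see the x86.c hunk below) amounts to the following sketch, where cs_l and cs_db stand for the CS.L and CS.D bits the caller reads from the guest's code segment:

/* sketch of how the emulation mode is chosen */
if (!is_protmode(vcpu))
	mode = X86EMUL_MODE_REAL;		/* CR0.PE clear */
else if (kvm_get_rflags(vcpu) & X86_EFLAGS_VM)
	mode = X86EMUL_MODE_VM86;		/* protected mode with EFLAGS.VM set */
else if (cs_l)
	mode = X86EMUL_MODE_PROT64;		/* 64-bit code segment */
else if (cs_db)
	mode = X86EMUL_MODE_PROT32;
else
	mode = X86EMUL_MODE_PROT16;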
Signed-off-by: Gleb Natapov Cc: stable@kernel.org Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_emulate.h | 1 + arch/x86/kvm/emulate.c | 12 +++++++----- arch/x86/kvm/x86.c | 3 ++- 3 files changed, 10 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h index 9b697c2735d..784d7c586d8 100644 --- a/arch/x86/include/asm/kvm_emulate.h +++ b/arch/x86/include/asm/kvm_emulate.h @@ -168,6 +168,7 @@ struct x86_emulate_ctxt { /* Execution mode, passed to the emulator. */ #define X86EMUL_MODE_REAL 0 /* Real mode. */ +#define X86EMUL_MODE_VM86 1 /* Virtual 8086 mode. */ #define X86EMUL_MODE_PROT16 2 /* 16-bit protected mode. */ #define X86EMUL_MODE_PROT32 4 /* 32-bit protected mode. */ #define X86EMUL_MODE_PROT64 8 /* 64-bit (long) mode. */ diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 45a4f7c1bb0..e4e2df3b603 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -899,6 +899,7 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) switch (mode) { case X86EMUL_MODE_REAL: + case X86EMUL_MODE_VM86: case X86EMUL_MODE_PROT16: def_op_bytes = def_ad_bytes = 2; break; @@ -1525,7 +1526,7 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt) /* syscall is not available in real mode */ if (c->lock_prefix || ctxt->mode == X86EMUL_MODE_REAL - || !is_protmode(ctxt->vcpu)) + || ctxt->mode == X86EMUL_MODE_VM86) return -1; setup_syscalls_segments(ctxt, &cs, &ss); @@ -1577,8 +1578,8 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt) if (c->lock_prefix) return -1; - /* inject #GP if in real mode or paging is disabled */ - if (ctxt->mode == X86EMUL_MODE_REAL || !is_protmode(ctxt->vcpu)) { + /* inject #GP if in real mode */ + if (ctxt->mode == X86EMUL_MODE_REAL) { kvm_inject_gp(ctxt->vcpu, 0); return -1; } @@ -1642,8 +1643,9 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt) if (c->lock_prefix) return -1; - /* inject #GP if in real mode or paging is disabled */ - if (ctxt->mode == X86EMUL_MODE_REAL || !is_protmode(ctxt->vcpu)) { + /* inject #GP if in real mode or Virtual 8086 mode */ + if (ctxt->mode == X86EMUL_MODE_REAL || + ctxt->mode == X86EMUL_MODE_VM86) { kvm_inject_gp(ctxt->vcpu, 0); return -1; } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index b2f91b9af00..a28379507d3 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3348,8 +3348,9 @@ int emulate_instruction(struct kvm_vcpu *vcpu, vcpu->arch.emulate_ctxt.vcpu = vcpu; vcpu->arch.emulate_ctxt.eflags = kvm_get_rflags(vcpu); vcpu->arch.emulate_ctxt.mode = + (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL : (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM) - ? X86EMUL_MODE_REAL : cs_l + ? X86EMUL_MODE_VM86 : cs_l ? X86EMUL_MODE_PROT64 : cs_db ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16; -- cgit v1.2.3-70-g09d2 From 1871c6020d7308afb99127bba51f04548e7ca84e Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Wed, 10 Feb 2010 14:21:32 +0200 Subject: KVM: x86 emulator: fix memory access during x86 emulation Currently when x86 emulator needs to access memory, page walk is done with broadest permission possible, so if emulated instruction was executed by userspace process it can still access kernel memory. Fix that by providing correct memory access to page walker during emulation. 
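Concretely, each access now carries a page-fault-style error-code mask built from the guest's CPL and the kind of access, and that mask is handed to the page walker. A minimal sketch of the pattern the new helpers follow (write, fetch and error_code are illustrative locals, not the exact helper bodies):

/* sketch: build an access mask, then walk the guest page tables with it */
u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
if (write)
	access |= PFERR_WRITE_MASK;
if (fetch)
	access |= PFERR_FETCH_MASK;
gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, &error_code);
if (gpa == UNMAPPED_GVA)
	kvm_inject_page_fault(vcpu, gva, error_code);	/* reflect the walker's error code to the guest */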
Signed-off-by: Gleb Natapov Cc: stable@kernel.org Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_emulate.h | 14 +++- arch/x86/include/asm/kvm_host.h | 7 +- arch/x86/kvm/emulate.c | 6 +- arch/x86/kvm/mmu.c | 17 ++--- arch/x86/kvm/mmu.h | 6 ++ arch/x86/kvm/paging_tmpl.h | 11 +++- arch/x86/kvm/x86.c | 131 ++++++++++++++++++++++++++++--------- 7 files changed, 142 insertions(+), 50 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h index 784d7c586d8..7a6f54fa13b 100644 --- a/arch/x86/include/asm/kvm_emulate.h +++ b/arch/x86/include/asm/kvm_emulate.h @@ -54,13 +54,23 @@ struct x86_emulate_ctxt; struct x86_emulate_ops { /* * read_std: Read bytes of standard (non-emulated/special) memory. - * Used for instruction fetch, stack operations, and others. + * Used for descriptor reading. * @addr: [IN ] Linear address from which to read. * @val: [OUT] Value read from memory, zero-extended to 'u_long'. * @bytes: [IN ] Number of bytes to read from memory. */ int (*read_std)(unsigned long addr, void *val, - unsigned int bytes, struct kvm_vcpu *vcpu); + unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error); + + /* + * fetch: Read bytes of standard (non-emulated/special) memory. + * Used for instruction fetch. + * @addr: [IN ] Linear address from which to read. + * @val: [OUT] Value read from memory, zero-extended to 'u_long'. + * @bytes: [IN ] Number of bytes to read from memory. + */ + int (*fetch)(unsigned long addr, void *val, + unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error); /* * read_emulated: Read bytes from emulated/special memory area. diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 15223372384..c07c16f6401 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -243,7 +243,8 @@ struct kvm_mmu { void (*new_cr3)(struct kvm_vcpu *vcpu); int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err); void (*free)(struct kvm_vcpu *vcpu); - gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva); + gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access, + u32 *error); void (*prefetch_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page); int (*sync_page)(struct kvm_vcpu *vcpu, @@ -660,6 +661,10 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu); int kvm_mmu_load(struct kvm_vcpu *vcpu); void kvm_mmu_unload(struct kvm_vcpu *vcpu); void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu); +gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error); +gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error); +gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error); +gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error); int kvm_emulate_hypercall(struct kvm_vcpu *vcpu); diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index e4e2df3b603..c44b4601484 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -616,7 +616,7 @@ static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt, if (linear < fc->start || linear >= fc->end) { size = min(15UL, PAGE_SIZE - offset_in_page(linear)); - rc = ops->read_std(linear, fc->data, size, ctxt->vcpu); + rc = ops->fetch(linear, fc->data, size, ctxt->vcpu, NULL); if (rc) return rc; fc->start = linear; @@ -671,11 +671,11 @@ static int read_descriptor(struct x86_emulate_ctxt *ctxt, op_bytes = 3; *address = 0; rc = ops->read_std((unsigned long)ptr, (unsigned long *)size, 2, - ctxt->vcpu); + ctxt->vcpu, NULL); if (rc) return 
rc; rc = ops->read_std((unsigned long)ptr + 2, address, op_bytes, - ctxt->vcpu); + ctxt->vcpu, NULL); return rc; } diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 739793240d1..741373e8ca7 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -138,12 +138,6 @@ module_param(oos_shadow, bool, 0644); #define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \ | PT64_NX_MASK) -#define PFERR_PRESENT_MASK (1U << 0) -#define PFERR_WRITE_MASK (1U << 1) -#define PFERR_USER_MASK (1U << 2) -#define PFERR_RSVD_MASK (1U << 3) -#define PFERR_FETCH_MASK (1U << 4) - #define RMAP_EXT 4 #define ACC_EXEC_MASK 1 @@ -1632,7 +1626,7 @@ struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva) { struct page *page; - gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva); + gpa_t gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL); if (gpa == UNMAPPED_GVA) return NULL; @@ -2155,8 +2149,11 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu) spin_unlock(&vcpu->kvm->mmu_lock); } -static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr) +static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr, + u32 access, u32 *error) { + if (error) + *error = 0; return vaddr; } @@ -2740,7 +2737,7 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva) if (tdp_enabled) return 0; - gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva); + gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL); spin_lock(&vcpu->kvm->mmu_lock); r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT); @@ -3237,7 +3234,7 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte, if (is_shadow_present_pte(ent) && !is_last_spte(ent, level)) audit_mappings_page(vcpu, ent, va, level - 1); else { - gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va); + gpa_t gpa = kvm_mmu_gva_to_gpa_read(vcpu, va, NULL); gfn_t gfn = gpa >> PAGE_SHIFT; pfn_t pfn = gfn_to_pfn(vcpu->kvm, gfn); hpa_t hpa = (hpa_t)pfn << PAGE_SHIFT; diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index 61ef5a65b7d..be66759321a 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -42,6 +42,12 @@ #define PT_DIRECTORY_LEVEL 2 #define PT_PAGE_TABLE_LEVEL 1 +#define PFERR_PRESENT_MASK (1U << 0) +#define PFERR_WRITE_MASK (1U << 1) +#define PFERR_USER_MASK (1U << 2) +#define PFERR_RSVD_MASK (1U << 3) +#define PFERR_FETCH_MASK (1U << 4) + int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]); static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu) diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index df15a5307d2..81eab9a50e6 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -490,18 +490,23 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva) spin_unlock(&vcpu->kvm->mmu_lock); } -static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr) +static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access, + u32 *error) { struct guest_walker walker; gpa_t gpa = UNMAPPED_GVA; int r; - r = FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0); + r = FNAME(walk_addr)(&walker, vcpu, vaddr, + !!(access & PFERR_WRITE_MASK), + !!(access & PFERR_USER_MASK), + !!(access & PFERR_FETCH_MASK)); if (r) { gpa = gfn_to_gpa(walker.gfn); gpa |= vaddr & ~PAGE_MASK; - } + } else if (error) + *error = walker.error_code; return gpa; } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index a28379507d3..ea3a8af8a47 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3039,14 +3039,41 @@ static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, 
void *v) return kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, len, v); } -static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes, - struct kvm_vcpu *vcpu) +gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error) +{ + u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; + return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error); +} + + gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error) +{ + u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; + access |= PFERR_FETCH_MASK; + return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error); +} + +gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error) +{ + u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; + access |= PFERR_WRITE_MASK; + return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error); +} + +/* uses this to access any guest's mapped memory without checking CPL */ +gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error) +{ + return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, 0, error); +} + +static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes, + struct kvm_vcpu *vcpu, u32 access, + u32 *error) { void *data = val; int r = X86EMUL_CONTINUE; while (bytes) { - gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr); + gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr, access, error); unsigned offset = addr & (PAGE_SIZE-1); unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset); int ret; @@ -3069,14 +3096,37 @@ out: return r; } +/* used for instruction fetching */ +static int kvm_fetch_guest_virt(gva_t addr, void *val, unsigned int bytes, + struct kvm_vcpu *vcpu, u32 *error) +{ + u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; + return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, + access | PFERR_FETCH_MASK, error); +} + +static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes, + struct kvm_vcpu *vcpu, u32 *error) +{ + u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? 
PFERR_USER_MASK : 0; + return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, + error); +} + +static int kvm_read_guest_virt_system(gva_t addr, void *val, unsigned int bytes, + struct kvm_vcpu *vcpu, u32 *error) +{ + return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, error); +} + static int kvm_write_guest_virt(gva_t addr, void *val, unsigned int bytes, - struct kvm_vcpu *vcpu) + struct kvm_vcpu *vcpu, u32 *error) { void *data = val; int r = X86EMUL_CONTINUE; while (bytes) { - gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr); + gpa_t gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, error); unsigned offset = addr & (PAGE_SIZE-1); unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset); int ret; @@ -3106,6 +3156,7 @@ static int emulator_read_emulated(unsigned long addr, struct kvm_vcpu *vcpu) { gpa_t gpa; + u32 error_code; if (vcpu->mmio_read_completed) { memcpy(val, vcpu->mmio_data, bytes); @@ -3115,17 +3166,20 @@ static int emulator_read_emulated(unsigned long addr, return X86EMUL_CONTINUE; } - gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr); + gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, &error_code); + + if (gpa == UNMAPPED_GVA) { + kvm_inject_page_fault(vcpu, addr, error_code); + return X86EMUL_PROPAGATE_FAULT; + } /* For APIC access vmexit */ if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) goto mmio; - if (kvm_read_guest_virt(addr, val, bytes, vcpu) + if (kvm_read_guest_virt(addr, val, bytes, vcpu, NULL) == X86EMUL_CONTINUE) return X86EMUL_CONTINUE; - if (gpa == UNMAPPED_GVA) - return X86EMUL_PROPAGATE_FAULT; mmio: /* @@ -3164,11 +3218,12 @@ static int emulator_write_emulated_onepage(unsigned long addr, struct kvm_vcpu *vcpu) { gpa_t gpa; + u32 error_code; - gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr); + gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, &error_code); if (gpa == UNMAPPED_GVA) { - kvm_inject_page_fault(vcpu, addr, 2); + kvm_inject_page_fault(vcpu, addr, error_code); return X86EMUL_PROPAGATE_FAULT; } @@ -3232,7 +3287,7 @@ static int emulator_cmpxchg_emulated(unsigned long addr, char *kaddr; u64 val; - gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr); + gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL); if (gpa == UNMAPPED_GVA || (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) @@ -3297,7 +3352,7 @@ void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context) rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS); - kvm_read_guest_virt(rip_linear, (void *)opcodes, 4, vcpu); + kvm_read_guest_virt(rip_linear, (void *)opcodes, 4, vcpu, NULL); printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n", context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]); @@ -3305,7 +3360,8 @@ void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context) EXPORT_SYMBOL_GPL(kvm_report_emulation_failure); static struct x86_emulate_ops emulate_ops = { - .read_std = kvm_read_guest_virt, + .read_std = kvm_read_guest_virt_system, + .fetch = kvm_fetch_guest_virt, .read_emulated = emulator_read_emulated, .write_emulated = emulator_write_emulated, .cmpxchg_emulated = emulator_cmpxchg_emulated, @@ -3442,12 +3498,17 @@ static int pio_copy_data(struct kvm_vcpu *vcpu) gva_t q = vcpu->arch.pio.guest_gva; unsigned bytes; int ret; + u32 error_code; bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count; if (vcpu->arch.pio.in) - ret = kvm_write_guest_virt(q, p, bytes, vcpu); + ret = kvm_write_guest_virt(q, p, bytes, vcpu, &error_code); else - ret = kvm_read_guest_virt(q, p, bytes, vcpu); + ret = kvm_read_guest_virt(q, p, bytes, vcpu, &error_code); + + if (ret 
== X86EMUL_PROPAGATE_FAULT) + kvm_inject_page_fault(vcpu, q, error_code); + return ret; } @@ -3468,7 +3529,7 @@ int complete_pio(struct kvm_vcpu *vcpu) if (io->in) { r = pio_copy_data(vcpu); if (r) - return r; + goto out; } delta = 1; @@ -3495,7 +3556,7 @@ int complete_pio(struct kvm_vcpu *vcpu) kvm_register_write(vcpu, VCPU_REGS_RSI, val); } } - +out: io->count -= io->cur_count; io->cur_count = 0; @@ -3617,10 +3678,8 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, int in, if (!vcpu->arch.pio.in) { /* string PIO write */ ret = pio_copy_data(vcpu); - if (ret == X86EMUL_PROPAGATE_FAULT) { - kvm_inject_gp(vcpu, 0); + if (ret == X86EMUL_PROPAGATE_FAULT) return 1; - } if (ret == 0 && !pio_string_write(vcpu)) { complete_pio(vcpu); if (vcpu->arch.pio.count == 0) @@ -4663,7 +4722,9 @@ static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc); return X86EMUL_PROPAGATE_FAULT; } - return kvm_read_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu); + return kvm_read_guest_virt_system(dtable.base + index*8, + seg_desc, sizeof(*seg_desc), + vcpu, NULL); } /* allowed just for 8 bytes segments */ @@ -4677,15 +4738,23 @@ static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, if (dtable.limit < index * 8 + 7) return 1; - return kvm_write_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu); + return kvm_write_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu, NULL); +} + +static gpa_t get_tss_base_addr_write(struct kvm_vcpu *vcpu, + struct desc_struct *seg_desc) +{ + u32 base_addr = get_desc_base(seg_desc); + + return kvm_mmu_gva_to_gpa_write(vcpu, base_addr, NULL); } -static gpa_t get_tss_base_addr(struct kvm_vcpu *vcpu, +static gpa_t get_tss_base_addr_read(struct kvm_vcpu *vcpu, struct desc_struct *seg_desc) { u32 base_addr = get_desc_base(seg_desc); - return vcpu->arch.mmu.gva_to_gpa(vcpu, base_addr); + return kvm_mmu_gva_to_gpa_read(vcpu, base_addr, NULL); } static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg) @@ -4894,7 +4963,7 @@ static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector, sizeof tss_segment_16)) goto out; - if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc), + if (kvm_read_guest(vcpu->kvm, get_tss_base_addr_read(vcpu, nseg_desc), &tss_segment_16, sizeof tss_segment_16)) goto out; @@ -4902,7 +4971,7 @@ static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector, tss_segment_16.prev_task_link = old_tss_sel; if (kvm_write_guest(vcpu->kvm, - get_tss_base_addr(vcpu, nseg_desc), + get_tss_base_addr_write(vcpu, nseg_desc), &tss_segment_16.prev_task_link, sizeof tss_segment_16.prev_task_link)) goto out; @@ -4933,7 +5002,7 @@ static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector, sizeof tss_segment_32)) goto out; - if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc), + if (kvm_read_guest(vcpu->kvm, get_tss_base_addr_read(vcpu, nseg_desc), &tss_segment_32, sizeof tss_segment_32)) goto out; @@ -4941,7 +5010,7 @@ static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector, tss_segment_32.prev_task_link = old_tss_sel; if (kvm_write_guest(vcpu->kvm, - get_tss_base_addr(vcpu, nseg_desc), + get_tss_base_addr_write(vcpu, nseg_desc), &tss_segment_32.prev_task_link, sizeof tss_segment_32.prev_task_link)) goto out; @@ -4964,7 +5033,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason) u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR); 
u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR); - old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base); + old_tss_base = kvm_mmu_gva_to_gpa_write(vcpu, old_tss_base, NULL); /* FIXME: Handle errors. Failure to read either TSS or their * descriptors should generate a pagefault. @@ -5199,7 +5268,7 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, vcpu_load(vcpu); idx = srcu_read_lock(&vcpu->kvm->srcu); - gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr); + gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL); srcu_read_unlock(&vcpu->kvm->srcu, idx); tr->physical_address = gpa; tr->valid = gpa != UNMAPPED_GVA; -- cgit v1.2.3-70-g09d2 From f850e2e603bf5a05b0aee7901857cf85715aa694 Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Wed, 10 Feb 2010 14:21:33 +0200 Subject: KVM: x86 emulator: Check IOPL level during io instruction emulation Make emulator check that vcpu is allowed to execute IN, INS, OUT, OUTS, CLI, STI. Signed-off-by: Gleb Natapov Cc: stable@kernel.org Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_host.h | 1 + arch/x86/kvm/emulate.c | 89 +++++++++++++++++++++++++++++++++++++---- arch/x86/kvm/x86.c | 10 ++--- 3 files changed, 87 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index c07c16f6401..f9a2f66530c 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -678,6 +678,7 @@ void kvm_disable_tdp(void); int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3); int complete_pio(struct kvm_vcpu *vcpu); +bool kvm_check_iopl(struct kvm_vcpu *vcpu); struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn); diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index c44b4601484..296e8519dc5 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -1698,6 +1698,57 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt) return 0; } +static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt) +{ + int iopl; + if (ctxt->mode == X86EMUL_MODE_REAL) + return false; + if (ctxt->mode == X86EMUL_MODE_VM86) + return true; + iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT; + return kvm_x86_ops->get_cpl(ctxt->vcpu) > iopl; +} + +static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt, + struct x86_emulate_ops *ops, + u16 port, u16 len) +{ + struct kvm_segment tr_seg; + int r; + u16 io_bitmap_ptr; + u8 perm, bit_idx = port & 0x7; + unsigned mask = (1 << len) - 1; + + kvm_get_segment(ctxt->vcpu, &tr_seg, VCPU_SREG_TR); + if (tr_seg.unusable) + return false; + if (tr_seg.limit < 103) + return false; + r = ops->read_std(tr_seg.base + 102, &io_bitmap_ptr, 2, ctxt->vcpu, + NULL); + if (r != X86EMUL_CONTINUE) + return false; + if (io_bitmap_ptr + port/8 > tr_seg.limit) + return false; + r = ops->read_std(tr_seg.base + io_bitmap_ptr + port/8, &perm, 1, + ctxt->vcpu, NULL); + if (r != X86EMUL_CONTINUE) + return false; + if ((perm >> bit_idx) & mask) + return false; + return true; +} + +static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt, + struct x86_emulate_ops *ops, + u16 port, u16 len) +{ + if (emulator_bad_iopl(ctxt)) + if (!emulator_io_port_access_allowed(ctxt, ops, port, len)) + return false; + return true; +} + int x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) { @@ -1889,7 +1940,12 @@ special_insn: break; case 0x6c: /* insb */ case 0x6d: /* insw/insd */ - if (kvm_emulate_pio_string(ctxt->vcpu, + if (!emulator_io_permited(ctxt, ops, c->regs[VCPU_REGS_RDX], + 
(c->d & ByteOp) ? 1 : c->op_bytes)) { + kvm_inject_gp(ctxt->vcpu, 0); + goto done; + } + if (kvm_emulate_pio_string(ctxt->vcpu, 1, (c->d & ByteOp) ? 1 : c->op_bytes, c->rep_prefix ? @@ -1905,6 +1961,11 @@ special_insn: return 0; case 0x6e: /* outsb */ case 0x6f: /* outsw/outsd */ + if (!emulator_io_permited(ctxt, ops, c->regs[VCPU_REGS_RDX], + (c->d & ByteOp) ? 1 : c->op_bytes)) { + kvm_inject_gp(ctxt->vcpu, 0); + goto done; + } if (kvm_emulate_pio_string(ctxt->vcpu, 0, (c->d & ByteOp) ? 1 : c->op_bytes, @@ -2202,7 +2263,13 @@ special_insn: case 0xef: /* out (e/r)ax,dx */ port = c->regs[VCPU_REGS_RDX]; io_dir_in = 0; - do_io: if (kvm_emulate_pio(ctxt->vcpu, io_dir_in, + do_io: + if (!emulator_io_permited(ctxt, ops, port, + (c->d & ByteOp) ? 1 : c->op_bytes)) { + kvm_inject_gp(ctxt->vcpu, 0); + goto done; + } + if (kvm_emulate_pio(ctxt->vcpu, io_dir_in, (c->d & ByteOp) ? 1 : c->op_bytes, port) != 0) { c->eip = saved_eip; @@ -2227,13 +2294,21 @@ special_insn: c->dst.type = OP_NONE; /* Disable writeback. */ break; case 0xfa: /* cli */ - ctxt->eflags &= ~X86_EFLAGS_IF; - c->dst.type = OP_NONE; /* Disable writeback. */ + if (emulator_bad_iopl(ctxt)) + kvm_inject_gp(ctxt->vcpu, 0); + else { + ctxt->eflags &= ~X86_EFLAGS_IF; + c->dst.type = OP_NONE; /* Disable writeback. */ + } break; case 0xfb: /* sti */ - toggle_interruptibility(ctxt, X86_SHADOW_INT_STI); - ctxt->eflags |= X86_EFLAGS_IF; - c->dst.type = OP_NONE; /* Disable writeback. */ + if (emulator_bad_iopl(ctxt)) + kvm_inject_gp(ctxt->vcpu, 0); + else { + toggle_interruptibility(ctxt, X86_SHADOW_INT_STI); + ctxt->eflags |= X86_EFLAGS_IF; + c->dst.type = OP_NONE; /* Disable writeback. */ + } break; case 0xfc: /* cld */ ctxt->eflags &= ~EFLG_DF; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index ea3a8af8a47..86b739f8f17 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3599,6 +3599,8 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, int in, int size, unsigned port) { unsigned long val; + trace_kvm_pio(!in, port, size, 1); + vcpu->run->exit_reason = KVM_EXIT_IO; vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT; vcpu->run->io.size = vcpu->arch.pio.size = size; @@ -3610,9 +3612,6 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, int in, int size, unsigned port) vcpu->arch.pio.down = 0; vcpu->arch.pio.rep = 0; - trace_kvm_pio(vcpu->run->io.direction == KVM_EXIT_IO_OUT, port, - size, 1); - if (!vcpu->arch.pio.in) { val = kvm_register_read(vcpu, VCPU_REGS_RAX); memcpy(vcpu->arch.pio_data, &val, 4); @@ -3633,6 +3632,8 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, int in, unsigned now, in_page; int ret = 0; + trace_kvm_pio(!in, port, size, count); + vcpu->run->exit_reason = KVM_EXIT_IO; vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT; vcpu->run->io.size = vcpu->arch.pio.size = size; @@ -3644,9 +3645,6 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, int in, vcpu->arch.pio.down = down; vcpu->arch.pio.rep = rep; - trace_kvm_pio(vcpu->run->io.direction == KVM_EXIT_IO_OUT, port, - size, count); - if (!count) { kvm_x86_ops->skip_emulated_instruction(vcpu); return 1; -- cgit v1.2.3-70-g09d2 From d4c6a1549c056f1d817e8f6f2f97d8b44933472f Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Wed, 10 Feb 2010 14:21:34 +0200 Subject: KVM: x86 emulator: Fix popf emulation POPF behaves differently depending on current CPU mode. Emulate correct logic to prevent guest from changing flags that it can't change otherwise. 
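As a concrete example of the behavior being emulated: in protected mode only CPL 0 may change IOPL and only CPL <= IOPL may change IF, while in virtual-8086 mode POPF faults with #GP when IOPL < 3. A condensed sketch of the mask logic in the new emulate_popf() follows; protected_mode, vm86_mode, inject_gp() and popped_val are shorthand for this sketch, not real helpers.

/* condensed sketch: which EFLAGS bits a POPF may actually change */
unsigned long change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
			  | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;
if (protected_mode) {
	if (cpl == 0)
		change_mask |= EFLG_IOPL;	/* only ring 0 may change IOPL */
	if (cpl <= iopl)
		change_mask |= EFLG_IF;		/* IF is writable only when CPL <= IOPL */
} else if (vm86_mode) {
	if (iopl < 3)
		return inject_gp();		/* POPF is IOPL-sensitive in VM86 */
	change_mask |= EFLG_IF;
} else {					/* real mode */
	change_mask |= EFLG_IOPL | EFLG_IF;
}
eflags = (eflags & ~change_mask) | (popped_val & change_mask);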
Signed-off-by: Gleb Natapov Cc: stable@kernel.org Signed-off-by: Avi Kivity --- arch/x86/kvm/emulate.c | 55 +++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 54 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 296e8519dc5..1782387c069 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -343,11 +343,18 @@ static u32 group2_table[] = { }; /* EFLAGS bit definitions. */ +#define EFLG_ID (1<<21) +#define EFLG_VIP (1<<20) +#define EFLG_VIF (1<<19) +#define EFLG_AC (1<<18) #define EFLG_VM (1<<17) #define EFLG_RF (1<<16) +#define EFLG_IOPL (3<<12) +#define EFLG_NT (1<<14) #define EFLG_OF (1<<11) #define EFLG_DF (1<<10) #define EFLG_IF (1<<9) +#define EFLG_TF (1<<8) #define EFLG_SF (1<<7) #define EFLG_ZF (1<<6) #define EFLG_AF (1<<4) @@ -1214,6 +1221,49 @@ static int emulate_pop(struct x86_emulate_ctxt *ctxt, return rc; } +static int emulate_popf(struct x86_emulate_ctxt *ctxt, + struct x86_emulate_ops *ops, + void *dest, int len) +{ + int rc; + unsigned long val, change_mask; + int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT; + int cpl = kvm_x86_ops->get_cpl(ctxt->vcpu); + + rc = emulate_pop(ctxt, ops, &val, len); + if (rc != X86EMUL_CONTINUE) + return rc; + + change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF + | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID; + + switch(ctxt->mode) { + case X86EMUL_MODE_PROT64: + case X86EMUL_MODE_PROT32: + case X86EMUL_MODE_PROT16: + if (cpl == 0) + change_mask |= EFLG_IOPL; + if (cpl <= iopl) + change_mask |= EFLG_IF; + break; + case X86EMUL_MODE_VM86: + if (iopl < 3) { + kvm_inject_gp(ctxt->vcpu, 0); + return X86EMUL_PROPAGATE_FAULT; + } + change_mask |= EFLG_IF; + break; + default: /* real mode */ + change_mask |= (EFLG_IOPL | EFLG_IF); + break; + } + + *(unsigned long *)dest = + (ctxt->eflags & ~change_mask) | (val & change_mask); + + return rc; +} + static void emulate_push_sreg(struct x86_emulate_ctxt *ctxt, int seg) { struct decode_cache *c = &ctxt->decode; @@ -2099,7 +2149,10 @@ special_insn: c->dst.type = OP_REG; c->dst.ptr = (unsigned long *) &ctxt->eflags; c->dst.bytes = c->op_bytes; - goto pop_instruction; + rc = emulate_popf(ctxt, ops, &c->dst.val, c->op_bytes); + if (rc != X86EMUL_CONTINUE) + goto done; + break; case 0xa0 ... 0xa1: /* mov */ c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX]; c->dst.val = c->src.val; -- cgit v1.2.3-70-g09d2 From e92805ac1228626c59c865f2f4e9059b9fb8c97b Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Wed, 10 Feb 2010 14:21:35 +0200 Subject: KVM: x86 emulator: Check CPL level during privilege instruction emulation Add CPL checking in case emulator is tricked into emulating privilege instruction from userspace. 
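The enforcement itself is a single check near the top of x86_emulate_insn(); a sketch is given here because the hunk below is hard to read in flattened form.

/* sketch: decode tables mark privileged opcodes with Priv (e.g. CLTS, INVD/WBINVD,
 * MOV to/from CRn/DRn, LGDT/LIDT/LMSW in Group7); at emulation time the check is: */
if ((c->d & Priv) && kvm_x86_ops->get_cpl(ctxt->vcpu)) {
	kvm_inject_gp(ctxt->vcpu, 0);	/* #GP(0) when CPL != 0, matching hardware */
	goto done;
}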
Signed-off-by: Gleb Natapov Cc: stable@kernel.org Signed-off-by: Avi Kivity --- arch/x86/kvm/emulate.c | 35 ++++++++++++++++++++--------------- 1 file changed, 20 insertions(+), 15 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 1782387c069..d63211169ac 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -76,6 +76,7 @@ #define GroupDual (1<<15) /* Alternate decoding of mod == 3 */ #define GroupMask 0xff /* Group number stored in bits 0:7 */ /* Misc flags */ +#define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */ #define No64 (1<<28) /* Source 2 operand type */ #define Src2None (0<<29) @@ -211,7 +212,7 @@ static u32 opcode_table[256] = { SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps, /* 0xF0 - 0xF7 */ 0, 0, 0, 0, - ImplicitOps, ImplicitOps, Group | Group3_Byte, Group | Group3, + ImplicitOps | Priv, ImplicitOps, Group | Group3_Byte, Group | Group3, /* 0xF8 - 0xFF */ ImplicitOps, 0, ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, Group | Group4, Group | Group5, @@ -219,16 +220,20 @@ static u32 opcode_table[256] = { static u32 twobyte_table[256] = { /* 0x00 - 0x0F */ - 0, Group | GroupDual | Group7, 0, 0, 0, ImplicitOps, ImplicitOps, 0, - ImplicitOps, ImplicitOps, 0, 0, 0, ImplicitOps | ModRM, 0, 0, + 0, Group | GroupDual | Group7, 0, 0, + 0, ImplicitOps, ImplicitOps | Priv, 0, + ImplicitOps | Priv, ImplicitOps | Priv, 0, 0, + 0, ImplicitOps | ModRM, 0, 0, /* 0x10 - 0x1F */ 0, 0, 0, 0, 0, 0, 0, 0, ImplicitOps | ModRM, 0, 0, 0, 0, 0, 0, 0, /* 0x20 - 0x2F */ - ModRM | ImplicitOps, ModRM, ModRM | ImplicitOps, ModRM, 0, 0, 0, 0, + ModRM | ImplicitOps | Priv, ModRM | Priv, + ModRM | ImplicitOps | Priv, ModRM | Priv, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x30 - 0x3F */ - ImplicitOps, 0, ImplicitOps, 0, - ImplicitOps, ImplicitOps, 0, 0, + ImplicitOps | Priv, 0, ImplicitOps | Priv, 0, + ImplicitOps, ImplicitOps | Priv, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x40 - 0x47 */ DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov, @@ -322,9 +327,9 @@ static u32 group_table[] = { SrcMem | ModRM | Stack, 0, SrcMem | ModRM | Stack, 0, SrcMem | ModRM | Stack, 0, [Group7*8] = - 0, 0, ModRM | SrcMem, ModRM | SrcMem, + 0, 0, ModRM | SrcMem | Priv, ModRM | SrcMem | Priv, SrcNone | ModRM | DstMem | Mov, 0, - SrcMem16 | ModRM | Mov, SrcMem | ModRM | ByteOp, + SrcMem16 | ModRM | Mov | Priv, SrcMem | ModRM | ByteOp | Priv, [Group8*8] = 0, 0, 0, 0, DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM, @@ -335,7 +340,7 @@ static u32 group_table[] = { static u32 group2_table[] = { [Group7*8] = - SrcNone | ModRM, 0, 0, SrcNone | ModRM, + SrcNone | ModRM | Priv, 0, 0, SrcNone | ModRM, SrcNone | ModRM | DstMem | Mov, 0, SrcMem16 | ModRM | Mov, 0, [Group9*8] = @@ -1700,12 +1705,6 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt) return -1; } - /* sysexit must be called from CPL 0 */ - if (kvm_x86_ops->get_cpl(ctxt->vcpu) != 0) { - kvm_inject_gp(ctxt->vcpu, 0); - return -1; - } - setup_syscalls_segments(ctxt, &cs, &ss); if ((c->rex_prefix & 0x8) != 0x0) @@ -1820,6 +1819,12 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs); saved_eip = c->eip; + /* Privileged instruction can be executed only in CPL=0 */ + if ((c->d & Priv) && kvm_x86_ops->get_cpl(ctxt->vcpu)) { + kvm_inject_gp(ctxt->vcpu, 0); + goto done; + } + if (((c->d & ModRM) && (c->modrm_mod != 3)) || (c->d & MemAbs)) memop = c->modrm_ea; -- cgit v1.2.3-70-g09d2 From 
d380a5e4022a5c63a6c5ac631e48c752ba7f1e4f Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Wed, 10 Feb 2010 14:21:36 +0200 Subject: KVM: x86 emulator: Add LOCK prefix validity checking Instructions which are not allowed to have LOCK prefix should generate #UD if one is used. [avi: fold opcode 82 fix from another patch] Signed-off-by: Gleb Natapov Signed-off-by: Avi Kivity --- arch/x86/kvm/emulate.c | 97 +++++++++++++++++++++++++++++--------------------- 1 file changed, 56 insertions(+), 41 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index d63211169ac..c2de9f03da8 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -76,6 +76,7 @@ #define GroupDual (1<<15) /* Alternate decoding of mod == 3 */ #define GroupMask 0xff /* Group number stored in bits 0:7 */ /* Misc flags */ +#define Lock (1<<26) /* lock prefix is allowed for the instruction */ #define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */ #define No64 (1<<28) /* Source 2 operand type */ @@ -94,35 +95,35 @@ enum { static u32 opcode_table[256] = { /* 0x00 - 0x07 */ - ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, + ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock, ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, ByteOp | DstAcc | SrcImm, DstAcc | SrcImm, ImplicitOps | Stack | No64, ImplicitOps | Stack | No64, /* 0x08 - 0x0F */ - ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, + ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock, ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, ByteOp | DstAcc | SrcImm, DstAcc | SrcImm, ImplicitOps | Stack | No64, 0, /* 0x10 - 0x17 */ - ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, + ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock, ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, ByteOp | DstAcc | SrcImm, DstAcc | SrcImm, ImplicitOps | Stack | No64, ImplicitOps | Stack | No64, /* 0x18 - 0x1F */ - ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, + ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock, ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, ByteOp | DstAcc | SrcImm, DstAcc | SrcImm, ImplicitOps | Stack | No64, ImplicitOps | Stack | No64, /* 0x20 - 0x27 */ - ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, + ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock, ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, DstAcc | SrcImmByte, DstAcc | SrcImm, 0, 0, /* 0x28 - 0x2F */ - ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, + ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock, ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, 0, 0, 0, 0, /* 0x30 - 0x37 */ - ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, + ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock, ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, 0, 0, 0, 0, /* 0x38 - 0x3F */ @@ -158,7 +159,7 @@ static u32 opcode_table[256] = { Group | Group1_80, Group | Group1_81, Group | Group1_82, Group | Group1_83, ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, - ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, + ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock, /* 0x88 - 0x8F */ ByteOp | DstMem | SrcReg | ModRM | Mov, DstMem | SrcReg | ModRM | Mov, ByteOp | DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov, @@ -263,17 +264,18 @@ 
static u32 twobyte_table[256] = { DstMem | SrcReg | Src2CL | ModRM, 0, 0, /* 0xA8 - 0xAF */ ImplicitOps | Stack, ImplicitOps | Stack, - 0, DstMem | SrcReg | ModRM | BitOp, + 0, DstMem | SrcReg | ModRM | BitOp | Lock, DstMem | SrcReg | Src2ImmByte | ModRM, DstMem | SrcReg | Src2CL | ModRM, ModRM, 0, /* 0xB0 - 0xB7 */ - ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, 0, - DstMem | SrcReg | ModRM | BitOp, + ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock, + 0, DstMem | SrcReg | ModRM | BitOp | Lock, 0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem16 | ModRM | Mov, /* 0xB8 - 0xBF */ - 0, 0, Group | Group8, DstMem | SrcReg | ModRM | BitOp, + 0, 0, + Group | Group8, DstMem | SrcReg | ModRM | BitOp | Lock, 0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem16 | ModRM | Mov, /* 0xC0 - 0xCF */ @@ -290,25 +292,41 @@ static u32 twobyte_table[256] = { static u32 group_table[] = { [Group1_80*8] = - ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM, - ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM, - ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM, - ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM, + ByteOp | DstMem | SrcImm | ModRM | Lock, + ByteOp | DstMem | SrcImm | ModRM | Lock, + ByteOp | DstMem | SrcImm | ModRM | Lock, + ByteOp | DstMem | SrcImm | ModRM | Lock, + ByteOp | DstMem | SrcImm | ModRM | Lock, + ByteOp | DstMem | SrcImm | ModRM | Lock, + ByteOp | DstMem | SrcImm | ModRM | Lock, + ByteOp | DstMem | SrcImm | ModRM, [Group1_81*8] = - DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM, - DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM, - DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM, - DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM, + DstMem | SrcImm | ModRM | Lock, + DstMem | SrcImm | ModRM | Lock, + DstMem | SrcImm | ModRM | Lock, + DstMem | SrcImm | ModRM | Lock, + DstMem | SrcImm | ModRM | Lock, + DstMem | SrcImm | ModRM | Lock, + DstMem | SrcImm | ModRM | Lock, + DstMem | SrcImm | ModRM, [Group1_82*8] = - ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM, - ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM, - ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM, - ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM, + ByteOp | DstMem | SrcImm | ModRM | Lock, + ByteOp | DstMem | SrcImm | ModRM | Lock, + ByteOp | DstMem | SrcImm | ModRM | Lock, + ByteOp | DstMem | SrcImm | ModRM | Lock, + ByteOp | DstMem | SrcImm | ModRM | Lock, + ByteOp | DstMem | SrcImm | ModRM | Lock, + ByteOp | DstMem | SrcImm | ModRM | Lock, + ByteOp | DstMem | SrcImm | ModRM, [Group1_83*8] = - DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM, - DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM, - DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM, - DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM, + DstMem | SrcImmByte | ModRM | Lock, + DstMem | SrcImmByte | ModRM | Lock, + DstMem | SrcImmByte | ModRM | Lock, + DstMem | SrcImmByte | ModRM | Lock, + DstMem | SrcImmByte | ModRM | Lock, + DstMem | SrcImmByte | ModRM | Lock, + DstMem | SrcImmByte | ModRM | Lock, + DstMem | SrcImmByte | ModRM, [Group1A*8] = DstMem | SrcNone | ModRM | Mov | Stack, 0, 0, 0, 0, 0, 0, 0, [Group3_Byte*8] = @@ -332,10 +350,10 @@ static u32 group_table[] = { SrcMem16 | ModRM | Mov | Priv, SrcMem | ModRM | ByteOp | Priv, [Group8*8] = 0, 0, 0, 0, - DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM, - DstMem | 
SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM, + DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM | Lock, + DstMem | SrcImmByte | ModRM | Lock, DstMem | SrcImmByte | ModRM | Lock, [Group9*8] = - 0, ImplicitOps | ModRM, 0, 0, 0, 0, 0, 0, + 0, ImplicitOps | ModRM | Lock, 0, 0, 0, 0, 0, 0, }; static u32 group2_table[] = { @@ -1580,8 +1598,7 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt) u64 msr_data; /* syscall is not available in real mode */ - if (c->lock_prefix || ctxt->mode == X86EMUL_MODE_REAL - || ctxt->mode == X86EMUL_MODE_VM86) + if (ctxt->mode == X86EMUL_MODE_REAL || ctxt->mode == X86EMUL_MODE_VM86) return -1; setup_syscalls_segments(ctxt, &cs, &ss); @@ -1629,10 +1646,6 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt) struct kvm_segment cs, ss; u64 msr_data; - /* inject #UD if LOCK prefix is used */ - if (c->lock_prefix) - return -1; - /* inject #GP if in real mode */ if (ctxt->mode == X86EMUL_MODE_REAL) { kvm_inject_gp(ctxt->vcpu, 0); @@ -1694,10 +1707,6 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt) u64 msr_data; int usermode; - /* inject #UD if LOCK prefix is used */ - if (c->lock_prefix) - return -1; - /* inject #GP if in real mode or Virtual 8086 mode */ if (ctxt->mode == X86EMUL_MODE_REAL || ctxt->mode == X86EMUL_MODE_VM86) { @@ -1819,6 +1828,12 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs); saved_eip = c->eip; + /* LOCK prefix is allowed only with some instructions */ + if (c->lock_prefix && !(c->d & Lock)) { + kvm_queue_exception(ctxt->vcpu, UD_VECTOR); + goto done; + } + /* Privileged instruction can be executed only in CPL=0 */ if ((c->d & Priv) && kvm_x86_ops->get_cpl(ctxt->vcpu)) { kvm_inject_gp(ctxt->vcpu, 0); -- cgit v1.2.3-70-g09d2 From 1d327eac3c9a8b170d68df44957d82bdbd263c53 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Thu, 11 Feb 2010 11:12:07 +0800 Subject: KVM: x86 emulator: code style cleanup Just remove redundant semicolon. Signed-off-by: Wei Yongjun Signed-off-by: Avi Kivity --- arch/x86/kvm/emulate.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index c2de9f03da8..dd1b9352237 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -1016,7 +1016,7 @@ done_prefixes: } if (mode == X86EMUL_MODE_PROT64 && (c->d & No64)) { - kvm_report_emulation_failure(ctxt->vcpu, "invalid x86/64 instruction");; + kvm_report_emulation_failure(ctxt->vcpu, "invalid x86/64 instruction"); return -1; } -- cgit v1.2.3-70-g09d2 From e424e1918345ab17d3beb068b8745ab1706d0e1b Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Thu, 11 Feb 2010 12:41:10 +0200 Subject: KVM: x86 emulator: disallow opcode 82 in 64-bit mode Instructions with opcode 82 are not valid in 64 bit mode. 
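For reference, the decoder already rejects No64-flagged opcodes in long mode with a check along these lines (simplified from the x86_decode_insn() code shown earlier in this series), so marking the Group1_82 entries with No64 is enough to make opcode 82 undecodable in 64-bit mode:

/* existing decode-time rejection that No64 hooks into (simplified) */
if (mode == X86EMUL_MODE_PROT64 && (c->d & No64)) {
	kvm_report_emulation_failure(ctxt->vcpu, "invalid x86/64 instruction");
	return -1;
}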
Signed-off-by: Gleb Natapov Signed-off-by: Avi Kivity --- arch/x86/kvm/emulate.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index dd1b9352237..c280c2352f7 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -310,14 +310,14 @@ static u32 group_table[] = { DstMem | SrcImm | ModRM | Lock, DstMem | SrcImm | ModRM, [Group1_82*8] = - ByteOp | DstMem | SrcImm | ModRM | Lock, - ByteOp | DstMem | SrcImm | ModRM | Lock, - ByteOp | DstMem | SrcImm | ModRM | Lock, - ByteOp | DstMem | SrcImm | ModRM | Lock, - ByteOp | DstMem | SrcImm | ModRM | Lock, - ByteOp | DstMem | SrcImm | ModRM | Lock, - ByteOp | DstMem | SrcImm | ModRM | Lock, - ByteOp | DstMem | SrcImm | ModRM, + ByteOp | DstMem | SrcImm | ModRM | No64 | Lock, + ByteOp | DstMem | SrcImm | ModRM | No64 | Lock, + ByteOp | DstMem | SrcImm | ModRM | No64 | Lock, + ByteOp | DstMem | SrcImm | ModRM | No64 | Lock, + ByteOp | DstMem | SrcImm | ModRM | No64 | Lock, + ByteOp | DstMem | SrcImm | ModRM | No64 | Lock, + ByteOp | DstMem | SrcImm | ModRM | No64 | Lock, + ByteOp | DstMem | SrcImm | ModRM | No64, [Group1_83*8] = DstMem | SrcImmByte | ModRM | Lock, DstMem | SrcImmByte | ModRM | Lock, -- cgit v1.2.3-70-g09d2 From fa8273e9549b3cee2947d1aebe5242e7a25eb3de Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 17 Feb 2010 14:00:41 +0000 Subject: KVM: Convert i8254/i8259 locks to raw_spinlocks The i8254/i8259 locks need to be real spinlocks on preempt-rt. Convert them to raw_spinlock. No change for !RT kernels. Signed-off-by: Thomas Gleixner Signed-off-by: Avi Kivity --- arch/x86/kvm/i8254.c | 10 +++++----- arch/x86/kvm/i8254.h | 2 +- arch/x86/kvm/i8259.c | 31 ++++++++++++++++--------------- arch/x86/kvm/irq.h | 2 +- arch/x86/kvm/x86.c | 8 ++++---- 5 files changed, 27 insertions(+), 26 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index c9569f20dfb..294698b6daf 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c @@ -242,11 +242,11 @@ static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian) { struct kvm_kpit_state *ps = container_of(kian, struct kvm_kpit_state, irq_ack_notifier); - spin_lock(&ps->inject_lock); + raw_spin_lock(&ps->inject_lock); if (atomic_dec_return(&ps->pit_timer.pending) < 0) atomic_inc(&ps->pit_timer.pending); ps->irq_ack = 1; - spin_unlock(&ps->inject_lock); + raw_spin_unlock(&ps->inject_lock); } void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu) @@ -624,7 +624,7 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags) mutex_init(&pit->pit_state.lock); mutex_lock(&pit->pit_state.lock); - spin_lock_init(&pit->pit_state.inject_lock); + raw_spin_lock_init(&pit->pit_state.inject_lock); kvm->arch.vpit = pit; pit->kvm = kvm; @@ -724,12 +724,12 @@ void kvm_inject_pit_timer_irqs(struct kvm_vcpu *vcpu) /* Try to inject pending interrupts when * last one has been acked. 
*/ - spin_lock(&ps->inject_lock); + raw_spin_lock(&ps->inject_lock); if (atomic_read(&ps->pit_timer.pending) && ps->irq_ack) { ps->irq_ack = 0; inject = 1; } - spin_unlock(&ps->inject_lock); + raw_spin_unlock(&ps->inject_lock); if (inject) __inject_pit_timer_intr(kvm); } diff --git a/arch/x86/kvm/i8254.h b/arch/x86/kvm/i8254.h index d4c1c7ffdc0..900d6b0ba7c 100644 --- a/arch/x86/kvm/i8254.h +++ b/arch/x86/kvm/i8254.h @@ -27,7 +27,7 @@ struct kvm_kpit_state { u32 speaker_data_on; struct mutex lock; struct kvm_pit *pit; - spinlock_t inject_lock; + raw_spinlock_t inject_lock; unsigned long irq_ack; struct kvm_irq_ack_notifier irq_ack_notifier; }; diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c index a3711f9e580..07771da85de 100644 --- a/arch/x86/kvm/i8259.c +++ b/arch/x86/kvm/i8259.c @@ -44,18 +44,19 @@ static void pic_clear_isr(struct kvm_kpic_state *s, int irq) * Other interrupt may be delivered to PIC while lock is dropped but * it should be safe since PIC state is already updated at this stage. */ - spin_unlock(&s->pics_state->lock); + raw_spin_unlock(&s->pics_state->lock); kvm_notify_acked_irq(s->pics_state->kvm, SELECT_PIC(irq), irq); - spin_lock(&s->pics_state->lock); + raw_spin_lock(&s->pics_state->lock); } void kvm_pic_clear_isr_ack(struct kvm *kvm) { struct kvm_pic *s = pic_irqchip(kvm); - spin_lock(&s->lock); + + raw_spin_lock(&s->lock); s->pics[0].isr_ack = 0xff; s->pics[1].isr_ack = 0xff; - spin_unlock(&s->lock); + raw_spin_unlock(&s->lock); } /* @@ -156,9 +157,9 @@ static void pic_update_irq(struct kvm_pic *s) void kvm_pic_update_irq(struct kvm_pic *s) { - spin_lock(&s->lock); + raw_spin_lock(&s->lock); pic_update_irq(s); - spin_unlock(&s->lock); + raw_spin_unlock(&s->lock); } int kvm_pic_set_irq(void *opaque, int irq, int level) @@ -166,14 +167,14 @@ int kvm_pic_set_irq(void *opaque, int irq, int level) struct kvm_pic *s = opaque; int ret = -1; - spin_lock(&s->lock); + raw_spin_lock(&s->lock); if (irq >= 0 && irq < PIC_NUM_PINS) { ret = pic_set_irq1(&s->pics[irq >> 3], irq & 7, level); pic_update_irq(s); trace_kvm_pic_set_irq(irq >> 3, irq & 7, s->pics[irq >> 3].elcr, s->pics[irq >> 3].imr, ret == 0); } - spin_unlock(&s->lock); + raw_spin_unlock(&s->lock); return ret; } @@ -203,7 +204,7 @@ int kvm_pic_read_irq(struct kvm *kvm) int irq, irq2, intno; struct kvm_pic *s = pic_irqchip(kvm); - spin_lock(&s->lock); + raw_spin_lock(&s->lock); irq = pic_get_irq(&s->pics[0]); if (irq >= 0) { pic_intack(&s->pics[0], irq); @@ -228,7 +229,7 @@ int kvm_pic_read_irq(struct kvm *kvm) intno = s->pics[0].irq_base + irq; } pic_update_irq(s); - spin_unlock(&s->lock); + raw_spin_unlock(&s->lock); return intno; } @@ -442,7 +443,7 @@ static int picdev_write(struct kvm_io_device *this, printk(KERN_ERR "PIC: non byte write\n"); return 0; } - spin_lock(&s->lock); + raw_spin_lock(&s->lock); switch (addr) { case 0x20: case 0x21: @@ -455,7 +456,7 @@ static int picdev_write(struct kvm_io_device *this, elcr_ioport_write(&s->pics[addr & 1], addr, data); break; } - spin_unlock(&s->lock); + raw_spin_unlock(&s->lock); return 0; } @@ -472,7 +473,7 @@ static int picdev_read(struct kvm_io_device *this, printk(KERN_ERR "PIC: non byte read\n"); return 0; } - spin_lock(&s->lock); + raw_spin_lock(&s->lock); switch (addr) { case 0x20: case 0x21: @@ -486,7 +487,7 @@ static int picdev_read(struct kvm_io_device *this, break; } *(unsigned char *)val = data; - spin_unlock(&s->lock); + raw_spin_unlock(&s->lock); return 0; } @@ -520,7 +521,7 @@ struct kvm_pic *kvm_create_pic(struct kvm *kvm) s = kzalloc(sizeof(struct 
kvm_pic), GFP_KERNEL); if (!s) return NULL; - spin_lock_init(&s->lock); + raw_spin_lock_init(&s->lock); s->kvm = kvm; s->pics[0].elcr_mask = 0xf8; s->pics[1].elcr_mask = 0xde; diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h index 0b71d480ebf..34b15915754 100644 --- a/arch/x86/kvm/irq.h +++ b/arch/x86/kvm/irq.h @@ -62,7 +62,7 @@ struct kvm_kpic_state { }; struct kvm_pic { - spinlock_t lock; + raw_spinlock_t lock; unsigned pending_acks; struct kvm *kvm; struct kvm_kpic_state pics[2]; /* 0 is master pic, 1 is slave pic */ diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 86b739f8f17..f6ae4875d9d 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2542,18 +2542,18 @@ static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) r = 0; switch (chip->chip_id) { case KVM_IRQCHIP_PIC_MASTER: - spin_lock(&pic_irqchip(kvm)->lock); + raw_spin_lock(&pic_irqchip(kvm)->lock); memcpy(&pic_irqchip(kvm)->pics[0], &chip->chip.pic, sizeof(struct kvm_pic_state)); - spin_unlock(&pic_irqchip(kvm)->lock); + raw_spin_unlock(&pic_irqchip(kvm)->lock); break; case KVM_IRQCHIP_PIC_SLAVE: - spin_lock(&pic_irqchip(kvm)->lock); + raw_spin_lock(&pic_irqchip(kvm)->lock); memcpy(&pic_irqchip(kvm)->pics[1], &chip->chip.pic, sizeof(struct kvm_pic_state)); - spin_unlock(&pic_irqchip(kvm)->lock); + raw_spin_unlock(&pic_irqchip(kvm)->lock); break; case KVM_IRQCHIP_IOAPIC: r = kvm_set_ioapic(kvm, &chip->chip.ioapic); -- cgit v1.2.3-70-g09d2 From 8b9f44140bc4afd2698413cd9960c3912168ee91 Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Thu, 18 Feb 2010 12:14:59 +0200 Subject: KVM: x86 emulator: Forbid modifying CS segment register by mov instruction Inject #UD if the guest attempts to do so. This is in accordance with the Intel SDM. Cc: stable@kernel.org (2.6.33, 2.6.32) Signed-off-by: Gleb Natapov Signed-off-by: Avi Kivity --- arch/x86/kvm/emulate.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'arch') diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index c280c2352f7..2db760ff887 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -2126,6 +2126,12 @@ special_insn: int err; sel = c->src.val; + + if (c->modrm_reg == VCPU_SREG_CS) { + kvm_queue_exception(ctxt->vcpu, UD_VECTOR); + goto done; + } + if (c->modrm_reg == VCPU_SREG_SS) toggle_interruptibility(ctxt, X86_SHADOW_INT_MOV_SS); -- cgit v1.2.3-70-g09d2 From 6f550484a15ea1b468665cdf59f020bf08ccb292 Mon Sep 17 00:00:00 2001 From: Takuya Yoshikawa Date: Thu, 18 Feb 2010 12:15:00 +0200 Subject: KVM: Fix load_guest_segment_descriptor() to inject page fault This patch injects a page fault when reading the descriptor in load_guest_segment_descriptor() fails with X86EMUL_PROPAGATE_FAULT. Effects of this injection: this function is used by kvm_load_segment_descriptor(), which is needed by the following instructions: - mov seg,r/m16 - jmp far - pop ?s This patch makes it possible to emulate the page faults generated by these instructions. Note that unless we change kvm_load_segment_descriptor()'s return value propagation, this patch has no effect.
Signed-off-by: Takuya Yoshikawa Signed-off-by: Gleb Natapov Signed-off-by: Avi Kivity --- arch/x86/kvm/x86.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index f6ae4875d9d..203ee7d0ed5 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4713,6 +4713,9 @@ static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, { struct descriptor_table dtable; u16 index = selector >> 3; + int ret; + u32 err; + gva_t addr; get_segment_descriptor_dtable(vcpu, selector, &dtable); @@ -4720,9 +4723,13 @@ static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc); return X86EMUL_PROPAGATE_FAULT; } - return kvm_read_guest_virt_system(dtable.base + index*8, - seg_desc, sizeof(*seg_desc), - vcpu, NULL); + addr = dtable.base + index * 8; + ret = kvm_read_guest_virt_system(addr, seg_desc, sizeof(*seg_desc), + vcpu, &err); + if (ret == X86EMUL_PROPAGATE_FAULT) + kvm_inject_page_fault(vcpu, addr, err); + + return ret; } /* allowed just for 8 bytes segments */ -- cgit v1.2.3-70-g09d2 From c697518a861e6c43b92b848895f9926580ee63c3 Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Thu, 18 Feb 2010 12:15:01 +0200 Subject: KVM: Fix segment descriptor loading Add proper error and permission checking. This patch also changes the task switching code to load segment selectors before segment descriptors, as the SDM requires; otherwise permission checking during segment descriptor loading will be incorrect. Cc: stable@kernel.org (2.6.33, 2.6.32) Signed-off-by: Gleb Natapov Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_host.h | 3 +- arch/x86/kvm/emulate.c | 30 ++----- arch/x86/kvm/x86.c | 177 ++++++++++++++++++++++++++++++++-------- 3 files changed, 151 insertions(+), 59 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index f9a2f66530c..06d9e79ca37 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -614,8 +614,7 @@ int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value); void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); -int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, - int type_bits, int seg); +int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg); int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason); diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 2db760ff887..a1a7b27adf4 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -1309,7 +1309,7 @@ static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt, if (rc != 0) return rc; - rc = kvm_load_segment_descriptor(ctxt->vcpu, (u16)selector, 1, seg); + rc = kvm_load_segment_descriptor(ctxt->vcpu, (u16)selector, seg); return rc; } @@ -1491,7 +1491,7 @@ static int emulate_ret_far(struct x86_emulate_ctxt *ctxt, rc = emulate_pop(ctxt, ops, &cs, c->op_bytes); if (rc) return rc; - rc = kvm_load_segment_descriptor(ctxt->vcpu, (u16)cs, 1, VCPU_SREG_CS); + rc = kvm_load_segment_descriptor(ctxt->vcpu, (u16)cs, VCPU_SREG_CS); return rc; } @@ -2122,12 +2122,11 @@ special_insn: break; case 0x8e: { /* mov seg, r/m16 */ uint16_t sel; - int type_bits; - int err; sel = c->src.val; - if (c->modrm_reg == VCPU_SREG_CS) { + if (c->modrm_reg == VCPU_SREG_CS || + c->modrm_reg > VCPU_SREG_GS) { kvm_queue_exception(ctxt->vcpu, UD_VECTOR); goto done; } @@ -2135,18 
+2134,7 @@ special_insn: if (c->modrm_reg == VCPU_SREG_SS) toggle_interruptibility(ctxt, X86_SHADOW_INT_MOV_SS); - if (c->modrm_reg <= 5) { - type_bits = (c->modrm_reg == 1) ? 9 : 1; - err = kvm_load_segment_descriptor(ctxt->vcpu, sel, - type_bits, c->modrm_reg); - } else { - printk(KERN_INFO "Invalid segreg in modrm byte 0x%02x\n", - c->modrm); - goto cannot_emulate; - } - - if (err < 0) - goto cannot_emulate; + rc = kvm_load_segment_descriptor(ctxt->vcpu, sel, c->modrm_reg); c->dst.type = OP_NONE; /* Disable writeback. */ break; @@ -2320,11 +2308,9 @@ special_insn: case 0xe9: /* jmp rel */ goto jmp; case 0xea: /* jmp far */ - if (kvm_load_segment_descriptor(ctxt->vcpu, c->src2.val, 9, - VCPU_SREG_CS) < 0) { - DPRINTF("jmp far: Failed to load CS descriptor\n"); - goto cannot_emulate; - } + if (kvm_load_segment_descriptor(ctxt->vcpu, c->src2.val, + VCPU_SREG_CS)) + goto done; c->eip = c->src.val; break; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 203ee7d0ed5..c3d2acbbb91 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4787,7 +4787,7 @@ static int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int se .unusable = 0, }; kvm_x86_ops->set_segment(vcpu, &segvar, seg); - return 0; + return X86EMUL_CONTINUE; } static int is_vm86_segment(struct kvm_vcpu *vcpu, int seg) @@ -4797,43 +4797,112 @@ static int is_vm86_segment(struct kvm_vcpu *vcpu, int seg) (kvm_get_rflags(vcpu) & X86_EFLAGS_VM); } -static void kvm_check_segment_descriptor(struct kvm_vcpu *vcpu, int seg, - u16 selector) -{ - /* NULL selector is not valid for CS and SS */ - if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS) - if (!selector) - kvm_queue_exception_e(vcpu, TS_VECTOR, selector >> 3); -} - -int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, - int type_bits, int seg) +int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg) { struct kvm_segment kvm_seg; struct desc_struct seg_desc; + u8 dpl, rpl, cpl; + unsigned err_vec = GP_VECTOR; + u32 err_code = 0; + bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */ + int ret; if (is_vm86_segment(vcpu, seg) || !is_protmode(vcpu)) return kvm_load_realmode_segment(vcpu, selector, seg); - if (load_guest_segment_descriptor(vcpu, selector, &seg_desc)) - return 1; + /* NULL selector is not valid for TR, CS and SS */ + if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR) + && null_selector) + goto exception; + + /* TR should be in GDT only */ + if (seg == VCPU_SREG_TR && (selector & (1 << 2))) + goto exception; + + ret = load_guest_segment_descriptor(vcpu, selector, &seg_desc); + if (ret) + return ret; + seg_desct_to_kvm_desct(&seg_desc, selector, &kvm_seg); - kvm_check_segment_descriptor(vcpu, seg, selector); - kvm_seg.type |= type_bits; + if (null_selector) { /* for NULL selector skip all following checks */ + kvm_seg.unusable = 1; + goto load; + } - if (seg != VCPU_SREG_SS && seg != VCPU_SREG_CS && - seg != VCPU_SREG_LDTR) - if (!kvm_seg.s) - kvm_seg.unusable = 1; + err_code = selector & 0xfffc; + err_vec = GP_VECTOR; - kvm_set_segment(vcpu, &kvm_seg, seg); - if (selector && !kvm_seg.unusable && kvm_seg.s) { + /* can't load system descriptor into segment selecor */ + if (seg <= VCPU_SREG_GS && !kvm_seg.s) + goto exception; + + if (!kvm_seg.present) { + err_vec = (seg == VCPU_SREG_SS) ? 
SS_VECTOR : NP_VECTOR; + goto exception; + } + + rpl = selector & 3; + dpl = kvm_seg.dpl; + cpl = kvm_x86_ops->get_cpl(vcpu); + + switch (seg) { + case VCPU_SREG_SS: + /* + * segment is not a writable data segment or segment + * selector's RPL != CPL or segment selector's RPL != CPL + */ + if (rpl != cpl || (kvm_seg.type & 0xa) != 0x2 || dpl != cpl) + goto exception; + break; + case VCPU_SREG_CS: + if (!(kvm_seg.type & 8)) + goto exception; + + if (kvm_seg.type & 4) { + /* conforming */ + if (dpl > cpl) + goto exception; + } else { + /* nonconforming */ + if (rpl > cpl || dpl != cpl) + goto exception; + } + /* CS(RPL) <- CPL */ + selector = (selector & 0xfffc) | cpl; + break; + case VCPU_SREG_TR: + if (kvm_seg.s || (kvm_seg.type != 1 && kvm_seg.type != 9)) + goto exception; + break; + case VCPU_SREG_LDTR: + if (kvm_seg.s || kvm_seg.type != 2) + goto exception; + break; + default: /* DS, ES, FS, or GS */ + /* + * segment is not a data or readable code segment or + * ((segment is a data or nonconforming code segment) + * and (both RPL and CPL > DPL)) + */ + if ((kvm_seg.type & 0xa) == 0x8 || + (((kvm_seg.type & 0xc) != 0xc) && (rpl > dpl && cpl > dpl))) + goto exception; + break; + } + + if (!kvm_seg.unusable && kvm_seg.s) { /* mark segment as accessed */ + kvm_seg.type |= 1; seg_desc.type |= 1; save_guest_segment_descriptor(vcpu, selector, &seg_desc); } - return 0; +load: + kvm_set_segment(vcpu, &kvm_seg, seg); + return X86EMUL_CONTINUE; +exception: + kvm_queue_exception_e(vcpu, err_vec, err_code); + return X86EMUL_PROPAGATE_FAULT; } static void save_state_to_tss32(struct kvm_vcpu *vcpu, @@ -4859,6 +4928,14 @@ static void save_state_to_tss32(struct kvm_vcpu *vcpu, tss->ldt_selector = get_segment_selector(vcpu, VCPU_SREG_LDTR); } +static void kvm_load_segment_selector(struct kvm_vcpu *vcpu, u16 sel, int seg) +{ + struct kvm_segment kvm_seg; + kvm_get_segment(vcpu, &kvm_seg, seg); + kvm_seg.selector = sel; + kvm_set_segment(vcpu, &kvm_seg, seg); +} + static int load_state_from_tss32(struct kvm_vcpu *vcpu, struct tss_segment_32 *tss) { @@ -4876,25 +4953,41 @@ static int load_state_from_tss32(struct kvm_vcpu *vcpu, kvm_register_write(vcpu, VCPU_REGS_RSI, tss->esi); kvm_register_write(vcpu, VCPU_REGS_RDI, tss->edi); - if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR)) + /* + * SDM says that segment selectors are loaded before segment + * descriptors + */ + kvm_load_segment_selector(vcpu, tss->ldt_selector, VCPU_SREG_LDTR); + kvm_load_segment_selector(vcpu, tss->es, VCPU_SREG_ES); + kvm_load_segment_selector(vcpu, tss->cs, VCPU_SREG_CS); + kvm_load_segment_selector(vcpu, tss->ss, VCPU_SREG_SS); + kvm_load_segment_selector(vcpu, tss->ds, VCPU_SREG_DS); + kvm_load_segment_selector(vcpu, tss->fs, VCPU_SREG_FS); + kvm_load_segment_selector(vcpu, tss->gs, VCPU_SREG_GS); + + /* + * Now load segment descriptors. 
If fault happenes at this stage + * it is handled in a context of new task + */ + if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, VCPU_SREG_LDTR)) return 1; - if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES)) + if (kvm_load_segment_descriptor(vcpu, tss->es, VCPU_SREG_ES)) return 1; - if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS)) + if (kvm_load_segment_descriptor(vcpu, tss->cs, VCPU_SREG_CS)) return 1; - if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS)) + if (kvm_load_segment_descriptor(vcpu, tss->ss, VCPU_SREG_SS)) return 1; - if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS)) + if (kvm_load_segment_descriptor(vcpu, tss->ds, VCPU_SREG_DS)) return 1; - if (kvm_load_segment_descriptor(vcpu, tss->fs, 1, VCPU_SREG_FS)) + if (kvm_load_segment_descriptor(vcpu, tss->fs, VCPU_SREG_FS)) return 1; - if (kvm_load_segment_descriptor(vcpu, tss->gs, 1, VCPU_SREG_GS)) + if (kvm_load_segment_descriptor(vcpu, tss->gs, VCPU_SREG_GS)) return 1; return 0; } @@ -4934,19 +5027,33 @@ static int load_state_from_tss16(struct kvm_vcpu *vcpu, kvm_register_write(vcpu, VCPU_REGS_RSI, tss->si); kvm_register_write(vcpu, VCPU_REGS_RDI, tss->di); - if (kvm_load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR)) + /* + * SDM says that segment selectors are loaded before segment + * descriptors + */ + kvm_load_segment_selector(vcpu, tss->ldt, VCPU_SREG_LDTR); + kvm_load_segment_selector(vcpu, tss->es, VCPU_SREG_ES); + kvm_load_segment_selector(vcpu, tss->cs, VCPU_SREG_CS); + kvm_load_segment_selector(vcpu, tss->ss, VCPU_SREG_SS); + kvm_load_segment_selector(vcpu, tss->ds, VCPU_SREG_DS); + + /* + * Now load segment descriptors. If fault happenes at this stage + * it is handled in a context of new task + */ + if (kvm_load_segment_descriptor(vcpu, tss->ldt, VCPU_SREG_LDTR)) return 1; - if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES)) + if (kvm_load_segment_descriptor(vcpu, tss->es, VCPU_SREG_ES)) return 1; - if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS)) + if (kvm_load_segment_descriptor(vcpu, tss->cs, VCPU_SREG_CS)) return 1; - if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS)) + if (kvm_load_segment_descriptor(vcpu, tss->ss, VCPU_SREG_SS)) return 1; - if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS)) + if (kvm_load_segment_descriptor(vcpu, tss->ds, VCPU_SREG_DS)) return 1; return 0; } -- cgit v1.2.3-70-g09d2 From e54cfa97a9ca9a544a7257b89b530b505ae1b892 Mon Sep 17 00:00:00 2001 From: Takuya Yoshikawa Date: Thu, 18 Feb 2010 12:15:02 +0200 Subject: KVM: Fix emulate_sys[call, enter, exit]()'s fault handling This patch fixes emulate_syscall(), emulate_sysenter() and emulate_sysexit() to handle injected faults properly. Even though the original code injects faults in these functions, we cannot handle them unless we use a return value distinct from the UNHANDLEABLE case. So this patch uses X86EMUL_* codes instead of -1 and 0 and makes x86_emulate_insn() handle the propagated faults. Note that, in x86_emulate_insn(), goto cannot_emulate and goto done with rc equal to X86EMUL_UNHANDLEABLE have the same effect. 
Signed-off-by: Takuya Yoshikawa Signed-off-by: Gleb Natapov Signed-off-by: Avi Kivity --- arch/x86/kvm/emulate.c | 37 ++++++++++++++++++++----------------- 1 file changed, 20 insertions(+), 17 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index a1a7b27adf4..4dade6ac082 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -1599,7 +1599,7 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt) /* syscall is not available in real mode */ if (ctxt->mode == X86EMUL_MODE_REAL || ctxt->mode == X86EMUL_MODE_VM86) - return -1; + return X86EMUL_UNHANDLEABLE; setup_syscalls_segments(ctxt, &cs, &ss); @@ -1636,7 +1636,7 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt) ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF); } - return 0; + return X86EMUL_CONTINUE; } static int @@ -1649,14 +1649,14 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt) /* inject #GP if in real mode */ if (ctxt->mode == X86EMUL_MODE_REAL) { kvm_inject_gp(ctxt->vcpu, 0); - return -1; + return X86EMUL_UNHANDLEABLE; } /* XXX sysenter/sysexit have not been tested in 64bit mode. * Therefore, we inject an #UD. */ if (ctxt->mode == X86EMUL_MODE_PROT64) - return -1; + return X86EMUL_UNHANDLEABLE; setup_syscalls_segments(ctxt, &cs, &ss); @@ -1665,13 +1665,13 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt) case X86EMUL_MODE_PROT32: if ((msr_data & 0xfffc) == 0x0) { kvm_inject_gp(ctxt->vcpu, 0); - return -1; + return X86EMUL_PROPAGATE_FAULT; } break; case X86EMUL_MODE_PROT64: if (msr_data == 0x0) { kvm_inject_gp(ctxt->vcpu, 0); - return -1; + return X86EMUL_PROPAGATE_FAULT; } break; } @@ -1696,7 +1696,7 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt) kvm_x86_ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_ESP, &msr_data); c->regs[VCPU_REGS_RSP] = msr_data; - return 0; + return X86EMUL_CONTINUE; } static int @@ -1711,7 +1711,7 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt) if (ctxt->mode == X86EMUL_MODE_REAL || ctxt->mode == X86EMUL_MODE_VM86) { kvm_inject_gp(ctxt->vcpu, 0); - return -1; + return X86EMUL_UNHANDLEABLE; } setup_syscalls_segments(ctxt, &cs, &ss); @@ -1729,7 +1729,7 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt) cs.selector = (u16)(msr_data + 16); if ((msr_data & 0xfffc) == 0x0) { kvm_inject_gp(ctxt->vcpu, 0); - return -1; + return X86EMUL_PROPAGATE_FAULT; } ss.selector = (u16)(msr_data + 24); break; @@ -1737,7 +1737,7 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt) cs.selector = (u16)(msr_data + 32); if (msr_data == 0x0) { kvm_inject_gp(ctxt->vcpu, 0); - return -1; + return X86EMUL_PROPAGATE_FAULT; } ss.selector = cs.selector + 8; cs.db = 0; @@ -1753,7 +1753,7 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt) c->eip = ctxt->vcpu->arch.regs[VCPU_REGS_RDX]; c->regs[VCPU_REGS_RSP] = ctxt->vcpu->arch.regs[VCPU_REGS_RCX]; - return 0; + return X86EMUL_CONTINUE; } static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt) @@ -2476,8 +2476,9 @@ twobyte_insn: } break; case 0x05: /* syscall */ - if (emulate_syscall(ctxt) == -1) - goto cannot_emulate; + rc = emulate_syscall(ctxt); + if (rc != X86EMUL_CONTINUE) + goto done; else goto writeback; break; @@ -2548,14 +2549,16 @@ twobyte_insn: c->dst.type = OP_NONE; break; case 0x34: /* sysenter */ - if (emulate_sysenter(ctxt) == -1) - goto cannot_emulate; + rc = emulate_sysenter(ctxt); + if (rc != X86EMUL_CONTINUE) + goto done; else goto writeback; break; case 0x35: /* sysexit */ - if (emulate_sysexit(ctxt) == -1) - goto cannot_emulate; + rc = emulate_sysexit(ctxt); + if (rc != X86EMUL_CONTINUE) + goto done; else goto writeback; 
break; -- cgit v1.2.3-70-g09d2 From c573cd22939e54fc1b8e672054a505048987a7cb Mon Sep 17 00:00:00 2001 From: Jan Kiszka Date: Tue, 23 Feb 2010 17:47:53 +0100 Subject: KVM: VMX: Update instruction length on intercepted BP We intercept #BP while in guest debugging mode. As VM exits due to intercepted exceptions do not necessarily come with valid idt_vectoring, we have to update event_exit_inst_len explicitly in such cases. At least in the absence of migration, this ensures that re-injections of #BP will find and use the correct instruction length. Signed-off-by: Jan Kiszka Cc: stable@kernel.org (2.6.32, 2.6.33) Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'arch') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index f82b0723afa..14873b9f843 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2775,6 +2775,12 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu, kvm_queue_exception(vcpu, vec); return 1; case BP_VECTOR: + /* + * Update instruction length as we may reinject the exception + * from user space while in guest debugging mode. + */ + to_vmx(vcpu)->vcpu.arch.event_exit_inst_len = + vmcs_read32(VM_EXIT_INSTRUCTION_LEN); if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) return 0; /* fall through */ @@ -2897,6 +2903,13 @@ static int handle_exception(struct kvm_vcpu *vcpu) kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7); /* fall through */ case BP_VECTOR: + /* + * Update instruction length as we may reinject #BP from + * user space while in guest debugging mode. Reading it for + * #DB as well causes no harm, it is not used in that case. + */ + vmx->vcpu.arch.event_exit_inst_len = + vmcs_read32(VM_EXIT_INSTRUCTION_LEN); kvm_run->exit_reason = KVM_EXIT_DEBUG; kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip; kvm_run->debug.arch.exception = ex_no; -- cgit v1.2.3-70-g09d2 From d2be1651b736002e0c76d7095d6c0ba77b4a897c Mon Sep 17 00:00:00 2001 From: Jan Kiszka Date: Tue, 23 Feb 2010 17:47:57 +0100 Subject: KVM: x86: Add KVM_CAP_X86_ROBUST_SINGLESTEP This marks the guest single-step API improvement of 94fe45da and 91586a3b with a capability flag to allow reliable detection by user space. Signed-off-by: Jan Kiszka Cc: stable@kernel.org (2.6.33) Signed-off-by: Avi Kivity --- arch/x86/kvm/x86.c | 1 + include/linux/kvm.h | 1 + 2 files changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index c3d2acbbb91..e46282a5656 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1570,6 +1570,7 @@ int kvm_dev_ioctl_check_extension(long ext) case KVM_CAP_HYPERV_VAPIC: case KVM_CAP_HYPERV_SPIN: case KVM_CAP_PCI_SEGMENT: + case KVM_CAP_X86_ROBUST_SINGLESTEP: r = 1; break; case KVM_CAP_COALESCED_MMIO: diff --git a/include/linux/kvm.h b/include/linux/kvm.h index dfa54be881f..60df9c84eca 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h @@ -501,6 +501,7 @@ struct kvm_ioeventfd { #define KVM_CAP_HYPERV_VAPIC 45 #define KVM_CAP_HYPERV_SPIN 46 #define KVM_CAP_PCI_SEGMENT 47 +#define KVM_CAP_X86_ROBUST_SINGLESTEP 51 #ifdef KVM_CAP_IRQ_ROUTING -- cgit v1.2.3-70-g09d2
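
A minimal user-space sketch of the detection that KVM_CAP_X86_ROBUST_SINGLESTEP is meant to enable (not part of the patch series itself): it probes the capability with the standard KVM_CHECK_EXTENSION ioctl on the /dev/kvm file descriptor. The fallback define only mirrors the value 51 added to include/linux/kvm.h above, for builds against older headers; the probe program and its output strings are purely illustrative.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#ifndef KVM_CAP_X86_ROBUST_SINGLESTEP
#define KVM_CAP_X86_ROBUST_SINGLESTEP 51	/* value from the hunk above, for pre-2.6.34 headers */
#endif

int main(void)
{
	int kvm, ret;

	kvm = open("/dev/kvm", O_RDWR);
	if (kvm < 0) {
		perror("open /dev/kvm");
		return 1;
	}

	/* KVM_CHECK_EXTENSION returns a positive value if the capability is present, 0 otherwise. */
	ret = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_X86_ROBUST_SINGLESTEP);
	printf("robust guest single-step: %s\n", ret > 0 ? "supported" : "not supported");

	close(kvm);
	return 0;
}

On kernels without this patch the ioctl simply returns 0 for the unknown capability, so the same probe degrades gracefully and user space can fall back to the pre-94fe45da single-step behaviour.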