author     Linus Torvalds <torvalds@linux-foundation.org>    2011-01-13 10:14:24 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>    2011-01-13 10:14:24 -0800
commit     55065bc52795faae549abfb912aacc622dd63876 (patch)
tree       63683547e41ed459a2a8747eeafb5e969633d54f /arch/x86/kvm/kvm_cache_regs.h
parent     008d23e4852d78bb2618f2035f8b2110b6a6b968 (diff)
parent     e5c301428294cb8925667c9ee39f817c4ab1c2c9 (diff)
Merge branch 'kvm-updates/2.6.38' of git://git.kernel.org/pub/scm/virt/kvm/kvm
* 'kvm-updates/2.6.38' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (142 commits)
KVM: Initialize fpu state in preemptible context
KVM: VMX: when entering real mode align segment base to 16 bytes
KVM: MMU: handle 'map_writable' in set_spte() function
KVM: MMU: audit: allow audit more guests at the same time
KVM: Fetch guest cr3 from hardware on demand
KVM: Replace reads of vcpu->arch.cr3 by an accessor
KVM: MMU: only write protect mappings at pagetable level
KVM: VMX: Correct asm constraint in vmcs_load()/vmcs_clear()
KVM: MMU: Initialize base_role for tdp mmus
KVM: VMX: Optimize atomic EFER load
KVM: VMX: Add definitions for more vm entry/exit control bits
KVM: SVM: copy instruction bytes from VMCB
KVM: SVM: implement enhanced INVLPG intercept
KVM: SVM: enhance mov DR intercept handler
KVM: SVM: enhance MOV CR intercept handler
KVM: SVM: add new SVM feature bit names
KVM: cleanup emulate_instruction
KVM: move complete_insn_gp() into x86.c
KVM: x86: fix CR8 handling
KVM guest: Fix kvm clock initialization when it's configured out
...
Diffstat (limited to 'arch/x86/kvm/kvm_cache_regs.h')
-rw-r--r--   arch/x86/kvm/kvm_cache_regs.h   22
1 file changed, 22 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index 975bb45329a..3377d53fcd3 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -73,6 +73,13 @@ static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
 	return vcpu->arch.cr4 & mask;
 }
 
+static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
+{
+	if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
+		kvm_x86_ops->decache_cr3(vcpu);
+	return vcpu->arch.cr3;
+}
+
 static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
 {
 	return kvm_read_cr4_bits(vcpu, ~0UL);
@@ -84,4 +91,19 @@ static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
 		| ((u64)(kvm_register_read(vcpu, VCPU_REGS_RDX) & -1u) << 32);
 }
 
+static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.hflags |= HF_GUEST_MASK;
+}
+
+static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.hflags &= ~HF_GUEST_MASK;
+}
+
+static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.hflags & HF_GUEST_MASK;
+}
+
 #endif
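The new kvm_read_cr3() accessor follows the same lazy register-cache pattern this header already uses for the GPRs and CR4 bits: callers no longer read vcpu->arch.cr3 directly; if CR3 has not been decached from hardware since the last exit, the vendor backend is asked to fetch it first ("KVM: Fetch guest cr3 from hardware on demand" / "KVM: Replace reads of vcpu->arch.cr3 by an accessor" in the shortlog above). The sketch below is a minimal, self-contained user-space illustration of that pattern only; the types, the FAKE_* names, and the fake_ops hook are simplified stand-ins, not the real kernel definitions.

/*
 * Minimal sketch of the lazy register-cache pattern used by kvm_read_cr3()
 * in the diff above.  All structures here are hypothetical stand-ins kept
 * only so the example compiles on its own: if the cached CR3 value is not
 * marked available, ask the vendor backend (VMX/SVM in the real code) to
 * decache it from hardware, then return the cached copy.
 */
#include <stdbool.h>
#include <stdio.h>

enum { FAKE_EXREG_CR3 = 0 };             /* stand-in for VCPU_EXREG_CR3 */

struct fake_vcpu {
	unsigned long regs_avail;        /* bitmask of up-to-date registers */
	unsigned long cr3;               /* cached CR3 value */
};

struct fake_x86_ops {
	void (*decache_cr3)(struct fake_vcpu *vcpu);
};

/* Pretend to read CR3 back from the VMCS/VMCB and mark it available. */
static void fake_decache_cr3(struct fake_vcpu *vcpu)
{
	vcpu->cr3 = 0x1234000UL;         /* made-up "hardware" value */
	vcpu->regs_avail |= 1UL << FAKE_EXREG_CR3;
}

static struct fake_x86_ops fake_ops = { .decache_cr3 = fake_decache_cr3 };

/* Same shape as the kvm_read_cr3() added in the diff above. */
static unsigned long fake_read_cr3(struct fake_vcpu *vcpu)
{
	if (!(vcpu->regs_avail & (1UL << FAKE_EXREG_CR3)))
		fake_ops.decache_cr3(vcpu);
	return vcpu->cr3;
}

int main(void)
{
	struct fake_vcpu vcpu = { .regs_avail = 0, .cr3 = 0 };

	/* First read triggers the decache; the second hits the cache. */
	printf("cr3 = %#lx\n", fake_read_cr3(&vcpu));
	printf("cr3 = %#lx\n", fake_read_cr3(&vcpu));
	return 0;
}

The enter_guest_mode()/leave_guest_mode()/is_guest_mode() helpers added in the second hunk need no such illustration: they are plain wrappers that set, clear, and test the HF_GUEST_MASK bit in vcpu->arch.hflags so nested-virtualization code has one place to check whether the vcpu is currently running a nested guest.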