author     Avi Kivity <avi@redhat.com>    2012-09-12 14:03:28 +0300
committer  Avi Kivity <avi@redhat.com>    2012-09-20 13:00:08 +0300
commit     3d34adec7081621ff51c195be045b87d75c0c49d
tree       0f2be432c532d25e8979b3853eb57af754d96f72 /arch/x86/kvm
parent     edc2ae84eb40a3c062210fe01af1cae1633cc810
KVM: MMU: Move gpte_access() out of paging_tmpl.h
We no longer rely on paging_tmpl.h defines, so we can move the function
to mmu.c.

Rely on zero extension to 64 bits to get the correct NX behaviour.
Reviewed-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
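
The zero extension works because a 32-bit gpte promoted to u64 always has
bit 63 (PT64_NX_SHIFT) clear, so gpte >> PT64_NX_SHIFT is 0 and execute
permission is left intact; only a genuine 64-bit gpte with NX set strips
ACC_EXEC_MASK. Below is a minimal standalone sketch of the same computation;
the mask and shift values are redeclared here so it compiles outside the
kernel tree, and the unused vcpu parameter is dropped. It is an illustration
of the trick, not the patched kernel code itself.

#include <stdio.h>
#include <stdint.h>

/* Values mirroring the kernel's definitions, redeclared for a
 * standalone build. */
#define PT_WRITABLE_MASK (1ULL << 1)	/* R/W bit */
#define PT_USER_MASK	 (1ULL << 2)	/* U/S bit */
#define PT64_NX_SHIFT	 63		/* NX bit, 64-bit PTEs only */
#define ACC_EXEC_MASK	 1

static unsigned gpte_access(uint64_t gpte)
{
	unsigned access;

	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
	/* A 32-bit gpte zero-extends with bit 63 clear, so the shift
	 * yields 0 and the &= ~0 leaves exec permission untouched. */
	access &= ~(gpte >> PT64_NX_SHIFT);
	return access;
}

int main(void)
{
	uint32_t gpte32 = 0x7;			/* P|RW|US; no NX bit exists */
	uint64_t gpte64 = (1ULL << 63) | 0x7;	/* same bits plus NX */

	printf("32-bit gpte access: %x\n", gpte_access(gpte32)); /* 7: exec kept */
	printf("64-bit gpte access: %x\n", gpte_access(gpte64)); /* 6: exec stripped */
	return 0;
}

This is why the #if PTTYPE == 64 guard in the old FNAME(gpte_access) becomes
unnecessary once the argument type is widened to u64, as the diff below shows.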
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--  arch/x86/kvm/mmu.c          10
-rw-r--r--  arch/x86/kvm/paging_tmpl.h  21
2 files changed, 15 insertions, 16 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 54c9cb4fdfa..f297a2ccf4f 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3437,6 +3437,16 @@ static bool sync_mmio_spte(u64 *sptep, gfn_t gfn, unsigned access,
 	return false;
 }
 
+static inline unsigned gpte_access(struct kvm_vcpu *vcpu, u64 gpte)
+{
+	unsigned access;
+
+	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
+	access &= ~(gpte >> PT64_NX_SHIFT);
+
+	return access;
+}
+
 #define PTTYPE 64
 #include "paging_tmpl.h"
 #undef PTTYPE
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 064bcb32d84..1cbf576852c 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -101,17 +101,6 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 	return (ret != orig_pte);
 }
 
-static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
-{
-	unsigned access;
-
-	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
-#if PTTYPE == 64
-	access &= ~(gpte >> PT64_NX_SHIFT);
-#endif
-	return access;
-}
-
 static bool FNAME(is_last_gpte)(struct guest_walker *walker,
 				struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 				pt_element_t gpte)
@@ -217,7 +206,7 @@ retry_walk:
 		last_gpte = FNAME(is_last_gpte)(walker, vcpu, mmu, pte);
 		if (last_gpte) {
-			pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);
+			pte_access = pt_access & gpte_access(vcpu, pte);
 			/* check if the kernel is fetching from user page */
 			if (unlikely(pte_access & PT_USER_MASK) &&
 			    kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))
@@ -268,7 +257,7 @@ retry_walk:
 			break;
 		}
 
-		pt_access &= FNAME(gpte_access)(vcpu, pte);
+		pt_access &= gpte_access(vcpu, pte);
 		--walker->level;
 	}
@@ -364,7 +353,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 		return;
 
 	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
-	pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
+	pte_access = sp->role.access & gpte_access(vcpu, gpte);
 	protect_clean_gpte(&pte_access, gpte);
 	pfn = gfn_to_pfn_atomic(vcpu->kvm, gpte_to_gfn(gpte));
 	if (mmu_invalid_pfn(pfn))
@@ -438,7 +427,7 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
 		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
 			continue;
 
-		pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
+		pte_access = sp->role.access & gpte_access(vcpu, gpte);
 		protect_clean_gpte(&pte_access, gpte);
 		gfn = gpte_to_gfn(gpte);
 		pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
@@ -791,7 +780,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 		gfn = gpte_to_gfn(gpte);
 		pte_access = sp->role.access;
-		pte_access &= FNAME(gpte_access)(vcpu, gpte);
+		pte_access &= gpte_access(vcpu, gpte);
 		protect_clean_gpte(&pte_access, gpte);
 
 		if (sync_mmio_spte(&sp->spt[i], gfn, pte_access,
 				   &nr_present))