author		Takashi Iwai <tiwai@suse.de>	2010-08-18 15:17:30 +0200
committer	Takashi Iwai <tiwai@suse.de>	2010-08-18 15:17:30 +0200
commit		6ab561c8aab2e4af535f09adbc6253f958536848 (patch)
tree		37846adb4ea106485720d113e252d71d615c23ed /arch/powerpc/kvm
parent		4f4e8f69895c8696a4bcc751817d4b186023ac44 (diff)
parent		cbaa9f60d5d5c3af10f94e0d49789d5b82341a4a (diff)
Merge branch 'topic/isa' into topic/misc
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--	arch/powerpc/kvm/44x_tlb.c			3
-rw-r--r--	arch/powerpc/kvm/Makefile			2
-rw-r--r--	arch/powerpc/kvm/book3s.c			79
-rw-r--r--	arch/powerpc/kvm/book3s_32_mmu.c		8
-rw-r--r--	arch/powerpc/kvm/book3s_32_mmu_host.c		134
-rw-r--r--	arch/powerpc/kvm/book3s_64_mmu_host.c		129
-rw-r--r--	arch/powerpc/kvm/book3s_mmu_hpte.c		277
-rw-r--r--	arch/powerpc/kvm/book3s_paired_singles.c	94
-rw-r--r--	arch/powerpc/kvm/booke.c			12
-rw-r--r--	arch/powerpc/kvm/fpu.S				18
-rw-r--r--	arch/powerpc/kvm/powerpc.c			14
-rw-r--r--	arch/powerpc/kvm/timing.c			2
12 files changed, 405 insertions, 367 deletions
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index 812312542e5..9b9b5cdea84 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -316,7 +316,8 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
 	gfn = gpaddr >> PAGE_SHIFT;
 	new_page = gfn_to_page(vcpu->kvm, gfn);
 	if (is_error_page(new_page)) {
-		printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", gfn);
+		printk(KERN_ERR "Couldn't get guest page for gfn %llx!\n",
+		       (unsigned long long)gfn);
 		kvm_release_page_clean(new_page);
 		return;
 	}
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index ff436066bf7..d45c818a384 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -45,6 +45,7 @@ kvm-book3s_64-objs := \
 	book3s.o \
 	book3s_emulate.o \
 	book3s_interrupts.o \
+	book3s_mmu_hpte.o \
 	book3s_64_mmu_host.o \
 	book3s_64_mmu.o \
 	book3s_32_mmu.o
@@ -57,6 +58,7 @@ kvm-book3s_32-objs := \
 	book3s.o \
 	book3s_emulate.o \
 	book3s_interrupts.o \
+	book3s_mmu_hpte.o \
 	book3s_32_mmu_host.o \
 	book3s_32_mmu.o
 kvm-objs-$(CONFIG_KVM_BOOK3S_32) := $(kvm-book3s_32-objs)
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index b998abf1a63..a3cef30d1d4 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -1047,8 +1047,6 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
 	int i;
 
-	vcpu_load(vcpu);
-
 	regs->pc = kvmppc_get_pc(vcpu);
 	regs->cr = kvmppc_get_cr(vcpu);
 	regs->ctr = kvmppc_get_ctr(vcpu);
@@ -1069,8 +1067,6 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
 		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
 
-	vcpu_put(vcpu);
-
 	return 0;
 }
 
@@ -1078,8 +1074,6 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
 	int i;
 
-	vcpu_load(vcpu);
-
 	kvmppc_set_pc(vcpu, regs->pc);
 	kvmppc_set_cr(vcpu, regs->cr);
 	kvmppc_set_ctr(vcpu, regs->ctr);
@@ -1099,8 +1093,6 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
 		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
 
-	vcpu_put(vcpu);
-
 	return 0;
 }
 
@@ -1110,8 +1102,6 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	int i;
 
-	vcpu_load(vcpu);
-
 	sregs->pvr = vcpu->arch.pvr;
 
 	sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
@@ -1131,8 +1121,6 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 		}
 	}
 
-	vcpu_put(vcpu);
-
 	return 0;
 }
 
@@ -1142,8 +1130,6 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	int i;
 
-	vcpu_load(vcpu);
-
 	kvmppc_set_pvr(vcpu, sregs->pvr);
 
 	vcpu3s->sdr1 = sregs->u.s.sdr1;
@@ -1171,8 +1157,6 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	/* Flush the MMU after messing with the segments */
 	kvmppc_mmu_pte_flush(vcpu, 0, 0);
 
-	vcpu_put(vcpu);
-
 	return 0;
 }
 
@@ -1309,12 +1293,17 @@ extern int __kvmppc_vcpu_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
 int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
 	int ret;
-	struct thread_struct ext_bkp;
+	double fpr[32][TS_FPRWIDTH];
+	unsigned int fpscr;
+	int fpexc_mode;
 #ifdef CONFIG_ALTIVEC
-	bool save_vec = current->thread.used_vr;
+	vector128 vr[32];
+	vector128 vscr;
+	unsigned long uninitialized_var(vrsave);
+	int used_vr;
 #endif
 #ifdef CONFIG_VSX
-	bool save_vsx = current->thread.used_vsr;
+	int used_vsr;
 #endif
 	ulong ext_msr;
 
@@ -1327,27 +1316,27 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	/* Save FPU state in stack */
 	if (current->thread.regs->msr & MSR_FP)
 		giveup_fpu(current);
-	memcpy(ext_bkp.fpr, current->thread.fpr, sizeof(current->thread.fpr));
-	ext_bkp.fpscr = current->thread.fpscr;
-	ext_bkp.fpexc_mode = current->thread.fpexc_mode;
+	memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
+	fpscr = current->thread.fpscr.val;
+	fpexc_mode = current->thread.fpexc_mode;
 
 #ifdef CONFIG_ALTIVEC
 	/* Save Altivec state in stack */
-	if (save_vec) {
+	used_vr = current->thread.used_vr;
+	if (used_vr) {
 		if (current->thread.regs->msr & MSR_VEC)
 			giveup_altivec(current);
-		memcpy(ext_bkp.vr, current->thread.vr, sizeof(ext_bkp.vr));
-		ext_bkp.vscr = current->thread.vscr;
-		ext_bkp.vrsave = current->thread.vrsave;
+		memcpy(vr, current->thread.vr, sizeof(current->thread.vr));
+		vscr = current->thread.vscr;
+		vrsave = current->thread.vrsave;
 	}
-	ext_bkp.used_vr = current->thread.used_vr;
 #endif
 
 #ifdef CONFIG_VSX
 	/* Save VSX state in stack */
-	if (save_vsx && (current->thread.regs->msr & MSR_VSX))
+	used_vsr = current->thread.used_vsr;
+	if (used_vsr && (current->thread.regs->msr & MSR_VSX))
 		__giveup_vsx(current);
-	ext_bkp.used_vsr = current->thread.used_vsr;
 #endif
 
 	/* Remember the MSR with disabled extensions */
@@ -1372,22 +1361,22 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	kvmppc_giveup_ext(vcpu, MSR_VSX);
 
 	/* Restore FPU state from stack */
-	memcpy(current->thread.fpr, ext_bkp.fpr, sizeof(ext_bkp.fpr));
-	current->thread.fpscr = ext_bkp.fpscr;
-	current->thread.fpexc_mode = ext_bkp.fpexc_mode;
+	memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
+	current->thread.fpscr.val = fpscr;
+	current->thread.fpexc_mode = fpexc_mode;
 
 #ifdef CONFIG_ALTIVEC
 	/* Restore Altivec state from stack */
-	if (save_vec && current->thread.used_vr) {
-		memcpy(current->thread.vr, ext_bkp.vr, sizeof(ext_bkp.vr));
-		current->thread.vscr = ext_bkp.vscr;
-		current->thread.vrsave= ext_bkp.vrsave;
+	if (used_vr && current->thread.used_vr) {
+		memcpy(current->thread.vr, vr, sizeof(current->thread.vr));
+		current->thread.vscr = vscr;
+		current->thread.vrsave = vrsave;
 	}
-	current->thread.used_vr = ext_bkp.used_vr;
+	current->thread.used_vr = used_vr;
 #endif
 
 #ifdef CONFIG_VSX
-	current->thread.used_vsr = ext_bkp.used_vsr;
+	current->thread.used_vsr = used_vsr;
 #endif
 
 	return ret;
@@ -1395,12 +1384,22 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 
 static int kvmppc_book3s_init(void)
 {
-	return kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0,
-			THIS_MODULE);
+	int r;
+
+	r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0,
+		     THIS_MODULE);
+
+	if (r)
+		return r;
+
+	r = kvmppc_mmu_hpte_sysinit();
+
+	return r;
 }
 
 static void kvmppc_book3s_exit(void)
 {
+	kvmppc_mmu_hpte_sysexit();
 	kvm_exit();
 }
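Note on the __kvmppc_vcpu_run() hunks above: the old code backed host FPU/Altivec/VSX state into a full on-stack struct thread_struct, which is large and mostly irrelevant here; the new code keeps plain locals for exactly the registers the guest can clobber, and saves the FPSCR by value via current->thread.fpscr.val. A minimal userspace-style sketch of that save/run/restore shape, with made-up names (thread_fpu, enter_guest) standing in for the kernel's thread state and guest entry:

```c
#include <string.h>

/* Hypothetical stand-ins for the thread's FPU state and the guest
 * entry point; names are illustrative, not the kernel's. */
struct fpu_state { double fpr[32]; unsigned int fpscr; };
static struct fpu_state thread_fpu;

static int enter_guest(void)
{
	thread_fpu.fpscr = 0xdeadbeef;	/* guest clobbers host FPU state */
	return 0;
}

static int run_guest_with_fpu_backup(void)
{
	/* Back up only what the guest may clobber, in plain locals. */
	double fpr[32];
	unsigned int fpscr;
	int ret;

	memcpy(fpr, thread_fpu.fpr, sizeof(fpr));
	fpscr = thread_fpu.fpscr;

	ret = enter_guest();

	/* Restore host state unconditionally after the run. */
	memcpy(thread_fpu.fpr, fpr, sizeof(fpr));
	thread_fpu.fpscr = fpscr;
	return ret;
}
```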
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index 0b10503c8a4..3292d76101d 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -354,10 +354,10 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 		*vsid = VSID_REAL_DR | gvsid;
 		break;
 	case MSR_DR|MSR_IR:
-		if (!sr->valid)
-			return -1;
-
-		*vsid = sr->vsid;
+		if (sr->valid)
+			*vsid = sr->vsid;
+		else
+			*vsid = VSID_BAT | gvsid;
 		break;
 	default:
 		BUG();
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index 0bb66005338..0b51ef872c1 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -19,6 +19,7 @@
  */
 
 #include <linux/kvm_host.h>
+#include <linux/hash.h>
 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
@@ -57,139 +58,26 @@ static ulong htab;
 static u32 htabmask;
 
-static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
+void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
 {
 	volatile u32 *pteg;
 
-	dprintk_mmu("KVM: Flushing SPTE: 0x%llx (0x%llx) -> 0x%llx\n",
-		    pte->pte.eaddr, pte->pte.vpage, pte->host_va);
-
+	/* Remove from host HTAB */
 	pteg = (u32*)pte->slot;
-
 	pteg[0] = 0;
+
+	/* And make sure it's gone from the TLB too */
 	asm volatile ("sync");
 	asm volatile ("tlbie %0" : : "r" (pte->pte.eaddr) : "memory");
 	asm volatile ("sync");
 	asm volatile ("tlbsync");
-
-	pte->host_va = 0;
-
-	if (pte->pte.may_write)
-		kvm_release_pfn_dirty(pte->pfn);
-	else
-		kvm_release_pfn_clean(pte->pfn);
-}
-void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
-{
-	int i;
-
-	dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%x & 0x%x\n",
-		    vcpu->arch.hpte_cache_offset, guest_ea, ea_mask);
-	BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
-
-	guest_ea &= ea_mask;
-	for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
-		struct hpte_cache *pte;
-
-		pte = &vcpu->arch.hpte_cache[i];
-		if (!pte->host_va)
-			continue;
-
-		if ((pte->pte.eaddr & ea_mask) == guest_ea) {
-			invalidate_pte(vcpu, pte);
-		}
-	}
-
-	/* Doing a complete flush -> start from scratch */
-	if (!ea_mask)
-		vcpu->arch.hpte_cache_offset = 0;
-}
-void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
-{
-	int i;
-
-	dprintk_mmu("KVM: Flushing %d Shadow vPTEs: 0x%llx & 0x%llx\n",
-		    vcpu->arch.hpte_cache_offset, guest_vp, vp_mask);
-	BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
-
-	guest_vp &= vp_mask;
-	for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
-		struct hpte_cache *pte;
-
-		pte = &vcpu->arch.hpte_cache[i];
-		if (!pte->host_va)
-			continue;
-
-		if ((pte->pte.vpage & vp_mask) == guest_vp) {
-			invalidate_pte(vcpu, pte);
-		}
-	}
-}
-void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
-{
-	int i;
-
-	dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%llx & 0x%llx\n",
-		    vcpu->arch.hpte_cache_offset, pa_start, pa_end);
-	BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
-
-	for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
-		struct hpte_cache *pte;
-
-		pte = &vcpu->arch.hpte_cache[i];
-		if (!pte->host_va)
-			continue;
-
-		if ((pte->pte.raddr >= pa_start) &&
-		    (pte->pte.raddr < pa_end)) {
-			invalidate_pte(vcpu, pte);
-		}
-	}
-}
-struct kvmppc_pte *kvmppc_mmu_find_pte(struct kvm_vcpu *vcpu, u64 ea, bool data)
-{
-	int i;
-	u64 guest_vp;
-
-	guest_vp = vcpu->arch.mmu.ea_to_vp(vcpu, ea, false);
-	for (i=0; i<vcpu->arch.hpte_cache_offset; i++) {
-		struct hpte_cache *pte;
-
-		pte = &vcpu->arch.hpte_cache[i];
-		if (!pte->host_va)
-			continue;
-
-		if (pte->pte.vpage == guest_vp)
-			return &pte->pte;
-	}
-
-	return NULL;
-}
-static int kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
-{
-	if (vcpu->arch.hpte_cache_offset == HPTEG_CACHE_NUM)
-		kvmppc_mmu_pte_flush(vcpu, 0, 0);
-
-	return vcpu->arch.hpte_cache_offset++;
 }
 
 /* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
  * a hash, so we don't waste cycles on looping */
 static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
 {
-	return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
-		     ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
-		     ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
-		     ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
-		     ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
-		     ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
-		     ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
-		     ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
+	return hash_64(gvsid, SID_MAP_BITS);
 }
@@ -256,7 +144,6 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
 	register int rr = 0;
 	bool primary = false;
 	bool evict = false;
-	int hpte_id;
 	struct hpte_cache *pte;
 
 	/* Get host physical address for gpa */
@@ -341,8 +228,7 @@ next_pteg:
 
 	/* Now tell our Shadow PTE code about the new page */
-	hpte_id = kvmppc_mmu_hpte_cache_next(vcpu);
-	pte = &vcpu->arch.hpte_cache[hpte_id];
+	pte = kvmppc_mmu_hpte_cache_next(vcpu);
 	dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%llx (0x%llx) -> %lx\n",
 		    orig_pte->may_write ? 'w' : '-',
@@ -355,6 +241,8 @@ next_pteg:
 	pte->pte = *orig_pte;
 	pte->pfn = hpaddr >> PAGE_SHIFT;
 
+	kvmppc_mmu_hpte_cache_map(vcpu, pte);
+
 	return 0;
 }
@@ -439,7 +327,7 @@ void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
 
 void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
 {
-	kvmppc_mmu_pte_flush(vcpu, 0, 0);
+	kvmppc_mmu_hpte_destroy(vcpu);
 	preempt_disable();
 	__destroy_context(to_book3s(vcpu)->context_id);
 	preempt_enable();
 }
@@ -479,5 +367,7 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
 	htabmask = ((sdr1 & 0x1FF) << 16) | 0xFFC0;
 	htab = (ulong)__va(sdr1 & 0xffff0000);
 
+	kvmppc_mmu_hpte_init(vcpu);
+
 	return 0;
 }
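Note on kvmppc_sid_hash(): both host MMU backends replace the hand-rolled XOR fold of the guest VSID with hash_64() from <linux/hash.h>, which is a single multiply by the 64-bit golden-ratio prime followed by a shift. A side-by-side sketch of the two hash shapes; the SID_MAP_BITS value of 9 is an assumption inferred from the "512 gvsid->hvsid entries" comment:

```c
#include <linux/hash.h>

#define SID_MAP_BITS	9			/* assumed: 2^9 = 512 entries */
#define SID_MAP_MASK	((1 << SID_MAP_BITS) - 1)

/* Old scheme: XOR-fold the 64-bit gvsid down to SID_MAP_BITS bits. */
static u16 sid_hash_xor_fold(u64 gvsid)
{
	u16 h = 0;
	int i;

	for (i = 0; i < 8; i++)
		h ^= (gvsid >> (SID_MAP_BITS * i)) & SID_MAP_MASK;
	return h;
}

/* New scheme: one multiplicative hash, same output width. */
static u16 sid_hash_hash64(u64 gvsid)
{
	return hash_64(gvsid, SID_MAP_BITS);
}
```

The two functions do not produce the same values; they only serve the same purpose (spreading gvsids over the 512-entry map), so the swap is safe because the map is just a cache.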
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index e4b5744977f..384179a5002 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -20,6 +20,7 @@
  */
 
 #include <linux/kvm_host.h>
+#include <linux/hash.h>
 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
@@ -46,135 +47,20 @@
 #define dprintk_slb(a, ...) do { } while(0)
 #endif
 
-static void invalidate_pte(struct hpte_cache *pte)
+void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
 {
-	dprintk_mmu("KVM: Flushing SPT: 0x%lx (0x%llx) -> 0x%llx\n",
-		    pte->pte.eaddr, pte->pte.vpage, pte->host_va);
-
 	ppc_md.hpte_invalidate(pte->slot, pte->host_va,
 			       MMU_PAGE_4K, MMU_SEGSIZE_256M,
 			       false);
-	pte->host_va = 0;
-
-	if (pte->pte.may_write)
-		kvm_release_pfn_dirty(pte->pfn);
-	else
-		kvm_release_pfn_clean(pte->pfn);
-}
-void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
-{
-	int i;
-
-	dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%lx & 0x%lx\n",
-		    vcpu->arch.hpte_cache_offset, guest_ea, ea_mask);
-	BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
-
-	guest_ea &= ea_mask;
-	for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
-		struct hpte_cache *pte;
-
-		pte = &vcpu->arch.hpte_cache[i];
-		if (!pte->host_va)
-			continue;
-
-		if ((pte->pte.eaddr & ea_mask) == guest_ea) {
-			invalidate_pte(pte);
-		}
-	}
-
-	/* Doing a complete flush -> start from scratch */
-	if (!ea_mask)
-		vcpu->arch.hpte_cache_offset = 0;
-}
-void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
-{
-	int i;
-
-	dprintk_mmu("KVM: Flushing %d Shadow vPTEs: 0x%llx & 0x%llx\n",
-		    vcpu->arch.hpte_cache_offset, guest_vp, vp_mask);
-	BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
-
-	guest_vp &= vp_mask;
-	for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
-		struct hpte_cache *pte;
-
-		pte = &vcpu->arch.hpte_cache[i];
-		if (!pte->host_va)
-			continue;
-
-		if ((pte->pte.vpage & vp_mask) == guest_vp) {
-			invalidate_pte(pte);
-		}
-	}
-}
-void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
-{
-	int i;
-
-	dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%lx & 0x%lx\n",
-		    vcpu->arch.hpte_cache_offset, pa_start, pa_end);
-	BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
-
-	for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
-		struct hpte_cache *pte;
-
-		pte = &vcpu->arch.hpte_cache[i];
-		if (!pte->host_va)
-			continue;
-
-		if ((pte->pte.raddr >= pa_start) &&
-		    (pte->pte.raddr < pa_end)) {
-			invalidate_pte(pte);
-		}
-	}
-}
-struct kvmppc_pte *kvmppc_mmu_find_pte(struct kvm_vcpu *vcpu, u64 ea, bool data)
-{
-	int i;
-	u64 guest_vp;
-
-	guest_vp = vcpu->arch.mmu.ea_to_vp(vcpu, ea, false);
-	for (i=0; i<vcpu->arch.hpte_cache_offset; i++) {
-		struct hpte_cache *pte;
-
-		pte = &vcpu->arch.hpte_cache[i];
-		if (!pte->host_va)
-			continue;
-
-		if (pte->pte.vpage == guest_vp)
-			return &pte->pte;
-	}
-
-	return NULL;
-}
-static int kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
-{
-	if (vcpu->arch.hpte_cache_offset == HPTEG_CACHE_NUM)
-		kvmppc_mmu_pte_flush(vcpu, 0, 0);
-
-	return vcpu->arch.hpte_cache_offset++;
 }
 
 /* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
  * a hash, so we don't waste cycles on looping */
 static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
 {
-	return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
-		     ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
-		     ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
-		     ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
-		     ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
-		     ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
-		     ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
-		     ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
+	return hash_64(gvsid, SID_MAP_BITS);
 }
 
-
 static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
 {
 	struct kvmppc_sid_map *map;
@@ -273,8 +159,7 @@ map_again:
 		attempt++;
 		goto map_again;
 	} else {
-		int hpte_id = kvmppc_mmu_hpte_cache_next(vcpu);
-		struct hpte_cache *pte = &vcpu->arch.hpte_cache[hpte_id];
+		struct hpte_cache *pte = kvmppc_mmu_hpte_cache_next(vcpu);
 
 		dprintk_mmu("KVM: %c%c Map 0x%lx: [%lx] 0x%lx (0x%llx) -> %lx\n",
 			    ((rflags & HPTE_R_PP) == 3) ? '-' : 'w',
@@ -292,6 +177,8 @@ map_again:
 		pte->host_va = va;
 		pte->pte = *orig_pte;
 		pte->pfn = hpaddr >> PAGE_SHIFT;
+
+		kvmppc_mmu_hpte_cache_map(vcpu, pte);
 	}
 
 	return 0;
@@ -418,7 +305,7 @@ void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
 
 void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
 {
-	kvmppc_mmu_pte_flush(vcpu, 0, 0);
+	kvmppc_mmu_hpte_destroy(vcpu);
 	__destroy_context(to_book3s(vcpu)->context_id);
 }
@@ -436,5 +323,7 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
 	vcpu3s->vsid_first = vcpu3s->context_id << USER_ESID_BITS;
 	vcpu3s->vsid_next = vcpu3s->vsid_first;
 
+	kvmppc_mmu_hpte_init(vcpu);
+
 	return 0;
 }
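Note: after this change both host MMU backends follow the same allocate → fill → publish sequence, and an entry only becomes visible to the flush paths once kvmppc_mmu_hpte_cache_map() hashes it onto the lists. A condensed sketch of that flow (field names taken from the hunks above; the helper name map_shadow_pte and the NULL check are additions for the sketch — the diff itself does not check the allocation):

```c
#include <linux/kvm_host.h>
#include <asm/kvm_book3s.h>

static int map_shadow_pte(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
			  ulong va, ulong hpaddr)
{
	struct hpte_cache *pte;

	pte = kvmppc_mmu_hpte_cache_next(vcpu);	/* kmem_cache-backed alloc */
	if (!pte)
		return -ENOMEM;

	/* Fill the entry completely before publishing it... */
	pte->host_va = va;
	pte->pte = *orig_pte;
	pte->pfn = hpaddr >> PAGE_SHIFT;

	/* ...then hash it onto the ePTE, vPTE and vPTE-long lists,
	 * making it reachable by the flush/invalidate paths. */
	kvmppc_mmu_hpte_cache_map(vcpu, pte);
	return 0;
}
```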
diff --git a/arch/powerpc/kvm/book3s_mmu_hpte.c b/arch/powerpc/kvm/book3s_mmu_hpte.c
new file mode 100644
index 00000000000..4868d4a7ebc
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_mmu_hpte.c
@@ -0,0 +1,277 @@
+/*
+ * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
+ *
+ * Authors:
+ *     Alexander Graf <agraf@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/hash.h>
+#include <linux/slab.h>
+
+#include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s.h>
+#include <asm/machdep.h>
+#include <asm/mmu_context.h>
+#include <asm/hw_irq.h>
+
+#define PTE_SIZE	12
+
+/* #define DEBUG_MMU */
+
+#ifdef DEBUG_MMU
+#define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__)
+#else
+#define dprintk_mmu(a, ...) do { } while(0)
+#endif
+
+static struct kmem_cache *hpte_cache;
+
+static inline u64 kvmppc_mmu_hash_pte(u64 eaddr)
+{
+	return hash_64(eaddr >> PTE_SIZE, HPTEG_HASH_BITS_PTE);
+}
+
+static inline u64 kvmppc_mmu_hash_vpte(u64 vpage)
+{
+	return hash_64(vpage & 0xfffffffffULL, HPTEG_HASH_BITS_VPTE);
+}
+
+static inline u64 kvmppc_mmu_hash_vpte_long(u64 vpage)
+{
+	return hash_64((vpage & 0xffffff000ULL) >> 12,
+		       HPTEG_HASH_BITS_VPTE_LONG);
+}
+
+void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
+{
+	u64 index;
+
+	/* Add to ePTE list */
+	index = kvmppc_mmu_hash_pte(pte->pte.eaddr);
+	hlist_add_head(&pte->list_pte, &vcpu->arch.hpte_hash_pte[index]);
+
+	/* Add to vPTE list */
+	index = kvmppc_mmu_hash_vpte(pte->pte.vpage);
+	hlist_add_head(&pte->list_vpte, &vcpu->arch.hpte_hash_vpte[index]);
+
+	/* Add to vPTE_long list */
+	index = kvmppc_mmu_hash_vpte_long(pte->pte.vpage);
+	hlist_add_head(&pte->list_vpte_long,
+		       &vcpu->arch.hpte_hash_vpte_long[index]);
+}
+
+static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
+{
+	dprintk_mmu("KVM: Flushing SPT: 0x%lx (0x%llx) -> 0x%llx\n",
+		    pte->pte.eaddr, pte->pte.vpage, pte->host_va);
+
+	/* Different for 32 and 64 bit */
+	kvmppc_mmu_invalidate_pte(vcpu, pte);
+
+	if (pte->pte.may_write)
+		kvm_release_pfn_dirty(pte->pfn);
+	else
+		kvm_release_pfn_clean(pte->pfn);
+
+	hlist_del(&pte->list_pte);
+	hlist_del(&pte->list_vpte);
+	hlist_del(&pte->list_vpte_long);
+
+	vcpu->arch.hpte_cache_count--;
+	kmem_cache_free(hpte_cache, pte);
+}
+
+static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
+{
+	struct hpte_cache *pte;
+	struct hlist_node *node, *tmp;
+	int i;
+
+	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
+		struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i];
+
+		hlist_for_each_entry_safe(pte, node, tmp, list, list_vpte_long)
+			invalidate_pte(vcpu, pte);
+	}
+}
+
+static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
+{
+	struct hlist_head *list;
+	struct hlist_node *node, *tmp;
+	struct hpte_cache *pte;
+
+	/* Find the list of entries in the map */
+	list = &vcpu->arch.hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)];
+
+	/* Check the list for matching entries and invalidate */
+	hlist_for_each_entry_safe(pte, node, tmp, list, list_pte)
+		if ((pte->pte.eaddr & ~0xfffUL) == guest_ea)
+			invalidate_pte(vcpu, pte);
+}
+
+void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
+{
+	u64 i;
+
+	dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%lx & 0x%lx\n",
+		    vcpu->arch.hpte_cache_count, guest_ea, ea_mask);
+
+	guest_ea &= ea_mask;
+
+	switch (ea_mask) {
+	case ~0xfffUL:
+		kvmppc_mmu_pte_flush_page(vcpu, guest_ea);
+		break;
+	case 0x0ffff000:
+		/* 32-bit flush w/o segment, go through all possible segments */
+		for (i = 0; i < 0x100000000ULL; i += 0x10000000ULL)
+			kvmppc_mmu_pte_flush(vcpu, guest_ea | i, ~0xfffUL);
+		break;
+	case 0:
+		/* Doing a complete flush -> start from scratch */
+		kvmppc_mmu_pte_flush_all(vcpu);
+		break;
+	default:
+		WARN_ON(1);
+		break;
+	}
+}
+
+/* Flush with mask 0xfffffffff */
+static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
+{
+	struct hlist_head *list;
+	struct hlist_node *node, *tmp;
+	struct hpte_cache *pte;
+	u64 vp_mask = 0xfffffffffULL;
+
+	list = &vcpu->arch.hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)];
+
+	/* Check the list for matching entries and invalidate */
+	hlist_for_each_entry_safe(pte, node, tmp, list, list_vpte)
+		if ((pte->pte.vpage & vp_mask) == guest_vp)
+			invalidate_pte(vcpu, pte);
+}
+
+/* Flush with mask 0xffffff000 */
+static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
+{
+	struct hlist_head *list;
+	struct hlist_node *node, *tmp;
+	struct hpte_cache *pte;
+	u64 vp_mask = 0xffffff000ULL;
+
+	list = &vcpu->arch.hpte_hash_vpte_long[
+		kvmppc_mmu_hash_vpte_long(guest_vp)];
+
+	/* Check the list for matching entries and invalidate */
+	hlist_for_each_entry_safe(pte, node, tmp, list, list_vpte_long)
+		if ((pte->pte.vpage & vp_mask) == guest_vp)
+			invalidate_pte(vcpu, pte);
+}
+
+void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
+{
+	dprintk_mmu("KVM: Flushing %d Shadow vPTEs: 0x%llx & 0x%llx\n",
+		    vcpu->arch.hpte_cache_count, guest_vp, vp_mask);
+	guest_vp &= vp_mask;
+
+	switch(vp_mask) {
+	case 0xfffffffffULL:
+		kvmppc_mmu_pte_vflush_short(vcpu, guest_vp);
+		break;
+	case 0xffffff000ULL:
+		kvmppc_mmu_pte_vflush_long(vcpu, guest_vp);
+		break;
+	default:
+		WARN_ON(1);
+		return;
+	}
+}
+
+void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
+{
+	struct hlist_node *node, *tmp;
+	struct hpte_cache *pte;
+	int i;
+
+	dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%lx - 0x%lx\n",
+		    vcpu->arch.hpte_cache_count, pa_start, pa_end);
+
+	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
+		struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i];
+
+		hlist_for_each_entry_safe(pte, node, tmp, list, list_vpte_long)
+			if ((pte->pte.raddr >= pa_start) &&
+			    (pte->pte.raddr < pa_end))
+				invalidate_pte(vcpu, pte);
+	}
+}
+
+struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
+{
+	struct hpte_cache *pte;
+
+	pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL);
+	vcpu->arch.hpte_cache_count++;
+
+	if (vcpu->arch.hpte_cache_count == HPTEG_CACHE_NUM)
+		kvmppc_mmu_pte_flush_all(vcpu);
+
+	return pte;
+}
+
+void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu)
+{
+	kvmppc_mmu_pte_flush(vcpu, 0, 0);
+}
+
+static void kvmppc_mmu_hpte_init_hash(struct hlist_head *hash_list, int len)
+{
+	int i;
+
+	for (i = 0; i < len; i++)
+		INIT_HLIST_HEAD(&hash_list[i]);
+}
+
+int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu)
+{
+	/* init hpte lookup hashes */
+	kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_pte,
+				  ARRAY_SIZE(vcpu->arch.hpte_hash_pte));
+	kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte,
+				  ARRAY_SIZE(vcpu->arch.hpte_hash_vpte));
+	kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte_long,
+				  ARRAY_SIZE(vcpu->arch.hpte_hash_vpte_long));
+
+	return 0;
+}
+
+int kvmppc_mmu_hpte_sysinit(void)
+{
+	/* init hpte slab cache */
+	hpte_cache = kmem_cache_create("kvm-spt", sizeof(struct hpte_cache),
+				       sizeof(struct hpte_cache), 0, NULL);
+
+	return 0;
+}
+
+void kvmppc_mmu_hpte_sysexit(void)
+{
+	kmem_cache_destroy(hpte_cache);
+}
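Note: the new file walks its hash buckets with hlist_for_each_entry_safe() everywhere, because invalidate_pte() unlinks and frees the current entry mid-walk. A freestanding sketch of that delete-while-iterating pattern, using the same five-argument form of the macro that 2010-era kernels (and the code above) use:

```c
#include <linux/list.h>
#include <linux/slab.h>

struct item {
	struct hlist_node link;
	unsigned long key;
};

/* Remove and free every entry in one bucket that matches 'key'. */
static void bucket_evict(struct hlist_head *bucket, unsigned long key)
{
	struct item *it;
	struct hlist_node *node, *tmp;

	/* The _safe variant caches the next pointer in 'tmp', so the
	 * current node may be unlinked and freed inside the loop body. */
	hlist_for_each_entry_safe(it, node, tmp, bucket, link)
		if (it->key == key) {
			hlist_del(&it->link);
			kfree(it);
		}
}
```

The three hlist_node members per hpte_cache entry (list_pte, list_vpte, list_vpte_long) let one entry sit in three hash tables at once, which is what makes the per-page, per-vpage and full flushes all O(bucket) instead of a scan over the whole cache.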
diff --git a/arch/powerpc/kvm/book3s_paired_singles.c b/arch/powerpc/kvm/book3s_paired_singles.c
index a9f66abafcb..474f2e24050 100644
--- a/arch/powerpc/kvm/book3s_paired_singles.c
+++ b/arch/powerpc/kvm/book3s_paired_singles.c
@@ -159,10 +159,7 @@
 
 static inline void kvmppc_sync_qpr(struct kvm_vcpu *vcpu, int rt)
 {
-	struct thread_struct t;
-
-	t.fpscr.val = vcpu->arch.fpscr;
-	cvt_df((double*)&vcpu->arch.fpr[rt], (float*)&vcpu->arch.qpr[rt], &t);
+	kvm_cvt_df(&vcpu->arch.fpr[rt], &vcpu->arch.qpr[rt], &vcpu->arch.fpscr);
 }
 
 static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store)
@@ -183,7 +180,6 @@ static int kvmppc_emulate_fpr_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
 				   int rs, ulong addr, int ls_type)
 {
 	int emulated = EMULATE_FAIL;
-	struct thread_struct t;
 	int r;
 	char tmp[8];
 	int len = sizeof(u32);
@@ -191,8 +187,6 @@ static int kvmppc_emulate_fpr_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	if (ls_type == FPU_LS_DOUBLE)
 		len = sizeof(u64);
 
-	t.fpscr.val = vcpu->arch.fpscr;
-
 	/* read from memory */
 	r = kvmppc_ld(vcpu, &addr, len, tmp, true);
 	vcpu->arch.paddr_accessed = addr;
@@ -210,7 +204,7 @@ static int kvmppc_emulate_fpr_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	/* put in registers */
 	switch (ls_type) {
 	case FPU_LS_SINGLE:
-		cvt_fd((float*)tmp, (double*)&vcpu->arch.fpr[rs], &t);
+		kvm_cvt_fd((u32*)tmp, &vcpu->arch.fpr[rs], &vcpu->arch.fpscr);
 		vcpu->arch.qpr[rs] = *((u32*)tmp);
 		break;
 	case FPU_LS_DOUBLE:
@@ -229,17 +223,14 @@ static int kvmppc_emulate_fpr_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
 				    int rs, ulong addr, int ls_type)
 {
 	int emulated = EMULATE_FAIL;
-	struct thread_struct t;
 	int r;
 	char tmp[8];
 	u64 val;
 	int len;
 
-	t.fpscr.val = vcpu->arch.fpscr;
-
 	switch (ls_type) {
 	case FPU_LS_SINGLE:
-		cvt_df((double*)&vcpu->arch.fpr[rs], (float*)tmp, &t);
+		kvm_cvt_df(&vcpu->arch.fpr[rs], (u32*)tmp, &vcpu->arch.fpscr);
 		val = *((u32*)tmp);
 		len = sizeof(u32);
 		break;
@@ -278,13 +269,10 @@ static int kvmppc_emulate_psq_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
 				   int rs, ulong addr, bool w, int i)
 {
 	int emulated = EMULATE_FAIL;
-	struct thread_struct t;
 	int r;
 	float one = 1.0;
 	u32 tmp[2];
 
-	t.fpscr.val = vcpu->arch.fpscr;
-
 	/* read from memory */
 	if (w) {
 		r = kvmppc_ld(vcpu, &addr, sizeof(u32), tmp, true);
@@ -308,7 +296,7 @@ static int kvmppc_emulate_psq_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	emulated = EMULATE_DONE;
 
 	/* put in registers */
-	cvt_fd((float*)&tmp[0], (double*)&vcpu->arch.fpr[rs], &t);
+	kvm_cvt_fd(&tmp[0], &vcpu->arch.fpr[rs], &vcpu->arch.fpscr);
 	vcpu->arch.qpr[rs] = tmp[1];
 
 	dprintk(KERN_INFO "KVM: PSQ_LD [0x%x, 0x%x] at 0x%lx (%d)\n", tmp[0],
@@ -322,14 +310,11 @@ static int kvmppc_emulate_psq_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
 				    int rs, ulong addr, bool w, int i)
 {
 	int emulated = EMULATE_FAIL;
-	struct thread_struct t;
 	int r;
 	u32 tmp[2];
 	int len = w ? sizeof(u32) : sizeof(u64);
 
-	t.fpscr.val = vcpu->arch.fpscr;
-
-	cvt_df((double*)&vcpu->arch.fpr[rs], (float*)&tmp[0], &t);
+	kvm_cvt_df(&vcpu->arch.fpr[rs], &tmp[0], &vcpu->arch.fpscr);
 	tmp[1] = vcpu->arch.qpr[rs];
 
 	r = kvmppc_st(vcpu, &addr, len, tmp, true);
@@ -517,7 +502,7 @@ static int get_d_signext(u32 inst)
 static int kvmppc_ps_three_in(struct kvm_vcpu *vcpu, bool rc,
 				   int reg_out, int reg_in1, int reg_in2,
 				   int reg_in3, int scalar,
-				   void (*func)(struct thread_struct *t,
+				   void (*func)(u64 *fpscr,
 						u32 *dst, u32 *src1,
 						u32 *src2, u32 *src3))
{
@@ -526,27 +511,25 @@ static int kvmppc_ps_three_in(struct kvm_vcpu *vcpu, bool rc,
 	u32 ps0_out;
 	u32 ps0_in1, ps0_in2, ps0_in3;
 	u32 ps1_in1, ps1_in2, ps1_in3;
-	struct thread_struct t;
-	t.fpscr.val = vcpu->arch.fpscr;
 
 	/* RC */
 	WARN_ON(rc);
 
 	/* PS0 */
-	cvt_df((double*)&fpr[reg_in1], (float*)&ps0_in1, &t);
-	cvt_df((double*)&fpr[reg_in2], (float*)&ps0_in2, &t);
-	cvt_df((double*)&fpr[reg_in3], (float*)&ps0_in3, &t);
+	kvm_cvt_df(&fpr[reg_in1], &ps0_in1, &vcpu->arch.fpscr);
+	kvm_cvt_df(&fpr[reg_in2], &ps0_in2, &vcpu->arch.fpscr);
+	kvm_cvt_df(&fpr[reg_in3], &ps0_in3, &vcpu->arch.fpscr);
 
 	if (scalar & SCALAR_LOW)
 		ps0_in2 = qpr[reg_in2];
 
-	func(&t, &ps0_out, &ps0_in1, &ps0_in2, &ps0_in3);
+	func(&vcpu->arch.fpscr, &ps0_out, &ps0_in1, &ps0_in2, &ps0_in3);
 
 	dprintk(KERN_INFO "PS3 ps0 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n",
 			  ps0_in1, ps0_in2, ps0_in3, ps0_out);
 
 	if (!(scalar & SCALAR_NO_PS0))
-		cvt_fd((float*)&ps0_out, (double*)&fpr[reg_out], &t);
+		kvm_cvt_fd(&ps0_out, &fpr[reg_out], &vcpu->arch.fpscr);
 
 	/* PS1 */
 	ps1_in1 = qpr[reg_in1];
@@ -557,7 +540,7 @@ static int kvmppc_ps_three_in(struct kvm_vcpu *vcpu, bool rc,
 		ps1_in2 = ps0_in2;
 
 	if (!(scalar & SCALAR_NO_PS1))
-		func(&t, &qpr[reg_out], &ps1_in1, &ps1_in2, &ps1_in3);
+		func(&vcpu->arch.fpscr, &qpr[reg_out], &ps1_in1, &ps1_in2, &ps1_in3);
 
 	dprintk(KERN_INFO "PS3 ps1 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n",
 			  ps1_in1, ps1_in2, ps1_in3, qpr[reg_out]);
@@ -568,7 +551,7 @@ static int kvmppc_ps_three_in(struct kvm_vcpu *vcpu, bool rc,
 static int kvmppc_ps_two_in(struct kvm_vcpu *vcpu, bool rc,
 				   int reg_out, int reg_in1, int reg_in2,
 				   int scalar,
-				   void (*func)(struct thread_struct *t,
+				   void (*func)(u64 *fpscr,
 						u32 *dst, u32 *src1,
 						u32 *src2))
{
@@ -578,27 +561,25 @@ static int kvmppc_ps_two_in(struct kvm_vcpu *vcpu, bool rc,
 	u32 ps0_in1, ps0_in2;
 	u32 ps1_out;
 	u32 ps1_in1, ps1_in2;
-	struct thread_struct t;
-	t.fpscr.val = vcpu->arch.fpscr;
 
 	/* RC */
 	WARN_ON(rc);
 
 	/* PS0 */
-	cvt_df((double*)&fpr[reg_in1], (float*)&ps0_in1, &t);
+	kvm_cvt_df(&fpr[reg_in1], &ps0_in1, &vcpu->arch.fpscr);
 
 	if (scalar & SCALAR_LOW)
 		ps0_in2 = qpr[reg_in2];
 	else
-		cvt_df((double*)&fpr[reg_in2], (float*)&ps0_in2, &t);
+		kvm_cvt_df(&fpr[reg_in2], &ps0_in2, &vcpu->arch.fpscr);
 
-	func(&t, &ps0_out, &ps0_in1, &ps0_in2);
+	func(&vcpu->arch.fpscr, &ps0_out, &ps0_in1, &ps0_in2);
 
 	if (!(scalar & SCALAR_NO_PS0)) {
 		dprintk(KERN_INFO "PS2 ps0 -> f(0x%x, 0x%x) = 0x%x\n",
 				  ps0_in1, ps0_in2, ps0_out);
 
-		cvt_fd((float*)&ps0_out, (double*)&fpr[reg_out], &t);
+		kvm_cvt_fd(&ps0_out, &fpr[reg_out], &vcpu->arch.fpscr);
 	}
 
 	/* PS1 */
@@ -608,7 +589,7 @@ static int kvmppc_ps_two_in(struct kvm_vcpu *vcpu, bool rc,
 	if (scalar & SCALAR_HIGH)
 		ps1_in2 = ps0_in2;
 
-	func(&t, &ps1_out, &ps1_in1, &ps1_in2);
+	func(&vcpu->arch.fpscr, &ps1_out, &ps1_in1, &ps1_in2);
 
 	if (!(scalar & SCALAR_NO_PS1)) {
 		qpr[reg_out] = ps1_out;
@@ -622,31 +603,29 @@ static int kvmppc_ps_two_in(struct kvm_vcpu *vcpu, bool rc,
 static int kvmppc_ps_one_in(struct kvm_vcpu *vcpu, bool rc,
 				   int reg_out, int reg_in,
-				   void (*func)(struct thread_struct *t,
+				   void (*func)(u64 *t,
 						u32 *dst, u32 *src1))
{
 	u32 *qpr = vcpu->arch.qpr;
 	u64 *fpr = vcpu->arch.fpr;
 	u32 ps0_out, ps0_in;
 	u32 ps1_in;
-	struct thread_struct t;
-	t.fpscr.val = vcpu->arch.fpscr;
 
 	/* RC */
 	WARN_ON(rc);
 
 	/* PS0 */
-	cvt_df((double*)&fpr[reg_in], (float*)&ps0_in, &t);
-	func(&t, &ps0_out, &ps0_in);
+	kvm_cvt_df(&fpr[reg_in], &ps0_in, &vcpu->arch.fpscr);
+	func(&vcpu->arch.fpscr, &ps0_out, &ps0_in);
 
 	dprintk(KERN_INFO "PS1 ps0 -> f(0x%x) = 0x%x\n",
 			  ps0_in, ps0_out);
 
-	cvt_fd((float*)&ps0_out, (double*)&fpr[reg_out], &t);
+	kvm_cvt_fd(&ps0_out, &fpr[reg_out], &vcpu->arch.fpscr);
 
 	/* PS1 */
 	ps1_in = qpr[reg_in];
-	func(&t, &qpr[reg_out], &ps1_in);
+	func(&vcpu->arch.fpscr, &qpr[reg_out], &ps1_in);
 
 	dprintk(KERN_INFO "PS1 ps1 -> f(0x%x) = 0x%x\n",
 			  ps1_in, qpr[reg_out]);
@@ -672,13 +651,10 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	bool rcomp = (inst & 1) ? true : false;
 	u32 cr = kvmppc_get_cr(vcpu);
-	struct thread_struct t;
 #ifdef DEBUG
 	int i;
 #endif
 
-	t.fpscr.val = vcpu->arch.fpscr;
-
 	if (!kvmppc_inst_is_paired_single(vcpu, inst))
 		return EMULATE_FAIL;
@@ -695,7 +671,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
 #ifdef DEBUG
 	for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) {
 		u32 f;
-		cvt_df((double*)&vcpu->arch.fpr[i], (float*)&f, &t);
+		kvm_cvt_df(&vcpu->arch.fpr[i], &f, &vcpu->arch.fpscr);
 		dprintk(KERN_INFO "FPR[%d] = 0x%x / 0x%llx    QPR[%d] = 0x%x\n",
 			i, f, vcpu->arch.fpr[i], i, vcpu->arch.qpr[i]);
 	}
@@ -819,8 +795,9 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			WARN_ON(rcomp);
 			vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_ra];
 			/* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */
-			cvt_df((double*)&vcpu->arch.fpr[ax_rb],
-			       (float*)&vcpu->arch.qpr[ax_rd], &t);
+			kvm_cvt_df(&vcpu->arch.fpr[ax_rb],
+				   &vcpu->arch.qpr[ax_rd],
+				   &vcpu->arch.fpscr);
 			break;
 		case OP_4X_PS_MERGE01:
 			WARN_ON(rcomp);
@@ -830,17 +807,20 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		case OP_4X_PS_MERGE10:
 			WARN_ON(rcomp);
 			/* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */
-			cvt_fd((float*)&vcpu->arch.qpr[ax_ra],
-			       (double*)&vcpu->arch.fpr[ax_rd], &t);
+			kvm_cvt_fd(&vcpu->arch.qpr[ax_ra],
+				   &vcpu->arch.fpr[ax_rd],
+				   &vcpu->arch.fpscr);
 			/* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */
-			cvt_df((double*)&vcpu->arch.fpr[ax_rb],
-			       (float*)&vcpu->arch.qpr[ax_rd], &t);
+			kvm_cvt_df(&vcpu->arch.fpr[ax_rb],
+				   &vcpu->arch.qpr[ax_rd],
+				   &vcpu->arch.fpscr);
 			break;
 		case OP_4X_PS_MERGE11:
 			WARN_ON(rcomp);
 			/* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */
-			cvt_fd((float*)&vcpu->arch.qpr[ax_ra],
-			       (double*)&vcpu->arch.fpr[ax_rd], &t);
+			kvm_cvt_fd(&vcpu->arch.qpr[ax_ra],
+				   &vcpu->arch.fpr[ax_rd],
+				   &vcpu->arch.fpscr);
 			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
 			break;
 		}
@@ -1275,7 +1255,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
 #ifdef DEBUG
 	for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) {
 		u32 f;
-		cvt_df((double*)&vcpu->arch.fpr[i], (float*)&f, &t);
+		kvm_cvt_df(&vcpu->arch.fpr[i], &f, &vcpu->arch.fpscr);
 		dprintk(KERN_INFO "FPR[%d] = 0x%x\n", i, f);
 	}
 #endif
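Note: the paired-singles changes stop materializing a throwaway struct thread_struct just to carry the FPSCR into cvt_fd()/cvt_df(); the new fpu.S helpers take a pointer to the guest's FPSCR word directly. Their C-side prototypes, as implied by the call sites in this diff (the actual declarations live in a kvm header not shown here, so treat these as inferred):

```c
/* Inferred from the call sites above; fpscr is read before and
 * written back after the conversion. */
extern void kvm_cvt_fd(u32 *from, u64 *to, u64 *fpscr);	/* single -> double */
extern void kvm_cvt_df(u64 *from, u32 *to, u64 *fpscr);	/* double -> single */

/* Typical call, as in kvmppc_sync_qpr():
 *	kvm_cvt_df(&vcpu->arch.fpr[rt], &vcpu->arch.qpr[rt],
 *		   &vcpu->arch.fpscr);
 */
```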
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index a33ab8cc2cc..8d4e35f5372 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -144,7 +144,7 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
                                         unsigned int priority)
 {
 	int allowed = 0;
-	ulong msr_mask;
+	ulong uninitialized_var(msr_mask);
 	bool update_esr = false, update_dear = false;
 
 	switch (priority) {
@@ -485,8 +485,6 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
 	int i;
 
-	vcpu_load(vcpu);
-
 	regs->pc = vcpu->arch.pc;
 	regs->cr = kvmppc_get_cr(vcpu);
 	regs->ctr = vcpu->arch.ctr;
@@ -507,8 +505,6 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
 		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
 
-	vcpu_put(vcpu);
-
 	return 0;
 }
 
@@ -516,8 +512,6 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
 	int i;
 
-	vcpu_load(vcpu);
-
 	vcpu->arch.pc = regs->pc;
 	kvmppc_set_cr(vcpu, regs->cr);
 	vcpu->arch.ctr = regs->ctr;
@@ -537,8 +531,6 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
 		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
 
-	vcpu_put(vcpu);
-
 	return 0;
 }
 
@@ -569,9 +561,7 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
 {
 	int r;
 
-	vcpu_load(vcpu);
 	r = kvmppc_core_vcpu_translate(vcpu, tr);
-	vcpu_put(vcpu);
 	return r;
 }
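Note on uninitialized_var(), used above for msr_mask and elsewhere in this merge (vrsave, gpr): it is the old kernel idiom for silencing a false-positive "may be used uninitialized" gcc warning without emitting any extra code. Its historical definition, approximately as it appeared in compiler-gcc.h of this era (the macro was later removed from mainline):

```c
/* "Initialize" the variable with itself: gcc stops warning, and no
 * instructions are generated. Only safe when every path that reads the
 * variable really does assign it first. */
#define uninitialized_var(x) x = x

/* So the declaration above expands to:
 *	ulong msr_mask = msr_mask;
 */
```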
diff --git a/arch/powerpc/kvm/fpu.S b/arch/powerpc/kvm/fpu.S
index 2b340a3eee9..cb34bbe1611 100644
--- a/arch/powerpc/kvm/fpu.S
+++ b/arch/powerpc/kvm/fpu.S
@@ -271,3 +271,21 @@ FPD_THREE_IN(fmsub)
 FPD_THREE_IN(fmadd)
 FPD_THREE_IN(fnmsub)
 FPD_THREE_IN(fnmadd)
+
+_GLOBAL(kvm_cvt_fd)
+	lfd	0,0(r5)			/* load up fpscr value */
+	MTFSF_L(0)
+	lfs	0,0(r3)
+	stfd	0,0(r4)
+	mffs	0
+	stfd	0,0(r5)			/* save new fpscr value */
+	blr
+
+_GLOBAL(kvm_cvt_df)
+	lfd	0,0(r5)			/* load up fpscr value */
+	MTFSF_L(0)
+	lfd	0,0(r3)
+	stfs	0,0(r4)
+	mffs	0
+	stfd	0,0(r5)			/* save new fpscr value */
+	blr
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 9b8683f39e0..72a4ad86ee9 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -36,11 +36,6 @@
 #define CREATE_TRACE_POINTS
 #include "trace.h"
 
-gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
-{
-	return gfn;
-}
-
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
 {
 	return !(v->arch.msr & MSR_WE) || !!(v->arch.pending_exceptions);
 }
@@ -287,7 +282,7 @@ static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
 static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                       struct kvm_run *run)
 {
-	u64 gpr;
+	u64 uninitialized_var(gpr);
 
 	if (run->mmio.len > sizeof(gpr)) {
 		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
@@ -423,8 +418,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	int r;
 	sigset_t sigsaved;
 
-	vcpu_load(vcpu);
-
 	if (vcpu->sigset_active)
 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
@@ -456,8 +449,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	if (vcpu->sigset_active)
 		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
 
-	vcpu_put(vcpu);
-
 	return r;
 }
@@ -523,8 +514,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		if (copy_from_user(&irq, argp, sizeof(irq)))
 			goto out;
 		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
-		break;
+		goto out;
 	}
+
 	case KVM_ENABLE_CAP:
 	{
 		struct kvm_enable_cap cap;
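Note: in kvm_arch_vcpu_ioctl() the KVM_INTERRUPT case now leaves via goto out rather than break, so the case block exits through the same label as its error paths and the function keeps a single exit point. The shape of that ioctl dispatch idiom, reduced to a self-contained sketch (the command number and do_interrupt() are placeholders, not real KVM API):

```c
#include <linux/errno.h>
#include <linux/uaccess.h>

/* Placeholder handler; not a real KVM function. */
static long do_interrupt(int irq)
{
	return irq ? 0 : -EINVAL;
}

static long example_vcpu_ioctl(unsigned int ioctl, void __user *argp)
{
	long r = -EINVAL;

	switch (ioctl) {
	case 0x01: {	/* made-up command number */
		int irq;

		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = do_interrupt(irq);
		goto out;	/* leave via the common exit, like the error paths */
	}
	default:
		r = -ENOTTY;
	}
out:
	return r;
}
```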
diff --git a/arch/powerpc/kvm/timing.c b/arch/powerpc/kvm/timing.c
index 70378551c0c..46fa04f12a9 100644
--- a/arch/powerpc/kvm/timing.c
+++ b/arch/powerpc/kvm/timing.c
@@ -182,7 +182,7 @@ static ssize_t kvmppc_exit_timing_write(struct file *file,
 	}
 
 	if (c == 'c') {
-		struct seq_file *seqf = (struct seq_file *)file->private_data;
+		struct seq_file *seqf = file->private_data;
 		struct kvm_vcpu *vcpu = seqf->private;
 		/* Write does not affect our buffers previously generated with
 		 * show. seq_file is locked here to prevent races of init with