author    Alexander Graf <agraf@suse.de>    2010-04-20 02:49:48 +0200
committer Avi Kivity <avi@redhat.com>       2010-05-17 12:18:58 +0300
commit    f7bc74e1c306636a659a04805474b2f8fcbd1f7e
tree      5aa5c5c9676d577b55bbc700f1d5a6ee5c137a27 /arch/powerpc/kvm
parent    7fdaec997cc8ef77e8da7ed70f3d9f074b61c31f
KVM: PPC: Improve split mode
When in split mode, instruction relocation and data relocation are not equal. So far we implemented this mode by reserving a special pseudo-VSID for the two cases and flushing all PTEs when going into split mode, which is slow. Unfortunately, 32-bit Linux and Mac OS X use split mode extensively.

So, to avoid slowing things down too much, I came up with a different idea: mark split mode with a bit in the VSID and then treat it like any other segment. This means we can just flush the shadow segment cache, but keep the PTEs intact.

I verified that this works with ppc32 Linux and Mac OS X 10.4 guests and does speed them up.

Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
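[Editor's note] A minimal sketch of the idea described above, with hypothetical flag names (the real VSID flag definitions live in the Book3S headers, which this diff does not touch): encode the current relocation mode into the shadow VSID, so translations made under different modes occupy disjoint VSIDs and a mode switch only needs a segment-cache flush, not a PTE flush.

    #include <stdint.h>

    /* Hypothetical mode-marker bits; stand-ins for the real VSID flags. */
    #define VSID_BIT_DR (1ULL << 61)   /* data relocation enabled */
    #define VSID_BIT_IR (1ULL << 62)   /* instruction relocation enabled */

    /* Derive a shadow VSID that encodes the relocation mode.  PTEs
     * created under different modes live under different VSIDs and
     * never collide, so entering or leaving split mode can keep them
     * all intact. */
    static uint64_t shadow_vsid(uint64_t guest_vsid, int dr, int ir)
    {
        uint64_t vsid = guest_vsid;

        if (dr)
            vsid |= VSID_BIT_DR;
        if (ir)
            vsid |= VSID_BIT_IR;
        return vsid;
    }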
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--  arch/powerpc/kvm/book3s.c         28
-rw-r--r--  arch/powerpc/kvm/book3s_32_mmu.c  21
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu.c  27
3 files changed, 42 insertions(+), 34 deletions(-)
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index f66de7e518f..397701d39ae 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -148,16 +148,8 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
}
}
- if (((vcpu->arch.msr & (MSR_IR|MSR_DR)) != (old_msr & (MSR_IR|MSR_DR))) ||
- (vcpu->arch.msr & MSR_PR) != (old_msr & MSR_PR)) {
- bool dr = (vcpu->arch.msr & MSR_DR) ? true : false;
- bool ir = (vcpu->arch.msr & MSR_IR) ? true : false;
-
- /* Flush split mode PTEs */
- if (dr != ir)
- kvmppc_mmu_pte_vflush(vcpu, VSID_SPLIT_MASK,
- VSID_SPLIT_MASK);
-
+ if ((vcpu->arch.msr & (MSR_PR|MSR_IR|MSR_DR)) !=
+ (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
kvmppc_mmu_flush_segments(vcpu);
kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
}
@@ -535,6 +527,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
bool is_mmio = false;
bool dr = (vcpu->arch.msr & MSR_DR) ? true : false;
bool ir = (vcpu->arch.msr & MSR_IR) ? true : false;
+ u64 vsid;
relocated = data ? dr : ir;
@@ -552,13 +545,20 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
case 0:
- pte.vpage |= VSID_REAL;
+ pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
break;
case MSR_DR:
- pte.vpage |= VSID_REAL_DR;
- break;
case MSR_IR:
- pte.vpage |= VSID_REAL_IR;
+ vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
+
+ if ((vcpu->arch.msr & (MSR_DR|MSR_IR)) == MSR_DR)
+ pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
+ else
+ pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
+ pte.vpage |= vsid;
+
+ if (vsid == -1)
+ page_found = -EINVAL;
break;
}
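[Editor's note] One subtlety in the hunk above: pte.vpage is a virtual page number (the effective address shifted down by the 12-bit page offset), so the real-mode markers are shifted by SID_SHIFT - 12 rather than SID_SHIFT to land on the segment bits of the vpage. A minimal sketch, assuming 4k pages and 256MB segments; tag_vpage() is illustrative, not from the patch:

    #include <stdint.h>

    #define SID_SHIFT 28   /* 256MB segments on Book3S, assumed here */

    /* Tag the segment part of a virtual page number with a marker VSID.
     * With 4k pages the low 12 bits of the EA vanish in the vpage, so
     * the segment bits start at bit SID_SHIFT - 12, not SID_SHIFT. */
    static uint64_t tag_vpage(uint64_t eaddr, uint64_t marker_vsid)
    {
        uint64_t vpage = eaddr >> 12;            /* virtual page number */
        return vpage | (marker_vsid << (SID_SHIFT - 12));
    }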
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index 33186b745c9..0b10503c8a4 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -330,30 +330,35 @@ static void kvmppc_mmu_book3s_32_tlbie(struct kvm_vcpu *vcpu, ulong ea, bool large)
static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
u64 *vsid)
{
+ ulong ea = esid << SID_SHIFT;
+ struct kvmppc_sr *sr;
+ u64 gvsid = esid;
+
+ if (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+ sr = find_sr(to_book3s(vcpu), ea);
+ if (sr->valid)
+ gvsid = sr->vsid;
+ }
+
/* In case we only have one of MSR_IR or MSR_DR set, let's put
that in the real-mode context (and hope RM doesn't access
high memory) */
switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
case 0:
- *vsid = (VSID_REAL >> 16) | esid;
+ *vsid = VSID_REAL | esid;
break;
case MSR_IR:
- *vsid = (VSID_REAL_IR >> 16) | esid;
+ *vsid = VSID_REAL_IR | gvsid;
break;
case MSR_DR:
- *vsid = (VSID_REAL_DR >> 16) | esid;
+ *vsid = VSID_REAL_DR | gvsid;
break;
case MSR_DR|MSR_IR:
- {
- ulong ea = esid << SID_SHIFT;
- struct kvmppc_sr *sr = find_sr(to_book3s(vcpu), ea);
-
if (!sr->valid)
return -1;
*vsid = sr->vsid;
break;
- }
default:
BUG();
}
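[Editor's note] The 64-bit variant below applies the same recipe, with SLB entries in place of segment registers. As a reading aid, here is a condensed, hypothetical rendering of the shared shape; lookup_guest_vsid() is a stand-in for find_sr()/kvmppc_mmu_book3s_64_find_slbe(), and the marker constants are illustrative:

    #include <stdint.h>
    #include <stdbool.h>

    #define MSR_IR (1ULL << 5)            /* instruction relocation */
    #define MSR_DR (1ULL << 4)            /* data relocation */
    /* Hypothetical real-mode markers, stand-ins for VSID_REAL*. */
    #define VSID_REAL    (0x1ULL << 60)
    #define VSID_REAL_IR (0x2ULL << 60)
    #define VSID_REAL_DR (0x3ULL << 60)

    /* Placeholder for find_sr()/find_slbe(): fills *gvsid and returns
     * true when the guest maps this ESID; real code walks SRs/SLB. */
    static bool lookup_guest_vsid(uint64_t esid, uint64_t *gvsid)
    {
        (void)esid; (void)gvsid;
        return false;
    }

    static int esid_to_vsid_shape(uint64_t msr, uint64_t esid, uint64_t *vsid)
    {
        uint64_t gvsid = esid;            /* fallback: identity mapping */
        bool mapped = false;

        /* With any relocation on, prefer the guest's own VSID so that
         * split-mode segments reuse the PTEs of fully relocated ones. */
        if (msr & (MSR_DR | MSR_IR))
            mapped = lookup_guest_vsid(esid, &gvsid);

        switch (msr & (MSR_DR | MSR_IR)) {
        case 0:               *vsid = VSID_REAL    | esid;  break;
        case MSR_IR:          *vsid = VSID_REAL_IR | gvsid; break;
        case MSR_DR:          *vsid = VSID_REAL_DR | gvsid; break;
        case MSR_DR | MSR_IR:
            if (!mapped)
                return -1;                /* no guest mapping: fault */
            *vsid = gvsid;
            break;
        }
        return 0;
    }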
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index a9241e90a68..612de6e4d74 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -442,29 +442,32 @@ static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va,
static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
u64 *vsid)
{
+ ulong ea = esid << SID_SHIFT;
+ struct kvmppc_slb *slb;
+ u64 gvsid = esid;
+
+ if (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+ slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea);
+ if (slb)
+ gvsid = slb->vsid;
+ }
+
switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
case 0:
- *vsid = (VSID_REAL >> 16) | esid;
+ *vsid = VSID_REAL | esid;
break;
case MSR_IR:
- *vsid = (VSID_REAL_IR >> 16) | esid;
+ *vsid = VSID_REAL_IR | gvsid;
break;
case MSR_DR:
- *vsid = (VSID_REAL_DR >> 16) | esid;
+ *vsid = VSID_REAL_DR | gvsid;
break;
case MSR_DR|MSR_IR:
- {
- ulong ea;
- struct kvmppc_slb *slb;
- ea = esid << SID_SHIFT;
- slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea);
- if (slb)
- *vsid = slb->vsid;
- else
+ if (!slb)
return -ENOENT;
+ *vsid = gvsid;
break;
- }
default:
BUG();
break;