author     Alexander Graf <agraf@suse.de>  2010-07-29 14:47:43 +0200
committer  Avi Kivity <avi@redhat.com>  2010-10-24 10:50:43 +0200
commit     666e7252a15b7fc4a116e65deaf6da5e4ce660e3 (patch)
tree       e7a56f03cb4e181eacd4f481fb3e6e038ad05b82 /arch/powerpc/kvm/book3s_32_mmu.c
parent     96bc451a153297bf1f99ef2d633d512ea349ae7a (diff)
KVM: PPC: Convert MSR to shared page
One of the most obvious registers to share with the guest directly is the MSR. The MSR contains the "interrupts enabled" flag, which the guest has to toggle in critical sections.

So in order to bring down the overhead of interrupt enabling and disabling, let's put the MSR into the shared page. Keep in mind that even though the guest can fully read its contents, writing to it doesn't always update all state. There are a few safe fields that don't require hypervisor interaction. See the documentation for a list of MSR bits that are safe to be set from inside the guest.

Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
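For orientation, below is a minimal, self-contained sketch of the access pattern this patch introduces on the host side. Only the shared msr field and the vcpu->arch.shared->msr accesses are taken from the patch itself; the struct layout, the MSR_PR value, and the helper name are illustrative stand-ins, not the kernel's exact definitions.

/*
 * Illustrative sketch only: the vcpu carries a pointer to a page that is
 * mapped into both host and guest, so reading the guest MSR becomes a
 * plain memory load from that page rather than a field private to the
 * hypervisor.
 */
#include <stdbool.h>
#include <stdint.h>

#define MSR_PR (1UL << 14)	/* problem state (user mode); value shown for illustration */

struct kvm_vcpu_arch_shared {
	uint64_t msr;		/* guest-visible copy of the MSR */
};

struct kvm_vcpu_arch {
	struct kvm_vcpu_arch_shared *shared;	/* points into the shared page */
};

/* Before this patch: test vcpu->arch.msr; after: test the shared copy. */
static bool guest_is_user_mode(const struct kvm_vcpu_arch *arch)
{
	return (arch->shared->msr & MSR_PR) != 0;
}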
Diffstat (limited to 'arch/powerpc/kvm/book3s_32_mmu.c')
-rw-r--r--  arch/powerpc/kvm/book3s_32_mmu.c  12
1 files changed, 6 insertions, 6 deletions
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index 3292d76101d..449bce5f021 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -133,7 +133,7 @@ static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
else
bat = &vcpu_book3s->ibat[i];
- if (vcpu->arch.msr & MSR_PR) {
+ if (vcpu->arch.shared->msr & MSR_PR) {
if (!bat->vp)
continue;
} else {
@@ -214,8 +214,8 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
pte->raddr = (pteg[i+1] & ~(0xFFFULL)) | (eaddr & 0xFFF);
pp = pteg[i+1] & 3;
- if ((sre->Kp && (vcpu->arch.msr & MSR_PR)) ||
- (sre->Ks && !(vcpu->arch.msr & MSR_PR)))
+ if ((sre->Kp && (vcpu->arch.shared->msr & MSR_PR)) ||
+ (sre->Ks && !(vcpu->arch.shared->msr & MSR_PR)))
pp |= 4;
pte->may_write = false;
@@ -334,7 +334,7 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
struct kvmppc_sr *sr;
u64 gvsid = esid;
- if (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+ if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
sr = find_sr(to_book3s(vcpu), ea);
if (sr->valid)
gvsid = sr->vsid;
@@ -343,7 +343,7 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
/* In case we only have one of MSR_IR or MSR_DR set, let's put
that in the real-mode context (and hope RM doesn't access
high memory) */
- switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+ switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
case 0:
*vsid = VSID_REAL | esid;
break;
@@ -363,7 +363,7 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
BUG();
}
- if (vcpu->arch.msr & MSR_PR)
+ if (vcpu->arch.shared->msr & MSR_PR)
*vsid |= VSID_PR;
return 0;
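As a follow-up illustration of the guest-side benefit the commit message describes, here is a hypothetical sketch of toggling the "interrupts enabled" bit through the shared page, reusing the illustrative struct from the sketch above. The helper names and the choice of MSR_EE as a "safe" bit are assumptions made for illustration; which bits may really be written without hypervisor interaction is defined by the documentation the commit message refers to.

/*
 * Hypothetical guest-side sketch: with the MSR in a shared page, clearing
 * or setting MSR_EE in a critical section can become a plain store instead
 * of a trap to the hypervisor.  Not the real guest paravirt API.
 */
#define MSR_EE (1UL << 15)	/* external interrupt enable; value shown for illustration */

extern struct kvm_vcpu_arch_shared *shared;	/* the guest's mapping of the shared page (assumed) */

static void guest_local_irq_disable(void)
{
	shared->msr &= ~MSR_EE;	/* assumed to be a "safe" bit per the documentation */
}

static void guest_local_irq_enable(void)
{
	shared->msr |= MSR_EE;
}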