author | Alexander Graf <agraf@suse.de> | 2010-07-29 14:47:43 +0200
---|---|---
committer | Avi Kivity <avi@redhat.com> | 2010-10-24 10:50:43 +0200
commit | 666e7252a15b7fc4a116e65deaf6da5e4ce660e3 (patch) |
tree | e7a56f03cb4e181eacd4f481fb3e6e038ad05b82 /arch/powerpc/kvm/book3s_64_mmu.c |
parent | 96bc451a153297bf1f99ef2d633d512ea349ae7a (diff) |
KVM: PPC: Convert MSR to shared page
One of the most obvious registers to share with the guest directly is the
MSR. The MSR contains the "interrupts enabled" flag which the guest has to
toggle in critical sections.
So in order to bring down the overhead of enabling and disabling interrupts,
let's put the MSR into the shared page. Keep in mind that even though the guest
can read its contents in full, writing to it doesn't always update all state:
only a few safe fields take effect without hypervisor interaction. See the
documentation for the list of MSR bits that are safe to set from inside the guest.
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
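
To make the idea concrete, here is a minimal, self-contained sketch of the shared-page approach. The struct shape follows this patch series, but the standalone MSR_EE definition and the `guest_irq_disable`/`guest_irq_enable` helpers are illustrative assumptions, not the kernel's actual definitions.

```c
/* Illustrative sketch only: simplified stand-ins for the kernel types. */
#include <stdint.h>

#define MSR_EE (1ULL << 15)   /* external-interrupt enable bit of the MSR */

/* Page shared between hypervisor and guest; this series puts the MSR here. */
struct kvm_vcpu_arch_shared {
	uint64_t msr;
};

/*
 * Hypothetical guest-side helpers: masking interrupts becomes a plain
 * store to guest memory instead of a privileged mtmsr that traps into
 * the hypervisor.
 */
static inline void guest_irq_disable(struct kvm_vcpu_arch_shared *shared)
{
	shared->msr &= ~MSR_EE;
}

static inline void guest_irq_enable(struct kvm_vcpu_arch_shared *shared)
{
	shared->msr |= MSR_EE;
}
```

The real series also has to handle interrupts that became pending while MSR_EE was clear, which this sketch omits.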
Diffstat (limited to 'arch/powerpc/kvm/book3s_64_mmu.c')
-rw-r--r-- | arch/powerpc/kvm/book3s_64_mmu.c | 12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index 4025ea26b3c..58aa8409dae 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -180,9 +180,9 @@ do_second:
 		goto no_page_found;
 	}
 
-	if ((vcpu->arch.msr & MSR_PR) && slbe->Kp)
+	if ((vcpu->arch.shared->msr & MSR_PR) && slbe->Kp)
 		key = 4;
-	else if (!(vcpu->arch.msr & MSR_PR) && slbe->Ks)
+	else if (!(vcpu->arch.shared->msr & MSR_PR) && slbe->Ks)
 		key = 4;
 
 	for (i=0; i<16; i+=2) {
@@ -381,7 +381,7 @@ static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu)
 	for (i = 1; i < vcpu_book3s->slb_nr; i++)
 		vcpu_book3s->slb[i].valid = false;
 
-	if (vcpu->arch.msr & MSR_IR) {
+	if (vcpu->arch.shared->msr & MSR_IR) {
 		kvmppc_mmu_flush_segments(vcpu);
 		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
 	}
@@ -446,13 +446,13 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 	struct kvmppc_slb *slb;
 	u64 gvsid = esid;
 
-	if (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+	if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
 		slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea);
 		if (slb)
 			gvsid = slb->vsid;
 	}
 
-	switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+	switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
 	case 0:
 		*vsid = VSID_REAL | esid;
 		break;
@@ -473,7 +473,7 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 		break;
 	}
 
-	if (vcpu->arch.msr & MSR_PR)
+	if (vcpu->arch.shared->msr & MSR_PR)
 		*vsid |= VSID_PR;
 
 	return 0;
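
The conversion itself is mechanical: every read of `vcpu->arch.msr` becomes `vcpu->arch.shared->msr`. A minimal sketch of the resulting access pattern follows, with simplified struct definitions standing in for the kernel's `struct kvm_vcpu`; the `guest_is_userspace` helper is hypothetical, not code from this patch.

```c
#include <stdint.h>

#define MSR_PR (1ULL << 14)   /* problem state: set while the guest runs user code */

/* Simplified stand-ins for the kernel's vcpu structures. */
struct kvm_vcpu_arch_shared { uint64_t msr; };
struct kvm_vcpu_arch        { struct kvm_vcpu_arch_shared *shared; };
struct kvm_vcpu             { struct kvm_vcpu_arch arch; };

/* After this patch, privilege checks in the MMU code read the shared copy. */
static int guest_is_userspace(const struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.shared->msr & MSR_PR) != 0;
}
```

Because both sides map the same page, the hypervisor's MMU code and the guest agree on the MSR value without requiring an exit on every access.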