author | Ingo Molnar <mingo@elte.hu> | 2009-04-07 11:15:40 +0200
---|---|---
committer | Ingo Molnar <mingo@elte.hu> | 2009-04-07 11:15:40 +0200
commit | 5e34437840d33554f69380584311743b39e8fbeb | (patch)
tree | e081135619ee146af5efb9ee883afca950df5757 | /arch/ia64/kvm/vcpu.c
parent | 77d05632baee21b1cef8730d7c06aa69601e4dca | (diff)
parent | d508afb437daee7cf07da085b635c44a4ebf9b38 | (diff)
Merge branch 'linus' into core/softlockup
Conflicts:
kernel/sysctl.c
Diffstat (limited to 'arch/ia64/kvm/vcpu.c')
-rw-r--r-- | arch/ia64/kvm/vcpu.c | 46
1 file changed, 7 insertions(+), 39 deletions(-)
diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c
index ecd526b5532..a18ee17b919 100644
--- a/arch/ia64/kvm/vcpu.c
+++ b/arch/ia64/kvm/vcpu.c
@@ -112,7 +112,6 @@ void switch_to_physical_rid(struct kvm_vcpu *vcpu)
 	return;
 }
 
-
 void switch_to_virtual_rid(struct kvm_vcpu *vcpu)
 {
 	unsigned long psr;
@@ -166,8 +165,6 @@ void switch_mm_mode(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
 	return;
 }
 
-
-
 /*
  * In physical mode, insert tc/tr for region 0 and 4 uses
  * RID[0] and RID[4] which is for physical mode emulation.
@@ -269,7 +266,6 @@ static inline unsigned long fph_index(struct kvm_pt_regs *regs,
 	return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR));
 }
 
-
 /*
  * The inverse of the above: given bspstore and the number of
  * registers, calculate ar.bsp.
@@ -390,7 +386,7 @@ void set_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
 		else
 			*rnat_addr = (*rnat_addr) & (~nat_mask);
 
-		ia64_setreg(_IA64_REG_AR_BSPSTORE, bspstore);
+		ia64_setreg(_IA64_REG_AR_BSPSTORE, (unsigned long)bspstore);
 		ia64_setreg(_IA64_REG_AR_RNAT, rnat);
 	}
 	local_irq_restore(psr);
@@ -811,12 +807,15 @@ static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val);
 static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val)
 {
 	struct kvm_vcpu *v;
+	struct kvm *kvm;
 	int i;
 	long itc_offset = val - ia64_getreg(_IA64_REG_AR_ITC);
 	unsigned long vitv = VCPU(vcpu, itv);
 
+	kvm = (struct kvm *)KVM_VM_BASE;
+
 	if (vcpu->vcpu_id == 0) {
-		for (i = 0; i < KVM_MAX_VCPUS; i++) {
+		for (i = 0; i < kvm->arch.online_vcpus; i++) {
 			v = (struct kvm_vcpu *)((char *)vcpu +
 					sizeof(struct kvm_vcpu_data) * i);
 			VMX(v, itc_offset) = itc_offset;
@@ -1039,8 +1038,6 @@ u64 vcpu_tak(struct kvm_vcpu *vcpu, u64 vadr)
 	return key;
 }
 
-
-
 void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long thash, vadr;
@@ -1050,7 +1047,6 @@ void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst)
 	vcpu_set_gr(vcpu, inst.M46.r1, thash, 0);
 }
 
-
 void kvm_ttag(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long tag, vadr;
@@ -1131,7 +1127,6 @@ int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, u64 *padr)
 	return IA64_NO_FAULT;
 }
 
-
 int kvm_tpa(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long r1, r3;
@@ -1154,7 +1149,6 @@ void kvm_tak(struct kvm_vcpu *vcpu, INST64 inst)
 	vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
 }
 
-
 /************************************
  * Insert/Purge translation register/cache
  ************************************/
@@ -1385,7 +1379,6 @@ void kvm_mov_to_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
 	vcpu_set_itc(vcpu, r2);
 }
 
-
 void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long r1;
@@ -1393,8 +1386,9 @@ void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
 	r1 = vcpu_get_itc(vcpu);
 	vcpu_set_gr(vcpu, inst.M31.r1, r1, 0);
 }
+
 /**************************************************************************
-  struct kvm_vcpu*protection key register access routines
+  struct kvm_vcpu protection key register access routines
 **************************************************************************/
 
 unsigned long vcpu_get_pkr(struct kvm_vcpu *vcpu, unsigned long reg)
@@ -1407,20 +1401,6 @@ void vcpu_set_pkr(struct kvm_vcpu *vcpu, unsigned long reg, unsigned long val)
 	ia64_set_pkr(reg, val);
 }
 
-
-unsigned long vcpu_get_itir_on_fault(struct kvm_vcpu *vcpu, unsigned long ifa)
-{
-	union ia64_rr rr, rr1;
-
-	rr.val = vcpu_get_rr(vcpu, ifa);
-	rr1.val = 0;
-	rr1.ps = rr.ps;
-	rr1.rid = rr.rid;
-	return (rr1.val);
-}
-
-
-
 /********************************
  * Moves to privileged registers
  ********************************/
@@ -1464,8 +1444,6 @@ unsigned long vcpu_set_rr(struct kvm_vcpu *vcpu, unsigned long reg,
 	return (IA64_NO_FAULT);
 }
 
-
-
 void kvm_mov_to_rr(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long r3, r2;
@@ -1510,8 +1488,6 @@ void kvm_mov_to_pkr(struct kvm_vcpu *vcpu, INST64 inst)
 	vcpu_set_pkr(vcpu, r3, r2);
 }
 
-
-
 void kvm_mov_from_rr(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long r3, r1;
@@ -1557,7 +1533,6 @@ void kvm_mov_from_pmc(struct kvm_vcpu *vcpu, INST64 inst)
 	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
 }
 
-
 unsigned long vcpu_get_cpuid(struct kvm_vcpu *vcpu, unsigned long reg)
 {
 	/* FIXME: This could get called as a result of a rsvd-reg fault */
@@ -1609,7 +1584,6 @@ unsigned long kvm_mov_to_cr(struct kvm_vcpu *vcpu, INST64 inst)
 	return 0;
 }
 
-
 unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long tgt = inst.M33.r1;
@@ -1633,8 +1607,6 @@ unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst)
 	return 0;
 }
 
-
-
 void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val)
 {
 
@@ -1776,9 +1748,6 @@ void vcpu_bsw1(struct kvm_vcpu *vcpu)
 	}
 }
 
-
-
-
 void vcpu_rfi(struct kvm_vcpu *vcpu)
 {
 	unsigned long ifs, psr;
@@ -1796,7 +1765,6 @@ void vcpu_rfi(struct kvm_vcpu *vcpu)
 	regs->cr_iip = VCPU(vcpu, iip);
 }
 
-
 /* VPSR can't keep track of below bits of guest PSR
  * This function gets guest PSR
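
The one behavioural change in this hunk set is in vcpu_set_itc(): the ITC-offset broadcast now stops at the VM's actual online vCPU count rather than the compile-time KVM_MAX_VCPUS ceiling. The standalone C sketch below only models that loop-bound idea; the toy_vm/toy_vcpu types, set_itc_offset() helper, and MAX_VCPUS constant are illustrative stand-ins, not the kernel's own structures.

/*
 * Standalone sketch (not kernel code): broadcast one timer offset to
 * every *online* vCPU, mirroring the new loop bound in vcpu_set_itc().
 */
#include <stdio.h>

#define MAX_VCPUS 4              /* stand-in for a static KVM_MAX_VCPUS-style cap */

struct toy_vcpu {
	int  id;
	long itc_offset;         /* per-vCPU offset applied to the cycle counter */
};

struct toy_vm {
	int             online_vcpus;       /* vCPUs that actually exist in this VM */
	struct toy_vcpu vcpu[MAX_VCPUS];
};

/* Only touch vCPUs the VM has brought online, not the whole static array. */
static void set_itc_offset(struct toy_vm *vm, long offset)
{
	for (int i = 0; i < vm->online_vcpus; i++)
		vm->vcpu[i].itc_offset = offset;
}

int main(void)
{
	struct toy_vm vm = { .online_vcpus = 2 };

	set_itc_offset(&vm, 1234);
	for (int i = 0; i < vm.online_vcpus; i++)
		printf("vcpu %d offset %ld\n", i, vm.vcpu[i].itc_offset);
	return 0;
}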