author		Paul Mackerras <paulus@samba.org>	2013-10-15 20:43:04 +1100
committer	Alexander Graf <agraf@suse.de>		2014-01-09 10:15:03 +0100
commit		595e4f7e697e2e6fb252f6be6a83d5b9460b59a3 (patch)
tree		29b52d82c5eda550b77a9762a9840b5473e0ac50 /arch/powerpc
parent		99dae3bad28d8fdd32b7bfdd5e2ec7bb2d4d019d (diff)
KVM: PPC: Book3S HV: Use load/store_fp_state functions in HV guest entry/exit
This modifies kvmppc_load_fp and kvmppc_save_fp to use the generic
FP/VSX and VMX load/store functions instead of open-coding the
FP/VSX/VMX load/store instructions. Since kvmppc_load/save_fp don't
follow C calling conventions, we make them private symbols within
book3s_hv_rmhandlers.S.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/kernel/asm-offsets.c	2
-rw-r--r--	arch/powerpc/kvm/book3s_hv_rmhandlers.S	86
2 files changed, 22 insertions(+), 66 deletions(-)
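
[Editor's sketch, not part of the patch: a rough C-level rendering of what
the reworked kvmppc_save_fp sequence does after this change. The helper
names store_fp_state/store_vr_state and the vcpu->arch.fp / arch.vr /
arch.vrsave fields come from the patch itself; the function name
kvmppc_save_fp_sketch and the exact headers are illustrative. The real
routine stays in assembly because, as the patch's comments note, it
clobbers r30/r31 and so is not callable from C.]

	#include <linux/kvm_host.h>	/* struct kvm_vcpu */
	#include <asm/reg.h>		/* mfspr, SPRN_VRSAVE */
	#include <asm/cputable.h>	/* cpu_has_feature */

	static void kvmppc_save_fp_sketch(struct kvm_vcpu *vcpu)
	{
		/* The asm first sets MSR_FP (plus MSR_VEC/MSR_VSX where
		 * available) so the FP/VMX/VSX units can be accessed. */
		store_fp_state(&vcpu->arch.fp);		/* FPRs/VSRs + FPSCR */
	#ifdef CONFIG_ALTIVEC
		if (cpu_has_feature(CPU_FTR_ALTIVEC))
			store_vr_state(&vcpu->arch.vr);	/* VRs + VSCR */
	#endif
		/* VRSAVE is a plain SPR, still saved by hand. */
		vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
	}

kvmppc_load_fp is the mirror image, calling load_fp_state/load_vr_state
and writing VRSAVE back with mtspr.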
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 8403f9031d9..5e64c3d2149 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -426,10 +426,8 @@ int main(void)
DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
DEFINE(VCPU_VRSAVE, offsetof(struct kvm_vcpu, arch.vrsave));
DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fp.fpr));
- DEFINE(VCPU_FPSCR, offsetof(struct kvm_vcpu, arch.fp.fpscr));
#ifdef CONFIG_ALTIVEC
DEFINE(VCPU_VRS, offsetof(struct kvm_vcpu, arch.vr.vr));
- DEFINE(VCPU_VSCR, offsetof(struct kvm_vcpu, arch.vr.vscr));
#endif
DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
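
[Editor's note: the VCPU_FPSCR and VCPU_VSCR offsets can be dropped
because the generic helpers take a pointer to the whole state structure,
inside which fpscr and vscr sit at fixed offsets the helpers already
know. For orientation, the structures of that era looked approximately
like this (see arch/powerpc/include/asm/processor.h; layout hedged from
memory, not quoted from the patch):]

	struct thread_fp_state {
		u64	fpr[32][TS_FPRWIDTH] __attribute__((aligned(16)));
		u64	fpscr;		/* handled by {load,store}_fp_state */
	};

	struct thread_vr_state {
		vector128	vr[32] __attribute__((aligned(16)));
		vector128	vscr __attribute__((aligned(16)));
	};

So the assembly below only needs VCPU_FPRS and VCPU_VRS, the bases of
the two structures.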
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 47fd536fb13..acbd1d6afba 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1261,7 +1261,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
/* save FP state */
mr r3, r9
- bl .kvmppc_save_fp
+ bl kvmppc_save_fp
/* Increment yield count if they have a VPA */
ld r8, VCPU_VPA(r9) /* do they have a VPA? */
@@ -1691,7 +1691,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
std r31, VCPU_GPR(R31)(r3)
/* save FP state */
- bl .kvmppc_save_fp
+ bl kvmppc_save_fp
/*
* Take a nap until a decrementer or external interrupt occurs,
@@ -1869,8 +1869,12 @@ kvmppc_read_intr:
/*
* Save away FP, VMX and VSX registers.
* r3 = vcpu pointer
+ * N.B. r30 and r31 are volatile across this function,
+ * thus it is not callable from C.
*/
-_GLOBAL(kvmppc_save_fp)
+kvmppc_save_fp:
+ mflr r30
+ mr r31,r3
mfmsr r5
ori r8,r5,MSR_FP
#ifdef CONFIG_ALTIVEC
@@ -1885,42 +1889,17 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
mtmsrd r8
isync
-#ifdef CONFIG_VSX
-BEGIN_FTR_SECTION
- reg = 0
- .rept 32
- li r6,reg*16+VCPU_FPRS
- STXVD2X(reg,R6,R3)
- reg = reg + 1
- .endr
-FTR_SECTION_ELSE
-#endif
- reg = 0
- .rept 32
- stfd reg,reg*8+VCPU_FPRS(r3)
- reg = reg + 1
- .endr
-#ifdef CONFIG_VSX
-ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
-#endif
- mffs fr0
- stfd fr0,VCPU_FPSCR(r3)
-
+ addi r3,r3,VCPU_FPRS
+ bl .store_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
- reg = 0
- .rept 32
- li r6,reg*16+VCPU_VRS
- stvx reg,r6,r3
- reg = reg + 1
- .endr
- mfvscr vr0
- li r6,VCPU_VSCR
- stvx vr0,r6,r3
+ addi r3,r31,VCPU_VRS
+ bl .store_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
mfspr r6,SPRN_VRSAVE
stw r6,VCPU_VRSAVE(r3)
+ mtlr r30
mtmsrd r5
isync
blr
@@ -1928,9 +1907,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
/*
* Load up FP, VMX and VSX registers
* r4 = vcpu pointer
+ * N.B. r30 and r31 are volatile across this function,
+ * thus it is not callable from C.
*/
- .globl kvmppc_load_fp
kvmppc_load_fp:
+ mflr r30
+ mr r31,r4
mfmsr r9
ori r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
@@ -1945,42 +1927,18 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
mtmsrd r8
isync
- lfd fr0,VCPU_FPSCR(r4)
- MTFSF_L(fr0)
-#ifdef CONFIG_VSX
-BEGIN_FTR_SECTION
- reg = 0
- .rept 32
- li r7,reg*16+VCPU_FPRS
- LXVD2X(reg,R7,R4)
- reg = reg + 1
- .endr
-FTR_SECTION_ELSE
-#endif
- reg = 0
- .rept 32
- lfd reg,reg*8+VCPU_FPRS(r4)
- reg = reg + 1
- .endr
-#ifdef CONFIG_VSX
-ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
-#endif
-
+ addi r3,r4,VCPU_FPRS
+ bl .load_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
- li r7,VCPU_VSCR
- lvx vr0,r7,r4
- mtvscr vr0
- reg = 0
- .rept 32
- li r7,reg*16+VCPU_VRS
- lvx reg,r7,r4
- reg = reg + 1
- .endr
+ addi r3,r31,VCPU_VRS
+ bl .load_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
lwz r7,VCPU_VRSAVE(r4)
mtspr SPRN_VRSAVE,r7
+ mtlr r30
+ mr r4,r31
blr
/*