author	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2013-10-30 09:47:01 +1100
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2013-10-30 09:47:01 +1100
commit	bcd77ffc5d8ff3c9056927fb7aa855164b982053 (patch)
tree	73813e0c1330279b169f376827b9b9ecf5e8ba35 /arch/powerpc/kernel
parent	3ad26e5c4459d3793ad65bc8929037c70515df83 (diff)
parent	955c1cab809edfb5429603c68493363074ac20cf (diff)
Merge branch 'for-kvm' into next
Add Paul's fix for 32-bit register corruption in the new FP code
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--	arch/powerpc/kernel/fpu.S	14
-rw-r--r--	arch/powerpc/kernel/vector.S	15
2 files changed, 17 insertions, 12 deletions
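
For context: the for-kvm branch merged here carries Paul Mackerras' fix (commit 955c1cab) for a regression from the FP/VSX state rework. On 32-bit, load_up_fpu and load_up_altivec had started using r7 and r8 as scratch registers, but both routines return to userspace via fast_exception_return, which restores only a subset of the volatile GPRs; per the comments added below, code on this path may only clobber r3 - r6, r10 and r11. Whatever the handlers left in r7/r8 therefore leaked into the resumed user context. A minimal sketch of the failure pattern (illustrative, not the literal kernel code):

	/* old 32-bit lazy-VMX load, before this fix: */
	addi	r7,r4,THREAD_VRSTATE	/* kernel pointer parked in r7 */
	SAVE_32VRS(0,r5,r7)		/* r7 used as the vr_state base */
	...
	blr				/* back to the exception stub, which does
					 * b fast_exception_return -- r7/r8 are
					 * not reloaded from the exception frame,
					 * so userspace resumes with the kernel
					 * pointer still sitting in r7 */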
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index 4dca05e91e9..f7f5b8bed68 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -106,6 +106,8 @@ _GLOBAL(store_fp_state)
  * and save its floating-point registers in its thread_struct.
  * Load up this task's FP registers from its thread_struct,
  * enable the FPU for the current task and return to the task.
+ * Note that on 32-bit this can only use registers that will be
+ * restored by fast_exception_return, i.e. r3 - r6, r10 and r11.
  */
 _GLOBAL(load_up_fpu)
 	mfmsr	r5
@@ -131,10 +133,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
 	beq	1f
 	toreal(r4)
 	addi	r4,r4,THREAD		/* want last_task_used_math->thread */
-	addi	r8,r4,THREAD_FPSTATE
-	SAVE_32FPVSRS(0, R5, R8)
+	addi	r10,r4,THREAD_FPSTATE
+	SAVE_32FPVSRS(0, R5, R10)
 	mffs	fr0
-	stfd	fr0,FPSTATE_FPSCR(r8)
+	stfd	fr0,FPSTATE_FPSCR(r10)
 	PPC_LL	r5,PT_REGS(r4)
 	toreal(r5)
 	PPC_LL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
@@ -157,10 +159,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
 	or	r12,r12,r4
 	std	r12,_MSR(r1)
 #endif
-	addi	r7,r5,THREAD_FPSTATE
-	lfd	fr0,FPSTATE_FPSCR(r7)
+	addi	r10,r5,THREAD_FPSTATE
+	lfd	fr0,FPSTATE_FPSCR(r10)
 	MTFSF_L(fr0)
-	REST_32FPVSRS(0, R4, R7)
+	REST_32FPVSRS(0, R4, R10)
 #ifndef CONFIG_SMP
 	subi	r4,r5,THREAD
 	fromreal(r4)
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index eacda4eea2d..0458a9aaba9 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -64,6 +64,9 @@ _GLOBAL(store_vr_state)
  * Enables the VMX for use in the kernel on return.
  * On SMP we know the VMX is free, since we give it up every
  * switch (ie, no lazy save of the vector registers).
+ *
+ * Note that on 32-bit this can only use registers that will be
+ * restored by fast_exception_return, i.e. r3 - r6, r10 and r11.
  */
 _GLOBAL(load_up_altivec)
 	mfmsr	r5			/* grab the current MSR */
@@ -89,11 +92,11 @@ _GLOBAL(load_up_altivec)
 	/* Save VMX state to last_task_used_altivec's THREAD struct */
 	toreal(r4)
 	addi	r4,r4,THREAD
-	addi	r7,r4,THREAD_VRSTATE
-	SAVE_32VRS(0,r5,r7)
+	addi	r6,r4,THREAD_VRSTATE
+	SAVE_32VRS(0,r5,r6)
 	mfvscr	vr0
 	li	r10,VRSTATE_VSCR
-	stvx	vr0,r10,r7
+	stvx	vr0,r10,r6
 	/* Disable VMX for last_task_used_altivec */
 	PPC_LL	r5,PT_REGS(r4)
 	toreal(r5)
@@ -125,13 +128,13 @@ _GLOBAL(load_up_altivec)
 	oris	r12,r12,MSR_VEC@h
 	std	r12,_MSR(r1)
 #endif
-	addi	r7,r5,THREAD_VRSTATE
+	addi	r6,r5,THREAD_VRSTATE
 	li	r4,1
 	li	r10,VRSTATE_VSCR
 	stw	r4,THREAD_USED_VR(r5)
-	lvx	vr0,r10,r7
+	lvx	vr0,r10,r6
 	mtvscr	vr0
-	REST_32VRS(0,r4,r7)
+	REST_32VRS(0,r4,r6)
 #ifndef CONFIG_SMP
 	/* Update last_task_used_altivec to 'current' */
 	subi	r4,r5,THREAD		/* Back to 'current' */
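
For reference, the call site that imposes this register budget on 32-bit looks roughly like the following (paraphrased from the head_32.S exception stubs of this era; the label and surrounding details are approximate, not quoted):

	AltiVecUnavailable:
		EXCEPTION_PROLOG
		beq	1f			/* came from the kernel: slow path */
		bl	load_up_altivec		/* from user: lazily load VMX state */
		b	fast_exception_return	/* partial GPR restore only */
	1:	...

This also explains the choice of replacement registers: in vector.S r10 is already occupied by the VSCR offset (li r10,VRSTATE_VSCR), so the vr_state pointer moves from r7 to r6, while in fpu.S, where r10 is free, the fp_state pointer moves from r8/r7 to r10. Both choices stay inside the r3 - r6, r10, r11 set documented in the new comments.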