author     David Gibson <david@gibson.dropbear.id.au>   2005-10-27 16:27:25 +1000
committer  Paul Mackerras <paulus@samba.org>            2005-10-27 20:48:50 +1000
commit     25c8a78b1e00ac0cc640677eda78b462c2cd4c6e (patch)
tree       a0044f8b2b557799a8cb3346b590fcd3a8507ed7 /arch/ppc64/kernel/head.S
parent     fda262b8978d0089758ef9444508434c74113a61 (diff)
[PATCH] powerpc: Fix handling of fpscr on 64-bit
The recent merge of fpu.S broke the handling of fpscr for ARCH=powerpc and CONFIG_PPC64=y. FP registers could be corrupted, leading to strange random application crashes.

The confusion arises because the thread_struct has (and requires) a 64-bit area to save the fpscr, since we use load/store double instructions to get it into/out of the FPU. However, only the low 32 bits are actually used, so we want to treat it as a 32-bit quantity when manipulating its bits, to avoid extra load/stores on 32-bit. This patch replaces the current definition with a structure of two 32-bit quantities (pad and val), to clarify things as much as possible. The 'val' field is used when manipulating bits; the structure itself is used when obtaining the address for loading/storing the value from/to the FPU.

While we're at it, consolidate the 4 (!) almost identical versions of cvt_fd() and cvt_df() (arch/ppc/kernel/misc.S, arch/ppc64/kernel/misc.S, arch/powerpc/kernel/misc_32.S, arch/powerpc/kernel/misc_64.S) into a single version in fpu.S. The new version takes a pointer to thread_struct and applies the correct offset itself, rather than taking a pointer to the fpscr field, again to avoid confusion as to which is the correct field to use.

Finally, this patch makes ARCH=ppc64 also use the consolidated fpu.S code, which it previously did not.

Built for G5 (ARCH=ppc64 and ARCH=powerpc), 32-bit powermac (ARCH=ppc and ARCH=powerpc) and Walnut (ARCH=ppc, CONFIG_MATH_EMULATION=y). Booted on G5 (ARCH=powerpc) and things which previously fell over no longer do.

Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
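For reference, the fpscr layout described above can be sketched in C roughly as follows. This is an illustrative sketch reconstructed from the commit message, not the exact kernel definition; the struct name and surrounding fields are assumptions.

/* Sketch of the fpscr change described in the message above.
 * The two 32-bit halves stay contiguous so lfd/stfd can still move the
 * whole doubleword in and out of the FPU, while C code manipulates only
 * the low 32 bits (which hold the status/control bits) via .val.
 */
struct thread_struct_sketch {
	double	fpr[32];		/* FP registers (illustrative placement) */
	struct {
		unsigned int pad;	/* upper 32 bits of the doubleword, unused */
		unsigned int val;	/* lower 32 bits: the actual FPSCR bits */
	} fpscr;
};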
Diffstat (limited to 'arch/ppc64/kernel/head.S')
-rw-r--r--   arch/ppc64/kernel/head.S   59
1 file changed, 2 insertions(+), 57 deletions(-)
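The consolidated cvt_fd()/cvt_df() interface mentioned above takes a pointer to the whole thread_struct and applies the fpscr offset itself. A rough C-level view (prototypes and call site assumed from the description, not copied from a kernel header):

/* Assumed prototypes for the consolidated fpu.S helpers: each converter
 * receives the task's thread_struct and locates fpscr itself, instead of
 * being handed a pointer to the fpscr field directly. */
extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
extern void cvt_df(double *from, float *to, struct thread_struct *thread);

/* Hypothetical call site inside the kernel (e.g. alignment fixup code):
 * convert using the current task's FP state. */
static void demo_convert(float *src, double *dst)
{
	cvt_fd(src, dst, &current->thread);
}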
diff --git a/arch/ppc64/kernel/head.S b/arch/ppc64/kernel/head.S
index f58af9c246c..929f9f42cf7 100644
--- a/arch/ppc64/kernel/head.S
+++ b/arch/ppc64/kernel/head.S
@@ -81,7 +81,7 @@ _stext:
_GLOBAL(__start)
/* NOP this out unconditionally */
BEGIN_FTR_SECTION
- b .__start_initialization_multiplatform
+ b .__start_initialization_multiplatform
END_FTR_SECTION(0, 1)
#endif /* CONFIG_PPC_MULTIPLATFORM */
@@ -747,6 +747,7 @@ bad_stack:
* any task or sent any task a signal, you should use
* ret_from_except or ret_from_except_lite instead of this.
*/
+ .globl fast_exception_return
fast_exception_return:
ld r12,_MSR(r1)
ld r11,_NIP(r1)
@@ -858,62 +859,6 @@ fp_unavailable_common:
bl .kernel_fp_unavailable_exception
BUG_OPCODE
-/*
- * load_up_fpu(unused, unused, tsk)
- * Disable FP for the task which had the FPU previously,
- * and save its floating-point registers in its thread_struct.
- * Enables the FPU for use in the kernel on return.
- * On SMP we know the fpu is free, since we give it up every
- * switch (ie, no lazy save of the FP registers).
- * On entry: r13 == 'current' && last_task_used_math != 'current'
- */
-_STATIC(load_up_fpu)
- mfmsr r5 /* grab the current MSR */
- ori r5,r5,MSR_FP
- mtmsrd r5 /* enable use of fpu now */
- isync
-/*
- * For SMP, we don't do lazy FPU switching because it just gets too
- * horrendously complex, especially when a task switches from one CPU
- * to another. Instead we call giveup_fpu in switch_to.
- *
- */
-#ifndef CONFIG_SMP
- ld r3,last_task_used_math@got(r2)
- ld r4,0(r3)
- cmpdi 0,r4,0
- beq 1f
- /* Save FP state to last_task_used_math's THREAD struct */
- addi r4,r4,THREAD
- SAVE_32FPRS(0, r4)
- mffs fr0
- stfd fr0,THREAD_FPSCR(r4)
- /* Disable FP for last_task_used_math */
- ld r5,PT_REGS(r4)
- ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
- li r6,MSR_FP|MSR_FE0|MSR_FE1
- andc r4,r4,r6
- std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
- /* enable use of FP after return */
- ld r4,PACACURRENT(r13)
- addi r5,r4,THREAD /* Get THREAD */
- ld r4,THREAD_FPEXC_MODE(r5)
- ori r12,r12,MSR_FP
- or r12,r12,r4
- std r12,_MSR(r1)
- lfd fr0,THREAD_FPSCR(r5)
- mtfsf 0xff,fr0
- REST_32FPRS(0, r5)
-#ifndef CONFIG_SMP
- /* Update last_task_used_math to 'current' */
- subi r4,r5,THREAD /* Back to 'current' */
- std r4,0(r3)
-#endif /* CONFIG_SMP */
- /* restore registers and return */
- b fast_exception_return
-
.align 7
.globl altivec_unavailable_common
altivec_unavailable_common: