author    | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2009-06-02 21:17:37 +0000
committer | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2009-06-09 16:46:25 +1000
commit    | e821ea70f3b4873b50056a1e0f74befed1014c09 (patch)
tree      | 5c7808e9ab65af2577a05a79726fb211f673feb8 /arch/powerpc/kernel/head_64.S
parent    | d3f6204a7d65030ba92bf43a278b3f3054353e0b (diff)
powerpc: Move VMX and VSX asm code to vector.S
Currently, load_up_altivec and give_up_altivec are duplicated
between the 32-bit and 64-bit code. This patch creates a common
implementation in vector.S, moving the code out of head_32.S,
head_64.S and misc_64.S and reusing the same macros we already
use for our common implementation of load_up_fpu.

I also moved the VSX code over to vector.S, though in that case
I didn't make it build on 32-bit (yet).
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
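What makes a single copy in vector.S workable is that the 32-bit and 64-bit variants of these routines differ mainly in load/store width and in how the MSR is rewritten, and the kernel already hides those differences behind assembler macros (the same approach used for the shared load_up_fpu). The fragment below is only a minimal sketch of that idea, not the actual vector.S code: the names WORD_LL, WORD_STL, WRITE_MSR and demo_enable_vec are invented for the example, while the real code relies on the kernel's existing helpers (e.g. PPC_LL/PPC_STL and MTMSRD).

/* Sketch only: one assembly source built for both 32-bit and 64-bit
 * by hiding the word size and MSR-write form behind cpp macros,
 * instead of keeping duplicate copies in head_32.S and head_64.S.
 */
#ifdef __powerpc64__
#define WORD_LL		ld		/* 8-byte load */
#define WORD_STL	std		/* 8-byte store */
#define WRITE_MSR(r)	mtmsrd	r	/* 64-bit MSR write */
#else
#define WORD_LL		lwz		/* 4-byte load */
#define WORD_STL	stw		/* 4-byte store */
#define WRITE_MSR(r)	mtmsr	r	/* 32-bit MSR write */
#endif

	.text
	.globl	demo_enable_vec
demo_enable_vec:
	mfmsr	5			/* r5 = current MSR */
	oris	5,5,0x0200		/* set MSR[VEC] (mask 0x02000000) */
	WRITE_MSR(5)			/* expands to mtmsr or mtmsrd */
	isync				/* context-synchronise the MSR change */
	WORD_LL	6,0(3)			/* pointer-sized load through r3 */
	WORD_STL 6,0(4)			/* pointer-sized store through r4 */
	blr

Built as a .S file (so it runs through the C preprocessor), the same source assembles cleanly for either word size, which is what lets load_up_altivec live once in vector.S instead of twice in the head_*.S files.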
Diffstat (limited to 'arch/powerpc/kernel/head_64.S')
-rw-r--r-- | arch/powerpc/kernel/head_64.S | 118
1 file changed, 0 insertions, 118 deletions
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 50ef505b8fb..382495fa90b 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -844,124 +844,6 @@ unrecov_fer:
 	bl	.unrecoverable_exception
 	b	1b
 
-#ifdef CONFIG_ALTIVEC
-/*
- * load_up_altivec(unused, unused, tsk)
- * Disable VMX for the task which had it previously,
- * and save its vector registers in its thread_struct.
- * Enables the VMX for use in the kernel on return.
- * On SMP we know the VMX is free, since we give it up every
- * switch (ie, no lazy save of the vector registers).
- * On entry: r13 == 'current' && last_task_used_altivec != 'current'
- */
-_STATIC(load_up_altivec)
-	mfmsr	r5			/* grab the current MSR */
-	oris	r5,r5,MSR_VEC@h
-	mtmsrd	r5			/* enable use of VMX now */
-	isync
-
-/*
- * For SMP, we don't do lazy VMX switching because it just gets too
- * horrendously complex, especially when a task switches from one CPU
- * to another.  Instead we call giveup_altvec in switch_to.
- * VRSAVE isn't dealt with here, that is done in the normal context
- * switch code. Note that we could rely on vrsave value to eventually
- * avoid saving all of the VREGs here...
- */
-#ifndef CONFIG_SMP
-	ld	r3,last_task_used_altivec@got(r2)
-	ld	r4,0(r3)
-	cmpdi	0,r4,0
-	beq	1f
-	/* Save VMX state to last_task_used_altivec's THREAD struct */
-	addi	r4,r4,THREAD
-	SAVE_32VRS(0,r5,r4)
-	mfvscr	vr0
-	li	r10,THREAD_VSCR
-	stvx	vr0,r10,r4
-	/* Disable VMX for last_task_used_altivec */
-	ld	r5,PT_REGS(r4)
-	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-	lis	r6,MSR_VEC@h
-	andc	r4,r4,r6
-	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
-	/* Hack: if we get an altivec unavailable trap with VRSAVE
-	 * set to all zeros, we assume this is a broken application
-	 * that fails to set it properly, and thus we switch it to
-	 * all 1's
-	 */
-	mfspr	r4,SPRN_VRSAVE
-	cmpdi	0,r4,0
-	bne+	1f
-	li	r4,-1
-	mtspr	SPRN_VRSAVE,r4
-1:
-	/* enable use of VMX after return */
-	ld	r4,PACACURRENT(r13)
-	addi	r5,r4,THREAD		/* Get THREAD */
-	oris	r12,r12,MSR_VEC@h
-	std	r12,_MSR(r1)
-	li	r4,1
-	li	r10,THREAD_VSCR
-	stw	r4,THREAD_USED_VR(r5)
-	lvx	vr0,r10,r5
-	mtvscr	vr0
-	REST_32VRS(0,r4,r5)
-#ifndef CONFIG_SMP
-	/* Update last_task_used_math to 'current' */
-	subi	r4,r5,THREAD		/* Back to 'current' */
-	std	r4,0(r3)
-#endif /* CONFIG_SMP */
-	/* restore registers and return */
-	blr
-#endif /* CONFIG_ALTIVEC */
-
-#ifdef CONFIG_VSX
-/*
- * load_up_vsx(unused, unused, tsk)
- * Disable VSX for the task which had it previously,
- * and save its vector registers in its thread_struct.
- * Reuse the fp and vsx saves, but first check to see if they have
- * been saved already.
- * On entry: r13 == 'current' && last_task_used_vsx != 'current'
- */
-_STATIC(load_up_vsx)
-/* Load FP and VSX registers if they haven't been done yet */
-	andi.	r5,r12,MSR_FP
-	beql+	load_up_fpu		/* skip if already loaded */
-	andis.	r5,r12,MSR_VEC@h
-	beql+	load_up_altivec		/* skip if already loaded */
-
-#ifndef CONFIG_SMP
-	ld	r3,last_task_used_vsx@got(r2)
-	ld	r4,0(r3)
-	cmpdi	0,r4,0
-	beq	1f
-	/* Disable VSX for last_task_used_vsx */
-	addi	r4,r4,THREAD
-	ld	r5,PT_REGS(r4)
-	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-	lis	r6,MSR_VSX@h
-	andc	r6,r4,r6
-	std	r6,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
-	ld	r4,PACACURRENT(r13)
-	addi	r4,r4,THREAD		/* Get THREAD */
-	li	r6,1
-	stw	r6,THREAD_USED_VSR(r4) /* ... also set thread used vsr */
-	/* enable use of VSX after return */
-	oris	r12,r12,MSR_VSX@h
-	std	r12,_MSR(r1)
-#ifndef CONFIG_SMP
-	/* Update last_task_used_math to 'current' */
-	ld	r4,PACACURRENT(r13)
-	std	r4,0(r3)
-#endif /* CONFIG_SMP */
-	b	fast_exception_return
-#endif /* CONFIG_VSX */
 
 /*
  * Hash table stuff