| author | Linus Torvalds <torvalds@linux-foundation.org> | 2013-11-12 14:34:19 +0900 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-11-12 14:34:19 +0900 |
| commit | 66a173b926891023e34e78cb32f4681d19777e01 (patch) | |
| tree | e6018f50fbceea7c07e6e27368ee817f9adb34f2 /arch/powerpc/kernel/vector.S | |
| parent | 11db81a59d0b2e563e30512cd76f23d0db384780 (diff) | |
| parent | 0c4888ef1d8a8b82c29075ce7e257ff795af15c7 (diff) | |
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
Pull powerpc updates from Benjamin Herrenschmidt:
"The bulk of this is LE updates. One should now be able to build an LE
kernel and even run some things in it.
I'm still sitting on a handful of patches to enable the new ABI that I
*might* still send this merge window around, but due to the
uncertainty (they are pretty fresh) I want to keep them separate.
Other notable changes are some infrastructure bits to better handle
PCI pass-through under KVM, some bits and pieces added to the new
PowerNV platform support such as access to the CPU SCOM bus via sysfs,
and support for EEH error handling on PHB3 (Power8 PCIe).
We also grew arch_get_random_long() for both pseries and powernv when
running on P7+ and P8, exploiting the HW rng.
And finally, various embedded updates from Freescale"
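The arch_get_random_long() mentioned above is the arch-level hook the kernel's random-number plumbing probes for a hardware RNG. A minimal sketch of its likely shape on powerpc, assuming the machdep-callback style this series uses (ppc_md.get_random_long is the assumed platform hook; illustrative, not the verbatim merged code):

```c
/* Illustrative sketch, not the verbatim merged code. */

/* Minimal stand-in for powerpc's machine-descriptor hook table
 * (the real one lives in asm/machdep.h as struct machdep_calls). */
struct machdep_calls {
	int (*get_random_long)(unsigned long *v);
};
extern struct machdep_calls ppc_md;

/*
 * Fill *v with a hardware-generated random long.  Returns nonzero on
 * success, 0 when the platform registered no hardware RNG, in which
 * case callers fall back to other entropy sources.
 */
static inline int arch_get_random_long(unsigned long *v)
{
	if (ppc_md.get_random_long)	/* set by pseries/powernv setup */
		return ppc_md.get_random_long(v);
	return 0;
}
```

The indirection lets pseries and powernv each register a callback only when their hardware RNG is actually present, so every other platform transparently reports "no hardware entropy".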
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (154 commits)
powerpc: Fix fatal SLB miss when restoring PPR
powerpc/powernv: Reserve the correct PE number
powerpc/powernv: Add PE to its own PELTV
powerpc/powernv: Add support for indirect XSCOM via debugfs
powerpc/scom: Improve debugfs interface
powerpc/scom: Enable 64-bit addresses
powerpc/boot: Properly handle the base "of" boot wrapper
powerpc/bpf: Support MOD operation
powerpc/bpf: Fix DIVWU instruction opcode
of: Move definition of of_find_next_cache_node into common code.
powerpc: Remove big endianness assumption in of_find_next_cache_node
powerpc/tm: Remove interrupt disable in __switch_to()
powerpc: word-at-a-time optimization for 64-bit Little Endian
powerpc/bpf: BPF JIT compiler for 64-bit Little Endian
powerpc: Only save/restore SDR1 if in hypervisor mode
powerpc/pmu: Fix ADB_PMU_LED_IDE dependencies
powerpc/nvram: Fix endian issue when using the partition length
powerpc/nvram: Fix endian issue when reading the NVRAM size
powerpc/nvram: Scan partitions only once
powerpc/mpc512x: remove unnecessary #if
...
Diffstat (limited to 'arch/powerpc/kernel/vector.S')
| -rw-r--r-- | arch/powerpc/kernel/vector.S | 80 |
1 file changed, 44 insertions(+), 36 deletions(-)
```diff
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index 9e20999aaef..0458a9aaba9 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -8,29 +8,6 @@
 #include <asm/ptrace.h>
 
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-/*
- * Wrapper to call load_up_altivec from C.
- * void do_load_up_altivec(struct pt_regs *regs);
- */
-_GLOBAL(do_load_up_altivec)
-	mflr	r0
-	std	r0, 16(r1)
-	stdu	r1, -112(r1)
-
-	subi	r6, r3, STACK_FRAME_OVERHEAD
-	/* load_up_altivec expects r12=MSR, r13=PACA, and returns
-	 * with r12 = new MSR.
-	 */
-	ld	r12,_MSR(r6)
-	GET_PACA(r13)
-	bl	load_up_altivec
-	std	r12,_MSR(r6)
-
-	ld	r0, 112+16(r1)
-	addi	r1, r1, 112
-	mtlr	r0
-	blr
-
 /* void do_load_up_transact_altivec(struct thread_struct *thread)
  *
  * This is similar to load_up_altivec but for the transactional version of the
@@ -46,10 +23,11 @@ _GLOBAL(do_load_up_transact_altivec)
 	li	r4,1
 	stw	r4,THREAD_USED_VR(r3)
 
-	li	r10,THREAD_TRANSACT_VSCR
+	li	r10,THREAD_TRANSACT_VRSTATE+VRSTATE_VSCR
 	lvx	vr0,r10,r3
 	mtvscr	vr0
-	REST_32VRS_TRANSACT(0,r4,r3)
+	addi	r10,r3,THREAD_TRANSACT_VRSTATE
+	REST_32VRS(0,r4,r10)
 
 	/* Disable VEC again. */
 	MTMSRD(r6)
@@ -59,12 +37,36 @@ _GLOBAL(do_load_up_transact_altivec)
 #endif
 
 /*
- * load_up_altivec(unused, unused, tsk)
+ * Load state from memory into VMX registers including VSCR.
+ * Assumes the caller has enabled VMX in the MSR.
+ */
+_GLOBAL(load_vr_state)
+	li	r4,VRSTATE_VSCR
+	lvx	vr0,r4,r3
+	mtvscr	vr0
+	REST_32VRS(0,r4,r3)
+	blr
+
+/*
+ * Store VMX state into memory, including VSCR.
+ * Assumes the caller has enabled VMX in the MSR.
+ */
+_GLOBAL(store_vr_state)
+	SAVE_32VRS(0, r4, r3)
+	mfvscr	vr0
+	li	r4, VRSTATE_VSCR
+	stvx	vr0, r4, r3
+	blr
+
+/*
  * Disable VMX for the task which had it previously,
  * and save its vector registers in its thread_struct.
  * Enables the VMX for use in the kernel on return.
  * On SMP we know the VMX is free, since we give it up every
  * switch (ie, no lazy save of the vector registers).
+ *
+ * Note that on 32-bit this can only use registers that will be
+ * restored by fast_exception_return, i.e. r3 - r6, r10 and r11.
  */
 _GLOBAL(load_up_altivec)
 	mfmsr	r5	/* grab the current MSR */
@@ -90,10 +92,11 @@ _GLOBAL(load_up_altivec)
 	/* Save VMX state to last_task_used_altivec's THREAD struct */
 	toreal(r4)
 	addi	r4,r4,THREAD
-	SAVE_32VRS(0,r5,r4)
+	addi	r6,r4,THREAD_VRSTATE
+	SAVE_32VRS(0,r5,r6)
 	mfvscr	vr0
-	li	r10,THREAD_VSCR
-	stvx	vr0,r10,r4
+	li	r10,VRSTATE_VSCR
+	stvx	vr0,r10,r6
 	/* Disable VMX for last_task_used_altivec */
 	PPC_LL	r5,PT_REGS(r4)
 	toreal(r5)
@@ -125,12 +128,13 @@ _GLOBAL(load_up_altivec)
 	oris	r12,r12,MSR_VEC@h
 	std	r12,_MSR(r1)
 #endif
+	addi	r6,r5,THREAD_VRSTATE
 	li	r4,1
-	li	r10,THREAD_VSCR
+	li	r10,VRSTATE_VSCR
 	stw	r4,THREAD_USED_VR(r5)
-	lvx	vr0,r10,r5
+	lvx	vr0,r10,r6
 	mtvscr	vr0
-	REST_32VRS(0,r4,r5)
+	REST_32VRS(0,r4,r6)
 #ifndef CONFIG_SMP
 	/* Update last_task_used_altivec to 'current' */
 	subi	r4,r5,THREAD	/* Back to 'current' */
@@ -165,12 +169,16 @@ _GLOBAL(giveup_altivec)
 	PPC_LCMPI	0,r3,0
 	beqlr		/* if no previous owner, done */
 	addi	r3,r3,THREAD	/* want THREAD of task */
+	PPC_LL	r7,THREAD_VRSAVEAREA(r3)
 	PPC_LL	r5,PT_REGS(r3)
-	PPC_LCMPI	0,r5,0
-	SAVE_32VRS(0,r4,r3)
+	PPC_LCMPI	0,r7,0
+	bne	2f
+	addi	r7,r3,THREAD_VRSTATE
+2:	PPC_LCMPI	0,r5,0
+	SAVE_32VRS(0,r4,r7)
 	mfvscr	vr0
-	li	r4,THREAD_VSCR
-	stvx	vr0,r4,r3
+	li	r4,VRSTATE_VSCR
+	stvx	vr0,r4,r7
 	beq	1f
 	PPC_LL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
 #ifdef CONFIG_VSX
```
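The thread running through these hunks: the series moves the AltiVec register image out of loose thread_struct fields (the old THREAD_VSCR and THREAD_TRANSACT_VSCR offsets) into a self-contained VR state block, addressed via THREAD_VRSTATE / THREAD_TRANSACT_VRSTATE, with VRSTATE_VSCR locating VSCR inside that block. It also adds load_vr_state/store_vr_state as reusable save/restore helpers and teaches giveup_altivec to save into an alternate buffer when THREAD_VRSAVEAREA holds a non-NULL pointer (the `bne 2f` path), falling back to the task's own state block otherwise. A rough C sketch of the layout those asm offsets describe, assuming the thread_vr_state naming used elsewhere in this series (illustrative; field names may not match the merged headers exactly):

```c
/* Illustrative sketch, not the verbatim merged headers. */
#include <linux/types.h>

/* One 128-bit vector register image, 16-byte aligned for lvx/stvx. */
typedef struct {
	__u32 u[4];
} __attribute__((aligned(16))) vector128;

/*
 * Self-contained VMX register image: 32 vector registers followed by
 * VSCR.  SAVE_32VRS/REST_32VRS walk vr[]; VSCR sits at a fixed offset
 * so the asm can reach it with a single li/lvx (or li/stvx) pair.
 */
struct thread_vr_state {
	vector128 vr[32];
	vector128 vscr;
};

/*
 * Offsets the assembly uses (generated into asm-offsets.h):
 *   VRSTATE_VSCR      = offsetof(struct thread_vr_state, vscr) = 512
 *   THREAD_VRSTATE    = offset of the task's own vr_state within
 *                       thread_struct
 *   THREAD_VRSAVEAREA = offset of an assumed vr_save_area pointer;
 *                       when non-NULL, giveup_altivec saves there
 *                       instead of into the task's own vr_state.
 */
```

Packaging the registers and VSCR into one structure is what makes load_vr_state/store_vr_state possible: callers pass a single base pointer in r3 rather than a thread_struct plus a scatter of per-field offsets, and the same helpers can serve the normal, transactional, and save-area copies of the state.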