| author | Chris Metcalf <cmetcalf@tilera.com> | 2010-12-14 16:07:25 -0500 |
|---|---|---|
| committer | Chris Metcalf <cmetcalf@tilera.com> | 2010-12-17 16:59:29 -0500 |
| commit | 81711cee933599fa114abb0d258d8bbabef8adfb (patch) | |
| tree | cef17c099689b15b3b8bb29b0eae84acd474ed8a /arch/tile/kernel/intvec_32.S | |
| parent | bc4cf2bb271b2d557fc510426755da786fc985be (diff) | |
arch/tile: handle rt_sigreturn() more cleanly
The current tile rt_sigreturn() syscall follows the common idiom: load pt_regs
with all the registers saved at the time of the signal, then, anticipating that
the ABI "return value" register (r0) will be clobbered as we return from the
syscall, set the rt_sigreturn return value to whatever value happens to be in
the pt_regs slot for r0.
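In C terms, that idiom looks roughly like the sketch below. This is illustrative
only, not the actual arch/tile source; names such as
restore_sigcontext_from_frame() and regs->regs[0] are assumptions modeled on the
usual kernel signal-return pattern.

```c
/* Illustrative sketch of the common rt_sigreturn idiom (not the real arch/tile code). */
long sys_rt_sigreturn(struct pt_regs *regs)
{
	/*
	 * Reload every register saved at signal-delivery time from the
	 * user-space signal frame back into pt_regs (details omitted).
	 */
	if (restore_sigcontext_from_frame(regs))	/* hypothetical helper */
		force_sig(SIGSEGV, current);

	/*
	 * The syscall-exit path will overwrite r0 with this function's
	 * return value, so return the r0 that was just restored and let
	 * that overwrite be a no-op.
	 */
	return regs->regs[0];
}
```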
However, this breaks in our 64-bit kernel when running "compat" tasks,
since we always sign-extend the "return value" register to properly
handle returned pointers that are in the upper 2GB of the 32-bit compat
address space. Doing this to the sigreturn path then causes occasional
random corruption of the 64-bit r0 register.
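To see why the sign extension is harmful, here is a minimal user-space
illustration in plain C (nothing tile-specific): once the low 32 bits of the
restored r0 happen to have their top bit set, sign-extending them rewrites the
upper half of the 64-bit register.

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t saved_r0 = 0x123456789abcdef0ULL;	/* arbitrary value reloaded into r0 */
	int32_t  as_compat = (int32_t)saved_r0;		/* exit path treats it as a 32-bit result */
	uint64_t sign_extended = (uint64_t)(int64_t)as_compat;

	/* prints 0x123456789abcdef0 -> 0xffffffff9abcdef0 */
	printf("%#llx -> %#llx\n",
	       (unsigned long long)saved_r0,
	       (unsigned long long)sign_extended);
	return 0;
}
```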
Instead, we stop doing the crazy "load the return-value register"
hack in sigreturn. We already have some sigreturn-specific assembly
code that we use to pass the pt_regs pointer to C code. We extend that
code to also set the link register to point to a spot a few instructions
after the usual syscall return address so we don't clobber the saved r0.
Now it no longer matters what the rt_sigreturn syscall returns, and the
pt_regs structure can be cleanly and completely reloaded.
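Concretely, in the assembly below, the sigreturn stub executes
"addli lr, lr, .Lsyscall_sigreturn_skip - .Lhandle_syscall_link" on entry.
Since the "jalr r20" in handle_syscall has just set lr to
.Lhandle_syscall_link, the new value is .Lhandle_syscall_link +
(.Lsyscall_sigreturn_skip - .Lhandle_syscall_link) = .Lsyscall_sigreturn_skip,
i.e. a return address past the "sw r29, r0" store, so the r0 value reloaded
from pt_regs is left intact.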
Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
Diffstat (limited to 'arch/tile/kernel/intvec_32.S')
-rw-r--r-- | arch/tile/kernel/intvec_32.S | 24 |
1 file changed, 21 insertions, 3 deletions
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index f5821626247..5eed4a02bf6 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -1342,8 +1342,8 @@ handle_syscall:
 	lw      r20, r20
 
 	/* Jump to syscall handler. */
-	jalr    r20; .Lhandle_syscall_link:
-	FEEDBACK_REENTER(handle_syscall)
+	jalr    r20
+.Lhandle_syscall_link:   /* value of "lr" after "jalr r20" above */
 
 	/*
 	 * Write our r0 onto the stack so it gets restored instead
@@ -1352,6 +1352,9 @@ handle_syscall:
 	PTREGS_PTR(r29, PTREGS_OFFSET_REG(0))
 	sw      r29, r0
 
+.Lsyscall_sigreturn_skip:
+	FEEDBACK_REENTER(handle_syscall)
+
 	/* Do syscall trace again, if requested. */
 	lw      r30, r31
 	andi    r30, r30, _TIF_SYSCALL_TRACE
@@ -1536,9 +1539,24 @@ STD_ENTRY_LOCAL(bad_intr)
 	};                                              \
 	STD_ENDPROC(_##x)
 
+/*
+ * Special-case sigreturn to not write r0 to the stack on return.
+ * This is technically more efficient, but it also avoids difficulties
+ * in the 64-bit OS when handling 32-bit compat code, since we must not
+ * sign-extend r0 for the sigreturn return-value case.
+ */
+#define PTREGS_SYSCALL_SIGRETURN(x, reg)                \
+	STD_ENTRY(_##x);                                \
+	addli   lr, lr, .Lsyscall_sigreturn_skip - .Lhandle_syscall_link; \
+	{                                               \
+	 PTREGS_PTR(reg, PTREGS_OFFSET_BASE);           \
+	 j      x                                       \
+	};                                              \
+	STD_ENDPROC(_##x)
+
 PTREGS_SYSCALL(sys_execve, r3)
 PTREGS_SYSCALL(sys_sigaltstack, r2)
-PTREGS_SYSCALL(sys_rt_sigreturn, r0)
+PTREGS_SYSCALL_SIGRETURN(sys_rt_sigreturn, r0)
 PTREGS_SYSCALL(sys_cmpxchg_badaddr, r1)
 
 /* Save additional callee-saves to pt_regs, put address in r4 and jump. */