author | Chris Metcalf <cmetcalf@tilera.com> | 2012-04-28 18:51:43 -0400
committer | Chris Metcalf <cmetcalf@tilera.com> | 2012-05-16 16:01:16 -0400
commit | fc327e268fbef08e129ad51aa3a7113ee9bc6ba5 (patch)
tree | ba75f2ac9509090c6896a4fbc6be7c3aaba1aaf6 /arch/tile/kernel/intvec_32.S
parent | 36be50515fe2aef61533b516fa2576a2c7fe7664 (diff)
arch/tile: fix up some issues in calling do_work_pending()
First, we were at risk of handling thread-info flags, in particular
do_signal(), when returning from kernel space. This could happen
after a failed kernel_execve(), or when forking a kernel thread.
The fix is to test in do_work_pending() for user_mode() and return
immediately if so; we already had this test for one of the flags,
so I just hoisted it to the top of the function.
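For context, the C side of this first fix is in do_work_pending(). Below is a minimal sketch of the hoisted check, assuming the usual thread-info flag names; the body is illustrative, not the exact arch/tile/kernel/process.c source:

```c
/*
 * Hypothetical sketch of the reworked do_work_pending(); not the exact
 * arch/tile source.  The point is the user_mode() test hoisted to the
 * top of the function.
 */
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/thread_info.h>

int do_work_pending(struct pt_regs *regs, u32 thread_info_flags)
{
	/* Returning to kernel space (e.g. after a failed kernel_execve()
	 * or when forking a kernel thread): nothing to do. */
	if (!user_mode(regs))
		return 0;

	if (thread_info_flags & _TIF_NEED_RESCHED)
		schedule();
	if (thread_info_flags & _TIF_SIGPENDING)
		do_signal(regs);
	/* ... remaining flag bits handled similarly ... */

	/* A nonzero return asks the assembly caller to re-check the flags. */
	return 0;
}
```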
Second, if a ptraced process updated the callee-saved registers
in the ptregs struct and then processed another thread-info flag, we
would overwrite the modifications with the original callee-saved
registers. To fix this, we add a register to note if we've already
saved the registers once, and skip doing it on additional passes
through the loop. To avoid a performance hit from the couple of
extra instructions involved, I modified the GET_THREAD_INFO() macro
to be guaranteed to be one instruction, then bundled it with adjacent
instructions, yielding an overall net savings.
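Expressed as C-like control flow, the reworked return path behaves roughly as follows. This is purely an illustration of the loop in the assembly diff below: the roles of r32 and r33 match the diff, while the helper names and the work-flag mask are stand-ins, not real kernel APIs.

```c
/* Illustrative C rendering of the interrupt_return work loop; the real
 * code is the Tile assembly in the diff below. */
#include <linux/ptrace.h>
#include <linux/thread_info.h>

/* Stand-in declarations for this sketch only. */
void irq_disable_for_return(void);
void push_extra_callee_saves(struct pt_regs *regs);
int do_work_pending(struct pt_regs *regs, u32 thread_info_flags);

static void interrupt_return_work_loop(struct thread_info *ti,
				       struct pt_regs *regs)
{
	bool pushed = false;			/* plays the role of r33 */

	for (;;) {
		irq_disable_for_return();	/* stand-in helper */
		u32 flags = ti->flags;		/* ti plays the role of r32 */
		if (!(flags & _TIF_ALLWORK_MASK))	/* assumed mask name */
			break;			/* resume userspace */

		/* Spill the extra callee-saved registers into pt_regs only
		 * once; later iterations must not clobber values that
		 * ptrace or signal setup wrote there in the meantime. */
		if (!pushed) {
			push_extra_callee_saves(regs);
			pushed = true;
		}

		if (!do_work_pending(regs, flags))
			break;			/* no more work pending */
	}
}
```

The single-instruction GET_THREAD_INFO() has no direct C analogue; in the assembly it is simply bundled with a neighbouring instruction, which is where the net savings come from.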
Reported-by: Al Viro <viro@ZenIV.linux.org.uk>
Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
Diffstat (limited to 'arch/tile/kernel/intvec_32.S')
-rw-r--r-- | arch/tile/kernel/intvec_32.S | 41
1 file changed, 28 insertions, 13 deletions
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index 5d56a1ef5ba..6943515100f 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -839,6 +839,18 @@ STD_ENTRY(interrupt_return)
 	FEEDBACK_REENTER(interrupt_return)
 
 	/*
+	 * Use r33 to hold whether we have already loaded the callee-saves
+	 * into ptregs.  We don't want to do it twice in this loop, since
+	 * then we'd clobber whatever changes are made by ptrace, etc.
+	 * Get base of stack in r32.
+	 */
+	{
+	 GET_THREAD_INFO(r32)
+	 movei  r33, 0
+	}
+
+.Lretry_work_pending:
+	/*
 	 * Disable interrupts so as to make sure we don't
 	 * miss an interrupt that sets any of the thread flags (like
 	 * need_resched or sigpending) between sampling and the iret.
@@ -848,9 +860,6 @@ STD_ENTRY(interrupt_return)
 	IRQ_DISABLE(r20, r21)
 	TRACE_IRQS_OFF  /* Note: clobbers registers r0-r29 */
 
-	/* Get base of stack in r32; note r30/31 are used as arguments here. */
-	GET_THREAD_INFO(r32)
-
 	/* Check to see if there is any work to do before returning to user. */
 	{
@@ -866,16 +875,18 @@ STD_ENTRY(interrupt_return)
 
 	/*
 	 * Make sure we have all the registers saved for signal
-	 * handling or single-step.  Call out to C code to figure out
-	 * exactly what we need to do for each flag bit, then if
-	 * necessary, reload the flags and recheck.
+	 * handling, notify-resume, or single-step.  Call out to C
+	 * code to figure out exactly what we need to do for each flag bit,
+	 * then if necessary, reload the flags and recheck.
 	 */
-	push_extra_callee_saves r0
 	{
 	 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
-	 jal do_work_pending
+	 bnz r33, 1f
 	}
-	bnz r0, .Lresume_userspace
+	push_extra_callee_saves r0
+	movei r33, 1
+1:	jal do_work_pending
+	bnz r0, .Lretry_work_pending
 
 	/*
 	 * In the NMI case we
@@ -1180,10 +1191,12 @@ handle_syscall:
 	add	r20, r20, tp
 	lw	r21, r20
 	addi	r21, r21, 1
-	sw	r20, r21
+	{
+	 sw	r20, r21
+	 GET_THREAD_INFO(r31)
+	}
 
 	/* Trace syscalls, if requested. */
-	GET_THREAD_INFO(r31)
 	addi	r31, r31, THREAD_INFO_FLAGS_OFFSET
 	lw	r30, r31
 	andi	r30, r30, _TIF_SYSCALL_TRACE
@@ -1362,7 +1375,10 @@ handle_ill:
 3:
 	/* set PC and continue */
 	lw	r26, r24
-	sw	r28, r26
+	{
+	 sw	r28, r26
+	 GET_THREAD_INFO(r0)
+	}
 
 	/*
 	 * Clear TIF_SINGLESTEP to prevent recursion if we execute an ill.
@@ -1370,7 +1386,6 @@ handle_ill:
 	 * need to clear it here and can't really impose on all other arches.
 	 * So what's another write between friends?
 	 */
-	GET_THREAD_INFO(r0)
 	addi	r1, r0, THREAD_INFO_FLAGS_OFFSET
 	{