Diffstat (limited to 'arch/arm/kernel/entry-common.S')
-rw-r--r--  arch/arm/kernel/entry-common.S  |  198
1 file changed, 144 insertions, 54 deletions
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 2c1db77d784..8bfa98757cd 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -48,6 +48,8 @@ work_pending:
 	beq	no_work_pending
 	mov	r0, sp				@ 'regs'
 	mov	r2, why				@ 'syscall'
+	tst	r1, #_TIF_SIGPENDING		@ delivering a signal?
+	movne	why, #0				@ prevent further restarts
 	bl	do_notify_resume
 	b	ret_slow_syscall		@ Check work again
@@ -92,75 +94,150 @@ ENDPROC(ret_from_fork)
 #define CALL(x) .long x
 
 #ifdef CONFIG_FUNCTION_TRACER
-#ifdef CONFIG_DYNAMIC_FTRACE
-ENTRY(mcount)
-	stmdb	sp!, {r0-r3, lr}
-	mov	r0, lr
-	sub	r0, r0, #MCOUNT_INSN_SIZE
+/*
+ * When compiling with -pg, gcc inserts a call to the mcount routine at the
+ * start of every function.  In mcount, apart from the function's address (in
+ * lr), we need to get hold of the function's caller's address.
+ *
+ * Older GCCs (pre-4.4) inserted a call to a routine called mcount like this:
+ *
+ *	bl	mcount
+ *
+ * These versions have the limitation that in order for the mcount routine to
+ * be able to determine the function's caller's address, an APCS-style frame
+ * pointer (which is set up with something like the code below) is required.
+ *
+ *	mov	ip, sp
+ *	push	{fp, ip, lr, pc}
+ *	sub	fp, ip, #4
+ *
+ * With EABI, these frame pointers are not available unless -mapcs-frame is
+ * specified, and if building as Thumb-2, not even then.
+ *
+ * Newer GCCs (4.4+) solve this problem by introducing a new version of mcount,
+ * with call sites like:
+ *
+ *	push	{lr}
+ *	bl	__gnu_mcount_nc
+ *
+ * With these compilers, frame pointers are not necessary.
+ *
+ * mcount can be thought of as a function called in the middle of a subroutine
+ * call.  As such, it needs to be transparent for both the caller and the
+ * callee: the original lr needs to be restored when leaving mcount, and no
+ * registers should be clobbered.  (In the __gnu_mcount_nc implementation, we
+ * clobber the ip register.  This is OK because the ARM calling convention
+ * allows it to be clobbered in subroutines and doesn't use it to hold
+ * parameters.)
+ *
+ * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0"
+ * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see
+ * arch/arm/kernel/ftrace.c).
+ */
 
-	.globl mcount_call
-mcount_call:
-	bl	ftrace_stub
-	ldr	lr, [fp, #-4]			@ restore lr
-	ldmia	sp!, {r0-r3, pc}
+#ifndef CONFIG_OLD_MCOUNT
+#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4))
+#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0.
+#endif
+#endif
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+ENTRY(__gnu_mcount_nc)
+	mov	ip, lr
+	ldmia	sp!, {lr}
+	mov	pc, ip
+ENDPROC(__gnu_mcount_nc)
 
 ENTRY(ftrace_caller)
-	stmdb	sp!, {r0-r3, lr}
-	ldr	r1, [fp, #-4]
-	mov	r0, lr
-	sub	r0, r0, #MCOUNT_INSN_SIZE
+	stmdb	sp!, {r0-r3, lr}
+	mov	r0, lr
+	sub	r0, r0, #MCOUNT_INSN_SIZE
+	ldr	r1, [sp, #20]
 
-	.globl ftrace_call
+	.global	ftrace_call
 ftrace_call:
-	bl	ftrace_stub
-	ldr	lr, [fp, #-4]			@ restore lr
-	ldmia	sp!, {r0-r3, pc}
+	bl	ftrace_stub
+	ldmia	sp!, {r0-r3, ip, lr}
+	mov	pc, ip
+ENDPROC(ftrace_caller)
+
+#ifdef CONFIG_OLD_MCOUNT
+ENTRY(mcount)
+	stmdb	sp!, {lr}
+	ldr	lr, [fp, #-4]
+	ldmia	sp!, {pc}
+ENDPROC(mcount)
+
+ENTRY(ftrace_caller_old)
+	stmdb	sp!, {r0-r3, lr}
+	ldr	r1, [fp, #-4]
+	mov	r0, lr
+	sub	r0, r0, #MCOUNT_INSN_SIZE
+
+	.globl ftrace_call_old
+ftrace_call_old:
+	bl	ftrace_stub
+	ldr	lr, [fp, #-4]			@ restore lr
+	ldmia	sp!, {r0-r3, pc}
+ENDPROC(ftrace_caller_old)
+#endif
 
 #else
 
 ENTRY(__gnu_mcount_nc)
-	stmdb	sp!, {r0-r3, lr}
-	ldr	r0, =ftrace_trace_function
-	ldr	r2, [r0]
-	adr	r0, ftrace_stub
-	cmp	r0, r2
-	bne	gnu_trace
-	ldmia	sp!, {r0-r3, ip, lr}
-	mov	pc, ip
+	stmdb	sp!, {r0-r3, lr}
+	ldr	r0, =ftrace_trace_function
+	ldr	r2, [r0]
+	adr	r0, .Lftrace_stub
+	cmp	r0, r2
+	bne	gnu_trace
+	ldmia	sp!, {r0-r3, ip, lr}
+	mov	pc, ip
 
 gnu_trace:
-	ldr	r1, [sp, #20]			@ lr of instrumented routine
-	mov	r0, lr
-	sub	r0, r0, #MCOUNT_INSN_SIZE
-	mov	lr, pc
-	mov	pc, r2
-	ldmia	sp!, {r0-r3, ip, lr}
-	mov	pc, ip
-
+	ldr	r1, [sp, #20]			@ lr of instrumented routine
+	mov	r0, lr
+	sub	r0, r0, #MCOUNT_INSN_SIZE
+	adr	lr, BSYM(1f)
+	mov	pc, r2
+1:
+	ldmia	sp!, {r0-r3, ip, lr}
+	mov	pc, ip
+ENDPROC(__gnu_mcount_nc)
+
+#ifdef CONFIG_OLD_MCOUNT
+/*
+ * This is under an ifdef in order to force link-time errors for people trying
+ * to build with !FRAME_POINTER with a GCC which doesn't use the new-style
+ * mcount.
+ */
 ENTRY(mcount)
-	stmdb	sp!, {r0-r3, lr}
-	ldr	r0, =ftrace_trace_function
-	ldr	r2, [r0]
-	adr	r0, ftrace_stub
-	cmp	r0, r2
-	bne	trace
-	ldr	lr, [fp, #-4]			@ restore lr
-	ldmia	sp!, {r0-r3, pc}
+	stmdb	sp!, {r0-r3, lr}
+	ldr	r0, =ftrace_trace_function
+	ldr	r2, [r0]
+	adr	r0, ftrace_stub
+	cmp	r0, r2
+	bne	trace
+	ldr	lr, [fp, #-4]			@ restore lr
+	ldmia	sp!, {r0-r3, pc}
 
 trace:
-	ldr	r1, [fp, #-4]			@ lr of instrumented routine
-	mov	r0, lr
-	sub	r0, r0, #MCOUNT_INSN_SIZE
-	mov	lr, pc
-	mov	pc, r2
-	ldr	lr, [fp, #-4]			@ restore lr
-	ldmia	sp!, {r0-r3, pc}
+	ldr	r1, [fp, #-4]			@ lr of instrumented routine
+	mov	r0, lr
+	sub	r0, r0, #MCOUNT_INSN_SIZE
+	mov	lr, pc
+	mov	pc, r2
+	ldr	lr, [fp, #-4]			@ restore lr
+	ldmia	sp!, {r0-r3, pc}
+ENDPROC(mcount)
+#endif
 
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
-	.globl ftrace_stub
-ftrace_stub:
-	mov	pc, lr
+ENTRY(ftrace_stub)
+.Lftrace_stub:
+	mov	pc, lr
+ENDPROC(ftrace_stub)
 
 #endif /* CONFIG_FUNCTION_TRACER */
@@ -257,7 +334,6 @@ ENTRY(vector_swi)
 	get_thread_info tsk
 	adr	tbl, sys_call_table		@ load syscall table pointer
-	ldr	ip, [tsk, #TI_FLAGS]		@ check for syscall tracing
 
 #if defined(CONFIG_OABI_COMPAT)
 	/*
@@ -274,8 +350,20 @@
 	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
 #endif
 
+	ldr	r10, [tsk, #TI_FLAGS]		@ check for syscall tracing
 	stmdb	sp!, {r4, r5}			@ push fifth and sixth args
-	tst	ip, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?
+
+#ifdef CONFIG_SECCOMP
+	tst	r10, #_TIF_SECCOMP
+	beq	1f
+	mov	r0, scno
+	bl	__secure_computing
+	add	r0, sp, #S_R0 + S_OFF		@ pointer to regs
+	ldmia	r0, {r0 - r3}			@ have to reload r0 - r3
+1:
+#endif
+
+	tst	r10, #_TIF_SYSCALL_TRACE	@ are we tracing syscalls?
 	bne	__sys_trace
 
 	cmp	scno, #NR_syscalls		@ check upper syscall limit
@@ -382,11 +470,13 @@ ENDPROC(sys_clone_wrapper)
 
 sys_sigreturn_wrapper:
 		add	r0, sp, #S_OFF
+		mov	why, #0		@ prevent syscall restart handling
 		b	sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)
 
 sys_rt_sigreturn_wrapper:
 		add	r0, sp, #S_OFF
+		mov	why, #0		@ prevent syscall restart handling
 		b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)
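A few notes on the changes above. First, the signal-restart hunks (work_pending and the two sigreturn wrappers): per arch/arm/kernel/entry-header.S, "why" is a register alias for r8 and is nonzero only on the syscall-return path, where it tells the signal code that an interrupted syscall may need restarting. A minimal annotated sketch of what the two additions do (a paraphrase of the hunks above, not extra source):

	@ work_pending: once a signal is actually delivered, any restart
	@ is arranged inside do_notify_resume itself, so the 'syscall'
	@ flag is cleared to stop the restart logic firing again on the
	@ next trip around the work-pending loop.
	tst	r1, #_TIF_SIGPENDING		@ delivering a signal?
	movne	why, #0				@ prevent further restarts

	@ sigreturn wrappers: a return from a signal handler must never
	@ be treated as a restartable syscall.
	mov	why, #0				@ prevent syscall restart handling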
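Second, the long comment added at the top of the ftrace block is easiest to read next to concrete call sites. Here is a sketch of what gcc emits for an instrumented function under each scheme, assembled from the snippets in that comment (the function names are illustrative only):

	@ pre-4.4 gcc: needs the APCS frame, because mcount locates the
	@ caller's lr at [fp, #-4] (fp points at the saved pc slot, so
	@ the saved lr sits one word below it)
some_function_old:
	mov	ip, sp
	push	{fp, ip, lr, pc}
	sub	fp, ip, #4
	bl	mcount

	@ gcc 4.4+: the caller's lr is pushed explicitly, and
	@ __gnu_mcount_nc pops it again, so no frame pointer is needed
some_function_new:
	push	{lr}
	bl	__gnu_mcount_nc

With dynamic ftrace the call is patched rather than taken: per the comment, bl mcount becomes "mov r0, r0" and the __gnu_mcount_nc call becomes "pop {lr}". The new three-instruction __gnu_mcount_nc stub in the CONFIG_DYNAMIC_FTRACE branch is exactly that no-op done by hand, undoing the push {lr} and returning; call sites are then patched to call ftrace_caller when tracing is enabled (see arch/arm/kernel/ftrace.c).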
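Third, in the non-dynamic __gnu_mcount_nc path, the call through r2 now materialises its return address with "adr lr, BSYM(1f)" instead of the old "mov lr, pc". My reading is that this is a Thumb-2 interworking fix, with BSYM() coming from asm/unified.h, where it adds the Thumb bit to an address in CONFIG_THUMB2_KERNEL builds; a sketch of the difference:

	@ old: only correct in ARM state.  Reading pc yields the address
	@ two instructions ahead with bit 0 clear, so on a Thumb-2 kernel
	@ the tracer would return to the wrong place in the wrong
	@ instruction set.
	mov	lr, pc
	mov	pc, r2			@ call ftrace_trace_function

	@ new: BSYM() sets bit 0 on the local label in Thumb-2 builds,
	@ so the return lands on the label in the correct state.
	adr	lr, BSYM(1f)
	mov	pc, r2
1: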
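Finally, the seccomp hunk also explains why the TI_FLAGS load moved and changed register: ip (r12) is caller-clobbered under the ARM calling convention and would not survive the new bl __secure_computing, whereas r10 is callee-saved, so the same flags value can still be tested for _TIF_SYSCALL_TRACE after the call. An annotated paraphrase of the hunk (not additional source; in kernels of this era a seccomp violation terminates the task inside the call):

	ldr	r10, [tsk, #TI_FLAGS]		@ r10 survives the call below
#ifdef CONFIG_SECCOMP
	tst	r10, #_TIF_SECCOMP
	beq	1f
	mov	r0, scno			@ syscall number for seccomp
	bl	__secure_computing		@ may not return if denied
	add	r0, sp, #S_R0 + S_OFF		@ r0-r3 were clobbered by the
	ldmia	r0, {r0 - r3}			@ call: reload from saved regs
1:
#endif
	tst	r10, #_TIF_SYSCALL_TRACE	@ flags still valid here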