Diffstat (limited to 'arch/sh/kernel/entry-common.S')
-rw-r--r--  arch/sh/kernel/entry-common.S  89
1 files changed, 55 insertions, 34 deletions
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
index 718bd2356b3..1a5cf9dd82d 100644
--- a/arch/sh/kernel/entry-common.S
+++ b/arch/sh/kernel/entry-common.S
@@ -92,6 +92,7 @@ ENTRY(ret_from_irq)
bra resume_userspace
nop
ENTRY(resume_kernel)
+ cli
mov.l @(TI_PRE_COUNT,r8), r0 ! current_thread_info->preempt_count
tst r0, r0
bf noresched
@@ -105,28 +106,9 @@ need_resched:
and #0xf0, r0 ! interrupts off (exception path)?
cmp/eq #0xf0, r0
bt noresched
-
- mov.l 1f, r0
- mov.l r0, @(TI_PRE_COUNT,r8)
-
-#ifdef CONFIG_TRACE_IRQFLAGS
mov.l 3f, r0
- jsr @r0
- nop
-#endif
- sti
- mov.l 2f, r0
- jsr @r0
- nop
- mov #0, r0
- mov.l r0, @(TI_PRE_COUNT,r8)
- cli
-#ifdef CONFIG_TRACE_IRQFLAGS
- mov.l 4f, r0
- jsr @r0
+ jsr @r0 ! call preempt_schedule_irq
nop
-#endif
-
bra need_resched
nop
@@ -137,10 +119,7 @@ noresched:
.align 2
1: .long PREEMPT_ACTIVE
2: .long schedule
-#ifdef CONFIG_TRACE_IRQFLAGS
-3: .long trace_hardirqs_on
-4: .long trace_hardirqs_off
-#endif
+3: .long preempt_schedule_irq
#endif
ENTRY(resume_userspace)
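[Context, not part of the patch: the three hunks above replace the open-coded kernel-preemption sequence (PREEMPT_ACTIVE bookkeeping, trace_hardirqs_on/off, sti/cli around schedule) with a single call to the generic preempt_schedule_irq() helper, which must be entered with interrupts disabled — hence the added cli in resume_kernel. A rough C paraphrase of what that helper did in kernels of this generation, adapted from kernel/sched.c and shown only to explain the new contract:]

/*
 * Illustrative paraphrase of the generic helper the assembly now
 * calls (based on kernel/sched.c of the same era; not part of this
 * diff).  It expects IRQs off and a zero preempt count on entry,
 * which is why resume_kernel disables interrupts and still checks
 * TI_PRE_COUNT before falling through to need_resched.
 */
asmlinkage void __sched preempt_schedule_irq(void)
{
	struct thread_info *ti = current_thread_info();

	BUG_ON(ti->preempt_count || !irqs_disabled());

	do {
		add_preempt_count(PREEMPT_ACTIVE);
		local_irq_enable();
		schedule();
		local_irq_disable();
		sub_preempt_count(PREEMPT_ACTIVE);

		/* re-check in case a preemption point was missed */
		barrier();
	} while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
}

[Because the helper handles the PREEMPT_ACTIVE and IRQ toggling itself, the removed literal-pool loads of PREEMPT_ACTIVE, schedule and the trace_hardirqs_* thunks become unnecessary, and only the preempt_schedule_irq pointer remains.]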
@@ -192,7 +171,7 @@ work_resched:
.align 2
1: .long schedule
2: .long do_notify_resume
-3: .long restore_all
+3: .long resume_userspace
#ifdef CONFIG_TRACE_IRQFLAGS
4: .long trace_hardirqs_on
5: .long trace_hardirqs_off
@@ -202,7 +181,7 @@ work_resched:
syscall_exit_work:
! r0: current_thread_info->flags
! r8: current_thread_info
- tst #_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | _TIF_SYSCALL_AUDIT, r0
+ tst #_TIF_WORK_SYSCALL_MASK, r0
bt/s work_pending
tst #_TIF_NEED_RESCHED, r0
#ifdef CONFIG_TRACE_IRQFLAGS
@@ -211,10 +190,8 @@ syscall_exit_work:
nop
#endif
sti
- ! XXX setup arguments...
mov r15, r4
- mov #1, r5
- mov.l 4f, r0 ! do_syscall_trace
+ mov.l 8f, r0 ! do_syscall_trace_leave
jsr @r0
nop
bra resume_userspace
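[Context, not part of the patch: this hunk and the syscall_trace_entry hunk below replace the single do_syscall_trace(regs, entryexit) call with separate enter/leave hooks, so the "XXX setup arguments" hack of passing 0/1 in r5 goes away. A hedged sketch of the C-side prototypes the assembly now expects — inferred from r4 being loaded with r15 (the pt_regs frame) and from the entry path storing the hook's return value back into the saved r0 slot; the real definitions live in the SH ptrace code, not in this diff:]

#include <linux/ptrace.h>	/* struct pt_regs */

/*
 * Sketch of the split tracing hooks wired up by this diff
 * (illustrative; exact signatures belong to arch/sh/kernel/ptrace
 * code).  Both receive the pt_regs block passed in r4, and the
 * entry hook's return value is written back to @(OFF_R0,r15).
 */
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs);
asmlinkage void do_syscall_trace_leave(struct pt_regs *regs);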
@@ -223,12 +200,11 @@ syscall_exit_work:
.align 2
syscall_trace_entry:
! Yes it is traced.
- ! XXX setup arguments...
mov r15, r4
- mov #0, r5
- mov.l 4f, r11 ! Call do_syscall_trace which notifies
+ mov.l 7f, r11 ! Call do_syscall_trace_enter which notifies
jsr @r11 ! superior (will chomp R[0-7])
nop
+ mov.l r0, @(OFF_R0,r15) ! Save return value
! Reload R0-R4 from kernel stack, where the
! parent may have modified them using
! ptrace(POKEUSR). (Note that R0-R2 are
@@ -351,7 +327,7 @@ ENTRY(system_call)
!
get_current_thread_info r8, r10
mov.l @(TI_FLAGS,r8), r8
- mov #(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT), r10
+ mov #_TIF_WORK_SYSCALL_MASK, r10
tst r10, r8
bf syscall_trace_entry
!
@@ -389,8 +365,53 @@ syscall_exit:
#endif
2: .long NR_syscalls
3: .long sys_call_table
-4: .long do_syscall_trace
#ifdef CONFIG_TRACE_IRQFLAGS
5: .long trace_hardirqs_on
6: .long trace_hardirqs_off
#endif
+7: .long do_syscall_trace_enter
+8: .long do_syscall_trace_leave
+
+#ifdef CONFIG_FTRACE
+ .align 2
+ .globl _mcount
+ .type _mcount,@function
+ .globl mcount
+ .type mcount,@function
+_mcount:
+mcount:
+ mov.l r4, @-r15
+ mov.l r5, @-r15
+ mov.l r6, @-r15
+ mov.l r7, @-r15
+ sts.l pr, @-r15
+
+ mov.l @(20,r15),r4
+ sts pr, r5
+
+ mov.l 1f, r6
+ mov.l ftrace_stub, r7
+ cmp/eq r6, r7
+ bt skip_trace
+
+ mov.l @r6, r6
+ jsr @r6
+ nop
+
+skip_trace:
+
+ lds.l @r15+, pr
+ mov.l @r15+, r7
+ mov.l @r15+, r6
+ mov.l @r15+, r5
+ rts
+ mov.l @r15+, r4
+
+ .align 2
+1: .long ftrace_trace_function
+
+ .globl ftrace_stub
+ftrace_stub:
+ rts
+ nop
+#endif /* CONFIG_FTRACE */
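[Context, not part of the patch: the new mcount stub saves the caller's argument registers r4-r7 and pr, picks up the call-site and parent return addresses, and only takes the indirect call when a tracer other than the default ftrace_stub is registered. A C-level picture of the check it intends, following the generic two-argument ftrace hook convention of this kernel generation; mcount_in_c is a made-up name for the sketch, since the real entry point must stay in assembly inside every instrumented function's prologue:]

/* Illustrative only -- mirrors the skip_trace check in the stub above. */
extern void ftrace_stub(unsigned long ip, unsigned long parent_ip);
extern void (*ftrace_trace_function)(unsigned long ip,
				     unsigned long parent_ip);

static void mcount_in_c(unsigned long call_site, unsigned long parent)
{
	/* Avoid the indirect call while no tracer is registered. */
	if (ftrace_trace_function != ftrace_stub)
		ftrace_trace_function(call_site, parent);
}

[The register save/restore around the jsr, and the nop in its delay slot, keep the instrumented function's arguments and return address intact whether or not a tracer runs.]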