/*
 * arch/sh/lib/mcount.S
 *
 * Copyright (C) 2008 Paul Mundt
 * Copyright (C) 2008, 2009 Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <asm/ftrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>

#define MCOUNT_ENTER()		\
	mov.l	r4, @-r15;	\
	mov.l	r5, @-r15;	\
	mov.l	r6, @-r15;	\
	mov.l	r7, @-r15;	\
	sts.l	pr, @-r15;	\
				\
	mov.l	@(20,r15),r4;	\
	sts	pr, r5

#define MCOUNT_LEAVE()		\
	lds.l	@r15+, pr;	\
	mov.l	@r15+, r7;	\
	mov.l	@r15+, r6;	\
	mov.l	@r15+, r5;	\
	rts;			\
	 mov.l	@r15+, r4
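
/*
 * A sketch of the frame MCOUNT_ENTER() builds, inferred from the five
 * 4-byte pushes above (SH stacks grow down):
 *
 *	@(20,r15):	word on top of the stack at entry, i.e. the
 *			saved return address (see ftrace_graph_caller)
 *	@(16,r15):	r4
 *	@(12,r15):	r5
 *	@(8,r15):	r6
 *	@(4,r15):	r7
 *	@(0,r15):	pr
 *
 * MCOUNT_ENTER() reloads that saved return address into r4 and the
 * entry-time pr into r5 as arguments for the tracer callbacks, and
 * MCOUNT_LEAVE() unwinds the same five words in reverse.
 */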

#ifdef CONFIG_STACK_DEBUG
/*
 * Perform diagnostic checks on the state of the kernel stack.
 *
 * Check for stack overflow: if fewer than STACK_WARN bytes are free
 * (beyond the thread_info block at the bottom of the stack), treat the
 * stack as overflowed.
 *
 * Make sure the stack pointer contains a valid address. Valid
 * addresses for kernel stacks are anywhere after the bss
 * (after _ebss) and anywhere in init_thread_union (init_stack).
 */
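
/*
 * Worked example of the size arithmetic below, assuming 4 KiB pages so
 * that THREAD_SIZE = 8192 and STACK_WARN = THREAD_SIZE >> 3 = 1024:
 *
 *	mov	#(8192 >> 10), r0	! r0 = 8
 *	shll8	r0			! r0 = 8 << 8  = 2048
 *	shll2	r0			! r0 = 8 << 10 = 8192 = THREAD_SIZE
 *
 * r1 = r15 & (THREAD_SIZE - 1) is then the stack pointer's byte offset
 * within its THREAD_SIZE-aligned region, and the overflow branch fires
 * once that offset is no more than STACK_WARN + TI_SIZE, i.e. once
 * fewer than STACK_WARN free bytes remain above the thread_info.
 */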
#define STACK_CHECK()					\
	mov	#(THREAD_SIZE >> 10), r0;		\
	shll8	r0;					\
	shll2	r0;					\
							\
	/* r1 = sp & (THREAD_SIZE - 1) */		\
	mov	#-1, r1;				\
	add	r0, r1;					\
	and	r15, r1;				\
							\
	mov	#TI_SIZE, r3;				\
	mov	#(STACK_WARN >> 8), r2;			\
	shll8	r2;					\
	add	r3, r2;					\
							\
	/* Is the stack overflowing? */			\
	cmp/hi	r2, r1;					\
	bf	stack_panic;				\
							\
	/* If sp > _ebss then we're OK. */		\
	mov.l	.L_ebss, r1;				\
	cmp/hi	r1, r15;				\
	bt	1f;					\
							\
	/* If sp < init_stack, we're not OK. */		\
	mov.l	.L_init_thread_union, r1;		\
	cmp/hs	r1, r15;				\
	bf	stack_panic;				\
							\
	/* If sp > init_stack && sp < _ebss, not OK. */	\
	add	r0, r1;					\
	cmp/hs	r1, r15;				\
	bt	stack_panic;				\
1:

#else
#define STACK_CHECK()
#endif /* CONFIG_STACK_DEBUG */

	.align 2
	.globl	_mcount
	.type	_mcount,@function
	.globl	mcount
	.type	mcount,@function
_mcount:
mcount:
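	/*
	 * function_trace_stop is ftrace's global "stop tracing" flag.
	 * In the !CONFIG_DYNAMIC_FTRACE case it is checked inline here:
	 * tst sets T only when the value read back is zero, so bf
	 * (branch if T is clear) bails out through ftrace_stub whenever
	 * tracing has been stopped.
	 */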
#ifndef CONFIG_DYNAMIC_FTRACE
	mov.l	.Lfunction_trace_stop, r0
	mov.l	@r0, r0
	tst	r0, r0
	bf	ftrace_stub
#endif
	STACK_CHECK()

	MCOUNT_ENTER()
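
	/*
	 * Choose the tracer to call. Under CONFIG_DYNAMIC_FTRACE the
	 * load at mcount_call below starts out targeting ftrace_stub so
	 * that the dynamic-ftrace code can repatch it at runtime; in the
	 * static case, dispatch goes through the ftrace_trace_function
	 * pointer instead.
	 */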
#ifdef CONFIG_DYNAMIC_FTRACE
	.globl	mcount_call
mcount_call:
	mov.l	.Lftrace_stub, r6
#else
	mov.l	.Lftrace_trace_function, r6
	mov.l	ftrace_stub, r7
	cmp/eq	r6, r7
	bt	skip_trace
	mov.l	@r6, r6
#endif

	jsr	@r6
	 nop
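
	/*
	 * Function-graph hook: if either ftrace_graph_return or
	 * ftrace_graph_entry has been switched away from its stub,
	 * detour through ftrace_graph_caller; otherwise fall through
	 * to skip_trace and unwind.
	 */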
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	mov.l	.Lftrace_graph_return, r6
	mov.l	.Lftrace_stub, r7
	cmp/eq	r6, r7
	bt	1f

	mov.l	.Lftrace_graph_caller, r0
	jmp	@r0
	 nop

1:
	mov.l	.Lftrace_graph_entry, r6
	mov.l	.Lftrace_graph_entry_stub, r7
	cmp/eq	r6, r7
	bt	skip_trace

	mov.l	.Lftrace_graph_caller, r0
	jmp	@r0
	 nop

	.align 2
.Lftrace_graph_return:
	.long	ftrace_graph_return
.Lftrace_graph_entry:
	.long	ftrace_graph_entry
.Lftrace_graph_entry_stub:
	.long	ftrace_graph_entry_stub
.Lftrace_graph_caller:
	.long	ftrace_graph_caller
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	.globl	skip_trace
skip_trace:
	MCOUNT_LEAVE()

	.align 2
.Lftrace_trace_function:
	.long	ftrace_trace_function

#ifdef CONFIG_DYNAMIC_FTRACE
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * NOTE: Do not move either ftrace_graph_call or ftrace_caller
 * as this will affect the calculation of GRAPH_INSN_OFFSET.
 */
	.globl	ftrace_graph_call
ftrace_graph_call:
	mov.l	.Lskip_trace, r0
	jmp	@r0
	 nop

	.align 2
.Lskip_trace:
	.long	skip_trace
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	.globl	ftrace_caller
ftrace_caller:
	mov.l	.Lfunction_trace_stop, r0
	mov.l	@r0, r0
	tst	r0, r0
	bf	ftrace_stub

	STACK_CHECK()

	MCOUNT_ENTER()
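
	/*
	 * ftrace_call is the dynamic-ftrace patch site: the load below
	 * starts out fetching ftrace_stub through the .Lftrace_stub
	 * literal, and ftrace_update_ftrace_func() repatches this site
	 * so the jsr reaches the currently registered tracer. This is
	 * also why the NOTE below pins the layout around .Lftrace_stub.
	 */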
	.globl	ftrace_call
ftrace_call:
	mov.l	.Lftrace_stub, r6
	jsr	@r6
	 nop

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	bra	ftrace_graph_call
	 nop
#else
	MCOUNT_LEAVE()
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * NOTE: From here on the locations of the .Lftrace_stub label and
 * ftrace_stub itself are fixed. Adding additional data here will skew
 * the displacement for the memory table and break the block replacement.
 * Place new labels either after the ftrace_stub body, or before
 * ftrace_caller. You have been warned.
 */
	.align 2
.Lftrace_stub:
	.long	ftrace_stub

	.globl	ftrace_stub
ftrace_stub:
	rts
	 nop

#ifdef CONFIG_STACK_DEBUG
	.globl	stack_panic
stack_panic:
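	/*
	 * Dump a backtrace, then panic. The mov.l that loads the message
	 * pointer into r4 sits in the jsr's delay slot, so it executes
	 * before panic() is entered. panic() does not return, so the
	 * trailing rts should never be reached.
	 */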
	mov.l	.Ldump_stack, r0
	jsr	@r0
	 nop

	mov.l	.Lpanic, r0
	jsr	@r0
	 mov.l	.Lpanic_s, r4

	rts
	 nop

	.align 2
.Lfunction_trace_stop:
	.long	function_trace_stop
.L_ebss:
	.long	_ebss
.L_init_thread_union:
	.long	init_thread_union
.Lpanic:
	.long	panic
.Lpanic_s:
	.long	.Lpanic_str
.Ldump_stack:
	.long	dump_stack

	.section	.rodata
	.align 2
.Lpanic_str:
	.string	"Stack error"
#endif /* CONFIG_STACK_DEBUG */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.globl	ftrace_graph_caller
ftrace_graph_caller:
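	/*
	 * 2f and 3f refer forward to the numeric-label literals placed
	 * after this body: 2: holds the address of function_trace_stop
	 * and 3: holds the address of skip_trace. If tracing has been
	 * stopped, leave via skip_trace so that the registers saved by
	 * MCOUNT_ENTER() are still restored.
	 */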
	mov.l	2f, r0
	mov.l	@r0, r0
	tst	r0, r0
	bt	1f

	mov.l	3f, r1
	jmp	@r1
	 nop
1:
	/*
	 * MCOUNT_ENTER() pushed 5 registers onto the stack, so
	 * the stack address containing our return address is
	 * r15 + 20.
	 */
	mov	#20, r0
	add	r15, r0
	mov	r0, r4

	mov.l	.Lprepare_ftrace_return, r0
	jsr	@r0
	 nop

	MCOUNT_LEAVE()

	.align 2
2:	.long	function_trace_stop
3:	.long	skip_trace
.Lprepare_ftrace_return:
	.long	prepare_ftrace_return

	.globl	return_to_handler
return_to_handler:
	/*
	 * Save the return values.
	 */
	mov.l	r0, @-r15
	mov.l	r1, @-r15
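
	/*
	 * The first argument to ftrace_return_to_handler() is a frame
	 * pointer value used for consistency checking on architectures
	 * that provide one; it is not tracked here, so pass 0.
	 */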
	mov	#0, r4

	mov.l	.Lftrace_return_to_handler, r0
	jsr	@r0
	 nop

	/*
	 * The return value from ftrace_return_to_handler() is the real
	 * address that we should return to.
	 */
	lds	r0, pr
	mov.l	@r15+, r1
	rts
	 mov.l	@r15+, r0

	.align 2
.Lftrace_return_to_handler:
	.long	ftrace_return_to_handler
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */