/*
 *  linux/arch/m68knommu/platform/5307/entry.S
 *
 *  Copyright (C) 1999-2002, Greg Ungerer (gerg@snapgear.com)
 *  Copyright (C) 1998  D. Jeff Dionne <jeff@lineo.ca>,
 *                      Kenneth Albanowski <kjahds@kjahds.com>,
 *  Copyright (C) 2000  Lineo Inc. (www.lineo.com)
 *  Copyright (C) 2004  Macq Electronique SA. (www.macqel.com)
 *
 *  Based on:
 *
 *  linux/arch/m68k/kernel/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file README.legal in the main directory of this archive
 * for more details.
 *
 * Linux/m68k support by Hamish Macdonald
 *
 * 68060 fixes by Jesper Skov
 * ColdFire support by Greg Ungerer (gerg@snapgear.com)
 * 5307 fixes by David W. Miller
 * linux 2.4 support David McCullough <davidm@snapgear.com>
 * Bug, speed and maintainability fixes by Philippe De Muyter <phdm@macqel.be>
 */
#include <linux/config.h>
#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/asm-offsets.h>
#include <asm/entry.h>
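
/*
 * Software copies of the kernel and user stack pointers. The ColdFire
 * core used here has a single hardware stack pointer, so the exception
 * entry/exit code below switches between these two saved values by hand.
 */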
.bss

sw_ksp:
	.long	0

sw_usp:
	.long	0

.text

.globl system_call
.globl resume
.globl ret_from_exception
.globl ret_from_signal
.globl sys_call_table
.globl ret_from_interrupt
.globl inthandler
.globl fasthandler
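
/*
 * System call entry. The syscall number arrives in %d0; it is range
 * checked against NR_syscalls and used to index sys_call_table. If
 * TIF_SYSCALL_TRACE is set for the current thread, the handler is
 * invoked via the tracing path at 1: below instead.
 */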
ENTRY(system_call)
	SAVE_ALL
	move	#0x2000,%sr		/* enable intrs again */

	movel	#-LENOSYS,%d2
	movel	%d2,PT_D0(%sp)		/* default return value in d0 */
					/* original D0 is in orig_d0 */
	movel	%d0,%d2

	/* save top of frame */
	pea	%sp@
	jbsr	set_esp0
	addql	#4,%sp

	cmpl	#NR_syscalls,%d2
	jcc	ret_from_exception
	lea	sys_call_table,%a0
	lsll	#2,%d2			/* movel %a0@(%d2:l:4),%d3 */
	movel	%a0@(%d2),%d3
	jeq	ret_from_exception
	lsrl	#2,%d2

	movel	%sp,%d2			/* get thread_info pointer */
	andl	#-THREAD_SIZE,%d2	/* at start of kernel stack */
	movel	%d2,%a0
	btst	#TIF_SYSCALL_TRACE,%a0@(TI_FLAGS)
	bnes	1f

	movel	%d3,%a0
	jbsr	%a0@
	movel	%d0,%sp@(PT_D0)		/* save the return value */
	jra	ret_from_exception
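
/*
 * Traced system call: notify the tracer (syscall_trace) both before
 * and after the real handler runs, with a switch-stack frame built so
 * the tracer can inspect the full register set.
 */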
1:
	subql	#4,%sp
	SAVE_SWITCH_STACK
	jbsr	syscall_trace
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	movel	%d3,%a0
	jbsr	%a0@
	movel	%d0,%sp@(PT_D0)		/* save the return value */

	subql	#4,%sp			/* dummy return address */
	SAVE_SWITCH_STACK
	jbsr	syscall_trace

ret_from_signal:
	RESTORE_SWITCH_STACK
	addql	#4,%sp
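
/*
 * Common exception/syscall return path. Returns to kernel mode simply
 * restore the saved frame and rte; returns to user space go through
 * Luser_return so pending reschedules and signals are handled first.
 */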
ret_from_exception:
	btst	#5,%sp@(PT_SR)		/* check if returning to kernel */
	jeq	Luser_return		/* if so, skip resched, signals */

Lkernel_return:
	moveml	%sp@,%d1-%d5/%a0-%a2
	lea	%sp@(32),%sp		/* space for 8 regs */
	movel	%sp@+,%d0
	addql	#4,%sp			/* orig d0 */
	addl	%sp@+,%sp		/* stk adj */
	rte

Luser_return:
	movel	%sp,%d1			/* get thread_info pointer */
	andl	#-THREAD_SIZE,%d1	/* at base of kernel stack */
	movel	%d1,%a0
	movel	%a0@(TI_FLAGS),%d1	/* get thread_info->flags */
	andl	#_TIF_WORK_MASK,%d1
	jne	Lwork_to_do		/* still work to do */
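
/*
 * Final return to user space: the saved PC and format/vector word are
 * copied onto the user stack, the kernel stack pointer is stashed in
 * sw_ksp, and %sp is switched to sw_usp before the rte.
 */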
Lreturn:
	move	#0x2700,%sr		/* disable intrs */
	movel	sw_usp,%a0		/* get usp */
	movel	%sp@(PT_PC),%a0@-	/* copy exception program counter */
	movel	%sp@(PT_FORMATVEC),%a0@- /* copy exception format/vector/sr */
	moveml	%sp@,%d1-%d5/%a0-%a2
	lea	%sp@(32),%sp		/* space for 8 regs */
	movel	%sp@+,%d0
	addql	#4,%sp			/* orig d0 */
	addl	%sp@+,%sp		/* stk adj */
	addql	#8,%sp			/* remove exception */
	movel	%sp,sw_ksp		/* save ksp */
	subql	#8,sw_usp		/* set exception */
	movel	sw_usp,%sp		/* restore usp */
	rte

Lwork_to_do:
	movel	%a0@(TI_FLAGS),%d1	/* get thread_info->flags */
	btst	#TIF_NEED_RESCHED,%d1
	jne	reschedule

	/* GERG: do we need something here for TRACEing?? */
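
/*
 * Signal (or other) work is pending: build a switch-stack frame and
 * call do_signal() with a NULL oldset and a pointer to the saved
 * registers, then take the normal user return path via Lreturn.
 */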
Lsignal_return:
	subql	#4,%sp			/* dummy return address */
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)
	clrl	%sp@-
	jsr	do_signal
	addql	#8,%sp
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	jmp	Lreturn

/*
 * This is the generic interrupt handler (for all hardware interrupt
 * sources). It figures out the vector number and calls the appropriate
 * interrupt service routine directly.
 */
ENTRY(inthandler)
	SAVE_ALL
	moveq	#-1,%d0
	movel	%d0,%sp@(PT_ORIG_D0)
	addql	#1,local_irq_count

	movew	%sp@(PT_FORMATVEC),%d0	/* put exception # in d0 */
	andl	#0x03fc,%d0		/* mask out vector only */

	leal	per_cpu__kstat+STAT_IRQ,%a0
	addql	#1,%a0@(%d0)

	lsrl	#2,%d0			/* calculate real vector # */
	movel	%d0,%d1			/* calculate array offset */
	lsll	#4,%d1
	lea	irq_list,%a0
	addl	%d1,%a0			/* pointer to array struct */

	movel	%sp,%sp@-		/* push regs arg onto stack */
	movel	%a0@(8),%sp@-		/* push devid arg */
	movel	%d0,%sp@-		/* push vector # on stack */

	movel	%a0@,%a0		/* get function to call */
	jbsr	%a0@			/* call vector handler */
	lea	%sp@(12),%sp		/* pop parameters off stack */

	bra	ret_from_interrupt	/* this was fallthrough */

/*
 * This is the fast interrupt handler (for certain hardware interrupt
 * sources). Unlike the normal interrupt handler it just uses the
 * current stack (doesn't care if it is user or kernel). It also
 * doesn't bother doing the bottom half handlers.
 */
ENTRY(fasthandler)
	SAVE_LOCAL

	movew	%sp@(PT_FORMATVEC),%d0
	andl	#0x03fc,%d0		/* mask out vector only */

	leal	per_cpu__kstat+STAT_IRQ,%a0
	addql	#1,%a0@(%d0)

	movel	%sp,%sp@-		/* push regs arg onto stack */
	clrl	%sp@-			/* push devid arg */
	lsrl	#2,%d0			/* calculate real vector # */
	movel	%d0,%sp@-		/* push vector # on stack */

	lsll	#4,%d0			/* adjust for array offset */
	lea	irq_list,%a0
	movel	%a0@(%d0),%a0		/* get function to call */
	jbsr	%a0@			/* call vector handler */
	lea	%sp@(12),%sp		/* pop parameters off stack */

	RESTORE_LOCAL
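
/*
 * Common interrupt exit. Drops the local_irq_count nesting level; only
 * when the outermost interrupt unwinds (and the interrupted context was
 * running with interrupts enabled) are pending softirqs run before
 * taking the normal exception return path.
 */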
ENTRY(ret_from_interrupt)
	subql	#1,local_irq_count
	jeq	2f
1:
	RESTORE_ALL
2:
	moveb	%sp@(PT_SR),%d0
	andl	#0x7,%d0
	jhi	1b

	/* check if we need to do software interrupts */
	movel	irq_stat+CPUSTAT_SOFTIRQ_PENDING,%d0
	jeq	ret_from_exception

	pea	ret_from_exception
	jmp	do_softirq

/*
 * Beware - when entering resume, prev (the current task) is
 * in a0, next (the new task) is in a1, so don't change these
 * registers until their contents are no longer needed.
 */
ENTRY(resume)
	movel	%a0,%d1			/* get prev thread in d1 */
	movew	%sr,%d0			/* save thread status reg */
	movew	%d0,%a0@(TASK_THREAD+THREAD_SR)

	oril	#0x700,%d0		/* disable interrupts */
	move	%d0,%sr

	movel	sw_usp,%d0		/* save usp */
	movel	%d0,%a0@(TASK_THREAD+THREAD_USP)

	SAVE_SWITCH_STACK
	movel	%sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack pointer */
	movel	%a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new thread stack */
	RESTORE_SWITCH_STACK

	movel	%a1@(TASK_THREAD+THREAD_USP),%a0 /* restore thread user stack */
	movel	%a0,sw_usp

	movew	%a1@(TASK_THREAD+THREAD_SR),%d0	/* restore thread status reg */
	movew	%d0,%sr
	rts