#ifndef __ASM_SH_SYSTEM_32_H
#define __ASM_SH_SYSTEM_32_H

#include <linux/types.h>
#include <asm/mmu.h>

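/*
 * SH-DSP context handling: tasks that run with SR.DSP set carry an
 * extra block of DSP registers in thread.dsp_status which must be
 * saved and restored around context switches.
 */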
#ifdef CONFIG_SH_DSP

#define is_dsp_enabled(tsk)						\
	(!!((tsk)->thread.dsp_status.status & SR_DSP))

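/*
 * Reload the DSP register block: r2 walks forward through the
 * 14-longword dsp_status area with post-increment loads.
 */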
#define __restore_dsp(tsk)						\
do {									\
	register u32 *__ts2 __asm__ ("r2") =				\
			(u32 *)&tsk->thread.dsp_status;			\
	__asm__ __volatile__ (						\
		".balign 4\n\t"						\
		"movs.l	@r2+, a0\n\t"					\
		"movs.l	@r2+, a1\n\t"					\
		"movs.l	@r2+, a0g\n\t"					\
		"movs.l	@r2+, a1g\n\t"					\
		"movs.l	@r2+, m0\n\t"					\
		"movs.l	@r2+, m1\n\t"					\
		"movs.l	@r2+, x0\n\t"					\
		"movs.l	@r2+, x1\n\t"					\
		"movs.l	@r2+, y0\n\t"					\
		"movs.l	@r2+, y1\n\t"					\
		"lds.l	@r2+, dsr\n\t"					\
		"ldc.l	@r2+, rs\n\t"					\
		"ldc.l	@r2+, re\n\t"					\
		"ldc.l	@r2+, mod\n\t"					\
		: : "r" (__ts2));					\
} while (0)
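/*
 * Mirror image of __restore_dsp: point r2 just past the 14-longword
 * dsp_status area and store the registers in reverse order with
 * pre-decrement.
 */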
#define __save_dsp(tsk)							\
do {									\
	register u32 *__ts2 __asm__ ("r2") =				\
			(u32 *)&tsk->thread.dsp_status + 14;		\
									\
	__asm__ __volatile__ (						\
		".balign 4\n\t"						\
		"stc.l	mod, @-r2\n\t"					\
		"stc.l	re, @-r2\n\t"					\
		"stc.l	rs, @-r2\n\t"					\
		"sts.l	dsr, @-r2\n\t"					\
		"movs.l	y1, @-r2\n\t"					\
		"movs.l	y0, @-r2\n\t"					\
		"movs.l	x1, @-r2\n\t"					\
		"movs.l	x0, @-r2\n\t"					\
		"movs.l	m1, @-r2\n\t"					\
		"movs.l	m0, @-r2\n\t"					\
		"movs.l	a1g, @-r2\n\t"					\
		"movs.l	a0g, @-r2\n\t"					\
		"movs.l	a1, @-r2\n\t"					\
		"movs.l	a0, @-r2\n\t"					\
		: : "r" (__ts2));					\
} while (0)

#else

#define is_dsp_enabled(tsk)	(0)
#define __save_dsp(tsk)		do { } while (0)
#define __restore_dsp(tsk)	do { } while (0)
#endif

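/*
 * Instruction cache block invalidate: the icbi instruction only exists
 * on SH-4A; everything older falls back to a plain memory barrier.
 */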
#if defined(CONFIG_CPU_SH4A)
#define __icbi(addr)	__asm__ __volatile__ ( "icbi @%0\n\t" : : "r" (addr))
#else
#define __icbi(addr)	mb()
#endif

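/*
 * Operand cache block operations on the line containing addr:
 * __ocbp purges (writes back and invalidates), __ocbi invalidates
 * without write-back, and __ocbwb writes back without invalidating.
 */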
#define __ocbp(addr)	__asm__ __volatile__ ( "ocbp @%0\n\t" : : "r" (addr))
#define __ocbi(addr)	__asm__ __volatile__ ( "ocbi @%0\n\t" : : "r" (addr))
#define __ocbwb(addr)	__asm__ __volatile__ ( "ocbwb @%0\n\t" : : "r" (addr))

struct task_struct *__switch_to(struct task_struct *prev,
				struct task_struct *next);

/*
 * switch_to(prev, next, last) switches the CPU from task prev to task
 * next.  The callee-saved registers are pushed onto prev's stack, prev's
 * SP and resume PC are recorded in its thread struct, the stack pointer
 * is switched to next's saved SP, and control passes through
 * __switch_to() (prev/next pinned in r4/r5) before resuming at next's
 * saved PC.  The task actually switched away from comes back in r0
 * (the "z" constraint) and is assigned to last.
 */
#define switch_to(prev, next, last)				\
do {								\
	register u32 *__ts1 __asm__ ("r1");			\
	register u32 *__ts2 __asm__ ("r2");			\
	register u32 *__ts4 __asm__ ("r4");			\
	register u32 *__ts5 __asm__ ("r5");			\
	register u32 *__ts6 __asm__ ("r6");			\
	register u32 __ts7 __asm__ ("r7");			\
	struct task_struct *__last;				\
								\
	if (is_dsp_enabled(prev))				\
		__save_dsp(prev);				\
								\
	__ts1 = (u32 *)&prev->thread.sp;			\
	__ts2 = (u32 *)&prev->thread.pc;			\
	__ts4 = (u32 *)prev;					\
	__ts5 = (u32 *)next;					\
	__ts6 = (u32 *)&next->thread.sp;			\
	__ts7 = next->thread.pc;				\
								\
	__asm__ __volatile__ (					\
		".balign 4\n\t"					\
		"stc.l	gbr, @-r15\n\t"				\
		"sts.l	pr, @-r15\n\t"				\
		"mov.l	r8, @-r15\n\t"				\
		"mov.l	r9, @-r15\n\t"				\
		"mov.l	r10, @-r15\n\t"				\
		"mov.l	r11, @-r15\n\t"				\
		"mov.l	r12, @-r15\n\t"				\
		"mov.l	r13, @-r15\n\t"				\
		"mov.l	r14, @-r15\n\t"				\
		"mov.l	r15, @r1\t! save SP\n\t"		\
		"mov.l	@r6, r15\t! change to new stack\n\t"	\
		"mova	1f, %0\n\t"				\
		"mov.l	%0, @r2\t! save PC\n\t"			\
		"mov.l	2f, %0\n\t"				\
		"jmp	@%0\t! call __switch_to\n\t"		\
		" lds	r7, pr\t!  with return to new PC\n\t"	\
		".balign	4\n"				\
		"2:\n\t"					\
		".long	__switch_to\n"				\
		"1:\n\t"					\
		"mov.l	@r15+, r14\n\t"				\
		"mov.l	@r15+, r13\n\t"				\
		"mov.l	@r15+, r12\n\t"				\
		"mov.l	@r15+, r11\n\t"				\
		"mov.l	@r15+, r10\n\t"				\
		"mov.l	@r15+, r9\n\t"				\
		"mov.l	@r15+, r8\n\t"				\
		"lds.l	@r15+, pr\n\t"				\
		"ldc.l	@r15+, gbr\n\t"				\
		: "=z" (__last)					\
		: "r" (__ts1), "r" (__ts2), "r" (__ts4),	\
		  "r" (__ts5), "r" (__ts6), "r" (__ts7)		\
		: "r3", "t");					\
								\
	last = __last;						\
} while (0)

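/*
 * Runs once the switch is complete: bring the DSP register block back
 * for a task that was using the DSP.
 */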
#define finish_arch_switch(prev)				\
do {								\
	if (is_dsp_enabled(prev))				\
		__restore_dsp(prev);				\
} while (0)

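/*
 * Tag for functions that flip between the cached and uncached mappings
 * below: they are kept out-of-line in the dedicated .uncached.text
 * section so the whole body is reachable through the uncached alias.
 */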
#define __uses_jump_to_uncached \
	noinline __attribute__ ((__section__ (".uncached.text")))

/*
 * Jump to the uncached mapping of this code.
 * When manipulating the TLB or the caches, the code doing so must run
 * from an uncached area: take the address of the label below and rebase
 * it by cached_to_uncached before jumping to it.
 */
#define jump_to_uncached()			\
do {						\
	unsigned long __dummy;			\
						\
	__asm__ __volatile__(			\
		"mova	1f, %0\n\t"		\
		"add	%1, %0\n\t"		\
		"jmp	@%0\n\t"		\
		" nop\n\t"			\
		".balign 4\n"			\
		"1:"				\
		: "=&z" (__dummy)		\
		: "r" (cached_to_uncached));	\
} while (0)

/*
 * Jump back to the cached area: ctrl_barrier() synchronizes any
 * outstanding control-register updates, then the literal-pool address
 * of the next instruction (a cached address) is loaded and jumped to.
 */
#define back_to_cached()				\
do {							\
	unsigned long __dummy;				\
	ctrl_barrier();					\
	__asm__ __volatile__(				\
		"mov.l	1f, %0\n\t"			\
		"jmp	@%0\n\t"			\
		" nop\n\t"				\
		".balign 4\n"				\
		"1:	.long 2f\n"			\
		"2:"					\
		: "=&r" (__dummy));			\
} while (0)
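
/*
 * A minimal usage sketch (function name is hypothetical; real callers
 * live in the SH cache and TLB code):
 *
 *	static void __uses_jump_to_uncached flush_foo(void)
 *	{
 *		jump_to_uncached();
 *		... touch cache or TLB control registers ...
 *		back_to_cached();
 *	}
 */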

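/*
 * Fetch the vector of the exception currently being handled: the
 * low-level entry code is expected to leave it in r2_bank on parts with
 * SR.RB register banks, and in plain r4 otherwise.
 */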
#ifdef CONFIG_CPU_HAS_SR_RB
#define lookup_exception_vector()	\
({					\
	unsigned long _vec;		\
					\
	__asm__ __volatile__ (		\
		"stc r2_bank, %0\n\t"	\
		: "=r" (_vec)		\
	);				\
					\
	_vec;				\
})
#else
#define lookup_exception_vector()	\
({					\
	unsigned long _vec;		\
	__asm__ __volatile__ (		\
		"mov r4, %0\n\t"	\
		: "=r" (_vec)		\
	);				\
					\
	_vec;				\
})
#endif

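/*
 * Widen a pointer to the machine register size; on 32-bit parts this
 * amounts to a plain cast.
 */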
static inline reg_size_t register_align(void *val)
{
	return (unsigned long)(signed long)val;
}

int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
			    struct mem_access *ma, int);

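/*
 * Provoke an address error on purpose (29-bit mode only): load SR with
 * just the BL bit set, then issue a misaligned longword load from the
 * odd address 0x80000001.
 */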
static inline void trigger_address_error(void)
{
	if (__in_29bit_mode())
		__asm__ __volatile__ (
			"ldc %0, sr\n\t"
			"mov.l @%1, %0"
			:
			: "r" (0x10000000), "r" (0x80000001)
		);
}

asmlinkage void do_address_error(struct pt_regs *regs,
				 unsigned long writeaccess,
				 unsigned long address);
asmlinkage void do_divide_error(unsigned long r4, unsigned long r5,
				unsigned long r6, unsigned long r7,
				struct pt_regs __regs);
asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
				unsigned long r6, unsigned long r7,
				struct pt_regs __regs);
asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5,
				unsigned long r6, unsigned long r7,
				struct pt_regs __regs);
asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
				   unsigned long r6, unsigned long r7,
				   struct pt_regs __regs);

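/*
 * Set SR.BL to block exception and interrupt acceptance; the IMASK
 * field is cleared at the same time.
 */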
static inline void set_bl_bit(void)
{
	unsigned long __dummy0, __dummy1;

	__asm__ __volatile__ (
		"stc	sr, %0\n\t"
		"or	%2, %0\n\t"
		"and	%3, %0\n\t"
		"ldc	%0, sr\n\t"
		: "=&r" (__dummy0), "=r" (__dummy1)
		: "r" (0x10000000), "r" (0xffffff0f)
		: "memory"
	);
}

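/* Clear SR.BL again, re-enabling exception and interrupt acceptance. */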
static inline void clear_bl_bit(void)
{
	unsigned long __dummy0, __dummy1;

	__asm__ __volatile__ (
		"stc	sr, %0\n\t"
		"and	%2, %0\n\t"
		"ldc	%0, sr\n\t"
		: "=&r" (__dummy0), "=r" (__dummy1)
		: "1" (~0x10000000)
		: "memory"
	);
}

#endif /* __ASM_SH_SYSTEM_32_H */