path: root/arch/sh/mm/flush-sh4.c
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
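
/*
 * SH-4 D-cache region flush primitives, built on the ocbwb, ocbp and
 * ocbi operand cache block instructions.
 */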

/*
 * Write back the dirty D-cache lines without invalidating them.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 */
static void sh4__flush_wback_region(void *start, int size)
{
	reg_size_t aligned_start, v, cnt, end;

	aligned_start = register_align(start);
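	/* Round v down and end up to L1 cache line boundaries so every
	 * line touching [start, start + size) is covered. */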
	v = aligned_start & ~(L1_CACHE_BYTES-1);
	end = (aligned_start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	cnt = (end - v) / L1_CACHE_BYTES;

	while (cnt >= 8) {
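		/* Unrolled by eight: ocbwb writes each dirty line back to
		 * memory without invalidating it. */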
		asm volatile("ocbwb	@%0" : : "r" (v));
		v += L1_CACHE_BYTES;
		asm volatile("ocbwb	@%0" : : "r" (v));
		v += L1_CACHE_BYTES;
		asm volatile("ocbwb	@%0" : : "r" (v));
		v += L1_CACHE_BYTES;
		asm volatile("ocbwb	@%0" : : "r" (v));
		v += L1_CACHE_BYTES;
		asm volatile("ocbwb	@%0" : : "r" (v));
		v += L1_CACHE_BYTES;
		asm volatile("ocbwb	@%0" : : "r" (v));
		v += L1_CACHE_BYTES;
		asm volatile("ocbwb	@%0" : : "r" (v));
		v += L1_CACHE_BYTES;
		asm volatile("ocbwb	@%0" : : "r" (v));
		v += L1_CACHE_BYTES;
		cnt -= 8;
	}

	while (cnt) {
		asm volatile("ocbwb	@%0" : : "r" (v));
		v += L1_CACHE_BYTES;
		cnt--;
	}
}

/*
 * Write back the dirty D-cache lines and invalidate them.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 */
static void sh4__flush_purge_region(void *start, int size)
{
	reg_size_t aligned_start, v, cnt, end;

	aligned_start = register_align(start);
	v = aligned_start & ~(L1_CACHE_BYTES-1);
	end = (aligned_start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	cnt = (end - v) / L1_CACHE_BYTES;

	while (cnt >= 8) {
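		/* ocbp writes each dirty line back and then invalidates it. */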
		asm volatile("ocbp	@%0" : : "r" (v));
		v += L1_CACHE_BYTES;
		asm volatile("ocbp	@%0" : : "r" (v));
		v += L1_CACHE_BYTES;
		asm volatile("ocbp	@%0" : : "r" (v));
		v += L1_CACHE_BYTES;
		asm volatile("ocbp	@%0" : : "r" (v));
		v += L1_CACHE_BYTES;
		asm volatile("ocbp	@%0" : : "r" (v));
		v += L1_CACHE_BYTES;
		asm volatile("ocbp	@%0" : : "r" (v));
		v += L1_CACHE_BYTES;
		asm volatile("ocbp	@%0" : : "r" (v));
		v += L1_CACHE_BYTES;
		asm volatile("ocbp	@%0" : : "r" (v));
		v += L1_CACHE_BYTES;
		cnt -= 8;
	}
	while (cnt) {
		asm volatile("ocbp	@%0" : : "r" (v));
		v += L1_CACHE_BYTES;
		cnt--;
	}
}

/*
 * Invalidate the D-cache lines covering the region without writing
 * them back.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 */
static void sh4__flush_invalidate_region(void *start, int size)
{
	reg_size_t aligned_start, v, cnt, end;

	aligned_start = register_align(start);
	v = aligned_start & ~(L1_CACHE_BYTES-1);
	end = (aligned_start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	cnt = (end - v) / L1_CACHE_BYTES;

	while (cnt >= 8) {
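		/* ocbi discards each line without writing it back; any dirty
		 * data in the region is lost. */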
		asm volatile("ocbi	@%0" : : "r" (v));
		v += L1_CACHE_BYTES;
		asm volatile("ocbi	@%0" : : "r" (v));
		v += L1_CACHE_BYTES;
		asm volatile("ocbi	@%0" : : "r" (v));
		v += L1_CACHE_BYTES;
		asm volatile("ocbi	@%0" : : "r" (v));
		v += L1_CACHE_BYTES;
		asm volatile("ocbi	@%0" : : "r" (v));
		v += L1_CACHE_BYTES;
		asm volatile("ocbi	@%0" : : "r" (v));
		v += L1_CACHE_BYTES;
		asm volatile("ocbi	@%0" : : "r" (v));
		v += L1_CACHE_BYTES;
		asm volatile("ocbi	@%0" : : "r" (v));
		v += L1_CACHE_BYTES;
		cnt -= 8;
	}

	while (cnt) {
		asm volatile("ocbi	@%0" : : "r" (v));
		v += L1_CACHE_BYTES;
		cnt--;
	}
}

void __init sh4__flush_region_init(void)
{
	__flush_wback_region		= sh4__flush_wback_region;
	__flush_invalidate_region	= sh4__flush_invalidate_region;
	__flush_purge_region		= sh4__flush_purge_region;
}
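
/*
 * Typical use (illustrative sketch, not part of this file): a driver that
 * has filled a buffer with the CPU and is about to start a non-coherent
 * DMA transfer would push the data out through the purge hook installed
 * above, e.g.
 *
 *	__flush_purge_region(buf, len);
 *
 * where buf and len describe the DMA buffer (the names are hypothetical).
 */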