author     Paul Mundt <lethal@linux-sh.org>   2008-07-29 08:09:44 +0900
committer  Paul Mundt <lethal@linux-sh.org>   2008-07-29 08:09:44 +0900
commit     f15cbe6f1a4b4d9df59142fc8e4abb973302cf44 (patch)
tree       774d7b11abaaf33561ab8268bf51ddd9ceb79025  /arch/sh/include/cpu-sh4/cpu/cacheflush.h
parent     25326277d8d1393d1c66240e6255aca780f9e3eb (diff)
sh: migrate to arch/sh/include/
This follows the sparc changes a439fe51a1f8eb087c22dd24d69cebae4a3addac.

Most of the moving about was done with Sam's directions at:

http://marc.info/?l=linux-sh&m=121724823706062&w=2

with subsequent hacking and fixups entirely my fault.

Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
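As an aside, here is a minimal sketch of how the relocated header is expected to be reached under the new layout. This is an assumption based on the directory structure and the sparc precedent cited above, not something shown in this commit: the build is presumed to put arch/sh/include/cpu-sh4 on the include path, so the file added below resolves through the <cpu/...> prefix.

    /*
     * Illustrative sketch only, not part of this commit.  It assumes the
     * arch Makefile adds -Iarch/sh/include/cpu-sh4 (or equivalent), so the
     * header introduced in this diff is visible as <cpu/cacheflush.h>;
     * most callers would keep using the generic <asm/cacheflush.h> wrapper.
     */
    #include <cpu/cacheflush.h>   /* arch/sh/include/cpu-sh4/cpu/cacheflush.h */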
Diffstat (limited to 'arch/sh/include/cpu-sh4/cpu/cacheflush.h')
-rw-r--r--   arch/sh/include/cpu-sh4/cpu/cacheflush.h   43
1 file changed, 43 insertions(+), 0 deletions(-)
diff --git a/arch/sh/include/cpu-sh4/cpu/cacheflush.h b/arch/sh/include/cpu-sh4/cpu/cacheflush.h
new file mode 100644
index 00000000000..065306d376e
--- /dev/null
+++ b/arch/sh/include/cpu-sh4/cpu/cacheflush.h
@@ -0,0 +1,43 @@
+/*
+ * include/asm-sh/cpu-sh4/cacheflush.h
+ *
+ * Copyright (C) 1999 Niibe Yutaka
+ * Copyright (C) 2003 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef __ASM_CPU_SH4_CACHEFLUSH_H
+#define __ASM_CPU_SH4_CACHEFLUSH_H
+
+/*
+ * Caches are broken on SH-4 (unless we use write-through
+ * caching; in which case they're only semi-broken),
+ * so we need them.
+ */
+void flush_cache_all(void);
+void flush_dcache_all(void);
+void flush_cache_mm(struct mm_struct *mm);
+#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn);
+void flush_dcache_page(struct page *pg);
+
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
+
+void flush_icache_range(unsigned long start, unsigned long end);
+void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+ unsigned long addr, int len);
+
+#define flush_icache_page(vma,pg) do { } while (0)
+
+/* Initialization of P3 area for copy_user_page */
+void p3_cache_init(void);
+
+#define PG_mapped PG_arch_1
+
+#endif /* __ASM_CPU_SH4_CACHEFLUSH_H */
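For context on how the interface declared above is typically consumed, here is a minimal usage sketch. It is hypothetical and not part of the commit: after new instructions are written to memory, the D-cache must be written back and the I-cache invalidated before they can safely be executed, which is what flush_icache_range() provides. The patch_text() helper is invented purely for illustration.

    /*
     * Hypothetical example, not from the kernel tree: copy new opcodes
     * into place, then flush so the CPU fetches the fresh instructions.
     */
    #include <linux/types.h>
    #include <linux/string.h>
    #include <asm/cacheflush.h>

    static void patch_text(void *addr, const void *insn, size_t len)
    {
            memcpy(addr, insn, len);
            /* Write back the D-cache and invalidate the I-cache over the
             * patched range, using the API declared in this header. */
            flush_icache_range((unsigned long)addr,
                               (unsigned long)addr + len);
    }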