Diffstat (limited to 'arch/sh/mm/tlb-flush.c')
-rw-r--r--  arch/sh/mm/tlb-flush.c  132
1 files changed, 132 insertions, 0 deletions
diff --git a/arch/sh/mm/tlb-flush.c b/arch/sh/mm/tlb-flush.c
new file mode 100644
index 00000000000..fd7e42bcaa4
--- /dev/null
+++ b/arch/sh/mm/tlb-flush.c
@@ -0,0 +1,132 @@
+/*
+ * TLB flushing operations for SH with an MMU.
+ *
+ * Copyright (C) 1999 Niibe Yutaka
+ * Copyright (C) 2003 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/mm.h>
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
+
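+/*
+ * Flush the TLB entry for a single user page in @vma's address space.
+ * If that mm is not the current one, temporarily switch to its ASID
+ * around the flush and then restore the previous ASID.
+ */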
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+	if (vma->vm_mm && vma->vm_mm->context != NO_CONTEXT) {
+		unsigned long flags;
+		unsigned long asid;
+		unsigned long saved_asid = MMU_NO_ASID;
+
+		asid = vma->vm_mm->context & MMU_CONTEXT_ASID_MASK;
+		page &= PAGE_MASK;
+
+		local_irq_save(flags);
+		if (vma->vm_mm != current->mm) {
+			saved_asid = get_asid();
+			set_asid(asid);
+		}
+		__flush_tlb_page(asid, page);
+		if (saved_asid != MMU_NO_ASID)
+			set_asid(saved_asid);
+		local_irq_restore(flags);
+	}
+}
+
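+/*
+ * Flush the TLB entries covering a user address range. If the range
+ * spans more than a quarter of the TLB, it is cheaper to drop the
+ * mm's context (forcing a new ASID to be allocated) than to flush
+ * page by page; otherwise each page is flushed under the mm's ASID,
+ * switching ASIDs temporarily if the mm is not the current one.
+ */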
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+		     unsigned long end)
+{
+	struct mm_struct *mm = vma->vm_mm;
+
+	if (mm->context != NO_CONTEXT) {
+		unsigned long flags;
+		int size;
+
+		local_irq_save(flags);
+		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+		if (size > (MMU_NTLB_ENTRIES / 4)) { /* Too many TLB entries to flush */
+			mm->context = NO_CONTEXT;
+			if (mm == current->mm)
+				activate_context(mm);
+		} else {
+			unsigned long asid = mm->context & MMU_CONTEXT_ASID_MASK;
+			unsigned long saved_asid = MMU_NO_ASID;
+
+			start &= PAGE_MASK;
+			end += (PAGE_SIZE - 1);
+			end &= PAGE_MASK;
+			if (mm != current->mm) {
+				saved_asid = get_asid();
+				set_asid(asid);
+			}
+			while (start < end) {
+				__flush_tlb_page(asid, start);
+				start += PAGE_SIZE;
+			}
+			if (saved_asid != MMU_NO_ASID)
+				set_asid(saved_asid);
+		}
+		local_irq_restore(flags);
+	}
+}
+
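+/*
+ * Flush TLB entries for a range of kernel addresses. Large ranges
+ * fall back to flush_tlb_all(); smaller ones are flushed page by
+ * page under init_mm's ASID.
+ */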
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	unsigned long flags;
+	int size;
+
+	local_irq_save(flags);
+	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+	if (size > (MMU_NTLB_ENTRIES / 4)) { /* Too many TLB entries to flush */
+		flush_tlb_all();
+	} else {
+		unsigned long asid = init_mm.context & MMU_CONTEXT_ASID_MASK;
+		unsigned long saved_asid = get_asid();
+
+		start &= PAGE_MASK;
+		end += (PAGE_SIZE - 1);
+		end &= PAGE_MASK;
+		set_asid(asid);
+		while (start < end) {
+			__flush_tlb_page(asid, start);
+			start += PAGE_SIZE;
+		}
+		set_asid(saved_asid);
+	}
+	local_irq_restore(flags);
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+	/* Invalidate all TLB entries of this process. */
+	/* Instead of invalidating each entry, we get a new MMU context. */
+	if (mm->context != NO_CONTEXT) {
+		unsigned long flags;
+
+		local_irq_save(flags);
+		mm->context = NO_CONTEXT;
+		if (mm == current->mm)
+			activate_context(mm);
+		local_irq_restore(flags);
+	}
+}
+
+void flush_tlb_all(void)
+{
+	unsigned long flags, status;
+
+	/*
+	 * Flush all TLB entries.
+	 *
+	 * Write to the invalidate bit in the MMU control register:
+	 * the TF bit on SH-3, the TI bit on SH-4.
+	 * It is the same position on both, bit #2.
+	 */
+	local_irq_save(flags);
+	status = ctrl_inl(MMUCR);
+	status |= 0x04;
+	ctrl_outl(status, MMUCR);
+	ctrl_barrier();
+	local_irq_restore(flags);
+}