Diffstat (limited to 'arch/sh/mm/tlbflush_64.c')
 arch/sh/mm/tlbflush_64.c | 22 +++++-----------------
 1 file changed, 5 insertions(+), 17 deletions(-)
diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c
index 3ce40ea3482..2dcc48528f7 100644
--- a/arch/sh/mm/tlbflush_64.c
+++ b/arch/sh/mm/tlbflush_64.c
@@ -329,22 +329,6 @@ do_sigbus:
 	goto no_context;
 }
 
-void update_mmu_cache(struct vm_area_struct * vma,
-		      unsigned long address, pte_t pte)
-{
-	/*
-	 * This appears to get called once for every pte entry that gets
-	 * established => I don't think it's efficient to try refilling the
-	 * TLBs with the pages - some may not get accessed even. Also, for
-	 * executable pages, it is impossible to determine reliably here which
-	 * TLB they should be mapped into (or both even).
-	 *
-	 * So, just do nothing here and handle faults on demand. In the
-	 * TLBMISS handling case, the refill is now done anyway after the pte
-	 * has been fixed up, so that deals with most useful cases.
-	 */
-}
-
 void local_flush_tlb_one(unsigned long asid, unsigned long page)
 {
 	unsigned long long match, pteh=0, lpage;
@@ -353,7 +337,7 @@ void local_flush_tlb_one(unsigned long asid, unsigned long page)
 	/*
 	 * Sign-extend based on neff.
 	 */
-	lpage = (page & NEFF_SIGN) ? (page | NEFF_MASK) : page;
+	lpage = neff_sign_extend(page);
 	match = (asid << PTEH_ASID_SHIFT) | PTEH_VALID;
 	match |= lpage;
 
@@ -482,3 +466,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 	/* FIXME: Optimize this later.. */
 	flush_tlb_all();
 }
+
+void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+{
+}
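
For reference, the only functional change in local_flush_tlb_one() is that the open-coded NEFF sign extension is replaced by a neff_sign_extend() helper. A minimal sketch of what such a helper plausibly looks like, assuming it lives in a shared SH header (the exact location is not shown in this diff) and simply wraps the test that the removed line performed:

/* Hypothetical sketch; the real helper is defined elsewhere in arch/sh. */
static inline unsigned long long neff_sign_extend(unsigned long val)
{
	unsigned long long extended = val;

	/* If the NEFF sign bit is set, propagate it through the upper bits. */
	return (extended & NEFF_SIGN) ? (extended | NEFF_MASK) : extended;
}

The removed update_mmu_cache() was a deliberate no-op, as its comment explains: TLB refill is handled on demand from the TLB miss path. The new empty __update_tlb() stub presumably preserves that behaviour while satisfying a shared update_mmu_cache() wrapper that is expected to call __update_tlb() on SH.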