| author    | Paul Mundt <lethal@linux-sh.org>         | 2012-05-14 15:52:28 +0900 |
|-----------|------------------------------------------|---------------------------|
| committer | Paul Mundt <lethal@linux-sh.org>         | 2012-05-14 15:52:28 +0900 |
| commit    | c06fd28387a3da2cc4763f7f471f735ccdd61b88 |                           |
| tree      | e2d95ffa04f7e5b17958831e29935a231e094d09 | /arch/sh/mm               |
| parent    | 28080329ede3e4110bb14306b4529a5b9a2ce163 |                           |
sh64: Migrate to __update_tlb() API.
Now that we have a method for finding out if we're handling an ITLB fault
or not without passing it all the way down the chain, it's possible to
use the __update_tlb() interface in place of a special __do_tlb_refill().
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
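
The essence of the change: do_fast_page_fault() now records whether the miss came from the ITLB in the per-thread fault code via set_thread_fault_code(FAULT_CODE_ITLB), and the new sh64 __update_tlb() reads it back with get_thread_fault_code() to choose between cpu_data->itlb and cpu_data->dtlb, so the old textaccess argument no longer has to be threaded through handle_tlbmiss() and handle_vmalloc_fault(). The snippet below is only a minimal, self-contained model of that control flow, not kernel code; the demo_* names and the fake_tlb type are invented stand-ins, while the real helpers appear in the diff that follows.

```c
#include <stdio.h>

#define FAULT_CODE_ITLB	(1U << 0)	/* mirrors the flag used in the patch */

/*
 * In the kernel this is per-thread state behind
 * set_thread_fault_code()/get_thread_fault_code(); a single static
 * variable is enough for this single-threaded demo.
 */
static unsigned int thread_fault_code;

static void set_thread_fault_code(unsigned int code)
{
	thread_fault_code |= code;
}

static unsigned int get_thread_fault_code(void)
{
	return thread_fault_code;
}

/* Stand-ins for cpu_data->itlb and cpu_data->dtlb. */
struct fake_tlb {
	const char *name;
};

static struct fake_tlb itlb = { "ITLB" };
static struct fake_tlb dtlb = { "DTLB" };

/* Models the selection that __update_tlb() now performs internally. */
static void demo_update_tlb(unsigned long address)
{
	struct fake_tlb *tlbp;

	if (get_thread_fault_code() & FAULT_CODE_ITLB)
		tlbp = &itlb;
	else
		tlbp = &dtlb;

	printf("refill %s slot for address %#lx\n", tlbp->name, address);
}

/* Models do_fast_page_fault(): record the fault type once, up front... */
static void demo_fast_page_fault(unsigned long address, int is_text_access)
{
	thread_fault_code = 0;
	if (is_text_access)
		set_thread_fault_code(FAULT_CODE_ITLB);

	/*
	 * ...so the refill path no longer needs a 'textaccess'
	 * parameter passed through every helper.
	 */
	demo_update_tlb(address);
}

int main(void)
{
	demo_fast_page_fault(0x400000UL, 1);	/* instruction fetch miss */
	demo_fast_page_fault(0x7f0000UL, 0);	/* data access miss */
	return 0;
}
```

Built as an ordinary userspace program the demo just prints which TLB would be refilled for each simulated fault; in the real kernel the flag lives in per-thread state rather than a file-scope variable.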
Diffstat (limited to 'arch/sh/mm')
| -rw-r--r-- | arch/sh/mm/tlb-sh5.c     | 40 |
| -rw-r--r-- | arch/sh/mm/tlbex_64.c    | 57 |
| -rw-r--r-- | arch/sh/mm/tlbflush_64.c |  4 |
3 files changed, 49 insertions, 52 deletions
diff --git a/arch/sh/mm/tlb-sh5.c b/arch/sh/mm/tlb-sh5.c
index f27dbe1c159..3aea25dc431 100644
--- a/arch/sh/mm/tlb-sh5.c
+++ b/arch/sh/mm/tlb-sh5.c
@@ -182,3 +182,43 @@ void tlb_unwire_entry(void)
 
 	local_irq_restore(flags);
 }
+
+void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+{
+	unsigned long long ptel;
+	unsigned long long pteh=0;
+	struct tlb_info *tlbp;
+	unsigned long long next;
+	unsigned int fault_code = get_thread_fault_code();
+
+	/* Get PTEL first */
+	ptel = pte.pte_low;
+
+	/*
+	 * Set PTEH register
+	 */
+	pteh = neff_sign_extend(address & MMU_VPN_MASK);
+
+	/* Set the ASID. */
+	pteh |= get_asid() << PTEH_ASID_SHIFT;
+	pteh |= PTEH_VALID;
+
+	/* Set PTEL register, set_pte has performed the sign extension */
+	ptel &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
+
+	if (fault_code & FAULT_CODE_ITLB)
+		tlbp = &cpu_data->itlb;
+	else
+		tlbp = &cpu_data->dtlb;
+
+	next = tlbp->next;
+	__flush_tlb_slot(next);
+	asm volatile ("putcfg %0,1,%2\n\n\t"
+		      "putcfg %0,0,%1\n"
+		      : : "r" (next), "r" (pteh), "r" (ptel) );
+
+	next += TLB_STEP;
+	if (next > tlbp->last)
+		next = tlbp->first;
+	tlbp->next = next;
+}
diff --git a/arch/sh/mm/tlbex_64.c b/arch/sh/mm/tlbex_64.c
index d15b9946650..98b64278f8c 100644
--- a/arch/sh/mm/tlbex_64.c
+++ b/arch/sh/mm/tlbex_64.c
@@ -38,54 +38,15 @@
 #include <asm/uaccess.h>
 #include <asm/pgalloc.h>
 #include <asm/mmu_context.h>
-#include <cpu/registers.h>
-
-/* Callable from fault.c, so not static */
-inline void __do_tlb_refill(unsigned long address,
-			    unsigned long long is_text_not_data, pte_t *pte)
-{
-	unsigned long long ptel;
-	unsigned long long pteh=0;
-	struct tlb_info *tlbp;
-	unsigned long long next;
-
-	/* Get PTEL first */
-	ptel = pte_val(*pte);
-
-	/*
-	 * Set PTEH register
-	 */
-	pteh = neff_sign_extend(address & MMU_VPN_MASK);
-
-	/* Set the ASID. */
-	pteh |= get_asid() << PTEH_ASID_SHIFT;
-	pteh |= PTEH_VALID;
-
-	/* Set PTEL register, set_pte has performed the sign extension */
-	ptel &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
-
-	tlbp = is_text_not_data ? &(cpu_data->itlb) : &(cpu_data->dtlb);
-	next = tlbp->next;
-	__flush_tlb_slot(next);
-	asm volatile ("putcfg %0,1,%2\n\n\t"
-		      "putcfg %0,0,%1\n"
-		      : : "r" (next), "r" (pteh), "r" (ptel) );
-
-	next += TLB_STEP;
-	if (next > tlbp->last) next = tlbp->first;
-	tlbp->next = next;
-
-}
 
 static int handle_vmalloc_fault(struct mm_struct *mm,
 				unsigned long protection_flags,
-				unsigned long long textaccess,
 				unsigned long address)
 {
 	pgd_t *dir;
 	pud_t *pud;
 	pmd_t *pmd;
-	static pte_t *pte;
+	pte_t *pte;
 	pte_t entry;
 
 	dir = pgd_offset_k(address);
@@ -106,14 +67,13 @@ static int handle_vmalloc_fault(struct mm_struct *mm,
 	if ((pte_val(entry) & protection_flags) != protection_flags)
 		return 0;
 
-	__do_tlb_refill(address, textaccess, pte);
+	update_mmu_cache(NULL, address, pte);
 
 	return 1;
 }
 
 static int handle_tlbmiss(struct mm_struct *mm,
 			  unsigned long long protection_flags,
-			  unsigned long long textaccess,
 			  unsigned long address)
 {
 	pgd_t *dir;
@@ -165,7 +125,7 @@ static int handle_tlbmiss(struct mm_struct *mm,
 	if ((pte_val(entry) & protection_flags) != protection_flags)
 		return 0;
 
-	__do_tlb_refill(address, textaccess, pte);
+	update_mmu_cache(NULL, address, pte);
 
 	return 1;
 }
@@ -210,7 +170,6 @@ asmlinkage int do_fast_page_fault(unsigned long long ssr_md,
 {
 	struct task_struct *tsk;
 	struct mm_struct *mm;
-	unsigned long long textaccess;
 	unsigned long long protection_flags;
 	unsigned long long index;
 	unsigned long long expevt4;
@@ -229,8 +188,11 @@ asmlinkage int do_fast_page_fault(unsigned long long ssr_md,
 	 * that PRU is set when it needs to be. */
 	index = expevt4 ^ (expevt4 >> 5);
 	index &= 7;
+
 	protection_flags = expevt_lookup_table.protection_flags[index];
-	textaccess = expevt_lookup_table.is_text_access[index];
+
+	if (expevt_lookup_table.is_text_access[index])
+		set_thread_fault_code(FAULT_CODE_ITLB);
 
 	/* SIM
 	 * Note this is now called with interrupts still disabled
@@ -252,11 +214,10 @@ asmlinkage int do_fast_page_fault(unsigned long long ssr_md,
 		 * Process-contexts can never have this address
 		 * range mapped
 		 */
-		if (handle_vmalloc_fault(mm, protection_flags,
-					 textaccess, address))
+		if (handle_vmalloc_fault(mm, protection_flags, address))
 			return 1;
 	} else if (!in_interrupt() && mm) {
-		if (handle_tlbmiss(mm, protection_flags, textaccess, address))
+		if (handle_tlbmiss(mm, protection_flags, address))
 			return 1;
 	}
 
diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c
index 908167bdfc0..f33fdd2558e 100644
--- a/arch/sh/mm/tlbflush_64.c
+++ b/arch/sh/mm/tlbflush_64.c
@@ -170,7 +170,3 @@ void __flush_tlb_global(void)
 {
 	flush_tlb_all();
 }
-
-void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
-{
-}