author     Paul Mundt <lethal@linux-sh.org>   2012-05-14 15:33:28 +0900
committer  Paul Mundt <lethal@linux-sh.org>   2012-05-14 15:33:28 +0900
commit     28080329ede3e4110bb14306b4529a5b9a2ce163 (patch)
tree       6c4d953750eb8aa99077cb0315af060b4dcffc33
parent     e45af0e083dfc5d49dbad6965b9eeb3ac0072d82 (diff)
sh: Enable shared page fault handler for _32/_64.
This moves the now generic _32 page fault handling code to a shared place
and adapts the _64 implementation to make use of it.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
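
Note: the one functional change folded into the move is in access_error(). The shared handler now receives the raw fault code rather than a write flag, which lets it reject instruction fetches (ITLB misses) from mappings that lack VM_EXEC. Below is a minimal userspace sketch of that check; the FAULT_CODE_* and VM_* bit values are placeholders for illustration only (the real encodings live in the arch and mm headers):

    #include <stdio.h>

    /* Placeholder bit values, not the real kernel encodings. */
    #define FAULT_CODE_WRITE 0x0001
    #define FAULT_CODE_ITLB  0x0004

    #define VM_READ  0x1
    #define VM_WRITE 0x2
    #define VM_EXEC  0x4

    /* Mirrors the consolidated check: nonzero means a permission error. */
    static int access_error(int error_code, unsigned long vm_flags)
    {
            /* write, present and write, not present: */
            if (error_code & FAULT_CODE_WRITE)
                    return !(vm_flags & VM_WRITE);

            /* ITLB miss on NX page */
            if ((error_code & FAULT_CODE_ITLB) && !(vm_flags & VM_EXEC))
                    return 1;

            /* read, not present: */
            return !(vm_flags & (VM_READ | VM_EXEC | VM_WRITE));
    }

    int main(void)
    {
            /* Instruction fetch from a read-only, no-exec mapping: rejected. */
            printf("%d\n", access_error(FAULT_CODE_ITLB, VM_READ));   /* 1 */
            /* Plain read from the same mapping: allowed. */
            printf("%d\n", access_error(0, VM_READ));                 /* 0 */
            /* Write to a read-only mapping: rejected. */
            printf("%d\n", access_error(FAULT_CODE_WRITE, VM_READ));  /* 1 */
            return 0;
    }

Before this change, the _32 handler's read path only verified that some access bit was set, so an ITLB miss on a readable but non-executable VMA would not have been refused.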
-rw-r--r--  arch/sh/mm/Makefile                                         |   4
-rw-r--r--  arch/sh/mm/fault.c (renamed from arch/sh/mm/fault_32.c)     |  80
-rw-r--r--  arch/sh/mm/tlbex_32.c                                       |  78
-rw-r--r--  arch/sh/mm/tlbex_64.c (renamed from arch/sh/mm/fault_64.c)  |   2
-rw-r--r--  arch/sh/mm/tlbflush_64.c                                    | 243
5 files changed, 93 insertions(+), 314 deletions(-)
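
A word on the (u32) casts added in show_pte() below: the '*' field-width specifier in a printk format consumes an int from the argument list, while sizeof() yields a size_t, whose width differs between the _32 and _64 builds that now share this file. A small userspace illustration of the same pattern, with printf standing in for printk:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long val = 0x12345678ULL;

            /* '*' pops an int from the varargs; a size_t from sizeof()
             * must be cast down, or the argument slot is the wrong size
             * on an LP64 build. */
            printf("*pgd=%0*llx\n", (int)(sizeof(unsigned long) * 2), val);
            return 0;
    }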
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
index 2228c8cee4d..ba819108631 100644
--- a/arch/sh/mm/Makefile
+++ b/arch/sh/mm/Makefile
@@ -15,8 +15,8 @@ cacheops-$(CONFIG_CPU_SHX3)	+= cache-shx3.o
 obj-y			+= $(cacheops-y)
 
 mmu-y			:= nommu.o extable_32.o
-mmu-$(CONFIG_MMU)	:= extable_$(BITS).o fault_$(BITS).o gup.o \
-			   ioremap.o kmap.o pgtable.o tlbflush_$(BITS).o
+mmu-$(CONFIG_MMU)	:= extable_$(BITS).o fault.o gup.o ioremap.o kmap.o \
+			   pgtable.o tlbex_$(BITS).o tlbflush_$(BITS).o
 
 obj-y			+= $(mmu-y)
 
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault.c
index 39e291c6b35..16799f920f9 100644
--- a/arch/sh/mm/fault_32.c
+++ b/arch/sh/mm/fault.c
@@ -66,7 +66,7 @@ static void show_pte(struct mm_struct *mm, unsigned long addr)
 	printk(KERN_ALERT "pgd = %p\n", pgd);
 	pgd += pgd_index(addr);
 	printk(KERN_ALERT "[%08lx] *pgd=%0*Lx", addr,
-	       sizeof(*pgd) * 2, (u64)pgd_val(*pgd));
+	       (u32)(sizeof(*pgd) * 2), (u64)pgd_val(*pgd));
 
 	do {
 		pud_t *pud;
@@ -83,7 +83,7 @@ static void show_pte(struct mm_struct *mm, unsigned long addr)
 		pud = pud_offset(pgd, addr);
 		if (PTRS_PER_PUD != 1)
-			printk(", *pud=%0*Lx", sizeof(*pud) * 2,
+			printk(", *pud=%0*Lx", (u32)(sizeof(*pud) * 2),
 			       (u64)pud_val(*pud));
 
 		if (pud_none(*pud))
@@ -96,7 +96,7 @@ static void show_pte(struct mm_struct *mm, unsigned long addr)
 		pmd = pmd_offset(pud, addr);
 		if (PTRS_PER_PMD != 1)
-			printk(", *pmd=%0*Lx", sizeof(*pmd) * 2,
+			printk(", *pmd=%0*Lx", (u32)(sizeof(*pmd) * 2),
 			       (u64)pmd_val(*pmd));
 
 		if (pmd_none(*pmd))
@@ -112,7 +112,8 @@ static void show_pte(struct mm_struct *mm, unsigned long addr)
 			break;
 
 		pte = pte_offset_kernel(pmd, addr);
-		printk(", *pte=%0*Lx", sizeof(*pte) * 2, (u64)pte_val(*pte));
+		printk(", *pte=%0*Lx", (u32)(sizeof(*pte) * 2),
+		       (u64)pte_val(*pte));
 	} while (0);
 
 	printk("\n");
@@ -354,15 +355,20 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
 	return 1;
 }
 
-static inline int access_error(int write, struct vm_area_struct *vma)
+static inline int access_error(int error_code, struct vm_area_struct *vma)
 {
-	if (write) {
+	if (error_code & FAULT_CODE_WRITE) {
 		/* write, present and write, not present: */
 		if (unlikely(!(vma->vm_flags & VM_WRITE)))
 			return 1;
 		return 0;
 	}
 
+	/* ITLB miss on NX page */
+	if (unlikely((error_code & FAULT_CODE_ITLB) &&
+		     !(vma->vm_flags & VM_EXEC)))
+		return 1;
+
 	/* read, not present: */
 	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
 		return 1;
@@ -500,65 +506,3 @@ good_area:
 
 	up_read(&mm->mmap_sem);
 }
-
-/*
- * Called with interrupts disabled.
- */
-asmlinkage int __kprobes
-handle_tlbmiss(struct pt_regs *regs, unsigned long error_code,
-	       unsigned long address)
-{
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
-	pte_t entry;
-
-	/*
-	 * We don't take page faults for P1, P2, and parts of P4, these
-	 * are always mapped, whether it be due to legacy behaviour in
-	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
-	 */
-	if (address >= P3SEG && address < P3_ADDR_MAX) {
-		pgd = pgd_offset_k(address);
-	} else {
-		if (unlikely(address >= TASK_SIZE || !current->mm))
-			return 1;
-
-		pgd = pgd_offset(current->mm, address);
-	}
-
-	pud = pud_offset(pgd, address);
-	if (pud_none_or_clear_bad(pud))
-		return 1;
-	pmd = pmd_offset(pud, address);
-	if (pmd_none_or_clear_bad(pmd))
-		return 1;
-	pte = pte_offset_kernel(pmd, address);
-	entry = *pte;
-	if (unlikely(pte_none(entry) || pte_not_present(entry)))
-		return 1;
-	if (unlikely(error_code && !pte_write(entry)))
-		return 1;
-
-	if (error_code)
-		entry = pte_mkdirty(entry);
-	entry = pte_mkyoung(entry);
-
-	set_pte(pte, entry);
-
-#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SMP)
-	/*
-	 * SH-4 does not set MMUCR.RC to the corresponding TLB entry in
-	 * the case of an initial page write exception, so we need to
-	 * flush it in order to avoid potential TLB entry duplication.
-	 */
-	if (error_code == FAULT_CODE_INITIAL)
-		local_flush_tlb_one(get_asid(), address & PAGE_MASK);
-#endif
-
-	set_thread_fault_code(error_code);
-	update_mmu_cache(NULL, address, pte);
-
-	return 0;
-}
diff --git a/arch/sh/mm/tlbex_32.c b/arch/sh/mm/tlbex_32.c
new file mode 100644
index 00000000000..382262dc0c4
--- /dev/null
+++ b/arch/sh/mm/tlbex_32.c
@@ -0,0 +1,78 @@
+/*
+ * TLB miss handler for SH with an MMU.
+ *
+ * Copyright (C) 1999 Niibe Yutaka
+ * Copyright (C) 2003 - 2012 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/kprobes.h>
+#include <linux/kdebug.h>
+#include <asm/mmu_context.h>
+#include <asm/thread_info.h>
+
+/*
+ * Called with interrupts disabled.
+ */
+asmlinkage int __kprobes
+handle_tlbmiss(struct pt_regs *regs, unsigned long error_code,
+	       unsigned long address)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
+	pte_t entry;
+
+	/*
+	 * We don't take page faults for P1, P2, and parts of P4, these
+	 * are always mapped, whether it be due to legacy behaviour in
+	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
+	 */
+	if (address >= P3SEG && address < P3_ADDR_MAX) {
+		pgd = pgd_offset_k(address);
+	} else {
+		if (unlikely(address >= TASK_SIZE || !current->mm))
+			return 1;
+
+		pgd = pgd_offset(current->mm, address);
+	}
+
+	pud = pud_offset(pgd, address);
+	if (pud_none_or_clear_bad(pud))
+		return 1;
+	pmd = pmd_offset(pud, address);
+	if (pmd_none_or_clear_bad(pmd))
+		return 1;
+	pte = pte_offset_kernel(pmd, address);
+	entry = *pte;
+	if (unlikely(pte_none(entry) || pte_not_present(entry)))
+		return 1;
+	if (unlikely(error_code && !pte_write(entry)))
+		return 1;
+
+	if (error_code)
+		entry = pte_mkdirty(entry);
+	entry = pte_mkyoung(entry);
+
+	set_pte(pte, entry);
+
+#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SMP)
+	/*
+	 * SH-4 does not set MMUCR.RC to the corresponding TLB entry in
+	 * the case of an initial page write exception, so we need to
+	 * flush it in order to avoid potential TLB entry duplication.
+	 */
+	if (error_code == FAULT_CODE_INITIAL)
+		local_flush_tlb_one(get_asid(), address & PAGE_MASK);
+#endif
+
+	set_thread_fault_code(error_code);
+	update_mmu_cache(NULL, address, pte);
+
+	return 0;
+}
diff --git a/arch/sh/mm/fault_64.c b/arch/sh/mm/tlbex_64.c
index 33a921962d0..d15b9946650 100644
--- a/arch/sh/mm/fault_64.c
+++ b/arch/sh/mm/tlbex_64.c
@@ -246,7 +246,7 @@ asmlinkage int do_fast_page_fault(unsigned long long ssr_md,
 	tsk = current;
 	mm = tsk->mm;
 
-	if ((address >= VMALLOC_START && address < VMALLOC_END)) {
+	if (is_vmalloc_addr((void *)address)) {
 		if (ssr_md)
 			/*
 			 * Process-contexts can never have this address
diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c
index 99c5833036b..908167bdfc0 100644
--- a/arch/sh/mm/tlbflush_64.c
+++ b/arch/sh/mm/tlbflush_64.c
@@ -28,249 +28,6 @@
 #include <asm/pgalloc.h>
 #include <asm/mmu_context.h>
 
-static pte_t *lookup_pte(struct mm_struct *mm, unsigned long address)
-{
-	pgd_t *dir;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
-	pte_t entry;
-
-	dir = pgd_offset(mm, address);
-	if (pgd_none(*dir))
-		return NULL;
-
-	pud = pud_offset(dir, address);
-	if (pud_none(*pud))
-		return NULL;
-
-	pmd = pmd_offset(pud, address);
-	if (pmd_none(*pmd))
-		return NULL;
-
-	pte = pte_offset_kernel(pmd, address);
-	entry = *pte;
-	if (pte_none(entry) || !pte_present(entry))
-		return NULL;
-
-	return pte;
-}
-
-/*
- * This routine handles page faults. It determines the address,
- * and the problem, and then passes it off to one of the appropriate
- * routines.
- */
-asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
-			      unsigned long address)
-{
-	struct task_struct *tsk;
-	struct mm_struct *mm;
-	struct vm_area_struct * vma;
-	const struct exception_table_entry *fixup;
-	int write = error_code & FAULT_CODE_WRITE;
-	int textaccess = error_code & FAULT_CODE_ITLB;
-	unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-			      (write ? FAULT_FLAG_WRITE : 0));
-	pte_t *pte;
-	int fault;
-
-	/* SIM
-	 * Note this is now called with interrupts still disabled
-	 * This is to cope with being called for a missing IO port
-	 * address with interrupts disabled. This should be fixed as
-	 * soon as we have a better 'fast path' miss handler.
-	 *
-	 * Plus take care how you try and debug this stuff.
-	 * For example, writing debug data to a port which you
-	 * have just faulted on is not going to work.
-	 */
-
-	tsk = current;
-	mm = tsk->mm;
-
-	/* Not an IO address, so reenable interrupts */
-	local_irq_enable();
-
-	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
-
-	/*
-	 * If we're in an interrupt or have no user
-	 * context, we must not take the fault..
-	 */
-	if (in_atomic() || !mm)
-		goto no_context;
-
-retry:
-	/* TLB misses upon some cache flushes get done under cli() */
-	down_read(&mm->mmap_sem);
-
-	vma = find_vma(mm, address);
-	if (!vma)
-		goto bad_area;
-	if (vma->vm_start <= address)
-		goto good_area;
-	if (!(vma->vm_flags & VM_GROWSDOWN))
-		goto bad_area;
-	if (expand_stack(vma, address))
-		goto bad_area;
-
-/*
- * Ok, we have a good vm_area for this memory access, so
- * we can handle it..
- */
-good_area:
-	if (textaccess) {
-		if (!(vma->vm_flags & VM_EXEC))
-			goto bad_area;
-	} else {
-		if (write) {
-			if (!(vma->vm_flags & VM_WRITE))
-				goto bad_area;
-		} else {
-			if (!(vma->vm_flags & VM_READ))
-				goto bad_area;
-		}
-	}
-
-	/*
-	 * If for any reason at all we couldn't handle the fault,
-	 * make sure we exit gracefully rather than endlessly redo
-	 * the fault.
-	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
-
-	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
-		return;
-
-	if (unlikely(fault & VM_FAULT_ERROR)) {
-		if (fault & VM_FAULT_OOM)
-			goto out_of_memory;
-		else if (fault & VM_FAULT_SIGBUS)
-			goto do_sigbus;
-		BUG();
-	}
-
-	if (flags & FAULT_FLAG_ALLOW_RETRY) {
-		if (fault & VM_FAULT_MAJOR) {
-			tsk->maj_flt++;
-			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
-				      regs, address);
-		} else {
-			tsk->min_flt++;
-			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
-				      regs, address);
-		}
-
-		if (fault & VM_FAULT_RETRY) {
-			flags &= ~FAULT_FLAG_ALLOW_RETRY;
-
-			/*
-			 * No need to up_read(&mm->mmap_sem) as we would
-			 * have already released it in __lock_page_or_retry
-			 * in mm/filemap.c.
-			 */
-			goto retry;
-		}
-	}
-
-	/* If we get here, the page fault has been handled. Do the TLB refill
-	   now from the newly-setup PTE, to avoid having to fault again right
-	   away on the same instruction. */
-	pte = lookup_pte (mm, address);
-	if (!pte) {
-		/* From empirical evidence, we can get here, due to
-		   !pte_present(pte). (e.g. if a swap-in occurs, and the page
-		   is swapped back out again before the process that wanted it
-		   gets rescheduled?) */
-		goto no_pte;
-	}
-
-	__do_tlb_refill(address, textaccess, pte);
-
-no_pte:
-
-	up_read(&mm->mmap_sem);
-	return;
-
-/*
- * Something tried to access memory that isn't in our memory map..
- * Fix it, but check if it's kernel or user first..
- */
-bad_area:
-	up_read(&mm->mmap_sem);
-
-	if (user_mode(regs)) {
-		static int count=0;
-		siginfo_t info;
-		if (count < 4) {
-			/* This is really to help debug faults when starting
-			 * usermode, so only need a few */
-			count++;
-			printk("user mode bad_area address=%08lx pid=%d (%s) pc=%08lx\n",
-			       address, task_pid_nr(current), current->comm,
-			       (unsigned long) regs->pc);
-		}
-		if (is_global_init(tsk)) {
-			panic("INIT had user mode bad_area\n");
-		}
-		tsk->thread.address = address;
-		info.si_signo = SIGSEGV;
-		info.si_errno = 0;
-		info.si_addr = (void *) address;
-		force_sig_info(SIGSEGV, &info, tsk);
-		return;
-	}
-
-no_context:
-	/* Are we prepared to handle this kernel fault? */
-	fixup = search_exception_tables(regs->pc);
-	if (fixup) {
-		regs->pc = fixup->fixup;
-		return;
-	}
-
-/*
- * Oops. The kernel tried to access some bad page. We'll have to
- * terminate things with extreme prejudice.
- *
- */
-	if (address < PAGE_SIZE)
-		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
-	else
-		printk(KERN_ALERT "Unable to handle kernel paging request");
-	printk(" at virtual address %08lx\n", address);
-	printk(KERN_ALERT "pc = %08Lx%08Lx\n", regs->pc >> 32, regs->pc & 0xffffffff);
-	die("Oops", regs, error_code);
-	do_exit(SIGKILL);
-
-/*
- * We ran out of memory, or some other thing happened to us that made
- * us unable to handle the page fault gracefully.
- */
-out_of_memory:
-	up_read(&mm->mmap_sem);
-	if (!user_mode(regs))
-		goto no_context;
-	pagefault_out_of_memory();
-	return;
-
-do_sigbus:
-	printk("fault:Do sigbus\n");
-	up_read(&mm->mmap_sem);
-
-	/*
-	 * Send a sigbus, regardless of whether we were in kernel
-	 * or user mode.
-	 */
-	tsk->thread.address = address;
-	force_sig(SIGBUS, tsk);
-
-	/* Kernel mode? Handle exceptions or die */
-	if (!user_mode(regs))
-		goto no_context;
-}
-
 void local_flush_tlb_one(unsigned long asid, unsigned long page)
 {
 	unsigned long long match, pteh=0, lpage;
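
The is_vmalloc_addr() conversion in tlbex_64.c should be behavior-preserving on an MMU kernel: the generic helper is essentially the open-coded VMALLOC range test it replaces. A minimal sketch of the equivalence, using stand-in values for VMALLOC_START/VMALLOC_END (the real bounds come from the arch's pgtable headers):

    #include <stdio.h>

    /* Stand-in bounds for illustration only. */
    #define VMALLOC_START 0xc0000000UL
    #define VMALLOC_END   0xe0000000UL

    /* Roughly what the generic helper does on a CONFIG_MMU kernel. */
    static int is_vmalloc_addr(const void *x)
    {
            unsigned long addr = (unsigned long)x;

            return addr >= VMALLOC_START && addr < VMALLOC_END;
    }

    int main(void)
    {
            unsigned long address = 0xc0001000UL;

            /* The old open-coded test and the helper agree. */
            printf("%d %d\n",
                   (int)(address >= VMALLOC_START && address < VMALLOC_END),
                   is_vmalloc_addr((void *)address));
            return 0;
    }

Using the helper also drops the doubled parentheses of the old condition and keeps the range check in one place should the VMALLOC bounds ever move.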