From 298476220d1f793ca0ac6c9e5dc817e1ad3e9851 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Wed, 27 Sep 2006 14:57:44 +0900
Subject: sh: Add control register barriers.

Currently when making changes to control registers, we typically need
some time for changes to take effect (8 nops, generally). However, for
sh4a we simply need to do an icbi..

This is a simple patch for implementing a general-purpose ctrl_barrier()
which functions as a control register write barrier. There's some
additional documentation in the patch itself, but it's pretty
self-explanatory.

There were also some places where we were not doing the barrier, which
didn't seem to have any adverse effects on legacy parts, but certainly
did on sh4a. It's safer to have the barrier in place for legacy parts as
well in these cases, though this does make flush_tlb_all() more
expensive (by an order of 8 nops). We can ifdef around the
flush_tlb_all() case for now if it's clear that all legacy parts won't
have a problem with this.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/fault.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

(limited to 'arch/sh/mm/fault.c')

diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 775f86cd3fe..364181f27b7 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -369,12 +369,13 @@ void flush_tlb_all(void)
	 * Flush all the TLB.
	 *
	 * Write to the MMU control register's bit:
-	 * 	TF-bit for SH-3, TI-bit for SH-4.
+	 *	TF-bit for SH-3, TI-bit for SH-4.
	 *	It's same position, bit #2.
	 */
	local_irq_save(flags);
	status = ctrl_inl(MMUCR);
-	status |= 0x04;
+	status |= 0x04;
	ctrl_outl(status, MMUCR);
+	ctrl_barrier();
	local_irq_restore(flags);
 }
--
cgit v1.2.3-70-g09d2

From 26ff6c11ef38e08990c1e417c299246e6ab18ff7 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Wed, 27 Sep 2006 15:13:36 +0900
Subject: sh: page table alloc cleanups and page fault optimizations.

Cleanup of page table allocators, using generic folded PMD and PUD
helpers. TLB flushing operations are moved to a more sensible spot.

The page fault handler is also optimized slightly; we no longer waste
cycles on IRQ disabling for flushing of the page from the ITLB, since
we're already under CLI protection by the initial exception handler.
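As a rough sketch of the lookup pattern the fault path switches to --
illustrative only (lookup_pte() and its shape are made up for this
description; the real version is open-coded in __do_page_fault() below):

    /*
     * Folded walk: on sh the pud and pmd levels collapse into the pgd,
     * so pud_offset()/pmd_offset() are effectively free.
     */
    static pte_t *lookup_pte(struct mm_struct *mm, unsigned long address)
    {
            pgd_t *pgd = pgd_offset(mm, address);
            pud_t *pud;
            pmd_t *pmd;

            pud = pud_offset(pgd, address);
            if (pud_none_or_clear_bad(pud))
                    return NULL;

            pmd = pmd_offset(pud, address);
            if (pmd_none_or_clear_bad(pmd))
                    return NULL;

            return pte_offset_kernel(pmd, address);
    }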
Signed-off-by: Paul Mundt
---
 arch/sh/kernel/sys_sh.c             |   2 +-
 arch/sh/mm/Makefile                 |   2 +-
 arch/sh/mm/consistent.c             |   2 +
 arch/sh/mm/fault.c                  | 202 +++++++-----------------------
 arch/sh/mm/init.c                   |  13 ++-
 arch/sh/mm/tlb-flush.c              | 132 +++++++++++++++++++++++
 include/asm-sh/cache.h              |   8 --
 include/asm-sh/cacheflush.h         |   1 +
 include/asm-sh/cpu-sh3/cacheflush.h |   8 +-
 include/asm-sh/cpu-sh4/cacheflush.h |  29 +++---
 include/asm-sh/page.h               |  12 +--
 include/asm-sh/pgalloc.h            |  37 +------
 include/asm-sh/pgtable.h            |  80 +++++++-----
 13 files changed, 261 insertions(+), 267 deletions(-)
 create mode 100644 arch/sh/mm/tlb-flush.c

(limited to 'arch/sh/mm/fault.c')

diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c
index 917b2f32f26..d8bcd8a2232 100644
--- a/arch/sh/kernel/sys_sh.c
+++ b/arch/sh/kernel/sys_sh.c
@@ -21,7 +21,7 @@
 #include
 #include
 #include
-
+#include
 #include
 #include
 
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
index f4e32b3d24d..d90906367c5 100644
--- a/arch/sh/mm/Makefile
+++ b/arch/sh/mm/Makefile
@@ -12,7 +12,7 @@ obj-$(CONFIG_DMA_PAGE_OPS)	+= pg-dma.o
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
 
 mmu-y			:= fault-nommu.o tlb-nommu.o pg-nommu.o
-mmu-$(CONFIG_MMU)	:= fault.o clear_page.o copy_page.o
+mmu-$(CONFIG_MMU)	:= fault.o clear_page.o copy_page.o tlb-flush.o
 
 obj-y			+= $(mmu-y)
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index ee73e30263a..c81e6b67ad3 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -9,6 +9,8 @@
  */
 #include
 #include
+#include
+#include
 #include
 
 void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *handle)
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 364181f27b7..7a03ffe6dad 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -1,33 +1,20 @@
-/* $Id: fault.c,v 1.14 2004/01/13 05:52:11 kkojima Exp $
+/*
+ * Page fault handler for SH with an MMU.
  *
- * linux/arch/sh/mm/fault.c
  * Copyright (C) 1999 Niibe Yutaka
  * Copyright (C) 2003 Paul Mundt
  *
  * Based on linux/arch/i386/mm/fault.c:
  *  Copyright (C) 1995 Linus Torvalds
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
  */
-
-#include
-#include
 #include
-#include
-#include
-#include
-#include
-#include
 #include
-#include
-#include
-#include
-#include
-
 #include
-#include
-#include
-#include
 #include
-#include
 #include
 
 extern void die(const char *,struct pt_regs *,long);
@@ -187,14 +174,25 @@ do_sigbus:
	goto no_context;
 }
 
+#ifdef CONFIG_SH_STORE_QUEUES
 /*
- * Called with interrupt disabled.
+ * This is a special case for the SH-4 store queues, as pages for this
+ * space still need to be faulted in before it's possible to flush the
+ * store queue cache for writeout to the remapped region.
+ */
+#define P3_ADDR_MAX		(P4SEG_STORE_QUE + 0x04000000)
+#else
+#define P3_ADDR_MAX		P4SEG
+#endif
+
+/*
+ * Called with interrupts disabled.
  */
 asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
			       unsigned long address)
 {
-	unsigned long addrmax = P4SEG;
	pgd_t *pgd;
+	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;
@@ -207,31 +205,36 @@ asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
		kgdb_bus_err_hook();
 #endif
 
-#ifdef CONFIG_SH_STORE_QUEUES
-	addrmax = P4SEG_STORE_QUE + 0x04000000;
-#endif
-
-	if (address >= P3SEG && address < addrmax) {
+	/*
+	 * We don't take page faults for P1, P2, and parts of P4, these
+	 * are always mapped, whether it be due to legacy behaviour in
+	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
+	 */
+	if (address >= P3SEG && address < P3_ADDR_MAX)
		pgd = pgd_offset_k(address);
-		mm = NULL;
-	} else if (address >= TASK_SIZE)
-		return 1;
-	else if (!(mm = current->mm))
-		return 1;
-	else
-		pgd = pgd_offset(mm, address);
+	else {
+		if (unlikely(address >= TASK_SIZE || !current->mm))
+			return 1;
+
+		pgd = pgd_offset(current->mm, address);
+	}
 
-	pmd = pmd_offset(pgd, address);
+	pud = pud_offset(pgd, address);
+	if (pud_none_or_clear_bad(pud))
+		return 1;
+	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		return 1;
+
	if (mm)
		pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	else
		pte = pte_offset_kernel(pmd, address);
 
	entry = *pte;
-	if (pte_none(entry) || pte_not_present(entry)
-	    || (writeaccess && !pte_write(entry)))
+	if (unlikely(pte_none(entry) || pte_not_present(entry)))
+		goto unlock;
+	if (unlikely(writeaccess && !pte_write(entry)))
		goto unlock;
 
	if (writeaccess)
@@ -243,13 +246,7 @@ asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
	 * ITLB is not affected by "ldtlb" instruction.
	 * So, we need to flush the entry by ourselves.
	 */
-
-	{
-		unsigned long flags;
-		local_irq_save(flags);
-		__flush_tlb_page(get_asid(), address&PAGE_MASK);
-		local_irq_restore(flags);
-	}
+	__flush_tlb_page(get_asid(), address & PAGE_MASK);
 #endif
 
	set_pte(pte, entry);
@@ -260,122 +257,3 @@ unlock:
	pte_unmap_unlock(pte, ptl);
	return ret;
 }
-
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
-{
-	if (vma->vm_mm && vma->vm_mm->context != NO_CONTEXT) {
-		unsigned long flags;
-		unsigned long asid;
-		unsigned long saved_asid = MMU_NO_ASID;
-
-		asid = vma->vm_mm->context & MMU_CONTEXT_ASID_MASK;
-		page &= PAGE_MASK;
-
-		local_irq_save(flags);
-		if (vma->vm_mm != current->mm) {
-			saved_asid = get_asid();
-			set_asid(asid);
-		}
-		__flush_tlb_page(asid, page);
-		if (saved_asid != MMU_NO_ASID)
-			set_asid(saved_asid);
-		local_irq_restore(flags);
-	}
-}
-
-void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
-		     unsigned long end)
-{
-	struct mm_struct *mm = vma->vm_mm;
-
-	if (mm->context != NO_CONTEXT) {
-		unsigned long flags;
-		int size;
-
-		local_irq_save(flags);
-		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
-		if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
-			mm->context = NO_CONTEXT;
-			if (mm == current->mm)
-				activate_context(mm);
-		} else {
-			unsigned long asid = mm->context&MMU_CONTEXT_ASID_MASK;
-			unsigned long saved_asid = MMU_NO_ASID;
-
-			start &= PAGE_MASK;
-			end += (PAGE_SIZE - 1);
-			end &= PAGE_MASK;
-			if (mm != current->mm) {
-				saved_asid = get_asid();
-				set_asid(asid);
-			}
-			while (start < end) {
-				__flush_tlb_page(asid, start);
-				start += PAGE_SIZE;
-			}
-			if (saved_asid != MMU_NO_ASID)
-				set_asid(saved_asid);
-		}
-		local_irq_restore(flags);
-	}
-}
-
-void flush_tlb_kernel_range(unsigned long start, unsigned long end)
-{
-	unsigned long flags;
-	int size;
-
-	local_irq_save(flags);
-	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
-	if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
-		flush_tlb_all();
-	} else {
-		unsigned long asid = init_mm.context&MMU_CONTEXT_ASID_MASK;
-		unsigned long saved_asid = get_asid();
-
-		start &= PAGE_MASK;
-		end += (PAGE_SIZE - 1);
-		end &= PAGE_MASK;
-		set_asid(asid);
-		while (start < end) {
-			__flush_tlb_page(asid, start);
-			start += PAGE_SIZE;
-		}
-		set_asid(saved_asid);
-	}
-	local_irq_restore(flags);
-}
-
-void flush_tlb_mm(struct mm_struct *mm)
-{
-	/* Invalidate all TLB of this process. */
-	/* Instead of invalidating each TLB, we get new MMU context. */
-	if (mm->context != NO_CONTEXT) {
-		unsigned long flags;
-
-		local_irq_save(flags);
-		mm->context = NO_CONTEXT;
-		if (mm == current->mm)
-			activate_context(mm);
-		local_irq_restore(flags);
-	}
-}
-
-void flush_tlb_all(void)
-{
-	unsigned long flags, status;
-
-	/*
-	 * Flush all the TLB.
-	 *
-	 * Write to the MMU control register's bit:
-	 *	TF-bit for SH-3, TI-bit for SH-4.
-	 *	It's same position, bit #2.
-	 */
-	local_irq_save(flags);
-	status = ctrl_inl(MMUCR);
-	status |= 0x04;
-	ctrl_outl(status, MMUCR);
-	ctrl_barrier();
-	local_irq_restore(flags);
-}
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 8ea27ca4b70..d1a979eab65 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -80,6 +80,7 @@ void show_mem(void)
 static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
 {
	pgd_t *pgd;
+	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
 
@@ -89,7 +90,17 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
		return;
	}
 
-	pmd = pmd_offset(pgd, addr);
+	pud = pud_offset(pgd, addr);
+	if (pud_none(*pud)) {
+		pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC);
+		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
+		if (pmd != pmd_offset(pud, 0)) {
+			pud_ERROR(*pud);
+			return;
+		}
+	}
+
+	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		pte = (pte_t *)get_zeroed_page(GFP_ATOMIC);
		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
diff --git a/arch/sh/mm/tlb-flush.c b/arch/sh/mm/tlb-flush.c
new file mode 100644
index 00000000000..fd7e42bcaa4
--- /dev/null
+++ b/arch/sh/mm/tlb-flush.c
@@ -0,0 +1,132 @@
+/*
+ * TLB flushing operations for SH with an MMU.
+ *
+ * Copyright (C) 1999 Niibe Yutaka
+ * Copyright (C) 2003 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include
+#include
+#include
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+	if (vma->vm_mm && vma->vm_mm->context != NO_CONTEXT) {
+		unsigned long flags;
+		unsigned long asid;
+		unsigned long saved_asid = MMU_NO_ASID;
+
+		asid = vma->vm_mm->context & MMU_CONTEXT_ASID_MASK;
+		page &= PAGE_MASK;
+
+		local_irq_save(flags);
+		if (vma->vm_mm != current->mm) {
+			saved_asid = get_asid();
+			set_asid(asid);
+		}
+		__flush_tlb_page(asid, page);
+		if (saved_asid != MMU_NO_ASID)
+			set_asid(saved_asid);
+		local_irq_restore(flags);
+	}
+}
+
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+		     unsigned long end)
+{
+	struct mm_struct *mm = vma->vm_mm;
+
+	if (mm->context != NO_CONTEXT) {
+		unsigned long flags;
+		int size;
+
+		local_irq_save(flags);
+		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+		if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
+			mm->context = NO_CONTEXT;
+			if (mm == current->mm)
+				activate_context(mm);
+		} else {
+			unsigned long asid = mm->context&MMU_CONTEXT_ASID_MASK;
+			unsigned long saved_asid = MMU_NO_ASID;
+
+			start &= PAGE_MASK;
+			end += (PAGE_SIZE - 1);
+			end &= PAGE_MASK;
+			if (mm != current->mm) {
+				saved_asid = get_asid();
+				set_asid(asid);
+			}
+			while (start < end) {
+				__flush_tlb_page(asid, start);
+				start += PAGE_SIZE;
+			}
+			if (saved_asid != MMU_NO_ASID)
+				set_asid(saved_asid);
+		}
+		local_irq_restore(flags);
+	}
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	unsigned long flags;
+	int size;
+
+	local_irq_save(flags);
+	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+	if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
+		flush_tlb_all();
+	} else {
+		unsigned long asid = init_mm.context&MMU_CONTEXT_ASID_MASK;
+		unsigned long saved_asid = get_asid();
+
+		start &= PAGE_MASK;
+		end += (PAGE_SIZE - 1);
+		end &= PAGE_MASK;
+		set_asid(asid);
+		while (start < end) {
+			__flush_tlb_page(asid, start);
+			start += PAGE_SIZE;
+		}
+		set_asid(saved_asid);
+	}
+	local_irq_restore(flags);
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+	/* Invalidate all TLB of this process. */
+	/* Instead of invalidating each TLB, we get new MMU context. */
+	if (mm->context != NO_CONTEXT) {
+		unsigned long flags;
+
+		local_irq_save(flags);
+		mm->context = NO_CONTEXT;
+		if (mm == current->mm)
+			activate_context(mm);
+		local_irq_restore(flags);
+	}
+}
+
+void flush_tlb_all(void)
+{
+	unsigned long flags, status;
+
+	/*
+	 * Flush all the TLB.
+	 *
+	 * Write to the MMU control register's bit:
+	 *	TF-bit for SH-3, TI-bit for SH-4.
+	 *	It's same position, bit #2.
+	 */
+	local_irq_save(flags);
+	status = ctrl_inl(MMUCR);
+	status |= 0x04;
+	ctrl_outl(status, MMUCR);
+	ctrl_barrier();
+	local_irq_restore(flags);
+}
diff --git a/include/asm-sh/cache.h b/include/asm-sh/cache.h
index 33f13367054..e3a180cf506 100644
--- a/include/asm-sh/cache.h
+++ b/include/asm-sh/cache.h
@@ -10,7 +10,6 @@
 #ifdef __KERNEL__
 
 #include
-#include
 
 #define SH_CACHE_VALID		1
 #define SH_CACHE_UPDATED	2
@@ -49,12 +48,5 @@ struct cache_info {
	unsigned long flags;
 };
 
-/* Flush (write-back only) a region (smaller than a page) */
-extern void __flush_wback_region(void *start, int size);
-/* Flush (write-back & invalidate) a region (smaller than a page) */
-extern void __flush_purge_region(void *start, int size);
-/* Flush (invalidate only) a region (smaller than a page) */
-extern void __flush_invalidate_region(void *start, int size);
-
 #endif /* __KERNEL__ */
 #endif /* __ASM_SH_CACHE_H */
diff --git a/include/asm-sh/cacheflush.h b/include/asm-sh/cacheflush.h
index 9dfb33edb00..92930b4a40d 100644
--- a/include/asm-sh/cacheflush.h
+++ b/include/asm-sh/cacheflush.h
@@ -2,6 +2,7 @@
 #define __ASM_SH_CACHEFLUSH_H
 
 #ifdef __KERNEL__
+#include
 #include
 
 /* Flush (write-back only) a region (smaller than a page) */
diff --git a/include/asm-sh/cpu-sh3/cacheflush.h b/include/asm-sh/cpu-sh3/cacheflush.h
index f51aed00c68..db0cb071ea8 100644
--- a/include/asm-sh/cpu-sh3/cacheflush.h
+++ b/include/asm-sh/cpu-sh3/cacheflush.h
@@ -10,7 +10,7 @@
 #ifndef __ASM_CPU_SH3_CACHEFLUSH_H
 #define __ASM_CPU_SH3_CACHEFLUSH_H
 
-/* 
+/*
  * Cache flushing:
  *
  *  - flush_cache_all() flushes entire cache
@@ -35,10 +35,6 @@
 /* 32KB cache, 4kb PAGE sizes need to check bit 12 */
 #define CACHE_ALIAS 0x00001000
 
-struct page;
-struct mm_struct;
-struct vm_area_struct;
-
 extern void flush_cache_all(void);
 extern void flush_cache_mm(struct mm_struct *mm);
 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
@@ -79,8 +75,6 @@ extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
 
 #define p3_cache_init()			do { } while (0)
 
-#define HAVE_ARCH_UNMAPPED_AREA
-
 #endif
 
 #endif /* __ASM_CPU_SH3_CACHEFLUSH_H */
diff --git a/include/asm-sh/cpu-sh4/cacheflush.h b/include/asm-sh/cpu-sh4/cacheflush.h
index ea58c4c5944..a95fc951aff 100644
--- a/include/asm-sh/cpu-sh4/cacheflush.h
+++ b/include/asm-sh/cpu-sh4/cacheflush.h
@@ -16,30 +16,26 @@
  *  caching; in which case they're only semi-broken),
  *  so we need them.
  */
-struct page;
-struct mm_struct;
-struct vm_area_struct;
-
-extern void flush_cache_all(void);
-extern void flush_cache_mm(struct mm_struct *mm);
-extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
-			      unsigned long end);
-extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn);
-extern void flush_dcache_page(struct page *pg);
+void flush_cache_all(void);
+void flush_cache_mm(struct mm_struct *mm);
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+		       unsigned long end);
+void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
+		      unsigned long pfn);
+void flush_dcache_page(struct page *pg);
 
 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
 
-extern void flush_icache_range(unsigned long start, unsigned long end);
-extern void flush_cache_sigtramp(unsigned long addr);
-extern void flush_icache_user_range(struct vm_area_struct *vma,
-				    struct page *page, unsigned long addr,
-				    int len);
+void flush_icache_range(unsigned long start, unsigned long end);
+void flush_cache_sigtramp(unsigned long addr);
+void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+			     unsigned long addr, int len);
 
 #define flush_icache_page(vma,pg)	do { } while (0)
 
 /* Initialization of P3 area for copy_user_page */
-extern void p3_cache_init(void);
+void p3_cache_init(void);
 
 #define PG_mapped	PG_arch_1
 
@@ -57,4 +53,3 @@ static inline int remap_area_pages(unsigned long addr, unsigned long phys_addr,
 }
 #endif /* CONFIG_MMU */
 #endif /* __ASM_CPU_SH4_CACHEFLUSH_H */
-
diff --git a/include/asm-sh/page.h b/include/asm-sh/page.h
index 4811d410d12..51d7281a546 100644
--- a/include/asm-sh/page.h
+++ b/include/asm-sh/page.h
@@ -41,7 +41,8 @@ extern void (*copy_page)(void *to, void *from);
 extern void clear_page_slow(void *to);
 extern void copy_page_slow(void *to, void *from);
 
-#if defined(CONFIG_SH7705_CACHE_32KB) && defined(CONFIG_MMU)
+#if defined(CONFIG_MMU) && (defined(CONFIG_CPU_SH4) || \
+	defined(CONFIG_SH7705_CACHE_32KB))
 struct page;
 extern void clear_user_page(void *to, unsigned long address, struct page *pg);
 extern void copy_user_page(void *to, void *from, unsigned long address, struct page *pg);
@@ -50,29 +51,20 @@ extern void __copy_user_page(void *to, void *from, void *orig_to);
 #elif defined(CONFIG_CPU_SH2) || defined(CONFIG_CPU_SH3) || !defined(CONFIG_MMU)
 #define clear_user_page(page, vaddr, pg)	clear_page(page)
 #define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
-#elif defined(CONFIG_CPU_SH4)
-struct page;
-extern void clear_user_page(void *to, unsigned long address, struct page *pg);
-extern void copy_user_page(void *to, void *from, unsigned long address, struct page *pg);
-extern void __clear_user_page(void *to, void *orig_to);
-extern void __copy_user_page(void *to, void *from, void *orig_to);
 #endif
 
 /*
  * These are used to make use of C type-checking..
  */
 typedef struct { unsigned long pte; } pte_t;
-typedef struct { unsigned long pmd; } pmd_t;
 typedef struct { unsigned long pgd; } pgd_t;
 typedef struct { unsigned long pgprot; } pgprot_t;
 
 #define pte_val(x)	((x).pte)
-#define pmd_val(x)	((x).pmd)
 #define pgd_val(x)	((x).pgd)
 #define pgprot_val(x)	((x).pgprot)
 
 #define __pte(x) ((pte_t) { (x) } )
-#define __pmd(x) ((pmd_t) { (x) } )
 #define __pgd(x) ((pgd_t) { (x) } )
 #define __pgprot(x)	((pgprot_t) { (x) } )
diff --git a/include/asm-sh/pgalloc.h b/include/asm-sh/pgalloc.h
index f4f233f7a4f..e841465ab4d 100644
--- a/include/asm-sh/pgalloc.h
+++ b/include/asm-sh/pgalloc.h
@@ -1,15 +1,6 @@
 #ifndef __ASM_SH_PGALLOC_H
 #define __ASM_SH_PGALLOC_H
 
-#include
-#include
-#include
-
-#define pgd_quicklist		((unsigned long *)0)
-#define pmd_quicklist		((unsigned long *)0)
-#define pte_quicklist		((unsigned long *)0)
-#define pgtable_cache_size	0L
-
 #define pmd_populate_kernel(mm, pmd, pte) \
		set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
 
@@ -24,38 +15,24 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
  */
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	unsigned int pgd_size = (USER_PTRS_PER_PGD * sizeof(pgd_t));
-	pgd_t *pgd = (pgd_t *)kmalloc(pgd_size, GFP_KERNEL);
-
-	if (pgd)
-		memset(pgd, 0, pgd_size);
-
-	return pgd;
+	return (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
 }
 
 static inline void pgd_free(pgd_t *pgd)
 {
-	kfree(pgd);
+	free_page((unsigned long)pgd);
 }
 
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long address)
 {
-	pte_t *pte;
-
-	pte = (pte_t *) __get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
-
-	return pte;
+	return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
 }
 
 static inline struct page *pte_alloc_one(struct mm_struct *mm,
					 unsigned long address)
 {
-	struct page *pte;
-
-	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
-
-	return pte;
+	return alloc_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
 }
 
 static inline void pte_free_kernel(pte_t *pte)
@@ -75,14 +52,8 @@ static inline void pte_free(struct page *pte)
  * inside the pgd, so has no extra memory associated with it.
  */
 
-#define pmd_alloc_one(mm, addr)		({ BUG(); ((pmd_t *)2); })
 #define pmd_free(x)			do { } while (0)
 #define __pmd_free_tlb(tlb,x)		do { } while (0)
-#define pgd_populate(mm, pmd, pte)	BUG()
 #define check_pgt_cache()		do { } while (0)
 
-#ifdef CONFIG_CPU_SH4
-#define PG_mapped	PG_arch_1
-#endif
-
 #endif /* __ASM_SH_PGALLOC_H */
diff --git a/include/asm-sh/pgtable.h b/include/asm-sh/pgtable.h
index 40d41a78041..9728b58f7c1 100644
--- a/include/asm-sh/pgtable.h
+++ b/include/asm-sh/pgtable.h
@@ -1,42 +1,42 @@
-#ifndef __ASM_SH_PGTABLE_H
-#define __ASM_SH_PGTABLE_H
-
-#include
-
 /*
+ * This file contains the functions and defines necessary to modify and
+ * use the SuperH page table tree.
+ *
  * Copyright (C) 1999 Niibe Yutaka
- * Copyright (C) 2002, 2003, 2004 Paul Mundt
+ * Copyright (C) 2002 - 2005 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
  */
+#ifndef __ASM_SH_PGTABLE_H
+#define __ASM_SH_PGTABLE_H
 
-#include
+#include
+#include
+
+#define PTRS_PER_PGD		1024
 
-/*
- * This file contains the functions and defines necessary to modify and use
- * the SuperH page table tree.
- */
 #ifndef __ASSEMBLY__
-#include
 #include
 #include
-#include
 
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 extern void paging_init(void);
 
-/*
- * Basically we have the same two-level (which is the logical three level
- * Linux page table layout folded) page tables as the i386.
- */
-
 /*
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
  */
-extern unsigned long empty_zero_page[1024];
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
 
 #endif /* !__ASSEMBLY__ */
 
+/* traditional two-level paging structure */
+#define PGDIR_SHIFT	22
+#define PTRS_PER_PMD	1
+#define PTRS_PER_PTE	1024
 #define PMD_SIZE	(1UL << PMD_SHIFT)
 #define PMD_MASK	(~(PMD_SIZE-1))
 #define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
@@ -47,7 +47,6 @@ extern unsigned long empty_zero_page[1024];
 
 #define PTE_PHYS_MASK	0x1ffff000
 
-#ifndef __ASSEMBLY__
 /*
  * First 1MB map is used by fixed purpose.
  * Currently only 4-enty (16kB) is used (see arch/sh/mm/cache.c)
@@ -65,7 +64,7 @@
 #define _PAGE_SZ1	0x080  /* SZ1-bit : Size of page (on SH-4) */
 #define _PAGE_PRESENT	0x100  /* V-bit   : page is valid */
 #define _PAGE_PROTNONE	0x200  /* software: if not present  */
-#define _PAGE_ACCESSED 	0x400  /* software: page referenced */
+#define _PAGE_ACCESSED	0x400  /* software: page referenced */
 #define _PAGE_U0_SHARED 0x800  /* software: page is shared in user space */
 
 #define _PAGE_FILE	_PAGE_WT /* software: pagecache or swap? */
@@ -83,7 +82,6 @@
 #define _PAGE_PCC_ATR8	0x60000000	/* Attribute Memory space, 8 bit bus */
 #define _PAGE_PCC_ATR16	0x60000001	/* Attribute Memory space, 6 bit bus */
 
-
 /* Mask which drop software flags
  * We also drop WT bit since it is used for _PAGE_FILE
  * bit in this implementation.
@@ -115,6 +113,8 @@
 #define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
 #define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_CACHABLE | _PAGE_DIRTY | _PAGE_SHARED)
 
+#ifndef __ASSEMBLY__
+
 #ifdef CONFIG_MMU
 #define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE |_PAGE_ACCESSED | _PAGE_FLAGS_HARD)
 #define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_CACHABLE |_PAGE_ACCESSED | _PAGE_SHARED | _PAGE_FLAGS_HARD)
@@ -137,12 +137,13 @@
 #define PAGE_KERNEL_PCC	__pgprot(0)
 #endif
 
+#endif /* __ASSEMBLY__ */
+
 /*
  * As i386 and MIPS, SuperH can't do page protection for execute, and
  * considers that the same as a read.  Also, write permissions imply
- * read permissions. This is the closest we can get.. 
+ * read permissions. This is the closest we can get..
  */
-
 #define __P000	PAGE_NONE
 #define __P001	PAGE_READONLY
 #define __P010	PAGE_COPY
@@ -161,6 +162,26 @@
 #define __S110	PAGE_SHARED
 #define __S111	PAGE_SHARED
 
+#ifndef __ASSEMBLY__
+
+/*
+ * Certain architectures need to do special things when PTEs
+ * within a page table are directly modified.  Thus, the following
+ * hook is made available.
+ */
+#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
+#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
+
+/*
+ * (pmds are folded into pgds so this doesn't get actually called,
+ * but the define is needed for a generic inline function.)
+ */
+#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
+
+#define pte_pfn(x)		((unsigned long)(((x).pte >> PAGE_SHIFT)))
+#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+#define pfn_pmd(pfn, prot)	__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+
 #define pte_none(x)	(!pte_val(x))
 #define pte_present(x)	(pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
 #define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
@@ -171,7 +192,7 @@
 #define pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
 
 #define pages_to_mb(x)	((x) >> (20-PAGE_SHIFT))
-#define pte_page(x) 	phys_to_page(pte_val(x)&PTE_PHYS_MASK)
+#define pte_page(x)	phys_to_page(pte_val(x)&PTE_PHYS_MASK)
 
 /*
  * The following only work if pte_present() is true.
@@ -248,6 +269,11 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define pte_unmap(pte)		do { } while (0)
 #define pte_unmap_nested(pte)	do { } while (0)
 
+#define pte_ERROR(e) \
+	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
+#define pgd_ERROR(e) \
+	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
 struct vm_area_struct;
 extern void update_mmu_cache(struct vm_area_struct * vma,
			     unsigned long address, pte_t pte);
@@ -272,8 +298,6 @@ extern void update_mmu_cache(struct vm_area_struct * vma,
 
 typedef pte_t *pte_addr_t;
 
-#endif /* !__ASSEMBLY__ */
-
 #define kern_addr_valid(addr)	(1)
 
 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
@@ -301,5 +325,7 @@ extern pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t
 
 #include
 
+#endif /* !__ASSEMBLY__ */
+
 #endif /* __ASM_SH_PAGE_H */
--
cgit v1.2.3-70-g09d2

From f647d33f879d258de4ab2559975bd6eebda2033e Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Wed, 27 Sep 2006 15:30:24 +0900
Subject: sh: Fix split ptlock for user mappings in __do_page_fault().

There was a bug introduced when the split ptlock changes went in, where
mm could be uninitialized for user mappings; this fixes it up..

Signed-off-by: Paul Mundt
---
 arch/sh/mm/fault.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

(limited to 'arch/sh/mm/fault.c')

diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 7a03ffe6dad..dc461d2bc18 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -210,10 +210,11 @@ asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
	 * are always mapped, whether it be due to legacy behaviour in
	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
	 */
-	if (address >= P3SEG && address < P3_ADDR_MAX)
+	if (address >= P3SEG && address < P3_ADDR_MAX) {
		pgd = pgd_offset_k(address);
-	else {
-		if (unlikely(address >= TASK_SIZE || !current->mm))
+		mm = NULL;
+	} else {
+		if (unlikely(address >= TASK_SIZE || !(mm = current->mm)))
			return 1;
 
		pgd = pgd_offset(current->mm, address);
--
cgit v1.2.3-70-g09d2

From 0f08f338083cc1d68788ccbccc44bd0502fc57ae Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Wed, 27 Sep 2006 17:03:56 +0900
Subject: sh: More cosmetic cleanups and trivial fixes.

Nothing exciting here, just trivial fixes..
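One of the fixes below deserves a note: the SH_CACHE_* flags are bit
masks, while test_bit() takes a bit *number*, so the old test in
show_cpuinfo() examined the wrong bit. Schematically (the mask value
here is illustrative, taken from the SH_CACHE_VALID/UPDATED pattern in
cache.h):

    /* flags holds SH_CACHE_* masks; assume SH_CACHE_COMBINED == 0x4 */
    unsigned long flags = boot_cpu_data.icache.flags;

    test_bit(SH_CACHE_COMBINED, &flags);  /* wrong: tests bit number 4, i.e. 0x10 */
    flags & SH_CACHE_COMBINED;            /* right: tests the 0x4 mask itself */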
Signed-off-by: Paul Mundt
---
 arch/sh/drivers/dma/dma-sh.c    | 10 ++++++----
 arch/sh/drivers/pci/ops-titan.c |  1 +
 arch/sh/kernel/setup.c          |  2 +-
 arch/sh/kernel/sh_ksyms.c       | 15 ---------------
 arch/sh/mm/cache-sh7705.c       | 19 ++++++-------------
 arch/sh/mm/fault.c              | 13 ++++++++-----
 arch/sh/mm/pmb.c                |  4 ----
 include/asm-sh/dma.h            |  1 +
 include/asm-sh/irq.h            |  5 +++++
 include/asm-sh/kexec.h          |  2 ++
 include/asm-sh/system.h         | 36 +++++------------------------------
 11 files changed, 35 insertions(+), 73 deletions(-)

(limited to 'arch/sh/mm/fault.c')

diff --git a/arch/sh/drivers/dma/dma-sh.c b/arch/sh/drivers/dma/dma-sh.c
index 40a480d20aa..cbbe8bce3d6 100644
--- a/arch/sh/drivers/dma/dma-sh.c
+++ b/arch/sh/drivers/dma/dma-sh.c
@@ -80,21 +80,23 @@ static irqreturn_t dma_tei(int irq, void *dev_id, struct pt_regs *regs)
 
 static int sh_dmac_request_dma(struct dma_channel *chan)
 {
-	char name[32];
-
	if (unlikely(!chan->flags & DMA_TEI_CAPABLE))
		return 0;
 
-	snprintf(name, sizeof(name), "DMAC Transfer End (Channel %d)",
+	chan->name = kzalloc(32, GFP_KERNEL);
+	if (unlikely(chan->name == NULL))
+		return -ENOMEM;
+	snprintf(chan->name, 32, "DMAC Transfer End (Channel %d)",
		 chan->chan);
 
	return request_irq(get_dmte_irq(chan->chan), dma_tei,
-			   IRQF_DISABLED, name, chan);
+			   IRQF_DISABLED, chan->name, chan);
 }
 
 static void sh_dmac_free_dma(struct dma_channel *chan)
 {
	free_irq(get_dmte_irq(chan->chan), chan);
+	kfree(chan->name);
 }
 
 static void
diff --git a/arch/sh/drivers/pci/ops-titan.c b/arch/sh/drivers/pci/ops-titan.c
index 9c8b2027c35..c6097bcd97f 100644
--- a/arch/sh/drivers/pci/ops-titan.c
+++ b/arch/sh/drivers/pci/ops-titan.c
@@ -16,6 +16,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include "pci-sh4.h"
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index cff8d36f91b..4afdec07170 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -507,7 +507,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
	 * unified cache on the SH-2 and SH-3, as well as the harvard
	 * style cache on the SH-4.
	 */
-	if (test_bit(SH_CACHE_COMBINED, &(boot_cpu_data.icache.flags))) {
+	if (boot_cpu_data.icache.flags & SH_CACHE_COMBINED) {
		seq_printf(m, "unified\n");
		show_cacheinfo(m, "cache", boot_cpu_data.icache);
	} else {
diff --git a/arch/sh/kernel/sh_ksyms.c b/arch/sh/kernel/sh_ksyms.c
index fd73ab0326e..7f3a42244ca 100644
--- a/arch/sh/kernel/sh_ksyms.c
+++ b/arch/sh/kernel/sh_ksyms.c
@@ -27,20 +27,11 @@ EXPORT_SYMBOL(sh_mv);
 
 /* platform dependent support */
 EXPORT_SYMBOL(dump_fpu);
-EXPORT_SYMBOL(enable_irq);
-EXPORT_SYMBOL(disable_irq);
-EXPORT_SYMBOL(probe_irq_mask);
 EXPORT_SYMBOL(kernel_thread);
-EXPORT_SYMBOL(disable_irq_nosync);
 EXPORT_SYMBOL(irq_desc);
 EXPORT_SYMBOL(no_irq_type);
 
-EXPORT_SYMBOL(strstr);
 EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(strnlen);
-EXPORT_SYMBOL(strchr);
-EXPORT_SYMBOL(strcat);
-EXPORT_SYMBOL(strncat);
 
 /* PCI exports */
 #ifdef CONFIG_PCI
@@ -51,13 +42,8 @@ EXPORT_SYMBOL(pci_free_consistent);
 /* mem exports */
 EXPORT_SYMBOL(memchr);
 EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(memcpy_fromio);
-EXPORT_SYMBOL(memcpy_toio);
 EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(memset_io);
 EXPORT_SYMBOL(memmove);
-EXPORT_SYMBOL(memcmp);
-EXPORT_SYMBOL(memscan);
 EXPORT_SYMBOL(__copy_user);
 EXPORT_SYMBOL(boot_cpu_data);
 
@@ -124,5 +110,4 @@ EXPORT_SYMBOL(csum_partial);
 #ifdef CONFIG_IPV6
 EXPORT_SYMBOL(csum_ipv6_magic);
 #endif
-EXPORT_SYMBOL(consistent_sync);
 EXPORT_SYMBOL(clear_page);
diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c
index bf94eedb0a8..045abdf078f 100644
--- a/arch/sh/mm/cache-sh7705.c
+++ b/arch/sh/mm/cache-sh7705.c
@@ -9,7 +9,6 @@
  * for more details.
  *
  */
-
 #include
 #include
 #include
@@ -25,14 +24,10 @@
 #include
 #include
 
-/* The 32KB cache on the SH7705 suffers from the same synonym problem
- * as SH4 CPUs */
-
-#define __pte_offset(address) \
-	((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset(dir, address) ((pte_t *) pmd_page_vaddr(*(dir)) + \
-	__pte_offset(address))
-
+/*
+ * The 32KB cache on the SH7705 suffers from the same synonym problem
+ * as SH4 CPUs
+ */
 static inline void cache_wback_all(void)
 {
	unsigned long ways, waysize, addrstart;
@@ -73,7 +68,6 @@ void flush_icache_range(unsigned long start, unsigned long end)
	__flush_wback_region((void *)start, end - start);
 }
 
-
 /*
  * Writeback&Invalidate the D-cache of the page
  */
@@ -128,7 +122,6 @@ static void __flush_dcache_page(unsigned long phys)
	local_irq_restore(flags);
 }
 
-
 /*
  * Write back & invalidate the D-cache of the page.
  * (To avoid "alias" issues)
@@ -186,7 +179,8 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
  *
  * ADDRESS: Virtual Address (U0 address)
  */
-void flush_cache_page(struct vm_area_struct *vma, unsigned long address, unsigned long pfn)
+void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
+		      unsigned long pfn)
 {
	__flush_dcache_page(pfn << PAGE_SHIFT);
 }
@@ -203,4 +197,3 @@ void flush_icache_page(struct vm_area_struct *vma, struct page *page)
 {
	__flush_purge_region(page_address(page), PAGE_SIZE);
 }
-
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index dc461d2bc18..c69fd603226 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -13,6 +13,8 @@
  */
 #include
 #include
+#include
+#include
 #include
 #include
 #include
@@ -188,15 +190,16 @@ do_sigbus:
 
 /*
  * Called with interrupts disabled.
  */
-asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
-			       unsigned long address)
+asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
+					 unsigned long writeaccess,
+					 unsigned long address)
 {
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;
-	struct mm_struct *mm;
+	struct mm_struct *mm = current->mm;
	spinlock_t *ptl;
	int ret = 1;
 
@@ -214,10 +217,10 @@ asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
		pgd = pgd_offset_k(address);
		mm = NULL;
	} else {
-		if (unlikely(address >= TASK_SIZE || !(mm = current->mm)))
+		if (unlikely(address >= TASK_SIZE || !mm))
			return 1;
 
-		pgd = pgd_offset(current->mm, address);
+		pgd = pgd_offset(mm, address);
	}
 
	pud = pud_offset(pgd, address);
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index 819fd0faf02..92e745341e4 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -337,10 +337,8 @@ static int __init pmb_init(void)
 
	return 0;
 }
-
 arch_initcall(pmb_init);
 
-#ifdef CONFIG_DEBUG_FS
 static int pmb_seq_show(struct seq_file *file, void *iter)
 {
	int i;
@@ -399,6 +397,4 @@ static int __init pmb_debugfs_init(void)
 
	return 0;
 }
-
 postcore_initcall(pmb_debugfs_init);
-#endif
diff --git a/include/asm-sh/dma.h b/include/asm-sh/dma.h
index e62a6d0ed93..d9daa028689 100644
--- a/include/asm-sh/dma.h
+++ b/include/asm-sh/dma.h
@@ -89,6 +89,7 @@ struct dma_channel {
	wait_queue_head_t	wait_queue;
 
	struct sys_device	dev;
+	char			*name;
 };
 
 struct dma_info {
diff --git a/include/asm-sh/irq.h b/include/asm-sh/irq.h
index 7e8455b1cb4..648102e9236 100644
--- a/include/asm-sh/irq.h
+++ b/include/asm-sh/irq.h
@@ -334,6 +334,11 @@ extern void enable_irq(unsigned int);
 extern void make_maskreg_irq(unsigned int irq);
 extern unsigned short *irq_mask_register;
 
+/*
+ * PINT IRQs
+ */
+void init_IRQ_pint(void);
+
 /*
  * Function for "on chip support modules".
  */
diff --git a/include/asm-sh/kexec.h b/include/asm-sh/kexec.h
index 9dfe59f6fcb..a5f85e9e428 100644
--- a/include/asm-sh/kexec.h
+++ b/include/asm-sh/kexec.h
@@ -23,6 +23,8 @@
 /* The native architecture */
 #define KEXEC_ARCH KEXEC_ARCH_SH
 
+#define MAX_NOTE_BYTES 1024
+
 #ifndef __ASSEMBLY__
 
 extern void machine_shutdown(void);
diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h
index 477422afeb0..6c1f8fde5ac 100644
--- a/include/asm-sh/system.h
+++ b/include/asm-sh/system.h
@@ -79,7 +79,7 @@ static inline void sched_cacheflush(void)
 }
 #endif
 
-static __inline__ unsigned long tas(volatile int *m)
+static inline unsigned long tas(volatile int *m)
 {
	unsigned long retval;
 
@@ -161,7 +161,7 @@ static inline void local_irq_enable(void)
 }
 #endif
 
-static __inline__ void local_irq_disable(void)
+static inline void local_irq_disable(void)
 {
	unsigned long __dummy;
	__asm__ __volatile__("stc	sr, %0\n\t"
@@ -172,7 +172,7 @@ static inline void local_irq_disable(void)
			     : "memory");
 }
 
-static __inline__ void set_bl_bit(void)
+static inline void set_bl_bit(void)
 {
	unsigned long __dummy0, __dummy1;
 
@@ -185,7 +185,7 @@ static inline void set_bl_bit(void)
			     : "memory");
 }
 
-static __inline__ void clear_bl_bit(void)
+static inline void clear_bl_bit(void)
 {
	unsigned long __dummy0, __dummy1;
 
@@ -207,7 +207,7 @@ static inline void clear_bl_bit(void)
	(flags != 0);				\
 })
 
-static __inline__ unsigned long local_irq_save(void)
+static inline unsigned long local_irq_save(void)
 {
	unsigned long flags, __dummy;
 
@@ -223,36 +223,10 @@ static inline unsigned long local_irq_save(void)
	return flags;
 }
 
-#ifdef DEBUG_CLI_STI
-static __inline__ void local_irq_restore(unsigned long x)
-{
-	if ((x & 0x000000f0) != 0x000000f0)
-		local_irq_enable();
-	else {
-		unsigned long flags;
-		local_save_flags(flags);
-
-		if (flags == 0) {
-			extern void dump_stack(void);
-			printk(KERN_ERR "BUG!\n");
-			dump_stack();
-			local_irq_disable();
-		}
-	}
-}
-#else
 #define local_irq_restore(x) do {			\
	if ((x & 0x000000f0) != 0x000000f0)		\
		local_irq_enable();			\
 } while (0)
-#endif
-
-#define really_restore_flags(x) do {			\
-	if ((x & 0x000000f0) != 0x000000f0)		\
-		local_irq_enable();			\
-	else						\
-		local_irq_disable();			\
-} while (0)
 
 /*
  * Jump to P2 area.
--
cgit v1.2.3-70-g09d2
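A closing note on the first patch in this series (sh: Add control
register barriers): conceptually, the barrier boils down to something
like the sketch below. This is an illustration of the idea only -- the
icbi target address and the exact nop count are assumptions drawn from
the commit description, not lifted from the real include/asm-sh
implementation:

    #ifdef CONFIG_CPU_SH4A
    /* On sh4a, an icbi serializes instruction fetch once the control
     * register write has taken effect. P2 address is an assumption. */
    #define ctrl_barrier()	__asm__ __volatile__ ("icbi @%0" : : "r" (0xa8000000))
    #else
    /* Legacy parts just need time: roughly eight nops' worth. */
    #define ctrl_barrier()	__asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop")
    #endif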