Diffstat

-rw-r--r--  arch/sh/kernel/sys_sh.c              |   2
-rw-r--r--  arch/sh/mm/Makefile                  |   2
-rw-r--r--  arch/sh/mm/consistent.c              |   2
-rw-r--r--  arch/sh/mm/fault.c                   | 202
-rw-r--r--  arch/sh/mm/init.c                    |  13
-rw-r--r--  arch/sh/mm/tlb-flush.c               | 132
-rw-r--r--  include/asm-sh/cache.h               |   8
-rw-r--r--  include/asm-sh/cacheflush.h          |   1
-rw-r--r--  include/asm-sh/cpu-sh3/cacheflush.h  |   8
-rw-r--r--  include/asm-sh/cpu-sh4/cacheflush.h  |  29
-rw-r--r--  include/asm-sh/page.h                |  12
-rw-r--r--  include/asm-sh/pgalloc.h             |  37
-rw-r--r--  include/asm-sh/pgtable.h             |  80

13 files changed, 261 insertions, 267 deletions
diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c
index 917b2f32f26..d8bcd8a2232 100644
--- a/arch/sh/kernel/sys_sh.c
+++ b/arch/sh/kernel/sys_sh.c
@@ -21,7 +21,7 @@
 #include <linux/mman.h>
 #include <linux/file.h>
 #include <linux/utsname.h>
-
+#include <asm/cacheflush.h>
 #include <asm/uaccess.h>
 #include <asm/ipc.h>
 
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
index f4e32b3d24d..d90906367c5 100644
--- a/arch/sh/mm/Makefile
+++ b/arch/sh/mm/Makefile
@@ -12,7 +12,7 @@ obj-$(CONFIG_DMA_PAGE_OPS)	+= pg-dma.o
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
 
 mmu-y			:= fault-nommu.o tlb-nommu.o pg-nommu.o
-mmu-$(CONFIG_MMU)	:= fault.o clear_page.o copy_page.o
+mmu-$(CONFIG_MMU)	:= fault.o clear_page.o copy_page.o tlb-flush.o
 
 obj-y			+= $(mmu-y)
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index ee73e30263a..c81e6b67ad3 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -9,6 +9,8 @@
  */
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
+#include <asm/cacheflush.h>
+#include <asm/addrspace.h>
 #include <asm/io.h>
 
 void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *handle)
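The new includes in consistent.c back the uncached-alias handling that consistent_alloc() performs: DMA-coherent memory on SH is handed out through the uncached P2 window, and the cached alias has to be purged first. A minimal sketch of that pattern, assuming the helper name is hypothetical while P2SEGADDR and __flush_purge_region are real interfaces touched by this patch:

/* Sketch only: allocate pages, purge the cached alias, return the
 * uncached P2 alias. consistent_alloc() itself does more bookkeeping. */
static void *alloc_coherent_sketch(size_t size, dma_addr_t *handle)
{
	void *vp = (void *)__get_free_pages(GFP_KERNEL, get_order(size));

	if (!vp)
		return NULL;

	memset(vp, 0, size);
	*handle = virt_to_phys(vp);

	/* Write back and invalidate the cached (P1) view... */
	__flush_purge_region(vp, size);

	/* ...then hand out the uncached (P2) view of the same pages. */
	return (void *)P2SEGADDR((unsigned long)vp);
}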
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 364181f27b7..7a03ffe6dad 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -1,33 +1,20 @@
-/* $Id: fault.c,v 1.14 2004/01/13 05:52:11 kkojima Exp $
+/*
+ * Page fault handler for SH with an MMU.
  *
- * linux/arch/sh/mm/fault.c
  * Copyright (C) 1999  Niibe Yutaka
  * Copyright (C) 2003  Paul Mundt
  *
  * Based on linux/arch/i386/mm/fault.c:
  *  Copyright (C) 1995  Linus Torvalds
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
  */
-
-#include <linux/signal.h>
-#include <linux/sched.h>
 #include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
 #include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-
 #include <asm/system.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <asm/pgalloc.h>
 #include <asm/mmu_context.h>
-#include <asm/cacheflush.h>
 #include <asm/kgdb.h>
 
 extern void die(const char *,struct pt_regs *,long);
@@ -187,14 +174,25 @@ do_sigbus:
 		goto no_context;
 }
 
+#ifdef CONFIG_SH_STORE_QUEUES
 /*
- * Called with interrupt disabled.
+ * This is a special case for the SH-4 store queues, as pages for this
+ * space still need to be faulted in before it's possible to flush the
+ * store queue cache for writeout to the remapped region.
+ */
+#define P3_ADDR_MAX		(P4SEG_STORE_QUE + 0x04000000)
+#else
+#define P3_ADDR_MAX		P4SEG
+#endif
+
+/*
+ * Called with interrupts disabled.
  */
 asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 			       unsigned long address)
 {
-	unsigned long addrmax = P4SEG;
 	pgd_t *pgd;
+	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
 	pte_t entry;
@@ -207,31 +205,36 @@ asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 	kgdb_bus_err_hook();
 #endif
 
-#ifdef CONFIG_SH_STORE_QUEUES
-	addrmax = P4SEG_STORE_QUE + 0x04000000;
-#endif
-
-	if (address >= P3SEG && address < addrmax) {
+	/*
+	 * We don't take page faults for P1, P2, and parts of P4, these
+	 * are always mapped, whether it be due to legacy behaviour in
+	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
+	 */
+	if (address >= P3SEG && address < P3_ADDR_MAX)
 		pgd = pgd_offset_k(address);
-		mm = NULL;
-	} else if (address >= TASK_SIZE)
-		return 1;
-	else if (!(mm = current->mm))
-		return 1;
-	else
-		pgd = pgd_offset(mm, address);
+	else {
+		if (unlikely(address >= TASK_SIZE || !current->mm))
+			return 1;
+
+		pgd = pgd_offset(current->mm, address);
+	}
 
-	pmd = pmd_offset(pgd, address);
+	pud = pud_offset(pgd, address);
+	if (pud_none_or_clear_bad(pud))
+		return 1;
+	pmd = pmd_offset(pud, address);
 	if (pmd_none_or_clear_bad(pmd))
 		return 1;
+
 	if (mm)
 		pte = pte_offset_map_lock(mm, pmd, address, &ptl);
 	else
 		pte = pte_offset_kernel(pmd, address);
 
 	entry = *pte;
-	if (pte_none(entry) || pte_not_present(entry)
-	    || (writeaccess && !pte_write(entry)))
+	if (unlikely(pte_none(entry) || pte_not_present(entry)))
+		goto unlock;
+	if (unlikely(writeaccess && !pte_write(entry)))
 		goto unlock;
 
 	if (writeaccess)
@@ -243,13 +246,7 @@ asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 	 * ITLB is not affected by "ldtlb" instruction.
 	 * So, we need to flush the entry by ourselves.
 	 */
-
-	{
-		unsigned long flags;
-		local_irq_save(flags);
-		__flush_tlb_page(get_asid(), address&PAGE_MASK);
-		local_irq_restore(flags);
-	}
+	__flush_tlb_page(get_asid(), address & PAGE_MASK);
 #endif
 
 	set_pte(pte, entry);
@@ -260,122 +257,3 @@ unlock:
 	pte_unmap_unlock(pte, ptl);
 	return ret;
 }
-
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
-{
-	if (vma->vm_mm && vma->vm_mm->context != NO_CONTEXT) {
-		unsigned long flags;
-		unsigned long asid;
-		unsigned long saved_asid = MMU_NO_ASID;
-
-		asid = vma->vm_mm->context & MMU_CONTEXT_ASID_MASK;
-		page &= PAGE_MASK;
-
-		local_irq_save(flags);
-		if (vma->vm_mm != current->mm) {
-			saved_asid = get_asid();
-			set_asid(asid);
-		}
-		__flush_tlb_page(asid, page);
-		if (saved_asid != MMU_NO_ASID)
-			set_asid(saved_asid);
-		local_irq_restore(flags);
-	}
-}
-
-void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
-		     unsigned long end)
-{
-	struct mm_struct *mm = vma->vm_mm;
-
-	if (mm->context != NO_CONTEXT) {
-		unsigned long flags;
-		int size;
-
-		local_irq_save(flags);
-		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
-		if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
-			mm->context = NO_CONTEXT;
-			if (mm == current->mm)
-				activate_context(mm);
-		} else {
-			unsigned long asid = mm->context&MMU_CONTEXT_ASID_MASK;
-			unsigned long saved_asid = MMU_NO_ASID;
-
-			start &= PAGE_MASK;
-			end += (PAGE_SIZE - 1);
-			end &= PAGE_MASK;
-			if (mm != current->mm) {
-				saved_asid = get_asid();
-				set_asid(asid);
-			}
-			while (start < end) {
-				__flush_tlb_page(asid, start);
-				start += PAGE_SIZE;
-			}
-			if (saved_asid != MMU_NO_ASID)
-				set_asid(saved_asid);
-		}
-		local_irq_restore(flags);
-	}
-}
-
-void flush_tlb_kernel_range(unsigned long start, unsigned long end)
-{
-	unsigned long flags;
-	int size;
-
-	local_irq_save(flags);
-	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
-	if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
-		flush_tlb_all();
-	} else {
-		unsigned long asid = init_mm.context&MMU_CONTEXT_ASID_MASK;
-		unsigned long saved_asid = get_asid();
-
-		start &= PAGE_MASK;
-		end += (PAGE_SIZE - 1);
-		end &= PAGE_MASK;
-		set_asid(asid);
-		while (start < end) {
-			__flush_tlb_page(asid, start);
-			start += PAGE_SIZE;
-		}
-		set_asid(saved_asid);
-	}
-	local_irq_restore(flags);
-}
-
-void flush_tlb_mm(struct mm_struct *mm)
-{
-	/* Invalidate all TLB of this process. */
-	/* Instead of invalidating each TLB, we get new MMU context. */
-	if (mm->context != NO_CONTEXT) {
-		unsigned long flags;
-
-		local_irq_save(flags);
-		mm->context = NO_CONTEXT;
-		if (mm == current->mm)
-			activate_context(mm);
-		local_irq_restore(flags);
-	}
-}
-
-void flush_tlb_all(void)
-{
-	unsigned long flags, status;
-
-	/*
-	 * Flush all the TLB.
-	 *
-	 * Write to the MMU control register's bit:
-	 *	TF-bit for SH-3, TI-bit for SH-4.
-	 *	It's same position, bit #2.
-	 */
-	local_irq_save(flags);
-	status = ctrl_inl(MMUCR);
-	status |= 0x04;
-	ctrl_outl(status, MMUCR);
-	ctrl_barrier();
-	local_irq_restore(flags);
-}
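The rewritten __do_page_fault() now spells the software page-table walk out level by level. With <asm-generic/pgtable-nopmd.h> (introduced in the pgtable.h hunk further down) the pud and pmd levels are folded, so pud_offset()/pmd_offset() are effectively pass-throughs, but the generic mm code still expects each step. A minimal standalone sketch of the same walk, with a hypothetical helper name:

static pte_t *lookup_pte_sketch(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd = pgd_offset(mm, address);
	pud_t *pud;
	pmd_t *pmd;

	pud = pud_offset(pgd, address);
	if (pud_none_or_clear_bad(pud))
		return NULL;	/* no table at this level */

	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		return NULL;

	/* Caller is responsible for locking; the fault handler uses
	 * pte_offset_map_lock() for user mappings. */
	return pte_offset_kernel(pmd, address);
}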
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 8ea27ca4b70..d1a979eab65 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -80,6 +80,7 @@ void show_mem(void)
 static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
 {
 	pgd_t *pgd;
+	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
 
@@ -89,7 +90,17 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
 		return;
 	}
 
-	pmd = pmd_offset(pgd, addr);
+	pud = pud_offset(pgd, addr);
+	if (pud_none(*pud)) {
+		pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC);
+		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
+		if (pmd != pmd_offset(pud, 0)) {
+			pud_ERROR(*pud);
+			return;
+		}
+	}
+
+	pmd = pmd_offset(pud, addr);
 	if (pmd_none(*pmd)) {
 		pte = (pte_t *)get_zeroed_page(GFP_ATOMIC);
 		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
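The new pud_none() branch in set_pte_phys() hand-populates a missing middle-level table before descending. The same pattern, condensed into a hypothetical helper; the flags and accessors are the ones used in the patch, and a NULL check is added since get_zeroed_page() can fail:

static pmd_t *pmd_populate_sketch(pud_t *pud, unsigned long addr)
{
	if (pud_none(*pud)) {
		pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC);

		if (!pmd)
			return NULL;

		/* Point the pud entry at the freshly zeroed pmd page. */
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
	}

	return pmd_offset(pud, addr);
}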
diff --git a/arch/sh/mm/tlb-flush.c b/arch/sh/mm/tlb-flush.c
new file mode 100644
index 00000000000..fd7e42bcaa4
--- /dev/null
+++ b/arch/sh/mm/tlb-flush.c
@@ -0,0 +1,132 @@
+/*
+ * TLB flushing operations for SH with an MMU.
+ *
+ * Copyright (C) 1999  Niibe Yutaka
+ * Copyright (C) 2003  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/mm.h>
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+	if (vma->vm_mm && vma->vm_mm->context != NO_CONTEXT) {
+		unsigned long flags;
+		unsigned long asid;
+		unsigned long saved_asid = MMU_NO_ASID;
+
+		asid = vma->vm_mm->context & MMU_CONTEXT_ASID_MASK;
+		page &= PAGE_MASK;
+
+		local_irq_save(flags);
+		if (vma->vm_mm != current->mm) {
+			saved_asid = get_asid();
+			set_asid(asid);
+		}
+		__flush_tlb_page(asid, page);
+		if (saved_asid != MMU_NO_ASID)
+			set_asid(saved_asid);
+		local_irq_restore(flags);
+	}
+}
+
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+		     unsigned long end)
+{
+	struct mm_struct *mm = vma->vm_mm;
+
+	if (mm->context != NO_CONTEXT) {
+		unsigned long flags;
+		int size;
+
+		local_irq_save(flags);
+		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+		if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
+			mm->context = NO_CONTEXT;
+			if (mm == current->mm)
+				activate_context(mm);
+		} else {
+			unsigned long asid = mm->context&MMU_CONTEXT_ASID_MASK;
+			unsigned long saved_asid = MMU_NO_ASID;
+
+			start &= PAGE_MASK;
+			end += (PAGE_SIZE - 1);
+			end &= PAGE_MASK;
+			if (mm != current->mm) {
+				saved_asid = get_asid();
+				set_asid(asid);
+			}
+			while (start < end) {
+				__flush_tlb_page(asid, start);
+				start += PAGE_SIZE;
+			}
+			if (saved_asid != MMU_NO_ASID)
+				set_asid(saved_asid);
+		}
+		local_irq_restore(flags);
+	}
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	unsigned long flags;
+	int size;
+
+	local_irq_save(flags);
+	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+	if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
+		flush_tlb_all();
+	} else {
+		unsigned long asid = init_mm.context&MMU_CONTEXT_ASID_MASK;
+		unsigned long saved_asid = get_asid();
+
+		start &= PAGE_MASK;
+		end += (PAGE_SIZE - 1);
+		end &= PAGE_MASK;
+		set_asid(asid);
+		while (start < end) {
+			__flush_tlb_page(asid, start);
+			start += PAGE_SIZE;
+		}
+		set_asid(saved_asid);
+	}
+	local_irq_restore(flags);
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+	/* Invalidate all TLB of this process. */
+	/* Instead of invalidating each TLB, we get new MMU context. */
+	if (mm->context != NO_CONTEXT) {
+		unsigned long flags;
+
+		local_irq_save(flags);
+		mm->context = NO_CONTEXT;
+		if (mm == current->mm)
+			activate_context(mm);
+		local_irq_restore(flags);
+	}
+}
+
+void flush_tlb_all(void)
+{
+	unsigned long flags, status;
+
+	/*
+	 * Flush all the TLB.
+	 *
+	 * Write to the MMU control register's bit:
+	 *	TF-bit for SH-3, TI-bit for SH-4.
+	 *	It's same position, bit #2.
+	 */
+	local_irq_save(flags);
+	status = ctrl_inl(MMUCR);
+	status |= 0x04;
+	ctrl_outl(status, MMUCR);
+	ctrl_barrier();
+	local_irq_restore(flags);
+}
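flush_tlb_range() and flush_tlb_kernel_range() share a cost heuristic: past MMU_NTLB_ENTRIES/4 pages it is cheaper to invalidate wholesale (a new context, or a full TLB flush) than to probe entries page by page. A usage sketch, assuming MMU_NTLB_ENTRIES is 64 as on SH-4, making the cutover 16 pages:

static void range_flush_example(struct vm_area_struct *vma,
				unsigned long start)
{
	/* 8 pages: below the threshold, flushed one entry at a time
	 * under the target mm's ASID. */
	flush_tlb_range(vma, start, start + 8 * PAGE_SIZE);

	/* 20 pages: over the threshold, so the mm's context is reset
	 * instead and stale entries die with the old ASID. */
	flush_tlb_range(vma, start, start + 20 * PAGE_SIZE);
}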
diff --git a/include/asm-sh/cache.h b/include/asm-sh/cache.h
index 33f13367054..e3a180cf506 100644
--- a/include/asm-sh/cache.h
+++ b/include/asm-sh/cache.h
@@ -10,7 +10,6 @@
 #ifdef __KERNEL__
 
 #include <asm/cpu/cache.h>
-#include <asm/cpu/cacheflush.h>
 
 #define SH_CACHE_VALID		1
 #define SH_CACHE_UPDATED	2
@@ -49,12 +48,5 @@ struct cache_info {
 	unsigned long flags;
 };
 
-/* Flush (write-back only) a region (smaller than a page) */
-extern void __flush_wback_region(void *start, int size);
-/* Flush (write-back & invalidate) a region (smaller than a page) */
-extern void __flush_purge_region(void *start, int size);
-/* Flush (invalidate only) a region (smaller than a page) */
-extern void __flush_invalidate_region(void *start, int size);
-
 #endif /* __KERNEL__ */
 #endif /* __ASM_SH_CACHE_H */
diff --git a/include/asm-sh/cacheflush.h b/include/asm-sh/cacheflush.h
index 9dfb33edb00..92930b4a40d 100644
--- a/include/asm-sh/cacheflush.h
+++ b/include/asm-sh/cacheflush.h
@@ -2,6 +2,7 @@
 #define __ASM_SH_CACHEFLUSH_H
 
 #ifdef __KERNEL__
+#include <linux/mm.h>
 #include <asm/cpu/cacheflush.h>
 
 /* Flush (write-back only) a region (smaller than a page) */
diff --git a/include/asm-sh/cpu-sh3/cacheflush.h b/include/asm-sh/cpu-sh3/cacheflush.h
index f51aed00c68..db0cb071ea8 100644
--- a/include/asm-sh/cpu-sh3/cacheflush.h
+++ b/include/asm-sh/cpu-sh3/cacheflush.h
@@ -10,7 +10,7 @@
 #ifndef __ASM_CPU_SH3_CACHEFLUSH_H
 #define __ASM_CPU_SH3_CACHEFLUSH_H
 
-/* 
+/*
  * Cache flushing:
  *
  *  - flush_cache_all() flushes entire cache
@@ -35,10 +35,6 @@
 /* 32KB cache, 4kb PAGE sizes need to check bit 12 */
 #define CACHE_ALIAS 0x00001000
 
-struct page;
-struct mm_struct;
-struct vm_area_struct;
-
 extern void flush_cache_all(void);
 extern void flush_cache_mm(struct mm_struct *mm);
 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
@@ -79,8 +75,6 @@ extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
 
 #define p3_cache_init()			do { } while (0)
 
-#define HAVE_ARCH_UNMAPPED_AREA
-
 #endif
 
 #endif /* __ASM_CPU_SH3_CACHEFLUSH_H */
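With the __flush_*_region() prototypes now living in <asm/cacheflush.h> (which also pulls in <linux/mm.h> for the struct declarations), a single include is enough for region flushes. A hedged driver-style example; the buffer and function below are hypothetical:

#include <asm/cacheflush.h>

static char dev_buf[256];

static void push_buf_to_device(void)
{
	/* Write dirty lines for this sub-page region back to memory
	 * so a bus master sees the CPU's stores. */
	__flush_wback_region(dev_buf, sizeof(dev_buf));
}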
diff --git a/include/asm-sh/cpu-sh4/cacheflush.h b/include/asm-sh/cpu-sh4/cacheflush.h
index ea58c4c5944..a95fc951aff 100644
--- a/include/asm-sh/cpu-sh4/cacheflush.h
+++ b/include/asm-sh/cpu-sh4/cacheflush.h
@@ -16,30 +16,26 @@
  * caching; in which case they're only semi-broken),
  * so we need them.
  */
-struct page;
-struct mm_struct;
-struct vm_area_struct;
-
-extern void flush_cache_all(void);
-extern void flush_cache_mm(struct mm_struct *mm);
-extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
-			      unsigned long end);
-extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn);
-extern void flush_dcache_page(struct page *pg);
+void flush_cache_all(void);
+void flush_cache_mm(struct mm_struct *mm);
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+		       unsigned long end);
+void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
+		      unsigned long pfn);
+void flush_dcache_page(struct page *pg);
 
 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
 
-extern void flush_icache_range(unsigned long start, unsigned long end);
-extern void flush_cache_sigtramp(unsigned long addr);
-extern void flush_icache_user_range(struct vm_area_struct *vma,
-				    struct page *page, unsigned long addr,
-				    int len);
+void flush_icache_range(unsigned long start, unsigned long end);
+void flush_cache_sigtramp(unsigned long addr);
+void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+			     unsigned long addr, int len);
 
 #define flush_icache_page(vma,pg)		do { } while (0)
 
 /* Initialization of P3 area for copy_user_page */
-extern void p3_cache_init(void);
+void p3_cache_init(void);
 
 #define PG_mapped	PG_arch_1
 
@@ -57,4 +53,3 @@ static inline int remap_area_pages(unsigned long addr, unsigned long phys_addr,
 }
 #endif /* CONFIG_MMU */
 #endif /* __ASM_CPU_SH4_CACHEFLUSH_H */
-
diff --git a/include/asm-sh/page.h b/include/asm-sh/page.h
index 4811d410d12..51d7281a546 100644
--- a/include/asm-sh/page.h
+++ b/include/asm-sh/page.h
@@ -41,7 +41,8 @@ extern void (*copy_page)(void *to, void *from);
 extern void clear_page_slow(void *to);
 extern void copy_page_slow(void *to, void *from);
 
-#if defined(CONFIG_SH7705_CACHE_32KB) && defined(CONFIG_MMU)
+#if defined(CONFIG_MMU) && (defined(CONFIG_CPU_SH4) || \
+	defined(CONFIG_SH7705_CACHE_32KB))
 struct page;
 extern void clear_user_page(void *to, unsigned long address, struct page *pg);
 extern void copy_user_page(void *to, void *from, unsigned long address, struct page *pg);
@@ -50,29 +51,20 @@ extern void __copy_user_page(void *to, void *from, void *orig_to);
 #elif defined(CONFIG_CPU_SH2) || defined(CONFIG_CPU_SH3) || !defined(CONFIG_MMU)
 #define clear_user_page(page, vaddr, pg)	clear_page(page)
 #define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
-#elif defined(CONFIG_CPU_SH4)
-struct page;
-extern void clear_user_page(void *to, unsigned long address, struct page *pg);
-extern void copy_user_page(void *to, void *from, unsigned long address, struct page *pg);
-extern void __clear_user_page(void *to, void *orig_to);
-extern void __copy_user_page(void *to, void *from, void *orig_to);
 #endif
 
 /*
  * These are used to make use of C type-checking..
  */
 typedef struct { unsigned long pte; } pte_t;
-typedef struct { unsigned long pmd; } pmd_t;
 typedef struct { unsigned long pgd; } pgd_t;
 typedef struct { unsigned long pgprot; } pgprot_t;
 
 #define pte_val(x)	((x).pte)
-#define pmd_val(x)	((x).pmd)
 #define pgd_val(x)	((x).pgd)
 #define pgprot_val(x)	((x).pgprot)
 
 #define __pte(x) ((pte_t) { (x) } )
-#define __pmd(x) ((pmd_t) { (x) } )
 #define __pgd(x) ((pgd_t) { (x) } )
 #define __pgprot(x)	((pgprot_t) { (x) } )
diff --git a/include/asm-sh/pgalloc.h b/include/asm-sh/pgalloc.h
index f4f233f7a4f..e841465ab4d 100644
--- a/include/asm-sh/pgalloc.h
+++ b/include/asm-sh/pgalloc.h
@@ -1,15 +1,6 @@
 #ifndef __ASM_SH_PGALLOC_H
 #define __ASM_SH_PGALLOC_H
 
-#include <linux/threads.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-
-#define pgd_quicklist ((unsigned long *)0)
-#define pmd_quicklist ((unsigned long *)0)
-#define pte_quicklist ((unsigned long *)0)
-#define pgtable_cache_size 0L
-
 #define pmd_populate_kernel(mm, pmd, pte) \
 		set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
 
@@ -24,38 +15,24 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
  */
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	unsigned int pgd_size = (USER_PTRS_PER_PGD * sizeof(pgd_t));
-	pgd_t *pgd = (pgd_t *)kmalloc(pgd_size, GFP_KERNEL);
-
-	if (pgd)
-		memset(pgd, 0, pgd_size);
-
-	return pgd;
+	return (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
 }
 
 static inline void pgd_free(pgd_t *pgd)
 {
-	kfree(pgd);
+	free_page((unsigned long)pgd);
 }
 
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 					  unsigned long address)
 {
-	pte_t *pte;
-
-	pte = (pte_t *) __get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
-
-	return pte;
+	return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
 }
 
 static inline struct page *pte_alloc_one(struct mm_struct *mm,
 					 unsigned long address)
 {
-	struct page *pte;
-
-	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
-
-	return pte;
+	return alloc_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
 }
 
 static inline void pte_free_kernel(pte_t *pte)
@@ -75,14 +52,8 @@ static inline void pte_free(struct page *pte)
  * inside the pgd, so has no extra memory associated with it.
  */
 
-#define pmd_alloc_one(mm, addr)		({ BUG(); ((pmd_t *)2); })
 #define pmd_free(x)			do { } while (0)
 #define __pmd_free_tlb(tlb,x)		do { } while (0)
-#define pgd_populate(mm, pmd, pte)	BUG()
 #define check_pgt_cache()		do { } while (0)
 
-#ifdef CONFIG_CPU_SH4
-#define PG_mapped	PG_arch_1
-#endif
-
 #endif /* __ASM_SH_PGALLOC_H */
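pgd_alloc() now hands back one zeroed page instead of a kmalloc()ed partial table, matching the pte allocators. An illustrative lifecycle, sketch only (the helper name is hypothetical):

static int pgd_roundtrip_sketch(struct mm_struct *mm)
{
	pgd_t *pgd = pgd_alloc(mm);	/* one zeroed page */

	if (!pgd)
		return -ENOMEM;

	/* ... install into mm->pgd and use ... */

	pgd_free(pgd);			/* free_page(), not kfree() */
	return 0;
}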
diff --git a/include/asm-sh/pgtable.h b/include/asm-sh/pgtable.h
index 40d41a78041..9728b58f7c1 100644
--- a/include/asm-sh/pgtable.h
+++ b/include/asm-sh/pgtable.h
@@ -1,42 +1,42 @@
-#ifndef __ASM_SH_PGTABLE_H
-#define __ASM_SH_PGTABLE_H
-
-#include <asm-generic/4level-fixup.h>
-
 /*
+ * This file contains the functions and defines necessary to modify and
+ * use the SuperH page table tree.
+ *
  * Copyright (C) 1999 Niibe Yutaka
- * Copyright (C) 2002, 2003, 2004 Paul Mundt
+ * Copyright (C) 2002 - 2005 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License.  See the file "COPYING" in the main directory of this
+ * archive for more details.
  */
+#ifndef __ASM_SH_PGTABLE_H
+#define __ASM_SH_PGTABLE_H
 
-#include <asm/pgtable-2level.h>
+#include <asm-generic/pgtable-nopmd.h>
+#include <asm/page.h>
+
+#define PTRS_PER_PGD		1024
 
-/*
- * This file contains the functions and defines necessary to modify and use
- * the SuperH page table tree.
- */
 #ifndef __ASSEMBLY__
-#include <asm/processor.h>
 #include <asm/addrspace.h>
 #include <asm/fixmap.h>
-#include <linux/threads.h>
 
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 extern void paging_init(void);
 
 /*
- * Basically we have the same two-level (which is the logical three level
- * Linux page table layout folded) page tables as the i386.
- */
-
-/*
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
  */
-extern unsigned long empty_zero_page[1024];
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
 
 #endif /* !__ASSEMBLY__ */
 
+/* traditional two-level paging structure */
+#define PGDIR_SHIFT	22
+#define PTRS_PER_PMD	1
+#define PTRS_PER_PTE	1024
 #define PMD_SIZE	(1UL << PMD_SHIFT)
 #define PMD_MASK	(~(PMD_SIZE-1))
 #define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
@@ -47,7 +47,6 @@ extern unsigned long empty_zero_page[1024];
 
 #define PTE_PHYS_MASK	0x1ffff000
 
-#ifndef __ASSEMBLY__
 /*
  * First 1MB map is used by fixed purpose.
  * Currently only 4-enty (16kB) is used (see arch/sh/mm/cache.c)
@@ -65,7 +64,7 @@ extern unsigned long empty_zero_page[1024];
 #define _PAGE_SZ1	0x080  /* SZ1-bit : Size of page (on SH-4) */
 #define _PAGE_PRESENT	0x100  /* V-bit   : page is valid */
 #define _PAGE_PROTNONE	0x200  /* software: if not present  */
-#define _PAGE_ACCESSED 	0x400  /* software: page referenced */
+#define _PAGE_ACCESSED	0x400  /* software: page referenced */
 #define _PAGE_U0_SHARED 0x800  /* software: page is shared in user space */
 
 #define _PAGE_FILE	_PAGE_WT  /* software: pagecache or swap? */
@@ -83,7 +82,6 @@ extern unsigned long empty_zero_page[1024];
 #define _PAGE_PCC_ATR8	0x60000000	/* Attribute Memory space, 8 bit bus */
 #define _PAGE_PCC_ATR16	0x60000001	/* Attribute Memory space, 6 bit bus */
 
-
 /* Mask which drop software flags
  * We also drop WT bit since it is used for _PAGE_FILE
  * bit in this implementation.
@@ -115,6 +113,8 @@ extern unsigned long empty_zero_page[1024];
 #define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
 #define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_CACHABLE | _PAGE_DIRTY | _PAGE_SHARED)
 
+#ifndef __ASSEMBLY__
+
 #ifdef CONFIG_MMU
 #define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE |_PAGE_ACCESSED | _PAGE_FLAGS_HARD)
 #define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_CACHABLE |_PAGE_ACCESSED | _PAGE_SHARED | _PAGE_FLAGS_HARD)
@@ -137,12 +137,13 @@ extern unsigned long empty_zero_page[1024];
 #define PAGE_KERNEL_PCC	__pgprot(0)
 #endif
 
+#endif /* __ASSEMBLY__ */
+
 /*
  * As i386 and MIPS, SuperH can't do page protection for execute, and
  * considers that the same as a read.  Also, write permissions imply
- * read permissions. This is the closest we can get..  
+ * read permissions. This is the closest we can get..
  */
-
 #define __P000	PAGE_NONE
 #define __P001	PAGE_READONLY
 #define __P010	PAGE_COPY
@@ -161,6 +162,26 @@
 #define __S110	PAGE_SHARED
 #define __S111	PAGE_SHARED
 
+#ifndef __ASSEMBLY__
+
+/*
+ * Certain architectures need to do special things when PTEs
+ * within a page table are directly modified.  Thus, the following
+ * hook is made available.
+ */
+#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
+#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
+
+/*
+ * (pmds are folded into pgds so this doesn't get actually called,
+ * but the define is needed for a generic inline function.)
+ */
+#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
+
+#define pte_pfn(x)		((unsigned long)(((x).pte >> PAGE_SHIFT)))
+#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+#define pfn_pmd(pfn, prot)	__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+
 #define pte_none(x)	(!pte_val(x))
 #define pte_present(x)	(pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
 #define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
@@ -171,7 +192,7 @@
 #define pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
 
 #define pages_to_mb(x)	((x) >> (20-PAGE_SHIFT))
-#define pte_page(x) 	phys_to_page(pte_val(x)&PTE_PHYS_MASK)
+#define pte_page(x)	phys_to_page(pte_val(x)&PTE_PHYS_MASK)
 
 /*
  * The following only work if pte_present() is true.
@@ -248,6 +269,11 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define pte_unmap(pte)		do { } while (0)
 #define pte_unmap_nested(pte)	do { } while (0)
 
+#define pte_ERROR(e) \
+	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
+#define pgd_ERROR(e) \
+	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
 struct vm_area_struct;
 extern void update_mmu_cache(struct vm_area_struct * vma,
 			     unsigned long address, pte_t pte);
@@ -272,8 +298,6 @@ extern void update_mmu_cache(struct vm_area_struct * vma,
 
 typedef pte_t *pte_addr_t;
 
-#endif /* !__ASSEMBLY__ */
-
 #define kern_addr_valid(addr)	(1)
 
 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
@@ -301,5 +325,7 @@ extern pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t
 
 #include <asm-generic/pgtable.h>
 
+#endif /* !__ASSEMBLY__ */
+
 #endif /* __ASM_SH_PAGE_H */
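The pte helpers added above round-trip a page frame number through a pte and back. A minimal sketch, building the pte on the stack rather than installing it in a live page table:

static void pte_helpers_sketch(unsigned long pfn)
{
	/* Encode: pfn shifted into place, protection bits OR'd in. */
	pte_t pte = pfn_pte(pfn, PAGE_KERNEL);

	/* Decode: the right shift discards the low protection bits,
	 * leaving just the frame number. */
	BUG_ON(pte_pfn(pte) != pfn);
}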