Diffstat (limited to 'arch/sh/mm')
-rw-r--r--   arch/sh/mm/Kconfig       |  77
-rw-r--r--   arch/sh/mm/cache-sh2.c   |  69
-rw-r--r--   arch/sh/mm/cache-sh4.c   |  18
-rw-r--r--   arch/sh/mm/clear_page.S  |  18
-rw-r--r--   arch/sh/mm/copy_page.S   |  16
-rw-r--r--   arch/sh/mm/fault.c       | 161
-rw-r--r--   arch/sh/mm/init.c        |  45
-rw-r--r--   arch/sh/mm/ioremap.c     |   4
-rw-r--r--   arch/sh/mm/pg-dma.c      |   2
-rw-r--r--   arch/sh/mm/pg-sh4.c      |  35
10 files changed, 223 insertions, 222 deletions
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 9dd606464d2..4e0362f5038 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -4,8 +4,12 @@ menu "Processor selection"
 # Processor families
 #
 config CPU_SH2
+	select SH_WRITETHROUGH if !CPU_SH2A
 	bool
-	select SH_WRITETHROUGH
+
+config CPU_SH2A
+	bool
+	select CPU_SH2
 
 config CPU_SH3
 	bool
@@ -16,6 +20,7 @@ config CPU_SH4
 	bool
 	select CPU_HAS_INTEVT
 	select CPU_HAS_SR_RB
+	select CPU_HAS_PTEA if !CPU_SUBTYPE_ST40
 
 config CPU_SH4A
 	bool
@@ -40,6 +45,16 @@ config CPU_SUBTYPE_SH7604
 	bool "Support SH7604 processor"
 	select CPU_SH2
 
+config CPU_SUBTYPE_SH7619
+	bool "Support SH7619 processor"
+	select CPU_SH2
+
+comment "SH-2A Processor Support"
+
+config CPU_SUBTYPE_SH7206
+	bool "Support SH7206 processor"
+	select CPU_SH2A
+
 comment "SH-3 Processor Support"
 
 config CPU_SUBTYPE_SH7300
@@ -89,6 +104,7 @@ comment "SH-4 Processor Support"
 config CPU_SUBTYPE_SH7750
 	bool "Support SH7750 processor"
 	select CPU_SH4
+	select CPU_HAS_IPR_IRQ
 	help
 	  Select SH7750 if you have a 200 Mhz SH-4 HD6417750 CPU.
 
@@ -104,15 +120,18 @@ config CPU_SUBTYPE_SH7750R
 	bool "Support SH7750R processor"
 	select CPU_SH4
 	select CPU_SUBTYPE_SH7750
+	select CPU_HAS_IPR_IRQ
 
 config CPU_SUBTYPE_SH7750S
 	bool "Support SH7750S processor"
 	select CPU_SH4
 	select CPU_SUBTYPE_SH7750
+	select CPU_HAS_IPR_IRQ
 
 config CPU_SUBTYPE_SH7751
 	bool "Support SH7751 processor"
 	select CPU_SH4
+	select CPU_HAS_IPR_IRQ
 	help
 	  Select SH7751 if you have a 166 Mhz SH-4 HD6417751 CPU,
 	  or if you have a HD6417751R CPU.
@@ -121,6 +140,7 @@ config CPU_SUBTYPE_SH7751R
 	bool "Support SH7751R processor"
 	select CPU_SH4
 	select CPU_SUBTYPE_SH7751
+	select CPU_HAS_IPR_IRQ
 
 config CPU_SUBTYPE_SH7760
 	bool "Support SH7760 processor"
@@ -157,6 +177,11 @@ config CPU_SUBTYPE_SH7780
 	select CPU_SH4A
 	select CPU_HAS_INTC2_IRQ
 
+config CPU_SUBTYPE_SH7785
+	bool "Support SH7785 processor"
+	select CPU_SH4A
+	select CPU_HAS_INTC2_IRQ
+
 comment "SH4AL-DSP Processor Support"
 
 config CPU_SUBTYPE_SH73180
@@ -216,13 +241,22 @@ config MEMORY_SIZE
 
 config 32BIT
 	bool "Support 32-bit physical addressing through PMB"
-	depends on CPU_SH4A && MMU
+	depends on CPU_SH4A && MMU && (!X2TLB || BROKEN)
 	default y
 	help
 	  If you say Y here, physical addressing will be extended to
 	  32-bits through the SH-4A PMB. If this is not set, legacy
 	  29-bit physical addressing will be used.
 
+config X2TLB
+	bool "Enable extended TLB mode"
+	depends on CPU_SUBTYPE_SH7785 && MMU && EXPERIMENTAL
+	help
+	  Selecting this option will enable the extended mode of the SH-X2
+	  TLB. For legacy SH-X behaviour and interoperability, say N. For
+	  all of the fun new features and a willingness to submit bug reports,
+	  say Y.
+
 config VSYSCALL
 	bool "Support vsyscall page"
 	depends on MMU
@@ -237,16 +271,52 @@ config VSYSCALL
 	  (the default value) say Y.
 
 choice
+	prompt "Kernel page size"
+	default PAGE_SIZE_4KB
+
+config PAGE_SIZE_4KB
+	bool "4kB"
+	help
+	  This is the default page size used by all SuperH CPUs.
+
+config PAGE_SIZE_8KB
+	bool "8kB"
+	depends on EXPERIMENTAL && X2TLB
+	help
+	  This enables 8kB pages as supported by SH-X2 and later MMUs.
+
+config PAGE_SIZE_64KB
+	bool "64kB"
+	depends on EXPERIMENTAL && CPU_SH4
+	help
+	  This enables support for 64kB pages, possible on all SH-4
+	  CPUs and later. Highly experimental, not recommended.
+
+endchoice
+
+choice
 	prompt "HugeTLB page size"
 	depends on HUGETLB_PAGE && CPU_SH4 && MMU
 	default HUGETLB_PAGE_SIZE_64K
 
 config HUGETLB_PAGE_SIZE_64K
-	bool "64K"
+	bool "64kB"
+
+config HUGETLB_PAGE_SIZE_256K
+	bool "256kB"
+	depends on X2TLB
 
 config HUGETLB_PAGE_SIZE_1MB
 	bool "1MB"
 
+config HUGETLB_PAGE_SIZE_4MB
+	bool "4MB"
+	depends on X2TLB
+
+config HUGETLB_PAGE_SIZE_64MB
+	bool "64MB"
+	depends on X2TLB
+
 endchoice
 
 source "mm/Kconfig"
@@ -274,7 +344,6 @@ config SH_DIRECT_MAPPED
 
 config SH_WRITETHROUGH
 	bool "Use write-through caching"
-	default y if CPU_SH2
 	help
 	  Selecting this option will configure the caches in write-through
 	  mode, as opposed to the default write-back configuration.
diff --git a/arch/sh/mm/cache-sh2.c b/arch/sh/mm/cache-sh2.c
index 2689cb24ea2..6614033f6be 100644
--- a/arch/sh/mm/cache-sh2.c
+++ b/arch/sh/mm/cache-sh2.c
@@ -5,6 +5,7 @@
  *
  * Released under the terms of the GNU GPL v2.0.
  */
+
 #include <linux/init.h>
 #include <linux/mm.h>
 
@@ -14,37 +15,43 @@
 #include <asm/cacheflush.h>
 #include <asm/io.h>
 
-/*
- * Calculate the OC address and set the way bit on the SH-2.
- *
- * We must have already jump_to_P2()'ed prior to calling this
- * function, since we rely on CCR manipulation to do the
- * Right Thing(tm).
- */
-unsigned long __get_oc_addr(unsigned long set, unsigned long way)
+void __flush_wback_region(void *start, int size)
 {
-	unsigned long ccr;
-
-	/*
-	 * On SH-2 the way bit isn't tracked in the address field
-	 * if we're doing address array access .. instead, we need
-	 * to manually switch out the way in the CCR.
-	 */
-	ccr = ctrl_inl(CCR);
-	ccr &= ~0x00c0;
-	ccr |= way << cpu_data->dcache.way_shift;
-
-	/*
-	 * Despite the number of sets being halved, we end up losing
-	 * the first 2 ways to OCRAM instead of the last 2 (if we're
-	 * 4-way). As a result, forcibly setting the W1 bit handily
-	 * bumps us up 2 ways.
-	 */
-	if (ccr & CCR_CACHE_ORA)
-		ccr |= 1 << (cpu_data->dcache.way_shift + 1);
-
-	ctrl_outl(ccr, CCR);
-
-	return CACHE_OC_ADDRESS_ARRAY | (set << cpu_data->dcache.entry_shift);
+	unsigned long v;
+	unsigned long begin, end;
+
+	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
+	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
+		& ~(L1_CACHE_BYTES-1);
+	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
+		/* FIXME cache purge */
+		ctrl_outl((v & 0x1ffffc00), (v & 0x00000ff0) | 0x00000008);
+	}
+}
+
+void __flush_purge_region(void *start, int size)
+{
+	unsigned long v;
+	unsigned long begin, end;
+
+	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
+	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
+		& ~(L1_CACHE_BYTES-1);
+	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
+		ctrl_outl((v & 0x1ffffc00), (v & 0x00000ff0) | 0x00000008);
+	}
+}
+
+void __flush_invalidate_region(void *start, int size)
+{
+	unsigned long v;
+	unsigned long begin, end;
+
+	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
+	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
+		& ~(L1_CACHE_BYTES-1);
+	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
+		ctrl_outl((v & 0x1ffffc00), (v & 0x00000ff0) | 0x00000008);
+	}
 }
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index e48cc22724d..ae531affccb 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -11,12 +11,8 @@
  */
 #include <linux/init.h>
 #include <linux/mm.h>
-#include <asm/addrspace.h>
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/cache.h>
-#include <asm/io.h>
-#include <asm/pgalloc.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 
@@ -83,9 +79,9 @@ static void __init emit_cache_params(void)
  */
 
 /* Worst case assumed to be 64k cache, direct-mapped i.e. 4 synonym bits. */
-#define MAX_P3_SEMAPHORES	16
+#define MAX_P3_MUTEXES		16
 
-struct semaphore p3map_sem[MAX_P3_SEMAPHORES];
+struct mutex p3map_mutex[MAX_P3_MUTEXES];
 
 void __init p3_cache_init(void)
 {
@@ -115,7 +111,7 @@ void __init p3_cache_init(void)
 		panic("%s failed.", __FUNCTION__);
 
 	for (i = 0; i < cpu_data->dcache.n_aliases; i++)
-		sema_init(&p3map_sem[i], 1);
+		mutex_init(&p3map_mutex[i]);
 }
 
 /*
@@ -229,7 +225,7 @@ static inline void flush_cache_4096(unsigned long start,
 	 */
 	if ((cpu_data->flags & CPU_HAS_P2_FLUSH_BUG) ||
 	    (start < CACHE_OC_ADDRESS_ARRAY))
-	 	exec_offset = 0x20000000;
+		exec_offset = 0x20000000;
 
 	local_irq_save(flags);
 	__flush_cache_4096(start | SH_CACHE_ASSOC,
@@ -250,7 +246,7 @@ void flush_dcache_page(struct page *page)
 
 		/* Loop all the D-cache */
 		n = cpu_data->dcache.n_aliases;
-		for (i = 0; i < n; i++, addr += PAGE_SIZE)
+		for (i = 0; i < n; i++, addr += 4096)
 			flush_cache_4096(addr, phys);
 	}
diff --git a/arch/sh/mm/clear_page.S b/arch/sh/mm/clear_page.S
index 7b96425ae27..8a706131e52 100644
--- a/arch/sh/mm/clear_page.S
+++ b/arch/sh/mm/clear_page.S
@@ -1,12 +1,12 @@
-/* $Id: clear_page.S,v 1.13 2003/08/25 17:03:10 lethal Exp $
- *
+/*
  * __clear_user_page, __clear_user, clear_page implementation of SuperH
  *
  * Copyright (C) 2001 Kaz Kojima
  * Copyright (C) 2001, 2002 Niibe Yutaka
- *
+ * Copyright (C) 2006 Paul Mundt
  */
 #include <linux/linkage.h>
+#include <asm/page.h>
 
 /*
  * clear_page_slow
@@ -18,11 +18,11 @@
 /*
  * r0 --- scratch
  * r4 --- to
- * r5 --- to + 4096
+ * r5 --- to + PAGE_SIZE
  */
 ENTRY(clear_page_slow)
 	mov	r4,r5
-	mov.w	.Llimit,r0
+	mov.l	.Llimit,r0
 	add	r0,r5
 	mov	#0,r0
 	!
@@ -50,7 +50,7 @@ ENTRY(clear_page_slow)
 	!
 	rts
 	 nop
-.Llimit:	.word	(4096-28)
+.Llimit:	.long	(PAGE_SIZE-28)
 
 ENTRY(__clear_user)
 	!
@@ -164,10 +164,10 @@ ENTRY(__clear_user)
  * r0 --- scratch
  * r4 --- to
  * r5 --- orig_to
- * r6 --- to + 4096
+ * r6 --- to + PAGE_SIZE
  */
ENTRY(__clear_user_page)
-	mov.w	.L4096,r0
+	mov.l	.Lpsz,r0
 	mov	r4,r6
 	add	r0,r6
 	mov	#0,r0
@@ -191,7 +191,7 @@ ENTRY(__clear_user_page)
 	!
 	rts
 	 nop
-.L4096:	.word	4096
+.Lpsz:	.long	PAGE_SIZE
 
 #endif
diff --git a/arch/sh/mm/copy_page.S b/arch/sh/mm/copy_page.S
index 1addffe117c..397c94c9731 100644
--- a/arch/sh/mm/copy_page.S
+++ b/arch/sh/mm/copy_page.S
@@ -1,12 +1,12 @@
-/* $Id: copy_page.S,v 1.8 2003/08/25 17:03:10 lethal Exp $
- *
+/*
  * copy_page, __copy_user_page, __copy_user implementation of SuperH
  *
  * Copyright (C) 2001 Niibe Yutaka & Kaz Kojima
  * Copyright (C) 2002 Toshinobu Sugioka
- *
+ * Copyright (C) 2006 Paul Mundt
 */
 #include <linux/linkage.h>
+#include <asm/page.h>
 
 /*
  * copy_page_slow
@@ -18,7 +18,7 @@
 
 /*
  * r0, r1, r2, r3, r4, r5, r6, r7 --- scratch
- * r8 --- from + 4096
+ * r8 --- from + PAGE_SIZE
  * r9 --- not used
 * r10 --- to
 * r11 --- from
@@ -30,7 +30,7 @@ ENTRY(copy_page_slow)
 	mov	r4,r10
 	mov	r5,r11
 	mov	r5,r8
-	mov.w	.L4096,r0
+	mov.l	.Lpsz,r0
 	add	r0,r8
 	!
1:	mov.l	@r11+,r0
@@ -80,7 +80,7 @@ ENTRY(copy_page_slow)
 
 /*
  * r0, r1, r2, r3, r4, r5, r6, r7 --- scratch
- * r8 --- from + 4096
+ * r8 --- from + PAGE_SIZE
  * r9 --- orig_to
 * r10 --- to
 * r11 --- from
@@ -94,7 +94,7 @@ ENTRY(__copy_user_page)
 	mov	r5,r11
 	mov	r6,r9
 	mov	r5,r8
-	mov.w	.L4096,r0
+	mov.l	.Lpsz,r0
 	add	r0,r8
 	!
1:	ocbi	@r9
@@ -129,7 +129,7 @@ ENTRY(__copy_user_page)
 	rts
 	 nop
 #endif
-.L4096:	.word	4096
+.Lpsz:	.long	PAGE_SIZE
 /*
  * __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
  * Return the number of bytes NOT copied
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 68663b8f99a..716ebf568af 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -26,13 +26,19 @@ extern void die(const char *,struct pt_regs *,long);
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
-asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
-			      unsigned long address)
+asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
+					unsigned long writeaccess,
+					unsigned long address)
 {
 	struct task_struct *tsk;
 	struct mm_struct *mm;
 	struct vm_area_struct * vma;
 	unsigned long page;
+	int si_code;
+	siginfo_t info;
+
+	trace_hardirqs_on();
+	local_irq_enable();
 
 #ifdef CONFIG_SH_KGDB
 	if (kgdb_nofault && kgdb_bus_err_hook)
@@ -41,6 +47,46 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 
 	tsk = current;
 	mm = tsk->mm;
+	si_code = SEGV_MAPERR;
+
+	if (unlikely(address >= TASK_SIZE)) {
+		/*
+		 * Synchronize this task's top level page-table
+		 * with the 'reference' page table.
+		 *
+		 * Do _not_ use "tsk" here. We might be inside
+		 * an interrupt in the middle of a task switch..
+		 */
+		int offset = pgd_index(address);
+		pgd_t *pgd, *pgd_k;
+		pud_t *pud, *pud_k;
+		pmd_t *pmd, *pmd_k;
+
+		pgd = get_TTB() + offset;
+		pgd_k = swapper_pg_dir + offset;
+
+		/* This will never happen with the folded page table. */
+		if (!pgd_present(*pgd)) {
+			if (!pgd_present(*pgd_k))
+				goto bad_area_nosemaphore;
+			set_pgd(pgd, *pgd_k);
+			return;
+		}
+
+		pud = pud_offset(pgd, address);
+		pud_k = pud_offset(pgd_k, address);
+		if (pud_present(*pud) || !pud_present(*pud_k))
+			goto bad_area_nosemaphore;
+		set_pud(pud, *pud_k);
+
+		pmd = pmd_offset(pud, address);
+		pmd_k = pmd_offset(pud_k, address);
+		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
+			goto bad_area_nosemaphore;
+		set_pmd(pmd, *pmd_k);
+
+		return;
+	}
 
 	/*
 	 * If we're in an interrupt or have no user
@@ -65,6 +111,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 * we can handle it..
 */
good_area:
+	si_code = SEGV_ACCERR;
 	if (writeaccess) {
 		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
@@ -104,10 +151,13 @@ survive:
bad_area:
 	up_read(&mm->mmap_sem);
 
+bad_area_nosemaphore:
 	if (user_mode(regs)) {
-		tsk->thread.address = address;
-		tsk->thread.error_code = writeaccess;
-		force_sig(SIGSEGV, tsk);
+		info.si_signo = SIGSEGV;
+		info.si_errno = 0;
+		info.si_code = si_code;
+		info.si_addr = (void *) address;
+		force_sig_info(SIGSEGV, &info, tsk);
 		return;
 	}
 
@@ -127,11 +177,9 @@ no_context:
 	printk(KERN_ALERT "Unable to handle kernel paging request");
 	printk(" at virtual address %08lx\n", address);
 	printk(KERN_ALERT "pc = %08lx\n", regs->pc);
-	asm volatile("mov.l	%1, %0"
-		     : "=r" (page)
-		     : "m" (__m(MMU_TTB)));
+	page = (unsigned long)get_TTB();
 	if (page) {
-		page = ((unsigned long *) page)[address >> 22];
+		page = ((unsigned long *) page)[address >> PGDIR_SHIFT];
 		printk(KERN_ALERT "*pde = %08lx\n", page);
 		if (page & _PAGE_PRESENT) {
 			page &= PAGE_MASK;
@@ -166,98 +214,13 @@ do_sigbus:
 	 * Send a sigbus, regardless of whether we were in kernel
 	 * or user mode.
 	 */
-	tsk->thread.address = address;
-	tsk->thread.error_code = writeaccess;
-	tsk->thread.trap_no = 14;
-	force_sig(SIGBUS, tsk);
+	info.si_signo = SIGBUS;
+	info.si_errno = 0;
+	info.si_code = BUS_ADRERR;
+	info.si_addr = (void *)address;
+	force_sig_info(SIGBUS, &info, tsk);
 
 	/* Kernel mode? Handle exceptions or die */
 	if (!user_mode(regs))
 		goto no_context;
 }
-
-#ifdef CONFIG_SH_STORE_QUEUES
-/*
- * This is a special case for the SH-4 store queues, as pages for this
- * space still need to be faulted in before it's possible to flush the
- * store queue cache for writeout to the remapped region.
- */
-#define P3_ADDR_MAX	(P4SEG_STORE_QUE + 0x04000000)
-#else
-#define P3_ADDR_MAX	P4SEG
-#endif
-
-/*
- * Called with interrupts disabled.
- */
-asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
-					 unsigned long writeaccess,
-					 unsigned long address)
-{
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
-	pte_t entry;
-	struct mm_struct *mm = current->mm;
-	spinlock_t *ptl;
-	int ret = 1;
-
-#ifdef CONFIG_SH_KGDB
-	if (kgdb_nofault && kgdb_bus_err_hook)
-		kgdb_bus_err_hook();
-#endif
-
-	/*
-	 * We don't take page faults for P1, P2, and parts of P4, these
-	 * are always mapped, whether it be due to legacy behaviour in
-	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
-	 */
-	if (address >= P3SEG && address < P3_ADDR_MAX) {
-		pgd = pgd_offset_k(address);
-		mm = NULL;
-	} else {
-		if (unlikely(address >= TASK_SIZE || !mm))
-			return 1;
-
-		pgd = pgd_offset(mm, address);
-	}
-
-	pud = pud_offset(pgd, address);
-	if (pud_none_or_clear_bad(pud))
-		return 1;
-	pmd = pmd_offset(pud, address);
-	if (pmd_none_or_clear_bad(pmd))
-		return 1;
-
-	if (mm)
-		pte = pte_offset_map_lock(mm, pmd, address, &ptl);
-	else
-		pte = pte_offset_kernel(pmd, address);
-
-	entry = *pte;
-	if (unlikely(pte_none(entry) || pte_not_present(entry)))
-		goto unlock;
-	if (unlikely(writeaccess && !pte_write(entry)))
-		goto unlock;
-
-	if (writeaccess)
-		entry = pte_mkdirty(entry);
-	entry = pte_mkyoung(entry);
-
-#ifdef CONFIG_CPU_SH4
-	/*
-	 * ITLB is not affected by "ldtlb" instruction.
-	 * So, we need to flush the entry by ourselves.
-	 */
-	__flush_tlb_page(get_asid(), address & PAGE_MASK);
-#endif
-
-	set_pte(pte, entry);
-	update_mmu_cache(NULL, address, entry);
-	ret = 0;
-unlock:
-	if (mm)
-		pte_unmap_unlock(pte, ptl);
-	return ret;
-}
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 7154d1ce978..59f4cc18235 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -84,30 +84,22 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
 	pmd_t *pmd;
 	pte_t *pte;
 
-	pgd = swapper_pg_dir + pgd_index(addr);
+	pgd = pgd_offset_k(addr);
 	if (pgd_none(*pgd)) {
 		pgd_ERROR(*pgd);
 		return;
 	}
 
-	pud = pud_offset(pgd, addr);
-	if (pud_none(*pud)) {
-		pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC);
-		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
-		if (pmd != pmd_offset(pud, 0)) {
-			pud_ERROR(*pud);
-			return;
-		}
+	pud = pud_alloc(NULL, pgd, addr);
+	if (unlikely(!pud)) {
+		pud_ERROR(*pud);
+		return;
 	}
 
-	pmd = pmd_offset(pud, addr);
-	if (pmd_none(*pmd)) {
-		pte = (pte_t *)get_zeroed_page(GFP_ATOMIC);
-		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
-		if (pte != pte_offset_kernel(pmd, 0)) {
-			pmd_ERROR(*pmd);
-			return;
-		}
+	pmd = pmd_alloc(NULL, pud, addr);
+	if (unlikely(!pmd)) {
+		pmd_ERROR(*pmd);
+		return;
 	}
 
 	pte = pte_offset_kernel(pmd, addr);
@@ -155,9 +147,6 @@ extern char __init_begin, __init_end;
 
 /*
 * paging_init() sets up the page tables
- *
- * This routines also unmaps the page at virtual kernel address 0, so
- * that we can trap those pesky NULL-reference errors in the kernel.
 */
 void __init paging_init(void)
 {
@@ -180,14 +169,11 @@ void __init paging_init(void)
 	 */
 	{
 		unsigned long max_dma, low, start_pfn;
-		pgd_t *pg_dir;
-		int i;
-
-		/* We don't need kernel mapping as hardware support that. */
-		pg_dir = swapper_pg_dir;
-
-		for (i = 0; i < PTRS_PER_PGD; i++)
-			pgd_val(pg_dir[i]) = 0;
+		/* We don't need to map the kernel through the TLB, as
+		 * it is permanently mapped using P1. So clear the
+		 * entire pgd. */
+		memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
 
 		/* Turn on the MMU */
 		enable_mmu();
@@ -206,6 +192,10 @@ void __init paging_init(void)
 		}
 	}
 
+	/* Set an initial value for the MMU.TTB so we don't have to
+	 * check for a null value. */
+	set_TTB(swapper_pg_dir);
+
 #elif defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4)
 	/*
 	 * If we don't have CONFIG_MMU set and the processor in question
@@ -227,7 +217,6 @@ static struct kcore_list kcore_mem, kcore_vmalloc;
 
 void __init mem_init(void)
 {
-	extern unsigned long empty_zero_page[1024];
 	int codesize, reservedpages, datasize, initsize;
 	int tmp;
 	extern unsigned long memory_start;
diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index a9fe80cfc23..11d54c14982 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -28,9 +28,7 @@ static inline void remap_area_pte(pte_t * pte, unsigned long address,
 {
 	unsigned long end;
 	unsigned long pfn;
-	pgprot_t pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW |
-				   _PAGE_DIRTY | _PAGE_ACCESSED |
-				   _PAGE_HW_SHARED | _PAGE_FLAGS_HARD | flags);
+	pgprot_t pgprot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | flags);
 
 	address &= ~PMD_MASK;
 	end = address + size;
diff --git a/arch/sh/mm/pg-dma.c b/arch/sh/mm/pg-dma.c
index 1406d2e348c..bb23679369d 100644
--- a/arch/sh/mm/pg-dma.c
+++ b/arch/sh/mm/pg-dma.c
@@ -39,8 +39,6 @@ static void copy_page_dma(void *to, void *from)
 
 static void clear_page_dma(void *to)
 {
-	extern unsigned long empty_zero_page[1024];
-
 	/*
 	 * We get invoked quite early on, if the DMAC hasn't been initialized
 	 * yet, fall back on the slow manual implementation.
diff --git a/arch/sh/mm/pg-sh4.c b/arch/sh/mm/pg-sh4.c
index 07371ed7a31..3f98d2a4f93 100644
--- a/arch/sh/mm/pg-sh4.c
+++ b/arch/sh/mm/pg-sh4.c
@@ -6,22 +6,12 @@
 *
 * Released under the terms of the GNU GPL v2.0.
 */
-#include <linux/init.h>
-#include <linux/mman.h>
 #include <linux/mm.h>
-#include <linux/threads.h>
-#include <asm/addrspace.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/cache.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <asm/pgalloc.h>
+#include <linux/mutex.h>
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 
-extern struct semaphore p3map_sem[];
+extern struct mutex p3map_mutex[];
 
 #define CACHE_ALIAS (cpu_data->dcache.alias_mask)
 
@@ -37,10 +27,6 @@ void clear_user_page(void *to, unsigned long address, struct page *page)
 	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
 		clear_page(to);
 	else {
-		pgprot_t pgprot = __pgprot(_PAGE_PRESENT |
-					   _PAGE_RW | _PAGE_CACHABLE |
-					   _PAGE_DIRTY | _PAGE_ACCESSED |
-					   _PAGE_HW_SHARED | _PAGE_FLAGS_HARD);
 		unsigned long phys_addr = PHYSADDR(to);
 		unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
 		pgd_t *pgd = pgd_offset_k(p3_addr);
@@ -50,8 +36,8 @@ void clear_user_page(void *to, unsigned long address, struct page *page)
 		pte_t entry;
 		unsigned long flags;
 
-		entry = pfn_pte(phys_addr >> PAGE_SHIFT, pgprot);
-		down(&p3map_sem[(address & CACHE_ALIAS)>>12]);
+		entry = pfn_pte(phys_addr >> PAGE_SHIFT, PAGE_KERNEL);
+		mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
 		set_pte(pte, entry);
 		local_irq_save(flags);
 		__flush_tlb_page(get_asid(), p3_addr);
@@ -59,7 +45,7 @@ void clear_user_page(void *to, unsigned long address, struct page *page)
 		update_mmu_cache(NULL, p3_addr, entry);
 		__clear_user_page((void *)p3_addr, to);
 		pte_clear(&init_mm, p3_addr, pte);
-		up(&p3map_sem[(address & CACHE_ALIAS)>>12]);
+		mutex_unlock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
 	}
 }
 
@@ -77,10 +63,6 @@ void copy_user_page(void *to, void *from, unsigned long address,
 	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
 		copy_page(to, from);
 	else {
-		pgprot_t pgprot = __pgprot(_PAGE_PRESENT |
-					   _PAGE_RW | _PAGE_CACHABLE |
-					   _PAGE_DIRTY | _PAGE_ACCESSED |
-					   _PAGE_HW_SHARED | _PAGE_FLAGS_HARD);
 		unsigned long phys_addr = PHYSADDR(to);
 		unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
 		pgd_t *pgd = pgd_offset_k(p3_addr);
@@ -90,8 +72,8 @@ void copy_user_page(void *to, void *from, unsigned long address,
 		pte_t entry;
 		unsigned long flags;
 
-		entry = pfn_pte(phys_addr >> PAGE_SHIFT, pgprot);
-		down(&p3map_sem[(address & CACHE_ALIAS)>>12]);
+		entry = pfn_pte(phys_addr >> PAGE_SHIFT, PAGE_KERNEL);
+		mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
 		set_pte(pte, entry);
 		local_irq_save(flags);
 		__flush_tlb_page(get_asid(), p3_addr);
@@ -99,7 +81,7 @@ void copy_user_page(void *to, void *from, unsigned long address,
 		update_mmu_cache(NULL, p3_addr, entry);
 		__copy_user_page((void *)p3_addr, from, to);
 		pte_clear(&init_mm, p3_addr, pte);
-		up(&p3map_sem[(address & CACHE_ALIAS)>>12]);
+		mutex_unlock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
 	}
 }
 
@@ -122,4 +104,3 @@ inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t
 	}
 	return pte;
 }
-
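
The three flush routines added to cache-sh2.c above all share one idiom: round the (start, size) region outward to whole cache lines, then visit it one line at a time. A minimal stand-alone C sketch of just that rounding arithmetic, assuming a 16-byte L1 line (the SH-2 value of L1_CACHE_BYTES) and printing line addresses instead of writing the SH-2 cache address array:

#include <stdio.h>

#define L1_CACHE_BYTES 16 /* assumption: SH-2 L1 cache line size */

int main(void)
{
	void *start = (void *)0x1234;	/* arbitrary unaligned address */
	int size = 100;

	/* Round the start address down to a cache-line boundary... */
	unsigned long begin = (unsigned long)start & ~(L1_CACHE_BYTES - 1);
	/* ...and the end address up, so the loop covers every line that
	 * overlaps [start, start + size), including partial first and
	 * last lines. */
	unsigned long end = ((unsigned long)start + size + L1_CACHE_BYTES - 1)
			    & ~(L1_CACHE_BYTES - 1);

	for (unsigned long v = begin; v < end; v += L1_CACHE_BYTES)
		printf("would flush line at 0x%lx\n", v);

	printf("begin=0x%lx end=0x%lx lines=%lu\n",
	       begin, end, (end - begin) / L1_CACHE_BYTES);
	return 0;
}

For start = 0x1234 and size = 100 this visits seven lines, 0x1230 through 0x1290 — exactly the lines the region overlaps, which is why both masks use the same ~(L1_CACHE_BYTES - 1).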
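The other recurring change, in cache-sh4.c and pg-sh4.c, replaces the p3map_sem semaphore array — only ever used as binary locks via down()/up() — with struct mutex and mutex_lock()/mutex_unlock(). A hedged userspace analogue of that before/after pattern using POSIX mutexes; the array size and helper below are invented for illustration and are not from the kernel tree:

#include <pthread.h>
#include <stdio.h>

#define N_ALIASES 4 /* assumption: stands in for dcache.n_aliases */

/* Was: a counting semaphore array initialized to 1 per entry. */
static pthread_mutex_t p3map_lock[N_ALIASES];
static int mapped_count[N_ALIASES];

static void map_alias(int idx)
{
	pthread_mutex_lock(&p3map_lock[idx]);	/* was: down(&sem[idx]) */
	mapped_count[idx]++;			/* critical section */
	pthread_mutex_unlock(&p3map_lock[idx]);	/* was: up(&sem[idx]) */
}

int main(void)
{
	for (int i = 0; i < N_ALIASES; i++)
		pthread_mutex_init(&p3map_lock[i], NULL); /* was: sema_init(&sem[i], 1) */

	map_alias(1);
	printf("alias 1 mapped %d time(s)\n", mapped_count[1]);
	return 0;
}

The payoff in the kernel is the same as here: the locking intent is explicit in the type, and mutex debugging can catch misuse (unlock by non-owner, double unlock) that a bare semaphore count would silently accept.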