Diffstat (limited to 'arch/arm/mm')
-rw-r--r-- | arch/arm/mm/Kconfig         | 13
-rw-r--r-- | arch/arm/mm/flush.c         | 26
-rw-r--r-- | arch/arm/mm/ioremap.c       |  7
-rw-r--r-- | arch/arm/mm/proc-arm1020.S  |  2
-rw-r--r-- | arch/arm/mm/proc-arm1020e.S |  2
-rw-r--r-- | arch/arm/mm/proc-arm1022.S  |  2
-rw-r--r-- | arch/arm/mm/proc-arm1026.S  |  2
-rw-r--r-- | arch/arm/mm/proc-arm925.S   |  3
-rw-r--r-- | arch/arm/mm/proc-arm926.S   |  2
-rw-r--r-- | arch/arm/mm/proc-syms.c     |  8
-rw-r--r-- | arch/arm/mm/proc-xscale.S   | 30
11 files changed, 88 insertions, 9 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 5f80f184cd3..b4f220dd5eb 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -46,7 +46,7 @@ config CPU_ARM710
 config CPU_ARM720T
 	bool "Support ARM720T processor" if !ARCH_CLPS711X && !ARCH_L7200 && !ARCH_CDB89712 && ARCH_INTEGRATOR
 	default y if ARCH_CLPS711X || ARCH_L7200 || ARCH_CDB89712 || ARCH_H720X
-	select CPU_32v4
+	select CPU_32v4T
 	select CPU_ABRT_LV4T
 	select CPU_CACHE_V4
 	select CPU_CACHE_VIVT
@@ -64,7 +64,7 @@ config CPU_ARM920T
 	bool "Support ARM920T processor"
 	depends on ARCH_EP93XX || ARCH_INTEGRATOR || CPU_S3C2410 || CPU_S3C2440 || CPU_S3C2442 || ARCH_IMX || ARCH_AAEC2000 || ARCH_AT91RM9200
 	default y if CPU_S3C2410 || CPU_S3C2440 || CPU_S3C2442 || ARCH_AT91RM9200
-	select CPU_32v4
+	select CPU_32v4T
 	select CPU_ABRT_EV4T
 	select CPU_CACHE_V4WT
 	select CPU_CACHE_VIVT
@@ -85,7 +85,7 @@ config CPU_ARM922T
 	bool "Support ARM922T processor" if ARCH_INTEGRATOR
 	depends on ARCH_LH7A40X || ARCH_INTEGRATOR
 	default y if ARCH_LH7A40X
-	select CPU_32v4
+	select CPU_32v4T
 	select CPU_ABRT_EV4T
 	select CPU_CACHE_V4WT
 	select CPU_CACHE_VIVT
@@ -104,7 +104,7 @@ config CPU_ARM925T
 	bool "Support ARM925T processor" if ARCH_OMAP1
 	depends on ARCH_OMAP15XX
 	default y if ARCH_OMAP15XX
-	select CPU_32v4
+	select CPU_32v4T
 	select CPU_ABRT_EV4T
 	select CPU_CACHE_V4WT
 	select CPU_CACHE_VIVT
@@ -285,6 +285,11 @@ config CPU_32v4
 	select TLS_REG_EMUL if SMP || !MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
 
+config CPU_32v4T
+	bool
+	select TLS_REG_EMUL if SMP || !MMU
+	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
+
 config CPU_32v5
 	bool
 	select TLS_REG_EMUL if SMP || !MMU
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index b103e56806b..d438ce41cdd 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -87,6 +87,32 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig
 	if (cache_is_vipt_aliasing())
 		flush_pfn_alias(pfn, user_addr);
 }
+
+void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
+			 unsigned long uaddr, void *kaddr,
+			 unsigned long len, int write)
+{
+	if (cache_is_vivt()) {
+		if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+			unsigned long addr = (unsigned long)kaddr;
+			__cpuc_coherent_kern_range(addr, addr + len);
+		}
+		return;
+	}
+
+	if (cache_is_vipt_aliasing()) {
+		flush_pfn_alias(page_to_pfn(page), uaddr);
+		return;
+	}
+
+	/* VIPT non-aliasing cache */
+	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask) &&
+	    vma->vm_flags & VM_EXEC) {
+		unsigned long addr = (unsigned long)kaddr;
+		/* only flushing the kernel mapping on non-aliasing VIPT */
+		__cpuc_coherent_kern_range(addr, addr + len);
+	}
+}
 #else
 #define flush_pfn_alias(pfn,vaddr)	do { } while (0)
 #endif
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 7eac87f0518..88a999df0ab 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -303,7 +303,6 @@ __ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
 	int err;
 	unsigned long addr;
 	struct vm_struct * area;
-	unsigned int cr = get_cr();
 
 	/*
 	 * High mappings must be supersection aligned
@@ -317,7 +316,7 @@ __ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
 	addr = (unsigned long)area->addr;
 
 #ifndef CONFIG_SMP
-	if ((((cpu_architecture() >= CPU_ARCH_ARMv6) && (cr & CR_XP)) ||
+	if ((((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
 	       cpu_is_xsc3()) && !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
 		area->flags |= VM_ARM_SECTION_MAPPING;
@@ -364,11 +363,14 @@ EXPORT_SYMBOL(__ioremap);
 
 void __iounmap(void __iomem *addr)
 {
+#ifndef CONFIG_SMP
 	struct vm_struct **p, *tmp;
+#endif
 	unsigned int section_mapping = 0;
 
 	addr = (void __iomem *)(PAGE_MASK & (unsigned long)addr);
 
+#ifndef CONFIG_SMP
 	/*
 	 * If this is a section based mapping we need to handle it
 	 * specially as the VM subysystem does not know how to handle
@@ -390,6 +392,7 @@ void __iounmap(void __iomem *addr)
 		}
 	}
 	write_unlock(&vmlist_lock);
+#endif
 
 	if (!section_mapping)
 		vunmap(addr);
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index 700297ae4a5..1d8316f3cec 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -34,6 +34,8 @@
 #include <asm/procinfo.h>
 #include <asm/ptrace.h>
 
+#include "proc-macros.S"
+
 /*
  * This is the maximum size of an area which will be invalidated
  * using the single invalidate entry instructions.  Anything larger
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index 0c33a5ed5a6..89b1d6d3d7c 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -34,6 +34,8 @@
 #include <asm/procinfo.h>
 #include <asm/ptrace.h>
 
+#include "proc-macros.S"
+
 /*
  * This is the maximum size of an area which will be invalidated
  * using the single invalidate entry instructions.  Anything larger
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index 566a5565307..a089528e6bc 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -23,6 +23,8 @@
 #include <asm/procinfo.h>
 #include <asm/ptrace.h>
 
+#include "proc-macros.S"
+
 /*
  * This is the maximum size of an area which will be invalidated
  * using the single invalidate entry instructions.  Anything larger
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index 6ea76321d0d..d6d84d92c7c 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -23,6 +23,8 @@
 #include <asm/procinfo.h>
 #include <asm/ptrace.h>
 
+#include "proc-macros.S"
+
 /*
  * This is the maximum size of an area which will be invalidated
  * using the single invalidate entry instructions.  Anything larger
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index ad15f8503d5..8d9a9f93b01 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -454,7 +454,8 @@ __arm925_setup:
 	mcr	p15, 7, r0, c15, c0, 0
 #endif
 
-	adr	r5, {r5, r6}
+	adr	r5, arm925_crval
+	ldmia	r5, {r5, r6}
 	mrc	p15, 0, r0, c1, c0		@ get control register v4
 	bic	r0, r0, r5
 	orr	r0, r0, r6
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 1e89d408047..44a7a652d62 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -480,7 +480,7 @@ __arm926_proc_info:
 	b	__arm926_setup
 	.long	cpu_arch_name
 	.long	cpu_elf_name
-	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_JAVA
+	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_VFP|HWCAP_EDSP|HWCAP_JAVA
 	.long	cpu_arm926_name
 	.long	arm926_processor_functions
 	.long	v4wbi_tlb_fns
diff --git a/arch/arm/mm/proc-syms.c b/arch/arm/mm/proc-syms.c
index 6c5f0fe578a..ab143557e68 100644
--- a/arch/arm/mm/proc-syms.c
+++ b/arch/arm/mm/proc-syms.c
@@ -13,6 +13,7 @@
 #include <asm/cacheflush.h>
 #include <asm/proc-fns.h>
 #include <asm/tlbflush.h>
+#include <asm/page.h>
 
 #ifndef MULTI_CPU
 EXPORT_SYMBOL(cpu_dcache_clean_area);
@@ -30,6 +31,13 @@ EXPORT_SYMBOL(__cpuc_coherent_kern_range);
 EXPORT_SYMBOL(cpu_cache);
 #endif
 
+#ifndef MULTI_USER
+EXPORT_SYMBOL(__cpu_clear_user_page);
+EXPORT_SYMBOL(__cpu_copy_user_page);
+#else
+EXPORT_SYMBOL(cpu_user);
+#endif
+
 /*
  * No module should need to touch the TLB (and currently
  * no modules do.  We export this for "loadkernel" support
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 521538671f4..561bff73a03 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -536,6 +536,11 @@ cpu_80200_name:
 	.asciz	"XScale-80200"
 	.size	cpu_80200_name, . - cpu_80200_name
 
+	.type	cpu_80219_name, #object
+cpu_80219_name:
+	.asciz	"XScale-80219"
+	.size	cpu_80219_name, . - cpu_80219_name
+
 	.type	cpu_8032x_name, #object
 cpu_8032x_name:
 	.asciz	"XScale-IOP8032x Family"
@@ -613,10 +618,33 @@ __80200_proc_info:
 	.long	xscale_cache_fns
 	.size	__80200_proc_info, . - __80200_proc_info
 
+	.type	__80219_proc_info,#object
+__80219_proc_info:
+	.long	0x69052e20
+	.long	0xffffffe0
+	.long	PMD_TYPE_SECT | \
+		PMD_SECT_BUFFERABLE | \
+		PMD_SECT_CACHEABLE | \
+		PMD_SECT_AP_WRITE | \
+		PMD_SECT_AP_READ
+	.long	PMD_TYPE_SECT | \
+		PMD_SECT_AP_WRITE | \
+		PMD_SECT_AP_READ
+	b	__xscale_setup
+	.long	cpu_arch_name
+	.long	cpu_elf_name
+	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
+	.long	cpu_80219_name
+	.long	xscale_processor_functions
+	.long	v4wbi_tlb_fns
+	.long	xscale_mc_user_fns
+	.long	xscale_cache_fns
+	.size	__80219_proc_info, . - __80219_proc_info
+
 	.type	__8032x_proc_info,#object
 __8032x_proc_info:
 	.long	0x69052420
-	.long	0xfffff5e0	@ mask should accomodate IOP80219 also
+	.long	0xffffffe0
 	.long	PMD_TYPE_SECT | \
 		PMD_SECT_BUFFERABLE | \
 		PMD_SECT_CACHEABLE | \
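
The largest functional change above is the new flush_ptrace_access() in arch/arm/mm/flush.c, which picks a cache-maintenance strategy per cache type: on VIVT it flushes the kernel alias only if the target mm is live on this CPU, on aliasing VIPT it flushes through a pfn alias of the user address, and on non-aliasing VIPT it flushes only the kernel mapping and only for executable VMAs. The sketch below is a standalone, compilable illustration of how such a helper is typically reached when ptrace patches another task's text through a copy_to_user_page()-style wrapper; the stub types, the wrapper body, and all constants here are simplified assumptions for illustration, not code from this diff or the real ARM headers.

/*
 * Standalone sketch, not kernel code.  A ptrace write to another task's
 * text goes through a kernel-side mapping of the page; a wrapper in the
 * copy_to_user_page() style then calls the architecture hook so the
 * target's instruction stream sees the new bytes.
 */
#include <stdio.h>
#include <string.h>

struct page { unsigned long pfn; };                 /* stand-in type */
struct vm_area_struct { unsigned long vm_flags; };  /* stand-in type */
#define VM_EXEC 0x4UL

/* Stand-in for the ARM helper added above: only bother flushing when the
 * mapping is executable (the non-aliasing VIPT case in the real code). */
static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
                                unsigned long uaddr, void *kaddr,
                                unsigned long len, int write)
{
	(void)page; (void)write;
	if (vma->vm_flags & VM_EXEC)
		printf("flush %lu bytes at kernel alias %p (user addr %#lx)\n",
		       len, kaddr, uaddr);
}

/* Simplified copy_to_user_page(): patch via the kernel alias, then flush. */
static void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                              unsigned long uaddr, void *dst, const void *src,
                              unsigned long len)
{
	memcpy(dst, src, len);
	flush_ptrace_access(vma, page, uaddr, dst, len, 1);
}

int main(void)
{
	unsigned char text_page[64];                 /* pretend target page */
	struct page pg = { .pfn = 0x1234 };
	struct vm_area_struct vma = { .vm_flags = VM_EXEC };
	const unsigned char bkpt[4] = { 0xf0, 0x01, 0xf0, 0xe7 };  /* placeholder */

	copy_to_user_page(&vma, &pg, 0x8000, text_page, bkpt, sizeof bkpt);
	return 0;
}

In the real kernel the equivalent wrapper lives in the architecture's cacheflush header and is reached via the access_process_vm() path; everything above is stubbed purely so the control flow can be compiled and followed in isolation.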