Diffstat (limited to 'arch/arm/mm')
37 files changed, 278 insertions, 166 deletions
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S index 86aa689ef1a..99fa688dfad 100644 --- a/arch/arm/mm/cache-v6.S +++ b/arch/arm/mm/cache-v6.S @@ -21,18 +21,22 @@ #define D_CACHE_LINE_SIZE 32 #define BTB_FLUSH_SIZE 8 -#ifdef CONFIG_ARM_ERRATA_411920 /* - * Invalidate the entire I cache (this code is a workaround for the ARM1136 - * erratum 411920 - Invalidate Instruction Cache operation can fail. This - * erratum is present in 1136, 1156 and 1176. It does not affect the MPCore. + * v6_flush_icache_all() + * + * Flush the whole I-cache. * - * Registers: - * r0 - set to 0 - * r1 - corrupted + * ARM1136 erratum 411920 - Invalidate Instruction Cache operation can fail. + * This erratum is present in 1136, 1156 and 1176. It does not affect the + * MPCore. + * + * Registers: + * r0 - set to 0 + * r1 - corrupted */ -ENTRY(v6_icache_inval_all) +ENTRY(v6_flush_icache_all) mov r0, #0 +#ifdef CONFIG_ARM_ERRATA_411920 mrs r1, cpsr cpsid ifa @ disable interrupts mcr p15, 0, r0, c7, c5, 0 @ invalidate entire I-cache @@ -43,8 +47,11 @@ ENTRY(v6_icache_inval_all) .rept 11 @ ARM Ltd recommends at least nop @ 11 NOPs .endr - mov pc, lr +#else + mcr p15, 0, r0, c7, c5, 0 @ invalidate I-cache #endif + mov pc, lr +ENDPROC(v6_flush_icache_all) /* * v6_flush_cache_all() @@ -60,7 +67,7 @@ ENTRY(v6_flush_kern_cache_all) #ifndef CONFIG_ARM_ERRATA_411920 mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate #else - b v6_icache_inval_all + b v6_flush_icache_all #endif #else mcr p15, 0, r0, c7, c15, 0 @ Cache clean+invalidate @@ -138,7 +145,7 @@ ENTRY(v6_coherent_user_range) #ifndef CONFIG_ARM_ERRATA_411920 mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate #else - b v6_icache_inval_all + b v6_flush_icache_all #endif #else mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB @@ -312,6 +319,7 @@ ENDPROC(v6_dma_unmap_area) .type v6_cache_fns, #object ENTRY(v6_cache_fns) + .long v6_flush_icache_all .long v6_flush_kern_cache_all .long v6_flush_user_cache_all .long v6_flush_user_cache_range diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S index 37c8157e116..a3ebf7a4f49 100644 --- a/arch/arm/mm/cache-v7.S +++ b/arch/arm/mm/cache-v7.S @@ -18,6 +18,21 @@ #include "proc-macros.S" /* + * v7_flush_icache_all() + * + * Flush the whole I-cache. + * + * Registers: + * r0 - set to 0 + */ +ENTRY(v7_flush_icache_all) + mov r0, #0 + ALT_SMP(mcr p15, 0, r0, c7, c1, 0) @ invalidate I-cache inner shareable + ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate + mov pc, lr +ENDPROC(v7_flush_icache_all) + +/* * v7_flush_dcache_all() * * Flush the whole D-cache. 
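The new v6_flush_icache_all()/v7_flush_icache_all() entry points are exported through the first slot of the v6/v7 cache_fns tables above. As a rough C-side sketch (the struct and helper names here are illustrative, not taken from this diff; the authoritative definition lives in asm/cacheflush.h), a multi-cache kernel reaches them through the per-CPU method table instead of open-coding the CP15 access:

	/* Sketch only: members listed in the same order as the .long table above. */
	struct cpu_cache_fns {
		void (*flush_icache_all)(void);		/* new first entry */
		void (*flush_kern_all)(void);
		/* ... remaining cache methods ... */
	};

	extern struct cpu_cache_fns cpu_cache;

	static inline void flush_icache_all_example(void)
	{
		cpu_cache.flush_icache_all();	/* e.g. v6_flush_icache_all or v7_flush_icache_all */
	}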
@@ -91,11 +106,8 @@ ENTRY(v7_flush_kern_cache_all) THUMB( stmfd sp!, {r4-r7, r9-r11, lr} ) bl v7_flush_dcache_all mov r0, #0 -#ifdef CONFIG_SMP - mcr p15, 0, r0, c7, c1, 0 @ invalidate I-cache inner shareable -#else - mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate -#endif + ALT_SMP(mcr p15, 0, r0, c7, c1, 0) @ invalidate I-cache inner shareable + ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} ) THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} ) mov pc, lr @@ -171,11 +183,8 @@ ENTRY(v7_coherent_user_range) cmp r0, r1 blo 1b mov r0, #0 -#ifdef CONFIG_SMP - mcr p15, 0, r0, c7, c1, 6 @ invalidate BTB Inner Shareable -#else - mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB -#endif + ALT_SMP(mcr p15, 0, r0, c7, c1, 6) @ invalidate BTB Inner Shareable + ALT_UP(mcr p15, 0, r0, c7, c5, 6) @ invalidate BTB dsb isb mov pc, lr @@ -309,6 +318,7 @@ ENDPROC(v7_dma_unmap_area) .type v7_cache_fns, #object ENTRY(v7_cache_fns) + .long v7_flush_icache_all .long v7_flush_kern_cache_all .long v7_flush_user_cache_all .long v7_flush_user_cache_range diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c index 598c51ad507..b8061519ce7 100644 --- a/arch/arm/mm/copypage-v4mc.c +++ b/arch/arm/mm/copypage-v4mc.c @@ -73,7 +73,7 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from, { void *kto = kmap_atomic(to, KM_USER1); - if (test_and_clear_bit(PG_dcache_dirty, &from->flags)) + if (!test_and_set_bit(PG_dcache_clean, &from->flags)) __flush_dcache_page(page_mapping(from), from); spin_lock(&minicache_lock); diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c index f55fa1044f7..bdba6c65c90 100644 --- a/arch/arm/mm/copypage-v6.c +++ b/arch/arm/mm/copypage-v6.c @@ -79,7 +79,7 @@ static void v6_copy_user_highpage_aliasing(struct page *to, unsigned int offset = CACHE_COLOUR(vaddr); unsigned long kfrom, kto; - if (test_and_clear_bit(PG_dcache_dirty, &from->flags)) + if (!test_and_set_bit(PG_dcache_clean, &from->flags)) __flush_dcache_page(page_mapping(from), from); /* FIXME: not highmem safe */ diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c index 9920c0ae209..649bbcd325b 100644 --- a/arch/arm/mm/copypage-xscale.c +++ b/arch/arm/mm/copypage-xscale.c @@ -95,7 +95,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from, { void *kto = kmap_atomic(to, KM_USER1); - if (test_and_clear_bit(PG_dcache_dirty, &from->flags)) + if (!test_and_set_bit(PG_dcache_clean, &from->flags)) __flush_dcache_page(page_mapping(from), from); spin_lock(&minicache_lock); diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 4bc43e535d3..e4dd0646e85 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -523,6 +523,12 @@ void ___dma_page_dev_to_cpu(struct page *page, unsigned long off, outer_inv_range(paddr, paddr + size); dma_cache_maint_page(page, off, size, dir, dmac_unmap_area); + + /* + * Mark the D-cache clean for this page to avoid extra flushing. + */ + if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE) + set_bit(PG_dcache_clean, &page->flags); } EXPORT_SYMBOL(___dma_page_dev_to_cpu); diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c index 9b906dec1ca..8440d952ba6 100644 --- a/arch/arm/mm/fault-armv.c +++ b/arch/arm/mm/fault-armv.c @@ -28,6 +28,7 @@ static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE; +#if __LINUX_ARM_ARCH__ < 6 /* * We take the easy way out of this problem - we make the * PTE uncacheable. However, we leave the write buffer on. 
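The copypage and dma-mapping hunks above all switch to the same inverted idiom: the per-page bit changes polarity from PG_dcache_dirty ("kernel mapping may be dirty") to PG_dcache_clean ("kernel mapping already flushed"), so a freshly allocated page, whose flags start out clear, defaults to "flush before first use". A minimal sketch of the pattern (the helper name is hypothetical):

	#include <linux/mm.h>
	#include <asm/cacheflush.h>

	/* Hypothetical helper, for illustration only. */
	static void example_sync_kernel_mapping(struct page *page,
						struct address_space *mapping)
	{
		/* previously: if (test_and_clear_bit(PG_dcache_dirty, &page->flags)) */
		if (!test_and_set_bit(PG_dcache_clean, &page->flags))
			__flush_dcache_page(mapping, page);
	}

The same polarity is what lets ___dma_page_dev_to_cpu() simply set PG_dcache_clean after invalidating a whole page, so later faults skip a redundant flush.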
@@ -141,7 +142,7 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma, * a page table, or changing an existing PTE. Basically, there are two * things that we need to take care of: * - * 1. If PG_dcache_dirty is set for the page, we need to ensure + * 1. If PG_dcache_clean is not set for the page, we need to ensure * that any cache entries for the kernels virtual memory * range are written back to the page. * 2. If we have multiple shared mappings of the same space in @@ -168,10 +169,8 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, return; mapping = page_mapping(page); -#ifndef CONFIG_SMP - if (test_and_clear_bit(PG_dcache_dirty, &page->flags)) + if (!test_and_set_bit(PG_dcache_clean, &page->flags)) __flush_dcache_page(mapping, page); -#endif if (mapping) { if (cache_is_vivt()) make_coherent(mapping, vma, addr, ptep, pfn); @@ -179,6 +178,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, __flush_icache_all(); } } +#endif /* __LINUX_ARM_ARCH__ < 6 */ /* * Check whether the write buffer has physical address aliasing diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index 23b0b03af5e..1e21e125fe3 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -581,6 +581,19 @@ static struct fsr_info ifsr_info[] = { { do_bad, SIGBUS, 0, "unknown 31" }, }; +void __init +hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *), + int sig, int code, const char *name) +{ + if (nr < 0 || nr >= ARRAY_SIZE(ifsr_info)) + BUG(); + + ifsr_info[nr].fn = fn; + ifsr_info[nr].sig = sig; + ifsr_info[nr].code = code; + ifsr_info[nr].name = name; +} + asmlinkage void __exception do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs) { diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index c6844cb9b50..391ffae7509 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c @@ -17,6 +17,7 @@ #include <asm/smp_plat.h> #include <asm/system.h> #include <asm/tlbflush.h> +#include <asm/smp_plat.h> #include "mm.h" @@ -39,6 +40,18 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr) : "cc"); } +static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len) +{ + unsigned long colour = CACHE_COLOUR(vaddr); + unsigned long offset = vaddr & (PAGE_SIZE - 1); + unsigned long to; + + set_pte_ext(TOP_PTE(ALIAS_FLUSH_START) + colour, pfn_pte(pfn, PAGE_KERNEL), 0); + to = ALIAS_FLUSH_START + (colour << PAGE_SHIFT) + offset; + flush_tlb_kernel_page(to); + flush_icache_range(to, to + len); +} + void flush_cache_mm(struct mm_struct *mm) { if (cache_is_vivt()) { @@ -89,16 +102,16 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged()) __flush_icache_all(); } + #else -#define flush_pfn_alias(pfn,vaddr) do { } while (0) +#define flush_pfn_alias(pfn,vaddr) do { } while (0) +#define flush_icache_alias(pfn,vaddr,len) do { } while (0) #endif -#ifdef CONFIG_SMP static void flush_ptrace_access_other(void *args) { __flush_icache_all(); } -#endif static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, @@ -118,15 +131,16 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, return; } - /* VIPT non-aliasing cache */ + /* VIPT non-aliasing D-cache */ if (vma->vm_flags & VM_EXEC) { unsigned long addr = (unsigned long)kaddr; - __cpuc_coherent_kern_range(addr, addr + len); -#ifdef CONFIG_SMP + if (icache_is_vipt_aliasing()) + 
flush_icache_alias(page_to_pfn(page), uaddr, len); + else + __cpuc_coherent_kern_range(addr, addr + len); if (cache_ops_need_broadcast()) smp_call_function(flush_ptrace_access_other, NULL, 1); -#endif } } @@ -215,6 +229,36 @@ static void __flush_dcache_aliases(struct address_space *mapping, struct page *p flush_dcache_mmap_unlock(mapping); } +#if __LINUX_ARM_ARCH__ >= 6 +void __sync_icache_dcache(pte_t pteval) +{ + unsigned long pfn; + struct page *page; + struct address_space *mapping; + + if (!pte_present_user(pteval)) + return; + if (cache_is_vipt_nonaliasing() && !pte_exec(pteval)) + /* only flush non-aliasing VIPT caches for exec mappings */ + return; + pfn = pte_pfn(pteval); + if (!pfn_valid(pfn)) + return; + + page = pfn_to_page(pfn); + if (cache_is_vipt_aliasing()) + mapping = page_mapping(page); + else + mapping = NULL; + + if (!test_and_set_bit(PG_dcache_clean, &page->flags)) + __flush_dcache_page(mapping, page); + /* pte_exec() already checked above for non-aliasing VIPT cache */ + if (cache_is_vipt_nonaliasing() || pte_exec(pteval)) + __flush_icache_all(); +} +#endif + /* * Ensure cache coherency between kernel mapping and userspace mapping * of this page. @@ -246,17 +290,16 @@ void flush_dcache_page(struct page *page) mapping = page_mapping(page); -#ifndef CONFIG_SMP - if (!PageHighMem(page) && mapping && !mapping_mapped(mapping)) - set_bit(PG_dcache_dirty, &page->flags); - else -#endif - { + if (!cache_ops_need_broadcast() && + mapping && !mapping_mapped(mapping)) + clear_bit(PG_dcache_clean, &page->flags); + else { __flush_dcache_page(mapping, page); if (mapping && cache_is_vivt()) __flush_dcache_aliases(mapping, page); else if (mapping) __flush_icache_all(); + set_bit(PG_dcache_clean, &page->flags); } } EXPORT_SYMBOL(flush_dcache_page); diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 7185b00650f..7fd9b5eb177 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -150,6 +150,7 @@ static void __init find_limits(struct meminfo *mi, static void __init arm_bootmem_init(struct meminfo *mi, unsigned long start_pfn, unsigned long end_pfn) { + struct memblock_region *reg; unsigned int boot_pages; phys_addr_t bitmap; pg_data_t *pgdat; @@ -180,13 +181,13 @@ static void __init arm_bootmem_init(struct meminfo *mi, /* * Reserve the memblock reserved regions in bootmem. 
*/ - for (i = 0; i < memblock.reserved.cnt; i++) { - phys_addr_t start = memblock_start_pfn(&memblock.reserved, i); - if (start >= start_pfn && - memblock_end_pfn(&memblock.reserved, i) <= end_pfn) + for_each_memblock(reserved, reg) { + phys_addr_t start = memblock_region_reserved_base_pfn(reg); + phys_addr_t end = memblock_region_reserved_end_pfn(reg); + if (start >= start_pfn && end <= end_pfn) reserve_bootmem_node(pgdat, __pfn_to_phys(start), - memblock_size_bytes(&memblock.reserved, i), - BOOTMEM_DEFAULT); + (end - start) << PAGE_SHIFT, + BOOTMEM_DEFAULT); } } @@ -237,20 +238,7 @@ static void __init arm_bootmem_free(struct meminfo *mi, unsigned long min, #ifndef CONFIG_SPARSEMEM int pfn_valid(unsigned long pfn) { - struct memblock_region *mem = &memblock.memory; - unsigned int left = 0, right = mem->cnt; - - do { - unsigned int mid = (right + left) / 2; - - if (pfn < memblock_start_pfn(mem, mid)) - right = mid; - else if (pfn >= memblock_end_pfn(mem, mid)) - left = mid + 1; - else - return 1; - } while (left < right); - return 0; + return memblock_is_memory(pfn << PAGE_SHIFT); } EXPORT_SYMBOL(pfn_valid); @@ -260,10 +248,11 @@ static void arm_memory_present(void) #else static void arm_memory_present(void) { - int i; - for (i = 0; i < memblock.memory.cnt; i++) - memory_present(0, memblock_start_pfn(&memblock.memory, i), - memblock_end_pfn(&memblock.memory, i)); + struct memblock_region *reg; + + for_each_memblock(memory, reg) + memory_present(0, memblock_region_memory_base_pfn(reg), + memblock_region_memory_end_pfn(reg)); } #endif @@ -277,7 +266,7 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc) /* Register the kernel text, kernel data and initrd with memblock. */ #ifdef CONFIG_XIP_KERNEL - memblock_reserve(__pa(_data), _end - _data); + memblock_reserve(__pa(_sdata), _end - _sdata); #else memblock_reserve(__pa(_stext), _end - _stext); #endif @@ -545,7 +534,7 @@ void __init mem_init(void) MLK_ROUNDUP(__init_begin, __init_end), MLK_ROUNDUP(_text, _etext), - MLK_ROUNDUP(_data, _edata)); + MLK_ROUNDUP(_sdata, _edata)); #undef MLK #undef MLM diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c index 4f5b39687df..b0a98305055 100644 --- a/arch/arm/mm/mmap.c +++ b/arch/arm/mm/mmap.c @@ -144,3 +144,25 @@ int valid_mmap_phys_addr_range(unsigned long pfn, size_t size) { return !(pfn + (size >> PAGE_SHIFT) > 0x00100000); } + +#ifdef CONFIG_STRICT_DEVMEM + +#include <linux/ioport.h> + +/* + * devmem_is_allowed() checks to see if /dev/mem access to a certain + * address is valid. The argument is a physical page number. + * We mimic x86 here by disallowing access to system RAM as well as + * device-exclusive MMIO regions. This effectively disable read()/write() + * on /dev/mem. + */ +int devmem_is_allowed(unsigned long pfn) +{ + if (iomem_is_exclusive(pfn << PAGE_SHIFT)) + return 0; + if (!page_is_ram(pfn)) + return 1; + return 0; +} + +#endif diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index e8ed9dc461f..c32f731d56d 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -310,9 +310,8 @@ static void __init build_mem_type_table(void) cachepolicy = CPOLICY_WRITEBACK; ecc_mask = 0; } -#ifdef CONFIG_SMP - cachepolicy = CPOLICY_WRITEALLOC; -#endif + if (is_smp()) + cachepolicy = CPOLICY_WRITEALLOC; /* * Strip out features not present on earlier architectures. 
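The mmu.c hunks in this file trade #ifdef CONFIG_SMP for a runtime is_smp() test, which is what lets a single SMP-capable image also boot on uniprocessor hardware. The helper comes from asm/smp_plat.h and looks roughly like this (a sketch; exact details may differ between kernel versions):

	static inline bool is_smp(void)
	{
	#ifndef CONFIG_SMP
		return false;
	#elif defined(CONFIG_SMP_ON_UP)
		extern unsigned int smp_on_up;	/* set by the early boot fixup */
		return !!smp_on_up;
	#else
		return true;
	#endif
	}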
@@ -406,13 +405,11 @@ static void __init build_mem_type_table(void) cp = &cache_policies[cachepolicy]; vecs_pgprot = kern_pgprot = user_pgprot = cp->pte; -#ifndef CONFIG_SMP /* * Only use write-through for non-SMP systems */ - if (cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH) + if (!is_smp() && cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH) vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte; -#endif /* * Enable CPU-specific coherency if supported. @@ -436,22 +433,23 @@ static void __init build_mem_type_table(void) mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; -#ifdef CONFIG_SMP - /* - * Mark memory with the "shared" attribute for SMP systems - */ - user_pgprot |= L_PTE_SHARED; - kern_pgprot |= L_PTE_SHARED; - vecs_pgprot |= L_PTE_SHARED; - mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S; - mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED; - mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S; - mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED; - mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; - mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED; - mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S; - mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED; -#endif + if (is_smp()) { + /* + * Mark memory with the "shared" attribute + * for SMP systems + */ + user_pgprot |= L_PTE_SHARED; + kern_pgprot |= L_PTE_SHARED; + vecs_pgprot |= L_PTE_SHARED; + mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S; + mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED; + mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S; + mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED; + mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; + mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED; + mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S; + mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED; + } } /* @@ -829,8 +827,7 @@ static void __init sanity_check_meminfo(void) * rather difficult. 
*/ reason = "with VIPT aliasing cache"; -#ifdef CONFIG_SMP - } else if (tlb_ops_need_broadcast()) { + } else if (is_smp() && tlb_ops_need_broadcast()) { /* * kmap_high needs to occasionally flush TLB entries, * however, if the TLB entries need to be broadcast @@ -840,7 +837,6 @@ static void __init sanity_check_meminfo(void) * (must not be called with irqs off) */ reason = "without hardware TLB ops broadcasting"; -#endif } if (reason) { printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n", diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S index 203a4e944d9..a6f5f8475b9 100644 --- a/arch/arm/mm/proc-arm1020.S +++ b/arch/arm/mm/proc-arm1020.S @@ -430,7 +430,7 @@ ENTRY(cpu_arm1020_set_pte_ext) #endif /* CONFIG_MMU */ mov pc, lr - __INIT + __CPUINIT .type __arm1020_setup, #function __arm1020_setup: diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S index 1a511e76590..afc06b9c313 100644 --- a/arch/arm/mm/proc-arm1020e.S +++ b/arch/arm/mm/proc-arm1020e.S @@ -412,7 +412,7 @@ ENTRY(cpu_arm1020e_set_pte_ext) #endif /* CONFIG_MMU */ mov pc, lr - __INIT + __CPUINIT .type __arm1020e_setup, #function __arm1020e_setup: diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S index 1ffa4eb9c34..8915e0ba3fe 100644 --- a/arch/arm/mm/proc-arm1022.S +++ b/arch/arm/mm/proc-arm1022.S @@ -394,7 +394,7 @@ ENTRY(cpu_arm1022_set_pte_ext) #endif /* CONFIG_MMU */ mov pc, lr - __INIT + __CPUINIT .type __arm1022_setup, #function __arm1022_setup: diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S index 5697c34b95b..ff446c5d476 100644 --- a/arch/arm/mm/proc-arm1026.S +++ b/arch/arm/mm/proc-arm1026.S @@ -384,7 +384,7 @@ ENTRY(cpu_arm1026_set_pte_ext) mov pc, lr - __INIT + __CPUINIT .type __arm1026_setup, #function __arm1026_setup: diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S index 64e0b327c7c..6a7be1863ed 100644 --- a/arch/arm/mm/proc-arm6_7.S +++ b/arch/arm/mm/proc-arm6_7.S @@ -238,7 +238,7 @@ ENTRY(cpu_arm7_reset) mcr p15, 0, r1, c1, c0, 0 @ turn off MMU etc mov pc, r0 - __INIT + __CPUINIT .type __arm6_setup, #function __arm6_setup: mov r0, #0 diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S index 9d96824134f..c285395f44b 100644 --- a/arch/arm/mm/proc-arm720.S +++ b/arch/arm/mm/proc-arm720.S @@ -113,7 +113,7 @@ ENTRY(cpu_arm720_reset) mcr p15, 0, ip, c1, c0, 0 @ ctrl register mov pc, r0 - __INIT + __CPUINIT .type __arm710_setup, #function __arm710_setup: diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S index 6c1a9ab059a..38b27dcba72 100644 --- a/arch/arm/mm/proc-arm740.S +++ b/arch/arm/mm/proc-arm740.S @@ -55,7 +55,7 @@ ENTRY(cpu_arm740_reset) mcr p15, 0, ip, c1, c0, 0 @ ctrl register mov pc, r0 - __INIT + __CPUINIT .type __arm740_setup, #function __arm740_setup: diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S index 6a850dbba22..0c9786de20a 100644 --- a/arch/arm/mm/proc-arm7tdmi.S +++ b/arch/arm/mm/proc-arm7tdmi.S @@ -46,7 +46,7 @@ ENTRY(cpu_arm7tdmi_proc_fin) ENTRY(cpu_arm7tdmi_reset) mov pc, r0 - __INIT + __CPUINIT .type __arm7tdmi_setup, #function __arm7tdmi_setup: diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S index 86f80aa5621..fecf570939f 100644 --- a/arch/arm/mm/proc-arm920.S +++ b/arch/arm/mm/proc-arm920.S @@ -375,7 +375,7 @@ ENTRY(cpu_arm920_set_pte_ext) #endif mov pc, lr - __INIT + __CPUINIT .type __arm920_setup, #function __arm920_setup: diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S index 
f76ce9b6288..e3cbf87c948 100644 --- a/arch/arm/mm/proc-arm922.S +++ b/arch/arm/mm/proc-arm922.S @@ -379,7 +379,7 @@ ENTRY(cpu_arm922_set_pte_ext) #endif /* CONFIG_MMU */ mov pc, lr - __INIT + __CPUINIT .type __arm922_setup, #function __arm922_setup: diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S index 657bd3f7c15..572424c867b 100644 --- a/arch/arm/mm/proc-arm925.S +++ b/arch/arm/mm/proc-arm925.S @@ -428,7 +428,7 @@ ENTRY(cpu_arm925_set_pte_ext) #endif /* CONFIG_MMU */ mov pc, lr - __INIT + __CPUINIT .type __arm925_setup, #function __arm925_setup: diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S index 73f1f3c6891..63d168b4ebe 100644 --- a/arch/arm/mm/proc-arm926.S +++ b/arch/arm/mm/proc-arm926.S @@ -389,7 +389,7 @@ ENTRY(cpu_arm926_set_pte_ext) #endif mov pc, lr - __INIT + __CPUINIT .type __arm926_setup, #function __arm926_setup: diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S index fffb061a45a..f6a62822418 100644 --- a/arch/arm/mm/proc-arm940.S +++ b/arch/arm/mm/proc-arm940.S @@ -264,7 +264,7 @@ ENTRY(arm940_cache_fns) .long arm940_dma_unmap_area .long arm940_dma_flush_range - __INIT + __CPUINIT .type __arm940_setup, #function __arm940_setup: diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S index 249a6053760..ea2e7f2eb95 100644 --- a/arch/arm/mm/proc-arm946.S +++ b/arch/arm/mm/proc-arm946.S @@ -317,7 +317,7 @@ ENTRY(cpu_arm946_dcache_clean_area) mcr p15, 0, r0, c7, c10, 4 @ drain WB mov pc, lr - __INIT + __CPUINIT .type __arm946_setup, #function __arm946_setup: diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S index db475667fac..db67e3134d7 100644 --- a/arch/arm/mm/proc-arm9tdmi.S +++ b/arch/arm/mm/proc-arm9tdmi.S @@ -46,7 +46,7 @@ ENTRY(cpu_arm9tdmi_proc_fin) ENTRY(cpu_arm9tdmi_reset) mov pc, r0 - __INIT + __CPUINIT .type __arm9tdmi_setup, #function __arm9tdmi_setup: diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S index 7803fdf7002..7c9ad621f0e 100644 --- a/arch/arm/mm/proc-fa526.S +++ b/arch/arm/mm/proc-fa526.S @@ -134,7 +134,7 @@ ENTRY(cpu_fa526_set_pte_ext) #endif mov pc, lr - __INIT + __CPUINIT .type __fa526_setup, #function __fa526_setup: diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S index b304d0104a4..578da69200c 100644 --- a/arch/arm/mm/proc-feroceon.S +++ b/arch/arm/mm/proc-feroceon.S @@ -494,7 +494,7 @@ ENTRY(cpu_feroceon_set_pte_ext) #endif mov pc, lr - __INIT + __CPUINIT .type __feroceon_setup, #function __feroceon_setup: diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S index 5f6892fcc16..4458ee6aa71 100644 --- a/arch/arm/mm/proc-mohawk.S +++ b/arch/arm/mm/proc-mohawk.S @@ -338,7 +338,7 @@ ENTRY(cpu_mohawk_set_pte_ext) mcr p15, 0, r0, c7, c10, 4 @ drain WB mov pc, lr - __INIT + __CPUINIT .type __mohawk_setup, #function __mohawk_setup: diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S index a201eb04b5e..5aa8d59c2e8 100644 --- a/arch/arm/mm/proc-sa110.S +++ b/arch/arm/mm/proc-sa110.S @@ -156,7 +156,7 @@ ENTRY(cpu_sa110_set_pte_ext) #endif mov pc, lr - __INIT + __CPUINIT .type __sa110_setup, #function __sa110_setup: diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S index 7ddc4805bf9..2ac4e6f1071 100644 --- a/arch/arm/mm/proc-sa1100.S +++ b/arch/arm/mm/proc-sa1100.S @@ -169,7 +169,7 @@ ENTRY(cpu_sa1100_set_pte_ext) #endif mov pc, lr - __INIT + __CPUINIT .type __sa1100_setup, #function __sa1100_setup: diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S index 22aac851519..59a7e1ffe7b 100644 
--- a/arch/arm/mm/proc-v6.S +++ b/arch/arm/mm/proc-v6.S @@ -30,13 +30,10 @@ #define TTB_RGN_WT (2 << 3) #define TTB_RGN_WB (3 << 3) -#ifndef CONFIG_SMP -#define TTB_FLAGS TTB_RGN_WBWA -#define PMD_FLAGS PMD_SECT_WB -#else -#define TTB_FLAGS TTB_RGN_WBWA|TTB_S -#define PMD_FLAGS PMD_SECT_WBWA|PMD_SECT_S -#endif +#define TTB_FLAGS_UP TTB_RGN_WBWA +#define PMD_FLAGS_UP PMD_SECT_WB +#define TTB_FLAGS_SMP TTB_RGN_WBWA|TTB_S +#define PMD_FLAGS_SMP PMD_SECT_WBWA|PMD_SECT_S ENTRY(cpu_v6_proc_init) mov pc, lr @@ -97,7 +94,8 @@ ENTRY(cpu_v6_switch_mm) #ifdef CONFIG_MMU mov r2, #0 ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id - orr r0, r0, #TTB_FLAGS + ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP) + ALT_UP(orr r0, r0, #TTB_FLAGS_UP) mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB mcr p15, 0, r2, c7, c10, 4 @ drain write buffer mcr p15, 0, r0, c2, c0, 0 @ set TTB 0 @@ -137,7 +135,7 @@ cpu_pj4_name: .align - __INIT + __CPUINIT /* * __v6_setup @@ -156,9 +154,11 @@ cpu_pj4_name: */ __v6_setup: #ifdef CONFIG_SMP - mrc p15, 0, r0, c1, c0, 1 @ Enable SMP/nAMP mode + ALT_SMP(mrc p15, 0, r0, c1, c0, 1) @ Enable SMP/nAMP mode + ALT_UP(nop) orr r0, r0, #0x20 - mcr p15, 0, r0, c1, c0, 1 + ALT_SMP(mcr p15, 0, r0, c1, c0, 1) + ALT_UP(nop) #endif mov r0, #0 @@ -169,7 +169,8 @@ __v6_setup: #ifdef CONFIG_MMU mcr p15, 0, r0, c8, c7, 0 @ invalidate I + D TLBs mcr p15, 0, r0, c2, c0, 2 @ TTB control register - orr r4, r4, #TTB_FLAGS + ALT_SMP(orr r4, r4, #TTB_FLAGS_SMP) + ALT_UP(orr r4, r4, #TTB_FLAGS_UP) mcr p15, 0, r4, c2, c0, 1 @ load TTB1 #endif /* CONFIG_MMU */ adr r5, v6_crval @@ -192,6 +193,8 @@ __v6_setup: v6_crval: crval clear=0x01e0fb7f, mmuset=0x00c0387d, ucset=0x00c0187c + __INITDATA + .type v6_processor_functions, #object ENTRY(v6_processor_functions) .word v6_early_abort @@ -205,6 +208,8 @@ ENTRY(v6_processor_functions) .word cpu_v6_set_pte_ext .size v6_processor_functions, . 
- v6_processor_functions + .section ".rodata" + .type cpu_arch_name, #object cpu_arch_name: .asciz "armv6" @@ -225,10 +230,16 @@ cpu_elf_name: __v6_proc_info: .long 0x0007b000 .long 0x0007f000 - .long PMD_TYPE_SECT | \ + ALT_SMP(.long \ + PMD_TYPE_SECT | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ | \ - PMD_FLAGS + PMD_FLAGS_SMP) + ALT_UP(.long \ + PMD_TYPE_SECT | \ + PMD_SECT_AP_WRITE | \ + PMD_SECT_AP_READ | \ + PMD_FLAGS_UP) .long PMD_TYPE_SECT | \ PMD_SECT_XN | \ PMD_SECT_AP_WRITE | \ @@ -249,10 +260,16 @@ __v6_proc_info: __pj4_v6_proc_info: .long 0x560f5810 .long 0xff0ffff0 - .long PMD_TYPE_SECT | \ + ALT_SMP(.long \ + PMD_TYPE_SECT | \ + PMD_SECT_AP_WRITE | \ + PMD_SECT_AP_READ | \ + PMD_FLAGS_SMP) + ALT_UP(.long \ + PMD_TYPE_SECT | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ | \ - PMD_FLAGS + PMD_FLAGS_UP) .long PMD_TYPE_SECT | \ PMD_SECT_XN | \ PMD_SECT_AP_WRITE | \ diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 197f21bed5e..53cbe222515 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -30,15 +30,13 @@ #define TTB_IRGN_WT ((1 << 0) | (0 << 6)) #define TTB_IRGN_WB ((1 << 0) | (1 << 6)) -#ifndef CONFIG_SMP /* PTWs cacheable, inner WB not shareable, outer WB not shareable */ -#define TTB_FLAGS TTB_IRGN_WB|TTB_RGN_OC_WB -#define PMD_FLAGS PMD_SECT_WB -#else +#define TTB_FLAGS_UP TTB_IRGN_WB|TTB_RGN_OC_WB +#define PMD_FLAGS_UP PMD_SECT_WB + /* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */ -#define TTB_FLAGS TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA -#define PMD_FLAGS PMD_SECT_WBWA|PMD_SECT_S -#endif +#define TTB_FLAGS_SMP TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA +#define PMD_FLAGS_SMP PMD_SECT_WBWA|PMD_SECT_S ENTRY(cpu_v7_proc_init) mov pc, lr @@ -105,7 +103,8 @@ ENTRY(cpu_v7_switch_mm) #ifdef CONFIG_MMU mov r2, #0 ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id - orr r0, r0, #TTB_FLAGS + ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP) + ALT_UP(orr r0, r0, #TTB_FLAGS_UP) #ifdef CONFIG_ARM_ERRATA_430973 mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB #endif @@ -169,7 +168,7 @@ cpu_v7_name: .ascii "ARMv7 Processor" .align - __INIT + __CPUINIT /* * __v7_setup @@ -188,7 +187,8 @@ cpu_v7_name: */ __v7_ca9mp_setup: #ifdef CONFIG_SMP - mrc p15, 0, r0, c1, c0, 1 + ALT_SMP(mrc p15, 0, r0, c1, c0, 1) + ALT_UP(mov r0, #(1 << 6)) @ fake it for UP tst r0, #(1 << 6) @ SMP/nAMP mode enabled? orreq r0, r0, #(1 << 6) | (1 << 0) @ Enable SMP/nAMP mode and mcreq p15, 0, r0, c1, c0, 1 @ TLB ops broadcasting @@ -270,7 +270,8 @@ __v7_setup: #ifdef CONFIG_MMU mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs mcr p15, 0, r10, c2, c0, 2 @ TTB control register - orr r4, r4, #TTB_FLAGS + ALT_SMP(orr r4, r4, #TTB_FLAGS_SMP) + ALT_UP(orr r4, r4, #TTB_FLAGS_UP) mcr p15, 0, r4, c2, c0, 1 @ load TTB1 mov r10, #0x1f @ domains 0, 1 = manager mcr p15, 0, r10, c3, c0, 0 @ load domain access register @@ -332,6 +333,8 @@ v7_crval: __v7_setup_stack: .space 4 * 11 @ 11 registers + __INITDATA + .type v7_processor_functions, #object ENTRY(v7_processor_functions) .word v7_early_abort @@ -345,6 +348,8 @@ ENTRY(v7_processor_functions) .word cpu_v7_set_pte_ext .size v7_processor_functions, . 
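ALT_SMP()/ALT_UP(), used throughout proc-v6.S, proc-v7.S and tlb-v7.S, are the assembler side of the same SMP_ON_UP idea: the SMP-friendly instruction is assembled in place, while the UP alternative is recorded in a separate init-time section and patched over it when the kernel discovers it is running on a uniprocessor. A C-level sketch of that boot-time fixup follows; the real fixup runs from early assembly, and the section and symbol names below are invented purely for illustration:

	#include <linux/init.h>
	#include <asm/smp_plat.h>

	struct alt_smp_entry {
		unsigned long insn_addr;	/* address of the ALT_SMP() instruction */
		unsigned int  up_insn;		/* replacement recorded by ALT_UP() */
	};

	/* hypothetical section bounds for the recorded alternatives */
	extern struct alt_smp_entry __alt_smp_begin[], __alt_smp_end[];

	static void __init fixup_smp_on_up_sketch(void)
	{
		struct alt_smp_entry *e;

		if (is_smp())
			return;			/* keep the SMP-optimised code */

		for (e = __alt_smp_begin; e < __alt_smp_end; e++)
			*(unsigned int *)e->insn_addr = e->up_insn;

		/* I-cache/D-cache maintenance after patching is omitted here */
	}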
- v7_processor_functions + .section ".rodata" + .type cpu_arch_name, #object cpu_arch_name: .asciz "armv7" @@ -362,10 +367,16 @@ cpu_elf_name: __v7_ca9mp_proc_info: .long 0x410fc090 @ Required ID value .long 0xff0ffff0 @ Mask for ID - .long PMD_TYPE_SECT | \ + ALT_SMP(.long \ + PMD_TYPE_SECT | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ | \ - PMD_FLAGS + PMD_FLAGS_SMP) + ALT_UP(.long \ + PMD_TYPE_SECT | \ + PMD_SECT_AP_WRITE | \ + PMD_SECT_AP_READ | \ + PMD_FLAGS_UP) .long PMD_TYPE_SECT | \ PMD_SECT_XN | \ PMD_SECT_AP_WRITE | \ @@ -388,10 +399,16 @@ __v7_ca9mp_proc_info: __v7_proc_info: .long 0x000f0000 @ Required ID value .long 0x000f0000 @ Mask for ID - .long PMD_TYPE_SECT | \ + ALT_SMP(.long \ + PMD_TYPE_SECT | \ + PMD_SECT_AP_WRITE | \ + PMD_SECT_AP_READ | \ + PMD_FLAGS_SMP) + ALT_UP(.long \ + PMD_TYPE_SECT | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ | \ - PMD_FLAGS + PMD_FLAGS_UP) .long PMD_TYPE_SECT | \ PMD_SECT_XN | \ PMD_SECT_AP_WRITE | \ diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S index 361a51e4903..cad07e40304 100644 --- a/arch/arm/mm/proc-xsc3.S +++ b/arch/arm/mm/proc-xsc3.S @@ -404,7 +404,7 @@ ENTRY(cpu_xsc3_set_pte_ext) .align - __INIT + __CPUINIT .type __xsc3_setup, #function __xsc3_setup: diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S index 14075979bcb..cb245edb2c2 100644 --- a/arch/arm/mm/proc-xscale.S +++ b/arch/arm/mm/proc-xscale.S @@ -506,7 +506,7 @@ ENTRY(cpu_xscale_set_pte_ext) .align - __INIT + __CPUINIT .type __xscale_setup, #function __xscale_setup: diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S index f3f288a9546..53cd5b45467 100644 --- a/arch/arm/mm/tlb-v7.S +++ b/arch/arm/mm/tlb-v7.S @@ -13,6 +13,7 @@ */ #include <linux/init.h> #include <linux/linkage.h> +#include <asm/assembler.h> #include <asm/asm-offsets.h> #include <asm/page.h> #include <asm/tlbflush.h> @@ -41,20 +42,15 @@ ENTRY(v7wbi_flush_user_tlb_range) orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA mov r1, r1, lsl #PAGE_SHIFT 1: -#ifdef CONFIG_SMP - mcr p15, 0, r0, c8, c3, 1 @ TLB invalidate U MVA (shareable) -#else - mcr p15, 0, r0, c8, c7, 1 @ TLB invalidate U MVA -#endif + ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable) + ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA + add r0, r0, #PAGE_SZ cmp r0, r1 blo 1b mov ip, #0 -#ifdef CONFIG_SMP - mcr p15, 0, ip, c7, c1, 6 @ flush BTAC/BTB Inner Shareable -#else - mcr p15, 0, ip, c7, c5, 6 @ flush BTAC/BTB -#endif + ALT_SMP(mcr p15, 0, ip, c7, c1, 6) @ flush BTAC/BTB Inner Shareable + ALT_UP(mcr p15, 0, ip, c7, c5, 6) @ flush BTAC/BTB dsb mov pc, lr ENDPROC(v7wbi_flush_user_tlb_range) @@ -74,20 +70,14 @@ ENTRY(v7wbi_flush_kern_tlb_range) mov r0, r0, lsl #PAGE_SHIFT mov r1, r1, lsl #PAGE_SHIFT 1: -#ifdef CONFIG_SMP - mcr p15, 0, r0, c8, c3, 1 @ TLB invalidate U MVA (shareable) -#else - mcr p15, 0, r0, c8, c7, 1 @ TLB invalidate U MVA -#endif + ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable) + ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA add r0, r0, #PAGE_SZ cmp r0, r1 blo 1b mov r2, #0 -#ifdef CONFIG_SMP - mcr p15, 0, r2, c7, c1, 6 @ flush BTAC/BTB Inner Shareable -#else - mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB -#endif + ALT_SMP(mcr p15, 0, r2, c7, c1, 6) @ flush BTAC/BTB Inner Shareable + ALT_UP(mcr p15, 0, r2, c7, c5, 6) @ flush BTAC/BTB dsb isb mov pc, lr @@ -99,5 +89,6 @@ ENDPROC(v7wbi_flush_kern_tlb_range) ENTRY(v7wbi_tlb_fns) .long v7wbi_flush_user_tlb_range .long v7wbi_flush_kern_tlb_range - .long v7wbi_tlb_flags + ALT_SMP(.long 
v7wbi_tlb_flags_smp)
+	ALT_UP(.long v7wbi_tlb_flags_up)
 	.size	v7wbi_tlb_fns, . - v7wbi_tlb_fns
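With the final slot of v7wbi_tlb_fns also wrapped in an ALT_SMP/ALT_UP pair, the TLB flags word itself is selected at boot rather than at build time. Seen from C, the table filled in above corresponds to something like the following sketch (the authoritative layout is struct cpu_tlb_fns in asm/tlbflush.h):

	#include <linux/mm_types.h>

	/* Sketch of the method table behind ENTRY(v7wbi_tlb_fns): with
	 * SMP_ON_UP the tlb_flags slot holds v7wbi_tlb_flags_smp and is
	 * patched to v7wbi_tlb_flags_up when booting on a uniprocessor. */
	struct cpu_tlb_fns {
		void (*flush_user_range)(unsigned long start, unsigned long end,
					 struct vm_area_struct *vma);
		void (*flush_kern_range)(unsigned long start, unsigned long end);
		unsigned long tlb_flags;
	};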