| author | Russell King <rmk+kernel@arm.linux.org.uk> | 2013-08-28 18:37:31 +0100 |
|---|---|---|
| committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2013-08-28 18:37:31 +0100 |
| commit | cdf0bfb0126bbd8c5424ca01fd59fd70d8ea80f9 (patch) | |
| tree | e7864d3dacf433b7ccbe17a49b2e72b6a4b3a24c /arch/arm/mm | |
| parent | b4f656eea63376da79b0b5a17660c4ce14b71b74 (diff) | |
| parent | 6af396a6b6c698eb3834184518fc9a59bc22c817 (diff) | |
Merge branch 'for-rmk/barriers' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux into devel-stable
Diffstat (limited to 'arch/arm/mm')
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | arch/arm/mm/Kconfig | 34 |
| -rw-r--r-- | arch/arm/mm/cache-l2x0.c | 2 |
| -rw-r--r-- | arch/arm/mm/cache-v7.S | 4 |
| -rw-r--r-- | arch/arm/mm/context.c | 6 |
| -rw-r--r-- | arch/arm/mm/dma-mapping.c | 1 |
| -rw-r--r-- | arch/arm/mm/mmu.c | 57 |
| -rw-r--r-- | arch/arm/mm/proc-v7-2level.S | 2 |
| -rw-r--r-- | arch/arm/mm/proc-v7-3level.S | 2 |
| -rw-r--r-- | arch/arm/mm/proc-v7.S | 13 |
| -rw-r--r-- | arch/arm/mm/tlb-v7.S | 8 |
10 files changed, 102 insertions, 27 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 6cacdc8dd65..db5c2cab8fd 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -421,24 +421,28 @@ config CPU_32v3
 	select CPU_USE_DOMAINS if MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
 	select TLS_REG_EMUL if SMP || !MMU
+	select NEED_KUSER_HELPERS
 
 config CPU_32v4
 	bool
 	select CPU_USE_DOMAINS if MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
 	select TLS_REG_EMUL if SMP || !MMU
+	select NEED_KUSER_HELPERS
 
 config CPU_32v4T
 	bool
 	select CPU_USE_DOMAINS if MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
 	select TLS_REG_EMUL if SMP || !MMU
+	select NEED_KUSER_HELPERS
 
 config CPU_32v5
 	bool
 	select CPU_USE_DOMAINS if MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
 	select TLS_REG_EMUL if SMP || !MMU
+	select NEED_KUSER_HELPERS
 
 config CPU_32v6
 	bool
@@ -776,6 +780,7 @@ config CPU_BPREDICT_DISABLE
 
 config TLS_REG_EMUL
 	bool
+	select NEED_KUSER_HELPERS
 	help
 	  An SMP system using a pre-ARMv6 processor (there are apparently
 	  a few prototypes like that in existence) and therefore access to
@@ -783,11 +788,40 @@ config TLS_REG_EMUL
 
 config NEEDS_SYSCALL_FOR_CMPXCHG
 	bool
+	select NEED_KUSER_HELPERS
 	help
 	  SMP on a pre-ARMv6 processor?  Well OK then.
 	  Forget about fast user space cmpxchg support.
 	  It is just not possible.
 
+config NEED_KUSER_HELPERS
+	bool
+
+config KUSER_HELPERS
+	bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
+	default y
+	help
+	  Warning: disabling this option may break user programs.
+
+	  Provide kuser helpers in the vector page.  The kernel provides
+	  helper code to userspace in read only form at a fixed location
+	  in the high vector page to allow userspace to be independent of
+	  the CPU type fitted to the system.  This permits binaries to be
+	  run on ARMv4 through to ARMv7 without modification.
+
+	  However, the fixed address nature of these helpers can be used
+	  by ROP (return orientated programming) authors when creating
+	  exploits.
+
+	  If all of the binaries and libraries which run on your platform
+	  are built specifically for your platform, and make no use of
+	  these helpers, then you can turn this option off.  However,
+	  when such an binary or library is run, it will receive a SIGILL
+	  signal, which will terminate the program.
+
+	  Say N here only if you are absolutely certain that you do not
+	  need these helpers; otherwise, the safe option is to say Y.
+
 config DMA_CACHE_RWFO
 	bool "Enable read/write for ownership DMA cache maintenance"
 	depends on CPU_V6K && SMP
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index d70e0aba0c9..0c3fc276bd3 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -290,7 +290,7 @@ static void l2x0_disable(void)
 	raw_spin_lock_irqsave(&l2x0_lock, flags);
 	__l2x0_flush_all();
 	writel_relaxed(0, l2x0_base + L2X0_CTRL);
-	dsb();
+	dsb(st);
 	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 515b00064da..b5c467a65c2 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -282,7 +282,7 @@ ENTRY(v7_coherent_user_range)
 	add	r12, r12, r2
 	cmp	r12, r1
 	blo	1b
-	dsb
+	dsb	ishst
 	icache_line_size r2, r3
 	sub	r3, r2, #1
 	bic	r12, r0, r3
@@ -294,7 +294,7 @@ ENTRY(v7_coherent_user_range)
 	mov	r0, #0
 	ALT_SMP(mcr	p15, 0, r0, c7, c1, 6)	@ invalidate BTB Inner Shareable
 	ALT_UP(mcr	p15, 0, r0, c7, c5, 6)	@ invalidate BTB
-	dsb
+	dsb	ishst
 	isb
 	mov	pc, lr
 
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index b55b1015724..84e6f772e20 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -162,10 +162,7 @@ static void flush_context(unsigned int cpu)
 	}
 
 	/* Queue a TLB invalidate and flush the I-cache if necessary. */
-	if (!tlb_ops_need_broadcast())
-		cpumask_set_cpu(cpu, &tlb_flush_pending);
-	else
-		cpumask_setall(&tlb_flush_pending);
+	cpumask_setall(&tlb_flush_pending);
 
 	if (icache_is_vivt_asid_tagged())
 		__flush_icache_all();
@@ -245,7 +242,6 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
 	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
 		local_flush_bp_all();
 		local_flush_tlb_all();
-		dummy_flush_tlb_a15_erratum();
 	}
 
 	atomic64_set(&per_cpu(active_asids, cpu), asid);
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 7f9b1798c6c..0c17f69a828 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -455,7 +455,6 @@ static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
 	unsigned end = start + size;
 
 	apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
-	dsb();
 	flush_tlb_kernel_range(start, end);
 }
 
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4f56617a239..53cdbd39ec8 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -989,6 +989,7 @@ phys_addr_t arm_lowmem_limit __initdata = 0;
 
 void __init sanity_check_meminfo(void)
 {
+	phys_addr_t memblock_limit = 0;
 	int i, j, highmem = 0;
 	phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1;
 
@@ -1052,9 +1053,32 @@ void __init sanity_check_meminfo(void)
 			bank->size = size_limit;
 		}
 #endif
-		if (!bank->highmem && bank->start + bank->size > arm_lowmem_limit)
-			arm_lowmem_limit = bank->start + bank->size;
+		if (!bank->highmem) {
+			phys_addr_t bank_end = bank->start + bank->size;
 
+			if (bank_end > arm_lowmem_limit)
+				arm_lowmem_limit = bank_end;
+
+			/*
+			 * Find the first non-section-aligned page, and point
+			 * memblock_limit at it. This relies on rounding the
+			 * limit down to be section-aligned, which happens at
+			 * the end of this function.
+			 *
+			 * With this algorithm, the start or end of almost any
+			 * bank can be non-section-aligned. The only exception
+			 * is that the start of the bank 0 must be section-
+			 * aligned, since otherwise memory would need to be
+			 * allocated when mapping the start of bank 0, which
+			 * occurs before any free memory is mapped.
+			 */
+			if (!memblock_limit) {
+				if (!IS_ALIGNED(bank->start, SECTION_SIZE))
+					memblock_limit = bank->start;
+				else if (!IS_ALIGNED(bank_end, SECTION_SIZE))
+					memblock_limit = bank_end;
+			}
+		}
 		j++;
 	}
 #ifdef CONFIG_HIGHMEM
@@ -1079,7 +1103,18 @@ void __init sanity_check_meminfo(void)
 #endif
 	meminfo.nr_banks = j;
 	high_memory = __va(arm_lowmem_limit - 1) + 1;
-	memblock_set_current_limit(arm_lowmem_limit);
+
+	/*
+	 * Round the memblock limit down to a section size.  This
+	 * helps to ensure that we will allocate memory from the
+	 * last full section, which should be mapped.
+	 */
+	if (memblock_limit)
+		memblock_limit = round_down(memblock_limit, SECTION_SIZE);
+	if (!memblock_limit)
+		memblock_limit = arm_lowmem_limit;
+
+	memblock_set_current_limit(memblock_limit);
 }
 
 static inline void prepare_page_table(void)
@@ -1160,7 +1195,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	/*
 	 * Allocate the vector page early.
 	 */
-	vectors = early_alloc(PAGE_SIZE);
+	vectors = early_alloc(PAGE_SIZE * 2);
 
 	early_trap_init(vectors);
 
@@ -1205,15 +1240,27 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
 	map.virtual = 0xffff0000;
 	map.length = PAGE_SIZE;
+#ifdef CONFIG_KUSER_HELPERS
 	map.type = MT_HIGH_VECTORS;
+#else
+	map.type = MT_LOW_VECTORS;
+#endif
 	create_mapping(&map);
 
 	if (!vectors_high()) {
 		map.virtual = 0;
+		map.length = PAGE_SIZE * 2;
 		map.type = MT_LOW_VECTORS;
 		create_mapping(&map);
 	}
 
+	/* Now create a kernel read-only mapping */
+	map.pfn += 1;
+	map.virtual = 0xffff0000 + PAGE_SIZE;
+	map.length = PAGE_SIZE;
+	map.type = MT_LOW_VECTORS;
+	create_mapping(&map);
+
 	/*
 	 * Ask the machine support to map in the statically mapped devices.
 	 */
@@ -1276,8 +1323,6 @@ void __init paging_init(struct machine_desc *mdesc)
 {
 	void *zero_page;
 
-	memblock_set_current_limit(arm_lowmem_limit);
-
 	build_mem_type_table();
 	prepare_page_table();
 	map_lowmem();
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index f64afb9f1bd..bdd3be4be77 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -110,7 +110,7 @@ ENTRY(cpu_v7_set_pte_ext)
 ARM(	str	r3, [r0, #2048]!	)
 THUMB(	add	r0, r0, #2048		)
 THUMB(	str	r3, [r0]		)
-	ALT_SMP(mov	pc,lr)
+	ALT_SMP(W(nop))
 	ALT_UP (mcr	p15, 0, r0, c7, c10, 1)		@ flush_pte
 #endif
 	mov	pc, lr
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
index c36ac69488c..01a719e18bb 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -81,7 +81,7 @@ ENTRY(cpu_v7_set_pte_ext)
 	tst	r3, #1 << (55 - 32)		@ L_PTE_DIRTY
 	orreq	r2, #L_PTE_RDONLY
 1:	strd	r2, r3, [r0]
-	ALT_SMP(mov	pc, lr)
+	ALT_SMP(W(nop))
 	ALT_UP (mcr	p15, 0, r0, c7, c10, 1)		@ flush_pte
 #endif
 	mov	pc, lr
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 5c6d5a3050e..0b5462a941a 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -75,14 +75,15 @@ ENTRY(cpu_v7_do_idle)
 ENDPROC(cpu_v7_do_idle)
 
 ENTRY(cpu_v7_dcache_clean_area)
-	ALT_SMP(mov	pc, lr)			@ MP extensions imply L1 PTW
-	ALT_UP(W(nop))
-	dcache_line_size r2, r3
-1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
+	ALT_SMP(W(nop))			@ MP extensions imply L1 PTW
+	ALT_UP_B(1f)
+	mov	pc, lr
+1:	dcache_line_size r2, r3
+2:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
 	add	r0, r0, r2
 	subs	r1, r1, r2
-	bhi	1b
-	dsb
+	bhi	2b
+	dsb	ishst
 	mov	pc, lr
 ENDPROC(cpu_v7_dcache_clean_area)
 
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
index ea94765acf9..355308767ba 100644
--- a/arch/arm/mm/tlb-v7.S
+++ b/arch/arm/mm/tlb-v7.S
@@ -35,7 +35,7 @@
 ENTRY(v7wbi_flush_user_tlb_range)
 	vma_vm_mm r3, r2			@ get vma->vm_mm
 	mmid	r3, r3				@ get vm_mm->context.id
-	dsb
+	dsb	ish
 	mov	r0, r0, lsr #PAGE_SHIFT		@ align address
 	mov	r1, r1, lsr #PAGE_SHIFT
 	asid	r3, r3				@ mask ASID
@@ -56,7 +56,7 @@ ENTRY(v7wbi_flush_user_tlb_range)
 	add	r0, r0, #PAGE_SZ
 	cmp	r0, r1
 	blo	1b
-	dsb
+	dsb	ish
 	mov	pc, lr
 ENDPROC(v7wbi_flush_user_tlb_range)
 
@@ -69,7 +69,7 @@ ENDPROC(v7wbi_flush_user_tlb_range)
  *	- end   - end address (exclusive, may not be aligned)
  */
 ENTRY(v7wbi_flush_kern_tlb_range)
-	dsb
+	dsb	ish
 	mov	r0, r0, lsr #PAGE_SHIFT		@ align address
 	mov	r1, r1, lsr #PAGE_SHIFT
 	mov	r0, r0, lsl #PAGE_SHIFT
@@ -84,7 +84,7 @@ ENTRY(v7wbi_flush_kern_tlb_range)
 	add	r0, r0, #PAGE_SZ
 	cmp	r0, r1
 	blo	1b
-	dsb
+	dsb	ish
 	isb
 	mov	pc, lr
 ENDPROC(v7wbi_flush_kern_tlb_range)
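Note on the KUSER_HELPERS change above: the new help text describes helper code that the kernel exports to userspace at fixed addresses in the high vector page. As a rough illustration (not part of this commit), the sketch below shows how a user program conventionally reaches those helpers, following the addresses in Documentation/arm/kernel_user_helpers.txt; the macro names are the conventional ones from that document rather than anything defined by this patch series.

```c
/*
 * Illustrative only -- not part of this commit.  Userspace reaches the
 * kuser helpers at fixed addresses in the vector page, per
 * Documentation/arm/kernel_user_helpers.txt.
 */
#include <stdio.h>

typedef int   (__kuser_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
typedef void *(__kuser_get_tls_t)(void);

#define __kuser_helper_version	(*(const int *)0xffff0ffc)
#define __kuser_cmpxchg		(*(__kuser_cmpxchg_t *)0xffff0fc0)
#define __kuser_get_tls		(*(__kuser_get_tls_t *)0xffff0fe0)

int main(void)
{
	volatile int val = 0;

	/* These accesses fault if the helpers are not present. */
	printf("kuser helper version: %d\n", __kuser_helper_version);
	printf("TLS value: %p\n", __kuser_get_tls());

	/* Atomically change val from 0 to 1; returns 0 on success. */
	if (__kuser_cmpxchg(0, 1, &val) == 0)
		printf("cmpxchg succeeded, val = %d\n", val);

	return 0;
}
```

With CONFIG_KUSER_HELPERS=n the vector page no longer carries this code, so a binary or library performing such a call faults and is terminated with SIGILL, which is the trade-off the new Kconfig help text warns about.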