Diffstat (limited to 'arch/arm/mm')
-rw-r--r--   arch/arm/mm/cache-v6.S       |   1
-rw-r--r--   arch/arm/mm/cache-v7.S       |   2
-rw-r--r--   arch/arm/mm/flush.c          |   7
-rw-r--r--   arch/arm/mm/init.c           |  68
-rw-r--r--   arch/arm/mm/mm.h             |   7
-rw-r--r--   arch/arm/mm/mmu.c            |  11
-rw-r--r--   arch/arm/mm/proc-arm7tdmi.S  |   2
-rw-r--r--   arch/arm/mm/proc-arm9tdmi.S  |   2
-rw-r--r--   arch/arm/mm/proc-v6.S        |  38
-rw-r--r--   arch/arm/mm/proc-v7.S        |   4
-rw-r--r--   arch/arm/mm/proc-xscale.S    |   2
11 files changed, 84 insertions(+), 60 deletions(-)
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index c96fa1b3f49..73b4a8b66a5 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -176,6 +176,7 @@ ENDPROC(v6_coherent_kern_range)
  */
 ENTRY(v6_flush_kern_dcache_area)
        add     r1, r0, r1
+       bic     r0, r0, #D_CACHE_LINE_SIZE - 1
 1:
 #ifdef HARVARD_CACHE
        mcr     p15, 0, r0, c7, c14, 1          @ clean & invalidate D line
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index dc18d81ef8c..d32f02b6186 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -221,6 +221,8 @@ ENDPROC(v7_coherent_user_range)
 ENTRY(v7_flush_kern_dcache_area)
        dcache_line_size r2, r3
        add     r1, r0, r1
+       sub     r3, r2, #1
+       bic     r0, r0, r3
 1:
        mcr     p15, 0, r0, c7, c14, 1          @ clean & invalidate D line / unified line
        add     r0, r0, r2
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 2b269c95552..1a8d4aa821b 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -253,8 +253,8 @@ void __sync_icache_dcache(pte_t pteval)
        if (!test_and_set_bit(PG_dcache_clean, &page->flags))
                __flush_dcache_page(mapping, page);
-       /* pte_exec() already checked above for non-aliasing VIPT cache */
-       if (cache_is_vipt_nonaliasing() || pte_exec(pteval))
+
+       if (pte_exec(pteval))
                __flush_icache_all();
 }
 #endif
@@ -275,7 +275,8 @@ void __sync_icache_dcache(pte_t pteval)
  * kernel cache lines for later.  Otherwise, we assume we have
  * aliasing mappings.
  *
- * Note that we disable the lazy flush for SMP.
+ * Note that we disable the lazy flush for SMP configurations where
+ * the cache maintenance operations are not automatically broadcasted.
  */
 void flush_dcache_page(struct page *page)
 {
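
The two cache hunks above apply the same fix: v6_flush_kern_dcache_area() and v7_flush_kern_dcache_area() previously started stepping from the caller's address as-is, so a range with an unaligned start could stop one stride short of the final cache line. The added bic rounds the start down to a line boundary, guaranteeing every line in [start, start+size) is visited. A minimal userspace C sketch of the rounding (the 32-byte line size and the printf stand-in for the mcr are assumptions for illustration; v6 hard-codes D_CACHE_LINE_SIZE at build time, v7 reads the line size from the Cache Type Register):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Assumed line size for the sketch only. */
    #define D_CACHE_LINE_SIZE 32u

    static void flush_dcache_area(uintptr_t start, size_t size)
    {
            uintptr_t end = start + size;                   /* add r1, r0, r1 */

            start &= ~(uintptr_t)(D_CACHE_LINE_SIZE - 1);   /* the added bic */
            for (; start < end; start += D_CACHE_LINE_SIZE)
                    /* the real code issues an mcr clean+invalidate here */
                    printf("clean+invalidate line at %#lx\n",
                           (unsigned long)start);
    }

    int main(void)
    {
            /* 8 bytes at 0x101c span the lines at 0x1000 and 0x1020.
             * Stepping from the unaligned 0x101c stops at 0x103c > end,
             * so the line at 0x1020 would be skipped; rounding the start
             * down visits both lines. */
            flush_dcache_area(0x101c, 8);
            return 0;
    }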
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index e5f6fc42834..c19571c40a2 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -15,12 +15,14 @@
 #include <linux/mman.h>
 #include <linux/nodemask.h>
 #include <linux/initrd.h>
+#include <linux/of_fdt.h>
 #include <linux/highmem.h>
 #include <linux/gfp.h>
 #include <linux/memblock.h>
 #include <linux/sort.h>
 
 #include <asm/mach-types.h>
+#include <asm/prom.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/sizes.h>
@@ -71,6 +73,14 @@ static int __init parse_tag_initrd2(const struct tag *tag)
 
 __tagtable(ATAG_INITRD2, parse_tag_initrd2);
 
+#ifdef CONFIG_OF_FLATTREE
+void __init early_init_dt_setup_initrd_arch(unsigned long start, unsigned long end)
+{
+       phys_initrd_start = start;
+       phys_initrd_size = end - start;
+}
+#endif /* CONFIG_OF_FLATTREE */
+
 /*
  * This keeps memory configuration data used by a couple memory
  * initialization functions, as well as show_mem() for the skipping
@@ -85,7 +95,7 @@ void show_mem(unsigned int filter)
        struct meminfo * mi = &meminfo;
 
        printk("Mem-info:\n");
-       show_free_areas();
+       show_free_areas(filter);
 
        for_each_bank (i, mi) {
                struct membank *bank = &mi->bank[i];
@@ -201,6 +211,20 @@ static void __init arm_bootmem_init(unsigned long start_pfn,
        }
 }
 
+#ifdef CONFIG_ZONE_DMA
+static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
+       unsigned long dma_size)
+{
+       if (size[0] <= dma_size)
+               return;
+
+       size[ZONE_NORMAL] = size[0] - dma_size;
+       size[ZONE_DMA] = dma_size;
+       hole[ZONE_NORMAL] = hole[0];
+       hole[ZONE_DMA] = 0;
+}
+#endif
+
 static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
        unsigned long max_high)
 {
@@ -243,22 +267,31 @@ static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
 #endif
        }
 
+#ifdef ARM_DMA_ZONE_SIZE
+#ifndef CONFIG_ZONE_DMA
+#error ARM_DMA_ZONE_SIZE set but no DMA zone to limit allocations
+#endif
+
        /*
         * Adjust the sizes according to any special requirements for
         * this machine type.
         */
-       arch_adjust_zones(zone_size, zhole_size);
+       arm_adjust_dma_zone(zone_size, zhole_size,
+               ARM_DMA_ZONE_SIZE >> PAGE_SHIFT);
+#endif
 
        free_area_init_node(0, zone_size, min, zhole_size);
 }
 
-#ifndef CONFIG_SPARSEMEM
+#ifdef CONFIG_HAVE_ARCH_PFN_VALID
 int pfn_valid(unsigned long pfn)
 {
        return memblock_is_memory(pfn << PAGE_SHIFT);
 }
 EXPORT_SYMBOL(pfn_valid);
+#endif
 
+#ifndef CONFIG_SPARSEMEM
 static void arm_memory_present(void)
 {
 }
@@ -298,6 +331,12 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
 #endif
 #ifdef CONFIG_BLK_DEV_INITRD
        if (phys_initrd_size &&
+           !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) {
+               pr_err("INITRD: 0x%08lx+0x%08lx is not a memory region - disabling initrd\n",
+                      phys_initrd_start, phys_initrd_size);
+               phys_initrd_start = phys_initrd_size = 0;
+       }
+       if (phys_initrd_size &&
            memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) {
                pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region - disabling initrd\n",
                       phys_initrd_start, phys_initrd_size);
@@ -313,6 +352,7 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
 #endif
 
        arm_mm_memblock_reserve();
+       arm_dt_memblock_reserve();
 
        /* reserve any platform specific memblock areas */
        if (mdesc->reserve)
@@ -392,7 +432,7 @@ free_memmap(unsigned long start_pfn, unsigned long end_pfn)
         * Convert start_pfn/end_pfn to a struct page pointer.
         */
        start_pg = pfn_to_page(start_pfn - 1) + 1;
-       end_pg = pfn_to_page(end_pfn);
+       end_pg = pfn_to_page(end_pfn - 1) + 1;
 
        /*
         * Convert to physical addresses, and
@@ -426,6 +466,14 @@ static void __init free_unused_memmap(struct meminfo *mi)
 
                bank_start = bank_pfn_start(bank);
 
+#ifdef CONFIG_SPARSEMEM
+               /*
+                * Take care not to free memmap entries that don't exist
+                * due to SPARSEMEM sections which aren't present.
+                */
+               bank_start = min(bank_start,
+                                ALIGN(prev_bank_end, PAGES_PER_SECTION));
+#endif
                /*
                 * If we had a previous bank, and there is a space
                 * between the current bank and the previous, free it.
@@ -440,6 +488,12 @@ static void __init free_unused_memmap(struct meminfo *mi)
                 */
                prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES);
        }
+
+#ifdef CONFIG_SPARSEMEM
+       if (!IS_ALIGNED(prev_bank_end, PAGES_PER_SECTION))
+               free_memmap(prev_bank_end,
+                           ALIGN(prev_bank_end, PAGES_PER_SECTION));
+#endif
 }
 
 static void __init free_highpages(void)
@@ -587,7 +641,8 @@ void __init mem_init(void)
                        "    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                        "      .init : 0x%p" " - 0x%p" "   (%4d kB)\n"
                        "      .text : 0x%p" " - 0x%p" "   (%4d kB)\n"
-                       "      .data : 0x%p" " - 0x%p" "   (%4d kB)\n",
+                       "      .data : 0x%p" " - 0x%p" "   (%4d kB)\n"
+                       "       .bss : 0x%p" " - 0x%p" "   (%4d kB)\n",
 
                        MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
                                (PAGE_SIZE)),
@@ -609,7 +664,8 @@ void __init mem_init(void)
 
                        MLK_ROUNDUP(__init_begin, __init_end),
                        MLK_ROUNDUP(_text, _etext),
-                       MLK_ROUNDUP(_sdata, _edata));
+                       MLK_ROUNDUP(_sdata, _edata),
+                       MLK_ROUNDUP(__bss_start, __bss_stop));
 
 #undef MLK
 #undef MLM
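
The arm_adjust_dma_zone() helper added above replaces the per-machine arch_adjust_zones() hooks: when ARM_DMA_ZONE_SIZE is set, the first dma_size pages of zone 0 are carved into ZONE_DMA and the remainder becomes ZONE_NORMAL. A userspace re-statement as a worked example (the zone indices and the 512MB/64MB figures are invented for illustration; sizes are in 4K pages):

    #include <stdio.h>

    enum { ZONE_DMA = 0, ZONE_NORMAL = 1, MAX_NR_ZONES = 2 };

    /* Mirrors the arm_adjust_dma_zone() added in the init.c hunk above. */
    static void adjust_dma_zone(unsigned long *size, unsigned long *hole,
                                unsigned long dma_size)
    {
            if (size[0] <= dma_size)
                    return;

            size[ZONE_NORMAL] = size[0] - dma_size;
            size[ZONE_DMA] = dma_size;
            hole[ZONE_NORMAL] = hole[0];
            hole[ZONE_DMA] = 0;
    }

    int main(void)
    {
            /* 512MB of memory, 64MB DMA zone: 131072 and 16384 4K pages. */
            unsigned long size[MAX_NR_ZONES] = { 131072, 0 };
            unsigned long hole[MAX_NR_ZONES] = { 0, 0 };

            adjust_dma_zone(size, hole, 16384);  /* ARM_DMA_ZONE_SIZE >> PAGE_SHIFT */
            printf("DMA: %lu pages, NORMAL: %lu pages\n",
                   size[ZONE_DMA], size[ZONE_NORMAL]);
            return 0;
    }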
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index d2384106af9..5b3d7d54365 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -5,14 +5,9 @@ extern pmd_t *top_pmd;
 
 #define TOP_PTE(x)     pte_offset_kernel(top_pmd, x)
 
-static inline pmd_t *pmd_off(pgd_t *pgd, unsigned long virt)
-{
-       return pmd_offset(pud_offset(pgd, virt), virt);
-}
-
 static inline pmd_t *pmd_off_k(unsigned long virt)
 {
-       return pmd_off(pgd_offset_k(virt), virt);
+       return pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);
 }
 
 struct mem_type {
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 6cf76b3b68d..9d9e736c2b4 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -31,8 +31,6 @@
 
 #include "mm.h"
 
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-
 /*
  * empty_zero_page is a special page that is used for
  * zero-initialized data and COW.
@@ -765,15 +763,12 @@ static void __init sanity_check_meminfo(void)
 {
        int i, j, highmem = 0;
 
-       lowmem_limit = __pa(vmalloc_min - 1) + 1;
-       memblock_set_current_limit(lowmem_limit);
-
        for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
                struct membank *bank = &meminfo.bank[j];
                *bank = meminfo.bank[i];
 
 #ifdef CONFIG_HIGHMEM
-               if (__va(bank->start) > vmalloc_min ||
+               if (__va(bank->start) >= vmalloc_min ||
                    __va(bank->start) < (void *)PAGE_OFFSET)
                        highmem = 1;
 
@@ -831,6 +826,9 @@ static void __init sanity_check_meminfo(void)
                        bank->size = newsize;
                }
 #endif
+               if (!bank->highmem && bank->start + bank->size > lowmem_limit)
+                       lowmem_limit = bank->start + bank->size;
+
                j++;
        }
 #ifdef CONFIG_HIGHMEM
@@ -854,6 +852,7 @@ static void __init sanity_check_meminfo(void)
        }
 #endif
        meminfo.nr_banks = j;
+       memblock_set_current_limit(lowmem_limit);
 }
 
 static inline void prepare_page_table(void)
diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S
index e4c165ca669..537ffcb0646 100644
--- a/arch/arm/mm/proc-arm7tdmi.S
+++ b/arch/arm/mm/proc-arm7tdmi.S
@@ -146,7 +146,7 @@ __arm7tdmi_proc_info:
                .long   0
                .long   0
                .long   v4_cache_fns
-               .size   __arm7tdmi_proc_info, . - __arm7dmi_proc_info
+               .size   __arm7tdmi_proc_info, . - __arm7tdmi_proc_info
 
                .type   __triscenda7_proc_info, #object
 __triscenda7_proc_info:
diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S
index 7b7ebd4d096..546b54da100 100644
--- a/arch/arm/mm/proc-arm9tdmi.S
+++ b/arch/arm/mm/proc-arm9tdmi.S
@@ -116,7 +116,7 @@ __arm9tdmi_proc_info:
                .long   0
                .long   0
                .long   v4_cache_fns
-               .size   __arm9tdmi_proc_info, . - __arm9dmi_proc_info
+               .size   __arm9tdmi_proc_info, . - __arm9tdmi_proc_info
 
                .type   __p2001_proc_info, #object
 __p2001_proc_info:
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 7c99cb4c8e4..1d2b8451bf2 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -175,11 +175,6 @@ cpu_v6_name:
        .asciz  "ARMv6-compatible processor"
        .size   cpu_v6_name, . - cpu_v6_name
 
-       .type   cpu_pj4_name, #object
-cpu_pj4_name:
-       .asciz  "Marvell PJ4 processor"
-       .size   cpu_pj4_name, . - cpu_pj4_name
-
        .align
 
        __CPUINIT
@@ -218,7 +213,9 @@ __v6_setup:
        mcr     p15, 0, r0, c2, c0, 2           @ TTB control register
        ALT_SMP(orr     r4, r4, #TTB_FLAGS_SMP)
        ALT_UP(orr      r4, r4, #TTB_FLAGS_UP)
-       mcr     p15, 0, r4, c2, c0, 1           @ load TTB1
+       ALT_SMP(orr     r8, r8, #TTB_FLAGS_SMP)
+       ALT_UP(orr      r8, r8, #TTB_FLAGS_UP)
+       mcr     p15, 0, r8, c2, c0, 1           @ load TTB1
 #endif /* CONFIG_MMU */
        adr     r5, v6_crval
        ldmia   r5, {r5, r6}
@@ -305,32 +302,3 @@ __v6_proc_info:
        .long   v6_user_fns
        .long   v6_cache_fns
        .size   __v6_proc_info, . - __v6_proc_info
-
-       .type   __pj4_v6_proc_info, #object
-__pj4_v6_proc_info:
-       .long   0x560f5810
-       .long   0xff0ffff0
-       ALT_SMP(.long \
-               PMD_TYPE_SECT | \
-               PMD_SECT_AP_WRITE | \
-               PMD_SECT_AP_READ | \
-               PMD_FLAGS_SMP)
-       ALT_UP(.long \
-               PMD_TYPE_SECT | \
-               PMD_SECT_AP_WRITE | \
-               PMD_SECT_AP_READ | \
-               PMD_FLAGS_UP)
-       .long   PMD_TYPE_SECT | \
-               PMD_SECT_XN | \
-               PMD_SECT_AP_WRITE | \
-               PMD_SECT_AP_READ
-       b       __v6_setup
-       .long   cpu_arch_name
-       .long   cpu_elf_name
-       .long   HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_TLS
-       .long   cpu_pj4_name
-       .long   v6_processor_functions
-       .long   v6wbi_tlb_fns
-       .long   v6_user_fns
-       .long   v6_cache_fns
-       .size   __pj4_v6_proc_info, . - __pj4_v6_proc_info
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index babfba09c89..3c3867850a3 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -368,7 +368,9 @@ __v7_setup:
        mcr     p15, 0, r10, c2, c0, 2          @ TTB control register
        ALT_SMP(orr     r4, r4, #TTB_FLAGS_SMP)
        ALT_UP(orr      r4, r4, #TTB_FLAGS_UP)
-       mcr     p15, 0, r4, c2, c0, 1           @ load TTB1
+       ALT_SMP(orr     r8, r8, #TTB_FLAGS_SMP)
+       ALT_UP(orr      r8, r8, #TTB_FLAGS_UP)
+       mcr     p15, 0, r8, c2, c0, 1           @ load TTB1
        ldr     r5, =PRRR                       @ PRRR
        ldr     r6, =NMRR                       @ NMRR
        mcr     p15, 0, r5, c10, c2, 0          @ write PRRR
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index ce233bcbf50..42af97664c9 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -395,7 +395,7 @@ ENTRY(xscale_dma_a0_map_area)
        teq     r2, #DMA_TO_DEVICE
        beq     xscale_dma_clean_range
        b       xscale_dma_flush_range
-ENDPROC(xscsale_dma_a0_map_area)
+ENDPROC(xscale_dma_a0_map_area)
 
 /*
  * dma_unmap_area(start, size, dir)
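
The mmu.c hunks above reorder how the lowmem limit is established: instead of fixing it up-front from vmalloc_min, sanity_check_meminfo() now raises lowmem_limit to the end of each non-highmem bank as it walks meminfo, and only afterwards publishes the result via memblock_set_current_limit(). A standalone sketch of that accumulation (struct bank and the sample addresses are invented for illustration; the real code also handles bank splitting and overflow):

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for struct membank. */
    struct bank {
            uint32_t start;
            uint32_t size;
            int highmem;
    };

    /* Mirrors the new loop body: the limit is the highest end address
     * among non-highmem banks, computed before it is handed to the
     * early allocator. */
    static uint32_t compute_lowmem_limit(const struct bank *banks, int n)
    {
            uint32_t lowmem_limit = 0;
            int i;

            for (i = 0; i < n; i++) {
                    uint32_t end = banks[i].start + banks[i].size;

                    if (!banks[i].highmem && end > lowmem_limit)
                            lowmem_limit = end;
            }
            return lowmem_limit;  /* then memblock_set_current_limit(...) */
    }

    int main(void)
    {
            struct bank banks[] = {
                    { 0x00000000, 0x10000000, 0 },  /* 256MB lowmem */
                    { 0x20000000, 0x10000000, 1 },  /* 256MB highmem */
            };

            printf("lowmem limit: %#lx\n",
                   (unsigned long)compute_lowmem_limit(banks, 2));
            return 0;
    }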