Diffstat (limited to 'arch/arm/mm/mm-armv.c')
-rw-r--r--	arch/arm/mm/mm-armv.c	96
1 file changed, 13 insertions, 83 deletions
diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c
index 2c2b93d77d4..e33fe4229d0 100644
--- a/arch/arm/mm/mm-armv.c
+++ b/arch/arm/mm/mm-armv.c
@@ -169,7 +169,14 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
 
 	memzero(new_pgd, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));
 
+	/*
+	 * Copy over the kernel and IO PGD entries
+	 */
 	init_pgd = pgd_offset_k(0);
+	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
+	       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
+
+	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
 
 	if (!vectors_high()) {
 		/*
@@ -198,14 +205,6 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
 		spin_unlock(&mm->page_table_lock);
 	}
 
-	/*
-	 * Copy over the kernel and IO PGD entries
-	 */
-	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
-	       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
-
-	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
-
 	return new_pgd;
 
 no_pte:
@@ -400,7 +399,7 @@ static void __init build_mem_type_table(void)
 		ecc_mask = 0;
 	}
 
-	if (cpu_arch <= CPU_ARCH_ARMv5) {
+	if (cpu_arch <= CPU_ARCH_ARMv5TEJ) {
 		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
 			if (mem_types[i].prot_l1)
 				mem_types[i].prot_l1 |= PMD_BIT4;
@@ -426,6 +425,9 @@ static void __init build_mem_type_table(void)
 		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
+
+		mem_types[MT_DEVICE].prot_pte |= L_PTE_BUFFERABLE;
+		mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
 	}
 
 	cp = &cache_policies[cachepolicy];
@@ -585,7 +587,7 @@ void setup_mm_for_reboot(char mode)
 		pmdval = (i << PGDIR_SHIFT) |
 			 PMD_SECT_AP_WRITE | PMD_SECT_AP_READ |
 			 PMD_TYPE_SECT;
-		if (cpu_arch <= CPU_ARCH_ARMv5)
+		if (cpu_arch <= CPU_ARCH_ARMv5TEJ)
 			pmdval |= PMD_BIT4;
 		pmd = pmd_off(pgd, i << PGDIR_SHIFT);
 		pmd[0] = __pmd(pmdval);
@@ -683,7 +685,7 @@ void __init memtable_init(struct meminfo *mi)
 	}
 
 	flush_cache_all();
-	flush_tlb_all();
+	local_flush_tlb_all();
 
 	top_pmd = pmd_off_k(0xffff0000);
 }
@@ -698,75 +700,3 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 	for (i = 0; i < nr; i++)
 		create_mapping(io_desc + i);
 }
-
-static inline void
-free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
-{
-	struct page *start_pg, *end_pg;
-	unsigned long pg, pgend;
-
-	/*
-	 * Convert start_pfn/end_pfn to a struct page pointer.
-	 */
-	start_pg = pfn_to_page(start_pfn);
-	end_pg = pfn_to_page(end_pfn);
-
-	/*
-	 * Convert to physical addresses, and
-	 * round start upwards and end downwards.
-	 */
-	pg = PAGE_ALIGN(__pa(start_pg));
-	pgend = __pa(end_pg) & PAGE_MASK;
-
-	/*
-	 * If there are free pages between these,
-	 * free the section of the memmap array.
-	 */
-	if (pg < pgend)
-		free_bootmem_node(NODE_DATA(node), pg, pgend - pg);
-}
-
-static inline void free_unused_memmap_node(int node, struct meminfo *mi)
-{
-	unsigned long bank_start, prev_bank_end = 0;
-	unsigned int i;
-
-	/*
-	 * [FIXME] This relies on each bank being in address order.  This
-	 * may not be the case, especially if the user has provided the
-	 * information on the command line.
-	 */
-	for (i = 0; i < mi->nr_banks; i++) {
-		if (mi->bank[i].size == 0 || mi->bank[i].node != node)
-			continue;
-
-		bank_start = mi->bank[i].start >> PAGE_SHIFT;
-		if (bank_start < prev_bank_end) {
-			printk(KERN_ERR "MEM: unordered memory banks.  "
-				"Not freeing memmap.\n");
-			break;
-		}
-
-		/*
-		 * If we had a previous bank, and there is a space
-		 * between the current bank and the previous, free it.
-		 */
-		if (prev_bank_end && prev_bank_end != bank_start)
-			free_memmap(node, prev_bank_end, bank_start);
-
-		prev_bank_end = PAGE_ALIGN(mi->bank[i].start +
-					   mi->bank[i].size) >> PAGE_SHIFT;
-	}
-}
-
-/*
- * The mem_map array can get very big.  Free
- * the unused area of the memory map.
- */
-void __init create_memmap_holes(struct meminfo *mi)
-{
-	int node;
-
-	for_each_online_node(node)
-		free_unused_memmap_node(node, mi);
-}