Diffstat (limited to 'arch/arm/mm')
 -rw-r--r--  arch/arm/mm/Kconfig      |  23
 -rw-r--r--  arch/arm/mm/abort-ev6.S  |   3
 -rw-r--r--  arch/arm/mm/alignment.c  | 139
 -rw-r--r--  arch/arm/mm/fault.c      |   2
 -rw-r--r--  arch/arm/mm/init.c       | 118
 -rw-r--r--  arch/arm/mm/ioremap.c    |   6
 -rw-r--r--  arch/arm/mm/mmu.c        |  31
 -rw-r--r--  arch/arm/mm/proc-syms.c  |   1
 -rw-r--r--  arch/arm/mm/proc-v6.S    |   3
 -rw-r--r--  arch/arm/mm/proc-v7.S    |  59
 -rw-r--r--  arch/arm/mm/tlb-v7.S     |  17

11 files changed, 312 insertions(+), 90 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 20979564e7e..83c025e72ce 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -391,7 +391,7 @@ config CPU_FEROCEON_OLD_ID
 
 # ARMv6
 config CPU_V6
-	bool "Support ARM V6 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB
+	bool "Support ARM V6 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX
 	select CPU_32v6
 	select CPU_ABRT_EV6
 	select CPU_PABRT_NOIFAR
@@ -416,7 +416,7 @@ config CPU_32v6K
 
 # ARMv7
 config CPU_V7
-	bool "Support ARM V7 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB
+	bool "Support ARM V7 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX
 	select CPU_32v6K
 	select CPU_32v7
 	select CPU_ABRT_EV7
@@ -639,10 +639,23 @@ config CPU_BIG_ENDIAN
 	  port must properly enable any big-endian related features
 	  of your chipset/board/processor.
 
+config CPU_ENDIAN_BE8
+	bool
+	depends on CPU_BIG_ENDIAN
+	default CPU_V6 || CPU_V7
+	help
+	  Support for the BE-8 (big-endian) mode on ARMv6 and ARMv7 processors.
+
+config CPU_ENDIAN_BE32
+	bool
+	depends on CPU_BIG_ENDIAN
+	default !CPU_ENDIAN_BE8
+	help
+	  Support for the BE-32 (big-endian) mode on pre-ARMv6 processors.
+
 config CPU_HIGH_VECTOR
 	depends on !MMU && CPU_CP15 && !CPU_ARM740T
 	bool "Select the High exception vector"
-	default n
 	help
 	  Say Y here to select high exception vector(0xFFFF0000~).
 	  The exception vector can be vary depending on the platform
@@ -726,7 +739,6 @@ config NEEDS_SYSCALL_FOR_CMPXCHG
 
 config OUTER_CACHE
 	bool
-	default n
 
 config CACHE_FEROCEON_L2
 	bool "Enable the Feroceon L2 cache controller"
@@ -739,7 +751,6 @@ config CACHE_FEROCEON_L2
 config CACHE_FEROCEON_L2_WRITETHROUGH
 	bool "Force Feroceon L2 cache write through"
 	depends on CACHE_FEROCEON_L2
-	default n
 	help
 	  Say Y here to use the Feroceon L2 cache in writethrough mode.
 	  Unless you specifically require this, say N for writeback mode.
@@ -747,7 +758,7 @@ config CACHE_FEROCEON_L2_WRITETHROUGH
 config CACHE_L2X0
 	bool "Enable the L2x0 outer cache controller"
 	depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \
-		   REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31
+		   REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX
 	default y
 	select OUTER_CACHE
 	help
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
index 6f7e70907e4..f332df7f0d3 100644
--- a/arch/arm/mm/abort-ev6.S
+++ b/arch/arm/mm/abort-ev6.S
@@ -37,6 +37,9 @@ ENTRY(v6_early_abort)
 	movne	pc, lr
 	do_thumb_abort
 	ldreq	r3, [r2]			@ read aborted ARM instruction
+#ifdef CONFIG_CPU_ENDIAN_BE8
+	reveq	r3, r3
+#endif
 	do_ldrd_abort
 	tst	r3, #1 << 20			@ L = 0 -> write
 	orreq	r1, r1, #1 << 11		@ yes.
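The reveq above is needed because a BE-8 kernel performs big-endian data accesses while the instruction stream is still stored little-endian, so an aborted ARM instruction re-read with ldr arrives byte-swapped and must be reversed before its bit fields (the L bit tested just below) can be decoded. A minimal C sketch of the byte reversal that rev performs; the function name is ours, for illustration only:

    #include <stdint.h>

    /* Mimics the ARMv6 "rev" instruction: reverse the four bytes of a
     * word, turning an instruction word fetched via a big-endian data
     * load back into its decodable little-endian layout. */
    static inline uint32_t rev32(uint32_t x)
    {
            return (x >> 24) | ((x >> 8) & 0x0000ff00u) |
                   ((x << 8) & 0x00ff0000u) | (x << 24);
    }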
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 3a398befed4..03cd27d917b 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -62,6 +62,12 @@
 #define SHIFT_ASR	0x40
 #define SHIFT_RORRRX	0x60
 
+#define BAD_INSTR	0xdeadc0de
+
+/* Thumb-2 32 bit format per ARMv7 DDI0406A A6.3, either f800h,e800h,f800h */
+#define IS_T32(hi16) \
+	(((hi16) & 0xe000) == 0xe000 && ((hi16) & 0x1800))
+
 static unsigned long ai_user;
 static unsigned long ai_sys;
 static unsigned long ai_skipped;
@@ -332,38 +338,48 @@ do_alignment_ldrdstrd(unsigned long addr, unsigned long instr,
 		      struct pt_regs *regs)
 {
 	unsigned int rd = RD_BITS(instr);
-
-	if (((rd & 1) == 1) || (rd == 14))
+	unsigned int rd2;
+	int load;
+
+	if ((instr & 0xfe000000) == 0xe8000000) {
+		/* ARMv7 Thumb-2 32-bit LDRD/STRD */
+		rd2 = (instr >> 8) & 0xf;
+		load = !!(LDST_L_BIT(instr));
+	} else if (((rd & 1) == 1) || (rd == 14))
 		goto bad;
+	else {
+		load = ((instr & 0xf0) == 0xd0);
+		rd2 = rd + 1;
+	}
 
 	ai_dword += 1;
 
 	if (user_mode(regs))
 		goto user;
 
-	if ((instr & 0xf0) == 0xd0) {
+	if (load) {
 		unsigned long val;
 		get32_unaligned_check(val, addr);
 		regs->uregs[rd] = val;
 		get32_unaligned_check(val, addr + 4);
-		regs->uregs[rd + 1] = val;
+		regs->uregs[rd2] = val;
 	} else {
 		put32_unaligned_check(regs->uregs[rd], addr);
-		put32_unaligned_check(regs->uregs[rd + 1], addr + 4);
+		put32_unaligned_check(regs->uregs[rd2], addr + 4);
 	}
 
 	return TYPE_LDST;
 
 user:
-	if ((instr & 0xf0) == 0xd0) {
+	if (load) {
 		unsigned long val;
 		get32t_unaligned_check(val, addr);
 		regs->uregs[rd] = val;
 		get32t_unaligned_check(val, addr + 4);
-		regs->uregs[rd + 1] = val;
+		regs->uregs[rd2] = val;
 	} else {
 		put32t_unaligned_check(regs->uregs[rd], addr);
-		put32t_unaligned_check(regs->uregs[rd + 1], addr + 4);
+		put32t_unaligned_check(regs->uregs[rd2], addr + 4);
 	}
 
 	return TYPE_LDST;
@@ -616,8 +632,72 @@ thumb2arm(u16 tinstr)
 		/* Else fall through for illegal instruction case */
 
 	default:
-		return 0xdeadc0de;
+		return BAD_INSTR;
+	}
+}
+
+/*
+ * Convert Thumb-2 32 bit LDM, STM, LDRD, STRD to equivalent instruction
+ * handlable by ARM alignment handler, also find the corresponding handler,
+ * so that we can reuse ARM userland alignment fault fixups for Thumb.
+ *
+ * @pinstr: original Thumb-2 instruction; returns new handlable instruction
+ * @regs: register context.
+ * @poffset: return offset from faulted addr for later writeback
+ *
+ * NOTES:
+ * 1. Comments below refer to ARMv7 DDI0406A Thumb Instruction sections.
+ * 2. Register name Rt from ARMv7 is same as Rd from ARMv6 (Rd is Rt)
+ */
+static void *
+do_alignment_t32_to_handler(unsigned long *pinstr, struct pt_regs *regs,
+			    union offset_union *poffset)
+{
+	unsigned long instr = *pinstr;
+	u16 tinst1 = (instr >> 16) & 0xffff;
+	u16 tinst2 = instr & 0xffff;
+	poffset->un = 0;
+
+	switch (tinst1 & 0xffe0) {
+	/* A6.3.5 Load/Store multiple */
+	case 0xe880:		/* STM/STMIA/STMEA,LDM/LDMIA, PUSH/POP T2 */
+	case 0xe8a0:		/* ...above writeback version */
+	case 0xe900:		/* STMDB/STMFD, LDMDB/LDMEA */
+	case 0xe920:		/* ...above writeback version */
+		/* no need offset decision since handler calculates it */
+		return do_alignment_ldmstm;
+
+	case 0xf840:		/* POP/PUSH T3 (single register) */
+		if (RN_BITS(instr) == 13 && (tinst2 & 0x09ff) == 0x0904) {
+			u32 L = !!(LDST_L_BIT(instr));
+			const u32 subset[2] = {
+				0xe92d0000,	/* STMDB sp!,{registers} */
+				0xe8bd0000,	/* LDMIA sp!,{registers} */
+			};
+			*pinstr = subset[L] | (1<<RD_BITS(instr));
+			return do_alignment_ldmstm;
+		}
+		/* Else fall through for illegal instruction case */
+		break;
+
+	/* A6.3.6 Load/store double, STRD/LDRD(immed, lit, reg) */
+	case 0xe860:
+	case 0xe960:
+	case 0xe8e0:
+	case 0xe9e0:
+		poffset->un = (tinst2 & 0xff) << 2;
+	case 0xe940:
+	case 0xe9c0:
+		return do_alignment_ldrdstrd;
+
+	/*
+	 * No need to handle load/store instructions up to word size
+	 * since ARMv6 and later CPUs can perform unaligned accesses.
+	 */
+	default:
+		break;
 	}
+	return NULL;
 }
 
 static int
@@ -630,6 +710,8 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	mm_segment_t fs;
 	unsigned int fault;
 	u16 tinstr = 0;
+	int isize = 4;
+	int thumb2_32b = 0;
 
 	instrptr = instruction_pointer(regs);
 
@@ -637,8 +719,19 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	set_fs(KERNEL_DS);
 	if (thumb_mode(regs)) {
 		fault = __get_user(tinstr, (u16 *)(instrptr & ~1));
-		if (!(fault))
-			instr = thumb2arm(tinstr);
+		if (!fault) {
+			if (cpu_architecture() >= CPU_ARCH_ARMv7 &&
+			    IS_T32(tinstr)) {
+				/* Thumb-2 32-bit */
+				u16 tinst2 = 0;
+				fault = __get_user(tinst2, (u16 *)(instrptr+2));
+				instr = (tinstr << 16) | tinst2;
+				thumb2_32b = 1;
+			} else {
+				isize = 2;
+				instr = thumb2arm(tinstr);
+			}
+		}
 	} else
 		fault = __get_user(instr, (u32 *)instrptr);
 	set_fs(fs);
@@ -655,7 +748,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 
  fixup:
 
-	regs->ARM_pc += thumb_mode(regs) ? 2 : 4;
+	regs->ARM_pc += isize;
 
 	switch (CODING_BITS(instr)) {
 	case 0x00000000:	/* 3.13.4 load/store instruction extensions */
@@ -714,18 +807,25 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 		handler = do_alignment_ldrstr;
 		break;
 
-	case 0x08000000:	/* ldm or stm */
-		handler = do_alignment_ldmstm;
+	case 0x08000000:	/* ldm or stm, or thumb-2 32bit instruction */
+		if (thumb2_32b)
+			handler = do_alignment_t32_to_handler(&instr, regs, &offset);
+		else
+			handler = do_alignment_ldmstm;
 		break;
 
 	default:
 		goto bad;
 	}
 
+	if (!handler)
+		goto bad;
 	type = handler(addr, instr, regs);
 
-	if (type == TYPE_ERROR || type == TYPE_FAULT)
+	if (type == TYPE_ERROR || type == TYPE_FAULT) {
+		regs->ARM_pc -= isize;
 		goto bad_or_fault;
+	}
 
 	if (type == TYPE_LDST)
 		do_alignment_finish_ldst(addr, instr, regs, offset);
@@ -735,7 +835,6 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
  bad_or_fault:
 	if (type == TYPE_ERROR)
 		goto bad;
-	regs->ARM_pc -= thumb_mode(regs) ? 2 : 4;
 	/*
 	 * We got a fault - fix it up, or die.
 	 */
@@ -751,8 +850,8 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	 */
 	printk(KERN_ERR "Alignment trap: not handling instruction "
 		"%0*lx at [<%08lx>]\n",
-		thumb_mode(regs) ? 4 : 8,
-		thumb_mode(regs) ? tinstr : instr, instrptr);
+		isize << 1,
+		isize == 2 ? tinstr : instr, instrptr);
 	ai_skipped += 1;
 	return 1;
 
@@ -763,8 +862,8 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 		printk("Alignment trap: %s (%d) PC=0x%08lx Instr=0x%0*lx "
 		       "Address=0x%08lx FSR 0x%03x\n", current->comm,
 			task_pid_nr(current), instrptr,
-			thumb_mode(regs) ? 4 : 8,
-			thumb_mode(regs) ? tinstr : instr,
+			isize << 1,
+			isize == 2 ? tinstr : instr,
 			addr, fsr);
 
 	if (ai_usermode & UM_FIXUP)
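The IS_T32() macro added above encodes the ARMv7 ARM (DDI 0406A) rule for spotting a 32-bit Thumb-2 instruction from its first halfword: bits [15:13] must all be set and bits [12:11] must be non-zero, i.e. first halfwords 0xe800-0xffff excluding 0xe000-0xe7ff. A standalone sketch with a hypothetical function name:

    #include <stdbool.h>
    #include <stdint.h>

    /* True when the halfword begins a 32-bit Thumb-2 encoding; every
     * other value is a complete 16-bit Thumb instruction. */
    static bool is_thumb2_32bit(uint16_t hi16)
    {
            return (hi16 & 0xe000) == 0xe000 && (hi16 & 0x1800) != 0;
    }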
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 0455557a289..6fdcbb70982 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -208,7 +208,7 @@ good_area:
 	 * than endlessly redo the fault.
 	 */
 survive:
-	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, fsr & (1 << 11));
+	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, (fsr & (1 << 11)) ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
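The fault.c change tracks a generic-MM interface change: handle_mm_fault() now takes a flags word rather than a raw write_access boolean, so the ARM handler translates FSR bit 11 (set for write aborts on these cores) into FAULT_FLAG_WRITE. A sketch of that translation; the macro name is ours and the 0x01 value is an assumption based on kernels of this vintage:

    /* FSR bit 11 distinguishes a write abort; the generic fault path
     * wants this expressed as FAULT_FLAG_WRITE rather than a bool. */
    #define FSR_WRITE_BIT   (1 << 11)       /* our name, not the kernel's */

    static unsigned int fsr_fault_flags(unsigned int fsr)
    {
            return (fsr & FSR_WRITE_BIT) ? 0x01 /* FAULT_FLAG_WRITE */ : 0;
    }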
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 8277802ec85..3a7279c1ce5 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -120,6 +120,32 @@ void show_mem(void)
 	printk("%d pages swap cached\n", cached);
 }
 
+static void __init find_node_limits(int node, struct meminfo *mi,
+	unsigned long *min, unsigned long *max_low, unsigned long *max_high)
+{
+	int i;
+
+	*min = -1UL;
+	*max_low = *max_high = 0;
+
+	for_each_nodebank(i, mi, node) {
+		struct membank *bank = &mi->bank[i];
+		unsigned long start, end;
+
+		start = bank_pfn_start(bank);
+		end = bank_pfn_end(bank);
+
+		if (*min > start)
+			*min = start;
+		if (*max_high < end)
+			*max_high = end;
+		if (bank->highmem)
+			continue;
+		if (*max_low < end)
+			*max_low = end;
+	}
+}
+
 /*
  * FIXME: We really want to avoid allocating the bootmap bitmap
  * over the top of the initrd.  Hopefully, this is located towards
@@ -210,41 +236,25 @@ static inline void map_memory_bank(struct membank *bank)
 #endif
 }
 
-static unsigned long __init bootmem_init_node(int node, struct meminfo *mi)
+static void __init bootmem_init_node(int node, struct meminfo *mi,
+	unsigned long start_pfn, unsigned long end_pfn)
 {
-	unsigned long start_pfn, end_pfn, boot_pfn;
+	unsigned long boot_pfn;
 	unsigned int boot_pages;
 	pg_data_t *pgdat;
 	int i;
 
-	start_pfn = -1UL;
-	end_pfn = 0;
-
 	/*
-	 * Calculate the pfn range, and map the memory banks for this node.
+	 * Map the memory banks for this node.
 	 */
 	for_each_nodebank(i, mi, node) {
 		struct membank *bank = &mi->bank[i];
-		unsigned long start, end;
-
-		start = bank_pfn_start(bank);
-		end = bank_pfn_end(bank);
-
-		if (start_pfn > start)
-			start_pfn = start;
-		if (end_pfn < end)
-			end_pfn = end;
-
-		map_memory_bank(bank);
+		if (!bank->highmem)
+			map_memory_bank(bank);
 	}
 
 	/*
-	 * If there is no memory in this node, ignore it.
-	 */
-	if (end_pfn == 0)
-		return end_pfn;
-
-	/*
 	 * Allocate the bootmem bitmap page.
 	 */
 	boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
@@ -260,7 +270,8 @@ static unsigned long __init bootmem_init_node(int node, struct meminfo *mi)
 	for_each_nodebank(i, mi, node) {
 		struct membank *bank = &mi->bank[i];
 
-		free_bootmem_node(pgdat, bank_phys_start(bank), bank_phys_size(bank));
+		if (!bank->highmem)
+			free_bootmem_node(pgdat, bank_phys_start(bank), bank_phys_size(bank));
 		memory_present(node, bank_pfn_start(bank), bank_pfn_end(bank));
 	}
 
@@ -269,8 +280,6 @@ static unsigned long __init bootmem_init_node(int node, struct meminfo *mi)
 	 */
 	reserve_bootmem_node(pgdat, boot_pfn << PAGE_SHIFT,
 			     boot_pages << PAGE_SHIFT, BOOTMEM_DEFAULT);
-
-	return end_pfn;
 }
 
 static void __init bootmem_reserve_initrd(int node)
@@ -297,33 +306,39 @@ static void __init bootmem_reserve_initrd(int node)
 static void __init bootmem_free_node(int node, struct meminfo *mi)
 {
 	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
-	unsigned long start_pfn, end_pfn;
-	pg_data_t *pgdat = NODE_DATA(node);
+	unsigned long min, max_low, max_high;
 	int i;
 
-	start_pfn = pgdat->bdata->node_min_pfn;
-	end_pfn = pgdat->bdata->node_low_pfn;
+	find_node_limits(node, mi, &min, &max_low, &max_high);
 
 	/*
 	 * initialise the zones within this node.
 	 */
 	memset(zone_size, 0, sizeof(zone_size));
-	memset(zhole_size, 0, sizeof(zhole_size));
 
 	/*
 	 * The size of this node has already been determined.  If we need
 	 * to do anything fancy with the allocation of this memory to the
 	 * zones, now is the time to do it.
 	 */
-	zone_size[0] = end_pfn - start_pfn;
+	zone_size[0] = max_low - min;
+#ifdef CONFIG_HIGHMEM
+	zone_size[ZONE_HIGHMEM] = max_high - max_low;
+#endif
 
 	/*
 	 * For each bank in this node, calculate the size of the holes.
 	 *  holes = node_size - sum(bank_sizes_in_node)
 	 */
-	zhole_size[0] = zone_size[0];
-	for_each_nodebank(i, mi, node)
-		zhole_size[0] -= bank_pfn_size(&mi->bank[i]);
+	memcpy(zhole_size, zone_size, sizeof(zhole_size));
+	for_each_nodebank(i, mi, node) {
+		int idx = 0;
+#ifdef CONFIG_HIGHMEM
+		if (mi->bank[i].highmem)
+			idx = ZONE_HIGHMEM;
+#endif
+		zhole_size[idx] -= bank_pfn_size(&mi->bank[i]);
+	}
 
 	/*
 	 * Adjust the sizes according to any special requirements for
@@ -331,13 +346,13 @@ static void __init bootmem_free_node(int node, struct meminfo *mi)
 	 */
 	arch_adjust_zones(node, zone_size, zhole_size);
 
-	free_area_init_node(node, zone_size, start_pfn, zhole_size);
+	free_area_init_node(node, zone_size, min, zhole_size);
 }
 
 void __init bootmem_init(void)
 {
 	struct meminfo *mi = &meminfo;
-	unsigned long memend_pfn = 0;
+	unsigned long min, max_low, max_high;
 	int node, initrd_node;
 
 	/*
@@ -345,11 +360,29 @@ void __init bootmem_init(void)
 	 */
 	initrd_node = check_initrd(mi);
 
+	max_low = max_high = 0;
+
 	/*
 	 * Run through each node initialising the bootmem allocator.
 	 */
 	for_each_node(node) {
-		unsigned long end_pfn = bootmem_init_node(node, mi);
+		unsigned long node_low, node_high;
+
+		find_node_limits(node, mi, &min, &node_low, &node_high);
+
+		if (node_low > max_low)
+			max_low = node_low;
+		if (node_high > max_high)
+			max_high = node_high;
+
+		/*
+		 * If there is no memory in this node, ignore it.
+		 * (We can't have nodes which have no lowmem)
+		 */
+		if (node_low == 0)
+			continue;
+
+		bootmem_init_node(node, mi, min, node_low);
 
 		/*
 		 * Reserve any special node zero regions.
@@ -362,12 +395,6 @@ void __init bootmem_init(void)
 		 */
 		if (node == initrd_node)
 			bootmem_reserve_initrd(node);
-
-		/*
-		 * Remember the highest memory PFN.
-		 */
-		if (end_pfn > memend_pfn)
-			memend_pfn = end_pfn;
 	}
 
 	/*
@@ -383,7 +410,7 @@ void __init bootmem_init(void)
 	for_each_node(node)
 		bootmem_free_node(node, mi);
 
-	high_memory = __va((memend_pfn << PAGE_SHIFT) - 1) + 1;
+	high_memory = __va((max_low << PAGE_SHIFT) - 1) + 1;
 
 	/*
 	 * This doesn't seem to be used by the Linux memory manager any
@@ -393,7 +420,8 @@ void __init bootmem_init(void)
 	 * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
 	 * the system, not the maximum PFN.
 	 */
-	max_pfn = max_low_pfn = memend_pfn - PHYS_PFN_OFFSET;
+	max_low_pfn = max_low - PHYS_PFN_OFFSET;
+	max_pfn = max_high - PHYS_PFN_OFFSET;
 }
 
 static inline int free_area(unsigned long pfn, unsigned long end, char *s)
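The init.c rework sizes two zones per node: lowmem spans [min, max_low) and, under CONFIG_HIGHMEM, ZONE_HIGHMEM spans [max_low, max_high); each zone's hole is its span minus the pages its banks actually cover. A self-contained sketch of that arithmetic, with types and names invented for illustration:

    struct bank_info { unsigned long start_pfn, end_pfn; int highmem; };

    /* zone[0]/hole[0] describe lowmem, zone[1]/hole[1] highmem, as in
     * bootmem_free_node() above: each hole starts as the whole zone
     * span and shrinks by every page a bank contributes. */
    static void size_node_zones(const struct bank_info *bank, int nr_banks,
                                unsigned long min, unsigned long max_low,
                                unsigned long max_high,
                                unsigned long zone[2], unsigned long hole[2])
    {
            int i;

            zone[0] = max_low - min;
            zone[1] = max_high - max_low;
            hole[0] = zone[0];
            hole[1] = zone[1];

            for (i = 0; i < nr_banks; i++)
                    hole[bank[i].highmem ? 1 : 0] -=
                            bank[i].end_pfn - bank[i].start_pfn;
    }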
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 9f88dd3be60..0ab75c60f7c 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -110,6 +110,12 @@ static int remap_area_pages(unsigned long start, unsigned long pfn,
 	return err;
 }
 
+int ioremap_page(unsigned long virt, unsigned long phys,
+		 const struct mem_type *mtype)
+{
+	return remap_area_pages(virt, __phys_to_pfn(phys), PAGE_SIZE, mtype);
+}
+EXPORT_SYMBOL(ioremap_page);
 
 void __check_kvm_seq(struct mm_struct *mm)
 {
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index e6344ece00c..4426ee67cec 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -255,6 +255,7 @@ const struct mem_type *get_mem_type(unsigned int type)
 {
 	return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
 }
+EXPORT_SYMBOL(get_mem_type);
 
 /*
  * Adjust the PMD section entries according to the CPU in use.
@@ -686,13 +687,19 @@ __early_param("vmalloc=", early_vmalloc);
 
 static void __init sanity_check_meminfo(void)
 {
-	int i, j;
+	int i, j, highmem = 0;
 
 	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
 		struct membank *bank = &meminfo.bank[j];
 		*bank = meminfo.bank[i];
 
 #ifdef CONFIG_HIGHMEM
+		if (__va(bank->start) > VMALLOC_MIN ||
+		    __va(bank->start) < (void *)PAGE_OFFSET)
+			highmem = 1;
+
+		bank->highmem = highmem;
+
 		/*
 		 * Split those memory banks which are partially overlapping
 		 * the vmalloc area greatly simplifying things later.
@@ -713,6 +720,7 @@ static void __init sanity_check_meminfo(void)
 			i++;
 			bank[1].size -= VMALLOC_MIN - __va(bank->start);
 			bank[1].start = __pa(VMALLOC_MIN - 1) + 1;
+			bank[1].highmem = highmem = 1;
 			j++;
 		}
 		bank->size = VMALLOC_MIN - __va(bank->start);
@@ -835,10 +843,31 @@ void __init reserve_node_zero(pg_data_t *pgdat)
 				BOOTMEM_EXCLUSIVE);
 	}
 
+	if (machine_is_treo680()) {
+		reserve_bootmem_node(pgdat, 0xa0000000, 0x1000,
+				BOOTMEM_EXCLUSIVE);
+		reserve_bootmem_node(pgdat, 0xa2000000, 0x1000,
+				BOOTMEM_EXCLUSIVE);
+	}
+
 	if (machine_is_palmt5())
 		reserve_bootmem_node(pgdat, 0xa0200000, 0x1000,
 				BOOTMEM_EXCLUSIVE);
 
+	/*
+	 * U300 - This platform family can share physical memory
+	 * between two ARM cpus, one running Linux and the other
+	 * running another OS.
+	 */
+	if (machine_is_u300()) {
+#ifdef CONFIG_MACH_U300_SINGLE_RAM
+#if ((CONFIG_MACH_U300_ACCESS_MEM_SIZE & 1) == 1) &&	\
+	CONFIG_MACH_U300_2MB_ALIGNMENT_FIX
+		res_size = 0x00100000;
+#endif
+#endif
+	}
+
 #ifdef CONFIG_SA1111
 	/*
 	 * Because of the SA1111 DMA bug, we want to preserve our
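With get_mem_type() and the new ioremap_page() exported, a module can map a single page with explicit memory-type attributes. A hypothetical usage sketch: the wrapper function and its error handling are invented and the header locations are assumptions; only MT_DEVICE, get_mem_type() and ioremap_page() come from the diff above:

    #include <linux/errno.h>
    #include <asm/io.h>
    #include <asm/mach/map.h>

    static int map_one_device_page(unsigned long virt, unsigned long phys)
    {
            const struct mem_type *mtype = get_mem_type(MT_DEVICE);

            if (!mtype)
                    return -EINVAL;         /* unknown memory type index */

            /* Maps exactly one PAGE_SIZE page at 'virt' onto 'phys'. */
            return ioremap_page(virt, phys, mtype);
    }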
diff --git a/arch/arm/mm/proc-syms.c b/arch/arm/mm/proc-syms.c
index 195e48edd8c..ac5c80062b7 100644
--- a/arch/arm/mm/proc-syms.c
+++ b/arch/arm/mm/proc-syms.c
@@ -27,6 +27,7 @@ EXPORT_SYMBOL(__cpuc_flush_kern_all);
 EXPORT_SYMBOL(__cpuc_flush_user_all);
 EXPORT_SYMBOL(__cpuc_flush_user_range);
 EXPORT_SYMBOL(__cpuc_coherent_kern_range);
+EXPORT_SYMBOL(__cpuc_flush_dcache_page);
 EXPORT_SYMBOL(dmac_inv_range);	/* because of flush_ioremap_region() */
 #else
 EXPORT_SYMBOL(cpu_cache);
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 087e239704d..524ddae9259 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -170,6 +170,9 @@ __v6_setup:
 #endif /* CONFIG_MMU */
 	adr	r5, v6_crval
 	ldmia	r5, {r5, r6}
+#ifdef CONFIG_CPU_ENDIAN_BE8
+	orr	r6, r6, #1 << 25		@ big-endian page tables
+#endif
 	mrc	p15, 0, r0, c1, c0, 0		@ read control register
 	bic	r0, r0, r5			@ clear bits them
 	orr	r0, r0, r6			@ set them
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index a08d9d2380d..180a08d03a0 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -19,17 +19,23 @@
 
 #include "proc-macros.S"
 
-#define TTB_C		(1 << 0)
 #define TTB_S		(1 << 1)
 #define TTB_RGN_NC	(0 << 3)
 #define TTB_RGN_OC_WBWA	(1 << 3)
 #define TTB_RGN_OC_WT	(2 << 3)
 #define TTB_RGN_OC_WB	(3 << 3)
+#define TTB_NOS		(1 << 5)
+#define TTB_IRGN_NC	((0 << 0) | (0 << 6))
+#define TTB_IRGN_WBWA	((0 << 0) | (1 << 6))
+#define TTB_IRGN_WT	((1 << 0) | (0 << 6))
+#define TTB_IRGN_WB	((1 << 0) | (1 << 6))
 
 #ifndef CONFIG_SMP
-#define TTB_FLAGS	TTB_C|TTB_RGN_OC_WB	@ mark PTWs cacheable, outer WB
+/* PTWs cacheable, inner WB not shareable, outer WB not shareable */
+#define TTB_FLAGS	TTB_IRGN_WB|TTB_RGN_OC_WB
 #else
-#define TTB_FLAGS	TTB_C|TTB_S|TTB_RGN_OC_WBWA	@ mark PTWs cacheable and shared, outer WBWA
+/* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */
+#define TTB_FLAGS	TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA
 #endif
 
 ENTRY(cpu_v7_proc_init)
@@ -176,8 +182,8 @@ cpu_v7_name:
 */
 __v7_setup:
 #ifdef CONFIG_SMP
-	mrc	p15, 0, r0, c1, c0, 1		@ Enable SMP/nAMP mode
-	orr	r0, r0, #(0x1 << 6)
+	mrc	p15, 0, r0, c1, c0, 1		@ Enable SMP/nAMP mode and
+	orr	r0, r0, #(1 << 6) | (1 << 0)	@ TLB ops broadcasting
 	mcr	p15, 0, r0, c1, c0, 1
 #endif
 	adr	r12, __v7_setup_stack		@ the local stack
@@ -227,12 +233,43 @@ __v7_setup:
 	mov	r10, #0x1f			@ domains 0, 1 = manager
 	mcr	p15, 0, r10, c3, c0, 0		@ load domain access register
 #endif
-	ldr	r5, =0xff0aa1a8
-	ldr	r6, =0x40e040e0
+	/*
+	 * Memory region attributes with SCTLR.TRE=1
+	 *
+	 *   n = TEX[0],C,B
+	 *   TR = PRRR[2n+1:2n]		- memory type
+	 *   IR = NMRR[2n+1:2n]		- inner cacheable property
+	 *   OR = NMRR[2n+17:2n+16]	- outer cacheable property
+	 *
+	 *			n	TR	IR	OR
+	 *   UNCACHED		000	00
+	 *   BUFFERABLE		001	10	00	00
+	 *   WRITETHROUGH	010	10	10	10
+	 *   WRITEBACK		011	10	11	11
+	 *   reserved		110
+	 *   WRITEALLOC		111	10	01	01
+	 *   DEV_SHARED		100	01
+	 *   DEV_NONSHARED	100	01
+	 *   DEV_WC		001	10
+	 *   DEV_CACHED		011	10
+	 *
+	 * Other attributes:
+	 *
+	 *   DS0 = PRRR[16] = 0		- device shareable property
+	 *   DS1 = PRRR[17] = 1		- device shareable property
+	 *   NS0 = PRRR[18] = 0		- normal shareable property
+	 *   NS1 = PRRR[19] = 1		- normal shareable property
+	 *   NOS = PRRR[24+n] = 1	- not outer shareable
+	 */
+	ldr	r5, =0xff0a81a8			@ PRRR
+	ldr	r6, =0x40e040e0			@ NMRR
 	mcr	p15, 0, r5, c10, c2, 0		@ write PRRR
 	mcr	p15, 0, r6, c10, c2, 1		@ write NMRR
 	adr	r5, v7_crval
 	ldmia	r5, {r5, r6}
+#ifdef CONFIG_CPU_ENDIAN_BE8
+	orr	r6, r6, #1 << 25		@ big-endian page tables
+#endif
 	mrc	p15, 0, r0, c1, c0, 0		@ read control register
 	bic	r0, r0, r5			@ clear bits them
 	orr	r0, r0, r6			@ set them
@@ -240,14 +277,14 @@ __v7_setup:
 ENDPROC(__v7_setup)
 
 	/*   AT
-	 *  TFR   EV X F   I D LR
-	 * .EEE ..EE PUI. .T.T 4RVI ZFRS BLDP WCAM
+	 *  TFR   EV X F   I D LR    S
+	 * .EEE ..EE PUI. .T.T 4RVI ZWRS BLDP WCAM
 	 * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced
-	 *    1    0 110       0011 1.00 .111 1101 < we want
+	 *    1    0 110       0011 1100 .111 1101 < we want
 	 */
 	.type	v7_crval, #object
 v7_crval:
-	crval	clear=0x0120c302, mmuset=0x10c0387d, ucset=0x00c0187c
+	crval	clear=0x0120c302, mmuset=0x10c03c7d, ucset=0x00c01c7c
 
 __v7_setup_stack:
 	.space	4 * 11				@ 11 registers
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
index b637e7380ab..a26a605b73b 100644
--- a/arch/arm/mm/tlb-v7.S
+++ b/arch/arm/mm/tlb-v7.S
@@ -42,9 +42,11 @@ ENTRY(v7wbi_flush_user_tlb_range)
 	mov	r1, r1, lsl #PAGE_SHIFT
 	vma_vm_flags r2, r2			@ get vma->vm_flags
 1:
-	mcr	p15, 0, r0, c8, c6, 1		@ TLB invalidate D MVA (was 1)
-	tst	r2, #VM_EXEC			@ Executable area ?
-	mcrne	p15, 0, r0, c8, c5, 1		@ TLB invalidate I MVA (was 1)
+#ifdef CONFIG_SMP
+	mcr	p15, 0, r0, c8, c3, 1		@ TLB invalidate U MVA (shareable)
+#else
+	mcr	p15, 0, r0, c8, c7, 1		@ TLB invalidate U MVA
+#endif
 	add	r0, r0, #PAGE_SZ
 	cmp	r0, r1
 	blo	1b
@@ -69,8 +71,11 @@ ENTRY(v7wbi_flush_kern_tlb_range)
 	mov	r0, r0, lsl #PAGE_SHIFT
 	mov	r1, r1, lsl #PAGE_SHIFT
 1:
-	mcr	p15, 0, r0, c8, c6, 1		@ TLB invalidate D MVA
-	mcr	p15, 0, r0, c8, c5, 1		@ TLB invalidate I MVA
+#ifdef CONFIG_SMP
+	mcr	p15, 0, r0, c8, c3, 1		@ TLB invalidate U MVA (shareable)
+#else
+	mcr	p15, 0, r0, c8, c7, 1		@ TLB invalidate U MVA
+#endif
 	add	r0, r0, #PAGE_SZ
 	cmp	r0, r1
 	blo	1b
@@ -87,5 +92,5 @@ ENDPROC(v7wbi_flush_kern_tlb_range)
 ENTRY(v7wbi_tlb_fns)
 	.long	v7wbi_flush_user_tlb_range
 	.long	v7wbi_flush_kern_tlb_range
-	.long	v6wbi_tlb_flags
+	.long	v7wbi_tlb_flags
 	.size	v7wbi_tlb_fns, . - v7wbi_tlb_fns
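The tlb-v7.S change collapses the separate D-side and I-side invalidates into the ARMv7 unified invalidate-by-MVA operation: on SMP the inner-shareable encoding (c8, c3, 1) broadcasts the invalidate to the other cores, while UP keeps the cheaper local encoding (c8, c7, 1). A rough C-with-inline-assembly sketch of the loop, assuming 4 KB pages and a page-aligned [start, end) range:

    /* Illustration only: invalidate one unified TLB entry per page. */
    static void flush_tlb_range_sketch(unsigned long start, unsigned long end)
    {
            unsigned long addr;

            for (addr = start; addr < end; addr += 4096)
    #ifdef CONFIG_SMP
                    /* inner-shareable variant: broadcast to all cores */
                    asm volatile("mcr p15, 0, %0, c8, c3, 1" : : "r" (addr));
    #else
                    /* local unified TLB invalidate by MVA */
                    asm volatile("mcr p15, 0, %0, c8, c7, 1" : : "r" (addr));
    #endif
            asm volatile("dsb" : : : "memory");
    }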