Diffstat (limited to 'arch/arm/mm')
-rw-r--r--   arch/arm/mm/abort-macro.S |  2
-rw-r--r--   arch/arm/mm/cache-l2x0.c  | 21
-rw-r--r--   arch/arm/mm/init.c        |  2
-rw-r--r--   arch/arm/mm/proc-arm920.S |  2
-rw-r--r--   arch/arm/mm/proc-arm926.S |  2
-rw-r--r--   arch/arm/mm/proc-sa1100.S | 10
-rw-r--r--   arch/arm/mm/proc-v6.S     | 16
-rw-r--r--   arch/arm/mm/proc-v7.S     |  6
-rw-r--r--   arch/arm/mm/proc-xsc3.S   |  6
9 files changed, 54 insertions, 13 deletions
diff --git a/arch/arm/mm/abort-macro.S b/arch/arm/mm/abort-macro.S
index 52162d59407..2cbf68ef0e8 100644
--- a/arch/arm/mm/abort-macro.S
+++ b/arch/arm/mm/abort-macro.S
@@ -17,7 +17,7 @@
 	cmp	\tmp, #0x5600			@ Is it ldrsb?
 	orreq	\tmp, \tmp, #1 << 11		@ Set L-bit if yes
 	tst	\tmp, #1 << 11			@ L = 0 -> write
-	orreq	\psr, \psr, #1 << 11		@ yes.
+	orreq	\fsr, \fsr, #1 << 11		@ yes.
 	b	do_DataAbort
 not_thumb:
 	.endm
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 44c086710d2..9ecfdb51195 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -277,6 +277,25 @@ static void l2x0_disable(void)
 	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
+static void __init l2x0_unlock(__u32 cache_id)
+{
+	int lockregs;
+	int i;
+
+	if (cache_id == L2X0_CACHE_ID_PART_L310)
+		lockregs = 8;
+	else
+		/* L210 and unknown types */
+		lockregs = 1;
+
+	for (i = 0; i < lockregs; i++) {
+		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE +
+			       i * L2X0_LOCKDOWN_STRIDE);
+		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE +
+			       i * L2X0_LOCKDOWN_STRIDE);
+	}
+}
+
 void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 {
 	__u32 aux;
@@ -328,6 +347,8 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 	 * accessing the below registers will fault.
 	 */
 	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
+		/* Make sure that I&D is not locked down when starting */
+		l2x0_unlock(cache_id);
 
 		/* l2x0 controller is disabled */
 		writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 91bca355cd3..cc7e2d8be9a 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -298,7 +298,7 @@ static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
 #ifdef CONFIG_HAVE_ARCH_PFN_VALID
 int pfn_valid(unsigned long pfn)
 {
-	return memblock_is_memory(pfn << PAGE_SHIFT);
+	return memblock_is_memory(__pfn_to_phys(pfn));
 }
 EXPORT_SYMBOL(pfn_valid);
 #endif
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 92bd102e398..2e6849b41f6 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -379,7 +379,7 @@ ENTRY(cpu_arm920_set_pte_ext)
 
 /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
 .globl	cpu_arm920_suspend_size
-.equ	cpu_arm920_suspend_size, 4 * 3
+.equ	cpu_arm920_suspend_size, 4 * 4
 #ifdef CONFIG_PM_SLEEP
 ENTRY(cpu_arm920_do_suspend)
 	stmfd	sp!, {r4 - r7, lr}
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 2bbcf053dff..cd8f79c3a28 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -394,7 +394,7 @@ ENTRY(cpu_arm926_set_pte_ext)
 
 /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
 .globl	cpu_arm926_suspend_size
-.equ	cpu_arm926_suspend_size, 4 * 3
+.equ	cpu_arm926_suspend_size, 4 * 4
 #ifdef CONFIG_PM_SLEEP
 ENTRY(cpu_arm926_do_suspend)
 	stmfd	sp!, {r4 - r7, lr}
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index 07219c2ae11..69e7f2ef738 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -182,11 +182,11 @@ ENDPROC(cpu_sa1100_do_suspend)
 
 ENTRY(cpu_sa1100_do_resume)
 	ldmia	r0, {r4 - r7}			@ load cp regs
-	mov	r1, #0
-	mcr	p15, 0, r1, c8, c7, 0		@ flush I+D TLBs
-	mcr	p15, 0, r1, c7, c7, 0		@ flush I&D cache
-	mcr	p15, 0, r1, c9, c0, 0		@ invalidate RB
-	mcr	p15, 0, r1, c9, c0, 5		@ allow user space to use RB
+	mov	ip, #0
+	mcr	p15, 0, ip, c8, c7, 0		@ flush I+D TLBs
+	mcr	p15, 0, ip, c7, c7, 0		@ flush I&D cache
+	mcr	p15, 0, ip, c9, c0, 0		@ invalidate RB
+	mcr	p15, 0, ip, c9, c0, 5		@ allow user space to use RB
 
 	mcr	p15, 0, r4, c3, c0, 0		@ domain ID
 	mcr	p15, 0, r5, c2, c0, 0		@ translation table base addr
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 219138d2f15..a923aa0fd00 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -223,6 +223,22 @@ __v6_setup:
 	mrc	p15, 0, r0, c1, c0, 0		@ read control register
 	bic	r0, r0, r5			@ clear bits them
 	orr	r0, r0, r6			@ set them
+#ifdef CONFIG_ARM_ERRATA_364296
+	/*
+	 * Workaround for the 364296 ARM1136 r0p2 erratum (possible cache data
+	 * corruption with hit-under-miss enabled). The conditional code below
+	 * (setting the undocumented bit 31 in the auxiliary control register
+	 * and the FI bit in the control register) disables hit-under-miss
+	 * without putting the processor into full low interrupt latency mode.
+	 */
+	ldr	r6, =0x4107b362			@ id for ARM1136 r0p2
+	mrc	p15, 0, r5, c0, c0, 0		@ get processor id
+	teq	r5, r6				@ check for the faulty core
+	mrceq	p15, 0, r5, c1, c0, 1		@ load aux control reg
+	orreq	r5, r5, #(1 << 31)		@ set the undocumented bit 31
+	mcreq	p15, 0, r5, c1, c0, 1		@ write aux control reg
+	orreq	r0, r0, #(1 << 21)		@ low interrupt latency configuration
+#endif
 	mov	pc, lr				@ return to head.S:__ret
 
 	/*
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index a30e78542cc..9049c0764db 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -66,6 +66,7 @@ ENDPROC(cpu_v7_proc_fin)
 ENTRY(cpu_v7_reset)
 	mrc	p15, 0, r1, c1, c0, 0		@ ctrl register
 	bic	r1, r1, #0x1			@ ...............m
+ THUMB(	bic	r1, r1, #1 << 30 )		@ SCTLR.TE (Thumb exceptions)
 	mcr	p15, 0, r1, c1, c0, 0		@ disable MMU
 	isb
 	mov	pc, r0
@@ -247,13 +248,16 @@ ENTRY(cpu_v7_do_resume)
 	mcr	p15, 0, r7, c2, c0, 0	@ TTB 0
 	mcr	p15, 0, r8, c2, c0, 1	@ TTB 1
 	mcr	p15, 0, ip, c2, c0, 2	@ TTB control register
-	mcr	p15, 0, r10, c1, c0, 1	@ Auxiliary control register
+	mrc	p15, 0, r4, c1, c0, 1	@ Read Auxiliary control register
+	teq	r4, r10			@ Is it already set?
+	mcrne	p15, 0, r10, c1, c0, 1	@ No, so write it
 	mcr	p15, 0, r11, c1, c0, 2	@ Co-processor access control
 	ldr	r4, =PRRR		@ PRRR
 	ldr	r5, =NMRR		@ NMRR
 	mcr	p15, 0, r4, c10, c2, 0	@ write PRRR
 	mcr	p15, 0, r5, c10, c2, 1	@ write NMRR
 	isb
+	dsb
 	mov	r0, r9			@ control register
 	mov	r2, r7, lsr #14		@ get TTB0 base
 	mov	r2, r2, lsl #14
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index 28c72a2006a..755e1bf2268 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -406,7 +406,7 @@ ENTRY(cpu_xsc3_set_pte_ext)
 	.align
 
 .globl	cpu_xsc3_suspend_size
-.equ	cpu_xsc3_suspend_size, 4 * 8
+.equ	cpu_xsc3_suspend_size, 4 * 7
 #ifdef CONFIG_PM_SLEEP
 ENTRY(cpu_xsc3_do_suspend)
 	stmfd	sp!, {r4 - r10, lr}
@@ -418,12 +418,12 @@ ENTRY(cpu_xsc3_do_suspend)
 	mrc	p15, 0, r9, c1, c0, 1	@ auxiliary control reg
 	mrc	p15, 0, r10, c1, c0, 0	@ control reg
 	bic	r4, r4, #2		@ clear frequency change bit
-	stmia	r0, {r1, r4 - r10}	@ store v:p offset + cp regs
+	stmia	r0, {r4 - r10}		@ store cp regs
 	ldmia	sp!, {r4 - r10, pc}
 ENDPROC(cpu_xsc3_do_suspend)
 
 ENTRY(cpu_xsc3_do_resume)
-	ldmia	r0, {r1, r4 - r10}	@ load v:p offset + cp regs
+	ldmia	r0, {r4 - r10}		@ load cp regs
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c7, 0	@ invalidate I & D caches, BTB
 	mcr	p15, 0, ip, c7, c10, 4	@ drain write (&fill) buffer
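
The init.c hunk above is the one whose point is easiest to miss when reading only the diff: `pfn << PAGE_SHIFT` is evaluated in `unsigned long`, so on a 32-bit kernel the high bits of a physical address above 4GB are lost before `memblock_is_memory()` ever sees it, whereas ARM's `__pfn_to_phys()` casts the PFN to `phys_addr_t` before shifting. The stand-alone C sketch below illustrates only that arithmetic; it is not kernel code, and the PFN value, the `PAGE_SHIFT` definition, and the assumption of a 64-bit physical address type are made-up demo inputs.

/*
 * Stand-alone illustration, not kernel code: why pfn_valid() now passes
 * __pfn_to_phys(pfn) instead of "pfn << PAGE_SHIFT" to memblock_is_memory().
 * PAGE_SHIFT and the example PFN are made-up demo inputs.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12                            /* 4 KiB pages */

int main(void)
{
	uint32_t pfn = 0x140000;                 /* page frame at physical 5 GiB */

	/* Old expression: the shift is done in 32 bits and wraps. */
	uint32_t truncated = pfn << PAGE_SHIFT;

	/* __pfn_to_phys()-style: widen to a 64-bit physical address first. */
	uint64_t widened = (uint64_t)pfn << PAGE_SHIFT;

	printf("32-bit shift:  0x%08" PRIx32 "\n", truncated); /* 0x40000000  */
	printf("widened shift: 0x%09" PRIx64 "\n", widened);   /* 0x140000000 */
	return 0;
}

With the old expression a page at physical 0x140000000 would be tested as if it lived at 0x40000000, so pfn_valid() could give the wrong answer on machines whose RAM sits above the 32-bit boundary; widening before the shift keeps the full address.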