author	Nicolas Pitre <nicolas.pitre@linaro.org>	2013-10-18 22:06:03 +0100
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2013-10-29 11:06:23 +0000
commit	39792c7cf3111d69dc4aa0923859d8b929e9039f (patch)
tree	97e96f823d34ba2324efeafa5541893b062f112c /arch/arm/mach-vexpress
parent	3c8828f6a0cb3bf1bae04a98135da3c53e20c217 (diff)
ARM: 7861/1: cacheflush: consolidate single-CPU ARMv7 cache disabling code
This code is becoming duplicated in many places.  So let's consolidate
it into a handy macro that is known to be right and available for reuse.

Signed-off-by: Nicolas Pitre <nico@linaro.org>
Acked-by: Dave Martin <Dave.Martin@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
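The macro itself is not visible in this view, which is limited to the
vexpress callers.  As a rough sketch, reconstructed purely from the asm
sequences being deleted below, it plausibly expands along these lines
(the "level" argument is pasted into the flush-routine name, so the
callers' v7_exit_coherency_flush(all) and v7_exit_coherency_flush(louis)
pick v7_flush_dcache_all or v7_flush_dcache_louis; __stringify() and
CR_C are assumed to be in scope, exactly as in the removed code):

/*
 * Sketch only; the real definition may differ in detail.
 * Disable the D-cache, flush it, then drop out of coherency, with no
 * memory access (including the stack) during the sequence.
 */
#define v7_exit_coherency_flush(level) \
	asm volatile( \
	"str	fp, [sp, #-4]!	@ preserve fp; last stack use \n\t" \
	"mrc	p15, 0, r0, c1, c0, 0	@ get SCTLR \n\t" \
	"bic	r0, r0, #"__stringify(CR_C)"	@ clear the C bit \n\t" \
	"mcr	p15, 0, r0, c1, c0, 0	@ set SCTLR: stop cache allocations \n\t" \
	"isb	\n\t" \
	"bl	v7_flush_dcache_"__stringify(level)" \n\t" \
	"clrex	\n\t" \
	"mrc	p15, 0, r0, c1, c0, 1	@ get ACTLR \n\t" \
	"bic	r0, r0, #(1 << 6)	@ disable local coherency (SMP bit) \n\t" \
	"mcr	p15, 0, r0, c1, c0, 1	@ set ACTLR \n\t" \
	"isb	\n\t" \
	"dsb	\n\t" \
	"ldr	fp, [sp], #4" \
	: : : "r0","r1","r2","r3","r4","r5","r6","r7", \
	      "r9","r10","lr","memory")

fp is saved and restored explicitly rather than clobbered because, as the
removed comments note, clobbering it is incompatible with
CONFIG_FRAME_POINTER=y.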
Diffstat (limited to 'arch/arm/mach-vexpress')
-rw-r--r--	arch/arm/mach-vexpress/dcscb.c	56
-rw-r--r--	arch/arm/mach-vexpress/tc2_pm.c	48
2 files changed, 6 insertions(+), 98 deletions(-)
diff --git a/arch/arm/mach-vexpress/dcscb.c b/arch/arm/mach-vexpress/dcscb.c
index 3a6384c6c43..14d49968873 100644
--- a/arch/arm/mach-vexpress/dcscb.c
+++ b/arch/arm/mach-vexpress/dcscb.c
@@ -133,38 +133,8 @@ static void dcscb_power_down(void)
if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
arch_spin_unlock(&dcscb_lock);
- /*
- * Flush all cache levels for this cluster.
- *
- * To do so we do:
- * - Clear the SCTLR.C bit to prevent further cache allocations
- * - Flush the whole cache
- * - Clear the ACTLR "SMP" bit to disable local coherency
- *
- * Let's do it in the safest possible way i.e. with
- * no memory access within the following sequence
- * including to the stack.
- *
- * Note: fp is preserved to the stack explicitly prior doing
- * this since adding it to the clobber list is incompatible
- * with having CONFIG_FRAME_POINTER=y.
- */
- asm volatile(
- "str fp, [sp, #-4]! \n\t"
- "mrc p15, 0, r0, c1, c0, 0 @ get CR \n\t"
- "bic r0, r0, #"__stringify(CR_C)" \n\t"
- "mcr p15, 0, r0, c1, c0, 0 @ set CR \n\t"
- "isb \n\t"
- "bl v7_flush_dcache_all \n\t"
- "clrex \n\t"
- "mrc p15, 0, r0, c1, c0, 1 @ get AUXCR \n\t"
- "bic r0, r0, #(1 << 6) @ disable local coherency \n\t"
- "mcr p15, 0, r0, c1, c0, 1 @ set AUXCR \n\t"
- "isb \n\t"
- "dsb \n\t"
- "ldr fp, [sp], #4"
- : : : "r0","r1","r2","r3","r4","r5","r6","r7",
- "r9","r10","lr","memory");
+ /* Flush all cache levels for this cluster. */
+ v7_exit_coherency_flush(all);
/*
* This is a harmless no-op. On platforms with a real
@@ -183,26 +153,8 @@ static void dcscb_power_down(void)
} else {
arch_spin_unlock(&dcscb_lock);
- /*
- * Flush the local CPU cache.
- * Let's do it in the safest possible way as above.
- */
- asm volatile(
- "str fp, [sp, #-4]! \n\t"
- "mrc p15, 0, r0, c1, c0, 0 @ get CR \n\t"
- "bic r0, r0, #"__stringify(CR_C)" \n\t"
- "mcr p15, 0, r0, c1, c0, 0 @ set CR \n\t"
- "isb \n\t"
- "bl v7_flush_dcache_louis \n\t"
- "clrex \n\t"
- "mrc p15, 0, r0, c1, c0, 1 @ get AUXCR \n\t"
- "bic r0, r0, #(1 << 6) @ disable local coherency \n\t"
- "mcr p15, 0, r0, c1, c0, 1 @ set AUXCR \n\t"
- "isb \n\t"
- "dsb \n\t"
- "ldr fp, [sp], #4"
- : : : "r0","r1","r2","r3","r4","r5","r6","r7",
- "r9","r10","lr","memory");
+ /* Disable and flush the local CPU cache. */
+ v7_exit_coherency_flush(louis);
}
__mcpm_cpu_down(cpu, cluster);
diff --git a/arch/arm/mach-vexpress/tc2_pm.c b/arch/arm/mach-vexpress/tc2_pm.c
index e6eb4819291..4eb92ebfd95 100644
--- a/arch/arm/mach-vexpress/tc2_pm.c
+++ b/arch/arm/mach-vexpress/tc2_pm.c
@@ -156,32 +156,7 @@ static void tc2_pm_down(u64 residency)
: : "r" (0x400) );
}
- /*
- * We need to disable and flush the whole (L1 and L2) cache.
- * Let's do it in the safest possible way i.e. with
- * no memory access within the following sequence
- * including the stack.
- *
- * Note: fp is preserved to the stack explicitly prior doing
- * this since adding it to the clobber list is incompatible
- * with having CONFIG_FRAME_POINTER=y.
- */
- asm volatile(
- "str fp, [sp, #-4]! \n\t"
- "mrc p15, 0, r0, c1, c0, 0 @ get CR \n\t"
- "bic r0, r0, #"__stringify(CR_C)" \n\t"
- "mcr p15, 0, r0, c1, c0, 0 @ set CR \n\t"
- "isb \n\t"
- "bl v7_flush_dcache_all \n\t"
- "clrex \n\t"
- "mrc p15, 0, r0, c1, c0, 1 @ get AUXCR \n\t"
- "bic r0, r0, #(1 << 6) @ disable local coherency \n\t"
- "mcr p15, 0, r0, c1, c0, 1 @ set AUXCR \n\t"
- "isb \n\t"
- "dsb \n\t"
- "ldr fp, [sp], #4"
- : : : "r0","r1","r2","r3","r4","r5","r6","r7",
- "r9","r10","lr","memory");
+ v7_exit_coherency_flush(all);
cci_disable_port_by_cpu(mpidr);
@@ -197,26 +172,7 @@ static void tc2_pm_down(u64 residency)
arch_spin_unlock(&tc2_pm_lock);
- /*
- * We need to disable and flush only the L1 cache.
- * Let's do it in the safest possible way as above.
- */
- asm volatile(
- "str fp, [sp, #-4]! \n\t"
- "mrc p15, 0, r0, c1, c0, 0 @ get CR \n\t"
- "bic r0, r0, #"__stringify(CR_C)" \n\t"
- "mcr p15, 0, r0, c1, c0, 0 @ set CR \n\t"
- "isb \n\t"
- "bl v7_flush_dcache_louis \n\t"
- "clrex \n\t"
- "mrc p15, 0, r0, c1, c0, 1 @ get AUXCR \n\t"
- "bic r0, r0, #(1 << 6) @ disable local coherency \n\t"
- "mcr p15, 0, r0, c1, c0, 1 @ set AUXCR \n\t"
- "isb \n\t"
- "dsb \n\t"
- "ldr fp, [sp], #4"
- : : : "r0","r1","r2","r3","r4","r5","r6","r7",
- "r9","r10","lr","memory");
+ v7_exit_coherency_flush(louis);
}
__mcpm_cpu_down(cpu, cluster);