author    Russell King <rmk+kernel@arm.linux.org.uk>  2010-07-27 10:42:48 +0100
committer Russell King <rmk+kernel@arm.linux.org.uk>  2010-07-27 10:42:52 +0100
commit    4708ac49305bbcd511600d4af71a4c6dca15af65 (patch)
tree      77ba5c83a7d4f4a119e3fd524d82adc297df1e35 /arch/arm/mm
parent    1dbd30e9890fd69e50b17edd70ca583546b0fe4e (diff)
parent    2f7989efd4398d92b8adffce2e07dd043a0895fe (diff)
Merge branch 'origin' into misc
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig        | 21
-rw-r--r--  arch/arm/mm/cache-l2x0.c   |  5
-rw-r--r--  arch/arm/mm/cache-v6.S     | 18
-rw-r--r--  arch/arm/mm/dma-mapping.c  | 18
4 files changed, 47 insertions(+), 15 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 71d5d5efcee..87ec141fcaa 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -724,6 +724,25 @@ config NEEDS_SYSCALL_FOR_CMPXCHG
Forget about fast user space cmpxchg support.
It is just not possible.
+config DMA_CACHE_RWFO
+ bool "Enable read/write for ownership DMA cache maintenance"
+ depends on CPU_V6 && SMP
+ default y
+ help
+ The Snoop Control Unit on ARM11MPCore does not detect the
+ cache maintenance operations and the dma_{map,unmap}_area()
+ functions may leave stale cache entries on other CPUs. By
+ enabling this option, a Read or Write For Ownership access is
+ performed in the ARMv6 DMA cache maintenance functions. These LDR/STR
+ instructions change the cache line state to shared or modified
+ so that the cache operation has the desired effect.
+
+ Note that the workaround is only valid on processors that do
+ not perform speculative loads into the D-cache. For such
+ processors, if cache maintenance operations are not broadcast
+ in hardware, other workarounds are needed (e.g. cache
+ maintenance broadcasting in software via FIQ).
+
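As a side note on the mechanism this help text describes: a plain load pulls the
cache line into the local CPU's cache, and a store then marks it modified, so the
maintenance operation that follows acts on a locally owned, up-to-date copy. A
minimal C sketch of the idea (rwfo_touch is a hypothetical name, not part of this
patch):

	/* Hypothetical helper, not from the patch: force local ownership
	 * of the cache line containing *p before a maintenance operation. */
	static inline void rwfo_touch(volatile unsigned long *p)
	{
		unsigned long tmp;

		tmp = *p;	/* read for ownership: line becomes shared locally */
		*p = tmp;	/* write for ownership: line becomes modified locally */
	}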
config OUTER_CACHE
bool
@@ -783,6 +802,8 @@ config ARM_L1_CACHE_SHIFT
config ARM_DMA_MEM_BUFFERABLE
bool "Use non-cacheable memory for DMA" if CPU_V6 && !CPU_V7
+ depends on !(MACH_REALVIEW_PB1176 || REALVIEW_EB_ARM11MP || \
+ MACH_REALVIEW_PB11MP)
default y if CPU_V6 || CPU_V7
help
Historically, the kernel has used strongly ordered mappings to
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 9819869d2bc..df4955885b2 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -218,6 +218,9 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
cache_id = readl(l2x0_base + L2X0_CACHE_ID);
aux = readl(l2x0_base + L2X0_AUX_CTRL);
+ aux &= aux_mask;
+ aux |= aux_val;
+
/* Determine the number of ways */
switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
case L2X0_CACHE_ID_PART_L310:
@@ -248,8 +251,6 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
if (!(readl(l2x0_base + L2X0_CTRL) & 1)) {
/* l2x0 controller is disabled */
- aux &= aux_mask;
- aux |= aux_val;
writel(aux, l2x0_base + L2X0_AUX_CTRL);
l2x0_inv_all();
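For context on why the two lines move: the way-count switch that follows the
register read consults the auxiliary control value (for the L310, bit 16 is
assumed here to select 8- versus 16-way associativity), so the platform's
aux_val/aux_mask overrides must be folded in before that. A rough C sketch of
the intended ordering, under that assumption:

	aux = readl(l2x0_base + L2X0_AUX_CTRL);
	aux &= aux_mask;	/* apply the platform overrides first... */
	aux |= aux_val;
	/* ...so anything derived from the register value (e.g. the L310
	 * way count, assumed to live in aux bit 16) sees the final bits. */
	ways = (aux & (1 << 16)) ? 16 : 8;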
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index e46ecd84713..86aa689ef1a 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -211,8 +211,9 @@ v6_dma_inv_range:
mcrne p15, 0, r1, c7, c15, 1 @ clean & invalidate unified line
#endif
1:
-#ifdef CONFIG_SMP
- str r0, [r0] @ write for ownership
+#ifdef CONFIG_DMA_CACHE_RWFO
+ ldr r2, [r0] @ read for ownership
+ str r2, [r0] @ write for ownership
#endif
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c6, 1 @ invalidate D line
@@ -234,7 +235,7 @@ v6_dma_inv_range:
v6_dma_clean_range:
bic r0, r0, #D_CACHE_LINE_SIZE - 1
1:
-#ifdef CONFIG_SMP
+#ifdef CONFIG_DMA_CACHE_RWFO
ldr r2, [r0] @ read for ownership
#endif
#ifdef HARVARD_CACHE
@@ -257,7 +258,7 @@ v6_dma_clean_range:
ENTRY(v6_dma_flush_range)
bic r0, r0, #D_CACHE_LINE_SIZE - 1
1:
-#ifdef CONFIG_SMP
+#ifdef CONFIG_DMA_CACHE_RWFO
ldr r2, [r0] @ read for ownership
str r2, [r0] @ write for ownership
#endif
@@ -283,9 +284,13 @@ ENTRY(v6_dma_map_area)
add r1, r1, r0
teq r2, #DMA_FROM_DEVICE
beq v6_dma_inv_range
+#ifndef CONFIG_DMA_CACHE_RWFO
+ b v6_dma_clean_range
+#else
teq r2, #DMA_TO_DEVICE
beq v6_dma_clean_range
b v6_dma_flush_range
+#endif
ENDPROC(v6_dma_map_area)
/*
@@ -295,6 +300,11 @@ ENDPROC(v6_dma_map_area)
* - dir - DMA direction
*/
ENTRY(v6_dma_unmap_area)
+#ifndef CONFIG_DMA_CACHE_RWFO
+ add r1, r1, r0
+ teq r2, #DMA_TO_DEVICE
+ bne v6_dma_inv_range
+#endif
mov pc, lr
ENDPROC(v6_dma_unmap_area)
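Read alongside the assembly above, the paths taken when CONFIG_DMA_CACHE_RWFO is
disabled reduce to the following direction logic, sketched here in C with
hypothetical helper names (the dcache_*_range() functions are illustrative, not
the kernel's):

	/* Sketch of v6 DMA map/unmap direction handling without RWFO. */
	static void v6_dma_map_sketch(unsigned long start, size_t size, int dir)
	{
		if (dir == DMA_FROM_DEVICE)
			dcache_invalidate_range(start, start + size);	/* drop soon-to-be-stale lines */
		else
			dcache_clean_range(start, start + size);	/* push dirty lines to memory */
	}

	static void v6_dma_unmap_sketch(unsigned long start, size_t size, int dir)
	{
		if (dir != DMA_TO_DEVICE)	/* device may have written the buffer */
			dcache_invalidate_range(start, start + size);
	}

With RWFO enabled, by contrast, v6_dma_unmap_area above falls straight through to
the return, since the map-time maintenance already left the caches consistent.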
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 13fa536d82e..9e7742f0a10 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -24,15 +24,6 @@
#include <asm/tlbflush.h>
#include <asm/sizes.h>
-/* Sanity check size */
-#if (CONSISTENT_DMA_SIZE % SZ_2M)
-#error "CONSISTENT_DMA_SIZE must be multiple of 2MiB"
-#endif
-
-#define CONSISTENT_OFFSET(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
-#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
-#define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)
-
static u64 get_coherent_dma_mask(struct device *dev)
{
u64 mask = ISA_DMA_THRESHOLD;
@@ -123,6 +114,15 @@ static void __dma_free_buffer(struct page *page, size_t size)
}
#ifdef CONFIG_MMU
+/* Sanity check size */
+#if (CONSISTENT_DMA_SIZE % SZ_2M)
+#error "CONSISTENT_DMA_SIZE must be multiple of 2MiB"
+#endif
+
+#define CONSISTENT_OFFSET(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
+#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
+#define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)
+
/*
* These are the page tables (2MB each) covering uncached, DMA consistent allocations
*/
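A quick worked example of the arithmetic behind these macros, assuming common
defaults rather than anything stated in the patch (PAGE_SHIFT = 12,
PGDIR_SHIFT = 21, CONSISTENT_DMA_SIZE = SZ_2M):

	/* Worked example with assumed values:
	 * NUM_CONSISTENT_PTES = 2MB >> 21 = 1 page table, covering
	 * 2MB >> 12 = 512 four-KB consistent pages.
	 * For x = CONSISTENT_BASE + 0x5000:
	 * CONSISTENT_OFFSET(x)    = 0x5000 >> 12 = 5  (sixth page)
	 * CONSISTENT_PTE_INDEX(x) = 0x5000 >> 21 = 0  (first page table)
	 */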