author     Nicolas Pitre <nicolas.pitre@linaro.org>    2010-12-15 23:29:04 -0500
committer  Nicolas Pitre <nico@fluxnic.net>            2010-12-19 12:57:08 -0500
commit     25cbe45440ea89a3b0f6f7ed326d3d476d53068b (patch)
tree       c4f63f23f9152fb9bb75b6206b445b5ce7290e8d /arch/arm/mm
parent     39af22a79232373764904576f31572f1db76af10 (diff)
ARM: fix cache-xsc3l2 after stack based kmap_atomic()
Since commit 3e4d3af501 "mm: stack based kmap_atomic()", it is actively wrong to rely on fixed kmap type indices (namely KM_L2_CACHE), as kmap_atomic() now ignores them entirely and a concurrent instance may happily reuse any slot for any purpose. Because kmap_atomic() is now able to deal with reentrancy, we can get rid of the ad hoc mapping here, and we don't even have to disable IRQs anymore (highmem case).

While the code is made much simpler, there is a needless cache flush introduced by the use of __kunmap_atomic(). It is not clear whether removing that flush would be worth the extra code-maintenance cost (I don't think there are that many highmem users on that platform, if any, anyway).

Signed-off-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--   arch/arm/mm/cache-xsc3l2.c   57
1 file changed, 21 insertions(+), 36 deletions(-)
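Before the diff itself, a condensed sketch of the change in mapping pattern (pulled from the hunks below and trimmed for readability; the names and calls are exactly the ones in the patch):

	/* Before: pick a fixed per-CPU fixmap slot and install the PTE by hand. */
	idx = KM_L2_CACHE + KM_TYPE_NR * smp_processor_id();
	va  = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte_ext(TOP_PTE(va), pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL), 0);
	local_flush_tlb_kernel_page(va);
	/* ... cache ops on va; the slot is simply reused, never unmapped ... */

	/* After: let the stack based kmap_atomic core pick and protect the slot. */
	va = (unsigned long)kmap_atomic_pfn(pa >> PAGE_SHIFT);
	/* ... cache ops on va ... */
	kunmap_atomic((void *)va);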
diff --git a/arch/arm/mm/cache-xsc3l2.c b/arch/arm/mm/cache-xsc3l2.c
index c3154928bcc..5a32020471e 100644
--- a/arch/arm/mm/cache-xsc3l2.c
+++ b/arch/arm/mm/cache-xsc3l2.c
@@ -17,14 +17,10 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
+#include <linux/highmem.h>
#include <asm/system.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>
-#include <asm/kmap_types.h>
-#include <asm/fixmap.h>
-#include <asm/pgtable.h>
-#include <asm/tlbflush.h>
-#include "mm.h"
#define CR_L2 (1 << 26)
@@ -71,16 +67,15 @@ static inline void xsc3_l2_inv_all(void)
dsb();
}
+static inline void l2_unmap_va(unsigned long va)
+{
#ifdef CONFIG_HIGHMEM
-#define l2_map_save_flags(x) raw_local_save_flags(x)
-#define l2_map_restore_flags(x) raw_local_irq_restore(x)
-#else
-#define l2_map_save_flags(x) ((x) = 0)
-#define l2_map_restore_flags(x) ((void)(x))
+ if (va != -1)
+ kunmap_atomic((void *)va);
#endif
+}
-static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va,
- unsigned long flags)
+static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va)
{
#ifdef CONFIG_HIGHMEM
unsigned long va = prev_va & PAGE_MASK;
@@ -89,17 +84,10 @@ static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va,
/*
* Switching to a new page. Because cache ops are
* using virtual addresses only, we must put a mapping
- * in place for it. We also enable interrupts for a
- * short while and disable them again to protect this
- * mapping.
+ * in place for it.
*/
- unsigned long idx;
- raw_local_irq_restore(flags);
- idx = KM_L2_CACHE + KM_TYPE_NR * smp_processor_id();
- va = __fix_to_virt(FIX_KMAP_BEGIN + idx);
- raw_local_irq_restore(flags | PSR_I_BIT);
- set_pte_ext(TOP_PTE(va), pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL), 0);
- local_flush_tlb_kernel_page(va);
+ l2_unmap_va(prev_va);
+ va = (unsigned long)kmap_atomic_pfn(pa >> PAGE_SHIFT);
}
return va + (pa_offset >> (32 - PAGE_SHIFT));
#else
@@ -109,7 +97,7 @@ static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va,
static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
{
- unsigned long vaddr, flags;
+ unsigned long vaddr;
if (start == 0 && end == -1ul) {
xsc3_l2_inv_all();
@@ -117,13 +105,12 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
}
vaddr = -1; /* to force the first mapping */
- l2_map_save_flags(flags);
/*
* Clean and invalidate partial first cache line.
*/
if (start & (CACHE_LINE_SIZE - 1)) {
- vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr, flags);
+ vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr);
xsc3_l2_clean_mva(vaddr);
xsc3_l2_inv_mva(vaddr);
start = (start | (CACHE_LINE_SIZE - 1)) + 1;
@@ -133,7 +120,7 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
* Invalidate all full cache lines between 'start' and 'end'.
*/
while (start < (end & ~(CACHE_LINE_SIZE - 1))) {
- vaddr = l2_map_va(start, vaddr, flags);
+ vaddr = l2_map_va(start, vaddr);
xsc3_l2_inv_mva(vaddr);
start += CACHE_LINE_SIZE;
}
@@ -142,31 +129,30 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
* Clean and invalidate partial last cache line.
*/
if (start < end) {
- vaddr = l2_map_va(start, vaddr, flags);
+ vaddr = l2_map_va(start, vaddr);
xsc3_l2_clean_mva(vaddr);
xsc3_l2_inv_mva(vaddr);
}
- l2_map_restore_flags(flags);
+ l2_unmap_va(vaddr);
dsb();
}
static void xsc3_l2_clean_range(unsigned long start, unsigned long end)
{
- unsigned long vaddr, flags;
+ unsigned long vaddr;
vaddr = -1; /* to force the first mapping */
- l2_map_save_flags(flags);
start &= ~(CACHE_LINE_SIZE - 1);
while (start < end) {
- vaddr = l2_map_va(start, vaddr, flags);
+ vaddr = l2_map_va(start, vaddr);
xsc3_l2_clean_mva(vaddr);
start += CACHE_LINE_SIZE;
}
- l2_map_restore_flags(flags);
+ l2_unmap_va(vaddr);
dsb();
}
@@ -193,7 +179,7 @@ static inline void xsc3_l2_flush_all(void)
static void xsc3_l2_flush_range(unsigned long start, unsigned long end)
{
- unsigned long vaddr, flags;
+ unsigned long vaddr;
if (start == 0 && end == -1ul) {
xsc3_l2_flush_all();
@@ -201,17 +187,16 @@ static void xsc3_l2_flush_range(unsigned long start, unsigned long end)
}
vaddr = -1; /* to force the first mapping */
- l2_map_save_flags(flags);
start &= ~(CACHE_LINE_SIZE - 1);
while (start < end) {
- vaddr = l2_map_va(start, vaddr, flags);
+ vaddr = l2_map_va(start, vaddr);
xsc3_l2_clean_mva(vaddr);
xsc3_l2_inv_mva(vaddr);
start += CACHE_LINE_SIZE;
}
- l2_map_restore_flags(flags);
+ l2_unmap_va(vaddr);
dsb();
}
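For reference, here is roughly how one of the range functions reads with the patch applied, reassembled from the hunks above (the applied file is authoritative; this is only a reading aid):

	static void xsc3_l2_clean_range(unsigned long start, unsigned long end)
	{
		unsigned long vaddr;

		vaddr = -1;		/* to force the first mapping */

		start &= ~(CACHE_LINE_SIZE - 1);
		while (start < end) {
			vaddr = l2_map_va(start, vaddr);	/* remaps only on a page crossing */
			xsc3_l2_clean_mva(vaddr);		/* clean one L2 line by MVA */
			start += CACHE_LINE_SIZE;
		}
		l2_unmap_va(vaddr);	/* kunmap_atomic() if anything is still mapped */

		dsb();
	}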