author     Catalin Marinas <catalin.marinas@arm.com>    2010-09-13 15:58:06 +0100
committer  Russell King <rmk+kernel@arm.linux.org.uk>   2010-09-19 12:17:44 +0100
commit     6012191aa9c6ffff3a23b81162298318b56d7cb3 (patch)
tree       8f08d869b452d66f126743bcfd73aa6f5a701605 /arch/arm/mm
parent     c01778001a4f5ad9c62d882776235f3f31922fdd (diff)
ARM: 6380/1: Introduce __sync_icache_dcache() for VIPT caches
On SMP systems, there is a small chance of a PTE becoming visible to a different CPU before the current cache maintenance operations in update_mmu_cache() have completed. To avoid this, cache maintenance must be handled in set_pte_at() (as on IA-64 and PowerPC).

This patch provides a unified VIPT cache handling mechanism and implements the __sync_icache_dcache() function for ARMv6 and later architectures. It is called from set_pte_at() and replaces update_mmu_cache(); the latter is still used on VIVT hardware, where a vm_area_struct is required.

Tested-by: Rabin Vincent <rabin.vincent@stericsson.com>
Cc: Nicolas Pitre <nicolas.pitre@linaro.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
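For context, the set_pte_at() half of this change lives in arch/arm/include/asm/pgtable.h and so falls outside the diffstat below. A minimal sketch of the shape of that hook, assuming set_pte_ext() remains the low-level PTE writer (the macro body here is illustrative, not quoted from the patch):

    /*
     * Illustrative only: sync the I/D caches before the PTE is
     * published, so another CPU cannot observe the new mapping
     * ahead of the cache maintenance.
     */
    #define set_pte_at(mm, addr, ptep, pteval)              \
        do {                                                \
            if (pte_present_user(pteval))                   \
                __sync_icache_dcache(pteval);               \
            set_pte_ext(ptep, pteval, 0);                   \
        } while (0)

On pre-ARMv6 (VIVT) builds, __sync_icache_dcache() can be stubbed out as an empty static inline so the call compiles away and update_mmu_cache() keeps doing the maintenance, matching the #if __LINUX_ARM_ARCH__ guards in the hunks below.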
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/fault-armv.c    4
-rw-r--r--  arch/arm/mm/flush.c        30
2 files changed, 32 insertions, 2 deletions
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 58846cbd0e0..8440d952ba6 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -28,6 +28,7 @@
static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE;
+#if __LINUX_ARM_ARCH__ < 6
/*
* We take the easy way out of this problem - we make the
* PTE uncacheable. However, we leave the write buffer on.
@@ -168,10 +169,8 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
return;
mapping = page_mapping(page);
-#ifndef CONFIG_SMP
if (!test_and_set_bit(PG_dcache_clean, &page->flags))
__flush_dcache_page(mapping, page);
-#endif
if (mapping) {
if (cache_is_vivt())
make_coherent(mapping, vma, addr, ptep, pfn);
@@ -179,6 +178,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
__flush_icache_all();
}
}
+#endif /* __LINUX_ARM_ARCH__ < 6 */
/*
* Check whether the write buffer has physical address aliasing
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index b4efce9b798..dd5b0120b92 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -215,6 +215,36 @@ static void __flush_dcache_aliases(struct address_space *mapping, struct page *p
flush_dcache_mmap_unlock(mapping);
}
+#if __LINUX_ARM_ARCH__ >= 6
+void __sync_icache_dcache(pte_t pteval)
+{
+ unsigned long pfn;
+ struct page *page;
+ struct address_space *mapping;
+
+ if (!pte_present_user(pteval))
+ return;
+ if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
+ /* only flush non-aliasing VIPT caches for exec mappings */
+ return;
+ pfn = pte_pfn(pteval);
+ if (!pfn_valid(pfn))
+ return;
+
+ page = pfn_to_page(pfn);
+ if (cache_is_vipt_aliasing())
+ mapping = page_mapping(page);
+ else
+ mapping = NULL;
+
+ if (!test_and_set_bit(PG_dcache_clean, &page->flags))
+ __flush_dcache_page(mapping, page);
+ /* pte_exec() already checked above for non-aliasing VIPT cache */
+ if (cache_is_vipt_nonaliasing() || pte_exec(pteval))
+ __flush_icache_all();
+}
+#endif
+
/*
* Ensure cache coherency between kernel mapping and userspace mapping
* of this page.