Diffstat (limited to 'arch/arm/mm/flush.c')
-rw-r--r--	arch/arm/mm/flush.c	74
1 file changed, 32 insertions, 42 deletions
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index b27942909b2..329594e760c 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -18,10 +18,6 @@
 
 #include "mm.h"
 
-#ifdef CONFIG_ARM_ERRATA_411920
-extern void v6_icache_inval_all(void);
-#endif
-
 #ifdef CONFIG_CPU_CACHE_VIPT
 
 #define ALIAS_FLUSH_START	0xffff4000
@@ -35,77 +31,61 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
 	flush_tlb_kernel_page(to);
 
 	asm(	"mcrr	p15, 0, %1, %0, c14\n"
-	"	mcr	p15, 0, %2, c7, c10, 4\n"
-#ifndef CONFIG_ARM_ERRATA_411920
-	"	mcr	p15, 0, %2, c7, c5, 0\n"
-#endif
+	"	mcr	p15, 0, %2, c7, c10, 4"
	    :
	    : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
	    : "cc");
-#ifdef CONFIG_ARM_ERRATA_411920
-	v6_icache_inval_all();
-#endif
 }
 
 void flush_cache_mm(struct mm_struct *mm)
 {
 	if (cache_is_vivt()) {
-		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
-			__cpuc_flush_user_all();
+		vivt_flush_cache_mm(mm);
 		return;
 	}
 
 	if (cache_is_vipt_aliasing()) {
 		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
-		"	mcr	p15, 0, %0, c7, c10, 4\n"
-#ifndef CONFIG_ARM_ERRATA_411920
-		"	mcr	p15, 0, %0, c7, c5, 0\n"
-#endif
+		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
-#ifdef CONFIG_ARM_ERRATA_411920
-		v6_icache_inval_all();
-#endif
 	}
 }
 
 void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
 	if (cache_is_vivt()) {
-		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
-			__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
-						vma->vm_flags);
+		vivt_flush_cache_range(vma, start, end);
 		return;
 	}
 
 	if (cache_is_vipt_aliasing()) {
 		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
-		"	mcr	p15, 0, %0, c7, c10, 4\n"
-#ifndef CONFIG_ARM_ERRATA_411920
-		"	mcr	p15, 0, %0, c7, c5, 0\n"
-#endif
+		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
-#ifdef CONFIG_ARM_ERRATA_411920
-		v6_icache_inval_all();
-#endif
 	}
+
+	if (vma->vm_flags & VM_EXEC)
+		__flush_icache_all();
 }
 
 void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
 {
 	if (cache_is_vivt()) {
-		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
-			unsigned long addr = user_addr & PAGE_MASK;
-			__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
-		}
+		vivt_flush_cache_page(vma, user_addr, pfn);
 		return;
 	}
 
-	if (cache_is_vipt_aliasing())
+	if (cache_is_vipt_aliasing()) {
 		flush_pfn_alias(pfn, user_addr);
+		__flush_icache_all();
+	}
+
+	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
+		__flush_icache_all();
 }
 
 void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
@@ -113,15 +93,13 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 			 unsigned long len, int write)
 {
 	if (cache_is_vivt()) {
-		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
-			unsigned long addr = (unsigned long)kaddr;
-			__cpuc_coherent_kern_range(addr, addr + len);
-		}
+		vivt_flush_ptrace_access(vma, page, uaddr, kaddr, len, write);
 		return;
 	}
 
 	if (cache_is_vipt_aliasing()) {
 		flush_pfn_alias(page_to_pfn(page), uaddr);
+		__flush_icache_all();
 		return;
 	}
 
@@ -139,6 +117,8 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 
 void __flush_dcache_page(struct address_space *mapping, struct page *page)
 {
+	void *addr = page_address(page);
+
 	/*
 	 * Writeback any data associated with the kernel mapping of this
 	 * page.  This ensures that data in the physical page is mutually
@@ -149,9 +129,9 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
 	 * kmap_atomic() doesn't set the page virtual address, and
 	 * kunmap_atomic() takes care of cache flushing already.
 	 */
-	if (page_address(page))
+	if (addr)
 #endif
-		__cpuc_flush_dcache_page(page_address(page));
+		__cpuc_flush_dcache_page(addr);
 
 	/*
 	 * If this is a page cache page, and we have an aliasing VIPT cache,
@@ -215,7 +195,16 @@ static void __flush_dcache_aliases(struct address_space *mapping, struct page *p
  */
 void flush_dcache_page(struct page *page)
 {
-	struct address_space *mapping = page_mapping(page);
+	struct address_space *mapping;
+
+	/*
+	 * The zero page is never written to, so never has any dirty
+	 * cache lines, and therefore never needs to be flushed.
+	 */
+	if (page == ZERO_PAGE(0))
+		return;
+
+	mapping = page_mapping(page);
 
 #ifndef CONFIG_SMP
 	if (!PageHighMem(page) && mapping && !mapping_mapped(mapping))
@@ -261,6 +250,7 @@ void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned l
 	 * userspace address only.
 	 */
 	flush_pfn_alias(pfn, vmaddr);
+	__flush_icache_all();
 }
 
 /*
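
For context, the open-coded VIVT flushing deleted above does not simply disappear: the new vivt_flush_cache_*() and vivt_flush_ptrace_access() calls indicate the same logic was hoisted into shared helpers outside this file, presumably in arch/arm/include/asm/cacheflush.h. The sketch below is a reconstruction, not the authoritative header contents: the helper bodies are taken verbatim from the lines this diff removes, and __flush_icache_all() is assumed to wrap the "mcr p15, 0, %0, c7, c5, 0" I-cache invalidate that the hunks above stop inlining.

/*
 * Hypothetical reconstruction of the helpers this diff starts using,
 * assumed to live in arch/arm/include/asm/cacheflush.h.  The macro
 * bodies repeat the open-coded VIVT logic deleted from flush.c;
 * vivt_flush_cache_page() and vivt_flush_ptrace_access() would follow
 * the same cpumask_test_cpu() pattern.
 */
#define vivt_flush_cache_mm(mm)						\
	do {								\
		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) \
			__cpuc_flush_user_all();			\
	} while (0)

#define vivt_flush_cache_range(vma, start, end)				\
	do {								\
		struct mm_struct *mm = (vma)->vm_mm;			\
		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) \
			__cpuc_flush_user_range((start) & PAGE_MASK,	\
						PAGE_ALIGN(end),	\
						(vma)->vm_flags);	\
	} while (0)

/*
 * Invalidate the entire I-cache.  The mcr below is the same
 * "c7, c5, 0" instruction removed from the inline assembly above.
 */
static inline void __flush_icache_all(void)
{
	asm("mcr	p15, 0, %0, c7, c5, 0	@ invalidate I-cache"
	    :
	    : "r" (0));
}

Factoring the invalidate out this way gives one place to handle quirks such as the ERRATA_411920 workaround (v6_icache_inval_all()), and it makes the I-cache flush explicit at the call sites: that is consistent with flush_cache_range() and flush_cache_page() gaining the VM_EXEC-guarded __flush_icache_all() calls seen in the diff.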