From f311847c2fcebd81912e2f0caf8a461dec28db41 Mon Sep 17 00:00:00 2001
From: James Bottomley
Date: Wed, 22 Dec 2010 10:22:11 -0600
Subject: parisc: flush pages through tmpalias space

The kernel has an 8M tmpalias space (originally designed for copying
and clearing pages, but now only used for clearing).  The idea is to
place zeros into the cache above a physical page rather than into the
physical page and flush the cache, because often the zeros end up being
replaced quickly anyway.

We can also use the tmpalias space for flushing a page.  The difference
here is that we have to do tmpalias processing in the non-access data
and instruction traps.  The principle is the same: as long as we know
the physical address and have a virtual address congruent to the real
one, the flush will be effective.

In order to use the tmpalias space, the icache miss path has to be
enhanced to check for the alias region to make the fic instruction
effective.

Signed-off-by: James Bottomley
---
 arch/parisc/include/asm/cacheflush.h | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

(limited to 'arch/parisc/include/asm/cacheflush.h')

diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index f388a85bba1..dc9286a4dcc 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -26,8 +26,6 @@ void flush_user_dcache_range_asm(unsigned long, unsigned long);
 void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
 void flush_kernel_dcache_page_asm(void *);
 void flush_kernel_icache_page(void *);
-void flush_user_dcache_page(unsigned long);
-void flush_user_icache_page(unsigned long);
 void flush_user_dcache_range(unsigned long, unsigned long);
 void flush_user_icache_range(unsigned long, unsigned long);
 
@@ -90,12 +88,15 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn);
 void flush_cache_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end);
 
+/* defined in pacache.S exported in cache.c used by flush_anon_page */
+void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
+
 #define ARCH_HAS_FLUSH_ANON_PAGE
 static inline void
 flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
 {
 	if (PageAnon(page))
-		flush_user_dcache_page(vmaddr);
+		flush_dcache_page_asm(page_to_phys(page), vmaddr);
 }
 
 #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
--
cgit v1.2.3-70-g09d2
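For context, the main consumer of flush_anon_page() is the generic
get_user_pages() path.  A minimal sketch, paraphrasing that call site as
it looked in mm/memory.c of this era (the wrapper function name is
hypothetical, not a kernel API), shows why the struct page plus the user
virtual address is all the new tmpalias flush needs:

	#include <linux/mm.h>
	#include <linux/highmem.h>

	/*
	 * Sketch of the get_user_pages()-style call site that drives
	 * flush_anon_page(); the helper name is hypothetical.
	 */
	static void hand_page_to_kernel(struct vm_area_struct *vma,
					struct page *page,
					unsigned long vmaddr)
	{
		/* With this patch, parisc flushes through a tmpalias
		 * address congruent to vmaddr, mapped over
		 * page_to_phys(page), instead of flushing the user
		 * mapping directly. */
		flush_anon_page(vma, page, vmaddr);
		flush_dcache_page(page);
	}

Since the tmpalias window is a kernel virtual range, the flush no longer
depends on the user mapping being accessible, only on knowing the
physical page and the cache colour of vmaddr.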
From 8e1964a98920100f113ad26f78220ea706dbfa2b Mon Sep 17 00:00:00 2001
From: James Bottomley
Date: Thu, 20 Jan 2011 12:54:18 -0600
Subject: [PARISC] fix vmap flush/invalidate

On parisc, we never implemented invalidate_kernel_vmap_range() because
it was unnecessary for the xfs use case.  However, we do need to
implement an invalidate for the opposite use case (which occurred in a
recent NFS change) where the user wants to read through the vmap range
and write via the kernel address.

There's an additional complexity to this in that if the page has no
userspace mappings, it might have dirty cache lines in the kernel
(indicated by the PG_dcache_dirty bit).  In order to get full coherency,
we need to flush these pages through the kernel mapping before
invalidating the vmap range.

Signed-off-by: James Bottomley
---
 arch/parisc/include/asm/cacheflush.h | 24 +++++++++++++++++-------
 1 file changed, 17 insertions(+), 7 deletions(-)

(limited to 'arch/parisc/include/asm/cacheflush.h')

diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index f388a85bba1..7344e1d304a 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -37,6 +37,13 @@ void flush_cache_all_local(void);
 void flush_cache_all(void);
 void flush_cache_mm(struct mm_struct *mm);
 
+#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
+void flush_kernel_dcache_page_addr(void *addr);
+static inline void flush_kernel_dcache_page(struct page *page)
+{
+	flush_kernel_dcache_page_addr(page_address(page));
+}
+
 #define flush_kernel_dcache_range(start,size) \
 	flush_kernel_dcache_range_asm((start), (start)+(size));
 /* vmap range flushes and invalidates.  Architecturally, we don't need
@@ -50,6 +57,16 @@ static inline void flush_kernel_vmap_range(void *vaddr, int size)
 }
 static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
 {
+	unsigned long start = (unsigned long)vaddr;
+	void *cursor = vaddr;
+
+	for ( ; cursor < vaddr + size; cursor += PAGE_SIZE) {
+		struct page *page = vmalloc_to_page(cursor);
+
+		if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
+			flush_kernel_dcache_page(page);
+	}
+	flush_kernel_dcache_range_asm(start, start + size);
 }
 
 #define flush_cache_vmap(start, end)		flush_cache_all()
@@ -98,13 +115,6 @@ flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
 		flush_user_dcache_page(vmaddr);
 }
 
-#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
-void flush_kernel_dcache_page_addr(void *addr);
-static inline void flush_kernel_dcache_page(struct page *page)
-{
-	flush_kernel_dcache_page_addr(page_address(page));
-}
-
 #ifdef CONFIG_DEBUG_RODATA
 void mark_rodata_ro(void);
 #endif
--
cgit v1.2.3-70-g09d2
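With this patch the pair matches the contract in
Documentation/cachetlb.txt: flush_kernel_vmap_range() makes CPU writes
done through a vmap alias visible to the physical pages, and
invalidate_kernel_vmap_range() discards stale alias lines before the
CPU reads data that arrived via the kernel address (the NFS case
above).  A minimal driver-style sketch of that calling convention; the
function name is hypothetical and the actual I/O steps are elided:

	#include <linux/vmalloc.h>
	#include <linux/mm.h>
	#include <linux/string.h>

	/*
	 * Hypothetical sketch of the vmap flush/invalidate calling
	 * convention; the device I/O itself is elided.
	 */
	static void vmap_io_example(struct page **pages, unsigned int nr)
	{
		int len = nr * PAGE_SIZE;
		void *vaddr = vmap(pages, nr, VM_MAP, PAGE_KERNEL);

		if (!vaddr)
			return;

		/* CPU wrote through the alias: push the dirty lines out
		 * before a device reads the underlying pages. */
		memset(vaddr, 0, len);
		flush_kernel_vmap_range(vaddr, len);
		/* ... device reads the pages here ... */

		/* Data landed via the kernel mapping: drop stale alias
		 * lines before reading through vaddr.  On parisc this
		 * now also flushes PG_dcache_dirty pages through the
		 * kernel mapping first. */
		/* ... device writes the pages here ... */
		invalidate_kernel_vmap_range(vaddr, len);

		vunmap(vaddr);
	}

The ordering is the point of the API: the flush must precede the
device's read of the pages, and the invalidate must follow the device's
write but precede the CPU's read through the alias.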