From ef7cc35b0ee03431731186320b18e5da585341ff Mon Sep 17 00:00:00 2001
From: James Bottomley
Date: Mon, 25 Jan 2010 11:42:21 -0600
Subject: parisc: add mm API for DMA to vmalloc/vmap areas

We already have an API to flush a kernel page along an alias address,
so use it.  The TLB purge prevents the CPU from doing speculative
moveins on the flushed address, so we don't need to implement an
invalidate.

Acked-by: Kyle McMartin
Signed-off-by: James Bottomley
---
 arch/parisc/include/asm/cacheflush.h | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index 7a73b615c23..477277739da 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -38,6 +38,18 @@ void flush_cache_mm(struct mm_struct *mm);
 #define flush_kernel_dcache_range(start,size) \
 	flush_kernel_dcache_range_asm((start), (start)+(size));
 
+/* vmap range flushes and invalidates.  Architecturally, we don't need
+ * the invalidate, because the CPU should refuse to speculate once an
+ * area has been flushed, so invalidate is left empty */
+static inline void flush_kernel_vmap_range(void *vaddr, int size)
+{
+	unsigned long start = (unsigned long)vaddr;
+
+	flush_kernel_dcache_range_asm(start, start + size);
+}
+static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
+{
+}
 #define flush_cache_vmap(start, end)		flush_cache_all()
 #define flush_cache_vunmap(start, end)		flush_cache_all()
 
-- 
cgit v1.2.3-70-g09d2
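
For reference, a minimal usage sketch (not part of the patch) of how a caller
doing I/O to a vmap()ed buffer would use this pair: flush the vmap alias
before the device touches the pages, and invalidate it before the CPU reads
data the device wrote.  The function below and its I/O placeholder are
hypothetical; only vmap()/vunmap() and the two cacheflush calls are real
kernel APIs, and error handling is trimmed to keep the sketch short.

/*
 * Hypothetical example: read from a device into pages that are also
 * mapped contiguously through vmap().  The vmap alias may hold stale
 * or dirty cache lines, so it must be flushed before the I/O starts
 * and invalidated before the CPU reads the DMA'd data.
 */
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>

static int example_read_into_vmap_buf(struct page **pages,
				      unsigned int npages)
{
	void *buf;
	int size = npages * PAGE_SIZE;

	buf = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* write back any dirty lines in the alias before the device
	 * sees the physical pages */
	flush_kernel_vmap_range(buf, size);

	/* ... submit the read I/O on 'pages' and wait for completion
	 * (driver-specific, omitted here) ... */

	/* drop any lines the CPU may have pulled in while the device
	 * was writing, so reads through 'buf' see the new data */
	invalidate_kernel_vmap_range(buf, size);

	vunmap(buf);
	return 0;
}

On parisc, per this patch, the flush also purges the TLB entry, so the
invalidate step compiles away; the call is still required for
architectures whose caches can speculatively refill during the DMA.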