author | Haavard Skinnemoen <haavard.skinnemoen@atmel.com> | 2008-08-05 13:35:07 +0200 |
---|---|---|
committer | Haavard Skinnemoen <haavard.skinnemoen@atmel.com> | 2008-08-05 13:35:07 +0200 |
commit | 84db8d7cdb072866f5a6c6ac2c9a74c5c48dd22f (patch) | |
tree | 7f10cc33bca606abd1547ae90d90362134a35332 /arch/avr32/include/asm/cacheflush.h | |
parent | 2b12a4c524812fb3f6ee590a02e65b95c8c32229 (diff) |
avr32: Move include/asm-avr32 to arch/avr32/include/asm
Leaving include/asm/arch alone for now.
Signed-off-by: Haavard Skinnemoen <haavard.skinnemoen@atmel.com>
Diffstat (limited to 'arch/avr32/include/asm/cacheflush.h')
-rw-r--r-- | arch/avr32/include/asm/cacheflush.h | 131 |
1 file changed, 131 insertions, 0 deletions
diff --git a/arch/avr32/include/asm/cacheflush.h b/arch/avr32/include/asm/cacheflush.h
new file mode 100644
index 00000000000..670674749b2
--- /dev/null
+++ b/arch/avr32/include/asm/cacheflush.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_AVR32_CACHEFLUSH_H
+#define __ASM_AVR32_CACHEFLUSH_H
+
+/* Keep includes the same across arches. */
+#include <linux/mm.h>
+
+#define CACHE_OP_ICACHE_INVALIDATE	0x01
+#define CACHE_OP_DCACHE_INVALIDATE	0x0b
+#define CACHE_OP_DCACHE_CLEAN		0x0c
+#define CACHE_OP_DCACHE_CLEAN_INVAL	0x0d
+
+/*
+ * Invalidate any cacheline containing virtual address vaddr without
+ * writing anything back to memory.
+ *
+ * Note that this function may corrupt unrelated data structures when
+ * applied on buffers that are not cacheline aligned in both ends.
+ */
+static inline void invalidate_dcache_line(void *vaddr)
+{
+	asm volatile("cache %0[0], %1"
+		     :
+		     : "r"(vaddr), "n"(CACHE_OP_DCACHE_INVALIDATE)
+		     : "memory");
+}
+
+/*
+ * Make sure any cacheline containing virtual address vaddr is written
+ * to memory.
+ */
+static inline void clean_dcache_line(void *vaddr)
+{
+	asm volatile("cache %0[0], %1"
+		     :
+		     : "r"(vaddr), "n"(CACHE_OP_DCACHE_CLEAN)
+		     : "memory");
+}
+
+/*
+ * Make sure any cacheline containing virtual address vaddr is written
+ * to memory and then invalidate it.
+ */
+static inline void flush_dcache_line(void *vaddr)
+{
+	asm volatile("cache %0[0], %1"
+		     :
+		     : "r"(vaddr), "n"(CACHE_OP_DCACHE_CLEAN_INVAL)
+		     : "memory");
+}
+
+/*
+ * Invalidate any instruction cacheline containing virtual address
+ * vaddr.
+ */
+static inline void invalidate_icache_line(void *vaddr)
+{
+	asm volatile("cache %0[0], %1"
+		     :
+		     : "r"(vaddr), "n"(CACHE_OP_ICACHE_INVALIDATE)
+		     : "memory");
+}
+
+/*
+ * Applies the above functions on all lines that are touched by the
+ * specified virtual address range.
+ */
+void invalidate_dcache_region(void *start, size_t len);
+void clean_dcache_region(void *start, size_t len);
+void flush_dcache_region(void *start, size_t len);
+void invalidate_icache_region(void *start, size_t len);
+
+/*
+ * Make sure any pending writes are completed before continuing.
+ */
+#define flush_write_buffer() asm volatile("sync 0" : : : "memory")
+
+/*
+ * The following functions are called when a virtual mapping changes.
+ * We do not need to flush anything in this case.
+ */
+#define flush_cache_all()			do { } while (0)
+#define flush_cache_mm(mm)			do { } while (0)
+#define flush_cache_dup_mm(mm)			do { } while (0)
+#define flush_cache_range(vma, start, end)	do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
+#define flush_cache_vmap(start, end)		do { } while (0)
+#define flush_cache_vunmap(start, end)		do { } while (0)
+
+/*
+ * I think we need to implement this one to be able to reliably
+ * execute pages from RAMDISK. However, if we implement the
+ * flush_dcache_*() functions, it might not be needed anymore.
+ *
+ * #define flush_icache_page(vma, page)		do { } while (0)
+ */
+extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
+
+/*
+ * These are (I think) related to D-cache aliasing. We might need to
+ * do something here, but only for certain configurations. No such
+ * configurations exist at this time.
+ */
+#define flush_dcache_page(page)			do { } while (0)
+#define flush_dcache_mmap_lock(page)		do { } while (0)
+#define flush_dcache_mmap_unlock(page)		do { } while (0)
+
+/*
+ * These are for I/D cache coherency. In this case, we do need to
+ * flush with all configurations.
+ */
+extern void flush_icache_range(unsigned long start, unsigned long end);
+
+extern void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+	unsigned long vaddr, void *dst, const void *src,
+	unsigned long len);
+
+static inline void copy_from_user_page(struct vm_area_struct *vma,
+	struct page *page, unsigned long vaddr, void *dst,
+	const void *src, unsigned long len)
+{
+	memcpy(dst, src, len);
+}
+
+#endif /* __ASM_AVR32_CACHEFLUSH_H */
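The clean/invalidate region helpers declared in this header are the kind of primitives a non-coherent DMA transfer needs: clean before the device reads a buffer, invalidate before the CPU reads what the device wrote. The following is a minimal sketch of that pattern, not code from this commit; the buffer, sizes, and function names other than the header's own are made up, and a real driver would go through the generic DMA API rather than calling these directly.

/* Hypothetical illustration of the region helpers around a DMA transfer. */
#include <linux/types.h>
#include <asm/cacheflush.h>

#define BUF_SIZE 4096

/* Alignment to the assumed 32-byte line avoids sharing end lines
 * with unrelated data (see the invalidate_dcache_line() warning). */
static u8 dma_buf[BUF_SIZE] __attribute__((aligned(32)));

static void dma_tx_start_example(void)
{
	/* The CPU filled dma_buf; write dirty lines back to RAM so
	 * the device sees the data, without discarding them. */
	clean_dcache_region(dma_buf, BUF_SIZE);
	flush_write_buffer();	/* drain pending writes before kicking DMA */
	/* ...program the DMA controller to read from dma_buf... */
}

static void dma_rx_complete_example(void)
{
	/* The device wrote into dma_buf behind the cache's back; drop
	 * any stale cached copies before the CPU reads the buffer. */
	invalidate_dcache_region(dma_buf, BUF_SIZE);
	/* ...the CPU may now read dma_buf safely... */
}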
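The header only declares invalidate_dcache_region(); the definition lives elsewhere in the tree (presumably arch/avr32/mm/cache.c) and is not part of this diff. The warning on invalidate_dcache_line() about buffers not cacheline aligned at both ends dictates the shape such an implementation must take: partial lines at the ends must be cleaned before being invalidated, since they may share a cacheline with unrelated dirty data. A sketch of that logic, assuming a 32-byte line (L1_CACHE_SHIFT is 5 on AVR32) and not claiming to match the committed code:

/* Sketch only -- not the in-tree implementation. */
#define SKETCH_LINE_SIZE	32UL	/* assumed AVR32 D-cache line size */
#define SKETCH_LINE_MASK	(SKETCH_LINE_SIZE - 1)

static void invalidate_dcache_region_sketch(void *start, size_t len)
{
	unsigned long v = (unsigned long)start;
	unsigned long end = v + len;

	/* A partial line at either end may carry unrelated dirty data
	 * in the same cacheline; clean+invalidate those lines so that
	 * data is written back rather than silently discarded. */
	if (v & SKETCH_LINE_MASK) {
		flush_dcache_line((void *)(v & ~SKETCH_LINE_MASK));
		v = (v & ~SKETCH_LINE_MASK) + SKETCH_LINE_SIZE;
	}
	if (end & SKETCH_LINE_MASK) {
		flush_dcache_line((void *)(end & ~SKETCH_LINE_MASK));
		end &= ~SKETCH_LINE_MASK;
	}

	/* Lines fully inside the range can be dropped without writeback. */
	for (; v < end; v += SKETCH_LINE_SIZE)
		invalidate_dcache_line((void *)v);

	flush_write_buffer();
}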
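The asymmetry at the bottom of the header is worth noting: copy_from_user_page() is a plain memcpy(), but copy_to_user_page() is extern. The likely reason is I/D coherency: writing into another task's page (ptrace inserting a breakpoint into text, for instance) leaves the new bytes in the D-cache while the I-cache may still hold the old instructions. The following is an illustrative guess at the shape of such a function, under that assumption; the real definition is elsewhere in the AVR32 tree and may differ.

/* Hypothetical sketch -- not the committed copy_to_user_page(). */
#include <linux/mm.h>
#include <linux/string.h>
#include <asm/cacheflush.h>

static void copy_to_user_page_sketch(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst,
	const void *src, unsigned long len)
{
	memcpy(dst, src, len);

	/* If the target mapping is executable, push the new bytes out
	 * of the D-cache and drop the stale I-cache lines so they can
	 * be fetched as instructions. */
	if (vma->vm_flags & VM_EXEC)
		flush_icache_range((unsigned long)dst,
				   (unsigned long)dst + len);
}

Reading from a page needs no such step, which is why copy_from_user_page() can stay inline as a bare memcpy().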