| field     | value                                                               |
|-----------|---------------------------------------------------------------------|
| author    | Chris Zankel <chris@zankel.net>, 2007-08-22 10:14:51 -0700          |
| committer | Chris Zankel <chris@zankel.net>, 2007-08-27 13:54:16 -0700          |
| commit    | 6656920b0b50beacb6cb64cf55273cbb686e436e (patch)                    |
| tree      | dab9fdb81821b455a29779de6ca3306dbdf05dbd /include/asm-xtensa/page.h |
| parent    | ff6fd469885aafa5ec387babcb6537f3c00d6df0 (diff)                     |
[XTENSA] Add support for cache-aliasing
Add support for processors that have cache-aliasing issues, such as
the Stretch S5000 processor. Cache-aliasing means that the size of
the cache (for one way) is larger than the page size, so a page
can end up in several places in the cache depending on the virtual
to physical translation. The method used here is to map a user page
temporarily through auto-refill way 0 of the DTLB.
We will probably want to revisit this issue and use a better
approach with kmap/kunmap.
Signed-off-by: Chris Zankel <chris@zankel.net>
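To see concretely what the commit message means by aliasing, the sketch below computes the cache "color" of an address, i.e. the part of the cache index that lies above the page offset. It is not part of the patch: the geometry (a 16 KiB cache way with 4 KiB pages) and the `cache_color()` helper are assumptions chosen purely for illustration.

```c
#include <stdio.h>

/* Assumed geometry: a 16 KiB way with 4 KiB pages leaves two
 * "color" bits (bits 12-13) above the page offset. */
#define PAGE_SHIFT       12
#define DCACHE_WAY_SHIFT 14
#define DCACHE_WAY_SIZE  (1UL << DCACHE_WAY_SHIFT)

/* Color = the part of the cache index above the page offset. Two
 * mappings of the same data with different colors land in different
 * cache lines, which is the aliasing problem described above. */
static unsigned long cache_color(unsigned long addr)
{
	return (addr & (DCACHE_WAY_SIZE - 1)) >> PAGE_SHIFT;
}

int main(void)
{
	unsigned long vaddr = 0x2000;	/* virtual page:  color 2 */
	unsigned long paddr = 0x7000;	/* physical page: color 3 */

	printf("vaddr color %lu, paddr color %lu -> %s\n",
	       cache_color(vaddr), cache_color(paddr),
	       cache_color(vaddr) == cache_color(paddr) ?
	       "same line" : "alias");
	return 0;
}
```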
Diffstat (limited to 'include/asm-xtensa/page.h')

| mode       | file                      | lines changed |
|------------|---------------------------|---------------|
| -rw-r--r-- | include/asm-xtensa/page.h | 56            |

1 file changed, 52 insertions(+), 4 deletions(-)
diff --git a/include/asm-xtensa/page.h b/include/asm-xtensa/page.h
index 2d6ac21136c..55ce2c9749a 100644
--- a/include/asm-xtensa/page.h
+++ b/include/asm-xtensa/page.h
@@ -15,6 +15,7 @@
 
 #include <asm/processor.h>
 #include <asm/types.h>
+#include <asm/cache.h>
 
 /*
  * Fixed TLB translations in the processor.
@@ -39,6 +40,53 @@
 #define MAX_MEM_PFN	XCHAL_KSEG_SIZE
 #define PGTABLE_START	0x80000000
 
+/*
+ * Cache aliasing:
+ *
+ * If the cache size for one way is greater than the page size, we have to
+ * deal with cache aliasing. The cache index is wider than the page size:
+ *
+ * |      |cache| cache index
+ * | pfn  |off  | virtual address
+ * |xxxx:X|zzz  |
+ * |    : |     |
+ * | \  / |     |
+ * |trans.|     |
+ * | /  \ |     |
+ * |yyyy:Y|zzz  | physical address
+ *
+ * When the page number is translated to the physical page address, the lowest
+ * bit(s) (X) that are part of the cache index are also translated (Y).
+ * If this translation changes bit(s) (X), the cache index is also affected,
+ * thus resulting in a different cache line than before.
+ * The kernel does not provide a mechanism to ensure that the page color
+ * (represented by this bit) remains the same when allocated or when pages
+ * are remapped. When user pages are mapped into kernel space, the color of
+ * the page might also change.
+ *
+ * We use the address space VMALLOC_END ... VMALLOC_END + DCACHE_WAY_SIZE * 2
+ * to temporarily map a page so we can match the color.
+ */
+
+#if DCACHE_WAY_SIZE > PAGE_SIZE
+# define DCACHE_ALIAS_ORDER	(DCACHE_WAY_SHIFT - PAGE_SHIFT)
+# define DCACHE_ALIAS_MASK	(PAGE_MASK & (DCACHE_WAY_SIZE - 1))
+# define DCACHE_ALIAS(a)	(((a) & DCACHE_ALIAS_MASK) >> PAGE_SHIFT)
+# define DCACHE_ALIAS_EQ(a,b)	((((a) ^ (b)) & DCACHE_ALIAS_MASK) == 0)
+#else
+# define DCACHE_ALIAS_ORDER	0
+#endif
+
+#if ICACHE_WAY_SIZE > PAGE_SIZE
+# define ICACHE_ALIAS_ORDER	(ICACHE_WAY_SHIFT - PAGE_SHIFT)
+# define ICACHE_ALIAS_MASK	(PAGE_MASK & (ICACHE_WAY_SIZE - 1))
+# define ICACHE_ALIAS(a)	(((a) & ICACHE_ALIAS_MASK) >> PAGE_SHIFT)
+# define ICACHE_ALIAS_EQ(a,b)	((((a) ^ (b)) & ICACHE_ALIAS_MASK) == 0)
+#else
+# define ICACHE_ALIAS_ORDER	0
+#endif
+
+
 #ifdef __ASSEMBLY__
 
 #define __pgprot(x)	(x)
@@ -90,11 +138,11 @@ extern void copy_page(void *to, void *from);
  * some extra work
  */
 
-#if (DCACHE_WAY_SIZE > PAGE_SIZE)
-void clear_user_page(void *addr, unsigned long vaddr, struct page* page);
-void copy_user_page(void *to,void* from,unsigned long vaddr,struct page* page);
+#if DCACHE_WAY_SIZE > PAGE_SIZE
+extern void clear_user_page(void*, unsigned long, struct page*);
+extern void copy_user_page(void*, void*, unsigned long, struct page*);
 #else
-# define clear_user_page(page,vaddr,pg)		clear_page(page)
+# define clear_user_page(page, vaddr, pg)	clear_page(page)
 # define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
 #endif
 
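The new `DCACHE_ALIAS*` macros can be exercised in isolation. The sketch below reproduces them verbatim from the patch and feeds them two addresses; the 8 KiB way size, 4 KiB page size, and the sample addresses are assumptions for the demo, not values taken from any particular Xtensa configuration.

```c
#include <stdio.h>

/* Assumed configuration: 8 KiB per cache way and 4 KiB pages,
 * i.e. exactly one alias (color) bit, bit 12. */
#define PAGE_SHIFT       12
#define PAGE_SIZE        (1UL << PAGE_SHIFT)
#define PAGE_MASK        (~(PAGE_SIZE - 1))
#define DCACHE_WAY_SHIFT 13
#define DCACHE_WAY_SIZE  (1UL << DCACHE_WAY_SHIFT)

/* The macros added by this patch, reproduced verbatim. */
#define DCACHE_ALIAS_ORDER	(DCACHE_WAY_SHIFT - PAGE_SHIFT)
#define DCACHE_ALIAS_MASK	(PAGE_MASK & (DCACHE_WAY_SIZE - 1))
#define DCACHE_ALIAS(a)		(((a) & DCACHE_ALIAS_MASK) >> PAGE_SHIFT)
#define DCACHE_ALIAS_EQ(a,b)	((((a) ^ (b)) & DCACHE_ALIAS_MASK) == 0)

int main(void)
{
	/* Sample addresses: both have bit 12 set, so they share a color. */
	unsigned long a = 0x10001000;
	unsigned long b = 0xd0003000;

	printf("order=%d color(a)=%lu color(b)=%lu same_color=%d\n",
	       DCACHE_ALIAS_ORDER, DCACHE_ALIAS(a), DCACHE_ALIAS(b),
	       DCACHE_ALIAS_EQ(a, b));
	return 0;
}
```

`DCACHE_ALIAS_EQ()` gives callers such as `clear_user_page()` and `copy_user_page()` a cheap test for whether two mappings of a page already share a color, so the temporary-mapping path described in the commit message is only needed when it returns false.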