author | Aurelien Jacquiot <a-jacquiot@ti.com> | 2011-10-04 11:11:35 -0400
---|---|---
committer | Mark Salter <msalter@redhat.com> | 2011-10-06 19:48:10 -0400
commit | 784bdcd0aa1d8ce38025bcfaa321146762738fe0 | (patch)
tree | 1b1bda6b0c573d39aaa6615b6ec2dc9e206378ff | /arch/c6x
parent | 81ec98898188639ac53413605681b3e3bb0a2ff1 | (diff)
C6X: cache control
Original port to an early 2.6 kernel using the TI COFF toolchain.
Brought up to date by Mark Salter <msalter@redhat.com>.
Signed-off-by: Aurelien Jacquiot <a-jacquiot@ti.com>
Signed-off-by: Mark Salter <msalter@redhat.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'arch/c6x')
-rw-r--r-- | arch/c6x/include/asm/cache.h | 90
-rw-r--r-- | arch/c6x/include/asm/cacheflush.h | 65
-rw-r--r-- | arch/c6x/platforms/cache.c | 445
3 files changed, 600 insertions, 0 deletions
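Editor's note: the new cacheflush.h below builds the kernel's icache maintenance from two of the block primitives declared in cache.h: modified code is written back from L1D, then the stale L1P lines are invalidated. As a minimal usage sketch (not part of the patch; the publish_code() helper is hypothetical, only flush_icache_range() and the headers come from this commit):

#include <linux/string.h>
#include <asm/cacheflush.h>     /* flush_icache_range() added by this patch */

/*
 * Hypothetical helper: copy freshly generated or loaded instructions
 * into place, then write the dirty L1D lines back to memory and
 * invalidate the stale L1P lines so the CPU fetches the new code.
 */
static void publish_code(void *dst, const void *src, unsigned long len)
{
        memcpy(dst, src, len);
        flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
}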
diff --git a/arch/c6x/include/asm/cache.h b/arch/c6x/include/asm/cache.h
new file mode 100644
index 00000000000..6d521d96d94
--- /dev/null
+++ b/arch/c6x/include/asm/cache.h
@@ -0,0 +1,90 @@
+/*
+ * Port on Texas Instruments TMS320C6x architecture
+ *
+ * Copyright (C) 2005, 2006, 2009, 2010 Texas Instruments Incorporated
+ * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_CACHE_H
+#define _ASM_C6X_CACHE_H
+
+#include <linux/irqflags.h>
+
+/*
+ * Cache line size
+ */
+#define L1D_CACHE_BYTES   64
+#define L1P_CACHE_BYTES   32
+#define L2_CACHE_BYTES    128
+
+/*
+ * L2 used as cache
+ */
+#define L2MODE_SIZE       L2MODE_256K_CACHE
+
+/*
+ * For practical reasons the L1_CACHE_BYTES defines should not be smaller than
+ * the L2 line size
+ */
+#define L1_CACHE_BYTES    L2_CACHE_BYTES
+
+#define L2_CACHE_ALIGN_LOW(x) \
+        (((x) & ~(L2_CACHE_BYTES - 1)))
+#define L2_CACHE_ALIGN_UP(x) \
+        (((x) + (L2_CACHE_BYTES - 1)) & ~(L2_CACHE_BYTES - 1))
+#define L2_CACHE_ALIGN_CNT(x) \
+        (((x) + (sizeof(int) - 1)) & ~(sizeof(int) - 1))
+
+#define ARCH_DMA_MINALIGN       L1_CACHE_BYTES
+#define ARCH_SLAB_MINALIGN      L1_CACHE_BYTES
+
+/*
+ * This is the granularity of hardware cacheability control.
+ */
+#define CACHEABILITY_ALIGN      0x01000000
+
+/*
+ * Align a physical address to MAR regions
+ */
+#define CACHE_REGION_START(v) \
+        (((u32) (v)) & ~(CACHEABILITY_ALIGN - 1))
+#define CACHE_REGION_END(v) \
+        (((u32) (v) + (CACHEABILITY_ALIGN - 1)) & ~(CACHEABILITY_ALIGN - 1))
+
+extern void __init c6x_cache_init(void);
+
+extern void enable_caching(unsigned long start, unsigned long end);
+extern void disable_caching(unsigned long start, unsigned long end);
+
+extern void L1_cache_off(void);
+extern void L1_cache_on(void);
+
+extern void L1P_cache_global_invalidate(void);
+extern void L1D_cache_global_invalidate(void);
+extern void L1D_cache_global_writeback(void);
+extern void L1D_cache_global_writeback_invalidate(void);
+extern void L2_cache_set_mode(unsigned int mode);
+extern void L2_cache_global_writeback_invalidate(void);
+extern void L2_cache_global_writeback(void);
+
+extern void L1P_cache_block_invalidate(unsigned int start, unsigned int end);
+extern void L1D_cache_block_invalidate(unsigned int start, unsigned int end);
+extern void L1D_cache_block_writeback_invalidate(unsigned int start,
+                                                 unsigned int end);
+extern void L1D_cache_block_writeback(unsigned int start, unsigned int end);
+extern void L2_cache_block_invalidate(unsigned int start, unsigned int end);
+extern void L2_cache_block_writeback(unsigned int start, unsigned int end);
+extern void L2_cache_block_writeback_invalidate(unsigned int start,
+                                                unsigned int end);
+extern void L2_cache_block_invalidate_nowait(unsigned int start,
+                                             unsigned int end);
+extern void L2_cache_block_writeback_nowait(unsigned int start,
+                                            unsigned int end);
+
+extern void L2_cache_block_writeback_invalidate_nowait(unsigned int start,
+                                                       unsigned int end);
+
+#endif /* _ASM_C6X_CACHE_H */
diff --git a/arch/c6x/include/asm/cacheflush.h b/arch/c6x/include/asm/cacheflush.h
new file mode 100644
index 00000000000..df5db90dbe5
--- /dev/null
+++ b/arch/c6x/include/asm/cacheflush.h
@@ -0,0 +1,65 @@
+/*
+ * Port on Texas Instruments TMS320C6x architecture
+ *
+ * Copyright (C) 2004, 2009, 2010 Texas Instruments Incorporated
+ * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_CACHEFLUSH_H
+#define _ASM_C6X_CACHEFLUSH_H
+
+#include <linux/spinlock.h>
+
+#include <asm/setup.h>
+#include <asm/cache.h>
+#include <asm/mman.h>
+#include <asm/page.h>
+#include <asm/string.h>
+
+/*
+ * virtually-indexed cache management (our cache is physically indexed)
+ */
+#define flush_cache_all()                       do {} while (0)
+#define flush_cache_mm(mm)                      do {} while (0)
+#define flush_cache_dup_mm(mm)                  do {} while (0)
+#define flush_cache_range(mm, start, end)       do {} while (0)
+#define flush_cache_page(vma, vmaddr, pfn)      do {} while (0)
+#define flush_cache_vmap(start, end)            do {} while (0)
+#define flush_cache_vunmap(start, end)          do {} while (0)
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+#define flush_dcache_page(page)                 do {} while (0)
+#define flush_dcache_mmap_lock(mapping)         do {} while (0)
+#define flush_dcache_mmap_unlock(mapping)       do {} while (0)
+
+/*
+ * physically-indexed cache management
+ */
+#define flush_icache_range(s, e)                                  \
+do {                                                              \
+        L1D_cache_block_writeback((s), (e));                      \
+        L1P_cache_block_invalidate((s), (e));                     \
+} while (0)
+
+#define flush_icache_page(vma, page)                                      \
+do {                                                                      \
+        if ((vma)->vm_flags & PROT_EXEC)                                  \
+                L1D_cache_block_writeback_invalidate(page_address(page),  \
+                        (unsigned long) page_address(page) + PAGE_SIZE);  \
+        L1P_cache_block_invalidate(page_address(page),                    \
+                (unsigned long) page_address(page) + PAGE_SIZE);          \
+} while (0)
+
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+do { \
+        memcpy(dst, src, len); \
+        flush_icache_range((unsigned) (dst), (unsigned) (dst) + (len)); \
+} while (0)
+
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+        memcpy(dst, src, len)
+
+#endif /* _ASM_C6X_CACHEFLUSH_H */
diff --git a/arch/c6x/platforms/cache.c b/arch/c6x/platforms/cache.c
new file mode 100644
index 00000000000..86318a16a25
--- /dev/null
+++ b/arch/c6x/platforms/cache.c
@@ -0,0 +1,445 @@
+/*
+ * Copyright (C) 2011 Texas Instruments Incorporated
+ * Author: Mark Salter <msalter@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+
+#include <asm/cache.h>
+#include <asm/soc.h>
+
+/*
+ * Internal Memory Control Registers for caches
+ */
+#define IMCR_CCFG         0x0000
+#define IMCR_L1PCFG       0x0020
+#define IMCR_L1PCC        0x0024
+#define IMCR_L1DCFG       0x0040
+#define IMCR_L1DCC        0x0044
+#define IMCR_L2ALLOC0     0x2000
+#define IMCR_L2ALLOC1     0x2004
+#define IMCR_L2ALLOC2     0x2008
+#define IMCR_L2ALLOC3     0x200c
+#define IMCR_L2WBAR       0x4000
+#define IMCR_L2WWC        0x4004
+#define IMCR_L2WIBAR      0x4010
+#define IMCR_L2WIWC       0x4014
+#define IMCR_L2IBAR       0x4018
+#define IMCR_L2IWC        0x401c
+#define IMCR_L1PIBAR      0x4020
+#define IMCR_L1PIWC       0x4024
+#define IMCR_L1DWIBAR     0x4030
+#define IMCR_L1DWIWC      0x4034
+#define IMCR_L1DWBAR      0x4040
+#define IMCR_L1DWWC       0x4044
+#define IMCR_L1DIBAR      0x4048
+#define IMCR_L1DIWC       0x404c
+#define IMCR_L2WB         0x5000
+#define IMCR_L2WBINV      0x5004
+#define IMCR_L2INV        0x5008
+#define IMCR_L1PINV       0x5028
+#define IMCR_L1DWB        0x5040
+#define IMCR_L1DWBINV     0x5044
+#define IMCR_L1DINV       0x5048
+#define IMCR_MAR_BASE     0x8000
+#define IMCR_MAR96_111    0x8180
+#define IMCR_MAR128_191   0x8200
+#define IMCR_MAR224_239   0x8380
+#define IMCR_L2MPFAR      0xa000
+#define IMCR_L2MPFSR      0xa004
+#define IMCR_L2MPFCR      0xa008
+#define IMCR_L2MPLK0      0xa100
+#define IMCR_L2MPLK1      0xa104
+#define IMCR_L2MPLK2      0xa108
+#define IMCR_L2MPLK3      0xa10c
+#define IMCR_L2MPLKCMD    0xa110
+#define IMCR_L2MPLKSTAT   0xa114
+#define IMCR_L2MPPA_BASE  0xa200
+#define IMCR_L1PMPFAR     0xa400
+#define IMCR_L1PMPFSR     0xa404
+#define IMCR_L1PMPFCR     0xa408
+#define IMCR_L1PMPLK0     0xa500
+#define IMCR_L1PMPLK1     0xa504
+#define IMCR_L1PMPLK2     0xa508
+#define IMCR_L1PMPLK3     0xa50c
+#define IMCR_L1PMPLKCMD   0xa510
+#define IMCR_L1PMPLKSTAT  0xa514
+#define IMCR_L1PMPPA_BASE 0xa600
+#define IMCR_L1DMPFAR     0xac00
+#define IMCR_L1DMPFSR     0xac04
+#define IMCR_L1DMPFCR     0xac08
+#define IMCR_L1DMPLK0     0xad00
+#define IMCR_L1DMPLK1     0xad04
+#define IMCR_L1DMPLK2     0xad08
+#define IMCR_L1DMPLK3     0xad0c
+#define IMCR_L1DMPLKCMD   0xad10
+#define IMCR_L1DMPLKSTAT  0xad14
+#define IMCR_L1DMPPA_BASE 0xae00
+#define IMCR_L2PDWAKE0    0xc040
+#define IMCR_L2PDWAKE1    0xc044
+#define IMCR_L2PDSLEEP0   0xc050
+#define IMCR_L2PDSLEEP1   0xc054
+#define IMCR_L2PDSTAT0    0xc060
+#define IMCR_L2PDSTAT1    0xc064
+
+/*
+ * CCFG register values and bits
+ */
+#define L2MODE_0K_CACHE   0x0
+#define L2MODE_32K_CACHE  0x1
+#define L2MODE_64K_CACHE  0x2
+#define L2MODE_128K_CACHE 0x3
+#define L2MODE_256K_CACHE 0x7
+
+#define L2PRIO_URGENT     0x0
+#define L2PRIO_HIGH       0x1
+#define L2PRIO_MEDIUM     0x2
+#define L2PRIO_LOW        0x3
+
+#define CCFG_ID           0x100   /* Invalidate L1D bit */
+#define CCFG_IP           0x200   /* Invalidate L1P bit */
+
+static void __iomem *cache_base;
+
+/*
+ * L1 & L2 caches generic functions
+ */
+#define imcr_get(reg) soc_readl(cache_base + (reg))
+#define imcr_set(reg, value)                            \
+do {                                                    \
+        soc_writel((value), cache_base + (reg));        \
+        soc_readl(cache_base + (reg));                  \
+} while (0)
+
+static void cache_block_operation_wait(unsigned int wc_reg)
+{
+        /* Wait for completion */
+        while (imcr_get(wc_reg))
+                cpu_relax();
+}
+
+static DEFINE_SPINLOCK(cache_lock);
+
+/*
+ * Generic function to perform a block cache operation as
+ * invalidate or writeback/invalidate
+ */
+static void cache_block_operation(unsigned int *start,
+                                  unsigned int *end,
+                                  unsigned int bar_reg,
+                                  unsigned int wc_reg)
+{
+        unsigned long flags;
+        unsigned int wcnt =
+                (L2_CACHE_ALIGN_CNT((unsigned int) end)
+                 - L2_CACHE_ALIGN_LOW((unsigned int) start)) >> 2;
+        unsigned int wc = 0;
+
+        for (; wcnt; wcnt -= wc, start += wc) {
+loop:
+                spin_lock_irqsave(&cache_lock, flags);
+
+                /*
+                 * If another cache operation is occurring
+                 */
+                if (unlikely(imcr_get(wc_reg))) {
+                        spin_unlock_irqrestore(&cache_lock, flags);
+
+                        /* Wait for previous operation completion */
+                        cache_block_operation_wait(wc_reg);
+
+                        /* Try again */
+                        goto loop;
+                }
+
+                imcr_set(bar_reg, L2_CACHE_ALIGN_LOW((unsigned int) start));
+
+                if (wcnt > 0xffff)
+                        wc = 0xffff;
+                else
+                        wc = wcnt;
+
+                /* Set word count value in the WC register */
+                imcr_set(wc_reg, wc & 0xffff);
+
+                spin_unlock_irqrestore(&cache_lock, flags);
+
+                /* Wait for completion */
+                cache_block_operation_wait(wc_reg);
+        }
+}
+
+static void cache_block_operation_nowait(unsigned int *start,
+                                         unsigned int *end,
+                                         unsigned int bar_reg,
+                                         unsigned int wc_reg)
+{
+        unsigned long flags;
+        unsigned int wcnt =
+                (L2_CACHE_ALIGN_CNT((unsigned int) end)
+                 - L2_CACHE_ALIGN_LOW((unsigned int) start)) >> 2;
+        unsigned int wc = 0;
+
+        for (; wcnt; wcnt -= wc, start += wc) {
+
+                spin_lock_irqsave(&cache_lock, flags);
+
+                imcr_set(bar_reg, L2_CACHE_ALIGN_LOW((unsigned int) start));
+
+                if (wcnt > 0xffff)
+                        wc = 0xffff;
+                else
+                        wc = wcnt;
+
+                /* Set word count value in the WC register */
+                imcr_set(wc_reg, wc & 0xffff);
+
+                spin_unlock_irqrestore(&cache_lock, flags);
+
+                /* Don't wait for completion on last cache operation */
+                if (wcnt > 0xffff)
+                        cache_block_operation_wait(wc_reg);
+        }
+}
+
+/*
+ * L1 caches management
+ */
+
+/*
+ * Disable L1 caches
+ */
+void L1_cache_off(void)
+{
+        unsigned int dummy;
+
+        imcr_set(IMCR_L1PCFG, 0);
+        dummy = imcr_get(IMCR_L1PCFG);
+
+        imcr_set(IMCR_L1DCFG, 0);
+        dummy = imcr_get(IMCR_L1DCFG);
+}
+
+/*
+ * Enable L1 caches
+ */
+void L1_cache_on(void)
+{
+        unsigned int dummy;
+
+        imcr_set(IMCR_L1PCFG, 7);
+        dummy = imcr_get(IMCR_L1PCFG);
+
+        imcr_set(IMCR_L1DCFG, 7);
+        dummy = imcr_get(IMCR_L1DCFG);
+}
+
+/*
+ * L1P global-invalidate all
+ */
+void L1P_cache_global_invalidate(void)
+{
+        unsigned int set = 1;
+        imcr_set(IMCR_L1PINV, set);
+        while (imcr_get(IMCR_L1PINV) & 1)
+                cpu_relax();
+}
+
+/*
+ * L1D global-invalidate all
+ *
+ * Warning: this operation causes all updated data in L1D to
+ * be discarded rather than written back to the lower levels of
+ * memory
+ */
+void L1D_cache_global_invalidate(void)
+{
+        unsigned int set = 1;
+        imcr_set(IMCR_L1DINV, set);
+        while (imcr_get(IMCR_L1DINV) & 1)
+                cpu_relax();
+}
+
+void L1D_cache_global_writeback(void)
+{
+        unsigned int set = 1;
+        imcr_set(IMCR_L1DWB, set);
+        while (imcr_get(IMCR_L1DWB) & 1)
+                cpu_relax();
+}
+
+void L1D_cache_global_writeback_invalidate(void)
+{
+        unsigned int set = 1;
+        imcr_set(IMCR_L1DWBINV, set);
+        while (imcr_get(IMCR_L1DWBINV) & 1)
+                cpu_relax();
+}
+
+/*
+ * L2 caches management
+ */
+
+/*
+ * Set L2 operation mode
+ */
+void L2_cache_set_mode(unsigned int mode)
+{
+        unsigned int ccfg = imcr_get(IMCR_CCFG);
+
+        /* Clear and set the L2MODE bits in CCFG */
+        ccfg &= ~7;
+        ccfg |= (mode & 7);
+        imcr_set(IMCR_CCFG, ccfg);
+        ccfg = imcr_get(IMCR_CCFG);
+}
+
+/*
+ * L2 global-writeback and global-invalidate all
+ */
+void L2_cache_global_writeback_invalidate(void)
+{
+        imcr_set(IMCR_L2WBINV, 1);
+        while (imcr_get(IMCR_L2WBINV))
+                cpu_relax();
+}
+
+/*
+ * L2 global-writeback all
+ */
+void L2_cache_global_writeback(void)
+{
+        imcr_set(IMCR_L2WB, 1);
+        while (imcr_get(IMCR_L2WB))
+                cpu_relax();
+}
+
+/*
+ * Cacheability controls
+ */
+void enable_caching(unsigned long start, unsigned long end)
+{
+        unsigned int mar = IMCR_MAR_BASE + ((start >> 24) << 2);
+        unsigned int mar_e = IMCR_MAR_BASE + ((end >> 24) << 2);
+
+        for (; mar <= mar_e; mar += 4)
+                imcr_set(mar, imcr_get(mar) | 1);
+}
+
+void disable_caching(unsigned long start, unsigned long end)
+{
+        unsigned int mar = IMCR_MAR_BASE + ((start >> 24) << 2);
+        unsigned int mar_e = IMCR_MAR_BASE + ((end >> 24) << 2);
+
+        for (; mar <= mar_e; mar += 4)
+                imcr_set(mar, imcr_get(mar) & ~1);
+}
+
+
+/*
+ * L1 block operations
+ */
+void L1P_cache_block_invalidate(unsigned int start, unsigned int end)
+{
+        cache_block_operation((unsigned int *) start,
+                              (unsigned int *) end,
+                              IMCR_L1PIBAR, IMCR_L1PIWC);
+}
+
+void L1D_cache_block_invalidate(unsigned int start, unsigned int end)
+{
+        cache_block_operation((unsigned int *) start,
+                              (unsigned int *) end,
+                              IMCR_L1DIBAR, IMCR_L1DIWC);
+}
+
+void L1D_cache_block_writeback_invalidate(unsigned int start, unsigned int end)
+{
+        cache_block_operation((unsigned int *) start,
+                              (unsigned int *) end,
+                              IMCR_L1DWIBAR, IMCR_L1DWIWC);
+}
+
+void L1D_cache_block_writeback(unsigned int start, unsigned int end)
+{
+        cache_block_operation((unsigned int *) start,
+                              (unsigned int *) end,
+                              IMCR_L1DWBAR, IMCR_L1DWWC);
+}
+
+/*
+ * L2 block operations
+ */
+void L2_cache_block_invalidate(unsigned int start, unsigned int end)
+{
+        cache_block_operation((unsigned int *) start,
+                              (unsigned int *) end,
+                              IMCR_L2IBAR, IMCR_L2IWC);
+}
+
+void L2_cache_block_writeback(unsigned int start, unsigned int end)
+{
+        cache_block_operation((unsigned int *) start,
+                              (unsigned int *) end,
+                              IMCR_L2WBAR, IMCR_L2WWC);
+}
+
+void L2_cache_block_writeback_invalidate(unsigned int start, unsigned int end)
+{
+        cache_block_operation((unsigned int *) start,
+                              (unsigned int *) end,
+                              IMCR_L2WIBAR, IMCR_L2WIWC);
+}
+
+void L2_cache_block_invalidate_nowait(unsigned int start, unsigned int end)
+{
+        cache_block_operation_nowait((unsigned int *) start,
+                                     (unsigned int *) end,
+                                     IMCR_L2IBAR, IMCR_L2IWC);
+}
+
+void L2_cache_block_writeback_nowait(unsigned int start, unsigned int end)
+{
+        cache_block_operation_nowait((unsigned int *) start,
+                                     (unsigned int *) end,
+                                     IMCR_L2WBAR, IMCR_L2WWC);
+}
+
+void L2_cache_block_writeback_invalidate_nowait(unsigned int start,
+                                                unsigned int end)
+{
+        cache_block_operation_nowait((unsigned int *) start,
+                                     (unsigned int *) end,
+                                     IMCR_L2WIBAR, IMCR_L2WIWC);
+}
+
+
+/*
+ * L1 and L2 caches configuration
+ */
+void __init c6x_cache_init(void)
+{
+        struct device_node *node;
+
+        node = of_find_compatible_node(NULL, NULL, "ti,c64x+cache");
+        if (!node)
+                return;
+
+        cache_base = of_iomap(node, 0);
+
+        of_node_put(node);
+
+        if (!cache_base)
+                return;
+
+        /* Set L2 caches on the whole L2 SRAM memory */
+        L2_cache_set_mode(L2MODE_SIZE);
+
+        /* Enable L1 */
+        L1_cache_on();
+}
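Editor's note: for completeness, a short sketch of how platform code might use the cacheability and block-maintenance interfaces added above. The helper names c6x_enable_ddr_caching() and c6x_sync_buf_for_device() are hypothetical; only the calls into cache.h come from this patch.

#include <asm/cache.h>

/*
 * Hypothetical example: make an external memory window cacheable.
 * enable_caching() sets bit 0 of every MAR register covering the
 * range, so cacheability is only controllable at CACHEABILITY_ALIGN
 * (16 MB) granularity; the whole regions containing [start, end]
 * become cacheable.
 */
static void c6x_enable_ddr_caching(unsigned long start, unsigned long size)
{
        enable_caching(start, start + size - 1);
}

/*
 * Hypothetical example: write dirty cache lines back to memory before
 * a non-coherent bus master reads the buffer. Internally the range is
 * chunked into programs of at most 0xffff words of the L2WBAR/L2WWC
 * register pair, waiting for each chunk to complete.
 */
static void c6x_sync_buf_for_device(void *buf, unsigned long len)
{
        L2_cache_block_writeback((unsigned int)buf,
                                 (unsigned int)buf + len);
}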