author      James Hogan <james.hogan@imgtec.com>    2012-10-09 10:54:17 +0100
committer   James Hogan <james.hogan@imgtec.com>    2013-03-02 20:09:19 +0000
commit      99ef7c2ac1e3b01f532bfdebbe92e9960e95bebc
tree        fb34d1df645d7eee43fe2441fd4938b0da7bad6f /arch/metag/mm
parent      027f891f7640144d4b7b15113f3ae9af2c8b095d
metag: Cache/TLB handling
Add cache and TLB handling code for metag, including the required
callbacks used by MM switches and DMA operations. Caches can be
partitioned between the hardware threads and the global space; however,
this partitioning is usually configured by the bootloader, so Linux leaves
it untouched. The TLBs aren't configurable, so the only handling they need
is flushing.
On Meta1 the L1 cache is VIVT, which requires a full flush on each MM
switch. Meta2 has a VIPT L1 cache, so it doesn't require the full flush on
MM switch. Meta2 can also have a writeback L2 cache with hardware prefetch,
which requires some special handling. L2 support is optional; the L2 can be
detected and initialised by Linux.
Signed-off-by: James Hogan <james.hogan@imgtec.com>
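
The Meta1/Meta2 difference above is what the MM-switch callbacks have to
account for. As a rough illustration only (a hypothetical helper, not code
from this patch; the real callbacks are wired up elsewhere in the series),
the decision reduces to a conditional full flush using the entry points
added in cache.c below:

```c
/*
 * Hypothetical sketch, not part of this patch: what an MM-switch path
 * has to do given the cache types described above. The
 * metag_*_cache_flush_all() entry points are added in
 * arch/metag/mm/cache.c below; passing NULL targets the local partition.
 */
static inline void sketch_flush_for_mm_switch(void)
{
#ifdef CONFIG_METAG_META12
	/*
	 * Meta1: the VIVT L1 can hold aliases from the outgoing address
	 * space, so both L1 caches must be flushed in full.
	 */
	metag_data_cache_flush_all(NULL);
	metag_code_cache_flush_all(NULL);
#endif
	/* Meta2: the VIPT L1 is safe across an MM switch; nothing to do. */
}
```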
Diffstat (limited to 'arch/metag/mm')
-rw-r--r--  arch/metag/mm/cache.c    441
-rw-r--r--  arch/metag/mm/l2cache.c  192
2 files changed, 633 insertions(+), 0 deletions(-)
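
One pattern worth noting before the diff: both linear flush paths in
cache.c batch up to four cache lines per loop iteration using a deliberate
fall-through switch (a partial Duff's device). A self-contained sketch of
that control flow, in plain C with a stub standing in for the flush
builtins:

```c
#include <stdio.h>

/* Stub standing in for the PRIM_FLUSH()/PRIM_IFLUSH() primitives. */
static void touch(int line)
{
	printf("flush line %d\n", line);
}

/* Flush 'loops' cache lines starting at 'base', up to four per pass. */
static void flush_lines(int base, int loops)
{
	int step;

	do {
		step = 0;	/* by default, stop after this pass */

		switch (loops) {
		default:	/* four or more left: do four, keep going */
			touch(base + 3);
			loops -= 4;
			step = 1;
			/* fall through */
		case 3:
			touch(base + 2);
			/* fall through */
		case 2:
			touch(base + 1);
			/* fall through */
		case 1:
			touch(base + 0);
			base += 4;
			/* fall through */
		case 0:
			break;
		}
	} while (step);
}

int main(void)
{
	flush_lines(0, 6);	/* one full pass of four, then a tail of two */
	return 0;
}
```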
diff --git a/arch/metag/mm/cache.c b/arch/metag/mm/cache.c
new file mode 100644
index 00000000000..b713ec01c20
--- /dev/null
+++ b/arch/metag/mm/cache.c
@@ -0,0 +1,441 @@
+/*
+ * arch/metag/mm/cache.c
+ *
+ * Copyright (C) 2001, 2002, 2005, 2007, 2012 Imagination Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ *
+ * Cache control code
+ */
+
+#include <linux/export.h>
+#include <linux/io.h>
+#include <asm/cacheflush.h>
+#include <asm/core_reg.h>
+#include <asm/metag_isa.h>
+#include <asm/metag_mem.h>
+#include <asm/metag_regs.h>
+
+#define DEFAULT_CACHE_WAYS_LOG2 2
+
+/*
+ * Size of a set in the caches. Initialised for default 16K stride, adjusted
+ * according to values passed through TBI global heap segment via LDLK (on ATP)
+ * or config registers (on HTP/MTP)
+ */
+static int dcache_set_shift = METAG_TBI_CACHE_SIZE_BASE_LOG2
+                                        - DEFAULT_CACHE_WAYS_LOG2;
+static int icache_set_shift = METAG_TBI_CACHE_SIZE_BASE_LOG2
+                                        - DEFAULT_CACHE_WAYS_LOG2;
+/*
+ * The number of sets in the caches. Initialised for HTP/ATP, adjusted
+ * according to NOMMU setting in config registers
+ */
+static unsigned char dcache_sets_log2 = DEFAULT_CACHE_WAYS_LOG2;
+static unsigned char icache_sets_log2 = DEFAULT_CACHE_WAYS_LOG2;
+
+/**
+ * metag_cache_probe() - Probe L1 cache configuration.
+ *
+ * Probe the L1 cache configuration to aid the L1 physical cache flushing
+ * functions.
+ */
+void metag_cache_probe(void)
+{
+#ifndef CONFIG_METAG_META12
+        int coreid = metag_in32(METAC_CORE_ID);
+        int config = metag_in32(METAC_CORE_CONFIG2);
+        int cfgcache = coreid & METAC_COREID_CFGCACHE_BITS;
+
+        if (cfgcache == METAC_COREID_CFGCACHE_TYPE0 ||
+            cfgcache == METAC_COREID_CFGCACHE_PRIVNOMMU) {
+                icache_sets_log2 = 1;
+                dcache_sets_log2 = 1;
+        }
+
+        /* For normal size caches, the smallest size is 4Kb.
+           For small caches, the smallest size is 64b */
+        icache_set_shift = (config & METAC_CORECFG2_ICSMALL_BIT)
+                                ? 6 : 12;
+        icache_set_shift += (config & METAC_CORE_C2ICSZ_BITS)
+                                >> METAC_CORE_C2ICSZ_S;
+        icache_set_shift -= icache_sets_log2;
+
+        dcache_set_shift = (config & METAC_CORECFG2_DCSMALL_BIT)
+                                ? 6 : 12;
+        dcache_set_shift += (config & METAC_CORECFG2_DCSZ_BITS)
+                                >> METAC_CORECFG2_DCSZ_S;
+        dcache_set_shift -= dcache_sets_log2;
+#else
+        /* Extract cache sizes from global heap segment */
+        unsigned long val, u;
+        int width, shift, addend;
+        PTBISEG seg;
+
+        seg = __TBIFindSeg(NULL, TBID_SEG(TBID_THREAD_GLOBAL,
+                                          TBID_SEGSCOPE_GLOBAL,
+                                          TBID_SEGTYPE_HEAP));
+        if (seg != NULL) {
+                val = seg->Data[1];
+
+                /* Work out width of I-cache size bit-field */
+                u = ((unsigned long) METAG_TBI_ICACHE_SIZE_BITS)
+                                        >> METAG_TBI_ICACHE_SIZE_S;
+                width = 0;
+                while (u & 1) {
+                        width++;
+                        u >>= 1;
+                }
+                /* Extract sign-extended size addend value */
+                shift = 32 - (METAG_TBI_ICACHE_SIZE_S + width);
+                addend = (long) ((val & METAG_TBI_ICACHE_SIZE_BITS)
+                                        << shift)
+                                        >> (shift + METAG_TBI_ICACHE_SIZE_S);
+                /* Now calculate I-cache set size */
+                icache_set_shift = (METAG_TBI_CACHE_SIZE_BASE_LOG2
+                                    - DEFAULT_CACHE_WAYS_LOG2)
+                                        + addend;
+
+                /* Similarly for D-cache */
+                u = ((unsigned long) METAG_TBI_DCACHE_SIZE_BITS)
+                                        >> METAG_TBI_DCACHE_SIZE_S;
+                width = 0;
+                while (u & 1) {
+                        width++;
+                        u >>= 1;
+                }
+                shift = 32 - (METAG_TBI_DCACHE_SIZE_S + width);
+                addend = (long) ((val & METAG_TBI_DCACHE_SIZE_BITS)
+                                        << shift)
+                                        >> (shift + METAG_TBI_DCACHE_SIZE_S);
+                dcache_set_shift = (METAG_TBI_CACHE_SIZE_BASE_LOG2
+                                    - DEFAULT_CACHE_WAYS_LOG2)
+                                        + addend;
+        }
+#endif
+}
+
+static void metag_phys_data_cache_flush(const void *start)
+{
+        unsigned long flush0, flush1, flush2, flush3;
+        int loops, step;
+        int thread;
+        int part, offset;
+        int set_shift;
+
+        /* Use a sequence of writes to flush the cache region requested */
+        thread = (__core_reg_get(TXENABLE) & TXENABLE_THREAD_BITS)
+                                          >> TXENABLE_THREAD_S;
+
+        /* Cache is broken into sets which lie in contiguous RAMs */
+        set_shift = dcache_set_shift;
+
+        /* Move to the base of the physical cache flush region */
+        flush0 = LINSYSCFLUSH_DCACHE_LINE;
+        step = 64;
+
+        /* Get partition data for this thread */
+        part = metag_in32(SYSC_DCPART0 +
+                          (SYSC_xCPARTn_STRIDE * thread));
+
+        if ((int)start < 0)
+                /* Access Global vs Local partition */
+                part >>= SYSC_xCPARTG_AND_S
+                        - SYSC_xCPARTL_AND_S;
+
+        /* Extract offset and move SetOff */
+        offset = (part & SYSC_xCPARTL_OR_BITS)
+                        >> SYSC_xCPARTL_OR_S;
+        flush0 += (offset << (set_shift - 4));
+
+        /* Shrink size */
+        part = (part & SYSC_xCPARTL_AND_BITS)
+                >> SYSC_xCPARTL_AND_S;
+        loops = ((part + 1) << (set_shift - 4));
+
+        /* Reduce loops by step of cache line size */
+        loops /= step;
+
+        flush1 = flush0 + (1 << set_shift);
+        flush2 = flush0 + (2 << set_shift);
+        flush3 = flush0 + (3 << set_shift);
+
+        if (dcache_sets_log2 == 1) {
+                flush2 = flush1;
+                flush3 = flush1 + step;
+                flush1 = flush0 + step;
+                step <<= 1;
+                loops >>= 1;
+        }
+
+        /* Clear loops ways in cache */
+        while (loops-- != 0) {
+                /* Clear the ways. */
+#if 0
+                /*
+                 * GCC doesn't generate very good code for this so we
+                 * provide inline assembly instead.
+                 */
+                metag_out8(0, flush0);
+                metag_out8(0, flush1);
+                metag_out8(0, flush2);
+                metag_out8(0, flush3);
+
+                flush0 += step;
+                flush1 += step;
+                flush2 += step;
+                flush3 += step;
+#else
+                asm volatile (
+                        "SETB\t[%0+%4++],%5\n"
+                        "SETB\t[%1+%4++],%5\n"
+                        "SETB\t[%2+%4++],%5\n"
+                        "SETB\t[%3+%4++],%5\n"
+                        : "+e" (flush0),
+                          "+e" (flush1),
+                          "+e" (flush2),
+                          "+e" (flush3)
+                        : "e" (step), "a" (0));
+#endif
+        }
+}
+
+void metag_data_cache_flush_all(const void *start)
+{
+        if ((metag_in32(SYSC_CACHE_MMU_CONFIG) & SYSC_CMMUCFG_DC_ON_BIT) == 0)
+                /* No need to flush the data cache it's not actually enabled */
+                return;
+
+        metag_phys_data_cache_flush(start);
+}
+
+void metag_data_cache_flush(const void *start, int bytes)
+{
+        unsigned long flush0;
+        int loops, step;
+
+        if ((metag_in32(SYSC_CACHE_MMU_CONFIG) & SYSC_CMMUCFG_DC_ON_BIT) == 0)
+                /* No need to flush the data cache it's not actually enabled */
+                return;
+
+        if (bytes >= 4096) {
+                metag_phys_data_cache_flush(start);
+                return;
+        }
+
+        /* Use linear cache flush mechanism on META IP */
+        flush0 = (int)start;
+        loops = ((int)start & (DCACHE_LINE_BYTES - 1)) + bytes +
+                                        (DCACHE_LINE_BYTES - 1);
+        loops >>= DCACHE_LINE_S;
+
+#define PRIM_FLUSH(addr, offset) do {                           \
+        int __addr = ((int) (addr)) + ((offset) * 64);          \
+        __builtin_dcache_flush((void *)(__addr));               \
+        } while (0)
+
+#define LOOP_INC (4*64)
+
+        do {
+                /* By default stop */
+                step = 0;
+
+                switch (loops) {
+                /* Drop Thru Cases! */
+                default:
+                        PRIM_FLUSH(flush0, 3);
+                        loops -= 4;
+                        step = 1;
+                case 3:
+                        PRIM_FLUSH(flush0, 2);
+                case 2:
+                        PRIM_FLUSH(flush0, 1);
+                case 1:
+                        PRIM_FLUSH(flush0, 0);
+                        flush0 += LOOP_INC;
+                case 0:
+                        break;
+                }
+        } while (step);
+}
+EXPORT_SYMBOL(metag_data_cache_flush);
+
+static void metag_phys_code_cache_flush(const void *start, int bytes)
+{
+        unsigned long flush0, flush1, flush2, flush3, end_set;
+        int loops, step;
+        int thread;
+        int set_shift, set_size;
+        int part, offset;
+
+        /* Use a sequence of writes to flush the cache region requested */
+        thread = (__core_reg_get(TXENABLE) & TXENABLE_THREAD_BITS)
+                                          >> TXENABLE_THREAD_S;
+        set_shift = icache_set_shift;
+
+        /* Move to the base of the physical cache flush region */
+        flush0 = LINSYSCFLUSH_ICACHE_LINE;
+        step = 64;
+
+        /* Get partition code for this thread */
+        part = metag_in32(SYSC_ICPART0 +
+                          (SYSC_xCPARTn_STRIDE * thread));
+
+        if ((int)start < 0)
+                /* Access Global vs Local partition */
+                part >>= SYSC_xCPARTG_AND_S-SYSC_xCPARTL_AND_S;
+
+        /* Extract offset and move SetOff */
+        offset = (part & SYSC_xCPARTL_OR_BITS)
+                        >> SYSC_xCPARTL_OR_S;
+        flush0 += (offset << (set_shift - 4));
+
+        /* Shrink size */
+        part = (part & SYSC_xCPARTL_AND_BITS)
+                >> SYSC_xCPARTL_AND_S;
+        loops = ((part + 1) << (set_shift - 4));
+
+        /* Where does the Set end? */
+        end_set = flush0 + loops;
+        set_size = loops;
+
+#ifdef CONFIG_METAG_META12
+        if ((bytes < 4096) && (bytes < loops)) {
+                /* Unreachable on HTP/MTP */
+                /* Only target the sets that could be relavent */
+                flush0 += (loops - step) & ((int) start);
+                loops = (((int) start) & (step-1)) + bytes + step - 1;
+        }
+#endif
+
+        /* Reduce loops by step of cache line size */
+        loops /= step;
+
+        flush1 = flush0 + (1<<set_shift);
+        flush2 = flush0 + (2<<set_shift);
+        flush3 = flush0 + (3<<set_shift);
+
+        if (icache_sets_log2 == 1) {
+                flush2 = flush1;
+                flush3 = flush1 + step;
+                flush1 = flush0 + step;
+#if 0
+                /* flush0 will stop one line early in this case
+                 * (flush1 will do the final line).
+                 * However we don't correct end_set here at the moment
+                 * because it will never wrap on HTP/MTP
+                 */
+                end_set -= step;
+#endif
+                step <<= 1;
+                loops >>= 1;
+        }
+
+        /* Clear loops ways in cache */
+        while (loops-- != 0) {
+#if 0
+                /*
+                 * GCC doesn't generate very good code for this so we
+                 * provide inline assembly instead.
+                 */
+                /* Clear the ways */
+                metag_out8(0, flush0);
+                metag_out8(0, flush1);
+                metag_out8(0, flush2);
+                metag_out8(0, flush3);
+
+                flush0 += step;
+                flush1 += step;
+                flush2 += step;
+                flush3 += step;
+#else
+                asm volatile (
+                        "SETB\t[%0+%4++],%5\n"
+                        "SETB\t[%1+%4++],%5\n"
+                        "SETB\t[%2+%4++],%5\n"
+                        "SETB\t[%3+%4++],%5\n"
+                        : "+e" (flush0),
+                          "+e" (flush1),
+                          "+e" (flush2),
+                          "+e" (flush3)
+                        : "e" (step), "a" (0));
+#endif
+
+                if (flush0 == end_set) {
+                        /* Wrap within Set 0 */
+                        flush0 -= set_size;
+                        flush1 -= set_size;
+                        flush2 -= set_size;
+                        flush3 -= set_size;
+                }
+        }
+}
+
+void metag_code_cache_flush_all(const void *start)
+{
+        if ((metag_in32(SYSC_CACHE_MMU_CONFIG) & SYSC_CMMUCFG_IC_ON_BIT) == 0)
+                /* No need to flush the code cache it's not actually enabled */
+                return;
+
+        metag_phys_code_cache_flush(start, 4096);
+}
+
+void metag_code_cache_flush(const void *start, int bytes)
+{
+#ifndef CONFIG_METAG_META12
+        void *flush;
+        int loops, step;
+#endif /* !CONFIG_METAG_META12 */
+
+        if ((metag_in32(SYSC_CACHE_MMU_CONFIG) & SYSC_CMMUCFG_IC_ON_BIT) == 0)
+                /* No need to flush the code cache it's not actually enabled */
+                return;
+
+#ifdef CONFIG_METAG_META12
+        /* CACHEWD isn't available on Meta1, so always do full cache flush */
+        metag_phys_code_cache_flush(start, bytes);
+
+#else /* CONFIG_METAG_META12 */
+        /* If large size do full physical cache flush */
+        if (bytes >= 4096) {
+                metag_phys_code_cache_flush(start, bytes);
+                return;
+        }
+
+        /* Use linear cache flush mechanism on META IP */
+        flush = (void *)((int)start & ~(ICACHE_LINE_BYTES-1));
+        loops = ((int)start & (ICACHE_LINE_BYTES-1)) + bytes +
+                (ICACHE_LINE_BYTES-1);
+        loops >>= ICACHE_LINE_S;
+
+#define PRIM_IFLUSH(addr, offset) \
+        __builtin_meta2_cachewd(((addr) + ((offset) * 64)), CACHEW_ICACHE_BIT)
+
+#define LOOP_INC (4*64)
+
+        do {
+                /* By default stop */
+                step = 0;
+
+                switch (loops) {
+                /* Drop Thru Cases! */
+                default:
+                        PRIM_IFLUSH(flush, 3);
+                        loops -= 4;
+                        step = 1;
+                case 3:
+                        PRIM_IFLUSH(flush, 2);
+                case 2:
+                        PRIM_IFLUSH(flush, 1);
+                case 1:
+                        PRIM_IFLUSH(flush, 0);
+                        flush += LOOP_INC;
+                case 0:
+                        break;
+                }
+        } while (step);
+#endif /* !CONFIG_METAG_META12 */
+}
+EXPORT_SYMBOL(metag_code_cache_flush);
diff --git a/arch/metag/mm/l2cache.c b/arch/metag/mm/l2cache.c
new file mode 100644
index 00000000000..c64ee615cf9
--- /dev/null
+++ b/arch/metag/mm/l2cache.c
@@ -0,0 +1,192 @@
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+
+#include <asm/l2cache.h>
+#include <asm/metag_isa.h>
+
+/* If non-0, then initialise the L2 cache */
+static int l2cache_init = 1;
+/* If non-0, then initialise the L2 cache prefetch */
+static int l2cache_init_pf = 1;
+
+int l2c_pfenable;
+
+static volatile u32 l2c_testdata[16] __initdata __aligned(64);
+
+static int __init parse_l2cache(char *p)
+{
+        char *cp = p;
+
+        if (get_option(&cp, &l2cache_init) != 1) {
+                pr_err("Bad l2cache parameter (%s)\n", p);
+                return 1;
+        }
+        return 0;
+}
+early_param("l2cache", parse_l2cache);
+
+static int __init parse_l2cache_pf(char *p)
+{
+        char *cp = p;
+
+        if (get_option(&cp, &l2cache_init_pf) != 1) {
+                pr_err("Bad l2cache_pf parameter (%s)\n", p);
+                return 1;
+        }
+        return 0;
+}
+early_param("l2cache_pf", parse_l2cache_pf);
+
+static int __init meta_l2c_setup(void)
+{
+        /*
+         * If the L2 cache isn't even present, don't do anything, but say so in
+         * the log.
+         */
+        if (!meta_l2c_is_present()) {
+                pr_info("L2 Cache: Not present\n");
+                return 0;
+        }
+
+        /*
+         * Check whether the line size is recognised.
+         */
+        if (!meta_l2c_linesize()) {
+                pr_warn_once("L2 Cache: unknown line size id (config=0x%08x)\n",
+                             meta_l2c_config());
+        }
+
+        /*
+         * Initialise state.
+         */
+        l2c_pfenable = _meta_l2c_pf_is_enabled();
+
+        /*
+         * Enable the L2 cache and print to log whether it was already enabled
+         * by the bootloader.
+         */
+        if (l2cache_init) {
+                pr_info("L2 Cache: Enabling... ");
+                if (meta_l2c_enable())
+                        pr_cont("already enabled\n");
+                else
+                        pr_cont("done\n");
+        } else {
+                pr_info("L2 Cache: Not enabling\n");
+        }
+
+        /*
+         * Enable L2 cache prefetch.
+         */
+        if (l2cache_init_pf) {
+                pr_info("L2 Cache: Enabling prefetch... ");
+                if (meta_l2c_pf_enable(1))
+                        pr_cont("already enabled\n");
+                else
+                        pr_cont("done\n");
+        } else {
+                pr_info("L2 Cache: Not enabling prefetch\n");
+        }
+
+        return 0;
+}
+core_initcall(meta_l2c_setup);
+
+int meta_l2c_disable(void)
+{
+        unsigned long flags;
+        int en;
+
+        if (!meta_l2c_is_present())
+                return 1;
+
+        /*
+         * Prevent other threads writing during the writeback, otherwise the
+         * writes will get "lost" when the L2 is disabled.
+         */
+        __global_lock2(flags);
+        en = meta_l2c_is_enabled();
+        if (likely(en)) {
+                _meta_l2c_pf_enable(0);
+                wr_fence();
+                _meta_l2c_purge();
+                _meta_l2c_enable(0);
+        }
+        __global_unlock2(flags);
+
+        return !en;
+}
+
+int meta_l2c_enable(void)
+{
+        unsigned long flags;
+        int en;
+
+        if (!meta_l2c_is_present())
+                return 0;
+
+        /*
+         * Init (clearing the L2) can happen while the L2 is disabled, so other
+         * threads are safe to continue executing, however we must not init the
+         * cache if it's already enabled (dirty lines would be discarded), so
+         * this operation should still be atomic with other threads.
+         */
+        __global_lock1(flags);
+        en = meta_l2c_is_enabled();
+        if (likely(!en)) {
+                _meta_l2c_init();
+                _meta_l2c_enable(1);
+                _meta_l2c_pf_enable(l2c_pfenable);
+        }
+        __global_unlock1(flags);
+
+        return en;
+}
+
+int meta_l2c_pf_enable(int pfenable)
+{
+        unsigned long flags;
+        int en = l2c_pfenable;
+
+        if (!meta_l2c_is_present())
+                return 0;
+
+        /*
+         * We read modify write the enable register, so this operation must be
+         * atomic with other threads.
+         */
+        __global_lock1(flags);
+        en = l2c_pfenable;
+        l2c_pfenable = pfenable;
+        if (meta_l2c_is_enabled())
+                _meta_l2c_pf_enable(pfenable);
+        __global_unlock1(flags);
+
+        return en;
+}
+
+int meta_l2c_flush(void)
+{
+        unsigned long flags;
+        int en;
+
+        /*
+         * Prevent other threads writing during the writeback. This also
+         * involves read modify writes.
+         */
+        __global_lock2(flags);
+        en = meta_l2c_is_enabled();
+        if (likely(en)) {
+                _meta_l2c_pf_enable(0);
+                wr_fence();
+                _meta_l2c_purge();
+                _meta_l2c_enable(0);
+                _meta_l2c_init();
+                _meta_l2c_enable(1);
+                _meta_l2c_pf_enable(l2c_pfenable);
+        }
+        __global_unlock2(flags);
+
+        return !en;
+}
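
Finally, a usage sketch of the L2 maintenance API added in l2cache.c. This
is illustrative only (the driver context is hypothetical, not part of this
patch): a driver handing a buffer to a non-coherent DMA master must make
sure no dirty lines for it remain in the writeback L2, and meta_l2c_flush()
is a heavyweight but safe way to do that, since it writes back and
reinitialises the entire cache under the global lock.

```c
/*
 * Hypothetical usage sketch, not part of this patch: prepare for a
 * transfer by a non-coherent DMA master. meta_l2c_flush() (added
 * above) purges the whole L2, so all dirty lines reach memory before
 * the device reads it.
 */
#include <asm/l2cache.h>

static void sketch_prepare_dma_transfer(void)
{
	/* No-op on cores without an L2; otherwise write back everything. */
	if (meta_l2c_is_present())
		meta_l2c_flush();

	/* ... start the DMA transfer here ... */
}
```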