Diffstat (limited to 'arch/mips/mm/c-r4k.c')
 arch/mips/mm/c-r4k.c | 145 ++++++++++++++++++++++++++----------------------
 1 file changed, 82 insertions(+), 63 deletions(-)
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 5ea84bc98c6..38223b44d96 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -16,6 +16,7 @@
#include <asm/bcache.h>
#include <asm/bootinfo.h>
+#include <asm/cache.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
@@ -26,8 +27,14 @@
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/war.h>
+#include <asm/cacheflush.h> /* for run_uncached() */
-static unsigned long icache_size, dcache_size, scache_size;
+/*
+ * Must die.
+ */
+static unsigned long icache_size __read_mostly;
+static unsigned long dcache_size __read_mostly;
+static unsigned long scache_size __read_mostly;
/*
* Dummy cache handling routines for machines without boardcaches
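The __read_mostly annotation groups these sizes, which are written once during
cache probing but read on every flush, into a dedicated data section so they do
not share cache lines with write-hot variables. A minimal sketch of the
mechanism, assuming the conventional section-attribute definition (the real one
lives in the newly included <asm/cache.h> and is arch-specific):

	/* Sketch: typical definition (assumption, not this tree's exact text) */
	#define __read_mostly __attribute__((__section__(".data.read_mostly")))

	/* The linker packs all such variables together, away from frequently
	 * written data, avoiding false sharing of their cache lines. */
	static unsigned long icache_size __read_mostly;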
@@ -43,8 +50,8 @@ static struct bcache_ops no_sc_ops = {
struct bcache_ops *bcops = &no_sc_ops;
-#define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x2010)
-#define cpu_is_r4600_v2_x() ((read_c0_prid() & 0xfffffff0) == 0x2020)
+#define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x00002010)
+#define cpu_is_r4600_v2_x() ((read_c0_prid() & 0xfffffff0) == 0x00002020)
#define R4600_HIT_CACHEOP_WAR_IMPL \
do { \
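The widened constants in the PRID checks above spell out the full 32-bit PRId
value being compared: bits 23:16 hold the company ID (0x00 on these legacy
parts), bits 15:8 the implementation (0x20 for the R4600), and bits 7:4 the
major revision. Masking with 0xfffffff0 keeps everything but the minor-revision
nibble, so "v1_x" matches any 1.x stepping. A decoding sketch with hypothetical
helpers (the file itself just compares the masked word):

	#define PRID_IMP(prid)	(((prid) >> 8) & 0xff)	/* 0x20: R4600 */
	#define PRID_MAJ(prid)	(((prid) >> 4) & 0xf)	/* major revision */

	static inline int is_r4600_v1(unsigned int prid)
	{
		return (prid & 0xfffffff0) == 0x00002010; /* imp 0x20, rev 1.x */
	}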
@@ -190,12 +197,12 @@ static inline void r4k_blast_icache_page_indexed_setup(void)
if (ic_lsize == 16)
r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
else if (ic_lsize == 32) {
- if (TX49XX_ICACHE_INDEX_INV_WAR)
- r4k_blast_icache_page_indexed =
- tx49_blast_icache32_page_indexed;
- else if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
+ if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
r4k_blast_icache_page_indexed =
blast_icache32_r4600_v1_page_indexed;
+ else if (TX49XX_ICACHE_INDEX_INV_WAR)
+ r4k_blast_icache_page_indexed =
+ tx49_blast_icache32_page_indexed;
else
r4k_blast_icache_page_indexed =
blast_icache32_page_indexed;
@@ -361,24 +368,33 @@ static void r4k_flush_cache_mm(struct mm_struct *mm)
struct flush_cache_page_args {
struct vm_area_struct *vma;
- unsigned long page;
+ unsigned long addr;
};
static inline void local_r4k_flush_cache_page(void *args)
{
struct flush_cache_page_args *fcp_args = args;
struct vm_area_struct *vma = fcp_args->vma;
- unsigned long page = fcp_args->page;
+ unsigned long addr = fcp_args->addr;
int exec = vma->vm_flags & VM_EXEC;
struct mm_struct *mm = vma->vm_mm;
pgd_t *pgdp;
+ pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
- page &= PAGE_MASK;
- pgdp = pgd_offset(mm, page);
- pmdp = pmd_offset(pgdp, page);
- ptep = pte_offset(pmdp, page);
+ /*
+ * If it owns no valid ASID yet, it cannot possibly have gotten
+ * this page into the cache.
+ */
+ if (cpu_context(smp_processor_id(), mm) == 0)
+ return;
+
+ addr &= PAGE_MASK;
+ pgdp = pgd_offset(mm, addr);
+ pudp = pud_offset(pgdp, addr);
+ pmdp = pmd_offset(pudp, addr);
+ ptep = pte_offset(pmdp, addr);
/*
* If the page isn't marked valid, the page cannot possibly be
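The added pud_offset() step follows the kernel's four-level page table
interface (pgd, pud, pmd, pte). On MIPS configurations without a real pud level
the generic nopud folding makes the extra call free; paraphrasing that folding:

	/* Sketch of the <asm-generic/pgtable-nopud.h> fold: with no pud
	 * level, the "pud" is simply the pgd entry reinterpreted, so
	 * pud_offset() compiles away to nothing. */
	static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
	{
		return (pud_t *)pgd;
	}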
@@ -395,12 +411,12 @@ static inline void local_r4k_flush_cache_page(void *args)
*/
if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
- r4k_blast_dcache_page(page);
+ r4k_blast_dcache_page(addr);
if (exec && !cpu_icache_snoops_remote_store)
- r4k_blast_scache_page(page);
+ r4k_blast_scache_page(addr);
}
if (exec)
- r4k_blast_icache_page(page);
+ r4k_blast_icache_page(addr);
return;
}
@@ -409,36 +425,30 @@ static inline void local_r4k_flush_cache_page(void *args)
* Do indexed flush, too much work to get the (possible) TLB refills
* to work correctly.
*/
- page = INDEX_BASE + (page & (dcache_size - 1));
+ addr = INDEX_BASE + (addr & (dcache_size - 1));
if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
- r4k_blast_dcache_page_indexed(page);
+ r4k_blast_dcache_page_indexed(addr);
if (exec && !cpu_icache_snoops_remote_store)
- r4k_blast_scache_page_indexed(page);
+ r4k_blast_scache_page_indexed(addr);
}
if (exec) {
if (cpu_has_vtag_icache) {
int cpu = smp_processor_id();
- if (cpu_context(cpu, vma->vm_mm) != 0)
- drop_mmu_context(vma->vm_mm, cpu);
+ if (cpu_context(cpu, mm) != 0)
+ drop_mmu_context(mm, cpu);
} else
- r4k_blast_icache_page_indexed(page);
+ r4k_blast_icache_page_indexed(addr);
}
}
-static void r4k_flush_cache_page(struct vm_area_struct *vma, unsigned long page, unsigned long pfn)
+static void r4k_flush_cache_page(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long pfn)
{
struct flush_cache_page_args args;
- /*
- * If ownes no valid ASID yet, cannot possibly have gotten
- * this page into the cache.
- */
- if (cpu_context(smp_processor_id(), vma->vm_mm) == 0)
- return;
-
args.vma = vma;
- args.page = page;
+ args.addr = addr;
on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
}
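Moving the "owns no ASID yet" test from r4k_flush_cache_page() into the per-CPU
worker is the real point of this hunk: cpu_context() is per-CPU state, so each
processor must decide for itself whether the mm has ever run there and can
therefore hold stale lines. The cross-call contract this relies on (2.6-era
four-argument form):

	/* Runs func(info) on every online CPU, caller included; with
	 * wait != 0 it returns only once all CPUs have finished, so the
	 * flush is globally complete when the caller resumes. */
	on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);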
@@ -454,16 +464,16 @@ static void r4k_flush_data_cache_page(unsigned long addr)
}
struct flush_icache_range_args {
- unsigned long start;
- unsigned long end;
+ unsigned long __user start;
+ unsigned long __user end;
};
static inline void local_r4k_flush_icache_range(void *args)
{
struct flush_icache_range_args *fir_args = args;
- unsigned long dc_lsize = current_cpu_data.dcache.linesz;
- unsigned long ic_lsize = current_cpu_data.icache.linesz;
- unsigned long sc_lsize = current_cpu_data.scache.linesz;
+ unsigned long dc_lsize = cpu_dcache_line_size();
+ unsigned long ic_lsize = cpu_icache_line_size();
+ unsigned long sc_lsize = cpu_scache_line_size();
unsigned long start = fir_args->start;
unsigned long end = fir_args->end;
unsigned long addr, aend;
@@ -472,6 +482,7 @@ static inline void local_r4k_flush_icache_range(void *args)
if (end - start > dcache_size) {
r4k_blast_dcache();
} else {
+ R4600_HIT_CACHEOP_WAR_IMPL;
addr = start & ~(dc_lsize - 1);
aend = (end - 1) & ~(dc_lsize - 1);
@@ -492,7 +503,7 @@ static inline void local_r4k_flush_icache_range(void *args)
aend = (end - 1) & ~(sc_lsize - 1);
while (1) {
- /* Hit_Writeback_Inv_D */
+ /* Hit_Writeback_Inv_SD */
protected_writeback_scache_line(addr);
if (addr == aend)
break;
@@ -517,7 +528,8 @@ static inline void local_r4k_flush_icache_range(void *args)
}
}
-static void r4k_flush_icache_range(unsigned long start, unsigned long end)
+static void r4k_flush_icache_range(unsigned long __user start,
+ unsigned long __user end)
{
struct flush_icache_range_args args;
@@ -525,6 +537,7 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
args.end = end;
on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
+ instruction_hazard();
}
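The added instruction_hazard() closes the window between writing new
instructions and executing them: the pipeline may already have fetched stale
instructions past the flush point. A conceptual sketch only, an assumption
rather than this tree's exact <asm/hazards.h> code, which is CPU-dependent:
MIPS R2 cores can clear the hazard with jr.hb, a jump that doubles as an
instruction hazard barrier; earlier cores pad with nops instead.

	static inline void instruction_hazard_sketch(void)
	{
		unsigned long tmp;

		__asm__ __volatile__(
		"	.set	push		\n"
		"	.set	mips32r2	\n"
		"	la	%0, 1f		\n"	/* dla on 64-bit */
		"	jr.hb	%0		\n"	/* jump + hazard barrier */
		"	.set	pop		\n"
		"1:				\n"
		: "=r" (tmp));
	}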
/*
@@ -613,7 +626,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
BUG_ON(size == 0);
if (cpu_has_subset_pcaches) {
- unsigned long sc_lsize = current_cpu_data.scache.linesz;
+ unsigned long sc_lsize = cpu_scache_line_size();
if (size >= scache_size) {
r4k_blast_scache();
@@ -639,7 +652,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
if (size >= dcache_size) {
r4k_blast_dcache();
} else {
- unsigned long dc_lsize = current_cpu_data.dcache.linesz;
+ unsigned long dc_lsize = cpu_dcache_line_size();
R4600_HIT_CACHEOP_WAR_IMPL;
a = addr & ~(dc_lsize - 1);
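These DMA routines all use the same idiom: dc_lsize is a power of two, so
addr & ~(dc_lsize - 1) rounds the address down to its cache line boundary
(0x1234 with 32-byte lines becomes 0x1220). The loop body that follows this
truncated hunk takes the usual shape; a sketch under that assumption, matching
the pattern used elsewhere in this file:

	a = addr & ~(dc_lsize - 1);			/* first line */
	end = (addr + size - 1) & ~(dc_lsize - 1);	/* last line  */
	while (1) {
		flush_dcache_line(a);	/* Hit_Writeback_Inv_D */
		if (a == end)
			break;
		a += dc_lsize;
	}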
@@ -663,7 +676,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
BUG_ON(size == 0);
if (cpu_has_subset_pcaches) {
- unsigned long sc_lsize = current_cpu_data.scache.linesz;
+ unsigned long sc_lsize = cpu_scache_line_size();
if (size >= scache_size) {
r4k_blast_scache();
@@ -684,7 +697,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
if (size >= dcache_size) {
r4k_blast_dcache();
} else {
- unsigned long dc_lsize = current_cpu_data.dcache.linesz;
+ unsigned long dc_lsize = cpu_dcache_line_size();
R4600_HIT_CACHEOP_WAR_IMPL;
a = addr & ~(dc_lsize - 1);
@@ -708,9 +721,9 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
*/
static void local_r4k_flush_cache_sigtramp(void * arg)
{
- unsigned long ic_lsize = current_cpu_data.icache.linesz;
- unsigned long dc_lsize = current_cpu_data.dcache.linesz;
- unsigned long sc_lsize = current_cpu_data.scache.linesz;
+ unsigned long ic_lsize = cpu_icache_line_size();
+ unsigned long dc_lsize = cpu_dcache_line_size();
+ unsigned long sc_lsize = cpu_scache_line_size();
unsigned long addr = (unsigned long) arg;
R4600_HIT_CACHEOP_WAR_IMPL;
@@ -762,6 +775,7 @@ static inline void rm7k_erratum31(void)
for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
__asm__ __volatile__ (
+ ".set push\n\t"
".set noreorder\n\t"
".set mips3\n\t"
"cache\t%1, 0(%0)\n\t"
@@ -776,8 +790,7 @@ static inline void rm7k_erratum31(void)
"cache\t%1, 0x1000(%0)\n\t"
"cache\t%1, 0x2000(%0)\n\t"
"cache\t%1, 0x3000(%0)\n\t"
- ".set\tmips0\n\t"
- ".set\treorder\n\t"
+ ".set pop\n"
:
: "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
}
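Swapping the trailing ".set mips0; .set reorder" for ".set pop" is a
correctness fix, not style: push/pop save and restore the assembler's entire
option state, while mips0/reorder hard-code what that state is assumed to have
been, which breaks if the file is built with different defaults. The idiom in
isolation:

	__asm__ __volatile__(
	"	.set	push		\n"	/* save assembler options  */
	"	.set	noreorder	\n"
	"	.set	mips3		\n"	/* cache insn needs mips3+ */
	"	cache	%1, 0(%0)	\n"
	"	.set	pop		\n"	/* restore prior state     */
	: : "r" (addr), "i" (Index_Store_Tag_I));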
@@ -1011,9 +1024,19 @@ static void __init probe_pcache(void)
* normally they'd suffer from aliases but magic in the hardware deals
* with that for us so we don't need to take care ourselves.
*/
- if (c->cputype != CPU_R10000 && c->cputype != CPU_R12000)
- if (c->dcache.waysize > PAGE_SIZE)
- c->dcache.flags |= MIPS_CACHE_ALIASES;
+ switch (c->cputype) {
+ case CPU_20KC:
+ case CPU_25KF:
+ case CPU_R10000:
+ case CPU_R12000:
+ case CPU_SB1:
+ break;
+ case CPU_24K:
+ if (!(read_c0_config7() & (1 << 16)))
+ default:
+ if (c->dcache.waysize > PAGE_SIZE)
+ c->dcache.flags |= MIPS_CACHE_ALIASES;
+ }
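The underlying test is unchanged: aliases are possible whenever one cache way
spans more than a page, because some virtual index bit then lies above the page
offset. A worked example with hypothetical sizes:

	/* 32KB dcache, 4 ways, 4KB pages:
	 *	waysize = 32768 / 4 = 8192 > 4096
	 * Virtual bit 12 indexes the cache but is not fixed by the
	 * physical page, so two mappings of one physical page can sit in
	 * different lines -- a virtual alias.  The CPUs listed above
	 * avoid this in hardware, and the 24K does when Config7 bit 16
	 * is set. */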
switch (c->cputype) {
case CPU_20KC:
@@ -1024,7 +1047,11 @@ static void __init probe_pcache(void)
c->icache.flags |= MIPS_CACHE_VTAG;
break;
+ case CPU_AU1000:
case CPU_AU1500:
+ case CPU_AU1100:
+ case CPU_AU1550:
+ case CPU_AU1200:
c->icache.flags |= MIPS_CACHE_IC_F_DC;
break;
}
@@ -1102,7 +1129,6 @@ static int __init probe_scache(void)
return 1;
}
-typedef int (*probe_func_t)(unsigned long);
extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);
@@ -1110,7 +1136,6 @@ static void __init setup_scache(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
unsigned int config = read_c0_config();
- probe_func_t probe_scache_kseg1;
int sc_present = 0;
/*
@@ -1123,8 +1148,7 @@ static void __init setup_scache(void)
case CPU_R4000MC:
case CPU_R4400SC:
case CPU_R4400MC:
- probe_scache_kseg1 = (probe_func_t) (CKSEG1ADDR(&probe_scache));
- sc_present = probe_scache_kseg1(config);
+ sc_present = run_uncached(probe_scache);
if (sc_present)
c->options |= MIPS_CPU_CACHE_CDEX_S;
break;
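run_uncached() replaces a long-standing trick shown in the removed lines:
casting the probe's address into the uncached KSEG1 window so it executes with
caches bypassed, which a tag-poking probe requires. Side by side (the old form
only works for a kernel running in (C)KSEG0):

	/* old: relocate the call by address arithmetic */
	probe_func_t probe_scache_kseg1 =
		(probe_func_t) (CKSEG1ADDR(&probe_scache));
	sc_present = probe_scache_kseg1(config);

	/* new: helper that runs a function through an uncached mapping
	 * regardless of where the kernel itself executes */
	sc_present = run_uncached(probe_scache);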
@@ -1198,7 +1222,7 @@ static inline void coherency_setup(void)
}
}
-void __init ld_mmu_r4xx0(void)
+void __init r4k_cache_init(void)
{
extern void build_clear_page(void);
extern void build_copy_page(void);
@@ -1206,15 +1230,11 @@ void __init ld_mmu_r4xx0(void)
struct cpuinfo_mips *c = &current_cpu_data;
/* Default cache error handler for R4000 and R5000 family */
- memcpy((void *)(CAC_BASE + 0x100), &except_vec2_generic, 0x80);
- memcpy((void *)(UNCAC_BASE + 0x100), &except_vec2_generic, 0x80);
+ set_uncached_handler (0x100, &except_vec2_generic, 0x80);
probe_pcache();
setup_scache();
- if (c->dcache.sets * c->dcache.ways > PAGE_SIZE)
- c->dcache.flags |= MIPS_CACHE_ALIASES;
-
r4k_blast_dcache_page_setup();
r4k_blast_dcache_page_indexed_setup();
r4k_blast_dcache_setup();
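set_uncached_handler() absorbs the open-coded memcpy() calls removed above: the
cache error handler must be installed where it is reachable uncached, since
taking the exception through a cached vector with a broken cache could fault
recursively. Roughly what the helper replaces (constants from the removed
lines; the helper's internals are assumed here):

	memcpy((void *)(UNCAC_BASE + 0x100), &except_vec2_generic, 0x80);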
@@ -1252,9 +1272,8 @@ void __init ld_mmu_r4xx0(void)
_dma_cache_inv = r4k_dma_cache_inv;
#endif
- __flush_cache_all();
- coherency_setup();
-
build_clear_page();
build_copy_page();
+ local_r4k___flush_cache_all(NULL);
+ coherency_setup();
}
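The closing reorder appears deliberate: build_clear_page() and
build_copy_page() emit machine code at run time, so the flush now runs after
them, pushing the generated code out where the icache can see it, and
coherency_setup() comes last so the flush completes under the original
cacheability settings. The resulting sequence, annotated under that reading:

	build_clear_page();			/* emits code at run time  */
	build_copy_page();			/* emits code at run time  */
	local_r4k___flush_cache_all(NULL);	/* boot CPU only, so local */
	coherency_setup();			/* then switch Config.K0   */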