Diffstat (limited to 'arch/mips/mm')
-rw-r--r--  arch/mips/mm/c-octeon.c    |   8
-rw-r--r--  arch/mips/mm/cache.c       |  55
-rw-r--r--  arch/mips/mm/dma-default.c |   1
-rw-r--r--  arch/mips/mm/fault.c       |  27
-rw-r--r--  arch/mips/mm/highmem.c     |   1
-rw-r--r--  arch/mips/mm/hugetlbpage.c |   2
-rw-r--r--  arch/mips/mm/init.c        |   7
-rw-r--r--  arch/mips/mm/ioremap.c     |   1
-rw-r--r--  arch/mips/mm/page.c        |   2
-rw-r--r--  arch/mips/mm/pgtable-64.c  |  44
-rw-r--r--  arch/mips/mm/tlb-r4k.c     |  84
-rw-r--r--  arch/mips/mm/tlbex.c       | 330
-rw-r--r--  arch/mips/mm/uasm.c        |  33
-rw-r--r--  arch/mips/mm/uasm.h        | 191
14 files changed, 403 insertions(+), 383 deletions(-)
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index e06f1af760a..0f9c488044d 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -183,6 +183,7 @@ static void __cpuinit probe_octeon(void)
switch (c->cputype) {
case CPU_CAVIUM_OCTEON:
+ case CPU_CAVIUM_OCTEON_PLUS:
config1 = read_c0_config1();
c->icache.linesz = 2 << ((config1 >> 19) & 7);
c->icache.sets = 64 << ((config1 >> 22) & 7);
@@ -192,10 +193,10 @@ static void __cpuinit probe_octeon(void)
c->icache.sets * c->icache.ways * c->icache.linesz;
c->icache.waybit = ffs(icache_size / c->icache.ways) - 1;
c->dcache.linesz = 128;
- if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
- c->dcache.sets = 1; /* CN3XXX has one Dcache set */
- else
+ if (c->cputype == CPU_CAVIUM_OCTEON_PLUS)
c->dcache.sets = 2; /* CN5XXX has two Dcache sets */
+ else
+ c->dcache.sets = 1; /* CN3XXX has one Dcache set */
c->dcache.ways = 64;
dcache_size =
c->dcache.sets * c->dcache.ways * c->dcache.linesz;
@@ -305,4 +306,3 @@ asmlinkage void cache_parity_error_octeon_non_recoverable(void)
{
cache_parity_error_octeon(1);
}
-
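
Illustrative aside (not part of the patch): the probe above decodes the
MIPS32-style Config1 register to size the icache. A standalone sketch of the
same arithmetic; the ways (IA) field decode is assumed here since the hunk
does not show it.

	/* sketch only -- field positions per the MIPS32 Config1 layout */
	#include <stdio.h>

	static unsigned int icache_bytes(unsigned int config1)
	{
		unsigned int linesz = 2  << ((config1 >> 19) & 7); /* IL: line size    */
		unsigned int sets   = 64 << ((config1 >> 22) & 7); /* IS: sets per way */
		unsigned int ways   = 1  +  ((config1 >> 16) & 7); /* IA: ways (assumed decode) */

		return sets * ways * linesz;
	}

	int main(void)
	{
		/* IL=4 (32-byte lines), IS=3 (512 sets), IA=3 (4-way) -> 64 KiB */
		printf("%u\n", icache_bytes((3u << 22) | (4u << 19) | (3u << 16)));
		return 0;
	}
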
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index e716cafc346..12af739048f 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -133,26 +133,47 @@ void __update_cache(struct vm_area_struct *vma, unsigned long address,
}
unsigned long _page_cachable_default;
-EXPORT_SYMBOL_GPL(_page_cachable_default);
+EXPORT_SYMBOL(_page_cachable_default);
static inline void setup_protection_map(void)
{
- protection_map[0] = PAGE_NONE;
- protection_map[1] = PAGE_READONLY;
- protection_map[2] = PAGE_COPY;
- protection_map[3] = PAGE_COPY;
- protection_map[4] = PAGE_READONLY;
- protection_map[5] = PAGE_READONLY;
- protection_map[6] = PAGE_COPY;
- protection_map[7] = PAGE_COPY;
- protection_map[8] = PAGE_NONE;
- protection_map[9] = PAGE_READONLY;
- protection_map[10] = PAGE_SHARED;
- protection_map[11] = PAGE_SHARED;
- protection_map[12] = PAGE_READONLY;
- protection_map[13] = PAGE_READONLY;
- protection_map[14] = PAGE_SHARED;
- protection_map[15] = PAGE_SHARED;
+ if (kernel_uses_smartmips_rixi) {
+ protection_map[0] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+ protection_map[1] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
+ protection_map[2] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+ protection_map[3] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
+ protection_map[4] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
+ protection_map[5] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
+ protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
+ protection_map[7] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
+
+ protection_map[8] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+ protection_map[9] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
+ protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ);
+ protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
+ protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
+ protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
+ protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE | _PAGE_NO_READ);
+ protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
+
+ } else {
+ protection_map[0] = PAGE_NONE;
+ protection_map[1] = PAGE_READONLY;
+ protection_map[2] = PAGE_COPY;
+ protection_map[3] = PAGE_COPY;
+ protection_map[4] = PAGE_READONLY;
+ protection_map[5] = PAGE_READONLY;
+ protection_map[6] = PAGE_COPY;
+ protection_map[7] = PAGE_COPY;
+ protection_map[8] = PAGE_NONE;
+ protection_map[9] = PAGE_READONLY;
+ protection_map[10] = PAGE_SHARED;
+ protection_map[11] = PAGE_SHARED;
+ protection_map[12] = PAGE_READONLY;
+ protection_map[13] = PAGE_READONLY;
+ protection_map[14] = PAGE_SHARED;
+ protection_map[15] = PAGE_SHARED;
+ }
}
void __cpuinit cpu_cache_init(void)
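
Illustrative aside (not part of the patch): protection_map[] is indexed by the
low VM_READ/VM_WRITE/VM_EXEC/VM_SHARED bits of vm_flags (read = 1, write = 2,
exec = 4, shared = 8), so entries 0-7 are the private (copy-on-write)
combinations and 8-15 the shared ones. With RIXI the table is built from
explicit no-read/no-exec bits instead of the coarse PAGE_READONLY/PAGE_COPY/
PAGE_SHARED templates. A standalone restatement of the table above, with
placeholder bit values:

	/* placeholder bit values; the real ones live in <asm/pgtable-bits.h> */
	#define P_PRESENT  0x01
	#define P_WRITE    0x02
	#define P_NO_EXEC  0x04
	#define P_NO_READ  0x08

	/* idx: bit0=VM_READ, bit1=VM_WRITE, bit2=VM_EXEC, bit3=VM_SHARED */
	static unsigned int rixi_prot(unsigned int idx, unsigned int cca)
	{
		unsigned int prot = cca | P_PRESENT;

		if (!(idx & 1))			/* not readable            */
			prot |= P_NO_READ;
		if (!(idx & 4))			/* not executable          */
			prot |= P_NO_EXEC;
		if ((idx & 2) && (idx & 8))	/* writable only if shared */
			prot |= P_WRITE;	/* (private write is COW)  */
		return prot;
	}
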
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index 9367e33fbd1..9547bc0cf18 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
+#include <linux/gfp.h>
#include <asm/cache.h>
#include <asm/io.h>
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index e97a7a2fb2c..b78f7d913ca 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -99,8 +99,31 @@ good_area:
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
} else {
- if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
- goto bad_area;
+ if (kernel_uses_smartmips_rixi) {
+ if (address == regs->cp0_epc && !(vma->vm_flags & VM_EXEC)) {
+#if 0
+ pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] XI violation\n",
+ raw_smp_processor_id(),
+ current->comm, current->pid,
+ field, address, write,
+ field, regs->cp0_epc);
+#endif
+ goto bad_area;
+ }
+ if (!(vma->vm_flags & VM_READ)) {
+#if 0
+ pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] RI violation\n",
+ raw_smp_processor_id(),
+ current->comm, current->pid,
+ field, address, write,
+ field, regs->cp0_epc);
+#endif
+ goto bad_area;
+ }
+ } else {
+ if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
+ goto bad_area;
+ }
}
/*
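
Illustrative aside (not part of the patch): with RIXI the protection check
splits into two cases -- a fetch fault (faulting address equals the exception
PC) against a VMA without VM_EXEC is an XI violation, and any other non-write
fault against a VMA without VM_READ is an RI violation. A standalone
restatement of that decision, with the usual vm_flags values assumed:

	#define VM_READ  0x1
	#define VM_EXEC  0x4

	enum rixi_fault { RIXI_OK, RIXI_XI, RIXI_RI };

	static enum rixi_fault classify(unsigned long address, unsigned long epc,
					unsigned long vm_flags)
	{
		if (address == epc && !(vm_flags & VM_EXEC))
			return RIXI_XI;	/* fetch from an execute-inhibited page */
		if (!(vm_flags & VM_READ))
			return RIXI_RI;	/* load from a read-inhibited page */
		return RIXI_OK;		/* fall through to the normal checks */
	}
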
diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
index e274fda329f..127d732474b 100644
--- a/arch/mips/mm/highmem.c
+++ b/arch/mips/mm/highmem.c
@@ -1,5 +1,6 @@
#include <linux/module.h>
#include <linux/highmem.h>
+#include <linux/sched.h>
#include <linux/smp.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
diff --git a/arch/mips/mm/hugetlbpage.c b/arch/mips/mm/hugetlbpage.c
index 8c2834f5919..a7fee0dfb7a 100644
--- a/arch/mips/mm/hugetlbpage.c
+++ b/arch/mips/mm/hugetlbpage.c
@@ -16,7 +16,6 @@
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
-#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
@@ -97,4 +96,3 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
return page;
}
-
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 1651942f7fe..2efcbd24c82 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -28,6 +28,7 @@
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/hardirq.h>
+#include <linux/gfp.h>
#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
@@ -143,7 +144,7 @@ void *kmap_coherent(struct page *page, unsigned long addr)
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
entrylo = pte.pte_high;
#else
- entrylo = pte_val(pte) >> 6;
+ entrylo = pte_to_entrylo(pte_val(pte));
#endif
ENTER_CRITICAL(flags);
@@ -298,7 +299,7 @@ void __init fixrange_init(unsigned long start, unsigned long end,
}
#ifndef CONFIG_NEED_MULTIPLE_NODES
-static int __init page_is_ram(unsigned long pagenr)
+int page_is_ram(unsigned long pagenr)
{
int i;
@@ -477,7 +478,7 @@ unsigned long pgd_current[NR_CPUS];
* will officially be retired.
*/
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __page_aligned(_PGD_ORDER);
-#ifdef CONFIG_64BIT
+#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned(PMD_ORDER);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER);
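
Illustrative aside (not part of the patch): the old open-coded ">> 6" assumed
the classic layout where EntryLo is just the PTE with the software bits
shifted out. pte_to_entrylo() hides the RIXI variant, where the no-exec and
no-read software bits must end up in the top of EntryLo (the hardware XI/RI
bits); the generated handlers do this with an SRL followed by a ROTR (see
build_convert_pte_to_entrylo in tlbex.c below). A 32-bit sketch with assumed
bit positions:

	#include <stdint.h>

	#define PG_GLOBAL_SHIFT   6	/* assumed position of _PAGE_GLOBAL  */
	#define PG_NO_EXEC_SHIFT  1	/* assumed position of _PAGE_NO_EXEC */

	static uint32_t rotr32(uint32_t x, unsigned int n)
	{
		return (x >> n) | (x << (32 - n));
	}

	static uint32_t pte_to_entrylo_sketch(uint32_t pte, int uses_rixi)
	{
		if (!uses_rixi)
			return pte >> PG_GLOBAL_SHIFT;

		/* shift out the bits below NO_EXEC, then rotate so that
		 * NO_EXEC/NO_READ land in the top of EntryLo while the PFN
		 * still ends up right-shifted by PG_GLOBAL_SHIFT overall */
		return rotr32(pte >> PG_NO_EXEC_SHIFT,
			      PG_GLOBAL_SHIFT - PG_NO_EXEC_SHIFT);
	}
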
diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c
index 0c43248347b..cacfd31e8ec 100644
--- a/arch/mips/mm/ioremap.c
+++ b/arch/mips/mm/ioremap.c
@@ -10,6 +10,7 @@
#include <asm/addrspace.h>
#include <asm/byteorder.h>
#include <linux/sched.h>
+#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index f5c73754d66..36272f7d374 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -35,7 +35,7 @@
#include <asm/sibyte/sb1250_dma.h>
#endif
-#include "uasm.h"
+#include <asm/uasm.h>
/* Registers used in the assembled routines. */
#define ZERO 0
diff --git a/arch/mips/mm/pgtable-64.c b/arch/mips/mm/pgtable-64.c
index 1121019fa45..78eaa4f0b0e 100644
--- a/arch/mips/mm/pgtable-64.c
+++ b/arch/mips/mm/pgtable-64.c
@@ -15,23 +15,31 @@
void pgd_init(unsigned long page)
{
unsigned long *p, *end;
+ unsigned long entry;
+
+#ifdef __PAGETABLE_PMD_FOLDED
+ entry = (unsigned long)invalid_pte_table;
+#else
+ entry = (unsigned long)invalid_pmd_table;
+#endif
p = (unsigned long *) page;
end = p + PTRS_PER_PGD;
while (p < end) {
- p[0] = (unsigned long) invalid_pmd_table;
- p[1] = (unsigned long) invalid_pmd_table;
- p[2] = (unsigned long) invalid_pmd_table;
- p[3] = (unsigned long) invalid_pmd_table;
- p[4] = (unsigned long) invalid_pmd_table;
- p[5] = (unsigned long) invalid_pmd_table;
- p[6] = (unsigned long) invalid_pmd_table;
- p[7] = (unsigned long) invalid_pmd_table;
+ p[0] = entry;
+ p[1] = entry;
+ p[2] = entry;
+ p[3] = entry;
+ p[4] = entry;
+ p[5] = entry;
+ p[6] = entry;
+ p[7] = entry;
p += 8;
}
}
+#ifndef __PAGETABLE_PMD_FOLDED
void pmd_init(unsigned long addr, unsigned long pagetable)
{
unsigned long *p, *end;
@@ -40,17 +48,18 @@ void pmd_init(unsigned long addr, unsigned long pagetable)
end = p + PTRS_PER_PMD;
while (p < end) {
- p[0] = (unsigned long)pagetable;
- p[1] = (unsigned long)pagetable;
- p[2] = (unsigned long)pagetable;
- p[3] = (unsigned long)pagetable;
- p[4] = (unsigned long)pagetable;
- p[5] = (unsigned long)pagetable;
- p[6] = (unsigned long)pagetable;
- p[7] = (unsigned long)pagetable;
+ p[0] = pagetable;
+ p[1] = pagetable;
+ p[2] = pagetable;
+ p[3] = pagetable;
+ p[4] = pagetable;
+ p[5] = pagetable;
+ p[6] = pagetable;
+ p[7] = pagetable;
p += 8;
}
}
+#endif
void __init pagetable_init(void)
{
@@ -59,8 +68,9 @@ void __init pagetable_init(void)
/* Initialize the entire pgd. */
pgd_init((unsigned long)swapper_pg_dir);
+#ifndef __PAGETABLE_PMD_FOLDED
pmd_init((unsigned long)invalid_pmd_table, (unsigned long)invalid_pte_table);
-
+#endif
pgd_base = swapper_pg_dir;
/*
* Fixed mappings:
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index d73428b18b0..c618eed933a 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -303,7 +303,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
unsigned long lo;
write_c0_pagemask(PM_HUGE_MASK);
ptep = (pte_t *)pmdp;
- lo = pte_val(*ptep) >> 6;
+ lo = pte_to_entrylo(pte_val(*ptep));
write_c0_entrylo0(lo);
write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));
@@ -323,8 +323,8 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
ptep++;
write_c0_entrylo1(ptep->pte_high);
#else
- write_c0_entrylo0(pte_val(*ptep++) >> 6);
- write_c0_entrylo1(pte_val(*ptep) >> 6);
+ write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
+ write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
#endif
mtc0_tlbw_hazard();
if (idx < 0)
@@ -337,40 +337,6 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
EXIT_CRITICAL(flags);
}
-#if 0
-static void r4k_update_mmu_cache_hwbug(struct vm_area_struct * vma,
- unsigned long address, pte_t pte)
-{
- unsigned long flags;
- unsigned int asid;
- pgd_t *pgdp;
- pmd_t *pmdp;
- pte_t *ptep;
- int idx;
-
- ENTER_CRITICAL(flags);
- address &= (PAGE_MASK << 1);
- asid = read_c0_entryhi() & ASID_MASK;
- write_c0_entryhi(address | asid);
- pgdp = pgd_offset(vma->vm_mm, address);
- mtc0_tlbw_hazard();
- tlb_probe();
- tlb_probe_hazard();
- pmdp = pmd_offset(pgdp, address);
- idx = read_c0_index();
- ptep = pte_offset_map(pmdp, address);
- write_c0_entrylo0(pte_val(*ptep++) >> 6);
- write_c0_entrylo1(pte_val(*ptep) >> 6);
- mtc0_tlbw_hazard();
- if (idx < 0)
- tlb_write_random();
- else
- tlb_write_indexed();
- tlbw_use_hazard();
- EXIT_CRITICAL(flags);
-}
-#endif
-
void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
unsigned long entryhi, unsigned long pagemask)
{
@@ -447,34 +413,6 @@ out:
return ret;
}
-static void __cpuinit probe_tlb(unsigned long config)
-{
- struct cpuinfo_mips *c = &current_cpu_data;
- unsigned int reg;
-
- /*
- * If this isn't a MIPS32 / MIPS64 compliant CPU. Config 1 register
- * is not supported, we assume R4k style. Cpu probing already figured
- * out the number of tlb entries.
- */
- if ((c->processor_id & 0xff0000) == PRID_COMP_LEGACY)
- return;
-#ifdef CONFIG_MIPS_MT_SMTC
- /*
- * If TLB is shared in SMTC system, total size already
- * has been calculated and written into cpu_data tlbsize
- */
- if((smtc_status & SMTC_TLB_SHARED) == SMTC_TLB_SHARED)
- return;
-#endif /* CONFIG_MIPS_MT_SMTC */
-
- reg = read_c0_config1();
- if (!((config >> 7) & 3))
- panic("No TLB present");
-
- c->tlbsize = ((reg >> 25) & 0x3f) + 1;
-}
-
static int __cpuinitdata ntlb;
static int __init set_ntlb(char *str)
{
@@ -486,8 +424,6 @@ __setup("ntlb=", set_ntlb);
void __cpuinit tlb_init(void)
{
- unsigned int config = read_c0_config();
-
/*
* You should never change this register:
* - On R4600 1.7 the tlbp never hits for pages smaller than
@@ -495,13 +431,25 @@ void __cpuinit tlb_init(void)
* - The entire mm handling assumes the c0_pagemask register to
* be set to fixed-size pages.
*/
- probe_tlb(config);
write_c0_pagemask(PM_DEFAULT_MASK);
write_c0_wired(0);
if (current_cpu_type() == CPU_R10000 ||
current_cpu_type() == CPU_R12000 ||
current_cpu_type() == CPU_R14000)
write_c0_framemask(0);
+
+ if (kernel_uses_smartmips_rixi) {
+ /*
+ * Enable the no read, no exec bits, and enable large virtual
+ * address.
+ */
+ u32 pg = PG_RIE | PG_XIE;
+#ifdef CONFIG_64BIT
+ pg |= PG_ELPA;
+#endif
+ write_c0_pagegrain(pg);
+ }
+
temp_tlb_entry = current_cpu_data.tlbsize - 1;
/* From this point on the ARC firmware is dead. */
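
Illustrative aside (not part of the patch): in the huge-page path above the
odd EntryLo is "lo + (HPAGE_SIZE >> 7)" because EntryLo carries the physical
address right-shifted by 6 (the PFN field starts at bit 6), and the odd half
of a huge mapping starts HPAGE_SIZE/2 further on, so the increment is
(HPAGE_SIZE / 2) >> 6 == HPAGE_SIZE >> 7. A check of that arithmetic,
assuming a 2 MB huge page:

	#include <assert.h>

	int main(void)
	{
		unsigned long hpage_size = 2UL << 20;		/* assumed HPAGE_SIZE       */
		unsigned long phys       = 0x40000000UL;	/* even half of the mapping */
		unsigned long lo0        = phys >> 6;
		unsigned long lo1        = (phys + hpage_size / 2) >> 6;

		assert(lo1 == lo0 + (hpage_size >> 7));
		return 0;
	}
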
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index badcf5e8d69..86f004dc835 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -29,8 +29,17 @@
#include <asm/mmu_context.h>
#include <asm/war.h>
+#include <asm/uasm.h>
+
+/*
+ * TLB load/store/modify handlers.
+ *
+ * Only the fastpath gets synthesized at runtime, the slowpath for
+ * do_page_fault remains normal asm.
+ */
+extern void tlb_do_page_fault_0(void);
+extern void tlb_do_page_fault_1(void);
-#include "uasm.h"
static inline int r45k_bvahwbug(void)
{
@@ -77,11 +86,14 @@ enum label_id {
label_vmalloc_done,
label_tlbw_hazard,
label_split,
+ label_tlbl_goaround1,
+ label_tlbl_goaround2,
label_nopage_tlbl,
label_nopage_tlbs,
label_nopage_tlbm,
label_smp_pgtable_change,
label_r3000_write_probe_fail,
+ label_large_segbits_fault,
#ifdef CONFIG_HUGETLB_PAGE
label_tlb_huge_update,
#endif
@@ -93,11 +105,14 @@ UASM_L_LA(_vmalloc)
UASM_L_LA(_vmalloc_done)
UASM_L_LA(_tlbw_hazard)
UASM_L_LA(_split)
+UASM_L_LA(_tlbl_goaround1)
+UASM_L_LA(_tlbl_goaround2)
UASM_L_LA(_nopage_tlbl)
UASM_L_LA(_nopage_tlbs)
UASM_L_LA(_nopage_tlbm)
UASM_L_LA(_smp_pgtable_change)
UASM_L_LA(_r3000_write_probe_fail)
+UASM_L_LA(_large_segbits_fault)
#ifdef CONFIG_HUGETLB_PAGE
UASM_L_LA(_tlb_huge_update)
#endif
@@ -154,6 +169,10 @@ static u32 tlb_handler[128] __cpuinitdata;
static struct uasm_label labels[128] __cpuinitdata;
static struct uasm_reloc relocs[128] __cpuinitdata;
+#ifdef CONFIG_64BIT
+static int check_for_high_segbits __cpuinitdata;
+#endif
+
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
/*
* CONFIG_MIPS_PGD_C0_CONTEXT implies 64 bit and lack of pgd_current,
@@ -397,36 +416,60 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
}
}
-#ifdef CONFIG_HUGETLB_PAGE
-static __cpuinit void build_huge_tlb_write_entry(u32 **p,
- struct uasm_label **l,
- struct uasm_reloc **r,
- unsigned int tmp,
- enum tlb_write_entry wmode)
+static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
+ unsigned int reg)
{
- /* Set huge page tlb entry size */
- uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
- uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
- uasm_i_mtc0(p, tmp, C0_PAGEMASK);
+ if (kernel_uses_smartmips_rixi) {
+ UASM_i_SRL(p, reg, reg, ilog2(_PAGE_NO_EXEC));
+ UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+ } else {
+#ifdef CONFIG_64BIT_PHYS_ADDR
+ uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL));
+#else
+ UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL));
+#endif
+ }
+}
- build_tlb_write_entry(p, l, r, wmode);
+#ifdef CONFIG_HUGETLB_PAGE
+static __cpuinit void build_restore_pagemask(u32 **p,
+ struct uasm_reloc **r,
+ unsigned int tmp,
+ enum label_id lid)
+{
/* Reset default page size */
if (PM_DEFAULT_MASK >> 16) {
uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
- uasm_il_b(p, r, label_leave);
+ uasm_il_b(p, r, lid);
uasm_i_mtc0(p, tmp, C0_PAGEMASK);
} else if (PM_DEFAULT_MASK) {
uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
- uasm_il_b(p, r, label_leave);
+ uasm_il_b(p, r, lid);
uasm_i_mtc0(p, tmp, C0_PAGEMASK);
} else {
- uasm_il_b(p, r, label_leave);
+ uasm_il_b(p, r, lid);
uasm_i_mtc0(p, 0, C0_PAGEMASK);
}
}
+static __cpuinit void build_huge_tlb_write_entry(u32 **p,
+ struct uasm_label **l,
+ struct uasm_reloc **r,
+ unsigned int tmp,
+ enum tlb_write_entry wmode)
+{
+ /* Set huge page tlb entry size */
+ uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
+ uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
+ uasm_i_mtc0(p, tmp, C0_PAGEMASK);
+
+ build_tlb_write_entry(p, l, r, wmode);
+
+ build_restore_pagemask(p, r, tmp, label_leave);
+}
+
/*
* Check if Huge PTE is present, if so then jump to LABEL.
*/
@@ -460,15 +503,15 @@ static __cpuinit void build_huge_update_entries(u32 **p,
if (!small_sequence)
uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));
- UASM_i_SRL(p, pte, pte, 6); /* convert to entrylo */
- uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* load it */
+ build_convert_pte_to_entrylo(p, pte);
+ UASM_i_MTC0(p, pte, C0_ENTRYLO0); /* load it */
/* convert to entrylo1 */
if (small_sequence)
UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7);
else
UASM_i_ADDU(p, pte, pte, tmp);
- uasm_i_mtc0(p, pte, C0_ENTRYLO1); /* load it */
+ UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */
}
static __cpuinit void build_huge_handler_tail(u32 **p,
@@ -505,7 +548,24 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
* The vmalloc handling is not in the hotpath.
*/
uasm_i_dmfc0(p, tmp, C0_BADVADDR);
- uasm_il_bltz(p, r, tmp, label_vmalloc);
+
+ if (check_for_high_segbits) {
+ /*
+ * The kernel currently implicitely assumes that the
+ * MIPS SEGBITS parameter for the processor is
+ * (PGDIR_SHIFT+PGDIR_BITS) or less, and will never
+ * allocate virtual addresses outside the maximum
+ * range for SEGBITS = (PGDIR_SHIFT+PGDIR_BITS). But
+ * that doesn't prevent user code from accessing the
+ * higher xuseg addresses. Here, we make sure that
+ * everything but the lower xuseg addresses goes down
+ * the module_alloc/vmalloc path.
+ */
+ uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
+ uasm_il_bnez(p, r, ptr, label_vmalloc);
+ } else {
+ uasm_il_bltz(p, r, tmp, label_vmalloc);
+ }
/* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */
#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
@@ -522,14 +582,14 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
* SMTC uses TCBind value as "CPU" index
*/
uasm_i_mfc0(p, ptr, C0_TCBIND);
- uasm_i_dsrl(p, ptr, ptr, 19);
+ uasm_i_dsrl_safe(p, ptr, ptr, 19);
# else
/*
* 64 bit SMP running in XKPHYS has smp_processor_id() << 3
* stored in CONTEXT.
*/
uasm_i_dmfc0(p, ptr, C0_CONTEXT);
- uasm_i_dsrl(p, ptr, ptr, 23);
+ uasm_i_dsrl_safe(p, ptr, ptr, 23);
# endif
UASM_i_LA_mostly(p, tmp, pgdc);
uasm_i_daddu(p, ptr, ptr, tmp);
@@ -542,42 +602,78 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
uasm_l_vmalloc_done(l, *p);
- if (PGDIR_SHIFT - 3 < 32) /* get pgd offset in bytes */
- uasm_i_dsrl(p, tmp, tmp, PGDIR_SHIFT-3);
- else
- uasm_i_dsrl32(p, tmp, tmp, PGDIR_SHIFT - 3 - 32);
+ /* get pgd offset in bytes */
+ uasm_i_dsrl_safe(p, tmp, tmp, PGDIR_SHIFT - 3);
uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3);
uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
+#ifndef __PAGETABLE_PMD_FOLDED
uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
- uasm_i_dsrl(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */
+ uasm_i_dsrl_safe(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */
uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3);
uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
+#endif
}
+enum vmalloc64_mode {not_refill, refill};
/*
* BVADDR is the faulting address, PTR is scratch.
* PTR will hold the pgd for vmalloc.
*/
static void __cpuinit
build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
- unsigned int bvaddr, unsigned int ptr)
+ unsigned int bvaddr, unsigned int ptr,
+ enum vmalloc64_mode mode)
{
long swpd = (long)swapper_pg_dir;
+ int single_insn_swpd;
+ int did_vmalloc_branch = 0;
+
+ single_insn_swpd = uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd);
uasm_l_vmalloc(l, *p);
- if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) {
- uasm_il_b(p, r, label_vmalloc_done);
- uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
- } else {
- UASM_i_LA_mostly(p, ptr, swpd);
- uasm_il_b(p, r, label_vmalloc_done);
- if (uasm_in_compat_space_p(swpd))
- uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
- else
- uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
+ if (mode == refill && check_for_high_segbits) {
+ if (single_insn_swpd) {
+ uasm_il_bltz(p, r, bvaddr, label_vmalloc_done);
+ uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
+ did_vmalloc_branch = 1;
+ /* fall through */
+ } else {
+ uasm_il_bgez(p, r, bvaddr, label_large_segbits_fault);
+ }
+ }
+ if (!did_vmalloc_branch) {
+ if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) {
+ uasm_il_b(p, r, label_vmalloc_done);
+ uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
+ } else {
+ UASM_i_LA_mostly(p, ptr, swpd);
+ uasm_il_b(p, r, label_vmalloc_done);
+ if (uasm_in_compat_space_p(swpd))
+ uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
+ else
+ uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
+ }
+ }
+ if (mode == refill && check_for_high_segbits) {
+ uasm_l_large_segbits_fault(l, *p);
+ /*
+ * We get here if we are an xsseg address, or if we are
+ * an xuseg address above (PGDIR_SHIFT+PGDIR_BITS) boundary.
+ *
+ * Ignoring xsseg (assume disabled so would generate
+ * (address errors?), the only remaining possibility
+ * is the upper xuseg addresses. On processors with
+ * TLB_SEGBITS <= PGDIR_SHIFT+PGDIR_BITS, these
+ * addresses would have taken an address error. We try
+ * to mimic that here by taking a load/istream page
+ * fault.
+ */
+ UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0);
+ uasm_i_jr(p, ptr);
+ uasm_i_nop(p);
}
}
@@ -684,35 +780,53 @@ static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
if (cpu_has_64bits) {
uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
- uasm_i_dsrl(p, tmp, tmp, 6); /* convert to entrylo0 */
- uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
- uasm_i_dsrl(p, ptep, ptep, 6); /* convert to entrylo1 */
- uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
+ if (kernel_uses_smartmips_rixi) {
+ UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
+ UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
+ UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+ UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
+ UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+ } else {
+ uasm_i_dsrl_safe(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
+ UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
+ uasm_i_dsrl_safe(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
+ }
+ UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
} else {
int pte_off_even = sizeof(pte_t) / 2;
int pte_off_odd = pte_off_even + sizeof(pte_t);
/* The pte entries are pre-shifted */
uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */
- uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
+ UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */
- uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
+ UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
}
#else
UASM_i_LW(p, tmp, 0, ptep); /* get even pte */
UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
if (r45k_bvahwbug())
build_tlb_probe_entry(p);
- UASM_i_SRL(p, tmp, tmp, 6); /* convert to entrylo0 */
- if (r4k_250MHZhwbug())
- uasm_i_mtc0(p, 0, C0_ENTRYLO0);
- uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
- UASM_i_SRL(p, ptep, ptep, 6); /* convert to entrylo1 */
- if (r45k_bvahwbug())
- uasm_i_mfc0(p, tmp, C0_INDEX);
+ if (kernel_uses_smartmips_rixi) {
+ UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
+ UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
+ UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+ if (r4k_250MHZhwbug())
+ UASM_i_MTC0(p, 0, C0_ENTRYLO0);
+ UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
+ UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+ } else {
+ UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
+ if (r4k_250MHZhwbug())
+ UASM_i_MTC0(p, 0, C0_ENTRYLO0);
+ UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
+ UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
+ if (r45k_bvahwbug())
+ uasm_i_mfc0(p, tmp, C0_INDEX);
+ }
if (r4k_250MHZhwbug())
- uasm_i_mtc0(p, 0, C0_ENTRYLO1);
- uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
+ UASM_i_MTC0(p, 0, C0_ENTRYLO1);
+ UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
#endif
}
@@ -741,10 +855,15 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
* create the plain linear handler
*/
if (bcm1250_m3_war()) {
- UASM_i_MFC0(&p, K0, C0_BADVADDR);
- UASM_i_MFC0(&p, K1, C0_ENTRYHI);
+ unsigned int segbits = 44;
+
+ uasm_i_dmfc0(&p, K0, C0_BADVADDR);
+ uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
uasm_i_xor(&p, K0, K0, K1);
- UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
+ uasm_i_dsrl_safe(&p, K1, K0, 62);
+ uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
+ uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
+ uasm_i_or(&p, K0, K0, K1);
uasm_il_bnez(&p, &r, K0, label_leave);
/* No need for uasm_i_nop */
}
@@ -773,7 +892,7 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
#endif
#ifdef CONFIG_64BIT
- build_get_pgd_vmalloc64(&p, &l, &r, K0, K1);
+ build_get_pgd_vmalloc64(&p, &l, &r, K0, K1, refill);
#endif
/*
@@ -883,15 +1002,6 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
}
/*
- * TLB load/store/modify handlers.
- *
- * Only the fastpath gets synthesized at runtime, the slowpath for
- * do_page_fault remains normal asm.
- */
-extern void tlb_do_page_fault_0(void);
-extern void tlb_do_page_fault_1(void);
-
-/*
* 128 instructions for the fastpath handler is generous and should
* never be exceeded.
*/
@@ -985,9 +1095,14 @@ static void __cpuinit
build_pte_present(u32 **p, struct uasm_reloc **r,
unsigned int pte, unsigned int ptr, enum label_id lid)
{
- uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
- uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
- uasm_il_bnez(p, r, pte, lid);
+ if (kernel_uses_smartmips_rixi) {
+ uasm_i_andi(p, pte, pte, _PAGE_PRESENT);
+ uasm_il_beqz(p, r, pte, lid);
+ } else {
+ uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
+ uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
+ uasm_il_bnez(p, r, pte, lid);
+ }
iPTE_LW(p, pte, ptr);
}
@@ -1245,7 +1360,7 @@ build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
uasm_i_eret(p); /* return from trap */
#ifdef CONFIG_64BIT
- build_get_pgd_vmalloc64(p, l, r, tmp, ptr);
+ build_get_pgd_vmalloc64(p, l, r, tmp, ptr, not_refill);
#endif
}
@@ -1260,10 +1375,15 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
memset(relocs, 0, sizeof(relocs));
if (bcm1250_m3_war()) {
- UASM_i_MFC0(&p, K0, C0_BADVADDR);
- UASM_i_MFC0(&p, K1, C0_ENTRYHI);
+ unsigned int segbits = 44;
+
+ uasm_i_dmfc0(&p, K0, C0_BADVADDR);
+ uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
uasm_i_xor(&p, K0, K0, K1);
- UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
+ uasm_i_dsrl_safe(&p, K1, K0, 62);
+ uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
+ uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
+ uasm_i_or(&p, K0, K0, K1);
uasm_il_bnez(&p, &r, K0, label_leave);
/* No need for uasm_i_nop */
}
@@ -1272,6 +1392,34 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
if (m4kc_tlbp_war())
build_tlb_probe_entry(&p);
+
+ if (kernel_uses_smartmips_rixi) {
+ /*
+ * If the page is not _PAGE_VALID, RI or XI could not
+ * have triggered it. Skip the expensive test..
+ */
+ uasm_i_andi(&p, K0, K0, _PAGE_VALID);
+ uasm_il_beqz(&p, &r, K0, label_tlbl_goaround1);
+ uasm_i_nop(&p);
+
+ uasm_i_tlbr(&p);
+ /* Examine entrylo 0 or 1 based on ptr. */
+ uasm_i_andi(&p, K0, K1, sizeof(pte_t));
+ uasm_i_beqz(&p, K0, 8);
+
+ UASM_i_MFC0(&p, K0, C0_ENTRYLO0); /* load it in the delay slot*/
+ UASM_i_MFC0(&p, K0, C0_ENTRYLO1); /* load it if ptr is odd */
+ /*
+ * If the entryLo (now in K0) is valid (bit 1), RI or
+ * XI must have triggered it.
+ */
+ uasm_i_andi(&p, K0, K0, 2);
+ uasm_il_bnez(&p, &r, K0, label_nopage_tlbl);
+
+ uasm_l_tlbl_goaround1(&l, p);
+ /* Reload the PTE value */
+ iPTE_LW(&p, K0, K1);
+ }
build_make_valid(&p, &r, K0, K1);
build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);
@@ -1284,6 +1432,40 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
iPTE_LW(&p, K0, K1);
build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
build_tlb_probe_entry(&p);
+
+ if (kernel_uses_smartmips_rixi) {
+ /*
+ * If the page is not _PAGE_VALID, RI or XI could not
+ * have triggered it. Skip the expensive test..
+ */
+ uasm_i_andi(&p, K0, K0, _PAGE_VALID);
+ uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2);
+ uasm_i_nop(&p);
+
+ uasm_i_tlbr(&p);
+ /* Examine entrylo 0 or 1 based on ptr. */
+ uasm_i_andi(&p, K0, K1, sizeof(pte_t));
+ uasm_i_beqz(&p, K0, 8);
+
+ UASM_i_MFC0(&p, K0, C0_ENTRYLO0); /* load it in the delay slot*/
+ UASM_i_MFC0(&p, K0, C0_ENTRYLO1); /* load it if ptr is odd */
+ /*
+ * If the entryLo (now in K0) is valid (bit 1), RI or
+ * XI must have triggered it.
+ */
+ uasm_i_andi(&p, K0, K0, 2);
+ uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2);
+ /* Reload the PTE value */
+ iPTE_LW(&p, K0, K1);
+
+ /*
+ * We clobbered C0_PAGEMASK, restore it. On the other branch
+ * it is restored in build_huge_tlb_write_entry.
+ */
+ build_restore_pagemask(&p, &r, K0, label_nopage_tlbl);
+
+ uasm_l_tlbl_goaround2(&l, p);
+ }
uasm_i_ori(&p, K0, K0, (_PAGE_ACCESSED | _PAGE_VALID));
build_huge_handler_tail(&p, &r, &l, K0, K1);
#endif
@@ -1402,6 +1584,10 @@ void __cpuinit build_tlb_refill_handler(void)
*/
static int run_once = 0;
+#ifdef CONFIG_64BIT
+ check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
+#endif
+
switch (current_cpu_type()) {
case CPU_R2000:
case CPU_R3000:
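
Illustrative aside (not part of the patch): the RI/XI fast path that
build_r4000_tlb_load_handler() now emits can be read as the following C; this
is a sketch only (the real thing is uasm-generated assembly, and _PAGE_VALID
is a placeholder value here). If the PTE is already valid, a TLBL exception
can only have been raised by read- or execute-inhibition, so the handler
re-reads the TLB entry with tlbr, picks EntryLo0 or EntryLo1 by whether the
PTE pointer is odd, and checks the hardware V bit (bit 1).

	#define _PAGE_VALID  0x2	/* placeholder value */

	static int tlbl_is_rixi_violation(unsigned long pte, int pte_is_odd,
					  unsigned long entrylo0,
					  unsigned long entrylo1)
	{
		unsigned long lo;

		if (!(pte & _PAGE_VALID))
			return 0;	/* ordinary fault: take the make-valid path */

		/* after tlbr, pick the EntryLo half this PTE maps */
		lo = pte_is_odd ? entrylo1 : entrylo0;

		/* if the entry was valid in hardware, only RI/XI remains */
		return (lo & 2) != 0;
	}
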
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index 0a165c5179a..611d564fdcf 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -19,8 +19,7 @@
#include <asm/inst.h>
#include <asm/elf.h>
#include <asm/bugs.h>
-
-#include "uasm.h"
+#include <asm/uasm.h>
enum fields {
RS = 0x001,
@@ -32,7 +31,8 @@ enum fields {
BIMM = 0x040,
JIMM = 0x080,
FUNC = 0x100,
- SET = 0x200
+ SET = 0x200,
+ SCIMM = 0x400
};
#define OP_MASK 0x3f
@@ -53,6 +53,8 @@ enum fields {
#define FUNC_SH 0
#define SET_MASK 0x7
#define SET_SH 0
+#define SCIMM_MASK 0xfffff
+#define SCIMM_SH 6
enum opcode {
insn_invalid,
@@ -62,9 +64,10 @@ enum opcode {
insn_dmtc0, insn_dsll, insn_dsll32, insn_dsra, insn_dsrl,
insn_dsrl32, insn_drotr, insn_dsubu, insn_eret, insn_j, insn_jal,
insn_jr, insn_ld, insn_ll, insn_lld, insn_lui, insn_lw, insn_mfc0,
- insn_mtc0, insn_ori, insn_pref, insn_rfe, insn_sc, insn_scd,
- insn_sd, insn_sll, insn_sra, insn_srl, insn_subu, insn_sw,
- insn_tlbp, insn_tlbwi, insn_tlbwr, insn_xor, insn_xori, insn_dins
+ insn_mtc0, insn_or, insn_ori, insn_pref, insn_rfe, insn_sc, insn_scd,
+ insn_sd, insn_sll, insn_sra, insn_srl, insn_rotr, insn_subu, insn_sw,
+ insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_xor, insn_xori,
+ insn_dins, insn_syscall
};
struct insn {
@@ -117,6 +120,7 @@ static struct insn insn_table[] __cpuinitdata = {
{ insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET},
{ insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET},
+ { insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD },
{ insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
{ insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 },
@@ -126,14 +130,17 @@ static struct insn insn_table[] __cpuinitdata = {
{ insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE },
{ insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE },
{ insn_srl, M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE },
+ { insn_rotr, M(spec_op, 1, 0, 0, 0, srl_op), RT | RD | RE },
{ insn_subu, M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD },
{ insn_sw, M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_tlbp, M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0 },
+ { insn_tlbr, M(cop0_op, cop_op, 0, 0, 0, tlbr_op), 0 },
{ insn_tlbwi, M(cop0_op, cop_op, 0, 0, 0, tlbwi_op), 0 },
{ insn_tlbwr, M(cop0_op, cop_op, 0, 0, 0, tlbwr_op), 0 },
{ insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD },
{ insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
{ insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE },
+ { insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM},
{ insn_invalid, 0, 0 }
};
@@ -206,6 +213,14 @@ static inline __cpuinit u32 build_jimm(u32 arg)
return (arg >> 2) & JIMM_MASK;
}
+static inline __cpuinit u32 build_scimm(u32 arg)
+{
+ if (arg & ~SCIMM_MASK)
+ printk(KERN_WARNING "Micro-assembler field overflow\n");
+
+ return (arg & SCIMM_MASK) << SCIMM_SH;
+}
+
static inline __cpuinit u32 build_func(u32 arg)
{
if (arg & ~FUNC_MASK)
@@ -264,6 +279,8 @@ static void __cpuinit build_insn(u32 **buf, enum opcode opc, ...)
op |= build_func(va_arg(ap, u32));
if (ip->fields & SET)
op |= build_set(va_arg(ap, u32));
+ if (ip->fields & SCIMM)
+ op |= build_scimm(va_arg(ap, u32));
va_end(ap);
**buf = op;
@@ -371,6 +388,7 @@ I_u2s3u1(_lw)
I_u1u2u3(_mfc0)
I_u1u2u3(_mtc0)
I_u2u1u3(_ori)
+I_u3u1u2(_or)
I_u2s3u1(_pref)
I_0(_rfe)
I_u2s3u1(_sc)
@@ -379,14 +397,17 @@ I_u2s3u1(_sd)
I_u2u1u3(_sll)
I_u2u1u3(_sra)
I_u2u1u3(_srl)
+I_u2u1u3(_rotr)
I_u3u1u2(_subu)
I_u2s3u1(_sw)
I_0(_tlbp)
+I_0(_tlbr)
I_0(_tlbwi)
I_0(_tlbwr)
I_u3u1u2(_xor)
I_u2u1u3(_xori)
I_u2u1msbu3(_dins);
+I_u1(_syscall);
/* Handle labels. */
void __cpuinit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid)
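
Illustrative aside (not part of the patch): a minimal sketch of how the
micro-assembler entry points extended above are used by callers such as
tlbex.c -- emit instructions into a buffer, drop a label, then resolve the
branch relocation. label_leave / uasm_l_leave stand in for a caller-defined
label id and its UASM_L_LA() helper; register 26 is k0.

	static u32 buf[8];

	static void __cpuinit emit_example(void)
	{
		struct uasm_label labels[2] = { { 0 } };
		struct uasm_reloc relocs[2] = { { 0 } };
		struct uasm_label *l = labels;
		struct uasm_reloc *r = relocs;
		u32 *p = buf;

		uasm_i_lui(&p, 26, 0x1234);		/* k0 = 0x12340000   */
		uasm_i_ori(&p, 26, 26, 0x5678);		/* k0 |= 0x5678      */
		uasm_il_bnez(&p, &r, 26, label_leave);	/* branch to a label */
		uasm_i_nop(&p);				/* delay slot        */
		uasm_i_tlbr(&p);			/* new in this patch */
		uasm_l_leave(&l, p);			/* place label_leave */

		uasm_resolve_relocs(relocs, labels);	/* fix branch offsets */
	}
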
diff --git a/arch/mips/mm/uasm.h b/arch/mips/mm/uasm.h
deleted file mode 100644
index 3d153edaa51..00000000000
--- a/arch/mips/mm/uasm.h
+++ /dev/null
@@ -1,191 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
- * Copyright (C) 2005 Maciej W. Rozycki
- * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
- */
-
-#include <linux/types.h>
-
-#define Ip_u1u2u3(op) \
-void __cpuinit \
-uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
-
-#define Ip_u2u1u3(op) \
-void __cpuinit \
-uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
-
-#define Ip_u3u1u2(op) \
-void __cpuinit \
-uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
-
-#define Ip_u1u2s3(op) \
-void __cpuinit \
-uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c)
-
-#define Ip_u2s3u1(op) \
-void __cpuinit \
-uasm_i##op(u32 **buf, unsigned int a, signed int b, unsigned int c)
-
-#define Ip_u2u1s3(op) \
-void __cpuinit \
-uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c)
-
-#define Ip_u2u1msbu3(op) \
-void __cpuinit \
-uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c, \
- unsigned int d)
-
-#define Ip_u1u2(op) \
-void __cpuinit uasm_i##op(u32 **buf, unsigned int a, unsigned int b)
-
-#define Ip_u1s2(op) \
-void __cpuinit uasm_i##op(u32 **buf, unsigned int a, signed int b)
-
-#define Ip_u1(op) void __cpuinit uasm_i##op(u32 **buf, unsigned int a)
-
-#define Ip_0(op) void __cpuinit uasm_i##op(u32 **buf)
-
-Ip_u2u1s3(_addiu);
-Ip_u3u1u2(_addu);
-Ip_u2u1u3(_andi);
-Ip_u3u1u2(_and);
-Ip_u1u2s3(_beq);
-Ip_u1u2s3(_beql);
-Ip_u1s2(_bgez);
-Ip_u1s2(_bgezl);
-Ip_u1s2(_bltz);
-Ip_u1s2(_bltzl);
-Ip_u1u2s3(_bne);
-Ip_u2s3u1(_cache);
-Ip_u1u2u3(_dmfc0);
-Ip_u1u2u3(_dmtc0);
-Ip_u2u1s3(_daddiu);
-Ip_u3u1u2(_daddu);
-Ip_u2u1u3(_dsll);
-Ip_u2u1u3(_dsll32);
-Ip_u2u1u3(_dsra);
-Ip_u2u1u3(_dsrl);
-Ip_u2u1u3(_dsrl32);
-Ip_u2u1u3(_drotr);
-Ip_u3u1u2(_dsubu);
-Ip_0(_eret);
-Ip_u1(_j);
-Ip_u1(_jal);
-Ip_u1(_jr);
-Ip_u2s3u1(_ld);
-Ip_u2s3u1(_ll);
-Ip_u2s3u1(_lld);
-Ip_u1s2(_lui);
-Ip_u2s3u1(_lw);
-Ip_u1u2u3(_mfc0);
-Ip_u1u2u3(_mtc0);
-Ip_u2u1u3(_ori);
-Ip_u2s3u1(_pref);
-Ip_0(_rfe);
-Ip_u2s3u1(_sc);
-Ip_u2s3u1(_scd);
-Ip_u2s3u1(_sd);
-Ip_u2u1u3(_sll);
-Ip_u2u1u3(_sra);
-Ip_u2u1u3(_srl);
-Ip_u3u1u2(_subu);
-Ip_u2s3u1(_sw);
-Ip_0(_tlbp);
-Ip_0(_tlbwi);
-Ip_0(_tlbwr);
-Ip_u3u1u2(_xor);
-Ip_u2u1u3(_xori);
-Ip_u2u1msbu3(_dins);
-
-/* Handle labels. */
-struct uasm_label {
- u32 *addr;
- int lab;
-};
-
-void __cpuinit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid);
-#ifdef CONFIG_64BIT
-int uasm_in_compat_space_p(long addr);
-#endif
-int uasm_rel_hi(long val);
-int uasm_rel_lo(long val);
-void UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr);
-void UASM_i_LA(u32 **buf, unsigned int rs, long addr);
-
-#define UASM_L_LA(lb) \
-static inline void __cpuinit uasm_l##lb(struct uasm_label **lab, u32 *addr) \
-{ \
- uasm_build_label(lab, addr, label##lb); \
-}
-
-/* convenience macros for instructions */
-#ifdef CONFIG_64BIT
-# define UASM_i_LW(buf, rs, rt, off) uasm_i_ld(buf, rs, rt, off)
-# define UASM_i_SW(buf, rs, rt, off) uasm_i_sd(buf, rs, rt, off)
-# define UASM_i_SLL(buf, rs, rt, sh) uasm_i_dsll(buf, rs, rt, sh)
-# define UASM_i_SRA(buf, rs, rt, sh) uasm_i_dsra(buf, rs, rt, sh)
-# define UASM_i_SRL(buf, rs, rt, sh) uasm_i_dsrl(buf, rs, rt, sh)
-# define UASM_i_MFC0(buf, rt, rd...) uasm_i_dmfc0(buf, rt, rd)
-# define UASM_i_MTC0(buf, rt, rd...) uasm_i_dmtc0(buf, rt, rd)
-# define UASM_i_ADDIU(buf, rs, rt, val) uasm_i_daddiu(buf, rs, rt, val)
-# define UASM_i_ADDU(buf, rs, rt, rd) uasm_i_daddu(buf, rs, rt, rd)
-# define UASM_i_SUBU(buf, rs, rt, rd) uasm_i_dsubu(buf, rs, rt, rd)
-# define UASM_i_LL(buf, rs, rt, off) uasm_i_lld(buf, rs, rt, off)
-# define UASM_i_SC(buf, rs, rt, off) uasm_i_scd(buf, rs, rt, off)
-#else
-# define UASM_i_LW(buf, rs, rt, off) uasm_i_lw(buf, rs, rt, off)
-# define UASM_i_SW(buf, rs, rt, off) uasm_i_sw(buf, rs, rt, off)
-# define UASM_i_SLL(buf, rs, rt, sh) uasm_i_sll(buf, rs, rt, sh)
-# define UASM_i_SRA(buf, rs, rt, sh) uasm_i_sra(buf, rs, rt, sh)
-# define UASM_i_SRL(buf, rs, rt, sh) uasm_i_srl(buf, rs, rt, sh)
-# define UASM_i_MFC0(buf, rt, rd...) uasm_i_mfc0(buf, rt, rd)
-# define UASM_i_MTC0(buf, rt, rd...) uasm_i_mtc0(buf, rt, rd)
-# define UASM_i_ADDIU(buf, rs, rt, val) uasm_i_addiu(buf, rs, rt, val)
-# define UASM_i_ADDU(buf, rs, rt, rd) uasm_i_addu(buf, rs, rt, rd)
-# define UASM_i_SUBU(buf, rs, rt, rd) uasm_i_subu(buf, rs, rt, rd)
-# define UASM_i_LL(buf, rs, rt, off) uasm_i_ll(buf, rs, rt, off)
-# define UASM_i_SC(buf, rs, rt, off) uasm_i_sc(buf, rs, rt, off)
-#endif
-
-#define uasm_i_b(buf, off) uasm_i_beq(buf, 0, 0, off)
-#define uasm_i_beqz(buf, rs, off) uasm_i_beq(buf, rs, 0, off)
-#define uasm_i_beqzl(buf, rs, off) uasm_i_beql(buf, rs, 0, off)
-#define uasm_i_bnez(buf, rs, off) uasm_i_bne(buf, rs, 0, off)
-#define uasm_i_bnezl(buf, rs, off) uasm_i_bnel(buf, rs, 0, off)
-#define uasm_i_move(buf, a, b) UASM_i_ADDU(buf, a, 0, b)
-#define uasm_i_nop(buf) uasm_i_sll(buf, 0, 0, 0)
-#define uasm_i_ssnop(buf) uasm_i_sll(buf, 0, 0, 1)
-#define uasm_i_ehb(buf) uasm_i_sll(buf, 0, 0, 3)
-
-/* Handle relocations. */
-struct uasm_reloc {
- u32 *addr;
- unsigned int type;
- int lab;
-};
-
-/* This is zero so we can use zeroed label arrays. */
-#define UASM_LABEL_INVALID 0
-
-void uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid);
-void uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab);
-void uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end, long off);
-void uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end, long off);
-void uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab,
- u32 *first, u32 *end, u32 *target);
-int uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr);
-
-/* Convenience functions for labeled branches. */
-void uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
-void uasm_il_b(u32 **p, struct uasm_reloc **r, int lid);
-void uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
-void uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
-void uasm_il_bne(u32 **p, struct uasm_reloc **r, unsigned int reg1,
- unsigned int reg2, int lid);
-void uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
-void uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
-void uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);