Diffstat (limited to 'arch/mips/mm')
-rw-r--r--  arch/mips/mm/Makefile     |  17
-rw-r--r--  arch/mips/mm/c-r4k.c      |  21
-rw-r--r--  arch/mips/mm/cache.c      |   2
-rw-r--r--  arch/mips/mm/fault.c      |   3
-rw-r--r--  arch/mips/mm/init.c       |  17
-rw-r--r--  arch/mips/mm/pgtable-64.c |  20
-rw-r--r--  arch/mips/mm/tlb-r4k.c    |   3
-rw-r--r--  arch/mips/mm/tlbex.c      | 123
-rw-r--r--  arch/mips/mm/uasm.c       |  23

9 files changed, 145 insertions(+), 84 deletions(-)
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile
index fd6203f14f1..90ceb963aaf 100644
--- a/arch/mips/mm/Makefile
+++ b/arch/mips/mm/Makefile
@@ -11,27 +11,12 @@
 obj-$(CONFIG_64BIT)             += pgtable-64.o
 obj-$(CONFIG_HIGHMEM)           += highmem.o
 obj-$(CONFIG_HUGETLB_PAGE)      += hugetlbpage.o
-obj-$(CONFIG_CPU_LOONGSON2)     += c-r4k.o cex-gen.o tlb-r4k.o
-obj-$(CONFIG_CPU_MIPS32)        += c-r4k.o cex-gen.o tlb-r4k.o
-obj-$(CONFIG_CPU_MIPS64)        += c-r4k.o cex-gen.o tlb-r4k.o
-obj-$(CONFIG_CPU_NEVADA)        += c-r4k.o cex-gen.o tlb-r4k.o
-obj-$(CONFIG_CPU_R10000)        += c-r4k.o cex-gen.o tlb-r4k.o
+obj-$(CONFIG_CPU_R4K_CACHE_TLB) += c-r4k.o cex-gen.o tlb-r4k.o
 obj-$(CONFIG_CPU_R3000)         += c-r3k.o tlb-r3k.o
-obj-$(CONFIG_CPU_R4300)         += c-r4k.o cex-gen.o tlb-r4k.o
-obj-$(CONFIG_CPU_R4X00)         += c-r4k.o cex-gen.o tlb-r4k.o
-obj-$(CONFIG_CPU_R5000)         += c-r4k.o cex-gen.o tlb-r4k.o
-obj-$(CONFIG_CPU_R5432)         += c-r4k.o cex-gen.o tlb-r4k.o
-obj-$(CONFIG_CPU_R5500)         += c-r4k.o cex-gen.o tlb-r4k.o
 obj-$(CONFIG_CPU_R8000)         += c-r4k.o cex-gen.o tlb-r8k.o
-obj-$(CONFIG_CPU_RM7000)        += c-r4k.o cex-gen.o tlb-r4k.o
-obj-$(CONFIG_CPU_RM9000)        += c-r4k.o cex-gen.o tlb-r4k.o
 obj-$(CONFIG_CPU_SB1)           += c-r4k.o cerr-sb1.o cex-sb1.o tlb-r4k.o
 obj-$(CONFIG_CPU_TX39XX)        += c-tx39.o tlb-r3k.o
-obj-$(CONFIG_CPU_TX49XX)        += c-r4k.o cex-gen.o tlb-r4k.o
-obj-$(CONFIG_CPU_VR41XX)        += c-r4k.o cex-gen.o tlb-r4k.o
 obj-$(CONFIG_CPU_CAVIUM_OCTEON) += c-octeon.o cex-oct.o tlb-r4k.o
-obj-$(CONFIG_CPU_XLR)           += c-r4k.o tlb-r4k.o cex-gen.o
-obj-$(CONFIG_CPU_XLP)           += c-r4k.o tlb-r4k.o cex-gen.o
 
 obj-$(CONFIG_IP22_CPU_SCACHE)   += sc-ip22.o
 obj-$(CONFIG_R5000_CPU_SCACHE)  += sc-r5k.o
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index f092c265dc6..4c32ede464b 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -786,6 +786,25 @@ static inline void rm7k_erratum31(void)
 	}
 }
 
+static inline void alias_74k_erratum(struct cpuinfo_mips *c)
+{
+	/*
+	 * Early versions of the 74K do not update the cache tags on a
+	 * vtag miss/ptag hit which can occur in the case of KSEG0/KUSEG
+	 * aliases. In this case it is better to treat the cache as always
+	 * having aliases.
+	 */
+	if ((c->processor_id & 0xff) <= PRID_REV_ENCODE_332(2, 4, 0))
+		c->dcache.flags |= MIPS_CACHE_VTAG;
+	if ((c->processor_id & 0xff) == PRID_REV_ENCODE_332(2, 4, 0))
+		write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
+	if (((c->processor_id & 0xff00) == PRID_IMP_1074K) &&
+	    ((c->processor_id & 0xff) <= PRID_REV_ENCODE_332(1, 1, 0))) {
+		c->dcache.flags |= MIPS_CACHE_VTAG;
+		write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
+	}
+}
+
 static char *way_string[] __cpuinitdata = { NULL, "direct mapped", "2-way",
 	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
 };
@@ -1056,6 +1075,8 @@ static void __cpuinit probe_pcache(void)
 	case CPU_34K:
 	case CPU_74K:
 	case CPU_1004K:
+		if (c->cputype == CPU_74K)
+			alias_74k_erratum(c);
 		if ((read_c0_config7() & (1 << 16))) {
 			/* effectively physically indexed dcache,
 			   thus no virtual aliases. */
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 829320c7b17..07cec4407b0 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -142,7 +142,7 @@ EXPORT_SYMBOL(_page_cachable_default);
 
 static inline void setup_protection_map(void)
 {
-	if (kernel_uses_smartmips_rixi) {
+	if (cpu_has_rixi) {
 		protection_map[0]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
 		protection_map[1]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
 		protection_map[2]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index c14f6dfed99..ddcec1e1a0c 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -114,7 +114,7 @@ good_area:
 		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
 	} else {
-		if (kernel_uses_smartmips_rixi) {
+		if (cpu_has_rixi) {
 			if (address == regs->cp0_epc && !(vma->vm_flags & VM_EXEC)) {
 #if 0
 				pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] XI violation\n",
@@ -171,6 +171,7 @@ good_area:
 		}
 		if (fault & VM_FAULT_RETRY) {
 			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			flags |= FAULT_FLAG_TRIED;
 
 			/*
 			 * No need to up_read(&mm->mmap_sem) as we would
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 1a85ba92eb5..be9acb2b959 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -469,19 +469,20 @@ void __init_refok free_initmem(void)
 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
 unsigned long pgd_current[NR_CPUS];
 #endif
-/*
- * On 64-bit we've got three-level pagetables with a slightly
- * different layout ...
- */
-#define __page_aligned(order) __attribute__((__aligned__(PAGE_SIZE<<order)))
 
 /*
  * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER
  * are constants. So we use the variants from asm-offset.h until that gcc
  * will officially be retired.
+ *
+ * Align swapper_pg_dir in to 64K, allows its address to be loaded
+ * with a single LUI instruction in the TLB handlers. If we used
+ * __aligned(64K), its size would get rounded up to the alignment
+ * size, and waste space. So we place it in its own section and align
+ * it in the linker script.
  */
-pgd_t swapper_pg_dir[_PTRS_PER_PGD] __page_aligned(_PGD_ORDER);
+pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
 #ifndef __PAGETABLE_PMD_FOLDED
-pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned(PMD_ORDER);
+pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
 #endif
-pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER);
+pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
diff --git a/arch/mips/mm/pgtable-64.c b/arch/mips/mm/pgtable-64.c
index cda4e300eb0..25407794edb 100644
--- a/arch/mips/mm/pgtable-64.c
+++ b/arch/mips/mm/pgtable-64.c
@@ -26,17 +26,17 @@ void pgd_init(unsigned long page)
 	p = (unsigned long *) page;
 	end = p + PTRS_PER_PGD;
 
-	while (p < end) {
+	do {
 		p[0] = entry;
 		p[1] = entry;
 		p[2] = entry;
 		p[3] = entry;
 		p[4] = entry;
-		p[5] = entry;
-		p[6] = entry;
-		p[7] = entry;
 		p += 8;
-	}
+		p[-3] = entry;
+		p[-2] = entry;
+		p[-1] = entry;
+	} while (p != end);
 }
 
 #ifndef __PAGETABLE_PMD_FOLDED
@@ -47,17 +47,17 @@ void pmd_init(unsigned long addr, unsigned long pagetable)
 	p = (unsigned long *) addr;
 	end = p + PTRS_PER_PMD;
 
-	while (p < end) {
+	do {
 		p[0] = pagetable;
 		p[1] = pagetable;
 		p[2] = pagetable;
 		p[3] = pagetable;
 		p[4] = pagetable;
-		p[5] = pagetable;
-		p[6] = pagetable;
-		p[7] = pagetable;
 		p += 8;
-	}
+		p[-3] = pagetable;
+		p[-2] = pagetable;
+		p[-1] = pagetable;
+	} while (p != end);
 }
 
 #endif
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index d2572cb232d..4b9b935a070 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -320,6 +320,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 			tlb_write_random();
 		else
 			tlb_write_indexed();
+		tlbw_use_hazard();
 		write_c0_pagemask(PM_DEFAULT_MASK);
 	} else
 #endif
@@ -401,7 +402,7 @@ void __cpuinit tlb_init(void)
 	    current_cpu_type() == CPU_R14000)
 		write_c0_framemask(0);
 
-	if (kernel_uses_smartmips_rixi) {
+	if (cpu_has_rixi) {
 		/*
 		 * Enable the no read, no exec bits, and enable large virtual
 		 * address.
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 03eb0ef9158..2833dcb67b5 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -148,8 +148,8 @@ enum label_id {
 	label_leave,
 	label_vmalloc,
 	label_vmalloc_done,
-	label_tlbw_hazard,
-	label_split,
+	label_tlbw_hazard_0,
+	label_split = label_tlbw_hazard_0 + 8,
 	label_tlbl_goaround1,
 	label_tlbl_goaround2,
 	label_nopage_tlbl,
@@ -167,7 +167,7 @@ UASM_L_LA(_second_part)
 UASM_L_LA(_leave)
 UASM_L_LA(_vmalloc)
 UASM_L_LA(_vmalloc_done)
-UASM_L_LA(_tlbw_hazard)
+/* _tlbw_hazard_x is handled differently. */
 UASM_L_LA(_split)
 UASM_L_LA(_tlbl_goaround1)
 UASM_L_LA(_tlbl_goaround2)
@@ -181,6 +181,30 @@ UASM_L_LA(_large_segbits_fault)
 UASM_L_LA(_tlb_huge_update)
 #endif
 
+static int __cpuinitdata hazard_instance;
+
+static void uasm_bgezl_hazard(u32 **p, struct uasm_reloc **r, int instance)
+{
+	switch (instance) {
+	case 0 ... 7:
+		uasm_il_bgezl(p, r, 0, label_tlbw_hazard_0 + instance);
+		return;
+	default:
+		BUG();
+	}
+}
+
+static void uasm_bgezl_label(struct uasm_label **l, u32 **p, int instance)
+{
+	switch (instance) {
+	case 0 ... 7:
+		uasm_build_label(l, *p, label_tlbw_hazard_0 + instance);
+		break;
+	default:
+		BUG();
+	}
+}
+
 /*
  * For debug purposes.
 */
@@ -449,8 +473,20 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
 	}
 
 	if (cpu_has_mips_r2) {
-		if (cpu_has_mips_r2_exec_hazard)
+		/*
+		 * The architecture spec says an ehb is required here,
+		 * but a number of cores do not have the hazard and
+		 * using an ehb causes an expensive pipeline stall.
+		 */
+		switch (current_cpu_type()) {
+		case CPU_M14KC:
+		case CPU_74K:
+			break;
+
+		default:
 			uasm_i_ehb(p);
+			break;
+		}
 		tlbw(p);
 		return;
 	}
@@ -466,21 +502,28 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
 		 * This branch uses up a mtc0 hazard nop slot and saves
 		 * two nops after the tlbw instruction.
 		 */
-		uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
+		uasm_bgezl_hazard(p, r, hazard_instance);
 		tlbw(p);
-		uasm_l_tlbw_hazard(l, *p);
+		uasm_bgezl_label(l, p, hazard_instance);
+		hazard_instance++;
 		uasm_i_nop(p);
 		break;
 
 	case CPU_R4600:
 	case CPU_R4700:
-	case CPU_R5000:
-	case CPU_R5000A:
 		uasm_i_nop(p);
 		tlbw(p);
 		uasm_i_nop(p);
 		break;
 
+	case CPU_R5000:
+	case CPU_R5000A:
+	case CPU_NEVADA:
+		uasm_i_nop(p); /* QED specifies 2 nops hazard */
+		uasm_i_nop(p); /* QED specifies 2 nops hazard */
+		tlbw(p);
+		break;
+
 	case CPU_R4300:
 	case CPU_5KC:
 	case CPU_TX49XX:
@@ -514,17 +557,6 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
 		tlbw(p);
 		break;
 
-	case CPU_NEVADA:
-		uasm_i_nop(p); /* QED specifies 2 nops hazard */
-		/*
-		 * This branch uses up a mtc0 hazard nop slot and saves
-		 * a nop after the tlbw instruction.
-		 */
-		uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
-		tlbw(p);
-		uasm_l_tlbw_hazard(l, *p);
-		break;
-
 	case CPU_RM7000:
 		uasm_i_nop(p);
 		uasm_i_nop(p);
@@ -586,9 +618,8 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
 static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
 								  unsigned int reg)
 {
-	if (kernel_uses_smartmips_rixi) {
-		UASM_i_SRL(p, reg, reg, ilog2(_PAGE_NO_EXEC));
-		UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+	if (cpu_has_rixi) {
+		UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
 	} else {
 #ifdef CONFIG_64BIT_PHYS_ADDR
 		uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL));
@@ -921,6 +952,13 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
 #endif
 	uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
 	uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
+
+	if (cpu_has_mips_r2) {
+		uasm_i_ext(p, tmp, tmp, PGDIR_SHIFT, (32 - PGDIR_SHIFT));
+		uasm_i_ins(p, ptr, tmp, PGD_T_LOG2, (32 - PGDIR_SHIFT));
+		return;
+	}
+
 	uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
 	uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
 	uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
@@ -956,6 +994,15 @@ static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx)
 
 static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
 {
+	if (cpu_has_mips_r2) {
+		/* PTE ptr offset is obtained from BadVAddr */
+		UASM_i_MFC0(p, tmp, C0_BADVADDR);
+		UASM_i_LW(p, ptr, 0, ptr);
+		uasm_i_ext(p, tmp, tmp, PAGE_SHIFT+1, PGDIR_SHIFT-PAGE_SHIFT-1);
+		uasm_i_ins(p, ptr, tmp, PTE_T_LOG2+1, PGDIR_SHIFT-PAGE_SHIFT-1);
+		return;
+	}
+
 	/*
 	 * Bug workaround for the Nevada. It seems as if under certain
 	 * circumstances the move from cp0_context might produce a
@@ -990,12 +1037,10 @@ static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
 	if (cpu_has_64bits) {
 		uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
 		uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
-		if (kernel_uses_smartmips_rixi) {
-			UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
-			UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
-			UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+		if (cpu_has_rixi) {
+			UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
 			UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
-			UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+			UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL));
 		} else {
 			uasm_i_dsrl_safe(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
 			UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
@@ -1017,14 +1062,12 @@ static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
 	UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
 	if (r45k_bvahwbug())
 		build_tlb_probe_entry(p);
-	if (kernel_uses_smartmips_rixi) {
-		UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
-		UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
-		UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+	if (cpu_has_rixi) {
+		UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
 		if (r4k_250MHZhwbug())
 			UASM_i_MTC0(p, 0, C0_ENTRYLO0);
 		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
-		UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+		UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL));
 	} else {
 		UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
 		if (r4k_250MHZhwbug())
@@ -1183,14 +1226,10 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
 		UASM_i_LW(p, even, 0, ptr); /* get even pte */
 		UASM_i_LW(p, odd, sizeof(pte_t), ptr); /* get odd pte */
 	}
-	if (kernel_uses_smartmips_rixi) {
-		uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_NO_EXEC));
-		uasm_i_dsrl_safe(p, odd, odd, ilog2(_PAGE_NO_EXEC));
-		uasm_i_drotr(p, even, even,
-			     ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+	if (cpu_has_rixi) {
+		uasm_i_drotr(p, even, even, ilog2(_PAGE_GLOBAL));
 		UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
-		uasm_i_drotr(p, odd, odd,
-			     ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+		uasm_i_drotr(p, odd, odd, ilog2(_PAGE_GLOBAL));
 	} else {
 		uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_GLOBAL));
 		UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
@@ -1545,7 +1584,7 @@ build_pte_present(u32 **p, struct uasm_reloc **r,
 {
 	int t = scratch >= 0 ? scratch : pte;
 
-	if (kernel_uses_smartmips_rixi) {
+	if (cpu_has_rixi) {
 		if (use_bbit_insns()) {
 			uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid);
 			uasm_i_nop(p);
@@ -1875,7 +1914,7 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
 	if (m4kc_tlbp_war())
 		build_tlb_probe_entry(&p);
 
-	if (kernel_uses_smartmips_rixi) {
+	if (cpu_has_rixi) {
 		/*
 		 * If the page is not _PAGE_VALID, RI or XI could not
 		 * have triggered it. Skip the expensive test..
@@ -1929,7 +1968,7 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
 	build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
 	build_tlb_probe_entry(&p);
 
-	if (kernel_uses_smartmips_rixi) {
+	if (cpu_has_rixi) {
 		/*
 		 * If the page is not _PAGE_VALID, RI or XI could not
 		 * have triggered it. Skip the expensive test..
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index 64a28e81906..39b89105622 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -63,11 +63,12 @@ enum opcode {
 	insn_bne, insn_cache, insn_daddiu, insn_daddu, insn_dins, insn_dinsm,
 	insn_dmfc0, insn_dmtc0, insn_drotr, insn_drotr32, insn_dsll,
 	insn_dsll32, insn_dsra, insn_dsrl, insn_dsrl32, insn_dsubu, insn_eret,
-	insn_j, insn_jal, insn_jr, insn_ld, insn_ldx, insn_ll, insn_lld,
-	insn_lui, insn_lw, insn_lwx, insn_mfc0, insn_mtc0, insn_or, insn_ori,
-	insn_pref, insn_rfe, insn_rotr, insn_sc, insn_scd, insn_sd, insn_sll,
-	insn_sra, insn_srl, insn_subu, insn_sw, insn_syscall, insn_tlbp,
-	insn_tlbr, insn_tlbwi, insn_tlbwr, insn_xor, insn_xori,
+	insn_ext, insn_ins, insn_j, insn_jal, insn_jr, insn_ld, insn_ldx,
+	insn_ll, insn_lld, insn_lui, insn_lw, insn_lwx, insn_mfc0, insn_mtc0,
+	insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sc, insn_scd,
+	insn_sd, insn_sll, insn_sra, insn_srl, insn_subu, insn_sw,
+	insn_syscall, insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_xor,
+	insn_xori,
 };
 
 struct insn {
@@ -115,6 +116,9 @@ static struct insn insn_table[] __uasminitdata = {
 	{ insn_dsrl, M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE },
 	{ insn_dsubu, M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD },
 	{ insn_eret,  M(cop0_op, cop_op, 0, 0, 0, eret_op),  0 },
+	{ insn_ext, M(spec3_op, 0, 0, 0, 0, ext_op), RS | RT | RD | RE },
+	{ insn_ins, M(spec3_op, 0, 0, 0, 0, ins_op), RS | RT | RD | RE },
+	{ insn_j,  M(j_op, 0, 0, 0, 0, 0),  JIMM },
 	{ insn_jal,  M(jal_op, 0, 0, 0, 0, 0),  JIMM },
 	{ insn_j,  M(j_op, 0, 0, 0, 0, 0),  JIMM },
 	{ insn_jr,  M(spec_op, 0, 0, 0, 0, jr_op),  RS },
@@ -341,6 +345,13 @@ Ip_u2u1msbu3(op)					\
 }							\
 UASM_EXPORT_SYMBOL(uasm_i##op);
 
+#define I_u2u1msbdu3(op)				\
+Ip_u2u1msbu3(op)					\
+{							\
+	build_insn(buf, insn##op, b, a, d-1, c);	\
+}							\
+UASM_EXPORT_SYMBOL(uasm_i##op);
+
 #define I_u1u2(op)					\
 Ip_u1u2(op)						\
 {							\
@@ -394,6 +405,8 @@ I_u2u1u3(_drotr)
 I_u2u1u3(_drotr32)
 I_u3u1u2(_dsubu)
 I_0(_eret)
+I_u2u1msbdu3(_ext)
+I_u2u1msbu3(_ins)
 I_u1(_j)
 I_u1(_jal)
 I_u1(_jr)
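
The alias_74k_erratum() hunk above gates its workarounds on the core revision packed into the low byte of the PRId register. A minimal standalone sketch of that comparison; the 3-3-2 encoding (3 bits of version, 3 bits of revision, 2 bits of patch level) mirrors the kernel macro of the same name, and the PRId value below is hypothetical:

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Sketch of the revision check in alias_74k_erratum().  The macro
     * packs (version, revision, patch) into the low byte of PRId.
     */
    #define PRID_REV_ENCODE_332(ver, rev, patch) \
            ((ver) << 5 | (rev) << 2 | (patch))

    int main(void)
    {
            uint32_t processor_id = 0x00019750; /* hypothetical 74K PRId */
            unsigned rev = processor_id & 0xff;

            if (rev <= PRID_REV_ENCODE_332(2, 4, 0))
                    printf("rev %#x: treat D-cache as virtually tagged\n", rev);
            else
                    printf("rev %#x: no alias workaround needed\n", rev);
            return 0;
    }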
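
The pgd_init()/pmd_init() rewrite above converts a guarded while loop into a do/while and moves three of the eight unrolled stores after the pointer increment. A standalone sketch of the same pattern; TABLE_ENTRIES is an illustrative stand-in for PTRS_PER_PGD and, as in the kernel, is assumed to be a non-zero multiple of 8, which is what makes the do/while with no entry check safe:

    #include <assert.h>
    #include <stdio.h>

    #define TABLE_ENTRIES 64 /* illustrative; must be a non-zero multiple of 8 */

    static void table_init(unsigned long *table, unsigned long entry)
    {
            unsigned long *p = table;
            unsigned long *end = p + TABLE_ENTRIES;

            do {
                    p[0] = entry;
                    p[1] = entry;
                    p[2] = entry;
                    p[3] = entry;
                    p[4] = entry;
                    p += 8;         /* bump the pointer early ... */
                    p[-3] = entry;  /* ... then finish the unrolled slots */
                    p[-2] = entry;
                    p[-1] = entry;
            } while (p != end);
    }

    int main(void)
    {
            unsigned long t[TABLE_ENTRIES];
            int i;

            table_init(t, 0x2aUL);
            for (i = 0; i < TABLE_ENTRIES; i++)
                    assert(t[i] == 0x2aUL);
            printf("all %d entries initialized\n", TABLE_ENTRIES);
            return 0;
    }

Note that p[-3]..p[-1] after the increment are the same slots as p[5]..p[7] before it, so the loop writes every entry exactly once.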
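
In build_get_pgde32() above, the MIPS R2 path replaces the srl/sll/addu index computation with one ext and one ins, relying on the PGD base being aligned so that the inserted field only overwrites zero bits. A C model of the two paths under assumed shift values (not taken from any particular kernel configuration), checking that they agree:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PGDIR_SHIFT 22  /* assumed for illustration */
    #define PGD_T_LOG2  2   /* assumed: 4-byte pgd_t */
    #define IDX_BITS    (32 - PGDIR_SHIFT)

    /* Pre-R2 sequence: srl, sll, addu. */
    static uint32_t pgd_ptr_srl_sll_addu(uint32_t pgd_base, uint32_t badvaddr)
    {
            return pgd_base + ((badvaddr >> PGDIR_SHIFT) << PGD_T_LOG2);
    }

    /*
     * R2 sequence: ext pulls the PGD index out of BadVAddr, ins deposits
     * it into the pointer at bit PGD_T_LOG2.  Equivalent to the addu only
     * because the replaced bits of the aligned base are zero.
     */
    static uint32_t pgd_ptr_ext_ins(uint32_t pgd_base, uint32_t badvaddr)
    {
            uint32_t idx = (badvaddr >> PGDIR_SHIFT) & ((1u << IDX_BITS) - 1);
            uint32_t mask = ((1u << IDX_BITS) - 1) << PGD_T_LOG2;

            return (pgd_base & ~mask) | (idx << PGD_T_LOG2);
    }

    int main(void)
    {
            uint32_t base = 0x81000000; /* assumed 64K-aligned pgd base */
            uint32_t bad = 0x7f4d2a30;  /* arbitrary faulting address */

            assert(pgd_ptr_srl_sll_addu(base, bad) == pgd_ptr_ext_ins(base, bad));
            printf("pgd entry at %#010x\n", (unsigned)pgd_ptr_ext_ins(base, bad));
            return 0;
    }

This also shows why the init.c hunk places swapper_pg_dir in its own 64K-aligned section: a sufficiently aligned base is what lets the handler form the pointer with lui plus ins instead of an add.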
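
The recurring cpu_has_rixi change collapses the old SRL+ROTR pair into a single ROTR by ilog2(_PAGE_GLOBAL): one rotate lands the global bit at EntryLo bit 0 while the PTE bits below it wrap around to the top of the register, where the hardware keeps RI/XI. A sketch with illustrative bit positions (assumed, not the kernel's actual PTE layout; the one-instruction form depends on the no-read/no-exec bits sitting directly below _PAGE_GLOBAL):

    #include <stdint.h>
    #include <stdio.h>

    #define GLOBAL_SHIFT 8                /* assumed ilog2(_PAGE_GLOBAL) */
    #define PTE_NO_EXEC  (1u << 7)        /* assumed */
    #define PTE_NO_READ  (1u << 6)        /* assumed */
    #define PTE_GLOBAL   (1u << GLOBAL_SHIFT)

    static uint32_t rotr32(uint32_t x, unsigned int n)
    {
            return (x >> n) | (x << (32 - n));
    }

    int main(void)
    {
            uint32_t pte = 0x12345000 | PTE_GLOBAL | PTE_NO_EXEC;
            uint32_t entrylo = rotr32(pte, GLOBAL_SHIFT);

            /* Global bit lands at bit 0; no-exec rotates up to bit 31. */
            printf("pte=%#010x entrylo=%#010x g=%u xi=%u\n",
                   (unsigned)pte, (unsigned)entrylo,
                   (unsigned)(entrylo & 1), (unsigned)(entrylo >> 31));
            return 0;
    }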