Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--  arch/mips/kernel/Makefile              |    3
-rw-r--r--  arch/mips/kernel/asm-offsets.c         |    2
-rw-r--r--  arch/mips/kernel/branch.c              |  288
-rw-r--r--  arch/mips/kernel/cevt-r4k.c            |    8
-rw-r--r--  arch/mips/kernel/cps-vec.S             |   16
-rw-r--r--  arch/mips/kernel/cpu-bugs64.c          |   11
-rw-r--r--  arch/mips/kernel/cpu-probe.c           |   33
-rw-r--r--  arch/mips/kernel/elf.c                 |  301
-rw-r--r--  arch/mips/kernel/entry.S               |   23
-rw-r--r--  arch/mips/kernel/genex.S               |    2
-rw-r--r--  arch/mips/kernel/idle.c                |    1
-rw-r--r--  arch/mips/kernel/mips-r2-to-r6-emul.c  | 2378
-rw-r--r--  arch/mips/kernel/mips_ksyms.c          |   12
-rw-r--r--  arch/mips/kernel/octeon_switch.S       |  218
-rw-r--r--  arch/mips/kernel/proc.c                |    8
-rw-r--r--  arch/mips/kernel/process.c             |   96
-rw-r--r--  arch/mips/kernel/r4k_fpu.S             |   12
-rw-r--r--  arch/mips/kernel/r4k_switch.S          |   14
-rw-r--r--  arch/mips/kernel/spram.c               |    1
-rw-r--r--  arch/mips/kernel/syscall.c             |    2
-rw-r--r--  arch/mips/kernel/traps.c               |   60
-rw-r--r--  arch/mips/kernel/unaligned.c           |  390
22 files changed, 3598 insertions(+), 281 deletions(-)
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 92987d1bbe5..d3d2ff2d76d 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -52,7 +52,7 @@ obj-$(CONFIG_MIPS_MT_SMP)	+= smp-mt.o
 obj-$(CONFIG_MIPS_CMP)		+= smp-cmp.o
 obj-$(CONFIG_MIPS_CPS)		+= smp-cps.o cps-vec.o
 obj-$(CONFIG_MIPS_GIC_IPI)	+= smp-gic.o
-obj-$(CONFIG_CPU_MIPSR2)	+= spram.o
+obj-$(CONFIG_MIPS_SPRAM)	+= spram.o

 obj-$(CONFIG_MIPS_VPE_LOADER)	+= vpe.o
 obj-$(CONFIG_MIPS_VPE_LOADER_CMP) += vpe-cmp.o
@@ -90,6 +90,7 @@ obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
 obj-$(CONFIG_EARLY_PRINTK_8250)	+= early_printk_8250.o
 obj-$(CONFIG_SPINLOCK_TEST)	+= spinlock_test.o
 obj-$(CONFIG_MIPS_MACHINE)	+= mips_machine.o
+obj-$(CONFIG_MIPSR2_TO_R6_EMULATOR) += mips-r2-to-r6-emul.o

 CFLAGS_cpu-bugs64.o	= $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)

diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 3b2dfdb4865..750d67ac41e 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -97,6 +97,7 @@ void output_thread_info_defines(void)
 	OFFSET(TI_TP_VALUE, thread_info, tp_value);
 	OFFSET(TI_CPU, thread_info, cpu);
 	OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
+	OFFSET(TI_R2_EMUL_RET, thread_info, r2_emul_return);
 	OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit);
 	OFFSET(TI_REGS, thread_info, regs);
 	DEFINE(_THREAD_SIZE, THREAD_SIZE);
@@ -381,6 +382,7 @@ void output_octeon_cop2_state_defines(void)
 	OFFSET(OCTEON_CP2_GFM_RESULT, octeon_cop2_state, cop2_gfm_result);
 	OFFSET(OCTEON_CP2_HSH_DATW, octeon_cop2_state, cop2_hsh_datw);
 	OFFSET(OCTEON_CP2_HSH_IVW, octeon_cop2_state, cop2_hsh_ivw);
+	OFFSET(OCTEON_CP2_SHA3, octeon_cop2_state, cop2_sha3);
 	OFFSET(THREAD_CP2, task_struct, thread.cp2);
 	OFFSET(THREAD_CVMSEG, task_struct, thread.cvmseg.cvmseg);
 	BLANK();

diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index 4d7d99d601c..c2e0f45ddf6 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -16,6 +16,7 @@
 #include <asm/fpu.h>
 #include <asm/fpu_emulator.h>
 #include <asm/inst.h>
+#include <asm/mips-r2-to-r6-emul.h>
 #include <asm/ptrace.h>
 #include <asm/uaccess.h>

@@ -399,11 +400,21 @@ int __MIPS16e_compute_return_epc(struct pt_regs *regs)
  * @returns:	-EFAULT on error and forces SIGBUS, and on success
  *		returns 0 or BRANCH_LIKELY_TAKEN as appropriate after
  *		evaluating the branch.
+ *
+ * MIPS R6 Compact branches and forbidden slots:
+ *	Compact branches do not throw exceptions because they do
+ *	not have delay slots. The forbidden slot instruction ($PC+4)
+ *	is only executed if the branch was not taken. Otherwise the
+ *	forbidden slot is skipped entirely. This means that the
+ *	only possible reason to be here because of a MIPS R6 compact
+ *	branch instruction is that the forbidden slot has raised an
+ *	exception. In that case the branch was not taken, so the EPC
+ *	can be safely set to EPC + 8.
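+ *
+ *	For example, with a not-taken BEQZC whose forbidden slot
+ *	faults:
+ *
+ *	  EPC + 0:	BEQZC		(compact branch, not taken)
+ *	  EPC + 4:	forbidden slot	(the faulting instruction)
+ *	  EPC + 8:	where execution can safely resume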
 */
 int __compute_return_epc_for_insn(struct pt_regs *regs,
				   union mips_instruction insn)
 {
-	unsigned int bit, fcr31, dspcontrol;
+	unsigned int bit, fcr31, dspcontrol, reg;
	long epc = regs->cp0_epc;
	int ret = 0;
@@ -417,6 +428,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
			regs->regs[insn.r_format.rd] = epc + 8;
			/* Fall through */
		case jr_op:
+			if (NO_R6EMU && insn.r_format.func == jr_op)
+				goto sigill_r6;
			regs->cp0_epc = regs->regs[insn.r_format.rs];
			break;
		}
@@ -429,8 +442,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
	 */
	case bcond_op:
		switch (insn.i_format.rt) {
-		case bltz_op:
		case bltzl_op:
+			if (NO_R6EMU)
+				goto sigill_r6;
+		case bltz_op:
			if ((long)regs->regs[insn.i_format.rs] < 0) {
				epc = epc + 4 + (insn.i_format.simmediate << 2);
				if (insn.i_format.rt == bltzl_op)
@@ -440,8 +455,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
			regs->cp0_epc = epc;
			break;

-		case bgez_op:
		case bgezl_op:
+			if (NO_R6EMU)
+				goto sigill_r6;
+		case bgez_op:
			if ((long)regs->regs[insn.i_format.rs] >= 0) {
				epc = epc + 4 + (insn.i_format.simmediate << 2);
				if (insn.i_format.rt == bgezl_op)
@@ -453,7 +470,29 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,

		case bltzal_op:
		case bltzall_op:
+			if (NO_R6EMU && (insn.i_format.rs ||
+			    insn.i_format.rt == bltzall_op)) {
+				ret = -SIGILL;
+				break;
+			}
			regs->regs[31] = epc + 8;
+			/*
+			 * OK we are here either because we hit a NAL
+			 * instruction or because we are emulating an
+			 * old bltzal{,l} one. Let's figure out what the
+			 * case really is.
+			 */
+			if (!insn.i_format.rs) {
+				/*
+				 * NAL or BLTZAL with rs == 0
+				 * Doesn't matter if we are R6 or not. The
+				 * result is the same
+				 */
+				regs->cp0_epc += 4 +
+					(insn.i_format.simmediate << 2);
+				break;
+			}
+			/* Now do the real thing for non-R6 BLTZAL{,L} */
			if ((long)regs->regs[insn.i_format.rs] < 0) {
				epc = epc + 4 + (insn.i_format.simmediate << 2);
				if (insn.i_format.rt == bltzall_op)
@@ -465,7 +504,29 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,

		case bgezal_op:
		case bgezall_op:
+			if (NO_R6EMU && (insn.i_format.rs ||
+			    insn.i_format.rt == bgezall_op)) {
+				ret = -SIGILL;
+				break;
+			}
			regs->regs[31] = epc + 8;
+			/*
+			 * OK we are here either because we hit a BAL
+			 * instruction or because we are emulating an
+			 * old bgezal{,l} one. Let's figure out what the
+			 * case really is.
+			 */
+			if (!insn.i_format.rs) {
+				/*
+				 * BAL or BGEZAL with rs == 0
+				 * Doesn't matter if we are R6 or not. The
+				 * result is the same
+				 */
+				regs->cp0_epc += 4 +
+					(insn.i_format.simmediate << 2);
+				break;
+			}
+			/* Now do the real thing for non-R6 BGEZAL{,L} */
			if ((long)regs->regs[insn.i_format.rs] >= 0) {
				epc = epc + 4 + (insn.i_format.simmediate << 2);
				if (insn.i_format.rt == bgezall_op)
@@ -477,7 +538,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,

		case bposge32_op:
			if (!cpu_has_dsp)
-				goto sigill;
+				goto sigill_dsp;

			dspcontrol = rddsp(0x01);

@@ -508,8 +569,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
	/*
	 * These are conditional and in i_format.
	 */
-	case beq_op:
	case beql_op:
+		if (NO_R6EMU)
+			goto sigill_r6;
+	case beq_op:
		if (regs->regs[insn.i_format.rs] ==
		    regs->regs[insn.i_format.rt]) {
			epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -520,8 +583,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
		regs->cp0_epc = epc;
		break;

-	case bne_op:
	case bnel_op:
+		if (NO_R6EMU)
+			goto sigill_r6;
+	case bne_op:
		if (regs->regs[insn.i_format.rs] !=
		    regs->regs[insn.i_format.rt]) {
			epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -532,8 +597,31 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
		regs->cp0_epc = epc;
		break;

-	case blez_op: /* not really i_format */
-	case blezl_op:
+	case blezl_op: /* not really i_format */
+		if (NO_R6EMU)
+			goto sigill_r6;
+	case blez_op:
+		/*
+		 * Compact branches for R6 for the
+		 * blez and blezl opcodes.
+		 * BLEZ  | rs = 0  | rt != 0  == BLEZALC
+		 * BLEZ  | rs = rt != 0      == BGEZALC
+		 * BLEZ  | rs != 0 | rt != 0 == BGEUC
+		 * BLEZL | rs = 0  | rt != 0 == BLEZC
+		 * BLEZL | rs = rt != 0      == BGEZC
+		 * BLEZL | rs != 0 | rt != 0 == BGEC
+		 *
+		 * For real BLEZ{,L}, rt is always 0.
+		 */
+
+		if (cpu_has_mips_r6 && insn.i_format.rt) {
+			if ((insn.i_format.opcode == blez_op) &&
+			    ((!insn.i_format.rs && insn.i_format.rt) ||
+			     (insn.i_format.rs == insn.i_format.rt)))
+				regs->regs[31] = epc + 4;
+			regs->cp0_epc += 8;
+			break;
+		}
		/* rt field assumed to be zero */
		if ((long)regs->regs[insn.i_format.rs] <= 0) {
			epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -544,8 +632,32 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
		regs->cp0_epc = epc;
		break;

-	case bgtz_op:
	case bgtzl_op:
+		if (NO_R6EMU)
+			goto sigill_r6;
+	case bgtz_op:
+		/*
+		 * Compact branches for R6 for the
+		 * bgtz and bgtzl opcodes.
+		 * BGTZ  | rs = 0  | rt != 0  == BGTZALC
+		 * BGTZ  | rs = rt != 0      == BLTZALC
+		 * BGTZ  | rs != 0 | rt != 0 == BLTUC
+		 * BGTZL | rs = 0  | rt != 0 == BGTZC
+		 * BGTZL | rs = rt != 0      == BLTZC
+		 * BGTZL | rs != 0 | rt != 0 == BLTC
+		 *
+		 * The *ZALC variants require BGTZ && rt != 0.
+		 * For real BGTZ{,L}, rt is always 0.
+		 */
+		if (cpu_has_mips_r6 && insn.i_format.rt) {
+			if ((insn.i_format.opcode == bgtz_op) &&
+			    ((!insn.i_format.rs && insn.i_format.rt) ||
+			     (insn.i_format.rs == insn.i_format.rt)))
+				regs->regs[31] = epc + 4;
+			regs->cp0_epc += 8;
+			break;
+		}
+		/* rt field assumed to be zero */
		if ((long)regs->regs[insn.i_format.rs] > 0) {
			epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -560,40 +672,83 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
		epc += 8;
		regs->cp0_epc = epc;
		break;
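/*
 * A condensed view of the rs/rt classification used by the two cases
 * above; the helper below is an illustrative sketch only, not kernel API:
 *
 *	rt == 0           -> classic BLEZ{,L} / BGTZ{,L}
 *	rt != 0, rs == 0  -> BLEZALC / BGTZALC  (compact, link $31)
 *	rt != 0, rs == rt -> BGEZALC / BLTZALC  (compact, link $31)
 *	rt != 0, rs != rt -> BGEUC / BLTUC      (compact, no link)
 *	(the *L opcodes map to BLEZC/BGEZC/BGEC and BGTZC/BLTZC/BLTC,
 *	 none of which link)
 *
 *	static bool compact_branch_links(u32 opcode, u32 rs, u32 rt)
 *	{
 *		return (opcode == blez_op || opcode == bgtz_op) &&
 *		       rt && (rs == 0 || rs == rt);
 *	}
 */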
	/*
	 * And now the FPA/cp1 branch instructions.
	 */
	case cop1_op:
-		preempt_disable();
-		if (is_fpu_owner())
-			fcr31 = read_32bit_cp1_register(CP1_STATUS);
-		else
-			fcr31 = current->thread.fpu.fcr31;
-		preempt_enable();
-
-		bit = (insn.i_format.rt >> 2);
-		bit += (bit != 0);
-		bit += 23;
-		switch (insn.i_format.rt & 3) {
-		case 0: /* bc1f */
-		case 2: /* bc1fl */
-			if (~fcr31 & (1 << bit)) {
-				epc = epc + 4 + (insn.i_format.simmediate << 2);
-				if (insn.i_format.rt == 2)
-					ret = BRANCH_LIKELY_TAKEN;
-			} else
+		if (cpu_has_mips_r6 &&
+		    ((insn.i_format.rs == bc1eqz_op) ||
+		     (insn.i_format.rs == bc1nez_op))) {
+			if (!used_math()) { /* First time FPU user */
+				ret = init_fpu();
+				if (ret && NO_R6EMU) {
+					ret = -ret;
+					break;
+				}
+				ret = 0;
+				set_used_math();
+			}
+			lose_fpu(1);    /* Save FPU state for the emulator. */
+			reg = insn.i_format.rt;
+			bit = 0;
+			switch (insn.i_format.rs) {
+			case bc1eqz_op:
+				/* Test bit 0 */
+				if (get_fpr32(&current->thread.fpu.fpr[reg], 0)
+				    & 0x1)
+					bit = 1;
+				break;
+			case bc1nez_op:
+				/* Test bit 0 */
+				if (!(get_fpr32(&current->thread.fpu.fpr[reg], 0)
+				      & 0x1))
+					bit = 1;
+				break;
+			}
+			own_fpu(1);
+			if (bit)
+				epc = epc + 4 +
+					(insn.i_format.simmediate << 2);
+			else
				epc += 8;
			regs->cp0_epc = epc;
+
+			break;
+		} else {

-		case 1: /* bc1t */
-		case 3: /* bc1tl */
-			if (fcr31 & (1 << bit)) {
-				epc = epc + 4 + (insn.i_format.simmediate << 2);
-				if (insn.i_format.rt == 3)
-					ret = BRANCH_LIKELY_TAKEN;
-			} else
-				epc += 8;
-			regs->cp0_epc = epc;
+			preempt_disable();
+			if (is_fpu_owner())
+				fcr31 = read_32bit_cp1_register(CP1_STATUS);
+			else
+				fcr31 = current->thread.fpu.fcr31;
+			preempt_enable();
+
+			bit = (insn.i_format.rt >> 2);
+			bit += (bit != 0);
+			bit += 23;
+			switch (insn.i_format.rt & 3) {
+			case 0: /* bc1f */
+			case 2: /* bc1fl */
+				if (~fcr31 & (1 << bit)) {
+					epc = epc + 4 +
+						(insn.i_format.simmediate << 2);
+					if (insn.i_format.rt == 2)
+						ret = BRANCH_LIKELY_TAKEN;
+				} else
+					epc += 8;
+				regs->cp0_epc = epc;
+				break;
+
+			case 1: /* bc1t */
+			case 3: /* bc1tl */
+				if (fcr31 & (1 << bit)) {
+					epc = epc + 4 +
+						(insn.i_format.simmediate << 2);
+					if (insn.i_format.rt == 3)
+						ret = BRANCH_LIKELY_TAKEN;
+				} else
+					epc += 8;
+				regs->cp0_epc = epc;
+				break;
+			}
+			break;
		}
-		break;
#ifdef CONFIG_CPU_CAVIUM_OCTEON
	case lwc2_op: /* This is bbit0 on Octeon */
		if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt))
@@ -626,15 +781,72 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
		epc += 8;
		regs->cp0_epc = epc;
		break;
+#else
+	case bc6_op:
+		/* Only valid for MIPS R6 */
+		if (!cpu_has_mips_r6) {
+			ret = -SIGILL;
+			break;
+		}
+		regs->cp0_epc += 8;
+		break;
+	case balc6_op:
+		if (!cpu_has_mips_r6) {
+			ret = -SIGILL;
+			break;
+		}
+		/* Compact branch: BALC */
+		regs->regs[31] = epc + 4;
+		epc += 4 + (insn.i_format.simmediate << 2);
+		regs->cp0_epc = epc;
+		break;
+	case beqzcjic_op:
+		if (!cpu_has_mips_r6) {
+			ret = -SIGILL;
+			break;
+		}
+		/* Compact branch: BEQZC || JIC */
+		regs->cp0_epc += 8;
+		break;
+	case bnezcjialc_op:
+		if (!cpu_has_mips_r6) {
+			ret = -SIGILL;
+			break;
+		}
+		/* Compact branch: BNEZC || JIALC */
+		if (insn.i_format.rs)
+			regs->regs[31] = epc + 4;
+		regs->cp0_epc += 8;
+		break;
#endif
+	case cbcond0_op:
+	case cbcond1_op:
+		/* Only valid for MIPS R6 */
+		if (!cpu_has_mips_r6) {
+			ret = -SIGILL;
+			break;
+		}
+		/*
+		 * Compact branches:
+		 * bovc, beqc, beqzalc, bnvc, bnec, bnezalc
+		 */
+		if (insn.i_format.rt && !insn.i_format.rs)
+			regs->regs[31] = epc + 4;
+		regs->cp0_epc += 8;
+		break;
	}

	return ret;

-sigill:
+sigill_dsp:
	printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n",
	       current->comm);
	force_sig(SIGBUS, current);
	return -EFAULT;
+sigill_r6:
+	pr_info("%s: R2 branch but r2-to-r6 emulator is not present - sending SIGILL.\n",
+		current->comm);
+	force_sig(SIGILL, current);
+	return -EFAULT;
 }
 EXPORT_SYMBOL_GPL(__compute_return_epc_for_insn);

diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 6acaad0480a..82bd2b278a2 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -11,7 +11,6 @@
 #include <linux/percpu.h>
 #include <linux/smp.h>
 #include <linux/irq.h>
-#include <linux/irqchip/mips-gic.h>

 #include <asm/time.h>
 #include <asm/cevt-r4k.h>
@@ -40,7 +39,7 @@ int cp0_timer_irq_installed;

 irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
 {
-	const int r2 = cpu_has_mips_r2;
+	const int r2 =
cpu_has_mips_r2_r6; struct clock_event_device *cd; int cpu = smp_processor_id(); @@ -85,10 +84,7 @@ void mips_event_handler(struct clock_event_device *dev) */ static int c0_compare_int_pending(void) { -#ifdef CONFIG_MIPS_GIC - if (gic_present) - return gic_get_timer_pending(); -#endif + /* When cpu_has_mips_r2, this checks Cause.TI instead of Cause.IP7 */ return (read_c0_cause() >> cp0_compare_irq_shift) & (1ul << CAUSEB_IP); } diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S index 0384b05ab5a..55b759a0019 100644 --- a/arch/mips/kernel/cps-vec.S +++ b/arch/mips/kernel/cps-vec.S @@ -99,11 +99,11 @@ not_nmi: xori t2, t1, 0x7 beqz t2, 1f li t3, 32 - addi t1, t1, 1 + addiu t1, t1, 1 sllv t1, t3, t1 1: /* At this point t1 == I-cache sets per way */ _EXT t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ - addi t2, t2, 1 + addiu t2, t2, 1 mul t1, t1, t0 mul t1, t1, t2 @@ -126,11 +126,11 @@ icache_done: xori t2, t1, 0x7 beqz t2, 1f li t3, 32 - addi t1, t1, 1 + addiu t1, t1, 1 sllv t1, t3, t1 1: /* At this point t1 == D-cache sets per way */ _EXT t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ - addi t2, t2, 1 + addiu t2, t2, 1 mul t1, t1, t0 mul t1, t1, t2 @@ -250,7 +250,7 @@ LEAF(mips_cps_core_init) mfc0 t0, CP0_MVPCONF0 srl t0, t0, MVPCONF0_PVPE_SHIFT andi t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT) - addi t7, t0, 1 + addiu t7, t0, 1 /* If there's only 1, we're done */ beqz t0, 2f @@ -280,7 +280,7 @@ LEAF(mips_cps_core_init) mttc0 t0, CP0_TCHALT /* Next VPE */ - addi t5, t5, 1 + addiu t5, t5, 1 slt t0, t5, t7 bnez t0, 1b nop @@ -317,7 +317,7 @@ LEAF(mips_cps_boot_vpes) mfc0 t1, CP0_MVPCONF0 srl t1, t1, MVPCONF0_PVPE_SHIFT andi t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT - addi t1, t1, 1 + addiu t1, t1, 1 /* Calculate a mask for the VPE ID from EBase.CPUNum */ clz t1, t1 @@ -424,7 +424,7 @@ LEAF(mips_cps_boot_vpes) /* Next VPE */ 2: srl t6, t6, 1 - addi t5, t5, 1 + addiu t5, t5, 1 bnez t6, 1b nop diff --git a/arch/mips/kernel/cpu-bugs64.c b/arch/mips/kernel/cpu-bugs64.c index 2d80b5f1aea..09f4034f239 100644 --- a/arch/mips/kernel/cpu-bugs64.c +++ b/arch/mips/kernel/cpu-bugs64.c @@ -244,7 +244,7 @@ static inline void check_daddi(void) panic(bug64hit, !DADDI_WAR ? daddiwar : nowar); } -int daddiu_bug = -1; +int daddiu_bug = config_enabled(CONFIG_CPU_MIPSR6) ? 
0 : -1; static inline void check_daddiu(void) { @@ -314,11 +314,14 @@ static inline void check_daddiu(void) void __init check_bugs64_early(void) { - check_mult_sh(); - check_daddiu(); + if (!config_enabled(CONFIG_CPU_MIPSR6)) { + check_mult_sh(); + check_daddiu(); + } } void __init check_bugs64(void) { - check_daddi(); + if (!config_enabled(CONFIG_CPU_MIPSR6)) + check_daddi(); } diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 5342674842f..48dfb9de853 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -237,6 +237,13 @@ static void set_isa(struct cpuinfo_mips *c, unsigned int isa) c->isa_level |= MIPS_CPU_ISA_II | MIPS_CPU_ISA_III; break; + /* R6 incompatible with everything else */ + case MIPS_CPU_ISA_M64R6: + c->isa_level |= MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6; + case MIPS_CPU_ISA_M32R6: + c->isa_level |= MIPS_CPU_ISA_M32R6; + /* Break here so we don't add incompatible ISAs */ + break; case MIPS_CPU_ISA_M32R2: c->isa_level |= MIPS_CPU_ISA_M32R2; case MIPS_CPU_ISA_M32R1: @@ -326,6 +333,9 @@ static inline unsigned int decode_config0(struct cpuinfo_mips *c) case 1: set_isa(c, MIPS_CPU_ISA_M32R2); break; + case 2: + set_isa(c, MIPS_CPU_ISA_M32R6); + break; default: goto unknown; } @@ -338,6 +348,9 @@ static inline unsigned int decode_config0(struct cpuinfo_mips *c) case 1: set_isa(c, MIPS_CPU_ISA_M64R2); break; + case 2: + set_isa(c, MIPS_CPU_ISA_M64R6); + break; default: goto unknown; } @@ -424,8 +437,10 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c) if (config3 & MIPS_CONF3_MSA) c->ases |= MIPS_ASE_MSA; /* Only tested on 32-bit cores */ - if ((config3 & MIPS_CONF3_PW) && config_enabled(CONFIG_32BIT)) + if ((config3 & MIPS_CONF3_PW) && config_enabled(CONFIG_32BIT)) { + c->htw_seq = 0; c->options |= MIPS_CPU_HTW; + } return config3 & MIPS_CONF_M; } @@ -499,6 +514,8 @@ static inline unsigned int decode_config5(struct cpuinfo_mips *c) c->options |= MIPS_CPU_EVA; if (config5 & MIPS_CONF5_MRP) c->options |= MIPS_CPU_MAAR; + if (config5 & MIPS_CONF5_LLB) + c->options |= MIPS_CPU_RW_LLB; return config5 & MIPS_CONF_M; } @@ -533,7 +550,7 @@ static void decode_configs(struct cpuinfo_mips *c) if (cpu_has_rixi) { /* Enable the RIXI exceptions */ - write_c0_pagegrain(read_c0_pagegrain() | PG_IEC); + set_c0_pagegrain(PG_IEC); back_to_back_c0_hazard(); /* Verify the IEC bit is set */ if (read_c0_pagegrain() & PG_IEC) @@ -541,7 +558,7 @@ static void decode_configs(struct cpuinfo_mips *c) } #ifndef CONFIG_MIPS_CPS - if (cpu_has_mips_r2) { + if (cpu_has_mips_r2_r6) { c->core = get_ebase_cpunum(); if (cpu_has_mipsmt) c->core >>= fls(core_nvpes()) - 1; @@ -896,6 +913,11 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu) { c->writecombine = _CACHE_UNCACHED_ACCELERATED; switch (c->processor_id & PRID_IMP_MASK) { + case PRID_IMP_QEMU_GENERIC: + c->writecombine = _CACHE_UNCACHED; + c->cputype = CPU_QEMU_GENERIC; + __cpu_name[cpu] = "MIPS GENERIC QEMU"; + break; case PRID_IMP_4KC: c->cputype = CPU_4KC; c->writecombine = _CACHE_UNCACHED; @@ -1345,8 +1367,7 @@ void cpu_probe(void) if (c->options & MIPS_CPU_FPU) { c->fpu_id = cpu_get_fpu_id(); - if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | - MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) { + if (c->isa_level & cpu_has_mips_r) { if (c->fpu_id & MIPS_FPIR_3D) c->ases |= MIPS_ASE_MIPS3D; if (c->fpu_id & MIPS_FPIR_FREP) @@ -1354,7 +1375,7 @@ void cpu_probe(void) } } - if (cpu_has_mips_r2) { + if (cpu_has_mips_r2_r6) { c->srsets = ((read_c0_srsctl() 
>> 26) & 0x0f) + 1;
		/* R2 has Performance Counter Interrupt indicator */
		c->options |= MIPS_CPU_PCI;

diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c
index a5b5b56485c..d2c09f6475c 100644
--- a/arch/mips/kernel/elf.c
+++ b/arch/mips/kernel/elf.c
@@ -11,29 +11,112 @@
 #include <linux/elf.h>
 #include <linux/sched.h>

+/* FPU modes */
 enum {
-	FP_ERROR = -1,
-	FP_DOUBLE_64A = -2,
+	FP_FRE,
+	FP_FR0,
+	FP_FR1,
 };

+/**
+ * struct mode_req - ABI FPU mode requirements
+ * @single:	The program being loaded needs an FPU but it will only issue
+ *		single precision instructions, meaning that it can execute in
+ *		either FR0 or FR1.
+ * @soft:	The soft(-float) requirement means that the program being
+ *		loaded has no FPU dependency at all (i.e. it has no
+ *		FPU instructions).
+ * @fr1:	The program being loaded depends on the FPU being in FR=1 mode.
+ * @frdefault:	The program being loaded depends on the default FPU mode.
+ *		That is FR0 for O32 and FR1 for N32/N64.
+ * @fre:	The program being loaded depends on an FPU with FRE=1. This mode
+ *		is a bridge which uses FR=1 whilst still being able to maintain
+ *		full compatibility with pre-existing code using the O32 FP32
+ *		ABI.
+ *
+ * More information about the FP ABIs can be found here:
+ *
+ * https://dmz-portal.mips.com/wiki/MIPS_O32_ABI_-_FR0_and_FR1_Interlinking#10.4.1._Basic_mode_set-up
+ *
+ */
+
+struct mode_req {
+	bool single;
+	bool soft;
+	bool fr1;
+	bool frdefault;
+	bool fre;
+};
+
+static const struct mode_req fpu_reqs[] = {
+	[MIPS_ABI_FP_ANY]    = { true,  true,  true,  true,  true  },
+	[MIPS_ABI_FP_DOUBLE] = { false, false, false, true,  true  },
+	[MIPS_ABI_FP_SINGLE] = { true,  false, false, false, false },
+	[MIPS_ABI_FP_SOFT]   = { false, true,  false, false, false },
+	[MIPS_ABI_FP_OLD_64] = { false, false, false, false, false },
+	[MIPS_ABI_FP_XX]     = { false, false, true,  true,  true  },
+	[MIPS_ABI_FP_64]     = { false, false, true,  false, false },
+	[MIPS_ABI_FP_64A]    = { false, false, true,  false, true  }
+};
+
+/*
+ * Mode requirements when .MIPS.abiflags is not present in the ELF.
+ * Not present means that everything is acceptable except FR1.
+ */
+static struct mode_req none_req = { true, true, false, true, true };
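/*
 * A worked pairing, using the fpu_reqs[] rows above (illustrative
 * only): an FPXX program linked against an FP64A interpreter.
 *
 *	prog   = fpu_reqs[MIPS_ABI_FP_XX];	// fr1, frdefault, fre
 *	interp = fpu_reqs[MIPS_ABI_FP_64A];	// fr1, fre
 *
 * ANDing the two requirement sets field by field, as arch_check_elf()
 * does below, leaves only fr1 and fre set. The decision rules then
 * select FP_FR1 (the prog_req.fr1 branch), since frdefault is false
 * and FRE alone is not required.
 */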
+
 int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
		     bool is_interp, struct arch_elf_state *state)
 {
-	struct elf32_hdr *ehdr = _ehdr;
-	struct elf32_phdr *phdr = _phdr;
+	struct elf32_hdr *ehdr32 = _ehdr;
+	struct elf32_phdr *phdr32 = _phdr;
+	struct elf64_phdr *phdr64 = _phdr;
	struct mips_elf_abiflags_v0 abiflags;
	int ret;

-	if (config_enabled(CONFIG_64BIT) &&
-	    (ehdr->e_ident[EI_CLASS] != ELFCLASS32))
-		return 0;
-	if (phdr->p_type != PT_MIPS_ABIFLAGS)
-		return 0;
-	if (phdr->p_filesz < sizeof(abiflags))
-		return -EINVAL;
+	/* Let's see if this is an O32 ELF */
+	if (ehdr32->e_ident[EI_CLASS] == ELFCLASS32) {
+		/* FR = 1 for N32 */
+		if (ehdr32->e_flags & EF_MIPS_ABI2)
+			state->overall_fp_mode = FP_FR1;
+		else
+			/* Set a good default FPU mode for O32 */
+			state->overall_fp_mode = cpu_has_mips_r6 ?
+				FP_FRE : FP_FR0;
+
+		if (ehdr32->e_flags & EF_MIPS_FP64) {
+			/*
+			 * Set MIPS_ABI_FP_OLD_64 for EF_MIPS_FP64. We will
+			 * override it later if needed
+			 */
+			if (is_interp)
+				state->interp_fp_abi = MIPS_ABI_FP_OLD_64;
+			else
+				state->fp_abi = MIPS_ABI_FP_OLD_64;
+		}
+		if (phdr32->p_type != PT_MIPS_ABIFLAGS)
+			return 0;
+
+		if (phdr32->p_filesz < sizeof(abiflags))
+			return -EINVAL;
+
+		ret = kernel_read(elf, phdr32->p_offset,
+				  (char *)&abiflags,
+				  sizeof(abiflags));
+	} else {
+		/* FR=1 is really the only option for 64-bit */
+		state->overall_fp_mode = FP_FR1;
+
+		if (phdr64->p_type != PT_MIPS_ABIFLAGS)
+			return 0;
+		if (phdr64->p_filesz < sizeof(abiflags))
+			return -EINVAL;
+
+		ret = kernel_read(elf, phdr64->p_offset,
+				  (char *)&abiflags,
+				  sizeof(abiflags));
+	}

-	ret = kernel_read(elf, phdr->p_offset, (char *)&abiflags,
-			  sizeof(abiflags));
	if (ret < 0)
		return ret;
	if (ret != sizeof(abiflags))
@@ -48,35 +131,30 @@ int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
	return 0;
 }

-static inline unsigned get_fp_abi(struct elf32_hdr *ehdr, int in_abi)
+static inline unsigned get_fp_abi(int in_abi)
 {
	/* If the ABI requirement is provided, simply return that */
-	if (in_abi != -1)
+	if (in_abi != MIPS_ABI_FP_UNKNOWN)
		return in_abi;

-	/* If the EF_MIPS_FP64 flag was set, return MIPS_ABI_FP_64 */
-	if (ehdr->e_flags & EF_MIPS_FP64)
-		return MIPS_ABI_FP_64;
-
-	/* Default to MIPS_ABI_FP_DOUBLE */
-	return MIPS_ABI_FP_DOUBLE;
+	/* Unknown ABI */
+	return MIPS_ABI_FP_UNKNOWN;
 }

 int arch_check_elf(void *_ehdr, bool has_interpreter,
		   struct arch_elf_state *state)
 {
	struct elf32_hdr *ehdr = _ehdr;
-	unsigned fp_abi, interp_fp_abi, abi0, abi1;
+	struct mode_req prog_req, interp_req;
+	int fp_abi, interp_fp_abi, abi0, abi1, max_abi;

-	/* Ignore non-O32 binaries */
-	if (config_enabled(CONFIG_64BIT) &&
-	    (ehdr->e_ident[EI_CLASS] != ELFCLASS32))
+	if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
		return 0;

-	fp_abi = get_fp_abi(ehdr, state->fp_abi);
+	fp_abi = get_fp_abi(state->fp_abi);

	if (has_interpreter) {
-		interp_fp_abi = get_fp_abi(ehdr, state->interp_fp_abi);
+		interp_fp_abi = get_fp_abi(state->interp_fp_abi);

		abi0 = min(fp_abi, interp_fp_abi);
		abi1 = max(fp_abi, interp_fp_abi);
@@ -84,108 +162,103 @@ int arch_check_elf(void *_ehdr, bool has_interpreter,
		abi0 = abi1 = fp_abi;
	}

+	/* ABI limits. O32 = FP_64A, N32/N64 = FP_SOFT */
+	max_abi = ((ehdr->e_ident[EI_CLASS] == ELFCLASS32) &&
+		   (!(ehdr->e_flags & EF_MIPS_ABI2))) ?
+		MIPS_ABI_FP_64A : MIPS_ABI_FP_SOFT;

-	switch (state->overall_abi) {
-	case MIPS_ABI_FP_64:
-	case MIPS_ABI_FP_64A:
-	case FP_DOUBLE_64A:
-		if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
-			return -ELIBBAD;
-		break;
+	if ((abi0 > max_abi && abi0 != MIPS_ABI_FP_UNKNOWN) ||
+	    (abi1 > max_abi && abi1 != MIPS_ABI_FP_UNKNOWN))
+		return -ELIBBAD;
+
+	/* It's time to determine the FPU mode requirements */
+	prog_req = (abi0 == MIPS_ABI_FP_UNKNOWN) ? none_req : fpu_reqs[abi0];
+	interp_req = (abi1 == MIPS_ABI_FP_UNKNOWN) ? none_req : fpu_reqs[abi1];

-	case FP_ERROR:
+	/*
+	 * Check whether the program's and interp's ABIs have a matching FPU
+	 * mode requirement.
+	 */
+	prog_req.single = interp_req.single && prog_req.single;
+	prog_req.soft = interp_req.soft && prog_req.soft;
+	prog_req.fr1 = interp_req.fr1 && prog_req.fr1;
+	prog_req.frdefault = interp_req.frdefault && prog_req.frdefault;
+	prog_req.fre = interp_req.fre && prog_req.fre;
+
+	/*
+	 * Determine the desired FPU mode
+	 *
+	 * Decision making:
+	 *
+	 * - We want FP_FRE if FRE=1 and both FR=1 and FR=0 are false. This
+	 *   means that we have a combination of program and interpreter
+	 *   that inherently requires the hybrid FP mode.
+	 * - If FR1 and FRDEFAULT are both true, we hit the any-ABI or
+	 *   fpxx case. In any-ABI (or no-ABI) there are no FPU
+	 *   instructions so we don't care about the mode; we simply use
+	 *   the one preferred by the hardware. In the fpxx case, that ABI can
+	 *   handle both FR=1 and FR=0, so, again, we simply choose the one
+	 *   preferred by the hardware. Next, if we only use single-precision
+	 *   FPU instructions and the default ABI FPU mode is not good
+	 *   (i.e. a single + any-ABI combination), shown by single being
+	 *   true but frdefault being false, then we again set the FPU mode
+	 *   to the one preferred by the hardware.
+	 * - We want FP_FR1 if that's the only matching mode and the default
+	 *   one is not good.
+	 * - Return with -ELIBBAD if we can't find a matching FPU mode.
+	 */
+	if (prog_req.fre && !prog_req.frdefault && !prog_req.fr1)
+		state->overall_fp_mode = FP_FRE;
+	else if ((prog_req.fr1 && prog_req.frdefault) ||
+		 (prog_req.single && !prog_req.frdefault))
+		/* Make sure 64-bit MIPS III/IV/64R1 will not pick FR1 */
+		state->overall_fp_mode = ((current_cpu_data.fpu_id & MIPS_FPIR_F64) &&
+					  cpu_has_mips_r2_r6) ?
+					  FP_FR1 : FP_FR0;
+	else if (prog_req.fr1)
+		state->overall_fp_mode = FP_FR1;
+	else if (!prog_req.fre && !prog_req.frdefault &&
+		 !prog_req.fr1 && !prog_req.single && !prog_req.soft)
		return -ELIBBAD;
-	}

	return 0;
 }

-void mips_set_personality_fp(struct arch_elf_state *state)
+static inline void set_thread_fp_mode(int hybrid, int regs32)
 {
-	if (config_enabled(CONFIG_FP32XX_HYBRID_FPRS)) {
-		/*
-		 * Use hybrid FPRs for all code which can correctly execute
-		 * with that mode.
- */ - switch (state->overall_abi) { - case MIPS_ABI_FP_DOUBLE: - case MIPS_ABI_FP_SINGLE: - case MIPS_ABI_FP_SOFT: - case MIPS_ABI_FP_XX: - case MIPS_ABI_FP_ANY: - /* FR=1, FRE=1 */ - clear_thread_flag(TIF_32BIT_FPREGS); - set_thread_flag(TIF_HYBRID_FPREGS); - return; - } - } - - switch (state->overall_abi) { - case MIPS_ABI_FP_DOUBLE: - case MIPS_ABI_FP_SINGLE: - case MIPS_ABI_FP_SOFT: - /* FR=0 */ - set_thread_flag(TIF_32BIT_FPREGS); + if (hybrid) + set_thread_flag(TIF_HYBRID_FPREGS); + else clear_thread_flag(TIF_HYBRID_FPREGS); - break; - - case FP_DOUBLE_64A: - /* FR=1, FRE=1 */ + if (regs32) + set_thread_flag(TIF_32BIT_FPREGS); + else clear_thread_flag(TIF_32BIT_FPREGS); - set_thread_flag(TIF_HYBRID_FPREGS); - break; +} - case MIPS_ABI_FP_64: - case MIPS_ABI_FP_64A: - /* FR=1, FRE=0 */ - clear_thread_flag(TIF_32BIT_FPREGS); - clear_thread_flag(TIF_HYBRID_FPREGS); - break; +void mips_set_personality_fp(struct arch_elf_state *state) +{ + /* + * This function is only ever called for O32 ELFs so we should + * not be worried about N32/N64 binaries. + */ - case MIPS_ABI_FP_XX: - case MIPS_ABI_FP_ANY: - if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT)) - set_thread_flag(TIF_32BIT_FPREGS); - else - clear_thread_flag(TIF_32BIT_FPREGS); + if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT)) + return; - clear_thread_flag(TIF_HYBRID_FPREGS); + switch (state->overall_fp_mode) { + case FP_FRE: + set_thread_fp_mode(1, 0); + break; + case FP_FR0: + set_thread_fp_mode(0, 1); + break; + case FP_FR1: + set_thread_fp_mode(0, 0); break; - default: - case FP_ERROR: BUG(); } } diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S index 4353d323f01..af41ba6db96 100644 --- a/arch/mips/kernel/entry.S +++ b/arch/mips/kernel/entry.S @@ -46,6 +46,11 @@ resume_userspace: local_irq_disable # make sure we dont miss an # interrupt setting need_resched # between sampling and return +#ifdef CONFIG_MIPSR2_TO_R6_EMULATOR + lw k0, TI_R2_EMUL_RET($28) + bnez k0, restore_all_from_r2_emul +#endif + LONG_L a2, TI_FLAGS($28) # current->work andi t0, a2, _TIF_WORK_MASK # (ignoring syscall_trace) bnez t0, work_pending @@ -114,6 +119,19 @@ restore_partial: # restore partial frame RESTORE_SP_AND_RET .set at +#ifdef CONFIG_MIPSR2_TO_R6_EMULATOR +restore_all_from_r2_emul: # restore full frame + .set noat + sw zero, TI_R2_EMUL_RET($28) # reset it + RESTORE_TEMP + RESTORE_AT + RESTORE_STATIC + RESTORE_SOME + LONG_L sp, PT_R29(sp) + eretnc + .set at +#endif + work_pending: andi t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS beqz t0, work_notifysig @@ -158,7 +176,8 @@ syscall_exit_work: jal syscall_trace_leave b resume_userspace -#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT) +#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) || \ + defined(CONFIG_MIPS_MT) /* * MIPS32R2 Instruction Hazard Barrier - must be called @@ -171,4 +190,4 @@ LEAF(mips_ihb) nop END(mips_ihb) -#endif /* CONFIG_CPU_MIPSR2 or CONFIG_MIPS_MT */ +#endif /* CONFIG_CPU_MIPSR2 or CONFIG_CPU_MIPSR6 or CONFIG_MIPS_MT */ diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S index a5e26dd9059..2ebaabe3af1 100644 --- a/arch/mips/kernel/genex.S +++ b/arch/mips/kernel/genex.S @@ -125,7 +125,7 @@ LEAF(__r4k_wait) nop nop #endif - .set arch=r4000 + .set MIPS_ISA_ARCH_LEVEL_RAW wait /* end of rollback region (the region size must be power of two) */ 1: diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c index 0b9082b6b68..368c88b7eb6 100644 --- a/arch/mips/kernel/idle.c +++ b/arch/mips/kernel/idle.c @@ -186,6 +186,7 
@@ void __init check_wait(void) case CPU_PROAPTIV: case CPU_P5600: case CPU_M5150: + case CPU_QEMU_GENERIC: cpu_wait = r4k_wait; if (read_c0_config7() & MIPS_CONF7_WII) cpu_wait = r4k_wait_irqoff; diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c new file mode 100644 index 00000000000..64d17e41093 --- /dev/null +++ b/arch/mips/kernel/mips-r2-to-r6-emul.c @@ -0,0 +1,2378 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2014 Imagination Technologies Ltd. + * Author: Leonid Yegoshin <Leonid.Yegoshin@imgtec.com> + * Author: Markos Chandras <markos.chandras@imgtec.com> + * + * MIPS R2 user space instruction emulator for MIPS R6 + * + */ +#include <linux/bug.h> +#include <linux/compiler.h> +#include <linux/debugfs.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/ptrace.h> +#include <linux/seq_file.h> + +#include <asm/asm.h> +#include <asm/branch.h> +#include <asm/break.h> +#include <asm/fpu.h> +#include <asm/fpu_emulator.h> +#include <asm/inst.h> +#include <asm/mips-r2-to-r6-emul.h> +#include <asm/local.h> +#include <asm/ptrace.h> +#include <asm/uaccess.h> + +#ifdef CONFIG_64BIT +#define ADDIU "daddiu " +#define INS "dins " +#define EXT "dext " +#else +#define ADDIU "addiu " +#define INS "ins " +#define EXT "ext " +#endif /* CONFIG_64BIT */ + +#define SB "sb " +#define LB "lb " +#define LL "ll " +#define SC "sc " + +DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2emustats); +DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2bdemustats); +DEFINE_PER_CPU(struct mips_r2br_emulator_stats, mipsr2bremustats); + +extern const unsigned int fpucondbit[8]; + +#define MIPS_R2_EMUL_TOTAL_PASS 10 + +int mipsr2_emulation = 0; + +static int __init mipsr2emu_enable(char *s) +{ + mipsr2_emulation = 1; + + pr_info("MIPS R2-to-R6 Emulator Enabled!"); + + return 1; +} +__setup("mipsr2emu", mipsr2emu_enable); + +/** + * mipsr6_emul - Emulate some frequent R2/R5/R6 instructions in delay slot + * for performance instead of the traditional way of using a stack trampoline + * which is rather slow. 
+ * @regs: Process register set
+ * @ir: Instruction
+ */
+static inline int mipsr6_emul(struct pt_regs *regs, u32 ir)
+{
+	switch (MIPSInst_OPCODE(ir)) {
+	case addiu_op:
+		if (MIPSInst_RT(ir))
+			regs->regs[MIPSInst_RT(ir)] =
+				(s32)regs->regs[MIPSInst_RS(ir)] +
+				(s32)MIPSInst_SIMM(ir);
+		return 0;
+	case daddiu_op:
+		if (config_enabled(CONFIG_32BIT))
+			break;
+
+		if (MIPSInst_RT(ir))
+			regs->regs[MIPSInst_RT(ir)] =
+				(s64)regs->regs[MIPSInst_RS(ir)] +
+				(s64)MIPSInst_SIMM(ir);
+		return 0;
+	case lwc1_op:
+	case swc1_op:
+	case cop1_op:
+	case cop1x_op:
+		/* FPU instructions in delay slot */
+		return -SIGFPE;
+	case spec_op:
+		switch (MIPSInst_FUNC(ir)) {
+		case or_op:
+			if (MIPSInst_RD(ir))
+				regs->regs[MIPSInst_RD(ir)] =
+					regs->regs[MIPSInst_RS(ir)] |
+					regs->regs[MIPSInst_RT(ir)];
+			return 0;
+		case sll_op:
+			if (MIPSInst_RS(ir))
+				break;
+
+			if (MIPSInst_RD(ir))
+				regs->regs[MIPSInst_RD(ir)] =
+					(s32)(((u32)regs->regs[MIPSInst_RT(ir)]) <<
+						MIPSInst_FD(ir));
+			return 0;
+		case srl_op:
+			if (MIPSInst_RS(ir))
+				break;
+
+			if (MIPSInst_RD(ir))
+				regs->regs[MIPSInst_RD(ir)] =
+					(s32)(((u32)regs->regs[MIPSInst_RT(ir)]) >>
+						MIPSInst_FD(ir));
+			return 0;
+		case addu_op:
+			if (MIPSInst_FD(ir))
+				break;
+
+			if (MIPSInst_RD(ir))
+				regs->regs[MIPSInst_RD(ir)] =
+					(s32)((u32)regs->regs[MIPSInst_RS(ir)] +
+					      (u32)regs->regs[MIPSInst_RT(ir)]);
+			return 0;
+		case subu_op:
+			if (MIPSInst_FD(ir))
+				break;
+
+			if (MIPSInst_RD(ir))
+				regs->regs[MIPSInst_RD(ir)] =
+					(s32)((u32)regs->regs[MIPSInst_RS(ir)] -
+					      (u32)regs->regs[MIPSInst_RT(ir)]);
+			return 0;
+		case dsll_op:
+			if (config_enabled(CONFIG_32BIT) || MIPSInst_RS(ir))
+				break;
+
+			if (MIPSInst_RD(ir))
+				regs->regs[MIPSInst_RD(ir)] =
+					(s64)(((u64)regs->regs[MIPSInst_RT(ir)]) <<
+						MIPSInst_FD(ir));
+			return 0;
+		case dsrl_op:
+			if (config_enabled(CONFIG_32BIT) || MIPSInst_RS(ir))
+				break;
+
+			if (MIPSInst_RD(ir))
+				regs->regs[MIPSInst_RD(ir)] =
+					(s64)(((u64)regs->regs[MIPSInst_RT(ir)]) >>
+						MIPSInst_FD(ir));
+			return 0;
+		case daddu_op:
+			if (config_enabled(CONFIG_32BIT) || MIPSInst_FD(ir))
+				break;
+
+			if (MIPSInst_RD(ir))
+				regs->regs[MIPSInst_RD(ir)] =
+					(u64)regs->regs[MIPSInst_RS(ir)] +
+					(u64)regs->regs[MIPSInst_RT(ir)];
+			return 0;
+		case dsubu_op:
+			if (config_enabled(CONFIG_32BIT) || MIPSInst_FD(ir))
+				break;
+
+			if (MIPSInst_RD(ir))
+				regs->regs[MIPSInst_RD(ir)] =
+					(s64)((u64)regs->regs[MIPSInst_RS(ir)] -
+					      (u64)regs->regs[MIPSInst_RT(ir)]);
+			return 0;
+		}
+		break;
+	default:
+		pr_debug("No fastpath BD emulation for instruction 0x%08x (op: %02x)\n",
+			 ir, MIPSInst_OPCODE(ir));
+	}
+
+	return SIGILL;
+}
+
+/**
+ * movf_func - Emulate a MOVF instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int movf_func(struct pt_regs *regs, u32 ir)
+{
+	u32 csr;
+	u32 cond;
+
+	csr = current->thread.fpu.fcr31;
+	cond = fpucondbit[MIPSInst_RT(ir) >> 2];
+	if (((csr & cond) == 0) && MIPSInst_RD(ir))
+		regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
+	MIPS_R2_STATS(movs);
+	return 0;
+}
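/*
 * Worked example for the fpucondbit[] lookup above (the table shared
 * with the FPU emulator): for "movf $2, $3, $fcc2" the rt field
 * carries cc << 2, so MIPSInst_RT(ir) >> 2 == 2 and fpucondbit[2]
 * selects FCSR bit 26 ($fcc0 lives at FCSR bit 23, $fcc1..$fcc7 at
 * bits 25..31).
 */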
+
+/**
+ * movt_func - Emulate a MOVT instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int movt_func(struct pt_regs *regs, u32 ir)
+{
+	u32 csr;
+	u32 cond;
+
+	csr = current->thread.fpu.fcr31;
+	cond = fpucondbit[MIPSInst_RT(ir) >> 2];
+
+	if (((csr & cond) != 0) && MIPSInst_RD(ir))
+		regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
+
+	MIPS_R2_STATS(movs);
+
+	return 0;
+}
+
+/**
+ * jr_func - Emulate a JR instruction.
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns SIGILL if JR was in delay slot, SIGEMT if we
+ * can't compute the EPC, SIGSEGV if we can't access the
+ * userland instruction or 0 on success.
+ */
+static int jr_func(struct pt_regs *regs, u32 ir)
+{
+	int err;
+	unsigned long cepc, epc, nepc;
+	u32 nir;
+
+	if (delay_slot(regs))
+		return SIGILL;
+
+	/* EPC after the RI/JR instruction */
+	nepc = regs->cp0_epc;
+	/* Roll back to the reserved R2 JR instruction */
+	regs->cp0_epc -= 4;
+	epc = regs->cp0_epc;
+	err = __compute_return_epc(regs);
+
+	if (err < 0)
+		return SIGEMT;
+
+	/* Computed EPC */
+	cepc = regs->cp0_epc;
+
+	/* Get DS instruction */
+	err = __get_user(nir, (u32 __user *)nepc);
+	if (err)
+		return SIGSEGV;
+
+	MIPS_R2BR_STATS(jrs);
+
+	/* If nir == 0(NOP), then nothing else to do */
+	if (nir) {
+		/*
+		 * Negative err means FPU instruction in BD-slot,
+		 * Zero err means 'BD-slot emulation done'
+		 * For anything else we go back to trampoline emulation.
+		 */
+		err = mipsr6_emul(regs, nir);
+		if (err > 0) {
+			regs->cp0_epc = nepc;
+			err = mips_dsemul(regs, nir, cepc);
+			if (err == SIGILL)
+				err = SIGEMT;
+			MIPS_R2_STATS(dsemul);
+		}
+	}
+
+	return err;
+}
+
+/**
+ * movz_func - Emulate a MOVZ instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int movz_func(struct pt_regs *regs, u32 ir)
+{
+	if (((regs->regs[MIPSInst_RT(ir)]) == 0) && MIPSInst_RD(ir))
+		regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
+	MIPS_R2_STATS(movs);
+
+	return 0;
+}
+
+/**
+ * movn_func - Emulate a MOVN instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int movn_func(struct pt_regs *regs, u32 ir)
+{
+	if (((regs->regs[MIPSInst_RT(ir)]) != 0) && MIPSInst_RD(ir))
+		regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
+	MIPS_R2_STATS(movs);
+
+	return 0;
+}
+
+/**
+ * mfhi_func - Emulate a MFHI instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int mfhi_func(struct pt_regs *regs, u32 ir)
+{
+	if (MIPSInst_RD(ir))
+		regs->regs[MIPSInst_RD(ir)] = regs->hi;
+
+	MIPS_R2_STATS(hilo);
+
+	return 0;
+}
+
+/**
+ * mthi_func - Emulate a MTHI instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int mthi_func(struct pt_regs *regs, u32 ir)
+{
+	regs->hi = regs->regs[MIPSInst_RS(ir)];
+
+	MIPS_R2_STATS(hilo);
+
+	return 0;
+}
+
+/**
+ * mflo_func - Emulate a MFLO instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int mflo_func(struct pt_regs *regs, u32 ir)
+{
+	if (MIPSInst_RD(ir))
+		regs->regs[MIPSInst_RD(ir)] = regs->lo;
+
+	MIPS_R2_STATS(hilo);
+
+	return 0;
+}
+
+/**
+ * mtlo_func - Emulate a MTLO instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */ +static int mtlo_func(struct pt_regs *regs, u32 ir) +{ + regs->lo = regs->regs[MIPSInst_RS(ir)]; + + MIPS_R2_STATS(hilo); + + return 0; +} + +/** + * mult_func - Emulate a MULT instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 since it always succeeds. + */ +static int mult_func(struct pt_regs *regs, u32 ir) +{ + s64 res; + s32 rt, rs; + + rt = regs->regs[MIPSInst_RT(ir)]; + rs = regs->regs[MIPSInst_RS(ir)]; + res = (s64)rt * (s64)rs; + + rs = res; + regs->lo = (s64)rs; + rt = res >> 32; + res = (s64)rt; + regs->hi = res; + + MIPS_R2_STATS(muls); + + return 0; +} + +/** + * multu_func - Emulate a MULTU instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 since it always succeeds. + */ +static int multu_func(struct pt_regs *regs, u32 ir) +{ + u64 res; + u32 rt, rs; + + rt = regs->regs[MIPSInst_RT(ir)]; + rs = regs->regs[MIPSInst_RS(ir)]; + res = (u64)rt * (u64)rs; + rt = res; + regs->lo = (s64)rt; + regs->hi = (s64)(res >> 32); + + MIPS_R2_STATS(muls); + + return 0; +} + +/** + * div_func - Emulate a DIV instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 since it always succeeds. + */ +static int div_func(struct pt_regs *regs, u32 ir) +{ + s32 rt, rs; + + rt = regs->regs[MIPSInst_RT(ir)]; + rs = regs->regs[MIPSInst_RS(ir)]; + + regs->lo = (s64)(rs / rt); + regs->hi = (s64)(rs % rt); + + MIPS_R2_STATS(divs); + + return 0; +} + +/** + * divu_func - Emulate a DIVU instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 since it always succeeds. + */ +static int divu_func(struct pt_regs *regs, u32 ir) +{ + u32 rt, rs; + + rt = regs->regs[MIPSInst_RT(ir)]; + rs = regs->regs[MIPSInst_RS(ir)]; + + regs->lo = (s64)(rs / rt); + regs->hi = (s64)(rs % rt); + + MIPS_R2_STATS(divs); + + return 0; +} + +/** + * dmult_func - Emulate a DMULT instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 on success or SIGILL for 32-bit kernels. + */ +static int dmult_func(struct pt_regs *regs, u32 ir) +{ + s64 res; + s64 rt, rs; + + if (config_enabled(CONFIG_32BIT)) + return SIGILL; + + rt = regs->regs[MIPSInst_RT(ir)]; + rs = regs->regs[MIPSInst_RS(ir)]; + res = rt * rs; + + regs->lo = res; + __asm__ __volatile__( + "dmuh %0, %1, %2\t\n" + : "=r"(res) + : "r"(rt), "r"(rs)); + + regs->hi = res; + + MIPS_R2_STATS(muls); + + return 0; +} + +/** + * dmultu_func - Emulate a DMULTU instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 on success or SIGILL for 32-bit kernels. + */ +static int dmultu_func(struct pt_regs *regs, u32 ir) +{ + u64 res; + u64 rt, rs; + + if (config_enabled(CONFIG_32BIT)) + return SIGILL; + + rt = regs->regs[MIPSInst_RT(ir)]; + rs = regs->regs[MIPSInst_RS(ir)]; + res = rt * rs; + + regs->lo = res; + __asm__ __volatile__( + "dmuhu %0, %1, %2\t\n" + : "=r"(res) + : "r"(rt), "r"(rs)); + + regs->hi = res; + + MIPS_R2_STATS(muls); + + return 0; +} + +/** + * ddiv_func - Emulate a DDIV instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 on success or SIGILL for 32-bit kernels. 
+ */
+static int ddiv_func(struct pt_regs *regs, u32 ir)
+{
+	s64 rt, rs;
+
+	if (config_enabled(CONFIG_32BIT))
+		return SIGILL;
+
+	rt = regs->regs[MIPSInst_RT(ir)];
+	rs = regs->regs[MIPSInst_RS(ir)];
+
+	regs->lo = rs / rt;
+	regs->hi = rs % rt;
+
+	MIPS_R2_STATS(divs);
+
+	return 0;
+}
+
+/**
+ * ddivu_func - Emulate a DDIVU instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 on success or SIGILL for 32-bit kernels.
+ */
+static int ddivu_func(struct pt_regs *regs, u32 ir)
+{
+	u64 rt, rs;
+
+	if (config_enabled(CONFIG_32BIT))
+		return SIGILL;
+
+	rt = regs->regs[MIPSInst_RT(ir)];
+	rs = regs->regs[MIPSInst_RS(ir)];
+
+	regs->lo = rs / rt;
+	regs->hi = rs % rt;
+
+	MIPS_R2_STATS(divs);
+
+	return 0;
+}
+
+/* R6 removed instructions for the SPECIAL opcode */
+static struct r2_decoder_table spec_op_table[] = {
+	{ 0xfc1ff83f, 0x00000008, jr_func },
+	{ 0xfc00ffff, 0x00000018, mult_func },
+	{ 0xfc00ffff, 0x00000019, multu_func },
+	{ 0xfc00ffff, 0x0000001c, dmult_func },
+	{ 0xfc00ffff, 0x0000001d, dmultu_func },
+	{ 0xffff07ff, 0x00000010, mfhi_func },
+	{ 0xfc1fffff, 0x00000011, mthi_func },
+	{ 0xffff07ff, 0x00000012, mflo_func },
+	{ 0xfc1fffff, 0x00000013, mtlo_func },
+	{ 0xfc0307ff, 0x00000001, movf_func },
+	{ 0xfc0307ff, 0x00010001, movt_func },
+	{ 0xfc0007ff, 0x0000000a, movz_func },
+	{ 0xfc0007ff, 0x0000000b, movn_func },
+	{ 0xfc00ffff, 0x0000001a, div_func },
+	{ 0xfc00ffff, 0x0000001b, divu_func },
+	{ 0xfc00ffff, 0x0000001e, ddiv_func },
+	{ 0xfc00ffff, 0x0000001f, ddivu_func },
+	{}
+};
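/*
 * How these mask/code pairs are used: mipsr2_find_op_func() (below)
 * walks the table and calls the first handler whose masked
 * instruction bits equal the code. A standalone sketch for the MULT
 * entry above (register numbers are illustrative):
 *
 *	u32 inst = (9 << 21) | (10 << 16) | 0x18;	// mult $9, $10
 *	(inst & 0xfc00ffff) == 0x00000018		// true -> mult_func
 *
 * The 0xfc00ffff mask pins the opcode (SPECIAL), rd, sa and function
 * fields while leaving rs and rt free to vary.
 */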
+
+/**
+ * madd_func - Emulate a MADD instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int madd_func(struct pt_regs *regs, u32 ir)
+{
+	s64 res;
+	s32 rt, rs;
+
+	rt = regs->regs[MIPSInst_RT(ir)];
+	rs = regs->regs[MIPSInst_RS(ir)];
+	res = (s64)rt * (s64)rs;
+	rt = regs->hi;
+	rs = regs->lo;
+	res += ((((s64)rt) << 32) | (u32)rs);
+
+	rt = res;
+	regs->lo = (s64)rt;
+	rs = res >> 32;
+	regs->hi = (s64)rs;
+
+	MIPS_R2_STATS(dsps);
+
+	return 0;
+}
+
+/**
+ * maddu_func - Emulate a MADDU instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int maddu_func(struct pt_regs *regs, u32 ir)
+{
+	u64 res;
+	u32 rt, rs;
+
+	rt = regs->regs[MIPSInst_RT(ir)];
+	rs = regs->regs[MIPSInst_RS(ir)];
+	res = (u64)rt * (u64)rs;
+	rt = regs->hi;
+	rs = regs->lo;
+	res += ((((s64)rt) << 32) | (u32)rs);
+
+	rt = res;
+	regs->lo = (s64)rt;
+	rs = res >> 32;
+	regs->hi = (s64)rs;
+
+	MIPS_R2_STATS(dsps);
+
+	return 0;
+}
+
+/**
+ * msub_func - Emulate a MSUB instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int msub_func(struct pt_regs *regs, u32 ir)
+{
+	s64 res;
+	s32 rt, rs;
+
+	rt = regs->regs[MIPSInst_RT(ir)];
+	rs = regs->regs[MIPSInst_RS(ir)];
+	res = (s64)rt * (s64)rs;
+	rt = regs->hi;
+	rs = regs->lo;
+	res = ((((s64)rt) << 32) | (u32)rs) - res;
+
+	rt = res;
+	regs->lo = (s64)rt;
+	rs = res >> 32;
+	regs->hi = (s64)rs;
+
+	MIPS_R2_STATS(dsps);
+
+	return 0;
+}
+
+/**
+ * msubu_func - Emulate a MSUBU instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int msubu_func(struct pt_regs *regs, u32 ir)
+{
+	u64 res;
+	u32 rt, rs;
+
+	rt = regs->regs[MIPSInst_RT(ir)];
+	rs = regs->regs[MIPSInst_RS(ir)];
+	res = (u64)rt * (u64)rs;
+	rt = regs->hi;
+	rs = regs->lo;
+	res = ((((s64)rt) << 32) | (u32)rs) - res;
+
+	rt = res;
+	regs->lo = (s64)rt;
+	rs = res >> 32;
+	regs->hi = (s64)rs;
+
+	MIPS_R2_STATS(dsps);
+
+	return 0;
+}
+
+/**
+ * mul_func - Emulate a MUL instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int mul_func(struct pt_regs *regs, u32 ir)
+{
+	s64 res;
+	s32 rt, rs;
+
+	if (!MIPSInst_RD(ir))
+		return 0;
+	rt = regs->regs[MIPSInst_RT(ir)];
+	rs = regs->regs[MIPSInst_RS(ir)];
+	res = (s64)rt * (s64)rs;
+
+	rs = res;
+	regs->regs[MIPSInst_RD(ir)] = (s64)rs;
+
+	MIPS_R2_STATS(muls);
+
+	return 0;
+}
+
+/**
+ * clz_func - Emulate a CLZ instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int clz_func(struct pt_regs *regs, u32 ir)
+{
+	u32 res;
+	u32 rs;
+
+	if (!MIPSInst_RD(ir))
+		return 0;
+
+	rs = regs->regs[MIPSInst_RS(ir)];
+	__asm__ __volatile__("clz %0, %1" : "=r"(res) : "r"(rs));
+	regs->regs[MIPSInst_RD(ir)] = res;
+
+	MIPS_R2_STATS(bops);
+
+	return 0;
+}
+
+/**
+ * clo_func - Emulate a CLO instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int clo_func(struct pt_regs *regs, u32 ir)
+{
+	u32 res;
+	u32 rs;
+
+	if (!MIPSInst_RD(ir))
+		return 0;
+
+	rs = regs->regs[MIPSInst_RS(ir)];
+	__asm__ __volatile__("clo %0, %1" : "=r"(res) : "r"(rs));
+	regs->regs[MIPSInst_RD(ir)] = res;
+
+	MIPS_R2_STATS(bops);
+
+	return 0;
+}
+
+/**
+ * dclz_func - Emulate a DCLZ instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 on success or SIGILL for 32-bit kernels.
+ */
+static int dclz_func(struct pt_regs *regs, u32 ir)
+{
+	u64 res;
+	u64 rs;
+
+	if (config_enabled(CONFIG_32BIT))
+		return SIGILL;
+
+	if (!MIPSInst_RD(ir))
+		return 0;
+
+	rs = regs->regs[MIPSInst_RS(ir)];
+	__asm__ __volatile__("dclz %0, %1" : "=r"(res) : "r"(rs));
+	regs->regs[MIPSInst_RD(ir)] = res;
+
+	MIPS_R2_STATS(bops);
+
+	return 0;
+}
+
+/**
+ * dclo_func - Emulate a DCLO instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 on success or SIGILL for 32-bit kernels.
+ */ +static int dclo_func(struct pt_regs *regs, u32 ir) +{ + u64 res; + u64 rs; + + if (config_enabled(CONFIG_32BIT)) + return SIGILL; + + if (!MIPSInst_RD(ir)) + return 0; + + rs = regs->regs[MIPSInst_RS(ir)]; + __asm__ __volatile__("dclo %0, %1" : "=r"(res) : "r"(rs)); + regs->regs[MIPSInst_RD(ir)] = res; + + MIPS_R2_STATS(bops); + + return 0; +} + +/* R6 removed instructions for the SPECIAL2 opcode */ +static struct r2_decoder_table spec2_op_table[] = { + { 0xfc00ffff, 0x70000000, madd_func }, + { 0xfc00ffff, 0x70000001, maddu_func }, + { 0xfc0007ff, 0x70000002, mul_func }, + { 0xfc00ffff, 0x70000004, msub_func }, + { 0xfc00ffff, 0x70000005, msubu_func }, + { 0xfc0007ff, 0x70000020, clz_func }, + { 0xfc0007ff, 0x70000021, clo_func }, + { 0xfc0007ff, 0x70000024, dclz_func }, + { 0xfc0007ff, 0x70000025, dclo_func }, + { } +}; + +static inline int mipsr2_find_op_func(struct pt_regs *regs, u32 inst, + struct r2_decoder_table *table) +{ + struct r2_decoder_table *p; + int err; + + for (p = table; p->func; p++) { + if ((inst & p->mask) == p->code) { + err = (p->func)(regs, inst); + return err; + } + } + return SIGILL; +} + +/** + * mipsr2_decoder: Decode and emulate a MIPS R2 instruction + * @regs: Process register set + * @inst: Instruction to decode and emulate + */ +int mipsr2_decoder(struct pt_regs *regs, u32 inst) +{ + int err = 0; + unsigned long vaddr; + u32 nir; + unsigned long cpc, epc, nepc, r31, res, rs, rt; + + void __user *fault_addr = NULL; + int pass = 0; + +repeat: + r31 = regs->regs[31]; + epc = regs->cp0_epc; + err = compute_return_epc(regs); + if (err < 0) { + BUG(); + return SIGEMT; + } + pr_debug("Emulating the 0x%08x R2 instruction @ 0x%08lx (pass=%d))\n", + inst, epc, pass); + + switch (MIPSInst_OPCODE(inst)) { + case spec_op: + err = mipsr2_find_op_func(regs, inst, spec_op_table); + if (err < 0) { + /* FPU instruction under JR */ + regs->cp0_cause |= CAUSEF_BD; + goto fpu_emul; + } + break; + case spec2_op: + err = mipsr2_find_op_func(regs, inst, spec2_op_table); + break; + case bcond_op: + rt = MIPSInst_RT(inst); + rs = MIPSInst_RS(inst); + switch (rt) { + case tgei_op: + if ((long)regs->regs[rs] >= MIPSInst_SIMM(inst)) + do_trap_or_bp(regs, 0, "TGEI"); + + MIPS_R2_STATS(traps); + + break; + case tgeiu_op: + if (regs->regs[rs] >= MIPSInst_UIMM(inst)) + do_trap_or_bp(regs, 0, "TGEIU"); + + MIPS_R2_STATS(traps); + + break; + case tlti_op: + if ((long)regs->regs[rs] < MIPSInst_SIMM(inst)) + do_trap_or_bp(regs, 0, "TLTI"); + + MIPS_R2_STATS(traps); + + break; + case tltiu_op: + if (regs->regs[rs] < MIPSInst_UIMM(inst)) + do_trap_or_bp(regs, 0, "TLTIU"); + + MIPS_R2_STATS(traps); + + break; + case teqi_op: + if (regs->regs[rs] == MIPSInst_SIMM(inst)) + do_trap_or_bp(regs, 0, "TEQI"); + + MIPS_R2_STATS(traps); + + break; + case tnei_op: + if (regs->regs[rs] != MIPSInst_SIMM(inst)) + do_trap_or_bp(regs, 0, "TNEI"); + + MIPS_R2_STATS(traps); + + break; + case bltzl_op: + case bgezl_op: + case bltzall_op: + case bgezall_op: + if (delay_slot(regs)) { + err = SIGILL; + break; + } + regs->regs[31] = r31; + regs->cp0_epc = epc; + err = __compute_return_epc(regs); + if (err < 0) + return SIGEMT; + if (err != BRANCH_LIKELY_TAKEN) + break; + cpc = regs->cp0_epc; + nepc = epc + 4; + err = __get_user(nir, (u32 __user *)nepc); + if (err) { + err = SIGSEGV; + break; + } + /* + * This will probably be optimized away when + * CONFIG_DEBUG_FS is not enabled + */ + switch (rt) { + case bltzl_op: + MIPS_R2BR_STATS(bltzl); + break; + case bgezl_op: + MIPS_R2BR_STATS(bgezl); + break; + case 
bltzall_op:
+				MIPS_R2BR_STATS(bltzall);
+				break;
+			case bgezall_op:
+				MIPS_R2BR_STATS(bgezall);
+				break;
+			}
+
+			switch (MIPSInst_OPCODE(nir)) {
+			case cop1_op:
+			case cop1x_op:
+			case lwc1_op:
+			case swc1_op:
+				regs->cp0_cause |= CAUSEF_BD;
+				goto fpu_emul;
+			}
+			if (nir) {
+				err = mipsr6_emul(regs, nir);
+				if (err > 0) {
+					err = mips_dsemul(regs, nir, cpc);
+					if (err == SIGILL)
+						err = SIGEMT;
+					MIPS_R2_STATS(dsemul);
+				}
+			}
+			break;
+		case bltzal_op:
+		case bgezal_op:
+			if (delay_slot(regs)) {
+				err = SIGILL;
+				break;
+			}
+			regs->regs[31] = r31;
+			regs->cp0_epc = epc;
+			err = __compute_return_epc(regs);
+			if (err < 0)
+				return SIGEMT;
+			cpc = regs->cp0_epc;
+			nepc = epc + 4;
+			err = __get_user(nir, (u32 __user *)nepc);
+			if (err) {
+				err = SIGSEGV;
+				break;
+			}
+			/*
+			 * This will probably be optimized away when
+			 * CONFIG_DEBUG_FS is not enabled
+			 */
+			switch (rt) {
+			case bltzal_op:
+				MIPS_R2BR_STATS(bltzal);
+				break;
+			case bgezal_op:
+				MIPS_R2BR_STATS(bgezal);
+				break;
+			}
+
+			switch (MIPSInst_OPCODE(nir)) {
+			case cop1_op:
+			case cop1x_op:
+			case lwc1_op:
+			case swc1_op:
+				regs->cp0_cause |= CAUSEF_BD;
+				goto fpu_emul;
+			}
+			if (nir) {
+				err = mipsr6_emul(regs, nir);
+				if (err > 0) {
+					err = mips_dsemul(regs, nir, cpc);
+					if (err == SIGILL)
+						err = SIGEMT;
+					MIPS_R2_STATS(dsemul);
+				}
+			}
+			break;
+		default:
+			regs->regs[31] = r31;
+			regs->cp0_epc = epc;
+			err = SIGILL;
+			break;
+		}
+		break;
+
+	case beql_op:
+	case bnel_op:
+	case blezl_op:
+	case bgtzl_op:
+		if (delay_slot(regs)) {
+			err = SIGILL;
+			break;
+		}
+		regs->regs[31] = r31;
+		regs->cp0_epc = epc;
+		err = __compute_return_epc(regs);
+		if (err < 0)
+			return SIGEMT;
+		if (err != BRANCH_LIKELY_TAKEN)
+			break;
+		cpc = regs->cp0_epc;
+		nepc = epc + 4;
+		err = __get_user(nir, (u32 __user *)nepc);
+		if (err) {
+			err = SIGSEGV;
+			break;
+		}
+		/*
+		 * This will probably be optimized away when
+		 * CONFIG_DEBUG_FS is not enabled
+		 */
+		switch (MIPSInst_OPCODE(inst)) {
+		case beql_op:
+			MIPS_R2BR_STATS(beql);
+			break;
+		case bnel_op:
+			MIPS_R2BR_STATS(bnel);
+			break;
+		case blezl_op:
+			MIPS_R2BR_STATS(blezl);
+			break;
+		case bgtzl_op:
+			MIPS_R2BR_STATS(bgtzl);
+			break;
+		}
+
+		switch (MIPSInst_OPCODE(nir)) {
+		case cop1_op:
+		case cop1x_op:
+		case lwc1_op:
+		case swc1_op:
+			regs->cp0_cause |= CAUSEF_BD;
+			goto fpu_emul;
+		}
+		if (nir) {
+			err = mipsr6_emul(regs, nir);
+			if (err > 0) {
+				err = mips_dsemul(regs, nir, cpc);
+				if (err == SIGILL)
+					err = SIGEMT;
+				MIPS_R2_STATS(dsemul);
+			}
+		}
+		break;
+	case lwc1_op:
+	case swc1_op:
+	case cop1_op:
+	case cop1x_op:
+fpu_emul:
+		regs->regs[31] = r31;
+		regs->cp0_epc = epc;
+		if (!used_math()) {	/* First time FPU user. */
+			err = init_fpu();
+			set_used_math();
+		}
+		lose_fpu(1);	/* Save FPU state for the emulator. */
+
+		err = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
+					       &fault_addr);
+
+		/*
+		 * This is a tricky issue - lose_fpu() uses LL/SC atomics
+		 * if the FPU is owned, which effectively cancels a user-level
+		 * LL/SC pair. So it could be argued that FPU ownership should
+		 * not be restored here. But sequences of multiple FPU
+		 * instructions are far more common than LL-FPU-SC, so keep
+		 * re-owning the FPU until the next scheduler cycle cancels
+		 * the ownership.
+		 */
+		own_fpu(1);	/* Restore FPU state.
*/ + + if (err) + current->thread.cp0_baduaddr = (unsigned long)fault_addr; + + MIPS_R2_STATS(fpus); + + break; + + case lwl_op: + rt = regs->regs[MIPSInst_RT(inst)]; + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); + if (!access_ok(VERIFY_READ, vaddr, 4)) { + current->thread.cp0_baduaddr = vaddr; + err = SIGSEGV; + break; + } + __asm__ __volatile__( + " .set push\n" + " .set reorder\n" +#ifdef CONFIG_CPU_LITTLE_ENDIAN + "1:" LB "%1, 0(%2)\n" + INS "%0, %1, 24, 8\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + ADDIU "%2, %2, -1\n" + "2:" LB "%1, 0(%2)\n" + INS "%0, %1, 16, 8\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + ADDIU "%2, %2, -1\n" + "3:" LB "%1, 0(%2)\n" + INS "%0, %1, 8, 8\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + ADDIU "%2, %2, -1\n" + "4:" LB "%1, 0(%2)\n" + INS "%0, %1, 0, 8\n" +#else /* !CONFIG_CPU_LITTLE_ENDIAN */ + "1:" LB "%1, 0(%2)\n" + INS "%0, %1, 24, 8\n" + ADDIU "%2, %2, 1\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + "2:" LB "%1, 0(%2)\n" + INS "%0, %1, 16, 8\n" + ADDIU "%2, %2, 1\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + "3:" LB "%1, 0(%2)\n" + INS "%0, %1, 8, 8\n" + ADDIU "%2, %2, 1\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + "4:" LB "%1, 0(%2)\n" + INS "%0, %1, 0, 8\n" +#endif /* CONFIG_CPU_LITTLE_ENDIAN */ + "9: sll %0, %0, 0\n" + "10:\n" + " .insn\n" + " .section .fixup,\"ax\"\n" + "8: li %3,%4\n" + " j 10b\n" + " .previous\n" + " .section __ex_table,\"a\"\n" + " .word 1b,8b\n" + " .word 2b,8b\n" + " .word 3b,8b\n" + " .word 4b,8b\n" + " .previous\n" + " .set pop\n" + : "+&r"(rt), "=&r"(rs), + "+&r"(vaddr), "+&r"(err) + : "i"(SIGSEGV)); + + if (MIPSInst_RT(inst) && !err) + regs->regs[MIPSInst_RT(inst)] = rt; + + MIPS_R2_STATS(loads); + + break; + + case lwr_op: + rt = regs->regs[MIPSInst_RT(inst)]; + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); + if (!access_ok(VERIFY_READ, vaddr, 4)) { + current->thread.cp0_baduaddr = vaddr; + err = SIGSEGV; + break; + } + __asm__ __volatile__( + " .set push\n" + " .set reorder\n" +#ifdef CONFIG_CPU_LITTLE_ENDIAN + "1:" LB "%1, 0(%2)\n" + INS "%0, %1, 0, 8\n" + ADDIU "%2, %2, 1\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + "2:" LB "%1, 0(%2)\n" + INS "%0, %1, 8, 8\n" + ADDIU "%2, %2, 1\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + "3:" LB "%1, 0(%2)\n" + INS "%0, %1, 16, 8\n" + ADDIU "%2, %2, 1\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + "4:" LB "%1, 0(%2)\n" + INS "%0, %1, 24, 8\n" + " sll %0, %0, 0\n" +#else /* !CONFIG_CPU_LITTLE_ENDIAN */ + "1:" LB "%1, 0(%2)\n" + INS "%0, %1, 0, 8\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + ADDIU "%2, %2, -1\n" + "2:" LB "%1, 0(%2)\n" + INS "%0, %1, 8, 8\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + ADDIU "%2, %2, -1\n" + "3:" LB "%1, 0(%2)\n" + INS "%0, %1, 16, 8\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + ADDIU "%2, %2, -1\n" + "4:" LB "%1, 0(%2)\n" + INS "%0, %1, 24, 8\n" + " sll %0, %0, 0\n" +#endif /* CONFIG_CPU_LITTLE_ENDIAN */ + "9:\n" + "10:\n" + " .insn\n" + " .section .fixup,\"ax\"\n" + "8: li %3,%4\n" + " j 10b\n" + " .previous\n" + " .section __ex_table,\"a\"\n" + " .word 1b,8b\n" + " .word 2b,8b\n" + " .word 3b,8b\n" + " .word 4b,8b\n" + " .previous\n" + " .set pop\n" + : "+&r"(rt), "=&r"(rs), + "+&r"(vaddr), "+&r"(err) + : "i"(SIGSEGV)); + if (MIPSInst_RT(inst) && !err) + regs->regs[MIPSInst_RT(inst)] = rt; + + MIPS_R2_STATS(loads); + + break; + + case swl_op: + rt = regs->regs[MIPSInst_RT(inst)]; + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); + if 
(!access_ok(VERIFY_WRITE, vaddr, 4)) { + current->thread.cp0_baduaddr = vaddr; + err = SIGSEGV; + break; + } + __asm__ __volatile__( + " .set push\n" + " .set reorder\n" +#ifdef CONFIG_CPU_LITTLE_ENDIAN + EXT "%1, %0, 24, 8\n" + "1:" SB "%1, 0(%2)\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + ADDIU "%2, %2, -1\n" + EXT "%1, %0, 16, 8\n" + "2:" SB "%1, 0(%2)\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + ADDIU "%2, %2, -1\n" + EXT "%1, %0, 8, 8\n" + "3:" SB "%1, 0(%2)\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + ADDIU "%2, %2, -1\n" + EXT "%1, %0, 0, 8\n" + "4:" SB "%1, 0(%2)\n" +#else /* !CONFIG_CPU_LITTLE_ENDIAN */ + EXT "%1, %0, 24, 8\n" + "1:" SB "%1, 0(%2)\n" + ADDIU "%2, %2, 1\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + EXT "%1, %0, 16, 8\n" + "2:" SB "%1, 0(%2)\n" + ADDIU "%2, %2, 1\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + EXT "%1, %0, 8, 8\n" + "3:" SB "%1, 0(%2)\n" + ADDIU "%2, %2, 1\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + EXT "%1, %0, 0, 8\n" + "4:" SB "%1, 0(%2)\n" +#endif /* CONFIG_CPU_LITTLE_ENDIAN */ + "9:\n" + " .insn\n" + " .section .fixup,\"ax\"\n" + "8: li %3,%4\n" + " j 9b\n" + " .previous\n" + " .section __ex_table,\"a\"\n" + " .word 1b,8b\n" + " .word 2b,8b\n" + " .word 3b,8b\n" + " .word 4b,8b\n" + " .previous\n" + " .set pop\n" + : "+&r"(rt), "=&r"(rs), + "+&r"(vaddr), "+&r"(err) + : "i"(SIGSEGV) + : "memory"); + + MIPS_R2_STATS(stores); + + break; + + case swr_op: + rt = regs->regs[MIPSInst_RT(inst)]; + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); + if (!access_ok(VERIFY_WRITE, vaddr, 4)) { + current->thread.cp0_baduaddr = vaddr; + err = SIGSEGV; + break; + } + __asm__ __volatile__( + " .set push\n" + " .set reorder\n" +#ifdef CONFIG_CPU_LITTLE_ENDIAN + EXT "%1, %0, 0, 8\n" + "1:" SB "%1, 0(%2)\n" + ADDIU "%2, %2, 1\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + EXT "%1, %0, 8, 8\n" + "2:" SB "%1, 0(%2)\n" + ADDIU "%2, %2, 1\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + EXT "%1, %0, 16, 8\n" + "3:" SB "%1, 0(%2)\n" + ADDIU "%2, %2, 1\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + EXT "%1, %0, 24, 8\n" + "4:" SB "%1, 0(%2)\n" +#else /* !CONFIG_CPU_LITTLE_ENDIAN */ + EXT "%1, %0, 0, 8\n" + "1:" SB "%1, 0(%2)\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + ADDIU "%2, %2, -1\n" + EXT "%1, %0, 8, 8\n" + "2:" SB "%1, 0(%2)\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + ADDIU "%2, %2, -1\n" + EXT "%1, %0, 16, 8\n" + "3:" SB "%1, 0(%2)\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + ADDIU "%2, %2, -1\n" + EXT "%1, %0, 24, 8\n" + "4:" SB "%1, 0(%2)\n" +#endif /* CONFIG_CPU_LITTLE_ENDIAN */ + "9:\n" + " .insn\n" + " .section .fixup,\"ax\"\n" + "8: li %3,%4\n" + " j 9b\n" + " .previous\n" + " .section __ex_table,\"a\"\n" + " .word 1b,8b\n" + " .word 2b,8b\n" + " .word 3b,8b\n" + " .word 4b,8b\n" + " .previous\n" + " .set pop\n" + : "+&r"(rt), "=&r"(rs), + "+&r"(vaddr), "+&r"(err) + : "i"(SIGSEGV) + : "memory"); + + MIPS_R2_STATS(stores); + + break; + + case ldl_op: + if (config_enabled(CONFIG_32BIT)) { + err = SIGILL; + break; + } + + rt = regs->regs[MIPSInst_RT(inst)]; + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); + if (!access_ok(VERIFY_READ, vaddr, 8)) { + current->thread.cp0_baduaddr = vaddr; + err = SIGSEGV; + break; + } + __asm__ __volatile__( + " .set push\n" + " .set reorder\n" +#ifdef CONFIG_CPU_LITTLE_ENDIAN + "1: lb %1, 0(%2)\n" + " dinsu %0, %1, 56, 8\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + "2: lb %1, 0(%2)\n" + " dinsu %0, %1, 48, 
8\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + "3: lb %1, 0(%2)\n" + " dinsu %0, %1, 40, 8\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + "4: lb %1, 0(%2)\n" + " dinsu %0, %1, 32, 8\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + "5: lb %1, 0(%2)\n" + " dins %0, %1, 24, 8\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + "6: lb %1, 0(%2)\n" + " dins %0, %1, 16, 8\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + "7: lb %1, 0(%2)\n" + " dins %0, %1, 8, 8\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + "0: lb %1, 0(%2)\n" + " dins %0, %1, 0, 8\n" +#else /* !CONFIG_CPU_LITTLE_ENDIAN */ + "1: lb %1, 0(%2)\n" + " dinsu %0, %1, 56, 8\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + "2: lb %1, 0(%2)\n" + " dinsu %0, %1, 48, 8\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + "3: lb %1, 0(%2)\n" + " dinsu %0, %1, 40, 8\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + "4: lb %1, 0(%2)\n" + " dinsu %0, %1, 32, 8\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + "5: lb %1, 0(%2)\n" + " dins %0, %1, 24, 8\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + "6: lb %1, 0(%2)\n" + " dins %0, %1, 16, 8\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + "7: lb %1, 0(%2)\n" + " dins %0, %1, 8, 8\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + "0: lb %1, 0(%2)\n" + " dins %0, %1, 0, 8\n" +#endif /* CONFIG_CPU_LITTLE_ENDIAN */ + "9:\n" + " .insn\n" + " .section .fixup,\"ax\"\n" + "8: li %3,%4\n" + " j 9b\n" + " .previous\n" + " .section __ex_table,\"a\"\n" + " .word 1b,8b\n" + " .word 2b,8b\n" + " .word 3b,8b\n" + " .word 4b,8b\n" + " .word 5b,8b\n" + " .word 6b,8b\n" + " .word 7b,8b\n" + " .word 0b,8b\n" + " .previous\n" + " .set pop\n" + : "+&r"(rt), "=&r"(rs), + "+&r"(vaddr), "+&r"(err) + : "i"(SIGSEGV)); + if (MIPSInst_RT(inst) && !err) + regs->regs[MIPSInst_RT(inst)] = rt; + + MIPS_R2_STATS(loads); + break; + + case ldr_op: + if (config_enabled(CONFIG_32BIT)) { + err = SIGILL; + break; + } + + rt = regs->regs[MIPSInst_RT(inst)]; + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); + if (!access_ok(VERIFY_READ, vaddr, 8)) { + current->thread.cp0_baduaddr = vaddr; + err = SIGSEGV; + break; + } + __asm__ __volatile__( + " .set push\n" + " .set reorder\n" +#ifdef CONFIG_CPU_LITTLE_ENDIAN + "1: lb %1, 0(%2)\n" + " dins %0, %1, 0, 8\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + "2: lb %1, 0(%2)\n" + " dins %0, %1, 8, 8\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + "3: lb %1, 0(%2)\n" + " dins %0, %1, 16, 8\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + "4: lb %1, 0(%2)\n" + " dins %0, %1, 24, 8\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + "5: lb %1, 0(%2)\n" + " dinsu %0, %1, 32, 8\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + "6: lb %1, 0(%2)\n" + " dinsu %0, %1, 40, 8\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + "7: lb %1, 0(%2)\n" + " dinsu %0, %1, 48, 8\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + "0: lb %1, 0(%2)\n" + " dinsu %0, %1, 56, 8\n" +#else /* !CONFIG_CPU_LITTLE_ENDIAN */ + "1: lb %1, 0(%2)\n" + " dins %0, %1, 0, 8\n" + " andi %1, %2, 0x7\n" + " 
beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + "2: lb %1, 0(%2)\n" + " dins %0, %1, 8, 8\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + "3: lb %1, 0(%2)\n" + " dins %0, %1, 16, 8\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + "4: lb %1, 0(%2)\n" + " dins %0, %1, 24, 8\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + "5: lb %1, 0(%2)\n" + " dinsu %0, %1, 32, 8\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + "6: lb %1, 0(%2)\n" + " dinsu %0, %1, 40, 8\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + "7: lb %1, 0(%2)\n" + " dinsu %0, %1, 48, 8\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + "0: lb %1, 0(%2)\n" + " dinsu %0, %1, 56, 8\n" +#endif /* CONFIG_CPU_LITTLE_ENDIAN */ + "9:\n" + " .insn\n" + " .section .fixup,\"ax\"\n" + "8: li %3,%4\n" + " j 9b\n" + " .previous\n" + " .section __ex_table,\"a\"\n" + " .word 1b,8b\n" + " .word 2b,8b\n" + " .word 3b,8b\n" + " .word 4b,8b\n" + " .word 5b,8b\n" + " .word 6b,8b\n" + " .word 7b,8b\n" + " .word 0b,8b\n" + " .previous\n" + " .set pop\n" + : "+&r"(rt), "=&r"(rs), + "+&r"(vaddr), "+&r"(err) + : "i"(SIGSEGV)); + if (MIPSInst_RT(inst) && !err) + regs->regs[MIPSInst_RT(inst)] = rt; + + MIPS_R2_STATS(loads); + break; + + case sdl_op: + if (config_enabled(CONFIG_32BIT)) { + err = SIGILL; + break; + } + + rt = regs->regs[MIPSInst_RT(inst)]; + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); + if (!access_ok(VERIFY_WRITE, vaddr, 8)) { + current->thread.cp0_baduaddr = vaddr; + err = SIGSEGV; + break; + } + __asm__ __volatile__( + " .set push\n" + " .set reorder\n" +#ifdef CONFIG_CPU_LITTLE_ENDIAN + " dextu %1, %0, 56, 8\n" + "1: sb %1, 0(%2)\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + " dextu %1, %0, 48, 8\n" + "2: sb %1, 0(%2)\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + " dextu %1, %0, 40, 8\n" + "3: sb %1, 0(%2)\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + " dextu %1, %0, 32, 8\n" + "4: sb %1, 0(%2)\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + " dext %1, %0, 24, 8\n" + "5: sb %1, 0(%2)\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + " dext %1, %0, 16, 8\n" + "6: sb %1, 0(%2)\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + " dext %1, %0, 8, 8\n" + "7: sb %1, 0(%2)\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + " dext %1, %0, 0, 8\n" + "0: sb %1, 0(%2)\n" +#else /* !CONFIG_CPU_LITTLE_ENDIAN */ + " dextu %1, %0, 56, 8\n" + "1: sb %1, 0(%2)\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " dextu %1, %0, 48, 8\n" + "2: sb %1, 0(%2)\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " dextu %1, %0, 40, 8\n" + "3: sb %1, 0(%2)\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " dextu %1, %0, 32, 8\n" + "4: sb %1, 0(%2)\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " dext %1, %0, 24, 8\n" + "5: sb %1, 0(%2)\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " dext %1, %0, 16, 8\n" + "6: sb %1, 0(%2)\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " dext %1, %0, 8, 8\n" + "7: sb %1, 0(%2)\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " dext %1, %0, 0, 8\n" + "0: sb %1, 0(%2)\n" +#endif /* 
CONFIG_CPU_LITTLE_ENDIAN */ + "9:\n" + " .insn\n" + " .section .fixup,\"ax\"\n" + "8: li %3,%4\n" + " j 9b\n" + " .previous\n" + " .section __ex_table,\"a\"\n" + " .word 1b,8b\n" + " .word 2b,8b\n" + " .word 3b,8b\n" + " .word 4b,8b\n" + " .word 5b,8b\n" + " .word 6b,8b\n" + " .word 7b,8b\n" + " .word 0b,8b\n" + " .previous\n" + " .set pop\n" + : "+&r"(rt), "=&r"(rs), + "+&r"(vaddr), "+&r"(err) + : "i"(SIGSEGV) + : "memory"); + + MIPS_R2_STATS(stores); + break; + + case sdr_op: + if (config_enabled(CONFIG_32BIT)) { + err = SIGILL; + break; + } + + rt = regs->regs[MIPSInst_RT(inst)]; + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); + if (!access_ok(VERIFY_WRITE, vaddr, 8)) { + current->thread.cp0_baduaddr = vaddr; + err = SIGSEGV; + break; + } + __asm__ __volatile__( + " .set push\n" + " .set reorder\n" +#ifdef CONFIG_CPU_LITTLE_ENDIAN + " dext %1, %0, 0, 8\n" + "1: sb %1, 0(%2)\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " dext %1, %0, 8, 8\n" + "2: sb %1, 0(%2)\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " dext %1, %0, 16, 8\n" + "3: sb %1, 0(%2)\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " dext %1, %0, 24, 8\n" + "4: sb %1, 0(%2)\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " dextu %1, %0, 32, 8\n" + "5: sb %1, 0(%2)\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " dextu %1, %0, 40, 8\n" + "6: sb %1, 0(%2)\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " dextu %1, %0, 48, 8\n" + "7: sb %1, 0(%2)\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " dextu %1, %0, 56, 8\n" + "0: sb %1, 0(%2)\n" +#else /* !CONFIG_CPU_LITTLE_ENDIAN */ + " dext %1, %0, 0, 8\n" + "1: sb %1, 0(%2)\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + " dext %1, %0, 8, 8\n" + "2: sb %1, 0(%2)\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + " dext %1, %0, 16, 8\n" + "3: sb %1, 0(%2)\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + " dext %1, %0, 24, 8\n" + "4: sb %1, 0(%2)\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + " dextu %1, %0, 32, 8\n" + "5: sb %1, 0(%2)\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + " dextu %1, %0, 40, 8\n" + "6: sb %1, 0(%2)\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + " dextu %1, %0, 48, 8\n" + "7: sb %1, 0(%2)\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + " dextu %1, %0, 56, 8\n" + "0: sb %1, 0(%2)\n" +#endif /* CONFIG_CPU_LITTLE_ENDIAN */ + "9:\n" + " .insn\n" + " .section .fixup,\"ax\"\n" + "8: li %3,%4\n" + " j 9b\n" + " .previous\n" + " .section __ex_table,\"a\"\n" + " .word 1b,8b\n" + " .word 2b,8b\n" + " .word 3b,8b\n" + " .word 4b,8b\n" + " .word 5b,8b\n" + " .word 6b,8b\n" + " .word 7b,8b\n" + " .word 0b,8b\n" + " .previous\n" + " .set pop\n" + : "+&r"(rt), "=&r"(rs), + "+&r"(vaddr), "+&r"(err) + : "i"(SIGSEGV) + : "memory"); + + MIPS_R2_STATS(stores); + + break; + case ll_op: + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); + if (vaddr & 0x3) { + current->thread.cp0_baduaddr = vaddr; + err = SIGBUS; + break; + } + if (!access_ok(VERIFY_READ, vaddr, 4)) { + current->thread.cp0_baduaddr = vaddr; + err = SIGBUS; + break; + } + + if (!cpu_has_rw_llb) { + /* + * An LL/SC block can't be safely emulated without + * a Config5/LLB availability. 
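(Config5.LLB advertises software access to the LL bit, and ERETNC + * is the R6 "ERET no clear" variant of ERET that returns from an + * exception without clearing it.) 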
So it's probably time to + * kill our process before things get any worse. This is + * because Config5/LLB allows us to use ERETNC so that + * the LLAddr/LLB bit is not cleared when we return from + * an exception. MIPS R2 LL/SC instructions trap with an + * RI exception so once we emulate them here, we return + * back to userland with ERETNC. That preserves the + * LLAddr/LLB so the subsequent SC instruction will + * succeed preserving the atomic semantics of the LL/SC + * block. Without that, there is no safe way to emulate + * an LL/SC block in MIPSR2 userland. + */ + pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n"); + err = SIGKILL; + break; + } + + __asm__ __volatile__( + "1:\n" + "ll %0, 0(%2)\n" + "2:\n" + ".insn\n" + ".section .fixup,\"ax\"\n" + "3:\n" + "li %1, %3\n" + "j 2b\n" + ".previous\n" + ".section __ex_table,\"a\"\n" + ".word 1b, 3b\n" + ".previous\n" + : "=&r"(res), "+&r"(err) + : "r"(vaddr), "i"(SIGSEGV) + : "memory"); + + if (MIPSInst_RT(inst) && !err) + regs->regs[MIPSInst_RT(inst)] = res; + MIPS_R2_STATS(llsc); + + break; + + case sc_op: + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); + if (vaddr & 0x3) { + current->thread.cp0_baduaddr = vaddr; + err = SIGBUS; + break; + } + if (!access_ok(VERIFY_WRITE, vaddr, 4)) { + current->thread.cp0_baduaddr = vaddr; + err = SIGBUS; + break; + } + + if (!cpu_has_rw_llb) { + /* + * An LL/SC block can't be safely emulated without + * a Config5/LLB availability. So it's probably time to + * kill our process before things get any worse. This is + * because Config5/LLB allows us to use ERETNC so that + * the LLAddr/LLB bit is not cleared when we return from + * an exception. MIPS R2 LL/SC instructions trap with an + * RI exception so once we emulate them here, we return + * back to userland with ERETNC. That preserves the + * LLAddr/LLB so the subsequent SC instruction will + * succeed preserving the atomic semantics of the LL/SC + * block. Without that, there is no safe way to emulate + * an LL/SC block in MIPSR2 userland. + */ + pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n"); + err = SIGKILL; + break; + } + + res = regs->regs[MIPSInst_RT(inst)]; + + __asm__ __volatile__( + "1:\n" + "sc %0, 0(%2)\n" + "2:\n" + ".insn\n" + ".section .fixup,\"ax\"\n" + "3:\n" + "li %1, %3\n" + "j 2b\n" + ".previous\n" + ".section __ex_table,\"a\"\n" + ".word 1b, 3b\n" + ".previous\n" + : "+&r"(res), "+&r"(err) + : "r"(vaddr), "i"(SIGSEGV)); + + if (MIPSInst_RT(inst) && !err) + regs->regs[MIPSInst_RT(inst)] = res; + + MIPS_R2_STATS(llsc); + + break; + + case lld_op: + if (config_enabled(CONFIG_32BIT)) { + err = SIGILL; + break; + } + + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); + if (vaddr & 0x7) { + current->thread.cp0_baduaddr = vaddr; + err = SIGBUS; + break; + } + if (!access_ok(VERIFY_READ, vaddr, 8)) { + current->thread.cp0_baduaddr = vaddr; + err = SIGBUS; + break; + } + + if (!cpu_has_rw_llb) { + /* + * An LL/SC block can't be safely emulated without + * a Config5/LLB availability. So it's probably time to + * kill our process before things get any worse. This is + * because Config5/LLB allows us to use ERETNC so that + * the LLAddr/LLB bit is not cleared when we return from + * an exception. MIPS R2 LL/SC instructions trap with an + * RI exception so once we emulate them here, we return + * back to userland with ERETNC. That preserves the + * LLAddr/LLB so the subsequent SC instruction will + * succeed preserving the atomic semantics of the LL/SC + * block. 
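For instance, a typical R2 retry loop that must stay atomic looks + * like this (an illustrative sequence, not code from this patch): + * + *	1: ll t0, 0(a0) + *	addiu t0, t0, 1 + *	sc t0, 0(a0) + *	beqz t0, 1b + * + * If the emulated LL left the LL bit clear on return, the SC would + * always fail and such a loop would spin forever. 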
Without that, there is no safe way to emulate + * an LL/SC block in MIPSR2 userland. + */ + pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n"); + err = SIGKILL; + break; + } + + __asm__ __volatile__( + "1:\n" + "lld %0, 0(%2)\n" + "2:\n" + ".insn\n" + ".section .fixup,\"ax\"\n" + "3:\n" + "li %1, %3\n" + "j 2b\n" + ".previous\n" + ".section __ex_table,\"a\"\n" + ".word 1b, 3b\n" + ".previous\n" + : "=&r"(res), "+&r"(err) + : "r"(vaddr), "i"(SIGSEGV) + : "memory"); + if (MIPSInst_RT(inst) && !err) + regs->regs[MIPSInst_RT(inst)] = res; + + MIPS_R2_STATS(llsc); + + break; + + case scd_op: + if (config_enabled(CONFIG_32BIT)) { + err = SIGILL; + break; + } + + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); + if (vaddr & 0x7) { + current->thread.cp0_baduaddr = vaddr; + err = SIGBUS; + break; + } + if (!access_ok(VERIFY_WRITE, vaddr, 8)) { + current->thread.cp0_baduaddr = vaddr; + err = SIGBUS; + break; + } + + if (!cpu_has_rw_llb) { + /* + * An LL/SC block can't be safely emulated without + * a Config5/LLB availability. So it's probably time to + * kill our process before things get any worse. This is + * because Config5/LLB allows us to use ERETNC so that + * the LLAddr/LLB bit is not cleared when we return from + * an exception. MIPS R2 LL/SC instructions trap with an + * RI exception so once we emulate them here, we return + * back to userland with ERETNC. That preserves the + * LLAddr/LLB so the subsequent SC instruction will + * succeed preserving the atomic semantics of the LL/SC + * block. Without that, there is no safe way to emulate + * an LL/SC block in MIPSR2 userland. + */ + pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n"); + err = SIGKILL; + break; + } + + res = regs->regs[MIPSInst_RT(inst)]; + + __asm__ __volatile__( + "1:\n" + "scd %0, 0(%2)\n" + "2:\n" + ".insn\n" + ".section .fixup,\"ax\"\n" + "3:\n" + "li %1, %3\n" + "j 2b\n" + ".previous\n" + ".section __ex_table,\"a\"\n" + ".word 1b, 3b\n" + ".previous\n" + : "+&r"(res), "+&r"(err) + : "r"(vaddr), "i"(SIGSEGV)); + + if (MIPSInst_RT(inst) && !err) + regs->regs[MIPSInst_RT(inst)] = res; + + MIPS_R2_STATS(llsc); + + break; + case pref_op: + /* skip it */ + break; + default: + err = SIGILL; + } + + /* + * Lets not return to userland just yet. 
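(The code below re-fetches the instruction at the updated EPC and + * branches back to the repeat label, bounded by + * MIPS_R2_EMUL_TOTAL_PASS.) 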
It's costly and + * it's likely we have more R2 instructions to emulate + */ + if (!err && (pass++ < MIPS_R2_EMUL_TOTAL_PASS)) { + regs->cp0_cause &= ~CAUSEF_BD; + err = get_user(inst, (u32 __user *)regs->cp0_epc); + if (!err) + goto repeat; + + if (err < 0) + err = SIGSEGV; + } + + if (err && (err != SIGEMT)) { + regs->regs[31] = r31; + regs->cp0_epc = epc; + } + + /* Likely a MIPS R6 compatible instruction */ + if (pass && (err == SIGILL)) + err = 0; + + return err; +} + +#ifdef CONFIG_DEBUG_FS + +static int mipsr2_stats_show(struct seq_file *s, void *unused) +{ + + seq_printf(s, "Instruction\tTotal\tBDslot\n------------------------------\n"); + seq_printf(s, "movs\t\t%ld\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2emustats.movs), + (unsigned long)__this_cpu_read(mipsr2bdemustats.movs)); + seq_printf(s, "hilo\t\t%ld\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2emustats.hilo), + (unsigned long)__this_cpu_read(mipsr2bdemustats.hilo)); + seq_printf(s, "muls\t\t%ld\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2emustats.muls), + (unsigned long)__this_cpu_read(mipsr2bdemustats.muls)); + seq_printf(s, "divs\t\t%ld\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2emustats.divs), + (unsigned long)__this_cpu_read(mipsr2bdemustats.divs)); + seq_printf(s, "dsps\t\t%ld\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2emustats.dsps), + (unsigned long)__this_cpu_read(mipsr2bdemustats.dsps)); + seq_printf(s, "bops\t\t%ld\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2emustats.bops), + (unsigned long)__this_cpu_read(mipsr2bdemustats.bops)); + seq_printf(s, "traps\t\t%ld\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2emustats.traps), + (unsigned long)__this_cpu_read(mipsr2bdemustats.traps)); + seq_printf(s, "fpus\t\t%ld\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2emustats.fpus), + (unsigned long)__this_cpu_read(mipsr2bdemustats.fpus)); + seq_printf(s, "loads\t\t%ld\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2emustats.loads), + (unsigned long)__this_cpu_read(mipsr2bdemustats.loads)); + seq_printf(s, "stores\t\t%ld\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2emustats.stores), + (unsigned long)__this_cpu_read(mipsr2bdemustats.stores)); + seq_printf(s, "llsc\t\t%ld\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2emustats.llsc), + (unsigned long)__this_cpu_read(mipsr2bdemustats.llsc)); + seq_printf(s, "dsemul\t\t%ld\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2emustats.dsemul), + (unsigned long)__this_cpu_read(mipsr2bdemustats.dsemul)); + seq_printf(s, "jr\t\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2bremustats.jrs)); + seq_printf(s, "bltzl\t\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2bremustats.bltzl)); + seq_printf(s, "bgezl\t\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2bremustats.bgezl)); + seq_printf(s, "bltzll\t\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2bremustats.bltzll)); + seq_printf(s, "bgezll\t\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2bremustats.bgezll)); + seq_printf(s, "bltzal\t\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2bremustats.bltzal)); + seq_printf(s, "bgezal\t\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2bremustats.bgezal)); + seq_printf(s, "beql\t\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2bremustats.beql)); + seq_printf(s, "bnel\t\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2bremustats.bnel)); + seq_printf(s, "blezl\t\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2bremustats.blezl)); + seq_printf(s, "bgtzl\t\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2bremustats.bgtzl)); + + return 0; +} + +static int 
mipsr2_stats_clear_show(struct seq_file *s, void *unused) +{ + mipsr2_stats_show(s, unused); + + __this_cpu_write((mipsr2emustats).movs, 0); + __this_cpu_write((mipsr2bdemustats).movs, 0); + __this_cpu_write((mipsr2emustats).hilo, 0); + __this_cpu_write((mipsr2bdemustats).hilo, 0); + __this_cpu_write((mipsr2emustats).muls, 0); + __this_cpu_write((mipsr2bdemustats).muls, 0); + __this_cpu_write((mipsr2emustats).divs, 0); + __this_cpu_write((mipsr2bdemustats).divs, 0); + __this_cpu_write((mipsr2emustats).dsps, 0); + __this_cpu_write((mipsr2bdemustats).dsps, 0); + __this_cpu_write((mipsr2emustats).bops, 0); + __this_cpu_write((mipsr2bdemustats).bops, 0); + __this_cpu_write((mipsr2emustats).traps, 0); + __this_cpu_write((mipsr2bdemustats).traps, 0); + __this_cpu_write((mipsr2emustats).fpus, 0); + __this_cpu_write((mipsr2bdemustats).fpus, 0); + __this_cpu_write((mipsr2emustats).loads, 0); + __this_cpu_write((mipsr2bdemustats).loads, 0); + __this_cpu_write((mipsr2emustats).stores, 0); + __this_cpu_write((mipsr2bdemustats).stores, 0); + __this_cpu_write((mipsr2emustats).llsc, 0); + __this_cpu_write((mipsr2bdemustats).llsc, 0); + __this_cpu_write((mipsr2emustats).dsemul, 0); + __this_cpu_write((mipsr2bdemustats).dsemul, 0); + __this_cpu_write((mipsr2bremustats).jrs, 0); + __this_cpu_write((mipsr2bremustats).bltzl, 0); + __this_cpu_write((mipsr2bremustats).bgezl, 0); + __this_cpu_write((mipsr2bremustats).bltzll, 0); + __this_cpu_write((mipsr2bremustats).bgezll, 0); + __this_cpu_write((mipsr2bremustats).bltzal, 0); + __this_cpu_write((mipsr2bremustats).bgezal, 0); + __this_cpu_write((mipsr2bremustats).beql, 0); + __this_cpu_write((mipsr2bremustats).bnel, 0); + __this_cpu_write((mipsr2bremustats).blezl, 0); + __this_cpu_write((mipsr2bremustats).bgtzl, 0); + + return 0; +} + +static int mipsr2_stats_open(struct inode *inode, struct file *file) +{ + return single_open(file, mipsr2_stats_show, inode->i_private); +} + +static int mipsr2_stats_clear_open(struct inode *inode, struct file *file) +{ + return single_open(file, mipsr2_stats_clear_show, inode->i_private); +} + +static const struct file_operations mipsr2_emul_fops = { + .open = mipsr2_stats_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static const struct file_operations mipsr2_clear_fops = { + .open = mipsr2_stats_clear_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + + +static int __init mipsr2_init_debugfs(void) +{ + extern struct dentry *mips_debugfs_dir; + struct dentry *mipsr2_emul; + + if (!mips_debugfs_dir) + return -ENODEV; + + mipsr2_emul = debugfs_create_file("r2_emul_stats", S_IRUGO, + mips_debugfs_dir, NULL, + &mipsr2_emul_fops); + if (!mipsr2_emul) + return -ENOMEM; + + mipsr2_emul = debugfs_create_file("r2_emul_stats_clear", S_IRUGO, + mips_debugfs_dir, NULL, + &mipsr2_clear_fops); + if (!mipsr2_emul) + return -ENOMEM; + + return 0; +} + +device_initcall(mipsr2_init_debugfs); + +#endif /* CONFIG_DEBUG_FS */ diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c index 17eaf0cf760..291af0b5c48 100644 --- a/arch/mips/kernel/mips_ksyms.c +++ b/arch/mips/kernel/mips_ksyms.c @@ -14,6 +14,8 @@ #include <linux/mm.h> #include <asm/uaccess.h> #include <asm/ftrace.h> +#include <asm/fpu.h> +#include <asm/msa.h> extern void *__bzero(void *__s, size_t __count); extern long __strncpy_from_kernel_nocheck_asm(char *__to, @@ -32,6 +34,14 @@ extern long __strnlen_user_nocheck_asm(const char *s); extern long __strnlen_user_asm(const char *s); /* + * Core 
architecture code + */ +EXPORT_SYMBOL_GPL(_save_fp); +#ifdef CONFIG_CPU_HAS_MSA +EXPORT_SYMBOL_GPL(_save_msa); +#endif + +/* * String functions */ EXPORT_SYMBOL(memset); @@ -67,11 +77,13 @@ EXPORT_SYMBOL(__strnlen_kernel_asm); EXPORT_SYMBOL(__strnlen_user_nocheck_asm); EXPORT_SYMBOL(__strnlen_user_asm); +#ifndef CONFIG_CPU_MIPSR6 EXPORT_SYMBOL(csum_partial); EXPORT_SYMBOL(csum_partial_copy_nocheck); EXPORT_SYMBOL(__csum_partial_copy_kernel); EXPORT_SYMBOL(__csum_partial_copy_to_user); EXPORT_SYMBOL(__csum_partial_copy_from_user); +#endif EXPORT_SYMBOL(invalid_pte_table); #ifdef CONFIG_FUNCTION_TRACER diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S index f6547680c81..423ae83af1f 100644 --- a/arch/mips/kernel/octeon_switch.S +++ b/arch/mips/kernel/octeon_switch.S @@ -31,15 +31,11 @@ /* * check if we need to save FPU registers */ - PTR_L t3, TASK_THREAD_INFO(a0) - LONG_L t0, TI_FLAGS(t3) - li t1, _TIF_USEDFPU - and t2, t0, t1 - beqz t2, 1f - nor t1, zero, t1 - - and t0, t0, t1 - LONG_S t0, TI_FLAGS(t3) + .set push + .set noreorder + beqz a3, 1f + PTR_L t3, TASK_THREAD_INFO(a0) + .set pop /* * clear saved user stack CU1 bit @@ -56,36 +52,9 @@ .set pop 1: - /* check if we need to save COP2 registers */ - PTR_L t2, TASK_THREAD_INFO(a0) - LONG_L t0, ST_OFF(t2) - bbit0 t0, 30, 1f - - /* Disable COP2 in the stored process state */ - li t1, ST0_CU2 - xor t0, t1 - LONG_S t0, ST_OFF(t2) - - /* Enable COP2 so we can save it */ - mfc0 t0, CP0_STATUS - or t0, t1 - mtc0 t0, CP0_STATUS - - /* Save COP2 */ - daddu a0, THREAD_CP2 - jal octeon_cop2_save - dsubu a0, THREAD_CP2 - - /* Disable COP2 now that we are done */ - mfc0 t0, CP0_STATUS - li t1, ST0_CU2 - xor t0, t1 - mtc0 t0, CP0_STATUS - -1: #if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0 /* Check if we need to store CVMSEG state */ - mfc0 t0, $11,7 /* CvmMemCtl */ + dmfc0 t0, $11,7 /* CvmMemCtl */ bbit0 t0, 6, 3f /* Is user access enabled? */ /* Store the CVMSEG state */ @@ -109,9 +78,9 @@ .set reorder /* Disable access to CVMSEG */ - mfc0 t0, $11,7 /* CvmMemCtl */ + dmfc0 t0, $11,7 /* CvmMemCtl */ xori t0, t0, 0x40 /* Bit 6 is CVMSEG user enable */ - mtc0 t0, $11,7 /* CvmMemCtl */ + dmtc0 t0, $11,7 /* CvmMemCtl */ #endif 3: @@ -147,6 +116,8 @@ * void octeon_cop2_save(struct octeon_cop2_state *a0) */ .align 7 + .set push + .set noreorder LEAF(octeon_cop2_save) dmfc0 t9, $9,7 /* CvmCtl register. 
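Bits 28 (NODFA_CP2) and 26 (NOCRYPTO) are tested below so that state for COP2 units this part does not implement is skipped. 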
*/ @@ -157,17 +128,17 @@ dmfc2 t2, 0x0200 sd t0, OCTEON_CP2_CRC_IV(a0) sd t1, OCTEON_CP2_CRC_LENGTH(a0) - sd t2, OCTEON_CP2_CRC_POLY(a0) /* Skip next instructions if CvmCtl[NODFA_CP2] set */ bbit1 t9, 28, 1f + sd t2, OCTEON_CP2_CRC_POLY(a0) /* Save the LLM state */ dmfc2 t0, 0x0402 dmfc2 t1, 0x040A sd t0, OCTEON_CP2_LLM_DAT(a0) - sd t1, OCTEON_CP2_LLM_DAT+8(a0) 1: bbit1 t9, 26, 3f /* done if CvmCtl[NOCRYPTO] set */ + sd t1, OCTEON_CP2_LLM_DAT+8(a0) /* Save the COP2 crypto state */ /* this part is mostly common to both pass 1 and later revisions */ @@ -198,18 +169,20 @@ sd t2, OCTEON_CP2_AES_KEY+16(a0) dmfc2 t2, 0x0101 sd t3, OCTEON_CP2_AES_KEY+24(a0) - mfc0 t3, $15,0 /* Get the processor ID register */ + mfc0 v0, $15,0 /* Get the processor ID register */ sd t0, OCTEON_CP2_AES_KEYLEN(a0) - li t0, 0x000d0000 /* This is the processor ID of Octeon Pass1 */ + li v1, 0x000d0000 /* This is the processor ID of Octeon Pass1 */ sd t1, OCTEON_CP2_AES_RESULT(a0) - sd t2, OCTEON_CP2_AES_RESULT+8(a0) /* Skip to the Pass1 version of the remainder of the COP2 state */ - beq t3, t0, 2f + beq v0, v1, 2f + sd t2, OCTEON_CP2_AES_RESULT+8(a0) /* the non-pass1 state when !CvmCtl[NOCRYPTO] */ dmfc2 t1, 0x0240 dmfc2 t2, 0x0241 + ori v1, v1, 0x9500 /* lowest OCTEON III PrId*/ dmfc2 t3, 0x0242 + subu v1, v0, v1 /* prid - lowest OCTEON III PrId */ dmfc2 t0, 0x0243 sd t1, OCTEON_CP2_HSH_DATW(a0) dmfc2 t1, 0x0244 @@ -262,8 +235,16 @@ sd t1, OCTEON_CP2_GFM_MULT+8(a0) sd t2, OCTEON_CP2_GFM_POLY(a0) sd t3, OCTEON_CP2_GFM_RESULT(a0) - sd t0, OCTEON_CP2_GFM_RESULT+8(a0) + bltz v1, 4f + sd t0, OCTEON_CP2_GFM_RESULT+8(a0) + /* OCTEON III things*/ + dmfc2 t0, 0x024F + dmfc2 t1, 0x0050 + sd t0, OCTEON_CP2_SHA3(a0) + sd t1, OCTEON_CP2_SHA3+8(a0) +4: jr ra + nop 2: /* pass 1 special stuff when !CvmCtl[NOCRYPTO] */ dmfc2 t3, 0x0040 @@ -289,7 +270,9 @@ 3: /* pass 1 or CvmCtl[NOCRYPTO] set */ jr ra + nop END(octeon_cop2_save) + .set pop /* * void octeon_cop2_restore(struct octeon_cop2_state *a0) @@ -354,9 +337,9 @@ ld t2, OCTEON_CP2_AES_RESULT+8(a0) mfc0 t3, $15,0 /* Get the processor ID register */ dmtc2 t0, 0x0110 - li t0, 0x000d0000 /* This is the processor ID of Octeon Pass1 */ + li v0, 0x000d0000 /* This is the processor ID of Octeon Pass1 */ dmtc2 t1, 0x0100 - bne t0, t3, 3f /* Skip the next stuff for non-pass1 */ + bne v0, t3, 3f /* Skip the next stuff for non-pass1 */ dmtc2 t2, 0x0101 /* this code is specific for pass 1 */ @@ -384,6 +367,7 @@ 3: /* this is post-pass1 code */ ld t2, OCTEON_CP2_HSH_DATW(a0) + ori v0, v0, 0x9500 /* lowest OCTEON III PrId*/ ld t0, OCTEON_CP2_HSH_DATW+8(a0) ld t1, OCTEON_CP2_HSH_DATW+16(a0) dmtc2 t2, 0x0240 @@ -437,9 +421,15 @@ dmtc2 t2, 0x0259 ld t2, OCTEON_CP2_GFM_RESULT+8(a0) dmtc2 t0, 0x025E + subu v0, t3, v0 /* prid - lowest OCTEON III PrId */ dmtc2 t1, 0x025A - dmtc2 t2, 0x025B - + bltz v0, done_restore + dmtc2 t2, 0x025B + /* OCTEON III things*/ + ld t0, OCTEON_CP2_SHA3(a0) + ld t1, OCTEON_CP2_SHA3+8(a0) + dmtc2 t0, 0x0051 + dmtc2 t1, 0x0050 done_restore: jr ra nop @@ -450,18 +440,23 @@ done_restore: * void octeon_mult_save() * sp is assumed to point to a struct pt_regs * - * NOTE: This is called in SAVE_SOME in stackframe.h. It can only - * safely modify k0 and k1. + * NOTE: This is called in SAVE_TEMP in stackframe.h. It can + * safely modify v1,k0, k1,$10-$15, and $24. It will + * be overwritten with a processor specific version of the code. */ - .align 7 + .p2align 7 .set push .set noreorder LEAF(octeon_mult_save) - dmfc0 k0, $9,7 /* CvmCtl register. 
*/ - bbit1 k0, 27, 1f /* Skip CvmCtl[NOMUL] */ + jr ra nop + .space 30 * 4, 0 +octeon_mult_save_end: + EXPORT(octeon_mult_save_end) + END(octeon_mult_save) - /* Save the multiplier state */ + LEAF(octeon_mult_save2) + /* Save the multiplier state OCTEON II and earlier*/ v3mulu k0, $0, $0 v3mulu k1, $0, $0 sd k0, PT_MTP(sp) /* PT_MTP has P0 */ @@ -476,44 +471,107 @@ done_restore: sd k0, PT_MPL+8(sp) /* PT_MPL+8 has MPL1 */ jr ra sd k1, PT_MPL+16(sp) /* PT_MPL+16 has MPL2 */ - -1: /* Resume here if CvmCtl[NOMUL] */ +octeon_mult_save2_end: + EXPORT(octeon_mult_save2_end) + END(octeon_mult_save2) + + LEAF(octeon_mult_save3) + /* Save the multiplier state OCTEON III */ + v3mulu $10, $0, $0 /* read P0 */ + v3mulu $11, $0, $0 /* read P1 */ + v3mulu $12, $0, $0 /* read P2 */ + sd $10, PT_MTP+(0*8)(sp) /* store P0 */ + v3mulu $10, $0, $0 /* read P3 */ + sd $11, PT_MTP+(1*8)(sp) /* store P1 */ + v3mulu $11, $0, $0 /* read P4 */ + sd $12, PT_MTP+(2*8)(sp) /* store P2 */ + ori $13, $0, 1 + v3mulu $12, $0, $0 /* read P5 */ + sd $10, PT_MTP+(3*8)(sp) /* store P3 */ + v3mulu $13, $13, $0 /* P4-P0 = MPL5-MPL1, $13 = MPL0 */ + sd $11, PT_MTP+(4*8)(sp) /* store P4 */ + v3mulu $10, $0, $0 /* read MPL1 */ + sd $12, PT_MTP+(5*8)(sp) /* store P5 */ + v3mulu $11, $0, $0 /* read MPL2 */ + sd $13, PT_MPL+(0*8)(sp) /* store MPL0 */ + v3mulu $12, $0, $0 /* read MPL3 */ + sd $10, PT_MPL+(1*8)(sp) /* store MPL1 */ + v3mulu $10, $0, $0 /* read MPL4 */ + sd $11, PT_MPL+(2*8)(sp) /* store MPL2 */ + v3mulu $11, $0, $0 /* read MPL5 */ + sd $12, PT_MPL+(3*8)(sp) /* store MPL3 */ + sd $10, PT_MPL+(4*8)(sp) /* store MPL4 */ jr ra - END(octeon_mult_save) + sd $11, PT_MPL+(5*8)(sp) /* store MPL5 */ +octeon_mult_save3_end: + EXPORT(octeon_mult_save3_end) + END(octeon_mult_save3) .set pop /* * void octeon_mult_restore() * sp is assumed to point to a struct pt_regs * - * NOTE: This is called in RESTORE_SOME in stackframe.h. + * NOTE: This is called in RESTORE_TEMP in stackframe.h. */ - .align 7 + .p2align 7 .set push .set noreorder LEAF(octeon_mult_restore) - dmfc0 k1, $9,7 /* CvmCtl register. 
*/ - ld v0, PT_MPL(sp) /* MPL0 */ - ld v1, PT_MPL+8(sp) /* MPL1 */ - ld k0, PT_MPL+16(sp) /* MPL2 */ - bbit1 k1, 27, 1f /* Skip CvmCtl[NOMUL] */ - /* Normally falls through, so no time wasted here */ - nop + jr ra + nop + .space 30 * 4, 0 +octeon_mult_restore_end: + EXPORT(octeon_mult_restore_end) + END(octeon_mult_restore) + LEAF(octeon_mult_restore2) + ld v0, PT_MPL(sp) /* MPL0 */ + ld v1, PT_MPL+8(sp) /* MPL1 */ + ld k0, PT_MPL+16(sp) /* MPL2 */ /* Restore the multiplier state */ - ld k1, PT_MTP+16(sp) /* P2 */ - MTM0 v0 /* MPL0 */ + ld k1, PT_MTP+16(sp) /* P2 */ + mtm0 v0 /* MPL0 */ ld v0, PT_MTP+8(sp) /* P1 */ - MTM1 v1 /* MPL1 */ - ld v1, PT_MTP(sp) /* P0 */ - MTM2 k0 /* MPL2 */ - MTP2 k1 /* P2 */ - MTP1 v0 /* P1 */ + mtm1 v1 /* MPL1 */ + ld v1, PT_MTP(sp) /* P0 */ + mtm2 k0 /* MPL2 */ + mtp2 k1 /* P2 */ + mtp1 v0 /* P1 */ jr ra - MTP0 v1 /* P0 */ - -1: /* Resume here if CvmCtl[NOMUL] */ + mtp0 v1 /* P0 */ +octeon_mult_restore2_end: + EXPORT(octeon_mult_restore2_end) + END(octeon_mult_restore2) + + LEAF(octeon_mult_restore3) + ld $12, PT_MPL+(0*8)(sp) /* read MPL0 */ + ld $13, PT_MPL+(3*8)(sp) /* read MPL3 */ + ld $10, PT_MPL+(1*8)(sp) /* read MPL1 */ + ld $11, PT_MPL+(4*8)(sp) /* read MPL4 */ + .word 0x718d0008 + /* mtm0 $12, $13 restore MPL0 and MPL3 */ + ld $12, PT_MPL+(2*8)(sp) /* read MPL2 */ + .word 0x714b000c + /* mtm1 $10, $11 restore MPL1 and MPL4 */ + ld $13, PT_MPL+(5*8)(sp) /* read MPL5 */ + ld $10, PT_MTP+(0*8)(sp) /* read P0 */ + ld $11, PT_MTP+(3*8)(sp) /* read P3 */ + .word 0x718d000d + /* mtm2 $12, $13 restore MPL2 and MPL5 */ + ld $12, PT_MTP+(1*8)(sp) /* read P1 */ + .word 0x714b0009 + /* mtp0 $10, $11 restore P0 and P3 */ + ld $13, PT_MTP+(4*8)(sp) /* read P4 */ + ld $10, PT_MTP+(2*8)(sp) /* read P2 */ + ld $11, PT_MTP+(5*8)(sp) /* read P5 */ + .word 0x718d000a + /* mtp1 $12, $13 restore P1 and P4 */ jr ra - nop - END(octeon_mult_restore) + .word 0x714b000b + /* mtp2 $10, $11 restore P2 and P5 */ + +octeon_mult_restore3_end: + EXPORT(octeon_mult_restore3_end) + END(octeon_mult_restore3) .set pop diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c index 097fc8d14e4..130af7d26a9 100644 --- a/arch/mips/kernel/proc.c +++ b/arch/mips/kernel/proc.c @@ -82,7 +82,9 @@ static int show_cpuinfo(struct seq_file *m, void *v) seq_printf(m, "]\n"); } - seq_printf(m, "isa\t\t\t: mips1"); + seq_printf(m, "isa\t\t\t:"); + if (cpu_has_mips_r1) + seq_printf(m, " mips1"); if (cpu_has_mips_2) seq_printf(m, "%s", " mips2"); if (cpu_has_mips_3) @@ -95,10 +97,14 @@ static int show_cpuinfo(struct seq_file *m, void *v) seq_printf(m, "%s", " mips32r1"); if (cpu_has_mips32r2) seq_printf(m, "%s", " mips32r2"); + if (cpu_has_mips32r6) + seq_printf(m, "%s", " mips32r6"); if (cpu_has_mips64r1) seq_printf(m, "%s", " mips64r1"); if (cpu_has_mips64r2) seq_printf(m, "%s", " mips64r2"); + if (cpu_has_mips64r6) + seq_printf(m, "%s", " mips64r6"); seq_printf(m, "\n"); seq_printf(m, "ASEs implemented\t:"); diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 85bff5d513e..bf85cc180d9 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -25,6 +25,7 @@ #include <linux/completion.h> #include <linux/kallsyms.h> #include <linux/random.h> +#include <linux/prctl.h> #include <asm/asm.h> #include <asm/bootinfo.h> @@ -562,3 +563,98 @@ void arch_trigger_all_cpu_backtrace(bool include_self) { smp_call_function(arch_dump_stack, NULL, 1); } + +int mips_get_process_fp_mode(struct task_struct *task) +{ + int value = 0; + + if (!test_tsk_thread_flag(task, 
TIF_32BIT_FPREGS)) + value |= PR_FP_MODE_FR; + if (test_tsk_thread_flag(task, TIF_HYBRID_FPREGS)) + value |= PR_FP_MODE_FRE; + + return value; +} + +int mips_set_process_fp_mode(struct task_struct *task, unsigned int value) +{ + const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE; + unsigned long switch_count; + struct task_struct *t; + + /* Check the value is valid */ + if (value & ~known_bits) + return -EOPNOTSUPP; + + /* Avoid inadvertently triggering emulation */ + if ((value & PR_FP_MODE_FR) && cpu_has_fpu && + !(current_cpu_data.fpu_id & MIPS_FPIR_F64)) + return -EOPNOTSUPP; + if ((value & PR_FP_MODE_FRE) && cpu_has_fpu && !cpu_has_fre) + return -EOPNOTSUPP; + + /* FR = 0 not supported in MIPS R6 */ + if (!(value & PR_FP_MODE_FR) && cpu_has_fpu && cpu_has_mips_r6) + return -EOPNOTSUPP; + + /* Save FP & vector context, then disable FPU & MSA */ + if (task->signal == current->signal) + lose_fpu(1); + + /* Prevent any threads from obtaining live FP context */ + atomic_set(&task->mm->context.fp_mode_switching, 1); + smp_mb__after_atomic(); + + /* + * If there are multiple online CPUs then wait until all threads whose + * FP mode is about to change have been context switched. This approach + * allows us to only worry about whether an FP mode switch is in + * progress when FP is first used in a task's time slice. Pretty much all + * of the mode switch overhead can thus be confined to cases where mode + * switches are actually occurring. That is, to here. However for the + * thread performing the mode switch it may take a while... + */ + if (num_online_cpus() > 1) { + spin_lock_irq(&task->sighand->siglock); + + for_each_thread(task, t) { + if (t == current) + continue; + + switch_count = t->nvcsw + t->nivcsw; + + do { + spin_unlock_irq(&task->sighand->siglock); + cond_resched(); + spin_lock_irq(&task->sighand->siglock); + } while ((t->nvcsw + t->nivcsw) == switch_count); + } + + spin_unlock_irq(&task->sighand->siglock); + } + + /* + * There are now no threads of the process with live FP context, so it + * is safe to proceed with the FP mode switch. 
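+ * + * From userland these helpers are reached via prctl(); a minimal sketch + * of a caller (assuming the PR_SET_FP_MODE/PR_GET_FP_MODE wiring that + * invokes them): + * + *	prctl(PR_SET_FP_MODE, PR_FP_MODE_FR | PR_FP_MODE_FRE); + *	mode = prctl(PR_GET_FP_MODE); + *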
+ */ + for_each_thread(task, t) { + /* Update desired FP register width */ + if (value & PR_FP_MODE_FR) { + clear_tsk_thread_flag(t, TIF_32BIT_FPREGS); + } else { + set_tsk_thread_flag(t, TIF_32BIT_FPREGS); + clear_tsk_thread_flag(t, TIF_MSA_CTX_LIVE); + } + + /* Update desired FP single layout */ + if (value & PR_FP_MODE_FRE) + set_tsk_thread_flag(t, TIF_HYBRID_FPREGS); + else + clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS); + } + + /* Allow threads to use FP again */ + atomic_set(&task->mm->context.fp_mode_switching, 0); + + return 0; +} diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S index 6c160c67984..676c5030a95 100644 --- a/arch/mips/kernel/r4k_fpu.S +++ b/arch/mips/kernel/r4k_fpu.S @@ -34,7 +34,7 @@ .endm .set noreorder - .set arch=r4000 + .set MIPS_ISA_ARCH_LEVEL_RAW LEAF(_save_fp_context) .set push @@ -42,7 +42,8 @@ LEAF(_save_fp_context) cfc1 t1, fcr31 .set pop -#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) +#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ + defined(CONFIG_CPU_MIPS32_R6) .set push SET_HARDFLOAT #ifdef CONFIG_CPU_MIPS32_R2 @@ -105,10 +106,12 @@ LEAF(_save_fp_context32) SET_HARDFLOAT cfc1 t1, fcr31 +#ifndef CONFIG_CPU_MIPS64_R6 mfc0 t0, CP0_STATUS sll t0, t0, 5 bgez t0, 1f # skip storing odd if FR=0 nop +#endif /* Store the 16 odd double precision registers */ EX sdc1 $f1, SC32_FPREGS+8(a0) @@ -163,7 +166,8 @@ LEAF(_save_fp_context32) LEAF(_restore_fp_context) EX lw t1, SC_FPC_CSR(a0) -#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) +#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ + defined(CONFIG_CPU_MIPS32_R6) .set push SET_HARDFLOAT #ifdef CONFIG_CPU_MIPS32_R2 @@ -223,10 +227,12 @@ LEAF(_restore_fp_context32) SET_HARDFLOAT EX lw t1, SC32_FPC_CSR(a0) +#ifndef CONFIG_CPU_MIPS64_R6 mfc0 t0, CP0_STATUS sll t0, t0, 5 bgez t0, 1f # skip loading odd if FR=0 nop +#endif EX ldc1 $f1, SC32_FPREGS+8(a0) EX ldc1 $f3, SC32_FPREGS+24(a0) diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S index 64591e67187..3b1a36f13a7 100644 --- a/arch/mips/kernel/r4k_switch.S +++ b/arch/mips/kernel/r4k_switch.S @@ -115,7 +115,8 @@ * Save a thread's fp context. */ LEAF(_save_fp) -#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) +#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ + defined(CONFIG_CPU_MIPS32_R6) mfc0 t0, CP0_STATUS #endif fpu_save_double a0 t0 t1 # clobbers t1 @@ -126,7 +127,8 @@ LEAF(_save_fp) * Restore a thread's fp context. */ LEAF(_restore_fp) -#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) +#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ + defined(CONFIG_CPU_MIPS32_R6) mfc0 t0, CP0_STATUS #endif fpu_restore_double a0 t0 t1 # clobbers t1 @@ -240,9 +242,9 @@ LEAF(_init_fpu) mtc1 t1, $f30 mtc1 t1, $f31 -#ifdef CONFIG_CPU_MIPS32_R2 +#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) .set push - .set mips32r2 + .set MIPS_ISA_LEVEL_RAW .set fp=64 sll t0, t0, 5 # is Status.FR set? 
bgez t0, 1f # no: skip setting upper 32b @@ -280,9 +282,9 @@ LEAF(_init_fpu) mthc1 t1, $f30 mthc1 t1, $f31 1: .set pop -#endif /* CONFIG_CPU_MIPS32_R2 */ +#endif /* CONFIG_CPU_MIPS32_R2 || CONFIG_CPU_MIPS32_R6 */ #else - .set arch=r4000 + .set MIPS_ISA_ARCH_LEVEL_RAW dmtc1 t1, $f0 dmtc1 t1, $f2 dmtc1 t1, $f4 diff --git a/arch/mips/kernel/spram.c b/arch/mips/kernel/spram.c index 67f2495def1..d1168d7c31e 100644 --- a/arch/mips/kernel/spram.c +++ b/arch/mips/kernel/spram.c @@ -208,6 +208,7 @@ void spram_config(void) case CPU_INTERAPTIV: case CPU_PROAPTIV: case CPU_P5600: + case CPU_QEMU_GENERIC: config0 = read_c0_config(); /* FIXME: addresses are Malta specific */ if (config0 & (1<<24)) { diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c index 604b558809c..53a7ef9a8f3 100644 --- a/arch/mips/kernel/syscall.c +++ b/arch/mips/kernel/syscall.c @@ -136,7 +136,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new) : "memory"); } else if (cpu_has_llsc) { __asm__ __volatile__ ( - " .set arch=r4000 \n" + " .set "MIPS_ISA_ARCH_LEVEL" \n" " li %[err], 0 \n" "1: ll %[old], (%[addr]) \n" " move %[tmp], %[new] \n" diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index c3b41e24c05..33984c04b60 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -46,6 +46,7 @@ #include <asm/fpu.h> #include <asm/fpu_emulator.h> #include <asm/idle.h> +#include <asm/mips-r2-to-r6-emul.h> #include <asm/mipsregs.h> #include <asm/mipsmtregs.h> #include <asm/module.h> @@ -837,7 +838,7 @@ out: exception_exit(prev_state); } -static void do_trap_or_bp(struct pt_regs *regs, unsigned int code, +void do_trap_or_bp(struct pt_regs *regs, unsigned int code, const char *str) { siginfo_t info; @@ -1027,7 +1028,34 @@ asmlinkage void do_ri(struct pt_regs *regs) unsigned int opcode = 0; int status = -1; + /* + * Avoid any kernel code. Just emulate the R2 instruction + * as quickly as possible. + */ + if (mipsr2_emulation && cpu_has_mips_r6 && + likely(user_mode(regs))) { + if (likely(get_user(opcode, epc) >= 0)) { + status = mipsr2_decoder(regs, opcode); + switch (status) { + case 0: + case SIGEMT: + task_thread_info(current)->r2_emul_return = 1; + return; + case SIGILL: + goto no_r2_instr; + default: + process_fpemu_return(status, + ¤t->thread.cp0_baduaddr); + task_thread_info(current)->r2_emul_return = 1; + return; + } + } + } + +no_r2_instr: + prev_state = exception_enter(); + if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs), SIGILL) == NOTIFY_STOP) goto out; @@ -1134,10 +1162,29 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action, return NOTIFY_OK; } +static int wait_on_fp_mode_switch(atomic_t *p) +{ + /* + * The FP mode for this task is currently being switched. That may + * involve modifications to the format of this tasks FP context which + * make it unsafe to proceed with execution for the moment. Instead, + * schedule some other task. + */ + schedule(); + return 0; +} + static int enable_restore_fp_context(int msa) { int err, was_fpu_owner, prior_msa; + /* + * If an FP mode switch is currently underway, wait for it to + * complete before proceeding. + */ + wait_on_atomic_t(¤t->mm->context.fp_mode_switching, + wait_on_fp_mode_switch, TASK_KILLABLE); + if (!used_math()) { /* First time FP context user. 
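Set up an initial FP context (and, when msa is set, an initial vector context) before this task first takes ownership of the hardware. 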
*/ preempt_disable(); @@ -1541,6 +1588,7 @@ static inline void parity_protection_init(void) case CPU_INTERAPTIV: case CPU_PROAPTIV: case CPU_P5600: + case CPU_QEMU_GENERIC: { #define ERRCTL_PE 0x80000000 #define ERRCTL_L2P 0x00800000 @@ -1630,7 +1678,7 @@ asmlinkage void cache_parity_error(void) printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n", reg_val & (1<<30) ? "secondary" : "primary", reg_val & (1<<31) ? "data" : "insn"); - if (cpu_has_mips_r2 && + if ((cpu_has_mips_r2_r6) && ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) { pr_err("Error bits: %s%s%s%s%s%s%s%s\n", reg_val & (1<<29) ? "ED " : "", @@ -1670,7 +1718,7 @@ asmlinkage void do_ftlb(void) unsigned int reg_val; /* For the moment, report the problem and hang. */ - if (cpu_has_mips_r2 && + if ((cpu_has_mips_r2_r6) && ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) { pr_err("FTLB error exception, cp0_ecc=0x%08x:\n", read_c0_ecc()); @@ -1959,7 +2007,7 @@ static void configure_hwrena(void) { unsigned int hwrena = cpu_hwrena_impl_bits; - if (cpu_has_mips_r2) + if (cpu_has_mips_r2_r6) hwrena |= 0x0000000f; if (!noulri && cpu_has_userlocal) @@ -2003,7 +2051,7 @@ void per_cpu_trap_init(bool is_boot_cpu) * o read IntCtl.IPTI to determine the timer interrupt * o read IntCtl.IPPCI to determine the performance counter interrupt */ - if (cpu_has_mips_r2) { + if (cpu_has_mips_r2_r6) { cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP; cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7; cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7; @@ -2094,7 +2142,7 @@ void __init trap_init(void) #else ebase = CKSEG0; #endif - if (cpu_has_mips_r2) + if (cpu_has_mips_r2_r6) ebase += (read_c0_ebase() & 0x3ffff000); } diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index e11906dff88..bbb69695a0a 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c @@ -129,6 +129,7 @@ extern void show_registers(struct pt_regs *regs); : "=&r" (value), "=r" (res) \ : "r" (addr), "i" (-EFAULT)); +#ifndef CONFIG_CPU_MIPSR6 #define LoadW(addr, value, res) \ __asm__ __volatile__ ( \ "1:\t"user_lwl("%0", "(%2)")"\n" \ @@ -146,6 +147,39 @@ extern void show_registers(struct pt_regs *regs); ".previous" \ : "=&r" (value), "=r" (res) \ : "r" (addr), "i" (-EFAULT)); +#else +/* MIPSR6 has no lwl instruction */ +#define LoadW(addr, value, res) \ + __asm__ __volatile__ ( \ + ".set\tpush\n" \ + ".set\tnoat\n\t" \ + "1:"user_lb("%0", "0(%2)")"\n\t" \ + "2:"user_lbu("$1", "1(%2)")"\n\t" \ + "sll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "3:"user_lbu("$1", "2(%2)")"\n\t" \ + "sll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "4:"user_lbu("$1", "3(%2)")"\n\t" \ + "sll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "li\t%1, 0\n" \ + ".set\tpop\n" \ + "10:\n\t" \ + ".insn\n\t" \ + ".section\t.fixup,\"ax\"\n\t" \ + "11:\tli\t%1, %3\n\t" \ + "j\t10b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 11b\n\t" \ + STR(PTR)"\t2b, 11b\n\t" \ + STR(PTR)"\t3b, 11b\n\t" \ + STR(PTR)"\t4b, 11b\n\t" \ + ".previous" \ + : "=&r" (value), "=r" (res) \ + : "r" (addr), "i" (-EFAULT)); +#endif /* CONFIG_CPU_MIPSR6 */ #define LoadHWU(addr, value, res) \ __asm__ __volatile__ ( \ @@ -169,6 +203,7 @@ extern void show_registers(struct pt_regs *regs); : "=&r" (value), "=r" (res) \ : "r" (addr), "i" (-EFAULT)); +#ifndef CONFIG_CPU_MIPSR6 #define LoadWU(addr, value, res) \ __asm__ __volatile__ ( \ "1:\t"user_lwl("%0", "(%2)")"\n" \ @@ -206,6 +241,87 @@ extern void show_registers(struct pt_regs *regs); 
".previous" \ : "=&r" (value), "=r" (res) \ : "r" (addr), "i" (-EFAULT)); +#else +/* MIPSR6 has not lwl and ldl instructions */ +#define LoadWU(addr, value, res) \ + __asm__ __volatile__ ( \ + ".set\tpush\n\t" \ + ".set\tnoat\n\t" \ + "1:"user_lbu("%0", "0(%2)")"\n\t" \ + "2:"user_lbu("$1", "1(%2)")"\n\t" \ + "sll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "3:"user_lbu("$1", "2(%2)")"\n\t" \ + "sll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "4:"user_lbu("$1", "3(%2)")"\n\t" \ + "sll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "li\t%1, 0\n" \ + ".set\tpop\n" \ + "10:\n\t" \ + ".insn\n\t" \ + ".section\t.fixup,\"ax\"\n\t" \ + "11:\tli\t%1, %3\n\t" \ + "j\t10b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 11b\n\t" \ + STR(PTR)"\t2b, 11b\n\t" \ + STR(PTR)"\t3b, 11b\n\t" \ + STR(PTR)"\t4b, 11b\n\t" \ + ".previous" \ + : "=&r" (value), "=r" (res) \ + : "r" (addr), "i" (-EFAULT)); + +#define LoadDW(addr, value, res) \ + __asm__ __volatile__ ( \ + ".set\tpush\n\t" \ + ".set\tnoat\n\t" \ + "1:lb\t%0, 0(%2)\n\t" \ + "2:lbu\t $1, 1(%2)\n\t" \ + "dsll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "3:lbu\t$1, 2(%2)\n\t" \ + "dsll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "4:lbu\t$1, 3(%2)\n\t" \ + "dsll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "5:lbu\t$1, 4(%2)\n\t" \ + "dsll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "6:lbu\t$1, 5(%2)\n\t" \ + "dsll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "7:lbu\t$1, 6(%2)\n\t" \ + "dsll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "8:lbu\t$1, 7(%2)\n\t" \ + "dsll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "li\t%1, 0\n" \ + ".set\tpop\n\t" \ + "10:\n\t" \ + ".insn\n\t" \ + ".section\t.fixup,\"ax\"\n\t" \ + "11:\tli\t%1, %3\n\t" \ + "j\t10b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 11b\n\t" \ + STR(PTR)"\t2b, 11b\n\t" \ + STR(PTR)"\t3b, 11b\n\t" \ + STR(PTR)"\t4b, 11b\n\t" \ + STR(PTR)"\t5b, 11b\n\t" \ + STR(PTR)"\t6b, 11b\n\t" \ + STR(PTR)"\t7b, 11b\n\t" \ + STR(PTR)"\t8b, 11b\n\t" \ + ".previous" \ + : "=&r" (value), "=r" (res) \ + : "r" (addr), "i" (-EFAULT)); +#endif /* CONFIG_CPU_MIPSR6 */ + #define StoreHW(addr, value, res) \ __asm__ __volatile__ ( \ @@ -228,6 +344,7 @@ extern void show_registers(struct pt_regs *regs); : "=r" (res) \ : "r" (value), "r" (addr), "i" (-EFAULT)); +#ifndef CONFIG_CPU_MIPSR6 #define StoreW(addr, value, res) \ __asm__ __volatile__ ( \ "1:\t"user_swl("%1", "(%2)")"\n" \ @@ -263,9 +380,82 @@ extern void show_registers(struct pt_regs *regs); ".previous" \ : "=r" (res) \ : "r" (value), "r" (addr), "i" (-EFAULT)); -#endif +#else +/* MIPSR6 has no swl and sdl instructions */ +#define StoreW(addr, value, res) \ + __asm__ __volatile__ ( \ + ".set\tpush\n\t" \ + ".set\tnoat\n\t" \ + "1:"user_sb("%1", "3(%2)")"\n\t" \ + "srl\t$1, %1, 0x8\n\t" \ + "2:"user_sb("$1", "2(%2)")"\n\t" \ + "srl\t$1, $1, 0x8\n\t" \ + "3:"user_sb("$1", "1(%2)")"\n\t" \ + "srl\t$1, $1, 0x8\n\t" \ + "4:"user_sb("$1", "0(%2)")"\n\t" \ + ".set\tpop\n\t" \ + "li\t%0, 0\n" \ + "10:\n\t" \ + ".insn\n\t" \ + ".section\t.fixup,\"ax\"\n\t" \ + "11:\tli\t%0, %3\n\t" \ + "j\t10b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 11b\n\t" \ + STR(PTR)"\t2b, 11b\n\t" \ + STR(PTR)"\t3b, 11b\n\t" \ + STR(PTR)"\t4b, 11b\n\t" \ + ".previous" \ + : "=&r" (res) \ + : "r" (value), "r" (addr), "i" (-EFAULT) \ + : "memory"); + +#define StoreDW(addr, value, res) \ + __asm__ __volatile__ ( \ + ".set\tpush\n\t" \ + ".set\tnoat\n\t" \ + "1:sb\t%1, 7(%2)\n\t" \ + "dsrl\t$1, %1, 0x8\n\t" \ + "2:sb\t$1, 6(%2)\n\t" \ + "dsrl\t$1, $1, 
@@ -286,6 +476,7 @@ extern void show_registers(struct pt_regs *regs);
 	: "=&r" (value), "=r" (res) \
 	: "r" (addr), "i" (-EFAULT));

+#ifndef CONFIG_CPU_MIPSR6
 #define LoadW(addr, value, res) \
 	__asm__ __volatile__ ( \
 	"1:\t"user_lwl("%0", "3(%2)")"\n" \
@@ -303,6 +494,40 @@ extern void show_registers(struct pt_regs *regs);
 	".previous" \
 	: "=&r" (value), "=r" (res) \
 	: "r" (addr), "i" (-EFAULT));
+#else
+/* MIPSR6 has no lwl instruction */
+#define LoadW(addr, value, res) \
+	__asm__ __volatile__ ( \
+	".set\tpush\n" \
+	".set\tnoat\n\t" \
+	"1:"user_lb("%0", "3(%2)")"\n\t" \
+	"2:"user_lbu("$1", "2(%2)")"\n\t" \
+	"sll\t%0, 0x8\n\t" \
+	"or\t%0, $1\n\t" \
+	"3:"user_lbu("$1", "1(%2)")"\n\t" \
+	"sll\t%0, 0x8\n\t" \
+	"or\t%0, $1\n\t" \
+	"4:"user_lbu("$1", "0(%2)")"\n\t" \
+	"sll\t%0, 0x8\n\t" \
+	"or\t%0, $1\n\t" \
+	"li\t%1, 0\n" \
+	".set\tpop\n" \
+	"10:\n\t" \
+	".insn\n\t" \
+	".section\t.fixup,\"ax\"\n\t" \
+	"11:\tli\t%1, %3\n\t" \
+	"j\t10b\n\t" \
+	".previous\n\t" \
+	".section\t__ex_table,\"a\"\n\t" \
+	STR(PTR)"\t1b, 11b\n\t" \
+	STR(PTR)"\t2b, 11b\n\t" \
+	STR(PTR)"\t3b, 11b\n\t" \
+	STR(PTR)"\t4b, 11b\n\t" \
+	".previous" \
+	: "=&r" (value), "=r" (res) \
+	: "r" (addr), "i" (-EFAULT));
+#endif /* CONFIG_CPU_MIPSR6 */
+
 #define LoadHWU(addr, value, res) \
 	__asm__ __volatile__ ( \
@@ -326,6 +551,7 @@ extern void show_registers(struct pt_regs *regs);
 	: "=&r" (value), "=r" (res) \
 	: "r" (addr), "i" (-EFAULT));

+#ifndef CONFIG_CPU_MIPSR6
 #define LoadWU(addr, value, res) \
 	__asm__ __volatile__ ( \
 	"1:\t"user_lwl("%0", "3(%2)")"\n" \
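The little-endian variants are mirror images of the big-endian ones: the sign-carrying byte sits at the highest offset and the byte walk runs the other way. For comparison, the portable way to spell such an access in ordinary C is memcpy, which the compiler lowers to whatever byte sequence the target requires (a sketch, not kernel code):

	#include <string.h>

	/* No alignment assumed on p; the compiler picks the access sequence. */
	static inline unsigned int load_w_any(const void *p)
	{
		unsigned int v;

		memcpy(&v, p, sizeof(v));
		return v;
	}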
@@ -363,6 +589,86 @@ extern void show_registers(struct pt_regs *regs);
 	".previous" \
 	: "=&r" (value), "=r" (res) \
 	: "r" (addr), "i" (-EFAULT));
+#else
+/* MIPSR6 has no lwl and ldl instructions */
+#define LoadWU(addr, value, res) \
+	__asm__ __volatile__ ( \
+	".set\tpush\n\t" \
+	".set\tnoat\n\t" \
+	"1:"user_lbu("%0", "3(%2)")"\n\t" \
+	"2:"user_lbu("$1", "2(%2)")"\n\t" \
+	"sll\t%0, 0x8\n\t" \
+	"or\t%0, $1\n\t" \
+	"3:"user_lbu("$1", "1(%2)")"\n\t" \
+	"sll\t%0, 0x8\n\t" \
+	"or\t%0, $1\n\t" \
+	"4:"user_lbu("$1", "0(%2)")"\n\t" \
+	"sll\t%0, 0x8\n\t" \
+	"or\t%0, $1\n\t" \
+	"li\t%1, 0\n" \
+	".set\tpop\n" \
+	"10:\n\t" \
+	".insn\n\t" \
+	".section\t.fixup,\"ax\"\n\t" \
+	"11:\tli\t%1, %3\n\t" \
+	"j\t10b\n\t" \
+	".previous\n\t" \
+	".section\t__ex_table,\"a\"\n\t" \
+	STR(PTR)"\t1b, 11b\n\t" \
+	STR(PTR)"\t2b, 11b\n\t" \
+	STR(PTR)"\t3b, 11b\n\t" \
+	STR(PTR)"\t4b, 11b\n\t" \
+	".previous" \
+	: "=&r" (value), "=r" (res) \
+	: "r" (addr), "i" (-EFAULT));
+
+#define LoadDW(addr, value, res) \
+	__asm__ __volatile__ ( \
+	".set\tpush\n\t" \
+	".set\tnoat\n\t" \
+	"1:lb\t%0, 7(%2)\n\t" \
+	"2:lbu\t$1, 6(%2)\n\t" \
+	"dsll\t%0, 0x8\n\t" \
+	"or\t%0, $1\n\t" \
+	"3:lbu\t$1, 5(%2)\n\t" \
+	"dsll\t%0, 0x8\n\t" \
+	"or\t%0, $1\n\t" \
+	"4:lbu\t$1, 4(%2)\n\t" \
+	"dsll\t%0, 0x8\n\t" \
+	"or\t%0, $1\n\t" \
+	"5:lbu\t$1, 3(%2)\n\t" \
+	"dsll\t%0, 0x8\n\t" \
+	"or\t%0, $1\n\t" \
+	"6:lbu\t$1, 2(%2)\n\t" \
+	"dsll\t%0, 0x8\n\t" \
+	"or\t%0, $1\n\t" \
+	"7:lbu\t$1, 1(%2)\n\t" \
+	"dsll\t%0, 0x8\n\t" \
+	"or\t%0, $1\n\t" \
+	"8:lbu\t$1, 0(%2)\n\t" \
+	"dsll\t%0, 0x8\n\t" \
+	"or\t%0, $1\n\t" \
+	"li\t%1, 0\n" \
+	".set\tpop\n\t" \
+	"10:\n\t" \
+	".insn\n\t" \
+	".section\t.fixup,\"ax\"\n\t" \
+	"11:\tli\t%1, %3\n\t" \
+	"j\t10b\n\t" \
+	".previous\n\t" \
+	".section\t__ex_table,\"a\"\n\t" \
+	STR(PTR)"\t1b, 11b\n\t" \
+	STR(PTR)"\t2b, 11b\n\t" \
+	STR(PTR)"\t3b, 11b\n\t" \
+	STR(PTR)"\t4b, 11b\n\t" \
+	STR(PTR)"\t5b, 11b\n\t" \
+	STR(PTR)"\t6b, 11b\n\t" \
+	STR(PTR)"\t7b, 11b\n\t" \
+	STR(PTR)"\t8b, 11b\n\t" \
+	".previous" \
+	: "=&r" (value), "=r" (res) \
+	: "r" (addr), "i" (-EFAULT));
+#endif /* CONFIG_CPU_MIPSR6 */

 #define StoreHW(addr, value, res) \
 	__asm__ __volatile__ ( \
@@ -384,7 +690,7 @@ extern void show_registers(struct pt_regs *regs);
 	".previous" \
 	: "=r" (res) \
 	: "r" (value), "r" (addr), "i" (-EFAULT));
-
+#ifndef CONFIG_CPU_MIPSR6
 #define StoreW(addr, value, res) \
 	__asm__ __volatile__ ( \
 	"1:\t"user_swl("%1", "3(%2)")"\n" \
@@ -420,6 +726,79 @@ extern void show_registers(struct pt_regs *regs);
 	".previous" \
 	: "=r" (res) \
 	: "r" (value), "r" (addr), "i" (-EFAULT));
+#else
+/* MIPSR6 has no swl and sdl instructions */
+#define StoreW(addr, value, res) \
+	__asm__ __volatile__ ( \
+	".set\tpush\n\t" \
+	".set\tnoat\n\t" \
+	"1:"user_sb("%1", "0(%2)")"\n\t" \
+	"srl\t$1, %1, 0x8\n\t" \
+	"2:"user_sb("$1", "1(%2)")"\n\t" \
+	"srl\t$1, $1, 0x8\n\t" \
+	"3:"user_sb("$1", "2(%2)")"\n\t" \
+	"srl\t$1, $1, 0x8\n\t" \
+	"4:"user_sb("$1", "3(%2)")"\n\t" \
+	".set\tpop\n\t" \
+	"li\t%0, 0\n" \
+	"10:\n\t" \
+	".insn\n\t" \
+	".section\t.fixup,\"ax\"\n\t" \
+	"11:\tli\t%0, %3\n\t" \
+	"j\t10b\n\t" \
+	".previous\n\t" \
+	".section\t__ex_table,\"a\"\n\t" \
+	STR(PTR)"\t1b, 11b\n\t" \
+	STR(PTR)"\t2b, 11b\n\t" \
+	STR(PTR)"\t3b, 11b\n\t" \
+	STR(PTR)"\t4b, 11b\n\t" \
+	".previous" \
+	: "=&r" (res) \
+	: "r" (value), "r" (addr), "i" (-EFAULT) \
+	: "memory");
+
+#define StoreDW(addr, value, res) \
+	__asm__ __volatile__ ( \
+	".set\tpush\n\t" \
+	".set\tnoat\n\t" \
+	"1:sb\t%1, 0(%2)\n\t" \
+	"dsrl\t$1, %1, 0x8\n\t" \
+	"2:sb\t$1, 1(%2)\n\t" \
+	"dsrl\t$1, $1, 0x8\n\t" \
+	"3:sb\t$1, 2(%2)\n\t" \
+	"dsrl\t$1, $1, 0x8\n\t" \
+	"4:sb\t$1, 3(%2)\n\t" \
+	"dsrl\t$1, $1, 0x8\n\t" \
+	"5:sb\t$1, 4(%2)\n\t" \
+	"dsrl\t$1, $1, 0x8\n\t" \
+	"6:sb\t$1, 5(%2)\n\t" \
+	"dsrl\t$1, $1, 0x8\n\t" \
+	"7:sb\t$1, 6(%2)\n\t" \
+	"dsrl\t$1, $1, 0x8\n\t" \
+	"8:sb\t$1, 7(%2)\n\t" \
+	"dsrl\t$1, $1, 0x8\n\t" \
+	".set\tpop\n\t" \
+	"li\t%0, 0\n" \
+	"10:\n\t" \
+	".insn\n\t" \
+	".section\t.fixup,\"ax\"\n\t" \
+	"11:\tli\t%0, %3\n\t" \
+	"j\t10b\n\t" \
+	".previous\n\t" \
+	".section\t__ex_table,\"a\"\n\t" \
+	STR(PTR)"\t1b, 11b\n\t" \
+	STR(PTR)"\t2b, 11b\n\t" \
+	STR(PTR)"\t3b, 11b\n\t" \
+	STR(PTR)"\t4b, 11b\n\t" \
+	STR(PTR)"\t5b, 11b\n\t" \
+	STR(PTR)"\t6b, 11b\n\t" \
+	STR(PTR)"\t7b, 11b\n\t" \
+	STR(PTR)"\t8b, 11b\n\t" \
+	".previous" \
+	: "=&r" (res) \
+	: "r" (value), "r" (addr), "i" (-EFAULT) \
+	: "memory");
+#endif /* CONFIG_CPU_MIPSR6 */
 #endif

 static void emulate_load_store_insn(struct pt_regs *regs,
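All of the macros above share one calling convention: 'value' receives the datum and 'res' receives 0 or -EFAULT, set by the .fixup stub when a byte access faults. A simplified sketch of the call pattern used later in emulate_load_store_insn (error path condensed):

	unsigned long value;
	unsigned int res;

	LoadW(addr, value, res);	/* byte-wise on R6, lwl/lwr otherwise */
	if (res)
		goto fault;		/* the fixup code stored -EFAULT in res */
	regs->regs[insn.i_format.rt] = value;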
@@ -703,10 +1082,13 @@ static void emulate_load_store_insn(struct pt_regs *regs,
 			break;
 		return;

+#ifndef CONFIG_CPU_MIPSR6
 	/*
 	 * COP2 is available to the implementor for application specific use.
 	 * It's up to applications to register a notifier chain and do
 	 * whatever they have to do, including possible sending of signals.
+	 *
+	 * These instructions have been reallocated in Release 6.
 	 */
 	case lwc2_op:
 		cu2_notifier_call_chain(CU2_LWC2_OP, regs);
@@ -723,7 +1105,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
 	case sdc2_op:
 		cu2_notifier_call_chain(CU2_SDC2_OP, regs);
 		break;
-
+#endif
 	default:
 		/*
 		 * Pheeee...  We encountered an as yet unknown instruction or
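On pre-R6 cores a platform claims these COP2 accesses by hooking the CU2 notifier chain. A minimal sketch, assuming the cu2_notifier() helper from asm/cop2.h; the handler name, priority, and return policy are invented:

	#include <linux/notifier.h>
	#include <asm/cop2.h>

	static int my_cu2_call(struct notifier_block *nb, unsigned long action,
			       void *data)
	{
		/* 'data' is the struct pt_regs * passed by the call chain */
		switch (action) {
		case CU2_LWC2_OP:
		case CU2_SWC2_OP:
			/* emulate or log the access here */
			return NOTIFY_STOP;	/* handled; skip default policy */
		}
		return NOTIFY_OK;		/* let the default handler decide */
	}

	static int __init my_cu2_setup(void)
	{
		return cu2_notifier(my_cu2_call, 0);	/* priority 0 */
	}
	early_initcall(my_cu2_setup);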