Diffstat (limited to 'arch/mips/kernel')
46 files changed, 1908 insertions, 2743 deletions
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index 277dab301ce..008a2fed058 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile @@ -17,7 +17,6 @@ endif obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o -obj-$(CONFIG_MIPS_MT_SMTC) += cevt-smtc.o obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o obj-$(CONFIG_CEVT_GIC) += cevt-gic.o obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o @@ -42,7 +41,7 @@ obj-$(CONFIG_CPU_R4K_FPU) += r4k_fpu.o r4k_switch.o obj-$(CONFIG_CPU_R3000) += r2300_fpu.o r2300_switch.o obj-$(CONFIG_CPU_R6000) += r6000_fpu.o r4k_switch.o obj-$(CONFIG_CPU_TX39XX) += r2300_fpu.o r2300_switch.o -obj-$(CONFIG_CPU_CAVIUM_OCTEON) += octeon_switch.o +obj-$(CONFIG_CPU_CAVIUM_OCTEON) += r4k_fpu.o octeon_switch.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_SMP_UP) += smp-up.o @@ -50,7 +49,6 @@ obj-$(CONFIG_CPU_BMIPS) += smp-bmips.o bmips_vec.o obj-$(CONFIG_MIPS_MT) += mips-mt.o obj-$(CONFIG_MIPS_MT_FPAFF) += mips-mt-fpaff.o -obj-$(CONFIG_MIPS_MT_SMTC) += smtc.o smtc-asm.o smtc-proc.o obj-$(CONFIG_MIPS_MT_SMP) += smp-mt.o obj-$(CONFIG_MIPS_CMP) += smp-cmp.o obj-$(CONFIG_MIPS_CPS) += smp-cps.o cps-vec.o @@ -107,6 +105,9 @@ obj-$(CONFIG_JUMP_LABEL) += jump_label.o obj-$(CONFIG_MIPS_CM) += mips-cm.o obj-$(CONFIG_MIPS_CPC) += mips-cpc.o +obj-$(CONFIG_CPU_PM) += pm.o +obj-$(CONFIG_MIPS_CPS_PM) += pm-cps.o + # # DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is not # safe to unconditionnaly use the assembler -mdsp / -mdspr2 switches diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c index 0ea75c244b4..02f075df8f2 100644 --- a/arch/mips/kernel/asm-offsets.c +++ b/arch/mips/kernel/asm-offsets.c @@ -14,6 +14,7 @@ #include <linux/mm.h> #include <linux/kbuild.h> #include <linux/suspend.h> +#include <asm/pm.h> #include <asm/ptrace.h> #include <asm/processor.h> #include <asm/smp-cps.h> @@ -64,9 +65,6 @@ void output_ptreg_defines(void) OFFSET(PT_BVADDR, pt_regs, cp0_badvaddr); OFFSET(PT_STATUS, pt_regs, cp0_status); OFFSET(PT_CAUSE, pt_regs, cp0_cause); -#ifdef CONFIG_MIPS_MT_SMTC - OFFSET(PT_TCSTATUS, pt_regs, cp0_tcstatus); -#endif /* CONFIG_MIPS_MT_SMTC */ #ifdef CONFIG_CPU_CAVIUM_OCTEON OFFSET(PT_MPL, pt_regs, mpl); OFFSET(PT_MTP, pt_regs, mtp); @@ -404,6 +402,20 @@ void output_pbe_defines(void) } #endif +#ifdef CONFIG_CPU_PM +void output_pm_defines(void) +{ + COMMENT(" PM offsets. "); +#ifdef CONFIG_EVA + OFFSET(SSS_SEGCTL0, mips_static_suspend_state, segctl[0]); + OFFSET(SSS_SEGCTL1, mips_static_suspend_state, segctl[1]); + OFFSET(SSS_SEGCTL2, mips_static_suspend_state, segctl[2]); +#endif + OFFSET(SSS_SP, mips_static_suspend_state, sp); + BLANK(); +} +#endif + void output_kvm_defines(void) { COMMENT(" KVM/MIPS Specfic offsets. "); @@ -472,10 +484,14 @@ void output_kvm_defines(void) void output_cps_defines(void) { COMMENT(" MIPS CPS offsets. 
"); - OFFSET(BOOTCFG_CORE, boot_config, core); - OFFSET(BOOTCFG_VPE, boot_config, vpe); - OFFSET(BOOTCFG_PC, boot_config, pc); - OFFSET(BOOTCFG_SP, boot_config, sp); - OFFSET(BOOTCFG_GP, boot_config, gp); + + OFFSET(COREBOOTCFG_VPEMASK, core_boot_config, vpe_mask); + OFFSET(COREBOOTCFG_VPECONFIG, core_boot_config, vpe_config); + DEFINE(COREBOOTCFG_SIZE, sizeof(struct core_boot_config)); + + OFFSET(VPEBOOTCFG_PC, vpe_boot_config, pc); + OFFSET(VPEBOOTCFG_SP, vpe_boot_config, sp); + OFFSET(VPEBOOTCFG_GP, vpe_boot_config, gp); + DEFINE(VPEBOOTCFG_SIZE, sizeof(struct vpe_boot_config)); } #endif diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c index 4d78bf445a9..7b2df224f04 100644 --- a/arch/mips/kernel/branch.c +++ b/arch/mips/kernel/branch.c @@ -48,6 +48,202 @@ int __isa_exception_epc(struct pt_regs *regs) return epc; } +/* (microMIPS) Convert 16-bit register encoding to 32-bit register encoding. */ +static const unsigned int reg16to32map[8] = {16, 17, 2, 3, 4, 5, 6, 7}; + +int __mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, + unsigned long *contpc) +{ + union mips_instruction insn = (union mips_instruction)dec_insn.insn; + int bc_false = 0; + unsigned int fcr31; + unsigned int bit; + + if (!cpu_has_mmips) + return 0; + + switch (insn.mm_i_format.opcode) { + case mm_pool32a_op: + if ((insn.mm_i_format.simmediate & MM_POOL32A_MINOR_MASK) == + mm_pool32axf_op) { + switch (insn.mm_i_format.simmediate >> + MM_POOL32A_MINOR_SHIFT) { + case mm_jalr_op: + case mm_jalrhb_op: + case mm_jalrs_op: + case mm_jalrshb_op: + if (insn.mm_i_format.rt != 0) /* Not mm_jr */ + regs->regs[insn.mm_i_format.rt] = + regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; + *contpc = regs->regs[insn.mm_i_format.rs]; + return 1; + } + } + break; + case mm_pool32i_op: + switch (insn.mm_i_format.rt) { + case mm_bltzals_op: + case mm_bltzal_op: + regs->regs[31] = regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; + /* Fall through */ + case mm_bltz_op: + if ((long)regs->regs[insn.mm_i_format.rs] < 0) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.mm_i_format.simmediate << 1); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; + return 1; + case mm_bgezals_op: + case mm_bgezal_op: + regs->regs[31] = regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; + /* Fall through */ + case mm_bgez_op: + if ((long)regs->regs[insn.mm_i_format.rs] >= 0) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.mm_i_format.simmediate << 1); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; + return 1; + case mm_blez_op: + if ((long)regs->regs[insn.mm_i_format.rs] <= 0) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.mm_i_format.simmediate << 1); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; + return 1; + case mm_bgtz_op: + if ((long)regs->regs[insn.mm_i_format.rs] <= 0) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.mm_i_format.simmediate << 1); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; + return 1; + case mm_bc2f_op: + case mm_bc1f_op: + bc_false = 1; + /* Fall through */ + case mm_bc2t_op: + case mm_bc1t_op: + preempt_disable(); + if (is_fpu_owner()) + asm volatile("cfc1\t%0,$31" : "=r" (fcr31)); + else + fcr31 = current->thread.fpu.fcr31; + preempt_enable(); + + if (bc_false) + fcr31 = ~fcr31; + + bit = (insn.mm_i_format.rs >> 2); + bit += (bit != 0); + bit += 23; + if (fcr31 & (1 << bit)) + *contpc = regs->cp0_epc + + 
dec_insn.pc_inc + + (insn.mm_i_format.simmediate << 1); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + dec_insn.next_pc_inc; + return 1; + } + break; + case mm_pool16c_op: + switch (insn.mm_i_format.rt) { + case mm_jalr16_op: + case mm_jalrs16_op: + regs->regs[31] = regs->cp0_epc + + dec_insn.pc_inc + dec_insn.next_pc_inc; + /* Fall through */ + case mm_jr16_op: + *contpc = regs->regs[insn.mm_i_format.rs]; + return 1; + } + break; + case mm_beqz16_op: + if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] == 0) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.mm_b1_format.simmediate << 1); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + dec_insn.next_pc_inc; + return 1; + case mm_bnez16_op: + if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] != 0) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.mm_b1_format.simmediate << 1); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + dec_insn.next_pc_inc; + return 1; + case mm_b16_op: + *contpc = regs->cp0_epc + dec_insn.pc_inc + + (insn.mm_b0_format.simmediate << 1); + return 1; + case mm_beq32_op: + if (regs->regs[insn.mm_i_format.rs] == + regs->regs[insn.mm_i_format.rt]) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.mm_i_format.simmediate << 1); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; + return 1; + case mm_bne32_op: + if (regs->regs[insn.mm_i_format.rs] != + regs->regs[insn.mm_i_format.rt]) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.mm_i_format.simmediate << 1); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + dec_insn.next_pc_inc; + return 1; + case mm_jalx32_op: + regs->regs[31] = regs->cp0_epc + + dec_insn.pc_inc + dec_insn.next_pc_inc; + *contpc = regs->cp0_epc + dec_insn.pc_inc; + *contpc >>= 28; + *contpc <<= 28; + *contpc |= (insn.j_format.target << 2); + return 1; + case mm_jals32_op: + case mm_jal32_op: + regs->regs[31] = regs->cp0_epc + + dec_insn.pc_inc + dec_insn.next_pc_inc; + /* Fall through */ + case mm_j32_op: + *contpc = regs->cp0_epc + dec_insn.pc_inc; + *contpc >>= 27; + *contpc <<= 27; + *contpc |= (insn.j_format.target << 1); + set_isa16_mode(*contpc); + return 1; + } + return 0; +} + /* * Compute return address and emulate branch in microMIPS mode after an * exception only. 
It does not handle compact branches/jumps and cannot @@ -317,7 +513,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, if (regs->regs[insn.i_format.rs] == regs->regs[insn.i_format.rt]) { epc = epc + 4 + (insn.i_format.simmediate << 2); - if (insn.i_format.rt == beql_op) + if (insn.i_format.opcode == beql_op) ret = BRANCH_LIKELY_TAKEN; } else epc += 8; @@ -329,7 +525,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, if (regs->regs[insn.i_format.rs] != regs->regs[insn.i_format.rt]) { epc = epc + 4 + (insn.i_format.simmediate << 2); - if (insn.i_format.rt == bnel_op) + if (insn.i_format.opcode == bnel_op) ret = BRANCH_LIKELY_TAKEN; } else epc += 8; @@ -341,7 +537,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, /* rt field assumed to be zero */ if ((long)regs->regs[insn.i_format.rs] <= 0) { epc = epc + 4 + (insn.i_format.simmediate << 2); - if (insn.i_format.rt == bnel_op) + if (insn.i_format.opcode == blezl_op) ret = BRANCH_LIKELY_TAKEN; } else epc += 8; @@ -353,7 +549,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, /* rt field assumed to be zero */ if ((long)regs->regs[insn.i_format.rs] > 0) { epc = epc + 4 + (insn.i_format.simmediate << 2); - if (insn.i_format.rt == bnel_op) + if (insn.i_format.opcode == bgtzl_op) ret = BRANCH_LIKELY_TAKEN; } else epc += 8; @@ -366,7 +562,11 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, case cop1_op: preempt_disable(); if (is_fpu_owner()) - asm volatile("cfc1\t%0,$31" : "=r" (fcr31)); + asm volatile( + ".set push\n" + "\t.set mips1\n" + "\tcfc1\t%0,$31\n" + "\t.set pop" : "=r" (fcr31)); else fcr31 = current->thread.fpu.fcr31; preempt_enable(); diff --git a/arch/mips/kernel/cevt-gic.c b/arch/mips/kernel/cevt-gic.c index 594cbbf16d6..6093716980b 100644 --- a/arch/mips/kernel/cevt-gic.c +++ b/arch/mips/kernel/cevt-gic.c @@ -26,7 +26,7 @@ static int gic_next_event(unsigned long delta, struct clock_event_device *evt) cnt = gic_read_count(); cnt += (u64)delta; - gic_write_compare(cnt); + gic_write_cpu_compare(cnt, cpumask_first(evt->cpumask)); res = ((int)(gic_read_count() - cnt) >= 0) ? -ETIME : 0; return res; } @@ -73,7 +73,8 @@ int gic_clockevent_init(void) cd = &per_cpu(gic_clockevent_device, cpu); cd->name = "MIPS GIC"; - cd->features = CLOCK_EVT_FEAT_ONESHOT; + cd->features = CLOCK_EVT_FEAT_ONESHOT | + CLOCK_EVT_FEAT_C3STOP; clockevent_set_clock(cd, gic_frequency); diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c index 50d3f5a8d6b..bc127e22fda 100644 --- a/arch/mips/kernel/cevt-r4k.c +++ b/arch/mips/kernel/cevt-r4k.c @@ -12,17 +12,10 @@ #include <linux/smp.h> #include <linux/irq.h> -#include <asm/smtc_ipi.h> #include <asm/time.h> #include <asm/cevt-r4k.h> #include <asm/gic.h> -/* - * The SMTC Kernel for the 34K, 1004K, et. al. replaces several - * of these routines with SMTC-specific variants. 
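
The four branch-likely fixes above correct tests that compared the I-format rt field against an opcode value, so BRANCH_LIKELY_TAKEN was almost never reported. A standalone sketch of the two fields involved (beql_op is 0x14 in the kernel's inst.h; the encoded word is a hypothetical beql $3, $5 with a branch offset of 4 words):

	#include <stdint.h>
	#include <stdio.h>

	#define BEQL_OP 0x14	/* beql_op from arch/mips/include/uapi/asm/inst.h */

	int main(void)
	{
		/* I-format: opcode[31:26] rs[25:21] rt[20:16] simmediate[15:0] */
		uint32_t insn = (0x14u << 26) | (3u << 21) | (5u << 16) | 4u;
		unsigned opcode = insn >> 26;
		unsigned rt = (insn >> 16) & 0x1f;

		printf("opcode=0x%02x rt=%u\n", opcode, rt);
		printf("rt == BEQL_OP?     %s\n", rt == BEQL_OP ? "yes" : "no");
		printf("opcode == BEQL_OP? %s\n", opcode == BEQL_OP ? "yes" : "no");
		return 0;
	}
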
- */ - -#ifndef CONFIG_MIPS_MT_SMTC static int mips_next_event(unsigned long delta, struct clock_event_device *evt) { @@ -36,8 +29,6 @@ static int mips_next_event(unsigned long delta, return res; } -#endif /* CONFIG_MIPS_MT_SMTC */ - void mips_set_clock_mode(enum clock_event_mode mode, struct clock_event_device *evt) { @@ -47,7 +38,6 @@ void mips_set_clock_mode(enum clock_event_mode mode, DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device); int cp0_timer_irq_installed; -#ifndef CONFIG_MIPS_MT_SMTC irqreturn_t c0_compare_interrupt(int irq, void *dev_id) { const int r2 = cpu_has_mips_r2; @@ -72,9 +62,6 @@ irqreturn_t c0_compare_interrupt(int irq, void *dev_id) /* Clear Count/Compare Interrupt */ write_c0_compare(read_c0_compare()); cd = &per_cpu(mips_clockevent_device, cpu); -#ifdef CONFIG_CEVT_GIC - if (!gic_present) -#endif cd->event_handler(cd); } @@ -82,8 +69,6 @@ out: return IRQ_HANDLED; } -#endif /* Not CONFIG_MIPS_MT_SMTC */ - struct irqaction c0_compare_irqaction = { .handler = c0_compare_interrupt, .flags = IRQF_PERCPU | IRQF_TIMER, @@ -170,7 +155,6 @@ int c0_compare_int_usable(void) return 1; } -#ifndef CONFIG_MIPS_MT_SMTC int r4k_clockevent_init(void) { unsigned int cpu = smp_processor_id(); @@ -195,7 +179,9 @@ int r4k_clockevent_init(void) cd = &per_cpu(mips_clockevent_device, cpu); cd->name = "MIPS"; - cd->features = CLOCK_EVT_FEAT_ONESHOT; + cd->features = CLOCK_EVT_FEAT_ONESHOT | + CLOCK_EVT_FEAT_C3STOP | + CLOCK_EVT_FEAT_PERCPU; clockevent_set_clock(cd, mips_hpt_frequency); @@ -210,9 +196,6 @@ int r4k_clockevent_init(void) cd->set_mode = mips_set_clock_mode; cd->event_handler = mips_event_handler; -#ifdef CONFIG_CEVT_GIC - if (!gic_present) -#endif clockevents_register_device(cd); if (cp0_timer_irq_installed) @@ -225,4 +208,3 @@ int r4k_clockevent_init(void) return 0; } -#endif /* Not CONFIG_MIPS_MT_SMTC */ diff --git a/arch/mips/kernel/cevt-smtc.c b/arch/mips/kernel/cevt-smtc.c deleted file mode 100644 index b6cf0a60d89..00000000000 --- a/arch/mips/kernel/cevt-smtc.c +++ /dev/null @@ -1,324 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2007 MIPS Technologies, Inc. - * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org> - * Copyright (C) 2008 Kevin D. Kissell, Paralogos sarl - */ -#include <linux/clockchips.h> -#include <linux/interrupt.h> -#include <linux/percpu.h> -#include <linux/smp.h> -#include <linux/irq.h> - -#include <asm/smtc_ipi.h> -#include <asm/time.h> -#include <asm/cevt-r4k.h> - -/* - * Variant clock event timer support for SMTC on MIPS 34K, 1004K - * or other MIPS MT cores. - * - * Notes on SMTC Support: - * - * SMTC has multiple microthread TCs pretending to be Linux CPUs. - * But there's only one Count/Compare pair per VPE, and Compare - * interrupts are taken opportunisitically by available TCs - * bound to the VPE with the Count register. The new timer - * framework provides for global broadcasts, but we really - * want VPE-level multicasts for best behavior. So instead - * of invoking the high-level clock-event broadcast code, - * this version of SMTC support uses the historical SMTC - * multicast mechanisms "under the hood", appearing to the - * generic clock layer as if the interrupts are per-CPU. - * - * The approach taken here is to maintain a set of NR_CPUS - * virtual timers, and track which "CPU" needs to be alerted - * at each event. 
- * - * It's unlikely that we'll see a MIPS MT core with more than - * 2 VPEs, but we *know* that we won't need to handle more - * VPEs than we have "CPUs". So NCPUs arrays of NCPUs elements - * is always going to be overkill, but always going to be enough. - */ - -unsigned long smtc_nexttime[NR_CPUS][NR_CPUS]; -static int smtc_nextinvpe[NR_CPUS]; - -/* - * Timestamps stored are absolute values to be programmed - * into Count register. Valid timestamps will never be zero. - * If a Zero Count value is actually calculated, it is converted - * to be a 1, which will introduce 1 or two CPU cycles of error - * roughly once every four billion events, which at 1000 HZ means - * about once every 50 days. If that's actually a problem, one - * could alternate squashing 0 to 1 and to -1. - */ - -#define MAKEVALID(x) (((x) == 0L) ? 1L : (x)) -#define ISVALID(x) ((x) != 0L) - -/* - * Time comparison is subtle, as it's really truncated - * modular arithmetic. - */ - -#define IS_SOONER(a, b, reference) \ - (((a) - (unsigned long)(reference)) < ((b) - (unsigned long)(reference))) - -/* - * CATCHUP_INCREMENT, used when the function falls behind the counter. - * Could be an increasing function instead of a constant; - */ - -#define CATCHUP_INCREMENT 64 - -static int mips_next_event(unsigned long delta, - struct clock_event_device *evt) -{ - unsigned long flags; - unsigned int mtflags; - unsigned long timestamp, reference, previous; - unsigned long nextcomp = 0L; - int vpe = current_cpu_data.vpe_id; - int cpu = smp_processor_id(); - local_irq_save(flags); - mtflags = dmt(); - - /* - * Maintain the per-TC virtual timer - * and program the per-VPE shared Count register - * as appropriate here... - */ - reference = (unsigned long)read_c0_count(); - timestamp = MAKEVALID(reference + delta); - /* - * To really model the clock, we have to catch the case - * where the current next-in-VPE timestamp is the old - * timestamp for the calling CPE, but the new value is - * in fact later. In that case, we have to do a full - * scan and discover the new next-in-VPE CPU id and - * timestamp. - */ - previous = smtc_nexttime[vpe][cpu]; - if (cpu == smtc_nextinvpe[vpe] && ISVALID(previous) - && IS_SOONER(previous, timestamp, reference)) { - int i; - int soonest = cpu; - - /* - * Update timestamp array here, so that new - * value gets considered along with those of - * other virtual CPUs on the VPE. - */ - smtc_nexttime[vpe][cpu] = timestamp; - for_each_online_cpu(i) { - if (ISVALID(smtc_nexttime[vpe][i]) - && IS_SOONER(smtc_nexttime[vpe][i], - smtc_nexttime[vpe][soonest], reference)) { - soonest = i; - } - } - smtc_nextinvpe[vpe] = soonest; - nextcomp = smtc_nexttime[vpe][soonest]; - /* - * Otherwise, we don't have to process the whole array rank, - * we just have to see if the event horizon has gotten closer. - */ - } else { - if (!ISVALID(smtc_nexttime[vpe][smtc_nextinvpe[vpe]]) || - IS_SOONER(timestamp, - smtc_nexttime[vpe][smtc_nextinvpe[vpe]], reference)) { - smtc_nextinvpe[vpe] = cpu; - nextcomp = timestamp; - } - /* - * Since next-in-VPE may me the same as the executing - * virtual CPU, we update the array *after* checking - * its value. - */ - smtc_nexttime[vpe][cpu] = timestamp; - } - - /* - * It may be that, in fact, we don't need to update Compare, - * but if we do, we want to make sure we didn't fall into - * a crack just behind Count. 
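
The IS_SOONER() macro being deleted above captures the one subtle idea in this code: ordering timestamps on a free-running, wrapping Count register by comparing unsigned distances from a common reference. A minimal runnable illustration (the 32-bit values are made up):

	#include <stdio.h>

	/* Truncated modular comparison: a is sooner than b, as seen from ref,
	 * iff the unsigned distance (a - ref) is smaller than (b - ref). */
	static int is_sooner(unsigned long a, unsigned long b, unsigned long ref)
	{
		return (a - ref) < (b - ref);
	}

	int main(void)
	{
		/* 0xFFFFFFF0 fires before 0x10 when Count is at 0xFFFFFF00,
		 * even though it is numerically larger. */
		printf("%d\n", is_sooner(0xFFFFFFF0UL, 0x10UL, 0xFFFFFF00UL));
		return 0;
	}
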
- */ - if (ISVALID(nextcomp)) { - write_c0_compare(nextcomp); - ehb(); - /* - * We never return an error, we just make sure - * that we trigger the handlers as quickly as - * we can if we fell behind. - */ - while ((nextcomp - (unsigned long)read_c0_count()) - > (unsigned long)LONG_MAX) { - nextcomp += CATCHUP_INCREMENT; - write_c0_compare(nextcomp); - ehb(); - } - } - emt(mtflags); - local_irq_restore(flags); - return 0; -} - - -void smtc_distribute_timer(int vpe) -{ - unsigned long flags; - unsigned int mtflags; - int cpu; - struct clock_event_device *cd; - unsigned long nextstamp; - unsigned long reference; - - -repeat: - nextstamp = 0L; - for_each_online_cpu(cpu) { - /* - * Find virtual CPUs within the current VPE who have - * unserviced timer requests whose time is now past. - */ - local_irq_save(flags); - mtflags = dmt(); - if (cpu_data[cpu].vpe_id == vpe && - ISVALID(smtc_nexttime[vpe][cpu])) { - reference = (unsigned long)read_c0_count(); - if ((smtc_nexttime[vpe][cpu] - reference) - > (unsigned long)LONG_MAX) { - smtc_nexttime[vpe][cpu] = 0L; - emt(mtflags); - local_irq_restore(flags); - /* - * We don't send IPIs to ourself. - */ - if (cpu != smp_processor_id()) { - smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0); - } else { - cd = &per_cpu(mips_clockevent_device, cpu); - cd->event_handler(cd); - } - } else { - /* Local to VPE but Valid Time not yet reached. */ - if (!ISVALID(nextstamp) || - IS_SOONER(smtc_nexttime[vpe][cpu], nextstamp, - reference)) { - smtc_nextinvpe[vpe] = cpu; - nextstamp = smtc_nexttime[vpe][cpu]; - } - emt(mtflags); - local_irq_restore(flags); - } - } else { - emt(mtflags); - local_irq_restore(flags); - - } - } - /* Reprogram for interrupt at next soonest timestamp for VPE */ - if (ISVALID(nextstamp)) { - write_c0_compare(nextstamp); - ehb(); - if ((nextstamp - (unsigned long)read_c0_count()) - > (unsigned long)LONG_MAX) - goto repeat; - } -} - - -irqreturn_t c0_compare_interrupt(int irq, void *dev_id) -{ - int cpu = smp_processor_id(); - - /* If we're running SMTC, we've got MIPS MT and therefore MIPS32R2 */ - handle_perf_irq(1); - - if (read_c0_cause() & (1 << 30)) { - /* Clear Count/Compare Interrupt */ - write_c0_compare(read_c0_compare()); - smtc_distribute_timer(cpu_data[cpu].vpe_id); - } - return IRQ_HANDLED; -} - - -int smtc_clockevent_init(void) -{ - uint64_t mips_freq = mips_hpt_frequency; - unsigned int cpu = smp_processor_id(); - struct clock_event_device *cd; - unsigned int irq; - int i; - int j; - - if (!cpu_has_counter || !mips_hpt_frequency) - return -ENXIO; - if (cpu == 0) { - for (i = 0; i < num_possible_cpus(); i++) { - smtc_nextinvpe[i] = 0; - for (j = 0; j < num_possible_cpus(); j++) - smtc_nexttime[i][j] = 0L; - } - /* - * SMTC also can't have the usablility test - * run by secondary TCs once Compare is in use. - */ - if (!c0_compare_int_usable()) - return -ENXIO; - } - - /* - * With vectored interrupts things are getting platform specific. - * get_c0_compare_int is a hook to allow a platform to return the - * interrupt number of it's liking. 
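
The mult/shift setup just below (the same pattern survives in r4k_clockevent_init) is a fixed-point scale between nanoseconds and timer cycles: cycles = (ns * mult) >> shift, with clockevent_delta2ns() computing the inverse. A runnable check, assuming a hypothetical 100 MHz timer:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t freq = 100000000;	/* assumed 100 MHz; not from the patch */
		unsigned shift = 32;

		/* div_sc(freq, NSEC_PER_SEC, 32) */
		uint64_t mult = (freq << shift) / 1000000000u;

		/* clockevent_delta2ns(0x300, cd): the 0x300-cycle minimum in ns */
		uint64_t min_ns = ((uint64_t)0x300 << shift) / mult;

		printf("mult=%llu min_delta_ns=%llu\n",
		       (unsigned long long)mult, (unsigned long long)min_ns);
		return 0;
	}
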
- */ - irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq; - if (get_c0_compare_int) - irq = get_c0_compare_int(); - - cd = &per_cpu(mips_clockevent_device, cpu); - - cd->name = "MIPS"; - cd->features = CLOCK_EVT_FEAT_ONESHOT; - - /* Calculate the min / max delta */ - cd->mult = div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32); - cd->shift = 32; - cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); - cd->min_delta_ns = clockevent_delta2ns(0x300, cd); - - cd->rating = 300; - cd->irq = irq; - cd->cpumask = cpumask_of(cpu); - cd->set_next_event = mips_next_event; - cd->set_mode = mips_set_clock_mode; - cd->event_handler = mips_event_handler; - - clockevents_register_device(cd); - - /* - * On SMTC we only want to do the data structure - * initialization and IRQ setup once. - */ - if (cpu) - return 0; - /* - * And we need the hwmask associated with the c0_compare - * vector to be initialized. - */ - irq_hwmask[irq] = (0x100 << cp0_compare_irq); - if (cp0_timer_irq_installed) - return 0; - - cp0_timer_irq_installed = 1; - - setup_irq(irq, &c0_compare_irqaction); - - return 0; -} diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S index f7a46db4b16..6f4f739dad9 100644 --- a/arch/mips/kernel/cps-vec.S +++ b/arch/mips/kernel/cps-vec.S @@ -14,19 +14,43 @@ #include <asm/asmmacro.h> #include <asm/cacheops.h> #include <asm/mipsregs.h> +#include <asm/mipsmtregs.h> +#include <asm/pm.h> -#define GCR_CL_COHERENCE_OFS 0x2008 +#define GCR_CL_COHERENCE_OFS 0x2008 +#define GCR_CL_ID_OFS 0x2028 + +.extern mips_cm_base + +.set noreorder + + /* + * Set dest to non-zero if the core supports the MT ASE, else zero. If + * MT is not supported then branch to nomt. + */ + .macro has_mt dest, nomt + mfc0 \dest, CP0_CONFIG + bgez \dest, \nomt + mfc0 \dest, CP0_CONFIG, 1 + bgez \dest, \nomt + mfc0 \dest, CP0_CONFIG, 2 + bgez \dest, \nomt + mfc0 \dest, CP0_CONFIG, 3 + andi \dest, \dest, MIPS_CONF3_MT + beqz \dest, \nomt + .endm .section .text.cps-vec .balign 0x1000 -.set noreorder LEAF(mips_cps_core_entry) /* - * These first 8 bytes will be patched by cps_smp_setup to load the - * base address of the CM GCRs into register v1. + * These first 12 bytes will be patched by cps_smp_setup to load the + * base address of the CM GCRs into register v1 and the CCA to use into + * register s0. */ .quad 0 + .word 0 /* Check whether we're here due to an NMI */ mfc0 k0, CP0_STATUS @@ -117,10 +141,11 @@ icache_done: add a0, a0, t0 dcache_done: - /* Set Kseg0 cacheable, coherent, write-back, write-allocate */ + /* Set Kseg0 CCA to that in s0 */ mfc0 t0, CP0_CONFIG ori t0, 0x7 - xori t0, 0x2 + xori t0, 0x7 + or t0, t0, s0 mtc0 t0, CP0_CONFIG ehb @@ -134,21 +159,24 @@ dcache_done: jr t0 nop -1: /* We're up, cached & coherent */ + /* + * We're up, cached & coherent. Perform any further required core-level + * initialisation. + */ +1: jal mips_cps_core_init + nop /* - * TODO: We should check the VPE number we intended to boot here, and - * if non-zero we should start that VPE and stop this one. For - * the moment this doesn't matter since CPUs are brought up - * sequentially and in order, but once hotplug is implemented - * this will need revisiting. + * Boot any other VPEs within this core that should be online, and + * deactivate this VPE if it should be offline. */ + jal mips_cps_boot_vpes + nop /* Off we go! 
*/ - la t0, mips_cps_bootcfg - lw t1, BOOTCFG_PC(t0) - lw gp, BOOTCFG_GP(t0) - lw sp, BOOTCFG_SP(t0) + lw t1, VPEBOOTCFG_PC(v0) + lw gp, VPEBOOTCFG_GP(v0) + lw sp, VPEBOOTCFG_SP(v0) jr t1 nop END(mips_cps_core_entry) @@ -189,3 +217,271 @@ LEAF(excep_ejtag) jr k0 nop END(excep_ejtag) + +LEAF(mips_cps_core_init) +#ifdef CONFIG_MIPS_MT + /* Check that the core implements the MT ASE */ + has_mt t0, 3f + nop + + .set push + .set mt + + /* Only allow 1 TC per VPE to execute... */ + dmt + + /* ...and for the moment only 1 VPE */ + dvpe + la t1, 1f + jr.hb t1 + nop + + /* Enter VPE configuration state */ +1: mfc0 t0, CP0_MVPCONTROL + ori t0, t0, MVPCONTROL_VPC + mtc0 t0, CP0_MVPCONTROL + + /* Retrieve the number of VPEs within the core */ + mfc0 t0, CP0_MVPCONF0 + srl t0, t0, MVPCONF0_PVPE_SHIFT + andi t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT) + addi t7, t0, 1 + + /* If there's only 1, we're done */ + beqz t0, 2f + nop + + /* Loop through each VPE within this core */ + li t5, 1 + +1: /* Operate on the appropriate TC */ + mtc0 t5, CP0_VPECONTROL + ehb + + /* Bind TC to VPE (1:1 TC:VPE mapping) */ + mttc0 t5, CP0_TCBIND + + /* Set exclusive TC, non-active, master */ + li t0, VPECONF0_MVP + sll t1, t5, VPECONF0_XTC_SHIFT + or t0, t0, t1 + mttc0 t0, CP0_VPECONF0 + + /* Set TC non-active, non-allocatable */ + mttc0 zero, CP0_TCSTATUS + + /* Set TC halted */ + li t0, TCHALT_H + mttc0 t0, CP0_TCHALT + + /* Next VPE */ + addi t5, t5, 1 + slt t0, t5, t7 + bnez t0, 1b + nop + + /* Leave VPE configuration state */ +2: mfc0 t0, CP0_MVPCONTROL + xori t0, t0, MVPCONTROL_VPC + mtc0 t0, CP0_MVPCONTROL + +3: .set pop +#endif + jr ra + nop + END(mips_cps_core_init) + +LEAF(mips_cps_boot_vpes) + /* Retrieve CM base address */ + la t0, mips_cm_base + lw t0, 0(t0) + + /* Calculate a pointer to this cores struct core_boot_config */ + lw t0, GCR_CL_ID_OFS(t0) + li t1, COREBOOTCFG_SIZE + mul t0, t0, t1 + la t1, mips_cps_core_bootcfg + lw t1, 0(t1) + addu t0, t0, t1 + + /* Calculate this VPEs ID. If the core doesn't support MT use 0 */ + has_mt t6, 1f + li t9, 0 + + /* Find the number of VPEs present in the core */ + mfc0 t1, CP0_MVPCONF0 + srl t1, t1, MVPCONF0_PVPE_SHIFT + andi t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT + addi t1, t1, 1 + + /* Calculate a mask for the VPE ID from EBase.CPUNum */ + clz t1, t1 + li t2, 31 + subu t1, t2, t1 + li t2, 1 + sll t1, t2, t1 + addiu t1, t1, -1 + + /* Retrieve the VPE ID from EBase.CPUNum */ + mfc0 t9, $15, 1 + and t9, t9, t1 + +1: /* Calculate a pointer to this VPEs struct vpe_boot_config */ + li t1, VPEBOOTCFG_SIZE + mul v0, t9, t1 + lw t7, COREBOOTCFG_VPECONFIG(t0) + addu v0, v0, t7 + +#ifdef CONFIG_MIPS_MT + + /* If the core doesn't support MT then return */ + bnez t6, 1f + nop + jr ra + nop + + .set push + .set mt + +1: /* Enter VPE configuration state */ + dvpe + la t1, 1f + jr.hb t1 + nop +1: mfc0 t1, CP0_MVPCONTROL + ori t1, t1, MVPCONTROL_VPC + mtc0 t1, CP0_MVPCONTROL + ehb + + /* Loop through each VPE */ + lw t6, COREBOOTCFG_VPEMASK(t0) + move t8, t6 + li t5, 0 + + /* Check whether the VPE should be running. 
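
The clz sequence above derives the EBase.CPUNum mask from the VPE count by rounding it to a power of two and subtracting one. A runnable model (assumes a GCC/Clang-style __builtin_clz):

	#include <stdio.h>

	/* Mirrors: clz t1,t1; li t2,31; subu t1,t2,t1; li t2,1; sll t1,t2,t1;
	 * addiu t1,t1,-1 */
	static unsigned vpe_id_mask(unsigned nvpes)
	{
		return (1u << (31 - __builtin_clz(nvpes))) - 1;
	}

	int main(void)
	{
		printf("2 VPEs -> mask 0x%x\n", vpe_id_mask(2));	/* 0x1 */
		printf("4 VPEs -> mask 0x%x\n", vpe_id_mask(4));	/* 0x3 */
		return 0;
	}
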
If not, skip it */ +1: andi t0, t6, 1 + beqz t0, 2f + nop + + /* Operate on the appropriate TC */ + mfc0 t0, CP0_VPECONTROL + ori t0, t0, VPECONTROL_TARGTC + xori t0, t0, VPECONTROL_TARGTC + or t0, t0, t5 + mtc0 t0, CP0_VPECONTROL + ehb + + /* Skip the VPE if its TC is not halted */ + mftc0 t0, CP0_TCHALT + beqz t0, 2f + nop + + /* Calculate a pointer to the VPEs struct vpe_boot_config */ + li t0, VPEBOOTCFG_SIZE + mul t0, t0, t5 + addu t0, t0, t7 + + /* Set the TC restart PC */ + lw t1, VPEBOOTCFG_PC(t0) + mttc0 t1, CP0_TCRESTART + + /* Set the TC stack pointer */ + lw t1, VPEBOOTCFG_SP(t0) + mttgpr t1, sp + + /* Set the TC global pointer */ + lw t1, VPEBOOTCFG_GP(t0) + mttgpr t1, gp + + /* Copy config from this VPE */ + mfc0 t0, CP0_CONFIG + mttc0 t0, CP0_CONFIG + + /* Ensure no software interrupts are pending */ + mttc0 zero, CP0_CAUSE + mttc0 zero, CP0_STATUS + + /* Set TC active, not interrupt exempt */ + mftc0 t0, CP0_TCSTATUS + li t1, ~TCSTATUS_IXMT + and t0, t0, t1 + ori t0, t0, TCSTATUS_A + mttc0 t0, CP0_TCSTATUS + + /* Clear the TC halt bit */ + mttc0 zero, CP0_TCHALT + + /* Set VPE active */ + mftc0 t0, CP0_VPECONF0 + ori t0, t0, VPECONF0_VPA + mttc0 t0, CP0_VPECONF0 + + /* Next VPE */ +2: srl t6, t6, 1 + addi t5, t5, 1 + bnez t6, 1b + nop + + /* Leave VPE configuration state */ + mfc0 t1, CP0_MVPCONTROL + xori t1, t1, MVPCONTROL_VPC + mtc0 t1, CP0_MVPCONTROL + ehb + evpe + + /* Check whether this VPE is meant to be running */ + li t0, 1 + sll t0, t0, t9 + and t0, t0, t8 + bnez t0, 2f + nop + + /* This VPE should be offline, halt the TC */ + li t0, TCHALT_H + mtc0 t0, CP0_TCHALT + la t0, 1f +1: jr.hb t0 + nop + +2: .set pop + +#endif /* CONFIG_MIPS_MT */ + + /* Return */ + jr ra + nop + END(mips_cps_boot_vpes) + +#if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM) + + /* Calculate a pointer to this CPUs struct mips_static_suspend_state */ + .macro psstate dest + .set push + .set noat + lw $1, TI_CPU(gp) + sll $1, $1, LONGLOG + la \dest, __per_cpu_offset + addu $1, $1, \dest + lw $1, 0($1) + la \dest, cps_cpu_state + addu \dest, \dest, $1 + .set pop + .endm + +LEAF(mips_cps_pm_save) + /* Save CPU state */ + SUSPEND_SAVE_REGS + psstate t1 + SUSPEND_SAVE_STATIC + jr v0 + nop + END(mips_cps_pm_save) + +LEAF(mips_cps_pm_restore) + /* Restore CPU state */ + psstate t1 + RESUME_RESTORE_STATIC + RESUME_RESTORE_REGS_RETURN + END(mips_cps_pm_restore) + +#endif /* CONFIG_MIPS_CPS_PM && CONFIG_CPU_PM */ diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 6e8fb85ce7c..d74f957c561 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -62,7 +62,7 @@ static inline void check_errata(void) case CPU_34K: /* * Erratum "RPS May Cause Incorrect Instruction Execution" - * This code only handles VPE0, any SMP/SMTC/RTOS code + * This code only handles VPE0, any SMP/RTOS code * making use of VPE1 will be responsable for that VPE. */ if ((c->processor_id & PRID_REV_MASK) <= PRID_REV_34K_V1_0_2) @@ -423,7 +423,7 @@ static void decode_configs(struct cpuinfo_mips *c) #ifndef CONFIG_MIPS_CPS if (cpu_has_mips_r2) { - c->core = read_c0_ebase() & 0x3ff; + c->core = get_ebase_cpunum(); if (cpu_has_mipsmt) c->core >>= fls(core_nvpes()) - 1; } @@ -684,21 +684,6 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) */ c->tlbsize = (read_c0_info() & (1 << 29)) ? 
64 : 48; break; - case PRID_IMP_RM9000: - c->cputype = CPU_RM9000; - __cpu_name[cpu] = "RM9000"; - set_isa(c, MIPS_CPU_ISA_IV); - c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | - MIPS_CPU_LLSC; - /* - * Bit 29 in the info register of the RM9000 - * indicates if the TLB has 48 or 64 entries. - * - * 29 1 => 64 entry JTLB - * 0 => 48 entry JTLB - */ - c->tlbsize = (read_c0_info() & (1 << 29)) ? 64 : 48; - break; case PRID_IMP_R8000: c->cputype = CPU_R8000; __cpu_name[cpu] = "RM8000"; @@ -1041,6 +1026,7 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu) decode_configs(c); /* JZRISC does not implement the CP0 counter. */ c->options &= ~MIPS_CPU_COUNTER; + BUG_ON(!__builtin_constant_p(cpu_has_counter) || cpu_has_counter); switch (c->processor_id & PRID_IMP_MASK) { case PRID_IMP_JZRISC: c->cputype = CPU_JZRISC; @@ -1074,6 +1060,7 @@ static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu) switch (c->processor_id & PRID_IMP_MASK) { case PRID_IMP_NETLOGIC_XLP2XX: case PRID_IMP_NETLOGIC_XLP9XX: + case PRID_IMP_NETLOGIC_XLP5XX: c->cputype = CPU_XLP; __cpu_name[cpu] = "Broadcom XLPII"; break; diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S index e5786858cdb..4353d323f01 100644 --- a/arch/mips/kernel/entry.S +++ b/arch/mips/kernel/entry.S @@ -16,9 +16,6 @@ #include <asm/isadep.h> #include <asm/thread_info.h> #include <asm/war.h> -#ifdef CONFIG_MIPS_MT_SMTC -#include <asm/mipsmtregs.h> -#endif #ifndef CONFIG_PREEMPT #define resume_kernel restore_all @@ -89,41 +86,6 @@ FEXPORT(syscall_exit) bnez t0, syscall_exit_work restore_all: # restore full frame -#ifdef CONFIG_MIPS_MT_SMTC -#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP -/* Re-arm any temporarily masked interrupts not explicitly "acked" */ - mfc0 v0, CP0_TCSTATUS - ori v1, v0, TCSTATUS_IXMT - mtc0 v1, CP0_TCSTATUS - andi v0, TCSTATUS_IXMT - _ehb - mfc0 t0, CP0_TCCONTEXT - DMT 9 # dmt t1 - jal mips_ihb - mfc0 t2, CP0_STATUS - andi t3, t0, 0xff00 - or t2, t2, t3 - mtc0 t2, CP0_STATUS - _ehb - andi t1, t1, VPECONTROL_TE - beqz t1, 1f - EMT -1: - mfc0 v1, CP0_TCSTATUS - /* We set IXMT above, XOR should clear it here */ - xori v1, v1, TCSTATUS_IXMT - or v1, v0, v1 - mtc0 v1, CP0_TCSTATUS - _ehb - xor t0, t0, t3 - mtc0 t0, CP0_TCCONTEXT -#endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */ -/* Detect and execute deferred IPI "interrupts" */ - LONG_L s0, TI_REGS($28) - LONG_S sp, TI_REGS($28) - jal deferred_smtc_ipi - LONG_S s0, TI_REGS($28) -#endif /* CONFIG_MIPS_MT_SMTC */ .set noat RESTORE_TEMP RESTORE_AT diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S index a9ce3408be2..ac35e12cb1f 100644 --- a/arch/mips/kernel/genex.S +++ b/arch/mips/kernel/genex.S @@ -21,20 +21,6 @@ #include <asm/war.h> #include <asm/thread_info.h> -#ifdef CONFIG_MIPS_MT_SMTC -#define PANIC_PIC(msg) \ - .set push; \ - .set nomicromips; \ - .set reorder; \ - PTR_LA a0,8f; \ - .set noat; \ - PTR_LA AT, panic; \ - jr AT; \ -9: b 9b; \ - .set pop; \ - TEXT(msg) -#endif - __INIT /* @@ -251,15 +237,6 @@ NESTED(except_vec_vi, 0, sp) SAVE_AT .set push .set noreorder -#ifdef CONFIG_MIPS_MT_SMTC - /* - * To keep from blindly blocking *all* interrupts - * during service by SMTC kernel, we also want to - * pass the IM value to be cleared. 
- */ -FEXPORT(except_vec_vi_mori) - ori a0, $0, 0 -#endif /* CONFIG_MIPS_MT_SMTC */ PTR_LA v1, except_vec_vi_handler FEXPORT(except_vec_vi_lui) lui v0, 0 /* Patched */ @@ -277,37 +254,10 @@ EXPORT(except_vec_vi_end) NESTED(except_vec_vi_handler, 0, sp) SAVE_TEMP SAVE_STATIC -#ifdef CONFIG_MIPS_MT_SMTC - /* - * SMTC has an interesting problem that interrupts are level-triggered, - * and the CLI macro will clear EXL, potentially causing a duplicate - * interrupt service invocation. So we need to clear the associated - * IM bit of Status prior to doing CLI, and restore it after the - * service routine has been invoked - we must assume that the - * service routine will have cleared the state, and any active - * level represents a new or otherwised unserviced event... - */ - mfc0 t1, CP0_STATUS - and t0, a0, t1 -#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP - mfc0 t2, CP0_TCCONTEXT - or t2, t0, t2 - mtc0 t2, CP0_TCCONTEXT -#endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */ - xor t1, t1, t0 - mtc0 t1, CP0_STATUS - _ehb -#endif /* CONFIG_MIPS_MT_SMTC */ CLI #ifdef CONFIG_TRACE_IRQFLAGS move s0, v0 -#ifdef CONFIG_MIPS_MT_SMTC - move s1, a0 -#endif TRACE_IRQS_OFF -#ifdef CONFIG_MIPS_MT_SMTC - move a0, s1 -#endif move v0, s0 #endif @@ -496,9 +446,6 @@ NESTED(nmi_handler, PT_SIZE, sp) .align 5 LEAF(handle_ri_rdhwr_vivt) -#ifdef CONFIG_MIPS_MT_SMTC - PANIC_PIC("handle_ri_rdhwr_vivt called") -#else .set push .set noat .set noreorder @@ -517,7 +464,6 @@ NESTED(nmi_handler, PT_SIZE, sp) .set pop bltz k1, handle_ri /* slow path */ /* fall thru */ -#endif END(handle_ri_rdhwr_vivt) LEAF(handle_ri_rdhwr) diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S index e712dcf18b2..95afd663cd4 100644 --- a/arch/mips/kernel/head.S +++ b/arch/mips/kernel/head.S @@ -35,33 +35,12 @@ */ .macro setup_c0_status set clr .set push -#ifdef CONFIG_MIPS_MT_SMTC - /* - * For SMTC, we need to set privilege and disable interrupts only for - * the current TC, using the TCStatus register. - */ - mfc0 t0, CP0_TCSTATUS - /* Fortunately CU 0 is in the same place in both registers */ - /* Set TCU0, TMX, TKSU (for later inversion) and IXMT */ - li t1, ST0_CU0 | 0x08001c00 - or t0, t1 - /* Clear TKSU, leave IXMT */ - xori t0, 0x00001800 - mtc0 t0, CP0_TCSTATUS - _ehb - /* We need to leave the global IE bit set, but clear EXL...*/ - mfc0 t0, CP0_STATUS - or t0, ST0_CU0 | ST0_EXL | ST0_ERL | \set | \clr - xor t0, ST0_EXL | ST0_ERL | \clr - mtc0 t0, CP0_STATUS -#else mfc0 t0, CP0_STATUS or t0, ST0_CU0|\set|0x1f|\clr xor t0, 0x1f|\clr mtc0 t0, CP0_STATUS .set noreorder sll zero,3 # ehb -#endif .set pop .endm @@ -115,24 +94,6 @@ NESTED(kernel_entry, 16, sp) # kernel entry point jr t0 0: -#ifdef CONFIG_MIPS_MT_SMTC - /* - * In SMTC kernel, "CLI" is thread-specific, in TCStatus. - * We still need to enable interrupts globally in Status, - * and clear EXL/ERL. - * - * TCContext is used to track interrupt levels under - * service in SMTC kernel. Clear for boot TC before - * allowing any interrupts. - */ - mtc0 zero, CP0_TCCONTEXT - - mfc0 t0, CP0_STATUS - ori t0, t0, 0xff1f - xori t0, t0, 0x001e - mtc0 t0, CP0_STATUS -#endif /* CONFIG_MIPS_MT_SMTC */ - PTR_LA t0, __bss_start # clear .bss LONG_S zero, (t0) PTR_LA t1, __bss_stop - LONGSIZE @@ -164,25 +125,8 @@ NESTED(kernel_entry, 16, sp) # kernel entry point * function after setting up the stack and gp registers. 
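
The or/xor pair in setup_c0_status above is a two-instruction way to force bits on and off in one pass: OR-ing in both the set and clear masks raises everything, then XOR-ing the clear mask drops exactly those bits. A runnable model (ST0_CU0 taken as bit 28; the incoming Status value is made up):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t setup_status(uint32_t status, uint32_t set, uint32_t clr)
	{
		status |= (1u << 28) | set | 0x1f | clr; /* or t0, ST0_CU0|set|0x1f|clr */
		status ^= 0x1f | clr;                    /* xor t0, 0x1f|clr */
		return status;
	}

	int main(void)
	{
		/* CU0 forced on; IE/EXL/ERL/KSU (the low five bits) forced off */
		printf("0x%08x\n", (unsigned)setup_status(0x0000ff13u, 0, 0));
		return 0;
	}
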
*/ NESTED(smp_bootstrap, 16, sp) -#ifdef CONFIG_MIPS_MT_SMTC - /* - * Read-modify-writes of Status must be atomic, and this - * is one case where CLI is invoked without EXL being - * necessarily set. The CLI and setup_c0_status will - * in fact be redundant for all but the first TC of - * each VPE being booted. - */ - DMT 10 # dmt t2 /* t0, t1 are used by CLI and setup_c0_status() */ - jal mips_ihb -#endif /* CONFIG_MIPS_MT_SMTC */ smp_slave_setup setup_c0_status_sec -#ifdef CONFIG_MIPS_MT_SMTC - andi t2, t2, VPECONTROL_TE - beqz t2, 2f - EMT # emt -2: -#endif /* CONFIG_MIPS_MT_SMTC */ j start_secondary END(smp_bootstrap) #endif /* CONFIG_SMP */ diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c index 2b91fe80c43..50b364897dd 100644 --- a/arch/mips/kernel/i8259.c +++ b/arch/mips/kernel/i8259.c @@ -42,9 +42,6 @@ static struct irq_chip i8259A_chip = { .irq_disable = disable_8259A_irq, .irq_unmask = enable_8259A_irq, .irq_mask_ack = mask_and_ack_8259A, -#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF - .irq_set_affinity = plat_set_irq_affinity, -#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ }; /* @@ -180,7 +177,6 @@ handle_real_irq: outb(cached_master_mask, PIC_MASTER_IMR); outb(0x60+irq, PIC_MASTER_CMD); /* 'Specific EOI to master */ } - smtc_im_ack_irq(irq); raw_spin_unlock_irqrestore(&i8259A_lock, flags); return; diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c index 837ff27950b..09ce4598075 100644 --- a/arch/mips/kernel/idle.c +++ b/arch/mips/kernel/idle.c @@ -224,29 +224,26 @@ void __init check_wait(void) cpu_wait = r4k_wait; */ break; - case CPU_RM9000: - if ((c->processor_id & 0x00ff) >= 0x40) - cpu_wait = r4k_wait; - break; default: break; } } -static void smtc_idle_hook(void) -{ -#ifdef CONFIG_MIPS_MT_SMTC - void smtc_idle_loop_hook(void); - - smtc_idle_loop_hook(); -#endif -} - void arch_cpu_idle(void) { - smtc_idle_hook(); if (cpu_wait) cpu_wait(); else local_irq_enable(); } + +#ifdef CONFIG_CPU_IDLE + +int mips_cpuidle_wait_enter(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) +{ + arch_cpu_idle(); + return index; +} + +#endif diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c index 8520dad6d4e..88e4c323382 100644 --- a/arch/mips/kernel/irq-gic.c +++ b/arch/mips/kernel/irq-gic.c @@ -54,6 +54,21 @@ void gic_write_compare(cycle_t cnt) (int)(cnt & 0xffffffff)); } +void gic_write_cpu_compare(cycle_t cnt, int cpu) +{ + unsigned long flags; + + local_irq_save(flags); + + GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), cpu); + GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI), + (int)(cnt >> 32)); + GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO), + (int)(cnt & 0xffffffff)); + + local_irq_restore(flags); +} + cycle_t gic_read_compare(void) { unsigned int hi, lo; diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c index fab40f7d2e0..4858642d543 100644 --- a/arch/mips/kernel/irq-msc01.c +++ b/arch/mips/kernel/irq-msc01.c @@ -53,13 +53,9 @@ static inline void unmask_msc_irq(struct irq_data *d) */ static void level_mask_and_ack_msc_irq(struct irq_data *d) { - unsigned int irq = d->irq; - mask_msc_irq(d); if (!cpu_has_veic) MSCIC_WRITE(MSC01_IC_EOI, 0); - /* This actually needs to be a call into platform code */ - smtc_im_ack_irq(irq); } /* @@ -78,7 +74,6 @@ static void edge_mask_and_ack_msc_irq(struct irq_data *d) MSCIC_WRITE(MSC01_IC_SUP+irq*8, r | ~MSC01_IC_SUP_EDGE_BIT); MSCIC_WRITE(MSC01_IC_SUP+irq*8, r); } - smtc_im_ack_irq(irq); } /* diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c index 
d1fea7a054b..d2bfbc2e899 100644 --- a/arch/mips/kernel/irq.c +++ b/arch/mips/kernel/irq.c @@ -62,9 +62,9 @@ void __init alloc_legacy_irqno(void) void free_irqno(unsigned int irq) { - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(irq, irq_map); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); } /* @@ -73,7 +73,6 @@ void free_irqno(unsigned int irq) */ void ack_bad_irq(unsigned int irq) { - smtc_im_ack_irq(irq); printk("unexpected IRQ # %d\n", irq); } @@ -142,23 +141,7 @@ void __irq_entry do_IRQ(unsigned int irq) { irq_enter(); check_stack_overflow(); - if (!smtc_handle_on_other_cpu(irq)) - generic_handle_irq(irq); - irq_exit(); -} - -#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF -/* - * To avoid inefficient and in some cases pathological re-checking of - * IRQ affinity, we have this variant that skips the affinity check. - */ - -void __irq_entry do_IRQ_no_affinity(unsigned int irq) -{ - irq_enter(); - smtc_im_backstop(irq); generic_handle_irq(irq); irq_exit(); } -#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c index c9dc6740296..ba473608a34 100644 --- a/arch/mips/kernel/mips-cpc.c +++ b/arch/mips/kernel/mips-cpc.c @@ -9,12 +9,18 @@ */ #include <linux/errno.h> +#include <linux/percpu.h> +#include <linux/spinlock.h> #include <asm/mips-cm.h> #include <asm/mips-cpc.h> void __iomem *mips_cpc_base; +static DEFINE_PER_CPU_ALIGNED(spinlock_t, cpc_core_lock); + +static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags); + phys_t __weak mips_cpc_phys_base(void) { u32 cpc_base; @@ -39,6 +45,10 @@ phys_t __weak mips_cpc_phys_base(void) int mips_cpc_probe(void) { phys_t addr; + unsigned cpu; + + for_each_possible_cpu(cpu) + spin_lock_init(&per_cpu(cpc_core_lock, cpu)); addr = mips_cpc_phys_base(); if (!addr) @@ -50,3 +60,21 @@ int mips_cpc_probe(void) return 0; } + +void mips_cpc_lock_other(unsigned int core) +{ + unsigned curr_core; + preempt_disable(); + curr_core = current_cpu_data.core; + spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core), + per_cpu(cpc_core_lock_flags, curr_core)); + write_cpc_cl_other(core << CPC_Cx_OTHER_CORENUM_SHF); +} + +void mips_cpc_unlock_other(void) +{ + unsigned curr_core = current_cpu_data.core; + spin_unlock_irqrestore(&per_cpu(cpc_core_lock, curr_core), + per_cpu(cpc_core_lock_flags, curr_core)); + preempt_enable(); +} diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c index cb098628aee..362bb3707e6 100644 --- a/arch/mips/kernel/mips-mt-fpaff.c +++ b/arch/mips/kernel/mips-mt-fpaff.c @@ -1,5 +1,5 @@ /* - * General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels + * General MIPS MT support routines, usable in AP/SP and SMVP. * Copyright (C) 2005 Mips Technologies, Inc */ #include <linux/cpu.h> diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c index 6ded9bd1489..88b1ef5f868 100644 --- a/arch/mips/kernel/mips-mt.c +++ b/arch/mips/kernel/mips-mt.c @@ -1,5 +1,5 @@ /* - * General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels + * General MIPS MT support routines, usable in AP/SP and SMVP. 
* Copyright (C) 2005 Mips Technologies, Inc */ @@ -57,9 +57,6 @@ void mips_mt_regdump(unsigned long mvpctl) int tc; unsigned long haltval; unsigned long tcstatval; -#ifdef CONFIG_MIPS_MT_SMTC - void smtc_soft_dump(void); -#endif /* CONFIG_MIPT_MT_SMTC */ local_irq_save(flags); vpflags = dvpe(); @@ -116,9 +113,6 @@ void mips_mt_regdump(unsigned long mvpctl) if (!haltval) write_tc_c0_tchalt(0); } -#ifdef CONFIG_MIPS_MT_SMTC - smtc_soft_dump(); -#endif /* CONFIG_MIPT_MT_SMTC */ printk("===========================\n"); evpe(vpflags); local_irq_restore(flags); @@ -295,21 +289,11 @@ void mips_mt_set_cpuoptions(void) void mt_cflush_lockdown(void) { -#ifdef CONFIG_MIPS_MT_SMTC - void smtc_cflush_lockdown(void); - - smtc_cflush_lockdown(); -#endif /* CONFIG_MIPS_MT_SMTC */ /* FILL IN VSMP and AP/SP VERSIONS HERE */ } void mt_cflush_release(void) { -#ifdef CONFIG_MIPS_MT_SMTC - void smtc_cflush_release(void); - - smtc_cflush_release(); -#endif /* CONFIG_MIPS_MT_SMTC */ /* FILL IN VSMP and AP/SP VERSIONS HERE */ } diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S index 029e002a4ea..f6547680c81 100644 --- a/arch/mips/kernel/octeon_switch.S +++ b/arch/mips/kernel/octeon_switch.S @@ -10,24 +10,12 @@ * Copyright (C) 2000 MIPS Technologies, Inc. * written by Carsten Langgaard, carstenl@mips.com */ -#include <asm/asm.h> -#include <asm/cachectl.h> -#include <asm/fpregdef.h> -#include <asm/mipsregs.h> -#include <asm/asm-offsets.h> -#include <asm/pgtable-bits.h> -#include <asm/regdef.h> -#include <asm/stackframe.h> -#include <asm/thread_info.h> - -#include <asm/asmmacro.h> - -/* - * Offset to the current process status flags, the first 32 bytes of the - * stack are not used. - */ -#define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS) +#define USE_ALTERNATE_RESUME_IMPL 1 + .set push + .set arch=mips64r2 +#include "r4k_switch.S" + .set pop /* * task_struct *resume(task_struct *prev, task_struct *next, * struct thread_info *next_ti, int usedfpu) @@ -40,6 +28,61 @@ cpu_save_nonscratch a0 LONG_S ra, THREAD_REG31(a0) + /* + * check if we need to save FPU registers + */ + PTR_L t3, TASK_THREAD_INFO(a0) + LONG_L t0, TI_FLAGS(t3) + li t1, _TIF_USEDFPU + and t2, t0, t1 + beqz t2, 1f + nor t1, zero, t1 + + and t0, t0, t1 + LONG_S t0, TI_FLAGS(t3) + + /* + * clear saved user stack CU1 bit + */ + LONG_L t0, ST_OFF(t3) + li t1, ~ST0_CU1 + and t0, t0, t1 + LONG_S t0, ST_OFF(t3) + + .set push + .set arch=mips64r2 + fpu_save_double a0 t0 t1 # c0_status passed in t0 + # clobbers t1 + .set pop +1: + + /* check if we need to save COP2 registers */ + PTR_L t2, TASK_THREAD_INFO(a0) + LONG_L t0, ST_OFF(t2) + bbit0 t0, 30, 1f + + /* Disable COP2 in the stored process state */ + li t1, ST0_CU2 + xor t0, t1 + LONG_S t0, ST_OFF(t2) + + /* Enable COP2 so we can save it */ + mfc0 t0, CP0_STATUS + or t0, t1 + mtc0 t0, CP0_STATUS + + /* Save COP2 */ + daddu a0, THREAD_CP2 + jal octeon_cop2_save + dsubu a0, THREAD_CP2 + + /* Disable COP2 now that we are done */ + mfc0 t0, CP0_STATUS + li t1, ST0_CU2 + xor t0, t1 + mtc0 t0, CP0_STATUS + +1: #if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0 /* Check if we need to store CVMSEG state */ mfc0 t0, $11,7 /* CvmMemCtl */ @@ -85,12 +128,7 @@ move $28, a2 cpu_restore_nonscratch a1 -#if (_THREAD_SIZE - 32) < 0x8000 - PTR_ADDIU t0, $28, _THREAD_SIZE - 32 -#else - PTR_LI t0, _THREAD_SIZE - 32 - PTR_ADDU t0, $28 -#endif + PTR_ADDU t0, $28, _THREAD_SIZE - 32 set_saved_sp t0, t1, t2 mfc0 t1, CP0_STATUS /* Do we really need this? 
*/ diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c new file mode 100644 index 00000000000..5aa4c6f8cf8 --- /dev/null +++ b/arch/mips/kernel/pm-cps.c @@ -0,0 +1,716 @@ +/* + * Copyright (C) 2014 Imagination Technologies + * Author: Paul Burton <paul.burton@imgtec.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include <linux/init.h> +#include <linux/percpu.h> +#include <linux/slab.h> + +#include <asm/asm-offsets.h> +#include <asm/cacheflush.h> +#include <asm/cacheops.h> +#include <asm/idle.h> +#include <asm/mips-cm.h> +#include <asm/mips-cpc.h> +#include <asm/mipsmtregs.h> +#include <asm/pm.h> +#include <asm/pm-cps.h> +#include <asm/smp-cps.h> +#include <asm/uasm.h> + +/* + * cps_nc_entry_fn - type of a generated non-coherent state entry function + * @online: the count of online coupled VPEs + * @nc_ready_count: pointer to a non-coherent mapping of the core ready_count + * + * The code entering & exiting non-coherent states is generated at runtime + * using uasm, in order to ensure that the compiler cannot insert a stray + * memory access at an unfortunate time and to allow the generation of optimal + * core-specific code particularly for cache routines. If coupled_coherence + * is non-zero and this is the entry function for the CPS_PM_NC_WAIT state, + * returns the number of VPEs that were in the wait state at the point this + * VPE left it. Returns garbage if coupled_coherence is zero or this is not + * the entry function for CPS_PM_NC_WAIT. + */ +typedef unsigned (*cps_nc_entry_fn)(unsigned online, u32 *nc_ready_count); + +/* + * The entry point of the generated non-coherent idle state entry/exit + * functions. Actually per-core rather than per-CPU. + */ +static DEFINE_PER_CPU_READ_MOSTLY(cps_nc_entry_fn[CPS_PM_STATE_COUNT], + nc_asm_enter); + +/* Bitmap indicating which states are supported by the system */ +DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT); + +/* + * Indicates the number of coupled VPEs ready to operate in a non-coherent + * state. Actually per-core rather than per-CPU. + */ +static DEFINE_PER_CPU_ALIGNED(u32*, ready_count); +static DEFINE_PER_CPU_ALIGNED(void*, ready_count_alloc); + +/* Indicates online CPUs coupled with the current CPU */ +static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled); + +/* + * Used to synchronize entry to deep idle states. Actually per-core rather + * than per-CPU. 
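
pm_barrier, declared just below, feeds coupled_barrier(): a two-phase rendezvous in which VPEs count in to online, count out to 2*online, and the last one out resets the counter for reuse. A runnable pthread model of the same logic, with C11 seq_cst atomics standing in for the kernel's atomics and barriers:

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	#define ONLINE 4

	static atomic_uint count;

	static void coupled_barrier_model(unsigned online)
	{
		atomic_fetch_add(&count, 1);		/* phase 1: count in */
		while (atomic_load(&count) < online)
			;
		/* phase 2: count out; the last arrival resets the counter */
		if (atomic_fetch_add(&count, 1) + 1 == online * 2) {
			atomic_store(&count, 0);
			return;
		}
		while (atomic_load(&count) > online)
			;
	}

	static void *vpe(void *arg)
	{
		coupled_barrier_model(ONLINE);
		printf("VPE %ld through\n", (long)arg);
		return NULL;
	}

	int main(void)
	{
		pthread_t t[ONLINE];
		long i;

		for (i = 0; i < ONLINE; i++)
			pthread_create(&t[i], NULL, vpe, (void *)i);
		for (i = 0; i < ONLINE; i++)
			pthread_join(t[i], NULL);
		return 0;
	}
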
+ */ +static DEFINE_PER_CPU_ALIGNED(atomic_t, pm_barrier); + +/* Saved CPU state across the CPS_PM_POWER_GATED state */ +DEFINE_PER_CPU_ALIGNED(struct mips_static_suspend_state, cps_cpu_state); + +/* A somewhat arbitrary number of labels & relocs for uasm */ +static struct uasm_label labels[32] __initdata; +static struct uasm_reloc relocs[32] __initdata; + +/* CPU dependant sync types */ +static unsigned stype_intervention; +static unsigned stype_memory; +static unsigned stype_ordering; + +enum mips_reg { + zero, at, v0, v1, a0, a1, a2, a3, + t0, t1, t2, t3, t4, t5, t6, t7, + s0, s1, s2, s3, s4, s5, s6, s7, + t8, t9, k0, k1, gp, sp, fp, ra, +}; + +bool cps_pm_support_state(enum cps_pm_state state) +{ + return test_bit(state, state_support); +} + +static void coupled_barrier(atomic_t *a, unsigned online) +{ + /* + * This function is effectively the same as + * cpuidle_coupled_parallel_barrier, which can't be used here since + * there's no cpuidle device. + */ + + if (!coupled_coherence) + return; + + smp_mb__before_atomic_inc(); + atomic_inc(a); + + while (atomic_read(a) < online) + cpu_relax(); + + if (atomic_inc_return(a) == online * 2) { + atomic_set(a, 0); + return; + } + + while (atomic_read(a) > online) + cpu_relax(); +} + +int cps_pm_enter_state(enum cps_pm_state state) +{ + unsigned cpu = smp_processor_id(); + unsigned core = current_cpu_data.core; + unsigned online, left; + cpumask_t *coupled_mask = this_cpu_ptr(&online_coupled); + u32 *core_ready_count, *nc_core_ready_count; + void *nc_addr; + cps_nc_entry_fn entry; + struct core_boot_config *core_cfg; + struct vpe_boot_config *vpe_cfg; + + /* Check that there is an entry function for this state */ + entry = per_cpu(nc_asm_enter, core)[state]; + if (!entry) + return -EINVAL; + + /* Calculate which coupled CPUs (VPEs) are online */ +#ifdef CONFIG_MIPS_MT + if (cpu_online(cpu)) { + cpumask_and(coupled_mask, cpu_online_mask, + &cpu_sibling_map[cpu]); + online = cpumask_weight(coupled_mask); + cpumask_clear_cpu(cpu, coupled_mask); + } else +#endif + { + cpumask_clear(coupled_mask); + online = 1; + } + + /* Setup the VPE to run mips_cps_pm_restore when started again */ + if (config_enabled(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) { + core_cfg = &mips_cps_core_bootcfg[core]; + vpe_cfg = &core_cfg->vpe_config[current_cpu_data.vpe_id]; + vpe_cfg->pc = (unsigned long)mips_cps_pm_restore; + vpe_cfg->gp = (unsigned long)current_thread_info(); + vpe_cfg->sp = 0; + } + + /* Indicate that this CPU might not be coherent */ + cpumask_clear_cpu(cpu, &cpu_coherent_mask); + smp_mb__after_clear_bit(); + + /* Create a non-coherent mapping of the core ready_count */ + core_ready_count = per_cpu(ready_count, core); + nc_addr = kmap_noncoherent(virt_to_page(core_ready_count), + (unsigned long)core_ready_count); + nc_addr += ((unsigned long)core_ready_count & ~PAGE_MASK); + nc_core_ready_count = nc_addr; + + /* Ensure ready_count is zero-initialised before the assembly runs */ + ACCESS_ONCE(*nc_core_ready_count) = 0; + coupled_barrier(&per_cpu(pm_barrier, core), online); + + /* Run the generated entry code */ + left = entry(online, nc_core_ready_count); + + /* Remove the non-coherent mapping of ready_count */ + kunmap_noncoherent(); + + /* Indicate that this CPU is definitely coherent */ + cpumask_set_cpu(cpu, &cpu_coherent_mask); + + /* + * If this VPE is the first to leave the non-coherent wait state then + * it needs to wake up any coupled VPEs still running their wait + * instruction so that they return to cpuidle, which can then complete + * 
coordination between the coupled VPEs & provide the governor with + * a chance to reflect on the length of time the VPEs were in the + * idle state. + */ + if (coupled_coherence && (state == CPS_PM_NC_WAIT) && (left == online)) + arch_send_call_function_ipi_mask(coupled_mask); + + return 0; +} + +static void __init cps_gen_cache_routine(u32 **pp, struct uasm_label **pl, + struct uasm_reloc **pr, + const struct cache_desc *cache, + unsigned op, int lbl) +{ + unsigned cache_size = cache->ways << cache->waybit; + unsigned i; + const unsigned unroll_lines = 32; + + /* If the cache isn't present this function has it easy */ + if (cache->flags & MIPS_CACHE_NOT_PRESENT) + return; + + /* Load base address */ + UASM_i_LA(pp, t0, (long)CKSEG0); + + /* Calculate end address */ + if (cache_size < 0x8000) + uasm_i_addiu(pp, t1, t0, cache_size); + else + UASM_i_LA(pp, t1, (long)(CKSEG0 + cache_size)); + + /* Start of cache op loop */ + uasm_build_label(pl, *pp, lbl); + + /* Generate the cache ops */ + for (i = 0; i < unroll_lines; i++) + uasm_i_cache(pp, op, i * cache->linesz, t0); + + /* Update the base address */ + uasm_i_addiu(pp, t0, t0, unroll_lines * cache->linesz); + + /* Loop if we haven't reached the end address yet */ + uasm_il_bne(pp, pr, t0, t1, lbl); + uasm_i_nop(pp); +} + +static int __init cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl, + struct uasm_reloc **pr, + const struct cpuinfo_mips *cpu_info, + int lbl) +{ + unsigned i, fsb_size = 8; + unsigned num_loads = (fsb_size * 3) / 2; + unsigned line_stride = 2; + unsigned line_size = cpu_info->dcache.linesz; + unsigned perf_counter, perf_event; + unsigned revision = cpu_info->processor_id & PRID_REV_MASK; + + /* + * Determine whether this CPU requires an FSB flush, and if so which + * performance counter/event reflect stalls due to a full FSB. + */ + switch (__get_cpu_type(cpu_info->cputype)) { + case CPU_INTERAPTIV: + perf_counter = 1; + perf_event = 51; + break; + + case CPU_PROAPTIV: + /* Newer proAptiv cores don't require this workaround */ + if (revision >= PRID_REV_ENCODE_332(1, 1, 0)) + return 0; + + /* On older ones it's unavailable */ + return -1; + + /* CPUs which do not require the workaround */ + case CPU_P5600: + return 0; + + default: + WARN_ONCE(1, "pm-cps: FSB flush unsupported for this CPU\n"); + return -1; + } + + /* + * Ensure that the fill/store buffer (FSB) is not holding the results + * of a prefetch, since if it is then the CPC sequencer may become + * stuck in the D3 (ClrBus) state whilst entering a low power state. + */ + + /* Preserve perf counter setup */ + uasm_i_mfc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */ + uasm_i_mfc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */ + + /* Setup perf counter to count FSB full pipeline stalls */ + uasm_i_addiu(pp, t0, zero, (perf_event << 5) | 0xf); + uasm_i_mtc0(pp, t0, 25, (perf_counter * 2) + 0); /* PerfCtlN */ + uasm_i_ehb(pp); + uasm_i_mtc0(pp, zero, 25, (perf_counter * 2) + 1); /* PerfCntN */ + uasm_i_ehb(pp); + + /* Base address for loads */ + UASM_i_LA(pp, t0, (long)CKSEG0); + + /* Start of clear loop */ + uasm_build_label(pl, *pp, lbl); + + /* Perform some loads to fill the FSB */ + for (i = 0; i < num_loads; i++) + uasm_i_lw(pp, zero, i * line_size * line_stride, t0); + + /* + * Invalidate the new D-cache entries so that the cache will need + * refilling (via the FSB) if the loop is executed again. 
+ */ + for (i = 0; i < num_loads; i++) { + uasm_i_cache(pp, Hit_Invalidate_D, + i * line_size * line_stride, t0); + uasm_i_cache(pp, Hit_Writeback_Inv_SD, + i * line_size * line_stride, t0); + } + + /* Completion barrier */ + uasm_i_sync(pp, stype_memory); + uasm_i_ehb(pp); + + /* Check whether the pipeline stalled due to the FSB being full */ + uasm_i_mfc0(pp, t1, 25, (perf_counter * 2) + 1); /* PerfCntN */ + + /* Loop if it didn't */ + uasm_il_beqz(pp, pr, t1, lbl); + uasm_i_nop(pp); + + /* Restore perf counter 1. The count may well now be wrong... */ + uasm_i_mtc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */ + uasm_i_ehb(pp); + uasm_i_mtc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */ + uasm_i_ehb(pp); + + return 0; +} + +static void __init cps_gen_set_top_bit(u32 **pp, struct uasm_label **pl, + struct uasm_reloc **pr, + unsigned r_addr, int lbl) +{ + uasm_i_lui(pp, t0, uasm_rel_hi(0x80000000)); + uasm_build_label(pl, *pp, lbl); + uasm_i_ll(pp, t1, 0, r_addr); + uasm_i_or(pp, t1, t1, t0); + uasm_i_sc(pp, t1, 0, r_addr); + uasm_il_beqz(pp, pr, t1, lbl); + uasm_i_nop(pp); +} + +static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) +{ + struct uasm_label *l = labels; + struct uasm_reloc *r = relocs; + u32 *buf, *p; + const unsigned r_online = a0; + const unsigned r_nc_count = a1; + const unsigned r_pcohctl = t7; + const unsigned max_instrs = 256; + unsigned cpc_cmd; + int err; + enum { + lbl_incready = 1, + lbl_poll_cont, + lbl_secondary_hang, + lbl_disable_coherence, + lbl_flush_fsb, + lbl_invicache, + lbl_flushdcache, + lbl_hang, + lbl_set_cont, + lbl_secondary_cont, + lbl_decready, + }; + + /* Allocate a buffer to hold the generated code */ + p = buf = kcalloc(max_instrs, sizeof(u32), GFP_KERNEL); + if (!buf) + return NULL; + + /* Clear labels & relocs ready for (re)use */ + memset(labels, 0, sizeof(labels)); + memset(relocs, 0, sizeof(relocs)); + + if (config_enabled(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) { + /* + * Save CPU state. Note the non-standard calling convention + * with the return address placed in v0 to avoid clobbering + * the ra register before it is saved. + */ + UASM_i_LA(&p, t0, (long)mips_cps_pm_save); + uasm_i_jalr(&p, v0, t0); + uasm_i_nop(&p); + } + + /* + * Load addresses of required CM & CPC registers. This is done early + * because they're needed in both the enable & disable coherence steps + * but in the coupled case the enable step will only run on one VPE. + */ + UASM_i_LA(&p, r_pcohctl, (long)addr_gcr_cl_coherence()); + + if (coupled_coherence) { + /* Increment ready_count */ + uasm_i_sync(&p, stype_ordering); + uasm_build_label(&l, p, lbl_incready); + uasm_i_ll(&p, t1, 0, r_nc_count); + uasm_i_addiu(&p, t2, t1, 1); + uasm_i_sc(&p, t2, 0, r_nc_count); + uasm_il_beqz(&p, &r, t2, lbl_incready); + uasm_i_addiu(&p, t1, t1, 1); + + /* Ordering barrier */ + uasm_i_sync(&p, stype_ordering); + + /* + * If this is the last VPE to become ready for non-coherence + * then it should branch below. + */ + uasm_il_beq(&p, &r, t1, r_online, lbl_disable_coherence); + uasm_i_nop(&p); + + if (state < CPS_PM_POWER_GATED) { + /* + * Otherwise this is not the last VPE to become ready + * for non-coherence. It needs to wait until coherence + * has been disabled before proceeding, which it will do + * by polling for the top bit of ready_count being set. 
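+ *
+ * In C terms the generated poll is roughly equivalent to this
+ * illustrative sketch (not the emitted code itself):
+ *
+ *	while (!(*nc_ready_count & 0x80000000))
+ *		yield();
+ *
+ * The VPE which goes on to disable coherence sets bit 31 of
+ * ready_count via the code generated by cps_gen_set_top_bit() once
+ * it is safe for the other VPEs to continue.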
+ */ + uasm_i_addiu(&p, t1, zero, -1); + uasm_build_label(&l, p, lbl_poll_cont); + uasm_i_lw(&p, t0, 0, r_nc_count); + uasm_il_bltz(&p, &r, t0, lbl_secondary_cont); + uasm_i_ehb(&p); + uasm_i_yield(&p, zero, t1); + uasm_il_b(&p, &r, lbl_poll_cont); + uasm_i_nop(&p); + } else { + /* + * The core will lose power & this VPE will not continue + * so it can simply halt here. + */ + uasm_i_addiu(&p, t0, zero, TCHALT_H); + uasm_i_mtc0(&p, t0, 2, 4); + uasm_build_label(&l, p, lbl_secondary_hang); + uasm_il_b(&p, &r, lbl_secondary_hang); + uasm_i_nop(&p); + } + } + + /* + * This is the point of no return - this VPE will now proceed to + * disable coherence. At this point we *must* be sure that no other + * VPE within the core will interfere with the L1 dcache. + */ + uasm_build_label(&l, p, lbl_disable_coherence); + + /* Invalidate the L1 icache */ + cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].icache, + Index_Invalidate_I, lbl_invicache); + + /* Writeback & invalidate the L1 dcache */ + cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].dcache, + Index_Writeback_Inv_D, lbl_flushdcache); + + /* Completion barrier */ + uasm_i_sync(&p, stype_memory); + uasm_i_ehb(&p); + + /* + * Disable all but self interventions. The load from COHCTL is defined + * by the interAptiv & proAptiv SUMs as ensuring that the operation + * resulting from the preceeding store is complete. + */ + uasm_i_addiu(&p, t0, zero, 1 << cpu_data[cpu].core); + uasm_i_sw(&p, t0, 0, r_pcohctl); + uasm_i_lw(&p, t0, 0, r_pcohctl); + + /* Sync to ensure previous interventions are complete */ + uasm_i_sync(&p, stype_intervention); + uasm_i_ehb(&p); + + /* Disable coherence */ + uasm_i_sw(&p, zero, 0, r_pcohctl); + uasm_i_lw(&p, t0, 0, r_pcohctl); + + if (state >= CPS_PM_CLOCK_GATED) { + err = cps_gen_flush_fsb(&p, &l, &r, &cpu_data[cpu], + lbl_flush_fsb); + if (err) + goto out_err; + + /* Determine the CPC command to issue */ + switch (state) { + case CPS_PM_CLOCK_GATED: + cpc_cmd = CPC_Cx_CMD_CLOCKOFF; + break; + case CPS_PM_POWER_GATED: + cpc_cmd = CPC_Cx_CMD_PWRDOWN; + break; + default: + BUG(); + goto out_err; + } + + /* Issue the CPC command */ + UASM_i_LA(&p, t0, (long)addr_cpc_cl_cmd()); + uasm_i_addiu(&p, t1, zero, cpc_cmd); + uasm_i_sw(&p, t1, 0, t0); + + if (state == CPS_PM_POWER_GATED) { + /* If anything goes wrong just hang */ + uasm_build_label(&l, p, lbl_hang); + uasm_il_b(&p, &r, lbl_hang); + uasm_i_nop(&p); + + /* + * There's no point generating more code, the core is + * powered down & if powered back up will run from the + * reset vector not from here. + */ + goto gen_done; + } + + /* Completion barrier */ + uasm_i_sync(&p, stype_memory); + uasm_i_ehb(&p); + } + + if (state == CPS_PM_NC_WAIT) { + /* + * At this point it is safe for all VPEs to proceed with + * execution. This VPE will set the top bit of ready_count + * to indicate to the other VPEs that they may continue. + */ + if (coupled_coherence) + cps_gen_set_top_bit(&p, &l, &r, r_nc_count, + lbl_set_cont); + + /* + * VPEs which did not disable coherence will continue + * executing, after coherence has been disabled, from this + * point. + */ + uasm_build_label(&l, p, lbl_secondary_cont); + + /* Now perform our wait */ + uasm_i_wait(&p, 0); + } + + /* + * Re-enable coherence. Note that for CPS_PM_NC_WAIT all coupled VPEs + * will run this. The first will actually re-enable coherence & the + * rest will just be performing a rather unusual nop. 
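+ *
+ * The instructions emitted below are roughly equivalent to this
+ * illustrative C sketch (the read back ensures the store to the
+ * coherence control register has taken effect):
+ *
+ *	write_gcr_cl_coherence(CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK);
+ *	(void)read_gcr_cl_coherence();
+ *	sync(stype_memory);
+ *	ehb();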
+ */ + uasm_i_addiu(&p, t0, zero, CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK); + uasm_i_sw(&p, t0, 0, r_pcohctl); + uasm_i_lw(&p, t0, 0, r_pcohctl); + + /* Completion barrier */ + uasm_i_sync(&p, stype_memory); + uasm_i_ehb(&p); + + if (coupled_coherence && (state == CPS_PM_NC_WAIT)) { + /* Decrement ready_count */ + uasm_build_label(&l, p, lbl_decready); + uasm_i_sync(&p, stype_ordering); + uasm_i_ll(&p, t1, 0, r_nc_count); + uasm_i_addiu(&p, t2, t1, -1); + uasm_i_sc(&p, t2, 0, r_nc_count); + uasm_il_beqz(&p, &r, t2, lbl_decready); + uasm_i_andi(&p, v0, t1, (1 << fls(smp_num_siblings)) - 1); + + /* Ordering barrier */ + uasm_i_sync(&p, stype_ordering); + } + + if (coupled_coherence && (state == CPS_PM_CLOCK_GATED)) { + /* + * At this point it is safe for all VPEs to proceed with + * execution. This VPE will set the top bit of ready_count + * to indicate to the other VPEs that they may continue. + */ + cps_gen_set_top_bit(&p, &l, &r, r_nc_count, lbl_set_cont); + + /* + * This core will be reliant upon another core sending a + * power-up command to the CPC in order to resume operation. + * Thus an arbitrary VPE can't trigger the core leaving the + * idle state and the one that disables coherence might as well + * be the one to re-enable it. The rest will continue from here + * after that has been done. + */ + uasm_build_label(&l, p, lbl_secondary_cont); + + /* Ordering barrier */ + uasm_i_sync(&p, stype_ordering); + } + + /* The core is coherent, time to return to C code */ + uasm_i_jr(&p, ra); + uasm_i_nop(&p); + +gen_done: + /* Ensure the code didn't exceed the resources allocated for it */ + BUG_ON((p - buf) > max_instrs); + BUG_ON((l - labels) > ARRAY_SIZE(labels)); + BUG_ON((r - relocs) > ARRAY_SIZE(relocs)); + + /* Patch branch offsets */ + uasm_resolve_relocs(relocs, labels); + + /* Flush the icache */ + local_flush_icache_range((unsigned long)buf, (unsigned long)p); + + return buf; +out_err: + kfree(buf); + return NULL; +} + +static int __init cps_gen_core_entries(unsigned cpu) +{ + enum cps_pm_state state; + unsigned core = cpu_data[cpu].core; + unsigned dlinesz = cpu_data[cpu].dcache.linesz; + void *entry_fn, *core_rc; + + for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) { + if (per_cpu(nc_asm_enter, core)[state]) + continue; + if (!test_bit(state, state_support)) + continue; + + entry_fn = cps_gen_entry_code(cpu, state); + if (!entry_fn) { + pr_err("Failed to generate core %u state %u entry\n", + core, state); + clear_bit(state, state_support); + } + + per_cpu(nc_asm_enter, core)[state] = entry_fn; + } + + if (!per_cpu(ready_count, core)) { + core_rc = kmalloc(dlinesz * 2, GFP_KERNEL); + if (!core_rc) { + pr_err("Failed allocate core %u ready_count\n", core); + return -ENOMEM; + } + per_cpu(ready_count_alloc, core) = core_rc; + + /* Ensure ready_count is aligned to a cacheline boundary */ + core_rc += dlinesz - 1; + core_rc = (void *)((unsigned long)core_rc & ~(dlinesz - 1)); + per_cpu(ready_count, core) = core_rc; + } + + return 0; +} + +static int __init cps_pm_init(void) +{ + unsigned cpu; + int err; + + /* Detect appropriate sync types for the system */ + switch (current_cpu_data.cputype) { + case CPU_INTERAPTIV: + case CPU_PROAPTIV: + case CPU_M5150: + case CPU_P5600: + stype_intervention = 0x2; + stype_memory = 0x3; + stype_ordering = 0x10; + break; + + default: + pr_warn("Power management is using heavyweight sync 0\n"); + } + + /* A CM is required for all non-coherent states */ + if (!mips_cm_present()) { + pr_warn("pm-cps: no CM, non-coherent states 
unavailable\n"); + goto out; + } + + /* + * If interrupts were enabled whilst running a wait instruction on a + * non-coherent core then the VPE may end up processing interrupts + * whilst non-coherent. That would be bad. + */ + if (cpu_wait == r4k_wait_irqoff) + set_bit(CPS_PM_NC_WAIT, state_support); + else + pr_warn("pm-cps: non-coherent wait unavailable\n"); + + /* Detect whether a CPC is present */ + if (mips_cpc_present()) { + /* Detect whether clock gating is implemented */ + if (read_cpc_cl_stat_conf() & CPC_Cx_STAT_CONF_CLKGAT_IMPL_MSK) + set_bit(CPS_PM_CLOCK_GATED, state_support); + else + pr_warn("pm-cps: CPC does not support clock gating\n"); + + /* Power gating is available with CPS SMP & any CPC */ + if (mips_cps_smp_in_use()) + set_bit(CPS_PM_POWER_GATED, state_support); + else + pr_warn("pm-cps: CPS SMP not in use, power gating unavailable\n"); + } else { + pr_warn("pm-cps: no CPC, clock & power gating unavailable\n"); + } + + for_each_present_cpu(cpu) { + err = cps_gen_core_entries(cpu); + if (err) + return err; + } +out: + return 0; +} +arch_initcall(cps_pm_init); diff --git a/arch/mips/kernel/pm.c b/arch/mips/kernel/pm.c new file mode 100644 index 00000000000..fefdf39d3df --- /dev/null +++ b/arch/mips/kernel/pm.c @@ -0,0 +1,99 @@ +/* + * Copyright (C) 2014 Imagination Technologies Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * CPU PM notifiers for saving/restoring general CPU state. + */ + +#include <linux/cpu_pm.h> +#include <linux/init.h> + +#include <asm/dsp.h> +#include <asm/fpu.h> +#include <asm/mmu_context.h> +#include <asm/pm.h> +#include <asm/watch.h> + +/* Used by PM helper macros in asm/pm.h */ +struct mips_static_suspend_state mips_static_suspend_state; + +/** + * mips_cpu_save() - Save general CPU state. + * Ensures that general CPU context is saved, notably FPU and DSP. + */ +static int mips_cpu_save(void) +{ + /* Save FPU state */ + lose_fpu(1); + + /* Save DSP state */ + save_dsp(current); + + return 0; +} + +/** + * mips_cpu_restore() - Restore general CPU state. + * Restores important CPU context. + */ +static void mips_cpu_restore(void) +{ + unsigned int cpu = smp_processor_id(); + + /* Restore ASID */ + if (current->mm) + write_c0_entryhi(cpu_asid(cpu, current->mm)); + + /* Restore DSP state */ + restore_dsp(current); + + /* Restore UserLocal */ + if (cpu_has_userlocal) + write_c0_userlocal(current_thread_info()->tp_value); + + /* Restore watch registers */ + __restore_watch(); +} + +/** + * mips_pm_notifier() - Notifier for preserving general CPU context. + * @self: Notifier block. + * @cmd: CPU PM event. + * @v: Private data (unused). + * + * This is called when a CPU power management event occurs, and is used to + * ensure that important CPU context is preserved across a CPU power down. 
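+ *
+ * A platform's low power entry path is expected to bracket the state
+ * transition with the generic CPU PM calls, roughly as in this
+ * illustrative sketch:
+ *
+ *	err = cpu_pm_enter();	triggers CPU_PM_ENTER, mips_cpu_save()
+ *	if (!err)
+ *		enter the low power state
+ *	cpu_pm_exit();		triggers CPU_PM_EXIT, mips_cpu_restore()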
+ */ +static int mips_pm_notifier(struct notifier_block *self, unsigned long cmd, + void *v) +{ + int ret; + + switch (cmd) { + case CPU_PM_ENTER: + ret = mips_cpu_save(); + if (ret) + return NOTIFY_STOP; + break; + case CPU_PM_ENTER_FAILED: + case CPU_PM_EXIT: + mips_cpu_restore(); + break; + } + + return NOTIFY_OK; +} + +static struct notifier_block mips_pm_notifier_block = { + .notifier_call = mips_pm_notifier, +}; + +static int __init mips_pm_init(void) +{ + return cpu_pm_register_notifier(&mips_pm_notifier_block); +} +arch_initcall(mips_pm_init); diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c index e40971b51d2..037a44d962f 100644 --- a/arch/mips/kernel/proc.c +++ b/arch/mips/kernel/proc.c @@ -124,14 +124,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) seq_printf(m, "kscratch registers\t: %d\n", hweight8(cpu_data[n].kscratch_mask)); seq_printf(m, "core\t\t\t: %d\n", cpu_data[n].core); -#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC) - if (cpu_has_mipsmt) { - seq_printf(m, "VPE\t\t\t: %d\n", cpu_data[n].vpe_id); -#if defined(CONFIG_MIPS_MT_SMTC) - seq_printf(m, "TC\t\t\t: %d\n", cpu_data[n].tc_id); -#endif - } -#endif + sprintf(fmt, "VCE%%c exceptions\t\t: %s\n", cpu_has_vce ? "%u" : "not available"); seq_printf(m, fmt, 'D', vced_count); diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 60e39dc7f1e..0a1ec0f3bef 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -140,13 +140,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, */ childregs->cp0_status &= ~(ST0_CU2|ST0_CU1); -#ifdef CONFIG_MIPS_MT_SMTC - /* - * SMTC restores TCStatus after Status, and the CU bits - * are aliased there. - */ - childregs->cp0_tcstatus &= ~(ST0_CU2|ST0_CU1); -#endif clear_tsk_thread_flag(p, TIF_USEDFPU); #ifdef CONFIG_MIPS_MT_FPAFF diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c index 3c3b0df8f48..5d39bb85bf3 100644 --- a/arch/mips/kernel/prom.c +++ b/arch/mips/kernel/prom.c @@ -47,7 +47,7 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align) return __alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS)); } -void __init __dt_setup_arch(struct boot_param_header *bph) +void __init __dt_setup_arch(void *bph) { if (!early_init_dt_scan(bph)) return; diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index 71f85f42703..f639ccd5060 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -163,7 +163,7 @@ int ptrace_get_watch_regs(struct task_struct *child, enum pt_watch_style style; int i; - if (!cpu_has_watch || current_cpu_data.watch_reg_use_cnt == 0) + if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0) return -EIO; if (!access_ok(VERIFY_WRITE, addr, sizeof(struct pt_watch_regs))) return -EIO; @@ -177,14 +177,14 @@ int ptrace_get_watch_regs(struct task_struct *child, #endif __put_user(style, &addr->style); - __put_user(current_cpu_data.watch_reg_use_cnt, + __put_user(boot_cpu_data.watch_reg_use_cnt, &addr->WATCH_STYLE.num_valid); - for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) { + for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) { __put_user(child->thread.watch.mips3264.watchlo[i], &addr->WATCH_STYLE.watchlo[i]); __put_user(child->thread.watch.mips3264.watchhi[i] & 0xfff, &addr->WATCH_STYLE.watchhi[i]); - __put_user(current_cpu_data.watch_reg_masks[i], + __put_user(boot_cpu_data.watch_reg_masks[i], &addr->WATCH_STYLE.watch_masks[i]); } for (; i < 8; i++) { @@ -204,12 +204,12 @@ int ptrace_set_watch_regs(struct task_struct 
*child, unsigned long lt[NUM_WATCH_REGS]; u16 ht[NUM_WATCH_REGS]; - if (!cpu_has_watch || current_cpu_data.watch_reg_use_cnt == 0) + if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0) return -EIO; if (!access_ok(VERIFY_READ, addr, sizeof(struct pt_watch_regs))) return -EIO; /* Check the values. */ - for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) { + for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) { __get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]); #ifdef CONFIG_32BIT if (lt[i] & __UA_LIMIT) @@ -228,7 +228,7 @@ int ptrace_set_watch_regs(struct task_struct *child, return -EINVAL; } /* Install them. */ - for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) { + for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) { if (lt[i] & 7) watch_active = 1; child->thread.watch.mips3264.watchlo[i] = lt[i]; diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S index abacac7c33e..81ca3f70fe2 100644 --- a/arch/mips/kernel/r4k_switch.S +++ b/arch/mips/kernel/r4k_switch.S @@ -28,6 +28,7 @@ */ #define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS) +#ifndef USE_ALTERNATE_RESUME_IMPL /* * task_struct *resume(task_struct *prev, task_struct *next, * struct thread_info *next_ti, s32 fp_save) @@ -87,18 +88,6 @@ PTR_ADDU t0, $28, _THREAD_SIZE - 32 set_saved_sp t0, t1, t2 -#ifdef CONFIG_MIPS_MT_SMTC - /* Read-modify-writes of Status must be atomic on a VPE */ - mfc0 t2, CP0_TCSTATUS - ori t1, t2, TCSTATUS_IXMT - mtc0 t1, CP0_TCSTATUS - andi t2, t2, TCSTATUS_IXMT - _ehb - DMT 8 # dmt t0 - move t1,ra - jal mips_ihb - move ra,t1 -#endif /* CONFIG_MIPS_MT_SMTC */ mfc0 t1, CP0_STATUS /* Do we really need this? */ li a3, 0xff01 and t1, a3 @@ -107,22 +96,12 @@ and a2, a3 or a2, t1 mtc0 a2, CP0_STATUS -#ifdef CONFIG_MIPS_MT_SMTC - _ehb - andi t0, t0, VPECONTROL_TE - beqz t0, 1f - emt -1: - mfc0 t1, CP0_TCSTATUS - xori t1, t1, TCSTATUS_IXMT - or t1, t1, t2 - mtc0 t1, CP0_TCSTATUS - _ehb -#endif /* CONFIG_MIPS_MT_SMTC */ move v0, a0 jr ra END(resume) +#endif /* USE_ALTERNATE_RESUME_IMPL */ + /* * Save a thread's fp context. 
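 * Note that the CPU PM code in pm.c does not call this directly; it
 * relies on lose_fpu(1) to save the FPU context before a power down.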
*/ @@ -176,19 +155,10 @@ LEAF(_restore_msa) #define FPU_DEFAULT 0x00000000 LEAF(_init_fpu) -#ifdef CONFIG_MIPS_MT_SMTC - /* Rather than manipulate per-VPE Status, set per-TC bit in TCStatus */ - mfc0 t0, CP0_TCSTATUS - /* Bit position is the same for Status, TCStatus */ - li t1, ST0_CU1 - or t0, t1 - mtc0 t0, CP0_TCSTATUS -#else /* Normal MIPS CU1 enable */ mfc0 t0, CP0_STATUS li t1, ST0_CU1 or t0, t1 mtc0 t0, CP0_STATUS -#endif /* CONFIG_MIPS_MT_SMTC */ enable_fpu_hazard li t1, FPU_DEFAULT diff --git a/arch/mips/kernel/rtlx-mt.c b/arch/mips/kernel/rtlx-mt.c index 9c1aca00fd5..5a66b975989 100644 --- a/arch/mips/kernel/rtlx-mt.c +++ b/arch/mips/kernel/rtlx-mt.c @@ -36,7 +36,6 @@ static irqreturn_t rtlx_interrupt(int irq, void *dev_id) unsigned long flags; int i; - /* Ought not to be strictly necessary for SMTC builds */ local_irq_save(flags); vpeflags = dvpe(); set_c0_status(0x100 << MIPS_CPU_RTLX_IRQ); diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index fdc70b40044..3245474f19d 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -577,3 +577,4 @@ EXPORT(sys_call_table) PTR sys_finit_module PTR sys_sched_setattr PTR sys_sched_getattr /* 4350 */ + PTR sys_renameat2 diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index dd99c3285ae..be2fedd4ae3 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S @@ -430,4 +430,5 @@ EXPORT(sys_call_table) PTR sys_getdents64 PTR sys_sched_setattr PTR sys_sched_getattr /* 5310 */ + PTR sys_renameat2 .size sys_call_table,.-sys_call_table diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index f68d2f4f009..c1dbcda4b81 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -423,4 +423,5 @@ EXPORT(sysn32_call_table) PTR sys_finit_module PTR sys_sched_setattr PTR sys_sched_getattr + PTR sys_renameat2 /* 6315 */ .size sysn32_call_table,.-sysn32_call_table diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 70f6acecd92..f1343ccd7ed 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -556,4 +556,5 @@ EXPORT(sys32_call_table) PTR sys_finit_module PTR sys_sched_setattr PTR sys_sched_getattr /* 4350 */ + PTR sys_renameat2 .size sys32_call_table,.-sys32_call_table diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index ea4c2dc3169..df9e2bd9b2c 100644 --- a/arch/mips/kernel/smp-bmips.c +++ b/arch/mips/kernel/smp-bmips.c @@ -281,13 +281,6 @@ static void bmips_smp_finish(void) } /* - * Runs on CPU0 after all CPUs have been booted - */ -static void bmips_cpus_done(void) -{ -} - -/* * BMIPS5000 raceless IPIs * * Each CPU has two inbound SW IRQs which are independent of all other CPUs. 
@@ -434,7 +427,6 @@ struct plat_smp_ops bmips43xx_smp_ops = { .boot_secondary = bmips_boot_secondary, .smp_finish = bmips_smp_finish, .init_secondary = bmips_init_secondary, - .cpus_done = bmips_cpus_done, .send_ipi_single = bmips43xx_send_ipi_single, .send_ipi_mask = bmips43xx_send_ipi_mask, #ifdef CONFIG_HOTPLUG_CPU @@ -449,7 +441,6 @@ struct plat_smp_ops bmips5000_smp_ops = { .boot_secondary = bmips_boot_secondary, .smp_finish = bmips_smp_finish, .init_secondary = bmips_init_secondary, - .cpus_done = bmips_cpus_done, .send_ipi_single = bmips5000_send_ipi_single, .send_ipi_mask = bmips5000_send_ipi_mask, #ifdef CONFIG_HOTPLUG_CPU diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c index 3ef55fb7ac0..fc8a5155342 100644 --- a/arch/mips/kernel/smp-cmp.c +++ b/arch/mips/kernel/smp-cmp.c @@ -49,14 +49,11 @@ static void cmp_init_secondary(void) /* Enable per-cpu interrupts: platform specific */ -#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC) +#ifdef CONFIG_MIPS_MT_SMP if (cpu_has_mipsmt) c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE; #endif -#ifdef CONFIG_MIPS_MT_SMTC - c->tc_id = (read_c0_tcbind() & TCBIND_CURTC) >> TCBIND_CURTC_SHIFT; -#endif } static void cmp_smp_finish(void) @@ -75,11 +72,6 @@ static void cmp_smp_finish(void) local_irq_enable(); } -static void cmp_cpus_done(void) -{ - pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__); -} - /* * Setup the PC, SP, and GP of a secondary processor and start it running * smp_bootstrap is the place to resume from @@ -135,10 +127,6 @@ void __init cmp_smp_setup(void) unsigned int mvpconf0 = read_c0_mvpconf0(); nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; -#elif defined(CONFIG_MIPS_MT_SMTC) - unsigned int mvpconf0 = read_c0_mvpconf0(); - - nvpe = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; #endif smp_num_siblings = nvpe; } @@ -165,7 +153,6 @@ struct plat_smp_ops cmp_smp_ops = { .send_ipi_mask = gic_send_ipi_mask, .init_secondary = cmp_init_secondary, .smp_finish = cmp_smp_finish, - .cpus_done = cmp_cpus_done, .boot_secondary = cmp_boot_secondary, .smp_setup = cmp_smp_setup, .prepare_cpus = cmp_prepare_cpus, diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c index 536eec0d21b..df0598d9bfd 100644 --- a/arch/mips/kernel/smp-cps.c +++ b/arch/mips/kernel/smp-cps.c @@ -20,104 +20,43 @@ #include <asm/mips-cpc.h> #include <asm/mips_mt.h> #include <asm/mipsregs.h> +#include <asm/pm-cps.h> #include <asm/smp-cps.h> #include <asm/time.h> #include <asm/uasm.h> static DECLARE_BITMAP(core_power, NR_CPUS); -struct boot_config mips_cps_bootcfg; +struct core_boot_config *mips_cps_core_bootcfg; -static void init_core(void) +static unsigned core_vpe_count(unsigned core) { - unsigned int nvpes, t; - u32 mvpconf0, vpeconf0, vpecontrol, tcstatus, tcbind, status; + unsigned cfg; - if (!cpu_has_mipsmt) - return; - - /* Enter VPE configuration state */ - dvpe(); - set_c0_mvpcontrol(MVPCONTROL_VPC); - - /* Retrieve the count of VPEs in this core */ - mvpconf0 = read_c0_mvpconf0(); - nvpes = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; - smp_num_siblings = nvpes; - - for (t = 1; t < nvpes; t++) { - /* Use a 1:1 mapping of TC index to VPE index */ - settc(t); - - /* Bind 1 TC to this VPE */ - tcbind = read_tc_c0_tcbind(); - tcbind &= ~TCBIND_CURVPE; - tcbind |= t << TCBIND_CURVPE_SHIFT; - write_tc_c0_tcbind(tcbind); - - /* Set exclusive TC, non-active, master */ - vpeconf0 = read_vpe_c0_vpeconf0(); - vpeconf0 &= ~(VPECONF0_XTC | VPECONF0_VPA); - 
vpeconf0 |= t << VPECONF0_XTC_SHIFT; - vpeconf0 |= VPECONF0_MVP; - write_vpe_c0_vpeconf0(vpeconf0); - - /* Declare TC non-active, non-allocatable & interrupt exempt */ - tcstatus = read_tc_c0_tcstatus(); - tcstatus &= ~(TCSTATUS_A | TCSTATUS_DA); - tcstatus |= TCSTATUS_IXMT; - write_tc_c0_tcstatus(tcstatus); - - /* Halt the TC */ - write_tc_c0_tchalt(TCHALT_H); - - /* Allow only 1 TC to execute */ - vpecontrol = read_vpe_c0_vpecontrol(); - vpecontrol &= ~VPECONTROL_TE; - write_vpe_c0_vpecontrol(vpecontrol); - - /* Copy (most of) Status from VPE 0 */ - status = read_c0_status(); - status &= ~(ST0_IM | ST0_IE | ST0_KSU); - status |= ST0_CU0; - write_vpe_c0_status(status); - - /* Copy Config from VPE 0 */ - write_vpe_c0_config(read_c0_config()); - write_vpe_c0_config7(read_c0_config7()); - - /* Ensure no software interrupts are pending */ - write_vpe_c0_cause(0); - - /* Sync Count */ - write_vpe_c0_count(read_c0_count()); - } + if (!config_enabled(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt) + return 1; - /* Leave VPE configuration state */ - clear_c0_mvpcontrol(MVPCONTROL_VPC); + write_gcr_cl_other(core << CM_GCR_Cx_OTHER_CORENUM_SHF); + cfg = read_gcr_co_config() & CM_GCR_Cx_CONFIG_PVPE_MSK; + return (cfg >> CM_GCR_Cx_CONFIG_PVPE_SHF) + 1; } static void __init cps_smp_setup(void) { unsigned int ncores, nvpes, core_vpes; int c, v; - u32 core_cfg, *entry_code; /* Detect & record VPE topology */ ncores = mips_cm_numcores(); pr_info("VPE topology "); for (c = nvpes = 0; c < ncores; c++) { - if (cpu_has_mipsmt && config_enabled(CONFIG_MIPS_MT_SMP)) { - write_gcr_cl_other(c << CM_GCR_Cx_OTHER_CORENUM_SHF); - core_cfg = read_gcr_co_config(); - core_vpes = ((core_cfg & CM_GCR_Cx_CONFIG_PVPE_MSK) >> - CM_GCR_Cx_CONFIG_PVPE_SHF) + 1; - } else { - core_vpes = 1; - } - + core_vpes = core_vpe_count(c); pr_cont("%c%u", c ? 
',' : '{', core_vpes); + /* Use the number of VPEs in core 0 for smp_num_siblings */ + if (!c) + smp_num_siblings = core_vpes; + for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) { cpu_data[nvpes + v].core = c; #ifdef CONFIG_MIPS_MT_SMP @@ -137,19 +76,14 @@ static void __init cps_smp_setup(void) __cpu_logical_map[v] = v; } + /* Set a coherent default CCA (CWB) */ + change_c0_config(CONF_CM_CMASK, 0x5); + /* Core 0 is powered up (we're running on it) */ bitmap_set(core_power, 0, 1); - /* Disable MT - we only want to run 1 TC per VPE */ - if (cpu_has_mipsmt) - dmt(); - /* Initialise core 0 */ - init_core(); - - /* Patch the start of mips_cps_core_entry to provide the CM base */ - entry_code = (u32 *)&mips_cps_core_entry; - UASM_i_LA(&entry_code, 3, (long)mips_cm_base); + mips_cps_core_init(); /* Make core 0 coherent with everything */ write_gcr_cl_coherence(0xff); @@ -157,15 +91,99 @@ static void __init cps_smp_setup(void) static void __init cps_prepare_cpus(unsigned int max_cpus) { + unsigned ncores, core_vpes, c, cca; + bool cca_unsuitable; + u32 *entry_code; + mips_mt_set_cpuoptions(); + + /* Detect whether the CCA is unsuited to multi-core SMP */ + cca = read_c0_config() & CONF_CM_CMASK; + switch (cca) { + case 0x4: /* CWBE */ + case 0x5: /* CWB */ + /* The CCA is coherent, multi-core is fine */ + cca_unsuitable = false; + break; + + default: + /* CCA is not coherent, multi-core is not usable */ + cca_unsuitable = true; + } + + /* Warn the user if the CCA prevents multi-core */ + ncores = mips_cm_numcores(); + if (cca_unsuitable && ncores > 1) { + pr_warn("Using only one core due to unsuitable CCA 0x%x\n", + cca); + + for_each_present_cpu(c) { + if (cpu_data[c].core) + set_cpu_present(c, false); + } + } + + /* + * Patch the start of mips_cps_core_entry to provide: + * + * v0 = CM base address + * s0 = kseg0 CCA + */ + entry_code = (u32 *)&mips_cps_core_entry; + UASM_i_LA(&entry_code, 3, (long)mips_cm_base); + uasm_i_addiu(&entry_code, 16, 0, cca); + dma_cache_wback_inv((unsigned long)&mips_cps_core_entry, + (void *)entry_code - (void *)&mips_cps_core_entry); + + /* Allocate core boot configuration structs */ + mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg), + GFP_KERNEL); + if (!mips_cps_core_bootcfg) { + pr_err("Failed to allocate boot config for %u cores\n", ncores); + goto err_out; + } + + /* Allocate VPE boot configuration structs */ + for (c = 0; c < ncores; c++) { + core_vpes = core_vpe_count(c); + mips_cps_core_bootcfg[c].vpe_config = kcalloc(core_vpes, + sizeof(*mips_cps_core_bootcfg[c].vpe_config), + GFP_KERNEL); + if (!mips_cps_core_bootcfg[c].vpe_config) { + pr_err("Failed to allocate %u VPE boot configs\n", + core_vpes); + goto err_out; + } + } + + /* Mark this CPU as booted */ + atomic_set(&mips_cps_core_bootcfg[current_cpu_data.core].vpe_mask, + 1 << cpu_vpe_id(¤t_cpu_data)); + + return; +err_out: + /* Clean up allocations */ + if (mips_cps_core_bootcfg) { + for (c = 0; c < ncores; c++) + kfree(mips_cps_core_bootcfg[c].vpe_config); + kfree(mips_cps_core_bootcfg); + mips_cps_core_bootcfg = NULL; + } + + /* Effectively disable SMP by declaring CPUs not present */ + for_each_possible_cpu(c) { + if (c == 0) + continue; + set_cpu_present(c, false); + } } -static void boot_core(struct boot_config *cfg) +static void boot_core(unsigned core) { u32 access; /* Select the appropriate core */ - write_gcr_cl_other(cfg->core << CM_GCR_Cx_OTHER_CORENUM_SHF); + write_gcr_cl_other(core << CM_GCR_Cx_OTHER_CORENUM_SHF); /* Set its reset vector */ 
write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry)); @@ -175,104 +193,74 @@ static void boot_core(struct boot_config *cfg) /* Ensure the core can access the GCRs */ access = read_gcr_access(); - access |= 1 << (CM_GCR_ACCESS_ACCESSEN_SHF + cfg->core); + access |= 1 << (CM_GCR_ACCESS_ACCESSEN_SHF + core); write_gcr_access(access); - /* Copy cfg */ - mips_cps_bootcfg = *cfg; - if (mips_cpc_present()) { - /* Select the appropriate core */ - write_cpc_cl_other(cfg->core << CPC_Cx_OTHER_CORENUM_SHF); - /* Reset the core */ + mips_cpc_lock_other(core); write_cpc_co_cmd(CPC_Cx_CMD_RESET); + mips_cpc_unlock_other(); } else { /* Take the core out of reset */ write_gcr_co_reset_release(0); } /* The core is now powered up */ - bitmap_set(core_power, cfg->core, 1); + bitmap_set(core_power, core, 1); } -static void boot_vpe(void *info) +static void remote_vpe_boot(void *dummy) { - struct boot_config *cfg = info; - u32 tcstatus, vpeconf0; - - /* Enter VPE configuration state */ - dvpe(); - set_c0_mvpcontrol(MVPCONTROL_VPC); - - settc(cfg->vpe); - - /* Set the TC restart PC */ - write_tc_c0_tcrestart((unsigned long)&smp_bootstrap); - - /* Activate the TC, allow interrupts */ - tcstatus = read_tc_c0_tcstatus(); - tcstatus &= ~TCSTATUS_IXMT; - tcstatus |= TCSTATUS_A; - write_tc_c0_tcstatus(tcstatus); - - /* Clear the TC halt bit */ - write_tc_c0_tchalt(0); - - /* Activate the VPE */ - vpeconf0 = read_vpe_c0_vpeconf0(); - vpeconf0 |= VPECONF0_VPA; - write_vpe_c0_vpeconf0(vpeconf0); - - /* Set the stack & global pointer registers */ - write_tc_gpr_sp(cfg->sp); - write_tc_gpr_gp(cfg->gp); - - /* Leave VPE configuration state */ - clear_c0_mvpcontrol(MVPCONTROL_VPC); - - /* Enable other VPEs to execute */ - evpe(EVPE_ENABLE); + mips_cps_boot_vpes(); } static void cps_boot_secondary(int cpu, struct task_struct *idle) { - struct boot_config cfg; + unsigned core = cpu_data[cpu].core; + unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]); + struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core]; + struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id]; unsigned int remote; int err; - cfg.core = cpu_data[cpu].core; - cfg.vpe = cpu_vpe_id(&cpu_data[cpu]); - cfg.pc = (unsigned long)&smp_bootstrap; - cfg.sp = __KSTK_TOS(idle); - cfg.gp = (unsigned long)task_thread_info(idle); + vpe_cfg->pc = (unsigned long)&smp_bootstrap; + vpe_cfg->sp = __KSTK_TOS(idle); + vpe_cfg->gp = (unsigned long)task_thread_info(idle); + + atomic_or(1 << cpu_vpe_id(&cpu_data[cpu]), &core_cfg->vpe_mask); - if (!test_bit(cfg.core, core_power)) { + preempt_disable(); + + if (!test_bit(core, core_power)) { /* Boot a VPE on a powered down core */ - boot_core(&cfg); - return; + boot_core(core); + goto out; } - if (cfg.core != current_cpu_data.core) { + if (core != current_cpu_data.core) { /* Boot a VPE on another powered up core */ for (remote = 0; remote < NR_CPUS; remote++) { - if (cpu_data[remote].core != cfg.core) + if (cpu_data[remote].core != core) continue; if (cpu_online(remote)) break; } BUG_ON(remote >= NR_CPUS); - err = smp_call_function_single(remote, boot_vpe, &cfg, 1); + err = smp_call_function_single(remote, remote_vpe_boot, + NULL, 1); if (err) panic("Failed to call remote CPU\n"); - return; + goto out; } BUG_ON(!cpu_has_mipsmt); /* Boot a VPE on this core */ - boot_vpe(&cfg); + mips_cps_boot_vpes(); +out: + preempt_enable(); } static void cps_init_secondary(void) @@ -281,10 +269,6 @@ static void cps_init_secondary(void) if (cpu_has_mipsmt) dmt(); - /* TODO: revisit this assumption once hotplug is 
implemented */ - if (cpu_vpe_id(¤t_cpu_data) == 0) - init_core(); - change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | STATUSF_IP6 | STATUSF_IP7); } @@ -302,10 +286,148 @@ static void cps_smp_finish(void) local_irq_enable(); } -static void cps_cpus_done(void) +#ifdef CONFIG_HOTPLUG_CPU + +static int cps_cpu_disable(void) +{ + unsigned cpu = smp_processor_id(); + struct core_boot_config *core_cfg; + + if (!cpu) + return -EBUSY; + + if (!cps_pm_support_state(CPS_PM_POWER_GATED)) + return -EINVAL; + + core_cfg = &mips_cps_core_bootcfg[current_cpu_data.core]; + atomic_sub(1 << cpu_vpe_id(¤t_cpu_data), &core_cfg->vpe_mask); + smp_mb__after_atomic_dec(); + set_cpu_online(cpu, false); + cpu_clear(cpu, cpu_callin_map); + + return 0; +} + +static DECLARE_COMPLETION(cpu_death_chosen); +static unsigned cpu_death_sibling; +static enum { + CPU_DEATH_HALT, + CPU_DEATH_POWER, +} cpu_death; + +void play_dead(void) +{ + unsigned cpu, core; + + local_irq_disable(); + idle_task_exit(); + cpu = smp_processor_id(); + cpu_death = CPU_DEATH_POWER; + + if (cpu_has_mipsmt) { + core = cpu_data[cpu].core; + + /* Look for another online VPE within the core */ + for_each_online_cpu(cpu_death_sibling) { + if (cpu_data[cpu_death_sibling].core != core) + continue; + + /* + * There is an online VPE within the core. Just halt + * this TC and leave the core alone. + */ + cpu_death = CPU_DEATH_HALT; + break; + } + } + + /* This CPU has chosen its way out */ + complete(&cpu_death_chosen); + + if (cpu_death == CPU_DEATH_HALT) { + /* Halt this TC */ + write_c0_tchalt(TCHALT_H); + instruction_hazard(); + } else { + /* Power down the core */ + cps_pm_enter_state(CPS_PM_POWER_GATED); + } + + /* This should never be reached */ + panic("Failed to offline CPU %u", cpu); +} + +static void wait_for_sibling_halt(void *ptr_cpu) { + unsigned cpu = (unsigned)ptr_cpu; + unsigned vpe_id = cpu_data[cpu].vpe_id; + unsigned halted; + unsigned long flags; + + do { + local_irq_save(flags); + settc(vpe_id); + halted = read_tc_c0_tchalt(); + local_irq_restore(flags); + } while (!(halted & TCHALT_H)); +} + +static void cps_cpu_die(unsigned int cpu) +{ + unsigned core = cpu_data[cpu].core; + unsigned stat; + int err; + + /* Wait for the cpu to choose its way out */ + if (!wait_for_completion_timeout(&cpu_death_chosen, + msecs_to_jiffies(5000))) { + pr_err("CPU%u: didn't offline\n", cpu); + return; + } + + /* + * Now wait for the CPU to actually offline. Without doing this that + * offlining may race with one or more of: + * + * - Onlining the CPU again. + * - Powering down the core if another VPE within it is offlined. + * - A sibling VPE entering a non-coherent state. + * + * In the non-MT halt case (ie. infinite loop) the CPU is doing nothing + * with which we could race, so do nothing. + */ + if (cpu_death == CPU_DEATH_POWER) { + /* + * Wait for the core to enter a powered down or clock gated + * state, the latter happening when a JTAG probe is connected + * in which case the CPC will refuse to power down the core. + */ + do { + mips_cpc_lock_other(core); + stat = read_cpc_co_stat_conf(); + stat &= CPC_Cx_STAT_CONF_SEQSTATE_MSK; + mips_cpc_unlock_other(); + } while (stat != CPC_Cx_STAT_CONF_SEQSTATE_D0 && + stat != CPC_Cx_STAT_CONF_SEQSTATE_D2 && + stat != CPC_Cx_STAT_CONF_SEQSTATE_U2); + + /* Indicate the core is powered off */ + bitmap_clear(core_power, core, 1); + } else if (cpu_has_mipsmt) { + /* + * Have a CPU with access to the offlined CPUs registers wait + * for its TC to halt. 
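+ * wait_for_sibling_halt() above spins reading the dead CPU's TCHalt
+ * register, so once the smp_call_function_single() below returns the
+ * TC is known to have halted.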
+ */ + err = smp_call_function_single(cpu_death_sibling, + wait_for_sibling_halt, + (void *)cpu, 1); + if (err) + panic("Failed to call remote sibling CPU\n"); + } } +#endif /* CONFIG_HOTPLUG_CPU */ + static struct plat_smp_ops cps_smp_ops = { .smp_setup = cps_smp_setup, .prepare_cpus = cps_prepare_cpus, @@ -314,9 +436,18 @@ static struct plat_smp_ops cps_smp_ops = { .smp_finish = cps_smp_finish, .send_ipi_single = gic_send_ipi_single, .send_ipi_mask = gic_send_ipi_mask, - .cpus_done = cps_cpus_done, +#ifdef CONFIG_HOTPLUG_CPU + .cpu_disable = cps_cpu_disable, + .cpu_die = cps_cpu_die, +#endif }; +bool mips_cps_smp_in_use(void) +{ + extern struct plat_smp_ops *mp_ops; + return mp_ops == &cps_smp_ops; +} + int register_cps_smp_ops(void) { if (!mips_cm_present()) { diff --git a/arch/mips/kernel/smp-gic.c b/arch/mips/kernel/smp-gic.c index 3bb1f92ab52..3b21a96d1cc 100644 --- a/arch/mips/kernel/smp-gic.c +++ b/arch/mips/kernel/smp-gic.c @@ -15,12 +15,14 @@ #include <linux/printk.h> #include <asm/gic.h> +#include <asm/mips-cpc.h> #include <asm/smp-ops.h> void gic_send_ipi_single(int cpu, unsigned int action) { unsigned long flags; unsigned int intr; + unsigned int core = cpu_data[cpu].core; pr_debug("CPU%d: %s cpu %d action %u status %08x\n", smp_processor_id(), __func__, cpu, action, read_c0_status()); @@ -41,6 +43,15 @@ void gic_send_ipi_single(int cpu, unsigned int action) } gic_send_ipi(intr); + + if (mips_cpc_present() && (core != current_cpu_data.core)) { + while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) { + mips_cpc_lock_other(core); + write_cpc_co_cmd(CPC_Cx_CMD_PWRUP); + mips_cpc_unlock_other(); + } + } + local_irq_restore(flags); } diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c index f8e13149604..3babf6e4f89 100644 --- a/arch/mips/kernel/smp-mt.c +++ b/arch/mips/kernel/smp-mt.c @@ -183,10 +183,6 @@ static void vsmp_smp_finish(void) local_irq_enable(); } -static void vsmp_cpus_done(void) -{ -} - /* * Setup the PC, SP, and GP of a secondary processor and start it * running! 
@@ -287,7 +283,6 @@ struct plat_smp_ops vsmp_smp_ops = { .send_ipi_mask = vsmp_send_ipi_mask, .init_secondary = vsmp_init_secondary, .smp_finish = vsmp_smp_finish, - .cpus_done = vsmp_cpus_done, .boot_secondary = vsmp_boot_secondary, .smp_setup = vsmp_smp_setup, .prepare_cpus = vsmp_prepare_cpus, diff --git a/arch/mips/kernel/smp-up.c b/arch/mips/kernel/smp-up.c index 7fde3e4d978..17878d71ef2 100644 --- a/arch/mips/kernel/smp-up.c +++ b/arch/mips/kernel/smp-up.c @@ -36,11 +36,6 @@ static void up_smp_finish(void) { } -/* Hook for after all CPUs are online */ -static void up_cpus_done(void) -{ -} - /* * Firmware CPU startup hook */ @@ -73,7 +68,6 @@ struct plat_smp_ops up_smp_ops = { .send_ipi_mask = up_send_ipi_mask, .init_secondary = up_init_secondary, .smp_finish = up_smp_finish, - .cpus_done = up_cpus_done, .boot_secondary = up_boot_secondary, .smp_setup = up_smp_setup, .prepare_cpus = up_prepare_cpus, diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 0a022ee33b2..9bad52ede90 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -43,10 +43,6 @@ #include <asm/time.h> #include <asm/setup.h> -#ifdef CONFIG_MIPS_MT_SMTC -#include <asm/mipsmtregs.h> -#endif /* CONFIG_MIPS_MT_SMTC */ - volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */ int __cpu_number_map[NR_CPUS]; /* Map physical to logical */ @@ -66,6 +62,8 @@ EXPORT_SYMBOL(cpu_sibling_map); /* representing cpus for which sibling maps can be computed */ static cpumask_t cpu_sibling_setup_map; +cpumask_t cpu_coherent_mask; + static inline void set_cpu_sibling_map(int cpu) { int i; @@ -102,12 +100,6 @@ asmlinkage void start_secondary(void) { unsigned int cpu; -#ifdef CONFIG_MIPS_MT_SMTC - /* Only do cpu_probe for first TC of CPU */ - if ((read_c0_tcbind() & TCBIND_CURTC) != 0) - __cpu_name[smp_processor_id()] = __cpu_name[0]; - else -#endif /* CONFIG_MIPS_MT_SMTC */ cpu_probe(); cpu_report(); per_cpu_trap_init(false); @@ -124,6 +116,7 @@ asmlinkage void start_secondary(void) cpu = smp_processor_id(); cpu_data[cpu].udelay_val = loops_per_jiffy; + cpu_set(cpu, cpu_coherent_mask); notify_cpu_starting(cpu); set_cpu_online(cpu, true); @@ -173,7 +166,6 @@ void smp_send_stop(void) void __init smp_cpus_done(unsigned int max_cpus) { - mp_ops->cpus_done(); } /* called from main before smp_init() */ @@ -186,6 +178,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) #ifndef CONFIG_HOTPLUG_CPU init_cpu_present(cpu_possible_mask); #endif + cpumask_copy(&cpu_coherent_mask, cpu_possible_mask); } /* preload SMP state for boot cpu */ @@ -238,13 +231,10 @@ static void flush_tlb_mm_ipi(void *mm) * o collapses to normal function call on UP kernels * o collapses to normal function call on systems with a single shared * primary cache. - * o CONFIG_MIPS_MT_SMTC currently implies there is only one physical core. 
*/ static inline void smp_on_other_tlbs(void (*func) (void *info), void *info) { -#ifndef CONFIG_MIPS_MT_SMTC smp_call_function(func, info, 1); -#endif } static inline void smp_on_each_tlb(void (*func) (void *info), void *info) @@ -404,3 +394,46 @@ void dump_send_ipi(void (*dump_ipi_callback)(void *)) } EXPORT_SYMBOL(dump_send_ipi); #endif + +#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST + +static DEFINE_PER_CPU(atomic_t, tick_broadcast_count); +static DEFINE_PER_CPU(struct call_single_data, tick_broadcast_csd); + +void tick_broadcast(const struct cpumask *mask) +{ + atomic_t *count; + struct call_single_data *csd; + int cpu; + + for_each_cpu(cpu, mask) { + count = &per_cpu(tick_broadcast_count, cpu); + csd = &per_cpu(tick_broadcast_csd, cpu); + + if (atomic_inc_return(count) == 1) + smp_call_function_single_async(cpu, csd); + } +} + +static void tick_broadcast_callee(void *info) +{ + int cpu = smp_processor_id(); + tick_receive_broadcast(); + atomic_set(&per_cpu(tick_broadcast_count, cpu), 0); +} + +static int __init tick_broadcast_init(void) +{ + struct call_single_data *csd; + int cpu; + + for (cpu = 0; cpu < NR_CPUS; cpu++) { + csd = &per_cpu(tick_broadcast_csd, cpu); + csd->func = tick_broadcast_callee; + } + + return 0; +} +early_initcall(tick_broadcast_init); + +#endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */ diff --git a/arch/mips/kernel/smtc-asm.S b/arch/mips/kernel/smtc-asm.S deleted file mode 100644 index 2866863a39d..00000000000 --- a/arch/mips/kernel/smtc-asm.S +++ /dev/null @@ -1,133 +0,0 @@ -/* - * Assembly Language Functions for MIPS MT SMTC support - */ - -/* - * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set. */ - -#include <asm/regdef.h> -#include <asm/asmmacro.h> -#include <asm/stackframe.h> -#include <asm/irqflags.h> - -/* - * "Software Interrupt" linkage. - * - * This is invoked when an "Interrupt" is sent from one TC to another, - * where the TC to be interrupted is halted, has it's Restart address - * and Status values saved by the "remote control" thread, then modified - * to cause execution to begin here, in kenel mode. This code then - * disguises the TC state as that of an exception and transfers - * control to the general exception or vectored interrupt handler. - */ - .set noreorder - -/* -The __smtc_ipi_vector would use k0 and k1 as temporaries and -1) Set EXL (this is per-VPE, so this can't be done by proxy!) -2) Restore the K/CU and IXMT bits to the pre "exception" state - (EXL means no interrupts and access to the kernel map). -3) Set EPC to be the saved value of TCRestart. -4) Jump to the exception handler entry point passed by the sender. - -CAN WE PROVE THAT WE WON'T DO THIS IF INTS DISABLED?? -*/ - -/* - * Reviled and slandered vision: Set EXL and restore K/CU/IXMT - * state of pre-halt thread, then save everything and call - * thought some function pointer to imaginary_exception, which - * will parse a register value or memory message queue to - * deliver things like interprocessor interrupts. On return - * from that function, jump to the global ret_from_irq code - * to invoke the scheduler and return as appropriate. - */ - -#define PT_PADSLOT4 (PT_R0-8) -#define PT_PADSLOT5 (PT_R0-4) - - .text - .align 5 -FEXPORT(__smtc_ipi_vector) -#ifdef CONFIG_CPU_MICROMIPS - nop -#endif - .set noat - /* Disable thread scheduling to make Status update atomic */ - DMT 27 # dmt k1 - _ehb - /* Set EXL */ - mfc0 k0,CP0_STATUS - ori k0,k0,ST0_EXL - mtc0 k0,CP0_STATUS - _ehb - /* Thread scheduling now inhibited by EXL. Restore TE state. 
*/ - andi k1,k1,VPECONTROL_TE - beqz k1,1f - emt -1: - /* - * The IPI sender has put some information on the anticipated - * kernel stack frame. If we were in user mode, this will be - * built above the saved kernel SP. If we were already in the - * kernel, it will be built above the current CPU SP. - * - * Were we in kernel mode, as indicated by CU0? - */ - sll k1,k0,3 - .set noreorder - bltz k1,2f - move k1,sp - .set reorder - /* - * If previously in user mode, set CU0 and use kernel stack. - */ - li k1,ST0_CU0 - or k1,k1,k0 - mtc0 k1,CP0_STATUS - _ehb - get_saved_sp - /* Interrupting TC will have pre-set values in slots in the new frame */ -2: subu k1,k1,PT_SIZE - /* Load TCStatus Value */ - lw k0,PT_TCSTATUS(k1) - /* Write it to TCStatus to restore CU/KSU/IXMT state */ - mtc0 k0,$2,1 - _ehb - lw k0,PT_EPC(k1) - mtc0 k0,CP0_EPC - /* Save all will redundantly recompute the SP, but use it for now */ - SAVE_ALL - CLI - TRACE_IRQS_OFF - /* Function to be invoked passed stack pad slot 5 */ - lw t0,PT_PADSLOT5(sp) - /* Argument from sender passed in stack pad slot 4 */ - lw a0,PT_PADSLOT4(sp) - LONG_L s0, TI_REGS($28) - LONG_S sp, TI_REGS($28) - PTR_LA ra, ret_from_irq - jr t0 - -/* - * Called from idle loop to provoke processing of queued IPIs - * First IPI message in queue passed as argument. - */ - -LEAF(self_ipi) - /* Before anything else, block interrupts */ - mfc0 t0,CP0_TCSTATUS - ori t1,t0,TCSTATUS_IXMT - mtc0 t1,CP0_TCSTATUS - _ehb - /* We know we're in kernel mode, so prepare stack frame */ - subu t1,sp,PT_SIZE - sw ra,PT_EPC(t1) - sw a0,PT_PADSLOT4(t1) - la t2,ipi_decode - sw t2,PT_PADSLOT5(t1) - /* Save pre-disable value of TCStatus */ - sw t0,PT_TCSTATUS(t1) - j __smtc_ipi_vector - nop -END(self_ipi) diff --git a/arch/mips/kernel/smtc-proc.c b/arch/mips/kernel/smtc-proc.c deleted file mode 100644 index 38635a996cb..00000000000 --- a/arch/mips/kernel/smtc-proc.c +++ /dev/null @@ -1,102 +0,0 @@ -/* - * /proc hooks for SMTC kernel - * Copyright (C) 2005 Mips Technologies, Inc - */ - -#include <linux/kernel.h> -#include <linux/sched.h> -#include <linux/cpumask.h> -#include <linux/interrupt.h> - -#include <asm/cpu.h> -#include <asm/processor.h> -#include <linux/atomic.h> -#include <asm/hardirq.h> -#include <asm/mmu_context.h> -#include <asm/mipsregs.h> -#include <asm/cacheflush.h> -#include <linux/proc_fs.h> -#include <linux/seq_file.h> - -#include <asm/smtc_proc.h> - -/* - * /proc diagnostic and statistics hooks - */ - -/* - * Statistics gathered - */ -unsigned long selfipis[NR_CPUS]; - -struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS]; - -atomic_t smtc_fpu_recoveries; - -static int smtc_proc_show(struct seq_file *m, void *v) -{ - int i; - extern unsigned long ebase; - - seq_printf(m, "SMTC Status Word: 0x%08x\n", smtc_status); - seq_printf(m, "Config7: 0x%08x\n", read_c0_config7()); - seq_printf(m, "EBASE: 0x%08lx\n", ebase); - seq_printf(m, "Counter Interrupts taken per CPU (TC)\n"); - for (i=0; i < NR_CPUS; i++) - seq_printf(m, "%d: %ld\n", i, smtc_cpu_stats[i].timerints); - seq_printf(m, "Self-IPIs by CPU:\n"); - for(i = 0; i < NR_CPUS; i++) - seq_printf(m, "%d: %ld\n", i, smtc_cpu_stats[i].selfipis); - seq_printf(m, "%d Recoveries of \"stolen\" FPU\n", - atomic_read(&smtc_fpu_recoveries)); - return 0; -} - -static int smtc_proc_open(struct inode *inode, struct file *file) -{ - return single_open(file, smtc_proc_show, NULL); -} - -static const struct file_operations smtc_proc_fops = { - .open = smtc_proc_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, 
-}; - -void init_smtc_stats(void) -{ - int i; - - for (i=0; i<NR_CPUS; i++) { - smtc_cpu_stats[i].timerints = 0; - smtc_cpu_stats[i].selfipis = 0; - } - - atomic_set(&smtc_fpu_recoveries, 0); - - proc_create("smtc", 0444, NULL, &smtc_proc_fops); -} - -static int proc_cpuinfo_chain_call(struct notifier_block *nfb, - unsigned long action_unused, void *data) -{ - struct proc_cpuinfo_notifier_args *pcn = data; - struct seq_file *m = pcn->m; - unsigned long n = pcn->n; - - if (!cpu_has_mipsmt) - return NOTIFY_OK; - - seq_printf(m, "VPE\t\t\t: %d\n", cpu_data[n].vpe_id); - seq_printf(m, "TC\t\t\t: %d\n", cpu_data[n].tc_id); - - return NOTIFY_OK; -} - -static int __init proc_cpuinfo_notifier_init(void) -{ - return proc_cpuinfo_notifier(proc_cpuinfo_chain_call, 0); -} - -subsys_initcall(proc_cpuinfo_notifier_init); diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c deleted file mode 100644 index c1681d65dd5..00000000000 --- a/arch/mips/kernel/smtc.c +++ /dev/null @@ -1,1528 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - * Copyright (C) 2004 Mips Technologies, Inc - * Copyright (C) 2008 Kevin D. Kissell - */ - -#include <linux/clockchips.h> -#include <linux/kernel.h> -#include <linux/sched.h> -#include <linux/smp.h> -#include <linux/cpumask.h> -#include <linux/interrupt.h> -#include <linux/kernel_stat.h> -#include <linux/module.h> -#include <linux/ftrace.h> -#include <linux/slab.h> - -#include <asm/cpu.h> -#include <asm/processor.h> -#include <linux/atomic.h> -#include <asm/hardirq.h> -#include <asm/hazards.h> -#include <asm/irq.h> -#include <asm/idle.h> -#include <asm/mmu_context.h> -#include <asm/mipsregs.h> -#include <asm/cacheflush.h> -#include <asm/time.h> -#include <asm/addrspace.h> -#include <asm/smtc.h> -#include <asm/smtc_proc.h> -#include <asm/setup.h> - -/* - * SMTC Kernel needs to manipulate low-level CPU interrupt mask - * in do_IRQ. These are passed in setup_irq_smtc() and stored - * in this table. - */ -unsigned long irq_hwmask[NR_IRQS]; - -#define LOCK_MT_PRA() \ - local_irq_save(flags); \ - mtflags = dmt() - -#define UNLOCK_MT_PRA() \ - emt(mtflags); \ - local_irq_restore(flags) - -#define LOCK_CORE_PRA() \ - local_irq_save(flags); \ - mtflags = dvpe() - -#define UNLOCK_CORE_PRA() \ - evpe(mtflags); \ - local_irq_restore(flags) - -/* - * Data structures purely associated with SMTC parallelism - */ - - -/* - * Table for tracking ASIDs whose lifetime is prolonged. 
- */ - -asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS]; - -/* - * Number of InterProcessor Interrupt (IPI) message buffers to allocate - */ - -#define IPIBUF_PER_CPU 4 - -struct smtc_ipi_q IPIQ[NR_CPUS]; -static struct smtc_ipi_q freeIPIq; - - -/* - * Number of FPU contexts for each VPE - */ - -static int smtc_nconf1[MAX_SMTC_VPES]; - - -/* Forward declarations */ - -void ipi_decode(struct smtc_ipi *); -static void post_direct_ipi(int cpu, struct smtc_ipi *pipi); -static void setup_cross_vpe_interrupts(unsigned int nvpe); -void init_smtc_stats(void); - -/* Global SMTC Status */ - -unsigned int smtc_status; - -/* Boot command line configuration overrides */ - -static int vpe0limit; -static int ipibuffers; -static int nostlb; -static int asidmask; -unsigned long smtc_asid_mask = 0xff; - -static int __init vpe0tcs(char *str) -{ - get_option(&str, &vpe0limit); - - return 1; -} - -static int __init ipibufs(char *str) -{ - get_option(&str, &ipibuffers); - return 1; -} - -static int __init stlb_disable(char *s) -{ - nostlb = 1; - return 1; -} - -static int __init asidmask_set(char *str) -{ - get_option(&str, &asidmask); - switch (asidmask) { - case 0x1: - case 0x3: - case 0x7: - case 0xf: - case 0x1f: - case 0x3f: - case 0x7f: - case 0xff: - smtc_asid_mask = (unsigned long)asidmask; - break; - default: - printk("ILLEGAL ASID mask 0x%x from command line\n", asidmask); - } - return 1; -} - -__setup("vpe0tcs=", vpe0tcs); -__setup("ipibufs=", ipibufs); -__setup("nostlb", stlb_disable); -__setup("asidmask=", asidmask_set); - -#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG - -static int hang_trig; - -static int __init hangtrig_enable(char *s) -{ - hang_trig = 1; - return 1; -} - - -__setup("hangtrig", hangtrig_enable); - -#define DEFAULT_BLOCKED_IPI_LIMIT 32 - -static int timerq_limit = DEFAULT_BLOCKED_IPI_LIMIT; - -static int __init tintq(char *str) -{ - get_option(&str, &timerq_limit); - return 1; -} - -__setup("tintq=", tintq); - -static int imstuckcount[MAX_SMTC_VPES][8]; -/* vpemask represents IM/IE bits of per-VPE Status registers, low-to-high */ -static int vpemask[MAX_SMTC_VPES][8] = { - {0, 0, 1, 0, 0, 0, 0, 1}, - {0, 0, 0, 0, 0, 0, 0, 1} -}; -int tcnoprog[NR_CPUS]; -static atomic_t idle_hook_initialized = ATOMIC_INIT(0); -static int clock_hang_reported[NR_CPUS]; - -#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */ - -/* - * Configure shared TLB - VPC configuration bit must be set by caller - */ - -static void smtc_configure_tlb(void) -{ - int i, tlbsiz, vpes; - unsigned long mvpconf0; - unsigned long config1val; - - /* Set up ASID preservation table */ - for (vpes=0; vpes<MAX_SMTC_TLBS; vpes++) { - for(i = 0; i < MAX_SMTC_ASIDS; i++) { - smtc_live_asid[vpes][i] = 0; - } - } - mvpconf0 = read_c0_mvpconf0(); - - if ((vpes = ((mvpconf0 & MVPCONF0_PVPE) - >> MVPCONF0_PVPE_SHIFT) + 1) > 1) { - /* If we have multiple VPEs, try to share the TLB */ - if ((mvpconf0 & MVPCONF0_TLBS) && !nostlb) { - /* - * If TLB sizing is programmable, shared TLB - * size is the total available complement. - * Otherwise, we have to take the sum of all - * static VPE TLB entries. - */ - if ((tlbsiz = ((mvpconf0 & MVPCONF0_PTLBE) - >> MVPCONF0_PTLBE_SHIFT)) == 0) { - /* - * If there's more than one VPE, there had better - * be more than one TC, because we need one to bind - * to each VPE in turn to be able to read - * its configuration state! 
- */ - settc(1); - /* Stop the TC from doing anything foolish */ - write_tc_c0_tchalt(TCHALT_H); - mips_ihb(); - /* No need to un-Halt - that happens later anyway */ - for (i=0; i < vpes; i++) { - write_tc_c0_tcbind(i); - /* - * To be 100% sure we're really getting the right - * information, we exit the configuration state - * and do an IHB after each rebinding. - */ - write_c0_mvpcontrol( - read_c0_mvpcontrol() & ~ MVPCONTROL_VPC ); - mips_ihb(); - /* - * Only count if the MMU Type indicated is TLB - */ - if (((read_vpe_c0_config() & MIPS_CONF_MT) >> 7) == 1) { - config1val = read_vpe_c0_config1(); - tlbsiz += ((config1val >> 25) & 0x3f) + 1; - } - - /* Put core back in configuration state */ - write_c0_mvpcontrol( - read_c0_mvpcontrol() | MVPCONTROL_VPC ); - mips_ihb(); - } - } - write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_STLB); - ehb(); - - /* - * Setup kernel data structures to use software total, - * rather than read the per-VPE Config1 value. The values - * for "CPU 0" gets copied to all the other CPUs as part - * of their initialization in smtc_cpu_setup(). - */ - - /* MIPS32 limits TLB indices to 64 */ - if (tlbsiz > 64) - tlbsiz = 64; - cpu_data[0].tlbsize = current_cpu_data.tlbsize = tlbsiz; - smtc_status |= SMTC_TLB_SHARED; - local_flush_tlb_all(); - - printk("TLB of %d entry pairs shared by %d VPEs\n", - tlbsiz, vpes); - } else { - printk("WARNING: TLB Not Sharable on SMTC Boot!\n"); - } - } -} - - -/* - * Incrementally build the CPU map out of constituent MIPS MT cores, - * using the specified available VPEs and TCs. Plaform code needs - * to ensure that each MIPS MT core invokes this routine on reset, - * one at a time(!). - * - * This version of the build_cpu_map and prepare_cpus routines assumes - * that *all* TCs of a MIPS MT core will be used for Linux, and that - * they will be spread across *all* available VPEs (to minimise the - * loss of efficiency due to exception service serialization). - * An improved version would pick up configuration information and - * possibly leave some TCs/VPEs as "slave" processors. - * - * Use c0_MVPConf0 to find out how many TCs are available, setting up - * cpu_possible_mask and the logical/physical mappings. - */ - -int __init smtc_build_cpu_map(int start_cpu_slot) -{ - int i, ntcs; - - /* - * The CPU map isn't actually used for anything at this point, - * so it's not clear what else we should do apart from set - * everything up so that "logical" = "physical". - */ - ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; - for (i=start_cpu_slot; i<NR_CPUS && i<ntcs; i++) { - set_cpu_possible(i, true); - __cpu_number_map[i] = i; - __cpu_logical_map[i] = i; - } -#ifdef CONFIG_MIPS_MT_FPAFF - /* Initialize map of CPUs with FPUs */ - cpus_clear(mt_fpu_cpumask); -#endif - - /* One of those TC's is the one booting, and not a secondary... */ - printk("%i available secondary CPU TC(s)\n", i - 1); - - return i; -} - -/* - * Common setup before any secondaries are started - * Make sure all CPUs are in a sensible state before we boot any of the - * secondaries. - * - * For MIPS MT "SMTC" operation, we set up all TCs, spread as evenly - * as possible across the available VPEs. - */ - -static void smtc_tc_setup(int vpe, int tc, int cpu) -{ - static int cp1contexts[MAX_SMTC_VPES]; - - /* - * Make a local copy of the available FPU contexts in order - * to keep track of TCs that can have one. - */ - if (tc == 1) - { - /* - * FIXME: Multi-core SMTC hasn't been tested and the - * maximum number of VPEs may change. 
- */ - cp1contexts[0] = smtc_nconf1[0] - 1; - cp1contexts[1] = smtc_nconf1[1]; - } - - settc(tc); - write_tc_c0_tchalt(TCHALT_H); - mips_ihb(); - write_tc_c0_tcstatus((read_tc_c0_tcstatus() - & ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT)) - | TCSTATUS_A); - /* - * TCContext gets an offset from the base of the IPIQ array - * to be used in low-level code to detect the presence of - * an active IPI queue. - */ - write_tc_c0_tccontext((sizeof(struct smtc_ipi_q) * cpu) << 16); - - /* Bind TC to VPE. */ - write_tc_c0_tcbind(vpe); - - /* In general, all TCs should have the same cpu_data indications. */ - memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips)); - - /* Check to see if there is a FPU context available for this TC. */ - if (!cp1contexts[vpe]) - cpu_data[cpu].options &= ~MIPS_CPU_FPU; - else - cp1contexts[vpe]--; - - /* Store the TC and VPE into the cpu_data structure. */ - cpu_data[cpu].vpe_id = vpe; - cpu_data[cpu].tc_id = tc; - - /* FIXME: Multi-core SMTC hasn't been tested, but be prepared. */ - cpu_data[cpu].core = (read_vpe_c0_ebase() >> 1) & 0xff; -} - -/* - * Tweak to get Count registers synced as closely as possible. The - * value seems good for 34K-class cores. - */ - -#define CP0_SKEW 8 - -void smtc_prepare_cpus(int cpus) -{ - int i, vpe, tc, ntc, nvpe, tcpervpe[NR_CPUS], slop, cpu; - unsigned long flags; - unsigned long val; - int nipi; - struct smtc_ipi *pipi; - - /* disable interrupts so we can disable MT */ - local_irq_save(flags); - /* disable MT so we can configure */ - dvpe(); - dmt(); - - spin_lock_init(&freeIPIq.lock); - - /* - * We probably don't have as many VPEs as we do SMP "CPUs", - * but it's possible - and in any case we'll never use more! - */ - for (i=0; i<NR_CPUS; i++) { - IPIQ[i].head = IPIQ[i].tail = NULL; - spin_lock_init(&IPIQ[i].lock); - IPIQ[i].depth = 0; - IPIQ[i].resched_flag = 0; /* No reschedules queued initially */ - } - - /* cpu_data index starts at zero */ - cpu = 0; - cpu_data[cpu].vpe_id = 0; - cpu_data[cpu].tc_id = 0; - cpu_data[cpu].core = (read_c0_ebase() >> 1) & 0xff; - cpu++; - - /* Report on boot-time options */ - mips_mt_set_cpuoptions(); - if (vpelimit > 0) - printk("Limit of %d VPEs set\n", vpelimit); - if (tclimit > 0) - printk("Limit of %d TCs set\n", tclimit); - if (nostlb) { - printk("Shared TLB Use Inhibited - UNSAFE for Multi-VPE Operation\n"); - } - if (asidmask) - printk("ASID mask value override to 0x%x\n", asidmask); - - /* Temporary */ -#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG - if (hang_trig) - printk("Logic Analyser Trigger on suspected TC hang\n"); -#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */ - - /* Put MVPE's into 'configuration state' */ - write_c0_mvpcontrol( read_c0_mvpcontrol() | MVPCONTROL_VPC ); - - val = read_c0_mvpconf0(); - nvpe = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; - if (vpelimit > 0 && nvpe > vpelimit) - nvpe = vpelimit; - ntc = ((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; - if (ntc > NR_CPUS) - ntc = NR_CPUS; - if (tclimit > 0 && ntc > tclimit) - ntc = tclimit; - slop = ntc % nvpe; - for (i = 0; i < nvpe; i++) { - tcpervpe[i] = ntc / nvpe; - if (slop) { - if((slop - i) > 0) tcpervpe[i]++; - } - } - /* Handle command line override for VPE0 */ - if (vpe0limit > ntc) vpe0limit = ntc; - if (vpe0limit > 0) { - int slopslop; - if (vpe0limit < tcpervpe[0]) { - /* Reducing TC count - distribute to others */ - slop = tcpervpe[0] - vpe0limit; - slopslop = slop % (nvpe - 1); - tcpervpe[0] = vpe0limit; - for (i = 1; i < nvpe; i++) { - tcpervpe[i] += slop / (nvpe - 1); - if(slopslop && 
((slopslop - (i - 1) > 0))) - tcpervpe[i]++; - } - } else if (vpe0limit > tcpervpe[0]) { - /* Increasing TC count - steal from others */ - slop = vpe0limit - tcpervpe[0]; - slopslop = slop % (nvpe - 1); - tcpervpe[0] = vpe0limit; - for (i = 1; i < nvpe; i++) { - tcpervpe[i] -= slop / (nvpe - 1); - if(slopslop && ((slopslop - (i - 1) > 0))) - tcpervpe[i]--; - } - } - } - - /* Set up shared TLB */ - smtc_configure_tlb(); - - for (tc = 0, vpe = 0 ; (vpe < nvpe) && (tc < ntc) ; vpe++) { - /* Get number of CP1 contexts for each VPE. */ - if (tc == 0) - { - /* - * Do not call settc() for TC0 or the FPU context - * value will be incorrect. Besides, we know that - * we are TC0 anyway. - */ - smtc_nconf1[0] = ((read_vpe_c0_vpeconf1() & - VPECONF1_NCP1) >> VPECONF1_NCP1_SHIFT); - if (nvpe == 2) - { - settc(1); - smtc_nconf1[1] = ((read_vpe_c0_vpeconf1() & - VPECONF1_NCP1) >> VPECONF1_NCP1_SHIFT); - settc(0); - } - } - if (tcpervpe[vpe] == 0) - continue; - if (vpe != 0) - printk(", "); - printk("VPE %d: TC", vpe); - for (i = 0; i < tcpervpe[vpe]; i++) { - /* - * TC 0 is bound to VPE 0 at reset, - * and is presumably executing this - * code. Leave it alone! - */ - if (tc != 0) { - smtc_tc_setup(vpe, tc, cpu); - if (vpe != 0) { - /* - * Set MVP bit (possibly again). Do it - * here to catch CPUs that have no TCs - * bound to the VPE at reset. In that - * case, a TC must be bound to the VPE - * before we can set VPEControl[MVP] - */ - write_vpe_c0_vpeconf0( - read_vpe_c0_vpeconf0() | - VPECONF0_MVP); - } - cpu++; - } - printk(" %d", tc); - tc++; - } - if (vpe != 0) { - /* - * Allow this VPE to control others. - */ - write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | - VPECONF0_MVP); - - /* - * Clear any stale software interrupts from VPE's Cause - */ - write_vpe_c0_cause(0); - - /* - * Clear ERL/EXL of VPEs other than 0 - * and set restricted interrupt enable/mask. - */ - write_vpe_c0_status((read_vpe_c0_status() - & ~(ST0_BEV | ST0_ERL | ST0_EXL | ST0_IM)) - | (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP7 - | ST0_IE)); - /* - * set config to be the same as vpe0, - * particularly kseg0 coherency alg - */ - write_vpe_c0_config(read_c0_config()); - /* Clear any pending timer interrupt */ - write_vpe_c0_compare(0); - /* Propagate Config7 */ - write_vpe_c0_config7(read_c0_config7()); - write_vpe_c0_count(read_c0_count() + CP0_SKEW); - ehb(); - } - /* enable multi-threading within VPE */ - write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE); - /* enable the VPE */ - write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA); - } - - /* - * Pull any physically present but unused TCs out of circulation. - */ - while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) { - set_cpu_possible(tc, false); - set_cpu_present(tc, false); - tc++; - } - - /* release config state */ - write_c0_mvpcontrol( read_c0_mvpcontrol() & ~ MVPCONTROL_VPC ); - - printk("\n"); - - /* Set up coprocessor affinity CPU mask(s) */ - -#ifdef CONFIG_MIPS_MT_FPAFF - for (tc = 0; tc < ntc; tc++) { - if (cpu_data[tc].options & MIPS_CPU_FPU) - cpu_set(tc, mt_fpu_cpumask); - } -#endif - - /* set up ipi interrupts... */ - - /* If we have multiple VPEs running, set up the cross-VPE interrupt */ - - setup_cross_vpe_interrupts(nvpe); - - /* Set up queue of free IPI "messages". 
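Aside: the tcpervpe[] setup above is a floor-plus-remainder split, and the vpe0limit override then rebalances the difference across the remaining VPEs by the same rule. The core distribution, as a standalone sketch:

#include <stdio.h>

/* Spread ntc TCs over nvpe VPEs: every VPE gets the floor share and
 * the first (ntc % nvpe) VPEs absorb the remainder, matching the
 * "(slop - i) > 0" test above. */
static void distribute_tcs(int ntc, int nvpe, int tcpervpe[])
{
    int slop = ntc % nvpe;
    int i;

    for (i = 0; i < nvpe; i++) {
        tcpervpe[i] = ntc / nvpe;
        if (slop - i > 0)
            tcpervpe[i]++;
    }
}

int main(void)
{
    int tcpervpe[2];

    distribute_tcs(5, 2, tcpervpe);    /* e.g. a 34K with 5 TCs, 2 VPEs */
    printf("VPE0: %d TCs, VPE1: %d TCs\n", tcpervpe[0], tcpervpe[1]);
    return 0;
}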
*/ - nipi = NR_CPUS * IPIBUF_PER_CPU; - if (ipibuffers > 0) - nipi = ipibuffers; - - pipi = kmalloc(nipi *sizeof(struct smtc_ipi), GFP_KERNEL); - if (pipi == NULL) - panic("kmalloc of IPI message buffers failed"); - else - printk("IPI buffer pool of %d buffers\n", nipi); - for (i = 0; i < nipi; i++) { - smtc_ipi_nq(&freeIPIq, pipi); - pipi++; - } - - /* Arm multithreading and enable other VPEs - but all TCs are Halted */ - emt(EMT_ENABLE); - evpe(EVPE_ENABLE); - local_irq_restore(flags); - /* Initialize SMTC /proc statistics/diagnostics */ - init_smtc_stats(); -} - - -/* - * Setup the PC, SP, and GP of a secondary processor and start it - * running! - * smp_bootstrap is the place to resume from - * __KSTK_TOS(idle) is apparently the stack pointer - * (unsigned long)idle->thread_info the gp - * - */ -void smtc_boot_secondary(int cpu, struct task_struct *idle) -{ - extern u32 kernelsp[NR_CPUS]; - unsigned long flags; - int mtflags; - - LOCK_MT_PRA(); - if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) { - dvpe(); - } - settc(cpu_data[cpu].tc_id); - - /* pc */ - write_tc_c0_tcrestart((unsigned long)&smp_bootstrap); - - /* stack pointer */ - kernelsp[cpu] = __KSTK_TOS(idle); - write_tc_gpr_sp(__KSTK_TOS(idle)); - - /* global pointer */ - write_tc_gpr_gp((unsigned long)task_thread_info(idle)); - - smtc_status |= SMTC_MTC_ACTIVE; - write_tc_c0_tchalt(0); - if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) { - evpe(EVPE_ENABLE); - } - UNLOCK_MT_PRA(); -} - -void smtc_init_secondary(void) -{ -} - -void smtc_smp_finish(void) -{ - int cpu = smp_processor_id(); - - /* - * Lowest-numbered CPU per VPE starts a clock tick. - * Like per_cpu_trap_init() hack, this assumes that - * SMTC init code assigns TCs consdecutively and - * in ascending order across available VPEs. - */ - if (cpu > 0 && (cpu_data[cpu].vpe_id != cpu_data[cpu - 1].vpe_id)) - write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ); - - local_irq_enable(); - - printk("TC %d going on-line as CPU %d\n", - cpu_data[smp_processor_id()].tc_id, smp_processor_id()); -} - -void smtc_cpus_done(void) -{ -} - -/* - * Support for SMTC-optimized driver IRQ registration - */ - -/* - * SMTC Kernel needs to manipulate low-level CPU interrupt mask - * in do_IRQ. These are passed in setup_irq_smtc() and stored - * in this table. - */ - -int setup_irq_smtc(unsigned int irq, struct irqaction * new, - unsigned long hwmask) -{ -#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG - unsigned int vpe = current_cpu_data.vpe_id; - - vpemask[vpe][irq - MIPS_CPU_IRQ_BASE] = 1; -#endif - irq_hwmask[irq] = hwmask; - - return setup_irq(irq, new); -} - -#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF -/* - * Support for IRQ affinity to TCs - */ - -void smtc_set_irq_affinity(unsigned int irq, cpumask_t affinity) -{ - /* - * If a "fast path" cache of quickly decodable affinity state - * is maintained, this is where it gets done, on a call up - * from the platform affinity code. - */ -} - -void smtc_forward_irq(struct irq_data *d) -{ - unsigned int irq = d->irq; - int target; - - /* - * OK wise guy, now figure out how to get the IRQ - * to be serviced on an authorized "CPU". - * - * Ideally, to handle the situation where an IRQ has multiple - * eligible CPUS, we would maintain state per IRQ that would - * allow a fair distribution of service requests. Since the - * expected use model is any-or-only-one, for simplicity - * and efficiency, we just pick the easiest one to find. 
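Aside on the IPI buffer pool set up above: descriptors are allocated once at boot and threaded onto a free queue; the send path dequeues one and the delivery path returns it, so nothing is allocated at interrupt time. A userspace sketch of the shape, simplified to a LIFO free list with the spinlocks omitted (the kernel version is a FIFO with head/tail pointers):

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for struct smtc_ipi / struct smtc_ipi_q. */
struct msg {
    struct msg *flink;
    int payload;
};

struct msg_q {
    struct msg *head;
};

static void nq(struct msg_q *q, struct msg *m)
{
    m->flink = q->head;
    q->head = m;
}

static struct msg *dq(struct msg_q *q)
{
    struct msg *m = q->head;

    if (m)
        q->head = m->flink;
    return m;
}

int main(void)
{
    enum { NIPI = 4 * 2 };    /* IPIBUF_PER_CPU * number of "CPUs" */
    struct msg *pool = malloc(NIPI * sizeof(*pool));
    struct msg_q freeq = { NULL };
    struct msg *m;
    int i;

    if (!pool)
        return 1;
    for (i = 0; i < NIPI; i++)
        nq(&freeq, &pool[i]);
    printf("IPI buffer pool of %d buffers\n", NIPI);

    m = dq(&freeq);    /* send path: grab a descriptor */
    m->payload = 42;
    nq(&freeq, m);     /* delivery path: return it to the pool */
    free(pool);
    return 0;
}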
- */ - - target = cpumask_first(d->affinity); - - /* - * We depend on the platform code to have correctly processed - * IRQ affinity change requests to ensure that the IRQ affinity - * mask has been purged of bits corresponding to nonexistent and - * offline "CPUs", and to TCs bound to VPEs other than the VPE - * connected to the physical interrupt input for the interrupt - * in question. Otherwise we have a nasty problem with interrupt - * mask management. This is best handled in non-performance-critical - * platform IRQ affinity setting code, to minimize interrupt-time - * checks. - */ - - /* If no one is eligible, service locally */ - if (target >= NR_CPUS) - do_IRQ_no_affinity(irq); - else - smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq); -} - -#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ - -/* - * IPI model for SMTC is tricky, because interrupts aren't TC-specific. - * Within a VPE one TC can interrupt another by different approaches. - * The easiest to get right would probably be to make all TCs except - * the target IXMT and set a software interrupt, but an IXMT-based - * scheme requires that a handler must run before a new IPI could - * be sent, which would break the "broadcast" loops in MIPS MT. - * A more gonzo approach within a VPE is to halt the TC, extract - * its Restart, Status, and a couple of GPRs, and program the Restart - * address to emulate an interrupt. - * - * Within a VPE, one can be confident that the target TC isn't in - * a critical EXL state when halted, since the write to the Halt - * register could not have issued on the writing thread if the - * halting thread had EXL set. So k0 and k1 of the target TC - * can be used by the injection code. Across VPEs, one can't - * be certain that the target TC isn't in a critical exception - * state. So we try a two-step process of sending a software - * interrupt to the target VPE, which either handles the event - * itself (if it was the target) or injects the event within - * the VPE. - */ - -static void smtc_ipi_qdump(void) -{ - int i; - struct smtc_ipi *temp; - - for (i = 0; i < NR_CPUS ;i++) { - pr_info("IPIQ[%d]: head = 0x%x, tail = 0x%x, depth = %d\n", - i, (unsigned)IPIQ[i].head, (unsigned)IPIQ[i].tail, - IPIQ[i].depth); - temp = IPIQ[i].head; - - while (temp != IPIQ[i].tail) { - pr_debug("%d %d %d: ", temp->type, temp->dest, - (int)temp->arg); -#ifdef SMTC_IPI_DEBUG - pr_debug("%u %lu\n", temp->sender, temp->stamp); -#else - pr_debug("\n"); -#endif - temp = temp->flink; - } - } -} - -/* - * The standard atomic.h primitives don't quite do what we want - * here: We need an atomic add-and-return-previous-value (which - * could be done with atomic_add_return and a decrement) and an - * atomic set/zero-and-return-previous-value (which can't really - * be done with the atomic.h primitives). And since this is - * MIPS MT, we can assume that we have LL/SC. 
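For comparison with the comment above: the add-and-return-previous-value primitive it asks for is exactly C11's atomic_fetch_add, which is what the LL/SC loop in atomic_postincrement() below hand-codes. A portable userspace equivalent:

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
    atomic_int v = 5;

    /* atomic_fetch_add returns the value *before* the increment. */
    int prev = atomic_fetch_add(&v, 1);

    printf("prev=%d now=%d\n", prev, atomic_load(&v));    /* prev=5 now=6 */
    return 0;
}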
- */ -static inline int atomic_postincrement(atomic_t *v) -{ - unsigned long result; - - unsigned long temp; - - __asm__ __volatile__( - "1: ll %0, %2 \n" - " addu %1, %0, 1 \n" - " sc %1, %2 \n" - " beqz %1, 1b \n" - __WEAK_LLSC_MB - : "=&r" (result), "=&r" (temp), "=m" (v->counter) - : "m" (v->counter) - : "memory"); - - return result; -} - -void smtc_send_ipi(int cpu, int type, unsigned int action) -{ - int tcstatus; - struct smtc_ipi *pipi; - unsigned long flags; - int mtflags; - unsigned long tcrestart; - int set_resched_flag = (type == LINUX_SMP_IPI && - action == SMP_RESCHEDULE_YOURSELF); - - if (cpu == smp_processor_id()) { - printk("Cannot Send IPI to self!\n"); - return; - } - if (set_resched_flag && IPIQ[cpu].resched_flag != 0) - return; /* There is a reschedule queued already */ - - /* Set up a descriptor, to be delivered either promptly or queued */ - pipi = smtc_ipi_dq(&freeIPIq); - if (pipi == NULL) { - bust_spinlocks(1); - mips_mt_regdump(dvpe()); - panic("IPI Msg. Buffers Depleted"); - } - pipi->type = type; - pipi->arg = (void *)action; - pipi->dest = cpu; - if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) { - /* If not on same VPE, enqueue and send cross-VPE interrupt */ - IPIQ[cpu].resched_flag |= set_resched_flag; - smtc_ipi_nq(&IPIQ[cpu], pipi); - LOCK_CORE_PRA(); - settc(cpu_data[cpu].tc_id); - write_vpe_c0_cause(read_vpe_c0_cause() | C_SW1); - UNLOCK_CORE_PRA(); - } else { - /* - * Not sufficient to do a LOCK_MT_PRA (dmt) here, - * since ASID shootdown on the other VPE may - * collide with this operation. - */ - LOCK_CORE_PRA(); - settc(cpu_data[cpu].tc_id); - /* Halt the targeted TC */ - write_tc_c0_tchalt(TCHALT_H); - mips_ihb(); - - /* - * Inspect TCStatus - if IXMT is set, we have to queue - * a message. Otherwise, we set up the "interrupt" - * of the other TC - */ - tcstatus = read_tc_c0_tcstatus(); - - if ((tcstatus & TCSTATUS_IXMT) != 0) { - /* - * If we're in the the irq-off version of the wait - * loop, we need to force exit from the wait and - * do a direct post of the IPI. - */ - if (cpu_wait == r4k_wait_irqoff) { - tcrestart = read_tc_c0_tcrestart(); - if (address_is_in_r4k_wait_irqoff(tcrestart)) { - write_tc_c0_tcrestart(__pastwait); - tcstatus &= ~TCSTATUS_IXMT; - write_tc_c0_tcstatus(tcstatus); - goto postdirect; - } - } - /* - * Otherwise we queue the message for the target TC - * to pick up when he does a local_irq_restore() - */ - write_tc_c0_tchalt(0); - UNLOCK_CORE_PRA(); - IPIQ[cpu].resched_flag |= set_resched_flag; - smtc_ipi_nq(&IPIQ[cpu], pipi); - } else { -postdirect: - post_direct_ipi(cpu, pipi); - write_tc_c0_tchalt(0); - UNLOCK_CORE_PRA(); - } - } -} - -/* - * Send IPI message to Halted TC, TargTC/TargVPE already having been set - */ -static void post_direct_ipi(int cpu, struct smtc_ipi *pipi) -{ - struct pt_regs *kstack; - unsigned long tcstatus; - unsigned long tcrestart; - extern u32 kernelsp[NR_CPUS]; - extern void __smtc_ipi_vector(void); -//printk("%s: on %d for %d\n", __func__, smp_processor_id(), cpu); - - /* Extract Status, EPC from halted TC */ - tcstatus = read_tc_c0_tcstatus(); - tcrestart = read_tc_c0_tcrestart(); - /* If TCRestart indicates a WAIT instruction, advance the PC */ - if ((tcrestart & 0x80000000) - && ((*(unsigned int *)tcrestart & 0xfe00003f) == 0x42000020)) { - tcrestart += 4; - } - /* - * Save on TC's future kernel stack - * - * CU bit of Status is indicator that TC was - * already running on a kernel stack... 
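Note on post_direct_ipi() above: if the halted TC's restart address points at a WAIT instruction, the PC is advanced by 4 so the thread does not re-enter the wait when it takes the injected IPI. The mask/match pair decodes the MIPS32 WAIT encoding; as a standalone check:

#include <stdio.h>

/* WAIT is COP0 (major opcode 010000) with the CO bit set and function
 * field 100000, i.e. 0x42000020; the mask 0xfe00003f selects exactly
 * the opcode/CO bits and the function field. */
static int is_wait_insn(unsigned int insn)
{
    return (insn & 0xfe00003f) == 0x42000020;
}

int main(void)
{
    printf("%d\n", is_wait_insn(0x42000020));    /* 1: wait */
    printf("%d\n", is_wait_insn(0x00000020));    /* 0: add  */
    return 0;
}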
- */ - if (tcstatus & ST0_CU0) { - /* Note that this "- 1" is pointer arithmetic */ - kstack = ((struct pt_regs *)read_tc_gpr_sp()) - 1; - } else { - kstack = ((struct pt_regs *)kernelsp[cpu]) - 1; - } - - kstack->cp0_epc = (long)tcrestart; - /* Save TCStatus */ - kstack->cp0_tcstatus = tcstatus; - /* Pass token of operation to be performed kernel stack pad area */ - kstack->pad0[4] = (unsigned long)pipi; - /* Pass address of function to be called likewise */ - kstack->pad0[5] = (unsigned long)&ipi_decode; - /* Set interrupt exempt and kernel mode */ - tcstatus |= TCSTATUS_IXMT; - tcstatus &= ~TCSTATUS_TKSU; - write_tc_c0_tcstatus(tcstatus); - ehb(); - /* Set TC Restart address to be SMTC IPI vector */ - write_tc_c0_tcrestart(__smtc_ipi_vector); -} - -static void ipi_resched_interrupt(void) -{ - scheduler_ipi(); -} - -static void ipi_call_interrupt(void) -{ - /* Invoke generic function invocation code in smp.c */ - smp_call_function_interrupt(); -} - -DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device); - -static void __irq_entry smtc_clock_tick_interrupt(void) -{ - unsigned int cpu = smp_processor_id(); - struct clock_event_device *cd; - int irq = MIPS_CPU_IRQ_BASE + 1; - - irq_enter(); - kstat_incr_irq_this_cpu(irq); - cd = &per_cpu(mips_clockevent_device, cpu); - cd->event_handler(cd); - irq_exit(); -} - -void ipi_decode(struct smtc_ipi *pipi) -{ - void *arg_copy = pipi->arg; - int type_copy = pipi->type; - - smtc_ipi_nq(&freeIPIq, pipi); - - switch (type_copy) { - case SMTC_CLOCK_TICK: - smtc_clock_tick_interrupt(); - break; - - case LINUX_SMP_IPI: - switch ((int)arg_copy) { - case SMP_RESCHEDULE_YOURSELF: - ipi_resched_interrupt(); - break; - case SMP_CALL_FUNCTION: - ipi_call_interrupt(); - break; - default: - printk("Impossible SMTC IPI Argument %p\n", arg_copy); - break; - } - break; -#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF - case IRQ_AFFINITY_IPI: - /* - * Accept a "forwarded" interrupt that was initially - * taken by a TC who doesn't have affinity for the IRQ. - */ - do_IRQ_no_affinity((int)arg_copy); - break; -#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ - default: - printk("Impossible SMTC IPI Type 0x%x\n", type_copy); - break; - } -} - -/* - * Similar to smtc_ipi_replay(), but invoked from context restore, - * so it reuses the current exception frame rather than set up a - * new one with self_ipi. - */ - -void deferred_smtc_ipi(void) -{ - int cpu = smp_processor_id(); - - /* - * Test is not atomic, but much faster than a dequeue, - * and the vast majority of invocations will have a null queue. - * If irq_disabled when this was called, then any IPIs queued - * after we test last will be taken on the next irq_enable/restore. - * If interrupts were enabled, then any IPIs added after the - * last test will be taken directly. - */ - - while (IPIQ[cpu].head != NULL) { - struct smtc_ipi_q *q = &IPIQ[cpu]; - struct smtc_ipi *pipi; - unsigned long flags; - - /* - * It may be possible we'll come in with interrupts - * already enabled. - */ - local_irq_save(flags); - spin_lock(&q->lock); - pipi = __smtc_ipi_dq(q); - spin_unlock(&q->lock); - if (pipi != NULL) { - if (pipi->type == LINUX_SMP_IPI && - (int)pipi->arg == SMP_RESCHEDULE_YOURSELF) - IPIQ[cpu].resched_flag = 0; - ipi_decode(pipi); - } - /* - * The use of the __raw_local restore isn't - * as obviously necessary here as in smtc_ipi_replay(), - * but it's more efficient, given that we're already - * running down the IPI queue. 
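Note: the "- 1" in the kstack computation above is pointer arithmetic, so it reserves one full struct pt_regs below the TC's stack pointer for the exception frame being faked up. The same idiom with a mock frame type:

#include <stdio.h>

/* Mock stand-in for struct pt_regs. */
struct frame {
    unsigned long epc;
    unsigned long gprs[7];
};

int main(void)
{
    unsigned long stack[64];
    unsigned long *sp = &stack[64];    /* top of a (mock) kernel stack */

    /* "- 1" scales by sizeof(struct frame), carving the frame out
     * immediately below the stack pointer. */
    struct frame *kstack = (struct frame *)sp - 1;

    kstack->epc = 0x80001000;
    printf("frame of %zu bytes at %p, below sp %p\n",
           sizeof(*kstack), (void *)kstack, (void *)sp);
    return 0;
}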
- */ - __arch_local_irq_restore(flags); - } -} - -/* - * Cross-VPE interrupts in the SMTC prototype use "software interrupts" - * set via cross-VPE MTTR manipulation of the Cause register. It would be - * in some regards preferable to have external logic for "doorbell" hardware - * interrupts. - */ - -static int cpu_ipi_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_IRQ; - -static irqreturn_t ipi_interrupt(int irq, void *dev_idm) -{ - int my_vpe = cpu_data[smp_processor_id()].vpe_id; - int my_tc = cpu_data[smp_processor_id()].tc_id; - int cpu; - struct smtc_ipi *pipi; - unsigned long tcstatus; - int sent; - unsigned long flags; - unsigned int mtflags; - unsigned int vpflags; - - /* - * So long as cross-VPE interrupts are done via - * MFTR/MTTR read-modify-writes of Cause, we need - * to stop other VPEs whenever the local VPE does - * anything similar. - */ - local_irq_save(flags); - vpflags = dvpe(); - clear_c0_cause(0x100 << MIPS_CPU_IPI_IRQ); - set_c0_status(0x100 << MIPS_CPU_IPI_IRQ); - irq_enable_hazard(); - evpe(vpflags); - local_irq_restore(flags); - - /* - * Cross-VPE Interrupt handler: Try to directly deliver IPIs - * queued for TCs on this VPE other than the current one. - * Return-from-interrupt should cause us to drain the queue - * for the current TC, so we ought not to have to do it explicitly here. - */ - - for_each_online_cpu(cpu) { - if (cpu_data[cpu].vpe_id != my_vpe) - continue; - - pipi = smtc_ipi_dq(&IPIQ[cpu]); - if (pipi != NULL) { - if (cpu_data[cpu].tc_id != my_tc) { - sent = 0; - LOCK_MT_PRA(); - settc(cpu_data[cpu].tc_id); - write_tc_c0_tchalt(TCHALT_H); - mips_ihb(); - tcstatus = read_tc_c0_tcstatus(); - if ((tcstatus & TCSTATUS_IXMT) == 0) { - post_direct_ipi(cpu, pipi); - sent = 1; - } - write_tc_c0_tchalt(0); - UNLOCK_MT_PRA(); - if (!sent) { - smtc_ipi_req(&IPIQ[cpu], pipi); - } - } else { - /* - * ipi_decode() should be called - * with interrupts off - */ - local_irq_save(flags); - if (pipi->type == LINUX_SMP_IPI && - (int)pipi->arg == SMP_RESCHEDULE_YOURSELF) - IPIQ[cpu].resched_flag = 0; - ipi_decode(pipi); - local_irq_restore(flags); - } - } - } - - return IRQ_HANDLED; -} - -static void ipi_irq_dispatch(void) -{ - do_IRQ(cpu_ipi_irq); -} - -static struct irqaction irq_ipi = { - .handler = ipi_interrupt, - .flags = IRQF_PERCPU, - .name = "SMTC_IPI" -}; - -static void setup_cross_vpe_interrupts(unsigned int nvpe) -{ - if (nvpe < 1) - return; - - if (!cpu_has_vint) - panic("SMTC Kernel requires Vectored Interrupt support"); - - set_vi_handler(MIPS_CPU_IPI_IRQ, ipi_irq_dispatch); - - setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ)); - - irq_set_handler(cpu_ipi_irq, handle_percpu_irq); -} - -/* - * SMTC-specific hacks invoked from elsewhere in the kernel. - */ - - /* - * smtc_ipi_replay is called from raw_local_irq_restore - */ - -void smtc_ipi_replay(void) -{ - unsigned int cpu = smp_processor_id(); - - /* - * To the extent that we've ever turned interrupts off, - * we may have accumulated deferred IPIs. This is subtle. - * we should be OK: If we pick up something and dispatch - * it here, that's great. If we see nothing, but concurrent - * with this operation, another TC sends us an IPI, IXMT - * is clear, and we'll handle it as a real pseudo-interrupt - * and not a pseudo-pseudo interrupt. The important thing - * is to do the last check for queued message *after* the - * re-enabling of interrupts. 
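Aside: deferred_smtc_ipi() above and smtc_ipi_replay() below share one drain pattern: a cheap unlocked emptiness test, a dequeue under local_irq_save() plus the queue spinlock, then dispatch with the lock dropped, repeated until the queue is empty. A userspace sketch of the shape (spinlock and the raw-restore subtlety omitted; irq_save/irq_restore are stand-ins):

#include <stdio.h>

struct node {
    struct node *next;
    int type;
};

static struct node *head;

/* Stand-ins for local_irq_save()/__arch_local_irq_restore(). */
static void irq_save(unsigned long *flags)   { *flags = 0; }
static void irq_restore(unsigned long flags) { (void)flags; }

static void drain(void (*decode)(struct node *))
{
    while (head != NULL) {        /* unlocked fast test */
        unsigned long flags;
        struct node *n;

        irq_save(&flags);
        n = head;                 /* dequeue under protection */
        if (n)
            head = n->next;
        irq_restore(flags);

        if (n)
            decode(n);            /* dispatch with the queue unlocked */
    }
}

static void print_one(struct node *n)
{
    printf("ipi type %d\n", n->type);
}

int main(void)
{
    struct node b = { NULL, 2 }, a = { &b, 1 };

    head = &a;
    drain(print_one);
    return 0;
}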
- */ - while (IPIQ[cpu].head != NULL) { - struct smtc_ipi_q *q = &IPIQ[cpu]; - struct smtc_ipi *pipi; - unsigned long flags; - - /* - * It's just possible we'll come in with interrupts - * already enabled. - */ - local_irq_save(flags); - - spin_lock(&q->lock); - pipi = __smtc_ipi_dq(q); - spin_unlock(&q->lock); - /* - ** But use a raw restore here to avoid recursion. - */ - __arch_local_irq_restore(flags); - - if (pipi) { - self_ipi(pipi); - smtc_cpu_stats[cpu].selfipis++; - } - } -} - -EXPORT_SYMBOL(smtc_ipi_replay); - -void smtc_idle_loop_hook(void) -{ -#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG - int im; - int flags; - int mtflags; - int bit; - int vpe; - int tc; - int hook_ntcs; - /* - * printk within DMT-protected regions can deadlock, - * so buffer diagnostic messages for later output. - */ - char *pdb_msg; - char id_ho_db_msg[768]; /* worst-case use should be less than 700 */ - - if (atomic_read(&idle_hook_initialized) == 0) { /* fast test */ - if (atomic_add_return(1, &idle_hook_initialized) == 1) { - int mvpconf0; - /* Tedious stuff to just do once */ - mvpconf0 = read_c0_mvpconf0(); - hook_ntcs = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; - if (hook_ntcs > NR_CPUS) - hook_ntcs = NR_CPUS; - for (tc = 0; tc < hook_ntcs; tc++) { - tcnoprog[tc] = 0; - clock_hang_reported[tc] = 0; - } - for (vpe = 0; vpe < 2; vpe++) - for (im = 0; im < 8; im++) - imstuckcount[vpe][im] = 0; - printk("Idle loop test hook initialized for %d TCs\n", hook_ntcs); - atomic_set(&idle_hook_initialized, 1000); - } else { - /* Someone else is initializing in parallel - let 'em finish */ - while (atomic_read(&idle_hook_initialized) < 1000) - ; - } - } - - /* Have we stupidly left IXMT set somewhere? */ - if (read_c0_tcstatus() & 0x400) { - write_c0_tcstatus(read_c0_tcstatus() & ~0x400); - ehb(); - printk("Dangling IXMT in cpu_idle()\n"); - } - - /* Have we stupidly left an IM bit turned off? */ -#define IM_LIMIT 2000 - local_irq_save(flags); - mtflags = dmt(); - pdb_msg = &id_ho_db_msg[0]; - im = read_c0_status(); - vpe = current_cpu_data.vpe_id; - for (bit = 0; bit < 8; bit++) { - /* - * In current prototype, I/O interrupts - * are masked for VPE > 0 - */ - if (vpemask[vpe][bit]) { - if (!(im & (0x100 << bit))) - imstuckcount[vpe][bit]++; - else - imstuckcount[vpe][bit] = 0; - if (imstuckcount[vpe][bit] > IM_LIMIT) { - set_c0_status(0x100 << bit); - ehb(); - imstuckcount[vpe][bit] = 0; - pdb_msg += sprintf(pdb_msg, - "Dangling IM %d fixed for VPE %d\n", bit, - vpe); - } - } - } - - emt(mtflags); - local_irq_restore(flags); - if (pdb_msg != &id_ho_db_msg[0]) - printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg); -#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */ - - smtc_ipi_replay(); -} - -void smtc_soft_dump(void) -{ - int i; - - printk("Counter Interrupts taken per CPU (TC)\n"); - for (i=0; i < NR_CPUS; i++) { - printk("%d: %ld\n", i, smtc_cpu_stats[i].timerints); - } - printk("Self-IPI invocations:\n"); - for (i=0; i < NR_CPUS; i++) { - printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis); - } - smtc_ipi_qdump(); - printk("%d Recoveries of \"stolen\" FPU\n", - atomic_read(&smtc_fpu_recoveries)); -} - - -/* - * TLB management routines special to SMTC - */ - -void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) -{ - unsigned long flags, mtflags, tcstat, prevhalt, asid; - int tlb, i; - - /* - * It would be nice to be able to use a spinlock here, - * but this is invoked from within TLB flush routines - * that protect themselves with DVPE, so if a lock is - * held by another TC, it'll never be freed. 
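Note on the one-time initialization in smtc_idle_loop_hook() above: the first caller to bump idle_hook_initialized from zero performs the setup and then advertises completion with the value 1000; racing callers spin until they see it. In C11 terms (atomic_fetch_add returns the previous value, so the winner sees 0 where the kernel's atomic_add_return saw 1):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int initialized;    /* 0 = untouched, 1000 = done */

static void init_once(void)
{
    if (atomic_load(&initialized) == 0) {               /* fast test */
        if (atomic_fetch_add(&initialized, 1) == 0) {
            puts("doing the one-time setup");           /* we won the race */
            atomic_store(&initialized, 1000);
        } else {
            while (atomic_load(&initialized) < 1000)
                ;                                       /* let the winner finish */
        }
    }
}

int main(void)
{
    init_once();
    init_once();    /* second call falls straight through */
    return 0;
}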
- * - * DVPE/DMT must not be done with interrupts enabled, - * so even so most callers will already have disabled - * them, let's be really careful... - */ - - local_irq_save(flags); - if (smtc_status & SMTC_TLB_SHARED) { - mtflags = dvpe(); - tlb = 0; - } else { - mtflags = dmt(); - tlb = cpu_data[cpu].vpe_id; - } - asid = asid_cache(cpu); - - do { - if (!((asid += ASID_INC) & ASID_MASK) ) { - if (cpu_has_vtag_icache) - flush_icache_all(); - /* Traverse all online CPUs (hack requires contiguous range) */ - for_each_online_cpu(i) { - /* - * We don't need to worry about our own CPU, nor those of - * CPUs who don't share our TLB. - */ - if ((i != smp_processor_id()) && - ((smtc_status & SMTC_TLB_SHARED) || - (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))) { - settc(cpu_data[i].tc_id); - prevhalt = read_tc_c0_tchalt() & TCHALT_H; - if (!prevhalt) { - write_tc_c0_tchalt(TCHALT_H); - mips_ihb(); - } - tcstat = read_tc_c0_tcstatus(); - smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i); - if (!prevhalt) - write_tc_c0_tchalt(0); - } - } - if (!asid) /* fix version if needed */ - asid = ASID_FIRST_VERSION; - local_flush_tlb_all(); /* start new asid cycle */ - } - } while (smtc_live_asid[tlb][(asid & ASID_MASK)]); - - /* - * SMTC shares the TLB within VPEs and possibly across all VPEs. - */ - for_each_online_cpu(i) { - if ((smtc_status & SMTC_TLB_SHARED) || - (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id)) - cpu_context(i, mm) = asid_cache(i) = asid; - } - - if (smtc_status & SMTC_TLB_SHARED) - evpe(mtflags); - else - emt(mtflags); - local_irq_restore(flags); -} - -/* - * Invoked from macros defined in mmu_context.h - * which must already have disabled interrupts - * and done a DVPE or DMT as appropriate. - */ - -void smtc_flush_tlb_asid(unsigned long asid) -{ - int entry; - unsigned long ehi; - - entry = read_c0_wired(); - - /* Traverse all non-wired entries */ - while (entry < current_cpu_data.tlbsize) { - write_c0_index(entry); - ehb(); - tlb_read(); - ehb(); - ehi = read_c0_entryhi(); - if ((ehi & ASID_MASK) == asid) { - /* - * Invalidate only entries with specified ASID, - * makiing sure all entries differ. - */ - write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1))); - write_c0_entrylo0(0); - write_c0_entrylo1(0); - mtc0_tlbw_hazard(); - tlb_write_indexed(); - } - entry++; - } - write_c0_index(PARKED_INDEX); - tlbw_use_hazard(); -} - -/* - * Support for single-threading cache flush operations. - */ - -static int halt_state_save[NR_CPUS]; - -/* - * To really, really be sure that nothing is being done - * by other TCs, halt them all. This code assumes that - * a DVPE has already been done, so while their Halted - * state is theoretically architecturally unstable, in - * practice, it's not going to change while we're looking - * at it. - */ - -void smtc_cflush_lockdown(void) -{ - int cpu; - - for_each_online_cpu(cpu) { - if (cpu != smp_processor_id()) { - settc(cpu_data[cpu].tc_id); - halt_state_save[cpu] = read_tc_c0_tchalt(); - write_tc_c0_tchalt(TCHALT_H); - } - } - mips_ihb(); -} - -/* It would be cheating to change the cpu_online states during a flush! */ - -void smtc_cflush_release(void) -{ - int cpu; - - /* - * Start with a hazard barrier to ensure - * that all CACHE ops have played through. 
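Aside: the allocation loop in smtc_get_new_mmu_context() above is the standard MIPS versioned-ASID scheme plus one SMTC twist, the smtc_live_asid[] table, which lets it skip ASIDs still resident in a TLB shared with halted TCs. The generic versioned scheme, as a sketch:

#include <stdio.h>

#define ASID_MASK           0xffUL
#define ASID_INC            0x1UL
#define ASID_FIRST_VERSION  0x100UL

/* Start near the top of a generation so the rollover shows. */
static unsigned long asid_cache = ASID_FIRST_VERSION + 0xfe;

static unsigned long get_new_asid(void)
{
    unsigned long asid = asid_cache;

    if (!((asid += ASID_INC) & ASID_MASK)) {
        /* 8-bit space exhausted: new generation; the kernel also
         * flushes the TLB here so stale ASIDs cannot match. */
        if (!asid)    /* counter wrapped to zero: fix the version */
            asid = ASID_FIRST_VERSION;
        puts("local_flush_tlb_all(): new asid cycle");
    }
    return asid_cache = asid;
}

int main(void)
{
    int i;

    for (i = 0; i < 3; i++)
        printf("asid %#lx\n", get_new_asid());
    return 0;
}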
- */ - mips_ihb(); - - for_each_online_cpu(cpu) { - if (cpu != smp_processor_id()) { - settc(cpu_data[cpu].tc_id); - write_tc_c0_tchalt(halt_state_save[cpu]); - } - } - mips_ihb(); -} diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c index c24ad5f4b32..2242bdd4370 100644 --- a/arch/mips/kernel/sync-r4k.c +++ b/arch/mips/kernel/sync-r4k.c @@ -6,8 +6,6 @@ * not have done anything significant (but they may have had interrupts * enabled briefly - prom_smp_finish() should not be responsible for enabling * interrupts...) - * - * FIXME: broken for SMTC */ #include <linux/kernel.h> @@ -33,14 +31,6 @@ void synchronise_count_master(int cpu) unsigned long flags; unsigned int initcount; -#ifdef CONFIG_MIPS_MT_SMTC - /* - * SMTC needs to synchronise per VPE, not per CPU - * ignore for now - */ - return; -#endif - printk(KERN_INFO "Synchronize counters for CPU %u: ", cpu); local_irq_save(flags); @@ -110,14 +100,6 @@ void synchronise_count_slave(int cpu) int i; unsigned int initcount; -#ifdef CONFIG_MIPS_MT_SMTC - /* - * SMTC needs to synchronise per VPE, not per CPU - * ignore for now - */ - return; -#endif - /* * Not every cpu is online at the time this gets called, * so we first wait for the master to say everyone is ready diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c index dcb8e5d3bb8..8d0170969e2 100644 --- a/arch/mips/kernel/time.c +++ b/arch/mips/kernel/time.c @@ -26,7 +26,6 @@ #include <asm/cpu-features.h> #include <asm/cpu-type.h> #include <asm/div64.h> -#include <asm/smtc_ipi.h> #include <asm/time.h> /* diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 074e857ced2..51706d6dd5b 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -15,6 +15,7 @@ #include <linux/bug.h> #include <linux/compiler.h> #include <linux/context_tracking.h> +#include <linux/cpu_pm.h> #include <linux/kexec.h> #include <linux/init.h> #include <linux/kernel.h> @@ -370,9 +371,6 @@ void __noreturn die(const char *str, struct pt_regs *regs) { static int die_counter; int sig = SIGSEGV; -#ifdef CONFIG_MIPS_MT_SMTC - unsigned long dvpret; -#endif /* CONFIG_MIPS_MT_SMTC */ oops_enter(); @@ -382,13 +380,7 @@ void __noreturn die(const char *str, struct pt_regs *regs) console_verbose(); raw_spin_lock_irq(&die_lock); -#ifdef CONFIG_MIPS_MT_SMTC - dvpret = dvpe(); -#endif /* CONFIG_MIPS_MT_SMTC */ bust_spinlocks(1); -#ifdef CONFIG_MIPS_MT_SMTC - mips_mt_regdump(dvpret); -#endif /* CONFIG_MIPS_MT_SMTC */ printk("%s[#%d]:\n", str, ++die_counter); show_registers(regs); @@ -712,10 +704,12 @@ int process_fpemu_return(int sig, void __user *fault_addr) si.si_addr = fault_addr; si.si_signo = sig; if (sig == SIGSEGV) { + down_read(¤t->mm->mmap_sem); if (find_vma(current->mm, (unsigned long)fault_addr)) si.si_code = SEGV_ACCERR; else si.si_code = SEGV_MAPERR; + up_read(¤t->mm->mmap_sem); } else { si.si_code = BUS_ADRERR; } @@ -1545,7 +1539,7 @@ asmlinkage void cache_parity_error(void) reg_val & (1<<30) ? "secondary" : "primary", reg_val & (1<<31) ? "data" : "insn"); if (cpu_has_mips_r2 && - ((current_cpu_data.processor_id && 0xff0000) == PRID_COMP_MIPS)) { + ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) { pr_err("Error bits: %s%s%s%s%s%s%s%s\n", reg_val & (1<<29) ? "ED " : "", reg_val & (1<<28) ? "ET " : "", @@ -1585,7 +1579,7 @@ asmlinkage void do_ftlb(void) /* For the moment, report the problem and hang. 
*/ if (cpu_has_mips_r2 && - ((current_cpu_data.processor_id && 0xff0000) == PRID_COMP_MIPS)) { + ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) { pr_err("FTLB error exception, cp0_ecc=0x%08x:\n", read_c0_ecc()); pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc()); @@ -1759,19 +1753,6 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) extern char rollback_except_vec_vi; char *vec_start = using_rollback_handler() ? &rollback_except_vec_vi : &except_vec_vi; -#ifdef CONFIG_MIPS_MT_SMTC - /* - * We need to provide the SMTC vectored interrupt handler - * not only with the address of the handler, but with the - * Status.IM bit to be masked before going there. - */ - extern char except_vec_vi_mori; -#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN) - const int mori_offset = &except_vec_vi_mori - vec_start + 2; -#else - const int mori_offset = &except_vec_vi_mori - vec_start; -#endif -#endif /* CONFIG_MIPS_MT_SMTC */ #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN) const int lui_offset = &except_vec_vi_lui - vec_start + 2; const int ori_offset = &except_vec_vi_ori - vec_start + 2; @@ -1795,12 +1776,6 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) #else handler_len); #endif -#ifdef CONFIG_MIPS_MT_SMTC - BUG_ON(n > 7); /* Vector index %d exceeds SMTC maximum. */ - - h = (u16 *)(b + mori_offset); - *h = (0x100 << n); -#endif /* CONFIG_MIPS_MT_SMTC */ h = (u16 *)(b + lui_offset); *h = (handler >> 16) & 0xffff; h = (u16 *)(b + ori_offset); @@ -1865,32 +1840,16 @@ static int __init ulri_disable(char *s) } __setup("noulri", ulri_disable); -void per_cpu_trap_init(bool is_boot_cpu) +/* configure STATUS register */ +static void configure_status(void) { - unsigned int cpu = smp_processor_id(); - unsigned int status_set = ST0_CU0; - unsigned int hwrena = cpu_hwrena_impl_bits; -#ifdef CONFIG_MIPS_MT_SMTC - int secondaryTC = 0; - int bootTC = (cpu == 0); - - /* - * Only do per_cpu_trap_init() for first TC of Each VPE. - * Note that this hack assumes that the SMTC init code - * assigns TCs consecutively and in ascending order. - */ - - if (((read_c0_tcbind() & TCBIND_CURTC) != 0) && - ((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id)) - secondaryTC = 1; -#endif /* CONFIG_MIPS_MT_SMTC */ - /* * Disable coprocessors and select 32-bit or 64-bit addressing * and the 16/32 or 32/32 FPR register model. Reset the BEV * flag that some firmware may have left set and the TS bit (for * IP27). Set XX for ISA IV code to work. 
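Note: the cache_parity_error() and do_ftlb() hunks above are a real bug fix, not a cleanup. "processor_id && 0xff0000" is a logical AND, which evaluates to 0 or 1 and can therefore never equal PRID_COMP_MIPS, so the company-ID test was always false and the detailed error decode never ran. A two-printf demonstration (the PRId value is a hypothetical 34K-class one):

#include <stdio.h>

#define PRID_COMP_MIPS 0x010000

int main(void)
{
    unsigned int processor_id = 0x019500;    /* hypothetical 34K PRId */

    /* Logical AND: (nonzero && nonzero) == 1, never 0x010000. */
    printf("&&: %d\n", (processor_id && 0xff0000) == PRID_COMP_MIPS);
    /* Bitwise AND extracts the company-ID field as intended. */
    printf(" &: %d\n", (processor_id & 0xff0000) == PRID_COMP_MIPS);
    return 0;
}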
*/ + unsigned int status_set = ST0_CU0; #ifdef CONFIG_64BIT status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX; #endif @@ -1901,6 +1860,12 @@ void per_cpu_trap_init(bool is_boot_cpu) change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX, status_set); +} + +/* configure HWRENA register */ +static void configure_hwrena(void) +{ + unsigned int hwrena = cpu_hwrena_impl_bits; if (cpu_has_mips_r2) hwrena |= 0x0000000f; @@ -1910,11 +1875,10 @@ void per_cpu_trap_init(bool is_boot_cpu) if (hwrena) write_c0_hwrena(hwrena); +} -#ifdef CONFIG_MIPS_MT_SMTC - if (!secondaryTC) { -#endif /* CONFIG_MIPS_MT_SMTC */ - +static void configure_exception_vector(void) +{ if (cpu_has_veic || cpu_has_vint) { unsigned long sr = set_c0_status(ST0_BEV); write_c0_ebase(ebase); @@ -1930,6 +1894,16 @@ void per_cpu_trap_init(bool is_boot_cpu) } else set_c0_cause(CAUSEF_IV); } +} + +void per_cpu_trap_init(bool is_boot_cpu) +{ + unsigned int cpu = smp_processor_id(); + + configure_status(); + configure_hwrena(); + + configure_exception_vector(); /* * Before R2 both interrupt numbers were fixed to 7, so on R2 only: @@ -1949,10 +1923,6 @@ void per_cpu_trap_init(bool is_boot_cpu) cp0_perfcount_irq = -1; } -#ifdef CONFIG_MIPS_MT_SMTC - } -#endif /* CONFIG_MIPS_MT_SMTC */ - if (!cpu_data[cpu].asid_cache) cpu_data[cpu].asid_cache = ASID_FIRST_VERSION; @@ -1961,23 +1931,10 @@ void per_cpu_trap_init(bool is_boot_cpu) BUG_ON(current->mm); enter_lazy_tlb(&init_mm, current); -#ifdef CONFIG_MIPS_MT_SMTC - if (bootTC) { -#endif /* CONFIG_MIPS_MT_SMTC */ /* Boot CPU's cache setup in setup_arch(). */ if (!is_boot_cpu) cpu_cache_init(); tlb_init(); -#ifdef CONFIG_MIPS_MT_SMTC - } else if (!secondaryTC) { - /* - * First TC in non-boot VPE must do subset of tlb_init() - * for MMU countrol registers. - */ - write_c0_pagemask(PM_DEFAULT_MASK); - write_c0_wired(0); - } -#endif /* CONFIG_MIPS_MT_SMTC */ TLBMISS_HANDLER_SETUP(); } @@ -2185,3 +2142,32 @@ void __init trap_init(void) cu2_notifier(default_cu2_call, 0x80000000); /* Run last */ } + +static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd, + void *v) +{ + switch (cmd) { + case CPU_PM_ENTER_FAILED: + case CPU_PM_EXIT: + configure_status(); + configure_hwrena(); + configure_exception_vector(); + + /* Restore register with CPU number for TLB handlers */ + TLBMISS_HANDLER_RESTORE(); + + break; + } + + return NOTIFY_OK; +} + +static struct notifier_block trap_pm_notifier_block = { + .notifier_call = trap_pm_notifier, +}; + +static int __init trap_pm_init(void) +{ + return cpu_pm_register_notifier(&trap_pm_notifier_block); +} +arch_initcall(trap_pm_init); diff --git a/arch/mips/kernel/vpe-mt.c b/arch/mips/kernel/vpe-mt.c index 949ae0e1701..2e003b11a09 100644 --- a/arch/mips/kernel/vpe-mt.c +++ b/arch/mips/kernel/vpe-mt.c @@ -127,9 +127,8 @@ int vpe_run(struct vpe *v) clear_c0_mvpcontrol(MVPCONTROL_VPC); /* - * SMTC/SMVP kernels manage VPE enable independently, - * but uniprocessor kernels need to turn it on, even - * if that wasn't the pre-dvpe() state. + * SMVP kernels manage VPE enable independently, but uniprocessor + * kernels need to turn it on, even if that wasn't the pre-dvpe() state. */ #ifdef CONFIG_SMP evpe(vpeflags); @@ -454,12 +453,11 @@ int __init vpe_module_init(void) settc(tc); - /* Any TC that is bound to VPE0 gets left as is - in - * case we are running SMTC on VPE0. A TC that is bound - * to any other VPE gets bound to VPE0, ideally I'd like - * to make it homeless but it doesn't appear to let me - * bind a TC to a non-existent VPE. 
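Note: trap_pm_notifier() above is the canonical consumer shape for the CPU_PM framework this series wires up: on CPU_PM_EXIT (or a failed entry) re-program whatever per-CPU state the core may have lost in a deep power state. A sketch of the same pattern for a hypothetical driver (the mydrv_* names are invented for illustration; the cpu_pm API is the one used in the hunk above):

#include <linux/cpu_pm.h>
#include <linux/init.h>
#include <linux/notifier.h>

/* Hypothetical: restore whatever per-CPU hardware state we own. */
static void mydrv_restore_cpu_state(void)
{
}

static int mydrv_pm_notifier(struct notifier_block *self, unsigned long cmd,
                 void *v)
{
    switch (cmd) {
    case CPU_PM_ENTER_FAILED:    /* power-down aborted */
    case CPU_PM_EXIT:            /* core may have lost context */
        mydrv_restore_cpu_state();
        break;
    }
    return NOTIFY_OK;
}

static struct notifier_block mydrv_pm_nb = {
    .notifier_call = mydrv_pm_notifier,
};

static int __init mydrv_pm_init(void)
{
    return cpu_pm_register_notifier(&mydrv_pm_nb);
}
arch_initcall(mydrv_pm_init);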
Which is perfectly - * reasonable. + * A TC that is bound to any other VPE gets bound to + * VPE0, ideally I'd like to make it homeless but it + * doesn't appear to let me bind a TC to a non-existent + * VPE. Which is perfectly reasonable. * * The (un)bound state is visible to an EJTAG probe so * may notify GDB...