path: root/arch/mips/kernel/smtc.c
Diffstat (limited to 'arch/mips/kernel/smtc.c')
-rw-r--r--  arch/mips/kernel/smtc.c | 187
1 file changed, 116 insertions(+), 71 deletions(-)
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 342d873b2ec..a8c1a698d58 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -1,5 +1,6 @@
/* Copyright (C) 2004 Mips Technologies, Inc */
+#include <linux/clockchips.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
@@ -25,10 +26,11 @@
#include <asm/smtc_proc.h>
/*
- * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set.
+ * SMTC Kernel needs to manipulate low-level CPU interrupt mask
+ * in do_IRQ. These are passed in setup_irq_smtc() and stored
+ * in this table.
*/
-
-#define MIPS_CPU_IPI_IRQ 1
+unsigned long irq_hwmask[NR_IRQS];
#define LOCK_MT_PRA() \
local_irq_save(flags); \
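
The new irq_hwmask[] table is filled in by setup_irq_smtc(), whose tail is visible in a later hunk. A minimal sketch of that function, inferred from the comment above and from the visible return statement (illustration only, not necessarily the exact patch contents):

	#include <linux/interrupt.h>
	#include <linux/irq.h>

	/*
	 * Sketch: remember the Status.IM bits associated with this IRQ so
	 * that do_IRQ() can manipulate them later, then fall through to the
	 * generic setup.  The assignment is inferred from the comment; the
	 * return statement matches the tail shown further down.
	 */
	int setup_irq_smtc(unsigned int irq, struct irqaction *new,
			   unsigned long hwmask)
	{
		irq_hwmask[irq] = hwmask;
		return setup_irq(irq, new);
	}
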
@@ -61,7 +63,7 @@ asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];
* Clock interrupt "latch" buffers, per "CPU"
*/
-unsigned int ipi_timer_latch[NR_CPUS];
+static atomic_t ipi_timer_latch[NR_CPUS];
/*
* Number of InterProcessor Interrupt (IPI) message buffers to allocate
@@ -86,25 +88,11 @@ unsigned int smtc_status = 0;
/* Boot command line configuration overrides */
-static int vpelimit = 0;
-static int tclimit = 0;
static int ipibuffers = 0;
static int nostlb = 0;
static int asidmask = 0;
unsigned long smtc_asid_mask = 0xff;
-static int __init maxvpes(char *str)
-{
- get_option(&str, &vpelimit);
- return 1;
-}
-
-static int __init maxtcs(char *str)
-{
- get_option(&str, &tclimit);
- return 1;
-}
-
static int __init ipibufs(char *str)
{
get_option(&str, &ipibuffers);
@@ -137,8 +125,6 @@ static int __init asidmask_set(char *str)
return 1;
}
-__setup("maxvpes=", maxvpes);
-__setup("maxtcs=", maxtcs);
__setup("ipibufs=", ipibufs);
__setup("nostlb", stlb_disable);
__setup("asidmask=", asidmask_set);
@@ -168,9 +154,9 @@ static int __init tintq(char *str)
__setup("tintq=", tintq);
-int imstuckcount[2][8];
+static int imstuckcount[2][8];
/* vpemask represents IM/IE bits of per-VPE Status registers, low-to-high */
-int vpemask[2][8] = {
+static int vpemask[2][8] = {
{0, 0, 1, 0, 0, 0, 0, 1},
{0, 0, 0, 0, 0, 0, 0, 1}
};
@@ -194,7 +180,7 @@ void __init sanitize_tlb_entries(void)
static void smtc_configure_tlb(void)
{
- int i,tlbsiz,vpes;
+ int i, tlbsiz, vpes;
unsigned long mvpconf0;
unsigned long config1val;
@@ -311,8 +297,10 @@ int __init mipsmt_build_cpu_map(int start_cpu_slot)
__cpu_number_map[i] = i;
__cpu_logical_map[i] = i;
}
+#ifdef CONFIG_MIPS_MT_FPAFF
/* Initialize map of CPUs with FPUs */
cpus_clear(mt_fpu_cpumask);
+#endif
/* One of those TC's is the one booting, and not a secondary... */
printk("%i available secondary CPU TC(s)\n", i - 1);
@@ -374,7 +362,7 @@ void mipsmt_prepare_cpus(void)
IPIQ[i].head = IPIQ[i].tail = NULL;
spin_lock_init(&IPIQ[i].lock);
IPIQ[i].depth = 0;
- ipi_timer_latch[i] = 0;
+ atomic_set(&ipi_timer_latch[i], 0);
}
/* cpu_data index starts at zero */
@@ -384,7 +372,7 @@ void mipsmt_prepare_cpus(void)
cpu++;
/* Report on boot-time options */
- mips_mt_set_cpuoptions ();
+ mips_mt_set_cpuoptions();
if (vpelimit > 0)
printk("Limit of %d VPEs set\n", vpelimit);
if (tclimit > 0)
@@ -435,7 +423,7 @@ void mipsmt_prepare_cpus(void)
* code. Leave it alone!
*/
if (tc != 0) {
- smtc_tc_setup(vpe,tc, cpu);
+ smtc_tc_setup(vpe, tc, cpu);
cpu++;
}
printk(" %d", tc);
@@ -443,7 +431,7 @@ void mipsmt_prepare_cpus(void)
}
if (slop) {
if (tc != 0) {
- smtc_tc_setup(vpe,tc, cpu);
+ smtc_tc_setup(vpe, tc, cpu);
cpu++;
}
printk(" %d", tc);
@@ -497,10 +485,12 @@ void mipsmt_prepare_cpus(void)
/* Set up coprocessor affinity CPU mask(s) */
+#ifdef CONFIG_MIPS_MT_FPAFF
for (tc = 0; tc < ntc; tc++) {
if (cpu_data[tc].options & MIPS_CPU_FPU)
cpu_set(tc, mt_fpu_cpumask);
}
+#endif
/* set up ipi interrupts... */
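
For context, mt_fpu_cpumask (cleared and populated under CONFIG_MIPS_MT_FPAFF in the hunks above) records which TCs actually have an FPU. A hypothetical helper, not part of this patch, showing the kind of consumer such a mask typically has:

	#include <linux/cpumask.h>
	#include <linux/sched.h>

	/*
	 * Hypothetical illustration: confine a task that needs the FPU to
	 * the TCs recorded in mt_fpu_cpumask.  The helper name and policy
	 * are assumptions made for the example.
	 */
	static void example_bind_to_fpu_tcs(struct task_struct *p)
	{
		if (cpus_intersects(p->cpus_allowed, mt_fpu_cpumask))
			set_cpus_allowed(p, mt_fpu_cpumask);
	}
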
@@ -540,7 +530,7 @@ void mipsmt_prepare_cpus(void)
* (unsigned long)idle->thread_info the gp
*
*/
-void smtc_boot_secondary(int cpu, struct task_struct *idle)
+void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle)
{
extern u32 kernelsp[NR_CPUS];
long flags;
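
The comment above spells out the handoff convention for a newly booted TC: the idle task supplies the stack pointer and its thread_info becomes the gp. A hedged sketch of how the halted TC's state might be seeded to honour that convention (assumed sequence; the real body of smtc_boot_secondary() may differ in detail):

	#include <linux/sched.h>
	#include <asm/mipsmtregs.h>
	#include <asm/processor.h>

	/*
	 * Sketch only: point the halted TC at the secondary entry point and
	 * hand it the idle task's stack and thread_info as sp/gp.  Assumes
	 * the caller has already targeted and halted the TC under
	 * LOCK_CORE_PRA().
	 */
	static void example_seed_tc(int cpu, struct task_struct *idle)
	{
		extern u32 kernelsp[NR_CPUS];
		extern void smp_bootstrap(void);

		settc(cpu_data[cpu].tc_id);
		write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);
		kernelsp[cpu] = __KSTK_TOS(idle);
		write_tc_gpr_sp(__KSTK_TOS(idle));
		write_tc_gpr_gp((unsigned long)task_thread_info(idle));
	}
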
@@ -582,7 +572,7 @@ void smtc_init_secondary(void)
if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
((read_c0_tcbind() & TCBIND_CURVPE)
!= cpu_data[smp_processor_id() - 1].vpe_id)){
- write_c0_compare (read_c0_count() + mips_hpt_frequency/HZ);
+ write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
}
local_irq_enable();
@@ -621,6 +611,60 @@ int setup_irq_smtc(unsigned int irq, struct irqaction * new,
return setup_irq(irq, new);
}
+#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
+/*
+ * Support for IRQ affinity to TCs
+ */
+
+void smtc_set_irq_affinity(unsigned int irq, cpumask_t affinity)
+{
+ /*
+ * If a "fast path" cache of quickly decodable affinity state
+ * is maintained, this is where it gets done, on a call up
+ * from the platform affinity code.
+ */
+}
+
+void smtc_forward_irq(unsigned int irq)
+{
+ int target;
+
+ /*
+ * OK wise guy, now figure out how to get the IRQ
+ * to be serviced on an authorized "CPU".
+ *
+ * Ideally, to handle the situation where an IRQ has multiple
+ * eligible CPUS, we would maintain state per IRQ that would
+ * allow a fair distribution of service requests. Since the
+ * expected use model is any-or-only-one, for simplicity
+ * and efficiency, we just pick the easiest one to find.
+ */
+
+ target = first_cpu(irq_desc[irq].affinity);
+
+ /*
+ * We depend on the platform code to have correctly processed
+ * IRQ affinity change requests to ensure that the IRQ affinity
+ * mask has been purged of bits corresponding to nonexistent and
+ * offline "CPUs", and to TCs bound to VPEs other than the VPE
+ * connected to the physical interrupt input for the interrupt
+ * in question. Otherwise we have a nasty problem with interrupt
+ * mask management. This is best handled in non-performance-critical
+ * platform IRQ affinity setting code, to minimize interrupt-time
+ * checks.
+ */
+
+ /* If no one is eligible, service locally */
+ if (target >= NR_CPUS) {
+ do_IRQ_no_affinity(irq);
+ return;
+ }
+
+ smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq);
+}
+
+#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
+
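
The comment inside smtc_forward_irq() above leaves mask sanitization to the platform's affinity-setting code. A hypothetical platform-side setter (names and policy are assumptions, not part of this patch) that honours that contract before first_cpu() ever scans the mask:

	#include <linux/cpumask.h>
	#include <linux/irq.h>

	/*
	 * Hypothetical example: purge offline "CPUs" from a requested
	 * affinity mask before storing it, so smtc_forward_irq() only finds
	 * eligible targets.  Filtering out TCs bound to the wrong VPE (also
	 * required by the comment above) is platform-specific and omitted.
	 */
	static void example_plat_set_irq_affinity(unsigned int irq, cpumask_t mask)
	{
		cpumask_t tmask;

		cpus_and(tmask, mask, cpu_online_map);
		irq_desc[irq].affinity = tmask;
		smtc_set_irq_affinity(irq, tmask);	/* refresh any fast-path cache */
	}
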
/*
* IPI model for SMTC is tricky, because interrupts aren't TC-specific.
* Within a VPE one TC can interrupt another by different approaches.
@@ -663,7 +707,7 @@ static void smtc_ipi_qdump(void)
* be done with the atomic.h primitives). And since this is
* MIPS MT, we can assume that we have LL/SC.
*/
-static __inline__ int atomic_postincrement(unsigned int *pv)
+static inline int atomic_postincrement(atomic_t *v)
{
unsigned long result;
@@ -674,9 +718,9 @@ static __inline__ int atomic_postincrement(unsigned int *pv)
" addu %1, %0, 1 \n"
" sc %1, %2 \n"
" beqz %1, 1b \n"
- " sync \n"
- : "=&r" (result), "=&r" (temp), "=m" (*pv)
- : "m" (*pv)
+ __WEAK_LLSC_MB
+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
+ : "m" (v->counter)
: "memory");
return result;
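
For comparison with the hand-rolled LL/SC loop above, the same post-increment semantics can be expressed with the generic atomic.h primitives; the open-coded version presumably exists to keep this hot IPI path as lean as possible. Sketch only:

	#include <asm/atomic.h>

	/*
	 * Equivalent semantics: atomic_add_return() yields the new value,
	 * so subtracting one recovers the value before the increment.
	 */
	static inline int atomic_postincrement_generic(atomic_t *v)
	{
		return atomic_add_return(1, v) - 1;
	}
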
@@ -704,6 +748,8 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
pipi->arg = (void *)action;
pipi->dest = cpu;
if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
+ if (type == SMTC_CLOCK_TICK)
+ atomic_inc(&ipi_timer_latch[cpu]);
/* If not on same VPE, enqueue and send cross-VPE interrupt */
smtc_ipi_nq(&IPIQ[cpu], pipi);
LOCK_CORE_PRA();
@@ -745,6 +791,8 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
}
smtc_ipi_nq(&IPIQ[cpu], pipi);
} else {
+ if (type == SMTC_CLOCK_TICK)
+ atomic_inc(&ipi_timer_latch[cpu]);
post_direct_ipi(cpu, pipi);
write_tc_c0_tchalt(0);
UNLOCK_CORE_PRA();
@@ -762,6 +810,7 @@ static void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
unsigned long tcrestart;
extern u32 kernelsp[NR_CPUS];
extern void __smtc_ipi_vector(void);
+//printk("%s: on %d for %d\n", __func__, smp_processor_id(), cpu);
/* Extract Status, EPC from halted TC */
tcstatus = read_tc_c0_tcstatus();
@@ -812,25 +861,31 @@ static void ipi_call_interrupt(void)
smp_call_function_interrupt();
}
+DECLARE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device);
+
void ipi_decode(struct smtc_ipi *pipi)
{
+ unsigned int cpu = smp_processor_id();
+ struct clock_event_device *cd;
void *arg_copy = pipi->arg;
int type_copy = pipi->type;
- int dest_copy = pipi->dest;
+ int ticks;
smtc_ipi_nq(&freeIPIq, pipi);
switch (type_copy) {
case SMTC_CLOCK_TICK:
irq_enter();
- kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + cp0_compare_irq]++;
- /* Invoke Clock "Interrupt" */
- ipi_timer_latch[dest_copy] = 0;
-#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
- clock_hang_reported[dest_copy] = 0;
-#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
- local_timer_interrupt(0, NULL);
+ kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + 1]++;
+ cd = &per_cpu(smtc_dummy_clockevent_device, cpu);
+ ticks = atomic_read(&ipi_timer_latch[cpu]);
+ atomic_sub(ticks, &ipi_timer_latch[cpu]);
+ while (ticks) {
+ cd->event_handler(cd);
+ ticks--;
+ }
irq_exit();
break;
+
case LINUX_SMP_IPI:
switch ((int)arg_copy) {
case SMP_RESCHEDULE_YOURSELF:
@@ -845,6 +900,15 @@ void ipi_decode(struct smtc_ipi *pipi)
break;
}
break;
+#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
+ case IRQ_AFFINITY_IPI:
+ /*
+ * Accept a "forwarded" interrupt that was initially
+ * taken by a TC who doesn't have affinity for the IRQ.
+ */
+ do_IRQ_no_affinity((int)arg_copy);
+ break;
+#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
default:
printk("Impossible SMTC IPI Type 0x%x\n", type_copy);
break;
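
The SMTC_CLOCK_TICK case above drains ipi_timer_latch[] by calling the per-CPU dummy clockevent's event_handler once per latched tick. A hedged sketch of how such a per-TC dummy device might be defined and registered (field values are illustrative assumptions; the real definition and registration presumably live elsewhere):

	#include <linux/clockchips.h>
	#include <linux/cpumask.h>
	#include <linux/percpu.h>
	#include <linux/smp.h>

	DEFINE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device);

	/*
	 * Sketch only: a "dummy" clockevent per TC.  It never programs any
	 * hardware; ticks arrive as SMTC_CLOCK_TICK IPIs and ipi_decode()
	 * invokes cd->event_handler() for each one.
	 */
	static void example_smtc_clockevent_init(void)
	{
		unsigned int cpu = smp_processor_id();
		struct clock_event_device *cd =
			&per_cpu(smtc_dummy_clockevent_device, cpu);

		cd->name	= "SMTC";
		cd->features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_DUMMY;
		cd->rating	= 200;
		cd->cpumask	= cpumask_of_cpu(cpu);
		clockevents_register_device(cd);
	}
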
@@ -873,25 +937,6 @@ void deferred_smtc_ipi(void)
}
/*
- * Send clock tick to all TCs except the one executing the funtion
- */
-
-void smtc_timer_broadcast(int vpe)
-{
- int cpu;
- int myTC = cpu_data[smp_processor_id()].tc_id;
- int myVPE = cpu_data[smp_processor_id()].vpe_id;
-
- smtc_cpu_stats[smp_processor_id()].timerints++;
-
- for_each_online_cpu(cpu) {
- if (cpu_data[cpu].vpe_id == myVPE &&
- cpu_data[cpu].tc_id != myTC)
- smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
- }
-}
-
-/*
* Cross-VPE interrupts in the SMTC prototype use "software interrupts"
* set via cross-VPE MTTR manipulation of the Cause register. It would be
* in some regards preferable to have external logic for "doorbell" hardware
@@ -975,7 +1020,12 @@ static void ipi_irq_dispatch(void)
do_IRQ(cpu_ipi_irq);
}
-static struct irqaction irq_ipi;
+static struct irqaction irq_ipi = {
+	.handler = ipi_interrupt,
+	.flags = IRQF_DISABLED | IRQF_PERCPU,
+	.name = "SMTC_IPI",
+};
static void setup_cross_vpe_interrupts(unsigned int nvpe)
{
@@ -987,13 +1037,8 @@ static void setup_cross_vpe_interrupts(unsigned int nvpe)
set_vi_handler(MIPS_CPU_IPI_IRQ, ipi_irq_dispatch);
- irq_ipi.handler = ipi_interrupt;
- irq_ipi.flags = IRQF_DISABLED;
- irq_ipi.name = "SMTC_IPI";
-
setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ));
- irq_desc[cpu_ipi_irq].status |= IRQ_PER_CPU;
set_irq_handler(cpu_ipi_irq, handle_percpu_irq);
}
@@ -1132,11 +1177,11 @@ void smtc_idle_loop_hook(void)
for (tc = 0; tc < NR_CPUS; tc++) {
/* Don't check ourself - we'll dequeue IPIs just below */
if ((tc != smp_processor_id()) &&
- ipi_timer_latch[tc] > timerq_limit) {
+ atomic_read(&ipi_timer_latch[tc]) > timerq_limit) {
if (clock_hang_reported[tc] == 0) {
pdb_msg += sprintf(pdb_msg,
"TC %d looks hung with timer latch at %d\n",
- tc, ipi_timer_latch[tc]);
+ tc, atomic_read(&ipi_timer_latch[tc]));
clock_hang_reported[tc]++;
}
}
@@ -1177,7 +1222,7 @@ void smtc_soft_dump(void)
smtc_ipi_qdump();
printk("Timer IPI Backlogs:\n");
for (i=0; i < NR_CPUS; i++) {
- printk("%d: %d\n", i, ipi_timer_latch[i]);
+ printk("%d: %d\n", i, atomic_read(&ipi_timer_latch[i]));
}
printk("%d Recoveries of \"stolen\" FPU\n",
atomic_read(&smtc_fpu_recoveries));
@@ -1219,7 +1264,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
if (cpu_has_vtag_icache)
flush_icache_all();
/* Traverse all online CPUs (hack requires contiguous range) */
- for (i = 0; i < num_online_cpus(); i++) {
+ for_each_online_cpu(i) {
/*
* We don't need to worry about our own CPU, nor those of
* CPUs who don't share our TLB.
@@ -1248,7 +1293,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
/*
* SMTC shares the TLB within VPEs and possibly across all VPEs.
*/
- for (i = 0; i < num_online_cpus(); i++) {
+ for_each_online_cpu(i) {
if ((smtc_status & SMTC_TLB_SHARED) ||
(cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
cpu_context(i, mm) = asid_cache(i) = asid;