| author | Linus Torvalds <torvalds@linux-foundation.org> | 2008-04-28 10:51:43 -0700 |
| --- | --- | --- |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-04-28 10:51:43 -0700 |
| commit | e31a94ed371c70855eb30b77c490d6d85dd4da26 (patch) | |
| tree | 58d9f1a75a22319f97731db8d9ac07b78a8d8aaf /arch/mips/kernel/sync-r4k.c | |
| parent | 9d9ad4b51d2b29b5bbeb4011f5e76f7538119cf9 (diff) | |
| parent | fcbd3b4b92efe29b59df16b910138cf43683be88 (diff) | |
Merge branch 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus
* 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus: (45 commits)
[MIPS] Pb1200/DBAu1200: move platform code to its proper place
[MIPS] Fix handling of trap and breakpoint instructions
[MIPS] Pb1200: do register SMC 91C111
[MIPS] DBAu1200: fix bad SMC 91C111 resource size
[NET] Kconfig: Rename MIKROTIK_RB500 -> MIKROTIK_RB532
[MIPS] IP27: Fix build bug due to missing include
[MIPS] Fix some sparse warnings on traps.c and irq-msc01.c
[MIPS] cevt-gt641xx: Kill unnecessary include
[MIPS] DS1287: Add clockevent driver
[MIPS] add DECstation I/O ASIC clocksource
[MIPS] rbtx4938: minor cleanup
[MIPS] Alchemy: kill unused PCI_IRQ_TABLE_LOOKUP macro
[MIPS] rbtx4938: misc cleanups
[MIPS] jmr3927: use generic txx9 gpio
[MIPS] rbhma4500: use generic txx9 gpio
[MIPS] generic txx9 gpio support
[MIPS] make fallback gpio.h gpiolib-friendly
[MIPS] unexport null_perf_irq() and make it static
[MIPS] unexport rtc_mips_set_time()
[MIPS] unexport copy_from_user_page()
...
Diffstat (limited to 'arch/mips/kernel/sync-r4k.c')
-rw-r--r-- | arch/mips/kernel/sync-r4k.c | 159 |
1 files changed, 159 insertions, 0 deletions
diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
new file mode 100644
index 00000000000..9021108eb9c
--- /dev/null
+++ b/arch/mips/kernel/sync-r4k.c
@@ -0,0 +1,159 @@
+/*
+ * Count register synchronisation.
+ *
+ * All CPUs will have their count registers synchronised to the CPU0 expirelo
+ * value. This can cause a small timewarp for CPU0. All other CPU's should
+ * not have done anything significant (but they may have had interrupts
+ * enabled briefly - prom_smp_finish() should not be responsible for enabling
+ * interrupts...)
+ *
+ * FIXME: broken for SMTC
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/irqflags.h>
+#include <linux/r4k-timer.h>
+
+#include <asm/atomic.h>
+#include <asm/barrier.h>
+#include <asm/cpumask.h>
+#include <asm/mipsregs.h>
+
+static atomic_t __initdata count_start_flag = ATOMIC_INIT(0);
+static atomic_t __initdata count_count_start = ATOMIC_INIT(0);
+static atomic_t __initdata count_count_stop = ATOMIC_INIT(0);
+
+#define COUNTON	100
+#define NR_LOOPS	5
+
+void __init synchronise_count_master(void)
+{
+	int i;
+	unsigned long flags;
+	unsigned int initcount;
+	int nslaves;
+
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * SMTC needs to synchronise per VPE, not per CPU
+	 * ignore for now
+	 */
+	return;
+#endif
+
+	pr_info("Checking COUNT synchronization across %u CPUs: ",
+		num_online_cpus());
+
+	local_irq_save(flags);
+
+	/*
+	 * Notify the slaves that it's time to start
+	 */
+	atomic_set(&count_start_flag, 1);
+	smp_wmb();
+
+	/* Count will be initialised to expirelo for all CPU's */
+	initcount = expirelo;
+
+	/*
+	 * We loop a few times to get a primed instruction cache,
+	 * then the last pass is more or less synchronised and
+	 * the master and slaves each set their cycle counters to a known
+	 * value all at once. This reduces the chance of having random offsets
+	 * between the processors, and guarantees that the maximum
+	 * delay between the cycle counters is never bigger than
+	 * the latency of information-passing (cachelines) between
+	 * two CPUs.
+	 */
+
+	nslaves = num_online_cpus()-1;
+	for (i = 0; i < NR_LOOPS; i++) {
+		/* slaves loop on '!= ncpus' */
+		while (atomic_read(&count_count_start) != nslaves)
+			mb();
+		atomic_set(&count_count_stop, 0);
+		smp_wmb();
+
+		/* this lets the slaves write their count register */
+		atomic_inc(&count_count_start);
+
+		/*
+		 * Everyone initialises count in the last loop:
+		 */
+		if (i == NR_LOOPS-1)
+			write_c0_count(initcount);
+
+		/*
+		 * Wait for all slaves to leave the synchronization point:
+		 */
+		while (atomic_read(&count_count_stop) != nslaves)
+			mb();
+		atomic_set(&count_count_start, 0);
+		smp_wmb();
+		atomic_inc(&count_count_stop);
+	}
+	/* Arrange for an interrupt in a short while */
+	write_c0_compare(read_c0_count() + COUNTON);
+
+	local_irq_restore(flags);
+
+	/*
+	 * i386 code reported the skew here, but the
+	 * count registers were almost certainly out of sync
+	 * so no point in alarming people
+	 */
+	printk("done.\n");
+}
+
+void __init synchronise_count_slave(void)
+{
+	int i;
+	unsigned long flags;
+	unsigned int initcount;
+	int ncpus;
+
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * SMTC needs to synchronise per VPE, not per CPU
+	 * ignore for now
+	 */
+	return;
+#endif
+
+	local_irq_save(flags);
+
+	/*
+	 * Not every cpu is online at the time this gets called,
+	 * so we first wait for the master to say everyone is ready
+	 */
+
+	while (!atomic_read(&count_start_flag))
+		mb();
+
+	/* Count will be initialised to expirelo for all CPU's */
+	initcount = expirelo;
+
+	ncpus = num_online_cpus();
+	for (i = 0; i < NR_LOOPS; i++) {
+		atomic_inc(&count_count_start);
+		while (atomic_read(&count_count_start) != ncpus)
+			mb();
+
+		/*
+		 * Everyone initialises count in the last loop:
+		 */
+		if (i == NR_LOOPS-1)
+			write_c0_count(initcount);
+
+		atomic_inc(&count_count_stop);
+		while (atomic_read(&count_count_stop) != ncpus)
+			mb();
+	}
+	/* Arrange for an interrupt in a short while */
+	write_c0_compare(read_c0_count() + COUNTON);
+
+	local_irq_restore(flags);
+}
+#undef NR_LOOPS
+#endif
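
The master/slave rendezvous in the new file can be hard to follow in diff form. Below is a rough user-space analogue of the same barrier scheme, a minimal sketch using C11 atomics and pthreads rather than the kernel's atomic_t, smp_wmb() and the CP0 Count/Compare registers. Every name in it (NR_SLAVES, fake_count, master(), slave()) is invented for illustration and is not part of the kernel code above; writing fake_count stands in for write_c0_count(), and busy polling on atomic_load() stands in for the mb() spin loops.

```c
/*
 * Illustrative sketch only -- NOT kernel code.  It mimics the rendezvous in
 * sync-r4k.c: slaves wait for a start flag, then master and slaves meet at
 * two counters per pass and all write their "count" together on the last pass.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define NR_SLAVES	3
#define NR_LOOPS	5

static atomic_int start_flag;			/* ~ count_start_flag */
static atomic_int count_start;			/* ~ count_count_start */
static atomic_int count_stop;			/* ~ count_count_stop */
static unsigned int fake_count[NR_SLAVES + 1];	/* ~ per-CPU c0_count */

static void *slave(void *arg)
{
	int id = (int)(intptr_t)arg, i;

	while (!atomic_load(&start_flag))	/* wait for the master */
		;
	for (i = 0; i < NR_LOOPS; i++) {
		atomic_fetch_add(&count_start, 1);
		while (atomic_load(&count_start) != NR_SLAVES + 1)
			;
		if (i == NR_LOOPS - 1)		/* everyone writes on the last pass */
			fake_count[id] = 100;
		atomic_fetch_add(&count_stop, 1);
		while (atomic_load(&count_stop) != NR_SLAVES + 1)
			;
	}
	return NULL;
}

static void master(void)
{
	int i;

	atomic_store(&start_flag, 1);		/* notify the slaves */
	for (i = 0; i < NR_LOOPS; i++) {
		while (atomic_load(&count_start) != NR_SLAVES)
			;
		atomic_store(&count_stop, 0);
		atomic_fetch_add(&count_start, 1);	/* release the slaves */
		if (i == NR_LOOPS - 1)
			fake_count[0] = 100;
		while (atomic_load(&count_stop) != NR_SLAVES)
			;
		atomic_store(&count_start, 0);
		atomic_fetch_add(&count_stop, 1);
	}
}

int main(void)
{
	pthread_t tid[NR_SLAVES];
	int i;

	for (i = 0; i < NR_SLAVES; i++)
		pthread_create(&tid[i], NULL, slave, (void *)(intptr_t)(i + 1));
	master();
	for (i = 0; i < NR_SLAVES; i++)
		pthread_join(tid[i], NULL);
	for (i = 0; i <= NR_SLAVES; i++)
		printf("cpu%d: fake_count = %u\n", i, fake_count[i]);
	return 0;
}
```

Built with something like `cc -pthread sync-sketch.c` (the file name is hypothetical), all four entries print the same value. The reason the real code runs NR_LOOPS warm-up passes is the same as in the sketch's last iteration: the final, simultaneous write happens with the instruction cache primed, so the residual skew between the counters is bounded by the cache-line transfer latency between CPUs.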