Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--  arch/mips/kernel/Makefile            |    5
-rw-r--r--  arch/mips/kernel/asm-offsets.c       |    5
-rw-r--r--  arch/mips/kernel/binfmt_elfo32.c     |    2
-rw-r--r--  arch/mips/kernel/cpu-bugs64.c        |    4
-rw-r--r--  arch/mips/kernel/cpu-probe.c         |  164
-rw-r--r--  arch/mips/kernel/gdb-stub.c          |   30
-rw-r--r--  arch/mips/kernel/head.S              |    3
-rw-r--r--  arch/mips/kernel/i8253.c             |  213
-rw-r--r--  arch/mips/kernel/i8259.c             |   43
-rw-r--r--  arch/mips/kernel/irixelf.c           |   40
-rw-r--r--  arch/mips/kernel/irixinv.c           |   42
-rw-r--r--  arch/mips/kernel/irixioctl.c         |    2
-rw-r--r--  arch/mips/kernel/irixsig.c           |   10
-rw-r--r--  arch/mips/kernel/irq-gt641xx.c       |  131
-rw-r--r--  arch/mips/kernel/irq-msc01.c         |   14
-rw-r--r--  arch/mips/kernel/irq.c               |   14
-rw-r--r--  arch/mips/kernel/irq_txx9.c          |  192
-rw-r--r--  arch/mips/kernel/kspd.c              |   31
-rw-r--r--  arch/mips/kernel/linux32.c           |   33
-rw-r--r--  arch/mips/kernel/machine_kexec.c     |   18
-rw-r--r--  arch/mips/kernel/mips-mt-fpaff.c     |    9
-rw-r--r--  arch/mips/kernel/mips-mt.c           |   31
-rw-r--r--  arch/mips/kernel/proc.c              |   73
-rw-r--r--  arch/mips/kernel/process.c           |   27
-rw-r--r--  arch/mips/kernel/ptrace.c            |   64
-rw-r--r--  arch/mips/kernel/ptrace32.c          |   16
-rw-r--r--  arch/mips/kernel/relocate_kernel.S   |   78
-rw-r--r--  arch/mips/kernel/rtlx.c              |   30
-rw-r--r--  arch/mips/kernel/scall32-o32.S       |    1
-rw-r--r--  arch/mips/kernel/scall64-64.S        |    1
-rw-r--r--  arch/mips/kernel/scall64-n32.S       |    3
-rw-r--r--  arch/mips/kernel/scall64-o32.S       |    1
-rw-r--r--  arch/mips/kernel/setup.c             |    2
-rw-r--r--  arch/mips/kernel/signal.c            |    4
-rw-r--r--  arch/mips/kernel/signal32.c          |   45
-rw-r--r--  arch/mips/kernel/signal_n32.c        |    4
-rw-r--r--  arch/mips/kernel/smp-mt.c            |    8
-rw-r--r--  arch/mips/kernel/smp.c               |  121
-rw-r--r--  arch/mips/kernel/smtc.c              |  187
-rw-r--r--  arch/mips/kernel/syscall.c           |   83
-rw-r--r--  arch/mips/kernel/sysirix.c           |   22
-rw-r--r--  arch/mips/kernel/time.c              |  417
-rw-r--r--  arch/mips/kernel/traps.c             |   78
-rw-r--r--  arch/mips/kernel/unaligned.c         |   55
-rw-r--r--  arch/mips/kernel/vmlinux.lds.S       |  339
-rw-r--r--  arch/mips/kernel/vpe.c               |  407
46 files changed, 2009 insertions(+), 1093 deletions(-)
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 5c8085b6d7a..a2689f93c16 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -50,6 +50,8 @@ obj-$(CONFIG_IRQ_CPU) += irq_cpu.o
obj-$(CONFIG_IRQ_CPU_RM7K) += irq-rm7000.o
obj-$(CONFIG_IRQ_CPU_RM9K) += irq-rm9000.o
obj-$(CONFIG_MIPS_BOARDS_GEN) += irq-msc01.o
+obj-$(CONFIG_IRQ_TXX9) += irq_txx9.o
+obj-$(CONFIG_IRQ_GT641XX) += irq-gt641xx.o
obj-$(CONFIG_32BIT) += scall32-o32.o
obj-$(CONFIG_64BIT) += scall64-64.o
@@ -63,6 +65,7 @@ obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_64BIT) += cpu-bugs64.o
+obj-$(CONFIG_I8253) += i8253.o
obj-$(CONFIG_PCSPEAKER) += pcspeaker.o
obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
@@ -71,3 +74,5 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(CFLAGS) -Wa,-mdaddi -c -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
obj-$(CONFIG_HAVE_STD_PC_SERIAL_PORT) += 8250-platform.o
+
+EXTRA_CFLAGS += -Werror
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 3b27309d54b..ca136298acd 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -132,7 +132,6 @@ void output_thread_defines(void)
offset("#define THREAD_ECODE ", struct task_struct, \
thread.error_code);
offset("#define THREAD_TRAPNO ", struct task_struct, thread.trap_no);
- offset("#define THREAD_MFLAGS ", struct task_struct, thread.mflags);
offset("#define THREAD_TRAMP ", struct task_struct, \
thread.irix_trampoline);
offset("#define THREAD_OLDCTX ", struct task_struct, \
@@ -233,6 +232,10 @@ void output_mm_defines(void)
constant("#define _PMD_T_LOG2 ", PMD_T_LOG2);
constant("#define _PTE_T_LOG2 ", PTE_T_LOG2);
linefeed;
+ constant("#define _PGD_ORDER ", PGD_ORDER);
+ constant("#define _PMD_ORDER ", PMD_ORDER);
+ constant("#define _PTE_ORDER ", PTE_ORDER);
+ linefeed;
constant("#define _PMD_SHIFT ", PMD_SHIFT);
constant("#define _PGDIR_SHIFT ", PGDIR_SHIFT);
linefeed;
diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
index 993f7ec70f3..da41eac195c 100644
--- a/arch/mips/kernel/binfmt_elfo32.c
+++ b/arch/mips/kernel/binfmt_elfo32.c
@@ -110,7 +110,7 @@ jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value)
}
#undef ELF_CORE_COPY_REGS
-#define ELF_CORE_COPY_REGS(_dest,_regs) elf32_core_copy_regs(_dest,_regs);
+#define ELF_CORE_COPY_REGS(_dest, _regs) elf32_core_copy_regs(_dest, _regs);
void elf32_core_copy_regs(elf_gregset_t grp, struct pt_regs *regs)
{
diff --git a/arch/mips/kernel/cpu-bugs64.c b/arch/mips/kernel/cpu-bugs64.c
index c09337b947b..af78456d413 100644
--- a/arch/mips/kernel/cpu-bugs64.c
+++ b/arch/mips/kernel/cpu-bugs64.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2003, 2004 Maciej W. Rozycki
+ * Copyright (C) 2003, 2004, 2007 Maciej W. Rozycki
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -29,7 +29,7 @@ static inline void align_mod(const int align, const int mod)
".endr\n\t"
".set pop"
:
- : "n" (align), "n" (mod));
+ : GCC_IMM_ASM(align), GCC_IMM_ASM(mod));
}
static inline void mult_sh_align_mod(long *v1, long *v2, long *w,
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index c6b8b074a81..c8c47a2d197 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -75,6 +75,27 @@ static void r4k_wait_irqoff(void)
local_irq_enable();
}
+/*
+ * The RM7000 variant has to handle erratum 38. The workaround is to not
+ * have any pending stores when the WAIT instruction is executed.
+ */
+static void rm7k_wait_irqoff(void)
+{
+ local_irq_disable();
+ if (!need_resched())
+ __asm__(
+ " .set push \n"
+ " .set mips3 \n"
+ " .set noat \n"
+ " mfc0 $1, $12 \n"
+ " sync \n"
+ " mtc0 $1, $12 # stalls until W stage \n"
+ " wait \n"
+ " mtc0 $1, $12 # stalls until W stage \n"
+ " .set pop \n");
+ local_irq_enable();
+}
+
/* The Au1xxx wait is available only if using 32khz counter or
* external timer source, but specifically not CP0 Counter. */
int allow_au1k_wait;
@@ -132,16 +153,20 @@ static inline void check_wait(void)
case CPU_R4700:
case CPU_R5000:
case CPU_NEVADA:
- case CPU_RM7000:
case CPU_4KC:
case CPU_4KEC:
case CPU_4KSC:
case CPU_5KC:
case CPU_25KF:
case CPU_PR4450:
+ case CPU_BCM3302:
cpu_wait = r4k_wait;
break;
+ case CPU_RM7000:
+ cpu_wait = rm7k_wait_irqoff;
+ break;
+
case CPU_24K:
case CPU_34K:
cpu_wait = r4k_wait;
@@ -175,7 +200,14 @@ static inline void check_wait(void)
if ((c->processor_id & 0xff) <= 0x64)
break;
- cpu_wait = r4k_wait;
+ /*
+ * Another rev is incrementing c0_count at a reduced clock
+ * rate while in WAIT mode. So we basically have the choice
+ * between using the cp0 timer as clocksource or avoiding
+ * the WAIT instruction. Until more details are known,
+ * disable the use of WAIT for 20Kc entirely.
+ cpu_wait = r4k_wait;
+ */
break;
case CPU_RM9000:
if ((c->processor_id & 0x00ff) >= 0x40)
@@ -714,14 +746,6 @@ static inline void cpu_probe_sibyte(struct cpuinfo_mips *c)
{
decode_configs(c);
- /*
- * For historical reasons the SB1 comes with it's own variant of
- * cache code which eventually will be folded into c-r4k.c. Until
- * then we pretend it's got it's own cache architecture.
- */
- c->options &= ~MIPS_CPU_4K_CACHE;
- c->options |= MIPS_CPU_SB1_CACHE;
-
switch (c->processor_id & 0xff00) {
case PRID_IMP_SB1:
c->cputype = CPU_SB1;
@@ -762,9 +786,111 @@ static inline void cpu_probe_philips(struct cpuinfo_mips *c)
}
+static inline void cpu_probe_broadcom(struct cpuinfo_mips *c)
+{
+ decode_configs(c);
+ switch (c->processor_id & 0xff00) {
+ case PRID_IMP_BCM3302:
+ c->cputype = CPU_BCM3302;
+ break;
+ case PRID_IMP_BCM4710:
+ c->cputype = CPU_BCM4710;
+ break;
+ default:
+ c->cputype = CPU_UNKNOWN;
+ break;
+ }
+}
+
+const char *__cpu_name[NR_CPUS];
+
+/*
+ * Name a CPU
+ */
+static __init const char *cpu_to_name(struct cpuinfo_mips *c)
+{
+ const char *name = NULL;
+
+ switch (c->cputype) {
+ case CPU_UNKNOWN: name = "unknown"; break;
+ case CPU_R2000: name = "R2000"; break;
+ case CPU_R3000: name = "R3000"; break;
+ case CPU_R3000A: name = "R3000A"; break;
+ case CPU_R3041: name = "R3041"; break;
+ case CPU_R3051: name = "R3051"; break;
+ case CPU_R3052: name = "R3052"; break;
+ case CPU_R3081: name = "R3081"; break;
+ case CPU_R3081E: name = "R3081E"; break;
+ case CPU_R4000PC: name = "R4000PC"; break;
+ case CPU_R4000SC: name = "R4000SC"; break;
+ case CPU_R4000MC: name = "R4000MC"; break;
+ case CPU_R4200: name = "R4200"; break;
+ case CPU_R4400PC: name = "R4400PC"; break;
+ case CPU_R4400SC: name = "R4400SC"; break;
+ case CPU_R4400MC: name = "R4400MC"; break;
+ case CPU_R4600: name = "R4600"; break;
+ case CPU_R6000: name = "R6000"; break;
+ case CPU_R6000A: name = "R6000A"; break;
+ case CPU_R8000: name = "R8000"; break;
+ case CPU_R10000: name = "R10000"; break;
+ case CPU_R12000: name = "R12000"; break;
+ case CPU_R14000: name = "R14000"; break;
+ case CPU_R4300: name = "R4300"; break;
+ case CPU_R4650: name = "R4650"; break;
+ case CPU_R4700: name = "R4700"; break;
+ case CPU_R5000: name = "R5000"; break;
+ case CPU_R5000A: name = "R5000A"; break;
+ case CPU_R4640: name = "R4640"; break;
+ case CPU_NEVADA: name = "Nevada"; break;
+ case CPU_RM7000: name = "RM7000"; break;
+ case CPU_RM9000: name = "RM9000"; break;
+ case CPU_R5432: name = "R5432"; break;
+ case CPU_4KC: name = "MIPS 4Kc"; break;
+ case CPU_5KC: name = "MIPS 5Kc"; break;
+ case CPU_R4310: name = "R4310"; break;
+ case CPU_SB1: name = "SiByte SB1"; break;
+ case CPU_SB1A: name = "SiByte SB1A"; break;
+ case CPU_TX3912: name = "TX3912"; break;
+ case CPU_TX3922: name = "TX3922"; break;
+ case CPU_TX3927: name = "TX3927"; break;
+ case CPU_AU1000: name = "Au1000"; break;
+ case CPU_AU1500: name = "Au1500"; break;
+ case CPU_AU1100: name = "Au1100"; break;
+ case CPU_AU1550: name = "Au1550"; break;
+ case CPU_AU1200: name = "Au1200"; break;
+ case CPU_4KEC: name = "MIPS 4KEc"; break;
+ case CPU_4KSC: name = "MIPS 4KSc"; break;
+ case CPU_VR41XX: name = "NEC Vr41xx"; break;
+ case CPU_R5500: name = "R5500"; break;
+ case CPU_TX49XX: name = "TX49xx"; break;
+ case CPU_20KC: name = "MIPS 20Kc"; break;
+ case CPU_24K: name = "MIPS 24K"; break;
+ case CPU_25KF: name = "MIPS 25Kf"; break;
+ case CPU_34K: name = "MIPS 34K"; break;
+ case CPU_74K: name = "MIPS 74K"; break;
+ case CPU_VR4111: name = "NEC VR4111"; break;
+ case CPU_VR4121: name = "NEC VR4121"; break;
+ case CPU_VR4122: name = "NEC VR4122"; break;
+ case CPU_VR4131: name = "NEC VR4131"; break;
+ case CPU_VR4133: name = "NEC VR4133"; break;
+ case CPU_VR4181: name = "NEC VR4181"; break;
+ case CPU_VR4181A: name = "NEC VR4181A"; break;
+ case CPU_SR71000: name = "Sandcraft SR71000"; break;
+ case CPU_BCM3302: name = "Broadcom BCM3302"; break;
+ case CPU_BCM4710: name = "Broadcom BCM4710"; break;
+ case CPU_PR4450: name = "Philips PR4450"; break;
+ case CPU_LOONGSON2: name = "ICT Loongson-2"; break;
+ default:
+ BUG();
+ }
+
+ return name;
+}
+
__init void cpu_probe(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
+ unsigned int cpu = smp_processor_id();
c->processor_id = PRID_IMP_UNKNOWN;
c->fpu_id = FPIR_IMP_NONE;
@@ -784,6 +910,9 @@ __init void cpu_probe(void)
case PRID_COMP_SIBYTE:
cpu_probe_sibyte(c);
break;
+ case PRID_COMP_BROADCOM:
+ cpu_probe_broadcom(c);
+ break;
case PRID_COMP_SANDCRAFT:
cpu_probe_sandcraft(c);
break;
@@ -793,6 +922,14 @@ __init void cpu_probe(void)
default:
c->cputype = CPU_UNKNOWN;
}
+
+ /*
+ * Platform code can force the cpu type to optimize code
+ * generation. In that case be sure the cpu type is correctly
+ * manually setup otherwise it could trigger some nasty bugs.
+ */
+ BUG_ON(current_cpu_type() != c->cputype);
+
if (c->options & MIPS_CPU_FPU) {
c->fpu_id = cpu_get_fpu_id();
@@ -804,13 +941,16 @@ __init void cpu_probe(void)
c->ases |= MIPS_ASE_MIPS3D;
}
}
+
+ __cpu_name[cpu] = cpu_to_name(c);
}
__init void cpu_report(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
- printk("CPU revision is: %08x\n", c->processor_id);
+ printk(KERN_INFO "CPU revision is: %08x (%s)\n",
+ c->processor_id, cpu_name_string());
if (c->options & MIPS_CPU_FPU)
- printk("FPU revision is: %08x\n", c->fpu_id);
+ printk(KERN_INFO "FPU revision is: %08x\n", c->fpu_id);
}
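
cpu_report() above prints through cpu_name_string(), which is outside this hunk; given the per-CPU __cpu_name[] table that cpu_probe() now fills in, it is presumably little more than an index by the current CPU. A minimal sketch, not the hunk's actual header code:

/* sketch: assumes __cpu_name[] has been populated by cpu_probe() */
static inline const char *cpu_name_string(void)
{
	return __cpu_name[smp_processor_id()];
}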
diff --git a/arch/mips/kernel/gdb-stub.c b/arch/mips/kernel/gdb-stub.c
index 7bc88204926..3191afa29ad 100644
--- a/arch/mips/kernel/gdb-stub.c
+++ b/arch/mips/kernel/gdb-stub.c
@@ -676,15 +676,18 @@ static void kgdb_wait(void *arg)
static int kgdb_smp_call_kgdb_wait(void)
{
#ifdef CONFIG_SMP
+ cpumask_t mask = cpu_online_map;
struct call_data_struct data;
- int i, cpus = num_online_cpus() - 1;
int cpu = smp_processor_id();
+ int cpus;
/*
* Can die spectacularly if this CPU isn't yet marked online
*/
BUG_ON(!cpu_online(cpu));
+ cpu_clear(cpu, mask);
+ cpus = cpus_weight(mask);
if (!cpus)
return 0;
@@ -711,10 +714,7 @@ static int kgdb_smp_call_kgdb_wait(void)
call_data = &data;
mb();
- /* Send a message to all other CPUs and wait for them to respond */
- for (i = 0; i < NR_CPUS; i++)
- if (cpu_online(i) && i != cpu)
- core_send_ipi(i, SMP_CALL_FUNCTION);
+ core_send_ipi_mask(mask, SMP_CALL_FUNCTION);
/* Wait for response */
/* FIXME: lock-up detection, backtrace on lock-up */
@@ -733,7 +733,7 @@ static int kgdb_smp_call_kgdb_wait(void)
* returns 1 if you should skip the instruction at the trap address, 0
* otherwise.
*/
-void handle_exception (struct gdb_regs *regs)
+void handle_exception(struct gdb_regs *regs)
{
int trap; /* Trap type */
int sigval;
@@ -769,7 +769,7 @@ void handle_exception (struct gdb_regs *regs)
/*
* acquire the CPU spinlocks
*/
- for (i = num_online_cpus()-1; i >= 0; i--)
+ for_each_online_cpu(i)
if (__raw_spin_trylock(&kgdb_cpulock[i]) == 0)
panic("kgdb: couldn't get cpulock %d\n", i);
@@ -902,7 +902,7 @@ void handle_exception (struct gdb_regs *regs)
hex2mem(ptr, (char *)&regs->frame_ptr, 2*sizeof(long), 0, 0);
ptr += 2*(2*sizeof(long));
hex2mem(ptr, (char *)&regs->cp0_index, 16*sizeof(long), 0, 0);
- strcpy(output_buffer,"OK");
+ strcpy(output_buffer, "OK");
}
break;
@@ -917,9 +917,9 @@ void handle_exception (struct gdb_regs *regs)
&& hexToInt(&ptr, &length)) {
if (mem2hex((char *)addr, output_buffer, length, 1))
break;
- strcpy (output_buffer, "E03");
+ strcpy(output_buffer, "E03");
} else
- strcpy(output_buffer,"E01");
+ strcpy(output_buffer, "E01");
break;
/*
@@ -996,7 +996,7 @@ void handle_exception (struct gdb_regs *regs)
ptr = &input_buffer[1];
if (!hexToInt(&ptr, &baudrate))
{
- strcpy(output_buffer,"B01");
+ strcpy(output_buffer, "B01");
break;
}
@@ -1015,7 +1015,7 @@ void handle_exception (struct gdb_regs *regs)
break;
default:
baudrate = 0;
- strcpy(output_buffer,"B02");
+ strcpy(output_buffer, "B02");
goto x1;
}
@@ -1044,7 +1044,7 @@ finish_kgdb:
exit_kgdb_exception:
/* release locks so other CPUs can go */
- for (i = num_online_cpus()-1; i >= 0; i--)
+ for_each_online_cpu(i)
__raw_spin_unlock(&kgdb_cpulock[i]);
spin_unlock(&kgdb_lock);
@@ -1099,12 +1099,12 @@ void adel(void)
* malloc is needed by gdb client in "call func()", even a private one
* will make gdb happy
*/
-static void * __attribute_used__ malloc(size_t size)
+static void __used *malloc(size_t size)
{
return kmalloc(size, GFP_ATOMIC);
}
-static void __attribute_used__ free (void *where)
+static void __used free(void *where)
{
kfree(where);
}
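
The kgdb change above swaps an open-coded NR_CPUS loop for core_send_ipi_mask(). On platforms that only implement the single-CPU core_send_ipi(), the mask variant can be expected to reduce to roughly the loop it replaces; a sketch, assuming only core_send_ipi() exists:

/* equivalent of the removed loop; the cpu_online() test is gone because
 * the caller already passes a mask of online CPUs minus itself */
void core_send_ipi_mask(cpumask_t mask, unsigned int action)
{
	unsigned int i;

	for_each_cpu_mask(i, mask)
		core_send_ipi(i, action);
}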
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index f78538eceef..e46782b0ebc 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -138,10 +138,9 @@
.fill 0x400
#endif
-EXPORT(stext) # used for profiling
EXPORT(_stext)
-#ifdef CONFIG_BOOT_RAW
+#ifndef CONFIG_BOOT_RAW
/*
* Give us a fighting chance of running if execution begins at the
* kernel load address. This is needed because this platform does
diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c
new file mode 100644
index 00000000000..5d9830df359
--- /dev/null
+++ b/arch/mips/kernel/i8253.c
@@ -0,0 +1,213 @@
+/*
+ * i8253.c 8253/PIT functions
+ *
+ */
+#include <linux/clockchips.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+
+#include <asm/delay.h>
+#include <asm/i8253.h>
+#include <asm/io.h>
+
+static DEFINE_SPINLOCK(i8253_lock);
+
+/*
+ * Initialize the PIT timer.
+ *
+ * This is also called after resume to bring the PIT into operation again.
+ */
+static void init_pit_timer(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&i8253_lock, flags);
+
+ switch(mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ /* binary, mode 2, LSB/MSB, ch 0 */
+ outb_p(0x34, PIT_MODE);
+ outb_p(LATCH & 0xff , PIT_CH0); /* LSB */
+ outb(LATCH >> 8 , PIT_CH0); /* MSB */
+ break;
+
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ case CLOCK_EVT_MODE_UNUSED:
+ if (evt->mode == CLOCK_EVT_MODE_PERIODIC ||
+ evt->mode == CLOCK_EVT_MODE_ONESHOT) {
+ outb_p(0x30, PIT_MODE);
+ outb_p(0, PIT_CH0);
+ outb_p(0, PIT_CH0);
+ }
+ break;
+
+ case CLOCK_EVT_MODE_ONESHOT:
+ /* One shot setup */
+ outb_p(0x38, PIT_MODE);
+ break;
+
+ case CLOCK_EVT_MODE_RESUME:
+ /* Nothing to do here */
+ break;
+ }
+ spin_unlock_irqrestore(&i8253_lock, flags);
+}
+
+/*
+ * Program the next event in oneshot mode
+ *
+ * Delta is given in PIT ticks
+ */
+static int pit_next_event(unsigned long delta, struct clock_event_device *evt)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&i8253_lock, flags);
+ outb_p(delta & 0xff , PIT_CH0); /* LSB */
+ outb(delta >> 8 , PIT_CH0); /* MSB */
+ spin_unlock_irqrestore(&i8253_lock, flags);
+
+ return 0;
+}
+
+/*
+ * On UP the PIT can serve all of the possible timer functions. On SMP systems
+ * it can be solely used for the global tick.
+ *
+ * The profiling and update capabilities are switched off once the local apic is
+ * registered. This mechanism replaces the previous #ifdef LOCAL_APIC -
+ * !using_apic_timer decisions in do_timer_interrupt_hook()
+ */
+struct clock_event_device pit_clockevent = {
+ .name = "pit",
+ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+ .set_mode = init_pit_timer,
+ .set_next_event = pit_next_event,
+ .shift = 32,
+ .irq = 0,
+};
+
+irqreturn_t timer_interrupt(int irq, void *dev_id)
+{
+ pit_clockevent.event_handler(&pit_clockevent);
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction irq0 = {
+ .handler = timer_interrupt,
+ .flags = IRQF_DISABLED | IRQF_NOBALANCING,
+ .mask = CPU_MASK_NONE,
+ .name = "timer"
+};
+
+/*
+ * Initialize the conversion factor and the min/max deltas of the clock event
+ * structure and register the clock event source with the framework.
+ */
+void __init setup_pit_timer(void)
+{
+ /*
+ * Start pit with the boot cpu mask and make it global after the
+ * IO_APIC has been initialized.
+ */
+ pit_clockevent.cpumask = cpumask_of_cpu(0);
+ pit_clockevent.mult = div_sc(CLOCK_TICK_RATE, NSEC_PER_SEC, 32);
+ pit_clockevent.max_delta_ns =
+ clockevent_delta2ns(0x7FFF, &pit_clockevent);
+ pit_clockevent.min_delta_ns =
+ clockevent_delta2ns(0xF, &pit_clockevent);
+ clockevents_register_device(&pit_clockevent);
+
+ irq0.mask = cpumask_of_cpu(0);
+ setup_irq(0, &irq0);
+}
+
+/*
+ * Since the PIT overflows every tick, it's not very useful
+ * to just read by itself. So use jiffies to emulate a free
+ * running counter:
+ */
+static cycle_t pit_read(void)
+{
+ unsigned long flags;
+ int count;
+ u32 jifs;
+ static int old_count;
+ static u32 old_jifs;
+
+ spin_lock_irqsave(&i8253_lock, flags);
+ /*
+ * Although our caller may have the read side of xtime_lock,
+ * this is now a seqlock, and we are cheating in this routine
+ * by having side effects on state that we cannot undo if
+ * there is a collision on the seqlock and our caller has to
+ * retry. (Namely, old_jifs and old_count.) So we must treat
+ * jiffies as volatile despite the lock. We read jiffies
+ * before latching the timer count to guarantee that although
+ * the jiffies value might be older than the count (that is,
+ * the counter may underflow between the last point where
+ * jiffies was incremented and the point where we latch the
+ * count), it cannot be newer.
+ */
+ jifs = jiffies;
+ outb_p(0x00, PIT_MODE); /* latch the count ASAP */
+ count = inb_p(PIT_CH0); /* read the latched count */
+ count |= inb_p(PIT_CH0) << 8;
+
+ /* VIA686a test code... reset the latch if count > max + 1 */
+ if (count > LATCH) {
+ outb_p(0x34, PIT_MODE);
+ outb_p(LATCH & 0xff, PIT_CH0);
+ outb(LATCH >> 8, PIT_CH0);
+ count = LATCH - 1;
+ }
+
+ /*
+ * It's possible for count to appear to go the wrong way for a
+ * couple of reasons:
+ *
+ * 1. The timer counter underflows, but we haven't handled the
+ * resulting interrupt and incremented jiffies yet.
+ * 2. Hardware problem with the timer, not giving us continuous time,
+ * the counter does small "jumps" upwards on some Pentium systems,
+ * (see c't 95/10 page 335 for Neptun bug.)
+ *
+ * Previous attempts to handle these cases intelligently were
+ * buggy, so we just do the simple thing now.
+ */
+ if (count > old_count && jifs == old_jifs) {
+ count = old_count;
+ }
+ old_count = count;
+ old_jifs = jifs;
+
+ spin_unlock_irqrestore(&i8253_lock, flags);
+
+ count = (LATCH - 1) - count;
+
+ return (cycle_t)(jifs * LATCH) + count;
+}
+
+static struct clocksource clocksource_pit = {
+ .name = "pit",
+ .rating = 110,
+ .read = pit_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .mult = 0,
+ .shift = 20,
+};
+
+static int __init init_pit_clocksource(void)
+{
+ if (num_possible_cpus() > 1) /* PIT does not scale! */
+ return 0;
+
+ clocksource_pit.mult = clocksource_hz2mult(CLOCK_TICK_RATE, 20);
+ return clocksource_register(&clocksource_pit);
+}
+arch_initcall(init_pit_clocksource);
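
The mult/shift pairs set up in i8253.c above are plain fixed-point scale factors: div_sc(CLOCK_TICK_RATE, NSEC_PER_SEC, 32) computes (tick_rate << 32) / 10^9 and clockevent_delta2ns() inverts it. A back-of-the-envelope sketch with the PIT's 1193182 Hz input clock (numbers approximate, not kernel code):

#include <stdint.h>

/* mult = (1193182ULL << 32) / 1000000000 ~= 5124678, so: */
static uint64_t ns_to_pit_ticks(uint64_t ns, uint32_t mult)
{
	return (ns * mult) >> 32;	/* shift = 32, as above */
}
/* the 0x7FFF cap on max_delta then corresponds to roughly 27.5 ms of
 * oneshot range: 32767 / 1193182 Hz ~= 0.0275 s */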
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index 2345160e63f..47101357710 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -30,14 +30,20 @@
static int i8259A_auto_eoi = -1;
DEFINE_SPINLOCK(i8259A_lock);
-/* some platforms call this... */
-void mask_and_ack_8259A(unsigned int);
+static void disable_8259A_irq(unsigned int irq);
+static void enable_8259A_irq(unsigned int irq);
+static void mask_and_ack_8259A(unsigned int irq);
+static void init_8259A(int auto_eoi);
static struct irq_chip i8259A_chip = {
.name = "XT-PIC",
.mask = disable_8259A_irq,
+ .disable = disable_8259A_irq,
.unmask = enable_8259A_irq,
.mask_ack = mask_and_ack_8259A,
+#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
+ .set_affinity = plat_set_irq_affinity,
+#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
};
/*
@@ -52,7 +58,7 @@ static unsigned int cached_irq_mask = 0xffff;
#define cached_master_mask (cached_irq_mask)
#define cached_slave_mask (cached_irq_mask >> 8)
-void disable_8259A_irq(unsigned int irq)
+static void disable_8259A_irq(unsigned int irq)
{
unsigned int mask;
unsigned long flags;
@@ -68,7 +74,7 @@ void disable_8259A_irq(unsigned int irq)
spin_unlock_irqrestore(&i8259A_lock, flags);
}
-void enable_8259A_irq(unsigned int irq)
+static void enable_8259A_irq(unsigned int irq)
{
unsigned int mask;
unsigned long flags;
@@ -121,14 +127,14 @@ static inline int i8259A_irq_real(unsigned int irq)
int irqmask = 1 << irq;
if (irq < 8) {
- outb(0x0B,PIC_MASTER_CMD); /* ISR register */
+ outb(0x0B, PIC_MASTER_CMD); /* ISR register */
value = inb(PIC_MASTER_CMD) & irqmask;
- outb(0x0A,PIC_MASTER_CMD); /* back to the IRR register */
+ outb(0x0A, PIC_MASTER_CMD); /* back to the IRR register */
return value;
}
- outb(0x0B,PIC_SLAVE_CMD); /* ISR register */
+ outb(0x0B, PIC_SLAVE_CMD); /* ISR register */
value = inb(PIC_SLAVE_CMD) & (irqmask >> 8);
- outb(0x0A,PIC_SLAVE_CMD); /* back to the IRR register */
+ outb(0x0A, PIC_SLAVE_CMD); /* back to the IRR register */
return value;
}
@@ -138,7 +144,7 @@ static inline int i8259A_irq_real(unsigned int irq)
* first, _then_ send the EOI, and the order of EOI
* to the two 8259s is important!
*/
-void mask_and_ack_8259A(unsigned int irq)
+static void mask_and_ack_8259A(unsigned int irq)
{
unsigned int irqmask;
unsigned long flags;
@@ -169,17 +175,14 @@ handle_real_irq:
if (irq & 8) {
inb(PIC_SLAVE_IMR); /* DUMMY - (do we need this?) */
outb(cached_slave_mask, PIC_SLAVE_IMR);
- outb(0x60+(irq&7),PIC_SLAVE_CMD);/* 'Specific EOI' to slave */
- outb(0x60+PIC_CASCADE_IR,PIC_MASTER_CMD); /* 'Specific EOI' to master-IRQ2 */
+ outb(0x60+(irq&7), PIC_SLAVE_CMD);/* 'Specific EOI' to slave */
+ outb(0x60+PIC_CASCADE_IR, PIC_MASTER_CMD); /* 'Specific EOI' to master-IRQ2 */
} else {
inb(PIC_MASTER_IMR); /* DUMMY - (do we need this?) */
outb(cached_master_mask, PIC_MASTER_IMR);
- outb(0x60+irq,PIC_MASTER_CMD); /* 'Specific EOI to master */
+ outb(0x60+irq, PIC_MASTER_CMD); /* 'Specific EOI to master */
}
-#ifdef CONFIG_MIPS_MT_SMTC
- if (irq_hwmask[irq] & ST0_IM)
- set_c0_status(irq_hwmask[irq] & ST0_IM);
-#endif /* CONFIG_MIPS_MT_SMTC */
+ smtc_im_ack_irq(irq);
spin_unlock_irqrestore(&i8259A_lock, flags);
return;
@@ -255,7 +258,7 @@ static int __init i8259A_init_sysfs(void)
device_initcall(i8259A_init_sysfs);
-void init_8259A(int auto_eoi)
+static void init_8259A(int auto_eoi)
{
unsigned long flags;
@@ -302,7 +305,9 @@ void init_8259A(int auto_eoi)
* IRQ2 is cascade interrupt to second interrupt controller
*/
static struct irqaction irq2 = {
- no_action, 0, CPU_MASK_NONE, "cascade", NULL, NULL
+ .handler = no_action,
+ .mask = CPU_MASK_NONE,
+ .name = "cascade",
};
static struct resource pic1_io_resource = {
@@ -324,7 +329,7 @@ static struct resource pic2_io_resource = {
* driver compatibility reasons interrupts 0 - 15 to be the i8259
* interrupts even if the hardware uses a different interrupt numbering.
*/
-void __init init_i8259_irqs (void)
+void __init init_i8259_irqs(void)
{
int i;
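
A worked example of the specific-EOI writes in mask_and_ack_8259A() above, for irq 11 (a slave PIC line); 0x60 is the OCW2 "specific EOI" command and the low three bits select the IR level being acknowledged:

/* slave:  outb(0x60 + (11 & 7), PIC_SLAVE_CMD)        -> OCW2 = 0x63
 * master: outb(0x60 + PIC_CASCADE_IR, PIC_MASTER_CMD) -> OCW2 = 0x62
 * (PIC_CASCADE_IR is 2: the slave is cascaded on master IR2)
 */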
diff --git a/arch/mips/kernel/irixelf.c b/arch/mips/kernel/irixelf.c
index 403d96f99e7..8ef5cf4cc42 100644
--- a/arch/mips/kernel/irixelf.c
+++ b/arch/mips/kernel/irixelf.c
@@ -203,8 +203,8 @@ static unsigned long * create_irix_tables(char * p, int argc, int envc,
* Put the ELF interpreter info on the stack
*/
#define NEW_AUX_ENT(nr, id, val) \
- __put_user ((id), sp+(nr*2)); \
- __put_user ((val), sp+(nr*2+1)); \
+ __put_user((id), sp+(nr*2)); \
+ __put_user((val), sp+(nr*2+1)); \
sp -= 2;
NEW_AUX_ENT(0, AT_NULL, 0);
@@ -212,17 +212,17 @@ static unsigned long * create_irix_tables(char * p, int argc, int envc,
if (exec) {
sp -= 11*2;
- NEW_AUX_ENT (0, AT_PHDR, load_addr + exec->e_phoff);
- NEW_AUX_ENT (1, AT_PHENT, sizeof (struct elf_phdr));
- NEW_AUX_ENT (2, AT_PHNUM, exec->e_phnum);
- NEW_AUX_ENT (3, AT_PAGESZ, ELF_EXEC_PAGESIZE);
- NEW_AUX_ENT (4, AT_BASE, interp_load_addr);
- NEW_AUX_ENT (5, AT_FLAGS, 0);
- NEW_AUX_ENT (6, AT_ENTRY, (elf_addr_t) exec->e_entry);
- NEW_AUX_ENT (7, AT_UID, (elf_addr_t) current->uid);
- NEW_AUX_ENT (8, AT_EUID, (elf_addr_t) current->euid);
- NEW_AUX_ENT (9, AT_GID, (elf_addr_t) current->gid);
- NEW_AUX_ENT (10, AT_EGID, (elf_addr_t) current->egid);
+ NEW_AUX_ENT(0, AT_PHDR, load_addr + exec->e_phoff);
+ NEW_AUX_ENT(1, AT_PHENT, sizeof(struct elf_phdr));
+ NEW_AUX_ENT(2, AT_PHNUM, exec->e_phnum);
+ NEW_AUX_ENT(3, AT_PAGESZ, ELF_EXEC_PAGESIZE);
+ NEW_AUX_ENT(4, AT_BASE, interp_load_addr);
+ NEW_AUX_ENT(5, AT_FLAGS, 0);
+ NEW_AUX_ENT(6, AT_ENTRY, (elf_addr_t) exec->e_entry);
+ NEW_AUX_ENT(7, AT_UID, (elf_addr_t) current->uid);
+ NEW_AUX_ENT(8, AT_EUID, (elf_addr_t) current->euid);
+ NEW_AUX_ENT(9, AT_GID, (elf_addr_t) current->gid);
+ NEW_AUX_ENT(10, AT_EGID, (elf_addr_t) current->egid);
}
#undef NEW_AUX_ENT
@@ -231,16 +231,16 @@ static unsigned long * create_irix_tables(char * p, int argc, int envc,
sp -= argc+1;
argv = sp;
- __put_user((elf_addr_t)argc,--sp);
+ __put_user((elf_addr_t)argc, --sp);
current->mm->arg_start = (unsigned long) p;
while (argc-->0) {
- __put_user((unsigned long)p,argv++);
+ __put_user((unsigned long)p, argv++);
p += strlen_user(p);
}
__put_user((unsigned long) NULL, argv);
current->mm->arg_end = current->mm->env_start = (unsigned long) p;
while (envc-->0) {
- __put_user((unsigned long)p,envp++);
+ __put_user((unsigned long)p, envp++);
p += strlen_user(p);
}
__put_user((unsigned long) NULL, envp);
@@ -581,7 +581,7 @@ static void irix_map_prda_page(void)
struct prda *pp;
down_write(&current->mm->mmap_sem);
- v = do_brk (PRDA_ADDRESS, PAGE_SIZE);
+ v = do_brk(PRDA_ADDRESS, PAGE_SIZE);
up_write(&current->mm->mmap_sem);
if (v < 0)
@@ -815,7 +815,7 @@ out_free_interp:
kfree(elf_interpreter);
out_free_file:
out_free_ph:
- kfree (elf_phdata);
+ kfree(elf_phdata);
goto out;
}
@@ -831,7 +831,7 @@ static int load_irix_library(struct file *file)
int retval;
unsigned int bss;
int error;
- int i,j, k;
+ int i, j, k;
error = kernel_read(file, 0, (char *) &elf_ex, sizeof(elf_ex));
if (error != sizeof(elf_ex))
@@ -1232,7 +1232,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
strlcpy(psinfo.pr_fname, current->comm, sizeof(psinfo.pr_fname));
/* Try to dump the FPU. */
- prstatus.pr_fpvalid = dump_fpu (regs, &fpu);
+ prstatus.pr_fpvalid = dump_fpu(regs, &fpu);
if (!prstatus.pr_fpvalid) {
numnote--;
} else {
diff --git a/arch/mips/kernel/irixinv.c b/arch/mips/kernel/irixinv.c
index de8584f6231..cf2dcd3d6a9 100644
--- a/arch/mips/kernel/irixinv.c
+++ b/arch/mips/kernel/irixinv.c
@@ -14,7 +14,7 @@ int inventory_items = 0;
static inventory_t inventory [MAX_INVENTORY];
-void add_to_inventory (int class, int type, int controller, int unit, int state)
+void add_to_inventory(int class, int type, int controller, int unit, int state)
{
inventory_t *ni = &inventory [inventory_items];
@@ -30,7 +30,7 @@ void add_to_inventory (int class, int type, int controller, int unit, int state)
inventory_items++;
}
-int dump_inventory_to_user (void __user *userbuf, int size)
+int dump_inventory_to_user(void __user *userbuf, int size)
{
inventory_t *inv = &inventory [0];
inventory_t __user *user = userbuf;
@@ -45,7 +45,7 @@ int dump_inventory_to_user (void __user *userbuf, int size)
return -EFAULT;
user++;
}
- return inventory_items * sizeof (inventory_t);
+ return inventory_items * sizeof(inventory_t);
}
int __init init_inventory(void)
@@ -55,24 +55,24 @@ int __init init_inventory(void)
* most likely this will not let just anyone run the X server
* until we put the right values all over the place
*/
- add_to_inventory (10, 3, 0, 0, 16400);
- add_to_inventory (1, 1, 150, -1, 12);
- add_to_inventory (1, 3, 0, 0, 8976);
- add_to_inventory (1, 2, 0, 0, 8976);
- add_to_inventory (4, 8, 0, 0, 2);
- add_to_inventory (5, 5, 0, 0, 1);
- add_to_inventory (3, 3, 0, 0, 32768);
- add_to_inventory (3, 4, 0, 0, 32768);
- add_to_inventory (3, 8, 0, 0, 524288);
- add_to_inventory (3, 9, 0, 0, 64);
- add_to_inventory (3, 1, 0, 0, 67108864);
- add_to_inventory (12, 3, 0, 0, 16);
- add_to_inventory (8, 7, 17, 0, 16777472);
- add_to_inventory (8, 0, 0, 0, 1);
- add_to_inventory (2, 1, 0, 13, 2);
- add_to_inventory (2, 2, 0, 2, 0);
- add_to_inventory (2, 2, 0, 1, 0);
- add_to_inventory (7, 14, 0, 0, 6);
+ add_to_inventory(10, 3, 0, 0, 16400);
+ add_to_inventory(1, 1, 150, -1, 12);
+ add_to_inventory(1, 3, 0, 0, 8976);
+ add_to_inventory(1, 2, 0, 0, 8976);
+ add_to_inventory(4, 8, 0, 0, 2);
+ add_to_inventory(5, 5, 0, 0, 1);
+ add_to_inventory(3, 3, 0, 0, 32768);
+ add_to_inventory(3, 4, 0, 0, 32768);
+ add_to_inventory(3, 8, 0, 0, 524288);
+ add_to_inventory(3, 9, 0, 0, 64);
+ add_to_inventory(3, 1, 0, 0, 67108864);
+ add_to_inventory(12, 3, 0, 0, 16);
+ add_to_inventory(8, 7, 17, 0, 16777472);
+ add_to_inventory(8, 0, 0, 0, 1);
+ add_to_inventory(2, 1, 0, 13, 2);
+ add_to_inventory(2, 2, 0, 2, 0);
+ add_to_inventory(2, 2, 0, 1, 0);
+ add_to_inventory(7, 14, 0, 0, 6);
return 0;
}
diff --git a/arch/mips/kernel/irixioctl.c b/arch/mips/kernel/irixioctl.c
index 30f9eb09db3..2bde200d5ad 100644
--- a/arch/mips/kernel/irixioctl.c
+++ b/arch/mips/kernel/irixioctl.c
@@ -238,7 +238,7 @@ asmlinkage int irix_ioctl(int fd, unsigned long cmd, unsigned long arg)
current->comm, current->pid, cmd);
do_exit(255);
#else
- error = sys_ioctl (fd, cmd, arg);
+ error = sys_ioctl(fd, cmd, arg);
#endif
}
diff --git a/arch/mips/kernel/irixsig.c b/arch/mips/kernel/irixsig.c
index 6980deb6dce..85c2e389edd 100644
--- a/arch/mips/kernel/irixsig.c
+++ b/arch/mips/kernel/irixsig.c
@@ -163,9 +163,9 @@ static inline int handle_signal(unsigned long sig, siginfo_t *info,
ret = setup_irix_frame(ka, regs, sig, oldset);
spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
if (!(ka->sa.sa_flags & SA_NODEFER))
- sigaddset(&current->blocked,sig);
+ sigaddset(&current->blocked, sig);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
@@ -605,8 +605,8 @@ repeat:
current->state = TASK_INTERRUPTIBLE;
read_lock(&tasklist_lock);
tsk = current;
- list_for_each(_p,&tsk->children) {
- p = list_entry(_p,struct task_struct,sibling);
+ list_for_each(_p, &tsk->children) {
+ p = list_entry(_p, struct task_struct, sibling);
if ((type == IRIX_P_PID) && p->pid != pid)
continue;
if ((type == IRIX_P_PGID) && process_group(p) != pid)
@@ -725,7 +725,7 @@ asmlinkage int irix_getcontext(struct pt_regs *regs)
current->comm, current->pid, ctx);
#endif
- if (!access_ok(VERIFY_WRITE, ctx, sizeof(*ctx)));
+ if (!access_ok(VERIFY_WRITE, ctx, sizeof(*ctx)))
return -EFAULT;
error = __put_user(current->thread.irix_oldctx, &ctx->link);
diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c
new file mode 100644
index 00000000000..1b81b131f43
--- /dev/null
+++ b/arch/mips/kernel/irq-gt641xx.c
@@ -0,0 +1,131 @@
+/*
+ * GT641xx IRQ routines.
+ *
+ * Copyright (C) 2007 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <linux/hardirq.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <asm/gt64120.h>
+
+#define GT641XX_IRQ_TO_BIT(irq) (1U << (irq - GT641XX_IRQ_BASE))
+
+static DEFINE_SPINLOCK(gt641xx_irq_lock);
+
+static void ack_gt641xx_irq(unsigned int irq)
+{
+ unsigned long flags;
+ u32 cause;
+
+ spin_lock_irqsave(&gt641xx_irq_lock, flags);
+ cause = GT_READ(GT_INTRCAUSE_OFS);
+ cause &= ~GT641XX_IRQ_TO_BIT(irq);
+ GT_WRITE(GT_INTRCAUSE_OFS, cause);
+ spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
+}
+
+static void mask_gt641xx_irq(unsigned int irq)
+{
+ unsigned long flags;
+ u32 mask;
+
+ spin_lock_irqsave(&gt641xx_irq_lock, flags);
+ mask = GT_READ(GT_INTRMASK_OFS);
+ mask &= ~GT641XX_IRQ_TO_BIT(irq);
+ GT_WRITE(GT_INTRMASK_OFS, mask);
+ spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
+}
+
+static void mask_ack_gt641xx_irq(unsigned int irq)
+{
+ unsigned long flags;
+ u32 cause, mask;
+
+ spin_lock_irqsave(&gt641xx_irq_lock, flags);
+ mask = GT_READ(GT_INTRMASK_OFS);
+ mask &= ~GT641XX_IRQ_TO_BIT(irq);
+ GT_WRITE(GT_INTRMASK_OFS, mask);
+
+ cause = GT_READ(GT_INTRCAUSE_OFS);
+ cause &= ~GT641XX_IRQ_TO_BIT(irq);
+ GT_WRITE(GT_INTRCAUSE_OFS, cause);
+ spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
+}
+
+static void unmask_gt641xx_irq(unsigned int irq)
+{
+ unsigned long flags;
+ u32 mask;
+
+ spin_lock_irqsave(&gt641xx_irq_lock, flags);
+ mask = GT_READ(GT_INTRMASK_OFS);
+ mask |= GT641XX_IRQ_TO_BIT(irq);
+ GT_WRITE(GT_INTRMASK_OFS, mask);
+ spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
+}
+
+static struct irq_chip gt641xx_irq_chip = {
+ .name = "GT641xx",
+ .ack = ack_gt641xx_irq,
+ .mask = mask_gt641xx_irq,
+ .mask_ack = mask_ack_gt641xx_irq,
+ .unmask = unmask_gt641xx_irq,
+};
+
+void gt641xx_irq_dispatch(void)
+{
+ u32 cause, mask;
+ int i;
+
+ cause = GT_READ(GT_INTRCAUSE_OFS);
+ mask = GT_READ(GT_INTRMASK_OFS);
+ cause &= mask;
+
+ /*
+ * bit0 : logical or of all the interrupt bits.
+ * bit30: logical or of bits[29:26,20:1].
+ * bit31: logical or of bits[25:1].
+ */
+ for (i = 1; i < 30; i++) {
+ if (cause & (1U << i)) {
+ do_IRQ(GT641XX_IRQ_BASE + i);
+ return;
+ }
+ }
+
+ atomic_inc(&irq_err_count);
+}
+
+void __init gt641xx_irq_init(void)
+{
+ int i;
+
+ GT_WRITE(GT_INTRMASK_OFS, 0);
+ GT_WRITE(GT_INTRCAUSE_OFS, 0);
+
+ /*
+ * bit0 : logical or of all the interrupt bits.
+ * bit30: logical or of bits[29:26,20:1].
+ * bit31: logical or of bits[25:1].
+ */
+ for (i = 1; i < 30; i++)
+ set_irq_chip_and_handler(GT641XX_IRQ_BASE + i,
+ &gt641xx_irq_chip, handle_level_irq);
+}
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c
index 410868b5ea5..4edc7e451d9 100644
--- a/arch/mips/kernel/irq-msc01.c
+++ b/arch/mips/kernel/irq-msc01.c
@@ -52,11 +52,8 @@ static void level_mask_and_ack_msc_irq(unsigned int irq)
mask_msc_irq(irq);
if (!cpu_has_veic)
MSCIC_WRITE(MSC01_IC_EOI, 0);
-#ifdef CONFIG_MIPS_MT_SMTC
/* This actually needs to be a call into platform code */
- if (irq_hwmask[irq] & ST0_IM)
- set_c0_status(irq_hwmask[irq] & ST0_IM);
-#endif /* CONFIG_MIPS_MT_SMTC */
+ smtc_im_ack_irq(irq);
}
/*
@@ -73,10 +70,7 @@ static void edge_mask_and_ack_msc_irq(unsigned int irq)
MSCIC_WRITE(MSC01_IC_SUP+irq*8, r | ~MSC01_IC_SUP_EDGE_BIT);
MSCIC_WRITE(MSC01_IC_SUP+irq*8, r);
}
-#ifdef CONFIG_MIPS_MT_SMTC
- if (irq_hwmask[irq] & ST0_IM)
- set_c0_status(irq_hwmask[irq] & ST0_IM);
-#endif /* CONFIG_MIPS_MT_SMTC */
+ smtc_im_ack_irq(irq);
}
/*
@@ -105,7 +99,7 @@ void ll_msc_irq(void)
}
void
-msc_bind_eic_interrupt (unsigned int irq, unsigned int set)
+msc_bind_eic_interrupt(unsigned int irq, unsigned int set)
{
MSCIC_WRITE(MSC01_IC_RAMW,
(irq<<MSC01_IC_RAMW_ADDR_SHF) | (set<<MSC01_IC_RAMW_DATA_SHF));
@@ -136,7 +130,7 @@ void __init init_msc_irqs(unsigned long icubase, unsigned int irqbase, msc_irqma
{
extern void (*board_bind_eic_interrupt)(unsigned int irq, unsigned int regset);
- _icctrl_msc = (unsigned long) ioremap (icubase, 0x40000);
+ _icctrl_msc = (unsigned long) ioremap(icubase, 0x40000);
/* Reset interrupt controller - initialises all registers to 0 */
MSCIC_WRITE(MSC01_IC_RST, MSC01_IC_RST_RST_BIT);
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index aeded6c17de..d06e9c9af79 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -74,20 +74,12 @@ EXPORT_SYMBOL_GPL(free_irqno);
*/
void ack_bad_irq(unsigned int irq)
{
+ smtc_im_ack_irq(irq);
printk("unexpected IRQ # %d\n", irq);
}
atomic_t irq_err_count;
-#ifdef CONFIG_MIPS_MT_SMTC
-/*
- * SMTC Kernel needs to manipulate low-level CPU interrupt mask
- * in do_IRQ. These are passed in setup_irq_smtc() and stored
- * in this table.
- */
-unsigned long irq_hwmask[NR_IRQS];
-#endif /* CONFIG_MIPS_MT_SMTC */
-
/*
* Generic, controller-independent functions:
*/
@@ -101,7 +93,7 @@ int show_interrupts(struct seq_file *p, void *v)
if (i == 0) {
seq_printf(p, " ");
for_each_online_cpu(j)
- seq_printf(p, "CPU%d ",j);
+ seq_printf(p, "CPU%d ", j);
seq_putc(p, '\n');
}
@@ -110,7 +102,7 @@ int show_interrupts(struct seq_file *p, void *v)
action = irq_desc[i].action;
if (!action)
goto skip;
- seq_printf(p, "%3d: ",i);
+ seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
seq_printf(p, "%10u ", kstat_irqs(i));
#else
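
Three call sites in this patch (i8259.c, irq-msc01.c, and ack_bad_irq() above) now share smtc_im_ack_irq() instead of open-coding the SMTC status-IM restore. Judging from the #ifdef blocks removed above, the helper amounts to something like the following sketch, compiling away when CONFIG_MIPS_MT_SMTC is off (and assuming irq_hwmask[] now lives with the SMTC code):

static inline void smtc_im_ack_irq(unsigned int irq)
{
#ifdef CONFIG_MIPS_MT_SMTC
	if (irq_hwmask[irq] & ST0_IM)
		set_c0_status(irq_hwmask[irq] & ST0_IM);
#endif
}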
diff --git a/arch/mips/kernel/irq_txx9.c b/arch/mips/kernel/irq_txx9.c
new file mode 100644
index 00000000000..a4d1462c27f
--- /dev/null
+++ b/arch/mips/kernel/irq_txx9.c
@@ -0,0 +1,192 @@
+/*
+ * linux/arch/mips/kernel/irq_txx9.c
+ *
+ * Based on linux/arch/mips/jmr3927/rbhma3100/irq.c,
+ * linux/arch/mips/tx4927/common/tx4927_irq.c,
+ * linux/arch/mips/tx4938/common/irq.c
+ *
+ * Copyright 2001, 2003-2005 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc.
+ * ahennessy@mvista.com
+ * source@mvista.com
+ * Copyright (C) 2000-2001 Toshiba Corporation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <asm/txx9irq.h>
+
+struct txx9_irc_reg {
+ u32 cer;
+ u32 cr[2];
+ u32 unused0;
+ u32 ilr[8];
+ u32 unused1[4];
+ u32 imr;
+ u32 unused2[7];
+ u32 scr;
+ u32 unused3[7];
+ u32 ssr;
+ u32 unused4[7];
+ u32 csr;
+};
+
+/* IRCER : Int. Control Enable */
+#define TXx9_IRCER_ICE 0x00000001
+
+/* IRCR : Int. Control */
+#define TXx9_IRCR_LOW 0x00000000
+#define TXx9_IRCR_HIGH 0x00000001
+#define TXx9_IRCR_DOWN 0x00000002
+#define TXx9_IRCR_UP 0x00000003
+#define TXx9_IRCR_EDGE(cr) ((cr) & 0x00000002)
+
+/* IRSCR : Int. Status Control */
+#define TXx9_IRSCR_EIClrE 0x00000100
+#define TXx9_IRSCR_EIClr_MASK 0x0000000f
+
+/* IRCSR : Int. Current Status */
+#define TXx9_IRCSR_IF 0x00010000
+#define TXx9_IRCSR_ILV_MASK 0x00000700
+#define TXx9_IRCSR_IVL_MASK 0x0000001f
+
+#define irc_dlevel 0
+#define irc_elevel 1
+
+static struct txx9_irc_reg __iomem *txx9_ircptr __read_mostly;
+
+static struct {
+ unsigned char level;
+ unsigned char mode;
+} txx9irq[TXx9_MAX_IR] __read_mostly;
+
+static void txx9_irq_unmask(unsigned int irq)
+{
+ unsigned int irq_nr = irq - TXX9_IRQ_BASE;
+ u32 __iomem *ilrp = &txx9_ircptr->ilr[(irq_nr % 16) / 2];
+ int ofs = irq_nr / 16 * 16 + (irq_nr & 1) * 8;
+
+ __raw_writel((__raw_readl(ilrp) & ~(0xff << ofs))
+ | (txx9irq[irq_nr].level << ofs),
+ ilrp);
+#ifdef CONFIG_CPU_TX39XX
+ /* update IRCSR */
+ __raw_writel(0, &txx9_ircptr->imr);
+ __raw_writel(irc_elevel, &txx9_ircptr->imr);
+#endif
+}
+
+static inline void txx9_irq_mask(unsigned int irq)
+{
+ unsigned int irq_nr = irq - TXX9_IRQ_BASE;
+ u32 __iomem *ilrp = &txx9_ircptr->ilr[(irq_nr % 16) / 2];
+ int ofs = irq_nr / 16 * 16 + (irq_nr & 1) * 8;
+
+ __raw_writel((__raw_readl(ilrp) & ~(0xff << ofs))
+ | (irc_dlevel << ofs),
+ ilrp);
+#ifdef CONFIG_CPU_TX39XX
+ /* update IRCSR */
+ __raw_writel(0, &txx9_ircptr->imr);
+ __raw_writel(irc_elevel, &txx9_ircptr->imr);
+ /* flush write buffer */
+ __raw_readl(&txx9_ircptr->ssr);
+#else
+ mmiowb();
+#endif
+}
+
+static void txx9_irq_mask_ack(unsigned int irq)
+{
+ unsigned int irq_nr = irq - TXX9_IRQ_BASE;
+
+ txx9_irq_mask(irq);
+ /* clear edge detection */
+ if (unlikely(TXx9_IRCR_EDGE(txx9irq[irq_nr].mode)))
+ __raw_writel(TXx9_IRSCR_EIClrE | irq_nr, &txx9_ircptr->scr);
+}
+
+static int txx9_irq_set_type(unsigned int irq, unsigned int flow_type)
+{
+ unsigned int irq_nr = irq - TXX9_IRQ_BASE;
+ u32 cr;
+ u32 __iomem *crp;
+ int ofs;
+ int mode;
+
+ if (flow_type & IRQF_TRIGGER_PROBE)
+ return 0;
+ switch (flow_type & IRQF_TRIGGER_MASK) {
+ case IRQF_TRIGGER_RISING: mode = TXx9_IRCR_UP; break;
+ case IRQF_TRIGGER_FALLING: mode = TXx9_IRCR_DOWN; break;
+ case IRQF_TRIGGER_HIGH: mode = TXx9_IRCR_HIGH; break;
+ case IRQF_TRIGGER_LOW: mode = TXx9_IRCR_LOW; break;
+ default:
+ return -EINVAL;
+ }
+ crp = &txx9_ircptr->cr[(unsigned int)irq_nr / 8];
+ cr = __raw_readl(crp);
+ ofs = (irq_nr & (8 - 1)) * 2;
+ cr &= ~(0x3 << ofs);
+ cr |= (mode & 0x3) << ofs;
+ __raw_writel(cr, crp);
+ txx9irq[irq_nr].mode = mode;
+ return 0;
+}
+
+static struct irq_chip txx9_irq_chip = {
+ .name = "TXX9",
+ .ack = txx9_irq_mask_ack,
+ .mask = txx9_irq_mask,
+ .mask_ack = txx9_irq_mask_ack,
+ .unmask = txx9_irq_unmask,
+ .set_type = txx9_irq_set_type,
+};
+
+void __init txx9_irq_init(unsigned long baseaddr)
+{
+ int i;
+
+ txx9_ircptr = ioremap(baseaddr, sizeof(struct txx9_irc_reg));
+ for (i = 0; i < TXx9_MAX_IR; i++) {
+ txx9irq[i].level = 4; /* middle level */
+ txx9irq[i].mode = TXx9_IRCR_LOW;
+ set_irq_chip_and_handler(TXX9_IRQ_BASE + i,
+ &txx9_irq_chip, handle_level_irq);
+ }
+
+ /* mask all IRC interrupts */
+ __raw_writel(0, &txx9_ircptr->imr);
+ for (i = 0; i < 8; i++)
+ __raw_writel(0, &txx9_ircptr->ilr[i]);
+ /* setup IRC interrupt mode (Low Active) */
+ for (i = 0; i < 2; i++)
+ __raw_writel(0, &txx9_ircptr->cr[i]);
+ /* enable interrupt control */
+ __raw_writel(TXx9_IRCER_ICE, &txx9_ircptr->cer);
+ __raw_writel(irc_elevel, &txx9_ircptr->imr);
+}
+
+int __init txx9_irq_set_pri(int irc_irq, int new_pri)
+{
+ int old_pri;
+
+ if ((unsigned int)irc_irq >= TXx9_MAX_IR)
+ return 0;
+ old_pri = txx9irq[irc_irq].level;
+ txx9irq[irc_irq].level = new_pri;
+ return old_pri;
+}
+
+int txx9_irq(void)
+{
+ u32 csr = __raw_readl(&txx9_ircptr->csr);
+
+ if (likely(!(csr & TXx9_IRCSR_IF)))
+ return TXX9_IRQ_BASE + (csr & (TXx9_MAX_IR - 1));
+ return -1;
+}
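
The ILR addressing in txx9_irq_mask()/txx9_irq_unmask() above packs four 8-bit level fields into each 32-bit ilr word, pairing irq N with irq N+16. Two worked examples, taking TXX9_IRQ_BASE as 0 for simplicity:

/* irq_nr 5:  ilr[(5 % 16) / 2]  = ilr[2], ofs = 5/16*16  + (5 & 1)*8  = 8
 * irq_nr 21: ilr[(21 % 16) / 2] = ilr[2], ofs = 21/16*16 + (21 & 1)*8 = 24
 * i.e. both levels live in ilr[2], at bits 15:8 and 31:24 respectively.
 */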
diff --git a/arch/mips/kernel/kspd.c b/arch/mips/kernel/kspd.c
index c6580018c94..d2c2e00e586 100644
--- a/arch/mips/kernel/kspd.c
+++ b/arch/mips/kernel/kspd.c
@@ -89,7 +89,7 @@ static int sp_stopping = 0;
#define MTSP_O_EXCL 0x0800
#define MTSP_O_BINARY 0x8000
-#define SP_VPE 1
+extern int tclimit;
struct apsp_table {
int sp;
@@ -118,11 +118,11 @@ struct apsp_table syscall_command_table[] = {
static int sp_syscall(int num, int arg0, int arg1, int arg2, int arg3)
{
- register long int _num __asm__ ("$2") = num;
- register long int _arg0 __asm__ ("$4") = arg0;
- register long int _arg1 __asm__ ("$5") = arg1;
- register long int _arg2 __asm__ ("$6") = arg2;
- register long int _arg3 __asm__ ("$7") = arg3;
+ register long int _num __asm__("$2") = num;
+ register long int _arg0 __asm__("$4") = arg0;
+ register long int _arg1 __asm__("$5") = arg1;
+ register long int _arg2 __asm__("$6") = arg2;
+ register long int _arg3 __asm__("$7") = arg3;
mm_segment_t old_fs;
@@ -225,8 +225,8 @@ void sp_work_handle_request(void)
/* Run the syscall at the privilege of the user who loaded the
SP program */
- if (vpe_getuid(SP_VPE))
- sp_setfsuidgid( vpe_getuid(SP_VPE), vpe_getgid(SP_VPE));
+ if (vpe_getuid(tclimit))
+ sp_setfsuidgid(vpe_getuid(tclimit), vpe_getgid(tclimit));
switch (sc.cmd) {
/* needs the flags argument translating from SDE kit to
@@ -239,13 +239,13 @@ void sp_work_handle_request(void)
case MTSP_SYSCALL_GETTOD:
memset(&tz, 0, sizeof(tz));
if ((ret.retval = sp_syscall(__NR_gettimeofday, (int)&tv,
- (int)&tz, 0,0)) == 0)
+ (int)&tz, 0, 0)) == 0)
ret.retval = tv.tv_sec;
break;
case MTSP_SYSCALL_EXIT:
list_for_each_entry(n, &kspd_notifylist, list)
- n->kspd_sp_exit(SP_VPE);
+ n->kspd_sp_exit(tclimit);
sp_stopping = 1;
printk(KERN_DEBUG "KSPD got exit syscall from SP exitcode %d\n",
@@ -255,7 +255,7 @@ void sp_work_handle_request(void)
case MTSP_SYSCALL_OPEN:
generic.arg1 = translate_open_flags(generic.arg1);
- vcwd = vpe_getcwd(SP_VPE);
+ vcwd = vpe_getcwd(tclimit);
/* change to the cwd of the process that loaded the SP program */
old_fs = get_fs();
@@ -283,7 +283,7 @@ void sp_work_handle_request(void)
break;
} /* switch */
- if (vpe_getuid(SP_VPE))
+ if (vpe_getuid(tclimit))
sp_setfsuidgid( 0, 0);
old_fs = get_fs();
@@ -364,10 +364,9 @@ static void startwork(int vpe)
}
INIT_WORK(&work, sp_work);
- queue_work(workqueue, &work);
- } else
- queue_work(workqueue, &work);
+ }
+ queue_work(workqueue, &work);
}
static void stopwork(int vpe)
@@ -389,7 +388,7 @@ static int kspd_module_init(void)
notify.start = startwork;
notify.stop = stopwork;
- vpe_notify(SP_VPE, &notify);
+ vpe_notify(tclimit, &notify);
return 0;
}
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 06e04da211d..d6e01215fb2 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -58,10 +58,10 @@
#define AA(__x) ((unsigned long)((int)__x))
#ifdef __MIPSEB__
-#define merge_64(r1,r2) ((((r1) & 0xffffffffUL) << 32) + ((r2) & 0xffffffffUL))
+#define merge_64(r1, r2) ((((r1) & 0xffffffffUL) << 32) + ((r2) & 0xffffffffUL))
#endif
#ifdef __MIPSEL__
-#define merge_64(r1,r2) ((((r2) & 0xffffffffUL) << 32) + ((r1) & 0xffffffffUL))
+#define merge_64(r1, r2) ((((r2) & 0xffffffffUL) << 32) + ((r1) & 0xffffffffUL))
#endif
/*
@@ -96,7 +96,7 @@ int cp_compat_stat(struct kstat *stat, struct compat_stat __user *statbuf)
#endif
tmp.st_blocks = stat->blocks;
tmp.st_blksize = stat->blksize;
- return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
+ return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}
asmlinkage unsigned long
@@ -300,13 +300,13 @@ asmlinkage int sys32_sched_rr_get_interval(compat_pid_t pid,
{
struct timespec t;
int ret;
- mm_segment_t old_fs = get_fs ();
+ mm_segment_t old_fs = get_fs();
- set_fs (KERNEL_DS);
+ set_fs(KERNEL_DS);
ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
- set_fs (old_fs);
+ set_fs(old_fs);
if (put_user (t.tv_sec, &interval->tv_sec) ||
- __put_user (t.tv_nsec, &interval->tv_nsec))
+ __put_user(t.tv_nsec, &interval->tv_nsec))
return -EFAULT;
return ret;
}
@@ -314,7 +314,7 @@ asmlinkage int sys32_sched_rr_get_interval(compat_pid_t pid,
#ifdef CONFIG_SYSVIPC
asmlinkage long
-sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
+sys32_ipc(u32 call, int first, int second, int third, u32 ptr, u32 fifth)
{
int version, err;
@@ -373,7 +373,7 @@ sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
#else
asmlinkage long
-sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
+sys32_ipc(u32 call, int first, int second, int third, u32 ptr, u32 fifth)
{
return -ENOSYS;
}
@@ -505,16 +505,16 @@ asmlinkage int sys32_ustat(dev_t dev, struct ustat32 __user * ubuf32)
set_fs(KERNEL_DS);
err = sys_ustat(dev, (struct ustat __user *)&tmp);
- set_fs (old_fs);
+ set_fs(old_fs);
if (err)
goto out;
- memset(&tmp32,0,sizeof(struct ustat32));
+ memset(&tmp32, 0, sizeof(struct ustat32));
tmp32.f_tfree = tmp.f_tfree;
tmp32.f_tinode = tmp.f_tinode;
- err = copy_to_user(ubuf32,&tmp32,sizeof(struct ustat32)) ? -EFAULT : 0;
+ err = copy_to_user(ubuf32, &tmp32, sizeof(struct ustat32)) ? -EFAULT : 0;
out:
return err;
@@ -566,8 +566,15 @@ asmlinkage long sys32_fadvise64_64(int fd, int __pad,
flags);
}
+asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_a2,
+ unsigned offset_a3, unsigned len_a4, unsigned len_a5)
+{
+ return sys_fallocate(fd, mode, merge_64(offset_a2, offset_a3),
+ merge_64(len_a4, len_a5));
+}
+
save_static_function(sys32_clone);
-__attribute_used__ noinline static int
+static int noinline __used
_sys32_clone(nabi_no_regargs struct pt_regs regs)
{
unsigned long clone_flags;
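
sys32_fallocate() above follows the usual o32 convention for 64-bit syscall arguments: each 64-bit value arrives in an aligned pair of 32-bit argument registers and merge_64() reassembles it. A worked example for a big-endian (__MIPSEB__) kernel, with a hypothetical offset:

/* offset 0x0000000180000000 arrives as a2 = 0x00000001 (high word)
 * and a3 = 0x80000000 (low word):
 *   merge_64(0x00000001, 0x80000000)
 *     = ((0x00000001UL & 0xffffffffUL) << 32) + (0x80000000UL & 0xffffffffUL)
 *     = 0x0000000180000000
 * __MIPSEL__ simply swaps the roles of the two registers. */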
diff --git a/arch/mips/kernel/machine_kexec.c b/arch/mips/kernel/machine_kexec.c
index 8f42fa85ac9..85beb9b0b2d 100644
--- a/arch/mips/kernel/machine_kexec.c
+++ b/arch/mips/kernel/machine_kexec.c
@@ -14,7 +14,7 @@
#include <asm/page.h>
extern const unsigned char relocate_new_kernel[];
-extern const unsigned int relocate_new_kernel_size;
+extern const size_t relocate_new_kernel_size;
extern unsigned long kexec_start_address;
extern unsigned long kexec_indirection_page;
@@ -40,6 +40,8 @@ machine_crash_shutdown(struct pt_regs *regs)
{
}
+typedef void (*noretfun_t)(void) __attribute__((noreturn));
+
void
machine_kexec(struct kimage *image)
{
@@ -51,7 +53,8 @@ machine_kexec(struct kimage *image)
(unsigned long)page_address(image->control_code_page);
kexec_start_address = image->start;
- kexec_indirection_page = phys_to_virt(image->head & PAGE_MASK);
+ kexec_indirection_page =
+ (unsigned long) phys_to_virt(image->head & PAGE_MASK);
memcpy((void*)reboot_code_buffer, relocate_new_kernel,
relocate_new_kernel_size);
@@ -67,7 +70,7 @@ machine_kexec(struct kimage *image)
phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION ||
*ptr & IND_DESTINATION)
- *ptr = phys_to_virt(*ptr);
+ *ptr = (unsigned long) phys_to_virt(*ptr);
}
/*
@@ -75,11 +78,8 @@ machine_kexec(struct kimage *image)
*/
local_irq_disable();
- flush_icache_range(reboot_code_buffer,
- reboot_code_buffer + KEXEC_CONTROL_CODE_SIZE);
-
- printk("Will call new kernel at %08x\n", image->start);
+ printk("Will call new kernel at %08lx\n", image->start);
printk("Bye ...\n");
- flush_cache_all();
- ((void (*)(void))reboot_code_buffer)();
+ __flush_cache_all();
+ ((noretfun_t) reboot_code_buffer)();
}
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index ede5d73d652..892665bb12b 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -50,6 +50,7 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
cpumask_t effective_mask;
int retval;
struct task_struct *p;
+ struct thread_info *ti;
if (len < sizeof(new_mask))
return -EINVAL;
@@ -93,16 +94,16 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
read_unlock(&tasklist_lock);
/* Compute new global allowed CPU set if necessary */
- if ((p->thread.mflags & MF_FPUBOUND)
- && cpus_intersects(new_mask, mt_fpu_cpumask)) {
+ ti = task_thread_info(p);
+ if (test_ti_thread_flag(ti, TIF_FPUBOUND) &&
+ cpus_intersects(new_mask, mt_fpu_cpumask)) {
cpus_and(effective_mask, new_mask, mt_fpu_cpumask);
retval = set_cpus_allowed(p, effective_mask);
} else {
- p->thread.mflags &= ~MF_FPUBOUND;
+ clear_ti_thread_flag(ti, TIF_FPUBOUND);
retval = set_cpus_allowed(p, new_mask);
}
-
out_unlock:
put_task_struct(p);
unlock_cpu_hotplug();
diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c
index 1a7d8923129..3d6b1ec1f32 100644
--- a/arch/mips/kernel/mips-mt.c
+++ b/arch/mips/kernel/mips-mt.c
@@ -4,6 +4,7 @@
*/
#include <linux/device.h>
+#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/module.h>
@@ -21,6 +22,28 @@
#include <asm/r4kcache.h>
#include <asm/cacheflush.h>
+int vpelimit;
+
+static int __init maxvpes(char *str)
+{
+ get_option(&str, &vpelimit);
+
+ return 1;
+}
+
+__setup("maxvpes=", maxvpes);
+
+int tclimit;
+
+static int __init maxtcs(char *str)
+{
+ get_option(&str, &tclimit);
+
+ return 1;
+}
+
+__setup("maxtcs=", maxtcs);
+
/*
* Dump new MIPS MT state for the core. Does not leave TCs halted.
* Takes an argument which taken to be a pre-call MVPControl value.
@@ -62,8 +85,9 @@ void mips_mt_regdump(unsigned long mvpctl)
read_vpe_c0_vpeconf0());
printk(" VPE%d.Status : %08lx\n",
i, read_vpe_c0_status());
- printk(" VPE%d.EPC : %08lx\n",
+ printk(" VPE%d.EPC : %08lx ",
i, read_vpe_c0_epc());
+ print_symbol("%s\n", read_vpe_c0_epc());
printk(" VPE%d.Cause : %08lx\n",
i, read_vpe_c0_cause());
printk(" VPE%d.Config7 : %08lx\n",
@@ -88,7 +112,8 @@ void mips_mt_regdump(unsigned long mvpctl)
}
printk(" TCStatus : %08lx\n", tcstatval);
printk(" TCBind : %08lx\n", read_tc_c0_tcbind());
- printk(" TCRestart : %08lx\n", read_tc_c0_tcrestart());
+ printk(" TCRestart : %08lx ", read_tc_c0_tcrestart());
+ print_symbol("%s\n", read_tc_c0_tcrestart());
printk(" TCHalt : %08lx\n", haltval);
printk(" TCContext : %08lx\n", read_tc_c0_tccontext());
if (!haltval)
@@ -211,7 +236,7 @@ void mips_mt_set_cpuoptions(void)
if (oconfig7 != nconfig7) {
__asm__ __volatile("sync");
write_c0_config7(nconfig7);
- ehb ();
+ ehb();
printk("Config7: 0x%08x\n", read_c0_config7());
}
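
The two __setup() handlers above turn the limits into boot-time tunables: get_option() parses the integer after the '=' into vpelimit/tclimit, and tclimit also replaces the hard-coded SP_VPE in kspd.c earlier in this patch. For example (values hypothetical), booting with the following command-line fragment caps the MT code at one VPE and two TCs:

	maxvpes=1 maxtcs=2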
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index ec04f5a1a5e..efd2d131412 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -17,76 +17,6 @@
unsigned int vced_count, vcei_count;
-static const char *cpu_name[] = {
- [CPU_UNKNOWN] = "unknown",
- [CPU_R2000] = "R2000",
- [CPU_R3000] = "R3000",
- [CPU_R3000A] = "R3000A",
- [CPU_R3041] = "R3041",
- [CPU_R3051] = "R3051",
- [CPU_R3052] = "R3052",
- [CPU_R3081] = "R3081",
- [CPU_R3081E] = "R3081E",
- [CPU_R4000PC] = "R4000PC",
- [CPU_R4000SC] = "R4000SC",
- [CPU_R4000MC] = "R4000MC",
- [CPU_R4200] = "R4200",
- [CPU_R4400PC] = "R4400PC",
- [CPU_R4400SC] = "R4400SC",
- [CPU_R4400MC] = "R4400MC",
- [CPU_R4600] = "R4600",
- [CPU_R6000] = "R6000",
- [CPU_R6000A] = "R6000A",
- [CPU_R8000] = "R8000",
- [CPU_R10000] = "R10000",
- [CPU_R12000] = "R12000",
- [CPU_R14000] = "R14000",
- [CPU_R4300] = "R4300",
- [CPU_R4650] = "R4650",
- [CPU_R4700] = "R4700",
- [CPU_R5000] = "R5000",
- [CPU_R5000A] = "R5000A",
- [CPU_R4640] = "R4640",
- [CPU_NEVADA] = "Nevada",
- [CPU_RM7000] = "RM7000",
- [CPU_RM9000] = "RM9000",
- [CPU_R5432] = "R5432",
- [CPU_4KC] = "MIPS 4Kc",
- [CPU_5KC] = "MIPS 5Kc",
- [CPU_R4310] = "R4310",
- [CPU_SB1] = "SiByte SB1",
- [CPU_SB1A] = "SiByte SB1A",
- [CPU_TX3912] = "TX3912",
- [CPU_TX3922] = "TX3922",
- [CPU_TX3927] = "TX3927",
- [CPU_AU1000] = "Au1000",
- [CPU_AU1500] = "Au1500",
- [CPU_AU1100] = "Au1100",
- [CPU_AU1550] = "Au1550",
- [CPU_AU1200] = "Au1200",
- [CPU_4KEC] = "MIPS 4KEc",
- [CPU_4KSC] = "MIPS 4KSc",
- [CPU_VR41XX] = "NEC Vr41xx",
- [CPU_R5500] = "R5500",
- [CPU_TX49XX] = "TX49xx",
- [CPU_20KC] = "MIPS 20Kc",
- [CPU_24K] = "MIPS 24K",
- [CPU_25KF] = "MIPS 25Kf",
- [CPU_34K] = "MIPS 34K",
- [CPU_74K] = "MIPS 74K",
- [CPU_VR4111] = "NEC VR4111",
- [CPU_VR4121] = "NEC VR4121",
- [CPU_VR4122] = "NEC VR4122",
- [CPU_VR4131] = "NEC VR4131",
- [CPU_VR4133] = "NEC VR4133",
- [CPU_VR4181] = "NEC VR4181",
- [CPU_VR4181A] = "NEC VR4181A",
- [CPU_SR71000] = "Sandcraft SR71000",
- [CPU_PR4450] = "Philips PR4450",
- [CPU_LOONGSON2] = "ICT Loongson-2",
-};
-
-
static int show_cpuinfo(struct seq_file *m, void *v)
{
unsigned long n = (unsigned long) v - 1;
@@ -108,8 +38,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "processor\t\t: %ld\n", n);
sprintf(fmt, "cpu model\t\t: %%s V%%d.%%d%s\n",
cpu_data[n].options & MIPS_CPU_FPU ? " FPU V%d.%d" : "");
- seq_printf(m, fmt, cpu_name[cpu_data[n].cputype <= CPU_LAST ?
- cpu_data[n].cputype : CPU_UNKNOWN],
+ seq_printf(m, fmt, __cpu_name[n],
(version >> 4) & 0x0f, version & 0x0f,
(fp_vers >> 4) & 0x0f, fp_vers & 0x0f);
seq_printf(m, "BogoMIPS\t\t: %lu.%02lu\n",
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 8f4cf27c715..11cb264f59c 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -11,6 +11,7 @@
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
+#include <linux/tick.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
@@ -25,7 +26,9 @@
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
+#include <linux/random.h>
+#include <asm/asm.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/dsp.h>
@@ -50,6 +53,7 @@ void __noreturn cpu_idle(void)
{
/* endless idle loop with no priority at all */
while (1) {
+ tick_nohz_stop_sched_tick();
while (!need_resched()) {
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
extern void smtc_idle_loop_hook(void);
@@ -59,6 +63,7 @@ void __noreturn cpu_idle(void)
if (cpu_wait)
(*cpu_wait)();
}
+ tick_nohz_restart_sched_tick();
preempt_enable_no_resched();
schedule();
preempt_disable();
@@ -75,7 +80,7 @@ void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|KU_MASK);
#ifdef CONFIG_64BIT
status &= ~ST0_FR;
- status |= (current->thread.mflags & MF_32BIT_REGS) ? 0 : ST0_FR;
+ status |= test_thread_flag(TIF_32BIT_REGS) ? 0 : ST0_FR;
#endif
status |= KU_USER;
regs->cp0_status = status;
@@ -197,13 +202,13 @@ void elf_dump_regs(elf_greg_t *gp, struct pt_regs *regs)
#endif
}
-int dump_task_regs (struct task_struct *tsk, elf_gregset_t *regs)
+int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
elf_dump_regs(*regs, task_pt_regs(tsk));
return 1;
}
-int dump_task_fpu (struct task_struct *t, elf_fpregset_t *fpr)
+int dump_task_fpu(struct task_struct *t, elf_fpregset_t *fpr)
{
memcpy(fpr, &t->thread.fpu, sizeof(current->thread.fpu));
@@ -229,8 +234,8 @@ long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
regs.cp0_epc = (unsigned long) kernel_thread_helper;
regs.cp0_status = read_c0_status();
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
- regs.cp0_status &= ~(ST0_KUP | ST0_IEC);
- regs.cp0_status |= ST0_IEP;
+ regs.cp0_status = (regs.cp0_status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
+ ((regs.cp0_status & (ST0_KUC | ST0_IEC)) << 2);
#else
regs.cp0_status |= ST0_EXL;
#endif
@@ -460,3 +465,15 @@ unsigned long get_wchan(struct task_struct *task)
out:
return pc;
}
+
+/*
+ * Don't forget that the stack pointer must be aligned on an 8-byte
+ * boundary for the 32-bit ABIs and on a 16-byte boundary for the
+ * 64-bit ABIs.
+ */
+unsigned long arch_align_stack(unsigned long sp)
+{
+ if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+ sp -= get_random_int() & ~PAGE_MASK;
+
+ return sp & ALMASK;
+}
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 893e7bccf22..58aa6fec114 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -20,11 +20,11 @@
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
-#include <linux/audit.h>
#include <linux/smp.h>
#include <linux/user.h>
#include <linux/security.h>
-#include <linux/signal.h>
+#include <linux/audit.h>
+#include <linux/seccomp.h>
#include <asm/byteorder.h>
#include <asm/cpu.h>
@@ -54,7 +54,7 @@ void ptrace_disable(struct task_struct *child)
* for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
* Registers are sign extended to fill the available space.
*/
-int ptrace_getregs (struct task_struct *child, __s64 __user *data)
+int ptrace_getregs(struct task_struct *child, __s64 __user *data)
{
struct pt_regs *regs;
int i;
@@ -65,13 +65,13 @@ int ptrace_getregs (struct task_struct *child, __s64 __user *data)
regs = task_pt_regs(child);
for (i = 0; i < 32; i++)
- __put_user (regs->regs[i], data + i);
- __put_user (regs->lo, data + EF_LO - EF_R0);
- __put_user (regs->hi, data + EF_HI - EF_R0);
- __put_user (regs->cp0_epc, data + EF_CP0_EPC - EF_R0);
- __put_user (regs->cp0_badvaddr, data + EF_CP0_BADVADDR - EF_R0);
- __put_user (regs->cp0_status, data + EF_CP0_STATUS - EF_R0);
- __put_user (regs->cp0_cause, data + EF_CP0_CAUSE - EF_R0);
+ __put_user(regs->regs[i], data + i);
+ __put_user(regs->lo, data + EF_LO - EF_R0);
+ __put_user(regs->hi, data + EF_HI - EF_R0);
+ __put_user(regs->cp0_epc, data + EF_CP0_EPC - EF_R0);
+ __put_user(regs->cp0_badvaddr, data + EF_CP0_BADVADDR - EF_R0);
+ __put_user(regs->cp0_status, data + EF_CP0_STATUS - EF_R0);
+ __put_user(regs->cp0_cause, data + EF_CP0_CAUSE - EF_R0);
return 0;
}
@@ -81,7 +81,7 @@ int ptrace_getregs (struct task_struct *child, __s64 __user *data)
* the 64-bit format. On a 32-bit kernel only the lower order half
* (according to endianness) will be used.
*/
-int ptrace_setregs (struct task_struct *child, __s64 __user *data)
+int ptrace_setregs(struct task_struct *child, __s64 __user *data)
{
struct pt_regs *regs;
int i;
@@ -92,17 +92,17 @@ int ptrace_setregs (struct task_struct *child, __s64 __user *data)
regs = task_pt_regs(child);
for (i = 0; i < 32; i++)
- __get_user (regs->regs[i], data + i);
- __get_user (regs->lo, data + EF_LO - EF_R0);
- __get_user (regs->hi, data + EF_HI - EF_R0);
- __get_user (regs->cp0_epc, data + EF_CP0_EPC - EF_R0);
+ __get_user(regs->regs[i], data + i);
+ __get_user(regs->lo, data + EF_LO - EF_R0);
+ __get_user(regs->hi, data + EF_HI - EF_R0);
+ __get_user(regs->cp0_epc, data + EF_CP0_EPC - EF_R0);
/* badvaddr, status, and cause may not be written. */
return 0;
}
-int ptrace_getfpregs (struct task_struct *child, __u32 __user *data)
+int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
{
int i;
unsigned int tmp;
@@ -113,13 +113,13 @@ int ptrace_getfpregs (struct task_struct *child, __u32 __user *data)
if (tsk_used_math(child)) {
fpureg_t *fregs = get_fpu_regs(child);
for (i = 0; i < 32; i++)
- __put_user (fregs[i], i + (__u64 __user *) data);
+ __put_user(fregs[i], i + (__u64 __user *) data);
} else {
for (i = 0; i < 32; i++)
- __put_user ((__u64) -1, i + (__u64 __user *) data);
+ __put_user((__u64) -1, i + (__u64 __user *) data);
}
- __put_user (child->thread.fpu.fcr31, data + 64);
+ __put_user(child->thread.fpu.fcr31, data + 64);
preempt_disable();
if (cpu_has_fpu) {
@@ -142,12 +142,12 @@ int ptrace_getfpregs (struct task_struct *child, __u32 __user *data)
tmp = 0;
}
preempt_enable();
- __put_user (tmp, data + 65);
+ __put_user(tmp, data + 65);
return 0;
}
-int ptrace_setfpregs (struct task_struct *child, __u32 __user *data)
+int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
{
fpureg_t *fregs;
int i;
@@ -158,9 +158,9 @@ int ptrace_setfpregs (struct task_struct *child, __u32 __user *data)
fregs = get_fpu_regs(child);
for (i = 0; i < 32; i++)
- __get_user (fregs[i], i + (__u64 __user *) data);
+ __get_user(fregs[i], i + (__u64 __user *) data);
- __get_user (child->thread.fpu.fcr31, data + 64);
+ __get_user(child->thread.fpu.fcr31, data + 64);
/* FIR may not be written. */
@@ -390,19 +390,19 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
}
case PTRACE_GETREGS:
- ret = ptrace_getregs (child, (__u64 __user *) data);
+ ret = ptrace_getregs(child, (__u64 __user *) data);
break;
case PTRACE_SETREGS:
- ret = ptrace_setregs (child, (__u64 __user *) data);
+ ret = ptrace_setregs(child, (__u64 __user *) data);
break;
case PTRACE_GETFPREGS:
- ret = ptrace_getfpregs (child, (__u32 __user *) data);
+ ret = ptrace_getfpregs(child, (__u32 __user *) data);
break;
case PTRACE_SETFPREGS:
- ret = ptrace_setfpregs (child, (__u32 __user *) data);
+ ret = ptrace_setfpregs(child, (__u32 __user *) data);
break;
case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
@@ -470,12 +470,17 @@ static inline int audit_arch(void)
*/
asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit)
{
+ /* do the secure computing check first */
+ if (!entryexit)
+ secure_computing(regs->regs[0]);
+
if (unlikely(current->audit_context) && entryexit)
audit_syscall_exit(AUDITSC_RESULT(regs->regs[2]),
regs->regs[2]);
if (!(current->ptrace & PT_PTRACED))
goto out;
+
if (!test_thread_flag(TIF_SYSCALL_TRACE))
goto out;
@@ -493,9 +498,10 @@ asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit)
send_sig(current->exit_code, current, 1);
current->exit_code = 0;
}
- out:
+
+out:
if (unlikely(current->audit_context) && !entryexit)
- audit_syscall_entry(audit_arch(), regs->regs[2],
+ audit_syscall_entry(audit_arch(), regs->regs[0],
regs->regs[4], regs->regs[5],
regs->regs[6], regs->regs[7]);
}
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index d9a39c16945..f2bffed94fa 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
@@ -36,11 +36,11 @@
#include <asm/uaccess.h>
#include <asm/bootinfo.h>
-int ptrace_getregs (struct task_struct *child, __s64 __user *data);
-int ptrace_setregs (struct task_struct *child, __s64 __user *data);
+int ptrace_getregs(struct task_struct *child, __s64 __user *data);
+int ptrace_setregs(struct task_struct *child, __s64 __user *data);
-int ptrace_getfpregs (struct task_struct *child, __u32 __user *data);
-int ptrace_setfpregs (struct task_struct *child, __u32 __user *data);
+int ptrace_getfpregs(struct task_struct *child, __u32 __user *data);
+int ptrace_setfpregs(struct task_struct *child, __u32 __user *data);
/*
* Tracing a 32-bit process with a 64-bit strace and vice versa will not
@@ -346,19 +346,19 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
}
case PTRACE_GETREGS:
- ret = ptrace_getregs (child, (__u64 __user *) (__u64) data);
+ ret = ptrace_getregs(child, (__u64 __user *) (__u64) data);
break;
case PTRACE_SETREGS:
- ret = ptrace_setregs (child, (__u64 __user *) (__u64) data);
+ ret = ptrace_setregs(child, (__u64 __user *) (__u64) data);
break;
case PTRACE_GETFPREGS:
- ret = ptrace_getfpregs (child, (__u32 __user *) (__u64) data);
+ ret = ptrace_getfpregs(child, (__u32 __user *) (__u64) data);
break;
case PTRACE_SETFPREGS:
- ret = ptrace_setfpregs (child, (__u32 __user *) (__u64) data);
+ ret = ptrace_setfpregs(child, (__u32 __user *) (__u64) data);
break;
case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S
index a3f0d00c133..87481f916a6 100644
--- a/arch/mips/kernel/relocate_kernel.S
+++ b/arch/mips/kernel/relocate_kernel.S
@@ -14,67 +14,69 @@
#include <asm/stackframe.h>
#include <asm/addrspace.h>
- .globl relocate_new_kernel
-relocate_new_kernel:
-
- PTR_L s0, kexec_indirection_page
- PTR_L s1, kexec_start_address
+LEAF(relocate_new_kernel)
+ PTR_L s0, kexec_indirection_page
+ PTR_L s1, kexec_start_address
process_entry:
- PTR_L s2, (s0)
- PTR_ADD s0, s0, SZREG
+ PTR_L s2, (s0)
+ PTR_ADD s0, s0, SZREG
/* destination page */
- and s3, s2, 0x1
- beq s3, zero, 1f
- and s4, s2, ~0x1 /* store destination addr in s4 */
- move a0, s4
- b process_entry
+ and s3, s2, 0x1
+ beq s3, zero, 1f
+ and s4, s2, ~0x1 /* store destination addr in s4 */
+ move a0, s4
+ b process_entry
1:
/* indirection page, update s0 */
- and s3, s2, 0x2
- beq s3, zero, 1f
- and s0, s2, ~0x2
- b process_entry
+ and s3, s2, 0x2
+ beq s3, zero, 1f
+ and s0, s2, ~0x2
+ b process_entry
1:
/* done page */
- and s3, s2, 0x4
- beq s3, zero, 1f
- b done
+ and s3, s2, 0x4
+ beq s3, zero, 1f
+ b done
1:
/* source page */
- and s3, s2, 0x8
- beq s3, zero, process_entry
- and s2, s2, ~0x8
- li s6, (1 << PAGE_SHIFT) / SZREG
+ and s3, s2, 0x8
+ beq s3, zero, process_entry
+ and s2, s2, ~0x8
+ li s6, (1 << PAGE_SHIFT) / SZREG
copy_word:
/* copy page word by word */
- REG_L s5, (s2)
- REG_S s5, (s4)
- INT_ADD s4, s4, SZREG
- INT_ADD s2, s2, SZREG
- INT_SUB s6, s6, 1
- beq s6, zero, process_entry
- b copy_word
- b process_entry
+ REG_L s5, (s2)
+ REG_S s5, (s4)
+ PTR_ADD s4, s4, SZREG
+ PTR_ADD s2, s2, SZREG
+ LONG_SUB s6, s6, 1
+ beq s6, zero, process_entry
+ b copy_word
+ b process_entry
done:
/* jump to kexec_start_address */
- j s1
+ j s1
+ END(relocate_new_kernel)
- .globl kexec_start_address
kexec_start_address:
- .long 0x0
+ EXPORT(kexec_start_address)
+ PTR 0x0
+ .size kexec_start_address, PTRSIZE
- .globl kexec_indirection_page
kexec_indirection_page:
- .long 0x0
+ EXPORT(kexec_indirection_page)
+ PTR 0
+ .size kexec_indirection_page, PTRSIZE
relocate_new_kernel_end:
- .globl relocate_new_kernel_size
relocate_new_kernel_size:
- .long relocate_new_kernel_end - relocate_new_kernel
+ EXPORT(relocate_new_kernel_size)
+ PTR relocate_new_kernel_end - relocate_new_kernel
+ .size relocate_new_kernel_size, PTRSIZE
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c
index bfc8ca168f8..1ba00c15505 100644
--- a/arch/mips/kernel/rtlx.c
+++ b/arch/mips/kernel/rtlx.c
@@ -40,12 +40,11 @@
#include <asm/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
+#include <asm/mips_mt.h>
#include <asm/system.h>
#include <asm/vpe.h>
#include <asm/rtlx.h>
-#define RTLX_TARG_VPE 1
-
static struct rtlx_info *rtlx;
static int major;
static char module_name[] = "rtlx";
@@ -57,8 +56,6 @@ static struct chan_waitqueues {
struct mutex mutex;
} channel_wqs[RTLX_CHANNELS];
-static struct irqaction irq;
-static int irq_num;
static struct vpe_notifications notify;
static int sp_stopping = 0;
@@ -85,7 +82,7 @@ static irqreturn_t rtlx_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static __attribute_used__ void dump_rtlx(void)
+static void __used dump_rtlx(void)
{
int i;
@@ -112,7 +109,7 @@ static __attribute_used__ void dump_rtlx(void)
static int rtlx_init(struct rtlx_info *rtlxi)
{
if (rtlxi->id != RTLX_ID) {
- printk(KERN_ERR "no valid RTLX id at 0x%p 0x%x\n", rtlxi, rtlxi->id);
+ printk(KERN_ERR "no valid RTLX id at 0x%p 0x%lx\n", rtlxi, rtlxi->id);
return -ENOEXEC;
}
@@ -165,10 +162,10 @@ int rtlx_open(int index, int can_sleep)
}
if (rtlx == NULL) {
- if( (p = vpe_get_shared(RTLX_TARG_VPE)) == NULL) {
+ if ((p = vpe_get_shared(tclimit)) == NULL) {
if (can_sleep) {
__wait_event_interruptible(channel_wqs[index].lx_queue,
- (p = vpe_get_shared(RTLX_TARG_VPE)),
+ (p = vpe_get_shared(tclimit)),
ret);
if (ret)
goto out_fail;
@@ -472,11 +469,24 @@ static int rtlx_irq_num = MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ;
static char register_chrdev_failed[] __initdata =
KERN_ERR "rtlx_module_init: unable to register device\n";
-static int rtlx_module_init(void)
+static int __init rtlx_module_init(void)
{
struct device *dev;
int i, err;
+ if (!cpu_has_mipsmt) {
+ printk("VPE loader: not a MIPS MT capable processor\n");
+ return -ENODEV;
+ }
+
+ if (tclimit == 0) {
+ printk(KERN_WARNING "No TCs reserved for AP/SP, not "
+ "initializing RTLX.\nPass maxtcs=<n> argument as kernel "
+ "argument\n");
+
+ return -ENODEV;
+ }
+
major = register_chrdev(0, module_name, &rtlx_fops);
if (major < 0) {
printk(register_chrdev_failed);
@@ -501,7 +511,7 @@ static int rtlx_module_init(void)
/* set up notifiers */
notify.start = starting;
notify.stop = stopping;
- vpe_notify(RTLX_TARG_VPE, &notify);
+ vpe_notify(tclimit, &notify);
if (cpu_has_vint)
set_vi_handler(MIPS_CPU_RTLX_IRQ, rtlx_dispatch);
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index ae985d1fcca..82480a1717d 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -662,6 +662,7 @@ einval: li v0, -EINVAL
sys sys_signalfd 3
sys sys_timerfd 4
sys sys_eventfd 1
+ sys sys_fallocate 6 /* 4320 */
.endm
/* We pre-compute the number of _instruction_ bytes needed to
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 7bcd5a1a85f..c2c10876da2 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -477,4 +477,5 @@ sys_call_table:
PTR sys_signalfd
PTR sys_timerfd
PTR sys_eventfd
+ PTR sys_fallocate
.size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 532a2f3b42f..118be24224f 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -375,7 +375,7 @@ EXPORT(sysn32_call_table)
PTR sys_mkdirat
PTR sys_mknodat
PTR sys_fchownat
- PTR sys_futimesat /* 6255 */
+ PTR compat_sys_futimesat /* 6255 */
PTR sys_newfstatat
PTR sys_unlinkat
PTR sys_renameat
@@ -403,4 +403,5 @@ EXPORT(sysn32_call_table)
PTR compat_sys_signalfd /* 5280 */
PTR compat_sys_timerfd
PTR sys_eventfd
+ PTR sys_fallocate
.size sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 6bbe0f4ed8b..dd68afce7da 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -525,4 +525,5 @@ sys_call_table:
PTR compat_sys_signalfd
PTR compat_sys_timerfd
PTR sys_eventfd
+ PTR sys32_fallocate /* 4320 */
.size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 316685fca05..a06a27d6cfc 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -51,10 +51,8 @@ EXPORT_SYMBOL(PCI_DMA_BUS_IS_PHYS);
* These are initialized so they are in the .data section
*/
unsigned long mips_machtype __read_mostly = MACH_UNKNOWN;
-unsigned long mips_machgroup __read_mostly = MACH_GROUP_UNKNOWN;
EXPORT_SYMBOL(mips_machtype);
-EXPORT_SYMBOL(mips_machgroup);
struct boot_mem_map boot_mem_map;
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 2a08ce41bf2..a4e106c56ab 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -613,9 +613,9 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
ret = current->thread.abi->setup_frame(ka, regs, sig, oldset);
spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
if (!(ka->sa.sa_flags & SA_NODEFER))
- sigaddset(&current->blocked,sig);
+ sigaddset(&current->blocked, sig);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 486b8e5f52d..572c610db1b 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -18,7 +18,6 @@
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
-#include <linux/compat.h>
#include <linux/suspend.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
@@ -262,11 +261,11 @@ static inline int put_sigset(const sigset_t *kbuf, compat_sigset_t __user *ubuf)
default:
__put_sigset_unknown_nsig();
case 2:
- err |= __put_user (kbuf->sig[1] >> 32, &ubuf->sig[3]);
- err |= __put_user (kbuf->sig[1] & 0xffffffff, &ubuf->sig[2]);
+ err |= __put_user(kbuf->sig[1] >> 32, &ubuf->sig[3]);
+ err |= __put_user(kbuf->sig[1] & 0xffffffff, &ubuf->sig[2]);
case 1:
- err |= __put_user (kbuf->sig[0] >> 32, &ubuf->sig[1]);
- err |= __put_user (kbuf->sig[0] & 0xffffffff, &ubuf->sig[0]);
+ err |= __put_user(kbuf->sig[0] >> 32, &ubuf->sig[1]);
+ err |= __put_user(kbuf->sig[0] & 0xffffffff, &ubuf->sig[0]);
}
return err;
@@ -284,12 +283,12 @@ static inline int get_sigset(sigset_t *kbuf, const compat_sigset_t __user *ubuf)
default:
__get_sigset_unknown_nsig();
case 2:
- err |= __get_user (sig[3], &ubuf->sig[3]);
- err |= __get_user (sig[2], &ubuf->sig[2]);
+ err |= __get_user(sig[3], &ubuf->sig[3]);
+ err |= __get_user(sig[2], &ubuf->sig[2]);
kbuf->sig[1] = sig[2] | (sig[3] << 32);
case 1:
- err |= __get_user (sig[1], &ubuf->sig[1]);
- err |= __get_user (sig[0], &ubuf->sig[0]);
+ err |= __get_user(sig[1], &ubuf->sig[1]);
+ err |= __get_user(sig[0], &ubuf->sig[0]);
kbuf->sig[0] = sig[0] | (sig[1] << 32);
}
@@ -413,10 +412,10 @@ asmlinkage int sys32_sigaltstack(nabi_no_regargs struct pt_regs regs)
return -EFAULT;
}
- set_fs (KERNEL_DS);
+ set_fs(KERNEL_DS);
ret = do_sigaltstack(uss ? (stack_t __user *)&kss : NULL,
uoss ? (stack_t __user *)&koss : NULL, usp);
- set_fs (old_fs);
+ set_fs(old_fs);
if (!ret && uoss) {
if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
@@ -560,9 +559,9 @@ asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
/* It is more difficult to avoid calling this function than to
call it and ignore errors. */
old_fs = get_fs();
- set_fs (KERNEL_DS);
+ set_fs(KERNEL_DS);
do_sigaltstack((stack_t __user *)&st, NULL, regs.regs[29]);
- set_fs (old_fs);
+ set_fs(old_fs);
/*
* Don't let your children do this ...
@@ -747,11 +746,11 @@ asmlinkage int sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
if (set && get_sigset(&new_set, set))
return -EFAULT;
- set_fs (KERNEL_DS);
+ set_fs(KERNEL_DS);
ret = sys_rt_sigprocmask(how, set ? (sigset_t __user *)&new_set : NULL,
oset ? (sigset_t __user *)&old_set : NULL,
sigsetsize);
- set_fs (old_fs);
+ set_fs(old_fs);
if (!ret && oset && put_sigset(&old_set, oset))
return -EFAULT;
@@ -766,9 +765,9 @@ asmlinkage int sys32_rt_sigpending(compat_sigset_t __user *uset,
sigset_t set;
mm_segment_t old_fs = get_fs();
- set_fs (KERNEL_DS);
+ set_fs(KERNEL_DS);
ret = sys_rt_sigpending((sigset_t __user *)&set, sigsetsize);
- set_fs (old_fs);
+ set_fs(old_fs);
if (!ret && put_sigset(&set, uset))
return -EFAULT;
@@ -782,12 +781,12 @@ asmlinkage int sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *
int ret;
mm_segment_t old_fs = get_fs();
- if (copy_from_user (&info, uinfo, 3*sizeof(int)) ||
- copy_from_user (info._sifields._pad, uinfo->_sifields._pad, SI_PAD_SIZE))
+ if (copy_from_user(&info, uinfo, 3*sizeof(int)) ||
+ copy_from_user(info._sifields._pad, uinfo->_sifields._pad, SI_PAD_SIZE))
return -EFAULT;
- set_fs (KERNEL_DS);
+ set_fs(KERNEL_DS);
ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
- set_fs (old_fs);
+ set_fs(old_fs);
return ret;
}
@@ -802,10 +801,10 @@ sys32_waitid(int which, compat_pid_t pid,
mm_segment_t old_fs = get_fs();
info.si_signo = 0;
- set_fs (KERNEL_DS);
+ set_fs(KERNEL_DS);
ret = sys_waitid(which, pid, (siginfo_t __user *) &info, options,
uru ? (struct rusage __user *) &ru : NULL);
- set_fs (old_fs);
+ set_fs(old_fs);
if (ret < 0 || info.si_signo == 0)
return ret;
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c
index eb7e05926eb..bb277e82d42 100644
--- a/arch/mips/kernel/signal_n32.c
+++ b/arch/mips/kernel/signal_n32.c
@@ -88,7 +88,7 @@ struct rt_sigframe_n32 {
#endif /* !ICACHE_REFILLS_WORKAROUND_WAR */
-extern void sigset_from_compat (sigset_t *set, compat_sigset_t *compat);
+extern void sigset_from_compat(sigset_t *set, compat_sigset_t *compat);
asmlinkage int sysn32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
{
@@ -105,7 +105,7 @@ asmlinkage int sysn32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
unewset = (compat_sigset_t __user *) regs.regs[4];
if (copy_from_user(&uset, unewset, sizeof(uset)))
return -EFAULT;
- sigset_from_compat (&newset, &uset);
+ sigset_from_compat(&newset, &uset);
sigdelsetmask(&newset, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 19b30d6f172..94e210cc6cb 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -287,7 +287,7 @@ void __init plat_prepare_cpus(unsigned int max_cpus)
* (unsigned long)idle->thread_info the gp
* assumes a 1:1 mapping of TC => VPE
*/
-void prom_boot_secondary(int cpu, struct task_struct *idle)
+void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle)
{
struct thread_info *gp = task_thread_info(idle);
dvpe();
@@ -321,7 +321,7 @@ void prom_boot_secondary(int cpu, struct task_struct *idle)
evpe(EVPE_ENABLE);
}
-void prom_init_secondary(void)
+void __cpuinit prom_init_secondary(void)
{
/* Enable per-cpu interrupts */
@@ -330,7 +330,7 @@ void prom_init_secondary(void)
(STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP6 | STATUSF_IP7));
}
-void prom_smp_finish(void)
+void __cpuinit prom_smp_finish(void)
{
write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ));
@@ -353,7 +353,7 @@ void core_send_ipi(int cpu, unsigned int action)
unsigned long flags;
int vpflags;
- local_irq_save (flags);
+ local_irq_save(flags);
vpflags = dvpe(); /* can't access the other CPU's registers whilst MVPE enabled */
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index be7362bc2c9..432f2e376ae 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -30,6 +30,7 @@
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
+#include <linux/err.h>
#include <asm/atomic.h>
#include <asm/cpu.h>
@@ -37,6 +38,7 @@
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
+#include <asm/time.h>
#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
@@ -69,6 +71,7 @@ asmlinkage __cpuinit void start_secondary(void)
cpu_probe();
cpu_report();
per_cpu_trap_init();
+ mips_clockevent_init();
prom_init_secondary();
/*
@@ -94,6 +97,8 @@ struct call_data_struct *call_data;
/*
* Run a function on all other CPUs.
+ *
+ * <mask> cpumask_t of processors to run the function on.
* <func> The function to run. This must be fast and non-blocking.
* <info> An arbitrary pointer to pass to the function.
* <retry> If true, keep retrying until ready.
@@ -118,18 +123,20 @@ struct call_data_struct *call_data;
* Spin waiting for call_lock
* Deadlock Deadlock
*/
-int smp_call_function (void (*func) (void *info), void *info, int retry,
- int wait)
+int smp_call_function_mask(cpumask_t mask, void (*func) (void *info),
+ void *info, int retry, int wait)
{
struct call_data_struct data;
- int i, cpus = num_online_cpus() - 1;
int cpu = smp_processor_id();
+ int cpus;
/*
* Can die spectacularly if this CPU isn't yet marked online
*/
BUG_ON(!cpu_online(cpu));
+ cpu_clear(cpu, mask);
+ cpus = cpus_weight(mask);
if (!cpus)
return 0;
@@ -148,9 +155,7 @@ int smp_call_function (void (*func) (void *info), void *info, int retry,
smp_mb();
/* Send a message to all other CPUs and wait for them to respond */
- for_each_online_cpu(i)
- if (i != cpu)
- core_send_ipi(i, SMP_CALL_FUNCTION);
+ core_send_ipi_mask(mask, SMP_CALL_FUNCTION);
/* Wait for response */
/* FIXME: lock-up detection, backtrace on lock-up */
@@ -166,6 +171,11 @@ int smp_call_function (void (*func) (void *info), void *info, int retry,
return 0;
}
+int smp_call_function(void (*func) (void *info), void *info, int retry,
+ int wait)
+{
+ return smp_call_function_mask(cpu_online_map, func, info, retry, wait);
+}
void smp_call_function_interrupt(void)
{
@@ -193,6 +203,35 @@ void smp_call_function_interrupt(void)
}
}
+int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
+ int retry, int wait)
+{
+ int ret, me;
+
+ /*
+ * Can die spectacularly if this CPU isn't yet marked online
+ */
+ if (!cpu_online(cpu))
+ return 0;
+
+ me = get_cpu();
+ BUG_ON(!cpu_online(me));
+
+ if (cpu == me) {
+ local_irq_disable();
+ func(info);
+ local_irq_enable();
+ put_cpu();
+ return 0;
+ }
+
+ ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, retry,
+ wait);
+
+ put_cpu();
+ return ret;
+}
+
static void stop_this_cpu(void *dummy)
{
/*
@@ -334,12 +373,15 @@ void flush_tlb_mm(struct mm_struct *mm)
preempt_disable();
if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
- smp_on_other_tlbs(flush_tlb_mm_ipi, (void *)mm);
+ smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
} else {
- int i;
- for (i = 0; i < num_online_cpus(); i++)
- if (smp_processor_id() != i)
- cpu_context(i, mm) = 0;
+ cpumask_t mask = cpu_online_map;
+ unsigned int cpu;
+
+ cpu_clear(smp_processor_id(), mask);
+ for_each_cpu_mask(cpu, mask)
+ if (cpu_context(cpu, mm))
+ cpu_context(cpu, mm) = 0;
}
local_flush_tlb_mm(mm);
@@ -354,7 +396,7 @@ struct flush_tlb_data {
static void flush_tlb_range_ipi(void *info)
{
- struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
+ struct flush_tlb_data *fd = info;
local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}
@@ -365,17 +407,21 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned l
preempt_disable();
if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
- struct flush_tlb_data fd;
+ struct flush_tlb_data fd = {
+ .vma = vma,
+ .addr1 = start,
+ .addr2 = end,
+ };
- fd.vma = vma;
- fd.addr1 = start;
- fd.addr2 = end;
- smp_on_other_tlbs(flush_tlb_range_ipi, (void *)&fd);
+ smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
} else {
- int i;
- for (i = 0; i < num_online_cpus(); i++)
- if (smp_processor_id() != i)
- cpu_context(i, mm) = 0;
+ cpumask_t mask = cpu_online_map;
+ unsigned int cpu;
+
+ cpu_clear(smp_processor_id(), mask);
+ for_each_cpu_mask(cpu, mask)
+ if (cpu_context(cpu, mm))
+ cpu_context(cpu, mm) = 0;
}
local_flush_tlb_range(vma, start, end);
preempt_enable();
@@ -383,23 +429,24 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned l
static void flush_tlb_kernel_range_ipi(void *info)
{
- struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
+ struct flush_tlb_data *fd = info;
local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
- struct flush_tlb_data fd;
+ struct flush_tlb_data fd = {
+ .addr1 = start,
+ .addr2 = end,
+ };
- fd.addr1 = start;
- fd.addr2 = end;
- on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1, 1);
+ on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1, 1);
}
static void flush_tlb_page_ipi(void *info)
{
- struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
+ struct flush_tlb_data *fd = info;
local_flush_tlb_page(fd->vma, fd->addr1);
}
@@ -408,16 +455,20 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
preempt_disable();
if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
- struct flush_tlb_data fd;
+ struct flush_tlb_data fd = {
+ .vma = vma,
+ .addr1 = page,
+ };
- fd.vma = vma;
- fd.addr1 = page;
- smp_on_other_tlbs(flush_tlb_page_ipi, (void *)&fd);
+ smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
} else {
- int i;
- for (i = 0; i < num_online_cpus(); i++)
- if (smp_processor_id() != i)
- cpu_context(i, vma->vm_mm) = 0;
+ cpumask_t mask = cpu_online_map;
+ unsigned int cpu;
+
+ cpu_clear(smp_processor_id(), mask);
+ for_each_cpu_mask(cpu, mask)
+ if (cpu_context(cpu, vma->vm_mm))
+ cpu_context(cpu, vma->vm_mm) = 0;
}
local_flush_tlb_page(vma, page);
preempt_enable();
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 342d873b2ec..a8c1a698d58 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -1,5 +1,6 @@
/* Copyright (C) 2004 Mips Technologies, Inc */
+#include <linux/clockchips.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
@@ -25,10 +26,11 @@
#include <asm/smtc_proc.h>
/*
- * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set.
+ * SMTC Kernel needs to manipulate low-level CPU interrupt mask
+ * in do_IRQ. These are passed in setup_irq_smtc() and stored
+ * in this table.
*/
-
-#define MIPS_CPU_IPI_IRQ 1
+unsigned long irq_hwmask[NR_IRQS];
#define LOCK_MT_PRA() \
local_irq_save(flags); \
@@ -61,7 +63,7 @@ asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];
* Clock interrupt "latch" buffers, per "CPU"
*/
-unsigned int ipi_timer_latch[NR_CPUS];
+static atomic_t ipi_timer_latch[NR_CPUS];
/*
* Number of InterProcessor Interrupt (IPI) message buffers to allocate
@@ -86,25 +88,11 @@ unsigned int smtc_status = 0;
/* Boot command line configuration overrides */
-static int vpelimit = 0;
-static int tclimit = 0;
static int ipibuffers = 0;
static int nostlb = 0;
static int asidmask = 0;
unsigned long smtc_asid_mask = 0xff;
-static int __init maxvpes(char *str)
-{
- get_option(&str, &vpelimit);
- return 1;
-}
-
-static int __init maxtcs(char *str)
-{
- get_option(&str, &tclimit);
- return 1;
-}
-
static int __init ipibufs(char *str)
{
get_option(&str, &ipibuffers);
@@ -137,8 +125,6 @@ static int __init asidmask_set(char *str)
return 1;
}
-__setup("maxvpes=", maxvpes);
-__setup("maxtcs=", maxtcs);
__setup("ipibufs=", ipibufs);
__setup("nostlb", stlb_disable);
__setup("asidmask=", asidmask_set);
@@ -168,9 +154,9 @@ static int __init tintq(char *str)
__setup("tintq=", tintq);
-int imstuckcount[2][8];
+static int imstuckcount[2][8];
/* vpemask represents IM/IE bits of per-VPE Status registers, low-to-high */
-int vpemask[2][8] = {
+static int vpemask[2][8] = {
{0, 0, 1, 0, 0, 0, 0, 1},
{0, 0, 0, 0, 0, 0, 0, 1}
};
@@ -194,7 +180,7 @@ void __init sanitize_tlb_entries(void)
static void smtc_configure_tlb(void)
{
- int i,tlbsiz,vpes;
+ int i, tlbsiz, vpes;
unsigned long mvpconf0;
unsigned long config1val;
@@ -311,8 +297,10 @@ int __init mipsmt_build_cpu_map(int start_cpu_slot)
__cpu_number_map[i] = i;
__cpu_logical_map[i] = i;
}
+#ifdef CONFIG_MIPS_MT_FPAFF
/* Initialize map of CPUs with FPUs */
cpus_clear(mt_fpu_cpumask);
+#endif
/* One of those TC's is the one booting, and not a secondary... */
printk("%i available secondary CPU TC(s)\n", i - 1);
@@ -374,7 +362,7 @@ void mipsmt_prepare_cpus(void)
IPIQ[i].head = IPIQ[i].tail = NULL;
spin_lock_init(&IPIQ[i].lock);
IPIQ[i].depth = 0;
- ipi_timer_latch[i] = 0;
+ atomic_set(&ipi_timer_latch[i], 0);
}
/* cpu_data index starts at zero */
@@ -384,7 +372,7 @@ void mipsmt_prepare_cpus(void)
cpu++;
/* Report on boot-time options */
- mips_mt_set_cpuoptions ();
+ mips_mt_set_cpuoptions();
if (vpelimit > 0)
printk("Limit of %d VPEs set\n", vpelimit);
if (tclimit > 0)
@@ -435,7 +423,7 @@ void mipsmt_prepare_cpus(void)
* code. Leave it alone!
*/
if (tc != 0) {
- smtc_tc_setup(vpe,tc, cpu);
+ smtc_tc_setup(vpe, tc, cpu);
cpu++;
}
printk(" %d", tc);
@@ -443,7 +431,7 @@ void mipsmt_prepare_cpus(void)
}
if (slop) {
if (tc != 0) {
- smtc_tc_setup(vpe,tc, cpu);
+ smtc_tc_setup(vpe, tc, cpu);
cpu++;
}
printk(" %d", tc);
@@ -497,10 +485,12 @@ void mipsmt_prepare_cpus(void)
/* Set up coprocessor affinity CPU mask(s) */
+#ifdef CONFIG_MIPS_MT_FPAFF
for (tc = 0; tc < ntc; tc++) {
if (cpu_data[tc].options & MIPS_CPU_FPU)
cpu_set(tc, mt_fpu_cpumask);
}
+#endif
/* set up ipi interrupts... */
@@ -540,7 +530,7 @@ void mipsmt_prepare_cpus(void)
* (unsigned long)idle->thread_info the gp
*
*/
-void smtc_boot_secondary(int cpu, struct task_struct *idle)
+void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle)
{
extern u32 kernelsp[NR_CPUS];
long flags;
@@ -582,7 +572,7 @@ void smtc_init_secondary(void)
if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
((read_c0_tcbind() & TCBIND_CURVPE)
!= cpu_data[smp_processor_id() - 1].vpe_id)){
- write_c0_compare (read_c0_count() + mips_hpt_frequency/HZ);
+ write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
}
local_irq_enable();
@@ -621,6 +611,60 @@ int setup_irq_smtc(unsigned int irq, struct irqaction * new,
return setup_irq(irq, new);
}
+#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
+/*
+ * Support for IRQ affinity to TCs
+ */
+
+void smtc_set_irq_affinity(unsigned int irq, cpumask_t affinity)
+{
+ /*
+ * If a "fast path" cache of quickly decodable affinity state
+ * is maintained, this is where it gets done, on a call up
+ * from the platform affinity code.
+ */
+}
+
+void smtc_forward_irq(unsigned int irq)
+{
+ int target;
+
+ /*
+ * OK wise guy, now figure out how to get the IRQ
+ * to be serviced on an authorized "CPU".
+ *
+ * Ideally, to handle the situation where an IRQ has multiple
+ * eligible CPUS, we would maintain state per IRQ that would
+ * allow a fair distribution of service requests. Since the
+ * expected use model is any-or-only-one, for simplicity
+ * and efficiency, we just pick the easiest one to find.
+ */
+
+ target = first_cpu(irq_desc[irq].affinity);
+
+ /*
+ * We depend on the platform code to have correctly processed
+ * IRQ affinity change requests to ensure that the IRQ affinity
+ * mask has been purged of bits corresponding to nonexistent and
+ * offline "CPUs", and to TCs bound to VPEs other than the VPE
+ * connected to the physical interrupt input for the interrupt
+ * in question. Otherwise we have a nasty problem with interrupt
+ * mask management. This is best handled in non-performance-critical
+ * platform IRQ affinity setting code, to minimize interrupt-time
+ * checks.
+ */
+
+ /* If no one is eligible, service locally */
+ if (target >= NR_CPUS) {
+ do_IRQ_no_affinity(irq);
+ return;
+ }
+
+ smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq);
+}
+
+#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
+
/*
* IPI model for SMTC is tricky, because interrupts aren't TC-specific.
* Within a VPE one TC can interrupt another by different approaches.
@@ -663,7 +707,7 @@ static void smtc_ipi_qdump(void)
* be done with the atomic.h primitives). And since this is
* MIPS MT, we can assume that we have LL/SC.
*/
-static __inline__ int atomic_postincrement(unsigned int *pv)
+static inline int atomic_postincrement(atomic_t *v)
{
unsigned long result;
@@ -674,9 +718,9 @@ static __inline__ int atomic_postincrement(unsigned int *pv)
" addu %1, %0, 1 \n"
" sc %1, %2 \n"
" beqz %1, 1b \n"
- " sync \n"
- : "=&r" (result), "=&r" (temp), "=m" (*pv)
- : "m" (*pv)
+ __WEAK_LLSC_MB
+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
+ : "m" (v->counter)
: "memory");
return result;
@@ -704,6 +748,8 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
pipi->arg = (void *)action;
pipi->dest = cpu;
if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
+ if (type == SMTC_CLOCK_TICK)
+ atomic_inc(&ipi_timer_latch[cpu]);
/* If not on same VPE, enqueue and send cross-VPE interrupt */
smtc_ipi_nq(&IPIQ[cpu], pipi);
LOCK_CORE_PRA();
@@ -745,6 +791,8 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
}
smtc_ipi_nq(&IPIQ[cpu], pipi);
} else {
+ if (type == SMTC_CLOCK_TICK)
+ atomic_inc(&ipi_timer_latch[cpu]);
post_direct_ipi(cpu, pipi);
write_tc_c0_tchalt(0);
UNLOCK_CORE_PRA();
@@ -762,6 +810,7 @@ static void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
unsigned long tcrestart;
extern u32 kernelsp[NR_CPUS];
extern void __smtc_ipi_vector(void);
/* Extract Status, EPC from halted TC */
tcstatus = read_tc_c0_tcstatus();
@@ -812,25 +861,31 @@ static void ipi_call_interrupt(void)
smp_call_function_interrupt();
}
+DECLARE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device);
+
void ipi_decode(struct smtc_ipi *pipi)
{
+ unsigned int cpu = smp_processor_id();
+ struct clock_event_device *cd;
void *arg_copy = pipi->arg;
int type_copy = pipi->type;
- int dest_copy = pipi->dest;
+ int ticks;
smtc_ipi_nq(&freeIPIq, pipi);
switch (type_copy) {
case SMTC_CLOCK_TICK:
irq_enter();
- kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + cp0_compare_irq]++;
- /* Invoke Clock "Interrupt" */
- ipi_timer_latch[dest_copy] = 0;
-#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
- clock_hang_reported[dest_copy] = 0;
-#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
- local_timer_interrupt(0, NULL);
+ kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + 1]++;
+ cd = &per_cpu(smtc_dummy_clockevent_device, cpu);
+ ticks = atomic_read(&ipi_timer_latch[cpu]);
+ atomic_sub(ticks, &ipi_timer_latch[cpu]);
+ while (ticks) {
+ cd->event_handler(cd);
+ ticks--;
+ }
irq_exit();
break;
+
case LINUX_SMP_IPI:
switch ((int)arg_copy) {
case SMP_RESCHEDULE_YOURSELF:
@@ -845,6 +900,15 @@ void ipi_decode(struct smtc_ipi *pipi)
break;
}
break;
+#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
+ case IRQ_AFFINITY_IPI:
+ /*
+ * Accept a "forwarded" interrupt that was initially
+ * taken by a TC who doesn't have affinity for the IRQ.
+ */
+ do_IRQ_no_affinity((int)arg_copy);
+ break;
+#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
default:
printk("Impossible SMTC IPI Type 0x%x\n", type_copy);
break;
@@ -873,25 +937,6 @@ void deferred_smtc_ipi(void)
}
/*
- * Send clock tick to all TCs except the one executing the funtion
- */
-
-void smtc_timer_broadcast(int vpe)
-{
- int cpu;
- int myTC = cpu_data[smp_processor_id()].tc_id;
- int myVPE = cpu_data[smp_processor_id()].vpe_id;
-
- smtc_cpu_stats[smp_processor_id()].timerints++;
-
- for_each_online_cpu(cpu) {
- if (cpu_data[cpu].vpe_id == myVPE &&
- cpu_data[cpu].tc_id != myTC)
- smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
- }
-}
-
-/*
* Cross-VPE interrupts in the SMTC prototype use "software interrupts"
* set via cross-VPE MTTR manipulation of the Cause register. It would be
* in some regards preferable to have external logic for "doorbell" hardware
@@ -975,7 +1020,12 @@ static void ipi_irq_dispatch(void)
do_IRQ(cpu_ipi_irq);
}
-static struct irqaction irq_ipi;
+static struct irqaction irq_ipi = {
+ .handler = ipi_interrupt,
+ .flags = IRQF_DISABLED | IRQF_PERCPU,
+ .name = "SMTC_IPI",
+};
static void setup_cross_vpe_interrupts(unsigned int nvpe)
{
@@ -987,13 +1037,8 @@ static void setup_cross_vpe_interrupts(unsigned int nvpe)
set_vi_handler(MIPS_CPU_IPI_IRQ, ipi_irq_dispatch);
- irq_ipi.handler = ipi_interrupt;
- irq_ipi.flags = IRQF_DISABLED;
- irq_ipi.name = "SMTC_IPI";
-
setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ));
- irq_desc[cpu_ipi_irq].status |= IRQ_PER_CPU;
set_irq_handler(cpu_ipi_irq, handle_percpu_irq);
}
@@ -1132,11 +1177,11 @@ void smtc_idle_loop_hook(void)
for (tc = 0; tc < NR_CPUS; tc++) {
/* Don't check ourself - we'll dequeue IPIs just below */
if ((tc != smp_processor_id()) &&
- ipi_timer_latch[tc] > timerq_limit) {
+ atomic_read(&ipi_timer_latch[tc]) > timerq_limit) {
if (clock_hang_reported[tc] == 0) {
pdb_msg += sprintf(pdb_msg,
"TC %d looks hung with timer latch at %d\n",
- tc, ipi_timer_latch[tc]);
+ tc, atomic_read(&ipi_timer_latch[tc]));
clock_hang_reported[tc]++;
}
}
@@ -1177,7 +1222,7 @@ void smtc_soft_dump(void)
smtc_ipi_qdump();
printk("Timer IPI Backlogs:\n");
for (i=0; i < NR_CPUS; i++) {
- printk("%d: %d\n", i, ipi_timer_latch[i]);
+ printk("%d: %d\n", i, atomic_read(&ipi_timer_latch[i]));
}
printk("%d Recoveries of \"stolen\" FPU\n",
atomic_read(&smtc_fpu_recoveries));
@@ -1219,7 +1264,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
if (cpu_has_vtag_icache)
flush_icache_all();
/* Traverse all online CPUs (hack requires contiguous range) */
- for (i = 0; i < num_online_cpus(); i++) {
+ for_each_online_cpu(i) {
/*
* We don't need to worry about our own CPU, nor those of
* CPUs who don't share our TLB.
@@ -1248,7 +1293,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
/*
* SMTC shares the TLB within VPEs and possibly across all VPEs.
*/
- for (i = 0; i < num_online_cpus(); i++) {
+ for_each_online_cpu(i) {
if ((smtc_status & SMTC_TLB_SHARED) ||
(cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
cpu_context(i, mm) = asid_cache(i) = asid;
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index b947c61c0cc..17c4374d220 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -12,6 +12,7 @@
#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/mm.h>
+#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/mman.h>
#include <linux/ptrace.h>
@@ -167,14 +168,14 @@ sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
}
save_static_function(sys_fork);
-__attribute_used__ noinline static int
+static int __used noinline
_sys_fork(nabi_no_regargs struct pt_regs regs)
{
return do_fork(SIGCHLD, regs.regs[29], &regs, 0, NULL, NULL);
}
save_static_function(sys_clone);
-__attribute_used__ noinline static int
+static int __used noinline
_sys_clone(nabi_no_regargs struct pt_regs regs)
{
unsigned long clone_flags;
@@ -244,7 +245,7 @@ asmlinkage int sys_olduname(struct oldold_utsname __user * name)
if (!name)
return -EFAULT;
- if (!access_ok(VERIFY_WRITE,name,sizeof(struct oldold_utsname)))
+ if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
return -EFAULT;
error = __copy_to_user(&name->sysname, &utsname()->sysname,
@@ -280,16 +281,24 @@ asmlinkage int sys_set_thread_area(unsigned long addr)
asmlinkage int _sys_sysmips(int cmd, long arg1, int arg2, int arg3)
{
- int tmp;
-
- switch(cmd) {
+ switch (cmd) {
case MIPS_ATOMIC_SET:
printk(KERN_CRIT "How did I get here?\n");
return -EINVAL;
case MIPS_FIXADE:
- tmp = current->thread.mflags & ~3;
- current->thread.mflags = tmp | (arg1 & 3);
+ if (arg1 & ~3)
+ return -EINVAL;
+
+ if (arg1 & 1)
+ set_thread_flag(TIF_FIXADE);
+ else
+ clear_thread_flag(TIF_FIXADE);
+ if (arg1 & 2)
+ set_thread_flag(TIF_LOGADE);
+ else
+ clear_thread_flag(TIF_LOGADE);
+
return 0;
case FLUSH_CACHE:
@@ -305,8 +314,8 @@ asmlinkage int _sys_sysmips(int cmd, long arg1, int arg2, int arg3)
*
* This is really horribly ugly.
*/
-asmlinkage int sys_ipc (unsigned int call, int first, int second,
- unsigned long third, void __user *ptr, long fifth)
+asmlinkage int sys_ipc(unsigned int call, int first, int second,
+ unsigned long third, void __user *ptr, long fifth)
{
int version, ret;
@@ -315,26 +324,26 @@ asmlinkage int sys_ipc (unsigned int call, int first, int second,
switch (call) {
case SEMOP:
- return sys_semtimedop (first, (struct sembuf __user *)ptr,
- second, NULL);
+ return sys_semtimedop(first, (struct sembuf __user *)ptr,
+ second, NULL);
case SEMTIMEDOP:
- return sys_semtimedop (first, (struct sembuf __user *)ptr,
- second,
- (const struct timespec __user *)fifth);
+ return sys_semtimedop(first, (struct sembuf __user *)ptr,
+ second,
+ (const struct timespec __user *)fifth);
case SEMGET:
- return sys_semget (first, second, third);
+ return sys_semget(first, second, third);
case SEMCTL: {
union semun fourth;
if (!ptr)
return -EINVAL;
if (get_user(fourth.__pad, (void __user *__user *) ptr))
return -EFAULT;
- return sys_semctl (first, second, third, fourth);
+ return sys_semctl(first, second, third, fourth);
}
case MSGSND:
- return sys_msgsnd (first, (struct msgbuf __user *) ptr,
- second, third);
+ return sys_msgsnd(first, (struct msgbuf __user *) ptr,
+ second, third);
case MSGRCV:
switch (version) {
case 0: {
@@ -344,45 +353,45 @@ asmlinkage int sys_ipc (unsigned int call, int first, int second,
if (copy_from_user(&tmp,
(struct ipc_kludge __user *) ptr,
- sizeof (tmp)))
+ sizeof(tmp)))
return -EFAULT;
- return sys_msgrcv (first, tmp.msgp, second,
- tmp.msgtyp, third);
+ return sys_msgrcv(first, tmp.msgp, second,
+ tmp.msgtyp, third);
}
default:
- return sys_msgrcv (first,
- (struct msgbuf __user *) ptr,
- second, fifth, third);
+ return sys_msgrcv(first,
+ (struct msgbuf __user *) ptr,
+ second, fifth, third);
}
case MSGGET:
- return sys_msgget ((key_t) first, second);
+ return sys_msgget((key_t) first, second);
case MSGCTL:
- return sys_msgctl (first, second,
- (struct msqid_ds __user *) ptr);
+ return sys_msgctl(first, second,
+ (struct msqid_ds __user *) ptr);
case SHMAT:
switch (version) {
default: {
unsigned long raddr;
- ret = do_shmat (first, (char __user *) ptr, second,
- &raddr);
+ ret = do_shmat(first, (char __user *) ptr, second,
+ &raddr);
if (ret)
return ret;
- return put_user (raddr, (unsigned long __user *) third);
+ return put_user(raddr, (unsigned long __user *) third);
}
case 1: /* iBCS2 emulator entry point */
if (!segment_eq(get_fs(), get_ds()))
return -EINVAL;
- return do_shmat (first, (char __user *) ptr, second,
- (unsigned long *) third);
+ return do_shmat(first, (char __user *) ptr, second,
+ (unsigned long *) third);
}
case SHMDT:
- return sys_shmdt ((char __user *)ptr);
+ return sys_shmdt((char __user *)ptr);
case SHMGET:
- return sys_shmget (first, second, third);
+ return sys_shmget(first, second, third);
case SHMCTL:
- return sys_shmctl (first, second,
- (struct shmid_ds __user *) ptr);
+ return sys_shmctl(first, second,
+ (struct shmid_ds __user *) ptr);
default:
return -ENOSYS;
}
diff --git a/arch/mips/kernel/sysirix.c b/arch/mips/kernel/sysirix.c
index 93a148486f8..ee7790d9deb 100644
--- a/arch/mips/kernel/sysirix.c
+++ b/arch/mips/kernel/sysirix.c
@@ -486,10 +486,10 @@ asmlinkage int irix_syssgi(struct pt_regs *regs)
switch (arg1) {
case SGI_INV_SIZEOF:
- retval = sizeof (inventory_t);
+ retval = sizeof(inventory_t);
break;
case SGI_INV_READ:
- retval = dump_inventory_to_user (buffer, count);
+ retval = dump_inventory_to_user(buffer, count);
break;
default:
retval = -EINVAL;
@@ -778,7 +778,7 @@ asmlinkage int irix_times(struct tms __user *tbuf)
int err = 0;
if (tbuf) {
- if (!access_ok(VERIFY_WRITE,tbuf,sizeof *tbuf))
+ if (!access_ok(VERIFY_WRITE, tbuf, sizeof *tbuf))
return -EFAULT;
err = __put_user(current->utime, &tbuf->tms_utime);
@@ -1042,9 +1042,9 @@ asmlinkage unsigned long irix_mmap32(unsigned long addr, size_t len, int prot,
long max_size = offset + len;
if (max_size > file->f_path.dentry->d_inode->i_size) {
- old_pos = sys_lseek (fd, max_size - 1, 0);
- sys_write (fd, (void __user *) "", 1);
- sys_lseek (fd, old_pos, 0);
+ old_pos = sys_lseek(fd, max_size - 1, 0);
+ sys_write(fd, (void __user *) "", 1);
+ sys_lseek(fd, old_pos, 0);
}
}
}
@@ -1176,7 +1176,7 @@ static int irix_xstat32_xlate(struct kstat *stat, void __user *ubuf)
ub.st_ctime1 = stat->atime.tv_nsec;
ub.st_blksize = stat->blksize;
ub.st_blocks = stat->blocks;
- strcpy (ub.st_fstype, "efs");
+ strcpy(ub.st_fstype, "efs");
return copy_to_user(ubuf, &ub, sizeof(ub)) ? -EFAULT : 0;
}
@@ -1208,7 +1208,7 @@ static int irix_xstat64_xlate(struct kstat *stat, void __user *ubuf)
ks.st_nlink = (u32) stat->nlink;
ks.st_uid = (s32) stat->uid;
ks.st_gid = (s32) stat->gid;
- ks.st_rdev = sysv_encode_dev (stat->rdev);
+ ks.st_rdev = sysv_encode_dev(stat->rdev);
ks.st_pad2[0] = ks.st_pad2[1] = 0;
ks.st_size = (long long) stat->size;
ks.st_pad3 = 0;
@@ -1527,9 +1527,9 @@ asmlinkage int irix_mmap64(struct pt_regs *regs)
long max_size = off2 + len;
if (max_size > file->f_path.dentry->d_inode->i_size) {
- old_pos = sys_lseek (fd, max_size - 1, 0);
- sys_write (fd, (void __user *) "", 1);
- sys_lseek (fd, old_pos, 0);
+ old_pos = sys_lseek(fd, max_size - 1, 0);
+ sys_write(fd, (void __user *) "", 1);
+ sys_lseek(fd, old_pos, 0);
}
}
}
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index d48d1d5bea0..5892491b40e 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -11,11 +11,13 @@
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
+#include <linux/clockchips.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/param.h>
+#include <linux/profile.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/smp.h>
@@ -23,6 +25,7 @@
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/kallsyms.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
@@ -31,8 +34,11 @@
#include <asm/cpu-features.h>
#include <asm/div64.h>
#include <asm/sections.h>
+#include <asm/smtc_ipi.h>
#include <asm/time.h>
+#include <irq.h>
+
/*
* The integer part of the number of usecs per jiffy is taken from tick,
* but the fractional part is not recorded, so we calculate it using the
@@ -48,32 +54,27 @@
* forward reference
*/
DEFINE_SPINLOCK(rtc_lock);
+EXPORT_SYMBOL(rtc_lock);
-/*
- * By default we provide the null RTC ops
- */
-static unsigned long null_rtc_get_time(void)
+int __weak rtc_mips_set_time(unsigned long sec)
{
- return mktime(2000, 1, 1, 0, 0, 0);
+ return 0;
}
+EXPORT_SYMBOL(rtc_mips_set_time);
-static int null_rtc_set_time(unsigned long sec)
+int __weak rtc_mips_set_mmss(unsigned long nowtime)
{
- return 0;
+ return rtc_mips_set_time(nowtime);
}
-unsigned long (*rtc_mips_get_time)(void) = null_rtc_get_time;
-int (*rtc_mips_set_time)(unsigned long) = null_rtc_set_time;
-int (*rtc_mips_set_mmss)(unsigned long);
-
+int update_persistent_clock(struct timespec now)
+{
+ return rtc_mips_set_mmss(now.tv_sec);
+}
/* how many counter cycles in a jiffy */
static unsigned long cycles_per_jiffy __read_mostly;
-/* expirelo is the count value for next CPU timer interrupt */
-static unsigned int expirelo;
-
-
/*
* Null timer ack for systems not needing one (e.g. i8254).
*/
@@ -92,18 +93,7 @@ static cycle_t null_hpt_read(void)
*/
static void c0_timer_ack(void)
{
- unsigned int count;
-
- /* Ack this timer interrupt and set the next one. */
- expirelo += cycles_per_jiffy;
- write_c0_compare(expirelo);
-
- /* Check to see if we have missed any timer interrupts. */
- while (((count = read_c0_count()) - expirelo) < 0x7fffffff) {
- /* missed_timer_count++; */
- expirelo = count + cycles_per_jiffy;
- write_c0_compare(expirelo);
- }
+ write_c0_compare(read_c0_compare());
}
/*
@@ -114,19 +104,9 @@ static cycle_t c0_hpt_read(void)
return read_c0_count();
}
-/* For use both as a high precision timer and an interrupt source. */
-static void __init c0_hpt_timer_init(void)
-{
- expirelo = read_c0_count() + cycles_per_jiffy;
- write_c0_compare(expirelo);
-}
-
int (*mips_timer_state)(void);
void (*mips_timer_ack)(void);
-/* last time when xtime and rtc are sync'ed up */
-static long last_rtc_update;
-
/*
* local_timer_interrupt() does profiling and process accounting
* on a per-CPU basis.
@@ -143,60 +123,15 @@ void local_timer_interrupt(int irq, void *dev_id)
update_process_times(user_mode(get_irq_regs()));
}
-/*
- * High-level timer interrupt service routines. This function
- * is set as irqaction->handler and is invoked through do_IRQ.
- */
-irqreturn_t timer_interrupt(int irq, void *dev_id)
-{
- write_seqlock(&xtime_lock);
-
- mips_timer_ack();
-
- /*
- * call the generic timer interrupt handling
- */
- do_timer(1);
-
- /*
- * If we have an externally synchronized Linux clock, then update
- * CMOS clock accordingly every ~11 minutes. rtc_mips_set_time() has to be
- * called as close as possible to 500 ms before the new second starts.
- */
- if (ntp_synced() &&
- xtime.tv_sec > last_rtc_update + 660 &&
- (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
- (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
- if (rtc_mips_set_mmss(xtime.tv_sec) == 0) {
- last_rtc_update = xtime.tv_sec;
- } else {
- /* do it again in 60 s */
- last_rtc_update = xtime.tv_sec - 600;
- }
- }
-
- write_sequnlock(&xtime_lock);
-
- /*
- * In UP mode, we call local_timer_interrupt() to do profiling
- * and process accouting.
- *
- * In SMP mode, local_timer_interrupt() is invoked by appropriate
- * low-level local timer interrupt handler.
- */
- local_timer_interrupt(irq, dev_id);
-
- return IRQ_HANDLED;
-}
-
int null_perf_irq(void)
{
return 0;
}
+EXPORT_SYMBOL(null_perf_irq);
+
int (*perf_irq)(void) = null_perf_irq;
-EXPORT_SYMBOL(null_perf_irq);
EXPORT_SYMBOL(perf_irq);
/*
@@ -214,7 +149,7 @@ EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
* Possibly handle a performance counter interrupt.
* Return true if the timer interrupt should not be checked
*/
-static inline int handle_perf_irq (int r2)
+static inline int handle_perf_irq(int r2)
{
/*
* The performance counter overflow interrupt may be shared with the
@@ -228,63 +163,23 @@ static inline int handle_perf_irq (int r2)
!r2;
}
-asmlinkage void ll_timer_interrupt(int irq)
-{
- int r2 = cpu_has_mips_r2;
-
- irq_enter();
- kstat_this_cpu.irqs[irq]++;
-
- if (handle_perf_irq(r2))
- goto out;
-
- if (r2 && ((read_c0_cause() & (1 << 30)) == 0))
- goto out;
-
- timer_interrupt(irq, NULL);
-
-out:
- irq_exit();
-}
-
-asmlinkage void ll_local_timer_interrupt(int irq)
-{
- irq_enter();
- if (smp_processor_id() != 0)
- kstat_this_cpu.irqs[irq]++;
-
- /* we keep interrupt disabled all the time */
- local_timer_interrupt(irq, NULL);
-
- irq_exit();
-}
-
/*
* time_init() - it does the following things.
*
- * 1) board_time_init() -
+ * 1) plat_time_init() -
* a) (optional) set up RTC routines,
* b) (optional) calibrate and set the mips_hpt_frequency
* (only needed if you intended to use cpu counter as timer interrupt
* source)
- * 2) setup xtime based on rtc_mips_get_time().
- * 3) calculate a couple of cached variables for later usage
- * 4) plat_timer_setup() -
+ * 2) calculate a couple of cached variables for later usage
+ * 3) plat_timer_setup() -
* a) (optional) over-write any choices made above by time_init().
* b) machine specific code should setup the timer irqaction.
* c) enable the timer interrupt
*/
-void (*board_time_init)(void);
-
unsigned int mips_hpt_frequency;
-static struct irqaction timer_irqaction = {
- .handler = timer_interrupt,
- .flags = IRQF_DISABLED | IRQF_PERCPU,
- .name = "timer",
-};
-
static unsigned int __init calibrate_hpt(void)
{
cycle_t frequency, hpt_start, hpt_end, hpt_count, hz;
@@ -333,6 +228,84 @@ struct clocksource clocksource_mips = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
+static int mips_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ unsigned int cnt;
+ int res;
+
+#ifdef CONFIG_MIPS_MT_SMTC
+ {
+ unsigned long flags, vpflags;
+ local_irq_save(flags);
+ vpflags = dvpe();
+#endif
+ cnt = read_c0_count();
+ cnt += delta;
+ write_c0_compare(cnt);
+	res = ((int)(read_c0_count() - cnt) > 0) ? -ETIME : 0;
+#ifdef CONFIG_MIPS_MT_SMTC
+ evpe(vpflags);
+ local_irq_restore(flags);
+ }
+#endif
+ return res;
+}
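
A note on the signed cast in mips_next_event(): by the time Count is re-read, it may already have passed the value written to Compare (a tiny delta, or an interruption between read and write), and since the counter wraps, the only safe test is to interpret the unsigned difference as signed. A standalone illustration with assumed values:

	#include <stdio.h>

	int main(void)
	{
		unsigned int cnt = 0xfffffff0u;	/* value written to Compare */
		unsigned int now = 0x00000004u;	/* Count has wrapped past it */

		/* positive signed difference => the event is already in the past */
		printf("%s\n", (int)(now - cnt) > 0 ? "-ETIME" : "armed");
		return 0;
	}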
+
+static void mips_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ /* Nothing to do ... */
+}
+
+static DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
+static int cp0_timer_irq_installed;
+
+static irqreturn_t timer_interrupt(int irq, void *dev_id)
+{
+ const int r2 = cpu_has_mips_r2;
+ struct clock_event_device *cd;
+ int cpu = smp_processor_id();
+
+ /*
+ * Suckage alert:
+ * Before R2 of the architecture there was no way to see if a
+ * performance counter interrupt was pending, so we have to run
+ * the performance counter interrupt handler anyway.
+ */
+ if (handle_perf_irq(r2))
+ goto out;
+
+ /*
+ * The same applies to performance counter interrupts. But with the
+ * above we now know that the reason we got here must be a timer
+ * interrupt. Being the paranoiacs we are we check anyway.
+ */
+ if (!r2 || (read_c0_cause() & (1 << 30))) {
+ c0_timer_ack();
+#ifdef CONFIG_MIPS_MT_SMTC
+ if (cpu_data[cpu].vpe_id)
+ goto out;
+ cpu = 0;
+#endif
+ cd = &per_cpu(mips_clockevent_device, cpu);
+ cd->event_handler(cd);
+ }
+
+out:
+ return IRQ_HANDLED;
+}
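
The 1 << 30 test above is the Cause.TI (timer interrupt pending) bit that the R2 architecture revision introduced; pre-R2 cores give no such indication, which is why handle_perf_irq() must be run unconditionally first. Spelled out as a sketch (later trees carry an equivalent CAUSEF_TI definition in mipsregs.h):

	#define CAUSEB_TI	30			/* Cause: TI bit position */
	#define CAUSEF_TI	(1u << CAUSEB_TI)	/* timer interrupt pending, R2+ */

	static inline int c0_compare_int_pending(void)
	{
		return read_c0_cause() & CAUSEF_TI;
	}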
+
+static struct irqaction timer_irqaction = {
+ .handler = timer_interrupt,
+#ifdef CONFIG_MIPS_MT_SMTC
+ .flags = IRQF_DISABLED,
+#else
+ .flags = IRQF_DISABLED | IRQF_PERCPU,
+#endif
+ .name = "timer",
+};
+
static void __init init_mips_clocksource(void)
{
u64 temp;
@@ -356,19 +329,127 @@ static void __init init_mips_clocksource(void)
clocksource_register(&clocksource_mips);
}
-void __init time_init(void)
+void __init __weak plat_time_init(void)
+{
+}
+
+void __init __weak plat_timer_setup(struct irqaction *irq)
+{
+}
+
+#ifdef CONFIG_MIPS_MT_SMTC
+DEFINE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device);
+
+static void smtc_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+}
+
+int dummycnt[NR_CPUS];
+
+static void mips_broadcast(cpumask_t mask)
+{
+ unsigned int cpu;
+
+ for_each_cpu_mask(cpu, mask)
+ smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
+}
+
+static void setup_smtc_dummy_clockevent_device(void)
+{
+	//uint64_t mips_freq = mips_hpt_frequency;
+ unsigned int cpu = smp_processor_id();
+ struct clock_event_device *cd;
+
+ cd = &per_cpu(smtc_dummy_clockevent_device, cpu);
+
+ cd->name = "SMTC";
+ cd->features = CLOCK_EVT_FEAT_DUMMY;
+
+ /* Calculate the min / max delta */
+ cd->mult = 0; //div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
+ cd->shift = 0; //32;
+ cd->max_delta_ns = 0; //clockevent_delta2ns(0x7fffffff, cd);
+ cd->min_delta_ns = 0; //clockevent_delta2ns(0x30, cd);
+
+ cd->rating = 200;
+ cd->irq = 17; //-1;
+// if (cpu)
+// cd->cpumask = CPU_MASK_ALL; // cpumask_of_cpu(cpu);
+// else
+ cd->cpumask = cpumask_of_cpu(cpu);
+
+ cd->set_mode = smtc_set_mode;
+
+ cd->broadcast = mips_broadcast;
+
+ clockevents_register_device(cd);
+}
+#endif
+
+static void mips_event_handler(struct clock_event_device *dev)
{
- if (board_time_init)
- board_time_init();
+}
- if (!rtc_mips_set_mmss)
- rtc_mips_set_mmss = rtc_mips_set_time;
+void __cpuinit mips_clockevent_init(void)
+{
+ uint64_t mips_freq = mips_hpt_frequency;
+ unsigned int cpu = smp_processor_id();
+ struct clock_event_device *cd;
+ unsigned int irq = MIPS_CPU_IRQ_BASE + 7;
- xtime.tv_sec = rtc_mips_get_time();
- xtime.tv_nsec = 0;
+ if (!cpu_has_counter)
+ return;
- set_normalized_timespec(&wall_to_monotonic,
- -xtime.tv_sec, -xtime.tv_nsec);
+#ifdef CONFIG_MIPS_MT_SMTC
+ setup_smtc_dummy_clockevent_device();
+
+ /*
+ * On SMTC we only register VPE0's compare interrupt as clockevent
+ * device.
+ */
+ if (cpu)
+ return;
+#endif
+
+ cd = &per_cpu(mips_clockevent_device, cpu);
+
+ cd->name = "MIPS";
+ cd->features = CLOCK_EVT_FEAT_ONESHOT;
+
+ /* Calculate the min / max delta */
+ cd->mult = div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
+ cd->shift = 32;
+ cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
+ cd->min_delta_ns = clockevent_delta2ns(0x30, cd);
+
+ cd->rating = 300;
+ cd->irq = irq;
+#ifdef CONFIG_MIPS_MT_SMTC
+ cd->cpumask = CPU_MASK_ALL;
+#else
+ cd->cpumask = cpumask_of_cpu(cpu);
+#endif
+ cd->set_next_event = mips_next_event;
+ cd->set_mode = mips_set_mode;
+ cd->event_handler = mips_event_handler;
+
+ clockevents_register_device(cd);
+
+ if (!cp0_timer_irq_installed) {
+#ifdef CONFIG_MIPS_MT_SMTC
+#define CPUCTR_IMASKBIT (0x100 << cp0_compare_irq)
+ setup_irq_smtc(irq, &timer_irqaction, CPUCTR_IMASKBIT);
+#else
+ setup_irq(irq, &timer_irqaction);
+#endif /* CONFIG_MIPS_MT_SMTC */
+ cp0_timer_irq_installed = 1;
+ }
+}
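
The mult/shift pair programmed above is the generic clockevents fixed-point conversion: with shift = 32, mult is roughly freq * 2^32 / NSEC_PER_SEC, and set_next_event() receives its delta already converted as cycles = ns * mult >> shift. A worked standalone example for an assumed 100 MHz counter:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t freq = 100000000;			/* assumed 100 MHz counter */
		uint64_t mult = (freq << 32) / 1000000000;	/* cf. div_sc(freq, NSEC_PER_SEC, 32) */
		uint64_t delta_ns = 1000000;			/* ask for an event 1 ms out */

		/* expect ~100000 cycles for 1 ms at 100 MHz */
		printf("%llu cycles\n",
		       (unsigned long long)((delta_ns * mult) >> 32));
		return 0;
	}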
+
+void __init time_init(void)
+{
+ plat_time_init();
/* Choose appropriate high precision timer routines. */
if (!cpu_has_counter && !clocksource_mips.read)
@@ -391,11 +472,6 @@ void __init time_init(void)
/* Calculate cache parameters. */
cycles_per_jiffy =
(mips_hpt_frequency + HZ / 2) / HZ;
- /*
- * This sets up the high precision
- * timer for the first interrupt.
- */
- c0_hpt_timer_init();
}
}
if (!mips_hpt_frequency)
@@ -405,6 +481,10 @@ void __init time_init(void)
printk("Using %u.%03u MHz high precision timer.\n",
((mips_hpt_frequency + 500) / 1000) / 1000,
((mips_hpt_frequency + 500) / 1000) % 1000);
+
+#ifdef CONFIG_IRQ_CPU
+ setup_irq(MIPS_CPU_IRQ_BASE + 7, &timer_irqaction);
+#endif
}
if (!mips_timer_ack)
@@ -425,56 +505,5 @@ void __init time_init(void)
plat_timer_setup(&timer_irqaction);
init_mips_clocksource();
+ mips_clockevent_init();
}
-
-#define FEBRUARY 2
-#define STARTOFTIME 1970
-#define SECDAY 86400L
-#define SECYR (SECDAY * 365)
-#define leapyear(y) ((!((y) % 4) && ((y) % 100)) || !((y) % 400))
-#define days_in_year(y) (leapyear(y) ? 366 : 365)
-#define days_in_month(m) (month_days[(m) - 1])
-
-static int month_days[12] = {
- 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
-};
-
-void to_tm(unsigned long tim, struct rtc_time *tm)
-{
- long hms, day, gday;
- int i;
-
- gday = day = tim / SECDAY;
- hms = tim % SECDAY;
-
- /* Hours, minutes, seconds are easy */
- tm->tm_hour = hms / 3600;
- tm->tm_min = (hms % 3600) / 60;
- tm->tm_sec = (hms % 3600) % 60;
-
- /* Number of years in days */
- for (i = STARTOFTIME; day >= days_in_year(i); i++)
- day -= days_in_year(i);
- tm->tm_year = i;
-
- /* Number of months in days left */
- if (leapyear(tm->tm_year))
- days_in_month(FEBRUARY) = 29;
- for (i = 1; day >= days_in_month(i); i++)
- day -= days_in_month(i);
- days_in_month(FEBRUARY) = 28;
- tm->tm_mon = i - 1; /* tm_mon starts from 0 to 11 */
-
- /* Days are what is left over (+1) from all that. */
- tm->tm_mday = day + 1;
-
- /*
- * Determine the day of week
- */
- tm->tm_wday = (gday + 4) % 7; /* 1970/1/1 was Thursday */
-}
-
-EXPORT_SYMBOL(rtc_lock);
-EXPORT_SYMBOL(to_tm);
-EXPORT_SYMBOL(rtc_mips_set_time);
-EXPORT_SYMBOL(rtc_mips_get_time);
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index ce277cb34dd..632bce1bf42 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -295,7 +295,8 @@ void show_regs(struct pt_regs *regs)
if (1 <= cause && cause <= 5)
printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);
- printk("PrId : %08x\n", read_c0_prid());
+ printk("PrId : %08x (%s)\n", read_c0_prid(),
+ cpu_name_string());
}
void show_registers(struct pt_regs *regs)
@@ -606,6 +607,8 @@ asmlinkage void do_ov(struct pt_regs *regs)
*/
asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
{
+ siginfo_t info;
+
die_if_kernel("FP exception in kernel code", regs);
if (fcr31 & FPU_CSR_UNI_X) {
@@ -625,7 +628,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
lose_fpu(1);
/* Run the emulator */
- sig = fpu_emulator_cop1Handler (regs, &current->thread.fpu, 1);
+ sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1);
/*
* We can't allow the emulated instruction to leave any of
@@ -641,9 +644,22 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
force_sig(sig, current);
return;
- }
-
- force_sig(SIGFPE, current);
+ } else if (fcr31 & FPU_CSR_INV_X)
+ info.si_code = FPE_FLTINV;
+ else if (fcr31 & FPU_CSR_DIV_X)
+ info.si_code = FPE_FLTDIV;
+ else if (fcr31 & FPU_CSR_OVF_X)
+ info.si_code = FPE_FLTOVF;
+ else if (fcr31 & FPU_CSR_UDF_X)
+ info.si_code = FPE_FLTUND;
+ else if (fcr31 & FPU_CSR_INE_X)
+ info.si_code = FPE_FLTRES;
+ else
+ info.si_code = __SI_FAULT;
+ info.si_signo = SIGFPE;
+ info.si_errno = 0;
+ info.si_addr = (void __user *) regs->cp0_epc;
+ force_sig_info(SIGFPE, &info, current);
}
asmlinkage void do_bp(struct pt_regs *regs)
@@ -775,7 +791,7 @@ static void mt_ase_fp_affinity(void)
cpus_and(tmask, current->thread.user_cpus_allowed,
mt_fpu_cpumask);
set_cpus_allowed(current, tmask);
- current->thread.mflags |= MF_FPUBOUND;
+ set_thread_flag(TIF_FPUBOUND);
}
}
#endif /* CONFIG_MIPS_MT_FPAFF */
@@ -939,7 +955,7 @@ asmlinkage void do_reserved(struct pt_regs *regs)
*/
static inline void parity_protection_init(void)
{
- switch (current_cpu_data.cputype) {
+ switch (current_cpu_type()) {
case CPU_24K:
case CPU_34K:
case CPU_5KC:
@@ -1035,19 +1051,11 @@ void ejtag_exception_handler(struct pt_regs *regs)
/*
* NMI exception handler.
*/
-void nmi_exception_handler(struct pt_regs *regs)
+NORET_TYPE void ATTRIB_NORET nmi_exception_handler(struct pt_regs *regs)
{
-#ifdef CONFIG_MIPS_MT_SMTC
- unsigned long dvpret = dvpe();
bust_spinlocks(1);
printk("NMI taken!!!!\n");
- mips_mt_regdump(dvpret);
-#else
- bust_spinlocks(1);
- printk("NMI taken!!!!\n");
-#endif /* CONFIG_MIPS_MT_SMTC */
die("NMI", regs);
- while(1) ;
}
#define VECTORSPACING 0x100 /* for EI/VI mode */
@@ -1068,8 +1076,8 @@ void *set_except_vector(int n, void *addr)
exception_handlers[n] = handler;
if (n == 0 && cpu_has_divec) {
- *(volatile u32 *)(ebase + 0x200) = 0x08000000 |
- (0x03ffffff & (handler >> 2));
+ *(u32 *)(ebase + 0x200) = 0x08000000 |
+ (0x03ffffff & (handler >> 2));
flush_icache_range(ebase + 0x200, ebase + 0x204);
}
return (void *)old_handler;
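
The constants stored at ebase + 0x200 hand-assemble a MIPS jump: 0x08000000 is the j opcode (000010 in the top six bits) and the low 26 bits hold the word index of the target. The same encoding written out as a readability sketch:

	/* Build a MIPS "j target" instruction word, as done above. */
	static u32 mips_build_j(unsigned long target)
	{
		/* instruction index = target address bits 27..2 */
		return 0x08000000 | ((target >> 2) & 0x03ffffff);
	}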
@@ -1158,11 +1166,11 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
if (cpu_has_veic) {
if (board_bind_eic_interrupt)
- board_bind_eic_interrupt (n, srs);
+ board_bind_eic_interrupt(n, srs);
} else if (cpu_has_vint) {
/* SRSMap is only defined if shadow sets are implemented */
if (mips_srs_max() > 1)
- change_c0_srsmap (0xf << n*4, srs << n*4);
+ change_c0_srsmap(0xf << n*4, srs << n*4);
}
if (srs == 0) {
@@ -1191,10 +1199,10 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
* Sigh... panicking won't help as the console
* is probably not configured :(
*/
- panic ("VECTORSPACING too small");
+ panic("VECTORSPACING too small");
}
- memcpy (b, &except_vec_vi, handler_len);
+ memcpy(b, &except_vec_vi, handler_len);
#ifdef CONFIG_MIPS_MT_SMTC
BUG_ON(n > 7); /* Vector index %d exceeds SMTC maximum. */
@@ -1363,9 +1371,9 @@ void __init per_cpu_trap_init(void)
#endif /* CONFIG_MIPS_MT_SMTC */
if (cpu_has_veic || cpu_has_vint) {
- write_c0_ebase (ebase);
+ write_c0_ebase(ebase);
/* Setting vector spacing enables EI/VI mode */
- change_c0_intctl (0x3e0, VECTORSPACING);
+ change_c0_intctl(0x3e0, VECTORSPACING);
}
if (cpu_has_divec) {
if (cpu_has_mipsmt) {
@@ -1383,8 +1391,8 @@ void __init per_cpu_trap_init(void)
* o read IntCtl.IPPCI to determine the performance counter interrupt
*/
if (cpu_has_mips_r2) {
- cp0_compare_irq = (read_c0_intctl () >> 29) & 7;
- cp0_perfcount_irq = (read_c0_intctl () >> 26) & 7;
+ cp0_compare_irq = (read_c0_intctl() >> 29) & 7;
+ cp0_perfcount_irq = (read_c0_intctl() >> 26) & 7;
if (cp0_perfcount_irq == cp0_compare_irq)
cp0_perfcount_irq = -1;
} else {
@@ -1422,14 +1430,17 @@ void __init per_cpu_trap_init(void)
}
/* Install CPU exception handler */
-void __init set_handler (unsigned long offset, void *addr, unsigned long size)
+void __init set_handler(unsigned long offset, void *addr, unsigned long size)
{
memcpy((void *)(ebase + offset), addr, size);
flush_icache_range(ebase + offset, ebase + offset + size);
}
+static char panic_null_cerr[] __initdata =
+ "Trying to set NULL cache error exception handler";
+
/* Install uncached CPU exception handler */
-void __init set_uncached_handler (unsigned long offset, void *addr, unsigned long size)
+void __init set_uncached_handler(unsigned long offset, void *addr, unsigned long size)
{
#ifdef CONFIG_32BIT
unsigned long uncached_ebase = KSEG1ADDR(ebase);
@@ -1438,6 +1449,9 @@ void __init set_uncached_handler (unsigned long offset, void *addr, unsigned lon
unsigned long uncached_ebase = TO_UNCAC(ebase);
#endif
+ if (!addr)
+ panic(panic_null_cerr);
+
memcpy((void *)(uncached_ebase + offset), addr, size);
}
@@ -1457,7 +1471,7 @@ void __init trap_init(void)
unsigned long i;
if (cpu_has_veic || cpu_has_vint)
- ebase = (unsigned long) alloc_bootmem_low_pages (0x200 + VECTORSPACING*64);
+ ebase = (unsigned long) alloc_bootmem_low_pages(0x200 + VECTORSPACING*64);
else
ebase = CAC_BASE;
@@ -1483,7 +1497,7 @@ void __init trap_init(void)
* destination.
*/
if (cpu_has_ejtag && board_ejtag_handler_setup)
- board_ejtag_handler_setup ();
+ board_ejtag_handler_setup();
/*
* Only some CPUs have the watch exceptions.
@@ -1536,8 +1550,8 @@ void __init trap_init(void)
set_except_vector(12, handle_ov);
set_except_vector(13, handle_tr);
- if (current_cpu_data.cputype == CPU_R6000 ||
- current_cpu_data.cputype == CPU_R6000A) {
+ if (current_cpu_type() == CPU_R6000 ||
+ current_cpu_type() == CPU_R6000A) {
/*
* The R6000 is the only R-series CPU that features a machine
* check exception (similar to the R4000 cache error) and
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index 8b9c34ffae1..c327b21bca8 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -101,16 +101,14 @@ static u32 unaligned_action;
#endif
extern void show_registers(struct pt_regs *regs);
-static inline int emulate_load_store_insn(struct pt_regs *regs,
- void __user *addr, unsigned int __user *pc,
- unsigned long **regptr, unsigned long *newvalue)
+static void emulate_load_store_insn(struct pt_regs *regs,
+ void __user *addr, unsigned int __user *pc)
{
union mips_instruction insn;
unsigned long value;
unsigned int res;
regs->regs[0] = 0;
- *regptr=NULL;
/*
* This load never faults.
@@ -179,8 +177,8 @@ static inline int emulate_load_store_insn(struct pt_regs *regs,
: "r" (addr), "i" (-EFAULT));
if (res)
goto fault;
- *newvalue = value;
- *regptr = &regs->regs[insn.i_format.rt];
+ compute_return_epc(regs);
+ regs->regs[insn.i_format.rt] = value;
break;
case lw_op:
@@ -209,8 +207,8 @@ static inline int emulate_load_store_insn(struct pt_regs *regs,
: "r" (addr), "i" (-EFAULT));
if (res)
goto fault;
- *newvalue = value;
- *regptr = &regs->regs[insn.i_format.rt];
+ compute_return_epc(regs);
+ regs->regs[insn.i_format.rt] = value;
break;
case lhu_op:
@@ -243,8 +241,8 @@ static inline int emulate_load_store_insn(struct pt_regs *regs,
: "r" (addr), "i" (-EFAULT));
if (res)
goto fault;
- *newvalue = value;
- *regptr = &regs->regs[insn.i_format.rt];
+ compute_return_epc(regs);
+ regs->regs[insn.i_format.rt] = value;
break;
case lwu_op:
@@ -283,8 +281,8 @@ static inline int emulate_load_store_insn(struct pt_regs *regs,
: "r" (addr), "i" (-EFAULT));
if (res)
goto fault;
- *newvalue = value;
- *regptr = &regs->regs[insn.i_format.rt];
+ compute_return_epc(regs);
+ regs->regs[insn.i_format.rt] = value;
break;
#endif /* CONFIG_64BIT */
@@ -325,8 +323,8 @@ static inline int emulate_load_store_insn(struct pt_regs *regs,
: "r" (addr), "i" (-EFAULT));
if (res)
goto fault;
- *newvalue = value;
- *regptr = &regs->regs[insn.i_format.rt];
+ compute_return_epc(regs);
+ regs->regs[insn.i_format.rt] = value;
break;
#endif /* CONFIG_64BIT */
@@ -367,6 +365,7 @@ static inline int emulate_load_store_insn(struct pt_regs *regs,
: "r" (value), "r" (addr), "i" (-EFAULT));
if (res)
goto fault;
+ compute_return_epc(regs);
break;
case sw_op:
@@ -397,6 +396,7 @@ static inline int emulate_load_store_insn(struct pt_regs *regs,
: "r" (value), "r" (addr), "i" (-EFAULT));
if (res)
goto fault;
+ compute_return_epc(regs);
break;
case sd_op:
@@ -435,6 +435,7 @@ static inline int emulate_load_store_insn(struct pt_regs *regs,
: "r" (value), "r" (addr), "i" (-EFAULT));
if (res)
goto fault;
+ compute_return_epc(regs);
break;
#endif /* CONFIG_64BIT */
@@ -473,34 +474,31 @@ static inline int emulate_load_store_insn(struct pt_regs *regs,
unaligned_instructions++;
#endif
- return 0;
+ return;
fault:
/* Did we have an exception handler installed? */
if (fixup_exception(regs))
- return 1;
+ return;
- die_if_kernel ("Unhandled kernel unaligned access", regs);
+ die_if_kernel("Unhandled kernel unaligned access", regs);
send_sig(SIGSEGV, current, 1);
- return 0;
+ return;
sigbus:
die_if_kernel("Unhandled kernel unaligned access", regs);
send_sig(SIGBUS, current, 1);
- return 0;
+ return;
sigill:
die_if_kernel("Unhandled kernel unaligned access or invalid instruction", regs);
send_sig(SIGILL, current, 1);
-
- return 0;
}
asmlinkage void do_ade(struct pt_regs *regs)
{
- unsigned long *regptr, newval;
extern int do_dsemulret(struct pt_regs *);
unsigned int __user *pc;
mm_segment_t seg;
@@ -524,7 +522,7 @@ asmlinkage void do_ade(struct pt_regs *regs)
goto sigbus;
pc = (unsigned int __user *) exception_epc(regs);
- if (user_mode(regs) && (current->thread.mflags & MF_FIXADE) == 0)
+ if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
goto sigbus;
if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
goto sigbus;
@@ -538,16 +536,7 @@ asmlinkage void do_ade(struct pt_regs *regs)
seg = get_fs();
if (!user_mode(regs))
set_fs(KERNEL_DS);
- if (!emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc,
- &regptr, &newval)) {
- compute_return_epc(regs);
- /*
- * Now that branch is evaluated, update the dest
- * register if necessary
- */
- if (regptr)
- *regptr = newval;
- }
+ emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
set_fs(seg);
return;
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 9b9992cd562..84f9a4cc6f2 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -6,163 +6,202 @@
OUTPUT_ARCH(mips)
ENTRY(kernel_entry)
jiffies = JIFFIES;
+
SECTIONS
{
#ifdef CONFIG_BOOT_ELF64
- /* Read-only sections, merged into text segment: */
- /* . = 0xc000000000000000; */
+ /* Read-only sections, merged into text segment: */
+ /* . = 0xc000000000000000; */
- /* This is the value for an Origin kernel, taken from an IRIX kernel. */
- /* . = 0xc00000000001c000; */
+ /* This is the value for an Origin kernel, taken from an IRIX kernel. */
+ /* . = 0xc00000000001c000; */
- /* Set the vaddr for the text segment to a value
- >= 0xa800 0000 0001 9000 if no symmon is going to configured
- >= 0xa800 0000 0030 0000 otherwise */
+ /* Set the vaddr for the text segment to a value
+	 * >= 0xa800 0000 0001 9000 if no symmon is going to be configured
+ * >= 0xa800 0000 0030 0000 otherwise
+ */
- /* . = 0xa800000000300000; */
- /* . = 0xa800000000300000; */
- . = 0xffffffff80300000;
+ /* . = 0xa800000000300000; */
+ /* . = 0xa800000000300000; */
+ . = 0xffffffff80300000;
#endif
- . = LOADADDR;
- /* read-only */
- _text = .; /* Text and read-only data */
- .text : {
- TEXT_TEXT
- SCHED_TEXT
- LOCK_TEXT
- *(.fixup)
- *(.gnu.warning)
- } =0
-
- _etext = .; /* End of text section */
-
- . = ALIGN(16); /* Exception table */
- __start___ex_table = .;
- __ex_table : { *(__ex_table) }
- __stop___ex_table = .;
-
- __start___dbe_table = .; /* Exception table for data bus errors */
- __dbe_table : { *(__dbe_table) }
- __stop___dbe_table = .;
-
- RODATA
-
- /* writeable */
- .data : { /* Data */
- . = . + DATAOFFSET; /* for CONFIG_MAPPED_KERNEL */
- /*
- * This ALIGN is needed as a workaround for a bug a gcc bug upto 4.1 which
- * limits the maximum alignment to at most 32kB and results in the following
- * warning:
- *
- * CC arch/mips/kernel/init_task.o
- * arch/mips/kernel/init_task.c:30: warning: alignment of ‘init_thread_union’
- * is greater than maximum object file alignment. Using 32768
- */
- . = ALIGN(_PAGE_SIZE);
- *(.data.init_task)
-
- DATA_DATA
-
- CONSTRUCTORS
- }
- _gp = . + 0x8000;
- .lit8 : { *(.lit8) }
- .lit4 : { *(.lit4) }
- /* We want the small data sections together, so single-instruction offsets
- can access them all, and initialized data all before uninitialized, so
- we can shorten the on-disk segment size. */
- .sdata : { *(.sdata) }
-
- . = ALIGN(_PAGE_SIZE);
- __nosave_begin = .;
- .data_nosave : { *(.data.nosave) }
- . = ALIGN(_PAGE_SIZE);
- __nosave_end = .;
-
- . = ALIGN(32);
- .data.cacheline_aligned : { *(.data.cacheline_aligned) }
-
- _edata = .; /* End of data section */
-
- /* will be freed after init */
- . = ALIGN(_PAGE_SIZE); /* Init code and data */
- __init_begin = .;
- .init.text : {
- _sinittext = .;
- *(.init.text)
- _einittext = .;
- }
- .init.data : { *(.init.data) }
- . = ALIGN(16);
- __setup_start = .;
- .init.setup : { *(.init.setup) }
- __setup_end = .;
-
- __initcall_start = .;
- .initcall.init : {
- INITCALLS
- }
- __initcall_end = .;
-
- __con_initcall_start = .;
- .con_initcall.init : { *(.con_initcall.init) }
- __con_initcall_end = .;
- SECURITY_INIT
- /* .exit.text is discarded at runtime, not link time, to deal with
- references from .rodata */
- .exit.text : { *(.exit.text) }
- .exit.data : { *(.exit.data) }
+ . = LOADADDR;
+ /* read-only */
+ _text = .; /* Text and read-only data */
+ .text : {
+ TEXT_TEXT
+ SCHED_TEXT
+ LOCK_TEXT
+ *(.fixup)
+ *(.gnu.warning)
+ } =0
+ _etext = .; /* End of text section */
+
+ /* Exception table */
+ . = ALIGN(16);
+ __ex_table : {
+ __start___ex_table = .;
+ *(__ex_table)
+ __stop___ex_table = .;
+ }
+
+ /* Exception table for data bus errors */
+ __dbe_table : {
+ __start___dbe_table = .;
+ *(__dbe_table)
+ __stop___dbe_table = .;
+ }
+ RODATA
+
+ /* writeable */
+ .data : { /* Data */
+ . = . + DATAOFFSET; /* for CONFIG_MAPPED_KERNEL */
+ /*
+	 * This ALIGN is needed as a workaround for a gcc bug up to 4.1 which
+ * limits the maximum alignment to at most 32kB and results in the following
+ * warning:
+ *
+ * CC arch/mips/kernel/init_task.o
+ * arch/mips/kernel/init_task.c:30: warning: alignment of ‘init_thread_union’
+ * is greater than maximum object file alignment. Using 32768
+ */
+ . = ALIGN(_PAGE_SIZE);
+ *(.data.init_task)
+
+ DATA_DATA
+ CONSTRUCTORS
+ }
+ _gp = . + 0x8000;
+ .lit8 : {
+ *(.lit8)
+ }
+ .lit4 : {
+ *(.lit4)
+ }
+ /* We want the small data sections together, so single-instruction offsets
+ can access them all, and initialized data all before uninitialized, so
+ we can shorten the on-disk segment size. */
+ .sdata : {
+ *(.sdata)
+ }
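
The reason for keeping the small sections together, and for the _gp = . + 0x8000 bias above, is that MIPS gp-relative addressing uses a signed 16-bit offset, i.e. a ±32 KiB window around $gp. As a compiler-side sketch (illustrative, not part of this patch), an object small enough for -G 8 lands in .sdata and is reachable in a single instruction:

	static int ticks = 1;	/* <= 8 bytes with -G 8: placed in .sdata */

	int get_ticks(void)
	{
		/* typically compiles to: lw $2, %gp_rel(ticks)($28) */
		return ticks;
	}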
+
+ . = ALIGN(_PAGE_SIZE);
+ .data_nosave : {
+ __nosave_begin = .;
+ *(.data.nosave)
+ }
+ . = ALIGN(_PAGE_SIZE);
+ __nosave_end = .;
+
+ . = ALIGN(32);
+ .data.cacheline_aligned : {
+ *(.data.cacheline_aligned)
+ }
+ _edata = .; /* End of data section */
+
+ /* will be freed after init */
+ . = ALIGN(_PAGE_SIZE); /* Init code and data */
+ __init_begin = .;
+ .init.text : {
+ _sinittext = .;
+ *(.init.text)
+ _einittext = .;
+ }
+ .init.data : {
+ *(.init.data)
+ }
+ . = ALIGN(16);
+ .init.setup : {
+ __setup_start = .;
+ *(.init.setup)
+ __setup_end = .;
+ }
+
+ .initcall.init : {
+ __initcall_start = .;
+ INITCALLS
+ __initcall_end = .;
+ }
+
+ .con_initcall.init : {
+ __con_initcall_start = .;
+ *(.con_initcall.init)
+ __con_initcall_end = .;
+ }
+ SECURITY_INIT
+
+ /* .exit.text is discarded at runtime, not link time, to deal with
+ * references from .rodata
+ */
+ .exit.text : {
+ *(.exit.text)
+ }
+ .exit.data : {
+ *(.exit.data)
+ }
#if defined(CONFIG_BLK_DEV_INITRD)
- . = ALIGN(_PAGE_SIZE);
- __initramfs_start = .;
- .init.ramfs : { *(.init.ramfs) }
- __initramfs_end = .;
+ . = ALIGN(_PAGE_SIZE);
+ .init.ramfs : {
+ __initramfs_start = .;
+ *(.init.ramfs)
+ __initramfs_end = .;
+ }
#endif
- . = ALIGN(_PAGE_SIZE);
- __per_cpu_start = .;
- .data.percpu : { *(.data.percpu) }
- __per_cpu_end = .;
- . = ALIGN(_PAGE_SIZE);
- __init_end = .;
- /* freed after init ends here */
-
- __bss_start = .; /* BSS */
- .sbss : {
- *(.sbss)
- *(.scommon)
- }
- .bss : {
- *(.bss)
- *(COMMON)
- }
- __bss_stop = .;
-
- _end = . ;
-
- /* Sections to be discarded */
- /DISCARD/ : {
- *(.exitcall.exit)
-
- /* ABI crap starts here */
- *(.comment)
- *(.MIPS.options)
- *(.note)
- *(.options)
- *(.pdr)
- *(.reginfo)
- *(.mdebug*)
- }
-
- /* This is the MIPS specific mdebug section. */
- .mdebug : { *(.mdebug) }
-
- STABS_DEBUG
-
- DWARF_DEBUG
-
- /* These must appear regardless of . */
- .gptab.sdata : { *(.gptab.data) *(.gptab.sdata) }
- .gptab.sbss : { *(.gptab.bss) *(.gptab.sbss) }
- .note : { *(.note) }
+ PERCPU(_PAGE_SIZE)
+ . = ALIGN(_PAGE_SIZE);
+ __init_end = .;
+ /* freed after init ends here */
+
+ __bss_start = .; /* BSS */
+ .sbss : {
+ *(.sbss)
+ *(.scommon)
+ }
+ .bss : {
+ *(.bss)
+ *(COMMON)
+ }
+ __bss_stop = .;
+
+ _end = . ;
+
+ /* Sections to be discarded */
+ /DISCARD/ : {
+ *(.exitcall.exit)
+
+ /* ABI crap starts here */
+ *(.MIPS.options)
+ *(.options)
+ *(.pdr)
+ *(.reginfo)
+ }
+
+ /* These mark the ABI of the kernel for debuggers. */
+ .mdebug.abi32 : {
+ KEEP(*(.mdebug.abi32))
+ }
+ .mdebug.abi64 : {
+ KEEP(*(.mdebug.abi64))
+ }
+
+ /* This is the MIPS specific mdebug section. */
+ .mdebug : {
+ *(.mdebug)
+ }
+
+ STABS_DEBUG
+ DWARF_DEBUG
+
+ /* These must appear regardless of . */
+ .gptab.sdata : {
+ *(.gptab.data)
+ *(.gptab.sdata)
+ }
+ .gptab.sbss : {
+ *(.gptab.bss)
+ *(.gptab.sbss)
+ }
+ .note : {
+ *(.note)
+ }
}
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 9e66354dee8..61b729fa054 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -27,7 +27,6 @@
* To load and run, simply cat an SP 'program file' to /dev/vpe1,
* i.e. cat spapp >/dev/vpe1.
*/
-
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
@@ -54,6 +53,7 @@
#include <asm/system.h>
#include <asm/vpe.h>
#include <asm/kspd.h>
+#include <asm/mips_mt.h>
typedef void *vpe_handle;
@@ -64,6 +64,10 @@ typedef void *vpe_handle;
/* If this is set, the section belongs in the init part of the module */
#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
+/*
+ * The number of TCs and VPEs physically available on the core
+ */
+static int hw_tcs, hw_vpes;
static char module_name[] = "vpe";
static int major;
static const int minor = 1; /* fixed for now */
@@ -126,20 +130,17 @@ struct vpe {
/* the list of who wants to know when something major happens */
struct list_head notify;
+
+ unsigned int ntcs;
};
struct tc {
enum tc_state state;
int index;
- /* parent VPE */
- struct vpe *pvpe;
-
- /* The list of TC's with this VPE */
- struct list_head tc;
-
- /* The global list of tc's */
- struct list_head list;
+ struct vpe *pvpe; /* parent VPE */
+ struct list_head tc; /* The list of TC's with this VPE */
+ struct list_head list; /* The global list of tc's */
};
struct {
@@ -154,7 +155,6 @@ struct {
};
static void release_progmem(void *ptr);
-/* static __attribute_used__ void dump_vpe(struct vpe * v); */
extern void save_gp_address(unsigned int secbase, unsigned int rel);
/* get the vpe associated with this minor */
@@ -218,18 +218,17 @@ struct vpe *alloc_vpe(int minor)
/* allocate a tc. At startup only tc0 is running, all other can be halted. */
struct tc *alloc_tc(int index)
{
- struct tc *t;
+ struct tc *tc;
- if ((t = kzalloc(sizeof(struct tc), GFP_KERNEL)) == NULL) {
- return NULL;
- }
+ if ((tc = kzalloc(sizeof(struct tc), GFP_KERNEL)) == NULL)
+ goto out;
- INIT_LIST_HEAD(&t->tc);
- list_add_tail(&t->list, &vpecontrol.tc_list);
+ INIT_LIST_HEAD(&tc->tc);
+ tc->index = index;
+ list_add_tail(&tc->list, &vpecontrol.tc_list);
- t->index = index;
-
- return t;
+out:
+ return tc;
}
/* clean up and free everything */
@@ -664,66 +663,48 @@ static void dump_elfsymbols(Elf_Shdr * sechdrs, unsigned int symindex,
}
#endif
-static void dump_tc(struct tc *t)
-{
- unsigned long val;
-
- settc(t->index);
- printk(KERN_DEBUG "VPE loader: TC index %d targtc %ld "
- "TCStatus 0x%lx halt 0x%lx\n",
- t->index, read_c0_vpecontrol() & VPECONTROL_TARGTC,
- read_tc_c0_tcstatus(), read_tc_c0_tchalt());
-
- printk(KERN_DEBUG " tcrestart 0x%lx\n", read_tc_c0_tcrestart());
- printk(KERN_DEBUG " tcbind 0x%lx\n", read_tc_c0_tcbind());
-
- val = read_c0_vpeconf0();
- printk(KERN_DEBUG " VPEConf0 0x%lx MVP %ld\n", val,
- (val & VPECONF0_MVP) >> VPECONF0_MVP_SHIFT);
-
- printk(KERN_DEBUG " c0 status 0x%lx\n", read_vpe_c0_status());
- printk(KERN_DEBUG " c0 cause 0x%lx\n", read_vpe_c0_cause());
-
- printk(KERN_DEBUG " c0 badvaddr 0x%lx\n", read_vpe_c0_badvaddr());
- printk(KERN_DEBUG " c0 epc 0x%lx\n", read_vpe_c0_epc());
-}
-
-static void dump_tclist(void)
-{
- struct tc *t;
-
- list_for_each_entry(t, &vpecontrol.tc_list, list) {
- dump_tc(t);
- }
-}
-
/* We are prepared so configure and start the VPE... */
static int vpe_run(struct vpe * v)
{
+ unsigned long flags, val, dmt_flag;
struct vpe_notifications *n;
- unsigned long val, dmt_flag;
+ unsigned int vpeflags;
struct tc *t;
/* check we are the Master VPE */
+ local_irq_save(flags);
val = read_c0_vpeconf0();
if (!(val & VPECONF0_MVP)) {
printk(KERN_WARNING
"VPE loader: only Master VPE's are allowed to configure MT\n");
+ local_irq_restore(flags);
+
return -1;
}
- /* disable MT (using dvpe) */
- dvpe();
+ dmt_flag = dmt();
+ vpeflags = dvpe();
if (!list_empty(&v->tc)) {
if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) {
- printk(KERN_WARNING "VPE loader: TC %d is already in use.\n",
- t->index);
+ evpe(vpeflags);
+ emt(dmt_flag);
+ local_irq_restore(flags);
+
+ printk(KERN_WARNING
+ "VPE loader: TC %d is already in use.\n",
+ t->index);
return -ENOEXEC;
}
} else {
- printk(KERN_WARNING "VPE loader: No TC's associated with VPE %d\n",
+ evpe(vpeflags);
+ emt(dmt_flag);
+ local_irq_restore(flags);
+
+ printk(KERN_WARNING
+ "VPE loader: No TC's associated with VPE %d\n",
v->minor);
+
return -ENOEXEC;
}
@@ -734,21 +715,20 @@ static int vpe_run(struct vpe * v)
/* should check it is halted, and not activated */
if ((read_tc_c0_tcstatus() & TCSTATUS_A) || !(read_tc_c0_tchalt() & TCHALT_H)) {
- printk(KERN_WARNING "VPE loader: TC %d is already doing something!\n",
+ evpe(vpeflags);
+ emt(dmt_flag);
+ local_irq_restore(flags);
+
+ printk(KERN_WARNING "VPE loader: TC %d is already active!\n",
t->index);
- dump_tclist();
+
return -ENOEXEC;
}
- /*
- * Disable multi-threaded execution whilst we activate, clear the
- * halt bit and bound the tc to the other VPE...
- */
- dmt_flag = dmt();
-
/* Write the address we want it to start running from in the TCPC register. */
write_tc_c0_tcrestart((unsigned long)v->__start);
write_tc_c0_tccontext((unsigned long)0);
+
/*
* Mark the TC as activated, not interrupt exempt and not dynamically
* allocatable
@@ -764,15 +744,15 @@ static int vpe_run(struct vpe * v)
* here... Or set $a3 to zero and define DFLT_STACK_SIZE and
* DFLT_HEAP_SIZE when you compile your program
*/
- mttgpr(7, physical_memsize);
-
+ mttgpr(6, v->ntcs);
+ mttgpr(7, physical_memsize);
/* set up VPE1 */
/*
* bind the TC to VPE 1 as late as possible so we only have the final
* VPE registers to set up, and so an EJTAG probe can trigger on it
*/
- write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | v->minor);
+ write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | 1);
write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~(VPECONF0_VPA));
@@ -794,15 +774,16 @@ static int vpe_run(struct vpe * v)
/* take system out of configuration state */
clear_c0_mvpcontrol(MVPCONTROL_VPC);
- /* now safe to re-enable multi-threading */
- emt(dmt_flag);
-
- /* set it running */
+#ifdef CONFIG_SMP
evpe(EVPE_ENABLE);
+#else
+ evpe(vpeflags);
+#endif
+ emt(dmt_flag);
+ local_irq_restore(flags);
- list_for_each_entry(n, &v->notify, list) {
- n->start(v->minor);
- }
+ list_for_each_entry(n, &v->notify, list)
+ n->start(minor);
return 0;
}
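
On the mttgpr(6, ...) / mttgpr(7, ...) calls in vpe_run() above: GPRs 6 and 7 are $a2 and $a3 in the o32 convention, so the freshly started TC sees the TC count and the physical memory size as the third and fourth C arguments of its entry point. A hypothetical SP-side view (names assumed for illustration, not defined by this patch):

	/* Hypothetical AP/SP entry point: $a2 = ntcs, $a3 = physical_memsize. */
	void __start(unsigned long a0, unsigned long a1,
		     unsigned long ntcs, unsigned long memsize)
	{
		/* size the heap/stack from memsize; use ntcs to spawn workers */
	}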
@@ -955,8 +936,18 @@ static int vpe_elfload(struct vpe * v)
}
} else {
- for (i = 0; i < hdr->e_shnum; i++) {
+ struct elf_phdr *phdr = (struct elf_phdr *) ((char *)hdr + hdr->e_phoff);
+
+ for (i = 0; i < hdr->e_phnum; i++) {
+ if (phdr->p_type != PT_LOAD)
+ continue;
+
+ memcpy((void *)phdr->p_vaddr, (char *)hdr + phdr->p_offset, phdr->p_filesz);
+ memset((void *)phdr->p_vaddr + phdr->p_filesz, 0, phdr->p_memsz - phdr->p_filesz);
+ phdr++;
+ }
+ for (i = 0; i < hdr->e_shnum; i++) {
/* Internal symbols and strings. */
if (sechdrs[i].sh_type == SHT_SYMTAB) {
symindex = i;
@@ -967,39 +958,6 @@ static int vpe_elfload(struct vpe * v)
magic symbols */
sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset;
}
-
- /* filter sections we dont want in the final image */
- if (!(sechdrs[i].sh_flags & SHF_ALLOC) ||
- (sechdrs[i].sh_type == SHT_MIPS_REGINFO)) {
- printk( KERN_DEBUG " ignoring section, "
- "name %s type %x address 0x%x \n",
- secstrings + sechdrs[i].sh_name,
- sechdrs[i].sh_type, sechdrs[i].sh_addr);
- continue;
- }
-
- if (sechdrs[i].sh_addr < (unsigned int)v->load_addr) {
- printk( KERN_WARNING "VPE loader: "
- "fully linked image has invalid section, "
- "name %s type %x address 0x%x, before load "
- "address of 0x%x\n",
- secstrings + sechdrs[i].sh_name,
- sechdrs[i].sh_type, sechdrs[i].sh_addr,
- (unsigned int)v->load_addr);
- return -ENOEXEC;
- }
-
- printk(KERN_DEBUG " copying section sh_name %s, sh_addr 0x%x "
- "size 0x%x0 from x%p\n",
- secstrings + sechdrs[i].sh_name, sechdrs[i].sh_addr,
- sechdrs[i].sh_size, hdr + sechdrs[i].sh_offset);
-
- if (sechdrs[i].sh_type != SHT_NOBITS)
- memcpy((void *)sechdrs[i].sh_addr,
- (char *)hdr + sechdrs[i].sh_offset,
- sechdrs[i].sh_size);
- else
- memset((void *)sechdrs[i].sh_addr, 0, sechdrs[i].sh_size);
}
}
@@ -1024,23 +982,15 @@ static int vpe_elfload(struct vpe * v)
return 0;
}
-__attribute_used__ void dump_vpe(struct vpe * v)
-{
- struct tc *t;
-
- settc(v->minor);
-
- printk(KERN_DEBUG "VPEControl 0x%lx\n", read_vpe_c0_vpecontrol());
- printk(KERN_DEBUG "VPEConf0 0x%lx\n", read_vpe_c0_vpeconf0());
-
- list_for_each_entry(t, &vpecontrol.tc_list, list)
- dump_tc(t);
-}
-
static void cleanup_tc(struct tc *tc)
{
+ unsigned long flags;
+ unsigned int mtflags, vpflags;
int tmp;
+ local_irq_save(flags);
+ mtflags = dmt();
+ vpflags = dvpe();
/* Put MVPE's into 'configuration state' */
set_c0_mvpcontrol(MVPCONTROL_VPC);
@@ -1055,9 +1005,12 @@ static void cleanup_tc(struct tc *tc)
write_tc_c0_tchalt(TCHALT_H);
/* bind it to anything other than VPE1 */
- write_tc_c0_tcbind(read_tc_c0_tcbind() & ~TCBIND_CURVPE); // | TCBIND_CURVPE
+// write_tc_c0_tcbind(read_tc_c0_tcbind() & ~TCBIND_CURVPE); // | TCBIND_CURVPE
clear_c0_mvpcontrol(MVPCONTROL_VPC);
+ evpe(vpflags);
+ emt(mtflags);
+ local_irq_restore(flags);
}
static int getcwd(char *buff, int size)
@@ -1068,7 +1021,7 @@ static int getcwd(char *buff, int size)
old_fs = get_fs();
set_fs(KERNEL_DS);
- ret = sys_getcwd(buff,size);
+ ret = sys_getcwd(buff, size);
set_fs(old_fs);
@@ -1078,36 +1031,32 @@ static int getcwd(char *buff, int size)
/* checks VPE is unused and gets ready to load program */
static int vpe_open(struct inode *inode, struct file *filp)
{
- int minor, ret;
enum vpe_state state;
- struct vpe *v;
struct vpe_notifications *not;
+ struct vpe *v;
+ int ret;
- /* assume only 1 device at the mo. */
- if ((minor = iminor(inode)) != 1) {
+ if (minor != iminor(inode)) {
+ /* assume only 1 device at the moment. */
printk(KERN_WARNING "VPE loader: only vpe1 is supported\n");
return -ENODEV;
}
- if ((v = get_vpe(minor)) == NULL) {
+ if ((v = get_vpe(tclimit)) == NULL) {
printk(KERN_WARNING "VPE loader: unable to get vpe\n");
return -ENODEV;
}
state = xchg(&v->state, VPE_STATE_INUSE);
if (state != VPE_STATE_UNUSED) {
- dvpe();
-
printk(KERN_DEBUG "VPE loader: tc in use dumping regs\n");
- dump_tc(get_tc(minor));
-
list_for_each_entry(not, &v->notify, list) {
- not->stop(minor);
+ not->stop(tclimit);
}
release_progmem(v->load_addr);
- cleanup_tc(get_tc(minor));
+ cleanup_tc(get_tc(tclimit));
}
/* this of-course trashes what was there before... */
@@ -1134,26 +1083,25 @@ static int vpe_open(struct inode *inode, struct file *filp)
v->shared_ptr = NULL;
v->__start = 0;
+
return 0;
}
static int vpe_release(struct inode *inode, struct file *filp)
{
- int minor, ret = 0;
struct vpe *v;
Elf_Ehdr *hdr;
+ int ret = 0;
- minor = iminor(inode);
- if ((v = get_vpe(minor)) == NULL)
+ v = get_vpe(tclimit);
+ if (v == NULL)
return -ENODEV;
- // simple case of fire and forget, so tell the VPE to run...
-
hdr = (Elf_Ehdr *) v->pbuffer;
if (memcmp(hdr->e_ident, ELFMAG, 4) == 0) {
- if (vpe_elfload(v) >= 0)
+ if (vpe_elfload(v) >= 0) {
vpe_run(v);
- else {
+ } else {
printk(KERN_WARNING "VPE loader: ELF load failed.\n");
ret = -ENOEXEC;
}
@@ -1180,12 +1128,14 @@ static int vpe_release(struct inode *inode, struct file *filp)
static ssize_t vpe_write(struct file *file, const char __user * buffer,
size_t count, loff_t * ppos)
{
- int minor;
size_t ret = count;
struct vpe *v;
- minor = iminor(file->f_path.dentry->d_inode);
- if ((v = get_vpe(minor)) == NULL)
+ if (iminor(file->f_path.dentry->d_inode) != minor)
+ return -ENODEV;
+
+ v = get_vpe(tclimit);
+ if (v == NULL)
return -ENODEV;
if (v->pbuffer == NULL) {
@@ -1367,62 +1317,173 @@ static void kspd_sp_exit( int sp_id)
}
#endif
-static struct device *vpe_dev;
+static ssize_t store_kill(struct class_device *dev, const char *buf, size_t len)
+{
+ struct vpe *vpe = get_vpe(tclimit);
+ struct vpe_notifications *not;
+
+ list_for_each_entry(not, &vpe->notify, list) {
+ not->stop(tclimit);
+ }
+
+ release_progmem(vpe->load_addr);
+ cleanup_tc(get_tc(tclimit));
+ vpe_stop(vpe);
+ vpe_free(vpe);
+
+ return len;
+}
+
+static ssize_t show_ntcs(struct class_device *cd, char *buf)
+{
+ struct vpe *vpe = get_vpe(tclimit);
+
+ return sprintf(buf, "%d\n", vpe->ntcs);
+}
+
+static ssize_t store_ntcs(struct class_device *dev, const char *buf, size_t len)
+{
+ struct vpe *vpe = get_vpe(tclimit);
+ unsigned long new;
+ char *endp;
+
+ new = simple_strtoul(buf, &endp, 0);
+ if (endp == buf)
+ goto out_einval;
+
+ if (new == 0 || new > (hw_tcs - tclimit))
+ goto out_einval;
+
+ vpe->ntcs = new;
+
+ return len;
+
+out_einval:
+	return -EINVAL;
+}
+
+static struct class_device_attribute vpe_class_attributes[] = {
+ __ATTR(kill, S_IWUSR, NULL, store_kill),
+ __ATTR(ntcs, S_IRUGO | S_IWUSR, show_ntcs, store_ntcs),
+ {}
+};
+
+static void vpe_class_device_release(struct class_device *cd)
+{
+ kfree(cd);
+}
+
+struct class vpe_class = {
+ .name = "vpe",
+ .owner = THIS_MODULE,
+ .release = vpe_class_device_release,
+ .class_dev_attrs = vpe_class_attributes,
+};
+
+struct class_device vpe_device;
static int __init vpe_module_init(void)
{
+ unsigned int mtflags, vpflags;
+ unsigned long flags, val;
struct vpe *v = NULL;
- struct device *dev;
struct tc *t;
- unsigned long val;
- int i, err;
+ int tc, err;
if (!cpu_has_mipsmt) {
printk("VPE loader: not a MIPS MT capable processor\n");
return -ENODEV;
}
+ if (vpelimit == 0) {
+		printk(KERN_WARNING "No VPEs reserved for AP/SP, not "
+		       "initializing VPE loader.\nPass maxvpes=<n> as a "
+		       "kernel argument\n");
+
+ return -ENODEV;
+ }
+
+ if (tclimit == 0) {
+		printk(KERN_WARNING "No TCs reserved for AP/SP, not "
+		       "initializing VPE loader.\nPass maxtcs=<n> as a "
+		       "kernel argument\n");
+
+ return -ENODEV;
+ }
+
major = register_chrdev(0, module_name, &vpe_fops);
if (major < 0) {
printk("VPE loader: unable to register character device\n");
return major;
}
- dev = device_create(mt_class, NULL, MKDEV(major, minor),
- "tc%d", minor);
- if (IS_ERR(dev)) {
- err = PTR_ERR(dev);
+ err = class_register(&vpe_class);
+ if (err) {
+ printk(KERN_ERR "vpe_class registration failed\n");
goto out_chrdev;
}
- vpe_dev = dev;
- dmt();
- dvpe();
+ class_device_initialize(&vpe_device);
+	vpe_device.class = &vpe_class;
+	vpe_device.parent = NULL;
+ strlcpy(vpe_device.class_id, "vpe1", BUS_ID_SIZE);
+ vpe_device.devt = MKDEV(major, minor);
+ err = class_device_add(&vpe_device);
+ if (err) {
+ printk(KERN_ERR "Adding vpe_device failed\n");
+ goto out_class;
+ }
+
+ local_irq_save(flags);
+ mtflags = dmt();
+ vpflags = dvpe();
/* Put MVPE's into 'configuration state' */
set_c0_mvpcontrol(MVPCONTROL_VPC);
/* dump_mtregs(); */
-
val = read_c0_mvpconf0();
- for (i = 0; i < ((val & MVPCONF0_PTC) + 1); i++) {
- t = alloc_tc(i);
+ hw_tcs = (val & MVPCONF0_PTC) + 1;
+ hw_vpes = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
+
+ for (tc = tclimit; tc < hw_tcs; tc++) {
+ /*
+		 * Must temporarily re-enable multithreading; otherwise, if
+		 * we were to reschedule or send IPIs or similar, we might hang.
+ */
+ clear_c0_mvpcontrol(MVPCONTROL_VPC);
+ evpe(vpflags);
+ emt(mtflags);
+ local_irq_restore(flags);
+ t = alloc_tc(tc);
+ if (!t) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ local_irq_save(flags);
+ mtflags = dmt();
+ vpflags = dvpe();
+ set_c0_mvpcontrol(MVPCONTROL_VPC);
/* VPE's */
- if (i < ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1) {
- settc(i);
+ if (tc < hw_tcs) {
+ settc(tc);
- if ((v = alloc_vpe(i)) == NULL) {
+ if ((v = alloc_vpe(tc)) == NULL) {
printk(KERN_WARNING "VPE: unable to allocate VPE\n");
- return -ENODEV;
+
+ goto out_reenable;
}
+ v->ntcs = hw_tcs - tclimit;
+
/* add the tc to the list of this vpe's tc's. */
list_add(&t->tc, &v->tc);
/* deactivate all but vpe0 */
- if (i != 0) {
+ if (tc >= tclimit) {
unsigned long tmp = read_vpe_c0_vpeconf0();
tmp &= ~VPECONF0_VPA;
@@ -1435,7 +1496,7 @@ static int __init vpe_module_init(void)
/* disable multi-threading with TC's */
write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE);
- if (i != 0) {
+ if (tc >= vpelimit) {
/*
* Set config to be the same as vpe0,
* particularly kseg0 coherency alg
@@ -1447,10 +1508,10 @@ static int __init vpe_module_init(void)
/* TC's */
t->pvpe = v; /* set the parent vpe */
- if (i != 0) {
+ if (tc >= tclimit) {
unsigned long tmp;
- settc(i);
+ settc(tc);
/* Any TC that is bound to VPE0 gets left as is - in case
we are running SMTC on VPE0. A TC that is bound to any
@@ -1480,17 +1541,25 @@ static int __init vpe_module_init(void)
}
}
+out_reenable:
/* release config state */
clear_c0_mvpcontrol(MVPCONTROL_VPC);
+ evpe(vpflags);
+ emt(mtflags);
+ local_irq_restore(flags);
+
#ifdef CONFIG_MIPS_APSP_KSPD
kspd_events.kspd_sp_exit = kspd_sp_exit;
#endif
return 0;
+out_class:
+ class_unregister(&vpe_class);
out_chrdev:
unregister_chrdev(major, module_name);
+out:
return err;
}
@@ -1504,7 +1573,7 @@ static void __exit vpe_module_exit(void)
}
}
- device_destroy(mt_class, MKDEV(major, minor));
+ class_device_del(&vpe_device);
unregister_chrdev(major, module_name);
}