Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--  arch/mips/kernel/Makefile         |    2
-rw-r--r--  arch/mips/kernel/binfmt_elfo32.c  |    2
-rw-r--r--  arch/mips/kernel/cpu-bugs64.c     |    2
-rw-r--r--  arch/mips/kernel/cpu-probe.c      |  129
-rw-r--r--  arch/mips/kernel/gdb-stub.c       |   26
-rw-r--r--  arch/mips/kernel/i8253.c          |  213
-rw-r--r--  arch/mips/kernel/i8259.c          |   37
-rw-r--r--  arch/mips/kernel/irixelf.c        |   40
-rw-r--r--  arch/mips/kernel/irixinv.c        |   42
-rw-r--r--  arch/mips/kernel/irixioctl.c      |    2
-rw-r--r--  arch/mips/kernel/irixsig.c        |    8
-rw-r--r--  arch/mips/kernel/irq-gt641xx.c    |  131
-rw-r--r--  arch/mips/kernel/irq-msc01.c      |    4
-rw-r--r--  arch/mips/kernel/irq.c            |    4
-rw-r--r--  arch/mips/kernel/kspd.c           |   12
-rw-r--r--  arch/mips/kernel/linux32.c        |   24
-rw-r--r--  arch/mips/kernel/mips-mt.c        |    2
-rw-r--r--  arch/mips/kernel/proc.c           |   73
-rw-r--r--  arch/mips/kernel/process.c        |   11
-rw-r--r--  arch/mips/kernel/ptrace.c         |   50
-rw-r--r--  arch/mips/kernel/ptrace32.c       |   16
-rw-r--r--  arch/mips/kernel/setup.c          |    2
-rw-r--r--  arch/mips/kernel/signal.c         |    4
-rw-r--r--  arch/mips/kernel/signal32.c       |   44
-rw-r--r--  arch/mips/kernel/signal_n32.c     |    4
-rw-r--r--  arch/mips/kernel/smp-mt.c         |    2
-rw-r--r--  arch/mips/kernel/smp.c            |  123
-rw-r--r--  arch/mips/kernel/smtc.c           |  146
-rw-r--r--  arch/mips/kernel/syscall.c        |   60
-rw-r--r--  arch/mips/kernel/sysirix.c        |   22
-rw-r--r--  arch/mips/kernel/time.c           |  416
-rw-r--r--  arch/mips/kernel/traps.c          |   45
-rw-r--r--  arch/mips/kernel/unaligned.c      |    2
-rw-r--r--  arch/mips/kernel/vmlinux.lds.S    |  339
-rw-r--r--  arch/mips/kernel/vpe.c            |   47
35 files changed, 1292 insertions(+), 794 deletions(-)
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 2fd96d95a39..a2689f93c16 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -51,6 +51,7 @@ obj-$(CONFIG_IRQ_CPU_RM7K) += irq-rm7000.o
obj-$(CONFIG_IRQ_CPU_RM9K) += irq-rm9000.o
obj-$(CONFIG_MIPS_BOARDS_GEN) += irq-msc01.o
obj-$(CONFIG_IRQ_TXX9) += irq_txx9.o
+obj-$(CONFIG_IRQ_GT641XX) += irq-gt641xx.o
obj-$(CONFIG_32BIT) += scall32-o32.o
obj-$(CONFIG_64BIT) += scall64-64.o
@@ -64,6 +65,7 @@ obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_64BIT) += cpu-bugs64.o
+obj-$(CONFIG_I8253) += i8253.o
obj-$(CONFIG_PCSPEAKER) += pcspeaker.o
obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
index 993f7ec70f3..da41eac195c 100644
--- a/arch/mips/kernel/binfmt_elfo32.c
+++ b/arch/mips/kernel/binfmt_elfo32.c
@@ -110,7 +110,7 @@ jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value)
}
#undef ELF_CORE_COPY_REGS
-#define ELF_CORE_COPY_REGS(_dest,_regs) elf32_core_copy_regs(_dest,_regs);
+#define ELF_CORE_COPY_REGS(_dest, _regs) elf32_core_copy_regs(_dest, _regs);
void elf32_core_copy_regs(elf_gregset_t grp, struct pt_regs *regs)
{
diff --git a/arch/mips/kernel/cpu-bugs64.c b/arch/mips/kernel/cpu-bugs64.c
index 6648fde20b9..af78456d413 100644
--- a/arch/mips/kernel/cpu-bugs64.c
+++ b/arch/mips/kernel/cpu-bugs64.c
@@ -29,7 +29,7 @@ static inline void align_mod(const int align, const int mod)
".endr\n\t"
".set pop"
:
- : GCC_IMM_ASM (align), GCC_IMM_ASM (mod));
+ : GCC_IMM_ASM(align), GCC_IMM_ASM(mod));
}
static inline void mult_sh_align_mod(long *v1, long *v2, long *w,
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 3e004161ebd..c8c47a2d197 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -159,6 +159,7 @@ static inline void check_wait(void)
case CPU_5KC:
case CPU_25KF:
case CPU_PR4450:
+ case CPU_BCM3302:
cpu_wait = r4k_wait;
break;
@@ -745,14 +746,6 @@ static inline void cpu_probe_sibyte(struct cpuinfo_mips *c)
{
decode_configs(c);
- /*
- * For historical reasons the SB1 comes with it's own variant of
- * cache code which eventually will be folded into c-r4k.c. Until
- * then we pretend it's got it's own cache architecture.
- */
- c->options &= ~MIPS_CPU_4K_CACHE;
- c->options |= MIPS_CPU_SB1_CACHE;
-
switch (c->processor_id & 0xff00) {
case PRID_IMP_SB1:
c->cputype = CPU_SB1;
@@ -793,9 +786,111 @@ static inline void cpu_probe_philips(struct cpuinfo_mips *c)
}
+static inline void cpu_probe_broadcom(struct cpuinfo_mips *c)
+{
+ decode_configs(c);
+ switch (c->processor_id & 0xff00) {
+ case PRID_IMP_BCM3302:
+ c->cputype = CPU_BCM3302;
+ break;
+ case PRID_IMP_BCM4710:
+ c->cputype = CPU_BCM4710;
+ break;
+ default:
+ c->cputype = CPU_UNKNOWN;
+ break;
+ }
+}
+
+const char *__cpu_name[NR_CPUS];
+
+/*
+ * Name a CPU
+ */
+static __init const char *cpu_to_name(struct cpuinfo_mips *c)
+{
+ const char *name = NULL;
+
+ switch (c->cputype) {
+ case CPU_UNKNOWN: name = "unknown"; break;
+ case CPU_R2000: name = "R2000"; break;
+ case CPU_R3000: name = "R3000"; break;
+ case CPU_R3000A: name = "R3000A"; break;
+ case CPU_R3041: name = "R3041"; break;
+ case CPU_R3051: name = "R3051"; break;
+ case CPU_R3052: name = "R3052"; break;
+ case CPU_R3081: name = "R3081"; break;
+ case CPU_R3081E: name = "R3081E"; break;
+ case CPU_R4000PC: name = "R4000PC"; break;
+ case CPU_R4000SC: name = "R4000SC"; break;
+ case CPU_R4000MC: name = "R4000MC"; break;
+ case CPU_R4200: name = "R4200"; break;
+ case CPU_R4400PC: name = "R4400PC"; break;
+ case CPU_R4400SC: name = "R4400SC"; break;
+ case CPU_R4400MC: name = "R4400MC"; break;
+ case CPU_R4600: name = "R4600"; break;
+ case CPU_R6000: name = "R6000"; break;
+ case CPU_R6000A: name = "R6000A"; break;
+ case CPU_R8000: name = "R8000"; break;
+ case CPU_R10000: name = "R10000"; break;
+ case CPU_R12000: name = "R12000"; break;
+ case CPU_R14000: name = "R14000"; break;
+ case CPU_R4300: name = "R4300"; break;
+ case CPU_R4650: name = "R4650"; break;
+ case CPU_R4700: name = "R4700"; break;
+ case CPU_R5000: name = "R5000"; break;
+ case CPU_R5000A: name = "R5000A"; break;
+ case CPU_R4640: name = "R4640"; break;
+ case CPU_NEVADA: name = "Nevada"; break;
+ case CPU_RM7000: name = "RM7000"; break;
+ case CPU_RM9000: name = "RM9000"; break;
+ case CPU_R5432: name = "R5432"; break;
+ case CPU_4KC: name = "MIPS 4Kc"; break;
+ case CPU_5KC: name = "MIPS 5Kc"; break;
+ case CPU_R4310: name = "R4310"; break;
+ case CPU_SB1: name = "SiByte SB1"; break;
+ case CPU_SB1A: name = "SiByte SB1A"; break;
+ case CPU_TX3912: name = "TX3912"; break;
+ case CPU_TX3922: name = "TX3922"; break;
+ case CPU_TX3927: name = "TX3927"; break;
+ case CPU_AU1000: name = "Au1000"; break;
+ case CPU_AU1500: name = "Au1500"; break;
+ case CPU_AU1100: name = "Au1100"; break;
+ case CPU_AU1550: name = "Au1550"; break;
+ case CPU_AU1200: name = "Au1200"; break;
+ case CPU_4KEC: name = "MIPS 4KEc"; break;
+ case CPU_4KSC: name = "MIPS 4KSc"; break;
+ case CPU_VR41XX: name = "NEC Vr41xx"; break;
+ case CPU_R5500: name = "R5500"; break;
+ case CPU_TX49XX: name = "TX49xx"; break;
+ case CPU_20KC: name = "MIPS 20Kc"; break;
+ case CPU_24K: name = "MIPS 24K"; break;
+ case CPU_25KF: name = "MIPS 25Kf"; break;
+ case CPU_34K: name = "MIPS 34K"; break;
+ case CPU_74K: name = "MIPS 74K"; break;
+ case CPU_VR4111: name = "NEC VR4111"; break;
+ case CPU_VR4121: name = "NEC VR4121"; break;
+ case CPU_VR4122: name = "NEC VR4122"; break;
+ case CPU_VR4131: name = "NEC VR4131"; break;
+ case CPU_VR4133: name = "NEC VR4133"; break;
+ case CPU_VR4181: name = "NEC VR4181"; break;
+ case CPU_VR4181A: name = "NEC VR4181A"; break;
+ case CPU_SR71000: name = "Sandcraft SR71000"; break;
+ case CPU_BCM3302: name = "Broadcom BCM3302"; break;
+ case CPU_BCM4710: name = "Broadcom BCM4710"; break;
+ case CPU_PR4450: name = "Philips PR4450"; break;
+ case CPU_LOONGSON2: name = "ICT Loongson-2"; break;
+ default:
+ BUG();
+ }
+
+ return name;
+}
+
__init void cpu_probe(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
+ unsigned int cpu = smp_processor_id();
c->processor_id = PRID_IMP_UNKNOWN;
c->fpu_id = FPIR_IMP_NONE;
@@ -815,6 +910,9 @@ __init void cpu_probe(void)
case PRID_COMP_SIBYTE:
cpu_probe_sibyte(c);
break;
+ case PRID_COMP_BROADCOM:
+ cpu_probe_broadcom(c);
+ break;
case PRID_COMP_SANDCRAFT:
cpu_probe_sandcraft(c);
break;
@@ -824,6 +922,14 @@ __init void cpu_probe(void)
default:
c->cputype = CPU_UNKNOWN;
}
+
+ /*
+ * Platform code can force the cpu type to optimize code
+ * generation. In that case make sure the manually set up cpu
+ * type is correct, otherwise it could trigger some nasty bugs.
+ */
+ BUG_ON(current_cpu_type() != c->cputype);
+
if (c->options & MIPS_CPU_FPU) {
c->fpu_id = cpu_get_fpu_id();
@@ -835,13 +941,16 @@ __init void cpu_probe(void)
c->ases |= MIPS_ASE_MIPS3D;
}
}
+
+ __cpu_name[cpu] = cpu_to_name(c);
}
__init void cpu_report(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
- printk("CPU revision is: %08x\n", c->processor_id);
+ printk(KERN_INFO "CPU revision is: %08x (%s)\n",
+ c->processor_id, cpu_name_string());
if (c->options & MIPS_CPU_FPU)
- printk("FPU revision is: %08x\n", c->fpu_id);
+ printk(KERN_INFO "FPU revision is: %08x\n", c->fpu_id);
}
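
The BUG_ON() added to cpu_probe() above enforces the contract spelled out in the new comment: a platform may pin current_cpu_type() to a compile-time constant so that code for other CPU types can be optimized away, and that constant has to agree with what probing actually finds. A minimal sketch of the pattern, using hypothetical names (the _sk symbols and CONFIG_SYS_FORCED_CPU_SK are illustrations, not kernel symbols):

#include <assert.h>

#define BUG_ON(cond) assert(!(cond))   /* stand-in for the kernel macro */

enum cputype_sk { CPU_UNKNOWN_SK, CPU_BCM3302_SK, CPU_BCM4710_SK };

/* What decoding the PRId register found at boot. */
enum cputype_sk probed_cputype = CPU_BCM3302_SK;

#ifdef CONFIG_SYS_FORCED_CPU_SK
/* Forced: a constant, so branches on other CPU types become dead code. */
static inline enum cputype_sk current_cpu_type_sk(void)
{
	return CPU_BCM3302_SK;
}
#else
/* Generic: fall back to the probed value. */
static inline enum cputype_sk current_cpu_type_sk(void)
{
	return probed_cputype;
}
#endif

int main(void)
{
	/* The check cpu_probe() now performs: a forced type must match
	 * what the hardware reports, or the kernel stops early. */
	BUG_ON(current_cpu_type_sk() != probed_cputype);
	return 0;
}
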
diff --git a/arch/mips/kernel/gdb-stub.c b/arch/mips/kernel/gdb-stub.c
index cb5623aad55..3191afa29ad 100644
--- a/arch/mips/kernel/gdb-stub.c
+++ b/arch/mips/kernel/gdb-stub.c
@@ -676,15 +676,18 @@ static void kgdb_wait(void *arg)
static int kgdb_smp_call_kgdb_wait(void)
{
#ifdef CONFIG_SMP
+ cpumask_t mask = cpu_online_map;
struct call_data_struct data;
- int i, cpus = num_online_cpus() - 1;
int cpu = smp_processor_id();
+ int cpus;
/*
* Can die spectacularly if this CPU isn't yet marked online
*/
BUG_ON(!cpu_online(cpu));
+ cpu_clear(cpu, mask);
+ cpus = cpus_weight(mask);
if (!cpus)
return 0;
@@ -711,10 +714,7 @@ static int kgdb_smp_call_kgdb_wait(void)
call_data = &data;
mb();
- /* Send a message to all other CPUs and wait for them to respond */
- for (i = 0; i < NR_CPUS; i++)
- if (cpu_online(i) && i != cpu)
- core_send_ipi(i, SMP_CALL_FUNCTION);
+ core_send_ipi_mask(mask, SMP_CALL_FUNCTION);
/* Wait for response */
/* FIXME: lock-up detection, backtrace on lock-up */
@@ -733,7 +733,7 @@ static int kgdb_smp_call_kgdb_wait(void)
* returns 1 if you should skip the instruction at the trap address, 0
* otherwise.
*/
-void handle_exception (struct gdb_regs *regs)
+void handle_exception(struct gdb_regs *regs)
{
int trap; /* Trap type */
int sigval;
@@ -769,7 +769,7 @@ void handle_exception (struct gdb_regs *regs)
/*
* acquire the CPU spinlocks
*/
- for (i = num_online_cpus()-1; i >= 0; i--)
+ for_each_online_cpu(i)
if (__raw_spin_trylock(&kgdb_cpulock[i]) == 0)
panic("kgdb: couldn't get cpulock %d\n", i);
@@ -902,7 +902,7 @@ void handle_exception (struct gdb_regs *regs)
hex2mem(ptr, (char *)&regs->frame_ptr, 2*sizeof(long), 0, 0);
ptr += 2*(2*sizeof(long));
hex2mem(ptr, (char *)&regs->cp0_index, 16*sizeof(long), 0, 0);
- strcpy(output_buffer,"OK");
+ strcpy(output_buffer, "OK");
}
break;
@@ -917,9 +917,9 @@ void handle_exception (struct gdb_regs *regs)
&& hexToInt(&ptr, &length)) {
if (mem2hex((char *)addr, output_buffer, length, 1))
break;
- strcpy (output_buffer, "E03");
+ strcpy(output_buffer, "E03");
} else
- strcpy(output_buffer,"E01");
+ strcpy(output_buffer, "E01");
break;
/*
@@ -996,7 +996,7 @@ void handle_exception (struct gdb_regs *regs)
ptr = &input_buffer[1];
if (!hexToInt(&ptr, &baudrate))
{
- strcpy(output_buffer,"B01");
+ strcpy(output_buffer, "B01");
break;
}
@@ -1015,7 +1015,7 @@ void handle_exception (struct gdb_regs *regs)
break;
default:
baudrate = 0;
- strcpy(output_buffer,"B02");
+ strcpy(output_buffer, "B02");
goto x1;
}
@@ -1044,7 +1044,7 @@ finish_kgdb:
exit_kgdb_exception:
/* release locks so other CPUs can go */
- for (i = num_online_cpus()-1; i >= 0; i--)
+ for_each_online_cpu(i)
__raw_spin_unlock(&kgdb_cpulock[i]);
spin_unlock(&kgdb_lock);
diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c
new file mode 100644
index 00000000000..5d9830df359
--- /dev/null
+++ b/arch/mips/kernel/i8253.c
@@ -0,0 +1,213 @@
+/*
+ * i8253.c 8253/PIT functions
+ *
+ */
+#include <linux/clockchips.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+
+#include <asm/delay.h>
+#include <asm/i8253.h>
+#include <asm/io.h>
+
+static DEFINE_SPINLOCK(i8253_lock);
+
+/*
+ * Initialize the PIT timer.
+ *
+ * This is also called after resume to bring the PIT into operation again.
+ */
+static void init_pit_timer(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&i8253_lock, flags);
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ /* binary, mode 2, LSB/MSB, ch 0 */
+ outb_p(0x34, PIT_MODE);
+ outb_p(LATCH & 0xff, PIT_CH0); /* LSB */
+ outb(LATCH >> 8, PIT_CH0); /* MSB */
+ break;
+
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ case CLOCK_EVT_MODE_UNUSED:
+ if (evt->mode == CLOCK_EVT_MODE_PERIODIC ||
+ evt->mode == CLOCK_EVT_MODE_ONESHOT) {
+ outb_p(0x30, PIT_MODE);
+ outb_p(0, PIT_CH0);
+ outb_p(0, PIT_CH0);
+ }
+ break;
+
+ case CLOCK_EVT_MODE_ONESHOT:
+ /* One shot setup */
+ outb_p(0x38, PIT_MODE);
+ break;
+
+ case CLOCK_EVT_MODE_RESUME:
+ /* Nothing to do here */
+ break;
+ }
+ spin_unlock_irqrestore(&i8253_lock, flags);
+}
+
+/*
+ * Program the next event in oneshot mode
+ *
+ * Delta is given in PIT ticks
+ */
+static int pit_next_event(unsigned long delta, struct clock_event_device *evt)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&i8253_lock, flags);
+ outb_p(delta & 0xff, PIT_CH0); /* LSB */
+ outb(delta >> 8, PIT_CH0); /* MSB */
+ spin_unlock_irqrestore(&i8253_lock, flags);
+
+ return 0;
+}
+
+/*
+ * On UP the PIT can serve all of the possible timer functions. On SMP systems
+ * it can be solely used for the global tick.
+ *
+ * The profiling and update capabilities are switched off once the local apic is
+ * registered. This mechanism replaces the previous #ifdef LOCAL_APIC -
+ * !using_apic_timer decisions in do_timer_interrupt_hook()
+ */
+struct clock_event_device pit_clockevent = {
+ .name = "pit",
+ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+ .set_mode = init_pit_timer,
+ .set_next_event = pit_next_event,
+ .shift = 32,
+ .irq = 0,
+};
+
+irqreturn_t timer_interrupt(int irq, void *dev_id)
+{
+ pit_clockevent.event_handler(&pit_clockevent);
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction irq0 = {
+ .handler = timer_interrupt,
+ .flags = IRQF_DISABLED | IRQF_NOBALANCING,
+ .mask = CPU_MASK_NONE,
+ .name = "timer"
+};
+
+/*
+ * Initialize the conversion factor and the min/max deltas of the clock event
+ * structure and register the clock event source with the framework.
+ */
+void __init setup_pit_timer(void)
+{
+ /*
+ * Start pit with the boot cpu mask and make it global after the
+ * IO_APIC has been initialized.
+ */
+ pit_clockevent.cpumask = cpumask_of_cpu(0);
+ pit_clockevent.mult = div_sc(CLOCK_TICK_RATE, NSEC_PER_SEC, 32);
+ pit_clockevent.max_delta_ns =
+ clockevent_delta2ns(0x7FFF, &pit_clockevent);
+ pit_clockevent.min_delta_ns =
+ clockevent_delta2ns(0xF, &pit_clockevent);
+ clockevents_register_device(&pit_clockevent);
+
+ irq0.mask = cpumask_of_cpu(0);
+ setup_irq(0, &irq0);
+}
+
+/*
+ * Since the PIT overflows every tick, it's not very useful
+ * to just read by itself. So use jiffies to emulate a free
+ * running counter:
+ */
+static cycle_t pit_read(void)
+{
+ unsigned long flags;
+ int count;
+ u32 jifs;
+ static int old_count;
+ static u32 old_jifs;
+
+ spin_lock_irqsave(&i8253_lock, flags);
+ /*
+ * Although our caller may have the read side of xtime_lock,
+ * this is now a seqlock, and we are cheating in this routine
+ * by having side effects on state that we cannot undo if
+ * there is a collision on the seqlock and our caller has to
+ * retry. (Namely, old_jifs and old_count.) So we must treat
+ * jiffies as volatile despite the lock. We read jiffies
+ * before latching the timer count to guarantee that although
+ * the jiffies value might be older than the count (that is,
+ * the counter may underflow between the last point where
+ * jiffies was incremented and the point where we latch the
+ * count), it cannot be newer.
+ */
+ jifs = jiffies;
+ outb_p(0x00, PIT_MODE); /* latch the count ASAP */
+ count = inb_p(PIT_CH0); /* read the latched count */
+ count |= inb_p(PIT_CH0) << 8;
+
+ /* VIA686a test code... reset the latch if count > max + 1 */
+ if (count > LATCH) {
+ outb_p(0x34, PIT_MODE);
+ outb_p(LATCH & 0xff, PIT_CH0);
+ outb(LATCH >> 8, PIT_CH0);
+ count = LATCH - 1;
+ }
+
+ /*
+ * It's possible for count to appear to go the wrong way for a
+ * couple of reasons:
+ *
+ * 1. The timer counter underflows, but we haven't handled the
+ * resulting interrupt and incremented jiffies yet.
+ * 2. Hardware problem with the timer, not giving us continuous time,
+ * the counter does small "jumps" upwards on some Pentium systems,
+ * (see c't 95/10 page 335 for Neptun bug.)
+ *
+ * Previous attempts to handle these cases intelligently were
+ * buggy, so we just do the simple thing now.
+ */
+ if (count > old_count && jifs == old_jifs) {
+ count = old_count;
+ }
+ old_count = count;
+ old_jifs = jifs;
+
+ spin_unlock_irqrestore(&i8253_lock, flags);
+
+ count = (LATCH - 1) - count;
+
+ return (cycle_t)(jifs * LATCH) + count;
+}
+
+static struct clocksource clocksource_pit = {
+ .name = "pit",
+ .rating = 110,
+ .read = pit_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .mult = 0,
+ .shift = 20,
+};
+
+static int __init init_pit_clocksource(void)
+{
+ if (num_possible_cpus() > 1) /* PIT does not scale! */
+ return 0;
+
+ clocksource_pit.mult = clocksource_hz2mult(CLOCK_TICK_RATE, 20);
+ return clocksource_register(&clocksource_pit);
+}
+arch_initcall(init_pit_clocksource);
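
As background for the arithmetic in setup_pit_timer() and pit_read() above, here is a standalone sketch of the scaling math, assuming the usual CLOCK_TICK_RATE of 1193182 Hz and HZ = 1000 (these values, and the reimplementation of the helpers, are assumptions for illustration, not part of the patch):

#include <stdint.h>
#include <stdio.h>

#define CLOCK_TICK_RATE 1193182UL  /* assumed PIT input clock, Hz */
#define HZ 1000UL                  /* assumed kernel tick rate */
#define NSEC_PER_SEC 1000000000UL
/* Periodic reload value: PIT ticks per kernel tick, rounded. */
#define LATCH ((CLOCK_TICK_RATE + HZ / 2) / HZ)

int main(void)
{
	/* div_sc()-style factor for shift = 32, as in setup_pit_timer():
	 * ns * mult >> 32 ~= PIT ticks. */
	uint64_t mult = ((uint64_t)CLOCK_TICK_RATE << 32) / NSEC_PER_SEC;

	/* clockevent_delta2ns()-style bound for the 0x7FFF max delta. */
	uint64_t max_ns = ((uint64_t)0x7FFF << 32) / mult;

	/* pit_read()-style free-running counter: the PIT counts down
	 * from LATCH once per tick, so elapsed cycles are whole jiffies
	 * worth of periods plus the spent part of the current period. */
	uint32_t jifs = 123456;  /* made-up sample values */
	int count = 400;         /* latched down-counter reading */
	uint64_t cycles = (uint64_t)jifs * LATCH + ((LATCH - 1) - count);

	printf("LATCH=%lu mult=%llu max_delta_ns=%llu cycles=%llu\n",
	       (unsigned long)LATCH, (unsigned long long)mult,
	       (unsigned long long)max_ns, (unsigned long long)cycles);
	return 0;
}
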
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index 3a2d255361b..47101357710 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -30,8 +30,10 @@
static int i8259A_auto_eoi = -1;
DEFINE_SPINLOCK(i8259A_lock);
-/* some platforms call this... */
-void mask_and_ack_8259A(unsigned int);
+static void disable_8259A_irq(unsigned int irq);
+static void enable_8259A_irq(unsigned int irq);
+static void mask_and_ack_8259A(unsigned int irq);
+static void init_8259A(int auto_eoi);
static struct irq_chip i8259A_chip = {
.name = "XT-PIC",
@@ -39,6 +41,9 @@ static struct irq_chip i8259A_chip = {
.disable = disable_8259A_irq,
.unmask = enable_8259A_irq,
.mask_ack = mask_and_ack_8259A,
+#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
+ .set_affinity = plat_set_irq_affinity,
+#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
};
/*
@@ -53,7 +58,7 @@ static unsigned int cached_irq_mask = 0xffff;
#define cached_master_mask (cached_irq_mask)
#define cached_slave_mask (cached_irq_mask >> 8)
-void disable_8259A_irq(unsigned int irq)
+static void disable_8259A_irq(unsigned int irq)
{
unsigned int mask;
unsigned long flags;
@@ -69,7 +74,7 @@ void disable_8259A_irq(unsigned int irq)
spin_unlock_irqrestore(&i8259A_lock, flags);
}
-void enable_8259A_irq(unsigned int irq)
+static void enable_8259A_irq(unsigned int irq)
{
unsigned int mask;
unsigned long flags;
@@ -122,14 +127,14 @@ static inline int i8259A_irq_real(unsigned int irq)
int irqmask = 1 << irq;
if (irq < 8) {
- outb(0x0B,PIC_MASTER_CMD); /* ISR register */
+ outb(0x0B, PIC_MASTER_CMD); /* ISR register */
value = inb(PIC_MASTER_CMD) & irqmask;
- outb(0x0A,PIC_MASTER_CMD); /* back to the IRR register */
+ outb(0x0A, PIC_MASTER_CMD); /* back to the IRR register */
return value;
}
- outb(0x0B,PIC_SLAVE_CMD); /* ISR register */
+ outb(0x0B, PIC_SLAVE_CMD); /* ISR register */
value = inb(PIC_SLAVE_CMD) & (irqmask >> 8);
- outb(0x0A,PIC_SLAVE_CMD); /* back to the IRR register */
+ outb(0x0A, PIC_SLAVE_CMD); /* back to the IRR register */
return value;
}
@@ -139,7 +144,7 @@ static inline int i8259A_irq_real(unsigned int irq)
* first, _then_ send the EOI, and the order of EOI
* to the two 8259s is important!
*/
-void mask_and_ack_8259A(unsigned int irq)
+static void mask_and_ack_8259A(unsigned int irq)
{
unsigned int irqmask;
unsigned long flags;
@@ -170,12 +175,12 @@ handle_real_irq:
if (irq & 8) {
inb(PIC_SLAVE_IMR); /* DUMMY - (do we need this?) */
outb(cached_slave_mask, PIC_SLAVE_IMR);
- outb(0x60+(irq&7),PIC_SLAVE_CMD);/* 'Specific EOI' to slave */
- outb(0x60+PIC_CASCADE_IR,PIC_MASTER_CMD); /* 'Specific EOI' to master-IRQ2 */
+ outb(0x60+(irq&7), PIC_SLAVE_CMD);/* 'Specific EOI' to slave */
+ outb(0x60+PIC_CASCADE_IR, PIC_MASTER_CMD); /* 'Specific EOI' to master-IRQ2 */
} else {
inb(PIC_MASTER_IMR); /* DUMMY - (do we need this?) */
outb(cached_master_mask, PIC_MASTER_IMR);
- outb(0x60+irq,PIC_MASTER_CMD); /* 'Specific EOI to master */
+ outb(0x60+irq, PIC_MASTER_CMD); /* 'Specific EOI' to master */
}
smtc_im_ack_irq(irq);
spin_unlock_irqrestore(&i8259A_lock, flags);
@@ -253,7 +258,7 @@ static int __init i8259A_init_sysfs(void)
device_initcall(i8259A_init_sysfs);
-void init_8259A(int auto_eoi)
+static void init_8259A(int auto_eoi)
{
unsigned long flags;
@@ -300,7 +305,9 @@ void init_8259A(int auto_eoi)
* IRQ2 is cascade interrupt to second interrupt controller
*/
static struct irqaction irq2 = {
- no_action, 0, CPU_MASK_NONE, "cascade", NULL, NULL
+ .handler = no_action,
+ .mask = CPU_MASK_NONE,
+ .name = "cascade",
};
static struct resource pic1_io_resource = {
@@ -322,7 +329,7 @@ static struct resource pic2_io_resource = {
* driver compatibility reasons interrupts 0 - 15 to be the i8259
* interrupts even if the hardware uses a different interrupt numbering.
*/
-void __init init_i8259_irqs (void)
+void __init init_i8259_irqs(void)
{
int i;
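
The 'Specific EOI' comments in the i8259.c hunks above refer to the 8259A OCW2 command 0x60, whose low three bits select the IRQ level on that chip; for a cascaded interrupt the slave is acknowledged first, then the cascade line on the master. A condensed sketch of just that ordering (the port numbers are the standard PC/AT ones, assumed here; outb() is stubbed with the kernel's value-then-port argument order):

#include <stdio.h>

#define PIC_MASTER_CMD 0x20   /* assumed standard PC/AT port numbers */
#define PIC_SLAVE_CMD  0xa0
#define PIC_CASCADE_IR 2      /* slave is cascaded on master IRQ2 */

/* Stub that just logs; real code would touch I/O ports. */
static void outb(unsigned char val, unsigned short port)
{
	printf("outb(0x%02x, 0x%02x)\n", val, port);
}

/* 0x60 is OCW2 "specific EOI"; the low three bits name the level. */
static void specific_eoi(unsigned int irq)
{
	if (irq & 8) {
		outb(0x60 + (irq & 7), PIC_SLAVE_CMD);        /* slave first */
		outb(0x60 + PIC_CASCADE_IR, PIC_MASTER_CMD);  /* then master */
	} else {
		outb(0x60 + irq, PIC_MASTER_CMD);
	}
}

int main(void)
{
	specific_eoi(11);  /* slave IRQ: two EOIs, slave then master IRQ2 */
	return 0;
}
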
diff --git a/arch/mips/kernel/irixelf.c b/arch/mips/kernel/irixelf.c
index 403d96f99e7..8ef5cf4cc42 100644
--- a/arch/mips/kernel/irixelf.c
+++ b/arch/mips/kernel/irixelf.c
@@ -203,8 +203,8 @@ static unsigned long * create_irix_tables(char * p, int argc, int envc,
* Put the ELF interpreter info on the stack
*/
#define NEW_AUX_ENT(nr, id, val) \
- __put_user ((id), sp+(nr*2)); \
- __put_user ((val), sp+(nr*2+1)); \
+ __put_user((id), sp+(nr*2)); \
+ __put_user((val), sp+(nr*2+1)); \
sp -= 2;
NEW_AUX_ENT(0, AT_NULL, 0);
@@ -212,17 +212,17 @@ static unsigned long * create_irix_tables(char * p, int argc, int envc,
if (exec) {
sp -= 11*2;
- NEW_AUX_ENT (0, AT_PHDR, load_addr + exec->e_phoff);
- NEW_AUX_ENT (1, AT_PHENT, sizeof (struct elf_phdr));
- NEW_AUX_ENT (2, AT_PHNUM, exec->e_phnum);
- NEW_AUX_ENT (3, AT_PAGESZ, ELF_EXEC_PAGESIZE);
- NEW_AUX_ENT (4, AT_BASE, interp_load_addr);
- NEW_AUX_ENT (5, AT_FLAGS, 0);
- NEW_AUX_ENT (6, AT_ENTRY, (elf_addr_t) exec->e_entry);
- NEW_AUX_ENT (7, AT_UID, (elf_addr_t) current->uid);
- NEW_AUX_ENT (8, AT_EUID, (elf_addr_t) current->euid);
- NEW_AUX_ENT (9, AT_GID, (elf_addr_t) current->gid);
- NEW_AUX_ENT (10, AT_EGID, (elf_addr_t) current->egid);
+ NEW_AUX_ENT(0, AT_PHDR, load_addr + exec->e_phoff);
+ NEW_AUX_ENT(1, AT_PHENT, sizeof(struct elf_phdr));
+ NEW_AUX_ENT(2, AT_PHNUM, exec->e_phnum);
+ NEW_AUX_ENT(3, AT_PAGESZ, ELF_EXEC_PAGESIZE);
+ NEW_AUX_ENT(4, AT_BASE, interp_load_addr);
+ NEW_AUX_ENT(5, AT_FLAGS, 0);
+ NEW_AUX_ENT(6, AT_ENTRY, (elf_addr_t) exec->e_entry);
+ NEW_AUX_ENT(7, AT_UID, (elf_addr_t) current->uid);
+ NEW_AUX_ENT(8, AT_EUID, (elf_addr_t) current->euid);
+ NEW_AUX_ENT(9, AT_GID, (elf_addr_t) current->gid);
+ NEW_AUX_ENT(10, AT_EGID, (elf_addr_t) current->egid);
}
#undef NEW_AUX_ENT
@@ -231,16 +231,16 @@ static unsigned long * create_irix_tables(char * p, int argc, int envc,
sp -= argc+1;
argv = sp;
- __put_user((elf_addr_t)argc,--sp);
+ __put_user((elf_addr_t)argc, --sp);
current->mm->arg_start = (unsigned long) p;
while (argc-->0) {
- __put_user((unsigned long)p,argv++);
+ __put_user((unsigned long)p, argv++);
p += strlen_user(p);
}
__put_user((unsigned long) NULL, argv);
current->mm->arg_end = current->mm->env_start = (unsigned long) p;
while (envc-->0) {
- __put_user((unsigned long)p,envp++);
+ __put_user((unsigned long)p, envp++);
p += strlen_user(p);
}
__put_user((unsigned long) NULL, envp);
@@ -581,7 +581,7 @@ static void irix_map_prda_page(void)
struct prda *pp;
down_write(&current->mm->mmap_sem);
- v = do_brk (PRDA_ADDRESS, PAGE_SIZE);
+ v = do_brk(PRDA_ADDRESS, PAGE_SIZE);
up_write(&current->mm->mmap_sem);
if (v < 0)
@@ -815,7 +815,7 @@ out_free_interp:
kfree(elf_interpreter);
out_free_file:
out_free_ph:
- kfree (elf_phdata);
+ kfree(elf_phdata);
goto out;
}
@@ -831,7 +831,7 @@ static int load_irix_library(struct file *file)
int retval;
unsigned int bss;
int error;
- int i,j, k;
+ int i, j, k;
error = kernel_read(file, 0, (char *) &elf_ex, sizeof(elf_ex));
if (error != sizeof(elf_ex))
@@ -1232,7 +1232,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
strlcpy(psinfo.pr_fname, current->comm, sizeof(psinfo.pr_fname));
/* Try to dump the FPU. */
- prstatus.pr_fpvalid = dump_fpu (regs, &fpu);
+ prstatus.pr_fpvalid = dump_fpu(regs, &fpu);
if (!prstatus.pr_fpvalid) {
numnote--;
} else {
diff --git a/arch/mips/kernel/irixinv.c b/arch/mips/kernel/irixinv.c
index de8584f6231..cf2dcd3d6a9 100644
--- a/arch/mips/kernel/irixinv.c
+++ b/arch/mips/kernel/irixinv.c
@@ -14,7 +14,7 @@ int inventory_items = 0;
static inventory_t inventory [MAX_INVENTORY];
-void add_to_inventory (int class, int type, int controller, int unit, int state)
+void add_to_inventory(int class, int type, int controller, int unit, int state)
{
inventory_t *ni = &inventory [inventory_items];
@@ -30,7 +30,7 @@ void add_to_inventory (int class, int type, int controller, int unit, int state)
inventory_items++;
}
-int dump_inventory_to_user (void __user *userbuf, int size)
+int dump_inventory_to_user(void __user *userbuf, int size)
{
inventory_t *inv = &inventory [0];
inventory_t __user *user = userbuf;
@@ -45,7 +45,7 @@ int dump_inventory_to_user (void __user *userbuf, int size)
return -EFAULT;
user++;
}
- return inventory_items * sizeof (inventory_t);
+ return inventory_items * sizeof(inventory_t);
}
int __init init_inventory(void)
@@ -55,24 +55,24 @@ int __init init_inventory(void)
* most likely this will not let just anyone run the X server
* until we put the right values all over the place
*/
- add_to_inventory (10, 3, 0, 0, 16400);
- add_to_inventory (1, 1, 150, -1, 12);
- add_to_inventory (1, 3, 0, 0, 8976);
- add_to_inventory (1, 2, 0, 0, 8976);
- add_to_inventory (4, 8, 0, 0, 2);
- add_to_inventory (5, 5, 0, 0, 1);
- add_to_inventory (3, 3, 0, 0, 32768);
- add_to_inventory (3, 4, 0, 0, 32768);
- add_to_inventory (3, 8, 0, 0, 524288);
- add_to_inventory (3, 9, 0, 0, 64);
- add_to_inventory (3, 1, 0, 0, 67108864);
- add_to_inventory (12, 3, 0, 0, 16);
- add_to_inventory (8, 7, 17, 0, 16777472);
- add_to_inventory (8, 0, 0, 0, 1);
- add_to_inventory (2, 1, 0, 13, 2);
- add_to_inventory (2, 2, 0, 2, 0);
- add_to_inventory (2, 2, 0, 1, 0);
- add_to_inventory (7, 14, 0, 0, 6);
+ add_to_inventory(10, 3, 0, 0, 16400);
+ add_to_inventory(1, 1, 150, -1, 12);
+ add_to_inventory(1, 3, 0, 0, 8976);
+ add_to_inventory(1, 2, 0, 0, 8976);
+ add_to_inventory(4, 8, 0, 0, 2);
+ add_to_inventory(5, 5, 0, 0, 1);
+ add_to_inventory(3, 3, 0, 0, 32768);
+ add_to_inventory(3, 4, 0, 0, 32768);
+ add_to_inventory(3, 8, 0, 0, 524288);
+ add_to_inventory(3, 9, 0, 0, 64);
+ add_to_inventory(3, 1, 0, 0, 67108864);
+ add_to_inventory(12, 3, 0, 0, 16);
+ add_to_inventory(8, 7, 17, 0, 16777472);
+ add_to_inventory(8, 0, 0, 0, 1);
+ add_to_inventory(2, 1, 0, 13, 2);
+ add_to_inventory(2, 2, 0, 2, 0);
+ add_to_inventory(2, 2, 0, 1, 0);
+ add_to_inventory(7, 14, 0, 0, 6);
return 0;
}
diff --git a/arch/mips/kernel/irixioctl.c b/arch/mips/kernel/irixioctl.c
index 30f9eb09db3..2bde200d5ad 100644
--- a/arch/mips/kernel/irixioctl.c
+++ b/arch/mips/kernel/irixioctl.c
@@ -238,7 +238,7 @@ asmlinkage int irix_ioctl(int fd, unsigned long cmd, unsigned long arg)
current->comm, current->pid, cmd);
do_exit(255);
#else
- error = sys_ioctl (fd, cmd, arg);
+ error = sys_ioctl(fd, cmd, arg);
#endif
}
diff --git a/arch/mips/kernel/irixsig.c b/arch/mips/kernel/irixsig.c
index 28b2a8f0091..85c2e389edd 100644
--- a/arch/mips/kernel/irixsig.c
+++ b/arch/mips/kernel/irixsig.c
@@ -163,9 +163,9 @@ static inline int handle_signal(unsigned long sig, siginfo_t *info,
ret = setup_irix_frame(ka, regs, sig, oldset);
spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
if (!(ka->sa.sa_flags & SA_NODEFER))
- sigaddset(&current->blocked,sig);
+ sigaddset(&current->blocked, sig);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
@@ -605,8 +605,8 @@ repeat:
current->state = TASK_INTERRUPTIBLE;
read_lock(&tasklist_lock);
tsk = current;
- list_for_each(_p,&tsk->children) {
- p = list_entry(_p,struct task_struct,sibling);
+ list_for_each(_p, &tsk->children) {
+ p = list_entry(_p, struct task_struct, sibling);
if ((type == IRIX_P_PID) && p->pid != pid)
continue;
if ((type == IRIX_P_PGID) && process_group(p) != pid)
diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c
new file mode 100644
index 00000000000..1b81b131f43
--- /dev/null
+++ b/arch/mips/kernel/irq-gt641xx.c
@@ -0,0 +1,131 @@
+/*
+ * GT641xx IRQ routines.
+ *
+ * Copyright (C) 2007 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <linux/hardirq.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <asm/gt64120.h>
+
+#define GT641XX_IRQ_TO_BIT(irq) (1U << ((irq) - GT641XX_IRQ_BASE))
+
+static DEFINE_SPINLOCK(gt641xx_irq_lock);
+
+static void ack_gt641xx_irq(unsigned int irq)
+{
+ unsigned long flags;
+ u32 cause;
+
+ spin_lock_irqsave(&gt641xx_irq_lock, flags);
+ cause = GT_READ(GT_INTRCAUSE_OFS);
+ cause &= ~GT641XX_IRQ_TO_BIT(irq);
+ GT_WRITE(GT_INTRCAUSE_OFS, cause);
+ spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
+}
+
+static void mask_gt641xx_irq(unsigned int irq)
+{
+ unsigned long flags;
+ u32 mask;
+
+ spin_lock_irqsave(&gt641xx_irq_lock, flags);
+ mask = GT_READ(GT_INTRMASK_OFS);
+ mask &= ~GT641XX_IRQ_TO_BIT(irq);
+ GT_WRITE(GT_INTRMASK_OFS, mask);
+ spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
+}
+
+static void mask_ack_gt641xx_irq(unsigned int irq)
+{
+ unsigned long flags;
+ u32 cause, mask;
+
+ spin_lock_irqsave(&gt641xx_irq_lock, flags);
+ mask = GT_READ(GT_INTRMASK_OFS);
+ mask &= ~GT641XX_IRQ_TO_BIT(irq);
+ GT_WRITE(GT_INTRMASK_OFS, mask);
+
+ cause = GT_READ(GT_INTRCAUSE_OFS);
+ cause &= ~GT641XX_IRQ_TO_BIT(irq);
+ GT_WRITE(GT_INTRCAUSE_OFS, cause);
+ spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
+}
+
+static void unmask_gt641xx_irq(unsigned int irq)
+{
+ unsigned long flags;
+ u32 mask;
+
+ spin_lock_irqsave(&gt641xx_irq_lock, flags);
+ mask = GT_READ(GT_INTRMASK_OFS);
+ mask |= GT641XX_IRQ_TO_BIT(irq);
+ GT_WRITE(GT_INTRMASK_OFS, mask);
+ spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
+}
+
+static struct irq_chip gt641xx_irq_chip = {
+ .name = "GT641xx",
+ .ack = ack_gt641xx_irq,
+ .mask = mask_gt641xx_irq,
+ .mask_ack = mask_ack_gt641xx_irq,
+ .unmask = unmask_gt641xx_irq,
+};
+
+void gt641xx_irq_dispatch(void)
+{
+ u32 cause, mask;
+ int i;
+
+ cause = GT_READ(GT_INTRCAUSE_OFS);
+ mask = GT_READ(GT_INTRMASK_OFS);
+ cause &= mask;
+
+ /*
+ * bit0 : logical or of all the interrupt bits.
+ * bit30: logical or of bits[29:26,20:1].
+ * bit31: logical or of bits[25:1].
+ */
+ for (i = 1; i < 30; i++) {
+ if (cause & (1U << i)) {
+ do_IRQ(GT641XX_IRQ_BASE + i);
+ return;
+ }
+ }
+
+ atomic_inc(&irq_err_count);
+}
+
+void __init gt641xx_irq_init(void)
+{
+ int i;
+
+ GT_WRITE(GT_INTRMASK_OFS, 0);
+ GT_WRITE(GT_INTRCAUSE_OFS, 0);
+
+ /*
+ * bit0 : logical or of all the interrupt bits.
+ * bit30: logical or of bits[29:26,20:1].
+ * bit31: logical or of bits[25:1].
+ */
+ for (i = 1; i < 30; i++)
+ set_irq_chip_and_handler(GT641XX_IRQ_BASE + i,
+ &gt641xx_irq_chip, handle_level_irq);
+}
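
gt641xx_irq_dispatch() above scans cause bits 1 through 29 in ascending order, so lower-numbered sources take priority. The same selection can be written as a find-first-set over the pending bits once the three summary bits (0, 30 and 31) are stripped; a sketch with made-up values (GT641XX_IRQ_BASE is platform-defined in reality, and pick_irq() is an illustration, not the kernel function):

#include <stdio.h>
#include <strings.h>   /* ffs() */

#define GT641XX_IRQ_BASE 8   /* made-up base for the sketch */

static int pick_irq(unsigned int cause, unsigned int mask)
{
	/* Bits 0, 30 and 31 are "logical or" summary bits, not real
	 * interrupt sources, so mask them off before choosing. */
	unsigned int pending = cause & mask & 0x3ffffffe;
	int bit = ffs(pending);   /* 1-based index of lowest set bit */

	return bit ? GT641XX_IRQ_BASE + bit - 1 : -1;
}

int main(void)
{
	/* Bits 3 and 9 pending; the lower-numbered source wins. */
	printf("irq %d\n", pick_irq(0x208, 0xffffffff));  /* base + 3 */
	return 0;
}
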
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c
index 1ecdd50bfc6..4edc7e451d9 100644
--- a/arch/mips/kernel/irq-msc01.c
+++ b/arch/mips/kernel/irq-msc01.c
@@ -99,7 +99,7 @@ void ll_msc_irq(void)
}
void
-msc_bind_eic_interrupt (unsigned int irq, unsigned int set)
+msc_bind_eic_interrupt(unsigned int irq, unsigned int set)
{
MSCIC_WRITE(MSC01_IC_RAMW,
(irq<<MSC01_IC_RAMW_ADDR_SHF) | (set<<MSC01_IC_RAMW_DATA_SHF));
@@ -130,7 +130,7 @@ void __init init_msc_irqs(unsigned long icubase, unsigned int irqbase, msc_irqma
{
extern void (*board_bind_eic_interrupt)(unsigned int irq, unsigned int regset);
- _icctrl_msc = (unsigned long) ioremap (icubase, 0x40000);
+ _icctrl_msc = (unsigned long) ioremap(icubase, 0x40000);
/* Reset interrupt controller - initialises all registers to 0 */
MSCIC_WRITE(MSC01_IC_RST, MSC01_IC_RST_RST_BIT);
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index a990aad2f04..d06e9c9af79 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -93,7 +93,7 @@ int show_interrupts(struct seq_file *p, void *v)
if (i == 0) {
seq_printf(p, " ");
for_each_online_cpu(j)
- seq_printf(p, "CPU%d ",j);
+ seq_printf(p, "CPU%d ", j);
seq_putc(p, '\n');
}
@@ -102,7 +102,7 @@ int show_interrupts(struct seq_file *p, void *v)
action = irq_desc[i].action;
if (!action)
goto skip;
- seq_printf(p, "%3d: ",i);
+ seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
seq_printf(p, "%10u ", kstat_irqs(i));
#else
diff --git a/arch/mips/kernel/kspd.c b/arch/mips/kernel/kspd.c
index cb9a14a1ca5..d2c2e00e586 100644
--- a/arch/mips/kernel/kspd.c
+++ b/arch/mips/kernel/kspd.c
@@ -118,11 +118,11 @@ struct apsp_table syscall_command_table[] = {
static int sp_syscall(int num, int arg0, int arg1, int arg2, int arg3)
{
- register long int _num __asm__ ("$2") = num;
- register long int _arg0 __asm__ ("$4") = arg0;
- register long int _arg1 __asm__ ("$5") = arg1;
- register long int _arg2 __asm__ ("$6") = arg2;
- register long int _arg3 __asm__ ("$7") = arg3;
+ register long int _num __asm__("$2") = num;
+ register long int _arg0 __asm__("$4") = arg0;
+ register long int _arg1 __asm__("$5") = arg1;
+ register long int _arg2 __asm__("$6") = arg2;
+ register long int _arg3 __asm__("$7") = arg3;
mm_segment_t old_fs;
@@ -239,7 +239,7 @@ void sp_work_handle_request(void)
case MTSP_SYSCALL_GETTOD:
memset(&tz, 0, sizeof(tz));
if ((ret.retval = sp_syscall(__NR_gettimeofday, (int)&tv,
- (int)&tz, 0,0)) == 0)
+ (int)&tz, 0, 0)) == 0)
ret.retval = tv.tv_sec;
break;
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 135d9a5fe33..d6e01215fb2 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -58,10 +58,10 @@
#define AA(__x) ((unsigned long)((int)__x))
#ifdef __MIPSEB__
-#define merge_64(r1,r2) ((((r1) & 0xffffffffUL) << 32) + ((r2) & 0xffffffffUL))
+#define merge_64(r1, r2) ((((r1) & 0xffffffffUL) << 32) + ((r2) & 0xffffffffUL))
#endif
#ifdef __MIPSEL__
-#define merge_64(r1,r2) ((((r2) & 0xffffffffUL) << 32) + ((r1) & 0xffffffffUL))
+#define merge_64(r1, r2) ((((r2) & 0xffffffffUL) << 32) + ((r1) & 0xffffffffUL))
#endif
/*
@@ -96,7 +96,7 @@ int cp_compat_stat(struct kstat *stat, struct compat_stat __user *statbuf)
#endif
tmp.st_blocks = stat->blocks;
tmp.st_blksize = stat->blksize;
- return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
+ return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}
asmlinkage unsigned long
@@ -300,13 +300,13 @@ asmlinkage int sys32_sched_rr_get_interval(compat_pid_t pid,
{
struct timespec t;
int ret;
- mm_segment_t old_fs = get_fs ();
+ mm_segment_t old_fs = get_fs();
- set_fs (KERNEL_DS);
+ set_fs(KERNEL_DS);
ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
- set_fs (old_fs);
+ set_fs(old_fs);
if (put_user (t.tv_sec, &interval->tv_sec) ||
- __put_user (t.tv_nsec, &interval->tv_nsec))
+ __put_user(t.tv_nsec, &interval->tv_nsec))
return -EFAULT;
return ret;
}
@@ -314,7 +314,7 @@ asmlinkage int sys32_sched_rr_get_interval(compat_pid_t pid,
#ifdef CONFIG_SYSVIPC
asmlinkage long
-sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
+sys32_ipc(u32 call, int first, int second, int third, u32 ptr, u32 fifth)
{
int version, err;
@@ -373,7 +373,7 @@ sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
#else
asmlinkage long
-sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
+sys32_ipc(u32 call, int first, int second, int third, u32 ptr, u32 fifth)
{
return -ENOSYS;
}
@@ -505,16 +505,16 @@ asmlinkage int sys32_ustat(dev_t dev, struct ustat32 __user * ubuf32)
set_fs(KERNEL_DS);
err = sys_ustat(dev, (struct ustat __user *)&tmp);
- set_fs (old_fs);
+ set_fs(old_fs);
if (err)
goto out;
- memset(&tmp32,0,sizeof(struct ustat32));
+ memset(&tmp32, 0, sizeof(struct ustat32));
tmp32.f_tfree = tmp.f_tfree;
tmp32.f_tinode = tmp.f_tinode;
- err = copy_to_user(ubuf32,&tmp32,sizeof(struct ustat32)) ? -EFAULT : 0;
+ err = copy_to_user(ubuf32, &tmp32, sizeof(struct ustat32)) ? -EFAULT : 0;
out:
return err;
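
The merge_64() macros in the linux32.c hunk above rebuild a 64-bit syscall argument from the two 32-bit registers the o32 ABI passes it in; endianness decides which register carries the high word. A standalone sketch with made-up values (assumes an LP64 host, so unsigned long is 64 bits):

#include <stdio.h>

/* Big endian: the first register of the pair holds the high word. */
#define merge_64_be(r1, r2) \
	((((r1) & 0xffffffffUL) << 32) + ((r2) & 0xffffffffUL))
/* Little endian: the pair arrives the other way around. */
#define merge_64_le(r1, r2) \
	((((r2) & 0xffffffffUL) << 32) + ((r1) & 0xffffffffUL))

int main(void)
{
	unsigned long high = 0x12345678UL, low = 0x9abcdef0UL;

	/* Both calls reconstruct 0x123456789abcdef0. */
	printf("BE: %#lx\n", merge_64_be(high, low));
	printf("LE: %#lx\n", merge_64_le(low, high));
	return 0;
}
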
diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c
index 56750b02ab4..3d6b1ec1f32 100644
--- a/arch/mips/kernel/mips-mt.c
+++ b/arch/mips/kernel/mips-mt.c
@@ -236,7 +236,7 @@ void mips_mt_set_cpuoptions(void)
if (oconfig7 != nconfig7) {
__asm__ __volatile("sync");
write_c0_config7(nconfig7);
- ehb ();
+ ehb();
printk("Config7: 0x%08x\n", read_c0_config7());
}
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index ec04f5a1a5e..efd2d131412 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -17,76 +17,6 @@
unsigned int vced_count, vcei_count;
-static const char *cpu_name[] = {
- [CPU_UNKNOWN] = "unknown",
- [CPU_R2000] = "R2000",
- [CPU_R3000] = "R3000",
- [CPU_R3000A] = "R3000A",
- [CPU_R3041] = "R3041",
- [CPU_R3051] = "R3051",
- [CPU_R3052] = "R3052",
- [CPU_R3081] = "R3081",
- [CPU_R3081E] = "R3081E",
- [CPU_R4000PC] = "R4000PC",
- [CPU_R4000SC] = "R4000SC",
- [CPU_R4000MC] = "R4000MC",
- [CPU_R4200] = "R4200",
- [CPU_R4400PC] = "R4400PC",
- [CPU_R4400SC] = "R4400SC",
- [CPU_R4400MC] = "R4400MC",
- [CPU_R4600] = "R4600",
- [CPU_R6000] = "R6000",
- [CPU_R6000A] = "R6000A",
- [CPU_R8000] = "R8000",
- [CPU_R10000] = "R10000",
- [CPU_R12000] = "R12000",
- [CPU_R14000] = "R14000",
- [CPU_R4300] = "R4300",
- [CPU_R4650] = "R4650",
- [CPU_R4700] = "R4700",
- [CPU_R5000] = "R5000",
- [CPU_R5000A] = "R5000A",
- [CPU_R4640] = "R4640",
- [CPU_NEVADA] = "Nevada",
- [CPU_RM7000] = "RM7000",
- [CPU_RM9000] = "RM9000",
- [CPU_R5432] = "R5432",
- [CPU_4KC] = "MIPS 4Kc",
- [CPU_5KC] = "MIPS 5Kc",
- [CPU_R4310] = "R4310",
- [CPU_SB1] = "SiByte SB1",
- [CPU_SB1A] = "SiByte SB1A",
- [CPU_TX3912] = "TX3912",
- [CPU_TX3922] = "TX3922",
- [CPU_TX3927] = "TX3927",
- [CPU_AU1000] = "Au1000",
- [CPU_AU1500] = "Au1500",
- [CPU_AU1100] = "Au1100",
- [CPU_AU1550] = "Au1550",
- [CPU_AU1200] = "Au1200",
- [CPU_4KEC] = "MIPS 4KEc",
- [CPU_4KSC] = "MIPS 4KSc",
- [CPU_VR41XX] = "NEC Vr41xx",
- [CPU_R5500] = "R5500",
- [CPU_TX49XX] = "TX49xx",
- [CPU_20KC] = "MIPS 20Kc",
- [CPU_24K] = "MIPS 24K",
- [CPU_25KF] = "MIPS 25Kf",
- [CPU_34K] = "MIPS 34K",
- [CPU_74K] = "MIPS 74K",
- [CPU_VR4111] = "NEC VR4111",
- [CPU_VR4121] = "NEC VR4121",
- [CPU_VR4122] = "NEC VR4122",
- [CPU_VR4131] = "NEC VR4131",
- [CPU_VR4133] = "NEC VR4133",
- [CPU_VR4181] = "NEC VR4181",
- [CPU_VR4181A] = "NEC VR4181A",
- [CPU_SR71000] = "Sandcraft SR71000",
- [CPU_PR4450] = "Philips PR4450",
- [CPU_LOONGSON2] = "ICT Loongson-2",
-};
-
-
static int show_cpuinfo(struct seq_file *m, void *v)
{
unsigned long n = (unsigned long) v - 1;
@@ -108,8 +38,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "processor\t\t: %ld\n", n);
sprintf(fmt, "cpu model\t\t: %%s V%%d.%%d%s\n",
cpu_data[n].options & MIPS_CPU_FPU ? " FPU V%d.%d" : "");
- seq_printf(m, fmt, cpu_name[cpu_data[n].cputype <= CPU_LAST ?
- cpu_data[n].cputype : CPU_UNKNOWN],
+ seq_printf(m, fmt, __cpu_name[smp_processor_id()],
(version >> 4) & 0x0f, version & 0x0f,
(fp_vers >> 4) & 0x0f, fp_vers & 0x0f);
seq_printf(m, "BogoMIPS\t\t: %lu.%02lu\n",
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index e6ce943099a..11cb264f59c 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -11,6 +11,7 @@
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
+#include <linux/tick.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
@@ -52,6 +53,7 @@ void __noreturn cpu_idle(void)
{
/* endless idle loop with no priority at all */
while (1) {
+ tick_nohz_stop_sched_tick();
while (!need_resched()) {
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
extern void smtc_idle_loop_hook(void);
@@ -61,6 +63,7 @@ void __noreturn cpu_idle(void)
if (cpu_wait)
(*cpu_wait)();
}
+ tick_nohz_restart_sched_tick();
preempt_enable_no_resched();
schedule();
preempt_disable();
@@ -199,13 +202,13 @@ void elf_dump_regs(elf_greg_t *gp, struct pt_regs *regs)
#endif
}
-int dump_task_regs (struct task_struct *tsk, elf_gregset_t *regs)
+int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
elf_dump_regs(*regs, task_pt_regs(tsk));
return 1;
}
-int dump_task_fpu (struct task_struct *t, elf_fpregset_t *fpr)
+int dump_task_fpu(struct task_struct *t, elf_fpregset_t *fpr)
{
memcpy(fpr, &t->thread.fpu, sizeof(current->thread.fpu));
@@ -231,8 +234,8 @@ long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
regs.cp0_epc = (unsigned long) kernel_thread_helper;
regs.cp0_status = read_c0_status();
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
- regs.cp0_status &= ~(ST0_KUP | ST0_IEC);
- regs.cp0_status |= ST0_IEP;
+ regs.cp0_status = (regs.cp0_status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
+ ((regs.cp0_status & (ST0_KUC | ST0_IEC)) << 2);
#else
regs.cp0_status |= ST0_EXL;
#endif
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index bbd57b20b43..58aa6fec114 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -54,7 +54,7 @@ void ptrace_disable(struct task_struct *child)
* for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
* Registers are sign extended to fill the available space.
*/
-int ptrace_getregs (struct task_struct *child, __s64 __user *data)
+int ptrace_getregs(struct task_struct *child, __s64 __user *data)
{
struct pt_regs *regs;
int i;
@@ -65,13 +65,13 @@ int ptrace_getregs (struct task_struct *child, __s64 __user *data)
regs = task_pt_regs(child);
for (i = 0; i < 32; i++)
- __put_user (regs->regs[i], data + i);
- __put_user (regs->lo, data + EF_LO - EF_R0);
- __put_user (regs->hi, data + EF_HI - EF_R0);
- __put_user (regs->cp0_epc, data + EF_CP0_EPC - EF_R0);
- __put_user (regs->cp0_badvaddr, data + EF_CP0_BADVADDR - EF_R0);
- __put_user (regs->cp0_status, data + EF_CP0_STATUS - EF_R0);
- __put_user (regs->cp0_cause, data + EF_CP0_CAUSE - EF_R0);
+ __put_user(regs->regs[i], data + i);
+ __put_user(regs->lo, data + EF_LO - EF_R0);
+ __put_user(regs->hi, data + EF_HI - EF_R0);
+ __put_user(regs->cp0_epc, data + EF_CP0_EPC - EF_R0);
+ __put_user(regs->cp0_badvaddr, data + EF_CP0_BADVADDR - EF_R0);
+ __put_user(regs->cp0_status, data + EF_CP0_STATUS - EF_R0);
+ __put_user(regs->cp0_cause, data + EF_CP0_CAUSE - EF_R0);
return 0;
}
@@ -81,7 +81,7 @@ int ptrace_getregs (struct task_struct *child, __s64 __user *data)
* the 64-bit format. On a 32-bit kernel only the lower order half
* (according to endianness) will be used.
*/
-int ptrace_setregs (struct task_struct *child, __s64 __user *data)
+int ptrace_setregs(struct task_struct *child, __s64 __user *data)
{
struct pt_regs *regs;
int i;
@@ -92,17 +92,17 @@ int ptrace_setregs (struct task_struct *child, __s64 __user *data)
regs = task_pt_regs(child);
for (i = 0; i < 32; i++)
- __get_user (regs->regs[i], data + i);
- __get_user (regs->lo, data + EF_LO - EF_R0);
- __get_user (regs->hi, data + EF_HI - EF_R0);
- __get_user (regs->cp0_epc, data + EF_CP0_EPC - EF_R0);
+ __get_user(regs->regs[i], data + i);
+ __get_user(regs->lo, data + EF_LO - EF_R0);
+ __get_user(regs->hi, data + EF_HI - EF_R0);
+ __get_user(regs->cp0_epc, data + EF_CP0_EPC - EF_R0);
/* badvaddr, status, and cause may not be written. */
return 0;
}
-int ptrace_getfpregs (struct task_struct *child, __u32 __user *data)
+int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
{
int i;
unsigned int tmp;
@@ -113,13 +113,13 @@ int ptrace_getfpregs (struct task_struct *child, __u32 __user *data)
if (tsk_used_math(child)) {
fpureg_t *fregs = get_fpu_regs(child);
for (i = 0; i < 32; i++)
- __put_user (fregs[i], i + (__u64 __user *) data);
+ __put_user(fregs[i], i + (__u64 __user *) data);
} else {
for (i = 0; i < 32; i++)
- __put_user ((__u64) -1, i + (__u64 __user *) data);
+ __put_user((__u64) -1, i + (__u64 __user *) data);
}
- __put_user (child->thread.fpu.fcr31, data + 64);
+ __put_user(child->thread.fpu.fcr31, data + 64);
preempt_disable();
if (cpu_has_fpu) {
@@ -142,12 +142,12 @@ int ptrace_getfpregs (struct task_struct *child, __u32 __user *data)
tmp = 0;
}
preempt_enable();
- __put_user (tmp, data + 65);
+ __put_user(tmp, data + 65);
return 0;
}
-int ptrace_setfpregs (struct task_struct *child, __u32 __user *data)
+int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
{
fpureg_t *fregs;
int i;
@@ -158,9 +158,9 @@ int ptrace_setfpregs (struct task_struct *child, __u32 __user *data)
fregs = get_fpu_regs(child);
for (i = 0; i < 32; i++)
- __get_user (fregs[i], i + (__u64 __user *) data);
+ __get_user(fregs[i], i + (__u64 __user *) data);
- __get_user (child->thread.fpu.fcr31, data + 64);
+ __get_user(child->thread.fpu.fcr31, data + 64);
/* FIR may not be written. */
@@ -390,19 +390,19 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
}
case PTRACE_GETREGS:
- ret = ptrace_getregs (child, (__u64 __user *) data);
+ ret = ptrace_getregs(child, (__u64 __user *) data);
break;
case PTRACE_SETREGS:
- ret = ptrace_setregs (child, (__u64 __user *) data);
+ ret = ptrace_setregs(child, (__u64 __user *) data);
break;
case PTRACE_GETFPREGS:
- ret = ptrace_getfpregs (child, (__u32 __user *) data);
+ ret = ptrace_getfpregs(child, (__u32 __user *) data);
break;
case PTRACE_SETFPREGS:
- ret = ptrace_setfpregs (child, (__u32 __user *) data);
+ ret = ptrace_setfpregs(child, (__u32 __user *) data);
break;
case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index d9a39c16945..f2bffed94fa 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
@@ -36,11 +36,11 @@
#include <asm/uaccess.h>
#include <asm/bootinfo.h>
-int ptrace_getregs (struct task_struct *child, __s64 __user *data);
-int ptrace_setregs (struct task_struct *child, __s64 __user *data);
+int ptrace_getregs(struct task_struct *child, __s64 __user *data);
+int ptrace_setregs(struct task_struct *child, __s64 __user *data);
-int ptrace_getfpregs (struct task_struct *child, __u32 __user *data);
-int ptrace_setfpregs (struct task_struct *child, __u32 __user *data);
+int ptrace_getfpregs(struct task_struct *child, __u32 __user *data);
+int ptrace_setfpregs(struct task_struct *child, __u32 __user *data);
/*
* Tracing a 32-bit process with a 64-bit strace and vice versa will not
@@ -346,19 +346,19 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
}
case PTRACE_GETREGS:
- ret = ptrace_getregs (child, (__u64 __user *) (__u64) data);
+ ret = ptrace_getregs(child, (__u64 __user *) (__u64) data);
break;
case PTRACE_SETREGS:
- ret = ptrace_setregs (child, (__u64 __user *) (__u64) data);
+ ret = ptrace_setregs(child, (__u64 __user *) (__u64) data);
break;
case PTRACE_GETFPREGS:
- ret = ptrace_getfpregs (child, (__u32 __user *) (__u64) data);
+ ret = ptrace_getfpregs(child, (__u32 __user *) (__u64) data);
break;
case PTRACE_SETFPREGS:
- ret = ptrace_setfpregs (child, (__u32 __user *) (__u64) data);
+ ret = ptrace_setfpregs(child, (__u32 __user *) (__u64) data);
break;
case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 316685fca05..a06a27d6cfc 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -51,10 +51,8 @@ EXPORT_SYMBOL(PCI_DMA_BUS_IS_PHYS);
* These are initialized so they are in the .data section
*/
unsigned long mips_machtype __read_mostly = MACH_UNKNOWN;
-unsigned long mips_machgroup __read_mostly = MACH_GROUP_UNKNOWN;
EXPORT_SYMBOL(mips_machtype);
-EXPORT_SYMBOL(mips_machgroup);
struct boot_mem_map boot_mem_map;
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 2a08ce41bf2..a4e106c56ab 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -613,9 +613,9 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
ret = current->thread.abi->setup_frame(ka, regs, sig, oldset);
spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
if (!(ka->sa.sa_flags & SA_NODEFER))
- sigaddset(&current->blocked,sig);
+ sigaddset(&current->blocked, sig);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 64b612a0a62..572c610db1b 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -261,11 +261,11 @@ static inline int put_sigset(const sigset_t *kbuf, compat_sigset_t __user *ubuf)
default:
__put_sigset_unknown_nsig();
case 2:
- err |= __put_user (kbuf->sig[1] >> 32, &ubuf->sig[3]);
- err |= __put_user (kbuf->sig[1] & 0xffffffff, &ubuf->sig[2]);
+ err |= __put_user(kbuf->sig[1] >> 32, &ubuf->sig[3]);
+ err |= __put_user(kbuf->sig[1] & 0xffffffff, &ubuf->sig[2]);
case 1:
- err |= __put_user (kbuf->sig[0] >> 32, &ubuf->sig[1]);
- err |= __put_user (kbuf->sig[0] & 0xffffffff, &ubuf->sig[0]);
+ err |= __put_user(kbuf->sig[0] >> 32, &ubuf->sig[1]);
+ err |= __put_user(kbuf->sig[0] & 0xffffffff, &ubuf->sig[0]);
}
return err;
@@ -283,12 +283,12 @@ static inline int get_sigset(sigset_t *kbuf, const compat_sigset_t __user *ubuf)
default:
__get_sigset_unknown_nsig();
case 2:
- err |= __get_user (sig[3], &ubuf->sig[3]);
- err |= __get_user (sig[2], &ubuf->sig[2]);
+ err |= __get_user(sig[3], &ubuf->sig[3]);
+ err |= __get_user(sig[2], &ubuf->sig[2]);
kbuf->sig[1] = sig[2] | (sig[3] << 32);
case 1:
- err |= __get_user (sig[1], &ubuf->sig[1]);
- err |= __get_user (sig[0], &ubuf->sig[0]);
+ err |= __get_user(sig[1], &ubuf->sig[1]);
+ err |= __get_user(sig[0], &ubuf->sig[0]);
kbuf->sig[0] = sig[0] | (sig[1] << 32);
}
@@ -412,10 +412,10 @@ asmlinkage int sys32_sigaltstack(nabi_no_regargs struct pt_regs regs)
return -EFAULT;
}
- set_fs (KERNEL_DS);
+ set_fs(KERNEL_DS);
ret = do_sigaltstack(uss ? (stack_t __user *)&kss : NULL,
uoss ? (stack_t __user *)&koss : NULL, usp);
- set_fs (old_fs);
+ set_fs(old_fs);
if (!ret && uoss) {
if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
@@ -559,9 +559,9 @@ asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
/* It is more difficult to avoid calling this function than to
call it and ignore errors. */
old_fs = get_fs();
- set_fs (KERNEL_DS);
+ set_fs(KERNEL_DS);
do_sigaltstack((stack_t __user *)&st, NULL, regs.regs[29]);
- set_fs (old_fs);
+ set_fs(old_fs);
/*
* Don't let your children do this ...
@@ -746,11 +746,11 @@ asmlinkage int sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
if (set && get_sigset(&new_set, set))
return -EFAULT;
- set_fs (KERNEL_DS);
+ set_fs(KERNEL_DS);
ret = sys_rt_sigprocmask(how, set ? (sigset_t __user *)&new_set : NULL,
oset ? (sigset_t __user *)&old_set : NULL,
sigsetsize);
- set_fs (old_fs);
+ set_fs(old_fs);
if (!ret && oset && put_sigset(&old_set, oset))
return -EFAULT;
@@ -765,9 +765,9 @@ asmlinkage int sys32_rt_sigpending(compat_sigset_t __user *uset,
sigset_t set;
mm_segment_t old_fs = get_fs();
- set_fs (KERNEL_DS);
+ set_fs(KERNEL_DS);
ret = sys_rt_sigpending((sigset_t __user *)&set, sigsetsize);
- set_fs (old_fs);
+ set_fs(old_fs);
if (!ret && put_sigset(&set, uset))
return -EFAULT;
@@ -781,12 +781,12 @@ asmlinkage int sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *
int ret;
mm_segment_t old_fs = get_fs();
- if (copy_from_user (&info, uinfo, 3*sizeof(int)) ||
- copy_from_user (info._sifields._pad, uinfo->_sifields._pad, SI_PAD_SIZE))
+ if (copy_from_user(&info, uinfo, 3*sizeof(int)) ||
+ copy_from_user(info._sifields._pad, uinfo->_sifields._pad, SI_PAD_SIZE))
return -EFAULT;
- set_fs (KERNEL_DS);
+ set_fs(KERNEL_DS);
ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
- set_fs (old_fs);
+ set_fs(old_fs);
return ret;
}
@@ -801,10 +801,10 @@ sys32_waitid(int which, compat_pid_t pid,
mm_segment_t old_fs = get_fs();
info.si_signo = 0;
- set_fs (KERNEL_DS);
+ set_fs(KERNEL_DS);
ret = sys_waitid(which, pid, (siginfo_t __user *) &info, options,
uru ? (struct rusage __user *) &ru : NULL);
- set_fs (old_fs);
+ set_fs(old_fs);
if (ret < 0 || info.si_signo == 0)
return ret;
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c
index eb7e05926eb..bb277e82d42 100644
--- a/arch/mips/kernel/signal_n32.c
+++ b/arch/mips/kernel/signal_n32.c
@@ -88,7 +88,7 @@ struct rt_sigframe_n32 {
#endif /* !ICACHE_REFILLS_WORKAROUND_WAR */
-extern void sigset_from_compat (sigset_t *set, compat_sigset_t *compat);
+extern void sigset_from_compat(sigset_t *set, compat_sigset_t *compat);
asmlinkage int sysn32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
{
@@ -105,7 +105,7 @@ asmlinkage int sysn32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
unewset = (compat_sigset_t __user *) regs.regs[4];
if (copy_from_user(&uset, unewset, sizeof(uset)))
return -EFAULT;
- sigset_from_compat (&newset, &uset);
+ sigset_from_compat(&newset, &uset);
sigdelsetmask(&newset, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 05dcce41632..94e210cc6cb 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -353,7 +353,7 @@ void core_send_ipi(int cpu, unsigned int action)
unsigned long flags;
int vpflags;
- local_irq_save (flags);
+ local_irq_save(flags);
vpflags = dvpe(); /* cant access the other CPU's registers whilst MVPE enabled */
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 73b0dab0266..432f2e376ae 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -38,6 +38,7 @@
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
+#include <asm/time.h>
#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
@@ -70,6 +71,7 @@ asmlinkage __cpuinit void start_secondary(void)
cpu_probe();
cpu_report();
per_cpu_trap_init();
+ mips_clockevent_init();
prom_init_secondary();
/*
@@ -95,6 +97,8 @@ struct call_data_struct *call_data;
/*
* Run a function on all other CPUs.
+ *
+ * <mask> cpumask_t of processors to run the function on.
* <func> The function to run. This must be fast and non-blocking.
* <info> An arbitrary pointer to pass to the function.
* <retry> If true, keep retrying until ready.
@@ -119,18 +123,20 @@ struct call_data_struct *call_data;
* Spin waiting for call_lock
* Deadlock Deadlock
*/
-int smp_call_function (void (*func) (void *info), void *info, int retry,
- int wait)
+int smp_call_function_mask(cpumask_t mask, void (*func) (void *info),
+ void *info, int retry, int wait)
{
struct call_data_struct data;
- int i, cpus = num_online_cpus() - 1;
int cpu = smp_processor_id();
+ int cpus;
/*
* Can die spectacularly if this CPU isn't yet marked online
*/
BUG_ON(!cpu_online(cpu));
+ cpu_clear(cpu, mask);
+ cpus = cpus_weight(mask);
if (!cpus)
return 0;
@@ -149,9 +155,7 @@ int smp_call_function (void (*func) (void *info), void *info, int retry,
smp_mb();
/* Send a message to all other CPUs and wait for them to respond */
- for_each_online_cpu(i)
- if (i != cpu)
- core_send_ipi(i, SMP_CALL_FUNCTION);
+ core_send_ipi_mask(mask, SMP_CALL_FUNCTION);
/* Wait for response */
/* FIXME: lock-up detection, backtrace on lock-up */
@@ -167,6 +171,11 @@ int smp_call_function (void (*func) (void *info), void *info, int retry,
return 0;
}
+int smp_call_function(void (*func) (void *info), void *info, int retry,
+ int wait)
+{
+ return smp_call_function_mask(cpu_online_map, func, info, retry, wait);
+}
void smp_call_function_interrupt(void)
{
@@ -197,8 +206,7 @@ void smp_call_function_interrupt(void)
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
int retry, int wait)
{
- struct call_data_struct data;
- int me;
+ int ret, me;
/*
* Can die spectacularly if this CPU isn't yet marked online
@@ -217,33 +225,8 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
return 0;
}
- /* Can deadlock when called with interrupts disabled */
- WARN_ON(irqs_disabled());
-
- data.func = func;
- data.info = info;
- atomic_set(&data.started, 0);
- data.wait = wait;
- if (wait)
- atomic_set(&data.finished, 0);
-
- spin_lock(&smp_call_lock);
- call_data = &data;
- smp_mb();
-
- /* Send a message to the other CPU */
- core_send_ipi(cpu, SMP_CALL_FUNCTION);
-
- /* Wait for response */
- /* FIXME: lock-up detection, backtrace on lock-up */
- while (atomic_read(&data.started) != 1)
- barrier();
-
- if (wait)
- while (atomic_read(&data.finished) != 1)
- barrier();
- call_data = NULL;
- spin_unlock(&smp_call_lock);
+ ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, retry,
+ wait);
put_cpu();
return 0;
@@ -390,12 +373,15 @@ void flush_tlb_mm(struct mm_struct *mm)
preempt_disable();
if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
- smp_on_other_tlbs(flush_tlb_mm_ipi, (void *)mm);
+ smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
} else {
- int i;
- for (i = 0; i < num_online_cpus(); i++)
- if (smp_processor_id() != i)
- cpu_context(i, mm) = 0;
+ cpumask_t mask = cpu_online_map;
+ unsigned int cpu;
+
+ cpu_clear(smp_processor_id(), mask);
+		for_each_cpu_mask(cpu, mask)
+ if (cpu_context(cpu, mm))
+ cpu_context(cpu, mm) = 0;
}
local_flush_tlb_mm(mm);
@@ -410,7 +396,7 @@ struct flush_tlb_data {
static void flush_tlb_range_ipi(void *info)
{
- struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
+ struct flush_tlb_data *fd = info;
local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}
@@ -421,17 +407,21 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned l
preempt_disable();
if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
- struct flush_tlb_data fd;
+ struct flush_tlb_data fd = {
+ .vma = vma,
+ .addr1 = start,
+ .addr2 = end,
+ };
- fd.vma = vma;
- fd.addr1 = start;
- fd.addr2 = end;
- smp_on_other_tlbs(flush_tlb_range_ipi, (void *)&fd);
+ smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
} else {
- int i;
- for (i = 0; i < num_online_cpus(); i++)
- if (smp_processor_id() != i)
- cpu_context(i, mm) = 0;
+ cpumask_t mask = cpu_online_map;
+ unsigned int cpu;
+
+ cpu_clear(smp_processor_id(), mask);
+		for_each_cpu_mask(cpu, mask)
+ if (cpu_context(cpu, mm))
+ cpu_context(cpu, mm) = 0;
}
local_flush_tlb_range(vma, start, end);
preempt_enable();
@@ -439,23 +429,24 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned l
static void flush_tlb_kernel_range_ipi(void *info)
{
- struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
+ struct flush_tlb_data *fd = info;
local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
- struct flush_tlb_data fd;
+ struct flush_tlb_data fd = {
+ .addr1 = start,
+ .addr2 = end,
+ };
- fd.addr1 = start;
- fd.addr2 = end;
- on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1, 1);
+ on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1, 1);
}
static void flush_tlb_page_ipi(void *info)
{
- struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
+ struct flush_tlb_data *fd = info;
local_flush_tlb_page(fd->vma, fd->addr1);
}
@@ -464,16 +455,20 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
preempt_disable();
if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
- struct flush_tlb_data fd;
+ struct flush_tlb_data fd = {
+ .vma = vma,
+ .addr1 = page,
+ };
- fd.vma = vma;
- fd.addr1 = page;
- smp_on_other_tlbs(flush_tlb_page_ipi, (void *)&fd);
+ smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
} else {
- int i;
- for (i = 0; i < num_online_cpus(); i++)
- if (smp_processor_id() != i)
- cpu_context(i, vma->vm_mm) = 0;
+ cpumask_t mask = cpu_online_map;
+ unsigned int cpu;
+
+ cpu_clear(smp_processor_id(), mask);
+		for_each_cpu_mask(cpu, mask)
+ if (cpu_context(cpu, vma->vm_mm))
+ cpu_context(cpu, vma->vm_mm) = 0;
}
local_flush_tlb_page(vma, page);
preempt_enable();
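
The flush_tlb_* conversions above also switch the on-stack struct flush_tlb_data from field-by-field assignment to designated initializers, which zero any member left unnamed -- that is why the kernel-range variant can simply omit .vma. A stand-alone sketch of the idiom with a hypothetical struct:

    #include <stdio.h>

    struct flush_data {                 /* hypothetical stand-in for flush_tlb_data */
        void *vma;
        unsigned long addr1;
        unsigned long addr2;
    };

    int main(void)
    {
        struct flush_data fd = {        /* named fields set, .vma left out -> NULL */
            .addr1 = 0x1000,
            .addr2 = 0x2000,
        };

        printf("vma=%p addr1=%#lx addr2=%#lx\n", fd.vma, fd.addr1, fd.addr2);
        return 0;
    }
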
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index f09404377ef..a8c1a698d58 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -1,5 +1,6 @@
/* Copyright (C) 2004 Mips Technologies, Inc */
+#include <linux/clockchips.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
@@ -62,7 +63,7 @@ asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];
* Clock interrupt "latch" buffers, per "CPU"
*/
-unsigned int ipi_timer_latch[NR_CPUS];
+static atomic_t ipi_timer_latch[NR_CPUS];
/*
 * Number of InterProcessor Interrupt (IPI) message buffers to allocate
@@ -179,7 +180,7 @@ void __init sanitize_tlb_entries(void)
static void smtc_configure_tlb(void)
{
- int i,tlbsiz,vpes;
+ int i, tlbsiz, vpes;
unsigned long mvpconf0;
unsigned long config1val;
@@ -296,8 +297,10 @@ int __init mipsmt_build_cpu_map(int start_cpu_slot)
__cpu_number_map[i] = i;
__cpu_logical_map[i] = i;
}
+#ifdef CONFIG_MIPS_MT_FPAFF
/* Initialize map of CPUs with FPUs */
cpus_clear(mt_fpu_cpumask);
+#endif
/* One of those TC's is the one booting, and not a secondary... */
printk("%i available secondary CPU TC(s)\n", i - 1);
@@ -359,7 +362,7 @@ void mipsmt_prepare_cpus(void)
IPIQ[i].head = IPIQ[i].tail = NULL;
spin_lock_init(&IPIQ[i].lock);
IPIQ[i].depth = 0;
- ipi_timer_latch[i] = 0;
+ atomic_set(&ipi_timer_latch[i], 0);
}
/* cpu_data index starts at zero */
@@ -369,7 +372,7 @@ void mipsmt_prepare_cpus(void)
cpu++;
/* Report on boot-time options */
- mips_mt_set_cpuoptions ();
+ mips_mt_set_cpuoptions();
if (vpelimit > 0)
printk("Limit of %d VPEs set\n", vpelimit);
if (tclimit > 0)
@@ -420,7 +423,7 @@ void mipsmt_prepare_cpus(void)
* code. Leave it alone!
*/
if (tc != 0) {
- smtc_tc_setup(vpe,tc, cpu);
+ smtc_tc_setup(vpe, tc, cpu);
cpu++;
}
printk(" %d", tc);
@@ -428,7 +431,7 @@ void mipsmt_prepare_cpus(void)
}
if (slop) {
if (tc != 0) {
- smtc_tc_setup(vpe,tc, cpu);
+ smtc_tc_setup(vpe, tc, cpu);
cpu++;
}
printk(" %d", tc);
@@ -482,10 +485,12 @@ void mipsmt_prepare_cpus(void)
/* Set up coprocessor affinity CPU mask(s) */
+#ifdef CONFIG_MIPS_MT_FPAFF
for (tc = 0; tc < ntc; tc++) {
if (cpu_data[tc].options & MIPS_CPU_FPU)
cpu_set(tc, mt_fpu_cpumask);
}
+#endif
/* set up ipi interrupts... */
@@ -567,7 +572,7 @@ void smtc_init_secondary(void)
if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
((read_c0_tcbind() & TCBIND_CURVPE)
!= cpu_data[smp_processor_id() - 1].vpe_id)){
- write_c0_compare (read_c0_count() + mips_hpt_frequency/HZ);
+ write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
}
local_irq_enable();
@@ -606,6 +611,60 @@ int setup_irq_smtc(unsigned int irq, struct irqaction * new,
return setup_irq(irq, new);
}
+#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
+/*
+ * Support for IRQ affinity to TCs
+ */
+
+void smtc_set_irq_affinity(unsigned int irq, cpumask_t affinity)
+{
+ /*
+ * If a "fast path" cache of quickly decodable affinity state
+ * is maintained, this is where it gets done, on a call up
+ * from the platform affinity code.
+ */
+}
+
+void smtc_forward_irq(unsigned int irq)
+{
+ int target;
+
+ /*
+ * OK wise guy, now figure out how to get the IRQ
+ * to be serviced on an authorized "CPU".
+ *
+ * Ideally, to handle the situation where an IRQ has multiple
+ * eligible CPUS, we would maintain state per IRQ that would
+ * allow a fair distribution of service requests. Since the
+ * expected use model is any-or-only-one, for simplicity
+ * and efficiency, we just pick the easiest one to find.
+ */
+
+ target = first_cpu(irq_desc[irq].affinity);
+
+ /*
+ * We depend on the platform code to have correctly processed
+ * IRQ affinity change requests to ensure that the IRQ affinity
+ * mask has been purged of bits corresponding to nonexistent and
+ * offline "CPUs", and to TCs bound to VPEs other than the VPE
+ * connected to the physical interrupt input for the interrupt
+ * in question. Otherwise we have a nasty problem with interrupt
+ * mask management. This is best handled in non-performance-critical
+ * platform IRQ affinity setting code, to minimize interrupt-time
+ * checks.
+ */
+
+ /* If no one is eligible, service locally */
+ if (target >= NR_CPUS) {
+ do_IRQ_no_affinity(irq);
+ return;
+ }
+
+ smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq);
+}
+
+#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
+
/*
* IPI model for SMTC is tricky, because interrupts aren't TC-specific.
* Within a VPE one TC can interrupt another by different approaches.
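
smtc_forward_irq() picks its target with first_cpu(), which reduces to find-first-set over the affinity mask and yields NR_CPUS for an empty mask -- the "service locally" fallback above. A user-space equivalent over a 64-bit mask (names and NR_CPUS value assumed for the sketch):

    #include <stdio.h>
    #include <stdint.h>

    #define NR_CPUS 64                  /* assumed CPU count for the sketch */

    static int first_cpu64(uint64_t mask)
    {
        return mask ? __builtin_ctzll(mask) : NR_CPUS;
    }

    int main(void)
    {
        printf("%d\n", first_cpu64(0x0c)); /* lowest set bit -> CPU 2 */
        printf("%d\n", first_cpu64(0));    /* empty mask -> NR_CPUS */
        return 0;
    }
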
@@ -648,7 +707,7 @@ static void smtc_ipi_qdump(void)
* be done with the atomic.h primitives). And since this is
* MIPS MT, we can assume that we have LL/SC.
*/
-static __inline__ int atomic_postincrement(unsigned int *pv)
+static inline int atomic_postincrement(atomic_t *v)
{
unsigned long result;
@@ -659,9 +718,9 @@ static __inline__ int atomic_postincrement(unsigned int *pv)
" addu %1, %0, 1 \n"
" sc %1, %2 \n"
" beqz %1, 1b \n"
- " sync \n"
- : "=&r" (result), "=&r" (temp), "=m" (*pv)
- : "m" (*pv)
+ __WEAK_LLSC_MB
+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
+ : "m" (v->counter)
: "memory");
return result;
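
The reworked atomic_postincrement() keeps the classic LL/SC contract: load-linked the counter, add one, store-conditional, retry until the store succeeds, and return the pre-increment value. In portable C11 the same contract is a single atomic_fetch_add(); a minimal sketch (user space, not the kernel primitive):

    #include <stdio.h>
    #include <stdatomic.h>

    int main(void)
    {
        atomic_int v = 41;

        /* Same contract as the LL/SC loop: atomically add one and
         * return the value the counter held *before* the add. */
        int old = atomic_fetch_add(&v, 1);

        printf("old=%d new=%d\n", old, atomic_load(&v));
        return 0;
    }
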
@@ -689,6 +748,8 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
pipi->arg = (void *)action;
pipi->dest = cpu;
if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
+ if (type == SMTC_CLOCK_TICK)
+ atomic_inc(&ipi_timer_latch[cpu]);
		/* If not on same VPE, enqueue and send cross-VPE interrupt */
smtc_ipi_nq(&IPIQ[cpu], pipi);
LOCK_CORE_PRA();
@@ -730,6 +791,8 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
}
smtc_ipi_nq(&IPIQ[cpu], pipi);
} else {
+ if (type == SMTC_CLOCK_TICK)
+ atomic_inc(&ipi_timer_latch[cpu]);
post_direct_ipi(cpu, pipi);
write_tc_c0_tchalt(0);
UNLOCK_CORE_PRA();
@@ -747,6 +810,7 @@ static void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
unsigned long tcrestart;
extern u32 kernelsp[NR_CPUS];
extern void __smtc_ipi_vector(void);
+//printk("%s: on %d for %d\n", __func__, smp_processor_id(), cpu);
/* Extract Status, EPC from halted TC */
tcstatus = read_tc_c0_tcstatus();
@@ -797,25 +861,31 @@ static void ipi_call_interrupt(void)
smp_call_function_interrupt();
}
+DECLARE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device);
+
void ipi_decode(struct smtc_ipi *pipi)
{
+ unsigned int cpu = smp_processor_id();
+ struct clock_event_device *cd;
void *arg_copy = pipi->arg;
int type_copy = pipi->type;
- int dest_copy = pipi->dest;
+ int ticks;
smtc_ipi_nq(&freeIPIq, pipi);
switch (type_copy) {
case SMTC_CLOCK_TICK:
irq_enter();
- kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + cp0_compare_irq]++;
- /* Invoke Clock "Interrupt" */
- ipi_timer_latch[dest_copy] = 0;
-#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
- clock_hang_reported[dest_copy] = 0;
-#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
- local_timer_interrupt(0, NULL);
+ kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + 1]++;
+ cd = &per_cpu(smtc_dummy_clockevent_device, cpu);
+ ticks = atomic_read(&ipi_timer_latch[cpu]);
+ atomic_sub(ticks, &ipi_timer_latch[cpu]);
+ while (ticks) {
+ cd->event_handler(cd);
+ ticks--;
+ }
irq_exit();
break;
+
case LINUX_SMP_IPI:
switch ((int)arg_copy) {
case SMP_RESCHEDULE_YOURSELF:
@@ -830,6 +900,15 @@ void ipi_decode(struct smtc_ipi *pipi)
break;
}
break;
+#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
+ case IRQ_AFFINITY_IPI:
+ /*
+ * Accept a "forwarded" interrupt that was initially
+ * taken by a TC who doesn't have affinity for the IRQ.
+ */
+ do_IRQ_no_affinity((int)arg_copy);
+ break;
+#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
default:
printk("Impossible SMTC IPI Type 0x%x\n", type_copy);
break;
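
In the SMTC_CLOCK_TICK case above, ipi_decode() snapshots the per-CPU tick latch and subtracts exactly that snapshot, so ticks queued while the handlers run are preserved for the next pass rather than lost. A C11 sketch of the drain pattern (names illustrative):

    #include <stdio.h>
    #include <stdatomic.h>

    static atomic_int tick_latch;       /* stand-in for ipi_timer_latch[cpu] */

    static void drain(void)
    {
        /* Snapshot, then subtract exactly the snapshot: ticks posted
         * while we run the handlers survive for the next drain. */
        int ticks = atomic_load(&tick_latch);

        atomic_fetch_sub(&tick_latch, ticks);
        while (ticks--)
            puts("tick");
    }

    int main(void)
    {
        atomic_fetch_add(&tick_latch, 3);   /* sender side: queue 3 ticks */
        drain();
        return 0;
    }
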
@@ -858,25 +937,6 @@ void deferred_smtc_ipi(void)
}
/*
- * Send clock tick to all TCs except the one executing the funtion
- */
-
-void smtc_timer_broadcast(void)
-{
- int cpu;
- int myTC = cpu_data[smp_processor_id()].tc_id;
- int myVPE = cpu_data[smp_processor_id()].vpe_id;
-
- smtc_cpu_stats[smp_processor_id()].timerints++;
-
- for_each_online_cpu(cpu) {
- if (cpu_data[cpu].vpe_id == myVPE &&
- cpu_data[cpu].tc_id != myTC)
- smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
- }
-}
-
-/*
* Cross-VPE interrupts in the SMTC prototype use "software interrupts"
* set via cross-VPE MTTR manipulation of the Cause register. It would be
* in some regards preferable to have external logic for "doorbell" hardware
@@ -1117,11 +1177,11 @@ void smtc_idle_loop_hook(void)
for (tc = 0; tc < NR_CPUS; tc++) {
/* Don't check ourself - we'll dequeue IPIs just below */
if ((tc != smp_processor_id()) &&
- ipi_timer_latch[tc] > timerq_limit) {
+ atomic_read(&ipi_timer_latch[tc]) > timerq_limit) {
if (clock_hang_reported[tc] == 0) {
pdb_msg += sprintf(pdb_msg,
"TC %d looks hung with timer latch at %d\n",
- tc, ipi_timer_latch[tc]);
+ tc, atomic_read(&ipi_timer_latch[tc]));
clock_hang_reported[tc]++;
}
}
@@ -1162,7 +1222,7 @@ void smtc_soft_dump(void)
smtc_ipi_qdump();
printk("Timer IPI Backlogs:\n");
for (i=0; i < NR_CPUS; i++) {
- printk("%d: %d\n", i, ipi_timer_latch[i]);
+ printk("%d: %d\n", i, atomic_read(&ipi_timer_latch[i]));
}
printk("%d Recoveries of \"stolen\" FPU\n",
atomic_read(&smtc_fpu_recoveries));
@@ -1204,7 +1264,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
if (cpu_has_vtag_icache)
flush_icache_all();
	/* Traverse all online CPUs (hack requires contiguous range) */
- for (i = 0; i < num_online_cpus(); i++) {
+ for_each_online_cpu(i) {
/*
* We don't need to worry about our own CPU, nor those of
* CPUs who don't share our TLB.
@@ -1233,7 +1293,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
/*
* SMTC shares the TLB within VPEs and possibly across all VPEs.
*/
- for (i = 0; i < num_online_cpus(); i++) {
+ for_each_online_cpu(i) {
if ((smtc_status & SMTC_TLB_SHARED) ||
(cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
cpu_context(i, mm) = asid_cache(i) = asid;
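
The loops in smtc_get_new_mmu_context() move from `for (i = 0; i < num_online_cpus(); i++)` to for_each_online_cpu(i): CPU numbers need not be contiguous, and indexing 0..n-1 can visit an offline CPU while skipping an online one. A small sketch with a 64-bit word standing in for the online map:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* CPUs 0, 1 and 3 online, CPU 2 offline: indexing 0..2 would
         * visit offline CPU 2 and miss online CPU 3. */
        uint64_t online = (1ULL << 0) | (1ULL << 1) | (1ULL << 3);
        int cpu;

        for (cpu = 0; cpu < 64; cpu++)
            if (online & (1ULL << cpu))
                printf("visit cpu %d\n", cpu);
        return 0;
    }
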
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 7c800ec3ff5..17c4374d220 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -245,7 +245,7 @@ asmlinkage int sys_olduname(struct oldold_utsname __user * name)
if (!name)
return -EFAULT;
- if (!access_ok(VERIFY_WRITE,name,sizeof(struct oldold_utsname)))
+ if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
return -EFAULT;
error = __copy_to_user(&name->sysname, &utsname()->sysname,
@@ -314,8 +314,8 @@ asmlinkage int _sys_sysmips(int cmd, long arg1, int arg2, int arg3)
*
* This is really horribly ugly.
*/
-asmlinkage int sys_ipc (unsigned int call, int first, int second,
- unsigned long third, void __user *ptr, long fifth)
+asmlinkage int sys_ipc(unsigned int call, int first, int second,
+ unsigned long third, void __user *ptr, long fifth)
{
int version, ret;
@@ -324,26 +324,26 @@ asmlinkage int sys_ipc (unsigned int call, int first, int second,
switch (call) {
case SEMOP:
- return sys_semtimedop (first, (struct sembuf __user *)ptr,
- second, NULL);
+ return sys_semtimedop(first, (struct sembuf __user *)ptr,
+ second, NULL);
case SEMTIMEDOP:
- return sys_semtimedop (first, (struct sembuf __user *)ptr,
- second,
- (const struct timespec __user *)fifth);
+ return sys_semtimedop(first, (struct sembuf __user *)ptr,
+ second,
+ (const struct timespec __user *)fifth);
case SEMGET:
- return sys_semget (first, second, third);
+ return sys_semget(first, second, third);
case SEMCTL: {
union semun fourth;
if (!ptr)
return -EINVAL;
if (get_user(fourth.__pad, (void __user *__user *) ptr))
return -EFAULT;
- return sys_semctl (first, second, third, fourth);
+ return sys_semctl(first, second, third, fourth);
}
case MSGSND:
- return sys_msgsnd (first, (struct msgbuf __user *) ptr,
- second, third);
+ return sys_msgsnd(first, (struct msgbuf __user *) ptr,
+ second, third);
case MSGRCV:
switch (version) {
case 0: {
@@ -353,45 +353,45 @@ asmlinkage int sys_ipc (unsigned int call, int first, int second,
if (copy_from_user(&tmp,
(struct ipc_kludge __user *) ptr,
- sizeof (tmp)))
+ sizeof(tmp)))
return -EFAULT;
- return sys_msgrcv (first, tmp.msgp, second,
- tmp.msgtyp, third);
+ return sys_msgrcv(first, tmp.msgp, second,
+ tmp.msgtyp, third);
}
default:
- return sys_msgrcv (first,
- (struct msgbuf __user *) ptr,
- second, fifth, third);
+ return sys_msgrcv(first,
+ (struct msgbuf __user *) ptr,
+ second, fifth, third);
}
case MSGGET:
- return sys_msgget ((key_t) first, second);
+ return sys_msgget((key_t) first, second);
case MSGCTL:
- return sys_msgctl (first, second,
- (struct msqid_ds __user *) ptr);
+ return sys_msgctl(first, second,
+ (struct msqid_ds __user *) ptr);
case SHMAT:
switch (version) {
default: {
unsigned long raddr;
- ret = do_shmat (first, (char __user *) ptr, second,
- &raddr);
+ ret = do_shmat(first, (char __user *) ptr, second,
+ &raddr);
if (ret)
return ret;
- return put_user (raddr, (unsigned long __user *) third);
+ return put_user(raddr, (unsigned long __user *) third);
}
case 1: /* iBCS2 emulator entry point */
if (!segment_eq(get_fs(), get_ds()))
return -EINVAL;
- return do_shmat (first, (char __user *) ptr, second,
- (unsigned long *) third);
+ return do_shmat(first, (char __user *) ptr, second,
+ (unsigned long *) third);
}
case SHMDT:
- return sys_shmdt ((char __user *)ptr);
+ return sys_shmdt((char __user *)ptr);
case SHMGET:
- return sys_shmget (first, second, third);
+ return sys_shmget(first, second, third);
case SHMCTL:
- return sys_shmctl (first, second,
- (struct shmid_ds __user *) ptr);
+ return sys_shmctl(first, second,
+ (struct shmid_ds __user *) ptr);
default:
return -ENOSYS;
}
diff --git a/arch/mips/kernel/sysirix.c b/arch/mips/kernel/sysirix.c
index 93a148486f8..ee7790d9deb 100644
--- a/arch/mips/kernel/sysirix.c
+++ b/arch/mips/kernel/sysirix.c
@@ -486,10 +486,10 @@ asmlinkage int irix_syssgi(struct pt_regs *regs)
switch (arg1) {
case SGI_INV_SIZEOF:
- retval = sizeof (inventory_t);
+ retval = sizeof(inventory_t);
break;
case SGI_INV_READ:
- retval = dump_inventory_to_user (buffer, count);
+ retval = dump_inventory_to_user(buffer, count);
break;
default:
retval = -EINVAL;
@@ -778,7 +778,7 @@ asmlinkage int irix_times(struct tms __user *tbuf)
int err = 0;
if (tbuf) {
- if (!access_ok(VERIFY_WRITE,tbuf,sizeof *tbuf))
+ if (!access_ok(VERIFY_WRITE, tbuf, sizeof *tbuf))
return -EFAULT;
err = __put_user(current->utime, &tbuf->tms_utime);
@@ -1042,9 +1042,9 @@ asmlinkage unsigned long irix_mmap32(unsigned long addr, size_t len, int prot,
long max_size = offset + len;
if (max_size > file->f_path.dentry->d_inode->i_size) {
- old_pos = sys_lseek (fd, max_size - 1, 0);
- sys_write (fd, (void __user *) "", 1);
- sys_lseek (fd, old_pos, 0);
+ old_pos = sys_lseek(fd, max_size - 1, 0);
+ sys_write(fd, (void __user *) "", 1);
+ sys_lseek(fd, old_pos, 0);
}
}
}
@@ -1176,7 +1176,7 @@ static int irix_xstat32_xlate(struct kstat *stat, void __user *ubuf)
ub.st_ctime1 = stat->atime.tv_nsec;
ub.st_blksize = stat->blksize;
ub.st_blocks = stat->blocks;
- strcpy (ub.st_fstype, "efs");
+ strcpy(ub.st_fstype, "efs");
return copy_to_user(ubuf, &ub, sizeof(ub)) ? -EFAULT : 0;
}
@@ -1208,7 +1208,7 @@ static int irix_xstat64_xlate(struct kstat *stat, void __user *ubuf)
ks.st_nlink = (u32) stat->nlink;
ks.st_uid = (s32) stat->uid;
ks.st_gid = (s32) stat->gid;
- ks.st_rdev = sysv_encode_dev (stat->rdev);
+ ks.st_rdev = sysv_encode_dev(stat->rdev);
ks.st_pad2[0] = ks.st_pad2[1] = 0;
ks.st_size = (long long) stat->size;
ks.st_pad3 = 0;
@@ -1527,9 +1527,9 @@ asmlinkage int irix_mmap64(struct pt_regs *regs)
long max_size = off2 + len;
if (max_size > file->f_path.dentry->d_inode->i_size) {
- old_pos = sys_lseek (fd, max_size - 1, 0);
- sys_write (fd, (void __user *) "", 1);
- sys_lseek (fd, old_pos, 0);
+ old_pos = sys_lseek(fd, max_size - 1, 0);
+ sys_write(fd, (void __user *) "", 1);
+ sys_lseek(fd, old_pos, 0);
}
}
}
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index 9a5596bf857..5892491b40e 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -11,6 +11,7 @@
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
+#include <linux/clockchips.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -24,6 +25,7 @@
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/kallsyms.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
@@ -32,8 +34,11 @@
#include <asm/cpu-features.h>
#include <asm/div64.h>
#include <asm/sections.h>
+#include <asm/smtc_ipi.h>
#include <asm/time.h>
+#include <irq.h>
+
/*
* The integer part of the number of usecs per jiffy is taken from tick,
* but the fractional part is not recorded, so we calculate it using the
@@ -49,32 +54,27 @@
* forward reference
*/
DEFINE_SPINLOCK(rtc_lock);
+EXPORT_SYMBOL(rtc_lock);
-/*
- * By default we provide the null RTC ops
- */
-static unsigned long null_rtc_get_time(void)
+int __weak rtc_mips_set_time(unsigned long sec)
{
- return mktime(2000, 1, 1, 0, 0, 0);
+ return 0;
}
+EXPORT_SYMBOL(rtc_mips_set_time);
-static int null_rtc_set_time(unsigned long sec)
+int __weak rtc_mips_set_mmss(unsigned long nowtime)
{
- return 0;
+ return rtc_mips_set_time(nowtime);
}
-unsigned long (*rtc_mips_get_time)(void) = null_rtc_get_time;
-int (*rtc_mips_set_time)(unsigned long) = null_rtc_set_time;
-int (*rtc_mips_set_mmss)(unsigned long);
-
+int update_persistent_clock(struct timespec now)
+{
+ return rtc_mips_set_mmss(now.tv_sec);
+}
/* how many counter cycles in a jiffy */
static unsigned long cycles_per_jiffy __read_mostly;
-/* expirelo is the count value for next CPU timer interrupt */
-static unsigned int expirelo;
-
-
/*
* Null timer ack for systems not needing one (e.g. i8254).
*/
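
The RTC hooks are converted from function pointers to __weak default definitions: a platform that links a strong rtc_mips_set_time() silently overrides the stub, and nothing has to be assigned at boot. The same mechanism in stand-alone C (hypothetical function name, GCC/Clang attribute syntax):

    #include <stdio.h>

    /* Default definition; a strong rtc_set_time() elsewhere in the
     * link replaces it, just as a platform's rtc_mips_set_time()
     * replaces the new __weak stub. */
    int __attribute__((weak)) rtc_set_time(unsigned long sec)
    {
        (void)sec;
        return 0;                       /* "no RTC" default */
    }

    int main(void)
    {
        printf("rtc_set_time -> %d\n", rtc_set_time(0));
        return 0;
    }
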
@@ -93,18 +93,7 @@ static cycle_t null_hpt_read(void)
*/
static void c0_timer_ack(void)
{
- unsigned int count;
-
- /* Ack this timer interrupt and set the next one. */
- expirelo += cycles_per_jiffy;
- write_c0_compare(expirelo);
-
- /* Check to see if we have missed any timer interrupts. */
- while (((count = read_c0_count()) - expirelo) < 0x7fffffff) {
- /* missed_timer_count++; */
- expirelo = count + cycles_per_jiffy;
- write_c0_compare(expirelo);
- }
+ write_c0_compare(read_c0_compare());
}
/*
@@ -115,19 +104,9 @@ static cycle_t c0_hpt_read(void)
return read_c0_count();
}
-/* For use both as a high precision timer and an interrupt source. */
-static void __init c0_hpt_timer_init(void)
-{
- expirelo = read_c0_count() + cycles_per_jiffy;
- write_c0_compare(expirelo);
-}
-
int (*mips_timer_state)(void);
void (*mips_timer_ack)(void);
-/* last time when xtime and rtc are sync'ed up */
-static long last_rtc_update;
-
/*
* local_timer_interrupt() does profiling and process accounting
* on a per-CPU basis.
@@ -144,60 +123,15 @@ void local_timer_interrupt(int irq, void *dev_id)
update_process_times(user_mode(get_irq_regs()));
}
-/*
- * High-level timer interrupt service routines. This function
- * is set as irqaction->handler and is invoked through do_IRQ.
- */
-irqreturn_t timer_interrupt(int irq, void *dev_id)
-{
- write_seqlock(&xtime_lock);
-
- mips_timer_ack();
-
- /*
- * call the generic timer interrupt handling
- */
- do_timer(1);
-
- /*
- * If we have an externally synchronized Linux clock, then update
- * CMOS clock accordingly every ~11 minutes. rtc_mips_set_time() has to be
- * called as close as possible to 500 ms before the new second starts.
- */
- if (ntp_synced() &&
- xtime.tv_sec > last_rtc_update + 660 &&
- (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
- (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
- if (rtc_mips_set_mmss(xtime.tv_sec) == 0) {
- last_rtc_update = xtime.tv_sec;
- } else {
- /* do it again in 60 s */
- last_rtc_update = xtime.tv_sec - 600;
- }
- }
-
- write_sequnlock(&xtime_lock);
-
- /*
- * In UP mode, we call local_timer_interrupt() to do profiling
- * and process accouting.
- *
- * In SMP mode, local_timer_interrupt() is invoked by appropriate
- * low-level local timer interrupt handler.
- */
- local_timer_interrupt(irq, dev_id);
-
- return IRQ_HANDLED;
-}
-
int null_perf_irq(void)
{
return 0;
}
+EXPORT_SYMBOL(null_perf_irq);
+
int (*perf_irq)(void) = null_perf_irq;
-EXPORT_SYMBOL(null_perf_irq);
EXPORT_SYMBOL(perf_irq);
/*
@@ -215,7 +149,7 @@ EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
* Possibly handle a performance counter interrupt.
* Return true if the timer interrupt should not be checked
*/
-static inline int handle_perf_irq (int r2)
+static inline int handle_perf_irq(int r2)
{
/*
* The performance counter overflow interrupt may be shared with the
@@ -229,63 +163,23 @@ static inline int handle_perf_irq (int r2)
!r2;
}
-asmlinkage void ll_timer_interrupt(int irq)
-{
- int r2 = cpu_has_mips_r2;
-
- irq_enter();
- kstat_this_cpu.irqs[irq]++;
-
- if (handle_perf_irq(r2))
- goto out;
-
- if (r2 && ((read_c0_cause() & (1 << 30)) == 0))
- goto out;
-
- timer_interrupt(irq, NULL);
-
-out:
- irq_exit();
-}
-
-asmlinkage void ll_local_timer_interrupt(int irq)
-{
- irq_enter();
- if (smp_processor_id() != 0)
- kstat_this_cpu.irqs[irq]++;
-
- /* we keep interrupt disabled all the time */
- local_timer_interrupt(irq, NULL);
-
- irq_exit();
-}
-
/*
* time_init() - it does the following things.
*
- * 1) board_time_init() -
+ * 1) plat_time_init() -
* a) (optional) set up RTC routines,
* b) (optional) calibrate and set the mips_hpt_frequency
* (only needed if you intended to use cpu counter as timer interrupt
* source)
- * 2) setup xtime based on rtc_mips_get_time().
- * 3) calculate a couple of cached variables for later usage
- * 4) plat_timer_setup() -
+ * 2) calculate a couple of cached variables for later usage
+ * 3) plat_timer_setup() -
* a) (optional) over-write any choices made above by time_init().
* b) machine specific code should setup the timer irqaction.
* c) enable the timer interrupt
*/
-void (*board_time_init)(void);
-
unsigned int mips_hpt_frequency;
-static struct irqaction timer_irqaction = {
- .handler = timer_interrupt,
- .flags = IRQF_DISABLED | IRQF_PERCPU,
- .name = "timer",
-};
-
static unsigned int __init calibrate_hpt(void)
{
cycle_t frequency, hpt_start, hpt_end, hpt_count, hz;
@@ -334,6 +228,84 @@ struct clocksource clocksource_mips = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
+static int mips_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ unsigned int cnt;
+ int res;
+
+#ifdef CONFIG_MIPS_MT_SMTC
+ {
+ unsigned long flags, vpflags;
+ local_irq_save(flags);
+ vpflags = dvpe();
+#endif
+ cnt = read_c0_count();
+ cnt += delta;
+ write_c0_compare(cnt);
+	res = ((long)(read_c0_count() - cnt) > 0) ? -ETIME : 0;
+#ifdef CONFIG_MIPS_MT_SMTC
+ evpe(vpflags);
+ local_irq_restore(flags);
+ }
+#endif
+ return res;
+}
+
+static void mips_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ /* Nothing to do ... */
+}
+
+static DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
+static int cp0_timer_irq_installed;
+
+static irqreturn_t timer_interrupt(int irq, void *dev_id)
+{
+ const int r2 = cpu_has_mips_r2;
+ struct clock_event_device *cd;
+ int cpu = smp_processor_id();
+
+ /*
+ * Suckage alert:
+ * Before R2 of the architecture there was no way to see if a
+ * performance counter interrupt was pending, so we have to run
+ * the performance counter interrupt handler anyway.
+ */
+ if (handle_perf_irq(r2))
+ goto out;
+
+ /*
+ * The same applies to performance counter interrupts. But with the
+ * above we now know that the reason we got here must be a timer
+ * interrupt. Being the paranoiacs we are we check anyway.
+ */
+ if (!r2 || (read_c0_cause() & (1 << 30))) {
+ c0_timer_ack();
+#ifdef CONFIG_MIPS_MT_SMTC
+ if (cpu_data[cpu].vpe_id)
+ goto out;
+ cpu = 0;
+#endif
+ cd = &per_cpu(mips_clockevent_device, cpu);
+ cd->event_handler(cd);
+ }
+
+out:
+ return IRQ_HANDLED;
+}
+
+static struct irqaction timer_irqaction = {
+ .handler = timer_interrupt,
+#ifdef CONFIG_MIPS_MT_SMTC
+ .flags = IRQF_DISABLED,
+#else
+ .flags = IRQF_DISABLED | IRQF_PERCPU,
+#endif
+ .name = "timer",
+};
+
static void __init init_mips_clocksource(void)
{
u64 temp;
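
mips_next_event() programs Compare = Count + delta and reports -ETIME when Count has already passed the target; the clockevents core converts requested nanoseconds into those count cycles using the mult/shift pair that mips_clockevent_init() fills in below. A sketch of that fixed-point conversion, assuming a hypothetical 100 MHz counter:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t freq = 100000000;      /* assumed 100 MHz counter */
        unsigned shift = 32;
        uint32_t mult = (uint32_t)((freq << shift) / 1000000000ULL);

        uint64_t delta_ns = 1000000;    /* ask for an event in 1 ms */
        uint64_t cycles = (delta_ns * (uint64_t)mult) >> shift;

        printf("mult=%u, 1 ms -> %llu cycles\n", mult,
               (unsigned long long)cycles);
        return 0;
    }
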
@@ -357,19 +329,127 @@ static void __init init_mips_clocksource(void)
clocksource_register(&clocksource_mips);
}
-void __init time_init(void)
+void __init __weak plat_time_init(void)
+{
+}
+
+void __init __weak plat_timer_setup(struct irqaction *irq)
+{
+}
+
+#ifdef CONFIG_MIPS_MT_SMTC
+DEFINE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device);
+
+static void smtc_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+}
+
+int dummycnt[NR_CPUS];
+
+static void mips_broadcast(cpumask_t mask)
+{
+ unsigned int cpu;
+
+ for_each_cpu_mask(cpu, mask)
+ smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
+}
+
+static void setup_smtc_dummy_clockevent_device(void)
+{
+	//uint64_t mips_freq = mips_hpt_frequency;
+ unsigned int cpu = smp_processor_id();
+ struct clock_event_device *cd;
+
+ cd = &per_cpu(smtc_dummy_clockevent_device, cpu);
+
+ cd->name = "SMTC";
+ cd->features = CLOCK_EVT_FEAT_DUMMY;
+
+ /* Calculate the min / max delta */
+ cd->mult = 0; //div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
+ cd->shift = 0; //32;
+ cd->max_delta_ns = 0; //clockevent_delta2ns(0x7fffffff, cd);
+ cd->min_delta_ns = 0; //clockevent_delta2ns(0x30, cd);
+
+ cd->rating = 200;
+ cd->irq = 17; //-1;
+// if (cpu)
+// cd->cpumask = CPU_MASK_ALL; // cpumask_of_cpu(cpu);
+// else
+ cd->cpumask = cpumask_of_cpu(cpu);
+
+ cd->set_mode = smtc_set_mode;
+
+ cd->broadcast = mips_broadcast;
+
+ clockevents_register_device(cd);
+}
+#endif
+
+static void mips_event_handler(struct clock_event_device *dev)
{
- if (board_time_init)
- board_time_init();
+}
- if (!rtc_mips_set_mmss)
- rtc_mips_set_mmss = rtc_mips_set_time;
+void __cpuinit mips_clockevent_init(void)
+{
+ uint64_t mips_freq = mips_hpt_frequency;
+ unsigned int cpu = smp_processor_id();
+ struct clock_event_device *cd;
+ unsigned int irq = MIPS_CPU_IRQ_BASE + 7;
- xtime.tv_sec = rtc_mips_get_time();
- xtime.tv_nsec = 0;
+ if (!cpu_has_counter)
+ return;
- set_normalized_timespec(&wall_to_monotonic,
- -xtime.tv_sec, -xtime.tv_nsec);
+#ifdef CONFIG_MIPS_MT_SMTC
+ setup_smtc_dummy_clockevent_device();
+
+ /*
+ * On SMTC we only register VPE0's compare interrupt as clockevent
+ * device.
+ */
+ if (cpu)
+ return;
+#endif
+
+ cd = &per_cpu(mips_clockevent_device, cpu);
+
+ cd->name = "MIPS";
+ cd->features = CLOCK_EVT_FEAT_ONESHOT;
+
+ /* Calculate the min / max delta */
+ cd->mult = div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
+ cd->shift = 32;
+ cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
+ cd->min_delta_ns = clockevent_delta2ns(0x30, cd);
+
+ cd->rating = 300;
+ cd->irq = irq;
+#ifdef CONFIG_MIPS_MT_SMTC
+ cd->cpumask = CPU_MASK_ALL;
+#else
+ cd->cpumask = cpumask_of_cpu(cpu);
+#endif
+ cd->set_next_event = mips_next_event;
+ cd->set_mode = mips_set_mode;
+ cd->event_handler = mips_event_handler;
+
+ clockevents_register_device(cd);
+
+ if (!cp0_timer_irq_installed) {
+#ifdef CONFIG_MIPS_MT_SMTC
+#define CPUCTR_IMASKBIT (0x100 << cp0_compare_irq)
+ setup_irq_smtc(irq, &timer_irqaction, CPUCTR_IMASKBIT);
+#else
+ setup_irq(irq, &timer_irqaction);
+#endif /* CONFIG_MIPS_MT_SMTC */
+ cp0_timer_irq_installed = 1;
+ }
+}
+
+void __init time_init(void)
+{
+ plat_time_init();
/* Choose appropriate high precision timer routines. */
if (!cpu_has_counter && !clocksource_mips.read)
@@ -392,11 +472,6 @@ void __init time_init(void)
/* Calculate cache parameters. */
cycles_per_jiffy =
(mips_hpt_frequency + HZ / 2) / HZ;
- /*
- * This sets up the high precision
- * timer for the first interrupt.
- */
- c0_hpt_timer_init();
}
}
if (!mips_hpt_frequency)
@@ -406,6 +481,10 @@ void __init time_init(void)
printk("Using %u.%03u MHz high precision timer.\n",
((mips_hpt_frequency + 500) / 1000) / 1000,
((mips_hpt_frequency + 500) / 1000) % 1000);
+
+#ifdef CONFIG_IRQ_CPU
+ setup_irq(MIPS_CPU_IRQ_BASE + 7, &timer_irqaction);
+#endif
}
if (!mips_timer_ack)
@@ -426,56 +505,5 @@ void __init time_init(void)
plat_timer_setup(&timer_irqaction);
init_mips_clocksource();
+ mips_clockevent_init();
}
-
-#define FEBRUARY 2
-#define STARTOFTIME 1970
-#define SECDAY 86400L
-#define SECYR (SECDAY * 365)
-#define leapyear(y) ((!((y) % 4) && ((y) % 100)) || !((y) % 400))
-#define days_in_year(y) (leapyear(y) ? 366 : 365)
-#define days_in_month(m) (month_days[(m) - 1])
-
-static int month_days[12] = {
- 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
-};
-
-void to_tm(unsigned long tim, struct rtc_time *tm)
-{
- long hms, day, gday;
- int i;
-
- gday = day = tim / SECDAY;
- hms = tim % SECDAY;
-
- /* Hours, minutes, seconds are easy */
- tm->tm_hour = hms / 3600;
- tm->tm_min = (hms % 3600) / 60;
- tm->tm_sec = (hms % 3600) % 60;
-
- /* Number of years in days */
- for (i = STARTOFTIME; day >= days_in_year(i); i++)
- day -= days_in_year(i);
- tm->tm_year = i;
-
- /* Number of months in days left */
- if (leapyear(tm->tm_year))
- days_in_month(FEBRUARY) = 29;
- for (i = 1; day >= days_in_month(i); i++)
- day -= days_in_month(i);
- days_in_month(FEBRUARY) = 28;
- tm->tm_mon = i - 1; /* tm_mon starts from 0 to 11 */
-
- /* Days are what is left over (+1) from all that. */
- tm->tm_mday = day + 1;
-
- /*
- * Determine the day of week
- */
- tm->tm_wday = (gday + 4) % 7; /* 1970/1/1 was Thursday */
-}
-
-EXPORT_SYMBOL(rtc_lock);
-EXPORT_SYMBOL(to_tm);
-EXPORT_SYMBOL(rtc_mips_set_time);
-EXPORT_SYMBOL(rtc_mips_get_time);
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 6379003f9d8..632bce1bf42 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -295,7 +295,8 @@ void show_regs(struct pt_regs *regs)
if (1 <= cause && cause <= 5)
printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);
- printk("PrId : %08x\n", read_c0_prid());
+ printk("PrId : %08x (%s)\n", read_c0_prid(),
+ cpu_name_string());
}
void show_registers(struct pt_regs *regs)
@@ -627,7 +628,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
lose_fpu(1);
/* Run the emulator */
- sig = fpu_emulator_cop1Handler (regs, &current->thread.fpu, 1);
+ sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1);
/*
* We can't allow the emulated instruction to leave any of
@@ -954,7 +955,7 @@ asmlinkage void do_reserved(struct pt_regs *regs)
*/
static inline void parity_protection_init(void)
{
- switch (current_cpu_data.cputype) {
+ switch (current_cpu_type()) {
case CPU_24K:
case CPU_34K:
case CPU_5KC:
@@ -1075,8 +1076,8 @@ void *set_except_vector(int n, void *addr)
exception_handlers[n] = handler;
if (n == 0 && cpu_has_divec) {
- *(volatile u32 *)(ebase + 0x200) = 0x08000000 |
- (0x03ffffff & (handler >> 2));
+ *(u32 *)(ebase + 0x200) = 0x08000000 |
+ (0x03ffffff & (handler >> 2));
flush_icache_range(ebase + 0x200, ebase + 0x204);
}
return (void *)old_handler;
@@ -1165,11 +1166,11 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
if (cpu_has_veic) {
if (board_bind_eic_interrupt)
- board_bind_eic_interrupt (n, srs);
+ board_bind_eic_interrupt(n, srs);
} else if (cpu_has_vint) {
/* SRSMap is only defined if shadow sets are implemented */
if (mips_srs_max() > 1)
- change_c0_srsmap (0xf << n*4, srs << n*4);
+ change_c0_srsmap(0xf << n*4, srs << n*4);
}
if (srs == 0) {
@@ -1198,10 +1199,10 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
* Sigh... panicing won't help as the console
* is probably not configured :(
*/
- panic ("VECTORSPACING too small");
+ panic("VECTORSPACING too small");
}
- memcpy (b, &except_vec_vi, handler_len);
+ memcpy(b, &except_vec_vi, handler_len);
#ifdef CONFIG_MIPS_MT_SMTC
BUG_ON(n > 7); /* Vector index %d exceeds SMTC maximum. */
@@ -1370,9 +1371,9 @@ void __init per_cpu_trap_init(void)
#endif /* CONFIG_MIPS_MT_SMTC */
if (cpu_has_veic || cpu_has_vint) {
- write_c0_ebase (ebase);
+ write_c0_ebase(ebase);
/* Setting vector spacing enables EI/VI mode */
- change_c0_intctl (0x3e0, VECTORSPACING);
+ change_c0_intctl(0x3e0, VECTORSPACING);
}
if (cpu_has_divec) {
if (cpu_has_mipsmt) {
@@ -1390,8 +1391,8 @@ void __init per_cpu_trap_init(void)
* o read IntCtl.IPPCI to determine the performance counter interrupt
*/
if (cpu_has_mips_r2) {
- cp0_compare_irq = (read_c0_intctl () >> 29) & 7;
- cp0_perfcount_irq = (read_c0_intctl () >> 26) & 7;
+ cp0_compare_irq = (read_c0_intctl() >> 29) & 7;
+ cp0_perfcount_irq = (read_c0_intctl() >> 26) & 7;
if (cp0_perfcount_irq == cp0_compare_irq)
cp0_perfcount_irq = -1;
} else {
@@ -1429,14 +1430,17 @@ void __init per_cpu_trap_init(void)
}
/* Install CPU exception handler */
-void __init set_handler (unsigned long offset, void *addr, unsigned long size)
+void __init set_handler(unsigned long offset, void *addr, unsigned long size)
{
memcpy((void *)(ebase + offset), addr, size);
flush_icache_range(ebase + offset, ebase + offset + size);
}
+static char panic_null_cerr[] __initdata =
+ "Trying to set NULL cache error exception handler";
+
/* Install uncached CPU exception handler */
-void __init set_uncached_handler (unsigned long offset, void *addr, unsigned long size)
+void __init set_uncached_handler(unsigned long offset, void *addr, unsigned long size)
{
#ifdef CONFIG_32BIT
unsigned long uncached_ebase = KSEG1ADDR(ebase);
@@ -1445,6 +1449,9 @@ void __init set_uncached_handler (unsigned long offset, void *addr, unsigned lon
unsigned long uncached_ebase = TO_UNCAC(ebase);
#endif
+ if (!addr)
+ panic(panic_null_cerr);
+
memcpy((void *)(uncached_ebase + offset), addr, size);
}
@@ -1464,7 +1471,7 @@ void __init trap_init(void)
unsigned long i;
if (cpu_has_veic || cpu_has_vint)
- ebase = (unsigned long) alloc_bootmem_low_pages (0x200 + VECTORSPACING*64);
+ ebase = (unsigned long) alloc_bootmem_low_pages(0x200 + VECTORSPACING*64);
else
ebase = CAC_BASE;
@@ -1490,7 +1497,7 @@ void __init trap_init(void)
* destination.
*/
if (cpu_has_ejtag && board_ejtag_handler_setup)
- board_ejtag_handler_setup ();
+ board_ejtag_handler_setup();
/*
* Only some CPUs have the watch exceptions.
@@ -1543,8 +1550,8 @@ void __init trap_init(void)
set_except_vector(12, handle_ov);
set_except_vector(13, handle_tr);
- if (current_cpu_data.cputype == CPU_R6000 ||
- current_cpu_data.cputype == CPU_R6000A) {
+ if (current_cpu_type() == CPU_R6000 ||
+ current_cpu_type() == CPU_R6000A) {
/*
* The R6000 is the only R-series CPU that features a machine
* check exception (similar to the R4000 cache error) and
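
One detail worth calling out in the set_except_vector() hunk above: the word written at ebase + 0x200 is a hand-encoded MIPS "j handler" instruction -- opcode 000010 in the top six bits, bits 27..2 of the destination address in the 26-bit target field. The arithmetic, on a made-up address:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t handler = 0x80101234;  /* hypothetical handler address */

        /* MIPS "j" encoding: opcode in bits 31..26, target field
         * holds the destination address pre-shifted right by 2. */
        uint32_t insn = 0x08000000 | (0x03ffffff & (handler >> 2));

        printf("j 0x%08x -> 0x%08x\n", handler, insn);
        return 0;
    }
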
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index d34b1fb3665..c327b21bca8 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -481,7 +481,7 @@ fault:
if (fixup_exception(regs))
return;
- die_if_kernel ("Unhandled kernel unaligned access", regs);
+ die_if_kernel("Unhandled kernel unaligned access", regs);
send_sig(SIGSEGV, current, 1);
return;
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 087ab997487..84f9a4cc6f2 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -6,163 +6,202 @@
OUTPUT_ARCH(mips)
ENTRY(kernel_entry)
jiffies = JIFFIES;
+
SECTIONS
{
#ifdef CONFIG_BOOT_ELF64
- /* Read-only sections, merged into text segment: */
- /* . = 0xc000000000000000; */
+ /* Read-only sections, merged into text segment: */
+ /* . = 0xc000000000000000; */
- /* This is the value for an Origin kernel, taken from an IRIX kernel. */
- /* . = 0xc00000000001c000; */
+ /* This is the value for an Origin kernel, taken from an IRIX kernel. */
+ /* . = 0xc00000000001c000; */
- /* Set the vaddr for the text segment to a value
- >= 0xa800 0000 0001 9000 if no symmon is going to configured
- >= 0xa800 0000 0030 0000 otherwise */
+ /* Set the vaddr for the text segment to a value
+	 * >= 0xa800 0000 0001 9000 if no symmon is going to be configured
+ * >= 0xa800 0000 0030 0000 otherwise
+ */
- /* . = 0xa800000000300000; */
- /* . = 0xa800000000300000; */
- . = 0xffffffff80300000;
+ /* . = 0xa800000000300000; */
+ /* . = 0xa800000000300000; */
+ . = 0xffffffff80300000;
#endif
- . = LOADADDR;
- /* read-only */
- _text = .; /* Text and read-only data */
- .text : {
- TEXT_TEXT
- SCHED_TEXT
- LOCK_TEXT
- *(.fixup)
- *(.gnu.warning)
- } =0
-
- _etext = .; /* End of text section */
-
- . = ALIGN(16); /* Exception table */
- __start___ex_table = .;
- __ex_table : { *(__ex_table) }
- __stop___ex_table = .;
-
- __start___dbe_table = .; /* Exception table for data bus errors */
- __dbe_table : { *(__dbe_table) }
- __stop___dbe_table = .;
-
- NOTES
-
- RODATA
-
- /* writeable */
- .data : { /* Data */
- . = . + DATAOFFSET; /* for CONFIG_MAPPED_KERNEL */
- /*
- * This ALIGN is needed as a workaround for a bug a gcc bug upto 4.1 which
- * limits the maximum alignment to at most 32kB and results in the following
- * warning:
- *
- * CC arch/mips/kernel/init_task.o
- * arch/mips/kernel/init_task.c:30: warning: alignment of ‘init_thread_union’
- * is greater than maximum object file alignment. Using 32768
- */
- . = ALIGN(_PAGE_SIZE);
- *(.data.init_task)
-
- DATA_DATA
-
- CONSTRUCTORS
- }
- _gp = . + 0x8000;
- .lit8 : { *(.lit8) }
- .lit4 : { *(.lit4) }
- /* We want the small data sections together, so single-instruction offsets
- can access them all, and initialized data all before uninitialized, so
- we can shorten the on-disk segment size. */
- .sdata : { *(.sdata) }
-
- . = ALIGN(_PAGE_SIZE);
- __nosave_begin = .;
- .data_nosave : { *(.data.nosave) }
- . = ALIGN(_PAGE_SIZE);
- __nosave_end = .;
-
- . = ALIGN(32);
- .data.cacheline_aligned : { *(.data.cacheline_aligned) }
-
- _edata = .; /* End of data section */
-
- /* will be freed after init */
- . = ALIGN(_PAGE_SIZE); /* Init code and data */
- __init_begin = .;
- .init.text : {
- _sinittext = .;
- *(.init.text)
- _einittext = .;
- }
- .init.data : { *(.init.data) }
- . = ALIGN(16);
- __setup_start = .;
- .init.setup : { *(.init.setup) }
- __setup_end = .;
-
- __initcall_start = .;
- .initcall.init : {
- INITCALLS
- }
- __initcall_end = .;
-
- __con_initcall_start = .;
- .con_initcall.init : { *(.con_initcall.init) }
- __con_initcall_end = .;
- SECURITY_INIT
- /* .exit.text is discarded at runtime, not link time, to deal with
- references from .rodata */
- .exit.text : { *(.exit.text) }
- .exit.data : { *(.exit.data) }
+ . = LOADADDR;
+ /* read-only */
+ _text = .; /* Text and read-only data */
+ .text : {
+ TEXT_TEXT
+ SCHED_TEXT
+ LOCK_TEXT
+ *(.fixup)
+ *(.gnu.warning)
+ } =0
+ _etext = .; /* End of text section */
+
+ /* Exception table */
+ . = ALIGN(16);
+ __ex_table : {
+ __start___ex_table = .;
+ *(__ex_table)
+ __stop___ex_table = .;
+ }
+
+ /* Exception table for data bus errors */
+ __dbe_table : {
+ __start___dbe_table = .;
+ *(__dbe_table)
+ __stop___dbe_table = .;
+ }
+ RODATA
+
+ /* writeable */
+ .data : { /* Data */
+ . = . + DATAOFFSET; /* for CONFIG_MAPPED_KERNEL */
+ /*
+	 * This ALIGN is needed as a workaround for a gcc bug up to 4.1 which
+ * limits the maximum alignment to at most 32kB and results in the following
+ * warning:
+ *
+ * CC arch/mips/kernel/init_task.o
+ * arch/mips/kernel/init_task.c:30: warning: alignment of ‘init_thread_union’
+ * is greater than maximum object file alignment. Using 32768
+ */
+ . = ALIGN(_PAGE_SIZE);
+ *(.data.init_task)
+
+ DATA_DATA
+ CONSTRUCTORS
+ }
+ _gp = . + 0x8000;
+ .lit8 : {
+ *(.lit8)
+ }
+ .lit4 : {
+ *(.lit4)
+ }
+ /* We want the small data sections together, so single-instruction offsets
+ can access them all, and initialized data all before uninitialized, so
+ we can shorten the on-disk segment size. */
+ .sdata : {
+ *(.sdata)
+ }
+
+ . = ALIGN(_PAGE_SIZE);
+ .data_nosave : {
+ __nosave_begin = .;
+ *(.data.nosave)
+ }
+ . = ALIGN(_PAGE_SIZE);
+ __nosave_end = .;
+
+ . = ALIGN(32);
+ .data.cacheline_aligned : {
+ *(.data.cacheline_aligned)
+ }
+ _edata = .; /* End of data section */
+
+ /* will be freed after init */
+ . = ALIGN(_PAGE_SIZE); /* Init code and data */
+ __init_begin = .;
+ .init.text : {
+ _sinittext = .;
+ *(.init.text)
+ _einittext = .;
+ }
+ .init.data : {
+ *(.init.data)
+ }
+ . = ALIGN(16);
+ .init.setup : {
+ __setup_start = .;
+ *(.init.setup)
+ __setup_end = .;
+ }
+
+ .initcall.init : {
+ __initcall_start = .;
+ INITCALLS
+ __initcall_end = .;
+ }
+
+ .con_initcall.init : {
+ __con_initcall_start = .;
+ *(.con_initcall.init)
+ __con_initcall_end = .;
+ }
+ SECURITY_INIT
+
+ /* .exit.text is discarded at runtime, not link time, to deal with
+ * references from .rodata
+ */
+ .exit.text : {
+ *(.exit.text)
+ }
+ .exit.data : {
+ *(.exit.data)
+ }
#if defined(CONFIG_BLK_DEV_INITRD)
- . = ALIGN(_PAGE_SIZE);
- __initramfs_start = .;
- .init.ramfs : { *(.init.ramfs) }
- __initramfs_end = .;
+ . = ALIGN(_PAGE_SIZE);
+ .init.ramfs : {
+ __initramfs_start = .;
+ *(.init.ramfs)
+ __initramfs_end = .;
+ }
#endif
- PERCPU(_PAGE_SIZE)
- . = ALIGN(_PAGE_SIZE);
- __init_end = .;
- /* freed after init ends here */
-
- __bss_start = .; /* BSS */
- .sbss : {
- *(.sbss)
- *(.scommon)
- }
- .bss : {
- *(.bss)
- *(COMMON)
- }
- __bss_stop = .;
-
- _end = . ;
-
- /* Sections to be discarded */
- /DISCARD/ : {
- *(.exitcall.exit)
-
- /* ABI crap starts here */
- *(.MIPS.options)
- *(.options)
- *(.pdr)
- *(.reginfo)
- }
-
- /* These mark the ABI of the kernel for debuggers. */
- .mdebug.abi32 : { KEEP(*(.mdebug.abi32)) }
- .mdebug.abi64 : { KEEP(*(.mdebug.abi64)) }
-
- /* This is the MIPS specific mdebug section. */
- .mdebug : { *(.mdebug) }
-
- STABS_DEBUG
-
- DWARF_DEBUG
-
- /* These must appear regardless of . */
- .gptab.sdata : { *(.gptab.data) *(.gptab.sdata) }
- .gptab.sbss : { *(.gptab.bss) *(.gptab.sbss) }
- .note : { *(.note) }
+ PERCPU(_PAGE_SIZE)
+ . = ALIGN(_PAGE_SIZE);
+ __init_end = .;
+ /* freed after init ends here */
+
+ __bss_start = .; /* BSS */
+ .sbss : {
+ *(.sbss)
+ *(.scommon)
+ }
+ .bss : {
+ *(.bss)
+ *(COMMON)
+ }
+ __bss_stop = .;
+
+ _end = . ;
+
+ /* Sections to be discarded */
+ /DISCARD/ : {
+ *(.exitcall.exit)
+
+ /* ABI crap starts here */
+ *(.MIPS.options)
+ *(.options)
+ *(.pdr)
+ *(.reginfo)
+ }
+
+ /* These mark the ABI of the kernel for debuggers. */
+ .mdebug.abi32 : {
+ KEEP(*(.mdebug.abi32))
+ }
+ .mdebug.abi64 : {
+ KEEP(*(.mdebug.abi64))
+ }
+
+ /* This is the MIPS specific mdebug section. */
+ .mdebug : {
+ *(.mdebug)
+ }
+
+ STABS_DEBUG
+ DWARF_DEBUG
+
+ /* These must appear regardless of . */
+ .gptab.sdata : {
+ *(.gptab.data)
+ *(.gptab.sdata)
+ }
+ .gptab.sbss : {
+ *(.gptab.bss)
+ *(.gptab.sbss)
+ }
+ .note : {
+ *(.note)
+ }
}
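
The rewritten script defines table delimiters such as __start___ex_table inside the output section itself; this begin/end-symbol idiom is how C code walks linker-built tables. On ELF targets GNU ld synthesizes __start_<name>/__stop_<name> for any section whose name is a valid C identifier, so the idiom can be shown in user space:

    #include <stdio.h>

    /* Two entries placed in a custom section; the linker generates
     * __start_mytab/__stop_mytab delimiters for it automatically. */
    static const int a __attribute__((section("mytab"), used)) = 1;
    static const int b __attribute__((section("mytab"), used)) = 2;

    extern const int __start_mytab[], __stop_mytab[];

    int main(void)
    {
        const int *p;

        for (p = __start_mytab; p < __stop_mytab; p++)
            printf("entry %d\n", *p);
        return 0;
    }
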
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 3c09b9785f4..61b729fa054 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -936,8 +936,18 @@ static int vpe_elfload(struct vpe * v)
}
} else {
- for (i = 0; i < hdr->e_shnum; i++) {
+ struct elf_phdr *phdr = (struct elf_phdr *) ((char *)hdr + hdr->e_phoff);
+ for (i = 0; i < hdr->e_phnum; i++) {
+ if (phdr->p_type != PT_LOAD)
+ continue;
+
+ memcpy((void *)phdr->p_vaddr, (char *)hdr + phdr->p_offset, phdr->p_filesz);
+ memset((void *)phdr->p_vaddr + phdr->p_filesz, 0, phdr->p_memsz - phdr->p_filesz);
+ phdr++;
+ }
+
+ for (i = 0; i < hdr->e_shnum; i++) {
/* Internal symbols and strings. */
if (sechdrs[i].sh_type == SHT_SYMTAB) {
symindex = i;
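
For fully linked images the loader above now skips per-section copying and instead walks the program headers, loading each PT_LOAD segment: copy p_filesz bytes to p_vaddr, then zero up to p_memsz. A stand-alone sketch of that walk for 32-bit ELF files, printing segments instead of copying them (no validation, sketch only):

    #include <elf.h>
    #include <stdio.h>

    int main(int argc, char **argv)
    {
        Elf32_Ehdr eh;
        Elf32_Phdr ph;
        FILE *f;
        int i;

        if (argc < 2 || !(f = fopen(argv[1], "rb")))
            return 1;
        if (fread(&eh, sizeof(eh), 1, f) != 1)
            return 1;

        for (i = 0; i < eh.e_phnum; i++) {
            fseek(f, eh.e_phoff + (long)i * eh.e_phentsize, SEEK_SET);
            if (fread(&ph, sizeof(ph), 1, f) != 1)
                return 1;
            if (ph.p_type != PT_LOAD)
                continue;
            printf("PT_LOAD vaddr=%#x filesz=%#x memsz=%#x\n",
                   (unsigned)ph.p_vaddr, (unsigned)ph.p_filesz,
                   (unsigned)ph.p_memsz);
        }
        return 0;
    }
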
@@ -948,39 +958,6 @@ static int vpe_elfload(struct vpe * v)
magic symbols */
sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset;
}
-
- /* filter sections we dont want in the final image */
- if (!(sechdrs[i].sh_flags & SHF_ALLOC) ||
- (sechdrs[i].sh_type == SHT_MIPS_REGINFO)) {
- printk( KERN_DEBUG " ignoring section, "
- "name %s type %x address 0x%x \n",
- secstrings + sechdrs[i].sh_name,
- sechdrs[i].sh_type, sechdrs[i].sh_addr);
- continue;
- }
-
- if (sechdrs[i].sh_addr < (unsigned int)v->load_addr) {
- printk( KERN_WARNING "VPE loader: "
- "fully linked image has invalid section, "
- "name %s type %x address 0x%x, before load "
- "address of 0x%x\n",
- secstrings + sechdrs[i].sh_name,
- sechdrs[i].sh_type, sechdrs[i].sh_addr,
- (unsigned int)v->load_addr);
- return -ENOEXEC;
- }
-
- printk(KERN_DEBUG " copying section sh_name %s, sh_addr 0x%x "
- "size 0x%x0 from x%p\n",
- secstrings + sechdrs[i].sh_name, sechdrs[i].sh_addr,
- sechdrs[i].sh_size, hdr + sechdrs[i].sh_offset);
-
- if (sechdrs[i].sh_type != SHT_NOBITS)
- memcpy((void *)sechdrs[i].sh_addr,
- (char *)hdr + sechdrs[i].sh_offset,
- sechdrs[i].sh_size);
- else
- memset((void *)sechdrs[i].sh_addr, 0, sechdrs[i].sh_size);
}
}
@@ -1044,7 +1021,7 @@ static int getcwd(char *buff, int size)
old_fs = get_fs();
set_fs(KERNEL_DS);
- ret = sys_getcwd(buff,size);
+ ret = sys_getcwd(buff, size);
set_fs(old_fs);