Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--   arch/mips/kernel/Makefile        |   3
-rw-r--r--   arch/mips/kernel/asm-offsets.c   |   1
-rw-r--r--   arch/mips/kernel/cevt-r4k.c      |   5
-rw-r--r--   arch/mips/kernel/cpu-bugs64.c    |   2
-rw-r--r--   arch/mips/kernel/cpu-probe.c     |  23
-rw-r--r--   arch/mips/kernel/kgdb.c          | 211
-rw-r--r--   arch/mips/kernel/kprobes.c       | 557
-rw-r--r--   arch/mips/kernel/mcount.S        |   6
-rw-r--r--   arch/mips/kernel/scall64-n32.S   |   8
-rw-r--r--   arch/mips/kernel/smp.c           |   4
-rw-r--r--   arch/mips/kernel/smtc.c          |   3
-rw-r--r--   arch/mips/kernel/syscall.c       |  49
-rw-r--r--   arch/mips/kernel/traps.c         |  50
13 files changed, 825 insertions(+), 97 deletions(-)
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 7a6ac501cbb..06f84829978 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -76,6 +76,7 @@ obj-$(CONFIG_IRQ_TXX9) += irq_txx9.o
obj-$(CONFIG_IRQ_GT641XX) += irq-gt641xx.o
obj-$(CONFIG_IRQ_GIC) += irq-gic.o
+obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_32BIT) += scall32-o32.o
obj-$(CONFIG_64BIT) += scall64-64.o
obj-$(CONFIG_MIPS32_COMPAT) += linux32.o ptrace32.o signal32.o
@@ -101,6 +102,4 @@ obj-$(CONFIG_HAVE_STD_PC_SERIAL_PORT) += 8250-platform.o
obj-$(CONFIG_MIPS_CPUFREQ) += cpufreq/
-EXTRA_CFLAGS += -Werror
-
CPPFLAGS_vmlinux.lds := $(KBUILD_CFLAGS)
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index ca6c83218ca..6b30fb2caa6 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -126,7 +126,6 @@ void output_thread_defines(void)
thread.cp0_baduaddr);
OFFSET(THREAD_ECODE, task_struct, \
thread.error_code);
- OFFSET(THREAD_TRAPNO, task_struct, thread.trap_no);
OFFSET(THREAD_TRAMP, task_struct, \
thread.irix_trampoline);
OFFSET(THREAD_OLDCTX, task_struct, \
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 0b2450ceb13..2a4d50ff5e2 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -163,7 +163,6 @@ int c0_compare_int_usable(void)
int __cpuinit r4k_clockevent_init(void)
{
- uint64_t mips_freq = mips_hpt_frequency;
unsigned int cpu = smp_processor_id();
struct clock_event_device *cd;
unsigned int irq;
@@ -188,9 +187,9 @@ int __cpuinit r4k_clockevent_init(void)
cd->name = "MIPS";
cd->features = CLOCK_EVT_FEAT_ONESHOT;
+ clockevent_set_clock(cd, mips_hpt_frequency);
+
/* Calculate the min / max delta */
- cd->mult = div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
- cd->shift = 32;
cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
diff --git a/arch/mips/kernel/cpu-bugs64.c b/arch/mips/kernel/cpu-bugs64.c
index 408d0a07b3a..b8bb8ba6086 100644
--- a/arch/mips/kernel/cpu-bugs64.c
+++ b/arch/mips/kernel/cpu-bugs64.c
@@ -239,7 +239,7 @@ static inline void check_daddi(void)
panic(bug64hit, !DADDI_WAR ? daddiwar : nowar);
}
-int daddiu_bug __cpuinitdata = -1;
+int daddiu_bug = -1;
static inline void check_daddiu(void)
{
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 3562b854f2c..b1b304ea212 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -187,6 +187,7 @@ void __init check_wait(void)
case CPU_BCM6358:
case CPU_CAVIUM_OCTEON:
case CPU_CAVIUM_OCTEON_PLUS:
+ case CPU_JZRISC:
cpu_wait = r4k_wait;
break;
@@ -760,6 +761,9 @@ static void __cpuinit decode_configs(struct cpuinfo_mips *c)
ok = decode_config4(c);
mips_probe_watch_registers(c);
+
+ if (cpu_has_mips_r2)
+ c->core = read_c0_ebase() & 0x3ff;
}
static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
@@ -956,6 +960,22 @@ platform:
}
}
+static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
+{
+ decode_configs(c);
+ /* JZRISC does not implement the CP0 counter. */
+ c->options &= ~MIPS_CPU_COUNTER;
+ switch (c->processor_id & 0xff00) {
+ case PRID_IMP_JZRISC:
+ c->cputype = CPU_JZRISC;
+ __cpu_name[cpu] = "Ingenic JZRISC";
+ break;
+ default:
+ panic("Unknown Ingenic Processor ID!");
+ break;
+ }
+}
+
const char *__cpu_name[NR_CPUS];
const char *__elf_platform;
@@ -994,6 +1014,9 @@ __cpuinit void cpu_probe(void)
case PRID_COMP_CAVIUM:
cpu_probe_cavium(c, cpu);
break;
+ case PRID_COMP_INGENIC:
+ cpu_probe_ingenic(c, cpu);
+ break;
}
BUG_ON(!__cpu_name[cpu]);
diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
index 9b78ff6e9b8..1f4e2fa6414 100644
--- a/arch/mips/kernel/kgdb.c
+++ b/arch/mips/kernel/kgdb.c
@@ -50,6 +50,151 @@ static struct hard_trap_info {
{ 0, 0} /* Must be last */
};
+struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
+{
+ { "zero", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[0]) },
+ { "at", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[1]) },
+ { "v0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[2]) },
+ { "v1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[3]) },
+ { "a0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[4]) },
+ { "a1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[5]) },
+ { "a2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[6]) },
+ { "a3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[7]) },
+ { "t0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[8]) },
+ { "t1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[9]) },
+ { "t2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[10]) },
+ { "t3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[11]) },
+ { "t4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[12]) },
+ { "t5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[13]) },
+ { "t6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[14]) },
+ { "t7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[15]) },
+ { "s0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[16]) },
+ { "s1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[17]) },
+ { "s2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[18]) },
+ { "s3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[19]) },
+ { "s4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[20]) },
+ { "s5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[21]) },
+ { "s6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[22]) },
+ { "s7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[23]) },
+ { "t8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[24]) },
+ { "t9", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[25]) },
+ { "k0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[26]) },
+ { "k1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[27]) },
+ { "gp", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[28]) },
+ { "sp", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[29]) },
+ { "s8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[30]) },
+ { "ra", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[31]) },
+ { "sr", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_status) },
+ { "lo", GDB_SIZEOF_REG, offsetof(struct pt_regs, lo) },
+ { "hi", GDB_SIZEOF_REG, offsetof(struct pt_regs, hi) },
+ { "bad", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_badvaddr) },
+ { "cause", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_cause) },
+ { "pc", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_epc) },
+ { "f0", GDB_SIZEOF_REG, 0 },
+ { "f1", GDB_SIZEOF_REG, 1 },
+ { "f2", GDB_SIZEOF_REG, 2 },
+ { "f3", GDB_SIZEOF_REG, 3 },
+ { "f4", GDB_SIZEOF_REG, 4 },
+ { "f5", GDB_SIZEOF_REG, 5 },
+ { "f6", GDB_SIZEOF_REG, 6 },
+ { "f7", GDB_SIZEOF_REG, 7 },
+ { "f8", GDB_SIZEOF_REG, 8 },
+ { "f9", GDB_SIZEOF_REG, 9 },
+ { "f10", GDB_SIZEOF_REG, 10 },
+ { "f11", GDB_SIZEOF_REG, 11 },
+ { "f12", GDB_SIZEOF_REG, 12 },
+ { "f13", GDB_SIZEOF_REG, 13 },
+ { "f14", GDB_SIZEOF_REG, 14 },
+ { "f15", GDB_SIZEOF_REG, 15 },
+ { "f16", GDB_SIZEOF_REG, 16 },
+ { "f17", GDB_SIZEOF_REG, 17 },
+ { "f18", GDB_SIZEOF_REG, 18 },
+ { "f19", GDB_SIZEOF_REG, 19 },
+ { "f20", GDB_SIZEOF_REG, 20 },
+ { "f21", GDB_SIZEOF_REG, 21 },
+ { "f22", GDB_SIZEOF_REG, 22 },
+ { "f23", GDB_SIZEOF_REG, 23 },
+ { "f24", GDB_SIZEOF_REG, 24 },
+ { "f25", GDB_SIZEOF_REG, 25 },
+ { "f26", GDB_SIZEOF_REG, 26 },
+ { "f27", GDB_SIZEOF_REG, 27 },
+ { "f28", GDB_SIZEOF_REG, 28 },
+ { "f29", GDB_SIZEOF_REG, 29 },
+ { "f30", GDB_SIZEOF_REG, 30 },
+ { "f31", GDB_SIZEOF_REG, 31 },
+ { "fsr", GDB_SIZEOF_REG, 0 },
+ { "fir", GDB_SIZEOF_REG, 0 },
+};
+
+int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
+{
+ int fp_reg;
+
+ if (regno < 0 || regno >= DBG_MAX_REG_NUM)
+ return -EINVAL;
+
+ if (dbg_reg_def[regno].offset != -1 && regno < 38) {
+ memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
+ dbg_reg_def[regno].size);
+ } else if (current && dbg_reg_def[regno].offset != -1 && regno < 72) {
+ /* FP registers 38 -> 69 */
+ if (!(regs->cp0_status & ST0_CU1))
+ return 0;
+ if (regno == 70) {
+ /* Process the fcr31/fsr (register 70) */
+ memcpy((void *)&current->thread.fpu.fcr31, mem,
+ dbg_reg_def[regno].size);
+ goto out_save;
+ } else if (regno == 71) {
+ /* Ignore the fir (register 71) */
+ goto out_save;
+ }
+ fp_reg = dbg_reg_def[regno].offset;
+ memcpy((void *)&current->thread.fpu.fpr[fp_reg], mem,
+ dbg_reg_def[regno].size);
+out_save:
+ restore_fp(current);
+ }
+
+ return 0;
+}
+
+char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
+{
+ int fp_reg;
+
+ if (regno >= DBG_MAX_REG_NUM || regno < 0)
+ return NULL;
+
+ if (dbg_reg_def[regno].offset != -1 && regno < 38) {
+ /* First 38 registers */
+ memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
+ dbg_reg_def[regno].size);
+ } else if (current && dbg_reg_def[regno].offset != -1 && regno < 72) {
+ /* FP registers 38 -> 69 */
+ if (!(regs->cp0_status & ST0_CU1))
+ goto out;
+ save_fp(current);
+ if (regno == 70) {
+ /* Process the fcr31/fsr (register 70) */
+ memcpy(mem, (void *)&current->thread.fpu.fcr31,
+ dbg_reg_def[regno].size);
+ goto out;
+ } else if (regno == 71) {
+ /* Ignore the fir (register 71) */
+ memset(mem, 0, dbg_reg_def[regno].size);
+ goto out;
+ }
+ fp_reg = dbg_reg_def[regno].offset;
+ memcpy(mem, (void *)&current->thread.fpu.fpr[fp_reg],
+ dbg_reg_def[regno].size);
+ }
+
+out:
+ return dbg_reg_def[regno].name;
+
+}
+
void arch_kgdb_breakpoint(void)
{
__asm__ __volatile__(
@@ -84,64 +229,6 @@ static int compute_signal(int tt)
return SIGHUP; /* default for things we don't know about */
}
-void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
-{
- int reg;
-
-#if (KGDB_GDB_REG_SIZE == 32)
- u32 *ptr = (u32 *)gdb_regs;
-#else
- u64 *ptr = (u64 *)gdb_regs;
-#endif
-
- for (reg = 0; reg < 32; reg++)
- *(ptr++) = regs->regs[reg];
-
- *(ptr++) = regs->cp0_status;
- *(ptr++) = regs->lo;
- *(ptr++) = regs->hi;
- *(ptr++) = regs->cp0_badvaddr;
- *(ptr++) = regs->cp0_cause;
- *(ptr++) = regs->cp0_epc;
-
- /* FP REGS */
- if (!(current && (regs->cp0_status & ST0_CU1)))
- return;
-
- save_fp(current);
- for (reg = 0; reg < 32; reg++)
- *(ptr++) = current->thread.fpu.fpr[reg];
-}
-
-void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
-{
- int reg;
-
-#if (KGDB_GDB_REG_SIZE == 32)
- const u32 *ptr = (u32 *)gdb_regs;
-#else
- const u64 *ptr = (u64 *)gdb_regs;
-#endif
-
- for (reg = 0; reg < 32; reg++)
- regs->regs[reg] = *(ptr++);
-
- regs->cp0_status = *(ptr++);
- regs->lo = *(ptr++);
- regs->hi = *(ptr++);
- regs->cp0_badvaddr = *(ptr++);
- regs->cp0_cause = *(ptr++);
- regs->cp0_epc = *(ptr++);
-
- /* FP REGS from current */
- if (!(current && (regs->cp0_status & ST0_CU1)))
- return;
-
- for (reg = 0; reg < 32; reg++)
- current->thread.fpu.fpr[reg] = *(ptr++);
- restore_fp(current);
-}
-
/*
* Similar to regs_to_gdb_regs() except that process is sleeping and so
* we may not be able to get all the info.
@@ -242,7 +329,7 @@ static struct notifier_block kgdb_notifier = {
};
/*
- * Handle the 's' and 'c' commands
+ * Handle the 'c' command
*/
int kgdb_arch_handle_exception(int vector, int signo, int err_code,
char *remcom_in_buffer, char *remcom_out_buffer,
@@ -250,20 +337,14 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
{
char *ptr;
unsigned long address;
- int cpu = smp_processor_id();
switch (remcom_in_buffer[0]) {
- case 's':
case 'c':
/* handle the optional parameter */
ptr = &remcom_in_buffer[1];
if (kgdb_hex2long(&ptr, &address))
regs->cp0_epc = address;
- atomic_set(&kgdb_cpu_doing_single_step, -1);
- if (remcom_in_buffer[0] == 's')
- atomic_set(&kgdb_cpu_doing_single_step, cpu);
-
return 0;
}
diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c
new file mode 100644
index 00000000000..ee28683fc2a
--- /dev/null
+++ b/arch/mips/kernel/kprobes.c
@@ -0,0 +1,557 @@
+/*
+ * Kernel Probes (KProbes)
+ * arch/mips/kernel/kprobes.c
+ *
+ * Copyright 2006 Sony Corp.
+ * Copyright 2010 Cavium Networks
+ *
+ * Some portions copied from the powerpc version.
+ *
+ * Copyright (C) IBM Corporation, 2002, 2004
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kprobes.h>
+#include <linux/preempt.h>
+#include <linux/kdebug.h>
+#include <linux/slab.h>
+
+#include <asm/ptrace.h>
+#include <asm/break.h>
+#include <asm/inst.h>
+
+static const union mips_instruction breakpoint_insn = {
+ .b_format = {
+ .opcode = spec_op,
+ .code = BRK_KPROBE_BP,
+ .func = break_op
+ }
+};
+
+static const union mips_instruction breakpoint2_insn = {
+ .b_format = {
+ .opcode = spec_op,
+ .code = BRK_KPROBE_SSTEPBP,
+ .func = break_op
+ }
+};
+
+DEFINE_PER_CPU(struct kprobe *, current_kprobe);
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
+static int __kprobes insn_has_delayslot(union mips_instruction insn)
+{
+ switch (insn.i_format.opcode) {
+
+ /*
+ * This group contains:
+ * jr and jalr are in r_format format.
+ */
+ case spec_op:
+ switch (insn.r_format.func) {
+ case jr_op:
+ case jalr_op:
+ break;
+ default:
+ goto insn_ok;
+ }
+
+ /*
+ * This group contains:
+ * bltz_op, bgez_op, bltzl_op, bgezl_op,
+ * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
+ */
+ case bcond_op:
+
+ /*
+ * These are unconditional and in j_format.
+ */
+ case jal_op:
+ case j_op:
+
+ /*
+ * These are conditional and in i_format.
+ */
+ case beq_op:
+ case beql_op:
+ case bne_op:
+ case bnel_op:
+ case blez_op:
+ case blezl_op:
+ case bgtz_op:
+ case bgtzl_op:
+
+ /*
+ * These are the FPA/cp1 branch instructions.
+ */
+ case cop1_op:
+
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+ case lwc2_op: /* This is bbit0 on Octeon */
+ case ldc2_op: /* This is bbit032 on Octeon */
+ case swc2_op: /* This is bbit1 on Octeon */
+ case sdc2_op: /* This is bbit132 on Octeon */
+#endif
+ return 1;
+ default:
+ break;
+ }
+insn_ok:
+ return 0;
+}
+
+int __kprobes arch_prepare_kprobe(struct kprobe *p)
+{
+ union mips_instruction insn;
+ union mips_instruction prev_insn;
+ int ret = 0;
+
+ prev_insn = p->addr[-1];
+ insn = p->addr[0];
+
+ if (insn_has_delayslot(insn) || insn_has_delayslot(prev_insn)) {
+ pr_notice("Kprobes for branch and jump instructions are not supported\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* insn: must be on special executable page on mips. */
+ p->ainsn.insn = get_insn_slot();
+ if (!p->ainsn.insn) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * In the kprobe->ainsn.insn[] array we store the original
+ * instruction at index zero and a break trap instruction at
+ * index one.
+ */
+
+ memcpy(&p->ainsn.insn[0], p->addr, sizeof(kprobe_opcode_t));
+ p->ainsn.insn[1] = breakpoint2_insn;
+ p->opcode = *p->addr;
+
+out:
+ return ret;
+}
+
+void __kprobes arch_arm_kprobe(struct kprobe *p)
+{
+ *p->addr = breakpoint_insn;
+ flush_insn_slot(p);
+}
+
+void __kprobes arch_disarm_kprobe(struct kprobe *p)
+{
+ *p->addr = p->opcode;
+ flush_insn_slot(p);
+}
+
+void __kprobes arch_remove_kprobe(struct kprobe *p)
+{
+ free_insn_slot(p->ainsn.insn, 0);
+}
+
+static void save_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ kcb->prev_kprobe.kp = kprobe_running();
+ kcb->prev_kprobe.status = kcb->kprobe_status;
+ kcb->prev_kprobe.old_SR = kcb->kprobe_old_SR;
+ kcb->prev_kprobe.saved_SR = kcb->kprobe_saved_SR;
+ kcb->prev_kprobe.saved_epc = kcb->kprobe_saved_epc;
+}
+
+static void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+ kcb->kprobe_status = kcb->prev_kprobe.status;
+ kcb->kprobe_old_SR = kcb->prev_kprobe.old_SR;
+ kcb->kprobe_saved_SR = kcb->prev_kprobe.saved_SR;
+ kcb->kprobe_saved_epc = kcb->prev_kprobe.saved_epc;
+}
+
+static void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+{
+ __get_cpu_var(current_kprobe) = p;
+ kcb->kprobe_saved_SR = kcb->kprobe_old_SR = (regs->cp0_status & ST0_IE);
+ kcb->kprobe_saved_epc = regs->cp0_epc;
+}
+
+static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
+{
+ regs->cp0_status &= ~ST0_IE;
+
+ /* single step inline if the instruction is a break */
+ if (p->opcode.word == breakpoint_insn.word ||
+ p->opcode.word == breakpoint2_insn.word)
+ regs->cp0_epc = (unsigned long)p->addr;
+ else
+ regs->cp0_epc = (unsigned long)&p->ainsn.insn[0];
+}
+
+static int __kprobes kprobe_handler(struct pt_regs *regs)
+{
+ struct kprobe *p;
+ int ret = 0;
+ kprobe_opcode_t *addr;
+ struct kprobe_ctlblk *kcb;
+
+ addr = (kprobe_opcode_t *) regs->cp0_epc;
+
+ /*
+ * We don't want to be preempted for the entire
+ * duration of kprobe processing
+ */
+ preempt_disable();
+ kcb = get_kprobe_ctlblk();
+
+ /* Check we're not actually recursing */
+ if (kprobe_running()) {
+ p = get_kprobe(addr);
+ if (p) {
+ if (kcb->kprobe_status == KPROBE_HIT_SS &&
+ p->ainsn.insn->word == breakpoint_insn.word) {
+ regs->cp0_status &= ~ST0_IE;
+ regs->cp0_status |= kcb->kprobe_saved_SR;
+ goto no_kprobe;
+ }
+ /*
+ * We have reentered the kprobe_handler(), since
+ * another probe was hit while within the handler.
+ * We here save the original kprobes variables and
+ * just single step on the instruction of the new probe
+ * without calling any user handlers.
+ */
+ save_previous_kprobe(kcb);
+ set_current_kprobe(p, regs, kcb);
+ kprobes_inc_nmissed_count(p);
+ prepare_singlestep(p, regs);
+ kcb->kprobe_status = KPROBE_REENTER;
+ return 1;
+ } else {
+ if (addr->word != breakpoint_insn.word) {
+ /*
+ * The breakpoint instruction was removed by
+ * another cpu right after we hit, no further
+ * handling of this interrupt is appropriate
+ */
+ ret = 1;
+ goto no_kprobe;
+ }
+ p = __get_cpu_var(current_kprobe);
+ if (p->break_handler && p->break_handler(p, regs))
+ goto ss_probe;
+ }
+ goto no_kprobe;
+ }
+
+ p = get_kprobe(addr);
+ if (!p) {
+ if (addr->word != breakpoint_insn.word) {
+ /*
+ * The breakpoint instruction was removed right
+ * after we hit it. Another cpu has removed
+ * either a probepoint or a debugger breakpoint
+ * at this address. In either case, no further
+ * handling of this interrupt is appropriate.
+ */
+ ret = 1;
+ }
+ /* Not one of ours: let kernel handle it */
+ goto no_kprobe;
+ }
+
+ set_current_kprobe(p, regs, kcb);
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+
+ if (p->pre_handler && p->pre_handler(p, regs)) {
+ /* handler has already set things up, so skip ss setup */
+ return 1;
+ }
+
+ss_probe:
+ prepare_singlestep(p, regs);
+ kcb->kprobe_status = KPROBE_HIT_SS;
+ return 1;
+
+no_kprobe:
+ preempt_enable_no_resched();
+ return ret;
+
+}
+
+/*
+ * Called after single-stepping. p->addr is the address of the
+ * instruction whose first byte has been replaced by the "break 0"
+ * instruction. To avoid the SMP problems that can occur when we
+ * temporarily put back the original opcode to single-step, we
+ * single-stepped a copy of the instruction. The address of this
+ * copy is p->ainsn.insn.
+ *
+ * This function prepares to return from the post-single-step
+ * breakpoint trap.
+ */
+static void __kprobes resume_execution(struct kprobe *p,
+ struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+{
+ unsigned long orig_epc = kcb->kprobe_saved_epc;
+ regs->cp0_epc = orig_epc + 4;
+}
+
+static inline int post_kprobe_handler(struct pt_regs *regs)
+{
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ if (!cur)
+ return 0;
+
+ if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ cur->post_handler(cur, regs, 0);
+ }
+
+ resume_execution(cur, regs, kcb);
+
+ regs->cp0_status |= kcb->kprobe_saved_SR;
+
+ /* Restore back the original saved kprobes variables and continue. */
+ if (kcb->kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe(kcb);
+ goto out;
+ }
+ reset_current_kprobe();
+out:
+ preempt_enable_no_resched();
+
+ return 1;
+}
+
+static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+{
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
+ return 1;
+
+ if (kcb->kprobe_status & KPROBE_HIT_SS) {
+ resume_execution(cur, regs, kcb);
+ regs->cp0_status |= kcb->kprobe_old_SR;
+
+ reset_current_kprobe();
+ preempt_enable_no_resched();
+ }
+ return 0;
+}
+
+/*
+ * Wrapper routine for handling exceptions.
+ */
+int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data)
+{
+
+ struct die_args *args = (struct die_args *)data;
+ int ret = NOTIFY_DONE;
+
+ switch (val) {
+ case DIE_BREAK:
+ if (kprobe_handler(args->regs))
+ ret = NOTIFY_STOP;
+ break;
+ case DIE_SSTEPBP:
+ if (post_kprobe_handler(args->regs))
+ ret = NOTIFY_STOP;
+ break;
+
+ case DIE_PAGE_FAULT:
+ /* kprobe_running() needs smp_processor_id() */
+ preempt_disable();
+
+ if (kprobe_running()
+ && kprobe_fault_handler(args->regs, args->trapnr))
+ ret = NOTIFY_STOP;
+ preempt_enable();
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ struct jprobe *jp = container_of(p, struct jprobe, kp);
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ kcb->jprobe_saved_regs = *regs;
+ kcb->jprobe_saved_sp = regs->regs[29];
+
+ memcpy(kcb->jprobes_stack, (void *)kcb->jprobe_saved_sp,
+ MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));
+
+ regs->cp0_epc = (unsigned long)(jp->entry);
+
+ return 1;
+}
+
+/* Defined in the inline asm below. */
+void jprobe_return_end(void);
+
+void __kprobes jprobe_return(void)
+{
+ /* Assembler quirk necessitates this '0,code' business. */
+ asm volatile(
+ "break 0,%0\n\t"
+ ".globl jprobe_return_end\n"
+ "jprobe_return_end:\n"
+ : : "n" (BRK_KPROBE_BP) : "memory");
+}
+
+int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ if (regs->cp0_epc >= (unsigned long)jprobe_return &&
+ regs->cp0_epc <= (unsigned long)jprobe_return_end) {
+ *regs = kcb->jprobe_saved_regs;
+ memcpy((void *)kcb->jprobe_saved_sp, kcb->jprobes_stack,
+ MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));
+ preempt_enable_no_resched();
+
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Function return probe trampoline:
+ * - init_kprobes() establishes a probepoint here
+ * - When the probed function returns, this probe causes the
+ * handlers to fire
+ */
+static void __used kretprobe_trampoline_holder(void)
+{
+ asm volatile(
+ ".set push\n\t"
+ /* Keep the assembler from reordering and placing JR here. */
+ ".set noreorder\n\t"
+ "nop\n\t"
+ ".global kretprobe_trampoline\n"
+ "kretprobe_trampoline:\n\t"
+ "nop\n\t"
+ ".set pop"
+ : : : "memory");
+}
+
+void kretprobe_trampoline(void);
+
+void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+{
+ ri->ret_addr = (kprobe_opcode_t *) regs->regs[31];
+
+ /* Replace the return addr with trampoline addr */
+ regs->regs[31] = (unsigned long)kretprobe_trampoline;
+}
+
+/*
+ * Called when the probe at kretprobe trampoline is hit
+ */
+static int __kprobes trampoline_probe_handler(struct kprobe *p,
+ struct pt_regs *regs)
+{
+ struct kretprobe_instance *ri = NULL;
+ struct hlist_head *head, empty_rp;
+ struct hlist_node *node, *tmp;
+ unsigned long flags, orig_ret_address = 0;
+ unsigned long trampoline_address = (unsigned long)kretprobe_trampoline;
+
+ INIT_HLIST_HEAD(&empty_rp);
+ kretprobe_hash_lock(current, &head, &flags);
+
+ /*
+ * It is possible to have multiple instances associated with a given
+ * task either because an multiple functions in the call path
+ * have a return probe installed on them, and/or more than one return
+ * return probe was registered for a target function.
+ *
+ * We can handle this because:
+ * - instances are always inserted at the head of the list
+ * - when multiple return probes are registered for the same
+ * function, the first instance's ret_addr will point to the
+ * real return address, and all the rest will point to
+ * kretprobe_trampoline
+ */
+ hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+ if (ri->task != current)
+ /* another task is sharing our hash bucket */
+ continue;
+
+ if (ri->rp && ri->rp->handler)
+ ri->rp->handler(ri, regs);
+
+ orig_ret_address = (unsigned long)ri->ret_addr;
+ recycle_rp_inst(ri, &empty_rp);
+
+ if (orig_ret_address != trampoline_address)
+ /*
+ * This is the real return address. Any other
+ * instances associated with this task are for
+ * other calls deeper on the call stack
+ */
+ break;
+ }
+
+ kretprobe_assert(ri, orig_ret_address, trampoline_address);
+ instruction_pointer(regs) = orig_ret_address;
+
+ reset_current_kprobe();
+ kretprobe_hash_unlock(current, &flags);
+ preempt_enable_no_resched();
+
+ hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+ hlist_del(&ri->hlist);
+ kfree(ri);
+ }
+ /*
+ * By returning a non-zero value, we are telling
+ * kprobe_handler() that we don't want the post_handler
+ * to run (and have re-enabled preemption)
+ */
+ return 1;
+}
+
+int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+{
+ if (p->addr == (kprobe_opcode_t *)kretprobe_trampoline)
+ return 1;
+
+ return 0;
+}
+
+static struct kprobe trampoline_p = {
+ .addr = (kprobe_opcode_t *)kretprobe_trampoline,
+ .pre_handler = trampoline_probe_handler
+};
+
+int __init arch_init_kprobes(void)
+{
+ return register_kprobe(&trampoline_p);
+}
diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S
index 6bfcb7a00ec..4c968e7efb7 100644
--- a/arch/mips/kernel/mcount.S
+++ b/arch/mips/kernel/mcount.S
@@ -165,12 +165,12 @@ NESTED(ftrace_graph_caller, PT_SIZE, ra)
/* arg3: Get frame pointer of current stack */
#ifdef CONFIG_FRAME_POINTER
- move a2, fp
+ move a2, fp
#else /* ! CONFIG_FRAME_POINTER */
#ifdef CONFIG_64BIT
- PTR_LA a2, PT_SIZE(sp)
+ PTR_LA a2, PT_SIZE(sp)
#else
- PTR_LA a2, (PT_SIZE+8)(sp)
+ PTR_LA a2, (PT_SIZE+8)(sp)
#endif
#endif
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index a4faceea9d8..a3d66137731 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -400,22 +400,22 @@ EXPORT(sysn32_call_table)
PTR sys_ioprio_set
PTR sys_ioprio_get
PTR compat_sys_utimensat
- PTR compat_sys_signalfd /* 5280 */
+ PTR compat_sys_signalfd /* 6280 */
PTR sys_ni_syscall
PTR sys_eventfd
PTR sys_fallocate
PTR sys_timerfd_create
- PTR compat_sys_timerfd_gettime /* 5285 */
+ PTR compat_sys_timerfd_gettime /* 6285 */
PTR compat_sys_timerfd_settime
PTR sys_signalfd4
PTR sys_eventfd2
PTR sys_epoll_create1
- PTR sys_dup3 /* 5290 */
+ PTR sys_dup3 /* 6290 */
PTR sys_pipe2
PTR sys_inotify_init1
PTR sys_preadv
PTR sys_pwritev
- PTR compat_sys_rt_tgsigqueueinfo /* 5295 */
+ PTR compat_sys_rt_tgsigqueueinfo /* 6295 */
PTR sys_perf_event_open
PTR sys_accept4
PTR compat_sys_recvmmsg
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 6cdca1956b7..383aeb95cb4 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -47,8 +47,12 @@
#endif /* CONFIG_MIPS_MT_SMTC */
volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */
+
int __cpu_number_map[NR_CPUS]; /* Map physical to logical */
+EXPORT_SYMBOL(__cpu_number_map);
+
int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */
+EXPORT_SYMBOL(__cpu_logical_map);
/* Number of TCs (or siblings in Intel speak) per CPU core */
int smp_num_siblings = 1;
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index a95dea5459c..cfeb2c15589 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -975,8 +975,7 @@ void ipi_decode(struct smtc_ipi *pipi)
ipi_call_interrupt();
break;
default:
- printk("Impossible SMTC IPI Argument 0x%x\n",
- (int)arg_copy);
+ printk("Impossible SMTC IPI Argument %p\n", arg_copy);
break;
}
break;
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index dd81b0f8751..58bab2ef257 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -29,6 +29,8 @@
#include <linux/ipc.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/elf.h>
#include <asm/asm.h>
#include <asm/branch.h>
@@ -116,7 +118,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
(!vmm || addr + len <= vmm->vm_start))
return addr;
}
- addr = TASK_UNMAPPED_BASE;
+ addr = current->mm->mmap_base;
if (do_color_align)
addr = COLOUR_ALIGN(addr, pgoff);
else
@@ -134,6 +136,51 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
}
}
+void arch_pick_mmap_layout(struct mm_struct *mm)
+{
+ unsigned long random_factor = 0UL;
+
+ if (current->flags & PF_RANDOMIZE) {
+ random_factor = get_random_int();
+ random_factor = random_factor << PAGE_SHIFT;
+ if (TASK_IS_32BIT_ADDR)
+ random_factor &= 0xfffffful;
+ else
+ random_factor &= 0xffffffful;
+ }
+
+ mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+}
+
+static inline unsigned long brk_rnd(void)
+{
+ unsigned long rnd = get_random_int();
+
+ rnd = rnd << PAGE_SHIFT;
+ /* 8MB for 32bit, 256MB for 64bit */
+ if (TASK_IS_32BIT_ADDR)
+ rnd = rnd & 0x7ffffful;
+ else
+ rnd = rnd & 0xffffffful;
+
+ return rnd;
+}
+
+unsigned long arch_randomize_brk(struct mm_struct *mm)
+{
+ unsigned long base = mm->brk;
+ unsigned long ret;
+
+ ret = PAGE_ALIGN(base + brk_rnd());
+
+ if (ret < mm->brk)
+ return mm->brk;
+
+ return ret;
+}
+
SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len,
unsigned long, prot, unsigned long, flags, unsigned long,
fd, off_t, offset)
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 852780868fb..03ec0019032 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -25,6 +25,7 @@
#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/kdebug.h>
+#include <linux/kprobes.h>
#include <linux/notifier.h>
#include <linux/kdb.h>
@@ -334,7 +335,7 @@ void show_regs(struct pt_regs *regs)
__show_regs((struct pt_regs *)regs);
}
-void show_registers(const struct pt_regs *regs)
+void show_registers(struct pt_regs *regs)
{
const int field = 2 * sizeof(unsigned long);
@@ -356,9 +357,14 @@ void show_registers(const struct pt_regs *regs)
printk("\n");
}
+static int regs_to_trapnr(struct pt_regs *regs)
+{
+ return (regs->cp0_cause >> 2) & 0x1f;
+}
+
static DEFINE_SPINLOCK(die_lock);
-void __noreturn die(const char * str, struct pt_regs * regs)
+void __noreturn die(const char *str, struct pt_regs *regs)
{
static int die_counter;
int sig = SIGSEGV;
@@ -366,7 +372,7 @@ void __noreturn die(const char * str, struct pt_regs * regs)
unsigned long dvpret = dvpe();
#endif /* CONFIG_MIPS_MT_SMTC */
- notify_die(DIE_OOPS, str, (struct pt_regs *)regs, SIGSEGV, 0, 0);
+ notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV);
console_verbose();
spin_lock_irq(&die_lock);
@@ -375,7 +381,7 @@ void __noreturn die(const char * str, struct pt_regs * regs)
mips_mt_regdump(dvpret);
#endif /* CONFIG_MIPS_MT_SMTC */
- if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
+ if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP)
sig = 0;
printk("%s[#%d]:\n", str, ++die_counter);
@@ -449,7 +455,7 @@ asmlinkage void do_be(struct pt_regs *regs)
printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
data ? "Data" : "Instruction",
field, regs->cp0_epc, field, regs->regs[31]);
- if (notify_die(DIE_OOPS, "bus error", regs, SIGBUS, 0, 0)
+ if (notify_die(DIE_OOPS, "bus error", regs, 0, regs_to_trapnr(regs), SIGBUS)
== NOTIFY_STOP)
return;
@@ -650,7 +656,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
{
siginfo_t info;
- if (notify_die(DIE_FP, "FP exception", regs, SIGFPE, 0, 0)
+ if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs), SIGFPE)
== NOTIFY_STOP)
return;
die_if_kernel("FP exception in kernel code", regs);
@@ -713,11 +719,11 @@ static void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
char b[40];
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
- if (kgdb_ll_trap(DIE_TRAP, str, regs, code, 0, 0) == NOTIFY_STOP)
+ if (kgdb_ll_trap(DIE_TRAP, str, regs, code, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
return;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
- if (notify_die(DIE_TRAP, str, regs, code, 0, 0) == NOTIFY_STOP)
+ if (notify_die(DIE_TRAP, str, regs, code, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
return;
/*
@@ -783,6 +789,25 @@ asmlinkage void do_bp(struct pt_regs *regs)
if (bcode >= (1 << 10))
bcode >>= 10;
+ /*
+ * notify the kprobe handlers, if instruction is likely to
+ * pertain to them.
+ */
+ switch (bcode) {
+ case BRK_KPROBE_BP:
+ if (notify_die(DIE_BREAK, "debug", regs, bcode, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
+ return;
+ else
+ break;
+ case BRK_KPROBE_SSTEPBP:
+ if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
+ return;
+ else
+ break;
+ default:
+ break;
+ }
+
do_trap_or_bp(regs, bcode, "Break");
return;
@@ -815,7 +840,7 @@ asmlinkage void do_ri(struct pt_regs *regs)
unsigned int opcode = 0;
int status = -1;
- if (notify_die(DIE_RI, "RI Fault", regs, SIGSEGV, 0, 0)
+ if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs), SIGILL)
== NOTIFY_STOP)
return;
@@ -907,11 +932,6 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
return NOTIFY_OK;
}
-static struct notifier_block default_cu2_notifier = {
- .notifier_call = default_cu2_call,
- .priority = 0x80000000, /* Run last */
-};
-
asmlinkage void do_cpu(struct pt_regs *regs)
{
unsigned int __user *epc;
@@ -1734,5 +1754,5 @@ void __init trap_init(void)
sort_extable(__start___dbe_table, __stop___dbe_table);
- register_cu2_notifier(&default_cu2_notifier);
+ cu2_notifier(default_cu2_call, 0x80000000); /* Run last */
}