From 70d2b5958a04139fbffecf27791cf913dce8038e Mon Sep 17 00:00:00 2001
From: Chris Metcalf <cmetcalf@tilera.com>
Date: Wed, 7 Aug 2013 12:11:56 -0400
Subject: tile: improve illegal translation interrupt handling

First, don't re-enable interrupts blindly in the Linux trap handler.
We already handle page faults this way; synchronous interrupts like
ILL_TRANS will fire even when interrupts are disabled, and we don't
want to re-enable interrupts in that case.

For ILL_TRANS, we now pass the ILL_VA_PC reason into the trap handler
so we can report it properly; this is the address that caused the
illegal translation trap.  We print the address as part of the
pr_alert() message now if it's coming from the kernel.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
---
 arch/tile/kernel/traps.c | 25 +++++++++++++++----------
 1 file changed, 15 insertions(+), 10 deletions(-)

(limited to 'arch/tile/kernel/traps.c')

diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c
index 5b19a23c890..a1bbc5de4d0 100644
--- a/arch/tile/kernel/traps.c
+++ b/arch/tile/kernel/traps.c
@@ -222,8 +222,9 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
 	unsigned long address = 0;
 	bundle_bits instr;
 
-	/* Re-enable interrupts. */
-	local_irq_enable();
+	/* Re-enable interrupts, if they were previously enabled. */
+	if (!(regs->flags & PT_FLAGS_DISABLE_IRQ))
+		local_irq_enable();
 
 	/*
 	 * If it hits in kernel mode and we can't fix it up, just exit the
@@ -231,7 +232,8 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
 	 */
 	if (!user_mode(regs)) {
 		const char *name;
-		if (fixup_exception(regs))  /* only UNALIGN_DATA in practice */
+		char buf[100];
+		if (fixup_exception(regs))  /* ILL_TRANS or UNALIGN_DATA */
 			return;
 		if (fault_num >= 0 &&
 		    fault_num < sizeof(int_name)/sizeof(int_name[0]) &&
@@ -239,10 +241,16 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
 			name = int_name[fault_num];
 		else
 			name = "Unknown interrupt";
-		pr_alert("Kernel took bad trap %d (%s) at PC %#lx\n",
-			 fault_num, name, regs->pc);
 		if (fault_num == INT_GPV)
-			pr_alert("GPV_REASON is %#lx\n", reason);
+			snprintf(buf, sizeof(buf), "; GPV_REASON %#lx", reason);
+#ifdef __tilegx__
+		else if (fault_num == INT_ILL_TRANS)
+			snprintf(buf, sizeof(buf), "; address %#lx", reason);
+#endif
+		else
+			buf[0] = '\0';
+		pr_alert("Kernel took bad trap %d (%s) at PC %#lx%s\n",
+			 fault_num, name, regs->pc, buf);
 		show_regs(regs);
 		do_exit(SIGKILL);  /* FIXME: implement i386 die() */
 		return;
@@ -324,11 +332,8 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
 		fill_ra_stack();
 
 		signo = SIGSEGV;
+		address = reason;
 		code = SEGV_MAPERR;
-		if (reason & SPR_ILL_TRANS_REASON__I_STREAM_VA_RMASK)
-			address = regs->pc;
-		else
-			address = 0;  /* FIXME: GX: single-step for address */
 		break;
 	}
 #endif
-- 
cgit v1.2.3-70-g09d2


From 3fa17c395bb0c358745fbe0c8aa039d6cdac1735 Mon Sep 17 00:00:00 2001
From: Tony Lu <zlu@tilera.com>
Date: Fri, 9 Aug 2013 15:08:57 -0400
Subject: tile: support kprobes on tilegx

This change includes support for Kprobes, Jprobes and Return Probes.

Reviewed-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Signed-off-by: Tony Lu <zlu@tilera.com>
Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
---
 arch/tile/Kconfig                            |   2 +
 arch/tile/include/asm/Kbuild                 |   1 -
 arch/tile/include/asm/kdebug.h               |  28 ++
 arch/tile/include/asm/kprobes.h              |  79 ++++
 arch/tile/include/asm/ptrace.h               |   1 +
 arch/tile/include/uapi/arch/opcode_tilegx.h  |   1 +
 arch/tile/include/uapi/arch/opcode_tilepro.h |   1 +
 arch/tile/kernel/Makefile                    |   1 +
 arch/tile/kernel/kprobes.c                   | 528 +++++++++++++++++++++++++++
 arch/tile/kernel/smp.c                       |  14 +-
 arch/tile/kernel/traps.c                     |  45 ++-
 arch/tile/kernel/vmlinux.lds.S               |   1 +
 arch/tile/mm/fault.c                         |  12 +
 samples/kprobes/kprobe_example.c             |   9 +
 14 files changed, 718 insertions(+), 5 deletions(-)
 create mode 100644 arch/tile/include/asm/kdebug.h
 create mode 100644 arch/tile/include/asm/kprobes.h
 create mode 100644 arch/tile/kernel/kprobes.c

(limited to 'arch/tile/kernel/traps.c')

diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index d2277c4f949..616e2344ec6 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -125,6 +125,8 @@ config TILEGX
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FTRACE_MCOUNT_RECORD
+	select HAVE_KPROBES
+	select HAVE_KRETPROBES
 
 config TILEPRO
 	def_bool !TILEGX
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild
index b17b9b8e53c..4c0b3c24c39 100644
--- a/arch/tile/include/asm/Kbuild
+++ b/arch/tile/include/asm/Kbuild
@@ -15,7 +15,6 @@ generic-y += ioctl.h
 generic-y += ioctls.h
 generic-y += ipcbuf.h
 generic-y += irq_regs.h
-generic-y += kdebug.h
 generic-y += local.h
 generic-y += msgbuf.h
 generic-y += mutex.h
diff --git a/arch/tile/include/asm/kdebug.h b/arch/tile/include/asm/kdebug.h
new file mode 100644
index 00000000000..5bbbfa904c2
--- /dev/null
+++ b/arch/tile/include/asm/kdebug.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ */
+
+#ifndef _ASM_TILE_KDEBUG_H
+#define _ASM_TILE_KDEBUG_H
+
+#include <linux/notifier.h>
+
+enum die_val {
+	DIE_OOPS = 1,
+	DIE_BREAK,
+	DIE_SSTEPBP,
+	DIE_PAGE_FAULT,
+	DIE_COMPILED_BPT
+};
+
+#endif /* _ASM_TILE_KDEBUG_H */
diff --git a/arch/tile/include/asm/kprobes.h b/arch/tile/include/asm/kprobes.h
new file mode 100644
index 00000000000..d8f9a83943b
--- /dev/null
+++ b/arch/tile/include/asm/kprobes.h
@@ -0,0 +1,79 @@
+/*
+ * arch/tile/include/asm/kprobes.h
+ *
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ */
+
+#ifndef _ASM_TILE_KPROBES_H
+#define _ASM_TILE_KPROBES_H
+
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/percpu.h>
+
+#include <arch/opcode.h>
+
+#define __ARCH_WANT_KPROBES_INSN_SLOT
+#define MAX_INSN_SIZE			2
+
+#define kretprobe_blacklist_size 0
+
+typedef tile_bundle_bits kprobe_opcode_t;
+
+#define flush_insn_slot(p)						\
+	flush_icache_range((unsigned long)p->addr,			\
+			   (unsigned long)p->addr +			\
+			   (MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
+
+struct kprobe;
+
+/* Architecture specific copy of original instruction. */
+struct arch_specific_insn {
+	kprobe_opcode_t *insn;
+};
+
+struct prev_kprobe {
+	struct kprobe *kp;
+	unsigned long status;
+	unsigned long saved_pc;
+};
+
+#define MAX_JPROBES_STACK_SIZE 128
+#define MAX_JPROBES_STACK_ADDR \
+	(((unsigned long)current_thread_info()) + THREAD_SIZE - 32 \
+		- sizeof(struct pt_regs))
+
+#define MIN_JPROBES_STACK_SIZE(ADDR)					\
+	((((ADDR) + MAX_JPROBES_STACK_SIZE) > MAX_JPROBES_STACK_ADDR)	\
+		? MAX_JPROBES_STACK_ADDR - (ADDR)			\
+		: MAX_JPROBES_STACK_SIZE)
+
+/* per-cpu kprobe control block. */
+struct kprobe_ctlblk {
+	unsigned long kprobe_status;
+	unsigned long kprobe_saved_pc;
+	unsigned long jprobe_saved_sp;
+	struct prev_kprobe prev_kprobe;
+	struct pt_regs jprobe_saved_regs;
+	char jprobes_stack[MAX_JPROBES_STACK_SIZE];
+};
+
+extern tile_bundle_bits breakpoint2_insn;
+extern tile_bundle_bits breakpoint_insn;
+
+void arch_remove_kprobe(struct kprobe *);
+
+extern int kprobe_exceptions_notify(struct notifier_block *self,
+			     unsigned long val, void *data);
+
+#endif /* _ASM_TILE_KPROBES_H */
diff --git a/arch/tile/include/asm/ptrace.h b/arch/tile/include/asm/ptrace.h
index 73b681b566f..0d25c21bcd6 100644
--- a/arch/tile/include/asm/ptrace.h
+++ b/arch/tile/include/asm/ptrace.h
@@ -33,6 +33,7 @@ typedef unsigned long pt_reg_t;
 
 #ifndef __ASSEMBLY__
 
+#define regs_return_value(regs) ((regs)->regs[0])
 #define instruction_pointer(regs) ((regs)->pc)
 #define profile_pc(regs) instruction_pointer(regs)
 #define user_stack_pointer(regs) ((regs)->sp)
diff --git a/arch/tile/include/uapi/arch/opcode_tilegx.h b/arch/tile/include/uapi/arch/opcode_tilegx.h
index c14d02c8160..d76ff2db745 100644
--- a/arch/tile/include/uapi/arch/opcode_tilegx.h
+++ b/arch/tile/include/uapi/arch/opcode_tilegx.h
@@ -61,6 +61,7 @@ typedef tilegx_bundle_bits tile_bundle_bits;
 #define TILE_BUNDLE_ALIGNMENT_IN_BYTES TILEGX_BUNDLE_ALIGNMENT_IN_BYTES
 #define TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES \
   TILEGX_LOG2_BUNDLE_ALIGNMENT_IN_BYTES
+#define TILE_BPT_BUNDLE TILEGX_BPT_BUNDLE
 
 /* 64-bit pattern for a { bpt ; nop } bundle. */
 #define TILEGX_BPT_BUNDLE 0x286a44ae51485000ULL
diff --git a/arch/tile/include/uapi/arch/opcode_tilepro.h b/arch/tile/include/uapi/arch/opcode_tilepro.h
index 71b763b8ce8..4451cff1a86 100644
--- a/arch/tile/include/uapi/arch/opcode_tilepro.h
+++ b/arch/tile/include/uapi/arch/opcode_tilepro.h
@@ -71,6 +71,7 @@ typedef tilepro_bundle_bits tile_bundle_bits;
 #define TILE_BUNDLE_ALIGNMENT_IN_BYTES TILEPRO_BUNDLE_ALIGNMENT_IN_BYTES
 #define TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES \
   TILEPRO_LOG2_BUNDLE_ALIGNMENT_IN_BYTES
+#define TILE_BPT_BUNDLE TILEPRO_BPT_BUNDLE
 
 /* 64-bit pattern for a { bpt ; nop } bundle. */
 #define TILEPRO_BPT_BUNDLE 0x400b3cae70166000ULL
diff --git a/arch/tile/kernel/Makefile b/arch/tile/kernel/Makefile
index 2e6eaa11b60..b7c8b5e19d5 100644
--- a/arch/tile/kernel/Makefile
+++ b/arch/tile/kernel/Makefile
@@ -28,5 +28,6 @@ endif
 obj-$(CONFIG_TILE_USB)		+= usb.o
 obj-$(CONFIG_TILE_HVGLUE_TRACE)	+= hvglue_trace.o
 obj-$(CONFIG_FUNCTION_TRACER)	+= ftrace.o mcount_64.o
+obj-$(CONFIG_KPROBES)		+= kprobes.o
 
 obj-y				+= vdso/
diff --git a/arch/tile/kernel/kprobes.c b/arch/tile/kernel/kprobes.c
new file mode 100644
index 00000000000..27cdcacbe81
--- /dev/null
+++ b/arch/tile/kernel/kprobes.c
@@ -0,0 +1,528 @@
+/*
+ * arch/tile/kernel/kprobes.c
+ * Kprobes on TILE-Gx
+ *
+ * Some portions copied from the MIPS version.
+ *
+ * Copyright (C) IBM Corporation, 2002, 2004
+ * Copyright 2006 Sony Corp.
+ * Copyright 2010 Cavium Networks
+ *
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ */
+
+#include <linux/kprobes.h>
+#include <linux/kdebug.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+
+#include <arch/opcode.h>
+
+DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
+tile_bundle_bits breakpoint_insn = TILEGX_BPT_BUNDLE;
+tile_bundle_bits breakpoint2_insn = TILEGX_BPT_BUNDLE | DIE_SSTEPBP;
+
+/*
+ * Check whether instruction is branch or jump, or if executing it
+ * has different results depending on where it is executed (e.g. lnk).
+ */
+static int __kprobes insn_has_control(kprobe_opcode_t insn)
+{
+	if (get_Mode(insn) != 0) {   /* Y-format bundle */
+		if (get_Opcode_Y1(insn) != RRR_1_OPCODE_Y1 ||
+		    get_RRROpcodeExtension_Y1(insn) != UNARY_RRR_1_OPCODE_Y1)
+			return 0;
+
+		switch (get_UnaryOpcodeExtension_Y1(insn)) {
+		case JALRP_UNARY_OPCODE_Y1:
+		case JALR_UNARY_OPCODE_Y1:
+		case JRP_UNARY_OPCODE_Y1:
+		case JR_UNARY_OPCODE_Y1:
+		case LNK_UNARY_OPCODE_Y1:
+			return 1;
+		default:
+			return 0;
+		}
+	}
+
+	switch (get_Opcode_X1(insn)) {
+	case BRANCH_OPCODE_X1:	/* branch instructions */
+	case JUMP_OPCODE_X1:	/* jump instructions: j and jal */
+		return 1;
+
+	case RRR_0_OPCODE_X1:   /* other jump instructions */
+		if (get_RRROpcodeExtension_X1(insn) != UNARY_RRR_0_OPCODE_X1)
+			return 0;
+		switch (get_UnaryOpcodeExtension_X1(insn)) {
+		case JALRP_UNARY_OPCODE_X1:
+		case JALR_UNARY_OPCODE_X1:
+		case JRP_UNARY_OPCODE_X1:
+		case JR_UNARY_OPCODE_X1:
+		case LNK_UNARY_OPCODE_X1:
+			return 1;
+		default:
+			return 0;
+		}
+	default:
+		return 0;
+	}
+}
+
+int __kprobes arch_prepare_kprobe(struct kprobe *p)
+{
+	unsigned long addr = (unsigned long)p->addr;
+
+	if (addr & (sizeof(kprobe_opcode_t) - 1))
+		return -EINVAL;
+
+	if (insn_has_control(*p->addr)) {
+		pr_notice("Kprobes for control instructions are not "
+			  "supported\n");
+		return -EINVAL;
+	}
+
+	/* insn: must be on special executable page on tile. */
+	p->ainsn.insn = get_insn_slot();
+	if (!p->ainsn.insn)
+		return -ENOMEM;
+
+	/*
+	 * In the kprobe->ainsn.insn[] array we store the original
+	 * instruction at index zero and a break trap instruction at
+	 * index one.
+	 */
+	memcpy(&p->ainsn.insn[0], p->addr, sizeof(kprobe_opcode_t));
+	p->ainsn.insn[1] = breakpoint2_insn;
+	p->opcode = *p->addr;
+
+	return 0;
+}
+
+void __kprobes arch_arm_kprobe(struct kprobe *p)
+{
+	unsigned long addr_wr;
+
+	/* Operate on writable kernel text mapping. */
+	addr_wr = (unsigned long)p->addr - MEM_SV_START + PAGE_OFFSET;
+
+	if (probe_kernel_write((void *)addr_wr, &breakpoint_insn,
+		sizeof(breakpoint_insn)))
+		pr_err("%s: failed to enable kprobe\n", __func__);
+
+	smp_wmb();
+	flush_insn_slot(p);
+}
+
+void __kprobes arch_disarm_kprobe(struct kprobe *kp)
+{
+	unsigned long addr_wr;
+
+	/* Operate on writable kernel text mapping. */
+	addr_wr = (unsigned long)kp->addr - MEM_SV_START + PAGE_OFFSET;
+
+	if (probe_kernel_write((void *)addr_wr, &kp->opcode,
+		sizeof(kp->opcode)))
+		pr_err("%s: failed to disable kprobe\n", __func__);
+
+	smp_wmb();
+	flush_insn_slot(kp);
+}
+
+void __kprobes arch_remove_kprobe(struct kprobe *p)
+{
+	if (p->ainsn.insn) {
+		free_insn_slot(p->ainsn.insn, 0);
+		p->ainsn.insn = NULL;
+	}
+}
+
+static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+	kcb->prev_kprobe.kp = kprobe_running();
+	kcb->prev_kprobe.status = kcb->kprobe_status;
+	kcb->prev_kprobe.saved_pc = kcb->kprobe_saved_pc;
+}
+
+static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
+	kcb->kprobe_status = kcb->prev_kprobe.status;
+	kcb->kprobe_saved_pc = kcb->prev_kprobe.saved_pc;
+}
+
+static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
+			struct kprobe_ctlblk *kcb)
+{
+	__this_cpu_write(current_kprobe, p);
+	kcb->kprobe_saved_pc = regs->pc;
+}
+
+static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
+{
+	/* Single step inline if the instruction is a break. */
+	if (p->opcode == breakpoint_insn ||
+	    p->opcode == breakpoint2_insn)
+		regs->pc = (unsigned long)p->addr;
+	else
+		regs->pc = (unsigned long)&p->ainsn.insn[0];
+}
+
+static int __kprobes kprobe_handler(struct pt_regs *regs)
+{
+	struct kprobe *p;
+	int ret = 0;
+	kprobe_opcode_t *addr;
+	struct kprobe_ctlblk *kcb;
+
+	addr = (kprobe_opcode_t *)regs->pc;
+
+	/*
+	 * We don't want to be preempted for the entire
+	 * duration of kprobe processing.
+	 */
+	preempt_disable();
+	kcb = get_kprobe_ctlblk();
+
+	/* Check we're not actually recursing. */
+	if (kprobe_running()) {
+		p = get_kprobe(addr);
+		if (p) {
+			if (kcb->kprobe_status == KPROBE_HIT_SS &&
+			    p->ainsn.insn[0] == breakpoint_insn) {
+				goto no_kprobe;
+			}
+			/*
+			 * We have reentered the kprobe_handler(), since
+			 * another probe was hit while within the handler.
+			 * We here save the original kprobes variables and
+			 * just single step on the instruction of the new probe
+			 * without calling any user handlers.
+			 */
+			save_previous_kprobe(kcb);
+			set_current_kprobe(p, regs, kcb);
+			kprobes_inc_nmissed_count(p);
+			prepare_singlestep(p, regs);
+			kcb->kprobe_status = KPROBE_REENTER;
+			return 1;
+		} else {
+			if (*addr != breakpoint_insn) {
+				/*
+				 * The breakpoint instruction was removed by
+				 * another cpu right after we hit, no further
+				 * handling of this interrupt is appropriate.
+				 */
+				ret = 1;
+				goto no_kprobe;
+			}
+			p = __this_cpu_read(current_kprobe);
+			if (p->break_handler && p->break_handler(p, regs))
+				goto ss_probe;
+		}
+		goto no_kprobe;
+	}
+
+	p = get_kprobe(addr);
+	if (!p) {
+		if (*addr != breakpoint_insn) {
+			/*
+			 * The breakpoint instruction was removed right
+			 * after we hit it.  Another cpu has removed
+			 * either a probepoint or a debugger breakpoint
+			 * at this address.  In either case, no further
+			 * handling of this interrupt is appropriate.
+			 */
+			ret = 1;
+		}
+		/* Not one of ours: let kernel handle it. */
+		goto no_kprobe;
+	}
+
+	set_current_kprobe(p, regs, kcb);
+	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+
+	if (p->pre_handler && p->pre_handler(p, regs)) {
+		/* Handler has already set things up, so skip ss setup. */
+		return 1;
+	}
+
+ss_probe:
+	prepare_singlestep(p, regs);
+	kcb->kprobe_status = KPROBE_HIT_SS;
+	return 1;
+
+no_kprobe:
+	preempt_enable_no_resched();
+	return ret;
+}
+
+/*
+ * Called after single-stepping.  p->addr is the address of the
+ * instruction that has been replaced by the breakpoint. To avoid the
+ * SMP problems that can occur when we temporarily put back the
+ * original opcode to single-step, we single-stepped a copy of the
+ * instruction.  The address of this copy is p->ainsn.insn.
+ *
+ * This function prepares to return from the post-single-step
+ * breakpoint trap.
+ */
+static void __kprobes resume_execution(struct kprobe *p,
+				       struct pt_regs *regs,
+				       struct kprobe_ctlblk *kcb)
+{
+	unsigned long orig_pc = kcb->kprobe_saved_pc;
+	regs->pc = orig_pc + 8;
+}
+
+static inline int post_kprobe_handler(struct pt_regs *regs)
+{
+	struct kprobe *cur = kprobe_running();
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+	if (!cur)
+		return 0;
+
+	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
+		kcb->kprobe_status = KPROBE_HIT_SSDONE;
+		cur->post_handler(cur, regs, 0);
+	}
+
+	resume_execution(cur, regs, kcb);
+
+	/* Restore back the original saved kprobes variables and continue. */
+	if (kcb->kprobe_status == KPROBE_REENTER) {
+		restore_previous_kprobe(kcb);
+		goto out;
+	}
+	reset_current_kprobe();
+out:
+	preempt_enable_no_resched();
+
+	return 1;
+}
+
+static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+{
+	struct kprobe *cur = kprobe_running();
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+	if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
+		return 1;
+
+	if (kcb->kprobe_status & KPROBE_HIT_SS) {
+		/*
+		 * We are here because the instruction being single
+		 * stepped caused a page fault. We reset the current
+		 * kprobe and the ip points back to the probe address
+		 * and allow the page fault handler to continue as a
+		 * normal page fault.
+		 */
+		resume_execution(cur, regs, kcb);
+		reset_current_kprobe();
+		preempt_enable_no_resched();
+	}
+	return 0;
+}
+
+/*
+ * Wrapper routine for handling exceptions.
+ */
+int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
+				       unsigned long val, void *data)
+{
+	struct die_args *args = (struct die_args *)data;
+	int ret = NOTIFY_DONE;
+
+	switch (val) {
+	case DIE_BREAK:
+		if (kprobe_handler(args->regs))
+			ret = NOTIFY_STOP;
+		break;
+	case DIE_SSTEPBP:
+		if (post_kprobe_handler(args->regs))
+			ret = NOTIFY_STOP;
+		break;
+	case DIE_PAGE_FAULT:
+		/* kprobe_running() needs smp_processor_id(). */
+		preempt_disable();
+
+		if (kprobe_running()
+		    && kprobe_fault_handler(args->regs, args->trapnr))
+			ret = NOTIFY_STOP;
+		preempt_enable();
+		break;
+	default:
+		break;
+	}
+	return ret;
+}
+
+int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	struct jprobe *jp = container_of(p, struct jprobe, kp);
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+	kcb->jprobe_saved_regs = *regs;
+	kcb->jprobe_saved_sp = regs->sp;
+
+	memcpy(kcb->jprobes_stack, (void *)kcb->jprobe_saved_sp,
+	       MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));
+
+	regs->pc = (unsigned long)(jp->entry);
+
+	return 1;
+}
+
+/* Defined in the inline asm below. */
+void jprobe_return_end(void);
+
+void __kprobes jprobe_return(void)
+{
+	asm volatile(
+		"bpt\n\t"
+		".globl jprobe_return_end\n"
+		"jprobe_return_end:\n");
+}
+
+int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+	if (regs->pc >= (unsigned long)jprobe_return &&
+	    regs->pc <= (unsigned long)jprobe_return_end) {
+		*regs = kcb->jprobe_saved_regs;
+		memcpy((void *)kcb->jprobe_saved_sp, kcb->jprobes_stack,
+		       MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));
+		preempt_enable_no_resched();
+
+		return 1;
+	}
+	return 0;
+}
+
+/*
+ * Function return probe trampoline:
+ * - init_kprobes() establishes a probepoint here
+ * - When the probed function returns, this probe causes the
+ *   handlers to fire
+ */
+static void __used kretprobe_trampoline_holder(void)
+{
+	asm volatile(
+		"nop\n\t"
+		".global kretprobe_trampoline\n"
+		"kretprobe_trampoline:\n\t"
+		"nop\n\t"
+		: : : "memory");
+}
+
+void kretprobe_trampoline(void);
+
+void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+				      struct pt_regs *regs)
+{
+	ri->ret_addr = (kprobe_opcode_t *) regs->lr;
+
+	/* Replace the return addr with trampoline addr */
+	regs->lr = (unsigned long)kretprobe_trampoline;
+}
+
+/*
+ * Called when the probe at kretprobe trampoline is hit.
+ */
+static int __kprobes trampoline_probe_handler(struct kprobe *p,
+						struct pt_regs *regs)
+{
+	struct kretprobe_instance *ri = NULL;
+	struct hlist_head *head, empty_rp;
+	struct hlist_node *tmp;
+	unsigned long flags, orig_ret_address = 0;
+	unsigned long trampoline_address = (unsigned long)kretprobe_trampoline;
+
+	INIT_HLIST_HEAD(&empty_rp);
+	kretprobe_hash_lock(current, &head, &flags);
+
+	/*
+	 * It is possible to have multiple instances associated with a given
+	 * task either because multiple functions in the call path have
+	 * a return probe installed on them, and/or more than one
+	 * return probe was registered for a target function.
+	 *
+	 * We can handle this because:
+	 *     - instances are always inserted at the head of the list
+	 *     - when multiple return probes are registered for the same
+	 *       function, the first instance's ret_addr will point to the
+	 *       real return address, and all the rest will point to
+	 *       kretprobe_trampoline
+	 */
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
+		if (ri->task != current)
+			/* another task is sharing our hash bucket */
+			continue;
+
+		if (ri->rp && ri->rp->handler)
+			ri->rp->handler(ri, regs);
+
+		orig_ret_address = (unsigned long)ri->ret_addr;
+		recycle_rp_inst(ri, &empty_rp);
+
+		if (orig_ret_address != trampoline_address) {
+			/*
+			 * This is the real return address. Any other
+			 * instances associated with this task are for
+			 * other calls deeper on the call stack
+			 */
+			break;
+		}
+	}
+
+	kretprobe_assert(ri, orig_ret_address, trampoline_address);
+	instruction_pointer(regs) = orig_ret_address;
+
+	reset_current_kprobe();
+	kretprobe_hash_unlock(current, &flags);
+	preempt_enable_no_resched();
+
+	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
+		hlist_del(&ri->hlist);
+		kfree(ri);
+	}
+	/*
+	 * By returning a non-zero value, we are telling
+	 * kprobe_handler() that we don't want the post_handler
+	 * to run (and have re-enabled preemption)
+	 */
+	return 1;
+}
+
+int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+{
+	if (p->addr == (kprobe_opcode_t *)kretprobe_trampoline)
+		return 1;
+
+	return 0;
+}
+
+static struct kprobe trampoline_p = {
+	.addr = (kprobe_opcode_t *)kretprobe_trampoline,
+	.pre_handler = trampoline_probe_handler
+};
+
+int __init arch_init_kprobes(void)
+{
+	register_kprobe(&trampoline_p);
+	return 0;
+}
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c
index 6cc520d71d2..0ae1c594d88 100644
--- a/arch/tile/kernel/smp.c
+++ b/arch/tile/kernel/smp.c
@@ -20,6 +20,7 @@
 #include <linux/irq.h>
 #include <linux/module.h>
 #include <asm/cacheflush.h>
+#include <asm/homecache.h>
 
 HV_Topology smp_topology __write_once;
 EXPORT_SYMBOL(smp_topology);
@@ -167,9 +168,16 @@ static void ipi_flush_icache_range(void *info)
 void flush_icache_range(unsigned long start, unsigned long end)
 {
 	struct ipi_flush flush = { start, end };
-	preempt_disable();
-	on_each_cpu(ipi_flush_icache_range, &flush, 1);
-	preempt_enable();
+
+	/* If invoked with irqs disabled, we can not issue IPIs. */
+	if (irqs_disabled())
+		flush_remote(0, HV_FLUSH_EVICT_L1I, NULL, 0, 0, 0,
+			NULL, NULL, 0);
+	else {
+		preempt_disable();
+		on_each_cpu(ipi_flush_icache_range, &flush, 1);
+		preempt_enable();
+	}
 }
 
 
diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c
index a1bbc5de4d0..cfff6f958d5 100644
--- a/arch/tile/kernel/traps.c
+++ b/arch/tile/kernel/traps.c
@@ -15,6 +15,7 @@
 #include <linux/sched.h>
 #include <linux/kernel.h>
 #include <linux/kprobes.h>
+#include <linux/kdebug.h>
 #include <linux/module.h>
 #include <linux/reboot.h>
 #include <linux/uaccess.h>
@@ -214,6 +215,43 @@ static const char *const int_name[] = {
 #endif
 };
 
+static int do_bpt(struct pt_regs *regs)
+{
+	unsigned long bundle, bcode, bpt;
+
+	bundle = *(unsigned long *)instruction_pointer(regs);
+
+	/*
+	 * The bpt bundle should be { bpt; nop }, which is 0x286a44ae51485000ULL.
+	 * We encode the unused least significant bits for other purposes.
+	 */
+	bpt = bundle & ~((1ULL << 12) - 1);
+	if (bpt != TILE_BPT_BUNDLE)
+		return 0;
+
+	bcode = bundle & ((1ULL << 12) - 1);
+	/*
+	 * notify the kprobe handlers, if instruction is likely to
+	 * pertain to them.
+	 */
+	switch (bcode) {
+	/* breakpoint_insn */
+	case 0:
+		notify_die(DIE_BREAK, "debug", regs, bundle,
+			INT_ILL, SIGTRAP);
+		break;
+	/* breakpoint2_insn */
+	case DIE_SSTEPBP:
+		notify_die(DIE_SSTEPBP, "single_step", regs, bundle,
+			INT_ILL, SIGTRAP);
+		break;
+	default:
+		return 0;
+	}
+
+	return 1;
+}
+
 void __kprobes do_trap(struct pt_regs *regs, int fault_num,
 		       unsigned long reason)
 {
@@ -221,6 +259,11 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
 	int signo, code;
 	unsigned long address = 0;
 	bundle_bits instr;
+	int is_kernel = !user_mode(regs);
+
+	/* Handle breakpoints, etc. */
+	if (is_kernel && fault_num == INT_ILL && do_bpt(regs))
+		return;
 
 	/* Re-enable interrupts, if they were previously enabled. */
 	if (!(regs->flags & PT_FLAGS_DISABLE_IRQ))
@@ -230,7 +273,7 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
 	 * If it hits in kernel mode and we can't fix it up, just exit the
 	 * current process and hope for the best.
 	 */
-	if (!user_mode(regs)) {
+	if (is_kernel) {
 		const char *name;
 		char buf[100];
 		if (fixup_exception(regs))  /* ILL_TRANS or UNALIGN_DATA */
diff --git a/arch/tile/kernel/vmlinux.lds.S b/arch/tile/kernel/vmlinux.lds.S
index 673d00afa83..aab995578dc 100644
--- a/arch/tile/kernel/vmlinux.lds.S
+++ b/arch/tile/kernel/vmlinux.lds.S
@@ -43,6 +43,7 @@ SECTIONS
     HEAD_TEXT
     SCHED_TEXT
     LOCK_TEXT
+    KPROBES_TEXT
     IRQENTRY_TEXT
     __fix_text_end = .;   /* tile-cpack won't rearrange before this */
     TEXT_TEXT
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index 502664ae1e6..64eec3f9584 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -34,6 +34,7 @@
 #include <linux/hugetlb.h>
 #include <linux/syscalls.h>
 #include <linux/uaccess.h>
+#include <linux/kdebug.h>
 
 #include <asm/pgalloc.h>
 #include <asm/sections.h>
@@ -721,6 +722,17 @@ void do_page_fault(struct pt_regs *regs, int fault_num,
 {
 	int is_page_fault;
 
+#ifdef CONFIG_KPROBES
+	/*
+	 * This is to notify the fault handler of the kprobes.  The
+	 * exception code is redundant as it is also carried in REGS,
+	 * but we pass it anyhow.
+	 */
+	if (notify_die(DIE_PAGE_FAULT, "page fault", regs, -1,
+		       regs->faultnum, SIGSEGV) == NOTIFY_STOP)
+		return;
+#endif
+
 #ifdef __tilegx__
 	/*
 	 * We don't need early do_page_fault_ics() support, since unlike
diff --git a/samples/kprobes/kprobe_example.c b/samples/kprobes/kprobe_example.c
index ebf5e0c368e..366db1a9fb6 100644
--- a/samples/kprobes/kprobe_example.c
+++ b/samples/kprobes/kprobe_example.c
@@ -37,6 +37,11 @@ static int handler_pre(struct kprobe *p, struct pt_regs *regs)
 			" status = 0x%lx\n",
 		p->addr, regs->cp0_epc, regs->cp0_status);
 #endif
+#ifdef CONFIG_TILEGX
+	printk(KERN_INFO "pre_handler: p->addr = 0x%p, pc = 0x%lx,"
+			" ex1 = 0x%lx\n",
+		p->addr, regs->pc, regs->ex1);
+#endif
 
 	/* A dump_stack() here will give a stack backtrace */
 	return 0;
@@ -58,6 +63,10 @@ static void handler_post(struct kprobe *p, struct pt_regs *regs,
 	printk(KERN_INFO "post_handler: p->addr = 0x%p, status = 0x%lx\n",
 		p->addr, regs->cp0_status);
 #endif
+#ifdef CONFIG_TILEGX
+	printk(KERN_INFO "post_handler: p->addr = 0x%p, ex1 = 0x%lx\n",
+		p->addr, regs->ex1);
+#endif
 }
 
 /*
-- 
cgit v1.2.3-70-g09d2


From acbde1db294932623aad15dd8cc6e37b28340f26 Mon Sep 17 00:00:00 2001
From: Chris Metcalf <cmetcalf@tilera.com>
Date: Tue, 3 Sep 2013 14:41:36 -0400
Subject: tile: parameterize VA and PA space more cleanly

The existing code relied on the hardware definition (<arch/chip.h>)
to specify how much VA and PA space was available.  It's convenient
to allow customizing this for some configurations, so provide symbols
MAX_PA_WIDTH and MAX_VA_WIDTH in <asm/page.h> that can be modified
if desired.

Additionally, move away from the MEM_XX_INTRPT nomenclature to
define the start of various regions within the VA space.  In fact
the cleaner symbol is, for example, MEM_SV_START, to indicate the
start of the area used for supervisor code; the actual address of the
interrupt vectors is not as important, and can be changed if desired.
As part of this change, convert from "intrpt1" nomenclature (which
built in the old privilege-level 1 model) to a simple "intrpt".

Also strip out some tilepro-specific code supporting modifying the
PL the kernel could run at, since we don't actually support using
different PLs in tilepro, only tilegx.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
---
 arch/tile/include/asm/page.h       | 52 +++++++++++++++-----------------------
 arch/tile/include/asm/pgtable_32.h |  2 +-
 arch/tile/include/asm/pgtable_64.h |  3 +--
 arch/tile/include/asm/processor.h  |  2 +-
 arch/tile/kernel/head_32.S         |  4 +--
 arch/tile/kernel/head_64.S         |  6 ++---
 arch/tile/kernel/intvec_32.S       |  6 ++---
 arch/tile/kernel/intvec_64.S       |  8 +++---
 arch/tile/kernel/setup.c           |  8 +++---
 arch/tile/kernel/traps.c           |  2 +-
 arch/tile/kernel/vmlinux.lds.S     | 10 ++++----
 arch/tile/mm/init.c                |  8 +++---
 12 files changed, 51 insertions(+), 60 deletions(-)

(limited to 'arch/tile/kernel/traps.c')

diff --git a/arch/tile/include/asm/page.h b/arch/tile/include/asm/page.h
index b4f96c0024d..980843dd983 100644
--- a/arch/tile/include/asm/page.h
+++ b/arch/tile/include/asm/page.h
@@ -148,8 +148,12 @@ static inline __attribute_const__ int get_order(unsigned long size)
 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
 #endif
 
+/* Allow overriding how much VA or PA the kernel will use. */
+#define MAX_PA_WIDTH CHIP_PA_WIDTH()
+#define MAX_VA_WIDTH CHIP_VA_WIDTH()
+
 /* Each memory controller has PAs distinct in their high bits. */
-#define NR_PA_HIGHBIT_SHIFT (CHIP_PA_WIDTH() - CHIP_LOG_NUM_MSHIMS())
+#define NR_PA_HIGHBIT_SHIFT (MAX_PA_WIDTH - CHIP_LOG_NUM_MSHIMS())
 #define NR_PA_HIGHBIT_VALUES (1 << CHIP_LOG_NUM_MSHIMS())
 #define __pa_to_highbits(pa) ((phys_addr_t)(pa) >> NR_PA_HIGHBIT_SHIFT)
 #define __pfn_to_highbits(pfn) ((pfn) >> (NR_PA_HIGHBIT_SHIFT - PAGE_SHIFT))
@@ -160,7 +164,7 @@ static inline __attribute_const__ int get_order(unsigned long size)
  * We reserve the lower half of memory for user-space programs, and the
  * upper half for system code.  We re-map all of physical memory in the
  * upper half, which takes a quarter of our VA space.  Then we have
- * the vmalloc regions.  The supervisor code lives at 0xfffffff700000000,
+ * the vmalloc regions.  The supervisor code lives at the highest address,
  * with the hypervisor above that.
  *
  * Loadable kernel modules are placed immediately after the static
@@ -172,26 +176,19 @@ static inline __attribute_const__ int get_order(unsigned long size)
  * Similarly, for now we don't play any struct page mapping games.
  */
 
-#if CHIP_PA_WIDTH() + 2 > CHIP_VA_WIDTH()
+#if MAX_PA_WIDTH + 2 > MAX_VA_WIDTH
 # error Too much PA to map with the VA available!
 #endif
-#define HALF_VA_SPACE           (_AC(1, UL) << (CHIP_VA_WIDTH() - 1))
 
-#define MEM_LOW_END		(HALF_VA_SPACE - 1)         /* low half */
-#define MEM_HIGH_START		(-HALF_VA_SPACE)            /* high half */
-#define PAGE_OFFSET		MEM_HIGH_START
-#define FIXADDR_BASE		_AC(0xfffffff400000000, UL) /* 4 GB */
-#define FIXADDR_TOP		_AC(0xfffffff500000000, UL) /* 4 GB */
+#define PAGE_OFFSET		(-(_AC(1, UL) << (MAX_VA_WIDTH - 1)))
+#define KERNEL_HIGH_VADDR	_AC(0xfffffff800000000, UL)  /* high 32GB */
+#define FIXADDR_BASE		(KERNEL_HIGH_VADDR - 0x400000000) /* 4 GB */
+#define FIXADDR_TOP		(KERNEL_HIGH_VADDR - 0x300000000) /* 4 GB */
 #define _VMALLOC_START		FIXADDR_TOP
-#define HUGE_VMAP_BASE		_AC(0xfffffff600000000, UL) /* 4 GB */
-#define MEM_SV_START		_AC(0xfffffff700000000, UL) /* 256 MB */
-#define MEM_SV_INTRPT		MEM_SV_START
-#define MEM_MODULE_START	_AC(0xfffffff710000000, UL) /* 256 MB */
+#define HUGE_VMAP_BASE		(KERNEL_HIGH_VADDR - 0x200000000) /* 4 GB */
+#define MEM_SV_START		(KERNEL_HIGH_VADDR - 0x100000000) /* 256 MB */
+#define MEM_MODULE_START	(MEM_SV_START + (256*1024*1024)) /* 256 MB */
 #define MEM_MODULE_END		(MEM_MODULE_START + (256*1024*1024))
-#define MEM_HV_START		_AC(0xfffffff800000000, UL) /* 32 GB */
-
-/* Highest DTLB address we will use */
-#define KERNEL_HIGH_VADDR	MEM_SV_START
 
 #else /* !__tilegx__ */
 
@@ -213,25 +210,18 @@ static inline __attribute_const__ int get_order(unsigned long size)
  * values, and after that, we show "typical" values, since the actual
  * addresses depend on kernel #defines.
  *
- * MEM_HV_INTRPT                   0xfe000000
- * MEM_SV_INTRPT (kernel code)     0xfd000000
+ * MEM_HV_START                    0xfe000000
+ * MEM_SV_START  (kernel code)     0xfd000000
  * MEM_USER_INTRPT (user vector)   0xfc000000
- * FIX_KMAP_xxx                    0xf8000000 (via NR_CPUS * KM_TYPE_NR)
- * PKMAP_BASE                      0xf7000000 (via LAST_PKMAP)
- * HUGE_VMAP                       0xf3000000 (via CONFIG_NR_HUGE_VMAPS)
- * VMALLOC_START                   0xf0000000 (via __VMALLOC_RESERVE)
+ * FIX_KMAP_xxx                    0xfa000000 (via NR_CPUS * KM_TYPE_NR)
+ * PKMAP_BASE                      0xf9000000 (via LAST_PKMAP)
+ * VMALLOC_START                   0xf7000000 (via VMALLOC_RESERVE)
  * mapped LOWMEM                   0xc0000000
  */
 
 #define MEM_USER_INTRPT		_AC(0xfc000000, UL)
-#if CONFIG_KERNEL_PL == 1
-#define MEM_SV_INTRPT		_AC(0xfd000000, UL)
-#define MEM_HV_INTRPT		_AC(0xfe000000, UL)
-#else
-#define MEM_GUEST_INTRPT	_AC(0xfd000000, UL)
-#define MEM_SV_INTRPT		_AC(0xfe000000, UL)
-#define MEM_HV_INTRPT		_AC(0xff000000, UL)
-#endif
+#define MEM_SV_START		_AC(0xfd000000, UL)
+#define MEM_HV_START		_AC(0xfe000000, UL)
 
 #define INTRPT_SIZE		0x4000
 
diff --git a/arch/tile/include/asm/pgtable_32.h b/arch/tile/include/asm/pgtable_32.h
index e5bdc0ea85c..63142ab3b3d 100644
--- a/arch/tile/include/asm/pgtable_32.h
+++ b/arch/tile/include/asm/pgtable_32.h
@@ -89,7 +89,7 @@ static inline int pud_huge_page(pud_t pud)	{ return 0; }
 /* We don't define any pgds for these addresses. */
 static inline int pgd_addr_invalid(unsigned long addr)
 {
-	return addr >= MEM_HV_INTRPT;
+	return addr >= MEM_HV_START;
 }
 
 /*
diff --git a/arch/tile/include/asm/pgtable_64.h b/arch/tile/include/asm/pgtable_64.h
index 7cb8d355f91..3421177f737 100644
--- a/arch/tile/include/asm/pgtable_64.h
+++ b/arch/tile/include/asm/pgtable_64.h
@@ -140,8 +140,7 @@ static inline unsigned long pgd_addr_normalize(unsigned long addr)
 /* We don't define any pgds for these addresses. */
 static inline int pgd_addr_invalid(unsigned long addr)
 {
-	return addr >= MEM_HV_START ||
-		(addr > MEM_LOW_END && addr < MEM_HIGH_START);
+	return addr >= KERNEL_HIGH_VADDR || addr != pgd_addr_normalize(addr);
 }
 
 /*
diff --git a/arch/tile/include/asm/processor.h b/arch/tile/include/asm/processor.h
index c72fcba7016..5aa54319d2e 100644
--- a/arch/tile/include/asm/processor.h
+++ b/arch/tile/include/asm/processor.h
@@ -168,7 +168,7 @@ struct thread_struct {
 #ifndef __ASSEMBLY__
 
 #ifdef __tilegx__
-#define TASK_SIZE_MAX		(MEM_LOW_END + 1)
+#define TASK_SIZE_MAX		(_AC(1, UL) << (MAX_VA_WIDTH - 1))
 #else
 #define TASK_SIZE_MAX		PAGE_OFFSET
 #endif
diff --git a/arch/tile/kernel/head_32.S b/arch/tile/kernel/head_32.S
index f3f17b0283f..8d5b40ff292 100644
--- a/arch/tile/kernel/head_32.S
+++ b/arch/tile/kernel/head_32.S
@@ -162,8 +162,8 @@ ENTRY(swapper_pg_dir)
 	.set addr, addr + PGDIR_SIZE
 	.endr
 
-	/* The true text VAs are mapped as VA = PA + MEM_SV_INTRPT */
-	PTE MEM_SV_INTRPT, 0, (1 << (HV_PTE_INDEX_READABLE - 32)) | \
+	/* The true text VAs are mapped as VA = PA + MEM_SV_START */
+	PTE MEM_SV_START, 0, (1 << (HV_PTE_INDEX_READABLE - 32)) | \
 			      (1 << (HV_PTE_INDEX_EXECUTABLE - 32))
 	.org swapper_pg_dir + PGDIR_SIZE
 	END(swapper_pg_dir)
diff --git a/arch/tile/kernel/head_64.S b/arch/tile/kernel/head_64.S
index 652b8142615..bd0e12f283f 100644
--- a/arch/tile/kernel/head_64.S
+++ b/arch/tile/kernel/head_64.S
@@ -135,9 +135,9 @@ ENTRY(_start)
 1:
 
 	/* Install the interrupt base. */
-	moveli r0, hw2_last(MEM_SV_START)
-	shl16insli r0, r0, hw1(MEM_SV_START)
-	shl16insli r0, r0, hw0(MEM_SV_START)
+	moveli r0, hw2_last(intrpt_start)
+	shl16insli r0, r0, hw1(intrpt_start)
+	shl16insli r0, r0, hw0(intrpt_start)
 	mtspr SPR_INTERRUPT_VECTOR_BASE_K, r0
 
 	/* Get our processor number and save it away in SAVE_K_0. */
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index f3d26f48e65..f084f1c7afd 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -353,7 +353,7 @@ intvec_\vecname:
 #ifdef __COLLECT_LINKER_FEEDBACK__
 	.pushsection .text.intvec_feedback,"ax"
 	.org    (\vecnum << 5)
-	FEEDBACK_ENTER_EXPLICIT(intvec_\vecname, .intrpt1, 1 << 8)
+	FEEDBACK_ENTER_EXPLICIT(intvec_\vecname, .intrpt, 1 << 8)
 	jrp     lr
 	.popsection
 #endif
@@ -1890,8 +1890,8 @@ int_unalign:
 	push_extra_callee_saves r0
 	j       do_trap
 
-/* Include .intrpt1 array of interrupt vectors */
-	.section ".intrpt1", "ax"
+/* Include .intrpt array of interrupt vectors */
+	.section ".intrpt", "ax"
 
 #define op_handle_perf_interrupt bad_intr
 #define op_handle_aux_perf_interrupt bad_intr
diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S
index f020f01960c..c3a2335fa6a 100644
--- a/arch/tile/kernel/intvec_64.S
+++ b/arch/tile/kernel/intvec_64.S
@@ -535,7 +535,7 @@ intvec_\vecname:
 #ifdef __COLLECT_LINKER_FEEDBACK__
 	.pushsection .text.intvec_feedback,"ax"
 	.org    (\vecnum << 5)
-	FEEDBACK_ENTER_EXPLICIT(intvec_\vecname, .intrpt1, 1 << 8)
+	FEEDBACK_ENTER_EXPLICIT(intvec_\vecname, .intrpt, 1 << 8)
 	jrp     lr
 	.popsection
 #endif
@@ -1485,8 +1485,10 @@ STD_ENTRY(fill_ra_stack)
 		__int_hand   \vecnum, \vecname, \c_routine, \processing
 	.endm
 
-/* Include .intrpt1 array of interrupt vectors */
-	.section ".intrpt1", "ax"
+/* Include .intrpt array of interrupt vectors */
+	.section ".intrpt", "ax"
+	.global intrpt_start
+intrpt_start:
 
 #define op_handle_perf_interrupt bad_intr
 #define op_handle_aux_perf_interrupt bad_intr
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 774e819f6a5..10217844052 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -268,7 +268,7 @@ early_param("vmalloc", parse_vmalloc);
 /*
  * Determine for each controller where its lowmem is mapped and how much of
  * it is mapped there.  On controller zero, the first few megabytes are
- * already mapped in as code at MEM_SV_INTRPT, so in principle we could
+ * already mapped in as code at MEM_SV_START, so in principle we could
  * start our data mappings higher up, but for now we don't bother, to avoid
  * additional confusion.
  *
@@ -1242,7 +1242,7 @@ static void __init validate_va(void)
 #ifndef __tilegx__   /* FIXME: GX: probably some validation relevant here */
 	/*
 	 * Similarly, make sure we're only using allowed VAs.
-	 * We assume we can contiguously use MEM_USER_INTRPT .. MEM_HV_INTRPT,
+	 * We assume we can contiguously use MEM_USER_INTRPT .. MEM_HV_START,
 	 * and 0 .. KERNEL_HIGH_VADDR.
 	 * In addition, make sure we CAN'T use the end of memory, since
 	 * we use the last chunk of each pgd for the pgd_list.
@@ -1257,7 +1257,7 @@ static void __init validate_va(void)
 		if (range.size == 0)
 			break;
 		if (range.start <= MEM_USER_INTRPT &&
-		    range.start + range.size >= MEM_HV_INTRPT)
+		    range.start + range.size >= MEM_HV_START)
 			user_kernel_ok = 1;
 		if (range.start == 0)
 			max_va = range.size;
@@ -1693,7 +1693,7 @@ insert_ram_resource(u64 start_pfn, u64 end_pfn, bool reserved)
 static int __init request_standard_resources(void)
 {
 	int i;
-	enum { CODE_DELTA = MEM_SV_INTRPT - PAGE_OFFSET };
+	enum { CODE_DELTA = MEM_SV_START - PAGE_OFFSET };
 
 #if defined(CONFIG_PCI) && !defined(__tilegx__)
 	insert_non_bus_resource();
diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c
index cfff6f958d5..628661f6a92 100644
--- a/arch/tile/kernel/traps.c
+++ b/arch/tile/kernel/traps.c
@@ -30,7 +30,7 @@
 
 void __init trap_init(void)
 {
-	/* Nothing needed here since we link code at .intrpt1 */
+	/* Nothing needed here since we link code at .intrpt */
 }
 
 int unaligned_fixup = 1;
diff --git a/arch/tile/kernel/vmlinux.lds.S b/arch/tile/kernel/vmlinux.lds.S
index c7ae53df429..8b2016307eb 100644
--- a/arch/tile/kernel/vmlinux.lds.S
+++ b/arch/tile/kernel/vmlinux.lds.S
@@ -5,7 +5,7 @@
 #include <hv/hypervisor.h>
 
 /* Text loads starting from the supervisor interrupt vector address. */
-#define TEXT_OFFSET MEM_SV_INTRPT
+#define TEXT_OFFSET MEM_SV_START
 
 OUTPUT_ARCH(tile)
 ENTRY(_start)
@@ -13,7 +13,7 @@ jiffies = jiffies_64;
 
 PHDRS
 {
-  intrpt1 PT_LOAD ;
+  intrpt PT_LOAD ;
   text PT_LOAD ;
   data PT_LOAD ;
 }
@@ -24,11 +24,11 @@ SECTIONS
   #define LOAD_OFFSET TEXT_OFFSET
 
   /* Interrupt vectors */
-  .intrpt1 (LOAD_OFFSET) : AT ( 0 )   /* put at the start of physical memory */
+  .intrpt (LOAD_OFFSET) : AT ( 0 )   /* put at the start of physical memory */
   {
     _text = .;
-    *(.intrpt1)
-  } :intrpt1 =0
+    *(.intrpt)
+  } :intrpt =0
 
   /* Hypervisor call vectors */
   . = ALIGN(0x10000);
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index 3bfa1275e33..c6d21601ec4 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -234,7 +234,7 @@ static pgprot_t __init init_pgprot(ulong address)
 {
 	int cpu;
 	unsigned long page;
-	enum { CODE_DELTA = MEM_SV_INTRPT - PAGE_OFFSET };
+	enum { CODE_DELTA = MEM_SV_START - PAGE_OFFSET };
 
 #if CHIP_HAS_CBOX_HOME_MAP()
 	/* For kdata=huge, everything is just hash-for-home. */
@@ -538,7 +538,7 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
 		}
 	}
 
-	address = MEM_SV_INTRPT;
+	address = MEM_SV_START;
 	pmd = get_pmd(pgtables, address);
 	pfn = 0;  /* code starts at PA 0 */
 	if (ktext_small) {
@@ -1021,7 +1021,7 @@ static void free_init_pages(char *what, unsigned long begin, unsigned long end)
 
 void free_initmem(void)
 {
-	const unsigned long text_delta = MEM_SV_INTRPT - PAGE_OFFSET;
+	const unsigned long text_delta = MEM_SV_START - PAGE_OFFSET;
 
 	/*
 	 * Evict the dirty initdata on the boot cpu, evict the w1data
@@ -1040,7 +1040,7 @@ void free_initmem(void)
 
 	/*
 	 * Free the pages mapped from 0xc0000000 that correspond to code
-	 * pages from MEM_SV_INTRPT that we won't use again after init.
+	 * pages from MEM_SV_START that we won't use again after init.
 	 */
 	free_init_pages("unused kernel text",
 			(unsigned long)_sinittext - text_delta,
-- 
cgit v1.2.3-70-g09d2


From 8157107b13099d6eb2e8ccd00b9aba009c698c38 Mon Sep 17 00:00:00 2001
From: Chris Metcalf <cmetcalf@tilera.com>
Date: Wed, 28 Aug 2013 19:53:17 -0400
Subject: tilegx: support KGDB

Enter kernel debugger at boot with:
  --hvd UART_1=1 --hvx kgdbwait --hvx kgdboc=ttyS1,115200
or at runtime with:
  echo ttyS1,115200 > /sys/module/kgdboc/parameters/kgdboc
  echo g > /proc/sysrq-trigger

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
---
 arch/tile/Kconfig            |   1 +
 arch/tile/include/asm/kgdb.h |  71 ++++++
 arch/tile/kernel/Makefile    |   1 +
 arch/tile/kernel/kgdb.c      | 499 +++++++++++++++++++++++++++++++++++++++++++
 arch/tile/kernel/traps.c     |   5 +
 5 files changed, 577 insertions(+)
 create mode 100644 arch/tile/include/asm/kgdb.h
 create mode 100644 arch/tile/kernel/kgdb.c

(limited to 'arch/tile/kernel/traps.c')

diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 616e2344ec6..de599c14c4f 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -127,6 +127,7 @@ config TILEGX
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_KPROBES
 	select HAVE_KRETPROBES
+	select HAVE_ARCH_KGDB
 
 config TILEPRO
 	def_bool !TILEGX
diff --git a/arch/tile/include/asm/kgdb.h b/arch/tile/include/asm/kgdb.h
new file mode 100644
index 00000000000..280c181cf0d
--- /dev/null
+++ b/arch/tile/include/asm/kgdb.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2013 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ *
+ * TILE-Gx KGDB support.
+ */
+
+#ifndef __TILE_KGDB_H__
+#define __TILE_KGDB_H__
+
+#include <linux/kdebug.h>
+#include <arch/opcode.h>
+
+#define GDB_SIZEOF_REG		sizeof(unsigned long)
+
+/*
+ * TILE-Gx gdb is expecting the following register layout:
+ * 56 GPRs(R0 - R52, TP, SP, LR), 8 special GPRs(networks and ZERO),
+ * plus the PC and the faultnum.
+ *
+ * Even though the kernel does not use the 8 special GPRs, they need to be
+ * present in the registers sent for correct processing in the host-side gdb.
+ *
+ */
+#define DBG_MAX_REG_NUM		(56+8+2)
+#define NUMREGBYTES		(DBG_MAX_REG_NUM * GDB_SIZEOF_REG)
+
+/*
+ * BUFMAX defines the maximum number of characters in inbound/outbound
+ * buffers; at least NUMREGBYTES*2 are needed for register packets.
+ * A longer buffer is needed to list all threads.
+ */
+#define BUFMAX			2048
+
+#define BREAK_INSTR_SIZE	TILEGX_BUNDLE_SIZE_IN_BYTES
+
+/*
+ * Require cache flush for set/clear a software breakpoint or write memory.
+ */
+#define CACHE_FLUSH_IS_SAFE	1
+
+/*
+ * The compiled-in breakpoint instruction can be used to "break" into
+ * the debugger via magic system request key (sysrq-G).
+ */
+static tile_bundle_bits compiled_bpt = TILEGX_BPT_BUNDLE | DIE_COMPILED_BPT;
+
+enum tilegx_regnum {
+	TILEGX_PC_REGNUM = TREG_LAST_GPR + 9,
+	TILEGX_FAULTNUM_REGNUM,
+};
+
+/*
+ * Generate a breakpoint exception to "break" into the debugger.
+ */
+static inline void arch_kgdb_breakpoint(void)
+{
+	asm volatile (".quad %0\n\t"
+		      ::""(compiled_bpt));
+}
+
+#endif /* __TILE_KGDB_H__ */
diff --git a/arch/tile/kernel/Makefile b/arch/tile/kernel/Makefile
index b7c8b5e19d5..27a2bf39dae 100644
--- a/arch/tile/kernel/Makefile
+++ b/arch/tile/kernel/Makefile
@@ -29,5 +29,6 @@ obj-$(CONFIG_TILE_USB)		+= usb.o
 obj-$(CONFIG_TILE_HVGLUE_TRACE)	+= hvglue_trace.o
 obj-$(CONFIG_FUNCTION_TRACER)	+= ftrace.o mcount_64.o
 obj-$(CONFIG_KPROBES)		+= kprobes.o
+obj-$(CONFIG_KGDB)		+= kgdb.o
 
 obj-y				+= vdso/
diff --git a/arch/tile/kernel/kgdb.c b/arch/tile/kernel/kgdb.c
new file mode 100644
index 00000000000..4cd88381a83
--- /dev/null
+++ b/arch/tile/kernel/kgdb.c
@@ -0,0 +1,499 @@
+/*
+ * Copyright 2013 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ *
+ * TILE-Gx KGDB support.
+ */
+
+#include <linux/ptrace.h>
+#include <linux/kgdb.h>
+#include <linux/kdebug.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <asm/cacheflush.h>
+
+static tile_bundle_bits singlestep_insn = TILEGX_BPT_BUNDLE | DIE_SSTEPBP;
+static unsigned long stepped_addr;
+static tile_bundle_bits stepped_instr;
+
+struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
+	{ "r0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[0])},
+	{ "r1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[1])},
+	{ "r2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[2])},
+	{ "r3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[3])},
+	{ "r4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[4])},
+	{ "r5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[5])},
+	{ "r6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[6])},
+	{ "r7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[7])},
+	{ "r8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[8])},
+	{ "r9", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[9])},
+	{ "r10", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[10])},
+	{ "r11", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[11])},
+	{ "r12", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[12])},
+	{ "r13", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[13])},
+	{ "r14", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[14])},
+	{ "r15", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[15])},
+	{ "r16", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[16])},
+	{ "r17", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[17])},
+	{ "r18", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[18])},
+	{ "r19", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[19])},
+	{ "r20", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[20])},
+	{ "r21", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[21])},
+	{ "r22", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[22])},
+	{ "r23", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[23])},
+	{ "r24", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[24])},
+	{ "r25", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[25])},
+	{ "r26", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[26])},
+	{ "r27", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[27])},
+	{ "r28", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[28])},
+	{ "r29", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[29])},
+	{ "r30", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[30])},
+	{ "r31", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[31])},
+	{ "r32", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[32])},
+	{ "r33", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[33])},
+	{ "r34", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[34])},
+	{ "r35", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[35])},
+	{ "r36", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[36])},
+	{ "r37", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[37])},
+	{ "r38", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[38])},
+	{ "r39", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[39])},
+	{ "r40", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[40])},
+	{ "r41", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[41])},
+	{ "r42", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[42])},
+	{ "r43", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[43])},
+	{ "r44", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[44])},
+	{ "r45", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[45])},
+	{ "r46", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[46])},
+	{ "r47", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[47])},
+	{ "r48", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[48])},
+	{ "r49", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[49])},
+	{ "r50", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[50])},
+	{ "r51", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[51])},
+	{ "r52", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[52])},
+	{ "tp", GDB_SIZEOF_REG, offsetof(struct pt_regs, tp)},
+	{ "sp", GDB_SIZEOF_REG, offsetof(struct pt_regs, sp)},
+	{ "lr", GDB_SIZEOF_REG, offsetof(struct pt_regs, lr)},
+	{ "sn", GDB_SIZEOF_REG, -1},
+	{ "idn0", GDB_SIZEOF_REG, -1},
+	{ "idn1", GDB_SIZEOF_REG, -1},
+	{ "udn0", GDB_SIZEOF_REG, -1},
+	{ "udn1", GDB_SIZEOF_REG, -1},
+	{ "udn2", GDB_SIZEOF_REG, -1},
+	{ "udn3", GDB_SIZEOF_REG, -1},
+	{ "zero", GDB_SIZEOF_REG, -1},
+	{ "pc", GDB_SIZEOF_REG, offsetof(struct pt_regs, pc)},
+	{ "faultnum", GDB_SIZEOF_REG, offsetof(struct pt_regs, faultnum)},
+};
+
+char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
+{
+	if (regno >= DBG_MAX_REG_NUM || regno < 0)
+		return NULL;
+
+	if (dbg_reg_def[regno].offset != -1)
+		memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
+		       dbg_reg_def[regno].size);
+	else
+		memset(mem, 0, dbg_reg_def[regno].size);
+	return dbg_reg_def[regno].name;
+}
+
+int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
+{
+	if (regno >= DBG_MAX_REG_NUM || regno < 0)
+		return -EINVAL;
+
+	if (dbg_reg_def[regno].offset != -1)
+		memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
+		       dbg_reg_def[regno].size);
+	return 0;
+}
+
+/*
+ * Similar to pt_regs_to_gdb_regs() except that process is sleeping and so
+ * we may not be able to get all the info.
+ */
+void
+sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
+{
+	int reg;
+	struct pt_regs *thread_regs;
+	unsigned long *ptr = gdb_regs;
+
+	if (task == NULL)
+		return;
+
+	/* Initialize to zero. */
+	memset(gdb_regs, 0, NUMREGBYTES);
+
+	thread_regs = task_pt_regs(task);
+	for (reg = 0; reg <= TREG_LAST_GPR; reg++)
+		*(ptr++) = thread_regs->regs[reg];
+
+	gdb_regs[TILEGX_PC_REGNUM] = thread_regs->pc;
+	gdb_regs[TILEGX_FAULTNUM_REGNUM] = thread_regs->faultnum;
+}
+
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
+{
+	regs->pc = pc;
+}
+
+static void kgdb_call_nmi_hook(void *ignored)
+{
+	kgdb_nmicallback(raw_smp_processor_id(), NULL);
+}
+
+void kgdb_roundup_cpus(unsigned long flags)
+{
+	local_irq_enable();
+	smp_call_function(kgdb_call_nmi_hook, NULL, 0);
+	local_irq_disable();
+}
+
+/*
+ * Convert a kernel address to the writable kernel text mapping.
+ */
+static unsigned long writable_address(unsigned long addr)
+{
+	unsigned long ret = 0;
+
+	if (core_kernel_text(addr))
+		ret = addr - MEM_SV_START + PAGE_OFFSET;
+	else if (is_module_text_address(addr))
+		ret = addr;
+	else
+		pr_err("Unknown virtual address 0x%lx\n", addr);
+
+	return ret;
+}
+
+/*
+ * Calculate the new address for after a step.
+ */
+static unsigned long get_step_address(struct pt_regs *regs)
+{
+	int src_reg;
+	int jump_off;
+	int br_off;
+	unsigned long addr;
+	unsigned int opcode;
+	tile_bundle_bits bundle;
+
+	/* Move to the next instruction by default. */
+	addr = regs->pc + TILEGX_BUNDLE_SIZE_IN_BYTES;
+	bundle = *(unsigned long *)instruction_pointer(regs);
+
+	/* 0: X mode, Otherwise: Y mode. */
+	if (bundle & TILEGX_BUNDLE_MODE_MASK) {
+		if (get_Opcode_Y1(bundle) == RRR_1_OPCODE_Y1 &&
+		    get_RRROpcodeExtension_Y1(bundle) ==
+		    UNARY_RRR_1_OPCODE_Y1) {
+			opcode = get_UnaryOpcodeExtension_Y1(bundle);
+
+			switch (opcode) {
+			case JALR_UNARY_OPCODE_Y1:
+			case JALRP_UNARY_OPCODE_Y1:
+			case JR_UNARY_OPCODE_Y1:
+			case JRP_UNARY_OPCODE_Y1:
+				src_reg = get_SrcA_Y1(bundle);
+				dbg_get_reg(src_reg, &addr, regs);
+				break;
+			}
+		}
+	} else if (get_Opcode_X1(bundle) == RRR_0_OPCODE_X1) {
+		if (get_RRROpcodeExtension_X1(bundle) ==
+		    UNARY_RRR_0_OPCODE_X1) {
+			opcode = get_UnaryOpcodeExtension_X1(bundle);
+
+			switch (opcode) {
+			case JALR_UNARY_OPCODE_X1:
+			case JALRP_UNARY_OPCODE_X1:
+			case JR_UNARY_OPCODE_X1:
+			case JRP_UNARY_OPCODE_X1:
+				src_reg = get_SrcA_X1(bundle);
+				dbg_get_reg(src_reg, &addr, regs);
+				break;
+			}
+		}
+	} else if (get_Opcode_X1(bundle) == JUMP_OPCODE_X1) {
+		opcode = get_JumpOpcodeExtension_X1(bundle);
+
+		switch (opcode) {
+		case JAL_JUMP_OPCODE_X1:
+		case J_JUMP_OPCODE_X1:
+			jump_off = sign_extend(get_JumpOff_X1(bundle), 27);
+			addr = regs->pc +
+				(jump_off << TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES);
+			break;
+		}
+	} else if (get_Opcode_X1(bundle) == BRANCH_OPCODE_X1) {
+		br_off = 0;
+		opcode = get_BrType_X1(bundle);
+
+		switch (opcode) {
+		case BEQZT_BRANCH_OPCODE_X1:
+		case BEQZ_BRANCH_OPCODE_X1:
+			if (get_SrcA_X1(bundle) == 0)
+				br_off = get_BrOff_X1(bundle);
+			break;
+		case BGEZT_BRANCH_OPCODE_X1:
+		case BGEZ_BRANCH_OPCODE_X1:
+			if (get_SrcA_X1(bundle) >= 0)
+				br_off = get_BrOff_X1(bundle);
+			break;
+		case BGTZT_BRANCH_OPCODE_X1:
+		case BGTZ_BRANCH_OPCODE_X1:
+			if (get_SrcA_X1(bundle) > 0)
+				br_off = get_BrOff_X1(bundle);
+			break;
+		case BLBCT_BRANCH_OPCODE_X1:
+		case BLBC_BRANCH_OPCODE_X1:
+			if (!(get_SrcA_X1(bundle) & 1))
+				br_off = get_BrOff_X1(bundle);
+			break;
+		case BLBST_BRANCH_OPCODE_X1:
+		case BLBS_BRANCH_OPCODE_X1:
+			if (get_SrcA_X1(bundle) & 1)
+				br_off = get_BrOff_X1(bundle);
+			break;
+		case BLEZT_BRANCH_OPCODE_X1:
+		case BLEZ_BRANCH_OPCODE_X1:
+			if (get_SrcA_X1(bundle) <= 0)
+				br_off = get_BrOff_X1(bundle);
+			break;
+		case BLTZT_BRANCH_OPCODE_X1:
+		case BLTZ_BRANCH_OPCODE_X1:
+			if (get_SrcA_X1(bundle) < 0)
+				br_off = get_BrOff_X1(bundle);
+			break;
+		case BNEZT_BRANCH_OPCODE_X1:
+		case BNEZ_BRANCH_OPCODE_X1:
+			if (get_SrcA_X1(bundle) != 0)
+				br_off = get_BrOff_X1(bundle);
+			break;
+		}
+
+		if (br_off != 0) {
+			br_off = sign_extend(br_off, 17);
+			addr = regs->pc +
+				(br_off << TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES);
+		}
+	}
+
+	return addr;
+}
+
+/*
+ * Replace the next instruction after the current instruction with a
+ * breakpoint instruction.
+ */
+static void do_single_step(struct pt_regs *regs)
+{
+	unsigned long addr_wr;
+
+	/* Determine where the target instruction will send us to. */
+	stepped_addr = get_step_address(regs);
+	probe_kernel_read((char *)&stepped_instr, (char *)stepped_addr,
+			  BREAK_INSTR_SIZE);
+
+	addr_wr = writable_address(stepped_addr);
+	probe_kernel_write((char *)addr_wr, (char *)&singlestep_insn,
+			   BREAK_INSTR_SIZE);
+	smp_wmb();
+	flush_icache_range(stepped_addr, stepped_addr + BREAK_INSTR_SIZE);
+}
+
+static void undo_single_step(struct pt_regs *regs)
+{
+	unsigned long addr_wr;
+
+	if (stepped_instr == 0)
+		return;
+
+	addr_wr = writable_address(stepped_addr);
+	probe_kernel_write((char *)addr_wr, (char *)&stepped_instr,
+			   BREAK_INSTR_SIZE);
+	stepped_instr = 0;
+	smp_wmb();
+	flush_icache_range(stepped_addr, stepped_addr + BREAK_INSTR_SIZE);
+}
+
+/*
+ * Calls linux_debug_hook before the kernel dies. If KGDB is enabled,
+ * then try to fall into the debugger.
+ */
+static int
+kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
+{
+	int ret;
+	unsigned long flags;
+	struct die_args *args = (struct die_args *)ptr;
+	struct pt_regs *regs = args->regs;
+
+#ifdef CONFIG_KPROBES
+	/*
+	 * Return immediately if the kprobes fault notifier has set
+	 * DIE_PAGE_FAULT.
+	 */
+	if (cmd == DIE_PAGE_FAULT)
+		return NOTIFY_DONE;
+#endif /* CONFIG_KPROBES */
+
+	switch (cmd) {
+	case DIE_BREAK:
+	case DIE_COMPILED_BPT:
+		break;
+	case DIE_SSTEPBP:
+		local_irq_save(flags);
+		kgdb_handle_exception(0, SIGTRAP, 0, regs);
+		local_irq_restore(flags);
+		return NOTIFY_STOP;
+	default:
+		/* Userspace events, ignore. */
+		if (user_mode(regs))
+			return NOTIFY_DONE;
+	}
+
+	local_irq_save(flags);
+	ret = kgdb_handle_exception(args->trapnr, args->signr, args->err, regs);
+	local_irq_restore(flags);
+	if (ret)
+		return NOTIFY_DONE;
+
+	return NOTIFY_STOP;
+}
+
+static struct notifier_block kgdb_notifier = {
+	.notifier_call = kgdb_notify,
+};
+
+/*
+ * kgdb_arch_handle_exception - Handle architecture specific GDB packets.
+ * @vector: The error vector of the exception that happened.
+ * @signo: The signal number of the exception that happened.
+ * @err_code: The error code of the exception that happened.
+ * @remcom_in_buffer: The buffer of the packet we have read.
+ * @remcom_out_buffer: The buffer of %BUFMAX bytes to write a packet into.
+ * @regs: The &struct pt_regs of the current process.
+ *
+ * This function MUST handle the 'c' and 's' command packets,
+ * as well as packets to set / remove a hardware breakpoint, if used.
+ * If there are additional packets which the hardware needs to handle,
+ * they are handled here. The code should return -1 if it wants to
+ * process more packets, and a %0 or %1 if it wants to exit from the
+ * kgdb callback.
+ */
+int kgdb_arch_handle_exception(int vector, int signo, int err_code,
+			       char *remcom_in_buffer, char *remcom_out_buffer,
+			       struct pt_regs *regs)
+{
+	char *ptr;
+	unsigned long address;
+
+	/* Undo any stepping we may have done. */
+	undo_single_step(regs);
+
+	switch (remcom_in_buffer[0]) {
+	case 'c':
+	case 's':
+	case 'D':
+	case 'k':
+		/*
+		 * Try to read optional parameter, pc unchanged if no parm.
+		 * If this was a compiled-in breakpoint, we need to move
+		 * to the next instruction or we will just breakpoint
+		 * over and over again.
+		 */
+		ptr = &remcom_in_buffer[1];
+		if (kgdb_hex2long(&ptr, &address))
+			regs->pc = address;
+		else if (*(unsigned long *)regs->pc == compiled_bpt)
+			regs->pc += BREAK_INSTR_SIZE;
+
+		if (remcom_in_buffer[0] == 's') {
+			do_single_step(regs);
+			kgdb_single_step = 1;
+			atomic_set(&kgdb_cpu_doing_single_step,
+				   raw_smp_processor_id());
+		} else
+			atomic_set(&kgdb_cpu_doing_single_step, -1);
+
+		return 0;
+	}
+
+	return -1; /* this means that we do not want to exit from the handler */
+}
+
+struct kgdb_arch arch_kgdb_ops;
+
+/*
+ * kgdb_arch_init - Perform any architecture specific initialization.
+ *
+ * This function will handle the initialization of any architecture
+ * specific callbacks.
+ */
+int kgdb_arch_init(void)
+{
+	tile_bundle_bits bundle = TILEGX_BPT_BUNDLE;
+
+	memcpy(arch_kgdb_ops.gdb_bpt_instr, &bundle, BREAK_INSTR_SIZE);
+	return register_die_notifier(&kgdb_notifier);
+}
+
+/*
+ * kgdb_arch_exit - Perform any architecture specific uninitialization.
+ *
+ * This function will handle the uninitialization of any architecture
+ * specific callbacks, for dynamic registration and unregistration.
+ */
+void kgdb_arch_exit(void)
+{
+	unregister_die_notifier(&kgdb_notifier);
+}
+
+int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
+{
+	int err;
+	unsigned long addr_wr = writable_address(bpt->bpt_addr);
+
+	if (addr_wr == 0)
+		return -1;
+
+	err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
+				BREAK_INSTR_SIZE);
+	if (err)
+		return err;
+
+	err = probe_kernel_write((char *)addr_wr, arch_kgdb_ops.gdb_bpt_instr,
+				 BREAK_INSTR_SIZE);
+	smp_wmb();
+	flush_icache_range((unsigned long)bpt->bpt_addr,
+			   (unsigned long)bpt->bpt_addr + BREAK_INSTR_SIZE);
+	return err;
+}
+
+int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
+{
+	int err;
+	unsigned long addr_wr = writable_address(bpt->bpt_addr);
+
+	if (addr_wr == 0)
+		return -1;
+
+	err = probe_kernel_write((char *)addr_wr, (char *)bpt->saved_instr,
+				 BREAK_INSTR_SIZE);
+	smp_wmb();
+	flush_icache_range((unsigned long)bpt->bpt_addr,
+			   (unsigned long)bpt->bpt_addr + BREAK_INSTR_SIZE);
+	return err;
+}
diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c
index 628661f6a92..a937365b342 100644
--- a/arch/tile/kernel/traps.c
+++ b/arch/tile/kernel/traps.c
@@ -240,6 +240,11 @@ static int do_bpt(struct pt_regs *regs)
 		notify_die(DIE_BREAK, "debug", regs, bundle,
 			INT_ILL, SIGTRAP);
 		break;
+	/* compiled_bpt */
+	case DIE_COMPILED_BPT:
+		notify_die(DIE_COMPILED_BPT, "debug", regs, bundle,
+			INT_ILL, SIGTRAP);
+		break;
 	/* breakpoint2_insn */
 	case DIE_SSTEPBP:
 		notify_die(DIE_SSTEPBP, "single_step", regs, bundle,
-- 
cgit v1.2.3-70-g09d2


From a0099303cd6336675a5532e0444c1b6fd493ca49 Mon Sep 17 00:00:00 2001
From: Chris Metcalf <cmetcalf@tilera.com>
Date: Tue, 13 Aug 2013 15:33:53 -0400
Subject: tile: use standard tile_bundle_bits type in traps.c

We were rolling our own bundle_bits, which is unnecessary.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
---
 arch/tile/kernel/traps.c | 12 +++---------
 1 file changed, 3 insertions(+), 9 deletions(-)

(limited to 'arch/tile/kernel/traps.c')

diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c
index a937365b342..6b603d556ca 100644
--- a/arch/tile/kernel/traps.c
+++ b/arch/tile/kernel/traps.c
@@ -101,13 +101,7 @@ static int retry_gpv(unsigned int gpv_reason)
 
 #endif /* CHIP_HAS_TILE_DMA() */
 
-#ifdef __tilegx__
-#define bundle_bits tilegx_bundle_bits
-#else
-#define bundle_bits tile_bundle_bits
-#endif
-
-extern bundle_bits bpt_code;
+extern tile_bundle_bits bpt_code;
 
 asm(".pushsection .rodata.bpt_code,\"a\";"
     ".align 8;"
@@ -115,7 +109,7 @@ asm(".pushsection .rodata.bpt_code,\"a\";"
     ".size bpt_code,.-bpt_code;"
     ".popsection");
 
-static int special_ill(bundle_bits bundle, int *sigp, int *codep)
+static int special_ill(tile_bundle_bits bundle, int *sigp, int *codep)
 {
 	int sig, code, maxcode;
 
@@ -263,7 +257,7 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
 	siginfo_t info = { 0 };
 	int signo, code;
 	unsigned long address = 0;
-	bundle_bits instr;
+	tile_bundle_bits instr;
 	int is_kernel = !user_mode(regs);
 
 	/* Handle breakpoints, etc. */
-- 
cgit v1.2.3-70-g09d2