author		Ingo Molnar <mingo@elte.hu>	2008-07-14 16:11:52 +0200
committer	Ingo Molnar <mingo@elte.hu>	2008-07-14 16:11:52 +0200
commit		5806b81ac1c0c52665b91723fd4146a4f86e386b (patch)
tree		24ea8763bf308ce1407c1de91dc8de4d2655e1c1 /arch/arm/kernel/ftrace.c
parent		d14c8a680ccfdeb5e7b9be4d61162c2b373bd1e8 (diff)
parent		6712e299b7dc78aa4971b85e803435ee6d49a9dd (diff)
Merge branch 'auto-ftrace-next' into tracing/for-linus
Conflicts:

	arch/x86/kernel/entry_32.S
	arch/x86/kernel/process_32.c
	arch/x86/kernel/process_64.c
	arch/x86/lib/Makefile
	include/asm-x86/irqflags.h
	kernel/Makefile
	kernel/sched.c

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/arm/kernel/ftrace.c')
-rw-r--r--	arch/arm/kernel/ftrace.c	116
1 file changed, 116 insertions(+), 0 deletions(-)
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
new file mode 100644
index 00000000000..76d50e6091b
--- /dev/null
+++ b/arch/arm/kernel/ftrace.c
@@ -0,0 +1,116 @@
+/*
+ * Dynamic function tracing support.
+ *
+ * Copyright (C) 2008 Abhishek Sagar <sagar.abhishek@gmail.com>
+ *
+ * For licensing details, see COPYING.
+ *
+ * Defines low-level handling of mcount calls when the kernel
+ * is compiled with the -pg flag. When using dynamic ftrace, the
+ * mcount call sites are patched lazily with a NOP until they are
+ * enabled. All code mutation routines here take effect atomically.
+ */
+
+#include <linux/ftrace.h>
+
+#include <asm/cacheflush.h>
+#include <asm/ftrace.h>
+
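+/*
+ * A BL instruction encodes a signed 24-bit word offset taken relative
+ * to PC + 8 (the ARM prefetch offset), giving a branch range of
+ * roughly +/-32 MB.
+ */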
+#define PC_OFFSET 8
+#define BL_OPCODE 0xeb000000
+#define BL_OFFSET_MASK 0x00ffffff
+
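+/* scratch word holding the last BL instruction built by ftrace_call_replace() */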
+static unsigned long bl_insn;
+static const unsigned long NOP = 0xe1a00000; /* mov r0, r0 */
+
+unsigned char *ftrace_nop_replace(void)
+{
+ return (unsigned char *)&NOP;
+}
+
+/* construct a branch (BL) instruction to addr */
+unsigned char *ftrace_call_replace(unsigned long pc, unsigned long addr)
+{
+ long offset;
+
+ offset = (long)addr - (long)(pc + PC_OFFSET);
+ if (unlikely(offset < -33554432 || offset > 33554428)) {
+ /* Can't generate branches that far (from ARM ARM). Ftrace
+ * doesn't generate branches outside of kernel text.
+ */
+ WARN_ON_ONCE(1);
+ return NULL;
+ }
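+ /* convert the byte offset to a word count and keep the low 24 bits */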
+ offset = (offset >> 2) & BL_OFFSET_MASK;
+ bl_insn = BL_OPCODE | offset;
+ return (unsigned char *)&bl_insn;
+}
+
+int ftrace_modify_code(unsigned long pc, unsigned char *old_code,
+ unsigned char *new_code)
+{
+ unsigned long err = 0, replaced = 0, old, new;
+
+ old = *(unsigned long *)old_code;
+ new = *(unsigned long *)new_code;
+
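+ /*
+  * Swap in the new instruction only if the word at pc still holds the
+  * expected old value (or already holds the new one); any other value
+  * sets err to 2.  A fault on the load or store branches to the fixup
+  * code, which sets err to 1.
+  */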
+ __asm__ __volatile__ (
+ "1: ldr %1, [%2] \n"
+ " cmp %1, %4 \n"
+ "2: streq %3, [%2] \n"
+ " cmpne %1, %3 \n"
+ " movne %0, #2 \n"
+ "3:\n"
+
+ ".section .fixup, \"ax\"\n"
+ "4: mov %0, #1 \n"
+ " b 3b \n"
+ ".previous\n"
+
+ ".section __ex_table, \"a\"\n"
+ " .long 1b, 4b \n"
+ " .long 2b, 4b \n"
+ ".previous\n"
+
+ : "=r"(err), "=r"(replaced)
+ : "r"(pc), "r"(new), "r"(old), "0"(err), "1"(replaced)
+ : "memory");
+
+ if (!err && (replaced == old))
+ flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);
+
+ return err;
+}
+
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+ int ret;
+ unsigned long pc, old;
+ unsigned char *new;
+
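+ /* rewrite the call at the ftrace_call site to branch to the new tracer */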
+ pc = (unsigned long)&ftrace_call;
+ memcpy(&old, &ftrace_call, MCOUNT_INSN_SIZE);
+ new = ftrace_call_replace(pc, (unsigned long)func);
+ ret = ftrace_modify_code(pc, (unsigned char *)&old, new);
+ return ret;
+}
+
+int ftrace_mcount_set(unsigned long *data)
+{
+ unsigned long pc, old;
+ unsigned long *addr = data;
+ unsigned char *new;
+
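+ /* rewrite the call at the mcount_call site to branch to *addr */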
+ pc = (unsigned long)&mcount_call;
+ memcpy(&old, &mcount_call, MCOUNT_INSN_SIZE);
+ new = ftrace_call_replace(pc, *addr);
+ *addr = ftrace_modify_code(pc, (unsigned char *)&old, new);
+ return 0;
+}
+
+/* run from kstop_machine */
+int __init ftrace_dyn_arch_init(void *data)
+{
+ ftrace_mcount_set(data);
+ return 0;
+}