authorDeng-Cheng Zhu <dengcheng.zhu@gmail.com>2010-10-12 19:37:23 +0800
committerRalf Baechle <ralf@linux-mips.org>2010-10-29 19:08:49 +0100
commit7e788d967c092343cca773542565c362991c87c0 (patch)
tree88c3ef91aba02a7dae93ccc674f5bed461c16d0d /arch/mips/kernel
parent14f7001284185bffeb796a181664906f7160f593 (diff)
MIPS: Perf-events: Add callchain support
Adds callchain support for MIPS Perf-events. For more info on this feature, please refer to tools/perf/Documentation/perf-report.txt and tools/perf/design.txt. Currently userspace callchain data is not recorded, because we do not have a safe way to do this.

Signed-off-by: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Acked-by: David Daney <ddaney@caviumnetworks.com>
To: linux-mips@linux-mips.org
Cc: a.p.zijlstra@chello.nl
Cc: paulus@samba.org
Cc: mingo@elte.hu
Cc: acme@redhat.com
Cc: jamie.iles@picochip.com
Cc: matt@console-pimps.org
Patchwork: https://patchwork.linux-mips.org/patch/1690/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--  arch/mips/kernel/perf_event.c | 108
1 file changed, 107 insertions(+), 1 deletion(-)
diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c
index eefdf60da2d..95c833e8dc8 100644
--- a/arch/mips/kernel/perf_event.c
+++ b/arch/mips/kernel/perf_event.c
@@ -6,7 +6,8 @@
*
* This code is based on the implementation for ARM, which is in turn
* based on the sparc64 perf event code and the x86 code. Performance
- * counter access is based on the MIPS Oprofile code.
+ * counter access is based on the MIPS Oprofile code. The callchain
+ * support is based on the code in arch/mips/kernel/stacktrace.c.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -486,3 +487,108 @@ handle_associated_event(struct cpu_hw_events *cpuc,
if (perf_event_overflow(event, 0, data, regs))
mipspmu->disable_event(idx);
}
+
+/* Callchain handling code. */
+static inline void
+callchain_store(struct perf_callchain_entry *entry,
+ u64 ip)
+{
+ if (entry->nr < PERF_MAX_STACK_DEPTH)
+ entry->ip[entry->nr++] = ip;
+}
+
+/*
+ * Leave the userspace callchain empty for now. When we find a way to
+ * trace the user stack callchains, we will add it here.
+ */
+static void
+perf_callchain_user(struct pt_regs *regs,
+ struct perf_callchain_entry *entry)
+{
+}
+
+static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
+ unsigned long reg29)
+{
+ unsigned long *sp = (unsigned long *)reg29;
+ unsigned long addr;
+
+ while (!kstack_end(sp)) {
+ addr = *sp++;
+ if (__kernel_text_address(addr)) {
+ callchain_store(entry, addr);
+ if (entry->nr >= PERF_MAX_STACK_DEPTH)
+ break;
+ }
+ }
+}
+
+static void
+perf_callchain_kernel(struct pt_regs *regs,
+ struct perf_callchain_entry *entry)
+{
+ unsigned long sp = regs->regs[29];
+#ifdef CONFIG_KALLSYMS
+ unsigned long ra = regs->regs[31];
+ unsigned long pc = regs->cp0_epc;
+
+ callchain_store(entry, PERF_CONTEXT_KERNEL);
+ if (raw_show_trace || !__kernel_text_address(pc)) {
+ unsigned long stack_page =
+ (unsigned long)task_stack_page(current);
+ if (stack_page && sp >= stack_page &&
+ sp <= stack_page + THREAD_SIZE - 32)
+ save_raw_perf_callchain(entry, sp);
+ return;
+ }
+ do {
+ callchain_store(entry, pc);
+ if (entry->nr >= PERF_MAX_STACK_DEPTH)
+ break;
+ pc = unwind_stack(current, &sp, pc, &ra);
+ } while (pc);
+#else
+ callchain_store(entry, PERF_CONTEXT_KERNEL);
+ save_raw_perf_callchain(entry, sp);
+#endif
+}
+
+static void
+perf_do_callchain(struct pt_regs *regs,
+ struct perf_callchain_entry *entry)
+{
+ int is_user;
+
+ if (!regs)
+ return;
+
+ is_user = user_mode(regs);
+
+ if (!current || !current->pid)
+ return;
+
+ if (is_user && current->state != TASK_RUNNING)
+ return;
+
+ if (!is_user) {
+ perf_callchain_kernel(regs, entry);
+ if (current->mm)
+ regs = task_pt_regs(current);
+ else
+ regs = NULL;
+ }
+ if (regs)
+ perf_callchain_user(regs, entry);
+}
+
+static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
+
+struct perf_callchain_entry *
+perf_callchain(struct pt_regs *regs)
+{
+ struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry);
+
+ entry->nr = 0;
+ perf_do_callchain(regs, entry);
+ return entry;
+}
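
The new perf_callchain() path is exercised whenever a sampling event requests PERF_SAMPLE_CALLCHAIN. As a minimal userspace sketch (not part of this patch), the following shows how such an event could be opened through the perf_event_open() syscall; the event type, sample period, and exclude_user choice are illustrative only.

/* Minimal sketch: open a sampling event that requests callchains.
 * With this patch, the kernel-side frames of each sample are filled
 * in by perf_callchain(); userspace frames are still left empty. */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_CPU_CYCLES;	/* illustrative event */
	attr.sample_period = 100000;		/* illustrative period */
	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_CALLCHAIN;
	attr.exclude_user = 0;	/* user frames are simply absent for now */

	fd = perf_event_open(&attr, 0, -1, -1, 0);	/* current task, any CPU */
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/* A real profiler would now mmap the ring buffer and parse
	 * PERF_RECORD_SAMPLE records, each carrying the callchain. */
	close(fd);
	return 0;
}

In practice the same request is made by running perf record -g on the target and inspecting the result with perf report, per the documentation cited in the commit message.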