Diffstat (limited to 'kernel/trace/trace_functions.c')
 kernel/trace/trace_functions.c | 36 +++++++++++++++++++++++++++---------
 1 file changed, 27 insertions(+), 9 deletions(-)
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index c7b0c6a7db0..a426f410c06 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -13,6 +13,7 @@
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
+#include <linux/pstore.h>
#include <linux/fs.h>
#include "trace.h"
@@ -74,6 +75,14 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
preempt_enable_notrace();
}
+/* Our two options */
+enum {
+ TRACE_FUNC_OPT_STACK = 0x1,
+ TRACE_FUNC_OPT_PSTORE = 0x2,
+};
+
+static struct tracer_flags func_flags;
+
static void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
@@ -97,6 +106,12 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1)) {
+ /*
+ * So far tracing doesn't support multiple buffers, so
+ * we make an explicit call for now.
+ */
+ if (unlikely(func_flags.val & TRACE_FUNC_OPT_PSTORE))
+ pstore_ftrace_call(ip, parent_ip);
pc = preempt_count();
trace_function(tr, ip, parent_ip, flags, pc);
}
@@ -158,15 +173,13 @@ static struct ftrace_ops trace_stack_ops __read_mostly =
.flags = FTRACE_OPS_FL_GLOBAL,
};
-/* Our two options */
-enum {
- TRACE_FUNC_OPT_STACK = 0x1,
-};
-
static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
+#ifdef CONFIG_PSTORE_FTRACE
+ { TRACER_OPT(func_pstore, TRACE_FUNC_OPT_PSTORE) },
+#endif
{ } /* Always set a last empty entry */
};
@@ -204,10 +217,11 @@ static void tracing_stop_function_trace(void)
static int func_set_flag(u32 old_flags, u32 bit, int set)
{
- if (bit == TRACE_FUNC_OPT_STACK) {
+ switch (bit) {
+ case TRACE_FUNC_OPT_STACK:
/* do nothing if already set */
if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
- return 0;
+ break;
if (set) {
unregister_ftrace_function(&trace_ops);
@@ -217,10 +231,14 @@ static int func_set_flag(u32 old_flags, u32 bit, int set)
register_ftrace_function(&trace_ops);
}
- return 0;
+ break;
+ case TRACE_FUNC_OPT_PSTORE:
+ break;
+ default:
+ return -EINVAL;
}
- return -EINVAL;
+ return 0;
}
static struct tracer function_trace __read_mostly =
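
A minimal userspace sketch of the flag-dispatch pattern this patch introduces in func_set_flag(): the switch keys on the option bit, the stack option still (un)registers its ftrace ops, and the new pstore option needs no setup or teardown because function_trace_call() simply tests the bit on every hit. All functions and the flag bookkeeping below are simplified stand-in mocks, not the real kernel APIs (in the kernel, the core tracer code updates func_flags.val after this callback returns, and the stack case swaps between trace_ops and trace_stack_ops).

#include <stdio.h>

/* Option bits, mirroring the enum moved above function_trace_call() in the patch. */
enum {
	TRACE_FUNC_OPT_STACK  = 0x1,
	TRACE_FUNC_OPT_PSTORE = 0x2,
};

static unsigned int flags_val;	/* stands in for func_flags.val */

/* Mock register/unregister calls; the real ones live in kernel/trace/ftrace.c. */
static void register_stack_ops(void)   { puts("stack tracing ops registered"); }
static void unregister_stack_ops(void) { puts("stack tracing ops unregistered"); }

static int func_set_flag(unsigned int bit, int set)
{
	switch (bit) {
	case TRACE_FUNC_OPT_STACK:
		/* do nothing if the bit already has the requested value */
		if (!!set == !!(flags_val & TRACE_FUNC_OPT_STACK))
			break;
		if (set)
			register_stack_ops();
		else
			unregister_stack_ops();
		break;
	case TRACE_FUNC_OPT_PSTORE:
		/* no setup needed: the hot path checks the bit on every call */
		break;
	default:
		return -1;	/* -EINVAL in the kernel */
	}

	/* simplified: the kernel's tracer core records the new flag value itself */
	if (set)
		flags_val |= bit;
	else
		flags_val &= ~bit;
	return 0;
}

int main(void)
{
	/* roughly what toggling the tracefs option "func_pstore" would trigger */
	func_set_flag(TRACE_FUNC_OPT_PSTORE, 1);
	printf("pstore bit set: %d\n", !!(flags_val & TRACE_FUNC_OPT_PSTORE));
	func_set_flag(TRACE_FUNC_OPT_STACK, 1);
	func_set_flag(TRACE_FUNC_OPT_STACK, 1);	/* second call is a no-op */
	return 0;
}

The design choice the switch reflects: options that only gate a check in the tracing hot path (like TRACE_FUNC_OPT_PSTORE) cost nothing to flip, while options that change which ftrace_ops is registered (like TRACE_FUNC_OPT_STACK) keep their re-registration logic in one case, and unknown bits still fail with -EINVAL.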