author	Linus Torvalds <torvalds@linux-foundation.org>	2013-04-08 15:14:11 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-04-08 15:14:11 -0700
commit	5f2f280f87fe9755dba915f1ade149840885fd91 (patch)
tree	31c8564dfd1fcb0ea0023a23c3aefd8108d7b2de
parent	31880c37c11e28cb81c70757e38392b42e695dc6 (diff)
parent	395b97a3aeff0b8d949ee3e67bf8c11c5ffd6861 (diff)
Merge tag 'trace-fixes-3.9-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing fixes from Steven Rostedt:
 "This includes three fixes. Two fix features added in 3.9 and one
  fixes a long-standing minor bug.

  The first patch fixes a race that can happen if the user switches
  from the irqsoff tracer to another tracer. If an irqs-off latency is
  detected, it will try to use the snapshot buffer, but the new tracer
  won't have it allocated. There's a nasty warning that gets printed
  and the trace is ignored. Nothing crashes, just a nasty WARN_ON is
  shown.

  The second patch fixes an issue where if the sysctl is used to
  disable and enable function tracing, it can put function tracing
  into an unstable state.

  The third patch fixes an issue with perf using the function tracer.
  An update was done where the stub function could be called during
  perf function tracing, and that stub function won't have the
  "control" flag set, causing a nasty warning when running perf."

* tag 'trace-fixes-3.9-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  ftrace: Do not call stub functions in control loop
  ftrace: Consistently restore trace function on sysctl enabling
  tracing: Fix race with update_max_tr_single and changing tracers
-rw-r--r--	include/linux/ftrace.h	2
-rw-r--r--	kernel/trace/ftrace.c	13
-rw-r--r--	kernel/trace/trace.c	5
3 files changed, 11 insertions, 9 deletions
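
For context on the first fix ("ftrace: Do not call stub functions in control loop"): ftrace terminates its ops list with a stub entry, and the control loop must recognize and skip that placeholder rather than invoke its callback. Below is a minimal standalone sketch of the skip-the-placeholder pattern; the struct ops type, the OPS_FL_STUB flag name, and the helpers are simplified stand-ins for the kernel's ftrace_ops machinery, not actual kernel code.

	/* Minimal sketch, not kernel code: a flagged stub terminates the
	 * ops list, and the dispatch loop skips it instead of calling it. */
	#include <stdio.h>

	#define OPS_FL_STUB (1 << 0)	/* mirrors the role of FTRACE_OPS_FL_STUB */

	struct ops {
		void (*func)(unsigned long ip);
		unsigned int flags;
		struct ops *next;
	};

	static void stub_func(unsigned long ip) { (void)ip; /* placeholder */ }
	static void real_func(unsigned long ip) { printf("traced ip=%lx\n", ip); }

	static struct ops list_end = { .func = stub_func, .flags = OPS_FL_STUB };
	static struct ops real_ops = { .func = real_func, .next = &list_end };

	static void control_loop(struct ops *head, unsigned long ip)
	{
		for (struct ops *op = head; op; op = op->next) {
			if (op->flags & OPS_FL_STUB)	/* the fix: skip placeholders */
				continue;
			op->func(ip, /* parent_ip, regs elided */);
		}
	}

	int main(void)
	{
		control_loop(&real_ops, 0xdeadbeef);	/* prints once, stub skipped */
		return 0;
	}

(One correction to the sketch above: the call is simply op->func(ip); the elided-argument comment belongs outside the call. The point is only that a placeholder entry must never be dispatched as if it were a real callback.)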
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index e5ca8ef50e9..167abf90780 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -89,6 +89,7 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
* that the call back has its own recursion protection. If it does
* not set this, then the ftrace infrastructure will add recursion
* protection for the caller.
+ * STUB - The ftrace_ops is just a place holder.
*/
enum {
FTRACE_OPS_FL_ENABLED = 1 << 0,
@@ -98,6 +99,7 @@ enum {
FTRACE_OPS_FL_SAVE_REGS = 1 << 4,
FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 5,
FTRACE_OPS_FL_RECURSION_SAFE = 1 << 6,
+ FTRACE_OPS_FL_STUB = 1 << 7,
};
struct ftrace_ops {
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 6893d5a2bf0..7e897106b7e 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -66,7 +66,7 @@
static struct ftrace_ops ftrace_list_end __read_mostly = {
.func = ftrace_stub,
- .flags = FTRACE_OPS_FL_RECURSION_SAFE,
+ .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
};
/* ftrace_enabled is a method to turn ftrace on or off */
@@ -4131,7 +4131,8 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
preempt_disable_notrace();
trace_recursion_set(TRACE_CONTROL_BIT);
do_for_each_ftrace_op(op, ftrace_control_list) {
- if (!ftrace_function_local_disabled(op) &&
+ if (!(op->flags & FTRACE_OPS_FL_STUB) &&
+ !ftrace_function_local_disabled(op) &&
ftrace_ops_test(op, ip))
op->func(ip, parent_ip, op, regs);
} while_for_each_ftrace_op(op);
@@ -4555,12 +4556,8 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
ftrace_startup_sysctl();
/* we are starting ftrace again */
- if (ftrace_ops_list != &ftrace_list_end) {
- if (ftrace_ops_list->next == &ftrace_list_end)
- ftrace_trace_function = ftrace_ops_list->func;
- else
- ftrace_trace_function = ftrace_ops_list_func;
- }
+ if (ftrace_ops_list != &ftrace_list_end)
+ update_ftrace_function();
} else {
/* stopping ftrace calls (just send to ftrace_stub) */
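
The sysctl hunk above drops an open-coded choice of trace function in favor of update_ftrace_function(), which already centralizes that decision. A hedged sketch of the selection being delegated, with simplified types and names (pick_trace_function and the toy callbacks are illustrative assumptions, not the kernel's implementation):

	/* Sketch, not kernel code: with exactly one ops registered, install
	 * its callback directly; with several, install the list walker. */
	#include <stdio.h>

	struct ops {
		void (*func)(unsigned long ip);
		struct ops *next;
	};

	typedef void (*trace_fn_t)(unsigned long ip);

	static void stub(unsigned long ip)   { (void)ip; }
	static void one(unsigned long ip)    { printf("direct: %lx\n", ip); }
	static void walker(unsigned long ip) { printf("walk list: %lx\n", ip); }

	static trace_fn_t pick_trace_function(struct ops *list, struct ops *list_end)
	{
		if (list == list_end)
			return stub;		/* nothing registered */
		if (list->next == list_end)
			return list->func;	/* exactly one ops: call it directly */
		return walker;			/* several: use the list dispatcher */
	}

	int main(void)
	{
		struct ops end = { .func = stub, .next = NULL };
		struct ops single = { .func = one, .next = &end };

		pick_trace_function(&single, &end)(0x1234);	/* direct call */
		pick_trace_function(&end, &end)(0x1234);	/* stub: silent */
		return 0;
	}

Keeping this decision in one helper is the whole point of the patch: the sysctl path previously duplicated it and could drift out of sync with the registration path.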
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 4f1dade5698..7ba7fc76f9e 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -744,8 +744,11 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
return;
WARN_ON_ONCE(!irqs_disabled());
- if (WARN_ON_ONCE(!current_trace->allocated_snapshot))
+ if (!current_trace->allocated_snapshot) {
+ /* Only the nop tracer should hit this when disabling */
+ WARN_ON_ONCE(current_trace != &nop_trace);
return;
+ }
arch_spin_lock(&ftrace_max_lock);
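
The trace.c change narrows the warning rather than removing it: during a tracer switch the nop tracer legitimately has no snapshot buffer allocated, so only other tracers reaching this point indicate a real problem. A minimal standalone sketch of the narrowed check, using a simplified stand-in type (struct tracer here is illustrative, not the kernel's):

	/* Sketch, not kernel code: warn about a missing snapshot buffer
	 * only when the current tracer is not the nop tracer. */
	#include <stdbool.h>
	#include <stdio.h>

	struct tracer { const char *name; bool allocated_snapshot; };

	static struct tracer nop_trace = { "nop", false };

	static void update_max_single(struct tracer *current_trace)
	{
		if (!current_trace->allocated_snapshot) {
			/* Only the nop tracer should hit this when disabling */
			if (current_trace != &nop_trace)
				fprintf(stderr, "WARN: %s has no snapshot buffer\n",
					current_trace->name);
			return;
		}
		/* ... swap in the max-latency snapshot here ... */
	}

	int main(void)
	{
		struct tracer irqsoff = { "irqsoff", false };

		update_max_single(&nop_trace);	/* silent: expected mid-switch */
		update_max_single(&irqsoff);	/* warns: genuinely unexpected */
		return 0;
	}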