Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r--  kernel/trace/ftrace.c | 56 +++++++++++++++++++++++++++++++++++++-------------
1 file changed, 43 insertions(+), 13 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index b549b0f5b97..67708f46baa 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -120,22 +120,22 @@ static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
/*
* Traverse the ftrace_global_list, invoking all entries. The reason that we
- * can use rcu_dereference_raw() is that elements removed from this list
+ * can use rcu_dereference_raw_notrace() is that elements removed from this list
* are simply leaked, so there is no need to interact with a grace-period
- * mechanism. The rcu_dereference_raw() calls are needed to handle
+ * mechanism. The rcu_dereference_raw_notrace() calls are needed to handle
* concurrent insertions into the ftrace_global_list.
*
* Silly Alpha and silly pointer-speculation compiler optimizations!
*/
#define do_for_each_ftrace_op(op, list) \
- op = rcu_dereference_raw(list); \
+ op = rcu_dereference_raw_notrace(list); \
do
/*
* Optimized for just a single item in the list (as that is the normal case).
*/
#define while_for_each_ftrace_op(op) \
- while (likely(op = rcu_dereference_raw((op)->next)) && \
+ while (likely(op = rcu_dereference_raw_notrace((op)->next)) && \
unlikely((op) != &ftrace_list_end))
static inline void ftrace_ops_init(struct ftrace_ops *ops)
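For context, the macro pair above is meant to bracket a traversal of the ops list. A minimal sketch of a caller, assuming the mainline ftrace_ops_list and the four-argument callback signature of this kernel era (the body is illustrative, not a quote of the kernel's dispatch loop):

	static void list_func(unsigned long ip, unsigned long parent_ip,
			      struct ftrace_ops *ignored, struct pt_regs *regs)
	{
		struct ftrace_ops *op;

		do_for_each_ftrace_op(op, ftrace_ops_list) {
			/* Invoke each registered callback for this location. */
			if (ftrace_ops_test(op, ip))
				op->func(ip, parent_ip, op, regs);
		} while_for_each_ftrace_op(op);
	}

Because removed entries are simply leaked, as the comment notes, the _notrace raw dereference stays safe even when the tracer itself triggers the walk.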
@@ -413,6 +413,17 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
return 0;
}
+static void ftrace_sync(struct work_struct *work)
+{
+ /*
+ * This function is just a stub to implement a hard force
+ * of synchronize_sched(). This requires synchronizing
+ * tasks even in userspace and idle.
+ *
+ * Yes, function tracing is rude.
+ */
+}
+
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
int ret;
@@ -440,8 +451,12 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
* so there'll be no new users. We must ensure
* all current users are done before we free
* the control data.
+ * Note synchronize_sched() is not enough, as we
+ * use preempt_disable() to do RCU, but the function
+ * tracer can be called where RCU is not active
+ * (before user_exit()).
*/
- synchronize_sched();
+ schedule_on_each_cpu(ftrace_sync);
control_ops_free(ops);
}
} else
@@ -456,9 +471,13 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
/*
* Dynamic ops may be freed, we must make sure that all
* callers are done before leaving this function.
+ *
+ * Again, normal synchronize_sched() is not good enough.
+ * We need to do a hard force of sched synchronization.
*/
if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
- synchronize_sched();
+ schedule_on_each_cpu(ftrace_sync);
+
return 0;
}
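To see why schedule_on_each_cpu() is the stronger barrier here: synchronize_sched() only waits for CPUs that RCU-sched is watching, but these callbacks can fire before user_exit(), where RCU is not active. Queuing a work item forces every online CPU to actually run, and therefore schedule. A self-contained sketch of the pattern, with hypothetical names (sync_stub, hard_sched_sync):

	#include <linux/workqueue.h>

	static void sync_stub(struct work_struct *work)
	{
		/* Empty on purpose: merely running proves this CPU scheduled. */
	}

	static void hard_sched_sync(void)
	{
		/*
		 * schedule_on_each_cpu() queues sync_stub on every online
		 * CPU and waits for completion; on return, no CPU can still
		 * be inside a preempt-disabled callback that began before
		 * the call.
		 */
		schedule_on_each_cpu(sync_stub);
	}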
@@ -622,12 +641,18 @@ static int function_stat_show(struct seq_file *m, void *v)
if (rec->counter <= 1)
stddev = 0;
else {
- stddev = rec->time_squared - rec->counter * avg * avg;
+ /*
+ * Apply Welford's method:
+ * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
+ */
+ stddev = rec->counter * rec->time_squared -
+ rec->time * rec->time;
+
/*
* Divide only 1000 for ns^2 -> us^2 conversion.
* trace_print_graph_duration will divide 1000 again.
*/
- do_div(stddev, (rec->counter - 1) * 1000);
+ do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
}
trace_seq_init(&s);
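As a sanity check on the corrected formula, a small userspace program with made-up samples of 100, 200 and 300 ns reproduces the textbook sample variance of 10000 ns^2; the rewrite also sidesteps the precision loss of the old expression, which multiplied by an avg already truncated by do_div():

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* Hypothetical samples: 100, 200, 300 ns. */
		uint64_t n = 3, sum = 600, sum_sq = 140000;

		/* s^2 = (n * sum(x_i^2) - (sum x_i)^2) / (n * (n - 1)) */
		uint64_t var = (n * sum_sq - sum * sum) / (n * (n - 1));

		printf("sample variance: %llu ns^2\n",
		       (unsigned long long)var);	/* prints 10000 */
		return 0;
	}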
@@ -779,7 +804,7 @@ ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
if (hlist_empty(hhd))
return NULL;
- hlist_for_each_entry_rcu(rec, hhd, node) {
+ hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
if (rec->ip == ip)
return rec;
}
@@ -1165,7 +1190,7 @@ ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
hhd = &hash->buckets[key];
- hlist_for_each_entry_rcu(entry, hhd, hlist) {
+ hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
if (entry->ip == ip)
return entry;
}
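For context on the lookup above, the bucket index in this era's ftrace_lookup_ip() is derived from the ip with hash_long(); roughly as follows (recalled from mainline of the period, so treat it as an assumption rather than a quote):

	if (ftrace_hash_empty(hash))
		return NULL;

	if (hash->size_bits > 0)
		key = hash_long(ip, hash->size_bits);
	else
		key = 0;

	hhd = &hash->buckets[key];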
@@ -1422,8 +1447,8 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
struct ftrace_hash *notrace_hash;
int ret;
- filter_hash = rcu_dereference_raw(ops->filter_hash);
- notrace_hash = rcu_dereference_raw(ops->notrace_hash);
+ filter_hash = rcu_dereference_raw_notrace(ops->filter_hash);
+ notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash);
if ((ftrace_hash_empty(filter_hash) ||
ftrace_lookup_ip(filter_hash, ip)) &&
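The conditional these two hashes feed implements the usual ftrace rule: an ip is traced by this ops when the filter hash is empty or contains the ip, and the notrace hash is empty or does not contain it. A hypothetical helper spelling that rule out (illustrative only, not kernel code):

	static bool ops_would_trace(bool filter_empty, bool ip_in_filter,
				    bool notrace_empty, bool ip_in_notrace)
	{
		return (filter_empty || ip_in_filter) &&
		       (notrace_empty || !ip_in_notrace);
	}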
@@ -2920,7 +2945,7 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
* on the hash. rcu_read_lock is too dangerous here.
*/
preempt_disable_notrace();
- hlist_for_each_entry_rcu(entry, hhd, node) {
+ hlist_for_each_entry_rcu_notrace(entry, hhd, node) {
if (entry->ip == ip)
entry->ops->func(ip, parent_ip, &entry->data);
}
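The hunk above is the reader half of the lifetime scheme this patch relies on; the matching updater half is the schedule_on_each_cpu(ftrace_sync) call earlier. A condensed sketch of both halves, with a hypothetical entry type (my_entry, reader and updater are illustrative names):

	struct my_entry {
		struct hlist_node	node;
		unsigned long		ip;
		void			(*func)(unsigned long ip);
	};

	/* Reader: runs from a tracing callback where RCU may not be watching. */
	static void reader(struct hlist_head *hhd, unsigned long ip)
	{
		struct my_entry *entry;

		preempt_disable_notrace();	/* stands in for rcu_read_lock() */
		hlist_for_each_entry_rcu_notrace(entry, hhd, node)
			if (entry->ip == ip)
				entry->func(ip);
		preempt_enable_notrace();
	}

	/* Updater: unlink, force every CPU through the scheduler, then free. */
	static void updater(struct my_entry *entry)
	{
		hlist_del_rcu(&entry->node);
		schedule_on_each_cpu(ftrace_sync);
		kfree(entry);
	}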
@@ -3512,8 +3537,12 @@ EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
+/* Used by function selftest to not test if filter is set */
+bool ftrace_filter_param __initdata;
+
static int __init set_ftrace_notrace(char *str)
{
+ ftrace_filter_param = true;
strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
return 1;
}
@@ -3521,6 +3550,7 @@ __setup("ftrace_notrace=", set_ftrace_notrace);
static int __init set_ftrace_filter(char *str)
{
+ ftrace_filter_param = true;
strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
return 1;
}
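For reference, the two __setup() handlers above consume kernel boot parameters, so the new ftrace_filter_param flag is set whenever the command line carries something like (function names here are only examples):

	ftrace=function ftrace_filter=kmem_cache_alloc,kmem_cache_free ftrace_notrace=*rcu*

which is exactly the situation in which the function selftest should not assume an unfiltered trace.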