path: root/kernel/trace/ftrace.c
Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r--  kernel/trace/ftrace.c | 134
1 file changed, 120 insertions(+), 14 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 683d559a0ee..867bd1dd2dd 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -62,6 +62,8 @@
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12
+#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)
+
/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;
@@ -89,12 +91,14 @@ static struct ftrace_ops ftrace_list_end __read_mostly = {
};
static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
+static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
static ftrace_func_t __ftrace_trace_function_delay __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
static struct ftrace_ops global_ops;
+static struct ftrace_ops control_ops;
static void
ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
@@ -168,6 +172,32 @@ static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
}
#endif
+static void control_ops_disable_all(struct ftrace_ops *ops)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ *per_cpu_ptr(ops->disabled, cpu) = 1;
+}
+
+static int control_ops_alloc(struct ftrace_ops *ops)
+{
+ int __percpu *disabled;
+
+ disabled = alloc_percpu(int);
+ if (!disabled)
+ return -ENOMEM;
+
+ ops->disabled = disabled;
+ control_ops_disable_all(ops);
+ return 0;
+}
+
+static void control_ops_free(struct ftrace_ops *ops)
+{
+ free_percpu(ops->disabled);
+}
+
static void update_global_ops(void)
{
ftrace_func_t func;
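
The per-cpu disabled state allocated above is what lets a control ops be switched on and off per CPU without touching any list. A minimal usage sketch, assuming the companion ftrace_function_local_enable()/ftrace_function_local_disable() helpers that accompany this interface in include/linux/ftrace.h rather than in this file (the function name below is hypothetical):

	/*
	 * Hypothetical caller: a control ops starts with every CPU disabled
	 * (control_ops_disable_all() runs at registration), so its callback
	 * only fires on a CPU after that CPU is locally enabled.  The helpers
	 * act on this CPU's counter, so preemption stays off around the call.
	 */
	static void my_enable_tracing_on_this_cpu(struct ftrace_ops *ops)
	{
		preempt_disable();
		ftrace_function_local_enable(ops);	/* allow ops->func on this CPU */
		preempt_enable();
	}
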
@@ -259,6 +289,26 @@ static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
return 0;
}
+static void add_ftrace_list_ops(struct ftrace_ops **list,
+ struct ftrace_ops *main_ops,
+ struct ftrace_ops *ops)
+{
+ int first = *list == &ftrace_list_end;
+ add_ftrace_ops(list, ops);
+ if (first)
+ add_ftrace_ops(&ftrace_ops_list, main_ops);
+}
+
+static int remove_ftrace_list_ops(struct ftrace_ops **list,
+ struct ftrace_ops *main_ops,
+ struct ftrace_ops *ops)
+{
+ int ret = remove_ftrace_ops(list, ops);
+ if (!ret && *list == &ftrace_list_end)
+ ret = remove_ftrace_ops(&ftrace_ops_list, main_ops);
+ return ret;
+}
+
static int __register_ftrace_function(struct ftrace_ops *ops)
{
if (ftrace_disabled)
@@ -270,15 +320,20 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
return -EBUSY;
+ /* We don't support both control and global flags set. */
+ if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)
+ return -EINVAL;
+
if (!core_kernel_data((unsigned long)ops))
ops->flags |= FTRACE_OPS_FL_DYNAMIC;
if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
- int first = ftrace_global_list == &ftrace_list_end;
- add_ftrace_ops(&ftrace_global_list, ops);
+ add_ftrace_list_ops(&ftrace_global_list, &global_ops, ops);
ops->flags |= FTRACE_OPS_FL_ENABLED;
- if (first)
- add_ftrace_ops(&ftrace_ops_list, &global_ops);
+ } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
+ if (control_ops_alloc(ops))
+ return -ENOMEM;
+ add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
} else
add_ftrace_ops(&ftrace_ops_list, ops);
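
Registration now takes one of three paths: global ops join ftrace_global_list behind global_ops, control ops get their per-cpu disabled state allocated and join ftrace_control_list behind control_ops, and everything else goes straight onto ftrace_ops_list. A minimal client sketch, assuming the exported register_ftrace_function() wrapper around __register_ftrace_function() (the callback and ops names below are hypothetical, not part of this patch):

	/* Hypothetical client: hook a callback in via the control list. */
	static void my_trace_func(unsigned long ip, unsigned long parent_ip)
	{
		/* runs for traced functions on CPUs where this ops is locally enabled */
	}

	static struct ftrace_ops my_control_ops = {
		.func	= my_trace_func,
		.flags	= FTRACE_OPS_FL_CONTROL,	/* must not also set FTRACE_OPS_FL_GLOBAL */
	};

	static int __init my_init(void)
	{
		return register_ftrace_function(&my_control_ops);
	}
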
@@ -302,11 +357,23 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
return -EINVAL;
if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
- ret = remove_ftrace_ops(&ftrace_global_list, ops);
- if (!ret && ftrace_global_list == &ftrace_list_end)
- ret = remove_ftrace_ops(&ftrace_ops_list, &global_ops);
+ ret = remove_ftrace_list_ops(&ftrace_global_list,
+ &global_ops, ops);
if (!ret)
ops->flags &= ~FTRACE_OPS_FL_ENABLED;
+ } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
+ ret = remove_ftrace_list_ops(&ftrace_control_list,
+ &control_ops, ops);
+ if (!ret) {
+ /*
+ * The ftrace_ops is now removed from the list,
+ * so there'll be no new users. We must ensure
+ * all current users are done before we free
+ * the control data.
+ */
+ synchronize_sched();
+ control_ops_free(ops);
+ }
} else
ret = remove_ftrace_ops(&ftrace_ops_list, ops);
@@ -1119,6 +1186,12 @@ static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
}
+void ftrace_free_filter(struct ftrace_ops *ops)
+{
+ free_ftrace_hash(ops->filter_hash);
+ free_ftrace_hash(ops->notrace_hash);
+}
+
static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
{
struct ftrace_hash *hash;
@@ -1129,7 +1202,7 @@ static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
return NULL;
size = 1 << size_bits;
- hash->buckets = kzalloc(sizeof(*hash->buckets) * size, GFP_KERNEL);
+ hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);
if (!hash->buckets) {
kfree(hash);
@@ -3146,8 +3219,10 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
mutex_lock(&ftrace_regex_lock);
if (reset)
ftrace_filter_reset(hash);
- if (buf)
- ftrace_match_records(hash, buf, len);
+ if (buf && !ftrace_match_records(hash, buf, len)) {
+ ret = -EINVAL;
+ goto out_regex_unlock;
+ }
mutex_lock(&ftrace_lock);
ret = ftrace_hash_move(ops, enable, orig_hash, hash);
@@ -3157,6 +3232,7 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
mutex_unlock(&ftrace_lock);
+ out_regex_unlock:
mutex_unlock(&ftrace_regex_lock);
free_ftrace_hash(hash);
@@ -3173,10 +3249,10 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
* Filters denote which functions should be enabled when tracing is enabled.
* If @buf is NULL and reset is set, all functions will be enabled for tracing.
*/
-void ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
+int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
int len, int reset)
{
- ftrace_set_regex(ops, buf, len, reset, 1);
+ return ftrace_set_regex(ops, buf, len, reset, 1);
}
EXPORT_SYMBOL_GPL(ftrace_set_filter);
@@ -3191,10 +3267,10 @@ EXPORT_SYMBOL_GPL(ftrace_set_filter);
* is enabled. If @buf is NULL and reset is set, all functions will be enabled
* for tracing.
*/
-void ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
+int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
int len, int reset)
{
- ftrace_set_regex(ops, buf, len, reset, 0);
+ return ftrace_set_regex(ops, buf, len, reset, 0);
}
EXPORT_SYMBOL_GPL(ftrace_set_notrace);
/**
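
Since ftrace_set_regex() can now fail when the pattern matches nothing, ftrace_set_filter() and ftrace_set_notrace() propagate an int instead of returning void. A minimal caller sketch (the function and symbol names below are hypothetical):

	/* Hypothetical caller: bail out if the filter did not match anything. */
	static int my_setup_filter(struct ftrace_ops *ops)
	{
		unsigned char buf[] = "my_target_func";
		int ret;

		ret = ftrace_set_filter(ops, buf, sizeof(buf) - 1, 1);
		if (ret)
			pr_warn("ftrace filter '%s' not set: %d\n", buf, ret);
		return ret;
	}
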
@@ -3871,6 +3947,36 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
#endif /* CONFIG_DYNAMIC_FTRACE */
static void
+ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip)
+{
+ struct ftrace_ops *op;
+
+ if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
+ return;
+
+ /*
+ * Some of the ops may be dynamically allocated,
+ * they must be freed after a synchronize_sched().
+ */
+ preempt_disable_notrace();
+ trace_recursion_set(TRACE_CONTROL_BIT);
+ op = rcu_dereference_raw(ftrace_control_list);
+ while (op != &ftrace_list_end) {
+ if (!ftrace_function_local_disabled(op) &&
+ ftrace_ops_test(op, ip))
+ op->func(ip, parent_ip);
+
+ op = rcu_dereference_raw(op->next);
+ };
+ trace_recursion_clear(TRACE_CONTROL_BIT);
+ preempt_enable_notrace();
+}
+
+static struct ftrace_ops control_ops = {
+ .func = ftrace_ops_control_func,
+};
+
+static void
ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
{
struct ftrace_ops *op;