Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r--  kernel/trace/ftrace.c  1139
1 file changed, 881 insertions(+), 258 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 9a236ffe2aa..f1ed080406c 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -27,6 +27,9 @@
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>
+#include <linux/hash.h>
+
+#include <trace/sched.h>
#include <asm/ftrace.h>
@@ -44,14 +47,14 @@
ftrace_kill(); \
} while (0)
+/* hash bits for specific function selection */
+#define FTRACE_HASH_BITS 7
+#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
+
/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;
-/* set when tracing only a pid */
-struct pid *ftrace_pid_trace;
-static struct pid * const ftrace_swapper_pid = &init_struct_pid;
-
/* Quick disabling of function tracer. */
int function_trace_stop;
@@ -61,9 +64,7 @@ int function_trace_stop;
*/
static int ftrace_disabled __read_mostly;
-static DEFINE_SPINLOCK(ftrace_lock);
-static DEFINE_MUTEX(ftrace_sysctl_lock);
-static DEFINE_MUTEX(ftrace_start_lock);
+static DEFINE_MUTEX(ftrace_lock);
static struct ftrace_ops ftrace_list_end __read_mostly =
{
@@ -134,9 +135,6 @@ static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
static int __register_ftrace_function(struct ftrace_ops *ops)
{
- /* should not be called from interrupt context */
- spin_lock(&ftrace_lock);
-
ops->next = ftrace_list;
/*
* We are entering ops into the ftrace_list but another
@@ -172,18 +170,12 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
#endif
}
- spin_unlock(&ftrace_lock);
-
return 0;
}
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
struct ftrace_ops **p;
- int ret = 0;
-
- /* should not be called from interrupt context */
- spin_lock(&ftrace_lock);
/*
* If we are removing the last function, then simply point
@@ -192,17 +184,15 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
if (ftrace_list == ops && ops->next == &ftrace_list_end) {
ftrace_trace_function = ftrace_stub;
ftrace_list = &ftrace_list_end;
- goto out;
+ return 0;
}
for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
if (*p == ops)
break;
- if (*p != ops) {
- ret = -1;
- goto out;
- }
+ if (*p != ops)
+ return -1;
*p = (*p)->next;
@@ -223,21 +213,15 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
}
}
- out:
- spin_unlock(&ftrace_lock);
-
- return ret;
+ return 0;
}
static void ftrace_update_pid_func(void)
{
ftrace_func_t func;
- /* should not be called from interrupt context */
- spin_lock(&ftrace_lock);
-
if (ftrace_trace_function == ftrace_stub)
- goto out;
+ return;
func = ftrace_trace_function;
@@ -254,23 +238,29 @@ static void ftrace_update_pid_func(void)
#else
__ftrace_trace_function = func;
#endif
-
- out:
- spin_unlock(&ftrace_lock);
}
+/* set when tracing only a pid */
+struct pid *ftrace_pid_trace;
+static struct pid * const ftrace_swapper_pid = &init_struct_pid;
+
#ifdef CONFIG_DYNAMIC_FTRACE
+
#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif
-/*
- * Since MCOUNT_ADDR may point to mcount itself, we do not want
- * to get it confused by reading a reference in the code as we
- * are parsing on objcopy output of text. Use a variable for
- * it instead.
- */
-static unsigned long mcount_addr = MCOUNT_ADDR;
+static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
+
+struct ftrace_func_probe {
+ struct hlist_node node;
+ struct ftrace_probe_ops *ops;
+ unsigned long flags;
+ unsigned long ip;
+ void *data;
+ struct rcu_head rcu;
+};
+
enum {
FTRACE_ENABLE_CALLS = (1 << 0),
@@ -284,13 +274,13 @@ enum {
static int ftrace_filtered;
-static LIST_HEAD(ftrace_new_addrs);
+static struct dyn_ftrace *ftrace_new_addrs;
static DEFINE_MUTEX(ftrace_regex_lock);
struct ftrace_page {
struct ftrace_page *next;
- unsigned long index;
+ int index;
struct dyn_ftrace records[];
};
@@ -305,6 +295,19 @@ static struct ftrace_page *ftrace_pages;
static struct dyn_ftrace *ftrace_free_records;
+/*
+ * This is a double for loop. Do not use 'break' to break out of the
+ * loop; you must use a goto.
+ */
+#define do_for_each_ftrace_rec(pg, rec) \
+ for (pg = ftrace_pages_start; pg; pg = pg->next) { \
+ int _____i; \
+ for (_____i = 0; _____i < pg->index; _____i++) { \
+ rec = &pg->records[_____i];
+
+#define while_for_each_ftrace_rec() \
+ } \
+ }
#ifdef CONFIG_KPROBES
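The two macros above open nested for loops, so a bare 'break' only exits the inner one, as the comment warns. A minimal sketch of the intended pattern (the function name is hypothetical; ftrace_release() below follows the same shape, with the caller holding ftrace_lock):

static void example_walk_records(unsigned long s, unsigned long e)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;

	do_for_each_ftrace_rec(pg, rec) {
		if (rec->ip >= s && rec->ip < e)
			goto found;	/* 'break' would only leave the inner loop */
	} while_for_each_ftrace_rec();
	return;
 found:
	ftrace_free_rec(rec);	/* rec points at the matching record */
}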
@@ -338,7 +341,7 @@ static inline int record_frozen(struct dyn_ftrace *rec)
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
- rec->ip = (unsigned long)ftrace_free_records;
+ rec->freelist = ftrace_free_records;
ftrace_free_records = rec;
rec->flags |= FTRACE_FL_FREE;
}
@@ -349,23 +352,22 @@ void ftrace_release(void *start, unsigned long size)
struct ftrace_page *pg;
unsigned long s = (unsigned long)start;
unsigned long e = s + size;
- int i;
if (ftrace_disabled || !start)
return;
- /* should not be called from interrupt context */
- spin_lock(&ftrace_lock);
-
- for (pg = ftrace_pages_start; pg; pg = pg->next) {
- for (i = 0; i < pg->index; i++) {
- rec = &pg->records[i];
-
- if ((rec->ip >= s) && (rec->ip < e))
- ftrace_free_rec(rec);
+ mutex_lock(&ftrace_lock);
+ do_for_each_ftrace_rec(pg, rec) {
+ if ((rec->ip >= s) && (rec->ip < e)) {
+ /*
+ * rec->ip is changed in ftrace_free_rec()
+ * It should not be between s and e if the record was freed.
+ */
+ FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE);
+ ftrace_free_rec(rec);
}
- }
- spin_unlock(&ftrace_lock);
+ } while_for_each_ftrace_rec();
+ mutex_unlock(&ftrace_lock);
}
static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
@@ -382,7 +384,7 @@ static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
return NULL;
}
- ftrace_free_records = (void *)rec->ip;
+ ftrace_free_records = rec->freelist;
memset(rec, 0, sizeof(*rec));
return rec;
}
@@ -414,8 +416,8 @@ ftrace_record_ip(unsigned long ip)
return NULL;
rec->ip = ip;
-
- list_add(&rec->list, &ftrace_new_addrs);
+ rec->newlist = ftrace_new_addrs;
+ ftrace_new_addrs = rec;
return rec;
}
@@ -461,10 +463,10 @@ static void ftrace_bug(int failed, unsigned long ip)
static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
- unsigned long ip, fl;
unsigned long ftrace_addr;
+ unsigned long ip, fl;
- ftrace_addr = (unsigned long)ftrace_caller;
+ ftrace_addr = (unsigned long)FTRACE_ADDR;
ip = rec->ip;
@@ -473,7 +475,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
* it is not enabled then do nothing.
*
* If this record is not to be traced and
- * it is enabled then disabled it.
+ * it is enabled then disable it.
*
*/
if (rec->flags & FTRACE_FL_NOTRACE) {
@@ -493,7 +495,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
return 0;
- /* Record is not filtered and is not enabled do nothing */
+ /* Record is not filtered or enabled, do nothing */
if (!fl)
return 0;
@@ -515,7 +517,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
} else {
- /* if record is not enabled do nothing */
+ /* if record is not enabled, do nothing */
if (!(rec->flags & FTRACE_FL_ENABLED))
return 0;
@@ -531,41 +533,41 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
static void ftrace_replace_code(int enable)
{
- int i, failed;
struct dyn_ftrace *rec;
struct ftrace_page *pg;
+ int failed;
- for (pg = ftrace_pages_start; pg; pg = pg->next) {
- for (i = 0; i < pg->index; i++) {
- rec = &pg->records[i];
-
- /*
- * Skip over free records and records that have
- * failed.
- */
- if (rec->flags & FTRACE_FL_FREE ||
- rec->flags & FTRACE_FL_FAILED)
- continue;
+ do_for_each_ftrace_rec(pg, rec) {
+ /*
+ * Skip over free records, records that have
+ * failed and not converted.
+ */
+ if (rec->flags & FTRACE_FL_FREE ||
+ rec->flags & FTRACE_FL_FAILED ||
+ !(rec->flags & FTRACE_FL_CONVERTED))
+ continue;
- /* ignore updates to this record's mcount site */
- if (get_kprobe((void *)rec->ip)) {
- freeze_record(rec);
- continue;
- } else {
- unfreeze_record(rec);
- }
+ /* ignore updates to this record's mcount site */
+ if (get_kprobe((void *)rec->ip)) {
+ freeze_record(rec);
+ continue;
+ } else {
+ unfreeze_record(rec);
+ }
- failed = __ftrace_replace_code(rec, enable);
- if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
- rec->flags |= FTRACE_FL_FAILED;
- if ((system_state == SYSTEM_BOOTING) ||
- !core_kernel_text(rec->ip)) {
- ftrace_free_rec(rec);
- } else
- ftrace_bug(failed, rec->ip);
- }
+ failed = __ftrace_replace_code(rec, enable);
+ if (failed) {
+ rec->flags |= FTRACE_FL_FAILED;
+ if ((system_state == SYSTEM_BOOTING) ||
+ !core_kernel_text(rec->ip)) {
+ ftrace_free_rec(rec);
+ } else {
+ ftrace_bug(failed, rec->ip);
+ /* Stop processing */
+ return;
+ }
}
- }
+ } while_for_each_ftrace_rec();
}
static int
@@ -576,7 +578,7 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
ip = rec->ip;
- ret = ftrace_make_nop(mod, rec, mcount_addr);
+ ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
if (ret) {
ftrace_bug(ret, ip);
rec->flags |= FTRACE_FL_FAILED;
@@ -585,6 +587,24 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
return 1;
}
+/*
+ * archs can override this function if they must do something
+ * before the modifying code is performed.
+ */
+int __weak ftrace_arch_code_modify_prepare(void)
+{
+ return 0;
+}
+
+/*
+ * archs can override this function if they must do something
+ * after the modifying code is performed.
+ */
+int __weak ftrace_arch_code_modify_post_process(void)
+{
+ return 0;
+}
+
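These weak hooks bracket the text patching done in ftrace_run_update_code(). As a sketch of what an arch override can look like, modeled loosely on x86, which toggles kernel text protection around the modification (the helper names are the arch's own and may differ):

/* arch/<arch>/kernel/ftrace.c -- illustrative only */
int ftrace_arch_code_modify_prepare(void)
{
	set_kernel_text_rw();		/* make .text writable for patching */
	return 0;
}

int ftrace_arch_code_modify_post_process(void)
{
	set_kernel_text_ro();		/* restore read-only protection */
	return 0;
}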
static int __ftrace_modify_code(void *data)
{
int *command = data;
@@ -607,7 +627,17 @@ static int __ftrace_modify_code(void *data)
static void ftrace_run_update_code(int command)
{
+ int ret;
+
+ ret = ftrace_arch_code_modify_prepare();
+ FTRACE_WARN_ON(ret);
+ if (ret)
+ return;
+
stop_machine(__ftrace_modify_code, &command, NULL);
+
+ ret = ftrace_arch_code_modify_post_process();
+ FTRACE_WARN_ON(ret);
}
static ftrace_func_t saved_ftrace_func;
@@ -631,13 +661,10 @@ static void ftrace_startup(int command)
if (unlikely(ftrace_disabled))
return;
- mutex_lock(&ftrace_start_lock);
ftrace_start_up++;
command |= FTRACE_ENABLE_CALLS;
ftrace_startup_enable(command);
-
- mutex_unlock(&ftrace_start_lock);
}
static void ftrace_shutdown(int command)
@@ -645,7 +672,6 @@ static void ftrace_shutdown(int command)
if (unlikely(ftrace_disabled))
return;
- mutex_lock(&ftrace_start_lock);
ftrace_start_up--;
if (!ftrace_start_up)
command |= FTRACE_DISABLE_CALLS;
@@ -656,11 +682,9 @@ static void ftrace_shutdown(int command)
}
if (!command || !ftrace_enabled)
- goto out;
+ return;
ftrace_run_update_code(command);
- out:
- mutex_unlock(&ftrace_start_lock);
}
static void ftrace_startup_sysctl(void)
@@ -670,7 +694,6 @@ static void ftrace_startup_sysctl(void)
if (unlikely(ftrace_disabled))
return;
- mutex_lock(&ftrace_start_lock);
/* Force update next time */
saved_ftrace_func = NULL;
/* ftrace_start_up is true if we want ftrace running */
@@ -678,7 +701,6 @@ static void ftrace_startup_sysctl(void)
command |= FTRACE_ENABLE_CALLS;
ftrace_run_update_code(command);
- mutex_unlock(&ftrace_start_lock);
}
static void ftrace_shutdown_sysctl(void)
@@ -688,13 +710,11 @@ static void ftrace_shutdown_sysctl(void)
if (unlikely(ftrace_disabled))
return;
- mutex_lock(&ftrace_start_lock);
/* ftrace_start_up is true if ftrace is running */
if (ftrace_start_up)
command |= FTRACE_DISABLE_CALLS;
ftrace_run_update_code(command);
- mutex_unlock(&ftrace_start_lock);
}
static cycle_t ftrace_update_time;
@@ -703,19 +723,21 @@ unsigned long ftrace_update_tot_cnt;
static int ftrace_update_code(struct module *mod)
{
- struct dyn_ftrace *p, *t;
+ struct dyn_ftrace *p;
cycle_t start, stop;
start = ftrace_now(raw_smp_processor_id());
ftrace_update_cnt = 0;
- list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {
+ while (ftrace_new_addrs) {
/* If something went wrong, bail without enabling anything */
if (unlikely(ftrace_disabled))
return -1;
- list_del_init(&p->list);
+ p = ftrace_new_addrs;
+ ftrace_new_addrs = p->newlist;
+ p->flags = 0L;
/* convert record (i.e, patch mcount-call with NOP) */
if (ftrace_code_disable(mod, p)) {
@@ -781,13 +803,16 @@ enum {
FTRACE_ITER_CONT = (1 << 1),
FTRACE_ITER_NOTRACE = (1 << 2),
FTRACE_ITER_FAILURES = (1 << 3),
+ FTRACE_ITER_PRINTALL = (1 << 4),
+ FTRACE_ITER_HASH = (1 << 5),
};
#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
struct ftrace_iterator {
struct ftrace_page *pg;
- unsigned idx;
+ int hidx;
+ int idx;
unsigned flags;
unsigned char buffer[FTRACE_BUFF_MAX+1];
unsigned buffer_idx;
@@ -795,15 +820,89 @@ struct ftrace_iterator {
};
static void *
+t_hash_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ struct ftrace_iterator *iter = m->private;
+ struct hlist_node *hnd = v;
+ struct hlist_head *hhd;
+
+ WARN_ON(!(iter->flags & FTRACE_ITER_HASH));
+
+ (*pos)++;
+
+ retry:
+ if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
+ return NULL;
+
+ hhd = &ftrace_func_hash[iter->hidx];
+
+ if (hlist_empty(hhd)) {
+ iter->hidx++;
+ hnd = NULL;
+ goto retry;
+ }
+
+ if (!hnd)
+ hnd = hhd->first;
+ else {
+ hnd = hnd->next;
+ if (!hnd) {
+ iter->hidx++;
+ goto retry;
+ }
+ }
+
+ return hnd;
+}
+
+static void *t_hash_start(struct seq_file *m, loff_t *pos)
+{
+ struct ftrace_iterator *iter = m->private;
+ void *p = NULL;
+
+ iter->flags |= FTRACE_ITER_HASH;
+
+ return t_hash_next(m, p, pos);
+}
+
+static int t_hash_show(struct seq_file *m, void *v)
+{
+ struct ftrace_func_probe *rec;
+ struct hlist_node *hnd = v;
+ char str[KSYM_SYMBOL_LEN];
+
+ rec = hlist_entry(hnd, struct ftrace_func_probe, node);
+
+ if (rec->ops->print)
+ return rec->ops->print(m, rec->ip, rec->ops, rec->data);
+
+ kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
+ seq_printf(m, "%s:", str);
+
+ kallsyms_lookup((unsigned long)rec->ops->func, NULL, NULL, NULL, str);
+ seq_printf(m, "%s", str);
+
+ if (rec->data)
+ seq_printf(m, ":%p", rec->data);
+ seq_putc(m, '\n');
+
+ return 0;
+}
+
+static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
struct ftrace_iterator *iter = m->private;
struct dyn_ftrace *rec = NULL;
+ if (iter->flags & FTRACE_ITER_HASH)
+ return t_hash_next(m, v, pos);
+
(*pos)++;
- /* should not be called from interrupt context */
- spin_lock(&ftrace_lock);
+ if (iter->flags & FTRACE_ITER_PRINTALL)
+ return NULL;
+
retry:
if (iter->idx >= iter->pg->index) {
if (iter->pg->next) {
@@ -832,7 +931,6 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
goto retry;
}
}
- spin_unlock(&ftrace_lock);
return rec;
}
@@ -842,6 +940,23 @@ static void *t_start(struct seq_file *m, loff_t *pos)
struct ftrace_iterator *iter = m->private;
void *p = NULL;
+ mutex_lock(&ftrace_lock);
+ /*
+ * For set_ftrace_filter reading, if we have the filter
+ * off, we can short cut and just print out that all
+ * functions are enabled.
+ */
+ if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
+ if (*pos > 0)
+ return t_hash_start(m, pos);
+ iter->flags |= FTRACE_ITER_PRINTALL;
+ (*pos)++;
+ return iter;
+ }
+
+ if (iter->flags & FTRACE_ITER_HASH)
+ return t_hash_start(m, pos);
+
if (*pos > 0) {
if (iter->idx < 0)
return p;
@@ -851,18 +966,31 @@ static void *t_start(struct seq_file *m, loff_t *pos)
p = t_next(m, p, pos);
+ if (!p)
+ return t_hash_start(m, pos);
+
return p;
}
static void t_stop(struct seq_file *m, void *p)
{
+ mutex_unlock(&ftrace_lock);
}
static int t_show(struct seq_file *m, void *v)
{
+ struct ftrace_iterator *iter = m->private;
struct dyn_ftrace *rec = v;
char str[KSYM_SYMBOL_LEN];
+ if (iter->flags & FTRACE_ITER_HASH)
+ return t_hash_show(m, v);
+
+ if (iter->flags & FTRACE_ITER_PRINTALL) {
+ seq_printf(m, "#### all functions enabled ####\n");
+ return 0;
+ }
+
if (!rec)
return 0;
@@ -941,23 +1069,16 @@ static void ftrace_filter_reset(int enable)
struct ftrace_page *pg;
struct dyn_ftrace *rec;
unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
- unsigned i;
- /* should not be called from interrupt context */
- spin_lock(&ftrace_lock);
+ mutex_lock(&ftrace_lock);
if (enable)
ftrace_filtered = 0;
- pg = ftrace_pages_start;
- while (pg) {
- for (i = 0; i < pg->index; i++) {
- rec = &pg->records[i];
- if (rec->flags & FTRACE_FL_FAILED)
- continue;
- rec->flags &= ~type;
- }
- pg = pg->next;
- }
- spin_unlock(&ftrace_lock);
+ do_for_each_ftrace_rec(pg, rec) {
+ if (rec->flags & FTRACE_FL_FAILED)
+ continue;
+ rec->flags &= ~type;
+ } while_for_each_ftrace_rec();
+ mutex_unlock(&ftrace_lock);
}
static int
@@ -1008,16 +1129,6 @@ ftrace_notrace_open(struct inode *inode, struct file *file)
return ftrace_regex_open(inode, file, 0);
}
-static ssize_t
-ftrace_regex_read(struct file *file, char __user *ubuf,
- size_t cnt, loff_t *ppos)
-{
- if (file->f_mode & FMODE_READ)
- return seq_read(file, ubuf, cnt, ppos);
- else
- return -EPERM;
-}
-
static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
@@ -1038,86 +1149,536 @@ enum {
MATCH_END_ONLY,
};
-static void
-ftrace_match(unsigned char *buff, int len, int enable)
+/*
+ * (static function - no need for kernel doc)
+ *
+ * Pass in a buffer containing a glob and this function will
+ * set search to point to the search part of the buffer and
+ * return the type of search it is (see enum above).
+ * This does modify buff.
+ *
+ * Returns enum type.
+ * search returns the pointer to use for comparison.
+ * not returns 1 if buff started with a '!'
+ * 0 otherwise.
+ */
+static int
+ftrace_setup_glob(char *buff, int len, char **search, int *not)
{
- char str[KSYM_SYMBOL_LEN];
- char *search = NULL;
- struct ftrace_page *pg;
- struct dyn_ftrace *rec;
int type = MATCH_FULL;
- unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
- unsigned i, match = 0, search_len = 0;
- int not = 0;
+ int i;
if (buff[0] == '!') {
- not = 1;
+ *not = 1;
buff++;
len--;
- }
+ } else
+ *not = 0;
+
+ *search = buff;
for (i = 0; i < len; i++) {
if (buff[i] == '*') {
if (!i) {
- search = buff + i + 1;
+ *search = buff + 1;
type = MATCH_END_ONLY;
- search_len = len - (i + 1);
} else {
- if (type == MATCH_END_ONLY) {
+ if (type == MATCH_END_ONLY)
type = MATCH_MIDDLE_ONLY;
- } else {
- match = i;
+ else
type = MATCH_FRONT_ONLY;
- }
buff[i] = 0;
break;
}
}
}
- /* should not be called from interrupt context */
- spin_lock(&ftrace_lock);
- if (enable)
- ftrace_filtered = 1;
- pg = ftrace_pages_start;
- while (pg) {
- for (i = 0; i < pg->index; i++) {
- int matched = 0;
- char *ptr;
-
- rec = &pg->records[i];
- if (rec->flags & FTRACE_FL_FAILED)
+ return type;
+}
+
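A few worked examples of the parse, handy when reading ftrace_match() below (illustrative; note the trailing '*' is overwritten with a NUL in place, as the comment above says):

/*
 *   buff         ->  type                *search       *not
 *   "spin_lock"      MATCH_FULL          "spin_lock"    0
 *   "spin_*"         MATCH_FRONT_ONLY    "spin_"        0
 *   "*_lock"         MATCH_END_ONLY      "_lock"        0
 *   "*spin*"         MATCH_MIDDLE_ONLY   "spin"         0
 *   "!spin_*"        MATCH_FRONT_ONLY    "spin_"        1
 */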
+static int ftrace_match(char *str, char *regex, int len, int type)
+{
+ int matched = 0;
+ char *ptr;
+
+ switch (type) {
+ case MATCH_FULL:
+ if (strcmp(str, regex) == 0)
+ matched = 1;
+ break;
+ case MATCH_FRONT_ONLY:
+ if (strncmp(str, regex, len) == 0)
+ matched = 1;
+ break;
+ case MATCH_MIDDLE_ONLY:
+ if (strstr(str, regex))
+ matched = 1;
+ break;
+ case MATCH_END_ONLY:
+ ptr = strstr(str, regex);
+ if (ptr && (ptr[len] == 0))
+ matched = 1;
+ break;
+ }
+
+ return matched;
+}
+
+static int
+ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type)
+{
+ char str[KSYM_SYMBOL_LEN];
+
+ kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
+ return ftrace_match(str, regex, len, type);
+}
+
+static void ftrace_match_records(char *buff, int len, int enable)
+{
+ unsigned int search_len;
+ struct ftrace_page *pg;
+ struct dyn_ftrace *rec;
+ unsigned long flag;
+ char *search;
+ int type;
+ int not;
+
+ flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
+ type = ftrace_setup_glob(buff, len, &search, &not);
+
+ search_len = strlen(search);
+
+ mutex_lock(&ftrace_lock);
+ do_for_each_ftrace_rec(pg, rec) {
+
+ if (rec->flags & FTRACE_FL_FAILED)
+ continue;
+
+ if (ftrace_match_record(rec, search, search_len, type)) {
+ if (not)
+ rec->flags &= ~flag;
+ else
+ rec->flags |= flag;
+ }
+ /*
+ * Only enable filtering if we have a function that
+ * is filtered on.
+ */
+ if (enable && (rec->flags & FTRACE_FL_FILTER))
+ ftrace_filtered = 1;
+ } while_for_each_ftrace_rec();
+ mutex_unlock(&ftrace_lock);
+}
+
+static int
+ftrace_match_module_record(struct dyn_ftrace *rec, char *mod,
+ char *regex, int len, int type)
+{
+ char str[KSYM_SYMBOL_LEN];
+ char *modname;
+
+ kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
+
+ if (!modname || strcmp(modname, mod))
+ return 0;
+
+ /* blank search means to match all funcs in the mod */
+ if (len)
+ return ftrace_match(str, regex, len, type);
+ else
+ return 1;
+}
+
+static void ftrace_match_module_records(char *buff, char *mod, int enable)
+{
+ unsigned search_len = 0;
+ struct ftrace_page *pg;
+ struct dyn_ftrace *rec;
+ int type = MATCH_FULL;
+ char *search = buff;
+ unsigned long flag;
+ int not = 0;
+
+ flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
+
+ /* blank or '*' mean the same */
+ if (strcmp(buff, "*") == 0)
+ buff[0] = 0;
+
+ /* handle the case of "don't filter this module" */
+ if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
+ buff[0] = 0;
+ not = 1;
+ }
+
+ if (strlen(buff)) {
+ type = ftrace_setup_glob(buff, strlen(buff), &search, &not);
+ search_len = strlen(search);
+ }
+
+ mutex_lock(&ftrace_lock);
+ do_for_each_ftrace_rec(pg, rec) {
+
+ if (rec->flags & FTRACE_FL_FAILED)
+ continue;
+
+ if (ftrace_match_module_record(rec, mod,
+ search, search_len, type)) {
+ if (not)
+ rec->flags &= ~flag;
+ else
+ rec->flags |= flag;
+ }
+ if (enable && (rec->flags & FTRACE_FL_FILTER))
+ ftrace_filtered = 1;
+
+ } while_for_each_ftrace_rec();
+ mutex_unlock(&ftrace_lock);
+}
+
+/*
+ * We register the module command as a template to show others how
+ * to register a command as well.
+ */
+
+static int
+ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
+{
+ char *mod;
+
+ /*
+ * cmd == 'mod' because we only registered this func
+ * for the 'mod' ftrace_func_command.
+ * But if you register one func with multiple commands,
+ * you can tell which command was used by the cmd
+ * parameter.
+ */
+
+ /* we must have a module name */
+ if (!param)
+ return -EINVAL;
+
+ mod = strsep(&param, ":");
+ if (!strlen(mod))
+ return -EINVAL;
+
+ ftrace_match_module_records(func, mod, enable);
+ return 0;
+}
+
+static struct ftrace_func_command ftrace_mod_cmd = {
+ .name = "mod",
+ .func = ftrace_mod_callback,
+};
+
+static int __init ftrace_mod_cmd_init(void)
+{
+ return register_ftrace_command(&ftrace_mod_cmd);
+}
+device_initcall(ftrace_mod_cmd_init);
+
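Following that template, a sketch of how another command would hook in; every 'mycmd' name here is hypothetical. Writing "foo*:mycmd:bar" to set_ftrace_filter would then reach the callback with func = "foo*", cmd = "mycmd", param = "bar":

static int mycmd_callback(char *func, char *cmd, char *param, int enable)
{
	/* act on the functions matching 'func' here */
	return 0;
}

static struct ftrace_func_command mycmd_cmd = {
	.name	= "mycmd",
	.func	= mycmd_callback,
};

static int __init mycmd_init(void)
{
	return register_ftrace_command(&mycmd_cmd);
}
device_initcall(mycmd_init);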
+static void
+function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
+{
+ struct ftrace_func_probe *entry;
+ struct hlist_head *hhd;
+ struct hlist_node *n;
+ unsigned long key;
+ int resched;
+
+ key = hash_long(ip, FTRACE_HASH_BITS);
+
+ hhd = &ftrace_func_hash[key];
+
+ if (hlist_empty(hhd))
+ return;
+
+ /*
+ * Disable preemption for these calls to prevent an RCU grace
+ * period. This syncs the hash iteration and freeing of items
+ * on the hash. rcu_read_lock is too dangerous here.
+ */
+ resched = ftrace_preempt_disable();
+ hlist_for_each_entry_rcu(entry, n, hhd, node) {
+ if (entry->ip == ip)
+ entry->ops->func(ip, parent_ip, &entry->data);
+ }
+ ftrace_preempt_enable(resched);
+}
+
+static struct ftrace_ops trace_probe_ops __read_mostly =
+{
+ .func = function_trace_probe_call,
+};
+
+static int ftrace_probe_registered;
+
+static void __enable_ftrace_function_probe(void)
+{
+ int i;
+
+ if (ftrace_probe_registered)
+ return;
+
+ for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
+ struct hlist_head *hhd = &ftrace_func_hash[i];
+ if (hhd->first)
+ break;
+ }
+ /* Nothing registered? */
+ if (i == FTRACE_FUNC_HASHSIZE)
+ return;
+
+ __register_ftrace_function(&trace_probe_ops);
+ ftrace_startup(0);
+ ftrace_probe_registered = 1;
+}
+
+static void __disable_ftrace_function_probe(void)
+{
+ int i;
+
+ if (!ftrace_probe_registered)
+ return;
+
+ for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
+ struct hlist_head *hhd = &ftrace_func_hash[i];
+ if (hhd->first)
+ return;
+ }
+
+ /* no more funcs left */
+ __unregister_ftrace_function(&trace_probe_ops);
+ ftrace_shutdown(0);
+ ftrace_probe_registered = 0;
+}
+
+
+static void ftrace_free_entry_rcu(struct rcu_head *rhp)
+{
+ struct ftrace_func_probe *entry =
+ container_of(rhp, struct ftrace_func_probe, rcu);
+
+ if (entry->ops->free)
+ entry->ops->free(&entry->data);
+ kfree(entry);
+}
+
+
+int
+register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
+ void *data)
+{
+ struct ftrace_func_probe *entry;
+ struct ftrace_page *pg;
+ struct dyn_ftrace *rec;
+ int type, len, not;
+ unsigned long key;
+ int count = 0;
+ char *search;
+
+ type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
+ len = strlen(search);
+
+ /* we do not support '!' for function probes */
+ if (WARN_ON(not))
+ return -EINVAL;
+
+ mutex_lock(&ftrace_lock);
+ do_for_each_ftrace_rec(pg, rec) {
+
+ if (rec->flags & FTRACE_FL_FAILED)
+ continue;
+
+ if (!ftrace_match_record(rec, search, len, type))
+ continue;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ /* If we did not process any, then return error */
+ if (!count)
+ count = -ENOMEM;
+ goto out_unlock;
+ }
+
+ count++;
+
+ entry->data = data;
+
+ /*
+ * The caller might want to do something special
+ * for each function we find. We call the callback
+ * to give the caller an opportunity to do so.
+ */
+ if (ops->callback) {
+ if (ops->callback(rec->ip, &entry->data) < 0) {
+ /* caller does not like this func */
+ kfree(entry);
continue;
- kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
- switch (type) {
- case MATCH_FULL:
- if (strcmp(str, buff) == 0)
- matched = 1;
- break;
- case MATCH_FRONT_ONLY:
- if (memcmp(str, buff, match) == 0)
- matched = 1;
- break;
- case MATCH_MIDDLE_ONLY:
- if (strstr(str, search))
- matched = 1;
- break;
- case MATCH_END_ONLY:
- ptr = strstr(str, search);
- if (ptr && (ptr[search_len] == 0))
- matched = 1;
- break;
}
- if (matched) {
- if (not)
- rec->flags &= ~flag;
- else
- rec->flags |= flag;
+ }
+
+ entry->ops = ops;
+ entry->ip = rec->ip;
+
+ key = hash_long(entry->ip, FTRACE_HASH_BITS);
+ hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
+
+ } while_for_each_ftrace_rec();
+ __enable_ftrace_function_probe();
+
+ out_unlock:
+ mutex_unlock(&ftrace_lock);
+
+ return count;
+}
+
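A sketch of a caller of this interface; the names are hypothetical, and the handler signature follows the entry->ops->func(ip, parent_ip, &entry->data) call in function_trace_probe_call() above:

static void my_probe_handler(unsigned long ip, unsigned long parent_ip,
			     void **data)
{
	/* runs from the function tracer each time 'ip' is hit */
}

static struct ftrace_probe_ops my_probe_ops = {
	.func	= my_probe_handler,
	/* .callback, .print and .free are optional; see their uses above */
};

static int __init my_probe_init(void)
{
	int count;

	/* hook every function whose name starts with "schedule" */
	count = register_ftrace_function_probe("schedule*", &my_probe_ops, NULL);
	return count < 0 ? count : 0;	/* count of hooked functions, or -ENOMEM */
}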
+enum {
+ PROBE_TEST_FUNC = 1,
+ PROBE_TEST_DATA = 2
+};
+
+static void
+__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
+ void *data, int flags)
+{
+ struct ftrace_func_probe *entry;
+ struct hlist_node *n, *tmp;
+ char str[KSYM_SYMBOL_LEN];
+ int type = MATCH_FULL;
+ int i, len = 0;
+ char *search;
+
+ if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
+ glob = NULL;
+ else {
+ int not;
+
+ type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
+ len = strlen(search);
+
+ /* we do not support '!' for function probes */
+ if (WARN_ON(not))
+ return;
+ }
+
+ mutex_lock(&ftrace_lock);
+ for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
+ struct hlist_head *hhd = &ftrace_func_hash[i];
+
+ hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
+
+ /* break up if statements for readability */
+ if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
+ continue;
+
+ if ((flags & PROBE_TEST_DATA) && entry->data != data)
+ continue;
+
+ /* do this last, since it is the most expensive */
+ if (glob) {
+ kallsyms_lookup(entry->ip, NULL, NULL,
+ NULL, str);
+ if (!ftrace_match(str, glob, len, type))
+ continue;
}
+
+ hlist_del(&entry->node);
+ call_rcu(&entry->rcu, ftrace_free_entry_rcu);
}
- pg = pg->next;
}
- spin_unlock(&ftrace_lock);
+ __disable_ftrace_function_probe();
+ mutex_unlock(&ftrace_lock);
+}
+
+void
+unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
+ void *data)
+{
+ __unregister_ftrace_function_probe(glob, ops, data,
+ PROBE_TEST_FUNC | PROBE_TEST_DATA);
+}
+
+void
+unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
+{
+ __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
+}
+
+void unregister_ftrace_function_probe_all(char *glob)
+{
+ __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
+}
+
+static LIST_HEAD(ftrace_commands);
+static DEFINE_MUTEX(ftrace_cmd_mutex);
+
+int register_ftrace_command(struct ftrace_func_command *cmd)
+{
+ struct ftrace_func_command *p;
+ int ret = 0;
+
+ mutex_lock(&ftrace_cmd_mutex);
+ list_for_each_entry(p, &ftrace_commands, list) {
+ if (strcmp(cmd->name, p->name) == 0) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+ }
+ list_add(&cmd->list, &ftrace_commands);
+ out_unlock:
+ mutex_unlock(&ftrace_cmd_mutex);
+
+ return ret;
+}
+
+int unregister_ftrace_command(struct ftrace_func_command *cmd)
+{
+ struct ftrace_func_command *p, *n;
+ int ret = -ENODEV;
+
+ mutex_lock(&ftrace_cmd_mutex);
+ list_for_each_entry_safe(p, n, &ftrace_commands, list) {
+ if (strcmp(cmd->name, p->name) == 0) {
+ ret = 0;
+ list_del_init(&p->list);
+ goto out_unlock;
+ }
+ }
+ out_unlock:
+ mutex_unlock(&ftrace_cmd_mutex);
+
+ return ret;
+}
+
+static int ftrace_process_regex(char *buff, int len, int enable)
+{
+ char *func, *command, *next = buff;
+ struct ftrace_func_command *p;
+ int ret = -EINVAL;
+
+ func = strsep(&next, ":");
+
+ if (!next) {
+ ftrace_match_records(func, len, enable);
+ return 0;
+ }
+
+ /* command found */
+
+ command = strsep(&next, ":");
+
+ mutex_lock(&ftrace_cmd_mutex);
+ list_for_each_entry(p, &ftrace_commands, list) {
+ if (strcmp(p->name, command) == 0) {
+ ret = p->func(func, command, next, enable);
+ goto out_unlock;
+ }
+ }
+ out_unlock:
+ mutex_unlock(&ftrace_cmd_mutex);
+
+ return ret;
}
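To make the accepted syntax concrete, the filter files now take 'func[:command[:param]]'; a couple of illustrative writes and how they dispatch:

/*
 *   "spin_*"          ->  no ':', so ftrace_match_records("spin_*", ...)
 *   "spin_*:mod:kvm"  ->  func = "spin_*", command = "mod", param = "kvm";
 *                         dispatched to ftrace_mod_callback(), which filters
 *                         on matching functions within the kvm module.
 */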
static ssize_t
@@ -1187,7 +1748,10 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
if (isspace(ch)) {
iter->filtered++;
iter->buffer[iter->buffer_idx] = 0;
- ftrace_match(iter->buffer, iter->buffer_idx, enable);
+ ret = ftrace_process_regex(iter->buffer,
+ iter->buffer_idx, enable);
+ if (ret)
+ goto out;
iter->buffer_idx = 0;
} else
iter->flags |= FTRACE_ITER_CONT;
@@ -1226,7 +1790,7 @@ ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
if (reset)
ftrace_filter_reset(enable);
if (buf)
- ftrace_match(buf, len, enable);
+ ftrace_match_records(buf, len, enable);
mutex_unlock(&ftrace_regex_lock);
}
@@ -1276,15 +1840,13 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable)
if (iter->buffer_idx) {
iter->filtered++;
iter->buffer[iter->buffer_idx] = 0;
- ftrace_match(iter->buffer, iter->buffer_idx, enable);
+ ftrace_match_records(iter->buffer, iter->buffer_idx, enable);
}
- mutex_lock(&ftrace_sysctl_lock);
- mutex_lock(&ftrace_start_lock);
+ mutex_lock(&ftrace_lock);
if (ftrace_start_up && ftrace_enabled)
ftrace_run_update_code(FTRACE_ENABLE_CALLS);
- mutex_unlock(&ftrace_start_lock);
- mutex_unlock(&ftrace_sysctl_lock);
+ mutex_unlock(&ftrace_lock);
kfree(iter);
mutex_unlock(&ftrace_regex_lock);
@@ -1303,31 +1865,31 @@ ftrace_notrace_release(struct inode *inode, struct file *file)
return ftrace_regex_release(inode, file, 0);
}
-static struct file_operations ftrace_avail_fops = {
+static const struct file_operations ftrace_avail_fops = {
.open = ftrace_avail_open,
.read = seq_read,
.llseek = seq_lseek,
.release = ftrace_avail_release,
};
-static struct file_operations ftrace_failures_fops = {
+static const struct file_operations ftrace_failures_fops = {
.open = ftrace_failures_open,
.read = seq_read,
.llseek = seq_lseek,
.release = ftrace_avail_release,
};
-static struct file_operations ftrace_filter_fops = {
+static const struct file_operations ftrace_filter_fops = {
.open = ftrace_filter_open,
- .read = ftrace_regex_read,
+ .read = seq_read,
.write = ftrace_filter_write,
.llseek = ftrace_regex_lseek,
.release = ftrace_filter_release,
};
-static struct file_operations ftrace_notrace_fops = {
+static const struct file_operations ftrace_notrace_fops = {
.open = ftrace_notrace_open,
- .read = ftrace_regex_read,
+ .read = seq_read,
.write = ftrace_notrace_write,
.llseek = ftrace_regex_lseek,
.release = ftrace_notrace_release,
@@ -1360,6 +1922,10 @@ static void *g_start(struct seq_file *m, loff_t *pos)
mutex_lock(&graph_lock);
+ /* Nothing set; tell g_show to print that all functions are enabled */
+ if (!ftrace_graph_count && !*pos)
+ return (void *)1;
+
p = g_next(m, p, pos);
return p;
@@ -1378,6 +1944,11 @@ static int g_show(struct seq_file *m, void *v)
if (!ptr)
return 0;
+ if (ptr == (unsigned long *)1) {
+ seq_printf(m, "#### all functions enabled ####\n");
+ return 0;
+ }
+
kallsyms_lookup(*ptr, NULL, NULL, NULL, str);
seq_printf(m, "%s\n", str);
@@ -1420,53 +1991,53 @@ ftrace_graph_open(struct inode *inode, struct file *file)
return ret;
}
-static ssize_t
-ftrace_graph_read(struct file *file, char __user *ubuf,
- size_t cnt, loff_t *ppos)
-{
- if (file->f_mode & FMODE_READ)
- return seq_read(file, ubuf, cnt, ppos);
- else
- return -EPERM;
-}
-
static int
-ftrace_set_func(unsigned long *array, int idx, char *buffer)
+ftrace_set_func(unsigned long *array, int *idx, char *buffer)
{
- char str[KSYM_SYMBOL_LEN];
struct dyn_ftrace *rec;
struct ftrace_page *pg;
+ int search_len;
int found = 0;
- int i, j;
+ int type, not;
+ char *search;
+ bool exists;
+ int i;
if (ftrace_disabled)
return -ENODEV;
- /* should not be called from interrupt context */
- spin_lock(&ftrace_lock);
+ /* decode regex */
+ type = ftrace_setup_glob(buffer, strlen(buffer), &search, &not);
+ if (not)
+ return -EINVAL;
+
+ search_len = strlen(search);
- for (pg = ftrace_pages_start; pg; pg = pg->next) {
- for (i = 0; i < pg->index; i++) {
- rec = &pg->records[i];
+ mutex_lock(&ftrace_lock);
+ do_for_each_ftrace_rec(pg, rec) {
- if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
- continue;
+ if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
+ break;
- kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
- if (strcmp(str, buffer) == 0) {
+ if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
+ continue;
+
+ if (ftrace_match_record(rec, search, search_len, type)) {
+ /* ensure it is not already in the array */
+ exists = false;
+ for (i = 0; i < *idx; i++)
+ if (array[i] == rec->ip) {
+ exists = true;
+ break;
+ }
+ if (!exists) {
+ array[(*idx)++] = rec->ip;
found = 1;
- for (j = 0; j < idx; j++)
- if (array[j] == rec->ip) {
- found = 0;
- break;
- }
- if (found)
- array[idx] = rec->ip;
- break;
}
}
- }
- spin_unlock(&ftrace_lock);
+ } while_for_each_ftrace_rec();
+
+ mutex_unlock(&ftrace_lock);
return found ? 0 : -EINVAL;
}
@@ -1534,13 +2105,11 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
}
buffer[index] = 0;
- /* we allow only one at a time */
- ret = ftrace_set_func(array, ftrace_graph_count, buffer);
+ /* we allow only one expression at a time */
+ ret = ftrace_set_func(array, &ftrace_graph_count, buffer);
if (ret)
goto out;
- ftrace_graph_count++;
-
file->f_pos += read;
ret = read;
@@ -1552,7 +2121,7 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
static const struct file_operations ftrace_graph_fops = {
.open = ftrace_graph_open,
- .read = ftrace_graph_read,
+ .read = seq_read,
.write = ftrace_graph_write,
};
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
@@ -1604,7 +2173,7 @@ static int ftrace_convert_nops(struct module *mod,
unsigned long addr;
unsigned long flags;
- mutex_lock(&ftrace_start_lock);
+ mutex_lock(&ftrace_lock);
p = start;
while (p < end) {
addr = ftrace_call_adjust(*p++);
@@ -1623,7 +2192,7 @@ static int ftrace_convert_nops(struct module *mod,
local_irq_save(flags);
ftrace_update_code(mod);
local_irq_restore(flags);
- mutex_unlock(&ftrace_start_lock);
+ mutex_unlock(&ftrace_lock);
return 0;
}
@@ -1700,7 +2269,7 @@ ftrace_pid_read(struct file *file, char __user *ubuf,
if (ftrace_pid_trace == ftrace_swapper_pid)
r = sprintf(buf, "swapper tasks\n");
else if (ftrace_pid_trace)
- r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace));
+ r = sprintf(buf, "%u\n", pid_vnr(ftrace_pid_trace));
else
r = sprintf(buf, "no pid\n");
@@ -1796,7 +2365,7 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf,
if (ret < 0)
return ret;
- mutex_lock(&ftrace_start_lock);
+ mutex_lock(&ftrace_lock);
if (val < 0) {
/* disable pid tracing */
if (!ftrace_pid_trace)
@@ -1835,12 +2404,12 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf,
ftrace_startup_enable(0);
out:
- mutex_unlock(&ftrace_start_lock);
+ mutex_unlock(&ftrace_lock);
return cnt;
}
-static struct file_operations ftrace_pid_fops = {
+static const struct file_operations ftrace_pid_fops = {
.read = ftrace_pid_read,
.write = ftrace_pid_write,
};
@@ -1863,7 +2432,6 @@ static __init int ftrace_init_debugfs(void)
"'set_ftrace_pid' entry\n");
return 0;
}
-
fs_initcall(ftrace_init_debugfs);
/**
@@ -1898,17 +2466,17 @@ int register_ftrace_function(struct ftrace_ops *ops)
if (unlikely(ftrace_disabled))
return -1;
- mutex_lock(&ftrace_sysctl_lock);
+ mutex_lock(&ftrace_lock);
ret = __register_ftrace_function(ops);
ftrace_startup(0);
- mutex_unlock(&ftrace_sysctl_lock);
+ mutex_unlock(&ftrace_lock);
return ret;
}
/**
- * unregister_ftrace_function - unresgister a function for profiling.
+ * unregister_ftrace_function - unregister a function for profiling.
* @ops - ops structure that holds the function to unregister
*
* Unregister a function that was added to be called by ftrace profiling.
@@ -1917,10 +2485,10 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
{
int ret;
- mutex_lock(&ftrace_sysctl_lock);
+ mutex_lock(&ftrace_lock);
ret = __unregister_ftrace_function(ops);
ftrace_shutdown(0);
- mutex_unlock(&ftrace_sysctl_lock);
+ mutex_unlock(&ftrace_lock);
return ret;
}
@@ -1935,7 +2503,7 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
if (unlikely(ftrace_disabled))
return -ENODEV;
- mutex_lock(&ftrace_sysctl_lock);
+ mutex_lock(&ftrace_lock);
ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
@@ -1964,7 +2532,7 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
}
out:
- mutex_unlock(&ftrace_sysctl_lock);
+ mutex_unlock(&ftrace_lock);
return ret;
}
@@ -2029,11 +2597,43 @@ free:
return ret;
}
+static void
+ftrace_graph_probe_sched_switch(struct rq *__rq, struct task_struct *prev,
+ struct task_struct *next)
+{
+ unsigned long long timestamp;
+ int index;
+
+ /*
+ * Does the user want to count the time a function was asleep.
+ * If so, do not update the time stamps.
+ */
+ if (trace_flags & TRACE_ITER_SLEEP_TIME)
+ return;
+
+ timestamp = trace_clock_local();
+
+ prev->ftrace_timestamp = timestamp;
+
+ /* only process tasks that we timestamped */
+ if (!next->ftrace_timestamp)
+ return;
+
+ /*
+ * Update all the counters in next to make up for the
+ * time next was sleeping.
+ */
+ timestamp -= next->ftrace_timestamp;
+
+ for (index = next->curr_ret_stack; index >= 0; index--)
+ next->ret_stack[index].calltime += timestamp;
+}
+
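A quick worked example of the compensation (numbers illustrative):

/*
 * Suppose next has two pending frames with calltime = 100us and 120us
 * and was asleep for 30us, so the delta computed above is 30us. Both
 * calltimes are bumped to 130us and 150us; when those frames return,
 * rettime - calltime no longer charges the 30us of sleep to the
 * traced functions.
 */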
/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
struct ftrace_ret_stack **ret_stack_list;
- int ret;
+ int ret, cpu;
ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
sizeof(struct ftrace_ret_stack *),
@@ -2042,10 +2642,21 @@ static int start_graph_tracing(void)
if (!ret_stack_list)
return -ENOMEM;
+ /* The cpu_boot init_task->ret_stack will never be freed */
+ for_each_online_cpu(cpu)
+ ftrace_graph_init_task(idle_task(cpu));
+
do {
ret = alloc_retstack_tasklist(ret_stack_list);
} while (ret == -EAGAIN);
+ if (!ret) {
+ ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch);
+ if (ret)
+ pr_info("ftrace_graph: Couldn't activate tracepoint"
+ " probe to kernel_sched_switch\n");
+ }
+
kfree(ret_stack_list);
return ret;
}
@@ -2076,7 +2687,13 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
{
int ret = 0;
- mutex_lock(&ftrace_sysctl_lock);
+ mutex_lock(&ftrace_lock);
+
+ /* we currently allow only one tracer registered at a time */
+ if (atomic_read(&ftrace_graph_active)) {
+ ret = -EBUSY;
+ goto out;
+ }
ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
register_pm_notifier(&ftrace_suspend_notifier);
@@ -2094,21 +2711,26 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
ftrace_startup(FTRACE_START_FUNC_RET);
out:
- mutex_unlock(&ftrace_sysctl_lock);
+ mutex_unlock(&ftrace_lock);
return ret;
}
void unregister_ftrace_graph(void)
{
- mutex_lock(&ftrace_sysctl_lock);
+ mutex_lock(&ftrace_lock);
+
+ if (unlikely(!atomic_read(&ftrace_graph_active)))
+ goto out;
atomic_dec(&ftrace_graph_active);
+ unregister_trace_sched_switch(ftrace_graph_probe_sched_switch);
ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
ftrace_graph_entry = ftrace_graph_entry_stub;
ftrace_shutdown(FTRACE_STOP_FUNC_RET);
unregister_pm_notifier(&ftrace_suspend_notifier);
- mutex_unlock(&ftrace_sysctl_lock);
+ out:
+ mutex_unlock(&ftrace_lock);
}
/* Allocate a return stack for newly created task */
@@ -2123,6 +2745,7 @@ void ftrace_graph_init_task(struct task_struct *t)
t->curr_ret_stack = -1;
atomic_set(&t->tracing_graph_pause, 0);
atomic_set(&t->trace_overrun, 0);
+ t->ftrace_timestamp = 0;
} else
t->ret_stack = NULL;
}