author     Ingo Molnar <mingo@elte.hu>   2011-07-21 09:32:40 +0200
committer  Ingo Molnar <mingo@elte.hu>   2011-07-21 09:32:40 +0200
commit     40bcea7bbe8fe452a2d272e2ffd3dea281eec9ff (patch)
tree       aedb6d02e53e3cf84cc32fd81db84032cee205e1 /kernel/trace/ftrace.c
parent     492f73a303b488ffd67097b2351d54aa6e6c7c73 (diff)
parent     14a8fd7ceea6915c613746203d6e9a2bf273f16c (diff)
Merge branch 'tip/perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into perf/core
Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r--  kernel/trace/ftrace.c  95
1 files changed, 74 insertions, 21 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index a0e246e2cee..c3e4575e782 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -88,6 +88,7 @@ static struct ftrace_ops ftrace_list_end __read_mostly = {
static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
+static ftrace_func_t __ftrace_trace_function_delay __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
static struct ftrace_ops global_ops;
@@ -146,9 +147,10 @@ void clear_ftrace_function(void)
{
ftrace_trace_function = ftrace_stub;
__ftrace_trace_function = ftrace_stub;
+ __ftrace_trace_function_delay = ftrace_stub;
ftrace_pid_function = ftrace_stub;
}
#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
* For those archs that do not test ftrace_trace_stop in their
@@ -208,7 +211,12 @@ static void update_ftrace_function(void)
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
ftrace_trace_function = func;
#else
+#ifdef CONFIG_DYNAMIC_FTRACE
+ /* do not update till all functions have been modified */
+ __ftrace_trace_function_delay = func;
+#else
__ftrace_trace_function = func;
+#endif
ftrace_trace_function = ftrace_test_stop_func;
#endif
}
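
The delayed assignment in the hunk above is easier to see outside the kernel. Below is a minimal userspace C sketch of the same idea, using made-up names (live_callback, staged_callback, publish_callback) rather than the kernel's: the new function is staged in a second pointer and only copied into the pointer the call sites actually use once all of them have been updated.

/* Userspace sketch of the delayed-callback idea; names are illustrative. */
#include <stdio.h>

typedef void (*trace_fn)(unsigned long ip);

static void trace_stub(unsigned long ip) { (void)ip; }
static void trace_real(unsigned long ip) { printf("traced %#lx\n", ip); }

static trace_fn live_callback = trace_stub;   /* what call sites invoke   */
static trace_fn staged_callback = trace_stub; /* holds the delayed update */

static void set_callback(trace_fn fn)
{
	/* do not touch live_callback yet; wait until patching is finished */
	staged_callback = fn;
}

static void publish_callback(void)
{
	/* run once every call site has been modified */
	live_callback = staged_callback;
}

int main(void)
{
	set_callback(trace_real);
	live_callback(0x1000);  /* still the stub: the update is delayed  */
	publish_callback();
	live_callback(0x2000);  /* the real callback is live from here on */
	return 0;
}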
@@ -1170,8 +1178,14 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
return NULL;
}
+static void
+ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
+static void
+ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);
+
static int
-ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
+ftrace_hash_move(struct ftrace_ops *ops, int enable,
+ struct ftrace_hash **dst, struct ftrace_hash *src)
{
struct ftrace_func_entry *entry;
struct hlist_node *tp, *tn;
@@ -1181,9 +1195,16 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
unsigned long key;
int size = src->count;
int bits = 0;
+ int ret;
int i;
/*
+ * Remove the current set, update the hash and add
+ * them back.
+ */
+ ftrace_hash_rec_disable(ops, enable);
+
+ /*
* If the new source is empty, just free dst and assign it
* the empty_hash.
*/
@@ -1203,9 +1224,10 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
if (bits > FTRACE_HASH_MAX_BITS)
bits = FTRACE_HASH_MAX_BITS;
+ ret = -ENOMEM;
new_hash = alloc_ftrace_hash(bits);
if (!new_hash)
- return -ENOMEM;
+ goto out;
size = 1 << src->size_bits;
for (i = 0; i < size; i++) {
@@ -1224,7 +1246,16 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
rcu_assign_pointer(*dst, new_hash);
free_ftrace_hash_rcu(old_hash);
- return 0;
+ ret = 0;
+ out:
+ /*
+ * Enable regardless of ret:
+ * On success, we enable the new hash.
+ * On failure, we re-enable the original hash.
+ */
+ ftrace_hash_rec_enable(ops, enable);
+
+ return ret;
}
/*
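
The new flow of ftrace_hash_move() in the hunks above, drop the record accounting, swap the contents, then re-enable on a common exit path, can be sketched in plain C. This is only an illustration of the balancing pattern under hypothetical helpers (records_disable(), records_enable(), table_move()); it is not the kernel API.

/* Sketch of the disable -> replace -> re-enable balancing pattern. */
#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct table { int *slots; int nr; };

static void records_disable(struct table *t) { (void)t; /* drop accounting for current slots */ }
static void records_enable(struct table *t)  { (void)t; /* add accounting for current slots  */ }

static int table_move(struct table *t, const int *src, int nr)
{
	int *copy;
	int ret;

	/* Remove the current set, update the table and add it back. */
	records_disable(t);

	ret = -ENOMEM;
	copy = malloc(nr * sizeof(*copy));
	if (!copy)
		goto out;

	memcpy(copy, src, nr * sizeof(*copy));
	free(t->slots);
	t->slots = copy;
	t->nr = nr;
	ret = 0;
 out:
	/*
	 * Re-enable regardless of ret: on success the new contents are
	 * accounted, on failure the original ones are restored.
	 */
	records_enable(t);
	return ret;
}

Keeping the re-enable on the common exit path is what lets a later hunk in this diff drop the open-coded disable/enable pair from ftrace_regex_release().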
@@ -1584,6 +1615,12 @@ static int __ftrace_modify_code(void *data)
{
int *command = data;
+ /*
+ * Do not call function tracer while we update the code.
+ * We are in stop machine, no worrying about races.
+ */
+ function_trace_stop++;
+
if (*command & FTRACE_ENABLE_CALLS)
ftrace_replace_code(1);
else if (*command & FTRACE_DISABLE_CALLS)
@@ -1597,6 +1634,18 @@ static int __ftrace_modify_code(void *data)
else if (*command & FTRACE_STOP_FUNC_RET)
ftrace_disable_ftrace_graph_caller();
+#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+ /*
+ * For archs that call ftrace_test_stop_func(), we must
+ * wait till after we update all the function callers
+ * before we update the callback. This keeps different
+ * ops that record different functions from corrupting
+ * each other.
+ */
+ __ftrace_trace_function = __ftrace_trace_function_delay;
+#endif
+ function_trace_stop--;
+
return 0;
}
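
function_trace_stop is the flag tested either in the arch's mcount code or, on archs without CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST, from C via ftrace_test_stop_func(), so bumping it here keeps the tracer out of the way while the call sites are rewritten. A small userspace sketch of that guard, with illustrative names only (trace_stop, traced_call_site, modify_code), not the kernel symbols:

/* Sketch of a counter-guarded trace call site, checked in C. */
#include <stdio.h>

typedef void (*trace_fn)(unsigned long ip);

static int trace_stop;            /* analogue of function_trace_stop */
static trace_fn current_callback;

static void traced_call_site(unsigned long ip)
{
	/* C-level test for call sites that cannot test the flag themselves */
	if (trace_stop)
		return;
	current_callback(ip);
}

static void print_trace(unsigned long ip) { printf("hit %#lx\n", ip); }

static void modify_code(void)
{
	/* do not call the tracer while the call sites are being rewritten */
	trace_stop++;
	/* ... rewrite call sites here ... */
	trace_stop--;
}

int main(void)
{
	current_callback = print_trace;
	traced_call_site(0x10);  /* traced                                */
	modify_code();
	traced_call_site(0x20);  /* traced again once the update is done  */
	return 0;
}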
@@ -2865,7 +2914,11 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
ftrace_match_records(hash, buf, len);
mutex_lock(&ftrace_lock);
- ret = ftrace_hash_move(orig_hash, hash);
+ ret = ftrace_hash_move(ops, enable, orig_hash, hash);
+ if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
+ && ftrace_enabled)
+ ftrace_run_update_code(FTRACE_ENABLE_CALLS);
+
mutex_unlock(&ftrace_lock);
mutex_unlock(&ftrace_regex_lock);
@@ -3048,18 +3101,12 @@ ftrace_regex_release(struct inode *inode, struct file *file)
orig_hash = &iter->ops->notrace_hash;
mutex_lock(&ftrace_lock);
- /*
- * Remove the current set, update the hash and add
- * them back.
- */
- ftrace_hash_rec_disable(iter->ops, filter_hash);
- ret = ftrace_hash_move(orig_hash, iter->hash);
- if (!ret) {
- ftrace_hash_rec_enable(iter->ops, filter_hash);
- if (iter->ops->flags & FTRACE_OPS_FL_ENABLED
- && ftrace_enabled)
- ftrace_run_update_code(FTRACE_ENABLE_CALLS);
- }
+ ret = ftrace_hash_move(iter->ops, filter_hash,
+ orig_hash, iter->hash);
+ if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
+ && ftrace_enabled)
+ ftrace_run_update_code(FTRACE_ENABLE_CALLS);
+
mutex_unlock(&ftrace_lock);
}
free_ftrace_hash(iter->hash);
@@ -3338,7 +3385,7 @@ static int ftrace_process_locs(struct module *mod,
{
unsigned long *p;
unsigned long addr;
- unsigned long flags;
+ unsigned long flags = 0; /* Shut up gcc */
mutex_lock(&ftrace_lock);
p = start;
@@ -3356,12 +3403,18 @@ static int ftrace_process_locs(struct module *mod,
}
/*
- * Disable interrupts to prevent interrupts from executing
- * code that is being modified.
+ * We only need to disable interrupts on start up
+ * because we are modifying code that an interrupt
+ * may execute, and the modification is not atomic.
+ * But for modules, nothing runs the code we modify
+ * until we are finished with it, and there's no
+ * reason to cause large interrupt latencies while we do it.
*/
- local_irq_save(flags);
+ if (!mod)
+ local_irq_save(flags);
ftrace_update_code(mod);
- local_irq_restore(flags);
+ if (!mod)
+ local_irq_restore(flags);
mutex_unlock(&ftrace_lock);
return 0;
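
The reasoning in the last hunk, exclude concurrently running code only when the code being patched can actually run, can be sketched in userspace with a mutex standing in for local_irq_save()/local_irq_restore(). struct module, update_records() and patch_lock below are stand-ins chosen for illustration, not the kernel interfaces.

/* Sketch: protect the update only for already-live (core kernel) code. */
#include <pthread.h>
#include <stddef.h>

struct module { const char *name; };   /* stand-in; NULL means core kernel */

static pthread_mutex_t patch_lock = PTHREAD_MUTEX_INITIALIZER;

static void update_records(struct module *mod) { (void)mod; /* rewrite call sites */ }

static void process_locs(struct module *mod)
{
	/*
	 * Core kernel code may already be running (even from interrupts),
	 * so exclude it while patching.  Module code is not executable yet,
	 * so skip the exclusion and avoid the added latency.
	 */
	if (!mod)
		pthread_mutex_lock(&patch_lock);

	update_records(mod);

	if (!mod)
		pthread_mutex_unlock(&patch_lock);
}

int main(void)
{
	struct module m = { "example" };

	process_locs(NULL);  /* boot-time core kernel: protected       */
	process_locs(&m);    /* module case: skip the heavy exclusion  */
	return 0;
}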