path: root/kernel/trace
author	Steven Rostedt <rostedt@goodmis.org>	2008-10-23 09:33:02 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-10-23 16:00:19 +0200
commit	81adbdc029ecc416d56563e7f159100181dd711d (patch)
tree	ff7ed7b0fb284c22eb30e690a85e2e9e083c4162 /kernel/trace
parent	ab9a0918cbf0fa8883301838df8dbc8fc085ff50 (diff)
ftrace: only have ftrace_kill atomic
When an anomaly is detected, we need a way to completely disable ftrace. Right now we have two functions: ftrace_kill and ftrace_kill_atomic. ftrace_kill tries to disable ftrace in a "nice" way by converting everything back to nops. The "nice" way is dangerous in itself, so this patch removes it and keeps only the "atomic" version, which is all that is needed.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
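What makes the surviving atomic version safe to call from panic code is visible in the hunks below: it performs only plain flag writes (ftrace_disabled, ftrace_enabled) and leaves no locking, code patching or kthread work behind, whereas the removed path took ftrace_sysctl_lock and ftraced_lock, re-ran the code patcher via ftrace_run_update_code() and called kthread_stop(), none of which may be done in atomic context. The following is a minimal user-space C analogue of that flag-flip-and-stub pattern, under assumed illustrative names (trace_fn, trace_call, trace_kill, trace_register); it sketches the idea only and is not the kernel implementation.

/*
 * Minimal user-space sketch of the "atomic kill" pattern this patch
 * keeps (NOT the kernel code; the trace_* names are made up for
 * illustration).  Killing the tracer is nothing but plain stores:
 * two flags and one function-pointer swap.  Nothing here can sleep,
 * which is why the same pattern is usable from a panic path, unlike
 * the removed ftrace_force_shutdown() route that took mutexes,
 * re-patched code and stopped a kthread.
 */
#include <stdio.h>

static int trace_disabled;              /* mirrors ftrace_disabled */
static int trace_enabled = 1;           /* mirrors ftrace_enabled  */

static void trace_stub(unsigned long ip) { (void)ip; }  /* no-op callback */
static void trace_real(unsigned long ip) { printf("traced call at %#lx\n", ip); }

/* the currently installed tracer callback */
static void (*trace_fn)(unsigned long) = trace_real;

/* stand-in for the traced call sites */
static void trace_call(unsigned long ip)
{
	if (trace_enabled)
		trace_fn(ip);
}

/* analogue of the surviving ftrace_kill(): flag writes + pointer swap, no locks */
static void trace_kill(void)
{
	trace_disabled = 1;     /* refuse any future tracer registration  */
	trace_enabled = 0;      /* stop calling into the tracer at all    */
	trace_fn = trace_stub;  /* anyone mid-call still lands in a no-op */
}

/* analogue of a registration function: rejected once the kill flag is set */
static int trace_register(void (*fn)(unsigned long))
{
	if (trace_disabled)
		return -1;
	trace_fn = fn;
	return 0;
}

int main(void)
{
	trace_call(0x1234);     /* traced                     */
	trace_kill();           /* "panic path", never sleeps */
	trace_call(0x5678);     /* silently ignored           */
	printf("re-register after kill: %d\n", trace_register(trace_real));
	return 0;
}

The trace.c hunk at the bottom shows the one caller this change cares about: ftrace_dump() switches from ftrace_kill_atomic() to the renamed ftrace_kill().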
Diffstat (limited to 'kernel/trace')
-rw-r--r--	kernel/trace/ftrace.c	42
-rw-r--r--	kernel/trace/trace.c	2
2 files changed, 3 insertions(+), 41 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index b2de8de7735..93245ae046e 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1549,22 +1549,6 @@ int ftrace_force_update(void)
	return ret;
}
-static void ftrace_force_shutdown(void)
-{
- struct task_struct *task;
- int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;
-
- mutex_lock(&ftraced_lock);
- task = ftraced_task;
- ftraced_task = NULL;
- ftraced_suspend = -1;
- ftrace_run_update_code(command);
- mutex_unlock(&ftraced_lock);
-
- if (task)
- kthread_stop(task);
-}
-
static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
@@ -1795,17 +1779,16 @@ core_initcall(ftrace_dynamic_init);
# define ftrace_shutdown() do { } while (0)
# define ftrace_startup_sysctl() do { } while (0)
# define ftrace_shutdown_sysctl() do { } while (0)
-# define ftrace_force_shutdown() do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */
/**
- * ftrace_kill_atomic - kill ftrace from critical sections
+ * ftrace_kill - kill ftrace
*
* This function should be used by panic code. It stops ftrace
* but in a not so nice way. If you need to simply kill ftrace
* from a non-atomic section, use ftrace_kill.
*/
-void ftrace_kill_atomic(void)
+void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
@@ -1816,27 +1799,6 @@ void ftrace_kill_atomic(void)
}
/**
- * ftrace_kill - totally shutdown ftrace
- *
- * This is a safety measure. If something was detected that seems
- * wrong, calling this function will keep ftrace from doing
- * any more modifications, and updates.
- * used when something went wrong.
- */
-void ftrace_kill(void)
-{
- mutex_lock(&ftrace_sysctl_lock);
- ftrace_disabled = 1;
- ftrace_enabled = 0;
-
- clear_ftrace_function();
- mutex_unlock(&ftrace_sysctl_lock);
-
- /* Try to totally disable ftrace */
- ftrace_force_shutdown();
-}
-
-/**
* register_ftrace_function - register a function for profiling
* @ops - ops structure that holds the function for profiling.
*
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index aeb2f2505bc..333a5162149 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3097,7 +3097,7 @@ void ftrace_dump(void)
	dump_ran = 1;

	/* No turning back! */
-	ftrace_kill_atomic();
+	ftrace_kill();

	for_each_tracing_cpu(cpu) {
		atomic_inc(&global_trace.data[cpu]->disabled);