Diffstat (limited to 'kernel')
-rw-r--r--  kernel/trace/ftrace.c              10
-rw-r--r--  kernel/trace/trace_events.c         1
-rw-r--r--  kernel/trace/trace_functions.c      4
-rw-r--r--  kernel/trace/trace_irqsoff.c        2
-rw-r--r--  kernel/trace/trace_sched_wakeup.c   2
-rw-r--r--  kernel/trace/trace_selftest.c       7
-rw-r--r--  kernel/trace/trace_stack.c          1
7 files changed, 19 insertions(+), 8 deletions(-)
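The patch below adds FTRACE_OPS_FL_RECURSION_SAFE to a set of ftrace_ops definitions, declaring that each callback either cannot recurse into itself or performs its own recursion accounting, so the ftrace core need not wrap it. As a rough sketch of how a callback might opt in under the 3.x-era API (my_trace_call and my_ops are illustrative names, not part of this patch):

#include <linux/ftrace.h>

/* Hypothetical callback invoked at every traced function entry.
 * Setting FTRACE_OPS_FL_RECURSION_SAFE asserts that this function
 * either cannot re-enter itself or does its own recursion checks. */
static void my_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *regs)
{
	/* do minimal, non-traced work here */
}

static struct ftrace_ops my_ops __read_mostly = {
	.func	= my_trace_call,
	.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
};

/* register_ftrace_function(&my_ops) attaches the callback;
 * unregister_ftrace_function(&my_ops) detaches it. */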
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index c55f7e27461..ad765b4ba42 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -66,6 +66,7 @@
static struct ftrace_ops ftrace_list_end __read_mostly = {
.func = ftrace_stub,
+ .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
/* ftrace_enabled is a method to turn ftrace on or off */
@@ -221,12 +222,13 @@ static void update_ftrace_function(void)
/*
* If we are at the end of the list and this ops is
- * not dynamic and the arch supports passing ops, then have the
- * mcount trampoline call the function directly.
+ * recursion safe and not dynamic and the arch supports passing ops,
+ * then have the mcount trampoline call the function directly.
*/
if (ftrace_ops_list == &ftrace_list_end ||
(ftrace_ops_list->next == &ftrace_list_end &&
!(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC) &&
+ (ftrace_ops_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) &&
!FTRACE_FORCE_LIST_FUNC)) {
/* Set the ftrace_ops that the arch callback uses */
if (ftrace_ops_list == &global_ops)
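When these conditions do not all hold -- in particular when the sole registered ops lacks FTRACE_OPS_FL_RECURSION_SAFE -- the callback is reached through the shared list function instead, which can provide recursion protection on its behalf. A minimal single-flag sketch of that idea (not the kernel's actual per-CPU, per-context implementation; guarded_call and tracing_recursion are illustrative names):

static int tracing_recursion;	/* a per-CPU bitmask in the real kernel */

static void guarded_call(struct ftrace_ops *op, unsigned long ip,
			 unsigned long parent_ip)
{
	if (tracing_recursion)
		return;		/* callback re-entered via traced code */
	tracing_recursion = 1;
	op->func(ip, parent_ip, op, NULL);
	tracing_recursion = 0;
}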
@@ -867,6 +869,7 @@ static void unregister_ftrace_profiler(void)
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
.func = function_profile_call,
+ .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
static int register_ftrace_profiler(void)
@@ -1049,6 +1052,7 @@ static struct ftrace_ops global_ops = {
.func = ftrace_stub,
.notrace_hash = EMPTY_HASH,
.filter_hash = EMPTY_HASH,
+ .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
static DEFINE_MUTEX(ftrace_regex_lock);
@@ -3967,6 +3971,7 @@ void __init ftrace_init(void)
static struct ftrace_ops global_ops = {
.func = ftrace_stub,
+ .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
static int __init ftrace_nodyn_init(void)
@@ -4023,6 +4028,7 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
static struct ftrace_ops control_ops = {
.func = ftrace_ops_control_func,
+ .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
static inline void
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 8c669683368..6825d833a25 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1721,6 +1721,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip,
static struct ftrace_ops trace_ops __initdata =
{
.func = function_test_events_call,
+ .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
static __init void event_trace_self_test_with_function(void)
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 5675ebd541f..fdff65dff1b 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -153,13 +153,13 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
static struct ftrace_ops trace_ops __read_mostly =
{
.func = function_trace_call,
- .flags = FTRACE_OPS_FL_GLOBAL,
+ .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};
static struct ftrace_ops trace_stack_ops __read_mostly =
{
.func = function_stack_trace_call,
- .flags = FTRACE_OPS_FL_GLOBAL,
+ .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};
/* Our two options */
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index c7a9ba936de..d98ee8283b2 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -154,7 +154,7 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
static struct ftrace_ops trace_ops __read_mostly =
{
.func = irqsoff_tracer_call,
- .flags = FTRACE_OPS_FL_GLOBAL,
+ .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};
#endif /* CONFIG_FUNCTION_TRACER */
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 7547e36d483..02170c00c41 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -130,7 +130,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
static struct ftrace_ops trace_ops __read_mostly =
{
.func = wakeup_tracer_call,
- .flags = FTRACE_OPS_FL_GLOBAL,
+ .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};
#endif /* CONFIG_FUNCTION_TRACER */
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index add37e019fd..1fb6da85ff8 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -148,19 +148,22 @@ static void trace_selftest_test_dyn_func(unsigned long ip,
static struct ftrace_ops test_probe1 = {
.func = trace_selftest_test_probe1_func,
+ .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
static struct ftrace_ops test_probe2 = {
.func = trace_selftest_test_probe2_func,
+ .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
static struct ftrace_ops test_probe3 = {
.func = trace_selftest_test_probe3_func,
+ .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
static struct ftrace_ops test_global = {
- .func = trace_selftest_test_global_func,
- .flags = FTRACE_OPS_FL_GLOBAL,
+ .func = trace_selftest_test_global_func,
+ .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};
static void print_counts(void)
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 2fa5328e889..0c1b165778e 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -137,6 +137,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
static struct ftrace_ops trace_ops __read_mostly =
{
.func = stack_trace_call,
+ .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
static ssize_t