 include/linux/ftrace_event.h      |  14
 include/trace/ftrace.h            |   3
 kernel/trace/Kconfig              |   1
 kernel/trace/trace.c              | 121
 kernel/trace/trace.h              |   5
 kernel/trace/trace_events.c       |   2
 kernel/trace/trace_kprobe.c       |   8
 kernel/trace/trace_sched_switch.c |   2
 kernel/trace/trace_selftest.c     |   1
 9 files changed, 84 insertions(+), 73 deletions(-)
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 642928cf57b..b80c8ddfbbd 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -127,13 +127,13 @@ trace_current_buffer_lock_reserve(struct ring_buffer **current_buffer,
void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
struct ring_buffer_event *event,
unsigned long flags, int pc);
-void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
- struct ring_buffer_event *event,
- unsigned long flags, int pc);
-void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer,
- struct ring_buffer_event *event,
- unsigned long flags, int pc,
- struct pt_regs *regs);
+void trace_buffer_unlock_commit(struct ring_buffer *buffer,
+ struct ring_buffer_event *event,
+ unsigned long flags, int pc);
+void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
+ struct ring_buffer_event *event,
+ unsigned long flags, int pc,
+ struct pt_regs *regs);
void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
struct ring_buffer_event *event);
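With the irq_work scheme introduced below in trace.c, none of the commit helpers wakes readers directly any more, so the nowake_ prefix no longer marks a special case and the plain names can return. For orientation, here is a minimal sketch of how a caller pairs the reserve and commit helpers declared in this header (struct my_entry, MY_EVENT_TYPE and my_trace_value() are hypothetical placeholders, not part of the patch):

#include <linux/ftrace_event.h>
#include <linux/ring_buffer.h>

/* Hypothetical payload; MY_EVENT_TYPE stands in for a registered type id. */
#define MY_EVENT_TYPE	1

struct my_entry {
	struct trace_entry	ent;
	int			value;
};

static void my_trace_value(int value)
{
	struct ring_buffer *buffer;
	struct ring_buffer_event *event;
	struct my_entry *entry;
	unsigned long irq_flags;
	int pc;

	local_save_flags(irq_flags);
	pc = preempt_count();

	event = trace_current_buffer_lock_reserve(&buffer, MY_EVENT_TYPE,
						  sizeof(*entry),
						  irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->value = value;

	/* Commit; any reader wake up is now deferred through irq_work. */
	trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
}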
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index a763888a36f..698f2a89032 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -545,8 +545,7 @@ ftrace_raw_event_##call(void *__data, proto) \
{ assign; } \
\
if (!filter_current_check_discard(buffer, event_call, entry, event)) \
- trace_nowake_buffer_unlock_commit(buffer, \
- event, irq_flags, pc); \
+ trace_buffer_unlock_commit(buffer, event, irq_flags, pc); \
}
/*
* The ftrace_test_probe is compiled out; it is only here as a build-time check
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 4cea4f41c1d..5d89335a485 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -119,6 +119,7 @@ config TRACING
select BINARY_PRINTF
select EVENT_TRACING
select TRACE_CLOCK
+ select IRQ_WORK
config GENERIC_TRACER
bool
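TRACING now selects IRQ_WORK because trace.c below starts calling init_irq_work() and irq_work_queue(). As a quick reference, this is the shape of the irq_work API as the patch uses it (the my_* names are hypothetical):

#include <linux/irq_work.h>

static struct irq_work my_work;

/* Callback runs shortly after queueing, from a context where wake ups are safe. */
static void my_work_func(struct irq_work *work)
{
	/* e.g. wake_up_all(&some_wait_queue); */
}

static void my_setup(void)
{
	init_irq_work(&my_work, my_work_func);
}

/* Safe to call from NMI/tracing context; just raises the deferred work. */
static void my_event_path(void)
{
	irq_work_queue(&my_work);
}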
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index d5cbc0d3f20..37d1c703e3e 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -19,6 +19,7 @@
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
+#include <linux/irq_work.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
@@ -85,6 +86,14 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set)
static DEFINE_PER_CPU(bool, trace_cmdline_save);
/*
+ * When a reader is waiting for data, this variable is
+ * set to true.
+ */
+static bool trace_wakeup_needed;
+
+static struct irq_work trace_work_wakeup;
+
+/*
* Kill all tracing for good (never come back).
* It is initialized to 1 but will turn to zero if the initialization
* of the tracer is successful. But that is the only place that sets
@@ -329,12 +338,18 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
static int trace_stop_count;
static DEFINE_RAW_SPINLOCK(tracing_start_lock);
-static void wakeup_work_handler(struct work_struct *work)
+/**
+ * trace_wake_up - wake up tasks waiting for trace input
+ *
+ * Wakes up any task that is blocked on the trace_wait queue. It runs
+ * as the irq_work handler and is used with trace_poll for tasks
+ * polling the trace.
+ */
+static void trace_wake_up(struct irq_work *work)
{
- wake_up(&trace_wait);
+ wake_up_all(&trace_wait);
}

-static DECLARE_DELAYED_WORK(wakeup_work, wakeup_work_handler);
/**
* tracing_on - enable tracing buffers
@@ -389,22 +404,6 @@ int tracing_is_on(void)
}
EXPORT_SYMBOL_GPL(tracing_is_on);
-/**
- * trace_wake_up - wake up tasks waiting for trace input
- *
- * Schedules a delayed work to wake up any task that is blocked on the
- * trace_wait queue. These is used with trace_poll for tasks polling the
- * trace.
- */
-void trace_wake_up(void)
-{
- const unsigned long delay = msecs_to_jiffies(2);
-
- if (trace_flags & TRACE_ITER_BLOCK)
- return;
- schedule_delayed_work(&wakeup_work, delay);
-}
-
static int __init set_buf_size(char *str)
{
unsigned long buf_size;
@@ -753,6 +752,40 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
}
#endif /* CONFIG_TRACER_MAX_TRACE */
+static void default_wait_pipe(struct trace_iterator *iter)
+{
+ DEFINE_WAIT(wait);
+
+ prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);
+
+ /*
+ * Events can happen in critical sections where
+ * waking up the wait queue directly could deadlock.
+ * After adding a task to the queue, this flag is set
+ * so that events signal the wake up via irq_work
+ * instead of calling wake_up() themselves.
+ *
+ * We don't clear it even if the buffer is no longer
+ * empty. The flag only causes the next event to run
+ * irq_work to do the work queue wake up. The worst
+ * that can happen if we race with !trace_empty() is that
+ * an event will cause an irq_work to try to wake up
+ * an empty queue.
+ *
+ * There's no reason to protect this flag either, as
+ * the work queue and irq_work logic will do the necessary
+ * synchronization for the wake ups. The only thing
+ * that is necessary is that the wake up happens after
+ * a task has been queued. Spurious wake ups are OK.
+ */
+ trace_wakeup_needed = true;
+
+ if (trace_empty(iter))
+ schedule();
+
+ finish_wait(&trace_wait, &wait);
+}
+
/**
* register_tracer - register a tracer with the ftrace system.
* @type - the plugin for the tracer
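Stripped of the ftrace specifics, the handshake described in the comment in default_wait_pipe() above reduces to a small stand-alone pattern. The sketch below (hypothetical demo_* names; a condensed analogue, not the ftrace code itself) shows the reader arming the flag before sleeping, and a writer, which may run where a direct wake_up() would deadlock, deferring the wake through irq_work:

#include <linux/irq_work.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wait);
static bool demo_wakeup_needed;
static struct irq_work demo_wakeup_work;

/* Runs from irq_work context, where waking a wait queue is safe. */
static void demo_wake(struct irq_work *work)
{
	wake_up_all(&demo_wait);
}

/* Reader side: arm the flag, then sleep until data shows up. */
static void demo_wait_for_data(bool (*empty)(void))
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&demo_wait, &wait, TASK_INTERRUPTIBLE);
	demo_wakeup_needed = true;	/* next event will queue the wake up */
	if (empty())
		schedule();
	finish_wait(&demo_wait, &wait);
}

/* Writer side: may run in NMI or other critical context. */
static void demo_commit_event(void)
{
	/* ... write the event data first ... */
	if (demo_wakeup_needed) {
		demo_wakeup_needed = false;
		irq_work_queue(&demo_wakeup_work);	/* supplies barriers */
	}
}

/* During init: init_irq_work(&demo_wakeup_work, demo_wake); */

A racy test of demo_wakeup_needed costs at most one spurious wake up of an empty queue, which is why no lock is needed around the flag.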
@@ -1156,30 +1189,32 @@ void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
__this_cpu_write(trace_cmdline_save, true);
+ if (trace_wakeup_needed) {
+ trace_wakeup_needed = false;
+ /* irq_work_queue() supplies its own memory barriers */
+ irq_work_queue(&trace_work_wakeup);
+ }
ring_buffer_unlock_commit(buffer, event);
}
static inline void
__trace_buffer_unlock_commit(struct ring_buffer *buffer,
struct ring_buffer_event *event,
- unsigned long flags, int pc,
- int wake)
+ unsigned long flags, int pc)
{
__buffer_unlock_commit(buffer, event);
ftrace_trace_stack(buffer, flags, 6, pc);
ftrace_trace_userstack(buffer, flags, pc);
-
- if (wake)
- trace_wake_up();
}
void trace_buffer_unlock_commit(struct ring_buffer *buffer,
struct ring_buffer_event *event,
unsigned long flags, int pc)
{
- __trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
+ __trace_buffer_unlock_commit(buffer, event, flags, pc);
}
+EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
@@ -1196,29 +1231,21 @@ void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
struct ring_buffer_event *event,
unsigned long flags, int pc)
{
- __trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
+ __trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
-void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
- struct ring_buffer_event *event,
- unsigned long flags, int pc)
-{
- __trace_buffer_unlock_commit(buffer, event, flags, pc, 0);
-}
-EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit);
-
-void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer,
- struct ring_buffer_event *event,
- unsigned long flags, int pc,
- struct pt_regs *regs)
+void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
+ struct ring_buffer_event *event,
+ unsigned long flags, int pc,
+ struct pt_regs *regs)
{
__buffer_unlock_commit(buffer, event);
ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
ftrace_trace_userstack(buffer, flags, pc);
}
-EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit_regs);
+EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
struct ring_buffer_event *event)
@@ -3354,19 +3381,6 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
}
}
-
-void default_wait_pipe(struct trace_iterator *iter)
-{
- DEFINE_WAIT(wait);
-
- prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);
-
- if (trace_empty(iter))
- schedule();
-
- finish_wait(&trace_wait, &wait);
-}
-
/*
* This is a make-shift waitqueue.
* A tracer might use this callback on some rare cases:
@@ -5107,6 +5121,7 @@ __init static int tracer_alloc_buffers(void)
#endif
trace_init_cmdlines();
+ init_irq_work(&trace_work_wakeup, trace_wake_up);
register_tracer(&nop_trace);
current_trace = &nop_trace;
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 3e8a176f64e..55010ed175f 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -327,7 +327,6 @@ trace_buffer_iter(struct trace_iterator *iter, int cpu)
int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
-void trace_wake_up(void);
void tracing_reset(struct trace_array *tr, int cpu);
void tracing_reset_online_cpus(struct trace_array *tr);
void tracing_reset_current(int cpu);
@@ -349,9 +348,6 @@ trace_buffer_lock_reserve(struct ring_buffer *buffer,
unsigned long len,
unsigned long flags,
int pc);
-void trace_buffer_unlock_commit(struct ring_buffer *buffer,
- struct ring_buffer_event *event,
- unsigned long flags, int pc);
struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
struct trace_array_cpu *data);
@@ -370,7 +366,6 @@ void trace_init_global_iter(struct trace_iterator *iter);
void tracing_iter_reset(struct trace_iterator *iter, int cpu);
-void default_wait_pipe(struct trace_iterator *iter);
void poll_wait_pipe(struct trace_iterator *iter);
void ftrace(struct trace_array *tr,
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index cb2df3b70f7..880073d0b94 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1760,7 +1760,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip,
entry->ip = ip;
entry->parent_ip = parent_ip;
- trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);
+ trace_buffer_unlock_commit(buffer, event, flags, pc);
out:
atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 5a3c533ef06..1865d5f7653 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -751,8 +751,8 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
if (!filter_current_check_discard(buffer, call, entry, event))
- trace_nowake_buffer_unlock_commit_regs(buffer, event,
- irq_flags, pc, regs);
+ trace_buffer_unlock_commit_regs(buffer, event,
+ irq_flags, pc, regs);
}
/* Kretprobe handler */
@@ -784,8 +784,8 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
if (!filter_current_check_discard(buffer, call, entry, event))
- trace_nowake_buffer_unlock_commit_regs(buffer, event,
- irq_flags, pc, regs);
+ trace_buffer_unlock_commit_regs(buffer, event,
+ irq_flags, pc, regs);
}
/* Event entry printers */
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index b0a136ac382..3374c792ccd 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -102,7 +102,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
entry->next_cpu = task_cpu(wakee);
if (!filter_check_discard(call, entry, buffer, event))
- trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);
+ trace_buffer_unlock_commit(buffer, event, flags, pc);
}
static void
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 091b815f7b0..47623169a81 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -1094,6 +1094,7 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
tracing_stop();
/* check both trace buffers */
ret = trace_test_buffer(tr, NULL);
+ printk("ret = %d\n", ret);
if (!ret)
ret = trace_test_buffer(&max_tr, &count);