Diffstat (limited to 'kernel/trace')
-rw-r--r--   kernel/trace/Kconfig                  |   1
-rw-r--r--   kernel/trace/ftrace.c                 |  10
-rw-r--r--   kernel/trace/ring_buffer.c            |  65
-rw-r--r--   kernel/trace/trace.c                  | 413
-rw-r--r--   kernel/trace/trace.h                  |  18
-rw-r--r--   kernel/trace/trace_branch.c           |   4
-rw-r--r--   kernel/trace/trace_events.c           |  51
-rw-r--r--   kernel/trace/trace_events_filter.c    |   4
-rw-r--r--   kernel/trace/trace_functions.c        |   7
-rw-r--r--   kernel/trace/trace_functions_graph.c  |   6
-rw-r--r--   kernel/trace/trace_irqsoff.c          |  16
-rw-r--r--   kernel/trace/trace_kprobe.c           |  10
-rw-r--r--   kernel/trace/trace_output.c           |  78
-rw-r--r--   kernel/trace/trace_probe.c            |  14
-rw-r--r--   kernel/trace/trace_sched_switch.c     |   4
-rw-r--r--   kernel/trace/trace_sched_wakeup.c     |  12
-rw-r--r--   kernel/trace/trace_selftest.c         |  13
-rw-r--r--   kernel/trace/trace_syscalls.c         |  61
-rw-r--r--   kernel/trace/trace_uprobe.c           |   4
19 files changed, 439 insertions, 352 deletions
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 4cea4f41c1d..5d89335a485 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -119,6 +119,7 @@ config TRACING
select BINARY_PRINTF
select EVENT_TRACING
select TRACE_CLOCK
+ select IRQ_WORK
config GENERIC_TRACER
bool
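TRACING now selects IRQ_WORK because the trace.c changes below replace the delayed-workqueue reader wakeup with an irq_work, which can be queued from contexts where scheduling work or taking locks is unsafe. A minimal sketch of the pattern, with illustrative names (my_wait, my_wakeup_work and the helpers are not kernel symbols):

	#include <linux/irq_work.h>
	#include <linux/wait.h>

	static DECLARE_WAIT_QUEUE_HEAD(my_wait);	/* illustrative */
	static struct irq_work my_wakeup_work;

	static void my_wakeup(struct irq_work *work)
	{
		/* runs in hard-irq context shortly after being queued */
		wake_up_all(&my_wait);
	}

	static void my_init(void)
	{
		init_irq_work(&my_wakeup_work, my_wakeup);
	}

	static void my_event(void)
	{
		/* safe even from NMI: just marks the work pending */
		irq_work_queue(&my_wakeup_work);
	}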
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 9dcf15d3838..afd092de45b 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -10,7 +10,7 @@
* Based on code in the latency_tracer, that is:
*
* Copyright (C) 2004-2006 Ingo Molnar
- * Copyright (C) 2004 William Lee Irwin III
+ * Copyright (C) 2004 Nadia Yvette Chambers
*/
#include <linux/stop_machine.h>
@@ -2437,7 +2437,7 @@ static void reset_iter_read(struct ftrace_iterator *iter)
{
iter->pos = 0;
iter->func_pos = 0;
- iter->flags &= ~(FTRACE_ITER_PRINTALL & FTRACE_ITER_HASH);
+ iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
}
static void *t_start(struct seq_file *m, loff_t *pos)
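The reset_iter_read() fix above is a classic bitmask slip: &-ing two distinct single-bit flags yields 0, so "flags &= ~(A & B)" clears nothing; the intended mask is "A | B". A standalone demonstration (the flag values here are illustrative):

	#include <stdio.h>

	#define PRINTALL 0x4	/* illustrative values */
	#define HASH     0x10

	int main(void)
	{
		unsigned int flags = PRINTALL | HASH;

		/* old code: PRINTALL & HASH == 0, so ~0 is all ones
		 * and the &= is a no-op; both flags stay set */
		flags &= ~(PRINTALL & HASH);
		printf("broken clear: %#x\n", flags);	/* 0x14 */

		/* fixed code: clears both flags as intended */
		flags &= ~(PRINTALL | HASH);
		printf("fixed clear:  %#x\n", flags);	/* 0 */
		return 0;
	}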
@@ -2868,7 +2868,7 @@ static int __init ftrace_mod_cmd_init(void)
{
return register_ftrace_command(&ftrace_mod_cmd);
}
-device_initcall(ftrace_mod_cmd_init);
+core_initcall(ftrace_mod_cmd_init);
static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *pt_regs)
@@ -4055,7 +4055,7 @@ static int __init ftrace_nodyn_init(void)
ftrace_enabled = 1;
return 0;
}
-device_initcall(ftrace_nodyn_init);
+core_initcall(ftrace_nodyn_init);
static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
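The device_initcall() to core_initcall() change here (and for the other tracers further down) registers tracers and ftrace commands much earlier in boot. Built-in initcall levels run in a fixed order, summarized from include/linux/init.h:

	/*
	 *   core_initcall        level 1   <- tracers now register here
	 *   postcore_initcall    level 2
	 *   arch_initcall        level 3
	 *   subsys_initcall      level 4
	 *   fs_initcall          level 5
	 *   device_initcall      level 6   <- where they used to register
	 *   late_initcall        level 7
	 */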
@@ -4381,7 +4381,7 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf,
if (strlen(tmp) == 0)
return 1;
- ret = strict_strtol(tmp, 10, &val);
+ ret = kstrtol(tmp, 10, &val);
if (ret < 0)
return ret;
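strict_strtol()/strict_strtoul() were deprecated wrappers around the kstrto*() family, and this series converts the tracing code over. The replacements keep the same shape: 0 on success, negative errno on failure. A hedged sketch of the calling convention (parse_pid is an illustrative helper, not part of the patch):

	#include <linux/kernel.h>

	static int parse_pid(const char *buf, long *pid)
	{
		int ret;

		ret = kstrtol(buf, 10, pid);	/* base 10 */
		if (ret < 0)			/* -EINVAL or -ERANGE */
			return ret;
		return 0;
	}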
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index b979426d16c..ce8514feedc 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -460,9 +460,10 @@ struct ring_buffer_per_cpu {
unsigned long lost_events;
unsigned long last_overrun;
local_t entries_bytes;
- local_t commit_overrun;
- local_t overrun;
local_t entries;
+ local_t overrun;
+ local_t commit_overrun;
+ local_t dropped_events;
local_t committing;
local_t commits;
unsigned long read;
@@ -1396,6 +1397,8 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
struct list_head *head_page_with_bit;
head_page = &rb_set_head_page(cpu_buffer)->list;
+ if (!head_page)
+ break;
prev_page = head_page->prev;
first_page = pages->next;
@@ -1820,7 +1823,7 @@ rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
}
/**
- * ring_buffer_update_event - update event type and data
+ * rb_update_event - update event type and data
* @event: the event to update
* @type: the type of event
* @length: the size of the event field in the ring buffer
@@ -2155,8 +2158,10 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
* If we are not in overwrite mode,
* this is easy, just stop here.
*/
- if (!(buffer->flags & RB_FL_OVERWRITE))
+ if (!(buffer->flags & RB_FL_OVERWRITE)) {
+ local_inc(&cpu_buffer->dropped_events);
goto out_reset;
+ }
ret = rb_handle_head_page(cpu_buffer,
tail_page,
@@ -2720,8 +2725,8 @@ EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
* and not the length of the event which would hold the header.
*/
int ring_buffer_write(struct ring_buffer *buffer,
- unsigned long length,
- void *data)
+ unsigned long length,
+ void *data)
{
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_event *event;
@@ -2929,12 +2934,12 @@ rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
* @buffer: The ring buffer
* @cpu: The per CPU buffer to read from.
*/
-unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
+u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
{
unsigned long flags;
struct ring_buffer_per_cpu *cpu_buffer;
struct buffer_page *bpage;
- unsigned long ret;
+ u64 ret = 0;
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return 0;
@@ -2949,7 +2954,8 @@ unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
bpage = cpu_buffer->reader_page;
else
bpage = rb_set_head_page(cpu_buffer);
- ret = bpage->page->time_stamp;
+ if (bpage)
+ ret = bpage->page->time_stamp;
raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
return ret;
@@ -2995,7 +3001,8 @@ unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
/**
- * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
+ * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
+ * buffer wrapping around (only if RB_FL_OVERWRITE is on).
* @buffer: The ring buffer
* @cpu: The per CPU buffer to get the number of overruns from
*/
@@ -3015,7 +3022,9 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
/**
- * ring_buffer_commit_overrun_cpu - get the number of overruns caused by commits
+ * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
+ * commits failing due to the buffer wrapping around while there are uncommitted
+ * events, such as during an interrupt storm.
* @buffer: The ring buffer
* @cpu: The per CPU buffer to get the number of overruns from
*/
@@ -3036,6 +3045,28 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
/**
+ * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
+ * the ring buffer filling up (only if RB_FL_OVERWRITE is off).
+ * @buffer: The ring buffer
+ * @cpu: The per CPU buffer to get the number of overruns from
+ */
+unsigned long
+ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
+{
+ struct ring_buffer_per_cpu *cpu_buffer;
+ unsigned long ret;
+
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
+ return 0;
+
+ cpu_buffer = buffer->buffers[cpu];
+ ret = local_read(&cpu_buffer->dropped_events);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
+
+/**
* ring_buffer_entries - get the number of entries in a buffer
* @buffer: The ring buffer
*
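With ring_buffer_dropped_events_cpu() added, the ring buffer now exposes three distinct per-cpu loss statistics. A hedged sketch that reads all three (buffer and cpu are assumed valid; show_loss_stats is illustrative):

	#include <linux/ring_buffer.h>

	static void show_loss_stats(struct ring_buffer *buffer, int cpu)
	{
		/* events overwritten by wrap-around (overwrite mode only) */
		pr_info("overrun: %lu\n",
			ring_buffer_overrun_cpu(buffer, cpu));

		/* events lost because the wrap ran into uncommitted data */
		pr_info("commit overrun: %lu\n",
			ring_buffer_commit_overrun_cpu(buffer, cpu));

		/* events never written: buffer full, overwrite mode off */
		pr_info("dropped events: %lu\n",
			ring_buffer_dropped_events_cpu(buffer, cpu));
	}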
@@ -3260,6 +3291,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
* Splice the empty reader page into the list around the head.
*/
reader = rb_set_head_page(cpu_buffer);
+ if (!reader)
+ goto out;
cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
cpu_buffer->reader_page->list.prev = reader->list.prev;
@@ -3778,12 +3811,17 @@ void
ring_buffer_read_finish(struct ring_buffer_iter *iter)
{
struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
+ unsigned long flags;
/*
* Ring buffer is disabled from recording, here's a good place
- * to check the integrity of the ring buffer.
+ * to check the integrity of the ring buffer.
+ * Must prevent readers from trying to read, as the check
+ * clears the HEAD page and readers require it.
*/
+ raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
rb_check_pages(cpu_buffer);
+ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
atomic_dec(&cpu_buffer->record_disabled);
atomic_dec(&cpu_buffer->buffer->resize_disabled);
@@ -3864,9 +3902,10 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
local_set(&cpu_buffer->reader_page->page->commit, 0);
cpu_buffer->reader_page->read = 0;
- local_set(&cpu_buffer->commit_overrun, 0);
local_set(&cpu_buffer->entries_bytes, 0);
local_set(&cpu_buffer->overrun, 0);
+ local_set(&cpu_buffer->commit_overrun, 0);
+ local_set(&cpu_buffer->dropped_events, 0);
local_set(&cpu_buffer->entries, 0);
local_set(&cpu_buffer->committing, 0);
local_set(&cpu_buffer->commits, 0);
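The ring_buffer_oldest_event_ts() prototype change from unsigned long to u64 matters on 32-bit kernels, where unsigned long is 32 bits. A sketch of the difference (oldest_ts is an illustrative wrapper):

	#include <linux/ring_buffer.h>

	static u64 oldest_ts(struct ring_buffer *buffer, int cpu)
	{
		u64 ts = ring_buffer_oldest_event_ts(buffer, cpu);

		/*
		 * With the old "unsigned long" prototype a 32-bit kernel
		 * truncated this value to ts & 0xffffffff, so nanosecond
		 * timestamps wrapped after roughly 4.29 seconds.
		 */
		return ts;
	}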
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 31e4f55773f..61e081b4ba1 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -9,7 +9,7 @@
*
* Based on code from the latency_tracer, that is:
* Copyright (C) 2004-2006 Ingo Molnar
- * Copyright (C) 2004 William Lee Irwin III
+ * Copyright (C) 2004 Nadia Yvette Chambers
*/
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
@@ -19,6 +19,7 @@
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
+#include <linux/irq_work.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
@@ -78,6 +79,21 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set)
}
/*
+ * To prevent the comm cache from being overwritten when no
+ * tracing is active, only save the comm when a trace event
+ * occurred.
+ */
+static DEFINE_PER_CPU(bool, trace_cmdline_save);
+
+/*
+ * When a reader is waiting for data, then this variable is
+ * set to true.
+ */
+static bool trace_wakeup_needed;
+
+static struct irq_work trace_work_wakeup;
+
+/*
* Kill all tracing for good (never come back).
* It is initialized to 1 but will turn to zero if the initialization
* of the tracer is successful. But that is the only place that sets
@@ -139,6 +155,18 @@ static int __init set_ftrace_dump_on_oops(char *str)
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
+
+static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
+static char *trace_boot_options __initdata;
+
+static int __init set_trace_boot_options(char *str)
+{
+ strncpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
+ trace_boot_options = trace_boot_options_buf;
+ return 0;
+}
+__setup("trace_options=", set_trace_boot_options);
+
unsigned long long ns2usecs(cycle_t nsec)
{
nsec += 500;
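The new trace_options= boot parameter applies any setting from the trace_options file before debugfs is available: the string is stashed in an __initdata buffer here and split on commas by the strsep() loop in tracer_alloc_buffers() (final trace.c hunk below). A hypothetical kernel command line, using option names that already exist in trace_options[]:

	trace_options=stacktrace,nosym-offset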
@@ -198,20 +226,9 @@ static struct trace_array max_tr;
static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);
-/* tracer_enabled is used to toggle activation of a tracer */
-static int tracer_enabled = 1;
-
-/**
- * tracing_is_enabled - return tracer_enabled status
- *
- * This function is used by other tracers to know the status
- * of the tracer_enabled flag. Tracers may use this function
- * to know if it should enable their features when starting
- * up. See irqsoff tracer for an example (start_irqsoff_tracer).
- */
int tracing_is_enabled(void)
{
- return tracer_enabled;
+ return tracing_is_on();
}
/*
@@ -333,12 +350,18 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
static int trace_stop_count;
static DEFINE_RAW_SPINLOCK(tracing_start_lock);
-static void wakeup_work_handler(struct work_struct *work)
+/**
+ * trace_wake_up - wake up tasks waiting for trace input
+ *
+ * Runs from irq_work to wake up any task that is blocked on the
+ * trace_wait queue. This is used with trace_poll for tasks polling
+ * the trace.
+ */
+static void trace_wake_up(struct irq_work *work)
{
- wake_up(&trace_wait);
-}
+ wake_up_all(&trace_wait);
-static DECLARE_DELAYED_WORK(wakeup_work, wakeup_work_handler);
+}
/**
* tracing_on - enable tracing buffers
@@ -393,22 +416,6 @@ int tracing_is_on(void)
}
EXPORT_SYMBOL_GPL(tracing_is_on);
-/**
- * trace_wake_up - wake up tasks waiting for trace input
- *
- * Schedules a delayed work to wake up any task that is blocked on the
- * trace_wait queue. These is used with trace_poll for tasks polling the
- * trace.
- */
-void trace_wake_up(void)
-{
- const unsigned long delay = msecs_to_jiffies(2);
-
- if (trace_flags & TRACE_ITER_BLOCK)
- return;
- schedule_delayed_work(&wakeup_work, delay);
-}
-
static int __init set_buf_size(char *str)
{
unsigned long buf_size;
@@ -431,7 +438,7 @@ static int __init set_tracing_thresh(char *str)
if (!str)
return 0;
- ret = strict_strtoul(str, 0, &threshold);
+ ret = kstrtoul(str, 0, &threshold);
if (ret < 0)
return 0;
tracing_thresh = threshold * 1000;
@@ -477,10 +484,12 @@ static const char *trace_options[] = {
static struct {
u64 (*func)(void);
const char *name;
+ int in_ns; /* is this clock in nanoseconds? */
} trace_clocks[] = {
- { trace_clock_local, "local" },
- { trace_clock_global, "global" },
- { trace_clock_counter, "counter" },
+ { trace_clock_local, "local", 1 },
+ { trace_clock_global, "global", 1 },
+ { trace_clock_counter, "counter", 0 },
+ ARCH_TRACE_CLOCKS
};
int trace_clock_id;
@@ -757,6 +766,40 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
}
#endif /* CONFIG_TRACER_MAX_TRACE */
+static void default_wait_pipe(struct trace_iterator *iter)
+{
+ DEFINE_WAIT(wait);
+
+ prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);
+
+ /*
+ * The events can happen in critical sections where
+ * checking a work queue can cause deadlocks.
+ * After adding a task to the queue, this flag is set
+ * only to notify events to try to wake up the queue
+ * using irq_work.
+ *
+ * We don't clear it even if the buffer is no longer
+ * empty. The flag only causes the next event to run
+ * irq_work to do the work queue wake up. The worse
+ * that can happen if we race with !trace_empty() is that
+ * an event will cause an irq_work to try to wake up
+ * an empty queue.
+ *
+ * There's no reason to protect this flag either, as
+ * the work queue and irq_work logic will do the necessary
+ * synchronization for the wake ups. The only thing
+ * that is necessary is that the wake up happens after
+ * a task has been queued. Spurious wake ups are OK.
+ */
+ trace_wakeup_needed = true;
+
+ if (trace_empty(iter))
+ schedule();
+
+ finish_wait(&trace_wait, &wait);
+}
+
/**
* register_tracer - register a tracer with the ftrace system.
* @type - the plugin for the tracer
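default_wait_pipe() above is one half of a handshake with __buffer_unlock_commit() (added further down): the reader advertises that it is about to sleep, and the next committed event queues the irq_work that wakes it. A condensed view of the two sides, drawn from the code in this patch:

	/*
	 * reader (default_wait_pipe)         writer (__buffer_unlock_commit)
	 * --------------------------         -------------------------------
	 * prepare_to_wait(&trace_wait, ...)
	 * trace_wakeup_needed = true;
	 *                                    if (trace_wakeup_needed) {
	 *                                            trace_wakeup_needed = false;
	 *                                            irq_work_queue(&trace_work_wakeup);
	 *                                    }
	 * if (trace_empty(iter))
	 *         schedule();                ...irq_work fires: wake_up_all(&trace_wait)
	 * finish_wait(&trace_wait, &wait);
	 */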
@@ -875,32 +918,6 @@ int register_tracer(struct tracer *type)
return ret;
}
-void unregister_tracer(struct tracer *type)
-{
- struct tracer **t;
-
- mutex_lock(&trace_types_lock);
- for (t = &trace_types; *t; t = &(*t)->next) {
- if (*t == type)
- goto found;
- }
- pr_info("Tracer %s not registered\n", type->name);
- goto out;
-
- found:
- *t = (*t)->next;
-
- if (type == current_trace && tracer_enabled) {
- tracer_enabled = 0;
- tracing_stop();
- if (current_trace->stop)
- current_trace->stop(&global_trace);
- current_trace = &nop_trace;
- }
-out:
- mutex_unlock(&trace_types_lock);
-}
-
void tracing_reset(struct trace_array *tr, int cpu)
{
struct ring_buffer *buffer = tr->buffer;
@@ -1131,10 +1148,14 @@ void trace_find_cmdline(int pid, char comm[])
void tracing_record_cmdline(struct task_struct *tsk)
{
- if (atomic_read(&trace_record_cmdline_disabled) || !tracer_enabled ||
- !tracing_is_on())
+ if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
return;
+ if (!__this_cpu_read(trace_cmdline_save))
+ return;
+
+ __this_cpu_write(trace_cmdline_save, false);
+
trace_save_cmdline(tsk);
}
@@ -1178,27 +1199,36 @@ trace_buffer_lock_reserve(struct ring_buffer *buffer,
return event;
}
+void
+__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
+{
+ __this_cpu_write(trace_cmdline_save, true);
+ if (trace_wakeup_needed) {
+ trace_wakeup_needed = false;
+ /* irq_work_queue() supplies its own memory barriers */
+ irq_work_queue(&trace_work_wakeup);
+ }
+ ring_buffer_unlock_commit(buffer, event);
+}
+
static inline void
__trace_buffer_unlock_commit(struct ring_buffer *buffer,
struct ring_buffer_event *event,
- unsigned long flags, int pc,
- int wake)
+ unsigned long flags, int pc)
{
- ring_buffer_unlock_commit(buffer, event);
+ __buffer_unlock_commit(buffer, event);
ftrace_trace_stack(buffer, flags, 6, pc);
ftrace_trace_userstack(buffer, flags, pc);
-
- if (wake)
- trace_wake_up();
}
void trace_buffer_unlock_commit(struct ring_buffer *buffer,
struct ring_buffer_event *event,
unsigned long flags, int pc)
{
- __trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
+ __trace_buffer_unlock_commit(buffer, event, flags, pc);
}
+EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
@@ -1215,29 +1245,21 @@ void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
struct ring_buffer_event *event,
unsigned long flags, int pc)
{
- __trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
+ __trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
-void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
- struct ring_buffer_event *event,
- unsigned long flags, int pc)
+void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
+ struct ring_buffer_event *event,
+ unsigned long flags, int pc,
+ struct pt_regs *regs)
{
- __trace_buffer_unlock_commit(buffer, event, flags, pc, 0);
-}
-EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit);
-
-void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer,
- struct ring_buffer_event *event,
- unsigned long flags, int pc,
- struct pt_regs *regs)
-{
- ring_buffer_unlock_commit(buffer, event);
+ __buffer_unlock_commit(buffer, event);
ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
ftrace_trace_userstack(buffer, flags, pc);
}
-EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit_regs);
+EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
struct ring_buffer_event *event)
@@ -1269,7 +1291,7 @@ trace_function(struct trace_array *tr,
entry->parent_ip = parent_ip;
if (!filter_check_discard(call, entry, buffer, event))
- ring_buffer_unlock_commit(buffer, event);
+ __buffer_unlock_commit(buffer, event);
}
void
@@ -1362,7 +1384,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
entry->size = trace.nr_entries;
if (!filter_check_discard(call, entry, buffer, event))
- ring_buffer_unlock_commit(buffer, event);
+ __buffer_unlock_commit(buffer, event);
out:
/* Again, don't let gcc optimize things here */
@@ -1458,7 +1480,7 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
save_stack_trace_user(&trace);
if (!filter_check_discard(call, entry, buffer, event))
- ring_buffer_unlock_commit(buffer, event);
+ __buffer_unlock_commit(buffer, event);
out_drop_count:
__this_cpu_dec(user_stack_count);
@@ -1559,10 +1581,10 @@ static int alloc_percpu_trace_buffer(void)
return -ENOMEM;
}
+static int buffers_allocated;
+
void trace_printk_init_buffers(void)
{
- static int buffers_allocated;
-
if (buffers_allocated)
return;
@@ -1571,7 +1593,38 @@ void trace_printk_init_buffers(void)
pr_info("ftrace: Allocated trace_printk buffers\n");
+ /* Expand the buffers to set size */
+ tracing_update_buffers();
+
buffers_allocated = 1;
+
+ /*
+ * trace_printk_init_buffers() can be called by modules.
+ * If that happens, then we need to start cmdline recording
+ * directly here. If the global_trace.buffer is already
+ * allocated here, then this was called by module code.
+ */
+ if (global_trace.buffer)
+ tracing_start_cmdline_record();
+}
+
+void trace_printk_start_comm(void)
+{
+ /* Start tracing comms if trace printk is set */
+ if (!buffers_allocated)
+ return;
+ tracing_start_cmdline_record();
+}
+
+static void trace_printk_start_stop_comm(int enabled)
+{
+ if (!buffers_allocated)
+ return;
+
+ if (enabled)
+ tracing_start_cmdline_record();
+ else
+ tracing_stop_cmdline_record();
}
/**
@@ -1622,7 +1675,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
memcpy(entry->buf, tbuffer, sizeof(u32) * len);
if (!filter_check_discard(call, entry, buffer, event)) {
- ring_buffer_unlock_commit(buffer, event);
+ __buffer_unlock_commit(buffer, event);
ftrace_trace_stack(buffer, flags, 6, pc);
}
@@ -1693,7 +1746,7 @@ int trace_array_vprintk(struct trace_array *tr,
memcpy(&entry->buf, tbuffer, len);
entry->buf[len] = '\0';
if (!filter_check_discard(call, entry, buffer, event)) {
- ring_buffer_unlock_commit(buffer, event);
+ __buffer_unlock_commit(buffer, event);
ftrace_trace_stack(buffer, flags, 6, pc);
}
out:
@@ -2426,6 +2479,10 @@ __tracing_open(struct inode *inode, struct file *file)
if (ring_buffer_overruns(iter->tr->buffer))
iter->iter_flags |= TRACE_FILE_ANNOTATE;
+ /* Output in nanoseconds only if we are using a clock in nanoseconds. */
+ if (trace_clocks[trace_clock_id].in_ns)
+ iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
+
/* stop the trace while dumping */
tracing_stop();
@@ -2794,26 +2851,19 @@ static void set_tracer_flags(unsigned int mask, int enabled)
if (mask == TRACE_ITER_OVERWRITE)
ring_buffer_change_overwrite(global_trace.buffer, enabled);
+
+ if (mask == TRACE_ITER_PRINTK)
+ trace_printk_start_stop_comm(enabled);
}
-static ssize_t
-tracing_trace_options_write(struct file *filp, const char __user *ubuf,
- size_t cnt, loff_t *ppos)
+static int trace_set_options(char *option)
{
- char buf[64];
char *cmp;
int neg = 0;
- int ret;
+ int ret = 0;
int i;
- if (cnt >= sizeof(buf))
- return -EINVAL;
-
- if (copy_from_user(&buf, ubuf, cnt))
- return -EFAULT;
-
- buf[cnt] = 0;
- cmp = strstrip(buf);
+ cmp = strstrip(option);
if (strncmp(cmp, "no", 2) == 0) {
neg = 1;
@@ -2832,10 +2882,25 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
mutex_lock(&trace_types_lock);
ret = set_tracer_option(current_trace, cmp, neg);
mutex_unlock(&trace_types_lock);
- if (ret)
- return ret;
}
+ return ret;
+}
+
+static ssize_t
+tracing_trace_options_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+
+ if (cnt >= sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(&buf, ubuf, cnt))
+ return -EFAULT;
+
+ trace_set_options(buf);
+
*ppos += cnt;
return cnt;
@@ -2940,56 +3005,6 @@ static const struct file_operations tracing_saved_cmdlines_fops = {
};
static ssize_t
-tracing_ctrl_read(struct file *filp, char __user *ubuf,
- size_t cnt, loff_t *ppos)
-{
- char buf[64];
- int r;
-
- r = sprintf(buf, "%u\n", tracer_enabled);
- return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
-}
-
-static ssize_t
-tracing_ctrl_write(struct file *filp, const char __user *ubuf,
- size_t cnt, loff_t *ppos)
-{
- struct trace_array *tr = filp->private_data;
- unsigned long val;
- int ret;
-
- ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
- if (ret)
- return ret;
-
- val = !!val;
-
- mutex_lock(&trace_types_lock);
- if (tracer_enabled ^ val) {
-
- /* Only need to warn if this is used to change the state */
- WARN_ONCE(1, "tracing_enabled is deprecated. Use tracing_on");
-
- if (val) {
- tracer_enabled = 1;
- if (current_trace->start)
- current_trace->start(tr);
- tracing_start();
- } else {
- tracer_enabled = 0;
- tracing_stop();
- if (current_trace->stop)
- current_trace->stop(tr);
- }
- }
- mutex_unlock(&trace_types_lock);
-
- *ppos += cnt;
-
- return cnt;
-}
-
-static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
@@ -3030,6 +3045,10 @@ static int __tracing_resize_ring_buffer(unsigned long size, int cpu)
*/
ring_buffer_expanded = 1;
+ /* May be called before buffers are initialized */
+ if (!global_trace.buffer)
+ return 0;
+
ret = ring_buffer_resize(global_trace.buffer, size, cpu);
if (ret < 0)
return ret;
@@ -3325,6 +3344,10 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
if (trace_flags & TRACE_ITER_LATENCY_FMT)
iter->iter_flags |= TRACE_FILE_LAT_FMT;
+ /* Output in nanoseconds only if we are using a clock in nanoseconds. */
+ if (trace_clocks[trace_clock_id].in_ns)
+ iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
+
iter->cpu_file = cpu_file;
iter->tr = &global_trace;
mutex_init(&iter->mutex);
@@ -3385,19 +3408,6 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
}
}
-
-void default_wait_pipe(struct trace_iterator *iter)
-{
- DEFINE_WAIT(wait);
-
- prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);
-
- if (trace_empty(iter))
- schedule();
-
- finish_wait(&trace_wait, &wait);
-}
-
/*
* This is a make-shift waitqueue.
* A tracer might use this callback in some rare cases:
@@ -3438,7 +3448,7 @@ static int tracing_wait_pipe(struct file *filp)
return -EINTR;
/*
- * We block until we read something and tracing is disabled.
+ * We block until we read something and tracing is enabled.
* We still block if tracing is disabled, but we have never
* read anything. This allows a user to cat this file, and
* then enable tracing. But after we have read something,
@@ -3446,7 +3456,7 @@ static int tracing_wait_pipe(struct file *filp)
*
* iter->pos will be 0 if we haven't read anything.
*/
- if (!tracer_enabled && iter->pos)
+ if (tracing_is_enabled() && iter->pos)
break;
}
@@ -3955,7 +3965,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
} else
entry->buf[cnt] = '\0';
- ring_buffer_unlock_commit(buffer, event);
+ __buffer_unlock_commit(buffer, event);
written = cnt;
@@ -4016,6 +4026,14 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
if (max_tr.buffer)
ring_buffer_set_clock(max_tr.buffer, trace_clocks[i].func);
+ /*
+ * New clock may not be consistent with the previous clock.
+ * Reset the buffer so that it doesn't have incomparable timestamps.
+ */
+ tracing_reset_online_cpus(&global_trace);
+ if (max_tr.buffer)
+ tracing_reset_online_cpus(&max_tr);
+
mutex_unlock(&trace_types_lock);
*fpos += cnt;
@@ -4037,13 +4055,6 @@ static const struct file_operations tracing_max_lat_fops = {
.llseek = generic_file_llseek,
};
-static const struct file_operations tracing_ctrl_fops = {
- .open = tracing_open_generic,
- .read = tracing_ctrl_read,
- .write = tracing_ctrl_write,
- .llseek = generic_file_llseek,
-};
-
static const struct file_operations set_tracer_fops = {
.open = tracing_open_generic,
.read = tracing_set_trace_read,
@@ -4377,13 +4388,27 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
cnt = ring_buffer_bytes_cpu(tr->buffer, cpu);
trace_seq_printf(s, "bytes: %ld\n", cnt);
- t = ns2usecs(ring_buffer_oldest_event_ts(tr->buffer, cpu));
- usec_rem = do_div(t, USEC_PER_SEC);
- trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n", t, usec_rem);
+ if (trace_clocks[trace_clock_id].in_ns) {
+ /* local or global for trace_clock */
+ t = ns2usecs(ring_buffer_oldest_event_ts(tr->buffer, cpu));
+ usec_rem = do_div(t, USEC_PER_SEC);
+ trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
+ t, usec_rem);
+
+ t = ns2usecs(ring_buffer_time_stamp(tr->buffer, cpu));
+ usec_rem = do_div(t, USEC_PER_SEC);
+ trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
+ } else {
+ /* counter or tsc mode for trace_clock */
+ trace_seq_printf(s, "oldest event ts: %llu\n",
+ ring_buffer_oldest_event_ts(tr->buffer, cpu));
+
+ trace_seq_printf(s, "now ts: %llu\n",
+ ring_buffer_time_stamp(tr->buffer, cpu));
+ }
- t = ns2usecs(ring_buffer_time_stamp(tr->buffer, cpu));
- usec_rem = do_div(t, USEC_PER_SEC);
- trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
+ cnt = ring_buffer_dropped_events_cpu(tr->buffer, cpu);
+ trace_seq_printf(s, "dropped events: %ld\n", cnt);
count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
@@ -4815,9 +4840,6 @@ static __init int tracer_init_debugfs(void)
d_tracer = tracing_init_dentry();
- trace_create_file("tracing_enabled", 0644, d_tracer,
- &global_trace, &tracing_ctrl_fops);
-
trace_create_file("trace_options", 0644, d_tracer,
NULL, &tracing_iter_fops);
@@ -5089,6 +5111,7 @@ __init static int tracer_alloc_buffers(void)
/* Only allocate trace_printk buffers if a trace_printk exists */
if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
+ /* Must be called before global_trace.buffer is allocated */
trace_printk_init_buffers();
/* To save memory, keep the ring buffer size to its minimum */
@@ -5136,6 +5159,7 @@ __init static int tracer_alloc_buffers(void)
#endif
trace_init_cmdlines();
+ init_irq_work(&trace_work_wakeup, trace_wake_up);
register_tracer(&nop_trace);
current_trace = &nop_trace;
@@ -5147,6 +5171,13 @@ __init static int tracer_alloc_buffers(void)
register_die_notifier(&trace_die_notifier);
+ while (trace_boot_options) {
+ char *option;
+
+ option = strsep(&trace_boot_options, ",");
+ trace_set_options(option);
+ }
+
return 0;
out_free_cpumask:
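With tracer_enabled gone and the tracing_enabled debugfs file removed above, tracing_on is the single on/off switch, and tracing_is_enabled() is now a thin wrapper around tracing_is_on(). From a shell (standard debugfs mount point assumed):

	# echo 0 > /sys/kernel/debug/tracing/tracing_on    # stop recording
	# echo 1 > /sys/kernel/debug/tracing/tracing_on    # resume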
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index c15f528c1af..c75d7988902 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -285,8 +285,8 @@ struct tracer {
int (*set_flag)(u32 old_flags, u32 bit, int set);
struct tracer *next;
struct tracer_flags *flags;
- int print_max;
- int use_max_tr;
+ bool print_max;
+ bool use_max_tr;
};
@@ -327,7 +327,6 @@ trace_buffer_iter(struct trace_iterator *iter, int cpu)
int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
-void trace_wake_up(void);
void tracing_reset(struct trace_array *tr, int cpu);
void tracing_reset_online_cpus(struct trace_array *tr);
void tracing_reset_current(int cpu);
@@ -349,9 +348,6 @@ trace_buffer_lock_reserve(struct ring_buffer *buffer,
unsigned long len,
unsigned long flags,
int pc);
-void trace_buffer_unlock_commit(struct ring_buffer *buffer,
- struct ring_buffer_event *event,
- unsigned long flags, int pc);
struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
struct trace_array_cpu *data);
@@ -359,6 +355,9 @@ struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
int *ent_cpu, u64 *ent_ts);
+void __buffer_unlock_commit(struct ring_buffer *buffer,
+ struct ring_buffer_event *event);
+
int trace_empty(struct trace_iterator *iter);
void *trace_find_next_entry_inc(struct trace_iterator *iter);
@@ -367,7 +366,6 @@ void trace_init_global_iter(struct trace_iterator *iter);
void tracing_iter_reset(struct trace_iterator *iter, int cpu);
-void default_wait_pipe(struct trace_iterator *iter);
void poll_wait_pipe(struct trace_iterator *iter);
void ftrace(struct trace_array *tr,
@@ -407,12 +405,7 @@ void tracing_sched_switch_assign_trace(struct trace_array *tr);
void tracing_stop_sched_switch_record(void);
void tracing_start_sched_switch_record(void);
int register_tracer(struct tracer *type);
-void unregister_tracer(struct tracer *type);
int is_tracing_stopped(void);
-enum trace_file_type {
- TRACE_FILE_LAT_FMT = 1,
- TRACE_FILE_ANNOTATE = 2,
-};
extern cpumask_var_t __read_mostly tracing_buffer_mask;
@@ -841,6 +834,7 @@ extern const char *__start___trace_bprintk_fmt[];
extern const char *__stop___trace_bprintk_fmt[];
void trace_printk_init_buffers(void);
+void trace_printk_start_comm(void);
#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index 8d3538b4ea5..95e96842ed2 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -77,7 +77,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
entry->correct = val == expect;
if (!filter_check_discard(call, entry, buffer, event))
- ring_buffer_unlock_commit(buffer, event);
+ __buffer_unlock_commit(buffer, event);
out:
atomic_dec(&tr->data[cpu]->disabled);
@@ -199,7 +199,7 @@ __init static int init_branch_tracer(void)
}
return register_tracer(&branch_trace);
}
-device_initcall(init_branch_tracer);
+core_initcall(init_branch_tracer);
#else
static inline
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index d608d09d08c..880073d0b94 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -491,19 +491,6 @@ static void t_stop(struct seq_file *m, void *p)
mutex_unlock(&event_mutex);
}
-static int
-ftrace_event_seq_open(struct inode *inode, struct file *file)
-{
- const struct seq_operations *seq_ops;
-
- if ((file->f_mode & FMODE_WRITE) &&
- (file->f_flags & O_TRUNC))
- ftrace_clear_events();
-
- seq_ops = inode->i_private;
- return seq_open(file, seq_ops);
-}
-
static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
loff_t *ppos)
@@ -980,6 +967,9 @@ show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
return r;
}
+static int ftrace_event_avail_open(struct inode *inode, struct file *file);
+static int ftrace_event_set_open(struct inode *inode, struct file *file);
+
static const struct seq_operations show_event_seq_ops = {
.start = t_start,
.next = t_next,
@@ -995,14 +985,14 @@ static const struct seq_operations show_set_event_seq_ops = {
};
static const struct file_operations ftrace_avail_fops = {
- .open = ftrace_event_seq_open,
+ .open = ftrace_event_avail_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static const struct file_operations ftrace_set_event_fops = {
- .open = ftrace_event_seq_open,
+ .open = ftrace_event_set_open,
.read = seq_read,
.write = ftrace_event_write,
.llseek = seq_lseek,
@@ -1078,6 +1068,26 @@ static struct dentry *event_trace_events_dir(void)
return d_events;
}
+static int
+ftrace_event_avail_open(struct inode *inode, struct file *file)
+{
+ const struct seq_operations *seq_ops = &show_event_seq_ops;
+
+ return seq_open(file, seq_ops);
+}
+
+static int
+ftrace_event_set_open(struct inode *inode, struct file *file)
+{
+ const struct seq_operations *seq_ops = &show_set_event_seq_ops;
+
+ if ((file->f_mode & FMODE_WRITE) &&
+ (file->f_flags & O_TRUNC))
+ ftrace_clear_events();
+
+ return seq_open(file, seq_ops);
+}
+
static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
@@ -1489,6 +1499,9 @@ static __init int event_trace_enable(void)
if (ret)
pr_warn("Failed to enable trace event: %s\n", token);
}
+
+ trace_printk_start_comm();
+
return 0;
}
@@ -1505,15 +1518,13 @@ static __init int event_trace_init(void)
return 0;
entry = debugfs_create_file("available_events", 0444, d_tracer,
- (void *)&show_event_seq_ops,
- &ftrace_avail_fops);
+ NULL, &ftrace_avail_fops);
if (!entry)
pr_warning("Could not create debugfs "
"'available_events' entry\n");
entry = debugfs_create_file("set_event", 0644, d_tracer,
- (void *)&show_set_event_seq_ops,
- &ftrace_set_event_fops);
+ NULL, &ftrace_set_event_fops);
if (!entry)
pr_warning("Could not create debugfs "
"'set_event' entry\n");
@@ -1749,7 +1760,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip,
entry->ip = ip;
entry->parent_ip = parent_ip;
- trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);
+ trace_buffer_unlock_commit(buffer, event, flags, pc);
out:
atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
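Splitting ftrace_event_seq_open() into _avail_ and _set_ variants means only set_event reacts to O_TRUNC; previously both files shared one open routine, so opening the read-only available_events file with truncation also cleared the enabled-event list. The intended set_event behaviour, from a shell:

	# echo 'sched:sched_switch' > /sys/kernel/debug/tracing/set_event
	# echo > /sys/kernel/debug/tracing/set_event    # O_TRUNC clears every event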
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index c154797a7ff..e5b0ca8b8d4 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -1000,9 +1000,9 @@ static int init_pred(struct filter_parse_state *ps,
}
} else {
if (field->is_signed)
- ret = strict_strtoll(pred->regex.pattern, 0, &val);
+ ret = kstrtoll(pred->regex.pattern, 0, &val);
else
- ret = strict_strtoull(pred->regex.pattern, 0, &val);
+ ret = kstrtoull(pred->regex.pattern, 0, &val);
if (ret) {
parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0);
return -EINVAL;
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 507a7a9630b..8e3ad8082ab 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -7,7 +7,7 @@
* Based on code from the latency_tracer, that is:
*
* Copyright (C) 2004-2006 Ingo Molnar
- * Copyright (C) 2004 William Lee Irwin III
+ * Copyright (C) 2004 Nadia Yvette Chambers
*/
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
@@ -366,7 +366,7 @@ ftrace_trace_onoff_callback(struct ftrace_hash *hash,
* We use the callback data field (which is a pointer)
* as our counter.
*/
- ret = strict_strtoul(number, 0, (unsigned long *)&count);
+ ret = kstrtoul(number, 0, (unsigned long *)&count);
if (ret)
return ret;
@@ -411,5 +411,4 @@ static __init int init_function_trace(void)
init_func_cmd_traceon();
return register_tracer(&function_trace);
}
-device_initcall(init_function_trace);
-
+core_initcall(init_function_trace);
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 99b4378393d..4edb4b74eb7 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -223,7 +223,7 @@ int __trace_graph_entry(struct trace_array *tr,
entry = ring_buffer_event_data(event);
entry->graph_ent = *trace;
if (!filter_current_check_discard(buffer, call, entry, event))
- ring_buffer_unlock_commit(buffer, event);
+ __buffer_unlock_commit(buffer, event);
return 1;
}
@@ -327,7 +327,7 @@ void __trace_graph_return(struct trace_array *tr,
entry = ring_buffer_event_data(event);
entry->ret = *trace;
if (!filter_current_check_discard(buffer, call, entry, event))
- ring_buffer_unlock_commit(buffer, event);
+ __buffer_unlock_commit(buffer, event);
}
void trace_graph_return(struct ftrace_graph_ret *trace)
@@ -1474,4 +1474,4 @@ static __init int init_graph_trace(void)
return register_tracer(&graph_trace);
}
-device_initcall(init_graph_trace);
+core_initcall(init_graph_trace);
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index d98ee8283b2..713a2cac488 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -7,7 +7,7 @@
* From code in the latency_tracer, that is:
*
* Copyright (C) 2004-2006 Ingo Molnar
- * Copyright (C) 2004 William Lee Irwin III
+ * Copyright (C) 2004 Nadia Yvette Chambers
*/
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
@@ -604,7 +604,7 @@ static struct tracer irqsoff_tracer __read_mostly =
.reset = irqsoff_tracer_reset,
.start = irqsoff_tracer_start,
.stop = irqsoff_tracer_stop,
- .print_max = 1,
+ .print_max = true,
.print_header = irqsoff_print_header,
.print_line = irqsoff_print_line,
.flags = &tracer_flags,
@@ -614,7 +614,7 @@ static struct tracer irqsoff_tracer __read_mostly =
#endif
.open = irqsoff_trace_open,
.close = irqsoff_trace_close,
- .use_max_tr = 1,
+ .use_max_tr = true,
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
@@ -637,7 +637,7 @@ static struct tracer preemptoff_tracer __read_mostly =
.reset = irqsoff_tracer_reset,
.start = irqsoff_tracer_start,
.stop = irqsoff_tracer_stop,
- .print_max = 1,
+ .print_max = true,
.print_header = irqsoff_print_header,
.print_line = irqsoff_print_line,
.flags = &tracer_flags,
@@ -647,7 +647,7 @@ static struct tracer preemptoff_tracer __read_mostly =
#endif
.open = irqsoff_trace_open,
.close = irqsoff_trace_close,
- .use_max_tr = 1,
+ .use_max_tr = true,
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
@@ -672,7 +672,7 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
.reset = irqsoff_tracer_reset,
.start = irqsoff_tracer_start,
.stop = irqsoff_tracer_stop,
- .print_max = 1,
+ .print_max = true,
.print_header = irqsoff_print_header,
.print_line = irqsoff_print_line,
.flags = &tracer_flags,
@@ -682,7 +682,7 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
#endif
.open = irqsoff_trace_open,
.close = irqsoff_trace_close,
- .use_max_tr = 1,
+ .use_max_tr = true,
};
# define register_preemptirqsoff(trace) register_tracer(&trace)
@@ -698,4 +698,4 @@ __init static int init_irqsoff_tracer(void)
return 0;
}
-device_initcall(init_irqsoff_tracer);
+core_initcall(init_irqsoff_tracer);
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 1a2117043bb..1865d5f7653 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -444,7 +444,7 @@ static int create_trace_probe(int argc, char **argv)
return -EINVAL;
}
/* an address specified */
- ret = strict_strtoul(&argv[1][0], 0, (unsigned long *)&addr);
+ ret = kstrtoul(&argv[1][0], 0, (unsigned long *)&addr);
if (ret) {
pr_info("Failed to parse address.\n");
return ret;
@@ -751,8 +751,8 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
if (!filter_current_check_discard(buffer, call, entry, event))
- trace_nowake_buffer_unlock_commit_regs(buffer, event,
- irq_flags, pc, regs);
+ trace_buffer_unlock_commit_regs(buffer, event,
+ irq_flags, pc, regs);
}
/* Kretprobe handler */
@@ -784,8 +784,8 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
if (!filter_current_check_discard(buffer, call, entry, event))
- trace_nowake_buffer_unlock_commit_regs(buffer, event,
- irq_flags, pc, regs);
+ trace_buffer_unlock_commit_regs(buffer, event,
+ irq_flags, pc, regs);
}
/* Event entry printers */
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 123b189c732..194d79602dc 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -610,24 +610,54 @@ lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
return trace_print_lat_fmt(s, entry);
}
-static unsigned long preempt_mark_thresh = 100;
+static unsigned long preempt_mark_thresh_us = 100;
static int
-lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
- unsigned long rel_usecs)
+lat_print_timestamp(struct trace_iterator *iter, u64 next_ts)
{
- return trace_seq_printf(s, " %4lldus%c: ", abs_usecs,
- rel_usecs > preempt_mark_thresh ? '!' :
- rel_usecs > 1 ? '+' : ' ');
+ unsigned long verbose = trace_flags & TRACE_ITER_VERBOSE;
+ unsigned long in_ns = iter->iter_flags & TRACE_FILE_TIME_IN_NS;
+ unsigned long long abs_ts = iter->ts - iter->tr->time_start;
+ unsigned long long rel_ts = next_ts - iter->ts;
+ struct trace_seq *s = &iter->seq;
+
+ if (in_ns) {
+ abs_ts = ns2usecs(abs_ts);
+ rel_ts = ns2usecs(rel_ts);
+ }
+
+ if (verbose && in_ns) {
+ unsigned long abs_usec = do_div(abs_ts, USEC_PER_MSEC);
+ unsigned long abs_msec = (unsigned long)abs_ts;
+ unsigned long rel_usec = do_div(rel_ts, USEC_PER_MSEC);
+ unsigned long rel_msec = (unsigned long)rel_ts;
+
+ return trace_seq_printf(
+ s, "[%08llx] %ld.%03ldms (+%ld.%03ldms): ",
+ ns2usecs(iter->ts),
+ abs_msec, abs_usec,
+ rel_msec, rel_usec);
+ } else if (verbose && !in_ns) {
+ return trace_seq_printf(
+ s, "[%016llx] %lld (+%lld): ",
+ iter->ts, abs_ts, rel_ts);
+ } else if (!verbose && in_ns) {
+ return trace_seq_printf(
+ s, " %4lldus%c: ",
+ abs_ts,
+ rel_ts > preempt_mark_thresh_us ? '!' :
+ rel_ts > 1 ? '+' : ' ');
+ } else { /* !verbose && !in_ns */
+ return trace_seq_printf(s, " %4lld: ", abs_ts);
+ }
}
int trace_print_context(struct trace_iterator *iter)
{
struct trace_seq *s = &iter->seq;
struct trace_entry *entry = iter->ent;
- unsigned long long t = ns2usecs(iter->ts);
- unsigned long usec_rem = do_div(t, USEC_PER_SEC);
- unsigned long secs = (unsigned long)t;
+ unsigned long long t;
+ unsigned long secs, usec_rem;
char comm[TASK_COMM_LEN];
int ret;
@@ -644,8 +674,13 @@ int trace_print_context(struct trace_iterator *iter)
return 0;
}
- return trace_seq_printf(s, " %5lu.%06lu: ",
- secs, usec_rem);
+ if (iter->iter_flags & TRACE_FILE_TIME_IN_NS) {
+ t = ns2usecs(iter->ts);
+ usec_rem = do_div(t, USEC_PER_SEC);
+ secs = (unsigned long)t;
+ return trace_seq_printf(s, " %5lu.%06lu: ", secs, usec_rem);
+ } else
+ return trace_seq_printf(s, " %12llu: ", iter->ts);
}
int trace_print_lat_context(struct trace_iterator *iter)
@@ -659,36 +694,29 @@ int trace_print_lat_context(struct trace_iterator *iter)
*next_entry = trace_find_next_entry(iter, NULL,
&next_ts);
unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
- unsigned long abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);
- unsigned long rel_usecs;
/* Restore the original ent_size */
iter->ent_size = ent_size;
if (!next_entry)
next_ts = iter->ts;
- rel_usecs = ns2usecs(next_ts - iter->ts);
if (verbose) {
char comm[TASK_COMM_LEN];
trace_find_cmdline(entry->pid, comm);
- ret = trace_seq_printf(s, "%16s %5d %3d %d %08x %08lx [%08llx]"
- " %ld.%03ldms (+%ld.%03ldms): ", comm,
- entry->pid, iter->cpu, entry->flags,
- entry->preempt_count, iter->idx,
- ns2usecs(iter->ts),
- abs_usecs / USEC_PER_MSEC,
- abs_usecs % USEC_PER_MSEC,
- rel_usecs / USEC_PER_MSEC,
- rel_usecs % USEC_PER_MSEC);
+ ret = trace_seq_printf(
+ s, "%16s %5d %3d %d %08x %08lx ",
+ comm, entry->pid, iter->cpu, entry->flags,
+ entry->preempt_count, iter->idx);
} else {
ret = lat_print_generic(s, entry, iter->cpu);
- if (ret)
- ret = lat_print_timestamp(s, abs_usecs, rel_usecs);
}
+ if (ret)
+ ret = lat_print_timestamp(iter, next_ts);
+
return ret;
}
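lat_print_timestamp() now picks one of four formats, keyed on the verbose flag and on whether the trace clock counts nanoseconds (TRACE_FILE_TIME_IN_NS). Summarized from the format strings above:

	/*
	 *  verbose  in_ns   format
	 *  yes      yes     "[%08llx] %ld.%03ldms (+%ld.%03ldms): "
	 *  yes      no      "[%016llx] %lld (+%lld): "
	 *  no       yes     " %4lldus%c: "   (c: '!' > 100us, '+' > 1us, else ' ')
	 *  no       no      " %4lld: "
	 */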
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index daa9980153a..412e959709b 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -441,7 +441,7 @@ static const struct fetch_type *find_fetch_type(const char *type)
goto fail;
type++;
- if (strict_strtoul(type, 0, &bs))
+ if (kstrtoul(type, 0, &bs))
goto fail;
switch (bs) {
@@ -501,8 +501,8 @@ int traceprobe_split_symbol_offset(char *symbol, unsigned long *offset)
tmp = strchr(symbol, '+');
if (tmp) {
- /* skip sign because strict_strtol doesn't accept '+' */
- ret = strict_strtoul(tmp + 1, 0, offset);
+ /* skip sign because kstrtoul doesn't accept '+' */
+ ret = kstrtoul(tmp + 1, 0, offset);
if (ret)
return ret;
@@ -533,7 +533,7 @@ static int parse_probe_vars(char *arg, const struct fetch_type *t,
else
ret = -EINVAL;
} else if (isdigit(arg[5])) {
- ret = strict_strtoul(arg + 5, 10, &param);
+ ret = kstrtoul(arg + 5, 10, &param);
if (ret || param > PARAM_MAX_STACK)
ret = -EINVAL;
else {
@@ -579,7 +579,7 @@ static int parse_probe_arg(char *arg, const struct fetch_type *t,
case '@': /* memory or symbol */
if (isdigit(arg[1])) {
- ret = strict_strtoul(arg + 1, 0, &param);
+ ret = kstrtoul(arg + 1, 0, &param);
if (ret)
break;
@@ -597,14 +597,14 @@ static int parse_probe_arg(char *arg, const struct fetch_type *t,
break;
case '+': /* deref memory */
- arg++; /* Skip '+', because strict_strtol() rejects it. */
+ arg++; /* Skip '+', because kstrtol() rejects it. */
case '-':
tmp = strchr(arg, '(');
if (!tmp)
break;
*tmp = '\0';
- ret = strict_strtol(arg, 0, &offset);
+ ret = kstrtol(arg, 0, &offset);
if (ret)
break;
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index 7e62c0a1845..3374c792ccd 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -102,9 +102,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
entry->next_cpu = task_cpu(wakee);
if (!filter_check_discard(call, entry, buffer, event))
- ring_buffer_unlock_commit(buffer, event);
- ftrace_trace_stack(tr->buffer, flags, 6, pc);
- ftrace_trace_userstack(tr->buffer, flags, pc);
+ trace_buffer_unlock_commit(buffer, event, flags, pc);
}
static void
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 02170c00c41..9fe45fcefca 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -7,7 +7,7 @@
* Based on code from the latency_tracer, that is:
*
* Copyright (C) 2004-2006 Ingo Molnar
- * Copyright (C) 2004 William Lee Irwin III
+ * Copyright (C) 2004 Nadia Yvette Chambers
*/
#include <linux/module.h>
#include <linux/fs.h>
@@ -589,7 +589,7 @@ static struct tracer wakeup_tracer __read_mostly =
.reset = wakeup_tracer_reset,
.start = wakeup_tracer_start,
.stop = wakeup_tracer_stop,
- .print_max = 1,
+ .print_max = true,
.print_header = wakeup_print_header,
.print_line = wakeup_print_line,
.flags = &tracer_flags,
@@ -599,7 +599,7 @@ static struct tracer wakeup_tracer __read_mostly =
#endif
.open = wakeup_trace_open,
.close = wakeup_trace_close,
- .use_max_tr = 1,
+ .use_max_tr = true,
};
static struct tracer wakeup_rt_tracer __read_mostly =
@@ -610,7 +610,7 @@ static struct tracer wakeup_rt_tracer __read_mostly =
.start = wakeup_tracer_start,
.stop = wakeup_tracer_stop,
.wait_pipe = poll_wait_pipe,
- .print_max = 1,
+ .print_max = true,
.print_header = wakeup_print_header,
.print_line = wakeup_print_line,
.flags = &tracer_flags,
@@ -620,7 +620,7 @@ static struct tracer wakeup_rt_tracer __read_mostly =
#endif
.open = wakeup_trace_open,
.close = wakeup_trace_close,
- .use_max_tr = 1,
+ .use_max_tr = true,
};
__init static int init_wakeup_tracer(void)
@@ -637,4 +637,4 @@ __init static int init_wakeup_tracer(void)
return 0;
}
-device_initcall(init_wakeup_tracer);
+core_initcall(init_wakeup_tracer);
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 2c00a691a54..47623169a81 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -320,7 +320,6 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
int (*func)(void))
{
int save_ftrace_enabled = ftrace_enabled;
- int save_tracer_enabled = tracer_enabled;
unsigned long count;
char *func_name;
int ret;
@@ -331,7 +330,6 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
/* enable tracing, and record the filter function */
ftrace_enabled = 1;
- tracer_enabled = 1;
/* passed in by parameter to fool gcc from optimizing */
func();
@@ -395,7 +393,6 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
out:
ftrace_enabled = save_ftrace_enabled;
- tracer_enabled = save_tracer_enabled;
/* Enable tracing on all functions again */
ftrace_set_global_filter(NULL, 0, 1);
@@ -452,7 +449,6 @@ static int
trace_selftest_function_recursion(void)
{
int save_ftrace_enabled = ftrace_enabled;
- int save_tracer_enabled = tracer_enabled;
char *func_name;
int len;
int ret;
@@ -465,7 +461,6 @@ trace_selftest_function_recursion(void)
/* enable tracing, and record the filter function */
ftrace_enabled = 1;
- tracer_enabled = 1;
/* Handle PPC64 '.' name */
func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
@@ -534,7 +529,6 @@ trace_selftest_function_recursion(void)
ret = 0;
out:
ftrace_enabled = save_ftrace_enabled;
- tracer_enabled = save_tracer_enabled;
return ret;
}
@@ -569,7 +563,6 @@ static int
trace_selftest_function_regs(void)
{
int save_ftrace_enabled = ftrace_enabled;
- int save_tracer_enabled = tracer_enabled;
char *func_name;
int len;
int ret;
@@ -586,7 +579,6 @@ trace_selftest_function_regs(void)
/* enable tracing, and record the filter function */
ftrace_enabled = 1;
- tracer_enabled = 1;
/* Handle PPC64 '.' name */
func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
@@ -648,7 +640,6 @@ trace_selftest_function_regs(void)
ret = 0;
out:
ftrace_enabled = save_ftrace_enabled;
- tracer_enabled = save_tracer_enabled;
return ret;
}
@@ -662,7 +653,6 @@ int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
int save_ftrace_enabled = ftrace_enabled;
- int save_tracer_enabled = tracer_enabled;
unsigned long count;
int ret;
@@ -671,7 +661,6 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
/* start the tracing */
ftrace_enabled = 1;
- tracer_enabled = 1;
ret = tracer_init(trace, tr);
if (ret) {
@@ -708,7 +697,6 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
ret = trace_selftest_function_regs();
out:
ftrace_enabled = save_ftrace_enabled;
- tracer_enabled = save_tracer_enabled;
/* kill ftrace totally if we failed */
if (ret)
@@ -1106,6 +1094,7 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
tracing_stop();
/* check both trace buffers */
ret = trace_test_buffer(tr, NULL);
+ printk("ret = %d\n", ret);
if (!ret)
ret = trace_test_buffer(&max_tr, &count);
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 2485a7d09b1..7609dd6714c 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -21,9 +21,6 @@ static int syscall_enter_register(struct ftrace_event_call *event,
static int syscall_exit_register(struct ftrace_event_call *event,
enum trace_reg type, void *data);
-static int syscall_enter_define_fields(struct ftrace_event_call *call);
-static int syscall_exit_define_fields(struct ftrace_event_call *call);
-
static struct list_head *
syscall_get_enter_fields(struct ftrace_event_call *call)
{
@@ -32,30 +29,6 @@ syscall_get_enter_fields(struct ftrace_event_call *call)
return &entry->enter_fields;
}
-struct trace_event_functions enter_syscall_print_funcs = {
- .trace = print_syscall_enter,
-};
-
-struct trace_event_functions exit_syscall_print_funcs = {
- .trace = print_syscall_exit,
-};
-
-struct ftrace_event_class event_class_syscall_enter = {
- .system = "syscalls",
- .reg = syscall_enter_register,
- .define_fields = syscall_enter_define_fields,
- .get_fields = syscall_get_enter_fields,
- .raw_init = init_syscall_trace,
-};
-
-struct ftrace_event_class event_class_syscall_exit = {
- .system = "syscalls",
- .reg = syscall_exit_register,
- .define_fields = syscall_exit_define_fields,
- .fields = LIST_HEAD_INIT(event_class_syscall_exit.fields),
- .raw_init = init_syscall_trace,
-};
-
extern struct syscall_metadata *__start_syscalls_metadata[];
extern struct syscall_metadata *__stop_syscalls_metadata[];
@@ -432,7 +405,7 @@ void unreg_event_syscall_exit(struct ftrace_event_call *call)
mutex_unlock(&syscall_trace_lock);
}
-int init_syscall_trace(struct ftrace_event_call *call)
+static int init_syscall_trace(struct ftrace_event_call *call)
{
int id;
int num;
@@ -457,6 +430,30 @@ int init_syscall_trace(struct ftrace_event_call *call)
return id;
}
+struct trace_event_functions enter_syscall_print_funcs = {
+ .trace = print_syscall_enter,
+};
+
+struct trace_event_functions exit_syscall_print_funcs = {
+ .trace = print_syscall_exit,
+};
+
+struct ftrace_event_class event_class_syscall_enter = {
+ .system = "syscalls",
+ .reg = syscall_enter_register,
+ .define_fields = syscall_enter_define_fields,
+ .get_fields = syscall_get_enter_fields,
+ .raw_init = init_syscall_trace,
+};
+
+struct ftrace_event_class event_class_syscall_exit = {
+ .system = "syscalls",
+ .reg = syscall_exit_register,
+ .define_fields = syscall_exit_define_fields,
+ .fields = LIST_HEAD_INIT(event_class_syscall_exit.fields),
+ .raw_init = init_syscall_trace,
+};
+
unsigned long __init __weak arch_syscall_addr(int nr)
{
return (unsigned long)sys_call_table[nr];
@@ -537,7 +534,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
}
-int perf_sysenter_enable(struct ftrace_event_call *call)
+static int perf_sysenter_enable(struct ftrace_event_call *call)
{
int ret = 0;
int num;
@@ -558,7 +555,7 @@ int perf_sysenter_enable(struct ftrace_event_call *call)
return ret;
}
-void perf_sysenter_disable(struct ftrace_event_call *call)
+static void perf_sysenter_disable(struct ftrace_event_call *call)
{
int num;
@@ -615,7 +612,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
}
-int perf_sysexit_enable(struct ftrace_event_call *call)
+static int perf_sysexit_enable(struct ftrace_event_call *call)
{
int ret = 0;
int num;
@@ -636,7 +633,7 @@ int perf_sysexit_enable(struct ftrace_event_call *call)
return ret;
}
-void perf_sysexit_disable(struct ftrace_event_call *call)
+static void perf_sysexit_disable(struct ftrace_event_call *call)
{
int num;
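Making init_syscall_trace() and the perf enable/disable helpers static required moving the event_class definitions that reference them below the function bodies, since C requires an identifier to be declared before use. The alternative, used by trace_events.c earlier in this series, is a forward declaration; a sketch of that style (fields abbreviated):

	/* forward-declare the static function, keep the users where they were */
	static int init_syscall_trace(struct ftrace_event_call *call);

	struct ftrace_event_class event_class_syscall_enter = {
		.system		= "syscalls",
		.raw_init	= init_syscall_trace,
		/* ... */
	};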
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 03003cd7dd9..9614db8b0f8 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -189,7 +189,7 @@ static int create_trace_uprobe(int argc, char **argv)
if (argv[0][0] == '-')
is_delete = true;
else if (argv[0][0] != 'p') {
- pr_info("Probe definition must be started with 'p', 'r' or" " '-'.\n");
+ pr_info("Probe definition must be started with 'p' or '-'.\n");
return -EINVAL;
}
@@ -252,7 +252,7 @@ static int create_trace_uprobe(int argc, char **argv)
if (ret)
goto fail_address_parse;
- ret = strict_strtoul(arg, 0, &offset);
+ ret = kstrtoul(arg, 0, &offset);
if (ret)
goto fail_address_parse;