Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/Kconfig               |    2
-rw-r--r--  kernel/trace/Makefile              |    2
-rw-r--r--  kernel/trace/blktrace.c            |   39
-rw-r--r--  kernel/trace/ftrace.c              |   89
-rw-r--r--  kernel/trace/kmemtrace.c           |    2
-rw-r--r--  kernel/trace/power-traces.c        |   20
-rw-r--r--  kernel/trace/ring_buffer.c         |   16
-rw-r--r--  kernel/trace/trace.c               |   80
-rw-r--r--  kernel/trace/trace.h               |    3
-rw-r--r--  kernel/trace/trace_branch.c        |    8
-rw-r--r--  kernel/trace/trace_entries.h       |   17
-rw-r--r--  kernel/trace/trace_event_profile.c |   87
-rw-r--r--  kernel/trace/trace_events.c        |   56
-rw-r--r--  kernel/trace/trace_events_filter.c |    3
-rw-r--r--  kernel/trace/trace_hw_branches.c   |   10
-rw-r--r--  kernel/trace/trace_output.c        |   23
-rw-r--r--  kernel/trace/trace_power.c         |  218
-rw-r--r--  kernel/trace/trace_printk.c        |    1
-rw-r--r--  kernel/trace/trace_stack.c         |    4
-rw-r--r--  kernel/trace/trace_syscalls.c      |  103
20 files changed, 352 insertions(+), 431 deletions(-)
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index e7163460440..b416512ad17 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -83,7 +83,7 @@ config RING_BUFFER_ALLOW_SWAP
# This allows those options to appear when no other tracer is selected. But the
# options do not appear when something else selects it. We need the two options
# GENERIC_TRACER and TRACING to avoid circular dependencies to accomplish the
-# hidding of the automatic options options.
+# hidding of the automatic options.
config TRACING
bool
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 844164dca90..26f03ac07c2 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -42,7 +42,6 @@ obj-$(CONFIG_BOOT_TRACER) += trace_boot.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += trace_functions_graph.o
obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o
obj-$(CONFIG_HW_BRANCH_TRACER) += trace_hw_branches.o
-obj-$(CONFIG_POWER_TRACER) += trace_power.o
obj-$(CONFIG_KMEMTRACE) += kmemtrace.o
obj-$(CONFIG_WORKQUEUE_TRACER) += trace_workqueue.o
obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o
@@ -54,5 +53,6 @@ obj-$(CONFIG_EVENT_TRACING) += trace_export.o
obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o
obj-$(CONFIG_EVENT_PROFILE) += trace_event_profile.o
obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
+obj-$(CONFIG_EVENT_TRACING) += power-traces.o
libftrace-y := ftrace.o
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 3eb159c277c..d9d6206e0b1 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -856,6 +856,37 @@ static void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
}
/**
+ * blk_add_trace_rq_remap - Add a trace for a request-remap operation
+ * @q: queue the io is for
+ * @rq: the source request
+ * @dev: target device
+ * @from: source sector
+ *
+ * Description:
+ * Device mapper remaps request to other devices.
+ * Add a trace for that action.
+ *
+ **/
+static void blk_add_trace_rq_remap(struct request_queue *q,
+ struct request *rq, dev_t dev,
+ sector_t from)
+{
+ struct blk_trace *bt = q->blk_trace;
+ struct blk_io_trace_remap r;
+
+ if (likely(!bt))
+ return;
+
+ r.device_from = cpu_to_be32(dev);
+ r.device_to = cpu_to_be32(disk_devt(rq->rq_disk));
+ r.sector_from = cpu_to_be64(from);
+
+ __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
+ rq_data_dir(rq), BLK_TA_REMAP, !!rq->errors,
+ sizeof(r), &r);
+}
+
+/**
* blk_add_driver_data - Add binary message with driver-specific data
* @q: queue the io is for
* @rq: io request
@@ -922,10 +953,13 @@ static void blk_register_tracepoints(void)
WARN_ON(ret);
ret = register_trace_block_remap(blk_add_trace_remap);
WARN_ON(ret);
+ ret = register_trace_block_rq_remap(blk_add_trace_rq_remap);
+ WARN_ON(ret);
}
static void blk_unregister_tracepoints(void)
{
+ unregister_trace_block_rq_remap(blk_add_trace_rq_remap);
unregister_trace_block_remap(blk_add_trace_remap);
unregister_trace_block_split(blk_add_trace_split);
unregister_trace_block_unplug_io(blk_add_trace_unplug_io);
@@ -1657,6 +1691,11 @@ int blk_trace_init_sysfs(struct device *dev)
return sysfs_create_group(&dev->kobj, &blk_trace_attr_group);
}
+void blk_trace_remove_sysfs(struct device *dev)
+{
+ sysfs_remove_group(&dev->kobj, &blk_trace_attr_group);
+}
+
#endif /* CONFIG_BLK_DEV_IO_TRACE */
#ifdef CONFIG_EVENT_TRACING
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index cc615f84751..6dc4e5ef7a0 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -225,7 +225,11 @@ static void ftrace_update_pid_func(void)
if (ftrace_trace_function == ftrace_stub)
return;
+#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
func = ftrace_trace_function;
+#else
+ func = __ftrace_trace_function;
+#endif
if (ftrace_pid_trace) {
set_ftrace_pid_function(func);
@@ -736,7 +740,7 @@ ftrace_profile_write(struct file *filp, const char __user *ubuf,
out:
mutex_unlock(&ftrace_profile_lock);
- filp->f_pos += cnt;
+ *ppos += cnt;
return cnt;
}
@@ -1074,14 +1078,9 @@ static void ftrace_replace_code(int enable)
failed = __ftrace_replace_code(rec, enable);
if (failed) {
rec->flags |= FTRACE_FL_FAILED;
- if ((system_state == SYSTEM_BOOTING) ||
- !core_kernel_text(rec->ip)) {
- ftrace_free_rec(rec);
- } else {
- ftrace_bug(failed, rec->ip);
- /* Stop processing */
- return;
- }
+ ftrace_bug(failed, rec->ip);
+ /* Stop processing */
+ return;
}
} while_for_each_ftrace_rec();
}
@@ -1520,7 +1519,7 @@ static int t_show(struct seq_file *m, void *v)
return 0;
}
-static struct seq_operations show_ftrace_seq_ops = {
+static const struct seq_operations show_ftrace_seq_ops = {
.start = t_start,
.next = t_next,
.stop = t_stop,
@@ -1621,8 +1620,10 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable)
if (!ret) {
struct seq_file *m = file->private_data;
m->private = iter;
- } else
+ } else {
+ trace_parser_put(&iter->parser);
kfree(iter);
+ }
} else
file->private_data = iter;
mutex_unlock(&ftrace_regex_lock);
@@ -2202,7 +2203,7 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
struct trace_parser *parser;
ssize_t ret, read;
- if (!cnt || cnt < 0)
+ if (!cnt)
return 0;
mutex_lock(&ftrace_regex_lock);
@@ -2216,20 +2217,20 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
parser = &iter->parser;
read = trace_get_user(parser, ubuf, cnt, ppos);
- if (trace_parser_loaded(parser) &&
+ if (read >= 0 && trace_parser_loaded(parser) &&
!trace_parser_cont(parser)) {
ret = ftrace_process_regex(parser->buffer,
parser->idx, enable);
if (ret)
- goto out;
+ goto out_unlock;
trace_parser_clear(parser);
}
ret = read;
-
+out_unlock:
mutex_unlock(&ftrace_regex_lock);
-out:
+
return ret;
}
@@ -2414,11 +2415,9 @@ unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
static void *
__g_next(struct seq_file *m, loff_t *pos)
{
- unsigned long *array = m->private;
-
if (*pos >= ftrace_graph_count)
return NULL;
- return &array[*pos];
+ return &ftrace_graph_funcs[*pos];
}
static void *
@@ -2461,7 +2460,7 @@ static int g_show(struct seq_file *m, void *v)
return 0;
}
-static struct seq_operations ftrace_graph_seq_ops = {
+static const struct seq_operations ftrace_graph_seq_ops = {
.start = g_start,
.next = g_next,
.stop = g_stop,
@@ -2482,16 +2481,10 @@ ftrace_graph_open(struct inode *inode, struct file *file)
ftrace_graph_count = 0;
memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
}
+ mutex_unlock(&graph_lock);
- if (file->f_mode & FMODE_READ) {
+ if (file->f_mode & FMODE_READ)
ret = seq_open(file, &ftrace_graph_seq_ops);
- if (!ret) {
- struct seq_file *m = file->private_data;
- m->private = ftrace_graph_funcs;
- }
- } else
- file->private_data = ftrace_graph_funcs;
- mutex_unlock(&graph_lock);
return ret;
}
@@ -2560,9 +2553,7 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
struct trace_parser parser;
- unsigned long *array;
- size_t read = 0;
- ssize_t ret;
+ ssize_t read, ret;
if (!cnt || cnt < 0)
return 0;
@@ -2571,35 +2562,31 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
ret = -EBUSY;
- goto out;
+ goto out_unlock;
}
- if (file->f_mode & FMODE_READ) {
- struct seq_file *m = file->private_data;
- array = m->private;
- } else
- array = file->private_data;
-
if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
ret = -ENOMEM;
- goto out;
+ goto out_unlock;
}
read = trace_get_user(&parser, ubuf, cnt, ppos);
- if (trace_parser_loaded((&parser))) {
+ if (read >= 0 && trace_parser_loaded((&parser))) {
parser.buffer[parser.idx] = 0;
/* we allow only one expression at a time */
- ret = ftrace_set_func(array, &ftrace_graph_count,
+ ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
parser.buffer);
if (ret)
- goto out;
+ goto out_free;
}
ret = read;
- out:
+
+out_free:
trace_parser_put(&parser);
+out_unlock:
mutex_unlock(&graph_lock);
return ret;
@@ -2670,19 +2657,17 @@ static int ftrace_convert_nops(struct module *mod,
}
#ifdef CONFIG_MODULES
-void ftrace_release(void *start, void *end)
+void ftrace_release_mod(struct module *mod)
{
struct dyn_ftrace *rec;
struct ftrace_page *pg;
- unsigned long s = (unsigned long)start;
- unsigned long e = (unsigned long)end;
- if (ftrace_disabled || !start || start == end)
+ if (ftrace_disabled)
return;
mutex_lock(&ftrace_lock);
do_for_each_ftrace_rec(pg, rec) {
- if ((rec->ip >= s) && (rec->ip < e)) {
+ if (within_module_core(rec->ip, mod)) {
/*
* rec->ip is changed in ftrace_free_rec()
* It should not between s and e if record was freed.
@@ -2714,9 +2699,7 @@ static int ftrace_module_notify(struct notifier_block *self,
mod->num_ftrace_callsites);
break;
case MODULE_STATE_GOING:
- ftrace_release(mod->ftrace_callsites,
- mod->ftrace_callsites +
- mod->num_ftrace_callsites);
+ ftrace_release_mod(mod);
break;
}
@@ -3030,7 +3013,7 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
- struct file *file, void __user *buffer, size_t *lenp,
+ void __user *buffer, size_t *lenp,
loff_t *ppos)
{
int ret;
@@ -3040,7 +3023,7 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
mutex_lock(&ftrace_lock);
- ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
+ ret = proc_dointvec(table, write, buffer, lenp, ppos);
if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
goto out;
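The ftrace_regex_open() and ftrace_graph_write() changes above converge on one cleanup order for the filter write path: take the lock, then the parser, and unwind through labels in reverse so an early failure never frees what was not yet allocated and a late failure never leaks the parser. A minimal sketch of that shape, assuming the trace_parser helpers from kernel/trace/trace.h; everything prefixed example_ is hypothetical, not part of the patch:

#include <linux/mutex.h>
#include <linux/uaccess.h>
#include "trace.h"	/* trace_parser helpers, FTRACE_BUFF_MAX */

static DEFINE_MUTEX(example_lock);

static ssize_t example_filter_write(struct file *file, const char __user *ubuf,
				    size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read, ret;

	mutex_lock(&example_lock);

	if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
		ret = -ENOMEM;
		goto out_unlock;	/* parser was never allocated */
	}

	read = trace_get_user(&parser, ubuf, cnt, ppos);
	if (read >= 0 && trace_parser_loaded(&parser)) {
		ret = example_apply_filter(parser.buffer);	/* hypothetical */
		if (ret)
			goto out_free;
	}
	ret = read;

out_free:
	trace_parser_put(&parser);	/* undoes trace_parser_get_init() */
out_unlock:
	mutex_unlock(&example_lock);
	return ret;
}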
diff --git a/kernel/trace/kmemtrace.c b/kernel/trace/kmemtrace.c
index 81b1645c854..a91da69f153 100644
--- a/kernel/trace/kmemtrace.c
+++ b/kernel/trace/kmemtrace.c
@@ -501,7 +501,7 @@ static int __init init_kmem_tracer(void)
return 1;
}
- if (!register_tracer(&kmem_tracer)) {
+ if (register_tracer(&kmem_tracer) != 0) {
pr_warning("Warning: could not register the kmem tracer\n");
return 1;
}
diff --git a/kernel/trace/power-traces.c b/kernel/trace/power-traces.c
new file mode 100644
index 00000000000..e06c6e3d56a
--- /dev/null
+++ b/kernel/trace/power-traces.c
@@ -0,0 +1,20 @@
+/*
+ * Power trace points
+ *
+ * Copyright (C) 2009 Arjan van de Ven <arjan@linux.intel.com>
+ */
+
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/power.h>
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(power_start);
+EXPORT_TRACEPOINT_SYMBOL_GPL(power_end);
+EXPORT_TRACEPOINT_SYMBOL_GPL(power_frequency);
+
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 6eef38923b0..5dd017fea6f 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -201,8 +201,6 @@ int tracing_is_on(void)
}
EXPORT_SYMBOL_GPL(tracing_is_on);
-#include "trace.h"
-
#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
#define RB_ALIGNMENT 4U
#define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
@@ -485,7 +483,7 @@ struct ring_buffer_iter {
/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0
-static inline u64 rb_time_stamp(struct ring_buffer *buffer, int cpu)
+static inline u64 rb_time_stamp(struct ring_buffer *buffer)
{
/* shift to debug/test normalization and TIME_EXTENTS */
return buffer->clock() << DEBUG_SHIFT;
@@ -496,7 +494,7 @@ u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
u64 time;
preempt_disable_notrace();
- time = rb_time_stamp(buffer, cpu);
+ time = rb_time_stamp(buffer);
preempt_enable_no_resched_notrace();
return time;
@@ -601,7 +599,7 @@ static struct list_head *rb_list_head(struct list_head *list)
}
/*
- * rb_is_head_page - test if the give page is the head page
+ * rb_is_head_page - test if the given page is the head page
*
* Because the reader may move the head_page pointer, we can
* not trust what the head page is (it may be pointing to
@@ -1195,6 +1193,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
atomic_inc(&cpu_buffer->record_disabled);
synchronize_sched();
+ spin_lock_irq(&cpu_buffer->reader_lock);
rb_head_page_deactivate(cpu_buffer);
for (i = 0; i < nr_pages; i++) {
@@ -1209,6 +1208,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
return;
rb_reset_cpu(cpu_buffer);
+ spin_unlock_irq(&cpu_buffer->reader_lock);
rb_check_pages(cpu_buffer);
@@ -1870,7 +1870,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
* Nested commits always have zero deltas, so
* just reread the time stamp
*/
- *ts = rb_time_stamp(buffer, cpu_buffer->cpu);
+ *ts = rb_time_stamp(buffer);
next_page->page->time_stamp = *ts;
}
@@ -2113,7 +2113,7 @@ rb_reserve_next_event(struct ring_buffer *buffer,
if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
goto out_fail;
- ts = rb_time_stamp(cpu_buffer->buffer, cpu_buffer->cpu);
+ ts = rb_time_stamp(cpu_buffer->buffer);
/*
* Only the first commit can update the timestamp.
@@ -2683,7 +2683,7 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer)
EXPORT_SYMBOL_GPL(ring_buffer_entries);
/**
- * ring_buffer_overrun_cpu - get the number of overruns in buffer
+ * ring_buffer_overruns - get the number of overruns in buffer
* @buffer: The ring buffer
*
* Returns the total number of overruns in the ring buffer
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index fd52a19dd17..b20d3ec75de 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -125,13 +125,13 @@ int ftrace_dump_on_oops;
static int tracing_set_tracer(const char *buf);
-#define BOOTUP_TRACER_SIZE 100
-static char bootup_tracer_buf[BOOTUP_TRACER_SIZE] __initdata;
+#define MAX_TRACER_SIZE 100
+static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;
static int __init set_ftrace(char *str)
{
- strncpy(bootup_tracer_buf, str, BOOTUP_TRACER_SIZE);
+ strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
default_bootup_tracer = bootup_tracer_buf;
/* We are using ftrace early, expand it */
ring_buffer_expanded = 1;
@@ -242,13 +242,6 @@ static struct tracer *trace_types __read_mostly;
static struct tracer *current_trace __read_mostly;
/*
- * max_tracer_type_len is used to simplify the allocating of
- * buffers to read userspace tracer names. We keep track of
- * the longest tracer name registered.
- */
-static int max_tracer_type_len;
-
-/*
* trace_types_lock is used to protect the trace_types list.
* This lock is also used to keep user access serialized.
* Accesses from userspace will grab this lock while userspace
@@ -275,12 +268,18 @@ static DEFINE_SPINLOCK(tracing_start_lock);
*/
void trace_wake_up(void)
{
+ int cpu;
+
+ if (trace_flags & TRACE_ITER_BLOCK)
+ return;
/*
* The runqueue_is_locked() can fail, but this is the best we
* have for now:
*/
- if (!(trace_flags & TRACE_ITER_BLOCK) && !runqueue_is_locked())
+ cpu = get_cpu();
+ if (!runqueue_is_locked(cpu))
wake_up(&trace_wait);
+ put_cpu();
}
static int __init set_buf_size(char *str)
@@ -416,7 +415,7 @@ int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
/* read the non-space input */
while (cnt && !isspace(ch)) {
- if (parser->idx < parser->size)
+ if (parser->idx < parser->size - 1)
parser->buffer[parser->idx++] = ch;
else {
ret = -EINVAL;
@@ -619,7 +618,6 @@ __releases(kernel_lock)
__acquires(kernel_lock)
{
struct tracer *t;
- int len;
int ret = 0;
if (!type->name) {
@@ -627,6 +625,11 @@ __acquires(kernel_lock)
return -1;
}
+ if (strlen(type->name) > MAX_TRACER_SIZE) {
+ pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
+ return -1;
+ }
+
/*
* When this gets called we hold the BKL which means that
* preemption is disabled. Various trace selftests however
@@ -641,7 +644,7 @@ __acquires(kernel_lock)
for (t = trace_types; t; t = t->next) {
if (strcmp(type->name, t->name) == 0) {
/* already found */
- pr_info("Trace %s already registered\n",
+ pr_info("Tracer %s already registered\n",
type->name);
ret = -1;
goto out;
@@ -692,9 +695,6 @@ __acquires(kernel_lock)
type->next = trace_types;
trace_types = type;
- len = strlen(type->name);
- if (len > max_tracer_type_len)
- max_tracer_type_len = len;
out:
tracing_selftest_running = false;
@@ -703,7 +703,7 @@ __acquires(kernel_lock)
if (ret || !default_bootup_tracer)
goto out_unlock;
- if (strncmp(default_bootup_tracer, type->name, BOOTUP_TRACER_SIZE))
+ if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
goto out_unlock;
printk(KERN_INFO "Starting tracer '%s'\n", type->name);
@@ -725,14 +725,13 @@ __acquires(kernel_lock)
void unregister_tracer(struct tracer *type)
{
struct tracer **t;
- int len;
mutex_lock(&trace_types_lock);
for (t = &trace_types; *t; t = &(*t)->next) {
if (*t == type)
goto found;
}
- pr_info("Trace %s not registered\n", type->name);
+ pr_info("Tracer %s not registered\n", type->name);
goto out;
found:
@@ -745,17 +744,7 @@ void unregister_tracer(struct tracer *type)
current_trace->stop(&global_trace);
current_trace = &nop_trace;
}
-
- if (strlen(type->name) != max_tracer_type_len)
- goto out;
-
- max_tracer_type_len = 0;
- for (t = &trace_types; *t; t = &(*t)->next) {
- len = strlen((*t)->name);
- if (len > max_tracer_type_len)
- max_tracer_type_len = len;
- }
- out:
+out:
mutex_unlock(&trace_types_lock);
}
@@ -1404,7 +1393,7 @@ int trace_array_vprintk(struct trace_array *tr,
int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
- return trace_array_printk(&global_trace, ip, fmt, args);
+ return trace_array_vprintk(&global_trace, ip, fmt, args);
}
EXPORT_SYMBOL_GPL(trace_vprintk);
@@ -1960,7 +1949,7 @@ static int s_show(struct seq_file *m, void *v)
return 0;
}
-static struct seq_operations tracer_seq_ops = {
+static const struct seq_operations tracer_seq_ops = {
.start = s_start,
.next = s_next,
.stop = s_stop,
@@ -1995,11 +1984,9 @@ __tracing_open(struct inode *inode, struct file *file)
if (current_trace)
*iter->trace = *current_trace;
- if (!alloc_cpumask_var(&iter->started, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
goto fail;
- cpumask_clear(iter->started);
-
if (current_trace && current_trace->print_max)
iter->tr = &max_tr;
else
@@ -2174,7 +2161,7 @@ static int t_show(struct seq_file *m, void *v)
return 0;
}
-static struct seq_operations show_traces_seq_ops = {
+static const struct seq_operations show_traces_seq_ops = {
.start = t_start,
.next = t_next,
.stop = t_stop,
@@ -2453,7 +2440,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
return ret;
}
- filp->f_pos += cnt;
+ *ppos += cnt;
return cnt;
}
@@ -2595,7 +2582,7 @@ tracing_ctrl_write(struct file *filp, const char __user *ubuf,
}
mutex_unlock(&trace_types_lock);
- filp->f_pos += cnt;
+ *ppos += cnt;
return cnt;
}
@@ -2604,7 +2591,7 @@ static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
- char buf[max_tracer_type_len+2];
+ char buf[MAX_TRACER_SIZE+2];
int r;
mutex_lock(&trace_types_lock);
@@ -2754,15 +2741,15 @@ static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
- char buf[max_tracer_type_len+1];
+ char buf[MAX_TRACER_SIZE+1];
int i;
size_t ret;
int err;
ret = cnt;
- if (cnt > max_tracer_type_len)
- cnt = max_tracer_type_len;
+ if (cnt > MAX_TRACER_SIZE)
+ cnt = MAX_TRACER_SIZE;
if (copy_from_user(&buf, ubuf, cnt))
return -EFAULT;
@@ -2777,7 +2764,7 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
if (err)
return err;
- filp->f_pos += ret;
+ *ppos += ret;
return ret;
}
@@ -3312,7 +3299,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
}
}
- filp->f_pos += cnt;
+ *ppos += cnt;
/* If check pages failed, return ENOMEM */
if (tracing_disabled)
@@ -4400,7 +4387,7 @@ __init static int tracer_alloc_buffers(void)
if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
goto out_free_buffer_mask;
- if (!alloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
goto out_free_tracing_cpumask;
/* To save memory, keep the ring buffer size to its minimum */
@@ -4411,7 +4398,6 @@ __init static int tracer_alloc_buffers(void)
cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
cpumask_copy(tracing_cpumask, cpu_all_mask);
- cpumask_clear(tracing_reader_cpumask);
/* TODO: make the number of buffers hot pluggable with CPUS */
global_trace.buffer = ring_buffer_alloc(ring_buf_size,
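Several of the write handlers touched above (tracing_trace_options_write, tracing_ctrl_write, tracing_set_trace_write, tracing_entries_write) stop poking filp->f_pos directly and instead advance the loff_t the VFS passed in, so callers that supply their own offset (e.g. pwrite()) are not surprised. A minimal sketch of that convention; the handler itself is illustrative, not from the patch:

#include <linux/fs.h>
#include <linux/uaccess.h>

static ssize_t example_ctl_write(struct file *filp, const char __user *ubuf,
				 size_t cnt, loff_t *ppos)
{
	char buf[64];

	if (cnt >= sizeof(buf))
		cnt = sizeof(buf) - 1;

	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;
	buf[cnt] = '\0';

	/* ... parse and act on the command in buf ... */

	*ppos += cnt;		/* not filp->f_pos += cnt */
	return cnt;
}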
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 86bcff94791..405cb850b75 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -11,7 +11,6 @@
#include <linux/ftrace.h>
#include <trace/boot.h>
#include <linux/kmemtrace.h>
-#include <trace/power.h>
#include <linux/trace_seq.h>
#include <linux/ftrace_event.h>
@@ -37,7 +36,6 @@ enum trace_type {
TRACE_HW_BRANCHES,
TRACE_KMEM_ALLOC,
TRACE_KMEM_FREE,
- TRACE_POWER,
TRACE_BLK,
__TRACE_LAST_TYPE,
@@ -207,7 +205,6 @@ extern void __ftrace_bad_type(void);
IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \
TRACE_GRAPH_RET); \
IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\
- IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER); \
IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry, \
TRACE_KMEM_ALLOC); \
IF_ASSIGN(var, ent, struct kmemtrace_free_entry, \
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index 7a7a9fd249a..4a194f08f88 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -34,6 +34,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
struct trace_array *tr = branch_tracer;
struct ring_buffer_event *event;
struct trace_branch *entry;
+ struct ring_buffer *buffer;
unsigned long flags;
int cpu, pc;
const char *p;
@@ -54,7 +55,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
goto out;
pc = preempt_count();
- event = trace_buffer_lock_reserve(tr, TRACE_BRANCH,
+ buffer = tr->buffer;
+ event = trace_buffer_lock_reserve(buffer, TRACE_BRANCH,
sizeof(*entry), flags, pc);
if (!event)
goto out;
@@ -74,8 +76,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
entry->line = f->line;
entry->correct = val == expect;
- if (!filter_check_discard(call, entry, tr->buffer, event))
- ring_buffer_unlock_commit(tr->buffer, event);
+ if (!filter_check_discard(call, entry, buffer, event))
+ ring_buffer_unlock_commit(buffer, event);
out:
atomic_dec(&tr->data[cpu]->disabled);
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index a431748ddd6..ead3d724599 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -330,23 +330,6 @@ FTRACE_ENTRY(hw_branch, hw_branch_entry,
F_printk("from: %llx to: %llx", __entry->from, __entry->to)
);
-FTRACE_ENTRY(power, trace_power,
-
- TRACE_POWER,
-
- F_STRUCT(
- __field_struct( struct power_trace, state_data )
- __field_desc( s64, state_data, stamp )
- __field_desc( s64, state_data, end )
- __field_desc( int, state_data, type )
- __field_desc( int, state_data, state )
- ),
-
- F_printk("%llx->%llx type:%u state:%u",
- __entry->stamp, __entry->end,
- __entry->type, __entry->state)
-);
-
FTRACE_ENTRY(kmem_alloc, kmemtrace_alloc_entry,
TRACE_KMEM_ALLOC,
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c
index 55a25c933d1..8d5c171cc99 100644
--- a/kernel/trace/trace_event_profile.c
+++ b/kernel/trace/trace_event_profile.c
@@ -8,6 +8,62 @@
#include <linux/module.h>
#include "trace.h"
+/*
+ * We can't use a size but a type in alloc_percpu()
+ * So let's create a dummy type that matches the desired size
+ */
+typedef struct {char buf[FTRACE_MAX_PROFILE_SIZE];} profile_buf_t;
+
+char *trace_profile_buf;
+EXPORT_SYMBOL_GPL(trace_profile_buf);
+
+char *trace_profile_buf_nmi;
+EXPORT_SYMBOL_GPL(trace_profile_buf_nmi);
+
+/* Count the events in use (per event id, not per instance) */
+static int total_profile_count;
+
+static int ftrace_profile_enable_event(struct ftrace_event_call *event)
+{
+ char *buf;
+ int ret = -ENOMEM;
+
+ if (atomic_inc_return(&event->profile_count))
+ return 0;
+
+ if (!total_profile_count) {
+ buf = (char *)alloc_percpu(profile_buf_t);
+ if (!buf)
+ goto fail_buf;
+
+ rcu_assign_pointer(trace_profile_buf, buf);
+
+ buf = (char *)alloc_percpu(profile_buf_t);
+ if (!buf)
+ goto fail_buf_nmi;
+
+ rcu_assign_pointer(trace_profile_buf_nmi, buf);
+ }
+
+ ret = event->profile_enable();
+ if (!ret) {
+ total_profile_count++;
+ return 0;
+ }
+
+fail_buf_nmi:
+ if (!total_profile_count) {
+ free_percpu(trace_profile_buf_nmi);
+ free_percpu(trace_profile_buf);
+ trace_profile_buf_nmi = NULL;
+ trace_profile_buf = NULL;
+ }
+fail_buf:
+ atomic_dec(&event->profile_count);
+
+ return ret;
+}
+
int ftrace_profile_enable(int event_id)
{
struct ftrace_event_call *event;
@@ -17,7 +73,7 @@ int ftrace_profile_enable(int event_id)
list_for_each_entry(event, &ftrace_events, list) {
if (event->id == event_id && event->profile_enable &&
try_module_get(event->mod)) {
- ret = event->profile_enable(event);
+ ret = ftrace_profile_enable_event(event);
break;
}
}
@@ -26,6 +82,33 @@ int ftrace_profile_enable(int event_id)
return ret;
}
+static void ftrace_profile_disable_event(struct ftrace_event_call *event)
+{
+ char *buf, *nmi_buf;
+
+ if (!atomic_add_negative(-1, &event->profile_count))
+ return;
+
+ event->profile_disable();
+
+ if (!--total_profile_count) {
+ buf = trace_profile_buf;
+ rcu_assign_pointer(trace_profile_buf, NULL);
+
+ nmi_buf = trace_profile_buf_nmi;
+ rcu_assign_pointer(trace_profile_buf_nmi, NULL);
+
+ /*
+ * Ensure every events in profiling have finished before
+ * releasing the buffers
+ */
+ synchronize_sched();
+
+ free_percpu(buf);
+ free_percpu(nmi_buf);
+ }
+}
+
void ftrace_profile_disable(int event_id)
{
struct ftrace_event_call *event;
@@ -33,7 +116,7 @@ void ftrace_profile_disable(int event_id)
mutex_lock(&event_mutex);
list_for_each_entry(event, &ftrace_events, list) {
if (event->id == event_id) {
- event->profile_disable(event);
+ ftrace_profile_disable_event(event);
module_put(event->mod);
break;
}
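The new enable/disable helpers above give each profiled event a reference count and share a pair of RCU-published per-cpu scratch buffers (one regular, one for NMI context): the first user allocates and publishes them, the last user unpublishes and waits out in-flight readers before freeing. A condensed sketch of that lifecycle with a single buffer, assuming callers already serialize on a mutex (event_mutex in the real code); example_ names are illustrative:

#include <linux/percpu.h>
#include <linux/rcupdate.h>

typedef struct { char buf[1024]; } example_buf_t;	/* sizes the percpu allocation */

static char *example_buf;	/* published/retracted with RCU */
static int example_users;

static int example_profile_start(void)
{
	char *buf;

	if (!example_users) {
		buf = (char *)alloc_percpu(example_buf_t);
		if (!buf)
			return -ENOMEM;
		rcu_assign_pointer(example_buf, buf);
	}
	example_users++;
	return 0;
}

static void example_profile_stop(void)
{
	char *buf;

	if (--example_users)
		return;

	buf = example_buf;
	rcu_assign_pointer(example_buf, NULL);
	synchronize_sched();	/* readers run with irqs off; wait them out */
	free_percpu(buf);
}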
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 56c260b83a9..d128f65778e 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -232,10 +232,9 @@ ftrace_event_write(struct file *file, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
struct trace_parser parser;
- size_t read = 0;
- ssize_t ret;
+ ssize_t read, ret;
- if (!cnt || cnt < 0)
+ if (!cnt)
return 0;
ret = tracing_update_buffers();
@@ -247,7 +246,7 @@ ftrace_event_write(struct file *file, const char __user *ubuf,
read = trace_get_user(&parser, ubuf, cnt, ppos);
- if (trace_parser_loaded((&parser))) {
+ if (read >= 0 && trace_parser_loaded((&parser))) {
int set = 1;
if (*parser.buffer == '!')
@@ -271,42 +270,32 @@ ftrace_event_write(struct file *file, const char __user *ubuf,
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
- struct list_head *list = m->private;
- struct ftrace_event_call *call;
+ struct ftrace_event_call *call = v;
(*pos)++;
- for (;;) {
- if (list == &ftrace_events)
- return NULL;
-
- call = list_entry(list, struct ftrace_event_call, list);
-
+ list_for_each_entry_continue(call, &ftrace_events, list) {
/*
* The ftrace subsystem is for showing formats only.
* They can not be enabled or disabled via the event files.
*/
if (call->regfunc)
- break;
-
- list = list->next;
+ return call;
}
- m->private = list->next;
-
- return call;
+ return NULL;
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
- struct ftrace_event_call *call = NULL;
+ struct ftrace_event_call *call;
loff_t l;
mutex_lock(&event_mutex);
- m->private = ftrace_events.next;
+ call = list_entry(&ftrace_events, struct ftrace_event_call, list);
for (l = 0; l <= *pos; ) {
- call = t_next(m, NULL, &l);
+ call = t_next(m, call, &l);
if (!call)
break;
}
@@ -316,37 +305,28 @@ static void *t_start(struct seq_file *m, loff_t *pos)
static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
- struct list_head *list = m->private;
- struct ftrace_event_call *call;
+ struct ftrace_event_call *call = v;
(*pos)++;
- retry:
- if (list == &ftrace_events)
- return NULL;
-
- call = list_entry(list, struct ftrace_event_call, list);
-
- if (!call->enabled) {
- list = list->next;
- goto retry;
+ list_for_each_entry_continue(call, &ftrace_events, list) {
+ if (call->enabled)
+ return call;
}
- m->private = list->next;
-
- return call;
+ return NULL;
}
static void *s_start(struct seq_file *m, loff_t *pos)
{
- struct ftrace_event_call *call = NULL;
+ struct ftrace_event_call *call;
loff_t l;
mutex_lock(&event_mutex);
- m->private = ftrace_events.next;
+ call = list_entry(&ftrace_events, struct ftrace_event_call, list);
for (l = 0; l <= *pos; ) {
- call = s_next(m, NULL, &l);
+ call = s_next(m, call, &l);
if (!call)
break;
}
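The t_next()/s_next() rewrites above drop the hand-rolled list cursor kept in m->private in favour of list_for_each_entry_continue(), with ->start seeding the walk from a pseudo-entry built out of the list head itself so ->next always has something to continue from. A small sketch of that seq_file idiom; struct example and its list are hypothetical:

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>

struct example {
	struct list_head list;
	int enabled;
};

static LIST_HEAD(example_list);
static DEFINE_MUTEX(example_mutex);

static void *example_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct example *e = v;

	(*pos)++;
	list_for_each_entry_continue(e, &example_list, list) {
		if (e->enabled)
			return e;
	}
	return NULL;
}

static void *example_start(struct seq_file *m, loff_t *pos)
{
	struct example *e;
	loff_t l;

	mutex_lock(&example_mutex);
	/* pseudo-element made from the head; only its ->list is ever used */
	e = list_entry(&example_list, struct example, list);
	for (l = 0; l <= *pos; ) {
		e = example_next(m, e, &l);
		if (!e)
			break;
	}
	return e;
}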
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 23245785927..98a6cc5c64e 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -933,8 +933,9 @@ static void postfix_clear(struct filter_parse_state *ps)
while (!list_empty(&ps->postfix)) {
elt = list_first_entry(&ps->postfix, struct postfix_elt, list);
- kfree(elt->operand);
list_del(&elt->list);
+ kfree(elt->operand);
+ kfree(elt);
}
}
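The postfix_clear() change above plugs a leak: each postfix_elt was unlinked and its operand freed, but the element itself never was. The full draining idiom, as a self-contained sketch (struct example_elt is illustrative):

#include <linux/list.h>
#include <linux/slab.h>

struct example_elt {
	struct list_head list;
	char *operand;
};

static void example_clear(struct list_head *head)
{
	struct example_elt *elt;

	while (!list_empty(head)) {
		elt = list_first_entry(head, struct example_elt, list);
		list_del(&elt->list);	/* unlink before freeing */
		kfree(elt->operand);
		kfree(elt);		/* the part the old code forgot */
	}
}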
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c
index ca7d7c4d0c2..69543a905cd 100644
--- a/kernel/trace/trace_hw_branches.c
+++ b/kernel/trace/trace_hw_branches.c
@@ -155,7 +155,7 @@ static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
seq_print_ip_sym(seq, it->from, symflags) &&
trace_seq_printf(seq, "\n"))
return TRACE_TYPE_HANDLED;
- return TRACE_TYPE_PARTIAL_LINE;;
+ return TRACE_TYPE_PARTIAL_LINE;
}
return TRACE_TYPE_UNHANDLED;
}
@@ -165,6 +165,7 @@ void trace_hw_branch(u64 from, u64 to)
struct ftrace_event_call *call = &event_hw_branch;
struct trace_array *tr = hw_branch_trace;
struct ring_buffer_event *event;
+ struct ring_buffer *buf;
struct hw_branch_entry *entry;
unsigned long irq1;
int cpu;
@@ -180,7 +181,8 @@ void trace_hw_branch(u64 from, u64 to)
if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
goto out;
- event = trace_buffer_lock_reserve(tr, TRACE_HW_BRANCHES,
+ buf = tr->buffer;
+ event = trace_buffer_lock_reserve(buf, TRACE_HW_BRANCHES,
sizeof(*entry), 0, 0);
if (!event)
goto out;
@@ -189,8 +191,8 @@ void trace_hw_branch(u64 from, u64 to)
entry->ent.type = TRACE_HW_BRANCHES;
entry->from = from;
entry->to = to;
- if (!filter_check_discard(call, entry, tr->buffer, event))
- trace_buffer_unlock_commit(tr, event, 0, 0);
+ if (!filter_check_discard(call, entry, buf, event))
+ trace_buffer_unlock_commit(buf, event, 0, 0);
out:
atomic_dec(&tr->data[cpu]->disabled);
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index f572f44c6e1..b6c12c6a1bc 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -69,6 +69,9 @@ enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
* @s: trace sequence descriptor
* @fmt: printf format string
*
+ * It returns 0 if the trace oversizes the buffer's free
+ * space, 1 otherwise.
+ *
* The tracer may use either sequence operations or its own
* copy to user routines. To simplify formating of a trace
* trace_seq_printf is used to store strings into a special
@@ -95,7 +98,7 @@ trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
s->len += ret;
- return len;
+ return 1;
}
EXPORT_SYMBOL_GPL(trace_seq_printf);
@@ -486,16 +489,18 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
hardirq ? 'h' : softirq ? 's' : '.'))
return 0;
- if (entry->lock_depth < 0)
- ret = trace_seq_putc(s, '.');
+ if (entry->preempt_count)
+ ret = trace_seq_printf(s, "%x", entry->preempt_count);
else
- ret = trace_seq_printf(s, "%d", entry->lock_depth);
+ ret = trace_seq_putc(s, '.');
+
if (!ret)
return 0;
- if (entry->preempt_count)
- return trace_seq_printf(s, "%x", entry->preempt_count);
- return trace_seq_putc(s, '.');
+ if (entry->lock_depth < 0)
+ return trace_seq_putc(s, '.');
+
+ return trace_seq_printf(s, "%d", entry->lock_depth);
}
static int
@@ -883,7 +888,7 @@ static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
trace_assign_type(field, iter->ent);
if (!S)
- task_state_char(field->prev_state);
+ S = task_state_char(field->prev_state);
T = task_state_char(field->next_state);
if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
field->prev_pid,
@@ -918,7 +923,7 @@ static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
trace_assign_type(field, iter->ent);
if (!S)
- task_state_char(field->prev_state);
+ S = task_state_char(field->prev_state);
T = task_state_char(field->next_state);
SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
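The kernel-doc addition above spells out the return convention the output code relies on: trace_seq_printf() reports 1 on success and 0 once the sequence buffer has no room left, so print-line callbacks propagate that as TRACE_TYPE_PARTIAL_LINE rather than emit a truncated record. A minimal sketch of such a caller, assuming a tracer's print_line hook:

static enum print_line_t example_print_line(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;

	if (!trace_seq_printf(s, "cpu=%d pid=%d\n",
			      iter->cpu, iter->ent->pid))
		return TRACE_TYPE_PARTIAL_LINE;	/* seq buffer full */

	return TRACE_TYPE_HANDLED;
}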
diff --git a/kernel/trace/trace_power.c b/kernel/trace/trace_power.c
deleted file mode 100644
index fe1a00f1445..00000000000
--- a/kernel/trace/trace_power.c
+++ /dev/null
@@ -1,218 +0,0 @@
-/*
- * ring buffer based C-state tracer
- *
- * Arjan van de Ven <arjan@linux.intel.com>
- * Copyright (C) 2008 Intel Corporation
- *
- * Much is borrowed from trace_boot.c which is
- * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
- *
- */
-
-#include <linux/init.h>
-#include <linux/debugfs.h>
-#include <trace/power.h>
-#include <linux/kallsyms.h>
-#include <linux/module.h>
-
-#include "trace.h"
-#include "trace_output.h"
-
-static struct trace_array *power_trace;
-static int __read_mostly trace_power_enabled;
-
-static void probe_power_start(struct power_trace *it, unsigned int type,
- unsigned int level)
-{
- if (!trace_power_enabled)
- return;
-
- memset(it, 0, sizeof(struct power_trace));
- it->state = level;
- it->type = type;
- it->stamp = ktime_get();
-}
-
-
-static void probe_power_end(struct power_trace *it)
-{
- struct ftrace_event_call *call = &event_power;
- struct ring_buffer_event *event;
- struct ring_buffer *buffer;
- struct trace_power *entry;
- struct trace_array_cpu *data;
- struct trace_array *tr = power_trace;
-
- if (!trace_power_enabled)
- return;
-
- buffer = tr->buffer;
-
- preempt_disable();
- it->end = ktime_get();
- data = tr->data[smp_processor_id()];
-
- event = trace_buffer_lock_reserve(buffer, TRACE_POWER,
- sizeof(*entry), 0, 0);
- if (!event)
- goto out;
- entry = ring_buffer_event_data(event);
- entry->state_data = *it;
- if (!filter_check_discard(call, entry, buffer, event))
- trace_buffer_unlock_commit(buffer, event, 0, 0);
- out:
- preempt_enable();
-}
-
-static void probe_power_mark(struct power_trace *it, unsigned int type,
- unsigned int level)
-{
- struct ftrace_event_call *call = &event_power;
- struct ring_buffer_event *event;
- struct ring_buffer *buffer;
- struct trace_power *entry;
- struct trace_array_cpu *data;
- struct trace_array *tr = power_trace;
-
- if (!trace_power_enabled)
- return;
-
- buffer = tr->buffer;
-
- memset(it, 0, sizeof(struct power_trace));
- it->state = level;
- it->type = type;
- it->stamp = ktime_get();
- preempt_disable();
- it->end = it->stamp;
- data = tr->data[smp_processor_id()];
-
- event = trace_buffer_lock_reserve(buffer, TRACE_POWER,
- sizeof(*entry), 0, 0);
- if (!event)
- goto out;
- entry = ring_buffer_event_data(event);
- entry->state_data = *it;
- if (!filter_check_discard(call, entry, buffer, event))
- trace_buffer_unlock_commit(buffer, event, 0, 0);
- out:
- preempt_enable();
-}
-
-static int tracing_power_register(void)
-{
- int ret;
-
- ret = register_trace_power_start(probe_power_start);
- if (ret) {
- pr_info("power trace: Couldn't activate tracepoint"
- " probe to trace_power_start\n");
- return ret;
- }
- ret = register_trace_power_end(probe_power_end);
- if (ret) {
- pr_info("power trace: Couldn't activate tracepoint"
- " probe to trace_power_end\n");
- goto fail_start;
- }
- ret = register_trace_power_mark(probe_power_mark);
- if (ret) {
- pr_info("power trace: Couldn't activate tracepoint"
- " probe to trace_power_mark\n");
- goto fail_end;
- }
- return ret;
-fail_end:
- unregister_trace_power_end(probe_power_end);
-fail_start:
- unregister_trace_power_start(probe_power_start);
- return ret;
-}
-
-static void start_power_trace(struct trace_array *tr)
-{
- trace_power_enabled = 1;
-}
-
-static void stop_power_trace(struct trace_array *tr)
-{
- trace_power_enabled = 0;
-}
-
-static void power_trace_reset(struct trace_array *tr)
-{
- trace_power_enabled = 0;
- unregister_trace_power_start(probe_power_start);
- unregister_trace_power_end(probe_power_end);
- unregister_trace_power_mark(probe_power_mark);
-}
-
-
-static int power_trace_init(struct trace_array *tr)
-{
- power_trace = tr;
-
- trace_power_enabled = 1;
- tracing_power_register();
-
- tracing_reset_online_cpus(tr);
- return 0;
-}
-
-static enum print_line_t power_print_line(struct trace_iterator *iter)
-{
- int ret = 0;
- struct trace_entry *entry = iter->ent;
- struct trace_power *field ;
- struct power_trace *it;
- struct trace_seq *s = &iter->seq;
- struct timespec stamp;
- struct timespec duration;
-
- trace_assign_type(field, entry);
- it = &field->state_data;
- stamp = ktime_to_timespec(it->stamp);
- duration = ktime_to_timespec(ktime_sub(it->end, it->stamp));
-
- if (entry->type == TRACE_POWER) {
- if (it->type == POWER_CSTATE)
- ret = trace_seq_printf(s, "[%5ld.%09ld] CSTATE: Going to C%i on cpu %i for %ld.%09ld\n",
- stamp.tv_sec,
- stamp.tv_nsec,
- it->state, iter->cpu,
- duration.tv_sec,
- duration.tv_nsec);
- if (it->type == POWER_PSTATE)
- ret = trace_seq_printf(s, "[%5ld.%09ld] PSTATE: Going to P%i on cpu %i\n",
- stamp.tv_sec,
- stamp.tv_nsec,
- it->state, iter->cpu);
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
- return TRACE_TYPE_HANDLED;
- }
- return TRACE_TYPE_UNHANDLED;
-}
-
-static void power_print_header(struct seq_file *s)
-{
- seq_puts(s, "# TIMESTAMP STATE EVENT\n");
- seq_puts(s, "# | | |\n");
-}
-
-static struct tracer power_tracer __read_mostly =
-{
- .name = "power",
- .init = power_trace_init,
- .start = start_power_trace,
- .stop = stop_power_trace,
- .reset = power_trace_reset,
- .print_line = power_print_line,
- .print_header = power_print_header,
-};
-
-static int init_power_trace(void)
-{
- return register_tracer(&power_tracer);
-}
-device_initcall(init_power_trace);
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
index 687699d365a..2547d8813cf 100644
--- a/kernel/trace/trace_printk.c
+++ b/kernel/trace/trace_printk.c
@@ -11,7 +11,6 @@
#include <linux/ftrace.h>
#include <linux/string.h>
#include <linux/module.h>
-#include <linux/marker.h>
#include <linux/mutex.h>
#include <linux/ctype.h>
#include <linux/list.h>
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 0f6facb050a..8504ac71e4e 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -296,14 +296,14 @@ static const struct file_operations stack_trace_fops = {
int
stack_trace_sysctl(struct ctl_table *table, int write,
- struct file *file, void __user *buffer, size_t *lenp,
+ void __user *buffer, size_t *lenp,
loff_t *ppos)
{
int ret;
mutex_lock(&stack_sysctl_mutex);
- ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
+ ret = proc_dointvec(table, write, buffer, lenp, ppos);
if (ret || !write ||
(last_stack_tracer_enabled == !!stack_tracer_enabled))
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 8712ce3c6a0..527e17eae57 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -2,7 +2,7 @@
#include <trace/events/syscalls.h>
#include <linux/kernel.h>
#include <linux/ftrace.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <asm/syscall.h>
#include "trace_output.h"
@@ -166,7 +166,7 @@ int syscall_exit_format(struct ftrace_event_call *call, struct trace_seq *s)
"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n",
SYSCALL_FIELD(int, nr),
- SYSCALL_FIELD(unsigned long, ret));
+ SYSCALL_FIELD(long, ret));
if (!ret)
return 0;
@@ -212,7 +212,7 @@ int syscall_exit_define_fields(struct ftrace_event_call *call)
if (ret)
return ret;
- ret = trace_define_field(call, SYSCALL_FIELD(unsigned long, ret), 0,
+ ret = trace_define_field(call, SYSCALL_FIELD(long, ret), 0,
FILTER_OTHER);
return ret;
@@ -384,10 +384,13 @@ static int sys_prof_refcount_exit;
static void prof_syscall_enter(struct pt_regs *regs, long id)
{
- struct syscall_trace_enter *rec;
struct syscall_metadata *sys_data;
+ struct syscall_trace_enter *rec;
+ unsigned long flags;
+ char *raw_data;
int syscall_nr;
int size;
+ int cpu;
syscall_nr = syscall_get_nr(current, regs);
if (!test_bit(syscall_nr, enabled_prof_enter_syscalls))
@@ -402,20 +405,38 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
size = ALIGN(size + sizeof(u32), sizeof(u64));
size -= sizeof(u32);
- do {
- char raw_data[size];
+ if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
+ "profile buffer not large enough"))
+ return;
+
+ /* Protect the per cpu buffer, begin the rcu read side */
+ local_irq_save(flags);
- /* zero the dead bytes from align to not leak stack to user */
- *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
+ cpu = smp_processor_id();
+
+ if (in_nmi())
+ raw_data = rcu_dereference(trace_profile_buf_nmi);
+ else
+ raw_data = rcu_dereference(trace_profile_buf);
+
+ if (!raw_data)
+ goto end;
- rec = (struct syscall_trace_enter *) raw_data;
- tracing_generic_entry_update(&rec->ent, 0, 0);
- rec->ent.type = sys_data->enter_id;
- rec->nr = syscall_nr;
- syscall_get_arguments(current, regs, 0, sys_data->nb_args,
- (unsigned long *)&rec->args);
- perf_tpcounter_event(sys_data->enter_id, 0, 1, rec, size);
- } while(0);
+ raw_data = per_cpu_ptr(raw_data, cpu);
+
+ /* zero the dead bytes from align to not leak stack to user */
+ *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
+
+ rec = (struct syscall_trace_enter *) raw_data;
+ tracing_generic_entry_update(&rec->ent, 0, 0);
+ rec->ent.type = sys_data->enter_id;
+ rec->nr = syscall_nr;
+ syscall_get_arguments(current, regs, 0, sys_data->nb_args,
+ (unsigned long *)&rec->args);
+ perf_tp_event(sys_data->enter_id, 0, 1, rec, size);
+
+end:
+ local_irq_restore(flags);
}
int reg_prof_syscall_enter(char *name)
@@ -460,8 +481,12 @@ void unreg_prof_syscall_enter(char *name)
static void prof_syscall_exit(struct pt_regs *regs, long ret)
{
struct syscall_metadata *sys_data;
- struct syscall_trace_exit rec;
+ struct syscall_trace_exit *rec;
+ unsigned long flags;
int syscall_nr;
+ char *raw_data;
+ int size;
+ int cpu;
syscall_nr = syscall_get_nr(current, regs);
if (!test_bit(syscall_nr, enabled_prof_exit_syscalls))
@@ -471,12 +496,46 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
if (!sys_data)
return;
- tracing_generic_entry_update(&rec.ent, 0, 0);
- rec.ent.type = sys_data->exit_id;
- rec.nr = syscall_nr;
- rec.ret = syscall_get_return_value(current, regs);
+ /* We can probably do that at build time */
+ size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
+ size -= sizeof(u32);
- perf_tpcounter_event(sys_data->exit_id, 0, 1, &rec, sizeof(rec));
+ /*
+ * Impossible, but be paranoid with the future
+ * How to put this check outside runtime?
+ */
+ if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
+ "exit event has grown above profile buffer size"))
+ return;
+
+ /* Protect the per cpu buffer, begin the rcu read side */
+ local_irq_save(flags);
+ cpu = smp_processor_id();
+
+ if (in_nmi())
+ raw_data = rcu_dereference(trace_profile_buf_nmi);
+ else
+ raw_data = rcu_dereference(trace_profile_buf);
+
+ if (!raw_data)
+ goto end;
+
+ raw_data = per_cpu_ptr(raw_data, cpu);
+
+ /* zero the dead bytes from align to not leak stack to user */
+ *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
+
+ rec = (struct syscall_trace_exit *)raw_data;
+
+ tracing_generic_entry_update(&rec->ent, 0, 0);
+ rec->ent.type = sys_data->exit_id;
+ rec->nr = syscall_nr;
+ rec->ret = syscall_get_return_value(current, regs);
+
+ perf_tp_event(sys_data->exit_id, 0, 1, rec, size);
+
+end:
+ local_irq_restore(flags);
}
int reg_prof_syscall_exit(char *name)
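Both profiling hooks above now assemble their records the same way: compute a size that keeps the payload u64-aligned after perf's u32 header, look up the per-cpu scratch buffer (the NMI copy when in_nmi()) under rcu_dereference() with interrupts off so the disable path's synchronize_sched() cannot free it mid-use, zero the alignment padding, and hand the record to perf_tp_event(). Folded into one illustrative helper; struct example_rec and the function itself are hypothetical, not from the patch:

#include <linux/ftrace_event.h>
#include <linux/hardirq.h>
#include <linux/percpu.h>
#include <linux/perf_event.h>

struct example_rec {
	struct trace_entry	ent;
	int			nr;
	long			ret;
};

static void example_profile_record(int id, int nr, long retval)
{
	struct example_rec *rec;
	unsigned long flags;
	char *raw_data;
	int size;

	/* pad so the record stays u64-aligned once perf prepends its u32 header */
	size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	local_irq_save(flags);		/* doubles as the RCU read side here */

	raw_data = in_nmi() ? rcu_dereference(trace_profile_buf_nmi)
			    : rcu_dereference(trace_profile_buf);
	if (!raw_data)
		goto out;

	raw_data = per_cpu_ptr(raw_data, smp_processor_id());

	/* zero the tail padding so no stack or heap bytes leak to userspace */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;

	rec = (struct example_rec *)raw_data;
	tracing_generic_entry_update(&rec->ent, 0, 0);
	rec->ent.type = id;
	rec->nr = nr;
	rec->ret = retval;

	perf_tp_event(id, 0, 1, rec, size);
out:
	local_irq_restore(flags);
}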