From b07c3f193a8074aa4afe43cfa8ae38ec4c7ccfa9 Mon Sep 17 00:00:00 2001 From: Mathieu Desnoyers Date: Fri, 18 Jul 2008 12:16:17 -0400 Subject: ftrace: port to tracepoints Porting the trace_mark() used by ftrace to tracepoints. (cleanup) Changelog : - Change error messages : marker -> tracepoint [ mingo@elte.hu: conflict resolutions ] Signed-off-by: Mathieu Desnoyers Acked-by: 'Peter Zijlstra' Signed-off-by: Ingo Molnar --- kernel/trace/trace_sched_switch.c | 120 +++++++-------------------------- kernel/trace/trace_sched_wakeup.c | 135 ++++++++++---------------------------- 2 files changed, 58 insertions(+), 197 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c index cb817a209aa..789e927abc9 100644 --- a/kernel/trace/trace_sched_switch.c +++ b/kernel/trace/trace_sched_switch.c @@ -9,8 +9,8 @@ #include #include #include -#include #include +#include #include "trace.h" @@ -19,16 +19,17 @@ static int __read_mostly tracer_enabled; static atomic_t sched_ref; static void -sched_switch_func(void *private, void *__rq, struct task_struct *prev, +probe_sched_switch(struct rq *__rq, struct task_struct *prev, struct task_struct *next) { - struct trace_array **ptr = private; - struct trace_array *tr = *ptr; struct trace_array_cpu *data; unsigned long flags; long disabled; int cpu; + if (!atomic_read(&sched_ref)) + return; + tracing_record_cmdline(prev); tracing_record_cmdline(next); @@ -37,95 +38,42 @@ sched_switch_func(void *private, void *__rq, struct task_struct *prev, local_irq_save(flags); cpu = raw_smp_processor_id(); - data = tr->data[cpu]; + data = ctx_trace->data[cpu]; disabled = atomic_inc_return(&data->disabled); if (likely(disabled == 1)) - tracing_sched_switch_trace(tr, data, prev, next, flags); + tracing_sched_switch_trace(ctx_trace, data, prev, next, flags); atomic_dec(&data->disabled); local_irq_restore(flags); } -static notrace void -sched_switch_callback(void *probe_data, void *call_data, - const char *format, va_list *args) -{ - struct task_struct *prev; - struct task_struct *next; - struct rq *__rq; - - if (!atomic_read(&sched_ref)) - return; - - /* skip prev_pid %d next_pid %d prev_state %ld */ - (void)va_arg(*args, int); - (void)va_arg(*args, int); - (void)va_arg(*args, long); - __rq = va_arg(*args, typeof(__rq)); - prev = va_arg(*args, typeof(prev)); - next = va_arg(*args, typeof(next)); - - /* - * If tracer_switch_func only points to the local - * switch func, it still needs the ptr passed to it. 
- */ - sched_switch_func(probe_data, __rq, prev, next); -} - static void -wakeup_func(void *private, void *__rq, struct task_struct *wakee, struct - task_struct *curr) +probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee) { - struct trace_array **ptr = private; - struct trace_array *tr = *ptr; struct trace_array_cpu *data; unsigned long flags; long disabled; int cpu; - if (!tracer_enabled) + if (!likely(tracer_enabled)) return; - tracing_record_cmdline(curr); + tracing_record_cmdline(current); local_irq_save(flags); cpu = raw_smp_processor_id(); - data = tr->data[cpu]; + data = ctx_trace->data[cpu]; disabled = atomic_inc_return(&data->disabled); if (likely(disabled == 1)) - tracing_sched_wakeup_trace(tr, data, wakee, curr, flags); + tracing_sched_wakeup_trace(ctx_trace, data, wakee, current, + flags); atomic_dec(&data->disabled); local_irq_restore(flags); } -static notrace void -wake_up_callback(void *probe_data, void *call_data, - const char *format, va_list *args) -{ - struct task_struct *curr; - struct task_struct *task; - struct rq *__rq; - - if (likely(!tracer_enabled)) - return; - - /* Skip pid %d state %ld */ - (void)va_arg(*args, int); - (void)va_arg(*args, long); - /* now get the meat: "rq %p task %p rq->curr %p" */ - __rq = va_arg(*args, typeof(__rq)); - task = va_arg(*args, typeof(task)); - curr = va_arg(*args, typeof(curr)); - - tracing_record_cmdline(task); - tracing_record_cmdline(curr); - - wakeup_func(probe_data, __rq, task, curr); -} - static void sched_switch_reset(struct trace_array *tr) { int cpu; @@ -140,60 +88,40 @@ static int tracing_sched_register(void) { int ret; - ret = marker_probe_register("kernel_sched_wakeup", - "pid %d state %ld ## rq %p task %p rq->curr %p", - wake_up_callback, - &ctx_trace); + ret = register_trace_sched_wakeup(probe_sched_wakeup); if (ret) { - pr_info("wakeup trace: Couldn't add marker" + pr_info("wakeup trace: Couldn't activate tracepoint" " probe to kernel_sched_wakeup\n"); return ret; } - ret = marker_probe_register("kernel_sched_wakeup_new", - "pid %d state %ld ## rq %p task %p rq->curr %p", - wake_up_callback, - &ctx_trace); + ret = register_trace_sched_wakeup_new(probe_sched_wakeup); if (ret) { - pr_info("wakeup trace: Couldn't add marker" + pr_info("wakeup trace: Couldn't activate tracepoint" " probe to kernel_sched_wakeup_new\n"); goto fail_deprobe; } - ret = marker_probe_register("kernel_sched_schedule", - "prev_pid %d next_pid %d prev_state %ld " - "## rq %p prev %p next %p", - sched_switch_callback, - &ctx_trace); + ret = register_trace_sched_switch(probe_sched_switch); if (ret) { - pr_info("sched trace: Couldn't add marker" + pr_info("sched trace: Couldn't activate tracepoint" " probe to kernel_sched_schedule\n"); goto fail_deprobe_wake_new; } return ret; fail_deprobe_wake_new: - marker_probe_unregister("kernel_sched_wakeup_new", - wake_up_callback, - &ctx_trace); + unregister_trace_sched_wakeup_new(probe_sched_wakeup); fail_deprobe: - marker_probe_unregister("kernel_sched_wakeup", - wake_up_callback, - &ctx_trace); + unregister_trace_sched_wakeup(probe_sched_wakeup); return ret; } static void tracing_sched_unregister(void) { - marker_probe_unregister("kernel_sched_schedule", - sched_switch_callback, - &ctx_trace); - marker_probe_unregister("kernel_sched_wakeup_new", - wake_up_callback, - &ctx_trace); - marker_probe_unregister("kernel_sched_wakeup", - wake_up_callback, - &ctx_trace); + unregister_trace_sched_switch(probe_sched_switch); + unregister_trace_sched_wakeup_new(probe_sched_wakeup); + 
unregister_trace_sched_wakeup(probe_sched_wakeup); } static void tracing_start_sched_switch(void) diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index e303ccb62cd..08206b4e29c 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include "trace.h" @@ -112,18 +112,18 @@ static int report_latency(cycle_t delta) } static void notrace -wakeup_sched_switch(void *private, void *rq, struct task_struct *prev, +probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next) { unsigned long latency = 0, t0 = 0, t1 = 0; - struct trace_array **ptr = private; - struct trace_array *tr = *ptr; struct trace_array_cpu *data; cycle_t T0, T1, delta; unsigned long flags; long disabled; int cpu; + tracing_record_cmdline(prev); + if (unlikely(!tracer_enabled)) return; @@ -140,11 +140,11 @@ wakeup_sched_switch(void *private, void *rq, struct task_struct *prev, return; /* The task we are waiting for is waking up */ - data = tr->data[wakeup_cpu]; + data = wakeup_trace->data[wakeup_cpu]; /* disable local data, not wakeup_cpu data */ cpu = raw_smp_processor_id(); - disabled = atomic_inc_return(&tr->data[cpu]->disabled); + disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled); if (likely(disabled != 1)) goto out; @@ -155,7 +155,7 @@ wakeup_sched_switch(void *private, void *rq, struct task_struct *prev, if (unlikely(!tracer_enabled || next != wakeup_task)) goto out_unlock; - trace_function(tr, data, CALLER_ADDR1, CALLER_ADDR2, flags); + trace_function(wakeup_trace, data, CALLER_ADDR1, CALLER_ADDR2, flags); /* * usecs conversion is slow so we try to delay the conversion @@ -174,39 +174,14 @@ wakeup_sched_switch(void *private, void *rq, struct task_struct *prev, t0 = nsecs_to_usecs(T0); t1 = nsecs_to_usecs(T1); - update_max_tr(tr, wakeup_task, wakeup_cpu); + update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu); out_unlock: - __wakeup_reset(tr); + __wakeup_reset(wakeup_trace); __raw_spin_unlock(&wakeup_lock); local_irq_restore(flags); out: - atomic_dec(&tr->data[cpu]->disabled); -} - -static notrace void -sched_switch_callback(void *probe_data, void *call_data, - const char *format, va_list *args) -{ - struct task_struct *prev; - struct task_struct *next; - struct rq *__rq; - - /* skip prev_pid %d next_pid %d prev_state %ld */ - (void)va_arg(*args, int); - (void)va_arg(*args, int); - (void)va_arg(*args, long); - __rq = va_arg(*args, typeof(__rq)); - prev = va_arg(*args, typeof(prev)); - next = va_arg(*args, typeof(next)); - - tracing_record_cmdline(prev); - - /* - * If tracer_switch_func only points to the local - * switch func, it still needs the ptr passed to it. 
- */ - wakeup_sched_switch(probe_data, __rq, prev, next); + atomic_dec(&wakeup_trace->data[cpu]->disabled); } static void __wakeup_reset(struct trace_array *tr) @@ -240,19 +215,24 @@ static void wakeup_reset(struct trace_array *tr) } static void -wakeup_check_start(struct trace_array *tr, struct task_struct *p, - struct task_struct *curr) +probe_wakeup(struct rq *rq, struct task_struct *p) { int cpu = smp_processor_id(); unsigned long flags; long disabled; + if (likely(!tracer_enabled)) + return; + + tracing_record_cmdline(p); + tracing_record_cmdline(current); + if (likely(!rt_task(p)) || p->prio >= wakeup_prio || - p->prio >= curr->prio) + p->prio >= current->prio) return; - disabled = atomic_inc_return(&tr->data[cpu]->disabled); + disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled); if (unlikely(disabled != 1)) goto out; @@ -264,7 +244,7 @@ wakeup_check_start(struct trace_array *tr, struct task_struct *p, goto out_locked; /* reset the trace */ - __wakeup_reset(tr); + __wakeup_reset(wakeup_trace); wakeup_cpu = task_cpu(p); wakeup_prio = p->prio; @@ -274,74 +254,37 @@ wakeup_check_start(struct trace_array *tr, struct task_struct *p, local_save_flags(flags); - tr->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu); - trace_function(tr, tr->data[wakeup_cpu], + wakeup_trace->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu); + trace_function(wakeup_trace, wakeup_trace->data[wakeup_cpu], CALLER_ADDR1, CALLER_ADDR2, flags); out_locked: __raw_spin_unlock(&wakeup_lock); out: - atomic_dec(&tr->data[cpu]->disabled); -} - -static notrace void -wake_up_callback(void *probe_data, void *call_data, - const char *format, va_list *args) -{ - struct trace_array **ptr = probe_data; - struct trace_array *tr = *ptr; - struct task_struct *curr; - struct task_struct *task; - struct rq *__rq; - - if (likely(!tracer_enabled)) - return; - - /* Skip pid %d state %ld */ - (void)va_arg(*args, int); - (void)va_arg(*args, long); - /* now get the meat: "rq %p task %p rq->curr %p" */ - __rq = va_arg(*args, typeof(__rq)); - task = va_arg(*args, typeof(task)); - curr = va_arg(*args, typeof(curr)); - - tracing_record_cmdline(task); - tracing_record_cmdline(curr); - - wakeup_check_start(tr, task, curr); + atomic_dec(&wakeup_trace->data[cpu]->disabled); } static void start_wakeup_tracer(struct trace_array *tr) { int ret; - ret = marker_probe_register("kernel_sched_wakeup", - "pid %d state %ld ## rq %p task %p rq->curr %p", - wake_up_callback, - &wakeup_trace); + ret = register_trace_sched_wakeup(probe_wakeup); if (ret) { - pr_info("wakeup trace: Couldn't add marker" + pr_info("wakeup trace: Couldn't activate tracepoint" " probe to kernel_sched_wakeup\n"); return; } - ret = marker_probe_register("kernel_sched_wakeup_new", - "pid %d state %ld ## rq %p task %p rq->curr %p", - wake_up_callback, - &wakeup_trace); + ret = register_trace_sched_wakeup_new(probe_wakeup); if (ret) { - pr_info("wakeup trace: Couldn't add marker" + pr_info("wakeup trace: Couldn't activate tracepoint" " probe to kernel_sched_wakeup_new\n"); goto fail_deprobe; } - ret = marker_probe_register("kernel_sched_schedule", - "prev_pid %d next_pid %d prev_state %ld " - "## rq %p prev %p next %p", - sched_switch_callback, - &wakeup_trace); + ret = register_trace_sched_switch(probe_wakeup_sched_switch); if (ret) { - pr_info("sched trace: Couldn't add marker" + pr_info("sched trace: Couldn't activate tracepoint" " probe to kernel_sched_schedule\n"); goto fail_deprobe_wake_new; } @@ -363,28 +306,18 @@ static void start_wakeup_tracer(struct 
trace_array *tr) return; fail_deprobe_wake_new: - marker_probe_unregister("kernel_sched_wakeup_new", - wake_up_callback, - &wakeup_trace); + unregister_trace_sched_wakeup_new(probe_wakeup); fail_deprobe: - marker_probe_unregister("kernel_sched_wakeup", - wake_up_callback, - &wakeup_trace); + unregister_trace_sched_wakeup(probe_wakeup); } static void stop_wakeup_tracer(struct trace_array *tr) { tracer_enabled = 0; unregister_ftrace_function(&trace_ops); - marker_probe_unregister("kernel_sched_schedule", - sched_switch_callback, - &wakeup_trace); - marker_probe_unregister("kernel_sched_wakeup_new", - wake_up_callback, - &wakeup_trace); - marker_probe_unregister("kernel_sched_wakeup", - wake_up_callback, - &wakeup_trace); + unregister_trace_sched_switch(probe_wakeup_sched_switch); + unregister_trace_sched_wakeup_new(probe_wakeup); + unregister_trace_sched_wakeup(probe_wakeup); } static void wakeup_tracer_init(struct trace_array *tr) -- cgit v1.2.3-70-g09d2 From 5f87f1121895dc09d2d1c1db5f14af6aa4ce3e94 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 23 Jul 2008 14:15:22 +0200 Subject: tracing: clean up tracepoints kconfig structure Do not expose users to CONFIG_TRACEPOINTS; tracers can select it just fine. Update ftrace to select CONFIG_TRACEPOINTS. Signed-off-by: Ingo Molnar --- init/Kconfig | 9 +++++---- kernel/trace/Kconfig | 1 + 2 files changed, 6 insertions(+), 4 deletions(-) (limited to 'kernel/trace') diff --git a/init/Kconfig b/init/Kconfig index d5994490b0b..031344f954f 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -771,11 +771,12 @@ config PROFILING Say Y here to enable the extended profiling support mechanisms used by profilers such as OProfile. +# +# Place an empty function call at each tracepoint site. Can be +# dynamically changed for a probe function. +# config TRACEPOINTS - bool "Activate tracepoints" - help - Place an empty function call at each tracepoint site. Can be - dynamically changed for a probe function. + bool config MARKERS bool "Activate markers" diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 263e9e6bbd6..cae2637d5e6 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -14,6 +14,7 @@ config TRACING bool select DEBUG_FS select STACKTRACE + select TRACEPOINTS config FTRACE bool "Kernel Function Tracer" -- cgit v1.2.3-70-g09d2 From 8da3821ba5634497da63d58a69e24a97697c4a2b Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Thu, 14 Aug 2008 15:45:07 -0400 Subject: ftrace: create __mcount_loc section This patch creates a section in the kernel called "__mcount_loc". This will hold a list of pointers to the mcount relocation for each call site of mcount. For example: objdump -dr init/main.o [...] Disassembly of section .text: 0000000000000000 <do_one_initcall>: 0: 55 push %rbp [...] 000000000000017b <init_post>: 17b: 55 push %rbp 17c: 48 89 e5 mov %rsp,%rbp 17f: 53 push %rbx 180: 48 83 ec 08 sub $0x8,%rsp 184: e8 00 00 00 00 callq 189 <init_post+0xe> 185: R_X86_64_PC32 mcount+0xfffffffffffffffc [...] We will add a section to point to each function call. .section __mcount_loc,"a",@progbits [...] .quad .text + 0x185 [...] The offset of the mcount call site in init_post is an offset from the start of the section, not from the start of the function init_post. The mcount relocation is at the call site 0x185 from the start of the .text section. .text + 0x185 == init_post + 0xa We need a way to add this __mcount_loc section in a way that we do not lose the relocations after final link.
The .text section here will be attached to all other .text sections after final link and the offsets will be meaningless. We need to keep track of where these .text sections are. To do this, we use the start of the first function in the section: do_one_initcall. We can make a tmp.s file with this function as a reference to the start of the .text section. .section __mcount_loc,"a",@progbits [...] .quad do_one_initcall + 0x185 [...] Then we can compile tmp.s into tmp.o: gcc -c tmp.s -o tmp.o and link it back into main.o: ld -r main.o tmp.o -o tmp_main.o mv tmp_main.o main.o But we have a problem. What happens if the first function in a section is not exported and is a static function? The linker will not let tmp.o use it. This case exists in main.o as well. Disassembly of section .init.text: 0000000000000000 <set_reset_devices>: 0: 55 push %rbp 1: 48 89 e5 mov %rsp,%rbp 4: e8 00 00 00 00 callq 9 <set_reset_devices+0x9> 5: R_X86_64_PC32 mcount+0xfffffffffffffffc The first function in .init.text is a static function. 00000000000000a8 t __setup_set_reset_devices 000000000000105f t __setup_str_set_reset_devices 0000000000000000 t set_reset_devices The lowercase 't' means that set_reset_devices is local and is not exported. If we simply try to link tmp.o against set_reset_devices, we end up with two symbols: one local and one global. .section __mcount_loc,"a",@progbits .quad set_reset_devices + 0x10 00000000000000a8 t __setup_set_reset_devices 000000000000105f t __setup_str_set_reset_devices 0000000000000000 t set_reset_devices U set_reset_devices We still have an undefined reference to set_reset_devices, and if we try to compile the kernel, we will end up with an undefined reference to set_reset_devices, or even worse, it could be exported someplace else, and then we would have a reference to the wrong location. To handle this case, we make an intermediate step using objcopy. We convert set_reset_devices into a global exported symbol before linking it with tmp.o and set it back afterwards: 00000000000000a8 t __setup_set_reset_devices 000000000000105f t __setup_str_set_reset_devices 0000000000000000 T set_reset_devices 00000000000000a8 t __setup_set_reset_devices 000000000000105f t __setup_str_set_reset_devices 0000000000000000 T set_reset_devices 00000000000000a8 t __setup_set_reset_devices 000000000000105f t __setup_str_set_reset_devices 0000000000000000 t set_reset_devices Now we have a section in main.o called __mcount_loc that we can place somewhere in the kernel using vmlinux.ld.S, and access it to convert all these locations that call mcount into nops before starting SMP, thus eliminating the need to do this with kstop_machine. Note: a well-documented Perl script (scripts/recordmcount.pl) is used to do all of this in one place.
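To make the consumption side concrete: once the linker script brackets the accumulated section with __start_mcount_loc and __stop_mcount_loc (as the vmlinux.lds.h hunk below does), walking the table at boot reduces to iterating an array of addresses. A minimal sketch under that assumption; patch_mcount_sites() and nop_out() are hypothetical names, stand-ins for the arch-specific patching code, not actual kernel functions:

/* Walk the __mcount_loc table assembled at link time.  Each entry is
 * the address of one "call mcount" site recorded by recordmcount.pl.
 * The bound symbols are provided by the linker script. */
extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

static void nop_out(unsigned long call_site)
{
	/* hypothetical: arch code would rewrite the bytes at call_site */
	(void)call_site;
}

static void patch_mcount_sites(void)
{
	unsigned long *p;

	for (p = __start_mcount_loc; p < __stop_mcount_loc; p++)
		nop_out(*p);
}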
Signed-off-by: Steven Rostedt Signed-off-by: Ingo Molnar --- include/asm-generic/vmlinux.lds.h | 8 ++ kernel/trace/Kconfig | 8 ++ scripts/Makefile.build | 6 + scripts/recordmcount.pl | 280 ++++++++++++++++++++++++++++++++++++++ 4 files changed, 302 insertions(+) create mode 100755 scripts/recordmcount.pl (limited to 'kernel/trace') diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 3d8e472a09c..838d9b2a0da 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -37,6 +37,13 @@ #define MEM_DISCARD(sec) *(.mem##sec) #endif +#ifdef CONFIG_FTRACE_MCOUNT_RECORD +#define MCOUNT_REC() VMLINUX_SYMBOL(__start_mcount_loc) = .; \ + *(__mcount_loc) \ + VMLINUX_SYMBOL(__stop_mcount_loc) = .; +#else +#define MCOUNT_REC() +#endif /* .data section */ #define DATA_DATA \ @@ -192,6 +199,7 @@ /* __*init sections */ \ __init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \ *(.ref.rodata) \ + MCOUNT_REC() \ DEV_KEEP(init.rodata) \ DEV_KEEP(exit.rodata) \ CPU_KEEP(init.rodata) \ diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index cae2637d5e6..14d9505178c 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -7,6 +7,9 @@ config HAVE_FTRACE config HAVE_DYNAMIC_FTRACE bool +config HAVE_FTRACE_MCOUNT_RECORD + bool + config TRACER_MAX_TRACE bool @@ -122,6 +125,11 @@ config DYNAMIC_FTRACE were made. If so, it runs stop_machine (stops all CPUS) and modifies the code to jump over the call to ftrace. +config FTRACE_MCOUNT_RECORD + def_bool y + depends on DYNAMIC_FTRACE + depends on HAVE_FTRACE_MCOUNT_RECORD + config FTRACE_SELFTEST bool diff --git a/scripts/Makefile.build b/scripts/Makefile.build index 277cfe0b710..463ddcc583e 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build @@ -198,10 +198,16 @@ cmd_modversions = \ fi; endif +ifdef CONFIG_FTRACE_MCOUNT_RECORD +cmd_record_mcount = scripts/recordmcount.pl "$(ARCH)" \ + "$(OBJDUMP)" "$(OBJCOPY)" "$(CC)" "$(LD)" "$(NM)" "$(RM)" "$(MV)" "$(@)"; +endif + define rule_cc_o_c $(call echo-cmd,checksrc) $(cmd_checksrc) \ $(call echo-cmd,cc_o_c) $(cmd_cc_o_c); \ $(cmd_modversions) \ + $(cmd_record_mcount) \ scripts/basic/fixdep $(depfile) $@ '$(call make-cmd,cc_o_c)' > \ $(dot-target).tmp; \ rm -f $(depfile); \ diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl new file mode 100755 index 00000000000..44b4b23e91b --- /dev/null +++ b/scripts/recordmcount.pl @@ -0,0 +1,280 @@ +#!/usr/bin/perl -w +# (c) 2008, Steven Rostedt +# Licensed under the terms of the GNU GPL License version 2 +# +# recordmcount.pl - makes a section called __mcount_loc that holds +# all the offsets to the calls to mcount. +# +# +# What we want to end up with is a section in vmlinux called +# __mcount_loc that contains a list of pointers to all the +# call sites in the kernel that call mcount. Later on boot up, the kernel +# will read this list, save the locations and turn them into nops. +# When tracing or profiling is later enabled, these locations will then +# be converted back to pointers to some function. +# +# This is no easy feat. This script is called just after the original +# object is compiled and before it is linked. +# +# The references to the call sites are offsets from the section of text +# that the call site is in. Hence, all functions in a section that +# has a call site to mcount, will have the offset from the beginning of +# the section and not the beginning of the function. +# +# The trick is to find a way to record the beginning of the section. 
+# The way we do this is to look at the first function in the section +# which will also be the location of that section after final link. +# e.g. +# +# .section ".text.sched" +# .globl my_func +# my_func: +# [...] +# call mcount (offset: 0x5) +# [...] +# ret +# other_func: +# [...] +# call mcount (offset: 0x1b) +# [...] +# +# Both relocation offsets for the mcounts in the above example will be +# offset from .text.sched. If we make another file called tmp.s with: +# +# .section __mcount_loc +# .quad my_func + 0x5 +# .quad my_func + 0x1b +# +# We can then compile this tmp.s into tmp.o, and link it to the original +# object. +# +# But this gets hard if my_func is not globl (a static function). +# In such a case we have: +# +# .section ".text.sched" +# my_func: +# [...] +# call mcount (offset: 0x5) +# [...] +# ret +# .globl my_func +# other_func: +# [...] +# call mcount (offset: 0x1b) +# [...] +# +# If we make the tmp.s the same as above, when we link together with +# the original object, we will end up with two symbols for my_func: +# one local, one global. After final compile, we will end up with +# an undefined reference to my_func. +# +# Since local objects can reference local variables, we need to find +# a way to make tmp.o reference the local objects of the original object +# file after it is linked together. To do this, we convert the my_func +# into a global symbol before linking tmp.o. Then after we link tmp.o +# we will only have a single symbol for my_func that is global. +# We can convert my_func back into a local symbol and we are done. +# +# Here are the steps we take: +# +# 1) Record all the local symbols by using 'nm' +# 2) Use objdump to find all the call site offsets and sections for +# mcount. +# 3) Compile the list into its own object. +# 4) Do we have to deal with local functions? If not, go to step 8. +# 5) Make an object that converts these local functions to global symbols +# with objcopy. +# 6) Link together this new object with the list object. +# 7) Convert the local functions back to local symbols and rename +# the result as the original object. +# End. +# 8) Link the object with the list object. +# 9) Move the result back to the original object. +# End. +# + +use strict; + +my $P = $0; +$P =~ s@.*/@@g; + +my $V = '0.1'; + +if ($#ARGV < 6) { + print "usage: $P arch objdump objcopy cc ld nm rm mv inputfile\n"; + print "version: $V\n"; + exit(1); +} + +my ($arch, $objdump, $objcopy, $cc, $ld, $nm, $rm, $mv, $inputfile) = @ARGV; + +$objdump = "objdump" if ((length $objdump) == 0); +$objcopy = "objcopy" if ((length $objcopy) == 0); +$cc = "gcc" if ((length $cc) == 0); +$ld = "ld" if ((length $ld) == 0); +$nm = "nm" if ((length $nm) == 0); +$rm = "rm" if ((length $rm) == 0); +$mv = "mv" if ((length $mv) == 0); + +#print STDERR "running: $P '$arch' '$objdump' '$objcopy' '$cc' '$ld' " . 
+# "'$nm' '$rm' '$mv' '$inputfile'\n"; + +my %locals; +my %convert; + +my $type; +my $section_regex; # Find the start of a section +my $function_regex; # Find the name of a function (return func name) +my $mcount_regex; # Find the call site to mcount (return offset) + +if ($arch eq "x86_64") { + $section_regex = "Disassembly of section"; + $function_regex = "<(.*?)>:"; + $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\smcount([+-]0x[0-9a-zA-Z]+)?\$"; + $type = ".quad"; +} elsif ($arch eq "i386") { + $section_regex = "Disassembly of section"; + $function_regex = "<(.*?)>:"; + $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\smcount\$"; + $type = ".long"; +} else { + die "Arch $arch is not supported with CONFIG_FTRACE_MCOUNT_RECORD"; +} + +my $text_found = 0; +my $read_function = 0; +my $opened = 0; +my $text = ""; +my $mcount_section = "__mcount_loc"; + +my $dirname; +my $filename; +my $prefix; +my $ext; + +if ($inputfile =~ m,^(.*)/([^/]*)$,) { + $dirname = $1; + $filename = $2; +} else { + $dirname = "."; + $filename = $inputfile; +} + +if ($filename =~ m,^(.*)(\.\S),) { + $prefix = $1; + $ext = $2; +} else { + $prefix = $filename; + $ext = ""; +} + +my $mcount_s = $dirname . "/.tmp_mc_" . $prefix . ".s"; +my $mcount_o = $dirname . "/.tmp_mc_" . $prefix . ".o"; + +# +# Step 1: find all the local symbols (static functions). +# +open (IN, "$nm $inputfile|") || die "error running $nm"; +while () { + if (/^[0-9a-fA-F]+\s+t\s+(\S+)/) { + $locals{$1} = 1; + } +} +close(IN); + +# +# Step 2: find the sections and mcount call sites +# +open(IN, "$objdump -dr $inputfile|") || die "error running $objdump"; + +while () { + # is it a section? + if (/$section_regex/) { + $read_function = 1; + $text_found = 0; + # section found, now is this a start of a function? + } elsif ($read_function && /$function_regex/) { + $read_function = 0; + $text_found = 1; + $text = $1; + # is this function static? If so, note this fact. + if (defined $locals{$text}) { + $convert{$text} = 1; + } + # is this a call site to mcount? If so, print the offset from the section + } elsif ($text_found && /$mcount_regex/) { + if (!$opened) { + open(FILE, ">$mcount_s") || die "can't create $mcount_s\n"; + $opened = 1; + print FILE "\t.section $mcount_section,\"a\",\@progbits\n"; + } + print FILE "\t$type $text + 0x$1\n"; + } +} + +# If we did not find any mcount callers, we are done (do nothing). +if (!$opened) { + exit(0); +} + +close(FILE); + +# +# Step 3: Compile the file that holds the list of call sites to mcount. +# +`$cc -o $mcount_o -c $mcount_s`; + +my @converts = keys %convert; + +# +# Step 4: Do we have sections that started with local functions? +# +if ($#converts >= 0) { + my $globallist = ""; + my $locallist = ""; + + foreach my $con (@converts) { + $globallist .= " --globalize-symbol $con"; + $locallist .= " --localize-symbol $con"; + } + + my $globalobj = $dirname . "/.tmp_gl_" . $filename; + my $globalmix = $dirname . "/.tmp_mx_" . $filename; + + # + # Step 5: set up each local function as a global + # + `$objcopy $globallist $inputfile $globalobj`; + + # + # Step 6: Link the global version to our list. + # + `$ld -r $globalobj $mcount_o -o $globalmix`; + + # + # Step 7: Convert the local functions back into local symbols + # + `$objcopy $locallist $globalmix $inputfile`; + + # Remove the temp files + `$rm $globalobj $globalmix`; + +} else { + + my $mix = $dirname . "/.tmp_mx_" . $filename; + + # + # Step 8: Link the object with our list of call sites object. 
+ # + `$ld -r $inputfile $mcount_o -o $mix`; + + # + # Step 9: Move the result back to the original object. + # + `$mv $mix $inputfile`; +} + +# Clean up the temp files +`$rm $mcount_o $mcount_s`; + +exit(0); -- cgit v1.2.3-70-g09d2 From 68bf21aa15c85d2e9b623dcda2b1ed8893275fa1 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Thu, 14 Aug 2008 15:45:08 -0400 Subject: ftrace: mcount call site on boot nops core This is the infrastructure for converting the mcount call sites recorded in the __mcount_loc section into nops on boot. It also allows for using these sites to enable tracing as normal. When the __mcount_loc section is used, the "ftraced" kernel thread is disabled. This uses the current infrastructure to record the mcount call sites as well as convert them to nops. The mcount function is kept as a stub on boot up and not converted to the ftrace_record_ip function. We use ftrace_record_ip only to record from the table. This patch does not handle modules. That comes with a later patch. Signed-off-by: Steven Rostedt Signed-off-by: Ingo Molnar --- include/asm-x86/ftrace.h | 10 ++++ include/linux/ftrace.h | 6 ++ init/main.c | 3 + kernel/trace/ftrace.c | 148 +++++++++++++++++++++++++++++++++-------------- 4 files changed, 124 insertions(+), 43 deletions(-) (limited to 'kernel/trace') diff --git a/include/asm-x86/ftrace.h b/include/asm-x86/ftrace.h index be0e004ad14..1bb6f9bbe1a 100644 --- a/include/asm-x86/ftrace.h +++ b/include/asm-x86/ftrace.h @@ -7,6 +7,16 @@ #ifndef __ASSEMBLY__ extern void mcount(void); + +static inline unsigned long ftrace_call_adjust(unsigned long addr) +{ + /* + * call mcount is "e8 <4 byte offset>" + * The addr points to the 4 byte offset and the caller of this + * function wants the pointer to e8. Simply subtract one. + */ + return addr - 1; +} #endif #endif /* CONFIG_FTRACE */ diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index bb384068272..d4d6ab453b7 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -162,4 +162,10 @@ static inline void ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { } #endif +#ifdef CONFIG_FTRACE_MCOUNT_RECORD +extern void ftrace_init(void); +#else +static inline void ftrace_init(void) { } +#endif + #endif /* _LINUX_FTRACE_H */ diff --git a/init/main.c b/init/main.c index 3820323c4c8..ded1fae965a 100644 --- a/init/main.c +++ b/init/main.c @@ -60,6 +60,7 @@ #include #include #include +#include #include #include @@ -687,6 +688,8 @@ asmlinkage void __init start_kernel(void) acpi_early_init(); /* before LAPIC and SMP init */ + ftrace_init(); + /* Do the rest non-__init'ed, we're now alive */ rest_init(); } diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index f6e3af31b40..df96d5990c0 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -792,47 +792,7 @@ static int ftrace_update_code(void) return 1; } -static int ftraced(void *ignore) -{ - unsigned long usecs; - - while (!kthread_should_stop()) { - - set_current_state(TASK_INTERRUPTIBLE); - - /* check once a second */ - schedule_timeout(HZ); - - if (unlikely(ftrace_disabled)) - continue; - - mutex_lock(&ftrace_sysctl_lock); - mutex_lock(&ftraced_lock); - if (!ftraced_suspend && !ftraced_stop && - ftrace_update_code()) { - usecs = nsecs_to_usecs(ftrace_update_time); - if (ftrace_update_tot_cnt > 100000) { - ftrace_update_tot_cnt = 0; - pr_info("hm, dftrace overflow: %lu change%s" - " (%lu total) in %lu usec%s\n", - ftrace_update_cnt, - ftrace_update_cnt != 1 ?
"s" : "", - ftrace_update_tot_cnt, - usecs, usecs != 1 ? "s" : ""); - ftrace_disabled = 1; - WARN_ON_ONCE(1); - } - } - mutex_unlock(&ftraced_lock); - mutex_unlock(&ftrace_sysctl_lock); - - ftrace_shutdown_replenish(); - } - __set_current_state(TASK_RUNNING); - return 0; -} - -static int __init ftrace_dyn_table_alloc(void) +static int __init ftrace_dyn_table_alloc(unsigned long num_to_init) { struct ftrace_page *pg; int cnt; @@ -859,7 +819,9 @@ static int __init ftrace_dyn_table_alloc(void) pg = ftrace_pages = ftrace_pages_start; - cnt = NR_TO_INIT / ENTRIES_PER_PAGE; + cnt = num_to_init / ENTRIES_PER_PAGE; + pr_info("ftrace: allocating %ld hash entries in %d pages\n", + num_to_init, cnt); for (i = 0; i < cnt; i++) { pg->next = (void *)get_zeroed_page(GFP_KERNEL); @@ -1556,6 +1518,104 @@ static __init int ftrace_init_debugfs(void) fs_initcall(ftrace_init_debugfs); +#ifdef CONFIG_FTRACE_MCOUNT_RECORD +static int ftrace_convert_nops(unsigned long *start, + unsigned long *end) +{ + unsigned long *p; + unsigned long addr; + unsigned long flags; + + p = start; + while (p < end) { + addr = ftrace_call_adjust(*p++); + ftrace_record_ip(addr); + ftrace_shutdown_replenish(); + } + + /* p is ignored */ + local_irq_save(flags); + __ftrace_update_code(p); + local_irq_restore(flags); + + return 0; +} + +extern unsigned long __start_mcount_loc[]; +extern unsigned long __stop_mcount_loc[]; + +void __init ftrace_init(void) +{ + unsigned long count, addr, flags; + int ret; + + /* Keep the ftrace pointer to the stub */ + addr = (unsigned long)ftrace_stub; + + local_irq_save(flags); + ftrace_dyn_arch_init(&addr); + local_irq_restore(flags); + + /* ftrace_dyn_arch_init places the return code in addr */ + if (addr) + goto failed; + + count = __stop_mcount_loc - __start_mcount_loc; + + ret = ftrace_dyn_table_alloc(count); + if (ret) + goto failed; + + last_ftrace_enabled = ftrace_enabled = 1; + + ret = ftrace_convert_nops(__start_mcount_loc, + __stop_mcount_loc); + + return; + failed: + ftrace_disabled = 1; +} +#else /* CONFIG_FTRACE_MCOUNT_RECORD */ +static int ftraced(void *ignore) +{ + unsigned long usecs; + + while (!kthread_should_stop()) { + + set_current_state(TASK_INTERRUPTIBLE); + + /* check once a second */ + schedule_timeout(HZ); + + if (unlikely(ftrace_disabled)) + continue; + + mutex_lock(&ftrace_sysctl_lock); + mutex_lock(&ftraced_lock); + if (!ftraced_suspend && !ftraced_stop && + ftrace_update_code()) { + usecs = nsecs_to_usecs(ftrace_update_time); + if (ftrace_update_tot_cnt > 100000) { + ftrace_update_tot_cnt = 0; + pr_info("hm, dftrace overflow: %lu change%s" + " (%lu total) in %lu usec%s\n", + ftrace_update_cnt, + ftrace_update_cnt != 1 ? "s" : "", + ftrace_update_tot_cnt, + usecs, usecs != 1 ? 
"s" : ""); + ftrace_disabled = 1; + WARN_ON_ONCE(1); + } + } + mutex_unlock(&ftraced_lock); + mutex_unlock(&ftrace_sysctl_lock); + + ftrace_shutdown_replenish(); + } + __set_current_state(TASK_RUNNING); + return 0; +} + static int __init ftrace_dynamic_init(void) { struct task_struct *p; @@ -1572,7 +1632,7 @@ static int __init ftrace_dynamic_init(void) goto failed; } - ret = ftrace_dyn_table_alloc(); + ret = ftrace_dyn_table_alloc(NR_TO_INIT); if (ret) goto failed; @@ -1593,6 +1653,8 @@ static int __init ftrace_dynamic_init(void) } core_initcall(ftrace_dynamic_init); +#endif /* CONFIG_FTRACE_MCOUNT_RECORD */ + #else # define ftrace_startup() do { } while (0) # define ftrace_shutdown() do { } while (0) -- cgit v1.2.3-70-g09d2 From 90d595fe5ca4b685465c068907e6e554760abea8 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Thu, 14 Aug 2008 15:45:09 -0400 Subject: ftrace: enable mcount recording for modules This patch enables the loading of the __mcount_section of modules and changing all the callers of mcount into nops. The modification is done before the init_module function is called, so again, we do not need to use kstop_machine to make these changes. Signed-off-by: Steven Rostedt Signed-off-by: Ingo Molnar --- include/linux/ftrace.h | 3 +++ kernel/module.c | 11 +++++++++++ kernel/trace/ftrace.c | 5 +++++ 3 files changed, 19 insertions(+) (limited to 'kernel/trace') diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index d4d6ab453b7..4936489f9ed 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -164,8 +164,11 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { } #ifdef CONFIG_FTRACE_MCOUNT_RECORD extern void ftrace_init(void); +extern void ftrace_init_module(unsigned long *start, unsigned long *end); #else static inline void ftrace_init(void) { } +static inline void +ftrace_init_module(unsigned long *start, unsigned long *end) { } #endif #endif /* _LINUX_FTRACE_H */ diff --git a/kernel/module.c b/kernel/module.c index 661d73db786..d753fd9d83e 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -47,6 +47,7 @@ #include #include #include +#include #if 0 #define DEBUGP printk @@ -1834,6 +1835,7 @@ static noinline struct module *load_module(void __user *umod, unsigned int markersstringsindex; unsigned int tracepointsindex; unsigned int tracepointsstringsindex; + unsigned int mcountindex; struct module *mod; long err = 0; void *percpu = NULL, *ptr = NULL; /* Stops spurious gcc warning */ @@ -2124,6 +2126,9 @@ static noinline struct module *load_module(void __user *umod, tracepointsstringsindex = find_sec(hdr, sechdrs, secstrings, "__tracepoints_strings"); + mcountindex = find_sec(hdr, sechdrs, secstrings, + "__mcount_loc"); + /* Now do relocations. 
*/ for (i = 1; i < hdr->e_shnum; i++) { const char *strtab = (char *)sechdrs[strindex].sh_addr; @@ -2184,6 +2189,12 @@ static noinline struct module *load_module(void __user *umod, mod->tracepoints + mod->num_tracepoints); #endif } + + if (mcountindex) { + void *mseg = (void *)sechdrs[mcountindex].sh_addr; + ftrace_init_module(mseg, mseg + sechdrs[mcountindex].sh_size); + } + err = module_finalize(hdr, sechdrs, mod); if (err < 0) goto cleanup; diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index df96d5990c0..ea45bb1c0fd 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -1541,6 +1541,11 @@ static int ftrace_convert_nops(unsigned long *start, return 0; } +void ftrace_init_module(unsigned long *start, unsigned long *end) +{ + ftrace_convert_nops(start, end); +} + extern unsigned long __start_mcount_loc[]; extern unsigned long __stop_mcount_loc[]; -- cgit v1.2.3-70-g09d2 From a9fdda33cd7c7519b082e37538fe790f9ff684bb Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Thu, 14 Aug 2008 22:47:17 -0400 Subject: ftrace: do not show freed records in available_filter_functions It seems that freed records can appear in the available_filter_functions list. This patch fixes that. Signed-off-by: Steven Rostedt Signed-off-by: Ingo Molnar --- kernel/trace/ftrace.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index ea45bb1c0fd..8affb6d00ec 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -872,15 +872,13 @@ t_next(struct seq_file *m, void *v, loff_t *pos) } } else { rec = &iter->pg->records[iter->idx++]; - if ((!(iter->flags & FTRACE_ITER_FAILURES) && + if ((rec->flags & FTRACE_FL_FREE) || + + (!(iter->flags & FTRACE_ITER_FAILURES) && (rec->flags & FTRACE_FL_FAILED)) || ((iter->flags & FTRACE_ITER_FAILURES) && - (!(rec->flags & FTRACE_FL_FAILED) || - (rec->flags & FTRACE_FL_FREE))) || - - ((iter->flags & FTRACE_ITER_FILTER) && - !(rec->flags & FTRACE_FL_FILTER)) || + !(rec->flags & FTRACE_FL_FAILED)) || ((iter->flags & FTRACE_ITER_NOTRACE) && !(rec->flags & FTRACE_FL_NOTRACE))) { -- cgit v1.2.3-70-g09d2 From fed1939c64d2288938fdc1c367d49082da65e195 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Thu, 14 Aug 2008 22:47:19 -0400 Subject: ftrace: remove old pointers to mcount When an mcount pointer is recorded into a table, it is used to add or remove calls to mcount (replacing them with nops). If the code is removed by unloading a module, the pointers still exist. When modifying the code, a check is always made to make sure the code being replaced is the code expected. In other words, the code being replaced is compared to what it is expected to be before being replaced. There is a very small chance that the code being replaced just happens to look like code that calls mcount (very small since the call to mcount is relative). To remove this chance, this patch adds ftrace_release to allow module unloading to remove the pointers to mcount within the module. Another change is made to not trace calls marked with __init. Tracing cannot be started until after init is done anyway.
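The compare-before-replace step described above is essentially a guarded memcpy(). A minimal sketch; patch_call_site(), expected, replacement, and the MCOUNT_INSN_SIZE value are illustrative stand-ins, not the kernel's actual interface:

#include <linux/string.h>
#include <linux/errno.h>

#define MCOUNT_INSN_SIZE 5	/* x86: e8 plus a 4-byte offset */

/* Refuse to patch a call site whose bytes no longer match what was
 * recorded, e.g. because the module that owned it was unloaded and
 * its memory reused.  All names here are illustrative. */
static int patch_call_site(unsigned char *ip,
			   const unsigned char *expected,
			   const unsigned char *replacement)
{
	if (memcmp(ip, expected, MCOUNT_INSN_SIZE) != 0)
		return -EINVAL;		/* the code changed under us */

	memcpy(ip, replacement, MCOUNT_INSN_SIZE);
	return 0;
}

ftrace_release() closes the remaining hole: once a module is gone, even a byte pattern that happens to match must not be patched, so the records pointing into its address range are freed outright.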
Signed-off-by: Steven Rostedt Signed-off-by: Ingo Molnar --- include/linux/ftrace.h | 2 ++ include/linux/init.h | 2 +- kernel/module.c | 12 ++++++++---- kernel/trace/ftrace.c | 32 ++++++++++++++++++++++++++++++-- 4 files changed, 41 insertions(+), 7 deletions(-) (limited to 'kernel/trace') diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 4936489f9ed..6b232a2460c 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -165,10 +165,12 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { } #ifdef CONFIG_FTRACE_MCOUNT_RECORD extern void ftrace_init(void); extern void ftrace_init_module(unsigned long *start, unsigned long *end); +extern void ftrace_release(void *start, unsigned long size); #else static inline void ftrace_init(void) { } static inline void ftrace_init_module(unsigned long *start, unsigned long *end) { } +static inline void ftrace_release(void *start, unsigned long size) { } #endif #endif /* _LINUX_FTRACE_H */ diff --git a/include/linux/init.h b/include/linux/init.h index 93538b696e3..27f61f6b3cb 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -40,7 +40,7 @@ /* These are for everybody (although not all archs will actually discard it in modules) */ -#define __init __section(.init.text) __cold +#define __init __section(.init.text) __cold notrace #define __initdata __section(.init.data) #define __initconst __section(.init.rodata) #define __exitdata __section(.exit.data) diff --git a/kernel/module.c b/kernel/module.c index d753fd9d83e..7576c2d9462 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -1431,6 +1431,9 @@ static void free_module(struct module *mod) /* Module unload stuff */ module_unload_free(mod); + /* release any pointers to mcount in this module */ + ftrace_release(mod->module_core, mod->core_size); + /* This may be NULL, but that's OK */ module_free(mod, mod->module_init); kfree(mod->args); @@ -1839,6 +1842,7 @@ static noinline struct module *load_module(void __user *umod, struct module *mod; long err = 0; void *percpu = NULL, *ptr = NULL; /* Stops spurious gcc warning */ + void *mseg; struct exception_table_entry *extable; mm_segment_t old_fs; @@ -2190,10 +2194,9 @@ static noinline struct module *load_module(void __user *umod, #endif } - if (mcountindex) { - void *mseg = (void *)sechdrs[mcountindex].sh_addr; - ftrace_init_module(mseg, mseg + sechdrs[mcountindex].sh_size); - } + /* sechdrs[0].sh_size is always zero */ + mseg = (void *)sechdrs[mcountindex].sh_addr; + ftrace_init_module(mseg, mseg + sechdrs[mcountindex].sh_size); err = module_finalize(hdr, sechdrs, mod); if (err < 0) @@ -2264,6 +2267,7 @@ static noinline struct module *load_module(void __user *umod, cleanup: kobject_del(&mod->mkobj.kobj); kobject_put(&mod->mkobj.kobj); + ftrace_release(mod->module_core, mod->core_size); free_unload: module_unload_free(mod); module_free(mod, mod->module_init); diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 8affb6d00ec..eadd0eaea9b 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -294,13 +294,37 @@ static inline void ftrace_del_hash(struct dyn_ftrace *node) static void ftrace_free_rec(struct dyn_ftrace *rec) { - /* no locking, only called from kstop_machine */ - rec->ip = (unsigned long)ftrace_free_records; ftrace_free_records = rec; rec->flags |= FTRACE_FL_FREE; } +void ftrace_release(void *start, unsigned long size) +{ + struct dyn_ftrace *rec; + struct ftrace_page *pg; + unsigned long s = (unsigned long)start; + unsigned long e = s + size; + int i; + + if 
(!start) + return; + + /* No interrupt should call this */ + spin_lock(&ftrace_lock); + + for (pg = ftrace_pages_start; pg; pg = pg->next) { + for (i = 0; i < pg->index; i++) { + rec = &pg->records[i]; + + if ((rec->ip >= s) && (rec->ip < e)) + ftrace_free_rec(rec); + } + } + spin_unlock(&ftrace_lock); + +} + static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip) { struct dyn_ftrace *rec; @@ -1527,7 +1551,9 @@ static int ftrace_convert_nops(unsigned long *start, p = start; while (p < end) { addr = ftrace_call_adjust(*p++); + spin_lock(&ftrace_lock); ftrace_record_ip(addr); + spin_unlock(&ftrace_lock); ftrace_shutdown_replenish(); } @@ -1541,6 +1567,8 @@ static int ftrace_convert_nops(unsigned long *start, void ftrace_init_module(unsigned long *start, unsigned long *end) { + if (start == end) + return; ftrace_convert_nops(start, end); } -- cgit v1.2.3-70-g09d2 From 2e2ca155cd2213b4f398031180fb3d399d5b7db9 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 1 Aug 2008 12:26:40 -0400 Subject: ftrace: new continue entry - separate out from trace_entry Some tracers will need to work with more than one entry. In order to do this the trace_entry structure was split into two fields. One for the start of all entries, and one to continue an existing entry. The trace_entry structure now has a "field" entry that consists of the previous content of the trace_entry, and a "cont" entry that is just a string buffer the size of the "field" entry. Thanks to Andrew Morton for suggesting this idea. Signed-off-by: Steven Rostedt Signed-off-by: Ingo Molnar --- kernel/trace/trace.c | 267 +++++++++++++++++++++-------------------- kernel/trace/trace.h | 17 ++- kernel/trace/trace_mmiotrace.c | 12 +- 3 files changed, 160 insertions(+), 136 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 8f3fb3db61c..76dfe6d2466 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -817,10 +817,11 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags) pc = preempt_count(); - entry->preempt_count = pc & 0xff; - entry->pid = (tsk) ? tsk->pid : 0; - entry->t = ftrace_now(raw_smp_processor_id()); - entry->flags = (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) | + entry->field.preempt_count = pc & 0xff; + entry->field.pid = (tsk) ? tsk->pid : 0; + entry->field.t = ftrace_now(raw_smp_processor_id()); + entry->field.flags = + (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) | ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) | ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) | (need_resched() ? 
TRACE_FLAG_NEED_RESCHED : 0); @@ -835,11 +836,11 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data, raw_local_irq_save(irq_flags); __raw_spin_lock(&data->lock); - entry = tracing_get_trace_entry(tr, data); + entry = tracing_get_trace_entry(tr, data); tracing_generic_entry_update(entry, flags); - entry->type = TRACE_FN; - entry->fn.ip = ip; - entry->fn.parent_ip = parent_ip; + entry->type = TRACE_FN; + entry->field.fn.ip = ip; + entry->field.fn.parent_ip = parent_ip; __raw_spin_unlock(&data->lock); raw_local_irq_restore(irq_flags); } @@ -862,10 +863,10 @@ void __trace_mmiotrace_rw(struct trace_array *tr, struct trace_array_cpu *data, raw_local_irq_save(irq_flags); __raw_spin_lock(&data->lock); - entry = tracing_get_trace_entry(tr, data); + entry = tracing_get_trace_entry(tr, data); tracing_generic_entry_update(entry, 0); - entry->type = TRACE_MMIO_RW; - entry->mmiorw = *rw; + entry->type = TRACE_MMIO_RW; + entry->field.mmiorw = *rw; __raw_spin_unlock(&data->lock); raw_local_irq_restore(irq_flags); @@ -882,10 +883,10 @@ void __trace_mmiotrace_map(struct trace_array *tr, struct trace_array_cpu *data, raw_local_irq_save(irq_flags); __raw_spin_lock(&data->lock); - entry = tracing_get_trace_entry(tr, data); + entry = tracing_get_trace_entry(tr, data); tracing_generic_entry_update(entry, 0); - entry->type = TRACE_MMIO_MAP; - entry->mmiomap = *map; + entry->type = TRACE_MMIO_MAP; + entry->field.mmiomap = *map; __raw_spin_unlock(&data->lock); raw_local_irq_restore(irq_flags); @@ -909,12 +910,12 @@ void __trace_stack(struct trace_array *tr, tracing_generic_entry_update(entry, flags); entry->type = TRACE_STACK; - memset(&entry->stack, 0, sizeof(entry->stack)); + memset(&entry->field.stack, 0, sizeof(entry->field.stack)); trace.nr_entries = 0; trace.max_entries = FTRACE_STACK_ENTRIES; trace.skip = skip; - trace.entries = entry->stack.caller; + trace.entries = entry->field.stack.caller; save_stack_trace(&trace); } @@ -930,12 +931,12 @@ __trace_special(void *__tr, void *__data, raw_local_irq_save(irq_flags); __raw_spin_lock(&data->lock); - entry = tracing_get_trace_entry(tr, data); + entry = tracing_get_trace_entry(tr, data); tracing_generic_entry_update(entry, 0); - entry->type = TRACE_SPECIAL; - entry->special.arg1 = arg1; - entry->special.arg2 = arg2; - entry->special.arg3 = arg3; + entry->type = TRACE_SPECIAL; + entry->field.special.arg1 = arg1; + entry->field.special.arg2 = arg2; + entry->field.special.arg3 = arg3; __trace_stack(tr, data, irq_flags, 4); __raw_spin_unlock(&data->lock); raw_local_irq_restore(irq_flags); @@ -955,15 +956,15 @@ tracing_sched_switch_trace(struct trace_array *tr, raw_local_irq_save(irq_flags); __raw_spin_lock(&data->lock); - entry = tracing_get_trace_entry(tr, data); + entry = tracing_get_trace_entry(tr, data); tracing_generic_entry_update(entry, flags); - entry->type = TRACE_CTX; - entry->ctx.prev_pid = prev->pid; - entry->ctx.prev_prio = prev->prio; - entry->ctx.prev_state = prev->state; - entry->ctx.next_pid = next->pid; - entry->ctx.next_prio = next->prio; - entry->ctx.next_state = next->state; + entry->type = TRACE_CTX; + entry->field.ctx.prev_pid = prev->pid; + entry->field.ctx.prev_prio = prev->prio; + entry->field.ctx.prev_state = prev->state; + entry->field.ctx.next_pid = next->pid; + entry->field.ctx.next_prio = next->prio; + entry->field.ctx.next_state = next->state; __trace_stack(tr, data, flags, 5); __raw_spin_unlock(&data->lock); raw_local_irq_restore(irq_flags); @@ -984,12 +985,12 @@ tracing_sched_wakeup_trace(struct trace_array *tr, 
entry = tracing_get_trace_entry(tr, data); tracing_generic_entry_update(entry, flags); entry->type = TRACE_WAKE; - entry->ctx.prev_pid = curr->pid; - entry->ctx.prev_prio = curr->prio; - entry->ctx.prev_state = curr->state; - entry->ctx.next_pid = wakee->pid; - entry->ctx.next_prio = wakee->prio; - entry->ctx.next_state = wakee->state; + entry->field.ctx.prev_pid = curr->pid; + entry->field.ctx.prev_prio = curr->prio; + entry->field.ctx.prev_state = curr->state; + entry->field.ctx.next_pid = wakee->pid; + entry->field.ctx.next_prio = wakee->prio; + entry->field.ctx.next_state = wakee->state; __trace_stack(tr, data, flags, 6); __raw_spin_unlock(&data->lock); raw_local_irq_restore(irq_flags); @@ -1118,7 +1119,7 @@ find_next_entry(struct trace_iterator *iter, int *ent_cpu) /* * Pick the entry with the smallest timestamp: */ - if (ent && (!next || ent->t < next->t)) { + if (ent && (!next || ent->field.t < next->field.t)) { next = ent; next_cpu = cpu; } @@ -1422,19 +1423,20 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter) static void lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu) { + struct trace_field *field = &entry->field; int hardirq, softirq; char *comm; - comm = trace_find_cmdline(entry->pid); + comm = trace_find_cmdline(field->pid); - trace_seq_printf(s, "%8.8s-%-5d ", comm, entry->pid); + trace_seq_printf(s, "%8.8s-%-5d ", comm, field->pid); trace_seq_printf(s, "%d", cpu); trace_seq_printf(s, "%c%c", - (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.', - ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.')); + (field->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.', + ((field->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.')); - hardirq = entry->flags & TRACE_FLAG_HARDIRQ; - softirq = entry->flags & TRACE_FLAG_SOFTIRQ; + hardirq = field->flags & TRACE_FLAG_HARDIRQ; + softirq = field->flags & TRACE_FLAG_SOFTIRQ; if (hardirq && softirq) { trace_seq_putc(s, 'H'); } else { @@ -1448,8 +1450,8 @@ lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu) } } - if (entry->preempt_count) - trace_seq_printf(s, "%x", entry->preempt_count); + if (field->preempt_count) + trace_seq_printf(s, "%x", field->preempt_count); else trace_seq_puts(s, "."); } @@ -1479,6 +1481,7 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) struct trace_entry *next_entry = find_next_entry(iter, NULL); unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE); struct trace_entry *entry = iter->ent; + struct trace_field *field = &entry->field; unsigned long abs_usecs; unsigned long rel_usecs; char *comm; @@ -1488,17 +1491,17 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) if (!next_entry) next_entry = entry; - rel_usecs = ns2usecs(next_entry->t - entry->t); - abs_usecs = ns2usecs(entry->t - iter->tr->time_start); + rel_usecs = ns2usecs(next_entry->field.t - entry->field.t); + abs_usecs = ns2usecs(entry->field.t - iter->tr->time_start); if (verbose) { - comm = trace_find_cmdline(entry->pid); + comm = trace_find_cmdline(field->pid); trace_seq_printf(s, "%16s %5d %d %d %08x %08x [%08lx]" " %ld.%03ldms (+%ld.%03ldms): ", comm, - entry->pid, cpu, entry->flags, - entry->preempt_count, trace_idx, - ns2usecs(entry->t), + field->pid, cpu, field->flags, + field->preempt_count, trace_idx, + ns2usecs(field->t), abs_usecs/1000, abs_usecs % 1000, rel_usecs/1000, rel_usecs % 1000); @@ -1508,41 +1511,42 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) } switch (entry->type) { case TRACE_FN: - 
seq_print_ip_sym(s, entry->fn.ip, sym_flags); + seq_print_ip_sym(s, field->fn.ip, sym_flags); trace_seq_puts(s, " ("); - if (kretprobed(entry->fn.parent_ip)) + if (kretprobed(field->fn.parent_ip)) trace_seq_puts(s, KRETPROBE_MSG); else - seq_print_ip_sym(s, entry->fn.parent_ip, sym_flags); + seq_print_ip_sym(s, field->fn.parent_ip, sym_flags); trace_seq_puts(s, ")\n"); break; case TRACE_CTX: case TRACE_WAKE: - T = entry->ctx.next_state < sizeof(state_to_char) ? - state_to_char[entry->ctx.next_state] : 'X'; + T = field->ctx.next_state < sizeof(state_to_char) ? + state_to_char[field->ctx.next_state] : 'X'; - state = entry->ctx.prev_state ? __ffs(entry->ctx.prev_state) + 1 : 0; + state = field->ctx.prev_state ? + __ffs(field->ctx.prev_state) + 1 : 0; S = state < sizeof(state_to_char) - 1 ? state_to_char[state] : 'X'; - comm = trace_find_cmdline(entry->ctx.next_pid); + comm = trace_find_cmdline(field->ctx.next_pid); trace_seq_printf(s, " %5d:%3d:%c %s %5d:%3d:%c %s\n", - entry->ctx.prev_pid, - entry->ctx.prev_prio, + field->ctx.prev_pid, + field->ctx.prev_prio, S, entry->type == TRACE_CTX ? "==>" : " +", - entry->ctx.next_pid, - entry->ctx.next_prio, + field->ctx.next_pid, + field->ctx.next_prio, T, comm); break; case TRACE_SPECIAL: trace_seq_printf(s, "# %ld %ld %ld\n", - entry->special.arg1, - entry->special.arg2, - entry->special.arg3); + field->special.arg1, + field->special.arg2, + field->special.arg3); break; case TRACE_STACK: for (i = 0; i < FTRACE_STACK_ENTRIES; i++) { if (i) trace_seq_puts(s, " <= "); - seq_print_ip_sym(s, entry->stack.caller[i], sym_flags); + seq_print_ip_sym(s, field->stack.caller[i], sym_flags); } trace_seq_puts(s, "\n"); break; @@ -1557,6 +1561,7 @@ static int print_trace_fmt(struct trace_iterator *iter) struct trace_seq *s = &iter->seq; unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); struct trace_entry *entry; + struct trace_field *field; unsigned long usec_rem; unsigned long long t; unsigned long secs; @@ -1566,14 +1571,15 @@ static int print_trace_fmt(struct trace_iterator *iter) int i; entry = iter->ent; + field = &entry->field; - comm = trace_find_cmdline(iter->ent->pid); + comm = trace_find_cmdline(iter->ent->field.pid); - t = ns2usecs(entry->t); + t = ns2usecs(field->t); usec_rem = do_div(t, 1000000ULL); secs = (unsigned long)t; - ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid); + ret = trace_seq_printf(s, "%16s-%-5d ", comm, field->pid); if (!ret) return 0; ret = trace_seq_printf(s, "[%02d] ", iter->cpu); @@ -1585,18 +1591,19 @@ static int print_trace_fmt(struct trace_iterator *iter) switch (entry->type) { case TRACE_FN: - ret = seq_print_ip_sym(s, entry->fn.ip, sym_flags); + ret = seq_print_ip_sym(s, field->fn.ip, sym_flags); if (!ret) return 0; if ((sym_flags & TRACE_ITER_PRINT_PARENT) && - entry->fn.parent_ip) { + field->fn.parent_ip) { ret = trace_seq_printf(s, " <-"); if (!ret) return 0; - if (kretprobed(entry->fn.parent_ip)) + if (kretprobed(field->fn.parent_ip)) ret = trace_seq_puts(s, KRETPROBE_MSG); else - ret = seq_print_ip_sym(s, entry->fn.parent_ip, + ret = seq_print_ip_sym(s, + field->fn.parent_ip, sym_flags); if (!ret) return 0; @@ -1607,26 +1614,26 @@ static int print_trace_fmt(struct trace_iterator *iter) break; case TRACE_CTX: case TRACE_WAKE: - S = entry->ctx.prev_state < sizeof(state_to_char) ? - state_to_char[entry->ctx.prev_state] : 'X'; - T = entry->ctx.next_state < sizeof(state_to_char) ? - state_to_char[entry->ctx.next_state] : 'X'; + S = field->ctx.prev_state < sizeof(state_to_char) ? 
+ state_to_char[field->ctx.prev_state] : 'X'; + T = field->ctx.next_state < sizeof(state_to_char) ? + state_to_char[field->ctx.next_state] : 'X'; ret = trace_seq_printf(s, " %5d:%3d:%c %s %5d:%3d:%c\n", - entry->ctx.prev_pid, - entry->ctx.prev_prio, + field->ctx.prev_pid, + field->ctx.prev_prio, S, entry->type == TRACE_CTX ? "==>" : " +", - entry->ctx.next_pid, - entry->ctx.next_prio, + field->ctx.next_pid, + field->ctx.next_prio, T); if (!ret) return 0; break; case TRACE_SPECIAL: ret = trace_seq_printf(s, "# %ld %ld %ld\n", - entry->special.arg1, - entry->special.arg2, - entry->special.arg3); + field->special.arg1, + field->special.arg2, + field->special.arg3); if (!ret) return 0; break; @@ -1637,7 +1644,7 @@ static int print_trace_fmt(struct trace_iterator *iter) if (!ret) return 0; } - ret = seq_print_ip_sym(s, entry->stack.caller[i], + ret = seq_print_ip_sym(s, field->stack.caller[i], sym_flags); if (!ret) return 0; @@ -1654,37 +1661,40 @@ static int print_raw_fmt(struct trace_iterator *iter) { struct trace_seq *s = &iter->seq; struct trace_entry *entry; + struct trace_field *field; int ret; int S, T; entry = iter->ent; + field = &entry->field; ret = trace_seq_printf(s, "%d %d %llu ", - entry->pid, iter->cpu, entry->t); + field->pid, iter->cpu, field->t); if (!ret) return 0; switch (entry->type) { case TRACE_FN: ret = trace_seq_printf(s, "%x %x\n", - entry->fn.ip, entry->fn.parent_ip); + field->fn.ip, + field->fn.parent_ip); if (!ret) return 0; break; case TRACE_CTX: case TRACE_WAKE: - S = entry->ctx.prev_state < sizeof(state_to_char) ? - state_to_char[entry->ctx.prev_state] : 'X'; - T = entry->ctx.next_state < sizeof(state_to_char) ? - state_to_char[entry->ctx.next_state] : 'X'; + S = field->ctx.prev_state < sizeof(state_to_char) ? + state_to_char[field->ctx.prev_state] : 'X'; + T = field->ctx.next_state < sizeof(state_to_char) ? + state_to_char[field->ctx.next_state] : 'X'; if (entry->type == TRACE_WAKE) S = '+'; ret = trace_seq_printf(s, "%d %d %c %d %d %c\n", - entry->ctx.prev_pid, - entry->ctx.prev_prio, + field->ctx.prev_pid, + field->ctx.prev_prio, S, - entry->ctx.next_pid, - entry->ctx.next_prio, + field->ctx.next_pid, + field->ctx.next_prio, T); if (!ret) return 0; @@ -1692,9 +1702,9 @@ static int print_raw_fmt(struct trace_iterator *iter) case TRACE_SPECIAL: case TRACE_STACK: ret = trace_seq_printf(s, "# %ld %ld %ld\n", - entry->special.arg1, - entry->special.arg2, - entry->special.arg3); + field->special.arg1, + field->special.arg2, + field->special.arg3); if (!ret) return 0; break; @@ -1719,40 +1729,41 @@ static int print_hex_fmt(struct trace_iterator *iter) struct trace_seq *s = &iter->seq; unsigned char newline = '\n'; struct trace_entry *entry; + struct trace_field *field; int S, T; entry = iter->ent; + field = &entry->field; - SEQ_PUT_HEX_FIELD_RET(s, entry->pid); + SEQ_PUT_HEX_FIELD_RET(s, field->pid); SEQ_PUT_HEX_FIELD_RET(s, iter->cpu); - SEQ_PUT_HEX_FIELD_RET(s, entry->t); + SEQ_PUT_HEX_FIELD_RET(s, field->t); switch (entry->type) { case TRACE_FN: - SEQ_PUT_HEX_FIELD_RET(s, entry->fn.ip); - SEQ_PUT_HEX_FIELD_RET(s, entry->fn.parent_ip); + SEQ_PUT_HEX_FIELD_RET(s, field->fn.ip); + SEQ_PUT_HEX_FIELD_RET(s, field->fn.parent_ip); break; case TRACE_CTX: case TRACE_WAKE: - S = entry->ctx.prev_state < sizeof(state_to_char) ? - state_to_char[entry->ctx.prev_state] : 'X'; - T = entry->ctx.next_state < sizeof(state_to_char) ? - state_to_char[entry->ctx.next_state] : 'X'; + S = field->ctx.prev_state < sizeof(state_to_char) ? 
+ state_to_char[field->ctx.prev_state] : 'X'; + T = field->ctx.next_state < sizeof(state_to_char) ? + state_to_char[field->ctx.next_state] : 'X'; if (entry->type == TRACE_WAKE) S = '+'; - SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.prev_pid); - SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.prev_prio); + SEQ_PUT_HEX_FIELD_RET(s, field->ctx.prev_pid); + SEQ_PUT_HEX_FIELD_RET(s, field->ctx.prev_prio); SEQ_PUT_HEX_FIELD_RET(s, S); - SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.next_pid); - SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.next_prio); - SEQ_PUT_HEX_FIELD_RET(s, entry->fn.parent_ip); + SEQ_PUT_HEX_FIELD_RET(s, field->ctx.next_pid); + SEQ_PUT_HEX_FIELD_RET(s, field->ctx.next_prio); SEQ_PUT_HEX_FIELD_RET(s, T); break; case TRACE_SPECIAL: case TRACE_STACK: - SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg1); - SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg2); - SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg3); + SEQ_PUT_HEX_FIELD_RET(s, field->special.arg1); + SEQ_PUT_HEX_FIELD_RET(s, field->special.arg2); + SEQ_PUT_HEX_FIELD_RET(s, field->special.arg3); break; } SEQ_PUT_FIELD_RET(s, newline); @@ -1764,31 +1775,33 @@ static int print_bin_fmt(struct trace_iterator *iter) { struct trace_seq *s = &iter->seq; struct trace_entry *entry; + struct trace_field *field; entry = iter->ent; + field = &entry->field; - SEQ_PUT_FIELD_RET(s, entry->pid); - SEQ_PUT_FIELD_RET(s, entry->cpu); - SEQ_PUT_FIELD_RET(s, entry->t); + SEQ_PUT_FIELD_RET(s, field->pid); + SEQ_PUT_FIELD_RET(s, field->cpu); + SEQ_PUT_FIELD_RET(s, field->t); switch (entry->type) { case TRACE_FN: - SEQ_PUT_FIELD_RET(s, entry->fn.ip); - SEQ_PUT_FIELD_RET(s, entry->fn.parent_ip); + SEQ_PUT_FIELD_RET(s, field->fn.ip); + SEQ_PUT_FIELD_RET(s, field->fn.parent_ip); break; case TRACE_CTX: - SEQ_PUT_FIELD_RET(s, entry->ctx.prev_pid); - SEQ_PUT_FIELD_RET(s, entry->ctx.prev_prio); - SEQ_PUT_FIELD_RET(s, entry->ctx.prev_state); - SEQ_PUT_FIELD_RET(s, entry->ctx.next_pid); - SEQ_PUT_FIELD_RET(s, entry->ctx.next_prio); - SEQ_PUT_FIELD_RET(s, entry->ctx.next_state); + SEQ_PUT_FIELD_RET(s, field->ctx.prev_pid); + SEQ_PUT_FIELD_RET(s, field->ctx.prev_prio); + SEQ_PUT_FIELD_RET(s, field->ctx.prev_state); + SEQ_PUT_FIELD_RET(s, field->ctx.next_pid); + SEQ_PUT_FIELD_RET(s, field->ctx.next_prio); + SEQ_PUT_FIELD_RET(s, field->ctx.next_state); break; case TRACE_SPECIAL: case TRACE_STACK: - SEQ_PUT_FIELD_RET(s, entry->special.arg1); - SEQ_PUT_FIELD_RET(s, entry->special.arg2); - SEQ_PUT_FIELD_RET(s, entry->special.arg3); + SEQ_PUT_FIELD_RET(s, field->special.arg1); + SEQ_PUT_FIELD_RET(s, field->special.arg2); + SEQ_PUT_FIELD_RET(s, field->special.arg3); break; } return 1; diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index f69f86788c2..6ddd6a6556c 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -61,13 +61,12 @@ struct stack_entry { }; /* - * The trace entry - the most basic unit of tracing. This is what + * The trace field - the most basic unit of tracing. 
This is what * is printed in the end as a single line in the trace output, such as: * * bash-15816 [01] 235.197585: idle_cpu <- irq_enter */ -struct trace_entry { - char type; +struct trace_field { char cpu; char flags; char preempt_count; @@ -83,6 +82,18 @@ struct trace_entry { }; }; +struct trace_field_cont { + char buf[sizeof(struct trace_field)]; +}; + +struct trace_entry { + char type; + union { + struct trace_field field; + struct trace_field_cont cont; + }; +}; + #define TRACE_ENTRY_SIZE sizeof(struct trace_entry) /* diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c index b13dc19dcbb..9b7a936f4b1 100644 --- a/kernel/trace/trace_mmiotrace.c +++ b/kernel/trace/trace_mmiotrace.c @@ -174,14 +174,14 @@ print_out: static int mmio_print_rw(struct trace_iterator *iter) { struct trace_entry *entry = iter->ent; - struct mmiotrace_rw *rw = &entry->mmiorw; + struct mmiotrace_rw *rw = &entry->field.mmiorw; struct trace_seq *s = &iter->seq; - unsigned long long t = ns2usecs(entry->t); + unsigned long long t = ns2usecs(entry->field.t); unsigned long usec_rem = do_div(t, 1000000ULL); unsigned secs = (unsigned long)t; int ret = 1; - switch (entry->mmiorw.opcode) { + switch (entry->field.mmiorw.opcode) { case MMIO_READ: ret = trace_seq_printf(s, "R %d %lu.%06lu %d 0x%llx 0x%lx 0x%lx %d\n", @@ -216,14 +216,14 @@ static int mmio_print_rw(struct trace_iterator *iter) static int mmio_print_map(struct trace_iterator *iter) { struct trace_entry *entry = iter->ent; - struct mmiotrace_map *m = &entry->mmiomap; + struct mmiotrace_map *m = &entry->field.mmiomap; struct trace_seq *s = &iter->seq; - unsigned long long t = ns2usecs(entry->t); + unsigned long long t = ns2usecs(entry->field.t); unsigned long usec_rem = do_div(t, 1000000ULL); unsigned secs = (unsigned long)t; int ret = 1; - switch (entry->mmiorw.opcode) { + switch (entry->field.mmiorw.opcode) { case MMIO_PROBE: ret = trace_seq_printf(s, "MAP %lu.%06lu %d 0x%llx 0x%lx 0x%lx 0x%lx %d\n", -- cgit v1.2.3-70-g09d2 From dd0e545f061f90099a3dcc13aa77e29c6295cf23 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 1 Aug 2008 12:26:41 -0400 Subject: ftrace: printk formatting infrastructure This patch adds a feature that can help kernel developers debug their code using ftrace. int ftrace_printk(const char *fmt, ...); This records into the ftrace buffer using printf formatting. The entry size in the buffers is still a fixed length. A new type has been added that allows for more entries to be used for a single recording. The start of the print is still the same as the other entries. It returns the number of characters written to the ftrace buffer. For example: Having a module with the following code: static int __init ftrace_print_test(void) { ftrace_printk("jiffies are %ld\n", jiffies); return 0; } Gives me: insmod-5441 3...1 7569us : ftrace_print_test: jiffies are 4296626666 for the latency_trace file and: insmod-5441 [03] 1959.370498: ftrace_print_test jiffies are 4296626666 for the trace file. Note: Only the infrastructure should go into the kernel. It is meant to facilitate debugging for other kernel developers. Calls to ftrace_printk are not intended to be left in the kernel, and should be frowned upon just like scattering printks around in the code. But having this easily at your fingertips helps debugging go faster and bugs get solved quicker. Maybe later on, we can hook this with markers and have their printf format be sucked into ftrace output.
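As a model of the record layout: a long print becomes one TRACE_PRINT entry followed by as many TRACE_CONT entries as the formatted string needs. The splitting can be sketched in plain user-space C; the chunk sizes and the demo_split name below are illustrative only, not the kernel's actual values (which come from sizeof(struct trace_field)):

	#include <stdio.h>
	#include <string.h>

	#define FIRST_CHUNK 16	/* room left in the TRACE_PRINT entry (illustrative) */
	#define CONT_CHUNK  24	/* payload of one TRACE_CONT entry (illustrative) */

	/* Split msg the way __ftrace_printk spreads it over entries. */
	static void demo_split(const char *msg)
	{
		int len = (int)strlen(msg);
		int written = len < FIRST_CHUNK ? len : FIRST_CHUNK;

		printf("TRACE_PRINT: %.*s\n", written, msg);
		while (written < len) {
			int chunk = len - written;

			if (chunk > CONT_CHUNK)
				chunk = CONT_CHUNK;
			printf("TRACE_CONT : %.*s\n", chunk, msg + written);
			written += chunk;
		}
	}

	int main(void)
	{
		demo_split("jiffies are 4296626666 and this message needs continuation entries");
		return 0;
	}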
Signed-off-by: Steven Rostedt Signed-off-by: Ingo Molnar --- include/linux/ftrace.h | 10 ++ kernel/trace/trace.c | 273 +++++++++++++++++++++++++++++++++++++----- kernel/trace/trace.h | 11 ++ kernel/trace/trace_selftest.c | 11 +- 4 files changed, 272 insertions(+), 33 deletions(-) (limited to 'kernel/trace') diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 6b232a2460c..f53b975e32f 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -157,9 +157,18 @@ static inline void __ftrace_enabled_restore(int enabled) #ifdef CONFIG_TRACING extern void ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3); +# define ftrace_printk(x...) __ftrace_printk(_THIS_IP_, x) +extern int +__ftrace_printk(unsigned long ip, const char *fmt, ...) + __attribute__ ((format (printf, 2, 3))); #else static inline void ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { } +static inline int +ftrace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 0))) +{ + return 0; +} #endif #ifdef CONFIG_FTRACE_MCOUNT_RECORD @@ -173,4 +182,5 @@ ftrace_init_module(unsigned long *start, unsigned long *end) { } static inline void ftrace_release(void *start, unsigned long size) { } #endif + #endif /* _LINUX_FTRACE_H */ diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 76dfe6d2466..a917bea8271 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -197,12 +197,14 @@ unsigned long nsecs_to_usecs(unsigned long nsecs) * NEED_RESCED - reschedule is requested * HARDIRQ - inside an interrupt handler * SOFTIRQ - inside a softirq handler + * CONT - multiple entries hold the trace item */ enum trace_flag_type { TRACE_FLAG_IRQS_OFF = 0x01, TRACE_FLAG_NEED_RESCHED = 0x02, TRACE_FLAG_HARDIRQ = 0x04, TRACE_FLAG_SOFTIRQ = 0x08, + TRACE_FLAG_CONT = 0x10, }; /* @@ -1074,6 +1076,7 @@ enum trace_file_type { TRACE_FILE_LAT_FMT = 1, }; +/* Return the current entry. 
*/ static struct trace_entry * trace_entry_idx(struct trace_array *tr, struct trace_array_cpu *data, struct trace_iterator *iter, int cpu) @@ -1104,8 +1107,58 @@ trace_entry_idx(struct trace_array *tr, struct trace_array_cpu *data, return &array[iter->next_page_idx[cpu]]; } +/* Increment the index counter of an iterator by one */ +static void trace_iterator_increment(struct trace_iterator *iter, int cpu) +{ + iter->idx++; + iter->next_idx[cpu]++; + iter->next_page_idx[cpu]++; + + if (iter->next_page_idx[cpu] >= ENTRIES_PER_PAGE) { + struct trace_array_cpu *data = iter->tr->data[cpu]; + + iter->next_page_idx[cpu] = 0; + iter->next_page[cpu] = + trace_next_list(data, iter->next_page[cpu]); + } +} + static struct trace_entry * -find_next_entry(struct trace_iterator *iter, int *ent_cpu) +trace_entry_next(struct trace_array *tr, struct trace_array_cpu *data, + struct trace_iterator *iter, int cpu) +{ + struct list_head *next_page; + struct trace_entry *ent; + int idx, next_idx, next_page_idx; + + ent = trace_entry_idx(tr, tr->data[cpu], iter, cpu); + + if (likely(!ent || ent->type != TRACE_CONT)) + return ent; + + /* save the iterator details */ + idx = iter->idx; + next_idx = iter->next_idx[cpu]; + next_page_idx = iter->next_page_idx[cpu]; + next_page = iter->next_page[cpu]; + + /* find a real entry */ + do { + trace_iterator_increment(iter, cpu); + ent = trace_entry_idx(tr, tr->data[cpu], iter, cpu); + } while (ent && ent->type != TRACE_CONT); + + /* reset the iterator */ + iter->idx = idx; + iter->next_idx[cpu] = next_idx; + iter->next_page_idx[cpu] = next_page_idx; + iter->next_page[cpu] = next_page; + + return ent; +} + +static struct trace_entry * +__find_next_entry(struct trace_iterator *iter, int *ent_cpu, int inc) { struct trace_array *tr = iter->tr; struct trace_entry *ent, *next = NULL; @@ -1115,7 +1168,23 @@ find_next_entry(struct trace_iterator *iter, int *ent_cpu) for_each_tracing_cpu(cpu) { if (!head_page(tr->data[cpu])) continue; + ent = trace_entry_idx(tr, tr->data[cpu], iter, cpu); + + if (ent && ent->type == TRACE_CONT) { + struct trace_array_cpu *data = tr->data[cpu]; + + if (!inc) + ent = trace_entry_next(tr, data, iter, cpu); + else { + while (ent && ent->type == TRACE_CONT) { + trace_iterator_increment(iter, cpu); + ent = trace_entry_idx(tr, tr->data[cpu], + iter, cpu); + } + } + } + /* * Pick the entry with the smallest timestamp: */ @@ -1131,25 +1200,39 @@ find_next_entry(struct trace_iterator *iter, int *ent_cpu) return next; } -static void trace_iterator_increment(struct trace_iterator *iter) +/* Find the next real entry, without updating the iterator itself */ +static struct trace_entry * +find_next_entry(struct trace_iterator *iter, int *ent_cpu) { - iter->idx++; - iter->next_idx[iter->cpu]++; - iter->next_page_idx[iter->cpu]++; + return __find_next_entry(iter, ent_cpu, 0); +} + +/* Find the next real entry, and increment the iterator to the next entry */ +static void *find_next_entry_inc(struct trace_iterator *iter) +{ + struct trace_entry *next; + int next_cpu = -1; - if (iter->next_page_idx[iter->cpu] >= ENTRIES_PER_PAGE) { - struct trace_array_cpu *data = iter->tr->data[iter->cpu]; + next = __find_next_entry(iter, &next_cpu, 1); - iter->next_page_idx[iter->cpu] = 0; - iter->next_page[iter->cpu] = - trace_next_list(data, iter->next_page[iter->cpu]); - } + iter->prev_ent = iter->ent; + iter->prev_cpu = iter->cpu; + + iter->ent = next; + iter->cpu = next_cpu; + + if (next) + trace_iterator_increment(iter, iter->cpu); + + return next ? 
iter : NULL; } static void trace_consume(struct trace_iterator *iter) { struct trace_array_cpu *data = iter->tr->data[iter->cpu]; + struct trace_entry *ent; + again: data->trace_tail_idx++; if (data->trace_tail_idx >= ENTRIES_PER_PAGE) { data->trace_tail = trace_next_page(data, data->trace_tail); @@ -1160,25 +1243,11 @@ static void trace_consume(struct trace_iterator *iter) if (data->trace_head == data->trace_tail && data->trace_head_idx == data->trace_tail_idx) data->trace_idx = 0; -} - -static void *find_next_entry_inc(struct trace_iterator *iter) -{ - struct trace_entry *next; - int next_cpu = -1; - - next = find_next_entry(iter, &next_cpu); - - iter->prev_ent = iter->ent; - iter->prev_cpu = iter->cpu; - - iter->ent = next; - iter->cpu = next_cpu; - - if (next) - trace_iterator_increment(iter); - return next ? iter : NULL; + ent = trace_entry_idx(iter->tr, iter->tr->data[iter->cpu], + iter, iter->cpu); + if (ent && ent->type == TRACE_CONT) + goto again; } static void *s_next(struct seq_file *m, void *v, loff_t *pos) @@ -1473,6 +1542,26 @@ lat_print_timestamp(struct trace_seq *s, unsigned long long abs_usecs, static const char state_to_char[] = TASK_STATE_TO_CHAR_STR; +static void +trace_seq_print_cont(struct trace_seq *s, struct trace_iterator *iter) +{ + struct trace_array *tr = iter->tr; + struct trace_array_cpu *data = tr->data[iter->cpu]; + struct trace_entry *ent; + + ent = trace_entry_idx(tr, data, iter, iter->cpu); + if (!ent || ent->type != TRACE_CONT) { + trace_seq_putc(s, '\n'); + return; + } + + do { + trace_seq_printf(s, "%s", ent->cont.buf); + trace_iterator_increment(iter, iter->cpu); + ent = trace_entry_idx(tr, data, iter, iter->cpu); + } while (ent && ent->type == TRACE_CONT); +} + static int print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) { @@ -1491,6 +1580,10 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) if (!next_entry) next_entry = entry; + + if (entry->type == TRACE_CONT) + return 1; + rel_usecs = ns2usecs(next_entry->field.t - entry->field.t); abs_usecs = ns2usecs(entry->field.t - iter->tr->time_start); @@ -1550,6 +1643,12 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) } trace_seq_puts(s, "\n"); break; + case TRACE_PRINT: + seq_print_ip_sym(s, field->print.ip, sym_flags); + trace_seq_printf(s, ": %s", field->print.buf); + if (field->flags && TRACE_FLAG_CONT) + trace_seq_print_cont(s, iter); + break; default: trace_seq_printf(s, "Unknown type %d\n", entry->type); } @@ -1571,6 +1670,10 @@ static int print_trace_fmt(struct trace_iterator *iter) int i; entry = iter->ent; + + if (entry->type == TRACE_CONT) + return 1; + field = &entry->field; comm = trace_find_cmdline(iter->ent->field.pid); @@ -1653,6 +1756,12 @@ static int print_trace_fmt(struct trace_iterator *iter) if (!ret) return 0; break; + case TRACE_PRINT: + seq_print_ip_sym(s, field->print.ip, sym_flags); + trace_seq_printf(s, ": %s", field->print.buf); + if (field->flags && TRACE_FLAG_CONT) + trace_seq_print_cont(s, iter); + break; } return 1; } @@ -1666,6 +1775,10 @@ static int print_raw_fmt(struct trace_iterator *iter) int S, T; entry = iter->ent; + + if (entry->type == TRACE_CONT) + return 1; + field = &entry->field; ret = trace_seq_printf(s, "%d %d %llu ", @@ -1708,6 +1821,12 @@ static int print_raw_fmt(struct trace_iterator *iter) if (!ret) return 0; break; + case TRACE_PRINT: + trace_seq_printf(s, "# %lx %s", + field->print.ip, field->print.buf); + if (field->flags && TRACE_FLAG_CONT) + trace_seq_print_cont(s, 
iter); + break; } return 1; } @@ -1733,6 +1852,10 @@ static int print_hex_fmt(struct trace_iterator *iter) int S, T; entry = iter->ent; + + if (entry->type == TRACE_CONT) + return 1; + field = &entry->field; SEQ_PUT_HEX_FIELD_RET(s, field->pid); @@ -1778,6 +1901,10 @@ static int print_bin_fmt(struct trace_iterator *iter) struct trace_field *field; entry = iter->ent; + + if (entry->type == TRACE_CONT) + return 1; + field = &entry->field; SEQ_PUT_FIELD_RET(s, field->pid); @@ -2943,6 +3070,94 @@ static __init void tracer_init_debugfs(void) #endif } +#define TRACE_BUF_SIZE 1024 +#define TRACE_PRINT_BUF_SIZE \ + (sizeof(struct trace_field) - offsetof(struct trace_field, print.buf)) +#define TRACE_CONT_BUF_SIZE sizeof(struct trace_field) + +/** + * ftrace_printk - printf formatting in the ftrace buffer + * @fmt - the printf format for printing. + * + * Note: __ftrace_printk is an internal function for ftrace_printk and + * the @ip is passed in via the ftrace_printk macro. + * + * This function allows a kernel developer to debug fast path sections + * that printk is not appropriate for. By scattering in various + * printk like tracing in the code, a developer can quickly see + * where problems are occurring. + * + * This is intended as a debugging tool for the developer only. + * Please reframe from leaving ftrace_printks scattered around in + * your code. + */ +int __ftrace_printk(unsigned long ip, const char *fmt, ...) +{ + struct trace_array *tr = &global_trace; + static DEFINE_SPINLOCK(trace_buf_lock); + static char trace_buf[TRACE_BUF_SIZE]; + struct trace_array_cpu *data; + struct trace_entry *entry; + unsigned long flags; + long disabled; + va_list ap; + int cpu, len = 0, write, written = 0; + + if (likely(!ftrace_function_enabled)) + return 0; + + local_irq_save(flags); + cpu = raw_smp_processor_id(); + data = tr->data[cpu]; + disabled = atomic_inc_return(&data->disabled); + + if (unlikely(disabled != 1 || !ftrace_function_enabled)) + goto out; + + spin_lock(&trace_buf_lock); + va_start(ap, fmt); + len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, ap); + va_end(ap); + + len = min(len, TRACE_BUF_SIZE-1); + trace_buf[len] = 0; + + __raw_spin_lock(&data->lock); + entry = tracing_get_trace_entry(tr, data); + tracing_generic_entry_update(entry, flags); + entry->type = TRACE_PRINT; + entry->field.print.ip = ip; + + write = min(len, (int)(TRACE_PRINT_BUF_SIZE-1)); + + memcpy(&entry->field.print.buf, trace_buf, write); + entry->field.print.buf[write] = 0; + written = write; + + if (written != len) + entry->field.flags |= TRACE_FLAG_CONT; + + while (written != len) { + entry = tracing_get_trace_entry(tr, data); + + entry->type = TRACE_CONT; + write = min(len - written, (int)(TRACE_CONT_BUF_SIZE-1)); + memcpy(&entry->cont.buf, trace_buf+written, write); + entry->cont.buf[write] = 0; + written += write; + } + __raw_spin_unlock(&data->lock); + + spin_unlock(&trace_buf_lock); + + out: + atomic_dec(&data->disabled); + local_irq_restore(flags); + + return len; +} +EXPORT_SYMBOL_GPL(__ftrace_printk); + static int trace_alloc_page(void) { struct trace_array_cpu *data; diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 6ddd6a6556c..50b6d7a6f01 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -13,7 +13,9 @@ enum trace_type { TRACE_FN, TRACE_CTX, TRACE_WAKE, + TRACE_CONT, TRACE_STACK, + TRACE_PRINT, TRACE_SPECIAL, TRACE_MMIO_RW, TRACE_MMIO_MAP, @@ -60,6 +62,14 @@ struct stack_entry { unsigned long caller[FTRACE_STACK_ENTRIES]; }; +/* + * ftrace_printk entry: + */ +struct print_entry 
{ + unsigned long ip; + char buf[]; +}; + /* * The trace field - the most basic unit of tracing. This is what * is printed in the end as a single line in the trace output, such as: @@ -77,6 +87,7 @@ struct trace_field { struct ctx_switch_entry ctx; struct special_entry special; struct stack_entry stack; + struct print_entry print; struct mmiotrace_rw mmiorw; struct mmiotrace_map mmiomap; }; diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index 0911b7e073b..630715bbd57 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c @@ -9,7 +9,9 @@ static inline int trace_valid_entry(struct trace_entry *entry) case TRACE_FN: case TRACE_CTX: case TRACE_WAKE: + case TRACE_CONT: case TRACE_STACK: + case TRACE_PRINT: case TRACE_SPECIAL: return 1; } @@ -120,11 +122,11 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace, struct trace_array *tr, int (*func)(void)) { - unsigned long count; - int ret; int save_ftrace_enabled = ftrace_enabled; int save_tracer_enabled = tracer_enabled; + unsigned long count; char *func_name; + int ret; /* The ftrace test PASSED */ printk(KERN_CONT "PASSED\n"); @@ -157,6 +159,7 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace, /* enable tracing */ tr->ctrl = 1; trace->init(tr); + /* Sleep for a 1/10 of a second */ msleep(100); @@ -212,10 +215,10 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace, int trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr) { - unsigned long count; - int ret; int save_ftrace_enabled = ftrace_enabled; int save_tracer_enabled = tracer_enabled; + unsigned long count; + int ret; /* make sure msleep has been recorded */ msleep(1); -- cgit v1.2.3-70-g09d2 From 2f2c99dba2398ef7d9c21f7c793180a50e68b1f0 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 1 Aug 2008 16:45:49 -0400 Subject: ftrace: ftrace_printk doc moved Based on Randy Dunlap's suggestion, the ftrace_printk kernel-doc belongs with the ftrace_printk macro that should be used. Not with the __ftrace_printk internal function. Signed-off-by: Steven Rostedt Acked-by: Randy Dunlap Signed-off-by: Ingo Molnar --- include/linux/ftrace.h | 19 ++++++++++++++++++- kernel/trace/trace.c | 16 ---------------- 2 files changed, 18 insertions(+), 17 deletions(-) (limited to 'kernel/trace') diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index f53b975e32f..018af16bce5 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -157,7 +157,24 @@ static inline void __ftrace_enabled_restore(int enabled) #ifdef CONFIG_TRACING extern void ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3); -# define ftrace_printk(x...) __ftrace_printk(_THIS_IP_, x) + +/** + * ftrace_printk - printf formatting in the ftrace buffer + * @fmt: the printf format for printing + * + * Note: __ftrace_printk is an internal function for ftrace_printk and + * the @ip is passed in via the ftrace_printk macro. + * + * This function allows a kernel developer to debug fast path sections + * that printk is not appropriate for. By scattering in various + * printk like tracing in the code, a developer can quickly see + * where problems are occurring. + * + * This is intended as a debugging tool for the developer only. + * Please refrain from leaving ftrace_printks scattered around in + * your code. + */ +# define ftrace_printk(fmt...) __ftrace_printk(_THIS_IP_, fmt) extern int __ftrace_printk(unsigned long ip, const char *fmt, ...) 
__attribute__ ((format (printf, 2, 3))); diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index a917bea8271..2597e7e49c3 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -3075,22 +3075,6 @@ static __init void tracer_init_debugfs(void) (sizeof(struct trace_field) - offsetof(struct trace_field, print.buf)) #define TRACE_CONT_BUF_SIZE sizeof(struct trace_field) -/** - * ftrace_printk - printf formatting in the ftrace buffer - * @fmt - the printf format for printing. - * - * Note: __ftrace_printk is an internal function for ftrace_printk and - * the @ip is passed in via the ftrace_printk macro. - * - * This function allows a kernel developer to debug fast path sections - * that printk is not appropriate for. By scattering in various - * printk like tracing in the code, a developer can quickly see - * where problems are occurring. - * - * This is intended as a debugging tool for the developer only. - * Please reframe from leaving ftrace_printks scattered around in - * your code. - */ int __ftrace_printk(unsigned long ip, const char *fmt, ...) { struct trace_array *tr = &global_trace; -- cgit v1.2.3-70-g09d2 From 3f5a54e371ca20b119b73704f6c01b71295c1714 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 30 Jul 2008 22:36:46 -0400 Subject: ftrace: dump out ftrace buffers to console on panic At OLS I received a lot of interest in being able to have the ftrace buffers dumped on panic. Usually one would expect to use kexec and examine the buffers after a new kernel is loaded. But sometimes the resources do not permit kdump and kexec, so having an option to still see the sequence of events up to the crash is very advantageous. This patch adds the option to have the ftrace buffers dumped to the console in the latency_trace format on a panic. When the option is set, the default entries per CPU buffer are lowered to 16384, since the writing to the serial (if that is the console) may take an awfully long time otherwise. [ Changes since -v1: Got alpine to send correctly (as well as spell check working). Removed config option. Moved the static variables into ftrace_dump itself. Gave printk a log level. ] Signed-off-by: Steven Rostedt Cc: Peter Zijlstra Signed-off-by: Ingo Molnar --- include/linux/ftrace.h | 2 + kernel/trace/trace.c | 175 ++++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 176 insertions(+), 1 deletion(-) (limited to 'kernel/trace') diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 018af16bce5..f7fb92045bf 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -178,6 +178,7 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3); extern int __ftrace_printk(unsigned long ip, const char *fmt, ...) __attribute__ ((format (printf, 2, 3))); +extern void ftrace_dump(void); #else static inline void ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { } @@ -186,6 +187,7 @@ ftrace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 0))) { return 0; } +static inline void ftrace_dump(void) { } #endif #ifdef CONFIG_FTRACE_MCOUNT_RECORD diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 2597e7e49c3..97513c8ecd6 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -22,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -103,8 +105,15 @@ int ftrace_function_enabled; * trace_nr_entries is the number of entries that is allocated * for a buffer.
Note, the number of entries is always rounded * to ENTRIES_PER_PAGE. + * + * This number is purposely set to a low number of 16384. + * If the dump on oops happens, it will be much appreciated + * to not have to wait for all that output. Anyway this can be + * boot time and run time configurable. */ -static unsigned long trace_nr_entries = 65536UL; +#define TRACE_ENTRIES_DEFAULT 16384UL + +static unsigned long trace_nr_entries = TRACE_ENTRIES_DEFAULT; /* trace_types holds a link list of available tracers. */ static struct tracer *trace_types __read_mostly; @@ -3142,6 +3151,165 @@ int __ftrace_printk(unsigned long ip, const char *fmt, ...) } EXPORT_SYMBOL_GPL(__ftrace_printk); +static int trace_panic_handler(struct notifier_block *this, + unsigned long event, void *unused) +{ + ftrace_dump(); + return NOTIFY_OK; +} + +static struct notifier_block trace_panic_notifier = { + .notifier_call = trace_panic_handler, + .next = NULL, + .priority = 150 /* priority: INT_MAX >= x >= 0 */ +}; + +static int trace_die_handler(struct notifier_block *self, + unsigned long val, + void *data) +{ + switch (val) { + case DIE_OOPS: + ftrace_dump(); + break; + default: + break; + } + return NOTIFY_OK; +} + +static struct notifier_block trace_die_notifier = { + .notifier_call = trace_die_handler, + .priority = 200 +}; + +/* + * printk is set to max of 1024, we really don't need it that big. + * Nothing should be printing 1000 characters anyway. + */ +#define TRACE_MAX_PRINT 1000 + +/* + * Define here KERN_TRACE so that we have one place to modify + * it if we decide to change what log level the ftrace dump + * should be at. + */ +#define KERN_TRACE KERN_INFO + +static void +trace_printk_seq(struct trace_seq *s) +{ + /* Probably should print a warning here. */ + if (s->len >= 1000) + s->len = 1000; + + /* should be zero ended, but we are paranoid. */ + s->buffer[s->len] = 0; + + printk(KERN_TRACE "%s", s->buffer); + + trace_seq_reset(s); +} + + +void ftrace_dump(void) +{ + static DEFINE_SPINLOCK(ftrace_dump_lock); + /* use static because iter can be a bit big for the stack */ + static struct trace_iterator iter; + struct trace_array_cpu *data; + static cpumask_t mask; + static int dump_ran; + unsigned long flags; + int cnt = 0; + int cpu; + + /* only one dump */ + spin_lock_irqsave(&ftrace_dump_lock, flags); + if (dump_ran) + goto out; + + dump_ran = 1; + + /* No turning back! */ + ftrace_kill_atomic(); + + printk(KERN_TRACE "Dumping ftrace buffer:\n"); + + iter.tr = &global_trace; + iter.trace = current_trace; + + /* + * We need to stop all tracing on all CPUS to read the + * the next buffer. This is a bit expensive, but is + * not done often. We fill all what we can read, + * and then release the locks again. 
+ */ + + cpus_clear(mask); + + for_each_tracing_cpu(cpu) { + data = iter.tr->data[cpu]; + + if (!head_page(data) || !data->trace_idx) + continue; + + atomic_inc(&data->disabled); + cpu_set(cpu, mask); + } + + for_each_cpu_mask(cpu, mask) { + data = iter.tr->data[cpu]; + __raw_spin_lock(&data->lock); + + if (data->overrun > iter.last_overrun[cpu]) + iter.overrun[cpu] += + data->overrun - iter.last_overrun[cpu]; + iter.last_overrun[cpu] = data->overrun; + } + + while (!trace_empty(&iter)) { + + if (!cnt) + printk(KERN_TRACE "---------------------------------\n"); + + cnt++; + + /* reset all but tr, trace, and overruns */ + memset(&iter.seq, 0, + sizeof(struct trace_iterator) - + offsetof(struct trace_iterator, seq)); + iter.iter_flags |= TRACE_FILE_LAT_FMT; + iter.pos = -1; + + if (find_next_entry_inc(&iter) != NULL) { + print_trace_line(&iter); + trace_consume(&iter); + } + + trace_printk_seq(&iter.seq); + } + + if (!cnt) + printk(KERN_TRACE " (ftrace buffer empty)\n"); + else + printk(KERN_TRACE "---------------------------------\n"); + + for_each_cpu_mask(cpu, mask) { + data = iter.tr->data[cpu]; + __raw_spin_unlock(&data->lock); + } + + for_each_cpu_mask(cpu, mask) { + data = iter.tr->data[cpu]; + atomic_dec(&data->disabled); + } + + + out: + spin_unlock_irqrestore(&ftrace_dump_lock, flags); +} + static int trace_alloc_page(void) { struct trace_array_cpu *data; @@ -3338,6 +3506,11 @@ __init static int tracer_alloc_buffers(void) global_trace.ctrl = tracer_enabled; tracing_disabled = 0; + atomic_notifier_chain_register(&panic_notifier_list, + &trace_panic_notifier); + + register_die_notifier(&trace_die_notifier); + return 0; free_buffers: -- cgit v1.2.3-70-g09d2 From 98a983aad2e5b3dc83a8a761675445cdd8f3e6bd Mon Sep 17 00:00:00 2001 From: Frédéric Weisbecker Date: Fri, 15 Aug 2008 21:08:22 +0200 Subject: ftrace: fix some mistakes in error messages This patch fixes some mistakes on the tracer in warning messages when debugfs fails to create tracing files. 
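The mistakes all come from copy-pasted warning strings. A small wrapper that takes the file name exactly once would rule the whole class out; the trace_create_file() helper below is only a sketch under that assumption, not part of this patch:

	#include <linux/debugfs.h>
	#include <linux/kernel.h>

	/* Sketch: the warning text can no longer drift away from the name. */
	static struct dentry *
	trace_create_file(const char *name, mode_t mode, struct dentry *parent,
			  void *data, const struct file_operations *fops)
	{
		struct dentry *entry;

		entry = debugfs_create_file(name, mode, parent, data, fops);
		if (!entry)
			pr_warning("Could not create debugfs '%s' entry\n", name);

		return entry;
	}

Each call site then shrinks to a single call, e.g. trace_create_file("trace_pipe", 0644, d_tracer, NULL, &tracing_pipe_fops);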
Signed-off-by: Frederic Weisbecker Cc: srostedt@redhat.com Signed-off-by: Ingo Molnar --- kernel/trace/trace.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 97513c8ecd6..896e59f772c 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -3030,12 +3030,12 @@ static __init void tracer_init_debugfs(void) entry = debugfs_create_file("available_tracers", 0444, d_tracer, &global_trace, &show_traces_fops); if (!entry) - pr_warning("Could not create debugfs 'trace' entry\n"); + pr_warning("Could not create debugfs 'available_tracers' entry\n"); entry = debugfs_create_file("current_tracer", 0444, d_tracer, &global_trace, &set_tracer_fops); if (!entry) - pr_warning("Could not create debugfs 'trace' entry\n"); + pr_warning("Could not create debugfs 'current_tracer' entry\n"); entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer, &tracing_max_latency, @@ -3048,7 +3048,7 @@ static __init void tracer_init_debugfs(void) &tracing_thresh, &tracing_max_lat_fops); if (!entry) pr_warning("Could not create debugfs " - "'tracing_threash' entry\n"); + "'tracing_thresh' entry\n"); entry = debugfs_create_file("README", 0644, d_tracer, NULL, &tracing_readme_fops); if (!entry) @@ -3058,13 +3058,13 @@ static __init void tracer_init_debugfs(void) NULL, &tracing_pipe_fops); if (!entry) pr_warning("Could not create debugfs " - "'tracing_threash' entry\n"); + "'trace_pipe' entry\n"); entry = debugfs_create_file("trace_entries", 0644, d_tracer, &global_trace, &tracing_entries_fops); if (!entry) pr_warning("Could not create debugfs " - "'tracing_threash' entry\n"); + "'trace_entries' entry\n"); #ifdef CONFIG_DYNAMIC_FTRACE entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer, -- cgit v1.2.3-70-g09d2 From 00fd61aee10533e003f2f00ab7163207660a4051 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 15 Aug 2008 21:40:04 -0400 Subject: ftrace: do not init module on ftrace disabled If one of the self tests of ftrace has disabled the function tracer, do not run the code to convert the mcount calls in modules. Signed-off-by: Steven Rostedt Signed-off-by: Ingo Molnar --- kernel/trace/ftrace.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index eadd0eaea9b..11d94f2dc48 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -307,7 +307,7 @@ void ftrace_release(void *start, unsigned long size) unsigned long e = s + size; int i; - if (!start) + if (ftrace_disabled || !start) return; /* No interrupt should call this */ @@ -1567,7 +1567,7 @@ static int ftrace_convert_nops(unsigned long *start, void ftrace_init_module(unsigned long *start, unsigned long *end) { - if (start == end) + if (ftrace_disabled || start == end) return; ftrace_convert_nops(start, end); } -- cgit v1.2.3-70-g09d2 From 99ecdc43bc17faf5fa571db8569df171ecd0e5b8 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 15 Aug 2008 21:40:05 -0400 Subject: ftrace: add necessary locking for ftrace records The new design of pre-recorded mcounts and updating the code outside of kstop_machine has changed the way the records themselves are protected. This patch uses the ftrace_lock to protect the records. Note, the lock still does not need to be taken within calls that are only called via kstop_machine, since that code cannot run while the spin lock is held. The resulting locking pattern is sketched below.
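A sketch of that discipline, with demo_ names standing in for the real ftrace_hash_lock()/ftrace_hash_unlock() macros: when CONFIG_FTRACE_MCOUNT_RECORD pre-records the callers, the hash lock compiles away to nothing and ftrace_lock protects the records; note that the unlock side pairs spin_lock_irqsave() with spin_unlock_irqrestore():

	#ifndef CONFIG_FTRACE_MCOUNT_RECORD
	/* mcount callers are recorded dynamically; a real spinlock is needed */
	static DEFINE_SPINLOCK(demo_hash_lock);
	# define demo_hash_lock(flags)   spin_lock_irqsave(&demo_hash_lock, flags)
	# define demo_hash_unlock(flags) spin_unlock_irqrestore(&demo_hash_lock, flags)
	#else
	/* records are pre-compiled and protected by ftrace_lock instead */
	# define demo_hash_lock(flags)   do { (void)(flags); } while (0)
	# define demo_hash_unlock(flags) do { (void)(flags); } while (0)
	#endif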
Also removed the hash_lock needed for the daemon when MCOUNT_RECORD is configured. Also did a slight cleanup of an unused variable. Signed-off-by: Steven Rostedt Signed-off-by: Ingo Molnar --- kernel/trace/ftrace.c | 44 ++++++++++++++++++++++++++++++-------------- 1 file changed, 30 insertions(+), 14 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 11d94f2dc48..43665add980 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -81,7 +81,7 @@ void clear_ftrace_function(void) static int __register_ftrace_function(struct ftrace_ops *ops) { - /* Should never be called by interrupts */ + /* should not be called from interrupt context */ spin_lock(&ftrace_lock); ops->next = ftrace_list; @@ -115,6 +115,7 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops) struct ftrace_ops **p; int ret = 0; + /* should not be called from interrupt context */ spin_lock(&ftrace_lock); /* @@ -153,6 +154,21 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops) #ifdef CONFIG_DYNAMIC_FTRACE +#ifndef CONFIG_FTRACE_MCOUNT_RECORD +/* + * The hash lock is only needed when the recording of the mcount + * callers are dynamic. That is, by the caller themselves and + * not recorded via the compilation. + */ +static DEFINE_SPINLOCK(ftrace_hash_lock); +#define ftrace_hash_lock(flags) spin_lock_irqsave(ftrace_hash_lock, flags) +#define ftrace_hash_unlock(flags) spin_lock_irqsave(ftrace_hash_lock, flags) +#else +/* This is protected via the ftrace_lock with MCOUNT_RECORD. */ +#define ftrace_hash_lock(flags) do { (void)flags; } while (0) +#define ftrace_hash_unlock(flags) do { } while(0) +#endif + static struct task_struct *ftraced_task; enum { @@ -171,7 +187,6 @@ static struct hlist_head ftrace_hash[FTRACE_HASHSIZE]; static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu); -static DEFINE_SPINLOCK(ftrace_shutdown_lock); static DEFINE_MUTEX(ftraced_lock); static DEFINE_MUTEX(ftrace_regex_lock); @@ -310,7 +325,7 @@ void ftrace_release(void *start, unsigned long size) if (ftrace_disabled || !start) return; - /* No interrupt should call this */ + /* should not be called from interrupt context */ spin_lock(&ftrace_lock); for (pg = ftrace_pages_start; pg; pg = pg->next) { @@ -362,7 +377,6 @@ ftrace_record_ip(unsigned long ip) unsigned long flags; unsigned long key; int resched; - int atomic; int cpu; if (!ftrace_enabled || ftrace_disabled) @@ -392,9 +406,7 @@ ftrace_record_ip(unsigned long ip) if (ftrace_ip_in_hash(ip, key)) goto out; - atomic = irqs_disabled(); - - spin_lock_irqsave(&ftrace_shutdown_lock, flags); + ftrace_hash_lock(flags); /* This ip may have hit the hash before the lock */ if (ftrace_ip_in_hash(ip, key)) @@ -411,7 +423,7 @@ ftrace_record_ip(unsigned long ip) ftraced_trigger = 1; out_unlock: - spin_unlock_irqrestore(&ftrace_shutdown_lock, flags); + ftrace_hash_unlock(flags); out: per_cpu(ftrace_shutdown_disable_cpu, cpu)--; @@ -887,6 +899,8 @@ t_next(struct seq_file *m, void *v, loff_t *pos) (*pos)++; + /* should not be called from interrupt context */ + spin_lock(&ftrace_lock); retry: if (iter->idx >= iter->pg->index) { if (iter->pg->next) { @@ -910,6 +924,7 @@ t_next(struct seq_file *m, void *v, loff_t *pos) goto retry; } } + spin_unlock(&ftrace_lock); iter->pos = *pos; @@ -1023,8 +1038,8 @@ static void ftrace_filter_reset(int enable) unsigned long type = enable ? 
FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; unsigned i; - /* keep kstop machine from running */ - preempt_disable(); + /* should not be called from interrupt context */ + spin_lock(&ftrace_lock); if (enable) ftrace_filtered = 0; pg = ftrace_pages_start; @@ -1037,7 +1052,7 @@ static void ftrace_filter_reset(int enable) } pg = pg->next; } - preempt_enable(); + spin_unlock(&ftrace_lock); } static int @@ -1149,8 +1164,8 @@ ftrace_match(unsigned char *buff, int len, int enable) } } - /* keep kstop machine from running */ - preempt_disable(); + /* should not be called from interrupt context */ + spin_lock(&ftrace_lock); if (enable) ftrace_filtered = 1; pg = ftrace_pages_start; @@ -1187,7 +1202,7 @@ ftrace_match(unsigned char *buff, int len, int enable) } pg = pg->next; } - preempt_enable(); + spin_unlock(&ftrace_lock); } static ssize_t @@ -1551,6 +1566,7 @@ static int ftrace_convert_nops(unsigned long *start, p = start; while (p < end) { addr = ftrace_call_adjust(*p++); + /* should not be called from interrupt context */ spin_lock(&ftrace_lock); ftrace_record_ip(addr); spin_unlock(&ftrace_lock); -- cgit v1.2.3-70-g09d2 From 2d7da80f7138c4276ef4fa0334be400b805d0fbf Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Mon, 25 Aug 2008 13:08:44 +1000 Subject: ftrace: fix build failure After disabling FTRACE_MCOUNT_RECORD via a patch, a dormant build failure surfaced: kernel/trace/ftrace.c: In function 'ftrace_record_ip': kernel/trace/ftrace.c:416: error: incompatible type for argument 1 of '_spin_lock_irqsave' kernel/trace/ftrace.c:433: error: incompatible type for argument 1 of '_spin_lock_irqsave' Introduced by commit 6dad8e07f4c10b17b038e84d29f3ca41c2e55cd0 ("ftrace: add necessary locking for ftrace records"). Signed-off-by: Stephen Rothwell Signed-off-by: Ingo Molnar --- kernel/trace/ftrace.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 43665add980..7599abdf6d4 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -161,8 +161,8 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops) * not recorded via the compilation. */ static DEFINE_SPINLOCK(ftrace_hash_lock); -#define ftrace_hash_lock(flags) spin_lock_irqsave(ftrace_hash_lock, flags) -#define ftrace_hash_unlock(flags) spin_lock_irqsave(ftrace_hash_lock, flags) +#define ftrace_hash_lock(flags) spin_lock_irqsave(&ftrace_hash_lock, flags) +#define ftrace_hash_unlock(flags) spin_lock_irqsave(&ftrace_hash_lock, flags) #else /* This is protected via the ftrace_lock with MCOUNT_RECORD. */ #define ftrace_hash_lock(flags) do { (void)flags; } while (0) -- cgit v1.2.3-70-g09d2 From ac8825ec6d941b6899331b84c7d6bf027c3bb4f1 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 25 Aug 2008 08:12:04 +0200 Subject: ftrace: clean up macro usage enclose the argument in parenthesis. (especially since we cast it, which is a high prio operation) Signed-off-by: Ingo Molnar --- kernel/trace/ftrace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel/trace') diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 7599abdf6d4..969a83f75a3 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -165,7 +165,7 @@ static DEFINE_SPINLOCK(ftrace_hash_lock); #define ftrace_hash_unlock(flags) spin_lock_irqsave(&ftrace_hash_lock, flags) #else /* This is protected via the ftrace_lock with MCOUNT_RECORD. 
*/ -#define ftrace_hash_lock(flags) do { (void)flags; } while (0) +#define ftrace_hash_lock(flags) do { (void)(flags); } while (0) #define ftrace_hash_unlock(flags) do { } while(0) #endif -- cgit v1.2.3-70-g09d2 From e5a81b629ea8feb9e7530cfac35cfb41c45facf3 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 27 Aug 2008 23:31:01 -0400 Subject: ftrace: add stack tracer This is another tracer using the ftrace infrastructure, that examines at each function call the size of the stack. If the stack use is greater than the previous max it is recorded. You can always see (and set) the max stack size seen. By setting it to zero will start the recording again. The backtrace is also available. For example: # cat /debug/tracing/stack_max_size 1856 # cat /debug/tracing/stack_trace [] stack_trace_call+0x8f/0x101 [] ftrace_call+0x5/0x8 [] clocksource_get_next+0x12/0x48 [] update_wall_time+0x538/0x6d1 [] do_timer+0x23/0xb0 [] tick_do_update_jiffies64+0xd9/0xf1 [] tick_sched_timer+0x4a/0xad [] __run_hrtimer+0x3e/0x75 [] hrtimer_interrupt+0xf1/0x154 [] smp_apic_timer_interrupt+0x71/0x84 [] apic_timer_interrupt+0x2d/0x34 [] finish_task_switch+0x29/0xa0 [] schedule+0x765/0x7be [] schedule_timeout+0x1b/0x90 [] wait_for_common+0xab/0x101 [] wait_for_completion+0x12/0x14 [] blk_execute_rq+0x84/0x99 [] scsi_execute+0xc2/0x105 [] scsi_execute_req+0x57/0x7f [] sr_test_unit_ready+0x3e/0x97 [] sr_media_change+0x43/0x205 [] media_changed+0x48/0x77 [] cdrom_media_changed+0x31/0x37 [] sr_block_media_changed+0x16/0x18 [] check_disk_change+0x1b/0x63 [] cdrom_open+0x7a1/0x806 [] sr_block_open+0x78/0x8d [] do_open+0x90/0x257 [] blkdev_open+0x2d/0x56 [] __dentry_open+0x14d/0x23c [] nameidata_to_filp+0x24/0x38 [] do_filp_open+0x347/0x626 [] do_sys_open+0x47/0xbc [] sys_open+0x23/0x2b [] sysenter_do_call+0x12/0x26 I've tested this on both x86_64 and i386. Signed-off-by: Steven Rostedt Cc: Peter Zijlstra Cc: Andrew Morton Cc: Linus Torvalds Signed-off-by: Ingo Molnar --- kernel/trace/Kconfig | 9 ++ kernel/trace/Makefile | 1 + kernel/trace/trace_stack.c | 254 +++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 264 insertions(+) create mode 100644 kernel/trace/trace_stack.c (limited to 'kernel/trace') diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 14d9505178c..2a22e46390d 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -106,6 +106,15 @@ config CONTEXT_SWITCH_TRACER This tracer gets called from the context switch and records all switching of tasks. 
+config STACK_TRACER + bool "Trace max stack" + depends on HAVE_FTRACE + select FTRACE + select STACKTRACE + help + This tracer records the max stack of the kernel, and displays + it in debugfs/tracing/stack_trace + config DYNAMIC_FTRACE bool "enable/disable ftrace tracepoints dynamically" depends on FTRACE diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index 71d17de1728..58ec61c44bd 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile @@ -19,6 +19,7 @@ obj-$(CONFIG_FTRACE) += trace_functions.o obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o +obj-$(CONFIG_STACK_TRACER) += trace_stack.o obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o libftrace-y := ftrace.o diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c new file mode 100644 index 00000000000..4d1e522e3fe --- /dev/null +++ b/kernel/trace/trace_stack.c @@ -0,0 +1,254 @@ +/* + * Copyright (C) 2008 Steven Rostedt + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "trace.h" + +#define STACK_TRACE_ENTRIES 500 + +static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES] = + { [0 ... (STACK_TRACE_ENTRIES-1)] = ULONG_MAX }; +static struct stack_trace max_stack_trace = { + .max_entries = STACK_TRACE_ENTRIES, + .entries = stack_dump_trace, +}; + +static unsigned long max_stack_size; +static raw_spinlock_t max_stack_lock = + (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; + +static int stack_trace_disabled __read_mostly; +static DEFINE_PER_CPU(int, trace_active); + +static inline void check_stack(void) +{ + unsigned long this_size; + unsigned long flags; + + this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1); + this_size = THREAD_SIZE - this_size; + + if (this_size <= max_stack_size) + return; + + raw_local_irq_save(flags); + __raw_spin_lock(&max_stack_lock); + + /* a race could have already updated it */ + if (this_size <= max_stack_size) + goto out; + + max_stack_size = this_size; + + max_stack_trace.nr_entries = 0; + max_stack_trace.skip = 1; + + save_stack_trace(&max_stack_trace); + + out: + __raw_spin_unlock(&max_stack_lock); + raw_local_irq_restore(flags); +} + +static void +stack_trace_call(unsigned long ip, unsigned long parent_ip) +{ + int cpu, resched; + + if (unlikely(!ftrace_enabled || stack_trace_disabled)) + return; + + resched = need_resched(); + preempt_disable_notrace(); + + cpu = raw_smp_processor_id(); + /* no atomic needed, we only modify this variable by this cpu */ + if (per_cpu(trace_active, cpu)++ != 0) + goto out; + + check_stack(); + + out: + per_cpu(trace_active, cpu)--; + /* prevent recursion in schedule */ + if (resched) + preempt_enable_no_resched_notrace(); + else + preempt_enable_notrace(); +} + +static struct ftrace_ops trace_ops __read_mostly = +{ + .func = stack_trace_call, +}; + +static ssize_t +stack_max_size_read(struct file *filp, char __user *ubuf, + size_t count, loff_t *ppos) +{ + unsigned long *ptr = filp->private_data; + char buf[64]; + int r; + + r = snprintf(buf, sizeof(buf), "%ld\n", *ptr); + if (r > sizeof(buf)) + r = sizeof(buf); + return simple_read_from_buffer(ubuf, count, ppos, buf, r); +} + +static ssize_t +stack_max_size_write(struct file *filp, const char __user *ubuf, + size_t count, loff_t *ppos) +{ + long *ptr = filp->private_data; + unsigned long val, flags; + char buf[64]; + int ret; + + if (count >= sizeof(buf)) + return -EINVAL; + + if (copy_from_user(&buf, ubuf, count)) + return 
-EFAULT; + + buf[count] = 0; + + ret = strict_strtoul(buf, 10, &val); + if (ret < 0) + return ret; + + raw_local_irq_save(flags); + __raw_spin_lock(&max_stack_lock); + *ptr = val; + __raw_spin_unlock(&max_stack_lock); + raw_local_irq_restore(flags); + + return count; +} + +static struct file_operations stack_max_size_fops = { + .open = tracing_open_generic, + .read = stack_max_size_read, + .write = stack_max_size_write, +}; + +static void * +t_next(struct seq_file *m, void *v, loff_t *pos) +{ + unsigned long *t = m->private; + + (*pos)++; + + if (!t || *t == ULONG_MAX) + return NULL; + + t++; + m->private = t; + + return t; +} + +static void *t_start(struct seq_file *m, loff_t *pos) +{ + unsigned long *t = m->private; + loff_t l = 0; + + local_irq_disable(); + __raw_spin_lock(&max_stack_lock); + + for (; t && l < *pos; t = t_next(m, t, &l)) + ; + + return t; +} + +static void t_stop(struct seq_file *m, void *p) +{ + __raw_spin_unlock(&max_stack_lock); + local_irq_enable(); +} + +static int trace_lookup_stack(struct seq_file *m, unsigned long addr) +{ +#ifdef CONFIG_KALLSYMS + char str[KSYM_SYMBOL_LEN]; + + sprint_symbol(str, addr); + + return seq_printf(m, "[<%p>] %s\n", (void*)addr, str); +#else + return seq_printf(m, "%p\n", (void*)addr); +#endif +} + +static int t_show(struct seq_file *m, void *v) +{ + unsigned long *t = v; + + if (!t || *t == ULONG_MAX) + return 0; + + trace_lookup_stack(m, *t); + + return 0; +} + +static struct seq_operations stack_trace_seq_ops = { + .start = t_start, + .next = t_next, + .stop = t_stop, + .show = t_show, +}; + +static int stack_trace_open(struct inode *inode, struct file *file) +{ + int ret; + + ret = seq_open(file, &stack_trace_seq_ops); + if (!ret) { + struct seq_file *m = file->private_data; + m->private = stack_dump_trace; + } + + return ret; +} + +static struct file_operations stack_trace_fops = { + .open = stack_trace_open, + .read = seq_read, + .llseek = seq_lseek, +}; + +static __init int stack_trace_init(void) +{ + struct dentry *d_tracer; + struct dentry *entry; + + d_tracer = tracing_init_dentry(); + + entry = debugfs_create_file("stack_max_size", 0644, d_tracer, + &max_stack_size, &stack_max_size_fops); + if (!entry) + pr_warning("Could not create debugfs 'stack_max_size' entry\n"); + + entry = debugfs_create_file("stack_trace", 0444, d_tracer, + NULL, &stack_trace_fops); + if (!entry) + pr_warning("Could not create debugfs 'stack_trace' entry\n"); + + register_ftrace_function(&trace_ops); + + return 0; +} + +device_initcall(stack_trace_init); -- cgit v1.2.3-70-g09d2 From 3b47bfc1fca01cccad9cce2d18b79b18ef2e4131 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 27 Aug 2008 23:24:15 -0400 Subject: ftrace: remove direct reference to mcount in trace code The mcount record method of ftrace scans objdump for references to mcount. Using mcount as the reference to test if the calls to mcount being replaced are indeed calls to mcount, this use of mcount was also caught as a location to change. Using a variable that points to the mcount address moves this reference into the data section that is not scanned, and we do not use a false location to try and modify. The warn on code was what was used to detect this bug. 
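The same trick works anywhere a build step scans the text section for references to a symbol: route the address through a data-section variable so the scan cannot mistake the test for a real call site. A stand-alone sketch (the demo_ names are illustrative; the initializer relies on the same GCC behavior the kernel uses for MCOUNT_ADDR):

	extern void mcount(void);

	/*
	 * A direct reference to mcount in the code would itself show up in
	 * an objdump scan of the text; loading it from .data does not.
	 */
	static unsigned long demo_mcount_addr = (unsigned long)mcount;

	static int demo_is_mcount_call(unsigned long call_target)
	{
		return call_target == demo_mcount_addr;
	}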
Signed-off-by: Steven Rostedt Signed-off-by: Ingo Molnar --- kernel/trace/ftrace.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'kernel/trace') diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 969a83f75a3..b69966f0f14 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -36,6 +36,14 @@ int ftrace_enabled __read_mostly; static int last_ftrace_enabled; +/* + * Since MCOUNT_ADDR may point to mcount itself, we do not want + * to get it confused by reading a reference in the code as we + * are parsing on objcopy output of text. Use a variable for + * it instead. + */ +static unsigned long mcount_addr = MCOUNT_ADDR; + /* * ftrace_disabled is set when an anomaly is discovered. * ftrace_disabled is much stronger than ftrace_enabled. @@ -577,7 +585,7 @@ ftrace_code_disable(struct dyn_ftrace *rec) ip = rec->ip; nop = ftrace_nop_replace(); - call = ftrace_call_replace(ip, MCOUNT_ADDR); + call = ftrace_call_replace(ip, mcount_addr); failed = ftrace_modify_code(ip, call, nop); if (failed) { -- cgit v1.2.3-70-g09d2 From 1b6cced6ec9677fa65471e890dfdcb4bf5387643 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 29 Aug 2008 16:51:43 -0400 Subject: ftrace: stack trace add indexes This patch adds indexes into the stack that the functions in the stack dump were found at. As an added bonus, I also added a diff to show which function is the most notorious consumer of the stack. The output now looks like this: # cat /debug/tracing/stack_trace Depth Size Location (48 entries) ----- ---- -------- 0) 2476 212 blk_recount_segments+0x39/0x59 1) 2264 12 bio_phys_segments+0x16/0x1d 2) 2252 20 blk_rq_bio_prep+0x23/0xaf 3) 2232 12 init_request_from_bio+0x74/0x77 4) 2220 56 __make_request+0x294/0x331 5) 2164 136 generic_make_request+0x34f/0x37d 6) 2028 56 submit_bio+0xe7/0xef 7) 1972 28 submit_bh+0xd1/0xf0 8) 1944 112 block_read_full_page+0x299/0x2a9 9) 1832 8 blkdev_readpage+0x14/0x16 10) 1824 28 read_cache_page_async+0x7e/0x109 11) 1796 16 read_cache_page+0x11/0x49 12) 1780 32 read_dev_sector+0x3c/0x72 13) 1748 48 read_lba+0x4d/0xaa 14) 1700 168 efi_partition+0x85/0x61b 15) 1532 72 rescan_partitions+0x10e/0x266 16) 1460 40 do_open+0x1c7/0x24e 17) 1420 292 __blkdev_get+0x79/0x84 18) 1128 12 blkdev_get+0x12/0x14 19) 1116 20 register_disk+0xd1/0x11e 20) 1096 28 add_disk+0x34/0x90 21) 1068 52 sd_probe+0x2b1/0x366 22) 1016 20 driver_probe_device+0xa5/0x120 23) 996 8 __device_attach+0xd/0xf 24) 988 32 bus_for_each_drv+0x3e/0x68 25) 956 24 device_attach+0x56/0x6c 26) 932 16 bus_attach_device+0x26/0x4d 27) 916 64 device_add+0x380/0x4b4 28) 852 28 scsi_sysfs_add_sdev+0xa1/0x1c9 29) 824 160 scsi_probe_and_add_lun+0x919/0xa2a 30) 664 36 __scsi_add_device+0x88/0xae 31) 628 44 ata_scsi_scan_host+0x9e/0x21c 32) 584 28 ata_host_register+0x1cb/0x1db 33) 556 24 ata_host_activate+0x98/0xb5 34) 532 192 ahci_init_one+0x9bd/0x9e9 35) 340 20 pci_device_probe+0x3e/0x5e 36) 320 20 driver_probe_device+0xa5/0x120 37) 300 20 __driver_attach+0x3f/0x5e 38) 280 36 bus_for_each_dev+0x40/0x62 39) 244 12 driver_attach+0x19/0x1b 40) 232 28 bus_add_driver+0x9c/0x1af 41) 204 28 driver_register+0x76/0xd2 42) 176 20 __pci_register_driver+0x44/0x71 43) 156 8 ahci_init+0x14/0x16 44) 148 100 _stext+0x42/0x122 45) 48 20 kernel_init+0x175/0x1dc 46) 28 28 kernel_thread_helper+0x7/0x10 The first column is simply an index starting from the inner most function and counting down to the outer most. The next column is the location that the function was found on the stack. 
The next column is the size of the stack for that function. Signed-off-by: Steven Rostedt Signed-off-by: Ingo Molnar --- kernel/trace/trace_stack.c | 90 +++++++++++++++++++++++++++++++++++++--------- 1 file changed, 73 insertions(+), 17 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index 4d1e522e3fe..74c5d9a3afa 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c @@ -16,8 +16,10 @@ #define STACK_TRACE_ENTRIES 500 -static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES] = - { [0 ... (STACK_TRACE_ENTRIES-1)] = ULONG_MAX }; +static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] = + { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX }; +static unsigned stack_dump_index[STACK_TRACE_ENTRIES]; + static struct stack_trace max_stack_trace = { .max_entries = STACK_TRACE_ENTRIES, .entries = stack_dump_trace, @@ -32,8 +34,9 @@ static DEFINE_PER_CPU(int, trace_active); static inline void check_stack(void) { - unsigned long this_size; - unsigned long flags; + unsigned long this_size, flags; + unsigned long *p, *top, *start; + int i; this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1); this_size = THREAD_SIZE - this_size; @@ -51,10 +54,42 @@ static inline void check_stack(void) max_stack_size = this_size; max_stack_trace.nr_entries = 0; - max_stack_trace.skip = 1; + max_stack_trace.skip = 3; save_stack_trace(&max_stack_trace); + /* + * Now find where in the stack these are. + */ + i = 0; + start = &this_size; + top = (unsigned long *) + (((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE); + + /* + * Loop through all the entries. One of the entries may + * for some reason be missed on the stack, so we may + * have to account for them. If they are all there, this + * loop will only happen once. This code only takes place + * on a new max, so it is far from a fast path. 
+ */ + while (i < max_stack_trace.nr_entries) { + + stack_dump_index[i] = this_size; + p = start; + + for (; p < top && i < max_stack_trace.nr_entries; p++) { + if (*p == stack_dump_trace[i]) { + this_size = stack_dump_index[i++] = + (top - p) * sizeof(unsigned long); + /* Start the search from here */ + start = p + 1; + } + } + + i++; + } + out: __raw_spin_unlock(&max_stack_lock); raw_local_irq_restore(flags); @@ -145,22 +180,24 @@ static struct file_operations stack_max_size_fops = { static void * t_next(struct seq_file *m, void *v, loff_t *pos) { - unsigned long *t = m->private; + long i = (long)m->private; (*pos)++; - if (!t || *t == ULONG_MAX) + i++; + + if (i >= max_stack_trace.nr_entries || + stack_dump_trace[i] == ULONG_MAX) return NULL; - t++; - m->private = t; + m->private = (void *)i; - return t; + return &m->private; } static void *t_start(struct seq_file *m, loff_t *pos) { - unsigned long *t = m->private; + void *t = &m->private; loff_t l = 0; local_irq_disable(); @@ -178,14 +215,15 @@ static void t_stop(struct seq_file *m, void *p) local_irq_enable(); } -static int trace_lookup_stack(struct seq_file *m, unsigned long addr) +static int trace_lookup_stack(struct seq_file *m, long i) { + unsigned long addr = stack_dump_trace[i]; #ifdef CONFIG_KALLSYMS char str[KSYM_SYMBOL_LEN]; sprint_symbol(str, addr); - return seq_printf(m, "[<%p>] %s\n", (void*)addr, str); + return seq_printf(m, "%s\n", str); #else return seq_printf(m, "%p\n", (void*)addr); #endif @@ -193,12 +231,30 @@ static int trace_lookup_stack(struct seq_file *m, unsigned long addr) static int t_show(struct seq_file *m, void *v) { - unsigned long *t = v; + long i = *(long *)v; + int size; + + if (i < 0) { + seq_printf(m, " Depth Size Location" + " (%d entries)\n" + " ----- ---- --------\n", + max_stack_trace.nr_entries); + return 0; + } - if (!t || *t == ULONG_MAX) + if (i >= max_stack_trace.nr_entries || + stack_dump_trace[i] == ULONG_MAX) return 0; - trace_lookup_stack(m, *t); + if (i+1 == max_stack_trace.nr_entries || + stack_dump_trace[i+1] == ULONG_MAX) + size = stack_dump_index[i]; + else + size = stack_dump_index[i] - stack_dump_index[i+1]; + + seq_printf(m, "%3ld) %8d %5d ", i, stack_dump_index[i], size); + + trace_lookup_stack(m, i); return 0; } @@ -217,7 +273,7 @@ static int stack_trace_open(struct inode *inode, struct file *file) ret = seq_open(file, &stack_trace_seq_ops); if (!ret) { struct seq_file *m = file->private_data; - m->private = stack_dump_trace; + m->private = (void *)-1; } return ret; -- cgit v1.2.3-70-g09d2 From 2ff01c6a17391225a18256d510b6e5b4aba40aa1 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 4 Sep 2008 15:04:37 +0200 Subject: stack tracer: depends on DEBUG_KERNEL Signed-off-by: Ingo Molnar --- kernel/trace/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'kernel/trace') diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 2a22e46390d..5a9cffb0faf 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -109,6 +109,7 @@ config CONTEXT_SWITCH_TRACER config STACK_TRACER bool "Trace max stack" depends on HAVE_FTRACE + depends on DEBUG_KERNEL select FTRACE select STACKTRACE help -- cgit v1.2.3-70-g09d2 From a6168353d1c0b24b7512e14d1c3e47ed69881a23 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Wed, 20 Aug 2008 16:36:11 -0700 Subject: ftrace: make output nicely spaced for up to 999 cpus Currently some of the ftrace output goes skewiff if you have more than 9 cpus, and some if you have more than 99. 
Twiddle with the headers and format strings to make up to 999 cpus display without causing spacing problems. Signed-off-by: Michael Ellerman Acked-by: Steven Rostedt Signed-off-by: Andrew Morton Signed-off-by: Ingo Molnar --- kernel/trace/trace.c | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 896e59f772c..1801900908e 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -1409,21 +1409,21 @@ seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags) static void print_lat_help_header(struct seq_file *m) { - seq_puts(m, "# _------=> CPU# \n"); - seq_puts(m, "# / _-----=> irqs-off \n"); - seq_puts(m, "# | / _----=> need-resched \n"); - seq_puts(m, "# || / _---=> hardirq/softirq \n"); - seq_puts(m, "# ||| / _--=> preempt-depth \n"); - seq_puts(m, "# |||| / \n"); - seq_puts(m, "# ||||| delay \n"); - seq_puts(m, "# cmd pid ||||| time | caller \n"); - seq_puts(m, "# \\ / ||||| \\ | / \n"); + seq_puts(m, "# _------=> CPU# \n"); + seq_puts(m, "# / _-----=> irqs-off \n"); + seq_puts(m, "# | / _----=> need-resched \n"); + seq_puts(m, "# || / _---=> hardirq/softirq \n"); + seq_puts(m, "# ||| / _--=> preempt-depth \n"); + seq_puts(m, "# |||| / \n"); + seq_puts(m, "# ||||| delay \n"); + seq_puts(m, "# cmd pid ||||| time | caller \n"); + seq_puts(m, "# \\ / ||||| \\ | / \n"); } static void print_func_help_header(struct seq_file *m) { - seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"); - seq_puts(m, "# | | | | |\n"); + seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"); + seq_puts(m, "# | | | | |\n"); } @@ -1508,7 +1508,7 @@ lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu) comm = trace_find_cmdline(field->pid); trace_seq_printf(s, "%8.8s-%-5d ", comm, field->pid); - trace_seq_printf(s, "%d", cpu); + trace_seq_printf(s, "%3d", cpu); trace_seq_printf(s, "%c%c", (field->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.', ((field->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.')); @@ -1598,7 +1598,7 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) if (verbose) { comm = trace_find_cmdline(field->pid); - trace_seq_printf(s, "%16s %5d %d %d %08x %08x [%08lx]" + trace_seq_printf(s, "%16s %5d %3d %d %08x %08x [%08lx]" " %ld.%03ldms (+%ld.%03ldms): ", comm, field->pid, cpu, field->flags, @@ -1694,7 +1694,7 @@ static int print_trace_fmt(struct trace_iterator *iter) ret = trace_seq_printf(s, "%16s-%-5d ", comm, field->pid); if (!ret) return 0; - ret = trace_seq_printf(s, "[%02d] ", iter->cpu); + ret = trace_seq_printf(s, "[%03d] ", iter->cpu); if (!ret) return 0; ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem); -- cgit v1.2.3-70-g09d2 From 652567aa2000f1d4a1fd434382a30d8dd4a7c980 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 3 Sep 2008 17:42:50 -0400 Subject: ftrace: binary and not logical for continue test Peter Zijlstra provided me with a nice brown paper bag while letting me know that I was doing a logical AND and not a binary one, making a condition true more often than it should be. Luckily, a false true is handled by the calling function and no harm is done. But this needs to be fixed regardless. 
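The difference is easy to see in miniature, using the flag values from the trace_flag_type enum (TRACE_FLAG_IRQS_OFF is 0x01, TRACE_FLAG_CONT is 0x10); a sketch:

        unsigned long flags = TRACE_FLAG_IRQS_OFF;  /* CONT bit (0x10) clear */

        /* logical AND: any nonzero flags value makes this true (the bug) */
        if (flags && TRACE_FLAG_CONT)
                trace_seq_print_cont(s, iter);

        /* bitwise AND: true only when the CONT bit is actually set */
        if (flags & TRACE_FLAG_CONT)
                trace_seq_print_cont(s, iter);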
Signed-off-by: Steven Rostedt Acked-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- kernel/trace/trace.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 1801900908e..9639e45f086 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -1655,7 +1655,7 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) case TRACE_PRINT: seq_print_ip_sym(s, field->print.ip, sym_flags); trace_seq_printf(s, ": %s", field->print.buf); - if (field->flags && TRACE_FLAG_CONT) + if (field->flags & TRACE_FLAG_CONT) trace_seq_print_cont(s, iter); break; default: @@ -1768,7 +1768,7 @@ static int print_trace_fmt(struct trace_iterator *iter) case TRACE_PRINT: seq_print_ip_sym(s, field->print.ip, sym_flags); trace_seq_printf(s, ": %s", field->print.buf); - if (field->flags && TRACE_FLAG_CONT) + if (field->flags & TRACE_FLAG_CONT) trace_seq_print_cont(s, iter); break; } @@ -1833,7 +1833,7 @@ static int print_raw_fmt(struct trace_iterator *iter) case TRACE_PRINT: trace_seq_printf(s, "# %lx %s", field->print.ip, field->print.buf); - if (field->flags && TRACE_FLAG_CONT) + if (field->flags & TRACE_FLAG_CONT) trace_seq_print_cont(s, iter); break; } -- cgit v1.2.3-70-g09d2 From 5a90f577e5369a84b720ead42e621fcb1b8a8b21 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 3 Sep 2008 17:42:51 -0400 Subject: ftrace: print continue index fix An item in the trace buffer that is bigger than one entry may be split up using the TRACE_CONT entry. This makes it a virtual single entry. The current code increments the iterator index even while traversing TRACE_CONT entries, making it look like the iterator is further than it actually is. This patch adds code to not increment the iterator index while skipping over TRACE_CONT entries. 
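In outline, the patch below splits the increment in two, so that skipping continuation entries advances the buffer position without touching the user-visible index:

        /* advance only the per-cpu buffer position; used while
         * stepping over TRACE_CONT continuation entries */
        static void __trace_iterator_increment(struct trace_iterator *iter, int cpu);

        /* advance both the buffer position and the user-visible index */
        static void trace_iterator_increment(struct trace_iterator *iter, int cpu)
        {
                iter->idx++;
                __trace_iterator_increment(iter, cpu);
        }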
Signed-off-by: Steven Rostedt Acked-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- kernel/trace/trace.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 9639e45f086..d24101cfc42 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -1117,9 +1117,8 @@ trace_entry_idx(struct trace_array *tr, struct trace_array_cpu *data, } /* Increment the index counter of an iterator by one */ -static void trace_iterator_increment(struct trace_iterator *iter, int cpu) +static void __trace_iterator_increment(struct trace_iterator *iter, int cpu) { - iter->idx++; iter->next_idx[cpu]++; iter->next_page_idx[cpu]++; @@ -1132,6 +1131,12 @@ static void trace_iterator_increment(struct trace_iterator *iter, int cpu) } } +static void trace_iterator_increment(struct trace_iterator *iter, int cpu) +{ + iter->idx++; + __trace_iterator_increment(iter, cpu); +} + static struct trace_entry * trace_entry_next(struct trace_array *tr, struct trace_array_cpu *data, struct trace_iterator *iter, int cpu) @@ -1153,7 +1158,7 @@ trace_entry_next(struct trace_array *tr, struct trace_array_cpu *data, /* find a real entry */ do { - trace_iterator_increment(iter, cpu); + __trace_iterator_increment(iter, cpu); ent = trace_entry_idx(tr, tr->data[cpu], iter, cpu); } while (ent && ent->type != TRACE_CONT); @@ -1187,7 +1192,7 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, int inc) ent = trace_entry_next(tr, data, iter, cpu); else { while (ent && ent->type == TRACE_CONT) { - trace_iterator_increment(iter, cpu); + __trace_iterator_increment(iter, cpu); ent = trace_entry_idx(tr, tr->data[cpu], iter, cpu); } @@ -1566,7 +1571,7 @@ trace_seq_print_cont(struct trace_seq *s, struct trace_iterator *iter) do { trace_seq_printf(s, "%s", ent->cont.buf); - trace_iterator_increment(iter, iter->cpu); + __trace_iterator_increment(iter, iter->cpu); ent = trace_entry_idx(tr, data, iter, iter->cpu); } while (ent && ent->type == TRACE_CONT); } -- cgit v1.2.3-70-g09d2 From f09ce573f57ddc35c67b39e51f34545877b30031 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 4 Sep 2008 10:24:14 +0200 Subject: ftrace: make ftrace_printk usable with the other tracers Currently ftrace_printk only works with the ftrace tracer, switch it to an iter_ctrl setting so we can make us of them with other tracers too. [rostedt@redhat.com: tweak to the disable condition] Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- kernel/trace/trace.c | 8 +++++--- kernel/trace/trace.h | 1 + 2 files changed, 6 insertions(+), 3 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index d24101cfc42..8a00d6c5c0f 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -235,6 +235,7 @@ static const char *trace_options[] = { "block", "stacktrace", "sched-tree", + "ftrace_printk", NULL }; @@ -3091,9 +3092,10 @@ static __init void tracer_init_debugfs(void) int __ftrace_printk(unsigned long ip, const char *fmt, ...) { - struct trace_array *tr = &global_trace; static DEFINE_SPINLOCK(trace_buf_lock); static char trace_buf[TRACE_BUF_SIZE]; + + struct trace_array *tr = &global_trace; struct trace_array_cpu *data; struct trace_entry *entry; unsigned long flags; @@ -3101,7 +3103,7 @@ int __ftrace_printk(unsigned long ip, const char *fmt, ...) 
va_list ap; int cpu, len = 0, write, written = 0; - if (likely(!ftrace_function_enabled)) + if (!(trace_flags & TRACE_ITER_PRINTK) || !tr->ctrl || tracing_disabled) return 0; local_irq_save(flags); @@ -3109,7 +3111,7 @@ int __ftrace_printk(unsigned long ip, const char *fmt, ...) data = tr->data[cpu]; disabled = atomic_inc_return(&data->disabled); - if (unlikely(disabled != 1 || !ftrace_function_enabled)) + if (unlikely(disabled != 1)) goto out; spin_lock(&trace_buf_lock); diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 50b6d7a6f01..5f54c875c8b 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -356,6 +356,7 @@ enum trace_iterator_flags { TRACE_ITER_BLOCK = 0x80, TRACE_ITER_STACKTRACE = 0x100, TRACE_ITER_SCHED_TREE = 0x200, + TRACE_ITER_PRINTK = 0x400, }; #endif /* _LINUX_KERNEL_TRACE_H */ -- cgit v1.2.3-70-g09d2 From 80b5e940050c273ba277acbf3a9fbc1d4441e681 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 4 Sep 2008 10:24:16 +0200 Subject: ftrace: sched_switch: show the wakee's cpu While profiling the smp behaviour of the scheduler it was needed to know to which cpu a task got woken. Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- kernel/trace/trace.c | 12 +++++++++--- kernel/trace/trace.h | 1 + 2 files changed, 10 insertions(+), 3 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 8a00d6c5c0f..7e6cb4fe62f 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -977,6 +977,7 @@ tracing_sched_switch_trace(struct trace_array *tr, entry->field.ctx.next_pid = next->pid; entry->field.ctx.next_prio = next->prio; entry->field.ctx.next_state = next->state; + entry->field.ctx.next_cpu = task_cpu(next); __trace_stack(tr, data, flags, 5); __raw_spin_unlock(&data->lock); raw_local_irq_restore(irq_flags); @@ -1003,6 +1004,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr, entry->field.ctx.next_pid = wakee->pid; entry->field.ctx.next_prio = wakee->prio; entry->field.ctx.next_state = wakee->state; + entry->field.ctx.next_cpu = task_cpu(wakee); __trace_stack(tr, data, flags, 6); __raw_spin_unlock(&data->lock); raw_local_irq_restore(irq_flags); @@ -1636,10 +1638,11 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) __ffs(field->ctx.prev_state) + 1 : 0; S = state < sizeof(state_to_char) - 1 ? state_to_char[state] : 'X'; comm = trace_find_cmdline(field->ctx.next_pid); - trace_seq_printf(s, " %5d:%3d:%c %s %5d:%3d:%c %s\n", + trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n", field->ctx.prev_pid, field->ctx.prev_prio, S, entry->type == TRACE_CTX ? "==>" : " +", + field->ctx.next_cpu, field->ctx.next_pid, field->ctx.next_prio, T, comm); @@ -1736,11 +1739,12 @@ static int print_trace_fmt(struct trace_iterator *iter) state_to_char[field->ctx.prev_state] : 'X'; T = field->ctx.next_state < sizeof(state_to_char) ? state_to_char[field->ctx.next_state] : 'X'; - ret = trace_seq_printf(s, " %5d:%3d:%c %s %5d:%3d:%c\n", + ret = trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c\n", field->ctx.prev_pid, field->ctx.prev_prio, S, entry->type == TRACE_CTX ? 
"==>" : " +", + field->ctx.next_cpu, field->ctx.next_pid, field->ctx.next_prio, T); @@ -1817,10 +1821,11 @@ static int print_raw_fmt(struct trace_iterator *iter) state_to_char[field->ctx.next_state] : 'X'; if (entry->type == TRACE_WAKE) S = '+'; - ret = trace_seq_printf(s, "%d %d %c %d %d %c\n", + ret = trace_seq_printf(s, "%d %d %c %d %d %d %c\n", field->ctx.prev_pid, field->ctx.prev_prio, S, + field->ctx.next_cpu, field->ctx.next_pid, field->ctx.next_prio, T); @@ -1893,6 +1898,7 @@ static int print_hex_fmt(struct trace_iterator *iter) SEQ_PUT_HEX_FIELD_RET(s, field->ctx.prev_pid); SEQ_PUT_HEX_FIELD_RET(s, field->ctx.prev_prio); SEQ_PUT_HEX_FIELD_RET(s, S); + SEQ_PUT_HEX_FIELD_RET(s, field->ctx.next_cpu); SEQ_PUT_HEX_FIELD_RET(s, field->ctx.next_pid); SEQ_PUT_HEX_FIELD_RET(s, field->ctx.next_prio); SEQ_PUT_HEX_FIELD_RET(s, T); diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 5f54c875c8b..77c265f6a77 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -41,6 +41,7 @@ struct ctx_switch_entry { unsigned int next_pid; unsigned char next_prio; unsigned char next_state; + unsigned int next_cpu; }; /* -- cgit v1.2.3-70-g09d2 From d3ee6d992821f471193a7ee7a00af9ebb4bf5d01 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 4 Sep 2008 14:04:51 +0200 Subject: ftrace: make it depend on DEBUG_KERNEL make most of the tracers depend on DEBUG_KERNEL - that's their intended purpose. (most distributions have DEBUG_KERNEL enabled anyway so this is not a practical limitation - but it simplifies the tracing menu in the normal case) Signed-off-by: Ingo Molnar --- kernel/trace/Kconfig | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'kernel/trace') diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 5a9cffb0faf..16e5bb5daaa 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -22,6 +22,7 @@ config TRACING config FTRACE bool "Kernel Function Tracer" depends on HAVE_FTRACE + depends on DEBUG_KERNEL select FRAME_POINTER select TRACING select CONTEXT_SWITCH_TRACER @@ -40,6 +41,7 @@ config IRQSOFF_TRACER depends on TRACE_IRQFLAGS_SUPPORT depends on GENERIC_TIME depends on HAVE_FTRACE + depends on DEBUG_KERNEL select TRACE_IRQFLAGS select TRACING select TRACER_MAX_TRACE @@ -63,6 +65,7 @@ config PREEMPT_TRACER depends on GENERIC_TIME depends on PREEMPT depends on HAVE_FTRACE + depends on DEBUG_KERNEL select TRACING select TRACER_MAX_TRACE help @@ -90,6 +93,7 @@ config SYSPROF_TRACER config SCHED_TRACER bool "Scheduling Latency Tracer" depends on HAVE_FTRACE + depends on DEBUG_KERNEL select TRACING select CONTEXT_SWITCH_TRACER select TRACER_MAX_TRACE @@ -100,6 +104,7 @@ config SCHED_TRACER config CONTEXT_SWITCH_TRACER bool "Trace process context switches" depends on HAVE_FTRACE + depends on DEBUG_KERNEL select TRACING select MARKERS help @@ -120,6 +125,7 @@ config DYNAMIC_FTRACE bool "enable/disable ftrace tracepoints dynamically" depends on FTRACE depends on HAVE_DYNAMIC_FTRACE + depends on DEBUG_KERNEL default y help This option will modify all the calls to ftrace dynamically @@ -146,6 +152,7 @@ config FTRACE_SELFTEST config FTRACE_STARTUP_TEST bool "Perform a startup test on ftrace" depends on TRACING + depends on DEBUG_KERNEL select FTRACE_SELFTEST help This option performs a series of startup tests on ftrace. 
On bootup -- cgit v1.2.3-70-g09d2 From 644f991d4b920ab1f5043509651479420b293490 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Sat, 6 Sep 2008 01:06:04 -0400 Subject: ftrace: fix unlocking of hash This must be brown paper bag week for Steven Rostedt! While working on ftrace for PPC, I discovered that the hash locking done when CONFIG_FTRACE_MCOUNT_RECORD is not set, is totally incorrect. With a cut and paste error, I had the hash lock macro to lock for both hash_lock _and_ hash_unlock! This bug did not affect x86 since this bug was introduced when CONFIG_FTRACE_MCOUNT_RECORD was added to x86. Signed-off-by: Steven Rostedt Signed-off-by: Ingo Molnar --- kernel/trace/ftrace.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'kernel/trace') diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index b69966f0f14..c9e09d86e1d 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -170,7 +170,8 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops) */ static DEFINE_SPINLOCK(ftrace_hash_lock); #define ftrace_hash_lock(flags) spin_lock_irqsave(&ftrace_hash_lock, flags) -#define ftrace_hash_unlock(flags) spin_lock_irqsave(&ftrace_hash_lock, flags) +#define ftrace_hash_unlock(flags) \ + spin_unlock_irqrestore(&ftrace_hash_lock, flags) #else /* This is protected via the ftrace_lock with MCOUNT_RECORD. */ #define ftrace_hash_lock(flags) do { (void)(flags); } while (0) -- cgit v1.2.3-70-g09d2 From 45dcd8b8a8ca855591e3ac882d3a7fc255d09d43 Mon Sep 17 00:00:00 2001 From: Pekka Paalanen Date: Tue, 16 Sep 2008 21:56:41 +0300 Subject: ftrace: move mmiotrace functions out of trace.c Moves the mmiotrace specific functions from trace.c to trace_mmiotrace.c. Functions trace_wake_up(), tracing_get_trace_entry(), and tracing_generic_entry_update() are therefore made available outside trace.c. 
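With those three symbols visible outside trace.c, a tracer file can assemble its own entries; the moved functions all follow roughly this pattern (locking and IRQ state handled as in the bodies below):

        entry = tracing_get_trace_entry(tr, data);  /* reserve a slot */
        tracing_generic_entry_update(entry, 0);     /* fill pid, flags, ... */
        entry->type = TRACE_MMIO_RW;                /* tracer-specific type */
        entry->field.mmiorw = *rw;                  /* tracer-specific payload */
        trace_wake_up();                            /* wake blocked readers */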
Signed-off-by: Pekka Paalanen Acked-by: Steven Rostedt Signed-off-by: Ingo Molnar --- kernel/trace/trace.c | 46 ++---------------------------------------- kernel/trace/trace.h | 15 ++++++-------- kernel/trace/trace_mmiotrace.c | 42 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 50 insertions(+), 53 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 7e6cb4fe62f..d372bc53596 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -785,7 +785,7 @@ trace_next_page(struct trace_array_cpu *data, void *addr) return page_address(page); } -static inline struct trace_entry * +struct trace_entry * tracing_get_trace_entry(struct trace_array *tr, struct trace_array_cpu *data) { unsigned long idx, idx_next; @@ -821,7 +821,7 @@ tracing_get_trace_entry(struct trace_array *tr, struct trace_array_cpu *data) return entry; } -static inline void +void tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags) { struct task_struct *tsk = current; @@ -865,48 +865,6 @@ ftrace(struct trace_array *tr, struct trace_array_cpu *data, trace_function(tr, data, ip, parent_ip, flags); } -#ifdef CONFIG_MMIOTRACE -void __trace_mmiotrace_rw(struct trace_array *tr, struct trace_array_cpu *data, - struct mmiotrace_rw *rw) -{ - struct trace_entry *entry; - unsigned long irq_flags; - - raw_local_irq_save(irq_flags); - __raw_spin_lock(&data->lock); - - entry = tracing_get_trace_entry(tr, data); - tracing_generic_entry_update(entry, 0); - entry->type = TRACE_MMIO_RW; - entry->field.mmiorw = *rw; - - __raw_spin_unlock(&data->lock); - raw_local_irq_restore(irq_flags); - - trace_wake_up(); -} - -void __trace_mmiotrace_map(struct trace_array *tr, struct trace_array_cpu *data, - struct mmiotrace_map *map) -{ - struct trace_entry *entry; - unsigned long irq_flags; - - raw_local_irq_save(irq_flags); - __raw_spin_lock(&data->lock); - - entry = tracing_get_trace_entry(tr, data); - tracing_generic_entry_update(entry, 0); - entry->type = TRACE_MMIO_MAP; - entry->field.mmiomap = *map; - - __raw_spin_unlock(&data->lock); - raw_local_irq_restore(irq_flags); - - trace_wake_up(); -} -#endif - void __trace_stack(struct trace_array *tr, struct trace_array_cpu *data, unsigned long flags, diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 77c265f6a77..9d39aa00a9c 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -213,11 +213,17 @@ struct trace_iterator { long idx; }; +void trace_wake_up(void); void tracing_reset(struct trace_array_cpu *data); int tracing_open_generic(struct inode *inode, struct file *filp); struct dentry *tracing_init_dentry(void); void init_tracer_sysprof_debugfs(struct dentry *d_tracer); +struct trace_entry *tracing_get_trace_entry(struct trace_array *tr, + struct trace_array_cpu *data); +void tracing_generic_entry_update(struct trace_entry *entry, + unsigned long flags); + void ftrace(struct trace_array *tr, struct trace_array_cpu *data, unsigned long ip, @@ -291,15 +297,6 @@ extern unsigned long ftrace_update_tot_cnt; extern int DYN_FTRACE_TEST_NAME(void); #endif -#ifdef CONFIG_MMIOTRACE -extern void __trace_mmiotrace_rw(struct trace_array *tr, - struct trace_array_cpu *data, - struct mmiotrace_rw *rw); -extern void __trace_mmiotrace_map(struct trace_array *tr, - struct trace_array_cpu *data, - struct mmiotrace_map *map); -#endif - #ifdef CONFIG_FTRACE_STARTUP_TEST #ifdef CONFIG_FTRACE extern int trace_selftest_startup_function(struct tracer *trace, diff --git a/kernel/trace/trace_mmiotrace.c 
b/kernel/trace/trace_mmiotrace.c index 9b7a936f4b1..ef02747b26d 100644 --- a/kernel/trace/trace_mmiotrace.c +++ b/kernel/trace/trace_mmiotrace.c @@ -276,6 +276,27 @@ __init static int init_mmio_trace(void) } device_initcall(init_mmio_trace); +static void __trace_mmiotrace_rw(struct trace_array *tr, + struct trace_array_cpu *data, + struct mmiotrace_rw *rw) +{ + struct trace_entry *entry; + unsigned long irq_flags; + + raw_local_irq_save(irq_flags); + __raw_spin_lock(&data->lock); + + entry = tracing_get_trace_entry(tr, data); + tracing_generic_entry_update(entry, 0); + entry->type = TRACE_MMIO_RW; + entry->field.mmiorw = *rw; + + __raw_spin_unlock(&data->lock); + raw_local_irq_restore(irq_flags); + + trace_wake_up(); +} + void mmio_trace_rw(struct mmiotrace_rw *rw) { struct trace_array *tr = mmio_trace_array; @@ -283,6 +304,27 @@ void mmio_trace_rw(struct mmiotrace_rw *rw) __trace_mmiotrace_rw(tr, data, rw); } +static void __trace_mmiotrace_map(struct trace_array *tr, + struct trace_array_cpu *data, + struct mmiotrace_map *map) +{ + struct trace_entry *entry; + unsigned long irq_flags; + + raw_local_irq_save(irq_flags); + __raw_spin_lock(&data->lock); + + entry = tracing_get_trace_entry(tr, data); + tracing_generic_entry_update(entry, 0); + entry->type = TRACE_MMIO_MAP; + entry->field.mmiomap = *map; + + __raw_spin_unlock(&data->lock); + raw_local_irq_restore(irq_flags); + + trace_wake_up(); +} + void mmio_trace_mapping(struct mmiotrace_map *map) { struct trace_array *tr = mmio_trace_array; -- cgit v1.2.3-70-g09d2 From 801fe40001dfc263848552fb28924b766ed44ea4 Mon Sep 17 00:00:00 2001 From: Pekka Paalanen Date: Tue, 16 Sep 2008 21:58:24 +0300 Subject: ftrace: add trace_vprintk() trace_vprintk() for easier implementation of tracer specific *_printk functions. Add a check for no_tracer, and implement __ftrace_printk() as a wrapper. Signed-off-by: Pekka Paalanen Acked-by: Steven Rostedt Signed-off-by: Ingo Molnar --- kernel/trace/trace.c | 24 ++++++++++++++++++------ kernel/trace/trace.h | 1 + 2 files changed, 19 insertions(+), 6 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index d372bc53596..406de9cf282 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -3054,7 +3054,7 @@ static __init void tracer_init_debugfs(void) (sizeof(struct trace_field) - offsetof(struct trace_field, print.buf)) #define TRACE_CONT_BUF_SIZE sizeof(struct trace_field) -int __ftrace_printk(unsigned long ip, const char *fmt, ...) +int trace_vprintk(unsigned long ip, const char *fmt, va_list args) { static DEFINE_SPINLOCK(trace_buf_lock); static char trace_buf[TRACE_BUF_SIZE]; @@ -3064,10 +3064,9 @@ int __ftrace_printk(unsigned long ip, const char *fmt, ...) struct trace_array_cpu *data; struct trace_entry *entry; unsigned long flags; long disabled; - va_list ap; int cpu, len = 0, write, written = 0; - if (!(trace_flags & TRACE_ITER_PRINTK) || !tr->ctrl || tracing_disabled) + if (current_trace == &no_tracer || !tr->ctrl || tracing_disabled) return 0; local_irq_save(flags); @@ -3079,9 +3078,7 @@ int __ftrace_printk(unsigned long ip, const char *fmt, ...) goto out; spin_lock(&trace_buf_lock); - va_start(ap, fmt); - len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, ap); - va_end(ap); + len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); len = min(len, TRACE_BUF_SIZE-1); trace_buf[len] = 0; @@ -3120,6 +3117,21 @@ int __ftrace_printk(unsigned long ip, const char *fmt, ...) return len; } +EXPORT_SYMBOL_GPL(trace_vprintk); + +int __ftrace_printk(unsigned long ip, const char *fmt, ...)
+{ + int ret; + va_list ap; + + if (!(trace_flags & TRACE_ITER_PRINTK)) + return 0; + + va_start(ap, fmt); + ret = trace_vprintk(ip, fmt, ap); + va_end(ap); + return ret; +} EXPORT_SYMBOL_GPL(__ftrace_printk); static int trace_panic_handler(struct notifier_block *this, diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 9d39aa00a9c..be3b3cf95f4 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -333,6 +333,7 @@ extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...); extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt); extern long ns2usecs(cycle_t nsec); +extern int trace_vprintk(unsigned long ip, const char *fmt, va_list args); extern unsigned long trace_flags; -- cgit v1.2.3-70-g09d2 From 9e57fb35d711331a9b1410c5c56ebeb3733428a0 Mon Sep 17 00:00:00 2001 From: Pekka Paalanen Date: Tue, 16 Sep 2008 22:00:34 +0300 Subject: x86 mmiotrace: implement mmiotrace_printk() Offer mmiotrace users a function to inject markers from inside the kernel. This depends on the trace_vprintk() patch. Signed-off-by: Pekka Paalanen Acked-by: Steven Rostedt Signed-off-by: Ingo Molnar --- arch/x86/mm/mmio-mod.c | 19 ++++++++++++++++++- arch/x86/mm/testmmiotrace.c | 4 ++++ include/linux/mmiotrace.h | 17 +++++++++++++++-- kernel/trace/trace_mmiotrace.c | 5 +++++ 4 files changed, 42 insertions(+), 3 deletions(-) (limited to 'kernel/trace') diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c index 754bd1eaf4f..5e2e2e72ee8 100644 --- a/arch/x86/mm/mmio-mod.c +++ b/arch/x86/mm/mmio-mod.c @@ -75,7 +75,7 @@ static LIST_HEAD(trace_list); /* struct remap_trace */ * and trace_lock. * - Routines depending on is_enabled() must take trace_lock. * - trace_list users must hold trace_lock. - * - is_enabled() guarantees that mmio_trace_record is allowed. + * - is_enabled() guarantees that mmio_trace_{rw,mapping} are allowed. * - pre/post callbacks assume the effect of is_enabled() being true. */ @@ -379,6 +379,23 @@ void mmiotrace_iounmap(volatile void __iomem *addr) iounmap_trace_core(addr); } +int mmiotrace_printk(const char *fmt, ...) 
+{ + int ret = 0; + va_list args; + unsigned long flags; + va_start(args, fmt); + + spin_lock_irqsave(&trace_lock, flags); + if (is_enabled()) + ret = mmio_trace_printk(fmt, args); + spin_unlock_irqrestore(&trace_lock, flags); + + va_end(args); + return ret; +} +EXPORT_SYMBOL(mmiotrace_printk); + static void clear_trace_list(void) { struct remap_trace *trace; diff --git a/arch/x86/mm/testmmiotrace.c b/arch/x86/mm/testmmiotrace.c index d877c5b423e..ab50a8d7402 100644 --- a/arch/x86/mm/testmmiotrace.c +++ b/arch/x86/mm/testmmiotrace.c @@ -3,6 +3,7 @@ */ #include #include +#include #define MODULE_NAME "testmmiotrace" @@ -13,6 +14,7 @@ MODULE_PARM_DESC(mmio_address, "Start address of the mapping of 16 kB."); static void do_write_test(void __iomem *p) { unsigned int i; + mmiotrace_printk("Write test.\n"); for (i = 0; i < 256; i++) iowrite8(i, p + i); for (i = 1024; i < (5 * 1024); i += 2) @@ -24,6 +26,7 @@ static void do_write_test(void __iomem *p) static void do_read_test(void __iomem *p) { unsigned int i; + mmiotrace_printk("Read test.\n"); for (i = 0; i < 256; i++) ioread8(p + i); for (i = 1024; i < (5 * 1024); i += 2) @@ -39,6 +42,7 @@ static void do_test(void) pr_err(MODULE_NAME ": could not ioremap, aborting.\n"); return; } + mmiotrace_printk("ioremap returned %p.\n", p); do_write_test(p); do_read_test(p); iounmap(p); diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h index 61d19e1b7a0..60cc3bf5c53 100644 --- a/include/linux/mmiotrace.h +++ b/include/linux/mmiotrace.h @@ -34,11 +34,15 @@ extern void unregister_kmmio_probe(struct kmmio_probe *p); /* Called from page fault handler. */ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr); -/* Called from ioremap.c */ #ifdef CONFIG_MMIOTRACE +/* Called from ioremap.c */ extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size, void __iomem *addr); extern void mmiotrace_iounmap(volatile void __iomem *addr); + +/* For anyone to insert markers. Remember trailing newline. */ +extern int mmiotrace_printk(const char *fmt, ...) + __attribute__ ((format (printf, 1, 2))); #else static inline void mmiotrace_ioremap(resource_size_t offset, unsigned long size, void __iomem *addr) @@ -48,7 +52,15 @@ static inline void mmiotrace_ioremap(resource_size_t offset, static inline void mmiotrace_iounmap(volatile void __iomem *addr) { } -#endif /* CONFIG_MMIOTRACE_HOOKS */ + +static inline int mmiotrace_printk(const char *fmt, ...) + __attribute__ ((format (printf, 1, 0))); + +static inline int mmiotrace_printk(const char *fmt, ...) 
+{ + return 0; +} +#endif /* CONFIG_MMIOTRACE */ enum mm_io_opcode { MMIO_READ = 0x1, /* struct mmiotrace_rw */ @@ -81,5 +93,6 @@ extern void enable_mmiotrace(void); extern void disable_mmiotrace(void); extern void mmio_trace_rw(struct mmiotrace_rw *rw); extern void mmio_trace_mapping(struct mmiotrace_map *map); +extern int mmio_trace_printk(const char *fmt, va_list args); #endif /* MMIOTRACE_H */ diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c index ef02747b26d..767d1faf56e 100644 --- a/kernel/trace/trace_mmiotrace.c +++ b/kernel/trace/trace_mmiotrace.c @@ -335,3 +335,8 @@ void mmio_trace_mapping(struct mmiotrace_map *map) __trace_mmiotrace_map(tr, data, map); preempt_enable(); } + +int mmio_trace_printk(const char *fmt, va_list args) +{ + return trace_vprintk(0, fmt, args); +} -- cgit v1.2.3-70-g09d2 From fc5e27ae4b45a0619701a83f30d9b7fad7ed9400 Mon Sep 17 00:00:00 2001 From: Pekka Paalanen Date: Tue, 16 Sep 2008 22:02:27 +0300 Subject: mmiotrace: handle TRACE_PRINT entries Also make trace_seq_print_cont() non-static, and add a newline if the seq buffer can't hold all data. Signed-off-by: Pekka Paalanen Acked-by: Steven Rostedt Signed-off-by: Ingo Molnar --- kernel/trace/trace.c | 31 +++++++++++-------------------- kernel/trace/trace.h | 19 +++++++++++++++++++ kernel/trace/trace_mmiotrace.c | 23 +++++++++++++++++++++++ 3 files changed, 53 insertions(+), 20 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 406de9cf282..7e7154f7700 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -199,23 +199,6 @@ unsigned long nsecs_to_usecs(unsigned long nsecs) return nsecs / 1000; } -/* - * trace_flag_type is an enumeration that holds different - * states when a trace occurs. These are: - * IRQS_OFF - interrupts were disabled - * NEED_RESCED - reschedule is requested - * HARDIRQ - inside an interrupt handler - * SOFTIRQ - inside a softirq handler - * CONT - multiple entries hold the trace item - */ -enum trace_flag_type { - TRACE_FLAG_IRQS_OFF = 0x01, - TRACE_FLAG_NEED_RESCHED = 0x02, - TRACE_FLAG_HARDIRQ = 0x04, - TRACE_FLAG_SOFTIRQ = 0x08, - TRACE_FLAG_CONT = 0x10, -}; - /* * TRACE_ITER_SYM_MASK masks the options in trace_flags that * control the output of kernel symbols. @@ -1517,12 +1500,16 @@ lat_print_timestamp(struct trace_seq *s, unsigned long long abs_usecs, static const char state_to_char[] = TASK_STATE_TO_CHAR_STR; -static void -trace_seq_print_cont(struct trace_seq *s, struct trace_iterator *iter) +/* + * The message is supposed to contain an ending newline. + * If the printing stops prematurely, try to add a newline of our own. 
+ */ +void trace_seq_print_cont(struct trace_seq *s, struct trace_iterator *iter) { struct trace_array *tr = iter->tr; struct trace_array_cpu *data = tr->data[iter->cpu]; struct trace_entry *ent; + bool ok = true; ent = trace_entry_idx(tr, data, iter, iter->cpu); if (!ent || ent->type != TRACE_CONT) { @@ -1531,10 +1518,14 @@ trace_seq_print_cont(struct trace_seq *s, struct trace_iterator *iter) } do { - trace_seq_printf(s, "%s", ent->cont.buf); + if (ok) + ok = (trace_seq_printf(s, "%s", ent->cont.buf) > 0); __trace_iterator_increment(iter, iter->cpu); ent = trace_entry_idx(tr, data, iter, iter->cpu); } while (ent && ent->type == TRACE_CONT); + + if (!ok) + trace_seq_putc(s, '\n'); } static int diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index be3b3cf95f4..648433d18cc 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -71,6 +71,23 @@ struct print_entry { char buf[]; }; +/* + * trace_flag_type is an enumeration that holds different + * states when a trace occurs. These are: + * IRQS_OFF - interrupts were disabled + * NEED_RESCED - reschedule is requested + * HARDIRQ - inside an interrupt handler + * SOFTIRQ - inside a softirq handler + * CONT - multiple entries hold the trace item + */ +enum trace_flag_type { + TRACE_FLAG_IRQS_OFF = 0x01, + TRACE_FLAG_NEED_RESCHED = 0x02, + TRACE_FLAG_HARDIRQ = 0x04, + TRACE_FLAG_SOFTIRQ = 0x08, + TRACE_FLAG_CONT = 0x10, +}; + /* * The trace field - the most basic unit of tracing. This is what * is printed in the end as a single line in the trace output, such as: @@ -330,6 +347,8 @@ extern int trace_selftest_startup_sysprof(struct tracer *trace, extern void *head_page(struct trace_array_cpu *data); extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...); +extern void trace_seq_print_cont(struct trace_seq *s, + struct trace_iterator *iter); extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt); extern long ns2usecs(cycle_t nsec); diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c index 767d1faf56e..a108c326f36 100644 --- a/kernel/trace/trace_mmiotrace.c +++ b/kernel/trace/trace_mmiotrace.c @@ -245,6 +245,27 @@ static int mmio_print_map(struct trace_iterator *iter) return 0; } +static int mmio_print_mark(struct trace_iterator *iter) +{ + struct trace_entry *entry = iter->ent; + const char *msg = entry->field.print.buf; + struct trace_seq *s = &iter->seq; + unsigned long long t = ns2usecs(entry->field.t); + unsigned long usec_rem = do_div(t, 1000000ULL); + unsigned secs = (unsigned long)t; + int ret; + + /* The trailing newline must be in the message. 
*/ + ret = trace_seq_printf(s, "MARK %lu.%06lu %s", secs, usec_rem, msg); + if (!ret) + return 0; + + if (entry->field.flags & TRACE_FLAG_CONT) + trace_seq_print_cont(s, iter); + + return 1; +} + /* return 0 to abort printing without consuming current entry in pipe mode */ static int mmio_print_line(struct trace_iterator *iter) { @@ -253,6 +274,8 @@ static int mmio_print_line(struct trace_iterator *iter) return mmio_print_rw(iter); case TRACE_MMIO_MAP: return mmio_print_map(iter); + case TRACE_PRINT: + return mmio_print_mark(iter); default: return 1; /* ignore unknown entries */ } -- cgit v1.2.3-70-g09d2 From 5bf9a1ee350a10feb94107de32a203d81fbbe706 Mon Sep 17 00:00:00 2001 From: Pekka Paalanen Date: Tue, 16 Sep 2008 22:06:42 +0300 Subject: ftrace: inject markers via trace_marker file MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Allow a user to inject a marker (TRACE_PRINT entry) into the trace ring buffer. The related file operations are derived from code by Frédéric Weisbecker . Signed-off-by: Pekka Paalanen Acked-by: Steven Rostedt Signed-off-by: Ingo Molnar --- Documentation/tracers/mmiotrace.txt | 5 +-- kernel/trace/trace.c | 76 ++++++++++++++++++++++++++++++++++--- kernel/trace/trace.h | 4 ++ 3 files changed, 77 insertions(+), 8 deletions(-) (limited to 'kernel/trace') diff --git a/Documentation/tracers/mmiotrace.txt b/Documentation/tracers/mmiotrace.txt index a4afb560a45..5bbbe209622 100644 --- a/Documentation/tracers/mmiotrace.txt +++ b/Documentation/tracers/mmiotrace.txt @@ -36,7 +36,7 @@ $ mount -t debugfs debugfs /debug $ echo mmiotrace > /debug/tracing/current_tracer $ cat /debug/tracing/trace_pipe > mydump.txt & Start X or whatever. -$ echo "X is up" > /debug/tracing/marker +$ echo "X is up" > /debug/tracing/trace_marker $ echo none > /debug/tracing/current_tracer Check for lost events. @@ -59,9 +59,8 @@ The 'cat' process should stay running (sleeping) in the background. Load the driver you want to trace and use it. Mmiotrace will only catch MMIO accesses to areas that are ioremapped while mmiotrace is active. -[Unimplemented feature:] During tracing you can place comments (markers) into the trace by -$ echo "X is up" > /debug/tracing/marker +$ echo "X is up" > /debug/tracing/trace_marker This makes it easier to see which part of the (huge) trace corresponds to which action. It is recommended to place descriptive markers about what you do. diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 7e7154f7700..eee1fd96489 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -2879,6 +2879,66 @@ tracing_entries_write(struct file *filp, const char __user *ubuf, return cnt; } +static int tracing_open_mark(struct inode *inode, struct file *filp) +{ + int ret; + + ret = tracing_open_generic(inode, filp); + if (ret) + return ret; + + if (current_trace == &no_tracer) + return -ENODEV; + + return 0; +} + +static int mark_printk(const char *fmt, ...) 
+{ + int ret; + va_list args; + va_start(args, fmt); + ret = trace_vprintk(0, fmt, args); + va_end(args); + return ret; +} + +static ssize_t +tracing_mark_write(struct file *filp, const char __user *ubuf, + size_t cnt, loff_t *fpos) +{ + char *buf; + char *end; + struct trace_array *tr = &global_trace; + + if (current_trace == &no_tracer || !tr->ctrl || tracing_disabled) + return -EINVAL; + + if (cnt > TRACE_BUF_SIZE) + cnt = TRACE_BUF_SIZE; + + buf = kmalloc(cnt + 1, GFP_KERNEL); + if (buf == NULL) + return -ENOMEM; + + if (copy_from_user(buf, ubuf, cnt)) { + kfree(buf); + return -EFAULT; + } + + /* Cut from the first nil or newline. */ + buf[cnt] = '\0'; + end = strchr(buf, '\n'); + if (end) + *end = '\0'; + + cnt = mark_printk("%s\n", buf); + kfree(buf); + *fpos += cnt; + + return cnt; +} + static struct file_operations tracing_max_lat_fops = { .open = tracing_open_generic, .read = tracing_max_lat_read, @@ -2910,6 +2970,11 @@ static struct file_operations tracing_entries_fops = { .write = tracing_entries_write, }; +static struct file_operations tracing_mark_fops = { + .open = tracing_open_mark, + .write = tracing_mark_write, +}; + #ifdef CONFIG_DYNAMIC_FTRACE static ssize_t @@ -3027,6 +3092,12 @@ static __init void tracer_init_debugfs(void) pr_warning("Could not create debugfs " "'trace_entries' entry\n"); + entry = debugfs_create_file("trace_marker", 0220, d_tracer, + NULL, &tracing_mark_fops); + if (!entry) + pr_warning("Could not create debugfs " + "'trace_marker' entry\n"); + #ifdef CONFIG_DYNAMIC_FTRACE entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer, &ftrace_update_tot_cnt, @@ -3040,11 +3111,6 @@ static __init void tracer_init_debugfs(void) #endif } -#define TRACE_BUF_SIZE 1024 -#define TRACE_PRINT_BUF_SIZE \ - (sizeof(struct trace_field) - offsetof(struct trace_field, print.buf)) -#define TRACE_CONT_BUF_SIZE sizeof(struct trace_field) - int trace_vprintk(unsigned long ip, const char *fmt, va_list args) { static DEFINE_SPINLOCK(trace_buf_lock); diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 648433d18cc..42f65d0097f 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -124,6 +124,10 @@ struct trace_entry { }; #define TRACE_ENTRY_SIZE sizeof(struct trace_entry) +#define TRACE_BUF_SIZE 1024 +#define TRACE_PRINT_BUF_SIZE \ + (sizeof(struct trace_field) - offsetof(struct trace_field, print.buf)) +#define TRACE_CONT_BUF_SIZE sizeof(struct trace_field) /* * The CPU trace array - it consists of thousands of trace entries -- cgit v1.2.3-70-g09d2 From fb1b6d8b5154c692172a424e45fbd0573295cb93 Mon Sep 17 00:00:00 2001 From: Steven Noonan Date: Fri, 19 Sep 2008 03:06:43 -0700 Subject: ftrace: add nop tracer A no-op tracer which can serve two purposes: 1. A template for development of a new tracer. 2. A convenient way to see ftrace_printk() calls without an irrelevant trace making the output messy. 
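As a template, the only required glue is a struct tracer and its registration; a sketch of the minimal shape (the names here are placeholders, the real file added below is the reference):

        static struct tracer my_trace __read_mostly =
        {
                .name           = "mytracer",   /* hypothetical name */
                .init           = my_trace_init,
                .reset          = my_trace_reset,
                .ctrl_update    = my_trace_ctrl_update,
        };

        __init static int init_my_trace(void)
        {
                return register_tracer(&my_trace);
        }
        device_initcall(init_my_trace);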
[ mingo@elte.hu: resolved conflicts ] Signed-off-by: Steven Noonan Signed-off-by: Ingo Molnar --- kernel/trace/Kconfig | 10 +++++++ kernel/trace/Makefile | 1 + kernel/trace/trace.h | 4 +++ kernel/trace/trace_nop.c | 65 +++++++++++++++++++++++++++++++++++++++++++ kernel/trace/trace_selftest.c | 9 ++++++ 5 files changed, 89 insertions(+) create mode 100644 kernel/trace/trace_nop.c (limited to 'kernel/trace') diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 16e5bb5daaa..d7b2de74463 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -101,6 +101,16 @@ config SCHED_TRACER This tracer tracks the latency of the highest priority task to be scheduled in, starting from the point it has woken up. +config NOP_TRACER + bool "NOP Tracer" + depends on HAVE_FTRACE + depends on DEBUG_KERNEL + select TRACING + help + This tracer does nothing. The primary purpose for it is to + politely print the output of ftrace_printk() calls without + the overhead of an irrelevant trace taking place. + config CONTEXT_SWITCH_TRACER bool "Trace process context switches" depends on HAVE_FTRACE diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index 58ec61c44bd..73ba13f5a46 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile @@ -19,6 +19,7 @@ obj-$(CONFIG_FTRACE) += trace_functions.o obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o +obj-$(CONFIG_NOP_TRACER) += trace_nop.o obj-$(CONFIG_STACK_TRACER) += trace_stack.o obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 42f65d0097f..447d4b9b639 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -339,6 +339,10 @@ extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace, extern int trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr); #endif +#ifdef CONFIG_NOP_TRACER +extern int trace_selftest_startup_nop(struct tracer *trace, + struct trace_array *tr); +#endif #ifdef CONFIG_CONTEXT_SWITCH_TRACER extern int trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr); diff --git a/kernel/trace/trace_nop.c b/kernel/trace/trace_nop.c new file mode 100644 index 00000000000..dafaefb8403 --- /dev/null +++ b/kernel/trace/trace_nop.c @@ -0,0 +1,65 @@ +/* + * nop tracer + * + * Copyright (C) 2008 Steven Noonan + * + */ + +#include +#include +#include +#include + +#include "trace.h" + +static struct trace_array *ctx_trace; + +static void start_nop_trace(struct trace_array *tr) +{ + /* Nothing to do! */ +} + +static void stop_nop_trace(struct trace_array *tr) +{ + /* Nothing to do! 
*/ +} + +static void nop_trace_init(struct trace_array *tr) +{ + ctx_trace = tr; + + if (tr->ctrl) + start_nop_trace(tr); +} + +static void nop_trace_reset(struct trace_array *tr) +{ + if (tr->ctrl) + stop_nop_trace(tr); +} + +static void nop_trace_ctrl_update(struct trace_array *tr) +{ + /* When starting a new trace, reset the buffers */ + if (tr->ctrl) + start_nop_trace(tr); + else + stop_nop_trace(tr); +} + +static struct tracer nop_trace __read_mostly = +{ + .name = "nop", + .init = nop_trace_init, + .reset = nop_trace_reset, + .ctrl_update = nop_trace_ctrl_update, +#ifdef CONFIG_FTRACE_SELFTEST + .selftest = trace_selftest_startup_nop, +#endif +}; + +__init static int init_nop_trace(void) +{ + return register_tracer(&nop_trace); +} +device_initcall(init_nop_trace); diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index 630715bbd57..82db9103b9b 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c @@ -418,6 +418,15 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array * } #endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */ +#ifdef CONFIG_NOP_TRACER +int +trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr) +{ + /* What could possibly go wrong? */ + return 0; +} +#endif + #ifdef CONFIG_SCHED_TRACER static int trace_wakeup_test_thread(void *data) { -- cgit v1.2.3-70-g09d2 From 71c67d58b5660f8e42c7d4c3e77cbc03fac5ed31 Mon Sep 17 00:00:00 2001 From: Steven Noonan Date: Sat, 20 Sep 2008 01:00:37 -0700 Subject: ftrace: mcount_addr defined but not used When CONFIG_DYNAMIC_FTRACE isn't used, neither is mcount_addr. This patch eliminates that warning. Signed-off-by: Steven Noonan Signed-off-by: Ingo Molnar --- kernel/trace/ftrace.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index c9e09d86e1d..7694f3e5979 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -36,14 +36,6 @@ int ftrace_enabled __read_mostly; static int last_ftrace_enabled; -/* - * Since MCOUNT_ADDR may point to mcount itself, we do not want - * to get it confused by reading a reference in the code as we - * are parsing on objcopy output of text. Use a variable for - * it instead. - */ -static unsigned long mcount_addr = MCOUNT_ADDR; - /* * ftrace_disabled is set when an anomaly is discovered. * ftrace_disabled is much stronger than ftrace_enabled. @@ -178,6 +170,14 @@ static DEFINE_SPINLOCK(ftrace_hash_lock); #define ftrace_hash_unlock(flags) do { } while(0) #endif +/* + * Since MCOUNT_ADDR may point to mcount itself, we do not want + * to get it confused by reading a reference in the code as we + * are parsing on objcopy output of text. Use a variable for + * it instead. + */ +static unsigned long mcount_addr = MCOUNT_ADDR; + static struct task_struct *ftraced_task; enum { -- cgit v1.2.3-70-g09d2 From 8925b394eca0bb0a444a6487d702d0f650e94a10 Mon Sep 17 00:00:00 2001 From: Steven Noonan Date: Sat, 20 Sep 2008 01:00:38 -0700 Subject: trace: remove pointless ifdefs The functions are already 'extern' anyway, so there's no problem with linkage. Removing these ifdefs also helps find any potential compiler errors. Suggested by Andrew Morton. 
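The C rule this relies on, in miniature: a declaration by itself creates no linker reference, so it is harmless even when the definition is compiled out; only an actual call must be guarded, and the callers already are:

        /* always safe, whether or not trace_selftest.c is built */
        extern int trace_selftest_startup_nop(struct tracer *trace,
                                              struct trace_array *tr);

        /* only a call creates a reference the linker must resolve;
         * such calls sit behind the same CONFIG_* options as the
         * definitions themselves */
        ret = trace_selftest_startup_nop(trace, tr);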
Signed-off-by: Steven Noonan Signed-off-by: Ingo Molnar --- kernel/trace/trace.h | 16 ---------------- 1 file changed, 16 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 447d4b9b639..c8c687088b4 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -319,38 +319,22 @@ extern int DYN_FTRACE_TEST_NAME(void); #endif #ifdef CONFIG_FTRACE_STARTUP_TEST -#ifdef CONFIG_FTRACE extern int trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr); -#endif -#ifdef CONFIG_IRQSOFF_TRACER extern int trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr); -#endif -#ifdef CONFIG_PREEMPT_TRACER extern int trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr); -#endif -#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER) extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr); -#endif -#ifdef CONFIG_SCHED_TRACER extern int trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr); -#endif -#ifdef CONFIG_NOP_TRACER extern int trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr); -#endif -#ifdef CONFIG_CONTEXT_SWITCH_TRACER extern int trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr); -#endif -#ifdef CONFIG_SYSPROF_TRACER extern int trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr); -#endif #endif /* CONFIG_FTRACE_STARTUP_TEST */ extern void *head_page(struct trace_array_cpu *data); -- cgit v1.2.3-70-g09d2 From 35cb5ed01261f5669657d2f1720aca902299887d Mon Sep 17 00:00:00 2001 From: Frédéric Weisbecker Date: Sun, 21 Sep 2008 20:10:14 +0200 Subject: tracing/ftrace: make nop tracer reset previous entries If the nop tracer is selected, some old entries from the previous tracer could still be enqueued. Tracing has to be reset. Signed-off-by: Frederic Weisbecker Cc: Steven Rostedt Cc: Steven Noonan Signed-off-by: Ingo Molnar --- kernel/trace/trace_nop.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'kernel/trace') diff --git a/kernel/trace/trace_nop.c b/kernel/trace/trace_nop.c index dafaefb8403..9fb02c17ad0 100644 --- a/kernel/trace/trace_nop.c +++ b/kernel/trace/trace_nop.c @@ -26,8 +26,12 @@ static void stop_nop_trace(struct trace_array *tr) static void nop_trace_init(struct trace_array *tr) { + int cpu; ctx_trace = tr; + for_each_online_cpu(cpu) + tracing_reset(tr->data[cpu]); + if (tr->ctrl) start_nop_trace(tr); } -- cgit v1.2.3-70-g09d2 From 2a3a4f669df2164288d11406d11d5e4933bf5e53 Mon Sep 17 00:00:00 2001 From: Frédéric Weisbecker Date: Sun, 21 Sep 2008 20:12:14 +0200 Subject: tracing/ftrace: tracing engine depends on Nop Tracer Now that the nop tracer is used as the default tracer by replacing the "none" tracer, the tracing engine depends on it.
Signed-off-by: Frederic Weisbecker Cc: Steven Rostedt Cc: Steven Noonan Signed-off-by: Ingo Molnar --- kernel/trace/Kconfig | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index d7b2de74463..254328dec67 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -1,8 +1,13 @@ # # Architectures that offer an FTRACE implementation should select HAVE_FTRACE: # + +config NOP_TRACER + bool + config HAVE_FTRACE bool + select NOP_TRACER config HAVE_DYNAMIC_FTRACE bool @@ -101,16 +106,6 @@ config SCHED_TRACER This tracer tracks the latency of the highest priority task to be scheduled in, starting from the point it has woken up. -config NOP_TRACER - bool "NOP Tracer" - depends on HAVE_FTRACE - depends on DEBUG_KERNEL - select TRACING - help - This tracer does nothing. The primary purpose for it is to - politely print the output of ftrace_printk() calls without - the overhead of an irrelevant trace taking place. - config CONTEXT_SWITCH_TRACER bool "Trace process context switches" depends on HAVE_FTRACE -- cgit v1.2.3-70-g09d2 From 43a15386c4faf913f7d70a47748c266d6210cd6e Mon Sep 17 00:00:00 2001 From: Frédéric Weisbecker Date: Sun, 21 Sep 2008 20:16:30 +0200 Subject: tracing/ftrace: replace none tracer by nop tracer Replace "none" tracer by the recently created "nop" tracer. Both are pretty similar except that nop accepts TRACE_PRINT or TRACE_SPECIAL entries. And as a consequence, changing the size of the ring buffer now requires that tracing has already been disabled. Signed-off-by: Frederic Weisbecker Cc: Steven Rostedt Cc: Steven Noonan Signed-off-by: Ingo Molnar --- kernel/trace/trace.c | 49 +++++++++--------------------------------------- kernel/trace/trace.h | 2 ++ kernel/trace/trace_nop.c | 7 +------ 3 files changed, 12 insertions(+), 46 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index eee1fd96489..f2ef72fa3f1 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -142,24 +142,6 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait); /* trace_flags holds iter_ctrl options */ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT; -static notrace void no_trace_init(struct trace_array *tr) -{ - int cpu; - - ftrace_function_enabled = 0; - if(tr->ctrl) - for_each_online_cpu(cpu) - tracing_reset(tr->data[cpu]); - tracer_enabled = 0; -} - -/* dummy trace to disable tracing */ -static struct tracer no_tracer __read_mostly = { - .name = "none", - .init = no_trace_init -}; - - /** * trace_wake_up - wake up tasks waiting for trace input * @@ -962,7 +944,7 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) long disabled; int cpu; - if (tracing_disabled || current_trace == &no_tracer || !tr->ctrl) + if (tracing_disabled || !tr->ctrl) return; local_irq_save(flags); @@ -2795,6 +2777,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf, unsigned long val; char buf[64]; int i, ret; + struct trace_array *tr = filp->private_data; if (cnt >= sizeof(buf)) return -EINVAL; @@ -2814,9 +2797,9 @@ tracing_entries_write(struct file *filp, const char __user *ubuf, mutex_lock(&trace_types_lock); - if (current_trace != &no_tracer) { + if (tr->ctrl) { cnt = -EBUSY; - pr_info("ftrace: set current_tracer to none" + pr_info("ftrace: please disable tracing" " before modifying buffer size\n"); goto out; } @@ -2879,20 +2862,6 @@ tracing_entries_write(struct file *filp, const char __user *ubuf, return cnt; } -static int 
tracing_open_mark(struct inode *inode, struct file *filp) -{ - int ret; - - ret = tracing_open_generic(inode, filp); - if (ret) - return ret; - - if (current_trace == &no_tracer) - return -ENODEV; - - return 0; -} - static int mark_printk(const char *fmt, ...) { int ret; @@ -2911,7 +2880,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf, char *end; struct trace_array *tr = &global_trace; - if (current_trace == &no_tracer || !tr->ctrl || tracing_disabled) + if (!tr->ctrl || tracing_disabled) return -EINVAL; if (cnt > TRACE_BUF_SIZE) @@ -2971,7 +2940,7 @@ static struct file_operations tracing_entries_fops = { }; static struct file_operations tracing_mark_fops = { - .open = tracing_open_mark, + .open = tracing_open_generic, .write = tracing_mark_write, }; @@ -3123,7 +3092,7 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args) long disabled; int cpu, len = 0, write, written = 0; - if (current_trace == &no_tracer || !tr->ctrl || tracing_disabled) + if (!tr->ctrl || tracing_disabled) return 0; local_irq_save(flags); @@ -3539,8 +3508,8 @@ __init static int tracer_alloc_buffers(void) trace_init_cmdlines(); - register_tracer(&no_tracer); - current_trace = &no_tracer; + register_tracer(&nop_trace); + current_trace = &nop_trace; /* All seems OK, enable tracing */ global_trace.ctrl = tracer_enabled; diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index c8c687088b4..cb2c3fb7dd5 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -369,4 +369,6 @@ enum trace_iterator_flags { TRACE_ITER_PRINTK = 0x400, }; +extern struct tracer nop_trace; + #endif /* _LINUX_KERNEL_TRACE_H */ diff --git a/kernel/trace/trace_nop.c b/kernel/trace/trace_nop.c index 9fb02c17ad0..16c9ba060ba 100644 --- a/kernel/trace/trace_nop.c +++ b/kernel/trace/trace_nop.c @@ -51,7 +51,7 @@ static void nop_trace_ctrl_update(struct trace_array *tr) stop_nop_trace(tr); } -static struct tracer nop_trace __read_mostly = +struct tracer nop_trace __read_mostly = { .name = "nop", .init = nop_trace_init, @@ -62,8 +62,3 @@ static struct tracer nop_trace __read_mostly = #endif }; -__init static int init_nop_trace(void) -{ - return register_tracer(&nop_trace); -} -device_initcall(init_nop_trace); -- cgit v1.2.3-70-g09d2 From 05736a427f7e16be948ccbf39782bd3a6ae16b14 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Mon, 22 Sep 2008 14:55:47 -0700 Subject: ftrace: warn on failure to disable mcount callers With the recent updates to ftrace, there should not be any failures when modifying the code. If there is, then we need to warn about it. This patch has a cleaned up version of the code that I used to discover that the weak symbols were causing failures. Signed-off-by: Steven Rostedt Signed-off-by: Andrew Morton Signed-off-by: Ingo Molnar --- kernel/trace/ftrace.c | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) (limited to 'kernel/trace') diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 7694f3e5979..4dda4f60a2a 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -576,6 +576,16 @@ static void ftrace_shutdown_replenish(void) ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL); } +static void print_ip_ins(const char *fmt, unsigned char *p) +{ + int i; + + printk(KERN_CONT "%s", fmt); + + for (i = 0; i < MCOUNT_INSN_SIZE; i++) + printk(KERN_CONT "%s%02x", i ? 
":" : "", p[i]); +} + static int ftrace_code_disable(struct dyn_ftrace *rec) { @@ -590,6 +600,23 @@ ftrace_code_disable(struct dyn_ftrace *rec) failed = ftrace_modify_code(ip, call, nop); if (failed) { + switch (failed) { + case 1: + WARN_ON_ONCE(1); + pr_info("ftrace faulted on modifying "); + print_ip_sym(ip); + break; + case 2: + WARN_ON_ONCE(1); + pr_info("ftrace failed to modify "); + print_ip_sym(ip); + print_ip_ins(" expected: ", call); + print_ip_ins(" actual: ", (unsigned char *)ip); + print_ip_ins(" replace: ", nop); + printk(KERN_CONT "\n"); + break; + } + rec->flags |= FTRACE_FL_FAILED; return 0; } -- cgit v1.2.3-70-g09d2 From d13744cd6e3fef373a3fe656ac349b4e7c49ff79 Mon Sep 17 00:00:00 2001 From: Frédéric Weisbecker Date: Tue, 23 Sep 2008 11:32:08 +0100 Subject: tracing/ftrace: add the boot tracer Add the boot/initcall tracer. It's primary purpose is to be able to trace the initcalls. It is intended to be used with scripts/bootgraph.pl after some small improvements. Note that it is not active after its init. To avoid tracing (and so crashing) before the whole tracing engine init, you have to explicitly call start_boot_trace() after do_pre_smp_initcalls() to enable it. Signed-off-by: Frederic Weisbecker Signed-off-by: Ingo Molnar --- include/linux/ftrace.h | 19 +++++++++ kernel/trace/trace.h | 4 ++ kernel/trace/trace_boot.c | 101 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 124 insertions(+) create mode 100644 kernel/trace/trace_boot.c (limited to 'kernel/trace') diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 5de9903645d..91954eb6460 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -5,6 +5,8 @@ #include #include +#include +#include extern int ftrace_enabled; extern int @@ -209,4 +211,21 @@ ftrace_init_module(unsigned long *start, unsigned long *end) { } #endif +struct boot_trace { + pid_t caller; + initcall_t func; + int result; + unsigned long long duration; +}; + +#ifdef CONFIG_BOOT_TRACER +extern void trace_boot(struct boot_trace *it); +extern void start_boot_trace(void); +#else +static inline void trace_boot(struct boot_trace *it) { } +static inline void start_boot_trace(void) { } +#endif + + + #endif /* _LINUX_FTRACE_H */ diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index cb2c3fb7dd5..b28bf8812ef 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -6,6 +6,7 @@ #include #include #include +#include enum trace_type { __TRACE_FIRST_TYPE = 0, @@ -19,6 +20,7 @@ enum trace_type { TRACE_SPECIAL, TRACE_MMIO_RW, TRACE_MMIO_MAP, + TRACE_BOOT, __TRACE_LAST_TYPE }; @@ -30,6 +32,7 @@ struct ftrace_entry { unsigned long ip; unsigned long parent_ip; }; +extern struct tracer boot_tracer; /* * Context switch trace entry - which task (and prio) we switched from/to: @@ -108,6 +111,7 @@ struct trace_field { struct print_entry print; struct mmiotrace_rw mmiorw; struct mmiotrace_map mmiomap; + struct boot_trace initcall; }; }; diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c new file mode 100644 index 00000000000..c65ef8ffd6b --- /dev/null +++ b/kernel/trace/trace_boot.c @@ -0,0 +1,101 @@ +/* + * ring buffer based initcalls tracer + * + * Copyright (C) 2008 Frederic Weisbecker + * + */ + +#include +#include +#include + +#include "trace.h" + +static struct trace_array *boot_trace; +static int trace_boot_enabled; + + +/* Should be started after do_pre_smp_initcalls() in init/main.c */ +void start_boot_trace(void) +{ + trace_boot_enabled = 1; +} + +void stop_boot_trace(struct trace_array *tr) +{ + 
trace_boot_enabled = 0; +} +
+static void boot_trace_init(struct trace_array *tr) +{ + int cpu; + boot_trace = tr; + + trace_boot_enabled = 0; + + for_each_cpu_mask(cpu, cpu_possible_map) + tracing_reset(tr->data[cpu]); +} +
+static void boot_trace_ctrl_update(struct trace_array *tr) +{ + if (tr->ctrl) + start_boot_trace(); + else + stop_boot_trace(tr); +} +
+static int initcall_print_line(struct trace_iterator *iter) +{ + int ret = 1; + struct trace_entry *entry = iter->ent; + struct boot_trace *it = &entry->field.initcall; + struct trace_seq *s = &iter->seq; + + if (iter->ent->type == TRACE_BOOT) + ret = trace_seq_printf(s, "%pF called from %i " + "returned %d after %lld msecs\n", + it->func, it->caller, it->result, + it->duration); + if (ret) + return 1; + return 0; +} +
+struct tracer boot_tracer __read_mostly = +{ + .name = "initcall", + .init = boot_trace_init, + .reset = stop_boot_trace, + .ctrl_update = boot_trace_ctrl_update, + .print_line = initcall_print_line, +}; + +
+void trace_boot(struct boot_trace *it) +{ + struct trace_entry *entry; + struct trace_array_cpu *data; + unsigned long irq_flags; + struct trace_array *tr = boot_trace; + + if (!trace_boot_enabled) + return; + + preempt_disable(); + data = tr->data[smp_processor_id()]; + + raw_local_irq_save(irq_flags); + __raw_spin_lock(&data->lock); + + entry = tracing_get_trace_entry(tr, data); + tracing_generic_entry_update(entry, 0); + entry->type = TRACE_BOOT; + entry->field.initcall = *it; + + __raw_spin_unlock(&data->lock); + raw_local_irq_restore(irq_flags); + trace_wake_up(); + + preempt_enable(); +} -- cgit v1.2.3-70-g09d2

From b5ad384e79add1d87fff54070000dadcf218ffab Mon Sep 17 00:00:00 2001 From: Frédéric Weisbecker Date: Tue, 23 Sep 2008 11:34:32 +0100 Subject: tracing/ftrace: make tracing suitable to run the boot tracer

The tracing engine now has to be initialized in early_initcall to set up the boot tracer. Only the debugfs settings will be initialized at fs_initcall time.
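The init/main.c side of this is not part of these diffs. Schematically, and only as a sketch (the demo_* wrapper and the ktime-based timing are assumptions, not code from the patch), the intended hookup looks like this:

        /* Hypothetical wrapper around one initcall, filling a boot_trace
         * record as declared in the include/linux/ftrace.h hunk above. */
        #include <linux/ftrace.h>
        #include <linux/ktime.h>
        #include <linux/sched.h>
        #include <asm/div64.h>

        static int __init demo_run_initcall(initcall_t fn)
        {
                struct boot_trace it;
                ktime_t t0 = ktime_get();
                u64 dur;

                it.caller = task_pid_nr(current);
                it.func = fn;
                it.result = fn();

                /* printed later as "... returned %d after %lld msecs" */
                dur = ktime_to_ns(ktime_sub(ktime_get(), t0));
                do_div(dur, NSEC_PER_MSEC);
                it.duration = dur;

                trace_boot(&it);        /* a no-op until start_boot_trace() */
                return it.result;
        }

With tracer_alloc_buffers() running as an early_initcall, init/main.c can then call start_boot_trace() right after do_pre_smp_initcalls(), and every later initcall wrapped this way lands in the buffer.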
Signed-off-by: Frederic Weisbecker Signed-off-by: Ingo Molnar --- kernel/trace/trace.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) (limited to 'kernel/trace')

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index f2ef72fa3f1..6ada059832a 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c
@@ -2990,7 +2990,7 @@ struct dentry *tracing_init_dentry(void) #include "trace_selftest.c" #endif -static __init void tracer_init_debugfs(void) +static __init int tracer_init_debugfs(void) { struct dentry *d_tracer; struct dentry *entry;
@@ -3078,6 +3078,7 @@ static __init void tracer_init_debugfs(void) #ifdef CONFIG_SYSPROF_TRACER init_tracer_sysprof_debugfs(d_tracer); #endif + return 0; } int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
@@ -3504,17 +3505,20 @@ __init static int tracer_alloc_buffers(void) pages, trace_nr_entries, (long)TRACE_ENTRY_SIZE); pr_info(" actual entries %ld\n", global_trace.entries); - tracer_init_debugfs(); - trace_init_cmdlines(); register_tracer(&nop_trace); +#ifdef CONFIG_BOOT_TRACER + register_tracer(&boot_tracer); + current_trace = &boot_tracer; + current_trace->init(&global_trace); +#else current_trace = &nop_trace; +#endif /* All seems OK, enable tracing */ global_trace.ctrl = tracer_enabled; tracing_disabled = 0; - atomic_notifier_chain_register(&panic_notifier_list, &trace_panic_notifier);
@@ -3548,4 +3552,5 @@ __init static int tracer_alloc_buffers(void) } return ret; } -fs_initcall(tracer_alloc_buffers); +early_initcall(tracer_alloc_buffers); +fs_initcall(tracer_init_debugfs); -- cgit v1.2.3-70-g09d2

From 1f5c2abbdeb2bb07b20c6a66bfecefe6c867b1ee Mon Sep 17 00:00:00 2001 From: Frédéric Weisbecker Date: Tue, 23 Sep 2008 11:36:20 +0100 Subject: tracing/ftrace: give an entry on the config for boot tracer

Add an entry to the kernel config for selecting the boot tracer.

Signed-off-by: Frederic Weisbecker Signed-off-by: Ingo Molnar --- kernel/trace/Kconfig | 12 ++++++++++++ kernel/trace/Makefile | 1 + 2 files changed, 13 insertions(+) (limited to 'kernel/trace')

diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 254328dec67..81a17ef6b94 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig
@@ -116,6 +116,18 @@ config CONTEXT_SWITCH_TRACER This tracer gets called from the context switch and records all switching of tasks. +config BOOT_TRACER + bool "Trace boot initcalls" + depends on HAVE_FTRACE + depends on DEBUG_KERNEL + select TRACING + help + This tracer helps developers to optimize boot times: it records + the timings of the initcalls. Its aim is to be parsed by the + /scripts/bootgraph.pl tool to produce pretty graphics about + boot inefficiencies, giving a visual representation of the + delays during initcalls.
+ config STACK_TRACER bool "Trace max stack" depends on HAVE_FTRACE diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index 73ba13f5a46..35a07f7cfa8 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile
@@ -22,5 +22,6 @@ obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o obj-$(CONFIG_NOP_TRACER) += trace_nop.o obj-$(CONFIG_STACK_TRACER) += trace_stack.o obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o +obj-$(CONFIG_BOOT_TRACER) += trace_boot.o libftrace-y := ftrace.o -- cgit v1.2.3-70-g09d2

From 3ce2b9200da8b7170cc7463b7ee4212fad7b291e Mon Sep 17 00:00:00 2001 From: Frédéric Weisbecker Date: Wed, 24 Sep 2008 10:36:09 +0100 Subject: ftrace/fastboot: disable tracers self-tests when boot tracer is selected

The tracing engine resets the ring buffer, and the tracers touch it too during their self-tests. These self-tests run while tracers are being registered, and they conflict with boot tracing, which is busy logging initcalls. We have to disable the tracing self-tests if the boot tracer is selected.

Reported-by: Ingo Molnar Signed-off-by: Frederic Weisbecker Signed-off-by: Ingo Molnar --- kernel/trace/Kconfig | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'kernel/trace')

diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 81a17ef6b94..4feb3c81f94 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig
@@ -126,7 +126,9 @@ config BOOT_TRACER the timings of the initcalls. Its aim is to be parsed by the /scripts/bootgraph.pl tool to produce pretty graphics about boot inefficiencies, giving a visual representation of the - delays during initcalls. + delays during initcalls. Note that tracers self tests can't + be enabled if this tracer is selected since only one tracer + should touch the tracing buffer at a time. config STACK_TRACER bool "Trace max stack"
@@ -168,8 +170,7 @@ config FTRACE_SELFTEST config FTRACE_STARTUP_TEST bool "Perform a startup test on ftrace" - depends on TRACING - depends on DEBUG_KERNEL + depends on TRACING && DEBUG_KERNEL && !BOOT_TRACER select FTRACE_SELFTEST help This option performs a series of startup tests on ftrace. On bootup -- cgit v1.2.3-70-g09d2

From 7c572ac0cf5e8cd8e17f084bc6c253296cc42279 Mon Sep 17 00:00:00 2001 From: Frédéric Weisbecker Date: Thu, 25 Sep 2008 13:25:30 +0100 Subject: tracing/ftrace: don't consume unhandled entries by boot tracer

When the boot tracer can't handle an entry, it returns 1 as if it had consumed it. It should return 0 so that the entry falls back to the other output functions.

Signed-off-by: Frederic Weisbecker Signed-off-by: Ingo Molnar --- kernel/trace/trace_boot.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel/trace')

diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c index c65ef8ffd6b..d5c9e2e4a9c 100644 --- a/kernel/trace/trace_boot.c +++ b/kernel/trace/trace_boot.c
@@ -47,7 +47,7 @@ static void boot_trace_ctrl_update(struct trace_array *tr) static int initcall_print_line(struct trace_iterator *iter) { - int ret = 1; + int ret = 0; struct trace_entry *entry = iter->ent; struct boot_trace *it = &entry->field.initcall; struct trace_seq *s = &iter->seq; -- cgit v1.2.3-70-g09d2

From 5aa60c6073456812251caf9177cb921b2de68f77 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Mon, 29 Sep 2008 23:02:37 -0400 Subject: ftrace: give time for wakeup test to run

It is possible that the testing thread in the ftrace wakeup test does not run before we stop the trace. This will cause the trace to fail since nothing will be in the buffers.
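The race is easier to see schematically. A condensed sketch of the selftest flow follows (the demo_* helpers are hypothetical; the real code, patched just below, lives in trace_selftest.c):

        static int demo_wakeup_selftest(struct tracer *trace,
                                        struct trace_array *tr)
        {
                struct task_struct *p = demo_spawn_rt_thread(); /* hypothetical */

                wake_up_process(p);

                /* Without a delay here, tr->ctrl can be cleared before p is
                 * ever scheduled, so no wakeup entry reaches the buffers and
                 * the test fails although nothing is broken. */
                msleep(100);

                tr->ctrl = 0;                   /* stop the tracing */
                trace->ctrl_update(tr);

                return demo_check_buffers(tr);  /* hypothetical */
        }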
This patch adds a small wait in the wakeup test to allow for the woken task to run and be traced.

Signed-off-by: Steven Rostedt Signed-off-by: Ingo Molnar --- kernel/trace/trace_selftest.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'kernel/trace')

diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index 82db9103b9b..5ebd4b13549 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c
@@ -498,6 +498,9 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr) wake_up_process(p); + /* give a little time to let the thread wake up */ + msleep(100); + /* stop the tracing. */ tr->ctrl = 0; trace->ctrl_update(tr); -- cgit v1.2.3-70-g09d2

From 7a8e76a3829f1067b70f715771ff88baf2fbf3c3 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Mon, 29 Sep 2008 23:02:38 -0400 Subject: tracing: unified trace buffer

This is a unified tracing buffer that implements a ring buffer that hopefully everyone will eventually be able to use.

The events recorded into the buffer have the following structure:

        struct ring_buffer_event {
                u32 type:2, len:3, time_delta:27;
                u32 array[];
        };

The minimum size of an event is 8 bytes. All events are 4 byte aligned inside the buffer.

There are 4 types (all internal use for the ring buffer, only the data type is exported to the interface users).

RINGBUF_TYPE_PADDING: this type is used to note extra space at the end of a buffer page.

RINGBUF_TYPE_TIME_EXTEND: This type is used when the time between events is greater than the 27 bit delta can hold. We add another 32 bits, and record that in its own event (8 byte size).

RINGBUF_TYPE_TIME_STAMP: (Not implemented yet). This will hold data to help keep the buffer timestamps in sync.

RINGBUF_TYPE_DATA: The event actually holds user data.

The "len" field is only three bits. Since the data must be 4 byte aligned, this field is shifted left by 2, giving a max length of 28 bytes. If the data load is greater than 28 bytes, the first array field holds the full length of the data load and the len field is set to zero.

Example, data size of 7 bytes:

        type = RINGBUF_TYPE_DATA
        len = 2
        time_delta: <time-stamp> - <prev_event-time-stamp>
        array[0..1]: <7 bytes of data> <1 byte empty>

This event is saved in 12 bytes of the buffer.

An event with 82 bytes of data:

        type = RINGBUF_TYPE_DATA
        len = 0
        time_delta: <time-stamp> - <prev_event-time-stamp>
        array[0]: 84 (Note the alignment)
        array[1..14]: <82 bytes of data> <2 bytes empty>

The above event is saved in 92 bytes (if my math is correct): 82 bytes of data, 2 bytes empty, 4 byte header, 4 byte length.

Do not reference the above event struct directly. Use the following functions to gain access to the event table, since the ring_buffer_event structure may change in the future.

ring_buffer_event_length(event): get the length of the event. This is the size of the memory used to record this event, and not the size of the data payload.

ring_buffer_event_time_delta(event): get the time delta of the event. This returns the delta time stamp since the last event. Note: Even though this is in the header, there should be no reason to access this directly, except for debugging.

ring_buffer_event_data(event): get the data from the event. This is the function to use to get the actual data from the event. Note, it is only a pointer to the data inside the buffer. This data must be copied to another location, otherwise you risk it being written over in the buffer.

ring_buffer_lock: A way to lock the entire buffer.
ring_buffer_unlock: unlock the buffer.

ring_buffer_alloc: create a new ring buffer.
Can choose between overwrite or consumer/producer mode. Overwrite will overwrite old data, whereas consumer/producer will throw away new data if the consumer catches up with the producer. The consumer/producer mode is the default.

ring_buffer_free: free the ring buffer.

ring_buffer_resize: resize the buffer. Changes the size of each cpu buffer. Note, it is up to the caller to ensure that the buffer is not being used while this is happening. This requirement may go away, but do not count on it.

ring_buffer_lock_reserve: locks the ring buffer and allocates an entry on the buffer to write to.
ring_buffer_unlock_commit: unlocks the ring buffer and commits it to the buffer.

ring_buffer_write: writes some data into the ring buffer.

ring_buffer_peek: Look at the next item in the cpu buffer.
ring_buffer_consume: get the next item in the cpu buffer and consume it. That is, this function increments the head pointer.

ring_buffer_read_start: Start an iterator of a cpu buffer. For now, this disables the cpu buffer, until you issue a finish. This is just because we do not want the iterator to be overwritten. This restriction may change in the future. But note, this is used for static reading of a buffer which is usually done "after" a trace. Live readings would want to use the ring_buffer_consume above, which will not disable the ring buffer.

ring_buffer_read_finish: Finishes the read iterator and re-enables the ring buffer.

ring_buffer_iter_peek: Look at the next item in the cpu iterator.
ring_buffer_read: Read the iterator and increment it.
ring_buffer_iter_reset: Reset the iterator to point to the beginning of the cpu buffer.
ring_buffer_iter_empty: Returns true if the iterator is at the end of the cpu buffer.

ring_buffer_size: returns the size in bytes of each cpu buffer. Note, the real size is this times the number of CPUs.

ring_buffer_reset_cpu: Sets the cpu buffer to empty.
ring_buffer_reset: sets all cpu buffers to empty.

ring_buffer_swap_cpu: swaps a cpu buffer from one buffer with a cpu buffer of another buffer. This is handy when you want to take a snapshot of a running trace on just one cpu. Having a backup buffer to swap with facilitates this. The ftrace max latency tracers use this.

ring_buffer_empty: Returns true if the ring buffer is empty.
ring_buffer_empty_cpu: Returns true if the cpu buffer is empty.

ring_buffer_record_disable: disable all cpu buffers (read only)
ring_buffer_record_disable_cpu: disable a single cpu buffer (read only)
ring_buffer_record_enable: enable all cpu buffers.
ring_buffer_record_enable_cpu: enable a single cpu buffer.

ring_buffer_entries: The number of entries in a ring buffer.
ring_buffer_overruns: The number of entries removed due to writing wrap.

ring_buffer_time_stamp: Get the time stamp used by the ring buffer.
ring_buffer_normalize_time_stamp: normalize the ring buffer time stamp into nanosecs.

I still need to implement the GTOD feature. But we need support from the cpu frequency infrastructure. But this can be done at a later time without affecting the ring buffer interface.
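A minimal write/read round trip through this interface, as a sketch (only calls declared in the new header below are used; the demo wrapper, the buffer size and the trimmed error handling are illustrative):

        #include <linux/ring_buffer.h>  /* the new header added below */
        #include <linux/smp.h>
        #include <linux/string.h>

        static void demo_ring_buffer_roundtrip(void)
        {
                struct ring_buffer *buffer;
                struct ring_buffer_event *event;
                unsigned long flags;
                int val = 42;
                u64 ts;

                buffer = ring_buffer_alloc(65536, 0);   /* consumer/producer mode */
                if (!buffer)
                        return;

                /* zero-copy path: reserve, fill, commit */
                event = ring_buffer_lock_reserve(buffer, sizeof(val), &flags);
                if (event) {
                        memcpy(ring_buffer_event_data(event), &val, sizeof(val));
                        ring_buffer_unlock_commit(buffer, event, flags);
                }

                /* one-shot copying path */
                ring_buffer_write(buffer, sizeof(val), &val);

                /* consuming read; assumes we are still on the writing CPU */
                event = ring_buffer_consume(buffer, raw_smp_processor_id(), &ts);
                if (event)
                        val = *(int *)ring_buffer_event_data(event);

                ring_buffer_free(buffer);
        }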
Signed-off-by: Steven Rostedt Signed-off-by: Ingo Molnar --- include/linux/ring_buffer.h | 130 ++++ kernel/trace/Kconfig | 4 + kernel/trace/Makefile | 1 + kernel/trace/ring_buffer.c | 1672 +++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 1807 insertions(+) create mode 100644 include/linux/ring_buffer.h create mode 100644 kernel/trace/ring_buffer.c (limited to 'kernel/trace') diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h new file mode 100644 index 00000000000..c52375b8330 --- /dev/null +++ b/include/linux/ring_buffer.h @@ -0,0 +1,130 @@ +#ifndef _LINUX_RING_BUFFER_H +#define _LINUX_RING_BUFFER_H + +#include +#include + +struct ring_buffer; +struct ring_buffer_iter; + +/* + * Don't reference this struct directly, use functions below. + */ +struct ring_buffer_event { + u32 type:2, len:3, time_delta:27; + u32 array[]; +}; + +/** + * enum ring_buffer_type - internal ring buffer types + * + * @RINGBUF_TYPE_PADDING: Left over page padding + * array is ignored + * size is variable depending on how much + * padding is needed + * + * @RINGBUF_TYPE_TIME_EXTEND: Extend the time delta + * array[0] = time delta (28 .. 59) + * size = 8 bytes + * + * @RINGBUF_TYPE_TIME_STAMP: Sync time stamp with external clock + * array[0] = tv_nsec + * array[1] = tv_sec + * size = 16 bytes + * + * @RINGBUF_TYPE_DATA: Data record + * If len is zero: + * array[0] holds the actual length + * array[1..(length+3)/4-1] holds data + * else + * length = len << 2 + * array[0..(length+3)/4] holds data + */ +enum ring_buffer_type { + RINGBUF_TYPE_PADDING, + RINGBUF_TYPE_TIME_EXTEND, + /* FIXME: RINGBUF_TYPE_TIME_STAMP not implemented */ + RINGBUF_TYPE_TIME_STAMP, + RINGBUF_TYPE_DATA, +}; + +unsigned ring_buffer_event_length(struct ring_buffer_event *event); +void *ring_buffer_event_data(struct ring_buffer_event *event); + +/** + * ring_buffer_event_time_delta - return the delta timestamp of the event + * @event: the event to get the delta timestamp of + * + * The delta timestamp is the 27 bit timestamp since the last event. + */ +static inline unsigned +ring_buffer_event_time_delta(struct ring_buffer_event *event) +{ + return event->time_delta; +} + +void ring_buffer_lock(struct ring_buffer *buffer, unsigned long *flags); +void ring_buffer_unlock(struct ring_buffer *buffer, unsigned long flags); + +/* + * size is in bytes for each per CPU buffer. 
+ */ +struct ring_buffer * +ring_buffer_alloc(unsigned long size, unsigned flags); +void ring_buffer_free(struct ring_buffer *buffer); + +int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size); + +struct ring_buffer_event * +ring_buffer_lock_reserve(struct ring_buffer *buffer, + unsigned long length, + unsigned long *flags); +int ring_buffer_unlock_commit(struct ring_buffer *buffer, + struct ring_buffer_event *event, + unsigned long flags); +int ring_buffer_write(struct ring_buffer *buffer, + unsigned long length, void *data); + +struct ring_buffer_event * +ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts); +struct ring_buffer_event * +ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts); + +struct ring_buffer_iter * +ring_buffer_read_start(struct ring_buffer *buffer, int cpu); +void ring_buffer_read_finish(struct ring_buffer_iter *iter); + +struct ring_buffer_event * +ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts); +struct ring_buffer_event * +ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts); +void ring_buffer_iter_reset(struct ring_buffer_iter *iter); +int ring_buffer_iter_empty(struct ring_buffer_iter *iter); + +unsigned long ring_buffer_size(struct ring_buffer *buffer); + +void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu); +void ring_buffer_reset(struct ring_buffer *buffer); + +int ring_buffer_swap_cpu(struct ring_buffer *buffer_a, + struct ring_buffer *buffer_b, int cpu); + +int ring_buffer_empty(struct ring_buffer *buffer); +int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu); + +void ring_buffer_record_disable(struct ring_buffer *buffer); +void ring_buffer_record_enable(struct ring_buffer *buffer); +void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu); +void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu); + +unsigned long ring_buffer_entries(struct ring_buffer *buffer); +unsigned long ring_buffer_overruns(struct ring_buffer *buffer); + +u64 ring_buffer_time_stamp(int cpu); +void ring_buffer_normalize_time_stamp(int cpu, u64 *ts); + +enum ring_buffer_flags { + RB_FL_OVERWRITE = 1 << 0, +}; + +#endif /* _LINUX_RING_BUFFER_H */ diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 4feb3c81f94..396aea11398 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -18,9 +18,13 @@ config HAVE_FTRACE_MCOUNT_RECORD config TRACER_MAX_TRACE bool +config RING_BUFFER + bool + config TRACING bool select DEBUG_FS + select RING_BUFFER select STACKTRACE select TRACEPOINTS diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index 35a07f7cfa8..a85dfba88ba 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile @@ -11,6 +11,7 @@ obj-y += trace_selftest_dynamic.o endif obj-$(CONFIG_FTRACE) += libftrace.o +obj-$(CONFIG_RING_BUFFER) += ring_buffer.o obj-$(CONFIG_TRACING) += trace.o obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c new file mode 100644 index 00000000000..830a2930dd9 --- /dev/null +++ b/kernel/trace/ring_buffer.c @@ -0,0 +1,1672 @@ +/* + * Generic ring buffer + * + * Copyright (C) 2008 Steven Rostedt + */ +#include +#include +#include +#include +#include +#include +#include +#include /* used for sched_clock() (for now) */ +#include +#include +#include +#include + +/* Up this if you want to test the TIME_EXTENTS and normalization */ +#define DEBUG_SHIFT 0 + +/* FIXME!!! 
*/ +u64 ring_buffer_time_stamp(int cpu) +{ + /* shift to debug/test normalization and TIME_EXTENTS */ + return sched_clock() << DEBUG_SHIFT; +} + +void ring_buffer_normalize_time_stamp(int cpu, u64 *ts) +{ + /* Just stupid testing the normalize function and deltas */ + *ts >>= DEBUG_SHIFT; +} + +#define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event)) +#define RB_ALIGNMENT_SHIFT 2 +#define RB_ALIGNMENT (1 << RB_ALIGNMENT_SHIFT) +#define RB_MAX_SMALL_DATA 28 + +enum { + RB_LEN_TIME_EXTEND = 8, + RB_LEN_TIME_STAMP = 16, +}; + +/* inline for ring buffer fast paths */ +static inline unsigned +rb_event_length(struct ring_buffer_event *event) +{ + unsigned length; + + switch (event->type) { + case RINGBUF_TYPE_PADDING: + /* undefined */ + return -1; + + case RINGBUF_TYPE_TIME_EXTEND: + return RB_LEN_TIME_EXTEND; + + case RINGBUF_TYPE_TIME_STAMP: + return RB_LEN_TIME_STAMP; + + case RINGBUF_TYPE_DATA: + if (event->len) + length = event->len << RB_ALIGNMENT_SHIFT; + else + length = event->array[0]; + return length + RB_EVNT_HDR_SIZE; + default: + BUG(); + } + /* not hit */ + return 0; +} + +/** + * ring_buffer_event_length - return the length of the event + * @event: the event to get the length of + */ +unsigned ring_buffer_event_length(struct ring_buffer_event *event) +{ + return rb_event_length(event); +} + +/* inline for ring buffer fast paths */ +static inline void * +rb_event_data(struct ring_buffer_event *event) +{ + BUG_ON(event->type != RINGBUF_TYPE_DATA); + /* If length is in len field, then array[0] has the data */ + if (event->len) + return (void *)&event->array[0]; + /* Otherwise length is in array[0] and array[1] has the data */ + return (void *)&event->array[1]; +} + +/** + * ring_buffer_event_data - return the data of the event + * @event: the event to get the data from + */ +void *ring_buffer_event_data(struct ring_buffer_event *event) +{ + return rb_event_data(event); +} + +#define for_each_buffer_cpu(buffer, cpu) \ + for_each_cpu_mask(cpu, buffer->cpumask) + +#define TS_SHIFT 27 +#define TS_MASK ((1ULL << TS_SHIFT) - 1) +#define TS_DELTA_TEST (~TS_MASK) + +/* + * This hack stolen from mm/slob.c. + * We can store per page timing information in the page frame of the page. + * Thanks to Peter Zijlstra for suggesting this idea. + */ +struct buffer_page { + union { + struct { + unsigned long flags; /* mandatory */ + atomic_t _count; /* mandatory */ + u64 time_stamp; /* page time stamp */ + unsigned size; /* size of page data */ + struct list_head list; /* list of free pages */ + }; + struct page page; + }; +}; + +/* + * We need to fit the time_stamp delta into 27 bits. + */ +static inline int test_time_stamp(u64 delta) +{ + if (delta & TS_DELTA_TEST) + return 1; + return 0; +} + +#define BUF_PAGE_SIZE PAGE_SIZE + +/* + * head_page == tail_page && head == tail then buffer is empty. 
+ */ +struct ring_buffer_per_cpu { + int cpu; + struct ring_buffer *buffer; + spinlock_t lock; + struct lock_class_key lock_key; + struct list_head pages; + unsigned long head; /* read from head */ + unsigned long tail; /* write to tail */ + struct buffer_page *head_page; + struct buffer_page *tail_page; + unsigned long overrun; + unsigned long entries; + u64 write_stamp; + u64 read_stamp; + atomic_t record_disabled; +}; + +struct ring_buffer { + unsigned long size; + unsigned pages; + unsigned flags; + int cpus; + cpumask_t cpumask; + atomic_t record_disabled; + + struct mutex mutex; + + struct ring_buffer_per_cpu **buffers; +}; + +struct ring_buffer_iter { + struct ring_buffer_per_cpu *cpu_buffer; + unsigned long head; + struct buffer_page *head_page; + u64 read_stamp; +}; + +#define RB_WARN_ON(buffer, cond) \ + if (unlikely(cond)) { \ + atomic_inc(&buffer->record_disabled); \ + WARN_ON(1); \ + return -1; \ + } + +/** + * check_pages - integrity check of buffer pages + * @cpu_buffer: CPU buffer with pages to test + * + * As a safty measure we check to make sure the data pages have not + * been corrupted. + */ +static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer) +{ + struct list_head *head = &cpu_buffer->pages; + struct buffer_page *page, *tmp; + + RB_WARN_ON(cpu_buffer, head->next->prev != head); + RB_WARN_ON(cpu_buffer, head->prev->next != head); + + list_for_each_entry_safe(page, tmp, head, list) { + RB_WARN_ON(cpu_buffer, page->list.next->prev != &page->list); + RB_WARN_ON(cpu_buffer, page->list.prev->next != &page->list); + } + + return 0; +} + +static unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer) +{ + return cpu_buffer->head_page->size; +} + +static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, + unsigned nr_pages) +{ + struct list_head *head = &cpu_buffer->pages; + struct buffer_page *page, *tmp; + unsigned long addr; + LIST_HEAD(pages); + unsigned i; + + for (i = 0; i < nr_pages; i++) { + addr = __get_free_page(GFP_KERNEL); + if (!addr) + goto free_pages; + page = (struct buffer_page *)virt_to_page(addr); + list_add(&page->list, &pages); + } + + list_splice(&pages, head); + + rb_check_pages(cpu_buffer); + + return 0; + + free_pages: + list_for_each_entry_safe(page, tmp, &pages, list) { + list_del_init(&page->list); + __free_page(&page->page); + } + return -ENOMEM; +} + +static struct ring_buffer_per_cpu * +rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu) +{ + struct ring_buffer_per_cpu *cpu_buffer; + int ret; + + cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()), + GFP_KERNEL, cpu_to_node(cpu)); + if (!cpu_buffer) + return NULL; + + cpu_buffer->cpu = cpu; + cpu_buffer->buffer = buffer; + spin_lock_init(&cpu_buffer->lock); + INIT_LIST_HEAD(&cpu_buffer->pages); + + ret = rb_allocate_pages(cpu_buffer, buffer->pages); + if (ret < 0) + goto fail_free_buffer; + + cpu_buffer->head_page + = list_entry(cpu_buffer->pages.next, struct buffer_page, list); + cpu_buffer->tail_page + = list_entry(cpu_buffer->pages.next, struct buffer_page, list); + + return cpu_buffer; + + fail_free_buffer: + kfree(cpu_buffer); + return NULL; +} + +static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) +{ + struct list_head *head = &cpu_buffer->pages; + struct buffer_page *page, *tmp; + + list_for_each_entry_safe(page, tmp, head, list) { + list_del_init(&page->list); + __free_page(&page->page); + } + kfree(cpu_buffer); +} + +/** + * ring_buffer_alloc - allocate a new ring_buffer + * @size: the size in bytes that is 
needed. + * @flags: attributes to set for the ring buffer. + * + * Currently the only flag that is available is the RB_FL_OVERWRITE + * flag. This flag means that the buffer will overwrite old data + * when the buffer wraps. If this flag is not set, the buffer will + * drop data when the tail hits the head. + */ +struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags) +{ + struct ring_buffer *buffer; + int bsize; + int cpu; + + /* keep it in its own cache line */ + buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()), + GFP_KERNEL); + if (!buffer) + return NULL; + + buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); + buffer->flags = flags; + + /* need at least two pages */ + if (buffer->pages == 1) + buffer->pages++; + + buffer->cpumask = cpu_possible_map; + buffer->cpus = nr_cpu_ids; + + bsize = sizeof(void *) * nr_cpu_ids; + buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()), + GFP_KERNEL); + if (!buffer->buffers) + goto fail_free_buffer; + + for_each_buffer_cpu(buffer, cpu) { + buffer->buffers[cpu] = + rb_allocate_cpu_buffer(buffer, cpu); + if (!buffer->buffers[cpu]) + goto fail_free_buffers; + } + + mutex_init(&buffer->mutex); + + return buffer; + + fail_free_buffers: + for_each_buffer_cpu(buffer, cpu) { + if (buffer->buffers[cpu]) + rb_free_cpu_buffer(buffer->buffers[cpu]); + } + kfree(buffer->buffers); + + fail_free_buffer: + kfree(buffer); + return NULL; +} + +/** + * ring_buffer_free - free a ring buffer. + * @buffer: the buffer to free. + */ +void +ring_buffer_free(struct ring_buffer *buffer) +{ + int cpu; + + for_each_buffer_cpu(buffer, cpu) + rb_free_cpu_buffer(buffer->buffers[cpu]); + + kfree(buffer); +} + +static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer); + +static void +rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages) +{ + struct buffer_page *page; + struct list_head *p; + unsigned i; + + atomic_inc(&cpu_buffer->record_disabled); + synchronize_sched(); + + for (i = 0; i < nr_pages; i++) { + BUG_ON(list_empty(&cpu_buffer->pages)); + p = cpu_buffer->pages.next; + page = list_entry(p, struct buffer_page, list); + list_del_init(&page->list); + __free_page(&page->page); + } + BUG_ON(list_empty(&cpu_buffer->pages)); + + rb_reset_cpu(cpu_buffer); + + rb_check_pages(cpu_buffer); + + atomic_dec(&cpu_buffer->record_disabled); + +} + +static void +rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer, + struct list_head *pages, unsigned nr_pages) +{ + struct buffer_page *page; + struct list_head *p; + unsigned i; + + atomic_inc(&cpu_buffer->record_disabled); + synchronize_sched(); + + for (i = 0; i < nr_pages; i++) { + BUG_ON(list_empty(pages)); + p = pages->next; + page = list_entry(p, struct buffer_page, list); + list_del_init(&page->list); + list_add_tail(&page->list, &cpu_buffer->pages); + } + rb_reset_cpu(cpu_buffer); + + rb_check_pages(cpu_buffer); + + atomic_dec(&cpu_buffer->record_disabled); +} + +/** + * ring_buffer_resize - resize the ring buffer + * @buffer: the buffer to resize. + * @size: the new size. + * + * The tracer is responsible for making sure that the buffer is + * not being used while changing the size. + * Note: We may be able to change the above requirement by using + * RCU synchronizations. + * + * Minimum size is 2 * BUF_PAGE_SIZE. + * + * Returns -1 on failure. 
+ */ +int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size) +{ + struct ring_buffer_per_cpu *cpu_buffer; + unsigned nr_pages, rm_pages, new_pages; + struct buffer_page *page, *tmp; + unsigned long buffer_size; + unsigned long addr; + LIST_HEAD(pages); + int i, cpu; + + size = DIV_ROUND_UP(size, BUF_PAGE_SIZE); + size *= BUF_PAGE_SIZE; + buffer_size = buffer->pages * BUF_PAGE_SIZE; + + /* we need a minimum of two pages */ + if (size < BUF_PAGE_SIZE * 2) + size = BUF_PAGE_SIZE * 2; + + if (size == buffer_size) + return size; + + mutex_lock(&buffer->mutex); + + nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); + + if (size < buffer_size) { + + /* easy case, just free pages */ + BUG_ON(nr_pages >= buffer->pages); + + rm_pages = buffer->pages - nr_pages; + + for_each_buffer_cpu(buffer, cpu) { + cpu_buffer = buffer->buffers[cpu]; + rb_remove_pages(cpu_buffer, rm_pages); + } + goto out; + } + + /* + * This is a bit more difficult. We only want to add pages + * when we can allocate enough for all CPUs. We do this + * by allocating all the pages and storing them on a local + * link list. If we succeed in our allocation, then we + * add these pages to the cpu_buffers. Otherwise we just free + * them all and return -ENOMEM; + */ + BUG_ON(nr_pages <= buffer->pages); + new_pages = nr_pages - buffer->pages; + + for_each_buffer_cpu(buffer, cpu) { + for (i = 0; i < new_pages; i++) { + addr = __get_free_page(GFP_KERNEL); + if (!addr) + goto free_pages; + page = (struct buffer_page *)virt_to_page(addr); + list_add(&page->list, &pages); + } + } + + for_each_buffer_cpu(buffer, cpu) { + cpu_buffer = buffer->buffers[cpu]; + rb_insert_pages(cpu_buffer, &pages, new_pages); + } + + BUG_ON(!list_empty(&pages)); + + out: + buffer->pages = nr_pages; + mutex_unlock(&buffer->mutex); + + return size; + + free_pages: + list_for_each_entry_safe(page, tmp, &pages, list) { + list_del_init(&page->list); + __free_page(&page->page); + } + return -ENOMEM; +} + +static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) +{ + return cpu_buffer->head_page == cpu_buffer->tail_page && + cpu_buffer->head == cpu_buffer->tail; +} + +static inline int rb_null_event(struct ring_buffer_event *event) +{ + return event->type == RINGBUF_TYPE_PADDING; +} + +static inline void *rb_page_index(struct buffer_page *page, unsigned index) +{ + void *addr = page_address(&page->page); + + return addr + index; +} + +static inline struct ring_buffer_event * +rb_head_event(struct ring_buffer_per_cpu *cpu_buffer) +{ + return rb_page_index(cpu_buffer->head_page, + cpu_buffer->head); +} + +static inline struct ring_buffer_event * +rb_iter_head_event(struct ring_buffer_iter *iter) +{ + return rb_page_index(iter->head_page, + iter->head); +} + +/* + * When the tail hits the head and the buffer is in overwrite mode, + * the head jumps to the next page and all content on the previous + * page is discarded. But before doing so, we update the overrun + * variable of the buffer. 
+ */ +static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer) +{ + struct ring_buffer_event *event; + unsigned long head; + + for (head = 0; head < rb_head_size(cpu_buffer); + head += rb_event_length(event)) { + + event = rb_page_index(cpu_buffer->head_page, head); + BUG_ON(rb_null_event(event)); + /* Only count data entries */ + if (event->type != RINGBUF_TYPE_DATA) + continue; + cpu_buffer->overrun++; + cpu_buffer->entries--; + } +} + +static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer, + struct buffer_page **page) +{ + struct list_head *p = (*page)->list.next; + + if (p == &cpu_buffer->pages) + p = p->next; + + *page = list_entry(p, struct buffer_page, list); +} + +static inline void +rb_add_stamp(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts) +{ + cpu_buffer->tail_page->time_stamp = *ts; + cpu_buffer->write_stamp = *ts; +} + +static void rb_reset_read_page(struct ring_buffer_per_cpu *cpu_buffer) +{ + cpu_buffer->read_stamp = cpu_buffer->head_page->time_stamp; + cpu_buffer->head = 0; +} + +static void +rb_reset_iter_read_page(struct ring_buffer_iter *iter) +{ + iter->read_stamp = iter->head_page->time_stamp; + iter->head = 0; +} + +/** + * ring_buffer_update_event - update event type and data + * @event: the even to update + * @type: the type of event + * @length: the size of the event field in the ring buffer + * + * Update the type and data fields of the event. The length + * is the actual size that is written to the ring buffer, + * and with this, we can determine what to place into the + * data field. + */ +static inline void +rb_update_event(struct ring_buffer_event *event, + unsigned type, unsigned length) +{ + event->type = type; + + switch (type) { + + case RINGBUF_TYPE_PADDING: + break; + + case RINGBUF_TYPE_TIME_EXTEND: + event->len = + (RB_LEN_TIME_EXTEND + (RB_ALIGNMENT-1)) + >> RB_ALIGNMENT_SHIFT; + break; + + case RINGBUF_TYPE_TIME_STAMP: + event->len = + (RB_LEN_TIME_STAMP + (RB_ALIGNMENT-1)) + >> RB_ALIGNMENT_SHIFT; + break; + + case RINGBUF_TYPE_DATA: + length -= RB_EVNT_HDR_SIZE; + if (length > RB_MAX_SMALL_DATA) { + event->len = 0; + event->array[0] = length; + } else + event->len = + (length + (RB_ALIGNMENT-1)) + >> RB_ALIGNMENT_SHIFT; + break; + default: + BUG(); + } +} + +static inline unsigned rb_calculate_event_length(unsigned length) +{ + struct ring_buffer_event event; /* Used only for sizeof array */ + + /* zero length can cause confusions */ + if (!length) + length = 1; + + if (length > RB_MAX_SMALL_DATA) + length += sizeof(event.array[0]); + + length += RB_EVNT_HDR_SIZE; + length = ALIGN(length, RB_ALIGNMENT); + + return length; +} + +static struct ring_buffer_event * +__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, + unsigned type, unsigned long length, u64 *ts) +{ + struct buffer_page *head_page, *tail_page; + unsigned long tail; + struct ring_buffer *buffer = cpu_buffer->buffer; + struct ring_buffer_event *event; + + tail_page = cpu_buffer->tail_page; + head_page = cpu_buffer->head_page; + tail = cpu_buffer->tail; + + if (tail + length > BUF_PAGE_SIZE) { + struct buffer_page *next_page = tail_page; + + rb_inc_page(cpu_buffer, &next_page); + + if (next_page == head_page) { + if (!(buffer->flags & RB_FL_OVERWRITE)) + return NULL; + + /* count overflows */ + rb_update_overflow(cpu_buffer); + + rb_inc_page(cpu_buffer, &head_page); + cpu_buffer->head_page = head_page; + rb_reset_read_page(cpu_buffer); + } + + if (tail != BUF_PAGE_SIZE) { + event = rb_page_index(tail_page, tail); + /* page padding */ + event->type = 
RINGBUF_TYPE_PADDING; + } + + tail_page->size = tail; + tail_page = next_page; + tail_page->size = 0; + tail = 0; + cpu_buffer->tail_page = tail_page; + cpu_buffer->tail = tail; + rb_add_stamp(cpu_buffer, ts); + } + + BUG_ON(tail + length > BUF_PAGE_SIZE); + + event = rb_page_index(tail_page, tail); + rb_update_event(event, type, length); + + return event; +} + +static int +rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer, + u64 *ts, u64 *delta) +{ + struct ring_buffer_event *event; + static int once; + + if (unlikely(*delta > (1ULL << 59) && !once++)) { + printk(KERN_WARNING "Delta way too big! %llu" + " ts=%llu write stamp = %llu\n", + *delta, *ts, cpu_buffer->write_stamp); + WARN_ON(1); + } + + /* + * The delta is too big, we to add a + * new timestamp. + */ + event = __rb_reserve_next(cpu_buffer, + RINGBUF_TYPE_TIME_EXTEND, + RB_LEN_TIME_EXTEND, + ts); + if (!event) + return -1; + + /* check to see if we went to the next page */ + if (cpu_buffer->tail) { + /* Still on same page, update timestamp */ + event->time_delta = *delta & TS_MASK; + event->array[0] = *delta >> TS_SHIFT; + /* commit the time event */ + cpu_buffer->tail += + rb_event_length(event); + cpu_buffer->write_stamp = *ts; + *delta = 0; + } + + return 0; +} + +static struct ring_buffer_event * +rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer, + unsigned type, unsigned long length) +{ + struct ring_buffer_event *event; + u64 ts, delta; + + ts = ring_buffer_time_stamp(cpu_buffer->cpu); + + if (cpu_buffer->tail) { + delta = ts - cpu_buffer->write_stamp; + + if (test_time_stamp(delta)) { + int ret; + + ret = rb_add_time_stamp(cpu_buffer, &ts, &delta); + if (ret < 0) + return NULL; + } + } else { + rb_add_stamp(cpu_buffer, &ts); + delta = 0; + } + + event = __rb_reserve_next(cpu_buffer, type, length, &ts); + if (!event) + return NULL; + + /* If the reserve went to the next page, our delta is zero */ + if (!cpu_buffer->tail) + delta = 0; + + event->time_delta = delta; + + return event; +} + +/** + * ring_buffer_lock_reserve - reserve a part of the buffer + * @buffer: the ring buffer to reserve from + * @length: the length of the data to reserve (excluding event header) + * @flags: a pointer to save the interrupt flags + * + * Returns a reseverd event on the ring buffer to copy directly to. + * The user of this interface will need to get the body to write into + * and can use the ring_buffer_event_data() interface. + * + * The length is the length of the data needed, not the event length + * which also includes the event header. + * + * Must be paired with ring_buffer_unlock_commit, unless NULL is returned. + * If NULL is returned, then nothing has been allocated or locked. 
+ */ +struct ring_buffer_event * +ring_buffer_lock_reserve(struct ring_buffer *buffer, + unsigned long length, + unsigned long *flags) +{ + struct ring_buffer_per_cpu *cpu_buffer; + struct ring_buffer_event *event; + int cpu; + + if (atomic_read(&buffer->record_disabled)) + return NULL; + + raw_local_irq_save(*flags); + cpu = raw_smp_processor_id(); + + if (!cpu_isset(cpu, buffer->cpumask)) + goto out_irq; + + cpu_buffer = buffer->buffers[cpu]; + spin_lock(&cpu_buffer->lock); + + if (atomic_read(&cpu_buffer->record_disabled)) + goto no_record; + + length = rb_calculate_event_length(length); + if (length > BUF_PAGE_SIZE) + return NULL; + + event = rb_reserve_next_event(cpu_buffer, RINGBUF_TYPE_DATA, length); + if (!event) + goto no_record; + + return event; + + no_record: + spin_unlock(&cpu_buffer->lock); + out_irq: + local_irq_restore(*flags); + return NULL; +} + +static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer, + struct ring_buffer_event *event) +{ + cpu_buffer->tail += rb_event_length(event); + cpu_buffer->tail_page->size = cpu_buffer->tail; + cpu_buffer->write_stamp += event->time_delta; + cpu_buffer->entries++; +} + +/** + * ring_buffer_unlock_commit - commit a reserved + * @buffer: The buffer to commit to + * @event: The event pointer to commit. + * @flags: the interrupt flags received from ring_buffer_lock_reserve. + * + * This commits the data to the ring buffer, and releases any locks held. + * + * Must be paired with ring_buffer_lock_reserve. + */ +int ring_buffer_unlock_commit(struct ring_buffer *buffer, + struct ring_buffer_event *event, + unsigned long flags) +{ + struct ring_buffer_per_cpu *cpu_buffer; + int cpu = raw_smp_processor_id(); + + cpu_buffer = buffer->buffers[cpu]; + + assert_spin_locked(&cpu_buffer->lock); + + rb_commit(cpu_buffer, event); + + spin_unlock(&cpu_buffer->lock); + raw_local_irq_restore(flags); + + return 0; +} + +/** + * ring_buffer_write - write data to the buffer without reserving + * @buffer: The ring buffer to write to. + * @length: The length of the data being written (excluding the event header) + * @data: The data to write to the buffer. + * + * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as + * one function. If you already have the data to write to the buffer, it + * may be easier to simply call this function. + * + * Note, like ring_buffer_lock_reserve, the length is the length of the data + * and not the length of the event which would hold the header. 
+ */ +int ring_buffer_write(struct ring_buffer *buffer, + unsigned long length, + void *data) +{ + struct ring_buffer_per_cpu *cpu_buffer; + struct ring_buffer_event *event; + unsigned long event_length, flags; + void *body; + int ret = -EBUSY; + int cpu; + + if (atomic_read(&buffer->record_disabled)) + return -EBUSY; + + local_irq_save(flags); + cpu = raw_smp_processor_id(); + + if (!cpu_isset(cpu, buffer->cpumask)) + goto out_irq; + + cpu_buffer = buffer->buffers[cpu]; + spin_lock(&cpu_buffer->lock); + + if (atomic_read(&cpu_buffer->record_disabled)) + goto out; + + event_length = rb_calculate_event_length(length); + event = rb_reserve_next_event(cpu_buffer, + RINGBUF_TYPE_DATA, event_length); + if (!event) + goto out; + + body = rb_event_data(event); + + memcpy(body, data, length); + + rb_commit(cpu_buffer, event); + + ret = 0; + out: + spin_unlock(&cpu_buffer->lock); + out_irq: + local_irq_restore(flags); + + return ret; +} + +/** + * ring_buffer_lock - lock the ring buffer + * @buffer: The ring buffer to lock + * @flags: The place to store the interrupt flags + * + * This locks all the per CPU buffers. + * + * Must be unlocked by ring_buffer_unlock. + */ +void ring_buffer_lock(struct ring_buffer *buffer, unsigned long *flags) +{ + struct ring_buffer_per_cpu *cpu_buffer; + int cpu; + + local_irq_save(*flags); + + for_each_buffer_cpu(buffer, cpu) { + cpu_buffer = buffer->buffers[cpu]; + spin_lock(&cpu_buffer->lock); + } +} + +/** + * ring_buffer_unlock - unlock a locked buffer + * @buffer: The locked buffer to unlock + * @flags: The interrupt flags received by ring_buffer_lock + */ +void ring_buffer_unlock(struct ring_buffer *buffer, unsigned long flags) +{ + struct ring_buffer_per_cpu *cpu_buffer; + int cpu; + + for (cpu = buffer->cpus - 1; cpu >= 0; cpu--) { + if (!cpu_isset(cpu, buffer->cpumask)) + continue; + cpu_buffer = buffer->buffers[cpu]; + spin_unlock(&cpu_buffer->lock); + } + + local_irq_restore(flags); +} + +/** + * ring_buffer_record_disable - stop all writes into the buffer + * @buffer: The ring buffer to stop writes to. + * + * This prevents all writes to the buffer. Any attempt to write + * to the buffer after this will fail and return NULL. + * + * The caller should call synchronize_sched() after this. + */ +void ring_buffer_record_disable(struct ring_buffer *buffer) +{ + atomic_inc(&buffer->record_disabled); +} + +/** + * ring_buffer_record_enable - enable writes to the buffer + * @buffer: The ring buffer to enable writes + * + * Note, multiple disables will need the same number of enables + * to truely enable the writing (much like preempt_disable). + */ +void ring_buffer_record_enable(struct ring_buffer *buffer) +{ + atomic_dec(&buffer->record_disabled); +} + +/** + * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer + * @buffer: The ring buffer to stop writes to. + * @cpu: The CPU buffer to stop + * + * This prevents all writes to the buffer. Any attempt to write + * to the buffer after this will fail and return NULL. + * + * The caller should call synchronize_sched() after this. + */ +void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu) +{ + struct ring_buffer_per_cpu *cpu_buffer; + + if (!cpu_isset(cpu, buffer->cpumask)) + return; + + cpu_buffer = buffer->buffers[cpu]; + atomic_inc(&cpu_buffer->record_disabled); +} + +/** + * ring_buffer_record_enable_cpu - enable writes to the buffer + * @buffer: The ring buffer to enable writes + * @cpu: The CPU to enable. 
+ * + * Note, multiple disables will need the same number of enables + * to truely enable the writing (much like preempt_disable). + */ +void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu) +{ + struct ring_buffer_per_cpu *cpu_buffer; + + if (!cpu_isset(cpu, buffer->cpumask)) + return; + + cpu_buffer = buffer->buffers[cpu]; + atomic_dec(&cpu_buffer->record_disabled); +} + +/** + * ring_buffer_entries_cpu - get the number of entries in a cpu buffer + * @buffer: The ring buffer + * @cpu: The per CPU buffer to get the entries from. + */ +unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu) +{ + struct ring_buffer_per_cpu *cpu_buffer; + + if (!cpu_isset(cpu, buffer->cpumask)) + return 0; + + cpu_buffer = buffer->buffers[cpu]; + return cpu_buffer->entries; +} + +/** + * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer + * @buffer: The ring buffer + * @cpu: The per CPU buffer to get the number of overruns from + */ +unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu) +{ + struct ring_buffer_per_cpu *cpu_buffer; + + if (!cpu_isset(cpu, buffer->cpumask)) + return 0; + + cpu_buffer = buffer->buffers[cpu]; + return cpu_buffer->overrun; +} + +/** + * ring_buffer_entries - get the number of entries in a buffer + * @buffer: The ring buffer + * + * Returns the total number of entries in the ring buffer + * (all CPU entries) + */ +unsigned long ring_buffer_entries(struct ring_buffer *buffer) +{ + struct ring_buffer_per_cpu *cpu_buffer; + unsigned long entries = 0; + int cpu; + + /* if you care about this being correct, lock the buffer */ + for_each_buffer_cpu(buffer, cpu) { + cpu_buffer = buffer->buffers[cpu]; + entries += cpu_buffer->entries; + } + + return entries; +} + +/** + * ring_buffer_overrun_cpu - get the number of overruns in buffer + * @buffer: The ring buffer + * + * Returns the total number of overruns in the ring buffer + * (all CPU entries) + */ +unsigned long ring_buffer_overruns(struct ring_buffer *buffer) +{ + struct ring_buffer_per_cpu *cpu_buffer; + unsigned long overruns = 0; + int cpu; + + /* if you care about this being correct, lock the buffer */ + for_each_buffer_cpu(buffer, cpu) { + cpu_buffer = buffer->buffers[cpu]; + overruns += cpu_buffer->overrun; + } + + return overruns; +} + +/** + * ring_buffer_iter_reset - reset an iterator + * @iter: The iterator to reset + * + * Resets the iterator, so that it will start from the beginning + * again. 
+ */ +void ring_buffer_iter_reset(struct ring_buffer_iter *iter) +{ + struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; + + iter->head_page = cpu_buffer->head_page; + iter->head = cpu_buffer->head; + rb_reset_iter_read_page(iter); +} + +/** + * ring_buffer_iter_empty - check if an iterator has no more to read + * @iter: The iterator to check + */ +int ring_buffer_iter_empty(struct ring_buffer_iter *iter) +{ + struct ring_buffer_per_cpu *cpu_buffer; + + cpu_buffer = iter->cpu_buffer; + + return iter->head_page == cpu_buffer->tail_page && + iter->head == cpu_buffer->tail; +} + +static void +rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer, + struct ring_buffer_event *event) +{ + u64 delta; + + switch (event->type) { + case RINGBUF_TYPE_PADDING: + return; + + case RINGBUF_TYPE_TIME_EXTEND: + delta = event->array[0]; + delta <<= TS_SHIFT; + delta += event->time_delta; + cpu_buffer->read_stamp += delta; + return; + + case RINGBUF_TYPE_TIME_STAMP: + /* FIXME: not implemented */ + return; + + case RINGBUF_TYPE_DATA: + cpu_buffer->read_stamp += event->time_delta; + return; + + default: + BUG(); + } + return; +} + +static void +rb_update_iter_read_stamp(struct ring_buffer_iter *iter, + struct ring_buffer_event *event) +{ + u64 delta; + + switch (event->type) { + case RINGBUF_TYPE_PADDING: + return; + + case RINGBUF_TYPE_TIME_EXTEND: + delta = event->array[0]; + delta <<= TS_SHIFT; + delta += event->time_delta; + iter->read_stamp += delta; + return; + + case RINGBUF_TYPE_TIME_STAMP: + /* FIXME: not implemented */ + return; + + case RINGBUF_TYPE_DATA: + iter->read_stamp += event->time_delta; + return; + + default: + BUG(); + } + return; +} + +static void rb_advance_head(struct ring_buffer_per_cpu *cpu_buffer) +{ + struct ring_buffer_event *event; + unsigned length; + + /* + * Check if we are at the end of the buffer. + */ + if (cpu_buffer->head >= cpu_buffer->head_page->size) { + BUG_ON(cpu_buffer->head_page == cpu_buffer->tail_page); + rb_inc_page(cpu_buffer, &cpu_buffer->head_page); + rb_reset_read_page(cpu_buffer); + return; + } + + event = rb_head_event(cpu_buffer); + + if (event->type == RINGBUF_TYPE_DATA) + cpu_buffer->entries--; + + length = rb_event_length(event); + + /* + * This should not be called to advance the header if we are + * at the tail of the buffer. + */ + BUG_ON((cpu_buffer->head_page == cpu_buffer->tail_page) && + (cpu_buffer->head + length > cpu_buffer->tail)); + + rb_update_read_stamp(cpu_buffer, event); + + cpu_buffer->head += length; + + /* check for end of page */ + if ((cpu_buffer->head >= cpu_buffer->head_page->size) && + (cpu_buffer->head_page != cpu_buffer->tail_page)) + rb_advance_head(cpu_buffer); +} + +static void rb_advance_iter(struct ring_buffer_iter *iter) +{ + struct ring_buffer *buffer; + struct ring_buffer_per_cpu *cpu_buffer; + struct ring_buffer_event *event; + unsigned length; + + cpu_buffer = iter->cpu_buffer; + buffer = cpu_buffer->buffer; + + /* + * Check if we are at the end of the buffer. + */ + if (iter->head >= iter->head_page->size) { + BUG_ON(iter->head_page == cpu_buffer->tail_page); + rb_inc_page(cpu_buffer, &iter->head_page); + rb_reset_iter_read_page(iter); + return; + } + + event = rb_iter_head_event(iter); + + length = rb_event_length(event); + + /* + * This should not be called to advance the header if we are + * at the tail of the buffer. 
+ */
+	BUG_ON((iter->head_page == cpu_buffer->tail_page) &&
+	       (iter->head + length > cpu_buffer->tail));
+
+	rb_update_iter_read_stamp(iter, event);
+
+	iter->head += length;
+
+	/* check for end of page padding */
+	if ((iter->head >= iter->head_page->size) &&
+	    (iter->head_page != cpu_buffer->tail_page))
+		rb_advance_iter(iter);
+}
+
+/**
+ * ring_buffer_peek - peek at the next event to be read
+ * @buffer: The ring buffer to read
+ * @cpu: The cpu to peek at
+ * @ts: The timestamp counter of this event.
+ *
+ * This will return the event that will be read next, but does
+ * not consume the data.
+ */
+struct ring_buffer_event *
+ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
+{
+	struct ring_buffer_per_cpu *cpu_buffer;
+	struct ring_buffer_event *event;
+
+	if (!cpu_isset(cpu, buffer->cpumask))
+		return NULL;
+
+	cpu_buffer = buffer->buffers[cpu];
+
+ again:
+	if (rb_per_cpu_empty(cpu_buffer))
+		return NULL;
+
+	event = rb_head_event(cpu_buffer);
+
+	switch (event->type) {
+	case RINGBUF_TYPE_PADDING:
+		rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
+		rb_reset_read_page(cpu_buffer);
+		goto again;
+
+	case RINGBUF_TYPE_TIME_EXTEND:
+		/* Internal data, OK to advance */
+		rb_advance_head(cpu_buffer);
+		goto again;
+
+	case RINGBUF_TYPE_TIME_STAMP:
+		/* FIXME: not implemented */
+		rb_advance_head(cpu_buffer);
+		goto again;
+
+	case RINGBUF_TYPE_DATA:
+		if (ts) {
+			*ts = cpu_buffer->read_stamp + event->time_delta;
+			ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
+		}
+		return event;
+
+	default:
+		BUG();
+	}
+
+	return NULL;
+}
+
+/**
+ * ring_buffer_iter_peek - peek at the next event to be read
+ * @iter: The ring buffer iterator
+ * @ts: The timestamp counter of this event.
+ *
+ * This will return the event that will be read next, but does
+ * not increment the iterator.
+ */
+struct ring_buffer_event *
+ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
+{
+	struct ring_buffer *buffer;
+	struct ring_buffer_per_cpu *cpu_buffer;
+	struct ring_buffer_event *event;
+
+	if (ring_buffer_iter_empty(iter))
+		return NULL;
+
+	cpu_buffer = iter->cpu_buffer;
+	buffer = cpu_buffer->buffer;
+
+ again:
+	if (rb_per_cpu_empty(cpu_buffer))
+		return NULL;
+
+	event = rb_iter_head_event(iter);
+
+	switch (event->type) {
+	case RINGBUF_TYPE_PADDING:
+		rb_inc_page(cpu_buffer, &iter->head_page);
+		rb_reset_iter_read_page(iter);
+		goto again;
+
+	case RINGBUF_TYPE_TIME_EXTEND:
+		/* Internal data, OK to advance */
+		rb_advance_iter(iter);
+		goto again;
+
+	case RINGBUF_TYPE_TIME_STAMP:
+		/* FIXME: not implemented */
+		rb_advance_iter(iter);
+		goto again;
+
+	case RINGBUF_TYPE_DATA:
+		if (ts) {
+			*ts = iter->read_stamp + event->time_delta;
+			ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
+		}
+		return event;
+
+	default:
+		BUG();
+	}
+
+	return NULL;
+}
+
+/**
+ * ring_buffer_consume - return an event and consume it
+ * @buffer: The ring buffer to get the next event from
+ *
+ * Returns the next event in the ring buffer, and that event is consumed.
+ * Meaning that sequential reads will keep returning a different event,
+ * and eventually empty the ring buffer if the producer is slower.
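[ Editor's note: only RINGBUF_TYPE_DATA events ever reach callers of the peek interfaces; padding and time records are skipped internally by the again: loops above. A hypothetical polling helper built on that guarantee: ]

	static int example_has_data(struct ring_buffer *buffer, int cpu)
	{
		u64 ts;

		/* non-destructive: the event stays in the buffer */
		return ring_buffer_peek(buffer, cpu, &ts) != NULL;
	}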
+ */
+struct ring_buffer_event *
+ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
+{
+	struct ring_buffer_per_cpu *cpu_buffer;
+	struct ring_buffer_event *event;
+
+	if (!cpu_isset(cpu, buffer->cpumask))
+		return NULL;
+
+	event = ring_buffer_peek(buffer, cpu, ts);
+	if (!event)
+		return NULL;
+
+	cpu_buffer = buffer->buffers[cpu];
+	rb_advance_head(cpu_buffer);
+
+	return event;
+}
+
+/**
+ * ring_buffer_read_start - start a non consuming read of the buffer
+ * @buffer: The ring buffer to read from
+ * @cpu: The cpu buffer to iterate over
+ *
+ * This starts up an iteration through the buffer. It also disables
+ * the recording to the buffer until the reading is finished.
+ * This prevents the reading from being corrupted. This is not
+ * a consuming read, so a producer is not expected.
+ *
+ * Must be paired with ring_buffer_read_finish.
+ */
+struct ring_buffer_iter *
+ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
+{
+	struct ring_buffer_per_cpu *cpu_buffer;
+	struct ring_buffer_iter *iter;
+
+	if (!cpu_isset(cpu, buffer->cpumask))
+		return NULL;
+
+	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
+	if (!iter)
+		return NULL;
+
+	cpu_buffer = buffer->buffers[cpu];
+
+	iter->cpu_buffer = cpu_buffer;
+
+	atomic_inc(&cpu_buffer->record_disabled);
+	synchronize_sched();
+
+	spin_lock(&cpu_buffer->lock);
+	iter->head = cpu_buffer->head;
+	iter->head_page = cpu_buffer->head_page;
+	rb_reset_iter_read_page(iter);
+	spin_unlock(&cpu_buffer->lock);
+
+	return iter;
+}
+
+/**
+ * ring_buffer_read_finish - finish reading the iterator of the buffer
+ * @iter: The iterator retrieved by ring_buffer_read_start
+ *
+ * This re-enables the recording to the buffer, and frees the
+ * iterator.
+ */
+void
+ring_buffer_read_finish(struct ring_buffer_iter *iter)
+{
+	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
+
+	atomic_dec(&cpu_buffer->record_disabled);
+	kfree(iter);
+}
+
+/**
+ * ring_buffer_read - read the next item in the ring buffer by the iterator
+ * @iter: The ring buffer iterator
+ * @ts: The time stamp of the event read.
+ *
+ * This reads the next event in the ring buffer and increments the iterator.
+ */
+struct ring_buffer_event *
+ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
+{
+	struct ring_buffer_event *event;
+
+	event = ring_buffer_iter_peek(iter, ts);
+	if (!event)
+		return NULL;
+
+	rb_advance_iter(iter);
+
+	return event;
+}
+
+/**
+ * ring_buffer_size - return the size of the ring buffer (in bytes)
+ * @buffer: The ring buffer.
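[ Editor's note: a sketch contrasting the two read styles above, assuming a hypothetical process() consumer. ring_buffer_consume() removes what it returns; the iterator walk leaves the buffer intact but keeps recording disabled until ring_buffer_read_finish(). ]

	static void example_consume_all(struct ring_buffer *buffer, int cpu)
	{
		struct ring_buffer_event *event;
		u64 ts;

		while ((event = ring_buffer_consume(buffer, cpu, &ts)))
			process(ring_buffer_event_data(event));
	}

	static void example_walk_all(struct ring_buffer *buffer, int cpu)
	{
		struct ring_buffer_iter *iter = ring_buffer_read_start(buffer, cpu);
		struct ring_buffer_event *event;
		u64 ts;

		if (!iter)
			return;
		while ((event = ring_buffer_read(iter, &ts)))
			process(ring_buffer_event_data(event));
		ring_buffer_read_finish(iter);
	}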
+ */
+unsigned long ring_buffer_size(struct ring_buffer *buffer)
+{
+	return BUF_PAGE_SIZE * buffer->pages;
+}
+
+static void
+rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
+{
+	cpu_buffer->head_page
+		= list_entry(cpu_buffer->pages.next, struct buffer_page, list);
+	cpu_buffer->tail_page
+		= list_entry(cpu_buffer->pages.next, struct buffer_page, list);
+
+	cpu_buffer->head = cpu_buffer->tail = 0;
+	cpu_buffer->overrun = 0;
+	cpu_buffer->entries = 0;
+}
+
+/**
+ * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
+ * @buffer: The ring buffer to reset a per cpu buffer of
+ * @cpu: The CPU buffer to be reset
+ */
+void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
+{
+	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
+	unsigned long flags;
+
+	if (!cpu_isset(cpu, buffer->cpumask))
+		return;
+
+	raw_local_irq_save(flags);
+	spin_lock(&cpu_buffer->lock);
+
+	rb_reset_cpu(cpu_buffer);
+
+	spin_unlock(&cpu_buffer->lock);
+	raw_local_irq_restore(flags);
+}
+
+/**
+ * ring_buffer_reset - reset a ring buffer
+ * @buffer: The ring buffer to reset all cpu buffers
+ */
+void ring_buffer_reset(struct ring_buffer *buffer)
+{
+	unsigned long flags;
+	int cpu;
+
+	ring_buffer_lock(buffer, &flags);
+
+	for_each_buffer_cpu(buffer, cpu)
+		rb_reset_cpu(buffer->buffers[cpu]);
+
+	ring_buffer_unlock(buffer, flags);
+}
+
+/**
+ * ring_buffer_empty - is the ring buffer empty?
+ * @buffer: The ring buffer to test
+ */
+int ring_buffer_empty(struct ring_buffer *buffer)
+{
+	struct ring_buffer_per_cpu *cpu_buffer;
+	int cpu;
+
+	/* yes this is racy, but if you don't like the race, lock the buffer */
+	for_each_buffer_cpu(buffer, cpu) {
+		cpu_buffer = buffer->buffers[cpu];
+		if (!rb_per_cpu_empty(cpu_buffer))
+			return 0;
+	}
+	return 1;
+}
+
+/**
+ * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
+ * @buffer: The ring buffer
+ * @cpu: The CPU buffer to test
+ */
+int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
+{
+	struct ring_buffer_per_cpu *cpu_buffer;
+
+	if (!cpu_isset(cpu, buffer->cpumask))
+		return 1;
+
+	cpu_buffer = buffer->buffers[cpu];
+	return rb_per_cpu_empty(cpu_buffer);
+}
+
+/**
+ * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
+ * @buffer_a: One buffer to swap with
+ * @buffer_b: The other buffer to swap with
+ *
+ * This function is useful for tracers that want to take a "snapshot"
+ * of a CPU buffer and have another backup buffer lying around.
+ * It is expected that the tracer handles the cpu buffer not being
+ * used at the moment.
+ */
+int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
+			 struct ring_buffer *buffer_b, int cpu)
+{
+	struct ring_buffer_per_cpu *cpu_buffer_a;
+	struct ring_buffer_per_cpu *cpu_buffer_b;
+
+	if (!cpu_isset(cpu, buffer_a->cpumask) ||
+	    !cpu_isset(cpu, buffer_b->cpumask))
+		return -EINVAL;
+
+	/* At least make sure the two buffers are somewhat the same */
+	if (buffer_a->size != buffer_b->size ||
+	    buffer_a->pages != buffer_b->pages)
+		return -EINVAL;
+
+	cpu_buffer_a = buffer_a->buffers[cpu];
+	cpu_buffer_b = buffer_b->buffers[cpu];
+
+	/*
+	 * We can't do a synchronize_sched here because this
+	 * function can be called in atomic context.
+	 * Normally this will be called from the same CPU as cpu.
+	 * If not it's up to the caller to protect this.
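[ Editor's note: the "snapshot" use of ring_buffer_swap_cpu() sketched below mirrors what update_max_tr_single() does in the ftrace port later in this series; error handling is trimmed and the function name is hypothetical. ]

	static void example_snapshot_cpu(struct ring_buffer *live,
					 struct ring_buffer *spare, int cpu)
	{
		ring_buffer_reset_cpu(spare, cpu);
		WARN_ON(ring_buffer_swap_cpu(spare, live, cpu));
		/* the spare buffer now holds the captured cpu buffer */
	}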
+ */ + atomic_inc(&cpu_buffer_a->record_disabled); + atomic_inc(&cpu_buffer_b->record_disabled); + + buffer_a->buffers[cpu] = cpu_buffer_b; + buffer_b->buffers[cpu] = cpu_buffer_a; + + cpu_buffer_b->buffer = buffer_a; + cpu_buffer_a->buffer = buffer_b; + + atomic_dec(&cpu_buffer_a->record_disabled); + atomic_dec(&cpu_buffer_b->record_disabled); + + return 0; +} + -- cgit v1.2.3-70-g09d2 From a7b1374333407f409cf8df7e623b12490f073c84 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Mon, 29 Sep 2008 23:02:39 -0400 Subject: ring_buffer: add paranoid check for buffer page If for some strange reason the buffer_page gets bigger, or the page struct gets smaller, I want to know this ASAP. The best way is to not let the kernel compile. This patch adds code to test the size of the struct buffer_page against the page struct and will cause compile issues if the buffer_page ever gets bigger than the page struct. Signed-off-by: Steven Rostedt Signed-off-by: Ingo Molnar --- kernel/trace/ring_buffer.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'kernel/trace') diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 830a2930dd9..95ca9338cb6 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -289,6 +289,12 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) kfree(cpu_buffer); } +/* + * Causes compile errors if the struct buffer_page gets bigger + * than the struct page. + */ +extern int ring_buffer_page_too_big(void); + /** * ring_buffer_alloc - allocate a new ring_buffer * @size: the size in bytes that is needed. @@ -305,6 +311,11 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags) int bsize; int cpu; + /* Paranoid! Optimizes out when all is well */ + if (sizeof(struct buffer_page) > sizeof(struct page)) + ring_buffer_page_too_big(); + + /* keep it in its own cache line */ buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()), GFP_KERNEL); -- cgit v1.2.3-70-g09d2 From ed56829cb3195de499f97fa6108fe9134319bae6 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Mon, 29 Sep 2008 23:02:40 -0400 Subject: ring_buffer: reset buffer page when freeing Mathieu Desnoyers pointed out that the freeing of the page frame needs to be reset otherwise we might trigger BUG_ON in the page free code. Signed-off-by: Steven Rostedt Signed-off-by: Ingo Molnar --- kernel/trace/ring_buffer.c | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 95ca9338cb6..cfa711374d9 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -127,6 +127,17 @@ struct buffer_page { }; }; +/* + * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing + * this issue out. + */ +static inline void free_buffer_page(struct buffer_page *bpage) +{ + reset_page_mapcount(&bpage->page); + bpage->page.mapping = NULL; + __free_page(&bpage->page); +} + /* * We need to fit the time_stamp delta into 27 bits. 
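[ Editor's note: the undefined-extern trick in the "paranoid check" patch above generalizes to any condition the compiler can fold to a constant. A hypothetical sketch -- if the condition is false at compile time the call is optimized out and the symbol is never referenced; if it is true, the build fails at link time: ]

	extern int example_check_failed(void);	/* deliberately never defined */

	#define EXAMPLE_BUILD_BUG_ON(cond)		\
		do {					\
			if (cond)			\
				example_check_failed();	\
		} while (0)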
*/ @@ -240,7 +251,7 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, free_pages: list_for_each_entry_safe(page, tmp, &pages, list) { list_del_init(&page->list); - __free_page(&page->page); + free_buffer_page(page); } return -ENOMEM; } @@ -284,7 +295,7 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) list_for_each_entry_safe(page, tmp, head, list) { list_del_init(&page->list); - __free_page(&page->page); + free_buffer_page(page); } kfree(cpu_buffer); } @@ -393,7 +404,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages) p = cpu_buffer->pages.next; page = list_entry(p, struct buffer_page, list); list_del_init(&page->list); - __free_page(&page->page); + free_buffer_page(page); } BUG_ON(list_empty(&cpu_buffer->pages)); @@ -520,7 +531,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size) free_pages: list_for_each_entry_safe(page, tmp, &pages, list) { list_del_init(&page->list); - __free_page(&page->page); + free_buffer_page(page); } return -ENOMEM; } -- cgit v1.2.3-70-g09d2 From 3928a8a2d98081d1bc3c0a84a2d70e29b90ecf1c Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Mon, 29 Sep 2008 23:02:41 -0400 Subject: ftrace: make work with new ring buffer This patch ports ftrace over to the new ring buffer. Signed-off-by: Steven Rostedt Signed-off-by: Ingo Molnar --- kernel/trace/trace.c | 932 ++++++++++---------------------------- kernel/trace/trace.h | 22 +- kernel/trace/trace_boot.c | 16 +- kernel/trace/trace_functions.c | 2 +- kernel/trace/trace_irqsoff.c | 6 +- kernel/trace/trace_mmiotrace.c | 40 +- kernel/trace/trace_nop.c | 2 +- kernel/trace/trace_sched_switch.c | 2 +- kernel/trace/trace_sched_wakeup.c | 2 +- kernel/trace/trace_selftest.c | 60 +-- kernel/trace/trace_sysprof.c | 2 +- 11 files changed, 288 insertions(+), 798 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 6ada059832a..ef80793858b 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -33,25 +33,22 @@ #include #include +#include #include "trace.h" +#define TRACE_BUFFER_FLAGS (RB_FL_OVERWRITE) + unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX; unsigned long __read_mostly tracing_thresh; -static unsigned long __read_mostly tracing_nr_buffers; static cpumask_t __read_mostly tracing_buffer_mask; #define for_each_tracing_cpu(cpu) \ for_each_cpu_mask(cpu, tracing_buffer_mask) -static int trace_alloc_page(void); -static int trace_free_page(void); - static int tracing_disabled = 1; -static unsigned long tracing_pages_allocated; - long ns2usecs(cycle_t nsec) { @@ -62,7 +59,9 @@ ns2usecs(cycle_t nsec) cycle_t ftrace_now(int cpu) { - return cpu_clock(cpu); + u64 ts = ring_buffer_time_stamp(cpu); + ring_buffer_normalize_time_stamp(cpu, &ts); + return ts; } /* @@ -102,18 +101,18 @@ static int tracer_enabled = 1; int ftrace_function_enabled; /* - * trace_nr_entries is the number of entries that is allocated - * for a buffer. Note, the number of entries is always rounded - * to ENTRIES_PER_PAGE. + * trace_buf_size is the size in bytes that is allocated + * for a buffer. Note, the number of bytes is always rounded + * to page size. * * This number is purposely set to a low number of 16384. * If the dump on oops happens, it will be much appreciated * to not have to wait for all that output. Anyway this can be * boot time and run time configurable. 
*/ -#define TRACE_ENTRIES_DEFAULT 16384UL +#define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */ -static unsigned long trace_nr_entries = TRACE_ENTRIES_DEFAULT; +static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT; /* trace_types holds a link list of available tracers. */ static struct tracer *trace_types __read_mostly; @@ -158,23 +157,21 @@ void trace_wake_up(void) wake_up(&trace_wait); } -#define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(struct trace_entry)) - -static int __init set_nr_entries(char *str) +static int __init set_buf_size(char *str) { - unsigned long nr_entries; + unsigned long buf_size; int ret; if (!str) return 0; - ret = strict_strtoul(str, 0, &nr_entries); + ret = strict_strtoul(str, 0, &buf_size); /* nr_entries can not be zero */ - if (ret < 0 || nr_entries == 0) + if (ret < 0 || buf_size == 0) return 0; - trace_nr_entries = nr_entries; + trace_buf_size = buf_size; return 1; } -__setup("trace_entries=", set_nr_entries); +__setup("trace_buf_size=", set_buf_size); unsigned long nsecs_to_usecs(unsigned long nsecs) { @@ -243,54 +240,6 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) tracing_record_cmdline(current); } -#define CHECK_COND(cond) \ - if (unlikely(cond)) { \ - tracing_disabled = 1; \ - WARN_ON(1); \ - return -1; \ - } - -/** - * check_pages - integrity check of trace buffers - * - * As a safty measure we check to make sure the data pages have not - * been corrupted. - */ -int check_pages(struct trace_array_cpu *data) -{ - struct page *page, *tmp; - - CHECK_COND(data->trace_pages.next->prev != &data->trace_pages); - CHECK_COND(data->trace_pages.prev->next != &data->trace_pages); - - list_for_each_entry_safe(page, tmp, &data->trace_pages, lru) { - CHECK_COND(page->lru.next->prev != &page->lru); - CHECK_COND(page->lru.prev->next != &page->lru); - } - - return 0; -} - -/** - * head_page - page address of the first page in per_cpu buffer. - * - * head_page returns the page address of the first page in - * a per_cpu buffer. This also preforms various consistency - * checks to make sure the buffer has not been corrupted. - */ -void *head_page(struct trace_array_cpu *data) -{ - struct page *page; - - if (list_empty(&data->trace_pages)) - return NULL; - - page = list_entry(data->trace_pages.next, struct page, lru); - BUG_ON(&page->lru == &data->trace_pages); - - return page_address(page); -} - /** * trace_seq_printf - sequence printing of trace information * @s: trace sequence descriptor @@ -437,34 +386,6 @@ trace_print_seq(struct seq_file *m, struct trace_seq *s) trace_seq_reset(s); } -/* - * flip the trace buffers between two trace descriptors. - * This usually is the buffers between the global_trace and - * the max_tr to record a snapshot of a current trace. - * - * The ftrace_max_lock must be held. 
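[ Editor's note: with the set_buf_size() hunk above, the boot parameter changes both its name and its unit -- it now takes a size in bytes rather than an entry count. A hypothetical kernel command line requesting the old default capacity: ]

	trace_buf_size=1441792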
- */ -static void -flip_trace(struct trace_array_cpu *tr1, struct trace_array_cpu *tr2) -{ - struct list_head flip_pages; - - INIT_LIST_HEAD(&flip_pages); - - memcpy(&tr1->trace_head_idx, &tr2->trace_head_idx, - sizeof(struct trace_array_cpu) - - offsetof(struct trace_array_cpu, trace_head_idx)); - - check_pages(tr1); - check_pages(tr2); - list_splice_init(&tr1->trace_pages, &flip_pages); - list_splice_init(&tr2->trace_pages, &tr1->trace_pages); - list_splice_init(&flip_pages, &tr2->trace_pages); - BUG_ON(!list_empty(&flip_pages)); - check_pages(tr1); - check_pages(tr2); -} - /** * update_max_tr - snapshot all trace buffers from global_trace to max_tr * @tr: tracer @@ -477,17 +398,15 @@ flip_trace(struct trace_array_cpu *tr1, struct trace_array_cpu *tr2) void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) { - struct trace_array_cpu *data; - int i; + struct ring_buffer *buf = tr->buffer; WARN_ON_ONCE(!irqs_disabled()); __raw_spin_lock(&ftrace_max_lock); - /* clear out all the previous traces */ - for_each_tracing_cpu(i) { - data = tr->data[i]; - flip_trace(max_tr.data[i], data); - tracing_reset(data); - } + + tr->buffer = max_tr.buffer; + max_tr.buffer = buf; + + ring_buffer_reset(tr->buffer); __update_max_tr(tr, tsk, cpu); __raw_spin_unlock(&ftrace_max_lock); @@ -504,16 +423,15 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) void update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) { - struct trace_array_cpu *data = tr->data[cpu]; - int i; + int ret; WARN_ON_ONCE(!irqs_disabled()); __raw_spin_lock(&ftrace_max_lock); - for_each_tracing_cpu(i) - tracing_reset(max_tr.data[i]); - flip_trace(max_tr.data[cpu], data); - tracing_reset(data); + ring_buffer_reset(max_tr.buffer); + ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu); + + WARN_ON_ONCE(ret); __update_max_tr(tr, tsk, cpu); __raw_spin_unlock(&ftrace_max_lock); @@ -550,7 +468,6 @@ int register_tracer(struct tracer *type) #ifdef CONFIG_FTRACE_STARTUP_TEST if (type->selftest) { struct tracer *saved_tracer = current_trace; - struct trace_array_cpu *data; struct trace_array *tr = &global_trace; int saved_ctrl = tr->ctrl; int i; @@ -562,10 +479,7 @@ int register_tracer(struct tracer *type) * If we fail, we do not register this tracer. 
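[ Editor's note: the heart of the update_max_tr() change above -- taking a snapshot is now an O(1) pointer swap rather than a page-by-page copy. Reduced to its essentials, with field names as in this series: ]

	struct ring_buffer *buf = tr->buffer;

	tr->buffer = max_tr.buffer;	/* the old max buffer goes live */
	max_tr.buffer = buf;		/* the captured buffer becomes the max */
	ring_buffer_reset(tr->buffer);	/* start the new live buffer clean */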
*/ for_each_tracing_cpu(i) { - data = tr->data[i]; - if (!head_page(data)) - continue; - tracing_reset(data); + tracing_reset(tr, i); } current_trace = type; tr->ctrl = 0; @@ -581,10 +495,7 @@ int register_tracer(struct tracer *type) } /* Only reset on passing, to avoid touching corrupted buffers */ for_each_tracing_cpu(i) { - data = tr->data[i]; - if (!head_page(data)) - continue; - tracing_reset(data); + tracing_reset(tr, i); } printk(KERN_CONT "PASSED\n"); } @@ -630,13 +541,9 @@ void unregister_tracer(struct tracer *type) mutex_unlock(&trace_types_lock); } -void tracing_reset(struct trace_array_cpu *data) +void tracing_reset(struct trace_array *tr, int cpu) { - data->trace_idx = 0; - data->overrun = 0; - data->trace_head = data->trace_tail = head_page(data); - data->trace_head_idx = 0; - data->trace_tail_idx = 0; + ring_buffer_reset_cpu(tr->buffer, cpu); } #define SAVED_CMDLINES 128 @@ -722,70 +629,6 @@ void tracing_record_cmdline(struct task_struct *tsk) trace_save_cmdline(tsk); } -static inline struct list_head * -trace_next_list(struct trace_array_cpu *data, struct list_head *next) -{ - /* - * Roundrobin - but skip the head (which is not a real page): - */ - next = next->next; - if (unlikely(next == &data->trace_pages)) - next = next->next; - BUG_ON(next == &data->trace_pages); - - return next; -} - -static inline void * -trace_next_page(struct trace_array_cpu *data, void *addr) -{ - struct list_head *next; - struct page *page; - - page = virt_to_page(addr); - - next = trace_next_list(data, &page->lru); - page = list_entry(next, struct page, lru); - - return page_address(page); -} - -struct trace_entry * -tracing_get_trace_entry(struct trace_array *tr, struct trace_array_cpu *data) -{ - unsigned long idx, idx_next; - struct trace_entry *entry; - - data->trace_idx++; - idx = data->trace_head_idx; - idx_next = idx + 1; - - BUG_ON(idx * TRACE_ENTRY_SIZE >= PAGE_SIZE); - - entry = data->trace_head + idx * TRACE_ENTRY_SIZE; - - if (unlikely(idx_next >= ENTRIES_PER_PAGE)) { - data->trace_head = trace_next_page(data, data->trace_head); - idx_next = 0; - } - - if (data->trace_head == data->trace_tail && - idx_next == data->trace_tail_idx) { - /* overrun */ - data->overrun++; - data->trace_tail_idx++; - if (data->trace_tail_idx >= ENTRIES_PER_PAGE) { - data->trace_tail = - trace_next_page(data, data->trace_tail); - data->trace_tail_idx = 0; - } - } - - data->trace_head_idx = idx_next; - - return entry; -} - void tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags) { @@ -796,7 +639,6 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags) entry->field.preempt_count = pc & 0xff; entry->field.pid = (tsk) ? tsk->pid : 0; - entry->field.t = ftrace_now(raw_smp_processor_id()); entry->field.flags = (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) | ((pc & HARDIRQ_MASK) ? 
TRACE_FLAG_HARDIRQ : 0) | @@ -808,18 +650,20 @@ void trace_function(struct trace_array *tr, struct trace_array_cpu *data, unsigned long ip, unsigned long parent_ip, unsigned long flags) { + struct ring_buffer_event *event; struct trace_entry *entry; unsigned long irq_flags; - raw_local_irq_save(irq_flags); - __raw_spin_lock(&data->lock); - entry = tracing_get_trace_entry(tr, data); + event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), + &irq_flags); + if (!event) + return; + entry = ring_buffer_event_data(event); tracing_generic_entry_update(entry, flags); entry->type = TRACE_FN; entry->field.fn.ip = ip; entry->field.fn.parent_ip = parent_ip; - __raw_spin_unlock(&data->lock); - raw_local_irq_restore(irq_flags); + ring_buffer_unlock_commit(tr->buffer, event, irq_flags); } void @@ -835,13 +679,19 @@ void __trace_stack(struct trace_array *tr, unsigned long flags, int skip) { + struct ring_buffer_event *event; struct trace_entry *entry; struct stack_trace trace; + unsigned long irq_flags; if (!(trace_flags & TRACE_ITER_STACKTRACE)) return; - entry = tracing_get_trace_entry(tr, data); + event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), + &irq_flags); + if (!event) + return; + entry = ring_buffer_event_data(event); tracing_generic_entry_update(entry, flags); entry->type = TRACE_STACK; @@ -853,28 +703,31 @@ void __trace_stack(struct trace_array *tr, trace.entries = entry->field.stack.caller; save_stack_trace(&trace); + ring_buffer_unlock_commit(tr->buffer, event, irq_flags); } void __trace_special(void *__tr, void *__data, unsigned long arg1, unsigned long arg2, unsigned long arg3) { + struct ring_buffer_event *event; struct trace_array_cpu *data = __data; struct trace_array *tr = __tr; struct trace_entry *entry; unsigned long irq_flags; - raw_local_irq_save(irq_flags); - __raw_spin_lock(&data->lock); - entry = tracing_get_trace_entry(tr, data); + event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), + &irq_flags); + if (!event) + return; + entry = ring_buffer_event_data(event); tracing_generic_entry_update(entry, 0); entry->type = TRACE_SPECIAL; entry->field.special.arg1 = arg1; entry->field.special.arg2 = arg2; entry->field.special.arg3 = arg3; + ring_buffer_unlock_commit(tr->buffer, event, irq_flags); __trace_stack(tr, data, irq_flags, 4); - __raw_spin_unlock(&data->lock); - raw_local_irq_restore(irq_flags); trace_wake_up(); } @@ -886,12 +739,15 @@ tracing_sched_switch_trace(struct trace_array *tr, struct task_struct *next, unsigned long flags) { + struct ring_buffer_event *event; struct trace_entry *entry; unsigned long irq_flags; - raw_local_irq_save(irq_flags); - __raw_spin_lock(&data->lock); - entry = tracing_get_trace_entry(tr, data); + event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), + &irq_flags); + if (!event) + return; + entry = ring_buffer_event_data(event); tracing_generic_entry_update(entry, flags); entry->type = TRACE_CTX; entry->field.ctx.prev_pid = prev->pid; @@ -901,9 +757,8 @@ tracing_sched_switch_trace(struct trace_array *tr, entry->field.ctx.next_prio = next->prio; entry->field.ctx.next_state = next->state; entry->field.ctx.next_cpu = task_cpu(next); + ring_buffer_unlock_commit(tr->buffer, event, irq_flags); __trace_stack(tr, data, flags, 5); - __raw_spin_unlock(&data->lock); - raw_local_irq_restore(irq_flags); } void @@ -913,12 +768,15 @@ tracing_sched_wakeup_trace(struct trace_array *tr, struct task_struct *curr, unsigned long flags) { + struct ring_buffer_event *event; struct trace_entry *entry; unsigned long irq_flags; - 
raw_local_irq_save(irq_flags); - __raw_spin_lock(&data->lock); - entry = tracing_get_trace_entry(tr, data); + event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), + &irq_flags); + if (!event) + return; + entry = ring_buffer_event_data(event); tracing_generic_entry_update(entry, flags); entry->type = TRACE_WAKE; entry->field.ctx.prev_pid = curr->pid; @@ -928,9 +786,8 @@ tracing_sched_wakeup_trace(struct trace_array *tr, entry->field.ctx.next_prio = wakee->prio; entry->field.ctx.next_state = wakee->state; entry->field.ctx.next_cpu = task_cpu(wakee); + ring_buffer_unlock_commit(tr->buffer, event, irq_flags); __trace_stack(tr, data, flags, 6); - __raw_spin_unlock(&data->lock); - raw_local_irq_restore(irq_flags); trace_wake_up(); } @@ -1011,183 +868,77 @@ enum trace_file_type { TRACE_FILE_LAT_FMT = 1, }; -/* Return the current entry. */ -static struct trace_entry * -trace_entry_idx(struct trace_array *tr, struct trace_array_cpu *data, - struct trace_iterator *iter, int cpu) -{ - struct page *page; - struct trace_entry *array; - - if (iter->next_idx[cpu] >= tr->entries || - iter->next_idx[cpu] >= data->trace_idx || - (data->trace_head == data->trace_tail && - data->trace_head_idx == data->trace_tail_idx)) - return NULL; - - if (!iter->next_page[cpu]) { - /* Initialize the iterator for this cpu trace buffer */ - WARN_ON(!data->trace_tail); - page = virt_to_page(data->trace_tail); - iter->next_page[cpu] = &page->lru; - iter->next_page_idx[cpu] = data->trace_tail_idx; - } - - page = list_entry(iter->next_page[cpu], struct page, lru); - BUG_ON(&data->trace_pages == &page->lru); - - array = page_address(page); - - WARN_ON(iter->next_page_idx[cpu] >= ENTRIES_PER_PAGE); - return &array[iter->next_page_idx[cpu]]; -} - -/* Increment the index counter of an iterator by one */ -static void __trace_iterator_increment(struct trace_iterator *iter, int cpu) -{ - iter->next_idx[cpu]++; - iter->next_page_idx[cpu]++; - - if (iter->next_page_idx[cpu] >= ENTRIES_PER_PAGE) { - struct trace_array_cpu *data = iter->tr->data[cpu]; - - iter->next_page_idx[cpu] = 0; - iter->next_page[cpu] = - trace_next_list(data, iter->next_page[cpu]); - } -} - static void trace_iterator_increment(struct trace_iterator *iter, int cpu) { iter->idx++; - __trace_iterator_increment(iter, cpu); + ring_buffer_read(iter->buffer_iter[iter->cpu], NULL); } static struct trace_entry * -trace_entry_next(struct trace_array *tr, struct trace_array_cpu *data, - struct trace_iterator *iter, int cpu) +peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts) { - struct list_head *next_page; - struct trace_entry *ent; - int idx, next_idx, next_page_idx; - - ent = trace_entry_idx(tr, tr->data[cpu], iter, cpu); - - if (likely(!ent || ent->type != TRACE_CONT)) - return ent; - - /* save the iterator details */ - idx = iter->idx; - next_idx = iter->next_idx[cpu]; - next_page_idx = iter->next_page_idx[cpu]; - next_page = iter->next_page[cpu]; - - /* find a real entry */ - do { - __trace_iterator_increment(iter, cpu); - ent = trace_entry_idx(tr, tr->data[cpu], iter, cpu); - } while (ent && ent->type != TRACE_CONT); - - /* reset the iterator */ - iter->idx = idx; - iter->next_idx[cpu] = next_idx; - iter->next_page_idx[cpu] = next_page_idx; - iter->next_page[cpu] = next_page; + struct ring_buffer_event *event; + struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu]; - return ent; + event = ring_buffer_iter_peek(buf_iter, ts); + return event ? 
ring_buffer_event_data(event) : NULL; } - static struct trace_entry * -__find_next_entry(struct trace_iterator *iter, int *ent_cpu, int inc) +__find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts) { - struct trace_array *tr = iter->tr; + struct ring_buffer *buffer = iter->tr->buffer; struct trace_entry *ent, *next = NULL; + u64 next_ts = 0, ts; int next_cpu = -1; int cpu; for_each_tracing_cpu(cpu) { - if (!head_page(tr->data[cpu])) - continue; - ent = trace_entry_idx(tr, tr->data[cpu], iter, cpu); + if (ring_buffer_empty_cpu(buffer, cpu)) + continue; - if (ent && ent->type == TRACE_CONT) { - struct trace_array_cpu *data = tr->data[cpu]; - - if (!inc) - ent = trace_entry_next(tr, data, iter, cpu); - else { - while (ent && ent->type == TRACE_CONT) { - __trace_iterator_increment(iter, cpu); - ent = trace_entry_idx(tr, tr->data[cpu], - iter, cpu); - } - } - } + ent = peek_next_entry(iter, cpu, &ts); /* * Pick the entry with the smallest timestamp: */ - if (ent && (!next || ent->field.t < next->field.t)) { + if (ent && (!next || ts < next_ts)) { next = ent; next_cpu = cpu; + next_ts = ts; } } if (ent_cpu) *ent_cpu = next_cpu; + if (ent_ts) + *ent_ts = next_ts; + return next; } /* Find the next real entry, without updating the iterator itself */ static struct trace_entry * -find_next_entry(struct trace_iterator *iter, int *ent_cpu) +find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts) { - return __find_next_entry(iter, ent_cpu, 0); + return __find_next_entry(iter, ent_cpu, ent_ts); } /* Find the next real entry, and increment the iterator to the next entry */ static void *find_next_entry_inc(struct trace_iterator *iter) { - struct trace_entry *next; - int next_cpu = -1; - - next = __find_next_entry(iter, &next_cpu, 1); - - iter->prev_ent = iter->ent; - iter->prev_cpu = iter->cpu; + iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts); - iter->ent = next; - iter->cpu = next_cpu; - - if (next) + if (iter->ent) trace_iterator_increment(iter, iter->cpu); - return next ? iter : NULL; + return iter->ent ? 
iter : NULL; } static void trace_consume(struct trace_iterator *iter) { - struct trace_array_cpu *data = iter->tr->data[iter->cpu]; - struct trace_entry *ent; - - again: - data->trace_tail_idx++; - if (data->trace_tail_idx >= ENTRIES_PER_PAGE) { - data->trace_tail = trace_next_page(data, data->trace_tail); - data->trace_tail_idx = 0; - } - - /* Check if we empty it, then reset the index */ - if (data->trace_head == data->trace_tail && - data->trace_head_idx == data->trace_tail_idx) - data->trace_idx = 0; - - ent = trace_entry_idx(iter->tr, iter->tr->data[iter->cpu], - iter, iter->cpu); - if (ent && ent->type == TRACE_CONT) - goto again; + ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts); } static void *s_next(struct seq_file *m, void *v, loff_t *pos) @@ -1220,7 +971,7 @@ static void *s_start(struct seq_file *m, loff_t *pos) struct trace_iterator *iter = m->private; void *p = NULL; loff_t l = 0; - int i; + int cpu; mutex_lock(&trace_types_lock); @@ -1239,12 +990,9 @@ static void *s_start(struct seq_file *m, loff_t *pos) iter->ent = NULL; iter->cpu = 0; iter->idx = -1; - iter->prev_ent = NULL; - iter->prev_cpu = -1; - for_each_tracing_cpu(i) { - iter->next_idx[i] = 0; - iter->next_page[i] = NULL; + for_each_tracing_cpu(cpu) { + ring_buffer_iter_reset(iter->buffer_iter[cpu]); } for (p = iter; p && l < *pos; p = s_next(m, p, &l)) @@ -1365,23 +1113,16 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter) struct trace_array *tr = iter->tr; struct trace_array_cpu *data = tr->data[tr->cpu]; struct tracer *type = current_trace; - unsigned long total = 0; - unsigned long entries = 0; - int cpu; + unsigned long total; + unsigned long entries; const char *name = "preemption"; if (type) name = type->name; - for_each_tracing_cpu(cpu) { - if (head_page(tr->data[cpu])) { - total += tr->data[cpu]->trace_idx; - if (tr->data[cpu]->trace_idx > tr->entries) - entries += tr->entries; - else - entries += tr->data[cpu]->trace_idx; - } - } + entries = ring_buffer_entries(iter->tr->buffer); + total = entries + + ring_buffer_overruns(iter->tr->buffer); seq_printf(m, "%s latency trace v1.1.5 on %s\n", name, UTS_RELEASE); @@ -1468,7 +1209,7 @@ lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu) unsigned long preempt_mark_thresh = 100; static void -lat_print_timestamp(struct trace_seq *s, unsigned long long abs_usecs, +lat_print_timestamp(struct trace_seq *s, u64 abs_usecs, unsigned long rel_usecs) { trace_seq_printf(s, " %4lldus", abs_usecs); @@ -1488,12 +1229,10 @@ static const char state_to_char[] = TASK_STATE_TO_CHAR_STR; */ void trace_seq_print_cont(struct trace_seq *s, struct trace_iterator *iter) { - struct trace_array *tr = iter->tr; - struct trace_array_cpu *data = tr->data[iter->cpu]; struct trace_entry *ent; bool ok = true; - ent = trace_entry_idx(tr, data, iter, iter->cpu); + ent = peek_next_entry(iter, iter->cpu, NULL); if (!ent || ent->type != TRACE_CONT) { trace_seq_putc(s, '\n'); return; @@ -1502,8 +1241,8 @@ void trace_seq_print_cont(struct trace_seq *s, struct trace_iterator *iter) do { if (ok) ok = (trace_seq_printf(s, "%s", ent->cont.buf) > 0); - __trace_iterator_increment(iter, iter->cpu); - ent = trace_entry_idx(tr, data, iter, iter->cpu); + ring_buffer_read(iter->buffer_iter[iter->cpu], NULL); + ent = peek_next_entry(iter, iter->cpu, NULL); } while (ent && ent->type == TRACE_CONT); if (!ok) @@ -1515,25 +1254,26 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) { struct trace_seq *s = &iter->seq; unsigned long 
sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); - struct trace_entry *next_entry = find_next_entry(iter, NULL); + struct trace_entry *next_entry; unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE); struct trace_entry *entry = iter->ent; struct trace_field *field = &entry->field; unsigned long abs_usecs; unsigned long rel_usecs; + u64 next_ts; char *comm; int S, T; int i; unsigned state; - if (!next_entry) - next_entry = entry; - if (entry->type == TRACE_CONT) return 1; - rel_usecs = ns2usecs(next_entry->field.t - entry->field.t); - abs_usecs = ns2usecs(entry->field.t - iter->tr->time_start); + next_entry = find_next_entry(iter, NULL, &next_ts); + if (!next_entry) + next_ts = iter->ts; + rel_usecs = ns2usecs(next_ts - iter->ts); + abs_usecs = ns2usecs(iter->ts - iter->tr->time_start); if (verbose) { comm = trace_find_cmdline(field->pid); @@ -1542,7 +1282,7 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) comm, field->pid, cpu, field->flags, field->preempt_count, trace_idx, - ns2usecs(field->t), + ns2usecs(iter->ts), abs_usecs/1000, abs_usecs % 1000, rel_usecs/1000, rel_usecs % 1000); @@ -1627,7 +1367,7 @@ static int print_trace_fmt(struct trace_iterator *iter) comm = trace_find_cmdline(iter->ent->field.pid); - t = ns2usecs(field->t); + t = ns2usecs(iter->ts); usec_rem = do_div(t, 1000000ULL); secs = (unsigned long)t; @@ -1732,7 +1472,7 @@ static int print_raw_fmt(struct trace_iterator *iter) field = &entry->field; ret = trace_seq_printf(s, "%d %d %llu ", - field->pid, iter->cpu, field->t); + field->pid, iter->cpu, iter->ts); if (!ret) return 0; @@ -1811,7 +1551,7 @@ static int print_hex_fmt(struct trace_iterator *iter) SEQ_PUT_HEX_FIELD_RET(s, field->pid); SEQ_PUT_HEX_FIELD_RET(s, iter->cpu); - SEQ_PUT_HEX_FIELD_RET(s, field->t); + SEQ_PUT_HEX_FIELD_RET(s, iter->ts); switch (entry->type) { case TRACE_FN: @@ -1861,7 +1601,7 @@ static int print_bin_fmt(struct trace_iterator *iter) SEQ_PUT_FIELD_RET(s, field->pid); SEQ_PUT_FIELD_RET(s, field->cpu); - SEQ_PUT_FIELD_RET(s, field->t); + SEQ_PUT_FIELD_RET(s, iter->ts); switch (entry->type) { case TRACE_FN: @@ -1888,15 +1628,10 @@ static int print_bin_fmt(struct trace_iterator *iter) static int trace_empty(struct trace_iterator *iter) { - struct trace_array_cpu *data; int cpu; for_each_tracing_cpu(cpu) { - data = iter->tr->data[cpu]; - - if (head_page(data) && data->trace_idx && - (data->trace_tail != data->trace_head || - data->trace_tail_idx != data->trace_head_idx)) + if (!ring_buffer_iter_empty(iter->buffer_iter[cpu])) return 0; } return 1; @@ -1961,6 +1696,8 @@ static struct trace_iterator * __tracing_open(struct inode *inode, struct file *file, int *ret) { struct trace_iterator *iter; + struct seq_file *m; + int cpu; if (tracing_disabled) { *ret = -ENODEV; @@ -1981,28 +1718,43 @@ __tracing_open(struct inode *inode, struct file *file, int *ret) iter->trace = current_trace; iter->pos = -1; + for_each_tracing_cpu(cpu) { + iter->buffer_iter[cpu] = + ring_buffer_read_start(iter->tr->buffer, cpu); + if (!iter->buffer_iter[cpu]) + goto fail_buffer; + } + /* TODO stop tracer */ *ret = seq_open(file, &tracer_seq_ops); - if (!*ret) { - struct seq_file *m = file->private_data; - m->private = iter; + if (*ret) + goto fail_buffer; - /* stop the trace while dumping */ - if (iter->tr->ctrl) { - tracer_enabled = 0; - ftrace_function_enabled = 0; - } + m = file->private_data; + m->private = iter; - if (iter->trace && iter->trace->open) - iter->trace->open(iter); - } else { - kfree(iter); - iter = NULL; + /* stop the 
trace while dumping */ + if (iter->tr->ctrl) { + tracer_enabled = 0; + ftrace_function_enabled = 0; } + + if (iter->trace && iter->trace->open) + iter->trace->open(iter); + mutex_unlock(&trace_types_lock); out: return iter; + + fail_buffer: + for_each_tracing_cpu(cpu) { + if (iter->buffer_iter[cpu]) + ring_buffer_read_finish(iter->buffer_iter[cpu]); + } + mutex_unlock(&trace_types_lock); + + return ERR_PTR(-ENOMEM); } int tracing_open_generic(struct inode *inode, struct file *filp) @@ -2018,8 +1770,14 @@ int tracing_release(struct inode *inode, struct file *file) { struct seq_file *m = (struct seq_file *)file->private_data; struct trace_iterator *iter = m->private; + int cpu; mutex_lock(&trace_types_lock); + for_each_tracing_cpu(cpu) { + if (iter->buffer_iter[cpu]) + ring_buffer_read_finish(iter->buffer_iter[cpu]); + } + if (iter->trace && iter->trace->close) iter->trace->close(iter); @@ -2526,6 +2284,7 @@ static atomic_t tracing_reader; static int tracing_open_pipe(struct inode *inode, struct file *filp) { struct trace_iterator *iter; + int cpu; if (tracing_disabled) return -ENODEV; @@ -2546,17 +2305,38 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp) iter->trace = current_trace; filp->private_data = iter; + for_each_tracing_cpu(cpu) { + iter->buffer_iter[cpu] = + ring_buffer_read_start(iter->tr->buffer, cpu); + if (!iter->buffer_iter[cpu]) + goto fail_buffer; + } + if (iter->trace->pipe_open) iter->trace->pipe_open(iter); mutex_unlock(&trace_types_lock); return 0; + + fail_buffer: + for_each_tracing_cpu(cpu) { + if (iter->buffer_iter[cpu]) + ring_buffer_read_finish(iter->buffer_iter[cpu]); + } + mutex_unlock(&trace_types_lock); + + return -ENOMEM; } static int tracing_release_pipe(struct inode *inode, struct file *file) { struct trace_iterator *iter = file->private_data; + int cpu; + for_each_tracing_cpu(cpu) { + if (iter->buffer_iter[cpu]) + ring_buffer_read_finish(iter->buffer_iter[cpu]); + } kfree(iter); atomic_dec(&tracing_reader); @@ -2592,13 +2372,10 @@ tracing_read_pipe(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_iterator *iter = filp->private_data; - struct trace_array_cpu *data; - static cpumask_t mask; unsigned long flags; #ifdef CONFIG_FTRACE int ftrace_save; #endif - int cpu; ssize_t sret; /* return any leftover data */ @@ -2687,32 +2464,13 @@ tracing_read_pipe(struct file *filp, char __user *ubuf, * and then release the locks again. 
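[ Editor's note: the write-side counterpart of this read-path cleanup is the reserve/commit idiom used throughout this patch -- trace_function(), __trace_stack(), __trace_special(), the sched and mmio tracers, and trace_vprintk() all follow the same skeleton: ]

	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned long irq_flags;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		return;		/* no space or recording disabled: drop it */
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(entry, flags);
	/* ... fill in the type-specific fields ... */
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);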
*/ - cpus_clear(mask); - local_irq_save(flags); + local_irq_disable(); #ifdef CONFIG_FTRACE ftrace_save = ftrace_enabled; ftrace_enabled = 0; #endif smp_wmb(); - for_each_tracing_cpu(cpu) { - data = iter->tr->data[cpu]; - - if (!head_page(data) || !data->trace_idx) - continue; - - atomic_inc(&data->disabled); - cpu_set(cpu, mask); - } - - for_each_cpu_mask(cpu, mask) { - data = iter->tr->data[cpu]; - __raw_spin_lock(&data->lock); - - if (data->overrun > iter->last_overrun[cpu]) - iter->overrun[cpu] += - data->overrun - iter->last_overrun[cpu]; - iter->last_overrun[cpu] = data->overrun; - } + ring_buffer_lock(iter->tr->buffer, &flags); while (find_next_entry_inc(iter) != NULL) { int ret; @@ -2731,19 +2489,11 @@ tracing_read_pipe(struct file *filp, char __user *ubuf, break; } - for_each_cpu_mask(cpu, mask) { - data = iter->tr->data[cpu]; - __raw_spin_unlock(&data->lock); - } - - for_each_cpu_mask(cpu, mask) { - data = iter->tr->data[cpu]; - atomic_dec(&data->disabled); - } + ring_buffer_unlock(iter->tr->buffer, flags); #ifdef CONFIG_FTRACE ftrace_enabled = ftrace_save; #endif - local_irq_restore(flags); + local_irq_enable(); /* Now copy what we have to the user */ sret = trace_seq_to_user(&iter->seq, ubuf, cnt); @@ -2776,7 +2526,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf, { unsigned long val; char buf[64]; - int i, ret; + int ret; struct trace_array *tr = filp->private_data; if (cnt >= sizeof(buf)) @@ -2804,52 +2554,31 @@ tracing_entries_write(struct file *filp, const char __user *ubuf, goto out; } - if (val > global_trace.entries) { - long pages_requested; - unsigned long freeable_pages; - - /* make sure we have enough memory before mapping */ - pages_requested = - (val + (ENTRIES_PER_PAGE-1)) / ENTRIES_PER_PAGE; - - /* account for each buffer (and max_tr) */ - pages_requested *= tracing_nr_buffers * 2; - - /* Check for overflow */ - if (pages_requested < 0) { - cnt = -ENOMEM; - goto out; - } - - freeable_pages = determine_dirtyable_memory(); - - /* we only allow to request 1/4 of useable memory */ - if (pages_requested > - ((freeable_pages + tracing_pages_allocated) / 4)) { - cnt = -ENOMEM; + if (val != global_trace.entries) { + ret = ring_buffer_resize(global_trace.buffer, val); + if (ret < 0) { + cnt = ret; goto out; } - while (global_trace.entries < val) { - if (trace_alloc_page()) { - cnt = -ENOMEM; - goto out; + ret = ring_buffer_resize(max_tr.buffer, val); + if (ret < 0) { + int r; + cnt = ret; + r = ring_buffer_resize(global_trace.buffer, + global_trace.entries); + if (r < 0) { + /* AARGH! We are left with different + * size max buffer!!!! 
*/ + WARN_ON(1); + tracing_disabled = 1; } - /* double check that we don't go over the known pages */ - if (tracing_pages_allocated > pages_requested) - break; + goto out; } - } else { - /* include the number of entries in val (inc of page entries) */ - while (global_trace.entries > val + (ENTRIES_PER_PAGE - 1)) - trace_free_page(); + global_trace.entries = val; } - /* check integrity */ - for_each_tracing_cpu(i) - check_pages(global_trace.data[i]); - filp->f_pos += cnt; /* If check pages failed, return ENOMEM */ @@ -3086,10 +2815,11 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args) static DEFINE_SPINLOCK(trace_buf_lock); static char trace_buf[TRACE_BUF_SIZE]; + struct ring_buffer_event *event; struct trace_array *tr = &global_trace; struct trace_array_cpu *data; struct trace_entry *entry; - unsigned long flags; + unsigned long flags, irq_flags; long disabled; int cpu, len = 0, write, written = 0; @@ -3110,8 +2840,11 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args) len = min(len, TRACE_BUF_SIZE-1); trace_buf[len] = 0; - __raw_spin_lock(&data->lock); - entry = tracing_get_trace_entry(tr, data); + event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), + &irq_flags); + if (!event) + goto out_unlock; + entry = ring_buffer_event_data(event); tracing_generic_entry_update(entry, flags); entry->type = TRACE_PRINT; entry->field.print.ip = ip; @@ -3121,21 +2854,27 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args) memcpy(&entry->field.print.buf, trace_buf, write); entry->field.print.buf[write] = 0; written = write; + ring_buffer_unlock_commit(tr->buffer, event, irq_flags); if (written != len) entry->field.flags |= TRACE_FLAG_CONT; while (written != len) { - entry = tracing_get_trace_entry(tr, data); + event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), + &irq_flags); + if (!event) + goto out_unlock; + entry = ring_buffer_event_data(event); entry->type = TRACE_CONT; write = min(len - written, (int)(TRACE_CONT_BUF_SIZE-1)); memcpy(&entry->cont.buf, trace_buf+written, write); entry->cont.buf[write] = 0; written += write; + ring_buffer_unlock_commit(tr->buffer, event, irq_flags); } - __raw_spin_unlock(&data->lock); + out_unlock: spin_unlock(&trace_buf_lock); out: @@ -3227,12 +2966,10 @@ void ftrace_dump(void) static DEFINE_SPINLOCK(ftrace_dump_lock); /* use static because iter can be a bit big for the stack */ static struct trace_iterator iter; - struct trace_array_cpu *data; static cpumask_t mask; static int dump_ran; - unsigned long flags; + unsigned long flags, irq_flags; int cnt = 0; - int cpu; /* only one dump */ spin_lock_irqsave(&ftrace_dump_lock, flags); @@ -3258,25 +2995,7 @@ void ftrace_dump(void) cpus_clear(mask); - for_each_tracing_cpu(cpu) { - data = iter.tr->data[cpu]; - - if (!head_page(data) || !data->trace_idx) - continue; - - atomic_inc(&data->disabled); - cpu_set(cpu, mask); - } - - for_each_cpu_mask(cpu, mask) { - data = iter.tr->data[cpu]; - __raw_spin_lock(&data->lock); - - if (data->overrun > iter.last_overrun[cpu]) - iter.overrun[cpu] += - data->overrun - iter.last_overrun[cpu]; - iter.last_overrun[cpu] = data->overrun; - } + ring_buffer_lock(iter.tr->buffer, &irq_flags); while (!trace_empty(&iter)) { @@ -3305,205 +3024,47 @@ void ftrace_dump(void) else printk(KERN_TRACE "---------------------------------\n"); - for_each_cpu_mask(cpu, mask) { - data = iter.tr->data[cpu]; - __raw_spin_unlock(&data->lock); - } - - for_each_cpu_mask(cpu, mask) { - data = iter.tr->data[cpu]; - atomic_dec(&data->disabled); - } 
- + ring_buffer_unlock(iter.tr->buffer, irq_flags); out: spin_unlock_irqrestore(&ftrace_dump_lock, flags); } -static int trace_alloc_page(void) +__init static int tracer_alloc_buffers(void) { struct trace_array_cpu *data; - struct page *page, *tmp; - LIST_HEAD(pages); - void *array; - unsigned pages_allocated = 0; int i; - /* first allocate a page for each CPU */ - for_each_tracing_cpu(i) { - array = (void *)__get_free_page(GFP_KERNEL); - if (array == NULL) { - printk(KERN_ERR "tracer: failed to allocate page" - "for trace buffer!\n"); - goto free_pages; - } - - pages_allocated++; - page = virt_to_page(array); - list_add(&page->lru, &pages); + /* TODO: make the number of buffers hot pluggable with CPUS */ + tracing_buffer_mask = cpu_possible_map; -/* Only allocate if we are actually using the max trace */ -#ifdef CONFIG_TRACER_MAX_TRACE - array = (void *)__get_free_page(GFP_KERNEL); - if (array == NULL) { - printk(KERN_ERR "tracer: failed to allocate page" - "for trace buffer!\n"); - goto free_pages; - } - pages_allocated++; - page = virt_to_page(array); - list_add(&page->lru, &pages); -#endif + global_trace.buffer = ring_buffer_alloc(trace_buf_size, + TRACE_BUFFER_FLAGS); + if (!global_trace.buffer) { + printk(KERN_ERR "tracer: failed to allocate ring buffer!\n"); + WARN_ON(1); + return 0; } - - /* Now that we successfully allocate a page per CPU, add them */ - for_each_tracing_cpu(i) { - data = global_trace.data[i]; - page = list_entry(pages.next, struct page, lru); - list_del_init(&page->lru); - list_add_tail(&page->lru, &data->trace_pages); - ClearPageLRU(page); + global_trace.entries = ring_buffer_size(global_trace.buffer); #ifdef CONFIG_TRACER_MAX_TRACE - data = max_tr.data[i]; - page = list_entry(pages.next, struct page, lru); - list_del_init(&page->lru); - list_add_tail(&page->lru, &data->trace_pages); - SetPageLRU(page); -#endif - } - tracing_pages_allocated += pages_allocated; - global_trace.entries += ENTRIES_PER_PAGE; - - return 0; - - free_pages: - list_for_each_entry_safe(page, tmp, &pages, lru) { - list_del_init(&page->lru); - __free_page(page); + max_tr.buffer = ring_buffer_alloc(trace_buf_size, + TRACE_BUFFER_FLAGS); + if (!max_tr.buffer) { + printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n"); + WARN_ON(1); + ring_buffer_free(global_trace.buffer); + return 0; } - return -ENOMEM; -} - -static int trace_free_page(void) -{ - struct trace_array_cpu *data; - struct page *page; - struct list_head *p; - int i; - int ret = 0; - - /* free one page from each buffer */ - for_each_tracing_cpu(i) { - data = global_trace.data[i]; - p = data->trace_pages.next; - if (p == &data->trace_pages) { - /* should never happen */ - WARN_ON(1); - tracing_disabled = 1; - ret = -1; - break; - } - page = list_entry(p, struct page, lru); - ClearPageLRU(page); - list_del(&page->lru); - tracing_pages_allocated--; - tracing_pages_allocated--; - __free_page(page); - - tracing_reset(data); - -#ifdef CONFIG_TRACER_MAX_TRACE - data = max_tr.data[i]; - p = data->trace_pages.next; - if (p == &data->trace_pages) { - /* should never happen */ - WARN_ON(1); - tracing_disabled = 1; - ret = -1; - break; - } - page = list_entry(p, struct page, lru); - ClearPageLRU(page); - list_del(&page->lru); - __free_page(page); - - tracing_reset(data); + max_tr.entries = ring_buffer_size(max_tr.buffer); + WARN_ON(max_tr.entries != global_trace.entries); #endif - } - global_trace.entries -= ENTRIES_PER_PAGE; - - return ret; -} - -__init static int tracer_alloc_buffers(void) -{ - struct trace_array_cpu *data; - void 
*array; - struct page *page; - int pages = 0; - int ret = -ENOMEM; - int i; - - /* TODO: make the number of buffers hot pluggable with CPUS */ - tracing_nr_buffers = num_possible_cpus(); - tracing_buffer_mask = cpu_possible_map; /* Allocate the first page for all buffers */ for_each_tracing_cpu(i) { data = global_trace.data[i] = &per_cpu(global_trace_cpu, i); max_tr.data[i] = &per_cpu(max_data, i); - - array = (void *)__get_free_page(GFP_KERNEL); - if (array == NULL) { - printk(KERN_ERR "tracer: failed to allocate page" - "for trace buffer!\n"); - goto free_buffers; - } - - /* set the array to the list */ - INIT_LIST_HEAD(&data->trace_pages); - page = virt_to_page(array); - list_add(&page->lru, &data->trace_pages); - /* use the LRU flag to differentiate the two buffers */ - ClearPageLRU(page); - - data->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; - max_tr.data[i]->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; - -/* Only allocate if we are actually using the max trace */ -#ifdef CONFIG_TRACER_MAX_TRACE - array = (void *)__get_free_page(GFP_KERNEL); - if (array == NULL) { - printk(KERN_ERR "tracer: failed to allocate page" - "for trace buffer!\n"); - goto free_buffers; - } - - INIT_LIST_HEAD(&max_tr.data[i]->trace_pages); - page = virt_to_page(array); - list_add(&page->lru, &max_tr.data[i]->trace_pages); - SetPageLRU(page); -#endif - } - - /* - * Since we allocate by orders of pages, we may be able to - * round up a bit. - */ - global_trace.entries = ENTRIES_PER_PAGE; - pages++; - - while (global_trace.entries < trace_nr_entries) { - if (trace_alloc_page()) - break; - pages++; } - max_tr.entries = global_trace.entries; - - pr_info("tracer: %d pages allocated for %ld entries of %ld bytes\n", - pages, trace_nr_entries, (long)TRACE_ENTRY_SIZE); - pr_info(" actual entries %ld\n", global_trace.entries); trace_init_cmdlines(); @@ -3519,38 +3080,13 @@ __init static int tracer_alloc_buffers(void) /* All seems OK, enable tracing */ global_trace.ctrl = tracer_enabled; tracing_disabled = 0; + atomic_notifier_chain_register(&panic_notifier_list, &trace_panic_notifier); register_die_notifier(&trace_die_notifier); return 0; - - free_buffers: - for (i-- ; i >= 0; i--) { - struct page *page, *tmp; - struct trace_array_cpu *data = global_trace.data[i]; - - if (data) { - list_for_each_entry_safe(page, tmp, - &data->trace_pages, lru) { - list_del_init(&page->lru); - __free_page(page); - } - } - -#ifdef CONFIG_TRACER_MAX_TRACE - data = max_tr.data[i]; - if (data) { - list_for_each_entry_safe(page, tmp, - &data->trace_pages, lru) { - list_del_init(&page->lru); - __free_page(page); - } - } -#endif - } - return ret; } early_initcall(tracer_alloc_buffers); fs_initcall(tracer_init_debugfs); diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index b28bf8812ef..f6965f775b4 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -5,6 +5,7 @@ #include #include #include +#include #include #include @@ -102,7 +103,6 @@ struct trace_field { char flags; char preempt_count; int pid; - cycle_t t; union { struct ftrace_entry fn; struct ctx_switch_entry ctx; @@ -139,16 +139,9 @@ struct trace_entry { * the trace, etc.) 
*/ struct trace_array_cpu { - struct list_head trace_pages; atomic_t disabled; - raw_spinlock_t lock; - struct lock_class_key lock_key; /* these fields get copied into max-trace: */ - unsigned trace_head_idx; - unsigned trace_tail_idx; - void *trace_head; /* producer */ - void *trace_tail; /* consumer */ unsigned long trace_idx; unsigned long overrun; unsigned long saved_latency; @@ -172,6 +165,7 @@ struct trace_iterator; * They have on/off state as well: */ struct trace_array { + struct ring_buffer *buffer; unsigned long entries; long ctrl; int cpu; @@ -219,27 +213,21 @@ struct trace_iterator { struct trace_array *tr; struct tracer *trace; void *private; - long last_overrun[NR_CPUS]; - long overrun[NR_CPUS]; + struct ring_buffer_iter *buffer_iter[NR_CPUS]; /* The below is zeroed out in pipe_read */ struct trace_seq seq; struct trace_entry *ent; int cpu; - - struct trace_entry *prev_ent; - int prev_cpu; + u64 ts; unsigned long iter_flags; loff_t pos; - unsigned long next_idx[NR_CPUS]; - struct list_head *next_page[NR_CPUS]; - unsigned next_page_idx[NR_CPUS]; long idx; }; void trace_wake_up(void); -void tracing_reset(struct trace_array_cpu *data); +void tracing_reset(struct trace_array *tr, int cpu); int tracing_open_generic(struct inode *inode, struct file *filp); struct dentry *tracing_init_dentry(void); void init_tracer_sysprof_debugfs(struct dentry *d_tracer); diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c index d5c9e2e4a9c..3657eec6b87 100644 --- a/kernel/trace/trace_boot.c +++ b/kernel/trace/trace_boot.c @@ -34,7 +34,7 @@ static void boot_trace_init(struct trace_array *tr) trace_boot_enabled = 0; for_each_cpu_mask(cpu, cpu_possible_map) - tracing_reset(tr->data[cpu]); + tracing_reset(tr, cpu); } static void boot_trace_ctrl_update(struct trace_array *tr) @@ -74,6 +74,7 @@ struct tracer boot_tracer __read_mostly = void trace_boot(struct boot_trace *it) { + struct ring_buffer_event *event; struct trace_entry *entry; struct trace_array_cpu *data; unsigned long irq_flags; @@ -85,17 +86,18 @@ void trace_boot(struct boot_trace *it) preempt_disable(); data = tr->data[smp_processor_id()]; - raw_local_irq_save(irq_flags); - __raw_spin_lock(&data->lock); - - entry = tracing_get_trace_entry(tr, data); + event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), + &irq_flags); + if (!event) + goto out; + entry = ring_buffer_event_data(event); tracing_generic_entry_update(entry, 0); entry->type = TRACE_BOOT; entry->field.initcall = *it; + ring_buffer_unlock_commit(tr->buffer, event, irq_flags); - __raw_spin_unlock(&data->lock); - raw_local_irq_restore(irq_flags); trace_wake_up(); + out: preempt_enable(); } diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index 31214489797..e90eb0c2c56 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c @@ -23,7 +23,7 @@ static void function_reset(struct trace_array *tr) tr->time_start = ftrace_now(tr->cpu); for_each_online_cpu(cpu) - tracing_reset(tr->data[cpu]); + tracing_reset(tr, cpu); } static void start_function_trace(struct trace_array *tr) diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index ece6cfb649f..37ad49407f2 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c @@ -173,7 +173,7 @@ out_unlock: out: data->critical_sequence = max_sequence; data->preempt_timestamp = ftrace_now(cpu); - tracing_reset(data); + tracing_reset(tr, cpu); trace_function(tr, data, CALLER_ADDR0, parent_ip, flags); } @@ -203,7 +203,7 @@ 
start_critical_timing(unsigned long ip, unsigned long parent_ip) data->critical_sequence = max_sequence; data->preempt_timestamp = ftrace_now(cpu); data->critical_start = parent_ip ? : ip; - tracing_reset(data); + tracing_reset(tr, cpu); local_save_flags(flags); @@ -234,7 +234,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip) data = tr->data[cpu]; - if (unlikely(!data) || unlikely(!head_page(data)) || + if (unlikely(!data) || !data->critical_start || atomic_read(&data->disabled)) return; diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c index a108c326f36..bdbf09d8413 100644 --- a/kernel/trace/trace_mmiotrace.c +++ b/kernel/trace/trace_mmiotrace.c @@ -27,7 +27,7 @@ static void mmio_reset_data(struct trace_array *tr) tr->time_start = ftrace_now(tr->cpu); for_each_online_cpu(cpu) - tracing_reset(tr->data[cpu]); + tracing_reset(tr, cpu); } static void mmio_trace_init(struct trace_array *tr) @@ -130,10 +130,14 @@ static unsigned long count_overruns(struct trace_iterator *iter) { int cpu; unsigned long cnt = 0; +/* FIXME: */ +#if 0 for_each_online_cpu(cpu) { cnt += iter->overrun[cpu]; iter->overrun[cpu] = 0; } +#endif + (void)cpu; return cnt; } @@ -176,7 +180,7 @@ static int mmio_print_rw(struct trace_iterator *iter) struct trace_entry *entry = iter->ent; struct mmiotrace_rw *rw = &entry->field.mmiorw; struct trace_seq *s = &iter->seq; - unsigned long long t = ns2usecs(entry->field.t); + unsigned long long t = ns2usecs(iter->ts); unsigned long usec_rem = do_div(t, 1000000ULL); unsigned secs = (unsigned long)t; int ret = 1; @@ -218,7 +222,7 @@ static int mmio_print_map(struct trace_iterator *iter) struct trace_entry *entry = iter->ent; struct mmiotrace_map *m = &entry->field.mmiomap; struct trace_seq *s = &iter->seq; - unsigned long long t = ns2usecs(entry->field.t); + unsigned long long t = ns2usecs(iter->ts); unsigned long usec_rem = do_div(t, 1000000ULL); unsigned secs = (unsigned long)t; int ret = 1; @@ -250,7 +254,7 @@ static int mmio_print_mark(struct trace_iterator *iter) struct trace_entry *entry = iter->ent; const char *msg = entry->field.print.buf; struct trace_seq *s = &iter->seq; - unsigned long long t = ns2usecs(entry->field.t); + unsigned long long t = ns2usecs(iter->ts); unsigned long usec_rem = do_div(t, 1000000ULL); unsigned secs = (unsigned long)t; int ret; @@ -303,19 +307,19 @@ static void __trace_mmiotrace_rw(struct trace_array *tr, struct trace_array_cpu *data, struct mmiotrace_rw *rw) { + struct ring_buffer_event *event; struct trace_entry *entry; unsigned long irq_flags; - raw_local_irq_save(irq_flags); - __raw_spin_lock(&data->lock); - - entry = tracing_get_trace_entry(tr, data); + event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), + &irq_flags); + if (!event) + return; + entry = ring_buffer_event_data(event); tracing_generic_entry_update(entry, 0); entry->type = TRACE_MMIO_RW; entry->field.mmiorw = *rw; - - __raw_spin_unlock(&data->lock); - raw_local_irq_restore(irq_flags); + ring_buffer_unlock_commit(tr->buffer, event, irq_flags); trace_wake_up(); } @@ -331,19 +335,19 @@ static void __trace_mmiotrace_map(struct trace_array *tr, struct trace_array_cpu *data, struct mmiotrace_map *map) { + struct ring_buffer_event *event; struct trace_entry *entry; unsigned long irq_flags; - raw_local_irq_save(irq_flags); - __raw_spin_lock(&data->lock); - - entry = tracing_get_trace_entry(tr, data); + event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), + &irq_flags); + if (!event) + return; + entry = 
ring_buffer_event_data(event); tracing_generic_entry_update(entry, 0); entry->type = TRACE_MMIO_MAP; entry->field.mmiomap = *map; - - __raw_spin_unlock(&data->lock); - raw_local_irq_restore(irq_flags); + ring_buffer_unlock_commit(tr->buffer, event, irq_flags); trace_wake_up(); } diff --git a/kernel/trace/trace_nop.c b/kernel/trace/trace_nop.c index 16c9ba060ba..4592b486251 100644 --- a/kernel/trace/trace_nop.c +++ b/kernel/trace/trace_nop.c @@ -30,7 +30,7 @@ static void nop_trace_init(struct trace_array *tr) ctx_trace = tr; for_each_online_cpu(cpu) - tracing_reset(tr->data[cpu]); + tracing_reset(tr, cpu); if (tr->ctrl) start_nop_trace(tr); diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c index 789e927abc9..e0b06db0f7a 100644 --- a/kernel/trace/trace_sched_switch.c +++ b/kernel/trace/trace_sched_switch.c @@ -81,7 +81,7 @@ static void sched_switch_reset(struct trace_array *tr) tr->time_start = ftrace_now(tr->cpu); for_each_online_cpu(cpu) - tracing_reset(tr->data[cpu]); + tracing_reset(tr, cpu); } static int tracing_sched_register(void) diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 08206b4e29c..01e75e0639b 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c @@ -191,7 +191,7 @@ static void __wakeup_reset(struct trace_array *tr) for_each_possible_cpu(cpu) { data = tr->data[cpu]; - tracing_reset(data); + tracing_reset(tr, cpu); } wakeup_cpu = -1; diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index 5ebd4b13549..09cf230d7ec 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c @@ -18,58 +18,20 @@ static inline int trace_valid_entry(struct trace_entry *entry) return 0; } -static int -trace_test_buffer_cpu(struct trace_array *tr, struct trace_array_cpu *data) +static int trace_test_buffer_cpu(struct trace_array *tr, int cpu) { - struct trace_entry *entries; - struct page *page; - int idx = 0; - int i; + struct ring_buffer_event *event; + struct trace_entry *entry; - BUG_ON(list_empty(&data->trace_pages)); - page = list_entry(data->trace_pages.next, struct page, lru); - entries = page_address(page); + while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) { + entry = ring_buffer_event_data(event); - check_pages(data); - if (head_page(data) != entries) - goto failed; - - /* - * The starting trace buffer always has valid elements, - * if any element exists. - */ - entries = head_page(data); - - for (i = 0; i < tr->entries; i++) { - - if (i < data->trace_idx && !trace_valid_entry(&entries[idx])) { + if (!trace_valid_entry(entry)) { printk(KERN_CONT ".. invalid entry %d ", - entries[idx].type); + entry->type); goto failed; } - - idx++; - if (idx >= ENTRIES_PER_PAGE) { - page = virt_to_page(entries); - if (page->lru.next == &data->trace_pages) { - if (i != tr->entries - 1) { - printk(KERN_CONT ".. entries buffer mismatch"); - goto failed; - } - } else { - page = list_entry(page->lru.next, struct page, lru); - entries = page_address(page); - } - idx = 0; - } - } - - page = virt_to_page(entries); - if (page->lru.next != &data->trace_pages) { - printk(KERN_CONT ".. 
too many entries"); - goto failed; } - return 0; failed: @@ -91,13 +53,11 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count) /* Don't allow flipping of max traces now */ raw_local_irq_save(flags); __raw_spin_lock(&ftrace_max_lock); - for_each_possible_cpu(cpu) { - if (!head_page(tr->data[cpu])) - continue; - cnt += tr->data[cpu]->trace_idx; + cnt = ring_buffer_entries(tr->buffer); - ret = trace_test_buffer_cpu(tr, tr->data[cpu]); + for_each_possible_cpu(cpu) { + ret = trace_test_buffer_cpu(tr, cpu); if (ret) break; } diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c index db58fb66a13..9587d3bcba5 100644 --- a/kernel/trace/trace_sysprof.c +++ b/kernel/trace/trace_sysprof.c @@ -241,7 +241,7 @@ static void stack_reset(struct trace_array *tr) tr->time_start = ftrace_now(tr->cpu); for_each_online_cpu(cpu) - tracing_reset(tr->data[cpu]); + tracing_reset(tr, cpu); } static void start_stack_trace(struct trace_array *tr) -- cgit v1.2.3-70-g09d2 From 777e208d40d0953efc6fb4ab58590da3f7d8f02d Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Mon, 29 Sep 2008 23:02:42 -0400 Subject: ftrace: take advantage of variable length entries Now that the underlining ring buffer for ftrace now hold variable length entries, we can take advantage of this by only storing the size of the actual event into the buffer. This happens to increase the number of entries in the buffer dramatically. We can also get rid of the "trace_cont" operation, but I'm keeping that until we have no more users. Some of the ftrace tracers can now change their code to adapt to this new feature. Signed-off-by: Steven Rostedt Signed-off-by: Ingo Molnar --- kernel/trace/trace.c | 439 ++++++++++++++++++++++------------------- kernel/trace/trace.h | 81 ++++---- kernel/trace/trace_boot.c | 13 +- kernel/trace/trace_mmiotrace.c | 31 +-- 4 files changed, 301 insertions(+), 263 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index ef80793858b..ed9e47c1881 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -637,9 +637,9 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags) pc = preempt_count(); - entry->field.preempt_count = pc & 0xff; - entry->field.pid = (tsk) ? tsk->pid : 0; - entry->field.flags = + entry->preempt_count = pc & 0xff; + entry->pid = (tsk) ? tsk->pid : 0; + entry->flags = (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) | ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) | ((pc & SOFTIRQ_MASK) ? 
TRACE_FLAG_SOFTIRQ : 0) | @@ -651,7 +651,7 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data, unsigned long ip, unsigned long parent_ip, unsigned long flags) { struct ring_buffer_event *event; - struct trace_entry *entry; + struct ftrace_entry *entry; unsigned long irq_flags; event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), @@ -659,10 +659,10 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data, if (!event) return; entry = ring_buffer_event_data(event); - tracing_generic_entry_update(entry, flags); - entry->type = TRACE_FN; - entry->field.fn.ip = ip; - entry->field.fn.parent_ip = parent_ip; + tracing_generic_entry_update(&entry->ent, flags); + entry->ent.type = TRACE_FN; + entry->ip = ip; + entry->parent_ip = parent_ip; ring_buffer_unlock_commit(tr->buffer, event, irq_flags); } @@ -680,7 +680,7 @@ void __trace_stack(struct trace_array *tr, int skip) { struct ring_buffer_event *event; - struct trace_entry *entry; + struct stack_entry *entry; struct stack_trace trace; unsigned long irq_flags; @@ -692,15 +692,15 @@ void __trace_stack(struct trace_array *tr, if (!event) return; entry = ring_buffer_event_data(event); - tracing_generic_entry_update(entry, flags); - entry->type = TRACE_STACK; + tracing_generic_entry_update(&entry->ent, flags); + entry->ent.type = TRACE_STACK; - memset(&entry->field.stack, 0, sizeof(entry->field.stack)); + memset(&entry->caller, 0, sizeof(entry->caller)); trace.nr_entries = 0; trace.max_entries = FTRACE_STACK_ENTRIES; trace.skip = skip; - trace.entries = entry->field.stack.caller; + trace.entries = entry->caller; save_stack_trace(&trace); ring_buffer_unlock_commit(tr->buffer, event, irq_flags); @@ -713,7 +713,7 @@ __trace_special(void *__tr, void *__data, struct ring_buffer_event *event; struct trace_array_cpu *data = __data; struct trace_array *tr = __tr; - struct trace_entry *entry; + struct special_entry *entry; unsigned long irq_flags; event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), @@ -721,11 +721,11 @@ __trace_special(void *__tr, void *__data, if (!event) return; entry = ring_buffer_event_data(event); - tracing_generic_entry_update(entry, 0); - entry->type = TRACE_SPECIAL; - entry->field.special.arg1 = arg1; - entry->field.special.arg2 = arg2; - entry->field.special.arg3 = arg3; + tracing_generic_entry_update(&entry->ent, 0); + entry->ent.type = TRACE_SPECIAL; + entry->arg1 = arg1; + entry->arg2 = arg2; + entry->arg3 = arg3; ring_buffer_unlock_commit(tr->buffer, event, irq_flags); __trace_stack(tr, data, irq_flags, 4); @@ -740,7 +740,7 @@ tracing_sched_switch_trace(struct trace_array *tr, unsigned long flags) { struct ring_buffer_event *event; - struct trace_entry *entry; + struct ctx_switch_entry *entry; unsigned long irq_flags; event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), @@ -748,15 +748,15 @@ tracing_sched_switch_trace(struct trace_array *tr, if (!event) return; entry = ring_buffer_event_data(event); - tracing_generic_entry_update(entry, flags); - entry->type = TRACE_CTX; - entry->field.ctx.prev_pid = prev->pid; - entry->field.ctx.prev_prio = prev->prio; - entry->field.ctx.prev_state = prev->state; - entry->field.ctx.next_pid = next->pid; - entry->field.ctx.next_prio = next->prio; - entry->field.ctx.next_state = next->state; - entry->field.ctx.next_cpu = task_cpu(next); + tracing_generic_entry_update(&entry->ent, flags); + entry->ent.type = TRACE_CTX; + entry->prev_pid = prev->pid; + entry->prev_prio = prev->prio; + entry->prev_state = prev->state; + entry->next_pid = 
next->pid; + entry->next_prio = next->prio; + entry->next_state = next->state; + entry->next_cpu = task_cpu(next); ring_buffer_unlock_commit(tr->buffer, event, irq_flags); __trace_stack(tr, data, flags, 5); } @@ -769,7 +769,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr, unsigned long flags) { struct ring_buffer_event *event; - struct trace_entry *entry; + struct ctx_switch_entry *entry; unsigned long irq_flags; event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), @@ -777,15 +777,15 @@ tracing_sched_wakeup_trace(struct trace_array *tr, if (!event) return; entry = ring_buffer_event_data(event); - tracing_generic_entry_update(entry, flags); - entry->type = TRACE_WAKE; - entry->field.ctx.prev_pid = curr->pid; - entry->field.ctx.prev_prio = curr->prio; - entry->field.ctx.prev_state = curr->state; - entry->field.ctx.next_pid = wakee->pid; - entry->field.ctx.next_prio = wakee->prio; - entry->field.ctx.next_state = wakee->state; - entry->field.ctx.next_cpu = task_cpu(wakee); + tracing_generic_entry_update(&entry->ent, flags); + entry->ent.type = TRACE_WAKE; + entry->prev_pid = curr->pid; + entry->prev_prio = curr->prio; + entry->prev_state = curr->state; + entry->next_pid = wakee->pid; + entry->next_prio = wakee->prio; + entry->next_state = wakee->state; + entry->next_cpu = task_cpu(wakee); ring_buffer_unlock_commit(tr->buffer, event, irq_flags); __trace_stack(tr, data, flags, 6); @@ -1173,20 +1173,19 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter) static void lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu) { - struct trace_field *field = &entry->field; int hardirq, softirq; char *comm; - comm = trace_find_cmdline(field->pid); + comm = trace_find_cmdline(entry->pid); - trace_seq_printf(s, "%8.8s-%-5d ", comm, field->pid); + trace_seq_printf(s, "%8.8s-%-5d ", comm, entry->pid); trace_seq_printf(s, "%3d", cpu); trace_seq_printf(s, "%c%c", - (field->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.', - ((field->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.')); + (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.', + ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 
'N' : '.')); - hardirq = field->flags & TRACE_FLAG_HARDIRQ; - softirq = field->flags & TRACE_FLAG_SOFTIRQ; + hardirq = entry->flags & TRACE_FLAG_HARDIRQ; + softirq = entry->flags & TRACE_FLAG_SOFTIRQ; if (hardirq && softirq) { trace_seq_putc(s, 'H'); } else { @@ -1200,8 +1199,8 @@ lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu) } } - if (field->preempt_count) - trace_seq_printf(s, "%x", field->preempt_count); + if (entry->preempt_count) + trace_seq_printf(s, "%x", entry->preempt_count); else trace_seq_puts(s, "."); } @@ -1230,6 +1229,7 @@ static const char state_to_char[] = TASK_STATE_TO_CHAR_STR; void trace_seq_print_cont(struct trace_seq *s, struct trace_iterator *iter) { struct trace_entry *ent; + struct trace_field_cont *cont; bool ok = true; ent = peek_next_entry(iter, iter->cpu, NULL); @@ -1239,8 +1239,9 @@ void trace_seq_print_cont(struct trace_seq *s, struct trace_iterator *iter) } do { + cont = (struct trace_field_cont *)ent; if (ok) - ok = (trace_seq_printf(s, "%s", ent->cont.buf) > 0); + ok = (trace_seq_printf(s, "%s", cont->buf) > 0); ring_buffer_read(iter->buffer_iter[iter->cpu], NULL); ent = peek_next_entry(iter, iter->cpu, NULL); } while (ent && ent->type == TRACE_CONT); @@ -1257,7 +1258,6 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) struct trace_entry *next_entry; unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE); struct trace_entry *entry = iter->ent; - struct trace_field *field = &entry->field; unsigned long abs_usecs; unsigned long rel_usecs; u64 next_ts; @@ -1276,12 +1276,12 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) abs_usecs = ns2usecs(iter->ts - iter->tr->time_start); if (verbose) { - comm = trace_find_cmdline(field->pid); + comm = trace_find_cmdline(entry->pid); trace_seq_printf(s, "%16s %5d %3d %d %08x %08x [%08lx]" " %ld.%03ldms (+%ld.%03ldms): ", comm, - field->pid, cpu, field->flags, - field->preempt_count, trace_idx, + entry->pid, cpu, entry->flags, + entry->preempt_count, trace_idx, ns2usecs(iter->ts), abs_usecs/1000, abs_usecs % 1000, rel_usecs/1000, @@ -1291,53 +1291,69 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) lat_print_timestamp(s, abs_usecs, rel_usecs); } switch (entry->type) { - case TRACE_FN: - seq_print_ip_sym(s, field->fn.ip, sym_flags); + case TRACE_FN: { + struct ftrace_entry *field = (struct ftrace_entry *)entry; + + seq_print_ip_sym(s, field->ip, sym_flags); trace_seq_puts(s, " ("); - if (kretprobed(field->fn.parent_ip)) + if (kretprobed(field->parent_ip)) trace_seq_puts(s, KRETPROBE_MSG); else - seq_print_ip_sym(s, field->fn.parent_ip, sym_flags); + seq_print_ip_sym(s, field->parent_ip, sym_flags); trace_seq_puts(s, ")\n"); break; + } case TRACE_CTX: - case TRACE_WAKE: - T = field->ctx.next_state < sizeof(state_to_char) ? - state_to_char[field->ctx.next_state] : 'X'; + case TRACE_WAKE: { + struct ctx_switch_entry *field = + (struct ctx_switch_entry *)entry; + + T = field->next_state < sizeof(state_to_char) ? + state_to_char[field->next_state] : 'X'; - state = field->ctx.prev_state ? - __ffs(field->ctx.prev_state) + 1 : 0; + state = field->prev_state ? + __ffs(field->prev_state) + 1 : 0; S = state < sizeof(state_to_char) - 1 ? 
state_to_char[state] : 'X'; - comm = trace_find_cmdline(field->ctx.next_pid); + comm = trace_find_cmdline(field->next_pid); trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n", - field->ctx.prev_pid, - field->ctx.prev_prio, + field->prev_pid, + field->prev_prio, S, entry->type == TRACE_CTX ? "==>" : " +", - field->ctx.next_cpu, - field->ctx.next_pid, - field->ctx.next_prio, + field->next_cpu, + field->next_pid, + field->next_prio, T, comm); break; - case TRACE_SPECIAL: + } + case TRACE_SPECIAL: { + struct special_entry *field = (struct special_entry *)entry; + trace_seq_printf(s, "# %ld %ld %ld\n", - field->special.arg1, - field->special.arg2, - field->special.arg3); + field->arg1, + field->arg2, + field->arg3); break; - case TRACE_STACK: + } + case TRACE_STACK: { + struct stack_entry *field = (struct stack_entry *)entry; + for (i = 0; i < FTRACE_STACK_ENTRIES; i++) { if (i) trace_seq_puts(s, " <= "); - seq_print_ip_sym(s, field->stack.caller[i], sym_flags); + seq_print_ip_sym(s, field->caller[i], sym_flags); } trace_seq_puts(s, "\n"); break; - case TRACE_PRINT: - seq_print_ip_sym(s, field->print.ip, sym_flags); - trace_seq_printf(s, ": %s", field->print.buf); - if (field->flags & TRACE_FLAG_CONT) + } + case TRACE_PRINT: { + struct print_entry *field = (struct print_entry *)entry; + + seq_print_ip_sym(s, field->ip, sym_flags); + trace_seq_printf(s, ": %s", field->buf); + if (entry->flags & TRACE_FLAG_CONT) trace_seq_print_cont(s, iter); break; + } default: trace_seq_printf(s, "Unknown type %d\n", entry->type); } @@ -1349,7 +1365,6 @@ static int print_trace_fmt(struct trace_iterator *iter) struct trace_seq *s = &iter->seq; unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); struct trace_entry *entry; - struct trace_field *field; unsigned long usec_rem; unsigned long long t; unsigned long secs; @@ -1363,15 +1378,13 @@ static int print_trace_fmt(struct trace_iterator *iter) if (entry->type == TRACE_CONT) return 1; - field = &entry->field; - - comm = trace_find_cmdline(iter->ent->field.pid); + comm = trace_find_cmdline(iter->ent->pid); t = ns2usecs(iter->ts); usec_rem = do_div(t, 1000000ULL); secs = (unsigned long)t; - ret = trace_seq_printf(s, "%16s-%-5d ", comm, field->pid); + ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid); if (!ret) return 0; ret = trace_seq_printf(s, "[%03d] ", iter->cpu); @@ -1382,20 +1395,22 @@ static int print_trace_fmt(struct trace_iterator *iter) return 0; switch (entry->type) { - case TRACE_FN: - ret = seq_print_ip_sym(s, field->fn.ip, sym_flags); + case TRACE_FN: { + struct ftrace_entry *field = (struct ftrace_entry *)entry; + + ret = seq_print_ip_sym(s, field->ip, sym_flags); if (!ret) return 0; if ((sym_flags & TRACE_ITER_PRINT_PARENT) && - field->fn.parent_ip) { + field->parent_ip) { ret = trace_seq_printf(s, " <-"); if (!ret) return 0; - if (kretprobed(field->fn.parent_ip)) + if (kretprobed(field->parent_ip)) ret = trace_seq_puts(s, KRETPROBE_MSG); else ret = seq_print_ip_sym(s, - field->fn.parent_ip, + field->parent_ip, sym_flags); if (!ret) return 0; @@ -1404,40 +1419,50 @@ static int print_trace_fmt(struct trace_iterator *iter) if (!ret) return 0; break; + } case TRACE_CTX: - case TRACE_WAKE: - S = field->ctx.prev_state < sizeof(state_to_char) ? - state_to_char[field->ctx.prev_state] : 'X'; - T = field->ctx.next_state < sizeof(state_to_char) ? - state_to_char[field->ctx.next_state] : 'X'; + case TRACE_WAKE: { + struct ctx_switch_entry *field = + (struct ctx_switch_entry *)entry; + + S = field->prev_state < sizeof(state_to_char) ? 
+ state_to_char[field->prev_state] : 'X'; + T = field->next_state < sizeof(state_to_char) ? + state_to_char[field->next_state] : 'X'; ret = trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c\n", - field->ctx.prev_pid, - field->ctx.prev_prio, + field->prev_pid, + field->prev_prio, S, entry->type == TRACE_CTX ? "==>" : " +", - field->ctx.next_cpu, - field->ctx.next_pid, - field->ctx.next_prio, + field->next_cpu, + field->next_pid, + field->next_prio, T); if (!ret) return 0; break; - case TRACE_SPECIAL: + } + case TRACE_SPECIAL: { + struct special_entry *field = (struct special_entry *)entry; + ret = trace_seq_printf(s, "# %ld %ld %ld\n", - field->special.arg1, - field->special.arg2, - field->special.arg3); + field->arg1, + field->arg2, + field->arg3); if (!ret) return 0; break; - case TRACE_STACK: + } + case TRACE_STACK: { + struct stack_entry *field = (struct stack_entry *)entry; + for (i = 0; i < FTRACE_STACK_ENTRIES; i++) { if (i) { ret = trace_seq_puts(s, " <= "); if (!ret) return 0; } - ret = seq_print_ip_sym(s, field->stack.caller[i], + ret = seq_print_ip_sym(s, field->caller[i], sym_flags); if (!ret) return 0; @@ -1446,13 +1471,17 @@ static int print_trace_fmt(struct trace_iterator *iter) if (!ret) return 0; break; - case TRACE_PRINT: - seq_print_ip_sym(s, field->print.ip, sym_flags); - trace_seq_printf(s, ": %s", field->print.buf); - if (field->flags & TRACE_FLAG_CONT) + } + case TRACE_PRINT: { + struct print_entry *field = (struct print_entry *)entry; + + seq_print_ip_sym(s, field->ip, sym_flags); + trace_seq_printf(s, ": %s", field->buf); + if (entry->flags & TRACE_FLAG_CONT) trace_seq_print_cont(s, iter); break; } + } return 1; } @@ -1460,7 +1489,6 @@ static int print_raw_fmt(struct trace_iterator *iter) { struct trace_seq *s = &iter->seq; struct trace_entry *entry; - struct trace_field *field; int ret; int S, T; @@ -1469,56 +1497,66 @@ static int print_raw_fmt(struct trace_iterator *iter) if (entry->type == TRACE_CONT) return 1; - field = &entry->field; - ret = trace_seq_printf(s, "%d %d %llu ", - field->pid, iter->cpu, iter->ts); + entry->pid, iter->cpu, iter->ts); if (!ret) return 0; switch (entry->type) { - case TRACE_FN: + case TRACE_FN: { + struct ftrace_entry *field = (struct ftrace_entry *)entry; + ret = trace_seq_printf(s, "%x %x\n", - field->fn.ip, - field->fn.parent_ip); + field->ip, + field->parent_ip); if (!ret) return 0; break; + } case TRACE_CTX: - case TRACE_WAKE: - S = field->ctx.prev_state < sizeof(state_to_char) ? - state_to_char[field->ctx.prev_state] : 'X'; - T = field->ctx.next_state < sizeof(state_to_char) ? - state_to_char[field->ctx.next_state] : 'X'; + case TRACE_WAKE: { + struct ctx_switch_entry *field = + (struct ctx_switch_entry *)entry; + + S = field->prev_state < sizeof(state_to_char) ? + state_to_char[field->prev_state] : 'X'; + T = field->next_state < sizeof(state_to_char) ? 
+ state_to_char[field->next_state] : 'X'; if (entry->type == TRACE_WAKE) S = '+'; ret = trace_seq_printf(s, "%d %d %c %d %d %d %c\n", - field->ctx.prev_pid, - field->ctx.prev_prio, + field->prev_pid, + field->prev_prio, S, - field->ctx.next_cpu, - field->ctx.next_pid, - field->ctx.next_prio, + field->next_cpu, + field->next_pid, + field->next_prio, T); if (!ret) return 0; break; + } case TRACE_SPECIAL: - case TRACE_STACK: + case TRACE_STACK: { + struct special_entry *field = (struct special_entry *)entry; + ret = trace_seq_printf(s, "# %ld %ld %ld\n", - field->special.arg1, - field->special.arg2, - field->special.arg3); + field->arg1, + field->arg2, + field->arg3); if (!ret) return 0; break; - case TRACE_PRINT: - trace_seq_printf(s, "# %lx %s", - field->print.ip, field->print.buf); - if (field->flags & TRACE_FLAG_CONT) + } + case TRACE_PRINT: { + struct print_entry *field = (struct print_entry *)entry; + + trace_seq_printf(s, "# %lx %s", field->ip, field->buf); + if (entry->flags & TRACE_FLAG_CONT) trace_seq_print_cont(s, iter); break; } + } return 1; } @@ -1539,7 +1577,6 @@ static int print_hex_fmt(struct trace_iterator *iter) struct trace_seq *s = &iter->seq; unsigned char newline = '\n'; struct trace_entry *entry; - struct trace_field *field; int S, T; entry = iter->ent; @@ -1547,40 +1584,48 @@ static int print_hex_fmt(struct trace_iterator *iter) if (entry->type == TRACE_CONT) return 1; - field = &entry->field; - - SEQ_PUT_HEX_FIELD_RET(s, field->pid); + SEQ_PUT_HEX_FIELD_RET(s, entry->pid); SEQ_PUT_HEX_FIELD_RET(s, iter->cpu); SEQ_PUT_HEX_FIELD_RET(s, iter->ts); switch (entry->type) { - case TRACE_FN: - SEQ_PUT_HEX_FIELD_RET(s, field->fn.ip); - SEQ_PUT_HEX_FIELD_RET(s, field->fn.parent_ip); + case TRACE_FN: { + struct ftrace_entry *field = (struct ftrace_entry *)entry; + + SEQ_PUT_HEX_FIELD_RET(s, field->ip); + SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip); break; + } case TRACE_CTX: - case TRACE_WAKE: - S = field->ctx.prev_state < sizeof(state_to_char) ? - state_to_char[field->ctx.prev_state] : 'X'; - T = field->ctx.next_state < sizeof(state_to_char) ? - state_to_char[field->ctx.next_state] : 'X'; + case TRACE_WAKE: { + struct ctx_switch_entry *field = + (struct ctx_switch_entry *)entry; + + S = field->prev_state < sizeof(state_to_char) ? + state_to_char[field->prev_state] : 'X'; + T = field->next_state < sizeof(state_to_char) ? 
+ state_to_char[field->next_state] : 'X'; if (entry->type == TRACE_WAKE) S = '+'; - SEQ_PUT_HEX_FIELD_RET(s, field->ctx.prev_pid); - SEQ_PUT_HEX_FIELD_RET(s, field->ctx.prev_prio); + SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid); + SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio); SEQ_PUT_HEX_FIELD_RET(s, S); - SEQ_PUT_HEX_FIELD_RET(s, field->ctx.next_cpu); - SEQ_PUT_HEX_FIELD_RET(s, field->ctx.next_pid); - SEQ_PUT_HEX_FIELD_RET(s, field->ctx.next_prio); + SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu); + SEQ_PUT_HEX_FIELD_RET(s, field->next_pid); + SEQ_PUT_HEX_FIELD_RET(s, field->next_prio); SEQ_PUT_HEX_FIELD_RET(s, T); break; + } case TRACE_SPECIAL: - case TRACE_STACK: - SEQ_PUT_HEX_FIELD_RET(s, field->special.arg1); - SEQ_PUT_HEX_FIELD_RET(s, field->special.arg2); - SEQ_PUT_HEX_FIELD_RET(s, field->special.arg3); + case TRACE_STACK: { + struct special_entry *field = (struct special_entry *)entry; + + SEQ_PUT_HEX_FIELD_RET(s, field->arg1); + SEQ_PUT_HEX_FIELD_RET(s, field->arg2); + SEQ_PUT_HEX_FIELD_RET(s, field->arg3); break; } + } SEQ_PUT_FIELD_RET(s, newline); return 1; @@ -1590,39 +1635,46 @@ static int print_bin_fmt(struct trace_iterator *iter) { struct trace_seq *s = &iter->seq; struct trace_entry *entry; - struct trace_field *field; entry = iter->ent; if (entry->type == TRACE_CONT) return 1; - field = &entry->field; - - SEQ_PUT_FIELD_RET(s, field->pid); - SEQ_PUT_FIELD_RET(s, field->cpu); + SEQ_PUT_FIELD_RET(s, entry->pid); + SEQ_PUT_FIELD_RET(s, iter->cpu); SEQ_PUT_FIELD_RET(s, iter->ts); switch (entry->type) { - case TRACE_FN: - SEQ_PUT_FIELD_RET(s, field->fn.ip); - SEQ_PUT_FIELD_RET(s, field->fn.parent_ip); + case TRACE_FN: { + struct ftrace_entry *field = (struct ftrace_entry *)entry; + + SEQ_PUT_FIELD_RET(s, field->ip); + SEQ_PUT_FIELD_RET(s, field->parent_ip); break; - case TRACE_CTX: - SEQ_PUT_FIELD_RET(s, field->ctx.prev_pid); - SEQ_PUT_FIELD_RET(s, field->ctx.prev_prio); - SEQ_PUT_FIELD_RET(s, field->ctx.prev_state); - SEQ_PUT_FIELD_RET(s, field->ctx.next_pid); - SEQ_PUT_FIELD_RET(s, field->ctx.next_prio); - SEQ_PUT_FIELD_RET(s, field->ctx.next_state); + } + case TRACE_CTX: { + struct ctx_switch_entry *field = + (struct ctx_switch_entry *)entry; + + SEQ_PUT_FIELD_RET(s, field->prev_pid); + SEQ_PUT_FIELD_RET(s, field->prev_prio); + SEQ_PUT_FIELD_RET(s, field->prev_state); + SEQ_PUT_FIELD_RET(s, field->next_pid); + SEQ_PUT_FIELD_RET(s, field->next_prio); + SEQ_PUT_FIELD_RET(s, field->next_state); break; + } case TRACE_SPECIAL: - case TRACE_STACK: - SEQ_PUT_FIELD_RET(s, field->special.arg1); - SEQ_PUT_FIELD_RET(s, field->special.arg2); - SEQ_PUT_FIELD_RET(s, field->special.arg3); + case TRACE_STACK: { + struct special_entry *field = (struct special_entry *)entry; + + SEQ_PUT_FIELD_RET(s, field->arg1); + SEQ_PUT_FIELD_RET(s, field->arg2); + SEQ_PUT_FIELD_RET(s, field->arg3); break; } + } return 1; } @@ -2818,10 +2870,10 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args) struct ring_buffer_event *event; struct trace_array *tr = &global_trace; struct trace_array_cpu *data; - struct trace_entry *entry; + struct print_entry *entry; unsigned long flags, irq_flags; long disabled; - int cpu, len = 0, write, written = 0; + int cpu, len = 0, size; if (!tr->ctrl || tracing_disabled) return 0; @@ -2840,40 +2892,19 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args) len = min(len, TRACE_BUF_SIZE-1); trace_buf[len] = 0; - event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), - &irq_flags); + size = sizeof(*entry) + len + 1; + event = 
ring_buffer_lock_reserve(tr->buffer, size, &irq_flags); if (!event) goto out_unlock; - entry = ring_buffer_event_data(event); - tracing_generic_entry_update(entry, flags); - entry->type = TRACE_PRINT; - entry->field.print.ip = ip; + entry = ring_buffer_event_data(event); + tracing_generic_entry_update(&entry->ent, flags); + entry->ent.type = TRACE_PRINT; + entry->ip = ip; - write = min(len, (int)(TRACE_PRINT_BUF_SIZE-1)); - - memcpy(&entry->field.print.buf, trace_buf, write); - entry->field.print.buf[write] = 0; - written = write; + memcpy(&entry->buf, trace_buf, len); + entry->buf[len] = 0; ring_buffer_unlock_commit(tr->buffer, event, irq_flags); - if (written != len) - entry->field.flags |= TRACE_FLAG_CONT; - - while (written != len) { - event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), - &irq_flags); - if (!event) - goto out_unlock; - entry = ring_buffer_event_data(event); - - entry->type = TRACE_CONT; - write = min(len - written, (int)(TRACE_CONT_BUF_SIZE-1)); - memcpy(&entry->cont.buf, trace_buf+written, write); - entry->cont.buf[write] = 0; - written += write; - ring_buffer_unlock_commit(tr->buffer, event, irq_flags); - } - out_unlock: spin_unlock(&trace_buf_lock); diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index f6965f775b4..e541a6b7e31 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -26,10 +26,25 @@ enum trace_type { __TRACE_LAST_TYPE }; +/* + * The trace entry - the most basic unit of tracing. This is what + * is printed in the end as a single line in the trace output, such as: + * + * bash-15816 [01] 235.197585: idle_cpu <- irq_enter + */ +struct trace_entry { + unsigned char type; + unsigned char cpu; + unsigned char flags; + unsigned char preempt_count; + int pid; +}; + /* * Function trace entry - function address and parent function addres: */ struct ftrace_entry { + struct trace_entry ent; unsigned long ip; unsigned long parent_ip; }; @@ -39,6 +54,7 @@ extern struct tracer boot_tracer; * Context switch trace entry - which task (and prio) we switched from/to: */ struct ctx_switch_entry { + struct trace_entry ent; unsigned int prev_pid; unsigned char prev_prio; unsigned char prev_state; @@ -52,6 +68,7 @@ struct ctx_switch_entry { * Special (free-form) trace entry: */ struct special_entry { + struct trace_entry ent; unsigned long arg1; unsigned long arg2; unsigned long arg3; @@ -64,6 +81,7 @@ struct special_entry { #define FTRACE_STACK_ENTRIES 8 struct stack_entry { + struct trace_entry ent; unsigned long caller[FTRACE_STACK_ENTRIES]; }; @@ -71,10 +89,34 @@ struct stack_entry { * ftrace_printk entry: */ struct print_entry { + struct trace_entry ent; unsigned long ip; char buf[]; }; +#define TRACE_OLD_SIZE 88 + +struct trace_field_cont { + unsigned char type; + /* Temporary till we get rid of this completely */ + char buf[TRACE_OLD_SIZE - 1]; +}; + +struct trace_mmiotrace_rw { + struct trace_entry ent; + struct mmiotrace_rw rw; +}; + +struct trace_mmiotrace_map { + struct trace_entry ent; + struct mmiotrace_map map; +}; + +struct trace_boot { + struct trace_entry ent; + struct boot_trace initcall; +}; + /* * trace_flag_type is an enumeration that holds different * states when a trace occurs. These are: @@ -92,46 +134,7 @@ enum trace_flag_type { TRACE_FLAG_CONT = 0x10, }; -/* - * The trace field - the most basic unit of tracing. 
This is what - * is printed in the end as a single line in the trace output, such as: - * - * bash-15816 [01] 235.197585: idle_cpu <- irq_enter - */ -struct trace_field { - char cpu; - char flags; - char preempt_count; - int pid; - union { - struct ftrace_entry fn; - struct ctx_switch_entry ctx; - struct special_entry special; - struct stack_entry stack; - struct print_entry print; - struct mmiotrace_rw mmiorw; - struct mmiotrace_map mmiomap; - struct boot_trace initcall; - }; -}; - -struct trace_field_cont { - char buf[sizeof(struct trace_field)]; -}; - -struct trace_entry { - char type; - union { - struct trace_field field; - struct trace_field_cont cont; - }; -}; - -#define TRACE_ENTRY_SIZE sizeof(struct trace_entry) #define TRACE_BUF_SIZE 1024 -#define TRACE_PRINT_BUF_SIZE \ - (sizeof(struct trace_field) - offsetof(struct trace_field, print.buf)) -#define TRACE_CONT_BUF_SIZE sizeof(struct trace_field) /* * The CPU trace array - it consists of thousands of trace entries diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c index 3657eec6b87..fa8cca1be11 100644 --- a/kernel/trace/trace_boot.c +++ b/kernel/trace/trace_boot.c @@ -49,10 +49,11 @@ static int initcall_print_line(struct trace_iterator *iter) { int ret = 0; struct trace_entry *entry = iter->ent; - struct boot_trace *it = &entry->field.initcall; + struct trace_boot *field = (struct trace_boot *)entry; + struct boot_trace *it = &field->initcall; struct trace_seq *s = &iter->seq; - if (iter->ent->type == TRACE_BOOT) + if (entry->type == TRACE_BOOT) ret = trace_seq_printf(s, "%pF called from %i " "returned %d after %lld msecs\n", it->func, it->caller, it->result, @@ -75,7 +76,7 @@ struct tracer boot_tracer __read_mostly = void trace_boot(struct boot_trace *it) { struct ring_buffer_event *event; - struct trace_entry *entry; + struct trace_boot *entry; struct trace_array_cpu *data; unsigned long irq_flags; struct trace_array *tr = boot_trace; @@ -91,9 +92,9 @@ void trace_boot(struct boot_trace *it) if (!event) goto out; entry = ring_buffer_event_data(event); - tracing_generic_entry_update(entry, 0); - entry->type = TRACE_BOOT; - entry->field.initcall = *it; + tracing_generic_entry_update(&entry->ent, 0); + entry->ent.type = TRACE_BOOT; + entry->initcall = *it; ring_buffer_unlock_commit(tr->buffer, event, irq_flags); trace_wake_up(); diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c index bdbf09d8413..3df441ea274 100644 --- a/kernel/trace/trace_mmiotrace.c +++ b/kernel/trace/trace_mmiotrace.c @@ -178,14 +178,16 @@ print_out: static int mmio_print_rw(struct trace_iterator *iter) { struct trace_entry *entry = iter->ent; - struct mmiotrace_rw *rw = &entry->field.mmiorw; + struct trace_mmiotrace_rw *field = + (struct trace_mmiotrace_rw *)entry; + struct mmiotrace_rw *rw = &field->rw; struct trace_seq *s = &iter->seq; unsigned long long t = ns2usecs(iter->ts); unsigned long usec_rem = do_div(t, 1000000ULL); unsigned secs = (unsigned long)t; int ret = 1; - switch (entry->field.mmiorw.opcode) { + switch (rw->opcode) { case MMIO_READ: ret = trace_seq_printf(s, "R %d %lu.%06lu %d 0x%llx 0x%lx 0x%lx %d\n", @@ -220,14 +222,14 @@ static int mmio_print_rw(struct trace_iterator *iter) static int mmio_print_map(struct trace_iterator *iter) { struct trace_entry *entry = iter->ent; - struct mmiotrace_map *m = &entry->field.mmiomap; + struct mmiotrace_map *m = (struct mmiotrace_map *)entry; struct trace_seq *s = &iter->seq; unsigned long long t = ns2usecs(iter->ts); unsigned long usec_rem = do_div(t, 1000000ULL); 
unsigned secs = (unsigned long)t; int ret = 1; - switch (entry->field.mmiorw.opcode) { + switch (m->opcode) { case MMIO_PROBE: ret = trace_seq_printf(s, "MAP %lu.%06lu %d 0x%llx 0x%lx 0x%lx 0x%lx %d\n", @@ -252,7 +254,8 @@ static int mmio_print_mark(struct trace_iterator *iter) { struct trace_entry *entry = iter->ent; - const char *msg = entry->field.print.buf; + struct print_entry *print = (struct print_entry *)entry; + const char *msg = print->buf; struct trace_seq *s = &iter->seq; unsigned long long t = ns2usecs(iter->ts); unsigned long usec_rem = do_div(t, 1000000ULL); @@ -264,7 +267,7 @@ static int mmio_print_mark(struct trace_iterator *iter) if (!ret) return 0; - if (entry->field.flags & TRACE_FLAG_CONT) + if (entry->flags & TRACE_FLAG_CONT) trace_seq_print_cont(s, iter); return 1; @@ -308,7 +311,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr, struct mmiotrace_rw *rw) { struct ring_buffer_event *event; - struct trace_entry *entry; + struct trace_mmiotrace_rw *entry; unsigned long irq_flags; event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), @@ -316,9 +319,9 @@ static void __trace_mmiotrace_rw(struct trace_array *tr, if (!event) return; entry = ring_buffer_event_data(event); - tracing_generic_entry_update(entry, 0); - entry->type = TRACE_MMIO_RW; - entry->field.mmiorw = *rw; + tracing_generic_entry_update(&entry->ent, 0); + entry->ent.type = TRACE_MMIO_RW; + entry->rw = *rw; ring_buffer_unlock_commit(tr->buffer, event, irq_flags); trace_wake_up(); @@ -336,7 +339,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr, struct mmiotrace_map *map) { struct ring_buffer_event *event; - struct trace_entry *entry; + struct trace_mmiotrace_map *entry; unsigned long irq_flags; event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), @@ -344,9 +347,9 @@ static void __trace_mmiotrace_map(struct trace_array *tr, if (!event) return; entry = ring_buffer_event_data(event); - tracing_generic_entry_update(entry, 0); - entry->type = TRACE_MMIO_MAP; - entry->field.mmiomap = *map; + tracing_generic_entry_update(&entry->ent, 0); + entry->ent.type = TRACE_MMIO_MAP; + entry->map = *map; ring_buffer_unlock_commit(tr->buffer, event, irq_flags); trace_wake_up(); -- cgit v1.2.3-70-g09d2 From 2c4f035f6c3e8fda661eb6105aa51ef07aa71607 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Mon, 29 Sep 2008 20:18:34 +0200 Subject: tracing/ftrace: change the type of the print_line callback We need a kind of disambiguation when a print_line callback returns 0. _ There is not enough space to print the whole entry. Please flush the seq and retry. _ I can't handle this type of entry. This patch changes the type of this callback to make that distinction explicit. Some changes have also been made in this V2. _ Only relay to default functions after the print_line callback fails. _ This patch doesn't fix the issue with the broken pipe (see patch 2/4 for that). Some things are still in discussion: _ Find better names for the enum print_line_t values _ Change the type of print_trace_line into boolean. Patches to change that can be sent later.
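To make the three return values concrete, here is a minimal sketch of how a tracer-specific callback is expected to use them (the tracer and its TRACE_EXAMPLE event type are hypothetical; the enum itself is introduced in the trace.h hunk below):

static enum print_line_t example_print_line(struct trace_iterator *iter)
{
	/* Not one of ours: relay the entry to the default printers. */
	if (iter->ent->type != TRACE_EXAMPLE)
		return TRACE_TYPE_UNHANDLED;

	/* The seq buffer filled up mid-line: flush the seq and retry. */
	if (!trace_seq_printf(&iter->seq, "example event\n"))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}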
Signed-off-by: Frederic Weisbecker Acked-by: Pekka Paalanen Signed-off-by: Ingo Molnar --- kernel/trace/trace.c | 77 ++++++++++++++++++++++++++++------------------------ kernel/trace/trace.h | 10 ++++++- 2 files changed, 50 insertions(+), 37 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index ed9e47c1881..b38a4bb4054 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -1250,7 +1250,7 @@ void trace_seq_print_cont(struct trace_seq *s, struct trace_iterator *iter) trace_seq_putc(s, '\n'); } -static int +static enum print_line_t print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) { struct trace_seq *s = &iter->seq; @@ -1267,7 +1267,7 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) unsigned state; if (entry->type == TRACE_CONT) - return 1; + return TRACE_TYPE_HANDLED; next_entry = find_next_entry(iter, NULL, &next_ts); if (!next_entry) @@ -1357,10 +1357,10 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) default: trace_seq_printf(s, "Unknown type %d\n", entry->type); } - return 1; + return TRACE_TYPE_HANDLED; } -static int print_trace_fmt(struct trace_iterator *iter) +static enum print_line_t print_trace_fmt(struct trace_iterator *iter) { struct trace_seq *s = &iter->seq; unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); @@ -1376,7 +1376,7 @@ static int print_trace_fmt(struct trace_iterator *iter) entry = iter->ent; if (entry->type == TRACE_CONT) - return 1; + return TRACE_TYPE_HANDLED; comm = trace_find_cmdline(iter->ent->pid); @@ -1386,13 +1386,13 @@ static int print_trace_fmt(struct trace_iterator *iter) ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid); if (!ret) - return 0; + return TRACE_TYPE_PARTIAL_LINE; ret = trace_seq_printf(s, "[%03d] ", iter->cpu); if (!ret) - return 0; + return TRACE_TYPE_PARTIAL_LINE; ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem); if (!ret) - return 0; + return TRACE_TYPE_PARTIAL_LINE; switch (entry->type) { case TRACE_FN: { @@ -1400,12 +1400,12 @@ static int print_trace_fmt(struct trace_iterator *iter) ret = seq_print_ip_sym(s, field->ip, sym_flags); if (!ret) - return 0; + return TRACE_TYPE_PARTIAL_LINE; if ((sym_flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) { ret = trace_seq_printf(s, " <-"); if (!ret) - return 0; + return TRACE_TYPE_PARTIAL_LINE; if (kretprobed(field->parent_ip)) ret = trace_seq_puts(s, KRETPROBE_MSG); else @@ -1413,11 +1413,11 @@ static int print_trace_fmt(struct trace_iterator *iter) field->parent_ip, sym_flags); if (!ret) - return 0; + return TRACE_TYPE_PARTIAL_LINE; } ret = trace_seq_printf(s, "\n"); if (!ret) - return 0; + return TRACE_TYPE_PARTIAL_LINE; break; } case TRACE_CTX: @@ -1439,7 +1439,7 @@ static int print_trace_fmt(struct trace_iterator *iter) field->next_prio, T); if (!ret) - return 0; + return TRACE_TYPE_PARTIAL_LINE; break; } case TRACE_SPECIAL: { @@ -1450,7 +1450,7 @@ static int print_trace_fmt(struct trace_iterator *iter) field->arg2, field->arg3); if (!ret) - return 0; + return TRACE_TYPE_PARTIAL_LINE; break; } case TRACE_STACK: { @@ -1460,16 +1460,16 @@ static int print_trace_fmt(struct trace_iterator *iter) if (i) { ret = trace_seq_puts(s, " <= "); if (!ret) - return 0; + return TRACE_TYPE_PARTIAL_LINE; } ret = seq_print_ip_sym(s, field->caller[i], sym_flags); if (!ret) - return 0; + return TRACE_TYPE_PARTIAL_LINE; } ret = trace_seq_puts(s, "\n"); if (!ret) - return 0; + return TRACE_TYPE_PARTIAL_LINE; break; } case TRACE_PRINT: { 
@@ -1482,10 +1482,10 @@ static int print_trace_fmt(struct trace_iterator *iter) break; } } - return 1; + return TRACE_TYPE_HANDLED; } -static int print_raw_fmt(struct trace_iterator *iter) +static enum print_line_t print_raw_fmt(struct trace_iterator *iter) { struct trace_seq *s = &iter->seq; struct trace_entry *entry; @@ -1495,12 +1495,12 @@ static int print_raw_fmt(struct trace_iterator *iter) entry = iter->ent; if (entry->type == TRACE_CONT) - return 1; + return TRACE_TYPE_HANDLED; ret = trace_seq_printf(s, "%d %d %llu ", entry->pid, iter->cpu, iter->ts); if (!ret) - return 0; + return TRACE_TYPE_PARTIAL_LINE; switch (entry->type) { case TRACE_FN: { @@ -1510,7 +1510,7 @@ static int print_raw_fmt(struct trace_iterator *iter) field->ip, field->parent_ip); if (!ret) - return 0; + return TRACE_TYPE_PARTIAL_LINE; break; } case TRACE_CTX: @@ -1533,7 +1533,7 @@ static int print_raw_fmt(struct trace_iterator *iter) field->next_prio, T); if (!ret) - return 0; + return TRACE_TYPE_PARTIAL_LINE; break; } case TRACE_SPECIAL: @@ -1545,7 +1545,7 @@ static int print_raw_fmt(struct trace_iterator *iter) field->arg2, field->arg3); if (!ret) - return 0; + return TRACE_TYPE_PARTIAL_LINE; break; } case TRACE_PRINT: { @@ -1557,7 +1557,7 @@ static int print_raw_fmt(struct trace_iterator *iter) break; } } - return 1; + return TRACE_TYPE_HANDLED; } #define SEQ_PUT_FIELD_RET(s, x) \ @@ -1572,7 +1572,7 @@ do { \ return 0; \ } while (0) -static int print_hex_fmt(struct trace_iterator *iter) +static enum print_line_t print_hex_fmt(struct trace_iterator *iter) { struct trace_seq *s = &iter->seq; unsigned char newline = '\n'; @@ -1582,7 +1582,7 @@ static int print_hex_fmt(struct trace_iterator *iter) entry = iter->ent; if (entry->type == TRACE_CONT) - return 1; + return TRACE_TYPE_HANDLED; SEQ_PUT_HEX_FIELD_RET(s, entry->pid); SEQ_PUT_HEX_FIELD_RET(s, iter->cpu); @@ -1628,10 +1628,10 @@ static int print_hex_fmt(struct trace_iterator *iter) } SEQ_PUT_FIELD_RET(s, newline); - return 1; + return TRACE_TYPE_HANDLED; } -static int print_bin_fmt(struct trace_iterator *iter) +static enum print_line_t print_bin_fmt(struct trace_iterator *iter) { struct trace_seq *s = &iter->seq; struct trace_entry *entry; @@ -1639,7 +1639,7 @@ static int print_bin_fmt(struct trace_iterator *iter) entry = iter->ent; if (entry->type == TRACE_CONT) - return 1; + return TRACE_TYPE_HANDLED; SEQ_PUT_FIELD_RET(s, entry->pid); SEQ_PUT_FIELD_RET(s, iter->cpu); @@ -1686,13 +1686,18 @@ static int trace_empty(struct trace_iterator *iter) if (!ring_buffer_iter_empty(iter->buffer_iter[cpu])) return 0; } - return 1; + return TRACE_TYPE_HANDLED; } -static int print_trace_line(struct trace_iterator *iter) +static enum print_line_t print_trace_line(struct trace_iterator *iter) { - if (iter->trace && iter->trace->print_line) - return iter->trace->print_line(iter); + enum print_line_t ret; + + if (iter->trace && iter->trace->print_line) { + ret = iter->trace->print_line(iter); + if (ret != TRACE_TYPE_UNHANDLED) + return ret; + } if (trace_flags & TRACE_ITER_BIN) return print_bin_fmt(iter); @@ -2525,11 +2530,11 @@ tracing_read_pipe(struct file *filp, char __user *ubuf, ring_buffer_lock(iter->tr->buffer, &flags); while (find_next_entry_inc(iter) != NULL) { - int ret; + enum print_line_t ret; int len = iter->seq.len; ret = print_trace_line(iter); - if (!ret) { + if (ret == TRACE_TYPE_PARTIAL_LINE) { /* don't print partial lines */ iter->seq.len = len; break; diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index e541a6b7e31..a921ba5d292 100644 --- 
a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -177,6 +177,14 @@ struct trace_array { struct trace_array_cpu *data[NR_CPUS]; }; + +/* Return values for print_line callback */ +enum print_line_t { + TRACE_TYPE_PARTIAL_LINE = 0, /* Retry after flushing the seq */ + TRACE_TYPE_HANDLED = 1, + TRACE_TYPE_UNHANDLED = 2 /* Relay to other output functions */ +}; + /* * A specific tracer, represented by methods that operate on a trace array: */ @@ -197,7 +205,7 @@ struct tracer { int (*selftest)(struct tracer *trace, struct trace_array *tr); #endif - int (*print_line)(struct trace_iterator *iter); + enum print_line_t (*print_line)(struct trace_iterator *iter); struct tracer *next; int print_max; }; -- cgit v1.2.3-70-g09d2 From 9ff4b9744c187cae58c3774361ea090addbc4130 Mon Sep 17 00:00:00 2001 From: Pekka Paalanen Date: Mon, 29 Sep 2008 20:23:48 +0200 Subject: tracing/ftrace: fix pipe breaking This patch fixes a bug which breaks the pipe when the seq is empty. Signed-off-by: Frederic Weisbecker Signed-off-by: Ingo Molnar --- kernel/trace/trace.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index b38a4bb4054..6a1c76bb56b 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -2439,7 +2439,6 @@ tracing_read_pipe(struct file *filp, char __user *ubuf, sret = trace_seq_to_user(&iter->seq, ubuf, cnt); if (sret != -EBUSY) return sret; - sret = 0; trace_seq_reset(&iter->seq); @@ -2450,6 +2449,8 @@ tracing_read_pipe(struct file *filp, char __user *ubuf, goto out; } +waitagain: + sret = 0; while (trace_empty(iter)) { if ((filp->f_flags & O_NONBLOCK)) { @@ -2556,8 +2557,13 @@ tracing_read_pipe(struct file *filp, char __user *ubuf, sret = trace_seq_to_user(&iter->seq, ubuf, cnt); if (iter->seq.readpos >= iter->seq.len) trace_seq_reset(&iter->seq); + + /* + * If there was nothing to send to user, in spite of consuming trace + * entries, go back to wait for more entries. + */ if (sret == -EBUSY) - sret = 0; + goto waitagain; out: mutex_unlock(&trace_types_lock); -- cgit v1.2.3-70-g09d2 From 07f4e4f790895d55f46aaf89e7437da4a585989c Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Mon, 29 Sep 2008 20:27:42 +0200 Subject: tracing/ftrace: adapt mmiotrace to the new type of print_line Adapt mmiotrace to the new print_line type. By default, it ignores (and consumes) types it doesn't support. Signed-off-by: Frederic Weisbecker Acked-by: Pekka Paalanen Signed-off-by: Ingo Molnar --- kernel/trace/trace_mmiotrace.c | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c index 3df441ea274..1a266aa08e1 100644 --- a/kernel/trace/trace_mmiotrace.c +++ b/kernel/trace/trace_mmiotrace.c @@ -175,7 +175,7 @@ print_out: return (ret == -EBUSY) ?
0 : ret; } -static int mmio_print_rw(struct trace_iterator *iter) +static enum print_line_t mmio_print_rw(struct trace_iterator *iter) { struct trace_entry *entry = iter->ent; struct trace_mmiotrace_rw *field = @@ -215,11 +215,11 @@ static int mmio_print_rw(struct trace_iterator *iter) break; } if (ret) - return 1; - return 0; + return TRACE_TYPE_HANDLED; + return TRACE_TYPE_PARTIAL_LINE; } -static int mmio_print_map(struct trace_iterator *iter) +static enum print_line_t mmio_print_map(struct trace_iterator *iter) { struct trace_entry *entry = iter->ent; struct mmiotrace_map *m = (struct mmiotrace_map *)entry; @@ -227,7 +227,7 @@ static int mmio_print_map(struct trace_iterator *iter) unsigned long long t = ns2usecs(iter->ts); unsigned long usec_rem = do_div(t, 1000000ULL); unsigned secs = (unsigned long)t; - int ret = 1; + int ret; switch (m->opcode) { case MMIO_PROBE: @@ -247,11 +247,11 @@ static int mmio_print_map(struct trace_iterator *iter) break; } if (ret) - return 1; - return 0; + return TRACE_TYPE_HANDLED; + return TRACE_TYPE_PARTIAL_LINE; } -static int mmio_print_mark(struct trace_iterator *iter) +static enum print_line_t mmio_print_mark(struct trace_iterator *iter) { struct trace_entry *entry = iter->ent; struct print_entry *print = (struct print_entry *)entry; @@ -265,16 +265,15 @@ static int mmio_print_mark(struct trace_iterator *iter) /* The trailing newline must be in the message. */ ret = trace_seq_printf(s, "MARK %lu.%06lu %s", secs, usec_rem, msg); if (!ret) - return 0; + return TRACE_TYPE_PARTIAL_LINE; if (entry->flags & TRACE_FLAG_CONT) trace_seq_print_cont(s, iter); - return 1; + return TRACE_TYPE_HANDLED; } -/* return 0 to abort printing without consuming current entry in pipe mode */ -static int mmio_print_line(struct trace_iterator *iter) +static enum print_line_t mmio_print_line(struct trace_iterator *iter) { switch (iter->ent->type) { case TRACE_MMIO_RW: @@ -284,7 +283,7 @@ static int mmio_print_line(struct trace_iterator *iter) case TRACE_PRINT: return mmio_print_mark(iter); default: - return 1; /* ignore unknown entries */ + return TRACE_TYPE_HANDLED; /* ignore unknown entries */ } } -- cgit v1.2.3-70-g09d2 From 9e9efffb7848fe53c334996819139b431b983ac2 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Mon, 29 Sep 2008 20:31:58 +0200 Subject: tracing/ftrace: adapt the boot tracer to the new print_line type This patch adapts the boot tracer to the new type of the print_line callback. It still relays entries it doesn't support to default output functions. 
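For reference, this relaying works through the dispatch added earlier in the series: print_trace_line() tries the tracer's callback first and only falls back to the generic printers on TRACE_TYPE_UNHANDLED. A condensed sketch of that dispatch (the fallback ordering at the end is abridged here, not quoted verbatim from the patch):

static enum print_line_t print_trace_line(struct trace_iterator *iter)
{
	enum print_line_t ret;

	if (iter->trace && iter->trace->print_line) {
		ret = iter->trace->print_line(iter);
		if (ret != TRACE_TYPE_UNHANDLED)
			return ret;	/* the tracer produced (part of) a line */
	}

	/* TRACE_TYPE_UNHANDLED: fall back to the generic output formats */
	if (trace_flags & TRACE_ITER_BIN)
		return print_bin_fmt(iter);
	if (trace_flags & TRACE_ITER_HEX)
		return print_hex_fmt(iter);
	if (trace_flags & TRACE_ITER_RAW)
		return print_raw_fmt(iter);

	return print_trace_fmt(iter);
}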
Signed-off-by: Frederic Weisbecker Acked-by: Pekka Paalanen Signed-off-by: Ingo Molnar --- kernel/trace/trace_boot.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c index fa8cca1be11..43bde20b95b 100644 --- a/kernel/trace/trace_boot.c +++ b/kernel/trace/trace_boot.c @@ -45,22 +45,25 @@ static void boot_trace_ctrl_update(struct trace_array *tr) stop_boot_trace(tr); } -static int initcall_print_line(struct trace_iterator *iter) +static enum print_line_t initcall_print_line(struct trace_iterator *iter) { - int ret = 0; + int ret; struct trace_entry *entry = iter->ent; struct trace_boot *field = (struct trace_boot *)entry; struct boot_trace *it = &field->initcall; struct trace_seq *s = &iter->seq; - if (entry->type == TRACE_BOOT) + if (entry->type == TRACE_BOOT) { ret = trace_seq_printf(s, "%pF called from %i " "returned %d after %lld msecs\n", it->func, it->caller, it->result, it->duration); - if (ret) - return 1; - return 0; + if (ret) + return TRACE_TYPE_HANDLED; + else + return TRACE_TYPE_PARTIAL_LINE; + } + return TRACE_TYPE_UNHANDLED; } struct tracer boot_tracer __read_mostly = -- cgit v1.2.3-70-g09d2 From 70255b5e3f1bd1a5af5b1e425ec2c4db7c735112 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 1 Oct 2008 00:29:52 -0400 Subject: ring_buffer: remove raw from local_irq_save The raw_local_irq_save causes issues with lockdep. We don't need it, so replace it with local_irq_save. Signed-off-by: Steven Rostedt Signed-off-by: Ingo Molnar --- kernel/trace/ring_buffer.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index cfa711374d9..8e7392fd0db 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -847,7 +847,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, if (atomic_read(&buffer->record_disabled)) return NULL; - raw_local_irq_save(*flags); + local_irq_save(*flags); cpu = raw_smp_processor_id(); if (!cpu_isset(cpu, buffer->cpumask)) @@ -909,7 +909,7 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer, rb_commit(cpu_buffer, event); spin_unlock(&cpu_buffer->lock); - raw_local_irq_restore(flags); + local_irq_restore(flags); return 0; } @@ -1583,13 +1583,13 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu) if (!cpu_isset(cpu, buffer->cpumask)) return; - raw_local_irq_save(flags); + local_irq_save(flags); spin_lock(&cpu_buffer->lock); rb_reset_cpu(cpu_buffer); spin_unlock(&cpu_buffer->lock); - raw_local_irq_restore(flags); + local_irq_restore(flags); } /** -- cgit v1.2.3-70-g09d2 From d769041f865330034131525ee6a7f72eb4af2a24 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 1 Oct 2008 00:29:53 -0400 Subject: ring_buffer: implement new locking The old "lock always" scheme had issues with lockdep, and was not very efficient anyway. This patch introduces a new design that is partially lockless on writes. Writes will add new entries to the per cpu pages by simply disabling interrupts. When a write needs to go to another page, it will grab the lock. A new "read page" has been added so that the reader can pull out a page from the ring buffer to read without worrying about the writer writing over it. This allows us to not take the lock for all reads. The lock is now only taken when a read needs to go to a new page.
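To illustrate the reader side of that idea, here is a toy userspace model of the page swap (illustration only: the names, the mutex standing in for the per-cpu spinlock, and the fixed-size pages are simplifications, not the real ring buffer code):

#include <pthread.h>

#define TOY_NPAGES 4

struct toy_page {
	int events[32];
	int len;	/* events stored by the writer */
	int read;	/* events consumed; reader-private state */
};

struct toy_cpu_buffer {
	struct toy_page ring[TOY_NPAGES];	/* shared with the writer */
	int head;				/* oldest slot with unread data */
	struct toy_page reader_page;		/* owned by the reader alone */
	pthread_mutex_t lock;			/* taken only to swap pages */
};

/* Fast path: read from the private page without any locking.
 * Slow path: trade the exhausted private page for the head page. */
static int toy_consume(struct toy_cpu_buffer *b, int *out)
{
	struct toy_page *rp = &b->reader_page;
	struct toy_page tmp;

	if (rp->read == rp->len) {
		pthread_mutex_lock(&b->lock);
		if (b->ring[b->head].len == 0) {
			pthread_mutex_unlock(&b->lock);
			return 0;		/* buffer is empty */
		}
		tmp = b->ring[b->head];
		rp->len = 0;
		rp->read = 0;
		b->ring[b->head] = *rp;		/* empty page rejoins the ring */
		*rp = tmp;			/* full page becomes reader-private */
		rp->read = 0;
		b->head = (b->head + 1) % TOY_NPAGES;
		pthread_mutex_unlock(&b->lock);
	}
	*out = rp->events[rp->read++];
	return 1;
}

Once swapped out, the page belongs to the reader alone, so the writer can keep appending to the other pages without ever contending with in-progress reads.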
This is far from lockless, and interrupts still need to be disabled, but it is a step towards a more lockless solution, and it also solves a lot of the issues that were noticed by the first conversion of ftrace to the ring buffers. Note: the ring_buffer_{un}lock API has been removed. Signed-off-by: Steven Rostedt Signed-off-by: Ingo Molnar --- include/linux/ring_buffer.h | 3 - kernel/trace/ring_buffer.c | 298 +++++++++++++++++++++++++------------------- kernel/trace/trace.c | 113 +++++++++++------ 3 files changed, 247 insertions(+), 167 deletions(-) (limited to 'kernel/trace') diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index c52375b8330..536b0ca46a0 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h @@ -63,9 +63,6 @@ ring_buffer_event_time_delta(struct ring_buffer_event *event) return event->time_delta; } -void ring_buffer_lock(struct ring_buffer *buffer, unsigned long *flags); -void ring_buffer_unlock(struct ring_buffer *buffer, unsigned long flags); - /* * size is in bytes for each per CPU buffer. */ diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 8e7392fd0db..9631abf2ae2 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -161,8 +161,10 @@ struct ring_buffer_per_cpu { struct list_head pages; unsigned long head; /* read from head */ unsigned long tail; /* write to tail */ + unsigned long reader; struct buffer_page *head_page; struct buffer_page *tail_page; + struct buffer_page *reader_page; unsigned long overrun; unsigned long entries; u64 write_stamp; @@ -260,6 +262,7 @@ static struct ring_buffer_per_cpu * rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu) { struct ring_buffer_per_cpu *cpu_buffer; + unsigned long addr; int ret; cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()), @@ -272,9 +275,16 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu) spin_lock_init(&cpu_buffer->lock); INIT_LIST_HEAD(&cpu_buffer->pages); + addr = __get_free_page(GFP_KERNEL); + if (!addr) + goto fail_free_buffer; + cpu_buffer->reader_page = (struct buffer_page *)virt_to_page(addr); + INIT_LIST_HEAD(&cpu_buffer->reader_page->list); + cpu_buffer->reader_page->size = 0; + ret = rb_allocate_pages(cpu_buffer, buffer->pages); if (ret < 0) - goto fail_free_buffer; + goto fail_free_reader; cpu_buffer->head_page = list_entry(cpu_buffer->pages.next, struct buffer_page, list); @@ -283,6 +293,9 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu) return cpu_buffer; + fail_free_reader: + free_buffer_page(cpu_buffer->reader_page); + fail_free_buffer: kfree(cpu_buffer); return NULL; @@ -293,6 +306,9 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) struct list_head *head = &cpu_buffer->pages; struct buffer_page *page, *tmp; + list_del_init(&cpu_buffer->reader_page->list); + free_buffer_page(cpu_buffer->reader_page); + list_for_each_entry_safe(page, tmp, head, list) { list_del_init(&page->list); free_buffer_page(page); @@ -538,8 +554,10 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size) static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) { - return cpu_buffer->head_page == cpu_buffer->tail_page && - cpu_buffer->head == cpu_buffer->tail; + return (cpu_buffer->reader == cpu_buffer->reader_page->size && + (cpu_buffer->tail_page == cpu_buffer->reader_page || + (cpu_buffer->tail_page == cpu_buffer->head_page && + cpu_buffer->head == cpu_buffer->tail))); } static inline int rb_null_event(struct 
ring_buffer_event *event) @@ -555,10 +573,10 @@ static inline void *rb_page_index(struct buffer_page *page, unsigned index) } static inline struct ring_buffer_event * -rb_head_event(struct ring_buffer_per_cpu *cpu_buffer) +rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer) { - return rb_page_index(cpu_buffer->head_page, - cpu_buffer->head); + return rb_page_index(cpu_buffer->reader_page, + cpu_buffer->reader); } static inline struct ring_buffer_event * @@ -610,15 +628,32 @@ rb_add_stamp(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts) cpu_buffer->write_stamp = *ts; } -static void rb_reset_read_page(struct ring_buffer_per_cpu *cpu_buffer) +static void rb_reset_head_page(struct ring_buffer_per_cpu *cpu_buffer) { - cpu_buffer->read_stamp = cpu_buffer->head_page->time_stamp; cpu_buffer->head = 0; } -static void -rb_reset_iter_read_page(struct ring_buffer_iter *iter) +static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer) { + cpu_buffer->read_stamp = cpu_buffer->reader_page->time_stamp; + cpu_buffer->reader = 0; +} + +static inline void rb_inc_iter(struct ring_buffer_iter *iter) +{ + struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; + + /* + * The iterator could be on the reader page (it starts there). + * But the head could have moved, since the reader was + * found. Check for this case and assign the iterator + * to the head page instead of next. + */ + if (iter->head_page == cpu_buffer->reader_page) + iter->head_page = cpu_buffer->head_page; + else + rb_inc_page(cpu_buffer, &iter->head_page); + iter->read_stamp = iter->head_page->time_stamp; iter->head = 0; } @@ -693,30 +728,39 @@ static struct ring_buffer_event * __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, unsigned type, unsigned long length, u64 *ts) { - struct buffer_page *head_page, *tail_page; + struct buffer_page *tail_page, *head_page, *reader_page; unsigned long tail; struct ring_buffer *buffer = cpu_buffer->buffer; struct ring_buffer_event *event; + /* No locking needed for tail page */ tail_page = cpu_buffer->tail_page; - head_page = cpu_buffer->head_page; tail = cpu_buffer->tail; if (tail + length > BUF_PAGE_SIZE) { struct buffer_page *next_page = tail_page; + spin_lock(&cpu_buffer->lock); rb_inc_page(cpu_buffer, &next_page); + head_page = cpu_buffer->head_page; + reader_page = cpu_buffer->reader_page; + + /* we grabbed the lock before incrementing */ + WARN_ON(next_page == reader_page); + if (next_page == head_page) { - if (!(buffer->flags & RB_FL_OVERWRITE)) + if (!(buffer->flags & RB_FL_OVERWRITE)) { + spin_unlock(&cpu_buffer->lock); return NULL; + } /* count overflows */ rb_update_overflow(cpu_buffer); rb_inc_page(cpu_buffer, &head_page); cpu_buffer->head_page = head_page; - rb_reset_read_page(cpu_buffer); + rb_reset_head_page(cpu_buffer); } if (tail != BUF_PAGE_SIZE) { @@ -732,6 +776,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, cpu_buffer->tail_page = tail_page; cpu_buffer->tail = tail; rb_add_stamp(cpu_buffer, ts); + spin_unlock(&cpu_buffer->lock); } BUG_ON(tail + length > BUF_PAGE_SIZE); @@ -802,7 +847,9 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer, return NULL; } } else { + spin_lock(&cpu_buffer->lock); rb_add_stamp(cpu_buffer, &ts); + spin_unlock(&cpu_buffer->lock); delta = 0; } @@ -851,13 +898,12 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, cpu = raw_smp_processor_id(); if (!cpu_isset(cpu, buffer->cpumask)) - goto out_irq; + goto out; cpu_buffer = buffer->buffers[cpu]; - spin_lock(&cpu_buffer->lock); if 
(atomic_read(&cpu_buffer->record_disabled)) - goto no_record; + goto out; length = rb_calculate_event_length(length); if (length > BUF_PAGE_SIZE) @@ -865,13 +911,11 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, event = rb_reserve_next_event(cpu_buffer, RINGBUF_TYPE_DATA, length); if (!event) - goto no_record; + goto out; return event; - no_record: - spin_unlock(&cpu_buffer->lock); - out_irq: + out: local_irq_restore(*flags); return NULL; } @@ -904,11 +948,8 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer, cpu_buffer = buffer->buffers[cpu]; - assert_spin_locked(&cpu_buffer->lock); - rb_commit(cpu_buffer, event); - spin_unlock(&cpu_buffer->lock); local_irq_restore(flags); return 0; @@ -945,10 +986,9 @@ int ring_buffer_write(struct ring_buffer *buffer, cpu = raw_smp_processor_id(); if (!cpu_isset(cpu, buffer->cpumask)) - goto out_irq; + goto out; cpu_buffer = buffer->buffers[cpu]; - spin_lock(&cpu_buffer->lock); if (atomic_read(&cpu_buffer->record_disabled)) goto out; @@ -967,55 +1007,11 @@ int ring_buffer_write(struct ring_buffer *buffer, ret = 0; out: - spin_unlock(&cpu_buffer->lock); - out_irq: local_irq_restore(flags); return ret; } -/** - * ring_buffer_lock - lock the ring buffer - * @buffer: The ring buffer to lock - * @flags: The place to store the interrupt flags - * - * This locks all the per CPU buffers. - * - * Must be unlocked by ring_buffer_unlock. - */ -void ring_buffer_lock(struct ring_buffer *buffer, unsigned long *flags) -{ - struct ring_buffer_per_cpu *cpu_buffer; - int cpu; - - local_irq_save(*flags); - - for_each_buffer_cpu(buffer, cpu) { - cpu_buffer = buffer->buffers[cpu]; - spin_lock(&cpu_buffer->lock); - } -} - -/** - * ring_buffer_unlock - unlock a locked buffer - * @buffer: The locked buffer to unlock - * @flags: The interrupt flags received by ring_buffer_lock - */ -void ring_buffer_unlock(struct ring_buffer *buffer, unsigned long flags) -{ - struct ring_buffer_per_cpu *cpu_buffer; - int cpu; - - for (cpu = buffer->cpus - 1; cpu >= 0; cpu--) { - if (!cpu_isset(cpu, buffer->cpumask)) - continue; - cpu_buffer = buffer->buffers[cpu]; - spin_unlock(&cpu_buffer->lock); - } - - local_irq_restore(flags); -} - /** * ring_buffer_record_disable - stop all writes into the buffer * @buffer: The ring buffer to stop writes to. 
@@ -1169,9 +1165,18 @@ void ring_buffer_iter_reset(struct ring_buffer_iter *iter) { struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; - iter->head_page = cpu_buffer->head_page; - iter->head = cpu_buffer->head; - rb_reset_iter_read_page(iter); + /* Iterator usage is expected to have record disabled */ + if (list_empty(&cpu_buffer->reader_page->list)) { + iter->head_page = cpu_buffer->head_page; + iter->head = cpu_buffer->head; + } else { + iter->head_page = cpu_buffer->reader_page; + iter->head = cpu_buffer->reader; + } + if (iter->head) + iter->read_stamp = cpu_buffer->read_stamp; + else + iter->read_stamp = iter->head_page->time_stamp; } /** @@ -1250,43 +1255,84 @@ rb_update_iter_read_stamp(struct ring_buffer_iter *iter, return; } -static void rb_advance_head(struct ring_buffer_per_cpu *cpu_buffer) +static struct buffer_page * +rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) { - struct ring_buffer_event *event; - unsigned length; + struct buffer_page *reader = NULL; + unsigned long flags; + + spin_lock_irqsave(&cpu_buffer->lock, flags); + + again: + reader = cpu_buffer->reader_page; + + /* If there's more to read, return this page */ + if (cpu_buffer->reader < reader->size) + goto out; + + /* Never should we have an index greater than the size */ + WARN_ON(cpu_buffer->reader > reader->size); + + /* check if we caught up to the tail */ + reader = NULL; + if (cpu_buffer->tail_page == cpu_buffer->reader_page) + goto out; /* - * Check if we are at the end of the buffer. + * Splice the empty reader page into the list around the head. + * Reset the reader page to size zero. */ - if (cpu_buffer->head >= cpu_buffer->head_page->size) { - BUG_ON(cpu_buffer->head_page == cpu_buffer->tail_page); - rb_inc_page(cpu_buffer, &cpu_buffer->head_page); - rb_reset_read_page(cpu_buffer); - return; - } - event = rb_head_event(cpu_buffer); + reader = cpu_buffer->head_page; + cpu_buffer->reader_page->list.next = reader->list.next; + cpu_buffer->reader_page->list.prev = reader->list.prev; + cpu_buffer->reader_page->size = 0; - if (event->type == RINGBUF_TYPE_DATA) - cpu_buffer->entries--; - - length = rb_event_length(event); + /* Make the reader page now replace the head */ + reader->list.prev->next = &cpu_buffer->reader_page->list; + reader->list.next->prev = &cpu_buffer->reader_page->list; /* - * This should not be called to advance the header if we are - * at the tail of the buffer. + * If the tail is on the reader, then we must set the head + * to the inserted page, otherwise we set it one before. 
*/ - BUG_ON((cpu_buffer->head_page == cpu_buffer->tail_page) && - (cpu_buffer->head + length > cpu_buffer->tail)); + cpu_buffer->head_page = cpu_buffer->reader_page; - rb_update_read_stamp(cpu_buffer, event); + if (cpu_buffer->tail_page != reader) + rb_inc_page(cpu_buffer, &cpu_buffer->head_page); + + /* Finally update the reader page to the new head */ + cpu_buffer->reader_page = reader; + rb_reset_reader_page(cpu_buffer); + + goto again; + + out: + spin_unlock_irqrestore(&cpu_buffer->lock, flags); + + return reader; +} + +static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer) +{ + struct ring_buffer_event *event; + struct buffer_page *reader; + unsigned length; + + reader = rb_get_reader_page(cpu_buffer); - cpu_buffer->head += length; + /* This function should not be called when buffer is empty */ + BUG_ON(!reader); - /* check for end of page */ - if ((cpu_buffer->head >= cpu_buffer->head_page->size) && - (cpu_buffer->head_page != cpu_buffer->tail_page)) - rb_advance_head(cpu_buffer); + event = rb_reader_event(cpu_buffer); + + if (event->type == RINGBUF_TYPE_DATA) + cpu_buffer->entries--; + + rb_update_read_stamp(cpu_buffer, event); + + length = rb_event_length(event); + cpu_buffer->reader += length; } static void rb_advance_iter(struct ring_buffer_iter *iter) @@ -1304,8 +1350,7 @@ static void rb_advance_iter(struct ring_buffer_iter *iter) */ if (iter->head >= iter->head_page->size) { BUG_ON(iter->head_page == cpu_buffer->tail_page); - rb_inc_page(cpu_buffer, &iter->head_page); - rb_reset_iter_read_page(iter); + rb_inc_iter(iter); return; } @@ -1344,6 +1389,7 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts) { struct ring_buffer_per_cpu *cpu_buffer; struct ring_buffer_event *event; + struct buffer_page *reader; if (!cpu_isset(cpu, buffer->cpumask)) return NULL; @@ -1351,25 +1397,26 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts) cpu_buffer = buffer->buffers[cpu]; again: - if (rb_per_cpu_empty(cpu_buffer)) + reader = rb_get_reader_page(cpu_buffer); + if (!reader) return NULL; - event = rb_head_event(cpu_buffer); + event = rb_reader_event(cpu_buffer); switch (event->type) { case RINGBUF_TYPE_PADDING: - rb_inc_page(cpu_buffer, &cpu_buffer->head_page); - rb_reset_read_page(cpu_buffer); - goto again; + WARN_ON(1); + rb_advance_reader(cpu_buffer); + return NULL; case RINGBUF_TYPE_TIME_EXTEND: /* Internal data, OK to advance */ - rb_advance_head(cpu_buffer); + rb_advance_reader(cpu_buffer); goto again; case RINGBUF_TYPE_TIME_STAMP: /* FIXME: not implemented */ - rb_advance_head(cpu_buffer); + rb_advance_reader(cpu_buffer); goto again; case RINGBUF_TYPE_DATA: @@ -1415,8 +1462,7 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts) switch (event->type) { case RINGBUF_TYPE_PADDING: - rb_inc_page(cpu_buffer, &iter->head_page); - rb_reset_iter_read_page(iter); + rb_inc_iter(iter); goto again; case RINGBUF_TYPE_TIME_EXTEND: @@ -1465,7 +1511,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts) return NULL; cpu_buffer = buffer->buffers[cpu]; - rb_advance_head(cpu_buffer); + rb_advance_reader(cpu_buffer); return event; } @@ -1487,6 +1533,7 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu) { struct ring_buffer_per_cpu *cpu_buffer; struct ring_buffer_iter *iter; + unsigned long flags; if (!cpu_isset(cpu, buffer->cpumask)) return NULL; @@ -1502,11 +1549,9 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu) atomic_inc(&cpu_buffer->record_disabled); synchronize_sched(); - spin_lock(&cpu_buffer->lock); - 
iter->head = cpu_buffer->head; - iter->head_page = cpu_buffer->head_page; - rb_reset_iter_read_page(iter); - spin_unlock(&cpu_buffer->lock); + spin_lock_irqsave(&cpu_buffer->lock, flags); + ring_buffer_iter_reset(iter); + spin_unlock_irqrestore(&cpu_buffer->lock, flags); return iter; } @@ -1562,10 +1607,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) { cpu_buffer->head_page = list_entry(cpu_buffer->pages.next, struct buffer_page, list); - cpu_buffer->tail_page - = list_entry(cpu_buffer->pages.next, struct buffer_page, list); + cpu_buffer->head_page->size = 0; + cpu_buffer->tail_page = cpu_buffer->head_page; + cpu_buffer->tail_page->size = 0; + INIT_LIST_HEAD(&cpu_buffer->reader_page->list); + cpu_buffer->reader_page->size = 0; + + cpu_buffer->head = cpu_buffer->tail = cpu_buffer->reader = 0; - cpu_buffer->head = cpu_buffer->tail = 0; cpu_buffer->overrun = 0; cpu_buffer->entries = 0; } @@ -1583,13 +1632,11 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu) if (!cpu_isset(cpu, buffer->cpumask)) return; - local_irq_save(flags); - spin_lock(&cpu_buffer->lock); + spin_lock_irqsave(&cpu_buffer->lock, flags); rb_reset_cpu(cpu_buffer); - spin_unlock(&cpu_buffer->lock); - local_irq_restore(flags); + spin_unlock_irqrestore(&cpu_buffer->lock, flags); } /** @@ -1598,15 +1645,10 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu) */ void ring_buffer_reset(struct ring_buffer *buffer) { - unsigned long flags; int cpu; - ring_buffer_lock(buffer, &flags); - for_each_buffer_cpu(buffer, cpu) - rb_reset_cpu(buffer->buffers[cpu]); - - ring_buffer_unlock(buffer, flags); + ring_buffer_reset_cpu(buffer, cpu); } /** diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 6a1c76bb56b..b542f883780 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -42,6 +42,20 @@ unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX; unsigned long __read_mostly tracing_thresh; +static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled); + +static inline void ftrace_disable_cpu(void) +{ + preempt_disable(); + local_inc(&__get_cpu_var(ftrace_cpu_disabled)); +} + +static inline void ftrace_enable_cpu(void) +{ + local_dec(&__get_cpu_var(ftrace_cpu_disabled)); + preempt_enable(); +} + static cpumask_t __read_mostly tracing_buffer_mask; #define for_each_tracing_cpu(cpu) \ @@ -406,7 +420,9 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) tr->buffer = max_tr.buffer; max_tr.buffer = buf; + ftrace_disable_cpu(); ring_buffer_reset(tr->buffer); + ftrace_enable_cpu(); __update_max_tr(tr, tsk, cpu); __raw_spin_unlock(&ftrace_max_lock); @@ -428,9 +444,13 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) WARN_ON_ONCE(!irqs_disabled()); __raw_spin_lock(&ftrace_max_lock); + ftrace_disable_cpu(); + ring_buffer_reset(max_tr.buffer); ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu); + ftrace_enable_cpu(); + WARN_ON_ONCE(ret); __update_max_tr(tr, tsk, cpu); @@ -543,7 +563,9 @@ void unregister_tracer(struct tracer *type) void tracing_reset(struct trace_array *tr, int cpu) { + ftrace_disable_cpu(); ring_buffer_reset_cpu(tr->buffer, cpu); + ftrace_enable_cpu(); } #define SAVED_CMDLINES 128 @@ -654,6 +676,10 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data, struct ftrace_entry *entry; unsigned long irq_flags; + /* If we are reading the ring buffer, don't trace */ + if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled)))) + return; + event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), 
&irq_flags); if (!event) @@ -870,8 +896,14 @@ enum trace_file_type { static void trace_iterator_increment(struct trace_iterator *iter, int cpu) { + /* Don't allow ftrace to trace into the ring buffers */ + ftrace_disable_cpu(); + iter->idx++; - ring_buffer_read(iter->buffer_iter[iter->cpu], NULL); + if (iter->buffer_iter[iter->cpu]) + ring_buffer_read(iter->buffer_iter[iter->cpu], NULL); + + ftrace_enable_cpu(); } static struct trace_entry * @@ -880,9 +912,19 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts) struct ring_buffer_event *event; struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu]; - event = ring_buffer_iter_peek(buf_iter, ts); + /* Don't allow ftrace to trace into the ring buffers */ + ftrace_disable_cpu(); + + if (buf_iter) + event = ring_buffer_iter_peek(buf_iter, ts); + else + event = ring_buffer_peek(iter->tr->buffer, cpu, ts); + + ftrace_enable_cpu(); + return event ? ring_buffer_event_data(event) : NULL; } + static struct trace_entry * __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts) { @@ -938,7 +980,10 @@ static void *find_next_entry_inc(struct trace_iterator *iter) static void trace_consume(struct trace_iterator *iter) { + /* Don't allow ftrace to trace into the ring buffers */ + ftrace_disable_cpu(); ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts); + ftrace_enable_cpu(); } static void *s_next(struct seq_file *m, void *v, loff_t *pos) @@ -991,10 +1036,14 @@ static void *s_start(struct seq_file *m, loff_t *pos) iter->cpu = 0; iter->idx = -1; + ftrace_disable_cpu(); + for_each_tracing_cpu(cpu) { ring_buffer_iter_reset(iter->buffer_iter[cpu]); } + ftrace_enable_cpu(); + for (p = iter; p && l < *pos; p = s_next(m, p, &l)) ; @@ -1242,7 +1291,16 @@ void trace_seq_print_cont(struct trace_seq *s, struct trace_iterator *iter) cont = (struct trace_field_cont *)ent; if (ok) ok = (trace_seq_printf(s, "%s", cont->buf) > 0); - ring_buffer_read(iter->buffer_iter[iter->cpu], NULL); + + ftrace_disable_cpu(); + + if (iter->buffer_iter[iter->cpu]) + ring_buffer_read(iter->buffer_iter[iter->cpu], NULL); + else + ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL); + + ftrace_enable_cpu(); + ent = peek_next_entry(iter, iter->cpu, NULL); } while (ent && ent->type == TRACE_CONT); @@ -1683,9 +1741,15 @@ static int trace_empty(struct trace_iterator *iter) int cpu; for_each_tracing_cpu(cpu) { - if (!ring_buffer_iter_empty(iter->buffer_iter[cpu])) - return 0; + if (iter->buffer_iter[cpu]) { + if (!ring_buffer_iter_empty(iter->buffer_iter[cpu])) + return 0; + } else { + if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu)) + return 0; + } } + return TRACE_TYPE_HANDLED; } @@ -1776,8 +1840,10 @@ __tracing_open(struct inode *inode, struct file *file, int *ret) iter->pos = -1; for_each_tracing_cpu(cpu) { + iter->buffer_iter[cpu] = ring_buffer_read_start(iter->tr->buffer, cpu); + if (!iter->buffer_iter[cpu]) goto fail_buffer; } @@ -2341,7 +2407,6 @@ static atomic_t tracing_reader; static int tracing_open_pipe(struct inode *inode, struct file *filp) { struct trace_iterator *iter; - int cpu; if (tracing_disabled) return -ENODEV; @@ -2362,38 +2427,17 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp) iter->trace = current_trace; filp->private_data = iter; - for_each_tracing_cpu(cpu) { - iter->buffer_iter[cpu] = - ring_buffer_read_start(iter->tr->buffer, cpu); - if (!iter->buffer_iter[cpu]) - goto fail_buffer; - } - if (iter->trace->pipe_open) iter->trace->pipe_open(iter); mutex_unlock(&trace_types_lock); return 0; - - 
fail_buffer: - for_each_tracing_cpu(cpu) { - if (iter->buffer_iter[cpu]) - ring_buffer_read_finish(iter->buffer_iter[cpu]); - } - mutex_unlock(&trace_types_lock); - - return -ENOMEM; } static int tracing_release_pipe(struct inode *inode, struct file *file) { struct trace_iterator *iter = file->private_data; - int cpu; - for_each_tracing_cpu(cpu) { - if (iter->buffer_iter[cpu]) - ring_buffer_read_finish(iter->buffer_iter[cpu]); - } kfree(iter); atomic_dec(&tracing_reader); @@ -2429,7 +2473,6 @@ tracing_read_pipe(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_iterator *iter = filp->private_data; - unsigned long flags; #ifdef CONFIG_FTRACE int ftrace_save; #endif @@ -2528,7 +2571,6 @@ waitagain: ftrace_enabled = 0; #endif smp_wmb(); - ring_buffer_lock(iter->tr->buffer, &flags); while (find_next_entry_inc(iter) != NULL) { enum print_line_t ret; @@ -2547,7 +2589,6 @@ waitagain: break; } - ring_buffer_unlock(iter->tr->buffer, flags); #ifdef CONFIG_FTRACE ftrace_enabled = ftrace_save; #endif @@ -3010,8 +3051,8 @@ void ftrace_dump(void) static struct trace_iterator iter; static cpumask_t mask; static int dump_ran; - unsigned long flags, irq_flags; - int cnt = 0; + unsigned long flags; + int cnt = 0, cpu; /* only one dump */ spin_lock_irqsave(&ftrace_dump_lock, flags); @@ -3023,6 +3064,10 @@ void ftrace_dump(void) /* No turning back! */ ftrace_kill_atomic(); + for_each_tracing_cpu(cpu) { + atomic_inc(&global_trace.data[cpu]->disabled); + } + printk(KERN_TRACE "Dumping ftrace buffer:\n"); iter.tr = &global_trace; @@ -3037,8 +3082,6 @@ void ftrace_dump(void) cpus_clear(mask); - ring_buffer_lock(iter.tr->buffer, &irq_flags); - while (!trace_empty(&iter)) { if (!cnt) @@ -3066,8 +3109,6 @@ void ftrace_dump(void) else printk(KERN_TRACE "---------------------------------\n"); - ring_buffer_unlock(iter.tr->buffer, irq_flags); - out: spin_unlock_irqrestore(&ftrace_dump_lock, flags); } -- cgit v1.2.3-70-g09d2 From 797d3712a9dd75c720558612be05f42c031a7bb5 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 30 Sep 2008 18:13:45 +0200 Subject: tracing/ftrace: adapt mmiotrace to the new type of print_line, fix Correct the value's type of trace_empty function Signed-off-by: Frederic Weisbecker Signed-off-by: Ingo Molnar --- kernel/trace/trace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index b542f883780..c1634068adf 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -1750,7 +1750,7 @@ static int trace_empty(struct trace_iterator *iter) } } - return TRACE_TYPE_HANDLED; + return 1; } static enum print_line_t print_trace_line(struct trace_iterator *iter) -- cgit v1.2.3-70-g09d2 From 7104f300c5a69b46dda00d898034dd05c9f21739 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 1 Oct 2008 10:52:51 -0400 Subject: ftrace: type cast filter+verifier The mmiotrace map had a bug that would typecast the entry from the trace to the wrong type. That is a known danger of C typecasts, there's absolutely zero checking done on them. Help that problem a bit by using a GCC extension to implement a type filter that restricts the types that a trace record can be cast into, and by adding a dynamic check (in debug mode) to verify the type of the entry. This patch adds a macro to assign all entries of ftrace using the type of the variable and checking the entry id. 
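For illustration, here is a standalone sketch of the same GCC trick; the names assign_type, bad_assign_type and foo_event are invented for this example, while the kernel's real macro, trace_assign_type, appears in the diff below. The type check uses __builtin_types_compatible_p, and linking fails for any unknown type because the deliberately undefined function is only discarded when its branch is provably dead (build with -O1 or higher, as the kernel does):

#include <stdio.h>

struct entry { int type; };

enum { TYPE_FOO = 1 };

struct foo_event {
	struct entry ent;	/* embedded header, like trace_entry */
	int payload;
};

/* deliberately never defined: any reference breaks the link */
extern void bad_assign_type(void);

/*
 * If var has type etype *, cast the entry and verify its runtime id;
 * otherwise fall through to the undefined function.
 */
#define assign_type(var, e, etype, id)					\
	do {								\
		if (__builtin_types_compatible_p(typeof(var), etype *)) { \
			var = (typeof(var))(e);				\
			if ((id) && (e)->type != (id))			\
				fprintf(stderr, "bad entry id!\n");	\
			break;						\
		}							\
		bad_assign_type();					\
	} while (0)

int main(void)
{
	struct foo_event fe = { .ent = { .type = TYPE_FOO }, .payload = 42 };
	struct entry *e = &fe.ent;
	struct foo_event *field;

	assign_type(field, e, struct foo_event, TYPE_FOO);
	printf("payload = %d\n", field->payload);
	return 0;
}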
The typecasts are now done in the macro for only those types that it knows about, which should be all the types that are allowed to be read from the tracer. Signed-off-by: Steven Rostedt Signed-off-by: Ingo Molnar --- kernel/trace/trace.c | 85 +++++++++++++++++++++++++++++------------- kernel/trace/trace.h | 42 +++++++++++++++++++++ kernel/trace/trace_mmiotrace.c | 14 +++++-- 3 files changed, 112 insertions(+), 29 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index c1634068adf..948f7d821c6 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -1350,7 +1350,9 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) } switch (entry->type) { case TRACE_FN: { - struct ftrace_entry *field = (struct ftrace_entry *)entry; + struct ftrace_entry *field; + + trace_assign_type(field, entry); seq_print_ip_sym(s, field->ip, sym_flags); trace_seq_puts(s, " ("); @@ -1363,8 +1365,9 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) } case TRACE_CTX: case TRACE_WAKE: { - struct ctx_switch_entry *field = - (struct ctx_switch_entry *)entry; + struct ctx_switch_entry *field; + + trace_assign_type(field, entry); T = field->next_state < sizeof(state_to_char) ? state_to_char[field->next_state] : 'X'; @@ -1384,7 +1387,9 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) break; } case TRACE_SPECIAL: { - struct special_entry *field = (struct special_entry *)entry; + struct special_entry *field; + + trace_assign_type(field, entry); trace_seq_printf(s, "# %ld %ld %ld\n", field->arg1, @@ -1393,7 +1398,9 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) break; } case TRACE_STACK: { - struct stack_entry *field = (struct stack_entry *)entry; + struct stack_entry *field; + + trace_assign_type(field, entry); for (i = 0; i < FTRACE_STACK_ENTRIES; i++) { if (i) @@ -1404,7 +1411,9 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) break; } case TRACE_PRINT: { - struct print_entry *field = (struct print_entry *)entry; + struct print_entry *field; + + trace_assign_type(field, entry); seq_print_ip_sym(s, field->ip, sym_flags); trace_seq_printf(s, ": %s", field->buf); @@ -1454,7 +1463,9 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter) switch (entry->type) { case TRACE_FN: { - struct ftrace_entry *field = (struct ftrace_entry *)entry; + struct ftrace_entry *field; + + trace_assign_type(field, entry); ret = seq_print_ip_sym(s, field->ip, sym_flags); if (!ret) @@ -1480,8 +1491,9 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter) } case TRACE_CTX: case TRACE_WAKE: { - struct ctx_switch_entry *field = - (struct ctx_switch_entry *)entry; + struct ctx_switch_entry *field; + + trace_assign_type(field, entry); S = field->prev_state < sizeof(state_to_char) ? 
state_to_char[field->prev_state] : 'X'; @@ -1501,7 +1513,9 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter) break; } case TRACE_SPECIAL: { - struct special_entry *field = (struct special_entry *)entry; + struct special_entry *field; + + trace_assign_type(field, entry); ret = trace_seq_printf(s, "# %ld %ld %ld\n", field->arg1, @@ -1512,7 +1526,9 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter) break; } case TRACE_STACK: { - struct stack_entry *field = (struct stack_entry *)entry; + struct stack_entry *field; + + trace_assign_type(field, entry); for (i = 0; i < FTRACE_STACK_ENTRIES; i++) { if (i) { @@ -1531,7 +1547,9 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter) break; } case TRACE_PRINT: { - struct print_entry *field = (struct print_entry *)entry; + struct print_entry *field; + + trace_assign_type(field, entry); seq_print_ip_sym(s, field->ip, sym_flags); trace_seq_printf(s, ": %s", field->buf); @@ -1562,7 +1580,9 @@ static enum print_line_t print_raw_fmt(struct trace_iterator *iter) switch (entry->type) { case TRACE_FN: { - struct ftrace_entry *field = (struct ftrace_entry *)entry; + struct ftrace_entry *field; + + trace_assign_type(field, entry); ret = trace_seq_printf(s, "%x %x\n", field->ip, @@ -1573,8 +1593,9 @@ static enum print_line_t print_raw_fmt(struct trace_iterator *iter) } case TRACE_CTX: case TRACE_WAKE: { - struct ctx_switch_entry *field = - (struct ctx_switch_entry *)entry; + struct ctx_switch_entry *field; + + trace_assign_type(field, entry); S = field->prev_state < sizeof(state_to_char) ? state_to_char[field->prev_state] : 'X'; @@ -1596,7 +1617,9 @@ static enum print_line_t print_raw_fmt(struct trace_iterator *iter) } case TRACE_SPECIAL: case TRACE_STACK: { - struct special_entry *field = (struct special_entry *)entry; + struct special_entry *field; + + trace_assign_type(field, entry); ret = trace_seq_printf(s, "# %ld %ld %ld\n", field->arg1, @@ -1607,7 +1630,9 @@ static enum print_line_t print_raw_fmt(struct trace_iterator *iter) break; } case TRACE_PRINT: { - struct print_entry *field = (struct print_entry *)entry; + struct print_entry *field; + + trace_assign_type(field, entry); trace_seq_printf(s, "# %lx %s", field->ip, field->buf); if (entry->flags & TRACE_FLAG_CONT) @@ -1648,7 +1673,9 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter) switch (entry->type) { case TRACE_FN: { - struct ftrace_entry *field = (struct ftrace_entry *)entry; + struct ftrace_entry *field; + + trace_assign_type(field, entry); SEQ_PUT_HEX_FIELD_RET(s, field->ip); SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip); @@ -1656,8 +1683,9 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter) } case TRACE_CTX: case TRACE_WAKE: { - struct ctx_switch_entry *field = - (struct ctx_switch_entry *)entry; + struct ctx_switch_entry *field; + + trace_assign_type(field, entry); S = field->prev_state < sizeof(state_to_char) ? 
state_to_char[field->prev_state] : 'X'; @@ -1676,7 +1704,9 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter) } case TRACE_SPECIAL: case TRACE_STACK: { - struct special_entry *field = (struct special_entry *)entry; + struct special_entry *field; + + trace_assign_type(field, entry); SEQ_PUT_HEX_FIELD_RET(s, field->arg1); SEQ_PUT_HEX_FIELD_RET(s, field->arg2); @@ -1705,15 +1735,18 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter) switch (entry->type) { case TRACE_FN: { - struct ftrace_entry *field = (struct ftrace_entry *)entry; + struct ftrace_entry *field; + + trace_assign_type(field, entry); SEQ_PUT_FIELD_RET(s, field->ip); SEQ_PUT_FIELD_RET(s, field->parent_ip); break; } case TRACE_CTX: { - struct ctx_switch_entry *field = - (struct ctx_switch_entry *)entry; + struct ctx_switch_entry *field; + + trace_assign_type(field, entry); SEQ_PUT_FIELD_RET(s, field->prev_pid); SEQ_PUT_FIELD_RET(s, field->prev_prio); @@ -1725,7 +1758,9 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter) } case TRACE_SPECIAL: case TRACE_STACK: { - struct special_entry *field = (struct special_entry *)entry; + struct special_entry *field; + + trace_assign_type(field, entry); SEQ_PUT_FIELD_RET(s, field->arg1); SEQ_PUT_FIELD_RET(s, field->arg2); diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index a921ba5d292..f02042d0d82 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -177,6 +177,48 @@ struct trace_array { struct trace_array_cpu *data[NR_CPUS]; }; +#define FTRACE_CMP_TYPE(var, type) \ + __builtin_types_compatible_p(typeof(var), type *) + +#undef IF_ASSIGN +#define IF_ASSIGN(var, entry, etype, id) \ + if (FTRACE_CMP_TYPE(var, etype)) { \ + var = (typeof(var))(entry); \ + WARN_ON(id && (entry)->type != id); \ + break; \ + } + +/* Will cause compile errors if type is not found. */ +extern void __ftrace_bad_type(void); + +/* + * The trace_assign_type is a verifier that the entry type is + * the same as the type being assigned. To add new types simply + * add a line with the following format: + * + * IF_ASSIGN(var, ent, type, id); + * + * Where "type" is the trace type that includes the trace_entry + * as the "ent" item. And "id" is the trace identifier that is + * used in the trace_type enum. + * + * If the type can have more than one id, then use zero. 
+ */ +#define trace_assign_type(var, ent) \ + do { \ + IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN); \ + IF_ASSIGN(var, ent, struct ctx_switch_entry, 0); \ + IF_ASSIGN(var, ent, struct trace_field_cont, TRACE_CONT); \ + IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK); \ + IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \ + IF_ASSIGN(var, ent, struct special_entry, 0); \ + IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \ + TRACE_MMIO_RW); \ + IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \ + TRACE_MMIO_MAP); \ + IF_ASSIGN(var, ent, struct trace_boot, TRACE_BOOT); \ + __ftrace_bad_type(); \ + } while (0) /* Return values for print_line callback */ enum print_line_t { diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c index 1a266aa08e1..0e819f47bb7 100644 --- a/kernel/trace/trace_mmiotrace.c +++ b/kernel/trace/trace_mmiotrace.c @@ -178,15 +178,17 @@ print_out: static enum print_line_t mmio_print_rw(struct trace_iterator *iter) { struct trace_entry *entry = iter->ent; - struct trace_mmiotrace_rw *field = - (struct trace_mmiotrace_rw *)entry; - struct mmiotrace_rw *rw = &field->rw; + struct trace_mmiotrace_rw *field; + struct mmiotrace_rw *rw; struct trace_seq *s = &iter->seq; unsigned long long t = ns2usecs(iter->ts); unsigned long usec_rem = do_div(t, 1000000ULL); unsigned secs = (unsigned long)t; int ret = 1; + trace_assign_type(field, entry); + rw = &field->rw; + switch (rw->opcode) { case MMIO_READ: ret = trace_seq_printf(s, @@ -222,13 +224,17 @@ static enum print_line_t mmio_print_rw(struct trace_iterator *iter) static enum print_line_t mmio_print_map(struct trace_iterator *iter) { struct trace_entry *entry = iter->ent; - struct mmiotrace_map *m = (struct mmiotrace_map *)entry; + struct trace_mmiotrace_map *field; + struct mmiotrace_map *m; struct trace_seq *s = &iter->seq; unsigned long long t = ns2usecs(iter->ts); unsigned long usec_rem = do_div(t, 1000000ULL); unsigned secs = (unsigned long)t; int ret; + trace_assign_type(field, entry); + m = &field->map; + switch (m->opcode) { case MMIO_PROBE: ret = trace_seq_printf(s, -- cgit v1.2.3-70-g09d2 From e4c2ce82ca2710e17cb4df8eb2b249fa2eb5af30 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 1 Oct 2008 11:14:54 -0400 Subject: ring_buffer: allocate buffer page pointer The current method of overlaying the page frame as the buffer page pointer can be very dangerous and limits our ability to do other things with a page from the buffer, like send it off to disk. This patch allocates the buffer_page instead of overlaying the page's page frame. The use of the buffer_page has hardly changed due to this. Signed-off-by: Steven Rostedt Signed-off-by: Ingo Molnar --- kernel/trace/ring_buffer.c | 54 +++++++++++++++++++++++++++------------------- 1 file changed, 32 insertions(+), 22 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 9631abf2ae2..98145718988 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -115,16 +115,10 @@ void *ring_buffer_event_data(struct ring_buffer_event *event) * Thanks to Peter Zijlstra for suggesting this idea. 
*/ struct buffer_page { - union { - struct { - unsigned long flags; /* mandatory */ - atomic_t _count; /* mandatory */ - u64 time_stamp; /* page time stamp */ - unsigned size; /* size of page data */ - struct list_head list; /* list of free pages */ - }; - struct page page; - }; + u64 time_stamp; /* page time stamp */ + unsigned size; /* size of page data */ + struct list_head list; /* list of free pages */ + void *page; /* Actual data page */ }; /* @@ -133,9 +127,9 @@ struct buffer_page { */ static inline void free_buffer_page(struct buffer_page *bpage) { - reset_page_mapcount(&bpage->page); - bpage->page.mapping = NULL; - __free_page(&bpage->page); + if (bpage->page) + __free_page(bpage->page); + kfree(bpage); } /* @@ -237,11 +231,16 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned i; for (i = 0; i < nr_pages; i++) { + page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()), + GFP_KERNEL, cpu_to_node(cpu)); + if (!page) + goto free_pages; + list_add(&page->list, &pages); + addr = __get_free_page(GFP_KERNEL); if (!addr) goto free_pages; - page = (struct buffer_page *)virt_to_page(addr); - list_add(&page->list, &pages); + page->page = (void *)addr; } list_splice(&pages, head); @@ -262,6 +261,7 @@ static struct ring_buffer_per_cpu * rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu) { struct ring_buffer_per_cpu *cpu_buffer; + struct buffer_page *page; unsigned long addr; int ret; @@ -275,10 +275,17 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu) spin_lock_init(&cpu_buffer->lock); INIT_LIST_HEAD(&cpu_buffer->pages); + page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()), + GFP_KERNEL, cpu_to_node(cpu)); + if (!page) + goto fail_free_buffer; + + cpu_buffer->reader_page = page; addr = __get_free_page(GFP_KERNEL); if (!addr) - goto fail_free_buffer; - cpu_buffer->reader_page = (struct buffer_page *)virt_to_page(addr); + goto fail_free_reader; + page->page = (void *)addr; + INIT_LIST_HEAD(&cpu_buffer->reader_page->list); cpu_buffer->reader_page->size = 0; @@ -523,11 +530,16 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size) for_each_buffer_cpu(buffer, cpu) { for (i = 0; i < new_pages; i++) { + page = kzalloc_node(ALIGN(sizeof(*page), + cache_line_size()), + GFP_KERNEL, cpu_to_node(cpu)); + if (!page) + goto free_pages; + list_add(&page->list, &pages); addr = __get_free_page(GFP_KERNEL); if (!addr) goto free_pages; - page = (struct buffer_page *)virt_to_page(addr); - list_add(&page->list, &pages); + page->page = (void *)addr; } } @@ -567,9 +579,7 @@ static inline int rb_null_event(struct ring_buffer_event *event) static inline void *rb_page_index(struct buffer_page *page, unsigned index) { - void *addr = page_address(&page->page); - - return addr + index; + return page->page + index; } static inline struct ring_buffer_event * -- cgit v1.2.3-70-g09d2 From 38697053fa006411224a1790e2adb8216440ab0f Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 1 Oct 2008 13:14:09 -0400 Subject: ftrace: preempt disable over interrupt disable With the new ring buffer infrastructure in ftrace, I'm trying to make ftrace a little more light weight. This patch converts a lot of the local_irq_save/restore into preempt_disable/enable. The original preempt count in a lot of cases has to be sent in as a parameter so that it can be recorded correctly. Some places were recording it incorrectly before anyway. This is also laying the ground work to make ftrace a little bit more reentrant, and remove all locking. 
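The rule the note below spells out can be shown as a compilable sketch; the kernel primitives are stubbed here for illustration and my_tracer_call is an invented name:

#include <stdio.h>

/* stubs standing in for the real kernel primitives */
static int need_resched(void) { return 0; }
static void preempt_disable_notrace(void) { }
static void preempt_enable_notrace(void) { }
static void preempt_enable_no_resched_notrace(void) { }

static void my_tracer_call(void)
{
	int resched;

	resched = need_resched();
	preempt_disable_notrace();

	printf("record the trace entry here\n");

	/*
	 * If rescheduling was already needed before preemption was
	 * disabled, re-enabling must not schedule: this code may be
	 * running inside schedule() itself and would recurse.
	 */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}

int main(void)
{
	my_tracer_call();
	return 0;
}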
The function tracers must still protect against reentrancy. Note: All the function tracers must be careful when using preempt_disable. Each must do the following: resched = need_resched(); preempt_disable_notrace(); [...] if (resched) preempt_enable_no_resched_notrace(); else preempt_enable_notrace(); The reason is that if this function traces schedule() itself, the preempt_enable_notrace() will cause a schedule, which will lead us into a recursive failure. If we needed to reschedule before calling preempt_disable, we should have already scheduled. Since we did not, it most likely means that we should not, and that we are probably inside a schedule function. If resched was not set, we still need to catch a need-resched flag that gets set while preemption is off, and the if case at the end will catch that for us. Signed-off-by: Steven Rostedt Signed-off-by: Ingo Molnar --- kernel/trace/trace.c | 123 +++++++++++++++++++------------------- kernel/trace/trace.h | 13 ++-- kernel/trace/trace_boot.c | 2 +- kernel/trace/trace_irqsoff.c | 13 ++-- kernel/trace/trace_mmiotrace.c | 4 +- kernel/trace/trace_sched_switch.c | 9 ++- kernel/trace/trace_sched_wakeup.c | 13 +++- 7 files changed, 97 insertions(+), 80 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 948f7d821c6..1cd2e8143bb 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -652,12 +652,10 @@ void tracing_record_cmdline(struct task_struct *tsk) } void -tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags) +tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, + int pc) { struct task_struct *tsk = current; - unsigned long pc; - - pc = preempt_count(); entry->preempt_count = pc & 0xff; entry->pid = (tsk) ? tsk->pid : 0; @@ -670,7 +668,8 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags) void trace_function(struct trace_array *tr, struct trace_array_cpu *data, - unsigned long ip, unsigned long parent_ip, unsigned long flags) + unsigned long ip, unsigned long parent_ip, unsigned long flags, + int pc) { struct ring_buffer_event *event; struct ftrace_entry *entry; @@ -685,7 +684,7 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data, if (!event) return; entry = ring_buffer_event_data(event); - tracing_generic_entry_update(&entry->ent, flags); + tracing_generic_entry_update(&entry->ent, flags, pc); entry->ent.type = TRACE_FN; entry->ip = ip; entry->parent_ip = parent_ip; @@ -694,16 +693,17 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data, void ftrace(struct trace_array *tr, struct trace_array_cpu *data, - unsigned long ip, unsigned long parent_ip, unsigned long flags) + unsigned long ip, unsigned long parent_ip, unsigned long flags, + int pc) { if (likely(!atomic_read(&data->disabled))) - trace_function(tr, data, ip, parent_ip, flags); + trace_function(tr, data, ip, parent_ip, flags, pc); } -void __trace_stack(struct trace_array *tr, - struct trace_array_cpu *data, - unsigned long flags, - int skip) +static void ftrace_trace_stack(struct trace_array *tr, + struct trace_array_cpu *data, + unsigned long flags, + int skip, int pc) { struct ring_buffer_event *event; struct stack_entry *entry; @@ -718,7 +718,7 @@ void __trace_stack(struct trace_array *tr, if (!event) return; entry = ring_buffer_event_data(event); - tracing_generic_entry_update(&entry->ent, flags); + tracing_generic_entry_update(&entry->ent, flags, pc); entry->ent.type = TRACE_STACK; memset(&entry->caller, 0,
sizeof(entry->caller)); @@ -732,9 +732,18 @@ void __trace_stack(struct trace_array *tr, ring_buffer_unlock_commit(tr->buffer, event, irq_flags); } -void -__trace_special(void *__tr, void *__data, - unsigned long arg1, unsigned long arg2, unsigned long arg3) +void __trace_stack(struct trace_array *tr, + struct trace_array_cpu *data, + unsigned long flags, + int skip) +{ + ftrace_trace_stack(tr, data, flags, skip, preempt_count()); +} + +static void +ftrace_trace_special(void *__tr, void *__data, + unsigned long arg1, unsigned long arg2, unsigned long arg3, + int pc) { struct ring_buffer_event *event; struct trace_array_cpu *data = __data; @@ -747,23 +756,30 @@ __trace_special(void *__tr, void *__data, if (!event) return; entry = ring_buffer_event_data(event); - tracing_generic_entry_update(&entry->ent, 0); + tracing_generic_entry_update(&entry->ent, 0, pc); entry->ent.type = TRACE_SPECIAL; entry->arg1 = arg1; entry->arg2 = arg2; entry->arg3 = arg3; ring_buffer_unlock_commit(tr->buffer, event, irq_flags); - __trace_stack(tr, data, irq_flags, 4); + ftrace_trace_stack(tr, data, irq_flags, 4, pc); trace_wake_up(); } +void +__trace_special(void *__tr, void *__data, + unsigned long arg1, unsigned long arg2, unsigned long arg3) +{ + ftrace_trace_special(__tr, __data, arg1, arg2, arg3, preempt_count()); +} + void tracing_sched_switch_trace(struct trace_array *tr, struct trace_array_cpu *data, struct task_struct *prev, struct task_struct *next, - unsigned long flags) + unsigned long flags, int pc) { struct ring_buffer_event *event; struct ctx_switch_entry *entry; @@ -774,7 +790,7 @@ tracing_sched_switch_trace(struct trace_array *tr, if (!event) return; entry = ring_buffer_event_data(event); - tracing_generic_entry_update(&entry->ent, flags); + tracing_generic_entry_update(&entry->ent, flags, pc); entry->ent.type = TRACE_CTX; entry->prev_pid = prev->pid; entry->prev_prio = prev->prio; @@ -784,7 +800,7 @@ tracing_sched_switch_trace(struct trace_array *tr, entry->next_state = next->state; entry->next_cpu = task_cpu(next); ring_buffer_unlock_commit(tr->buffer, event, irq_flags); - __trace_stack(tr, data, flags, 5); + ftrace_trace_stack(tr, data, flags, 5, pc); } void @@ -792,7 +808,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr, struct trace_array_cpu *data, struct task_struct *wakee, struct task_struct *curr, - unsigned long flags) + unsigned long flags, int pc) { struct ring_buffer_event *event; struct ctx_switch_entry *entry; @@ -803,7 +819,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr, if (!event) return; entry = ring_buffer_event_data(event); - tracing_generic_entry_update(&entry->ent, flags); + tracing_generic_entry_update(&entry->ent, flags, pc); entry->ent.type = TRACE_WAKE; entry->prev_pid = curr->pid; entry->prev_prio = curr->prio; @@ -813,7 +829,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr, entry->next_state = wakee->state; entry->next_cpu = task_cpu(wakee); ring_buffer_unlock_commit(tr->buffer, event, irq_flags); - __trace_stack(tr, data, flags, 6); + ftrace_trace_stack(tr, data, flags, 6, pc); trace_wake_up(); } @@ -823,23 +839,24 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { struct trace_array *tr = &global_trace; struct trace_array_cpu *data; - unsigned long flags; long disabled; int cpu; + int pc; if (tracing_disabled || !tr->ctrl) return; - local_irq_save(flags); + pc = preempt_count(); + preempt_disable_notrace(); cpu = raw_smp_processor_id(); data = tr->data[cpu]; disabled = atomic_inc_return(&data->disabled); if 
(likely(disabled == 1)) - __trace_special(tr, data, arg1, arg2, arg3); + ftrace_trace_special(tr, data, arg1, arg2, arg3, pc); atomic_dec(&data->disabled); - local_irq_restore(flags); + preempt_enable_notrace(); } #ifdef CONFIG_FTRACE @@ -850,7 +867,8 @@ function_trace_call(unsigned long ip, unsigned long parent_ip) struct trace_array_cpu *data; unsigned long flags; long disabled; - int cpu; + int cpu, resched; + int pc; if (unlikely(!ftrace_function_enabled)) return; @@ -858,16 +876,22 @@ function_trace_call(unsigned long ip, unsigned long parent_ip) if (skip_trace(ip)) return; - local_irq_save(flags); + pc = preempt_count(); + resched = need_resched(); + preempt_disable_notrace(); + local_save_flags(flags); cpu = raw_smp_processor_id(); data = tr->data[cpu]; disabled = atomic_inc_return(&data->disabled); if (likely(disabled == 1)) - trace_function(tr, data, ip, parent_ip, flags); + trace_function(tr, data, ip, parent_ip, flags, pc); atomic_dec(&data->disabled); - local_irq_restore(flags); + if (resched) + preempt_enable_no_resched_notrace(); + else + preempt_enable_notrace(); } static struct ftrace_ops trace_ops __read_mostly = @@ -2508,9 +2532,6 @@ tracing_read_pipe(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_iterator *iter = filp->private_data; -#ifdef CONFIG_FTRACE - int ftrace_save; -#endif ssize_t sret; /* return any leftover data */ @@ -2593,20 +2614,6 @@ waitagain: offsetof(struct trace_iterator, seq)); iter->pos = -1; - /* - * We need to stop all tracing on all CPUS to read the - * the next buffer. This is a bit expensive, but is - * not done often. We fill all what we can read, - * and then release the locks again. - */ - - local_irq_disable(); -#ifdef CONFIG_FTRACE - ftrace_save = ftrace_enabled; - ftrace_enabled = 0; -#endif - smp_wmb(); - while (find_next_entry_inc(iter) != NULL) { enum print_line_t ret; int len = iter->seq.len; @@ -2624,11 +2631,6 @@ waitagain: break; } -#ifdef CONFIG_FTRACE - ftrace_enabled = ftrace_save; -#endif - local_irq_enable(); - /* Now copy what we have to the user */ sret = trace_seq_to_user(&iter->seq, ubuf, cnt); if (iter->seq.readpos >= iter->seq.len) @@ -2960,12 +2962,13 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args) struct print_entry *entry; unsigned long flags, irq_flags; long disabled; - int cpu, len = 0, size; + int cpu, len = 0, size, pc; if (!tr->ctrl || tracing_disabled) return 0; - local_irq_save(flags); + pc = preempt_count(); + preempt_disable_notrace(); cpu = raw_smp_processor_id(); data = tr->data[cpu]; disabled = atomic_inc_return(&data->disabled); @@ -2973,7 +2976,7 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args) if (unlikely(disabled != 1)) goto out; - spin_lock(&trace_buf_lock); + spin_lock_irqsave(&trace_buf_lock, flags); len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); len = min(len, TRACE_BUF_SIZE-1); @@ -2984,7 +2987,7 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args) if (!event) goto out_unlock; entry = ring_buffer_event_data(event); - tracing_generic_entry_update(&entry->ent, flags); + tracing_generic_entry_update(&entry->ent, flags, pc); entry->ent.type = TRACE_PRINT; entry->ip = ip; @@ -2993,11 +2996,11 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args) ring_buffer_unlock_commit(tr->buffer, event, irq_flags); out_unlock: - spin_unlock(&trace_buf_lock); + spin_unlock_irqrestore(&trace_buf_lock, flags); out: atomic_dec(&data->disabled); - local_irq_restore(flags); + preempt_enable_notrace(); return 
len; } diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index f02042d0d82..f1f99572cde 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -288,35 +288,36 @@ void init_tracer_sysprof_debugfs(struct dentry *d_tracer); struct trace_entry *tracing_get_trace_entry(struct trace_array *tr, struct trace_array_cpu *data); void tracing_generic_entry_update(struct trace_entry *entry, - unsigned long flags); + unsigned long flags, + int pc); void ftrace(struct trace_array *tr, struct trace_array_cpu *data, unsigned long ip, unsigned long parent_ip, - unsigned long flags); + unsigned long flags, int pc); void tracing_sched_switch_trace(struct trace_array *tr, struct trace_array_cpu *data, struct task_struct *prev, struct task_struct *next, - unsigned long flags); + unsigned long flags, int pc); void tracing_record_cmdline(struct task_struct *tsk); void tracing_sched_wakeup_trace(struct trace_array *tr, struct trace_array_cpu *data, struct task_struct *wakee, struct task_struct *cur, - unsigned long flags); + unsigned long flags, int pc); void trace_special(struct trace_array *tr, struct trace_array_cpu *data, unsigned long arg1, unsigned long arg2, - unsigned long arg3); + unsigned long arg3, int pc); void trace_function(struct trace_array *tr, struct trace_array_cpu *data, unsigned long ip, unsigned long parent_ip, - unsigned long flags); + unsigned long flags, int pc); void tracing_start_cmdline_record(void); void tracing_stop_cmdline_record(void); diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c index 43bde20b95b..f2dac6f1cf0 100644 --- a/kernel/trace/trace_boot.c +++ b/kernel/trace/trace_boot.c @@ -95,7 +95,7 @@ void trace_boot(struct boot_trace *it) if (!event) goto out; entry = ring_buffer_event_data(event); - tracing_generic_entry_update(&entry->ent, 0); + tracing_generic_entry_update(&entry->ent, 0, 0); entry->ent.type = TRACE_BOOT; entry->initcall = *it; ring_buffer_unlock_commit(tr->buffer, event, irq_flags); diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index 37ad49407f2..f925dbbff2a 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c @@ -95,7 +95,7 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip) disabled = atomic_inc_return(&data->disabled); if (likely(disabled == 1)) - trace_function(tr, data, ip, parent_ip, flags); + trace_function(tr, data, ip, parent_ip, flags, preempt_count()); atomic_dec(&data->disabled); } @@ -130,6 +130,7 @@ check_critical_timing(struct trace_array *tr, unsigned long latency, t0, t1; cycle_t T0, T1, delta; unsigned long flags; + int pc; /* * usecs conversion is slow so we try to delay the conversion @@ -144,13 +145,15 @@ check_critical_timing(struct trace_array *tr, if (!report_latency(delta)) goto out; + pc = preempt_count(); + spin_lock_irqsave(&max_trace_lock, flags); /* check if we are still the max latency */ if (!report_latency(delta)) goto out_unlock; - trace_function(tr, data, CALLER_ADDR0, parent_ip, flags); + trace_function(tr, data, CALLER_ADDR0, parent_ip, flags, pc); latency = nsecs_to_usecs(delta); @@ -174,7 +177,7 @@ out: data->critical_sequence = max_sequence; data->preempt_timestamp = ftrace_now(cpu); tracing_reset(tr, cpu); - trace_function(tr, data, CALLER_ADDR0, parent_ip, flags); + trace_function(tr, data, CALLER_ADDR0, parent_ip, flags, pc); } static inline void @@ -207,7 +210,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip) local_save_flags(flags); - trace_function(tr, data, ip, parent_ip, flags); + 
trace_function(tr, data, ip, parent_ip, flags, preempt_count()); per_cpu(tracing_cpu, cpu) = 1; @@ -241,7 +244,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip) atomic_inc(&data->disabled); local_save_flags(flags); - trace_function(tr, data, ip, parent_ip, flags); + trace_function(tr, data, ip, parent_ip, flags, preempt_count()); check_critical_timing(tr, data, parent_ip ? : ip, cpu); data->critical_start = 0; atomic_dec(&data->disabled); diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c index 0e819f47bb7..f28484618ff 100644 --- a/kernel/trace/trace_mmiotrace.c +++ b/kernel/trace/trace_mmiotrace.c @@ -324,7 +324,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr, if (!event) return; entry = ring_buffer_event_data(event); - tracing_generic_entry_update(&entry->ent, 0); + tracing_generic_entry_update(&entry->ent, 0, preempt_count()); entry->ent.type = TRACE_MMIO_RW; entry->rw = *rw; ring_buffer_unlock_commit(tr->buffer, event, irq_flags); @@ -352,7 +352,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr, if (!event) return; entry = ring_buffer_event_data(event); - tracing_generic_entry_update(&entry->ent, 0); + tracing_generic_entry_update(&entry->ent, 0, preempt_count()); entry->ent.type = TRACE_MMIO_MAP; entry->map = *map; ring_buffer_unlock_commit(tr->buffer, event, irq_flags); diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c index e0b06db0f7a..c7fa08a5b7f 100644 --- a/kernel/trace/trace_sched_switch.c +++ b/kernel/trace/trace_sched_switch.c @@ -26,6 +26,7 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev, unsigned long flags; long disabled; int cpu; + int pc; if (!atomic_read(&sched_ref)) return; @@ -36,13 +37,14 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev, if (!tracer_enabled) return; + pc = preempt_count(); local_irq_save(flags); cpu = raw_smp_processor_id(); data = ctx_trace->data[cpu]; disabled = atomic_inc_return(&data->disabled); if (likely(disabled == 1)) - tracing_sched_switch_trace(ctx_trace, data, prev, next, flags); + tracing_sched_switch_trace(ctx_trace, data, prev, next, flags, pc); atomic_dec(&data->disabled); local_irq_restore(flags); @@ -54,11 +56,12 @@ probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee) struct trace_array_cpu *data; unsigned long flags; long disabled; - int cpu; + int cpu, pc; if (!likely(tracer_enabled)) return; + pc = preempt_count(); tracing_record_cmdline(current); local_irq_save(flags); @@ -68,7 +71,7 @@ probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee) if (likely(disabled == 1)) tracing_sched_wakeup_trace(ctx_trace, data, wakee, current, - flags); + flags, pc); atomic_dec(&data->disabled); local_irq_restore(flags); diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 01e75e0639b..fe4a252c236 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c @@ -44,10 +44,12 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip) long disabled; int resched; int cpu; + int pc; if (likely(!wakeup_task)) return; + pc = preempt_count(); resched = need_resched(); preempt_disable_notrace(); @@ -70,7 +72,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip) if (task_cpu(wakeup_task) != cpu) goto unlock; - trace_function(tr, data, ip, parent_ip, flags); + trace_function(tr, data, ip, parent_ip, flags, pc); unlock: __raw_spin_unlock(&wakeup_lock); @@ -121,6 +123,7 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct 
*prev, unsigned long flags; long disabled; int cpu; + int pc; tracing_record_cmdline(prev); @@ -139,6 +142,8 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev, if (next != wakeup_task) return; + pc = preempt_count(); + /* The task we are waiting for is waking up */ data = wakeup_trace->data[wakeup_cpu]; @@ -155,7 +160,7 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev, if (unlikely(!tracer_enabled || next != wakeup_task)) goto out_unlock; - trace_function(wakeup_trace, data, CALLER_ADDR1, CALLER_ADDR2, flags); + trace_function(wakeup_trace, data, CALLER_ADDR1, CALLER_ADDR2, flags, pc); /* * usecs conversion is slow so we try to delay the conversion @@ -220,6 +225,7 @@ probe_wakeup(struct rq *rq, struct task_struct *p) int cpu = smp_processor_id(); unsigned long flags; long disabled; + int pc; if (likely(!tracer_enabled)) return; @@ -232,6 +238,7 @@ probe_wakeup(struct rq *rq, struct task_struct *p) p->prio >= current->prio) return; + pc = preempt_count(); disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled); if (unlikely(disabled != 1)) goto out; @@ -256,7 +263,7 @@ probe_wakeup(struct rq *rq, struct task_struct *p) wakeup_trace->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu); trace_function(wakeup_trace, wakeup_trace->data[wakeup_cpu], - CALLER_ADDR1, CALLER_ADDR2, flags); + CALLER_ADDR1, CALLER_ADDR2, flags, pc); out_locked: __raw_spin_unlock(&wakeup_lock); -- cgit v1.2.3-70-g09d2 From 77ae11f63befb7fc41ec256f1fcb72ca7e4160d5 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 2 Oct 2008 11:04:14 +0200 Subject: ring-buffer: fix build error MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit fix: kernel/trace/ring_buffer.c: In function ‘rb_allocate_pages’: kernel/trace/ring_buffer.c:235: error: ‘cpu’ undeclared (first use in this function) kernel/trace/ring_buffer.c:235: error: (Each undeclared identifier is reported only once kernel/trace/ring_buffer.c:235: error: for each function it appears in.) Signed-off-by: Ingo Molnar --- kernel/trace/ring_buffer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel/trace') diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 98145718988..54a30986493 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -232,7 +232,7 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, for (i = 0; i < nr_pages; i++) { page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()), - GFP_KERNEL, cpu_to_node(cpu)); + GFP_KERNEL, cpu_to_node(i)); if (!page) goto free_pages; list_add(&page->list, &pages); -- cgit v1.2.3-70-g09d2 From cb5ab74204a6e2579d1119bf1348eb806526b12b Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 2 Oct 2008 12:59:20 +0200 Subject: tracing/fastboot: change the printing of boot tracer according to bootgraph.pl Change the boot tracer printing to make it parsable by the scripts/bootgraph.pl script. We now have to output two lines for each initcall, matching the printks in do_one_initcall() in init/main.c. We now need both the call time and the return time.
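The duration on the second line comes from the two ktime values via a shift rather than a division, as the init/main.c hunk below shows (delta.tv64 >> 20). A self-contained sketch with made-up timestamps:

#include <stdio.h>

int main(void)
{
	/* invented nanosecond timestamps standing in for ktime_get() */
	long long calltime = 1000000000LL;	/* 1.000 s */
	long long rettime  = 1042000000LL;	/* 1.042 s */

	/*
	 * ns -> ms via a shift: 2^20 = 1048576 is close to 10^6,
	 * so this is a cheap approximation of dividing by a million.
	 */
	long long duration = (rettime - calltime) >> 20;

	printf("initcall returned after %lld msecs\n", duration);
	return 0;
}

The shift undercounts slightly (40 instead of 42 msecs here), which is acceptable for boot profiling.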
Signed-off-by: Frederic Weisbecker Signed-off-by: Ingo Molnar --- include/linux/ftrace.h | 2 ++ init/main.c | 20 +++++++++----------- kernel/trace/trace_boot.c | 22 +++++++++++++++------- 3 files changed, 26 insertions(+), 18 deletions(-) (limited to 'kernel/trace') diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 91954eb6460..4455490d91b 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -216,6 +216,8 @@ struct boot_trace { initcall_t func; int result; unsigned long long duration; + ktime_t calltime; + ktime_t rettime; }; #ifdef CONFIG_BOOT_TRACER diff --git a/init/main.c b/init/main.c index 1e39a1eab19..61eb6615939 100644 --- a/init/main.c +++ b/init/main.c @@ -706,34 +706,32 @@ __setup("initcall_debug", initcall_debug_setup); int do_one_initcall(initcall_t fn) { int count = preempt_count(); - ktime_t t0, t1, delta; + ktime_t delta; char msgbuf[64]; - int result; struct boot_trace it; if (initcall_debug) { it.caller = task_pid_nr(current); it.func = fn; printk("calling %pF @ %i\n", fn, it.caller); - t0 = ktime_get(); + it.calltime = ktime_get(); } - result = fn(); + it.result = fn(); if (initcall_debug) { - t1 = ktime_get(); - delta = ktime_sub(t1, t0); - it.result = result; + it.rettime = ktime_get(); + delta = ktime_sub(it.rettime, it.calltime); it.duration = (unsigned long long) delta.tv64 >> 20; printk("initcall %pF returned %d after %Ld msecs\n", fn, - result, it.duration); + it.result, it.duration); trace_boot(&it); } msgbuf[0] = 0; - if (result && result != -ENODEV && initcall_debug) - sprintf(msgbuf, "error code %d ", result); + if (it.result && it.result != -ENODEV && initcall_debug) + sprintf(msgbuf, "error code %d ", it.result); if (preempt_count() != count) { strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf)); @@ -747,7 +745,7 @@ int do_one_initcall(initcall_t fn) printk("initcall %pF returned with %s\n", fn, msgbuf); } - return result; + return it.result; } diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c index f2dac6f1cf0..7c15f3e68ba 100644 --- a/kernel/trace/trace_boot.c +++ b/kernel/trace/trace_boot.c @@ -52,16 +52,24 @@ static enum print_line_t initcall_print_line(struct trace_iterator *iter) struct trace_boot *field = (struct trace_boot *)entry; struct boot_trace *it = &field->initcall; struct trace_seq *s = &iter->seq; + struct timespec calltime = ktime_to_timespec(it->calltime); + struct timespec rettime = ktime_to_timespec(it->rettime); if (entry->type == TRACE_BOOT) { - ret = trace_seq_printf(s, "%pF called from %i " - "returned %d after %lld msecs\n", - it->func, it->caller, it->result, - it->duration); - if (ret) - return TRACE_TYPE_HANDLED; - else + ret = trace_seq_printf(s, "[%5ld.%06ld] calling %pF @ %i\n", + calltime.tv_sec, + calltime.tv_nsec, + it->func, it->caller); + if (!ret) return TRACE_TYPE_PARTIAL_LINE; + ret = trace_seq_printf(s, "[%5ld.%06ld] initcall %pF " + "returned %d after %lld msecs\n", + rettime.tv_sec, + rettime.tv_nsec, + it->func, it->result, it->duration); + if (!ret) + return TRACE_TYPE_PARTIAL_LINE; + return TRACE_TYPE_HANDLED; } return TRACE_TYPE_UNHANDLED; } -- cgit v1.2.3-70-g09d2 From 5601020feb0c3010e9e3e0131e9697ac6a06777b Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 2 Oct 2008 13:26:05 +0200 Subject: tracing/fastboot: get the initcall name before it disappears After some initcalls have been traced, their recorded names may become inconsistent: these functions disappear from the .init section, and their names disappear from the symbol table along with them.
So we have to copy the function's name into a large-enough buffer at the time the trace entry is appended. This is not costly for the ring_buffer, because the number of initcall entries is usually small. Signed-off-by: Frederic Weisbecker Signed-off-by: Ingo Molnar --- include/linux/ftrace.h | 7 ++++--- init/main.c | 3 +-- kernel/trace/trace_boot.c | 14 ++++++++++---- 3 files changed, 15 insertions(+), 9 deletions(-) (limited to 'kernel/trace') diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 4455490d91b..e672e51c40a 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -7,6 +7,7 @@ #include #include #include +#include extern int ftrace_enabled; extern int @@ -213,7 +214,7 @@ ftrace_init_module(unsigned long *start, unsigned long *end) { } struct boot_trace { pid_t caller; - initcall_t func; + char func[KSYM_NAME_LEN]; int result; unsigned long long duration; ktime_t calltime; @@ -221,10 +222,10 @@ struct boot_trace { }; #ifdef CONFIG_BOOT_TRACER -extern void trace_boot(struct boot_trace *it); +extern void trace_boot(struct boot_trace *it, initcall_t fn); extern void start_boot_trace(void); #else -static inline void trace_boot(struct boot_trace *it) { } +static inline void trace_boot(struct boot_trace *it, initcall_t fn) { } static inline void start_boot_trace(void) { } #endif diff --git a/init/main.c b/init/main.c index 61eb6615939..8e96a0ef17f 100644 --- a/init/main.c +++ b/init/main.c @@ -712,7 +712,6 @@ int do_one_initcall(initcall_t fn) if (initcall_debug) { it.caller = task_pid_nr(current); - it.func = fn; printk("calling %pF @ %i\n", fn, it.caller); it.calltime = ktime_get(); } @@ -725,7 +724,7 @@ int do_one_initcall(initcall_t fn) it.duration = (unsigned long long) delta.tv64 >> 20; printk("initcall %pF returned %d after %Ld msecs\n", fn, it.result, it.duration); - trace_boot(&it); + trace_boot(&it, fn); } msgbuf[0] = 0; diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c index 7c15f3e68ba..b9dc2c0093a 100644 --- a/kernel/trace/trace_boot.c +++ b/kernel/trace/trace_boot.c @@ -8,6 +8,7 @@ #include #include #include +#include #include "trace.h" @@ -56,17 +57,19 @@ static enum print_line_t initcall_print_line(struct trace_iterator *iter) struct timespec rettime = ktime_to_timespec(it->rettime); if (entry->type == TRACE_BOOT) { - ret = trace_seq_printf(s, "[%5ld.%06ld] calling %pF @ %i\n", + ret = trace_seq_printf(s, "[%5ld.%06ld] calling %s @ %i\n", calltime.tv_sec, calltime.tv_nsec, it->func, it->caller); if (!ret) return TRACE_TYPE_PARTIAL_LINE; + + ret = trace_seq_printf(s, "[%5ld.%06ld] initcall %s " "returned %d after %lld msecs\n", rettime.tv_sec, rettime.tv_nsec, it->func, it->result, it->duration); + if (!ret) return TRACE_TYPE_PARTIAL_LINE; return TRACE_TYPE_HANDLED; @@ -83,8 +86,7 @@ struct tracer boot_tracer __read_mostly = .print_line = initcall_print_line, }; - -void trace_boot(struct boot_trace *it) +void trace_boot(struct boot_trace *it, initcall_t fn) { struct ring_buffer_event *event; struct trace_boot *entry; @@ -95,6 +97,10 @@ void trace_boot(struct boot_trace *it) if (!trace_boot_enabled) return; + /* Get its name now since this function could + * disappear because it is in the .init section.
+ */ + sprint_symbol(it->func, (unsigned long)fn); preempt_disable(); data = tr->data[smp_processor_id()]; -- cgit v1.2.3-70-g09d2 From aa1e0e3bcf95ce684d005bedb16e5d4559455685 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Thu, 2 Oct 2008 19:18:09 -0400 Subject: ring_buffer: map to cpu not page My original patch had a compile bug when NUMA was configured. I referenced cpu when it should have been cpu_buffer->cpu. Ingo quickly fixed this bug by replacing cpu with 'i' because that was the loop counter. Unfortunately, the 'i' was the counter of pages, not CPUs. This caused a crash when the number of pages allocated for the buffers exceeded the number of CPUs, which would usually be the case. Signed-off-by: Steven Rostedt Signed-off-by: Ingo Molnar --- kernel/trace/ring_buffer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel/trace') diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 54a30986493..6b8dac02364 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -232,7 +232,7 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, for (i = 0; i < nr_pages; i++) { page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()), - GFP_KERNEL, cpu_to_node(i)); + GFP_KERNEL, cpu_to_node(cpu_buffer->cpu)); if (!page) goto free_pages; list_add(&page->list, &pages); -- cgit v1.2.3-70-g09d2 From 6450c1d3213e27b0dcbf34cce7ad1ae74244c520 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Thu, 2 Oct 2008 19:23:04 -0400 Subject: ftrace: move pc counter in irqtrace The assignment of the pc counter is in the wrong spot in the check_critical_timing function. The pc variable is used in the out jump. Signed-off-by: Steven Rostedt Signed-off-by: Ingo Molnar --- kernel/trace/trace_irqsoff.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index f925dbbff2a..a7db7f040ae 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c @@ -142,11 +142,11 @@ check_critical_timing(struct trace_array *tr, local_save_flags(flags); + pc = preempt_count(); + if (!report_latency(delta)) goto out; - pc = preempt_count(); - spin_lock_irqsave(&max_trace_lock, flags); /* check if we are still the max latency */ -- cgit v1.2.3-70-g09d2 From 097d036a2f25eecc42435c57e010aaf4a2eed2d9 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Fri, 3 Oct 2008 15:39:21 +0200 Subject: tracing/fastboot: only trace non-module initcalls At this time, only built-in initcalls interest us. We can't really produce a relevant graph if we include module initcalls too. I had good results after this patch (see svg in attachment).
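The mechanism behind this is just a global on/off flag: trace_boot() drops events once stop_boot_trace() has run, and kernel_init() calls it right before init_post(), that is, before any module can be loaded. A minimal user-space sketch of that gating pattern follows; the printing stands in for the real ring-buffer write, and everything else is illustrative.

#include <stdio.h>

static int trace_boot_enabled;

static void start_boot_trace(void) { trace_boot_enabled = 1; }
static void stop_boot_trace(void)  { trace_boot_enabled = 0; }

/* stand-in for the kernel's trace_boot(): events arriving after
 * stop_boot_trace() are silently dropped, which is how module
 * initcalls are kept out of the trace */
static void trace_boot(const char *initcall_name)
{
	if (!trace_boot_enabled)
		return;
	printf("traced: %s\n", initcall_name);
}

int main(void)
{
	start_boot_trace();
	trace_boot("built_in_initcall");  /* recorded */
	stop_boot_trace();                /* done before init_post() */
	trace_boot("module_initcall");    /* silently dropped */
	return 0;
}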
Signed-off-by: Frederic Weisbecker Signed-off-by: Ingo Molnar --- include/linux/ftrace.h | 2 ++ init/main.c | 1 + kernel/trace/trace_boot.c | 11 ++++++++--- 3 files changed, 11 insertions(+), 3 deletions(-) (limited to 'kernel/trace') diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index ed53265d1f6..5812dba4ee2 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -225,9 +225,11 @@ struct boot_trace { #ifdef CONFIG_BOOT_TRACER extern void trace_boot(struct boot_trace *it, initcall_t fn); extern void start_boot_trace(void); +extern void stop_boot_trace(void); #else static inline void trace_boot(struct boot_trace *it, initcall_t fn) { } static inline void start_boot_trace(void) { } +static inline void stop_boot_trace(void) { } #endif diff --git a/init/main.c b/init/main.c index 8e96a0ef17f..e7939de80f3 100644 --- a/init/main.c +++ b/init/main.c @@ -886,6 +886,7 @@ static int __init kernel_init(void * unused) * we're essentially up and running. Get rid of the * initmem segments and start the user-mode stuff.. */ + stop_boot_trace(); init_post(); return 0; } diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c index b9dc2c0093a..a7efe355965 100644 --- a/kernel/trace/trace_boot.c +++ b/kernel/trace/trace_boot.c @@ -22,11 +22,16 @@ void start_boot_trace(void) trace_boot_enabled = 1; } -void stop_boot_trace(struct trace_array *tr) +void stop_boot_trace(void) { trace_boot_enabled = 0; } +void reset_boot_trace(struct trace_array *tr) +{ + stop_boot_trace(); +} + static void boot_trace_init(struct trace_array *tr) { int cpu; @@ -43,7 +48,7 @@ static void boot_trace_ctrl_update(struct trace_array *tr) if (tr->ctrl) start_boot_trace(); else - stop_boot_trace(tr); + stop_boot_trace(); } static enum print_line_t initcall_print_line(struct trace_iterator *iter) @@ -81,7 +86,7 @@ struct tracer boot_tracer __read_mostly = { .name = "initcall", .init = boot_trace_init, - .reset = stop_boot_trace, + .reset = reset_boot_trace, .ctrl_update = boot_trace_ctrl_update, .print_line = initcall_print_line, }; -- cgit v1.2.3-70-g09d2 From 6f807acd27734197b11d42829d3cbb9c0937b572 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Sat, 4 Oct 2008 02:00:58 -0400 Subject: ring-buffer: move page indexes into page headers Remove the global head and tail indexes and move them into the page header. Each page will now keep track of where the last write and read were made. We also rename head and tail to read and write, for clarity. This patch is needed for future enhancements to move the ring buffer to a lockless solution.
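To make the bookkeeping change concrete, here is a toy single-page version of the idea, with the read and write indexes living in the page structure itself rather than in a global per-cpu structure. Names and sizes are illustrative only; the real buffer_page also carries a timestamp and a list head, and many pages are chained together.

#include <stdio.h>
#include <string.h>

#define BUF_PAGE_SIZE 64

struct buffer_page {
	unsigned write;            /* index for next write */
	unsigned read;             /* index for next read */
	char page[BUF_PAGE_SIZE];  /* actual data */
};

static int page_write(struct buffer_page *p, const void *data, unsigned len)
{
	if (p->write + len > BUF_PAGE_SIZE)
		return -1;  /* caller would advance to the next page here */
	memcpy(p->page + p->write, data, len);
	p->write += len;
	return 0;
}

static unsigned page_read(struct buffer_page *p, void *out, unsigned len)
{
	unsigned avail = p->write - p->read;

	if (len > avail)
		len = avail;
	memcpy(out, p->page + p->read, len);
	p->read += len;  /* each page remembers its own read position */
	return len;
}

int main(void)
{
	struct buffer_page p = { 0, 0, { 0 } };
	char out[16] = { 0 };

	page_write(&p, "hello", 5);
	page_read(&p, out, sizeof(out) - 1);
	printf("%s (read=%u write=%u)\n", out, p.read, p.write);
	return 0;
}

Because the indexes travel with the page, swapping a reader page in and out (as rb_get_reader_page() does) no longer requires patching up global state, which is what the later lockless work depends on.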
Signed-off-by: Steven Rostedt Signed-off-by: Ingo Molnar --- kernel/trace/ring_buffer.c | 75 +++++++++++++++++++++++++--------------------- 1 file changed, 41 insertions(+), 34 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 6b8dac02364..09d4f0d879a 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -117,6 +117,8 @@ void *ring_buffer_event_data(struct ring_buffer_event *event) struct buffer_page { u64 time_stamp; /* page time stamp */ unsigned size; /* size of page data */ + unsigned write; /* index for next write */ + unsigned read; /* index for next read */ struct list_head list; /* list of free pages */ void *page; /* Actual data page */ }; @@ -153,11 +155,8 @@ struct ring_buffer_per_cpu { spinlock_t lock; struct lock_class_key lock_key; struct list_head pages; - unsigned long head; /* read from head */ - unsigned long tail; /* write to tail */ - unsigned long reader; - struct buffer_page *head_page; - struct buffer_page *tail_page; + struct buffer_page *head_page; /* read from head */ + struct buffer_page *tail_page; /* write to tail */ struct buffer_page *reader_page; unsigned long overrun; unsigned long entries; @@ -566,10 +565,11 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size) static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) { - return (cpu_buffer->reader == cpu_buffer->reader_page->size && + return cpu_buffer->reader_page->read == cpu_buffer->reader_page->size && (cpu_buffer->tail_page == cpu_buffer->reader_page || (cpu_buffer->tail_page == cpu_buffer->head_page && - cpu_buffer->head == cpu_buffer->tail))); + cpu_buffer->head_page->read == + cpu_buffer->tail_page->write)); } static inline int rb_null_event(struct ring_buffer_event *event) @@ -577,7 +577,7 @@ static inline int rb_null_event(struct ring_buffer_event *event) return event->type == RINGBUF_TYPE_PADDING; } -static inline void *rb_page_index(struct buffer_page *page, unsigned index) +static inline void *__rb_page_index(struct buffer_page *page, unsigned index) { return page->page + index; } @@ -585,15 +585,21 @@ static inline void *rb_page_index(struct buffer_page *page, unsigned index) static inline struct ring_buffer_event * rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer) { - return rb_page_index(cpu_buffer->reader_page, - cpu_buffer->reader); + return __rb_page_index(cpu_buffer->reader_page, + cpu_buffer->reader_page->read); +} + +static inline struct ring_buffer_event * +rb_head_event(struct ring_buffer_per_cpu *cpu_buffer) +{ + return __rb_page_index(cpu_buffer->head_page, + cpu_buffer->head_page->read); } static inline struct ring_buffer_event * rb_iter_head_event(struct ring_buffer_iter *iter) { - return rb_page_index(iter->head_page, - iter->head); + return __rb_page_index(iter->head_page, iter->head); } /* @@ -610,7 +616,7 @@ static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer) for (head = 0; head < rb_head_size(cpu_buffer); head += rb_event_length(event)) { - event = rb_page_index(cpu_buffer->head_page, head); + event = __rb_page_index(cpu_buffer->head_page, head); BUG_ON(rb_null_event(event)); /* Only count data entries */ if (event->type != RINGBUF_TYPE_DATA) @@ -640,13 +646,13 @@ rb_add_stamp(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts) static void rb_reset_head_page(struct ring_buffer_per_cpu *cpu_buffer) { - cpu_buffer->head = 0; + cpu_buffer->head_page->read = 0; } static void rb_reset_reader_page(struct ring_buffer_per_cpu 
*cpu_buffer) { cpu_buffer->read_stamp = cpu_buffer->reader_page->time_stamp; - cpu_buffer->reader = 0; + cpu_buffer->reader_page->read = 0; } static inline void rb_inc_iter(struct ring_buffer_iter *iter) @@ -743,9 +749,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, struct ring_buffer *buffer = cpu_buffer->buffer; struct ring_buffer_event *event; - /* No locking needed for tail page */ tail_page = cpu_buffer->tail_page; - tail = cpu_buffer->tail; + tail = cpu_buffer->tail_page->write; if (tail + length > BUF_PAGE_SIZE) { struct buffer_page *next_page = tail_page; @@ -774,7 +779,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, } if (tail != BUF_PAGE_SIZE) { - event = rb_page_index(tail_page, tail); + event = __rb_page_index(tail_page, tail); /* page padding */ event->type = RINGBUF_TYPE_PADDING; } @@ -784,14 +789,14 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, tail_page->size = 0; tail = 0; cpu_buffer->tail_page = tail_page; - cpu_buffer->tail = tail; + cpu_buffer->tail_page->write = tail; rb_add_stamp(cpu_buffer, ts); spin_unlock(&cpu_buffer->lock); } BUG_ON(tail + length > BUF_PAGE_SIZE); - event = rb_page_index(tail_page, tail); + event = __rb_page_index(tail_page, tail); rb_update_event(event, type, length); return event; @@ -823,12 +828,12 @@ rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer, return -1; /* check to see if we went to the next page */ - if (cpu_buffer->tail) { + if (cpu_buffer->tail_page->write) { /* Still on same page, update timestamp */ event->time_delta = *delta & TS_MASK; event->array[0] = *delta >> TS_SHIFT; /* commit the time event */ - cpu_buffer->tail += + cpu_buffer->tail_page->write += rb_event_length(event); cpu_buffer->write_stamp = *ts; *delta = 0; @@ -846,7 +851,7 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer, ts = ring_buffer_time_stamp(cpu_buffer->cpu); - if (cpu_buffer->tail) { + if (cpu_buffer->tail_page->write) { delta = ts - cpu_buffer->write_stamp; if (test_time_stamp(delta)) { @@ -868,7 +873,7 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer, return NULL; /* If the reserve went to the next page, our delta is zero */ - if (!cpu_buffer->tail) + if (!cpu_buffer->tail_page->write) delta = 0; event->time_delta = delta; @@ -933,8 +938,8 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer, struct ring_buffer_event *event) { - cpu_buffer->tail += rb_event_length(event); - cpu_buffer->tail_page->size = cpu_buffer->tail; + cpu_buffer->tail_page->write += rb_event_length(event); + cpu_buffer->tail_page->size = cpu_buffer->tail_page->write; cpu_buffer->write_stamp += event->time_delta; cpu_buffer->entries++; } @@ -1178,10 +1183,10 @@ void ring_buffer_iter_reset(struct ring_buffer_iter *iter) /* Iterator usage is expected to have record disabled */ if (list_empty(&cpu_buffer->reader_page->list)) { iter->head_page = cpu_buffer->head_page; - iter->head = cpu_buffer->head; + iter->head = cpu_buffer->head_page->read; } else { iter->head_page = cpu_buffer->reader_page; - iter->head = cpu_buffer->reader; + iter->head = cpu_buffer->reader_page->read; } if (iter->head) iter->read_stamp = cpu_buffer->read_stamp; @@ -1200,7 +1205,7 @@ int ring_buffer_iter_empty(struct ring_buffer_iter *iter) cpu_buffer = iter->cpu_buffer; return iter->head_page == cpu_buffer->tail_page && - iter->head == cpu_buffer->tail; + iter->head == cpu_buffer->tail_page->write; } static void @@ -1277,11 +1282,11 @@ rb_get_reader_page(struct 
ring_buffer_per_cpu *cpu_buffer) reader = cpu_buffer->reader_page; /* If there's more to read, return this page */ - if (cpu_buffer->reader < reader->size) + if (cpu_buffer->reader_page->read < reader->size) goto out; /* Never should we have an index greater than the size */ - WARN_ON(cpu_buffer->reader > reader->size); + WARN_ON(cpu_buffer->reader_page->read > reader->size); /* check if we caught up to the tail */ reader = NULL; @@ -1342,7 +1347,7 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer) rb_update_read_stamp(cpu_buffer, event); length = rb_event_length(event); - cpu_buffer->reader += length; + cpu_buffer->reader_page->read += length; } static void rb_advance_iter(struct ring_buffer_iter *iter) @@ -1373,7 +1378,7 @@ static void rb_advance_iter(struct ring_buffer_iter *iter) * at the tail of the buffer. */ BUG_ON((iter->head_page == cpu_buffer->tail_page) && - (iter->head + length > cpu_buffer->tail)); + (iter->head + length > cpu_buffer->tail_page->write)); rb_update_iter_read_stamp(iter, event); @@ -1623,7 +1628,9 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) INIT_LIST_HEAD(&cpu_buffer->reader_page->list); cpu_buffer->reader_page->size = 0; - cpu_buffer->head = cpu_buffer->tail = cpu_buffer->reader = 0; + cpu_buffer->head_page->read = 0; + cpu_buffer->tail_page->write = 0; + cpu_buffer->reader_page->read = 0; cpu_buffer->overrun = 0; cpu_buffer->entries = 0; -- cgit v1.2.3-70-g09d2 From bf41a158cacba6ca5fc6407a54e7ad8ce1567e2e Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Sat, 4 Oct 2008 02:00:59 -0400 Subject: ring-buffer: make reentrant This patch replaces the local_irq_save/restore with preempt_disable/enable. This allows for interrupts to enter while recording. To write to the ring buffer, you must reserve data, and then commit it. During this time, an interrupt may call a trace function that will also record into the buffer before the commit is made. The interrupt will reserve its entry after the first entry, even though the first entry did not finish yet. The time stamp delta of the interrupt entry will be zero, since in the view of the trace, the interrupt happened during the first field anyway. Locking still takes place when the tail/write moves from one page to the next. The reader always takes the locks. A new page pointer is added, called the commit. The write/tail will always point to the end of all entries. The commit field will point to the last committed entry. Only this commit entry may update the write time stamp. The reader can only go up to the commit. It cannot go past it. If a lot of interrupts come in during a commit that fills up the buffer, and it happens to make it all the way around the buffer back to the commit, then a warning is printed and new events will be dropped.
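The write/commit split can be illustrated with a deliberately simplified sketch: a single page, single-threaded, where reserve bumps the write index and a commit only advances the commit index when the committer owns the oldest outstanding entry, sweeping up any entries that nested inside it. The real code does this with local_t operations across a list of pages; everything below is an illustration of the scheme, not the kernel API.

#include <stdio.h>

struct page_sketch {
	unsigned write;   /* end of everything reserved so far */
	unsigned commit;  /* end of everything fully committed */
};

static unsigned reserve(struct page_sketch *p, unsigned len)
{
	unsigned tail = p->write;

	p->write += len;  /* local_add_return() in the real code */
	return tail;
}

static void commit(struct page_sketch *p, unsigned tail)
{
	/* Only the writer holding the oldest uncommitted entry moves
	 * commit forward; interrupts that nested inside it have already
	 * finished, so their entries are swept up to write as well. */
	if (tail == p->commit)
		p->commit = p->write;
}

int main(void)
{
	struct page_sketch p = { 0, 0 };
	unsigned outer = reserve(&p, 16);  /* task context */
	unsigned inner = reserve(&p, 8);   /* "interrupt" nests inside */

	commit(&p, inner);  /* not the oldest entry: commit stays put */
	printf("after inner commit: write=%u commit=%u\n", p.write, p.commit);

	commit(&p, outer);  /* oldest entry: sweeps both entries */
	printf("after outer commit: write=%u commit=%u\n", p.write, p.commit);
	return 0;
}

This is also why the reader may only advance up to commit, never to write: anything between the two indexes is reserved but not yet guaranteed to contain finished data.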
Signed-off-by: Steven Rostedt Signed-off-by: Ingo Molnar --- kernel/trace/ring_buffer.c | 487 ++++++++++++++++++++++++++++++++++----------- 1 file changed, 374 insertions(+), 113 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 09d4f0d879a..94af1fe56bb 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -116,8 +116,8 @@ void *ring_buffer_event_data(struct ring_buffer_event *event) */ struct buffer_page { u64 time_stamp; /* page time stamp */ - unsigned size; /* size of page data */ - unsigned write; /* index for next write */ + local_t write; /* index for next write */ + local_t commit; /* write commited index */ unsigned read; /* index for next read */ struct list_head list; /* list of free pages */ void *page; /* Actual data page */ @@ -157,6 +157,7 @@ struct ring_buffer_per_cpu { struct list_head pages; struct buffer_page *head_page; /* read from head */ struct buffer_page *tail_page; /* write to tail */ + struct buffer_page *commit_page; /* commited pages */ struct buffer_page *reader_page; unsigned long overrun; unsigned long entries; @@ -185,12 +186,32 @@ struct ring_buffer_iter { u64 read_stamp; }; -#define RB_WARN_ON(buffer, cond) \ - if (unlikely(cond)) { \ - atomic_inc(&buffer->record_disabled); \ - WARN_ON(1); \ - return -1; \ - } +#define RB_WARN_ON(buffer, cond) \ + do { \ + if (unlikely(cond)) { \ + atomic_inc(&buffer->record_disabled); \ + WARN_ON(1); \ + } \ + } while (0) + +#define RB_WARN_ON_RET(buffer, cond) \ + do { \ + if (unlikely(cond)) { \ + atomic_inc(&buffer->record_disabled); \ + WARN_ON(1); \ + return -1; \ + } \ + } while (0) + +#define RB_WARN_ON_ONCE(buffer, cond) \ + do { \ + static int once; \ + if (unlikely(cond) && !once) { \ + once++; \ + atomic_inc(&buffer->record_disabled); \ + WARN_ON(1); \ + } \ + } while (0) /** * check_pages - integrity check of buffer pages @@ -204,22 +225,19 @@ static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer) struct list_head *head = &cpu_buffer->pages; struct buffer_page *page, *tmp; - RB_WARN_ON(cpu_buffer, head->next->prev != head); - RB_WARN_ON(cpu_buffer, head->prev->next != head); + RB_WARN_ON_RET(cpu_buffer, head->next->prev != head); + RB_WARN_ON_RET(cpu_buffer, head->prev->next != head); list_for_each_entry_safe(page, tmp, head, list) { - RB_WARN_ON(cpu_buffer, page->list.next->prev != &page->list); - RB_WARN_ON(cpu_buffer, page->list.prev->next != &page->list); + RB_WARN_ON_RET(cpu_buffer, + page->list.next->prev != &page->list); + RB_WARN_ON_RET(cpu_buffer, + page->list.prev->next != &page->list); } return 0; } -static unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer) -{ - return cpu_buffer->head_page->size; -} - static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages) { @@ -286,7 +304,6 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu) page->page = (void *)addr; INIT_LIST_HEAD(&cpu_buffer->reader_page->list); - cpu_buffer->reader_page->size = 0; ret = rb_allocate_pages(cpu_buffer, buffer->pages); if (ret < 0) @@ -294,8 +311,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu) cpu_buffer->head_page = list_entry(cpu_buffer->pages.next, struct buffer_page, list); - cpu_buffer->tail_page - = list_entry(cpu_buffer->pages.next, struct buffer_page, list); + cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page; return cpu_buffer; @@ -563,15 +579,6 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size) return 
-ENOMEM; } -static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) -{ - return cpu_buffer->reader_page->read == cpu_buffer->reader_page->size && - (cpu_buffer->tail_page == cpu_buffer->reader_page || - (cpu_buffer->tail_page == cpu_buffer->head_page && - cpu_buffer->head_page->read == - cpu_buffer->tail_page->write)); -} - static inline int rb_null_event(struct ring_buffer_event *event) { return event->type == RINGBUF_TYPE_PADDING; @@ -602,6 +609,33 @@ rb_iter_head_event(struct ring_buffer_iter *iter) return __rb_page_index(iter->head_page, iter->head); } +static inline unsigned rb_page_write(struct buffer_page *bpage) +{ + return local_read(&bpage->write); +} + +static inline unsigned rb_page_commit(struct buffer_page *bpage) +{ + return local_read(&bpage->commit); +} + +/* Size is determined by what has been commited */ +static inline unsigned rb_page_size(struct buffer_page *bpage) +{ + return rb_page_commit(bpage); +} + +static inline unsigned +rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer) +{ + return rb_page_commit(cpu_buffer->commit_page); +} + +static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer) +{ + return rb_page_commit(cpu_buffer->head_page); +} + /* * When the tail hits the head and the buffer is in overwrite mode, * the head jumps to the next page and all content on the previous @@ -637,16 +671,76 @@ static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer, *page = list_entry(p, struct buffer_page, list); } -static inline void -rb_add_stamp(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts) +static inline unsigned +rb_event_index(struct ring_buffer_event *event) { - cpu_buffer->tail_page->time_stamp = *ts; - cpu_buffer->write_stamp = *ts; + unsigned long addr = (unsigned long)event; + + return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE); } -static void rb_reset_head_page(struct ring_buffer_per_cpu *cpu_buffer) +static inline int +rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer, + struct ring_buffer_event *event) { - cpu_buffer->head_page->read = 0; + unsigned long addr = (unsigned long)event; + unsigned long index; + + index = rb_event_index(event); + addr &= PAGE_MASK; + + return cpu_buffer->commit_page->page == (void *)addr && + rb_commit_index(cpu_buffer) == index; +} + +static inline void +rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer, + struct ring_buffer_event *event) +{ + unsigned long addr = (unsigned long)event; + unsigned long index; + + index = rb_event_index(event); + addr &= PAGE_MASK; + + while (cpu_buffer->commit_page->page != (void *)addr) { + RB_WARN_ON(cpu_buffer, + cpu_buffer->commit_page == cpu_buffer->tail_page); + cpu_buffer->commit_page->commit = + cpu_buffer->commit_page->write; + rb_inc_page(cpu_buffer, &cpu_buffer->commit_page); + cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp; + } + + /* Now set the commit to the event's index */ + local_set(&cpu_buffer->commit_page->commit, index); +} + +static inline void +rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) +{ + /* + * We only race with interrupts and NMIs on this CPU. + * If we own the commit event, then we can commit + * all others that interrupted us, since the interruptions + * are in stack format (they finish before they come + * back to us). This allows us to do a simple loop to + * assign the commit to the tail. 
+ */ + while (cpu_buffer->commit_page != cpu_buffer->tail_page) { + cpu_buffer->commit_page->commit = + cpu_buffer->commit_page->write; + rb_inc_page(cpu_buffer, &cpu_buffer->commit_page); + cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp; + /* add barrier to keep gcc from optimizing too much */ + barrier(); + } + while (rb_commit_index(cpu_buffer) != + rb_page_write(cpu_buffer->commit_page)) { + cpu_buffer->commit_page->commit = + cpu_buffer->commit_page->write; + barrier(); + } } static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer) @@ -745,61 +839,120 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, unsigned type, unsigned long length, u64 *ts) { struct buffer_page *tail_page, *head_page, *reader_page; - unsigned long tail; + unsigned long tail, write; struct ring_buffer *buffer = cpu_buffer->buffer; struct ring_buffer_event *event; + unsigned long flags; tail_page = cpu_buffer->tail_page; - tail = cpu_buffer->tail_page->write; + write = local_add_return(length, &tail_page->write); + tail = write - length; - if (tail + length > BUF_PAGE_SIZE) { + /* See if we shot pass the end of this buffer page */ + if (write > BUF_PAGE_SIZE) { struct buffer_page *next_page = tail_page; - spin_lock(&cpu_buffer->lock); + spin_lock_irqsave(&cpu_buffer->lock, flags); + rb_inc_page(cpu_buffer, &next_page); head_page = cpu_buffer->head_page; reader_page = cpu_buffer->reader_page; /* we grabbed the lock before incrementing */ - WARN_ON(next_page == reader_page); + RB_WARN_ON(cpu_buffer, next_page == reader_page); + + /* + * If for some reason, we had an interrupt storm that made + * it all the way around the buffer, bail, and warn + * about it. + */ + if (unlikely(next_page == cpu_buffer->commit_page)) { + WARN_ON_ONCE(1); + goto out_unlock; + } if (next_page == head_page) { if (!(buffer->flags & RB_FL_OVERWRITE)) { - spin_unlock(&cpu_buffer->lock); - return NULL; + /* reset write */ + if (tail <= BUF_PAGE_SIZE) + local_set(&tail_page->write, tail); + goto out_unlock; } - /* count overflows */ - rb_update_overflow(cpu_buffer); + /* tail_page has not moved yet? */ + if (tail_page == cpu_buffer->tail_page) { + /* count overflows */ + rb_update_overflow(cpu_buffer); + + rb_inc_page(cpu_buffer, &head_page); + cpu_buffer->head_page = head_page; + cpu_buffer->head_page->read = 0; + } + } - rb_inc_page(cpu_buffer, &head_page); - cpu_buffer->head_page = head_page; - rb_reset_head_page(cpu_buffer); + /* + * If the tail page is still the same as what we think + * it is, then it is up to us to update the tail + * pointer. + */ + if (tail_page == cpu_buffer->tail_page) { + local_set(&next_page->write, 0); + local_set(&next_page->commit, 0); + cpu_buffer->tail_page = next_page; + + /* reread the time stamp */ + *ts = ring_buffer_time_stamp(cpu_buffer->cpu); + cpu_buffer->tail_page->time_stamp = *ts; } - if (tail != BUF_PAGE_SIZE) { + /* + * The actual tail page has moved forward. 
+ */ + if (tail < BUF_PAGE_SIZE) { + /* Mark the rest of the page with padding */ event = __rb_page_index(tail_page, tail); - /* page padding */ event->type = RINGBUF_TYPE_PADDING; } - tail_page->size = tail; - tail_page = next_page; - tail_page->size = 0; - tail = 0; - cpu_buffer->tail_page = tail_page; - cpu_buffer->tail_page->write = tail; - rb_add_stamp(cpu_buffer, ts); - spin_unlock(&cpu_buffer->lock); + if (tail <= BUF_PAGE_SIZE) + /* Set the write back to the previous setting */ + local_set(&tail_page->write, tail); + + /* + * If this was a commit entry that failed, + * increment that too + */ + if (tail_page == cpu_buffer->commit_page && + tail == rb_commit_index(cpu_buffer)) { + rb_set_commit_to_write(cpu_buffer); + } + + spin_unlock_irqrestore(&cpu_buffer->lock, flags); + + /* fail and let the caller try again */ + return ERR_PTR(-EAGAIN); } - BUG_ON(tail + length > BUF_PAGE_SIZE); + /* We reserved something on the buffer */ + + BUG_ON(write > BUF_PAGE_SIZE); event = __rb_page_index(tail_page, tail); rb_update_event(event, type, length); + /* + * If this is a commit and the tail is zero, then update + * this page's time stamp. + */ + if (!tail && rb_is_commit(cpu_buffer, event)) + cpu_buffer->commit_page->time_stamp = *ts; + return event; + + out_unlock: + spin_unlock_irqrestore(&cpu_buffer->lock, flags); + return NULL; } static int @@ -808,6 +961,7 @@ rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer, { struct ring_buffer_event *event; static int once; + int ret; if (unlikely(*delta > (1ULL << 59) && !once++)) { printk(KERN_WARNING "Delta way too big! %llu" @@ -825,21 +979,38 @@ rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer, RB_LEN_TIME_EXTEND, ts); if (!event) - return -1; + return -EBUSY; - /* check to see if we went to the next page */ - if (cpu_buffer->tail_page->write) { - /* Still on same page, update timestamp */ - event->time_delta = *delta & TS_MASK; - event->array[0] = *delta >> TS_SHIFT; - /* commit the time event */ - cpu_buffer->tail_page->write += - rb_event_length(event); + if (PTR_ERR(event) == -EAGAIN) + return -EAGAIN; + + /* Only a commited time event can update the write stamp */ + if (rb_is_commit(cpu_buffer, event)) { + /* + * If this is the first on the page, then we need to + * update the page itself, and just put in a zero. + */ + if (rb_event_index(event)) { + event->time_delta = *delta & TS_MASK; + event->array[0] = *delta >> TS_SHIFT; + } else { + cpu_buffer->commit_page->time_stamp = *ts; + event->time_delta = 0; + event->array[0] = 0; + } cpu_buffer->write_stamp = *ts; - *delta = 0; + /* let the caller know this was the commit */ + ret = 1; + } else { + /* Darn, this is just wasted space */ + event->time_delta = 0; + event->array[0] = 0; + ret = 0; } - return 0; + *delta = 0; + + return ret; } static struct ring_buffer_event * @@ -848,32 +1019,69 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer, { struct ring_buffer_event *event; u64 ts, delta; + int commit = 0; + again: ts = ring_buffer_time_stamp(cpu_buffer->cpu); - if (cpu_buffer->tail_page->write) { + /* + * Only the first commit can update the timestamp. + * Yes there is a race here. If an interrupt comes in + * just after the conditional and it traces too, then it + * will also check the deltas. More than one timestamp may + * also be made. But only the entry that did the actual + * commit will be something other than zero. 
+ */ + if (cpu_buffer->tail_page == cpu_buffer->commit_page && + rb_page_write(cpu_buffer->tail_page) == + rb_commit_index(cpu_buffer)) { + delta = ts - cpu_buffer->write_stamp; + /* make sure this delta is calculated here */ + barrier(); + + /* Did the write stamp get updated already? */ + if (unlikely(ts < cpu_buffer->write_stamp)) + goto again; + if (test_time_stamp(delta)) { - int ret; - ret = rb_add_time_stamp(cpu_buffer, &ts, &delta); - if (ret < 0) + commit = rb_add_time_stamp(cpu_buffer, &ts, &delta); + + if (commit == -EBUSY) return NULL; + + if (commit == -EAGAIN) + goto again; + + RB_WARN_ON(cpu_buffer, commit < 0); } - } else { - spin_lock(&cpu_buffer->lock); - rb_add_stamp(cpu_buffer, &ts); - spin_unlock(&cpu_buffer->lock); + } else + /* Non commits have zero deltas */ delta = 0; - } event = __rb_reserve_next(cpu_buffer, type, length, &ts); - if (!event) + if (PTR_ERR(event) == -EAGAIN) + goto again; + + if (!event) { + if (unlikely(commit)) + /* + * Ouch! We needed a timestamp and it was commited. But + * we didn't get our event reserved. + */ + rb_set_commit_to_write(cpu_buffer); return NULL; + } - /* If the reserve went to the next page, our delta is zero */ - if (!cpu_buffer->tail_page->write) + /* + * If the timestamp was commited, make the commit our entry + * now so that we will update it when needed. + */ + if (commit) + rb_set_commit_event(cpu_buffer, event); + else if (!rb_is_commit(cpu_buffer, event)) delta = 0; event->time_delta = delta; @@ -881,6 +1089,8 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer, return event; } +static DEFINE_PER_CPU(int, rb_need_resched); + /** * ring_buffer_lock_reserve - reserve a part of the buffer * @buffer: the ring buffer to reserve from @@ -904,12 +1114,15 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, { struct ring_buffer_per_cpu *cpu_buffer; struct ring_buffer_event *event; - int cpu; + int cpu, resched; if (atomic_read(&buffer->record_disabled)) return NULL; - local_irq_save(*flags); + /* If we are tracing schedule, we don't want to recurse */ + resched = need_resched(); + preempt_disable_notrace(); + cpu = raw_smp_processor_id(); if (!cpu_isset(cpu, buffer->cpumask)) @@ -922,26 +1135,42 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, length = rb_calculate_event_length(length); if (length > BUF_PAGE_SIZE) - return NULL; + goto out; event = rb_reserve_next_event(cpu_buffer, RINGBUF_TYPE_DATA, length); if (!event) goto out; + /* + * Need to store resched state on this cpu. + * Only the first needs to. + */ + + if (preempt_count() == 1) + per_cpu(rb_need_resched, cpu) = resched; + return event; out: - local_irq_restore(*flags); + if (resched) + preempt_enable_notrace(); + else + preempt_enable_notrace(); return NULL; } static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer, struct ring_buffer_event *event) { - cpu_buffer->tail_page->write += rb_event_length(event); - cpu_buffer->tail_page->size = cpu_buffer->tail_page->write; - cpu_buffer->write_stamp += event->time_delta; cpu_buffer->entries++; + + /* Only process further if we own the commit */ + if (!rb_is_commit(cpu_buffer, event)) + return; + + cpu_buffer->write_stamp += event->time_delta; + + rb_set_commit_to_write(cpu_buffer); } /** @@ -965,7 +1194,16 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer, rb_commit(cpu_buffer, event); - local_irq_restore(flags); + /* + * Only the last preempt count needs to restore preemption. 
+ */ + if (preempt_count() == 1) { + if (per_cpu(rb_need_resched, cpu)) + preempt_enable_no_resched_notrace(); + else + preempt_enable_notrace(); + } else + preempt_enable_no_resched_notrace(); return 0; } @@ -989,15 +1227,17 @@ int ring_buffer_write(struct ring_buffer *buffer, { struct ring_buffer_per_cpu *cpu_buffer; struct ring_buffer_event *event; - unsigned long event_length, flags; + unsigned long event_length; void *body; int ret = -EBUSY; - int cpu; + int cpu, resched; if (atomic_read(&buffer->record_disabled)) return -EBUSY; - local_irq_save(flags); + resched = need_resched(); + preempt_disable_notrace(); + cpu = raw_smp_processor_id(); if (!cpu_isset(cpu, buffer->cpumask)) @@ -1022,11 +1262,26 @@ int ring_buffer_write(struct ring_buffer *buffer, ret = 0; out: - local_irq_restore(flags); + if (resched) + preempt_enable_no_resched_notrace(); + else + preempt_enable_notrace(); return ret; } +static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) +{ + struct buffer_page *reader = cpu_buffer->reader_page; + struct buffer_page *head = cpu_buffer->head_page; + struct buffer_page *commit = cpu_buffer->commit_page; + + return reader->read == rb_page_commit(reader) && + (commit == reader || + (commit == head && + head->read == rb_page_commit(commit))); +} + /** * ring_buffer_record_disable - stop all writes into the buffer * @buffer: The ring buffer to stop writes to. @@ -1204,8 +1459,8 @@ int ring_buffer_iter_empty(struct ring_buffer_iter *iter) cpu_buffer = iter->cpu_buffer; - return iter->head_page == cpu_buffer->tail_page && - iter->head == cpu_buffer->tail_page->write; + return iter->head_page == cpu_buffer->commit_page && + iter->head == rb_commit_index(cpu_buffer); } static void @@ -1282,15 +1537,16 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) reader = cpu_buffer->reader_page; /* If there's more to read, return this page */ - if (cpu_buffer->reader_page->read < reader->size) + if (cpu_buffer->reader_page->read < rb_page_size(reader)) goto out; /* Never should we have an index greater than the size */ - WARN_ON(cpu_buffer->reader_page->read > reader->size); + RB_WARN_ON(cpu_buffer, + cpu_buffer->reader_page->read > rb_page_size(reader)); /* check if we caught up to the tail */ reader = NULL; - if (cpu_buffer->tail_page == cpu_buffer->reader_page) + if (cpu_buffer->commit_page == cpu_buffer->reader_page) goto out; /* @@ -1301,7 +1557,9 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) reader = cpu_buffer->head_page; cpu_buffer->reader_page->list.next = reader->list.next; cpu_buffer->reader_page->list.prev = reader->list.prev; - cpu_buffer->reader_page->size = 0; + + local_set(&cpu_buffer->reader_page->write, 0); + local_set(&cpu_buffer->reader_page->commit, 0); /* Make the reader page now replace the head */ reader->list.prev->next = &cpu_buffer->reader_page->list; @@ -1313,7 +1571,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) */ cpu_buffer->head_page = cpu_buffer->reader_page; - if (cpu_buffer->tail_page != reader) + if (cpu_buffer->commit_page != reader) rb_inc_page(cpu_buffer, &cpu_buffer->head_page); /* Finally update the reader page to the new head */ @@ -1363,8 +1621,8 @@ static void rb_advance_iter(struct ring_buffer_iter *iter) /* * Check if we are at the end of the buffer. 
*/ - if (iter->head >= iter->head_page->size) { - BUG_ON(iter->head_page == cpu_buffer->tail_page); + if (iter->head >= rb_page_size(iter->head_page)) { + BUG_ON(iter->head_page == cpu_buffer->commit_page); rb_inc_iter(iter); return; } @@ -1377,16 +1635,16 @@ static void rb_advance_iter(struct ring_buffer_iter *iter) * This should not be called to advance the header if we are * at the tail of the buffer. */ - BUG_ON((iter->head_page == cpu_buffer->tail_page) && - (iter->head + length > cpu_buffer->tail_page->write)); + BUG_ON((iter->head_page == cpu_buffer->commit_page) && + (iter->head + length > rb_commit_index(cpu_buffer))); rb_update_iter_read_stamp(iter, event); iter->head += length; /* check for end of page padding */ - if ((iter->head >= iter->head_page->size) && - (iter->head_page != cpu_buffer->tail_page)) + if ((iter->head >= rb_page_size(iter->head_page)) && + (iter->head_page != cpu_buffer->commit_page)) rb_advance_iter(iter); } @@ -1420,7 +1678,7 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts) switch (event->type) { case RINGBUF_TYPE_PADDING: - WARN_ON(1); + RB_WARN_ON(cpu_buffer, 1); rb_advance_reader(cpu_buffer); return NULL; @@ -1622,14 +1880,17 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) { cpu_buffer->head_page = list_entry(cpu_buffer->pages.next, struct buffer_page, list); - cpu_buffer->head_page->size = 0; - cpu_buffer->tail_page = cpu_buffer->head_page; - cpu_buffer->tail_page->size = 0; - INIT_LIST_HEAD(&cpu_buffer->reader_page->list); - cpu_buffer->reader_page->size = 0; + local_set(&cpu_buffer->head_page->write, 0); + local_set(&cpu_buffer->head_page->commit, 0); cpu_buffer->head_page->read = 0; + + cpu_buffer->tail_page = cpu_buffer->head_page; + cpu_buffer->commit_page = cpu_buffer->head_page; + + INIT_LIST_HEAD(&cpu_buffer->reader_page->list); + local_set(&cpu_buffer->reader_page->write, 0); + local_set(&cpu_buffer->reader_page->commit, 0); cpu_buffer->reader_page->read = 0; cpu_buffer->overrun = 0; -- cgit v1.2.3-70-g09d2 From 3ea2e6d71aafe35b8aaf89ed711a283815acfae6 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Sat, 4 Oct 2008 02:01:00 -0400 Subject: ftrace: make some tracers reentrant Now that the ring buffer is reentrant, some of the ftrace tracers (sched_switch, debugging traces) can also be reentrant. Note: Never make the function tracer reentrant, that can cause recursion problems all over the kernel. The function tracer must disable reentrancy.
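The per-probe change is mechanical: drop the atomic_inc_return()/atomic_dec() pair that used to keep a probe exclusive and keep only a plain read of the disabled counter. A compressed sketch of the before/after shape, using C11 atomics in place of the kernel's atomic_t purely so it compiles in user space:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int disabled;

/* old shape: the refcount keeps a nested (reentrant) caller out,
 * because the inner call sees a count greater than one and backs off */
static void probe_exclusive(const char *what)
{
	if (atomic_fetch_add(&disabled, 1) == 0)
		printf("recorded: %s\n", what);
	atomic_fetch_sub(&disabled, 1);
}

/* new shape: the ring buffer copes with nesting itself, so the
 * probe only honours an explicit "tracing disabled" request */
static void probe_reentrant(const char *what)
{
	if (!atomic_load(&disabled))
		printf("recorded: %s\n", what);
}

int main(void)
{
	probe_exclusive("sched_switch (old)");
	probe_reentrant("sched_switch (new)");
	return 0;
}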
Signed-off-by: Steven Rostedt Signed-off-by: Ingo Molnar --- kernel/trace/trace.c | 10 ++-------- kernel/trace/trace_sched_switch.c | 10 ++-------- 2 files changed, 4 insertions(+), 16 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 1cd2e8143bb..caa4051ce77 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -839,7 +839,6 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { struct trace_array *tr = &global_trace; struct trace_array_cpu *data; - long disabled; int cpu; int pc; @@ -850,12 +849,10 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) preempt_disable_notrace(); cpu = raw_smp_processor_id(); data = tr->data[cpu]; - disabled = atomic_inc_return(&data->disabled); - if (likely(disabled == 1)) + if (likely(!atomic_read(&data->disabled))) ftrace_trace_special(tr, data, arg1, arg2, arg3, pc); - atomic_dec(&data->disabled); preempt_enable_notrace(); } @@ -2961,7 +2958,6 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args) struct trace_array_cpu *data; struct print_entry *entry; unsigned long flags, irq_flags; - long disabled; int cpu, len = 0, size, pc; if (!tr->ctrl || tracing_disabled) @@ -2971,9 +2967,8 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args) preempt_disable_notrace(); cpu = raw_smp_processor_id(); data = tr->data[cpu]; - disabled = atomic_inc_return(&data->disabled); - if (unlikely(disabled != 1)) + if (unlikely(atomic_read(&data->disabled))) goto out; spin_lock_irqsave(&trace_buf_lock, flags); @@ -2999,7 +2994,6 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args) spin_unlock_irqrestore(&trace_buf_lock, flags); out: - atomic_dec(&data->disabled); preempt_enable_notrace(); return len; diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c index c7fa08a5b7f..b8f56beb1a6 100644 --- a/kernel/trace/trace_sched_switch.c +++ b/kernel/trace/trace_sched_switch.c @@ -24,7 +24,6 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev, { struct trace_array_cpu *data; unsigned long flags; - long disabled; int cpu; int pc; @@ -41,12 +40,10 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev, local_irq_save(flags); cpu = raw_smp_processor_id(); data = ctx_trace->data[cpu]; - disabled = atomic_inc_return(&data->disabled); - if (likely(disabled == 1)) + if (likely(!atomic_read(&data->disabled))) tracing_sched_switch_trace(ctx_trace, data, prev, next, flags, pc); - atomic_dec(&data->disabled); local_irq_restore(flags); } @@ -55,7 +52,6 @@ probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee) { struct trace_array_cpu *data; unsigned long flags; - long disabled; int cpu, pc; if (!likely(tracer_enabled)) @@ -67,13 +63,11 @@ probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee) local_irq_save(flags); cpu = raw_smp_processor_id(); data = ctx_trace->data[cpu]; - disabled = atomic_inc_return(&data->disabled); - if (likely(disabled == 1)) + if (likely(!atomic_read(&data->disabled))) tracing_sched_wakeup_trace(ctx_trace, data, wakee, current, flags, pc); - atomic_dec(&data->disabled); local_irq_restore(flags); } -- cgit v1.2.3-70-g09d2 From c2931e05ec5965597cbfb79ad332d4a29aeceb23 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sat, 4 Oct 2008 22:04:44 +0200 Subject: ftrace: return an error when setting a nonexistent tracer When one tries to set a nonexistent tracer, no error is returned, as if the name of the tracer were correct. We should return -EINVAL.
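The fix follows the usual write(2)-handler convention: return the number of bytes consumed on success and a negative errno on failure, and only advance the file position in the success case. A user-space sketch of the lookup, with a made-up tracer list:

#include <errno.h>
#include <stdio.h>
#include <string.h>

static const char *tracers[] = { "none", "sched_switch", "initcall" };

/* returns cnt (bytes consumed) if the tracer exists, -EINVAL if not,
 * mirroring the fixed tracing_set_trace_write() */
static long set_tracer(const char *buf, long cnt)
{
	size_t i;

	for (i = 0; i < sizeof(tracers) / sizeof(tracers[0]); i++)
		if (strcmp(tracers[i], buf) == 0)
			return cnt;
	return -EINVAL;
}

int main(void)
{
	printf("%ld\n", set_tracer("sched_switch", 13)); /* 13 */
	printf("%ld\n", set_tracer("nosuch", 7));        /* -EINVAL */
	return 0;
}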
Signed-off-by: Frederic Weisbecker Acked-by: Steven Rostedt Signed-off-by: Ingo Molnar --- kernel/trace/trace.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index caa4051ce77..78d56614c95 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -2381,9 +2381,11 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf, struct tracer *t; char buf[max_tracer_type_len+1]; int i; + size_t ret; if (cnt > max_tracer_type_len) cnt = max_tracer_type_len; + ret = cnt; if (copy_from_user(&buf, ubuf, cnt)) return -EFAULT; @@ -2399,7 +2401,11 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf, if (strcmp(t->name, buf) == 0) break; } - if (!t || t == current_trace) + if (!t) { + ret = -EINVAL; + goto out; + } + if (t == current_trace) goto out; if (current_trace && current_trace->reset) @@ -2412,9 +2418,10 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf, out: mutex_unlock(&trace_types_lock); - filp->f_pos += cnt; + if (ret == cnt) + filp->f_pos += cnt; - return cnt; + return ret; } static ssize_t -- cgit v1.2.3-70-g09d2 From 8a5d900cca57115326bc5b9e7f3bac980986c2c8 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Sat, 4 Oct 2008 13:42:27 -0700 Subject: tracing/fastboot: fix printk format typo in boot tracer MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When printing nanoseconds, the right printk format string is %09 not %06... Signed-off-by: Arjan van de Ven Acked-by: Frédéric Weisbecker Signed-off-by: Ingo Molnar --- kernel/trace/trace_boot.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c index a7efe355965..d0a5e50eeff 100644 --- a/kernel/trace/trace_boot.c +++ b/kernel/trace/trace_boot.c @@ -62,14 +62,14 @@ static enum print_line_t initcall_print_line(struct trace_iterator *iter) struct timespec rettime = ktime_to_timespec(it->rettime); if (entry->type == TRACE_BOOT) { - ret = trace_seq_printf(s, "[%5ld.%06ld] calling %s @ %i\n", + ret = trace_seq_printf(s, "[%5ld.%09ld] calling %s @ %i\n", calltime.tv_sec, calltime.tv_nsec, it->func, it->caller); if (!ret) return TRACE_TYPE_PARTIAL_LINE; - ret = trace_seq_printf(s, "[%5ld.%06ld] initcall %s " + ret = trace_seq_printf(s, "[%5ld.%09ld] initcall %s " "returned %d after %lld msecs\n", rettime.tv_sec, rettime.tv_nsec, -- cgit v1.2.3-70-g09d2 From 2fbc474901933c8f0c09b0280dfbb6780cb8bd60 Mon Sep 17 00:00:00 2001 From: Harvey Harrison Date: Wed, 8 Oct 2008 16:51:49 -0700 Subject: ftrace: fix hex output mode of ftrace Fix the output of ftrace in hex mode as the hi/lo nibbles are output in reverse order. 
Without this patch, the output of ftrace is: raw mode : 6474 0 141531612444 0 140 + 6402 120 S hex mode : 000091a4 00000000 000000023f1f50c1 00000000 c8 000000b2 00009120 87 ffff00c8 00000035 There is an inversion in the output: hex(6474) is 194a [based on a patch by Philippe Reynes ] Signed-off-by: Harvey Harrison Signed-off-by: Ingo Molnar --- kernel/trace/trace.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 78d56614c95..36cbb873845 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -336,14 +336,12 @@ trace_seq_putmem(struct trace_seq *s, void *mem, size_t len) } #define HEX_CHARS 17 -static const char hex2asc[] = "0123456789abcdef"; static int trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len) { unsigned char hex[HEX_CHARS]; unsigned char *data = mem; - unsigned char byte; int i, j; BUG_ON(len >= HEX_CHARS); @@ -353,10 +351,8 @@ trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len) #else for (i = len-1, j = 0; i >= 0; i--) { #endif - byte = data[i]; - - hex[j++] = hex2asc[byte & 0x0f]; - hex[j++] = hex2asc[byte >> 4]; + hex[j++] = hex_asc_hi(data[i]); + hex[j++] = hex_asc_lo(data[i]); } hex[j++] = ' '; -- cgit v1.2.3-70-g09d2 From ad0a3b68114e8f3c25ac0045b45a2838f23e3b3a Mon Sep 17 00:00:00 2001 From: Harvey Harrison Date: Wed, 8 Oct 2008 17:44:55 -0700 Subject: trace: add build-time check to avoid overrunning hex buffer Remove the runtime BUG_ON and change it to a compile-time check in the macro that calls the hex format routine [Noticed by Joe Perches] Signed-off-by: Harvey Harrison Signed-off-by: Ingo Molnar --- kernel/trace/trace.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 36cbb873845..d345d649d07 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -335,7 +335,8 @@ trace_seq_putmem(struct trace_seq *s, void *mem, size_t len) return len; } -#define HEX_CHARS 17 +#define MAX_MEMHEX_BYTES 8 +#define HEX_CHARS (MAX_MEMHEX_BYTES*2 + 1) static int trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len) @@ -344,8 +345,6 @@ trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len) unsigned char *data = mem; int i, j; - BUG_ON(len >= HEX_CHARS); - #ifdef __BIG_ENDIAN for (i = 0, j = 0; i < len; i++) { #else @@ -1668,6 +1667,7 @@ do { \ #define SEQ_PUT_HEX_FIELD_RET(s, x) \ do { \ + BUILD_BUG_ON(sizeof(x) > MAX_MEMHEX_BYTES); \ if (!trace_seq_putmem_hex(s, &(x), sizeof(x))) \ return 0; \ } while (0) -- cgit v1.2.3-70-g09d2 From 4519d9e54dcd273975ad0adebad2a08c20428029 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 14 Oct 2008 14:15:43 +0200 Subject: tracing/stacktrace: improve help text Improve the help text that is displayed for CONFIG_STACK_TRACER. Signed-off-by: Ingo Molnar --- kernel/trace/Kconfig | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 396aea11398..11fd03a429f 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -141,8 +141,17 @@ config STACK_TRACER select FTRACE select STACKTRACE help - This tracer records the max stack of the kernel, and displays - it in debugfs/tracing/stack_trace + This special tracer records the maximum stack footprint of the + kernel and displays it in debugfs/tracing/stack_trace.
+ + This tracer works by hooking into every function call that the + kernel executes, and keeping a maximum stack depth value and + stack-trace saved. Because this logic has to execute in every + kernel function, all the time, this option can slow down the + kernel measurably and is generally intended for kernel + developers only. + + Say N if unsure. config DYNAMIC_FTRACE bool "enable/disable ftrace tracepoints dynamically" -- cgit v1.2.3-70-g09d2 From 98d9c66ab07471006fd7910cb16453581c41a3e7 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 14 Oct 2008 14:27:20 +0200 Subject: tracing/fastboot: improve help text Improve the help text of the boot tracer. Signed-off-by: Ingo Molnar --- kernel/trace/Kconfig | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 11fd03a429f..1cb3e1f616a 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -127,12 +127,17 @@ config BOOT_TRACER select TRACING help This tracer helps developers to optimize boot times: it records - the timings of the initcalls. Its aim is to be parsed by the - /scripts/bootgraph.pl tool to produce pretty graphics about - boot inefficiencies, giving a visual representation of the - delays during initcalls. Note that tracers self tests can't - be enabled if this tracer is selected since only one tracer - should touch the tracing buffer at a time. + the timings of the initcalls and traces key events and the identity + of tasks that can cause boot delays, such as context-switches. + + Its aim is to be parsed by the /scripts/bootgraph.pl tool to + produce pretty graphics about boot inefficiencies, giving a visual + representation of the delays during initcalls - but the raw + /debug/tracing/trace text output is readable too. + + ( Note that tracing self tests can't be enabled if this tracer is + selected, because the self-tests are an initcall as well and that + would invalidate the boot trace. ) config STACK_TRACER bool "Trace max stack" -- cgit v1.2.3-70-g09d2
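Tying the two hex-mode patches above together: the first swaps the nibble order so the high nibble is emitted first, and the second replaces the runtime length check with a compile-time bound of eight bytes per field. A user-space sketch of the corrected routine, little-endian case only; hex_hi()/hex_lo() stand in for the kernel's hex_asc_hi()/hex_asc_lo(), and the caller is assumed to keep len within MAX_MEMHEX_BYTES, which the kernel now enforces with BUILD_BUG_ON.

#include <stddef.h>
#include <stdio.h>

#define MAX_MEMHEX_BYTES 8
#define HEX_CHARS (MAX_MEMHEX_BYTES*2 + 1)  /* 2 chars per byte + a space */

static char hex_hi(unsigned char b) { return "0123456789abcdef"[b >> 4]; }
static char hex_lo(unsigned char b) { return "0123456789abcdef"[b & 0x0f]; }

static void putmem_hex(const void *mem, size_t len)
{
	char hex[HEX_CHARS];
	const unsigned char *data = mem;
	int i, j = 0;

	/* little-endian: walk the bytes from most significant to least,
	 * emitting the high nibble before the low one; the original code
	 * had the two nibbles reversed (the kernel walks forward instead
	 * on big-endian, under #ifdef __BIG_ENDIAN) */
	for (i = (int)len - 1; i >= 0; i--) {
		hex[j++] = hex_hi(data[i]);
		hex[j++] = hex_lo(data[i]);
	}
	hex[j++] = ' ';
	fwrite(hex, 1, j, stdout);
}

int main(void)
{
	unsigned short pid = 6474;  /* prints "194a ", where the bug gave "91a4" */
	putmem_hex(&pid, sizeof(pid));
	putchar('\n');
	return 0;
}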