author     Steven Rostedt <srostedt@redhat.com>    2008-11-12 15:24:24 -0500
committer  Ingo Molnar <mingo@elte.hu>             2008-11-12 22:27:58 +0100
commit     2ed84eeb8808cf3c9f039213ca137ffd7d753f0e (patch)
tree       3aa22269a1fd5ed0b66826120ca9b572400962b5
parent     68d119f0a66f7e3663304343b072e56a2693446b (diff)
trace: rename unlikely profiler to branch profiler
Impact: name change of unlikely tracer and profiler

Ingo Molnar suggested changing the config from UNLIKELY_PROFILE to
BRANCH_PROFILING. I never did like the "unlikely" name so I went one
step farther, and renamed all the unlikely configurations to a
"BRANCH" variant.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--  arch/x86/kernel/vsyscall_64.c      |  2
-rw-r--r--  arch/x86/vdso/vclock_gettime.c     |  2
-rw-r--r--  include/asm-generic/vmlinux.lds.h  |  2
-rw-r--r--  include/linux/compiler.h           | 19
-rw-r--r--  kernel/trace/Kconfig               | 10
-rw-r--r--  kernel/trace/Makefile              |  7
-rw-r--r--  kernel/trace/trace.c               |  2
-rw-r--r--  kernel/trace/trace.h               |  6
-rw-r--r--  kernel/trace/trace_unlikely.c      |  4
9 files changed, 27 insertions(+), 27 deletions(-)
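For context, the two x86 userspace-code hunks below rely on a per-file opt-out: defining DISABLE_BRANCH_PROFILING before any header that pulls in <linux/compiler.h> keeps likely()/unlikely() untraced in that file, and the tracing Makefile now passes the same guard via -DDISABLE_BRANCH_PROFILING. A minimal sketch of the pattern follows; my_userspace_helper() is a hypothetical function, not code from this patch:

/*
 * Sketch (not part of this patch): a file that must not be
 * instrumented opts out of branch profiling.  The define has to come
 * before any include that reaches <linux/compiler.h>, which is
 * exactly what the vsyscall and vDSO hunks below do.
 */
#define DISABLE_BRANCH_PROFILING
#include <linux/kernel.h>

static int my_userspace_helper(int x)	/* hypothetical example */
{
	/* likely() here stays a plain __builtin_expect(), no profiler hook */
	if (likely(x > 0))
		return x;
	return 0;
}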
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index ece02932ea5..6f3d3d4cd97 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -18,7 +18,7 @@
*/
/* Disable profiling for userspace code: */
-#define DISABLE_UNLIKELY_PROFILE
+#define DISABLE_BRANCH_PROFILING
#include <linux/time.h>
#include <linux/init.h>
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index 6e667631e7d..d9d35824c56 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -10,7 +10,7 @@
*/
/* Disable profiling for userspace code: */
-#define DISABLE_UNLIKELY_PROFILE
+#define DISABLE_BRANCH_PROFILING
#include <linux/kernel.h>
#include <linux/posix-timers.h>
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index e10beb5335c..a5e4ed9baec 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -45,7 +45,7 @@
#define MCOUNT_REC()
#endif
-#ifdef CONFIG_TRACE_UNLIKELY_PROFILE
+#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE() VMLINUX_SYMBOL(__start_likely_profile) = .; \
*(_ftrace_likely) \
VMLINUX_SYMBOL(__stop_likely_profile) = .; \
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 63b7d9089d6..c7d804a7a4d 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -59,26 +59,27 @@ extern void __chk_io_ptr(const volatile void __iomem *);
* specific implementations come from the above header files
*/
-/*
- * Note: DISABLE_UNLIKELY_PROFILE can be used by special lowlevel code
- * to disable branch tracing on a per file basis.
- */
-#if defined(CONFIG_TRACE_UNLIKELY_PROFILE) && !defined(DISABLE_UNLIKELY_PROFILE)
-struct ftrace_likely_data {
+struct ftrace_branch_data {
const char *func;
const char *file;
unsigned line;
unsigned long correct;
unsigned long incorrect;
};
-void ftrace_likely_update(struct ftrace_likely_data *f, int val, int expect);
+
+/*
+ * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
+ * to disable branch tracing on a per file basis.
+ */
+#if defined(CONFIG_TRACE_BRANCH_PROFILING) && !defined(DISABLE_BRANCH_PROFILING)
+void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
#define likely_notrace(x) __builtin_expect(!!(x), 1)
#define unlikely_notrace(x) __builtin_expect(!!(x), 0)
#define likely_check(x) ({ \
int ______r; \
- static struct ftrace_likely_data \
+ static struct ftrace_branch_data \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_likely"))) \
______f = { \
@@ -93,7 +94,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, int expect);
})
#define unlikely_check(x) ({ \
int ______r; \
- static struct ftrace_likely_data \
+ static struct ftrace_branch_data \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_unlikely"))) \
______f = { \
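The likely_check()/unlikely_check() macros above place one struct ftrace_branch_data per annotated branch into the _ftrace_likely/_ftrace_unlikely sections, and the vmlinux.lds.h hunk brackets those sections with __start_likely_profile/__stop_likely_profile symbols. Under that assumption, a reporter can simply walk the section; dump_likely_profile() below is an illustrative sketch, not code from this patch:

/*
 * Sketch: walk the _ftrace_likely section laid out by LIKELY_PROFILE()
 * and print one line per annotated branch.  Illustrative only.
 */
#include <linux/kernel.h>
#include <linux/compiler.h>

extern struct ftrace_branch_data __start_likely_profile[];
extern struct ftrace_branch_data __stop_likely_profile[];

static void dump_likely_profile(void)
{
	struct ftrace_branch_data *p;

	for (p = __start_likely_profile; p < __stop_likely_profile; p++)
		printk(KERN_INFO "%s:%u %s correct=%lu incorrect=%lu\n",
		       p->file, p->line, p->func,
		       p->correct, p->incorrect);
}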
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 8abcaf821be..9c89526b6b7 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -159,7 +159,7 @@ config BOOT_TRACER
selected, because the self-tests are an initcall as well and that
would invalidate the boot trace. )
-config TRACE_UNLIKELY_PROFILE
+config TRACE_BRANCH_PROFILING
bool "Trace likely/unlikely profiler"
depends on DEBUG_KERNEL
select TRACING
@@ -175,7 +175,7 @@ config TRACE_UNLIKELY_PROFILE
Say N if unsure.
-config TRACING_UNLIKELY
+config TRACING_BRANCHES
bool
help
Selected by tracers that will trace the likely and unlikely
@@ -183,10 +183,10 @@ config TRACING_UNLIKELY
profiled. Profiling the tracing infrastructure can only happen
when the likelys and unlikelys are not being traced.
-config UNLIKELY_TRACER
+config BRANCH_TRACER
bool "Trace likely/unlikely instances"
- depends on TRACE_UNLIKELY_PROFILE
- select TRACING_UNLIKELY
+ depends on TRACE_BRANCH_PROFILING
+ select TRACING_BRANCHES
help
This traces the events of likely and unlikely condition
calls in the kernel. The difference between this and the
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index c938d03516c..0087df7ba44 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -11,9 +11,8 @@ obj-y += trace_selftest_dynamic.o
endif
# If unlikely tracing is enabled, do not trace these files
-ifdef CONFIG_TRACING_UNLIKELY
-KBUILD_CFLAGS += '-Dlikely(x)=likely_notrace(x)'
-KBUILD_CFLAGS += '-Dunlikely(x)=unlikely_notrace(x)'
+ifdef CONFIG_TRACING_BRANCHES
+KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
endif
obj-$(CONFIG_FUNCTION_TRACER) += libftrace.o
@@ -31,6 +30,6 @@ obj-$(CONFIG_STACK_TRACER) += trace_stack.o
obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
obj-$(CONFIG_BOOT_TRACER) += trace_boot.o
obj-$(CONFIG_FUNCTION_RET_TRACER) += trace_functions_return.o
-obj-$(CONFIG_TRACE_UNLIKELY_PROFILE) += trace_unlikely.o
+obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_unlikely.o
libftrace-y := ftrace.o
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index d842db14a59..bad59d32a4a 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -258,7 +258,7 @@ static const char *trace_options[] = {
"sched-tree",
"ftrace_printk",
"ftrace_preempt",
-#ifdef CONFIG_UNLIKELY_TRACER
+#ifdef CONFIG_BRANCH_TRACER
"unlikely",
#endif
NULL
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 9635aa2c4fc..dccae631294 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -468,7 +468,7 @@ enum trace_iterator_flags {
TRACE_ITER_SCHED_TREE = 0x200,
TRACE_ITER_PRINTK = 0x400,
TRACE_ITER_PREEMPTONLY = 0x800,
-#ifdef CONFIG_UNLIKELY_TRACER
+#ifdef CONFIG_BRANCH_TRACER
TRACE_ITER_UNLIKELY = 0x1000,
#endif
};
@@ -530,7 +530,7 @@ static inline void ftrace_preempt_enable(int resched)
preempt_enable_notrace();
}
-#ifdef CONFIG_UNLIKELY_TRACER
+#ifdef CONFIG_BRANCH_TRACER
extern int enable_unlikely_tracing(struct trace_array *tr);
extern void disable_unlikely_tracing(void);
static inline int trace_unlikely_enable(struct trace_array *tr)
@@ -552,6 +552,6 @@ static inline int trace_unlikely_enable(struct trace_array *tr)
static inline void trace_unlikely_disable(void)
{
}
-#endif /* CONFIG_UNLIKELY_TRACER */
+#endif /* CONFIG_BRANCH_TRACER */
#endif /* _LINUX_KERNEL_TRACE_H */
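The trace.h hunk above exposes trace_unlikely_enable()/trace_unlikely_disable() wrappers, with stub versions when CONFIG_BRANCH_TRACER is off, so callers need no #ifdefs of their own. A hedged sketch of how a tracer might use them is below; my_branch_tracer_init/reset are hypothetical names, not part of this patch:

/*
 * Sketch (not from this patch): toggling branch-event recording
 * through the wrappers declared in kernel/trace/trace.h.
 */
#include "trace.h"

static int my_branch_tracer_init(struct trace_array *tr)
{
	return trace_unlikely_enable(tr);
}

static void my_branch_tracer_reset(struct trace_array *tr)
{
	trace_unlikely_disable();
}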
diff --git a/kernel/trace/trace_unlikely.c b/kernel/trace/trace_unlikely.c
index 7290e0e7b4e..856eb3b7f69 100644
--- a/kernel/trace/trace_unlikely.c
+++ b/kernel/trace/trace_unlikely.c
@@ -15,7 +15,7 @@
#include <asm/local.h>
#include "trace.h"
-#ifdef CONFIG_UNLIKELY_TRACER
+#ifdef CONFIG_BRANCH_TRACER
static int unlikely_tracing_enabled __read_mostly;
static DEFINE_MUTEX(unlikely_tracing_mutex);
@@ -119,7 +119,7 @@ static inline
void trace_likely_condition(struct ftrace_likely_data *f, int val, int expect)
{
}
-#endif /* CONFIG_UNLIKELY_TRACER */
+#endif /* CONFIG_BRANCH_TRACER */
void ftrace_likely_update(struct ftrace_likely_data *f, int val, int expect)
{
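Based on the correct/incorrect counters in struct ftrace_branch_data, the bookkeeping ftrace_likely_update() is expected to do is simply to record whether the branch outcome matched its likely()/unlikely() hint (and, per the stub above, presumably hand the event to trace_likely_condition() when the branch tracer is enabled). The sketch below is an illustrative stand-in, not the real function body:

/*
 * Sketch of the expected bookkeeping: count whether the annotated
 * branch agreed with its hint.  Illustrative only.
 */
#include <linux/compiler.h>

static void example_likely_update(struct ftrace_branch_data *f,
				  int val, int expect)
{
	if (val == expect)
		f->correct++;
	else
		f->incorrect++;
}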