215 files changed, 5297 insertions(+), 4179 deletions(-)
diff --git a/Documentation/debugging-via-ohci1394.txt b/Documentation/debugging-via-ohci1394.txt index 59a91e5c690..611f5a5499b 100644 --- a/Documentation/debugging-via-ohci1394.txt +++ b/Documentation/debugging-via-ohci1394.txt @@ -64,14 +64,14 @@ be used to view the printk buffer of a remote machine, even with live update. Bernhard Kaindl enhanced firescope to support accessing 64-bit machines from 32-bit firescope and vice versa: -- ftp://ftp.suse.de/private/bk/firewire/tools/firescope-0.2.2.tar.bz2 +- http://halobates.de/firewire/firescope-0.2.2.tar.bz2 and he implemented fast system dump (alpha version - read README.txt): -- ftp://ftp.suse.de/private/bk/firewire/tools/firedump-0.1.tar.bz2 +- http://halobates.de/firewire/firedump-0.1.tar.bz2 There is also a gdb proxy for firewire which allows to use gdb to access data which can be referenced from symbols found by gdb in vmlinux: -- ftp://ftp.suse.de/private/bk/firewire/tools/fireproxy-0.33.tar.bz2 +- http://halobates.de/firewire/fireproxy-0.33.tar.bz2 The latest version of this gdb proxy (fireproxy-0.34) can communicate (not yet stable) with kgdb over an memory-based communication module (kgdbom). @@ -178,7 +178,7 @@ Step-by-step instructions for using firescope with early OHCI initialization: Notes ----- -Documentation and specifications: ftp://ftp.suse.de/private/bk/firewire/docs +Documentation and specifications: http://halobates.de/firewire/ FireWire is a trademark of Apple Inc. - for more information please refer to: http://en.wikipedia.org/wiki/FireWire diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt index 89a47b5aff0..04e6c819b28 100644 --- a/Documentation/feature-removal-schedule.txt +++ b/Documentation/feature-removal-schedule.txt @@ -451,3 +451,33 @@ Why: OSS sound_core grabs all legacy minors (0-255) of SOUND_MAJOR will also allow making ALSA OSS emulation independent of sound_core. The dependency will be broken then too. Who: Tejun Heo <tj@kernel.org> + +---------------------------- + +What: Support for VMware's guest paravirtuliazation technique [VMI] will be + dropped. +When: 2.6.37 or earlier. +Why: With the recent innovations in CPU hardware acceleration technologies + from Intel and AMD, VMware ran a few experiments to compare these + techniques to guest paravirtualization technique on VMware's platform. + These hardware assisted virtualization techniques have outperformed the + performance benefits provided by VMI in most of the workloads. VMware + expects that these hardware features will be ubiquitous in a couple of + years, as a result, VMware has started a phased retirement of this + feature from the hypervisor. We will be removing this feature from the + Kernel too. Right now we are targeting 2.6.37 but can retire earlier if + technical reasons (read opportunity to remove major chunk of pvops) + arise. + + Please note that VMI has always been an optimization and non-VMI kernels + still work fine on VMware's platform. + Latest versions of VMware's product which support VMI are, + Workstation 7.0 and VSphere 4.0 on ESX side, future maintainence + releases for these products will continue supporting VMI. 
+ + For more details about VMI retirement take a look at this, + http://blogs.vmware.com/guestosguide/2009/09/vmi-retirement.html + +Who: Alok N Kataria <akataria@vmware.com> + +---------------------------- diff --git a/Documentation/filesystems/ext3.txt b/Documentation/filesystems/ext3.txt index 570f9bd9be2..05d5cf1d743 100644 --- a/Documentation/filesystems/ext3.txt +++ b/Documentation/filesystems/ext3.txt @@ -123,10 +123,18 @@ resuid=n The user ID which may use the reserved blocks. sb=n Use alternate superblock at this location. -quota -noquota -grpquota -usrquota +quota These options are ignored by the filesystem. They +noquota are used only by quota tools to recognize volumes +grpquota where quota should be turned on. See documentation +usrquota in the quota-tools package for more details + (http://sourceforge.net/projects/linuxquota). + +jqfmt=<quota type> These options tell filesystem details about quota +usrjquota=<file> so that quota information can be properly updated +grpjquota=<file> during journal replay. They replace the above + quota options. See documentation in the quota-tools + package for more details + (http://sourceforge.net/projects/linuxquota). bh (*) ext3 associates buffer heads to data pages to nobh (a) cache disk block mapping information diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 9107b387e91..c8d1b2be28e 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -779,6 +779,13 @@ and is between 256 and 4096 characters. It is defined in the file by the set_ftrace_notrace file in the debugfs tracing directory. + ftrace_graph_filter=[function-list] + [FTRACE] Limit the top level callers functions traced + by the function graph tracer at boot up. + function-list is a comma separated list of functions + that can be changed at run time by the + set_graph_function file in the debugfs tracing directory. + gamecon.map[2|3]= [HW,JOY] Multisystem joystick and NES/SNES/PSX pad support via parallel port (up to 5 devices per port) diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt index 75fddb40f41..4c7f9aee5c4 100644 --- a/Documentation/sound/alsa/HD-Audio-Models.txt +++ b/Documentation/sound/alsa/HD-Audio-Models.txt @@ -359,6 +359,7 @@ STAC9227/9228/9229/927x 5stack-no-fp D965 5stack without front panel dell-3stack Dell Dimension E520 dell-bios Fixes with Dell BIOS setup + volknob Fixes with volume-knob widget 0x24 auto BIOS setup (default) STAC92HD71B* diff --git a/Documentation/trace/ftrace-design.txt b/Documentation/trace/ftrace-design.txt index 7003e10f10f..641a1ef2a7f 100644 --- a/Documentation/trace/ftrace-design.txt +++ b/Documentation/trace/ftrace-design.txt @@ -213,10 +213,19 @@ If you can't trace NMI functions, then skip this option. <details to be filled> -HAVE_FTRACE_SYSCALLS +HAVE_SYSCALL_TRACEPOINTS --------------------- -<details to be filled> +You need very few things to get the syscalls tracing in an arch. + +- Have a NR_syscalls variable in <asm/unistd.h> that provides the number + of syscalls supported by the arch. +- Implement arch_syscall_addr() that resolves a syscall address from a + syscall number. +- Support the TIF_SYSCALL_TRACEPOINT thread flags +- Put the trace_sys_enter() and trace_sys_exit() tracepoints calls from ptrace + in the ptrace syscalls tracing path. +- Tag this arch as HAVE_SYSCALL_TRACEPOINTS. 
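+
+The arch_syscall_addr() helper mentioned above can be very small on an
+architecture whose syscall handlers live in a flat sys_call_table[]; a
+minimal sketch, mirroring the s390 implementation added later in this
+diff (the element type of sys_call_table varies by architecture):
+
+	extern unsigned int sys_call_table[];
+
+	unsigned long __init arch_syscall_addr(int nr)
+	{
+		/* resolve the handler address for syscall number nr */
+		return (unsigned long)sys_call_table[nr];
+	}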
HAVE_FTRACE_MCOUNT_RECORD diff --git a/MAINTAINERS b/MAINTAINERS index cf690913f2b..d5eb8c13ef0 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -577,6 +577,11 @@ M: Mike Rapoport <mike@compulab.co.il> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained +ARM/CONTEC MICRO9 MACHINE SUPPORT +M: Hubert Feurstein <hubert.feurstein@contec.at> +S: Maintained +F: arch/arm/mach-ep93xx/micro9.c + ARM/CORGI MACHINE SUPPORT M: Richard Purdie <rpurdie@rpsys.net> S: Maintained @@ -2610,6 +2615,7 @@ L: linux1394-devel@lists.sourceforge.net W: http://www.linux1394.org/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6.git S: Maintained +F: Documentation/debugging-via-ohci1394.txt F: drivers/ieee1394/ IEEE 1394 RAW I/O DRIVER @@ -179,46 +179,9 @@ SUBARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \ # Alternatively CROSS_COMPILE can be set in the environment. # Default value for CROSS_COMPILE is not to prefix executables # Note: Some architectures assign CROSS_COMPILE in their arch/*/Makefile -# -# To force ARCH and CROSS_COMPILE settings include kernel.* files -# in the kernel tree - do not patch this file. export KBUILD_BUILDHOST := $(SUBARCH) - -# Kbuild save the ARCH and CROSS_COMPILE setting in kernel.* files. -# Restore these settings and check that user did not specify -# conflicting values. - -saved_arch := $(shell cat include/generated/kernel.arch 2> /dev/null) -saved_cross := $(shell cat include/generated/kernel.cross 2> /dev/null) - -ifneq ($(CROSS_COMPILE),) - ifneq ($(saved_cross),) - ifneq ($(CROSS_COMPILE),$(saved_cross)) - $(error CROSS_COMPILE changed from \ - "$(saved_cross)" to \ - to "$(CROSS_COMPILE)". \ - Use "make mrproper" to fix it up) - endif - endif -else - CROSS_COMPILE := $(saved_cross) -endif - -ifneq ($(ARCH),) - ifneq ($(saved_arch),) - ifneq ($(saved_arch),$(ARCH)) - $(error ARCH changed from \ - "$(saved_arch)" to "$(ARCH)". 
\ - Use "make mrproper" to fix it up) - endif - endif -else - ifneq ($(saved_arch),) - ARCH := $(saved_arch) - else - ARCH := $(SUBARCH) - endif -endif +ARCH ?= $(SUBARCH) +CROSS_COMPILE ?= # Architecture as present in compile.h UTS_MACHINE := $(ARCH) @@ -483,11 +446,6 @@ ifeq ($(config-targets),1) include $(srctree)/arch/$(SRCARCH)/Makefile export KBUILD_DEFCONFIG KBUILD_KCONFIG -# save ARCH & CROSS_COMPILE settings -$(shell mkdir -p include/generated && \ - echo $(ARCH) > include/generated/kernel.arch && \ - echo $(CROSS_COMPILE) > include/generated/kernel.cross) - config: scripts_basic outputmakefile FORCE $(Q)mkdir -p include/linux include/config $(Q)$(MAKE) $(build)=scripts/kconfig $@ diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h index 63a481fbbed..338ff19ae44 100644 --- a/arch/arm/include/asm/bitops.h +++ b/arch/arm/include/asm/bitops.h @@ -84,7 +84,7 @@ ____atomic_test_and_set_bit(unsigned int bit, volatile unsigned long *p) *p = res | mask; raw_local_irq_restore(flags); - return res & mask; + return (res & mask) != 0; } static inline int @@ -101,7 +101,7 @@ ____atomic_test_and_clear_bit(unsigned int bit, volatile unsigned long *p) *p = res & ~mask; raw_local_irq_restore(flags); - return res & mask; + return (res & mask) != 0; } static inline int @@ -118,7 +118,7 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p) *p = res ^ mask; raw_local_irq_restore(flags); - return res & mask; + return (res & mask) != 0; } #include <asm-generic/bitops/non-atomic.h> diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 467b69ed102..f838f36eb70 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -45,21 +45,21 @@ static int __init user_debug_setup(char *str) __setup("user_debug=", user_debug_setup); #endif -static void dump_mem(const char *str, unsigned long bottom, unsigned long top); +static void dump_mem(const char *, const char *, unsigned long, unsigned long); void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame) { #ifdef CONFIG_KALLSYMS - printk("[<%08lx>] ", where); - print_symbol("(%s) ", where); - printk("from [<%08lx>] ", from); - print_symbol("(%s)\n", from); + char sym1[KSYM_SYMBOL_LEN], sym2[KSYM_SYMBOL_LEN]; + sprint_symbol(sym1, where); + sprint_symbol(sym2, from); + printk("[<%08lx>] (%s) from [<%08lx>] (%s)\n", where, sym1, from, sym2); #else printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from); #endif if (in_exception_text(where)) - dump_mem("Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs)); + dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs)); } #ifndef CONFIG_ARM_UNWIND @@ -81,9 +81,10 @@ static int verify_stack(unsigned long sp) /* * Dump out the contents of some memory nicely... 
*/ -static void dump_mem(const char *str, unsigned long bottom, unsigned long top) +static void dump_mem(const char *lvl, const char *str, unsigned long bottom, + unsigned long top) { - unsigned long p = bottom & ~31; + unsigned long first; mm_segment_t fs; int i; @@ -95,33 +96,37 @@ static void dump_mem(const char *str, unsigned long bottom, unsigned long top) fs = get_fs(); set_fs(KERNEL_DS); - printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top); + printk("%s%s(0x%08lx to 0x%08lx)\n", lvl, str, bottom, top); - for (p = bottom & ~31; p < top;) { - printk("%04lx: ", p & 0xffff); + for (first = bottom & ~31; first < top; first += 32) { + unsigned long p; + char str[sizeof(" 12345678") * 8 + 1]; - for (i = 0; i < 8; i++, p += 4) { - unsigned int val; + memset(str, ' ', sizeof(str)); + str[sizeof(str) - 1] = '\0'; - if (p < bottom || p >= top) - printk(" "); - else { - __get_user(val, (unsigned long *)p); - printk("%08x ", val); + for (p = first, i = 0; i < 8 && p < top; i++, p += 4) { + if (p >= bottom && p < top) { + unsigned long val; + if (__get_user(val, (unsigned long *)p) == 0) + sprintf(str + i * 9, " %08lx", val); + else + sprintf(str + i * 9, " ????????"); } } - printk ("\n"); + printk("%s%04lx:%s\n", lvl, first & 0xffff, str); } set_fs(fs); } -static void dump_instr(struct pt_regs *regs) +static void dump_instr(const char *lvl, struct pt_regs *regs) { unsigned long addr = instruction_pointer(regs); const int thumb = thumb_mode(regs); const int width = thumb ? 4 : 8; mm_segment_t fs; + char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str; int i; /* @@ -132,7 +137,6 @@ static void dump_instr(struct pt_regs *regs) fs = get_fs(); set_fs(KERNEL_DS); - printk("Code: "); for (i = -4; i < 1; i++) { unsigned int val, bad; @@ -142,13 +146,14 @@ static void dump_instr(struct pt_regs *regs) bad = __get_user(val, &((u32 *)addr)[i]); if (!bad) - printk(i == 0 ? "(%0*x) " : "%0*x ", width, val); + p += sprintf(p, i == 0 ? 
"(%0*x) " : "%0*x ", + width, val); else { - printk("bad PC value."); + p += sprintf(p, "bad PC value"); break; } } - printk("\n"); + printk("%sCode: %s\n", lvl, str); set_fs(fs); } @@ -224,18 +229,19 @@ static void __die(const char *str, int err, struct thread_info *thread, struct p struct task_struct *tsk = thread->task; static int die_counter; - printk("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n", + printk(KERN_EMERG "Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n", str, err, ++die_counter); + sysfs_printk_last_file(); print_modules(); __show_regs(regs); - printk("Process %s (pid: %d, stack limit = 0x%p)\n", - tsk->comm, task_pid_nr(tsk), thread + 1); + printk(KERN_EMERG "Process %.*s (pid: %d, stack limit = 0x%p)\n", + TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), thread + 1); if (!user_mode(regs) || in_interrupt()) { - dump_mem("Stack: ", regs->ARM_sp, + dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp, THREAD_SIZE + (unsigned long)task_stack_page(tsk)); dump_backtrace(regs, tsk); - dump_instr(regs); + dump_instr(KERN_EMERG, regs); } } @@ -250,13 +256,14 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err) oops_enter(); - console_verbose(); spin_lock_irq(&die_lock); + console_verbose(); bust_spinlocks(1); __die(str, err, thread, regs); bust_spinlocks(0); add_taint(TAINT_DIE); spin_unlock_irq(&die_lock); + oops_exit(); if (in_interrupt()) panic("Fatal exception in interrupt"); @@ -264,7 +271,6 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err) if (panic_on_oops) panic("Fatal exception"); - oops_exit(); do_exit(SIGSEGV); } @@ -349,7 +355,7 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs) if (user_debug & UDBG_UNDEFINED) { printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n", current->comm, task_pid_nr(current), pc); - dump_instr(regs); + dump_instr(KERN_INFO, regs); } #endif @@ -400,7 +406,7 @@ static int bad_syscall(int n, struct pt_regs *regs) if (user_debug & UDBG_SYSCALL) { printk(KERN_ERR "[%d] %s: obsolete system call %08x.\n", task_pid_nr(current), current->comm, n); - dump_instr(regs); + dump_instr(KERN_ERR, regs); } #endif @@ -579,7 +585,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs) if (user_debug & UDBG_SYSCALL) { printk("[%d] %s: arm syscall %d\n", task_pid_nr(current), current->comm, no); - dump_instr(regs); + dump_instr("", regs); if (user_mode(regs)) { __show_regs(regs); c_backtrace(regs->ARM_fp, processor_mode(regs)); @@ -656,7 +662,7 @@ baddataabort(int code, unsigned long instr, struct pt_regs *regs) if (user_debug & UDBG_BADABORT) { printk(KERN_ERR "[%d] %s: bad data abort: code %d instr 0x%08lx\n", task_pid_nr(current), current->comm, code, instr); - dump_instr(regs); + dump_instr(KERN_ERR, regs); show_pte(current->mm, addr); } #endif diff --git a/arch/arm/mach-bcmring/core.c b/arch/arm/mach-bcmring/core.c index 4b4f69251b3..e590bbe0a7b 100644 --- a/arch/arm/mach-bcmring/core.c +++ b/arch/arm/mach-bcmring/core.c @@ -271,12 +271,12 @@ static struct irqaction bcmring_timer_irq = { .handler = bcmring_timer_interrupt, }; -static cycle_t bcmring_get_cycles_timer1(void) +static cycle_t bcmring_get_cycles_timer1(struct clocksource *cs) { return ~readl(TIMER1_VA_BASE + TIMER_VALUE); } -static cycle_t bcmring_get_cycles_timer3(void) +static cycle_t bcmring_get_cycles_timer3(struct clocksource *cs) { return ~readl(TIMER3_VA_BASE + TIMER_VALUE); } diff --git a/arch/arm/mach-bcmring/include/mach/system.h b/arch/arm/mach-bcmring/include/mach/system.h index cdbf93c694a..38b37060d42 100644 --- 
a/arch/arm/mach-bcmring/include/mach/system.h +++ b/arch/arm/mach-bcmring/include/mach/system.h @@ -29,7 +29,7 @@ static inline void arch_idle(void) cpu_do_idle(); } -static inline void arch_reset(char mode, char *cmd) +static inline void arch_reset(char mode, const char *cmd) { printk("arch_reset:%c %x\n", mode, bcmring_arch_warm_reboot); diff --git a/arch/arm/mach-ep93xx/Kconfig b/arch/arm/mach-ep93xx/Kconfig index d7291c682a6..9167c3d2a5e 100644 --- a/arch/arm/mach-ep93xx/Kconfig +++ b/arch/arm/mach-ep93xx/Kconfig @@ -17,13 +17,31 @@ config EP93XX_SDCE3_SYNC_PHYS_OFFSET bool "0x00000000 - SDCE3/SyncBoot" help Select this option if you want support for EP93xx boards with the - first SDRAM bank at 0x00000000 + first SDRAM bank at 0x00000000. config EP93XX_SDCE0_PHYS_OFFSET bool "0xc0000000 - SDCEO" help Select this option if you want support for EP93xx boards with the - first SDRAM bank at 0xc0000000 + first SDRAM bank at 0xc0000000. + +config EP93XX_SDCE1_PHYS_OFFSET + bool "0xd0000000 - SDCE1" + help + Select this option if you want support for EP93xx boards with the + first SDRAM bank at 0xd0000000. + +config EP93XX_SDCE2_PHYS_OFFSET + bool "0xe0000000 - SDCE2" + help + Select this option if you want support for EP93xx boards with the + first SDRAM bank at 0xe0000000. + +config EP93XX_SDCE3_ASYNC_PHYS_OFFSET + bool "0xf0000000 - SDCE3/AsyncBoot" + help + Select this option if you want support for EP93xx boards with the + first SDRAM bank at 0xf0000000. endchoice @@ -112,28 +130,36 @@ config MACH_MICRO9 bool config MACH_MICRO9H - bool "Support Contec Hypercontrol Micro9-H" + bool "Support Contec Micro9-High" depends on EP93XX_SDCE3_SYNC_PHYS_OFFSET select MACH_MICRO9 help Say 'Y' here if you want your kernel to support the - Contec Hypercontrol Micro9-H board. + Contec Micro9-High board. config MACH_MICRO9M - bool "Support Contec Hypercontrol Micro9-M" - depends on EP93XX_SDCE3_SYNC_PHYS_OFFSET + bool "Support Contec Micro9-Mid" + depends on EP93XX_SDCE3_ASYNC_PHYS_OFFSET select MACH_MICRO9 help Say 'Y' here if you want your kernel to support the - Contec Hypercontrol Micro9-M board. + Contec Micro9-Mid board. config MACH_MICRO9L - bool "Support Contec Hypercontrol Micro9-L" + bool "Support Contec Micro9-Lite" depends on EP93XX_SDCE3_SYNC_PHYS_OFFSET select MACH_MICRO9 help Say 'Y' here if you want your kernel to support the - Contec Hypercontrol Micro9-L board. + Contec Micro9-Lite board. + +config MACH_MICRO9S + bool "Support Contec Micro9-Slim" + depends on EP93XX_SDCE3_ASYNC_PHYS_OFFSET + select MACH_MICRO9 + help + Say 'Y' here if you want your kernel to support the + Contec Micro9-Slim board. 
config MACH_TS72XX bool "Support Technologic Systems TS-72xx SBC" diff --git a/arch/arm/mach-ep93xx/Makefile.boot b/arch/arm/mach-ep93xx/Makefile.boot index 27a085a8f12..0ad33f15c62 100644 --- a/arch/arm/mach-ep93xx/Makefile.boot +++ b/arch/arm/mach-ep93xx/Makefile.boot @@ -3,3 +3,12 @@ params_phys-$(CONFIG_EP93XX_SDCE3_SYNC_PHYS_OFFSET) := 0x00000100 zreladdr-$(CONFIG_EP93XX_SDCE0_PHYS_OFFSET) := 0xc0008000 params_phys-$(CONFIG_EP93XX_SDCE0_PHYS_OFFSET) := 0xc0000100 + + zreladdr-$(CONFIG_EP93XX_SDCE1_PHYS_OFFSET) := 0xd0008000 +params_phys-$(CONFIG_EP93XX_SDCE1_PHYS_OFFSET) := 0xd0000100 + + zreladdr-$(CONFIG_EP93XX_SDCE2_PHYS_OFFSET) := 0xe0008000 +params_phys-$(CONFIG_EP93XX_SDCE2_PHYS_OFFSET) := 0xe0000100 + + zreladdr-$(CONFIG_EP93XX_SDCE3_ASYNC_PHYS_OFFSET) := 0xf0008000 +params_phys-$(CONFIG_EP93XX_SDCE3_ASYNC_PHYS_OFFSET) := 0xf0000100 diff --git a/arch/arm/mach-ep93xx/clock.c b/arch/arm/mach-ep93xx/clock.c index dda19cd7619..1d0f9d8aff2 100644 --- a/arch/arm/mach-ep93xx/clock.c +++ b/arch/arm/mach-ep93xx/clock.c @@ -16,13 +16,16 @@ #include <linux/module.h> #include <linux/string.h> #include <linux/io.h> +#include <linux/spinlock.h> + +#include <mach/hardware.h> #include <asm/clkdev.h> #include <asm/div64.h> -#include <mach/hardware.h> struct clk { + struct clk *parent; unsigned long rate; int users; int sw_locked; @@ -39,40 +42,60 @@ static unsigned long get_uart_rate(struct clk *clk); static int set_keytchclk_rate(struct clk *clk, unsigned long rate); static int set_div_rate(struct clk *clk, unsigned long rate); + +static struct clk clk_xtali = { + .rate = EP93XX_EXT_CLK_RATE, +}; static struct clk clk_uart1 = { + .parent = &clk_xtali, .sw_locked = 1, .enable_reg = EP93XX_SYSCON_DEVCFG, .enable_mask = EP93XX_SYSCON_DEVCFG_U1EN, .get_rate = get_uart_rate, }; static struct clk clk_uart2 = { + .parent = &clk_xtali, .sw_locked = 1, .enable_reg = EP93XX_SYSCON_DEVCFG, .enable_mask = EP93XX_SYSCON_DEVCFG_U2EN, .get_rate = get_uart_rate, }; static struct clk clk_uart3 = { + .parent = &clk_xtali, .sw_locked = 1, .enable_reg = EP93XX_SYSCON_DEVCFG, .enable_mask = EP93XX_SYSCON_DEVCFG_U3EN, .get_rate = get_uart_rate, }; -static struct clk clk_pll1; -static struct clk clk_f; -static struct clk clk_h; -static struct clk clk_p; -static struct clk clk_pll2; +static struct clk clk_pll1 = { + .parent = &clk_xtali, +}; +static struct clk clk_f = { + .parent = &clk_pll1, +}; +static struct clk clk_h = { + .parent = &clk_pll1, +}; +static struct clk clk_p = { + .parent = &clk_pll1, +}; +static struct clk clk_pll2 = { + .parent = &clk_xtali, +}; static struct clk clk_usb_host = { + .parent = &clk_pll2, .enable_reg = EP93XX_SYSCON_PWRCNT, .enable_mask = EP93XX_SYSCON_PWRCNT_USH_EN, }; static struct clk clk_keypad = { + .parent = &clk_xtali, .sw_locked = 1, .enable_reg = EP93XX_SYSCON_KEYTCHCLKDIV, .enable_mask = EP93XX_SYSCON_KEYTCHCLKDIV_KEN, .set_rate = set_keytchclk_rate, }; static struct clk clk_pwm = { + .parent = &clk_xtali, .rate = EP93XX_EXT_CLK_RATE, }; @@ -85,50 +108,62 @@ static struct clk clk_video = { /* DMA Clocks */ static struct clk clk_m2p0 = { + .parent = &clk_h, .enable_reg = EP93XX_SYSCON_PWRCNT, .enable_mask = EP93XX_SYSCON_PWRCNT_DMA_M2P0, }; static struct clk clk_m2p1 = { + .parent = &clk_h, .enable_reg = EP93XX_SYSCON_PWRCNT, .enable_mask = EP93XX_SYSCON_PWRCNT_DMA_M2P1, }; static struct clk clk_m2p2 = { + .parent = &clk_h, .enable_reg = EP93XX_SYSCON_PWRCNT, .enable_mask = EP93XX_SYSCON_PWRCNT_DMA_M2P2, }; static struct clk clk_m2p3 = { + .parent = &clk_h, .enable_reg = 
EP93XX_SYSCON_PWRCNT, .enable_mask = EP93XX_SYSCON_PWRCNT_DMA_M2P3, }; static struct clk clk_m2p4 = { + .parent = &clk_h, .enable_reg = EP93XX_SYSCON_PWRCNT, .enable_mask = EP93XX_SYSCON_PWRCNT_DMA_M2P4, }; static struct clk clk_m2p5 = { + .parent = &clk_h, .enable_reg = EP93XX_SYSCON_PWRCNT, .enable_mask = EP93XX_SYSCON_PWRCNT_DMA_M2P5, }; static struct clk clk_m2p6 = { + .parent = &clk_h, .enable_reg = EP93XX_SYSCON_PWRCNT, .enable_mask = EP93XX_SYSCON_PWRCNT_DMA_M2P6, }; static struct clk clk_m2p7 = { + .parent = &clk_h, .enable_reg = EP93XX_SYSCON_PWRCNT, .enable_mask = EP93XX_SYSCON_PWRCNT_DMA_M2P7, }; static struct clk clk_m2p8 = { + .parent = &clk_h, .enable_reg = EP93XX_SYSCON_PWRCNT, .enable_mask = EP93XX_SYSCON_PWRCNT_DMA_M2P8, }; static struct clk clk_m2p9 = { + .parent = &clk_h, .enable_reg = EP93XX_SYSCON_PWRCNT, .enable_mask = EP93XX_SYSCON_PWRCNT_DMA_M2P9, }; static struct clk clk_m2m0 = { + .parent = &clk_h, .enable_reg = EP93XX_SYSCON_PWRCNT, .enable_mask = EP93XX_SYSCON_PWRCNT_DMA_M2M0, }; static struct clk clk_m2m1 = { + .parent = &clk_h, .enable_reg = EP93XX_SYSCON_PWRCNT, .enable_mask = EP93XX_SYSCON_PWRCNT_DMA_M2M1, }; @@ -137,6 +172,7 @@ static struct clk clk_m2m1 = { { .dev_id = dev, .con_id = con, .clk = ck } static struct clk_lookup clocks[] = { + INIT_CK(NULL, "xtali", &clk_xtali), INIT_CK("apb:uart1", NULL, &clk_uart1), INIT_CK("apb:uart2", NULL, &clk_uart2), INIT_CK("apb:uart3", NULL, &clk_uart3), @@ -163,48 +199,84 @@ static struct clk_lookup clocks[] = { INIT_CK(NULL, "m2m1", &clk_m2m1), }; +static DEFINE_SPINLOCK(clk_lock); + +static void __clk_enable(struct clk *clk) +{ + if (!clk->users++) { + if (clk->parent) + __clk_enable(clk->parent); + + if (clk->enable_reg) { + u32 v; + + v = __raw_readl(clk->enable_reg); + v |= clk->enable_mask; + if (clk->sw_locked) + ep93xx_syscon_swlocked_write(v, clk->enable_reg); + else + __raw_writel(v, clk->enable_reg); + } + } +} int clk_enable(struct clk *clk) { - if (!clk->users++ && clk->enable_reg) { - u32 value; + unsigned long flags; - value = __raw_readl(clk->enable_reg); - value |= clk->enable_mask; - if (clk->sw_locked) - ep93xx_syscon_swlocked_write(value, clk->enable_reg); - else - __raw_writel(value, clk->enable_reg); - } + if (!clk) + return -EINVAL; + + spin_lock_irqsave(&clk_lock, flags); + __clk_enable(clk); + spin_unlock_irqrestore(&clk_lock, flags); return 0; } EXPORT_SYMBOL(clk_enable); -void clk_disable(struct clk *clk) +static void __clk_disable(struct clk *clk) { - if (!--clk->users && clk->enable_reg) { - u32 value; + if (!--clk->users) { + if (clk->enable_reg) { + u32 v; + + v = __raw_readl(clk->enable_reg); + v &= ~clk->enable_mask; + if (clk->sw_locked) + ep93xx_syscon_swlocked_write(v, clk->enable_reg); + else + __raw_writel(v, clk->enable_reg); + } - value = __raw_readl(clk->enable_reg); - value &= ~clk->enable_mask; - if (clk->sw_locked) - ep93xx_syscon_swlocked_write(value, clk->enable_reg); - else - __raw_writel(value, clk->enable_reg); + if (clk->parent) + __clk_disable(clk->parent); } } + +void clk_disable(struct clk *clk) +{ + unsigned long flags; + + if (!clk) + return; + + spin_lock_irqsave(&clk_lock, flags); + __clk_disable(clk); + spin_unlock_irqrestore(&clk_lock, flags); +} EXPORT_SYMBOL(clk_disable); static unsigned long get_uart_rate(struct clk *clk) { + unsigned long rate = clk_get_rate(clk->parent); u32 value; value = __raw_readl(EP93XX_SYSCON_PWRCNT); if (value & EP93XX_SYSCON_PWRCNT_UARTBAUD) - return EP93XX_EXT_CLK_RATE; + return rate; else - return EP93XX_EXT_CLK_RATE / 2; + 
return rate / 2; } unsigned long clk_get_rate(struct clk *clk) @@ -244,16 +316,16 @@ static int set_keytchclk_rate(struct clk *clk, unsigned long rate) return 0; } -static unsigned long calc_clk_div(unsigned long rate, int *psel, int *esel, - int *pdiv, int *div) +static int calc_clk_div(struct clk *clk, unsigned long rate, + int *psel, int *esel, int *pdiv, int *div) { - unsigned long max_rate, best_rate = 0, - actual_rate = 0, mclk_rate = 0, rate_err = -1; + struct clk *mclk; + unsigned long max_rate, actual_rate, mclk_rate, rate_err = -1; int i, found = 0, __div = 0, __pdiv = 0; /* Don't exceed the maximum rate */ max_rate = max(max(clk_pll1.rate / 4, clk_pll2.rate / 4), - (unsigned long)EP93XX_EXT_CLK_RATE / 4); + clk_xtali.rate / 4); rate = min(rate, max_rate); /* @@ -267,11 +339,12 @@ static unsigned long calc_clk_div(unsigned long rate, int *psel, int *esel, */ for (i = 0; i < 3; i++) { if (i == 0) - mclk_rate = EP93XX_EXT_CLK_RATE * 2; + mclk = &clk_xtali; else if (i == 1) - mclk_rate = clk_pll1.rate * 2; - else if (i == 2) - mclk_rate = clk_pll2.rate * 2; + mclk = &clk_pll1; + else + mclk = &clk_pll2; + mclk_rate = mclk->rate * 2; /* Try each predivider value */ for (__pdiv = 4; __pdiv <= 6; __pdiv++) { @@ -286,7 +359,8 @@ static unsigned long calc_clk_div(unsigned long rate, int *psel, int *esel, *div = __div; *psel = (i == 2); *esel = (i != 0); - best_rate = actual_rate; + clk->parent = mclk; + clk->rate = actual_rate; rate_err = abs(actual_rate - rate); found = 1; } @@ -294,21 +368,19 @@ static unsigned long calc_clk_div(unsigned long rate, int *psel, int *esel, } if (!found) - return 0; + return -EINVAL; - return best_rate; + return 0; } static int set_div_rate(struct clk *clk, unsigned long rate) { - unsigned long actual_rate; - int psel = 0, esel = 0, pdiv = 0, div = 0; + int err, psel = 0, esel = 0, pdiv = 0, div = 0; u32 val; - actual_rate = calc_clk_div(rate, &psel, &esel, &pdiv, &div); - if (actual_rate == 0) - return -EINVAL; - clk->rate = actual_rate; + err = calc_clk_div(clk, rate, &psel, &esel, &pdiv, &div); + if (err) + return err; /* Clear the esel, psel, pdiv and div bits */ val = __raw_readl(clk->enable_reg); @@ -344,7 +416,7 @@ static unsigned long calc_pll_rate(u32 config_word) unsigned long long rate; int i; - rate = EP93XX_EXT_CLK_RATE; + rate = clk_xtali.rate; rate *= ((config_word >> 11) & 0x1f) + 1; /* X1FBD */ rate *= ((config_word >> 5) & 0x3f) + 1; /* X2FBD */ do_div(rate, (config_word & 0x1f) + 1); /* X2IPD */ @@ -377,7 +449,7 @@ static int __init ep93xx_clock_init(void) value = __raw_readl(EP93XX_SYSCON_CLOCK_SET1); if (!(value & 0x00800000)) { /* PLL1 bypassed? */ - clk_pll1.rate = EP93XX_EXT_CLK_RATE; + clk_pll1.rate = clk_xtali.rate; } else { clk_pll1.rate = calc_pll_rate(value); } @@ -388,7 +460,7 @@ static int __init ep93xx_clock_init(void) value = __raw_readl(EP93XX_SYSCON_CLOCK_SET2); if (!(value & 0x00080000)) { /* PLL2 bypassed? */ - clk_pll2.rate = EP93XX_EXT_CLK_RATE; + clk_pll2.rate = clk_xtali.rate; } else if (value & 0x00040000) { /* PLL2 enabled? 
*/ clk_pll2.rate = calc_pll_rate(value); } else { diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c index f7ebed942f6..f95dc160c34 100644 --- a/arch/arm/mach-ep93xx/core.c +++ b/arch/arm/mach-ep93xx/core.c @@ -550,13 +550,11 @@ void __init ep93xx_register_eth(struct ep93xx_eth_data *data, int copy_addr) platform_device_register(&ep93xx_eth_device); } -static struct i2c_gpio_platform_data ep93xx_i2c_data = { - .sda_pin = EP93XX_GPIO_LINE_EEDAT, - .sda_is_open_drain = 0, - .scl_pin = EP93XX_GPIO_LINE_EECLK, - .scl_is_open_drain = 0, - .udelay = 2, -}; + +/************************************************************************* + * EP93xx i2c peripheral handling + *************************************************************************/ +static struct i2c_gpio_platform_data ep93xx_i2c_data; static struct platform_device ep93xx_i2c_device = { .name = "i2c-gpio", @@ -564,8 +562,25 @@ static struct platform_device ep93xx_i2c_device = { .dev.platform_data = &ep93xx_i2c_data, }; -void __init ep93xx_register_i2c(struct i2c_board_info *devices, int num) +void __init ep93xx_register_i2c(struct i2c_gpio_platform_data *data, + struct i2c_board_info *devices, int num) { + /* + * Set the EEPROM interface pin drive type control. + * Defines the driver type for the EECLK and EEDAT pins as either + * open drain, which will require an external pull-up, or a normal + * CMOS driver. + */ + if (data->sda_is_open_drain && data->sda_pin != EP93XX_GPIO_LINE_EEDAT) + pr_warning("ep93xx: sda != EEDAT, open drain has no effect\n"); + if (data->scl_is_open_drain && data->scl_pin != EP93XX_GPIO_LINE_EECLK) + pr_warning("ep93xx: scl != EECLK, open drain has no effect\n"); + + __raw_writel((data->sda_is_open_drain << 1) | + (data->scl_is_open_drain << 0), + EP93XX_GPIO_EEDRIVE); + + ep93xx_i2c_data = *data; i2c_register_board_info(0, devices, num); platform_device_register(&ep93xx_i2c_device); } diff --git a/arch/arm/mach-ep93xx/edb93xx.c b/arch/arm/mach-ep93xx/edb93xx.c index 73145ae5d3f..ca71cf1a72a 100644 --- a/arch/arm/mach-ep93xx/edb93xx.c +++ b/arch/arm/mach-ep93xx/edb93xx.c @@ -27,8 +27,10 @@ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> -#include <linux/i2c.h> #include <linux/mtd/physmap.h> +#include <linux/gpio.h> +#include <linux/i2c.h> +#include <linux/i2c-gpio.h> #include <mach/hardware.h> @@ -76,13 +78,26 @@ static struct ep93xx_eth_data edb93xx_eth_data = { .phy_id = 1, }; -static struct i2c_board_info __initdata edb93xxa_i2c_data[] = { + +/************************************************************************* + * EDB93xx i2c peripheral handling + *************************************************************************/ +static struct i2c_gpio_platform_data edb93xx_i2c_gpio_data = { + .sda_pin = EP93XX_GPIO_LINE_EEDAT, + .sda_is_open_drain = 0, + .scl_pin = EP93XX_GPIO_LINE_EECLK, + .scl_is_open_drain = 0, + .udelay = 0, /* default to 100 kHz */ + .timeout = 0, /* default to 100 ms */ +}; + +static struct i2c_board_info __initdata edb93xxa_i2c_board_info[] = { { I2C_BOARD_INFO("isl1208", 0x6f), }, }; -static struct i2c_board_info __initdata edb93xx_i2c_data[] = { +static struct i2c_board_info __initdata edb93xx_i2c_board_info[] = { { I2C_BOARD_INFO("ds1337", 0x68), }, @@ -92,12 +107,14 @@ static void __init edb93xx_register_i2c(void) { if (machine_is_edb9302a() || machine_is_edb9307a() || machine_is_edb9315a()) { - ep93xx_register_i2c(edb93xxa_i2c_data, - ARRAY_SIZE(edb93xxa_i2c_data)); + ep93xx_register_i2c(&edb93xx_i2c_gpio_data, + 
edb93xxa_i2c_board_info, + ARRAY_SIZE(edb93xxa_i2c_board_info)); } else if (machine_is_edb9307() || machine_is_edb9312() || machine_is_edb9315()) { - ep93xx_register_i2c(edb93xx_i2c_data, - ARRAY_SIZE(edb93xx_i2c_data)); + ep93xx_register_i2c(&edb93xx_i2c_gpio_data + edb93xx_i2c_board_info, + ARRAY_SIZE(edb93xx_i2c_board_info)); } } diff --git a/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h b/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h index 0fbf87b1633..b1f937eda29 100644 --- a/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h +++ b/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h @@ -52,25 +52,27 @@ #define EP93XX_AHB_VIRT_BASE 0xfef00000 #define EP93XX_AHB_SIZE 0x00100000 +#define EP93XX_AHB_PHYS(x) (EP93XX_AHB_PHYS_BASE + (x)) #define EP93XX_AHB_IOMEM(x) IOMEM(EP93XX_AHB_VIRT_BASE + (x)) #define EP93XX_APB_PHYS_BASE 0x80800000 #define EP93XX_APB_VIRT_BASE 0xfed00000 #define EP93XX_APB_SIZE 0x00200000 +#define EP93XX_APB_PHYS(x) (EP93XX_APB_PHYS_BASE + (x)) #define EP93XX_APB_IOMEM(x) IOMEM(EP93XX_APB_VIRT_BASE + (x)) /* AHB peripherals */ #define EP93XX_DMA_BASE EP93XX_AHB_IOMEM(0x00000000) -#define EP93XX_ETHERNET_PHYS_BASE (EP93XX_AHB_PHYS_BASE + 0x00010000) +#define EP93XX_ETHERNET_PHYS_BASE EP93XX_AHB_PHYS(0x00010000) #define EP93XX_ETHERNET_BASE EP93XX_AHB_IOMEM(0x00010000) -#define EP93XX_USB_PHYS_BASE (EP93XX_AHB_PHYS_BASE + 0x00020000) +#define EP93XX_USB_PHYS_BASE EP93XX_AHB_PHYS(0x00020000) #define EP93XX_USB_BASE EP93XX_AHB_IOMEM(0x00020000) -#define EP93XX_RASTER_PHYS_BASE (EP93XX_AHB_PHYS_BASE + 0x00030000) +#define EP93XX_RASTER_PHYS_BASE EP93XX_AHB_PHYS(0x00030000) #define EP93XX_RASTER_BASE EP93XX_AHB_IOMEM(0x00030000) #define EP93XX_GRAPHICS_ACCEL_BASE EP93XX_AHB_IOMEM(0x00040000) @@ -112,21 +114,10 @@ #define EP93XX_GPIO_BASE EP93XX_APB_IOMEM(0x00040000) #define EP93XX_GPIO_REG(x) (EP93XX_GPIO_BASE + (x)) -#define EP93XX_GPIO_F_INT_TYPE1 EP93XX_GPIO_REG(0x4c) -#define EP93XX_GPIO_F_INT_TYPE2 EP93XX_GPIO_REG(0x50) -#define EP93XX_GPIO_F_INT_ACK EP93XX_GPIO_REG(0x54) -#define EP93XX_GPIO_F_INT_ENABLE EP93XX_GPIO_REG(0x58) #define EP93XX_GPIO_F_INT_STATUS EP93XX_GPIO_REG(0x5c) -#define EP93XX_GPIO_A_INT_TYPE1 EP93XX_GPIO_REG(0x90) -#define EP93XX_GPIO_A_INT_TYPE2 EP93XX_GPIO_REG(0x94) -#define EP93XX_GPIO_A_INT_ACK EP93XX_GPIO_REG(0x98) -#define EP93XX_GPIO_A_INT_ENABLE EP93XX_GPIO_REG(0x9c) #define EP93XX_GPIO_A_INT_STATUS EP93XX_GPIO_REG(0xa0) -#define EP93XX_GPIO_B_INT_TYPE1 EP93XX_GPIO_REG(0xac) -#define EP93XX_GPIO_B_INT_TYPE2 EP93XX_GPIO_REG(0xb0) -#define EP93XX_GPIO_B_INT_ACK EP93XX_GPIO_REG(0xb4) -#define EP93XX_GPIO_B_INT_ENABLE EP93XX_GPIO_REG(0xb8) #define EP93XX_GPIO_B_INT_STATUS EP93XX_GPIO_REG(0xbc) +#define EP93XX_GPIO_EEDRIVE EP93XX_GPIO_REG(0xc8) #define EP93XX_AAC_BASE EP93XX_APB_IOMEM(0x00080000) @@ -134,13 +125,13 @@ #define EP93XX_IRDA_BASE EP93XX_APB_IOMEM(0x000b0000) -#define EP93XX_UART1_PHYS_BASE (EP93XX_APB_PHYS_BASE + 0x000c0000) +#define EP93XX_UART1_PHYS_BASE EP93XX_APB_PHYS(0x000c0000) #define EP93XX_UART1_BASE EP93XX_APB_IOMEM(0x000c0000) -#define EP93XX_UART2_PHYS_BASE (EP93XX_APB_PHYS_BASE + 0x000d0000) +#define EP93XX_UART2_PHYS_BASE EP93XX_APB_PHYS(0x000d0000) #define EP93XX_UART2_BASE EP93XX_APB_IOMEM(0x000d0000) -#define EP93XX_UART3_PHYS_BASE (EP93XX_APB_PHYS_BASE + 0x000e0000) +#define EP93XX_UART3_PHYS_BASE EP93XX_APB_PHYS(0x000e0000) #define EP93XX_UART3_BASE EP93XX_APB_IOMEM(0x000e0000) #define EP93XX_KEY_MATRIX_BASE EP93XX_APB_IOMEM(0x000f0000) @@ -148,10 +139,10 @@ #define EP93XX_ADC_BASE EP93XX_APB_IOMEM(0x00100000) 
#define EP93XX_TOUCHSCREEN_BASE EP93XX_APB_IOMEM(0x00100000) -#define EP93XX_PWM_PHYS_BASE (EP93XX_APB_PHYS_BASE + 0x00110000) +#define EP93XX_PWM_PHYS_BASE EP93XX_APB_PHYS(0x00110000) #define EP93XX_PWM_BASE EP93XX_APB_IOMEM(0x00110000) -#define EP93XX_RTC_PHYS_BASE (EP93XX_APB_PHYS_BASE + 0x00120000) +#define EP93XX_RTC_PHYS_BASE EP93XX_APB_PHYS(0x00120000) #define EP93XX_RTC_BASE EP93XX_APB_IOMEM(0x00120000) #define EP93XX_SYSCON_BASE EP93XX_APB_IOMEM(0x00130000) @@ -218,6 +209,17 @@ #define EP93XX_SYSCON_KEYTCHCLKDIV_ADIV (1<<16) #define EP93XX_SYSCON_KEYTCHCLKDIV_KEN (1<<15) #define EP93XX_SYSCON_KEYTCHCLKDIV_KDIV (1<<0) +#define EP93XX_SYSCON_SYSCFG EP93XX_SYSCON_REG(0x9c) +#define EP93XX_SYSCON_SYSCFG_REV_MASK (0xf0000000) +#define EP93XX_SYSCON_SYSCFG_REV_SHIFT (28) +#define EP93XX_SYSCON_SYSCFG_SBOOT (1<<8) +#define EP93XX_SYSCON_SYSCFG_LCSN7 (1<<7) +#define EP93XX_SYSCON_SYSCFG_LCSN6 (1<<6) +#define EP93XX_SYSCON_SYSCFG_LASDO (1<<5) +#define EP93XX_SYSCON_SYSCFG_LEEDA (1<<4) +#define EP93XX_SYSCON_SYSCFG_LEECLK (1<<3) +#define EP93XX_SYSCON_SYSCFG_LCSN2 (1<<1) +#define EP93XX_SYSCON_SYSCFG_LCSN1 (1<<0) #define EP93XX_SYSCON_SWLOCK EP93XX_SYSCON_REG(0xc0) #define EP93XX_WATCHDOG_BASE EP93XX_APB_IOMEM(0x00140000) diff --git a/arch/arm/mach-ep93xx/include/mach/gpio.h b/arch/arm/mach-ep93xx/include/mach/gpio.h index 0a1498ae899..c991b149bdf 100644 --- a/arch/arm/mach-ep93xx/include/mach/gpio.h +++ b/arch/arm/mach-ep93xx/include/mach/gpio.h @@ -114,17 +114,9 @@ extern void ep93xx_gpio_int_debounce(unsigned int irq, int enable); * B0..B7 (7..15) to irq 72..79, and * F0..F7 (16..24) to irq 80..87. */ -static inline int gpio_to_irq(unsigned gpio) -{ - if (gpio <= EP93XX_GPIO_LINE_MAX_IRQ) - return 64 + gpio; - - return -EINVAL; -} - -static inline int irq_to_gpio(unsigned irq) -{ - return irq - gpio_to_irq(0); -} +#define gpio_to_irq(gpio) \ + (((gpio) <= EP93XX_GPIO_LINE_MAX_IRQ) ? 
(64 + (gpio)) : -EINVAL) + +#define irq_to_gpio(irq) ((irq) - gpio_to_irq(0)) #endif diff --git a/arch/arm/mach-ep93xx/include/mach/memory.h b/arch/arm/mach-ep93xx/include/mach/memory.h index 925b12ea099..554064e9030 100644 --- a/arch/arm/mach-ep93xx/include/mach/memory.h +++ b/arch/arm/mach-ep93xx/include/mach/memory.h @@ -9,6 +9,12 @@ #define PHYS_OFFSET UL(0x00000000) #elif defined(CONFIG_EP93XX_SDCE0_PHYS_OFFSET) #define PHYS_OFFSET UL(0xc0000000) +#elif defined(CONFIG_EP93XX_SDCE1_PHYS_OFFSET) +#define PHYS_OFFSET UL(0xd0000000) +#elif defined(CONFIG_EP93XX_SDCE2_PHYS_OFFSET) +#define PHYS_OFFSET UL(0xe0000000) +#elif defined(CONFIG_EP93XX_SDCE3_ASYNC_PHYS_OFFSET) +#define PHYS_OFFSET UL(0xf0000000) #else #error "Kconfig bug: No EP93xx PHYS_OFFSET set" #endif diff --git a/arch/arm/mach-ep93xx/include/mach/platform.h b/arch/arm/mach-ep93xx/include/mach/platform.h index 01a0f0838e5..a3ec33fd79d 100644 --- a/arch/arm/mach-ep93xx/include/mach/platform.h +++ b/arch/arm/mach-ep93xx/include/mach/platform.h @@ -4,6 +4,7 @@ #ifndef __ASSEMBLY__ +struct i2c_gpio_platform_data; struct i2c_board_info; struct platform_device; struct ep93xxfb_mach_info; @@ -33,7 +34,8 @@ static inline void ep93xx_devcfg_clear_bits(unsigned int bits) } void ep93xx_register_eth(struct ep93xx_eth_data *data, int copy_addr); -void ep93xx_register_i2c(struct i2c_board_info *devices, int num); +void ep93xx_register_i2c(struct i2c_gpio_platform_data *data, + struct i2c_board_info *devices, int num); void ep93xx_register_fb(struct ep93xxfb_mach_info *data); void ep93xx_register_pwm(int pwm0, int pwm1); int ep93xx_pwm_acquire_gpio(struct platform_device *pdev); diff --git a/arch/arm/mach-ep93xx/micro9.c b/arch/arm/mach-ep93xx/micro9.c index 0a313e82fb7..d83b80478b0 100644 --- a/arch/arm/mach-ep93xx/micro9.c +++ b/arch/arm/mach-ep93xx/micro9.c @@ -2,7 +2,9 @@ * linux/arch/arm/mach-ep93xx/micro9.c * * Copyright (C) 2006 Contec Steuerungstechnik & Automation GmbH - * Manfred Gruber <manfred.gruber@contec.at> + * Manfred Gruber <m.gruber@tirol.com> + * Copyright (C) 2009 Contec Steuerungstechnik & Automation GmbH + * Hubert Feurstein <hubert.feurstein@contec.at> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -20,104 +22,124 @@ #include <asm/mach/arch.h> -static struct ep93xx_eth_data micro9_eth_data = { - .phy_id = 0x1f, -}; - -static void __init micro9_init(void) -{ - ep93xx_register_eth(µ9_eth_data, 1); -} - -/* - * Micro9-H - */ -#ifdef CONFIG_MACH_MICRO9H -static struct physmap_flash_data micro9h_flash_data = { - .width = 4, -}; - -static struct resource micro9h_flash_resource = { +/************************************************************************* + * Micro9 NOR Flash + * + * Micro9-High has up to 64MB of 32-bit flash on CS1 + * Micro9-Mid has up to 64MB of either 32-bit or 16-bit flash on CS1 + * Micro9-Lite uses a seperate MTD map driver for flash support + * Micro9-Slim has up to 64MB of either 32-bit or 16-bit flash on CS1 + *************************************************************************/ +static struct physmap_flash_data micro9_flash_data; + +static struct resource micro9_flash_resource = { .start = EP93XX_CS1_PHYS_BASE, .end = EP93XX_CS1_PHYS_BASE + SZ_64M - 1, .flags = IORESOURCE_MEM, }; -static struct platform_device micro9h_flash = { +static struct platform_device micro9_flash = { .name = "physmap-flash", .id = 0, .dev = { - .platform_data = µ9h_flash_data, + .platform_data = µ9_flash_data, }, 
.num_resources = 1, - .resource = µ9h_flash_resource, + .resource = µ9_flash_resource, }; -static void __init micro9h_init(void) +static void __init __micro9_register_flash(unsigned int width) +{ + micro9_flash_data.width = width; + + platform_device_register(µ9_flash); +} + +static unsigned int __init micro9_detect_bootwidth(void) +{ + u32 v; + + /* Detect the bus width of the external flash memory */ + v = __raw_readl(EP93XX_SYSCON_SYSCFG); + if (v & EP93XX_SYSCON_SYSCFG_LCSN7) + return 4; /* 32-bit */ + else + return 2; /* 16-bit */ +} + +static void __init micro9_register_flash(void) { - platform_device_register(µ9h_flash); + if (machine_is_micro9()) + __micro9_register_flash(4); + else if (machine_is_micro9m() || machine_is_micro9s()) + __micro9_register_flash(micro9_detect_bootwidth()); } -static void __init micro9h_init_machine(void) + +/************************************************************************* + * Micro9 Ethernet + *************************************************************************/ +static struct ep93xx_eth_data micro9_eth_data = { + .phy_id = 0x1f, +}; + + +static void __init micro9_init_machine(void) { ep93xx_init_devices(); - micro9_init(); - micro9h_init(); + ep93xx_register_eth(µ9_eth_data, 1); + micro9_register_flash(); } -MACHINE_START(MICRO9, "Contec Hypercontrol Micro9-H") - /* Maintainer: Manfred Gruber <manfred.gruber@contec.at> */ + +#ifdef CONFIG_MACH_MICRO9H +MACHINE_START(MICRO9, "Contec Micro9-High") + /* Maintainer: Hubert Feurstein <hubert.feurstein@contec.at> */ .phys_io = EP93XX_APB_PHYS_BASE, .io_pg_offst = ((EP93XX_APB_VIRT_BASE) >> 18) & 0xfffc, .boot_params = EP93XX_SDCE3_PHYS_BASE_SYNC + 0x100, .map_io = ep93xx_map_io, .init_irq = ep93xx_init_irq, .timer = &ep93xx_timer, - .init_machine = micro9h_init_machine, + .init_machine = micro9_init_machine, MACHINE_END #endif -/* - * Micro9-M - */ #ifdef CONFIG_MACH_MICRO9M -static void __init micro9m_init_machine(void) -{ - ep93xx_init_devices(); - micro9_init(); -} - -MACHINE_START(MICRO9M, "Contec Hypercontrol Micro9-M") - /* Maintainer: Manfred Gruber <manfred.gruber@contec.at> */ +MACHINE_START(MICRO9M, "Contec Micro9-Mid") + /* Maintainer: Hubert Feurstein <hubert.feurstein@contec.at> */ .phys_io = EP93XX_APB_PHYS_BASE, .io_pg_offst = ((EP93XX_APB_VIRT_BASE) >> 18) & 0xfffc, - .boot_params = EP93XX_SDCE3_PHYS_BASE_SYNC + 0x100, + .boot_params = EP93XX_SDCE3_PHYS_BASE_ASYNC + 0x100, .map_io = ep93xx_map_io, .init_irq = ep93xx_init_irq, .timer = &ep93xx_timer, - .init_machine = micro9m_init_machine, + .init_machine = micro9_init_machine, MACHINE_END #endif -/* - * Micro9-L - */ #ifdef CONFIG_MACH_MICRO9L -static void __init micro9l_init_machine(void) -{ - ep93xx_init_devices(); - micro9_init(); -} - -MACHINE_START(MICRO9L, "Contec Hypercontrol Micro9-L") - /* Maintainer: Manfred Gruber <manfred.gruber@contec.at> */ +MACHINE_START(MICRO9L, "Contec Micro9-Lite") + /* Maintainer: Hubert Feurstein <hubert.feurstein@contec.at> */ .phys_io = EP93XX_APB_PHYS_BASE, .io_pg_offst = ((EP93XX_APB_VIRT_BASE) >> 18) & 0xfffc, .boot_params = EP93XX_SDCE3_PHYS_BASE_SYNC + 0x100, .map_io = ep93xx_map_io, .init_irq = ep93xx_init_irq, .timer = &ep93xx_timer, - .init_machine = micro9l_init_machine, + .init_machine = micro9_init_machine, MACHINE_END #endif +#ifdef CONFIG_MACH_MICRO9S +MACHINE_START(MICRO9S, "Contec Micro9-Slim") + /* Maintainer: Hubert Feurstein <hubert.feurstein@contec.at> */ + .phys_io = EP93XX_APB_PHYS_BASE, + .io_pg_offst = ((EP93XX_APB_VIRT_BASE) >> 18) & 0xfffc, + .boot_params = 
EP93XX_SDCE3_PHYS_BASE_ASYNC + 0x100, + .map_io = ep93xx_map_io, + .init_irq = ep93xx_init_irq, + .timer = &ep93xx_timer, + .init_machine = micro9_init_machine, +MACHINE_END +#endif diff --git a/arch/arm/mach-pxa/cpufreq-pxa2xx.c b/arch/arm/mach-pxa/cpufreq-pxa2xx.c index 3a8ee2272ad..983cc8c2008 100644 --- a/arch/arm/mach-pxa/cpufreq-pxa2xx.c +++ b/arch/arm/mach-pxa/cpufreq-pxa2xx.c @@ -155,7 +155,7 @@ MODULE_PARM_DESC(pxa255_turbo_table, "Selects the frequency table (0 = run table static pxa_freqs_t pxa27x_freqs[] = { {104000, 104000, PXA27x_CCCR(1, 8, 2), 0, CCLKCFG2(1, 0, 1), 900000, 1705000 }, - {156000, 104000, PXA27x_CCCR(1, 8, 6), 0, CCLKCFG2(1, 1, 1), 1000000, 1705000 }, + {156000, 104000, PXA27x_CCCR(1, 8, 3), 0, CCLKCFG2(1, 0, 1), 1000000, 1705000 }, {208000, 208000, PXA27x_CCCR(0, 16, 2), 1, CCLKCFG2(0, 0, 1), 1180000, 1705000 }, {312000, 208000, PXA27x_CCCR(1, 16, 3), 1, CCLKCFG2(1, 0, 1), 1250000, 1705000 }, {416000, 208000, PXA27x_CCCR(1, 16, 4), 1, CCLKCFG2(1, 0, 1), 1350000, 1705000 }, diff --git a/arch/arm/mach-pxa/csb726.c b/arch/arm/mach-pxa/csb726.c index 79141f86272..965480eb4fe 100644 --- a/arch/arm/mach-pxa/csb726.c +++ b/arch/arm/mach-pxa/csb726.c @@ -238,7 +238,7 @@ static struct resource csb726_lan_resources[] = { }; struct smsc911x_platform_config csb726_lan_config = { - .irq_type = SMSC911X_IRQ_POLARITY_ACTIVE_LOW, + .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW, .irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL, .flags = SMSC911X_USE_32BIT, .phy_interface = PHY_INTERFACE_MODE_MII, diff --git a/arch/arm/mach-sa1100/Makefile b/arch/arm/mach-sa1100/Makefile index 8a5546e6d54..bb7b8198d0c 100644 --- a/arch/arm/mach-sa1100/Makefile +++ b/arch/arm/mach-sa1100/Makefile @@ -25,6 +25,7 @@ led-$(CONFIG_SA1100_CERF) += leds-cerf.o obj-$(CONFIG_SA1100_COLLIE) += collie.o +obj-$(CONFIG_SA1100_H3100) += h3600.o obj-$(CONFIG_SA1100_H3600) += h3600.o obj-$(CONFIG_SA1100_HACKKIT) += hackkit.o diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S index 8f5c13f4c93..295e25dd638 100644 --- a/arch/arm/mm/cache-v6.S +++ b/arch/arm/mm/cache-v6.S @@ -12,6 +12,7 @@ #include <linux/linkage.h> #include <linux/init.h> #include <asm/assembler.h> +#include <asm/unwind.h> #include "proc-macros.S" @@ -121,11 +122,13 @@ ENTRY(v6_coherent_kern_range) * - the Icache does not read data from the write buffer */ ENTRY(v6_coherent_user_range) - + UNWIND(.fnstart ) #ifdef HARVARD_CACHE bic r0, r0, #CACHE_LINE_SIZE - 1 -1: mcr p15, 0, r0, c7, c10, 1 @ clean D line +1: + USER( mcr p15, 0, r0, c7, c10, 1 ) @ clean D line add r0, r0, #CACHE_LINE_SIZE +2: cmp r0, r1 blo 1b #endif @@ -143,6 +146,19 @@ ENTRY(v6_coherent_user_range) mov pc, lr /* + * Fault handling for the cache operation above. If the virtual address in r0 + * isn't mapped, just try the next page. 
+ */ +9001: + mov r0, r0, lsr #12 + mov r0, r0, lsl #12 + add r0, r0, #4096 + b 2b + UNWIND(.fnend ) +ENDPROC(v6_coherent_user_range) +ENDPROC(v6_coherent_kern_range) + +/* * v6_flush_kern_dcache_page(kaddr) * * Ensure that the data held in the page kaddr is written back diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S index bda0ec31a4e..e1bd9759617 100644 --- a/arch/arm/mm/cache-v7.S +++ b/arch/arm/mm/cache-v7.S @@ -13,6 +13,7 @@ #include <linux/linkage.h> #include <linux/init.h> #include <asm/assembler.h> +#include <asm/unwind.h> #include "proc-macros.S" @@ -153,13 +154,16 @@ ENTRY(v7_coherent_kern_range) * - the Icache does not read data from the write buffer */ ENTRY(v7_coherent_user_range) + UNWIND(.fnstart ) dcache_line_size r2, r3 sub r3, r2, #1 bic r0, r0, r3 -1: mcr p15, 0, r0, c7, c11, 1 @ clean D line to the point of unification +1: + USER( mcr p15, 0, r0, c7, c11, 1 ) @ clean D line to the point of unification dsb - mcr p15, 0, r0, c7, c5, 1 @ invalidate I line + USER( mcr p15, 0, r0, c7, c5, 1 ) @ invalidate I line add r0, r0, r2 +2: cmp r0, r1 blo 1b mov r0, #0 @@ -167,6 +171,17 @@ ENTRY(v7_coherent_user_range) dsb isb mov pc, lr + +/* + * Fault handling for the cache operation above. If the virtual address in r0 + * isn't mapped, just try the next page. + */ +9001: + mov r0, r0, lsr #12 + mov r0, r0, lsl #12 + add r0, r0, #4096 + b 2b + UNWIND(.fnend ) ENDPROC(v7_coherent_kern_range) ENDPROC(v7_coherent_user_range) diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c index bc0099d5ae8..d0d17b6a370 100644 --- a/arch/arm/mm/fault-armv.c +++ b/arch/arm/mm/fault-armv.c @@ -153,14 +153,11 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte) page = pfn_to_page(pfn); mapping = page_mapping(page); - if (mapping) { #ifndef CONFIG_SMP - int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags); - - if (dirty) - __flush_dcache_page(mapping, page); + if (test_and_clear_bit(PG_dcache_dirty, &page->flags)) + __flush_dcache_page(mapping, page); #endif - + if (mapping) { if (cache_is_vivt()) make_coherent(mapping, vma, addr, pfn); else if (vma->vm_flags & VM_EXEC) diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index ae0e25f5a70..10e06801afb 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -292,6 +292,11 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) * down_read() */ might_sleep(); +#ifdef CONFIG_DEBUG_VM + if (!user_mode(regs) && + !search_exception_tables(regs->ARM_pc)) + goto no_context; +#endif } fault = __do_page_fault(mm, addr, fsr, tsk); diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c index 73cae57fa70..30f82fb5918 100644 --- a/arch/arm/mm/highmem.c +++ b/arch/arm/mm/highmem.c @@ -46,6 +46,8 @@ void *kmap_atomic(struct page *page, enum km_type type) if (!PageHighMem(page)) return page_address(page); + debug_kmap_atomic(type); + kmap = kmap_high_get(page); if (kmap) return kmap; diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 877c492f8e1..40940d7ce4f 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -483,7 +483,7 @@ free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn) /* * Convert start_pfn/end_pfn to a struct page pointer. 
*/ - start_pg = pfn_to_page(start_pfn); + start_pg = pfn_to_page(start_pfn - 1) + 1; end_pg = pfn_to_page(end_pfn); /* diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c index 704dd396257..77df726180b 100644 --- a/arch/s390/hypfs/hypfs_diag.c +++ b/arch/s390/hypfs/hypfs_diag.c @@ -438,7 +438,7 @@ static int diag204_probe(void) } if (diag204((unsigned long)SUBC_STIB6 | (unsigned long)INFO_EXT, pages, buf) >= 0) { - diag204_store_sc = SUBC_STIB7; + diag204_store_sc = SUBC_STIB6; diag204_info_type = INFO_EXT; goto out; } diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c index f5fe34dd821..5a82bc68193 100644 --- a/arch/s390/kernel/ftrace.c +++ b/arch/s390/kernel/ftrace.c @@ -203,73 +203,10 @@ out: #ifdef CONFIG_FTRACE_SYSCALLS -extern unsigned long __start_syscalls_metadata[]; -extern unsigned long __stop_syscalls_metadata[]; extern unsigned int sys_call_table[]; -static struct syscall_metadata **syscalls_metadata; - -struct syscall_metadata *syscall_nr_to_meta(int nr) -{ - if (!syscalls_metadata || nr >= NR_syscalls || nr < 0) - return NULL; - - return syscalls_metadata[nr]; -} - -int syscall_name_to_nr(char *name) -{ - int i; - - if (!syscalls_metadata) - return -1; - for (i = 0; i < NR_syscalls; i++) - if (syscalls_metadata[i]) - if (!strcmp(syscalls_metadata[i]->name, name)) - return i; - return -1; -} - -void set_syscall_enter_id(int num, int id) -{ - syscalls_metadata[num]->enter_id = id; -} - -void set_syscall_exit_id(int num, int id) +unsigned long __init arch_syscall_addr(int nr) { - syscalls_metadata[num]->exit_id = id; -} - -static struct syscall_metadata *find_syscall_meta(unsigned long syscall) -{ - struct syscall_metadata *start; - struct syscall_metadata *stop; - char str[KSYM_SYMBOL_LEN]; - - start = (struct syscall_metadata *)__start_syscalls_metadata; - stop = (struct syscall_metadata *)__stop_syscalls_metadata; - kallsyms_lookup(syscall, NULL, NULL, NULL, str); - - for ( ; start < stop; start++) { - if (start->name && !strcmp(start->name + 3, str + 3)) - return start; - } - return NULL; -} - -static int __init arch_init_ftrace_syscalls(void) -{ - struct syscall_metadata *meta; - int i; - syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * NR_syscalls, - GFP_KERNEL); - if (!syscalls_metadata) - return -ENOMEM; - for (i = 0; i < NR_syscalls; i++) { - meta = find_syscall_meta((unsigned long)sys_call_table[i]); - syscalls_metadata[i] = meta; - } - return 0; + return (unsigned long)sys_call_table[nr]; } -arch_initcall(arch_init_ftrace_syscalls); #endif diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c index 802c8ab247f..0729f36c2fe 100644 --- a/arch/s390/kernel/processor.c +++ b/arch/s390/kernel/processor.c @@ -31,9 +31,9 @@ void __cpuinit print_cpu_info(void) static int show_cpuinfo(struct seq_file *m, void *v) { - static const char *hwcap_str[9] = { + static const char *hwcap_str[10] = { "esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp", - "edat", "etf3eh" + "edat", "etf3eh", "highgprs" }; struct _lowcore *lc; unsigned long n = (unsigned long) v - 1; @@ -48,7 +48,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) num_online_cpus(), loops_per_jiffy/(500000/HZ), (loops_per_jiffy/(5000/HZ))%100); seq_puts(m, "features\t: "); - for (i = 0; i < 9; i++) + for (i = 0; i < 10; i++) if (hwcap_str[i] && (elf_hwcap & (1UL << i))) seq_printf(m, "%s ", hwcap_str[i]); seq_puts(m, "\n"); diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S index 68d9223b145..3eb84931d2a 100644 --- 
a/arch/sh/kernel/entry-common.S +++ b/arch/sh/kernel/entry-common.S @@ -121,7 +121,7 @@ noresched: ENTRY(resume_userspace) ! r8: current_thread_info cli - TRACE_IRQS_OfF + TRACE_IRQS_OFF mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags tst #(_TIF_WORK_MASK & 0xff), r0 bt/s __restore_all diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c index a3dcc6d5d25..2c48e267256 100644 --- a/arch/sh/kernel/ftrace.c +++ b/arch/sh/kernel/ftrace.c @@ -291,31 +291,48 @@ struct syscall_metadata *syscall_nr_to_meta(int nr) return syscalls_metadata[nr]; } -void arch_init_ftrace_syscalls(void) +int syscall_name_to_nr(char *name) +{ + int i; + + if (!syscalls_metadata) + return -1; + for (i = 0; i < NR_syscalls; i++) + if (syscalls_metadata[i]) + if (!strcmp(syscalls_metadata[i]->name, name)) + return i; + return -1; +} + +void set_syscall_enter_id(int num, int id) +{ + syscalls_metadata[num]->enter_id = id; +} + +void set_syscall_exit_id(int num, int id) +{ + syscalls_metadata[num]->exit_id = id; +} + +static int __init arch_init_ftrace_syscalls(void) { int i; struct syscall_metadata *meta; unsigned long **psys_syscall_table = &sys_call_table; - static atomic_t refs; - - if (atomic_inc_return(&refs) != 1) - goto end; syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * FTRACE_SYSCALL_MAX, GFP_KERNEL); if (!syscalls_metadata) { WARN_ON(1); - return; + return -ENOMEM; } for (i = 0; i < FTRACE_SYSCALL_MAX; i++) { meta = find_syscall_meta(psys_syscall_table[i]); syscalls_metadata[i] = meta; } - return; - /* Paranoid: avoid overflow */ -end: - atomic_dec(&refs); + return 0; } +arch_initcall(arch_init_ftrace_syscalls); #endif /* CONFIG_FTRACE_SYSCALLS */ diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c index f9d44f8e0df..99b4fb553bf 100644 --- a/arch/sh/kernel/setup.c +++ b/arch/sh/kernel/setup.c @@ -549,6 +549,8 @@ static int show_cpuinfo(struct seq_file *m, void *v) if (cpu == 0) seq_printf(m, "machine\t\t: %s\n", get_system_type()); + else + seq_printf(m, "\n"); seq_printf(m, "processor\t: %d\n", cpu); seq_printf(m, "cpu family\t: %s\n", init_utsname()->machine); diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c index 6729703547a..3db37425210 100644 --- a/arch/sh/kernel/signal_32.c +++ b/arch/sh/kernel/signal_32.c @@ -145,7 +145,7 @@ static inline int restore_sigcontext_fpu(struct sigcontext __user *sc) { struct task_struct *tsk = current; - if (!(current_cpu_data.flags & CPU_HAS_FPU)) + if (!(boot_cpu_data.flags & CPU_HAS_FPU)) return 0; set_used_math(); @@ -158,7 +158,7 @@ static inline int save_sigcontext_fpu(struct sigcontext __user *sc, { struct task_struct *tsk = current; - if (!(current_cpu_data.flags & CPU_HAS_FPU)) + if (!(boot_cpu_data.flags & CPU_HAS_FPU)) return 0; if (!used_math()) { @@ -199,7 +199,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p #undef COPY #ifdef CONFIG_SH_FPU - if (current_cpu_data.flags & CPU_HAS_FPU) { + if (boot_cpu_data.flags & CPU_HAS_FPU) { int owned_fp; struct task_struct *tsk = current; @@ -472,6 +472,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, err |= __put_user(OR_R0_R0, &frame->retcode[6]); err |= __put_user((__NR_rt_sigreturn), &frame->retcode[7]); regs->pr = (unsigned long) frame->retcode; + flush_icache_range(regs->pr, regs->pr + sizeof(frame->retcode)); } if (err) @@ -497,8 +498,6 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n", current->comm, 
task_pid_nr(current), frame, regs->pc, regs->pr); - flush_icache_range(regs->pr, regs->pr + sizeof(frame->retcode)); - return 0; give_sigsegv: diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c index 442d8d47a41..160db1003cf 100644 --- a/arch/sh/kernel/smp.c +++ b/arch/sh/kernel/smp.c @@ -35,6 +35,8 @@ static inline void __init smp_store_cpu_info(unsigned int cpu) { struct sh_cpuinfo *c = cpu_data + cpu; + memcpy(c, &boot_cpu_data, sizeof(struct sh_cpuinfo)); + c->loops_per_jiffy = loops_per_jiffy; } diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c index e0b5e4b5acc..7a2ee3a6b8e 100644 --- a/arch/sh/kernel/traps_32.c +++ b/arch/sh/kernel/traps_32.c @@ -25,6 +25,7 @@ #include <linux/kexec.h> #include <linux/limits.h> #include <linux/proc_fs.h> +#include <linux/sysfs.h> #include <asm/system.h> #include <asm/uaccess.h> #include <asm/fpu.h> @@ -159,12 +160,12 @@ void die(const char * str, struct pt_regs * regs, long err) oops_enter(); - console_verbose(); spin_lock_irq(&die_lock); + console_verbose(); bust_spinlocks(1); printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter); - + sysfs_printk_last_file(); print_modules(); show_regs(regs); @@ -180,6 +181,7 @@ void die(const char * str, struct pt_regs * regs, long err) bust_spinlocks(0); add_taint(TAINT_DIE); spin_unlock_irq(&die_lock); + oops_exit(); if (kexec_should_crash(current)) crash_kexec(regs); @@ -190,7 +192,6 @@ void die(const char * str, struct pt_regs * regs, long err) if (panic_on_oops) panic("Fatal exception"); - oops_exit(); do_exit(SIGSEGV); } diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c index 35c37b7f717..5e1091be9dc 100644 --- a/arch/sh/mm/cache.c +++ b/arch/sh/mm/cache.c @@ -128,7 +128,7 @@ void __update_cache(struct vm_area_struct *vma, return; page = pfn_to_page(pfn); - if (pfn_valid(pfn) && page_mapping(page)) { + if (pfn_valid(pfn)) { int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags); if (dirty) { unsigned long addr = (unsigned long)page_address(page); diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c index adf5f273868..cb3c72c45aa 100644 --- a/arch/sparc/kernel/ldc.c +++ b/arch/sparc/kernel/ldc.c @@ -1242,13 +1242,13 @@ int ldc_bind(struct ldc_channel *lp, const char *name) snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name); err = request_irq(lp->cfg.rx_irq, ldc_rx, - IRQF_SAMPLE_RANDOM | IRQF_SHARED, + IRQF_SAMPLE_RANDOM | IRQF_DISABLED | IRQF_SHARED, lp->rx_irq_name, lp); if (err) return err; err = request_irq(lp->cfg.tx_irq, ldc_tx, - IRQF_SAMPLE_RANDOM | IRQF_SHARED, + IRQF_SAMPLE_RANDOM | IRQF_DISABLED | IRQF_SHARED, lp->tx_irq_name, lp); if (err) { free_irq(lp->cfg.rx_irq, lp); diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 04db9274389..fa5936e1c3b 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -437,7 +437,7 @@ static const struct sparc_pmu niagara2_pmu = { .lower_shift = 6, .event_mask = 0xfff, .hv_bit = 0x8, - .irq_bit = 0x03, + .irq_bit = 0x30, .upper_nop = 0x220, .lower_nop = 0x220, }; diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index a70a5e1904d..1886d37d411 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -265,7 +265,7 @@ static void flush_dcache(unsigned long pfn) struct page *page; page = pfn_to_page(pfn); - if (page && page_mapping(page)) { + if (page) { unsigned long pg_flags; pg_flags = page->flags; diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index c876bace8fd..07e01149e3b 100644 --- a/arch/x86/Kconfig +++ 
b/arch/x86/Kconfig @@ -491,7 +491,7 @@ if PARAVIRT_GUEST source "arch/x86/xen/Kconfig" config VMI - bool "VMI Guest support" + bool "VMI Guest support (DEPRECATED)" select PARAVIRT depends on X86_32 ---help--- @@ -500,6 +500,15 @@ config VMI at the moment), by linking the kernel to a GPL-ed ROM module provided by the hypervisor. + As of September 2009, VMware has started a phased retirement + of this feature from VMware's products. Please see + feature-removal-schedule.txt for details. If you are + planning to enable this option, please note that you cannot + live migrate a VMI enabled VM to a future VMware product, + which doesn't support VMI. So if you expect your kernel to + seamlessly migrate to newer VMware products, keep this + disabled. + config KVM_CLOCK bool "KVM paravirtualized clock" select PARAVIRT diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index 8aebcc41041..efb38994859 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -840,42 +840,22 @@ static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock) static inline unsigned long __raw_local_save_flags(void) { - unsigned long f; - - asm volatile(paravirt_alt(PARAVIRT_CALL) - : "=a"(f) - : paravirt_type(pv_irq_ops.save_fl), - paravirt_clobber(CLBR_EAX) - : "memory", "cc"); - return f; + return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl); } static inline void raw_local_irq_restore(unsigned long f) { - asm volatile(paravirt_alt(PARAVIRT_CALL) - : "=a"(f) - : PV_FLAGS_ARG(f), - paravirt_type(pv_irq_ops.restore_fl), - paravirt_clobber(CLBR_EAX) - : "memory", "cc"); + PVOP_VCALLEE1(pv_irq_ops.restore_fl, f); } static inline void raw_local_irq_disable(void) { - asm volatile(paravirt_alt(PARAVIRT_CALL) - : - : paravirt_type(pv_irq_ops.irq_disable), - paravirt_clobber(CLBR_EAX) - : "memory", "eax", "cc"); + PVOP_VCALLEE0(pv_irq_ops.irq_disable); } static inline void raw_local_irq_enable(void) { - asm volatile(paravirt_alt(PARAVIRT_CALL) - : - : paravirt_type(pv_irq_ops.irq_enable), - paravirt_clobber(CLBR_EAX) - : "memory", "eax", "cc"); + PVOP_VCALLEE0(pv_irq_ops.irq_enable); } static inline unsigned long __raw_local_irq_save(void) diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index dd0f5b32489..9357473c8da 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -494,10 +494,11 @@ int paravirt_disable_iospace(void); #define EXTRA_CLOBBERS #define VEXTRA_CLOBBERS #else /* CONFIG_X86_64 */ +/* [re]ax isn't an arg, but the return val */ #define PVOP_VCALL_ARGS \ unsigned long __edi = __edi, __esi = __esi, \ - __edx = __edx, __ecx = __ecx -#define PVOP_CALL_ARGS PVOP_VCALL_ARGS, __eax + __edx = __edx, __ecx = __ecx, __eax = __eax +#define PVOP_CALL_ARGS PVOP_VCALL_ARGS #define PVOP_CALL_ARG1(x) "D" ((unsigned long)(x)) #define PVOP_CALL_ARG2(x) "S" ((unsigned long)(x)) @@ -509,6 +510,7 @@ int paravirt_disable_iospace(void); "=c" (__ecx) #define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS, "=a" (__eax) +/* void functions are still allowed [re]ax for scratch */ #define PVOP_VCALLEE_CLOBBERS "=a" (__eax) #define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS @@ -583,8 +585,8 @@ int paravirt_disable_iospace(void); VEXTRA_CLOBBERS, \ pre, post, ##__VA_ARGS__) -#define __PVOP_VCALLEESAVE(rettype, op, pre, post, ...) \ - ____PVOP_CALL(rettype, op.func, CLBR_RET_REG, \ +#define __PVOP_VCALLEESAVE(op, pre, post, ...) 
\ + ____PVOP_VCALL(op.func, CLBR_RET_REG, \ PVOP_VCALLEE_CLOBBERS, , \ pre, post, ##__VA_ARGS__) diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index ad7ce3fd506..8d9f8548a87 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -28,9 +28,20 @@ */ #define ARCH_PERFMON_EVENT_MASK 0xffff +/* + * filter mask to validate fixed counter events. + * the following filters disqualify for fixed counters: + * - inv + * - edge + * - cnt-mask + * The other filters are supported by fixed counters. + * The any-thread option is supported starting with v3. + */ +#define ARCH_PERFMON_EVENT_FILTER_MASK 0xff840000 + #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8) -#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX 0 +#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX 0 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \ (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX)) diff --git a/arch/x86/kernel/acpi/realmode/wakeup.lds.S b/arch/x86/kernel/acpi/realmode/wakeup.lds.S index 7da00b799cd..0e50e1e5c57 100644 --- a/arch/x86/kernel/acpi/realmode/wakeup.lds.S +++ b/arch/x86/kernel/acpi/realmode/wakeup.lds.S @@ -56,6 +56,6 @@ SECTIONS /DISCARD/ : { *(.note*) } - - . = ASSERT(_end <= WAKEUP_SIZE, "Wakeup too big!"); } + +ASSERT(_end <= WAKEUP_SIZE, "Wakeup too big!"); diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index b5801c31184..2e20bca3cca 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -77,6 +77,18 @@ struct cpu_hw_events { struct debug_store *ds; }; +struct event_constraint { + unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; + int code; +}; + +#define EVENT_CONSTRAINT(c, m) { .code = (c), .idxmsk[0] = (m) } +#define EVENT_CONSTRAINT_END { .code = 0, .idxmsk[0] = 0 } + +#define for_each_event_constraint(e, c) \ + for ((e) = (c); (e)->idxmsk[0]; (e)++) + + /* * struct x86_pmu - generic x86 pmu */ @@ -102,6 +114,8 @@ struct x86_pmu { u64 intel_ctrl; void (*enable_bts)(u64 config); void (*disable_bts)(void); + int (*get_event_idx)(struct cpu_hw_events *cpuc, + struct hw_perf_event *hwc); }; static struct x86_pmu x86_pmu __read_mostly; @@ -110,6 +124,8 @@ static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, }; +static const struct event_constraint *event_constraints; + /* * Not sure about some of these */ @@ -155,6 +171,16 @@ static u64 p6_pmu_raw_event(u64 hw_event) return hw_event & P6_EVNTSEL_MASK; } +static const struct event_constraint intel_p6_event_constraints[] = +{ + EVENT_CONSTRAINT(0xc1, 0x1), /* FLOPS */ + EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */ + EVENT_CONSTRAINT(0x11, 0x1), /* FP_ASSIST */ + EVENT_CONSTRAINT(0x12, 0x2), /* MUL */ + EVENT_CONSTRAINT(0x13, 0x2), /* DIV */ + EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */ + EVENT_CONSTRAINT_END +}; /* * Intel PerfMon v3. Used on Core2 and later. 
@@ -170,6 +196,35 @@ static const u64 intel_perfmon_event_map[] = [PERF_COUNT_HW_BUS_CYCLES] = 0x013c, }; +static const struct event_constraint intel_core_event_constraints[] = +{ + EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */ + EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */ + EVENT_CONSTRAINT(0x12, 0x2), /* MUL */ + EVENT_CONSTRAINT(0x13, 0x2), /* DIV */ + EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */ + EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */ + EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */ + EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */ + EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */ + EVENT_CONSTRAINT_END +}; + +static const struct event_constraint intel_nehalem_event_constraints[] = +{ + EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */ + EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */ + EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */ + EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */ + EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */ + EVENT_CONSTRAINT(0x4c, 0x3), /* LOAD_HIT_PRE */ + EVENT_CONSTRAINT(0x51, 0x3), /* L1D */ + EVENT_CONSTRAINT(0x52, 0x3), /* L1D_CACHE_PREFETCH_LOCK_FB_HIT */ + EVENT_CONSTRAINT(0x53, 0x3), /* L1D_CACHE_LOCK_FB_HIT */ + EVENT_CONSTRAINT(0xc5, 0x3), /* CACHE_LOCK_CYCLES */ + EVENT_CONSTRAINT_END +}; + static u64 intel_pmu_event_map(int hw_event) { return intel_perfmon_event_map[hw_event]; @@ -469,7 +524,7 @@ static u64 intel_pmu_raw_event(u64 hw_event) #define CORE_EVNTSEL_UNIT_MASK 0x0000FF00ULL #define CORE_EVNTSEL_EDGE_MASK 0x00040000ULL #define CORE_EVNTSEL_INV_MASK 0x00800000ULL -#define CORE_EVNTSEL_REG_MASK 0xFF000000ULL +#define CORE_EVNTSEL_REG_MASK 0xFF000000ULL #define CORE_EVNTSEL_MASK \ (CORE_EVNTSEL_EVENT_MASK | \ @@ -932,6 +987,8 @@ static int __hw_perf_event_init(struct perf_event *event) */ hwc->config = ARCH_PERFMON_EVENTSEL_INT; + hwc->idx = -1; + /* * Count user and OS events unless requested not to. */ @@ -1334,8 +1391,7 @@ static void amd_pmu_enable_event(struct hw_perf_event *hwc, int idx) x86_pmu_enable_event(hwc, idx); } -static int -fixed_mode_idx(struct perf_event *event, struct hw_perf_event *hwc) +static int fixed_mode_idx(struct hw_perf_event *hwc) { unsigned int hw_event; @@ -1349,6 +1405,12 @@ fixed_mode_idx(struct perf_event *event, struct hw_perf_event *hwc) if (!x86_pmu.num_events_fixed) return -1; + /* + * fixed counters do not take all possible filters + */ + if (hwc->config & ARCH_PERFMON_EVENT_FILTER_MASK) + return -1; + if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS))) return X86_PMC_IDX_FIXED_INSTRUCTIONS; if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES))) @@ -1360,22 +1422,57 @@ fixed_mode_idx(struct perf_event *event, struct hw_perf_event *hwc) } /* - * Find a PMC slot for the freshly enabled / scheduled in event: + * generic counter allocator: get next free counter */ -static int x86_pmu_enable(struct perf_event *event) +static int +gen_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc) +{ + int idx; + + idx = find_first_zero_bit(cpuc->used_mask, x86_pmu.num_events); + return idx == x86_pmu.num_events ? 
-1 : idx; +} + +/* + * intel-specific counter allocator: check event constraints + */ +static int +intel_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc) +{ + const struct event_constraint *event_constraint; + int i, code; + + if (!event_constraints) + goto skip; + + code = hwc->config & CORE_EVNTSEL_EVENT_MASK; + + for_each_event_constraint(event_constraint, event_constraints) { + if (code == event_constraint->code) { + for_each_bit(i, event_constraint->idxmsk, X86_PMC_IDX_MAX) { + if (!test_and_set_bit(i, cpuc->used_mask)) + return i; + } + return -1; + } + } +skip: + return gen_get_event_idx(cpuc, hwc); +} + +static int +x86_schedule_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); - struct hw_perf_event *hwc = &event->hw; int idx; - idx = fixed_mode_idx(event, hwc); + idx = fixed_mode_idx(hwc); if (idx == X86_PMC_IDX_FIXED_BTS) { /* BTS is already occupied. */ if (test_and_set_bit(idx, cpuc->used_mask)) return -EAGAIN; hwc->config_base = 0; - hwc->event_base = 0; + hwc->event_base = 0; hwc->idx = idx; } else if (idx >= 0) { /* @@ -1396,20 +1493,35 @@ static int x86_pmu_enable(struct perf_event *event) } else { idx = hwc->idx; /* Try to get the previous generic event again */ - if (test_and_set_bit(idx, cpuc->used_mask)) { + if (idx == -1 || test_and_set_bit(idx, cpuc->used_mask)) { try_generic: - idx = find_first_zero_bit(cpuc->used_mask, - x86_pmu.num_events); - if (idx == x86_pmu.num_events) + idx = x86_pmu.get_event_idx(cpuc, hwc); + if (idx == -1) return -EAGAIN; set_bit(idx, cpuc->used_mask); hwc->idx = idx; } - hwc->config_base = x86_pmu.eventsel; - hwc->event_base = x86_pmu.perfctr; + hwc->config_base = x86_pmu.eventsel; + hwc->event_base = x86_pmu.perfctr; } + return idx; +} + +/* + * Find a PMC slot for the freshly enabled / scheduled in event: + */ +static int x86_pmu_enable(struct perf_event *event) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; + int idx; + + idx = x86_schedule_event(cpuc, hwc); + if (idx < 0) + return idx; + perf_events_lapic_init(); x86_pmu.disable(hwc, idx); @@ -1877,6 +1989,7 @@ static struct x86_pmu p6_pmu = { */ .event_bits = 32, .event_mask = (1ULL << 32) - 1, + .get_event_idx = intel_get_event_idx, }; static struct x86_pmu intel_pmu = { @@ -1900,6 +2013,7 @@ static struct x86_pmu intel_pmu = { .max_period = (1ULL << 31) - 1, .enable_bts = intel_pmu_enable_bts, .disable_bts = intel_pmu_disable_bts, + .get_event_idx = intel_get_event_idx, }; static struct x86_pmu amd_pmu = { @@ -1920,6 +2034,7 @@ static struct x86_pmu amd_pmu = { .apic = 1, /* use highest bit to detect overflow */ .max_period = (1ULL << 47) - 1, + .get_event_idx = gen_get_event_idx, }; static int p6_pmu_init(void) @@ -1932,10 +2047,12 @@ static int p6_pmu_init(void) case 7: case 8: case 11: /* Pentium III */ + event_constraints = intel_p6_event_constraints; break; case 9: case 13: /* Pentium M */ + event_constraints = intel_p6_event_constraints; break; default: pr_cont("unsupported p6 CPU model %d ", @@ -2007,12 +2124,14 @@ static int intel_pmu_init(void) sizeof(hw_cache_event_ids)); pr_cont("Core2 events, "); + event_constraints = intel_core_event_constraints; break; default: case 26: memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids, sizeof(hw_cache_event_ids)); + event_constraints = intel_nehalem_event_constraints; pr_cont("Nehalem/Corei7 events, "); break; case 28: @@ -2105,11 +2224,47 @@ static const struct pmu pmu = { 
.unthrottle = x86_pmu_unthrottle, }; +static int +validate_event(struct cpu_hw_events *cpuc, struct perf_event *event) +{ + struct hw_perf_event fake_event = event->hw; + + if (event->pmu != &pmu) + return 0; + + return x86_schedule_event(cpuc, &fake_event); +} + +static int validate_group(struct perf_event *event) +{ + struct perf_event *sibling, *leader = event->group_leader; + struct cpu_hw_events fake_pmu; + + memset(&fake_pmu, 0, sizeof(fake_pmu)); + + if (!validate_event(&fake_pmu, leader)) + return -ENOSPC; + + list_for_each_entry(sibling, &leader->sibling_list, group_entry) { + if (!validate_event(&fake_pmu, sibling)) + return -ENOSPC; + } + + if (!validate_event(&fake_pmu, event)) + return -ENOSPC; + + return 0; +} + const struct pmu *hw_perf_event_init(struct perf_event *event) { int err; err = __hw_perf_event_init(event); + if (!err) { + if (event->group_leader != event) + err = validate_group(event); + } if (err) { if (event->destroy) event->destroy(event); diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index c097e7d607c..7d52e9da5e0 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -1185,17 +1185,14 @@ END(ftrace_graph_caller) .globl return_to_handler return_to_handler: - pushl $0 pushl %eax - pushl %ecx pushl %edx movl %ebp, %eax call ftrace_return_to_handler - movl %eax, 0xc(%esp) + movl %eax, %ecx popl %edx - popl %ecx popl %eax - ret + jmp *%ecx #endif .section .rodata,"a" diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index b5c061f8f35..bd5bbddddf9 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -155,11 +155,11 @@ GLOBAL(return_to_handler) call ftrace_return_to_handler - movq %rax, 16(%rsp) + movq %rax, %rdi movq 8(%rsp), %rdx movq (%rsp), %rax - addq $16, %rsp - retq + addq $24, %rsp + jmp *%rdi #endif diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 9dbb527e165..5a1b9758fd6 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -9,6 +9,8 @@ * the dangers of modifying code on the run. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/spinlock.h> #include <linux/hardirq.h> #include <linux/uaccess.h> @@ -336,15 +338,15 @@ int __init ftrace_dyn_arch_init(void *data) switch (faulted) { case 0: - pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n"); + pr_info("converting mcount calls to 0f 1f 44 00 00\n"); memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE); break; case 1: - pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n"); + pr_info("converting mcount calls to 66 66 66 66 90\n"); memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE); break; case 2: - pr_info("ftrace: converting mcount calls to jmp . + 5\n"); + pr_info("converting mcount calls to jmp . 
+ 5\n"); memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE); break; } @@ -468,82 +470,10 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, #ifdef CONFIG_FTRACE_SYSCALLS -extern unsigned long __start_syscalls_metadata[]; -extern unsigned long __stop_syscalls_metadata[]; extern unsigned long *sys_call_table; -static struct syscall_metadata **syscalls_metadata; - -static struct syscall_metadata *find_syscall_meta(unsigned long *syscall) -{ - struct syscall_metadata *start; - struct syscall_metadata *stop; - char str[KSYM_SYMBOL_LEN]; - - - start = (struct syscall_metadata *)__start_syscalls_metadata; - stop = (struct syscall_metadata *)__stop_syscalls_metadata; - kallsyms_lookup((unsigned long) syscall, NULL, NULL, NULL, str); - - for ( ; start < stop; start++) { - if (start->name && !strcmp(start->name, str)) - return start; - } - return NULL; -} - -struct syscall_metadata *syscall_nr_to_meta(int nr) -{ - if (!syscalls_metadata || nr >= NR_syscalls || nr < 0) - return NULL; - - return syscalls_metadata[nr]; -} - -int syscall_name_to_nr(char *name) +unsigned long __init arch_syscall_addr(int nr) { - int i; - - if (!syscalls_metadata) - return -1; - - for (i = 0; i < NR_syscalls; i++) { - if (syscalls_metadata[i]) { - if (!strcmp(syscalls_metadata[i]->name, name)) - return i; - } - } - return -1; -} - -void set_syscall_enter_id(int num, int id) -{ - syscalls_metadata[num]->enter_id = id; -} - -void set_syscall_exit_id(int num, int id) -{ - syscalls_metadata[num]->exit_id = id; -} - -static int __init arch_init_ftrace_syscalls(void) -{ - int i; - struct syscall_metadata *meta; - unsigned long **psys_syscall_table = &sys_call_table; - - syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * - NR_syscalls, GFP_KERNEL); - if (!syscalls_metadata) { - WARN_ON(1); - return -ENOMEM; - } - - for (i = 0; i < NR_syscalls; i++) { - meta = find_syscall_meta(psys_syscall_table[i]); - syscalls_metadata[i] = meta; - } - return 0; + return (unsigned long)(&sys_call_table)[nr]; } -arch_initcall(arch_init_ftrace_syscalls); #endif diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 39120619951..74656d1d4e3 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -244,7 +244,6 @@ unsigned int __irq_entry do_IRQ(struct pt_regs *regs) __func__, smp_processor_id(), vector, irq); } - run_local_timers(); irq_exit(); set_irq_regs(old_regs); @@ -269,7 +268,6 @@ void smp_generic_interrupt(struct pt_regs *regs) if (generic_interrupt_extension) generic_interrupt_extension(); - run_local_timers(); irq_exit(); set_irq_regs(old_regs); diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index d20009b4e6e..b2a71dca564 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c @@ -311,7 +311,7 @@ void pci_iommu_shutdown(void) amd_iommu_shutdown(); } /* Must execute after PCI subsystem */ -fs_initcall(pci_iommu_init); +rootfs_initcall(pci_iommu_init); #ifdef CONFIG_PCI /* Many VIA bridges seem to corrupt data for DAC. 
Disable it here */ diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index d915d956e66..ec1de97600e 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c @@ -198,7 +198,6 @@ void smp_reschedule_interrupt(struct pt_regs *regs) { ack_APIC_irq(); inc_irq_stat(irq_resched_count); - run_local_timers(); /* * KVM uses this interrupt to force a cpu out of guest mode */ diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c index dcb00d27851..be2573448ed 100644 --- a/arch/x86/kernel/time.c +++ b/arch/x86/kernel/time.c @@ -38,7 +38,8 @@ unsigned long profile_pc(struct pt_regs *regs) #ifdef CONFIG_FRAME_POINTER return *(unsigned long *)(regs->bp + sizeof(long)); #else - unsigned long *sp = (unsigned long *)regs->sp; + unsigned long *sp = + (unsigned long *)kernel_stack_pointer(regs); /* * Return address is either directly at stack pointer * or above a saved flags. Eflags has bits 22-31 zero, diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c index 699f7eeb896..cd022121cab 100644 --- a/arch/x86/kernel/trampoline.c +++ b/arch/x86/kernel/trampoline.c @@ -3,8 +3,16 @@ #include <asm/trampoline.h> #include <asm/e820.h> +#if defined(CONFIG_X86_64) && defined(CONFIG_ACPI_SLEEP) +#define __trampinit +#define __trampinitdata +#else +#define __trampinit __cpuinit +#define __trampinitdata __cpuinitdata +#endif + /* ready for x86_64 and x86 */ -unsigned char *__cpuinitdata trampoline_base = __va(TRAMPOLINE_BASE); +unsigned char *__trampinitdata trampoline_base = __va(TRAMPOLINE_BASE); void __init reserve_trampoline_memory(void) { @@ -26,7 +34,7 @@ void __init reserve_trampoline_memory(void) * bootstrap into the page concerned. The caller * has made sure it's suitably aligned. */ -unsigned long __cpuinit setup_trampoline(void) +unsigned long __trampinit setup_trampoline(void) { memcpy(trampoline_base, trampoline_data, TRAMPOLINE_SIZE); return virt_to_phys(trampoline_base); diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S index 596d54c660a..3af2dff58b2 100644 --- a/arch/x86/kernel/trampoline_64.S +++ b/arch/x86/kernel/trampoline_64.S @@ -32,8 +32,12 @@ #include <asm/segment.h> #include <asm/processor-flags.h> +#ifdef CONFIG_ACPI_SLEEP +.section .rodata, "a", @progbits +#else /* We can free up the trampoline after bootup if cpu hotplug is not supported. */ __CPUINITRODATA +#endif .code16 ENTRY(trampoline_data) diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c index 31e6f6cfe53..d430e4c3019 100644 --- a/arch/x86/kernel/vmi_32.c +++ b/arch/x86/kernel/vmi_32.c @@ -648,7 +648,7 @@ static inline int __init activate_vmi(void) pv_info.paravirt_enabled = 1; pv_info.kernel_rpl = kernel_cs & SEGMENT_RPL_MASK; - pv_info.name = "vmi"; + pv_info.name = "vmi [deprecated]"; pv_init_ops.patch = vmi_patch; diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 92929fb3f9f..8d6001ad8d8 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -305,8 +305,8 @@ SECTIONS #ifdef CONFIG_X86_32 -. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE), - "kernel image bigger than KERNEL_IMAGE_SIZE"); +ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE), + "kernel image bigger than KERNEL_IMAGE_SIZE"); #else /* * Per-cpu symbols which need to be offset from __per_cpu_load @@ -319,12 +319,12 @@ INIT_PER_CPU(irq_stack_union); /* * Build-time check on the image size: */ -. 
= ASSERT((_end - _text <= KERNEL_IMAGE_SIZE), - "kernel image bigger than KERNEL_IMAGE_SIZE"); +ASSERT((_end - _text <= KERNEL_IMAGE_SIZE), + "kernel image bigger than KERNEL_IMAGE_SIZE"); #ifdef CONFIG_SMP -. = ASSERT((per_cpu__irq_stack_union == 0), - "irq_stack_union is not at start of per-cpu area"); +ASSERT((per_cpu__irq_stack_union == 0), + "irq_stack_union is not at start of per-cpu area"); #endif #endif /* CONFIG_X86_32 */ @@ -332,7 +332,6 @@ INIT_PER_CPU(irq_stack_union); #ifdef CONFIG_KEXEC #include <asm/kexec.h> -. = ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE, - "kexec control code size is too big"); +ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE, + "kexec control code size is too big"); #endif - diff --git a/arch/x86/mm/testmmiotrace.c b/arch/x86/mm/testmmiotrace.c index 427fd1b56df..8565d944f7c 100644 --- a/arch/x86/mm/testmmiotrace.c +++ b/arch/x86/mm/testmmiotrace.c @@ -1,12 +1,13 @@ /* * Written by Pekka Paalanen, 2008-2009 <pq@iki.fi> */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/module.h> #include <linux/io.h> #include <linux/mmiotrace.h> -#define MODULE_NAME "testmmiotrace" - static unsigned long mmio_address; module_param(mmio_address, ulong, 0); MODULE_PARM_DESC(mmio_address, " Start address of the mapping of 16 kB " @@ -30,7 +31,7 @@ static unsigned v32(unsigned i) static void do_write_test(void __iomem *p) { unsigned int i; - pr_info(MODULE_NAME ": write test.\n"); + pr_info("write test.\n"); mmiotrace_printk("Write test.\n"); for (i = 0; i < 256; i++) @@ -47,7 +48,7 @@ static void do_read_test(void __iomem *p) { unsigned int i; unsigned errs[3] = { 0 }; - pr_info(MODULE_NAME ": read test.\n"); + pr_info("read test.\n"); mmiotrace_printk("Read test.\n"); for (i = 0; i < 256; i++) @@ -68,7 +69,7 @@ static void do_read_test(void __iomem *p) static void do_read_far_test(void __iomem *p) { - pr_info(MODULE_NAME ": read far test.\n"); + pr_info("read far test.\n"); mmiotrace_printk("Read far test.\n"); ioread32(p + read_far); @@ -78,7 +79,7 @@ static void do_test(unsigned long size) { void __iomem *p = ioremap_nocache(mmio_address, size); if (!p) { - pr_err(MODULE_NAME ": could not ioremap, aborting.\n"); + pr_err("could not ioremap, aborting.\n"); return; } mmiotrace_printk("ioremap returned %p.\n", p); @@ -94,24 +95,22 @@ static int __init init(void) unsigned long size = (read_far) ? 
(8 << 20) : (16 << 10); if (mmio_address == 0) { - pr_err(MODULE_NAME ": you have to use the module argument " - "mmio_address.\n"); - pr_err(MODULE_NAME ": DO NOT LOAD THIS MODULE UNLESS" - " YOU REALLY KNOW WHAT YOU ARE DOING!\n"); + pr_err("you have to use the module argument mmio_address.\n"); + pr_err("DO NOT LOAD THIS MODULE UNLESS YOU REALLY KNOW WHAT YOU ARE DOING!\n"); return -ENXIO; } - pr_warning(MODULE_NAME ": WARNING: mapping %lu kB @ 0x%08lx in PCI " - "address space, and writing 16 kB of rubbish in there.\n", - size >> 10, mmio_address); + pr_warning("WARNING: mapping %lu kB @ 0x%08lx in PCI address space, " + "and writing 16 kB of rubbish in there.\n", + size >> 10, mmio_address); do_test(size); - pr_info(MODULE_NAME ": All done.\n"); + pr_info("All done.\n"); return 0; } static void __exit cleanup(void) { - pr_debug(MODULE_NAME ": unloaded.\n"); + pr_debug("unloaded.\n"); } module_init(init); diff --git a/block/blk-core.c b/block/blk-core.c index 81f34311659..ac0fa10f8fa 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -70,7 +70,7 @@ static void drive_stat_acct(struct request *rq, int new_io) part_stat_inc(cpu, part, merges[rw]); else { part_round_stats(cpu, part); - part_inc_in_flight(part); + part_inc_in_flight(part, rw); } part_stat_unlock(); @@ -1030,9 +1030,9 @@ static void part_round_stats_single(int cpu, struct hd_struct *part, if (now == part->stamp) return; - if (part->in_flight) { + if (part_in_flight(part)) { __part_stat_add(cpu, part, time_in_queue, - part->in_flight * (now - part->stamp)); + part_in_flight(part) * (now - part->stamp)); __part_stat_add(cpu, part, io_ticks, (now - part->stamp)); } part->stamp = now; @@ -1739,7 +1739,7 @@ static void blk_account_io_done(struct request *req) part_stat_inc(cpu, part, ios[rw]); part_stat_add(cpu, part, ticks[rw], duration); part_round_stats(cpu, part); - part_dec_in_flight(part); + part_dec_in_flight(part, rw); part_stat_unlock(); } @@ -2492,14 +2492,6 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work) } EXPORT_SYMBOL(kblockd_schedule_work); -int kblockd_schedule_delayed_work(struct request_queue *q, - struct delayed_work *work, - unsigned long delay) -{ - return queue_delayed_work(kblockd_workqueue, work, delay); -} -EXPORT_SYMBOL(kblockd_schedule_delayed_work); - int __init blk_dev_init(void) { BUILD_BUG_ON(__REQ_NR_BITS > 8 * diff --git a/block/blk-merge.c b/block/blk-merge.c index b0de8574fdc..99cb5cf1f44 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -351,7 +351,7 @@ static void blk_account_io_merge(struct request *req) part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req)); part_round_stats(cpu, part); - part_dec_in_flight(part); + part_dec_in_flight(part, rq_data_dir(req)); part_stat_unlock(); } diff --git a/block/blk-settings.c b/block/blk-settings.c index e0695bca702..66d4aa8799b 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -242,7 +242,7 @@ EXPORT_SYMBOL(blk_queue_max_hw_sectors); /** * blk_queue_max_discard_sectors - set max sectors for a single discard * @q: the request queue for the device - * @max_discard: maximum number of sectors to discard + * @max_discard_sectors: maximum number of sectors to discard **/ void blk_queue_max_discard_sectors(struct request_queue *q, unsigned int max_discard_sectors) diff --git a/block/blk-tag.c b/block/blk-tag.c index 2e5cfeb5933..6b0f52c2096 100644 --- a/block/blk-tag.c +++ b/block/blk-tag.c @@ -359,7 +359,7 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq) max_depth -= 2; if 
(!max_depth) max_depth = 1; - if (q->in_flight[0] > max_depth) + if (q->in_flight[BLK_RW_ASYNC] > max_depth) return 1; } diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 9c4b679908f..069a61017c0 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -150,7 +150,7 @@ struct cfq_data { * idle window management */ struct timer_list idle_slice_timer; - struct delayed_work unplug_work; + struct work_struct unplug_work; struct cfq_queue *active_queue; struct cfq_io_context *active_cic; @@ -230,7 +230,7 @@ CFQ_CFQQ_FNS(coop); blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args) static void cfq_dispatch_insert(struct request_queue *, struct request *); -static struct cfq_queue *cfq_get_queue(struct cfq_data *, int, +static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool, struct io_context *, gfp_t); static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *, struct io_context *); @@ -241,40 +241,35 @@ static inline int rq_in_driver(struct cfq_data *cfqd) } static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic, - int is_sync) + bool is_sync) { - return cic->cfqq[!!is_sync]; + return cic->cfqq[is_sync]; } static inline void cic_set_cfqq(struct cfq_io_context *cic, - struct cfq_queue *cfqq, int is_sync) + struct cfq_queue *cfqq, bool is_sync) { - cic->cfqq[!!is_sync] = cfqq; + cic->cfqq[is_sync] = cfqq; } /* * We regard a request as SYNC, if it's either a read or has the SYNC bit * set (in which case it could also be direct WRITE). */ -static inline int cfq_bio_sync(struct bio *bio) +static inline bool cfq_bio_sync(struct bio *bio) { - if (bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO)) - return 1; - - return 0; + return bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO); } /* * scheduler run of queue, if there are requests pending and no one in the * driver that will restart queueing */ -static inline void cfq_schedule_dispatch(struct cfq_data *cfqd, - unsigned long delay) +static inline void cfq_schedule_dispatch(struct cfq_data *cfqd) { if (cfqd->busy_queues) { cfq_log(cfqd, "schedule dispatch"); - kblockd_schedule_delayed_work(cfqd->queue, &cfqd->unplug_work, - delay); + kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work); } } @@ -290,7 +285,7 @@ static int cfq_queue_empty(struct request_queue *q) * if a queue is marked sync and has sync io queued. A sync queue with async * io only, should not get full sync slice length. */ -static inline int cfq_prio_slice(struct cfq_data *cfqd, int sync, +static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync, unsigned short prio) { const int base_slice = cfqd->cfq_slice[sync]; @@ -318,7 +313,7 @@ cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) * isn't valid until the first request from the dispatch is activated * and the slice time set. */ -static inline int cfq_slice_used(struct cfq_queue *cfqq) +static inline bool cfq_slice_used(struct cfq_queue *cfqq) { if (cfq_cfqq_slice_new(cfqq)) return 0; @@ -493,7 +488,7 @@ static unsigned long cfq_slice_offset(struct cfq_data *cfqd, * we will service the queues. */ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq, - int add_front) + bool add_front) { struct rb_node **p, *parent; struct cfq_queue *__cfqq; @@ -509,11 +504,20 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq, } else rb_key += jiffies; } else if (!add_front) { + /* + * Get our rb key offset. Subtract any residual slice + * value carried from last service. 
A negative resid + * count indicates slice overrun, and this should position + * the next service time further away in the tree. + */ rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies; - rb_key += cfqq->slice_resid; + rb_key -= cfqq->slice_resid; cfqq->slice_resid = 0; - } else - rb_key = 0; + } else { + rb_key = -HZ; + __cfqq = cfq_rb_first(&cfqd->service_tree); + rb_key += __cfqq ? __cfqq->rb_key : jiffies; + } if (!RB_EMPTY_NODE(&cfqq->rb_node)) { /* @@ -547,7 +551,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq, n = &(*p)->rb_left; else if (cfq_class_idle(cfqq) > cfq_class_idle(__cfqq)) n = &(*p)->rb_right; - else if (rb_key < __cfqq->rb_key) + else if (time_before(rb_key, __cfqq->rb_key)) n = &(*p)->rb_left; else n = &(*p)->rb_right; @@ -827,8 +831,10 @@ cfq_merged_requests(struct request_queue *q, struct request *rq, * reposition in fifo if next is older than rq */ if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) && - time_before(next->start_time, rq->start_time)) + time_before(rq_fifo_time(next), rq_fifo_time(rq))) { list_move(&rq->queuelist, &next->queuelist); + rq_set_fifo_time(rq, rq_fifo_time(next)); + } cfq_remove_request(next); } @@ -844,7 +850,7 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq, * Disallow merge of a sync bio into an async request. */ if (cfq_bio_sync(bio) && !rq_is_sync(rq)) - return 0; + return false; /* * Lookup the cfqq that this bio will be queued with. Allow @@ -852,13 +858,10 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq, */ cic = cfq_cic_lookup(cfqd, current->io_context); if (!cic) - return 0; + return false; cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio)); - if (cfqq == RQ_CFQQ(rq)) - return 1; - - return 0; + return cfqq == RQ_CFQQ(rq); } static void __cfq_set_active_queue(struct cfq_data *cfqd, @@ -886,7 +889,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd, */ static void __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq, - int timed_out) + bool timed_out) { cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out); @@ -914,7 +917,7 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq, } } -static inline void cfq_slice_expired(struct cfq_data *cfqd, int timed_out) +static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out) { struct cfq_queue *cfqq = cfqd->active_queue; @@ -1026,7 +1029,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd, */ static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd, struct cfq_queue *cur_cfqq, - int probe) + bool probe) { struct cfq_queue *cfqq; @@ -1090,6 +1093,15 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd) if (!cic || !atomic_read(&cic->ioc->nr_tasks)) return; + /* + * If our average think time is larger than the remaining time + * slice, then don't idle. This avoids overrunning the allotted + * time slice. 
+ */ + if (sample_valid(cic->ttime_samples) && + (cfqq->slice_end - jiffies < cic->ttime_mean)) + return; + cfq_mark_cfqq_wait_request(cfqq); /* @@ -1129,9 +1141,7 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq) */ static struct request *cfq_check_fifo(struct cfq_queue *cfqq) { - struct cfq_data *cfqd = cfqq->cfqd; - struct request *rq; - int fifo; + struct request *rq = NULL; if (cfq_cfqq_fifo_expire(cfqq)) return NULL; @@ -1141,13 +1151,11 @@ static struct request *cfq_check_fifo(struct cfq_queue *cfqq) if (list_empty(&cfqq->fifo)) return NULL; - fifo = cfq_cfqq_sync(cfqq); rq = rq_entry_fifo(cfqq->fifo.next); - - if (time_before(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo])) + if (time_before(jiffies, rq_fifo_time(rq))) rq = NULL; - cfq_log_cfqq(cfqd, cfqq, "fifo=%p", rq); + cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq); return rq; } @@ -1248,67 +1256,21 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd) return dispatched; } -/* - * Dispatch a request from cfqq, moving them to the request queue - * dispatch list. - */ -static void cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq) +static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq) { - struct request *rq; - - BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list)); - - /* - * follow expired path, else get first next available - */ - rq = cfq_check_fifo(cfqq); - if (!rq) - rq = cfqq->next_rq; - - /* - * insert request into driver dispatch list - */ - cfq_dispatch_insert(cfqd->queue, rq); - - if (!cfqd->active_cic) { - struct cfq_io_context *cic = RQ_CIC(rq); - - atomic_long_inc(&cic->ioc->refcount); - cfqd->active_cic = cic; - } -} - -/* - * Find the cfqq that we need to service and move a request from that to the - * dispatch list - */ -static int cfq_dispatch_requests(struct request_queue *q, int force) -{ - struct cfq_data *cfqd = q->elevator->elevator_data; - struct cfq_queue *cfqq; unsigned int max_dispatch; - if (!cfqd->busy_queues) - return 0; - - if (unlikely(force)) - return cfq_forced_dispatch(cfqd); - - cfqq = cfq_select_queue(cfqd); - if (!cfqq) - return 0; - /* * Drain async requests before we start sync IO */ if (cfq_cfqq_idle_window(cfqq) && cfqd->rq_in_driver[BLK_RW_ASYNC]) - return 0; + return false; /* * If this is an async queue and we have sync IO in flight, let it wait */ if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq)) - return 0; + return false; max_dispatch = cfqd->cfq_quantum; if (cfq_class_idle(cfqq)) @@ -1322,13 +1284,13 @@ static int cfq_dispatch_requests(struct request_queue *q, int force) * idle queue must always only have a single IO in flight */ if (cfq_class_idle(cfqq)) - return 0; + return false; /* * We have other queues, don't allow more IO from this one */ if (cfqd->busy_queues > 1) - return 0; + return false; /* * Sole queue user, allow bigger slice @@ -1352,13 +1314,72 @@ static int cfq_dispatch_requests(struct request_queue *q, int force) max_dispatch = depth; } - if (cfqq->dispatched >= max_dispatch) + /* + * If we're below the current max, allow a dispatch + */ + return cfqq->dispatched < max_dispatch; +} + +/* + * Dispatch a request from cfqq, moving them to the request queue + * dispatch list. 
+ */ +static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq) +{ + struct request *rq; + + BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list)); + + if (!cfq_may_dispatch(cfqd, cfqq)) + return false; + + /* + * follow expired path, else get first next available + */ + rq = cfq_check_fifo(cfqq); + if (!rq) + rq = cfqq->next_rq; + + /* + * insert request into driver dispatch list + */ + cfq_dispatch_insert(cfqd->queue, rq); + + if (!cfqd->active_cic) { + struct cfq_io_context *cic = RQ_CIC(rq); + + atomic_long_inc(&cic->ioc->refcount); + cfqd->active_cic = cic; + } + + return true; +} + +/* + * Find the cfqq that we need to service and move a request from that to the + * dispatch list + */ +static int cfq_dispatch_requests(struct request_queue *q, int force) +{ + struct cfq_data *cfqd = q->elevator->elevator_data; + struct cfq_queue *cfqq; + + if (!cfqd->busy_queues) + return 0; + + if (unlikely(force)) + return cfq_forced_dispatch(cfqd); + + cfqq = cfq_select_queue(cfqd); + if (!cfqq) return 0; /* - * Dispatch a request from this cfqq + * Dispatch a request from this cfqq, if it is allowed */ - cfq_dispatch_request(cfqd, cfqq); + if (!cfq_dispatch_request(cfqd, cfqq)) + return 0; + cfqq->slice_dispatch++; cfq_clear_cfqq_must_dispatch(cfqq); @@ -1399,7 +1420,7 @@ static void cfq_put_queue(struct cfq_queue *cfqq) if (unlikely(cfqd->active_queue == cfqq)) { __cfq_slice_expired(cfqd, cfqq, 0); - cfq_schedule_dispatch(cfqd, 0); + cfq_schedule_dispatch(cfqd); } kmem_cache_free(cfq_pool, cfqq); @@ -1494,7 +1515,7 @@ static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq) { if (unlikely(cfqq == cfqd->active_queue)) { __cfq_slice_expired(cfqd, cfqq, 0); - cfq_schedule_dispatch(cfqd, 0); + cfq_schedule_dispatch(cfqd); } cfq_put_queue(cfqq); @@ -1658,7 +1679,7 @@ static void cfq_ioc_set_ioprio(struct io_context *ioc) } static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq, - pid_t pid, int is_sync) + pid_t pid, bool is_sync) { RB_CLEAR_NODE(&cfqq->rb_node); RB_CLEAR_NODE(&cfqq->p_node); @@ -1678,7 +1699,7 @@ static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq, } static struct cfq_queue * -cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync, +cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc, gfp_t gfp_mask) { struct cfq_queue *cfqq, *new_cfqq = NULL; @@ -1742,7 +1763,7 @@ cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio) } static struct cfq_queue * -cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct io_context *ioc, +cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc, gfp_t gfp_mask) { const int ioprio = task_ioprio(ioc); @@ -1977,7 +1998,10 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq, (!cfqd->cfq_latency && cfqd->hw_tag && CIC_SEEKY(cic))) enable_idle = 0; else if (sample_valid(cic->ttime_samples)) { - if (cic->ttime_mean > cfqd->cfq_slice_idle) + unsigned int slice_idle = cfqd->cfq_slice_idle; + if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic)) + slice_idle = msecs_to_jiffies(CFQ_MIN_TT); + if (cic->ttime_mean > slice_idle) enable_idle = 0; else enable_idle = 1; @@ -1996,7 +2020,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq, * Check if new_cfqq should preempt the currently active queue. Return 0 for * no or if we aren't sure, a 1 will cause a preempt. 
*/ -static int +static bool cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq, struct request *rq) { @@ -2004,48 +2028,48 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq, cfqq = cfqd->active_queue; if (!cfqq) - return 0; + return false; if (cfq_slice_used(cfqq)) - return 1; + return true; if (cfq_class_idle(new_cfqq)) - return 0; + return false; if (cfq_class_idle(cfqq)) - return 1; + return true; /* * if the new request is sync, but the currently running queue is * not, let the sync request have priority. */ if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq)) - return 1; + return true; /* * So both queues are sync. Let the new request get disk time if * it's a metadata request and the current queue is doing regular IO. */ if (rq_is_meta(rq) && !cfqq->meta_pending) - return 1; + return false; /* * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice. */ if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq)) - return 1; + return true; if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq)) - return 0; + return false; /* * if this request is as-good as one we would expect from the * current cfqq, let it preempt */ if (cfq_rq_close(cfqd, rq)) - return 1; + return true; - return 0; + return false; } /* @@ -2130,6 +2154,7 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq) cfq_add_rq_rb(rq); + rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]); list_add_tail(&rq->queuelist, &cfqq->fifo); cfq_rq_enqueued(cfqd, cfqq, rq); @@ -2211,7 +2236,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq) } if (!rq_in_driver(cfqd)) - cfq_schedule_dispatch(cfqd, 0); + cfq_schedule_dispatch(cfqd); } /* @@ -2309,7 +2334,7 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask) struct cfq_data *cfqd = q->elevator->elevator_data; struct cfq_io_context *cic; const int rw = rq_data_dir(rq); - const int is_sync = rq_is_sync(rq); + const bool is_sync = rq_is_sync(rq); struct cfq_queue *cfqq; unsigned long flags; @@ -2341,7 +2366,7 @@ queue_fail: if (cic) put_io_context(cic->ioc); - cfq_schedule_dispatch(cfqd, 0); + cfq_schedule_dispatch(cfqd); spin_unlock_irqrestore(q->queue_lock, flags); cfq_log(cfqd, "set_request fail"); return 1; @@ -2350,7 +2375,7 @@ queue_fail: static void cfq_kick_queue(struct work_struct *work) { struct cfq_data *cfqd = - container_of(work, struct cfq_data, unplug_work.work); + container_of(work, struct cfq_data, unplug_work); struct request_queue *q = cfqd->queue; spin_lock_irq(q->queue_lock); @@ -2404,7 +2429,7 @@ static void cfq_idle_slice_timer(unsigned long data) expire: cfq_slice_expired(cfqd, timed_out); out_kick: - cfq_schedule_dispatch(cfqd, 0); + cfq_schedule_dispatch(cfqd); out_cont: spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); } @@ -2412,7 +2437,7 @@ out_cont: static void cfq_shutdown_timer_wq(struct cfq_data *cfqd) { del_timer_sync(&cfqd->idle_slice_timer); - cancel_delayed_work_sync(&cfqd->unplug_work); + cancel_work_sync(&cfqd->unplug_work); } static void cfq_put_async_queues(struct cfq_data *cfqd) @@ -2494,7 +2519,7 @@ static void *cfq_init_queue(struct request_queue *q) cfqd->idle_slice_timer.function = cfq_idle_slice_timer; cfqd->idle_slice_timer.data = (unsigned long) cfqd; - INIT_DELAYED_WORK(&cfqd->unplug_work, cfq_kick_queue); + INIT_WORK(&cfqd->unplug_work, cfq_kick_queue); cfqd->cfq_quantum = cfq_quantum; cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0]; diff --git a/block/elevator.c b/block/elevator.c index 
1975b619c86..a847046c6e5 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -1059,9 +1059,7 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name, return count; strlcpy(elevator_name, name, sizeof(elevator_name)); - strstrip(elevator_name); - - e = elevator_get(elevator_name); + e = elevator_get(strstrip(elevator_name)); if (!e) { printk(KERN_ERR "elevator: type %s not found\n", elevator_name); return -EINVAL; diff --git a/block/genhd.c b/block/genhd.c index 5a0861da324..517e4332cb3 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -869,6 +869,7 @@ static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL); static DEVICE_ATTR(alignment_offset, S_IRUGO, disk_alignment_offset_show, NULL); static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL); static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL); +static DEVICE_ATTR(inflight, S_IRUGO, part_inflight_show, NULL); #ifdef CONFIG_FAIL_MAKE_REQUEST static struct device_attribute dev_attr_fail = __ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store); @@ -888,6 +889,7 @@ static struct attribute *disk_attrs[] = { &dev_attr_alignment_offset.attr, &dev_attr_capability.attr, &dev_attr_stat.attr, + &dev_attr_inflight.attr, #ifdef CONFIG_FAIL_MAKE_REQUEST &dev_attr_fail.attr, #endif @@ -1053,7 +1055,7 @@ static int diskstats_show(struct seq_file *seqf, void *v) part_stat_read(hd, merges[1]), (unsigned long long)part_stat_read(hd, sectors[1]), jiffies_to_msecs(part_stat_read(hd, ticks[1])), - hd->in_flight, + part_in_flight(hd), jiffies_to_msecs(part_stat_read(hd, io_ticks)), jiffies_to_msecs(part_stat_read(hd, time_in_queue)) ); diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index fb5be2d95d5..6399e5090df 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -68,6 +68,12 @@ MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400" MODULE_VERSION("3.6.20"); MODULE_LICENSE("GPL"); +static int cciss_allow_hpsa; +module_param(cciss_allow_hpsa, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(cciss_allow_hpsa, + "Prevent cciss driver from accessing hardware known to be " + " supported by the hpsa driver"); + #include "cciss_cmd.h" #include "cciss.h" #include <linux/cciss_ioctl.h> @@ -101,8 +107,6 @@ static const struct pci_device_id cciss_pci_device_id[] = { {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B}, - {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, - PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0}, {0,} }; @@ -123,8 +127,6 @@ static struct board_type products[] = { {0x409D0E11, "Smart Array 6400 EM", &SA5_access}, {0x40910E11, "Smart Array 6i", &SA5_access}, {0x3225103C, "Smart Array P600", &SA5_access}, - {0x3223103C, "Smart Array P800", &SA5_access}, - {0x3234103C, "Smart Array P400", &SA5_access}, {0x3235103C, "Smart Array P400i", &SA5_access}, {0x3211103C, "Smart Array E200i", &SA5_access}, {0x3212103C, "Smart Array E200", &SA5_access}, @@ -132,6 +134,10 @@ static struct board_type products[] = { {0x3214103C, "Smart Array E200i", &SA5_access}, {0x3215103C, "Smart Array E200i", &SA5_access}, {0x3237103C, "Smart Array E500", &SA5_access}, +/* controllers below this line are also supported by the hpsa driver. 
*/ +#define HPSA_BOUNDARY 0x3223103C + {0x3223103C, "Smart Array P800", &SA5_access}, + {0x3234103C, "Smart Array P400", &SA5_access}, {0x323D103C, "Smart Array P700m", &SA5_access}, {0x3241103C, "Smart Array P212", &SA5_access}, {0x3243103C, "Smart Array P410", &SA5_access}, @@ -140,7 +146,6 @@ static struct board_type products[] = { {0x3249103C, "Smart Array P812", &SA5_access}, {0x324A103C, "Smart Array P712m", &SA5_access}, {0x324B103C, "Smart Array P711m", &SA5_access}, - {0xFFFF103C, "Unknown Smart Array", &SA5_access}, }; /* How long to wait (in milliseconds) for board to go into simple mode */ @@ -3754,7 +3759,27 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev) __u64 cfg_offset; __u32 cfg_base_addr; __u64 cfg_base_addr_index; - int i, err; + int i, prod_index, err; + + subsystem_vendor_id = pdev->subsystem_vendor; + subsystem_device_id = pdev->subsystem_device; + board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) | + subsystem_vendor_id); + + for (i = 0; i < ARRAY_SIZE(products); i++) { + /* Stand aside for hpsa driver on request */ + if (cciss_allow_hpsa && products[i].board_id == HPSA_BOUNDARY) + return -ENODEV; + if (board_id == products[i].board_id) + break; + } + prod_index = i; + if (prod_index == ARRAY_SIZE(products)) { + dev_warn(&pdev->dev, + "unrecognized board ID: 0x%08lx, ignoring.\n", + (unsigned long) board_id); + return -ENODEV; + } /* check to see if controller has been disabled */ /* BEFORE trying to enable it */ @@ -3778,11 +3803,6 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev) return err; } - subsystem_vendor_id = pdev->subsystem_vendor; - subsystem_device_id = pdev->subsystem_device; - board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) | - subsystem_vendor_id); - #ifdef CCISS_DEBUG printk("command = %x\n", command); printk("irq = %x\n", pdev->irq); @@ -3868,14 +3888,9 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev) * leave a little room for ioctl calls. */ c->max_commands = readl(&(c->cfgtable->CmdsOutMax)); - for (i = 0; i < ARRAY_SIZE(products); i++) { - if (board_id == products[i].board_id) { - c->product_name = products[i].product_name; - c->access = *(products[i].access); - c->nr_cmds = c->max_commands - 4; - break; - } - } + c->product_name = products[prod_index].product_name; + c->access = *(products[prod_index].access); + c->nr_cmds = c->max_commands - 4; if ((readb(&c->cfgtable->Signature[0]) != 'C') || (readb(&c->cfgtable->Signature[1]) != 'I') || (readb(&c->cfgtable->Signature[2]) != 'S') || @@ -3884,27 +3899,6 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev) err = -ENODEV; goto err_out_free_res; } - /* We didn't find the controller in our list. We know the - * signature is valid. If it's an HP device let's try to - * bind to the device and fire it up. Otherwise we bail. 
- */ - if (i == ARRAY_SIZE(products)) { - if (subsystem_vendor_id == PCI_VENDOR_ID_HP) { - c->product_name = products[i-1].product_name; - c->access = *(products[i-1].access); - c->nr_cmds = c->max_commands - 4; - printk(KERN_WARNING "cciss: This is an unknown " - "Smart Array controller.\n" - "cciss: Please update to the latest driver " - "available from www.hp.com.\n"); - } else { - printk(KERN_WARNING "cciss: Sorry, I don't know how" - " to access the Smart Array controller %08lx\n" - , (unsigned long)board_id); - err = -ENODEV; - goto err_out_free_res; - } - } #ifdef CONFIG_X86 { /* Need to enable prefetch in the SCSI core for 6400 in x86 */ @@ -4254,7 +4248,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, mutex_init(&hba[i]->busy_shutting_down); if (cciss_pci_init(hba[i], pdev) != 0) - goto clean0; + goto clean_no_release_regions; sprintf(hba[i]->devname, "cciss%d", i); hba[i]->ctlr = i; @@ -4391,13 +4385,14 @@ clean2: clean1: cciss_destroy_hba_sysfs_entry(hba[i]); clean0: + pci_release_regions(pdev); +clean_no_release_regions: hba[i]->busy_initializing = 0; /* * Deliberately omit pci_disable_device(): it does something nasty to * Smart Array controllers that pci_enable_device does not undo */ - pci_release_regions(pdev); pci_set_drvdata(pdev, NULL); free_hba(i); return -1; diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c index aac0985a572..31e7c91c2d9 100644 --- a/drivers/char/genrtc.c +++ b/drivers/char/genrtc.c @@ -43,6 +43,7 @@ #define RTC_VERSION "1.07" #include <linux/module.h> +#include <linux/sched.h> #include <linux/errno.h> #include <linux/miscdevice.h> #include <linux/fcntl.h> diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c index e0d0f8b2696..bc4ab3e5455 100644 --- a/drivers/char/rtc.c +++ b/drivers/char/rtc.c @@ -74,6 +74,7 @@ #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/spinlock.h> +#include <linux/sched.h> #include <linux/sysctl.h> #include <linux/wait.h> #include <linux/bcd.h> diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c index fd3dced9777..8c262aaf7c2 100644 --- a/drivers/char/sonypi.c +++ b/drivers/char/sonypi.c @@ -36,6 +36,7 @@ */ #include <linux/module.h> +#include <linux/sched.h> #include <linux/input.h> #include <linux/pci.h> #include <linux/init.h> diff --git a/drivers/char/tty_buffer.c b/drivers/char/tty_buffer.c index 3108991c5c8..66fa4e10d76 100644 --- a/drivers/char/tty_buffer.c +++ b/drivers/char/tty_buffer.c @@ -402,28 +402,26 @@ static void flush_to_ldisc(struct work_struct *work) container_of(work, struct tty_struct, buf.work.work); unsigned long flags; struct tty_ldisc *disc; - struct tty_buffer *tbuf, *head; - char *char_buf; - unsigned char *flag_buf; disc = tty_ldisc_ref(tty); if (disc == NULL) /* !TTY_LDISC */ return; spin_lock_irqsave(&tty->buf.lock, flags); - /* So we know a flush is running */ - set_bit(TTY_FLUSHING, &tty->flags); - head = tty->buf.head; - if (head != NULL) { - tty->buf.head = NULL; - for (;;) { - int count = head->commit - head->read; + + if (!test_and_set_bit(TTY_FLUSHING, &tty->flags)) { + struct tty_buffer *head; + while ((head = tty->buf.head) != NULL) { + int count; + char *char_buf; + unsigned char *flag_buf; + + count = head->commit - head->read; if (!count) { if (head->next == NULL) break; - tbuf = head; - head = head->next; - tty_buffer_free(tty, tbuf); + tty->buf.head = head->next; + tty_buffer_free(tty, head); continue; } /* Ldisc or user is trying to flush the buffers @@ -445,9 +443,9 @@ static void flush_to_ldisc(struct work_struct *work) flag_buf, 
count); spin_lock_irqsave(&tty->buf.lock, flags); } - /* Restore the queue head */ - tty->buf.head = head; + clear_bit(TTY_FLUSHING, &tty->flags); } + /* We may have a deferred request to flush the input buffer, if so pull the chain under the lock and empty the queue */ if (test_bit(TTY_FLUSHPENDING, &tty->flags)) { @@ -455,7 +453,6 @@ static void flush_to_ldisc(struct work_struct *work) clear_bit(TTY_FLUSHPENDING, &tty->flags); wake_up(&tty->read_wait); } - clear_bit(TTY_FLUSHING, &tty->flags); spin_unlock_irqrestore(&tty->buf.lock, flags); tty_ldisc_deref(disc); @@ -471,7 +468,7 @@ static void flush_to_ldisc(struct work_struct *work) */ void tty_flush_to_ldisc(struct tty_struct *tty) { - flush_to_ldisc(&tty->buf.work.work); + flush_delayed_work(&tty->buf.work); } /** diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c index 50f0176de61..98dbbda3ad4 100644 --- a/drivers/firewire/sbp2.c +++ b/drivers/firewire/sbp2.c @@ -188,14 +188,7 @@ static struct fw_device *target_device(struct sbp2_target *tgt) /* Impossible login_id, to detect logout attempt before successful login */ #define INVALID_LOGIN_ID 0x10000 -/* - * Per section 7.4.8 of the SBP-2 spec, a mgt_ORB_timeout value can be - * provided in the config rom. Most devices do provide a value, which - * we'll use for login management orbs, but with some sane limits. - */ -#define SBP2_MIN_LOGIN_ORB_TIMEOUT 5000U /* Timeout in ms */ -#define SBP2_MAX_LOGIN_ORB_TIMEOUT 40000U /* Timeout in ms */ -#define SBP2_ORB_TIMEOUT 2000U /* Timeout in ms */ +#define SBP2_ORB_TIMEOUT 2000U /* Timeout in ms */ #define SBP2_ORB_NULL 0x80000000 #define SBP2_RETRY_LIMIT 0xf /* 15 retries */ #define SBP2_CYCLE_LIMIT (0xc8 << 12) /* 200 125us cycles */ @@ -1034,7 +1027,6 @@ static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory, { struct fw_csr_iterator ci; int key, value; - unsigned int timeout; fw_csr_iterator_init(&ci, directory); while (fw_csr_iterator_next(&ci, &key, &value)) { @@ -1059,17 +1051,7 @@ static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory, case SBP2_CSR_UNIT_CHARACTERISTICS: /* the timeout value is stored in 500ms units */ - timeout = ((unsigned int) value >> 8 & 0xff) * 500; - timeout = max(timeout, SBP2_MIN_LOGIN_ORB_TIMEOUT); - tgt->mgt_orb_timeout = - min(timeout, SBP2_MAX_LOGIN_ORB_TIMEOUT); - - if (timeout > tgt->mgt_orb_timeout) - fw_notify("%s: config rom contains %ds " - "management ORB timeout, limiting " - "to %ds\n", tgt->bus_id, - timeout / 1000, - tgt->mgt_orb_timeout / 1000); + tgt->mgt_orb_timeout = (value >> 8 & 0xff) * 500; break; case SBP2_CSR_LOGICAL_UNIT_NUMBER: @@ -1087,6 +1069,22 @@ static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory, return 0; } +/* + * Per section 7.4.8 of the SBP-2 spec, a mgt_ORB_timeout value can be + * provided in the config rom. Most devices do provide a value, which + * we'll use for login management orbs, but with some sane limits. 
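[Editor's aside, not part of the patch: the clamping that the new sbp2_clamp_management_orb_timeout() helper introduced just after this comment performs can be seen in isolation in a small standalone C sketch. The 5000/40000 ms limits and the 500 ms config-ROM unit come from the driver; the function shape and the sample values here are otherwise illustrative.]

#include <stdio.h>

static unsigned int clamp_val(unsigned int val, unsigned int lo, unsigned int hi)
{
	return val < lo ? lo : (val > hi ? hi : val);
}

/* Warn when the device-provided value is out of range, then clamp it to the
 * window the driver is willing to use for login management ORBs. */
static unsigned int clamp_mgt_orb_timeout(unsigned int timeout_ms)
{
	if (timeout_ms > 40000)
		fprintf(stderr, "%us mgt_ORB_timeout limited to 40s\n",
			timeout_ms / 1000);
	return clamp_val(timeout_ms, 5000, 40000);
}

int main(void)
{
	/* config ROM stores the value in 500 ms units; 0xff * 500 = 127500 ms */
	printf("%u\n", clamp_mgt_orb_timeout(0xff * 500));	/* 40000 */
	printf("%u\n", clamp_mgt_orb_timeout(2 * 500));		/* 5000  */
	return 0;
}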
+ */ +static void sbp2_clamp_management_orb_timeout(struct sbp2_target *tgt) +{ + unsigned int timeout = tgt->mgt_orb_timeout; + + if (timeout > 40000) + fw_notify("%s: %ds mgt_ORB_timeout limited to 40s\n", + tgt->bus_id, timeout / 1000); + + tgt->mgt_orb_timeout = clamp_val(timeout, 5000, 40000); +} + static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model, u32 firmware_revision) { @@ -1171,6 +1169,7 @@ static int sbp2_probe(struct device *dev) &firmware_revision) < 0) goto fail_tgt_put; + sbp2_clamp_management_orb_timeout(tgt); sbp2_init_workarounds(tgt, model, firmware_revision); /* diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index be34d32906b..7d05c4bb201 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1066,7 +1066,7 @@ EXPORT_SYMBOL_GPL(hid_report_raw_event); * @type: HID report type (HID_*_REPORT) * @data: report contents * @size: size of data parameter - * @interrupt: called from atomic? + * @interrupt: distinguish between interrupt and control transfers * * This is data entry for lower layers. */ diff --git a/drivers/hid/hid-twinhan.c b/drivers/hid/hid-twinhan.c index b05f602c051..c40afc57fc8 100644 --- a/drivers/hid/hid-twinhan.c +++ b/drivers/hid/hid-twinhan.c @@ -132,12 +132,12 @@ static struct hid_driver twinhan_driver = { .input_mapping = twinhan_input_mapping, }; -static int twinhan_init(void) +static int __init twinhan_init(void) { return hid_register_driver(&twinhan_driver); } -static void twinhan_exit(void) +static void __exit twinhan_exit(void) { hid_unregister_driver(&twinhan_driver); } diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c index ba05275e510..cdd136942bc 100644 --- a/drivers/hid/hidraw.c +++ b/drivers/hid/hidraw.c @@ -48,10 +48,9 @@ static ssize_t hidraw_read(struct file *file, char __user *buffer, size_t count, char *report; DECLARE_WAITQUEUE(wait, current); - while (ret == 0) { - - mutex_lock(&list->read_mutex); + mutex_lock(&list->read_mutex); + while (ret == 0) { if (list->head == list->tail) { add_wait_queue(&list->hidraw->wait, &wait); set_current_state(TASK_INTERRUPTIBLE); diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 23e76fe0d35..376f1ab48a2 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -130,7 +130,7 @@ struct mapped_device { /* * A list of ios that arrived while we were suspended. */ - atomic_t pending; + atomic_t pending[2]; wait_queue_head_t wait; struct work_struct work; struct bio_list deferred; @@ -453,13 +453,14 @@ static void start_io_acct(struct dm_io *io) { struct mapped_device *md = io->md; int cpu; + int rw = bio_data_dir(io->bio); io->start_time = jiffies; cpu = part_stat_lock(); part_round_stats(cpu, &dm_disk(md)->part0); part_stat_unlock(); - dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending); + dm_disk(md)->part0.in_flight[rw] = atomic_inc_return(&md->pending[rw]); } static void end_io_acct(struct dm_io *io) @@ -479,8 +480,9 @@ static void end_io_acct(struct dm_io *io) * After this is decremented the bio must not be touched if it is * a barrier. 
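[Editor's aside, not part of the patch: the device-mapper change around this point splits the single pending counter into pending[2], one slot per data direction, and sums both directions when deciding whether anything is still in flight. A minimal userspace sketch of that accounting shape, using C11 atomics instead of the kernel's atomic_t; the helper names are illustrative.]

#include <stdatomic.h>
#include <stdio.h>

enum { IO_READ = 0, IO_WRITE = 1 };

static atomic_int pending[2];		/* one in-flight counter per direction */

/* Account a request as started; returns the new in-flight count for rw. */
static int start_io_acct(int rw)
{
	return atomic_fetch_add(&pending[rw], 1) + 1;
}

/* Account a request as finished; returns how many requests of either
 * direction are still outstanding (the patch adds pending[rw ^ 0x1] too). */
static int end_io_acct(int rw)
{
	int left = atomic_fetch_sub(&pending[rw], 1) - 1;
	return left + atomic_load(&pending[rw ^ 0x1]);
}

int main(void)
{
	start_io_acct(IO_READ);
	start_io_acct(IO_WRITE);
	/* one read completes; the write is still in flight, so total is 1 */
	printf("still pending: %d\n", end_io_acct(IO_READ));
	return 0;
}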
*/ - dm_disk(md)->part0.in_flight = pending = - atomic_dec_return(&md->pending); + dm_disk(md)->part0.in_flight[rw] = pending = + atomic_dec_return(&md->pending[rw]); + pending += atomic_read(&md->pending[rw^0x1]); /* nudge anyone waiting on suspend queue */ if (!pending) @@ -1785,7 +1787,8 @@ static struct mapped_device *alloc_dev(int minor) if (!md->disk) goto bad_disk; - atomic_set(&md->pending, 0); + atomic_set(&md->pending[0], 0); + atomic_set(&md->pending[1], 0); init_waitqueue_head(&md->wait); INIT_WORK(&md->work, dm_wq_work); init_waitqueue_head(&md->eventq); @@ -2088,7 +2091,8 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible) break; } spin_unlock_irqrestore(q->queue_lock, flags); - } else if (!atomic_read(&md->pending)) + } else if (!atomic_read(&md->pending[0]) && + !atomic_read(&md->pending[1])) break; if (interruptible == TASK_INTERRUPTIBLE && diff --git a/drivers/mfd/twl4030-core.c b/drivers/mfd/twl4030-core.c index e424cf6d8e9..e832e975da6 100644 --- a/drivers/mfd/twl4030-core.c +++ b/drivers/mfd/twl4030-core.c @@ -480,7 +480,6 @@ static int add_children(struct twl4030_platform_data *pdata, unsigned long features) { struct device *child; - struct device *usb_transceiver = NULL; if (twl_has_bci() && pdata->bci && !(features & TPS_SUBSET)) { child = add_child(3, "twl4030_bci", @@ -532,16 +531,61 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features) } if (twl_has_usb() && pdata->usb) { + + static struct regulator_consumer_supply usb1v5 = { + .supply = "usb1v5", + }; + static struct regulator_consumer_supply usb1v8 = { + .supply = "usb1v8", + }; + static struct regulator_consumer_supply usb3v1 = { + .supply = "usb3v1", + }; + + /* First add the regulators so that they can be used by transceiver */ + if (twl_has_regulator()) { + /* this is a template that gets copied */ + struct regulator_init_data usb_fixed = { + .constraints.valid_modes_mask = + REGULATOR_MODE_NORMAL + | REGULATOR_MODE_STANDBY, + .constraints.valid_ops_mask = + REGULATOR_CHANGE_MODE + | REGULATOR_CHANGE_STATUS, + }; + + child = add_regulator_linked(TWL4030_REG_VUSB1V5, + &usb_fixed, &usb1v5, 1); + if (IS_ERR(child)) + return PTR_ERR(child); + + child = add_regulator_linked(TWL4030_REG_VUSB1V8, + &usb_fixed, &usb1v8, 1); + if (IS_ERR(child)) + return PTR_ERR(child); + + child = add_regulator_linked(TWL4030_REG_VUSB3V1, + &usb_fixed, &usb3v1, 1); + if (IS_ERR(child)) + return PTR_ERR(child); + + } + child = add_child(0, "twl4030_usb", pdata->usb, sizeof(*pdata->usb), true, /* irq0 = USB_PRES, irq1 = USB */ pdata->irq_base + 8 + 2, pdata->irq_base + 4); + if (IS_ERR(child)) return PTR_ERR(child); /* we need to connect regulators to this transceiver */ - usb_transceiver = child; + if (twl_has_regulator() && child) { + usb1v5.dev = child; + usb1v8.dev = child; + usb3v1.dev = child; + } } if (twl_has_watchdog()) { @@ -580,47 +624,6 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features) return PTR_ERR(child); } - if (twl_has_regulator() && usb_transceiver) { - static struct regulator_consumer_supply usb1v5 = { - .supply = "usb1v5", - }; - static struct regulator_consumer_supply usb1v8 = { - .supply = "usb1v8", - }; - static struct regulator_consumer_supply usb3v1 = { - .supply = "usb3v1", - }; - - /* this is a template that gets copied */ - struct regulator_init_data usb_fixed = { - .constraints.valid_modes_mask = - REGULATOR_MODE_NORMAL - | REGULATOR_MODE_STANDBY, - .constraints.valid_ops_mask = - REGULATOR_CHANGE_MODE - | 
REGULATOR_CHANGE_STATUS, - }; - - usb1v5.dev = usb_transceiver; - usb1v8.dev = usb_transceiver; - usb3v1.dev = usb_transceiver; - - child = add_regulator_linked(TWL4030_REG_VUSB1V5, &usb_fixed, - &usb1v5, 1); - if (IS_ERR(child)) - return PTR_ERR(child); - - child = add_regulator_linked(TWL4030_REG_VUSB1V8, &usb_fixed, - &usb1v8, 1); - if (IS_ERR(child)) - return PTR_ERR(child); - - child = add_regulator_linked(TWL4030_REG_VUSB3V1, &usb_fixed, - &usb3v1, 1); - if (IS_ERR(child)) - return PTR_ERR(child); - } - /* maybe add LDOs that are omitted on cost-reduced parts */ if (twl_has_regulator() && !(features & TPS_SUBSET)) { child = add_regulator(TWL4030_REG_VPLL2, pdata->vpll2); diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index 5e0b1529964..b00d6731905 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c @@ -693,7 +693,7 @@ static int pxamci_probe(struct platform_device *pdev) if (gpio_is_valid(gpio_ro)) { ret = gpio_request(gpio_ro, "mmc card read only"); if (ret) { - dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_power); + dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro); goto err_gpio_ro; } gpio_direction_input(gpio_ro); @@ -701,7 +701,7 @@ static int pxamci_probe(struct platform_device *pdev) if (gpio_is_valid(gpio_cd)) { ret = gpio_request(gpio_cd, "mmc card detect"); if (ret) { - dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_power); + dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_cd); goto err_gpio_cd; } gpio_direction_input(gpio_cd); diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c index 9693b0fd323..0bd898c9475 100644 --- a/drivers/net/wan/c101.c +++ b/drivers/net/wan/c101.c @@ -16,6 +16,7 @@ #include <linux/module.h> #include <linux/kernel.h> +#include <linux/capability.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/string.h> diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c index 83da596e205..58c66819f39 100644 --- a/drivers/net/wan/n2.c +++ b/drivers/net/wan/n2.c @@ -18,6 +18,7 @@ #include <linux/module.h> #include <linux/kernel.h> +#include <linux/capability.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/fcntl.h> diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c index a52f29c72c3..f1340faaf02 100644 --- a/drivers/net/wan/pci200syn.c +++ b/drivers/net/wan/pci200syn.c @@ -16,6 +16,7 @@ #include <linux/module.h> #include <linux/kernel.h> +#include <linux/capability.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/fcntl.h> diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c index 2b7ae366ceb..5df60a6b677 100644 --- a/drivers/oprofile/event_buffer.c +++ b/drivers/oprofile/event_buffer.c @@ -35,12 +35,23 @@ static size_t buffer_pos; /* atomic_t because wait_event checks it outside of buffer_mutex */ static atomic_t buffer_ready = ATOMIC_INIT(0); -/* Add an entry to the event buffer. When we - * get near to the end we wake up the process - * sleeping on the read() of the file. +/* + * Add an entry to the event buffer. When we get near to the end we + * wake up the process sleeping on the read() of the file. To protect + * the event_buffer this function may only be called when buffer_mutex + * is set. */ void add_event_entry(unsigned long value) { + /* + * This shouldn't happen since all workqueues or handlers are + * canceled or flushed before the event buffer is freed. 
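[Editor's aside, not part of the patch: the oprofile rework around this point closes a use-after-free by freeing event_buffer under buffer_mutex and making later users re-check the pointer. A condensed pthread sketch of that shape follows; unlike the driver, where callers of add_event_entry() already hold buffer_mutex, the sketch takes the lock inside the helper so it stays self-contained.]

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t buffer_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned long *event_buffer;
static size_t buffer_pos, buffer_size = 16;

static void add_event_entry(unsigned long value)
{
	pthread_mutex_lock(&buffer_mutex);
	if (!event_buffer) {			/* already freed: drop the event */
		pthread_mutex_unlock(&buffer_mutex);
		return;
	}
	if (buffer_pos < buffer_size)
		event_buffer[buffer_pos++] = value;
	pthread_mutex_unlock(&buffer_mutex);
}

static void free_event_buffer(void)
{
	pthread_mutex_lock(&buffer_mutex);	/* free and clear under the same lock */
	free(event_buffer);
	event_buffer = NULL;
	buffer_pos = 0;
	pthread_mutex_unlock(&buffer_mutex);
}

int main(void)
{
	event_buffer = calloc(buffer_size, sizeof(*event_buffer));
	add_event_entry(42);
	free_event_buffer();
	add_event_entry(7);			/* safely ignored, no use-after-free */
	puts("ok");
	return 0;
}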
+ */ + if (!event_buffer) { + WARN_ON_ONCE(1); + return; + } + if (buffer_pos == buffer_size) { atomic_inc(&oprofile_stats.event_lost_overflow); return; @@ -69,7 +80,6 @@ void wake_up_buffer_waiter(void) int alloc_event_buffer(void) { - int err = -ENOMEM; unsigned long flags; spin_lock_irqsave(&oprofilefs_lock, flags); @@ -80,21 +90,22 @@ int alloc_event_buffer(void) if (buffer_watershed >= buffer_size) return -EINVAL; + buffer_pos = 0; event_buffer = vmalloc(sizeof(unsigned long) * buffer_size); if (!event_buffer) - goto out; + return -ENOMEM; - err = 0; -out: - return err; + return 0; } void free_event_buffer(void) { + mutex_lock(&buffer_mutex); vfree(event_buffer); - + buffer_pos = 0; event_buffer = NULL; + mutex_unlock(&buffer_mutex); } @@ -167,6 +178,12 @@ static ssize_t event_buffer_read(struct file *file, char __user *buf, mutex_lock(&buffer_mutex); + /* May happen if the buffer is freed during pending reads. */ + if (!event_buffer) { + retval = -EINTR; + goto out; + } + atomic_set(&buffer_ready, 0); retval = -EFAULT; diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c index 14bbaa17e2c..22b02c6df85 100644 --- a/drivers/pci/dmar.c +++ b/drivers/pci/dmar.c @@ -354,6 +354,7 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header) struct acpi_dmar_hardware_unit *drhd; struct acpi_dmar_reserved_memory *rmrr; struct acpi_dmar_atsr *atsr; + struct acpi_dmar_rhsa *rhsa; switch (header->type) { case ACPI_DMAR_TYPE_HARDWARE_UNIT: @@ -375,6 +376,12 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header) atsr = container_of(header, struct acpi_dmar_atsr, header); printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags); break; + case ACPI_DMAR_HARDWARE_AFFINITY: + rhsa = container_of(header, struct acpi_dmar_rhsa, header); + printk(KERN_INFO PREFIX "RHSA base: %#016Lx proximity domain: %#x\n", + (unsigned long long)rhsa->base_address, + rhsa->proximity_domain); + break; } } @@ -459,9 +466,13 @@ parse_dmar_table(void) ret = dmar_parse_one_atsr(entry_header); #endif break; + case ACPI_DMAR_HARDWARE_AFFINITY: + /* We don't do anything with RHSA (yet?) */ + break; default: printk(KERN_WARNING PREFIX - "Unknown DMAR structure type\n"); + "Unknown DMAR structure type %d\n", + entry_header->type); ret = 0; /* for forward compatibility */ break; } diff --git a/drivers/pci/hotplug/cpqphp.h b/drivers/pci/hotplug/cpqphp.h index 53836001d51..9c6a9fd2681 100644 --- a/drivers/pci/hotplug/cpqphp.h +++ b/drivers/pci/hotplug/cpqphp.h @@ -32,6 +32,7 @@ #include <asm/io.h> /* for read? and write? 
functions */ #include <linux/delay.h> /* for delays */ #include <linux/mutex.h> +#include <linux/sched.h> /* for signal_pending() */ #define MY_NAME "cpqphp" diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index 855dd7ca47f..b1e97e68250 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c @@ -48,6 +48,7 @@ #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY) #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) +#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e) #define IOAPIC_RANGE_START (0xfee00000) #define IOAPIC_RANGE_END (0xfeefffff) @@ -94,6 +95,7 @@ static inline unsigned long virt_to_dma_pfn(void *p) /* global iommu list, set NULL for ignored DMAR units */ static struct intel_iommu **g_iommus; +static void __init check_tylersburg_isoch(void); static int rwbf_quirk; /* @@ -1934,6 +1936,9 @@ error: } static int iommu_identity_mapping; +#define IDENTMAP_ALL 1 +#define IDENTMAP_GFX 2 +#define IDENTMAP_AZALIA 4 static int iommu_domain_identity_map(struct dmar_domain *domain, unsigned long long start, @@ -2151,8 +2156,14 @@ static int domain_add_dev_info(struct dmar_domain *domain, static int iommu_should_identity_map(struct pci_dev *pdev, int startup) { - if (iommu_identity_mapping == 2) - return IS_GFX_DEVICE(pdev); + if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev)) + return 1; + + if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev)) + return 1; + + if (!(iommu_identity_mapping & IDENTMAP_ALL)) + return 0; /* * We want to start off with all devices in the 1:1 domain, and @@ -2332,11 +2343,14 @@ int __init init_dmars(void) } if (iommu_pass_through) - iommu_identity_mapping = 1; + iommu_identity_mapping |= IDENTMAP_ALL; + #ifdef CONFIG_DMAR_BROKEN_GFX_WA - else - iommu_identity_mapping = 2; + iommu_identity_mapping |= IDENTMAP_GFX; #endif + + check_tylersburg_isoch(); + /* * If pass through is not set or not enabled, setup context entries for * identity mappings for rmrr, gfx, and isa and may fall back to static @@ -3670,3 +3684,61 @@ static void __devinit quirk_iommu_rwbf(struct pci_dev *dev) } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf); + +/* On Tylersburg chipsets, some BIOSes have been known to enable the + ISOCH DMAR unit for the Azalia sound device, but not give it any + TLB entries, which causes it to deadlock. Check for that. We do + this in a function called from init_dmars(), instead of in a PCI + quirk, because we don't want to print the obnoxious "BIOS broken" + message if VT-d is actually disabled. +*/ +static void __init check_tylersburg_isoch(void) +{ + struct pci_dev *pdev; + uint32_t vtisochctrl; + + /* If there's no Azalia in the system anyway, forget it. */ + pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL); + if (!pdev) + return; + pci_dev_put(pdev); + + /* System Management Registers. Might be hidden, in which case + we can't do the sanity check. But that's OK, because the + known-broken BIOSes _don't_ actually hide it, so far. */ + pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL); + if (!pdev) + return; + + if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) { + pci_dev_put(pdev); + return; + } + + pci_dev_put(pdev); + + /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */ + if (vtisochctrl & 1) + return; + + /* Drop all bits other than the number of TLB entries */ + vtisochctrl &= 0x1c; + + /* If we have the recommended number of TLB entries (16), fine. 
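[Editor's aside, not part of the patch: the Tylersburg check added here boils down to decoding one register value — bit 0 means Azalia DMA is routed to the non-isoch DMAR unit, and the field masked with 0x1c carries the TLB allocation, where 0x10 corresponds to the recommended 16 entries. A standalone sketch of that decode; the function name and return codes are illustrative, the bit tests mirror the quirk.]

#include <stdint.h>
#include <stdio.h>

/* 0: configuration is fine, 1: no TLB space at all, identity-map Azalia,
 * 2: some TLB entries but fewer than recommended, warn only. */
static int check_vtisochctrl(uint32_t vtisochctrl)
{
	if (vtisochctrl & 1)		/* Azalia routed to the non-isoch unit */
		return 0;
	vtisochctrl &= 0x1c;		/* keep only the TLB-entry field */
	if (vtisochctrl == 0x10)	/* recommended allocation (16 entries) */
		return 0;
	if (!vtisochctrl)		/* zero entries: the deadlock case */
		return 1;
	return 2;
}

int main(void)
{
	printf("%d %d %d\n", check_vtisochctrl(0x11),
	       check_vtisochctrl(0x00), check_vtisochctrl(0x08));	/* 0 1 2 */
	return 0;
}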
*/ + if (vtisochctrl == 0x10) + return; + + /* Zero TLB entries? You get to ride the short bus to school. */ + if (!vtisochctrl) { + WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n" + "BIOS vendor: %s; Ver: %s; Product Version: %s\n", + dmi_get_system_info(DMI_BIOS_VENDOR), + dmi_get_system_info(DMI_BIOS_VERSION), + dmi_get_system_info(DMI_PRODUCT_VERSION)); + iommu_identity_mapping |= IDENTMAP_AZALIA; + return; + } + + printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n", + vtisochctrl); +} diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 6edecff0b41..4e4c295a049 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -513,7 +513,11 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state) else if (state == PCI_D2 || dev->current_state == PCI_D2) udelay(PCI_PM_D2_DELAY); - dev->current_state = state; + pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); + dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK); + if (dev->current_state != state && printk_ratelimit()) + dev_info(&dev->dev, "Refused to change power state, " + "currently in D%d\n", dev->current_state); /* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning @@ -2542,10 +2546,10 @@ int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type) /** * pci_set_vga_state - set VGA decode state on device and parents if requested - * @dev the PCI device - * @decode - true = enable decoding, false = disable decoding - * @command_bits PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY - * @change_bridge - traverse ancestors and change bridges + * @dev: the PCI device + * @decode: true = enable decoding, false = disable decoding + * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY + * @change_bridge: traverse ancestors and change bridges */ int pci_set_vga_state(struct pci_dev *dev, bool decode, unsigned int command_bits, bool change_bridge) @@ -2719,17 +2723,6 @@ int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev) return 1; } -static int __devinit pci_init(void) -{ - struct pci_dev *dev = NULL; - - while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { - pci_fixup_device(pci_fixup_final, dev); - } - - return 0; -} - static int __init pci_setup(char *str) { while (str) { @@ -2767,8 +2760,6 @@ static int __init pci_setup(char *str) } early_param("pci", pci_setup); -device_initcall(pci_init); - EXPORT_SYMBOL(pci_reenable_device); EXPORT_SYMBOL(pci_enable_device_io); EXPORT_SYMBOL(pci_enable_device_mem); diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c index d49ecc94bd4..40c3cc5d1ca 100644 --- a/drivers/pci/pcie/aer/aerdrv.c +++ b/drivers/pci/pcie/aer/aerdrv.c @@ -53,7 +53,7 @@ static struct pci_error_handlers aer_error_handlers = { static struct pcie_port_service_driver aerdriver = { .name = "aer", - .port_type = PCIE_ANY_PORT, + .port_type = PCIE_RC_PORT, .service = PCIE_PORT_SERVICE_AER, .probe = aer_probe, diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index 6df5c984a79..f635e476d63 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c @@ -30,7 +30,6 @@ MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); /* global data */ -static const char device_name[] = "pcieport-driver"; static int pcie_portdrv_restore_config(struct pci_dev *dev) { @@ -262,7 +261,7 @@ static struct pci_error_handlers pcie_portdrv_err_handler = { }; static struct 
pci_driver pcie_portdriver = { - .name = (char *)device_name, + .name = "pcieport", .id_table = &port_pci_ids[0], .probe = pcie_portdrv_probe, diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 6099facecd7..a790b1771f9 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -670,6 +670,25 @@ static void __devinit quirk_vt8235_acpi(struct pci_dev *dev) } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, quirk_vt8235_acpi); +/* + * TI XIO2000a PCIe-PCI Bridge erroneously reports it supports fast back-to-back: + * Disable fast back-to-back on the secondary bus segment + */ +static void __devinit quirk_xio2000a(struct pci_dev *dev) +{ + struct pci_dev *pdev; + u16 command; + + dev_warn(&dev->dev, "TI XIO2000a quirk detected; " + "secondary bus fast back-to-back transfers disabled\n"); + list_for_each_entry(pdev, &dev->subordinate->devices, bus_list) { + pci_read_config_word(pdev, PCI_COMMAND, &command); + if (command & PCI_COMMAND_FAST_BACK) + pci_write_config_word(pdev, PCI_COMMAND, command & ~PCI_COMMAND_FAST_BACK); + } +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XIO2000A, + quirk_xio2000a); #ifdef CONFIG_X86_IO_APIC @@ -2572,6 +2591,19 @@ void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) } pci_do_fixups(dev, start, end); } + +static int __init pci_apply_final_quirks(void) +{ + struct pci_dev *dev = NULL; + + while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { + pci_fixup_device(pci_fixup_final, dev); + } + + return 0; +} + +fs_initcall_sync(pci_apply_final_quirks); #else void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) {} #endif diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index cb1a027eb55..0959430534b 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -299,8 +299,17 @@ static struct resource *find_free_bus_resource(struct pci_bus *bus, unsigned lon r = bus->resource[i]; if (r == &ioport_resource || r == &iomem_resource) continue; - if (r && (r->flags & type_mask) == type && !r->parent) - return r; + if (r && (r->flags & type_mask) == type) { + if (!r->parent) + return r; + /* + * if there is no child under that, we should release + * and use it. don't need to reset it, pbus_size_* will + * set it again + */ + if (!r->child && !release_resource(r)) + return r; + } } return NULL; } diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index 706f82d8111..c54526b206b 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c @@ -205,43 +205,6 @@ int pci_assign_resource(struct pci_dev *dev, int resno) return ret; } -#if 0 -int pci_assign_resource_fixed(struct pci_dev *dev, int resno) -{ - struct pci_bus *bus = dev->bus; - struct resource *res = dev->resource + resno; - unsigned int type_mask; - int i, ret = -EBUSY; - - type_mask = IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH; - - for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) { - struct resource *r = bus->resource[i]; - if (!r) - continue; - - /* type_mask must match */ - if ((res->flags ^ r->flags) & type_mask) - continue; - - ret = request_resource(r, res); - - if (ret == 0) - break; - } - - if (ret) { - dev_err(&dev->dev, "BAR %d: can't allocate %s resource %pR\n", - resno, res->flags & IORESOURCE_IO ? 
"I/O" : "mem", res); - } else if (resno < PCI_BRIDGE_RESOURCES) { - pci_update_resource(dev, resno); - } - - return ret; -} -EXPORT_SYMBOL_GPL(pci_assign_resource_fixed); -#endif - /* Sort resources by alignment */ void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head) { diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 53b8c255360..aaccc8ecfa8 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -2533,6 +2533,7 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device, { struct dasd_ccw_req *cqr; struct ccw1 *ccw; + unsigned long *idaw; cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device); @@ -2546,9 +2547,17 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device, ccw = cqr->cpaddr; ccw->cmd_code = CCW_CMD_RDC; - ccw->cda = (__u32)(addr_t)rdc_buffer; - ccw->count = rdc_buffer_size; + if (idal_is_needed(rdc_buffer, rdc_buffer_size)) { + idaw = (unsigned long *) (cqr->data); + ccw->cda = (__u32)(addr_t) idaw; + ccw->flags = CCW_FLAG_IDA; + idaw = idal_create_words(idaw, rdc_buffer, rdc_buffer_size); + } else { + ccw->cda = (__u32)(addr_t) rdc_buffer; + ccw->flags = 0; + } + ccw->count = rdc_buffer_size; cqr->startdev = device; cqr->memdev = device; cqr->expires = 10*HZ; diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 0be7c15f45c..417b97cd3f9 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -3216,6 +3216,7 @@ int dasd_eckd_restore_device(struct dasd_device *device) struct dasd_eckd_characteristics temp_rdc_data; int is_known, rc; struct dasd_uid temp_uid; + unsigned long flags; private = (struct dasd_eckd_private *) device->private; @@ -3228,7 +3229,8 @@ int dasd_eckd_restore_device(struct dasd_device *device) rc = dasd_eckd_generate_uid(device, &private->uid); dasd_get_uid(device->cdev, &temp_uid); if (memcmp(&private->uid, &temp_uid, sizeof(struct dasd_uid)) != 0) - dev_err(&device->cdev->dev, "The UID of the DASD has changed\n"); + dev_err(&device->cdev->dev, "The UID of the DASD has " + "changed\n"); if (rc) goto out_err; dasd_set_uid(device->cdev, &private->uid); @@ -3256,9 +3258,9 @@ int dasd_eckd_restore_device(struct dasd_device *device) "device: %s", rc, dev_name(&device->cdev->dev)); goto out_err; } - spin_lock(get_ccwdev_lock(device->cdev)); + spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); memcpy(&private->rdc_data, &temp_rdc_data, sizeof(temp_rdc_data)); - spin_unlock(get_ccwdev_lock(device->cdev)); + spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); /* add device to alias management */ dasd_alias_add_device(device); diff --git a/drivers/s390/char/sclp_async.c b/drivers/s390/char/sclp_async.c index daaec185ed3..a4f68e5b9c9 100644 --- a/drivers/s390/char/sclp_async.c +++ b/drivers/s390/char/sclp_async.c @@ -62,7 +62,7 @@ static struct notifier_block call_home_panic_nb = { .priority = INT_MAX, }; -static int proc_handler_callhome(ctl_table *ctl, int write, struct file *filp, +static int proc_handler_callhome(struct ctl_table *ctl, int write, void __user *buffer, size_t *count, loff_t *ppos) { @@ -100,7 +100,7 @@ static struct ctl_table callhome_table[] = { { .procname = "callhome", .mode = 0644, - .proc_handler = &proc_handler_callhome, + .proc_handler = proc_handler_callhome, }, { .ctl_name = 0 } }; diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c index 178724f2a4c..b9d2a007e93 100644 --- a/drivers/s390/char/sclp_vt220.c +++ 
b/drivers/s390/char/sclp_vt220.c @@ -705,21 +705,6 @@ out_driver: } __initcall(sclp_vt220_tty_init); -#ifdef CONFIG_SCLP_VT220_CONSOLE - -static void -sclp_vt220_con_write(struct console *con, const char *buf, unsigned int count) -{ - __sclp_vt220_write((const unsigned char *) buf, count, 1, 1, 0); -} - -static struct tty_driver * -sclp_vt220_con_device(struct console *c, int *index) -{ - *index = 0; - return sclp_vt220_driver; -} - static void __sclp_vt220_flush_buffer(void) { unsigned long flags; @@ -776,6 +761,21 @@ static void sclp_vt220_pm_event_fn(struct sclp_register *reg, } } +#ifdef CONFIG_SCLP_VT220_CONSOLE + +static void +sclp_vt220_con_write(struct console *con, const char *buf, unsigned int count) +{ + __sclp_vt220_write((const unsigned char *) buf, count, 1, 1, 0); +} + +static struct tty_driver * +sclp_vt220_con_device(struct console *c, int *index) +{ + *index = 0; + return sclp_vt220_driver; +} + static int sclp_vt220_notify(struct notifier_block *self, unsigned long event, void *data) diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c index 64f57ef2763..0c0705b91c2 100644 --- a/drivers/s390/char/tape_block.c +++ b/drivers/s390/char/tape_block.c @@ -162,9 +162,10 @@ tapeblock_requeue(struct work_struct *work) { spin_lock_irq(&device->blk_data.request_queue_lock); while ( !blk_queue_plugged(queue) && - (req = blk_fetch_request(queue)) && + blk_peek_request(queue) && nr_queued < TAPEBLOCK_MIN_REQUEUE ) { + req = blk_fetch_request(queue); if (rq_data_dir(req) == WRITE) { DBF_EVENT(1, "TBLOCK: Rejecting write request\n"); spin_unlock_irq(&device->blk_data.request_queue_lock); diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 2ee093ec86e..2490b741e16 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -1250,8 +1250,7 @@ static int io_subchannel_probe(struct subchannel *sch) unsigned long flags; struct ccw_dev_id dev_id; - cdev = sch_get_cdev(sch); - if (cdev) { + if (cio_is_console(sch->schid)) { rc = sysfs_create_group(&sch->dev.kobj, &io_subchannel_attr_group); if (rc) @@ -1260,13 +1259,13 @@ static int io_subchannel_probe(struct subchannel *sch) "0.%x.%04x (rc=%d)\n", sch->schid.ssid, sch->schid.sch_no, rc); /* - * This subchannel already has an associated ccw_device. + * The console subchannel already has an associated ccw_device. * Throw the delayed uevent for the subchannel, register - * the ccw_device and exit. This happens for all early - * devices, e.g. the console. + * the ccw_device and exit. 
*/ dev_set_uevent_suppress(&sch->dev, 0); kobject_uevent(&sch->dev.kobj, KOBJ_ADD); + cdev = sch_get_cdev(sch); cdev->dev.groups = ccwdev_attr_groups; device_initialize(&cdev->dev); ccw_device_register(cdev); diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c index 1689bda1d13..dcc72444e8e 100644 --- a/drivers/serial/serial_core.c +++ b/drivers/serial/serial_core.c @@ -1270,6 +1270,9 @@ static void uart_close(struct tty_struct *tty, struct file *filp) BUG_ON(!kernel_locked()); + if (!state) + return; + uport = state->uart_port; port = &state->port; @@ -1316,9 +1319,9 @@ static void uart_close(struct tty_struct *tty, struct file *filp) */ if (port->flags & ASYNC_INITIALIZED) { unsigned long flags; - spin_lock_irqsave(&port->lock, flags); + spin_lock_irqsave(&uport->lock, flags); uport->ops->stop_rx(uport); - spin_unlock_irqrestore(&port->lock, flags); + spin_unlock_irqrestore(&uport->lock, flags); /* * Before we drop DTR, make sure the UART transmitter * has completely drained; this is especially diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c index 958a3ffc898..ff5bbb9c43c 100644 --- a/drivers/spi/amba-pl022.c +++ b/drivers/spi/amba-pl022.c @@ -1826,7 +1826,7 @@ static struct amba_id pl022_ids[] = { * ST Micro derivative, this has 32bit wide * and 32 locations deep TX/RX FIFO */ - .id = 0x00108022, + .id = 0x01080022, .mask = 0xffffffff, .data = &vendor_st, }, diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig index 760e7271d17..b84abd8ee8a 100644 --- a/drivers/usb/musb/Kconfig +++ b/drivers/usb/musb/Kconfig @@ -9,7 +9,7 @@ comment "Enable Host or Gadget support to see Inventra options" # (M)HDRC = (Multipoint) Highspeed Dual-Role Controller config USB_MUSB_HDRC depends on (USB || USB_GADGET) - depends on !SUPERH + depends on (ARM || BLACKFIN) select NOP_USB_XCEIV if ARCH_DAVINCI select TWL4030_USB if MACH_OMAP_3430SDP select NOP_USB_XCEIV if MACH_OMAP3EVM diff --git a/drivers/watchdog/riowd.c b/drivers/watchdog/riowd.c index 1e8f02f440e..d3c824dc235 100644 --- a/drivers/watchdog/riowd.c +++ b/drivers/watchdog/riowd.c @@ -206,7 +206,7 @@ static int __devinit riowd_probe(struct of_device *op, dev_set_drvdata(&op->dev, p); riowd_device = p; - err = 0; + return 0; out_iounmap: of_iounmap(&op->resource[0], p->regs, 2); diff --git a/fs/ext3/super.c b/fs/ext3/super.c index 72743d36050..7a520a862f4 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c @@ -2321,7 +2321,18 @@ static int ext3_commit_super(struct super_block *sb, if (!sbh) return error; - es->s_wtime = cpu_to_le32(get_seconds()); + /* + * If the file system is mounted read-only, don't update the + * superblock write time. This avoids updating the superblock + * write time when we are mounting the root file system + * read/only but we need to replay the journal; at that point, + * for people who are east of GMT and who make their clock + * tick in localtime for Windows bug-for-bug compatibility, + * the clock is set in the future, and this will cause e2fsck + * to complain and force a full file system check. 
+ */ + if (!(sb->s_flags & MS_RDONLY)) + es->s_wtime = cpu_to_le32(get_seconds()); es->s_free_blocks_count = cpu_to_le32(ext3_count_free_blocks(sb)); es->s_free_inodes_count = cpu_to_le32(ext3_count_free_inodes(sb)); BUFFER_TRACE(sbh, "marking dirty"); diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 6dabf6feec9..a2c18acb856 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -1848,8 +1848,8 @@ nfs_compare_remount_data(struct nfs_server *nfss, data->timeo != (10U * nfss->client->cl_timeout->to_initval / HZ) || data->nfs_server.port != nfss->port || data->nfs_server.addrlen != nfss->nfs_client->cl_addrlen || - !rpc_cmp_addr(&data->nfs_server.address, - &nfss->nfs_client->cl_addr)) + !rpc_cmp_addr((struct sockaddr *)&data->nfs_server.address, + (struct sockaddr *)&nfss->nfs_client->cl_addr)) return -EINVAL; return 0; diff --git a/fs/partitions/check.c b/fs/partitions/check.c index f38fee0311a..7b685e10cba 100644 --- a/fs/partitions/check.c +++ b/fs/partitions/check.c @@ -248,11 +248,19 @@ ssize_t part_stat_show(struct device *dev, part_stat_read(p, merges[WRITE]), (unsigned long long)part_stat_read(p, sectors[WRITE]), jiffies_to_msecs(part_stat_read(p, ticks[WRITE])), - p->in_flight, + part_in_flight(p), jiffies_to_msecs(part_stat_read(p, io_ticks)), jiffies_to_msecs(part_stat_read(p, time_in_queue))); } +ssize_t part_inflight_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hd_struct *p = dev_to_part(dev); + + return sprintf(buf, "%8u %8u\n", p->in_flight[0], p->in_flight[1]); +} + #ifdef CONFIG_FAIL_MAKE_REQUEST ssize_t part_fail_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -281,6 +289,7 @@ static DEVICE_ATTR(start, S_IRUGO, part_start_show, NULL); static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL); static DEVICE_ATTR(alignment_offset, S_IRUGO, part_alignment_offset_show, NULL); static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL); +static DEVICE_ATTR(inflight, S_IRUGO, part_inflight_show, NULL); #ifdef CONFIG_FAIL_MAKE_REQUEST static struct device_attribute dev_attr_fail = __ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store); @@ -292,6 +301,7 @@ static struct attribute *part_attrs[] = { &dev_attr_size.attr, &dev_attr_alignment_offset.attr, &dev_attr_stat.attr, + &dev_attr_inflight.attr, #ifdef CONFIG_FAIL_MAKE_REQUEST &dev_attr_fail.attr, #endif diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 25119041e03..221cecd86bd 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1172,11 +1172,7 @@ static inline void put_dev_sector(Sector p) } struct work_struct; -struct delayed_work; int kblockd_schedule_work(struct request_queue *q, struct work_struct *work); -int kblockd_schedule_delayed_work(struct request_queue *q, - struct delayed_work *work, - unsigned long delay); #define MODULE_ALIAS_BLOCKDEV(major,minor) \ MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor)) diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 4ec5e67e18c..d11770472bc 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -144,7 +144,7 @@ extern char *trace_profile_buf_nmi; #define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */ extern void destroy_preds(struct ftrace_event_call *call); -extern int filter_match_preds(struct ftrace_event_call *call, void *rec); +extern int filter_match_preds(struct event_filter *filter, void *rec); extern int filter_current_check_discard(struct ring_buffer *buffer, struct ftrace_event_call 
*call, void *rec, @@ -186,4 +186,13 @@ do { \ __trace_printk(ip, fmt, ##args); \ } while (0) +#ifdef CONFIG_EVENT_PROFILE +struct perf_event; +extern int ftrace_profile_enable(int event_id); +extern void ftrace_profile_disable(int event_id); +extern int ftrace_profile_set_filter(struct perf_event *event, int event_id, + char *filter_str); +extern void ftrace_profile_free_filter(struct perf_event *event); +#endif + #endif /* _LINUX_FTRACE_EVENT_H */ diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 7beaa21b388..297df45ffd0 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -98,7 +98,7 @@ struct hd_struct { int make_it_fail; #endif unsigned long stamp; - int in_flight; + int in_flight[2]; #ifdef CONFIG_SMP struct disk_stats *dkstats; #else @@ -322,18 +322,23 @@ static inline void free_part_stats(struct hd_struct *part) #define part_stat_sub(cpu, gendiskp, field, subnd) \ part_stat_add(cpu, gendiskp, field, -subnd) -static inline void part_inc_in_flight(struct hd_struct *part) +static inline void part_inc_in_flight(struct hd_struct *part, int rw) { - part->in_flight++; + part->in_flight[rw]++; if (part->partno) - part_to_disk(part)->part0.in_flight++; + part_to_disk(part)->part0.in_flight[rw]++; } -static inline void part_dec_in_flight(struct hd_struct *part) +static inline void part_dec_in_flight(struct hd_struct *part, int rw) { - part->in_flight--; + part->in_flight[rw]--; if (part->partno) - part_to_disk(part)->part0.in_flight--; + part_to_disk(part)->part0.in_flight[rw]--; +} + +static inline int part_in_flight(struct hd_struct *part) +{ + return part->in_flight[0] + part->in_flight[1]; } /* block/blk-core.c */ @@ -546,6 +551,8 @@ extern ssize_t part_size_show(struct device *dev, struct device_attribute *attr, char *buf); extern ssize_t part_stat_show(struct device *dev, struct device_attribute *attr, char *buf); +extern ssize_t part_inflight_show(struct device *dev, + struct device_attribute *attr, char *buf); #ifdef CONFIG_FAIL_MAKE_REQUEST extern ssize_t part_fail_show(struct device *dev, struct device_attribute *attr, char *buf); diff --git a/include/linux/kernel.h b/include/linux/kernel.h index d3cd23f3003..f4e3184fa05 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -659,6 +659,12 @@ extern int do_sysinfo(struct sysinfo *info); #endif /* __KERNEL__ */ +#ifndef __EXPORTED_HEADERS__ +#ifndef __KERNEL__ +#warning Attempt to use kernel headers from user space, see http://kernelnewbies.org/KernelHeaders +#endif /* __KERNEL__ */ +#endif /* __EXPORTED_HEADERS__ */ + #define SI_LOAD_SHIFT 16 struct sysinfo { long uptime; /* Seconds since boot */ diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index da1fda8623e..f490e7a7307 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -776,6 +776,7 @@ #define PCI_DEVICE_ID_TI_X515 0x8036 #define PCI_DEVICE_ID_TI_XX12 0x8039 #define PCI_DEVICE_ID_TI_XX12_FM 0x803b +#define PCI_DEVICE_ID_TI_XIO2000A 0x8231 #define PCI_DEVICE_ID_TI_1130 0xac12 #define PCI_DEVICE_ID_TI_1031 0xac13 #define PCI_DEVICE_ID_TI_1131 0xac15 diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 7b7fbf433cf..91a2b4309e7 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -225,6 +225,7 @@ struct perf_counter_attr { #define PERF_COUNTER_IOC_RESET _IO ('$', 3) #define PERF_COUNTER_IOC_PERIOD _IOW('$', 4, u64) #define PERF_COUNTER_IOC_SET_OUTPUT _IO ('$', 5) +#define PERF_COUNTER_IOC_SET_FILTER _IOW('$', 6, char *) enum perf_counter_ioc_flags { 
PERF_IOC_FLAG_GROUP = 1U << 0, diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 2e6d95f9741..df9d964c15f 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -221,6 +221,7 @@ struct perf_event_attr { #define PERF_EVENT_IOC_RESET _IO ('$', 3) #define PERF_EVENT_IOC_PERIOD _IOW('$', 4, u64) #define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5) +#define PERF_EVENT_IOC_SET_FILTER _IOW('$', 6, char *) enum perf_event_ioc_flags { PERF_IOC_FLAG_GROUP = 1U << 0, @@ -633,7 +634,12 @@ struct perf_event { struct pid_namespace *ns; u64 id; + +#ifdef CONFIG_EVENT_PROFILE + struct event_filter *filter; #endif + +#endif /* CONFIG_PERF_EVENTS */ }; /** diff --git a/include/linux/smp_lock.h b/include/linux/smp_lock.h index 813be59bf34..2ea1dd1ba21 100644 --- a/include/linux/smp_lock.h +++ b/include/linux/smp_lock.h @@ -24,8 +24,21 @@ static inline int reacquire_kernel_lock(struct task_struct *task) return 0; } -extern void __lockfunc lock_kernel(void) __acquires(kernel_lock); -extern void __lockfunc unlock_kernel(void) __releases(kernel_lock); +extern void __lockfunc +_lock_kernel(const char *func, const char *file, int line) +__acquires(kernel_lock); + +extern void __lockfunc +_unlock_kernel(const char *func, const char *file, int line) +__releases(kernel_lock); + +#define lock_kernel() do { \ + _lock_kernel(__func__, __FILE__, __LINE__); \ +} while (0) + +#define unlock_kernel() do { \ + _unlock_kernel(__func__, __FILE__, __LINE__); \ +} while (0) /* * Various legacy drivers don't really need the BKL in a specific @@ -41,8 +54,8 @@ static inline void cycle_kernel_lock(void) #else -#define lock_kernel() do { } while(0) -#define unlock_kernel() do { } while(0) +#define lock_kernel() +#define unlock_kernel() #define release_kernel_lock(task) do { } while(0) #define cycle_kernel_lock() do { } while(0) #define reacquire_kernel_lock(task) 0 diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 7ef0c7b94f3..cf24c20de9e 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -207,6 +207,7 @@ extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, extern void flush_workqueue(struct workqueue_struct *wq); extern void flush_scheduled_work(void); +extern void flush_delayed_work(struct delayed_work *work); extern int schedule_work(struct work_struct *work); extern int schedule_work_on(int cpu, struct work_struct *work); diff --git a/include/trace/events/bkl.h b/include/trace/events/bkl.h new file mode 100644 index 00000000000..1af72dc2427 --- /dev/null +++ b/include/trace/events/bkl.h @@ -0,0 +1,61 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM bkl + +#if !defined(_TRACE_BKL_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_BKL_H + +#include <linux/tracepoint.h> + +TRACE_EVENT(lock_kernel, + + TP_PROTO(const char *func, const char *file, int line), + + TP_ARGS(func, file, line), + + TP_STRUCT__entry( + __field( int, depth ) + __field_ext( const char *, func, FILTER_PTR_STRING ) + __field_ext( const char *, file, FILTER_PTR_STRING ) + __field( int, line ) + ), + + TP_fast_assign( + /* We want to record the lock_depth after lock is acquired */ + __entry->depth = current->lock_depth + 1; + __entry->func = func; + __entry->file = file; + __entry->line = line; + ), + + TP_printk("depth=%d file:line=%s:%d func=%s()", __entry->depth, + __entry->file, __entry->line, __entry->func) +); + +TRACE_EVENT(unlock_kernel, + + TP_PROTO(const char *func, const char *file, int line), + + TP_ARGS(func, file, line), + + TP_STRUCT__entry( + 
__field(int, depth ) + __field(const char *, func ) + __field(const char *, file ) + __field(int, line ) + ), + + TP_fast_assign( + __entry->depth = current->lock_depth; + __entry->func = func; + __entry->file = file; + __entry->line = line; + ), + + TP_printk("depth=%d file:line=%s:%d func=%s()", __entry->depth, + __entry->file, __entry->line, __entry->func) +); + +#endif /* _TRACE_BKL_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h index b89f9db4a40..dcfcd440762 100644 --- a/include/trace/events/irq.h +++ b/include/trace/events/irq.h @@ -48,7 +48,7 @@ TRACE_EVENT(irq_handler_entry, __assign_str(name, action->name); ), - TP_printk("irq=%d handler=%s", __entry->irq, __get_str(name)) + TP_printk("irq=%d name=%s", __entry->irq, __get_str(name)) ); /** @@ -78,7 +78,7 @@ TRACE_EVENT(irq_handler_exit, __entry->ret = ret; ), - TP_printk("irq=%d return=%s", + TP_printk("irq=%d ret=%s", __entry->irq, __entry->ret ? "handled" : "unhandled") ); @@ -107,7 +107,7 @@ TRACE_EVENT(softirq_entry, __entry->vec = (int)(h - vec); ), - TP_printk("softirq=%d action=%s", __entry->vec, + TP_printk("vec=%d [action=%s]", __entry->vec, show_softirq_name(__entry->vec)) ); @@ -136,7 +136,7 @@ TRACE_EVENT(softirq_exit, __entry->vec = (int)(h - vec); ), - TP_printk("softirq=%d action=%s", __entry->vec, + TP_printk("vec=%d [action=%s]", __entry->vec, show_softirq_name(__entry->vec)) ); diff --git a/include/trace/events/power.h b/include/trace/events/power.h index ea6d579261a..9bb96e5a284 100644 --- a/include/trace/events/power.h +++ b/include/trace/events/power.h @@ -16,8 +16,6 @@ enum { }; #endif - - TRACE_EVENT(power_start, TP_PROTO(unsigned int type, unsigned int state), diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index 4069c43f418..b50b9856c59 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -26,7 +26,7 @@ TRACE_EVENT(sched_kthread_stop, __entry->pid = t->pid; ), - TP_printk("task %s:%d", __entry->comm, __entry->pid) + TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid) ); /* @@ -46,7 +46,7 @@ TRACE_EVENT(sched_kthread_stop_ret, __entry->ret = ret; ), - TP_printk("ret %d", __entry->ret) + TP_printk("ret=%d", __entry->ret) ); /* @@ -73,7 +73,7 @@ TRACE_EVENT(sched_wait_task, __entry->prio = p->prio; ), - TP_printk("task %s:%d [%d]", + TP_printk("comm=%s pid=%d prio=%d", __entry->comm, __entry->pid, __entry->prio) ); @@ -94,7 +94,7 @@ TRACE_EVENT(sched_wakeup, __field( pid_t, pid ) __field( int, prio ) __field( int, success ) - __field( int, cpu ) + __field( int, target_cpu ) ), TP_fast_assign( @@ -102,12 +102,12 @@ TRACE_EVENT(sched_wakeup, __entry->pid = p->pid; __entry->prio = p->prio; __entry->success = success; - __entry->cpu = task_cpu(p); + __entry->target_cpu = task_cpu(p); ), - TP_printk("task %s:%d [%d] success=%d [%03d]", + TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d", __entry->comm, __entry->pid, __entry->prio, - __entry->success, __entry->cpu) + __entry->success, __entry->target_cpu) ); /* @@ -127,7 +127,7 @@ TRACE_EVENT(sched_wakeup_new, __field( pid_t, pid ) __field( int, prio ) __field( int, success ) - __field( int, cpu ) + __field( int, target_cpu ) ), TP_fast_assign( @@ -135,12 +135,12 @@ TRACE_EVENT(sched_wakeup_new, __entry->pid = p->pid; __entry->prio = p->prio; __entry->success = success; - __entry->cpu = task_cpu(p); + __entry->target_cpu = task_cpu(p); ), - TP_printk("task %s:%d [%d] success=%d [%03d]", + 
TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d", __entry->comm, __entry->pid, __entry->prio, - __entry->success, __entry->cpu) + __entry->success, __entry->target_cpu) ); /* @@ -176,7 +176,7 @@ TRACE_EVENT(sched_switch, __entry->next_prio = next->prio; ), - TP_printk("task %s:%d [%d] (%s) ==> %s:%d [%d]", + TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s ==> next_comm=%s next_pid=%d next_prio=%d", __entry->prev_comm, __entry->prev_pid, __entry->prev_prio, __entry->prev_state ? __print_flags(__entry->prev_state, "|", @@ -211,7 +211,7 @@ TRACE_EVENT(sched_migrate_task, __entry->dest_cpu = dest_cpu; ), - TP_printk("task %s:%d [%d] from: %d to: %d", + TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d", __entry->comm, __entry->pid, __entry->prio, __entry->orig_cpu, __entry->dest_cpu) ); @@ -237,7 +237,7 @@ TRACE_EVENT(sched_process_free, __entry->prio = p->prio; ), - TP_printk("task %s:%d [%d]", + TP_printk("comm=%s pid=%d prio=%d", __entry->comm, __entry->pid, __entry->prio) ); @@ -262,7 +262,7 @@ TRACE_EVENT(sched_process_exit, __entry->prio = p->prio; ), - TP_printk("task %s:%d [%d]", + TP_printk("comm=%s pid=%d prio=%d", __entry->comm, __entry->pid, __entry->prio) ); @@ -287,7 +287,7 @@ TRACE_EVENT(sched_process_wait, __entry->prio = current->prio; ), - TP_printk("task %s:%d [%d]", + TP_printk("comm=%s pid=%d prio=%d", __entry->comm, __entry->pid, __entry->prio) ); @@ -314,7 +314,7 @@ TRACE_EVENT(sched_process_fork, __entry->child_pid = child->pid; ), - TP_printk("parent %s:%d child %s:%d", + TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d", __entry->parent_comm, __entry->parent_pid, __entry->child_comm, __entry->child_pid) ); @@ -340,7 +340,7 @@ TRACE_EVENT(sched_signal_send, __entry->sig = sig; ), - TP_printk("sig: %d task %s:%d", + TP_printk("sig=%d comm=%s pid=%d", __entry->sig, __entry->comm, __entry->pid) ); @@ -374,7 +374,7 @@ TRACE_EVENT(sched_stat_wait, __perf_count(delay); ), - TP_printk("task: %s:%d wait: %Lu [ns]", + TP_printk("comm=%s pid=%d delay=%Lu [ns]", __entry->comm, __entry->pid, (unsigned long long)__entry->delay) ); @@ -406,7 +406,7 @@ TRACE_EVENT(sched_stat_runtime, __perf_count(runtime); ), - TP_printk("task: %s:%d runtime: %Lu [ns], vruntime: %Lu [ns]", + TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]", __entry->comm, __entry->pid, (unsigned long long)__entry->runtime, (unsigned long long)__entry->vruntime) @@ -437,7 +437,7 @@ TRACE_EVENT(sched_stat_sleep, __perf_count(delay); ), - TP_printk("task: %s:%d sleep: %Lu [ns]", + TP_printk("comm=%s pid=%d delay=%Lu [ns]", __entry->comm, __entry->pid, (unsigned long long)__entry->delay) ); @@ -467,7 +467,7 @@ TRACE_EVENT(sched_stat_iowait, __perf_count(delay); ), - TP_printk("task: %s:%d iowait: %Lu [ns]", + TP_printk("comm=%s pid=%d delay=%Lu [ns]", __entry->comm, __entry->pid, (unsigned long long)__entry->delay) ); diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h index 1844c48d640..e5ce87a0498 100644 --- a/include/trace/events/timer.h +++ b/include/trace/events/timer.h @@ -26,7 +26,7 @@ TRACE_EVENT(timer_init, __entry->timer = timer; ), - TP_printk("timer %p", __entry->timer) + TP_printk("timer=%p", __entry->timer) ); /** @@ -54,7 +54,7 @@ TRACE_EVENT(timer_start, __entry->now = jiffies; ), - TP_printk("timer %p: func %pf, expires %lu, timeout %ld", + TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld]", __entry->timer, __entry->function, __entry->expires, (long)__entry->expires - __entry->now) ); @@ -81,7 +81,7 @@ 
TRACE_EVENT(timer_expire_entry, __entry->now = jiffies; ), - TP_printk("timer %p: now %lu", __entry->timer, __entry->now) + TP_printk("timer=%p now=%lu", __entry->timer, __entry->now) ); /** @@ -108,7 +108,7 @@ TRACE_EVENT(timer_expire_exit, __entry->timer = timer; ), - TP_printk("timer %p", __entry->timer) + TP_printk("timer=%p", __entry->timer) ); /** @@ -129,7 +129,7 @@ TRACE_EVENT(timer_cancel, __entry->timer = timer; ), - TP_printk("timer %p", __entry->timer) + TP_printk("timer=%p", __entry->timer) ); /** @@ -140,24 +140,24 @@ TRACE_EVENT(timer_cancel, */ TRACE_EVENT(hrtimer_init, - TP_PROTO(struct hrtimer *timer, clockid_t clockid, + TP_PROTO(struct hrtimer *hrtimer, clockid_t clockid, enum hrtimer_mode mode), - TP_ARGS(timer, clockid, mode), + TP_ARGS(hrtimer, clockid, mode), TP_STRUCT__entry( - __field( void *, timer ) + __field( void *, hrtimer ) __field( clockid_t, clockid ) __field( enum hrtimer_mode, mode ) ), TP_fast_assign( - __entry->timer = timer; + __entry->hrtimer = hrtimer; __entry->clockid = clockid; __entry->mode = mode; ), - TP_printk("hrtimer %p, clockid %s, mode %s", __entry->timer, + TP_printk("hrtimer=%p clockid=%s mode=%s", __entry->hrtimer, __entry->clockid == CLOCK_REALTIME ? "CLOCK_REALTIME" : "CLOCK_MONOTONIC", __entry->mode == HRTIMER_MODE_ABS ? @@ -170,26 +170,26 @@ TRACE_EVENT(hrtimer_init, */ TRACE_EVENT(hrtimer_start, - TP_PROTO(struct hrtimer *timer), + TP_PROTO(struct hrtimer *hrtimer), - TP_ARGS(timer), + TP_ARGS(hrtimer), TP_STRUCT__entry( - __field( void *, timer ) + __field( void *, hrtimer ) __field( void *, function ) __field( s64, expires ) __field( s64, softexpires ) ), TP_fast_assign( - __entry->timer = timer; - __entry->function = timer->function; - __entry->expires = hrtimer_get_expires(timer).tv64; - __entry->softexpires = hrtimer_get_softexpires(timer).tv64; + __entry->hrtimer = hrtimer; + __entry->function = hrtimer->function; + __entry->expires = hrtimer_get_expires(hrtimer).tv64; + __entry->softexpires = hrtimer_get_softexpires(hrtimer).tv64; ), - TP_printk("hrtimer %p, func %pf, expires %llu, softexpires %llu", - __entry->timer, __entry->function, + TP_printk("hrtimer=%p function=%pf expires=%llu softexpires=%llu", + __entry->hrtimer, __entry->function, (unsigned long long)ktime_to_ns((ktime_t) { .tv64 = __entry->expires }), (unsigned long long)ktime_to_ns((ktime_t) { @@ -206,23 +206,22 @@ TRACE_EVENT(hrtimer_start, */ TRACE_EVENT(hrtimer_expire_entry, - TP_PROTO(struct hrtimer *timer, ktime_t *now), + TP_PROTO(struct hrtimer *hrtimer, ktime_t *now), - TP_ARGS(timer, now), + TP_ARGS(hrtimer, now), TP_STRUCT__entry( - __field( void *, timer ) + __field( void *, hrtimer ) __field( s64, now ) ), TP_fast_assign( - __entry->timer = timer; - __entry->now = now->tv64; + __entry->hrtimer = hrtimer; + __entry->now = now->tv64; ), - TP_printk("hrtimer %p, now %llu", __entry->timer, - (unsigned long long)ktime_to_ns((ktime_t) { - .tv64 = __entry->now })) + TP_printk("hrtimer=%p now=%llu", __entry->hrtimer, + (unsigned long long)ktime_to_ns((ktime_t) { .tv64 = __entry->now })) ); /** @@ -234,40 +233,40 @@ TRACE_EVENT(hrtimer_expire_entry, */ TRACE_EVENT(hrtimer_expire_exit, - TP_PROTO(struct hrtimer *timer), + TP_PROTO(struct hrtimer *hrtimer), - TP_ARGS(timer), + TP_ARGS(hrtimer), TP_STRUCT__entry( - __field( void *, timer ) + __field( void *, hrtimer ) ), TP_fast_assign( - __entry->timer = timer; + __entry->hrtimer = hrtimer; ), - TP_printk("hrtimer %p", __entry->timer) + TP_printk("hrtimer=%p", __entry->hrtimer) ); /** * hrtimer_cancel - called 
when the hrtimer is canceled - * @timer: pointer to struct hrtimer + * @hrtimer: pointer to struct hrtimer */ TRACE_EVENT(hrtimer_cancel, - TP_PROTO(struct hrtimer *timer), + TP_PROTO(struct hrtimer *hrtimer), - TP_ARGS(timer), + TP_ARGS(hrtimer), TP_STRUCT__entry( - __field( void *, timer ) + __field( void *, hrtimer ) ), TP_fast_assign( - __entry->timer = timer; + __entry->hrtimer = hrtimer; ), - TP_printk("hrtimer %p", __entry->timer) + TP_printk("hrtimer=%p", __entry->hrtimer) ); /** @@ -302,7 +301,7 @@ TRACE_EVENT(itimer_state, __entry->interval_usec = value->it_interval.tv_usec; ), - TP_printk("which %d, expires %lu, it_value %lu.%lu, it_interval %lu.%lu", + TP_printk("which=%d expires=%lu it_value=%lu.%lu it_interval=%lu.%lu", __entry->which, __entry->expires, __entry->value_sec, __entry->value_usec, __entry->interval_sec, __entry->interval_usec) @@ -332,7 +331,7 @@ TRACE_EVENT(itimer_expire, __entry->pid = pid_nr(pid); ), - TP_printk("which %d, pid %d, now %lu", __entry->which, + TP_printk("which=%d pid=%d now=%lu", __entry->which, (int) __entry->pid, __entry->now) ); diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index cc0d9667e18..c9bbcab95fb 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h @@ -120,9 +120,10 @@ #undef __field #define __field(type, item) \ ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \ - "offset:%u;\tsize:%u;\n", \ + "offset:%u;\tsize:%u;\tsigned:%u;\n", \ (unsigned int)offsetof(typeof(field), item), \ - (unsigned int)sizeof(field.item)); \ + (unsigned int)sizeof(field.item), \ + (unsigned int)is_signed_type(type)); \ if (!ret) \ return 0; @@ -132,19 +133,21 @@ #undef __array #define __array(type, item, len) \ ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \ - "offset:%u;\tsize:%u;\n", \ + "offset:%u;\tsize:%u;\tsigned:%u;\n", \ (unsigned int)offsetof(typeof(field), item), \ - (unsigned int)sizeof(field.item)); \ + (unsigned int)sizeof(field.item), \ + (unsigned int)is_signed_type(type)); \ if (!ret) \ return 0; #undef __dynamic_array #define __dynamic_array(type, item, len) \ ret = trace_seq_printf(s, "\tfield:__data_loc " #type "[] " #item ";\t"\ - "offset:%u;\tsize:%u;\n", \ + "offset:%u;\tsize:%u;\tsigned:%u;\n", \ (unsigned int)offsetof(typeof(field), \ __data_loc_##item), \ - (unsigned int)sizeof(field.__data_loc_##item)); \ + (unsigned int)sizeof(field.__data_loc_##item), \ + (unsigned int)is_signed_type(type)); \ if (!ret) \ return 0; diff --git a/include/trace/syscall.h b/include/trace/syscall.h index 5dc283ba5ae..e972f0a40f8 100644 --- a/include/trace/syscall.h +++ b/include/trace/syscall.h @@ -33,7 +33,7 @@ struct syscall_metadata { }; #ifdef CONFIG_FTRACE_SYSCALLS -extern struct syscall_metadata *syscall_nr_to_meta(int nr); +extern unsigned long arch_syscall_addr(int nr); extern int syscall_name_to_nr(char *name); void set_syscall_enter_id(int num, int id); void set_syscall_exit_id(int num, int id); diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 3815ac1d58b..9af56723c09 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -142,6 +142,11 @@ static inline struct lock_class *hlock_class(struct held_lock *hlock) #ifdef CONFIG_LOCK_STAT static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats); +static inline u64 lockstat_clock(void) +{ + return cpu_clock(smp_processor_id()); +} + static int lock_point(unsigned long points[], unsigned long ip) { int i; @@ -158,7 +163,7 @@ static int lock_point(unsigned long points[], unsigned long ip) return i; } -static void 
lock_time_inc(struct lock_time *lt, s64 time) +static void lock_time_inc(struct lock_time *lt, u64 time) { if (time > lt->max) lt->max = time; @@ -234,12 +239,12 @@ static void put_lock_stats(struct lock_class_stats *stats) static void lock_release_holdtime(struct held_lock *hlock) { struct lock_class_stats *stats; - s64 holdtime; + u64 holdtime; if (!lock_stat) return; - holdtime = sched_clock() - hlock->holdtime_stamp; + holdtime = lockstat_clock() - hlock->holdtime_stamp; stats = get_lock_stats(hlock_class(hlock)); if (hlock->read) @@ -2792,7 +2797,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, hlock->references = references; #ifdef CONFIG_LOCK_STAT hlock->waittime_stamp = 0; - hlock->holdtime_stamp = sched_clock(); + hlock->holdtime_stamp = lockstat_clock(); #endif if (check == 2 && !mark_irqflags(curr, hlock)) @@ -3322,7 +3327,7 @@ found_it: if (hlock->instance != lock) return; - hlock->waittime_stamp = sched_clock(); + hlock->waittime_stamp = lockstat_clock(); contention_point = lock_point(hlock_class(hlock)->contention_point, ip); contending_point = lock_point(hlock_class(hlock)->contending_point, @@ -3345,8 +3350,7 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip) struct held_lock *hlock, *prev_hlock; struct lock_class_stats *stats; unsigned int depth; - u64 now; - s64 waittime = 0; + u64 now, waittime = 0; int i, cpu; depth = curr->lockdep_depth; @@ -3374,7 +3378,7 @@ found_it: cpu = smp_processor_id(); if (hlock->waittime_stamp) { - now = sched_clock(); + now = lockstat_clock(); waittime = now - hlock->waittime_stamp; hlock->holdtime_stamp = now; } diff --git a/kernel/perf_event.c b/kernel/perf_event.c index afb7ef3dbc4..9ecaa45ab6b 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -28,6 +28,7 @@ #include <linux/anon_inodes.h> #include <linux/kernel_stat.h> #include <linux/perf_event.h> +#include <linux/ftrace_event.h> #include <asm/irq_regs.h> @@ -1658,6 +1659,8 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu) return ERR_PTR(err); } +static void perf_event_free_filter(struct perf_event *event); + static void free_event_rcu(struct rcu_head *head) { struct perf_event *event; @@ -1665,6 +1668,7 @@ static void free_event_rcu(struct rcu_head *head) event = container_of(head, struct perf_event, rcu_head); if (event->ns) put_pid_ns(event->ns); + perf_event_free_filter(event); kfree(event); } @@ -1974,7 +1978,8 @@ unlock: return ret; } -int perf_event_set_output(struct perf_event *event, int output_fd); +static int perf_event_set_output(struct perf_event *event, int output_fd); +static int perf_event_set_filter(struct perf_event *event, void __user *arg); static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { @@ -2002,6 +2007,9 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) case PERF_EVENT_IOC_SET_OUTPUT: return perf_event_set_output(event, arg); + case PERF_EVENT_IOC_SET_FILTER: + return perf_event_set_filter(event, (void __user *)arg); + default: return -ENOTTY; } @@ -3806,9 +3814,14 @@ static int perf_swevent_is_counting(struct perf_event *event) return 1; } +static int perf_tp_event_match(struct perf_event *event, + struct perf_sample_data *data); + static int perf_swevent_match(struct perf_event *event, enum perf_type_id type, - u32 event_id, struct pt_regs *regs) + u32 event_id, + struct perf_sample_data *data, + struct pt_regs *regs) { if (!perf_swevent_is_counting(event)) return 0; @@ -3826,6 +3839,10 @@ static int 
perf_swevent_match(struct perf_event *event, return 0; } + if (event->attr.type == PERF_TYPE_TRACEPOINT && + !perf_tp_event_match(event, data)) + return 0; + return 1; } @@ -3842,7 +3859,7 @@ static void perf_swevent_ctx_event(struct perf_event_context *ctx, rcu_read_lock(); list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { - if (perf_swevent_match(event, type, event_id, regs)) + if (perf_swevent_match(event, type, event_id, data, regs)) perf_swevent_add(event, nr, nmi, data, regs); } rcu_read_unlock(); @@ -4086,6 +4103,7 @@ static const struct pmu perf_ops_task_clock = { }; #ifdef CONFIG_EVENT_PROFILE + void perf_tp_event(int event_id, u64 addr, u64 count, void *record, int entry_size) { @@ -4109,8 +4127,15 @@ void perf_tp_event(int event_id, u64 addr, u64 count, void *record, } EXPORT_SYMBOL_GPL(perf_tp_event); -extern int ftrace_profile_enable(int); -extern void ftrace_profile_disable(int); +static int perf_tp_event_match(struct perf_event *event, + struct perf_sample_data *data) +{ + void *record = data->raw->data; + + if (likely(!event->filter) || filter_match_preds(event->filter, record)) + return 1; + return 0; +} static void tp_perf_event_destroy(struct perf_event *event) { @@ -4135,12 +4160,53 @@ static const struct pmu *tp_perf_event_init(struct perf_event *event) return &perf_ops_generic; } + +static int perf_event_set_filter(struct perf_event *event, void __user *arg) +{ + char *filter_str; + int ret; + + if (event->attr.type != PERF_TYPE_TRACEPOINT) + return -EINVAL; + + filter_str = strndup_user(arg, PAGE_SIZE); + if (IS_ERR(filter_str)) + return PTR_ERR(filter_str); + + ret = ftrace_profile_set_filter(event, event->attr.config, filter_str); + + kfree(filter_str); + return ret; +} + +static void perf_event_free_filter(struct perf_event *event) +{ + ftrace_profile_free_filter(event); +} + #else + +static int perf_tp_event_match(struct perf_event *event, + struct perf_sample_data *data) +{ + return 1; +} + static const struct pmu *tp_perf_event_init(struct perf_event *event) { return NULL; } -#endif + +static int perf_event_set_filter(struct perf_event *event, void __user *arg) +{ + return -ENOENT; +} + +static void perf_event_free_filter(struct perf_event *event) +{ +} + +#endif /* CONFIG_EVENT_PROFILE */ atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX]; @@ -4394,7 +4460,7 @@ err_size: goto out; } -int perf_event_set_output(struct perf_event *event, int output_fd) +static int perf_event_set_output(struct perf_event *event, int output_fd) { struct perf_event *output_event = NULL; struct file *output_file = NULL; diff --git a/kernel/sched.c b/kernel/sched.c index 76c0e9691fc..e88689522e6 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -676,6 +676,7 @@ inline void update_rq_clock(struct rq *rq) /** * runqueue_is_locked + * @cpu: the processor in question. * * Returns true if the current cpu runqueue is locked. 
* This interface allows printk to be called with the runqueue lock @@ -2311,7 +2312,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, { int cpu, orig_cpu, this_cpu, success = 0; unsigned long flags; - struct rq *rq; + struct rq *rq, *orig_rq; if (!sched_feat(SYNC_WAKEUPS)) wake_flags &= ~WF_SYNC; @@ -2319,7 +2320,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, this_cpu = get_cpu(); smp_wmb(); - rq = task_rq_lock(p, &flags); + rq = orig_rq = task_rq_lock(p, &flags); update_rq_clock(rq); if (!(p->state & state)) goto out; @@ -2350,6 +2351,10 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, set_task_cpu(p, cpu); rq = task_rq_lock(p, &flags); + + if (rq != orig_rq) + update_rq_clock(rq); + WARN_ON(p->state != TASK_WAKING); cpu = task_cpu(p); @@ -3656,6 +3661,7 @@ static void update_group_power(struct sched_domain *sd, int cpu) /** * update_sg_lb_stats - Update sched_group's statistics for load balancing. + * @sd: The sched_domain whose statistics are to be updated. * @group: sched_group whose statistics are to be updated. * @this_cpu: Cpu for which load balance is currently performed. * @idle: Idle status of this_cpu @@ -6718,9 +6724,6 @@ EXPORT_SYMBOL(yield); /* * This task is about to go to sleep on IO. Increment rq->nr_iowait so * that process accounting knows that this is a task in IO wait state. - * - * But don't do that if it is a deliberate, throttling IO wait (this task - * has set its backing_dev_info: the queue against which it should throttle) */ void __sched io_schedule(void) { diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 37ba67e3326..b10c0d90a6f 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -60,6 +60,13 @@ static int last_ftrace_enabled; /* Quick disabling of function tracer. */ int function_trace_stop; +/* List for set_ftrace_pid's pids. */ +LIST_HEAD(ftrace_pids); +struct ftrace_pid { + struct list_head list; + struct pid *pid; +}; + /* * ftrace_disabled is set when an anomaly is discovered. * ftrace_disabled is much stronger than ftrace_enabled. 
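The ftrace.c hunk above replaces the single ftrace_pid_trace pointer with a ftrace_pids list of struct ftrace_pid entries, so the set_ftrace_pid file can track several PIDs at once instead of just one. The standalone C sketch below only illustrates that bookkeeping pattern (add-if-absent under a lock, reset by walking the list); the pid_filter type, the function names, and the plain pthread mutex are stand-ins for illustration, not the kernel's API.

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

/* Stand-in for struct ftrace_pid: one filtered PID per node. */
struct pid_filter {
	int pid;
	struct pid_filter *next;
};

static struct pid_filter *pid_filters;	/* plays the role of ftrace_pids */
static pthread_mutex_t filter_lock = PTHREAD_MUTEX_INITIALIZER;

/* Like ftrace_pid_add(): skip duplicates, otherwise prepend a new entry. */
static int pid_filter_add(int pid)
{
	struct pid_filter *p;

	pthread_mutex_lock(&filter_lock);
	for (p = pid_filters; p; p = p->next) {
		if (p->pid == pid) {
			pthread_mutex_unlock(&filter_lock);
			return 0;	/* already tracked */
		}
	}
	p = malloc(sizeof(*p));
	if (!p) {
		pthread_mutex_unlock(&filter_lock);
		return -1;
	}
	p->pid = pid;
	p->next = pid_filters;
	pid_filters = p;
	pthread_mutex_unlock(&filter_lock);
	return 0;
}

/* Like ftrace_pid_reset(): drop every entry, back to "trace everything". */
static void pid_filter_reset(void)
{
	struct pid_filter *p, *next;

	pthread_mutex_lock(&filter_lock);
	for (p = pid_filters; p; p = next) {
		next = p->next;
		free(p);
	}
	pid_filters = NULL;
	pthread_mutex_unlock(&filter_lock);
}

int main(void)
{
	pid_filter_add(1);
	pid_filter_add(42);
	pid_filter_add(42);	/* duplicate, silently ignored */

	pthread_mutex_lock(&filter_lock);
	for (struct pid_filter *p = pid_filters; p; p = p->next)
		printf("filtering pid %d\n", p->pid);
	pthread_mutex_unlock(&filter_lock);

	pid_filter_reset();
	return 0;
}

Treating an empty list as the "no PID filtering" state is what lets the patch turn the old "if (ftrace_pid_trace)" checks into "if (!list_empty(&ftrace_pids))" in __register_ftrace_function(), __unregister_ftrace_function() and ftrace_update_pid_func() below.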
@@ -78,6 +85,10 @@ ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub; ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub; +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +static int ftrace_set_func(unsigned long *array, int *idx, char *buffer); +#endif + static void ftrace_list_func(unsigned long ip, unsigned long parent_ip) { struct ftrace_ops *op = ftrace_list; @@ -155,7 +166,7 @@ static int __register_ftrace_function(struct ftrace_ops *ops) else func = ftrace_list_func; - if (ftrace_pid_trace) { + if (!list_empty(&ftrace_pids)) { set_ftrace_pid_function(func); func = ftrace_pid_func; } @@ -203,7 +214,7 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops) if (ftrace_list->next == &ftrace_list_end) { ftrace_func_t func = ftrace_list->func; - if (ftrace_pid_trace) { + if (!list_empty(&ftrace_pids)) { set_ftrace_pid_function(func); func = ftrace_pid_func; } @@ -231,7 +242,7 @@ static void ftrace_update_pid_func(void) func = __ftrace_trace_function; #endif - if (ftrace_pid_trace) { + if (!list_empty(&ftrace_pids)) { set_ftrace_pid_function(func); func = ftrace_pid_func; } else { @@ -821,8 +832,6 @@ static __init void ftrace_profile_debugfs(struct dentry *d_tracer) } #endif /* CONFIG_FUNCTION_PROFILER */ -/* set when tracing only a pid */ -struct pid *ftrace_pid_trace; static struct pid * const ftrace_swapper_pid = &init_struct_pid; #ifdef CONFIG_DYNAMIC_FTRACE @@ -1261,12 +1270,34 @@ static int ftrace_update_code(struct module *mod) ftrace_new_addrs = p->newlist; p->flags = 0L; - /* convert record (i.e, patch mcount-call with NOP) */ - if (ftrace_code_disable(mod, p)) { - p->flags |= FTRACE_FL_CONVERTED; - ftrace_update_cnt++; - } else + /* + * Do the initial record convertion from mcount jump + * to the NOP instructions. + */ + if (!ftrace_code_disable(mod, p)) { ftrace_free_rec(p); + continue; + } + + p->flags |= FTRACE_FL_CONVERTED; + ftrace_update_cnt++; + + /* + * If the tracing is enabled, go ahead and enable the record. + * + * The reason not to enable the record immediatelly is the + * inherent check of ftrace_make_nop/ftrace_make_call for + * correct previous instructions. Making first the NOP + * conversion puts the module to the correct state, thus + * passing the ftrace_make_call check. + */ + if (ftrace_start_up) { + int failed = __ftrace_replace_code(p, 1); + if (failed) { + ftrace_bug(failed, p->ip); + ftrace_free_rec(p); + } + } } stop = ftrace_now(raw_smp_processor_id()); @@ -1656,60 +1687,6 @@ ftrace_regex_lseek(struct file *file, loff_t offset, int origin) return ret; } -enum { - MATCH_FULL, - MATCH_FRONT_ONLY, - MATCH_MIDDLE_ONLY, - MATCH_END_ONLY, -}; - -/* - * (static function - no need for kernel doc) - * - * Pass in a buffer containing a glob and this function will - * set search to point to the search part of the buffer and - * return the type of search it is (see enum above). - * This does modify buff. - * - * Returns enum type. - * search returns the pointer to use for comparison. - * not returns 1 if buff started with a '!' - * 0 otherwise. 
- */ -static int -ftrace_setup_glob(char *buff, int len, char **search, int *not) -{ - int type = MATCH_FULL; - int i; - - if (buff[0] == '!') { - *not = 1; - buff++; - len--; - } else - *not = 0; - - *search = buff; - - for (i = 0; i < len; i++) { - if (buff[i] == '*') { - if (!i) { - *search = buff + 1; - type = MATCH_END_ONLY; - } else { - if (type == MATCH_END_ONLY) - type = MATCH_MIDDLE_ONLY; - else - type = MATCH_FRONT_ONLY; - buff[i] = 0; - break; - } - } - } - - return type; -} - static int ftrace_match(char *str, char *regex, int len, int type) { int matched = 0; @@ -1758,7 +1735,7 @@ static void ftrace_match_records(char *buff, int len, int enable) int not; flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; - type = ftrace_setup_glob(buff, len, &search, ¬); + type = filter_parse_regex(buff, len, &search, ¬); search_len = strlen(search); @@ -1826,7 +1803,7 @@ static void ftrace_match_module_records(char *buff, char *mod, int enable) } if (strlen(buff)) { - type = ftrace_setup_glob(buff, strlen(buff), &search, ¬); + type = filter_parse_regex(buff, strlen(buff), &search, ¬); search_len = strlen(search); } @@ -1991,7 +1968,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, int count = 0; char *search; - type = ftrace_setup_glob(glob, strlen(glob), &search, ¬); + type = filter_parse_regex(glob, strlen(glob), &search, ¬); len = strlen(search); /* we do not support '!' for function probes */ @@ -2068,7 +2045,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, else if (glob) { int not; - type = ftrace_setup_glob(glob, strlen(glob), &search, ¬); + type = filter_parse_regex(glob, strlen(glob), &search, ¬); len = strlen(search); /* we do not support '!' for function probes */ @@ -2297,6 +2274,7 @@ void ftrace_set_notrace(unsigned char *buf, int len, int reset) #define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata; static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata; +static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata; static int __init set_ftrace_notrace(char *str) { @@ -2312,6 +2290,31 @@ static int __init set_ftrace_filter(char *str) } __setup("ftrace_filter=", set_ftrace_filter); +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +static int __init set_graph_function(char *str) +{ + strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE); + return 1; +} +__setup("ftrace_graph_filter=", set_graph_function); + +static void __init set_ftrace_early_graph(char *buf) +{ + int ret; + char *func; + + while (buf) { + func = strsep(&buf, ","); + /* we allow only one expression at a time */ + ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count, + func); + if (ret) + printk(KERN_DEBUG "ftrace: function %s not " + "traceable\n", func); + } +} +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ + static void __init set_ftrace_early_filter(char *buf, int enable) { char *func; @@ -2328,6 +2331,10 @@ static void __init set_ftrace_early_filters(void) set_ftrace_early_filter(ftrace_filter_buf, 1); if (ftrace_notrace_buf[0]) set_ftrace_early_filter(ftrace_notrace_buf, 0); +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + if (ftrace_graph_buf[0]) + set_ftrace_early_graph(ftrace_graph_buf); +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ } static int @@ -2513,7 +2520,7 @@ ftrace_set_func(unsigned long *array, int *idx, char *buffer) return -ENODEV; /* decode regex */ - type = ftrace_setup_glob(buffer, strlen(buffer), &search, ¬); + type = filter_parse_regex(buffer, strlen(buffer), &search, ¬); if (not) return 
-EINVAL; @@ -2624,7 +2631,7 @@ static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer) return 0; } -static int ftrace_convert_nops(struct module *mod, +static int ftrace_process_locs(struct module *mod, unsigned long *start, unsigned long *end) { @@ -2684,7 +2691,7 @@ static void ftrace_init_module(struct module *mod, { if (ftrace_disabled || start == end) return; - ftrace_convert_nops(mod, start, end); + ftrace_process_locs(mod, start, end); } static int ftrace_module_notify(struct notifier_block *self, @@ -2745,7 +2752,7 @@ void __init ftrace_init(void) last_ftrace_enabled = ftrace_enabled = 1; - ret = ftrace_convert_nops(NULL, + ret = ftrace_process_locs(NULL, __start_mcount_loc, __stop_mcount_loc); @@ -2778,23 +2785,6 @@ static inline void ftrace_startup_enable(int command) { } # define ftrace_shutdown_sysctl() do { } while (0) #endif /* CONFIG_DYNAMIC_FTRACE */ -static ssize_t -ftrace_pid_read(struct file *file, char __user *ubuf, - size_t cnt, loff_t *ppos) -{ - char buf[64]; - int r; - - if (ftrace_pid_trace == ftrace_swapper_pid) - r = sprintf(buf, "swapper tasks\n"); - else if (ftrace_pid_trace) - r = sprintf(buf, "%u\n", pid_vnr(ftrace_pid_trace)); - else - r = sprintf(buf, "no pid\n"); - - return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); -} - static void clear_ftrace_swapper(void) { struct task_struct *p; @@ -2845,14 +2835,12 @@ static void set_ftrace_pid(struct pid *pid) rcu_read_unlock(); } -static void clear_ftrace_pid_task(struct pid **pid) +static void clear_ftrace_pid_task(struct pid *pid) { - if (*pid == ftrace_swapper_pid) + if (pid == ftrace_swapper_pid) clear_ftrace_swapper(); else - clear_ftrace_pid(*pid); - - *pid = NULL; + clear_ftrace_pid(pid); } static void set_ftrace_pid_task(struct pid *pid) @@ -2863,11 +2851,140 @@ static void set_ftrace_pid_task(struct pid *pid) set_ftrace_pid(pid); } +static int ftrace_pid_add(int p) +{ + struct pid *pid; + struct ftrace_pid *fpid; + int ret = -EINVAL; + + mutex_lock(&ftrace_lock); + + if (!p) + pid = ftrace_swapper_pid; + else + pid = find_get_pid(p); + + if (!pid) + goto out; + + ret = 0; + + list_for_each_entry(fpid, &ftrace_pids, list) + if (fpid->pid == pid) + goto out_put; + + ret = -ENOMEM; + + fpid = kmalloc(sizeof(*fpid), GFP_KERNEL); + if (!fpid) + goto out_put; + + list_add(&fpid->list, &ftrace_pids); + fpid->pid = pid; + + set_ftrace_pid_task(pid); + + ftrace_update_pid_func(); + ftrace_startup_enable(0); + + mutex_unlock(&ftrace_lock); + return 0; + +out_put: + if (pid != ftrace_swapper_pid) + put_pid(pid); + +out: + mutex_unlock(&ftrace_lock); + return ret; +} + +static void ftrace_pid_reset(void) +{ + struct ftrace_pid *fpid, *safe; + + mutex_lock(&ftrace_lock); + list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) { + struct pid *pid = fpid->pid; + + clear_ftrace_pid_task(pid); + + list_del(&fpid->list); + kfree(fpid); + } + + ftrace_update_pid_func(); + ftrace_startup_enable(0); + + mutex_unlock(&ftrace_lock); +} + +static void *fpid_start(struct seq_file *m, loff_t *pos) +{ + mutex_lock(&ftrace_lock); + + if (list_empty(&ftrace_pids) && (!*pos)) + return (void *) 1; + + return seq_list_start(&ftrace_pids, *pos); +} + +static void *fpid_next(struct seq_file *m, void *v, loff_t *pos) +{ + if (v == (void *)1) + return NULL; + + return seq_list_next(v, &ftrace_pids, pos); +} + +static void fpid_stop(struct seq_file *m, void *p) +{ + mutex_unlock(&ftrace_lock); +} + +static int fpid_show(struct seq_file *m, void *v) +{ + const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, 
list); + + if (v == (void *)1) { + seq_printf(m, "no pid\n"); + return 0; + } + + if (fpid->pid == ftrace_swapper_pid) + seq_printf(m, "swapper tasks\n"); + else + seq_printf(m, "%u\n", pid_vnr(fpid->pid)); + + return 0; +} + +static const struct seq_operations ftrace_pid_sops = { + .start = fpid_start, + .next = fpid_next, + .stop = fpid_stop, + .show = fpid_show, +}; + +static int +ftrace_pid_open(struct inode *inode, struct file *file) +{ + int ret = 0; + + if ((file->f_mode & FMODE_WRITE) && + (file->f_flags & O_TRUNC)) + ftrace_pid_reset(); + + if (file->f_mode & FMODE_READ) + ret = seq_open(file, &ftrace_pid_sops); + + return ret; +} + static ssize_t ftrace_pid_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { - struct pid *pid; char buf[64]; long val; int ret; @@ -2880,57 +2997,38 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf, buf[cnt] = 0; + /* + * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid" + * to clean the filter quietly. + */ + strstrip(buf); + if (strlen(buf) == 0) + return 1; + ret = strict_strtol(buf, 10, &val); if (ret < 0) return ret; - mutex_lock(&ftrace_lock); - if (val < 0) { - /* disable pid tracing */ - if (!ftrace_pid_trace) - goto out; - - clear_ftrace_pid_task(&ftrace_pid_trace); - - } else { - /* swapper task is special */ - if (!val) { - pid = ftrace_swapper_pid; - if (pid == ftrace_pid_trace) - goto out; - } else { - pid = find_get_pid(val); + ret = ftrace_pid_add(val); - if (pid == ftrace_pid_trace) { - put_pid(pid); - goto out; - } - } - - if (ftrace_pid_trace) - clear_ftrace_pid_task(&ftrace_pid_trace); - - if (!pid) - goto out; - - ftrace_pid_trace = pid; - - set_ftrace_pid_task(ftrace_pid_trace); - } - - /* update the function call */ - ftrace_update_pid_func(); - ftrace_startup_enable(0); + return ret ? 
ret : cnt; +} - out: - mutex_unlock(&ftrace_lock); +static int +ftrace_pid_release(struct inode *inode, struct file *file) +{ + if (file->f_mode & FMODE_READ) + seq_release(inode, file); - return cnt; + return 0; } static const struct file_operations ftrace_pid_fops = { - .read = ftrace_pid_read, - .write = ftrace_pid_write, + .open = ftrace_pid_open, + .write = ftrace_pid_write, + .read = seq_read, + .llseek = seq_lseek, + .release = ftrace_pid_release, }; static __init int ftrace_init_debugfs(void) diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index d4ff0197054..e43c928356e 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -397,18 +397,21 @@ int ring_buffer_print_page_header(struct trace_seq *s) int ret; ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t" - "offset:0;\tsize:%u;\n", - (unsigned int)sizeof(field.time_stamp)); + "offset:0;\tsize:%u;\tsigned:%u;\n", + (unsigned int)sizeof(field.time_stamp), + (unsigned int)is_signed_type(u64)); ret = trace_seq_printf(s, "\tfield: local_t commit;\t" - "offset:%u;\tsize:%u;\n", + "offset:%u;\tsize:%u;\tsigned:%u;\n", (unsigned int)offsetof(typeof(field), commit), - (unsigned int)sizeof(field.commit)); + (unsigned int)sizeof(field.commit), + (unsigned int)is_signed_type(long)); ret = trace_seq_printf(s, "\tfield: char data;\t" - "offset:%u;\tsize:%u;\n", + "offset:%u;\tsize:%u;\tsigned:%u;\n", (unsigned int)offsetof(typeof(field), data), - (unsigned int)BUF_PAGE_SIZE); + (unsigned int)BUF_PAGE_SIZE, + (unsigned int)is_signed_type(char)); return ret; } diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 45068269ebb..026e715a0c7 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -129,7 +129,7 @@ static int tracing_set_tracer(const char *buf); static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata; static char *default_bootup_tracer; -static int __init set_ftrace(char *str) +static int __init set_cmdline_ftrace(char *str) { strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE); default_bootup_tracer = bootup_tracer_buf; @@ -137,7 +137,7 @@ static int __init set_ftrace(char *str) ring_buffer_expanded = 1; return 1; } -__setup("ftrace=", set_ftrace); +__setup("ftrace=", set_cmdline_ftrace); static int __init set_ftrace_dump_on_oops(char *str) { @@ -1393,7 +1393,7 @@ int trace_array_vprintk(struct trace_array *tr, int trace_vprintk(unsigned long ip, const char *fmt, va_list args) { - return trace_array_printk(&global_trace, ip, fmt, args); + return trace_array_vprintk(&global_trace, ip, fmt, args); } EXPORT_SYMBOL_GPL(trace_vprintk); diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 405cb850b75..4959ada9e0b 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -483,10 +483,6 @@ static inline int ftrace_graph_addr(unsigned long addr) return 0; } #else -static inline int ftrace_trace_addr(unsigned long addr) -{ - return 1; -} static inline int ftrace_graph_addr(unsigned long addr) { return 1; @@ -500,12 +496,12 @@ print_graph_function(struct trace_iterator *iter) } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ -extern struct pid *ftrace_pid_trace; +extern struct list_head ftrace_pids; #ifdef CONFIG_FUNCTION_TRACER static inline int ftrace_trace_task(struct task_struct *task) { - if (!ftrace_pid_trace) + if (list_empty(&ftrace_pids)) return 1; return test_tsk_trace_trace(task); @@ -687,7 +683,6 @@ struct event_filter { int n_preds; struct filter_pred **preds; char *filter_string; - bool no_reset; }; struct event_subsystem { @@ -699,22 +694,40 @@ struct 
event_subsystem { }; struct filter_pred; +struct regex; typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event, int val1, int val2); +typedef int (*regex_match_func)(char *str, struct regex *r, int len); + +enum regex_type { + MATCH_FULL = 0, + MATCH_FRONT_ONLY, + MATCH_MIDDLE_ONLY, + MATCH_END_ONLY, +}; + +struct regex { + char pattern[MAX_FILTER_STR_VAL]; + int len; + int field_len; + regex_match_func match; +}; + struct filter_pred { - filter_pred_fn_t fn; - u64 val; - char str_val[MAX_FILTER_STR_VAL]; - int str_len; - char *field_name; - int offset; - int not; - int op; - int pop_n; + filter_pred_fn_t fn; + u64 val; + struct regex regex; + char *field_name; + int offset; + int not; + int op; + int pop_n; }; +extern enum regex_type +filter_parse_regex(char *buff, int len, char **search, int *not); extern void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s); extern int apply_event_filter(struct ftrace_event_call *call, @@ -730,7 +743,8 @@ filter_check_discard(struct ftrace_event_call *call, void *rec, struct ring_buffer *buffer, struct ring_buffer_event *event) { - if (unlikely(call->filter_active) && !filter_match_preds(call, rec)) { + if (unlikely(call->filter_active) && + !filter_match_preds(call->filter, rec)) { ring_buffer_discard_commit(buffer, event); return 1; } diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index d128f65778e..7c18d154ea2 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -507,7 +507,7 @@ extern char *__bad_type_size(void); #define FIELD(type, name) \ sizeof(type) != sizeof(field.name) ? __bad_type_size() : \ #type, "common_" #name, offsetof(typeof(field), name), \ - sizeof(field.name) + sizeof(field.name), is_signed_type(type) static int trace_write_header(struct trace_seq *s) { @@ -515,17 +515,17 @@ static int trace_write_header(struct trace_seq *s) /* struct trace_entry */ return trace_seq_printf(s, - "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n" - "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n" - "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n" - "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n" - "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n" - "\n", - FIELD(unsigned short, type), - FIELD(unsigned char, flags), - FIELD(unsigned char, preempt_count), - FIELD(int, pid), - FIELD(int, lock_depth)); + "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n" + "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n" + "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n" + "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n" + "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n" + "\n", + FIELD(unsigned short, type), + FIELD(unsigned char, flags), + FIELD(unsigned char, preempt_count), + FIELD(int, pid), + FIELD(int, lock_depth)); } static ssize_t @@ -878,9 +878,9 @@ event_subsystem_dir(const char *name, struct dentry *d_events) "'%s/filter' entry\n", name); } - entry = trace_create_file("enable", 0644, system->entry, - (void *)system->name, - &ftrace_system_enable_fops); + trace_create_file("enable", 0644, system->entry, + (void *)system->name, + &ftrace_system_enable_fops); return system->entry; } @@ -892,7 +892,6 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events, const struct file_operations *filter, const struct file_operations *format) { - struct dentry *entry; int ret; /* @@ -910,12 +909,12 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events, } if (call->regfunc) - entry = trace_create_file("enable", 0644, call->dir, call, - enable); + 
trace_create_file("enable", 0644, call->dir, call, + enable); if (call->id && call->profile_enable) - entry = trace_create_file("id", 0444, call->dir, call, - id); + trace_create_file("id", 0444, call->dir, call, + id); if (call->define_fields) { ret = call->define_fields(call); @@ -924,16 +923,16 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events, " events/%s\n", call->name); return ret; } - entry = trace_create_file("filter", 0644, call->dir, call, - filter); + trace_create_file("filter", 0644, call->dir, call, + filter); } /* A trace may not want to export its format */ if (!call->show_format) return 0; - entry = trace_create_file("format", 0444, call->dir, call, - format); + trace_create_file("format", 0444, call->dir, call, + format); return 0; } diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 23245785927..21d34757b95 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -18,11 +18,10 @@ * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com> */ -#include <linux/debugfs.h> -#include <linux/uaccess.h> #include <linux/module.h> #include <linux/ctype.h> #include <linux/mutex.h> +#include <linux/perf_event.h> #include "trace.h" #include "trace_output.h" @@ -31,6 +30,7 @@ enum filter_op_ids { OP_OR, OP_AND, + OP_GLOB, OP_NE, OP_EQ, OP_LT, @@ -48,16 +48,17 @@ struct filter_op { }; static struct filter_op filter_ops[] = { - { OP_OR, "||", 1 }, - { OP_AND, "&&", 2 }, - { OP_NE, "!=", 4 }, - { OP_EQ, "==", 4 }, - { OP_LT, "<", 5 }, - { OP_LE, "<=", 5 }, - { OP_GT, ">", 5 }, - { OP_GE, ">=", 5 }, - { OP_NONE, "OP_NONE", 0 }, - { OP_OPEN_PAREN, "(", 0 }, + { OP_OR, "||", 1 }, + { OP_AND, "&&", 2 }, + { OP_GLOB, "~", 4 }, + { OP_NE, "!=", 4 }, + { OP_EQ, "==", 4 }, + { OP_LT, "<", 5 }, + { OP_LE, "<=", 5 }, + { OP_GT, ">", 5 }, + { OP_GE, ">=", 5 }, + { OP_NONE, "OP_NONE", 0 }, + { OP_OPEN_PAREN, "(", 0 }, }; enum { @@ -197,9 +198,9 @@ static int filter_pred_string(struct filter_pred *pred, void *event, char *addr = (char *)(event + pred->offset); int cmp, match; - cmp = strncmp(addr, pred->str_val, pred->str_len); + cmp = pred->regex.match(addr, &pred->regex, pred->regex.field_len); - match = (!cmp) ^ pred->not; + match = cmp ^ pred->not; return match; } @@ -211,9 +212,9 @@ static int filter_pred_pchar(struct filter_pred *pred, void *event, char **addr = (char **)(event + pred->offset); int cmp, match; - cmp = strncmp(*addr, pred->str_val, pred->str_len); + cmp = pred->regex.match(*addr, &pred->regex, pred->regex.field_len); - match = (!cmp) ^ pred->not; + match = cmp ^ pred->not; return match; } @@ -237,9 +238,9 @@ static int filter_pred_strloc(struct filter_pred *pred, void *event, char *addr = (char *)(event + str_loc); int cmp, match; - cmp = strncmp(addr, pred->str_val, str_len); + cmp = pred->regex.match(addr, &pred->regex, str_len); - match = (!cmp) ^ pred->not; + match = cmp ^ pred->not; return match; } @@ -250,10 +251,121 @@ static int filter_pred_none(struct filter_pred *pred, void *event, return 0; } +/* Basic regex callbacks */ +static int regex_match_full(char *str, struct regex *r, int len) +{ + if (strncmp(str, r->pattern, len) == 0) + return 1; + return 0; +} + +static int regex_match_front(char *str, struct regex *r, int len) +{ + if (strncmp(str, r->pattern, len) == 0) + return 1; + return 0; +} + +static int regex_match_middle(char *str, struct regex *r, int len) +{ + if (strstr(str, r->pattern)) + return 1; + return 0; +} + +static int regex_match_end(char *str, struct 
regex *r, int len) +{ + char *ptr = strstr(str, r->pattern); + + if (ptr && (ptr[r->len] == 0)) + return 1; + return 0; +} + +/** + * filter_parse_regex - parse a basic regex + * @buff: the raw regex + * @len: length of the regex + * @search: will point to the beginning of the string to compare + * @not: tell whether the match will have to be inverted + * + * This passes in a buffer containing a regex and this function will + * set search to point to the search part of the buffer and + * return the type of search it is (see enum above). + * This does modify buff. + * + * Returns enum type. + * search returns the pointer to use for comparison. + * not returns 1 if buff started with a '!' + * 0 otherwise. + */ +enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not) +{ + int type = MATCH_FULL; + int i; + + if (buff[0] == '!') { + *not = 1; + buff++; + len--; + } else + *not = 0; + + *search = buff; + + for (i = 0; i < len; i++) { + if (buff[i] == '*') { + if (!i) { + *search = buff + 1; + type = MATCH_END_ONLY; + } else { + if (type == MATCH_END_ONLY) + type = MATCH_MIDDLE_ONLY; + else + type = MATCH_FRONT_ONLY; + buff[i] = 0; + break; + } + } + } + + return type; +} + +static void filter_build_regex(struct filter_pred *pred) +{ + struct regex *r = &pred->regex; + char *search; + enum regex_type type = MATCH_FULL; + int not = 0; + + if (pred->op == OP_GLOB) { + type = filter_parse_regex(r->pattern, r->len, &search, ¬); + r->len = strlen(search); + memmove(r->pattern, search, r->len+1); + } + + switch (type) { + case MATCH_FULL: + r->match = regex_match_full; + break; + case MATCH_FRONT_ONLY: + r->match = regex_match_front; + break; + case MATCH_MIDDLE_ONLY: + r->match = regex_match_middle; + break; + case MATCH_END_ONLY: + r->match = regex_match_end; + break; + } + + pred->not ^= not; +} + /* return 1 if event matches, 0 otherwise (discard) */ -int filter_match_preds(struct ftrace_event_call *call, void *rec) +int filter_match_preds(struct event_filter *filter, void *rec) { - struct event_filter *filter = call->filter; int match, top = 0, val1 = 0, val2 = 0; int stack[MAX_FILTER_PRED]; struct filter_pred *pred; @@ -396,7 +508,7 @@ static void filter_clear_pred(struct filter_pred *pred) { kfree(pred->field_name); pred->field_name = NULL; - pred->str_len = 0; + pred->regex.len = 0; } static int filter_set_pred(struct filter_pred *dest, @@ -426,9 +538,8 @@ static void filter_disable_preds(struct ftrace_event_call *call) filter->preds[i]->fn = filter_pred_none; } -void destroy_preds(struct ftrace_event_call *call) +static void __free_preds(struct event_filter *filter) { - struct event_filter *filter = call->filter; int i; if (!filter) @@ -441,21 +552,24 @@ void destroy_preds(struct ftrace_event_call *call) kfree(filter->preds); kfree(filter->filter_string); kfree(filter); +} + +void destroy_preds(struct ftrace_event_call *call) +{ + __free_preds(call->filter); call->filter = NULL; + call->filter_active = 0; } -static int init_preds(struct ftrace_event_call *call) +static struct event_filter *__alloc_preds(void) { struct event_filter *filter; struct filter_pred *pred; int i; - if (call->filter) - return 0; - - filter = call->filter = kzalloc(sizeof(*filter), GFP_KERNEL); - if (!call->filter) - return -ENOMEM; + filter = kzalloc(sizeof(*filter), GFP_KERNEL); + if (!filter) + return ERR_PTR(-ENOMEM); filter->n_preds = 0; @@ -471,12 +585,24 @@ static int init_preds(struct ftrace_event_call *call) filter->preds[i] = pred; } - return 0; + return filter; oom: - 
destroy_preds(call); + __free_preds(filter); + return ERR_PTR(-ENOMEM); +} + +static int init_preds(struct ftrace_event_call *call) +{ + if (call->filter) + return 0; + + call->filter_active = 0; + call->filter = __alloc_preds(); + if (IS_ERR(call->filter)) + return PTR_ERR(call->filter); - return -ENOMEM; + return 0; } static int init_subsystem_preds(struct event_subsystem *system) @@ -499,14 +625,7 @@ static int init_subsystem_preds(struct event_subsystem *system) return 0; } -enum { - FILTER_DISABLE_ALL, - FILTER_INIT_NO_RESET, - FILTER_SKIP_NO_RESET, -}; - -static void filter_free_subsystem_preds(struct event_subsystem *system, - int flag) +static void filter_free_subsystem_preds(struct event_subsystem *system) { struct ftrace_event_call *call; @@ -517,14 +636,6 @@ static void filter_free_subsystem_preds(struct event_subsystem *system, if (strcmp(call->system, system->name) != 0) continue; - if (flag == FILTER_INIT_NO_RESET) { - call->filter->no_reset = false; - continue; - } - - if (flag == FILTER_SKIP_NO_RESET && call->filter->no_reset) - continue; - filter_disable_preds(call); remove_filter_string(call->filter); } @@ -532,10 +643,10 @@ static void filter_free_subsystem_preds(struct event_subsystem *system, static int filter_add_pred_fn(struct filter_parse_state *ps, struct ftrace_event_call *call, + struct event_filter *filter, struct filter_pred *pred, filter_pred_fn_t fn) { - struct event_filter *filter = call->filter; int idx, err; if (filter->n_preds == MAX_FILTER_PRED) { @@ -550,7 +661,6 @@ static int filter_add_pred_fn(struct filter_parse_state *ps, return err; filter->n_preds++; - call->filter_active = 1; return 0; } @@ -575,7 +685,10 @@ static bool is_string_field(struct ftrace_event_field *field) static int is_legal_op(struct ftrace_event_field *field, int op) { - if (is_string_field(field) && (op != OP_EQ && op != OP_NE)) + if (is_string_field(field) && + (op != OP_EQ && op != OP_NE && op != OP_GLOB)) + return 0; + if (!is_string_field(field) && op == OP_GLOB) return 0; return 1; @@ -626,6 +739,7 @@ static filter_pred_fn_t select_comparison_fn(int op, int field_size, static int filter_add_pred(struct filter_parse_state *ps, struct ftrace_event_call *call, + struct event_filter *filter, struct filter_pred *pred, bool dry_run) { @@ -660,21 +774,22 @@ static int filter_add_pred(struct filter_parse_state *ps, } if (is_string_field(field)) { - pred->str_len = field->size; + filter_build_regex(pred); - if (field->filter_type == FILTER_STATIC_STRING) + if (field->filter_type == FILTER_STATIC_STRING) { fn = filter_pred_string; - else if (field->filter_type == FILTER_DYN_STRING) + pred->regex.field_len = field->size; + } else if (field->filter_type == FILTER_DYN_STRING) fn = filter_pred_strloc; else { fn = filter_pred_pchar; - pred->str_len = strlen(pred->str_val); + pred->regex.field_len = strlen(pred->regex.pattern); } } else { if (field->is_signed) - ret = strict_strtoll(pred->str_val, 0, &val); + ret = strict_strtoll(pred->regex.pattern, 0, &val); else - ret = strict_strtoull(pred->str_val, 0, &val); + ret = strict_strtoull(pred->regex.pattern, 0, &val); if (ret) { parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0); return -EINVAL; @@ -694,45 +809,7 @@ static int filter_add_pred(struct filter_parse_state *ps, add_pred_fn: if (!dry_run) - return filter_add_pred_fn(ps, call, pred, fn); - return 0; -} - -static int filter_add_subsystem_pred(struct filter_parse_state *ps, - struct event_subsystem *system, - struct filter_pred *pred, - char *filter_string, - bool dry_run) -{ - struct 
ftrace_event_call *call; - int err = 0; - bool fail = true; - - list_for_each_entry(call, &ftrace_events, list) { - - if (!call->define_fields) - continue; - - if (strcmp(call->system, system->name)) - continue; - - if (call->filter->no_reset) - continue; - - err = filter_add_pred(ps, call, pred, dry_run); - if (err) - call->filter->no_reset = true; - else - fail = false; - - if (!dry_run) - replace_filter_string(call->filter, filter_string); - } - - if (fail) { - parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0); - return err; - } + return filter_add_pred_fn(ps, call, filter, pred, fn); return 0; } @@ -933,8 +1010,9 @@ static void postfix_clear(struct filter_parse_state *ps) while (!list_empty(&ps->postfix)) { elt = list_first_entry(&ps->postfix, struct postfix_elt, list); - kfree(elt->operand); list_del(&elt->list); + kfree(elt->operand); + kfree(elt); } } @@ -1044,8 +1122,8 @@ static struct filter_pred *create_pred(int op, char *operand1, char *operand2) return NULL; } - strcpy(pred->str_val, operand2); - pred->str_len = strlen(operand2); + strcpy(pred->regex.pattern, operand2); + pred->regex.len = strlen(pred->regex.pattern); pred->op = op; @@ -1089,8 +1167,8 @@ static int check_preds(struct filter_parse_state *ps) return 0; } -static int replace_preds(struct event_subsystem *system, - struct ftrace_event_call *call, +static int replace_preds(struct ftrace_event_call *call, + struct event_filter *filter, struct filter_parse_state *ps, char *filter_string, bool dry_run) @@ -1137,11 +1215,7 @@ static int replace_preds(struct event_subsystem *system, add_pred: if (!pred) return -ENOMEM; - if (call) - err = filter_add_pred(ps, call, pred, false); - else - err = filter_add_subsystem_pred(ps, system, pred, - filter_string, dry_run); + err = filter_add_pred(ps, call, filter, pred, dry_run); filter_free_pred(pred); if (err) return err; @@ -1152,10 +1226,50 @@ add_pred: return 0; } -int apply_event_filter(struct ftrace_event_call *call, char *filter_string) +static int replace_system_preds(struct event_subsystem *system, + struct filter_parse_state *ps, + char *filter_string) { + struct event_filter *filter = system->filter; + struct ftrace_event_call *call; + bool fail = true; int err; + list_for_each_entry(call, &ftrace_events, list) { + + if (!call->define_fields) + continue; + + if (strcmp(call->system, system->name) != 0) + continue; + + /* try to see if the filter can be applied */ + err = replace_preds(call, filter, ps, filter_string, true); + if (err) + continue; + + /* really apply the filter */ + filter_disable_preds(call); + err = replace_preds(call, filter, ps, filter_string, false); + if (err) + filter_disable_preds(call); + else { + call->filter_active = 1; + replace_filter_string(filter, filter_string); + } + fail = false; + } + + if (fail) { + parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0); + return -EINVAL; + } + return 0; +} + +int apply_event_filter(struct ftrace_event_call *call, char *filter_string) +{ + int err; struct filter_parse_state *ps; mutex_lock(&event_mutex); @@ -1167,8 +1281,7 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string) if (!strcmp(strstrip(filter_string), "0")) { filter_disable_preds(call); remove_filter_string(call->filter); - mutex_unlock(&event_mutex); - return 0; + goto out_unlock; } err = -ENOMEM; @@ -1186,10 +1299,11 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string) goto out; } - err = replace_preds(NULL, call, ps, filter_string, false); + err = replace_preds(call, call->filter, ps, 
filter_string, false); if (err) append_filter_err(ps, call->filter); - + else + call->filter_active = 1; out: filter_opstack_clear(ps); postfix_clear(ps); @@ -1204,7 +1318,6 @@ int apply_subsystem_event_filter(struct event_subsystem *system, char *filter_string) { int err; - struct filter_parse_state *ps; mutex_lock(&event_mutex); @@ -1214,10 +1327,9 @@ int apply_subsystem_event_filter(struct event_subsystem *system, goto out_unlock; if (!strcmp(strstrip(filter_string), "0")) { - filter_free_subsystem_preds(system, FILTER_DISABLE_ALL); + filter_free_subsystem_preds(system); remove_filter_string(system->filter); - mutex_unlock(&event_mutex); - return 0; + goto out_unlock; } err = -ENOMEM; @@ -1234,31 +1346,87 @@ int apply_subsystem_event_filter(struct event_subsystem *system, goto out; } - filter_free_subsystem_preds(system, FILTER_INIT_NO_RESET); - - /* try to see the filter can be applied to which events */ - err = replace_preds(system, NULL, ps, filter_string, true); - if (err) { + err = replace_system_preds(system, ps, filter_string); + if (err) append_filter_err(ps, system->filter); - goto out; + +out: + filter_opstack_clear(ps); + postfix_clear(ps); + kfree(ps); +out_unlock: + mutex_unlock(&event_mutex); + + return err; +} + +#ifdef CONFIG_EVENT_PROFILE + +void ftrace_profile_free_filter(struct perf_event *event) +{ + struct event_filter *filter = event->filter; + + event->filter = NULL; + __free_preds(filter); +} + +int ftrace_profile_set_filter(struct perf_event *event, int event_id, + char *filter_str) +{ + int err; + struct event_filter *filter; + struct filter_parse_state *ps; + struct ftrace_event_call *call = NULL; + + mutex_lock(&event_mutex); + + list_for_each_entry(call, &ftrace_events, list) { + if (call->id == event_id) + break; } - filter_free_subsystem_preds(system, FILTER_SKIP_NO_RESET); + err = -EINVAL; + if (!call) + goto out_unlock; - /* really apply the filter to the events */ - err = replace_preds(system, NULL, ps, filter_string, false); - if (err) { - append_filter_err(ps, system->filter); - filter_free_subsystem_preds(system, 2); + err = -EEXIST; + if (event->filter) + goto out_unlock; + + filter = __alloc_preds(); + if (IS_ERR(filter)) { + err = PTR_ERR(filter); + goto out_unlock; } -out: + err = -ENOMEM; + ps = kzalloc(sizeof(*ps), GFP_KERNEL); + if (!ps) + goto free_preds; + + parse_init(ps, filter_ops, filter_str); + err = filter_parse(ps); + if (err) + goto free_ps; + + err = replace_preds(call, filter, ps, filter_str, false); + if (!err) + event->filter = filter; + +free_ps: filter_opstack_clear(ps); postfix_clear(ps); kfree(ps); + +free_preds: + if (err) + __free_preds(filter); + out_unlock: mutex_unlock(&event_mutex); return err; } +#endif /* CONFIG_EVENT_PROFILE */ + diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c index 9753fcc61bc..31da218ee10 100644 --- a/kernel/trace/trace_export.c +++ b/kernel/trace/trace_export.c @@ -66,44 +66,47 @@ static void __used ____ftrace_check_##name(void) \ #undef __field #define __field(type, item) \ ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \ - "offset:%zu;\tsize:%zu;\n", \ + "offset:%zu;\tsize:%zu;\tsigned:%u;\n", \ offsetof(typeof(field), item), \ - sizeof(field.item)); \ + sizeof(field.item), is_signed_type(type)); \ if (!ret) \ return 0; #undef __field_desc #define __field_desc(type, container, item) \ ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \ - "offset:%zu;\tsize:%zu;\n", \ + "offset:%zu;\tsize:%zu;\tsigned:%u;\n", \ offsetof(typeof(field), container.item), \ 
- sizeof(field.container.item)); \ + sizeof(field.container.item), \ + is_signed_type(type)); \ if (!ret) \ return 0; #undef __array #define __array(type, item, len) \ ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \ - "offset:%zu;\tsize:%zu;\n", \ - offsetof(typeof(field), item), \ - sizeof(field.item)); \ + "offset:%zu;\tsize:%zu;\tsigned:%u;\n", \ + offsetof(typeof(field), item), \ + sizeof(field.item), is_signed_type(type)); \ if (!ret) \ return 0; #undef __array_desc #define __array_desc(type, container, item, len) \ ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \ - "offset:%zu;\tsize:%zu;\n", \ + "offset:%zu;\tsize:%zu;\tsigned:%u;\n", \ offsetof(typeof(field), container.item), \ - sizeof(field.container.item)); \ + sizeof(field.container.item), \ + is_signed_type(type)); \ if (!ret) \ return 0; #undef __dynamic_array #define __dynamic_array(type, item) \ ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \ - "offset:%zu;\tsize:0;\n", \ - offsetof(typeof(field), item)); \ + "offset:%zu;\tsize:0;\tsigned:%u;\n", \ + offsetof(typeof(field), item), \ + is_signed_type(type)); \ if (!ret) \ return 0; diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 527e17eae57..d00d1a8f1f2 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -14,6 +14,69 @@ static int sys_refcount_exit; static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls); static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls); +extern unsigned long __start_syscalls_metadata[]; +extern unsigned long __stop_syscalls_metadata[]; + +static struct syscall_metadata **syscalls_metadata; + +static struct syscall_metadata *find_syscall_meta(unsigned long syscall) +{ + struct syscall_metadata *start; + struct syscall_metadata *stop; + char str[KSYM_SYMBOL_LEN]; + + + start = (struct syscall_metadata *)__start_syscalls_metadata; + stop = (struct syscall_metadata *)__stop_syscalls_metadata; + kallsyms_lookup(syscall, NULL, NULL, NULL, str); + + for ( ; start < stop; start++) { + /* + * Only compare after the "sys" prefix. Archs that use + * syscall wrappers may have syscalls symbols aliases prefixed + * with "SyS" instead of "sys", leading to an unwanted + * mismatch. + */ + if (start->name && !strcmp(start->name + 3, str + 3)) + return start; + } + return NULL; +} + +static struct syscall_metadata *syscall_nr_to_meta(int nr) +{ + if (!syscalls_metadata || nr >= NR_syscalls || nr < 0) + return NULL; + + return syscalls_metadata[nr]; +} + +int syscall_name_to_nr(char *name) +{ + int i; + + if (!syscalls_metadata) + return -1; + + for (i = 0; i < NR_syscalls; i++) { + if (syscalls_metadata[i]) { + if (!strcmp(syscalls_metadata[i]->name, name)) + return i; + } + } + return -1; +} + +void set_syscall_enter_id(int num, int id) +{ + syscalls_metadata[num]->enter_id = id; +} + +void set_syscall_exit_id(int num, int id) +{ + syscalls_metadata[num]->exit_id = id; +} + enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags) { @@ -103,7 +166,8 @@ extern char *__bad_type_size(void); #define SYSCALL_FIELD(type, name) \ sizeof(type) != sizeof(trace.name) ? 
\ __bad_type_size() : \ - #type, #name, offsetof(typeof(trace), name), sizeof(trace.name) + #type, #name, offsetof(typeof(trace), name), \ + sizeof(trace.name), is_signed_type(type) int syscall_enter_format(struct ftrace_event_call *call, struct trace_seq *s) { @@ -120,7 +184,8 @@ int syscall_enter_format(struct ftrace_event_call *call, struct trace_seq *s) if (!entry) return 0; - ret = trace_seq_printf(s, "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n", + ret = trace_seq_printf(s, "\tfield:%s %s;\toffset:%zu;\tsize:%zu;" + "\tsigned:%u;\n", SYSCALL_FIELD(int, nr)); if (!ret) return 0; @@ -130,8 +195,10 @@ int syscall_enter_format(struct ftrace_event_call *call, struct trace_seq *s) entry->args[i]); if (!ret) return 0; - ret = trace_seq_printf(s, "\toffset:%d;\tsize:%zu;\n", offset, - sizeof(unsigned long)); + ret = trace_seq_printf(s, "\toffset:%d;\tsize:%zu;" + "\tsigned:%u;\n", offset, + sizeof(unsigned long), + is_signed_type(unsigned long)); if (!ret) return 0; offset += sizeof(unsigned long); @@ -163,8 +230,10 @@ int syscall_exit_format(struct ftrace_event_call *call, struct trace_seq *s) struct syscall_trace_exit trace; ret = trace_seq_printf(s, - "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n" - "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n", + "\tfield:%s %s;\toffset:%zu;\tsize:%zu;" + "\tsigned:%u;\n" + "\tfield:%s %s;\toffset:%zu;\tsize:%zu;" + "\tsigned:%u;\n", SYSCALL_FIELD(int, nr), SYSCALL_FIELD(long, ret)); if (!ret) @@ -212,7 +281,7 @@ int syscall_exit_define_fields(struct ftrace_event_call *call) if (ret) return ret; - ret = trace_define_field(call, SYSCALL_FIELD(long, ret), 0, + ret = trace_define_field(call, SYSCALL_FIELD(long, ret), FILTER_OTHER); return ret; @@ -375,6 +444,29 @@ struct trace_event event_syscall_exit = { .trace = print_syscall_exit, }; +int __init init_ftrace_syscalls(void) +{ + struct syscall_metadata *meta; + unsigned long addr; + int i; + + syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * + NR_syscalls, GFP_KERNEL); + if (!syscalls_metadata) { + WARN_ON(1); + return -ENOMEM; + } + + for (i = 0; i < NR_syscalls; i++) { + addr = arch_syscall_addr(i); + meta = find_syscall_meta(addr); + syscalls_metadata[i] = meta; + } + + return 0; +} +core_initcall(init_ftrace_syscalls); + #ifdef CONFIG_EVENT_PROFILE static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls); diff --git a/kernel/workqueue.c b/kernel/workqueue.c index addfe2df93b..ccefe574dcf 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -640,6 +640,24 @@ int schedule_delayed_work(struct delayed_work *dwork, EXPORT_SYMBOL(schedule_delayed_work); /** + * flush_delayed_work - block until a dwork_struct's callback has terminated + * @dwork: the delayed work which is to be flushed + * + * Any timeout is cancelled, and any pending work is run immediately. + */ +void flush_delayed_work(struct delayed_work *dwork) +{ + if (del_timer(&dwork->timer)) { + struct cpu_workqueue_struct *cwq; + cwq = wq_per_cpu(keventd_wq, get_cpu()); + __queue_work(cwq, &dwork->work); + put_cpu(); + } + flush_work(&dwork->work); +} +EXPORT_SYMBOL(flush_delayed_work); + +/** * schedule_delayed_work_on - queue work in global workqueue on CPU after delay * @cpu: cpu to use * @dwork: job to be done diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c index 39f1029e352..4ebfa5a164d 100644 --- a/lib/kernel_lock.c +++ b/lib/kernel_lock.c @@ -5,10 +5,13 @@ * relegated to obsolescence, but used by various less * important (or lazy) subsystems. 
*/ -#include <linux/smp_lock.h> #include <linux/module.h> #include <linux/kallsyms.h> #include <linux/semaphore.h> +#include <linux/smp_lock.h> + +#define CREATE_TRACE_POINTS +#include <trace/events/bkl.h> /* * The 'big kernel lock' @@ -113,21 +116,26 @@ static inline void __unlock_kernel(void) * This cannot happen asynchronously, so we only need to * worry about other CPU's. */ -void __lockfunc lock_kernel(void) +void __lockfunc _lock_kernel(const char *func, const char *file, int line) { - int depth = current->lock_depth+1; + int depth = current->lock_depth + 1; + + trace_lock_kernel(func, file, line); + if (likely(!depth)) __lock_kernel(); current->lock_depth = depth; } -void __lockfunc unlock_kernel(void) +void __lockfunc _unlock_kernel(const char *func, const char *file, int line) { BUG_ON(current->lock_depth < 0); if (likely(--current->lock_depth < 0)) __unlock_kernel(); + + trace_unlock_kernel(func, file, line); } -EXPORT_SYMBOL(lock_kernel); -EXPORT_SYMBOL(unlock_kernel); +EXPORT_SYMBOL(_lock_kernel); +EXPORT_SYMBOL(_unlock_kernel); diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 3d3accb1f80..5a37e205571 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -92,7 +92,7 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v) "BdiDirtyThresh: %8lu kB\n" "DirtyThresh: %8lu kB\n" "BackgroundThresh: %8lu kB\n" - "WriteBack threads:%8lu\n" + "WritebackThreads: %8lu\n" "b_dirty: %8lu\n" "b_io: %8lu\n" "b_more_io: %8lu\n" diff --git a/mm/page-writeback.c b/mm/page-writeback.c index a3b14090b1f..2c5d79236ea 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -566,7 +566,8 @@ static void balance_dirty_pages(struct address_space *mapping, if (pages_written >= write_chunk) break; /* We've done our duty */ - schedule_timeout_interruptible(pause); + __set_current_state(TASK_INTERRUPTIBLE); + io_schedule_timeout(pause); /* * Increase the delay for each loop, up to our previous diff --git a/mm/percpu.c b/mm/percpu.c index 4a048abad04..6af78c1ee70 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -1870,13 +1870,14 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size, max_distance = 0; for (group = 0; group < ai->nr_groups; group++) { ai->groups[group].base_offset = areas[group] - base; - max_distance = max(max_distance, ai->groups[group].base_offset); + max_distance = max_t(size_t, max_distance, + ai->groups[group].base_offset); } max_distance += ai->unit_size; /* warn if maximum distance is further than 75% of vmalloc space */ if (max_distance > (VMALLOC_END - VMALLOC_START) * 3 / 4) { - pr_warning("PERCPU: max_distance=0x%lx too large for vmalloc " + pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc " "space 0x%lx\n", max_distance, VMALLOC_END - VMALLOC_START); #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include index 4f9c1908593..c67e73ecd5b 100644 --- a/scripts/Kbuild.include +++ b/scripts/Kbuild.include @@ -100,7 +100,7 @@ as-option = $(call try-run,\ # Usage: cflags-y += $(call as-instr,instr,option1,option2) as-instr = $(call try-run,\ - echo -e "$(1)" | $(CC) $(KBUILD_AFLAGS) -c -xassembler -o "$$TMP" -,$(2),$(3)) + /bin/echo -e "$(1)" | $(CC) $(KBUILD_AFLAGS) -c -xassembler -o "$$TMP" -,$(2),$(3)) # cc-option # Usage: cflags-y += $(call cc-option,-march=winchip-c6,-march=i586) diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index 7a7778746ea..ffdafb26f53 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib @@ -208,7 +208,7 @@ cmd_gzip = (cat $(filter-out FORCE,$^) 
| gzip -f -9 > $@) || \ # Bzip2 and LZMA do not include size in file... so we have to fake that; # append the size as a 32-bit littleendian number as gzip does. -size_append = echo -ne $(shell \ +size_append = /bin/echo -ne $(shell \ dec_size=0; \ for F in $1; do \ fsize=$$(stat -c "%s" $$F); \ diff --git a/scripts/checkkconfigsymbols.sh b/scripts/checkkconfigsymbols.sh index 39677c82747..46be3c5a62b 100755 --- a/scripts/checkkconfigsymbols.sh +++ b/scripts/checkkconfigsymbols.sh @@ -9,7 +9,7 @@ paths="$@" # Doing this once at the beginning saves a lot of time, on a cache-hot tree. Kconfigs="`find . -name 'Kconfig' -o -name 'Kconfig*[^~]'`" -echo -e "File list \tundefined symbol used" +/bin/echo -e "File list \tundefined symbol used" find $paths -name '*.[chS]' -o -name 'Makefile' -o -name 'Makefile*[^~]'| while read i do # Output the bare Kconfig variable and the filename; the _MODULE part at @@ -54,6 +54,6 @@ while read symb files; do # beyond the purpose of this script. symb_bare=`echo $symb | sed -e 's/_MODULE//'` if ! grep -q "\<$symb_bare\>" $Kconfigs; then - echo -e "$files: \t$symb" + /bin/echo -e "$files: \t$symb" fi done|sort diff --git a/scripts/headers_install.pl b/scripts/headers_install.pl index c6ae4052ab4..b89ca2c58fd 100644 --- a/scripts/headers_install.pl +++ b/scripts/headers_install.pl @@ -20,7 +20,7 @@ use strict; my ($readdir, $installdir, $arch, @files) = @ARGV; -my $unifdef = "scripts/unifdef -U__KERNEL__"; +my $unifdef = "scripts/unifdef -U__KERNEL__ -D__EXPORTED_HEADERS__"; foreach my $file (@files) { local *INFILE; diff --git a/scripts/mkcompile_h b/scripts/mkcompile_h index 6a12dd9f118..bce3d0fe6fb 100755 --- a/scripts/mkcompile_h +++ b/scripts/mkcompile_h @@ -1,3 +1,5 @@ +#!/bin/sh + TARGET=$1 ARCH=$2 SMP=$3 @@ -50,7 +52,7 @@ UTS_VERSION="$UTS_VERSION $CONFIG_FLAGS $TIMESTAMP" # Truncate to maximum length UTS_LEN=64 -UTS_TRUNCATE="sed -e s/\(.\{1,$UTS_LEN\}\).*/\1/" +UTS_TRUNCATE="cut -b -$UTS_LEN" # Generate a temporary compile.h @@ -66,9 +68,13 @@ UTS_TRUNCATE="sed -e s/\(.\{1,$UTS_LEN\}\).*/\1/" echo \#define LINUX_COMPILE_HOST \"`hostname | $UTS_TRUNCATE`\" if [ -x /bin/dnsdomainname ]; then - echo \#define LINUX_COMPILE_DOMAIN \"`dnsdomainname | $UTS_TRUNCATE`\" + domain=`dnsdomainname 2> /dev/null` elif [ -x /bin/domainname ]; then - echo \#define LINUX_COMPILE_DOMAIN \"`domainname | $UTS_TRUNCATE`\" + domain=`domainname 2> /dev/null` + fi + + if [ -n "$domain" ]; then + echo \#define LINUX_COMPILE_DOMAIN \"`echo $domain | $UTS_TRUNCATE`\" else echo \#define LINUX_COMPILE_DOMAIN fi diff --git a/scripts/package/Makefile b/scripts/package/Makefile index fa4a0a17b7e..f67cc885c80 100644 --- a/scripts/package/Makefile +++ b/scripts/package/Makefile @@ -18,6 +18,9 @@ # e) generate the rpm files, based on kernel.spec # - Use /. to avoid tar packing just the symlink +# Note that the rpm-pkg target cannot be used with KBUILD_OUTPUT, +# but the binrpm-pkg target can; for some reason O= gets ignored. + # Do we have rpmbuild, otherwise fall back to the older rpm RPM := $(shell if [ -x "/usr/bin/rpmbuild" ]; then echo rpmbuild; \ else echo rpm; fi) @@ -33,6 +36,12 @@ $(objtree)/kernel.spec: $(MKSPEC) $(srctree)/Makefile $(CONFIG_SHELL) $(MKSPEC) > $@ rpm-pkg rpm: $(objtree)/kernel.spec FORCE + @if test -n "$(KBUILD_OUTPUT)"; then \ + echo "Building source + binary RPM is not possible outside the"; \ + echo "kernel source tree. 
Don't set KBUILD_OUTPUT, or use the"; \ + echo "binrpm-pkg target instead."; \ + false; \ + fi $(MAKE) clean $(PREV) ln -sf $(srctree) $(KERNELPATH) $(CONFIG_SHELL) $(srctree)/scripts/setlocalversion > $(objtree)/.scmversion @@ -61,7 +70,7 @@ binrpm-pkg: $(objtree)/binkernel.spec FORCE set -e; \ mv -f $(objtree)/.tmp_version $(objtree)/.version - $(RPM) $(RPMOPTS) --define "_builddir $(srctree)" --target \ + $(RPM) $(RPMOPTS) --define "_builddir $(objtree)" --target \ $(UTS_MACHINE) -bb $< clean-files += $(objtree)/binkernel.spec diff --git a/scripts/package/mkspec b/scripts/package/mkspec index 3d93f8c8125..47bdd2f99b7 100755 --- a/scripts/package/mkspec +++ b/scripts/package/mkspec @@ -70,7 +70,7 @@ echo 'mkdir -p $RPM_BUILD_ROOT/boot $RPM_BUILD_ROOT/lib/modules' echo 'mkdir -p $RPM_BUILD_ROOT/lib/firmware' echo "%endif" -echo 'INSTALL_MOD_PATH=$RPM_BUILD_ROOT make %{_smp_mflags} modules_install' +echo 'INSTALL_MOD_PATH=$RPM_BUILD_ROOT make %{_smp_mflags} KBUILD_SRC= modules_install' echo "%ifarch ia64" echo 'cp $KBUILD_IMAGE $RPM_BUILD_ROOT'"/boot/efi/vmlinuz-$KERNELRELEASE" echo 'ln -s '"efi/vmlinuz-$KERNELRELEASE" '$RPM_BUILD_ROOT'"/boot/" diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl index 090d300d739..bfb8b2cdd92 100755 --- a/scripts/recordmcount.pl +++ b/scripts/recordmcount.pl @@ -119,6 +119,7 @@ my %text_sections = ( ".sched.text" => 1, ".spinlock.text" => 1, ".irqentry.text" => 1, + ".text.unlikely" => 1, ); $objdump = "objdump" if ((length $objdump) == 0); diff --git a/sound/arm/aaci.c b/sound/arm/aaci.c index dc78272fc39..1f0f8213e2d 100644 --- a/sound/arm/aaci.c +++ b/sound/arm/aaci.c @@ -937,6 +937,7 @@ static int __devinit aaci_probe_ac97(struct aaci *aaci) struct snd_ac97 *ac97; int ret; + writel(0, aaci->base + AC97_POWERDOWN); /* * Assert AACIRESET for 2us */ diff --git a/sound/pci/bt87x.c b/sound/pci/bt87x.c index 24585c6c6d0..4e2b925a94c 100644 --- a/sound/pci/bt87x.c +++ b/sound/pci/bt87x.c @@ -808,6 +808,8 @@ static struct pci_device_id snd_bt87x_ids[] = { BT_DEVICE(PCI_DEVICE_ID_BROOKTREE_878, 0x1002, 0x0001, GENERIC), /* Leadtek Winfast tv 2000xp delux */ BT_DEVICE(PCI_DEVICE_ID_BROOKTREE_878, 0x107d, 0x6606, GENERIC), + /* Pinnacle PCTV */ + BT_DEVICE(PCI_DEVICE_ID_BROOKTREE_878, 0x11bd, 0x0012, GENERIC), /* Voodoo TV 200 */ BT_DEVICE(PCI_DEVICE_ID_BROOKTREE_878, 0x121a, 0x3000, GENERIC), /* Askey Computer Corp. 
MagicTView'99 */ diff --git a/sound/pci/hda/patch_nvhdmi.c b/sound/pci/hda/patch_nvhdmi.c index c8435c9a97f..9fb60276f5c 100644 --- a/sound/pci/hda/patch_nvhdmi.c +++ b/sound/pci/hda/patch_nvhdmi.c @@ -29,6 +29,9 @@ #include "hda_codec.h" #include "hda_local.h" +/* define below to restrict the supported rates and formats */ +/* #define LIMITED_RATE_FMT_SUPPORT */ + struct nvhdmi_spec { struct hda_multi_out multiout; @@ -60,6 +63,22 @@ static struct hda_verb nvhdmi_basic_init[] = { {} /* terminator */ }; +#ifdef LIMITED_RATE_FMT_SUPPORT +/* support only the safe format and rate */ +#define SUPPORTED_RATES SNDRV_PCM_RATE_48000 +#define SUPPORTED_MAXBPS 16 +#define SUPPORTED_FORMATS SNDRV_PCM_FMTBIT_S16_LE +#else +/* support all rates and formats */ +#define SUPPORTED_RATES \ + (SNDRV_PCM_RATE_22050 | SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |\ + SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 |\ + SNDRV_PCM_RATE_192000) +#define SUPPORTED_MAXBPS 24 +#define SUPPORTED_FORMATS \ + (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE) +#endif + /* * Controls */ @@ -258,9 +277,9 @@ static struct hda_pcm_stream nvhdmi_pcm_digital_playback_8ch = { .channels_min = 2, .channels_max = 8, .nid = Nv_Master_Convert_nid, - .rates = SNDRV_PCM_RATE_48000, - .maxbps = 16, - .formats = SNDRV_PCM_FMTBIT_S16_LE, + .rates = SUPPORTED_RATES, + .maxbps = SUPPORTED_MAXBPS, + .formats = SUPPORTED_FORMATS, .ops = { .open = nvhdmi_dig_playback_pcm_open, .close = nvhdmi_dig_playback_pcm_close_8ch, @@ -273,9 +292,9 @@ static struct hda_pcm_stream nvhdmi_pcm_digital_playback_2ch = { .channels_min = 2, .channels_max = 2, .nid = Nv_Master_Convert_nid, - .rates = SNDRV_PCM_RATE_48000, - .maxbps = 16, - .formats = SNDRV_PCM_FMTBIT_S16_LE, + .rates = SUPPORTED_RATES, + .maxbps = SUPPORTED_MAXBPS, + .formats = SUPPORTED_FORMATS, .ops = { .open = nvhdmi_dig_playback_pcm_open, .close = nvhdmi_dig_playback_pcm_close_2ch, diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 470fd74a0a1..c08ca660dab 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -275,7 +275,7 @@ struct alc_spec { struct snd_kcontrol_new *cap_mixer; /* capture mixer */ unsigned int beep_amp; /* beep amp value, set via set_beep_amp() */ - const struct hda_verb *init_verbs[5]; /* initialization verbs + const struct hda_verb *init_verbs[10]; /* initialization verbs * don't forget NULL * termination! 
*/ diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index a9b26828a65..66c0876bf73 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c @@ -158,6 +158,7 @@ enum { STAC_D965_5ST_NO_FP, STAC_DELL_3ST, STAC_DELL_BIOS, + STAC_927X_VOLKNOB, STAC_927X_MODELS }; @@ -907,6 +908,16 @@ static struct hda_verb d965_core_init[] = { {} }; +static struct hda_verb dell_3st_core_init[] = { + /* don't set delta bit */ + {0x24, AC_VERB_SET_VOLUME_KNOB_CONTROL, 0x7f}, + /* unmute node 0x1b */ + {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, 0xb000}, + /* select node 0x03 as DAC */ + {0x0b, AC_VERB_SET_CONNECT_SEL, 0x01}, + {} +}; + static struct hda_verb stac927x_core_init[] = { /* set master volume and direct control */ { 0x24, AC_VERB_SET_VOLUME_KNOB_CONTROL, 0xff}, @@ -915,6 +926,14 @@ static struct hda_verb stac927x_core_init[] = { {} }; +static struct hda_verb stac927x_volknob_core_init[] = { + /* don't set delta bit */ + {0x24, AC_VERB_SET_VOLUME_KNOB_CONTROL, 0x7f}, + /* enable analog pc beep path */ + {0x01, AC_VERB_SET_DIGI_CONVERT_2, 1 << 5}, + {} +}; + static struct hda_verb stac9205_core_init[] = { /* set master volume and direct control */ { 0x24, AC_VERB_SET_VOLUME_KNOB_CONTROL, 0xff}, @@ -1999,6 +2018,7 @@ static unsigned int *stac927x_brd_tbl[STAC_927X_MODELS] = { [STAC_D965_5ST_NO_FP] = d965_5st_no_fp_pin_configs, [STAC_DELL_3ST] = dell_3st_pin_configs, [STAC_DELL_BIOS] = NULL, + [STAC_927X_VOLKNOB] = NULL, }; static const char *stac927x_models[STAC_927X_MODELS] = { @@ -2010,6 +2030,7 @@ static const char *stac927x_models[STAC_927X_MODELS] = { [STAC_D965_5ST_NO_FP] = "5stack-no-fp", [STAC_DELL_3ST] = "dell-3stack", [STAC_DELL_BIOS] = "dell-bios", + [STAC_927X_VOLKNOB] = "volknob", }; static struct snd_pci_quirk stac927x_cfg_tbl[] = { @@ -2045,6 +2066,8 @@ static struct snd_pci_quirk stac927x_cfg_tbl[] = { "Intel D965", STAC_D965_5ST), SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_INTEL, 0xff00, 0x2500, "Intel D965", STAC_D965_5ST), + /* volume-knob fixes */ + SND_PCI_QUIRK_VENDOR(0x10cf, "FSC", STAC_927X_VOLKNOB), {} /* terminator */ }; @@ -5612,10 +5635,14 @@ static int patch_stac927x(struct hda_codec *codec) spec->dmic_nids = stac927x_dmic_nids; spec->num_dmics = STAC927X_NUM_DMICS; - spec->init = d965_core_init; + spec->init = dell_3st_core_init; spec->dmux_nids = stac927x_dmux_nids; spec->num_dmuxes = ARRAY_SIZE(stac927x_dmux_nids); break; + case STAC_927X_VOLKNOB: + spec->num_dmics = 0; + spec->init = stac927x_volknob_core_init; + break; default: spec->num_dmics = 0; spec->init = stac927x_core_init; diff --git a/sound/pci/ice1712/amp.c b/sound/pci/ice1712/amp.c index 37564300b50..6da21a2bcad 100644 --- a/sound/pci/ice1712/amp.c +++ b/sound/pci/ice1712/amp.c @@ -52,11 +52,13 @@ static int __devinit snd_vt1724_amp_init(struct snd_ice1712 *ice) /* only use basic functionality for now */ - ice->num_total_dacs = 2; /* only PSDOUT0 is connected */ + /* VT1616 6ch codec connected to PSDOUT0 using packed mode */ + ice->num_total_dacs = 6; ice->num_total_adcs = 2; - /* Chaintech AV-710 has another codecs, which need initialization */ - /* initialize WM8728 codec */ + /* Chaintech AV-710 has another WM8728 codec connected to PSDOUT4 + (shared with the SPDIF output). Mixer control for this codec + is not yet supported. 
*/ if (ice->eeprom.subvendor == VT1724_SUBDEVICE_AV710) { for (i = 0; i < ARRAY_SIZE(wm_inits); i += 2) wm_put(ice, wm_inits[i], wm_inits[i+1]); diff --git a/sound/pci/ice1712/ice1724.c b/sound/pci/ice1712/ice1724.c index 76b717dae4b..10fc92c0557 100644 --- a/sound/pci/ice1712/ice1724.c +++ b/sound/pci/ice1712/ice1724.c @@ -648,7 +648,7 @@ static int snd_vt1724_set_pro_rate(struct snd_ice1712 *ice, unsigned int rate, (inb(ICEMT1724(ice, DMA_PAUSE)) & DMA_PAUSES)) { /* running? we cannot change the rate now... */ spin_unlock_irqrestore(&ice->reg_lock, flags); - return -EBUSY; + return ((rate == ice->cur_rate) && !force) ? 0 : -EBUSY; } if (!force && is_pro_rate_locked(ice)) { spin_unlock_irqrestore(&ice->reg_lock, flags); diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 742a32eee8f..64c6b7b57d8 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -328,8 +328,27 @@ LIB_FILE=libperf.a LIB_H += ../../include/linux/perf_event.h LIB_H += ../../include/linux/rbtree.h LIB_H += ../../include/linux/list.h +LIB_H += util/include/linux/bitmap.h +LIB_H += util/include/linux/bitops.h +LIB_H += util/include/linux/compiler.h +LIB_H += util/include/linux/ctype.h +LIB_H += util/include/linux/kernel.h LIB_H += util/include/linux/list.h +LIB_H += util/include/linux/module.h +LIB_H += util/include/linux/poison.h +LIB_H += util/include/linux/prefetch.h +LIB_H += util/include/linux/rbtree.h +LIB_H += util/include/linux/string.h +LIB_H += util/include/linux/types.h +LIB_H += util/include/asm/asm-offsets.h +LIB_H += util/include/asm/bitops.h +LIB_H += util/include/asm/byteorder.h +LIB_H += util/include/asm/swab.h +LIB_H += util/include/asm/system.h +LIB_H += util/include/asm/types.h +LIB_H += util/include/asm/uaccess.h LIB_H += perf.h +LIB_H += util/event.h LIB_H += util/types.h LIB_H += util/levenshtein.h LIB_H += util/parse-options.h @@ -343,9 +362,12 @@ LIB_H += util/strlist.h LIB_H += util/run-command.h LIB_H += util/sigchain.h LIB_H += util/symbol.h -LIB_H += util/module.h LIB_H += util/color.h LIB_H += util/values.h +LIB_H += util/sort.h +LIB_H += util/hist.h +LIB_H += util/thread.h +LIB_H += util/data_map.h LIB_OBJS += util/abspath.o LIB_OBJS += util/alias.o @@ -359,6 +381,9 @@ LIB_OBJS += util/parse-options.o LIB_OBJS += util/parse-events.o LIB_OBJS += util/path.o LIB_OBJS += util/rbtree.o +LIB_OBJS += util/bitmap.o +LIB_OBJS += util/hweight.o +LIB_OBJS += util/find_next_bit.o LIB_OBJS += util/run-command.o LIB_OBJS += util/quote.o LIB_OBJS += util/strbuf.o @@ -368,7 +393,6 @@ LIB_OBJS += util/usage.o LIB_OBJS += util/wrapper.o LIB_OBJS += util/sigchain.o LIB_OBJS += util/symbol.o -LIB_OBJS += util/module.o LIB_OBJS += util/color.o LIB_OBJS += util/pager.o LIB_OBJS += util/header.o @@ -381,6 +405,9 @@ LIB_OBJS += util/trace-event-parse.o LIB_OBJS += util/trace-event-read.o LIB_OBJS += util/trace-event-info.o LIB_OBJS += util/svghelper.o +LIB_OBJS += util/sort.o +LIB_OBJS += util/hist.o +LIB_OBJS += util/data_map.o BUILTIN_OBJS += builtin-annotate.o BUILTIN_OBJS += builtin-help.o @@ -422,8 +449,12 @@ ifeq ($(uname_S),Darwin) PTHREAD_LIBS = endif +ifneq ($(shell sh -c "(echo '\#include <gnu/libc-version.h>'; echo 'int main(void) { const char * version = gnu_get_libc_version(); return (long)version; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) > /dev/null 2>&1 && echo y"), y) + msg := $(error No gnu/libc-version.h found, please install glibc-dev[el]); +endif + ifneq ($(shell sh -c "(echo '\#include <libelf.h>'; echo 'int 
main(void) { Elf * elf = elf_begin(0, ELF_C_READ_MMAP, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) > /dev/null 2>&1 && echo y"), y) - msg := $(error No libelf.h/libelf found, please install libelf-dev/elfutils-libelf-devel and glibc-dev[el]); + msg := $(error No libelf.h/libelf found, please install libelf-dev/elfutils-libelf-devel); endif ifdef NO_DEMANGLE @@ -784,6 +815,19 @@ util/config.o: util/config.c PERF-CFLAGS util/rbtree.o: ../../lib/rbtree.c PERF-CFLAGS $(QUIET_CC)$(CC) -o util/rbtree.o -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< +# some perf warning policies can't fit to lib/bitmap.c, eg: it warns about variable shadowing +# from <string.h> that comes from kernel headers wrapping. +KBITMAP_FLAGS=`echo $(ALL_CFLAGS) | sed s/-Wshadow// | sed s/-Wswitch-default// | sed s/-Wextra//` + +util/bitmap.o: ../../lib/bitmap.c PERF-CFLAGS + $(QUIET_CC)$(CC) -o util/bitmap.o -c $(KBITMAP_FLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< + +util/hweight.o: ../../lib/hweight.c PERF-CFLAGS + $(QUIET_CC)$(CC) -o util/hweight.o -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< + +util/find_next_bit.o: ../../lib/find_next_bit.c PERF-CFLAGS + $(QUIET_CC)$(CC) -o util/find_next_bit.o -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< + perf-%$X: %.o $(PERFLIBS) $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) $(LIBS) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 1ec74161581..56ba71658d7 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -22,15 +22,13 @@ #include "util/parse-options.h" #include "util/parse-events.h" #include "util/thread.h" +#include "util/sort.h" +#include "util/hist.h" static char const *input_name = "perf.data"; -static char default_sort_order[] = "comm,symbol"; -static char *sort_order = default_sort_order; - static int force; static int input; -static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV; static int full_paths; @@ -39,258 +37,12 @@ static int print_line; static unsigned long page_size; static unsigned long mmap_window = 32; -static struct rb_root threads; -static struct thread *last_match; - - struct sym_ext { struct rb_node node; double percent; char *path; }; -/* - * histogram, sorted on item, collects counts - */ - -static struct rb_root hist; - -struct hist_entry { - struct rb_node rb_node; - - struct thread *thread; - struct map *map; - struct dso *dso; - struct symbol *sym; - u64 ip; - char level; - - uint32_t count; -}; - -/* - * configurable sorting bits - */ - -struct sort_entry { - struct list_head list; - - const char *header; - - int64_t (*cmp)(struct hist_entry *, struct hist_entry *); - int64_t (*collapse)(struct hist_entry *, struct hist_entry *); - size_t (*print)(FILE *fp, struct hist_entry *); -}; - -/* --sort pid */ - -static int64_t -sort__thread_cmp(struct hist_entry *left, struct hist_entry *right) -{ - return right->thread->pid - left->thread->pid; -} - -static size_t -sort__thread_print(FILE *fp, struct hist_entry *self) -{ - return fprintf(fp, "%16s:%5d", self->thread->comm ?: "", self->thread->pid); -} - -static struct sort_entry sort_thread = { - .header = " Command: Pid", - .cmp = sort__thread_cmp, - .print = sort__thread_print, -}; - -/* --sort comm */ - -static int64_t -sort__comm_cmp(struct hist_entry *left, struct hist_entry *right) -{ - return right->thread->pid - left->thread->pid; -} - -static int64_t 
-sort__comm_collapse(struct hist_entry *left, struct hist_entry *right) -{ - char *comm_l = left->thread->comm; - char *comm_r = right->thread->comm; - - if (!comm_l || !comm_r) { - if (!comm_l && !comm_r) - return 0; - else if (!comm_l) - return -1; - else - return 1; - } - - return strcmp(comm_l, comm_r); -} - -static size_t -sort__comm_print(FILE *fp, struct hist_entry *self) -{ - return fprintf(fp, "%16s", self->thread->comm); -} - -static struct sort_entry sort_comm = { - .header = " Command", - .cmp = sort__comm_cmp, - .collapse = sort__comm_collapse, - .print = sort__comm_print, -}; - -/* --sort dso */ - -static int64_t -sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) -{ - struct dso *dso_l = left->dso; - struct dso *dso_r = right->dso; - - if (!dso_l || !dso_r) { - if (!dso_l && !dso_r) - return 0; - else if (!dso_l) - return -1; - else - return 1; - } - - return strcmp(dso_l->name, dso_r->name); -} - -static size_t -sort__dso_print(FILE *fp, struct hist_entry *self) -{ - if (self->dso) - return fprintf(fp, "%-25s", self->dso->name); - - return fprintf(fp, "%016llx ", (u64)self->ip); -} - -static struct sort_entry sort_dso = { - .header = "Shared Object ", - .cmp = sort__dso_cmp, - .print = sort__dso_print, -}; - -/* --sort symbol */ - -static int64_t -sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) -{ - u64 ip_l, ip_r; - - if (left->sym == right->sym) - return 0; - - ip_l = left->sym ? left->sym->start : left->ip; - ip_r = right->sym ? right->sym->start : right->ip; - - return (int64_t)(ip_r - ip_l); -} - -static size_t -sort__sym_print(FILE *fp, struct hist_entry *self) -{ - size_t ret = 0; - - if (verbose) - ret += fprintf(fp, "%#018llx ", (u64)self->ip); - - if (self->sym) { - ret += fprintf(fp, "[%c] %s", - self->dso == kernel_dso ? 
'k' : '.', self->sym->name); - } else { - ret += fprintf(fp, "%#016llx", (u64)self->ip); - } - - return ret; -} - -static struct sort_entry sort_sym = { - .header = "Symbol", - .cmp = sort__sym_cmp, - .print = sort__sym_print, -}; - -static int sort__need_collapse = 0; - -struct sort_dimension { - const char *name; - struct sort_entry *entry; - int taken; -}; - -static struct sort_dimension sort_dimensions[] = { - { .name = "pid", .entry = &sort_thread, }, - { .name = "comm", .entry = &sort_comm, }, - { .name = "dso", .entry = &sort_dso, }, - { .name = "symbol", .entry = &sort_sym, }, -}; - -static LIST_HEAD(hist_entry__sort_list); - -static int sort_dimension__add(char *tok) -{ - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) { - struct sort_dimension *sd = &sort_dimensions[i]; - - if (sd->taken) - continue; - - if (strncasecmp(tok, sd->name, strlen(tok))) - continue; - - if (sd->entry->collapse) - sort__need_collapse = 1; - - list_add_tail(&sd->entry->list, &hist_entry__sort_list); - sd->taken = 1; - - return 0; - } - - return -ESRCH; -} - -static int64_t -hist_entry__cmp(struct hist_entry *left, struct hist_entry *right) -{ - struct sort_entry *se; - int64_t cmp = 0; - - list_for_each_entry(se, &hist_entry__sort_list, list) { - cmp = se->cmp(left, right); - if (cmp) - break; - } - - return cmp; -} - -static int64_t -hist_entry__collapse(struct hist_entry *left, struct hist_entry *right) -{ - struct sort_entry *se; - int64_t cmp = 0; - - list_for_each_entry(se, &hist_entry__sort_list, list) { - int64_t (*f)(struct hist_entry *, struct hist_entry *); - - f = se->collapse ?: se->cmp; - - cmp = f(left, right); - if (cmp) - break; - } - - return cmp; -} /* * collect histogram counts @@ -306,6 +58,7 @@ static void hist_hit(struct hist_entry *he, u64 ip) return; sym_size = sym->end - sym->start; + ip = he->map->map_ip(he->map, ip); offset = ip - sym->start; if (offset >= sym_size) @@ -322,173 +75,27 @@ static void hist_hit(struct hist_entry *he, u64 ip) sym->hist[offset]); } -static int -hist_entry__add(struct thread *thread, struct map *map, struct dso *dso, - struct symbol *sym, u64 ip, char level) +static int hist_entry__add(struct thread *thread, struct map *map, + struct symbol *sym, u64 ip, u64 count, char level) { - struct rb_node **p = &hist.rb_node; - struct rb_node *parent = NULL; - struct hist_entry *he; - struct hist_entry entry = { - .thread = thread, - .map = map, - .dso = dso, - .sym = sym, - .ip = ip, - .level = level, - .count = 1, - }; - int cmp; - - while (*p != NULL) { - parent = *p; - he = rb_entry(parent, struct hist_entry, rb_node); - - cmp = hist_entry__cmp(&entry, he); - - if (!cmp) { - hist_hit(he, ip); - - return 0; - } - - if (cmp < 0) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; - } - - he = malloc(sizeof(*he)); - if (!he) + bool hit; + struct hist_entry *he = __hist_entry__add(thread, map, sym, NULL, ip, + count, level, &hit); + if (he == NULL) return -ENOMEM; - *he = entry; - rb_link_node(&he->rb_node, parent, p); - rb_insert_color(&he->rb_node, &hist); - + if (hit) + hist_hit(he, ip); return 0; } -static void hist_entry__free(struct hist_entry *he) -{ - free(he); -} - -/* - * collapse the histogram - */ - -static struct rb_root collapse_hists; - -static void collapse__insert_entry(struct hist_entry *he) -{ - struct rb_node **p = &collapse_hists.rb_node; - struct rb_node *parent = NULL; - struct hist_entry *iter; - int64_t cmp; - - while (*p != NULL) { - parent = *p; - iter = rb_entry(parent, struct hist_entry, rb_node); - - cmp = 
hist_entry__collapse(iter, he); - - if (!cmp) { - iter->count += he->count; - hist_entry__free(he); - return; - } - - if (cmp < 0) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; - } - - rb_link_node(&he->rb_node, parent, p); - rb_insert_color(&he->rb_node, &collapse_hists); -} - -static void collapse__resort(void) -{ - struct rb_node *next; - struct hist_entry *n; - - if (!sort__need_collapse) - return; - - next = rb_first(&hist); - while (next) { - n = rb_entry(next, struct hist_entry, rb_node); - next = rb_next(&n->rb_node); - - rb_erase(&n->rb_node, &hist); - collapse__insert_entry(n); - } -} - -/* - * reverse the map, sort on count. - */ - -static struct rb_root output_hists; - -static void output__insert_entry(struct hist_entry *he) -{ - struct rb_node **p = &output_hists.rb_node; - struct rb_node *parent = NULL; - struct hist_entry *iter; - - while (*p != NULL) { - parent = *p; - iter = rb_entry(parent, struct hist_entry, rb_node); - - if (he->count > iter->count) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; - } - - rb_link_node(&he->rb_node, parent, p); - rb_insert_color(&he->rb_node, &output_hists); -} - -static void output__resort(void) -{ - struct rb_node *next; - struct hist_entry *n; - struct rb_root *tree = &hist; - - if (sort__need_collapse) - tree = &collapse_hists; - - next = rb_first(tree); - - while (next) { - n = rb_entry(next, struct hist_entry, rb_node); - next = rb_next(&n->rb_node); - - rb_erase(&n->rb_node, tree); - output__insert_entry(n); - } -} - -static unsigned long total = 0, - total_mmap = 0, - total_comm = 0, - total_fork = 0, - total_unknown = 0; - static int process_sample_event(event_t *event, unsigned long offset, unsigned long head) { char level; - int show = 0; - struct dso *dso = NULL; - struct thread *thread; u64 ip = event->ip.ip; struct map *map = NULL; - - thread = threads__findnew(event->ip.pid, &threads, &last_match); + struct symbol *sym = NULL; + struct thread *thread = threads__findnew(event->ip.pid); dump_printf("%p [%p]: PERF_EVENT (IP, %d): %d: %p\n", (void *)(offset + head), @@ -497,60 +104,53 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) event->ip.pid, (void *)(long)ip); - dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid); - if (thread == NULL) { fprintf(stderr, "problem processing %d event, skipping it.\n", event->header.type); return -1; } + dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid); + if (event->header.misc & PERF_RECORD_MISC_KERNEL) { - show = SHOW_KERNEL; level = 'k'; - - dso = kernel_dso; - - dump_printf(" ...... dso: %s\n", dso->name); - + sym = kernel_maps__find_symbol(ip, &map); + dump_printf(" ...... dso: %s\n", + map ? map->dso->long_name : "<not found>"); } else if (event->header.misc & PERF_RECORD_MISC_USER) { - - show = SHOW_USER; level = '.'; - map = thread__find_map(thread, ip); if (map != NULL) { +got_map: ip = map->map_ip(map, ip); - dso = map->dso; + sym = map->dso->find_symbol(map->dso, ip); } else { /* * If this is outside of all known maps, * and is a negative address, try to look it * up in the kernel dso, as it might be a - * vsyscall (which executes in user-mode): + * vsyscall or vdso (which executes in user-mode). + * + * XXX This is nasty, we should have a symbol list in + * the "[vdso]" dso, but for now lets use the old + * trick of looking in the whole kernel symbol list. */ - if ((long long)ip < 0) - dso = kernel_dso; + if ((long long)ip < 0) { + map = kernel_map; + goto got_map; + } } - dump_printf(" ...... dso: %s\n", dso ? 
dso->name : "<not found>"); - + dump_printf(" ...... dso: %s\n", + map ? map->dso->long_name : "<not found>"); } else { - show = SHOW_HV; level = 'H'; dump_printf(" ...... dso: [hypervisor]\n"); } - if (show & show_mask) { - struct symbol *sym = NULL; - - if (dso) - sym = dso->find_symbol(dso, ip); - - if (hist_entry__add(thread, map, dso, sym, ip, level)) { - fprintf(stderr, - "problem incrementing symbol count, skipping event\n"); - return -1; - } + if (hist_entry__add(thread, map, sym, ip, 1, level)) { + fprintf(stderr, "problem incrementing symbol count, " + "skipping event\n"); + return -1; } total++; @@ -560,10 +160,8 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) static int process_mmap_event(event_t *event, unsigned long offset, unsigned long head) { - struct thread *thread; struct map *map = map__new(&event->mmap, NULL, 0); - - thread = threads__findnew(event->mmap.pid, &threads, &last_match); + struct thread *thread = threads__findnew(event->mmap.pid); dump_printf("%p [%p]: PERF_RECORD_MMAP %d: [%p(%p) @ %p]: %s\n", (void *)(offset + head), @@ -588,9 +186,8 @@ process_mmap_event(event_t *event, unsigned long offset, unsigned long head) static int process_comm_event(event_t *event, unsigned long offset, unsigned long head) { - struct thread *thread; + struct thread *thread = threads__findnew(event->comm.pid); - thread = threads__findnew(event->comm.pid, &threads, &last_match); dump_printf("%p [%p]: PERF_RECORD_COMM: %s:%d\n", (void *)(offset + head), (void *)(long)(event->header.size), @@ -609,11 +206,9 @@ process_comm_event(event_t *event, unsigned long offset, unsigned long head) static int process_fork_event(event_t *event, unsigned long offset, unsigned long head) { - struct thread *thread; - struct thread *parent; + struct thread *thread = threads__findnew(event->fork.pid); + struct thread *parent = threads__findnew(event->fork.ppid); - thread = threads__findnew(event->fork.pid, &threads, &last_match); - parent = threads__findnew(event->fork.ppid, &threads, &last_match); dump_printf("%p [%p]: PERF_RECORD_FORK: %d:%d\n", (void *)(offset + head), (void *)(long)(event->header.size), @@ -666,7 +261,7 @@ process_event(event_t *event, unsigned long offset, unsigned long head) } static int -parse_line(FILE *file, struct symbol *sym, u64 start, u64 len) +parse_line(FILE *file, struct symbol *sym, u64 len) { char *line = NULL, *tmp, *tmp2; static const char *prev_line; @@ -716,7 +311,7 @@ parse_line(FILE *file, struct symbol *sym, u64 start, u64 len) const char *color; struct sym_ext *sym_ext = sym->priv; - offset = line_ip - start; + offset = line_ip - sym->start; if (offset < len) hits = sym->hist[offset]; @@ -795,7 +390,7 @@ static void free_source_line(struct symbol *sym, int len) /* Get the filename:line for the colored entries */ static void -get_source_line(struct symbol *sym, u64 start, int len, const char *filename) +get_source_line(struct symbol *sym, int len, const char *filename) { int i; char cmd[PATH_MAX * 2]; @@ -820,7 +415,7 @@ get_source_line(struct symbol *sym, u64 start, int len, const char *filename) if (sym_ext[i].percent <= 0.5) continue; - offset = start + i; + offset = sym->start + i; sprintf(cmd, "addr2line -e %s %016llx", filename, offset); fp = popen(cmd, "r"); if (!fp) @@ -872,31 +467,23 @@ static void print_summary(const char *filename) static void annotate_sym(struct dso *dso, struct symbol *sym) { - const char *filename = dso->name, *d_filename; - u64 start, end, len; + const char *filename = dso->long_name, 
*d_filename; + u64 len; char command[PATH_MAX*2]; FILE *file; if (!filename) return; - if (sym->module) - filename = sym->module->path; - else if (dso == kernel_dso) - filename = vmlinux_name; - - start = sym->obj_start; - if (!start) - start = sym->start; + if (full_paths) d_filename = filename; else d_filename = basename(filename); - end = start + sym->end - sym->start + 1; len = sym->end - sym->start; if (print_line) { - get_source_line(sym, start, len, filename); + get_source_line(sym, len, filename); print_summary(filename); } @@ -905,10 +492,11 @@ static void annotate_sym(struct dso *dso, struct symbol *sym) printf("------------------------------------------------\n"); if (verbose >= 2) - printf("annotating [%p] %30s : [%p] %30s\n", dso, dso->name, sym, sym->name); + printf("annotating [%p] %30s : [%p] %30s\n", + dso, dso->long_name, sym, sym->name); sprintf(command, "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS %s|grep -v %s", - (u64)start, (u64)end, filename, filename); + sym->start, sym->end, filename, filename); if (verbose >= 3) printf("doing: %s\n", command); @@ -918,7 +506,7 @@ static void annotate_sym(struct dso *dso, struct symbol *sym) return; while (!feof(file)) { - if (parse_line(file, sym, start, len) < 0) + if (parse_line(file, sym, len) < 0) break; } @@ -959,7 +547,7 @@ static int __cmd_annotate(void) uint32_t size; char *buf; - register_idle_thread(&threads, &last_match); + register_idle_thread(); input = open(input_name, O_RDONLY); if (input < 0) { @@ -1059,14 +647,14 @@ more: if (dump_trace) return 0; - if (verbose >= 3) - threads__fprintf(stdout, &threads); + if (verbose > 3) + threads__fprintf(stdout); - if (verbose >= 2) + if (verbose > 2) dsos__fprintf(stdout); collapse__resort(); - output__resort(); + output__resort(total); find_annotations(); @@ -1139,5 +727,11 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __used) setup_pager(); + if (field_sep && *field_sep == '.') { + fputs("'.' 
is the only non valid --field-separator argument\n", + stderr); + exit(129); + } + return __cmd_annotate(); } diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 3eeef339c78..f0467ff0d8a 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -17,55 +17,51 @@ #include "util/header.h" #include "util/event.h" #include "util/debug.h" -#include "util/trace-event.h" #include <unistd.h> #include <sched.h> -#define ALIGN(x, a) __ALIGN_MASK(x, (typeof(x))(a)-1) -#define __ALIGN_MASK(x, mask) (((x)+(mask))&~(mask)) - static int fd[MAX_NR_CPUS][MAX_COUNTERS]; -static long default_interval = 100000; +static long default_interval = 0; -static int nr_cpus = 0; +static int nr_cpus = 0; static unsigned int page_size; -static unsigned int mmap_pages = 128; -static int freq = 0; +static unsigned int mmap_pages = 128; +static int freq = 1000; static int output; static const char *output_name = "perf.data"; -static int group = 0; -static unsigned int realtime_prio = 0; -static int raw_samples = 0; -static int system_wide = 0; -static int profile_cpu = -1; -static pid_t target_pid = -1; -static pid_t child_pid = -1; -static int inherit = 1; -static int force = 0; -static int append_file = 0; -static int call_graph = 0; -static int inherit_stat = 0; -static int no_samples = 0; -static int sample_address = 0; -static int multiplex = 0; -static int multiplex_fd = -1; - -static long samples; +static int group = 0; +static unsigned int realtime_prio = 0; +static int raw_samples = 0; +static int system_wide = 0; +static int profile_cpu = -1; +static pid_t target_pid = -1; +static pid_t child_pid = -1; +static int inherit = 1; +static int force = 0; +static int append_file = 0; +static int call_graph = 0; +static int inherit_stat = 0; +static int no_samples = 0; +static int sample_address = 0; +static int multiplex = 0; +static int multiplex_fd = -1; + +static long samples = 0; static struct timeval last_read; static struct timeval this_read; -static u64 bytes_written; +static u64 bytes_written = 0; static struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS]; -static int nr_poll; -static int nr_cpu; +static int nr_poll = 0; +static int nr_cpu = 0; -static int file_new = 1; +static int file_new = 1; -struct perf_header *header; +struct perf_header *header = NULL; struct mmap_data { int counter; @@ -375,9 +371,11 @@ static struct perf_header_attr *get_header_attr(struct perf_event_attr *a, int n static void create_counter(int counter, int cpu, pid_t pid) { + char *filter = filters[counter]; struct perf_event_attr *attr = attrs + counter; struct perf_header_attr *h_attr; int track = !counter; /* only the first counter needs these */ + int ret; struct { u64 count; u64 time_enabled; @@ -480,7 +478,6 @@ try_again: multiplex_fd = fd[nr_cpu][counter]; if (multiplex && fd[nr_cpu][counter] != multiplex_fd) { - int ret; ret = ioctl(fd[nr_cpu][counter], PERF_EVENT_IOC_SET_OUTPUT, multiplex_fd); assert(ret != -1); @@ -500,6 +497,16 @@ try_again: } } + if (filter != NULL) { + ret = ioctl(fd[nr_cpu][counter], + PERF_EVENT_IOC_SET_FILTER, filter); + if (ret) { + error("failed to set filter with %d (%s)\n", errno, + strerror(errno)); + exit(-1); + } + } + ioctl(fd[nr_cpu][counter], PERF_EVENT_IOC_ENABLE); } @@ -566,17 +573,17 @@ static int __cmd_record(int argc, const char **argv) else header = perf_header__new(); - if (raw_samples) { - read_tracing_data(attrs, nr_counters); + perf_header__feat_trace_info(header); } else { for (i = 0; i < nr_counters; i++) { if (attrs[i].sample_type & 
PERF_SAMPLE_RAW) { - read_tracing_data(attrs, nr_counters); + perf_header__feat_trace_info(header); break; } } } + atexit(atexit_header); if (!system_wide) { @@ -677,6 +684,8 @@ static const struct option options[] = { OPT_CALLBACK('e', "event", NULL, "event", "event selector. use 'perf list' to list available events", parse_events), + OPT_CALLBACK(0, "filter", NULL, "filter", + "event filter", parse_filter), OPT_INTEGER('p', "pid", &target_pid, "record events on existing pid"), OPT_INTEGER('r', "realtime", &realtime_prio, @@ -731,6 +740,18 @@ int cmd_record(int argc, const char **argv, const char *prefix __used) attrs[0].config = PERF_COUNT_HW_CPU_CYCLES; } + /* + * User specified count overrides default frequency. + */ + if (default_interval) + freq = 0; + else if (freq) { + default_interval = freq; + } else { + fprintf(stderr, "frequency and count are zero, aborting\n"); + exit(EXIT_FAILURE); + } + for (counter = 0; counter < nr_counters; counter++) { if (attrs[counter].sample_period) continue; diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 19669c20088..a4f8cc20915 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -26,20 +26,18 @@ #include "util/parse-options.h" #include "util/parse-events.h" +#include "util/data_map.h" #include "util/thread.h" +#include "util/sort.h" +#include "util/hist.h" static char const *input_name = "perf.data"; -static char default_sort_order[] = "comm,dso,symbol"; -static char *sort_order = default_sort_order; static char *dso_list_str, *comm_list_str, *sym_list_str, *col_width_list_str; static struct strlist *dso_list, *comm_list, *sym_list; -static char *field_sep; static int force; -static int input; -static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV; static int full_paths; static int show_nr_samples; @@ -50,368 +48,17 @@ static struct perf_read_values show_threads_values; static char default_pretty_printing_style[] = "normal"; static char *pretty_printing_style = default_pretty_printing_style; -static unsigned long page_size; -static unsigned long mmap_window = 32; - -static char default_parent_pattern[] = "^sys_|^do_page_fault"; -static char *parent_pattern = default_parent_pattern; -static regex_t parent_regex; - static int exclude_other = 1; static char callchain_default_opt[] = "fractal,0.5"; -static int callchain; - -static char __cwd[PATH_MAX]; -static char *cwd = __cwd; +static char *cwd; static int cwdlen; -static struct rb_root threads; -static struct thread *last_match; - static struct perf_header *header; -static -struct callchain_param callchain_param = { - .mode = CHAIN_GRAPH_REL, - .min_percent = 0.5 -}; - static u64 sample_type; -static int repsep_fprintf(FILE *fp, const char *fmt, ...) 
-{ - int n; - va_list ap; - - va_start(ap, fmt); - if (!field_sep) - n = vfprintf(fp, fmt, ap); - else { - char *bf = NULL; - n = vasprintf(&bf, fmt, ap); - if (n > 0) { - char *sep = bf; - - while (1) { - sep = strchr(sep, *field_sep); - if (sep == NULL) - break; - *sep = '.'; - } - } - fputs(bf, fp); - free(bf); - } - va_end(ap); - return n; -} - -static unsigned int dsos__col_width, - comms__col_width, - threads__col_width; - -/* - * histogram, sorted on item, collects counts - */ - -static struct rb_root hist; - -struct hist_entry { - struct rb_node rb_node; - - struct thread *thread; - struct map *map; - struct dso *dso; - struct symbol *sym; - struct symbol *parent; - u64 ip; - char level; - struct callchain_node callchain; - struct rb_root sorted_chain; - - u64 count; -}; - -/* - * configurable sorting bits - */ - -struct sort_entry { - struct list_head list; - - const char *header; - - int64_t (*cmp)(struct hist_entry *, struct hist_entry *); - int64_t (*collapse)(struct hist_entry *, struct hist_entry *); - size_t (*print)(FILE *fp, struct hist_entry *, unsigned int width); - unsigned int *width; - bool elide; -}; - -static int64_t cmp_null(void *l, void *r) -{ - if (!l && !r) - return 0; - else if (!l) - return -1; - else - return 1; -} - -/* --sort pid */ - -static int64_t -sort__thread_cmp(struct hist_entry *left, struct hist_entry *right) -{ - return right->thread->pid - left->thread->pid; -} - -static size_t -sort__thread_print(FILE *fp, struct hist_entry *self, unsigned int width) -{ - return repsep_fprintf(fp, "%*s:%5d", width - 6, - self->thread->comm ?: "", self->thread->pid); -} - -static struct sort_entry sort_thread = { - .header = "Command: Pid", - .cmp = sort__thread_cmp, - .print = sort__thread_print, - .width = &threads__col_width, -}; - -/* --sort comm */ - -static int64_t -sort__comm_cmp(struct hist_entry *left, struct hist_entry *right) -{ - return right->thread->pid - left->thread->pid; -} - -static int64_t -sort__comm_collapse(struct hist_entry *left, struct hist_entry *right) -{ - char *comm_l = left->thread->comm; - char *comm_r = right->thread->comm; - - if (!comm_l || !comm_r) - return cmp_null(comm_l, comm_r); - - return strcmp(comm_l, comm_r); -} - -static size_t -sort__comm_print(FILE *fp, struct hist_entry *self, unsigned int width) -{ - return repsep_fprintf(fp, "%*s", width, self->thread->comm); -} - -static struct sort_entry sort_comm = { - .header = "Command", - .cmp = sort__comm_cmp, - .collapse = sort__comm_collapse, - .print = sort__comm_print, - .width = &comms__col_width, -}; - -/* --sort dso */ - -static int64_t -sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) -{ - struct dso *dso_l = left->dso; - struct dso *dso_r = right->dso; - - if (!dso_l || !dso_r) - return cmp_null(dso_l, dso_r); - - return strcmp(dso_l->name, dso_r->name); -} - -static size_t -sort__dso_print(FILE *fp, struct hist_entry *self, unsigned int width) -{ - if (self->dso) - return repsep_fprintf(fp, "%-*s", width, self->dso->name); - - return repsep_fprintf(fp, "%*llx", width, (u64)self->ip); -} - -static struct sort_entry sort_dso = { - .header = "Shared Object", - .cmp = sort__dso_cmp, - .print = sort__dso_print, - .width = &dsos__col_width, -}; - -/* --sort symbol */ - -static int64_t -sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) -{ - u64 ip_l, ip_r; - - if (left->sym == right->sym) - return 0; - - ip_l = left->sym ? left->sym->start : left->ip; - ip_r = right->sym ? 
right->sym->start : right->ip; - - return (int64_t)(ip_r - ip_l); -} - -static size_t -sort__sym_print(FILE *fp, struct hist_entry *self, unsigned int width __used) -{ - size_t ret = 0; - - if (verbose) - ret += repsep_fprintf(fp, "%#018llx %c ", (u64)self->ip, - dso__symtab_origin(self->dso)); - - ret += repsep_fprintf(fp, "[%c] ", self->level); - if (self->sym) { - ret += repsep_fprintf(fp, "%s", self->sym->name); - - if (self->sym->module) - ret += repsep_fprintf(fp, "\t[%s]", - self->sym->module->name); - } else { - ret += repsep_fprintf(fp, "%#016llx", (u64)self->ip); - } - - return ret; -} - -static struct sort_entry sort_sym = { - .header = "Symbol", - .cmp = sort__sym_cmp, - .print = sort__sym_print, -}; - -/* --sort parent */ - -static int64_t -sort__parent_cmp(struct hist_entry *left, struct hist_entry *right) -{ - struct symbol *sym_l = left->parent; - struct symbol *sym_r = right->parent; - - if (!sym_l || !sym_r) - return cmp_null(sym_l, sym_r); - - return strcmp(sym_l->name, sym_r->name); -} - -static size_t -sort__parent_print(FILE *fp, struct hist_entry *self, unsigned int width) -{ - return repsep_fprintf(fp, "%-*s", width, - self->parent ? self->parent->name : "[other]"); -} - -static unsigned int parent_symbol__col_width; - -static struct sort_entry sort_parent = { - .header = "Parent symbol", - .cmp = sort__parent_cmp, - .print = sort__parent_print, - .width = &parent_symbol__col_width, -}; - -static int sort__need_collapse = 0; -static int sort__has_parent = 0; - -struct sort_dimension { - const char *name; - struct sort_entry *entry; - int taken; -}; - -static struct sort_dimension sort_dimensions[] = { - { .name = "pid", .entry = &sort_thread, }, - { .name = "comm", .entry = &sort_comm, }, - { .name = "dso", .entry = &sort_dso, }, - { .name = "symbol", .entry = &sort_sym, }, - { .name = "parent", .entry = &sort_parent, }, -}; - -static LIST_HEAD(hist_entry__sort_list); - -static int sort_dimension__add(const char *tok) -{ - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) { - struct sort_dimension *sd = &sort_dimensions[i]; - - if (sd->taken) - continue; - - if (strncasecmp(tok, sd->name, strlen(tok))) - continue; - - if (sd->entry->collapse) - sort__need_collapse = 1; - - if (sd->entry == &sort_parent) { - int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED); - if (ret) { - char err[BUFSIZ]; - - regerror(ret, &parent_regex, err, sizeof(err)); - fprintf(stderr, "Invalid regex: %s\n%s", - parent_pattern, err); - exit(-1); - } - sort__has_parent = 1; - } - - list_add_tail(&sd->entry->list, &hist_entry__sort_list); - sd->taken = 1; - - return 0; - } - - return -ESRCH; -} - -static int64_t -hist_entry__cmp(struct hist_entry *left, struct hist_entry *right) -{ - struct sort_entry *se; - int64_t cmp = 0; - - list_for_each_entry(se, &hist_entry__sort_list, list) { - cmp = se->cmp(left, right); - if (cmp) - break; - } - - return cmp; -} - -static int64_t -hist_entry__collapse(struct hist_entry *left, struct hist_entry *right) -{ - struct sort_entry *se; - int64_t cmp = 0; - - list_for_each_entry(se, &hist_entry__sort_list, list) { - int64_t (*f)(struct hist_entry *, struct hist_entry *); - - f = se->collapse ?: se->cmp; - - cmp = f(left, right); - if (cmp) - break; - } - - return cmp; -} - static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask) { int i; @@ -610,7 +257,6 @@ hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self, return ret; } - static size_t hist_entry__fprintf(FILE *fp, struct hist_entry *self, 
u64 total_samples) { @@ -695,22 +341,17 @@ static int thread__set_comm_adjust(struct thread *self, const char *comm) static struct symbol * -resolve_symbol(struct thread *thread, struct map **mapp, - struct dso **dsop, u64 *ipp) +resolve_symbol(struct thread *thread, struct map **mapp, u64 *ipp) { - struct dso *dso = dsop ? *dsop : NULL; struct map *map = mapp ? *mapp : NULL; u64 ip = *ipp; - if (!thread) - return NULL; - - if (dso) - goto got_dso; - if (map) goto got_map; + if (!thread) + return NULL; + map = thread__find_map(thread, ip); if (map != NULL) { /* @@ -725,29 +366,26 @@ resolve_symbol(struct thread *thread, struct map **mapp, *mapp = map; got_map: ip = map->map_ip(map, ip); - - dso = map->dso; } else { /* * If this is outside of all known maps, * and is a negative address, try to look it * up in the kernel dso, as it might be a - * vsyscall (which executes in user-mode): + * vsyscall or vdso (which executes in user-mode). + * + * XXX This is nasty, we should have a symbol list in + * the "[vdso]" dso, but for now lets use the old + * trick of looking in the whole kernel symbol list. */ if ((long long)ip < 0) - dso = kernel_dso; + return kernel_maps__find_symbol(ip, mapp); } - dump_printf(" ...... dso: %s\n", dso ? dso->name : "<not found>"); + dump_printf(" ...... dso: %s\n", + map ? map->dso->long_name : "<not found>"); dump_printf(" ...... map: %Lx -> %Lx\n", *ipp, ip); *ipp = ip; - if (dsop) - *dsop = dso; - - if (!dso) - return NULL; -got_dso: - return dso->find_symbol(dso, ip); + return map ? map->dso->find_symbol(map->dso, ip) : NULL; } static int call__match(struct symbol *sym) @@ -758,9 +396,9 @@ static int call__match(struct symbol *sym) return 0; } -static struct symbol ** -resolve_callchain(struct thread *thread, struct map *map __used, - struct ip_callchain *chain, struct hist_entry *entry) +static struct symbol **resolve_callchain(struct thread *thread, struct map *map, + struct ip_callchain *chain, + struct symbol **parent) { u64 context = PERF_CONTEXT_MAX; struct symbol **syms = NULL; @@ -776,8 +414,7 @@ resolve_callchain(struct thread *thread, struct map *map __used, for (i = 0; i < chain->nr; i++) { u64 ip = chain->ips[i]; - struct dso *dso = NULL; - struct symbol *sym; + struct symbol *sym = NULL; if (ip >= PERF_CONTEXT_MAX) { context = ip; @@ -786,21 +423,18 @@ resolve_callchain(struct thread *thread, struct map *map __used, switch (context) { case PERF_CONTEXT_HV: - dso = hypervisor_dso; break; case PERF_CONTEXT_KERNEL: - dso = kernel_dso; + sym = kernel_maps__find_symbol(ip, &map); break; default: + sym = resolve_symbol(thread, &map, &ip); break; } - sym = resolve_symbol(thread, NULL, &dso, &ip); - if (sym) { - if (sort__has_parent && call__match(sym) && - !entry->parent) - entry->parent = sym; + if (sort__has_parent && !*parent && call__match(sym)) + *parent = sym; if (!callchain) break; syms[i] = sym; @@ -815,177 +449,35 @@ resolve_callchain(struct thread *thread, struct map *map __used, */ static int -hist_entry__add(struct thread *thread, struct map *map, struct dso *dso, +hist_entry__add(struct thread *thread, struct map *map, struct symbol *sym, u64 ip, struct ip_callchain *chain, char level, u64 count) { - struct rb_node **p = &hist.rb_node; - struct rb_node *parent = NULL; + struct symbol **syms = NULL, *parent = NULL; + bool hit; struct hist_entry *he; - struct symbol **syms = NULL; - struct hist_entry entry = { - .thread = thread, - .map = map, - .dso = dso, - .sym = sym, - .ip = ip, - .level = level, - .count = count, - .parent = NULL, - 
.sorted_chain = RB_ROOT - }; - int cmp; if ((sort__has_parent || callchain) && chain) - syms = resolve_callchain(thread, map, chain, &entry); - - while (*p != NULL) { - parent = *p; - he = rb_entry(parent, struct hist_entry, rb_node); - - cmp = hist_entry__cmp(&entry, he); + syms = resolve_callchain(thread, map, chain, &parent); - if (!cmp) { - he->count += count; - if (callchain) { - append_chain(&he->callchain, chain, syms); - free(syms); - } - return 0; - } + he = __hist_entry__add(thread, map, sym, parent, + ip, count, level, &hit); + if (he == NULL) + return -ENOMEM; - if (cmp < 0) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; - } + if (hit) + he->count += count; - he = malloc(sizeof(*he)); - if (!he) - return -ENOMEM; - *he = entry; if (callchain) { - callchain_init(&he->callchain); + if (!hit) + callchain_init(&he->callchain); append_chain(&he->callchain, chain, syms); free(syms); } - rb_link_node(&he->rb_node, parent, p); - rb_insert_color(&he->rb_node, &hist); return 0; } -static void hist_entry__free(struct hist_entry *he) -{ - free(he); -} - -/* - * collapse the histogram - */ - -static struct rb_root collapse_hists; - -static void collapse__insert_entry(struct hist_entry *he) -{ - struct rb_node **p = &collapse_hists.rb_node; - struct rb_node *parent = NULL; - struct hist_entry *iter; - int64_t cmp; - - while (*p != NULL) { - parent = *p; - iter = rb_entry(parent, struct hist_entry, rb_node); - - cmp = hist_entry__collapse(iter, he); - - if (!cmp) { - iter->count += he->count; - hist_entry__free(he); - return; - } - - if (cmp < 0) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; - } - - rb_link_node(&he->rb_node, parent, p); - rb_insert_color(&he->rb_node, &collapse_hists); -} - -static void collapse__resort(void) -{ - struct rb_node *next; - struct hist_entry *n; - - if (!sort__need_collapse) - return; - - next = rb_first(&hist); - while (next) { - n = rb_entry(next, struct hist_entry, rb_node); - next = rb_next(&n->rb_node); - - rb_erase(&n->rb_node, &hist); - collapse__insert_entry(n); - } -} - -/* - * reverse the map, sort on count. 
- */ - -static struct rb_root output_hists; - -static void output__insert_entry(struct hist_entry *he, u64 min_callchain_hits) -{ - struct rb_node **p = &output_hists.rb_node; - struct rb_node *parent = NULL; - struct hist_entry *iter; - - if (callchain) - callchain_param.sort(&he->sorted_chain, &he->callchain, - min_callchain_hits, &callchain_param); - - while (*p != NULL) { - parent = *p; - iter = rb_entry(parent, struct hist_entry, rb_node); - - if (he->count > iter->count) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; - } - - rb_link_node(&he->rb_node, parent, p); - rb_insert_color(&he->rb_node, &output_hists); -} - -static void output__resort(u64 total_samples) -{ - struct rb_node *next; - struct hist_entry *n; - struct rb_root *tree = &hist; - u64 min_callchain_hits; - - min_callchain_hits = total_samples * (callchain_param.min_percent / 100); - - if (sort__need_collapse) - tree = &collapse_hists; - - next = rb_first(tree); - - while (next) { - n = rb_entry(next, struct hist_entry, rb_node); - next = rb_next(&n->rb_node); - - rb_erase(&n->rb_node, tree); - output__insert_entry(n, min_callchain_hits); - } -} - static size_t output__fprintf(FILE *fp, u64 total_samples) { struct hist_entry *pos; @@ -1080,13 +572,6 @@ print_entries: return ret; } -static unsigned long total = 0, - total_mmap = 0, - total_comm = 0, - total_fork = 0, - total_unknown = 0, - total_lost = 0; - static int validate_chain(struct ip_callchain *chain, event_t *event) { unsigned int chain_size; @@ -1104,17 +589,14 @@ static int process_sample_event(event_t *event, unsigned long offset, unsigned long head) { char level; - int show = 0; - struct dso *dso = NULL; - struct thread *thread; + struct symbol *sym = NULL; u64 ip = event->ip.ip; u64 period = 1; struct map *map = NULL; void *more_data = event->ip.__more_data; struct ip_callchain *chain = NULL; int cpumode; - - thread = threads__findnew(event->ip.pid, &threads, &last_match); + struct thread *thread = threads__findnew(event->ip.pid); if (sample_type & PERF_SAMPLE_PERIOD) { period = *(u64 *)more_data; @@ -1147,56 +629,49 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) } } - dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid); - if (thread == NULL) { eprintf("problem processing %d event, skipping it.\n", event->header.type); return -1; } + dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid); + if (comm_list && !strlist__has_entry(comm_list, thread->comm)) return 0; cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; if (cpumode == PERF_RECORD_MISC_KERNEL) { - show = SHOW_KERNEL; level = 'k'; - - dso = kernel_dso; - - dump_printf(" ...... dso: %s\n", dso->name); - + sym = kernel_maps__find_symbol(ip, &map); + dump_printf(" ...... dso: %s\n", + map ? map->dso->long_name : "<not found>"); } else if (cpumode == PERF_RECORD_MISC_USER) { - - show = SHOW_USER; level = '.'; + sym = resolve_symbol(thread, &map, &ip); } else { - show = SHOW_HV; level = 'H'; - - dso = hypervisor_dso; - dump_printf(" ...... 
dso: [hypervisor]\n"); } - if (show & show_mask) { - struct symbol *sym = resolve_symbol(thread, &map, &dso, &ip); - - if (dso_list && (!dso || !dso->name || - !strlist__has_entry(dso_list, dso->name))) - return 0; + if (dso_list && + (!map || !map->dso || + !(strlist__has_entry(dso_list, map->dso->short_name) || + (map->dso->short_name != map->dso->long_name && + strlist__has_entry(dso_list, map->dso->long_name))))) + return 0; - if (sym_list && (!sym || !strlist__has_entry(sym_list, sym->name))) - return 0; + if (sym_list && sym && !strlist__has_entry(sym_list, sym->name)) + return 0; - if (hist_entry__add(thread, map, dso, sym, ip, chain, level, period)) { - eprintf("problem incrementing symbol count, skipping event\n"); - return -1; - } + if (hist_entry__add(thread, map, sym, ip, + chain, level, period)) { + eprintf("problem incrementing symbol count, skipping event\n"); + return -1; } + total += period; return 0; @@ -1205,10 +680,8 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) static int process_mmap_event(event_t *event, unsigned long offset, unsigned long head) { - struct thread *thread; struct map *map = map__new(&event->mmap, cwd, cwdlen); - - thread = threads__findnew(event->mmap.pid, &threads, &last_match); + struct thread *thread = threads__findnew(event->mmap.pid); dump_printf("%p [%p]: PERF_RECORD_MMAP %d/%d: [%p(%p) @ %p]: %s\n", (void *)(offset + head), @@ -1234,9 +707,7 @@ process_mmap_event(event_t *event, unsigned long offset, unsigned long head) static int process_comm_event(event_t *event, unsigned long offset, unsigned long head) { - struct thread *thread; - - thread = threads__findnew(event->comm.pid, &threads, &last_match); + struct thread *thread = threads__findnew(event->comm.pid); dump_printf("%p [%p]: PERF_RECORD_COMM: %s:%d\n", (void *)(offset + head), @@ -1256,11 +727,8 @@ process_comm_event(event_t *event, unsigned long offset, unsigned long head) static int process_task_event(event_t *event, unsigned long offset, unsigned long head) { - struct thread *thread; - struct thread *parent; - - thread = threads__findnew(event->fork.pid, &threads, &last_match); - parent = threads__findnew(event->fork.ppid, &threads, &last_match); + struct thread *thread = threads__findnew(event->fork.pid); + struct thread *parent = threads__findnew(event->fork.ppid); dump_printf("%p [%p]: PERF_RECORD_%s: (%d:%d):(%d:%d)\n", (void *)(offset + head), @@ -1331,216 +799,79 @@ process_read_event(event_t *event, unsigned long offset, unsigned long head) return 0; } -static int -process_event(event_t *event, unsigned long offset, unsigned long head) +static int sample_type_check(u64 type) { - trace_event(event); - - switch (event->header.type) { - case PERF_RECORD_SAMPLE: - return process_sample_event(event, offset, head); - - case PERF_RECORD_MMAP: - return process_mmap_event(event, offset, head); - - case PERF_RECORD_COMM: - return process_comm_event(event, offset, head); - - case PERF_RECORD_FORK: - case PERF_RECORD_EXIT: - return process_task_event(event, offset, head); - - case PERF_RECORD_LOST: - return process_lost_event(event, offset, head); - - case PERF_RECORD_READ: - return process_read_event(event, offset, head); - - /* - * We dont process them right now but they are fine: - */ - - case PERF_RECORD_THROTTLE: - case PERF_RECORD_UNTHROTTLE: - return 0; - - default: - return -1; - } - - return 0; -} - -static int __cmd_report(void) -{ - int ret, rc = EXIT_FAILURE; - unsigned long offset = 0; - unsigned long head, shift; - struct stat input_stat; 
- struct thread *idle; - event_t *event; - uint32_t size; - char *buf; - - idle = register_idle_thread(&threads, &last_match); - thread__comm_adjust(idle); - - if (show_threads) - perf_read_values_init(&show_threads_values); - - input = open(input_name, O_RDONLY); - if (input < 0) { - fprintf(stderr, " failed to open file: %s", input_name); - if (!strcmp(input_name, "perf.data")) - fprintf(stderr, " (try 'perf record' first)"); - fprintf(stderr, "\n"); - exit(-1); - } - - ret = fstat(input, &input_stat); - if (ret < 0) { - perror("failed to stat file"); - exit(-1); - } - - if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) { - fprintf(stderr, "file: %s not owned by current user or root\n", input_name); - exit(-1); - } - - if (!input_stat.st_size) { - fprintf(stderr, "zero-sized file, nothing to do!\n"); - exit(0); - } - - header = perf_header__read(input); - head = header->data_offset; - - sample_type = perf_header__sample_type(header); + sample_type = type; if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) { if (sort__has_parent) { fprintf(stderr, "selected --sort parent, but no" " callchain data. Did you call" " perf record without -g?\n"); - exit(-1); + return -1; } if (callchain) { fprintf(stderr, "selected -g but no callchain data." " Did you call perf record without" " -g?\n"); - exit(-1); + return -1; } } else if (callchain_param.mode != CHAIN_NONE && !callchain) { callchain = 1; if (register_callchain_param(&callchain_param) < 0) { fprintf(stderr, "Can't register callchain" " params\n"); - exit(-1); + return -1; } } - if (load_kernel() < 0) { - perror("failed to load kernel symbols"); - return EXIT_FAILURE; - } - - if (!full_paths) { - if (getcwd(__cwd, sizeof(__cwd)) == NULL) { - perror("failed to get the current directory"); - return EXIT_FAILURE; - } - cwdlen = strlen(cwd); - } else { - cwd = NULL; - cwdlen = 0; - } - - shift = page_size * (head / page_size); - offset += shift; - head -= shift; - -remap: - buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ, - MAP_SHARED, input, offset); - if (buf == MAP_FAILED) { - perror("failed to mmap file"); - exit(-1); - } - -more: - event = (event_t *)(buf + head); - - size = event->header.size; - if (!size) - size = 8; - - if (head + event->header.size >= page_size * mmap_window) { - int munmap_ret; - - shift = page_size * (head / page_size); - - munmap_ret = munmap(buf, page_size * mmap_window); - assert(munmap_ret == 0); - - offset += shift; - head -= shift; - goto remap; - } - - size = event->header.size; - - dump_printf("\n%p [%p]: event: %d\n", - (void *)(offset + head), - (void *)(long)event->header.size, - event->header.type); - - if (!size || process_event(event, offset, head) < 0) { - - dump_printf("%p [%p]: skipping unknown header type: %d\n", - (void *)(offset + head), - (void *)(long)(event->header.size), - event->header.type); - - total_unknown++; + return 0; +} - /* - * assume we lost track of the stream, check alignment, and - * increment a single u64 in the hope to catch on again 'soon'. 
- */ +static struct perf_file_handler file_handler = { + .process_sample_event = process_sample_event, + .process_mmap_event = process_mmap_event, + .process_comm_event = process_comm_event, + .process_exit_event = process_task_event, + .process_fork_event = process_task_event, + .process_lost_event = process_lost_event, + .process_read_event = process_read_event, + .sample_type_check = sample_type_check, +}; - if (unlikely(head & 7)) - head &= ~7ULL; - size = 8; - } +static int __cmd_report(void) +{ + struct thread *idle; + int ret; - head += size; + idle = register_idle_thread(); + thread__comm_adjust(idle); - if (offset + head >= header->data_offset + header->data_size) - goto done; + if (show_threads) + perf_read_values_init(&show_threads_values); - if (offset + head < (unsigned long)input_stat.st_size) - goto more; + register_perf_file_handler(&file_handler); -done: - rc = EXIT_SUCCESS; - close(input); + ret = mmap_dispatch_perf_file(&header, input_name, force, full_paths, + &cwdlen, &cwd); + if (ret) + return ret; dump_printf(" IP events: %10ld\n", total); dump_printf(" mmap events: %10ld\n", total_mmap); dump_printf(" comm events: %10ld\n", total_comm); dump_printf(" fork events: %10ld\n", total_fork); dump_printf(" lost events: %10ld\n", total_lost); - dump_printf(" unknown events: %10ld\n", total_unknown); + dump_printf(" unknown events: %10ld\n", file_handler.total_unknown); if (dump_trace) return 0; - if (verbose >= 3) - threads__fprintf(stdout, &threads); + if (verbose > 3) + threads__fprintf(stdout); - if (verbose >= 2) + if (verbose > 2) dsos__fprintf(stdout); collapse__resort(); @@ -1550,7 +881,7 @@ done: if (show_threads) perf_read_values_destroy(&show_threads_values); - return rc; + return ret; } static int @@ -1606,7 +937,8 @@ setup: return 0; } -static const char * const report_usage[] = { +//static const char * const report_usage[] = { +const char * const report_usage[] = { "perf report [<options>] <command>", NULL }; @@ -1692,8 +1024,6 @@ int cmd_report(int argc, const char **argv, const char *prefix __used) { symbol__init(); - page_size = getpagesize(); - argc = parse_options(argc, argv, options, report_usage, 0); setup_sorting(); diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index ce2d5be4f30..807ca66e7a8 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -11,6 +11,7 @@ #include "util/trace-event.h" #include "util/debug.h" +#include "util/data_map.h" #include <sys/types.h> #include <sys/prctl.h> @@ -20,26 +21,23 @@ #include <math.h> static char const *input_name = "perf.data"; -static int input; -static unsigned long page_size; -static unsigned long mmap_window = 32; static unsigned long total_comm = 0; -static struct rb_root threads; -static struct thread *last_match; - static struct perf_header *header; static u64 sample_type; static char default_sort_order[] = "avg, max, switch, runtime"; static char *sort_order = default_sort_order; +static int profile_cpu = -1; + +static char *cwd; +static int cwdlen; + #define PR_SET_NAME 15 /* Set process name */ #define MAX_CPUS 4096 -#define BUG_ON(x) assert(!(x)) - static u64 run_measurement_overhead; static u64 sleep_measurement_overhead; @@ -74,6 +72,7 @@ enum sched_event_type { SCHED_EVENT_RUN, SCHED_EVENT_SLEEP, SCHED_EVENT_WAKEUP, + SCHED_EVENT_MIGRATION, }; struct sched_atom { @@ -398,6 +397,8 @@ process_sched_event(struct task_desc *this_task __used, struct sched_atom *atom) ret = sem_post(atom->wait_sem); BUG_ON(ret); break; + case SCHED_EVENT_MIGRATION: + break; 
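
The report code above is the first user of the new data_map layer: instead of open()/mmap()-ing perf.data itself, a builtin fills a struct perf_file_handler with the per-record callbacks it cares about and hands control to mmap_dispatch_perf_file(). A minimal consumer of that interface would look roughly like the sketch below; the command name and the callback bodies are illustrative, only the types and entry points come from this patch:

#include "util/data_map.h"
#include "util/thread.h"
#include "util/header.h"

static struct perf_header *header;
static char *cwd;
static int cwdlen;

static int process_sample_event(event_t *event, unsigned long offset __used,
				unsigned long head __used)
{
	/* Per-sample accounting would go here. */
	struct thread *thread = threads__findnew(event->ip.pid);

	return thread ? 0 : -1;
}

static int sample_type_check(u64 type)
{
	/* Refuse perf.data files that lack the sample fields we need. */
	return (type & PERF_SAMPLE_IP) ? 0 : -1;
}

static struct perf_file_handler file_handler = {
	.process_sample_event	= process_sample_event,
	.sample_type_check	= sample_type_check,
};

static int __cmd_example(void)
{
	/*
	 * Record types without a callback fall back to the stubs that
	 * register_perf_file_handler() installs, so only the handlers
	 * of interest need to be filled in.
	 */
	register_idle_thread();
	register_perf_file_handler(&file_handler);

	return mmap_dispatch_perf_file(&header, "perf.data", 0, 0,
				       &cwdlen, &cwd);
}
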
default: BUG_ON(1); } @@ -635,9 +636,7 @@ static void test_calibrations(void) static int process_comm_event(event_t *event, unsigned long offset, unsigned long head) { - struct thread *thread; - - thread = threads__findnew(event->comm.pid, &threads, &last_match); + struct thread *thread = threads__findnew(event->comm.tid); dump_printf("%p [%p]: perf_event_comm: %s:%d\n", (void *)(offset + head), @@ -745,6 +744,22 @@ struct trace_fork_event { u32 child_pid; }; +struct trace_migrate_task_event { + u32 size; + + u16 common_type; + u8 common_flags; + u8 common_preempt_count; + u32 common_pid; + u32 common_tgid; + + char comm[16]; + u32 pid; + + u32 prio; + u32 cpu; +}; + struct trace_sched_handler { void (*switch_event)(struct trace_switch_event *, struct event *, @@ -769,6 +784,12 @@ struct trace_sched_handler { int cpu, u64 timestamp, struct thread *thread); + + void (*migrate_task_event)(struct trace_migrate_task_event *, + struct event *, + int cpu, + u64 timestamp, + struct thread *thread); }; @@ -1058,8 +1079,8 @@ latency_switch_event(struct trace_switch_event *switch_event, die("hm, delta: %Ld < 0 ?\n", delta); - sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match); - sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match); + sched_out = threads__findnew(switch_event->prev_pid); + sched_in = threads__findnew(switch_event->next_pid); out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid); if (!out_events) { @@ -1092,13 +1113,10 @@ latency_runtime_event(struct trace_runtime_event *runtime_event, u64 timestamp, struct thread *this_thread __used) { - struct work_atoms *atoms; - struct thread *thread; + struct thread *thread = threads__findnew(runtime_event->pid); + struct work_atoms *atoms = thread_atoms_search(&atom_root, thread, &cmp_pid); BUG_ON(cpu >= MAX_CPUS || cpu < 0); - - thread = threads__findnew(runtime_event->pid, &threads, &last_match); - atoms = thread_atoms_search(&atom_root, thread, &cmp_pid); if (!atoms) { thread_atoms_insert(thread); atoms = thread_atoms_search(&atom_root, thread, &cmp_pid); @@ -1125,7 +1143,7 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event, if (!wakeup_event->success) return; - wakee = threads__findnew(wakeup_event->pid, &threads, &last_match); + wakee = threads__findnew(wakeup_event->pid); atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid); if (!atoms) { thread_atoms_insert(wakee); @@ -1139,7 +1157,12 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event, atom = list_entry(atoms->work_list.prev, struct work_atom, list); - if (atom->state != THREAD_SLEEPING) + /* + * You WILL be missing events if you've recorded only + * one CPU, or are only looking at only one, so don't + * make useless noise. + */ + if (profile_cpu == -1 && atom->state != THREAD_SLEEPING) nr_state_machine_bugs++; nr_timestamps++; @@ -1152,11 +1175,51 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event, atom->wake_up_time = timestamp; } +static void +latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event, + struct event *__event __used, + int cpu __used, + u64 timestamp, + struct thread *thread __used) +{ + struct work_atoms *atoms; + struct work_atom *atom; + struct thread *migrant; + + /* + * Only need to worry about migration when profiling one CPU. 
+ */ + if (profile_cpu == -1) + return; + + migrant = threads__findnew(migrate_task_event->pid); + atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid); + if (!atoms) { + thread_atoms_insert(migrant); + register_pid(migrant->pid, migrant->comm); + atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid); + if (!atoms) + die("migration-event: Internal tree error"); + add_sched_out_event(atoms, 'R', timestamp); + } + + BUG_ON(list_empty(&atoms->work_list)); + + atom = list_entry(atoms->work_list.prev, struct work_atom, list); + atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp; + + nr_timestamps++; + + if (atom->sched_out_time > timestamp) + nr_unordered_timestamps++; +} + static struct trace_sched_handler lat_ops = { .wakeup_event = latency_wakeup_event, .switch_event = latency_switch_event, .runtime_event = latency_runtime_event, .fork_event = latency_fork_event, + .migrate_task_event = latency_migrate_task_event, }; static void output_lat_thread(struct work_atoms *work_list) @@ -1385,8 +1448,8 @@ map_switch_event(struct trace_switch_event *switch_event, die("hm, delta: %Ld < 0 ?\n", delta); - sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match); - sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match); + sched_out = threads__findnew(switch_event->prev_pid); + sched_in = threads__findnew(switch_event->next_pid); curr_thread[this_cpu] = sched_in; @@ -1517,6 +1580,26 @@ process_sched_exit_event(struct event *event, } static void +process_sched_migrate_task_event(struct raw_event_sample *raw, + struct event *event, + int cpu __used, + u64 timestamp __used, + struct thread *thread __used) +{ + struct trace_migrate_task_event migrate_task_event; + + FILL_COMMON_FIELDS(migrate_task_event, event, raw->data); + + FILL_ARRAY(migrate_task_event, comm, event, raw->data); + FILL_FIELD(migrate_task_event, pid, event, raw->data); + FILL_FIELD(migrate_task_event, prio, event, raw->data); + FILL_FIELD(migrate_task_event, cpu, event, raw->data); + + if (trace_handler->migrate_task_event) + trace_handler->migrate_task_event(&migrate_task_event, event, cpu, timestamp, thread); +} + +static void process_raw_event(event_t *raw_event __used, void *more_data, int cpu, u64 timestamp, struct thread *thread) { @@ -1539,23 +1622,24 @@ process_raw_event(event_t *raw_event __used, void *more_data, process_sched_fork_event(raw, event, cpu, timestamp, thread); if (!strcmp(event->name, "sched_process_exit")) process_sched_exit_event(event, cpu, timestamp, thread); + if (!strcmp(event->name, "sched_migrate_task")) + process_sched_migrate_task_event(raw, event, cpu, timestamp, thread); } static int process_sample_event(event_t *event, unsigned long offset, unsigned long head) { - char level; - int show = 0; - struct dso *dso = NULL; struct thread *thread; u64 ip = event->ip.ip; u64 timestamp = -1; u32 cpu = -1; u64 period = 1; void *more_data = event->ip.__more_data; - int cpumode; - thread = threads__findnew(event->ip.pid, &threads, &last_match); + if (!(sample_type & PERF_SAMPLE_RAW)) + return 0; + + thread = threads__findnew(event->ip.pid); if (sample_type & PERF_SAMPLE_TIME) { timestamp = *(u64 *)more_data; @@ -1581,169 +1665,60 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) (void *)(long)ip, (long long)period); - dump_printf(" ... 
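
The sched_migrate_task support added here also documents the general recipe for wiring an additional scheduler tracepoint into perf sched: declare a structure mirroring the event's fields, decode it with the FILL_COMMON_FIELDS()/FILL_FIELD() helpers, give struct trace_sched_handler an optional callback, and dispatch on the event name in process_raw_event(). A hypothetical extra event would follow the same shape; the "sched_foo" name, its single pid field and the foo_event callback below are made up for illustration only:

struct trace_foo_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	u32 pid;
};

static void
process_sched_foo_event(struct raw_event_sample *raw, struct event *event,
			int cpu, u64 timestamp, struct thread *thread)
{
	struct trace_foo_event foo_event;

	FILL_COMMON_FIELDS(foo_event, event, raw->data);
	FILL_FIELD(foo_event, pid, event, raw->data);

	/* Assumes a matching .foo_event member in struct trace_sched_handler. */
	if (trace_handler->foo_event)
		trace_handler->foo_event(&foo_event, event, cpu,
					 timestamp, thread);
}

process_raw_event() would then call process_sched_foo_event() for raw records whose event->name compares equal to "sched_foo", exactly as it does for sched_migrate_task above.
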
thread: %s:%d\n", thread->comm, thread->pid); - if (thread == NULL) { eprintf("problem processing %d event, skipping it.\n", event->header.type); return -1; } - cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; - - if (cpumode == PERF_RECORD_MISC_KERNEL) { - show = SHOW_KERNEL; - level = 'k'; - - dso = kernel_dso; - - dump_printf(" ...... dso: %s\n", dso->name); - - } else if (cpumode == PERF_RECORD_MISC_USER) { - - show = SHOW_USER; - level = '.'; - - } else { - show = SHOW_HV; - level = 'H'; - - dso = hypervisor_dso; + dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid); - dump_printf(" ...... dso: [hypervisor]\n"); - } + if (profile_cpu != -1 && profile_cpu != (int) cpu) + return 0; - if (sample_type & PERF_SAMPLE_RAW) - process_raw_event(event, more_data, cpu, timestamp, thread); + process_raw_event(event, more_data, cpu, timestamp, thread); return 0; } static int -process_event(event_t *event, unsigned long offset, unsigned long head) +process_lost_event(event_t *event __used, + unsigned long offset __used, + unsigned long head __used) { - trace_event(event); + nr_lost_chunks++; + nr_lost_events += event->lost.lost; - nr_events++; - switch (event->header.type) { - case PERF_RECORD_MMAP: - return 0; - case PERF_RECORD_LOST: - nr_lost_chunks++; - nr_lost_events += event->lost.lost; - return 0; - - case PERF_RECORD_COMM: - return process_comm_event(event, offset, head); - - case PERF_RECORD_EXIT ... PERF_RECORD_READ: - return 0; + return 0; +} - case PERF_RECORD_SAMPLE: - return process_sample_event(event, offset, head); +static int sample_type_check(u64 type) +{ + sample_type = type; - case PERF_RECORD_MAX: - default: + if (!(sample_type & PERF_SAMPLE_RAW)) { + fprintf(stderr, + "No trace sample to read. Did you call perf record " + "without -R?"); return -1; } return 0; } +static struct perf_file_handler file_handler = { + .process_sample_event = process_sample_event, + .process_comm_event = process_comm_event, + .process_lost_event = process_lost_event, + .sample_type_check = sample_type_check, +}; + static int read_events(void) { - int ret, rc = EXIT_FAILURE; - unsigned long offset = 0; - unsigned long head = 0; - struct stat perf_stat; - event_t *event; - uint32_t size; - char *buf; - - trace_report(); - register_idle_thread(&threads, &last_match); - - input = open(input_name, O_RDONLY); - if (input < 0) { - perror("failed to open file"); - exit(-1); - } - - ret = fstat(input, &perf_stat); - if (ret < 0) { - perror("failed to stat file"); - exit(-1); - } - - if (!perf_stat.st_size) { - fprintf(stderr, "zero-sized file, nothing to do!\n"); - exit(0); - } - header = perf_header__read(input); - head = header->data_offset; - sample_type = perf_header__sample_type(header); - - if (!(sample_type & PERF_SAMPLE_RAW)) - die("No trace sample to read. 
Did you call perf record " - "without -R?"); - - if (load_kernel() < 0) { - perror("failed to load kernel symbols"); - return EXIT_FAILURE; - } - -remap: - buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ, - MAP_SHARED, input, offset); - if (buf == MAP_FAILED) { - perror("failed to mmap file"); - exit(-1); - } - -more: - event = (event_t *)(buf + head); - - size = event->header.size; - if (!size) - size = 8; - - if (head + event->header.size >= page_size * mmap_window) { - unsigned long shift = page_size * (head / page_size); - int res; - - res = munmap(buf, page_size * mmap_window); - assert(res == 0); - - offset += shift; - head -= shift; - goto remap; - } - - size = event->header.size; - - - if (!size || process_event(event, offset, head) < 0) { - - /* - * assume we lost track of the stream, check alignment, and - * increment a single u64 in the hope to catch on again 'soon'. - */ - - if (unlikely(head & 7)) - head &= ~7ULL; - - size = 8; - } - - head += size; - - if (offset + head < (unsigned long)perf_stat.st_size) - goto more; - - rc = EXIT_SUCCESS; - close(input); + register_idle_thread(); + register_perf_file_handler(&file_handler); - return rc; + return mmap_dispatch_perf_file(&header, input_name, 0, 0, &cwdlen, &cwd); } static void print_bad_events(void) @@ -1883,6 +1858,8 @@ static const struct option latency_options[] = { "sort by key(s): runtime, switch, avg, max"), OPT_BOOLEAN('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"), + OPT_INTEGER('C', "CPU", &profile_cpu, + "CPU to profile on"), OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"), OPT_END() @@ -1961,7 +1938,6 @@ static int __cmd_record(int argc, const char **argv) int cmd_sched(int argc, const char **argv, const char *prefix __used) { symbol__init(); - page_size = getpagesize(); argc = parse_options(argc, argv, sched_options, sched_usage, PARSE_OPT_STOP_AT_NON_OPTION); diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 3db31e7bf17..c6df3770b87 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -50,15 +50,17 @@ static struct perf_event_attr default_attrs[] = { - { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK }, - { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES}, - { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS }, - { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS }, - - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES }, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS }, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_REFERENCES}, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_MISSES }, + { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK }, + { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES }, + { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS }, + { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS }, + + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_REFERENCES }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_MISSES }, }; @@ -125,6 +127,7 @@ struct stats 
event_res_stats[MAX_COUNTERS][3]; struct stats runtime_nsecs_stats; struct stats walltime_nsecs_stats; struct stats runtime_cycles_stats; +struct stats runtime_branches_stats; #define MATCH_EVENT(t, c, counter) \ (attrs[counter].type == PERF_TYPE_##t && \ @@ -235,6 +238,8 @@ static void read_counter(int counter) update_stats(&runtime_nsecs_stats, count[0]); if (MATCH_EVENT(HARDWARE, HW_CPU_CYCLES, counter)) update_stats(&runtime_cycles_stats, count[0]); + if (MATCH_EVENT(HARDWARE, HW_BRANCH_INSTRUCTIONS, counter)) + update_stats(&runtime_branches_stats, count[0]); } static int run_perf_stat(int argc __used, const char **argv) @@ -352,6 +357,14 @@ static void abs_printout(int counter, double avg) ratio = avg / total; fprintf(stderr, " # %10.3f IPC ", ratio); + } else if (MATCH_EVENT(HARDWARE, HW_BRANCH_MISSES, counter)) { + total = avg_stats(&runtime_branches_stats); + + if (total) + ratio = avg * 100 / total; + + fprintf(stderr, " # %10.3f %% ", ratio); + } else { total = avg_stats(&runtime_nsecs_stats); diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index a1b1d10912d..cc662863030 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -22,6 +22,7 @@ #include "util/symbol.h" #include "util/color.h" +#include "util/thread.h" #include "util/util.h" #include <linux/rbtree.h> #include "util/parse-options.h" @@ -54,26 +55,26 @@ static int fd[MAX_NR_CPUS][MAX_COUNTERS]; -static int system_wide = 0; +static int system_wide = 0; -static int default_interval = 100000; +static int default_interval = 0; -static int count_filter = 5; -static int print_entries = 15; +static int count_filter = 5; +static int print_entries = 15; -static int target_pid = -1; -static int inherit = 0; -static int profile_cpu = -1; -static int nr_cpus = 0; -static unsigned int realtime_prio = 0; -static int group = 0; +static int target_pid = -1; +static int inherit = 0; +static int profile_cpu = -1; +static int nr_cpus = 0; +static unsigned int realtime_prio = 0; +static int group = 0; static unsigned int page_size; -static unsigned int mmap_pages = 16; -static int freq = 0; +static unsigned int mmap_pages = 16; +static int freq = 1000; /* 1 KHz */ -static int delay_secs = 2; -static int zero; -static int dump_symtab; +static int delay_secs = 2; +static int zero = 0; +static int dump_symtab = 0; /* * Source @@ -86,19 +87,16 @@ struct source_line { struct source_line *next; }; -static char *sym_filter = NULL; -struct sym_entry *sym_filter_entry = NULL; -static int sym_pcnt_filter = 5; -static int sym_counter = 0; -static int display_weighted = -1; +static char *sym_filter = NULL; +struct sym_entry *sym_filter_entry = NULL; +static int sym_pcnt_filter = 5; +static int sym_counter = 0; +static int display_weighted = -1; /* * Symbols */ -static u64 min_ip; -static u64 max_ip = -1ll; - struct sym_entry { struct rb_node rb_node; struct list_head node; @@ -106,6 +104,7 @@ struct sym_entry { unsigned long snap_count; double weight; int skip; + struct map *map; struct source_line *source; struct source_line *lines; struct source_line **lines_tail; @@ -119,12 +118,11 @@ struct sym_entry { static void parse_source(struct sym_entry *syme) { struct symbol *sym; - struct module *module; - struct section *section = NULL; + struct map *map; FILE *file; char command[PATH_MAX*2]; - const char *path = vmlinux_name; - u64 start, end, len; + const char *path; + u64 len; if (!syme) return; @@ -135,27 +133,15 @@ static void parse_source(struct sym_entry *syme) } sym = (struct symbol *)(syme + 1); - module = 
sym->module; - - if (module) - path = module->path; - if (!path) - return; - - start = sym->obj_start; - if (!start) - start = sym->start; + map = syme->map; + path = map->dso->long_name; - if (module) { - section = module->sections->find_section(module->sections, ".text"); - if (section) - start -= section->vma; - } - - end = start + sym->end - sym->start + 1; len = sym->end - sym->start; - sprintf(command, "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS %s", start, end, path); + sprintf(command, + "objdump --start-address=0x%016Lx " + "--stop-address=0x%016Lx -dS %s", + sym->start, sym->end, path); file = popen(command, "r"); if (!file) @@ -187,13 +173,11 @@ static void parse_source(struct sym_entry *syme) if (strlen(src->line)>8 && src->line[8] == ':') { src->eip = strtoull(src->line, NULL, 16); - if (section) - src->eip += section->vma; + src->eip += map->start; } if (strlen(src->line)>8 && src->line[16] == ':') { src->eip = strtoull(src->line, NULL, 16); - if (section) - src->eip += section->vma; + src->eip += map->start; } } pclose(file); @@ -245,16 +229,9 @@ static void lookup_sym_source(struct sym_entry *syme) struct symbol *symbol = (struct symbol *)(syme + 1); struct source_line *line; char pattern[PATH_MAX]; - char *idx; sprintf(pattern, "<%s>:", symbol->name); - if (symbol->module) { - idx = strstr(pattern, "\t"); - if (idx) - *idx = 0; - } - pthread_mutex_lock(&syme->source_lock); for (line = syme->lines; line; line = line->next) { if (strstr(line->line, pattern)) { @@ -516,8 +493,8 @@ static void print_sym_table(void) if (verbose) printf(" - %016llx", sym->start); printf(" : %s", sym->name); - if (sym->module) - printf("\t[%s]", sym->module->name); + if (syme->map->dso->name[0] == '[') + printf(" \t%s", syme->map->dso->name); printf("\n"); } } @@ -790,7 +767,7 @@ static const char *skip_symbols[] = { NULL }; -static int symbol_filter(struct dso *self, struct symbol *sym) +static int symbol_filter(struct map *map, struct symbol *sym) { struct sym_entry *syme; const char *name = sym->name; @@ -812,7 +789,8 @@ static int symbol_filter(struct dso *self, struct symbol *sym) strstr(name, "_text_end")) return 1; - syme = dso__sym_priv(self, sym); + syme = dso__sym_priv(map->dso, sym); + syme->map = map; pthread_mutex_init(&syme->source_lock, NULL); if (!sym_filter_entry && sym_filter && !strcmp(name, sym_filter)) sym_filter_entry = syme; @@ -829,34 +807,14 @@ static int symbol_filter(struct dso *self, struct symbol *sym) static int parse_symbols(void) { - struct rb_node *node; - struct symbol *sym; - int use_modules = vmlinux_name ? 
1 : 0; - - kernel_dso = dso__new("[kernel]", sizeof(struct sym_entry)); - if (kernel_dso == NULL) + if (dsos__load_kernel(vmlinux_name, sizeof(struct sym_entry), + symbol_filter, verbose, 1) <= 0) return -1; - if (dso__load_kernel(kernel_dso, vmlinux_name, symbol_filter, verbose, use_modules) <= 0) - goto out_delete_dso; - - node = rb_first(&kernel_dso->syms); - sym = rb_entry(node, struct symbol, rb_node); - min_ip = sym->start; - - node = rb_last(&kernel_dso->syms); - sym = rb_entry(node, struct symbol, rb_node); - max_ip = sym->end; - if (dump_symtab) - dso__fprintf(kernel_dso, stderr); + dsos__fprintf(stderr); return 0; - -out_delete_dso: - dso__delete(kernel_dso); - kernel_dso = NULL; - return -1; } /* @@ -864,10 +822,11 @@ out_delete_dso: */ static void record_ip(u64 ip, int counter) { - struct symbol *sym = dso__find_symbol(kernel_dso, ip); + struct map *map; + struct symbol *sym = kernel_maps__find_symbol(ip, &map); if (sym != NULL) { - struct sym_entry *syme = dso__sym_priv(kernel_dso, sym); + struct sym_entry *syme = dso__sym_priv(map->dso, sym); if (!syme->skip) { syme->count[counter]++; @@ -913,8 +872,6 @@ static unsigned int mmap_read_head(struct mmap_data *md) return head; } -struct timeval last_read, this_read; - static void mmap_read_counter(struct mmap_data *md) { unsigned int head = mmap_read_head(md); @@ -922,8 +879,6 @@ static void mmap_read_counter(struct mmap_data *md) unsigned char *data = md->base + page_size; int diff; - gettimeofday(&this_read, NULL); - /* * If we're further behind than half the buffer, there's a chance * the writer will bite our tail and mess up the samples under us. @@ -934,14 +889,7 @@ static void mmap_read_counter(struct mmap_data *md) */ diff = head - old; if (diff > md->mask / 2 || diff < 0) { - struct timeval iv; - unsigned long msecs; - - timersub(&this_read, &last_read, &iv); - msecs = iv.tv_sec*1000 + iv.tv_usec/1000; - - fprintf(stderr, "WARNING: failed to keep up with mmap data." - " Last read %lu msecs ago.\n", msecs); + fprintf(stderr, "WARNING: failed to keep up with mmap data.\n"); /* * head points to a known good entry, start there. @@ -949,8 +897,6 @@ static void mmap_read_counter(struct mmap_data *md) old = head; } - last_read = this_read; - for (; old != head;) { event_t *event = (event_t *)&data[old & md->mask]; @@ -1018,7 +964,13 @@ static void start_counter(int i, int counter) attr = attrs + counter; attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID; - attr->freq = freq; + + if (freq) { + attr->sample_type |= PERF_SAMPLE_PERIOD; + attr->freq = 1; + attr->sample_freq = freq; + } + attr->inherit = (cpu < 0) && inherit; try_again: @@ -1173,11 +1125,6 @@ int cmd_top(int argc, const char **argv, const char *prefix __used) if (argc) usage_with_options(top_usage, options); - if (freq) { - default_interval = freq; - freq = 1; - } - /* CPU and PID are mutually exclusive */ if (target_pid != -1 && profile_cpu != -1) { printf("WARNING: PID switch overriding CPU\n"); @@ -1194,6 +1141,19 @@ int cmd_top(int argc, const char **argv, const char *prefix __used) parse_symbols(); parse_source(sym_filter_entry); + + /* + * User specified count overrides default frequency. 
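
With the changes above, perf top samples by frequency (1000 Hz) by default rather than by a fixed event period, and an explicitly requested count switches it back to period mode. The two resulting perf_event_attr set-ups differ roughly as sketched below; this is a restatement of the logic, not code lifted from the patch, and the 100000 period is just an example value:

/* Frequency mode (default): the kernel adapts the period so that
 * about sample_freq samples are delivered per second. */
attr->sample_type  |= PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_PERIOD;
attr->freq	    = 1;
attr->sample_freq   = 1000;

/* Period mode (user supplied a count): one sample every
 * sample_period occurrences of the event; freq stays 0. */
attr->sample_type  |= PERF_SAMPLE_IP | PERF_SAMPLE_TID;
attr->sample_period = 100000;
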
+ */ + if (default_interval) + freq = 0; + else if (freq) { + default_interval = freq; + } else { + fprintf(stderr, "frequency and count are zero, aborting\n"); + exit(EXIT_FAILURE); + } + /* * Fill in the ones not specifically initialized via -c: */ diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index 0c5e4f72f2b..4c129ff0bb1 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -12,28 +12,24 @@ #include "util/debug.h" #include "util/trace-event.h" +#include "util/data_map.h" static char const *input_name = "perf.data"; -static int input; -static unsigned long page_size; -static unsigned long mmap_window = 32; static unsigned long total = 0; static unsigned long total_comm = 0; -static struct rb_root threads; -static struct thread *last_match; - static struct perf_header *header; static u64 sample_type; +static char *cwd; +static int cwdlen; + static int process_comm_event(event_t *event, unsigned long offset, unsigned long head) { - struct thread *thread; - - thread = threads__findnew(event->comm.pid, &threads, &last_match); + struct thread *thread = threads__findnew(event->comm.pid); dump_printf("%p [%p]: PERF_RECORD_COMM: %s:%d\n", (void *)(offset + head), @@ -53,18 +49,12 @@ process_comm_event(event_t *event, unsigned long offset, unsigned long head) static int process_sample_event(event_t *event, unsigned long offset, unsigned long head) { - char level; - int show = 0; - struct dso *dso = NULL; - struct thread *thread; u64 ip = event->ip.ip; u64 timestamp = -1; u32 cpu = -1; u64 period = 1; void *more_data = event->ip.__more_data; - int cpumode; - - thread = threads__findnew(event->ip.pid, &threads, &last_match); + struct thread *thread = threads__findnew(event->ip.pid); if (sample_type & PERF_SAMPLE_TIME) { timestamp = *(u64 *)more_data; @@ -90,37 +80,13 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) (void *)(long)ip, (long long)period); - dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid); - if (thread == NULL) { eprintf("problem processing %d event, skipping it.\n", event->header.type); return -1; } - cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; - - if (cpumode == PERF_RECORD_MISC_KERNEL) { - show = SHOW_KERNEL; - level = 'k'; - - dso = kernel_dso; - - dump_printf(" ...... dso: %s\n", dso->name); - - } else if (cpumode == PERF_RECORD_MISC_USER) { - - show = SHOW_USER; - level = '.'; - - } else { - show = SHOW_HV; - level = 'H'; - - dso = hypervisor_dso; - - dump_printf(" ...... dso: [hypervisor]\n"); - } + dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid); if (sample_type & PERF_SAMPLE_RAW) { struct { @@ -140,121 +106,32 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) return 0; } -static int -process_event(event_t *event, unsigned long offset, unsigned long head) +static int sample_type_check(u64 type) { - trace_event(event); - - switch (event->header.type) { - case PERF_RECORD_MMAP ... PERF_RECORD_LOST: - return 0; - - case PERF_RECORD_COMM: - return process_comm_event(event, offset, head); + sample_type = type; - case PERF_RECORD_EXIT ... PERF_RECORD_READ: - return 0; - - case PERF_RECORD_SAMPLE: - return process_sample_event(event, offset, head); - - case PERF_RECORD_MAX: - default: + if (!(sample_type & PERF_SAMPLE_RAW)) { + fprintf(stderr, + "No trace sample to read. 
Did you call perf record " + "without -R?"); return -1; } return 0; } +static struct perf_file_handler file_handler = { + .process_sample_event = process_sample_event, + .process_comm_event = process_comm_event, + .sample_type_check = sample_type_check, +}; + static int __cmd_trace(void) { - int ret, rc = EXIT_FAILURE; - unsigned long offset = 0; - unsigned long head = 0; - struct stat perf_stat; - event_t *event; - uint32_t size; - char *buf; - - trace_report(); - register_idle_thread(&threads, &last_match); - - input = open(input_name, O_RDONLY); - if (input < 0) { - perror("failed to open file"); - exit(-1); - } - - ret = fstat(input, &perf_stat); - if (ret < 0) { - perror("failed to stat file"); - exit(-1); - } - - if (!perf_stat.st_size) { - fprintf(stderr, "zero-sized file, nothing to do!\n"); - exit(0); - } - header = perf_header__read(input); - head = header->data_offset; - sample_type = perf_header__sample_type(header); - - if (!(sample_type & PERF_SAMPLE_RAW)) - die("No trace sample to read. Did you call perf record " - "without -R?"); - - if (load_kernel() < 0) { - perror("failed to load kernel symbols"); - return EXIT_FAILURE; - } - -remap: - buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ, - MAP_SHARED, input, offset); - if (buf == MAP_FAILED) { - perror("failed to mmap file"); - exit(-1); - } - -more: - event = (event_t *)(buf + head); - - if (head + event->header.size >= page_size * mmap_window) { - unsigned long shift = page_size * (head / page_size); - int res; - - res = munmap(buf, page_size * mmap_window); - assert(res == 0); - - offset += shift; - head -= shift; - goto remap; - } - - size = event->header.size; - - if (!size || process_event(event, offset, head) < 0) { - - /* - * assume we lost track of the stream, check alignment, and - * increment a single u64 in the hope to catch on again 'soon'. - */ - - if (unlikely(head & 7)) - head &= ~7ULL; - - size = 8; - } - - head += size; - - if (offset + head < (unsigned long)perf_stat.st_size) - goto more; - - rc = EXIT_SUCCESS; - close(input); + register_idle_thread(); + register_perf_file_handler(&file_handler); - return rc; + return mmap_dispatch_perf_file(&header, input_name, 0, 0, &cwdlen, &cwd); } static const char * const annotate_usage[] = { @@ -267,13 +144,14 @@ static const struct option options[] = { "dump raw trace in ASCII"), OPT_BOOLEAN('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"), + OPT_BOOLEAN('l', "latency", &latency_format, + "show latency attributes (irqs/preemption disabled, etc)"), OPT_END() }; int cmd_trace(int argc, const char **argv, const char *prefix __used) { symbol__init(); - page_size = getpagesize(); argc = parse_options(argc, argv, options, annotate_usage, 0); if (argc) { diff --git a/tools/perf/perf.c b/tools/perf/perf.c index 19fc7feb9d5..624e62d9d1e 100644 --- a/tools/perf/perf.c +++ b/tools/perf/perf.c @@ -89,8 +89,8 @@ static int handle_options(const char*** argv, int* argc, int* envchanged) /* * Check remaining flags. 
*/ - if (!prefixcmp(cmd, "--exec-path")) { - cmd += 11; + if (!prefixcmp(cmd, CMD_EXEC_PATH)) { + cmd += strlen(CMD_EXEC_PATH); if (*cmd == '=') perf_set_argv_exec_path(cmd + 1); else { @@ -117,8 +117,8 @@ static int handle_options(const char*** argv, int* argc, int* envchanged) (*argv)++; (*argc)--; handled++; - } else if (!prefixcmp(cmd, "--perf-dir=")) { - setenv(PERF_DIR_ENVIRONMENT, cmd + 10, 1); + } else if (!prefixcmp(cmd, CMD_PERF_DIR)) { + setenv(PERF_DIR_ENVIRONMENT, cmd + strlen(CMD_PERF_DIR), 1); if (envchanged) *envchanged = 1; } else if (!strcmp(cmd, "--work-tree")) { @@ -131,8 +131,8 @@ static int handle_options(const char*** argv, int* argc, int* envchanged) *envchanged = 1; (*argv)++; (*argc)--; - } else if (!prefixcmp(cmd, "--work-tree=")) { - setenv(PERF_WORK_TREE_ENVIRONMENT, cmd + 12, 1); + } else if (!prefixcmp(cmd, CMD_WORK_TREE)) { + setenv(PERF_WORK_TREE_ENVIRONMENT, cmd + strlen(CMD_WORK_TREE), 1); if (envchanged) *envchanged = 1; } else if (!strcmp(cmd, "--debugfs-dir")) { @@ -146,8 +146,8 @@ static int handle_options(const char*** argv, int* argc, int* envchanged) *envchanged = 1; (*argv)++; (*argc)--; - } else if (!prefixcmp(cmd, "--debugfs-dir=")) { - strncpy(debugfs_mntpt, cmd + 14, MAXPATHLEN); + } else if (!prefixcmp(cmd, CMD_DEBUGFS_DIR)) { + strncpy(debugfs_mntpt, cmd + strlen(CMD_DEBUGFS_DIR), MAXPATHLEN); debugfs_mntpt[MAXPATHLEN - 1] = '\0'; if (envchanged) *envchanged = 1; diff --git a/tools/perf/util/cache.h b/tools/perf/util/cache.h index 6f8ea9d210b..918eb376abe 100644 --- a/tools/perf/util/cache.h +++ b/tools/perf/util/cache.h @@ -1,10 +1,15 @@ -#ifndef CACHE_H -#define CACHE_H +#ifndef __PERF_CACHE_H +#define __PERF_CACHE_H #include "util.h" #include "strbuf.h" #include "../perf.h" +#define CMD_EXEC_PATH "--exec-path" +#define CMD_PERF_DIR "--perf-dir=" +#define CMD_WORK_TREE "--work-tree=" +#define CMD_DEBUGFS_DIR "--debugfs-dir=" + #define PERF_DIR_ENVIRONMENT "PERF_DIR" #define PERF_WORK_TREE_ENVIRONMENT "PERF_WORK_TREE" #define DEFAULT_PERF_DIR_ENVIRONMENT ".perf" @@ -117,4 +122,4 @@ extern char *perf_pathdup(const char *fmt, ...) 
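
Switching the option parsing to the new CMD_* macros and strlen() removes a class of hand-counted-offset bugs: the old "--perf-dir=" branch skipped only 10 characters of an 11-character prefix, so the '=' ended up at the start of the value handed to setenv(). The resulting pattern, reduced to a standalone helper (the helper name is illustrative, not part of the patch):

#include <string.h>

#define CMD_PERF_DIR	"--perf-dir="

/* Return the value following a "--option=" prefix, or NULL if it
 * does not match; strlen(prefix) can never drift out of sync with
 * the literal the way a hard-coded offset can. */
static const char *option_value(const char *cmd, const char *prefix)
{
	if (strncmp(cmd, prefix, strlen(prefix)))
		return NULL;
	return cmd + strlen(prefix);
}

/* option_value("--perf-dir=/tmp/perf", CMD_PERF_DIR) yields "/tmp/perf". */
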
extern size_t strlcpy(char *dest, const char *src, size_t size); -#endif /* CACHE_H */ +#endif /* __PERF_CACHE_H */ diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h index 43cf3ea9e08..ad4626de4c2 100644 --- a/tools/perf/util/callchain.h +++ b/tools/perf/util/callchain.h @@ -58,4 +58,4 @@ static inline u64 cumul_hits(struct callchain_node *node) int register_callchain_param(struct callchain_param *param); void append_chain(struct callchain_node *root, struct ip_callchain *chain, struct symbol **syms); -#endif +#endif /* __PERF_CALLCHAIN_H */ diff --git a/tools/perf/util/color.h b/tools/perf/util/color.h index 58d597564b9..24e8809210b 100644 --- a/tools/perf/util/color.h +++ b/tools/perf/util/color.h @@ -1,5 +1,5 @@ -#ifndef COLOR_H -#define COLOR_H +#ifndef __PERF_COLOR_H +#define __PERF_COLOR_H /* "\033[1;38;5;2xx;48;5;2xxm\0" is 23 bytes */ #define COLOR_MAXLEN 24 @@ -39,4 +39,4 @@ int color_fwrite_lines(FILE *fp, const char *color, size_t count, const char *bu int percent_color_fprintf(FILE *fp, const char *fmt, double percent); const char *get_percent_color(double percent); -#endif /* COLOR_H */ +#endif /* __PERF_COLOR_H */ diff --git a/tools/perf/util/data_map.c b/tools/perf/util/data_map.c new file mode 100644 index 00000000000..242b0555ab9 --- /dev/null +++ b/tools/perf/util/data_map.c @@ -0,0 +1,222 @@ +#include "data_map.h" +#include "symbol.h" +#include "util.h" +#include "debug.h" + + +static struct perf_file_handler *curr_handler; +static unsigned long mmap_window = 32; +static char __cwd[PATH_MAX]; + +static int +process_event_stub(event_t *event __used, + unsigned long offset __used, + unsigned long head __used) +{ + return 0; +} + +void register_perf_file_handler(struct perf_file_handler *handler) +{ + if (!handler->process_sample_event) + handler->process_sample_event = process_event_stub; + if (!handler->process_mmap_event) + handler->process_mmap_event = process_event_stub; + if (!handler->process_comm_event) + handler->process_comm_event = process_event_stub; + if (!handler->process_fork_event) + handler->process_fork_event = process_event_stub; + if (!handler->process_exit_event) + handler->process_exit_event = process_event_stub; + if (!handler->process_lost_event) + handler->process_lost_event = process_event_stub; + if (!handler->process_read_event) + handler->process_read_event = process_event_stub; + if (!handler->process_throttle_event) + handler->process_throttle_event = process_event_stub; + if (!handler->process_unthrottle_event) + handler->process_unthrottle_event = process_event_stub; + + curr_handler = handler; +} + +static int +process_event(event_t *event, unsigned long offset, unsigned long head) +{ + trace_event(event); + + switch (event->header.type) { + case PERF_RECORD_SAMPLE: + return curr_handler->process_sample_event(event, offset, head); + case PERF_RECORD_MMAP: + return curr_handler->process_mmap_event(event, offset, head); + case PERF_RECORD_COMM: + return curr_handler->process_comm_event(event, offset, head); + case PERF_RECORD_FORK: + return curr_handler->process_fork_event(event, offset, head); + case PERF_RECORD_EXIT: + return curr_handler->process_exit_event(event, offset, head); + case PERF_RECORD_LOST: + return curr_handler->process_lost_event(event, offset, head); + case PERF_RECORD_READ: + return curr_handler->process_read_event(event, offset, head); + case PERF_RECORD_THROTTLE: + return curr_handler->process_throttle_event(event, offset, head); + case PERF_RECORD_UNTHROTTLE: + return 
curr_handler->process_unthrottle_event(event, offset, head); + default: + curr_handler->total_unknown++; + return -1; + } +} + +int mmap_dispatch_perf_file(struct perf_header **pheader, + const char *input_name, + int force, + int full_paths, + int *cwdlen, + char **cwd) +{ + int ret, rc = EXIT_FAILURE; + struct perf_header *header; + unsigned long head, shift; + unsigned long offset = 0; + struct stat input_stat; + size_t page_size; + u64 sample_type; + event_t *event; + uint32_t size; + int input; + char *buf; + + if (!curr_handler) + die("Forgot to register perf file handler"); + + page_size = getpagesize(); + + input = open(input_name, O_RDONLY); + if (input < 0) { + fprintf(stderr, " failed to open file: %s", input_name); + if (!strcmp(input_name, "perf.data")) + fprintf(stderr, " (try 'perf record' first)"); + fprintf(stderr, "\n"); + exit(-1); + } + + ret = fstat(input, &input_stat); + if (ret < 0) { + perror("failed to stat file"); + exit(-1); + } + + if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) { + fprintf(stderr, "file: %s not owned by current user or root\n", + input_name); + exit(-1); + } + + if (!input_stat.st_size) { + fprintf(stderr, "zero-sized file, nothing to do!\n"); + exit(0); + } + + *pheader = perf_header__read(input); + header = *pheader; + head = header->data_offset; + + sample_type = perf_header__sample_type(header); + + if (curr_handler->sample_type_check) + if (curr_handler->sample_type_check(sample_type) < 0) + exit(-1); + + if (load_kernel() < 0) { + perror("failed to load kernel symbols"); + return EXIT_FAILURE; + } + + if (!full_paths) { + if (getcwd(__cwd, sizeof(__cwd)) == NULL) { + perror("failed to get the current directory"); + return EXIT_FAILURE; + } + *cwd = __cwd; + *cwdlen = strlen(*cwd); + } else { + *cwd = NULL; + *cwdlen = 0; + } + + shift = page_size * (head / page_size); + offset += shift; + head -= shift; + +remap: + buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ, + MAP_SHARED, input, offset); + if (buf == MAP_FAILED) { + perror("failed to mmap file"); + exit(-1); + } + +more: + event = (event_t *)(buf + head); + + size = event->header.size; + if (!size) + size = 8; + + if (head + event->header.size >= page_size * mmap_window) { + int munmap_ret; + + shift = page_size * (head / page_size); + + munmap_ret = munmap(buf, page_size * mmap_window); + assert(munmap_ret == 0); + + offset += shift; + head -= shift; + goto remap; + } + + size = event->header.size; + + dump_printf("\n%p [%p]: event: %d\n", + (void *)(offset + head), + (void *)(long)event->header.size, + event->header.type); + + if (!size || process_event(event, offset, head) < 0) { + + dump_printf("%p [%p]: skipping unknown header type: %d\n", + (void *)(offset + head), + (void *)(long)(event->header.size), + event->header.type); + + /* + * assume we lost track of the stream, check alignment, and + * increment a single u64 in the hope to catch on again 'soon'. 
+ */ + + if (unlikely(head & 7)) + head &= ~7ULL; + + size = 8; + } + + head += size; + + if (offset + head >= header->data_offset + header->data_size) + goto done; + + if (offset + head < (unsigned long)input_stat.st_size) + goto more; + +done: + rc = EXIT_SUCCESS; + close(input); + + return rc; +} + + diff --git a/tools/perf/util/data_map.h b/tools/perf/util/data_map.h new file mode 100644 index 00000000000..716d1053b07 --- /dev/null +++ b/tools/perf/util/data_map.h @@ -0,0 +1,31 @@ +#ifndef __PERF_DATAMAP_H +#define __PERF_DATAMAP_H + +#include "event.h" +#include "header.h" + +typedef int (*event_type_handler_t)(event_t *, unsigned long, unsigned long); + +struct perf_file_handler { + event_type_handler_t process_sample_event; + event_type_handler_t process_mmap_event; + event_type_handler_t process_comm_event; + event_type_handler_t process_fork_event; + event_type_handler_t process_exit_event; + event_type_handler_t process_lost_event; + event_type_handler_t process_read_event; + event_type_handler_t process_throttle_event; + event_type_handler_t process_unthrottle_event; + int (*sample_type_check)(u64 sample_type); + unsigned long total_unknown; +}; + +void register_perf_file_handler(struct perf_file_handler *handler); +int mmap_dispatch_perf_file(struct perf_header **pheader, + const char *input_name, + int force, + int full_paths, + int *cwdlen, + char **cwd); + +#endif diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h index 437eea58ce4..02d1fa1c246 100644 --- a/tools/perf/util/debug.h +++ b/tools/perf/util/debug.h @@ -1,4 +1,6 @@ /* For debugging general purposes */ +#ifndef __PERF_DEBUG_H +#define __PERF_DEBUG_H extern int verbose; extern int dump_trace; @@ -6,3 +8,5 @@ extern int dump_trace; int eprintf(const char *fmt, ...) __attribute__((format(printf, 1, 2))); int dump_printf(const char *fmt, ...) 
__attribute__((format(printf, 1, 2))); void trace_event(event_t *event); + +#endif /* __PERF_DEBUG_H */ diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index 2c9c26d6ded..c2e62be6279 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h @@ -1,14 +1,10 @@ #ifndef __PERF_RECORD_H #define __PERF_RECORD_H + #include "../perf.h" #include "util.h" #include <linux/list.h> - -enum { - SHOW_KERNEL = 1, - SHOW_USER = 2, - SHOW_HV = 4, -}; +#include <linux/rbtree.h> /* * PERF_SAMPLE_IP | PERF_SAMPLE_TID | * @@ -78,7 +74,10 @@ typedef union event_union { } event_t; struct map { - struct list_head node; + union { + struct rb_node rb_node; + struct list_head node; + }; u64 start; u64 end; u64 pgoff; @@ -101,4 +100,4 @@ struct map *map__clone(struct map *self); int map__overlap(struct map *l, struct map *r); size_t map__fprintf(struct map *self, FILE *fp); -#endif +#endif /* __PERF_RECORD_H */ diff --git a/tools/perf/util/exec_cmd.h b/tools/perf/util/exec_cmd.h index effe25eb154..31647ac92ed 100644 --- a/tools/perf/util/exec_cmd.h +++ b/tools/perf/util/exec_cmd.h @@ -1,5 +1,5 @@ -#ifndef PERF_EXEC_CMD_H -#define PERF_EXEC_CMD_H +#ifndef __PERF_EXEC_CMD_H +#define __PERF_EXEC_CMD_H extern void perf_set_argv_exec_path(const char *exec_path); extern const char *perf_extract_argv0_path(const char *path); @@ -10,4 +10,4 @@ extern int execv_perf_cmd(const char **argv); /* NULL terminated */ extern int execl_perf_cmd(const char *cmd, ...); extern const char *system_path(const char *path); -#endif /* PERF_EXEC_CMD_H */ +#endif /* __PERF_EXEC_CMD_H */ diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index e306857b2c2..622c60e4525 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -5,6 +5,8 @@ #include "util.h" #include "header.h" +#include "../perf.h" +#include "trace-event.h" /* * Create new perf.data header attribute: @@ -46,23 +48,17 @@ void perf_header_attr__add_id(struct perf_header_attr *self, u64 id) */ struct perf_header *perf_header__new(void) { - struct perf_header *self = malloc(sizeof(*self)); + struct perf_header *self = calloc(sizeof(*self), 1); if (!self) die("nomem"); - self->frozen = 0; - - self->attrs = 0; self->size = 1; self->attr = malloc(sizeof(void *)); if (!self->attr) die("nomem"); - self->data_offset = 0; - self->data_size = 0; - return self; } @@ -145,8 +141,14 @@ struct perf_file_header { struct perf_file_section attrs; struct perf_file_section data; struct perf_file_section event_types; + DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS); }; +void perf_header__feat_trace_info(struct perf_header *header) +{ + set_bit(HEADER_TRACE_INFO, header->adds_features); +} + static void do_write(int fd, void *buf, size_t size) { while (size) { @@ -160,6 +162,32 @@ static void do_write(int fd, void *buf, size_t size) } } +static void perf_header__adds_write(struct perf_header *self, int fd) +{ + struct perf_file_section trace_sec; + u64 cur_offset = lseek(fd, 0, SEEK_CUR); + unsigned long *feat_mask = self->adds_features; + + if (test_bit(HEADER_TRACE_INFO, feat_mask)) { + /* Write trace info */ + trace_sec.offset = lseek(fd, sizeof(trace_sec), SEEK_CUR); + read_tracing_data(fd, attrs, nr_counters); + trace_sec.size = lseek(fd, 0, SEEK_CUR) - trace_sec.offset; + + /* Write trace info headers */ + lseek(fd, cur_offset, SEEK_SET); + do_write(fd, &trace_sec, sizeof(trace_sec)); + + /* + * Update cur_offset. So that other (future) + * features can set their own infos in this place. 
But if we are + * the only feature, at least that seeks to the place the data + * should begin. + */ + cur_offset = lseek(fd, trace_sec.offset + trace_sec.size, SEEK_SET); + } +}; + void perf_header__write(struct perf_header *self, int fd) { struct perf_file_header f_header; @@ -198,6 +226,7 @@ void perf_header__write(struct perf_header *self, int fd) if (events) do_write(fd, events, self->event_size); + perf_header__adds_write(self, fd); self->data_offset = lseek(fd, 0, SEEK_CUR); @@ -219,6 +248,8 @@ void perf_header__write(struct perf_header *self, int fd) }, }; + memcpy(&f_header.adds_features, &self->adds_features, sizeof(self->adds_features)); + lseek(fd, 0, SEEK_SET); do_write(fd, &f_header, sizeof(f_header)); lseek(fd, self->data_offset + self->data_size, SEEK_SET); @@ -241,6 +272,20 @@ static void do_read(int fd, void *buf, size_t size) } } +static void perf_header__adds_read(struct perf_header *self, int fd) +{ + const unsigned long *feat_mask = self->adds_features; + + if (test_bit(HEADER_TRACE_INFO, feat_mask)) { + struct perf_file_section trace_sec; + + do_read(fd, &trace_sec, sizeof(trace_sec)); + lseek(fd, trace_sec.offset, SEEK_SET); + trace_report(fd); + lseek(fd, trace_sec.offset + trace_sec.size, SEEK_SET); + } +}; + struct perf_header *perf_header__read(int fd) { struct perf_header *self = perf_header__new(); @@ -254,10 +299,16 @@ struct perf_header *perf_header__read(int fd) do_read(fd, &f_header, sizeof(f_header)); if (f_header.magic != PERF_MAGIC || - f_header.size != sizeof(f_header) || f_header.attr_size != sizeof(f_attr)) die("incompatible file format"); + if (f_header.size != sizeof(f_header)) { + /* Support the previous format */ + if (f_header.size == offsetof(typeof(f_header), adds_features)) + bitmap_zero(f_header.adds_features, HEADER_FEAT_BITS); + else + die("incompatible file format"); + } nr_attrs = f_header.attrs.size / sizeof(f_attr); lseek(fd, f_header.attrs.offset, SEEK_SET); @@ -290,6 +341,11 @@ struct perf_header *perf_header__read(int fd) do_read(fd, events, f_header.event_types.size); event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type); } + + memcpy(&self->adds_features, &f_header.adds_features, sizeof(f_header.adds_features)); + + perf_header__adds_read(self, fd); + self->event_offset = f_header.event_types.offset; self->event_size = f_header.event_types.size; diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h index a0761bc7863..2ea9dfb1236 100644 --- a/tools/perf/util/header.h +++ b/tools/perf/util/header.h @@ -1,10 +1,12 @@ -#ifndef _PERF_HEADER_H -#define _PERF_HEADER_H +#ifndef __PERF_HEADER_H +#define __PERF_HEADER_H #include "../../../include/linux/perf_event.h" #include <sys/types.h> #include "types.h" +#include <linux/bitmap.h> + struct perf_header_attr { struct perf_event_attr attr; int ids, size; @@ -12,15 +14,20 @@ struct perf_header_attr { off_t id_offset; }; +#define HEADER_TRACE_INFO 1 + +#define HEADER_FEAT_BITS 256 + struct perf_header { - int frozen; - int attrs, size; + int frozen; + int attrs, size; struct perf_header_attr **attr; - s64 attr_offset; - u64 data_offset; - u64 data_size; - u64 event_offset; - u64 event_size; + s64 attr_offset; + u64 data_offset; + u64 data_size; + u64 event_offset; + u64 event_size; + DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS); }; struct perf_header *perf_header__read(int fd); @@ -40,8 +47,8 @@ void perf_header_attr__add_id(struct perf_header_attr *self, u64 id); u64 perf_header__sample_type(struct perf_header *header); struct perf_event_attr * 
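
The size check above is what keeps the header extension backward compatible: a perf.data file written before adds_features existed records a smaller header size, equal to offsetof(struct perf_file_header, adds_features), and is then treated as having no optional feature sections at all. The idiom in isolation, shown with a made-up on-disk structure rather than the real perf_file_header:

#include <stddef.h>

struct on_disk_header {
	unsigned long long	magic;
	unsigned long long	size;		/* header size when written */
	/* fields below were added in a later format revision */
	unsigned long long	features;
};

static int check_header(struct on_disk_header *h)
{
	if (h->size == sizeof(*h))
		return 0;				/* current format */
	if (h->size == offsetof(struct on_disk_header, features)) {
		h->features = 0;			/* old format */
		return 0;
	}
	return -1;					/* incompatible */
}
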
perf_header__find_attr(u64 id, struct perf_header *header); - +void perf_header__feat_trace_info(struct perf_header *header); struct perf_header *perf_header__new(void); -#endif /* _PERF_HEADER_H */ +#endif /* __PERF_HEADER_H */ diff --git a/tools/perf/util/help.h b/tools/perf/util/help.h index 7128783637b..7f5c6dedd71 100644 --- a/tools/perf/util/help.h +++ b/tools/perf/util/help.h @@ -1,5 +1,5 @@ -#ifndef HELP_H -#define HELP_H +#ifndef __PERF_HELP_H +#define __PERF_HELP_H struct cmdnames { size_t alloc; @@ -26,4 +26,4 @@ int is_in_cmdlist(struct cmdnames *c, const char *s); void list_commands(const char *title, struct cmdnames *main_cmds, struct cmdnames *other_cmds); -#endif /* HELP_H */ +#endif /* __PERF_HELP_H */ diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c new file mode 100644 index 00000000000..7393a02fd8d --- /dev/null +++ b/tools/perf/util/hist.c @@ -0,0 +1,210 @@ +#include "hist.h" + +struct rb_root hist; +struct rb_root collapse_hists; +struct rb_root output_hists; +int callchain; + +struct callchain_param callchain_param = { + .mode = CHAIN_GRAPH_REL, + .min_percent = 0.5 +}; + +unsigned long total; +unsigned long total_mmap; +unsigned long total_comm; +unsigned long total_fork; +unsigned long total_unknown; +unsigned long total_lost; + +/* + * histogram, sorted on item, collects counts + */ + +struct hist_entry *__hist_entry__add(struct thread *thread, struct map *map, + struct symbol *sym, + struct symbol *sym_parent, + u64 ip, u64 count, char level, bool *hit) +{ + struct rb_node **p = &hist.rb_node; + struct rb_node *parent = NULL; + struct hist_entry *he; + struct hist_entry entry = { + .thread = thread, + .map = map, + .sym = sym, + .ip = ip, + .level = level, + .count = count, + .parent = sym_parent, + }; + int cmp; + + while (*p != NULL) { + parent = *p; + he = rb_entry(parent, struct hist_entry, rb_node); + + cmp = hist_entry__cmp(&entry, he); + + if (!cmp) { + *hit = true; + return he; + } + + if (cmp < 0) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + + he = malloc(sizeof(*he)); + if (!he) + return NULL; + *he = entry; + rb_link_node(&he->rb_node, parent, p); + rb_insert_color(&he->rb_node, &hist); + *hit = false; + return he; +} + +int64_t +hist_entry__cmp(struct hist_entry *left, struct hist_entry *right) +{ + struct sort_entry *se; + int64_t cmp = 0; + + list_for_each_entry(se, &hist_entry__sort_list, list) { + cmp = se->cmp(left, right); + if (cmp) + break; + } + + return cmp; +} + +int64_t +hist_entry__collapse(struct hist_entry *left, struct hist_entry *right) +{ + struct sort_entry *se; + int64_t cmp = 0; + + list_for_each_entry(se, &hist_entry__sort_list, list) { + int64_t (*f)(struct hist_entry *, struct hist_entry *); + + f = se->collapse ?: se->cmp; + + cmp = f(left, right); + if (cmp) + break; + } + + return cmp; +} + +void hist_entry__free(struct hist_entry *he) +{ + free(he); +} + +/* + * collapse the histogram + */ + +void collapse__insert_entry(struct hist_entry *he) +{ + struct rb_node **p = &collapse_hists.rb_node; + struct rb_node *parent = NULL; + struct hist_entry *iter; + int64_t cmp; + + while (*p != NULL) { + parent = *p; + iter = rb_entry(parent, struct hist_entry, rb_node); + + cmp = hist_entry__collapse(iter, he); + + if (!cmp) { + iter->count += he->count; + hist_entry__free(he); + return; + } + + if (cmp < 0) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + + rb_link_node(&he->rb_node, parent, p); + rb_insert_color(&he->rb_node, &collapse_hists); +} + +void collapse__resort(void) +{ + struct rb_node 
*next; + struct hist_entry *n; + + if (!sort__need_collapse) + return; + + next = rb_first(&hist); + while (next) { + n = rb_entry(next, struct hist_entry, rb_node); + next = rb_next(&n->rb_node); + + rb_erase(&n->rb_node, &hist); + collapse__insert_entry(n); + } +} + +/* + * reverse the map, sort on count. + */ + +void output__insert_entry(struct hist_entry *he, u64 min_callchain_hits) +{ + struct rb_node **p = &output_hists.rb_node; + struct rb_node *parent = NULL; + struct hist_entry *iter; + + if (callchain) + callchain_param.sort(&he->sorted_chain, &he->callchain, + min_callchain_hits, &callchain_param); + + while (*p != NULL) { + parent = *p; + iter = rb_entry(parent, struct hist_entry, rb_node); + + if (he->count > iter->count) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + + rb_link_node(&he->rb_node, parent, p); + rb_insert_color(&he->rb_node, &output_hists); +} + +void output__resort(u64 total_samples) +{ + struct rb_node *next; + struct hist_entry *n; + struct rb_root *tree = &hist; + u64 min_callchain_hits; + + min_callchain_hits = + total_samples * (callchain_param.min_percent / 100); + + if (sort__need_collapse) + tree = &collapse_hists; + + next = rb_first(tree); + + while (next) { + n = rb_entry(next, struct hist_entry, rb_node); + next = rb_next(&n->rb_node); + + rb_erase(&n->rb_node, tree); + output__insert_entry(n, min_callchain_hits); + } +} diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h new file mode 100644 index 00000000000..ac2149c559b --- /dev/null +++ b/tools/perf/util/hist.h @@ -0,0 +1,50 @@ +#ifndef __PERF_HIST_H +#define __PERF_HIST_H +#include "../builtin.h" + +#include "util.h" + +#include "color.h" +#include <linux/list.h> +#include "cache.h" +#include <linux/rbtree.h> +#include "symbol.h" +#include "string.h" +#include "callchain.h" +#include "strlist.h" +#include "values.h" + +#include "../perf.h" +#include "debug.h" +#include "header.h" + +#include "parse-options.h" +#include "parse-events.h" + +#include "thread.h" +#include "sort.h" + +extern struct rb_root hist; +extern struct rb_root collapse_hists; +extern struct rb_root output_hists; +extern int callchain; +extern struct callchain_param callchain_param; +extern unsigned long total; +extern unsigned long total_mmap; +extern unsigned long total_comm; +extern unsigned long total_fork; +extern unsigned long total_unknown; +extern unsigned long total_lost; + +struct hist_entry *__hist_entry__add(struct thread *thread, struct map *map, + struct symbol *sym, struct symbol *parent, + u64 ip, u64 count, char level, bool *hit); +extern int64_t hist_entry__cmp(struct hist_entry *, struct hist_entry *); +extern int64_t hist_entry__collapse(struct hist_entry *, struct hist_entry *); +extern void hist_entry__free(struct hist_entry *); +extern void collapse__insert_entry(struct hist_entry *); +extern void collapse__resort(void); +extern void output__insert_entry(struct hist_entry *, u64); +extern void output__resort(u64); + +#endif /* __PERF_HIST_H */ diff --git a/tools/perf/util/include/asm/asm-offsets.h b/tools/perf/util/include/asm/asm-offsets.h new file mode 100644 index 00000000000..ed538942523 --- /dev/null +++ b/tools/perf/util/include/asm/asm-offsets.h @@ -0,0 +1 @@ +/* stub */ diff --git a/tools/perf/util/include/asm/bitops.h b/tools/perf/util/include/asm/bitops.h new file mode 100644 index 00000000000..fbe4d921291 --- /dev/null +++ b/tools/perf/util/include/asm/bitops.h @@ -0,0 +1,6 @@ +#include "../../../../include/asm-generic/bitops/__fls.h" +#include 
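
The histogram code collected here keeps three rb-trees: hist accumulates one entry per distinct sort key as samples are added, collapse_hists optionally merges entries that compare equal under the collapse rules, and output_hists holds the final count-sorted entries for display. min_callchain_hits prunes call chains below min_percent of the total samples; with 200,000 samples and the default 0.5 percent that is 1,000 hits. A report-style user would drive the pipeline roughly as follows (a simplified sketch, not the exact calls made by builtin-report):

	bool hit;
	struct hist_entry *he;

	/* for each sample, with count/level taken from the event: */
	he = __hist_entry__add(thread, map, sym, NULL, ip, count, level, &hit);
	if (he == NULL)
		return -1;
	total += count;

	/* once the whole file has been processed: */
	collapse__resort();
	output__resort(total);
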
"../../../../include/asm-generic/bitops/fls.h" +#include "../../../../include/asm-generic/bitops/fls64.h" +#include "../../../../include/asm-generic/bitops/__ffs.h" +#include "../../../../include/asm-generic/bitops/ffz.h" +#include "../../../../include/asm-generic/bitops/hweight.h" diff --git a/tools/perf/util/include/asm/byteorder.h b/tools/perf/util/include/asm/byteorder.h new file mode 100644 index 00000000000..39f367cfaf5 --- /dev/null +++ b/tools/perf/util/include/asm/byteorder.h @@ -0,0 +1,2 @@ +#include "../asm/types.h" +#include "../../../../include/linux/swab.h" diff --git a/tools/perf/util/include/asm/swab.h b/tools/perf/util/include/asm/swab.h new file mode 100644 index 00000000000..ed538942523 --- /dev/null +++ b/tools/perf/util/include/asm/swab.h @@ -0,0 +1 @@ +/* stub */ diff --git a/tools/perf/util/include/asm/types.h b/tools/perf/util/include/asm/types.h new file mode 100644 index 00000000000..06703c6cd50 --- /dev/null +++ b/tools/perf/util/include/asm/types.h @@ -0,0 +1,17 @@ +#ifndef PERF_ASM_TYPES_H_ +#define PERF_ASM_TYPES_H_ + +#include <linux/compiler.h> +#include "../../types.h" +#include <sys/types.h> + +/* CHECKME: Not sure both always match */ +#define BITS_PER_LONG __WORDSIZE + +typedef u64 __u64; +typedef u32 __u32; +typedef u16 __u16; +typedef u8 __u8; +typedef s64 __s64; + +#endif /* PERF_ASM_TYPES_H_ */ diff --git a/tools/perf/util/include/asm/uaccess.h b/tools/perf/util/include/asm/uaccess.h new file mode 100644 index 00000000000..d0f72b8fcc3 --- /dev/null +++ b/tools/perf/util/include/asm/uaccess.h @@ -0,0 +1,14 @@ +#ifndef _PERF_ASM_UACCESS_H_ +#define _PERF_ASM_UACCESS_H_ + +#define __get_user(src, dest) \ +({ \ + (src) = *dest; \ + 0; \ +}) + +#define get_user __get_user + +#define access_ok(type, addr, size) 1 + +#endif diff --git a/tools/perf/util/include/linux/bitmap.h b/tools/perf/util/include/linux/bitmap.h new file mode 100644 index 00000000000..821c1033bcc --- /dev/null +++ b/tools/perf/util/include/linux/bitmap.h @@ -0,0 +1,2 @@ +#include "../../../../include/linux/bitmap.h" +#include "../../../../include/asm-generic/bitops/find.h" diff --git a/tools/perf/util/include/linux/bitops.h b/tools/perf/util/include/linux/bitops.h new file mode 100644 index 00000000000..ace57c36d1d --- /dev/null +++ b/tools/perf/util/include/linux/bitops.h @@ -0,0 +1,27 @@ +#ifndef _PERF_LINUX_BITOPS_H_ +#define _PERF_LINUX_BITOPS_H_ + +#define __KERNEL__ + +#define CONFIG_GENERIC_FIND_NEXT_BIT +#define CONFIG_GENERIC_FIND_FIRST_BIT +#include "../../../../include/linux/bitops.h" + +static inline void set_bit(int nr, unsigned long *addr) +{ + addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG); +} + +static __always_inline int test_bit(unsigned int nr, const unsigned long *addr) +{ + return ((1UL << (nr % BITS_PER_LONG)) & + (((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0; +} + +unsigned long generic_find_next_zero_le_bit(const unsigned long *addr, unsigned + long size, unsigned long offset); + +unsigned long generic_find_next_le_bit(const unsigned long *addr, unsigned + long size, unsigned long offset); + +#endif diff --git a/tools/perf/util/include/linux/compiler.h b/tools/perf/util/include/linux/compiler.h new file mode 100644 index 00000000000..dfb0713ed47 --- /dev/null +++ b/tools/perf/util/include/linux/compiler.h @@ -0,0 +1,10 @@ +#ifndef _PERF_LINUX_COMPILER_H_ +#define _PERF_LINUX_COMPILER_H_ + +#ifndef __always_inline +#define __always_inline inline +#endif +#define __user +#define __attribute_const__ + +#endif diff --git 
a/tools/perf/util/include/linux/ctype.h b/tools/perf/util/include/linux/ctype.h new file mode 100644 index 00000000000..bae5783282e --- /dev/null +++ b/tools/perf/util/include/linux/ctype.h @@ -0,0 +1 @@ +#include "../../../../include/linux/ctype.h" diff --git a/tools/perf/util/include/linux/kernel.h b/tools/perf/util/include/linux/kernel.h index a6b87390cb5..4b9204d9b26 100644 --- a/tools/perf/util/include/linux/kernel.h +++ b/tools/perf/util/include/linux/kernel.h @@ -1,6 +1,16 @@ #ifndef PERF_LINUX_KERNEL_H_ #define PERF_LINUX_KERNEL_H_ +#include <stdarg.h> +#include <stdio.h> +#include <stdlib.h> +#include <assert.h> + +#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) + +#define ALIGN(x,a) __ALIGN_MASK(x,(typeof(x))(a)-1) +#define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask)) + #ifndef offsetof #define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) #endif @@ -26,4 +36,53 @@ _max1 > _max2 ? _max1 : _max2; }) #endif +#ifndef min +#define min(x, y) ({ \ + typeof(x) _min1 = (x); \ + typeof(y) _min2 = (y); \ + (void) (&_min1 == &_min2); \ + _min1 < _min2 ? _min1 : _min2; }) +#endif + +#ifndef BUG_ON +#define BUG_ON(cond) assert(!(cond)) +#endif + +/* + * Both need more care to handle endianness + * (Don't use bitmap_copy_le() for now) + */ +#define cpu_to_le64(x) (x) +#define cpu_to_le32(x) (x) + +static inline int +vscnprintf(char *buf, size_t size, const char *fmt, va_list args) +{ + int i; + ssize_t ssize = size; + + i = vsnprintf(buf, size, fmt, args); + + return (i >= ssize) ? (ssize - 1) : i; +} + +static inline int scnprintf(char * buf, size_t size, const char * fmt, ...) +{ + va_list args; + ssize_t ssize = size; + int i; + + va_start(args, fmt); + i = vsnprintf(buf, size, fmt, args); + va_end(args); + + return (i >= ssize) ? (ssize - 1) : i; +} + +static inline unsigned long +simple_strtoul(const char *nptr, char **endptr, int base) +{ + return strtoul(nptr, endptr, base); +} + #endif diff --git a/tools/perf/util/include/linux/string.h b/tools/perf/util/include/linux/string.h new file mode 100644 index 00000000000..3b2f5900276 --- /dev/null +++ b/tools/perf/util/include/linux/string.h @@ -0,0 +1 @@ +#include <string.h> diff --git a/tools/perf/util/include/linux/types.h b/tools/perf/util/include/linux/types.h new file mode 100644 index 00000000000..858a38d0843 --- /dev/null +++ b/tools/perf/util/include/linux/types.h @@ -0,0 +1,7 @@ +#ifndef _PERF_LINUX_TYPES_H_ +#define _PERF_LINUX_TYPES_H_ + +#define DECLARE_BITMAP(name,bits) \ + unsigned long name[BITS_TO_LONGS(bits)] + +#endif diff --git a/tools/perf/util/levenshtein.h b/tools/perf/util/levenshtein.h index 0173abeef52..b0fcb6d8a88 100644 --- a/tools/perf/util/levenshtein.h +++ b/tools/perf/util/levenshtein.h @@ -1,8 +1,8 @@ -#ifndef LEVENSHTEIN_H -#define LEVENSHTEIN_H +#ifndef __PERF_LEVENSHTEIN_H +#define __PERF_LEVENSHTEIN_H int levenshtein(const char *string1, const char *string2, int swap_penalty, int substition_penalty, int insertion_penalty, int deletion_penalty); -#endif +#endif /* __PERF_LEVENSHTEIN_H */ diff --git a/tools/perf/util/module.c b/tools/perf/util/module.c deleted file mode 100644 index 0d8c85defcd..00000000000 --- a/tools/perf/util/module.c +++ /dev/null @@ -1,545 +0,0 @@ -#include "util.h" -#include "../perf.h" -#include "string.h" -#include "module.h" - -#include <libelf.h> -#include <libgen.h> -#include <gelf.h> -#include <elf.h> -#include <dirent.h> -#include <sys/utsname.h> - -static unsigned int crc32(const char *p, unsigned int len) -{ - int i; - unsigned int crc = 0; - - while (len--) { - crc 
^= *p++; - for (i = 0; i < 8; i++) - crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0); - } - return crc; -} - -/* module section methods */ - -struct sec_dso *sec_dso__new_dso(const char *name) -{ - struct sec_dso *self = malloc(sizeof(*self) + strlen(name) + 1); - - if (self != NULL) { - strcpy(self->name, name); - self->secs = RB_ROOT; - self->find_section = sec_dso__find_section; - } - - return self; -} - -static void sec_dso__delete_section(struct section *self) -{ - free(((void *)self)); -} - -void sec_dso__delete_sections(struct sec_dso *self) -{ - struct section *pos; - struct rb_node *next = rb_first(&self->secs); - - while (next) { - pos = rb_entry(next, struct section, rb_node); - next = rb_next(&pos->rb_node); - rb_erase(&pos->rb_node, &self->secs); - sec_dso__delete_section(pos); - } -} - -void sec_dso__delete_self(struct sec_dso *self) -{ - sec_dso__delete_sections(self); - free(self); -} - -static void sec_dso__insert_section(struct sec_dso *self, struct section *sec) -{ - struct rb_node **p = &self->secs.rb_node; - struct rb_node *parent = NULL; - const u64 hash = sec->hash; - struct section *s; - - while (*p != NULL) { - parent = *p; - s = rb_entry(parent, struct section, rb_node); - if (hash < s->hash) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; - } - rb_link_node(&sec->rb_node, parent, p); - rb_insert_color(&sec->rb_node, &self->secs); -} - -struct section *sec_dso__find_section(struct sec_dso *self, const char *name) -{ - struct rb_node *n; - u64 hash; - int len; - - if (self == NULL) - return NULL; - - len = strlen(name); - hash = crc32(name, len); - - n = self->secs.rb_node; - - while (n) { - struct section *s = rb_entry(n, struct section, rb_node); - - if (hash < s->hash) - n = n->rb_left; - else if (hash > s->hash) - n = n->rb_right; - else { - if (!strcmp(name, s->name)) - return s; - else - n = rb_next(&s->rb_node); - } - } - - return NULL; -} - -static size_t sec_dso__fprintf_section(struct section *self, FILE *fp) -{ - return fprintf(fp, "name:%s vma:%llx path:%s\n", - self->name, self->vma, self->path); -} - -size_t sec_dso__fprintf(struct sec_dso *self, FILE *fp) -{ - size_t ret = fprintf(fp, "dso: %s\n", self->name); - - struct rb_node *nd; - for (nd = rb_first(&self->secs); nd; nd = rb_next(nd)) { - struct section *pos = rb_entry(nd, struct section, rb_node); - ret += sec_dso__fprintf_section(pos, fp); - } - - return ret; -} - -static struct section *section__new(const char *name, const char *path) -{ - struct section *self = calloc(1, sizeof(*self)); - - if (!self) - goto out_failure; - - self->name = calloc(1, strlen(name) + 1); - if (!self->name) - goto out_failure; - - self->path = calloc(1, strlen(path) + 1); - if (!self->path) - goto out_failure; - - strcpy(self->name, name); - strcpy(self->path, path); - self->hash = crc32(self->name, strlen(name)); - - return self; - -out_failure: - if (self) { - if (self->name) - free(self->name); - if (self->path) - free(self->path); - free(self); - } - - return NULL; -} - -/* module methods */ - -struct mod_dso *mod_dso__new_dso(const char *name) -{ - struct mod_dso *self = malloc(sizeof(*self) + strlen(name) + 1); - - if (self != NULL) { - strcpy(self->name, name); - self->mods = RB_ROOT; - self->find_module = mod_dso__find_module; - } - - return self; -} - -static void mod_dso__delete_module(struct module *self) -{ - free(((void *)self)); -} - -void mod_dso__delete_modules(struct mod_dso *self) -{ - struct module *pos; - struct rb_node *next = rb_first(&self->mods); - - while (next) { - pos = 
rb_entry(next, struct module, rb_node); - next = rb_next(&pos->rb_node); - rb_erase(&pos->rb_node, &self->mods); - mod_dso__delete_module(pos); - } -} - -void mod_dso__delete_self(struct mod_dso *self) -{ - mod_dso__delete_modules(self); - free(self); -} - -static void mod_dso__insert_module(struct mod_dso *self, struct module *mod) -{ - struct rb_node **p = &self->mods.rb_node; - struct rb_node *parent = NULL; - const u64 hash = mod->hash; - struct module *m; - - while (*p != NULL) { - parent = *p; - m = rb_entry(parent, struct module, rb_node); - if (hash < m->hash) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; - } - rb_link_node(&mod->rb_node, parent, p); - rb_insert_color(&mod->rb_node, &self->mods); -} - -struct module *mod_dso__find_module(struct mod_dso *self, const char *name) -{ - struct rb_node *n; - u64 hash; - int len; - - if (self == NULL) - return NULL; - - len = strlen(name); - hash = crc32(name, len); - - n = self->mods.rb_node; - - while (n) { - struct module *m = rb_entry(n, struct module, rb_node); - - if (hash < m->hash) - n = n->rb_left; - else if (hash > m->hash) - n = n->rb_right; - else { - if (!strcmp(name, m->name)) - return m; - else - n = rb_next(&m->rb_node); - } - } - - return NULL; -} - -static size_t mod_dso__fprintf_module(struct module *self, FILE *fp) -{ - return fprintf(fp, "name:%s path:%s\n", self->name, self->path); -} - -size_t mod_dso__fprintf(struct mod_dso *self, FILE *fp) -{ - struct rb_node *nd; - size_t ret; - - ret = fprintf(fp, "dso: %s\n", self->name); - - for (nd = rb_first(&self->mods); nd; nd = rb_next(nd)) { - struct module *pos = rb_entry(nd, struct module, rb_node); - - ret += mod_dso__fprintf_module(pos, fp); - } - - return ret; -} - -static struct module *module__new(const char *name, const char *path) -{ - struct module *self = calloc(1, sizeof(*self)); - - if (!self) - goto out_failure; - - self->name = calloc(1, strlen(name) + 1); - if (!self->name) - goto out_failure; - - self->path = calloc(1, strlen(path) + 1); - if (!self->path) - goto out_failure; - - strcpy(self->name, name); - strcpy(self->path, path); - self->hash = crc32(self->name, strlen(name)); - - return self; - -out_failure: - if (self) { - if (self->name) - free(self->name); - if (self->path) - free(self->path); - free(self); - } - - return NULL; -} - -static int mod_dso__load_sections(struct module *mod) -{ - int count = 0, path_len; - struct dirent *entry; - char *line = NULL; - char *dir_path; - DIR *dir; - size_t n; - - path_len = strlen("/sys/module/"); - path_len += strlen(mod->name); - path_len += strlen("/sections/"); - - dir_path = calloc(1, path_len + 1); - if (dir_path == NULL) - goto out_failure; - - strcat(dir_path, "/sys/module/"); - strcat(dir_path, mod->name); - strcat(dir_path, "/sections/"); - - dir = opendir(dir_path); - if (dir == NULL) - goto out_free; - - while ((entry = readdir(dir))) { - struct section *section; - char *path, *vma; - int line_len; - FILE *file; - - if (!strcmp(".", entry->d_name) || !strcmp("..", entry->d_name)) - continue; - - path = calloc(1, path_len + strlen(entry->d_name) + 1); - if (path == NULL) - break; - strcat(path, dir_path); - strcat(path, entry->d_name); - - file = fopen(path, "r"); - if (file == NULL) { - free(path); - break; - } - - line_len = getline(&line, &n, file); - if (line_len < 0) { - free(path); - fclose(file); - break; - } - - if (!line) { - free(path); - fclose(file); - break; - } - - line[--line_len] = '\0'; /* \n */ - - vma = strstr(line, "0x"); - if (!vma) { - free(path); - fclose(file); - 
break; - } - vma += 2; - - section = section__new(entry->d_name, path); - if (!section) { - fprintf(stderr, "load_sections: allocation error\n"); - free(path); - fclose(file); - break; - } - - hex2u64(vma, §ion->vma); - sec_dso__insert_section(mod->sections, section); - - free(path); - fclose(file); - count++; - } - - closedir(dir); - free(line); - free(dir_path); - - return count; - -out_free: - free(dir_path); - -out_failure: - return count; -} - -static int mod_dso__load_module_paths(struct mod_dso *self) -{ - struct utsname uts; - int count = 0, len, err = -1; - char *line = NULL; - FILE *file; - char *dpath, *dir; - size_t n; - - if (uname(&uts) < 0) - return err; - - len = strlen("/lib/modules/"); - len += strlen(uts.release); - len += strlen("/modules.dep"); - - dpath = calloc(1, len + 1); - if (dpath == NULL) - return err; - - strcat(dpath, "/lib/modules/"); - strcat(dpath, uts.release); - strcat(dpath, "/modules.dep"); - - file = fopen(dpath, "r"); - if (file == NULL) - goto out_failure; - - dir = dirname(dpath); - if (!dir) - goto out_failure; - strcat(dir, "/"); - - while (!feof(file)) { - struct module *module; - char *name, *path, *tmp; - FILE *modfile; - int line_len; - - line_len = getline(&line, &n, file); - if (line_len < 0) - break; - - if (!line) - break; - - line[--line_len] = '\0'; /* \n */ - - path = strchr(line, ':'); - if (!path) - break; - *path = '\0'; - - path = strdup(line); - if (!path) - break; - - if (!strstr(path, dir)) { - if (strncmp(path, "kernel/", 7)) - break; - - free(path); - path = calloc(1, strlen(dir) + strlen(line) + 1); - if (!path) - break; - strcat(path, dir); - strcat(path, line); - } - - modfile = fopen(path, "r"); - if (modfile == NULL) - break; - fclose(modfile); - - name = strdup(path); - if (!name) - break; - - name = strtok(name, "/"); - tmp = name; - - while (tmp) { - tmp = strtok(NULL, "/"); - if (tmp) - name = tmp; - } - - name = strsep(&name, "."); - if (!name) - break; - - /* Quirk: replace '-' with '_' in all modules */ - for (len = strlen(name); len; len--) { - if (*(name+len) == '-') - *(name+len) = '_'; - } - - module = module__new(name, path); - if (!module) - break; - mod_dso__insert_module(self, module); - - module->sections = sec_dso__new_dso("sections"); - if (!module->sections) - break; - - module->active = mod_dso__load_sections(module); - - if (module->active > 0) - count++; - } - - if (feof(file)) - err = count; - else - fprintf(stderr, "load_module_paths: modules.dep parsing failure!\n"); - -out_failure: - if (dpath) - free(dpath); - if (file) - fclose(file); - if (line) - free(line); - - return err; -} - -int mod_dso__load_modules(struct mod_dso *dso) -{ - int err; - - err = mod_dso__load_module_paths(dso); - - return err; -} diff --git a/tools/perf/util/module.h b/tools/perf/util/module.h deleted file mode 100644 index 8a592ef641c..00000000000 --- a/tools/perf/util/module.h +++ /dev/null @@ -1,53 +0,0 @@ -#ifndef _PERF_MODULE_ -#define _PERF_MODULE_ 1 - -#include <linux/types.h> -#include "../types.h" -#include <linux/list.h> -#include <linux/rbtree.h> - -struct section { - struct rb_node rb_node; - u64 hash; - u64 vma; - char *name; - char *path; -}; - -struct sec_dso { - struct list_head node; - struct rb_root secs; - struct section *(*find_section)(struct sec_dso *, const char *name); - char name[0]; -}; - -struct module { - struct rb_node rb_node; - u64 hash; - char *name; - char *path; - struct sec_dso *sections; - int active; -}; - -struct mod_dso { - struct list_head node; - struct rb_root mods; - struct 
module *(*find_module)(struct mod_dso *, const char *name); - char name[0]; -}; - -struct sec_dso *sec_dso__new_dso(const char *name); -void sec_dso__delete_sections(struct sec_dso *self); -void sec_dso__delete_self(struct sec_dso *self); -size_t sec_dso__fprintf(struct sec_dso *self, FILE *fp); -struct section *sec_dso__find_section(struct sec_dso *self, const char *name); - -struct mod_dso *mod_dso__new_dso(const char *name); -void mod_dso__delete_modules(struct mod_dso *self); -void mod_dso__delete_self(struct mod_dso *self); -size_t mod_dso__fprintf(struct mod_dso *self, FILE *fp); -struct module *mod_dso__find_module(struct mod_dso *self, const char *name); -int mod_dso__load_modules(struct mod_dso *dso); - -#endif /* _PERF_MODULE_ */ diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 8cfb48cbbea..b097570e962 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -8,9 +8,10 @@ #include "cache.h" #include "header.h" -int nr_counters; +int nr_counters; struct perf_event_attr attrs[MAX_COUNTERS]; +char *filters[MAX_COUNTERS]; struct event_symbol { u8 type; @@ -708,7 +709,6 @@ static void store_event_type(const char *orgname) perf_header__push_event(id, orgname); } - int parse_events(const struct option *opt __used, const char *str, int unset __used) { struct perf_event_attr attr; @@ -745,6 +745,28 @@ int parse_events(const struct option *opt __used, const char *str, int unset __u return 0; } +int parse_filter(const struct option *opt __used, const char *str, + int unset __used) +{ + int i = nr_counters - 1; + int len = strlen(str); + + if (i < 0 || attrs[i].type != PERF_TYPE_TRACEPOINT) { + fprintf(stderr, + "-F option should follow a -e tracepoint option\n"); + return -1; + } + + filters[i] = malloc(len + 1); + if (!filters[i]) { + fprintf(stderr, "not enough memory to hold filter string\n"); + return -1; + } + strcpy(filters[i], str); + + return 0; +} + static const char * const event_type_descriptors[] = { "", "Hardware event", diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h index 30c60811284..b8c1f64bc93 100644 --- a/tools/perf/util/parse-events.h +++ b/tools/perf/util/parse-events.h @@ -1,5 +1,5 @@ -#ifndef _PARSE_EVENTS_H -#define _PARSE_EVENTS_H +#ifndef __PERF_PARSE_EVENTS_H +#define __PERF_PARSE_EVENTS_H /* * Parse symbolic events/counts passed in as options: */ @@ -17,11 +17,13 @@ extern struct tracepoint_path *tracepoint_id_to_path(u64 config); extern int nr_counters; extern struct perf_event_attr attrs[MAX_COUNTERS]; +extern char *filters[MAX_COUNTERS]; extern const char *event_name(int ctr); extern const char *__event_name(int type, u64 config); extern int parse_events(const struct option *opt, const char *str, int unset); +extern int parse_filter(const struct option *opt, const char *str, int unset); #define EVENTS_HELP_MAX (128*1024) @@ -31,4 +33,4 @@ extern char debugfs_path[]; extern int valid_debugfs_mount(const char *debugfs); -#endif /* _PARSE_EVENTS_H */ +#endif /* __PERF_PARSE_EVENTS_H */ diff --git a/tools/perf/util/parse-options.h b/tools/perf/util/parse-options.h index 2ee248ff27e..948805af43c 100644 --- a/tools/perf/util/parse-options.h +++ b/tools/perf/util/parse-options.h @@ -1,5 +1,5 @@ -#ifndef PARSE_OPTIONS_H -#define PARSE_OPTIONS_H +#ifndef __PERF_PARSE_OPTIONS_H +#define __PERF_PARSE_OPTIONS_H enum parse_opt_type { /* special types */ @@ -174,4 +174,4 @@ extern int parse_opt_verbosity_cb(const struct option *, const char *, int); extern const char 
*parse_options_fix_filename(const char *prefix, const char *file); -#endif +#endif /* __PERF_PARSE_OPTIONS_H */ diff --git a/tools/perf/util/quote.h b/tools/perf/util/quote.h index a5454a1d1c1..b6a01973391 100644 --- a/tools/perf/util/quote.h +++ b/tools/perf/util/quote.h @@ -1,5 +1,5 @@ -#ifndef QUOTE_H -#define QUOTE_H +#ifndef __PERF_QUOTE_H +#define __PERF_QUOTE_H #include <stddef.h> #include <stdio.h> @@ -65,4 +65,4 @@ extern void perl_quote_print(FILE *stream, const char *src); extern void python_quote_print(FILE *stream, const char *src); extern void tcl_quote_print(FILE *stream, const char *src); -#endif +#endif /* __PERF_QUOTE_H */ diff --git a/tools/perf/util/run-command.h b/tools/perf/util/run-command.h index cc1837deba8..d79028727ce 100644 --- a/tools/perf/util/run-command.h +++ b/tools/perf/util/run-command.h @@ -1,5 +1,5 @@ -#ifndef RUN_COMMAND_H -#define RUN_COMMAND_H +#ifndef __PERF_RUN_COMMAND_H +#define __PERF_RUN_COMMAND_H enum { ERR_RUN_COMMAND_FORK = 10000, @@ -85,4 +85,4 @@ struct async { int start_async(struct async *async); int finish_async(struct async *async); -#endif +#endif /* __PERF_RUN_COMMAND_H */ diff --git a/tools/perf/util/sigchain.h b/tools/perf/util/sigchain.h index 618083bce0c..1a53c11265f 100644 --- a/tools/perf/util/sigchain.h +++ b/tools/perf/util/sigchain.h @@ -1,5 +1,5 @@ -#ifndef SIGCHAIN_H -#define SIGCHAIN_H +#ifndef __PERF_SIGCHAIN_H +#define __PERF_SIGCHAIN_H typedef void (*sigchain_fun)(int); @@ -8,4 +8,4 @@ int sigchain_pop(int sig); void sigchain_push_common(sigchain_fun f); -#endif /* SIGCHAIN_H */ +#endif /* __PERF_SIGCHAIN_H */ diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c new file mode 100644 index 00000000000..40c9acd41ca --- /dev/null +++ b/tools/perf/util/sort.c @@ -0,0 +1,276 @@ +#include "sort.h" + +regex_t parent_regex; +char default_parent_pattern[] = "^sys_|^do_page_fault"; +char *parent_pattern = default_parent_pattern; +char default_sort_order[] = "comm,dso,symbol"; +char *sort_order = default_sort_order; +int sort__need_collapse = 0; +int sort__has_parent = 0; + +unsigned int dsos__col_width; +unsigned int comms__col_width; +unsigned int threads__col_width; +static unsigned int parent_symbol__col_width; +char * field_sep; + +LIST_HEAD(hist_entry__sort_list); + +struct sort_entry sort_thread = { + .header = "Command: Pid", + .cmp = sort__thread_cmp, + .print = sort__thread_print, + .width = &threads__col_width, +}; + +struct sort_entry sort_comm = { + .header = "Command", + .cmp = sort__comm_cmp, + .collapse = sort__comm_collapse, + .print = sort__comm_print, + .width = &comms__col_width, +}; + +struct sort_entry sort_dso = { + .header = "Shared Object", + .cmp = sort__dso_cmp, + .print = sort__dso_print, + .width = &dsos__col_width, +}; + +struct sort_entry sort_sym = { + .header = "Symbol", + .cmp = sort__sym_cmp, + .print = sort__sym_print, +}; + +struct sort_entry sort_parent = { + .header = "Parent symbol", + .cmp = sort__parent_cmp, + .print = sort__parent_print, + .width = &parent_symbol__col_width, +}; + +struct sort_dimension { + const char *name; + struct sort_entry *entry; + int taken; +}; + +static struct sort_dimension sort_dimensions[] = { + { .name = "pid", .entry = &sort_thread, }, + { .name = "comm", .entry = &sort_comm, }, + { .name = "dso", .entry = &sort_dso, }, + { .name = "symbol", .entry = &sort_sym, }, + { .name = "parent", .entry = &sort_parent, }, +}; + +int64_t cmp_null(void *l, void *r) +{ + if (!l && !r) + return 0; + else if (!l) + return -1; + else + return 1; +} + +/* --sort pid 
*/ + +int64_t +sort__thread_cmp(struct hist_entry *left, struct hist_entry *right) +{ + return right->thread->pid - left->thread->pid; +} + +int repsep_fprintf(FILE *fp, const char *fmt, ...) +{ + int n; + va_list ap; + + va_start(ap, fmt); + if (!field_sep) + n = vfprintf(fp, fmt, ap); + else { + char *bf = NULL; + n = vasprintf(&bf, fmt, ap); + if (n > 0) { + char *sep = bf; + + while (1) { + sep = strchr(sep, *field_sep); + if (sep == NULL) + break; + *sep = '.'; + } + } + fputs(bf, fp); + free(bf); + } + va_end(ap); + return n; +} + +size_t +sort__thread_print(FILE *fp, struct hist_entry *self, unsigned int width) +{ + return repsep_fprintf(fp, "%*s:%5d", width - 6, + self->thread->comm ?: "", self->thread->pid); +} + +size_t +sort__comm_print(FILE *fp, struct hist_entry *self, unsigned int width) +{ + return repsep_fprintf(fp, "%*s", width, self->thread->comm); +} + +/* --sort dso */ + +int64_t +sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) +{ + struct dso *dso_l = left->map ? left->map->dso : NULL; + struct dso *dso_r = right->map ? right->map->dso : NULL; + const char *dso_name_l, *dso_name_r; + + if (!dso_l || !dso_r) + return cmp_null(dso_l, dso_r); + + if (verbose) { + dso_name_l = dso_l->long_name; + dso_name_r = dso_r->long_name; + } else { + dso_name_l = dso_l->short_name; + dso_name_r = dso_r->short_name; + } + + return strcmp(dso_name_l, dso_name_r); +} + +size_t +sort__dso_print(FILE *fp, struct hist_entry *self, unsigned int width) +{ + if (self->map && self->map->dso) { + const char *dso_name = !verbose ? self->map->dso->short_name : + self->map->dso->long_name; + return repsep_fprintf(fp, "%-*s", width, dso_name); + } + + return repsep_fprintf(fp, "%*llx", width, (u64)self->ip); +} + +/* --sort symbol */ + +int64_t +sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) +{ + u64 ip_l, ip_r; + + if (left->sym == right->sym) + return 0; + + ip_l = left->sym ? left->sym->start : left->ip; + ip_r = right->sym ? right->sym->start : right->ip; + + return (int64_t)(ip_r - ip_l); +} + + +size_t +sort__sym_print(FILE *fp, struct hist_entry *self, unsigned int width __used) +{ + size_t ret = 0; + + if (verbose) { + char o = self->map ? dso__symtab_origin(self->map->dso) : '!'; + ret += repsep_fprintf(fp, "%#018llx %c ", (u64)self->ip, o); + } + + ret += repsep_fprintf(fp, "[%c] ", self->level); + if (self->sym) + ret += repsep_fprintf(fp, "%s", self->sym->name); + else + ret += repsep_fprintf(fp, "%#016llx", (u64)self->ip); + + return ret; +} + +/* --sort comm */ + +int64_t +sort__comm_cmp(struct hist_entry *left, struct hist_entry *right) +{ + return right->thread->pid - left->thread->pid; +} + +int64_t +sort__comm_collapse(struct hist_entry *left, struct hist_entry *right) +{ + char *comm_l = left->thread->comm; + char *comm_r = right->thread->comm; + + if (!comm_l || !comm_r) + return cmp_null(comm_l, comm_r); + + return strcmp(comm_l, comm_r); +} + +/* --sort parent */ + +int64_t +sort__parent_cmp(struct hist_entry *left, struct hist_entry *right) +{ + struct symbol *sym_l = left->parent; + struct symbol *sym_r = right->parent; + + if (!sym_l || !sym_r) + return cmp_null(sym_l, sym_r); + + return strcmp(sym_l->name, sym_r->name); +} + +size_t +sort__parent_print(FILE *fp, struct hist_entry *self, unsigned int width) +{ + return repsep_fprintf(fp, "%-*s", width, + self->parent ? 
self->parent->name : "[other]"); +} + +int sort_dimension__add(const char *tok) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) { + struct sort_dimension *sd = &sort_dimensions[i]; + + if (sd->taken) + continue; + + if (strncasecmp(tok, sd->name, strlen(tok))) + continue; + + if (sd->entry->collapse) + sort__need_collapse = 1; + + if (sd->entry == &sort_parent) { + int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED); + if (ret) { + char err[BUFSIZ]; + + regerror(ret, &parent_regex, err, sizeof(err)); + fprintf(stderr, "Invalid regex: %s\n%s", + parent_pattern, err); + exit(-1); + } + sort__has_parent = 1; + } + + list_add_tail(&sd->entry->list, &hist_entry__sort_list); + sd->taken = 1; + + return 0; + } + + return -ESRCH; +} + diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h new file mode 100644 index 00000000000..13806d782af --- /dev/null +++ b/tools/perf/util/sort.h @@ -0,0 +1,90 @@ +#ifndef __PERF_SORT_H +#define __PERF_SORT_H +#include "../builtin.h" + +#include "util.h" + +#include "color.h" +#include <linux/list.h> +#include "cache.h" +#include <linux/rbtree.h> +#include "symbol.h" +#include "string.h" +#include "callchain.h" +#include "strlist.h" +#include "values.h" + +#include "../perf.h" +#include "debug.h" +#include "header.h" + +#include "parse-options.h" +#include "parse-events.h" + +#include "thread.h" +#include "sort.h" + +extern regex_t parent_regex; +extern char *sort_order; +extern char default_parent_pattern[]; +extern char *parent_pattern; +extern char default_sort_order[]; +extern int sort__need_collapse; +extern int sort__has_parent; +extern char *field_sep; +extern struct sort_entry sort_comm; +extern struct sort_entry sort_dso; +extern struct sort_entry sort_sym; +extern struct sort_entry sort_parent; +extern unsigned int dsos__col_width; +extern unsigned int comms__col_width; +extern unsigned int threads__col_width; + +struct hist_entry { + struct rb_node rb_node; + u64 count; + struct thread *thread; + struct map *map; + struct symbol *sym; + u64 ip; + char level; + struct symbol *parent; + struct callchain_node callchain; + struct rb_root sorted_chain; +}; + +/* + * configurable sorting bits + */ + +struct sort_entry { + struct list_head list; + + const char *header; + + int64_t (*cmp)(struct hist_entry *, struct hist_entry *); + int64_t (*collapse)(struct hist_entry *, struct hist_entry *); + size_t (*print)(FILE *fp, struct hist_entry *, unsigned int width); + unsigned int *width; + bool elide; +}; + +extern struct sort_entry sort_thread; +extern struct list_head hist_entry__sort_list; + +extern int repsep_fprintf(FILE *fp, const char *fmt, ...); +extern size_t sort__thread_print(FILE *, struct hist_entry *, unsigned int); +extern size_t sort__comm_print(FILE *, struct hist_entry *, unsigned int); +extern size_t sort__dso_print(FILE *, struct hist_entry *, unsigned int); +extern size_t sort__sym_print(FILE *, struct hist_entry *, unsigned int __used); +extern int64_t cmp_null(void *, void *); +extern int64_t sort__thread_cmp(struct hist_entry *, struct hist_entry *); +extern int64_t sort__comm_cmp(struct hist_entry *, struct hist_entry *); +extern int64_t sort__comm_collapse(struct hist_entry *, struct hist_entry *); +extern int64_t sort__dso_cmp(struct hist_entry *, struct hist_entry *); +extern int64_t sort__sym_cmp(struct hist_entry *, struct hist_entry *); +extern int64_t sort__parent_cmp(struct hist_entry *, struct hist_entry *); +extern size_t sort__parent_print(FILE *, struct hist_entry *, unsigned int); 
+extern int sort_dimension__add(const char *); + +#endif /* __PERF_SORT_H */ diff --git a/tools/perf/util/strbuf.h b/tools/perf/util/strbuf.h index d2aa86c014c..a3d121d6c83 100644 --- a/tools/perf/util/strbuf.h +++ b/tools/perf/util/strbuf.h @@ -1,5 +1,5 @@ -#ifndef STRBUF_H -#define STRBUF_H +#ifndef __PERF_STRBUF_H +#define __PERF_STRBUF_H /* * Strbuf's can be use in many ways: as a byte array, or to store arbitrary @@ -134,4 +134,4 @@ extern int launch_editor(const char *path, struct strbuf *buffer, const char *co extern int strbuf_branchname(struct strbuf *sb, const char *name); extern int strbuf_check_branch_ref(struct strbuf *sb, const char *name); -#endif /* STRBUF_H */ +#endif /* __PERF_STRBUF_H */ diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c index c93eca9a7be..04743d3e903 100644 --- a/tools/perf/util/string.c +++ b/tools/perf/util/string.c @@ -1,3 +1,4 @@ +#include <string.h> #include "string.h" static int hex(char ch) @@ -32,3 +33,13 @@ int hex2u64(const char *ptr, u64 *long_val) return p - ptr; } + +char *strxfrchar(char *s, char from, char to) +{ + char *p = s; + + while ((p = strchr(p, from)) != NULL) + *p++ = to; + + return s; +} diff --git a/tools/perf/util/string.h b/tools/perf/util/string.h index bf39dfadfd2..2c84bf65ba0 100644 --- a/tools/perf/util/string.h +++ b/tools/perf/util/string.h @@ -1,11 +1,12 @@ -#ifndef _PERF_STRING_H_ -#define _PERF_STRING_H_ +#ifndef __PERF_STRING_H_ +#define __PERF_STRING_H_ #include "types.h" int hex2u64(const char *ptr, u64 *val); +char *strxfrchar(char *s, char from, char to); #define _STR(x) #x #define STR(x) _STR(x) -#endif +#endif /* __PERF_STRING_H */ diff --git a/tools/perf/util/strlist.h b/tools/perf/util/strlist.h index 921818e44a5..cb4659306d7 100644 --- a/tools/perf/util/strlist.h +++ b/tools/perf/util/strlist.h @@ -1,5 +1,5 @@ -#ifndef STRLIST_H_ -#define STRLIST_H_ +#ifndef __PERF_STRLIST_H +#define __PERF_STRLIST_H #include <linux/rbtree.h> #include <stdbool.h> @@ -36,4 +36,4 @@ static inline unsigned int strlist__nr_entries(const struct strlist *self) } int strlist__parse_list(struct strlist *self, const char *s); -#endif /* STRLIST_H_ */ +#endif /* __PERF_STRLIST_H */ diff --git a/tools/perf/util/svghelper.h b/tools/perf/util/svghelper.h index cd93195aedb..e0781989cc3 100644 --- a/tools/perf/util/svghelper.h +++ b/tools/perf/util/svghelper.h @@ -1,5 +1,5 @@ -#ifndef _INCLUDE_GUARD_SVG_HELPER_ -#define _INCLUDE_GUARD_SVG_HELPER_ +#ifndef __PERF_SVGHELPER_H +#define __PERF_SVGHELPER_H #include "types.h" @@ -25,4 +25,4 @@ extern void svg_close(void); extern int svg_page_width; -#endif +#endif /* __PERF_SVGHELPER_H */ diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 47ea0609a76..faa84f5d4f5 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -2,12 +2,14 @@ #include "../perf.h" #include "string.h" #include "symbol.h" +#include "thread.h" #include "debug.h" #include <libelf.h> #include <gelf.h> #include <elf.h> +#include <sys/utsname.h> const char *sym_hist_filter; @@ -18,12 +20,65 @@ enum dso_origin { DSO__ORIG_UBUNTU, DSO__ORIG_BUILDID, DSO__ORIG_DSO, + DSO__ORIG_KMODULE, DSO__ORIG_NOT_FOUND, }; -static struct symbol *symbol__new(u64 start, u64 len, - const char *name, unsigned int priv_size, - u64 obj_start, int v) +static void dsos__add(struct dso *dso); +static struct dso *dsos__find(const char *name); +static struct map *map__new2(u64 start, struct dso *dso); +static void kernel_maps__insert(struct map *map); + +static struct rb_root kernel_maps; + +static void 
dso__fixup_sym_end(struct dso *self) +{ + struct rb_node *nd, *prevnd = rb_first(&self->syms); + struct symbol *curr, *prev; + + if (prevnd == NULL) + return; + + curr = rb_entry(prevnd, struct symbol, rb_node); + + for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) { + prev = curr; + curr = rb_entry(nd, struct symbol, rb_node); + + if (prev->end == prev->start) + prev->end = curr->start - 1; + } + + /* Last entry */ + if (curr->end == curr->start) + curr->end = roundup(curr->start, 4096); +} + +static void kernel_maps__fixup_end(void) +{ + struct map *prev, *curr; + struct rb_node *nd, *prevnd = rb_first(&kernel_maps); + + if (prevnd == NULL) + return; + + curr = rb_entry(prevnd, struct map, rb_node); + + for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) { + prev = curr; + curr = rb_entry(nd, struct map, rb_node); + prev->end = curr->start - 1; + } + + nd = rb_last(&curr->dso->syms); + if (nd) { + struct symbol *sym = rb_entry(nd, struct symbol, rb_node); + curr->end = sym->end; + } +} + +static struct symbol *symbol__new(u64 start, u64 len, const char *name, + unsigned int priv_size, int v) { size_t namelen = strlen(name) + 1; struct symbol *self = calloc(1, priv_size + sizeof(*self) + namelen); @@ -31,11 +86,10 @@ static struct symbol *symbol__new(u64 start, u64 len, if (!self) return NULL; - if (v >= 2) - printf("new symbol: %016Lx [%08lx]: %s, hist: %p, obj_start: %p\n", - (u64)start, (unsigned long)len, name, self->hist, (void *)(unsigned long)obj_start); + if (v > 2) + printf("new symbol: %016Lx [%08lx]: %s, hist: %p\n", + start, (unsigned long)len, name, self->hist); - self->obj_start= obj_start; self->hist = NULL; self->hist_sum = 0; @@ -60,12 +114,8 @@ static void symbol__delete(struct symbol *self, unsigned int priv_size) static size_t symbol__fprintf(struct symbol *self, FILE *fp) { - if (!self->module) - return fprintf(fp, " %llx-%llx %s\n", + return fprintf(fp, " %llx-%llx %s\n", self->start, self->end, self->name); - else - return fprintf(fp, " %llx-%llx %s \t[%s]\n", - self->start, self->end, self->name, self->module->name); } struct dso *dso__new(const char *name, unsigned int sym_priv_size) @@ -74,6 +124,8 @@ struct dso *dso__new(const char *name, unsigned int sym_priv_size) if (self != NULL) { strcpy(self->name, name); + self->long_name = self->name; + self->short_name = self->name; self->syms = RB_ROOT; self->sym_priv_size = sym_priv_size; self->find_symbol = dso__find_symbol; @@ -100,6 +152,8 @@ static void dso__delete_symbols(struct dso *self) void dso__delete(struct dso *self) { dso__delete_symbols(self); + if (self->long_name != self->name) + free(self->long_name); free(self); } @@ -147,7 +201,7 @@ struct symbol *dso__find_symbol(struct dso *self, u64 ip) size_t dso__fprintf(struct dso *self, FILE *fp) { - size_t ret = fprintf(fp, "dso: %s\n", self->name); + size_t ret = fprintf(fp, "dso: %s\n", self->short_name); struct rb_node *nd; for (nd = rb_first(&self->syms); nd; nd = rb_next(nd)) { @@ -158,13 +212,16 @@ size_t dso__fprintf(struct dso *self, FILE *fp) return ret; } -static int dso__load_kallsyms(struct dso *self, symbol_filter_t filter, int v) +/* + * Loads the function entries in /proc/kallsyms into kernel_map->dso, + * so that we can in the next step set the symbol ->end address and then + * call kernel_maps__split_kallsyms. 
+ */ +static int kernel_maps__load_all_kallsyms(int v) { - struct rb_node *nd, *prevnd; char *line = NULL; size_t n; FILE *file = fopen("/proc/kallsyms", "r"); - int count = 0; if (file == NULL) goto out_failure; @@ -174,6 +231,7 @@ static int dso__load_kallsyms(struct dso *self, symbol_filter_t filter, int v) struct symbol *sym; int line_len, len; char symbol_type; + char *symbol_name; line_len = getline(&line, &n, file); if (line_len < 0) @@ -196,44 +254,24 @@ static int dso__load_kallsyms(struct dso *self, symbol_filter_t filter, int v) */ if (symbol_type != 'T' && symbol_type != 'W') continue; + + symbol_name = line + len + 2; /* - * Well fix up the end later, when we have all sorted. + * Will fix up the end later, when we have all symbols sorted. */ - sym = symbol__new(start, 0xdead, line + len + 2, - self->sym_priv_size, 0, v); + sym = symbol__new(start, 0, symbol_name, + kernel_map->dso->sym_priv_size, v); if (sym == NULL) goto out_delete_line; - if (filter && filter(self, sym)) - symbol__delete(sym, self->sym_priv_size); - else { - dso__insert_symbol(self, sym); - count++; - } - } - - /* - * Now that we have all sorted out, just set the ->end of all - * symbols - */ - prevnd = rb_first(&self->syms); - - if (prevnd == NULL) - goto out_delete_line; - - for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) { - struct symbol *prev = rb_entry(prevnd, struct symbol, rb_node), - *curr = rb_entry(nd, struct symbol, rb_node); - - prev->end = curr->start - 1; - prevnd = nd; + dso__insert_symbol(kernel_map->dso, sym); } free(line); fclose(file); - return count; + return 0; out_delete_line: free(line); @@ -241,14 +279,125 @@ out_failure: return -1; } -static int dso__load_perf_map(struct dso *self, symbol_filter_t filter, int v) +/* + * Split the symbols into maps, making sure there are no overlaps, i.e. the + * kernel range is broken in several maps, named [kernel].N, as we don't have + * the original ELF section names vmlinux have. + */ +static int kernel_maps__split_kallsyms(symbol_filter_t filter, int use_modules) +{ + struct map *map = kernel_map; + struct symbol *pos; + int count = 0; + struct rb_node *next = rb_first(&kernel_map->dso->syms); + int kernel_range = 0; + + while (next) { + char *module; + + pos = rb_entry(next, struct symbol, rb_node); + next = rb_next(&pos->rb_node); + + module = strchr(pos->name, '\t'); + if (module) { + if (!use_modules) + goto delete_symbol; + + *module++ = '\0'; + + if (strcmp(map->dso->name, module)) { + map = kernel_maps__find_by_dso_name(module); + if (!map) { + fputs("/proc/{kallsyms,modules} " + "inconsistency!\n", stderr); + return -1; + } + } + /* + * So that we look just like we get from .ko files, + * i.e. not prelinked, relative to map->start. 
+ */ + pos->start = map->map_ip(map, pos->start); + pos->end = map->map_ip(map, pos->end); + } else if (map != kernel_map) { + char dso_name[PATH_MAX]; + struct dso *dso; + + snprintf(dso_name, sizeof(dso_name), "[kernel].%d", + kernel_range++); + + dso = dso__new(dso_name, + kernel_map->dso->sym_priv_size); + if (dso == NULL) + return -1; + + map = map__new2(pos->start, dso); + if (map == NULL) { + dso__delete(dso); + return -1; + } + + map->map_ip = vdso__map_ip; + kernel_maps__insert(map); + ++kernel_range; + } + + if (filter && filter(map, pos)) { +delete_symbol: + rb_erase(&pos->rb_node, &kernel_map->dso->syms); + symbol__delete(pos, kernel_map->dso->sym_priv_size); + } else { + if (map != kernel_map) { + rb_erase(&pos->rb_node, &kernel_map->dso->syms); + dso__insert_symbol(map->dso, pos); + } + count++; + } + } + + return count; +} + + +static int kernel_maps__load_kallsyms(symbol_filter_t filter, + int use_modules, int v) +{ + if (kernel_maps__load_all_kallsyms(v)) + return -1; + + dso__fixup_sym_end(kernel_map->dso); + + return kernel_maps__split_kallsyms(filter, use_modules); +} + +static size_t kernel_maps__fprintf(FILE *fp, int v) +{ + size_t printed = fprintf(stderr, "Kernel maps:\n"); + struct rb_node *nd; + + for (nd = rb_first(&kernel_maps); nd; nd = rb_next(nd)) { + struct map *pos = rb_entry(nd, struct map, rb_node); + + printed += fprintf(fp, "Map:"); + printed += map__fprintf(pos, fp); + if (v > 1) { + printed += dso__fprintf(pos->dso, fp); + printed += fprintf(fp, "--\n"); + } + } + + return printed + fprintf(stderr, "END kernel maps\n"); +} + +static int dso__load_perf_map(struct dso *self, struct map *map, + symbol_filter_t filter, int v) { char *line = NULL; size_t n; FILE *file; int nr_syms = 0; - file = fopen(self->name, "r"); + file = fopen(self->long_name, "r"); if (file == NULL) goto out_failure; @@ -279,12 +428,12 @@ static int dso__load_perf_map(struct dso *self, symbol_filter_t filter, int v) continue; sym = symbol__new(start, size, line + len, - self->sym_priv_size, start, v); + self->sym_priv_size, v); if (sym == NULL) goto out_delete_line; - if (filter && filter(self, sym)) + if (filter && filter(map, sym)) symbol__delete(sym, self->sym_priv_size); else { dso__insert_symbol(self, sym); @@ -409,7 +558,7 @@ static int dso__synthesize_plt_symbols(struct dso *self, int v) Elf *elf; int nr = 0, symidx, fd, err = 0; - fd = open(self->name, O_RDONLY); + fd = open(self->long_name, O_RDONLY); if (fd < 0) goto out; @@ -477,7 +626,7 @@ static int dso__synthesize_plt_symbols(struct dso *self, int v) "%s@plt", elf_sym__name(&sym, symstrs)); f = symbol__new(plt_offset, shdr_plt.sh_entsize, - sympltname, self->sym_priv_size, 0, v); + sympltname, self->sym_priv_size, v); if (!f) goto out_elf_end; @@ -495,7 +644,7 @@ static int dso__synthesize_plt_symbols(struct dso *self, int v) "%s@plt", elf_sym__name(&sym, symstrs)); f = symbol__new(plt_offset, shdr_plt.sh_entsize, - sympltname, self->sym_priv_size, 0, v); + sympltname, self->sym_priv_size, v); if (!f) goto out_elf_end; @@ -514,13 +663,17 @@ out_close: return nr; out: fprintf(stderr, "%s: problems reading %s PLT info.\n", - __func__, self->name); + __func__, self->long_name); return 0; } -static int dso__load_sym(struct dso *self, int fd, const char *name, - symbol_filter_t filter, int v, struct module *mod) +static int dso__load_sym(struct dso *self, struct map *map, const char *name, + int fd, symbol_filter_t filter, int kernel, + int kmodule, int v) { + struct map *curr_map = map; + struct dso *curr_dso = self; + 
size_t dso_name_len = strlen(self->short_name); Elf_Data *symstrs, *secstrs; uint32_t nr_syms; int err = -1; @@ -531,7 +684,7 @@ static int dso__load_sym(struct dso *self, int fd, const char *name, GElf_Sym sym; Elf_Scn *sec, *sec_strndx; Elf *elf; - int nr = 0, kernel = !strcmp("[kernel]", self->name); + int nr = 0; elf = elf_begin(fd, ELF_C_READ_MMAP, NULL); if (elf == NULL) { @@ -587,9 +740,7 @@ static int dso__load_sym(struct dso *self, int fd, const char *name, elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) { struct symbol *f; const char *elf_name; - char *demangled; - u64 obj_start; - struct section *section = NULL; + char *demangled = NULL; int is_label = elf_sym__is_label(&sym); const char *section_name; @@ -605,52 +756,85 @@ static int dso__load_sym(struct dso *self, int fd, const char *name, if (is_label && !elf_sec__is_text(&shdr, secstrs)) continue; + elf_name = elf_sym__name(&sym, symstrs); section_name = elf_sec__name(&shdr, secstrs); - obj_start = sym.st_value; - if (self->adjust_symbols) { - if (v >= 2) + if (kernel || kmodule) { + char dso_name[PATH_MAX]; + + if (strcmp(section_name, + curr_dso->short_name + dso_name_len) == 0) + goto new_symbol; + + if (strcmp(section_name, ".text") == 0) { + curr_map = map; + curr_dso = self; + goto new_symbol; + } + + snprintf(dso_name, sizeof(dso_name), + "%s%s", self->short_name, section_name); + + curr_map = kernel_maps__find_by_dso_name(dso_name); + if (curr_map == NULL) { + u64 start = sym.st_value; + + if (kmodule) + start += map->start + shdr.sh_offset; + + curr_dso = dso__new(dso_name, self->sym_priv_size); + if (curr_dso == NULL) + goto out_elf_end; + curr_map = map__new2(start, curr_dso); + if (curr_map == NULL) { + dso__delete(curr_dso); + goto out_elf_end; + } + curr_map->map_ip = vdso__map_ip; + curr_dso->origin = DSO__ORIG_KERNEL; + kernel_maps__insert(curr_map); + dsos__add(curr_dso); + } else + curr_dso = curr_map->dso; + + goto new_symbol; + } + + if (curr_dso->adjust_symbols) { + if (v > 2) printf("adjusting symbol: st_value: %Lx sh_addr: %Lx sh_offset: %Lx\n", (u64)sym.st_value, (u64)shdr.sh_addr, (u64)shdr.sh_offset); sym.st_value -= shdr.sh_addr - shdr.sh_offset; } - - if (mod) { - section = mod->sections->find_section(mod->sections, section_name); - if (section) - sym.st_value += section->vma; - else { - fprintf(stderr, "dso__load_sym() module %s lookup of %s failed\n", - mod->name, section_name); - goto out_elf_end; - } - } /* * We need to figure out if the object was created from C++ sources * DWARF DW_compile_unit has this, but we don't always have access * to it... */ - elf_name = elf_sym__name(&sym, symstrs); demangled = bfd_demangle(NULL, elf_name, DMGL_PARAMS | DMGL_ANSI); if (demangled != NULL) elf_name = demangled; - +new_symbol: f = symbol__new(sym.st_value, sym.st_size, elf_name, - self->sym_priv_size, obj_start, v); + curr_dso->sym_priv_size, v); free(demangled); if (!f) goto out_elf_end; - if (filter && filter(self, f)) - symbol__delete(f, self->sym_priv_size); + if (filter && filter(curr_map, f)) + symbol__delete(f, curr_dso->sym_priv_size); else { - f->module = mod; - dso__insert_symbol(self, f); + dso__insert_symbol(curr_dso, f); nr++; } } + /* + * For misannotated, zeroed, ASM function sizes. 
+ */ + if (nr > 0) + dso__fixup_sym_end(self); err = nr; out_elf_end: elf_end(elf); @@ -670,7 +854,7 @@ static char *dso__read_build_id(struct dso *self, int v) char *build_id = NULL, *bid; unsigned char *raw; Elf *elf; - int fd = open(self->name, O_RDONLY); + int fd = open(self->long_name, O_RDONLY); if (fd < 0) goto out; @@ -679,7 +863,7 @@ static char *dso__read_build_id(struct dso *self, int v) if (elf == NULL) { if (v) fprintf(stderr, "%s: cannot read %s ELF file.\n", - __func__, self->name); + __func__, self->long_name); goto out_close; } @@ -708,7 +892,7 @@ static char *dso__read_build_id(struct dso *self, int v) bid += 2; } if (v >= 2) - printf("%s(%s): %s\n", __func__, self->name, build_id); + printf("%s(%s): %s\n", __func__, self->long_name, build_id); out_elf_end: elf_end(elf); out_close: @@ -726,6 +910,7 @@ char dso__symtab_origin(const struct dso *self) [DSO__ORIG_UBUNTU] = 'u', [DSO__ORIG_BUILDID] = 'b', [DSO__ORIG_DSO] = 'd', + [DSO__ORIG_KMODULE] = 'K', }; if (self == NULL || self->origin == DSO__ORIG_NOT_FOUND) @@ -733,7 +918,7 @@ char dso__symtab_origin(const struct dso *self) return origin[self->origin]; } -int dso__load(struct dso *self, symbol_filter_t filter, int v) +int dso__load(struct dso *self, struct map *map, symbol_filter_t filter, int v) { int size = PATH_MAX; char *name = malloc(size), *build_id = NULL; @@ -746,7 +931,7 @@ int dso__load(struct dso *self, symbol_filter_t filter, int v) self->adjust_symbols = 0; if (strncmp(self->name, "/tmp/perf-", 10) == 0) { - ret = dso__load_perf_map(self, filter, v); + ret = dso__load_perf_map(self, map, filter, v); self->origin = ret > 0 ? DSO__ORIG_JAVA_JIT : DSO__ORIG_NOT_FOUND; return ret; @@ -759,10 +944,12 @@ more: self->origin++; switch (self->origin) { case DSO__ORIG_FEDORA: - snprintf(name, size, "/usr/lib/debug%s.debug", self->name); + snprintf(name, size, "/usr/lib/debug%s.debug", + self->long_name); break; case DSO__ORIG_UBUNTU: - snprintf(name, size, "/usr/lib/debug%s", self->name); + snprintf(name, size, "/usr/lib/debug%s", + self->long_name); break; case DSO__ORIG_BUILDID: build_id = dso__read_build_id(self, v); @@ -776,7 +963,7 @@ more: self->origin++; /* Fall thru */ case DSO__ORIG_DSO: - snprintf(name, size, "%s", self->name); + snprintf(name, size, "%s", self->long_name); break; default: @@ -786,7 +973,7 @@ more: fd = open(name, O_RDONLY); } while (fd < 0); - ret = dso__load_sym(self, fd, name, filter, v, NULL); + ret = dso__load_sym(self, map, name, fd, filter, 0, 0, v); close(fd); /* @@ -807,89 +994,243 @@ out: return ret; } -static int dso__load_module(struct dso *self, struct mod_dso *mods, const char *name, - symbol_filter_t filter, int v) +struct map *kernel_map; + +static void kernel_maps__insert(struct map *map) +{ + maps__insert(&kernel_maps, map); +} + +struct symbol *kernel_maps__find_symbol(u64 ip, struct map **mapp) { - struct module *mod = mod_dso__find_module(mods, name); - int err = 0, fd; + struct map *map = maps__find(&kernel_maps, ip); - if (mod == NULL || !mod->active) - return err; + if (mapp) + *mapp = map; - fd = open(mod->path, O_RDONLY); + if (map) { + ip = map->map_ip(map, ip); + return map->dso->find_symbol(map->dso, ip); + } - if (fd < 0) + return NULL; +} + +struct map *kernel_maps__find_by_dso_name(const char *name) +{ + struct rb_node *nd; + + for (nd = rb_first(&kernel_maps); nd; nd = rb_next(nd)) { + struct map *map = rb_entry(nd, struct map, rb_node); + + if (map->dso && strcmp(map->dso->name, name) == 0) + return map; + } + + return NULL; +} + +static int 
dso__load_module_sym(struct dso *self, struct map *map, + symbol_filter_t filter, int v) +{ + int err = 0, fd = open(self->long_name, O_RDONLY); + + if (fd < 0) { + if (v) + fprintf(stderr, "%s: cannot open %s\n", + __func__, self->long_name); return err; + } - err = dso__load_sym(self, fd, name, filter, v, mod); + err = dso__load_sym(self, map, self->long_name, fd, filter, 0, 1, v); close(fd); return err; } -int dso__load_modules(struct dso *self, symbol_filter_t filter, int v) +static int dsos__load_modules_sym_dir(char *dirname, + symbol_filter_t filter, int v) { - struct mod_dso *mods = mod_dso__new_dso("modules"); - struct module *pos; - struct rb_node *next; - int err, count = 0; + struct dirent *dent; + int nr_symbols = 0, err; + DIR *dir = opendir(dirname); - err = mod_dso__load_modules(mods); + if (!dir) { + if (v) + fprintf(stderr, "%s: cannot open %s dir\n", __func__, + dirname); + return -1; + } - if (err <= 0) - return err; + while ((dent = readdir(dir)) != NULL) { + char path[PATH_MAX]; + + if (dent->d_type == DT_DIR) { + if (!strcmp(dent->d_name, ".") || + !strcmp(dent->d_name, "..")) + continue; + + snprintf(path, sizeof(path), "%s/%s", + dirname, dent->d_name); + err = dsos__load_modules_sym_dir(path, filter, v); + if (err < 0) + goto failure; + } else { + char *dot = strrchr(dent->d_name, '.'), + dso_name[PATH_MAX]; + struct map *map; + struct rb_node *last; + + if (dot == NULL || strcmp(dot, ".ko")) + continue; + snprintf(dso_name, sizeof(dso_name), "[%.*s]", + (int)(dot - dent->d_name), dent->d_name); + + strxfrchar(dso_name, '-', '_'); + map = kernel_maps__find_by_dso_name(dso_name); + if (map == NULL) + continue; + + snprintf(path, sizeof(path), "%s/%s", + dirname, dent->d_name); + + map->dso->long_name = strdup(path); + if (map->dso->long_name == NULL) + goto failure; + + err = dso__load_module_sym(map->dso, map, filter, v); + if (err < 0) + goto failure; + last = rb_last(&map->dso->syms); + if (last) { + struct symbol *sym; + /* + * We do this here as well, even having the + * symbol size found in the symtab because + * misannotated ASM symbols may have the size + * set to zero. + */ + dso__fixup_sym_end(map->dso); + + sym = rb_entry(last, struct symbol, rb_node); + map->end = map->start + sym->end; + } + } + nr_symbols += err; + } - /* - * Iterate over modules, and load active symbols. - */ - next = rb_first(&mods->mods); - while (next) { - pos = rb_entry(next, struct module, rb_node); - err = dso__load_module(self, mods, pos->name, filter, v); + return nr_symbols; +failure: + closedir(dir); + return -1; +} - if (err < 0) - break; +static int dsos__load_modules_sym(symbol_filter_t filter, int v) +{ + struct utsname uts; + char modules_path[PATH_MAX]; - next = rb_next(&pos->rb_node); - count += err; - } + if (uname(&uts) < 0) + return -1; - if (err < 0) { - mod_dso__delete_modules(mods); - mod_dso__delete_self(mods); - return err; - } + snprintf(modules_path, sizeof(modules_path), "/lib/modules/%s/kernel", + uts.release); - return count; + return dsos__load_modules_sym_dir(modules_path, filter, v); } -static inline void dso__fill_symbol_holes(struct dso *self) +/* + * Constructor variant for modules (where we know from /proc/modules where + * they are loaded) and for vmlinux, where only after we load all the + * symbols we'll know where it starts and ends. 
+ */ +static struct map *map__new2(u64 start, struct dso *dso) { - struct symbol *prev = NULL; - struct rb_node *nd; + struct map *self = malloc(sizeof(*self)); - for (nd = rb_last(&self->syms); nd; nd = rb_prev(nd)) { - struct symbol *pos = rb_entry(nd, struct symbol, rb_node); + if (self != NULL) { + self->start = start; + /* + * Will be filled after we load all the symbols + */ + self->end = 0; - if (prev) { - u64 hole = 0; - int alias = pos->start == prev->start; + self->pgoff = 0; + self->dso = dso; + self->map_ip = map__map_ip; + RB_CLEAR_NODE(&self->rb_node); + } + return self; +} - if (!alias) - hole = prev->start - pos->end - 1; +static int dsos__load_modules(unsigned int sym_priv_size) +{ + char *line = NULL; + size_t n; + FILE *file = fopen("/proc/modules", "r"); + struct map *map; - if (hole || alias) { - if (alias) - pos->end = prev->end; - else if (hole) - pos->end = prev->start - 1; - } + if (file == NULL) + return -1; + + while (!feof(file)) { + char name[PATH_MAX]; + u64 start; + struct dso *dso; + char *sep; + int line_len; + + line_len = getline(&line, &n, file); + if (line_len < 0) + break; + + if (!line) + goto out_failure; + + line[--line_len] = '\0'; /* \n */ + + sep = strrchr(line, 'x'); + if (sep == NULL) + continue; + + hex2u64(sep + 1, &start); + + sep = strchr(line, ' '); + if (sep == NULL) + continue; + + *sep = '\0'; + + snprintf(name, sizeof(name), "[%s]", line); + dso = dso__new(name, sym_priv_size); + + if (dso == NULL) + goto out_delete_line; + + map = map__new2(start, dso); + if (map == NULL) { + dso__delete(dso); + goto out_delete_line; } - prev = pos; + + dso->origin = DSO__ORIG_KMODULE; + kernel_maps__insert(map); + dsos__add(dso); } + + free(line); + fclose(file); + + return 0; + +out_delete_line: + free(line); +out_failure: + return -1; } -static int dso__load_vmlinux(struct dso *self, const char *vmlinux, +static int dso__load_vmlinux(struct dso *self, struct map *map, + const char *vmlinux, symbol_filter_t filter, int v) { int err, fd = open(vmlinux, O_RDONLY); @@ -897,47 +1238,82 @@ static int dso__load_vmlinux(struct dso *self, const char *vmlinux, if (fd < 0) return -1; - err = dso__load_sym(self, fd, vmlinux, filter, v, NULL); - - if (err > 0) - dso__fill_symbol_holes(self); + err = dso__load_sym(self, map, self->long_name, fd, filter, 1, 0, v); close(fd); return err; } -int dso__load_kernel(struct dso *self, const char *vmlinux, - symbol_filter_t filter, int v, int use_modules) +int dsos__load_kernel(const char *vmlinux, unsigned int sym_priv_size, + symbol_filter_t filter, int v, int use_modules) { int err = -1; + struct dso *dso = dso__new(vmlinux, sym_priv_size); + + if (dso == NULL) + return -1; + + dso->short_name = "[kernel]"; + kernel_map = map__new2(0, dso); + if (kernel_map == NULL) + goto out_delete_dso; + + kernel_map->map_ip = vdso__map_ip; + + if (use_modules && dsos__load_modules(sym_priv_size) < 0) { + fprintf(stderr, "Failed to load list of modules in use! " + "Continuing...\n"); + use_modules = 0; + } if (vmlinux) { - err = dso__load_vmlinux(self, vmlinux, filter, v); + err = dso__load_vmlinux(dso, kernel_map, vmlinux, filter, v); if (err > 0 && use_modules) { - int syms = dso__load_modules(self, filter, v); + int syms = dsos__load_modules_sym(filter, v); - if (syms < 0) { - fprintf(stderr, "dso__load_modules failed!\n"); - return syms; - } - err += syms; + if (syms < 0) + fprintf(stderr, "Failed to read module symbols!" 
+ " Continuing...\n"); + else + err += syms; } } if (err <= 0) - err = dso__load_kallsyms(self, filter, v); + err = kernel_maps__load_kallsyms(filter, use_modules, v); - if (err > 0) - self->origin = DSO__ORIG_KERNEL; + if (err > 0) { + struct rb_node *node = rb_first(&dso->syms); + struct symbol *sym = rb_entry(node, struct symbol, rb_node); + + kernel_map->start = sym->start; + node = rb_last(&dso->syms); + sym = rb_entry(node, struct symbol, rb_node); + kernel_map->end = sym->end; + + dso->origin = DSO__ORIG_KERNEL; + kernel_maps__insert(kernel_map); + /* + * Now that we have all sorted out, just set the ->end of all + * maps: + */ + kernel_maps__fixup_end(); + dsos__add(dso); + + if (v > 0) + kernel_maps__fprintf(stderr, v); + } return err; + +out_delete_dso: + dso__delete(dso); + return -1; } LIST_HEAD(dsos); -struct dso *kernel_dso; struct dso *vdso; -struct dso *hypervisor_dso; const char *vmlinux_name = "vmlinux"; int modules; @@ -969,7 +1345,7 @@ struct dso *dsos__findnew(const char *name) if (!dso) goto out_delete_dso; - nr = dso__load(dso, NULL, verbose); + nr = dso__load(dso, NULL, NULL, verbose); if (nr < 0) { eprintf("Failed to open: %s\n", name); goto out_delete_dso; @@ -994,43 +1370,20 @@ void dsos__fprintf(FILE *fp) dso__fprintf(pos, fp); } -static struct symbol *vdso__find_symbol(struct dso *dso, u64 ip) -{ - return dso__find_symbol(dso, ip); -} - int load_kernel(void) { - int err; - - kernel_dso = dso__new("[kernel]", 0); - if (!kernel_dso) + if (dsos__load_kernel(vmlinux_name, 0, NULL, verbose, modules) <= 0) return -1; - err = dso__load_kernel(kernel_dso, vmlinux_name, NULL, verbose, modules); - if (err <= 0) { - dso__delete(kernel_dso); - kernel_dso = NULL; - } else - dsos__add(kernel_dso); - vdso = dso__new("[vdso]", 0); if (!vdso) return -1; - vdso->find_symbol = vdso__find_symbol; - dsos__add(vdso); - hypervisor_dso = dso__new("[hypervisor]", 0); - if (!hypervisor_dso) - return -1; - dsos__add(hypervisor_dso); - - return err; + return 0; } - void symbol__init(void) { elf_version(EV_CURRENT); diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 6e849071640..2e4522edeb0 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -1,11 +1,10 @@ -#ifndef _PERF_SYMBOL_ -#define _PERF_SYMBOL_ 1 +#ifndef __PERF_SYMBOL +#define __PERF_SYMBOL 1 #include <linux/types.h> #include "types.h" #include <linux/list.h> #include <linux/rbtree.h> -#include "module.h" #include "event.h" #ifdef HAVE_CPLUS_DEMANGLE @@ -36,10 +35,8 @@ struct symbol { struct rb_node rb_node; u64 start; u64 end; - u64 obj_start; u64 hist_sum; u64 *hist; - struct module *module; void *priv; char name[0]; }; @@ -52,12 +49,14 @@ struct dso { unsigned char adjust_symbols; unsigned char slen_calculated; unsigned char origin; + const char *short_name; + char *long_name; char name[0]; }; extern const char *sym_hist_filter; -typedef int (*symbol_filter_t)(struct dso *self, struct symbol *sym); +typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym); struct dso *dso__new(const char *name, unsigned int sym_priv_size); void dso__delete(struct dso *self); @@ -69,10 +68,10 @@ static inline void *dso__sym_priv(struct dso *self, struct symbol *sym) struct symbol *dso__find_symbol(struct dso *self, u64 ip); -int dso__load_kernel(struct dso *self, const char *vmlinux, - symbol_filter_t filter, int verbose, int modules); -int dso__load_modules(struct dso *self, symbol_filter_t filter, int verbose); -int dso__load(struct dso *self, symbol_filter_t filter, int verbose); +int 
dsos__load_kernel(const char *vmlinux, unsigned int sym_priv_size, + symbol_filter_t filter, int verbose, int modules); +int dso__load(struct dso *self, struct map *map, symbol_filter_t filter, + int verbose); struct dso *dsos__findnew(const char *name); void dsos__fprintf(FILE *fp); @@ -84,9 +83,8 @@ int load_kernel(void); void symbol__init(void); extern struct list_head dsos; -extern struct dso *kernel_dso; +extern struct map *kernel_map; extern struct dso *vdso; -extern struct dso *hypervisor_dso; extern const char *vmlinux_name; extern int modules; -#endif /* _PERF_SYMBOL_ */ +#endif /* __PERF_SYMBOL */ diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index 45efb5db0d1..f53fad7c0a8 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c @@ -6,6 +6,9 @@ #include "util.h" #include "debug.h" +static struct rb_root threads; +static struct thread *last_match; + static struct thread *thread__new(pid_t pid) { struct thread *self = calloc(1, sizeof(*self)); @@ -15,7 +18,8 @@ static struct thread *thread__new(pid_t pid) self->comm = malloc(32); if (self->comm) snprintf(self->comm, 32, ":%d", self->pid); - INIT_LIST_HEAD(&self->maps); + self->maps = RB_ROOT; + INIT_LIST_HEAD(&self->removed_maps); } return self; @@ -31,19 +35,27 @@ int thread__set_comm(struct thread *self, const char *comm) static size_t thread__fprintf(struct thread *self, FILE *fp) { + struct rb_node *nd; struct map *pos; - size_t ret = fprintf(fp, "Thread %d %s\n", self->pid, self->comm); + size_t ret = fprintf(fp, "Thread %d %s\nCurrent maps:\n", + self->pid, self->comm); + + for (nd = rb_first(&self->maps); nd; nd = rb_next(nd)) { + pos = rb_entry(nd, struct map, rb_node); + ret += map__fprintf(pos, fp); + } + + ret = fprintf(fp, "Removed maps:\n"); - list_for_each_entry(pos, &self->maps, node) + list_for_each_entry(pos, &self->removed_maps, node) ret += map__fprintf(pos, fp); return ret; } -struct thread * -threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match) +struct thread *threads__findnew(pid_t pid) { - struct rb_node **p = &threads->rb_node; + struct rb_node **p = &threads.rb_node; struct rb_node *parent = NULL; struct thread *th; @@ -52,15 +64,15 @@ threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match) * so most of the time we dont have to look up * the full rbtree: */ - if (*last_match && (*last_match)->pid == pid) - return *last_match; + if (last_match && last_match->pid == pid) + return last_match; while (*p != NULL) { parent = *p; th = rb_entry(parent, struct thread, rb_node); if (th->pid == pid) { - *last_match = th; + last_match = th; return th; } @@ -73,17 +85,16 @@ threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match) th = thread__new(pid); if (th != NULL) { rb_link_node(&th->rb_node, parent, p); - rb_insert_color(&th->rb_node, threads); - *last_match = th; + rb_insert_color(&th->rb_node, &threads); + last_match = th; } return th; } -struct thread * -register_idle_thread(struct rb_root *threads, struct thread **last_match) +struct thread *register_idle_thread(void) { - struct thread *thread = threads__findnew(0, threads, last_match); + struct thread *thread = threads__findnew(0); if (!thread || thread__set_comm(thread, "swapper")) { fprintf(stderr, "problem inserting idle task.\n"); @@ -93,42 +104,82 @@ register_idle_thread(struct rb_root *threads, struct thread **last_match) return thread; } -void thread__insert_map(struct thread *self, struct map *map) +static void thread__remove_overlappings(struct 
thread *self, struct map *map) { - struct map *pos, *tmp; - - list_for_each_entry_safe(pos, tmp, &self->maps, node) { - if (map__overlap(pos, map)) { - if (verbose >= 2) { - printf("overlapping maps:\n"); - map__fprintf(map, stdout); - map__fprintf(pos, stdout); - } - - if (map->start <= pos->start && map->end > pos->start) - pos->start = map->end; - - if (map->end >= pos->end && map->start < pos->end) - pos->end = map->start; - - if (verbose >= 2) { - printf("after collision:\n"); - map__fprintf(pos, stdout); - } - - if (pos->start >= pos->end) { - list_del_init(&pos->node); - free(pos); - } + struct rb_node *next = rb_first(&self->maps); + + while (next) { + struct map *pos = rb_entry(next, struct map, rb_node); + next = rb_next(&pos->rb_node); + + if (!map__overlap(pos, map)) + continue; + + if (verbose >= 2) { + printf("overlapping maps:\n"); + map__fprintf(map, stdout); + map__fprintf(pos, stdout); } + + rb_erase(&pos->rb_node, &self->maps); + /* + * We may have references to this map, for instance in some + * hist_entry instances, so just move them to a separate + * list. + */ + list_add_tail(&pos->node, &self->removed_maps); + } +} + +void maps__insert(struct rb_root *maps, struct map *map) +{ + struct rb_node **p = &maps->rb_node; + struct rb_node *parent = NULL; + const u64 ip = map->start; + struct map *m; + + while (*p != NULL) { + parent = *p; + m = rb_entry(parent, struct map, rb_node); + if (ip < m->start) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + + rb_link_node(&map->rb_node, parent, p); + rb_insert_color(&map->rb_node, maps); +} + +struct map *maps__find(struct rb_root *maps, u64 ip) +{ + struct rb_node **p = &maps->rb_node; + struct rb_node *parent = NULL; + struct map *m; + + while (*p != NULL) { + parent = *p; + m = rb_entry(parent, struct map, rb_node); + if (ip < m->start) + p = &(*p)->rb_left; + else if (ip > m->end) + p = &(*p)->rb_right; + else + return m; } - list_add_tail(&map->node, &self->maps); + return NULL; +} + +void thread__insert_map(struct thread *self, struct map *map) +{ + thread__remove_overlappings(self, map); + maps__insert(&self->maps, map); } int thread__fork(struct thread *self, struct thread *parent) { - struct map *map; + struct rb_node *nd; if (self->comm) free(self->comm); @@ -136,7 +187,8 @@ int thread__fork(struct thread *self, struct thread *parent) if (!self->comm) return -ENOMEM; - list_for_each_entry(map, &parent->maps, node) { + for (nd = rb_first(&parent->maps); nd; nd = rb_next(nd)) { + struct map *map = rb_entry(nd, struct map, rb_node); struct map *new = map__clone(map); if (!new) return -ENOMEM; @@ -146,26 +198,12 @@ int thread__fork(struct thread *self, struct thread *parent) return 0; } -struct map *thread__find_map(struct thread *self, u64 ip) -{ - struct map *pos; - - if (self == NULL) - return NULL; - - list_for_each_entry(pos, &self->maps, node) - if (ip >= pos->start && ip <= pos->end) - return pos; - - return NULL; -} - -size_t threads__fprintf(FILE *fp, struct rb_root *threads) +size_t threads__fprintf(FILE *fp) { size_t ret = 0; struct rb_node *nd; - for (nd = rb_first(threads); nd; nd = rb_next(nd)) { + for (nd = rb_first(&threads); nd; nd = rb_next(nd)) { struct thread *pos = rb_entry(nd, struct thread, rb_node); ret += thread__fprintf(pos, fp); diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h index 32aea3c1c2a..1abef3b7455 100644 --- a/tools/perf/util/thread.h +++ b/tools/perf/util/thread.h @@ -1,22 +1,35 @@ +#ifndef __PERF_THREAD_H +#define __PERF_THREAD_H + #include <linux/rbtree.h> 
-#include <linux/list.h> #include <unistd.h> #include "symbol.h" struct thread { struct rb_node rb_node; - struct list_head maps; + struct rb_root maps; + struct list_head removed_maps; pid_t pid; char shortname[3]; char *comm; }; int thread__set_comm(struct thread *self, const char *comm); -struct thread * -threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match); -struct thread * -register_idle_thread(struct rb_root *threads, struct thread **last_match); +struct thread *threads__findnew(pid_t pid); +struct thread *register_idle_thread(void); void thread__insert_map(struct thread *self, struct map *map); int thread__fork(struct thread *self, struct thread *parent); -struct map *thread__find_map(struct thread *self, u64 ip); -size_t threads__fprintf(FILE *fp, struct rb_root *threads); +size_t threads__fprintf(FILE *fp); + +void maps__insert(struct rb_root *maps, struct map *map); +struct map *maps__find(struct rb_root *maps, u64 ip); + +struct symbol *kernel_maps__find_symbol(const u64 ip, struct map **mapp); +struct map *kernel_maps__find_by_dso_name(const char *name); + +static inline struct map *thread__find_map(struct thread *self, u64 ip) +{ + return self ? maps__find(&self->maps, ip) : NULL; +} + +#endif /* __PERF_THREAD_H */ diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c index af4b0573b37..831052d4b4f 100644 --- a/tools/perf/util/trace-event-info.c +++ b/tools/perf/util/trace-event-info.c @@ -496,14 +496,12 @@ get_tracepoints_path(struct perf_event_attr *pattrs, int nb_events) return path.next; } -void read_tracing_data(struct perf_event_attr *pattrs, int nb_events) +void read_tracing_data(int fd, struct perf_event_attr *pattrs, int nb_events) { char buf[BUFSIZ]; struct tracepoint_path *tps; - output_fd = open(output_file, O_WRONLY | O_CREAT | O_TRUNC | O_LARGEFILE, 0644); - if (output_fd < 0) - die("creating file '%s'", output_file); + output_fd = fd; buf[0] = 23; buf[1] = 8; diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index 55c9659a56e..4b61b497040 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -40,6 +40,8 @@ int header_page_size_size; int header_page_data_offset; int header_page_data_size; +int latency_format; + static char *input_buf; static unsigned long long input_buf_ptr; static unsigned long long input_buf_siz; @@ -284,18 +286,16 @@ void parse_ftrace_printk(char *file, unsigned int size __unused) char *line; char *next = NULL; char *addr_str; - int ret; + char *fmt; int i; line = strtok_r(file, "\n", &next); while (line) { item = malloc_or_die(sizeof(*item)); - ret = sscanf(line, "%as : %as", - (float *)(void *)&addr_str, /* workaround gcc warning */ - (float *)(void *)&item->printk); + addr_str = strtok_r(line, ":", &fmt); item->addr = strtoull(addr_str, NULL, 16); - free(addr_str); - + /* fmt still has a space, skip it */ + item->printk = strdup(fmt+1); item->next = list; list = item; line = strtok_r(NULL, "\n", &next); @@ -522,7 +522,10 @@ static enum event_type __read_token(char **tok) last_ch = ch; ch = __read_char(); buf[i++] = ch; - } while (ch != quote_ch && last_ch != '\\'); + /* the '\' '\' will cancel itself */ + if (ch == '\\' && last_ch == '\\') + last_ch = 0; + } while (ch != quote_ch || last_ch == '\\'); /* remove the last quote */ i--; goto out; @@ -610,7 +613,7 @@ static enum event_type read_token_item(char **tok) static int test_type(enum event_type type, enum event_type expect) { if (type != expect) { - 
die("Error: expected type %d but read %d", + warning("Error: expected type %d but read %d", expect, type); return -1; } @@ -621,13 +624,13 @@ static int test_type_token(enum event_type type, char *token, enum event_type expect, const char *expect_tok) { if (type != expect) { - die("Error: expected type %d but read %d", + warning("Error: expected type %d but read %d", expect, type); return -1; } if (strcmp(token, expect_tok) != 0) { - die("Error: expected '%s' but read '%s'", + warning("Error: expected '%s' but read '%s'", expect_tok, token); return -1; } @@ -665,7 +668,7 @@ static int __read_expected(enum event_type expect, const char *str, int newline_ free_token(token); - return 0; + return ret; } static int read_expected(enum event_type expect, const char *str) @@ -682,10 +685,10 @@ static char *event_read_name(void) { char *token; - if (read_expected(EVENT_ITEM, (char *)"name") < 0) + if (read_expected(EVENT_ITEM, "name") < 0) return NULL; - if (read_expected(EVENT_OP, (char *)":") < 0) + if (read_expected(EVENT_OP, ":") < 0) return NULL; if (read_expect_type(EVENT_ITEM, &token) < 0) @@ -703,10 +706,10 @@ static int event_read_id(void) char *token; int id; - if (read_expected_item(EVENT_ITEM, (char *)"ID") < 0) + if (read_expected_item(EVENT_ITEM, "ID") < 0) return -1; - if (read_expected(EVENT_OP, (char *)":") < 0) + if (read_expected(EVENT_OP, ":") < 0) return -1; if (read_expect_type(EVENT_ITEM, &token) < 0) @@ -721,6 +724,24 @@ static int event_read_id(void) return -1; } +static int field_is_string(struct format_field *field) +{ + if ((field->flags & FIELD_IS_ARRAY) && + (!strstr(field->type, "char") || !strstr(field->type, "u8") || + !strstr(field->type, "s8"))) + return 1; + + return 0; +} + +static int field_is_dynamic(struct format_field *field) +{ + if (!strcmp(field->type, "__data_loc")) + return 1; + + return 0; +} + static int event_read_fields(struct event *event, struct format_field **fields) { struct format_field *field = NULL; @@ -738,7 +759,7 @@ static int event_read_fields(struct event *event, struct format_field **fields) count++; - if (test_type_token(type, token, EVENT_ITEM, (char *)"field")) + if (test_type_token(type, token, EVENT_ITEM, "field")) goto fail; free_token(token); @@ -753,7 +774,7 @@ static int event_read_fields(struct event *event, struct format_field **fields) type = read_token(&token); } - if (test_type_token(type, token, EVENT_OP, (char *)":") < 0) + if (test_type_token(type, token, EVENT_OP, ":") < 0) return -1; if (read_expect_type(EVENT_ITEM, &token) < 0) @@ -865,14 +886,20 @@ static int event_read_fields(struct event *event, struct format_field **fields) free(brackets); } - if (test_type_token(type, token, EVENT_OP, (char *)";")) + if (field_is_string(field)) { + field->flags |= FIELD_IS_STRING; + if (field_is_dynamic(field)) + field->flags |= FIELD_IS_DYNAMIC; + } + + if (test_type_token(type, token, EVENT_OP, ";")) goto fail; free_token(token); - if (read_expected(EVENT_ITEM, (char *)"offset") < 0) + if (read_expected(EVENT_ITEM, "offset") < 0) goto fail_expect; - if (read_expected(EVENT_OP, (char *)":") < 0) + if (read_expected(EVENT_OP, ":") < 0) goto fail_expect; if (read_expect_type(EVENT_ITEM, &token)) @@ -880,13 +907,13 @@ static int event_read_fields(struct event *event, struct format_field **fields) field->offset = strtoul(token, NULL, 0); free_token(token); - if (read_expected(EVENT_OP, (char *)";") < 0) + if (read_expected(EVENT_OP, ";") < 0) goto fail_expect; - if (read_expected(EVENT_ITEM, (char *)"size") < 0) + if 
(read_expected(EVENT_ITEM, "size") < 0) goto fail_expect; - if (read_expected(EVENT_OP, (char *)":") < 0) + if (read_expected(EVENT_OP, ":") < 0) goto fail_expect; if (read_expect_type(EVENT_ITEM, &token)) @@ -894,11 +921,33 @@ static int event_read_fields(struct event *event, struct format_field **fields) field->size = strtoul(token, NULL, 0); free_token(token); - if (read_expected(EVENT_OP, (char *)";") < 0) + if (read_expected(EVENT_OP, ";") < 0) goto fail_expect; - if (read_expect_type(EVENT_NEWLINE, &token) < 0) - goto fail; + type = read_token(&token); + if (type != EVENT_NEWLINE) { + /* newer versions of the kernel have a "signed" type */ + if (test_type_token(type, token, EVENT_ITEM, "signed")) + goto fail; + + free_token(token); + + if (read_expected(EVENT_OP, ":") < 0) + goto fail_expect; + + if (read_expect_type(EVENT_ITEM, &token)) + goto fail; + + /* add signed type */ + + free_token(token); + if (read_expected(EVENT_OP, ";") < 0) + goto fail_expect; + + if (read_expect_type(EVENT_NEWLINE, &token)) + goto fail; + } + free_token(token); *fields = field; @@ -921,10 +970,10 @@ static int event_read_format(struct event *event) char *token; int ret; - if (read_expected_item(EVENT_ITEM, (char *)"format") < 0) + if (read_expected_item(EVENT_ITEM, "format") < 0) return -1; - if (read_expected(EVENT_OP, (char *)":") < 0) + if (read_expected(EVENT_OP, ":") < 0) return -1; if (read_expect_type(EVENT_NEWLINE, &token)) @@ -984,7 +1033,7 @@ process_cond(struct event *event, struct print_arg *top, char **tok) *tok = NULL; type = process_arg(event, left, &token); - if (test_type_token(type, token, EVENT_OP, (char *)":")) + if (test_type_token(type, token, EVENT_OP, ":")) goto out_free; arg->op.op = token; @@ -1004,6 +1053,35 @@ out_free: return EVENT_ERROR; } +static enum event_type +process_array(struct event *event, struct print_arg *top, char **tok) +{ + struct print_arg *arg; + enum event_type type; + char *token = NULL; + + arg = malloc_or_die(sizeof(*arg)); + memset(arg, 0, sizeof(*arg)); + + *tok = NULL; + type = process_arg(event, arg, &token); + if (test_type_token(type, token, EVENT_OP, "]")) + goto out_free; + + top->op.right = arg; + + free_token(token); + type = read_token_item(&token); + *tok = token; + + return type; + +out_free: + free_token(*tok); + free_arg(arg); + return EVENT_ERROR; +} + static int get_op_prio(char *op) { if (!op[1]) { @@ -1128,6 +1206,8 @@ process_op(struct event *event, struct print_arg *arg, char **tok) strcmp(token, "*") == 0 || strcmp(token, "^") == 0 || strcmp(token, "/") == 0 || + strcmp(token, "<") == 0 || + strcmp(token, ">") == 0 || strcmp(token, "==") == 0 || strcmp(token, "!=") == 0) { @@ -1144,17 +1224,46 @@ process_op(struct event *event, struct print_arg *arg, char **tok) right = malloc_or_die(sizeof(*right)); - type = process_arg(event, right, tok); + type = read_token_item(&token); + *tok = token; + + /* could just be a type pointer */ + if ((strcmp(arg->op.op, "*") == 0) && + type == EVENT_DELIM && (strcmp(token, ")") == 0)) { + if (left->type != PRINT_ATOM) + die("bad pointer type"); + left->atom.atom = realloc(left->atom.atom, + sizeof(left->atom.atom) + 3); + strcat(left->atom.atom, " *"); + *arg = *left; + free(arg); + + return type; + } + + type = process_arg_token(event, right, tok, type); arg->op.right = right; + } else if (strcmp(token, "[") == 0) { + + left = malloc_or_die(sizeof(*left)); + *left = *arg; + + arg->type = PRINT_OP; + arg->op.op = token; + arg->op.left = left; + + arg->op.prio = 0; + type = process_array(event, arg, 
tok); + } else { - die("unknown op '%s'", token); + warning("unknown op '%s'", token); + event->flags |= EVENT_FL_FAILED; /* the arg is now the left side */ return EVENT_NONE; } - if (type == EVENT_OP) { int prio; @@ -1178,7 +1287,7 @@ process_entry(struct event *event __unused, struct print_arg *arg, char *field; char *token; - if (read_expected(EVENT_OP, (char *)"->") < 0) + if (read_expected(EVENT_OP, "->") < 0) return EVENT_ERROR; if (read_expect_type(EVENT_ITEM, &token) < 0) @@ -1338,14 +1447,14 @@ process_fields(struct event *event, struct print_flag_sym **list, char **tok) do { free_token(token); type = read_token_item(&token); - if (test_type_token(type, token, EVENT_OP, (char *)"{")) + if (test_type_token(type, token, EVENT_OP, "{")) break; arg = malloc_or_die(sizeof(*arg)); free_token(token); type = process_arg(event, arg, &token); - if (test_type_token(type, token, EVENT_DELIM, (char *)",")) + if (test_type_token(type, token, EVENT_DELIM, ",")) goto out_free; field = malloc_or_die(sizeof(*field)); @@ -1356,7 +1465,7 @@ process_fields(struct event *event, struct print_flag_sym **list, char **tok) free_token(token); type = process_arg(event, arg, &token); - if (test_type_token(type, token, EVENT_OP, (char *)"}")) + if (test_type_token(type, token, EVENT_OP, "}")) goto out_free; value = arg_eval(arg); @@ -1391,13 +1500,13 @@ process_flags(struct event *event, struct print_arg *arg, char **tok) memset(arg, 0, sizeof(*arg)); arg->type = PRINT_FLAGS; - if (read_expected_item(EVENT_DELIM, (char *)"(") < 0) + if (read_expected_item(EVENT_DELIM, "(") < 0) return EVENT_ERROR; field = malloc_or_die(sizeof(*field)); type = process_arg(event, field, &token); - if (test_type_token(type, token, EVENT_DELIM, (char *)",")) + if (test_type_token(type, token, EVENT_DELIM, ",")) goto out_free; arg->flags.field = field; @@ -1408,11 +1517,11 @@ process_flags(struct event *event, struct print_arg *arg, char **tok) type = read_token_item(&token); } - if (test_type_token(type, token, EVENT_DELIM, (char *)",")) + if (test_type_token(type, token, EVENT_DELIM, ",")) goto out_free; type = process_fields(event, &arg->flags.flags, &token); - if (test_type_token(type, token, EVENT_DELIM, (char *)")")) + if (test_type_token(type, token, EVENT_DELIM, ")")) goto out_free; free_token(token); @@ -1434,19 +1543,19 @@ process_symbols(struct event *event, struct print_arg *arg, char **tok) memset(arg, 0, sizeof(*arg)); arg->type = PRINT_SYMBOL; - if (read_expected_item(EVENT_DELIM, (char *)"(") < 0) + if (read_expected_item(EVENT_DELIM, "(") < 0) return EVENT_ERROR; field = malloc_or_die(sizeof(*field)); type = process_arg(event, field, &token); - if (test_type_token(type, token, EVENT_DELIM, (char *)",")) + if (test_type_token(type, token, EVENT_DELIM, ",")) goto out_free; arg->symbol.field = field; type = process_fields(event, &arg->symbol.symbols, &token); - if (test_type_token(type, token, EVENT_DELIM, (char *)")")) + if (test_type_token(type, token, EVENT_DELIM, ")")) goto out_free; free_token(token); @@ -1463,7 +1572,6 @@ process_paren(struct event *event, struct print_arg *arg, char **tok) { struct print_arg *item_arg; enum event_type type; - int ptr_cast = 0; char *token; type = process_arg(event, arg, &token); @@ -1471,28 +1579,13 @@ process_paren(struct event *event, struct print_arg *arg, char **tok) if (type == EVENT_ERROR) return EVENT_ERROR; - if (type == EVENT_OP) { - /* handle the ptr casts */ - if (!strcmp(token, "*")) { - /* - * FIXME: should we zapp whitespaces before ')' ? 
- * (may require a peek_token_item()) - */ - if (__peek_char() == ')') { - ptr_cast = 1; - free_token(token); - type = read_token_item(&token); - } - } - if (!ptr_cast) { - type = process_op(event, arg, &token); + if (type == EVENT_OP) + type = process_op(event, arg, &token); - if (type == EVENT_ERROR) - return EVENT_ERROR; - } - } + if (type == EVENT_ERROR) + return EVENT_ERROR; - if (test_type_token(type, token, EVENT_DELIM, (char *)")")) { + if (test_type_token(type, token, EVENT_DELIM, ")")) { free_token(token); return EVENT_ERROR; } @@ -1516,13 +1609,6 @@ process_paren(struct event *event, struct print_arg *arg, char **tok) item_arg = malloc_or_die(sizeof(*item_arg)); arg->type = PRINT_TYPE; - if (ptr_cast) { - char *old = arg->atom.atom; - - arg->atom.atom = malloc_or_die(strlen(old + 3)); - sprintf(arg->atom.atom, "%s *", old); - free(old); - } arg->typecast.type = arg->atom.atom; arg->typecast.item = item_arg; type = process_arg_token(event, item_arg, &token, type); @@ -1540,7 +1626,7 @@ process_str(struct event *event __unused, struct print_arg *arg, char **tok) enum event_type type; char *token; - if (read_expected(EVENT_DELIM, (char *)"(") < 0) + if (read_expected(EVENT_DELIM, "(") < 0) return EVENT_ERROR; if (read_expect_type(EVENT_ITEM, &token) < 0) @@ -1550,7 +1636,7 @@ process_str(struct event *event __unused, struct print_arg *arg, char **tok) arg->string.string = token; arg->string.offset = -1; - if (read_expected(EVENT_DELIM, (char *)")") < 0) + if (read_expected(EVENT_DELIM, ")") < 0) return EVENT_ERROR; type = read_token(&token); @@ -1637,12 +1723,18 @@ process_arg_token(struct event *event, struct print_arg *arg, static int event_read_print_args(struct event *event, struct print_arg **list) { - enum event_type type; + enum event_type type = EVENT_ERROR; struct print_arg *arg; char *token; int args = 0; do { + if (type == EVENT_NEWLINE) { + free_token(token); + type = read_token_item(&token); + continue; + } + arg = malloc_or_die(sizeof(*arg)); memset(arg, 0, sizeof(*arg)); @@ -1683,18 +1775,19 @@ static int event_read_print(struct event *event) char *token; int ret; - if (read_expected_item(EVENT_ITEM, (char *)"print") < 0) + if (read_expected_item(EVENT_ITEM, "print") < 0) return -1; - if (read_expected(EVENT_ITEM, (char *)"fmt") < 0) + if (read_expected(EVENT_ITEM, "fmt") < 0) return -1; - if (read_expected(EVENT_OP, (char *)":") < 0) + if (read_expected(EVENT_OP, ":") < 0) return -1; if (read_expect_type(EVENT_DQUOTE, &token) < 0) goto fail; + concat: event->print_fmt.format = token; event->print_fmt.args = NULL; @@ -1704,7 +1797,22 @@ static int event_read_print(struct event *event) if (type == EVENT_NONE) return 0; - if (test_type_token(type, token, EVENT_DELIM, (char *)",")) + /* Handle concatination of print lines */ + if (type == EVENT_DQUOTE) { + char *cat; + + cat = malloc_or_die(strlen(event->print_fmt.format) + + strlen(token) + 1); + strcpy(cat, event->print_fmt.format); + strcat(cat, token); + free_token(token); + free_token(event->print_fmt.format); + event->print_fmt.format = NULL; + token = cat; + goto concat; + } + + if (test_type_token(type, token, EVENT_DELIM, ",")) goto fail; free_token(token); @@ -1713,7 +1821,7 @@ static int event_read_print(struct event *event) if (ret < 0) return -1; - return 0; + return ret; fail: free_token(token); @@ -1822,37 +1930,67 @@ static int get_common_info(const char *type, int *offset, int *size) return 0; } -int trace_parse_common_type(void *data) +static int __parse_common(void *data, int *size, int *offset, + 
const char *name) { - static int type_offset; - static int type_size; int ret; - if (!type_size) { - ret = get_common_info("common_type", - &type_offset, - &type_size); + if (!*size) { + ret = get_common_info(name, offset, size); if (ret < 0) return ret; } - return read_size(data + type_offset, type_size); + return read_size(data + *offset, *size); +} + +int trace_parse_common_type(void *data) +{ + static int type_offset; + static int type_size; + + return __parse_common(data, &type_size, &type_offset, + "common_type"); } static int parse_common_pid(void *data) { static int pid_offset; static int pid_size; + + return __parse_common(data, &pid_size, &pid_offset, + "common_pid"); +} + +static int parse_common_pc(void *data) +{ + static int pc_offset; + static int pc_size; + + return __parse_common(data, &pc_size, &pc_offset, + "common_preempt_count"); +} + +static int parse_common_flags(void *data) +{ + static int flags_offset; + static int flags_size; + + return __parse_common(data, &flags_size, &flags_offset, + "common_flags"); +} + +static int parse_common_lock_depth(void *data) +{ + static int ld_offset; + static int ld_size; int ret; - if (!pid_size) { - ret = get_common_info("common_pid", - &pid_offset, - &pid_size); - if (ret < 0) - return ret; - } + ret = __parse_common(data, &ld_size, &ld_offset, + "common_lock_depth"); + if (ret < 0) + return -1; - return read_size(data + pid_offset, pid_size); + return ret; } struct event *trace_find_event(int id) @@ -1871,6 +2009,7 @@ static unsigned long long eval_num_arg(void *data, int size, { unsigned long long val = 0; unsigned long long left, right; + struct print_arg *larg; switch (arg->type) { case PRINT_NULL: @@ -1897,6 +2036,26 @@ static unsigned long long eval_num_arg(void *data, int size, return 0; break; case PRINT_OP: + if (strcmp(arg->op.op, "[") == 0) { + /* + * Arrays are special, since we don't want + * to read the arg as is. 
+ */ + if (arg->op.left->type != PRINT_FIELD) + goto default_op; /* oops, all bets off */ + larg = arg->op.left; + if (!larg->field.field) { + larg->field.field = + find_any_field(event, larg->field.name); + if (!larg->field.field) + die("field %s not found", larg->field.name); + } + right = eval_num_arg(data, size, event, arg->op.right); + val = read_size(data + larg->field.field->offset + + right * long_size, long_size); + break; + } + default_op: left = eval_num_arg(data, size, event, arg->op.left); right = eval_num_arg(data, size, event, arg->op.right); switch (arg->op.op[0]) { @@ -1947,6 +2106,12 @@ static unsigned long long eval_num_arg(void *data, int size, die("unknown op '%s'", arg->op.op); val = left == right; break; + case '-': + val = left - right; + break; + case '+': + val = left + right; + break; default: die("unknown op '%s'", arg->op.op); } @@ -2145,8 +2310,9 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc case 'u': case 'x': case 'i': - bptr = (void *)(((unsigned long)bptr + (long_size - 1)) & - ~(long_size - 1)); + /* the pointers are always 4 bytes aligned */ + bptr = (void *)(((unsigned long)bptr + 3) & + ~3); switch (ls) { case 0: case 1: @@ -2270,7 +2436,27 @@ static void pretty_print(void *data, int size, struct event *event) for (; *ptr; ptr++) { ls = 0; - if (*ptr == '%') { + if (*ptr == '\\') { + ptr++; + switch (*ptr) { + case 'n': + printf("\n"); + break; + case 't': + printf("\t"); + break; + case 'r': + printf("\r"); + break; + case '\\': + printf("\\"); + break; + default: + printf("%c", *ptr); + break; + } + + } else if (*ptr == '%') { saveptr = ptr; show_func = 0; cont_process: @@ -2377,6 +2563,41 @@ static inline int log10_cpu(int nb) return 1; } +static void print_lat_fmt(void *data, int size __unused) +{ + unsigned int lat_flags; + unsigned int pc; + int lock_depth; + int hardirq; + int softirq; + + lat_flags = parse_common_flags(data); + pc = parse_common_pc(data); + lock_depth = parse_common_lock_depth(data); + + hardirq = lat_flags & TRACE_FLAG_HARDIRQ; + softirq = lat_flags & TRACE_FLAG_SOFTIRQ; + + printf("%c%c%c", + (lat_flags & TRACE_FLAG_IRQS_OFF) ? 'd' : + (lat_flags & TRACE_FLAG_IRQS_NOSUPPORT) ? + 'X' : '.', + (lat_flags & TRACE_FLAG_NEED_RESCHED) ? + 'N' : '.', + (hardirq && softirq) ? 'H' : + hardirq ? 'h' : softirq ? 's' : '.'); + + if (pc) + printf("%x", pc); + else + printf("."); + + if (lock_depth < 0) + printf("."); + else + printf("%d", lock_depth); +} + /* taken from Linux, written by Frederic Weisbecker */ static void print_graph_cpu(int cpu) { @@ -2620,6 +2841,11 @@ pretty_print_func_ent(void *data, int size, struct event *event, printf(" | "); + if (latency_format) { + print_lat_fmt(data, size); + printf(" | "); + } + field = find_field(event, "func"); if (!field) die("function entry does not have func field"); @@ -2663,6 +2889,11 @@ pretty_print_func_ret(void *data, int size __unused, struct event *event, printf(" | "); + if (latency_format) { + print_lat_fmt(data, size); + printf(" | "); + } + field = find_field(event, "rettime"); if (!field) die("can't find rettime in return graph"); @@ -2724,7 +2955,7 @@ void print_event(int cpu, void *data, int size, unsigned long long nsecs, event = trace_find_event(type); if (!event) { - printf("ug! no event found for type %d\n", type); + warning("ug! 
no event found for type %d", type); return; } @@ -2734,9 +2965,20 @@ void print_event(int cpu, void *data, int size, unsigned long long nsecs, return pretty_print_func_graph(data, size, event, cpu, pid, comm, secs, usecs); - printf("%16s-%-5d [%03d] %5lu.%09Lu: %s: ", - comm, pid, cpu, - secs, nsecs, event->name); + if (latency_format) { + printf("%8.8s-%-5d %3d", + comm, pid, cpu); + print_lat_fmt(data, size); + } else + printf("%16s-%-5d [%03d]", comm, pid, cpu); + + printf(" %5lu.%06lu: %s: ", secs, usecs, event->name); + + if (event->flags & EVENT_FL_FAILED) { + printf("EVENT '%s' FAILED TO PARSE\n", + event->name); + return; + } pretty_print(data, size, event); printf("\n"); @@ -2807,46 +3049,71 @@ static void print_args(struct print_arg *args) } } -static void parse_header_field(char *type, +static void parse_header_field(const char *field, int *offset, int *size) { char *token; + int type; - if (read_expected(EVENT_ITEM, (char *)"field") < 0) + if (read_expected(EVENT_ITEM, "field") < 0) return; - if (read_expected(EVENT_OP, (char *)":") < 0) + if (read_expected(EVENT_OP, ":") < 0) return; + /* type */ if (read_expect_type(EVENT_ITEM, &token) < 0) - return; + goto fail; free_token(token); - if (read_expected(EVENT_ITEM, type) < 0) + if (read_expected(EVENT_ITEM, field) < 0) return; - if (read_expected(EVENT_OP, (char *)";") < 0) + if (read_expected(EVENT_OP, ";") < 0) return; - if (read_expected(EVENT_ITEM, (char *)"offset") < 0) + if (read_expected(EVENT_ITEM, "offset") < 0) return; - if (read_expected(EVENT_OP, (char *)":") < 0) + if (read_expected(EVENT_OP, ":") < 0) return; if (read_expect_type(EVENT_ITEM, &token) < 0) - return; + goto fail; *offset = atoi(token); free_token(token); - if (read_expected(EVENT_OP, (char *)";") < 0) + if (read_expected(EVENT_OP, ";") < 0) return; - if (read_expected(EVENT_ITEM, (char *)"size") < 0) + if (read_expected(EVENT_ITEM, "size") < 0) return; - if (read_expected(EVENT_OP, (char *)":") < 0) + if (read_expected(EVENT_OP, ":") < 0) return; if (read_expect_type(EVENT_ITEM, &token) < 0) - return; + goto fail; *size = atoi(token); free_token(token); - if (read_expected(EVENT_OP, (char *)";") < 0) - return; - if (read_expect_type(EVENT_NEWLINE, &token) < 0) + if (read_expected(EVENT_OP, ";") < 0) return; + type = read_token(&token); + if (type != EVENT_NEWLINE) { + /* newer versions of the kernel have a "signed" type */ + if (type != EVENT_ITEM) + goto fail; + + if (strcmp(token, "signed") != 0) + goto fail; + + free_token(token); + + if (read_expected(EVENT_OP, ":") < 0) + return; + + if (read_expect_type(EVENT_ITEM, &token)) + goto fail; + + free_token(token); + if (read_expected(EVENT_OP, ";") < 0) + return; + + if (read_expect_type(EVENT_NEWLINE, &token)) + goto fail; + } + fail: free_token(token); } @@ -2854,11 +3121,11 @@ int parse_header_page(char *buf, unsigned long size) { init_input_buf(buf, size); - parse_header_field((char *)"timestamp", &header_page_ts_offset, + parse_header_field("timestamp", &header_page_ts_offset, &header_page_ts_size); - parse_header_field((char *)"commit", &header_page_size_offset, + parse_header_field("commit", &header_page_size_offset, &header_page_size_size); - parse_header_field((char *)"data", &header_page_data_offset, + parse_header_field("data", &header_page_data_offset, &header_page_data_size); return 0; @@ -2909,6 +3176,9 @@ int parse_ftrace_file(char *buf, unsigned long size) if (ret < 0) die("failed to read ftrace event print fmt"); + /* New ftrace handles args */ + if (ret > 0) + return 0; /* * The 
arguments for ftrace files are parsed by the fields. * Set up the fields as their arguments. @@ -2926,7 +3196,7 @@ int parse_ftrace_file(char *buf, unsigned long size) return 0; } -int parse_event_file(char *buf, unsigned long size, char *system__unused __unused) +int parse_event_file(char *buf, unsigned long size, char *sys) { struct event *event; int ret; @@ -2946,12 +3216,18 @@ int parse_event_file(char *buf, unsigned long size, char *system__unused __unuse die("failed to read event id"); ret = event_read_format(event); - if (ret < 0) - die("failed to read event format"); + if (ret < 0) { + warning("failed to read event format for %s", event->name); + goto event_failed; + } ret = event_read_print(event); - if (ret < 0) - die("failed to read event print fmt"); + if (ret < 0) { + warning("failed to read event print fmt for %s", event->name); + goto event_failed; + } + + event->system = strdup(sys); #define PRINT_ARGS 0 if (PRINT_ARGS && event->print_fmt.args) @@ -2959,6 +3235,12 @@ int parse_event_file(char *buf, unsigned long size, char *system__unused __unuse add_event(event); return 0; + + event_failed: + event->flags |= EVENT_FL_FAILED; + /* still add it even if it failed */ + add_event(event); + return -1; } void parse_set_info(int nr_cpus, int long_sz) diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c index 1b5c847d2c2..44292e06cca 100644 --- a/tools/perf/util/trace-event-read.c +++ b/tools/perf/util/trace-event-read.c @@ -458,9 +458,8 @@ struct record *trace_read_data(int cpu) return data; } -void trace_report(void) +void trace_report(int fd) { - const char *input_file = "trace.info"; char buf[BUFSIZ]; char test[] = { 23, 8, 68 }; char *version; @@ -468,9 +467,7 @@ void trace_report(void) int show_funcs = 0; int show_printk = 0; - input_fd = open(input_file, O_RDONLY); - if (input_fd < 0) - die("opening '%s'\n", input_file); + input_fd = fd; read_or_die(buf, 3); if (memcmp(buf, test, 3) != 0) diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h index 693f815c942..f6637c2fa1f 100644 --- a/tools/perf/util/trace-event.h +++ b/tools/perf/util/trace-event.h @@ -1,5 +1,5 @@ -#ifndef _TRACE_EVENTS_H -#define _TRACE_EVENTS_H +#ifndef __PERF_TRACE_EVENTS_H +#define __PERF_TRACE_EVENTS_H #include "parse-events.h" @@ -26,6 +26,9 @@ enum { enum format_flags { FIELD_IS_ARRAY = 1, FIELD_IS_POINTER = 2, + FIELD_IS_SIGNED = 4, + FIELD_IS_STRING = 8, + FIELD_IS_DYNAMIC = 16, }; struct format_field { @@ -132,15 +135,18 @@ struct event { int flags; struct format format; struct print_fmt print_fmt; + char *system; }; enum { - EVENT_FL_ISFTRACE = 1, - EVENT_FL_ISPRINT = 2, - EVENT_FL_ISBPRINT = 4, - EVENT_FL_ISFUNC = 8, - EVENT_FL_ISFUNCENT = 16, - EVENT_FL_ISFUNCRET = 32, + EVENT_FL_ISFTRACE = 0x01, + EVENT_FL_ISPRINT = 0x02, + EVENT_FL_ISBPRINT = 0x04, + EVENT_FL_ISFUNC = 0x08, + EVENT_FL_ISFUNCENT = 0x10, + EVENT_FL_ISFUNCRET = 0x20, + + EVENT_FL_FAILED = 0x80000000 }; struct record { @@ -154,7 +160,7 @@ struct record *trace_read_data(int cpu); void parse_set_info(int nr_cpus, int long_sz); -void trace_report(void); +void trace_report(int fd); void *malloc_or_die(unsigned int size); @@ -166,7 +172,7 @@ void print_funcs(void); void print_printk(void); int parse_ftrace_file(char *buf, unsigned long size); -int parse_event_file(char *buf, unsigned long size, char *system); +int parse_event_file(char *buf, unsigned long size, char *sys); void print_event(int cpu, void *data, int size, unsigned long long nsecs, char *comm); @@ -233,6 +239,8 @@ 
extern int header_page_size_size; extern int header_page_data_offset; extern int header_page_data_size; +extern int latency_format; + int parse_header_page(char *buf, unsigned long size); int trace_parse_common_type(void *data); struct event *trace_find_event(int id); @@ -240,6 +248,15 @@ unsigned long long raw_field_value(struct event *event, const char *name, void *data); void *raw_field_ptr(struct event *event, const char *name, void *data); -void read_tracing_data(struct perf_event_attr *pattrs, int nb_events); +void read_tracing_data(int fd, struct perf_event_attr *pattrs, int nb_events); + +/* taken from kernel/trace/trace.h */ +enum trace_flag_type { + TRACE_FLAG_IRQS_OFF = 0x01, + TRACE_FLAG_IRQS_NOSUPPORT = 0x02, + TRACE_FLAG_NEED_RESCHED = 0x04, + TRACE_FLAG_HARDIRQ = 0x08, + TRACE_FLAG_SOFTIRQ = 0x10, +}; -#endif /* _TRACE_EVENTS_H */ +#endif /* __PERF_TRACE_EVENTS_H */ diff --git a/tools/perf/util/types.h b/tools/perf/util/types.h index 5e75f900594..7d6b8331f89 100644 --- a/tools/perf/util/types.h +++ b/tools/perf/util/types.h @@ -1,5 +1,5 @@ -#ifndef _PERF_TYPES_H -#define _PERF_TYPES_H +#ifndef __PERF_TYPES_H +#define __PERF_TYPES_H /* * We define u64 as unsigned long long for every architecture @@ -14,4 +14,4 @@ typedef signed short s16; typedef unsigned char u8; typedef signed char s8; -#endif /* _PERF_TYPES_H */ +#endif /* __PERF_TYPES_H */ diff --git a/tools/perf/util/values.h b/tools/perf/util/values.h index cadf8cf2a59..2fa967e1a88 100644 --- a/tools/perf/util/values.h +++ b/tools/perf/util/values.h @@ -1,5 +1,5 @@ -#ifndef _PERF_VALUES_H -#define _PERF_VALUES_H +#ifndef __PERF_VALUES_H +#define __PERF_VALUES_H #include "types.h" @@ -24,4 +24,4 @@ void perf_read_values_add_value(struct perf_read_values *values, void perf_read_values_display(FILE *fp, struct perf_read_values *values, int raw); -#endif /* _PERF_VALUES_H */ +#endif /* __PERF_VALUES_H */ |
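
The final trace-event.h hunk exports latency_format and the trace_flag_type bits that the new print_lat_fmt() in trace-event-parse.c decodes into ftrace's latency columns. As a rough standalone sketch of that decoding (not part of the patch itself): the flag values below are copied from the hunk, while show_lat_columns(), main() and the sample flags/pc/lock_depth inputs are invented here purely for illustration.

/*
 * Standalone sketch of the latency-column decoding performed by
 * print_lat_fmt() in tools/perf/util/trace-event-parse.c.
 * The enum mirrors the trace_flag_type values added in the hunk above;
 * show_lat_columns() and the sample inputs in main() are illustrative
 * only and do not exist in the patch.
 */
#include <stdio.h>

enum trace_flag_type {
        TRACE_FLAG_IRQS_OFF             = 0x01,
        TRACE_FLAG_IRQS_NOSUPPORT       = 0x02,
        TRACE_FLAG_NEED_RESCHED         = 0x04,
        TRACE_FLAG_HARDIRQ              = 0x08,
        TRACE_FLAG_SOFTIRQ              = 0x10,
};

static void show_lat_columns(unsigned int flags, unsigned int pc, int lock_depth)
{
        int hardirq = flags & TRACE_FLAG_HARDIRQ;
        int softirq = flags & TRACE_FLAG_SOFTIRQ;

        /* irqs-off, need-resched and irq-context columns */
        printf("%c%c%c",
               (flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
               (flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' : '.',
               (flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.',
               (hardirq && softirq) ? 'H' : hardirq ? 'h' : softirq ? 's' : '.');

        /* preempt count column: '.' when zero */
        if (pc)
                printf("%x", pc);
        else
                printf(".");

        /* lock depth column: '.' when unknown/negative */
        if (lock_depth < 0)
                printf(".\n");
        else
                printf("%d\n", lock_depth);
}

int main(void)
{
        /* e.g. irqs off, softirq context, preempt_count 1, no lock depth */
        show_lat_columns(TRACE_FLAG_IRQS_OFF | TRACE_FLAG_SOFTIRQ, 1, -1);
        return 0;
}

Compiled on its own, this prints "d.s1." -- irqs off, no resched pending, softirq context, preempt count 1, unknown lock depth -- which is the same style of column block perf prepends to each event line when latency_format is set.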