author		David S. Miller <davem@davemloft.net>	2010-05-18 23:01:55 -0700
committer	David S. Miller <davem@davemloft.net>	2010-05-18 23:01:55 -0700
commit		2ec8c6bb5d8f3a62a79f463525054bae1e3d4487 (patch)
tree		fa7f8400ac685fb52e96f64997c7c682fc2aa021 /include
parent		7b39f90fabcf9e2af0cd79d0a60440d821e22b56 (diff)
parent		537b60d17894b7c19a6060feae40299d7109d6e7 (diff)
Merge branch 'master' of /home/davem/src/GIT/linux-2.6/
Conflicts:
include/linux/mod_devicetable.h
scripts/mod/file2alias.c
Diffstat (limited to 'include')
42 files changed, 704 insertions, 440 deletions
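Annotation (illustrative, not part of the commit): the asm-generic hweight rework in the diff below splits each hweightN() into a compile-time path (__const_hweightN, folded when the argument is constant) and a runtime path (__arch_hweightN, which defaults to the software __sw_hweightN). A minimal userspace sketch of the same dispatch pattern, assuming a GCC-style __builtin_constant_p(); sw_hweight8() here is a hypothetical stand-in for the kernel's __sw_hweight8():

    /* Constant args fold at compile time, variable args take the
     * runtime popcount -- mirrors the hweight8() added below. */
    #include <stdio.h>

    static inline unsigned int sw_hweight8(unsigned int w)
    {
            unsigned int res = w - ((w >> 1) & 0x55);  /* pairwise counts */
            res = (res & 0x33) + ((res >> 2) & 0x33);  /* nibble counts  */
            return (res + (res >> 4)) & 0x0f;          /* byte count     */
    }

    #define const_hweight8(w)               \
            ((!!((w) & (1ULL << 0))) +      \
             (!!((w) & (1ULL << 1))) +      \
             (!!((w) & (1ULL << 2))) +      \
             (!!((w) & (1ULL << 3))) +      \
             (!!((w) & (1ULL << 4))) +      \
             (!!((w) & (1ULL << 5))) +      \
             (!!((w) & (1ULL << 6))) +      \
             (!!((w) & (1ULL << 7))))

    #define hweight8(w) \
            (__builtin_constant_p(w) ? const_hweight8(w) : sw_hweight8(w))

    int main(void)
    {
            unsigned int x = 0xa5;
            printf("%u %u\n", hweight8(0xffu), hweight8(x)); /* prints: 8 4 */
            return 0;
    }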
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index c99c64dc5f3..c33749f95b3 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -33,7 +33,7 @@
  * Atomically reads the value of @v.  Note that the guaranteed
  * useful range of an atomic_t is only 24 bits.
  */
-#define atomic_read(v)	((v)->counter)
+#define atomic_read(v)	(*(volatile int *)&(v)->counter)

 /**
  * atomic_set - set atomic variable
diff --git a/include/asm-generic/bitops/arch_hweight.h b/include/asm-generic/bitops/arch_hweight.h
new file mode 100644
index 00000000000..6a211f40665
--- /dev/null
+++ b/include/asm-generic/bitops/arch_hweight.h
@@ -0,0 +1,25 @@
+#ifndef _ASM_GENERIC_BITOPS_ARCH_HWEIGHT_H_
+#define _ASM_GENERIC_BITOPS_ARCH_HWEIGHT_H_
+
+#include <asm/types.h>
+
+static inline unsigned int __arch_hweight32(unsigned int w)
+{
+	return __sw_hweight32(w);
+}
+
+static inline unsigned int __arch_hweight16(unsigned int w)
+{
+	return __sw_hweight16(w);
+}
+
+static inline unsigned int __arch_hweight8(unsigned int w)
+{
+	return __sw_hweight8(w);
+}
+
+static inline unsigned long __arch_hweight64(__u64 w)
+{
+	return __sw_hweight64(w);
+}
+#endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */
diff --git a/include/asm-generic/bitops/const_hweight.h b/include/asm-generic/bitops/const_hweight.h
new file mode 100644
index 00000000000..fa2a50b7ee6
--- /dev/null
+++ b/include/asm-generic/bitops/const_hweight.h
@@ -0,0 +1,42 @@
+#ifndef _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_
+#define _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_
+
+/*
+ * Compile time versions of __arch_hweightN()
+ */
+#define __const_hweight8(w)		\
+	( (!!((w) & (1ULL << 0))) +	\
+	  (!!((w) & (1ULL << 1))) +	\
+	  (!!((w) & (1ULL << 2))) +	\
+	  (!!((w) & (1ULL << 3))) +	\
+	  (!!((w) & (1ULL << 4))) +	\
+	  (!!((w) & (1ULL << 5))) +	\
+	  (!!((w) & (1ULL << 6))) +	\
+	  (!!((w) & (1ULL << 7))) )
+
+#define __const_hweight16(w) (__const_hweight8(w)  + __const_hweight8((w)  >> 8 ))
+#define __const_hweight32(w) (__const_hweight16(w) + __const_hweight16((w) >> 16))
+#define __const_hweight64(w) (__const_hweight32(w) + __const_hweight32((w) >> 32))
+
+/*
+ * Generic interface.
+ */
+#define hweight8(w)  (__builtin_constant_p(w) ? __const_hweight8(w)  : __arch_hweight8(w))
+#define hweight16(w) (__builtin_constant_p(w) ? __const_hweight16(w) : __arch_hweight16(w))
+#define hweight32(w) (__builtin_constant_p(w) ? __const_hweight32(w) : __arch_hweight32(w))
+#define hweight64(w) (__builtin_constant_p(w) ? __const_hweight64(w) : __arch_hweight64(w))
+
+/*
+ * Interface for known constant arguments
+ */
+#define HWEIGHT8(w)  (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight8(w))
+#define HWEIGHT16(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight16(w))
+#define HWEIGHT32(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight32(w))
+#define HWEIGHT64(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight64(w))
+
+/*
+ * Type invariant interface to the compile time constant hweight functions.
+ */
+#define HWEIGHT(w)   HWEIGHT64((u64)w)
+
+#endif /* _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_ */
diff --git a/include/asm-generic/bitops/hweight.h b/include/asm-generic/bitops/hweight.h
index fbbc383771d..a94d6519c7e 100644
--- a/include/asm-generic/bitops/hweight.h
+++ b/include/asm-generic/bitops/hweight.h
@@ -1,11 +1,7 @@
 #ifndef _ASM_GENERIC_BITOPS_HWEIGHT_H_
 #define _ASM_GENERIC_BITOPS_HWEIGHT_H_

-#include <asm/types.h>
-
-extern unsigned int hweight32(unsigned int w);
-extern unsigned int hweight16(unsigned int w);
-extern unsigned int hweight8(unsigned int w);
-extern unsigned long hweight64(__u64 w);
+#include <asm-generic/bitops/arch_hweight.h>
+#include <asm-generic/bitops/const_hweight.h>

 #endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index b926afe8c03..3da73f5f0ae 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -116,11 +116,12 @@ extern unsigned long acpi_realmode_flags;

 int acpi_register_gsi (struct device *dev, u32 gsi, int triggering, int polarity);
 int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);
+int acpi_isa_irq_to_gsi (unsigned isa_irq, u32 *gsi);

 #ifdef CONFIG_X86_IO_APIC
-extern int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity);
+extern int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity);
 #else
-#define acpi_get_override_irq(bus, trigger, polarity) (-1)
+#define acpi_get_override_irq(gsi, trigger, polarity) (-1)
 #endif
 /*
  * This function undoes the effect of one call to acpi_register_gsi().
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index b796eab5ca7..fc68053378c 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -10,6 +10,11 @@
 #define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
 #endif

+extern unsigned int __sw_hweight8(unsigned int w);
+extern unsigned int __sw_hweight16(unsigned int w);
+extern unsigned int __sw_hweight32(unsigned int w);
+extern unsigned long __sw_hweight64(__u64 w);
+
 /*
  * Include this here because some architectures need generic_ffs/fls in
  * scope
@@ -44,31 +49,6 @@ static inline unsigned long hweight_long(unsigned long w)
 	return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
 }

-/*
- * Clearly slow versions of the hweightN() functions, their benefit is
- * of course compile time evaluation of constant arguments.
- */
-#define HWEIGHT8(w) \
-      (	BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) +	\
-	(!!((w) & (1ULL << 0))) +	\
-	(!!((w) & (1ULL << 1))) +	\
-	(!!((w) & (1ULL << 2))) +	\
-	(!!((w) & (1ULL << 3))) +	\
-	(!!((w) & (1ULL << 4))) +	\
-	(!!((w) & (1ULL << 5))) +	\
-	(!!((w) & (1ULL << 6))) +	\
-	(!!((w) & (1ULL << 7)))	)
-
-#define HWEIGHT16(w) (HWEIGHT8(w)  + HWEIGHT8((w) >> 8))
-#define HWEIGHT32(w) (HWEIGHT16(w) + HWEIGHT16((w) >> 16))
-#define HWEIGHT64(w) (HWEIGHT32(w) + HWEIGHT32((w) >> 32))
-
-/*
- * Type invariant version that simply casts things to the
- * largest type.
- */
-#define HWEIGHT(w) HWEIGHT64((u64)(w))
-
 /**
  * rol32 - rotate a 32-bit value left
  * @word: value to rotate
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 4de02b10007..9f15150ce8d 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -278,6 +278,27 @@ struct freq_attr {
 	ssize_t (*store)(struct cpufreq_policy *, const char *, size_t count);
 };

+#define cpufreq_freq_attr_ro(_name)		\
+static struct freq_attr _name =			\
+__ATTR(_name, 0444, show_##_name, NULL)
+
+#define cpufreq_freq_attr_ro_perm(_name, _perm)	\
+static struct freq_attr _name =			\
+__ATTR(_name, _perm, show_##_name, NULL)
+
+#define cpufreq_freq_attr_ro_old(_name)		\
+static struct freq_attr _name##_old =		\
+__ATTR(_name, 0444, show_##_name##_old, NULL)
+
+#define cpufreq_freq_attr_rw(_name)		\
+static struct freq_attr _name =			\
+__ATTR(_name, 0644, show_##_name, store_##_name)
+
+#define cpufreq_freq_attr_rw_old(_name)		\
+static struct freq_attr _name##_old =		\
+__ATTR(_name, 0644, show_##_name##_old, store_##_name##_old)
+
+
 struct global_attr {
 	struct attribute attr;
 	ssize_t (*show)(struct kobject *kobj,
@@ -286,6 +307,15 @@ struct global_attr {
 			 const char *c, size_t count);
 };

+#define define_one_global_ro(_name)		\
+static struct global_attr _name =		\
+__ATTR(_name, 0444, show_##_name, NULL)
+
+#define define_one_global_rw(_name)		\
+static struct global_attr _name =		\
+__ATTR(_name, 0644, show_##_name, store_##_name)
+
+
 /*********************************************************************
  *                     CPUFREQ 2.6. INTERFACE                        *
  *********************************************************************/
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index a5740fc4d04..a73454aec33 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -21,8 +21,7 @@ extern int number_of_cpusets;	/* How many cpusets are defined in system? */

 extern int cpuset_init(void);
 extern void cpuset_init_smp(void);
 extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
-extern void cpuset_cpus_allowed_locked(struct task_struct *p,
-				       struct cpumask *mask);
+extern int cpuset_cpus_allowed_fallback(struct task_struct *p);
 extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
 #define cpuset_current_mems_allowed (current->mems_allowed)
 void cpuset_init_current_mems_allowed(void);
@@ -69,9 +68,6 @@ struct seq_file;
 extern void cpuset_task_status_allowed(struct seq_file *m,
 					struct task_struct *task);

-extern void cpuset_lock(void);
-extern void cpuset_unlock(void);
-
 extern int cpuset_mem_spread_node(void);

 static inline int cpuset_do_page_mem_spread(void)
@@ -105,10 +101,11 @@ static inline void cpuset_cpus_allowed(struct task_struct *p,
 {
 	cpumask_copy(mask, cpu_possible_mask);
 }
-static inline void cpuset_cpus_allowed_locked(struct task_struct *p,
-					      struct cpumask *mask)
+
+static inline int cpuset_cpus_allowed_fallback(struct task_struct *p)
 {
-	cpumask_copy(mask, cpu_possible_mask);
+	cpumask_copy(&p->cpus_allowed, cpu_possible_mask);
+	return cpumask_any(cpu_active_mask);
 }

 static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
@@ -157,9 +154,6 @@ static inline void cpuset_task_status_allowed(struct seq_file *m,
 {
 }

-static inline void cpuset_lock(void) {}
-static inline void cpuset_unlock(void) {}
-
 static inline int cpuset_mem_spread_node(void)
 {
 	return 0;
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 30b93b2a01a..eebb617c17d 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -186,6 +186,8 @@ d_iput:		no		no		no       yes

 #define DCACHE_FSNOTIFY_PARENT_WATCHED	0x0080 /* Parent inode is watched by some fsnotify listener */

+#define DCACHE_CANT_MOUNT	0x0100
+
 extern spinlock_t dcache_lock;
 extern seqlock_t rename_lock;

@@ -358,6 +360,18 @@ static inline int d_unlinked(struct dentry *dentry)
 	return d_unhashed(dentry) && !IS_ROOT(dentry);
 }

+static inline int cant_mount(struct dentry *dentry)
+{
+	return (dentry->d_flags & DCACHE_CANT_MOUNT);
+}
+
+static inline void dont_mount(struct dentry *dentry)
+{
+	spin_lock(&dentry->d_lock);
+	dentry->d_flags |= DCACHE_CANT_MOUNT;
+	spin_unlock(&dentry->d_lock);
+}
+
 static inline struct dentry *dget_parent(struct dentry *dentry)
 {
 	struct dentry *ret;
diff --git a/include/linux/debugobjects.h b/include/linux/debugobjects.h
index 8c243aaa86a..597692f1fc8 100644
--- a/include/linux/debugobjects.h
+++ b/include/linux/debugobjects.h
@@ -20,12 +20,14 @@ struct debug_obj_descr;
 * struct debug_obj - representaion of an tracked object
 * @node:	hlist node to link the object into the tracker list
 * @state:	tracked object state
+ * @astate:	current active state
 * @object:	pointer to the real object
 * @descr:	pointer to an object type specific debug description structure
 */
 struct debug_obj {
 	struct hlist_node	node;
 	enum debug_obj_state	state;
+	unsigned int		astate;
 	void			*object;
 	struct debug_obj_descr	*descr;
 };
@@ -60,6 +62,15 @@ extern void debug_object_deactivate(void *addr, struct debug_obj_descr *descr);
 extern void debug_object_destroy   (void *addr, struct debug_obj_descr *descr);
 extern void debug_object_free      (void *addr, struct debug_obj_descr *descr);

+/*
+ * Active state:
+ * - Set at 0 upon initialization.
+ * - Must return to 0 before deactivation.
+ */
+extern void
+debug_object_active_state(void *addr, struct debug_obj_descr *descr,
+			  unsigned int expect, unsigned int next);
+
 extern void debug_objects_early_init(void);
 extern void debug_objects_mem_init(void);
 #else
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 01e6adea07e..41e46330d9b 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -82,9 +82,13 @@ void clear_ftrace_function(void);
 extern void ftrace_stub(unsigned long a0, unsigned long a1);

 #else /* !CONFIG_FUNCTION_TRACER */
-# define register_ftrace_function(ops) do { } while (0)
-# define unregister_ftrace_function(ops) do { } while (0)
-# define clear_ftrace_function(ops) do { } while (0)
+/*
+ * (un)register_ftrace_function must be a macro since the ops parameter
+ * must not be evaluated.
+ */
+#define register_ftrace_function(ops) ({ 0; })
+#define unregister_ftrace_function(ops) ({ 0; })
+static inline void clear_ftrace_function(void) { }
 static inline void ftrace_kill(void) { }
 static inline void ftrace_stop(void) { }
 static inline void ftrace_start(void) { }
@@ -237,11 +241,13 @@ extern int skip_trace(unsigned long ip);
 extern void ftrace_disable_daemon(void);
 extern void ftrace_enable_daemon(void);
 #else
-# define skip_trace(ip)				({ 0; })
-# define ftrace_force_update()			({ 0; })
-# define ftrace_set_filter(buf, len, reset)	do { } while (0)
-# define ftrace_disable_daemon()		do { } while (0)
-# define ftrace_enable_daemon()			do { } while (0)
+static inline int skip_trace(unsigned long ip) { return 0; }
+static inline int ftrace_force_update(void) { return 0; }
+static inline void ftrace_set_filter(unsigned char *buf, int len, int reset)
+{
+}
+static inline void ftrace_disable_daemon(void) { }
+static inline void ftrace_enable_daemon(void) { }
 static inline void ftrace_release_mod(struct module *mod) {}
 static inline int register_ftrace_command(struct ftrace_func_command *cmd)
 {
@@ -314,16 +320,16 @@ static inline void __ftrace_enabled_restore(int enabled)
 extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
 extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
 #else
-# define time_hardirqs_on(a0, a1)		do { } while (0)
-# define time_hardirqs_off(a0, a1)		do { } while (0)
+  static inline void time_hardirqs_on(unsigned long a0, unsigned long a1) { }
+  static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
 #endif

 #ifdef CONFIG_PREEMPT_TRACER
 extern void trace_preempt_on(unsigned long a0, unsigned long a1);
 extern void trace_preempt_off(unsigned long a0, unsigned long a1);
 #else
-# define trace_preempt_on(a0, a1)		do { } while (0)
-# define trace_preempt_off(a0, a1)		do { } while (0)
+  static inline void trace_preempt_on(unsigned long a0, unsigned long a1) { }
+  static inline void trace_preempt_off(unsigned long a0, unsigned long a1) { }
 #endif

 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
@@ -352,6 +358,10 @@ struct ftrace_graph_ret {
 	int depth;
 };

+/* Type of the callback handlers for tracing function graph*/
+typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
+typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
+
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER

 /* for init task */
@@ -400,10 +410,6 @@ extern char __irqentry_text_end[];
 #define FTRACE_RETFUNC_DEPTH 50
 #define FTRACE_RETSTACK_ALLOC_SIZE 32

-/* Type of the callback handlers for tracing function graph*/
-typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
-typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
-
 extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
				 trace_func_graph_ent_t entryfunc);
@@ -441,6 +447,13 @@ static inline void unpause_graph_tracing(void)
 static inline void ftrace_graph_init_task(struct task_struct *t) { }
 static inline void ftrace_graph_exit_task(struct task_struct *t) { }

+static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+					trace_func_graph_ent_t entryfunc)
+{
+	return -1;
+}
+static inline void unregister_ftrace_graph(void) { }
+
 static inline int task_curr_ret_stack(struct task_struct *tsk)
 {
	return -1;
@@ -492,7 +505,9 @@ static inline int test_tsk_trace_graph(struct task_struct *tsk)
	return tsk->trace & TSK_TRACE_FL_GRAPH;
 }

-extern int ftrace_dump_on_oops;
+enum ftrace_dump_mode;
+
+extern enum ftrace_dump_mode ftrace_dump_on_oops;

 #ifdef CONFIG_PREEMPT
 #define INIT_TRACE_RECURSION		.trace_recursion = 0,
@@ -504,18 +519,6 @@ extern int ftrace_dump_on_oops;
 #define INIT_TRACE_RECURSION
 #endif

-#ifdef CONFIG_HW_BRANCH_TRACER
-
-void trace_hw_branch(u64 from, u64 to);
-void trace_hw_branch_oops(void);
-
-#else /* CONFIG_HW_BRANCH_TRACER */
-
-static inline void trace_hw_branch(u64 from, u64 to) {}
-static inline void trace_hw_branch_oops(void) {}
-
-#endif /* CONFIG_HW_BRANCH_TRACER */
-
 #ifdef CONFIG_FTRACE_SYSCALLS

 unsigned long arch_syscall_addr(int nr);
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index c0f4b364c71..39e71b0a3bf 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -58,6 +58,7 @@ struct trace_iterator {
 	/* The below is zeroed out in pipe_read */
 	struct trace_seq	seq;
 	struct trace_entry	*ent;
+	unsigned long		lost_events;
 	int			leftover;
 	int			cpu;
 	u64			ts;
diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h
index c70d27af03f..a2d6ea49ec5 100644
--- a/include/linux/hw_breakpoint.h
+++ b/include/linux/hw_breakpoint.h
@@ -9,9 +9,22 @@ enum {
 };

 enum {
-	HW_BREAKPOINT_R = 1,
-	HW_BREAKPOINT_W = 2,
-	HW_BREAKPOINT_X = 4,
+	HW_BREAKPOINT_EMPTY	= 0,
+	HW_BREAKPOINT_R		= 1,
+	HW_BREAKPOINT_W		= 2,
+	HW_BREAKPOINT_RW	= HW_BREAKPOINT_R | HW_BREAKPOINT_W,
+	HW_BREAKPOINT_X		= 4,
+	HW_BREAKPOINT_INVALID	= HW_BREAKPOINT_RW | HW_BREAKPOINT_X,
+};
+
+enum bp_type_idx {
+	TYPE_INST	= 0,
+#ifdef CONFIG_HAVE_MIXED_BREAKPOINTS_REGS
+	TYPE_DATA	= 0,
+#else
+	TYPE_DATA	= 1,
+#endif
+	TYPE_MAX
 };

 #ifdef __KERNEL__
@@ -34,6 +47,12 @@ static inline void hw_breakpoint_init(struct perf_event_attr *attr)
 	attr->sample_period = 1;
 }

+static inline void ptrace_breakpoint_init(struct perf_event_attr *attr)
+{
+	hw_breakpoint_init(attr);
+	attr->exclude_kernel = 1;
+}
+
 static inline unsigned long hw_breakpoint_addr(struct perf_event *bp)
 {
 	return bp->attr.bp_addr;
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index b1ed1cd8e2a..7996fc2c9ba 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -49,7 +49,6 @@ extern struct group_info init_groups;
 	{ .first = &init_task.pids[PIDTYPE_PGID].node },		\
 	{ .first = &init_task.pids[PIDTYPE_SID].node },			\
 	},								\
-	.rcu		= RCU_HEAD_INIT,				\
 	.level		= 0,						\
 	.numbers	= { {						\
 		.nr		= 0,					\
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 3af4ffd591b..be22ad83689 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -37,9 +37,9 @@ struct iommu_ops {
 	int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
 	void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
 	int (*map)(struct iommu_domain *domain, unsigned long iova,
-		   phys_addr_t paddr, size_t size, int prot);
-	void (*unmap)(struct iommu_domain *domain, unsigned long iova,
-		      size_t size);
+		   phys_addr_t paddr, int gfp_order, int prot);
+	int (*unmap)(struct iommu_domain *domain, unsigned long iova,
+		     int gfp_order);
 	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
				    unsigned long iova);
 	int (*domain_has_cap)(struct iommu_domain *domain,
@@ -56,10 +56,10 @@ extern int iommu_attach_device(struct iommu_domain *domain,
			       struct device *dev);
 extern void iommu_detach_device(struct iommu_domain *domain,
				struct device *dev);
-extern int iommu_map_range(struct iommu_domain *domain, unsigned long iova,
-			   phys_addr_t paddr, size_t size, int prot);
-extern void iommu_unmap_range(struct iommu_domain *domain, unsigned long iova,
-			      size_t size);
+extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
+		     phys_addr_t paddr, int gfp_order, int prot);
+extern int iommu_unmap(struct iommu_domain *domain, unsigned long iova,
+		       int gfp_order);
 extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
				      unsigned long iova);
 extern int iommu_domain_has_cap(struct iommu_domain *domain,
@@ -96,16 +96,16 @@ static inline void iommu_detach_device(struct iommu_domain *domain,
 {
 }

-static inline int iommu_map_range(struct iommu_domain *domain,
-				  unsigned long iova, phys_addr_t paddr,
-				  size_t size, int prot)
+static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
+			    phys_addr_t paddr, int gfp_order, int prot)
 {
	return -ENODEV;
 }

-static inline void iommu_unmap_range(struct iommu_domain *domain,
-				     unsigned long iova, size_t size)
+static inline int iommu_unmap(struct iommu_domain *domain, unsigned long iova,
+			      int gfp_order)
 {
+	return -ENODEV;
 }

 static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index a38d6bd6fde..fc33af91185 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -492,6 +492,13 @@ static inline void tracing_off(void) { }
 static inline void tracing_off_permanent(void) { }
 static inline int tracing_is_on(void) { return 0; }
 #endif
+
+enum ftrace_dump_mode {
+	DUMP_NONE,
+	DUMP_ALL,
+	DUMP_ORIG,
+};
+
 #ifdef CONFIG_TRACING
 extern void tracing_start(void);
 extern void tracing_stop(void);
@@ -573,7 +580,7 @@ __ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);
 extern int
 __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);

-extern void ftrace_dump(void);
+extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
 #else
 static inline void
 ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
@@ -594,7 +601,7 @@ ftrace_vprintk(const char *fmt, va_list ap)
 {
	return 0;
 }
-static inline void ftrace_dump(void) { }
+static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
 #endif /* CONFIG_TRACING */

 /*
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 462acaf36f3..fb19bb92b80 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -19,7 +19,6 @@ struct anon_vma;
 struct file_ra_state;
 struct user_struct;
 struct writeback_control;
-struct rlimit;

 #ifndef CONFIG_DISCONTIGMEM          /* Don't use mapnrs, do it properly */
 extern unsigned long max_mapnr;
@@ -1449,9 +1448,6 @@ int vmemmap_populate_basepages(struct page *start_page,
 int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
 void vmemmap_populate_print_last(void);

-extern int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim,
-				 size_t size);
-extern void refund_locked_memory(struct mm_struct *mm, size_t size);

 enum mf_flags {
 	MF_COUNT_INCREASED = 1 << 0,
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 55f1f9c9506..007fbaafead 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -500,4 +500,13 @@ struct mdio_device_id {
 	__u32 phy_id_mask;
 };

+struct zorro_device_id {
+	__u32 id;			/* Device ID or ZORRO_WILDCARD */
+	kernel_ulong_t driver_data;	/* Data private to the driver */
+};
+
+#define ZORRO_WILDCARD			(0xffffffff)	/* not official */
+
+#define ZORRO_DEVICE_MODALIAS_FMT	"zorro:i%08X"
+
 #endif /* LINUX_MOD_DEVICETABLE_H */
diff --git a/include/linux/module.h b/include/linux/module.h
index 515d53ae6a7..6914fcad467 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -465,8 +465,7 @@ static inline void __module_get(struct module *module)
 	if (module) {
 		preempt_disable();
 		__this_cpu_inc(module->refptr->incs);
-		trace_module_get(module, _THIS_IP_,
-				 __this_cpu_read(module->refptr->incs));
+		trace_module_get(module, _THIS_IP_);
 		preempt_enable();
 	}
 }
@@ -480,8 +479,7 @@ static inline int try_module_get(struct module *module)

 		if (likely(module_is_live(module))) {
 			__this_cpu_inc(module->refptr->incs);
-			trace_module_get(module, _THIS_IP_,
-					 __this_cpu_read(module->refptr->incs));
+			trace_module_get(module, _THIS_IP_);
 		} else
			ret = 0;
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index c8e37544040..3fd5c82e0e1 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -203,8 +203,19 @@ struct perf_event_attr {
				enable_on_exec :  1, /* next exec enables     */
				task           :  1, /* trace fork/exit       */
				watermark      :  1, /* wakeup_watermark      */
-
-				__reserved_1   : 49;
+				/*
+				 * precise_ip:
+				 *
+				 *  0 - SAMPLE_IP can have arbitrary skid
+				 *  1 - SAMPLE_IP must have constant skid
+				 *  2 - SAMPLE_IP requested to have 0 skid
+				 *  3 - SAMPLE_IP must have 0 skid
+				 *
+				 *  See also PERF_RECORD_MISC_EXACT_IP
+				 */
+				precise_ip     :  2, /* skid constraint       */
+
+				__reserved_1   : 47;

 	union {
		__u32		wakeup_events;	  /* wakeup every n events */
@@ -287,11 +298,24 @@ struct perf_event_mmap_page {
	__u64	data_tail;			/* user-space written tail */
 };

-#define PERF_RECORD_MISC_CPUMODE_MASK		(3 << 0)
+#define PERF_RECORD_MISC_CPUMODE_MASK		(7 << 0)
 #define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0)
 #define PERF_RECORD_MISC_KERNEL		(1 << 0)
 #define PERF_RECORD_MISC_USER			(2 << 0)
 #define PERF_RECORD_MISC_HYPERVISOR		(3 << 0)
+#define PERF_RECORD_MISC_GUEST_KERNEL		(4 << 0)
+#define PERF_RECORD_MISC_GUEST_USER		(5 << 0)
+
+/*
+ * Indicates that the content of PERF_SAMPLE_IP points to
+ * the actual instruction that triggered the event. See also
+ * perf_event_attr::precise_ip.
+ */
+#define PERF_RECORD_MISC_EXACT_IP		(1 << 14)
+/*
+ * Reserve the last bit to indicate some extended misc field
+ */
+#define PERF_RECORD_MISC_EXT_RESERVED		(1 << 15)

 struct perf_event_header {
	__u32	type;
@@ -439,6 +463,12 @@ enum perf_callchain_context {
 # include <asm/perf_event.h>
 #endif

+struct perf_guest_info_callbacks {
+	int (*is_in_guest) (void);
+	int (*is_user_mode) (void);
+	unsigned long (*get_guest_ip) (void);
+};
+
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
 #include <asm/hw_breakpoint.h>
 #endif
@@ -468,6 +498,17 @@ struct perf_raw_record {
	void				*data;
 };

+struct perf_branch_entry {
+	__u64				from;
+	__u64				to;
+	__u64				flags;
+};
+
+struct perf_branch_stack {
+	__u64				nr;
+	struct perf_branch_entry	entries[0];
+};
+
 struct task_struct;

 /**
@@ -506,6 +547,8 @@ struct hw_perf_event {

 struct perf_event;

+#define PERF_EVENT_TXN_STARTED 1
+
 /**
  * struct pmu - generic performance monitoring unit
  */
@@ -516,6 +559,16 @@ struct pmu {
	void (*stop)			(struct perf_event *event);
	void (*read)			(struct perf_event *event);
	void (*unthrottle)		(struct perf_event *event);
+
+	/*
+	 * group events scheduling is treated as a transaction,
+	 * add group events as a whole and perform one schedulability test.
+	 * If test fails, roll back the whole group
+	 */
+
+	void (*start_txn)	(const struct pmu *pmu);
+	void (*cancel_txn)	(const struct pmu *pmu);
+	int  (*commit_txn)	(const struct pmu *pmu);
 };

 /**
@@ -571,6 +624,14 @@ enum perf_group_flag {
	PERF_GROUP_SOFTWARE = 0x1,
 };

+#define SWEVENT_HLIST_BITS	8
+#define SWEVENT_HLIST_SIZE	(1 << SWEVENT_HLIST_BITS)
+
+struct swevent_hlist {
+	struct hlist_head	heads[SWEVENT_HLIST_SIZE];
+	struct rcu_head		rcu_head;
+};
+
 /**
  * struct perf_event - performance event kernel representation:
  */
@@ -579,6 +640,7 @@ struct perf_event {
	struct list_head		group_entry;
	struct list_head		event_entry;
	struct list_head		sibling_list;
+	struct hlist_node		hlist_entry;
	int				nr_siblings;
	int				group_flags;
	struct perf_event		*group_leader;
@@ -726,6 +788,9 @@ struct perf_cpu_context {
	int				active_oncpu;
	int				max_pertask;
	int				exclusive;
+	struct swevent_hlist		*swevent_hlist;
+	struct mutex			hlist_mutex;
+	int				hlist_refcount;

	/*
	 * Recursion avoidance:
@@ -769,9 +834,6 @@ extern void perf_disable(void);
 extern void perf_enable(void);
 extern int perf_event_task_disable(void);
 extern int perf_event_task_enable(void);
-extern int hw_perf_group_sched_in(struct perf_event *group_leader,
-	       struct perf_cpu_context *cpuctx,
-	       struct perf_event_context *ctx);
 extern void perf_event_update_userpage(struct perf_event *event);
 extern int perf_event_release_kernel(struct perf_event *event);
 extern struct perf_event *
@@ -902,6 +964,10 @@ static inline void perf_event_mmap(struct vm_area_struct *vma)
		__perf_event_mmap(vma);
 }

+extern struct perf_guest_info_callbacks *perf_guest_cbs;
+extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
+extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
+
 extern void perf_event_comm(struct task_struct *tsk);
 extern void perf_event_fork(struct task_struct *tsk);

@@ -971,6 +1037,11 @@ perf_sw_event(u32 event_id, u64 nr, int nmi,
 static inline void
 perf_bp_event(struct perf_event *event, void *data)			{ }

+static inline int perf_register_guest_info_callbacks
+(struct perf_guest_info_callbacks *callbacks) { return 0; }
+static inline int perf_unregister_guest_info_callbacks
+(struct perf_guest_info_callbacks *callbacks) { return 0; }
+
 static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }
 static inline void perf_event_comm(struct task_struct *tsk)		{ }
 static inline void perf_event_fork(struct task_struct *tsk)		{ }
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 212da17d06a..5417944d368 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -44,12 +44,14 @@ extern int platform_get_irq_byname(struct platform_device *, const char *);
 extern int platform_add_devices(struct platform_device **, int);

 extern struct platform_device *platform_device_register_simple(const char *, int id,
-					struct resource *, unsigned int);
+					const struct resource *, unsigned int);
 extern struct platform_device *platform_device_register_data(struct device *,
		const char *, int, const void *, size_t);

 extern struct platform_device *platform_device_alloc(const char *name, int id);
-extern int platform_device_add_resources(struct platform_device *pdev, struct resource *res, unsigned int num);
+extern int platform_device_add_resources(struct platform_device *pdev,
					 const struct resource *res,
					 unsigned int num);
 extern int platform_device_add_data(struct platform_device *pdev, const void *data, size_t size);
 extern int platform_device_add(struct platform_device *pdev);
 extern void platform_device_del(struct platform_device *pdev);
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index e1fb6072997..4272521e29e 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -345,18 +345,6 @@ static inline void user_single_step_siginfo(struct task_struct *tsk,
 #define arch_ptrace_stop(code, info)		do { } while (0)
 #endif

-#ifndef arch_ptrace_untrace
-/*
- * Do machine-specific work before untracing child.
- *
- * This is called for a normal detach as well as from ptrace_exit()
- * when the tracing task dies.
- *
- * Called with write_lock(&tasklist_lock) held.
- */
-#define arch_ptrace_untrace(task)		do { } while (0)
-#endif
-
 extern int task_current_syscall(struct task_struct *target, long *callno,
				unsigned long args[6], unsigned int maxargs,
				unsigned long *sp, unsigned long *pc);
diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
index 5210a5c6087..fe1872e5b37 100644
--- a/include/linux/rbtree.h
+++ b/include/linux/rbtree.h
@@ -110,6 +110,7 @@ struct rb_node
 struct rb_root
 {
	struct rb_node *rb_node;
+	void (*augment_cb)(struct rb_node *node);
 };

@@ -129,7 +130,9 @@ static inline void rb_set_color(struct rb_node *rb, int color)
	rb->rb_parent_color = (rb->rb_parent_color & ~1) | color;
 }

-#define RB_ROOT	(struct rb_root) { NULL, }
+#define RB_ROOT	(struct rb_root) { NULL, NULL, }
+#define RB_AUGMENT_ROOT(x)	(struct rb_root) { NULL, x}
+
 #define	rb_entry(ptr, type, member) container_of(ptr, type, member)

 #define RB_EMPTY_ROOT(root)	((root)->rb_node == NULL)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index db266bbed23..b653b4aaa8a 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -56,8 +56,6 @@ struct rcu_head {
 };

 /* Exported common interfaces */
-extern void synchronize_rcu_bh(void);
-extern void synchronize_sched(void);
 extern void rcu_barrier(void);
 extern void rcu_barrier_bh(void);
 extern void rcu_barrier_sched(void);
@@ -66,8 +64,6 @@ extern int sched_expedited_torture_stats(char *page);

 /* Internal to kernel */
 extern void rcu_init(void);
-extern int rcu_scheduler_active;
-extern void rcu_scheduler_starting(void);

 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
 #include <linux/rcutree.h>
@@ -83,6 +79,14 @@ extern void rcu_scheduler_starting(void);
	(ptr)->next = NULL; (ptr)->func = NULL; \
 } while (0)

+static inline void init_rcu_head_on_stack(struct rcu_head *head)
+{
+}
+
+static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
+{
+}
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC

 extern struct lockdep_map rcu_lock_map;
@@ -106,12 +110,13 @@ extern int debug_lockdep_rcu_enabled(void);
 /**
  * rcu_read_lock_held - might we be in RCU read-side critical section?
  *
- * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in
- * an RCU read-side critical section.  In absence of CONFIG_PROVE_LOCKING,
+ * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
+ * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
 * this assumes we are in an RCU read-side critical section unless it can
 * prove otherwise.
 *
- * Check rcu_scheduler_active to prevent false positives during boot.
+ * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
+ * and while lockdep is disabled.
 */
 static inline int rcu_read_lock_held(void)
 {
@@ -129,13 +134,15 @@ extern int rcu_read_lock_bh_held(void);
 /**
  * rcu_read_lock_sched_held - might we be in RCU-sched read-side critical section?
  *
- * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in an
- * RCU-sched read-side critical section.  In absence of CONFIG_PROVE_LOCKING,
- * this assumes we are in an RCU-sched read-side critical section unless it
- * can prove otherwise.  Note that disabling of preemption (including
- * disabling irqs) counts as an RCU-sched read-side critical section.
+ * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
+ * RCU-sched read-side critical section.  In absence of
+ * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
+ * critical section unless it can prove otherwise.  Note that disabling
+ * of preemption (including disabling irqs) counts as an RCU-sched
+ * read-side critical section.
 *
- * Check rcu_scheduler_active to prevent false positives during boot.
+ * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
+ * and while lockdep is disabled.
 */
 #ifdef CONFIG_PREEMPT
 static inline int rcu_read_lock_sched_held(void)
@@ -177,7 +184,7 @@ static inline int rcu_read_lock_bh_held(void)
 #ifdef CONFIG_PREEMPT
 static inline int rcu_read_lock_sched_held(void)
 {
-	return !rcu_scheduler_active || preempt_count() != 0 || irqs_disabled();
+	return preempt_count() != 0 || irqs_disabled();
 }
 #else /* #ifdef CONFIG_PREEMPT */
 static inline int rcu_read_lock_sched_held(void)
@@ -192,6 +199,15 @@ static inline int rcu_read_lock_sched_held(void)

 extern int rcu_my_thread_group_empty(void);

+#define __do_rcu_dereference_check(c)					\
+	do {								\
+		static bool __warned;					\
+		if (debug_lockdep_rcu_enabled() && !__warned && !(c)) {	\
+			__warned = true;				\
+			lockdep_rcu_dereference(__FILE__, __LINE__);	\
+		}							\
+	} while (0)
+
 /**
  * rcu_dereference_check - rcu_dereference with debug checking
  * @p: The pointer to read, prior to dereferencing
@@ -221,8 +237,7 @@ extern int rcu_my_thread_group_empty(void);
 */
 #define rcu_dereference_check(p, c) \
	({ \
-		if (debug_lockdep_rcu_enabled() && !(c)) \
-			lockdep_rcu_dereference(__FILE__, __LINE__); \
+		__do_rcu_dereference_check(c); \
		rcu_dereference_raw(p); \
	})

@@ -239,8 +254,7 @@ extern int rcu_my_thread_group_empty(void);
 */
 #define rcu_dereference_protected(p, c) \
	({ \
-		if (debug_lockdep_rcu_enabled() && !(c)) \
-			lockdep_rcu_dereference(__FILE__, __LINE__); \
+		__do_rcu_dereference_check(c); \
		(p); \
	})
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index a5195875480..e2e893144a8 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -29,6 +29,10 @@
 void rcu_sched_qs(int cpu);
 void rcu_bh_qs(int cpu);

+static inline void rcu_note_context_switch(int cpu)
+{
+	rcu_sched_qs(cpu);
+}

 #define __rcu_read_lock()	preempt_disable()
 #define __rcu_read_unlock()	preempt_enable()
@@ -60,8 +64,6 @@ static inline long rcu_batches_completed_bh(void)
	return 0;
 }

-extern int rcu_expedited_torture_stats(char *page);
-
 static inline void rcu_force_quiescent_state(void)
 {
 }
@@ -74,7 +76,17 @@ static inline void rcu_sched_force_quiescent_state(void)
 {
 }

-#define synchronize_rcu synchronize_sched
+extern void synchronize_sched(void);
+
+static inline void synchronize_rcu(void)
+{
+	synchronize_sched();
+}
+
+static inline void synchronize_rcu_bh(void)
+{
+	synchronize_sched();
+}

 static inline void synchronize_rcu_expedited(void)
 {
@@ -114,4 +126,17 @@ static inline int rcu_preempt_depth(void)
	return 0;
 }

+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+
+extern int rcu_scheduler_active __read_mostly;
+extern void rcu_scheduler_starting(void);
+
+#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
+static inline void rcu_scheduler_starting(void)
+{
+}
+
+#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
 #endif /* __LINUX_RCUTINY_H */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 42cc3a04779..c0ed1c056f2 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -34,8 +34,8 @@ struct notifier_block;

 extern void rcu_sched_qs(int cpu);
 extern void rcu_bh_qs(int cpu);
+extern void rcu_note_context_switch(int cpu);
 extern int rcu_needs_cpu(int cpu);
-extern int rcu_expedited_torture_stats(char *page);

 #ifdef CONFIG_TREE_PREEMPT_RCU

@@ -86,6 +86,8 @@ static inline void __rcu_read_unlock_bh(void)
 extern void call_rcu_sched(struct rcu_head *head,
			   void (*func)(struct rcu_head *rcu));
+extern void synchronize_rcu_bh(void);
+extern void synchronize_sched(void);
 extern void synchronize_rcu_expedited(void);

 static inline void synchronize_rcu_bh_expedited(void)
@@ -120,4 +122,7 @@ static inline int rcu_blocking_is_gp(void)
	return num_online_cpus() == 1;
 }

+extern void rcu_scheduler_starting(void);
+extern int rcu_scheduler_active __read_mostly;
+
 #endif /* __LINUX_RCUTREE_H */
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 5fcc31ed577..25b4f686d91 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -120,12 +120,16 @@ int ring_buffer_write(struct ring_buffer *buffer,
		      unsigned long length, void *data);

 struct ring_buffer_event *
-ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts);
+ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
+		 unsigned long *lost_events);
 struct ring_buffer_event *
-ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts);
+ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
+		    unsigned long *lost_events);

 struct ring_buffer_iter *
-ring_buffer_read_start(struct ring_buffer *buffer, int cpu);
+ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu);
+void ring_buffer_read_prepare_sync(void);
+void ring_buffer_read_start(struct ring_buffer_iter *iter);
 void ring_buffer_read_finish(struct ring_buffer_iter *iter);

 struct ring_buffer_event *
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2b7b81df78b..b55e988988b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -99,7 +99,6 @@ struct futex_pi_state;
 struct robust_list_head;
 struct bio_list;
 struct fs_struct;
-struct bts_context;
 struct perf_event_context;

 /*
@@ -275,11 +274,17 @@ extern cpumask_var_t nohz_cpu_mask;
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
 extern int select_nohz_load_balancer(int cpu);
 extern int get_nohz_load_balancer(void);
+extern int nohz_ratelimit(int cpu);
 #else
 static inline int select_nohz_load_balancer(int cpu)
 {
	return 0;
 }
+
+static inline int nohz_ratelimit(int cpu)
+{
+	return 0;
+}
 #endif

 /*
@@ -954,6 +959,7 @@ struct sched_domain {
	char *name;
 #endif

+	unsigned int span_weight;
	/*
	 * Span of all CPUs in this domain.
	 *
@@ -1026,12 +1032,17 @@ struct sched_domain;
 #define WF_SYNC		0x01		/* waker goes to sleep after wakup */
 #define WF_FORK		0x02		/* child wakeup after fork */

+#define ENQUEUE_WAKEUP		1
+#define ENQUEUE_WAKING		2
+#define ENQUEUE_HEAD		4
+
+#define DEQUEUE_SLEEP		1
+
 struct sched_class {
	const struct sched_class *next;

-	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup,
-			      bool head);
-	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
+	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
+	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*yield_task) (struct rq *rq);

	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
@@ -1040,7 +1051,8 @@ struct sched_class {
	void (*put_prev_task) (struct rq *rq, struct task_struct *p);

 #ifdef CONFIG_SMP
-	int  (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
+	int  (*select_task_rq)(struct rq *rq, struct task_struct *p,
+			       int sd_flag, int flags);

	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
	void (*post_schedule) (struct rq *this_rq);
@@ -1077,36 +1089,8 @@ struct load_weight {
	unsigned long weight, inv_weight;
 };

-/*
- * CFS stats for a schedulable entity (task, task-group etc)
- *
- * Current field usage histogram:
- *
- *     4 se->block_start
- *     4 se->run_node
- *     4 se->sleep_start
- *     6 se->load.weight
- */
-struct sched_entity {
-	struct load_weight	load;		/* for load-balancing */
-	struct rb_node		run_node;
-	struct list_head	group_node;
-	unsigned int		on_rq;
-
-	u64			exec_start;
-	u64			sum_exec_runtime;
-	u64			vruntime;
-	u64			prev_sum_exec_runtime;
-
-	u64			last_wakeup;
-	u64			avg_overlap;
-
-	u64			nr_migrations;
-
-	u64			start_runtime;
-	u64			avg_wakeup;
-
 #ifdef CONFIG_SCHEDSTATS
+struct sched_statistics {
	u64			wait_start;
	u64			wait_max;
	u64			wait_count;
@@ -1138,6 +1122,24 @@ struct sched_entity {
	u64			nr_wakeups_affine_attempts;
	u64			nr_wakeups_passive;
	u64			nr_wakeups_idle;
+};
+#endif
+
+struct sched_entity {
+	struct load_weight	load;		/* for load-balancing */
+	struct rb_node		run_node;
+	struct list_head	group_node;
+	unsigned int		on_rq;
+
+	u64			exec_start;
+	u64			sum_exec_runtime;
+	u64			vruntime;
+	u64			prev_sum_exec_runtime;
+
+	u64			nr_migrations;
+
+#ifdef CONFIG_SCHEDSTATS
+	struct sched_statistics statistics;
 #endif

 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -1272,12 +1274,6 @@ struct task_struct {
	struct list_head ptraced;
	struct list_head ptrace_entry;

-	/*
-	 * This is the tracer handle for the ptrace BTS extension.
-	 * This field actually belongs to the ptracer task.
-	 */
-	struct bts_context *bts;
-
	/* PID/PID hash table linkage. */
	struct pid_link pids[PIDTYPE_MAX];
	struct list_head thread_group;
@@ -1846,6 +1842,7 @@ extern void sched_clock_idle_sleep_event(void);
 extern void sched_clock_idle_wakeup_event(u64 delta_ns);

 #ifdef CONFIG_HOTPLUG_CPU
+extern void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p);
 extern void idle_task_exit(void);
 #else
 static inline void idle_task_exit(void) {}
@@ -2122,10 +2119,8 @@ extern void set_task_comm(struct task_struct *tsk, char *from);
 extern char *get_task_comm(char *to, struct task_struct *tsk);

 #ifdef CONFIG_SMP
-extern void wait_task_context_switch(struct task_struct *p);
 extern unsigned long wait_task_inactive(struct task_struct *,
					long match_state);
 #else
-static inline void wait_task_context_switch(struct task_struct *p) {}
 static inline unsigned long wait_task_inactive(struct task_struct *p,
					       long match_state)
 {
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 4d5ecb222af..4d5d2f546db 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -27,6 +27,8 @@
 #ifndef _LINUX_SRCU_H
 #define _LINUX_SRCU_H

+#include <linux/mutex.h>
+
 struct srcu_struct_array {
	int c[2];
 };
@@ -84,8 +86,8 @@ long srcu_batches_completed(struct srcu_struct *sp);
 /**
  * srcu_read_lock_held - might we be in SRCU read-side critical section?
  *
- * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in
- * an SRCU read-side critical section.  In absence of CONFIG_PROVE_LOCKING,
+ * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU
+ * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
 * this assumes we are in an SRCU read-side critical section unless it can
 * prove otherwise.
 */
diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
index baba3a23a81..6b524a0d02e 100644
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
@@ -1,13 +1,101 @@
 #ifndef _LINUX_STOP_MACHINE
 #define _LINUX_STOP_MACHINE
-/* "Bogolock": stop the entire machine, disable interrupts.  This is a
-   very heavy lock, which is equivalent to grabbing every spinlock
-   (and more).  So the "read" side to such a lock is anything which
-   disables preeempt. */
+
 #include <linux/cpu.h>
 #include <linux/cpumask.h>
+#include <linux/list.h>
 #include <asm/system.h>

+/*
+ * stop_cpu[s]() is simplistic per-cpu maximum priority cpu
+ * monopolization mechanism.  The caller can specify a non-sleeping
+ * function to be executed on a single or multiple cpus preempting all
+ * other processes and monopolizing those cpus until it finishes.
+ *
+ * Resources for this mechanism are preallocated when a cpu is brought
+ * up and requests are guaranteed to be served as long as the target
+ * cpus are online.
+ */
+typedef int (*cpu_stop_fn_t)(void *arg);
+
+#ifdef CONFIG_SMP
+
+struct cpu_stop_work {
+	struct list_head	list;		/* cpu_stopper->works */
+	cpu_stop_fn_t		fn;
+	void			*arg;
+	struct cpu_stop_done	*done;
+};
+
+int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg);
+void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
+			 struct cpu_stop_work *work_buf);
+int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);
+int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);
+
+#else	/* CONFIG_SMP */
+
+#include <linux/workqueue.h>
+
+struct cpu_stop_work {
+	struct work_struct	work;
+	cpu_stop_fn_t		fn;
+	void			*arg;
+};
+
+static inline int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
+{
+	int ret = -ENOENT;
+	preempt_disable();
+	if (cpu == smp_processor_id())
+		ret = fn(arg);
+	preempt_enable();
+	return ret;
+}
+
+static void stop_one_cpu_nowait_workfn(struct work_struct *work)
+{
+	struct cpu_stop_work *stwork =
+		container_of(work, struct cpu_stop_work, work);
+	preempt_disable();
+	stwork->fn(stwork->arg);
+	preempt_enable();
+}
+
+static inline void stop_one_cpu_nowait(unsigned int cpu,
+				       cpu_stop_fn_t fn, void *arg,
+				       struct cpu_stop_work *work_buf)
+{
+	if (cpu == smp_processor_id()) {
+		INIT_WORK(&work_buf->work, stop_one_cpu_nowait_workfn);
+		work_buf->fn = fn;
+		work_buf->arg = arg;
+		schedule_work(&work_buf->work);
+	}
+}
+
+static inline int stop_cpus(const struct cpumask *cpumask,
+			    cpu_stop_fn_t fn, void *arg)
+{
+	if (cpumask_test_cpu(raw_smp_processor_id(), cpumask))
+		return stop_one_cpu(raw_smp_processor_id(), fn, arg);
+	return -ENOENT;
+}
+
+static inline int try_stop_cpus(const struct cpumask *cpumask,
+				cpu_stop_fn_t fn, void *arg)
+{
+	return stop_cpus(cpumask, fn, arg);
+}
+
+#endif	/* CONFIG_SMP */
+
+/*
+ * stop_machine "Bogolock": stop the entire machine, disable
+ * interrupts.  This is a very heavy lock, which is equivalent to
+ * grabbing every spinlock (and more).  So the "read" side to such a
+ * lock is anything which disables preeempt.
+ */
 #if defined(CONFIG_STOP_MACHINE) && defined(CONFIG_SMP)

 /**
@@ -36,24 +124,7 @@ int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
 */
 int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);

-/**
- * stop_machine_create: create all stop_machine threads
- *
- * Description: This causes all stop_machine threads to be created before
- * stop_machine actually gets called.  This can be used by subsystems that
- * need a non failing stop_machine infrastructure.
- */
-int stop_machine_create(void);
-
-/**
- * stop_machine_destroy: destroy all stop_machine threads
- *
- * Description: This causes all stop_machine threads which were created with
- * stop_machine_create to be destroyed again.
- */
-void stop_machine_destroy(void);
-
-#else
+#else	 /* CONFIG_STOP_MACHINE && CONFIG_SMP */

 static inline int stop_machine(int (*fn)(void *), void *data,
			       const struct cpumask *cpus)
@@ -65,8 +136,5 @@ static inline int stop_machine(int (*fn)(void *), void *data,
	return ret;
 }

-static inline int stop_machine_create(void) { return 0; }
-static inline void stop_machine_destroy(void) { }
-
-#endif /* CONFIG_SMP */
-#endif /* _LINUX_STOP_MACHINE */
+#endif	/* CONFIG_STOP_MACHINE && CONFIG_SMP */
+#endif	/* _LINUX_STOP_MACHINE */
diff --git a/include/linux/tick.h b/include/linux/tick.h
index d2ae79e21be..b232ccc0ee2 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -42,6 +42,7 @@ enum tick_nohz_mode {
 * @idle_waketime:	Time when the idle was interrupted
 * @idle_exittime:	Time when the idle state was left
 * @idle_sleeptime:	Sum of the time slept in idle with sched tick stopped
+ * @iowait_sleeptime:	Sum of the time slept in idle with sched tick stopped, with IO outstanding
 * @sleep_length:	Duration of the current idle sleep
 * @do_timer_lst:	CPU was the last one doing do_timer before going idle
 */
@@ -60,7 +61,7 @@ struct tick_sched {
	ktime_t				idle_waketime;
	ktime_t				idle_exittime;
	ktime_t				idle_sleeptime;
-	ktime_t				idle_lastupdate;
+	ktime_t				iowait_sleeptime;
	ktime_t				sleep_length;
	unsigned long			last_jiffies;
	unsigned long			next_jiffies;
@@ -124,6 +125,7 @@ extern void tick_nohz_stop_sched_tick(int inidle);
 extern void tick_nohz_restart_sched_tick(void);
 extern ktime_t tick_nohz_get_sleep_length(void);
 extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
+extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
 # else
 static inline void tick_nohz_stop_sched_tick(int inidle) { }
 static inline void tick_nohz_restart_sched_tick(void) { }
@@ -134,6 +136,7 @@ static inline ktime_t tick_nohz_get_sleep_length(void)
	return len;
 }
 static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
+static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
 # endif /* !NO_HZ */

 #endif
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 78b4bd3be49..1d85f9a6a19 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -33,6 +33,65 @@ struct tracepoint {
 * Keep in sync with vmlinux.lds.h.
 */

+/*
+ * Connect a probe to a tracepoint.
+ * Internal API, should not be used directly.
+ */
+extern int tracepoint_probe_register(const char *name, void *probe);
+
+/*
+ * Disconnect a probe from a tracepoint.
+ * Internal API, should not be used directly.
+ */
+extern int tracepoint_probe_unregister(const char *name, void *probe);
+
+extern int tracepoint_probe_register_noupdate(const char *name, void *probe);
+extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe);
+extern void tracepoint_probe_update_all(void);
+
+struct tracepoint_iter {
+	struct module *module;
+	struct tracepoint *tracepoint;
+};
+
+extern void tracepoint_iter_start(struct tracepoint_iter *iter);
+extern void tracepoint_iter_next(struct tracepoint_iter *iter);
+extern void tracepoint_iter_stop(struct tracepoint_iter *iter);
+extern void tracepoint_iter_reset(struct tracepoint_iter *iter);
+extern int tracepoint_get_iter_range(struct tracepoint **tracepoint,
+	struct tracepoint *begin, struct tracepoint *end);
+
+/*
+ * tracepoint_synchronize_unregister must be called between the last tracepoint
+ * probe unregistration and the end of module exit to make sure there is no
+ * caller executing a probe when it is freed.
+ */
+static inline void tracepoint_synchronize_unregister(void)
+{
+	synchronize_sched();
+}
+
+#define PARAMS(args...) args
+
+#ifdef CONFIG_TRACEPOINTS
+extern void tracepoint_update_probe_range(struct tracepoint *begin,
+	struct tracepoint *end);
+#else
+static inline void tracepoint_update_probe_range(struct tracepoint *begin,
+	struct tracepoint *end)
+{ }
+#endif /* CONFIG_TRACEPOINTS */
+
+#endif /* _LINUX_TRACEPOINT_H */
+
+/*
+ * Note: we keep the TRACE_EVENT and DECLARE_TRACE outside the include
+ * file ifdef protection.
+ *  This is due to the way trace events work. If a file includes two
+ * trace event headers under one "CREATE_TRACE_POINTS" the first include
+ * will override the TRACE_EVENT and break the second include.
+ */
+
 #ifndef DECLARE_TRACE
 #define TP_PROTO(args...)	args

@@ -96,9 +155,6 @@ struct tracepoint {
 #define EXPORT_TRACEPOINT_SYMBOL(name)					\
	EXPORT_SYMBOL(__tracepoint_##name)

-extern void tracepoint_update_probe_range(struct tracepoint *begin,
-	struct tracepoint *end);
-
 #else /* !CONFIG_TRACEPOINTS */

 #define DECLARE_TRACE(name, proto, args)				\
	static inline void _do_trace_##name(struct tracepoint *tp, proto) \
@@ -119,61 +175,9 @@ extern void tracepoint_update_probe_range(struct tracepoint *begin,
 #define EXPORT_TRACEPOINT_SYMBOL_GPL(name)
 #define EXPORT_TRACEPOINT_SYMBOL(name)

-static inline void tracepoint_update_probe_range(struct tracepoint *begin,
-	struct tracepoint *end)
-{ }
 #endif /* CONFIG_TRACEPOINTS */
 #endif /* DECLARE_TRACE */

-/*
- * Connect a probe to a tracepoint.
- * Internal API, should not be used directly.
- */
-extern int tracepoint_probe_register(const char *name, void *probe);
-
-/*
- * Disconnect a probe from a tracepoint.
- * Internal API, should not be used directly.
- */
-extern int tracepoint_probe_unregister(const char *name, void *probe);
-
-extern int tracepoint_probe_register_noupdate(const char *name, void *probe);
-extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe);
-extern void tracepoint_probe_update_all(void);
-
-struct tracepoint_iter {
-	struct module *module;
-	struct tracepoint *tracepoint;
-};
-
-extern void tracepoint_iter_start(struct tracepoint_iter *iter);
-extern void tracepoint_iter_next(struct tracepoint_iter *iter);
-extern void tracepoint_iter_stop(struct tracepoint_iter *iter);
-extern void tracepoint_iter_reset(struct tracepoint_iter *iter);
-extern int tracepoint_get_iter_range(struct tracepoint **tracepoint,
-	struct tracepoint *begin, struct tracepoint *end);
-
-/*
- * tracepoint_synchronize_unregister must be called between the last tracepoint
- * probe unregistration and the end of module exit to make sure there is no
- * caller executing a probe when it is freed.
- */
-static inline void tracepoint_synchronize_unregister(void)
-{
-	synchronize_sched();
-}
-
-#define PARAMS(args...) args
-
-#endif /* _LINUX_TRACEPOINT_H */
-
-/*
- * Note: we keep the TRACE_EVENT outside the include file ifdef protection.
- *  This is due to the way trace events work. If a file includes two
- * trace event headers under one "CREATE_TRACE_POINTS" the first include
- * will override the TRACE_EVENT and break the second include.
- */
-
 #ifndef TRACE_EVENT
 /*
 * For use with the TRACE_EVENT macro:
diff --git a/include/linux/types.h b/include/linux/types.h
index c42724f8c80..23d237a075e 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -188,12 +188,12 @@ typedef u32 phys_addr_t;
 typedef phys_addr_t resource_size_t;

 typedef struct {
-	volatile int counter;
+	int counter;
 } atomic_t;

 #ifdef CONFIG_64BIT
 typedef struct {
-	volatile long counter;
+	long counter;
 } atomic64_t;
 #endif
diff --git a/include/linux/wait.h b/include/linux/wait.h
index a48e16b77d5..76d96d035ea 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -127,12 +127,26 @@ static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
 /*
 * Used for wake-one threads:
 */
+static inline void __add_wait_queue_exclusive(wait_queue_head_t *q,
+					      wait_queue_t *wait)
+{
+	wait->flags |= WQ_FLAG_EXCLUSIVE;
+	__add_wait_queue(q, wait);
+}
+
 static inline void __add_wait_queue_tail(wait_queue_head_t *head,
-						wait_queue_t *new)
+					 wait_queue_t *new)
 {
	list_add_tail(&new->task_list, &head->task_list);
 }

+static inline void __add_wait_queue_tail_exclusive(wait_queue_head_t *q,
+					      wait_queue_t *wait)
+{
+	wait->flags |= WQ_FLAG_EXCLUSIVE;
+	__add_wait_queue_tail(q, wait);
+}
+
 static inline void __remove_wait_queue(wait_queue_head_t *head,
							wait_queue_t *old)
 {
@@ -404,25 +418,6 @@ do {									\
 })

 /*
- * Must be called with the spinlock in the wait_queue_head_t held.
- */
-static inline void add_wait_queue_exclusive_locked(wait_queue_head_t *q,
-						   wait_queue_t * wait)
-{
-	wait->flags |= WQ_FLAG_EXCLUSIVE;
-	__add_wait_queue_tail(q, wait);
-}
-
-/*
- * Must be called with the spinlock in the wait_queue_head_t held.
- */
-static inline void remove_wait_queue_locked(wait_queue_head_t *q,
-					    wait_queue_t * wait)
-{
-	__remove_wait_queue(q, wait);
-}
-
-/*
 * These are the old interfaces to sleep waiting for an event.
 * They are racy.  DO NOT use them, use the wait_event* interfaces above.
 * We plan to remove these interfaces.
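Annotation (illustrative, not part of the commit): the __do_rcu_dereference_check() macro added in the rcupdate.h hunk above turns every lockdep-RCU complaint into a warn-once per call site by hiding a static flag inside the macro expansion. A self-contained userspace sketch of that pattern, with check_once() as a hypothetical stand-in:

    /* Each expansion of check_once() gets its own static flag, so a
     * failing condition is reported at most once per call site. */
    #include <stdbool.h>
    #include <stdio.h>

    #define check_once(c)                                            \
            do {                                                     \
                    static bool warned;                              \
                    if (!warned && !(c)) {                           \
                            warned = true;                           \
                            fprintf(stderr, "%s:%d: check failed\n", \
                                    __FILE__, __LINE__);             \
                    }                                                \
            } while (0)

    int main(void)
    {
            for (int i = 0; i < 3; i++)
                    check_once(i > 0);      /* warns only on the first pass */
            return 0;
    }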
diff --git a/include/linux/zorro.h b/include/linux/zorro.h index 913bfc226dd..7bf9db525e9 100644 --- a/include/linux/zorro.h +++ b/include/linux/zorro.h @@ -38,8 +38,6 @@ typedef __u32 zorro_id; -#define ZORRO_WILDCARD (0xffffffff) /* not official */ - /* Include the ID list */ #include <linux/zorro_ids.h> @@ -116,6 +114,7 @@ struct ConfigDev { #include <linux/init.h> #include <linux/ioport.h> +#include <linux/mod_devicetable.h> #include <asm/zorro.h> @@ -142,29 +141,10 @@ struct zorro_dev { * Zorro bus */ -struct zorro_bus { - struct list_head devices; /* list of devices on this bus */ - unsigned int num_resources; /* number of resources */ - struct resource resources[4]; /* address space routed to this bus */ - struct device dev; - char name[10]; -}; - -extern struct zorro_bus zorro_bus; /* single Zorro bus */ extern struct bus_type zorro_bus_type; /* - * Zorro device IDs - */ - -struct zorro_device_id { - zorro_id id; /* Device ID or ZORRO_WILDCARD */ - unsigned long driver_data; /* Data private to the driver */ -}; - - - /* * Zorro device drivers */ diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h index 5acfb1eb4df..1dfab540151 100644 --- a/include/trace/define_trace.h +++ b/include/trace/define_trace.h @@ -65,6 +65,10 @@ #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) +/* Make all open coded DECLARE_TRACE nops */ +#undef DECLARE_TRACE +#define DECLARE_TRACE(name, proto, args) + #ifdef CONFIG_EVENT_TRACING #include <trace/ftrace.h> #endif @@ -75,6 +79,7 @@ #undef DEFINE_EVENT #undef DEFINE_EVENT_PRINT #undef TRACE_HEADER_MULTI_READ +#undef DECLARE_TRACE /* Only undef what we defined in this file */ #ifdef UNDEF_TRACE_INCLUDE_FILE diff --git a/include/trace/events/lock.h b/include/trace/events/lock.h index 5c1dcfc16c6..2821b86de63 100644 --- a/include/trace/events/lock.h +++ b/include/trace/events/lock.h @@ -35,15 +35,15 @@ TRACE_EVENT(lock_acquire, __get_str(name)) ); -TRACE_EVENT(lock_release, +DECLARE_EVENT_CLASS(lock, - TP_PROTO(struct lockdep_map *lock, int nested, unsigned long ip), + TP_PROTO(struct lockdep_map *lock, unsigned long ip), - TP_ARGS(lock, nested, ip), + TP_ARGS(lock, ip), TP_STRUCT__entry( - __string(name, lock->name) - __field(void *, lockdep_addr) + __string( name, lock->name ) + __field( void *, lockdep_addr ) ), TP_fast_assign( @@ -51,51 +51,30 @@ TRACE_EVENT(lock_release, __entry->lockdep_addr = lock; ), - TP_printk("%p %s", - __entry->lockdep_addr, __get_str(name)) + TP_printk("%p %s", __entry->lockdep_addr, __get_str(name)) ); -#ifdef CONFIG_LOCK_STAT - -TRACE_EVENT(lock_contended, +DEFINE_EVENT(lock, lock_release, TP_PROTO(struct lockdep_map *lock, unsigned long ip), - TP_ARGS(lock, ip), + TP_ARGS(lock, ip) +); - TP_STRUCT__entry( - __string(name, lock->name) - __field(void *, lockdep_addr) - ), +#ifdef CONFIG_LOCK_STAT - TP_fast_assign( - __assign_str(name, lock->name); - __entry->lockdep_addr = lock; - ), +DEFINE_EVENT(lock, lock_contended, - TP_printk("%p %s", - __entry->lockdep_addr, __get_str(name)) -); + TP_PROTO(struct lockdep_map *lock, unsigned long ip), -TRACE_EVENT(lock_acquired, - TP_PROTO(struct lockdep_map *lock, unsigned long ip, s64 waittime), + TP_ARGS(lock, ip) +); - TP_ARGS(lock, ip, waittime), +DEFINE_EVENT(lock, lock_acquired, - TP_STRUCT__entry( - __string(name, lock->name) - __field(s64, wait_nsec) - __field(void *, lockdep_addr) - ), + TP_PROTO(struct lockdep_map *lock, unsigned long ip), - TP_fast_assign( - __assign_str(name, lock->name); - __entry->wait_nsec = waittime; - __entry->lockdep_addr = lock; - ), - 
TP_printk("%p %s (%llu ns)", __entry->lockdep_addr, - __get_str(name), - __entry->wait_nsec) + TP_ARGS(lock, ip) ); #endif diff --git a/include/trace/events/module.h b/include/trace/events/module.h index 4b0f48ba16a..c7bb2f0482f 100644 --- a/include/trace/events/module.h +++ b/include/trace/events/module.h @@ -51,11 +51,14 @@ TRACE_EVENT(module_free, TP_printk("%s", __get_str(name)) ); +#ifdef CONFIG_MODULE_UNLOAD +/* trace_module_get/put are only used if CONFIG_MODULE_UNLOAD is defined */ + DECLARE_EVENT_CLASS(module_refcnt, - TP_PROTO(struct module *mod, unsigned long ip, int refcnt), + TP_PROTO(struct module *mod, unsigned long ip), - TP_ARGS(mod, ip, refcnt), + TP_ARGS(mod, ip), TP_STRUCT__entry( __field( unsigned long, ip ) @@ -65,7 +68,7 @@ DECLARE_EVENT_CLASS(module_refcnt, TP_fast_assign( __entry->ip = ip; - __entry->refcnt = refcnt; + __entry->refcnt = __this_cpu_read(mod->refptr->incs) + __this_cpu_read(mod->refptr->decs); __assign_str(name, mod->name); ), @@ -75,17 +78,18 @@ DECLARE_EVENT_CLASS(module_refcnt, DEFINE_EVENT(module_refcnt, module_get, - TP_PROTO(struct module *mod, unsigned long ip, int refcnt), + TP_PROTO(struct module *mod, unsigned long ip), - TP_ARGS(mod, ip, refcnt) + TP_ARGS(mod, ip) ); DEFINE_EVENT(module_refcnt, module_put, - TP_PROTO(struct module *mod, unsigned long ip, int refcnt), + TP_PROTO(struct module *mod, unsigned long ip), - TP_ARGS(mod, ip, refcnt) + TP_ARGS(mod, ip) ); +#endif /* CONFIG_MODULE_UNLOAD */ TRACE_EVENT(module_request, diff --git a/include/trace/events/napi.h b/include/trace/events/napi.h index a8989c4547e..188deca2f3c 100644 --- a/include/trace/events/napi.h +++ b/include/trace/events/napi.h @@ -1,4 +1,7 @@ -#ifndef _TRACE_NAPI_H_ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM napi + +#if !defined(_TRACE_NAPI_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_NAPI_H_ #include <linux/netdevice.h> @@ -8,4 +11,7 @@ DECLARE_TRACE(napi_poll, TP_PROTO(struct napi_struct *napi), TP_ARGS(napi)); -#endif +#endif /* _TRACE_NAPI_H_ */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index cfceb0b73e2..4f733ecea46 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -51,15 +51,12 @@ TRACE_EVENT(sched_kthread_stop_ret, /* * Tracepoint for waiting on task to unschedule: - * - * (NOTE: the 'rq' argument is not used by generic trace events, - * but used by the latency tracer plugin. ) */ TRACE_EVENT(sched_wait_task, - TP_PROTO(struct rq *rq, struct task_struct *p), + TP_PROTO(struct task_struct *p), - TP_ARGS(rq, p), + TP_ARGS(p), TP_STRUCT__entry( __array( char, comm, TASK_COMM_LEN ) @@ -79,15 +76,12 @@ TRACE_EVENT(sched_wait_task, /* * Tracepoint for waking up a task: - * - * (NOTE: the 'rq' argument is not used by generic trace events, - * but used by the latency tracer plugin. 
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index cfceb0b73e2..4f733ecea46 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -51,15 +51,12 @@ TRACE_EVENT(sched_kthread_stop_ret,
 
 /*
  * Tracepoint for waiting on task to unschedule:
- *
- * (NOTE: the 'rq' argument is not used by generic trace events,
- *        but used by the latency tracer plugin. )
  */
 TRACE_EVENT(sched_wait_task,
 
-	TP_PROTO(struct rq *rq, struct task_struct *p),
+	TP_PROTO(struct task_struct *p),
 
-	TP_ARGS(rq, p),
+	TP_ARGS(p),
 
 	TP_STRUCT__entry(
 		__array(	char,	comm,	TASK_COMM_LEN	)
@@ -79,15 +76,12 @@ TRACE_EVENT(sched_wait_task,
 
 /*
  * Tracepoint for waking up a task:
- *
- * (NOTE: the 'rq' argument is not used by generic trace events,
- *        but used by the latency tracer plugin. )
  */
 DECLARE_EVENT_CLASS(sched_wakeup_template,
 
-	TP_PROTO(struct rq *rq, struct task_struct *p, int success),
+	TP_PROTO(struct task_struct *p, int success),
 
-	TP_ARGS(rq, p, success),
+	TP_ARGS(p, success),
 
 	TP_STRUCT__entry(
 		__array(	char,	comm,	TASK_COMM_LEN	)
@@ -111,31 +105,25 @@ DECLARE_EVENT_CLASS(sched_wakeup_template,
 );
 
 DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
-	     TP_PROTO(struct rq *rq, struct task_struct *p, int success),
-	     TP_ARGS(rq, p, success));
+	     TP_PROTO(struct task_struct *p, int success),
+	     TP_ARGS(p, success));
 
 /*
  * Tracepoint for waking up a new task:
- *
- * (NOTE: the 'rq' argument is not used by generic trace events,
- *        but used by the latency tracer plugin. )
  */
 DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
-	     TP_PROTO(struct rq *rq, struct task_struct *p, int success),
-	     TP_ARGS(rq, p, success));
+	     TP_PROTO(struct task_struct *p, int success),
+	     TP_ARGS(p, success));
 
 /*
  * Tracepoint for task switches, performed by the scheduler:
- *
- * (NOTE: the 'rq' argument is not used by generic trace events,
- *        but used by the latency tracer plugin. )
  */
 TRACE_EVENT(sched_switch,
 
-	TP_PROTO(struct rq *rq, struct task_struct *prev,
+	TP_PROTO(struct task_struct *prev,
 		 struct task_struct *next),
 
-	TP_ARGS(rq, prev, next),
+	TP_ARGS(prev, next),
 
 	TP_STRUCT__entry(
 		__array(	char,	prev_comm,	TASK_COMM_LEN	)
diff --git a/include/trace/events/signal.h b/include/trace/events/signal.h
index a510b75ac30..814566c99d2 100644
--- a/include/trace/events/signal.h
+++ b/include/trace/events/signal.h
@@ -100,18 +100,7 @@ TRACE_EVENT(signal_deliver,
 		  __entry->sa_handler, __entry->sa_flags)
 );
 
-/**
- * signal_overflow_fail - called when signal queue is overflow
- * @sig: signal number
- * @group: signal to process group or not (bool)
- * @info: pointer to struct siginfo
- *
- * Kernel fails to generate 'sig' signal with 'info' siginfo, because
- * siginfo queue is overflow, and the signal is dropped.
- * 'group' is not 0 if the signal will be sent to a process group.
- * 'sig' is always one of RT signals.
- */
-TRACE_EVENT(signal_overflow_fail,
+DECLARE_EVENT_CLASS(signal_queue_overflow,
 
 	TP_PROTO(int sig, int group, struct siginfo *info),
 
@@ -135,6 +124,24 @@ TRACE_EVENT(signal_overflow_fail,
 );
 
 /**
+ * signal_overflow_fail - called when signal queue is overflow
+ * @sig: signal number
+ * @group: signal to process group or not (bool)
+ * @info: pointer to struct siginfo
+ *
+ * Kernel fails to generate 'sig' signal with 'info' siginfo, because
+ * siginfo queue is overflow, and the signal is dropped.
+ * 'group' is not 0 if the signal will be sent to a process group.
+ * 'sig' is always one of RT signals.
+ */
+DEFINE_EVENT(signal_queue_overflow, signal_overflow_fail,
+
+	TP_PROTO(int sig, int group, struct siginfo *info),
+
+	TP_ARGS(sig, group, info)
+);
+
+/**
  * signal_lose_info - called when siginfo is lost
  * @sig: signal number
 * @group: signal to process group or not (bool)
@@ -145,28 +152,13 @@ TRACE_EVENT(signal_overflow_fail,
 * 'group' is not 0 if the signal will be sent to a process group.
 * 'sig' is always one of non-RT signals.
 */
-TRACE_EVENT(signal_lose_info,
+DEFINE_EVENT(signal_queue_overflow, signal_lose_info,
 
 	TP_PROTO(int sig, int group, struct siginfo *info),
 
-	TP_ARGS(sig, group, info),
-
-	TP_STRUCT__entry(
-		__field(	int,	sig	)
-		__field(	int,	group	)
-		__field(	int,	errno	)
-		__field(	int,	code	)
-	),
-
-	TP_fast_assign(
-		__entry->sig	= sig;
-		__entry->group	= group;
-		TP_STORE_SIGINFO(__entry, info);
-	),
-
-	TP_printk("sig=%d group=%d errno=%d code=%d",
-		  __entry->sig, __entry->group, __entry->errno, __entry->code)
+	TP_ARGS(sig, group, info)
 );
 
+
 #endif /* _TRACE_SIGNAL_H */
 
 /* This part must be outside protection */
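Because signal_overflow_fail and signal_lose_info now share the signal_queue_overflow class, any further drop-style signal event would need only a DEFINE_EVENT stanza. A hypothetical example (signal_drop_example is illustrative, not part of this patch):

	DEFINE_EVENT(signal_queue_overflow, signal_drop_example,

		TP_PROTO(int sig, int group, struct siginfo *info),

		TP_ARGS(sig, group, info)
	);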
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index ea6f9d4a20e..16253db38d7 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -154,9 +154,11 @@
  *
  *	field = (typeof(field))entry;
  *
- *	p = get_cpu_var(ftrace_event_seq);
+ *	p = &get_cpu_var(ftrace_event_seq);
  *	trace_seq_init(p);
- *	ret = trace_seq_printf(s, <TP_printk> "\n");
+ *	ret = trace_seq_printf(s, "%s: ", <call>);
+ *	if (ret)
+ *		ret = trace_seq_printf(s, <TP_printk> "\n");
  *	put_cpu();
  *	if (!ret)
  *		return TRACE_TYPE_PARTIAL_LINE;
@@ -450,38 +452,38 @@ perf_trace_disable_##name(struct ftrace_event_call *unused)	\
  *
  * static void ftrace_raw_event_<call>(proto)
  * {
+ *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
  *	struct ring_buffer_event *event;
  *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
  *	struct ring_buffer *buffer;
  *	unsigned long irq_flags;
+ *	int __data_size;
  *	int pc;
  *
  *	local_save_flags(irq_flags);
  *	pc = preempt_count();
  *
+ *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
+ *
  *	event = trace_current_buffer_lock_reserve(&buffer,
  *				  event_<call>.id,
- *				  sizeof(struct ftrace_raw_<call>),
+ *				  sizeof(*entry) + __data_size,
  *				  irq_flags, pc);
  *	if (!event)
  *		return;
  *	entry	= ring_buffer_event_data(event);
  *
- *	<assign>;  <-- Here we assign the entries by the __field and
- *		       __array macros.
+ *	{ <assign>; }  <-- Here we assign the entries by the __field and
+ *			   __array macros.
  *
- *	trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc);
+ *	if (!filter_current_check_discard(buffer, event_call, entry, event))
+ *		trace_current_buffer_unlock_commit(buffer,
+ *						   event, irq_flags, pc);
  * }
  *
  * static int ftrace_raw_reg_event_<call>(struct ftrace_event_call *unused)
  * {
- *	int ret;
- *
- *	ret = register_trace_<call>(ftrace_raw_event_<call>);
- *	if (!ret)
- *		pr_info("event trace: Could not activate trace point "
- *			"probe to <call>");
- *	return ret;
+ *	return register_trace_<call>(ftrace_raw_event_<call>);
  * }
  *
  * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
@@ -493,6 +495,8 @@ perf_trace_disable_##name(struct ftrace_event_call *unused)	\
  *	.trace			= ftrace_raw_output_<call>, <-- stage 2
  * };
  *
+ * static const char print_fmt_<call>[] = <TP_printk>;
+ *
  * static struct ftrace_event_call __used
  * __attribute__((__aligned__(4)))
  * __attribute__((section("_ftrace_events"))) event_<call> = {
@@ -501,6 +505,8 @@ perf_trace_disable_##name(struct ftrace_event_call *unused)	\
  *	.raw_init		= trace_event_raw_init,
  *	.regfunc		= ftrace_reg_event_<call>,
  *	.unregfunc		= ftrace_unreg_event_<call>,
+ *	.print_fmt		= print_fmt_<call>,
+ *	.define_fields		= ftrace_define_fields_<call>,
  * }
  *
  */
@@ -569,7 +575,6 @@ ftrace_raw_event_id_##call(struct ftrace_event_call *event_call,	\
 		return;							\
 	entry	= ring_buffer_event_data(event);			\
 									\
-									\
 	tstruct								\
 									\
 	{ assign; }							\
@@ -758,13 +763,12 @@ __attribute__((section("_ftrace_events"))) event_##call = {	\
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
 static notrace void							\
 perf_trace_templ_##call(struct ftrace_event_call *event_call,		\
-			proto)						\
+			struct pt_regs *__regs, proto)			\
 {									\
 	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
 	struct ftrace_raw_##call *entry;				\
 	u64 __addr = 0, __count = 1;					\
 	unsigned long irq_flags;					\
-	struct pt_regs *__regs;						\
 	int __entry_size;						\
 	int __data_size;						\
 	int rctx;							\
@@ -785,20 +789,22 @@ perf_trace_templ_##call(struct ftrace_event_call *event_call,	\
 									\
 	{ assign; }							\
 									\
-	__regs = &__get_cpu_var(perf_trace_regs);			\
-	perf_fetch_caller_regs(__regs, 2);				\
-									\
 	perf_trace_buf_submit(entry, __entry_size, rctx, __addr,	\
 			       __count, irq_flags, __regs);		\
 }
 
 #undef DEFINE_EVENT
-#define DEFINE_EVENT(template, call, proto, args)		\
-static notrace void perf_trace_##call(proto)			\
-{								\
-	struct ftrace_event_call *event_call = &event_##call;	\
-								\
-	perf_trace_templ_##template(event_call, args);		\
+#define DEFINE_EVENT(template, call, proto, args)		\
+static notrace void perf_trace_##call(proto)			\
+{								\
+	struct ftrace_event_call *event_call = &event_##call;	\
+	struct pt_regs *__regs = &get_cpu_var(perf_trace_regs);	\
+								\
+	perf_fetch_caller_regs(__regs, 1);			\
+								\
+	perf_trace_templ_##template(event_call, __regs, args);	\
+								\
+	put_cpu_var(perf_trace_regs);				\
}
 
 #undef DEFINE_EVENT_PRINT
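The net effect of the last two hunks is that the per-CPU pt_regs snapshot moves out of the shared perf_trace_templ_<call>() helper and into each generated perf_trace_<call>() wrapper, so perf_fetch_caller_regs() always runs at the same, known call depth (1) relative to the instrumented call site. Roughly, for a hypothetical event 'foo' of class 'foo_template' with prototype (int arg), the DEFINE_EVENT expansion now behaves like this sketch (illustrative, not literal preprocessor output):

	static notrace void perf_trace_foo(int arg)
	{
		struct ftrace_event_call *event_call = &event_foo;
		struct pt_regs *__regs = &get_cpu_var(perf_trace_regs);

		/* snapshot the registers of perf_trace_foo()'s caller */
		perf_fetch_caller_regs(__regs, 1);

		perf_trace_templ_foo_template(event_call, __regs, arg);

		put_cpu_var(perf_trace_regs);
	}

The get_cpu_var()/put_cpu_var() pair also keeps preemption disabled for the whole span in which __regs is in use, something the previous bare __get_cpu_var() access inside the template left to the surrounding context.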