Diffstat (limited to 'include'): 50 files changed, 930 insertions, 287 deletions
diff --git a/include/asm-generic/cputime.h b/include/asm-generic/cputime.h index 2bcc5c7c22a..61e03dd7939 100644 --- a/include/asm-generic/cputime.h +++ b/include/asm-generic/cputime.h @@ -30,6 +30,9 @@ typedef u64 cputime64_t; #define cputime64_to_jiffies64(__ct) (__ct) #define jiffies64_to_cputime64(__jif) (__jif) #define cputime_to_cputime64(__ct) ((u64) __ct) +#define cputime64_gt(__a, __b) ((__a) > (__b)) + +#define nsecs_to_cputime64(__ct) nsecs_to_jiffies64(__ct) /* diff --git a/include/asm-generic/fcntl.h b/include/asm-generic/fcntl.h index 0fc16e3f0bf..84793c7025e 100644 --- a/include/asm-generic/fcntl.h +++ b/include/asm-generic/fcntl.h @@ -80,6 +80,10 @@ #define O_SYNC (__O_SYNC|O_DSYNC) #endif +#ifndef O_PATH +#define O_PATH 010000000 +#endif + #ifndef O_NDELAY #define O_NDELAY O_NONBLOCK #endif diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h index 3c2344f4813..01f227e1425 100644 --- a/include/asm-generic/futex.h +++ b/include/asm-generic/futex.h @@ -6,7 +6,7 @@ #include <asm/errno.h> static inline int -futex_atomic_op_inuser (int encoded_op, int __user *uaddr) +futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -16,7 +16,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; - if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) + if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; pagefault_disable(); @@ -48,7 +48,8 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) } static inline int -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) { return -ENOSYS; } diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h index b3bfabc258f..c1a1216e29c 100644 --- a/include/asm-generic/sections.h +++ b/include/asm-generic/sections.h @@ -11,6 +11,7 @@ extern char _sinittext[], _einittext[]; extern char _end[]; extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[]; extern char __kprobes_text_start[], __kprobes_text_end[]; +extern char __entry_text_start[], __entry_text_end[]; extern char __initdata_begin[], __initdata_end[]; extern char __start_rodata[], __end_rodata[]; diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h index b969770196c..57af0338d27 100644 --- a/include/asm-generic/unistd.h +++ b/include/asm-generic/unistd.h @@ -646,9 +646,13 @@ __SYSCALL(__NR_prlimit64, sys_prlimit64) __SYSCALL(__NR_fanotify_init, sys_fanotify_init) #define __NR_fanotify_mark 263 __SYSCALL(__NR_fanotify_mark, sys_fanotify_mark) +#define __NR_name_to_handle_at 264 +__SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at) +#define __NR_open_by_handle_at 265 +__SYSCALL(__NR_open_by_handle_at, sys_open_by_handle_at) #undef __NR_syscalls -#define __NR_syscalls 264 +#define __NR_syscalls 266 /* * All syscalls below here should go away really, diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index fe77e3395b4..906c3ceca9a 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -424,6 +424,12 @@ *(.kprobes.text) \ VMLINUX_SYMBOL(__kprobes_text_end) = .; +#define ENTRY_TEXT \ + ALIGN_FUNCTION(); \ + VMLINUX_SYMBOL(__entry_text_start) = .; \ + *(.entry.text) \ + VMLINUX_SYMBOL(__entry_text_end) = .; + #ifdef CONFIG_FUNCTION_GRAPH_TRACER #define IRQENTRY_TEXT \ ALIGN_FUNCTION(); \ diff 
--git a/include/linux/cgroup.h b/include/linux/cgroup.h index ce104e33cd2..e654fa23991 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -474,7 +474,8 @@ struct cgroup_subsys { struct cgroup *old_cgrp, struct task_struct *tsk, bool threadgroup); void (*fork)(struct cgroup_subsys *ss, struct task_struct *task); - void (*exit)(struct cgroup_subsys *ss, struct task_struct *task); + void (*exit)(struct cgroup_subsys *ss, struct cgroup *cgrp, + struct cgroup *old_cgrp, struct task_struct *task); int (*populate)(struct cgroup_subsys *ss, struct cgroup *cgrp); void (*post_clone)(struct cgroup_subsys *ss, struct cgroup *cgrp); @@ -626,6 +627,7 @@ bool css_is_ancestor(struct cgroup_subsys_state *cg, /* Get id and depth of css */ unsigned short css_id(struct cgroup_subsys_state *css); unsigned short css_depth(struct cgroup_subsys_state *css); +struct cgroup_subsys_state *cgroup_css_from_dir(struct file *f, int id); #else /* !CONFIG_CGROUPS */ diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h index ccefff02b6c..cdbfcb8780e 100644 --- a/include/linux/cgroup_subsys.h +++ b/include/linux/cgroup_subsys.h @@ -65,4 +65,8 @@ SUBSYS(net_cls) SUBSYS(blkio) #endif +#ifdef CONFIG_CGROUP_PERF +SUBSYS(perf) +#endif + /* */ diff --git a/include/linux/debugobjects.h b/include/linux/debugobjects.h index 597692f1fc8..65970b811e2 100644 --- a/include/linux/debugobjects.h +++ b/include/linux/debugobjects.h @@ -34,7 +34,10 @@ struct debug_obj { /** * struct debug_obj_descr - object type specific debug description structure + * * @name: name of the object typee + * @debug_hint: function returning address, which have associated + * kernel symbol, to allow identify the object * @fixup_init: fixup function, which is called when the init check * fails * @fixup_activate: fixup function, which is called when the activate check @@ -46,7 +49,7 @@ struct debug_obj { */ struct debug_obj_descr { const char *name; - + void *(*debug_hint) (void *addr); int (*fixup_init) (void *addr, enum debug_obj_state state); int (*fixup_activate) (void *addr, enum debug_obj_state state); int (*fixup_destroy) (void *addr, enum debug_obj_state state); diff --git a/include/linux/device.h b/include/linux/device.h index 1bf5cf0b451..ca5d25225aa 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -128,9 +128,7 @@ struct device_driver { bool suppress_bind_attrs; /* disables bind/unbind via sysfs */ -#if defined(CONFIG_OF) const struct of_device_id *of_match_table; -#endif int (*probe) (struct device *dev); int (*remove) (struct device *dev); @@ -441,9 +439,8 @@ struct device { override */ /* arch specific additions */ struct dev_archdata archdata; -#ifdef CONFIG_OF - struct device_node *of_node; -#endif + + struct device_node *of_node; /* associated device tree node */ dev_t devt; /* dev_t, creates the sysfs "dev" */ diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h index 28028988c86..33a42f24b27 100644 --- a/include/linux/exportfs.h +++ b/include/linux/exportfs.h @@ -8,6 +8,9 @@ struct inode; struct super_block; struct vfsmount; +/* limit the handle size to NFSv4 handle size now */ +#define MAX_HANDLE_SZ 128 + /* * The fileid_type identifies how the file within the filesystem is encoded. * In theory this is freely set and parsed by the filesystem, but we try to @@ -121,8 +124,10 @@ struct fid { * set, the encode_fh() should store sufficient information so that a good * attempt can be made to find not only the file but also it's place in the * filesystem. 
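The O_PATH flag and the name_to_handle_at()/open_by_handle_at() syscall slots added in the fcntl and unistd hunks above, together with the MAX_HANDLE_SZ limit here, make up the new file-handle interface. A minimal userspace sketch of the intended flow follows; it assumes libc wrappers for the two syscalls exist, and reopen_via_handle() is an invented name. open_by_handle_at() additionally requires CAP_DAC_READ_SEARCH.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>

#define MAX_HANDLE_SZ 128	/* mirrors the limit added above */

int reopen_via_handle(const char *path, int mount_fd)
{
	struct file_handle *fh;
	int mount_id, fd;

	fh = malloc(sizeof(*fh) + MAX_HANDLE_SZ);
	if (!fh)
		return -1;
	fh->handle_bytes = MAX_HANDLE_SZ;

	/* encode "path" into an opaque, persistent handle */
	if (name_to_handle_at(AT_FDCWD, path, fh, &mount_id, 0) < 0) {
		free(fh);
		return -1;
	}

	/* the handle stays usable even after "path" is renamed or unlinked;
	 * mount_fd is any fd on the target filesystem */
	fd = open_by_handle_at(mount_fd, fh, O_RDONLY);
	free(fh);
	return fd;
}

The mount_fd argument would typically be obtained by opening the filesystem's mount point, for instance with the new O_PATH flag.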
This typically means storing a reference to de->d_parent in - * the filehandle fragment. encode_fh() should return the number of bytes - * stored or a negative error code such as %-ENOSPC + * the filehandle fragment. encode_fh() should return the fileid_type on + * success and on error returns 255 (if the space needed to encode fh is + * greater than @max_len*4 bytes). On error @max_len contains the minimum + * size(in 4 byte unit) needed to encode the file handle. * * fh_to_dentry: * @fh_to_dentry is given a &struct super_block (@sb) and a file handle diff --git a/include/linux/fcntl.h b/include/linux/fcntl.h index a562fa5fb4e..f550f894ba1 100644 --- a/include/linux/fcntl.h +++ b/include/linux/fcntl.h @@ -46,6 +46,7 @@ unlinking file. */ #define AT_SYMLINK_FOLLOW 0x400 /* Follow symbolic links. */ #define AT_NO_AUTOMOUNT 0x800 /* Suppress terminal automount traversal */ +#define AT_EMPTY_PATH 0x1000 /* Allow empty relative pathname */ #ifdef __KERNEL__ diff --git a/include/linux/file.h b/include/linux/file.h index e85baebf627..21a79958541 100644 --- a/include/linux/file.h +++ b/include/linux/file.h @@ -29,6 +29,8 @@ static inline void fput_light(struct file *file, int fput_needed) extern struct file *fget(unsigned int fd); extern struct file *fget_light(unsigned int fd, int *fput_needed); +extern struct file *fget_raw(unsigned int fd); +extern struct file *fget_raw_light(unsigned int fd, int *fput_needed); extern void set_close_on_exec(unsigned int fd, int flag); extern void put_filp(struct file *); extern int alloc_fd(unsigned start, unsigned flags); diff --git a/include/linux/fs.h b/include/linux/fs.h index e38b50a4b9d..13df14e2c42 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -102,6 +102,9 @@ struct inodes_stat_t { /* File is huge (eg. 
/dev/kmem): treat loff_t as unsigned */ #define FMODE_UNSIGNED_OFFSET ((__force fmode_t)0x2000) +/* File is opened with O_PATH; almost nothing can be done with it */ +#define FMODE_PATH ((__force fmode_t)0x4000) + /* File was opened by fanotify and shouldn't generate fanotify events */ #define FMODE_NONOTIFY ((__force fmode_t)0x1000000) @@ -978,6 +981,13 @@ struct file { #endif }; +struct file_handle { + __u32 handle_bytes; + int handle_type; + /* file identifier */ + unsigned char f_handle[0]; +}; + #define get_file(x) atomic_long_inc(&(x)->f_count) #define fput_atomic(x) atomic_long_add_unless(&(x)->f_count, -1, 1) #define file_count(x) atomic_long_read(&(x)->f_count) @@ -1401,6 +1411,7 @@ struct super_block { wait_queue_head_t s_wait_unfrozen; char s_id[32]; /* Informational name */ + u8 s_uuid[16]; /* UUID */ void *s_fs_info; /* Filesystem private info */ fmode_t s_mode; @@ -1874,6 +1885,8 @@ extern void drop_collected_mounts(struct vfsmount *); extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *, struct vfsmount *); extern int vfs_statfs(struct path *, struct kstatfs *); +extern int user_statfs(const char __user *, struct kstatfs *); +extern int fd_statfs(int, struct kstatfs *); extern int statfs_by_dentry(struct dentry *, struct kstatfs *); extern int freeze_super(struct super_block *super); extern int thaw_super(struct super_block *super); @@ -1990,6 +2003,8 @@ extern int do_fallocate(struct file *file, int mode, loff_t offset, extern long do_sys_open(int dfd, const char __user *filename, int flags, int mode); extern struct file *filp_open(const char *, int, int); +extern struct file *file_open_root(struct dentry *, struct vfsmount *, + const char *, int); extern struct file * dentry_open(struct dentry *, struct vfsmount *, int, const struct cred *); extern int filp_close(struct file *, fl_owner_t id); @@ -2205,10 +2220,6 @@ extern struct file *create_read_pipe(struct file *f, int flags); extern struct file *create_write_pipe(int flags); extern void free_write_pipe(struct file *); -extern struct file *do_filp_open(int dfd, const char *pathname, - int open_flag, int mode, int acc_mode); -extern int may_open(struct path *, int, int); - extern int kernel_read(struct file *, loff_t, char *, unsigned long); extern struct file * open_exec(const char *); diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index dcd6a7c3a43..ca29e03c1fa 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -428,6 +428,7 @@ extern void unregister_ftrace_graph(void); extern void ftrace_graph_init_task(struct task_struct *t); extern void ftrace_graph_exit_task(struct task_struct *t); +extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu); static inline int task_curr_ret_stack(struct task_struct *t) { @@ -451,6 +452,7 @@ static inline void unpause_graph_tracing(void) static inline void ftrace_graph_init_task(struct task_struct *t) { } static inline void ftrace_graph_exit_task(struct task_struct *t) { } +static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { } static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc, trace_func_graph_ent_t entryfunc) diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 47e3997f7b5..22b32af1b5e 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -37,7 +37,6 @@ struct trace_entry { unsigned char flags; unsigned char preempt_count; int pid; - int lock_depth; }; #define FTRACE_MAX_EVENT \ @@ -208,7 +207,6 @@ struct ftrace_event_call 
{ #define PERF_MAX_TRACE_SIZE 2048 -#define MAX_FILTER_PRED 32 #define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */ extern void destroy_preds(struct ftrace_event_call *call); diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index f376ddc64c4..62f500c724f 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -54,11 +54,13 @@ enum hrtimer_restart { * 0x00 inactive * 0x01 enqueued into rbtree * 0x02 callback function running + * 0x04 timer is migrated to another cpu * * Special cases: * 0x03 callback function running and enqueued * (was requeued on another CPU) - * 0x09 timer was migrated on CPU hotunplug + * 0x05 timer was migrated on CPU hotunplug + * * The "callback function running and enqueued" status is only possible on * SMP. It happens for example when a posix timer expired and the callback * queued a signal. Between dropping the lock which protects the posix timer @@ -67,8 +69,11 @@ enum hrtimer_restart { * as otherwise the timer could be removed before the softirq code finishes the * the handling of the timer. * - * The HRTIMER_STATE_ENQUEUED bit is always or'ed to the current state to - * preserve the HRTIMER_STATE_CALLBACK bit in the above scenario. + * The HRTIMER_STATE_ENQUEUED bit is always or'ed to the current state + * to preserve the HRTIMER_STATE_CALLBACK in the above scenario. This + * also affects HRTIMER_STATE_MIGRATE where the preservation is not + * necessary. HRTIMER_STATE_MIGRATE is cleared after the timer is + * enqueued on the new cpu. * * All state transitions are protected by cpu_base->lock. */ @@ -148,7 +153,12 @@ struct hrtimer_clock_base { #endif }; -#define HRTIMER_MAX_CLOCK_BASES 2 +enum hrtimer_base_type { + HRTIMER_BASE_REALTIME, + HRTIMER_BASE_MONOTONIC, + HRTIMER_BASE_BOOTTIME, + HRTIMER_MAX_CLOCK_BASES, +}; /* * struct hrtimer_cpu_base - the per cpu clock bases @@ -308,6 +318,7 @@ static inline int hrtimer_is_hres_active(struct hrtimer *timer) extern ktime_t ktime_get(void); extern ktime_t ktime_get_real(void); +extern ktime_t ktime_get_boottime(void); DECLARE_PER_CPU(struct tick_device, tick_cpu_device); @@ -370,8 +381,9 @@ extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp); extern ktime_t hrtimer_get_next_event(void); /* - * A timer is active, when it is enqueued into the rbtree or the callback - * function is running. + * A timer is active, when it is enqueued into the rbtree or the + * callback function is running or it's in the state of being migrated + * to another cpu. */ static inline int hrtimer_active(const struct hrtimer *timer) { diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 903576df88d..06a8d9c7de9 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -258,9 +258,7 @@ struct i2c_board_info { unsigned short addr; void *platform_data; struct dev_archdata *archdata; -#ifdef CONFIG_OF struct device_node *of_node; -#endif int irq; }; diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 55e0d4253e4..59b72ca1c5d 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -14,6 +14,8 @@ #include <linux/smp.h> #include <linux/percpu.h> #include <linux/hrtimer.h> +#include <linux/kref.h> +#include <linux/workqueue.h> #include <asm/atomic.h> #include <asm/ptrace.h> @@ -55,7 +57,8 @@ * Used by threaded interrupts which need to keep the * irq line disabled until the threaded handler has been run. 
* IRQF_NO_SUSPEND - Do not disable this IRQ during suspend - * + * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set + * IRQF_NO_THREAD - Interrupt cannot be threaded */ #define IRQF_DISABLED 0x00000020 #define IRQF_SAMPLE_RANDOM 0x00000040 @@ -67,22 +70,10 @@ #define IRQF_IRQPOLL 0x00001000 #define IRQF_ONESHOT 0x00002000 #define IRQF_NO_SUSPEND 0x00004000 +#define IRQF_FORCE_RESUME 0x00008000 +#define IRQF_NO_THREAD 0x00010000 -#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND) - -/* - * Bits used by threaded handlers: - * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run - * IRQTF_DIED - handler thread died - * IRQTF_WARNED - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed - * IRQTF_AFFINITY - irq thread is requested to adjust affinity - */ -enum { - IRQTF_RUNTHREAD, - IRQTF_DIED, - IRQTF_WARNED, - IRQTF_AFFINITY, -}; +#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD) /* * These values can be returned by request_any_context_irq() and @@ -110,6 +101,7 @@ typedef irqreturn_t (*irq_handler_t)(int, void *); * @thread_fn: interupt handler function for threaded interrupts * @thread: thread pointer for threaded interrupts * @thread_flags: flags related to @thread + * @thread_mask: bitmask for keeping track of @thread activity */ struct irqaction { irq_handler_t handler; @@ -120,6 +112,7 @@ struct irqaction { irq_handler_t thread_fn; struct task_struct *thread; unsigned long thread_flags; + unsigned long thread_mask; const char *name; struct proc_dir_entry *dir; } ____cacheline_internodealigned_in_smp; @@ -240,6 +233,35 @@ extern int irq_can_set_affinity(unsigned int irq); extern int irq_select_affinity(unsigned int irq); extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m); + +/** + * struct irq_affinity_notify - context for notification of IRQ affinity changes + * @irq: Interrupt to which notification applies + * @kref: Reference count, for internal use + * @work: Work item, for internal use + * @notify: Function to be called on change. This will be + * called in process context. + * @release: Function to be called on release. This will be + * called in process context. Once registered, the + * structure must only be freed when this function is + * called or later. 
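As a sketch of how a driver might hook the affinity-notification interface documented here (the structure and irq_set_affinity_notifier() are declared just below), with all my_* names invented for illustration:

#include <linux/interrupt.h>
#include <linux/kref.h>

static void my_affinity_notify(struct irq_affinity_notify *notify,
			       const cpumask_t *mask)
{
	/* process context: re-target per-queue resources to the new CPU mask */
}

static void my_affinity_release(struct kref *ref)
{
	struct irq_affinity_notify *notify =
		container_of(ref, struct irq_affinity_notify, kref);
	/* last reference dropped; only now may the containing object be freed */
	(void)notify;
}

static struct irq_affinity_notify my_notify = {
	.notify		= my_affinity_notify,
	.release	= my_affinity_release,
};

/* after request_irq(irq, ...) in probe():
 *	ret = irq_set_affinity_notifier(irq, &my_notify);
 * and before free_irq() in remove():
 *	irq_set_affinity_notifier(irq, NULL);
 */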
+ */ +struct irq_affinity_notify { + unsigned int irq; + struct kref kref; + struct work_struct work; + void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask); + void (*release)(struct kref *ref); +}; + +extern int +irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify); + +static inline void irq_run_affinity_notifiers(void) +{ + flush_scheduled_work(); +} + #else /* CONFIG_SMP */ static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m) @@ -255,7 +277,7 @@ static inline int irq_can_set_affinity(unsigned int irq) static inline int irq_select_affinity(unsigned int irq) { return 0; } static inline int irq_set_affinity_hint(unsigned int irq, - const struct cpumask *m) + const struct cpumask *m) { return -EINVAL; } @@ -314,16 +336,24 @@ static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long } /* IRQ wakeup (PM) control: */ -extern int set_irq_wake(unsigned int irq, unsigned int on); +extern int irq_set_irq_wake(unsigned int irq, unsigned int on); + +#ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT +/* Please do not use: Use the replacement functions instead */ +static inline int set_irq_wake(unsigned int irq, unsigned int on) +{ + return irq_set_irq_wake(irq, on); +} +#endif static inline int enable_irq_wake(unsigned int irq) { - return set_irq_wake(irq, 1); + return irq_set_irq_wake(irq, 1); } static inline int disable_irq_wake(unsigned int irq) { - return set_irq_wake(irq, 0); + return irq_set_irq_wake(irq, 0); } #else /* !CONFIG_GENERIC_HARDIRQS */ @@ -353,6 +383,13 @@ static inline int disable_irq_wake(unsigned int irq) } #endif /* CONFIG_GENERIC_HARDIRQS */ + +#ifdef CONFIG_IRQ_FORCED_THREADING +extern bool force_irqthreads; +#else +#define force_irqthreads (0) +#endif + #ifndef __ARCH_SET_SOFTIRQ_PENDING #define set_softirq_pending(x) (local_softirq_pending() = (x)) #define or_softirq_pending(x) (local_softirq_pending() |= (x)) @@ -426,6 +463,13 @@ extern void raise_softirq(unsigned int nr); */ DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list); +DECLARE_PER_CPU(struct task_struct *, ksoftirqd); + +static inline struct task_struct *this_cpu_ksoftirqd(void) +{ + return this_cpu_read(ksoftirqd); +} + /* Try to send a softirq to a remote cpu. If this cannot be done, the * work will be queued to the local cpu. */ @@ -645,6 +689,7 @@ static inline void init_irq_proc(void) struct seq_file; int show_interrupts(struct seq_file *p, void *v); +int arch_show_interrupts(struct seq_file *p, int prec); extern int early_irq_init(void); extern int arch_probe_nr_irqs(void); diff --git a/include/linux/irq.h b/include/linux/irq.h index 80fcb53057b..1d3577f30d4 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -29,61 +29,104 @@ #include <asm/irq_regs.h> struct irq_desc; +struct irq_data; typedef void (*irq_flow_handler_t)(unsigned int irq, struct irq_desc *desc); - +typedef void (*irq_preflow_handler_t)(struct irq_data *data); /* * IRQ line status. 
* - * Bits 0-7 are reserved for the IRQF_* bits in linux/interrupt.h + * Bits 0-7 are the same as the IRQF_* bits in linux/interrupt.h + * + * IRQ_TYPE_NONE - default, unspecified type + * IRQ_TYPE_EDGE_RISING - rising edge triggered + * IRQ_TYPE_EDGE_FALLING - falling edge triggered + * IRQ_TYPE_EDGE_BOTH - rising and falling edge triggered + * IRQ_TYPE_LEVEL_HIGH - high level triggered + * IRQ_TYPE_LEVEL_LOW - low level triggered + * IRQ_TYPE_LEVEL_MASK - Mask to filter out the level bits + * IRQ_TYPE_SENSE_MASK - Mask for all the above bits + * IRQ_TYPE_PROBE - Special flag for probing in progress + * + * Bits which can be modified via irq_set/clear/modify_status_flags() + * IRQ_LEVEL - Interrupt is level type. Will be also + * updated in the code when the above trigger + * bits are modified via set_irq_type() + * IRQ_PER_CPU - Mark an interrupt PER_CPU. Will protect + * it from affinity setting + * IRQ_NOPROBE - Interrupt cannot be probed by autoprobing + * IRQ_NOREQUEST - Interrupt cannot be requested via + * request_irq() + * IRQ_NOAUTOEN - Interrupt is not automatically enabled in + * request/setup_irq() + * IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set) + * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context + * IRQ_NESTED_TRHEAD - Interrupt nests into another thread + * + * Deprecated bits. They are kept updated as long as + * CONFIG_GENERIC_HARDIRQS_NO_COMPAT is not set. Will go away soon. These bits + * are internal state of the core code and if you really need to acces + * them then talk to the genirq maintainer instead of hacking + * something weird. * - * IRQ types */ -#define IRQ_TYPE_NONE 0x00000000 /* Default, unspecified type */ -#define IRQ_TYPE_EDGE_RISING 0x00000001 /* Edge rising type */ -#define IRQ_TYPE_EDGE_FALLING 0x00000002 /* Edge falling type */ -#define IRQ_TYPE_EDGE_BOTH (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING) -#define IRQ_TYPE_LEVEL_HIGH 0x00000004 /* Level high type */ -#define IRQ_TYPE_LEVEL_LOW 0x00000008 /* Level low type */ -#define IRQ_TYPE_SENSE_MASK 0x0000000f /* Mask of the above */ -#define IRQ_TYPE_PROBE 0x00000010 /* Probing in progress */ - -/* Internal flags */ -#define IRQ_INPROGRESS 0x00000100 /* IRQ handler active - do not enter! */ -#define IRQ_DISABLED 0x00000200 /* IRQ disabled - do not enter! 
*/ -#define IRQ_PENDING 0x00000400 /* IRQ pending - replay on enable */ -#define IRQ_REPLAY 0x00000800 /* IRQ has been replayed but not acked yet */ -#define IRQ_AUTODETECT 0x00001000 /* IRQ is being autodetected */ -#define IRQ_WAITING 0x00002000 /* IRQ not yet seen - for autodetection */ -#define IRQ_LEVEL 0x00004000 /* IRQ level triggered */ -#define IRQ_MASKED 0x00008000 /* IRQ masked - shouldn't be seen again */ -#define IRQ_PER_CPU 0x00010000 /* IRQ is per CPU */ -#define IRQ_NOPROBE 0x00020000 /* IRQ is not valid for probing */ -#define IRQ_NOREQUEST 0x00040000 /* IRQ cannot be requested */ -#define IRQ_NOAUTOEN 0x00080000 /* IRQ will not be enabled on request irq */ -#define IRQ_WAKEUP 0x00100000 /* IRQ triggers system wakeup */ -#define IRQ_MOVE_PENDING 0x00200000 /* need to re-target IRQ destination */ -#define IRQ_NO_BALANCING 0x00400000 /* IRQ is excluded from balancing */ -#define IRQ_SPURIOUS_DISABLED 0x00800000 /* IRQ was disabled by the spurious trap */ -#define IRQ_MOVE_PCNTXT 0x01000000 /* IRQ migration from process context */ -#define IRQ_AFFINITY_SET 0x02000000 /* IRQ affinity was set from userspace*/ -#define IRQ_SUSPENDED 0x04000000 /* IRQ has gone through suspend sequence */ -#define IRQ_ONESHOT 0x08000000 /* IRQ is not unmasked after hardirq */ -#define IRQ_NESTED_THREAD 0x10000000 /* IRQ is nested into another, no own handler thread */ +enum { + IRQ_TYPE_NONE = 0x00000000, + IRQ_TYPE_EDGE_RISING = 0x00000001, + IRQ_TYPE_EDGE_FALLING = 0x00000002, + IRQ_TYPE_EDGE_BOTH = (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING), + IRQ_TYPE_LEVEL_HIGH = 0x00000004, + IRQ_TYPE_LEVEL_LOW = 0x00000008, + IRQ_TYPE_LEVEL_MASK = (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH), + IRQ_TYPE_SENSE_MASK = 0x0000000f, + + IRQ_TYPE_PROBE = 0x00000010, + + IRQ_LEVEL = (1 << 8), + IRQ_PER_CPU = (1 << 9), + IRQ_NOPROBE = (1 << 10), + IRQ_NOREQUEST = (1 << 11), + IRQ_NOAUTOEN = (1 << 12), + IRQ_NO_BALANCING = (1 << 13), + IRQ_MOVE_PCNTXT = (1 << 14), + IRQ_NESTED_THREAD = (1 << 15), + +#ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT + IRQ_INPROGRESS = (1 << 16), + IRQ_REPLAY = (1 << 17), + IRQ_WAITING = (1 << 18), + IRQ_DISABLED = (1 << 19), + IRQ_PENDING = (1 << 20), + IRQ_MASKED = (1 << 21), + IRQ_MOVE_PENDING = (1 << 22), + IRQ_AFFINITY_SET = (1 << 23), + IRQ_WAKEUP = (1 << 24), +#endif +}; #define IRQF_MODIFY_MASK \ (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \ - IRQ_PER_CPU) + IRQ_PER_CPU | IRQ_NESTED_THREAD) -#ifdef CONFIG_IRQ_PER_CPU -# define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU) -# define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) -#else -# define CHECK_IRQ_PER_CPU(var) 0 -# define IRQ_NO_BALANCING_MASK IRQ_NO_BALANCING -#endif +#define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) + +static inline __deprecated bool CHECK_IRQ_PER_CPU(unsigned int status) +{ + return status & IRQ_PER_CPU; +} + +/* + * Return value for chip->irq_set_affinity() + * + * IRQ_SET_MASK_OK - OK, core updates irq_data.affinity + * IRQ_SET_MASK_NOCPY - OK, chip did update irq_data.affinity + */ +enum { + IRQ_SET_MASK_OK = 0, + IRQ_SET_MASK_OK_NOCOPY, +}; struct msi_desc; @@ -91,6 +134,8 @@ struct msi_desc; * struct irq_data - per irq and irq chip data passed down to chip functions * @irq: interrupt number * @node: node index useful for balancing + * @state_use_accessor: status information for irq chip functions. 
+ * Use accessor functions to deal with it * @chip: low level interrupt hardware access * @handler_data: per-IRQ data for the irq_chip methods * @chip_data: platform-specific per-chip private data for the chip @@ -105,6 +150,7 @@ struct msi_desc; struct irq_data { unsigned int irq; unsigned int node; + unsigned int state_use_accessors; struct irq_chip *chip; void *handler_data; void *chip_data; @@ -114,6 +160,80 @@ struct irq_data { #endif }; +/* + * Bit masks for irq_data.state + * + * IRQD_TRIGGER_MASK - Mask for the trigger type bits + * IRQD_SETAFFINITY_PENDING - Affinity setting is pending + * IRQD_NO_BALANCING - Balancing disabled for this IRQ + * IRQD_PER_CPU - Interrupt is per cpu + * IRQD_AFFINITY_SET - Interrupt affinity was set + * IRQD_LEVEL - Interrupt is level triggered + * IRQD_WAKEUP_STATE - Interrupt is configured for wakeup + * from suspend + * IRDQ_MOVE_PCNTXT - Interrupt can be moved in process + * context + */ +enum { + IRQD_TRIGGER_MASK = 0xf, + IRQD_SETAFFINITY_PENDING = (1 << 8), + IRQD_NO_BALANCING = (1 << 10), + IRQD_PER_CPU = (1 << 11), + IRQD_AFFINITY_SET = (1 << 12), + IRQD_LEVEL = (1 << 13), + IRQD_WAKEUP_STATE = (1 << 14), + IRQD_MOVE_PCNTXT = (1 << 15), +}; + +static inline bool irqd_is_setaffinity_pending(struct irq_data *d) +{ + return d->state_use_accessors & IRQD_SETAFFINITY_PENDING; +} + +static inline bool irqd_is_per_cpu(struct irq_data *d) +{ + return d->state_use_accessors & IRQD_PER_CPU; +} + +static inline bool irqd_can_balance(struct irq_data *d) +{ + return !(d->state_use_accessors & (IRQD_PER_CPU | IRQD_NO_BALANCING)); +} + +static inline bool irqd_affinity_was_set(struct irq_data *d) +{ + return d->state_use_accessors & IRQD_AFFINITY_SET; +} + +static inline u32 irqd_get_trigger_type(struct irq_data *d) +{ + return d->state_use_accessors & IRQD_TRIGGER_MASK; +} + +/* + * Must only be called inside irq_chip.irq_set_type() functions. 
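A hedged sketch of the one legitimate caller of the helper defined just below, an irq_chip .irq_set_type() implementation (the chip itself is hypothetical, not part of this series):

static int my_chip_irq_set_type(struct irq_data *d, unsigned int type)
{
	if (type & ~IRQ_TYPE_SENSE_MASK)
		return -EINVAL;

	/* ... program the trigger mode into the hardware for d->irq ... */

	/* keep the per-irq state in sync so irqd_get_trigger_type() and
	 * irqd_is_level_type() report what the hardware was told */
	irqd_set_trigger_type(d, type);
	return 0;
}

static struct irq_chip my_chip = {
	.name		= "my-chip",
	.irq_set_type	= my_chip_irq_set_type,
	/* .irq_mask, .irq_unmask, .irq_ack, ... */
};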
+ */ +static inline void irqd_set_trigger_type(struct irq_data *d, u32 type) +{ + d->state_use_accessors &= ~IRQD_TRIGGER_MASK; + d->state_use_accessors |= type & IRQD_TRIGGER_MASK; +} + +static inline bool irqd_is_level_type(struct irq_data *d) +{ + return d->state_use_accessors & IRQD_LEVEL; +} + +static inline bool irqd_is_wakeup_set(struct irq_data *d) +{ + return d->state_use_accessors & IRQD_WAKEUP_STATE; +} + +static inline bool irqd_can_move_in_process_context(struct irq_data *d) +{ + return d->state_use_accessors & IRQD_MOVE_PCNTXT; +} + /** * struct irq_chip - hardware interrupt chip descriptor * @@ -150,6 +270,7 @@ struct irq_data { * @irq_set_wake: enable/disable power-management wake-on of an IRQ * @irq_bus_lock: function to lock access to slow bus (i2c) chips * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips + * @flags: chip specific flags * * @release: release function solely used by UML */ @@ -196,12 +317,27 @@ struct irq_chip { void (*irq_bus_lock)(struct irq_data *data); void (*irq_bus_sync_unlock)(struct irq_data *data); + unsigned long flags; + /* Currently used only by UML, might disappear one day.*/ #ifdef CONFIG_IRQ_RELEASE_METHOD void (*release)(unsigned int irq, void *dev_id); #endif }; +/* + * irq_chip specific flags + * + * IRQCHIP_SET_TYPE_MASKED: Mask before calling chip.irq_set_type() + * IRQCHIP_EOI_IF_HANDLED: Only issue irq_eoi() when irq was handled + * IRQCHIP_MASK_ON_SUSPEND: Mask non wake irqs in the suspend path + */ +enum { + IRQCHIP_SET_TYPE_MASKED = (1 << 0), + IRQCHIP_EOI_IF_HANDLED = (1 << 1), + IRQCHIP_MASK_ON_SUSPEND = (1 << 2), +}; + /* This include will go away once we isolated irq_desc usage to core code */ #include <linux/irqdesc.h> @@ -218,7 +354,7 @@ struct irq_chip { # define ARCH_IRQ_INIT_FLAGS 0 #endif -#define IRQ_DEFAULT_INIT_FLAGS (IRQ_DISABLED | ARCH_IRQ_INIT_FLAGS) +#define IRQ_DEFAULT_INIT_FLAGS ARCH_IRQ_INIT_FLAGS struct irqaction; extern int setup_irq(unsigned int irq, struct irqaction *new); @@ -229,9 +365,13 @@ extern void remove_irq(unsigned int irq, struct irqaction *act); #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ) void move_native_irq(int irq); void move_masked_irq(int irq); +void irq_move_irq(struct irq_data *data); +void irq_move_masked_irq(struct irq_data *data); #else static inline void move_native_irq(int irq) { } static inline void move_masked_irq(int irq) { } +static inline void irq_move_irq(struct irq_data *data) { } +static inline void irq_move_masked_irq(struct irq_data *data) { } #endif extern int no_irq_affinity; @@ -267,23 +407,23 @@ extern struct irq_chip no_irq_chip; extern struct irq_chip dummy_irq_chip; extern void -set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip, - irq_flow_handler_t handle); -extern void -set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip, +irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip, irq_flow_handler_t handle, const char *name); +static inline void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *chip, + irq_flow_handler_t handle) +{ + irq_set_chip_and_handler_name(irq, chip, handle, NULL); +} + extern void -__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, +__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, const char *name); -/* - * Set a highlevel flow handler for a given IRQ: - */ static inline void -set_irq_handler(unsigned int irq, irq_flow_handler_t handle) +irq_set_handler(unsigned int irq, irq_flow_handler_t 
handle) { - __set_irq_handler(irq, handle, 0, NULL); + __irq_set_handler(irq, handle, 0, NULL); } /* @@ -292,14 +432,11 @@ set_irq_handler(unsigned int irq, irq_flow_handler_t handle) * IRQ_NOREQUEST and IRQ_NOPROBE) */ static inline void -set_irq_chained_handler(unsigned int irq, - irq_flow_handler_t handle) +irq_set_chained_handler(unsigned int irq, irq_flow_handler_t handle) { - __set_irq_handler(irq, handle, 1, NULL); + __irq_set_handler(irq, handle, 1, NULL); } -extern void set_irq_nested_thread(unsigned int irq, int nest); - void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set); static inline void irq_set_status_flags(unsigned int irq, unsigned long set) @@ -312,16 +449,24 @@ static inline void irq_clear_status_flags(unsigned int irq, unsigned long clr) irq_modify_status(irq, clr, 0); } -static inline void set_irq_noprobe(unsigned int irq) +static inline void irq_set_noprobe(unsigned int irq) { irq_modify_status(irq, 0, IRQ_NOPROBE); } -static inline void set_irq_probe(unsigned int irq) +static inline void irq_set_probe(unsigned int irq) { irq_modify_status(irq, IRQ_NOPROBE, 0); } +static inline void irq_set_nested_thread(unsigned int irq, bool nest) +{ + if (nest) + irq_set_status_flags(irq, IRQ_NESTED_THREAD); + else + irq_clear_status_flags(irq, IRQ_NESTED_THREAD); +} + /* Handle dynamic irq creation and destruction */ extern unsigned int create_irq_nr(unsigned int irq_want, int node); extern int create_irq(void); @@ -338,14 +483,14 @@ static inline void dynamic_irq_init(unsigned int irq) } /* Set/get chip/data for an IRQ: */ -extern int set_irq_chip(unsigned int irq, struct irq_chip *chip); -extern int set_irq_data(unsigned int irq, void *data); -extern int set_irq_chip_data(unsigned int irq, void *data); -extern int set_irq_type(unsigned int irq, unsigned int type); -extern int set_irq_msi(unsigned int irq, struct msi_desc *entry); +extern int irq_set_chip(unsigned int irq, struct irq_chip *chip); +extern int irq_set_handler_data(unsigned int irq, void *data); +extern int irq_set_chip_data(unsigned int irq, void *data); +extern int irq_set_irq_type(unsigned int irq, unsigned int type); +extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry); extern struct irq_data *irq_get_irq_data(unsigned int irq); -static inline struct irq_chip *get_irq_chip(unsigned int irq) +static inline struct irq_chip *irq_get_chip(unsigned int irq) { struct irq_data *d = irq_get_irq_data(irq); return d ? d->chip : NULL; @@ -356,7 +501,7 @@ static inline struct irq_chip *irq_data_get_irq_chip(struct irq_data *d) return d->chip; } -static inline void *get_irq_chip_data(unsigned int irq) +static inline void *irq_get_chip_data(unsigned int irq) { struct irq_data *d = irq_get_irq_data(irq); return d ? d->chip_data : NULL; @@ -367,18 +512,18 @@ static inline void *irq_data_get_irq_chip_data(struct irq_data *d) return d->chip_data; } -static inline void *get_irq_data(unsigned int irq) +static inline void *irq_get_handler_data(unsigned int irq) { struct irq_data *d = irq_get_irq_data(irq); return d ? d->handler_data : NULL; } -static inline void *irq_data_get_irq_data(struct irq_data *d) +static inline void *irq_data_get_irq_handler_data(struct irq_data *d) { return d->handler_data; } -static inline struct msi_desc *get_irq_msi(unsigned int irq) +static inline struct msi_desc *irq_get_msi_desc(unsigned int irq) { struct irq_data *d = irq_get_irq_data(irq); return d ? 
d->msi_desc : NULL; @@ -389,6 +534,89 @@ static inline struct msi_desc *irq_data_get_msi(struct irq_data *d) return d->msi_desc; } +#ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT +/* Please do not use: Use the replacement functions instead */ +static inline int set_irq_chip(unsigned int irq, struct irq_chip *chip) +{ + return irq_set_chip(irq, chip); +} +static inline int set_irq_data(unsigned int irq, void *data) +{ + return irq_set_handler_data(irq, data); +} +static inline int set_irq_chip_data(unsigned int irq, void *data) +{ + return irq_set_chip_data(irq, data); +} +static inline int set_irq_type(unsigned int irq, unsigned int type) +{ + return irq_set_irq_type(irq, type); +} +static inline int set_irq_msi(unsigned int irq, struct msi_desc *entry) +{ + return irq_set_msi_desc(irq, entry); +} +static inline struct irq_chip *get_irq_chip(unsigned int irq) +{ + return irq_get_chip(irq); +} +static inline void *get_irq_chip_data(unsigned int irq) +{ + return irq_get_chip_data(irq); +} +static inline void *get_irq_data(unsigned int irq) +{ + return irq_get_handler_data(irq); +} +static inline void *irq_data_get_irq_data(struct irq_data *d) +{ + return irq_data_get_irq_handler_data(d); +} +static inline struct msi_desc *get_irq_msi(unsigned int irq) +{ + return irq_get_msi_desc(irq); +} +static inline void set_irq_noprobe(unsigned int irq) +{ + irq_set_noprobe(irq); +} +static inline void set_irq_probe(unsigned int irq) +{ + irq_set_probe(irq); +} +static inline void set_irq_nested_thread(unsigned int irq, int nest) +{ + irq_set_nested_thread(irq, nest); +} +static inline void +set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip, + irq_flow_handler_t handle, const char *name) +{ + irq_set_chip_and_handler_name(irq, chip, handle, name); +} +static inline void +set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip, + irq_flow_handler_t handle) +{ + irq_set_chip_and_handler(irq, chip, handle); +} +static inline void +__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, + const char *name) +{ + __irq_set_handler(irq, handle, is_chained, name); +} +static inline void set_irq_handler(unsigned int irq, irq_flow_handler_t handle) +{ + irq_set_handler(irq, handle); +} +static inline void +set_irq_chained_handler(unsigned int irq, irq_flow_handler_t handle) +{ + irq_set_chained_handler(irq, handle); +} +#endif + int irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node); void irq_free_descs(unsigned int irq, unsigned int cnt); int irq_reserve_irqs(unsigned int from, unsigned int cnt); diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index c1a95b7b58d..00218371518 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -8,6 +8,7 @@ * For now it's included from <linux/irq.h> */ +struct irq_affinity_notify; struct proc_dir_entry; struct timer_rand_state; /** @@ -18,13 +19,16 @@ struct timer_rand_state; * @handle_irq: highlevel irq-events handler [if NULL, __do_IRQ()] * @action: the irq action chain * @status: status information + * @core_internal_state__do_not_mess_with_it: core internal status information * @depth: disable-depth, for nested irq_disable() calls * @wake_depth: enable depth, for multiple set_irq_wake() callers * @irq_count: stats field to detect stalled irqs * @last_unhandled: aging timer for unhandled count * @irqs_unhandled: stats field for spurious unhandled interrupts * @lock: locking for SMP + * @affinity_notify: context for notification of affinity changes * @pending_mask: pending 
rebalanced interrupts + * @threads_oneshot: bitfield to handle shared oneshot threads * @threads_active: number of irqaction threads currently running * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers * @dir: /proc/irq/ procfs entry @@ -45,6 +49,7 @@ struct irq_desc { struct { unsigned int irq; unsigned int node; + unsigned int pad_do_not_even_think_about_it; struct irq_chip *chip; void *handler_data; void *chip_data; @@ -59,9 +64,16 @@ struct irq_desc { struct timer_rand_state *timer_rand_state; unsigned int __percpu *kstat_irqs; irq_flow_handler_t handle_irq; +#ifdef CONFIG_IRQ_PREFLOW_FASTEOI + irq_preflow_handler_t preflow_handler; +#endif struct irqaction *action; /* IRQ action list */ +#ifdef CONFIG_GENERIC_HARDIRQS_NO_COMPAT + unsigned int status_use_accessors; +#else unsigned int status; /* IRQ status */ - +#endif + unsigned int core_internal_state__do_not_mess_with_it; unsigned int depth; /* nested irq disables */ unsigned int wake_depth; /* nested wake enables */ unsigned int irq_count; /* For detecting broken IRQs */ @@ -70,10 +82,12 @@ struct irq_desc { raw_spinlock_t lock; #ifdef CONFIG_SMP const struct cpumask *affinity_hint; + struct irq_affinity_notify *affinity_notify; #ifdef CONFIG_GENERIC_PENDING_IRQ cpumask_var_t pending_mask; #endif #endif + unsigned long threads_oneshot; atomic_t threads_active; wait_queue_head_t wait_for_threads; #ifdef CONFIG_PROC_FS @@ -95,10 +109,51 @@ static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node) #ifdef CONFIG_GENERIC_HARDIRQS -#define get_irq_desc_chip(desc) ((desc)->irq_data.chip) -#define get_irq_desc_chip_data(desc) ((desc)->irq_data.chip_data) -#define get_irq_desc_data(desc) ((desc)->irq_data.handler_data) -#define get_irq_desc_msi(desc) ((desc)->irq_data.msi_desc) +static inline struct irq_data *irq_desc_get_irq_data(struct irq_desc *desc) +{ + return &desc->irq_data; +} + +static inline struct irq_chip *irq_desc_get_chip(struct irq_desc *desc) +{ + return desc->irq_data.chip; +} + +static inline void *irq_desc_get_chip_data(struct irq_desc *desc) +{ + return desc->irq_data.chip_data; +} + +static inline void *irq_desc_get_handler_data(struct irq_desc *desc) +{ + return desc->irq_data.handler_data; +} + +static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc) +{ + return desc->irq_data.msi_desc; +} + +#ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT +static inline struct irq_chip *get_irq_desc_chip(struct irq_desc *desc) +{ + return irq_desc_get_chip(desc); +} +static inline void *get_irq_desc_data(struct irq_desc *desc) +{ + return irq_desc_get_handler_data(desc); +} + +static inline void *get_irq_desc_chip_data(struct irq_desc *desc) +{ + return irq_desc_get_chip_data(desc); +} + +static inline struct msi_desc *get_irq_desc_msi(struct irq_desc *desc) +{ + return irq_desc_get_msi_desc(desc); +} +#endif /* * Architectures call this to let the generic IRQ layer @@ -123,6 +178,7 @@ static inline int irq_has_action(unsigned int irq) return desc->action != NULL; } +#ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT static inline int irq_balancing_disabled(unsigned int irq) { struct irq_desc *desc; @@ -130,6 +186,7 @@ static inline int irq_balancing_disabled(unsigned int irq) desc = irq_to_desc(irq); return desc->status & IRQ_NO_BALANCING_MASK; } +#endif /* caller has locked the irq_desc and both params are valid */ static inline void __set_irq_handler_unlocked(int irq, @@ -140,6 +197,17 @@ static inline void __set_irq_handler_unlocked(int irq, desc = irq_to_desc(irq); 
desc->handle_irq = handler; } + +#ifdef CONFIG_IRQ_PREFLOW_FASTEOI +static inline void +__irq_set_preflow_handler(unsigned int irq, irq_preflow_handler_t handler) +{ + struct irq_desc *desc; + + desc = irq_to_desc(irq); + desc->preflow_handler = handler; +} +#endif #endif #endif diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h index 6811f4bfc6e..922aa313c9f 100644 --- a/include/linux/jiffies.h +++ b/include/linux/jiffies.h @@ -307,6 +307,7 @@ extern clock_t jiffies_to_clock_t(long x); extern unsigned long clock_t_to_jiffies(unsigned long x); extern u64 jiffies_64_to_clock_t(u64 x); extern u64 nsec_to_clock_t(u64 x); +extern u64 nsecs_to_jiffies64(u64 n); extern unsigned long nsecs_to_jiffies(u64 n); #define TIMESTAMP_SIZE 30 diff --git a/include/linux/kthread.h b/include/linux/kthread.h index ce0775aa64c..7ff16f7d3ed 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h @@ -64,7 +64,7 @@ struct kthread_work { }; #define KTHREAD_WORKER_INIT(worker) { \ - .lock = SPIN_LOCK_UNLOCKED, \ + .lock = __SPIN_LOCK_UNLOCKED((worker).lock), \ .work_list = LIST_HEAD_INIT((worker).work_list), \ } diff --git a/include/linux/mm.h b/include/linux/mm.h index f6385fc17ad..679300c050f 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1309,8 +1309,6 @@ int add_from_early_node_map(struct range *range, int az, int nr_range, int nid); u64 __init find_memory_core_early(int nid, u64 size, u64 align, u64 goal, u64 limit); -void *__alloc_memory_core_early(int nodeid, u64 size, u64 align, - u64 goal, u64 limit); typedef int (*work_fn_t)(unsigned long, unsigned long, void *); extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data); extern void sparse_memory_present_with_active_regions(int nid); diff --git a/include/linux/namei.h b/include/linux/namei.h index f276d4fa01f..9c8603872c3 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -19,7 +19,6 @@ struct nameidata { struct path path; struct qstr last; struct path root; - struct file *file; struct inode *inode; /* path.dentry.d_inode */ unsigned int flags; unsigned seq; @@ -63,6 +62,10 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND}; #define LOOKUP_EXCL 0x0400 #define LOOKUP_RENAME_TARGET 0x0800 +#define LOOKUP_JUMPED 0x1000 +#define LOOKUP_ROOT 0x2000 +#define LOOKUP_EMPTY 0x4000 + extern int user_path_at(int, const char __user *, unsigned, struct path *); #define user_path(name, path) user_path_at(AT_FDCWD, name, LOOKUP_FOLLOW, path) @@ -72,7 +75,7 @@ extern int user_path_at(int, const char __user *, unsigned, struct path *); extern int kern_path(const char *, unsigned, struct path *); -extern int path_lookup(const char *, unsigned, struct nameidata *); +extern int kern_path_parent(const char *, struct nameidata *); extern int vfs_path_lookup(struct dentry *, struct vfsmount *, const char *, unsigned int, struct nameidata *); diff --git a/include/linux/of.h b/include/linux/of.h index cad7cf0ab27..266db1d0baa 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -23,8 +23,6 @@ #include <asm/byteorder.h> -#ifdef CONFIG_OF - typedef u32 phandle; typedef u32 ihandle; @@ -65,11 +63,18 @@ struct device_node { #endif }; +#ifdef CONFIG_OF + /* Pointer for first entry in chain of all nodes. 
*/ extern struct device_node *allnodes; extern struct device_node *of_chosen; extern rwlock_t devtree_lock; +static inline bool of_have_populated_dt(void) +{ + return allnodes != NULL; +} + static inline bool of_node_is_root(const struct device_node *node) { return node && (node->parent == NULL); @@ -222,5 +227,12 @@ extern void of_attach_node(struct device_node *); extern void of_detach_node(struct device_node *); #endif +#else + +static inline bool of_have_populated_dt(void) +{ + return false; +} + #endif /* CONFIG_OF */ #endif /* _LINUX_OF_H */ diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h new file mode 100644 index 00000000000..85a27b650d7 --- /dev/null +++ b/include/linux/of_pci.h @@ -0,0 +1,9 @@ +#ifndef __OF_PCI_H +#define __OF_PCI_H + +#include <linux/pci.h> + +struct pci_dev; +struct of_irq; +int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq); +#endif diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index dda5b0a3ff6..614615b8d42 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -225,8 +225,14 @@ struct perf_event_attr { }; __u32 bp_type; - __u64 bp_addr; - __u64 bp_len; + union { + __u64 bp_addr; + __u64 config1; /* extension of config */ + }; + union { + __u64 bp_len; + __u64 config2; /* extension of config1 */ + }; }; /* @@ -464,6 +470,7 @@ enum perf_callchain_context { #define PERF_FLAG_FD_NO_GROUP (1U << 0) #define PERF_FLAG_FD_OUTPUT (1U << 1) +#define PERF_FLAG_PID_CGROUP (1U << 2) /* pid=cgroup id, per-cpu mode only */ #ifdef __KERNEL__ /* @@ -471,6 +478,7 @@ enum perf_callchain_context { */ #ifdef CONFIG_PERF_EVENTS +# include <linux/cgroup.h> # include <asm/perf_event.h> # include <asm/local64.h> #endif @@ -539,6 +547,9 @@ struct hw_perf_event { unsigned long event_base; int idx; int last_cpu; + unsigned int extra_reg; + u64 extra_config; + int extra_alloc; }; struct { /* software */ struct hrtimer hrtimer; @@ -716,6 +727,22 @@ struct swevent_hlist { #define PERF_ATTACH_GROUP 0x02 #define PERF_ATTACH_TASK 0x04 +#ifdef CONFIG_CGROUP_PERF +/* + * perf_cgroup_info keeps track of time_enabled for a cgroup. + * This is a per-cpu dynamically allocated data structure. 
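A userspace sketch of the PERF_FLAG_PID_CGROUP mode added above: the pid argument of perf_event_open() is reinterpreted as a cgroup-directory file descriptor, and the event must be per-cpu (cpu >= 0). The helper name and cgroup path are illustrative only.

#define _GNU_SOURCE
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int cgroup_cycles_on_cpu(const char *cgroup_dir, int cpu)
{
	struct perf_event_attr attr;
	int fd, cgrp_fd;

	/* a directory inside the perf cgroup hierarchy */
	cgrp_fd = open(cgroup_dir, O_RDONLY);
	if (cgrp_fd < 0)
		return -1;

	memset(&attr, 0, sizeof(attr));
	attr.size   = sizeof(attr);
	attr.type   = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;

	/* cgrp_fd is passed where a pid would normally go */
	fd = syscall(__NR_perf_event_open, &attr, cgrp_fd, cpu,
		     -1 /* group_fd */, PERF_FLAG_PID_CGROUP);
	close(cgrp_fd);
	return fd;
}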
+ */ +struct perf_cgroup_info { + u64 time; + u64 timestamp; +}; + +struct perf_cgroup { + struct cgroup_subsys_state css; + struct perf_cgroup_info *info; /* timing info, one per cpu */ +}; +#endif + /** * struct perf_event - performance event kernel representation: */ @@ -832,6 +859,11 @@ struct perf_event { struct event_filter *filter; #endif +#ifdef CONFIG_CGROUP_PERF + struct perf_cgroup *cgrp; /* cgroup event is attach to */ + int cgrp_defer_enabled; +#endif + #endif /* CONFIG_PERF_EVENTS */ }; @@ -886,6 +918,7 @@ struct perf_event_context { u64 generation; int pin_count; struct rcu_head rcu_head; + int nr_cgroups; /* cgroup events present */ }; /* @@ -905,6 +938,9 @@ struct perf_cpu_context { struct list_head rotation_list; int jiffies_interval; struct pmu *active_pmu; +#ifdef CONFIG_CGROUP_PERF + struct perf_cgroup *cgrp; +#endif }; struct perf_output_handle { @@ -1040,11 +1076,11 @@ have_event: __perf_sw_event(event_id, nr, nmi, regs, addr); } -extern atomic_t perf_task_events; +extern atomic_t perf_sched_events; static inline void perf_event_task_sched_in(struct task_struct *task) { - COND_STMT(&perf_task_events, __perf_event_task_sched_in(task)); + COND_STMT(&perf_sched_events, __perf_event_task_sched_in(task)); } static inline @@ -1052,7 +1088,7 @@ void perf_event_task_sched_out(struct task_struct *task, struct task_struct *nex { perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0); - COND_STMT(&perf_task_events, __perf_event_task_sched_out(task, next)); + COND_STMT(&perf_sched_events, __perf_event_task_sched_out(task, next)); } extern void perf_event_mmap(struct vm_area_struct *vma); @@ -1083,6 +1119,10 @@ extern int sysctl_perf_event_paranoid; extern int sysctl_perf_event_mlock; extern int sysctl_perf_event_sample_rate; +extern int perf_proc_update_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); + static inline bool perf_paranoid_tracepoint_raw(void) { return sysctl_perf_event_paranoid > -1; diff --git a/include/linux/plist.h b/include/linux/plist.h index 7254eda078e..c9b9f322c8d 100644 --- a/include/linux/plist.h +++ b/include/linux/plist.h @@ -31,15 +31,17 @@ * * Simple ASCII art explanation: * - * |HEAD | - * | | - * |prio_list.prev|<------------------------------------| - * |prio_list.next|<->|pl|<->|pl|<--------------->|pl|<-| - * |10 | |10| |21| |21| |21| |40| (prio) - * | | | | | | | | | | | | - * | | | | | | | | | | | | - * |node_list.next|<->|nl|<->|nl|<->|nl|<->|nl|<->|nl|<-| - * |node_list.prev|<------------------------------------| + * pl:prio_list (only for plist_node) + * nl:node_list + * HEAD| NODE(S) + * | + * ||------------------------------------| + * ||->|pl|<->|pl|<--------------->|pl|<-| + * | |10| |21| |21| |21| |40| (prio) + * | | | | | | | | | | | + * | | | | | | | | | | | + * |->|nl|<->|nl|<->|nl|<->|nl|<->|nl|<->|nl|<-| + * |-------------------------------------------| * * The nodes on the prio_list list are sorted by priority to simplify * the insertion of new nodes. 
There are no nodes with duplicate @@ -78,7 +80,6 @@ #include <linux/spinlock_types.h> struct plist_head { - struct list_head prio_list; struct list_head node_list; #ifdef CONFIG_DEBUG_PI_LIST raw_spinlock_t *rawlock; @@ -88,7 +89,8 @@ struct plist_head { struct plist_node { int prio; - struct plist_head plist; + struct list_head prio_list; + struct list_head node_list; }; #ifdef CONFIG_DEBUG_PI_LIST @@ -100,7 +102,6 @@ struct plist_node { #endif #define _PLIST_HEAD_INIT(head) \ - .prio_list = LIST_HEAD_INIT((head).prio_list), \ .node_list = LIST_HEAD_INIT((head).node_list) /** @@ -133,7 +134,8 @@ struct plist_node { #define PLIST_NODE_INIT(node, __prio) \ { \ .prio = (__prio), \ - .plist = { _PLIST_HEAD_INIT((node).plist) }, \ + .prio_list = LIST_HEAD_INIT((node).prio_list), \ + .node_list = LIST_HEAD_INIT((node).node_list), \ } /** @@ -144,7 +146,6 @@ struct plist_node { static inline void plist_head_init(struct plist_head *head, spinlock_t *lock) { - INIT_LIST_HEAD(&head->prio_list); INIT_LIST_HEAD(&head->node_list); #ifdef CONFIG_DEBUG_PI_LIST head->spinlock = lock; @@ -160,7 +161,6 @@ plist_head_init(struct plist_head *head, spinlock_t *lock) static inline void plist_head_init_raw(struct plist_head *head, raw_spinlock_t *lock) { - INIT_LIST_HEAD(&head->prio_list); INIT_LIST_HEAD(&head->node_list); #ifdef CONFIG_DEBUG_PI_LIST head->rawlock = lock; @@ -176,7 +176,8 @@ plist_head_init_raw(struct plist_head *head, raw_spinlock_t *lock) static inline void plist_node_init(struct plist_node *node, int prio) { node->prio = prio; - plist_head_init(&node->plist, NULL); + INIT_LIST_HEAD(&node->prio_list); + INIT_LIST_HEAD(&node->node_list); } extern void plist_add(struct plist_node *node, struct plist_head *head); @@ -188,7 +189,7 @@ extern void plist_del(struct plist_node *node, struct plist_head *head); * @head: the head for your list */ #define plist_for_each(pos, head) \ - list_for_each_entry(pos, &(head)->node_list, plist.node_list) + list_for_each_entry(pos, &(head)->node_list, node_list) /** * plist_for_each_safe - iterate safely over a plist of given type @@ -199,7 +200,7 @@ extern void plist_del(struct plist_node *node, struct plist_head *head); * Iterate over a plist of given type, safe against removal of list entry. */ #define plist_for_each_safe(pos, n, head) \ - list_for_each_entry_safe(pos, n, &(head)->node_list, plist.node_list) + list_for_each_entry_safe(pos, n, &(head)->node_list, node_list) /** * plist_for_each_entry - iterate over list of given type @@ -208,7 +209,7 @@ extern void plist_del(struct plist_node *node, struct plist_head *head); * @mem: the name of the list_struct within the struct */ #define plist_for_each_entry(pos, head, mem) \ - list_for_each_entry(pos, &(head)->node_list, mem.plist.node_list) + list_for_each_entry(pos, &(head)->node_list, mem.node_list) /** * plist_for_each_entry_safe - iterate safely over list of given type @@ -220,7 +221,7 @@ extern void plist_del(struct plist_node *node, struct plist_head *head); * Iterate over list of given type, safe against removal of list entry. 
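Since plist_node no longer embeds a full plist_head, the iteration macros above now take the plist_node member name directly. A small usage sketch with invented names:

#include <linux/kernel.h>
#include <linux/plist.h>
#include <linux/spinlock.h>

struct my_req {
	struct plist_node node;	/* was a nested plist_head before this change */
	int payload;
};

static DEFINE_SPINLOCK(my_lock);
static struct plist_head my_queue;

static void my_queue_init(void)
{
	plist_head_init(&my_queue, &my_lock);
}

static void my_queue_add(struct my_req *req, int prio)
{
	plist_node_init(&req->node, prio);
	plist_add(&req->node, &my_queue);
}

static void my_queue_dump(void)
{
	struct my_req *req;

	/* the third argument is now the plist_node member, not member.plist */
	plist_for_each_entry(req, &my_queue, node)
		pr_info("prio=%d payload=%d\n", req->node.prio, req->payload);
}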
*/ #define plist_for_each_entry_safe(pos, n, head, m) \ - list_for_each_entry_safe(pos, n, &(head)->node_list, m.plist.node_list) + list_for_each_entry_safe(pos, n, &(head)->node_list, m.node_list) /** * plist_head_empty - return !0 if a plist_head is empty @@ -237,7 +238,7 @@ static inline int plist_head_empty(const struct plist_head *head) */ static inline int plist_node_empty(const struct plist_node *node) { - return plist_head_empty(&node->plist); + return list_empty(&node->node_list); } /* All functions below assume the plist_head is not empty. */ @@ -285,7 +286,7 @@ static inline int plist_node_empty(const struct plist_node *node) static inline struct plist_node *plist_first(const struct plist_head *head) { return list_entry(head->node_list.next, - struct plist_node, plist.node_list); + struct plist_node, node_list); } /** @@ -297,7 +298,7 @@ static inline struct plist_node *plist_first(const struct plist_head *head) static inline struct plist_node *plist_last(const struct plist_head *head) { return list_entry(head->node_list.prev, - struct plist_node, plist.node_list); + struct plist_node, node_list); } #endif diff --git a/include/linux/posix-clock.h b/include/linux/posix-clock.h new file mode 100644 index 00000000000..369e19d3750 --- /dev/null +++ b/include/linux/posix-clock.h @@ -0,0 +1,150 @@ +/* + * posix-clock.h - support for dynamic clock devices + * + * Copyright (C) 2010 OMICRON electronics GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ +#ifndef _LINUX_POSIX_CLOCK_H_ +#define _LINUX_POSIX_CLOCK_H_ + +#include <linux/cdev.h> +#include <linux/fs.h> +#include <linux/poll.h> +#include <linux/posix-timers.h> + +struct posix_clock; + +/** + * struct posix_clock_operations - functional interface to the clock + * + * Every posix clock is represented by a character device. Drivers may + * optionally offer extended capabilities by implementing the + * character device methods. The character device file operations are + * first handled by the clock device layer, then passed on to the + * driver by calling these functions. 
+ * + * @owner: The clock driver should set to THIS_MODULE + * @clock_adjtime: Adjust the clock + * @clock_gettime: Read the current time + * @clock_getres: Get the clock resolution + * @clock_settime: Set the current time value + * @timer_create: Create a new timer + * @timer_delete: Remove a previously created timer + * @timer_gettime: Get remaining time and interval of a timer + * @timer_settime: Set a timer's initial expiration and interval + * @fasync: Optional character device fasync method + * @mmap: Optional character device mmap method + * @open: Optional character device open method + * @release: Optional character device release method + * @ioctl: Optional character device ioctl method + * @read: Optional character device read method + * @poll: Optional character device poll method + */ +struct posix_clock_operations { + struct module *owner; + + int (*clock_adjtime)(struct posix_clock *pc, struct timex *tx); + + int (*clock_gettime)(struct posix_clock *pc, struct timespec *ts); + + int (*clock_getres) (struct posix_clock *pc, struct timespec *ts); + + int (*clock_settime)(struct posix_clock *pc, + const struct timespec *ts); + + int (*timer_create) (struct posix_clock *pc, struct k_itimer *kit); + + int (*timer_delete) (struct posix_clock *pc, struct k_itimer *kit); + + void (*timer_gettime)(struct posix_clock *pc, + struct k_itimer *kit, struct itimerspec *tsp); + + int (*timer_settime)(struct posix_clock *pc, + struct k_itimer *kit, int flags, + struct itimerspec *tsp, struct itimerspec *old); + /* + * Optional character device methods: + */ + int (*fasync) (struct posix_clock *pc, + int fd, struct file *file, int on); + + long (*ioctl) (struct posix_clock *pc, + unsigned int cmd, unsigned long arg); + + int (*mmap) (struct posix_clock *pc, + struct vm_area_struct *vma); + + int (*open) (struct posix_clock *pc, fmode_t f_mode); + + uint (*poll) (struct posix_clock *pc, + struct file *file, poll_table *wait); + + int (*release) (struct posix_clock *pc); + + ssize_t (*read) (struct posix_clock *pc, + uint flags, char __user *buf, size_t cnt); +}; + +/** + * struct posix_clock - represents a dynamic posix clock + * + * @ops: Functional interface to the clock + * @cdev: Character device instance for this clock + * @kref: Reference count. + * @mutex: Protects the 'zombie' field from concurrent access. + * @zombie: If 'zombie' is true, then the hardware has disappeared. + * @release: A function to free the structure when the reference count reaches + * zero. May be NULL if structure is statically allocated. + * + * Drivers should embed their struct posix_clock within a private + * structure, obtaining a reference to it during callbacks using + * container_of(). + */ +struct posix_clock { + struct posix_clock_operations ops; + struct cdev cdev; + struct kref kref; + struct mutex mutex; + bool zombie; + void (*release)(struct posix_clock *clk); +}; + +/** + * posix_clock_register() - register a new clock + * @clk: Pointer to the clock. Caller must provide 'ops' and 'release' + * @devid: Allocated device id + * + * A clock driver calls this function to register itself with the + * clock device subsystem. If 'clk' points to dynamically allocated + * memory, then the caller must provide a 'release' function to free + * that memory. + * + * Returns zero on success, non-zero otherwise.
+ */ +int posix_clock_register(struct posix_clock *clk, dev_t devid); + +/** + * posix_clock_unregister() - unregister a clock + * @clk: Clock instance previously registered via posix_clock_register() + * + * A clock driver calls this function to remove itself from the clock + * device subsystem. The posix_clock itself will remain (in an + * inactive state) until its reference count drops to zero, at which + * point it will be deallocated with its 'release' method. + */ +void posix_clock_unregister(struct posix_clock *clk); + +#endif diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h index 3e23844a699..d51243ae072 100644 --- a/include/linux/posix-timers.h +++ b/include/linux/posix-timers.h @@ -4,6 +4,7 @@ #include <linux/spinlock.h> #include <linux/list.h> #include <linux/sched.h> +#include <linux/timex.h> union cpu_time_count { cputime_t cpu; @@ -17,10 +18,21 @@ struct cpu_timer_list { int firing; }; +/* + * Bit fields within a clockid: + * + * The most significant 29 bits hold either a pid or a file descriptor. + * + * Bit 2 indicates whether a cpu clock refers to a thread or a process. + * + * Bits 1 and 0 give the type: PROF=0, VIRT=1, SCHED=2, or FD=3. + * + * A clockid is invalid if bits 2, 1, and 0 are all set. + */ #define CPUCLOCK_PID(clock) ((pid_t) ~((clock) >> 3)) #define CPUCLOCK_PERTHREAD(clock) \ (((clock) & (clockid_t) CPUCLOCK_PERTHREAD_MASK) != 0) -#define CPUCLOCK_PID_MASK 7 + #define CPUCLOCK_PERTHREAD_MASK 4 #define CPUCLOCK_WHICH(clock) ((clock) & (clockid_t) CPUCLOCK_CLOCK_MASK) #define CPUCLOCK_CLOCK_MASK 3 @@ -28,12 +40,17 @@ struct cpu_timer_list { #define CPUCLOCK_VIRT 1 #define CPUCLOCK_SCHED 2 #define CPUCLOCK_MAX 3 +#define CLOCKFD CPUCLOCK_MAX +#define CLOCKFD_MASK (CPUCLOCK_PERTHREAD_MASK|CPUCLOCK_CLOCK_MASK) #define MAKE_PROCESS_CPUCLOCK(pid, clock) \ ((~(clockid_t) (pid) << 3) | (clockid_t) (clock)) #define MAKE_THREAD_CPUCLOCK(tid, clock) \ MAKE_PROCESS_CPUCLOCK((tid), (clock) | CPUCLOCK_PERTHREAD_MASK) +#define FD_TO_CLOCKID(fd) ((~(clockid_t) (fd) << 3) | CLOCKFD) +#define CLOCKID_TO_FD(clk) ((unsigned int) ~((clk) >> 3)) + /* POSIX.1b interval timer structure. 
*/ struct k_itimer { struct list_head list; /* free/ allocate list */ @@ -67,10 +84,11 @@ struct k_itimer { }; struct k_clock { - int res; /* in nanoseconds */ int (*clock_getres) (const clockid_t which_clock, struct timespec *tp); - int (*clock_set) (const clockid_t which_clock, struct timespec * tp); + int (*clock_set) (const clockid_t which_clock, + const struct timespec *tp); int (*clock_get) (const clockid_t which_clock, struct timespec * tp); + int (*clock_adj) (const clockid_t which_clock, struct timex *tx); int (*timer_create) (struct k_itimer *timer); int (*nsleep) (const clockid_t which_clock, int flags, struct timespec *, struct timespec __user *); @@ -84,28 +102,14 @@ struct k_clock { struct itimerspec * cur_setting); }; -void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock); +extern struct k_clock clock_posix_cpu; +extern struct k_clock clock_posix_dynamic; -/* error handlers for timer_create, nanosleep and settime */ -int do_posix_clock_nonanosleep(const clockid_t, int flags, struct timespec *, - struct timespec __user *); -int do_posix_clock_nosettime(const clockid_t, struct timespec *tp); +void posix_timers_register_clock(const clockid_t clock_id, struct k_clock *new_clock); /* function to call to trigger timer event */ int posix_timer_event(struct k_itimer *timr, int si_private); -int posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *ts); -int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *ts); -int posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *ts); -int posix_cpu_timer_create(struct k_itimer *timer); -int posix_cpu_nsleep(const clockid_t which_clock, int flags, - struct timespec *rqtp, struct timespec __user *rmtp); -long posix_cpu_nsleep_restart(struct restart_block *restart_block); -int posix_cpu_timer_set(struct k_itimer *timer, int flags, - struct itimerspec *new, struct itimerspec *old); -int posix_cpu_timer_del(struct k_itimer *timer); -void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp); - void posix_cpu_timer_schedule(struct k_itimer *timer); void run_posix_cpu_timers(struct task_struct *task); diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index 8d3a2486544..ab38ac80b0f 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h @@ -100,6 +100,8 @@ void ring_buffer_free(struct ring_buffer *buffer); int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size); +void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val); + struct ring_buffer_event *ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length); int ring_buffer_unlock_commit(struct ring_buffer *buffer, diff --git a/include/linux/rtc.h b/include/linux/rtc.h index 89c3e518299..2ca7e8a7806 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h @@ -133,7 +133,6 @@ extern struct class *rtc_class; * The (current) exceptions are mostly filesystem hooks: * - the proc() hook for procfs * - non-ioctl() chardev hooks: open(), release(), read_callback() - * - periodic irq calls: irq_set_state(), irq_set_freq() * * REVISIT those periodic irq calls *do* have ops_lock when they're * issued through ioctl() ... 
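The FD_TO_CLOCKID() encoding added above means a dynamic clock is reached from user space simply by opening its character device and converting the descriptor into a clockid; bits 1 and 0 equal to CLOCKFD select the dynamic path, which is why combining it with the per-thread bit is what the comment flags as invalid. A minimal, hedged user-space sketch follows (the device path /dev/ptp0 and the locally repeated macro are assumptions for illustration, not part of this patch; older glibc needs -lrt for clock_gettime):

/* Sketch: read a dynamic posix clock through its character device.
 * FD_TO_CLOCKID mirrors the kernel macro introduced above. */
#include <fcntl.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define CLOCKFD			3
#define FD_TO_CLOCKID(fd)	((~(clockid_t) (fd) << 3) | CLOCKFD)

int main(void)
{
	struct timespec ts;
	int fd = open("/dev/ptp0", O_RDWR);	/* assumed device node */

	if (fd < 0)
		return 1;

	if (clock_gettime(FD_TO_CLOCKID(fd), &ts) == 0)
		printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);

	close(fd);
	return 0;
}
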
@@ -148,11 +147,8 @@ struct rtc_class_ops { int (*set_alarm)(struct device *, struct rtc_wkalrm *); int (*proc)(struct device *, struct seq_file *); int (*set_mmss)(struct device *, unsigned long secs); - int (*irq_set_state)(struct device *, int enabled); - int (*irq_set_freq)(struct device *, int freq); int (*read_callback)(struct device *, int data); int (*alarm_irq_enable)(struct device *, unsigned int enabled); - int (*update_irq_enable)(struct device *, unsigned int enabled); }; #define RTC_DEVICE_NAME_SIZE 20 @@ -227,6 +223,7 @@ extern void rtc_device_unregister(struct rtc_device *rtc); extern int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm); extern int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm); extern int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs); +int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm); extern int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alrm); extern int rtc_set_alarm(struct rtc_device *rtc, diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h index bd31808c7d8..cc0072e93e3 100644 --- a/include/linux/rwlock_types.h +++ b/include/linux/rwlock_types.h @@ -43,14 +43,6 @@ typedef struct { RW_DEP_MAP_INIT(lockname) } #endif -/* - * RW_LOCK_UNLOCKED defeat lockdep state tracking and is hence - * deprecated. - * - * Please use DEFINE_RWLOCK() or __RW_LOCK_UNLOCKED() as appropriate. - */ -#define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(old_style_rw_init) - #define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x) #endif /* __LINUX_RWLOCK_TYPES_H */ diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h index bdfcc252797..34701241b67 100644 --- a/include/linux/rwsem-spinlock.h +++ b/include/linux/rwsem-spinlock.h @@ -12,15 +12,7 @@ #error "please don't include linux/rwsem-spinlock.h directly, use linux/rwsem.h instead" #endif -#include <linux/spinlock.h> -#include <linux/list.h> - #ifdef __KERNEL__ - -#include <linux/types.h> - -struct rwsem_waiter; - /* * the rw-semaphore definition * - if activity is 0 then there are no active readers or writers @@ -37,28 +29,7 @@ struct rw_semaphore { #endif }; -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname } -#else -# define __RWSEM_DEP_MAP_INIT(lockname) -#endif - -#define __RWSEM_INITIALIZER(name) \ -{ 0, __SPIN_LOCK_UNLOCKED(name.wait_lock), LIST_HEAD_INIT((name).wait_list) \ - __RWSEM_DEP_MAP_INIT(name) } - -#define DECLARE_RWSEM(name) \ - struct rw_semaphore name = __RWSEM_INITIALIZER(name) - -extern void __init_rwsem(struct rw_semaphore *sem, const char *name, - struct lock_class_key *key); - -#define init_rwsem(sem) \ -do { \ - static struct lock_class_key __key; \ - \ - __init_rwsem((sem), #sem, &__key); \ -} while (0) +#define RWSEM_UNLOCKED_VALUE 0x00000000 extern void __down_read(struct rw_semaphore *sem); extern int __down_read_trylock(struct rw_semaphore *sem); diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index efd348fe8ca..a8afe9cd000 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -11,6 +11,9 @@ #include <linux/types.h> #include <linux/kernel.h> +#include <linux/list.h> +#include <linux/spinlock.h> + #include <asm/system.h> #include <asm/atomic.h> @@ -19,9 +22,57 @@ struct rw_semaphore; #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK #include <linux/rwsem-spinlock.h> /* use a generic implementation */ #else -#include <asm/rwsem.h> /* use an arch-specific implementation */ +/* All arch specific implementations share the same 
struct */ +struct rw_semaphore { + long count; + spinlock_t wait_lock; + struct list_head wait_list; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +}; + +extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem); +extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem); +extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *); +extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem); + +/* Include the arch specific part */ +#include <asm/rwsem.h> + +/* In all implementations count != 0 means locked */ +static inline int rwsem_is_locked(struct rw_semaphore *sem) +{ + return sem->count != 0; +} + +#endif + +/* Common initializer macros and functions */ + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname } +#else +# define __RWSEM_DEP_MAP_INIT(lockname) #endif +#define __RWSEM_INITIALIZER(name) \ + { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED(name.wait_lock), \ + LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) } + +#define DECLARE_RWSEM(name) \ + struct rw_semaphore name = __RWSEM_INITIALIZER(name) + +extern void __init_rwsem(struct rw_semaphore *sem, const char *name, + struct lock_class_key *key); + +#define init_rwsem(sem) \ +do { \ + static struct lock_class_key __key; \ + \ + __init_rwsem((sem), #sem, &__key); \ +} while (0) + /* * lock for reading */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 777d8a5ed06..c15936fe998 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1058,6 +1058,7 @@ struct sched_class { void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags); void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags); void (*yield_task) (struct rq *rq); + bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt); void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags); @@ -1084,12 +1085,10 @@ struct sched_class { void (*task_tick) (struct rq *rq, struct task_struct *p, int queued); void (*task_fork) (struct task_struct *p); - void (*switched_from) (struct rq *this_rq, struct task_struct *task, - int running); - void (*switched_to) (struct rq *this_rq, struct task_struct *task, - int running); + void (*switched_from) (struct rq *this_rq, struct task_struct *task); + void (*switched_to) (struct rq *this_rq, struct task_struct *task); void (*prio_changed) (struct rq *this_rq, struct task_struct *task, - int oldprio, int running); + int oldprio); unsigned int (*get_rr_interval) (struct rq *rq, struct task_struct *task); @@ -1715,7 +1714,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t * /* * Per process flags */ -#define PF_KSOFTIRQD 0x00000001 /* I am ksoftirqd */ #define PF_STARTING 0x00000002 /* being created */ #define PF_EXITING 0x00000004 /* getting shut down */ #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ @@ -1945,8 +1943,6 @@ int sched_rt_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); -extern unsigned int sysctl_sched_compat_yield; - #ifdef CONFIG_SCHED_AUTOGROUP extern unsigned int sysctl_sched_autogroup_enabled; @@ -1977,6 +1973,7 @@ static inline int rt_mutex_getprio(struct task_struct *p) # define rt_mutex_adjust_pi(p) do { } while (0) #endif +extern bool yield_to(struct task_struct *p, bool preempt); extern void set_user_nice(struct task_struct *p, long nice); extern int task_prio(const struct task_struct *p); extern 
int task_nice(const struct task_struct *p); @@ -2049,7 +2046,7 @@ extern void release_uids(struct user_namespace *ns); #include <asm/current.h> -extern void do_timer(unsigned long ticks); +extern void xtime_update(unsigned long ticks); extern int wake_up_state(struct task_struct *tsk, unsigned int state); extern int wake_up_process(struct task_struct *tsk); @@ -2578,13 +2575,6 @@ static inline void inc_syscw(struct task_struct *tsk) #define TASK_SIZE_OF(tsk) TASK_SIZE #endif -/* - * Call the function if the target task is executing on a CPU right now: - */ -extern void task_oncpu_function_call(struct task_struct *p, - void (*func) (void *info), void *info); - - #ifdef CONFIG_MM_OWNER extern void mm_update_next_owner(struct mm_struct *mm); extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p); diff --git a/include/linux/security.h b/include/linux/security.h index b2b7f9749f5..debbd97db7a 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -53,7 +53,7 @@ struct audit_krule; */ extern int cap_capable(struct task_struct *tsk, const struct cred *cred, int cap, int audit); -extern int cap_settime(struct timespec *ts, struct timezone *tz); +extern int cap_settime(const struct timespec *ts, const struct timezone *tz); extern int cap_ptrace_access_check(struct task_struct *child, unsigned int mode); extern int cap_ptrace_traceme(struct task_struct *parent); extern int cap_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); @@ -1387,7 +1387,7 @@ struct security_operations { int (*quotactl) (int cmds, int type, int id, struct super_block *sb); int (*quota_on) (struct dentry *dentry); int (*syslog) (int type); - int (*settime) (struct timespec *ts, struct timezone *tz); + int (*settime) (const struct timespec *ts, const struct timezone *tz); int (*vm_enough_memory) (struct mm_struct *mm, long pages); int (*bprm_set_creds) (struct linux_binprm *bprm); @@ -1669,7 +1669,7 @@ int security_sysctl(struct ctl_table *table, int op); int security_quotactl(int cmds, int type, int id, struct super_block *sb); int security_quota_on(struct dentry *dentry); int security_syslog(int type); -int security_settime(struct timespec *ts, struct timezone *tz); +int security_settime(const struct timespec *ts, const struct timezone *tz); int security_vm_enough_memory(long pages); int security_vm_enough_memory_mm(struct mm_struct *mm, long pages); int security_vm_enough_memory_kern(long pages); @@ -1904,7 +1904,8 @@ static inline int security_syslog(int type) return 0; } -static inline int security_settime(struct timespec *ts, struct timezone *tz) +static inline int security_settime(const struct timespec *ts, + const struct timezone *tz) { return cap_settime(ts, tz); } diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h index 851b7783720..73548eb13a5 100644 --- a/include/linux/spinlock_types.h +++ b/include/linux/spinlock_types.h @@ -81,14 +81,6 @@ typedef struct spinlock { #define __SPIN_LOCK_UNLOCKED(lockname) \ (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname) -/* - * SPIN_LOCK_UNLOCKED defeats lockdep state tracking and is hence - * deprecated. - * Please use DEFINE_SPINLOCK() or __SPIN_LOCK_UNLOCKED() as - * appropriate. 
- */ -#define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(old_style_spin_init) - #define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) #include <linux/rwlock_types.h> diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 98664db1be4..1f5c18e6f4f 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -62,6 +62,7 @@ struct robust_list_head; struct getcpu_cache; struct old_linux_dirent; struct perf_event_attr; +struct file_handle; #include <linux/types.h> #include <linux/aio_abi.h> @@ -132,11 +133,11 @@ extern struct trace_event_functions exit_syscall_print_funcs; .class = &event_class_syscall_enter, \ .event.funcs = &enter_syscall_print_funcs, \ .data = (void *)&__syscall_meta_##sname,\ + .flags = TRACE_EVENT_FL_CAP_ANY, \ }; \ static struct ftrace_event_call __used \ __attribute__((section("_ftrace_events"))) \ - *__event_enter_##sname = &event_enter_##sname; \ - __TRACE_EVENT_FLAGS(enter_##sname, TRACE_EVENT_FL_CAP_ANY) + *__event_enter_##sname = &event_enter_##sname; #define SYSCALL_TRACE_EXIT_EVENT(sname) \ static struct syscall_metadata __syscall_meta_##sname; \ @@ -146,11 +147,11 @@ extern struct trace_event_functions exit_syscall_print_funcs; .class = &event_class_syscall_exit, \ .event.funcs = &exit_syscall_print_funcs, \ .data = (void *)&__syscall_meta_##sname,\ + .flags = TRACE_EVENT_FL_CAP_ANY, \ }; \ static struct ftrace_event_call __used \ __attribute__((section("_ftrace_events"))) \ - *__event_exit_##sname = &event_exit_##sname; \ - __TRACE_EVENT_FLAGS(exit_##sname, TRACE_EVENT_FL_CAP_ANY) + *__event_exit_##sname = &event_exit_##sname; #define SYSCALL_METADATA(sname, nb) \ SYSCALL_TRACE_ENTER_EVENT(sname); \ @@ -158,6 +159,7 @@ extern struct trace_event_functions exit_syscall_print_funcs; static struct syscall_metadata __used \ __syscall_meta_##sname = { \ .name = "sys"#sname, \ + .syscall_nr = -1, /* Filled in at boot */ \ .nb_args = nb, \ .types = types_##sname, \ .args = args_##sname, \ @@ -175,6 +177,7 @@ extern struct trace_event_functions exit_syscall_print_funcs; static struct syscall_metadata __used \ __syscall_meta__##sname = { \ .name = "sys_"#sname, \ + .syscall_nr = -1, /* Filled in at boot */ \ .nb_args = 0, \ .enter_event = &event_enter__##sname, \ .exit_event = &event_exit__##sname, \ @@ -313,6 +316,8 @@ asmlinkage long sys_clock_settime(clockid_t which_clock, const struct timespec __user *tp); asmlinkage long sys_clock_gettime(clockid_t which_clock, struct timespec __user *tp); +asmlinkage long sys_clock_adjtime(clockid_t which_clock, + struct timex __user *tx); asmlinkage long sys_clock_getres(clockid_t which_clock, struct timespec __user *tp); asmlinkage long sys_clock_nanosleep(clockid_t which_clock, int flags, @@ -832,5 +837,10 @@ asmlinkage long sys_mmap_pgoff(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long pgoff); asmlinkage long sys_old_mmap(struct mmap_arg_struct __user *arg); - +asmlinkage long sys_name_to_handle_at(int dfd, const char __user *name, + struct file_handle __user *handle, + int __user *mnt_id, int flag); +asmlinkage long sys_open_by_handle_at(int mountdirfd, + struct file_handle __user *handle, + int flags); #endif diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index c9069654417..20fc303947d 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h @@ -18,9 +18,6 @@ struct compat_timespec; struct restart_block { long (*fn)(struct restart_block *); union { - struct { - unsigned long arg0, arg1, 
arg2, arg3; - }; /* For futex_wait and futex_wait_requeue_pi */ struct { u32 __user *uaddr; diff --git a/include/linux/time.h b/include/linux/time.h index 1e6d3b59238..454a2620578 100644 --- a/include/linux/time.h +++ b/include/linux/time.h @@ -113,8 +113,6 @@ static inline struct timespec timespec_sub(struct timespec lhs, #define timespec_valid(ts) \ (((ts)->tv_sec >= 0) && (((unsigned long) (ts)->tv_nsec) < NSEC_PER_SEC)) -extern seqlock_t xtime_lock; - extern void read_persistent_clock(struct timespec *ts); extern void read_boot_clock(struct timespec *ts); extern int update_persistent_clock(struct timespec now); @@ -125,8 +123,9 @@ extern int timekeeping_suspended; unsigned long get_seconds(void); struct timespec current_kernel_time(void); struct timespec __current_kernel_time(void); /* does not take xtime_lock */ -struct timespec __get_wall_to_monotonic(void); /* does not take xtime_lock */ struct timespec get_monotonic_coarse(void); +void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim, + struct timespec *wtom, struct timespec *sleep); #define CURRENT_TIME (current_kernel_time()) #define CURRENT_TIME_SEC ((struct timespec) { get_seconds(), 0 }) @@ -147,8 +146,9 @@ static inline u32 arch_gettimeoffset(void) { return 0; } #endif extern void do_gettimeofday(struct timeval *tv); -extern int do_settimeofday(struct timespec *tv); -extern int do_sys_settimeofday(struct timespec *tv, struct timezone *tz); +extern int do_settimeofday(const struct timespec *tv); +extern int do_sys_settimeofday(const struct timespec *tv, + const struct timezone *tz); #define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts) extern long do_utimes(int dfd, const char __user *filename, struct timespec *times, int flags); struct itimerval; @@ -162,12 +162,13 @@ extern void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real); extern void getboottime(struct timespec *ts); extern void monotonic_to_bootbased(struct timespec *ts); +extern void get_monotonic_boottime(struct timespec *ts); extern struct timespec timespec_trunc(struct timespec t, unsigned gran); extern int timekeeping_valid_for_hres(void); extern u64 timekeeping_max_deferment(void); -extern void update_wall_time(void); extern void timekeeping_leap_insert(int leapsecond); +extern int timekeeping_inject_offset(struct timespec *ts); struct tms; extern void do_sys_times(struct tms *); @@ -292,6 +293,7 @@ struct itimerval { #define CLOCK_MONOTONIC_RAW 4 #define CLOCK_REALTIME_COARSE 5 #define CLOCK_MONOTONIC_COARSE 6 +#define CLOCK_BOOTTIME 7 /* * The IDs of various hardware clocks: diff --git a/include/linux/timex.h b/include/linux/timex.h index d23999f9499..aa60fe7b6ed 100644 --- a/include/linux/timex.h +++ b/include/linux/timex.h @@ -73,7 +73,7 @@ struct timex { long tolerance; /* clock frequency tolerance (ppm) * (read only) */ - struct timeval time; /* (read only) */ + struct timeval time; /* (read only, except for ADJ_SETOFFSET) */ long tick; /* (modified) usecs between clock ticks */ long ppsfreq; /* pps frequency (scaled ppm) (ro) */ @@ -102,6 +102,7 @@ struct timex { #define ADJ_STATUS 0x0010 /* clock status */ #define ADJ_TIMECONST 0x0020 /* pll time constant */ #define ADJ_TAI 0x0080 /* set TAI offset */ +#define ADJ_SETOFFSET 0x0100 /* add 'time' to current time */ #define ADJ_MICRO 0x1000 /* select microsecond resolution */ #define ADJ_NANO 0x2000 /* select nanosecond resolution */ #define ADJ_TICK 0x4000 /* tick value */ diff --git a/include/trace/events/mce.h b/include/trace/events/mce.h index 
7eee77895cb..4cbbcef6baa 100644 --- a/include/trace/events/mce.h +++ b/include/trace/events/mce.h @@ -17,36 +17,36 @@ TRACE_EVENT(mce_record, TP_STRUCT__entry( __field( u64, mcgcap ) __field( u64, mcgstatus ) - __field( u8, bank ) __field( u64, status ) __field( u64, addr ) __field( u64, misc ) __field( u64, ip ) - __field( u8, cs ) __field( u64, tsc ) __field( u64, walltime ) __field( u32, cpu ) __field( u32, cpuid ) __field( u32, apicid ) __field( u32, socketid ) + __field( u8, cs ) + __field( u8, bank ) __field( u8, cpuvendor ) ), TP_fast_assign( __entry->mcgcap = m->mcgcap; __entry->mcgstatus = m->mcgstatus; - __entry->bank = m->bank; __entry->status = m->status; __entry->addr = m->addr; __entry->misc = m->misc; __entry->ip = m->ip; - __entry->cs = m->cs; __entry->tsc = m->tsc; __entry->walltime = m->time; __entry->cpu = m->extcpu; __entry->cpuid = m->cpuid; __entry->apicid = m->apicid; __entry->socketid = m->socketid; + __entry->cs = m->cs; + __entry->bank = m->bank; __entry->cpuvendor = m->cpuvendor; ), diff --git a/include/trace/events/module.h b/include/trace/events/module.h index c6bae36547e..21a546d27c0 100644 --- a/include/trace/events/module.h +++ b/include/trace/events/module.h @@ -108,14 +108,14 @@ TRACE_EVENT(module_request, TP_ARGS(name, wait, ip), TP_STRUCT__entry( - __field( bool, wait ) __field( unsigned long, ip ) + __field( bool, wait ) __string( name, name ) ), TP_fast_assign( - __entry->wait = wait; __entry->ip = ip; + __entry->wait = wait; __assign_str(name, name); ), @@ -129,4 +129,3 @@ TRACE_EVENT(module_request, /* This part must be outside protection */ #include <trace/define_trace.h> - diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h index f10293c41b1..0c68ae22da2 100644 --- a/include/trace/events/skb.h +++ b/include/trace/events/skb.h @@ -19,14 +19,14 @@ TRACE_EVENT(kfree_skb, TP_STRUCT__entry( __field( void *, skbaddr ) - __field( unsigned short, protocol ) __field( void *, location ) + __field( unsigned short, protocol ) ), TP_fast_assign( __entry->skbaddr = skb; - __entry->protocol = ntohs(skb->protocol); __entry->location = location; + __entry->protocol = ntohs(skb->protocol); ), TP_printk("skbaddr=%p protocol=%u location=%p", diff --git a/include/xen/events.h b/include/xen/events.h index 00f53ddcc06..962da2ced5b 100644 --- a/include/xen/events.h +++ b/include/xen/events.h @@ -75,11 +75,9 @@ int xen_allocate_pirq(unsigned gsi, int shareable, char *name); int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name); #ifdef CONFIG_PCI_MSI -/* Allocate an irq and a pirq to be used with MSIs. */ -#define XEN_ALLOC_PIRQ (1 << 0) -#define XEN_ALLOC_IRQ (1 << 1) -void xen_allocate_pirq_msi(char *name, int *irq, int *pirq, int alloc_mask); -int xen_create_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int type); +int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc); +int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, + int pirq, int vector, const char *name); #endif /* De-allocates the above mentioned physical interrupt. */ diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h index c2d1fa4dc1e..61e523af3c4 100644 --- a/include/xen/interface/io/blkif.h +++ b/include/xen/interface/io/blkif.h @@ -51,11 +51,7 @@ typedef uint64_t blkif_sector_t; */ #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11 -struct blkif_request { - uint8_t operation; /* BLKIF_OP_??? 
*/ - uint8_t nr_segments; /* number of segments */ - blkif_vdev_t handle; /* only for read/write requests */ - uint64_t id; /* private guest value, echoed in resp */ +struct blkif_request_rw { blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ struct blkif_request_segment { grant_ref_t gref; /* reference to I/O buffer frame */ @@ -65,6 +61,16 @@ struct blkif_request { } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; }; +struct blkif_request { + uint8_t operation; /* BLKIF_OP_??? */ + uint8_t nr_segments; /* number of segments */ + blkif_vdev_t handle; /* only for read/write requests */ + uint64_t id; /* private guest value, echoed in resp */ + union { + struct blkif_request_rw rw; + } u; +}; + struct blkif_response { uint64_t id; /* copied from request */ uint8_t operation; /* copied from request */ @@ -91,4 +97,25 @@ DEFINE_RING_TYPES(blkif, struct blkif_request, struct blkif_response); #define VDISK_REMOVABLE 0x2 #define VDISK_READONLY 0x4 +/* Xen-defined major numbers for virtual disks, they look strangely + * familiar */ +#define XEN_IDE0_MAJOR 3 +#define XEN_IDE1_MAJOR 22 +#define XEN_SCSI_DISK0_MAJOR 8 +#define XEN_SCSI_DISK1_MAJOR 65 +#define XEN_SCSI_DISK2_MAJOR 66 +#define XEN_SCSI_DISK3_MAJOR 67 +#define XEN_SCSI_DISK4_MAJOR 68 +#define XEN_SCSI_DISK5_MAJOR 69 +#define XEN_SCSI_DISK6_MAJOR 70 +#define XEN_SCSI_DISK7_MAJOR 71 +#define XEN_SCSI_DISK8_MAJOR 128 +#define XEN_SCSI_DISK9_MAJOR 129 +#define XEN_SCSI_DISK10_MAJOR 130 +#define XEN_SCSI_DISK11_MAJOR 131 +#define XEN_SCSI_DISK12_MAJOR 132 +#define XEN_SCSI_DISK13_MAJOR 133 +#define XEN_SCSI_DISK14_MAJOR 134 +#define XEN_SCSI_DISK15_MAJOR 135 + #endif /* __XEN_PUBLIC_IO_BLKIF_H__ */ diff --git a/include/xen/interface/xen.h b/include/xen/interface/xen.h index 2befa3e2f1b..b33257bc7e8 100644 --- a/include/xen/interface/xen.h +++ b/include/xen/interface/xen.h @@ -30,7 +30,7 @@ #define __HYPERVISOR_stack_switch 3 #define __HYPERVISOR_set_callbacks 4 #define __HYPERVISOR_fpu_taskswitch 5 -#define __HYPERVISOR_sched_op 6 +#define __HYPERVISOR_sched_op_compat 6 #define __HYPERVISOR_dom0_op 7 #define __HYPERVISOR_set_debugreg 8 #define __HYPERVISOR_get_debugreg 9 @@ -52,7 +52,7 @@ #define __HYPERVISOR_mmuext_op 26 #define __HYPERVISOR_acm_op 27 #define __HYPERVISOR_nmi_op 28 -#define __HYPERVISOR_sched_op_new 29 +#define __HYPERVISOR_sched_op 29 #define __HYPERVISOR_callback_op 30 #define __HYPERVISOR_xenoprof_op 31 #define __HYPERVISOR_event_channel_op 32 diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h index 98b92154a26..03c85d7387f 100644 --- a/include/xen/xen-ops.h +++ b/include/xen/xen-ops.h @@ -5,9 +5,9 @@ DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu); -void xen_pre_suspend(void); -void xen_post_suspend(int suspend_cancelled); -void xen_hvm_post_suspend(int suspend_cancelled); +void xen_arch_pre_suspend(void); +void xen_arch_post_suspend(int suspend_cancelled); +void xen_arch_hvm_post_suspend(int suspend_cancelled); void xen_mm_pin_all(void); void xen_mm_unpin_all(void);
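Taken together, the new sys_clock_adjtime() entry in syscalls.h, the clock_adj hook in struct k_clock, and the ADJ_SETOFFSET mode in timex.h shown earlier let a one-shot offset be injected into a clock. A rough user-space sketch, assuming the updated headers export __NR_clock_adjtime, that ADJ_SETOFFSET may still need a local fallback definition, and that the caller holds CAP_SYS_TIME (there is no glibc wrapper at this point):

#include <sys/syscall.h>
#include <sys/timex.h>
#include <time.h>
#include <unistd.h>

#ifndef ADJ_SETOFFSET
#define ADJ_SETOFFSET 0x0100	/* same value as the new timex.h flag */
#endif

int main(void)
{
	struct timex tx = {
		.modes = ADJ_SETOFFSET,
		/* Step the clock forward by 1.5 s; without ADJ_NANO the
		 * 'time' field is interpreted in microseconds. */
		.time = { .tv_sec = 1, .tv_usec = 500000 },
	};

	/* Raw syscall, assuming __NR_clock_adjtime is defined by the
	 * updated kernel headers; requires CAP_SYS_TIME. */
	return syscall(__NR_clock_adjtime, CLOCK_REALTIME, &tx) < 0 ? 1 : 0;
}

The same struct timex is what a dynamic clock's clock_adjtime() operation receives when the clockid refers to a clock character device.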