Diffstat (limited to 'include/linux')
86 files changed, 1871 insertions, 768 deletions
diff --git a/include/linux/ac97_codec.h b/include/linux/ac97_codec.h index c35833824e1..2ed2fd85513 100644 --- a/include/linux/ac97_codec.h +++ b/include/linux/ac97_codec.h @@ -259,7 +259,7 @@ struct ac97_codec { int type; u32 model; - int modem:1; + unsigned int modem:1; struct ac97_ops *codec_ops; diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h index 1eb238affb1..41788a31c43 100644 --- a/include/linux/atmdev.h +++ b/include/linux/atmdev.h @@ -7,6 +7,7 @@ #define LINUX_ATMDEV_H +#include <linux/device.h> #include <linux/atmapi.h> #include <linux/atm.h> #include <linux/atmioc.h> @@ -358,6 +359,7 @@ struct atm_dev { struct proc_dir_entry *proc_entry; /* proc entry */ char *proc_name; /* proc entry name */ #endif + struct class_device class_dev; /* sysfs class device */ struct list_head dev_list; /* linkage */ }; @@ -459,7 +461,7 @@ static inline void atm_dev_put(struct atm_dev *dev) BUG_ON(!test_bit(ATM_DF_REMOVED, &dev->flags)); if (dev->ops->dev_close) dev->ops->dev_close(dev); - kfree(dev); + class_device_put(&dev->class_dev); } } diff --git a/include/linux/audit.h b/include/linux/audit.h index e051ff9c5b5..b27d7debc5a 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -122,10 +122,17 @@ /* Rule structure sizes -- if these change, different AUDIT_ADD and * AUDIT_LIST commands must be implemented. */ #define AUDIT_MAX_FIELDS 64 +#define AUDIT_MAX_KEY_LEN 32 #define AUDIT_BITMASK_SIZE 64 #define AUDIT_WORD(nr) ((__u32)((nr)/32)) #define AUDIT_BIT(nr) (1 << ((nr) - AUDIT_WORD(nr)*32)) +#define AUDIT_SYSCALL_CLASSES 16 +#define AUDIT_CLASS_DIR_WRITE 0 +#define AUDIT_CLASS_DIR_WRITE_32 1 +#define AUDIT_CLASS_CHATTR 2 +#define AUDIT_CLASS_CHATTR_32 3 + /* This bitmask is used to validate user input. It represents all bits that * are currently used in an audit field constant understood by the kernel. * If you are adding a new #define AUDIT_<whatever>, please ensure that @@ -150,12 +157,17 @@ #define AUDIT_PERS 10 #define AUDIT_ARCH 11 #define AUDIT_MSGTYPE 12 -#define AUDIT_SE_USER 13 /* security label user */ -#define AUDIT_SE_ROLE 14 /* security label role */ -#define AUDIT_SE_TYPE 15 /* security label type */ -#define AUDIT_SE_SEN 16 /* security label sensitivity label */ -#define AUDIT_SE_CLR 17 /* security label clearance label */ +#define AUDIT_SUBJ_USER 13 /* security label user */ +#define AUDIT_SUBJ_ROLE 14 /* security label role */ +#define AUDIT_SUBJ_TYPE 15 /* security label type */ +#define AUDIT_SUBJ_SEN 16 /* security label sensitivity label */ +#define AUDIT_SUBJ_CLR 17 /* security label clearance label */ #define AUDIT_PPID 18 +#define AUDIT_OBJ_USER 19 +#define AUDIT_OBJ_ROLE 20 +#define AUDIT_OBJ_TYPE 21 +#define AUDIT_OBJ_LEV_LOW 22 +#define AUDIT_OBJ_LEV_HIGH 23 /* These are ONLY useful when checking * at syscall exit time (AUDIT_AT_EXIT). */ @@ -171,6 +183,8 @@ #define AUDIT_ARG2 (AUDIT_ARG0+2) #define AUDIT_ARG3 (AUDIT_ARG0+3) +#define AUDIT_FILTERKEY 210 + #define AUDIT_NEGATE 0x80000000 /* These are the supported operators. 
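The new AUDIT_SYSCALL_CLASSES table is populated by the architectures through audit_register_class(), declared in the next hunk. A minimal sketch of such a registration, assuming placeholder syscall numbers and a ~0U list terminator (real class tables are generated from asm-generic/audit_dir_write.h and friends):

#include <linux/audit.h>
#include <linux/init.h>

/* Sketch only: the syscall numbers are placeholders for the target arch. */
static unsigned dir_class[] = {
	83,	/* e.g. __NR_mkdir */
	84,	/* e.g. __NR_rmdir */
	~0U	/* sentinel terminating the list */
};

static int __init audit_classes_init(void)
{
	return audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class);
}
__initcall(audit_classes_init);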
@@ -299,6 +313,7 @@ struct mqstat; #define AUDITSC_SUCCESS 1 #define AUDITSC_FAILURE 2 #define AUDITSC_RESULT(x) ( ((long)(x))<0?AUDITSC_FAILURE:AUDITSC_SUCCESS ) +extern int __init audit_register_class(int class, unsigned *list); #ifdef CONFIG_AUDITSYSCALL /* These are defined in auditsc.c */ /* Public API */ diff --git a/include/linux/coda_linux.h b/include/linux/coda_linux.h index 7b5c5df5cb6..be512cc9879 100644 --- a/include/linux/coda_linux.h +++ b/include/linux/coda_linux.h @@ -27,8 +27,8 @@ extern struct inode_operations coda_dir_inode_operations; extern struct inode_operations coda_file_inode_operations; extern struct inode_operations coda_ioctl_inode_operations; -extern struct address_space_operations coda_file_aops; -extern struct address_space_operations coda_symlink_aops; +extern const struct address_space_operations coda_file_aops; +extern const struct address_space_operations coda_symlink_aops; extern const struct file_operations coda_dir_operations; extern const struct file_operations coda_file_operations; diff --git a/include/linux/compat_ioctl.h b/include/linux/compat_ioctl.h index 917d62e4148..269d000bb2a 100644 --- a/include/linux/compat_ioctl.h +++ b/include/linux/compat_ioctl.h @@ -567,11 +567,6 @@ COMPATIBLE_IOCTL(AUTOFS_IOC_PROTOSUBVER) COMPATIBLE_IOCTL(AUTOFS_IOC_ASKREGHOST) COMPATIBLE_IOCTL(AUTOFS_IOC_TOGGLEREGHOST) COMPATIBLE_IOCTL(AUTOFS_IOC_ASKUMOUNT) -/* DEVFS */ -COMPATIBLE_IOCTL(DEVFSDIOC_GET_PROTO_REV) -COMPATIBLE_IOCTL(DEVFSDIOC_SET_EVENT_MASK) -COMPATIBLE_IOCTL(DEVFSDIOC_RELEASE_EVENT_QUEUE) -COMPATIBLE_IOCTL(DEVFSDIOC_SET_DEBUG_MASK) /* Raw devices */ COMPATIBLE_IOCTL(RAW_SETBIND) COMPATIBLE_IOCTL(RAW_GETBIND) diff --git a/include/linux/completion.h b/include/linux/completion.h index 90663ad217f..251c41e3ddd 100644 --- a/include/linux/completion.h +++ b/include/linux/completion.h @@ -21,6 +21,18 @@ struct completion { #define DECLARE_COMPLETION(work) \ struct completion work = COMPLETION_INITIALIZER(work) +/* + * Lockdep needs to run a non-constant initializer for on-stack + * completions - so we use the _ONSTACK() variant for those that + * are on the kernel stack: + */ +#ifdef CONFIG_LOCKDEP +# define DECLARE_COMPLETION_ONSTACK(work) \ + struct completion work = ({ init_completion(&work); work; }) +#else +# define DECLARE_COMPLETION_ONSTACK(work) DECLARE_COMPLETION(work) +#endif + static inline void init_completion(struct completion *x) { x->done = 0; diff --git a/include/linux/cpu.h b/include/linux/cpu.h index a3caf6866ba..44a11f1ccaf 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -87,9 +87,9 @@ int cpu_down(unsigned int cpu); #define lock_cpu_hotplug() do { } while (0) #define unlock_cpu_hotplug() do { } while (0) #define lock_cpu_hotplug_interruptible() 0 -#define hotcpu_notifier(fn, pri) -#define register_hotcpu_notifier(nb) -#define unregister_hotcpu_notifier(nb) +#define hotcpu_notifier(fn, pri) do { } while (0) +#define register_hotcpu_notifier(nb) do { } while (0) +#define unregister_hotcpu_notifier(nb) do { } while (0) /* CPUs don't go offline once they're online w/o CONFIG_HOTPLUG_CPU */ static inline int cpu_is_offline(int cpu) { return 0; } diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 466fbe9e489..35e137636b0 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -100,8 +100,10 @@ struct cpufreq_policy { #define CPUFREQ_INCOMPATIBLE (1) #define CPUFREQ_NOTIFY (2) -#define CPUFREQ_SHARED_TYPE_ALL (0) /* All dependent CPUs should set freq */ -#define CPUFREQ_SHARED_TYPE_ANY (1) /* Freq can 
be set from any dependent CPU */ +#define CPUFREQ_SHARED_TYPE_NONE (0) /* None */ +#define CPUFREQ_SHARED_TYPE_HW (1) /* HW does needed coordination */ +#define CPUFREQ_SHARED_TYPE_ALL (2) /* All dependent CPUs should set freq */ +#define CPUFREQ_SHARED_TYPE_ANY (3) /* Freq can be set from any dependent CPU*/ /******************** cpufreq transition notifiers *******************/ diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 0dd1610a94a..471781ffeab 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -114,6 +114,18 @@ struct dentry { unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */ }; +/* + * dentry->d_lock spinlock nesting subclasses: + * + * 0: normal + * 1: nested + */ +enum dentry_d_lock_class +{ + DENTRY_D_LOCK_NORMAL, /* implicitly used by plain spin_lock() APIs. */ + DENTRY_D_LOCK_NESTED +}; + struct dentry_operations { int (*d_revalidate)(struct dentry *, struct nameidata *); int (*d_hash) (struct dentry *, struct qstr *); diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h new file mode 100644 index 00000000000..6a7047851e4 --- /dev/null +++ b/include/linux/debug_locks.h @@ -0,0 +1,69 @@ +#ifndef __LINUX_DEBUG_LOCKING_H +#define __LINUX_DEBUG_LOCKING_H + +extern int debug_locks; +extern int debug_locks_silent; + +/* + * Generic 'turn off all lock debugging' function: + */ +extern int debug_locks_off(void); + +/* + * In the debug case we carry the caller's instruction pointer into + * other functions, but we dont want the function argument overhead + * in the nondebug case - hence these macros: + */ +#define _RET_IP_ (unsigned long)__builtin_return_address(0) +#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; }) + +#define DEBUG_LOCKS_WARN_ON(c) \ +({ \ + int __ret = 0; \ + \ + if (unlikely(c)) { \ + if (debug_locks_off()) \ + WARN_ON(1); \ + __ret = 1; \ + } \ + __ret; \ +}) + +#ifdef CONFIG_SMP +# define SMP_DEBUG_LOCKS_WARN_ON(c) DEBUG_LOCKS_WARN_ON(c) +#else +# define SMP_DEBUG_LOCKS_WARN_ON(c) do { } while (0) +#endif + +#ifdef CONFIG_DEBUG_LOCKING_API_SELFTESTS + extern void locking_selftest(void); +#else +# define locking_selftest() do { } while (0) +#endif + +#ifdef CONFIG_LOCKDEP +extern void debug_show_all_locks(void); +extern void debug_show_held_locks(struct task_struct *task); +extern void debug_check_no_locks_freed(const void *from, unsigned long len); +extern void debug_check_no_locks_held(struct task_struct *task); +#else +static inline void debug_show_all_locks(void) +{ +} + +static inline void debug_show_held_locks(struct task_struct *task) +{ +} + +static inline void +debug_check_no_locks_freed(const void *from, unsigned long len) +{ +} + +static inline void +debug_check_no_locks_held(struct task_struct *task) +{ +} +#endif + +#endif diff --git a/include/linux/devfs_fs.h b/include/linux/devfs_fs.h deleted file mode 100644 index de236f43187..00000000000 --- a/include/linux/devfs_fs.h +++ /dev/null @@ -1,41 +0,0 @@ -#ifndef _LINUX_DEVFS_FS_H -#define _LINUX_DEVFS_FS_H - -#include <linux/ioctl.h> - -#define DEVFSD_PROTOCOL_REVISION_KERNEL 5 - -#define DEVFSD_IOCTL_BASE 'd' - -/* These are the various ioctls */ -#define DEVFSDIOC_GET_PROTO_REV _IOR(DEVFSD_IOCTL_BASE, 0, int) -#define DEVFSDIOC_SET_EVENT_MASK _IOW(DEVFSD_IOCTL_BASE, 2, int) -#define DEVFSDIOC_RELEASE_EVENT_QUEUE _IOW(DEVFSD_IOCTL_BASE, 3, int) -#define DEVFSDIOC_SET_DEBUG_MASK _IOW(DEVFSD_IOCTL_BASE, 4, int) - -#define DEVFSD_NOTIFY_REGISTERED 0 -#define DEVFSD_NOTIFY_UNREGISTERED 1 -#define 
DEVFSD_NOTIFY_ASYNC_OPEN 2 -#define DEVFSD_NOTIFY_CLOSE 3 -#define DEVFSD_NOTIFY_LOOKUP 4 -#define DEVFSD_NOTIFY_CHANGE 5 -#define DEVFSD_NOTIFY_CREATE 6 -#define DEVFSD_NOTIFY_DELETE 7 - -#define DEVFS_PATHLEN 1024 /* Never change this otherwise the - binary interface will change */ - -struct devfsd_notify_struct { /* Use native C types to ensure same types in kernel and user space */ - unsigned int type; /* DEVFSD_NOTIFY_* value */ - unsigned int mode; /* Mode of the inode or device entry */ - unsigned int major; /* Major number of device entry */ - unsigned int minor; /* Minor number of device entry */ - unsigned int uid; /* Uid of process, inode or device entry */ - unsigned int gid; /* Gid of process, inode or device entry */ - unsigned int overrun_count; /* Number of lost events */ - unsigned int namelen; /* Number of characters not including '\0' */ - /* The device name MUST come last */ - char devname[DEVFS_PATHLEN]; /* This will be '\0' terminated */ -}; - -#endif /* _LINUX_DEVFS_FS_H */ diff --git a/include/linux/devfs_fs_kernel.h b/include/linux/devfs_fs_kernel.h deleted file mode 100644 index 0d74a6f22ab..00000000000 --- a/include/linux/devfs_fs_kernel.h +++ /dev/null @@ -1,57 +0,0 @@ -#ifndef _LINUX_DEVFS_FS_KERNEL_H -#define _LINUX_DEVFS_FS_KERNEL_H - -#include <linux/fs.h> -#include <linux/spinlock.h> -#include <linux/types.h> - -#include <asm/semaphore.h> - -#define DEVFS_SUPER_MAGIC 0x1373 - -#ifdef CONFIG_DEVFS_FS -extern int devfs_mk_bdev(dev_t dev, umode_t mode, const char *fmt, ...) - __attribute__ ((format(printf, 3, 4))); -extern int devfs_mk_cdev(dev_t dev, umode_t mode, const char *fmt, ...) - __attribute__ ((format(printf, 3, 4))); -extern int devfs_mk_symlink(const char *name, const char *link); -extern int devfs_mk_dir(const char *fmt, ...) - __attribute__ ((format(printf, 1, 2))); -extern void devfs_remove(const char *fmt, ...) - __attribute__ ((format(printf, 1, 2))); -extern int devfs_register_tape(const char *name); -extern void devfs_unregister_tape(int num); -extern void mount_devfs_fs(void); -#else /* CONFIG_DEVFS_FS */ -static inline int devfs_mk_bdev(dev_t dev, umode_t mode, const char *fmt, ...) -{ - return 0; -} -static inline int devfs_mk_cdev(dev_t dev, umode_t mode, const char *fmt, ...) -{ - return 0; -} -static inline int devfs_mk_symlink(const char *name, const char *link) -{ - return 0; -} -static inline int devfs_mk_dir(const char *fmt, ...) -{ - return 0; -} -static inline void devfs_remove(const char *fmt, ...) 
-{ -} -static inline int devfs_register_tape(const char *name) -{ - return -1; -} -static inline void devfs_unregister_tape(int num) -{ -} -static inline void mount_devfs_fs(void) -{ - return; -} -#endif /* CONFIG_DEVFS_FS */ -#endif /* _LINUX_DEVFS_FS_KERNEL_H */ diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 272010a6078..c94d8f1d62e 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -44,7 +44,7 @@ enum dma_event { }; /** - * typedef dma_cookie_t + * typedef dma_cookie_t - an opaque DMA cookie * * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code */ @@ -80,14 +80,14 @@ struct dma_chan_percpu { /** * struct dma_chan - devices supply DMA channels, clients use them - * @client: ptr to the client user of this chan, will be NULL when unused - * @device: ptr to the dma device who supplies this channel, always !NULL + * @client: ptr to the client user of this chan, will be %NULL when unused + * @device: ptr to the dma device who supplies this channel, always !%NULL * @cookie: last cookie value returned to client - * @chan_id: - * @class_dev: + * @chan_id: channel ID for sysfs + * @class_dev: class device for sysfs * @refcount: kref, used in "bigref" slow-mode - * @slow_ref: - * @rcu: + * @slow_ref: indicates that the DMA channel is free + * @rcu: the DMA channel's RCU head * @client_node: used to add this to the client chan list * @device_node: used to add this to the device chan list * @local: per-cpu pointer to a struct dma_chan_percpu @@ -162,10 +162,17 @@ struct dma_client { * @chancnt: how many DMA channels are supported * @channels: the list of struct dma_chan * @global_node: list_head for global dma_device_list - * @refcount: - * @done: - * @dev_id: - * Other func ptrs: used to make use of this device's capabilities + * @refcount: reference count + * @done: IO completion struct + * @dev_id: unique device ID + * @device_alloc_chan_resources: allocate resources and return the + * number of allocated descriptors + * @device_free_chan_resources: release DMA channel's resources + * @device_memcpy_buf_to_buf: memcpy buf pointer to buf pointer + * @device_memcpy_buf_to_pg: memcpy buf pointer to struct page + * @device_memcpy_pg_to_pg: memcpy struct page/offset to struct page/offset + * @device_memcpy_complete: poll the status of an IOAT DMA transaction + * @device_memcpy_issue_pending: push appended descriptors to hardware */ struct dma_device { @@ -211,7 +218,7 @@ void dma_async_client_chan_request(struct dma_client *client, * Both @dest and @src must be mappable to a bus address according to the * DMA mapping API rules for streaming mappings. * Both @dest and @src must stay memory resident (kernel memory or locked - * user space pages) + * user space pages). 
*/ static inline dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest, void *src, size_t len) @@ -225,7 +232,7 @@ static inline dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan, } /** - * dma_async_memcpy_buf_to_pg - offloaded copy + * dma_async_memcpy_buf_to_pg - offloaded copy from address to page * @chan: DMA channel to offload copy to * @page: destination page * @offset: offset in page to copy to @@ -250,18 +257,18 @@ static inline dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan, } /** - * dma_async_memcpy_buf_to_pg - offloaded copy + * dma_async_memcpy_pg_to_pg - offloaded copy from page to page * @chan: DMA channel to offload copy to - * @dest_page: destination page + * @dest_pg: destination page * @dest_off: offset in page to copy to - * @src_page: source page + * @src_pg: source page * @src_off: offset in page to copy from * @len: length * * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus * address according to the DMA mapping API rules for streaming mappings. * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident - * (kernel memory or locked user space pages) + * (kernel memory or locked user space pages). */ static inline dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg, unsigned int dest_off, struct page *src_pg, @@ -278,7 +285,7 @@ static inline dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan, /** * dma_async_memcpy_issue_pending - flush pending copies to HW - * @chan: + * @chan: target DMA channel * * This allows drivers to push copies to HW in batches, * reducing MMIO writes where possible. diff --git a/include/linux/dqblk_xfs.h b/include/linux/dqblk_xfs.h index 2fda1b2aabd..527504c11c5 100644 --- a/include/linux/dqblk_xfs.h +++ b/include/linux/dqblk_xfs.h @@ -125,14 +125,14 @@ typedef struct fs_disk_quota { /* * fs_quota_stat is the struct returned in Q_XGETQSTAT for a given file system. - * Provides a centralized way to get meta infomation about the quota subsystem. + * Provides a centralized way to get meta information about the quota subsystem. * eg. space taken up for user and group quotas, number of dquots currently * incore. */ #define FS_QSTAT_VERSION 1 /* fs_quota_stat.qs_version */ /* - * Some basic infomation about 'quota files'. + * Some basic information about 'quota files'. */ typedef struct fs_qfilestat { __u64 qfs_ino; /* inode number */ diff --git a/include/linux/efs_fs.h b/include/linux/efs_fs.h index fbfa6b52e2f..278ef449581 100644 --- a/include/linux/efs_fs.h +++ b/include/linux/efs_fs.h @@ -38,7 +38,7 @@ struct statfs; extern struct inode_operations efs_dir_inode_operations; extern const struct file_operations efs_dir_operations; -extern struct address_space_operations efs_symlink_aops; +extern const struct address_space_operations efs_symlink_aops; extern void efs_read_inode(struct inode *); extern efs_block_t efs_map_block(struct inode *, efs_block_t); diff --git a/include/linux/elf-em.h b/include/linux/elf-em.h index 114a96d2565..6a5796c81c9 100644 --- a/include/linux/elf-em.h +++ b/include/linux/elf-em.h @@ -11,7 +11,12 @@ #define EM_486 6 /* Perhaps disused */ #define EM_860 7 #define EM_MIPS 8 /* MIPS R3000 (officially, big-endian only) */ + /* Next two are historical and binaries and + modules of these types will be rejected by + Linux. 
*/ +#define EM_MIPS_RS3_LE 10 /* MIPS R3000 little-endian */ #define EM_MIPS_RS4_BE 10 /* MIPS R4000 big-endian */ + #define EM_PARISC 15 /* HPPA */ #define EM_SPARC32PLUS 18 /* Sun's "v8plus" */ #define EM_PPC 20 /* PowerPC */ diff --git a/include/linux/err.h b/include/linux/err.h index ff71d2af5da..cd3b367f744 100644 --- a/include/linux/err.h +++ b/include/linux/err.h @@ -13,7 +13,9 @@ * This should be a per-architecture thing, to allow different * error and pointer decisions. */ -#define IS_ERR_VALUE(x) unlikely((x) > (unsigned long)-1000L) +#define MAX_ERRNO 4095 + +#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO) static inline void *ERR_PTR(long error) { diff --git a/include/linux/fb.h b/include/linux/fb.h index 07a08e92bc7..ffefeeeeca9 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -380,7 +380,6 @@ struct fb_cursor { #include <linux/tty.h> #include <linux/device.h> #include <linux/workqueue.h> -#include <linux/devfs_fs_kernel.h> #include <linux/notifier.h> #include <linux/list.h> #include <asm/io.h> @@ -558,7 +557,7 @@ struct fb_pixmap { * Frame buffer operations * * LOCKING NOTE: those functions must _ALL_ be called with the console - * semaphore held, this is the only suitable locking mecanism we have + * semaphore held, this is the only suitable locking mechanism we have * in 2.6. Some may be called at interrupt time at this point though. */ diff --git a/include/linux/fs.h b/include/linux/fs.h index 2d8b348c119..134b3206824 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -392,7 +392,7 @@ struct address_space { unsigned int truncate_count; /* Cover race condition with truncate */ unsigned long nrpages; /* number of total pages */ pgoff_t writeback_index;/* writeback starts here */ - struct address_space_operations *a_ops; /* methods */ + const struct address_space_operations *a_ops; /* methods */ unsigned long flags; /* error bits/gfp mask */ struct backing_dev_info *backing_dev_info; /* device readahead, etc */ spinlock_t private_lock; /* for use by the address_space */ @@ -436,6 +436,21 @@ struct block_device { }; /* + * bdev->bd_mutex nesting subclasses for the lock validator: + * + * 0: normal + * 1: 'whole' + * 2: 'partition' + */ +enum bdev_bd_mutex_lock_class +{ + BD_MUTEX_NORMAL, + BD_MUTEX_WHOLE, + BD_MUTEX_PARTITION +}; + + +/* * Radix-tree tags, for tagging dirty and writeback pages within the pagecache * radix trees */ @@ -543,6 +558,25 @@ struct inode { }; /* + * inode->i_mutex nesting subclasses for the lock validator: + * + * 0: the object of the current VFS operation + * 1: parent + * 2: child/target + * 3: quota file + * + * The locking order between these classes is + * parent -> child -> normal -> quota + */ +enum inode_i_mutex_lock_class +{ + I_MUTEX_NORMAL, + I_MUTEX_PARENT, + I_MUTEX_CHILD, + I_MUTEX_QUOTA +}; + +/* * NOTE: in a 32bit arch with a preemptable kernel and * an UP compile the i_size_read/write must be atomic * with respect to the local cpu (unlike with preempt disabled), @@ -1276,6 +1310,8 @@ struct file_system_type { struct module *owner; struct file_system_type * next; struct list_head fs_supers; + struct lock_class_key s_lock_key; + struct lock_class_key s_umount_key; }; extern int get_sb_bdev(struct file_system_type *fs_type, @@ -1404,8 +1440,9 @@ extern void bd_set_size(struct block_device *, loff_t size); extern void bd_forget(struct inode *inode); extern void bdput(struct block_device *); extern struct block_device *open_by_devnum(dev_t, unsigned); +extern struct block_device 
*open_partition_by_devnum(dev_t, unsigned); extern const struct file_operations def_blk_fops; -extern struct address_space_operations def_blk_aops; +extern const struct address_space_operations def_blk_aops; extern const struct file_operations def_chr_fops; extern const struct file_operations bad_sock_fops; extern const struct file_operations def_fifo_fops; @@ -1414,6 +1451,7 @@ extern int blkdev_ioctl(struct inode *, struct file *, unsigned, unsigned long); extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long); extern int blkdev_get(struct block_device *, mode_t, unsigned); extern int blkdev_put(struct block_device *); +extern int blkdev_put_partition(struct block_device *); extern int bd_claim(struct block_device *, void *); extern void bd_release(struct block_device *); #ifdef CONFIG_SYSFS diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 3498a0c6818..e4af57e87c1 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -112,8 +112,6 @@ struct gendisk { sector_t capacity; int flags; - char devfs_name[64]; /* devfs crap */ - int number; /* more of the same */ struct device *driverfs_dev; struct kobject kobj; struct kobject *holder_dir; diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index 114ae583cca..50d8b5744cf 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h @@ -3,6 +3,7 @@ #include <linux/preempt.h> #include <linux/smp_lock.h> +#include <linux/lockdep.h> #include <asm/hardirq.h> #include <asm/system.h> @@ -86,9 +87,6 @@ extern void synchronize_irq(unsigned int irq); # define synchronize_irq(irq) barrier() #endif -#define nmi_enter() irq_enter() -#define nmi_exit() sub_preempt_count(HARDIRQ_OFFSET) - struct task_struct; #ifndef CONFIG_VIRT_CPU_ACCOUNTING @@ -97,12 +95,35 @@ static inline void account_system_vtime(struct task_struct *tsk) } #endif +/* + * It is safe to do non-atomic ops on ->hardirq_context, + * because NMI handlers may not preempt and the ops are + * always balanced, so the interrupted value of ->hardirq_context + * will always be restored. 
+ */ #define irq_enter() \ do { \ account_system_vtime(current); \ add_preempt_count(HARDIRQ_OFFSET); \ + trace_hardirq_enter(); \ + } while (0) + +/* + * Exit irq context without processing softirqs: + */ +#define __irq_exit() \ + do { \ + trace_hardirq_exit(); \ + account_system_vtime(current); \ + sub_preempt_count(HARDIRQ_OFFSET); \ } while (0) +/* + * Exit irq context and process softirqs if needed: + */ extern void irq_exit(void); +#define nmi_enter() do { lockdep_off(); irq_enter(); } while (0) +#define nmi_exit() do { __irq_exit(); lockdep_on(); } while (0) + #endif /* LINUX_HARDIRQ_H */ diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 07d7305f131..e4bccbcc275 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -91,6 +91,7 @@ struct hrtimer_base { ktime_t (*get_softirq_time)(void); struct hrtimer *curr_timer; ktime_t softirq_time; + struct lock_class_key lock_key; }; /* diff --git a/include/linux/ide.h b/include/linux/ide.h index ef7bef207f4..dc7abef1096 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h @@ -552,7 +552,6 @@ typedef struct ide_drive_s { struct hd_driveid *id; /* drive model identification info */ struct proc_dir_entry *proc; /* /proc/ide/ directory entry */ struct ide_settings_s *settings;/* /proc/ide/ drive settings */ - char devfs_name[64]; /* devfs crap */ struct hwif_s *hwif; /* actually (ide_hwif_t *) */ @@ -793,6 +792,7 @@ typedef struct hwif_s { unsigned auto_poll : 1; /* supports nop auto-poll */ unsigned sg_mapped : 1; /* sg_table and sg_nents are ready */ unsigned no_io_32bit : 1; /* 1 = can not do 32-bit IO ops */ + unsigned err_stops_fifo : 1; /* 1=data FIFO is cleared by an error */ struct device gendev; struct completion gendev_rel_comp; /* To deal with device release() */ @@ -1359,7 +1359,7 @@ extern struct semaphore ide_cfg_sem; * ide_drive_t->hwif: constant, no locking */ -#define local_irq_set(flags) do { local_save_flags((flags)); local_irq_enable(); } while (0) +#define local_irq_set(flags) do { local_save_flags((flags)); local_irq_enable_in_hardirq(); } while (0) extern struct bus_type ide_bus_type; diff --git a/include/linux/idr.h b/include/linux/idr.h index f559a719dbe..826803449db 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h @@ -66,7 +66,7 @@ struct idr { .id_free = NULL, \ .layers = 0, \ .id_free_cnt = 0, \ - .lock = SPIN_LOCK_UNLOCKED, \ + .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ } #define DEFINE_IDR(name) struct idr name = IDR_INIT(name) diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 3a256957fb5..60aac2cea0c 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -3,6 +3,8 @@ #include <linux/file.h> #include <linux/rcupdate.h> +#include <linux/irqflags.h> +#include <linux/lockdep.h> #define INIT_FDTABLE \ { \ @@ -21,7 +23,7 @@ .count = ATOMIC_INIT(1), \ .fdt = &init_files.fdtab, \ .fdtab = INIT_FDTABLE, \ - .file_lock = SPIN_LOCK_UNLOCKED, \ + .file_lock = __SPIN_LOCK_UNLOCKED(init_task.file_lock), \ .next_fd = 0, \ .close_on_exec_init = { { 0, } }, \ .open_fds_init = { { 0, } }, \ @@ -36,7 +38,7 @@ .user_id = 0, \ .next = NULL, \ .wait = __WAIT_QUEUE_HEAD_INITIALIZER(name.wait), \ - .ctx_lock = SPIN_LOCK_UNLOCKED, \ + .ctx_lock = __SPIN_LOCK_UNLOCKED(name.ctx_lock), \ .reqs_active = 0U, \ .max_reqs = ~0U, \ } @@ -48,7 +50,7 @@ .mm_users = ATOMIC_INIT(2), \ .mm_count = ATOMIC_INIT(1), \ .mmap_sem = __RWSEM_INITIALIZER(name.mmap_sem), \ - .page_table_lock = SPIN_LOCK_UNLOCKED, \ + .page_table_lock = 
__SPIN_LOCK_UNLOCKED(name.page_table_lock), \ .mmlist = LIST_HEAD_INIT(name.mmlist), \ .cpu_vm_mask = CPU_MASK_ALL, \ } @@ -69,7 +71,7 @@ #define INIT_SIGHAND(sighand) { \ .count = ATOMIC_INIT(1), \ .action = { { { .sa_handler = NULL, } }, }, \ - .siglock = SPIN_LOCK_UNLOCKED, \ + .siglock = __SPIN_LOCK_UNLOCKED(sighand.siglock), \ } extern struct group_info init_groups; @@ -119,12 +121,13 @@ extern struct group_info init_groups; .list = LIST_HEAD_INIT(tsk.pending.list), \ .signal = {{0}}}, \ .blocked = {{0}}, \ - .alloc_lock = SPIN_LOCK_UNLOCKED, \ + .alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \ .journal_info = NULL, \ .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ .fs_excl = ATOMIC_INIT(0), \ .pi_lock = SPIN_LOCK_UNLOCKED, \ - INIT_RT_MUTEXES(tsk) \ + INIT_TRACE_IRQFLAGS \ + INIT_LOCKDEP \ } diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 70741e17011..d5afee95fd4 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -10,10 +10,60 @@ #include <linux/irqreturn.h> #include <linux/hardirq.h> #include <linux/sched.h> +#include <linux/irqflags.h> #include <asm/atomic.h> #include <asm/ptrace.h> #include <asm/system.h> +/* + * These correspond to the IORESOURCE_IRQ_* defines in + * linux/ioport.h to select the interrupt line behaviour. When + * requesting an interrupt without specifying a IRQF_TRIGGER, the + * setting should be assumed to be "as already configured", which + * may be as per machine or firmware initialisation. + */ +#define IRQF_TRIGGER_NONE 0x00000000 +#define IRQF_TRIGGER_RISING 0x00000001 +#define IRQF_TRIGGER_FALLING 0x00000002 +#define IRQF_TRIGGER_HIGH 0x00000004 +#define IRQF_TRIGGER_LOW 0x00000008 +#define IRQF_TRIGGER_MASK (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \ + IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING) +#define IRQF_TRIGGER_PROBE 0x00000010 + +/* + * These flags used only by the kernel as part of the + * irq handling routines. + * + * IRQF_DISABLED - keep irqs disabled when calling the action handler + * IRQF_SAMPLE_RANDOM - irq is used to feed the random generator + * IRQF_SHARED - allow sharing the irq among several devices + * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur + * IRQF_TIMER - Flag to mark this interrupt as timer interrupt + */ +#define IRQF_DISABLED 0x00000020 +#define IRQF_SAMPLE_RANDOM 0x00000040 +#define IRQF_SHARED 0x00000080 +#define IRQF_PROBE_SHARED 0x00000100 +#define IRQF_TIMER 0x00000200 +#define IRQF_PERCPU 0x00000400 + +/* + * Migration helpers. Scheduled for removal in 1/2007 + * Do not use for new code ! + */ +#define SA_INTERRUPT IRQF_DISABLED +#define SA_SAMPLE_RANDOM IRQF_SAMPLE_RANDOM +#define SA_SHIRQ IRQF_SHARED +#define SA_PROBEIRQ IRQF_PROBE_SHARED +#define SA_PERCPU IRQF_PERCPU + +#define SA_TRIGGER_LOW IRQF_TRIGGER_LOW +#define SA_TRIGGER_HIGH IRQF_TRIGGER_HIGH +#define SA_TRIGGER_FALLING IRQF_TRIGGER_FALLING +#define SA_TRIGGER_RISING IRQF_TRIGGER_RISING +#define SA_TRIGGER_MASK IRQF_TRIGGER_MASK + struct irqaction { irqreturn_t (*handler)(int, void *, struct pt_regs *); unsigned long flags; @@ -31,12 +81,90 @@ extern int request_irq(unsigned int, unsigned long, const char *, void *); extern void free_irq(unsigned int, void *); +/* + * On lockdep we dont want to enable hardirqs in hardirq + * context. Use local_irq_enable_in_hardirq() to annotate + * kernel code that has to do this nevertheless (pretty much + * the only valid case is for old/broken hardware that is + * insanely slow). 
+ * + * NOTE: in theory this might break fragile code that relies + * on hardirq delivery - in practice we dont seem to have such + * places left. So the only effect should be slightly increased + * irqs-off latencies. + */ +#ifdef CONFIG_LOCKDEP +# define local_irq_enable_in_hardirq() do { } while (0) +#else +# define local_irq_enable_in_hardirq() local_irq_enable() +#endif #ifdef CONFIG_GENERIC_HARDIRQS extern void disable_irq_nosync(unsigned int irq); extern void disable_irq(unsigned int irq); extern void enable_irq(unsigned int irq); + +/* + * Special lockdep variants of irq disabling/enabling. + * These should be used for locking constructs that + * know that a particular irq context which is disabled, + * and which is the only irq-context user of a lock, + * that it's safe to take the lock in the irq-disabled + * section without disabling hardirqs. + * + * On !CONFIG_LOCKDEP they are equivalent to the normal + * irq disable/enable methods. + */ +static inline void disable_irq_nosync_lockdep(unsigned int irq) +{ + disable_irq_nosync(irq); +#ifdef CONFIG_LOCKDEP + local_irq_disable(); +#endif +} + +static inline void disable_irq_lockdep(unsigned int irq) +{ + disable_irq(irq); +#ifdef CONFIG_LOCKDEP + local_irq_disable(); #endif +} + +static inline void enable_irq_lockdep(unsigned int irq) +{ +#ifdef CONFIG_LOCKDEP + local_irq_enable(); +#endif + enable_irq(irq); +} + +/* IRQ wakeup (PM) control: */ +extern int set_irq_wake(unsigned int irq, unsigned int on); + +static inline int enable_irq_wake(unsigned int irq) +{ + return set_irq_wake(irq, 1); +} + +static inline int disable_irq_wake(unsigned int irq) +{ + return set_irq_wake(irq, 0); +} + +#else /* !CONFIG_GENERIC_HARDIRQS */ +/* + * NOTE: non-genirq architectures, if they want to support the lock + * validator need to define the methods below in their asm/irq.h + * files, under an #ifdef CONFIG_LOCKDEP section. + */ +# ifndef CONFIG_LOCKDEP +# define disable_irq_nosync_lockdep(irq) disable_irq_nosync(irq) +# define disable_irq_lockdep(irq) disable_irq(irq) +# define enable_irq_lockdep(irq) enable_irq(irq) +# endif + +#endif /* CONFIG_GENERIC_HARDIRQS */ #ifndef __ARCH_SET_SOFTIRQ_PENDING #define set_softirq_pending(x) (local_softirq_pending() = (x)) @@ -72,13 +200,11 @@ static inline void __deprecated save_and_cli(unsigned long *x) #define save_and_cli(x) save_and_cli(&x) #endif /* CONFIG_SMP */ -/* SoftIRQ primitives. */ -#define local_bh_disable() \ - do { add_preempt_count(SOFTIRQ_OFFSET); barrier(); } while (0) -#define __local_bh_enable() \ - do { barrier(); sub_preempt_count(SOFTIRQ_OFFSET); } while (0) - +extern void local_bh_disable(void); +extern void __local_bh_enable(void); +extern void _local_bh_enable(void); extern void local_bh_enable(void); +extern void local_bh_enable_ip(unsigned long ip); /* PLEASE, avoid to allocate new softirqs, if you need not _really_ high frequency threaded job scheduling. For almost all the purposes diff --git a/include/linux/ioport.h b/include/linux/ioport.h index edfc733b157..5612dfeeae5 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -9,13 +9,15 @@ #define _LINUX_IOPORT_H #include <linux/compiler.h> +#include <linux/types.h> /* * Resources are tree-like, allowing * nesting etc.. 
*/ struct resource { + resource_size_t start; + resource_size_t end; const char *name; - unsigned long start, end; unsigned long flags; struct resource *parent, *sibling, *child; }; @@ -53,6 +55,7 @@ struct resource_list { #define IORESOURCE_IRQ_LOWEDGE (1<<1) #define IORESOURCE_IRQ_HIGHLEVEL (1<<2) #define IORESOURCE_IRQ_LOWLEVEL (1<<3) +#define IORESOURCE_IRQ_SHAREABLE (1<<4) /* ISA PnP DMA specific bits (IORESOURCE_BITS) */ #define IORESOURCE_DMA_TYPE_MASK (3<<0) @@ -96,14 +99,13 @@ extern struct resource * ____request_resource(struct resource *root, struct reso extern int release_resource(struct resource *new); extern __deprecated_for_modules int insert_resource(struct resource *parent, struct resource *new); extern int allocate_resource(struct resource *root, struct resource *new, - unsigned long size, - unsigned long min, unsigned long max, - unsigned long align, + resource_size_t size, resource_size_t min, + resource_size_t max, resource_size_t align, void (*alignf)(void *, struct resource *, - unsigned long, unsigned long), + resource_size_t, resource_size_t), void *alignf_data); -int adjust_resource(struct resource *res, unsigned long start, - unsigned long size); +int adjust_resource(struct resource *res, resource_size_t start, + resource_size_t size); /* get registered SYSTEM_RAM resources in specified area */ extern int find_next_system_ram(struct resource *res); @@ -113,17 +115,21 @@ extern int find_next_system_ram(struct resource *res); #define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name)) #define rename_region(region, newname) do { (region)->name = (newname); } while (0) -extern struct resource * __request_region(struct resource *, unsigned long start, unsigned long n, const char *name); +extern struct resource * __request_region(struct resource *, + resource_size_t start, + resource_size_t n, const char *name); /* Compatibility cruft */ #define release_region(start,n) __release_region(&ioport_resource, (start), (n)) #define check_mem_region(start,n) __check_region(&iomem_resource, (start), (n)) #define release_mem_region(start,n) __release_region(&iomem_resource, (start), (n)) -extern int __check_region(struct resource *, unsigned long, unsigned long); -extern void __release_region(struct resource *, unsigned long, unsigned long); +extern int __check_region(struct resource *, resource_size_t, resource_size_t); +extern void __release_region(struct resource *, resource_size_t, + resource_size_t); -static inline int __deprecated check_region(unsigned long s, unsigned long n) +static inline int __deprecated check_region(resource_size_t s, + resource_size_t n) { return __check_region(&ioport_resource, s, n); } diff --git a/include/linux/irq.h b/include/linux/irq.h index 676e00dfb21..b48eae32dc6 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -1,5 +1,5 @@ -#ifndef __irq_h -#define __irq_h +#ifndef _LINUX_IRQ_H +#define _LINUX_IRQ_H /* * Please do not include this file in generic code. There is currently @@ -11,7 +11,7 @@ #include <linux/smp.h> -#if !defined(CONFIG_S390) +#ifndef CONFIG_S390 #include <linux/linkage.h> #include <linux/cache.h> @@ -24,84 +24,172 @@ /* * IRQ line status. + * + * Bits 0-16 are reserved for the IRQF_* bits in linux/interrupt.h + * + * IRQ types */ -#define IRQ_INPROGRESS 1 /* IRQ handler active - do not enter! */ -#define IRQ_DISABLED 2 /* IRQ disabled - do not enter! 
*/ -#define IRQ_PENDING 4 /* IRQ pending - replay on enable */ -#define IRQ_REPLAY 8 /* IRQ has been replayed but not acked yet */ -#define IRQ_AUTODETECT 16 /* IRQ is being autodetected */ -#define IRQ_WAITING 32 /* IRQ not yet seen - for autodetection */ -#define IRQ_LEVEL 64 /* IRQ level triggered */ -#define IRQ_MASKED 128 /* IRQ masked - shouldn't be seen again */ -#if defined(ARCH_HAS_IRQ_PER_CPU) -# define IRQ_PER_CPU 256 /* IRQ is per CPU */ +#define IRQ_TYPE_NONE 0x00000000 /* Default, unspecified type */ +#define IRQ_TYPE_EDGE_RISING 0x00000001 /* Edge rising type */ +#define IRQ_TYPE_EDGE_FALLING 0x00000002 /* Edge falling type */ +#define IRQ_TYPE_EDGE_BOTH (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING) +#define IRQ_TYPE_LEVEL_HIGH 0x00000004 /* Level high type */ +#define IRQ_TYPE_LEVEL_LOW 0x00000008 /* Level low type */ +#define IRQ_TYPE_SENSE_MASK 0x0000000f /* Mask of the above */ +#define IRQ_TYPE_PROBE 0x00000010 /* Probing in progress */ + +/* Internal flags */ +#define IRQ_INPROGRESS 0x00010000 /* IRQ handler active - do not enter! */ +#define IRQ_DISABLED 0x00020000 /* IRQ disabled - do not enter! */ +#define IRQ_PENDING 0x00040000 /* IRQ pending - replay on enable */ +#define IRQ_REPLAY 0x00080000 /* IRQ has been replayed but not acked yet */ +#define IRQ_AUTODETECT 0x00100000 /* IRQ is being autodetected */ +#define IRQ_WAITING 0x00200000 /* IRQ not yet seen - for autodetection */ +#define IRQ_LEVEL 0x00400000 /* IRQ level triggered */ +#define IRQ_MASKED 0x00800000 /* IRQ masked - shouldn't be seen again */ +#ifdef CONFIG_IRQ_PER_CPU +# define IRQ_PER_CPU 0x01000000 /* IRQ is per CPU */ # define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU) #else # define CHECK_IRQ_PER_CPU(var) 0 #endif -/* - * Interrupt controller descriptor. This is all we need - * to describe about the low-level hardware. +#define IRQ_NOPROBE 0x02000000 /* IRQ is not valid for probing */ +#define IRQ_NOREQUEST 0x04000000 /* IRQ cannot be requested */ +#define IRQ_NOAUTOEN 0x08000000 /* IRQ will not be enabled on request irq */ +#define IRQ_DELAYED_DISABLE 0x10000000 /* IRQ disable (masking) happens delayed. */ + +struct proc_dir_entry; + +/** + * struct irq_chip - hardware interrupt chip descriptor + * + * @name: name for /proc/interrupts + * @startup: start up the interrupt (defaults to ->enable if NULL) + * @shutdown: shut down the interrupt (defaults to ->disable if NULL) + * @enable: enable the interrupt (defaults to chip->unmask if NULL) + * @disable: disable the interrupt (defaults to chip->mask if NULL) + * @ack: start of a new interrupt + * @mask: mask an interrupt source + * @mask_ack: ack and mask an interrupt source + * @unmask: unmask an interrupt source + * @eoi: end of interrupt - chip level + * @end: end of interrupt - flow level + * @set_affinity: set the CPU affinity on SMP machines + * @retrigger: resend an IRQ to the CPU + * @set_type: set the flow type (IRQ_TYPE_LEVEL/etc.) 
of an IRQ + * @set_wake: enable/disable power-management wake-on of an IRQ + * + * @release: release function solely used by UML + * @typename: obsoleted by name, kept as migration helper */ -struct hw_interrupt_type { - const char * typename; - unsigned int (*startup)(unsigned int irq); - void (*shutdown)(unsigned int irq); - void (*enable)(unsigned int irq); - void (*disable)(unsigned int irq); - void (*ack)(unsigned int irq); - void (*end)(unsigned int irq); - void (*set_affinity)(unsigned int irq, cpumask_t dest); +struct irq_chip { + const char *name; + unsigned int (*startup)(unsigned int irq); + void (*shutdown)(unsigned int irq); + void (*enable)(unsigned int irq); + void (*disable)(unsigned int irq); + + void (*ack)(unsigned int irq); + void (*mask)(unsigned int irq); + void (*mask_ack)(unsigned int irq); + void (*unmask)(unsigned int irq); + void (*eoi)(unsigned int irq); + + void (*end)(unsigned int irq); + void (*set_affinity)(unsigned int irq, cpumask_t dest); + int (*retrigger)(unsigned int irq); + int (*set_type)(unsigned int irq, unsigned int flow_type); + int (*set_wake)(unsigned int irq, unsigned int on); + /* Currently used only by UML, might disappear one day.*/ #ifdef CONFIG_IRQ_RELEASE_METHOD - void (*release)(unsigned int irq, void *dev_id); + void (*release)(unsigned int irq, void *dev_id); #endif + /* + * For compatibility, ->typename is copied into ->name. + * Will disappear. + */ + const char *typename; }; -typedef struct hw_interrupt_type hw_irq_controller; - -/* - * This is the "IRQ descriptor", which contains various information - * about the irq, including what kind of hardware handling it has, - * whether it is disabled etc etc. +/** + * struct irq_desc - interrupt descriptor + * + * @handle_irq: highlevel irq-events handler [if NULL, __do_IRQ()] + * @chip: low level interrupt hardware access + * @handler_data: per-IRQ data for the irq_chip methods + * @chip_data: platform-specific per-chip private data for the chip + * methods, to allow shared chip implementations + * @action: the irq action chain + * @status: status information + * @depth: disable-depth, for nested irq_disable() calls + * @irq_count: stats field to detect stalled irqs + * @irqs_unhandled: stats field for spurious unhandled interrupts + * @lock: locking for SMP + * @affinity: IRQ affinity on SMP + * @cpu: cpu index useful for balancing + * @pending_mask: pending rebalanced interrupts + * @move_irq: need to re-target IRQ destination + * @dir: /proc/irq/ procfs entry + * @affinity_entry: /proc/irq/smp_affinity procfs entry on SMP * * Pad this out to 32 bytes for cache and indexing reasons. 
*/ -typedef struct irq_desc { - hw_irq_controller *handler; - void *handler_data; - struct irqaction *action; /* IRQ action list */ - unsigned int status; /* IRQ status */ - unsigned int depth; /* nested irq disables */ - unsigned int irq_count; /* For detecting broken interrupts */ - unsigned int irqs_unhandled; - spinlock_t lock; -#if defined (CONFIG_GENERIC_PENDING_IRQ) || defined (CONFIG_IRQBALANCE) - unsigned int move_irq; /* Flag need to re-target intr dest*/ +struct irq_desc { + void fastcall (*handle_irq)(unsigned int irq, + struct irq_desc *desc, + struct pt_regs *regs); + struct irq_chip *chip; + void *handler_data; + void *chip_data; + struct irqaction *action; /* IRQ action list */ + unsigned int status; /* IRQ status */ + + unsigned int depth; /* nested irq disables */ + unsigned int irq_count; /* For detecting broken IRQs */ + unsigned int irqs_unhandled; + spinlock_t lock; +#ifdef CONFIG_SMP + cpumask_t affinity; + unsigned int cpu; +#endif +#if defined(CONFIG_GENERIC_PENDING_IRQ) || defined(CONFIG_IRQBALANCE) + cpumask_t pending_mask; + unsigned int move_irq; /* need to re-target IRQ dest */ +#endif +#ifdef CONFIG_PROC_FS + struct proc_dir_entry *dir; #endif -} ____cacheline_aligned irq_desc_t; +} ____cacheline_aligned; -extern irq_desc_t irq_desc [NR_IRQS]; +extern struct irq_desc irq_desc[NR_IRQS]; -/* Return a pointer to the irq descriptor for IRQ. */ -static inline irq_desc_t * -irq_descp (int irq) -{ - return irq_desc + irq; -} +/* + * Migration helpers for obsolete names, they will go away: + */ +#define hw_interrupt_type irq_chip +typedef struct irq_chip hw_irq_controller; +#define no_irq_type no_irq_chip +typedef struct irq_desc irq_desc_t; -#include <asm/hw_irq.h> /* the arch dependent stuff */ +/* + * Pick up the arch-dependent methods: + */ +#include <asm/hw_irq.h> -extern int setup_irq(unsigned int irq, struct irqaction * new); +extern int setup_irq(unsigned int irq, struct irqaction *new); #ifdef CONFIG_GENERIC_HARDIRQS -extern cpumask_t irq_affinity[NR_IRQS]; + +#ifndef handle_dynamic_tick +# define handle_dynamic_tick(a) do { } while (0) +#endif #ifdef CONFIG_SMP static inline void set_native_irq_info(int irq, cpumask_t mask) { - irq_affinity[irq] = mask; + irq_desc[irq].affinity = mask; } #else static inline void set_native_irq_info(int irq, cpumask_t mask) @@ -111,8 +199,7 @@ static inline void set_native_irq_info(int irq, cpumask_t mask) #ifdef CONFIG_SMP -#if defined (CONFIG_GENERIC_PENDING_IRQ) || defined (CONFIG_IRQBALANCE) -extern cpumask_t pending_irq_cpumask[NR_IRQS]; +#if defined(CONFIG_GENERIC_PENDING_IRQ) || defined(CONFIG_IRQBALANCE) void set_pending_irq(unsigned int irq, cpumask_t mask); void move_native_irq(int irq); @@ -133,7 +220,7 @@ static inline void set_irq_info(int irq, cpumask_t mask) { } -#else // CONFIG_PCI_MSI +#else /* CONFIG_PCI_MSI */ static inline void move_irq(int irq) { @@ -144,26 +231,36 @@ static inline void set_irq_info(int irq, cpumask_t mask) { set_native_irq_info(irq, mask); } -#endif // CONFIG_PCI_MSI -#else // CONFIG_GENERIC_PENDING_IRQ || CONFIG_IRQBALANCE +#endif /* CONFIG_PCI_MSI */ + +#else /* CONFIG_GENERIC_PENDING_IRQ || CONFIG_IRQBALANCE */ + +static inline void move_irq(int irq) +{ +} + +static inline void move_native_irq(int irq) +{ +} + +static inline void set_pending_irq(unsigned int irq, cpumask_t mask) +{ +} -#define move_irq(x) -#define move_native_irq(x) -#define set_pending_irq(x,y) static inline void set_irq_info(int irq, cpumask_t mask) { set_native_irq_info(irq, mask); } -#endif // 
CONFIG_GENERIC_PENDING_IRQ +#endif /* CONFIG_GENERIC_PENDING_IRQ */ -#else // CONFIG_SMP +#else /* CONFIG_SMP */ #define move_irq(x) #define move_native_irq(x) -#endif // CONFIG_SMP +#endif /* CONFIG_SMP */ #ifdef CONFIG_IRQBALANCE extern void set_balance_irq_affinity(unsigned int irq, cpumask_t mask); @@ -173,32 +270,139 @@ static inline void set_balance_irq_affinity(unsigned int irq, cpumask_t mask) } #endif +#ifdef CONFIG_AUTO_IRQ_AFFINITY +extern int select_smp_affinity(unsigned int irq); +#else +static inline int select_smp_affinity(unsigned int irq) +{ + return 1; +} +#endif + extern int no_irq_affinity; -extern int noirqdebug_setup(char *str); -extern fastcall irqreturn_t handle_IRQ_event(unsigned int irq, struct pt_regs *regs, - struct irqaction *action); +/* Handle irq action chains: */ +extern int handle_IRQ_event(unsigned int irq, struct pt_regs *regs, + struct irqaction *action); + +/* + * Built-in IRQ handlers for various IRQ types, + * callable via desc->chip->handle_irq() + */ +extern void fastcall +handle_level_irq(unsigned int irq, struct irq_desc *desc, struct pt_regs *regs); +extern void fastcall +handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc, + struct pt_regs *regs); +extern void fastcall +handle_edge_irq(unsigned int irq, struct irq_desc *desc, struct pt_regs *regs); +extern void fastcall +handle_simple_irq(unsigned int irq, struct irq_desc *desc, + struct pt_regs *regs); +extern void fastcall +handle_percpu_irq(unsigned int irq, struct irq_desc *desc, + struct pt_regs *regs); +extern void fastcall +handle_bad_irq(unsigned int irq, struct irq_desc *desc, struct pt_regs *regs); + +/* + * Get a descriptive string for the highlevel handler, for + * /proc/interrupts output: + */ +extern const char * +handle_irq_name(void fastcall (*handle)(unsigned int, struct irq_desc *, + struct pt_regs *)); + +/* + * Monolithic do_IRQ implementation. + * (is an explicit fastcall, because i386 4KSTACKS calls it from assembly) + */ extern fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs); -extern void note_interrupt(unsigned int irq, irq_desc_t *desc, - int action_ret, struct pt_regs *regs); -extern int can_request_irq(unsigned int irq, unsigned long irqflags); +/* + * Architectures call this to let the generic IRQ layer + * handle an interrupt. If the descriptor is attached to an + * irqchip-style controller then we call the ->handle_irq() handler, + * and it calls __do_IRQ() if it's attached to an irqtype-style controller. 
+ */ +static inline void generic_handle_irq(unsigned int irq, struct pt_regs *regs) +{ + struct irq_desc *desc = irq_desc + irq; + + if (likely(desc->handle_irq)) + desc->handle_irq(irq, desc, regs); + else + __do_IRQ(irq, regs); +} + +/* Handling of unhandled and spurious interrupts: */ +extern void note_interrupt(unsigned int irq, struct irq_desc *desc, + int action_ret, struct pt_regs *regs); + +/* Resending of interrupts :*/ +void check_irq_resend(struct irq_desc *desc, unsigned int irq); + +/* Initialize /proc/irq/ */ extern void init_irq_proc(void); -#ifdef CONFIG_AUTO_IRQ_AFFINITY -extern int select_smp_affinity(unsigned int irq); -#else -static inline int -select_smp_affinity(unsigned int irq) +/* Enable/disable irq debugging output: */ +extern int noirqdebug_setup(char *str); + +/* Checks whether the interrupt can be requested by request_irq(): */ +extern int can_request_irq(unsigned int irq, unsigned long irqflags); + +/* Dummy irq-chip implementations: */ +extern struct irq_chip no_irq_chip; +extern struct irq_chip dummy_irq_chip; + +extern void +set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip, + void fastcall (*handle)(unsigned int, + struct irq_desc *, + struct pt_regs *)); +extern void +__set_irq_handler(unsigned int irq, + void fastcall (*handle)(unsigned int, struct irq_desc *, + struct pt_regs *), + int is_chained); + +/* + * Set a highlevel flow handler for a given IRQ: + */ +static inline void +set_irq_handler(unsigned int irq, + void fastcall (*handle)(unsigned int, struct irq_desc *, + struct pt_regs *)) { - return 1; + __set_irq_handler(irq, handle, 0); } -#endif -#endif +/* + * Set a highlevel chained flow handler for a given IRQ. + * (a chained handler is automatically enabled and set to + * IRQ_NOREQUEST and IRQ_NOPROBE) + */ +static inline void +set_irq_chained_handler(unsigned int irq, + void fastcall (*handle)(unsigned int, struct irq_desc *, + struct pt_regs *)) +{ + __set_irq_handler(irq, handle, 1); +} -extern hw_irq_controller no_irq_type; /* needed in every arch ? */ +/* Set/get chip/data for an IRQ: */ -#endif +extern int set_irq_chip(unsigned int irq, struct irq_chip *chip); +extern int set_irq_data(unsigned int irq, void *data); +extern int set_irq_chip_data(unsigned int irq, void *data); +extern int set_irq_type(unsigned int irq, unsigned int type); + +#define get_irq_chip(irq) (irq_desc[irq].chip) +#define get_irq_chip_data(irq) (irq_desc[irq].chip_data) +#define get_irq_data(irq) (irq_desc[irq].handler_data) + +#endif /* CONFIG_GENERIC_HARDIRQS */ + +#endif /* !CONFIG_S390 */ -#endif /* __irq_h */ +#endif /* _LINUX_IRQ_H */ diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h new file mode 100644 index 00000000000..412e025bc5c --- /dev/null +++ b/include/linux/irqflags.h @@ -0,0 +1,96 @@ +/* + * include/linux/irqflags.h + * + * IRQ flags tracing: follow the state of the hardirq and softirq flags and + * provide callbacks for transitions between ON and OFF states. + * + * This file gets included from lowlevel asm headers too, to provide + * wrapped versions of the local_irq_*() APIs, based on the + * raw_local_irq_*() macros from the lowlevel headers. 
+ */ +#ifndef _LINUX_TRACE_IRQFLAGS_H +#define _LINUX_TRACE_IRQFLAGS_H + +#ifdef CONFIG_TRACE_IRQFLAGS + extern void trace_hardirqs_on(void); + extern void trace_hardirqs_off(void); + extern void trace_softirqs_on(unsigned long ip); + extern void trace_softirqs_off(unsigned long ip); +# define trace_hardirq_context(p) ((p)->hardirq_context) +# define trace_softirq_context(p) ((p)->softirq_context) +# define trace_hardirqs_enabled(p) ((p)->hardirqs_enabled) +# define trace_softirqs_enabled(p) ((p)->softirqs_enabled) +# define trace_hardirq_enter() do { current->hardirq_context++; } while (0) +# define trace_hardirq_exit() do { current->hardirq_context--; } while (0) +# define trace_softirq_enter() do { current->softirq_context++; } while (0) +# define trace_softirq_exit() do { current->softirq_context--; } while (0) +# define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1, +#else +# define trace_hardirqs_on() do { } while (0) +# define trace_hardirqs_off() do { } while (0) +# define trace_softirqs_on(ip) do { } while (0) +# define trace_softirqs_off(ip) do { } while (0) +# define trace_hardirq_context(p) 0 +# define trace_softirq_context(p) 0 +# define trace_hardirqs_enabled(p) 0 +# define trace_softirqs_enabled(p) 0 +# define trace_hardirq_enter() do { } while (0) +# define trace_hardirq_exit() do { } while (0) +# define trace_softirq_enter() do { } while (0) +# define trace_softirq_exit() do { } while (0) +# define INIT_TRACE_IRQFLAGS +#endif + +#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT + +#include <asm/irqflags.h> + +#define local_irq_enable() \ + do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0) +#define local_irq_disable() \ + do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0) +#define local_irq_save(flags) \ + do { raw_local_irq_save(flags); trace_hardirqs_off(); } while (0) + +#define local_irq_restore(flags) \ + do { \ + if (raw_irqs_disabled_flags(flags)) { \ + raw_local_irq_restore(flags); \ + trace_hardirqs_off(); \ + } else { \ + trace_hardirqs_on(); \ + raw_local_irq_restore(flags); \ + } \ + } while (0) +#else /* !CONFIG_TRACE_IRQFLAGS_SUPPORT */ +/* + * The local_irq_*() APIs are equal to the raw_local_irq*() + * if !TRACE_IRQFLAGS. + */ +# define raw_local_irq_disable() local_irq_disable() +# define raw_local_irq_enable() local_irq_enable() +# define raw_local_irq_save(flags) local_irq_save(flags) +# define raw_local_irq_restore(flags) local_irq_restore(flags) +#endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */ + +#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT +#define safe_halt() \ + do { \ + trace_hardirqs_on(); \ + raw_safe_halt(); \ + } while (0) + +#define local_save_flags(flags) raw_local_save_flags(flags) + +#define irqs_disabled() \ +({ \ + unsigned long flags; \ + \ + raw_local_save_flags(flags); \ + raw_irqs_disabled_flags(flags); \ +}) + +#define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags) +#endif /* CONFIG_X86 */ + +#endif diff --git a/include/linux/isdn/tpam.h b/include/linux/isdn/tpam.h deleted file mode 100644 index d18dd0dc570..00000000000 --- a/include/linux/isdn/tpam.h +++ /dev/null @@ -1,55 +0,0 @@ -/* $Id: tpam.h,v 1.1.2.1 2001/06/08 08:23:46 kai Exp $ - * - * Turbo PAM ISDN driver for Linux. 
(Kernel Driver) - * - * Copyright 2001 Stelian Pop <stelian.pop@fr.alcove.com>, Alcôve - * - * For all support questions please contact: <support@auvertech.fr> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - * - */ - -#ifndef _TPAM_H_ -#define _TPAM_H_ - -#include <linux/types.h> - -/* IOCTL commands */ -#define TPAM_CMD_DSPLOAD 0x0001 -#define TPAM_CMD_DSPSAVE 0x0002 -#define TPAM_CMD_DSPRUN 0x0003 -#define TPAM_CMD_LOOPMODEON 0x0004 -#define TPAM_CMD_LOOPMODEOFF 0x0005 - -/* addresses of debug information zones on board */ -#define TPAM_TRAPAUDIT_REGISTER 0x005493e4 -#define TPAM_NCOAUDIT_REGISTER 0x00500000 -#define TPAM_MSGAUDIT_REGISTER 0x008E30F0 - -/* length of debug information zones on board */ -#define TPAM_TRAPAUDIT_LENGTH 10000 -#define TPAM_NCOAUDIT_LENGTH 300000 -#define TPAM_NCOAUDIT_COUNT 30 -#define TPAM_MSGAUDIT_LENGTH 60000 - -/* IOCTL load/save parameter */ -typedef struct tpam_dsp_ioctl { - __u32 address; /* address to load/save data */ - __u32 data_len; /* size of data to be loaded/saved */ - __u8 data[0]; /* data */ -} tpam_dsp_ioctl; - -#endif /* _TPAM_H_ */ diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h index 54e2549f96b..849043ce4ed 100644 --- a/include/linux/kallsyms.h +++ b/include/linux/kallsyms.h @@ -57,10 +57,25 @@ do { \ #define print_fn_descriptor_symbol(fmt, addr) print_symbol(fmt, addr) #endif -#define print_symbol(fmt, addr) \ -do { \ - __check_printsym_format(fmt, ""); \ - __print_symbol(fmt, addr); \ +static inline void print_symbol(const char *fmt, unsigned long addr) +{ + __check_printsym_format(fmt, ""); + __print_symbol(fmt, (unsigned long) + __builtin_extract_return_addr((void *)addr)); +} + +#ifndef CONFIG_64BIT +#define print_ip_sym(ip) \ +do { \ + printk("[<%08lx>]", ip); \ + print_symbol(" %s\n", ip); \ } while(0) +#else +#define print_ip_sym(ip) \ +do { \ + printk("[<%016lx>]", ip); \ + print_symbol(" %s\n", ip); \ +} while(0) +#endif #endif /*_LINUX_KALLSYMS_H*/ diff --git a/include/linux/kbd_kern.h b/include/linux/kbd_kern.h index 4eb851ece08..efe0ee4cc80 100644 --- a/include/linux/kbd_kern.h +++ b/include/linux/kbd_kern.h @@ -155,10 +155,8 @@ static inline void con_schedule_flip(struct tty_struct *t) { unsigned long flags; spin_lock_irqsave(&t->buf.lock, flags); - if (t->buf.tail != NULL) { - t->buf.tail->active = 0; + if (t->buf.tail != NULL) t->buf.tail->commit = t->buf.tail->used; - } spin_unlock_irqrestore(&t->buf.lock, flags); schedule_work(&t->buf.work); } diff --git a/include/linux/key.h b/include/linux/key.h index e693e729bc9..169f05e4863 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -177,7 +177,8 @@ struct key { /* * kernel managed key type definition */ -typedef int (*request_key_actor_t)(struct key *key, struct key *authkey, const char *op); +typedef int (*request_key_actor_t)(struct key *key, struct key *authkey, + const char *op, void *aux); struct 
key_type { /* name of the type */ @@ -285,6 +286,11 @@ extern struct key *request_key(struct key_type *type, const char *description, const char *callout_info); +extern struct key *request_key_with_auxdata(struct key_type *type, + const char *description, + const char *callout_info, + void *aux); + extern int key_validate(struct key *key); extern key_ref_t key_create_or_update(key_ref_t keyring, diff --git a/include/linux/kobject.h b/include/linux/kobject.h index 2d229327959..0503b2ed8ba 100644 --- a/include/linux/kobject.h +++ b/include/linux/kobject.h @@ -46,6 +46,8 @@ enum kobject_action { KOBJ_UMOUNT = (__force kobject_action_t) 0x05, /* umount event for block devices (broken) */ KOBJ_OFFLINE = (__force kobject_action_t) 0x06, /* device offline */ KOBJ_ONLINE = (__force kobject_action_t) 0x07, /* device online */ + KOBJ_UNDOCK = (__force kobject_action_t) 0x08, /* undocking */ + KOBJ_DOCK = (__force kobject_action_t) 0x09, /* dock */ }; struct kobject { diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h new file mode 100644 index 00000000000..316e0fb8d7b --- /dev/null +++ b/include/linux/lockdep.h @@ -0,0 +1,353 @@ +/* + * Runtime locking correctness validator + * + * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> + * + * see Documentation/lockdep-design.txt for more details. + */ +#ifndef __LINUX_LOCKDEP_H +#define __LINUX_LOCKDEP_H + +#include <linux/linkage.h> +#include <linux/list.h> +#include <linux/debug_locks.h> +#include <linux/stacktrace.h> + +#ifdef CONFIG_LOCKDEP + +/* + * Lock-class usage-state bits: + */ +enum lock_usage_bit +{ + LOCK_USED = 0, + LOCK_USED_IN_HARDIRQ, + LOCK_USED_IN_SOFTIRQ, + LOCK_ENABLED_SOFTIRQS, + LOCK_ENABLED_HARDIRQS, + LOCK_USED_IN_HARDIRQ_READ, + LOCK_USED_IN_SOFTIRQ_READ, + LOCK_ENABLED_SOFTIRQS_READ, + LOCK_ENABLED_HARDIRQS_READ, + LOCK_USAGE_STATES +}; + +/* + * Usage-state bitmasks: + */ +#define LOCKF_USED (1 << LOCK_USED) +#define LOCKF_USED_IN_HARDIRQ (1 << LOCK_USED_IN_HARDIRQ) +#define LOCKF_USED_IN_SOFTIRQ (1 << LOCK_USED_IN_SOFTIRQ) +#define LOCKF_ENABLED_HARDIRQS (1 << LOCK_ENABLED_HARDIRQS) +#define LOCKF_ENABLED_SOFTIRQS (1 << LOCK_ENABLED_SOFTIRQS) + +#define LOCKF_ENABLED_IRQS (LOCKF_ENABLED_HARDIRQS | LOCKF_ENABLED_SOFTIRQS) +#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ) + +#define LOCKF_USED_IN_HARDIRQ_READ (1 << LOCK_USED_IN_HARDIRQ_READ) +#define LOCKF_USED_IN_SOFTIRQ_READ (1 << LOCK_USED_IN_SOFTIRQ_READ) +#define LOCKF_ENABLED_HARDIRQS_READ (1 << LOCK_ENABLED_HARDIRQS_READ) +#define LOCKF_ENABLED_SOFTIRQS_READ (1 << LOCK_ENABLED_SOFTIRQS_READ) + +#define LOCKF_ENABLED_IRQS_READ \ + (LOCKF_ENABLED_HARDIRQS_READ | LOCKF_ENABLED_SOFTIRQS_READ) +#define LOCKF_USED_IN_IRQ_READ \ + (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ) + +#define MAX_LOCKDEP_SUBCLASSES 8UL + +/* + * Lock-classes are keyed via unique addresses, by embedding the + * lockclass-key into the kernel (or module) .data section. (For + * static locks we use the lock address itself as the key.) 
+ */
+struct lockdep_subclass_key {
+	char __one_byte;
+} __attribute__ ((__packed__));
+
+struct lock_class_key {
+	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
+};
+
+/*
+ * The lock-class itself:
+ */
+struct lock_class {
+	/*
+	 * class-hash:
+	 */
+	struct list_head		hash_entry;
+
+	/*
+	 * global list of all lock-classes:
+	 */
+	struct list_head		lock_entry;
+
+	struct lockdep_subclass_key	*key;
+	unsigned int			subclass;
+
+	/*
+	 * IRQ/softirq usage tracking bits:
+	 */
+	unsigned long			usage_mask;
+	struct stack_trace		usage_traces[LOCK_USAGE_STATES];
+
+	/*
+	 * These fields represent a directed graph of lock dependencies,
+	 * to every node we attach a list of "forward" and a list of
+	 * "backward" graph nodes.
+	 */
+	struct list_head		locks_after, locks_before;
+
+	/*
+	 * Generation counter, when doing certain classes of graph walking,
+	 * to ensure that we check one node only once:
+	 */
+	unsigned int			version;
+
+	/*
+	 * Statistics counter:
+	 */
+	unsigned long			ops;
+
+	const char			*name;
+	int				name_version;
+};
+
+/*
+ * Map the lock object (the lock instance) to the lock-class object.
+ * This is embedded into specific lock instances:
+ */
+struct lockdep_map {
+	struct lock_class_key		*key;
+	struct lock_class		*class[MAX_LOCKDEP_SUBCLASSES];
+	const char			*name;
+};
+
+/*
+ * Every lock has a list of other locks that were taken after it.
+ * We only grow the list, never remove from it:
+ */
+struct lock_list {
+	struct list_head		entry;
+	struct lock_class		*class;
+	struct stack_trace		trace;
+};
+
+/*
+ * We record lock dependency chains, so that we can cache them:
+ */
+struct lock_chain {
+	struct list_head		entry;
+	u64				chain_key;
+};
+
+struct held_lock {
+	/*
+	 * One-way hash of the dependency chain up to this point. We
+	 * hash the hashes step by step as the dependency chain grows.
+	 *
+	 * We use it for dependency-caching and we skip detection
+	 * passes and dependency-updates if there is a cache-hit, so
+	 * it is absolutely critical for 100% coverage of the validator
+	 * to have a unique key value for every unique dependency path
+	 * that can occur in the system, to make a unique hash value
+	 * as likely as possible - hence the 64-bit width.
+	 *
+	 * The task struct holds the current hash value (initialized
+	 * with zero), here we store the previous hash value:
+	 */
+	u64				prev_chain_key;
+	struct lock_class		*class;
+	unsigned long			acquire_ip;
+	struct lockdep_map		*instance;
+
+	/*
+	 * The lock-stack is unified in that the lock chains of interrupt
+	 * contexts nest on top of process context chains, but we 'separate'
+	 * the hashes by starting with 0 if we cross into an interrupt
+	 * context, and we also do not add cross-context lock
+	 * dependencies - the lock usage graph walking covers that area
+	 * anyway, and we'd just unnecessarily increase the number of
+	 * dependencies otherwise. [Note: hardirq and softirq contexts
+	 * are separated from each other too.]
+	 *
+	 * The following field is used to detect when we cross into an
+	 * interrupt context:
+	 */
+	int				irq_context;
+	int				trylock;
+	int				read;
+	int				check;
+	int				hardirqs_off;
+};
+
+/*
+ * Initialization, self-test and debugging-output methods:
+ */
+extern void lockdep_init(void);
+extern void lockdep_info(void);
+extern void lockdep_reset(void);
+extern void lockdep_reset_lock(struct lockdep_map *lock);
+extern void lockdep_free_key_range(void *start, unsigned long size);
+
+extern void lockdep_off(void);
+extern void lockdep_on(void);
+extern int lockdep_internal(void);
+
+/*
+ * These methods are used by specific locking variants (spinlocks,
+ * rwlocks, mutexes and rwsems) to pass init/acquire/release events
+ * to lockdep:
+ */
+
+extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
+			     struct lock_class_key *key);
+
+/*
+ * Reinitialize a lock key - for cases where there is special locking or
+ * special initialization of locks so that the validator gets the scope
+ * of dependencies wrong: they are either too broad (they need a class-split)
+ * or they are too narrow (they suffer from a false class-split):
+ */
+#define lockdep_set_class(lock, key) \
+		lockdep_init_map(&(lock)->dep_map, #key, key)
+#define lockdep_set_class_and_name(lock, key, name) \
+		lockdep_init_map(&(lock)->dep_map, name, key)
+
+/*
+ * Acquire a lock.
+ *
+ * Values for "read":
+ *
+ *   0: exclusive (write) acquire
+ *   1: read-acquire (no recursion allowed)
+ *   2: read-acquire with same-instance recursion allowed
+ *
+ * Values for check:
+ *
+ *   0: disabled
+ *   1: simple checks (freeing, held-at-exit-time, etc.)
+ *   2: full validation
+ */
+extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
+			 int trylock, int read, int check, unsigned long ip);
+
+extern void lock_release(struct lockdep_map *lock, int nested,
+			 unsigned long ip);
+
+# define INIT_LOCKDEP				.lockdep_recursion = 0,
+
+#else /* !LOCKDEP */
+
+static inline void lockdep_off(void)
+{
+}
+
+static inline void lockdep_on(void)
+{
+}
+
+static inline int lockdep_internal(void)
+{
+	return 0;
+}
+
+# define lock_acquire(l, s, t, r, c, i)		do { } while (0)
+# define lock_release(l, n, i)			do { } while (0)
+# define lockdep_init()				do { } while (0)
+# define lockdep_info()				do { } while (0)
+# define lockdep_init_map(lock, name, key)	do { (void)(key); } while (0)
+# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
+# define lockdep_set_class_and_name(lock, key, name) \
+		do { (void)(key); } while (0)
+# define INIT_LOCKDEP
+# define lockdep_reset()		do { debug_locks = 1; } while (0)
+# define lockdep_free_key_range(start, size)	do { } while (0)
+/*
+ * The class key takes no space if lockdep is disabled:
+ */
+struct lock_class_key { };
+#endif /* !LOCKDEP */
+
+#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS)
+extern void early_init_irq_lock_class(void);
+#else
+# define early_init_irq_lock_class()		do { } while (0)
+#endif
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+extern void early_boot_irqs_off(void);
+extern void early_boot_irqs_on(void);
+#else
+# define early_boot_irqs_off()			do { } while (0)
+# define early_boot_irqs_on()			do { } while (0)
+#endif
+
+/*
+ * For trivial one-depth nesting of a lock-class, the following
+ * global define can be used. (Subsystems with multiple levels
+ * of nesting should define their own lock-nesting subclasses.)
+ */
+#define SINGLE_DEPTH_NESTING			1
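The "subclass" argument of lock_acquire() is what the *_nested() lock APIs ultimately feed into, and SINGLE_DEPTH_NESTING is the conventional subclass for the common take-the-same-class-twice pattern. A minimal sketch using mutex_lock_nested(), which is declared in the mutex.h hunk further down (struct demo_node and the parent-before-child ordering rule are hypothetical, for illustration only):

	#include <linux/mutex.h>

	struct demo_node {			/* hypothetical object type */
		struct mutex lock;
		struct demo_node *parent;
	};

	static void demo_lock_pair(struct demo_node *parent,
				   struct demo_node *child)
	{
		mutex_lock(&parent->lock);	/* subclass 0 */
		/*
		 * Second lock of the same class: annotate it as subclass 1
		 * (SINGLE_DEPTH_NESTING) so the validator does not report
		 * a recursive deadlock on the class.
		 */
		mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);

		/* ... operate on both objects ... */

		mutex_unlock(&child->lock);
		mutex_unlock(&parent->lock);
	}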
+
+/*
+ * Map the dependency ops to NOP or to real lockdep ops, depending
+ * on the per lock-class debug mode:
+ */
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# ifdef CONFIG_PROVE_LOCKING
+#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
+# else
+#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
+# endif
+# define spin_release(l, n, i)			lock_release(l, n, i)
+#else
+# define spin_acquire(l, s, t, i)		do { } while (0)
+# define spin_release(l, n, i)			do { } while (0)
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# ifdef CONFIG_PROVE_LOCKING
+#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
+#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 2, i)
+# else
+#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
+#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 1, i)
+# endif
+# define rwlock_release(l, n, i)		lock_release(l, n, i)
+#else
+# define rwlock_acquire(l, s, t, i)		do { } while (0)
+# define rwlock_acquire_read(l, s, t, i)	do { } while (0)
+# define rwlock_release(l, n, i)		do { } while (0)
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# ifdef CONFIG_PROVE_LOCKING
+#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
+# else
+#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
+# endif
+# define mutex_release(l, n, i)			lock_release(l, n, i)
+#else
+# define mutex_acquire(l, s, t, i)		do { } while (0)
+# define mutex_release(l, n, i)			do { } while (0)
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# ifdef CONFIG_PROVE_LOCKING
+#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
+#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 2, i)
+# else
+#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
+#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 1, i)
+# endif
+# define rwsem_release(l, n, i)			lock_release(l, n, i)
+#else
+# define rwsem_acquire(l, s, t, i)		do { } while (0)
+# define rwsem_acquire_read(l, s, t, i)		do { } while (0)
+# define rwsem_release(l, n, i)			do { } while (0)
+#endif
+
+#endif /* __LINUX_LOCKDEP_H */
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index 5b584dafb5a..b03cfb91e22 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -40,7 +40,6 @@ struct miscdevice {
 	struct list_head list;
 	struct device *dev;
 	struct class_device *class;
-	char devfs_name[64];
 };

 extern int misc_register(struct miscdevice * misc);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index c41a1299b8c..990957e0929 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -14,6 +14,7 @@
 #include <linux/prio_tree.h>
 #include <linux/fs.h>
 #include <linux/mutex.h>
+#include <linux/debug_locks.h>

 struct mempolicy;
 struct anon_vma;
@@ -36,7 +37,6 @@ extern int sysctl_legacy_va_layout;
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/processor.h>
-#include <asm/atomic.h>

 #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
@@ -515,6 +515,11 @@ static inline void set_page_links(struct page *page, unsigned long zone,
 	set_page_section(page, pfn_to_section_nr(pfn));
 }

+/*
+ * Some inline functions in vmstat.h depend on page_zone()
+ */
+#include <linux/vmstat.h>
+
 #ifndef CONFIG_DISCONTIGMEM
 /* The array of struct pages - for discontigmem use pgdat->lmem_map */
 extern struct page *mem_map;
@@ -1030,13 +1035,6 @@ static inline void vm_stat_account(struct mm_struct *mm,
 }
 #endif /* CONFIG_PROC_FS */

-static inline void
-debug_check_no_locks_freed(const void *from, unsigned long len) -{ - mutex_debug_check_no_locks_freed(from, len); - rt_mutex_debug_check_no_locks_freed(from, len); -} - #ifndef CONFIG_DEBUG_PAGEALLOC static inline void kernel_map_pages(struct page *page, int numpages, int enable) diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index d6120fa6911..656b588a9f9 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -46,6 +46,27 @@ struct zone_padding { #define ZONE_PADDING(name) #endif +enum zone_stat_item { + NR_ANON_PAGES, /* Mapped anonymous pages */ + NR_FILE_MAPPED, /* pagecache pages mapped into pagetables. + only modified from process context */ + NR_FILE_PAGES, + NR_SLAB, /* Pages used by slab allocator */ + NR_PAGETABLE, /* used for pagetables */ + NR_FILE_DIRTY, + NR_WRITEBACK, + NR_UNSTABLE_NFS, /* NFS unstable pages */ + NR_BOUNCE, +#ifdef CONFIG_NUMA + NUMA_HIT, /* allocated in intended node */ + NUMA_MISS, /* allocated in non intended node */ + NUMA_FOREIGN, /* was intended here, hit elsewhere */ + NUMA_INTERLEAVE_HIT, /* interleaver preferred this zone */ + NUMA_LOCAL, /* allocation from local node */ + NUMA_OTHER, /* allocation from other node */ +#endif + NR_VM_ZONE_STAT_ITEMS }; + struct per_cpu_pages { int count; /* number of pages in the list */ int high; /* high watermark, emptying needed */ @@ -55,13 +76,8 @@ struct per_cpu_pages { struct per_cpu_pageset { struct per_cpu_pages pcp[2]; /* 0: hot. 1: cold */ -#ifdef CONFIG_NUMA - unsigned long numa_hit; /* allocated in intended node */ - unsigned long numa_miss; /* allocated in non intended node */ - unsigned long numa_foreign; /* was intended here, hit elsewhere */ - unsigned long interleave_hit; /* interleaver prefered this zone */ - unsigned long local_node; /* allocation from local node */ - unsigned long other_node; /* allocation from other node */ +#ifdef CONFIG_SMP + s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS]; #endif } ____cacheline_aligned_in_smp; @@ -134,6 +150,10 @@ struct zone { unsigned long lowmem_reserve[MAX_NR_ZONES]; #ifdef CONFIG_NUMA + /* + * zone reclaim becomes active if more unmapped pages exist. + */ + unsigned long min_unmapped_ratio; struct per_cpu_pageset *pageset[NR_CPUS]; #else struct per_cpu_pageset pageset[NR_CPUS]; @@ -165,12 +185,8 @@ struct zone { /* A count of how many reclaimers are scanning this zone */ atomic_t reclaim_in_progress; - /* - * timestamp (in jiffies) of the last zone reclaim that did not - * result in freeing of pages. This is used to avoid repeated scans - * if all memory in the zone is in use. - */ - unsigned long last_unsuccessful_zone_reclaim; + /* Zone statistics */ + atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; /* * prev_priority holds the scanning priority for this zone. It is @@ -402,6 +418,8 @@ int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *); int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *); +int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int, + struct file *, void __user *, size_t *, loff_t *); #include <linux/topology.h> /* Returns the number of the current Node. 
*/ diff --git a/include/linux/module.h b/include/linux/module.h index 9ebbb74b7b7..d06c74fb8c2 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -203,6 +203,15 @@ void *__symbol_get_gpl(const char *symbol); #define EXPORT_SYMBOL_GPL_FUTURE(sym) \ __EXPORT_SYMBOL(sym, "_gpl_future") + +#ifdef CONFIG_UNUSED_SYMBOLS +#define EXPORT_UNUSED_SYMBOL(sym) __EXPORT_SYMBOL(sym, "_unused") +#define EXPORT_UNUSED_SYMBOL_GPL(sym) __EXPORT_SYMBOL(sym, "_unused_gpl") +#else +#define EXPORT_UNUSED_SYMBOL(sym) +#define EXPORT_UNUSED_SYMBOL_GPL(sym) +#endif + #endif struct module_ref @@ -261,6 +270,15 @@ struct module unsigned int num_gpl_syms; const unsigned long *gpl_crcs; + /* unused exported symbols. */ + const struct kernel_symbol *unused_syms; + unsigned int num_unused_syms; + const unsigned long *unused_crcs; + /* GPL-only, unused exported symbols. */ + const struct kernel_symbol *unused_gpl_syms; + unsigned int num_unused_gpl_syms; + const unsigned long *unused_gpl_crcs; + /* symbols that will be GPL-only in the near future. */ const struct kernel_symbol *gpl_future_syms; unsigned int num_gpl_future_syms; @@ -340,6 +358,7 @@ static inline int module_is_live(struct module *mod) /* Is this address in a module? (second is with no locks, for oops) */ struct module *module_text_address(unsigned long addr); struct module *__module_text_address(unsigned long addr); +int is_module_address(unsigned long addr); /* Returns module and fills in value, defined and namebuf, or NULL if symnum out of range. */ @@ -456,6 +475,8 @@ void module_remove_driver(struct device_driver *); #define EXPORT_SYMBOL(sym) #define EXPORT_SYMBOL_GPL(sym) #define EXPORT_SYMBOL_GPL_FUTURE(sym) +#define EXPORT_UNUSED_SYMBOL(sym) +#define EXPORT_UNUSED_SYMBOL_GPL(sym) /* Given an address, look for it in the exception tables. 
*/ static inline const struct exception_table_entry * @@ -476,6 +497,11 @@ static inline struct module *__module_text_address(unsigned long addr) return NULL; } +static inline int is_module_address(unsigned long addr) +{ + return 0; +} + /* Get/put a kernel symbol (calls should be symmetric) */ #define symbol_get(x) ({ extern typeof(x) x __attribute__((weak)); &(x); }) #define symbol_put(x) do { } while(0) diff --git a/include/linux/mutex-debug.h b/include/linux/mutex-debug.h index 8b5769f0046..2537285e106 100644 --- a/include/linux/mutex-debug.h +++ b/include/linux/mutex-debug.h @@ -2,22 +2,22 @@ #define __LINUX_MUTEX_DEBUG_H #include <linux/linkage.h> +#include <linux/lockdep.h> /* * Mutexes - debugging helpers: */ -#define __DEBUG_MUTEX_INITIALIZER(lockname) \ - , .held_list = LIST_HEAD_INIT(lockname.held_list), \ - .name = #lockname , .magic = &lockname +#define __DEBUG_MUTEX_INITIALIZER(lockname) \ + , .magic = &lockname -#define mutex_init(sem) __mutex_init(sem, __FUNCTION__) +#define mutex_init(mutex) \ +do { \ + static struct lock_class_key __key; \ + \ + __mutex_init((mutex), #mutex, &__key); \ +} while (0) extern void FASTCALL(mutex_destroy(struct mutex *lock)); -extern void mutex_debug_show_all_locks(void); -extern void mutex_debug_show_held_locks(struct task_struct *filter); -extern void mutex_debug_check_no_locks_held(struct task_struct *task); -extern void mutex_debug_check_no_locks_freed(const void *from, unsigned long len); - #endif diff --git a/include/linux/mutex.h b/include/linux/mutex.h index f1ac507fa20..27c48daa318 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h @@ -13,6 +13,7 @@ #include <linux/list.h> #include <linux/spinlock_types.h> #include <linux/linkage.h> +#include <linux/lockdep.h> #include <asm/atomic.h> @@ -50,11 +51,12 @@ struct mutex { struct list_head wait_list; #ifdef CONFIG_DEBUG_MUTEXES struct thread_info *owner; - struct list_head held_list; - unsigned long acquire_ip; const char *name; void *magic; #endif +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif }; /* @@ -74,24 +76,34 @@ struct mutex_waiter { # include <linux/mutex-debug.h> #else # define __DEBUG_MUTEX_INITIALIZER(lockname) -# define mutex_init(mutex) __mutex_init(mutex, NULL) +# define mutex_init(mutex) \ +do { \ + static struct lock_class_key __key; \ + \ + __mutex_init((mutex), #mutex, &__key); \ +} while (0) # define mutex_destroy(mutex) do { } while (0) -# define mutex_debug_show_all_locks() do { } while (0) -# define mutex_debug_show_held_locks(p) do { } while (0) -# define mutex_debug_check_no_locks_held(task) do { } while (0) -# define mutex_debug_check_no_locks_freed(from, len) do { } while (0) +#endif + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ + , .dep_map = { .name = #lockname } +#else +# define __DEP_MAP_MUTEX_INITIALIZER(lockname) #endif #define __MUTEX_INITIALIZER(lockname) \ { .count = ATOMIC_INIT(1) \ , .wait_lock = SPIN_LOCK_UNLOCKED \ , .wait_list = LIST_HEAD_INIT(lockname.wait_list) \ - __DEBUG_MUTEX_INITIALIZER(lockname) } + __DEBUG_MUTEX_INITIALIZER(lockname) \ + __DEP_MAP_MUTEX_INITIALIZER(lockname) } #define DEFINE_MUTEX(mutexname) \ struct mutex mutexname = __MUTEX_INITIALIZER(mutexname) -extern void fastcall __mutex_init(struct mutex *lock, const char *name); +extern void __mutex_init(struct mutex *lock, const char *name, + struct lock_class_key *key); /*** * mutex_is_locked - is the mutex locked @@ -110,6 +122,13 @@ static inline int fastcall mutex_is_locked(struct mutex *lock) */ extern 
void fastcall mutex_lock(struct mutex *lock); extern int fastcall mutex_lock_interruptible(struct mutex *lock); + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass); +#else +# define mutex_lock_nested(lock, subclass) mutex_lock(lock) +#endif + /* * NOTE: mutex_trylock() follows the spin_trylock() convention, * not the down_trylock() convention! diff --git a/include/linux/net.h b/include/linux/net.h index 385e68f5bd9..b20c53c7441 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -61,6 +61,7 @@ typedef enum { #define SOCK_ASYNC_WAITDATA 1 #define SOCK_NOSPACE 2 #define SOCK_PASSCRED 3 +#define SOCK_PASSSEC 4 #ifndef ARCH_HAS_SOCKET_TYPES /** diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 03cd7551a7a..85f99f60dee 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -313,8 +313,12 @@ struct net_device /* Segmentation offload features */ #define NETIF_F_GSO_SHIFT 16 +#define NETIF_F_GSO_MASK 0xffff0000 #define NETIF_F_TSO (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT) -#define NETIF_F_UFO (SKB_GSO_UDPV4 << NETIF_F_GSO_SHIFT) +#define NETIF_F_UFO (SKB_GSO_UDP << NETIF_F_GSO_SHIFT) +#define NETIF_F_GSO_ROBUST (SKB_GSO_DODGY << NETIF_F_GSO_SHIFT) +#define NETIF_F_TSO_ECN (SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT) +#define NETIF_F_TSO6 (SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT) #define NETIF_F_GEN_CSUM (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM) #define NETIF_F_ALL_CSUM (NETIF_F_IP_CSUM | NETIF_F_GEN_CSUM) @@ -543,7 +547,8 @@ struct packet_type { struct net_device *, struct packet_type *, struct net_device *); - struct sk_buff *(*gso_segment)(struct sk_buff *skb, int sg); + struct sk_buff *(*gso_segment)(struct sk_buff *skb, + int features); void *af_packet_priv; struct list_head list; }; @@ -968,7 +973,7 @@ extern int netdev_max_backlog; extern int weight_p; extern int netdev_set_master(struct net_device *dev, struct net_device *master); extern int skb_checksum_help(struct sk_buff *skb, int inward); -extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int sg); +extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features); #ifdef CONFIG_BUG extern void netdev_rx_csum_fault(struct net_device *dev); #else @@ -988,11 +993,21 @@ extern void dev_seq_stop(struct seq_file *seq, void *v); extern void linkwatch_run_queue(void); +static inline int net_gso_ok(int features, int gso_type) +{ + int feature = gso_type << NETIF_F_GSO_SHIFT; + return (features & feature) == feature; +} + +static inline int skb_gso_ok(struct sk_buff *skb, int features) +{ + return net_gso_ok(features, skb_shinfo(skb)->gso_size ? 
+ skb_shinfo(skb)->gso_type : 0); +} + static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb) { - int feature = skb_shinfo(skb)->gso_type << NETIF_F_GSO_SHIFT; - return skb_shinfo(skb)->gso_size && - (dev->features & feature) != feature; + return !skb_gso_ok(skb, dev->features); } #endif /* __KERNEL__ */ diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 0a1740b2532..55ea853d57b 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -9,7 +9,6 @@ #ifndef _LINUX_NFS_FS_H #define _LINUX_NFS_FS_H -#include <linux/config.h> #include <linux/in.h> #include <linux/mm.h> #include <linux/pagemap.h> @@ -335,7 +334,7 @@ extern struct inode_operations nfs_file_inode_operations; extern struct inode_operations nfs3_file_inode_operations; #endif /* CONFIG_NFS_V3 */ extern const struct file_operations nfs_file_operations; -extern struct address_space_operations nfs_file_aops; +extern const struct address_space_operations nfs_file_aops; static inline struct rpc_cred *nfs_file_cred(struct file *file) { diff --git a/include/linux/notifier.h b/include/linux/notifier.h index 51dbab9710c..7ff386a6ae8 100644 --- a/include/linux/notifier.h +++ b/include/linux/notifier.h @@ -65,7 +65,7 @@ struct raw_notifier_head { } while (0) #define ATOMIC_NOTIFIER_INIT(name) { \ - .lock = SPIN_LOCK_UNLOCKED, \ + .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ .head = NULL } #define BLOCKING_NOTIFIER_INIT(name) { \ .rwsem = __RWSEM_INITIALIZER((name).rwsem), \ diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 0c076d58c67..5748642e9f3 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -5,12 +5,8 @@ #ifndef PAGE_FLAGS_H #define PAGE_FLAGS_H -#include <linux/percpu.h> -#include <linux/cache.h> #include <linux/types.h> -#include <asm/pgtable.h> - /* * Various page->flags bits: * @@ -103,134 +99,6 @@ #endif /* - * Global page accounting. One instance per CPU. Only unsigned longs are - * allowed. - * - * - Fields can be modified with xxx_page_state and xxx_page_state_zone at - * any time safely (which protects the instance from modification by - * interrupt. - * - The __xxx_page_state variants can be used safely when interrupts are - * disabled. - * - The __xxx_page_state variants can be used if the field is only - * modified from process context and protected from preemption, or only - * modified from interrupt context. In this case, the field should be - * commented here. - */ -struct page_state { - unsigned long nr_dirty; /* Dirty writeable pages */ - unsigned long nr_writeback; /* Pages under writeback */ - unsigned long nr_unstable; /* NFS unstable pages */ - unsigned long nr_page_table_pages;/* Pages used for pagetables */ - unsigned long nr_mapped; /* mapped into pagetables. - * only modified from process context */ - unsigned long nr_slab; /* In slab */ -#define GET_PAGE_STATE_LAST nr_slab - - /* - * The below are zeroed by get_page_state(). Use get_full_page_state() - * to add up all these. 
- */ - unsigned long pgpgin; /* Disk reads */ - unsigned long pgpgout; /* Disk writes */ - unsigned long pswpin; /* swap reads */ - unsigned long pswpout; /* swap writes */ - - unsigned long pgalloc_high; /* page allocations */ - unsigned long pgalloc_normal; - unsigned long pgalloc_dma32; - unsigned long pgalloc_dma; - - unsigned long pgfree; /* page freeings */ - unsigned long pgactivate; /* pages moved inactive->active */ - unsigned long pgdeactivate; /* pages moved active->inactive */ - - unsigned long pgfault; /* faults (major+minor) */ - unsigned long pgmajfault; /* faults (major only) */ - - unsigned long pgrefill_high; /* inspected in refill_inactive_zone */ - unsigned long pgrefill_normal; - unsigned long pgrefill_dma32; - unsigned long pgrefill_dma; - - unsigned long pgsteal_high; /* total highmem pages reclaimed */ - unsigned long pgsteal_normal; - unsigned long pgsteal_dma32; - unsigned long pgsteal_dma; - - unsigned long pgscan_kswapd_high;/* total highmem pages scanned */ - unsigned long pgscan_kswapd_normal; - unsigned long pgscan_kswapd_dma32; - unsigned long pgscan_kswapd_dma; - - unsigned long pgscan_direct_high;/* total highmem pages scanned */ - unsigned long pgscan_direct_normal; - unsigned long pgscan_direct_dma32; - unsigned long pgscan_direct_dma; - - unsigned long pginodesteal; /* pages reclaimed via inode freeing */ - unsigned long slabs_scanned; /* slab objects scanned */ - unsigned long kswapd_steal; /* pages reclaimed by kswapd */ - unsigned long kswapd_inodesteal;/* reclaimed via kswapd inode freeing */ - unsigned long pageoutrun; /* kswapd's calls to page reclaim */ - unsigned long allocstall; /* direct reclaim calls */ - - unsigned long pgrotated; /* pages rotated to tail of the LRU */ - unsigned long nr_bounce; /* pages for bounce buffers */ -}; - -extern void get_page_state(struct page_state *ret); -extern void get_page_state_node(struct page_state *ret, int node); -extern void get_full_page_state(struct page_state *ret); -extern unsigned long read_page_state_offset(unsigned long offset); -extern void mod_page_state_offset(unsigned long offset, unsigned long delta); -extern void __mod_page_state_offset(unsigned long offset, unsigned long delta); - -#define read_page_state(member) \ - read_page_state_offset(offsetof(struct page_state, member)) - -#define mod_page_state(member, delta) \ - mod_page_state_offset(offsetof(struct page_state, member), (delta)) - -#define __mod_page_state(member, delta) \ - __mod_page_state_offset(offsetof(struct page_state, member), (delta)) - -#define inc_page_state(member) mod_page_state(member, 1UL) -#define dec_page_state(member) mod_page_state(member, 0UL - 1) -#define add_page_state(member,delta) mod_page_state(member, (delta)) -#define sub_page_state(member,delta) mod_page_state(member, 0UL - (delta)) - -#define __inc_page_state(member) __mod_page_state(member, 1UL) -#define __dec_page_state(member) __mod_page_state(member, 0UL - 1) -#define __add_page_state(member,delta) __mod_page_state(member, (delta)) -#define __sub_page_state(member,delta) __mod_page_state(member, 0UL - (delta)) - -#define page_state(member) (*__page_state(offsetof(struct page_state, member))) - -#define state_zone_offset(zone, member) \ -({ \ - unsigned offset; \ - if (is_highmem(zone)) \ - offset = offsetof(struct page_state, member##_high); \ - else if (is_normal(zone)) \ - offset = offsetof(struct page_state, member##_normal); \ - else if (is_dma32(zone)) \ - offset = offsetof(struct page_state, member##_dma32); \ - else \ - offset = 
offsetof(struct page_state, member##_dma); \ - offset; \ -}) - -#define __mod_page_state_zone(zone, member, delta) \ - do { \ - __mod_page_state_offset(state_zone_offset(zone, member), (delta)); \ - } while (0) - -#define mod_page_state_zone(zone, member, delta) \ - do { \ - mod_page_state_offset(state_zone_offset(zone, member), (delta)); \ - } while (0) - -/* * Manipulation of page state flags */ #define PageLocked(page) \ @@ -254,7 +122,14 @@ extern void __mod_page_state_offset(unsigned long offset, unsigned long delta); #define TestClearPageReferenced(page) test_and_clear_bit(PG_referenced, &(page)->flags) #define PageUptodate(page) test_bit(PG_uptodate, &(page)->flags) -#ifndef SetPageUptodate +#ifdef CONFIG_S390 +#define SetPageUptodate(_page) \ + do { \ + struct page *__page = (_page); \ + if (!test_and_set_bit(PG_uptodate, &__page->flags)) \ + page_test_and_clear_dirty(_page); \ + } while (0) +#else #define SetPageUptodate(page) set_bit(PG_uptodate, &(page)->flags) #endif #define ClearPageUptodate(page) clear_bit(PG_uptodate, &(page)->flags) @@ -306,7 +181,7 @@ extern void __mod_page_state_offset(unsigned long offset, unsigned long delta); do { \ if (!test_and_set_bit(PG_writeback, \ &(page)->flags)) \ - inc_page_state(nr_writeback); \ + inc_zone_page_state(page, NR_WRITEBACK); \ } while (0) #define TestSetPageWriteback(page) \ ({ \ @@ -314,14 +189,14 @@ extern void __mod_page_state_offset(unsigned long offset, unsigned long delta); ret = test_and_set_bit(PG_writeback, \ &(page)->flags); \ if (!ret) \ - inc_page_state(nr_writeback); \ + inc_zone_page_state(page, NR_WRITEBACK); \ ret; \ }) #define ClearPageWriteback(page) \ do { \ if (test_and_clear_bit(PG_writeback, \ &(page)->flags)) \ - dec_page_state(nr_writeback); \ + dec_zone_page_state(page, NR_WRITEBACK); \ } while (0) #define TestClearPageWriteback(page) \ ({ \ @@ -329,7 +204,7 @@ extern void __mod_page_state_offset(unsigned long offset, unsigned long delta); ret = test_and_clear_bit(PG_writeback, \ &(page)->flags); \ if (ret) \ - dec_page_state(nr_writeback); \ + dec_zone_page_state(page, NR_WRITEBACK); \ ret; \ }) diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 1245df7141a..0a2f5d27f60 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -113,51 +113,6 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping, extern void remove_from_page_cache(struct page *page); extern void __remove_from_page_cache(struct page *page); -extern atomic_t nr_pagecache; - -#ifdef CONFIG_SMP - -#define PAGECACHE_ACCT_THRESHOLD max(16, NR_CPUS * 2) -DECLARE_PER_CPU(long, nr_pagecache_local); - -/* - * pagecache_acct implements approximate accounting for pagecache. - * vm_enough_memory() do not need high accuracy. Writers will keep - * an offset in their per-cpu arena and will spill that into the - * global count whenever the absolute value of the local count - * exceeds the counter's threshold. - * - * MUST be protected from preemption. - * current protection is mapping->page_lock. 
- */ -static inline void pagecache_acct(int count) -{ - long *local; - - local = &__get_cpu_var(nr_pagecache_local); - *local += count; - if (*local > PAGECACHE_ACCT_THRESHOLD || *local < -PAGECACHE_ACCT_THRESHOLD) { - atomic_add(*local, &nr_pagecache); - *local = 0; - } -} - -#else - -static inline void pagecache_acct(int count) -{ - atomic_add(count, &nr_pagecache); -} -#endif - -static inline unsigned long get_page_cache_size(void) -{ - int ret = atomic_read(&nr_pagecache); - if (unlikely(ret < 0)) - ret = 0; - return ret; -} - /* * Return byte-offset into filesystem object for page. */ diff --git a/include/linux/pci.h b/include/linux/pci.h index 62a8c22f5f6..983fca251b2 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -404,8 +404,8 @@ int pcibios_enable_device(struct pci_dev *, int mask); char *pcibios_setup (char *str); /* Used only when drivers/pci/setup.c is used */ -void pcibios_align_resource(void *, struct resource *, - unsigned long, unsigned long); +void pcibios_align_resource(void *, struct resource *, resource_size_t, + resource_size_t); void pcibios_update_irq(struct pci_dev *, int irq); /* Generic PCI functions used internally */ @@ -532,10 +532,10 @@ void pci_release_region(struct pci_dev *, int); /* drivers/pci/bus.c */ int pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res, - unsigned long size, unsigned long align, - unsigned long min, unsigned int type_mask, + resource_size_t size, resource_size_t align, + resource_size_t min, unsigned int type_mask, void (*alignf)(void *, struct resource *, - unsigned long, unsigned long), + resource_size_t, resource_size_t), void *alignf_data); void pci_enable_bridges(struct pci_bus *bus); @@ -730,7 +730,8 @@ static inline char *pci_name(struct pci_dev *pdev) */ #ifndef HAVE_ARCH_PCI_RESOURCE_TO_USER static inline void pci_resource_to_user(const struct pci_dev *dev, int bar, - const struct resource *rsrc, u64 *start, u64 *end) + const struct resource *rsrc, resource_size_t *start, + resource_size_t *end) { *start = rsrc->start; *end = rsrc->end; diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 9ae6b1a7536..685081c0134 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -728,7 +728,9 @@ #define PCI_DEVICE_ID_TI_TVP4020 0x3d07 #define PCI_DEVICE_ID_TI_4450 0x8011 #define PCI_DEVICE_ID_TI_XX21_XX11 0x8031 +#define PCI_DEVICE_ID_TI_XX21_XX11_SD 0x8034 #define PCI_DEVICE_ID_TI_X515 0x8036 +#define PCI_DEVICE_ID_TI_XX12 0x8039 #define PCI_DEVICE_ID_TI_1130 0xac12 #define PCI_DEVICE_ID_TI_1031 0xac13 #define PCI_DEVICE_ID_TI_1131 0xac15 @@ -1441,6 +1443,7 @@ #define PCI_DEVICE_ID_RICOH_RL5C475 0x0475 #define PCI_DEVICE_ID_RICOH_RL5C476 0x0476 #define PCI_DEVICE_ID_RICOH_RL5C478 0x0478 +#define PCI_DEVICE_ID_RICOH_R5C822 0x0822 #define PCI_VENDOR_ID_DLINK 0x1186 #define PCI_DEVICE_ID_DLINK_DGE510T 0x4c00 diff --git a/include/linux/plist.h b/include/linux/plist.h index 3404faef542..b95818a037a 100644 --- a/include/linux/plist.h +++ b/include/linux/plist.h @@ -73,6 +73,7 @@ #ifndef _LINUX_PLIST_H_ #define _LINUX_PLIST_H_ +#include <linux/kernel.h> #include <linux/list.h> #include <linux/spinlock_types.h> diff --git a/include/linux/pnp.h b/include/linux/pnp.h index 93b0959eb40..ab8a8dd8d64 100644 --- a/include/linux/pnp.h +++ b/include/linux/pnp.h @@ -389,7 +389,8 @@ int pnp_start_dev(struct pnp_dev *dev); int pnp_stop_dev(struct pnp_dev *dev); int pnp_activate_dev(struct pnp_dev *dev); int pnp_disable_dev(struct pnp_dev *dev); -void pnp_resource_change(struct resource *resource, 
unsigned long start, unsigned long size); +void pnp_resource_change(struct resource *resource, resource_size_t start, + resource_size_t size); /* protocol helpers */ int pnp_is_active(struct pnp_dev * dev); @@ -434,7 +435,9 @@ static inline int pnp_start_dev(struct pnp_dev *dev) { return -ENODEV; } static inline int pnp_stop_dev(struct pnp_dev *dev) { return -ENODEV; } static inline int pnp_activate_dev(struct pnp_dev *dev) { return -ENODEV; } static inline int pnp_disable_dev(struct pnp_dev *dev) { return -ENODEV; } -static inline void pnp_resource_change(struct resource *resource, unsigned long start, unsigned long size) { } +static inline void pnp_resource_change(struct resource *resource, + resource_size_t start, + resource_size_t size) { } /* protocol helpers */ static inline int pnp_is_active(struct pnp_dev * dev) { return 0; } diff --git a/include/linux/poison.h b/include/linux/poison.h index a5347c02432..3e628f990fd 100644 --- a/include/linux/poison.h +++ b/include/linux/poison.h @@ -44,6 +44,11 @@ /********** drivers/atm/ **********/ #define ATM_POISON_FREE 0x12 +#define ATM_POISON 0xdeadbeef + +/********** net/ **********/ +#define NEIGHBOR_DEAD 0xdeadbeef +#define NETFILTER_LINK_POISON 0xdead57ac /********** kernel/mutexes **********/ #define MUTEX_DEBUG_INIT 0x11 diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 48dfe00070c..b4ca73d6589 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -163,14 +163,22 @@ extern int rcu_needs_cpu(int cpu); * * It is illegal to block while in an RCU read-side critical section. */ -#define rcu_read_lock() preempt_disable() +#define rcu_read_lock() \ + do { \ + preempt_disable(); \ + __acquire(RCU); \ + } while(0) /** * rcu_read_unlock - marks the end of an RCU read-side critical section. * * See rcu_read_lock() for more information. */ -#define rcu_read_unlock() preempt_enable() +#define rcu_read_unlock() \ + do { \ + __release(RCU); \ + preempt_enable(); \ + } while(0) /* * So where is rcu_write_lock()? It does not exist, as there is no @@ -193,14 +201,22 @@ extern int rcu_needs_cpu(int cpu); * can use just rcu_read_lock(). * */ -#define rcu_read_lock_bh() local_bh_disable() +#define rcu_read_lock_bh() \ + do { \ + local_bh_disable(); \ + __acquire(RCU_BH); \ + } while(0) /* * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section * * See rcu_read_lock_bh() for more information. */ -#define rcu_read_unlock_bh() local_bh_enable() +#define rcu_read_unlock_bh() \ + do { \ + __release(RCU_BH); \ + local_bh_enable(); \ + } while(0) /** * rcu_dereference - fetch an RCU-protected pointer in an diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h index 5676c4210e2..daa2d83cefe 100644 --- a/include/linux/reiserfs_fs.h +++ b/include/linux/reiserfs_fs.h @@ -1973,7 +1973,7 @@ void reiserfs_unmap_buffer(struct buffer_head *); /* file.c */ extern struct inode_operations reiserfs_file_inode_operations; extern const struct file_operations reiserfs_file_operations; -extern struct address_space_operations reiserfs_address_space_operations; +extern const struct address_space_operations reiserfs_address_space_operations; /* fix_nodes.c */ diff --git a/include/linux/rtc.h b/include/linux/rtc.h index 36e2bf4b431..5371e4e7459 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h @@ -34,8 +34,8 @@ struct rtc_time { * alarm API. 
*/ struct rtc_wkalrm { - unsigned char enabled; /* 0 = alarm disable, 1 = alarm disabled */ - unsigned char pending; /* 0 = alarm pending, 1 = alarm not pending */ + unsigned char enabled; /* 0 = alarm disabled, 1 = alarm enabled */ + unsigned char pending; /* 0 = alarm not pending, 1 = alarm pending */ struct rtc_time time; /* time the alarm is set to */ }; diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h index fa4a3b82ba7..5d41dee82f8 100644 --- a/include/linux/rtmutex.h +++ b/include/linux/rtmutex.h @@ -29,8 +29,6 @@ struct rt_mutex { struct task_struct *owner; #ifdef CONFIG_DEBUG_RT_MUTEXES int save_state; - struct list_head held_list_entry; - unsigned long acquire_ip; const char *name, *file; int line; void *magic; @@ -98,14 +96,6 @@ extern int rt_mutex_trylock(struct rt_mutex *lock); extern void rt_mutex_unlock(struct rt_mutex *lock); -#ifdef CONFIG_DEBUG_RT_MUTEXES -# define INIT_RT_MUTEX_DEBUG(tsk) \ - .held_list_head = LIST_HEAD_INIT(tsk.held_list_head), \ - .held_list_lock = SPIN_LOCK_UNLOCKED -#else -# define INIT_RT_MUTEX_DEBUG(tsk) -#endif - #ifdef CONFIG_RT_MUTEXES # define INIT_RT_MUTEXES(tsk) \ .pi_waiters = PLIST_HEAD_INIT(tsk.pi_waiters, tsk.pi_lock), \ diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h index f30f805080a..ae1fcadd598 100644 --- a/include/linux/rwsem-spinlock.h +++ b/include/linux/rwsem-spinlock.h @@ -32,30 +32,37 @@ struct rw_semaphore { __s32 activity; spinlock_t wait_lock; struct list_head wait_list; -#if RWSEM_DEBUG - int debug; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; #endif }; -/* - * initialisation - */ -#if RWSEM_DEBUG -#define __RWSEM_DEBUG_INIT , 0 +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname } #else -#define __RWSEM_DEBUG_INIT /* */ +# define __RWSEM_DEP_MAP_INIT(lockname) #endif #define __RWSEM_INITIALIZER(name) \ -{ 0, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) __RWSEM_DEBUG_INIT } +{ 0, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) } #define DECLARE_RWSEM(name) \ struct rw_semaphore name = __RWSEM_INITIALIZER(name) -extern void FASTCALL(init_rwsem(struct rw_semaphore *sem)); +extern void __init_rwsem(struct rw_semaphore *sem, const char *name, + struct lock_class_key *key); + +#define init_rwsem(sem) \ +do { \ + static struct lock_class_key __key; \ + \ + __init_rwsem((sem), #sem, &__key); \ +} while (0) + extern void FASTCALL(__down_read(struct rw_semaphore *sem)); extern int FASTCALL(__down_read_trylock(struct rw_semaphore *sem)); extern void FASTCALL(__down_write(struct rw_semaphore *sem)); +extern void FASTCALL(__down_write_nested(struct rw_semaphore *sem, int subclass)); extern int FASTCALL(__down_write_trylock(struct rw_semaphore *sem)); extern void FASTCALL(__up_read(struct rw_semaphore *sem)); extern void FASTCALL(__up_write(struct rw_semaphore *sem)); diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index f99fe90732a..658afb37c3f 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -9,8 +9,6 @@ #include <linux/linkage.h> -#define RWSEM_DEBUG 0 - #ifdef __KERNEL__ #include <linux/types.h> @@ -26,89 +24,58 @@ struct rw_semaphore; #include <asm/rwsem.h> /* use an arch-specific implementation */ #endif -#ifndef rwsemtrace -#if RWSEM_DEBUG -extern void FASTCALL(rwsemtrace(struct rw_semaphore *sem, const char *str)); -#else -#define rwsemtrace(SEM,FMT) -#endif -#endif - /* * lock for reading */ -static inline void down_read(struct rw_semaphore *sem) -{ - 
might_sleep(); - rwsemtrace(sem,"Entering down_read"); - __down_read(sem); - rwsemtrace(sem,"Leaving down_read"); -} +extern void down_read(struct rw_semaphore *sem); /* * trylock for reading -- returns 1 if successful, 0 if contention */ -static inline int down_read_trylock(struct rw_semaphore *sem) -{ - int ret; - rwsemtrace(sem,"Entering down_read_trylock"); - ret = __down_read_trylock(sem); - rwsemtrace(sem,"Leaving down_read_trylock"); - return ret; -} +extern int down_read_trylock(struct rw_semaphore *sem); /* * lock for writing */ -static inline void down_write(struct rw_semaphore *sem) -{ - might_sleep(); - rwsemtrace(sem,"Entering down_write"); - __down_write(sem); - rwsemtrace(sem,"Leaving down_write"); -} +extern void down_write(struct rw_semaphore *sem); /* * trylock for writing -- returns 1 if successful, 0 if contention */ -static inline int down_write_trylock(struct rw_semaphore *sem) -{ - int ret; - rwsemtrace(sem,"Entering down_write_trylock"); - ret = __down_write_trylock(sem); - rwsemtrace(sem,"Leaving down_write_trylock"); - return ret; -} +extern int down_write_trylock(struct rw_semaphore *sem); /* * release a read lock */ -static inline void up_read(struct rw_semaphore *sem) -{ - rwsemtrace(sem,"Entering up_read"); - __up_read(sem); - rwsemtrace(sem,"Leaving up_read"); -} +extern void up_read(struct rw_semaphore *sem); /* * release a write lock */ -static inline void up_write(struct rw_semaphore *sem) -{ - rwsemtrace(sem,"Entering up_write"); - __up_write(sem); - rwsemtrace(sem,"Leaving up_write"); -} +extern void up_write(struct rw_semaphore *sem); /* * downgrade write lock to read lock */ -static inline void downgrade_write(struct rw_semaphore *sem) -{ - rwsemtrace(sem,"Entering downgrade_write"); - __downgrade_write(sem); - rwsemtrace(sem,"Leaving downgrade_write"); -} +extern void downgrade_write(struct rw_semaphore *sem); + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +/* + * nested locking: + */ +extern void down_read_nested(struct rw_semaphore *sem, int subclass); +extern void down_write_nested(struct rw_semaphore *sem, int subclass); +/* + * Take/release a lock when not the owner will release it: + */ +extern void down_read_non_owner(struct rw_semaphore *sem); +extern void up_read_non_owner(struct rw_semaphore *sem); +#else +# define down_read_nested(sem, subclass) down_read(sem) +# define down_write_nested(sem, subclass) down_write(sem) +# define down_read_non_owner(sem) down_read(sem) +# define up_read_non_owner(sem) up_read(sem) +#endif #endif /* __KERNEL__ */ #endif /* _LINUX_RWSEM_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 821f0481ebe..1c876e27ff9 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -184,11 +184,11 @@ extern unsigned long weighted_cpuload(const int cpu); extern rwlock_t tasklist_lock; extern spinlock_t mmlist_lock; -typedef struct task_struct task_t; +struct task_struct; extern void sched_init(void); extern void sched_init_smp(void); -extern void init_idle(task_t *idle, int cpu); +extern void init_idle(struct task_struct *idle, int cpu); extern cpumask_t nohz_cpu_mask; @@ -383,7 +383,7 @@ struct signal_struct { wait_queue_head_t wait_chldexit; /* for wait4() */ /* current thread group signal load-balancing target: */ - task_t *curr_target; + struct task_struct *curr_target; /* shared signal handling: */ struct sigpending shared_pending; @@ -534,7 +534,6 @@ extern struct user_struct *find_user(uid_t); extern struct user_struct root_user; #define INIT_USER (&root_user) -typedef struct prio_array prio_array_t; 
struct backing_dev_info; struct reclaim_state; @@ -699,7 +698,7 @@ extern int groups_search(struct group_info *group_info, gid_t grp); ((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK]) #ifdef ARCH_HAS_PREFETCH_SWITCH_STACK -extern void prefetch_stack(struct task_struct*); +extern void prefetch_stack(struct task_struct *t); #else static inline void prefetch_stack(struct task_struct *t) { } #endif @@ -715,6 +714,8 @@ enum sleep_type { SLEEP_INTERRUPTED, }; +struct prio_array; + struct task_struct { volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ struct thread_info *thread_info; @@ -732,7 +733,7 @@ struct task_struct { int load_weight; /* for niceness load balancing purposes */ int prio, static_prio, normal_prio; struct list_head run_list; - prio_array_t *array; + struct prio_array *array; unsigned short ioprio; unsigned int btrace_seq; @@ -865,16 +866,34 @@ struct task_struct { struct plist_head pi_waiters; /* Deadlock detection and priority inheritance handling */ struct rt_mutex_waiter *pi_blocked_on; -# ifdef CONFIG_DEBUG_RT_MUTEXES - spinlock_t held_list_lock; - struct list_head held_list_head; -# endif #endif #ifdef CONFIG_DEBUG_MUTEXES /* mutex deadlock detection */ struct mutex_waiter *blocked_on; #endif +#ifdef CONFIG_TRACE_IRQFLAGS + unsigned int irq_events; + int hardirqs_enabled; + unsigned long hardirq_enable_ip; + unsigned int hardirq_enable_event; + unsigned long hardirq_disable_ip; + unsigned int hardirq_disable_event; + int softirqs_enabled; + unsigned long softirq_disable_ip; + unsigned int softirq_disable_event; + unsigned long softirq_enable_ip; + unsigned int softirq_enable_event; + int hardirq_context; + int softirq_context; +#endif +#ifdef CONFIG_LOCKDEP +# define MAX_LOCK_DEPTH 30UL + u64 curr_chain_key; + int lockdep_depth; + struct held_lock held_locks[MAX_LOCK_DEPTH]; + unsigned int lockdep_recursion; +#endif /* journalling filesystem info */ void *journal_info; @@ -1013,9 +1032,9 @@ static inline void put_task_struct(struct task_struct *t) #define used_math() tsk_used_math(current) #ifdef CONFIG_SMP -extern int set_cpus_allowed(task_t *p, cpumask_t new_mask); +extern int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask); #else -static inline int set_cpus_allowed(task_t *p, cpumask_t new_mask) +static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) { if (!cpu_isset(0, new_mask)) return -EINVAL; @@ -1024,7 +1043,8 @@ static inline int set_cpus_allowed(task_t *p, cpumask_t new_mask) #endif extern unsigned long long sched_clock(void); -extern unsigned long long current_sched_time(const task_t *current_task); +extern unsigned long long +current_sched_time(const struct task_struct *current_task); /* sched_exec is called by processes performing an exec */ #ifdef CONFIG_SMP @@ -1042,27 +1062,27 @@ static inline void idle_task_exit(void) {} extern void sched_idle_next(void); #ifdef CONFIG_RT_MUTEXES -extern int rt_mutex_getprio(task_t *p); -extern void rt_mutex_setprio(task_t *p, int prio); -extern void rt_mutex_adjust_pi(task_t *p); +extern int rt_mutex_getprio(struct task_struct *p); +extern void rt_mutex_setprio(struct task_struct *p, int prio); +extern void rt_mutex_adjust_pi(struct task_struct *p); #else -static inline int rt_mutex_getprio(task_t *p) +static inline int rt_mutex_getprio(struct task_struct *p) { return p->normal_prio; } # define rt_mutex_adjust_pi(p) do { } while (0) #endif -extern void set_user_nice(task_t *p, long nice); -extern int task_prio(const task_t *p); -extern int task_nice(const 
task_t *p); -extern int can_nice(const task_t *p, const int nice); -extern int task_curr(const task_t *p); +extern void set_user_nice(struct task_struct *p, long nice); +extern int task_prio(const struct task_struct *p); +extern int task_nice(const struct task_struct *p); +extern int can_nice(const struct task_struct *p, const int nice); +extern int task_curr(const struct task_struct *p); extern int idle_cpu(int cpu); extern int sched_setscheduler(struct task_struct *, int, struct sched_param *); -extern task_t *idle_task(int cpu); -extern task_t *curr_task(int cpu); -extern void set_curr_task(int cpu, task_t *p); +extern struct task_struct *idle_task(int cpu); +extern struct task_struct *curr_task(int cpu); +extern void set_curr_task(int cpu, struct task_struct *p); void yield(void); @@ -1119,8 +1139,8 @@ extern void FASTCALL(wake_up_new_task(struct task_struct * tsk, #else static inline void kick_process(struct task_struct *tsk) { } #endif -extern void FASTCALL(sched_fork(task_t * p, int clone_flags)); -extern void FASTCALL(sched_exit(task_t * p)); +extern void FASTCALL(sched_fork(struct task_struct * p, int clone_flags)); +extern void FASTCALL(sched_exit(struct task_struct * p)); extern int in_group_p(gid_t); extern int in_egroup_p(gid_t); @@ -1153,7 +1173,7 @@ extern int force_sig_info(int, struct siginfo *, struct task_struct *); extern int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp); extern int kill_pg_info(int, struct siginfo *, pid_t); extern int kill_proc_info(int, struct siginfo *, pid_t); -extern int kill_proc_info_as_uid(int, struct siginfo *, pid_t, uid_t, uid_t); +extern int kill_proc_info_as_uid(int, struct siginfo *, pid_t, uid_t, uid_t, u32); extern void do_notify_parent(struct task_struct *, int); extern void force_sig(int, struct task_struct *); extern void force_sig_specific(int, struct task_struct *); @@ -1225,17 +1245,17 @@ extern NORET_TYPE void do_group_exit(int); extern void daemonize(const char *, ...); extern int allow_signal(int); extern int disallow_signal(int); -extern task_t *child_reaper; +extern struct task_struct *child_reaper; extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *); extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *); -task_t *fork_idle(int); +struct task_struct *fork_idle(int); extern void set_task_comm(struct task_struct *tsk, char *from); extern void get_task_comm(char *to, struct task_struct *tsk); #ifdef CONFIG_SMP -extern void wait_task_inactive(task_t * p); +extern void wait_task_inactive(struct task_struct * p); #else #define wait_task_inactive(p) do { } while (0) #endif @@ -1261,13 +1281,13 @@ extern void wait_task_inactive(task_t * p); /* de_thread depends on thread_group_leader not being a pid based check */ #define thread_group_leader(p) (p == p->group_leader) -static inline task_t *next_thread(const task_t *p) +static inline struct task_struct *next_thread(const struct task_struct *p) { return list_entry(rcu_dereference(p->thread_group.next), - task_t, thread_group); + struct task_struct, thread_group); } -static inline int thread_group_empty(task_t *p) +static inline int thread_group_empty(struct task_struct *p) { return list_empty(&p->thread_group); } diff --git a/include/linux/security.h b/include/linux/security.h index 51805806f97..f75303831d0 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -67,7 +67,7 @@ struct xfrm_state; struct xfrm_user_sec_ctx; extern int cap_netlink_send(struct sock 
*sk, struct sk_buff *skb);
-extern int cap_netlink_recv(struct sk_buff *skb);
+extern int cap_netlink_recv(struct sk_buff *skb, int cap);

 /*
  * Values used in the task_security_ops calls
  */
@@ -567,6 +570,9 @@ struct swap_info_struct;
 *	@p.
 *	@p contains the task_struct for the process.
 *	Return 0 if permission is granted.
+ *	@task_getsecid:
+ *	Retrieve the security identifier of the process @p.
+ *	@p contains the task_struct for the process; the security identifier is placed into @secid.
 *	@task_setgroups:
 *	Check permission before setting the supplementary group set of the
 *	current process.
@@ -582,6 +585,10 @@ struct swap_info_struct;
 *	@p contains the task_struct of process.
 *	@ioprio contains the new ioprio value
 *	Return 0 if permission is granted.
+ *	@task_getioprio:
+ *	Check permission before getting the ioprio value of @p.
+ *	@p contains the task_struct of process.
+ *	Return 0 if permission is granted.
 *	@task_setrlimit:
 *	Check permission before setting the resource limits of the current
 *	process for @resource to @new_rlim.  The old resource limit values can
@@ -615,6 +622,7 @@ struct swap_info_struct;
 *	@p contains the task_struct for process.
 *	@info contains the signal information.
 *	@sig contains the signal value.
+ *	@secid contains the sid of the process where the signal originated
 *	Return 0 if permission is granted.
 *	@task_wait:
 *	Check permission before allowing a process to reap a child process @p
@@ -656,6 +664,7 @@ struct swap_info_struct;
 *	Check permission before processing the received netlink message in
 *	@skb.
 *	@skb contains the sk_buff structure for the netlink message.
+ *	@cap indicates the capability required
 *	Return 0 if permission is granted.
 *
 * Security hooks for Unix domain networking.
@@ -1218,16 +1227,18 @@ struct security_operations {
 	int (*task_setpgid) (struct task_struct * p, pid_t pgid);
 	int (*task_getpgid) (struct task_struct * p);
 	int (*task_getsid) (struct task_struct * p);
+	void (*task_getsecid) (struct task_struct * p, u32 * secid);
 	int (*task_setgroups) (struct group_info *group_info);
 	int (*task_setnice) (struct task_struct * p, int nice);
 	int (*task_setioprio) (struct task_struct * p, int ioprio);
+	int (*task_getioprio) (struct task_struct * p);
 	int (*task_setrlimit) (unsigned int resource, struct rlimit * new_rlim);
 	int (*task_setscheduler) (struct task_struct * p, int policy,
 				  struct sched_param * lp);
 	int (*task_getscheduler) (struct task_struct * p);
 	int (*task_movememory) (struct task_struct * p);
 	int (*task_kill) (struct task_struct * p,
-			  struct siginfo * info, int sig);
+			  struct siginfo * info, int sig, u32 secid);
 	int (*task_wait) (struct task_struct * p);
 	int (*task_prctl) (int option, unsigned long arg2,
 			   unsigned long arg3, unsigned long arg4,
@@ -1266,7 +1277,7 @@ struct security_operations {
 			   struct sembuf * sops, unsigned nsops, int alter);

 	int (*netlink_send) (struct sock * sk, struct sk_buff * skb);
-	int (*netlink_recv) (struct sk_buff * skb);
+	int (*netlink_recv) (struct sk_buff * skb, int cap);

 	/* allow module stacking */
 	int (*register_security) (const char *name,
@@ -1838,6 +1849,11 @@ static inline int security_task_getsid (struct task_struct *p)
 	return security_ops->task_getsid (p);
 }

+static inline void security_task_getsecid (struct task_struct *p, u32 *secid)
+{
+	security_ops->task_getsecid (p, secid);
+}
+
 static inline int security_task_setgroups (struct group_info *group_info)
 {
 	return security_ops->task_setgroups (group_info);
@@ -1853,6 +1869,11 @@ static inline int security_task_setioprio (struct task_struct *p, int
ioprio) return security_ops->task_setioprio (p, ioprio); } +static inline int security_task_getioprio (struct task_struct *p) +{ + return security_ops->task_getioprio (p); +} + static inline int security_task_setrlimit (unsigned int resource, struct rlimit *new_rlim) { @@ -1877,9 +1898,10 @@ static inline int security_task_movememory (struct task_struct *p) } static inline int security_task_kill (struct task_struct *p, - struct siginfo *info, int sig) + struct siginfo *info, int sig, + u32 secid) { - return security_ops->task_kill (p, info, sig); + return security_ops->task_kill (p, info, sig, secid); } static inline int security_task_wait (struct task_struct *p) @@ -2032,9 +2054,9 @@ static inline int security_netlink_send(struct sock *sk, struct sk_buff * skb) return security_ops->netlink_send(sk, skb); } -static inline int security_netlink_recv(struct sk_buff * skb) +static inline int security_netlink_recv(struct sk_buff * skb, int cap) { - return security_ops->netlink_recv(skb); + return security_ops->netlink_recv(skb, cap); } /* prototypes */ @@ -2490,6 +2512,9 @@ static inline int security_task_getsid (struct task_struct *p) return 0; } +static inline void security_task_getsecid (struct task_struct *p, u32 *secid) +{ } + static inline int security_task_setgroups (struct group_info *group_info) { return 0; @@ -2505,6 +2530,11 @@ static inline int security_task_setioprio (struct task_struct *p, int ioprio) return 0; } +static inline int security_task_getioprio (struct task_struct *p) +{ + return 0; +} + static inline int security_task_setrlimit (unsigned int resource, struct rlimit *new_rlim) { @@ -2529,7 +2559,8 @@ static inline int security_task_movememory (struct task_struct *p) } static inline int security_task_kill (struct task_struct *p, - struct siginfo *info, int sig) + struct siginfo *info, int sig, + u32 secid) { return 0; } @@ -2670,9 +2701,9 @@ static inline int security_netlink_send (struct sock *sk, struct sk_buff *skb) return cap_netlink_send (sk, skb); } -static inline int security_netlink_recv (struct sk_buff *skb) +static inline int security_netlink_recv (struct sk_buff *skb, int cap) { - return cap_netlink_recv (skb); + return cap_netlink_recv (skb, cap); } static inline struct dentry *securityfs_create_dir(const char *name, diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index 7bc5c7c12b5..46000936f8f 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -38,9 +38,17 @@ typedef struct { * These macros triggered gcc-3.x compile-time problems. We think these are * OK now. Be cautious. */ -#define SEQLOCK_UNLOCKED { 0, SPIN_LOCK_UNLOCKED } -#define seqlock_init(x) do { *(x) = (seqlock_t) SEQLOCK_UNLOCKED; } while (0) +#define __SEQLOCK_UNLOCKED(lockname) \ + { 0, __SPIN_LOCK_UNLOCKED(lockname) } +#define SEQLOCK_UNLOCKED \ + __SEQLOCK_UNLOCKED(old_style_seqlock_init) + +#define seqlock_init(x) \ + do { *(x) = (seqlock_t) __SEQLOCK_UNLOCKED(x); } while (0) + +#define DEFINE_SEQLOCK(x) \ + seqlock_t x = __SEQLOCK_UNLOCKED(x) /* Lock out other writers and update the count. * Acts like a normal spin_lock/unlock. 
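[Editor's note: a minimal sketch of how the new secid plumbing above is meant to be threaded through, in the style of kill_proc_info_as_uid(); demo_send_sig() and its calling context are hypothetical and not part of the patch.]

#include <linux/sched.h>
#include <linux/security.h>

/* Hypothetical helper: capture the sender's security identifier up
 * front and hand it to the LSM check, so the decision need not depend
 * on the sending task still existing at delivery time. */
static int demo_send_sig(int sig, struct siginfo *info,
			 struct task_struct *target)
{
	u32 secid;

	security_task_getsecid(current, &secid);
	return security_task_kill(target, info, sig, secid);
}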
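[Editor's note: the DEFINE_SEQLOCK() initializer above pairs with the usual writer lock / reader retry-loop pattern; a minimal sketch follows, with all demo_* names invented for illustration.]

#include <linux/seqlock.h>

static DEFINE_SEQLOCK(demo_lock);
static unsigned long demo_value;

static void demo_update(unsigned long v)
{
	write_seqlock(&demo_lock);	/* spin_lock plus sequence bump */
	demo_value = v;
	write_sequnlock(&demo_lock);
}

static unsigned long demo_read(void)
{
	unsigned long v;
	unsigned seq;

	do {
		seq = read_seqbegin(&demo_lock);
		v = demo_value;
	} while (read_seqretry(&demo_lock, seq));	/* raced a writer? retry */

	return v;
}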
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 951c4e85827..fc1104a2cfa 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -336,7 +336,6 @@ struct uart_driver { struct module *owner; const char *driver_name; const char *dev_name; - const char *devfs_name; int major; int minor; int nr; diff --git a/include/linux/signal.h b/include/linux/signal.h index 1e4ce7225ee..117135e33d6 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -9,32 +9,6 @@ #include <linux/spinlock.h> /* - * These values of sa_flags are used only by the kernel as part of the - * irq handling routines. - * - * SA_INTERRUPT is also used by the irq handling routines. - * SA_SHIRQ is for shared interrupt support on PCI and EISA. - * SA_PROBEIRQ is set by callers when they expect sharing mismatches to occur - */ -#define SA_SAMPLE_RANDOM SA_RESTART -#define SA_SHIRQ 0x04000000 -#define SA_PROBEIRQ 0x08000000 - -/* - * As above, these correspond to the IORESOURCE_IRQ_* defines in - * linux/ioport.h to select the interrupt line behaviour. When - * requesting an interrupt without specifying a SA_TRIGGER, the - * setting should be assumed to be "as already configured", which - * may be as per machine or firmware initialisation. - */ -#define SA_TRIGGER_LOW 0x00000008 -#define SA_TRIGGER_HIGH 0x00000004 -#define SA_TRIGGER_FALLING 0x00000002 -#define SA_TRIGGER_RISING 0x00000001 -#define SA_TRIGGER_MASK (SA_TRIGGER_HIGH|SA_TRIGGER_LOW|\ - SA_TRIGGER_RISING|SA_TRIGGER_FALLING) - -/* * Real Time signals may be queued. */ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 16eef03ce0e..3597b4f1438 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -171,7 +171,15 @@ enum { enum { SKB_GSO_TCPV4 = 1 << 0, - SKB_GSO_UDPV4 = 1 << 1, + SKB_GSO_UDP = 1 << 1, + + /* This indicates the skb is from an untrusted source. */ + SKB_GSO_DODGY = 1 << 2, + + /* This indicates the tcp segment has CWR set. 
*/ + SKB_GSO_TCP_ECN = 1 << 3, + + SKB_GSO_TCPV6 = 1 << 4, }; /** @@ -596,9 +604,12 @@ static inline __u32 skb_queue_len(const struct sk_buff_head *list_) return list_->qlen; } +extern struct lock_class_key skb_queue_lock_key; + static inline void skb_queue_head_init(struct sk_buff_head *list) { spin_lock_init(&list->lock); + lockdep_set_class(&list->lock, &skb_queue_lock_key); list->prev = list->next = (struct sk_buff *)list; list->qlen = 0; } @@ -1298,8 +1309,7 @@ extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to); extern void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len); -extern void skb_release_data(struct sk_buff *skb); -extern struct sk_buff *skb_segment(struct sk_buff *skb, int sg); +extern struct sk_buff *skb_segment(struct sk_buff *skb, int features); static inline void *skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer) diff --git a/include/linux/smp.h b/include/linux/smp.h index c93c3fe4308..837e8bce134 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -125,4 +125,6 @@ static inline void smp_send_reschedule(int cpu) { } #define put_cpu() preempt_enable() #define put_cpu_no_resched() preempt_enable_no_resched() +void smp_setup_processor_id(void); + #endif /* __LINUX_SMP_H */ diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index e928c0dcc29..c8bb68099eb 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -642,10 +642,14 @@ struct spi_board_info { u16 bus_num; u16 chip_select; + /* mode becomes spi_device.mode, and is essential for chips + * where the default of SPI_CS_HIGH = 0 is wrong. + */ + u8 mode; + /* ... may need additional spi_device chip config data here. * avoid stuff protocol drivers can set; but include stuff * needed to behave without being bound to a driver: - * - chipselect polarity * - quirks like clock rate mattering when not selected */ }; diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index ae23beef9cc..31473db92d3 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -82,14 +82,40 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock); /* * Pull the __raw*() functions/declarations (UP-nondebug doesnt need them): */ -#if defined(CONFIG_SMP) +#ifdef CONFIG_SMP # include <asm/spinlock.h> #else # include <linux/spinlock_up.h> #endif -#define spin_lock_init(lock) do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0) -#define rwlock_init(lock) do { *(lock) = RW_LOCK_UNLOCKED; } while (0) +#ifdef CONFIG_DEBUG_SPINLOCK + extern void __spin_lock_init(spinlock_t *lock, const char *name, + struct lock_class_key *key); +# define spin_lock_init(lock) \ +do { \ + static struct lock_class_key __key; \ + \ + __spin_lock_init((lock), #lock, &__key); \ +} while (0) + +#else +# define spin_lock_init(lock) \ + do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0) +#endif + +#ifdef CONFIG_DEBUG_SPINLOCK + extern void __rwlock_init(rwlock_t *lock, const char *name, + struct lock_class_key *key); +# define rwlock_init(lock) \ +do { \ + static struct lock_class_key __key; \ + \ + __rwlock_init((lock), #lock, &__key); \ +} while (0) +#else +# define rwlock_init(lock) \ + do { *(lock) = RW_LOCK_UNLOCKED; } while (0) +#endif #define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock) @@ -113,7 +139,6 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock); #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) extern int _raw_spin_trylock(spinlock_t *lock); extern void _raw_spin_unlock(spinlock_t *lock); 
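[Editor's note: the reworked spin_lock_init() just above is the lockdep hook-in point: each textual call site now carries its own static lock_class_key, so every lock initialized at that site lands in one lockdep class. A sketch of what that means for a driver; struct demo_dev and demo_dev_init() are hypothetical.]

#include <linux/spinlock.h>

struct demo_dev {
	spinlock_t lock;
};

static void demo_dev_init(struct demo_dev *d)
{
	/* Under CONFIG_DEBUG_SPINLOCK this expands roughly to:
	 *	static struct lock_class_key __key;
	 *	__spin_lock_init(&d->lock, "&d->lock", &__key);
	 * so all demo_dev locks initialized here share one class,
	 * and lockdep reports name them after the init expression. */
	spin_lock_init(&d->lock);
}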
- extern void _raw_read_lock(rwlock_t *lock); extern int _raw_read_trylock(rwlock_t *lock); extern void _raw_read_unlock(rwlock_t *lock); @@ -121,17 +146,17 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock); extern int _raw_write_trylock(rwlock_t *lock); extern void _raw_write_unlock(rwlock_t *lock); #else -# define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock) -# define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock) # define _raw_spin_lock(lock) __raw_spin_lock(&(lock)->raw_lock) # define _raw_spin_lock_flags(lock, flags) \ __raw_spin_lock_flags(&(lock)->raw_lock, *(flags)) +# define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock) +# define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock) # define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock) -# define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock) -# define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock) -# define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock) # define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock) +# define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock) +# define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock) # define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock) +# define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock) #endif #define read_can_lock(rwlock) __raw_read_can_lock(&(rwlock)->raw_lock) @@ -147,6 +172,13 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock); #define write_trylock(lock) __cond_lock(_write_trylock(lock)) #define spin_lock(lock) _spin_lock(lock) + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass) +#else +# define spin_lock_nested(lock, subclass) _spin_lock(lock) +#endif + #define write_lock(lock) _write_lock(lock) #define read_lock(lock) _read_lock(lock) @@ -172,21 +204,18 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock); /* * We inline the unlock functions in the nondebug case: */ -#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP) +#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || \ + !defined(CONFIG_SMP) # define spin_unlock(lock) _spin_unlock(lock) # define read_unlock(lock) _read_unlock(lock) # define write_unlock(lock) _write_unlock(lock) -#else -# define spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock) -# define read_unlock(lock) __raw_read_unlock(&(lock)->raw_lock) -# define write_unlock(lock) __raw_write_unlock(&(lock)->raw_lock) -#endif - -#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP) # define spin_unlock_irq(lock) _spin_unlock_irq(lock) # define read_unlock_irq(lock) _read_unlock_irq(lock) # define write_unlock_irq(lock) _write_unlock_irq(lock) #else +# define spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock) +# define read_unlock(lock) __raw_read_unlock(&(lock)->raw_lock) +# define write_unlock(lock) __raw_write_unlock(&(lock)->raw_lock) # define spin_unlock_irq(lock) \ do { __raw_spin_unlock(&(lock)->raw_lock); local_irq_enable(); } while (0) # define read_unlock_irq(lock) \ diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h index 78e6989ffb5..b2c4f829946 100644 --- a/include/linux/spinlock_api_smp.h +++ b/include/linux/spinlock_api_smp.h @@ -20,6 +20,8 @@ int in_lock_functions(unsigned long addr); #define assert_spin_locked(x) 
BUG_ON(!spin_is_locked(x)) void __lockfunc _spin_lock(spinlock_t *lock) __acquires(spinlock_t); +void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass) + __acquires(spinlock_t); void __lockfunc _read_lock(rwlock_t *lock) __acquires(rwlock_t); void __lockfunc _write_lock(rwlock_t *lock) __acquires(rwlock_t); void __lockfunc _spin_lock_bh(spinlock_t *lock) __acquires(spinlock_t); diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h index cd81cee566f..67faa044c5f 100644 --- a/include/linux/spinlock_api_up.h +++ b/include/linux/spinlock_api_up.h @@ -49,6 +49,7 @@ do { local_irq_restore(flags); __UNLOCK(lock); } while (0) #define _spin_lock(lock) __LOCK(lock) +#define _spin_lock_nested(lock, subclass) __LOCK(lock) #define _read_lock(lock) __LOCK(lock) #define _write_lock(lock) __LOCK(lock) #define _spin_lock_bh(lock) __LOCK_BH(lock) diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h index 9cb51e07039..dc5fb69e4de 100644 --- a/include/linux/spinlock_types.h +++ b/include/linux/spinlock_types.h @@ -9,6 +9,8 @@ * Released under the General Public License (GPL). */ +#include <linux/lockdep.h> + #if defined(CONFIG_SMP) # include <asm/spinlock_types.h> #else @@ -24,6 +26,9 @@ typedef struct { unsigned int magic, owner_cpu; void *owner; #endif +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif } spinlock_t; #define SPINLOCK_MAGIC 0xdead4ead @@ -37,31 +42,53 @@ typedef struct { unsigned int magic, owner_cpu; void *owner; #endif +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif } rwlock_t; #define RWLOCK_MAGIC 0xdeaf1eed #define SPINLOCK_OWNER_INIT ((void *)-1L) +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } +#else +# define SPIN_DEP_MAP_INIT(lockname) +#endif + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } +#else +# define RW_DEP_MAP_INIT(lockname) +#endif + #ifdef CONFIG_DEBUG_SPINLOCK -# define SPIN_LOCK_UNLOCKED \ +# define __SPIN_LOCK_UNLOCKED(lockname) \ (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \ .magic = SPINLOCK_MAGIC, \ .owner = SPINLOCK_OWNER_INIT, \ - .owner_cpu = -1 } -#define RW_LOCK_UNLOCKED \ + .owner_cpu = -1, \ + SPIN_DEP_MAP_INIT(lockname) } +#define __RW_LOCK_UNLOCKED(lockname) \ (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \ .magic = RWLOCK_MAGIC, \ .owner = SPINLOCK_OWNER_INIT, \ - .owner_cpu = -1 } + .owner_cpu = -1, \ + RW_DEP_MAP_INIT(lockname) } #else -# define SPIN_LOCK_UNLOCKED \ - (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED } -#define RW_LOCK_UNLOCKED \ - (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED } +# define __SPIN_LOCK_UNLOCKED(lockname) \ + (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \ + SPIN_DEP_MAP_INIT(lockname) } +#define __RW_LOCK_UNLOCKED(lockname) \ + (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \ + RW_DEP_MAP_INIT(lockname) } #endif -#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED -#define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED +#define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(old_style_spin_init) +#define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(old_style_rw_init) + +#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) +#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x) #endif /* __LINUX_SPINLOCK_TYPES_H */ diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h index 04135b0e198..27644af20b7 100644 --- a/include/linux/spinlock_types_up.h +++ 
b/include/linux/spinlock_types_up.h @@ -12,10 +12,14 @@ * Released under the General Public License (GPL). */ -#ifdef CONFIG_DEBUG_SPINLOCK +#if defined(CONFIG_DEBUG_SPINLOCK) || \ + defined(CONFIG_DEBUG_LOCK_ALLOC) typedef struct { volatile unsigned int slock; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif } raw_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { 1 } @@ -30,6 +34,9 @@ typedef struct { } raw_spinlock_t; typedef struct { /* no debug version on UP */ +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif } raw_rwlock_t; #define __RAW_RW_LOCK_UNLOCKED { } diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h index 31accf2f0b1..ea54c4c9a4e 100644 --- a/include/linux/spinlock_up.h +++ b/include/linux/spinlock_up.h @@ -18,7 +18,6 @@ */ #ifdef CONFIG_DEBUG_SPINLOCK - #define __raw_spin_is_locked(x) ((x)->slock == 0) static inline void __raw_spin_lock(raw_spinlock_t *lock) diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h new file mode 100644 index 00000000000..9cc81e57222 --- /dev/null +++ b/include/linux/stacktrace.h @@ -0,0 +1,20 @@ +#ifndef __LINUX_STACKTRACE_H +#define __LINUX_STACKTRACE_H + +#ifdef CONFIG_STACKTRACE +struct stack_trace { + unsigned int nr_entries, max_entries; + unsigned long *entries; +}; + +extern void save_stack_trace(struct stack_trace *trace, + struct task_struct *task, int all_contexts, + unsigned int skip); + +extern void print_stack_trace(struct stack_trace *trace, int spaces); +#else +# define save_stack_trace(trace, task, all, skip) do { } while (0) +# define print_stack_trace(trace, spaces) do { } while (0) +#endif + +#endif diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 50356438454..7b27c09b560 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -159,7 +159,9 @@ struct svc_rqst { * determine what device number * to report (real or virtual) */ - + int rq_sendfile_ok; /* turned off in gss privacy + * to prevent encrypting page + * cache pages */ wait_queue_head_t rq_wait; /* synchronization */ }; diff --git a/include/linux/swap.h b/include/linux/swap.h index c41e2d6d1ac..5e59184c909 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -189,7 +189,7 @@ extern long vm_total_pages; #ifdef CONFIG_NUMA extern int zone_reclaim_mode; -extern int zone_reclaim_interval; +extern int sysctl_min_unmapped_ratio; extern int zone_reclaim(struct zone *, gfp_t, unsigned int); #else #define zone_reclaim_mode 0 diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 46e4d8f2771..e4b1a4d4dcf 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -188,7 +188,7 @@ enum VM_DROP_PAGECACHE=29, /* int: nuke lots of pagecache */ VM_PERCPU_PAGELIST_FRACTION=30,/* int: fraction of pages in each percpu_pagelist */ VM_ZONE_RECLAIM_MODE=31, /* reclaim local zone memory before going off node */ - VM_ZONE_RECLAIM_INTERVAL=32, /* time period to wait after reclaim failure */ + VM_MIN_UNMAPPED=32, /* Set min percent of unmapped pages */ VM_PANIC_ON_OOM=33, /* panic at out-of-memory */ VM_VDSO_ENABLED=34, /* map VDSO into new processes?
*/ }; diff --git a/include/linux/tty.h b/include/linux/tty.h index cb35ca50a0a..b3b807e4b05 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -57,7 +57,6 @@ struct tty_buffer { unsigned char *flag_buf_ptr; int used; int size; - int active; int commit; int read; /* Data points here */ @@ -259,7 +258,6 @@ struct tty_struct { #define TTY_DO_WRITE_WAKEUP 5 /* Call write_wakeup after queuing new */ #define TTY_PUSH 6 /* n_tty private */ #define TTY_CLOSING 7 /* ->close() in progress */ -#define TTY_DONT_FLIP 8 /* Defer buffer flip */ #define TTY_LDISC 9 /* Line discipline attached */ #define TTY_HW_COOK_OUT 14 /* Hardware can do output cooking */ #define TTY_HW_COOK_IN 15 /* Hardware can do input cooking */ diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h index b368b296d03..58c961c9e17 100644 --- a/include/linux/tty_driver.h +++ b/include/linux/tty_driver.h @@ -157,7 +157,6 @@ struct tty_driver { struct cdev cdev; struct module *owner; const char *driver_name; - const char *devfs_name; const char *name; int name_base; /* offset of printed name */ int major; /* major device number */ @@ -242,8 +241,15 @@ void tty_set_operations(struct tty_driver *driver, struct tty_operations *op); * is also a promise, if the above case is true, not to signal * overruns, either.) * - * TTY_DRIVER_NO_DEVFS --- if set, do not create devfs entries. This - * is only used by tty_register_driver(). + * TTY_DRIVER_DYNAMIC_DEV --- if set, the individual tty devices need + * to be registered with a call to tty_register_device() when the + * device is found in the system and unregistered with a call to + * tty_unregister_device() so the devices will show up + * properly in sysfs. If not set, driver->num entries will be + * created by the tty core in sysfs when tty_register_driver() is + * called. This is to be used by drivers that have tty devices + * that can appear and disappear while the main tty driver is + * registered with the tty core. * * TTY_DRIVER_DEVPTS_MEM -- don't use the standard arrays, instead * use dynamic memory keyed through the devpts filesystem.
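[Editor's note: returning to the new <linux/stacktrace.h> interface a little further up — the caller supplies the entries buffer. A sketch under that assumption; the depth and the demo_dump_stack() helper are invented.]

#include <linux/sched.h>
#include <linux/stacktrace.h>

#define DEMO_TRACE_DEPTH 16

static void demo_dump_stack(void)
{
	unsigned long entries[DEMO_TRACE_DEPTH];
	struct stack_trace trace = {
		.nr_entries	= 0,
		.max_entries	= DEMO_TRACE_DEPTH,
		.entries	= entries,
	};

	/* current task, current context only, skip this helper's frame */
	save_stack_trace(&trace, current, 0, 1);
	print_stack_trace(&trace, 0);	/* indent by 0 spaces */
}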
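[Editor's note: and a sketch of the hotplug flow the TTY_DRIVER_DYNAMIC_DEV comment above describes; demo_driver and the attach/detach hooks are hypothetical, and error handling is elided.]

#include <linux/device.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>

static struct tty_driver *demo_driver;	/* registered once at module load,
					 * with TTY_DRIVER_DYNAMIC_DEV set */

static void demo_port_attach(unsigned index, struct device *dev)
{
	/* Hardware appeared: create the sysfs entry only now, since the
	 * core skipped it at tty_register_driver() time. */
	tty_register_device(demo_driver, index, dev);
}

static void demo_port_detach(unsigned index)
{
	tty_unregister_device(demo_driver, index);
}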
This @@ -252,7 +258,7 @@ void tty_set_operations(struct tty_driver *driver, struct tty_operations *op); #define TTY_DRIVER_INSTALLED 0x0001 #define TTY_DRIVER_RESET_TERMIOS 0x0002 #define TTY_DRIVER_REAL_RAW 0x0004 -#define TTY_DRIVER_NO_DEVFS 0x0008 +#define TTY_DRIVER_DYNAMIC_DEV 0x0008 #define TTY_DRIVER_DEVPTS_MEM 0x0010 /* tty driver types */ diff --git a/include/linux/tty_flip.h b/include/linux/tty_flip.h index 31548303ee3..eb677cf5610 100644 --- a/include/linux/tty_flip.h +++ b/include/linux/tty_flip.h @@ -12,7 +12,7 @@ static inline int tty_insert_flip_char(struct tty_struct *tty, unsigned char ch, char flag) { struct tty_buffer *tb = tty->buf.tail; - if (tb && tb->active && tb->used < tb->size) { + if (tb && tb->used < tb->size) { tb->flag_buf_ptr[tb->used] = flag; tb->char_buf_ptr[tb->used++] = ch; return 1; diff --git a/include/linux/types.h b/include/linux/types.h index a5e46e783ff..3f235660a3c 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -177,8 +177,15 @@ typedef __u64 __bitwise __be64; #ifdef __KERNEL__ typedef unsigned __bitwise__ gfp_t; + +#ifdef CONFIG_RESOURCES_64BIT +typedef u64 resource_size_t; +#else +typedef u32 resource_size_t; #endif +#endif /* __KERNEL__ */ + struct ustat { __kernel_daddr_t f_tfree; __kernel_ino_t f_tinode; diff --git a/include/linux/udp.h b/include/linux/udp.h index bdd39be0940..90223f057d5 100644 --- a/include/linux/udp.h +++ b/include/linux/udp.h @@ -46,7 +46,7 @@ struct udp_sock { unsigned int corkflag; /* Cork is required */ __u16 encap_type; /* Is this an Encapsulation socket? */ /* - * Following member retains the infomation to create a UDP header + * Following member retains the information to create a UDP header * when the socket is uncorked. */ __u16 len; /* total length of pending frames */ diff --git a/include/linux/ufs_fs.h b/include/linux/ufs_fs.h index 914f911325b..fc62887c520 100644 --- a/include/linux/ufs_fs.h +++ b/include/linux/ufs_fs.h @@ -966,7 +966,7 @@ extern void ufs_set_link(struct inode *dir, struct ufs_dir_entry *de, extern struct inode_operations ufs_file_inode_operations; extern const struct file_operations ufs_file_operations; -extern struct address_space_operations ufs_aops; +extern const struct address_space_operations ufs_aops; /* ialloc.c */ extern void ufs_free_inode (struct inode *inode); @@ -993,7 +993,7 @@ extern void ufs_panic (struct super_block *, const char *, const char *, ...) __ extern struct inode_operations ufs_fast_symlink_inode_operations; /* truncate.c */ -extern void ufs_truncate (struct inode *); +extern int ufs_truncate (struct inode *, loff_t); static inline struct ufs_sb_info *UFS_SB(struct super_block *sb) { diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h index dc7c621e464..46919f9f5eb 100644 --- a/include/linux/vermagic.h +++ b/include/linux/vermagic.h @@ -1,4 +1,4 @@ -#include <linux/version.h> +#include <linux/utsrelease.h> #include <linux/module.h> /* Simply sanity version stamp for modules. */ diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h new file mode 100644 index 00000000000..3e0daf54133 --- /dev/null +++ b/include/linux/vmstat.h @@ -0,0 +1,215 @@ +#ifndef _LINUX_VMSTAT_H +#define _LINUX_VMSTAT_H + +#include <linux/types.h> +#include <linux/percpu.h> +#include <linux/config.h> +#include <linux/mmzone.h> +#include <asm/atomic.h> + +#ifdef CONFIG_VM_EVENT_COUNTERS +/* + * Light weight per cpu counter implementation. + * + * Counters should only be incremented and no critical kernel component + * should rely on the counter values. 
+ * + * Counters are handled completely inline. On many platforms the code + * generated will simply be the increment of a global address. + */ + +#define FOR_ALL_ZONES(x) x##_DMA, x##_DMA32, x##_NORMAL, x##_HIGH + +enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, + FOR_ALL_ZONES(PGALLOC), + PGFREE, PGACTIVATE, PGDEACTIVATE, + PGFAULT, PGMAJFAULT, + FOR_ALL_ZONES(PGREFILL), + FOR_ALL_ZONES(PGSTEAL), + FOR_ALL_ZONES(PGSCAN_KSWAPD), + FOR_ALL_ZONES(PGSCAN_DIRECT), + PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL, + PAGEOUTRUN, ALLOCSTALL, PGROTATED, + NR_VM_EVENT_ITEMS +}; + +struct vm_event_state { + unsigned long event[NR_VM_EVENT_ITEMS]; +}; + +DECLARE_PER_CPU(struct vm_event_state, vm_event_states); + +static inline void __count_vm_event(enum vm_event_item item) +{ + __get_cpu_var(vm_event_states.event[item])++; +} + +static inline void count_vm_event(enum vm_event_item item) +{ + get_cpu_var(vm_event_states.event[item])++; + put_cpu(); +} + +static inline void __count_vm_events(enum vm_event_item item, long delta) +{ + __get_cpu_var(vm_event_states.event[item]) += delta; +} + +static inline void count_vm_events(enum vm_event_item item, long delta) +{ + get_cpu_var(vm_event_states.event[item]) += delta; + put_cpu(); +} + +extern void all_vm_events(unsigned long *); +extern void vm_events_fold_cpu(int cpu); + +#else + +/* Disable counters */ +#define get_cpu_vm_events(e) 0L +#define count_vm_event(e) do { } while (0) +#define count_vm_events(e,d) do { } while (0) +#define __count_vm_event(e) do { } while (0) +#define __count_vm_events(e,d) do { } while (0) +#define vm_events_fold_cpu(x) do { } while (0) + +#endif /* CONFIG_VM_EVENT_COUNTERS */ + +#define __count_zone_vm_events(item, zone, delta) \ + __count_vm_events(item##_DMA + zone_idx(zone), delta) + +/* + * Zone based page accounting with per cpu differentials. + */ +extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; + +static inline void zone_page_state_add(long x, struct zone *zone, + enum zone_stat_item item) +{ + atomic_long_add(x, &zone->vm_stat[item]); + atomic_long_add(x, &vm_stat[item]); +} + +static inline unsigned long global_page_state(enum zone_stat_item item) +{ + long x = atomic_long_read(&vm_stat[item]); +#ifdef CONFIG_SMP + if (x < 0) + x = 0; +#endif + return x; +} + +static inline unsigned long zone_page_state(struct zone *zone, + enum zone_stat_item item) +{ + long x = atomic_long_read(&zone->vm_stat[item]); +#ifdef CONFIG_SMP + if (x < 0) + x = 0; +#endif + return x; +} + +#ifdef CONFIG_NUMA +/* + * Determine the per node value of a stat item. This function + * is called frequently in a NUMA machine, so try to be as + * frugal as possible.
+ */ +static inline unsigned long node_page_state(int node, + enum zone_stat_item item) +{ + struct zone *zones = NODE_DATA(node)->node_zones; + + return +#ifndef CONFIG_DMA_IS_NORMAL +#if !defined(CONFIG_DMA_IS_DMA32) && BITS_PER_LONG >= 64 + zone_page_state(&zones[ZONE_DMA32], item) + +#endif + zone_page_state(&zones[ZONE_NORMAL], item) + +#endif +#ifdef CONFIG_HIGHMEM + zone_page_state(&zones[ZONE_HIGHMEM], item) + +#endif + zone_page_state(&zones[ZONE_DMA], item); +} + +extern void zone_statistics(struct zonelist *, struct zone *); + +#else + +#define node_page_state(node, item) global_page_state(item) +#define zone_statistics(_zl,_z) do { } while (0) + +#endif /* CONFIG_NUMA */ + +#define __add_zone_page_state(__z, __i, __d) \ + __mod_zone_page_state(__z, __i, __d) +#define __sub_zone_page_state(__z, __i, __d) \ + __mod_zone_page_state(__z, __i,-(__d)) + +#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d) +#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d)) + +static inline void zap_zone_vm_stats(struct zone *zone) +{ + memset(zone->vm_stat, 0, sizeof(zone->vm_stat)); +} + +extern void inc_zone_state(struct zone *, enum zone_stat_item); + +#ifdef CONFIG_SMP +void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int); +void __inc_zone_page_state(struct page *, enum zone_stat_item); +void __dec_zone_page_state(struct page *, enum zone_stat_item); + +void mod_zone_page_state(struct zone *, enum zone_stat_item, int); +void inc_zone_page_state(struct page *, enum zone_stat_item); +void dec_zone_page_state(struct page *, enum zone_stat_item); + +extern void inc_zone_state(struct zone *, enum zone_stat_item); + +void refresh_cpu_vm_stats(int); +void refresh_vm_stats(void); + +#else /* CONFIG_SMP */ + +/* + * We do not maintain differentials in a single processor configuration. + * The functions directly modify the zone and global counters. + */ +static inline void __mod_zone_page_state(struct zone *zone, + enum zone_stat_item item, int delta) +{ + zone_page_state_add(delta, zone, item); +} + +static inline void __inc_zone_page_state(struct page *page, + enum zone_stat_item item) +{ + atomic_long_inc(&page_zone(page)->vm_stat[item]); + atomic_long_inc(&vm_stat[item]); +} + +static inline void __dec_zone_page_state(struct page *page, + enum zone_stat_item item) +{ + atomic_long_dec(&page_zone(page)->vm_stat[item]); + atomic_long_dec(&vm_stat[item]); +} + +/* + * We only use atomic operations to update counters. So there is no need to + * disable interrupts. + */ +#define inc_zone_page_state __inc_zone_page_state +#define dec_zone_page_state __dec_zone_page_state +#define mod_zone_page_state __mod_zone_page_state + +static inline void refresh_cpu_vm_stats(int cpu) { } +static inline void refresh_vm_stats(void) { } +#endif + +#endif /* _LINUX_VMSTAT_H */ diff --git a/include/linux/wait.h b/include/linux/wait.h index 544e855c7c0..794be7af58a 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -68,7 +68,7 @@ struct task_struct; wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk) #define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \ - .lock = SPIN_LOCK_UNLOCKED, \ + .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ .task_list = { &(name).task_list, &(name).task_list } } #define DECLARE_WAIT_QUEUE_HEAD(name) \ @@ -77,9 +77,15 @@ struct task_struct; #define __WAIT_BIT_KEY_INITIALIZER(word, bit) \ { .flags = word, .bit_nr = bit, } +/* + * lockdep: we want one lock-class for all waitqueue locks. 
+ */ +extern struct lock_class_key waitqueue_lock_key; + static inline void init_waitqueue_head(wait_queue_head_t *q) { spin_lock_init(&q->lock); + lockdep_set_class(&q->lock, &waitqueue_lock_key); INIT_LIST_HEAD(&q->task_list); } diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h index 1192ed8f4fe..011bcfeb9f0 100644 --- a/include/linux/watchdog.h +++ b/include/linux/watchdog.h @@ -28,6 +28,9 @@ struct watchdog_info { #define WDIOC_KEEPALIVE _IOR(WATCHDOG_IOCTL_BASE, 5, int) #define WDIOC_SETTIMEOUT _IOWR(WATCHDOG_IOCTL_BASE, 6, int) #define WDIOC_GETTIMEOUT _IOR(WATCHDOG_IOCTL_BASE, 7, int) +#define WDIOC_SETPRETIMEOUT _IOWR(WATCHDOG_IOCTL_BASE, 8, int) +#define WDIOC_GETPRETIMEOUT _IOR(WATCHDOG_IOCTL_BASE, 9, int) +#define WDIOC_GETTIMELEFT _IOR(WATCHDOG_IOCTL_BASE, 10, int) #define WDIOF_UNKNOWN -1 /* Unknown flag error */ #define WDIOS_UNKNOWN -1 /* Unknown status error */ @@ -38,9 +41,10 @@ struct watchdog_info { #define WDIOF_EXTERN2 0x0008 /* External relay 2 */ #define WDIOF_POWERUNDER 0x0010 /* Power bad/power fault */ #define WDIOF_CARDRESET 0x0020 /* Card previously reset the CPU */ -#define WDIOF_POWEROVER 0x0040 /* Power over voltage */ -#define WDIOF_SETTIMEOUT 0x0080 /* Set timeout (in seconds) */ -#define WDIOF_MAGICCLOSE 0x0100 /* Supports magic close char */ +#define WDIOF_POWEROVER 0x0040 /* Power over voltage */ +#define WDIOF_SETTIMEOUT 0x0080 /* Set timeout (in seconds) */ +#define WDIOF_MAGICCLOSE 0x0100 /* Supports magic close char */ +#define WDIOF_PRETIMEOUT 0x0200 /* Pretimeout (in seconds), get/set */ #define WDIOF_KEEPALIVEPING 0x8000 /* Keep alive ping reply */ #define WDIOS_DISABLECARD 0x0001 /* Turn off the watchdog timer */
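[Editor's note: finally, a user-space sketch of the new pretimeout ioctls above. Whether a given driver honours them is advertised via WDIOF_PRETIMEOUT in its watchdog_info; the device path and timeout values below are conventional examples, not mandated by this header.]

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/watchdog.h>

int main(void)
{
	int fd = open("/dev/watchdog", O_WRONLY);
	int pre = 10, left = 0;

	if (fd < 0)
		return 1;

	/* warn 10 s before the hard timeout fires */
	if (ioctl(fd, WDIOC_SETPRETIMEOUT, &pre) == 0)
		printf("pretimeout armed %d s before reset\n", pre);

	if (ioctl(fd, WDIOC_GETTIMELEFT, &left) == 0)
		printf("%d s left until reset\n", left);

	ioctl(fd, WDIOC_KEEPALIVE, 0);	/* ping the dog */
	write(fd, "V", 1);		/* magic close, if WDIOF_MAGICCLOSE */
	close(fd);
	return 0;
}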