Diffstat (limited to 'include/linux')
125 files changed, 2178 insertions, 1155 deletions
diff --git a/include/linux/8250_pci.h b/include/linux/8250_pci.h index 3209dd46ea7..b24ff086a66 100644 --- a/include/linux/8250_pci.h +++ b/include/linux/8250_pci.h @@ -31,7 +31,7 @@ struct pciserial_board { struct serial_private; struct serial_private * -pciserial_init_ports(struct pci_dev *dev, struct pciserial_board *board); +pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board); void pciserial_remove_ports(struct serial_private *priv); void pciserial_suspend_ports(struct serial_private *priv); void pciserial_resume_ports(struct serial_private *priv); diff --git a/include/linux/Kbuild b/include/linux/Kbuild index e531783e5d7..39da666067b 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild @@ -56,8 +56,6 @@ header-y += dlm_device.h header-y += dlm_netlink.h header-y += dm-ioctl.h header-y += dn.h -header-y += dqblk_v1.h -header-y += dqblk_v2.h header-y += dqblk_xfs.h header-y += efs_fs_sb.h header-y += elf-fdpic.h @@ -134,8 +132,6 @@ header-y += posix_types.h header-y += ppdev.h header-y += prctl.h header-y += qnxtypes.h -header-y += quotaio_v1.h -header-y += quotaio_v2.h header-y += radeonfb.h header-y += raw.h header-y += resource.h @@ -313,6 +309,7 @@ unifdef-y += ptrace.h unifdef-y += qnx4_fs.h unifdef-y += quota.h unifdef-y += random.h +unifdef-y += irqnr.h unifdef-y += reboot.h unifdef-y += reiserfs_fs.h unifdef-y += reiserfs_xattr.h diff --git a/include/linux/aio.h b/include/linux/aio.h index f6b8cf99b59..b16a957030f 100644 --- a/include/linux/aio.h +++ b/include/linux/aio.h @@ -5,6 +5,7 @@ #include <linux/workqueue.h> #include <linux/aio_abi.h> #include <linux/uio.h> +#include <linux/rcupdate.h> #include <asm/atomic.h> @@ -183,7 +184,7 @@ struct kioctx { /* This needs improving */ unsigned long user_id; - struct kioctx *next; + struct hlist_node list; wait_queue_head_t wait; @@ -199,6 +200,8 @@ struct kioctx { struct aio_ring_info ring_info; struct delayed_work wq; + + struct rcu_head rcu_head; }; /* prototypes */ diff --git a/include/linux/audit.h b/include/linux/audit.h index 26c4f6f65a4..67e5dbfc296 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -247,6 +247,18 @@ #define AUDIT_GREATER_THAN_OR_EQUAL (AUDIT_GREATER_THAN|AUDIT_EQUAL) #define AUDIT_OPERATORS (AUDIT_EQUAL|AUDIT_NOT_EQUAL|AUDIT_BIT_MASK) +enum { + Audit_equal, + Audit_not_equal, + Audit_bitmask, + Audit_bittest, + Audit_lt, + Audit_gt, + Audit_le, + Audit_ge, + Audit_bad +}; + /* Status symbols */ /* Mask values */ #define AUDIT_STATUS_ENABLED 0x0001 @@ -373,6 +385,8 @@ struct audit_krule { struct audit_watch *watch; /* associated watch */ struct audit_tree *tree; /* associated watched tree */ struct list_head rlist; /* entry in audit_{watch,tree}.rules list */ + struct list_head list; /* for AUDIT_LIST* purposes only */ + u64 prio; }; struct audit_field { @@ -443,70 +457,56 @@ extern int audit_set_loginuid(struct task_struct *task, uid_t loginuid); #define audit_get_loginuid(t) ((t)->loginuid) #define audit_get_sessionid(t) ((t)->sessionid) extern void audit_log_task_context(struct audit_buffer *ab); -extern int __audit_ipc_obj(struct kern_ipc_perm *ipcp); -extern int __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode); +extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp); +extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode); extern int audit_bprm(struct linux_binprm *bprm); -extern int audit_socketcall(int nargs, unsigned long *args); +extern void audit_socketcall(int nargs, unsigned long 
*args); extern int audit_sockaddr(int len, void *addr); -extern int __audit_fd_pair(int fd1, int fd2); +extern void __audit_fd_pair(int fd1, int fd2); extern int audit_set_macxattr(const char *name); -extern int __audit_mq_open(int oflag, mode_t mode, struct mq_attr __user *u_attr); -extern int __audit_mq_timedsend(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec __user *u_abs_timeout); -extern int __audit_mq_timedreceive(mqd_t mqdes, size_t msg_len, unsigned int __user *u_msg_prio, const struct timespec __user *u_abs_timeout); -extern int __audit_mq_notify(mqd_t mqdes, const struct sigevent __user *u_notification); -extern int __audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat); +extern void __audit_mq_open(int oflag, mode_t mode, struct mq_attr *attr); +extern void __audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec *abs_timeout); +extern void __audit_mq_notify(mqd_t mqdes, const struct sigevent *notification); +extern void __audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat); extern int __audit_log_bprm_fcaps(struct linux_binprm *bprm, const struct cred *new, const struct cred *old); -extern int __audit_log_capset(pid_t pid, const struct cred *new, const struct cred *old); +extern void __audit_log_capset(pid_t pid, const struct cred *new, const struct cred *old); -static inline int audit_ipc_obj(struct kern_ipc_perm *ipcp) +static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp) { if (unlikely(!audit_dummy_context())) - return __audit_ipc_obj(ipcp); - return 0; -} -static inline int audit_fd_pair(int fd1, int fd2) -{ - if (unlikely(!audit_dummy_context())) - return __audit_fd_pair(fd1, fd2); - return 0; + __audit_ipc_obj(ipcp); } -static inline int audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode) +static inline void audit_fd_pair(int fd1, int fd2) { if (unlikely(!audit_dummy_context())) - return __audit_ipc_set_perm(qbytes, uid, gid, mode); - return 0; + __audit_fd_pair(fd1, fd2); } -static inline int audit_mq_open(int oflag, mode_t mode, struct mq_attr __user *u_attr) +static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode) { if (unlikely(!audit_dummy_context())) - return __audit_mq_open(oflag, mode, u_attr); - return 0; + __audit_ipc_set_perm(qbytes, uid, gid, mode); } -static inline int audit_mq_timedsend(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec __user *u_abs_timeout) +static inline void audit_mq_open(int oflag, mode_t mode, struct mq_attr *attr) { if (unlikely(!audit_dummy_context())) - return __audit_mq_timedsend(mqdes, msg_len, msg_prio, u_abs_timeout); - return 0; + __audit_mq_open(oflag, mode, attr); } -static inline int audit_mq_timedreceive(mqd_t mqdes, size_t msg_len, unsigned int __user *u_msg_prio, const struct timespec __user *u_abs_timeout) +static inline void audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec *abs_timeout) { if (unlikely(!audit_dummy_context())) - return __audit_mq_timedreceive(mqdes, msg_len, u_msg_prio, u_abs_timeout); - return 0; + __audit_mq_sendrecv(mqdes, msg_len, msg_prio, abs_timeout); } -static inline int audit_mq_notify(mqd_t mqdes, const struct sigevent __user *u_notification) +static inline void audit_mq_notify(mqd_t mqdes, const struct sigevent *notification) { if (unlikely(!audit_dummy_context())) - return __audit_mq_notify(mqdes, u_notification); - return 0; + __audit_mq_notify(mqdes, notification); } -static inline 
int audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat) +static inline void audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat) { if (unlikely(!audit_dummy_context())) - return __audit_mq_getsetattr(mqdes, mqstat); - return 0; + __audit_mq_getsetattr(mqdes, mqstat); } static inline int audit_log_bprm_fcaps(struct linux_binprm *bprm, @@ -518,12 +518,11 @@ static inline int audit_log_bprm_fcaps(struct linux_binprm *bprm, return 0; } -static inline int audit_log_capset(pid_t pid, const struct cred *new, +static inline void audit_log_capset(pid_t pid, const struct cred *new, const struct cred *old) { if (unlikely(!audit_dummy_context())) - return __audit_log_capset(pid, new, old); - return 0; + __audit_log_capset(pid, new, old); } extern int audit_n_rules; @@ -546,20 +545,19 @@ extern int audit_signals; #define audit_get_loginuid(t) (-1) #define audit_get_sessionid(t) (-1) #define audit_log_task_context(b) do { ; } while (0) -#define audit_ipc_obj(i) ({ 0; }) -#define audit_ipc_set_perm(q,u,g,m) ({ 0; }) +#define audit_ipc_obj(i) ((void)0) +#define audit_ipc_set_perm(q,u,g,m) ((void)0) #define audit_bprm(p) ({ 0; }) -#define audit_socketcall(n,a) ({ 0; }) -#define audit_fd_pair(n,a) ({ 0; }) +#define audit_socketcall(n,a) ((void)0) +#define audit_fd_pair(n,a) ((void)0) #define audit_sockaddr(len, addr) ({ 0; }) #define audit_set_macxattr(n) do { ; } while (0) -#define audit_mq_open(o,m,a) ({ 0; }) -#define audit_mq_timedsend(d,l,p,t) ({ 0; }) -#define audit_mq_timedreceive(d,l,p,t) ({ 0; }) -#define audit_mq_notify(d,n) ({ 0; }) -#define audit_mq_getsetattr(d,s) ({ 0; }) +#define audit_mq_open(o,m,a) ((void)0) +#define audit_mq_sendrecv(d,l,p,t) ((void)0) +#define audit_mq_notify(d,n) ((void)0) +#define audit_mq_getsetattr(d,s) ((void)0) #define audit_log_bprm_fcaps(b, ncr, ocr) ({ 0; }) -#define audit_log_capset(pid, ncr, ocr) ({ 0; }) +#define audit_log_capset(pid, ncr, ocr) ((void)0) #define audit_ptrace(t) ((void)0) #define audit_n_rules 0 #define audit_signals 0 diff --git a/include/linux/bio.h b/include/linux/bio.h index 6a642098e5c..18462c5b8ff 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -90,10 +90,11 @@ struct bio { unsigned int bi_comp_cpu; /* completion CPU */ + atomic_t bi_cnt; /* pin count */ + struct bio_vec *bi_io_vec; /* the actual vec list */ bio_end_io_t *bi_end_io; - atomic_t bi_cnt; /* pin count */ void *bi_private; #if defined(CONFIG_BLK_DEV_INTEGRITY) @@ -101,6 +102,13 @@ struct bio { #endif bio_destructor_t *bi_destructor; /* destructor */ + + /* + * We can inline a number of vecs at the end of the bio, to avoid + * double allocations for a small number of bio_vecs. This member + * MUST obviously be kept at the very end of the bio. 
+ */ + struct bio_vec bi_inline_vecs[0]; }; /* @@ -117,6 +125,7 @@ struct bio { #define BIO_CPU_AFFINE 8 /* complete bio on same CPU as submitted */ #define BIO_NULL_MAPPED 9 /* contains invalid user pages */ #define BIO_FS_INTEGRITY 10 /* fs owns integrity data, not block layer */ +#define BIO_QUIET 11 /* Make BIO Quiet */ #define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag))) /* @@ -211,6 +220,11 @@ static inline void *bio_data(struct bio *bio) return NULL; } +static inline int bio_has_allocated_vec(struct bio *bio) +{ + return bio->bi_io_vec && bio->bi_io_vec != bio->bi_inline_vecs; +} + /* * will die */ @@ -332,7 +346,7 @@ struct bio_pair { extern struct bio_pair *bio_split(struct bio *bi, int first_sectors); extern void bio_pair_release(struct bio_pair *dbio); -extern struct bio_set *bioset_create(int, int); +extern struct bio_set *bioset_create(unsigned int, unsigned int); extern void bioset_free(struct bio_set *); extern struct bio *bio_alloc(gfp_t, int); @@ -377,6 +391,7 @@ extern struct bio *bio_copy_user_iov(struct request_queue *, extern int bio_uncopy_user(struct bio *); void zero_fill_bio(struct bio *bio); extern struct bio_vec *bvec_alloc_bs(gfp_t, int, unsigned long *, struct bio_set *); +extern void bvec_free_bs(struct bio_set *, struct bio_vec *, unsigned int); extern unsigned int bvec_nr_vecs(unsigned short idx); /* @@ -395,13 +410,17 @@ static inline void bio_set_completion_cpu(struct bio *bio, unsigned int cpu) */ #define BIO_POOL_SIZE 2 #define BIOVEC_NR_POOLS 6 +#define BIOVEC_MAX_IDX (BIOVEC_NR_POOLS - 1) struct bio_set { + struct kmem_cache *bio_slab; + unsigned int front_pad; + mempool_t *bio_pool; #if defined(CONFIG_BLK_DEV_INTEGRITY) mempool_t *bio_integrity_pool; #endif - mempool_t *bvec_pools[BIOVEC_NR_POOLS]; + mempool_t *bvec_pool; }; struct biovec_slab { @@ -411,6 +430,7 @@ struct biovec_slab { }; extern struct bio_set *fs_bio_set; +extern struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly; /* * a small number of entries is fine, not going to be performance critical. 
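The bi_inline_vecs[0] member added to struct bio above is the usual C trailing-array trick: the struct and a small vector are carved out of a single allocation, and bi_io_vec is pointed at the inline storage, which is exactly what the new bio_has_allocated_vec() test distinguishes. Below is a minimal, self-contained sketch of that allocation pattern with made-up names (struct sample, sample_alloc); it illustrates the idea only and is not the kernel's bio_alloc_bioset() code. The zero-length array is the GNU extension the kernel itself relies on.

#include <stdlib.h>

struct vec {
	void *page;
	unsigned int len;
	unsigned int offset;
};

struct sample {
	unsigned int nr_vecs;
	struct vec *vec_ptr;		/* points at inline_vecs for small requests */
	struct vec inline_vecs[0];	/* storage allocated together with the struct */
};

static struct sample *sample_alloc(unsigned int nr)
{
	/* one allocation covers the struct plus nr trailing elements */
	struct sample *s = malloc(sizeof(*s) + nr * sizeof(struct vec));

	if (!s)
		return NULL;
	s->nr_vecs = nr;
	s->vec_ptr = s->inline_vecs;	/* no second allocation needed */
	return s;
}

static int has_allocated_vec(struct sample *s)
{
	/* mirrors bio_has_allocated_vec(): true only if vec_ptr was allocated separately */
	return s->vec_ptr && s->vec_ptr != s->inline_vecs;
}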
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index a08c33a26ca..2878811c613 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -137,9 +137,12 @@ extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits); (1UL<<((nbits) % BITS_PER_LONG))-1 : ~0UL \ ) +#define small_const_nbits(nbits) \ + (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG) + static inline void bitmap_zero(unsigned long *dst, int nbits) { - if (nbits <= BITS_PER_LONG) + if (small_const_nbits(nbits)) *dst = 0UL; else { int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); @@ -150,7 +153,7 @@ static inline void bitmap_zero(unsigned long *dst, int nbits) static inline void bitmap_fill(unsigned long *dst, int nbits) { size_t nlongs = BITS_TO_LONGS(nbits); - if (nlongs > 1) { + if (!small_const_nbits(nbits)) { int len = (nlongs - 1) * sizeof(unsigned long); memset(dst, 0xff, len); } @@ -160,7 +163,7 @@ static inline void bitmap_fill(unsigned long *dst, int nbits) static inline void bitmap_copy(unsigned long *dst, const unsigned long *src, int nbits) { - if (nbits <= BITS_PER_LONG) + if (small_const_nbits(nbits)) *dst = *src; else { int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); @@ -171,7 +174,7 @@ static inline void bitmap_copy(unsigned long *dst, const unsigned long *src, static inline void bitmap_and(unsigned long *dst, const unsigned long *src1, const unsigned long *src2, int nbits) { - if (nbits <= BITS_PER_LONG) + if (small_const_nbits(nbits)) *dst = *src1 & *src2; else __bitmap_and(dst, src1, src2, nbits); @@ -180,7 +183,7 @@ static inline void bitmap_and(unsigned long *dst, const unsigned long *src1, static inline void bitmap_or(unsigned long *dst, const unsigned long *src1, const unsigned long *src2, int nbits) { - if (nbits <= BITS_PER_LONG) + if (small_const_nbits(nbits)) *dst = *src1 | *src2; else __bitmap_or(dst, src1, src2, nbits); @@ -189,7 +192,7 @@ static inline void bitmap_or(unsigned long *dst, const unsigned long *src1, static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1, const unsigned long *src2, int nbits) { - if (nbits <= BITS_PER_LONG) + if (small_const_nbits(nbits)) *dst = *src1 ^ *src2; else __bitmap_xor(dst, src1, src2, nbits); @@ -198,7 +201,7 @@ static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1, static inline void bitmap_andnot(unsigned long *dst, const unsigned long *src1, const unsigned long *src2, int nbits) { - if (nbits <= BITS_PER_LONG) + if (small_const_nbits(nbits)) *dst = *src1 & ~(*src2); else __bitmap_andnot(dst, src1, src2, nbits); @@ -207,7 +210,7 @@ static inline void bitmap_andnot(unsigned long *dst, const unsigned long *src1, static inline void bitmap_complement(unsigned long *dst, const unsigned long *src, int nbits) { - if (nbits <= BITS_PER_LONG) + if (small_const_nbits(nbits)) *dst = ~(*src) & BITMAP_LAST_WORD_MASK(nbits); else __bitmap_complement(dst, src, nbits); @@ -216,7 +219,7 @@ static inline void bitmap_complement(unsigned long *dst, const unsigned long *sr static inline int bitmap_equal(const unsigned long *src1, const unsigned long *src2, int nbits) { - if (nbits <= BITS_PER_LONG) + if (small_const_nbits(nbits)) return ! 
((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits)); else return __bitmap_equal(src1, src2, nbits); @@ -225,7 +228,7 @@ static inline int bitmap_equal(const unsigned long *src1, static inline int bitmap_intersects(const unsigned long *src1, const unsigned long *src2, int nbits) { - if (nbits <= BITS_PER_LONG) + if (small_const_nbits(nbits)) return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0; else return __bitmap_intersects(src1, src2, nbits); @@ -234,7 +237,7 @@ static inline int bitmap_intersects(const unsigned long *src1, static inline int bitmap_subset(const unsigned long *src1, const unsigned long *src2, int nbits) { - if (nbits <= BITS_PER_LONG) + if (small_const_nbits(nbits)) return ! ((*src1 & ~(*src2)) & BITMAP_LAST_WORD_MASK(nbits)); else return __bitmap_subset(src1, src2, nbits); @@ -242,7 +245,7 @@ static inline int bitmap_subset(const unsigned long *src1, static inline int bitmap_empty(const unsigned long *src, int nbits) { - if (nbits <= BITS_PER_LONG) + if (small_const_nbits(nbits)) return ! (*src & BITMAP_LAST_WORD_MASK(nbits)); else return __bitmap_empty(src, nbits); @@ -250,7 +253,7 @@ static inline int bitmap_empty(const unsigned long *src, int nbits) static inline int bitmap_full(const unsigned long *src, int nbits) { - if (nbits <= BITS_PER_LONG) + if (small_const_nbits(nbits)) return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits)); else return __bitmap_full(src, nbits); @@ -258,7 +261,7 @@ static inline int bitmap_full(const unsigned long *src, int nbits) static inline int bitmap_weight(const unsigned long *src, int nbits) { - if (nbits <= BITS_PER_LONG) + if (small_const_nbits(nbits)) return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits)); return __bitmap_weight(src, nbits); } @@ -266,7 +269,7 @@ static inline int bitmap_weight(const unsigned long *src, int nbits) static inline void bitmap_shift_right(unsigned long *dst, const unsigned long *src, int n, int nbits) { - if (nbits <= BITS_PER_LONG) + if (small_const_nbits(nbits)) *dst = *src >> n; else __bitmap_shift_right(dst, src, n, nbits); @@ -275,7 +278,7 @@ static inline void bitmap_shift_right(unsigned long *dst, static inline void bitmap_shift_left(unsigned long *dst, const unsigned long *src, int n, int nbits) { - if (nbits <= BITS_PER_LONG) + if (small_const_nbits(nbits)) *dst = (*src << n) & BITMAP_LAST_WORD_MASK(nbits); else __bitmap_shift_left(dst, src, n, nbits); diff --git a/include/linux/bitops.h b/include/linux/bitops.h index 024f2b02724..61829139795 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -134,9 +134,20 @@ extern unsigned long find_first_bit(const unsigned long *addr, */ extern unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size); - #endif /* CONFIG_GENERIC_FIND_FIRST_BIT */ +#ifdef CONFIG_GENERIC_FIND_LAST_BIT +/** + * find_last_bit - find the last set bit in a memory region + * @addr: The address to start the search at + * @size: The maximum size to search + * + * Returns the bit number of the first set bit, or size. 
+ */ +extern unsigned long find_last_bit(const unsigned long *addr, + unsigned long size); +#endif /* CONFIG_GENERIC_FIND_LAST_BIT */ + #ifdef CONFIG_GENERIC_FIND_NEXT_BIT /** diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 031a315c050..7035cec583b 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -26,7 +26,6 @@ struct scsi_ioctl_command; struct request_queue; struct elevator_queue; -typedef struct elevator_queue elevator_t; struct request_pm_state; struct blk_trace; struct request; @@ -313,7 +312,7 @@ struct request_queue */ struct list_head queue_head; struct request *last_merge; - elevator_t *elevator; + struct elevator_queue *elevator; /* * the queue request freelist, one for reads and one for writes @@ -449,6 +448,7 @@ struct request_queue #define QUEUE_FLAG_FAIL_IO 12 /* fake timeout */ #define QUEUE_FLAG_STACKABLE 13 /* supports request stacking */ #define QUEUE_FLAG_NONROT 14 /* non-rotational device (SSD) */ +#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */ static inline int queue_is_locked(struct request_queue *q) { @@ -522,22 +522,32 @@ enum { * TAG_FLUSH : ordering by tag w/ pre and post flushes * TAG_FUA : ordering by tag w/ pre flush and FUA write */ - QUEUE_ORDERED_NONE = 0x00, - QUEUE_ORDERED_DRAIN = 0x01, - QUEUE_ORDERED_TAG = 0x02, - - QUEUE_ORDERED_PREFLUSH = 0x10, - QUEUE_ORDERED_POSTFLUSH = 0x20, - QUEUE_ORDERED_FUA = 0x40, - - QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN | - QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH, - QUEUE_ORDERED_DRAIN_FUA = QUEUE_ORDERED_DRAIN | - QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA, - QUEUE_ORDERED_TAG_FLUSH = QUEUE_ORDERED_TAG | - QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH, - QUEUE_ORDERED_TAG_FUA = QUEUE_ORDERED_TAG | - QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA, + QUEUE_ORDERED_BY_DRAIN = 0x01, + QUEUE_ORDERED_BY_TAG = 0x02, + QUEUE_ORDERED_DO_PREFLUSH = 0x10, + QUEUE_ORDERED_DO_BAR = 0x20, + QUEUE_ORDERED_DO_POSTFLUSH = 0x40, + QUEUE_ORDERED_DO_FUA = 0x80, + + QUEUE_ORDERED_NONE = 0x00, + + QUEUE_ORDERED_DRAIN = QUEUE_ORDERED_BY_DRAIN | + QUEUE_ORDERED_DO_BAR, + QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN | + QUEUE_ORDERED_DO_PREFLUSH | + QUEUE_ORDERED_DO_POSTFLUSH, + QUEUE_ORDERED_DRAIN_FUA = QUEUE_ORDERED_DRAIN | + QUEUE_ORDERED_DO_PREFLUSH | + QUEUE_ORDERED_DO_FUA, + + QUEUE_ORDERED_TAG = QUEUE_ORDERED_BY_TAG | + QUEUE_ORDERED_DO_BAR, + QUEUE_ORDERED_TAG_FLUSH = QUEUE_ORDERED_TAG | + QUEUE_ORDERED_DO_PREFLUSH | + QUEUE_ORDERED_DO_POSTFLUSH, + QUEUE_ORDERED_TAG_FUA = QUEUE_ORDERED_TAG | + QUEUE_ORDERED_DO_PREFLUSH | + QUEUE_ORDERED_DO_FUA, /* * Ordered operation sequence @@ -585,7 +595,6 @@ enum { #define blk_fua_rq(rq) ((rq)->cmd_flags & REQ_FUA) #define blk_discard_rq(rq) ((rq)->cmd_flags & REQ_DISCARD) #define blk_bidi_rq(rq) ((rq)->next_rq != NULL) -#define blk_empty_barrier(rq) (blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors) /* rq->queuelist of dequeued request must be list_empty() */ #define blk_queued_rq(rq) (!list_empty(&(rq)->queuelist)) @@ -855,10 +864,10 @@ extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *); extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *); -extern int blk_do_ordered(struct request_queue *, struct request **); +extern bool blk_do_ordered(struct request_queue *, struct request **); extern unsigned 
blk_ordered_cur_seq(struct request_queue *); extern unsigned blk_ordered_req_seq(struct request *); -extern void blk_ordered_complete_seq(struct request_queue *, unsigned, int); +extern bool blk_ordered_complete_seq(struct request_queue *, unsigned, int); extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); extern void blk_dump_rq_flags(struct request *, char *); @@ -977,7 +986,6 @@ static inline void put_dev_sector(Sector p) struct work_struct; int kblockd_schedule_work(struct request_queue *q, struct work_struct *work); -void kblockd_flush_work(struct work_struct *work); #define MODULE_ALIAS_BLOCKDEV(major,minor) \ MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor)) diff --git a/include/linux/blockgroup_lock.h b/include/linux/blockgroup_lock.h index 8607312983b..e44b88ba552 100644 --- a/include/linux/blockgroup_lock.h +++ b/include/linux/blockgroup_lock.h @@ -53,7 +53,10 @@ static inline void bgl_lock_init(struct blockgroup_lock *bgl) * The accessor is a macro so we can embed a blockgroup_lock into different * superblock types */ -#define sb_bgl_lock(sb, block_group) \ - (&(sb)->s_blockgroup_lock.locks[(block_group) & (NR_BG_LOCKS-1)].lock) +static inline spinlock_t * +bgl_lock_ptr(struct blockgroup_lock *bgl, unsigned int block_group) +{ + return &bgl->locks[(block_group) & (NR_BG_LOCKS-1)].lock; +} #endif diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h index 777dbf695d4..27b1bcffe40 100644 --- a/include/linux/bottom_half.h +++ b/include/linux/bottom_half.h @@ -2,7 +2,6 @@ #define _LINUX_BH_H extern void local_bh_disable(void); -extern void __local_bh_enable(void); extern void _local_bh_enable(void); extern void local_bh_enable(void); extern void local_bh_enable_ip(unsigned long ip); diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 3ce64b90118..8605f8a74df 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -35,6 +35,7 @@ enum bh_state_bits { BH_Ordered, /* ordered write */ BH_Eopnotsupp, /* operation not supported (barrier) */ BH_Unwritten, /* Buffer is allocated on disk but not written */ + BH_Quiet, /* Buffer Error Prinks to be quiet */ BH_PrivateStart,/* not a state bit, but the first bit available * for private allocation by other entities diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h index ed3a5d473e5..cea153697ec 100644 --- a/include/linux/clockchips.h +++ b/include/linux/clockchips.h @@ -82,13 +82,13 @@ struct clock_event_device { int shift; int rating; int irq; - cpumask_t cpumask; + const struct cpumask *cpumask; int (*set_next_event)(unsigned long evt, struct clock_event_device *); void (*set_mode)(enum clock_event_mode mode, struct clock_event_device *); void (*event_handler)(struct clock_event_device *); - void (*broadcast)(cpumask_t mask); + void (*broadcast)(const struct cpumask *mask); struct list_head list; enum clock_event_mode mode; ktime_t next_event; diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 5c8351b859f..af40f8eb86f 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -61,3 +61,8 @@ #define noinline __attribute__((noinline)) #define __attribute_const__ __attribute__((__const__)) #define __maybe_unused __attribute__((unused)) + +#define __gcc_header(x) #x +#define _gcc_header(x) __gcc_header(linux/compiler-gcc##x.h) +#define gcc_header(x) _gcc_header(x) +#include gcc_header(__GNUC__) diff --git a/include/linux/compiler-gcc3.h 
b/include/linux/compiler-gcc3.h index e5eb795f78a..8005effc04f 100644 --- a/include/linux/compiler-gcc3.h +++ b/include/linux/compiler-gcc3.h @@ -2,8 +2,9 @@ #error "Please don't include <linux/compiler-gcc3.h> directly, include <linux/compiler.h> instead." #endif -/* These definitions are for GCC v3.x. */ -#include <linux/compiler-gcc.h> +#if __GNUC_MINOR__ < 2 +# error Sorry, your compiler is too old - please upgrade it. +#endif #if __GNUC_MINOR__ >= 3 # define __used __attribute__((__used__)) diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h index 974f5b7bb20..09992718f9e 100644 --- a/include/linux/compiler-gcc4.h +++ b/include/linux/compiler-gcc4.h @@ -2,8 +2,10 @@ #error "Please don't include <linux/compiler-gcc4.h> directly, include <linux/compiler.h> instead." #endif -/* These definitions are for GCC v4.x. */ -#include <linux/compiler-gcc.h> +/* GCC 4.1.[01] miscompiles __weak */ +#if __GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ <= 1 +# error Your version of gcc miscompiles the __weak directive +#endif #define __used __attribute__((__used__)) #define __must_check __attribute__((warn_unused_result)) @@ -16,7 +18,7 @@ */ #define uninitialized_var(x) x = x -#if !(__GNUC__ == 4 && __GNUC_MINOR__ < 3) +#if __GNUC_MINOR__ >= 3 /* Mark functions as cold. gcc will assume any path leading to a call to them will be unlikely. This means a lot of manual unlikely()s are unnecessary now for any paths leading to the usual suspects diff --git a/include/linux/compiler.h b/include/linux/compiler.h index ea7c6be354b..d95da1020f1 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -36,12 +36,8 @@ extern void __chk_io_ptr(const volatile void __iomem *); #ifdef __KERNEL__ -#if __GNUC__ >= 4 -# include <linux/compiler-gcc4.h> -#elif __GNUC__ == 3 && __GNUC_MINOR__ >= 2 -# include <linux/compiler-gcc3.h> -#else -# error Sorry, your compiler is too old/not recognized. 
+#ifdef __GNUC__ +#include <linux/compiler-gcc.h> #endif #define notrace __attribute__((no_instrument_function)) diff --git a/include/linux/console.h b/include/linux/console.h index 248e6e3b9b7..a67a90cf826 100644 --- a/include/linux/console.h +++ b/include/linux/console.h @@ -153,4 +153,8 @@ void vcs_remove_sysfs(struct tty_struct *tty); #define VESA_HSYNC_SUSPEND 2 #define VESA_POWERDOWN 3 +#ifdef CONFIG_VGA_CONSOLE +extern bool vgacon_text_force(void); +#endif + #endif /* _LINUX_CONSOLE_H */ diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 1ee608fd7b7..484b3abf61b 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -234,6 +234,7 @@ struct cpufreq_driver { int (*suspend) (struct cpufreq_policy *policy, pm_message_t pmsg); int (*resume) (struct cpufreq_policy *policy); struct freq_attr **attr; + bool hide_interface; }; /* flags */ diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 21e1dd43e52..9f315382610 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -144,6 +144,7 @@ typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t; extern cpumask_t _unused_cpumask_arg_; +#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS #define cpu_set(cpu, dst) __cpu_set((cpu), &(dst)) static inline void __cpu_set(int cpu, volatile cpumask_t *dstp) { @@ -267,6 +268,26 @@ static inline void __cpus_shift_left(cpumask_t *dstp, { bitmap_shift_left(dstp->bits, srcp->bits, n, nbits); } +#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */ + +/** + * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask * + * @bitmap: the bitmap + * + * There are a few places where cpumask_var_t isn't appropriate and + * static cpumasks must be used (eg. very early boot), yet we don't + * expose the definition of 'struct cpumask'. + * + * This does the conversion, and can be used as a constant initializer. + */ +#define to_cpumask(bitmap) \ + ((struct cpumask *)(1 ? (bitmap) \ + : (void *)sizeof(__check_is_bitmap(bitmap)))) + +static inline int __check_is_bitmap(const unsigned long *bitmap) +{ + return 1; +} /* * Special-case data structure for "single bit set only" constant CPU masks. 
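The to_cpumask() macro introduced in the hunk above leans on a conditional expression: the never-taken branch passes the argument through __check_is_bitmap() inside sizeof, so the argument is type-checked at compile time without being evaluated, while the whole expression still works as a constant initializer. A hedged, stand-alone illustration of the same trick follows, using hypothetical names (struct mask, to_mask, check_is_bitmap) rather than the kernel definitions.

#include <stdio.h>

#define MY_NBITS	64
#define LONGS(n)	(((n) + 8 * sizeof(unsigned long) - 1) / (8 * sizeof(unsigned long)))

struct mask { unsigned long bits[LONGS(MY_NBITS)]; };

static inline int check_is_bitmap(const unsigned long *bitmap)
{
	return 1;
}

/* same shape as to_cpumask(): the never-taken branch only type-checks 'bitmap' */
#define to_mask(bitmap) \
	((struct mask *)(1 ? (bitmap) : (void *)sizeof(check_is_bitmap(bitmap))))

static unsigned long boot_bits[LONGS(MY_NBITS)] = { 0x5UL };

int main(void)
{
	struct mask *m = to_mask(boot_bits);	/* ok: boot_bits is an unsigned long array */
	/* to_mask(&m) would draw an incompatible-pointer warning from check_is_bitmap() */
	printf("%lx\n", m->bits[0]);
	return 0;
}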
@@ -278,13 +299,14 @@ static inline void __cpus_shift_left(cpumask_t *dstp, extern const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)]; -static inline const cpumask_t *get_cpu_mask(unsigned int cpu) +static inline const struct cpumask *get_cpu_mask(unsigned int cpu) { const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG]; p -= cpu / BITS_PER_LONG; - return (const cpumask_t *)p; + return to_cpumask(p); } +#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS /* * In cases where we take the address of the cpumask immediately, * gcc optimizes it out (it's a constant) and there's no huge stack @@ -339,36 +361,6 @@ extern cpumask_t cpu_mask_all; #endif #define CPUMASK_PTR(v, m) cpumask_t *v = &(m->v) -#define cpumask_scnprintf(buf, len, src) \ - __cpumask_scnprintf((buf), (len), &(src), NR_CPUS) -static inline int __cpumask_scnprintf(char *buf, int len, - const cpumask_t *srcp, int nbits) -{ - return bitmap_scnprintf(buf, len, srcp->bits, nbits); -} - -#define cpumask_parse_user(ubuf, ulen, dst) \ - __cpumask_parse_user((ubuf), (ulen), &(dst), NR_CPUS) -static inline int __cpumask_parse_user(const char __user *buf, int len, - cpumask_t *dstp, int nbits) -{ - return bitmap_parse_user(buf, len, dstp->bits, nbits); -} - -#define cpulist_scnprintf(buf, len, src) \ - __cpulist_scnprintf((buf), (len), &(src), NR_CPUS) -static inline int __cpulist_scnprintf(char *buf, int len, - const cpumask_t *srcp, int nbits) -{ - return bitmap_scnlistprintf(buf, len, srcp->bits, nbits); -} - -#define cpulist_parse(buf, dst) __cpulist_parse((buf), &(dst), NR_CPUS) -static inline int __cpulist_parse(const char *buf, cpumask_t *dstp, int nbits) -{ - return bitmap_parselist(buf, dstp->bits, nbits); -} - #define cpu_remap(oldbit, old, new) \ __cpu_remap((oldbit), &(old), &(new), NR_CPUS) static inline int __cpu_remap(int oldbit, @@ -400,19 +392,22 @@ static inline void __cpus_fold(cpumask_t *dstp, const cpumask_t *origp, { bitmap_fold(dstp->bits, origp->bits, sz, nbits); } +#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */ #if NR_CPUS == 1 #define nr_cpu_ids 1 +#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS #define first_cpu(src) ({ (void)(src); 0; }) #define next_cpu(n, src) ({ (void)(src); 1; }) #define any_online_cpu(mask) 0 #define for_each_cpu_mask(cpu, mask) \ for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) - +#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */ #else /* NR_CPUS > 1 */ extern int nr_cpu_ids; +#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS int __first_cpu(const cpumask_t *srcp); int __next_cpu(int n, const cpumask_t *srcp); int __any_online_cpu(const cpumask_t *mask); @@ -424,8 +419,10 @@ int __any_online_cpu(const cpumask_t *mask); for ((cpu) = -1; \ (cpu) = next_cpu((cpu), (mask)), \ (cpu) < NR_CPUS; ) +#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */ #endif +#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS #if NR_CPUS <= 64 #define next_cpu_nr(n, src) next_cpu(n, src) @@ -443,77 +440,67 @@ int __next_cpu_nr(int n, const cpumask_t *srcp); (cpu) < nr_cpu_ids; ) #endif /* NR_CPUS > 64 */ +#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */ /* * The following particular system cpumasks and operations manage - * possible, present, active and online cpus. Each of them is a fixed size - * bitmap of size NR_CPUS. + * possible, present, active and online cpus. 
* - * #ifdef CONFIG_HOTPLUG_CPU - * cpu_possible_map - has bit 'cpu' set iff cpu is populatable - * cpu_present_map - has bit 'cpu' set iff cpu is populated - * cpu_online_map - has bit 'cpu' set iff cpu available to scheduler - * cpu_active_map - has bit 'cpu' set iff cpu available to migration - * #else - * cpu_possible_map - has bit 'cpu' set iff cpu is populated - * cpu_present_map - copy of cpu_possible_map - * cpu_online_map - has bit 'cpu' set iff cpu available to scheduler - * #endif + * cpu_possible_mask- has bit 'cpu' set iff cpu is populatable + * cpu_present_mask - has bit 'cpu' set iff cpu is populated + * cpu_online_mask - has bit 'cpu' set iff cpu available to scheduler + * cpu_active_mask - has bit 'cpu' set iff cpu available to migration * - * In either case, NR_CPUS is fixed at compile time, as the static - * size of these bitmaps. The cpu_possible_map is fixed at boot - * time, as the set of CPU id's that it is possible might ever - * be plugged in at anytime during the life of that system boot. - * The cpu_present_map is dynamic(*), representing which CPUs - * are currently plugged in. And cpu_online_map is the dynamic - * subset of cpu_present_map, indicating those CPUs available - * for scheduling. + * If !CONFIG_HOTPLUG_CPU, present == possible, and active == online. * - * If HOTPLUG is enabled, then cpu_possible_map is forced to have + * The cpu_possible_mask is fixed at boot time, as the set of CPU id's + * that it is possible might ever be plugged in at anytime during the + * life of that system boot. The cpu_present_mask is dynamic(*), + * representing which CPUs are currently plugged in. And + * cpu_online_mask is the dynamic subset of cpu_present_mask, + * indicating those CPUs available for scheduling. + * + * If HOTPLUG is enabled, then cpu_possible_mask is forced to have * all NR_CPUS bits set, otherwise it is just the set of CPUs that * ACPI reports present at boot. * - * If HOTPLUG is enabled, then cpu_present_map varies dynamically, + * If HOTPLUG is enabled, then cpu_present_mask varies dynamically, * depending on what ACPI reports as currently plugged in, otherwise - * cpu_present_map is just a copy of cpu_possible_map. + * cpu_present_mask is just a copy of cpu_possible_mask. * - * (*) Well, cpu_present_map is dynamic in the hotplug case. If not - * hotplug, it's a copy of cpu_possible_map, hence fixed at boot. + * (*) Well, cpu_present_mask is dynamic in the hotplug case. If not + * hotplug, it's a copy of cpu_possible_mask, hence fixed at boot. * * Subtleties: * 1) UP arch's (NR_CPUS == 1, CONFIG_SMP not defined) hardcode * assumption that their single CPU is online. The UP - * cpu_{online,possible,present}_maps are placebos. Changing them + * cpu_{online,possible,present}_masks are placebos. Changing them * will have no useful affect on the following num_*_cpus() * and cpu_*() macros in the UP case. This ugliness is a UP * optimization - don't waste any instructions or memory references * asking if you're online or how many CPUs there are if there is * only one CPU. - * 2) Most SMP arch's #define some of these maps to be some - * other map specific to that arch. Therefore, the following - * must be #define macros, not inlines. To see why, examine - * the assembly code produced by the following. 
Note that - * set1() writes phys_x_map, but set2() writes x_map: - * int x_map, phys_x_map; - * #define set1(a) x_map = a - * inline void set2(int a) { x_map = a; } - * #define x_map phys_x_map - * main(){ set1(3); set2(5); } */ -extern cpumask_t cpu_possible_map; -extern cpumask_t cpu_online_map; -extern cpumask_t cpu_present_map; -extern cpumask_t cpu_active_map; +extern const struct cpumask *const cpu_possible_mask; +extern const struct cpumask *const cpu_online_mask; +extern const struct cpumask *const cpu_present_mask; +extern const struct cpumask *const cpu_active_mask; + +/* These strip const, as traditionally they weren't const. */ +#define cpu_possible_map (*(cpumask_t *)cpu_possible_mask) +#define cpu_online_map (*(cpumask_t *)cpu_online_mask) +#define cpu_present_map (*(cpumask_t *)cpu_present_mask) +#define cpu_active_map (*(cpumask_t *)cpu_active_mask) #if NR_CPUS > 1 -#define num_online_cpus() cpus_weight_nr(cpu_online_map) -#define num_possible_cpus() cpus_weight_nr(cpu_possible_map) -#define num_present_cpus() cpus_weight_nr(cpu_present_map) -#define cpu_online(cpu) cpu_isset((cpu), cpu_online_map) -#define cpu_possible(cpu) cpu_isset((cpu), cpu_possible_map) -#define cpu_present(cpu) cpu_isset((cpu), cpu_present_map) -#define cpu_active(cpu) cpu_isset((cpu), cpu_active_map) +#define num_online_cpus() cpumask_weight(cpu_online_mask) +#define num_possible_cpus() cpumask_weight(cpu_possible_mask) +#define num_present_cpus() cpumask_weight(cpu_present_mask) +#define cpu_online(cpu) cpumask_test_cpu((cpu), cpu_online_mask) +#define cpu_possible(cpu) cpumask_test_cpu((cpu), cpu_possible_mask) +#define cpu_present(cpu) cpumask_test_cpu((cpu), cpu_present_mask) +#define cpu_active(cpu) cpumask_test_cpu((cpu), cpu_active_mask) #else #define num_online_cpus() 1 #define num_possible_cpus() 1 @@ -526,10 +513,6 @@ extern cpumask_t cpu_active_map; #define cpu_is_offline(cpu) unlikely(!cpu_online(cpu)) -#define for_each_possible_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_possible_map) -#define for_each_online_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_online_map) -#define for_each_present_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_present_map) - /* These are the new versions of the cpumask operators: passed by pointer. * The older versions will be implemented in terms of these, then deleted. */ #define cpumask_bits(maskp) ((maskp)->bits) @@ -540,9 +523,6 @@ extern cpumask_t cpu_active_map; [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ } -/* This produces more efficient code. */ -#define nr_cpumask_bits NR_CPUS - #else /* NR_CPUS > BITS_PER_LONG */ #define CPU_BITS_ALL \ @@ -550,9 +530,15 @@ extern cpumask_t cpu_active_map; [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \ [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ } +#endif /* NR_CPUS > BITS_PER_LONG */ +#ifdef CONFIG_CPUMASK_OFFSTACK +/* Assuming NR_CPUS is huge, a runtime limit is more efficient. Also, + * not all bits may be allocated. */ #define nr_cpumask_bits nr_cpu_ids -#endif /* NR_CPUS > BITS_PER_LONG */ +#else +#define nr_cpumask_bits NR_CPUS +#endif /* verify cpu argument to cpumask_* operators */ static inline unsigned int cpumask_check(unsigned int cpu) @@ -714,7 +700,7 @@ static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp) * No static inline type checking - see Subtlety (1) above. 
*/ #define cpumask_test_cpu(cpu, cpumask) \ - test_bit(cpumask_check(cpu), (cpumask)->bits) + test_bit(cpumask_check(cpu), cpumask_bits((cpumask))) /** * cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask @@ -946,22 +932,61 @@ static inline void cpumask_copy(struct cpumask *dstp, #define cpumask_of(cpu) (get_cpu_mask(cpu)) /** - * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask * - * @bitmap: the bitmap + * cpumask_scnprintf - print a cpumask into a string as comma-separated hex + * @buf: the buffer to sprintf into + * @len: the length of the buffer + * @srcp: the cpumask to print * - * There are a few places where cpumask_var_t isn't appropriate and - * static cpumasks must be used (eg. very early boot), yet we don't - * expose the definition of 'struct cpumask'. + * If len is zero, returns zero. Otherwise returns the length of the + * (nul-terminated) @buf string. + */ +static inline int cpumask_scnprintf(char *buf, int len, + const struct cpumask *srcp) +{ + return bitmap_scnprintf(buf, len, cpumask_bits(srcp), nr_cpumask_bits); +} + +/** + * cpumask_parse_user - extract a cpumask from a user string + * @buf: the buffer to extract from + * @len: the length of the buffer + * @dstp: the cpumask to set. * - * This does the conversion, and can be used as a constant initializer. + * Returns -errno, or 0 for success. */ -#define to_cpumask(bitmap) \ - ((struct cpumask *)(1 ? (bitmap) \ - : (void *)sizeof(__check_is_bitmap(bitmap)))) +static inline int cpumask_parse_user(const char __user *buf, int len, + struct cpumask *dstp) +{ + return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits); +} -static inline int __check_is_bitmap(const unsigned long *bitmap) +/** + * cpulist_scnprintf - print a cpumask into a string as comma-separated list + * @buf: the buffer to sprintf into + * @len: the length of the buffer + * @srcp: the cpumask to print + * + * If len is zero, returns zero. Otherwise returns the length of the + * (nul-terminated) @buf string. + */ +static inline int cpulist_scnprintf(char *buf, int len, + const struct cpumask *srcp) { - return 1; + return bitmap_scnlistprintf(buf, len, cpumask_bits(srcp), + nr_cpumask_bits); +} + +/** + * cpulist_parse_user - extract a cpumask from a user string of ranges + * @buf: the buffer to extract from + * @len: the length of the buffer + * @dstp: the cpumask to set. + * + * Returns -errno, or 0 for success. + */ +static inline int cpulist_parse(const char *buf, struct cpumask *dstp) +{ + return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits); } /** @@ -995,6 +1020,7 @@ static inline size_t cpumask_size(void) #ifdef CONFIG_CPUMASK_OFFSTACK typedef struct cpumask *cpumask_var_t; +bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node); bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags); void alloc_bootmem_cpumask_var(cpumask_var_t *mask); void free_cpumask_var(cpumask_var_t mask); @@ -1008,6 +1034,12 @@ static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) return true; } +static inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, + int node) +{ + return true; +} + static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask) { } @@ -1021,12 +1053,6 @@ static inline void free_bootmem_cpumask_var(cpumask_var_t mask) } #endif /* CONFIG_CPUMASK_OFFSTACK */ -/* The pointer versions of the maps, these will become the primary versions. 
*/ -#define cpu_possible_mask ((const struct cpumask *)&cpu_possible_map) -#define cpu_online_mask ((const struct cpumask *)&cpu_online_map) -#define cpu_present_mask ((const struct cpumask *)&cpu_present_map) -#define cpu_active_mask ((const struct cpumask *)&cpu_active_map) - /* It's common to want to use cpu_all_mask in struct member initializers, * so it has to refer to an address rather than a pointer. */ extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS); @@ -1035,51 +1061,16 @@ extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS); /* First bits of cpu_bit_bitmap are in fact unset. */ #define cpu_none_mask to_cpumask(cpu_bit_bitmap[0]) -/* Wrappers for arch boot code to manipulate normally-constant masks */ -static inline void set_cpu_possible(unsigned int cpu, bool possible) -{ - if (possible) - cpumask_set_cpu(cpu, &cpu_possible_map); - else - cpumask_clear_cpu(cpu, &cpu_possible_map); -} - -static inline void set_cpu_present(unsigned int cpu, bool present) -{ - if (present) - cpumask_set_cpu(cpu, &cpu_present_map); - else - cpumask_clear_cpu(cpu, &cpu_present_map); -} - -static inline void set_cpu_online(unsigned int cpu, bool online) -{ - if (online) - cpumask_set_cpu(cpu, &cpu_online_map); - else - cpumask_clear_cpu(cpu, &cpu_online_map); -} - -static inline void set_cpu_active(unsigned int cpu, bool active) -{ - if (active) - cpumask_set_cpu(cpu, &cpu_active_map); - else - cpumask_clear_cpu(cpu, &cpu_active_map); -} - -static inline void init_cpu_present(const struct cpumask *src) -{ - cpumask_copy(&cpu_present_map, src); -} - -static inline void init_cpu_possible(const struct cpumask *src) -{ - cpumask_copy(&cpu_possible_map, src); -} +#define for_each_possible_cpu(cpu) for_each_cpu((cpu), cpu_possible_mask) +#define for_each_online_cpu(cpu) for_each_cpu((cpu), cpu_online_mask) +#define for_each_present_cpu(cpu) for_each_cpu((cpu), cpu_present_mask) -static inline void init_cpu_online(const struct cpumask *src) -{ - cpumask_copy(&cpu_online_map, src); -} +/* Wrappers for arch boot code to manipulate normally-constant masks */ +void set_cpu_possible(unsigned int cpu, bool possible); +void set_cpu_present(unsigned int cpu, bool present); +void set_cpu_online(unsigned int cpu, bool online); +void set_cpu_active(unsigned int cpu, bool active); +void init_cpu_present(const struct cpumask *src); +void init_cpu_possible(const struct cpumask *src); +void init_cpu_online(const struct cpumask *src); #endif /* __LINUX_CPUMASK_H */ diff --git a/include/linux/dcache.h b/include/linux/dcache.h index a37359d0bad..c66d22487bf 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -75,14 +75,22 @@ full_name_hash(const unsigned char *name, unsigned int len) return end_name_hash(hash); } -struct dcookie_struct; - -#define DNAME_INLINE_LEN_MIN 36 +/* + * Try to keep struct dentry aligned on 64 byte cachelines (this will + * give reasonable cacheline footprint with larger lines without the + * large memory footprint increase). 
+ */ +#ifdef CONFIG_64BIT +#define DNAME_INLINE_LEN_MIN 32 /* 192 bytes */ +#else +#define DNAME_INLINE_LEN_MIN 40 /* 128 bytes */ +#endif struct dentry { atomic_t d_count; unsigned int d_flags; /* protected by d_lock */ spinlock_t d_lock; /* per dentry lock */ + int d_mounted; struct inode *d_inode; /* Where the name belongs to - NULL is * negative */ /* @@ -107,10 +115,7 @@ struct dentry { struct dentry_operations *d_op; struct super_block *d_sb; /* The root of the dentry tree */ void *d_fsdata; /* fs-specific data */ -#ifdef CONFIG_PROFILING - struct dcookie_struct *d_cookie; /* cookie, if any */ -#endif - int d_mounted; + unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */ }; @@ -177,6 +182,8 @@ d_iput: no no no yes #define DCACHE_INOTIFY_PARENT_WATCHED 0x0020 /* Parent inode is watched */ +#define DCACHE_COOKIE 0x0040 /* For use by dcookie subsystem */ + extern spinlock_t dcache_lock; extern seqlock_t rename_lock; diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h index 4aaa4afb1cb..096476f1fb3 100644 --- a/include/linux/debug_locks.h +++ b/include/linux/debug_locks.h @@ -17,7 +17,7 @@ extern int debug_locks_off(void); ({ \ int __ret = 0; \ \ - if (unlikely(c)) { \ + if (!oops_in_progress && unlikely(c)) { \ if (debug_locks_off() && !debug_locks_silent) \ WARN_ON(1); \ __ret = 1; \ diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h index 952df39c989..136f170cecc 100644 --- a/include/linux/dma_remapping.h +++ b/include/linux/dma_remapping.h @@ -9,148 +9,16 @@ #define VTD_PAGE_MASK (((u64)-1) << VTD_PAGE_SHIFT) #define VTD_PAGE_ALIGN(addr) (((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK) -#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT) -#define DMA_32BIT_PFN IOVA_PFN(DMA_32BIT_MASK) -#define DMA_64BIT_PFN IOVA_PFN(DMA_64BIT_MASK) - - -/* - * 0: Present - * 1-11: Reserved - * 12-63: Context Ptr (12 - (haw-1)) - * 64-127: Reserved - */ -struct root_entry { - u64 val; - u64 rsvd1; -}; -#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry)) -static inline bool root_present(struct root_entry *root) -{ - return (root->val & 1); -} -static inline void set_root_present(struct root_entry *root) -{ - root->val |= 1; -} -static inline void set_root_value(struct root_entry *root, unsigned long value) -{ - root->val |= value & VTD_PAGE_MASK; -} - -struct context_entry; -static inline struct context_entry * -get_context_addr_from_root(struct root_entry *root) -{ - return (struct context_entry *) - (root_present(root)?phys_to_virt( - root->val & VTD_PAGE_MASK) : - NULL); -} - -/* - * low 64 bits: - * 0: present - * 1: fault processing disable - * 2-3: translation type - * 12-63: address space root - * high 64 bits: - * 0-2: address width - * 3-6: aval - * 8-23: domain id - */ -struct context_entry { - u64 lo; - u64 hi; -}; -#define context_present(c) ((c).lo & 1) -#define context_fault_disable(c) (((c).lo >> 1) & 1) -#define context_translation_type(c) (((c).lo >> 2) & 3) -#define context_address_root(c) ((c).lo & VTD_PAGE_MASK) -#define context_address_width(c) ((c).hi & 7) -#define context_domain_id(c) (((c).hi >> 8) & ((1 << 16) - 1)) - -#define context_set_present(c) do {(c).lo |= 1;} while (0) -#define context_set_fault_enable(c) \ - do {(c).lo &= (((u64)-1) << 2) | 1;} while (0) -#define context_set_translation_type(c, val) \ - do { \ - (c).lo &= (((u64)-1) << 4) | 3; \ - (c).lo |= ((val) & 3) << 2; \ - } while (0) -#define CONTEXT_TT_MULTI_LEVEL 0 -#define context_set_address_root(c, val) \ - do {(c).lo |= (val) & VTD_PAGE_MASK; } 
while (0) -#define context_set_address_width(c, val) do {(c).hi |= (val) & 7;} while (0) -#define context_set_domain_id(c, val) \ - do {(c).hi |= ((val) & ((1 << 16) - 1)) << 8;} while (0) -#define context_clear_entry(c) do {(c).lo = 0; (c).hi = 0;} while (0) - -/* - * 0: readable - * 1: writable - * 2-6: reserved - * 7: super page - * 8-11: available - * 12-63: Host physcial address - */ -struct dma_pte { - u64 val; -}; -#define dma_clear_pte(p) do {(p).val = 0;} while (0) - #define DMA_PTE_READ (1) #define DMA_PTE_WRITE (2) -#define dma_set_pte_readable(p) do {(p).val |= DMA_PTE_READ;} while (0) -#define dma_set_pte_writable(p) do {(p).val |= DMA_PTE_WRITE;} while (0) -#define dma_set_pte_prot(p, prot) \ - do {(p).val = ((p).val & ~3) | ((prot) & 3); } while (0) -#define dma_pte_addr(p) ((p).val & VTD_PAGE_MASK) -#define dma_set_pte_addr(p, addr) do {\ - (p).val |= ((addr) & VTD_PAGE_MASK); } while (0) -#define dma_pte_present(p) (((p).val & 3) != 0) - struct intel_iommu; +struct dmar_domain; +struct root_entry; -struct dmar_domain { - int id; /* domain id */ - struct intel_iommu *iommu; /* back pointer to owning iommu */ - - struct list_head devices; /* all devices' list */ - struct iova_domain iovad; /* iova's that belong to this domain */ - - struct dma_pte *pgd; /* virtual address */ - spinlock_t mapping_lock; /* page table lock */ - int gaw; /* max guest address width */ - - /* adjusted guest address width, 0 is level 2 30-bit */ - int agaw; - -#define DOMAIN_FLAG_MULTIPLE_DEVICES 1 - int flags; -}; - -/* PCI domain-device relationship */ -struct device_domain_info { - struct list_head link; /* link to domain siblings */ - struct list_head global; /* link to global list */ - u8 bus; /* PCI bus numer */ - u8 devfn; /* PCI devfn number */ - struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */ - struct dmar_domain *domain; /* pointer to domain */ -}; - -extern int init_dmars(void); extern void free_dmar_iommu(struct intel_iommu *iommu); +extern int iommu_calculate_agaw(struct intel_iommu *iommu); extern int dmar_disabled; -#ifndef CONFIG_DMAR_GFX_WA -static inline void iommu_prepare_gfx_mapping(void) -{ - return; -} -#endif /* !CONFIG_DMAR_GFX_WA */ - #endif diff --git a/include/linux/dmar.h b/include/linux/dmar.h index f1984fc3e06..f28440784cf 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h @@ -144,7 +144,6 @@ struct dmar_rmrr_unit { list_for_each_entry(rmrr, &dmar_rmrr_units, list) /* Intel DMAR initialization functions */ extern int intel_iommu_init(void); -extern int dmar_disabled; #else static inline int intel_iommu_init(void) { diff --git a/include/linux/dmi.h b/include/linux/dmi.h index 2bfda178f27..34161907b2f 100644 --- a/include/linux/dmi.h +++ b/include/linux/dmi.h @@ -47,6 +47,7 @@ extern int dmi_name_in_vendors(const char *str); extern int dmi_name_in_serial(const char *str); extern int dmi_available; extern int dmi_walk(void (*decode)(const struct dmi_header *)); +extern bool dmi_match(enum dmi_field f, const char *str); #else @@ -61,6 +62,8 @@ static inline int dmi_name_in_serial(const char *s) { return 0; } #define dmi_available 0 static inline int dmi_walk(void (*decode)(const struct dmi_header *)) { return -1; } +static inline bool dmi_match(enum dmi_field f, const char *str) + { return false; } #endif diff --git a/include/linux/dqblk_qtree.h b/include/linux/dqblk_qtree.h new file mode 100644 index 00000000000..82a16527b36 --- /dev/null +++ b/include/linux/dqblk_qtree.h @@ -0,0 +1,56 @@ +/* + * Definitions of structures and functions for quota 
formats using trie + */ + +#ifndef _LINUX_DQBLK_QTREE_H +#define _LINUX_DQBLK_QTREE_H + +#include <linux/types.h> + +/* Numbers of blocks needed for updates - we count with the smallest + * possible block size (1024) */ +#define QTREE_INIT_ALLOC 4 +#define QTREE_INIT_REWRITE 2 +#define QTREE_DEL_ALLOC 0 +#define QTREE_DEL_REWRITE 6 + +struct dquot; + +/* Operations */ +struct qtree_fmt_operations { + void (*mem2disk_dqblk)(void *disk, struct dquot *dquot); /* Convert given entry from in memory format to disk one */ + void (*disk2mem_dqblk)(struct dquot *dquot, void *disk); /* Convert given entry from disk format to in memory one */ + int (*is_id)(void *disk, struct dquot *dquot); /* Is this structure for given id? */ +}; + +/* Inmemory copy of version specific information */ +struct qtree_mem_dqinfo { + struct super_block *dqi_sb; /* Sb quota is on */ + int dqi_type; /* Quota type */ + unsigned int dqi_blocks; /* # of blocks in quota file */ + unsigned int dqi_free_blk; /* First block in list of free blocks */ + unsigned int dqi_free_entry; /* First block with free entry */ + unsigned int dqi_blocksize_bits; /* Block size of quota file */ + unsigned int dqi_entry_size; /* Size of quota entry in quota file */ + unsigned int dqi_usable_bs; /* Space usable in block for quota data */ + unsigned int dqi_qtree_depth; /* Precomputed depth of quota tree */ + struct qtree_fmt_operations *dqi_ops; /* Operations for entry manipulation */ +}; + +int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot); +int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot); +int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot); +int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot); +int qtree_entry_unused(struct qtree_mem_dqinfo *info, char *disk); +static inline int qtree_depth(struct qtree_mem_dqinfo *info) +{ + unsigned int epb = info->dqi_usable_bs >> 2; + unsigned long long entries = epb; + int i; + + for (i = 1; entries < (1ULL << 32); i++) + entries *= epb; + return i; +} + +#endif /* _LINUX_DQBLK_QTREE_H */ diff --git a/include/linux/dqblk_v1.h b/include/linux/dqblk_v1.h index 57f1250d5a5..3713a7232dd 100644 --- a/include/linux/dqblk_v1.h +++ b/include/linux/dqblk_v1.h @@ -5,9 +5,6 @@ #ifndef _LINUX_DQBLK_V1_H #define _LINUX_DQBLK_V1_H -/* Id of quota format */ -#define QFMT_VFS_OLD 1 - /* Root squash turned on */ #define V1_DQF_RSQUASH 1 @@ -17,8 +14,4 @@ #define V1_DEL_ALLOC 0 #define V1_DEL_REWRITE 2 -/* Special information about quotafile */ -struct v1_mem_dqinfo { -}; - #endif /* _LINUX_DQBLK_V1_H */ diff --git a/include/linux/dqblk_v2.h b/include/linux/dqblk_v2.h index 4f853322cb7..18000a54267 100644 --- a/include/linux/dqblk_v2.h +++ b/include/linux/dqblk_v2.h @@ -1,26 +1,16 @@ /* - * Definitions of structures for vfsv0 quota format + * Definitions for vfsv0 quota format */ #ifndef _LINUX_DQBLK_V2_H #define _LINUX_DQBLK_V2_H -#include <linux/types.h> - -/* id numbers of quota format */ -#define QFMT_VFS_V0 2 +#include <linux/dqblk_qtree.h> /* Numbers of blocks needed for updates */ -#define V2_INIT_ALLOC 4 -#define V2_INIT_REWRITE 2 -#define V2_DEL_ALLOC 0 -#define V2_DEL_REWRITE 6 - -/* Inmemory copy of version specific information */ -struct v2_mem_dqinfo { - unsigned int dqi_blocks; - unsigned int dqi_free_blk; - unsigned int dqi_free_entry; -}; +#define V2_INIT_ALLOC QTREE_INIT_ALLOC +#define V2_INIT_REWRITE QTREE_INIT_REWRITE +#define V2_DEL_ALLOC QTREE_DEL_ALLOC +#define V2_DEL_REWRITE QTREE_DEL_REWRITE #endif /* 
_LINUX_DQBLK_V2_H */ diff --git a/include/linux/dvb/frontend.h b/include/linux/dvb/frontend.h index 79a8ed8e6a7..55026b1a40b 100644 --- a/include/linux/dvb/frontend.h +++ b/include/linux/dvb/frontend.h @@ -62,10 +62,11 @@ typedef enum fe_caps { FE_CAN_HIERARCHY_AUTO = 0x100000, FE_CAN_8VSB = 0x200000, FE_CAN_16VSB = 0x400000, - FE_HAS_EXTENDED_CAPS = 0x800000, // We need more bitspace for newer APIs, indicate this. - FE_NEEDS_BENDING = 0x20000000, // not supported anymore, don't use (frontend requires frequency bending) - FE_CAN_RECOVER = 0x40000000, // frontend can recover from a cable unplug automatically - FE_CAN_MUTE_TS = 0x80000000 // frontend can stop spurious TS data output + FE_HAS_EXTENDED_CAPS = 0x800000, /* We need more bitspace for newer APIs, indicate this. */ + FE_CAN_2G_MODULATION = 0x10000000, /* frontend supports "2nd generation modulation" (DVB-S2) */ + FE_NEEDS_BENDING = 0x20000000, /* not supported anymore, don't use (frontend requires frequency bending) */ + FE_CAN_RECOVER = 0x40000000, /* frontend can recover from a cable unplug automatically */ + FE_CAN_MUTE_TS = 0x80000000 /* frontend can stop spurious TS data output */ } fe_caps_t; @@ -121,15 +122,15 @@ typedef enum fe_sec_mini_cmd { typedef enum fe_status { - FE_HAS_SIGNAL = 0x01, /* found something above the noise level */ - FE_HAS_CARRIER = 0x02, /* found a DVB signal */ - FE_HAS_VITERBI = 0x04, /* FEC is stable */ - FE_HAS_SYNC = 0x08, /* found sync bytes */ - FE_HAS_LOCK = 0x10, /* everything's working... */ - FE_TIMEDOUT = 0x20, /* no lock within the last ~2 seconds */ - FE_REINIT = 0x40 /* frontend was reinitialized, */ -} fe_status_t; /* application is recommended to reset */ - /* DiSEqC, tone and parameters */ + FE_HAS_SIGNAL = 0x01, /* found something above the noise level */ + FE_HAS_CARRIER = 0x02, /* found a DVB signal */ + FE_HAS_VITERBI = 0x04, /* FEC is stable */ + FE_HAS_SYNC = 0x08, /* found sync bytes */ + FE_HAS_LOCK = 0x10, /* everything's working... 
*/ + FE_TIMEDOUT = 0x20, /* no lock within the last ~2 seconds */ + FE_REINIT = 0x40 /* frontend was reinitialized, */ +} fe_status_t; /* application is recommended to reset */ + /* DiSEqC, tone and parameters */ typedef enum fe_spectral_inversion { INVERSION_OFF, diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 92f6f634e3e..7a204256b15 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -28,7 +28,7 @@ typedef void (elevator_activate_req_fn) (struct request_queue *, struct request typedef void (elevator_deactivate_req_fn) (struct request_queue *, struct request *); typedef void *(elevator_init_fn) (struct request_queue *); -typedef void (elevator_exit_fn) (elevator_t *); +typedef void (elevator_exit_fn) (struct elevator_queue *); struct elevator_ops { @@ -62,8 +62,8 @@ struct elevator_ops struct elv_fs_entry { struct attribute attr; - ssize_t (*show)(elevator_t *, char *); - ssize_t (*store)(elevator_t *, const char *, size_t); + ssize_t (*show)(struct elevator_queue *, char *); + ssize_t (*store)(struct elevator_queue *, const char *, size_t); }; /* @@ -130,7 +130,7 @@ extern ssize_t elv_iosched_show(struct request_queue *, char *); extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t); extern int elevator_init(struct request_queue *, char *); -extern void elevator_exit(elevator_t *); +extern void elevator_exit(struct elevator_queue *); extern int elv_rq_merge_ok(struct request *, struct bio *); /* diff --git a/include/linux/ext2_fs_sb.h b/include/linux/ext2_fs_sb.h index f273415ab6f..dc541f3653d 100644 --- a/include/linux/ext2_fs_sb.h +++ b/include/linux/ext2_fs_sb.h @@ -108,4 +108,10 @@ struct ext2_sb_info { struct ext2_reserve_window_node s_rsv_window_head; }; +static inline spinlock_t * +sb_bgl_lock(struct ext2_sb_info *sbi, unsigned int block_group) +{ + return bgl_lock_ptr(&sbi->s_blockgroup_lock, block_group); +} + #endif /* _LINUX_EXT2_FS_SB */ diff --git a/include/linux/ext3_fs_sb.h b/include/linux/ext3_fs_sb.h index b65f0288b84..e024e38248f 100644 --- a/include/linux/ext3_fs_sb.h +++ b/include/linux/ext3_fs_sb.h @@ -83,4 +83,10 @@ struct ext3_sb_info { #endif }; +static inline spinlock_t * +sb_bgl_lock(struct ext3_sb_info *sbi, unsigned int block_group) +{ + return bgl_lock_ptr(&sbi->s_blockgroup_lock, block_group); +} + #endif /* _LINUX_EXT3_FS_SB */ diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h index 32368c4f032..06ca9b21dad 100644 --- a/include/linux/fault-inject.h +++ b/include/linux/fault-inject.h @@ -81,4 +81,13 @@ static inline void cleanup_fault_attr_dentries(struct fault_attr *attr) #endif /* CONFIG_FAULT_INJECTION */ +#ifdef CONFIG_FAILSLAB +extern bool should_failslab(size_t size, gfp_t gfpflags); +#else +static inline bool should_failslab(size_t size, gfp_t gfpflags) +{ + return false; +} +#endif /* CONFIG_FAILSLAB */ + #endif /* _LINUX_FAULT_INJECT_H */ diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h index 4aab6f12cfa..09d6c5bbddd 100644 --- a/include/linux/fdtable.h +++ b/include/linux/fdtable.h @@ -57,8 +57,6 @@ struct files_struct { #define files_fdtable(files) (rcu_dereference((files)->fdt)) -extern struct kmem_cache *filp_cachep; - struct file_operations; struct vfsmount; struct dentry; diff --git a/include/linux/fs.h b/include/linux/fs.h index 195a8cb2a74..8b916ce92e7 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -21,7 +21,6 @@ /* Fixed constants first: */ #undef NR_OPEN -extern int sysctl_nr_open; #define INR_OPEN 1024 /* Initial setting 
for nfile rlimits */ #define BLOCK_SIZE_BITS 10 @@ -38,21 +37,13 @@ struct files_stat_struct { int nr_free_files; /* read only */ int max_files; /* tunable */ }; -extern struct files_stat_struct files_stat; -extern int get_max_files(void); struct inodes_stat_t { int nr_inodes; int nr_unused; int dummy[5]; /* padding for sysctl ABI compatibility */ }; -extern struct inodes_stat_t inodes_stat; -extern int leases_enable, lease_break_time; - -#ifdef CONFIG_DNOTIFY -extern int dir_notify_enable; -#endif #define NR_FILE 8192 /* this can well be larger on a larger system */ @@ -82,6 +73,14 @@ extern int dir_notify_enable; (specialy hack for floppy.c) */ #define FMODE_WRITE_IOCTL ((__force fmode_t)128) +/* + * Don't update ctime and mtime. + * + * Currently a special hack for the XFS open_by_handle ioctl, but we'll + * hopefully graduate it to a proper O_CMTIME flag supported by open(2) soon. + */ +#define FMODE_NOCMTIME ((__force fmode_t)2048) + #define RW_MASK 1 #define RWA_MASK 2 #define READ 0 @@ -322,6 +321,15 @@ extern void __init inode_init(void); extern void __init inode_init_early(void); extern void __init files_init(unsigned long); +extern struct files_stat_struct files_stat; +extern int get_max_files(void); +extern int sysctl_nr_open; +extern struct inodes_stat_t inodes_stat; +extern int leases_enable, lease_break_time; +#ifdef CONFIG_DNOTIFY +extern int dir_notify_enable; +#endif + struct buffer_head; typedef int (get_block_t)(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create); @@ -415,6 +423,9 @@ enum positive_aop_returns { #define AOP_FLAG_UNINTERRUPTIBLE 0x0001 /* will not do a short write */ #define AOP_FLAG_CONT_EXPAND 0x0002 /* called from cont_expand */ +#define AOP_FLAG_NOFS 0x0004 /* used by filesystem to direct + * helper code (eg buffer layer) + * to clear GFP_FS from alloc */ /* * oh the beauties of C type declarations. @@ -1204,7 +1215,6 @@ extern void unlock_super(struct super_block *); /* * VFS helper functions.. 
*/ -extern int vfs_permission(struct nameidata *, int); extern int vfs_create(struct inode *, struct dentry *, int, struct nameidata *); extern int vfs_mkdir(struct inode *, struct dentry *, int); extern int vfs_mknod(struct inode *, struct dentry *, int, dev_t); @@ -1302,7 +1312,6 @@ struct file_operations { ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int); unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); int (*check_flags)(int); - int (*dir_notify)(struct file *filp, unsigned long arg); int (*flock) (struct file *, int, struct file_lock *); ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); @@ -1821,7 +1830,7 @@ extern int __filemap_fdatawrite_range(struct address_space *mapping, extern int filemap_fdatawrite_range(struct address_space *mapping, loff_t start, loff_t end); -extern long do_fsync(struct file *file, int datasync); +extern int vfs_fsync(struct file *file, struct dentry *dentry, int datasync); extern void sync_supers(void); extern void sync_filesystems(int wait); extern void __fsync_super(struct super_block *sb); @@ -1861,7 +1870,7 @@ extern void free_write_pipe(struct file *); extern struct file *do_filp_open(int dfd, const char *pathname, int open_flag, int mode); -extern int may_open(struct nameidata *, int, int); +extern int may_open(struct path *, int, int); extern int kernel_read(struct file *, unsigned long, char *, unsigned long); extern struct file * open_exec(const char *); @@ -1877,7 +1886,9 @@ extern loff_t default_llseek(struct file *file, loff_t offset, int origin); extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin); +extern struct inode * inode_init_always(struct super_block *, struct inode *); extern void inode_init_once(struct inode *); +extern void inode_add_to_lists(struct super_block *, struct inode *); extern void iput(struct inode *); extern struct inode * igrab(struct inode *); extern ino_t iunique(struct super_block *, ino_t); @@ -1894,6 +1905,8 @@ extern struct inode *ilookup(struct super_block *sb, unsigned long ino); extern struct inode * iget5_locked(struct super_block *, unsigned long, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *); extern struct inode * iget_locked(struct super_block *, unsigned long); +extern int insert_inode_locked4(struct inode *, unsigned long, int (*test)(struct inode *, void *), void *); +extern int insert_inode_locked(struct inode *); extern void unlock_new_inode(struct inode *); extern void __iget(struct inode * inode); @@ -2025,7 +2038,7 @@ extern int page_readlink(struct dentry *, char __user *, int); extern void *page_follow_link_light(struct dentry *, struct nameidata *); extern void page_put_link(struct dentry *, struct nameidata *, void *); extern int __page_symlink(struct inode *inode, const char *symname, int len, - gfp_t gfp_mask); + int nofs); extern int page_symlink(struct inode *inode, const char *symname, int len); extern const struct inode_operations page_symlink_inode_operations; extern int generic_readlink(struct dentry *, char __user *, int); diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h index 9e5a06e78d0..a97c053d3a9 100644 --- a/include/linux/fs_struct.h +++ b/include/linux/fs_struct.h @@ -10,12 +10,6 @@ struct fs_struct { struct path root, pwd; }; -#define INIT_FS { \ - .count = ATOMIC_INIT(1), \ - 
.lock = RW_LOCK_UNLOCKED, \ - .umask = 0022, \ -} - extern struct kmem_cache *fs_cachep; extern void exit_fs(struct task_struct *); diff --git a/include/linux/futex.h b/include/linux/futex.h index 586ab56a3ec..3bf5bb5a34f 100644 --- a/include/linux/futex.h +++ b/include/linux/futex.h @@ -25,7 +25,8 @@ union ktime; #define FUTEX_WAKE_BITSET 10 #define FUTEX_PRIVATE_FLAG 128 -#define FUTEX_CMD_MASK ~FUTEX_PRIVATE_FLAG +#define FUTEX_CLOCK_REALTIME 256 +#define FUTEX_CMD_MASK ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME) #define FUTEX_WAIT_PRIVATE (FUTEX_WAIT | FUTEX_PRIVATE_FLAG) #define FUTEX_WAKE_PRIVATE (FUTEX_WAKE | FUTEX_PRIVATE_FLAG) @@ -164,6 +165,8 @@ union futex_key { } both; }; +#define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = NULL } } + #ifdef CONFIG_FUTEX extern void exit_robust_list(struct task_struct *curr); extern void exit_pi_state_list(struct task_struct *curr); diff --git a/include/linux/generic_serial.h b/include/linux/generic_serial.h index 4cc91393981..fadff28505b 100644 --- a/include/linux/generic_serial.h +++ b/include/linux/generic_serial.h @@ -21,7 +21,6 @@ struct real_driver { void (*enable_tx_interrupts) (void *); void (*disable_rx_interrupts) (void *); void (*enable_rx_interrupts) (void *); - int (*get_CD) (void *); void (*shutdown_port) (void*); int (*set_real_termios) (void*); int (*chars_in_buffer) (void*); diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 3df7742ce24..16948eaecae 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -126,6 +126,7 @@ struct blk_scsi_cmd_filter { struct disk_part_tbl { struct rcu_head rcu_head; int len; + struct hd_struct *last_lookup; struct hd_struct *part[]; }; diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index 89a56d79e4c..f83288347dd 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h @@ -119,13 +119,17 @@ static inline void account_system_vtime(struct task_struct *tsk) } #endif -#if defined(CONFIG_PREEMPT_RCU) && defined(CONFIG_NO_HZ) +#if defined(CONFIG_NO_HZ) && !defined(CONFIG_CLASSIC_RCU) extern void rcu_irq_enter(void); extern void rcu_irq_exit(void); +extern void rcu_nmi_enter(void); +extern void rcu_nmi_exit(void); #else # define rcu_irq_enter() do { } while (0) # define rcu_irq_exit() do { } while (0) -#endif /* CONFIG_PREEMPT_RCU */ +# define rcu_nmi_enter() do { } while (0) +# define rcu_nmi_exit() do { } while (0) +#endif /* #if defined(CONFIG_NO_HZ) && !defined(CONFIG_CLASSIC_RCU) */ /* * It is safe to do non-atomic ops on ->hardirq_context, @@ -135,7 +139,6 @@ extern void rcu_irq_exit(void); */ #define __irq_enter() \ do { \ - rcu_irq_enter(); \ account_system_vtime(current); \ add_preempt_count(HARDIRQ_OFFSET); \ trace_hardirq_enter(); \ @@ -154,7 +157,6 @@ extern void irq_enter(void); trace_hardirq_exit(); \ account_system_vtime(current); \ sub_preempt_count(HARDIRQ_OFFSET); \ - rcu_irq_exit(); \ } while (0) /* @@ -166,11 +168,14 @@ extern void irq_exit(void); do { \ ftrace_nmi_enter(); \ lockdep_off(); \ + rcu_nmi_enter(); \ __irq_enter(); \ } while (0) + #define nmi_exit() \ do { \ __irq_exit(); \ + rcu_nmi_exit(); \ lockdep_on(); \ ftrace_nmi_exit(); \ } while (0) diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 3eba43878dc..bd37078c2d7 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -43,26 +43,6 @@ enum hrtimer_restart { }; /* - * hrtimer callback modes: - * - * HRTIMER_CB_SOFTIRQ: Callback must run in softirq context - * HRTIMER_CB_IRQSAFE_PERCPU: Callback must run in hardirq context - * 
Special mode for tick emulation and - * scheduler timer. Such timers are per - * cpu and not allowed to be migrated on - * cpu unplug. - * HRTIMER_CB_IRQSAFE_UNLOCKED: Callback should run in hardirq context - * with timer->base lock unlocked - * used for timers which call wakeup to - * avoid lock order problems with rq->lock - */ -enum hrtimer_cb_mode { - HRTIMER_CB_SOFTIRQ, - HRTIMER_CB_IRQSAFE_PERCPU, - HRTIMER_CB_IRQSAFE_UNLOCKED, -}; - -/* * Values to track state of the timer * * Possible states: @@ -70,7 +50,6 @@ enum hrtimer_cb_mode { * 0x00 inactive * 0x01 enqueued into rbtree * 0x02 callback function running - * 0x04 callback pending (high resolution mode) * * Special cases: * 0x03 callback function running and enqueued @@ -92,8 +71,7 @@ enum hrtimer_cb_mode { #define HRTIMER_STATE_INACTIVE 0x00 #define HRTIMER_STATE_ENQUEUED 0x01 #define HRTIMER_STATE_CALLBACK 0x02 -#define HRTIMER_STATE_PENDING 0x04 -#define HRTIMER_STATE_MIGRATE 0x08 +#define HRTIMER_STATE_MIGRATE 0x04 /** * struct hrtimer - the basic hrtimer structure @@ -109,8 +87,6 @@ enum hrtimer_cb_mode { * @function: timer expiry callback function * @base: pointer to the timer base (per cpu and per clock) * @state: state information (See bit values above) - * @cb_mode: high resolution timer feature to select the callback execution - * mode * @cb_entry: list head to enqueue an expired timer into the callback list * @start_site: timer statistics field to store the site where the timer * was started @@ -129,7 +105,6 @@ struct hrtimer { struct hrtimer_clock_base *base; unsigned long state; struct list_head cb_entry; - enum hrtimer_cb_mode cb_mode; #ifdef CONFIG_TIMER_STATS int start_pid; void *start_site; @@ -188,15 +163,11 @@ struct hrtimer_clock_base { * @check_clocks: Indictator, when set evaluate time source and clock * event devices whether high resolution mode can be * activated. - * @cb_pending: Expired timers are moved from the rbtree to this - * list in the timer interrupt. The list is processed - * in the softirq. * @nr_events: Total number of timer interrupt events */ struct hrtimer_cpu_base { spinlock_t lock; struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; - struct list_head cb_pending; #ifdef CONFIG_HIGH_RES_TIMERS ktime_t expires_next; int hres_active; @@ -404,8 +375,7 @@ static inline int hrtimer_active(const struct hrtimer *timer) */ static inline int hrtimer_is_queued(struct hrtimer *timer) { - return timer->state & - (HRTIMER_STATE_ENQUEUED | HRTIMER_STATE_PENDING); + return timer->state & HRTIMER_STATE_ENQUEUED; } /* diff --git a/include/linux/ide.h b/include/linux/ide.h index 010fb26a157..db5ef8ae1ab 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h @@ -32,13 +32,6 @@ # define SUPPORT_VLB_SYNC 1 #endif -/* - * Used to indicate "no IRQ", should be a value that cannot be an IRQ - * number. 
- */ - -#define IDE_NO_IRQ (-1) - typedef unsigned char byte; /* used everywhere */ /* @@ -122,8 +115,6 @@ struct ide_io_ports { #define MAX_DRIVES 2 /* per interface; 2 assumed by lots of code */ #define SECTOR_SIZE 512 -#define IDE_LARGE_SEEK(b1,b2,t) (((b1) > (b2) + (t)) || ((b2) > (b1) + (t))) - /* * Timeouts for various operations: */ @@ -172,9 +163,7 @@ typedef int (ide_ack_intr_t)(struct hwif_s *); enum { ide_unknown, ide_generic, ide_pci, ide_cmd640, ide_dtc2278, ide_ali14xx, ide_qd65xx, ide_umc8672, ide_ht6560b, - ide_rz1000, ide_trm290, - ide_cmd646, ide_cy82c693, ide_4drives, - ide_pmac, ide_acorn, + ide_4drives, ide_pmac, ide_acorn, ide_au1xxx, ide_palm3710 }; @@ -407,6 +396,7 @@ enum { * This is used for several packet commands (not for READ/WRITE commands). */ #define IDE_PC_BUFFER_SIZE 256 +#define ATAPI_WAIT_PC (60 * HZ) struct ide_atapi_pc { /* actual packet bytes */ @@ -484,55 +474,53 @@ enum { /* ide-cd */ /* Drive cannot eject the disc. */ - IDE_AFLAG_NO_EJECT = (1 << 3), + IDE_AFLAG_NO_EJECT = (1 << 1), /* Drive is a pre ATAPI 1.2 drive. */ - IDE_AFLAG_PRE_ATAPI12 = (1 << 4), + IDE_AFLAG_PRE_ATAPI12 = (1 << 2), /* TOC addresses are in BCD. */ - IDE_AFLAG_TOCADDR_AS_BCD = (1 << 5), + IDE_AFLAG_TOCADDR_AS_BCD = (1 << 3), /* TOC track numbers are in BCD. */ - IDE_AFLAG_TOCTRACKS_AS_BCD = (1 << 6), + IDE_AFLAG_TOCTRACKS_AS_BCD = (1 << 4), /* * Drive does not provide data in multiples of SECTOR_SIZE * when more than one interrupt is needed. */ - IDE_AFLAG_LIMIT_NFRAMES = (1 << 7), - /* Seeking in progress. */ - IDE_AFLAG_SEEKING = (1 << 8), + IDE_AFLAG_LIMIT_NFRAMES = (1 << 5), /* Saved TOC information is current. */ - IDE_AFLAG_TOC_VALID = (1 << 9), + IDE_AFLAG_TOC_VALID = (1 << 6), /* We think that the drive door is locked. */ - IDE_AFLAG_DOOR_LOCKED = (1 << 10), + IDE_AFLAG_DOOR_LOCKED = (1 << 7), /* SET_CD_SPEED command is unsupported. 
*/ - IDE_AFLAG_NO_SPEED_SELECT = (1 << 11), - IDE_AFLAG_VERTOS_300_SSD = (1 << 12), - IDE_AFLAG_VERTOS_600_ESD = (1 << 13), - IDE_AFLAG_SANYO_3CD = (1 << 14), - IDE_AFLAG_FULL_CAPS_PAGE = (1 << 15), - IDE_AFLAG_PLAY_AUDIO_OK = (1 << 16), - IDE_AFLAG_LE_SPEED_FIELDS = (1 << 17), + IDE_AFLAG_NO_SPEED_SELECT = (1 << 8), + IDE_AFLAG_VERTOS_300_SSD = (1 << 9), + IDE_AFLAG_VERTOS_600_ESD = (1 << 10), + IDE_AFLAG_SANYO_3CD = (1 << 11), + IDE_AFLAG_FULL_CAPS_PAGE = (1 << 12), + IDE_AFLAG_PLAY_AUDIO_OK = (1 << 13), + IDE_AFLAG_LE_SPEED_FIELDS = (1 << 14), /* ide-floppy */ /* Avoid commands not supported in Clik drive */ - IDE_AFLAG_CLIK_DRIVE = (1 << 19), + IDE_AFLAG_CLIK_DRIVE = (1 << 15), /* Requires BH algorithm for packets */ - IDE_AFLAG_ZIP_DRIVE = (1 << 20), + IDE_AFLAG_ZIP_DRIVE = (1 << 16), /* Supports format progress report */ - IDE_AFLAG_SRFP = (1 << 22), + IDE_AFLAG_SRFP = (1 << 17), /* ide-tape */ - IDE_AFLAG_IGNORE_DSC = (1 << 23), + IDE_AFLAG_IGNORE_DSC = (1 << 18), /* 0 When the tape position is unknown */ - IDE_AFLAG_ADDRESS_VALID = (1 << 24), + IDE_AFLAG_ADDRESS_VALID = (1 << 19), /* Device already opened */ - IDE_AFLAG_BUSY = (1 << 25), + IDE_AFLAG_BUSY = (1 << 20), /* Attempt to auto-detect the current user block size */ - IDE_AFLAG_DETECT_BS = (1 << 26), + IDE_AFLAG_DETECT_BS = (1 << 21), /* Currently on a filemark */ - IDE_AFLAG_FILEMARK = (1 << 27), + IDE_AFLAG_FILEMARK = (1 << 22), /* 0 = no tape is loaded, so we don't rewind after ejecting */ - IDE_AFLAG_MEDIUM_PRESENT = (1 << 28), + IDE_AFLAG_MEDIUM_PRESENT = (1 << 23), - IDE_AFLAG_NO_AUTOCLOSE = (1 << 29), + IDE_AFLAG_NO_AUTOCLOSE = (1 << 24), }; /* device flags */ @@ -571,28 +559,26 @@ enum { IDE_DFLAG_NODMA = (1 << 16), /* powermanagment told us not to do anything, so sleep nicely */ IDE_DFLAG_BLOCKED = (1 << 17), - /* ide-scsi emulation */ - IDE_DFLAG_SCSI = (1 << 18), /* sleeping & sleep field valid */ - IDE_DFLAG_SLEEPING = (1 << 19), - IDE_DFLAG_POST_RESET = (1 << 20), - IDE_DFLAG_UDMA33_WARNED = (1 << 21), - IDE_DFLAG_LBA48 = (1 << 22), + IDE_DFLAG_SLEEPING = (1 << 18), + IDE_DFLAG_POST_RESET = (1 << 19), + IDE_DFLAG_UDMA33_WARNED = (1 << 20), + IDE_DFLAG_LBA48 = (1 << 21), /* status of write cache */ - IDE_DFLAG_WCACHE = (1 << 23), + IDE_DFLAG_WCACHE = (1 << 22), /* used for ignoring ATA_DF */ - IDE_DFLAG_NOWERR = (1 << 24), + IDE_DFLAG_NOWERR = (1 << 23), /* retrying in PIO */ - IDE_DFLAG_DMA_PIO_RETRY = (1 << 25), - IDE_DFLAG_LBA = (1 << 26), + IDE_DFLAG_DMA_PIO_RETRY = (1 << 24), + IDE_DFLAG_LBA = (1 << 25), /* don't unload heads */ - IDE_DFLAG_NO_UNLOAD = (1 << 27), + IDE_DFLAG_NO_UNLOAD = (1 << 26), /* heads unloaded, please don't reset port */ - IDE_DFLAG_PARKED = (1 << 28), - IDE_DFLAG_MEDIA_CHANGED = (1 << 29), + IDE_DFLAG_PARKED = (1 << 27), + IDE_DFLAG_MEDIA_CHANGED = (1 << 28), /* write protect */ - IDE_DFLAG_WP = (1 << 30), - IDE_DFLAG_FORMAT_IN_PROGRESS = (1 << 31), + IDE_DFLAG_WP = (1 << 29), + IDE_DFLAG_FORMAT_IN_PROGRESS = (1 << 30), }; struct ide_drive_s { @@ -616,8 +602,6 @@ struct ide_drive_s { unsigned long dev_flags; unsigned long sleep; /* sleep until this time */ - unsigned long service_start; /* time we started last request */ - unsigned long service_time; /* service time of last request */ unsigned long timeout; /* max time to wait for irq */ special_t special; /* special action flags */ @@ -845,8 +829,6 @@ typedef struct hwif_s { unsigned extra_ports; /* number of extra dma ports */ unsigned present : 1; /* this interface exists */ - unsigned serialized : 1; /* serialized all channel 
operation */ - unsigned sharing_irq: 1; /* 1 = sharing irq with another hwif */ unsigned sg_mapped : 1; /* sg_table and sg_nents are ready */ struct device gendev; @@ -887,8 +869,6 @@ typedef struct hwgroup_s { /* BOOL: protects all fields below */ volatile int busy; - /* BOOL: wake us up on timer expiry */ - unsigned int sleeping : 1; /* BOOL: polling active & poll_timeout field valid */ unsigned int polling : 1; @@ -909,6 +889,8 @@ typedef struct hwgroup_s { int req_gen; int req_gen_timer; + + spinlock_t lock; } ide_hwgroup_t; typedef struct ide_driver_s ide_driver_t; @@ -1122,6 +1104,14 @@ enum { IDE_PM_COMPLETED, }; +int generic_ide_suspend(struct device *, pm_message_t); +int generic_ide_resume(struct device *); + +void ide_complete_power_step(ide_drive_t *, struct request *); +ide_startstop_t ide_start_power_step(ide_drive_t *, struct request *); +void ide_complete_pm_request(ide_drive_t *, struct request *); +void ide_check_pm_state(ide_drive_t *, struct request *); + /* * Subdrivers support. * @@ -1256,14 +1246,11 @@ int ide_set_media_lock(ide_drive_t *, struct gendisk *, int); void ide_create_request_sense_cmd(ide_drive_t *, struct ide_atapi_pc *); void ide_retry_pc(ide_drive_t *, struct gendisk *); -static inline unsigned long ide_scsi_get_timeout(struct ide_atapi_pc *pc) -{ - return max_t(unsigned long, WAIT_CMD, pc->timeout - jiffies); -} +int ide_cd_expiry(ide_drive_t *); -int ide_scsi_expiry(ide_drive_t *); +int ide_cd_get_xferlen(struct request *); -ide_startstop_t ide_issue_pc(ide_drive_t *, unsigned int, ide_expiry_t *); +ide_startstop_t ide_issue_pc(ide_drive_t *); ide_startstop_t do_rw_taskfile(ide_drive_t *, ide_task_t *); @@ -1285,6 +1272,26 @@ extern void ide_stall_queue(ide_drive_t *drive, unsigned long timeout); extern void ide_timer_expiry(unsigned long); extern irqreturn_t ide_intr(int irq, void *dev_id); + +static inline int ide_lock_hwgroup(ide_hwgroup_t *hwgroup) +{ + if (hwgroup->busy) + return 1; + + hwgroup->busy = 1; + /* for atari only */ + ide_get_lock(ide_intr, hwgroup); + + return 0; +} + +static inline void ide_unlock_hwgroup(ide_hwgroup_t *hwgroup) +{ + /* for atari only */ + ide_release_lock(); + hwgroup->busy = 0; +} + extern void do_ide_request(struct request_queue *); void ide_init_disk(struct gendisk *, ide_drive_t *); @@ -1376,8 +1383,8 @@ enum { IDE_HFLAG_LEGACY_IRQS = (1 << 21), /* force use of legacy IRQs */ IDE_HFLAG_FORCE_LEGACY_IRQS = (1 << 22), - /* limit LBA48 requests to 256 sectors */ - IDE_HFLAG_RQSIZE_256 = (1 << 23), + /* host is TRM290 */ + IDE_HFLAG_TRM290 = (1 << 23), /* use 32-bit I/O ops */ IDE_HFLAG_IO_32BIT = (1 << 24), /* unmask IRQs */ @@ -1415,6 +1422,9 @@ struct ide_port_info { ide_pci_enablebit_t enablebits[2]; hwif_chipset_t chipset; + + u16 max_sectors; /* if < than the default one */ + u32 host_flags; u8 pio_mask; u8 swdma_mask; @@ -1528,6 +1538,7 @@ void ide_unregister_region(struct gendisk *); void ide_undecoded_slave(ide_drive_t *); void ide_port_apply_params(ide_hwif_t *); +int ide_sysfs_register_port(ide_hwif_t *); struct ide_host *ide_host_alloc(const struct ide_port_info *, hw_regs_t **); void ide_host_free(struct ide_host *); @@ -1610,18 +1621,21 @@ extern struct mutex ide_cfg_mtx; /* * Structure locking: * - * ide_cfg_mtx and ide_lock together protect changes to - * ide_hwif_t->{next,hwgroup} + * ide_cfg_mtx and hwgroup->lock together protect changes to + * ide_hwif_t->next * ide_drive_t->next * - * ide_hwgroup_t->busy: ide_lock - * ide_hwgroup_t->hwif: ide_lock - * ide_hwif_t->mate: constant, no locking + * 
ide_hwgroup_t->busy: hwgroup->lock + * ide_hwgroup_t->hwif: hwgroup->lock + * ide_hwif_t->{hwgroup,mate}: constant, no locking * ide_drive_t->hwif: constant, no locking */ #define local_irq_set(flags) do { local_save_flags((flags)); local_irq_enable_in_hardirq(); } while (0) +char *ide_media_string(ide_drive_t *); + +extern struct device_attribute ide_dev_attrs[]; extern struct bus_type ide_bus_type; extern struct class *ide_port_class; diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 959f5522d10..2f3c2d4ef73 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -12,6 +12,7 @@ #include <net/net_namespace.h> extern struct files_struct init_files; +extern struct fs_struct init_fs; #define INIT_KIOCTX(name, which_mm) \ { \ diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 3d017cfd245..c4f6c101dbc 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -23,8 +23,6 @@ #define _INTEL_IOMMU_H_ #include <linux/types.h> -#include <linux/msi.h> -#include <linux/sysdev.h> #include <linux/iova.h> #include <linux/io.h> #include <linux/dma_remapping.h> @@ -289,10 +287,10 @@ struct intel_iommu { void __iomem *reg; /* Pointer to hardware regs, virtual addr */ u64 cap; u64 ecap; - int seg; u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */ spinlock_t register_lock; /* protect register handling */ int seq_id; /* sequence id of the iommu */ + int agaw; /* agaw of this iommu */ #ifdef CONFIG_DMAR unsigned long *domain_ids; /* bitmap of domains */ @@ -302,8 +300,6 @@ struct intel_iommu { unsigned int irq; unsigned char name[7]; /* Device Name */ - struct msi_msg saved_msg; - struct sys_device sysdev; struct iommu_flush flush; #endif struct q_inval *qi; /* Queued invalidation info */ @@ -334,25 +330,6 @@ extern int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, extern void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu); -void intel_iommu_domain_exit(struct dmar_domain *domain); -struct dmar_domain *intel_iommu_domain_alloc(struct pci_dev *pdev); -int intel_iommu_context_mapping(struct dmar_domain *domain, - struct pci_dev *pdev); -int intel_iommu_page_mapping(struct dmar_domain *domain, dma_addr_t iova, - u64 hpa, size_t size, int prot); -void intel_iommu_detach_dev(struct dmar_domain *domain, u8 bus, u8 devfn); -struct dmar_domain *intel_iommu_find_domain(struct pci_dev *pdev); -u64 intel_iommu_iova_to_pfn(struct dmar_domain *domain, u64 iova); - -#ifdef CONFIG_DMAR -int intel_iommu_found(void); -#else /* CONFIG_DMAR */ -static inline int intel_iommu_found(void) -{ - return 0; -} -#endif /* CONFIG_DMAR */ - extern void *intel_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t); extern void intel_free_coherent(struct device *, size_t, void *, dma_addr_t); extern dma_addr_t intel_map_single(struct device *, phys_addr_t, size_t, int); diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index f58a0cf8929..0702c4d7bdf 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -14,6 +14,8 @@ #include <linux/irqflags.h> #include <linux/smp.h> #include <linux/percpu.h> +#include <linux/irqnr.h> + #include <asm/atomic.h> #include <asm/ptrace.h> #include <asm/system.h> @@ -107,15 +109,15 @@ extern void enable_irq(unsigned int irq); #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS) -extern cpumask_t irq_default_affinity; +extern cpumask_var_t irq_default_affinity; -extern int irq_set_affinity(unsigned int irq, cpumask_t cpumask); +extern int 
irq_set_affinity(unsigned int irq, const struct cpumask *cpumask); extern int irq_can_set_affinity(unsigned int irq); extern int irq_select_affinity(unsigned int irq); #else /* CONFIG_SMP */ -static inline int irq_set_affinity(unsigned int irq, cpumask_t cpumask) +static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m) { return -EINVAL; } @@ -251,9 +253,6 @@ enum BLOCK_SOFTIRQ, TASKLET_SOFTIRQ, SCHED_SOFTIRQ, -#ifdef CONFIG_HIGH_RES_TIMERS - HRTIMER_SOFTIRQ, -#endif RCU_SOFTIRQ, /* Preferable RCU should always be the last softirq */ NR_SOFTIRQS @@ -465,4 +464,10 @@ static inline void init_irq_proc(void) int show_interrupts(struct seq_file *p, void *v); +struct irq_desc; + +extern int early_irq_init(void); +extern int arch_early_irq_init(void); +extern int arch_init_chip_data(struct irq_desc *desc, int cpu); + #endif diff --git a/include/linux/iommu.h b/include/linux/iommu.h new file mode 100644 index 00000000000..8a7bfb1b6ca --- /dev/null +++ b/include/linux/iommu.h @@ -0,0 +1,112 @@ +/* + * Copyright (C) 2007-2008 Advanced Micro Devices, Inc. + * Author: Joerg Roedel <joerg.roedel@amd.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __LINUX_IOMMU_H +#define __LINUX_IOMMU_H + +#define IOMMU_READ (1) +#define IOMMU_WRITE (2) + +struct device; + +struct iommu_domain { + void *priv; +}; + +struct iommu_ops { + int (*domain_init)(struct iommu_domain *domain); + void (*domain_destroy)(struct iommu_domain *domain); + int (*attach_dev)(struct iommu_domain *domain, struct device *dev); + void (*detach_dev)(struct iommu_domain *domain, struct device *dev); + int (*map)(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t size, int prot); + void (*unmap)(struct iommu_domain *domain, unsigned long iova, + size_t size); + phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, + unsigned long iova); +}; + +#ifdef CONFIG_IOMMU_API + +extern void register_iommu(struct iommu_ops *ops); +extern bool iommu_found(void); +extern struct iommu_domain *iommu_domain_alloc(void); +extern void iommu_domain_free(struct iommu_domain *domain); +extern int iommu_attach_device(struct iommu_domain *domain, + struct device *dev); +extern void iommu_detach_device(struct iommu_domain *domain, + struct device *dev); +extern int iommu_map_range(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t size, int prot); +extern void iommu_unmap_range(struct iommu_domain *domain, unsigned long iova, + size_t size); +extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, + unsigned long iova); + +#else /* CONFIG_IOMMU_API */ + +static inline void register_iommu(struct iommu_ops *ops) +{ +} + +static inline bool iommu_found(void) +{ + return false; +} + +static inline struct iommu_domain *iommu_domain_alloc(void) +{ + return NULL; +} + +static inline void iommu_domain_free(struct iommu_domain *domain) +{ +} + +static inline int 
iommu_attach_device(struct iommu_domain *domain, + struct device *dev) +{ + return -ENODEV; +} + +static inline void iommu_detach_device(struct iommu_domain *domain, + struct device *dev) +{ +} + +static inline int iommu_map_range(struct iommu_domain *domain, + unsigned long iova, phys_addr_t paddr, + size_t size, int prot) +{ + return -ENODEV; +} + +static inline void iommu_unmap_range(struct iommu_domain *domain, + unsigned long iova, size_t size) +{ +} + +static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, + unsigned long iova) +{ + return 0; +} + +#endif /* CONFIG_IOMMU_API */ + +#endif /* __LINUX_IOMMU_H */ diff --git a/include/linux/irq.h b/include/linux/irq.h index 3dddfa703eb..f899b502f18 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -113,7 +113,8 @@ struct irq_chip { void (*eoi)(unsigned int irq); void (*end)(unsigned int irq); - void (*set_affinity)(unsigned int irq, cpumask_t dest); + void (*set_affinity)(unsigned int irq, + const struct cpumask *dest); int (*retrigger)(unsigned int irq); int (*set_type)(unsigned int irq, unsigned int flow_type); int (*set_wake)(unsigned int irq, unsigned int on); @@ -129,9 +130,14 @@ struct irq_chip { const char *typename; }; +struct timer_rand_state; +struct irq_2_iommu; /** * struct irq_desc - interrupt descriptor * @irq: interrupt number for this descriptor + * @timer_rand_state: pointer to timer rand state struct + * @kstat_irqs: irq stats per cpu + * @irq_2_iommu: iommu with this irq * @handle_irq: highlevel irq-events handler [if NULL, __do_IRQ()] * @chip: low level interrupt hardware access * @msi_desc: MSI descriptor @@ -143,8 +149,8 @@ struct irq_chip { * @depth: disable-depth, for nested irq_disable() calls * @wake_depth: enable depth, for multiple set_irq_wake() callers * @irq_count: stats field to detect stalled irqs - * @irqs_unhandled: stats field for spurious unhandled interrupts * @last_unhandled: aging timer for unhandled count + * @irqs_unhandled: stats field for spurious unhandled interrupts * @lock: locking for SMP * @affinity: IRQ affinity on SMP * @cpu: cpu index useful for balancing @@ -154,6 +160,13 @@ struct irq_chip { */ struct irq_desc { unsigned int irq; +#ifdef CONFIG_SPARSE_IRQ + struct timer_rand_state *timer_rand_state; + unsigned int *kstat_irqs; +# ifdef CONFIG_INTR_REMAP + struct irq_2_iommu *irq_2_iommu; +# endif +#endif irq_flow_handler_t handle_irq; struct irq_chip *chip; struct msi_desc *msi_desc; @@ -165,8 +178,8 @@ struct irq_desc { unsigned int depth; /* nested irq disables */ unsigned int wake_depth; /* nested wake enables */ unsigned int irq_count; /* For detecting broken IRQs */ - unsigned int irqs_unhandled; unsigned long last_unhandled; /* Aging timer for unhandled count */ + unsigned int irqs_unhandled; spinlock_t lock; #ifdef CONFIG_SMP cpumask_t affinity; @@ -181,12 +194,32 @@ struct irq_desc { const char *name; } ____cacheline_internodealigned_in_smp; +extern void arch_init_copy_chip_data(struct irq_desc *old_desc, + struct irq_desc *desc, int cpu); +extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc); +#ifndef CONFIG_SPARSE_IRQ extern struct irq_desc irq_desc[NR_IRQS]; +#else /* CONFIG_SPARSE_IRQ */ +extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int cpu); + +#define kstat_irqs_this_cpu(DESC) \ + ((DESC)->kstat_irqs[smp_processor_id()]) +#define kstat_incr_irqs_this_cpu(irqno, DESC) \ + ((DESC)->kstat_irqs[smp_processor_id()]++) -static inline struct irq_desc *irq_to_desc(unsigned int irq) +#endif /* 
CONFIG_SPARSE_IRQ */ + +extern struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu); + +static inline struct irq_desc * +irq_remap_to_desc(unsigned int irq, struct irq_desc *desc) { - return (irq < nr_irqs) ? irq_desc + irq : NULL; +#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC + return irq_to_desc(irq); +#else + return desc; +#endif } /* @@ -380,6 +413,11 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry); #define get_irq_data(irq) (irq_to_desc(irq)->handler_data) #define get_irq_msi(irq) (irq_to_desc(irq)->msi_desc) +#define get_irq_desc_chip(desc) ((desc)->chip) +#define get_irq_desc_chip_data(desc) ((desc)->chip_data) +#define get_irq_desc_data(desc) ((desc)->handler_data) +#define get_irq_desc_msi(desc) ((desc)->msi_desc) + #endif /* CONFIG_GENERIC_HARDIRQS */ #endif /* !CONFIG_S390 */ diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h index 452c280c811..5504a5c9783 100644 --- a/include/linux/irqnr.h +++ b/include/linux/irqnr.h @@ -1,24 +1,41 @@ #ifndef _LINUX_IRQNR_H #define _LINUX_IRQNR_H +/* + * Generic irq_desc iterators: + */ +#ifdef __KERNEL__ + #ifndef CONFIG_GENERIC_HARDIRQS #include <asm/irq.h> # define nr_irqs NR_IRQS # define for_each_irq_desc(irq, desc) \ for (irq = 0; irq < nr_irqs; irq++) -#else + +# define for_each_irq_desc_reverse(irq, desc) \ + for (irq = nr_irqs - 1; irq >= 0; irq--) +#else /* CONFIG_GENERIC_HARDIRQS */ + extern int nr_irqs; +extern struct irq_desc *irq_to_desc(unsigned int irq); + +# define for_each_irq_desc(irq, desc) \ + for (irq = 0, desc = irq_to_desc(irq); irq < nr_irqs; \ + irq++, desc = irq_to_desc(irq)) \ + if (desc) -# define for_each_irq_desc(irq, desc) \ - for (irq = 0, desc = irq_desc; irq < nr_irqs; irq++, desc++) # define for_each_irq_desc_reverse(irq, desc) \ - for (irq = nr_irqs - 1, desc = irq_desc + (nr_irqs - 1); \ - irq >= 0; irq--, desc--) -#endif + for (irq = nr_irqs - 1, desc = irq_to_desc(irq); irq >= 0; \ + irq--, desc = irq_to_desc(irq)) \ + if (desc) -#define for_each_irq_nr(irq) \ - for (irq = 0; irq < nr_irqs; irq++) +#endif /* CONFIG_GENERIC_HARDIRQS */ + +#define for_each_irq_nr(irq) \ + for (irq = 0; irq < nr_irqs; irq++) + +#endif /* __KERNEL__ */ #endif diff --git a/include/linux/istallion.h b/include/linux/istallion.h index 0d184072324..7faca98c7d1 100644 --- a/include/linux/istallion.h +++ b/include/linux/istallion.h @@ -59,9 +59,7 @@ struct stliport { unsigned int devnr; int baud_base; int custom_divisor; - int close_delay; int closing_wait; - int openwaitcnt; int rc; int argsize; void *argp; diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index c7d106ef22e..34456476e76 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -329,6 +329,7 @@ enum jbd_state_bits { BH_State, /* Pins most journal_head state */ BH_JournalHead, /* Pins bh->b_private and jh->b_bh */ BH_Unshadow, /* Dummy bit, for BJ_Shadow wakeup filtering */ + BH_JBDPrivateStart, /* First bit available for private use by FS */ }; BUFFER_FNS(JBD, jbd) @@ -1007,6 +1008,35 @@ int __jbd2_journal_clean_checkpoint_list(journal_t *journal); int __jbd2_journal_remove_checkpoint(struct journal_head *); void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *); + +/* + * Triggers + */ + +struct jbd2_buffer_trigger_type { + /* + * Fired just before a buffer is written to the journal. + * mapped_data is a mapped buffer that is the frozen data for + * commit. 
+ */ + void (*t_commit)(struct jbd2_buffer_trigger_type *type, + struct buffer_head *bh, void *mapped_data, + size_t size); + + /* + * Fired during journal abort for dirty buffers that will not be + * committed. + */ + void (*t_abort)(struct jbd2_buffer_trigger_type *type, + struct buffer_head *bh); +}; + +extern void jbd2_buffer_commit_trigger(struct journal_head *jh, + void *mapped_data, + struct jbd2_buffer_trigger_type *triggers); +extern void jbd2_buffer_abort_trigger(struct journal_head *jh, + struct jbd2_buffer_trigger_type *triggers); + /* Buffer IO */ extern int jbd2_journal_write_metadata_buffer(transaction_t *transaction, @@ -1045,6 +1075,8 @@ extern int jbd2_journal_extend (handle_t *, int nblocks); extern int jbd2_journal_get_write_access(handle_t *, struct buffer_head *); extern int jbd2_journal_get_create_access (handle_t *, struct buffer_head *); extern int jbd2_journal_get_undo_access(handle_t *, struct buffer_head *); +void jbd2_journal_set_triggers(struct buffer_head *, + struct jbd2_buffer_trigger_type *type); extern int jbd2_journal_dirty_metadata (handle_t *, struct buffer_head *); extern void jbd2_journal_release_buffer (handle_t *, struct buffer_head *); extern int jbd2_journal_forget (handle_t *, struct buffer_head *); diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h index abb6ac639e8..1a9cf78bfce 100644 --- a/include/linux/jiffies.h +++ b/include/linux/jiffies.h @@ -115,10 +115,20 @@ static inline u64 get_jiffies_64(void) ((long)(a) - (long)(b) >= 0)) #define time_before_eq(a,b) time_after_eq(b,a) +/* + * Calculate whether a is in the range of [b, c]. + */ #define time_in_range(a,b,c) \ (time_after_eq(a,b) && \ time_before_eq(a,c)) +/* + * Calculate whether a is in the range of [b, c). + */ +#define time_in_range_open(a,b,c) \ + (time_after_eq(a,b) && \ + time_before(a,c)) + /* Same as above, but does so with platform independent 64bit types. * These must be used when utilizing jiffies_64 (i.e. return value of * get_jiffies_64() */ diff --git a/include/linux/journal-head.h b/include/linux/journal-head.h index bb70ebb6a2d..525aac3c97d 100644 --- a/include/linux/journal-head.h +++ b/include/linux/journal-head.h @@ -12,6 +12,8 @@ typedef unsigned int tid_t; /* Unique transaction ID */ typedef struct transaction_s transaction_t; /* Compound transaction type */ + + struct buffer_head; struct journal_head { @@ -87,6 +89,12 @@ struct journal_head { * [j_list_lock] */ struct journal_head *b_cpnext, *b_cpprev; + + /* Trigger type */ + struct jbd2_buffer_trigger_type *b_triggers; + + /* Trigger type for the committing transaction's frozen data */ + struct jbd2_buffer_trigger_type *b_frozen_triggers; }; #endif /* JOURNAL_HEAD_H_INCLUDED */ diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 6002ae76785..ca9ff6411df 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -141,6 +141,15 @@ extern int _cond_resched(void); (__x < 0) ? -__x : __x; \ }) +#ifdef CONFIG_PROVE_LOCKING +void might_fault(void); +#else +static inline void might_fault(void) +{ + might_sleep(); +} +#endif + extern struct atomic_notifier_head panic_notifier_list; extern long (*panic_blink)(long time); NORET_TYPE void panic(const char * fmt, ...) 
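The jiffies.h hunk above adds a half-open variant of the range test. The following is an illustrative userspace sketch (not part of the patch) showing the difference; the macro bodies are adapted from that hunk with typecheck() dropped, and the sample values are made up:

#include <stdio.h>

/* Adapted from the jiffies.h hunk above (typecheck() omitted for brevity). */
#define time_after_eq(a, b)	((long)(a) - (long)(b) >= 0)
#define time_before(a, b)	((long)(a) - (long)(b) < 0)
#define time_before_eq(a, b)	time_after_eq(b, a)
#define time_in_range(a, b, c)		(time_after_eq(a, b) && time_before_eq(a, c))
#define time_in_range_open(a, b, c)	(time_after_eq(a, b) && time_before(a, c))

int main(void)
{
	unsigned long b = 1000, c = 2000;

	/* a == c is inside the closed range [b, c] but outside the half-open [b, c). */
	printf("closed: %d, open: %d\n",
	       time_in_range(2000UL, b, c),		/* prints 1 */
	       time_in_range_open(2000UL, b, c));	/* prints 0 */
	return 0;
}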
@@ -188,6 +197,8 @@ extern unsigned long long memparse(const char *ptr, char **retptr); extern int core_kernel_text(unsigned long addr); extern int __kernel_text_address(unsigned long addr); extern int kernel_text_address(unsigned long addr); +extern int func_ptr_is_kernel_text(void *ptr); + struct pid; extern struct pid *session_of_pgrp(struct pid *pgrp); diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index 4a145caeee0..570d2041311 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h @@ -28,7 +28,9 @@ struct cpu_usage_stat { struct kernel_stat { struct cpu_usage_stat cpustat; - unsigned int irqs[NR_IRQS]; +#ifndef CONFIG_SPARSE_IRQ + unsigned int irqs[NR_IRQS]; +#endif }; DECLARE_PER_CPU(struct kernel_stat, kstat); @@ -39,6 +41,10 @@ DECLARE_PER_CPU(struct kernel_stat, kstat); extern unsigned long long nr_context_switches(void); +#ifndef CONFIG_SPARSE_IRQ +#define kstat_irqs_this_cpu(irq) \ + (kstat_this_cpu.irqs[irq]) + struct irq_desc; static inline void kstat_incr_irqs_this_cpu(unsigned int irq, @@ -46,11 +52,17 @@ static inline void kstat_incr_irqs_this_cpu(unsigned int irq, { kstat_this_cpu.irqs[irq]++; } +#endif + +#ifndef CONFIG_SPARSE_IRQ static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) { return kstat_cpu(cpu).irqs[irq]; } +#else +extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu); +#endif /* * Number of interrupts per specific IRQ source, since bootup @@ -67,10 +79,13 @@ static inline unsigned int kstat_irqs(unsigned int irq) } extern unsigned long long task_delta_exec(struct task_struct *); -extern void account_user_time(struct task_struct *, cputime_t); -extern void account_user_time_scaled(struct task_struct *, cputime_t); -extern void account_system_time(struct task_struct *, int, cputime_t); -extern void account_system_time_scaled(struct task_struct *, cputime_t); -extern void account_steal_time(struct task_struct *, cputime_t); +extern void account_user_time(struct task_struct *, cputime_t, cputime_t); +extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t); +extern void account_steal_time(cputime_t); +extern void account_idle_time(cputime_t); + +extern void account_process_tick(struct task_struct *, int user); +extern void account_steal_ticks(unsigned long ticks); +extern void account_idle_ticks(unsigned long ticks); #endif /* _LINUX_KERNEL_STAT_H */ diff --git a/include/linux/kvm.h b/include/linux/kvm.h index f18b86fa865..35525ac6333 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h @@ -83,6 +83,7 @@ struct kvm_irqchip { #define KVM_EXIT_S390_SIEIC 13 #define KVM_EXIT_S390_RESET 14 #define KVM_EXIT_DCR 15 +#define KVM_EXIT_NMI 16 /* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */ struct kvm_run { @@ -387,6 +388,14 @@ struct kvm_trace_rec { #define KVM_CAP_DEVICE_ASSIGNMENT 17 #endif #define KVM_CAP_IOMMU 18 +#if defined(CONFIG_X86) +#define KVM_CAP_DEVICE_MSI 20 +#endif +/* Bug in KVM_SET_USER_MEMORY_REGION fixed: */ +#define KVM_CAP_DESTROY_MEMORY_REGION_WORKS 21 +#if defined(CONFIG_X86) +#define KVM_CAP_USER_NMI 22 +#endif /* * ioctls for VM fds @@ -458,6 +467,8 @@ struct kvm_trace_rec { #define KVM_S390_INITIAL_RESET _IO(KVMIO, 0x97) #define KVM_GET_MP_STATE _IOR(KVMIO, 0x98, struct kvm_mp_state) #define KVM_SET_MP_STATE _IOW(KVMIO, 0x99, struct kvm_mp_state) +/* Available with KVM_CAP_NMI */ +#define KVM_NMI _IO(KVMIO, 0x9a) #define KVM_TRC_INJ_VIRQ (KVM_TRC_HANDLER + 0x02) #define KVM_TRC_REDELIVER_EVT (KVM_TRC_HANDLER + 0x03) @@ -500,10 +511,17 @@ 
struct kvm_assigned_irq { __u32 guest_irq; __u32 flags; union { + struct { + __u32 addr_lo; + __u32 addr_hi; + __u32 data; + } guest_msi; __u32 reserved[12]; }; }; #define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) +#define KVM_DEV_IRQ_ASSIGN_ENABLE_MSI (1 << 0) + #endif diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index bb92be2153b..ec49d0be7f5 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -16,6 +16,7 @@ #include <linux/mm.h> #include <linux/preempt.h> #include <linux/marker.h> +#include <linux/msi.h> #include <asm/signal.h> #include <linux/kvm.h> @@ -306,9 +307,16 @@ struct kvm_assigned_dev_kernel { int host_busnr; int host_devfn; int host_irq; + bool host_irq_disabled; int guest_irq; - int irq_requested; + struct msi_msg guest_msi; +#define KVM_ASSIGNED_DEV_GUEST_INTX (1 << 0) +#define KVM_ASSIGNED_DEV_GUEST_MSI (1 << 1) +#define KVM_ASSIGNED_DEV_HOST_INTX (1 << 8) +#define KVM_ASSIGNED_DEV_HOST_MSI (1 << 9) + unsigned long irq_requested_type; int irq_source_id; + int flags; struct pci_dev *dev; struct kvm *kvm; }; @@ -316,18 +324,20 @@ void kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level); void kvm_notify_acked_irq(struct kvm *kvm, unsigned gsi); void kvm_register_irq_ack_notifier(struct kvm *kvm, struct kvm_irq_ack_notifier *kian); -void kvm_unregister_irq_ack_notifier(struct kvm *kvm, - struct kvm_irq_ack_notifier *kian); +void kvm_unregister_irq_ack_notifier(struct kvm_irq_ack_notifier *kian); int kvm_request_irq_source_id(struct kvm *kvm); void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id); -#ifdef CONFIG_DMAR +#ifdef CONFIG_IOMMU_API int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn, unsigned long npages); -int kvm_iommu_map_guest(struct kvm *kvm, - struct kvm_assigned_dev_kernel *assigned_dev); +int kvm_iommu_map_guest(struct kvm *kvm); int kvm_iommu_unmap_guest(struct kvm *kvm); -#else /* CONFIG_DMAR */ +int kvm_assign_device(struct kvm *kvm, + struct kvm_assigned_dev_kernel *assigned_dev); +int kvm_deassign_device(struct kvm *kvm, + struct kvm_assigned_dev_kernel *assigned_dev); +#else /* CONFIG_IOMMU_API */ static inline int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn, unsigned long npages) @@ -335,9 +345,7 @@ static inline int kvm_iommu_map_pages(struct kvm *kvm, return 0; } -static inline int kvm_iommu_map_guest(struct kvm *kvm, - struct kvm_assigned_dev_kernel - *assigned_dev) +static inline int kvm_iommu_map_guest(struct kvm *kvm) { return -ENODEV; } @@ -346,7 +354,19 @@ static inline int kvm_iommu_unmap_guest(struct kvm *kvm) { return 0; } -#endif /* CONFIG_DMAR */ + +static inline int kvm_assign_device(struct kvm *kvm, + struct kvm_assigned_dev_kernel *assigned_dev) +{ + return 0; +} + +static inline int kvm_deassign_device(struct kvm *kvm, + struct kvm_assigned_dev_kernel *assigned_dev) +{ + return 0; +} +#endif /* CONFIG_IOMMU_API */ static inline void kvm_guest_enter(void) { diff --git a/include/linux/lguest_launcher.h b/include/linux/lguest_launcher.h index e7217dc58f3..a53407a4165 100644 --- a/include/linux/lguest_launcher.h +++ b/include/linux/lguest_launcher.h @@ -54,9 +54,13 @@ struct lguest_vqconfig { /* Write command first word is a request. */ enum lguest_req { - LHREQ_INITIALIZE, /* + base, pfnlimit, pgdir, start */ + LHREQ_INITIALIZE, /* + base, pfnlimit, start */ LHREQ_GETDMA, /* No longer used */ LHREQ_IRQ, /* + irq */ LHREQ_BREAK, /* + on/off flag (on blocks until someone does off) */ }; + +/* The alignment to use between consumer and producer parts of vring. 
+ * x86 pagesize for historical reasons. */ +#define LGUEST_VRING_ALIGN 4096 #endif /* _LINUX_LGUEST_LAUNCHER */ diff --git a/include/linux/libata.h b/include/linux/libata.h index ed3f26eb5df..3449de597ef 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -213,10 +213,11 @@ enum { ATA_PFLAG_FROZEN = (1 << 2), /* port is frozen */ ATA_PFLAG_RECOVERED = (1 << 3), /* recovery action performed */ ATA_PFLAG_LOADING = (1 << 4), /* boot/loading probe */ - ATA_PFLAG_UNLOADING = (1 << 5), /* module is unloading */ ATA_PFLAG_SCSI_HOTPLUG = (1 << 6), /* SCSI hotplug scheduled */ ATA_PFLAG_INITIALIZING = (1 << 7), /* being initialized, don't touch */ ATA_PFLAG_RESETTING = (1 << 8), /* reset in progress */ + ATA_PFLAG_UNLOADING = (1 << 9), /* driver is being unloaded */ + ATA_PFLAG_UNLOADED = (1 << 10), /* driver is unloaded */ ATA_PFLAG_SUSPENDED = (1 << 17), /* port is suspended (power) */ ATA_PFLAG_PM_PENDING = (1 << 18), /* PM operation pending */ @@ -1285,26 +1286,62 @@ static inline int ata_link_active(struct ata_link *link) return ata_tag_valid(link->active_tag) || link->sactive; } -extern struct ata_link *__ata_port_next_link(struct ata_port *ap, - struct ata_link *link, - bool dev_only); +/* + * Iterators + * + * ATA_LITER_* constants are used to select link iteration mode and + * ATA_DITER_* device iteration mode. + * + * For a custom iteration directly using ata_{link|dev}_next(), if + * @link or @dev, respectively, is NULL, the first element is + * returned. @dev and @link can be any valid device or link and the + * next element according to the iteration mode will be returned. + * After the last element, NULL is returned. + */ +enum ata_link_iter_mode { + ATA_LITER_EDGE, /* if present, PMP links only; otherwise, + * host link. no slave link */ + ATA_LITER_HOST_FIRST, /* host link followed by PMP or slave links */ + ATA_LITER_PMP_FIRST, /* PMP links followed by host link, + * slave link still comes after host link */ +}; -#define __ata_port_for_each_link(link, ap) \ - for ((link) = __ata_port_next_link((ap), NULL, false); (link); \ - (link) = __ata_port_next_link((ap), (link), false)) +enum ata_dev_iter_mode { + ATA_DITER_ENABLED, + ATA_DITER_ENABLED_REVERSE, + ATA_DITER_ALL, + ATA_DITER_ALL_REVERSE, +}; -#define ata_port_for_each_link(link, ap) \ - for ((link) = __ata_port_next_link((ap), NULL, true); (link); \ - (link) = __ata_port_next_link((ap), (link), true)) +extern struct ata_link *ata_link_next(struct ata_link *link, + struct ata_port *ap, + enum ata_link_iter_mode mode); -#define ata_link_for_each_dev(dev, link) \ - for ((dev) = (link)->device; \ - (dev) < (link)->device + ata_link_max_devices(link) || ((dev) = NULL); \ - (dev)++) +extern struct ata_device *ata_dev_next(struct ata_device *dev, + struct ata_link *link, + enum ata_dev_iter_mode mode); + +/* + * Shortcut notation for iterations + * + * ata_for_each_link() iterates over each link of @ap according to + * @mode. @link points to the current link in the loop. @link is + * NULL after loop termination. ata_for_each_dev() works the same way + * except that it iterates over each device of @link. + * + * Note that the mode prefixes ATA_{L|D}ITER_ shouldn't need to be + * specified when using the following shorthand notations. Only the + * mode itself (EDGE, HOST_FIRST, ENABLED, etc...) should be + * specified. This not only increases brevity but also makes it + * impossible to use ATA_LITER_* for device iteration or vice-versa. 
+ */ +#define ata_for_each_link(link, ap, mode) \ + for ((link) = ata_link_next(NULL, (ap), ATA_LITER_##mode); (link); \ + (link) = ata_link_next((link), (ap), ATA_LITER_##mode)) -#define ata_link_for_each_dev_reverse(dev, link) \ - for ((dev) = (link)->device + ata_link_max_devices(link) - 1; \ - (dev) >= (link)->device || ((dev) = NULL); (dev)--) +#define ata_for_each_dev(dev, link, mode) \ + for ((dev) = ata_dev_next(NULL, (link), ATA_DITER_##mode); (dev); \ + (dev) = ata_dev_next((dev), (link), ATA_DITER_##mode)) /** * ata_ncq_enabled - Test whether NCQ is enabled diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h index e5872dc994c..fbc48f89852 100644 --- a/include/linux/lockd/bind.h +++ b/include/linux/lockd/bind.h @@ -41,6 +41,7 @@ struct nlmclnt_initdata { size_t addrlen; unsigned short protocol; u32 nfs_version; + int noresvport; }; /* diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h index b56d5aa9b19..23da3fa69ef 100644 --- a/include/linux/lockd/lockd.h +++ b/include/linux/lockd/lockd.h @@ -49,6 +49,7 @@ struct nlm_host { unsigned short h_proto; /* transport proto */ unsigned short h_reclaiming : 1, h_server : 1, /* server side, not client side */ + h_noresvport : 1, h_inuse : 1; wait_queue_head_t h_gracewait; /* wait while reclaiming */ struct rw_semaphore h_rwsem; /* Reboot recovery lock */ @@ -220,7 +221,8 @@ struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap, const size_t salen, const unsigned short protocol, const u32 version, - const char *hostname); + const char *hostname, + int noresvport); struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp, const char *hostname, const size_t hostname_len); diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 29aec6e1002..23bf02fb124 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -73,6 +73,8 @@ struct lock_class_key { struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES]; }; +#define LOCKSTAT_POINTS 4 + /* * The lock-class itself: */ @@ -119,7 +121,8 @@ struct lock_class { int name_version; #ifdef CONFIG_LOCK_STAT - unsigned long contention_point[4]; + unsigned long contention_point[LOCKSTAT_POINTS]; + unsigned long contending_point[LOCKSTAT_POINTS]; #endif }; @@ -144,6 +147,7 @@ enum bounce_type { struct lock_class_stats { unsigned long contention_point[4]; + unsigned long contending_point[4]; struct lock_time read_waittime; struct lock_time write_waittime; struct lock_time read_holdtime; @@ -165,6 +169,7 @@ struct lockdep_map { const char *name; #ifdef CONFIG_LOCK_STAT int cpu; + unsigned long ip; #endif }; @@ -309,8 +314,15 @@ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass, extern void lock_release(struct lockdep_map *lock, int nested, unsigned long ip); -extern void lock_set_subclass(struct lockdep_map *lock, unsigned int subclass, - unsigned long ip); +extern void lock_set_class(struct lockdep_map *lock, const char *name, + struct lock_class_key *key, unsigned int subclass, + unsigned long ip); + +static inline void lock_set_subclass(struct lockdep_map *lock, + unsigned int subclass, unsigned long ip) +{ + lock_set_class(lock, lock->name, lock->key, subclass, ip); +} # define INIT_LOCKDEP .lockdep_recursion = 0, @@ -328,6 +340,7 @@ static inline void lockdep_on(void) # define lock_acquire(l, s, t, r, c, n, i) do { } while (0) # define lock_release(l, n, i) do { } while (0) +# define lock_set_class(l, n, k, s, i) do { } while (0) # define lock_set_subclass(l, s, i) do { } while (0) # define 
lockdep_init() do { } while (0) # define lockdep_info() do { } while (0) @@ -356,7 +369,7 @@ struct lock_class_key { }; #ifdef CONFIG_LOCK_STAT extern void lock_contended(struct lockdep_map *lock, unsigned long ip); -extern void lock_acquired(struct lockdep_map *lock); +extern void lock_acquired(struct lockdep_map *lock, unsigned long ip); #define LOCK_CONTENDED(_lock, try, lock) \ do { \ @@ -364,20 +377,20 @@ do { \ lock_contended(&(_lock)->dep_map, _RET_IP_); \ lock(_lock); \ } \ - lock_acquired(&(_lock)->dep_map); \ + lock_acquired(&(_lock)->dep_map, _RET_IP_); \ } while (0) #else /* CONFIG_LOCK_STAT */ #define lock_contended(lockdep_map, ip) do {} while (0) -#define lock_acquired(lockdep_map) do {} while (0) +#define lock_acquired(lockdep_map, ip) do {} while (0) #define LOCK_CONTENDED(_lock, try, lock) \ lock(_lock) #endif /* CONFIG_LOCK_STAT */ -#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS) +#ifdef CONFIG_GENERIC_HARDIRQS extern void early_init_irq_lock_class(void); #else static inline void early_init_irq_lock_class(void) @@ -481,4 +494,22 @@ static inline void print_irqtrace_events(struct task_struct *curr) # define lock_map_release(l) do { } while (0) #endif +#ifdef CONFIG_PROVE_LOCKING +# define might_lock(lock) \ +do { \ + typecheck(struct lockdep_map *, &(lock)->dep_map); \ + lock_acquire(&(lock)->dep_map, 0, 0, 0, 2, NULL, _THIS_IP_); \ + lock_release(&(lock)->dep_map, 0, _THIS_IP_); \ +} while (0) +# define might_lock_read(lock) \ +do { \ + typecheck(struct lockdep_map *, &(lock)->dep_map); \ + lock_acquire(&(lock)->dep_map, 0, 0, 1, 2, NULL, _THIS_IP_); \ + lock_release(&(lock)->dep_map, 0, _THIS_IP_); \ +} while (0) +#else +# define might_lock(lock) do { } while (0) +# define might_lock_read(lock) do { } while (0) +#endif + #endif /* __LINUX_LOCKDEP_H */ diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index fe825471d5a..9cfc9b627fd 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -232,8 +232,9 @@ struct mm_struct { struct core_state *core_state; /* coredumping support */ /* aio bits */ - rwlock_t ioctx_list_lock; /* aio lock */ - struct kioctx *ioctx_list; + spinlock_t ioctx_lock; + struct hlist_head ioctx_list; + #ifdef CONFIG_MM_OWNER /* * "owner" points to a task that is regarded as the canonical diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h index 143cebf0586..7ac8b500d55 100644 --- a/include/linux/mmc/core.h +++ b/include/linux/mmc/core.h @@ -151,4 +151,6 @@ static inline void mmc_claim_host(struct mmc_host *host) __mmc_claim_host(host, NULL); } +extern u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max); + #endif diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index f842f234e44..4e457256bd3 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -41,6 +41,7 @@ struct mmc_ios { #define MMC_BUS_WIDTH_1 0 #define MMC_BUS_WIDTH_4 2 +#define MMC_BUS_WIDTH_8 3 unsigned char timing; /* timing specification used */ @@ -116,6 +117,7 @@ struct mmc_host { #define MMC_CAP_SDIO_IRQ (1 << 3) /* Can signal pending SDIO IRQs */ #define MMC_CAP_SPI (1 << 4) /* Talks only SPI protocols */ #define MMC_CAP_NEEDS_POLL (1 << 5) /* Needs polling for card-detection */ +#define MMC_CAP_8_BIT_DATA (1 << 6) /* Can the host do 8 bit transfers */ /* host specific block data */ unsigned int max_seg_size; /* see blk_queue_max_segment_size */ diff --git a/include/linux/msi.h b/include/linux/msi.h index 8f293922720..d2b8a1e8ca1 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h 
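The lockdep.h hunk above introduces might_lock()/might_lock_read() for annotating paths that may take a lock without always doing so. A hedged kernel-style sketch, not part of the patch; struct ex_dev and its fields are invented for illustration:

#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/lockdep.h>

struct ex_dev {
	struct mutex	lock;
	int		count;
};

/*
 * Record the dependency on dev->lock for lockdep even on the fast path,
 * where the mutex is never actually taken.
 */
static int ex_dev_update(struct ex_dev *dev, bool fast_path)
{
	might_lock(&dev->lock);

	if (fast_path)
		return dev->count;	/* lock not taken, dependency still recorded */

	mutex_lock(&dev->lock);
	dev->count++;
	mutex_unlock(&dev->lock);
	return dev->count;
}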
@@ -10,8 +10,11 @@ struct msi_msg { }; /* Helper functions */ +struct irq_desc; extern void mask_msi_irq(unsigned int irq); extern void unmask_msi_irq(unsigned int irq); +extern void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg); +extern void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg); extern void read_msi_msg(unsigned int irq, struct msi_msg *msg); extern void write_msi_msg(unsigned int irq, struct msi_msg *msg); diff --git a/include/linux/mutex.h b/include/linux/mutex.h index bc6da10ceee..7a0e5c4f807 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h @@ -144,6 +144,8 @@ extern int __must_check mutex_lock_killable(struct mutex *lock); /* * NOTE: mutex_trylock() follows the spin_trylock() convention, * not the down_trylock() convention! + * + * Returns 1 if the mutex has been acquired successfully, and 0 on contention. */ extern int mutex_trylock(struct mutex *lock); extern void mutex_unlock(struct mutex *lock); diff --git a/include/linux/namei.h b/include/linux/namei.h index 99eb80306dc..fc2e0357987 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -94,4 +94,9 @@ static inline char *nd_get_link(struct nameidata *nd) return nd->saved_names[nd->depth]; } +static inline void nd_terminate_link(void *name, size_t len, size_t maxlen) +{ + ((char *) name)[min(len, maxlen)] = '\0'; +} + #endif /* _LINUX_NAMEI_H */ diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 4eaa8347a0d..db867b04ac3 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -83,7 +83,7 @@ struct nfs_open_context { struct rpc_cred *cred; struct nfs4_state *state; fl_owner_t lockowner; - int mode; + fmode_t mode; unsigned long flags; #define NFS_CONTEXT_ERROR_WRITE (0) @@ -130,7 +130,10 @@ struct nfs_inode { * * We need to revalidate the cached attrs for this inode if * - * jiffies - read_cache_jiffies > attrtimeo + * jiffies - read_cache_jiffies >= attrtimeo + * + * Please note the comparison is greater than or equal + * so that zero timeout values can be specified. 
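The return-value note added to mutex_trylock() above matters because the convention is the opposite of down_trylock(). A small usage sketch under that convention (the lock and helper names here are illustrative only):

#include <linux/mutex.h>

static DEFINE_MUTEX(cache_mutex);

static void example_opportunistic_flush(void)
{
        /* mutex_trylock() returns 1 when the lock was acquired... */
        if (mutex_trylock(&cache_mutex)) {
                /* ... do optional work under the lock ... */
                mutex_unlock(&cache_mutex);
        }
        /* ... and 0 on contention, in which case the work is simply skipped. */
}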
*/ unsigned long read_cache_jiffies; unsigned long attrtimeo; @@ -180,7 +183,7 @@ struct nfs_inode { /* NFSv4 state */ struct list_head open_states; struct nfs_delegation *delegation; - int delegation_state; + fmode_t delegation_state; struct rw_semaphore rwsem; #endif /* CONFIG_NFS_V4*/ struct inode vfs_inode; @@ -342,7 +345,7 @@ extern int nfs_setattr(struct dentry *, struct iattr *); extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr); extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx); extern void put_nfs_open_context(struct nfs_open_context *ctx); -extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, int mode); +extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, fmode_t mode); extern u64 nfs_compat_user_ino64(u64 fileid); extern void nfs_fattr_init(struct nfs_fattr *fattr); @@ -533,12 +536,6 @@ static inline void nfs3_forget_cached_acls(struct inode *inode) #endif /* CONFIG_NFS_V3_ACL */ /* - * linux/fs/mount_clnt.c - */ -extern int nfs_mount(struct sockaddr *, size_t, char *, char *, - int, int, struct nfs_fh *); - -/* * inline functions */ diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 4e477ae5869..9bb81aec91c 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -42,12 +42,6 @@ struct nfs_client { struct rb_root cl_openowner_id; struct rb_root cl_lockowner_id; - /* - * The following rwsem ensures exclusive access to the server - * while we recover the state following a lease expiration. - */ - struct rw_semaphore cl_sem; - struct list_head cl_delegations; struct rb_root cl_state_owners; spinlock_t cl_lock; diff --git a/include/linux/nfs_mount.h b/include/linux/nfs_mount.h index 6549a06ac16..4499016e6d0 100644 --- a/include/linux/nfs_mount.h +++ b/include/linux/nfs_mount.h @@ -45,7 +45,7 @@ struct nfs_mount_data { char context[NFS_MAX_CONTEXT_LEN + 1]; /* 6 */ }; -/* bits in the flags field */ +/* bits in the flags field visible to user space */ #define NFS_MOUNT_SOFT 0x0001 /* 1 */ #define NFS_MOUNT_INTR 0x0002 /* 1 */ /* now unused, but ABI */ @@ -68,5 +68,6 @@ struct nfs_mount_data { /* The following are for internal use only */ #define NFS_MOUNT_LOOKUP_CACHE_NONEG 0x10000 #define NFS_MOUNT_LOOKUP_CACHE_NONE 0x20000 +#define NFS_MOUNT_NORESVPORT 0x40000 #endif diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index c1c31acb8a2..a550b528319 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -120,13 +120,14 @@ struct nfs_openargs { const struct nfs_fh * fh; struct nfs_seqid * seqid; int open_flags; + fmode_t fmode; __u64 clientid; __u64 id; union { struct iattr * attrs; /* UNCHECKED, GUARDED */ nfs4_verifier verifier; /* EXCLUSIVE */ nfs4_stateid delegation; /* CLAIM_DELEGATE_CUR */ - int delegation_type; /* CLAIM_PREVIOUS */ + fmode_t delegation_type; /* CLAIM_PREVIOUS */ } u; const struct qstr * name; const struct nfs_server *server; /* Needed for ID mapping */ @@ -143,7 +144,7 @@ struct nfs_openres { struct nfs_fattr * dir_attr; struct nfs_seqid * seqid; const struct nfs_server *server; - int delegation_type; + fmode_t delegation_type; nfs4_stateid delegation; __u32 do_recall; __u64 maxsize; @@ -171,7 +172,7 @@ struct nfs_closeargs { struct nfs_fh * fh; nfs4_stateid * stateid; struct nfs_seqid * seqid; - int open_flags; + fmode_t fmode; const u32 * bitmask; }; diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h index 
d0fe2e37845..128298c0362 100644 --- a/include/linux/nfsd/state.h +++ b/include/linux/nfsd/state.h @@ -124,6 +124,8 @@ struct nfs4_client { nfs4_verifier cl_verifier; /* generated by client */ time_t cl_time; /* time of last lease renewal */ __be32 cl_addr; /* client ipaddress */ + u32 cl_flavor; /* setclientid pseudoflavor */ + char *cl_principal; /* setclientid principal name */ struct svc_cred cl_cred; /* setclientid principal */ clientid_t cl_clientid; /* generated by server */ nfs4_verifier cl_confirm; /* generated by server */ diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h index a8efcfeea73..3d327b67d7e 100644 --- a/include/linux/of_platform.h +++ b/include/linux/of_platform.h @@ -26,8 +26,7 @@ extern struct bus_type of_platform_bus_type; /* * An of_platform_driver driver is attached to a basic of_device on - * the "platform bus" (of_platform_bus_type) (or ISA, EBUS and SBUS - * busses on sparc). + * the "platform bus" (of_platform_bus_type). */ struct of_platform_driver { diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h index 5231861f357..1ce9fe572e5 100644 --- a/include/linux/oprofile.h +++ b/include/linux/oprofile.h @@ -86,8 +86,7 @@ int oprofile_arch_init(struct oprofile_operations * ops); void oprofile_arch_exit(void); /** - * Add a sample. This may be called from any context. Pass - * smp_processor_id() as cpu. + * Add a sample. This may be called from any context. */ void oprofile_add_sample(struct pt_regs * const regs, unsigned long event); diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 709742be02f..01ca0856caf 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -241,7 +241,8 @@ unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start, unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index, int tag, unsigned int nr_pages, struct page **pages); -struct page *__grab_cache_page(struct address_space *mapping, pgoff_t index); +struct page *grab_cache_page_write_begin(struct address_space *mapping, + pgoff_t index, unsigned flags); /* * Returns locked page at given index in given cache, creating it if needed. diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index b6e69445428..218c73b1e6d 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -1766,6 +1766,7 @@ #define PCI_DEVICE_ID_SIIG_8S_20x_650 0x2081 #define PCI_DEVICE_ID_SIIG_8S_20x_850 0x2082 #define PCI_SUBDEVICE_ID_SIIG_QUARTET_SERIAL 0x2050 +#define PCI_SUBDEVICE_ID_SIIG_DUAL_SERIAL 0x2530 #define PCI_VENDOR_ID_RADISYS 0x1331 @@ -1795,6 +1796,7 @@ #define PCI_DEVICE_ID_SEALEVEL_UCOMM232 0x7202 #define PCI_DEVICE_ID_SEALEVEL_COMM4 0x7401 #define PCI_DEVICE_ID_SEALEVEL_COMM8 0x7801 +#define PCI_DEVICE_ID_SEALEVEL_7803 0x7803 #define PCI_DEVICE_ID_SEALEVEL_UCOMM8 0x7804 #define PCI_VENDOR_ID_HYPERCOPE 0x1365 diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h index a7c72135554..4f71bf4e628 100644 --- a/include/linux/posix-timers.h +++ b/include/linux/posix-timers.h @@ -45,7 +45,11 @@ struct k_itimer { int it_requeue_pending; /* waiting to requeue this timer */ #define REQUEUE_PENDING 1 int it_sigev_notify; /* notify word of sigevent struct */ - struct task_struct *it_process; /* process to send signal to */ + struct signal_struct *it_signal; + union { + struct pid *it_pid; /* pid of process to send signal to */ + struct task_struct *it_process; /* for clock_nanosleep */ + }; struct sigqueue *sigq; /* signal queue entry. 
*/ union { struct { diff --git a/include/linux/quota.h b/include/linux/quota.h index 40401b55448..d72d5d84fde 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h @@ -36,17 +36,7 @@ #include <linux/errno.h> #include <linux/types.h> -#define __DQUOT_VERSION__ "dquot_6.5.1" -#define __DQUOT_NUM_VERSION__ 6*10000+5*100+1 - -/* Size of blocks in which are counted size limits */ -#define QUOTABLOCK_BITS 10 -#define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS) - -/* Conversion routines from and to quota blocks */ -#define qb2kb(x) ((x) << (QUOTABLOCK_BITS-10)) -#define kb2qb(x) ((x) >> (QUOTABLOCK_BITS-10)) -#define toqb(x) (((x) + QUOTABLOCK_SIZE - 1) >> QUOTABLOCK_BITS) +#define __DQUOT_VERSION__ "dquot_6.5.2" #define MAXQUOTAS 2 #define USRQUOTA 0 /* element used for user quotas */ @@ -80,16 +70,34 @@ #define Q_GETQUOTA 0x800007 /* get user quota structure */ #define Q_SETQUOTA 0x800008 /* set user quota structure */ +/* Quota format type IDs */ +#define QFMT_VFS_OLD 1 +#define QFMT_VFS_V0 2 + +/* Size of block in which space limits are passed through the quota + * interface */ +#define QIF_DQBLKSIZE_BITS 10 +#define QIF_DQBLKSIZE (1 << QIF_DQBLKSIZE_BITS) + /* * Quota structure used for communication with userspace via quotactl * Following flags are used to specify which fields are valid */ -#define QIF_BLIMITS 1 -#define QIF_SPACE 2 -#define QIF_ILIMITS 4 -#define QIF_INODES 8 -#define QIF_BTIME 16 -#define QIF_ITIME 32 +enum { + QIF_BLIMITS_B = 0, + QIF_SPACE_B, + QIF_ILIMITS_B, + QIF_INODES_B, + QIF_BTIME_B, + QIF_ITIME_B, +}; + +#define QIF_BLIMITS (1 << QIF_BLIMITS_B) +#define QIF_SPACE (1 << QIF_SPACE_B) +#define QIF_ILIMITS (1 << QIF_ILIMITS_B) +#define QIF_INODES (1 << QIF_INODES_B) +#define QIF_BTIME (1 << QIF_BTIME_B) +#define QIF_ITIME (1 << QIF_ITIME_B) #define QIF_LIMITS (QIF_BLIMITS | QIF_ILIMITS) #define QIF_USAGE (QIF_SPACE | QIF_INODES) #define QIF_TIMES (QIF_BTIME | QIF_ITIME) @@ -172,7 +180,7 @@ enum { #include <asm/atomic.h> typedef __kernel_uid32_t qid_t; /* Type in which we store ids in memory */ -typedef __u64 qsize_t; /* Type in which we store sizes */ +typedef long long qsize_t; /* Type in which we store sizes */ extern spinlock_t dq_data_lock; @@ -187,12 +195,12 @@ extern spinlock_t dq_data_lock; * Data for one user/group kept in memory */ struct mem_dqblk { - __u32 dqb_bhardlimit; /* absolute limit on disk blks alloc */ - __u32 dqb_bsoftlimit; /* preferred limit on disk blks */ + qsize_t dqb_bhardlimit; /* absolute limit on disk blks alloc */ + qsize_t dqb_bsoftlimit; /* preferred limit on disk blks */ qsize_t dqb_curspace; /* current used space */ - __u32 dqb_ihardlimit; /* absolute limit on allocated inodes */ - __u32 dqb_isoftlimit; /* preferred inode limit */ - __u32 dqb_curinodes; /* current # allocated inodes */ + qsize_t dqb_ihardlimit; /* absolute limit on allocated inodes */ + qsize_t dqb_isoftlimit; /* preferred inode limit */ + qsize_t dqb_curinodes; /* current # allocated inodes */ time_t dqb_btime; /* time limit for excessive disk use */ time_t dqb_itime; /* time limit for excessive inode use */ }; @@ -212,10 +220,7 @@ struct mem_dqinfo { unsigned int dqi_igrace; qsize_t dqi_maxblimit; qsize_t dqi_maxilimit; - union { - struct v1_mem_dqinfo v1_i; - struct v2_mem_dqinfo v2_i; - } u; + void *dqi_priv; }; struct super_block; @@ -249,6 +254,11 @@ extern struct dqstats dqstats; #define DQ_FAKE_B 3 /* no limits only usage */ #define DQ_READ_B 4 /* dquot was read into memory */ #define DQ_ACTIVE_B 5 /* dquot is active (dquot_release not called) */ 
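The *_B constants above (and DQ_LASTSET_B added just below) are bit numbers rather than masks, so they are meant for the atomic bitops on dquot->dq_flags, while the shifted QIF_* values stay in mask form for the quotactl interface. A hedged sketch of how the two are meant to combine, modeled on the usage the DQ_LASTSET_B comment describes; the helper name is hypothetical:

#include <linux/bitops.h>
#include <linux/quota.h>

/* Record which fields of a SETQUOTA call were supplied by user space. */
static void example_mark_setquota_fields(struct dquot *dquot, unsigned int qif)
{
        /* DQ_*_B values are bit numbers, so they pair with test_bit()/set_bit(). */
        if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
                return;

        /* QIF_* values are masks; their bit numbers, offset by DQ_LASTSET_B,
         * remember which limits were explicitly set. */
        if (qif & QIF_BLIMITS)
                set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
        if (qif & QIF_ILIMITS)
                set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
}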
+#define DQ_LASTSET_B 6 /* Following 6 bits (see QIF_) are reserved\ + * for the mask of entries set via SETQUOTA\ + * quotactl. They are set under dq_data_lock\ + * and the quota format handling dquot can\ + * clear them when it sees fit. */ struct dquot { struct hlist_node dq_hash; /* Hash list in memory */ @@ -287,11 +297,13 @@ struct dquot_operations { int (*initialize) (struct inode *, int); int (*drop) (struct inode *); int (*alloc_space) (struct inode *, qsize_t, int); - int (*alloc_inode) (const struct inode *, unsigned long); + int (*alloc_inode) (const struct inode *, qsize_t); int (*free_space) (struct inode *, qsize_t); - int (*free_inode) (const struct inode *, unsigned long); + int (*free_inode) (const struct inode *, qsize_t); int (*transfer) (struct inode *, struct iattr *); int (*write_dquot) (struct dquot *); /* Ordinary dquot write */ + struct dquot *(*alloc_dquot)(struct super_block *, int); /* Allocate memory for new dquot */ + void (*destroy_dquot)(struct dquot *); /* Free memory for dquot */ int (*acquire_dquot) (struct dquot *); /* Quota is going to be created on disk */ int (*release_dquot) (struct dquot *); /* Quota is going to be deleted from disk */ int (*mark_dirty) (struct dquot *); /* Dquot is marked dirty */ @@ -320,12 +332,42 @@ struct quota_format_type { struct quota_format_type *qf_next; }; -#define DQUOT_USR_ENABLED 0x01 /* User diskquotas enabled */ -#define DQUOT_GRP_ENABLED 0x02 /* Group diskquotas enabled */ -#define DQUOT_USR_SUSPENDED 0x04 /* User diskquotas are off, but +/* Quota state flags - they actually come in two flavors - for users and groups */ +enum { + _DQUOT_USAGE_ENABLED = 0, /* Track disk usage for users */ + _DQUOT_LIMITS_ENABLED, /* Enforce quota limits for users */ + _DQUOT_SUSPENDED, /* User diskquotas are off, but * we have necessary info in * memory to turn them on */ -#define DQUOT_GRP_SUSPENDED 0x08 /* The same for group quotas */ + _DQUOT_STATE_FLAGS +}; +#define DQUOT_USAGE_ENABLED (1 << _DQUOT_USAGE_ENABLED) +#define DQUOT_LIMITS_ENABLED (1 << _DQUOT_LIMITS_ENABLED) +#define DQUOT_SUSPENDED (1 << _DQUOT_SUSPENDED) +#define DQUOT_STATE_FLAGS (DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED | \ + DQUOT_SUSPENDED) +/* Other quota flags */ +#define DQUOT_QUOTA_SYS_FILE (1 << 6) /* Quota file is a special + * system file and user cannot + * touch it. Filesystem is + * responsible for setting + * S_NOQUOTA, S_NOATIME flags + */ +#define DQUOT_NEGATIVE_USAGE (1 << 7) /* Allow negative quota usage */ + +static inline unsigned int dquot_state_flag(unsigned int flags, int type) +{ + if (type == USRQUOTA) + return flags; + return flags << _DQUOT_STATE_FLAGS; +} + +static inline unsigned int dquot_generic_flag(unsigned int flags, int type) +{ + if (type == USRQUOTA) + return flags; + return flags >> _DQUOT_STATE_FLAGS; +} struct quota_info { unsigned int flags; /* Flags for diskquotas on this device */ diff --git a/include/linux/quotaio_v1.h b/include/linux/quotaio_v1.h deleted file mode 100644 index 746654b5de7..00000000000 --- a/include/linux/quotaio_v1.h +++ /dev/null @@ -1,33 +0,0 @@ -#ifndef _LINUX_QUOTAIO_V1_H -#define _LINUX_QUOTAIO_V1_H - -#include <linux/types.h> - -/* - * The following constants define the amount of time given a user - * before the soft limits are treated as hard limits (usually resulting - * in an allocation failure). The timer is started when the user crosses - * their soft limit, it is reset when they go below their soft limit. 
- */ -#define MAX_IQ_TIME 604800 /* (7*24*60*60) 1 week */ -#define MAX_DQ_TIME 604800 /* (7*24*60*60) 1 week */ - -/* - * The following structure defines the format of the disk quota file - * (as it appears on disk) - the file is an array of these structures - * indexed by user or group number. - */ -struct v1_disk_dqblk { - __u32 dqb_bhardlimit; /* absolute limit on disk blks alloc */ - __u32 dqb_bsoftlimit; /* preferred limit on disk blks */ - __u32 dqb_curblocks; /* current block count */ - __u32 dqb_ihardlimit; /* absolute limit on allocated inodes */ - __u32 dqb_isoftlimit; /* preferred inode limit */ - __u32 dqb_curinodes; /* current # allocated inodes */ - time_t dqb_btime; /* time limit for excessive disk use */ - time_t dqb_itime; /* time limit for excessive inode use */ -}; - -#define v1_dqoff(UID) ((loff_t)((UID) * sizeof (struct v1_disk_dqblk))) - -#endif /* _LINUX_QUOTAIO_V1_H */ diff --git a/include/linux/quotaio_v2.h b/include/linux/quotaio_v2.h deleted file mode 100644 index 303d7cbe30d..00000000000 --- a/include/linux/quotaio_v2.h +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Definitions of structures for vfsv0 quota format - */ - -#ifndef _LINUX_QUOTAIO_V2_H -#define _LINUX_QUOTAIO_V2_H - -#include <linux/types.h> -#include <linux/quota.h> - -/* - * Definitions of magics and versions of current quota files - */ -#define V2_INITQMAGICS {\ - 0xd9c01f11, /* USRQUOTA */\ - 0xd9c01927 /* GRPQUOTA */\ -} - -#define V2_INITQVERSIONS {\ - 0, /* USRQUOTA */\ - 0 /* GRPQUOTA */\ -} - -/* - * The following structure defines the format of the disk quota file - * (as it appears on disk) - the file is a radix tree whose leaves point - * to blocks of these structures. - */ -struct v2_disk_dqblk { - __le32 dqb_id; /* id this quota applies to */ - __le32 dqb_ihardlimit; /* absolute limit on allocated inodes */ - __le32 dqb_isoftlimit; /* preferred inode limit */ - __le32 dqb_curinodes; /* current # allocated inodes */ - __le32 dqb_bhardlimit; /* absolute limit on disk space (in QUOTABLOCK_SIZE) */ - __le32 dqb_bsoftlimit; /* preferred limit on disk space (in QUOTABLOCK_SIZE) */ - __le64 dqb_curspace; /* current space occupied (in bytes) */ - __le64 dqb_btime; /* time limit for excessive disk use */ - __le64 dqb_itime; /* time limit for excessive inode use */ -}; - -/* - * Here are header structures as written on disk and their in-memory copies - */ -/* First generic header */ -struct v2_disk_dqheader { - __le32 dqh_magic; /* Magic number identifying file */ - __le32 dqh_version; /* File version */ -}; - -/* Header with type and version specific information */ -struct v2_disk_dqinfo { - __le32 dqi_bgrace; /* Time before block soft limit becomes hard limit */ - __le32 dqi_igrace; /* Time before inode soft limit becomes hard limit */ - __le32 dqi_flags; /* Flags for quotafile (DQF_*) */ - __le32 dqi_blocks; /* Number of blocks in file */ - __le32 dqi_free_blk; /* Number of first free block in the list */ - __le32 dqi_free_entry; /* Number of block with at least one free entry */ -}; - -/* - * Structure of header of block with quota structures. 
It is padded to 16 bytes so - * there will be space for exactly 21 quota-entries in a block - */ -struct v2_disk_dqdbheader { - __le32 dqdh_next_free; /* Number of next block with free entry */ - __le32 dqdh_prev_free; /* Number of previous block with free entry */ - __le16 dqdh_entries; /* Number of valid entries in block */ - __le16 dqdh_pad1; - __le32 dqdh_pad2; -}; - -#define V2_DQINFOOFF sizeof(struct v2_disk_dqheader) /* Offset of info header in file */ -#define V2_DQBLKSIZE_BITS 10 -#define V2_DQBLKSIZE (1 << V2_DQBLKSIZE_BITS) /* Size of block with quota structures */ -#define V2_DQTREEOFF 1 /* Offset of tree in file in blocks */ -#define V2_DQTREEDEPTH 4 /* Depth of quota tree */ -#define V2_DQSTRINBLK ((V2_DQBLKSIZE - sizeof(struct v2_disk_dqdbheader)) / sizeof(struct v2_disk_dqblk)) /* Number of entries in one blocks */ - -#endif /* _LINUX_QUOTAIO_V2_H */ diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h index a558a4c1d35..21b781a3350 100644 --- a/include/linux/quotaops.h +++ b/include/linux/quotaops.h @@ -24,12 +24,21 @@ void sync_dquots(struct super_block *sb, int type); int dquot_initialize(struct inode *inode, int type); int dquot_drop(struct inode *inode); +int dquot_drop_locked(struct inode *inode); +struct dquot *dqget(struct super_block *sb, unsigned int id, int type); +void dqput(struct dquot *dquot); +int dquot_is_cached(struct super_block *sb, unsigned int id, int type); +int dquot_scan_active(struct super_block *sb, + int (*fn)(struct dquot *dquot, unsigned long priv), + unsigned long priv); +struct dquot *dquot_alloc(struct super_block *sb, int type); +void dquot_destroy(struct dquot *dquot); int dquot_alloc_space(struct inode *inode, qsize_t number, int prealloc); -int dquot_alloc_inode(const struct inode *inode, unsigned long number); +int dquot_alloc_inode(const struct inode *inode, qsize_t number); int dquot_free_space(struct inode *inode, qsize_t number); -int dquot_free_inode(const struct inode *inode, unsigned long number); +int dquot_free_inode(const struct inode *inode, qsize_t number); int dquot_transfer(struct inode *inode, struct iattr *iattr); int dquot_commit(struct dquot *dquot); @@ -40,11 +49,14 @@ int dquot_mark_dquot_dirty(struct dquot *dquot); int vfs_quota_on(struct super_block *sb, int type, int format_id, char *path, int remount); +int vfs_quota_enable(struct inode *inode, int type, int format_id, + unsigned int flags); int vfs_quota_on_path(struct super_block *sb, int type, int format_id, struct path *path); int vfs_quota_on_mount(struct super_block *sb, char *qf_name, int format_id, int type); int vfs_quota_off(struct super_block *sb, int type, int remount); +int vfs_quota_disable(struct super_block *sb, int type, unsigned int flags); int vfs_quota_sync(struct super_block *sb, int type); int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii); int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii); @@ -64,24 +76,22 @@ static inline struct mem_dqinfo *sb_dqinfo(struct super_block *sb, int type) * Functions for checking status of quota */ -static inline int sb_has_quota_enabled(struct super_block *sb, int type) +static inline int sb_has_quota_usage_enabled(struct super_block *sb, int type) { - if (type == USRQUOTA) - return sb_dqopt(sb)->flags & DQUOT_USR_ENABLED; - return sb_dqopt(sb)->flags & DQUOT_GRP_ENABLED; + return sb_dqopt(sb)->flags & + dquot_state_flag(DQUOT_USAGE_ENABLED, type); } -static inline int sb_any_quota_enabled(struct super_block *sb) +static inline int 
sb_has_quota_limits_enabled(struct super_block *sb, int type) { - return sb_has_quota_enabled(sb, USRQUOTA) || - sb_has_quota_enabled(sb, GRPQUOTA); + return sb_dqopt(sb)->flags & + dquot_state_flag(DQUOT_LIMITS_ENABLED, type); } static inline int sb_has_quota_suspended(struct super_block *sb, int type) { - if (type == USRQUOTA) - return sb_dqopt(sb)->flags & DQUOT_USR_SUSPENDED; - return sb_dqopt(sb)->flags & DQUOT_GRP_SUSPENDED; + return sb_dqopt(sb)->flags & + dquot_state_flag(DQUOT_SUSPENDED, type); } static inline int sb_any_quota_suspended(struct super_block *sb) @@ -90,6 +100,31 @@ static inline int sb_any_quota_suspended(struct super_block *sb) sb_has_quota_suspended(sb, GRPQUOTA); } +/* Does kernel know about any quota information for given sb + type? */ +static inline int sb_has_quota_loaded(struct super_block *sb, int type) +{ + /* Currently if anything is on, then quota usage is on as well */ + return sb_has_quota_usage_enabled(sb, type); +} + +static inline int sb_any_quota_loaded(struct super_block *sb) +{ + return sb_has_quota_loaded(sb, USRQUOTA) || + sb_has_quota_loaded(sb, GRPQUOTA); +} + +static inline int sb_has_quota_active(struct super_block *sb, int type) +{ + return sb_has_quota_loaded(sb, type) && + !sb_has_quota_suspended(sb, type); +} + +static inline int sb_any_quota_active(struct super_block *sb) +{ + return sb_has_quota_active(sb, USRQUOTA) || + sb_has_quota_active(sb, GRPQUOTA); +} + /* * Operations supported for diskquotas. */ @@ -104,7 +139,7 @@ extern struct quotactl_ops vfs_quotactl_ops; static inline void vfs_dq_init(struct inode *inode) { BUG_ON(!inode->i_sb); - if (sb_any_quota_enabled(inode->i_sb) && !IS_NOQUOTA(inode)) + if (sb_any_quota_active(inode->i_sb) && !IS_NOQUOTA(inode)) inode->i_sb->dq_op->initialize(inode, -1); } @@ -112,7 +147,7 @@ static inline void vfs_dq_init(struct inode *inode) * a transaction (deadlocks possible otherwise) */ static inline int vfs_dq_prealloc_space_nodirty(struct inode *inode, qsize_t nr) { - if (sb_any_quota_enabled(inode->i_sb)) { + if (sb_any_quota_active(inode->i_sb)) { /* Used space is updated in alloc_space() */ if (inode->i_sb->dq_op->alloc_space(inode, nr, 1) == NO_QUOTA) return 1; @@ -132,7 +167,7 @@ static inline int vfs_dq_prealloc_space(struct inode *inode, qsize_t nr) static inline int vfs_dq_alloc_space_nodirty(struct inode *inode, qsize_t nr) { - if (sb_any_quota_enabled(inode->i_sb)) { + if (sb_any_quota_active(inode->i_sb)) { /* Used space is updated in alloc_space() */ if (inode->i_sb->dq_op->alloc_space(inode, nr, 0) == NO_QUOTA) return 1; @@ -152,7 +187,7 @@ static inline int vfs_dq_alloc_space(struct inode *inode, qsize_t nr) static inline int vfs_dq_alloc_inode(struct inode *inode) { - if (sb_any_quota_enabled(inode->i_sb)) { + if (sb_any_quota_active(inode->i_sb)) { vfs_dq_init(inode); if (inode->i_sb->dq_op->alloc_inode(inode, 1) == NO_QUOTA) return 1; @@ -162,7 +197,7 @@ static inline int vfs_dq_alloc_inode(struct inode *inode) static inline void vfs_dq_free_space_nodirty(struct inode *inode, qsize_t nr) { - if (sb_any_quota_enabled(inode->i_sb)) + if (sb_any_quota_active(inode->i_sb)) inode->i_sb->dq_op->free_space(inode, nr); else inode_sub_bytes(inode, nr); @@ -176,7 +211,7 @@ static inline void vfs_dq_free_space(struct inode *inode, qsize_t nr) static inline void vfs_dq_free_inode(struct inode *inode) { - if (sb_any_quota_enabled(inode->i_sb)) + if (sb_any_quota_active(inode->i_sb)) inode->i_sb->dq_op->free_inode(inode, 1); } @@ -197,12 +232,12 @@ static inline int vfs_dq_off(struct 
super_block *sb, int remount) #else -static inline int sb_has_quota_enabled(struct super_block *sb, int type) +static inline int sb_has_quota_usage_enabled(struct super_block *sb, int type) { return 0; } -static inline int sb_any_quota_enabled(struct super_block *sb) +static inline int sb_has_quota_limits_enabled(struct super_block *sb, int type) { return 0; } @@ -217,6 +252,27 @@ static inline int sb_any_quota_suspended(struct super_block *sb) return 0; } +/* Does kernel know about any quota information for given sb + type? */ +static inline int sb_has_quota_loaded(struct super_block *sb, int type) +{ + return 0; +} + +static inline int sb_any_quota_loaded(struct super_block *sb) +{ + return 0; +} + +static inline int sb_has_quota_active(struct super_block *sb, int type) +{ + return 0; +} + +static inline int sb_any_quota_active(struct super_block *sb) +{ + return 0; +} + /* * NO-OP when quota not configured. */ diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index a916c6660df..355f6e80db0 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -136,7 +136,7 @@ do { \ */ static inline void *radix_tree_deref_slot(void **pslot) { - void *ret = *pslot; + void *ret = rcu_dereference(*pslot); if (unlikely(radix_tree_is_indirect_ptr(ret))) ret = RADIX_TREE_RETRY; return ret; diff --git a/include/linux/random.h b/include/linux/random.h index 36f125c0c60..407ea3646f8 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -8,6 +8,7 @@ #define _LINUX_RANDOM_H #include <linux/ioctl.h> +#include <linux/irqnr.h> /* ioctl()'s for the random number generator */ diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h index 5f89b62e698..f3f697df1d7 100644 --- a/include/linux/rcuclassic.h +++ b/include/linux/rcuclassic.h @@ -41,7 +41,7 @@ #include <linux/seqlock.h> #ifdef CONFIG_RCU_CPU_STALL_DETECTOR -#define RCU_SECONDS_TILL_STALL_CHECK ( 3 * HZ) /* for rcp->jiffies_stall */ +#define RCU_SECONDS_TILL_STALL_CHECK (10 * HZ) /* for rcp->jiffies_stall */ #define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ) /* for rcp->jiffies_stall */ #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ @@ -59,8 +59,8 @@ struct rcu_ctrlblk { int signaled; spinlock_t lock ____cacheline_internodealigned_in_smp; - cpumask_t cpumask; /* CPUs that need to switch in order */ - /* for current batch to proceed. */ + DECLARE_BITMAP(cpumask, NR_CPUS); /* CPUs that need to switch for */ + /* current batch to proceed. */ } ____cacheline_internodealigned_in_smp; /* Is batch a before batch b ? 
*/ diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 895dc9c1088..1168fbcea8d 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -52,11 +52,15 @@ struct rcu_head { void (*func)(struct rcu_head *head); }; -#ifdef CONFIG_CLASSIC_RCU +#if defined(CONFIG_CLASSIC_RCU) #include <linux/rcuclassic.h> -#else /* #ifdef CONFIG_CLASSIC_RCU */ +#elif defined(CONFIG_TREE_RCU) +#include <linux/rcutree.h> +#elif defined(CONFIG_PREEMPT_RCU) #include <linux/rcupreempt.h> -#endif /* #else #ifdef CONFIG_CLASSIC_RCU */ +#else +#error "Unknown RCU implementation specified to kernel configuration" +#endif /* #else #if defined(CONFIG_CLASSIC_RCU) */ #define RCU_HEAD_INIT { .next = NULL, .func = NULL } #define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h new file mode 100644 index 00000000000..d4368b7975c --- /dev/null +++ b/include/linux/rcutree.h @@ -0,0 +1,329 @@ +/* + * Read-Copy Update mechanism for mutual exclusion (tree-based version) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright IBM Corporation, 2008 + * + * Author: Dipankar Sarma <dipankar@in.ibm.com> + * Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical algorithm + * + * Based on the original work by Paul McKenney <paulmck@us.ibm.com> + * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. + * + * For detailed explanation of Read-Copy Update mechanism see - + * Documentation/RCU + */ + +#ifndef __LINUX_RCUTREE_H +#define __LINUX_RCUTREE_H + +#include <linux/cache.h> +#include <linux/spinlock.h> +#include <linux/threads.h> +#include <linux/percpu.h> +#include <linux/cpumask.h> +#include <linux/seqlock.h> + +/* + * Define shape of hierarchy based on NR_CPUS and CONFIG_RCU_FANOUT. + * In theory, it should be possible to add more levels straightforwardly. + * In practice, this has not been tested, so there is probably some + * bug somewhere. 
+ */ +#define MAX_RCU_LVLS 3 +#define RCU_FANOUT (CONFIG_RCU_FANOUT) +#define RCU_FANOUT_SQ (RCU_FANOUT * RCU_FANOUT) +#define RCU_FANOUT_CUBE (RCU_FANOUT_SQ * RCU_FANOUT) + +#if NR_CPUS <= RCU_FANOUT +# define NUM_RCU_LVLS 1 +# define NUM_RCU_LVL_0 1 +# define NUM_RCU_LVL_1 (NR_CPUS) +# define NUM_RCU_LVL_2 0 +# define NUM_RCU_LVL_3 0 +#elif NR_CPUS <= RCU_FANOUT_SQ +# define NUM_RCU_LVLS 2 +# define NUM_RCU_LVL_0 1 +# define NUM_RCU_LVL_1 (((NR_CPUS) + RCU_FANOUT - 1) / RCU_FANOUT) +# define NUM_RCU_LVL_2 (NR_CPUS) +# define NUM_RCU_LVL_3 0 +#elif NR_CPUS <= RCU_FANOUT_CUBE +# define NUM_RCU_LVLS 3 +# define NUM_RCU_LVL_0 1 +# define NUM_RCU_LVL_1 (((NR_CPUS) + RCU_FANOUT_SQ - 1) / RCU_FANOUT_SQ) +# define NUM_RCU_LVL_2 (((NR_CPUS) + (RCU_FANOUT) - 1) / (RCU_FANOUT)) +# define NUM_RCU_LVL_3 NR_CPUS +#else +# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS" +#endif /* #if (NR_CPUS) <= RCU_FANOUT */ + +#define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3) +#define NUM_RCU_NODES (RCU_SUM - NR_CPUS) + +/* + * Dynticks per-CPU state. + */ +struct rcu_dynticks { + int dynticks_nesting; /* Track nesting level, sort of. */ + int dynticks; /* Even value for dynticks-idle, else odd. */ + int dynticks_nmi; /* Even value for either dynticks-idle or */ + /* not in nmi handler, else odd. So this */ + /* remains even for nmi from irq handler. */ +}; + +/* + * Definition for node within the RCU grace-period-detection hierarchy. + */ +struct rcu_node { + spinlock_t lock; + unsigned long qsmask; /* CPUs or groups that need to switch in */ + /* order for current grace period to proceed.*/ + unsigned long qsmaskinit; + /* Per-GP initialization for qsmask. */ + unsigned long grpmask; /* Mask to apply to parent qsmask. */ + int grplo; /* lowest-numbered CPU or group here. */ + int grphi; /* highest-numbered CPU or group here. */ + u8 grpnum; /* CPU/group number for next level up. */ + u8 level; /* root is at level 0. */ + struct rcu_node *parent; +} ____cacheline_internodealigned_in_smp; + +/* Index values for nxttail array in struct rcu_data. */ +#define RCU_DONE_TAIL 0 /* Also RCU_WAIT head. */ +#define RCU_WAIT_TAIL 1 /* Also RCU_NEXT_READY head. */ +#define RCU_NEXT_READY_TAIL 2 /* Also RCU_NEXT head. */ +#define RCU_NEXT_TAIL 3 +#define RCU_NEXT_SIZE 4 + +/* Per-CPU data for read-copy update. */ +struct rcu_data { + /* 1) quiescent-state and grace-period handling : */ + long completed; /* Track rsp->completed gp number */ + /* in order to detect GP end. */ + long gpnum; /* Highest gp number that this CPU */ + /* is aware of having started. */ + long passed_quiesc_completed; + /* Value of completed at time of qs. */ + bool passed_quiesc; /* User-mode/idle loop etc. */ + bool qs_pending; /* Core waits for quiesc state. */ + bool beenonline; /* CPU online at least once. */ + struct rcu_node *mynode; /* This CPU's leaf of hierarchy */ + unsigned long grpmask; /* Mask to apply to leaf qsmask. */ + + /* 2) batch handling */ + /* + * If nxtlist is not NULL, it is partitioned as follows. + * Any of the partitions might be empty, in which case the + * pointer to that partition will be equal to the pointer for + * the following partition. When the list is empty, all of + * the nxttail elements point to nxtlist, which is NULL. 
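As a worked illustration of the shape macros above (the numbers are chosen here for illustration, not taken from the patch): with CONFIG_RCU_FANOUT=64 and NR_CPUS=128, NR_CPUS is larger than RCU_FANOUT but no larger than RCU_FANOUT_SQ (4096), so NUM_RCU_LVLS=2 with NUM_RCU_LVL_0=1, NUM_RCU_LVL_1=(128+63)/64=2 and NUM_RCU_LVL_2=128. RCU_SUM is then 1+2+128+0=131 and NUM_RCU_NODES=131-128=3, i.e. one root rcu_node with two leaf rcu_node children, each covering 64 CPUs.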
+ * + * [*nxttail[RCU_NEXT_READY_TAIL], NULL = *nxttail[RCU_NEXT_TAIL]): + * Entries that might have arrived after current GP ended + * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]): + * Entries known to have arrived before current GP ended + * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]): + * Entries that batch # <= ->completed - 1: waiting for current GP + * [nxtlist, *nxttail[RCU_DONE_TAIL]): + * Entries that batch # <= ->completed + * The grace period for these entries has completed, and + * the other grace-period-completed entries may be moved + * here temporarily in rcu_process_callbacks(). + */ + struct rcu_head *nxtlist; + struct rcu_head **nxttail[RCU_NEXT_SIZE]; + long qlen; /* # of queued callbacks */ + long blimit; /* Upper limit on a processed batch */ + +#ifdef CONFIG_NO_HZ + /* 3) dynticks interface. */ + struct rcu_dynticks *dynticks; /* Shared per-CPU dynticks state. */ + int dynticks_snap; /* Per-GP tracking for dynticks. */ + int dynticks_nmi_snap; /* Per-GP tracking for dynticks_nmi. */ +#endif /* #ifdef CONFIG_NO_HZ */ + + /* 4) reasons this CPU needed to be kicked by force_quiescent_state */ +#ifdef CONFIG_NO_HZ + unsigned long dynticks_fqs; /* Kicked due to dynticks idle. */ +#endif /* #ifdef CONFIG_NO_HZ */ + unsigned long offline_fqs; /* Kicked due to being offline. */ + unsigned long resched_ipi; /* Sent a resched IPI. */ + + /* 5) state to allow this CPU to force_quiescent_state on others */ + long n_rcu_pending; /* rcu_pending() calls since boot. */ + long n_rcu_pending_force_qs; /* when to force quiescent states. */ + + int cpu; +}; + +/* Values for signaled field in struct rcu_state. */ +#define RCU_GP_INIT 0 /* Grace period being initialized. */ +#define RCU_SAVE_DYNTICK 1 /* Need to scan dyntick state. */ +#define RCU_FORCE_QS 2 /* Need to force quiescent state. */ +#ifdef CONFIG_NO_HZ +#define RCU_SIGNAL_INIT RCU_SAVE_DYNTICK +#else /* #ifdef CONFIG_NO_HZ */ +#define RCU_SIGNAL_INIT RCU_FORCE_QS +#endif /* #else #ifdef CONFIG_NO_HZ */ + +#define RCU_JIFFIES_TILL_FORCE_QS 3 /* for rsp->jiffies_force_qs */ +#ifdef CONFIG_RCU_CPU_STALL_DETECTOR +#define RCU_SECONDS_TILL_STALL_CHECK (10 * HZ) /* for rsp->jiffies_stall */ +#define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ) /* for rsp->jiffies_stall */ +#define RCU_STALL_RAT_DELAY 2 /* Allow other CPUs time */ + /* to take at least one */ + /* scheduling clock irq */ + /* before ratting on them. */ + +#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ + +/* + * RCU global state, including node hierarchy. This hierarchy is + * represented in "heap" form in a dense array. The root (first level) + * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second + * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]), + * and the third level in ->node[m+1] and following (->node[m+1] referenced + * by ->level[2]). The number of levels is determined by the number of + * CPUs and by CONFIG_RCU_FANOUT. Small systems will have a "hierarchy" + * consisting of a single rcu_node. + */ +struct rcu_state { + struct rcu_node node[NUM_RCU_NODES]; /* Hierarchy. */ + struct rcu_node *level[NUM_RCU_LVLS]; /* Hierarchy levels. */ + u32 levelcnt[MAX_RCU_LVLS + 1]; /* # nodes in each level. */ + u8 levelspread[NUM_RCU_LVLS]; /* kids/node in each level. */ + struct rcu_data *rda[NR_CPUS]; /* array of rdp pointers. */ + + /* The following fields are guarded by the root rcu_node's lock. */ + + u8 signaled ____cacheline_internodealigned_in_smp; + /* Force QS state. 
*/ + long gpnum; /* Current gp number. */ + long completed; /* # of last completed gp. */ + spinlock_t onofflock; /* exclude on/offline and */ + /* starting new GP. */ + spinlock_t fqslock; /* Only one task forcing */ + /* quiescent states. */ + unsigned long jiffies_force_qs; /* Time at which to invoke */ + /* force_quiescent_state(). */ + unsigned long n_force_qs; /* Number of calls to */ + /* force_quiescent_state(). */ + unsigned long n_force_qs_lh; /* ~Number of calls leaving */ + /* due to lock unavailable. */ + unsigned long n_force_qs_ngp; /* Number of calls leaving */ + /* due to no GP active. */ +#ifdef CONFIG_RCU_CPU_STALL_DETECTOR + unsigned long gp_start; /* Time at which GP started, */ + /* but in jiffies. */ + unsigned long jiffies_stall; /* Time at which to check */ + /* for CPU stalls. */ +#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ +#ifdef CONFIG_NO_HZ + long dynticks_completed; /* Value of completed @ snap. */ +#endif /* #ifdef CONFIG_NO_HZ */ +}; + +extern struct rcu_state rcu_state; +DECLARE_PER_CPU(struct rcu_data, rcu_data); + +extern struct rcu_state rcu_bh_state; +DECLARE_PER_CPU(struct rcu_data, rcu_bh_data); + +/* + * Increment the quiescent state counter. + * The counter is a bit degenerated: We do not need to know + * how many quiescent states passed, just if there was at least + * one since the start of the grace period. Thus just a flag. + */ +static inline void rcu_qsctr_inc(int cpu) +{ + struct rcu_data *rdp = &per_cpu(rcu_data, cpu); + rdp->passed_quiesc = 1; + rdp->passed_quiesc_completed = rdp->completed; +} +static inline void rcu_bh_qsctr_inc(int cpu) +{ + struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu); + rdp->passed_quiesc = 1; + rdp->passed_quiesc_completed = rdp->completed; +} + +extern int rcu_pending(int cpu); +extern int rcu_needs_cpu(int cpu); + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +extern struct lockdep_map rcu_lock_map; +# define rcu_read_acquire() \ + lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_) +# define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_) +#else +# define rcu_read_acquire() do { } while (0) +# define rcu_read_release() do { } while (0) +#endif + +static inline void __rcu_read_lock(void) +{ + preempt_disable(); + __acquire(RCU); + rcu_read_acquire(); +} +static inline void __rcu_read_unlock(void) +{ + rcu_read_release(); + __release(RCU); + preempt_enable(); +} +static inline void __rcu_read_lock_bh(void) +{ + local_bh_disable(); + __acquire(RCU_BH); + rcu_read_acquire(); +} +static inline void __rcu_read_unlock_bh(void) +{ + rcu_read_release(); + __release(RCU_BH); + local_bh_enable(); +} + +#define __synchronize_sched() synchronize_rcu() + +#define call_rcu_sched(head, func) call_rcu(head, func) + +static inline void rcu_init_sched(void) +{ +} + +extern void __rcu_init(void); +extern void rcu_check_callbacks(int cpu, int user); +extern void rcu_restart_cpu(int cpu); + +extern long rcu_batches_completed(void); +extern long rcu_batches_completed_bh(void); + +#ifdef CONFIG_NO_HZ +void rcu_enter_nohz(void); +void rcu_exit_nohz(void); +#else /* CONFIG_NO_HZ */ +static inline void rcu_enter_nohz(void) +{ +} +static inline void rcu_exit_nohz(void) +{ +} +#endif /* CONFIG_NO_HZ */ + +#endif /* __LINUX_RCUTREE_H */ diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index d363467c8f1..b3b35966008 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h @@ -118,6 +118,8 @@ void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu); unsigned long 
ring_buffer_entries(struct ring_buffer *buffer); unsigned long ring_buffer_overruns(struct ring_buffer *buffer); +unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu); +unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu); u64 ring_buffer_time_stamp(int cpu); void ring_buffer_normalize_time_stamp(int cpu, u64 *ts); diff --git a/include/linux/rtc.h b/include/linux/rtc.h index 91f597ad6ac..4046b75563c 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h @@ -145,6 +145,8 @@ struct rtc_class_ops { int (*irq_set_state)(struct device *, int enabled); int (*irq_set_freq)(struct device *, int freq); int (*read_callback)(struct device *, int data); + int (*alarm_irq_enable)(struct device *, unsigned int enabled); + int (*update_irq_enable)(struct device *, unsigned int enabled); }; #define RTC_DEVICE_NAME_SIZE 20 @@ -181,7 +183,7 @@ struct rtc_device struct timer_list uie_timer; /* Those fields are protected by rtc->irq_lock */ unsigned int oldsecs; - unsigned int irq_active:1; + unsigned int uie_irq_active:1; unsigned int stop_uie_polling:1; unsigned int uie_task_active:1; unsigned int uie_timer_active:1; @@ -216,6 +218,10 @@ extern int rtc_irq_set_state(struct rtc_device *rtc, struct rtc_task *task, int enabled); extern int rtc_irq_set_freq(struct rtc_device *rtc, struct rtc_task *task, int freq); +extern int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled); +extern int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled); +extern int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc, + unsigned int enabled); typedef struct rtc_task { void (*func)(void *private_data); diff --git a/include/linux/sched.h b/include/linux/sched.h index 8395e715809..38a3f4b1539 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -250,7 +250,7 @@ extern void init_idle_bootup_task(struct task_struct *idle); extern int runqueue_is_locked(void); extern void task_rq_unlock_wait(struct task_struct *p); -extern cpumask_t nohz_cpu_mask; +extern cpumask_var_t nohz_cpu_mask; #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ) extern int select_nohz_load_balancer(int cpu); #else @@ -284,7 +284,6 @@ long io_schedule_timeout(long timeout); extern void cpu_init (void); extern void trap_init(void); -extern void account_process_tick(struct task_struct *task, int user); extern void update_process_times(int user); extern void scheduler_tick(void); @@ -758,20 +757,51 @@ enum cpu_idle_type { #define SD_SERIALIZE 1024 /* Only a single load balancing instance */ #define SD_WAKE_IDLE_FAR 2048 /* Gain latency sacrificing cache hit */ -#define BALANCE_FOR_MC_POWER \ - (sched_smt_power_savings ? SD_POWERSAVINGS_BALANCE : 0) +enum powersavings_balance_level { + POWERSAVINGS_BALANCE_NONE = 0, /* No power saving load balance */ + POWERSAVINGS_BALANCE_BASIC, /* Fill one thread/core/package + * first for long running threads + */ + POWERSAVINGS_BALANCE_WAKEUP, /* Also bias task wakeups to semi-idle + * cpu package for power savings + */ + MAX_POWERSAVINGS_BALANCE_LEVELS +}; -#define BALANCE_FOR_PKG_POWER \ - ((sched_mc_power_savings || sched_smt_power_savings) ? \ - SD_POWERSAVINGS_BALANCE : 0) +extern int sched_mc_power_savings, sched_smt_power_savings; -#define test_sd_parent(sd, flag) ((sd->parent && \ - (sd->parent->flags & flag)) ? 
1 : 0) +static inline int sd_balance_for_mc_power(void) +{ + if (sched_smt_power_savings) + return SD_POWERSAVINGS_BALANCE; + return 0; +} + +static inline int sd_balance_for_package_power(void) +{ + if (sched_mc_power_savings | sched_smt_power_savings) + return SD_POWERSAVINGS_BALANCE; + + return 0; +} + +/* + * Optimise SD flags for power savings: + * SD_BALANCE_NEWIDLE helps aggressive task consolidation and power savings. + * Keep default SD flags if sched_{smt,mc}_power_saving=0 + */ + +static inline int sd_power_saving_flags(void) +{ + if (sched_mc_power_savings | sched_smt_power_savings) + return SD_BALANCE_NEWIDLE; + + return 0; +} struct sched_group { struct sched_group *next; /* Must be a circular list */ - cpumask_t cpumask; /* * CPU power of this group, SCHED_LOAD_SCALE being max power for a @@ -784,8 +814,15 @@ struct sched_group { * (see include/linux/reciprocal_div.h) */ u32 reciprocal_cpu_power; + + unsigned long cpumask[]; }; +static inline struct cpumask *sched_group_cpus(struct sched_group *sg) +{ + return to_cpumask(sg->cpumask); +} + enum sched_domain_level { SD_LV_NONE = 0, SD_LV_SIBLING, @@ -809,7 +846,6 @@ struct sched_domain { struct sched_domain *parent; /* top domain must be null terminated */ struct sched_domain *child; /* bottom domain must be null terminated */ struct sched_group *groups; /* the balancing groups of the domain */ - cpumask_t span; /* span of all CPUs in this domain */ unsigned long min_interval; /* Minimum balance interval ms */ unsigned long max_interval; /* Maximum balance interval ms */ unsigned int busy_factor; /* less balancing by factor if busy */ @@ -864,18 +900,35 @@ struct sched_domain { #ifdef CONFIG_SCHED_DEBUG char *name; #endif + + /* span of all CPUs in this domain */ + unsigned long span[]; }; -extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new, +static inline struct cpumask *sched_domain_span(struct sched_domain *sd) +{ + return to_cpumask(sd->span); +} + +extern void partition_sched_domains(int ndoms_new, struct cpumask *doms_new, struct sched_domain_attr *dattr_new); extern int arch_reinit_sched_domains(void); +/* Test a flag in parent sched domain */ +static inline int test_sd_parent(struct sched_domain *sd, int flag) +{ + if (sd->parent && (sd->parent->flags & flag)) + return 1; + + return 0; +} + #else /* CONFIG_SMP */ struct sched_domain_attr; static inline void -partition_sched_domains(int ndoms_new, cpumask_t *doms_new, +partition_sched_domains(int ndoms_new, struct cpumask *doms_new, struct sched_domain_attr *dattr_new) { } @@ -926,7 +979,7 @@ struct sched_class { void (*task_wake_up) (struct rq *this_rq, struct task_struct *task); void (*set_cpus_allowed)(struct task_struct *p, - const cpumask_t *newmask); + const struct cpumask *newmask); void (*rq_online)(struct rq *rq); void (*rq_offline)(struct rq *rq); @@ -1579,12 +1632,12 @@ extern cputime_t task_gtime(struct task_struct *p); #ifdef CONFIG_SMP extern int set_cpus_allowed_ptr(struct task_struct *p, - const cpumask_t *new_mask); + const struct cpumask *new_mask); #else static inline int set_cpus_allowed_ptr(struct task_struct *p, - const cpumask_t *new_mask) + const struct cpumask *new_mask) { - if (!cpu_isset(0, *new_mask)) + if (!cpumask_test_cpu(0, new_mask)) return -EINVAL; return 0; } @@ -2195,10 +2248,8 @@ __trace_special(void *__tr, void *__data, } #endif -extern long sched_setaffinity(pid_t pid, const cpumask_t *new_mask); -extern long sched_getaffinity(pid_t pid, cpumask_t *mask); - -extern int sched_mc_power_savings,
sched_smt_power_savings; +extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); +extern long sched_getaffinity(pid_t pid, struct cpumask *mask); extern void normalize_rt_tasks(void); diff --git a/include/linux/security.h b/include/linux/security.h index 3416cb85e77..b92b5e453f6 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -335,17 +335,37 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @dir contains the inode structure of the parent directory of the new link. * @new_dentry contains the dentry structure for the new link. * Return 0 if permission is granted. + * @path_link: + * Check permission before creating a new hard link to a file. + * @old_dentry contains the dentry structure for an existing link + * to the file. + * @new_dir contains the path structure of the parent directory of + * the new link. + * @new_dentry contains the dentry structure for the new link. + * Return 0 if permission is granted. * @inode_unlink: * Check the permission to remove a hard link to a file. * @dir contains the inode structure of parent directory of the file. * @dentry contains the dentry structure for file to be unlinked. * Return 0 if permission is granted. + * @path_unlink: + * Check the permission to remove a hard link to a file. + * @dir contains the path structure of parent directory of the file. + * @dentry contains the dentry structure for file to be unlinked. + * Return 0 if permission is granted. * @inode_symlink: * Check the permission to create a symbolic link to a file. * @dir contains the inode structure of parent directory of the symbolic link. * @dentry contains the dentry structure of the symbolic link. * @old_name contains the pathname of file. * Return 0 if permission is granted. + * @path_symlink: + * Check the permission to create a symbolic link to a file. + * @dir contains the path structure of parent directory of + * the symbolic link. + * @dentry contains the dentry structure of the symbolic link. + * @old_name contains the pathname of file. + * Return 0 if permission is granted. * @inode_mkdir: * Check permissions to create a new directory in the existing directory * associated with inode strcture @dir. @@ -353,11 +373,25 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @dentry contains the dentry structure of new directory. * @mode contains the mode of new directory. * Return 0 if permission is granted. + * @path_mkdir: + * Check permissions to create a new directory in the existing directory + * associated with path structure @path. + * @dir contains the path structure of parent of the directory + * to be created. + * @dentry contains the dentry structure of new directory. + * @mode contains the mode of new directory. + * Return 0 if permission is granted. * @inode_rmdir: * Check the permission to remove a directory. * @dir contains the inode structure of parent of the directory to be removed. * @dentry contains the dentry structure of directory to be removed. * Return 0 if permission is granted. + * @path_rmdir: + * Check the permission to remove a directory. + * @dir contains the path structure of parent of the directory to be + * removed. + * @dentry contains the dentry structure of directory to be removed. + * Return 0 if permission is granted. * @inode_mknod: * Check permissions when creating a special file (or a socket or a fifo * file created via the mknod system call).
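The path_* hooks documented above mirror the corresponding inode_* hooks but pass a struct path for the parent, so a path-based LSM can see the vfsmount as well as the dentry. A minimal illustrative sketch of such a hook, assuming CONFIG_SECURITY_PATH; the policy and function name are hypothetical and not part of this patch:

#include <linux/security.h>
#include <linux/path.h>
#include <linux/dcache.h>
#include <linux/string.h>
#include <linux/errno.h>

/* Hypothetical policy: refuse to create a directory literally named
 * "forbidden" anywhere; everything else is allowed (return 0). */
static int example_path_mkdir(struct path *dir, struct dentry *dentry, int mode)
{
        if (!strcmp((const char *)dentry->d_name.name, "forbidden"))
                return -EPERM;
        return 0;
}

Such a function would be wired up through the new ->path_mkdir member of struct security_operations shown further below.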
Note that if mknod operation @@ -368,6 +402,15 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @mode contains the mode of the new file. * @dev contains the device number. * Return 0 if permission is granted. + * @path_mknod: + * Check permissions when creating a file. Note that this hook is called + * even if mknod operation is being done for a regular file. + * @dir contains the path structure of parent of the new file. + * @dentry contains the dentry structure of the new file. + * @mode contains the mode of the new file. + * @dev contains the undecoded device number. Use new_decode_dev() to get + * the decoded device number. + * Return 0 if permission is granted. * @inode_rename: * Check for permission to rename a file or directory. * @old_dir contains the inode structure for parent of the old link. @@ -375,6 +418,13 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @new_dir contains the inode structure for parent of the new link. * @new_dentry contains the dentry structure of the new link. * Return 0 if permission is granted. + * @path_rename: + * Check for permission to rename a file or directory. + * @old_dir contains the path structure for parent of the old link. + * @old_dentry contains the dentry structure of the old link. + * @new_dir contains the path structure for parent of the new link. + * @new_dentry contains the dentry structure of the new link. + * Return 0 if permission is granted. * @inode_readlink: * Check the permission to read the symbolic link. * @dentry contains the dentry structure for the file link. @@ -403,6 +453,12 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @dentry contains the dentry structure for the file. * @attr is the iattr structure containing the new file attributes. * Return 0 if permission is granted. + * @path_truncate: + * Check permission before truncating a file. + * @path contains the path structure for the file. + * @length is the new length of the file. + * @time_attrs is the flags passed to do_truncate(). + * Return 0 if permission is granted. * @inode_getattr: * Check permission before obtaining file attributes. 
* @mnt is the vfsmount where the dentry was looked up @@ -1331,6 +1387,22 @@ struct security_operations { struct super_block *newsb); int (*sb_parse_opts_str) (char *options, struct security_mnt_opts *opts); +#ifdef CONFIG_SECURITY_PATH + int (*path_unlink) (struct path *dir, struct dentry *dentry); + int (*path_mkdir) (struct path *dir, struct dentry *dentry, int mode); + int (*path_rmdir) (struct path *dir, struct dentry *dentry); + int (*path_mknod) (struct path *dir, struct dentry *dentry, int mode, + unsigned int dev); + int (*path_truncate) (struct path *path, loff_t length, + unsigned int time_attrs); + int (*path_symlink) (struct path *dir, struct dentry *dentry, + const char *old_name); + int (*path_link) (struct dentry *old_dentry, struct path *new_dir, + struct dentry *new_dentry); + int (*path_rename) (struct path *old_dir, struct dentry *old_dentry, + struct path *new_dir, struct dentry *new_dentry); +#endif + int (*inode_alloc_security) (struct inode *inode); void (*inode_free_security) (struct inode *inode); int (*inode_init_security) (struct inode *inode, struct inode *dir, @@ -2705,6 +2777,71 @@ static inline void security_skb_classify_flow(struct sk_buff *skb, struct flowi #endif /* CONFIG_SECURITY_NETWORK_XFRM */ +#ifdef CONFIG_SECURITY_PATH +int security_path_unlink(struct path *dir, struct dentry *dentry); +int security_path_mkdir(struct path *dir, struct dentry *dentry, int mode); +int security_path_rmdir(struct path *dir, struct dentry *dentry); +int security_path_mknod(struct path *dir, struct dentry *dentry, int mode, + unsigned int dev); +int security_path_truncate(struct path *path, loff_t length, + unsigned int time_attrs); +int security_path_symlink(struct path *dir, struct dentry *dentry, + const char *old_name); +int security_path_link(struct dentry *old_dentry, struct path *new_dir, + struct dentry *new_dentry); +int security_path_rename(struct path *old_dir, struct dentry *old_dentry, + struct path *new_dir, struct dentry *new_dentry); +#else /* CONFIG_SECURITY_PATH */ +static inline int security_path_unlink(struct path *dir, struct dentry *dentry) +{ + return 0; +} + +static inline int security_path_mkdir(struct path *dir, struct dentry *dentry, + int mode) +{ + return 0; +} + +static inline int security_path_rmdir(struct path *dir, struct dentry *dentry) +{ + return 0; +} + +static inline int security_path_mknod(struct path *dir, struct dentry *dentry, + int mode, unsigned int dev) +{ + return 0; +} + +static inline int security_path_truncate(struct path *path, loff_t length, + unsigned int time_attrs) +{ + return 0; +} + +static inline int security_path_symlink(struct path *dir, struct dentry *dentry, + const char *old_name) +{ + return 0; +} + +static inline int security_path_link(struct dentry *old_dentry, + struct path *new_dir, + struct dentry *new_dentry) +{ + return 0; +} + +static inline int security_path_rename(struct path *old_dir, + struct dentry *old_dentry, + struct path *new_dir, + struct dentry *new_dentry) +{ + return 0; +} +#endif /* CONFIG_SECURITY_PATH */ + #ifdef CONFIG_KEYS #ifdef CONFIG_SECURITY diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index b3dfa72f13b..40ea5058c2e 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -50,10 +50,11 @@ int seq_path(struct seq_file *, struct path *, char *); int seq_dentry(struct seq_file *, struct dentry *, char *); int seq_path_root(struct seq_file *m, struct path *path, struct path *root, char *esc); -int seq_bitmap(struct seq_file *m, unsigned long *bits, 
unsigned int nr_bits); -static inline int seq_cpumask(struct seq_file *m, cpumask_t *mask) +int seq_bitmap(struct seq_file *m, const unsigned long *bits, + unsigned int nr_bits); +static inline int seq_cpumask(struct seq_file *m, const struct cpumask *mask) { - return seq_bitmap(m, mask->bits, NR_CPUS); + return seq_bitmap(m, mask->bits, nr_cpu_ids); } static inline int seq_nodemask(struct seq_file *m, nodemask_t *mask) diff --git a/include/linux/serial.h b/include/linux/serial.h index 1ea8d9265bf..9136cc5608c 100644 --- a/include/linux/serial.h +++ b/include/linux/serial.h @@ -10,8 +10,9 @@ #ifndef _LINUX_SERIAL_H #define _LINUX_SERIAL_H -#ifdef __KERNEL__ #include <linux/types.h> + +#ifdef __KERNEL__ #include <asm/page.h> /* diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h index 3d37c94abbc..d4d2a78ad43 100644 --- a/include/linux/serial_8250.h +++ b/include/linux/serial_8250.h @@ -28,6 +28,9 @@ struct plat_serial8250_port { unsigned char iotype; /* UPIO_* */ unsigned char hub6; upf_t flags; /* UPF_* flags */ + unsigned int type; /* If UPF_FIXED_TYPE */ + unsigned int (*serial_in)(struct uart_port *, int); + void (*serial_out)(struct uart_port *, int, int); }; /* diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 4e4f1277f3b..b4199841f1f 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -40,7 +40,8 @@ #define PORT_NS16550A 14 #define PORT_XSCALE 15 #define PORT_RM9000 16 /* PMC-Sierra RM9xxx internal UART */ -#define PORT_MAX_8250 16 /* max port ID */ +#define PORT_OCTEON 17 /* Cavium OCTEON internal UART */ +#define PORT_MAX_8250 17 /* max port ID */ /* * ARM specific type numbers. These are not currently guaranteed @@ -158,6 +159,8 @@ /* SH-SCI */ #define PORT_SCIFA 83 +#define PORT_S3C6400 84 + #ifdef __KERNEL__ #include <linux/compiler.h> @@ -246,6 +249,8 @@ struct uart_port { spinlock_t lock; /* port lock */ unsigned long iobase; /* in/out[bwl] */ unsigned char __iomem *membase; /* read/write[bwl] */ + unsigned int (*serial_in)(struct uart_port *, int); + void (*serial_out)(struct uart_port *, int, int); unsigned int irq; /* irq number */ unsigned int uartclk; /* base uart clock */ unsigned int fifosize; /* tx fifo size */ @@ -291,6 +296,8 @@ struct uart_port { #define UPF_MAGIC_MULTIPLIER ((__force upf_t) (1 << 16)) #define UPF_CONS_FLOW ((__force upf_t) (1 << 23)) #define UPF_SHARE_IRQ ((__force upf_t) (1 << 24)) +/* The exact UART type is known and should not be probed. */ +#define UPF_FIXED_TYPE ((__force upf_t) (1 << 27)) #define UPF_BOOT_AUTOCONF ((__force upf_t) (1 << 28)) #define UPF_FIXED_PORT ((__force upf_t) (1 << 29)) #define UPF_DEAD ((__force upf_t) (1 << 30)) @@ -314,35 +321,13 @@ struct uart_port { }; /* - * This is the state information which is persistent across opens. - * The low level driver must not to touch any elements contained - * within. - */ -struct uart_state { - unsigned int close_delay; /* msec */ - unsigned int closing_wait; /* msec */ - -#define USF_CLOSING_WAIT_INF (0) -#define USF_CLOSING_WAIT_NONE (~0U) - - int count; - int pm_state; - struct uart_info *info; - struct uart_port *port; - - struct mutex mutex; -}; - -#define UART_XMIT_SIZE PAGE_SIZE - -typedef unsigned int __bitwise__ uif_t; - -/* * This is the state information which is only valid when the port - * is open; it may be freed by the core driver once the device has + * is open; it may be cleared by the core driver once the device has * been closed. Either the low level driver or the core can modify * stuff here.
*/ +typedef unsigned int __bitwise__ uif_t; + struct uart_info { struct tty_port port; struct circ_buf xmit; @@ -364,6 +349,29 @@ struct uart_info { wait_queue_head_t delta_msr_wait; }; +/* + * This is the state information which is persistent across opens. + * The low level driver must not touch any elements contained + * within. + */ +struct uart_state { + unsigned int close_delay; /* msec */ + unsigned int closing_wait; /* msec */ + +#define USF_CLOSING_WAIT_INF (0) +#define USF_CLOSING_WAIT_NONE (~0U) + + int count; + int pm_state; + struct uart_info info; + struct uart_port *port; + + struct mutex mutex; +}; + +#define UART_XMIT_SIZE PAGE_SIZE + + /* number of characters left in xmit buffer before we ask for more */ #define WAKEUP_CHARS 256 @@ -437,8 +445,13 @@ int uart_resume_port(struct uart_driver *reg, struct uart_port *port); #define uart_circ_chars_free(circ) \ (CIRC_SPACE((circ)->head, (circ)->tail, UART_XMIT_SIZE)) -#define uart_tx_stopped(portp) \ - ((portp)->info->port.tty->stopped || (portp)->info->port.tty->hw_stopped) +static inline int uart_tx_stopped(struct uart_port *port) +{ + struct tty_struct *tty = port->info->port.tty; + if (tty->stopped || tty->hw_stopped) + return 1; + return 0; +} /* * The following are helper functions for the low level drivers. @@ -449,7 +462,7 @@ uart_handle_sysrq_char(struct uart_port *port, unsigned int ch) #ifdef SUPPORT_SYSRQ if (port->sysrq) { if (ch && time_before(jiffies, port->sysrq)) { - handle_sysrq(ch, port->info ? port->info->port.tty : NULL); + handle_sysrq(ch, port->info->port.tty); port->sysrq = 0; return 1; } diff --git a/include/linux/slab.h b/include/linux/slab.h index 000da12b5cf..f96d13c281e 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -253,9 +253,9 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep, * request comes from. */ #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) -extern void *__kmalloc_track_caller(size_t, gfp_t, void*); +extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long); #define kmalloc_track_caller(size, flags) \ - __kmalloc_track_caller(size, flags, __builtin_return_address(0)) + __kmalloc_track_caller(size, flags, _RET_IP_) #else #define kmalloc_track_caller(size, flags) \ __kmalloc(size, flags) @@ -271,10 +271,10 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, void*); * allocation request comes from.
*/ #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) -extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *); +extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long); #define kmalloc_node_track_caller(size, flags, node) \ __kmalloc_node_track_caller(size, flags, node, \ - __builtin_return_address(0)) + _RET_IP_) #else #define kmalloc_node_track_caller(size, flags, node) \ __kmalloc_node(size, flags, node) @@ -285,7 +285,7 @@ extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *); #define kmalloc_node_track_caller(size, flags, node) \ kmalloc_track_caller(size, flags) -#endif /* DEBUG_SLAB */ +#endif /* CONFIG_NUMA */ /* * Shortcuts diff --git a/include/linux/smp.h b/include/linux/smp.h index 6e7ba16ff45..b8246696810 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -21,6 +21,9 @@ struct call_single_data { u16 priv; }; +/* total number of cpus in this system (may exceed NR_CPUS) */ +extern unsigned int total_cpus; + #ifdef CONFIG_SMP #include <linux/preempt.h> @@ -64,15 +67,16 @@ extern void smp_cpus_done(unsigned int max_cpus); * Call a function on all other processors */ int smp_call_function(void(*func)(void *info), void *info, int wait); -/* Deprecated: use smp_call_function_many() which uses a cpumask ptr. */ -int smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info, - int wait); +void smp_call_function_many(const struct cpumask *mask, + void (*func)(void *info), void *info, bool wait); -static inline void smp_call_function_many(const struct cpumask *mask, - void (*func)(void *info), void *info, - int wait) +/* Deprecated: Use smp_call_function_many which takes a pointer to the mask. */ +static inline int +smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info, + int wait) { - smp_call_function_mask(*mask, func, info, wait); + smp_call_function_many(&mask, func, info, wait); + return 0; } int smp_call_function_single(int cpuid, void (*func) (void *info), void *info, diff --git a/include/linux/spi/mmc_spi.h b/include/linux/spi/mmc_spi.h index a3626aedaec..0f4eb165f25 100644 --- a/include/linux/spi/mmc_spi.h +++ b/include/linux/spi/mmc_spi.h @@ -1,9 +1,10 @@ #ifndef __LINUX_SPI_MMC_SPI_H #define __LINUX_SPI_MMC_SPI_H +#include <linux/device.h> +#include <linux/spi/spi.h> #include <linux/interrupt.h> -struct device; struct mmc_host; /* Put this in platform_data of a device being used to manage an MMC/SD @@ -41,4 +42,16 @@ struct mmc_spi_platform_data { void (*setpower)(struct device *, unsigned int maskval); }; +#ifdef CONFIG_OF +extern struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi); +extern void mmc_spi_put_pdata(struct spi_device *spi); +#else +static inline struct mmc_spi_platform_data * +mmc_spi_get_pdata(struct spi_device *spi) +{ + return spi->dev.platform_data; +} +static inline void mmc_spi_put_pdata(struct spi_device *spi) {} +#endif /* CONFIG_OF */ + #endif /* __LINUX_SPI_MMC_SPI_H */ diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 4be01bb4437..82229317753 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -19,6 +19,8 @@ #ifndef __LINUX_SPI_H #define __LINUX_SPI_H +#include <linux/device.h> + /* * INTERFACES between SPI master-side drivers and SPI infrastructure. * (There's no SPI slave support for Linux yet...) 
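A quick illustration of the reworked smp.h interface above: smp_call_function_many() now takes a const struct cpumask pointer and a bool wait flag, while the by-value smp_call_function_mask() survives only as a deprecated wrapper. A minimal caller sketch, assuming the cpu_online_mask pointer from the same cpumask rework is available; do_flush() and flush_other_cpus() are hypothetical names, not part of this patch:

static void do_flush(void *info)
{
	/* runs on every CPU in the mask except the calling CPU */
}

static void flush_other_cpus(void)
{
	/* preemption must stay disabled while the cross-call is issued */
	preempt_disable();
	smp_call_function_many(cpu_online_mask, do_flush, NULL, true);
	preempt_enable();
}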
diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h index faf1519b5ad..74d59a64136 100644 --- a/include/linux/stop_machine.h +++ b/include/linux/stop_machine.h @@ -23,7 +23,7 @@ * * This can be thought of as a very heavy write lock, equivalent to * grabbing every spinlock in the kernel. */ -int stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus); +int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus); /** * __stop_machine: freeze the machine on all CPUs and run this function @@ -34,11 +34,11 @@ int stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus); * Description: This is a special version of the above, which assumes cpus * won't come or go while it's being called. Used by hotplug cpu. */ -int __stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus); +int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus); #else static inline int stop_machine(int (*fn)(void *), void *data, - const cpumask_t *cpus) + const struct cpumask *cpus) { int ret; local_irq_disable(); diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index 6f0ee1b84a4..c39a21040dc 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -58,6 +58,7 @@ struct rpc_clnt { struct rpc_timeout cl_timeout_default; struct rpc_program * cl_program; char cl_inline_name[32]; + char *cl_principal; /* target to authenticate to */ }; /* @@ -108,6 +109,7 @@ struct rpc_create_args { u32 version; rpc_authflavor_t authflavor; unsigned long flags; + char *client_name; }; /* Values for "flags" field */ diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h index 51b977a4ca2..cea764c2359 100644 --- a/include/linux/sunrpc/rpc_pipe_fs.h +++ b/include/linux/sunrpc/rpc_pipe_fs.h @@ -15,6 +15,7 @@ struct rpc_pipe_ops { ssize_t (*upcall)(struct file *, struct rpc_pipe_msg *, char __user *, size_t); ssize_t (*downcall)(struct file *, const char __user *, size_t); void (*release_pipe)(struct inode *); + int (*open_pipe)(struct inode *); void (*destroy_msg)(struct rpc_pipe_msg *); }; diff --git a/include/linux/sunrpc/svcauth_gss.h b/include/linux/sunrpc/svcauth_gss.h index c9165d9771a..ca7d725861f 100644 --- a/include/linux/sunrpc/svcauth_gss.h +++ b/include/linux/sunrpc/svcauth_gss.h @@ -20,6 +20,7 @@ int gss_svc_init(void); void gss_svc_shutdown(void); int svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name); u32 svcauth_gss_flavor(struct auth_domain *dom); +char *svc_gss_principal(struct svc_rqst *); #endif /* __KERNEL__ */ #endif /* _LINUX_SUNRPC_SVCAUTH_GSS_H */ diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index e4057d729f0..49e1eb45446 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -37,21 +37,6 @@ struct xdr_netobj { typedef int (*kxdrproc_t)(void *rqstp, __be32 *data, void *obj); /* - * We're still requiring the BKL in the xdr code until it's been - * more carefully audited, at which point this wrapper will become - * unnecessary. - */ -static inline int rpc_call_xdrproc(kxdrproc_t xdrproc, void *rqstp, __be32 *data, void *obj) -{ - int ret; - - lock_kernel(); - ret = xdrproc(rqstp, data, obj); - unlock_kernel(); - return ret; -} - -/* * Basic structure for transmission/reception of a client XDR message. 
* Features a header (for a linear buffer containing RPC headers * and the data payload for short messages), and then an array of diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index 4d80a118d53..11fc71d50c1 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -76,8 +76,7 @@ struct rpc_rqst { struct list_head rq_list; __u32 * rq_buffer; /* XDR encode buffer */ - size_t rq_bufsize, - rq_callsize, + size_t rq_callsize, rq_rcvsize; struct xdr_buf rq_private_buf; /* The receive buffer diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index b18ec5533e8..325af1de035 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -7,9 +7,31 @@ struct device; struct dma_attrs; struct scatterlist; +/* + * Maximum allowable number of contiguous slabs to map, + * must be a power of 2. What is the appropriate value ? + * The complexity of {map,unmap}_single is linearly dependent on this value. + */ +#define IO_TLB_SEGSIZE 128 + + +/* + * log of the size of each IO TLB slab. The number of slabs is command line + * controllable. + */ +#define IO_TLB_SHIFT 11 + extern void swiotlb_init(void); +extern void *swiotlb_alloc_boot(size_t bytes, unsigned long nslabs); +extern void *swiotlb_alloc(unsigned order, unsigned long nslabs); + +extern dma_addr_t swiotlb_phys_to_bus(phys_addr_t address); +extern phys_addr_t swiotlb_bus_to_phys(dma_addr_t address); + +extern int swiotlb_arch_range_needs_mapping(void *ptr, size_t size); + extern void *swiotlb_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags); diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 04fb47bfb92..18d0a243a7b 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -549,7 +549,7 @@ asmlinkage long sys_inotify_init(void); asmlinkage long sys_inotify_init1(int flags); asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask); -asmlinkage long sys_inotify_rm_watch(int fd, u32 wd); +asmlinkage long sys_inotify_rm_watch(int fd, __s32 wd); asmlinkage long sys_spu_run(int fd, __u32 __user *unpc, __u32 __user *ustatus); diff --git a/include/linux/threads.h b/include/linux/threads.h index 38d1a5d6568..052b12bec8b 100644 --- a/include/linux/threads.h +++ b/include/linux/threads.h @@ -8,17 +8,17 @@ */ /* - * Maximum supported processors that can run under SMP. This value is - * set via configure setting. The maximum is equal to the size of the - * bitmasks used on that platform, i.e. 32 or 64. Setting this smaller - * saves quite a bit of memory. + * Maximum supported processors. Setting this smaller saves quite a + * bit of memory. Use nr_cpu_ids instead of this except for static bitmaps. */ -#ifdef CONFIG_SMP -#define NR_CPUS CONFIG_NR_CPUS -#else -#define NR_CPUS 1 +#ifndef CONFIG_NR_CPUS +/* FIXME: This should be fixed in the arch's Kconfig */ +#define CONFIG_NR_CPUS 1 #endif +/* Places which use this should consider cpumask_var_t. 
*/ +#define NR_CPUS CONFIG_NR_CPUS + #define MIN_THREADS_LEFT_FOR_ROOT 4 /* diff --git a/include/linux/tick.h b/include/linux/tick.h index b6ec8189ac0..469b82d88b3 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -84,10 +84,10 @@ static inline void tick_cancel_sched_timer(int cpu) { } # ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST extern struct tick_device *tick_get_broadcast_device(void); -extern cpumask_t *tick_get_broadcast_mask(void); +extern struct cpumask *tick_get_broadcast_mask(void); # ifdef CONFIG_TICK_ONESHOT -extern cpumask_t *tick_get_broadcast_oneshot_mask(void); +extern struct cpumask *tick_get_broadcast_oneshot_mask(void); # endif # endif /* BROADCAST */ diff --git a/include/linux/timex.h b/include/linux/timex.h index 9007313b5b7..998a55d80ac 100644 --- a/include/linux/timex.h +++ b/include/linux/timex.h @@ -53,47 +53,11 @@ #ifndef _LINUX_TIMEX_H #define _LINUX_TIMEX_H -#include <linux/compiler.h> #include <linux/time.h> -#include <asm/param.h> - #define NTP_API 4 /* NTP API version */ /* - * SHIFT_KG and SHIFT_KF establish the damping of the PLL and are chosen - * for a slightly underdamped convergence characteristic. SHIFT_KH - * establishes the damping of the FLL and is chosen by wisdom and black - * art. - * - * MAXTC establishes the maximum time constant of the PLL. With the - * SHIFT_KG and SHIFT_KF values given and a time constant range from - * zero to MAXTC, the PLL will converge in 15 minutes to 16 hours, - * respectively. - */ -#define SHIFT_PLL 4 /* PLL frequency factor (shift) */ -#define SHIFT_FLL 2 /* FLL frequency factor (shift) */ -#define MAXTC 10 /* maximum time constant (shift) */ - -/* - * SHIFT_USEC defines the scaling (shift) of the time_freq and - * time_tolerance variables, which represent the current frequency - * offset and maximum frequency tolerance. - */ -#define SHIFT_USEC 16 /* frequency offset scale (shift) */ -#define PPM_SCALE (NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC)) -#define PPM_SCALE_INV_SHIFT 19 -#define PPM_SCALE_INV ((1ll << (PPM_SCALE_INV_SHIFT + NTP_SCALE_SHIFT)) / \ - PPM_SCALE + 1) - -#define MAXPHASE 500000000l /* max phase error (ns) */ -#define MAXFREQ 500000 /* max frequency error (ns/s) */ -#define MAXFREQ_SCALED ((s64)MAXFREQ << NTP_SCALE_SHIFT) -#define MINSEC 256 /* min interval between updates (s) */ -#define MAXSEC 2048 /* max interval between updates (s) */ -#define NTP_PHASE_LIMIT ((MAXPHASE / NSEC_PER_USEC) << 5) /* beyond max. dispersion */ - -/* * syscall interface - used (mainly by NTP daemon) * to discipline kernel clock oscillator */ @@ -199,9 +163,46 @@ struct timex { #define TIME_BAD TIME_ERROR /* bw compat */ #ifdef __KERNEL__ +#include <linux/compiler.h> +#include <linux/types.h> +#include <linux/param.h> + #include <asm/timex.h> /* + * SHIFT_KG and SHIFT_KF establish the damping of the PLL and are chosen + * for a slightly underdamped convergence characteristic. SHIFT_KH + * establishes the damping of the FLL and is chosen by wisdom and black + * art. + * + * MAXTC establishes the maximum time constant of the PLL. With the + * SHIFT_KG and SHIFT_KF values given and a time constant range from + * zero to MAXTC, the PLL will converge in 15 minutes to 16 hours, + * respectively. 
+ */ +#define SHIFT_PLL 4 /* PLL frequency factor (shift) */ +#define SHIFT_FLL 2 /* FLL frequency factor (shift) */ +#define MAXTC 10 /* maximum time constant (shift) */ + +/* + * SHIFT_USEC defines the scaling (shift) of the time_freq and + * time_tolerance variables, which represent the current frequency + * offset and maximum frequency tolerance. + */ +#define SHIFT_USEC 16 /* frequency offset scale (shift) */ +#define PPM_SCALE (NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC)) +#define PPM_SCALE_INV_SHIFT 19 +#define PPM_SCALE_INV ((1ll << (PPM_SCALE_INV_SHIFT + NTP_SCALE_SHIFT)) / \ + PPM_SCALE + 1) + +#define MAXPHASE 500000000l /* max phase error (ns) */ +#define MAXFREQ 500000 /* max frequency error (ns/s) */ +#define MAXFREQ_SCALED ((s64)MAXFREQ << NTP_SCALE_SHIFT) +#define MINSEC 256 /* min interval between updates (s) */ +#define MAXSEC 2048 /* max interval between updates (s) */ +#define NTP_PHASE_LIMIT ((MAXPHASE / NSEC_PER_USEC) << 5) /* beyond max. dispersion */ + +/* * kernel variables * Note: maximum error = NTP synch distance = dispersion + delay / 2; * estimated error = NTP dispersion. diff --git a/include/linux/topology.h b/include/linux/topology.h index 0c5b5ac36d8..e632d29f054 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h @@ -125,7 +125,8 @@ int arch_update_cpu_topology(void); | SD_WAKE_AFFINE \ | SD_WAKE_BALANCE \ | SD_SHARE_PKG_RESOURCES\ - | BALANCE_FOR_MC_POWER, \ + | sd_balance_for_mc_power()\ + | sd_power_saving_flags(),\ .last_balance = jiffies, \ .balance_interval = 1, \ } @@ -150,7 +151,8 @@ int arch_update_cpu_topology(void); | SD_BALANCE_FORK \ | SD_WAKE_AFFINE \ | SD_WAKE_BALANCE \ - | BALANCE_FOR_PKG_POWER,\ + | sd_balance_for_package_power()\ + | sd_power_saving_flags(),\ .last_balance = jiffies, \ .balance_interval = 1, \ } diff --git a/include/linux/tty.h b/include/linux/tty.h index 3f4954c55e5..fc39db95499 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -180,8 +180,17 @@ struct signal_struct; * until a hangup so don't use the wrong path. 
*/ +struct tty_port; + +struct tty_port_operations { + /* Return 1 if the carrier is raised */ + int (*carrier_raised)(struct tty_port *port); + void (*raise_dtr_rts)(struct tty_port *port); +}; + struct tty_port { struct tty_struct *tty; /* Back pointer */ + const struct tty_port_operations *ops; /* Port operations */ spinlock_t lock; /* Lock protecting tty field */ int blocked_open; /* Waiting to open */ int count; /* Usage count */ @@ -253,6 +262,7 @@ struct tty_struct { unsigned int column; unsigned char lnext:1, erasing:1, raw:1, real_raw:1, icanon:1; unsigned char closing:1; + unsigned char echo_overrun:1; unsigned short minimum_to_wake; unsigned long overrun_time; int num_overrun; @@ -262,11 +272,16 @@ struct tty_struct { int read_tail; int read_cnt; unsigned long read_flags[N_TTY_BUF_SIZE/(8*sizeof(unsigned long))]; + unsigned char *echo_buf; + unsigned int echo_pos; + unsigned int echo_cnt; int canon_data; unsigned long canon_head; unsigned int canon_column; struct mutex atomic_read_lock; struct mutex atomic_write_lock; + struct mutex output_lock; + struct mutex echo_lock; unsigned char *write_buf; int write_cnt; spinlock_t read_lock; @@ -295,6 +310,7 @@ struct tty_struct { #define TTY_PUSH 6 /* n_tty private */ #define TTY_CLOSING 7 /* ->close() in progress */ #define TTY_LDISC 9 /* Line discipline attached */ +#define TTY_LDISC_CHANGING 10 /* Line discipline changing */ #define TTY_HW_COOK_OUT 14 /* Hardware can do output cooking */ #define TTY_HW_COOK_IN 15 /* Hardware can do input cooking */ #define TTY_PTY_LOCK 16 /* pty private */ @@ -354,8 +370,7 @@ extern int tty_write_room(struct tty_struct *tty); extern void tty_driver_flush_buffer(struct tty_struct *tty); extern void tty_throttle(struct tty_struct *tty); extern void tty_unthrottle(struct tty_struct *tty); -extern int tty_do_resize(struct tty_struct *tty, struct tty_struct *real_tty, - struct winsize *ws); +extern int tty_do_resize(struct tty_struct *tty, struct winsize *ws); extern void tty_shutdown(struct tty_struct *tty); extern void tty_free_termios(struct tty_struct *tty); extern int is_current_pgrp_orphaned(void); @@ -421,6 +436,14 @@ extern int tty_port_alloc_xmit_buf(struct tty_port *port); extern void tty_port_free_xmit_buf(struct tty_port *port); extern struct tty_struct *tty_port_tty_get(struct tty_port *port); extern void tty_port_tty_set(struct tty_port *port, struct tty_struct *tty); +extern int tty_port_carrier_raised(struct tty_port *port); +extern void tty_port_raise_dtr_rts(struct tty_port *port); +extern void tty_port_hangup(struct tty_port *port); +extern int tty_port_block_til_ready(struct tty_port *port, + struct tty_struct *tty, struct file *filp); +extern int tty_port_close_start(struct tty_port *port, + struct tty_struct *tty, struct file *filp); +extern void tty_port_close_end(struct tty_port *port, struct tty_struct *tty); extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc); extern int tty_unregister_ldisc(int disc); diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h index 78416b90158..08e088334db 100644 --- a/include/linux/tty_driver.h +++ b/include/linux/tty_driver.h @@ -196,8 +196,7 @@ * Optional: If not provided then the write method is called under * the atomic write lock to keep it serialized with the ldisc. 
* - * int (*resize)(struct tty_struct *tty, struct tty_struct *real_tty, - * unsigned int rows, unsigned int cols); + * int (*resize)(struct tty_struct *tty, struct winsize *ws) * * Called when a termios request is issued which changes the * requested terminal geometry. @@ -258,8 +257,7 @@ struct tty_operations { int (*tiocmget)(struct tty_struct *tty, struct file *file); int (*tiocmset)(struct tty_struct *tty, struct file *file, unsigned int set, unsigned int clear); - int (*resize)(struct tty_struct *tty, struct tty_struct *real_tty, - struct winsize *ws); + int (*resize)(struct tty_struct *tty, struct winsize *ws); int (*set_termiox)(struct tty_struct *tty, struct termiox *tnew); #ifdef CONFIG_CONSOLE_POLL int (*poll_init)(struct tty_driver *driver, int line, char *options); diff --git a/include/linux/types.h b/include/linux/types.h index 1d98330b1f2..121f349cb7e 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -135,19 +135,14 @@ typedef __s64 int64_t; * * Linux always considers sectors to be 512 bytes long independently * of the devices real block size. + * + * blkcnt_t is the type of the inode's block count. */ #ifdef CONFIG_LBD typedef u64 sector_t; -#else -typedef unsigned long sector_t; -#endif - -/* - * The type of the inode's block count. - */ -#ifdef CONFIG_LSF typedef u64 blkcnt_t; #else +typedef unsigned long sector_t; typedef unsigned long blkcnt_t; #endif diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index fec6decfb98..6b58367d145 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -78,7 +78,7 @@ static inline unsigned long __copy_from_user_nocache(void *to, \ set_fs(KERNEL_DS); \ pagefault_disable(); \ - ret = __get_user(retval, (__force typeof(retval) __user *)(addr)); \ + ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \ pagefault_enable(); \ set_fs(old_fs); \ ret; \ diff --git a/include/linux/usb/wusb-wa.h b/include/linux/usb/wusb-wa.h index a102561e702..fb7c359bdfb 100644 --- a/include/linux/usb/wusb-wa.h +++ b/include/linux/usb/wusb-wa.h @@ -51,6 +51,7 @@ enum { WUSB_REQ_GET_TIME = 25, WUSB_REQ_SET_STREAM_IDX = 26, WUSB_REQ_SET_WUSB_MAS = 27, + WUSB_REQ_CHAN_STOP = 28, }; diff --git a/include/linux/uwb.h b/include/linux/uwb.h index f9ccbd9a2ce..c02128991ff 100644 --- a/include/linux/uwb.h +++ b/include/linux/uwb.h @@ -30,6 +30,7 @@ #include <linux/device.h> #include <linux/mutex.h> #include <linux/timer.h> +#include <linux/wait.h> #include <linux/workqueue.h> #include <linux/uwb/spec.h> @@ -66,6 +67,7 @@ struct uwb_dev { struct uwb_dev_addr dev_addr; int beacon_slot; DECLARE_BITMAP(streams, UWB_NUM_STREAMS); + DECLARE_BITMAP(last_availability_bm, UWB_NUM_MAS); }; #define to_uwb_dev(d) container_of(d, struct uwb_dev, dev) @@ -86,12 +88,31 @@ struct uwb_notifs_chain { struct mutex mutex; }; +/* Beacon cache list */ +struct uwb_beca { + struct list_head list; + size_t entries; + struct mutex mutex; +}; + +/* Event handling thread. */ +struct uwbd { + int pid; + struct task_struct *task; + wait_queue_head_t wq; + struct list_head event_list; + spinlock_t event_list_lock; +}; + /** * struct uwb_mas_bm - a bitmap of all MAS in a superframe * @bm: a bitmap of length #UWB_NUM_MAS */ struct uwb_mas_bm { DECLARE_BITMAP(bm, UWB_NUM_MAS); + DECLARE_BITMAP(unsafe_bm, UWB_NUM_MAS); + int safe; + int unsafe; }; /** @@ -117,14 +138,24 @@ struct uwb_mas_bm { * FIXME: further target states TBD. 
*/ enum uwb_rsv_state { - UWB_RSV_STATE_NONE, + UWB_RSV_STATE_NONE = 0, UWB_RSV_STATE_O_INITIATED, UWB_RSV_STATE_O_PENDING, UWB_RSV_STATE_O_MODIFIED, UWB_RSV_STATE_O_ESTABLISHED, + UWB_RSV_STATE_O_TO_BE_MOVED, + UWB_RSV_STATE_O_MOVE_EXPANDING, + UWB_RSV_STATE_O_MOVE_COMBINING, + UWB_RSV_STATE_O_MOVE_REDUCING, UWB_RSV_STATE_T_ACCEPTED, UWB_RSV_STATE_T_DENIED, + UWB_RSV_STATE_T_CONFLICT, UWB_RSV_STATE_T_PENDING, + UWB_RSV_STATE_T_EXPANDING_ACCEPTED, + UWB_RSV_STATE_T_EXPANDING_CONFLICT, + UWB_RSV_STATE_T_EXPANDING_PENDING, + UWB_RSV_STATE_T_EXPANDING_DENIED, + UWB_RSV_STATE_T_RESIZED, UWB_RSV_STATE_LAST, }; @@ -149,6 +180,12 @@ struct uwb_rsv_target { }; }; +struct uwb_rsv_move { + struct uwb_mas_bm final_mas; + struct uwb_ie_drp *companion_drp_ie; + struct uwb_mas_bm companion_mas; +}; + /* * Number of streams reserved for reservations targeted at DevAddrs. */ @@ -186,6 +223,7 @@ typedef void (*uwb_rsv_cb_f)(struct uwb_rsv *rsv); * * @status: negotiation status * @stream: stream index allocated for this reservation + * @tiebreaker: conflict tiebreaker for this reservation * @mas: reserved MAS * @drp_ie: the DRP IE * @ie_valid: true iff the DRP IE matches the reservation parameters @@ -201,25 +239,29 @@ struct uwb_rsv { struct uwb_rc *rc; struct list_head rc_node; struct list_head pal_node; + struct kref kref; struct uwb_dev *owner; struct uwb_rsv_target target; enum uwb_drp_type type; int max_mas; int min_mas; - int sparsity; + int max_interval; bool is_multicast; uwb_rsv_cb_f callback; void *pal_priv; enum uwb_rsv_state state; + bool needs_release_companion_mas; u8 stream; + u8 tiebreaker; struct uwb_mas_bm mas; struct uwb_ie_drp *drp_ie; + struct uwb_rsv_move mv; bool ie_valid; struct timer_list timer; - bool expired; + struct work_struct handle_timeout_work; }; static const @@ -261,6 +303,13 @@ struct uwb_drp_avail { bool ie_valid; }; +struct uwb_drp_backoff_win { + u8 window; + u8 n; + int total_expired; + struct timer_list timer; + bool can_reserve_extra_mases; +}; const char *uwb_rsv_state_str(enum uwb_rsv_state state); const char *uwb_rsv_type_str(enum uwb_drp_type type); @@ -276,6 +325,8 @@ void uwb_rsv_terminate(struct uwb_rsv *rsv); void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv); +void uwb_rsv_get_usable_mas(struct uwb_rsv *orig_rsv, struct uwb_mas_bm *mas); + /** * Radio Control Interface instance * @@ -337,23 +388,33 @@ struct uwb_rc { u8 ctx_roll; int beaconing; /* Beaconing state [channel number] */ + int beaconing_forced; int scanning; enum uwb_scan_type scan_type:3; unsigned ready:1; struct uwb_notifs_chain notifs_chain; + struct uwb_beca uwb_beca; + + struct uwbd uwbd; + struct uwb_drp_backoff_win bow; struct uwb_drp_avail drp_avail; struct list_head reservations; + struct list_head cnflt_alien_list; + struct uwb_mas_bm cnflt_alien_bitmap; struct mutex rsvs_mutex; + spinlock_t rsvs_lock; struct workqueue_struct *rsv_workq; - struct work_struct rsv_update_work; + struct delayed_work rsv_update_work; + struct delayed_work rsv_alien_bp_work; + int set_drp_ie_pending; struct mutex ies_mutex; struct uwb_rc_cmd_set_ie *ies; size_t ies_capacity; - spinlock_t pal_lock; struct list_head pals; + int active_pals; struct uwb_dbg *dbg; }; @@ -361,11 +422,19 @@ struct uwb_rc { /** * struct uwb_pal - a UWB PAL - * @name: descriptive name for this PAL (wushc, wlp, etc.). + * @name: descriptive name for this PAL (wusbhc, wlp, etc.). * @device: a device for the PAL. Used to link the PAL and the radio * controller in sysfs. 
+ * @rc: the radio controller the PAL uses. + * @channel_changed: called when the channel used by the radio changes. + * A channel of -1 means the channel has been stopped. * @new_rsv: called when a peer requests a reservation (may be NULL if * the PAL cannot accept reservation requests). + * @channel: channel being used by the PAL; 0 if the PAL isn't using + * the radio; -1 if the PAL wishes to use the radio but + * cannot. + * @debugfs_dir: a debugfs directory which the PAL can use for its own + * debugfs files. * * A Protocol Adaptation Layer (PAL) is a user of the WiMedia UWB * radio platform (e.g., WUSB, WLP or Bluetooth UWB AMP). @@ -384,12 +453,21 @@ struct uwb_pal { struct list_head node; const char *name; struct device *device; - void (*new_rsv)(struct uwb_rsv *rsv); + struct uwb_rc *rc; + + void (*channel_changed)(struct uwb_pal *pal, int channel); + void (*new_rsv)(struct uwb_pal *pal, struct uwb_rsv *rsv); + + int channel; + struct dentry *debugfs_dir; }; void uwb_pal_init(struct uwb_pal *pal); -int uwb_pal_register(struct uwb_rc *rc, struct uwb_pal *pal); -void uwb_pal_unregister(struct uwb_rc *rc, struct uwb_pal *pal); +int uwb_pal_register(struct uwb_pal *pal); +void uwb_pal_unregister(struct uwb_pal *pal); + +int uwb_radio_start(struct uwb_pal *pal); +void uwb_radio_stop(struct uwb_pal *pal); /* * General public API @@ -443,8 +521,6 @@ ssize_t uwb_rc_vcmd(struct uwb_rc *rc, const char *cmd_name, struct uwb_rccb *cmd, size_t cmd_size, u8 expected_type, u16 expected_event, struct uwb_rceb **preply); -ssize_t uwb_rc_get_ie(struct uwb_rc *, struct uwb_rc_evt_get_ie **); -int uwb_bg_joined(struct uwb_rc *rc); size_t __uwb_addr_print(char *, size_t, const unsigned char *, int); @@ -520,6 +596,8 @@ void uwb_rc_rm(struct uwb_rc *); void uwb_rc_neh_grok(struct uwb_rc *, void *, size_t); void uwb_rc_neh_error(struct uwb_rc *, int); void uwb_rc_reset_all(struct uwb_rc *rc); +void uwb_rc_pre_reset(struct uwb_rc *rc); +void uwb_rc_post_reset(struct uwb_rc *rc); /** * uwb_rsv_is_owner - is the owner of this reservation the RC? @@ -531,7 +609,9 @@ static inline bool uwb_rsv_is_owner(struct uwb_rsv *rsv) } /** - * Events generated by UWB that can be passed to any listeners + * enum uwb_notifs - UWB events that can be passed to any listeners + * @UWB_NOTIF_ONAIR: a new neighbour has joined the beacon group. + * @UWB_NOTIF_OFFAIR: a neighbour has left the beacon group. * * Higher layers can register callback functions with the radio * controller using uwb_notifs_register(). The radio controller @@ -539,8 +619,6 @@ static inline bool uwb_rsv_is_owner(struct uwb_rsv *rsv) * nodes when an event occurs. 
*/ enum uwb_notifs { - UWB_NOTIF_BG_JOIN = 0, /* radio controller joined a beacon group */ - UWB_NOTIF_BG_LEAVE = 1, /* radio controller left a beacon group */ UWB_NOTIF_ONAIR, UWB_NOTIF_OFFAIR, }; @@ -652,22 +730,9 @@ static inline int edc_inc(struct edc *err_hist, u16 max_err, u16 timeframe) /* Information Element handling */ -/* For representing the state of writing to a buffer when iterating */ -struct uwb_buf_ctx { - char *buf; - size_t bytes, size; -}; - -typedef int (*uwb_ie_f)(struct uwb_dev *, const struct uwb_ie_hdr *, - size_t, void *); struct uwb_ie_hdr *uwb_ie_next(void **ptr, size_t *len); -ssize_t uwb_ie_for_each(struct uwb_dev *uwb_dev, uwb_ie_f fn, void *data, - const void *buf, size_t size); -int uwb_ie_dump_hex(struct uwb_dev *, const struct uwb_ie_hdr *, - size_t, void *); -int uwb_rc_set_ie(struct uwb_rc *, struct uwb_rc_cmd_set_ie *); -struct uwb_ie_hdr *uwb_ie_next(void **ptr, size_t *len); - +int uwb_rc_ie_add(struct uwb_rc *uwb_rc, const struct uwb_ie_hdr *ies, size_t size); +int uwb_rc_ie_rm(struct uwb_rc *uwb_rc, enum uwb_ie element_id); /* * Transmission statistics diff --git a/include/linux/uwb/debug-cmd.h b/include/linux/uwb/debug-cmd.h index 1141f41bab5..8da004e2562 100644 --- a/include/linux/uwb/debug-cmd.h +++ b/include/linux/uwb/debug-cmd.h @@ -32,6 +32,10 @@ enum uwb_dbg_cmd_type { UWB_DBG_CMD_RSV_ESTABLISH = 1, UWB_DBG_CMD_RSV_TERMINATE = 2, + UWB_DBG_CMD_IE_ADD = 3, + UWB_DBG_CMD_IE_RM = 4, + UWB_DBG_CMD_RADIO_START = 5, + UWB_DBG_CMD_RADIO_STOP = 6, }; struct uwb_dbg_cmd_rsv_establish { @@ -39,18 +43,25 @@ struct uwb_dbg_cmd_rsv_establish { __u8 type; __u16 max_mas; __u16 min_mas; - __u8 sparsity; + __u8 max_interval; }; struct uwb_dbg_cmd_rsv_terminate { int index; }; +struct uwb_dbg_cmd_ie { + __u8 data[128]; + int len; +}; + struct uwb_dbg_cmd { __u32 type; union { struct uwb_dbg_cmd_rsv_establish rsv_establish; struct uwb_dbg_cmd_rsv_terminate rsv_terminate; + struct uwb_dbg_cmd_ie ie_add; + struct uwb_dbg_cmd_ie ie_rm; }; }; diff --git a/include/linux/uwb/debug.h b/include/linux/uwb/debug.h deleted file mode 100644 index a86a73fe303..00000000000 --- a/include/linux/uwb/debug.h +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Ultra Wide Band - * Debug Support - * - * Copyright (C) 2005-2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - * 02110-1301, USA. - * - * - * FIXME: doc - * Invoke like: - * - * #define D_LOCAL 4 - * #include <linux/uwb/debug.h> - * - * At the end of your include files. 
- */ -#include <linux/types.h> - -struct device; -extern void dump_bytes(struct device *dev, const void *_buf, size_t rsize); - -/* Master debug switch; !0 enables, 0 disables */ -#define D_MASTER (!0) - -/* Local (per-file) debug switch; #define before #including */ -#ifndef D_LOCAL -#define D_LOCAL 0 -#endif - -#undef __d_printf -#undef d_fnstart -#undef d_fnend -#undef d_printf -#undef d_dump - -#define __d_printf(l, _tag, _dev, f, a...) \ -do { \ - struct device *__dev = (_dev); \ - if (D_MASTER && D_LOCAL >= (l)) { \ - char __head[64] = ""; \ - if (_dev != NULL) { \ - if ((unsigned long)__dev < 4096) \ - printk(KERN_ERR "E: Corrupt dev %p\n", \ - __dev); \ - else \ - snprintf(__head, sizeof(__head), \ - "%s %s: ", \ - dev_driver_string(__dev), \ - __dev->bus_id); \ - } \ - printk(KERN_ERR "%s%s" _tag ": " f, __head, \ - __func__, ## a); \ - } \ -} while (0 && _dev) - -#define d_fnstart(l, _dev, f, a...) \ - __d_printf(l, " FNSTART", _dev, f, ## a) -#define d_fnend(l, _dev, f, a...) \ - __d_printf(l, " FNEND", _dev, f, ## a) -#define d_printf(l, _dev, f, a...) \ - __d_printf(l, "", _dev, f, ## a) -#define d_dump(l, _dev, ptr, size) \ -do { \ - struct device *__dev = _dev; \ - if (D_MASTER && D_LOCAL >= (l)) \ - dump_bytes(__dev, ptr, size); \ -} while (0 && _dev) -#define d_test(l) (D_MASTER && D_LOCAL >= (l)) diff --git a/include/linux/uwb/spec.h b/include/linux/uwb/spec.h index 198c15f8e25..b52e44f1bd3 100644 --- a/include/linux/uwb/spec.h +++ b/include/linux/uwb/spec.h @@ -59,6 +59,11 @@ enum { UWB_NUM_ZONES = 16 }; #define UWB_MAS_PER_ZONE (UWB_NUM_MAS / UWB_NUM_ZONES) /* + * Number of MAS required before a row can be considered available. + */ +#define UWB_USABLE_MAS_PER_ROW (UWB_NUM_ZONES - 1) + +/* * Number of streams per DRP reservation between a pair of devices. * * [ECMA-368] section 16.8.6. @@ -94,6 +99,26 @@ enum { UWB_BEACON_SLOT_LENGTH_US = 85 }; enum { UWB_MAX_LOST_BEACONS = 3 }; /* + * mDRPBackOffWinMin + * + * The minimum number of superframes to wait before trying to reserve + * extra MAS. + * + * [ECMA-368] section 17.16 + */ +enum { UWB_DRP_BACKOFF_WIN_MIN = 2 }; + +/* + * mDRPBackOffWinMax + * + * The maximum number of superframes to wait before trying to reserve + * extra MAS. + * + * [ECMA-368] section 17.16 + */ +enum { UWB_DRP_BACKOFF_WIN_MAX = 16 }; + +/* * Length of a superframe in microseconds. */ #define UWB_SUPERFRAME_LENGTH_US (UWB_MAS_LENGTH_US * UWB_NUM_MAS) @@ -200,6 +225,12 @@ enum uwb_drp_reason { UWB_DRP_REASON_MODIFIED, }; +/** Relinquish Request Reason Codes ([ECMA-368] table 113) */ +enum uwb_relinquish_req_reason { + UWB_RELINQUISH_REQ_REASON_NON_SPECIFIC = 0, + UWB_RELINQUISH_REQ_REASON_OVER_ALLOCATION, +}; + /** * DRP Notification Reason Codes (WHCI 0.95 [3.1.4.9]) */ @@ -252,6 +283,7 @@ enum uwb_ie { UWB_APP_SPEC_PROBE_IE = 15, UWB_IDENTIFICATION_IE = 19, UWB_MASTER_KEY_ID_IE = 20, + UWB_RELINQUISH_REQUEST_IE = 21, UWB_IE_WLP = 250, /* WiMedia Logical Link Control Protocol WLP 0.99 */ UWB_APP_SPEC_IE = 255, }; @@ -365,6 +397,27 @@ struct uwb_ie_drp_avail { DECLARE_BITMAP(bmp, UWB_NUM_MAS); } __attribute__((packed)); +/* Relinqish Request IE ([ECMA-368] section 16.8.19). 
*/ +struct uwb_relinquish_request_ie { + struct uwb_ie_hdr hdr; + __le16 relinquish_req_control; + struct uwb_dev_addr dev_addr; + struct uwb_drp_alloc allocs[]; +} __attribute__((packed)); + +static inline int uwb_ie_relinquish_req_reason_code(struct uwb_relinquish_request_ie *ie) +{ + return (le16_to_cpu(ie->relinquish_req_control) >> 0) & 0xf; +} + +static inline void uwb_ie_relinquish_req_set_reason_code(struct uwb_relinquish_request_ie *ie, + int reason_code) +{ + u16 ctrl = le16_to_cpu(ie->relinquish_req_control); + ctrl = (ctrl & ~(0xf << 0)) | (reason_code << 0); + ie->relinquish_req_control = cpu_to_le16(ctrl); +} + /** * The Vendor ID is set to an OUI that indicates the vendor of the device. * ECMA-368 [16.8.10] diff --git a/include/linux/uwb/umc.h b/include/linux/uwb/umc.h index 36a39e34f8d..4b4fc0f4385 100644 --- a/include/linux/uwb/umc.h +++ b/include/linux/uwb/umc.h @@ -89,6 +89,8 @@ struct umc_driver { void (*remove)(struct umc_dev *); int (*suspend)(struct umc_dev *, pm_message_t state); int (*resume)(struct umc_dev *); + int (*pre_reset)(struct umc_dev *); + int (*post_reset)(struct umc_dev *); struct device_driver driver; }; diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h index 4669d7e72e7..5571dbe1c0a 100644 --- a/include/linux/videodev2.h +++ b/include/linux/videodev2.h @@ -293,6 +293,7 @@ struct v4l2_pix_format { #define V4L2_PIX_FMT_YVU420 v4l2_fourcc('Y', 'V', '1', '2') /* 12 YVU 4:2:0 */ #define V4L2_PIX_FMT_YUYV v4l2_fourcc('Y', 'U', 'Y', 'V') /* 16 YUV 4:2:2 */ #define V4L2_PIX_FMT_UYVY v4l2_fourcc('U', 'Y', 'V', 'Y') /* 16 YUV 4:2:2 */ +#define V4L2_PIX_FMT_VYUY v4l2_fourcc('V', 'Y', 'U', 'Y') /* 16 YUV 4:2:2 */ #define V4L2_PIX_FMT_YUV422P v4l2_fourcc('4', '2', '2', 'P') /* 16 YVU422 planar */ #define V4L2_PIX_FMT_YUV411P v4l2_fourcc('4', '1', '1', 'P') /* 16 YVU411 planar */ #define V4L2_PIX_FMT_Y41P v4l2_fourcc('Y', '4', '1', 'P') /* 12 YUV 4:1:1 */ @@ -304,6 +305,8 @@ struct v4l2_pix_format { /* two planes -- one Y, one Cr + Cb interleaved */ #define V4L2_PIX_FMT_NV12 v4l2_fourcc('N', 'V', '1', '2') /* 12 Y/CbCr 4:2:0 */ #define V4L2_PIX_FMT_NV21 v4l2_fourcc('N', 'V', '2', '1') /* 12 Y/CrCb 4:2:0 */ +#define V4L2_PIX_FMT_NV16 v4l2_fourcc('N', 'V', '1', '6') /* 16 Y/CbCr 4:2:2 */ +#define V4L2_PIX_FMT_NV61 v4l2_fourcc('N', 'V', '6', '1') /* 16 Y/CrCb 4:2:2 */ /* The following formats are not defined in the V4L2 specification */ #define V4L2_PIX_FMT_YUV410 v4l2_fourcc('Y', 'U', 'V', '9') /* 9 YUV 4:1:0 */ @@ -1050,7 +1053,7 @@ enum v4l2_mpeg_video_bitrate_mode { #define V4L2_CID_MPEG_VIDEO_MUTE (V4L2_CID_MPEG_BASE+210) #define V4L2_CID_MPEG_VIDEO_MUTE_YUV (V4L2_CID_MPEG_BASE+211) -/* MPEG-class control IDs specific to the CX2584x driver as defined by V4L2 */ +/* MPEG-class control IDs specific to the CX2341x driver as defined by V4L2 */ #define V4L2_CID_MPEG_CX2341X_BASE (V4L2_CTRL_CLASS_MPEG | 0x1000) #define V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE (V4L2_CID_MPEG_CX2341X_BASE+0) enum v4l2_mpeg_cx2341x_video_spatial_filter_mode { @@ -1117,6 +1120,12 @@ enum v4l2_exposure_auto_type { #define V4L2_CID_FOCUS_RELATIVE (V4L2_CID_CAMERA_CLASS_BASE+11) #define V4L2_CID_FOCUS_AUTO (V4L2_CID_CAMERA_CLASS_BASE+12) +#define V4L2_CID_ZOOM_ABSOLUTE (V4L2_CID_CAMERA_CLASS_BASE+13) +#define V4L2_CID_ZOOM_RELATIVE (V4L2_CID_CAMERA_CLASS_BASE+14) +#define V4L2_CID_ZOOM_CONTINUOUS (V4L2_CID_CAMERA_CLASS_BASE+15) + +#define V4L2_CID_PRIVACY (V4L2_CID_CAMERA_CLASS_BASE+16) + /* * T U N I N G */ @@ -1361,24 +1370,41 @@ struct v4l2_streamparm { /* * A D V 
A N C E D D E B U G G I N G * - * NOTE: EXPERIMENTAL API + * NOTE: EXPERIMENTAL API, NEVER RELY ON THIS IN APPLICATIONS! + * FOR DEBUGGING, TESTING AND INTERNAL USE ONLY! */ /* VIDIOC_DBG_G_REGISTER and VIDIOC_DBG_S_REGISTER */ #define V4L2_CHIP_MATCH_HOST 0 /* Match against chip ID on host (0 for the host) */ -#define V4L2_CHIP_MATCH_I2C_DRIVER 1 /* Match against I2C driver ID */ +#define V4L2_CHIP_MATCH_I2C_DRIVER 1 /* Match against I2C driver name */ #define V4L2_CHIP_MATCH_I2C_ADDR 2 /* Match against I2C 7-bit address */ +#define V4L2_CHIP_MATCH_AC97 3 /* Match against anciliary AC97 chip */ + +struct v4l2_dbg_match { + __u32 type; /* Match type */ + union { /* Match this chip, meaning determined by type */ + __u32 addr; + char name[32]; + }; +} __attribute__ ((packed)); -struct v4l2_register { - __u32 match_type; /* Match type */ - __u32 match_chip; /* Match this chip, meaning determined by match_type */ +struct v4l2_dbg_register { + struct v4l2_dbg_match match; + __u32 size; /* register size in bytes */ __u64 reg; __u64 val; -}; +} __attribute__ ((packed)); -/* VIDIOC_G_CHIP_IDENT */ -struct v4l2_chip_ident { +/* VIDIOC_DBG_G_CHIP_IDENT */ +struct v4l2_dbg_chip_ident { + struct v4l2_dbg_match match; + __u32 ident; /* chip identifier as specified in <media/v4l2-chip-ident.h> */ + __u32 revision; /* chip revision, chip specific */ +} __attribute__ ((packed)); + +/* VIDIOC_G_CHIP_IDENT_OLD: Deprecated, do not use */ +struct v4l2_chip_ident_old { __u32 match_type; /* Match type */ __u32 match_chip; /* Match this chip, meaning determined by match_type */ __u32 ident; /* chip identifier as specified in <media/v4l2-chip-ident.h> */ @@ -1450,14 +1476,25 @@ struct v4l2_chip_ident { #define VIDIOC_G_ENC_INDEX _IOR('V', 76, struct v4l2_enc_idx) #define VIDIOC_ENCODER_CMD _IOWR('V', 77, struct v4l2_encoder_cmd) #define VIDIOC_TRY_ENCODER_CMD _IOWR('V', 78, struct v4l2_encoder_cmd) +#endif -/* Experimental, only implemented if CONFIG_VIDEO_ADV_DEBUG is defined */ -#define VIDIOC_DBG_S_REGISTER _IOW('V', 79, struct v4l2_register) -#define VIDIOC_DBG_G_REGISTER _IOWR('V', 80, struct v4l2_register) - -#define VIDIOC_G_CHIP_IDENT _IOWR('V', 81, struct v4l2_chip_ident) +#if 1 +/* Experimental, meant for debugging, testing and internal use. + Only implemented if CONFIG_VIDEO_ADV_DEBUG is defined. + You must be root to use these ioctls. Never use these in applications! */ +#define VIDIOC_DBG_S_REGISTER _IOW('V', 79, struct v4l2_dbg_register) +#define VIDIOC_DBG_G_REGISTER _IOWR('V', 80, struct v4l2_dbg_register) + +/* Experimental, meant for debugging, testing and internal use. + Never use this ioctl in applications! */ +#define VIDIOC_DBG_G_CHIP_IDENT _IOWR('V', 81, struct v4l2_dbg_chip_ident) +/* This is deprecated and will go away in 2.6.30 */ +#define VIDIOC_G_CHIP_IDENT_OLD _IOWR('V', 81, struct v4l2_chip_ident_old) #endif + #define VIDIOC_S_HW_FREQ_SEEK _IOW('V', 82, struct v4l2_hw_freq_seek) +/* Reminder: when adding new ioctls please add support for them to + drivers/media/video/v4l2-compat-ioctl32.c as well! */ #ifdef __OLD_VIDIOC_ /* for compatibility, will go away some day */ diff --git a/include/linux/virtio_balloon.h b/include/linux/virtio_balloon.h index c30c7bfbf39..8726ff77763 100644 --- a/include/linux/virtio_balloon.h +++ b/include/linux/virtio_balloon.h @@ -10,6 +10,9 @@ /* The feature bitmap for virtio balloon */ #define VIRTIO_BALLOON_F_MUST_TELL_HOST 0 /* Tell before reclaiming pages */ +/* Size of a PFN in the balloon interface. 
*/ +#define VIRTIO_BALLOON_PFN_SHIFT 12 + struct virtio_balloon_config { /* Number of pages host wants Guest to give up. */ diff --git a/include/linux/virtio_console.h b/include/linux/virtio_console.h index 19a0da0dba4..7615ffcdd55 100644 --- a/include/linux/virtio_console.h +++ b/include/linux/virtio_console.h @@ -7,6 +7,17 @@ /* The ID for virtio console */ #define VIRTIO_ID_CONSOLE 3 +/* Feature bits */ +#define VIRTIO_CONSOLE_F_SIZE 0 /* Does host provide console size? */ + +struct virtio_console_config { + /* colums of the screens */ + __u16 cols; + /* rows of the screens */ + __u16 rows; +} __attribute__((packed)); + + #ifdef __KERNEL__ int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int)); #endif /* __KERNEL__ */ diff --git a/include/linux/virtio_pci.h b/include/linux/virtio_pci.h index cdef3574293..cd0fd5d181a 100644 --- a/include/linux/virtio_pci.h +++ b/include/linux/virtio_pci.h @@ -53,4 +53,12 @@ /* Virtio ABI version, this must match exactly */ #define VIRTIO_PCI_ABI_VERSION 0 + +/* How many bits to shift physical queue address written to QUEUE_PFN. + * 12 is historical, and due to x86 page size. */ +#define VIRTIO_PCI_QUEUE_ADDR_SHIFT 12 + +/* The alignment to use between consumer and producer parts of vring. + * x86 pagesize again. */ +#define VIRTIO_PCI_VRING_ALIGN 4096 #endif diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h index c4a598fb382..71e03722fb5 100644 --- a/include/linux/virtio_ring.h +++ b/include/linux/virtio_ring.h @@ -83,7 +83,7 @@ struct vring { * __u16 avail_idx; * __u16 available[num]; * - * // Padding to the next page boundary. + * // Padding to the next align boundary. * char pad[]; * * // A ring of used descriptor heads with free-running index. @@ -93,19 +93,19 @@ struct vring { * }; */ static inline void vring_init(struct vring *vr, unsigned int num, void *p, - unsigned long pagesize) + unsigned long align) { vr->num = num; vr->desc = p; vr->avail = p + num*sizeof(struct vring_desc); - vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + pagesize-1) - & ~(pagesize - 1)); + vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + align-1) + & ~(align - 1)); } -static inline unsigned vring_size(unsigned int num, unsigned long pagesize) +static inline unsigned vring_size(unsigned int num, unsigned long align) { return ((sizeof(struct vring_desc) * num + sizeof(__u16) * (2 + num) - + pagesize - 1) & ~(pagesize - 1)) + + align - 1) & ~(align - 1)) + sizeof(__u16) * 2 + sizeof(struct vring_used_elem) * num; } @@ -115,6 +115,7 @@ struct virtio_device; struct virtqueue; struct virtqueue *vring_new_virtqueue(unsigned int num, + unsigned int vring_align, struct virtio_device *vdev, void *pages, void (*notify)(struct virtqueue *vq), diff --git a/include/linux/wlp.h b/include/linux/wlp.h index 033545e145c..ac95ce6606a 100644 --- a/include/linux/wlp.h +++ b/include/linux/wlp.h @@ -646,6 +646,7 @@ struct wlp_wss { struct wlp { struct mutex mutex; struct uwb_rc *rc; /* UWB radio controller */ + struct net_device *ndev; struct uwb_pal pal; struct wlp_eda eda; struct wlp_uuid uuid; @@ -675,7 +676,7 @@ struct wlp_wss_attribute { static struct wlp_wss_attribute wss_attr_##_name = __ATTR(_name, _mode, \ _show, _store) -extern int wlp_setup(struct wlp *, struct uwb_rc *); +extern int wlp_setup(struct wlp *, struct uwb_rc *, struct net_device *ndev); extern void wlp_remove(struct wlp *); extern ssize_t wlp_neighborhood_show(struct wlp *, char *); extern int wlp_wss_setup(struct net_device *, struct wlp_wss *); |
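The virtio_ring hunk above parameterizes the ring layout on an explicit alignment instead of the host page size, and virtio_pci.h now supplies VIRTIO_PCI_VRING_ALIGN (4096) for the legacy PCI transport. A minimal sketch of how a transport might size and lay out a ring with these helpers; example_setup_vring() and its arguments are hypothetical, not part of this patch:

#include <linux/virtio_ring.h>
#include <linux/virtio_pci.h>

static unsigned example_setup_vring(struct vring *vr, unsigned int num,
				    void *queue_mem)
{
	/* bytes needed for a ring of 'num' descriptors, with the used ring
	 * placed on a VIRTIO_PCI_VRING_ALIGN boundary instead of PAGE_SIZE */
	unsigned bytes = vring_size(num, VIRTIO_PCI_VRING_ALIGN);

	/* overlay desc/avail/used onto queue_mem using the same alignment */
	vring_init(vr, num, queue_mem, VIRTIO_PCI_VRING_ALIGN);

	return bytes;
}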