Diffstat (limited to 'include/linux')
76 files changed, 1950 insertions, 638 deletions
diff --git a/include/linux/Kbuild b/include/linux/Kbuild index b68ec09399b..282a504bd1d 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild @@ -126,6 +126,7 @@ header-y += pci_regs.h header-y += pfkeyv2.h header-y += pg.h header-y += phantom.h +header-y += phonet.h header-y += pkt_cls.h header-y += pkt_sched.h header-y += posix_types.h @@ -180,6 +181,7 @@ unifdef-y += audit.h unifdef-y += auto_fs.h unifdef-y += auxvec.h unifdef-y += binfmts.h +unifdef-y += blktrace_api.h unifdef-y += capability.h unifdef-y += capi.h unifdef-y += cciss_ioctl.h @@ -232,6 +234,7 @@ unifdef-y += if_fddi.h unifdef-y += if_frad.h unifdef-y += if_ltalk.h unifdef-y += if_link.h +unifdef-y += if_phonet.h unifdef-y += if_pppol2tp.h unifdef-y += if_pppox.h unifdef-y += if_tr.h diff --git a/include/linux/ata.h b/include/linux/ata.h index 8a12d718c16..be00973d1a8 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -88,6 +88,7 @@ enum { ATA_ID_DLF = 128, ATA_ID_CSFO = 129, ATA_ID_CFA_POWER = 160, + ATA_ID_ROT_SPEED = 217, ATA_ID_PIO4 = (1 << 1), ATA_ID_SERNO_LEN = 20, @@ -667,6 +668,15 @@ static inline int ata_id_has_dword_io(const u16 *id) return 0; } +static inline int ata_id_has_unload(const u16 *id) +{ + if (ata_id_major_version(id) >= 7 && + (id[ATA_ID_CFSSE] & 0xC000) == 0x4000 && + id[ATA_ID_CFSSE] & (1 << 13)) + return 1; + return 0; +} + static inline int ata_id_current_chs_valid(const u16 *id) { /* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command @@ -691,6 +701,11 @@ static inline int ata_id_is_cfa(const u16 *id) return 0; } +static inline int ata_id_is_ssd(const u16 *id) +{ + return id[ATA_ID_ROT_SPEED] == 0x01; +} + static inline int ata_drive_40wire(const u16 *dev_id) { if (ata_id_is_sata(dev_id)) diff --git a/include/linux/bio.h b/include/linux/bio.h index 0933a14e641..ff5b4cf9e2d 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -26,21 +26,8 @@ #ifdef CONFIG_BLOCK -/* Platforms may set this to teach the BIO layer about IOMMU hardware. */ #include <asm/io.h> -#if defined(BIO_VMERGE_MAX_SIZE) && defined(BIO_VMERGE_BOUNDARY) -#define BIOVEC_VIRT_START_SIZE(x) (bvec_to_phys(x) & (BIO_VMERGE_BOUNDARY - 1)) -#define BIOVEC_VIRT_OVERSIZE(x) ((x) > BIO_VMERGE_MAX_SIZE) -#else -#define BIOVEC_VIRT_START_SIZE(x) 0 -#define BIOVEC_VIRT_OVERSIZE(x) 0 -#endif - -#ifndef BIO_VMERGE_BOUNDARY -#define BIO_VMERGE_BOUNDARY 0 -#endif - #define BIO_DEBUG #ifdef BIO_DEBUG @@ -88,25 +75,14 @@ struct bio { /* Number of segments in this BIO after * physical address coalescing is performed. */ - unsigned short bi_phys_segments; - - /* Number of segments after physical and DMA remapping - * hardware coalescing is performed. 
- */ - unsigned short bi_hw_segments; + unsigned int bi_phys_segments; unsigned int bi_size; /* residual I/O count */ - /* - * To keep track of the max hw size, we account for the - * sizes of the first and last virtually mergeable segments - * in this bio - */ - unsigned int bi_hw_front_size; - unsigned int bi_hw_back_size; - unsigned int bi_max_vecs; /* max bvl_vecs we can hold */ + unsigned int bi_comp_cpu; /* completion CPU */ + struct bio_vec *bi_io_vec; /* the actual vec list */ bio_end_io_t *bi_end_io; @@ -126,11 +102,14 @@ struct bio { #define BIO_UPTODATE 0 /* ok after I/O completion */ #define BIO_RW_BLOCK 1 /* RW_AHEAD set, and read/write would block */ #define BIO_EOF 2 /* out-out-bounds error */ -#define BIO_SEG_VALID 3 /* nr_hw_seg valid */ +#define BIO_SEG_VALID 3 /* bi_phys_segments valid */ #define BIO_CLONED 4 /* doesn't own data */ #define BIO_BOUNCED 5 /* bio is a bounce bio */ #define BIO_USER_MAPPED 6 /* contains user pages */ #define BIO_EOPNOTSUPP 7 /* not supported */ +#define BIO_CPU_AFFINE 8 /* complete bio on same CPU as submitted */ +#define BIO_NULL_MAPPED 9 /* contains invalid user pages */ +#define BIO_FS_INTEGRITY 10 /* fs owns integrity data, not block layer */ #define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag))) /* @@ -144,18 +123,31 @@ struct bio { /* * bio bi_rw flags * - * bit 0 -- read (not set) or write (set) + * bit 0 -- data direction + * If not set, bio is a read from device. If set, it's a write to device. * bit 1 -- rw-ahead when set * bit 2 -- barrier + * Insert a serialization point in the IO queue, forcing previously + * submitted IO to be completed before this oen is issued. * bit 3 -- fail fast, don't want low level driver retries * bit 4 -- synchronous I/O hint: the block layer will unplug immediately + * Note that this does NOT indicate that the IO itself is sync, just + * that the block layer will not postpone issue of this IO by plugging. + * bit 5 -- metadata request + * Used for tracing to differentiate metadata and data IO. May also + * get some preferential treatment in the IO scheduler + * bit 6 -- discard sectors + * Informs the lower level device that this range of sectors is no longer + * used by the file system and may thus be freed by the device. Used + * for flash based storage. 
*/ -#define BIO_RW 0 -#define BIO_RW_AHEAD 1 +#define BIO_RW 0 /* Must match RW in req flags (blkdev.h) */ +#define BIO_RW_AHEAD 1 /* Must match FAILFAST in req flags */ #define BIO_RW_BARRIER 2 #define BIO_RW_FAILFAST 3 #define BIO_RW_SYNC 4 #define BIO_RW_META 5 +#define BIO_RW_DISCARD 6 /* * upper 16 bits of bi_rw define the io priority of this bio @@ -185,14 +177,15 @@ struct bio { #define bio_failfast(bio) ((bio)->bi_rw & (1 << BIO_RW_FAILFAST)) #define bio_rw_ahead(bio) ((bio)->bi_rw & (1 << BIO_RW_AHEAD)) #define bio_rw_meta(bio) ((bio)->bi_rw & (1 << BIO_RW_META)) -#define bio_empty_barrier(bio) (bio_barrier(bio) && !(bio)->bi_size) +#define bio_discard(bio) ((bio)->bi_rw & (1 << BIO_RW_DISCARD)) +#define bio_empty_barrier(bio) (bio_barrier(bio) && !bio_has_data(bio) && !bio_discard(bio)) static inline unsigned int bio_cur_sectors(struct bio *bio) { if (bio->bi_vcnt) return bio_iovec(bio)->bv_len >> 9; - - return 0; + else /* dataless requests such as discard */ + return bio->bi_size >> 9; } static inline void *bio_data(struct bio *bio) @@ -236,8 +229,6 @@ static inline void *bio_data(struct bio *bio) ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) #endif -#define BIOVEC_VIRT_MERGEABLE(vec1, vec2) \ - ((((bvec_to_phys((vec1)) + (vec1)->bv_len) | bvec_to_phys((vec2))) & (BIO_VMERGE_BOUNDARY - 1)) == 0) #define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \ (((addr1) | (mask)) == (((addr2) - 1) | (mask))) #define BIOVEC_SEG_BOUNDARY(q, b1, b2) \ @@ -319,15 +310,14 @@ struct bio_pair { atomic_t cnt; int error; }; -extern struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, - int first_sectors); -extern mempool_t *bio_split_pool; +extern struct bio_pair *bio_split(struct bio *bi, int first_sectors); extern void bio_pair_release(struct bio_pair *dbio); extern struct bio_set *bioset_create(int, int); extern void bioset_free(struct bio_set *); extern struct bio *bio_alloc(gfp_t, int); +extern struct bio *bio_kmalloc(gfp_t, int); extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *); extern void bio_put(struct bio *); extern void bio_free(struct bio *, struct bio_set *); @@ -335,7 +325,6 @@ extern void bio_free(struct bio *, struct bio_set *); extern void bio_endio(struct bio *, int); struct request_queue; extern int bio_phys_segments(struct request_queue *, struct bio *); -extern int bio_hw_segments(struct request_queue *, struct bio *); extern void __bio_clone(struct bio *, struct bio *); extern struct bio *bio_clone(struct bio *, gfp_t); @@ -346,12 +335,14 @@ extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int); extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *, unsigned int, unsigned int); extern int bio_get_nr_vecs(struct block_device *); +extern sector_t bio_sector_offset(struct bio *, unsigned short, unsigned int); extern struct bio *bio_map_user(struct request_queue *, struct block_device *, - unsigned long, unsigned int, int); + unsigned long, unsigned int, int, gfp_t); struct sg_iovec; +struct rq_map_data; extern struct bio *bio_map_user_iov(struct request_queue *, struct block_device *, - struct sg_iovec *, int, int); + struct sg_iovec *, int, int, gfp_t); extern void bio_unmap_user(struct bio *); extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int, gfp_t); @@ -359,15 +350,25 @@ extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int, gfp_t, int); extern void bio_set_pages_dirty(struct bio *bio); extern void bio_check_pages_dirty(struct bio *bio); 
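[Editor's note] The bio.h hunks above add a discard request type to the bio layer: BIO_RW_DISCARD marks a bio that carries no data pages, bio_has_data() reports false for it, and bio_cur_sectors() falls back to bi_size for such dataless requests. Below is a minimal illustrative sketch of that usage (not part of the patch); example_issue_discard() is a hypothetical helper, and bi_end_io/bio_put handling plus error checking are omitted.

/*
 * Editor's sketch: build a dataless discard bio with the new flag and rely
 * on the helpers added above. Hypothetical example, not part of the patch.
 */
static void example_issue_discard(struct block_device *bdev,
				  sector_t sector, sector_t nr_sects)
{
	struct bio *bio = bio_alloc(GFP_KERNEL, 0);	/* no biovecs needed */

	bio->bi_sector = sector;
	bio->bi_bdev   = bdev;
	bio->bi_size   = nr_sects << 9;		/* length carried in bytes only */

	/* bio_has_data() is false here, yet bio_cur_sectors() still returns
	 * bi_size >> 9 for dataless requests such as this one. */
	submit_bio(1 << BIO_RW_DISCARD, bio);
}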
-extern struct bio *bio_copy_user(struct request_queue *, unsigned long, unsigned int, int); -extern struct bio *bio_copy_user_iov(struct request_queue *, struct sg_iovec *, - int, int); +extern struct bio *bio_copy_user(struct request_queue *, struct rq_map_data *, + unsigned long, unsigned int, int, gfp_t); +extern struct bio *bio_copy_user_iov(struct request_queue *, + struct rq_map_data *, struct sg_iovec *, + int, int, gfp_t); extern int bio_uncopy_user(struct bio *); void zero_fill_bio(struct bio *bio); extern struct bio_vec *bvec_alloc_bs(gfp_t, int, unsigned long *, struct bio_set *); extern unsigned int bvec_nr_vecs(unsigned short idx); /* + * Allow queuer to specify a completion CPU for this bio + */ +static inline void bio_set_completion_cpu(struct bio *bio, unsigned int cpu) +{ + bio->bi_comp_cpu = cpu; +} + +/* * bio_set is used to allow other portions of the IO system to * allocate their own private memory pools for bio and iovec structures. * These memory pools in turn all allocate from the bio_slab @@ -445,6 +446,14 @@ static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx, __bio_kmap_irq((bio), (bio)->bi_idx, (flags)) #define bio_kunmap_irq(buf,flags) __bio_kunmap_irq(buf, flags) +/* + * Check whether this bio carries any data or not. A NULL bio is allowed. + */ +static inline int bio_has_data(struct bio *bio) +{ + return bio && bio->bi_io_vec != NULL; +} + #if defined(CONFIG_BLK_DEV_INTEGRITY) #define bip_vec_idx(bip, idx) (&(bip->bip_vec[(idx)])) @@ -458,14 +467,7 @@ static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx, #define bip_for_each_vec(bvl, bip, i) \ __bip_for_each_vec(bvl, bip, i, (bip)->bip_idx) -static inline int bio_integrity(struct bio *bio) -{ -#if defined(CONFIG_BLK_DEV_INTEGRITY) - return bio->bi_integrity != NULL; -#else - return 0; -#endif -} +#define bio_integrity(bio) (bio->bi_integrity != NULL) extern struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *, gfp_t, unsigned int, struct bio_set *); extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 53ea933cf60..a92d9e4ea96 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -16,7 +16,9 @@ #include <linux/bio.h> #include <linux/module.h> #include <linux/stringify.h> +#include <linux/gfp.h> #include <linux/bsg.h> +#include <linux/smp.h> #include <asm/scatterlist.h> @@ -54,7 +56,6 @@ enum rq_cmd_type_bits { REQ_TYPE_PM_SUSPEND, /* suspend request */ REQ_TYPE_PM_RESUME, /* resume request */ REQ_TYPE_PM_SHUTDOWN, /* shutdown request */ - REQ_TYPE_FLUSH, /* flush request */ REQ_TYPE_SPECIAL, /* driver defined type */ REQ_TYPE_LINUX_BLOCK, /* generic block layer message */ /* @@ -76,19 +77,18 @@ enum rq_cmd_type_bits { * */ enum { - /* - * just examples for now - */ REQ_LB_OP_EJECT = 0x40, /* eject request */ - REQ_LB_OP_FLUSH = 0x41, /* flush device */ + REQ_LB_OP_FLUSH = 0x41, /* flush request */ + REQ_LB_OP_DISCARD = 0x42, /* discard sectors */ }; /* - * request type modified bits. first three bits match BIO_RW* bits, important + * request type modified bits. first two bits match BIO_RW* bits, important */ enum rq_flag_bits { __REQ_RW, /* not set, read. 
set, write */ __REQ_FAILFAST, /* no low level driver retries */ + __REQ_DISCARD, /* request to discard sectors */ __REQ_SORTED, /* elevator knows about this request */ __REQ_SOFTBARRIER, /* may not be passed by ioscheduler */ __REQ_HARDBARRIER, /* may not be passed by drive either */ @@ -111,6 +111,7 @@ enum rq_flag_bits { }; #define REQ_RW (1 << __REQ_RW) +#define REQ_DISCARD (1 << __REQ_DISCARD) #define REQ_FAILFAST (1 << __REQ_FAILFAST) #define REQ_SORTED (1 << __REQ_SORTED) #define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER) @@ -140,12 +141,14 @@ enum rq_flag_bits { */ struct request { struct list_head queuelist; - struct list_head donelist; + struct call_single_data csd; + int cpu; struct request_queue *q; unsigned int cmd_flags; enum rq_cmd_type_bits cmd_type; + unsigned long atomic_flags; /* Maintain bio traversal state for part by part I/O submission. * hard_* are block layer internals, no driver should touch them! @@ -190,13 +193,6 @@ struct request { */ unsigned short nr_phys_segments; - /* Number of scatter-gather addr+len pairs after - * physical and DMA remapping hardware coalescing is performed. - * This is the number of scatter-gather entries the driver - * will actually have to deal with after DMA mapping is done. - */ - unsigned short nr_hw_segments; - unsigned short ioprio; void *special; @@ -220,6 +216,8 @@ struct request { void *data; void *sense; + unsigned long deadline; + struct list_head timeout_list; unsigned int timeout; int retries; @@ -233,6 +231,11 @@ struct request { struct request *next_rq; }; +static inline unsigned short req_get_ioprio(struct request *req) +{ + return req->ioprio; +} + /* * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME * requests. Some step values could eventually be made generic. 
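[Editor's note] At the request level the same information appears as the new REQ_DISCARD bit in rq->cmd_flags, and req_get_ioprio() gives drivers a plain accessor for rq->ioprio. A hedged sketch of how a low-level driver's dispatch path might branch on it follows (editor's example, not part of the patch); my_handle_discard() and my_handle_rw() are hypothetical stand-ins for driver-specific code.

/* Editor's sketch, not part of the patch. */
static void my_handle_discard(struct request *rq) { /* trim the range */ }
static void my_handle_rw(struct request *rq, unsigned short prio) { /* ... */ }

static void example_dispatch(struct request *rq)
{
	if (rq->cmd_flags & REQ_DISCARD)
		my_handle_discard(rq);		/* dataless, sectors only */
	else
		my_handle_rw(rq, req_get_ioprio(rq));
}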
@@ -252,6 +255,7 @@ typedef void (request_fn_proc) (struct request_queue *q); typedef int (make_request_fn) (struct request_queue *q, struct bio *bio); typedef int (prep_rq_fn) (struct request_queue *, struct request *); typedef void (unplug_fn) (struct request_queue *); +typedef int (prepare_discard_fn) (struct request_queue *, struct request *); struct bio_vec; struct bvec_merge_data { @@ -265,6 +269,15 @@ typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *, typedef void (prepare_flush_fn) (struct request_queue *, struct request *); typedef void (softirq_done_fn)(struct request *); typedef int (dma_drain_needed_fn)(struct request *); +typedef int (lld_busy_fn) (struct request_queue *q); + +enum blk_eh_timer_return { + BLK_EH_NOT_HANDLED, + BLK_EH_HANDLED, + BLK_EH_RESET_TIMER, +}; + +typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *); enum blk_queue_state { Queue_down, @@ -307,10 +320,13 @@ struct request_queue make_request_fn *make_request_fn; prep_rq_fn *prep_rq_fn; unplug_fn *unplug_fn; + prepare_discard_fn *prepare_discard_fn; merge_bvec_fn *merge_bvec_fn; prepare_flush_fn *prepare_flush_fn; softirq_done_fn *softirq_done_fn; + rq_timed_out_fn *rq_timed_out_fn; dma_drain_needed_fn *dma_drain_needed; + lld_busy_fn *lld_busy_fn; /* * Dispatch queue sorting @@ -385,6 +401,10 @@ struct request_queue unsigned int nr_sorted; unsigned int in_flight; + unsigned int rq_timeout; + struct timer_list timeout; + struct list_head timeout_list; + /* * sg stuff */ @@ -421,6 +441,10 @@ struct request_queue #define QUEUE_FLAG_ELVSWITCH 8 /* don't use elevator, just do FIFO */ #define QUEUE_FLAG_BIDI 9 /* queue supports bidi requests */ #define QUEUE_FLAG_NOMERGES 10 /* disable merge attempts */ +#define QUEUE_FLAG_SAME_COMP 11 /* force complete on same CPU */ +#define QUEUE_FLAG_FAIL_IO 12 /* fake timeout */ +#define QUEUE_FLAG_STACKABLE 13 /* supports request stacking */ +#define QUEUE_FLAG_NONROT 14 /* non-rotational device (SSD) */ static inline int queue_is_locked(struct request_queue *q) { @@ -526,7 +550,10 @@ enum { #define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags) #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) +#define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags) #define blk_queue_flushing(q) ((q)->ordseq) +#define blk_queue_stackable(q) \ + test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags) #define blk_fs_request(rq) ((rq)->cmd_type == REQ_TYPE_FS) #define blk_pc_request(rq) ((rq)->cmd_type == REQ_TYPE_BLOCK_PC) @@ -536,16 +563,18 @@ enum { #define blk_noretry_request(rq) ((rq)->cmd_flags & REQ_FAILFAST) #define blk_rq_started(rq) ((rq)->cmd_flags & REQ_STARTED) -#define blk_account_rq(rq) (blk_rq_started(rq) && blk_fs_request(rq)) +#define blk_account_rq(rq) (blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq))) #define blk_pm_suspend_request(rq) ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND) #define blk_pm_resume_request(rq) ((rq)->cmd_type == REQ_TYPE_PM_RESUME) #define blk_pm_request(rq) \ (blk_pm_suspend_request(rq) || blk_pm_resume_request(rq)) +#define blk_rq_cpu_valid(rq) ((rq)->cpu != -1) #define blk_sorted_rq(rq) ((rq)->cmd_flags & REQ_SORTED) #define blk_barrier_rq(rq) ((rq)->cmd_flags & REQ_HARDBARRIER) #define blk_fua_rq(rq) ((rq)->cmd_flags & REQ_FUA) +#define blk_discard_rq(rq) ((rq)->cmd_flags & REQ_DISCARD) #define blk_bidi_rq(rq) ((rq)->next_rq != NULL) #define 
blk_empty_barrier(rq) (blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors) /* rq->queuelist of dequeued request must be list_empty() */ @@ -592,7 +621,8 @@ static inline void blk_clear_queue_full(struct request_queue *q, int rw) #define RQ_NOMERGE_FLAGS \ (REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER) #define rq_mergeable(rq) \ - (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && blk_fs_request((rq))) + (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \ + (blk_discard_rq(rq) || blk_fs_request((rq)))) /* * q->prep_rq_fn return values @@ -637,6 +667,12 @@ static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio) } #endif /* CONFIG_MMU */ +struct rq_map_data { + struct page **pages; + int page_order; + int nr_entries; +}; + struct req_iterator { int i; struct bio *bio; @@ -664,6 +700,10 @@ extern void __blk_put_request(struct request_queue *, struct request *); extern struct request *blk_get_request(struct request_queue *, int, gfp_t); extern void blk_insert_request(struct request_queue *, struct request *, int, void *); extern void blk_requeue_request(struct request_queue *, struct request *); +extern int blk_rq_check_limits(struct request_queue *q, struct request *rq); +extern int blk_lld_busy(struct request_queue *q); +extern int blk_insert_cloned_request(struct request_queue *q, + struct request *rq); extern void blk_plug_device(struct request_queue *); extern void blk_plug_device_unlocked(struct request_queue *); extern int blk_remove_plug(struct request_queue *); @@ -705,11 +745,14 @@ extern void __blk_stop_queue(struct request_queue *q); extern void __blk_run_queue(struct request_queue *); extern void blk_run_queue(struct request_queue *); extern void blk_start_queueing(struct request_queue *); -extern int blk_rq_map_user(struct request_queue *, struct request *, void __user *, unsigned long); +extern int blk_rq_map_user(struct request_queue *, struct request *, + struct rq_map_data *, void __user *, unsigned long, + gfp_t); extern int blk_rq_unmap_user(struct bio *); extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t); extern int blk_rq_map_user_iov(struct request_queue *, struct request *, - struct sg_iovec *, int, unsigned int); + struct rq_map_data *, struct sg_iovec *, int, + unsigned int, gfp_t); extern int blk_execute_rq(struct request_queue *, struct gendisk *, struct request *, int); extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *, @@ -750,12 +793,15 @@ extern int __blk_end_request(struct request *rq, int error, extern int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes, unsigned int bidi_bytes); extern void end_request(struct request *, int); -extern void end_queued_request(struct request *, int); -extern void end_dequeued_request(struct request *, int); extern int blk_end_request_callback(struct request *rq, int error, unsigned int nr_bytes, int (drv_callback)(struct request *)); extern void blk_complete_request(struct request *); +extern void __blk_complete_request(struct request *); +extern void blk_abort_request(struct request *); +extern void blk_abort_queue(struct request_queue *); +extern void blk_update_request(struct request *rq, int error, + unsigned int nr_bytes); /* * blk_end_request() takes bytes instead of sectors as a complete size. 
@@ -790,12 +836,16 @@ extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int); extern int blk_queue_dma_drain(struct request_queue *q, dma_drain_needed_fn *dma_drain_needed, void *buf, unsigned int size); +extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn); extern void blk_queue_segment_boundary(struct request_queue *, unsigned long); extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn); extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *); extern void blk_queue_dma_alignment(struct request_queue *, int); extern void blk_queue_update_dma_alignment(struct request_queue *, int); extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *); +extern void blk_queue_set_discard(struct request_queue *, prepare_discard_fn *); +extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *); +extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *); extern int blk_do_ordered(struct request_queue *, struct request **); @@ -837,6 +887,16 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt, } extern int blkdev_issue_flush(struct block_device *, sector_t *); +extern int blkdev_issue_discard(struct block_device *, + sector_t sector, sector_t nr_sects, gfp_t); + +static inline int sb_issue_discard(struct super_block *sb, + sector_t block, sector_t nr_blocks) +{ + block <<= (sb->s_blocksize_bits - 9); + nr_blocks <<= (sb->s_blocksize_bits - 9); + return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_KERNEL); +} /* * command filter functions @@ -874,6 +934,13 @@ static inline int queue_dma_alignment(struct request_queue *q) return q ? 
q->dma_alignment : 511; } +static inline int blk_rq_aligned(struct request_queue *q, void *addr, + unsigned int len) +{ + unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask; + return !((unsigned long)addr & alignment) && !(len & alignment); +} + /* assumes size > 256 */ static inline unsigned int blksize_bits(unsigned int size) { @@ -900,7 +967,7 @@ static inline void put_dev_sector(Sector p) } struct work_struct; -int kblockd_schedule_work(struct work_struct *work); +int kblockd_schedule_work(struct request_queue *q, struct work_struct *work); void kblockd_flush_work(struct work_struct *work); #define MODULE_ALIAS_BLOCKDEV(major,minor) \ @@ -945,49 +1012,19 @@ struct blk_integrity { extern int blk_integrity_register(struct gendisk *, struct blk_integrity *); extern void blk_integrity_unregister(struct gendisk *); -extern int blk_integrity_compare(struct block_device *, struct block_device *); +extern int blk_integrity_compare(struct gendisk *, struct gendisk *); extern int blk_rq_map_integrity_sg(struct request *, struct scatterlist *); extern int blk_rq_count_integrity_sg(struct request *); -static inline unsigned short blk_integrity_tuple_size(struct blk_integrity *bi) -{ - if (bi) - return bi->tuple_size; - - return 0; -} - -static inline struct blk_integrity *bdev_get_integrity(struct block_device *bdev) +static inline +struct blk_integrity *bdev_get_integrity(struct block_device *bdev) { return bdev->bd_disk->integrity; } -static inline unsigned int bdev_get_tag_size(struct block_device *bdev) +static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) { - struct blk_integrity *bi = bdev_get_integrity(bdev); - - if (bi) - return bi->tag_size; - - return 0; -} - -static inline int bdev_integrity_enabled(struct block_device *bdev, int rw) -{ - struct blk_integrity *bi = bdev_get_integrity(bdev); - - if (bi == NULL) - return 0; - - if (rw == READ && bi->verify_fn != NULL && - (bi->flags & INTEGRITY_FLAG_READ)) - return 1; - - if (rw == WRITE && bi->generate_fn != NULL && - (bi->flags & INTEGRITY_FLAG_WRITE)) - return 1; - - return 0; + return disk->integrity; } static inline int blk_integrity_rq(struct request *rq) @@ -1004,7 +1041,7 @@ static inline int blk_integrity_rq(struct request *rq) #define blk_rq_count_integrity_sg(a) (0) #define blk_rq_map_integrity_sg(a, b) (0) #define bdev_get_integrity(a) (0) -#define bdev_get_tag_size(a) (0) +#define blk_get_integrity(a) (0) #define blk_integrity_compare(a, b) (0) #define blk_integrity_register(a, b) (0) #define blk_integrity_unregister(a) do { } while (0); diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index d084b8d227a..3a31eb50616 100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h @@ -1,8 +1,10 @@ #ifndef BLKTRACE_H #define BLKTRACE_H +#ifdef __KERNEL__ #include <linux/blkdev.h> #include <linux/relay.h> +#endif /* * Trace categories @@ -21,6 +23,7 @@ enum blktrace_cat { BLK_TC_NOTIFY = 1 << 10, /* special message */ BLK_TC_AHEAD = 1 << 11, /* readahead */ BLK_TC_META = 1 << 12, /* metadata */ + BLK_TC_DISCARD = 1 << 13, /* discard requests */ BLK_TC_END = 1 << 15, /* only 16-bits, reminder */ }; @@ -47,6 +50,7 @@ enum blktrace_act { __BLK_TA_SPLIT, /* bio was split */ __BLK_TA_BOUNCE, /* bio was bounced */ __BLK_TA_REMAP, /* bio was remapped */ + __BLK_TA_ABORT, /* request aborted */ }; /* @@ -77,6 +81,7 @@ enum blktrace_notify { #define BLK_TA_SPLIT (__BLK_TA_SPLIT) #define BLK_TA_BOUNCE (__BLK_TA_BOUNCE) #define BLK_TA_REMAP (__BLK_TA_REMAP | 
BLK_TC_ACT(BLK_TC_QUEUE)) +#define BLK_TA_ABORT (__BLK_TA_ABORT | BLK_TC_ACT(BLK_TC_QUEUE)) #define BLK_TN_PROCESS (__BLK_TN_PROCESS | BLK_TC_ACT(BLK_TC_NOTIFY)) #define BLK_TN_TIMESTAMP (__BLK_TN_TIMESTAMP | BLK_TC_ACT(BLK_TC_NOTIFY)) @@ -89,17 +94,17 @@ enum blktrace_notify { * The trace itself */ struct blk_io_trace { - u32 magic; /* MAGIC << 8 | version */ - u32 sequence; /* event number */ - u64 time; /* in microseconds */ - u64 sector; /* disk offset */ - u32 bytes; /* transfer length */ - u32 action; /* what happened */ - u32 pid; /* who did it */ - u32 device; /* device number */ - u32 cpu; /* on what cpu did it happen */ - u16 error; /* completion error */ - u16 pdu_len; /* length of data after this trace */ + __u32 magic; /* MAGIC << 8 | version */ + __u32 sequence; /* event number */ + __u64 time; /* in microseconds */ + __u64 sector; /* disk offset */ + __u32 bytes; /* transfer length */ + __u32 action; /* what happened */ + __u32 pid; /* who did it */ + __u32 device; /* device number */ + __u32 cpu; /* on what cpu did it happen */ + __u16 error; /* completion error */ + __u16 pdu_len; /* length of data after this trace */ }; /* @@ -117,6 +122,23 @@ enum { Blktrace_stopped, }; +#define BLKTRACE_BDEV_SIZE 32 + +/* + * User setup structure passed with BLKTRACESTART + */ +struct blk_user_trace_setup { + char name[BLKTRACE_BDEV_SIZE]; /* output */ + __u16 act_mask; /* input */ + __u32 buf_size; /* input */ + __u32 buf_nr; /* input */ + __u64 start_lba; + __u64 end_lba; + __u32 pid; +}; + +#ifdef __KERNEL__ +#if defined(CONFIG_BLK_DEV_IO_TRACE) struct blk_trace { int trace_state; struct rchan *rchan; @@ -133,21 +155,6 @@ struct blk_trace { atomic_t dropped; }; -/* - * User setup structure passed with BLKTRACESTART - */ -struct blk_user_trace_setup { - char name[BDEVNAME_SIZE]; /* output */ - u16 act_mask; /* input */ - u32 buf_size; /* input */ - u32 buf_nr; /* input */ - u64 start_lba; - u64 end_lba; - u32 pid; -}; - -#ifdef __KERNEL__ -#if defined(CONFIG_BLK_DEV_IO_TRACE) extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *); extern void blk_trace_shutdown(struct request_queue *); extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, void *); @@ -195,6 +202,9 @@ static inline void blk_add_trace_rq(struct request_queue *q, struct request *rq, if (likely(!bt)) return; + if (blk_discard_rq(rq)) + rw |= (1 << BIO_RW_DISCARD); + if (blk_pc_request(rq)) { what |= BLK_TC_ACT(BLK_TC_PC); __blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors, sizeof(rq->cmd), rq->cmd); diff --git a/include/linux/cnt32_to_63.h b/include/linux/cnt32_to_63.h new file mode 100644 index 00000000000..8c0f9505b48 --- /dev/null +++ b/include/linux/cnt32_to_63.h @@ -0,0 +1,80 @@ +/* + * Extend a 32-bit counter to 63 bits + * + * Author: Nicolas Pitre + * Created: December 3, 2006 + * Copyright: MontaVista Software, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. 
+ */ + +#ifndef __LINUX_CNT32_TO_63_H__ +#define __LINUX_CNT32_TO_63_H__ + +#include <linux/compiler.h> +#include <linux/types.h> +#include <asm/byteorder.h> + +/* this is used only to give gcc a clue about good code generation */ +union cnt32_to_63 { + struct { +#if defined(__LITTLE_ENDIAN) + u32 lo, hi; +#elif defined(__BIG_ENDIAN) + u32 hi, lo; +#endif + }; + u64 val; +}; + + +/** + * cnt32_to_63 - Expand a 32-bit counter to a 63-bit counter + * @cnt_lo: The low part of the counter + * + * Many hardware clock counters are only 32 bits wide and therefore have + * a relatively short period making wrap-arounds rather frequent. This + * is a problem when implementing sched_clock() for example, where a 64-bit + * non-wrapping monotonic value is expected to be returned. + * + * To overcome that limitation, let's extend a 32-bit counter to 63 bits + * in a completely lock free fashion. Bits 0 to 31 of the clock are provided + * by the hardware while bits 32 to 62 are stored in memory. The top bit in + * memory is used to synchronize with the hardware clock half-period. When + * the top bit of both counters (hardware and in memory) differ then the + * memory is updated with a new value, incrementing it when the hardware + * counter wraps around. + * + * Because a word store in memory is atomic then the incremented value will + * always be in synch with the top bit indicating to any potential concurrent + * reader if the value in memory is up to date or not with regards to the + * needed increment. And any race in updating the value in memory is harmless + * as the same value would simply be stored more than once. + * + * The only restriction for the algorithm to work properly is that this + * code must be executed at least once per each half period of the 32-bit + * counter to properly update the state bit in memory. This is usually not a + * problem in practice, but if it is then a kernel timer could be scheduled + * to manage for this code to be executed often enough. + * + * Note that the top bit (bit 63) in the returned value should be considered + * as garbage. It is not cleared here because callers are likely to use a + * multiplier on the returned value which can get rid of the top bit + * implicitly by making the multiplier even, therefore saving on a runtime + * clear-bit instruction. Otherwise caller must remember to clear the top + * bit explicitly. + */ +#define cnt32_to_63(cnt_lo) \ +({ \ + static volatile u32 __m_cnt_hi; \ + union cnt32_to_63 __x; \ + __x.hi = __m_cnt_hi; \ + __x.lo = (cnt_lo); \ + if (unlikely((s32)(__x.hi ^ __x.lo) < 0)) \ + __m_cnt_hi = __x.hi = (__x.hi ^ 0x80000000) + (__x.hi >> 31); \ + __x.val; \ +}) + +#endif diff --git a/include/linux/compiler.h b/include/linux/compiler.h index c8bd2daf95e..8322141ee48 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -190,7 +190,9 @@ extern void __chk_io_ptr(const volatile void __iomem *); * ACCESS_ONCE() in different C statements. * * This macro does absolutely -nothing- to prevent the CPU from reordering, - * merging, or refetching absolutely anything at any time. + * merging, or refetching absolutely anything at any time. Its main intended + * use is to mediate communication between process-level code and irq/NMI + * handlers, all running on the same CPU. 
*/ #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) diff --git a/include/linux/completion.h b/include/linux/completion.h index 02ef8835999..4a6b604ef7e 100644 --- a/include/linux/completion.h +++ b/include/linux/completion.h @@ -10,6 +10,18 @@ #include <linux/wait.h> +/** + * struct completion - structure used to maintain state for a "completion" + * + * This is the opaque structure used to maintain the state for a "completion". + * Completions currently use a FIFO to queue threads that have to wait for + * the "completion" event. + * + * See also: complete(), wait_for_completion() (and friends _timeout, + * _interruptible, _interruptible_timeout, and _killable), init_completion(), + * and macros DECLARE_COMPLETION(), DECLARE_COMPLETION_ONSTACK(), and + * INIT_COMPLETION(). + */ struct completion { unsigned int done; wait_queue_head_t wait; @@ -21,6 +33,14 @@ struct completion { #define COMPLETION_INITIALIZER_ONSTACK(work) \ ({ init_completion(&work); work; }) +/** + * DECLARE_COMPLETION: - declare and initialize a completion structure + * @work: identifier for the completion structure + * + * This macro declares and initializes a completion structure. Generally used + * for static declarations. You should use the _ONSTACK variant for automatic + * variables. + */ #define DECLARE_COMPLETION(work) \ struct completion work = COMPLETION_INITIALIZER(work) @@ -29,6 +49,13 @@ struct completion { * completions - so we use the _ONSTACK() variant for those that * are on the kernel stack: */ +/** + * DECLARE_COMPLETION_ONSTACK: - declare and initialize a completion structure + * @work: identifier for the completion structure + * + * This macro declares and initializes a completion structure on the kernel + * stack. + */ #ifdef CONFIG_LOCKDEP # define DECLARE_COMPLETION_ONSTACK(work) \ struct completion work = COMPLETION_INITIALIZER_ONSTACK(work) @@ -36,6 +63,13 @@ struct completion { # define DECLARE_COMPLETION_ONSTACK(work) DECLARE_COMPLETION(work) #endif +/** + * init_completion: - Initialize a dynamically allocated completion + * @x: completion structure that is to be initialized + * + * This inline function will initialize a dynamically created completion + * structure. + */ static inline void init_completion(struct completion *x) { x->done = 0; @@ -55,6 +89,13 @@ extern bool completion_done(struct completion *x); extern void complete(struct completion *); extern void complete_all(struct completion *); +/** + * INIT_COMPLETION: - reinitialize a completion structure + * @x: completion structure to be reinitialized + * + * This macro should be used to reinitialize a completion structure so it can + * be reused. This is especially important after complete_all() is used. 
+ */ #define INIT_COMPLETION(x) ((x).done = 0) diff --git a/include/linux/cpu.h b/include/linux/cpu.h index d7faf880849..c2747ac2ae4 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -69,6 +69,7 @@ static inline void unregister_cpu_notifier(struct notifier_block *nb) #endif int cpu_up(unsigned int cpu); +void notify_cpu_starting(unsigned int cpu); extern void cpu_hotplug_init(void); extern void cpu_maps_update_begin(void); extern void cpu_maps_update_done(void); diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 6fd5668aa57..1ee608fd7b7 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -187,7 +187,8 @@ extern int __cpufreq_driver_target(struct cpufreq_policy *policy, unsigned int relation); -extern int __cpufreq_driver_getavg(struct cpufreq_policy *policy); +extern int __cpufreq_driver_getavg(struct cpufreq_policy *policy, + unsigned int cpu); int cpufreq_register_governor(struct cpufreq_governor *governor); void cpufreq_unregister_governor(struct cpufreq_governor *governor); @@ -226,7 +227,9 @@ struct cpufreq_driver { unsigned int (*get) (unsigned int cpu); /* optional */ - unsigned int (*getavg) (unsigned int cpu); + unsigned int (*getavg) (struct cpufreq_policy *policy, + unsigned int cpu); + int (*exit) (struct cpufreq_policy *policy); int (*suspend) (struct cpufreq_policy *policy, pm_message_t pmsg); int (*resume) (struct cpufreq_policy *policy); diff --git a/include/linux/crypto.h b/include/linux/crypto.h index c43dc47fdf7..3d2317e4af2 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -38,6 +38,7 @@ #define CRYPTO_ALG_TYPE_DIGEST 0x00000008 #define CRYPTO_ALG_TYPE_HASH 0x00000009 #define CRYPTO_ALG_TYPE_AHASH 0x0000000a +#define CRYPTO_ALG_TYPE_RNG 0x0000000c #define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e #define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000c @@ -61,6 +62,14 @@ #define CRYPTO_ALG_GENIV 0x00000200 /* + * Set if the algorithm has passed automated run-time testing. Note that + * if there is no run-time testing for a given algorithm it is considered + * to have passed. + */ + +#define CRYPTO_ALG_TESTED 0x00000400 + +/* * Transform masks and values (for crt_flags). 
*/ #define CRYPTO_TFM_REQ_MASK 0x000fff00 @@ -105,6 +114,7 @@ struct crypto_aead; struct crypto_blkcipher; struct crypto_hash; struct crypto_ahash; +struct crypto_rng; struct crypto_tfm; struct crypto_type; struct aead_givcrypt_request; @@ -290,6 +300,15 @@ struct compress_alg { unsigned int slen, u8 *dst, unsigned int *dlen); }; +struct rng_alg { + int (*rng_make_random)(struct crypto_rng *tfm, u8 *rdata, + unsigned int dlen); + int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen); + + unsigned int seedsize; +}; + + #define cra_ablkcipher cra_u.ablkcipher #define cra_aead cra_u.aead #define cra_blkcipher cra_u.blkcipher @@ -298,6 +317,7 @@ struct compress_alg { #define cra_hash cra_u.hash #define cra_ahash cra_u.ahash #define cra_compress cra_u.compress +#define cra_rng cra_u.rng struct crypto_alg { struct list_head cra_list; @@ -325,6 +345,7 @@ struct crypto_alg { struct hash_alg hash; struct ahash_alg ahash; struct compress_alg compress; + struct rng_alg rng; } cra_u; int (*cra_init)(struct crypto_tfm *tfm); @@ -430,6 +451,12 @@ struct compress_tfm { u8 *dst, unsigned int *dlen); }; +struct rng_tfm { + int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata, + unsigned int dlen); + int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen); +}; + #define crt_ablkcipher crt_u.ablkcipher #define crt_aead crt_u.aead #define crt_blkcipher crt_u.blkcipher @@ -437,6 +464,7 @@ struct compress_tfm { #define crt_hash crt_u.hash #define crt_ahash crt_u.ahash #define crt_compress crt_u.compress +#define crt_rng crt_u.rng struct crypto_tfm { @@ -450,6 +478,7 @@ struct crypto_tfm { struct hash_tfm hash; struct ahash_tfm ahash; struct compress_tfm compress; + struct rng_tfm rng; } crt_u; struct crypto_alg *__crt_alg; @@ -481,6 +510,10 @@ struct crypto_hash { struct crypto_tfm base; }; +struct crypto_rng { + struct crypto_tfm base; +}; + enum { CRYPTOA_UNSPEC, CRYPTOA_ALG, @@ -515,6 +548,8 @@ struct crypto_tfm *crypto_alloc_tfm(const char *alg_name, u32 tfm_flags); struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask); void crypto_free_tfm(struct crypto_tfm *tfm); +int alg_test(const char *driver, const char *alg, u32 type, u32 mask); + /* * Transform helpers which query the underlying algorithm. */ diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index a90222e3297..08d783592b7 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -13,7 +13,6 @@ struct dm_target; struct dm_table; -struct dm_dev; struct mapped_device; struct bio_vec; @@ -84,6 +83,12 @@ void dm_error(const char *message); */ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev); +struct dm_dev { + struct block_device *bdev; + int mode; + char name[16]; +}; + /* * Constructors should call these functions to ensure destination devices * are opened/closed correctly. @@ -202,6 +207,7 @@ int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid); struct gendisk *dm_disk(struct mapped_device *md); int dm_suspended(struct mapped_device *md); int dm_noflush_suspending(struct dm_target *ti); +union map_info *dm_get_mapinfo(struct bio *bio); /* * Geometry functions. @@ -232,6 +238,11 @@ int dm_table_add_target(struct dm_table *t, const char *type, int dm_table_complete(struct dm_table *t); /* + * Unplug all devices in a table. + */ +void dm_table_unplug_all(struct dm_table *t); + +/* * Table reference counting. 
*/ struct dm_table *dm_get_table(struct mapped_device *md); @@ -256,6 +267,11 @@ void dm_table_event(struct dm_table *t); */ int dm_swap_table(struct mapped_device *md, struct dm_table *t); +/* + * A wrapper around vmalloc. + */ +void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size); + /*----------------------------------------------------------------- * Macros. *---------------------------------------------------------------*/ diff --git a/include/linux/device.h b/include/linux/device.h index 4d8372d135d..246937c9cbc 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -199,6 +199,11 @@ struct class { struct class_private *p; }; +struct class_dev_iter { + struct klist_iter ki; + const struct device_type *type; +}; + extern struct kobject *sysfs_dev_block_kobj; extern struct kobject *sysfs_dev_char_kobj; extern int __must_check __class_register(struct class *class, @@ -213,6 +218,13 @@ extern void class_unregister(struct class *class); __class_register(class, &__key); \ }) +extern void class_dev_iter_init(struct class_dev_iter *iter, + struct class *class, + struct device *start, + const struct device_type *type); +extern struct device *class_dev_iter_next(struct class_dev_iter *iter); +extern void class_dev_iter_exit(struct class_dev_iter *iter); + extern int class_for_each_device(struct class *class, struct device *start, void *data, int (*fn)(struct device *dev, void *data)); @@ -396,7 +408,7 @@ struct device { spinlock_t devres_lock; struct list_head devres_head; - struct list_head node; + struct klist_node knode_class; struct class *class; dev_t devt; /* dev_t, creates the sysfs "dev" */ struct attribute_group **groups; /* optional groups */ diff --git a/include/linux/dlm.h b/include/linux/dlm.h index 203a025e30e..b9cd38603fd 100644 --- a/include/linux/dlm.h +++ b/include/linux/dlm.h @@ -2,7 +2,7 @@ ******************************************************************************* ** ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. -** Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved. +** Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. 
** ** This copyrighted material is made available to anyone wishing to use, ** modify, copy, or redistribute it subject to the terms and conditions @@ -65,9 +65,12 @@ struct dlm_lksb { char * sb_lvbptr; }; +/* dlm_new_lockspace() flags */ + #define DLM_LSFL_NODIR 0x00000001 #define DLM_LSFL_TIMEWARN 0x00000002 #define DLM_LSFL_FS 0x00000004 +#define DLM_LSFL_NEWEXCL 0x00000008 #ifdef __KERNEL__ diff --git a/include/linux/dlm_device.h b/include/linux/dlm_device.h index c6034508fed..3060783c419 100644 --- a/include/linux/dlm_device.h +++ b/include/linux/dlm_device.h @@ -26,7 +26,7 @@ /* Version of the device interface */ #define DLM_DEVICE_VERSION_MAJOR 6 #define DLM_DEVICE_VERSION_MINOR 0 -#define DLM_DEVICE_VERSION_PATCH 0 +#define DLM_DEVICE_VERSION_PATCH 1 /* struct passed to the lock write */ struct dlm_lock_params { diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 639624b55fb..92f6f634e3e 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -112,6 +112,7 @@ extern struct request *elv_latter_request(struct request_queue *, struct request extern int elv_register_queue(struct request_queue *q); extern void elv_unregister_queue(struct request_queue *q); extern int elv_may_queue(struct request_queue *, int); +extern void elv_abort_queue(struct request_queue *); extern void elv_completed_request(struct request_queue *, struct request *); extern int elv_set_request(struct request_queue *, struct request *, gfp_t); extern void elv_put_request(struct request_queue *, struct request *); @@ -173,15 +174,15 @@ enum { #define rb_entry_rq(node) rb_entry((node), struct request, rb_node) /* - * Hack to reuse the donelist list_head as the fifo time holder while + * Hack to reuse the csd.list list_head as the fifo time holder while * the request is in the io scheduler. Saves an unsigned long in rq. 
*/ -#define rq_fifo_time(rq) ((unsigned long) (rq)->donelist.next) -#define rq_set_fifo_time(rq,exp) ((rq)->donelist.next = (void *) (exp)) +#define rq_fifo_time(rq) ((unsigned long) (rq)->csd.list.next) +#define rq_set_fifo_time(rq,exp) ((rq)->csd.list.next = (void *) (exp)) #define rq_entry_fifo(ptr) list_entry((ptr), struct request, queuelist) #define rq_fifo_clear(rq) do { \ list_del_init(&(rq)->queuelist); \ - INIT_LIST_HEAD(&(rq)->donelist); \ + INIT_LIST_HEAD(&(rq)->csd.list); \ } while (0) /* diff --git a/include/linux/fd.h b/include/linux/fd.h index b6bd41d2b46..f5d194af07a 100644 --- a/include/linux/fd.h +++ b/include/linux/fd.h @@ -15,10 +15,16 @@ struct floppy_struct { sect, /* sectors per track */ head, /* nr of heads */ track, /* nr of tracks */ - stretch; /* !=0 means double track steps */ + stretch; /* bit 0 !=0 means double track steps */ + /* bit 1 != 0 means swap sides */ + /* bits 2..9 give the first sector */ + /* number (the LSB is flipped) */ #define FD_STRETCH 1 #define FD_SWAPSIDES 2 #define FD_ZEROBASED 4 +#define FD_SECTBASEMASK 0x3FC +#define FD_MKSECTBASE(s) (((s) ^ 1) << 2) +#define FD_SECTBASE(floppy) ((((floppy)->stretch & FD_SECTBASEMASK) >> 2) ^ 1) unsigned char gap, /* gap1 size */ diff --git a/include/linux/fs.h b/include/linux/fs.h index 580b513668f..32477e8872d 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -86,7 +86,9 @@ extern int dir_notify_enable; #define READ_META (READ | (1 << BIO_RW_META)) #define WRITE_SYNC (WRITE | (1 << BIO_RW_SYNC)) #define SWRITE_SYNC (SWRITE | (1 << BIO_RW_SYNC)) -#define WRITE_BARRIER ((1 << BIO_RW) | (1 << BIO_RW_BARRIER)) +#define WRITE_BARRIER (WRITE | (1 << BIO_RW_BARRIER)) +#define DISCARD_NOBARRIER (1 << BIO_RW_DISCARD) +#define DISCARD_BARRIER ((1 << BIO_RW_DISCARD) | (1 << BIO_RW_BARRIER)) #define SEL_IN 1 #define SEL_OUT 2 @@ -222,6 +224,7 @@ extern int dir_notify_enable; #define BLKTRACESTART _IO(0x12,116) #define BLKTRACESTOP _IO(0x12,117) #define BLKTRACETEARDOWN _IO(0x12,118) +#define BLKDISCARD _IO(0x12,119) #define BMAP_IOCTL 1 /* obsolete - kept for compatibility */ #define FIBMAP _IO(0x00,1) /* bmap access */ @@ -1682,6 +1685,7 @@ extern void chrdev_show(struct seq_file *,off_t); /* fs/block_dev.c */ #define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */ +#define BDEVT_SIZE 10 /* Largest string for MAJ:MIN for blkdev */ #ifdef CONFIG_BLOCK #define BLKDEV_MAJOR_HASH_SIZE 255 @@ -1718,6 +1722,9 @@ extern int fs_may_remount_ro(struct super_block *); */ #define bio_data_dir(bio) ((bio)->bi_rw & 1) +extern void check_disk_size_change(struct gendisk *disk, + struct block_device *bdev); +extern int revalidate_disk(struct gendisk *); extern int check_disk_change(struct block_device *); extern int __invalidate_device(struct block_device *); extern int invalidate_partition(struct gendisk *, int); diff --git a/include/linux/genhd.h b/include/linux/genhd.h index be4f5e5bfe0..206cdf96c3a 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -11,12 +11,15 @@ #include <linux/types.h> #include <linux/kdev_t.h> +#include <linux/rcupdate.h> #ifdef CONFIG_BLOCK -#define kobj_to_dev(k) container_of(k, struct device, kobj) -#define dev_to_disk(device) container_of(device, struct gendisk, dev) -#define dev_to_part(device) container_of(device, struct hd_struct, dev) +#define kobj_to_dev(k) container_of((k), struct device, kobj) +#define dev_to_disk(device) container_of((device), struct gendisk, part0.__dev) +#define dev_to_part(device) container_of((device), struct hd_struct, __dev) 
+#define disk_to_dev(disk) (&(disk)->part0.__dev) +#define part_to_dev(part) (&((part)->__dev)) extern struct device_type part_type; extern struct kobject *block_depr; @@ -55,6 +58,9 @@ enum { UNIXWARE_PARTITION = 0x63, /* Same as GNU_HURD and SCO Unix */ }; +#define DISK_MAX_PARTS 256 +#define DISK_NAME_LEN 32 + #include <linux/major.h> #include <linux/device.h> #include <linux/smp.h> @@ -87,7 +93,7 @@ struct disk_stats { struct hd_struct { sector_t start_sect; sector_t nr_sects; - struct device dev; + struct device __dev; struct kobject *holder_dir; int policy, partno; #ifdef CONFIG_FAIL_MAKE_REQUEST @@ -100,6 +106,7 @@ struct hd_struct { #else struct disk_stats dkstats; #endif + struct rcu_head rcu_head; }; #define GENHD_FL_REMOVABLE 1 @@ -108,100 +115,148 @@ struct hd_struct { #define GENHD_FL_CD 8 #define GENHD_FL_UP 16 #define GENHD_FL_SUPPRESS_PARTITION_INFO 32 -#define GENHD_FL_FAIL 64 +#define GENHD_FL_EXT_DEVT 64 /* allow extended devt */ + +#define BLK_SCSI_MAX_CMDS (256) +#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8)) + +struct blk_scsi_cmd_filter { + unsigned long read_ok[BLK_SCSI_CMD_PER_LONG]; + unsigned long write_ok[BLK_SCSI_CMD_PER_LONG]; + struct kobject kobj; +}; + +struct disk_part_tbl { + struct rcu_head rcu_head; + int len; + struct hd_struct *part[]; +}; struct gendisk { + /* major, first_minor and minors are input parameters only, + * don't use directly. Use disk_devt() and disk_max_parts(). + */ int major; /* major number of driver */ int first_minor; int minors; /* maximum number of minors, =1 for * disks that can't be partitioned. */ - char disk_name[32]; /* name of major driver */ - struct hd_struct **part; /* [indexed by minor] */ + + char disk_name[DISK_NAME_LEN]; /* name of major driver */ + + /* Array of pointers to partitions indexed by partno. + * Protected with matching bdev lock but stat and other + * non-critical accesses use RCU. Always access through + * helpers. + */ + struct disk_part_tbl *part_tbl; + struct hd_struct part0; + struct block_device_operations *fops; struct request_queue *queue; void *private_data; - sector_t capacity; int flags; struct device *driverfs_dev; // FIXME: remove - struct device dev; - struct kobject *holder_dir; struct kobject *slave_dir; struct timer_rand_state *random; - int policy; atomic_t sync_io; /* RAID */ - unsigned long stamp; - int in_flight; -#ifdef CONFIG_SMP - struct disk_stats *dkstats; -#else - struct disk_stats dkstats; -#endif struct work_struct async_notify; #ifdef CONFIG_BLK_DEV_INTEGRITY struct blk_integrity *integrity; #endif + int node_id; }; -/* - * Macros to operate on percpu disk statistics: - * - * The __ variants should only be called in critical sections. The full - * variants disable/enable preemption. 
- */ -static inline struct hd_struct *get_part(struct gendisk *gendiskp, - sector_t sector) +static inline struct gendisk *part_to_disk(struct hd_struct *part) { - struct hd_struct *part; - int i; - for (i = 0; i < gendiskp->minors - 1; i++) { - part = gendiskp->part[i]; - if (part && part->start_sect <= sector - && sector < part->start_sect + part->nr_sects) - return part; + if (likely(part)) { + if (part->partno) + return dev_to_disk(part_to_dev(part)->parent); + else + return dev_to_disk(part_to_dev(part)); } return NULL; } -#ifdef CONFIG_SMP -#define __disk_stat_add(gendiskp, field, addnd) \ - (per_cpu_ptr(gendiskp->dkstats, smp_processor_id())->field += addnd) +static inline int disk_max_parts(struct gendisk *disk) +{ + if (disk->flags & GENHD_FL_EXT_DEVT) + return DISK_MAX_PARTS; + return disk->minors; +} -#define disk_stat_read(gendiskp, field) \ -({ \ - typeof(gendiskp->dkstats->field) res = 0; \ - int i; \ - for_each_possible_cpu(i) \ - res += per_cpu_ptr(gendiskp->dkstats, i)->field; \ - res; \ -}) +static inline bool disk_partitionable(struct gendisk *disk) +{ + return disk_max_parts(disk) > 1; +} -static inline void disk_stat_set_all(struct gendisk *gendiskp, int value) { - int i; +static inline dev_t disk_devt(struct gendisk *disk) +{ + return disk_to_dev(disk)->devt; +} - for_each_possible_cpu(i) - memset(per_cpu_ptr(gendiskp->dkstats, i), value, - sizeof(struct disk_stats)); -} +static inline dev_t part_devt(struct hd_struct *part) +{ + return part_to_dev(part)->devt; +} -#define __part_stat_add(part, field, addnd) \ - (per_cpu_ptr(part->dkstats, smp_processor_id())->field += addnd) +extern struct hd_struct *disk_get_part(struct gendisk *disk, int partno); -#define __all_stat_add(gendiskp, part, field, addnd, sector) \ -({ \ - if (part) \ - __part_stat_add(part, field, addnd); \ - __disk_stat_add(gendiskp, field, addnd); \ -}) +static inline void disk_put_part(struct hd_struct *part) +{ + if (likely(part)) + put_device(part_to_dev(part)); +} + +/* + * Smarter partition iterator without context limits. + */ +#define DISK_PITER_REVERSE (1 << 0) /* iterate in the reverse direction */ +#define DISK_PITER_INCL_EMPTY (1 << 1) /* include 0-sized parts */ +#define DISK_PITER_INCL_PART0 (1 << 2) /* include partition 0 */ + +struct disk_part_iter { + struct gendisk *disk; + struct hd_struct *part; + int idx; + unsigned int flags; +}; + +extern void disk_part_iter_init(struct disk_part_iter *piter, + struct gendisk *disk, unsigned int flags); +extern struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter); +extern void disk_part_iter_exit(struct disk_part_iter *piter); + +extern struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, + sector_t sector); + +/* + * Macros to operate on percpu disk statistics: + * + * {disk|part|all}_stat_{add|sub|inc|dec}() modify the stat counters + * and should be called between disk_stat_lock() and + * disk_stat_unlock(). + * + * part_stat_read() can be called at any time. + * + * part_stat_{add|set_all}() and {init|free}_part_stats are for + * internal use only. 
+ */ +#ifdef CONFIG_SMP +#define part_stat_lock() ({ rcu_read_lock(); get_cpu(); }) +#define part_stat_unlock() do { put_cpu(); rcu_read_unlock(); } while (0) + +#define __part_stat_add(cpu, part, field, addnd) \ + (per_cpu_ptr((part)->dkstats, (cpu))->field += (addnd)) #define part_stat_read(part, field) \ ({ \ - typeof(part->dkstats->field) res = 0; \ + typeof((part)->dkstats->field) res = 0; \ int i; \ for_each_possible_cpu(i) \ - res += per_cpu_ptr(part->dkstats, i)->field; \ + res += per_cpu_ptr((part)->dkstats, i)->field; \ res; \ }) @@ -213,171 +268,107 @@ static inline void part_stat_set_all(struct hd_struct *part, int value) memset(per_cpu_ptr(part->dkstats, i), value, sizeof(struct disk_stats)); } - -#else /* !CONFIG_SMP */ -#define __disk_stat_add(gendiskp, field, addnd) \ - (gendiskp->dkstats.field += addnd) -#define disk_stat_read(gendiskp, field) (gendiskp->dkstats.field) -static inline void disk_stat_set_all(struct gendisk *gendiskp, int value) +static inline int init_part_stats(struct hd_struct *part) { - memset(&gendiskp->dkstats, value, sizeof (struct disk_stats)); + part->dkstats = alloc_percpu(struct disk_stats); + if (!part->dkstats) + return 0; + return 1; } -#define __part_stat_add(part, field, addnd) \ - (part->dkstats.field += addnd) - -#define __all_stat_add(gendiskp, part, field, addnd, sector) \ -({ \ - if (part) \ - part->dkstats.field += addnd; \ - __disk_stat_add(gendiskp, field, addnd); \ -}) - -#define part_stat_read(part, field) (part->dkstats.field) - -static inline void part_stat_set_all(struct hd_struct *part, int value) +static inline void free_part_stats(struct hd_struct *part) { - memset(&part->dkstats, value, sizeof(struct disk_stats)); + free_percpu(part->dkstats); } -#endif /* CONFIG_SMP */ +#else /* !CONFIG_SMP */ +#define part_stat_lock() ({ rcu_read_lock(); 0; }) +#define part_stat_unlock() rcu_read_unlock() -#define disk_stat_add(gendiskp, field, addnd) \ - do { \ - preempt_disable(); \ - __disk_stat_add(gendiskp, field, addnd); \ - preempt_enable(); \ - } while (0) - -#define __disk_stat_dec(gendiskp, field) __disk_stat_add(gendiskp, field, -1) -#define disk_stat_dec(gendiskp, field) disk_stat_add(gendiskp, field, -1) - -#define __disk_stat_inc(gendiskp, field) __disk_stat_add(gendiskp, field, 1) -#define disk_stat_inc(gendiskp, field) disk_stat_add(gendiskp, field, 1) - -#define __disk_stat_sub(gendiskp, field, subnd) \ - __disk_stat_add(gendiskp, field, -subnd) -#define disk_stat_sub(gendiskp, field, subnd) \ - disk_stat_add(gendiskp, field, -subnd) - -#define part_stat_add(gendiskp, field, addnd) \ - do { \ - preempt_disable(); \ - __part_stat_add(gendiskp, field, addnd);\ - preempt_enable(); \ - } while (0) - -#define __part_stat_dec(gendiskp, field) __part_stat_add(gendiskp, field, -1) -#define part_stat_dec(gendiskp, field) part_stat_add(gendiskp, field, -1) - -#define __part_stat_inc(gendiskp, field) __part_stat_add(gendiskp, field, 1) -#define part_stat_inc(gendiskp, field) part_stat_add(gendiskp, field, 1) - -#define __part_stat_sub(gendiskp, field, subnd) \ - __part_stat_add(gendiskp, field, -subnd) -#define part_stat_sub(gendiskp, field, subnd) \ - part_stat_add(gendiskp, field, -subnd) - -#define all_stat_add(gendiskp, part, field, addnd, sector) \ - do { \ - preempt_disable(); \ - __all_stat_add(gendiskp, part, field, addnd, sector); \ - preempt_enable(); \ - } while (0) - -#define __all_stat_dec(gendiskp, field, sector) \ - __all_stat_add(gendiskp, field, -1, sector) -#define all_stat_dec(gendiskp, field, sector) \ - 
all_stat_add(gendiskp, field, -1, sector) - -#define __all_stat_inc(gendiskp, part, field, sector) \ - __all_stat_add(gendiskp, part, field, 1, sector) -#define all_stat_inc(gendiskp, part, field, sector) \ - all_stat_add(gendiskp, part, field, 1, sector) - -#define __all_stat_sub(gendiskp, part, field, subnd, sector) \ - __all_stat_add(gendiskp, part, field, -subnd, sector) -#define all_stat_sub(gendiskp, part, field, subnd, sector) \ - all_stat_add(gendiskp, part, field, -subnd, sector) - -/* Inlines to alloc and free disk stats in struct gendisk */ -#ifdef CONFIG_SMP -static inline int init_disk_stats(struct gendisk *disk) -{ - disk->dkstats = alloc_percpu(struct disk_stats); - if (!disk->dkstats) - return 0; - return 1; -} +#define __part_stat_add(cpu, part, field, addnd) \ + ((part)->dkstats.field += addnd) + +#define part_stat_read(part, field) ((part)->dkstats.field) -static inline void free_disk_stats(struct gendisk *disk) +static inline void part_stat_set_all(struct hd_struct *part, int value) { - free_percpu(disk->dkstats); + memset(&part->dkstats, value, sizeof(struct disk_stats)); } static inline int init_part_stats(struct hd_struct *part) { - part->dkstats = alloc_percpu(struct disk_stats); - if (!part->dkstats) - return 0; return 1; } static inline void free_part_stats(struct hd_struct *part) { - free_percpu(part->dkstats); -} - -#else /* CONFIG_SMP */ -static inline int init_disk_stats(struct gendisk *disk) -{ - return 1; } -static inline void free_disk_stats(struct gendisk *disk) -{ -} +#endif /* CONFIG_SMP */ -static inline int init_part_stats(struct hd_struct *part) +#define part_stat_add(cpu, part, field, addnd) do { \ + __part_stat_add((cpu), (part), field, addnd); \ + if ((part)->partno) \ + __part_stat_add((cpu), &part_to_disk((part))->part0, \ + field, addnd); \ +} while (0) + +#define part_stat_dec(cpu, gendiskp, field) \ + part_stat_add(cpu, gendiskp, field, -1) +#define part_stat_inc(cpu, gendiskp, field) \ + part_stat_add(cpu, gendiskp, field, 1) +#define part_stat_sub(cpu, gendiskp, field, subnd) \ + part_stat_add(cpu, gendiskp, field, -subnd) + +static inline void part_inc_in_flight(struct hd_struct *part) { - return 1; + part->in_flight++; + if (part->partno) + part_to_disk(part)->part0.in_flight++; } -static inline void free_part_stats(struct hd_struct *part) +static inline void part_dec_in_flight(struct hd_struct *part) { + part->in_flight--; + if (part->partno) + part_to_disk(part)->part0.in_flight--; } -#endif /* CONFIG_SMP */ /* drivers/block/ll_rw_blk.c */ -extern void disk_round_stats(struct gendisk *disk); -extern void part_round_stats(struct hd_struct *part); +extern void part_round_stats(int cpu, struct hd_struct *part); /* drivers/block/genhd.c */ extern int get_blkdev_list(char *, int); extern void add_disk(struct gendisk *disk); extern void del_gendisk(struct gendisk *gp); extern void unlink_gendisk(struct gendisk *gp); -extern struct gendisk *get_gendisk(dev_t dev, int *part); +extern struct gendisk *get_gendisk(dev_t dev, int *partno); +extern struct block_device *bdget_disk(struct gendisk *disk, int partno); extern void set_device_ro(struct block_device *bdev, int flag); extern void set_disk_ro(struct gendisk *disk, int flag); +static inline int get_disk_ro(struct gendisk *disk) +{ + return disk->part0.policy; +} + /* drivers/char/random.c */ extern void add_disk_randomness(struct gendisk *disk); extern void rand_initialize_disk(struct gendisk *disk); static inline sector_t get_start_sect(struct block_device *bdev) { - return bdev->bd_contains 
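For context, a minimal sketch of how a block-layer user could charge a completed read to a partition with the per-partition accounting helpers introduced above. The function name my_account_read(), the caller's request fields (rq->rq_disk, rq->sector) and the surrounding driver are illustrative assumptions, not part of this patch.

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>

static void my_account_read(struct request *rq, unsigned int bytes)
{
        struct hd_struct *part;
        int cpu;

        cpu = part_stat_lock();                 /* rcu_read_lock() + get_cpu() */
        part = disk_map_sector_rcu(rq->rq_disk, rq->sector);

        part_stat_inc(cpu, part, ios[READ]);    /* folded into disk->part0 too */
        part_stat_add(cpu, part, sectors[READ], bytes >> 9);
        part_round_stats(cpu, part);
        part_dec_in_flight(part);

        part_stat_unlock();
}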
== bdev ? 0 : bdev->bd_part->start_sect; + return bdev->bd_part->start_sect; } static inline sector_t get_capacity(struct gendisk *disk) { - return disk->capacity; + return disk->part0.nr_sects; } static inline void set_capacity(struct gendisk *disk, sector_t size) { - disk->capacity = size; + disk->part0.nr_sects = size; } #ifdef CONFIG_SOLARIS_X86_PARTITION @@ -527,9 +518,12 @@ struct unixware_disklabel { #define ADDPART_FLAG_RAID 1 #define ADDPART_FLAG_WHOLEDISK 2 -extern dev_t blk_lookup_devt(const char *name, int part); -extern char *disk_name (struct gendisk *hd, int part, char *buf); +extern int blk_alloc_devt(struct hd_struct *part, dev_t *devt); +extern void blk_free_devt(dev_t devt); +extern dev_t blk_lookup_devt(const char *name, int partno); +extern char *disk_name (struct gendisk *hd, int partno, char *buf); +extern int disk_expand_part_tbl(struct gendisk *disk, int target); extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev); extern int __must_check add_partition(struct gendisk *, int, sector_t, sector_t, int); extern void delete_partition(struct gendisk *, int); @@ -546,16 +540,23 @@ extern void blk_register_region(dev_t devt, unsigned long range, void *data); extern void blk_unregister_region(dev_t devt, unsigned long range); -static inline struct block_device *bdget_disk(struct gendisk *disk, int index) -{ - return bdget(MKDEV(disk->major, disk->first_minor) + index); -} +extern ssize_t part_size_show(struct device *dev, + struct device_attribute *attr, char *buf); +extern ssize_t part_stat_show(struct device *dev, + struct device_attribute *attr, char *buf); +#ifdef CONFIG_FAIL_MAKE_REQUEST +extern ssize_t part_fail_show(struct device *dev, + struct device_attribute *attr, char *buf); +extern ssize_t part_fail_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count); +#endif /* CONFIG_FAIL_MAKE_REQUEST */ #else /* CONFIG_BLOCK */ static inline void printk_all_partitions(void) { } -static inline dev_t blk_lookup_devt(const char *name, int part) +static inline dev_t blk_lookup_devt(const char *name, int partno) { dev_t devt = MKDEV(0, 0); return devt; diff --git a/include/linux/gfs2_ondisk.h b/include/linux/gfs2_ondisk.h index c3c19f926e6..14d0df0b574 100644 --- a/include/linux/gfs2_ondisk.h +++ b/include/linux/gfs2_ondisk.h @@ -118,7 +118,11 @@ struct gfs2_sb { char sb_lockproto[GFS2_LOCKNAME_LEN]; char sb_locktable[GFS2_LOCKNAME_LEN]; - /* In gfs1, quota and license dinodes followed */ + + struct gfs2_inum __pad3; /* Was quota inode in gfs1 */ + struct gfs2_inum __pad4; /* Was licence inode in gfs1 */ +#define GFS2_HAS_UUID 1 + __u8 sb_uuid[16]; /* The UUID, maybe 0 for backwards compat */ }; /* diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 6d93dce61cb..2f245fe63bd 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -47,14 +47,22 @@ enum hrtimer_restart { * HRTIMER_CB_IRQSAFE: Callback may run in hardirq context * HRTIMER_CB_IRQSAFE_NO_RESTART: Callback may run in hardirq context and * does not restart the timer - * HRTIMER_CB_IRQSAFE_NO_SOFTIRQ: Callback must run in hardirq context - * Special mode for tick emultation + * HRTIMER_CB_IRQSAFE_PERCPU: Callback must run in hardirq context + * Special mode for tick emulation and + * scheduler timer. Such timers are per + * cpu and not allowed to be migrated on + * cpu unplug. 
+ * HRTIMER_CB_IRQSAFE_UNLOCKED: Callback should run in hardirq context + * with timer->base lock unlocked + * used for timers which call wakeup to + * avoid lock order problems with rq->lock */ enum hrtimer_cb_mode { HRTIMER_CB_SOFTIRQ, HRTIMER_CB_IRQSAFE, HRTIMER_CB_IRQSAFE_NO_RESTART, - HRTIMER_CB_IRQSAFE_NO_SOFTIRQ, + HRTIMER_CB_IRQSAFE_PERCPU, + HRTIMER_CB_IRQSAFE_UNLOCKED, }; /* @@ -67,9 +75,10 @@ enum hrtimer_cb_mode { * 0x02 callback function running * 0x04 callback pending (high resolution mode) * - * Special case: + * Special cases: * 0x03 callback function running and enqueued * (was requeued on another CPU) + * 0x09 timer was migrated on CPU hotunplug * The "callback function running and enqueued" status is only possible on * SMP. It happens for example when a posix timer expired and the callback * queued a signal. Between dropping the lock which protects the posix timer @@ -87,6 +96,7 @@ enum hrtimer_cb_mode { #define HRTIMER_STATE_ENQUEUED 0x01 #define HRTIMER_STATE_CALLBACK 0x02 #define HRTIMER_STATE_PENDING 0x04 +#define HRTIMER_STATE_MIGRATE 0x08 /** * struct hrtimer - the basic hrtimer structure diff --git a/include/linux/ide.h b/include/linux/ide.h index 1524829f73f..6514db8fd2e 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h @@ -366,7 +366,9 @@ enum { /* Currently on a filemark */ IDE_AFLAG_FILEMARK = (1 << 25), /* 0 = no tape is loaded, so we don't rewind after ejecting */ - IDE_AFLAG_MEDIUM_PRESENT = (1 << 26) + IDE_AFLAG_MEDIUM_PRESENT = (1 << 26), + + IDE_AFLAG_NO_AUTOCLOSE = (1 << 27), }; struct ide_drive_s { diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 7f4df7c7659..14126bc3664 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -471,6 +471,11 @@ struct ieee80211s_hdr { u8 eaddr3[6]; } __attribute__ ((packed)); +/* Mesh flags */ +#define MESH_FLAGS_AE_A4 0x1 +#define MESH_FLAGS_AE_A5_A6 0x2 +#define MESH_FLAGS_PS_DEEP 0x4 + /** * struct ieee80211_quiet_ie * @@ -643,6 +648,9 @@ struct ieee80211_mgmt { } u; } __attribute__ ((packed)); +/* mgmt header + 1 byte category code */ +#define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u) + /* Control frames */ struct ieee80211_rts { @@ -708,12 +716,13 @@ struct ieee80211_ht_addt_info { /* 802.11n HT capabilities masks */ #define IEEE80211_HT_CAP_SUP_WIDTH 0x0002 -#define IEEE80211_HT_CAP_MIMO_PS 0x000C +#define IEEE80211_HT_CAP_SM_PS 0x000C #define IEEE80211_HT_CAP_GRN_FLD 0x0010 #define IEEE80211_HT_CAP_SGI_20 0x0020 #define IEEE80211_HT_CAP_SGI_40 0x0040 #define IEEE80211_HT_CAP_DELAY_BA 0x0400 #define IEEE80211_HT_CAP_MAX_AMSDU 0x0800 +#define IEEE80211_HT_CAP_DSSSCCK40 0x1000 /* 802.11n HT capability AMPDU settings */ #define IEEE80211_HT_CAP_AMPDU_FACTOR 0x03 #define IEEE80211_HT_CAP_AMPDU_DENSITY 0x1C @@ -736,11 +745,26 @@ struct ieee80211_ht_addt_info { #define IEEE80211_HT_IE_NON_GF_STA_PRSNT 0x0004 #define IEEE80211_HT_IE_NON_HT_STA_PRSNT 0x0010 -/* MIMO Power Save Modes */ -#define WLAN_HT_CAP_MIMO_PS_STATIC 0 -#define WLAN_HT_CAP_MIMO_PS_DYNAMIC 1 -#define WLAN_HT_CAP_MIMO_PS_INVALID 2 -#define WLAN_HT_CAP_MIMO_PS_DISABLED 3 +/* block-ack parameters */ +#define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002 +#define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C +#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFA0 +#define IEEE80211_DELBA_PARAM_TID_MASK 0xF000 +#define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800 + +/* + * A-PMDU buffer sizes + * According to IEEE802.11n spec size varies from 8K to 64K (in powers of 2) + */ +#define 
IEEE80211_MIN_AMPDU_BUF 0x8 +#define IEEE80211_MAX_AMPDU_BUF 0x40 + + +/* Spatial Multiplexing Power Save Modes */ +#define WLAN_HT_CAP_SM_PS_STATIC 0 +#define WLAN_HT_CAP_SM_PS_DYNAMIC 1 +#define WLAN_HT_CAP_SM_PS_INVALID 2 +#define WLAN_HT_CAP_SM_PS_DISABLED 3 /* Authentication algorithms */ #define WLAN_AUTH_OPEN 0 diff --git a/include/linux/if.h b/include/linux/if.h index 5c9d1fa93fe..65246846c84 100644 --- a/include/linux/if.h +++ b/include/linux/if.h @@ -24,6 +24,7 @@ #include <linux/compiler.h> /* for "__user" et al */ #define IFNAMSIZ 16 +#define IFALIASZ 256 #include <linux/hdlc/ioctl.h> /* Standard interface flags (netdevice->flags). */ diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h index e157c1399b6..bf1a53b2682 100644 --- a/include/linux/if_ether.h +++ b/include/linux/if_ether.h @@ -56,6 +56,7 @@ #define ETH_P_DIAG 0x6005 /* DEC Diagnostics */ #define ETH_P_CUST 0x6006 /* DEC Customer use */ #define ETH_P_SCA 0x6007 /* DEC Systems Comms Arch */ +#define ETH_P_TEB 0x6558 /* Trans Ether Bridging */ #define ETH_P_RARP 0x8035 /* Reverse Addr Res packet */ #define ETH_P_ATALK 0x809B /* Appletalk DDP */ #define ETH_P_AARP 0x80F3 /* Appletalk AARP */ @@ -74,8 +75,10 @@ #define ETH_P_ATMFATE 0x8884 /* Frame-based ATM Transport * over Ethernet */ +#define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */ #define ETH_P_AOE 0x88A2 /* ATA over Ethernet */ #define ETH_P_TIPC 0x88CA /* TIPC */ +#define ETH_P_EDSA 0xDADA /* Ethertype DSA [ NOT AN OFFICIALLY REGISTERED ID ] */ /* * Non DIX types. Won't clash for 1500 types. @@ -99,6 +102,9 @@ #define ETH_P_ECONET 0x0018 /* Acorn Econet */ #define ETH_P_HDLC 0x0019 /* HDLC frames */ #define ETH_P_ARCNET 0x001A /* 1A for ArcNet :-) */ +#define ETH_P_DSA 0x001B /* Distributed Switch Arch. */ +#define ETH_P_TRAILER 0x001C /* Trailer switch tagging */ +#define ETH_P_PHONET 0x00F5 /* Nokia Phonet frames */ /* * This is an Ethernet frame header. diff --git a/include/linux/if_link.h b/include/linux/if_link.h index 84c3492ae5c..f9032c88716 100644 --- a/include/linux/if_link.h +++ b/include/linux/if_link.h @@ -79,6 +79,7 @@ enum IFLA_LINKINFO, #define IFLA_LINKINFO IFLA_LINKINFO IFLA_NET_NS_PID, + IFLA_IFALIAS, __IFLA_MAX }; diff --git a/include/linux/if_phonet.h b/include/linux/if_phonet.h new file mode 100644 index 00000000000..d70034bcec0 --- /dev/null +++ b/include/linux/if_phonet.h @@ -0,0 +1,19 @@ +/* + * File: if_phonet.h + * + * Phonet interface kernel definitions + * + * Copyright (C) 2008 Nokia Corporation. All rights reserved. 
+ */ +#ifndef LINUX_IF_PHONET_H +#define LINUX_IF_PHONET_H + +#define PHONET_MIN_MTU 6 /* pn_length = 0 */ +#define PHONET_MAX_MTU 65541 /* pn_length = 0xffff */ +#define PHONET_DEV_MTU PHONET_MAX_MTU + +#ifdef __KERNEL__ +extern struct header_ops phonet_header_ops; +#endif + +#endif diff --git a/include/linux/if_tunnel.h b/include/linux/if_tunnel.h index d4efe401470..aeab2cb32a9 100644 --- a/include/linux/if_tunnel.h +++ b/include/linux/if_tunnel.h @@ -2,6 +2,7 @@ #define _IF_TUNNEL_H_ #include <linux/types.h> +#include <linux/ip.h> #define SIOCGETTUNNEL (SIOCDEVPRIVATE + 0) #define SIOCADDTUNNEL (SIOCDEVPRIVATE + 1) @@ -47,4 +48,22 @@ struct ip_tunnel_prl { /* PRL flags */ #define PRL_DEFAULT 0x0001 +enum +{ + IFLA_GRE_UNSPEC, + IFLA_GRE_LINK, + IFLA_GRE_IFLAGS, + IFLA_GRE_OFLAGS, + IFLA_GRE_IKEY, + IFLA_GRE_OKEY, + IFLA_GRE_LOCAL, + IFLA_GRE_REMOTE, + IFLA_GRE_TTL, + IFLA_GRE_TOS, + IFLA_GRE_PMTUDISC, + __IFLA_GRE_MAX, +}; + +#define IFLA_GRE_MAX (__IFLA_GRE_MAX - 1) + #endif /* _IF_TUNNEL_H_ */ diff --git a/include/linux/in.h b/include/linux/in.h index 4065313cd7e..db458beef19 100644 --- a/include/linux/in.h +++ b/include/linux/in.h @@ -75,6 +75,7 @@ struct in_addr { #define IP_IPSEC_POLICY 16 #define IP_XFRM_POLICY 17 #define IP_PASSSEC 18 +#define IP_TRANSPARENT 19 /* BSD compatibility */ #define IP_RECVRETOPTS IP_RETOPTS diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index c6f51ad52d5..06fcdb45106 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h @@ -25,6 +25,7 @@ struct in_device struct in_ifaddr *ifa_list; /* IP ifaddr chain */ rwlock_t mc_list_lock; struct ip_mc_list *mc_list; /* IP multicast filter chain */ + int mc_count; /* Number of installed mcasts */ spinlock_t mc_tomb_lock; struct ip_mc_list *mc_tomb; unsigned long mr_v1_seen; diff --git a/include/linux/ip_vs.h b/include/linux/ip_vs.h index ec6eb49af2d..0f434a28fb5 100644 --- a/include/linux/ip_vs.h +++ b/include/linux/ip_vs.h @@ -242,4 +242,164 @@ struct ip_vs_daemon_user { int syncid; }; +/* + * + * IPVS Generic Netlink interface definitions + * + */ + +/* Generic Netlink family info */ + +#define IPVS_GENL_NAME "IPVS" +#define IPVS_GENL_VERSION 0x1 + +struct ip_vs_flags { + __be32 flags; + __be32 mask; +}; + +/* Generic Netlink command attributes */ +enum { + IPVS_CMD_UNSPEC = 0, + + IPVS_CMD_NEW_SERVICE, /* add service */ + IPVS_CMD_SET_SERVICE, /* modify service */ + IPVS_CMD_DEL_SERVICE, /* delete service */ + IPVS_CMD_GET_SERVICE, /* get service info */ + + IPVS_CMD_NEW_DEST, /* add destination */ + IPVS_CMD_SET_DEST, /* modify destination */ + IPVS_CMD_DEL_DEST, /* delete destination */ + IPVS_CMD_GET_DEST, /* get destination info */ + + IPVS_CMD_NEW_DAEMON, /* start sync daemon */ + IPVS_CMD_DEL_DAEMON, /* stop sync daemon */ + IPVS_CMD_GET_DAEMON, /* get sync daemon status */ + + IPVS_CMD_SET_CONFIG, /* set config settings */ + IPVS_CMD_GET_CONFIG, /* get config settings */ + + IPVS_CMD_SET_INFO, /* only used in GET_INFO reply */ + IPVS_CMD_GET_INFO, /* get general IPVS info */ + + IPVS_CMD_ZERO, /* zero all counters and stats */ + IPVS_CMD_FLUSH, /* flush services and dests */ + + __IPVS_CMD_MAX, +}; + +#define IPVS_CMD_MAX (__IPVS_CMD_MAX - 1) + +/* Attributes used in the first level of commands */ +enum { + IPVS_CMD_ATTR_UNSPEC = 0, + IPVS_CMD_ATTR_SERVICE, /* nested service attribute */ + IPVS_CMD_ATTR_DEST, /* nested destination attribute */ + IPVS_CMD_ATTR_DAEMON, /* nested sync daemon attribute */ + IPVS_CMD_ATTR_TIMEOUT_TCP, /* TCP connection timeout */ + 
IPVS_CMD_ATTR_TIMEOUT_TCP_FIN, /* TCP FIN wait timeout */ + IPVS_CMD_ATTR_TIMEOUT_UDP, /* UDP timeout */ + __IPVS_CMD_ATTR_MAX, +}; + +#define IPVS_CMD_ATTR_MAX (__IPVS_SVC_ATTR_MAX - 1) + +/* + * Attributes used to describe a service + * + * Used inside nested attribute IPVS_CMD_ATTR_SERVICE + */ +enum { + IPVS_SVC_ATTR_UNSPEC = 0, + IPVS_SVC_ATTR_AF, /* address family */ + IPVS_SVC_ATTR_PROTOCOL, /* virtual service protocol */ + IPVS_SVC_ATTR_ADDR, /* virtual service address */ + IPVS_SVC_ATTR_PORT, /* virtual service port */ + IPVS_SVC_ATTR_FWMARK, /* firewall mark of service */ + + IPVS_SVC_ATTR_SCHED_NAME, /* name of scheduler */ + IPVS_SVC_ATTR_FLAGS, /* virtual service flags */ + IPVS_SVC_ATTR_TIMEOUT, /* persistent timeout */ + IPVS_SVC_ATTR_NETMASK, /* persistent netmask */ + + IPVS_SVC_ATTR_STATS, /* nested attribute for service stats */ + __IPVS_SVC_ATTR_MAX, +}; + +#define IPVS_SVC_ATTR_MAX (__IPVS_SVC_ATTR_MAX - 1) + +/* + * Attributes used to describe a destination (real server) + * + * Used inside nested attribute IPVS_CMD_ATTR_DEST + */ +enum { + IPVS_DEST_ATTR_UNSPEC = 0, + IPVS_DEST_ATTR_ADDR, /* real server address */ + IPVS_DEST_ATTR_PORT, /* real server port */ + + IPVS_DEST_ATTR_FWD_METHOD, /* forwarding method */ + IPVS_DEST_ATTR_WEIGHT, /* destination weight */ + + IPVS_DEST_ATTR_U_THRESH, /* upper threshold */ + IPVS_DEST_ATTR_L_THRESH, /* lower threshold */ + + IPVS_DEST_ATTR_ACTIVE_CONNS, /* active connections */ + IPVS_DEST_ATTR_INACT_CONNS, /* inactive connections */ + IPVS_DEST_ATTR_PERSIST_CONNS, /* persistent connections */ + + IPVS_DEST_ATTR_STATS, /* nested attribute for dest stats */ + __IPVS_DEST_ATTR_MAX, +}; + +#define IPVS_DEST_ATTR_MAX (__IPVS_DEST_ATTR_MAX - 1) + +/* + * Attributes describing a sync daemon + * + * Used inside nested attribute IPVS_CMD_ATTR_DAEMON + */ +enum { + IPVS_DAEMON_ATTR_UNSPEC = 0, + IPVS_DAEMON_ATTR_STATE, /* sync daemon state (master/backup) */ + IPVS_DAEMON_ATTR_MCAST_IFN, /* multicast interface name */ + IPVS_DAEMON_ATTR_SYNC_ID, /* SyncID we belong to */ + __IPVS_DAEMON_ATTR_MAX, +}; + +#define IPVS_DAEMON_ATTR_MAX (__IPVS_DAEMON_ATTR_MAX - 1) + +/* + * Attributes used to describe service or destination entry statistics + * + * Used inside nested attributes IPVS_SVC_ATTR_STATS and IPVS_DEST_ATTR_STATS + */ +enum { + IPVS_STATS_ATTR_UNSPEC = 0, + IPVS_STATS_ATTR_CONNS, /* connections scheduled */ + IPVS_STATS_ATTR_INPKTS, /* incoming packets */ + IPVS_STATS_ATTR_OUTPKTS, /* outgoing packets */ + IPVS_STATS_ATTR_INBYTES, /* incoming bytes */ + IPVS_STATS_ATTR_OUTBYTES, /* outgoing bytes */ + + IPVS_STATS_ATTR_CPS, /* current connection rate */ + IPVS_STATS_ATTR_INPPS, /* current in packet rate */ + IPVS_STATS_ATTR_OUTPPS, /* current out packet rate */ + IPVS_STATS_ATTR_INBPS, /* current in byte rate */ + IPVS_STATS_ATTR_OUTBPS, /* current out byte rate */ + __IPVS_STATS_ATTR_MAX, +}; + +#define IPVS_STATS_ATTR_MAX (__IPVS_STATS_ATTR_MAX - 1) + +/* Attributes used in response to IPVS_CMD_GET_INFO command */ +enum { + IPVS_INFO_ATTR_UNSPEC = 0, + IPVS_INFO_ATTR_VERSION, /* IPVS version number */ + IPVS_INFO_ATTR_CONN_TAB_SIZE, /* size of connection hash table */ + __IPVS_INFO_ATTR_MAX, +}; + +#define IPVS_INFO_ATTR_MAX (__IPVS_INFO_ATTR_MAX - 1) + #endif /* _IP_VS_H */ diff --git a/include/linux/isdn_ppp.h b/include/linux/isdn_ppp.h index 8687a7dc063..4c218ee7587 100644 --- a/include/linux/isdn_ppp.h +++ b/include/linux/isdn_ppp.h @@ -157,7 +157,7 @@ typedef struct { typedef struct { int mp_mrru; /* unused */ - struct 
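As a rough illustration of how the nested layout above is consumed, a sketch of unpacking an IPVS_CMD_ATTR_SERVICE blob with the standard netlink helpers. The function name, the NULL policy and the validation rule (a firewall-mark service needs no af/addr/port triple) are assumptions for illustration, not code from this patch.

#include <linux/errno.h>
#include <net/netlink.h>
#include <linux/ip_vs.h>

static int my_parse_service(struct nlattr *svc_attr)
{
        struct nlattr *a[IPVS_SVC_ATTR_MAX + 1];

        /* unpack the attributes nested inside IPVS_CMD_ATTR_SERVICE */
        if (nla_parse_nested(a, IPVS_SVC_ATTR_MAX, svc_attr, NULL))
                return -EINVAL;

        /* either a firewall mark or the full af/addr/port triple */
        if (!a[IPVS_SVC_ATTR_FWMARK] &&
            (!a[IPVS_SVC_ATTR_AF] || !a[IPVS_SVC_ATTR_ADDR] ||
             !a[IPVS_SVC_ATTR_PORT]))
                return -EINVAL;

        return 0;
}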
sk_buff * frags; /* fragments sl list -- use skb->next */ + struct sk_buff_head frags; /* fragments sl list */ long frames; /* number of frames in the frame list */ unsigned int seq; /* last processed packet seq #: any packets * with smaller seq # will be dropped diff --git a/include/linux/klist.h b/include/linux/klist.h index 06c338ef7f1..8ea98db223e 100644 --- a/include/linux/klist.h +++ b/include/linux/klist.h @@ -38,7 +38,7 @@ extern void klist_init(struct klist *k, void (*get)(struct klist_node *), void (*put)(struct klist_node *)); struct klist_node { - struct klist *n_klist; + void *n_klist; /* never access directly */ struct list_head n_node; struct kref n_ref; struct completion n_removed; @@ -57,7 +57,6 @@ extern int klist_node_attached(struct klist_node *n); struct klist_iter { struct klist *i_klist; - struct list_head *i_head; struct klist_node *i_cur; }; diff --git a/include/linux/libata.h b/include/linux/libata.h index 225bfc5bd9e..947cf84e555 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -146,6 +146,7 @@ enum { ATA_DFLAG_SPUNDOWN = (1 << 14), /* XXX: for spindown_compat */ ATA_DFLAG_SLEEPING = (1 << 15), /* device is sleeping */ ATA_DFLAG_DUBIOUS_XFER = (1 << 16), /* data transfer not verified */ + ATA_DFLAG_NO_UNLOAD = (1 << 17), /* device doesn't support unload */ ATA_DFLAG_INIT_MASK = (1 << 24) - 1, ATA_DFLAG_DETACH = (1 << 24), @@ -244,6 +245,7 @@ enum { ATA_TMOUT_BOOT = 30000, /* heuristic */ ATA_TMOUT_BOOT_QUICK = 7000, /* heuristic */ ATA_TMOUT_INTERNAL_QUICK = 5000, + ATA_TMOUT_MAX_PARK = 30000, /* FIXME: GoVault needs 2s but we can't afford that without * parallel probing. 800ms is enough for iVDR disk @@ -319,8 +321,11 @@ enum { ATA_EH_RESET = ATA_EH_SOFTRESET | ATA_EH_HARDRESET, ATA_EH_ENABLE_LINK = (1 << 3), ATA_EH_LPM = (1 << 4), /* link power management action */ + ATA_EH_PARK = (1 << 5), /* unload heads and stop I/O */ - ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE, + ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE | ATA_EH_PARK, + ATA_EH_ALL_ACTIONS = ATA_EH_REVALIDATE | ATA_EH_RESET | + ATA_EH_ENABLE_LINK | ATA_EH_LPM, /* ata_eh_info->flags */ ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */ @@ -452,6 +457,7 @@ enum link_pm { MEDIUM_POWER, }; extern struct device_attribute dev_attr_link_power_management_policy; +extern struct device_attribute dev_attr_unload_heads; extern struct device_attribute dev_attr_em_message_type; extern struct device_attribute dev_attr_em_message; extern struct device_attribute dev_attr_sw_activity; @@ -554,8 +560,8 @@ struct ata_ering { struct ata_device { struct ata_link *link; unsigned int devno; /* 0 or 1 */ - unsigned long flags; /* ATA_DFLAG_xxx */ unsigned int horkage; /* List of broken features */ + unsigned long flags; /* ATA_DFLAG_xxx */ struct scsi_device *sdev; /* attached SCSI device */ #ifdef CONFIG_ATA_ACPI acpi_handle acpi_handle; @@ -564,6 +570,7 @@ struct ata_device { /* n_sector is used as CLEAR_OFFSET, read comment above CLEAR_OFFSET */ u64 n_sectors; /* size of device, if ATA */ unsigned int class; /* ATA_DEV_xxx */ + unsigned long unpark_deadline; u8 pio_mode; u8 dma_mode; @@ -621,6 +628,7 @@ struct ata_eh_context { [ATA_EH_CMD_TIMEOUT_TABLE_SIZE]; unsigned int classes[ATA_MAX_DEVICES]; unsigned int did_probe_mask; + unsigned int unloaded_mask; unsigned int saved_ncq_enabled; u8 saved_xfer_mode[ATA_MAX_DEVICES]; /* timestamp for the last reset attempt or success */ @@ -688,7 +696,8 @@ struct ata_port { unsigned int qc_active; int nr_active_links; /* #links with active qcs */ - struct 
ata_link link; /* host default link */ + struct ata_link link; /* host default link */ + struct ata_link *slave_link; /* see ata_slave_link_init() */ int nr_pmp_links; /* nr of available PMP links */ struct ata_link *pmp_link; /* array of PMP links */ @@ -709,6 +718,7 @@ struct ata_port { struct list_head eh_done_q; wait_queue_head_t eh_wait_q; int eh_tries; + struct completion park_req_pending; pm_message_t pm_mesg; int *pm_result; @@ -772,8 +782,8 @@ struct ata_port_operations { /* * Optional features */ - int (*scr_read)(struct ata_port *ap, unsigned int sc_reg, u32 *val); - int (*scr_write)(struct ata_port *ap, unsigned int sc_reg, u32 val); + int (*scr_read)(struct ata_link *link, unsigned int sc_reg, u32 *val); + int (*scr_write)(struct ata_link *link, unsigned int sc_reg, u32 val); void (*pmp_attach)(struct ata_port *ap); void (*pmp_detach)(struct ata_port *ap); int (*enable_pm)(struct ata_port *ap, enum link_pm policy); @@ -895,6 +905,7 @@ extern void ata_port_disable(struct ata_port *); extern struct ata_host *ata_host_alloc(struct device *dev, int max_ports); extern struct ata_host *ata_host_alloc_pinfo(struct device *dev, const struct ata_port_info * const * ppi, int n_ports); +extern int ata_slave_link_init(struct ata_port *ap); extern int ata_host_start(struct ata_host *host); extern int ata_host_register(struct ata_host *host, struct scsi_host_template *sht); @@ -920,8 +931,8 @@ extern int sata_scr_valid(struct ata_link *link); extern int sata_scr_read(struct ata_link *link, int reg, u32 *val); extern int sata_scr_write(struct ata_link *link, int reg, u32 val); extern int sata_scr_write_flush(struct ata_link *link, int reg, u32 val); -extern int ata_link_online(struct ata_link *link); -extern int ata_link_offline(struct ata_link *link); +extern bool ata_link_online(struct ata_link *link); +extern bool ata_link_offline(struct ata_link *link); #ifdef CONFIG_PM extern int ata_host_suspend(struct ata_host *host, pm_message_t mesg); extern void ata_host_resume(struct ata_host *host); @@ -1098,6 +1109,7 @@ extern void ata_std_error_handler(struct ata_port *ap); */ extern const struct ata_port_operations ata_base_port_ops; extern const struct ata_port_operations sata_port_ops; +extern struct device_attribute *ata_common_sdev_attrs[]; #define ATA_BASE_SHT(drv_name) \ .module = THIS_MODULE, \ @@ -1112,7 +1124,8 @@ extern const struct ata_port_operations sata_port_ops; .proc_name = drv_name, \ .slave_configure = ata_scsi_slave_config, \ .slave_destroy = ata_scsi_slave_destroy, \ - .bios_param = ata_std_bios_param + .bios_param = ata_std_bios_param, \ + .sdev_attrs = ata_common_sdev_attrs #define ATA_NCQ_SHT(drv_name) \ ATA_BASE_SHT(drv_name), \ @@ -1134,7 +1147,7 @@ static inline bool sata_pmp_attached(struct ata_port *ap) static inline int ata_is_host_link(const struct ata_link *link) { - return link == &link->ap->link; + return link == &link->ap->link || link == link->ap->slave_link; } #else /* CONFIG_SATA_PMP */ static inline bool sata_pmp_supported(struct ata_port *ap) @@ -1167,7 +1180,7 @@ static inline int sata_srst_pmp(struct ata_link *link) printk("%sata%u: "fmt, lv, (ap)->print_id , ##args) #define ata_link_printk(link, lv, fmt, args...) 
do { \ - if (sata_pmp_attached((link)->ap)) \ + if (sata_pmp_attached((link)->ap) || (link)->ap->slave_link) \ printk("%sata%u.%02u: "fmt, lv, (link)->ap->print_id, \ (link)->pmp , ##args); \ else \ @@ -1265,34 +1278,17 @@ static inline int ata_link_active(struct ata_link *link) return ata_tag_valid(link->active_tag) || link->sactive; } -static inline struct ata_link *ata_port_first_link(struct ata_port *ap) -{ - if (sata_pmp_attached(ap)) - return ap->pmp_link; - return &ap->link; -} - -static inline struct ata_link *ata_port_next_link(struct ata_link *link) -{ - struct ata_port *ap = link->ap; - - if (ata_is_host_link(link)) { - if (!sata_pmp_attached(ap)) - return NULL; - return ap->pmp_link; - } - - if (++link < ap->nr_pmp_links + ap->pmp_link) - return link; - return NULL; -} +extern struct ata_link *__ata_port_next_link(struct ata_port *ap, + struct ata_link *link, + bool dev_only); -#define __ata_port_for_each_link(lk, ap) \ - for ((lk) = &(ap)->link; (lk); (lk) = ata_port_next_link(lk)) +#define __ata_port_for_each_link(link, ap) \ + for ((link) = __ata_port_next_link((ap), NULL, false); (link); \ + (link) = __ata_port_next_link((ap), (link), false)) #define ata_port_for_each_link(link, ap) \ - for ((link) = ata_port_first_link(ap); (link); \ - (link) = ata_port_next_link(link)) + for ((link) = __ata_port_next_link((ap), NULL, true); (link); \ + (link) = __ata_port_next_link((ap), (link), true)) #define ata_link_for_each_dev(dev, link) \ for ((dev) = (link)->device; \ diff --git a/include/linux/major.h b/include/linux/major.h index 53d5fafd85c..88249452b93 100644 --- a/include/linux/major.h +++ b/include/linux/major.h @@ -170,4 +170,6 @@ #define VIOTAPE_MAJOR 230 +#define BLOCK_EXT_MAJOR 259 + #endif diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 655ea0d1ee1..b2f94446831 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -141,6 +141,10 @@ enum { MLX4_STAT_RATE_OFFSET = 5 }; +enum { + MLX4_MTT_FLAG_PRESENT = 1 +}; + static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor) { return (major << 32) | (minor << 16) | subminor; diff --git a/include/linux/mtd/blktrans.h b/include/linux/mtd/blktrans.h index 310e6160641..8b4aa0523db 100644 --- a/include/linux/mtd/blktrans.h +++ b/include/linux/mtd/blktrans.h @@ -41,6 +41,8 @@ struct mtd_blktrans_ops { unsigned long block, char *buffer); int (*writesect)(struct mtd_blktrans_dev *dev, unsigned long block, char *buffer); + int (*discard)(struct mtd_blktrans_dev *dev, + unsigned long block, unsigned nr_blocks); /* Block layer ioctls */ int (*getgeo)(struct mtd_blktrans_dev *dev, struct hd_geometry *geo); diff --git a/include/linux/mv643xx_eth.h b/include/linux/mv643xx_eth.h index 12078577aef..cbbbe9bfeca 100644 --- a/include/linux/mv643xx_eth.h +++ b/include/linux/mv643xx_eth.h @@ -17,9 +17,14 @@ struct mv643xx_eth_shared_platform_data { struct mbus_dram_target_info *dram; + struct platform_device *shared_smi; unsigned int t_clk; }; +#define MV643XX_ETH_PHY_ADDR_DEFAULT 0 +#define MV643XX_ETH_PHY_ADDR(x) (0x80 | (x)) +#define MV643XX_ETH_PHY_NONE 0xff + struct mv643xx_eth_platform_data { /* * Pointer back to our parent instance, and our port number. @@ -30,8 +35,6 @@ struct mv643xx_eth_platform_data { /* * Whether a PHY is present, and if yes, at which address. */ - struct platform_device *shared_smi; - int force_phy_addr; int phy_addr; /* @@ -49,10 +52,10 @@ struct mv643xx_eth_platform_data { int duplex; /* - * Which RX/TX queues to use. + * How many RX/TX queues to use. 
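To make the intent of the reworked link iterators concrete, a sketch of the usual walk over a port's links and their devices. my_scan_port() and the loop body are illustrative assumptions; only the iterator macros and ata_dev_enabled() come from libata itself.

#include <linux/libata.h>

static void my_scan_port(struct ata_port *ap)
{
        struct ata_link *link;
        struct ata_device *dev;

        /* links are handed out one by one by __ata_port_next_link() */
        ata_port_for_each_link(link, ap) {
                ata_link_for_each_dev(dev, link) {
                        if (!ata_dev_enabled(dev))
                                continue;
                        /* inspect or act on the attached device here */
                }
        }
}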
*/ - int rx_queue_mask; - int tx_queue_mask; + int rx_queue_count; + int tx_queue_count; /* * Override default RX/TX queue sizes if nonzero. diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 488c56e649b..d3ea3de70a8 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -42,6 +42,7 @@ #include <linux/workqueue.h> #include <net/net_namespace.h> +#include <net/dsa.h> struct vlan_group; struct ethtool_ops; @@ -471,6 +472,8 @@ struct net_device char name[IFNAMSIZ]; /* device name hash chain */ struct hlist_node name_hlist; + /* snmp alias */ + char *ifalias; /* * I/O specific fields @@ -605,6 +608,9 @@ struct net_device /* Protocol specific pointers */ +#ifdef CONFIG_NET_DSA + void *dsa_ptr; /* dsa specific data */ +#endif void *atalk_ptr; /* AppleTalk link */ void *ip_ptr; /* IPv4 specific data */ void *dn_ptr; /* DECnet specific data */ @@ -796,6 +802,26 @@ void dev_net_set(struct net_device *dev, struct net *net) #endif } +static inline bool netdev_uses_dsa_tags(struct net_device *dev) +{ +#ifdef CONFIG_NET_DSA_TAG_DSA + if (dev->dsa_ptr != NULL) + return dsa_uses_dsa_tags(dev->dsa_ptr); +#endif + + return 0; +} + +static inline bool netdev_uses_trailer_tags(struct net_device *dev) +{ +#ifdef CONFIG_NET_DSA_TAG_TRAILER + if (dev->dsa_ptr != NULL) + return dsa_uses_trailer_tags(dev->dsa_ptr); +#endif + + return 0; +} + /** * netdev_priv - access network device private data * @dev: network device @@ -1223,7 +1249,8 @@ extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *); extern int dev_ethtool(struct net *net, struct ifreq *); extern unsigned dev_get_flags(const struct net_device *); extern int dev_change_flags(struct net_device *, unsigned); -extern int dev_change_name(struct net_device *, char *); +extern int dev_change_name(struct net_device *, const char *); +extern int dev_set_alias(struct net_device *, const char *, size_t); extern int dev_change_net_namespace(struct net_device *, struct net *, const char *); extern int dev_set_mtu(struct net_device *, int); @@ -1667,7 +1694,7 @@ extern void dev_seq_stop(struct seq_file *seq, void *v); extern int netdev_class_create_file(struct class_attribute *class_attr); extern void netdev_class_remove_file(struct class_attribute *class_attr); -extern char *netdev_drivername(struct net_device *dev, char *buffer, int len); +extern char *netdev_drivername(const struct net_device *dev, char *buffer, int len); extern void linkwatch_run_queue(void); diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index 0c5eb7ed8b3..48cfe51bfdd 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -5,13 +5,11 @@ #include <linux/init.h> #include <linux/skbuff.h> #include <linux/net.h> -#include <linux/netdevice.h> #include <linux/if.h> #include <linux/in.h> #include <linux/in6.h> #include <linux/wait.h> #include <linux/list.h> -#include <net/net_namespace.h> #endif #include <linux/types.h> #include <linux/compiler.h> @@ -52,6 +50,16 @@ enum nf_inet_hooks { NF_INET_NUMHOOKS }; +enum { + NFPROTO_UNSPEC = 0, + NFPROTO_IPV4 = 2, + NFPROTO_ARP = 3, + NFPROTO_BRIDGE = 7, + NFPROTO_IPV6 = 10, + NFPROTO_DECNET = 12, + NFPROTO_NUMPROTO, +}; + union nf_inet_addr { __u32 all[4]; __be32 ip; @@ -92,8 +100,8 @@ struct nf_hook_ops /* User fills in from here down. */ nf_hookfn *hook; struct module *owner; - int pf; - int hooknum; + u_int8_t pf; + unsigned int hooknum; /* Hooks are ordered in ascending priority. 
*/ int priority; }; @@ -102,7 +110,7 @@ struct nf_sockopt_ops { struct list_head list; - int pf; + u_int8_t pf; /* Non-inclusive ranges: use 0/0/NULL to never get called. */ int set_optmin; @@ -138,9 +146,9 @@ extern struct ctl_path nf_net_netfilter_sysctl_path[]; extern struct ctl_path nf_net_ipv4_netfilter_sysctl_path[]; #endif /* CONFIG_SYSCTL */ -extern struct list_head nf_hooks[NPROTO][NF_MAX_HOOKS]; +extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; -int nf_hook_slow(int pf, unsigned int hook, struct sk_buff *skb, +int nf_hook_slow(u_int8_t pf, unsigned int hook, struct sk_buff *skb, struct net_device *indev, struct net_device *outdev, int (*okfn)(struct sk_buff *), int thresh); @@ -151,7 +159,7 @@ int nf_hook_slow(int pf, unsigned int hook, struct sk_buff *skb, * okfn must be invoked by the caller in this case. Any other return * value indicates the packet has been consumed by the hook. */ -static inline int nf_hook_thresh(int pf, unsigned int hook, +static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook, struct sk_buff *skb, struct net_device *indev, struct net_device *outdev, @@ -167,7 +175,7 @@ static inline int nf_hook_thresh(int pf, unsigned int hook, return nf_hook_slow(pf, hook, skb, indev, outdev, okfn, thresh); } -static inline int nf_hook(int pf, unsigned int hook, struct sk_buff *skb, +static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sk_buff *skb, struct net_device *indev, struct net_device *outdev, int (*okfn)(struct sk_buff *)) { @@ -212,14 +220,14 @@ __ret;}) NF_HOOK_THRESH(pf, hook, skb, indev, outdev, okfn, INT_MIN) /* Call setsockopt() */ -int nf_setsockopt(struct sock *sk, int pf, int optval, char __user *opt, +int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt, int len); -int nf_getsockopt(struct sock *sk, int pf, int optval, char __user *opt, +int nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt, int *len); -int compat_nf_setsockopt(struct sock *sk, int pf, int optval, +int compat_nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt, int len); -int compat_nf_getsockopt(struct sock *sk, int pf, int optval, +int compat_nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt, int *len); /* Call this before modifying an existing packet: ensures it is @@ -247,7 +255,7 @@ struct nf_afinfo { int route_key_size; }; -extern const struct nf_afinfo *nf_afinfo[NPROTO]; +extern const struct nf_afinfo *nf_afinfo[NFPROTO_NUMPROTO]; static inline const struct nf_afinfo *nf_get_afinfo(unsigned short family) { return rcu_dereference(nf_afinfo[family]); @@ -292,7 +300,7 @@ extern void nf_unregister_afinfo(const struct nf_afinfo *afinfo); extern void (*ip_nat_decode_session)(struct sk_buff *, struct flowi *); static inline void -nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, int family) +nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family) { #ifdef CONFIG_NF_NAT_NEEDED void (*decodefn)(struct sk_buff *, struct flowi *); @@ -315,7 +323,7 @@ extern struct proc_dir_entry *proc_net_netfilter; #else /* !CONFIG_NETFILTER */ #define NF_HOOK(pf, hook, skb, indev, outdev, okfn) (okfn)(skb) #define NF_HOOK_COND(pf, hook, skb, indev, outdev, okfn, cond) (okfn)(skb) -static inline int nf_hook_thresh(int pf, unsigned int hook, +static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook, struct sk_buff *skb, struct net_device *indev, struct net_device *outdev, @@ -324,7 +332,7 @@ static inline int nf_hook_thresh(int pf, 
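A minimal sketch of what a hook registration looks like with the narrowed pf field. The hook body, its name, and the chosen hook point and priority are illustrative assumptions; pairing it with nf_register_hook()/nf_unregister_hook() in module init/exit is left out.

#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>

static unsigned int my_hook(unsigned int hooknum, struct sk_buff *skb,
                            const struct net_device *in,
                            const struct net_device *out,
                            int (*okfn)(struct sk_buff *))
{
        return NF_ACCEPT;               /* pass every packet through */
}

static struct nf_hook_ops my_hook_ops __read_mostly = {
        .hook     = my_hook,
        .owner    = THIS_MODULE,
        .pf       = NFPROTO_IPV4,       /* u_int8_t after this change */
        .hooknum  = NF_INET_LOCAL_IN,
        .priority = NF_IP_PRI_FILTER,
};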
unsigned int hook, { return okfn(skb); } -static inline int nf_hook(int pf, unsigned int hook, struct sk_buff *skb, +static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sk_buff *skb, struct net_device *indev, struct net_device *outdev, int (*okfn)(struct sk_buff *)) { @@ -332,7 +340,9 @@ static inline int nf_hook(int pf, unsigned int hook, struct sk_buff *skb, } struct flowi; static inline void -nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, int family) {} +nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family) +{ +} #endif /*CONFIG_NETFILTER*/ #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) @@ -343,56 +353,5 @@ extern void (*nf_ct_destroy)(struct nf_conntrack *); static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {} #endif -static inline struct net *nf_pre_routing_net(const struct net_device *in, - const struct net_device *out) -{ -#ifdef CONFIG_NET_NS - return in->nd_net; -#else - return &init_net; -#endif -} - -static inline struct net *nf_local_in_net(const struct net_device *in, - const struct net_device *out) -{ -#ifdef CONFIG_NET_NS - return in->nd_net; -#else - return &init_net; -#endif -} - -static inline struct net *nf_forward_net(const struct net_device *in, - const struct net_device *out) -{ -#ifdef CONFIG_NET_NS - BUG_ON(in->nd_net != out->nd_net); - return in->nd_net; -#else - return &init_net; -#endif -} - -static inline struct net *nf_local_out_net(const struct net_device *in, - const struct net_device *out) -{ -#ifdef CONFIG_NET_NS - return out->nd_net; -#else - return &init_net; -#endif -} - -static inline struct net *nf_post_routing_net(const struct net_device *in, - const struct net_device *out) -{ -#ifdef CONFIG_NET_NS - return out->nd_net; -#else - return &init_net; -#endif -} - #endif /*__KERNEL__*/ #endif /*__LINUX_NETFILTER_H*/ diff --git a/include/linux/netfilter/Kbuild b/include/linux/netfilter/Kbuild index 3aff513d12c..5a8af875bce 100644 --- a/include/linux/netfilter/Kbuild +++ b/include/linux/netfilter/Kbuild @@ -32,6 +32,7 @@ header-y += xt_owner.h header-y += xt_pkttype.h header-y += xt_rateest.h header-y += xt_realm.h +header-y += xt_recent.h header-y += xt_sctp.h header-y += xt_state.h header-y += xt_statistic.h diff --git a/include/linux/netfilter/nf_conntrack_proto_gre.h b/include/linux/netfilter/nf_conntrack_proto_gre.h index 535e4219d2b..2a10efda17f 100644 --- a/include/linux/netfilter/nf_conntrack_proto_gre.h +++ b/include/linux/netfilter/nf_conntrack_proto_gre.h @@ -87,7 +87,7 @@ int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir, /* delete keymap entries */ void nf_ct_gre_keymap_destroy(struct nf_conn *ct); -extern void nf_ct_gre_keymap_flush(void); +extern void nf_ct_gre_keymap_flush(struct net *net); extern void nf_nat_need_gre(void); #endif /* __KERNEL__ */ diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 2326296b6f2..be41b609c88 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -173,6 +173,98 @@ struct xt_counters_info #include <linux/netdevice.h> +/** + * struct xt_match_param - parameters for match extensions' match functions + * + * @in: input netdevice + * @out: output netdevice + * @match: struct xt_match through which this function was invoked + * @matchinfo: per-match data + * @fragoff: packet is a fragment, this is the data offset + * @thoff: position of transport header relative to skb->data + * @hotdrop: drop packet if we had inspection 
problems + * @family: Actual NFPROTO_* through which the function is invoked + * (helpful when match->family == NFPROTO_UNSPEC) + */ +struct xt_match_param { + const struct net_device *in, *out; + const struct xt_match *match; + const void *matchinfo; + int fragoff; + unsigned int thoff; + bool *hotdrop; + u_int8_t family; +}; + +/** + * struct xt_mtchk_param - parameters for match extensions' + * checkentry functions + * + * @table: table the rule is tried to be inserted into + * @entryinfo: the family-specific rule data + * (struct ipt_ip, ip6t_ip, ebt_entry) + * @match: struct xt_match through which this function was invoked + * @matchinfo: per-match data + * @hook_mask: via which hooks the new rule is reachable + */ +struct xt_mtchk_param { + const char *table; + const void *entryinfo; + const struct xt_match *match; + void *matchinfo; + unsigned int hook_mask; + u_int8_t family; +}; + +/* Match destructor parameters */ +struct xt_mtdtor_param { + const struct xt_match *match; + void *matchinfo; + u_int8_t family; +}; + +/** + * struct xt_target_param - parameters for target extensions' target functions + * + * @hooknum: hook through which this target was invoked + * @target: struct xt_target through which this function was invoked + * @targinfo: per-target data + * + * Other fields see above. + */ +struct xt_target_param { + const struct net_device *in, *out; + unsigned int hooknum; + const struct xt_target *target; + const void *targinfo; + u_int8_t family; +}; + +/** + * struct xt_tgchk_param - parameters for target extensions' + * checkentry functions + * + * @entryinfo: the family-specific rule data + * (struct ipt_entry, ip6t_entry, arpt_entry, ebt_entry) + * + * Other fields see above. + */ +struct xt_tgchk_param { + const char *table; + void *entryinfo; + const struct xt_target *target; + void *targinfo; + unsigned int hook_mask; + u_int8_t family; +}; + +/* Target destructor parameters */ +struct xt_tgdtor_param { + const struct xt_target *target; + void *targinfo; + u_int8_t family; +}; + struct xt_match { struct list_head list; @@ -185,24 +277,13 @@ struct xt_match non-linear skb, using skb_header_pointer and skb_ip_make_writable. */ bool (*match)(const struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - const struct xt_match *match, - const void *matchinfo, - int offset, - unsigned int protoff, - bool *hotdrop); + const struct xt_match_param *); /* Called when user tries to insert an entry of this type. */ - /* Should return true or false. */ - bool (*checkentry)(const char *tablename, - const void *ip, - const struct xt_match *match, - void *matchinfo, - unsigned int hook_mask); + bool (*checkentry)(const struct xt_mtchk_param *); /* Called when entry of this type deleted. */ - void (*destroy)(const struct xt_match *match, void *matchinfo); + void (*destroy)(const struct xt_mtdtor_param *); /* Called when userspace align differs from kernel space one */ void (*compat_from_user)(void *dst, void *src); @@ -235,24 +316,16 @@ struct xt_target must now handle non-linear skbs, using skb_copy_bits and skb_ip_make_writable. */ unsigned int (*target)(struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - unsigned int hooknum, - const struct xt_target *target, - const void *targinfo); + const struct xt_target_param *); /* Called when user tries to insert an entry of this type: hook_mask is a bitmask of hooks from which it can be called. */ /* Should return true or false. 
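To show how the bundled parameters are consumed, a sketch of a match extension written against the new prototypes. struct foo_mtinfo, the "foo" name and the length check are made-up placeholders; registration via xt_register_match() is left out.

#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>

struct foo_mtinfo {
        __u32 min_len;                  /* hypothetical per-rule datum */
};

static bool foo_mt(const struct sk_buff *skb, const struct xt_match_param *par)
{
        const struct foo_mtinfo *info = par->matchinfo;

        if (par->fragoff != 0)          /* only match head fragments */
                return false;
        return skb->len >= info->min_len;
}

static bool foo_mt_check(const struct xt_mtchk_param *par)
{
        const struct foo_mtinfo *info = par->matchinfo;

        return info->min_len != 0;      /* refuse obviously bogus rules */
}

static struct xt_match foo_mt_reg __read_mostly = {
        .name       = "foo",
        .revision   = 0,
        .family     = NFPROTO_UNSPEC,
        .match      = foo_mt,
        .checkentry = foo_mt_check,
        .matchsize  = sizeof(struct foo_mtinfo),
        .me         = THIS_MODULE,
};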
*/ - bool (*checkentry)(const char *tablename, - const void *entry, - const struct xt_target *target, - void *targinfo, - unsigned int hook_mask); + bool (*checkentry)(const struct xt_tgchk_param *); /* Called when entry of this type deleted. */ - void (*destroy)(const struct xt_target *target, void *targinfo); + void (*destroy)(const struct xt_tgdtor_param *); /* Called when userspace align differs from kernel space one */ void (*compat_from_user)(void *dst, void *src); @@ -292,7 +365,7 @@ struct xt_table /* Set this to THIS_MODULE if you are a module, otherwise NULL */ struct module *me; - int af; /* address/protocol family */ + u_int8_t af; /* address/protocol family */ }; #include <linux/netfilter_ipv4.h> @@ -328,12 +401,10 @@ extern void xt_unregister_match(struct xt_match *target); extern int xt_register_matches(struct xt_match *match, unsigned int n); extern void xt_unregister_matches(struct xt_match *match, unsigned int n); -extern int xt_check_match(const struct xt_match *match, unsigned short family, - unsigned int size, const char *table, unsigned int hook, - unsigned short proto, int inv_proto); -extern int xt_check_target(const struct xt_target *target, unsigned short family, - unsigned int size, const char *table, unsigned int hook, - unsigned short proto, int inv_proto); +extern int xt_check_match(struct xt_mtchk_param *, + unsigned int size, u_int8_t proto, bool inv_proto); +extern int xt_check_target(struct xt_tgchk_param *, + unsigned int size, u_int8_t proto, bool inv_proto); extern struct xt_table *xt_register_table(struct net *net, struct xt_table *table, @@ -346,19 +417,19 @@ extern struct xt_table_info *xt_replace_table(struct xt_table *table, struct xt_table_info *newinfo, int *error); -extern struct xt_match *xt_find_match(int af, const char *name, u8 revision); -extern struct xt_target *xt_find_target(int af, const char *name, u8 revision); -extern struct xt_target *xt_request_find_target(int af, const char *name, +extern struct xt_match *xt_find_match(u8 af, const char *name, u8 revision); +extern struct xt_target *xt_find_target(u8 af, const char *name, u8 revision); +extern struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision); -extern int xt_find_revision(int af, const char *name, u8 revision, int target, - int *err); +extern int xt_find_revision(u8 af, const char *name, u8 revision, + int target, int *err); -extern struct xt_table *xt_find_table_lock(struct net *net, int af, +extern struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af, const char *name); extern void xt_table_unlock(struct xt_table *t); -extern int xt_proto_init(struct net *net, int af); -extern void xt_proto_fini(struct net *net, int af); +extern int xt_proto_init(struct net *net, u_int8_t af); +extern void xt_proto_fini(struct net *net, u_int8_t af); extern struct xt_table_info *xt_alloc_table_info(unsigned int size); extern void xt_free_table_info(struct xt_table_info *info); @@ -423,12 +494,12 @@ struct compat_xt_counters_info #define COMPAT_XT_ALIGN(s) (((s) + (__alignof__(struct compat_xt_counters)-1)) \ & ~(__alignof__(struct compat_xt_counters)-1)) -extern void xt_compat_lock(int af); -extern void xt_compat_unlock(int af); +extern void xt_compat_lock(u_int8_t af); +extern void xt_compat_unlock(u_int8_t af); -extern int xt_compat_add_offset(int af, unsigned int offset, short delta); -extern void xt_compat_flush_offsets(int af); -extern short xt_compat_calc_jump(int af, unsigned int offset); +extern int xt_compat_add_offset(u_int8_t af, unsigned int 
offset, short delta); +extern void xt_compat_flush_offsets(u_int8_t af); +extern short xt_compat_calc_jump(u_int8_t af, unsigned int offset); extern int xt_compat_match_offset(const struct xt_match *match); extern int xt_compat_match_from_user(struct xt_entry_match *m, diff --git a/include/linux/netfilter/xt_TPROXY.h b/include/linux/netfilter/xt_TPROXY.h new file mode 100644 index 00000000000..152e8f97132 --- /dev/null +++ b/include/linux/netfilter/xt_TPROXY.h @@ -0,0 +1,14 @@ +#ifndef _XT_TPROXY_H_target +#define _XT_TPROXY_H_target + +/* TPROXY target is capable of marking the packet to perform + * redirection. We can get rid of that whenever we get support for + * mutliple targets in the same rule. */ +struct xt_tproxy_target_info { + u_int32_t mark_mask; + u_int32_t mark_value; + __be32 laddr; + __be16 lport; +}; + +#endif /* _XT_TPROXY_H_target */ diff --git a/include/linux/netfilter/xt_recent.h b/include/linux/netfilter/xt_recent.h new file mode 100644 index 00000000000..5cfeb81c679 --- /dev/null +++ b/include/linux/netfilter/xt_recent.h @@ -0,0 +1,26 @@ +#ifndef _LINUX_NETFILTER_XT_RECENT_H +#define _LINUX_NETFILTER_XT_RECENT_H 1 + +enum { + XT_RECENT_CHECK = 1 << 0, + XT_RECENT_SET = 1 << 1, + XT_RECENT_UPDATE = 1 << 2, + XT_RECENT_REMOVE = 1 << 3, + XT_RECENT_TTL = 1 << 4, + + XT_RECENT_SOURCE = 0, + XT_RECENT_DEST = 1, + + XT_RECENT_NAME_LEN = 200, +}; + +struct xt_recent_mtinfo { + u_int32_t seconds; + u_int32_t hit_count; + u_int8_t check_set; + u_int8_t invert; + char name[XT_RECENT_NAME_LEN]; + u_int8_t side; +}; + +#endif /* _LINUX_NETFILTER_XT_RECENT_H */ diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h index 892f5b7771c..d45e29cd1cf 100644 --- a/include/linux/netfilter_bridge/ebtables.h +++ b/include/linux/netfilter_bridge/ebtables.h @@ -31,6 +31,9 @@ * The 4 lsb are more than enough to store the verdict. 
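As a concrete reading of the shared layout above, the rule data a "check for 4 hits within 60 seconds against a source list named SSH" rule would carry; the values are purely illustrative.

#include <linux/netfilter/xt_recent.h>

static const struct xt_recent_mtinfo recent_example = {
        .seconds   = 60,                        /* window to look back over */
        .hit_count = 4,                         /* minimum number of hits */
        .check_set = XT_RECENT_CHECK | XT_RECENT_TTL,
        .invert    = 0,
        .name      = "SSH",                     /* list name */
        .side      = XT_RECENT_SOURCE,          /* track source addresses */
};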
*/ #define EBT_VERDICT_BITS 0x0000000F +struct xt_match; +struct xt_target; + struct ebt_counter { uint64_t pcnt; @@ -121,7 +124,7 @@ struct ebt_entry_match { union { char name[EBT_FUNCTION_MAXNAMELEN]; - struct ebt_match *match; + struct xt_match *match; } u; /* size of data */ unsigned int match_size; @@ -132,7 +135,7 @@ struct ebt_entry_watcher { union { char name[EBT_FUNCTION_MAXNAMELEN]; - struct ebt_watcher *watcher; + struct xt_target *watcher; } u; /* size of data */ unsigned int watcher_size; @@ -143,7 +146,7 @@ struct ebt_entry_target { union { char name[EBT_FUNCTION_MAXNAMELEN]; - struct ebt_target *target; + struct xt_target *target; } u; /* size of data */ unsigned int target_size; @@ -207,14 +210,17 @@ struct ebt_match { struct list_head list; const char name[EBT_FUNCTION_MAXNAMELEN]; - /* 0 == it matches */ - int (*match)(const struct sk_buff *skb, const struct net_device *in, - const struct net_device *out, const void *matchdata, - unsigned int datalen); - /* 0 == let it in */ - int (*check)(const char *tablename, unsigned int hookmask, - const struct ebt_entry *e, void *matchdata, unsigned int datalen); - void (*destroy)(void *matchdata, unsigned int datalen); + bool (*match)(const struct sk_buff *skb, const struct net_device *in, + const struct net_device *out, const struct xt_match *match, + const void *matchinfo, int offset, unsigned int protoff, + bool *hotdrop); + bool (*checkentry)(const char *table, const void *entry, + const struct xt_match *match, void *matchinfo, + unsigned int hook_mask); + void (*destroy)(const struct xt_match *match, void *matchinfo); + unsigned int matchsize; + u_int8_t revision; + u_int8_t family; struct module *me; }; @@ -222,13 +228,17 @@ struct ebt_watcher { struct list_head list; const char name[EBT_FUNCTION_MAXNAMELEN]; - void (*watcher)(const struct sk_buff *skb, unsigned int hooknr, - const struct net_device *in, const struct net_device *out, - const void *watcherdata, unsigned int datalen); - /* 0 == let it in */ - int (*check)(const char *tablename, unsigned int hookmask, - const struct ebt_entry *e, void *watcherdata, unsigned int datalen); - void (*destroy)(void *watcherdata, unsigned int datalen); + unsigned int (*target)(struct sk_buff *skb, + const struct net_device *in, const struct net_device *out, + unsigned int hook_num, const struct xt_target *target, + const void *targinfo); + bool (*checkentry)(const char *table, const void *entry, + const struct xt_target *target, void *targinfo, + unsigned int hook_mask); + void (*destroy)(const struct xt_target *target, void *targinfo); + unsigned int targetsize; + u_int8_t revision; + u_int8_t family; struct module *me; }; @@ -236,14 +246,18 @@ struct ebt_target { struct list_head list; const char name[EBT_FUNCTION_MAXNAMELEN]; - /* returns one of the standard verdicts */ - int (*target)(struct sk_buff *skb, unsigned int hooknr, - const struct net_device *in, const struct net_device *out, - const void *targetdata, unsigned int datalen); - /* 0 == let it in */ - int (*check)(const char *tablename, unsigned int hookmask, - const struct ebt_entry *e, void *targetdata, unsigned int datalen); - void (*destroy)(void *targetdata, unsigned int datalen); + /* returns one of the standard EBT_* verdicts */ + unsigned int (*target)(struct sk_buff *skb, + const struct net_device *in, const struct net_device *out, + unsigned int hook_num, const struct xt_target *target, + const void *targinfo); + bool (*checkentry)(const char *table, const void *entry, + const struct xt_target *target, void 
*targinfo, + unsigned int hook_mask); + void (*destroy)(const struct xt_target *target, void *targinfo); + unsigned int targetsize; + u_int8_t revision; + u_int8_t family; struct module *me; }; @@ -288,12 +302,6 @@ struct ebt_table ~(__alignof__(struct ebt_replace)-1)) extern int ebt_register_table(struct ebt_table *table); extern void ebt_unregister_table(struct ebt_table *table); -extern int ebt_register_match(struct ebt_match *match); -extern void ebt_unregister_match(struct ebt_match *match); -extern int ebt_register_watcher(struct ebt_watcher *watcher); -extern void ebt_unregister_watcher(struct ebt_watcher *watcher); -extern int ebt_register_target(struct ebt_target *target); -extern void ebt_unregister_target(struct ebt_target *target); extern unsigned int ebt_do_table(unsigned int hook, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, struct ebt_table *table); @@ -302,9 +310,9 @@ extern unsigned int ebt_do_table(unsigned int hook, struct sk_buff *skb, #define FWINV(bool,invflg) ((bool) ^ !!(info->invflags & invflg)) /* True if the hook mask denotes that the rule is in a base chain, * used in the check() functions */ -#define BASE_CHAIN (hookmask & (1 << NF_BR_NUMHOOKS)) +#define BASE_CHAIN (par->hook_mask & (1 << NF_BR_NUMHOOKS)) /* Clear the bit in the hook mask that tells if the rule is on a base chain */ -#define CLEAR_BASE_CHAIN_BIT (hookmask &= ~(1 << NF_BR_NUMHOOKS)) +#define CLEAR_BASE_CHAIN_BIT (par->hook_mask &= ~(1 << NF_BR_NUMHOOKS)) /* True if the target is not a standard target */ #define INVALID_TARGET (info->target < -NUM_STANDARD_TARGETS || info->target >= 0) diff --git a/include/linux/netfilter_ipv4/ipt_recent.h b/include/linux/netfilter_ipv4/ipt_recent.h index 6508a459265..d636cca133c 100644 --- a/include/linux/netfilter_ipv4/ipt_recent.h +++ b/include/linux/netfilter_ipv4/ipt_recent.h @@ -1,27 +1,21 @@ #ifndef _IPT_RECENT_H #define _IPT_RECENT_H -#define RECENT_NAME "ipt_recent" -#define RECENT_VER "v0.3.1" +#include <linux/netfilter/xt_recent.h> -#define IPT_RECENT_CHECK 1 -#define IPT_RECENT_SET 2 -#define IPT_RECENT_UPDATE 4 -#define IPT_RECENT_REMOVE 8 -#define IPT_RECENT_TTL 16 +#define ipt_recent_info xt_recent_mtinfo -#define IPT_RECENT_SOURCE 0 -#define IPT_RECENT_DEST 1 +enum { + IPT_RECENT_CHECK = XT_RECENT_CHECK, + IPT_RECENT_SET = XT_RECENT_SET, + IPT_RECENT_UPDATE = XT_RECENT_UPDATE, + IPT_RECENT_REMOVE = XT_RECENT_REMOVE, + IPT_RECENT_TTL = XT_RECENT_TTL, -#define IPT_RECENT_NAME_LEN 200 + IPT_RECENT_SOURCE = XT_RECENT_SOURCE, + IPT_RECENT_DEST = XT_RECENT_DEST, -struct ipt_recent_info { - u_int32_t seconds; - u_int32_t hit_count; - u_int8_t check_set; - u_int8_t invert; - char name[IPT_RECENT_NAME_LEN]; - u_int8_t side; + IPT_RECENT_NAME_LEN = XT_RECENT_NAME_LEN, }; #endif /*_IPT_RECENT_H*/ diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h index 2be7c63bc0f..9bad65400fb 100644 --- a/include/linux/nl80211.h +++ b/include/linux/nl80211.h @@ -89,6 +89,22 @@ * @NL80211_CMD_DEL_PATH: Remove a mesh path identified by %NL80211_ATTR_MAC * or, if no MAC address given, all mesh paths, on the interface identified * by %NL80211_ATTR_IFINDEX. + * @NL80211_CMD_SET_BSS: Set BSS attributes for BSS identified by + * %NL80211_ATTR_IFINDEX. + * + * @NL80211_CMD_SET_REG: Set current regulatory domain. CRDA sends this command + * after being queried by the kernel. CRDA replies by sending a regulatory + * domain structure which consists of %NL80211_ATTR_REG_ALPHA set to our + * current alpha2 if it found a match. 
It also provides + NL80211_ATTR_REG_RULE_FLAGS, and a set of regulatory rules. Each + regulatory rule is a nested set of attributes given by + %NL80211_ATTR_REG_RULE_FREQ_[START|END] and + %NL80211_ATTR_FREQ_RANGE_MAX_BW with an attached power rule given by + %NL80211_ATTR_REG_RULE_POWER_MAX_ANT_GAIN and + %NL80211_ATTR_REG_RULE_POWER_MAX_EIRP. + * @NL80211_CMD_REQ_SET_REG: ask the wireless core to set the regulatory domain + * to the specified ISO/IEC 3166-1 alpha2 country code. The core will + * store this as a valid request and then query userspace for it. * * @NL80211_CMD_MAX: highest used command number * @__NL80211_CMD_AFTER_LAST: internal use @@ -127,13 +143,23 @@ enum nl80211_commands { NL80211_CMD_NEW_MPATH, NL80211_CMD_DEL_MPATH, - /* add commands here */ + NL80211_CMD_SET_BSS, + + NL80211_CMD_SET_REG, + NL80211_CMD_REQ_SET_REG, + + /* add new commands above here */ /* used to define NL80211_CMD_MAX below */ __NL80211_CMD_AFTER_LAST, NL80211_CMD_MAX = __NL80211_CMD_AFTER_LAST - 1 }; +/* + * Allow user space programs to use #ifdef on new commands by defining them + * here + */ +#define NL80211_CMD_SET_BSS NL80211_CMD_SET_BSS /** * enum nl80211_attrs - nl80211 netlink attributes @@ -188,10 +214,34 @@ enum nl80211_commands { * info given for %NL80211_CMD_GET_MPATH, nested attribute described at * &enum nl80211_mpath_info. * - * * @NL80211_ATTR_MNTR_FLAGS: flags, nested element with NLA_FLAG attributes of * &enum nl80211_mntr_flags. * + * @NL80211_ATTR_REG_ALPHA2: an ISO-3166-alpha2 country code to which the + * current regulatory domain should be set, or to which it is already set. + * For example, 'CR', for Costa Rica. This attribute is used by the kernel + * to query the CRDA to retrieve one regulatory domain. This attribute can + * also be used by userspace to query the kernel for the currently set + * regulatory domain. We chose an alpha2 as that is also used by the + * IEEE-802.11d country information element to identify a country. + * Users can also simply ask the wireless core to set regulatory domain + * to a specific alpha2. + * @NL80211_ATTR_REG_RULES: a nested array of regulatory domain regulatory + * rules. + * + * @NL80211_ATTR_BSS_CTS_PROT: whether CTS protection is enabled (u8, 0 or 1) + * @NL80211_ATTR_BSS_SHORT_PREAMBLE: whether short preamble is enabled + * (u8, 0 or 1) + * @NL80211_ATTR_BSS_SHORT_SLOT_TIME: whether short slot time enabled + * (u8, 0 or 1) + * + * @NL80211_ATTR_HT_CAPABILITY: HT Capability information element (from + * association request when used with NL80211_CMD_NEW_STATION) + * + * @NL80211_ATTR_SUPPORTED_IFTYPES: nested attribute containing all + * supported interface types, each a flag attribute with the number + * of the interface mode.
+ * * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use */ @@ -235,16 +285,35 @@ enum nl80211_attrs { NL80211_ATTR_MPATH_NEXT_HOP, NL80211_ATTR_MPATH_INFO, + NL80211_ATTR_BSS_CTS_PROT, + NL80211_ATTR_BSS_SHORT_PREAMBLE, + NL80211_ATTR_BSS_SHORT_SLOT_TIME, + + NL80211_ATTR_HT_CAPABILITY, + + NL80211_ATTR_SUPPORTED_IFTYPES, + + NL80211_ATTR_REG_ALPHA2, + NL80211_ATTR_REG_RULES, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, NL80211_ATTR_MAX = __NL80211_ATTR_AFTER_LAST - 1 }; +/* + * Allow user space programs to use #ifdef on new attributes by defining them + * here + */ +#define NL80211_ATTR_HT_CAPABILITY NL80211_ATTR_HT_CAPABILITY + #define NL80211_MAX_SUPP_RATES 32 +#define NL80211_MAX_SUPP_REG_RULES 32 #define NL80211_TKIP_DATA_OFFSET_ENCR_KEY 0 #define NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY 16 #define NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY 24 +#define NL80211_HT_CAPABILITY_LEN 26 /** * enum nl80211_iftype - (virtual) interface types @@ -436,6 +505,66 @@ enum nl80211_bitrate_attr { }; /** + * enum nl80211_reg_rule_attr - regulatory rule attributes + * @NL80211_ATTR_REG_RULE_FLAGS: a set of flags which specify additional + * considerations for a given frequency range. These are the + * &enum nl80211_reg_rule_flags. + * @NL80211_ATTR_FREQ_RANGE_START: starting frequency for the regulatory + * rule in KHz. This is not a center frequency but an actual regulatory + * band edge. + * @NL80211_ATTR_FREQ_RANGE_END: ending frequency for the regulatory rule + * in KHz. This is not a center frequency but an actual regulatory + * band edge. + * @NL80211_ATTR_FREQ_RANGE_MAX_BW: maximum allowed bandwidth for this + * frequency range, in KHz. + * @NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN: the maximum allowed antenna gain + * for a given frequency range. The value is in mBi (100 * dBi). + * If you don't have one then don't send this. + * @NL80211_ATTR_POWER_RULE_MAX_EIRP: the maximum allowed EIRP for + * a given frequency range. The value is in mBm (100 * dBm). + */ +enum nl80211_reg_rule_attr { + __NL80211_REG_RULE_ATTR_INVALID, + NL80211_ATTR_REG_RULE_FLAGS, + + NL80211_ATTR_FREQ_RANGE_START, + NL80211_ATTR_FREQ_RANGE_END, + NL80211_ATTR_FREQ_RANGE_MAX_BW, + + NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN, + NL80211_ATTR_POWER_RULE_MAX_EIRP, + + /* keep last */ + __NL80211_REG_RULE_ATTR_AFTER_LAST, + NL80211_REG_RULE_ATTR_MAX = __NL80211_REG_RULE_ATTR_AFTER_LAST - 1 +}; + +/** + * enum nl80211_reg_rule_flags - regulatory rule flags + * + * @NL80211_RRF_NO_OFDM: OFDM modulation not allowed + * @NL80211_RRF_NO_CCK: CCK modulation not allowed + * @NL80211_RRF_NO_INDOOR: indoor operation not allowed + * @NL80211_RRF_NO_OUTDOOR: outdoor operation not allowed + * @NL80211_RRF_DFS: DFS support is required to be used + * @NL80211_RRF_PTP_ONLY: this is only for Point To Point links + * @NL80211_RRF_PTMP_ONLY: this is only for Point To Multi Point links + * @NL80211_RRF_PASSIVE_SCAN: passive scan is required + * @NL80211_RRF_NO_IBSS: no IBSS is allowed + */ +enum nl80211_reg_rule_flags { + NL80211_RRF_NO_OFDM = 1<<0, + NL80211_RRF_NO_CCK = 1<<1, + NL80211_RRF_NO_INDOOR = 1<<2, + NL80211_RRF_NO_OUTDOOR = 1<<3, + NL80211_RRF_DFS = 1<<4, + NL80211_RRF_PTP_ONLY = 1<<5, + NL80211_RRF_PTMP_ONLY = 1<<6, + NL80211_RRF_PASSIVE_SCAN = 1<<7, + NL80211_RRF_NO_IBSS = 1<<8, +}; + +/** * enum nl80211_mntr_flags - monitor configuration flags * * Monitor configuration flags.
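The regulatory rule attributes above carry frequencies in kHz and power values in hundredths of a decibel (mBm = 100 * dBm, mBi = 100 * dBi), with the NL80211_RRF_* values combined into a bitmask. A rough userspace sketch, not part of the patch, of interpreting one rule once its nested attributes have been parsed; the struct and numeric values are invented, the netlink parsing is omitted, and the patched <linux/nl80211.h> is assumed to be installed:

#include <stdio.h>
#include <stdint.h>
#include <linux/nl80211.h>

/* Illustrative only -- not a kernel structure.  Holds one rule after the
 * nested NL80211_ATTR_REG_RULES attributes have been parsed out. */
struct parsed_reg_rule {
	uint32_t flags;		/* bitmask of NL80211_RRF_* */
	uint32_t start_khz;	/* NL80211_ATTR_FREQ_RANGE_START */
	uint32_t end_khz;	/* NL80211_ATTR_FREQ_RANGE_END */
	uint32_t max_bw_khz;	/* NL80211_ATTR_FREQ_RANGE_MAX_BW */
	uint32_t max_eirp_mbm;	/* NL80211_ATTR_POWER_RULE_MAX_EIRP */
};

int main(void)
{
	/* Made-up example: 2400-2483.5 MHz, 40 MHz max bandwidth, 20 dBm EIRP */
	struct parsed_reg_rule r = {
		.flags = NL80211_RRF_NO_IBSS,
		.start_khz = 2400000,
		.end_khz = 2483500,
		.max_bw_khz = 40000,
		.max_eirp_mbm = 2000,	/* power is in mBm, i.e. 100 * dBm */
	};

	printf("%u-%u kHz, max BW %u kHz, max EIRP %.2f dBm%s%s\n",
	       (unsigned)r.start_khz, (unsigned)r.end_khz,
	       (unsigned)r.max_bw_khz, r.max_eirp_mbm / 100.0,
	       (r.flags & NL80211_RRF_NO_IBSS) ? ", no IBSS" : "",
	       (r.flags & NL80211_RRF_PASSIVE_SCAN) ? ", passive scan" : "");
	return 0;
}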
diff --git a/include/linux/notifier.h b/include/linux/notifier.h index da2698b0fdd..b86fa2ffca0 100644 --- a/include/linux/notifier.h +++ b/include/linux/notifier.h @@ -213,9 +213,16 @@ static inline int notifier_to_errno(int ret) #define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */ #define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */ #define CPU_DYING 0x0008 /* CPU (unsigned)v not running any task, - * not handling interrupts, soon dead */ + * not handling interrupts, soon dead. + * Called on the dying cpu, interrupts + * are already disabled. Must not + * sleep, must not fail */ #define CPU_POST_DEAD 0x0009 /* CPU (unsigned)v dead, cpu_hotplug * lock is dropped */ +#define CPU_STARTING 0x000A /* CPU (unsigned)v soon running. + * Called on the new cpu, just before + * enabling interrupts. Must not sleep, + * must not fail */ /* Used for CPU hotplug events occurring while tasks are frozen due to a suspend * operation in progress @@ -229,6 +236,7 @@ static inline int notifier_to_errno(int ret) #define CPU_DOWN_FAILED_FROZEN (CPU_DOWN_FAILED | CPU_TASKS_FROZEN) #define CPU_DEAD_FROZEN (CPU_DEAD | CPU_TASKS_FROZEN) #define CPU_DYING_FROZEN (CPU_DYING | CPU_TASKS_FROZEN) +#define CPU_STARTING_FROZEN (CPU_STARTING | CPU_TASKS_FROZEN) /* Hibernation and suspend events */ #define PM_HIBERNATION_PREPARE 0x0001 /* Going to hibernate */ diff --git a/include/linux/pci.h b/include/linux/pci.h index c0e14008a3c..98dc6243a70 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -534,7 +534,7 @@ extern void pci_sort_breadthfirst(void); #ifdef CONFIG_PCI_LEGACY struct pci_dev __deprecated *pci_find_device(unsigned int vendor, unsigned int device, - const struct pci_dev *from); + struct pci_dev *from); struct pci_dev __deprecated *pci_find_slot(unsigned int bus, unsigned int devfn); #endif /* CONFIG_PCI_LEGACY */ @@ -550,7 +550,7 @@ struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from); struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device, unsigned int ss_vendor, unsigned int ss_device, - const struct pci_dev *from); + struct pci_dev *from); struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn); struct pci_dev *pci_get_bus_and_slot(unsigned int bus, unsigned int devfn); struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from); @@ -816,7 +816,7 @@ _PCI_NOP_ALL(write,) static inline struct pci_dev *pci_find_device(unsigned int vendor, unsigned int device, - const struct pci_dev *from) + struct pci_dev *from) { return NULL; } @@ -838,7 +838,7 @@ static inline struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device, unsigned int ss_vendor, unsigned int ss_device, - const struct pci_dev *from) + struct pci_dev *from) { return NULL; } diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index f1624b39675..a65b082a888 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -1411,6 +1411,8 @@ #define PCI_DEVICE_ID_EICON_MAESTRAQ_U 0xe013 #define PCI_DEVICE_ID_EICON_MAESTRAP 0xe014 +#define PCI_VENDOR_ID_CISCO 0x1137 + #define PCI_VENDOR_ID_ZIATECH 0x1138 #define PCI_DEVICE_ID_ZIATECH_5550_HC 0x5550 @@ -2213,6 +2215,7 @@ #define PCI_VENDOR_ID_ATTANSIC 0x1969 #define PCI_DEVICE_ID_ATTANSIC_L1 0x1048 +#define PCI_DEVICE_ID_ATTANSIC_L2 0x2048 #define PCI_VENDOR_ID_JMICRON 0x197B #define PCI_DEVICE_ID_JMICRON_JMB360 0x2360 @@ -2244,6 +2247,16 @@ #define PCI_DEVICE_ID_3DLABS_PERMEDIA2 0x0007 #define PCI_DEVICE_ID_3DLABS_PERMEDIA2V 0x0009 +#define PCI_VENDOR_ID_NETXEN 
0x4040 +#define PCI_DEVICE_ID_NX2031_10GXSR 0x0001 +#define PCI_DEVICE_ID_NX2031_10GCX4 0x0002 +#define PCI_DEVICE_ID_NX2031_4GCU 0x0003 +#define PCI_DEVICE_ID_NX2031_IMEZ 0x0004 +#define PCI_DEVICE_ID_NX2031_HMEZ 0x0005 +#define PCI_DEVICE_ID_NX2031_XG_MGMT 0x0024 +#define PCI_DEVICE_ID_NX2031_XG_MGMT2 0x0025 +#define PCI_DEVICE_ID_NX3031 0x0100 + #define PCI_VENDOR_ID_AKS 0x416c #define PCI_DEVICE_ID_AKS_ALADDINCARD 0x0100 diff --git a/include/linux/pfkeyv2.h b/include/linux/pfkeyv2.h index 700725ddcaa..01b262959f2 100644 --- a/include/linux/pfkeyv2.h +++ b/include/linux/pfkeyv2.h @@ -226,6 +226,15 @@ struct sadb_x_sec_ctx { } __attribute__((packed)); /* sizeof(struct sadb_sec_ctx) = 8 */ +/* Used by MIGRATE to pass addresses IKE will use to perform + * negotiation with the peer */ +struct sadb_x_kmaddress { + uint16_t sadb_x_kmaddress_len; + uint16_t sadb_x_kmaddress_exttype; + uint32_t sadb_x_kmaddress_reserved; +} __attribute__((packed)); +/* sizeof(struct sadb_x_kmaddress) == 8 */ + /* Message types */ #define SADB_RESERVED 0 #define SADB_GETSPI 1 @@ -346,7 +355,9 @@ struct sadb_x_sec_ctx { #define SADB_X_EXT_NAT_T_DPORT 22 #define SADB_X_EXT_NAT_T_OA 23 #define SADB_X_EXT_SEC_CTX 24 -#define SADB_EXT_MAX 24 +/* Used with MIGRATE to pass @ to IKE for negotiation */ +#define SADB_X_EXT_KMADDRESS 25 +#define SADB_EXT_MAX 25 /* Identity Extension values */ #define SADB_IDENTTYPE_RESERVED 0 diff --git a/include/linux/phonet.h b/include/linux/phonet.h new file mode 100644 index 00000000000..c9609f9aeda --- /dev/null +++ b/include/linux/phonet.h @@ -0,0 +1,170 @@ +/** + * file phonet.h + * + * Phonet sockets kernel interface + * + * Copyright (C) 2008 Nokia Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#ifndef LINUX_PHONET_H +#define LINUX_PHONET_H + +/* Automatic protocol selection */ +#define PN_PROTO_TRANSPORT 0 +/* Phonet datagram socket */ +#define PN_PROTO_PHONET 1 +/* Phonet pipe */ +#define PN_PROTO_PIPE 2 +#define PHONET_NPROTO 3 + +/* Socket options for SOL_PNPIPE level */ +#define PNPIPE_ENCAP 1 +#define PNPIPE_IFINDEX 2 + +#define PNADDR_ANY 0 +#define PNPORT_RESOURCE_ROUTING 0 + +/* Values for PNPIPE_ENCAP option */ +#define PNPIPE_ENCAP_NONE 0 +#define PNPIPE_ENCAP_IP 1 + +/* ioctls */ +#define SIOCPNGETOBJECT (SIOCPROTOPRIVATE + 0) + +/* Phonet protocol header */ +struct phonethdr { + __u8 pn_rdev; + __u8 pn_sdev; + __u8 pn_res; + __be16 pn_length; + __u8 pn_robj; + __u8 pn_sobj; +} __attribute__((packed)); + +/* Common Phonet payload header */ +struct phonetmsg { + __u8 pn_trans_id; /* transaction ID */ + __u8 pn_msg_id; /* message type */ + union { + struct { + __u8 pn_submsg_id; /* message subtype */ + __u8 pn_data[5]; + } base; + struct { + __u16 pn_e_res_id; /* extended resource ID */ + __u8 pn_e_submsg_id; /* message subtype */ + __u8 pn_e_data[3]; + } ext; + } pn_msg_u; +}; +#define PN_COMMON_MESSAGE 0xF0 +#define PN_PREFIX 0xE0 /* resource for extended messages */ +#define pn_submsg_id pn_msg_u.base.pn_submsg_id +#define pn_e_submsg_id pn_msg_u.ext.pn_e_submsg_id +#define pn_e_res_id pn_msg_u.ext.pn_e_res_id +#define pn_data pn_msg_u.base.pn_data +#define pn_e_data pn_msg_u.ext.pn_e_data + +/* data for unreachable errors */ +#define PN_COMM_SERVICE_NOT_IDENTIFIED_RESP 0x01 +#define PN_COMM_ISA_ENTITY_NOT_REACHABLE_RESP 0x14 +#define pn_orig_msg_id pn_data[0] +#define pn_status pn_data[1] +#define pn_e_orig_msg_id pn_e_data[0] +#define pn_e_status pn_e_data[1] + +/* Phonet socket address structure */ +struct sockaddr_pn { + sa_family_t spn_family; + __u8 spn_obj; + __u8 spn_dev; + __u8 spn_resource; + __u8 spn_zero[sizeof(struct sockaddr) - sizeof(sa_family_t) - 3]; +} __attribute__ ((packed)); + +static inline __u16 pn_object(__u8 addr, __u16 port) +{ + return (addr << 8) | (port & 0x3ff); +} + +static inline __u8 pn_obj(__u16 handle) +{ + return handle & 0xff; +} + +static inline __u8 pn_dev(__u16 handle) +{ + return handle >> 8; +} + +static inline __u16 pn_port(__u16 handle) +{ + return handle & 0x3ff; +} + +static inline __u8 pn_addr(__u16 handle) +{ + return (handle >> 8) & 0xfc; +} + +static inline void pn_sockaddr_set_addr(struct sockaddr_pn *spn, __u8 addr) +{ + spn->spn_dev &= 0x03; + spn->spn_dev |= addr & 0xfc; +} + +static inline void pn_sockaddr_set_port(struct sockaddr_pn *spn, __u16 port) +{ + spn->spn_dev &= 0xfc; + spn->spn_dev |= (port >> 8) & 0x03; + spn->spn_obj = port & 0xff; +} + +static inline void pn_sockaddr_set_object(struct sockaddr_pn *spn, + __u16 handle) +{ + spn->spn_dev = pn_dev(handle); + spn->spn_obj = pn_obj(handle); +} + +static inline void pn_sockaddr_set_resource(struct sockaddr_pn *spn, + __u8 resource) +{ + spn->spn_resource = resource; +} + +static inline __u8 pn_sockaddr_get_addr(const struct sockaddr_pn *spn) +{ + return spn->spn_dev & 0xfc; +} + +static inline __u16 pn_sockaddr_get_port(const struct sockaddr_pn *spn) +{ + return ((spn->spn_dev & 0x03) << 8) | spn->spn_obj; +} + +static inline __u16 pn_sockaddr_get_object(const struct sockaddr_pn *spn) +{ + return pn_object(spn->spn_dev, 
spn->spn_obj); +} + +static inline __u8 pn_sockaddr_get_resource(const struct sockaddr_pn *spn) +{ + return spn->spn_resource; +} + +#endif diff --git a/include/linux/phy.h b/include/linux/phy.h index 7224c4099a2..77c4ed60b98 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -99,7 +99,14 @@ struct mii_bus { */ struct mutex mdio_lock; - struct device *dev; + struct device *parent; + enum { + MDIOBUS_ALLOCATED = 1, + MDIOBUS_REGISTERED, + MDIOBUS_UNREGISTERED, + MDIOBUS_RELEASED, + } state; + struct device dev; /* list of all PHYs on bus */ struct phy_device *phy_map[PHY_MAX_ADDR]; @@ -113,6 +120,16 @@ struct mii_bus { */ int *irq; }; +#define to_mii_bus(d) container_of(d, struct mii_bus, dev) + +struct mii_bus *mdiobus_alloc(void); +int mdiobus_register(struct mii_bus *bus); +void mdiobus_unregister(struct mii_bus *bus); +void mdiobus_free(struct mii_bus *bus); +struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr); +int mdiobus_read(struct mii_bus *bus, int addr, u16 regnum); +int mdiobus_write(struct mii_bus *bus, int addr, u16 regnum, u16 val); + #define PHY_INTERRUPT_DISABLED 0x0 #define PHY_INTERRUPT_ENABLED 0x80000000 @@ -391,8 +408,35 @@ struct phy_fixup { int (*run)(struct phy_device *phydev); }; -int phy_read(struct phy_device *phydev, u16 regnum); -int phy_write(struct phy_device *phydev, u16 regnum, u16 val); +/** + * phy_read - Convenience function for reading a given PHY register + * @phydev: the phy_device struct + * @regnum: register number to read + * + * NOTE: MUST NOT be called from interrupt context, + * because the bus read/write functions may wait for an interrupt + * to conclude the operation. + */ +static inline int phy_read(struct phy_device *phydev, u16 regnum) +{ + return mdiobus_read(phydev->bus, phydev->addr, regnum); +} + +/** + * phy_write - Convenience function for writing a given PHY register + * @phydev: the phy_device struct + * @regnum: register number to write + * @val: value to write to @regnum + * + * NOTE: MUST NOT be called from interrupt context, + * because the bus read/write functions may wait for an interrupt + * to conclude the operation. 
+ */ +static inline int phy_write(struct phy_device *phydev, u16 regnum, u16 val) +{ + return mdiobus_write(phydev->bus, phydev->addr, regnum, val); +} + int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id); struct phy_device* get_phy_device(struct mii_bus *bus, int addr); int phy_clear_interrupt(struct phy_device *phydev); @@ -408,8 +452,6 @@ void phy_start(struct phy_device *phydev); void phy_stop(struct phy_device *phydev); int phy_start_aneg(struct phy_device *phydev); -int mdiobus_register(struct mii_bus *bus); -void mdiobus_unregister(struct mii_bus *bus); void phy_sanitize_settings(struct phy_device *phydev); int phy_stop_interrupts(struct phy_device *phydev); int phy_enable_interrupts(struct phy_device *phydev); diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h index e5de421ac7b..5d921fa91a5 100644 --- a/include/linux/pkt_sched.h +++ b/include/linux/pkt_sched.h @@ -123,6 +123,13 @@ struct tc_prio_qopt __u8 priomap[TC_PRIO_MAX+1]; /* Map: logical priority -> PRIO band */ }; +/* MULTIQ section */ + +struct tc_multiq_qopt { + __u16 bands; /* Number of bands */ + __u16 max_bands; /* Maximum number of queues */ +}; + /* TBF section */ struct tc_tbf_qopt diff --git a/include/linux/proportions.h b/include/linux/proportions.h index 5afc1b23346..cf793bbbd05 100644 --- a/include/linux/proportions.h +++ b/include/linux/proportions.h @@ -104,8 +104,8 @@ struct prop_local_single { * snapshot of the last seen global state * and a lock protecting this state */ - int shift; unsigned long period; + int shift; spinlock_t lock; /* protect the snapshot state */ }; diff --git a/include/linux/ramfs.h b/include/linux/ramfs.h index b160fb18e8d..37aaf2b3986 100644 --- a/include/linux/ramfs.h +++ b/include/linux/ramfs.h @@ -6,6 +6,7 @@ extern int ramfs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data, struct vfsmount *mnt); #ifndef CONFIG_MMU +extern int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize); extern unsigned long ramfs_nommu_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h index 4ab84362272..5f89b62e698 100644 --- a/include/linux/rcuclassic.h +++ b/include/linux/rcuclassic.h @@ -40,12 +40,21 @@ #include <linux/cpumask.h> #include <linux/seqlock.h> +#ifdef CONFIG_RCU_CPU_STALL_DETECTOR +#define RCU_SECONDS_TILL_STALL_CHECK ( 3 * HZ) /* for rcp->jiffies_stall */ +#define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ) /* for rcp->jiffies_stall */ +#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ /* Global control variables for rcupdate callback mechanism. */ struct rcu_ctrlblk { long cur; /* Current batch number. */ long completed; /* Number of the last completed batch */ - int next_pending; /* Is the next batch already waiting? */ + long pending; /* Number of the last pending batch */ +#ifdef CONFIG_RCU_CPU_STALL_DETECTOR + unsigned long gp_start; /* Time at which GP started in jiffies. */ + unsigned long jiffies_stall; + /* Time at which to check for CPU stalls. */ +#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ int signaled; @@ -66,11 +75,7 @@ static inline int rcu_batch_after(long a, long b) return (a - b) > 0; } -/* - * Per-CPU data for Read-Copy UPdate. - * nxtlist - new callbacks are added here - * curlist - current batch for which quiescent cycle started if any - */ +/* Per-CPU data for Read-Copy UPdate. 
*/ struct rcu_data { /* 1) quiescent state handling : */ long quiescbatch; /* Batch # for grace period */ @@ -78,12 +83,24 @@ struct rcu_data { int qs_pending; /* core waits for quiesc state */ /* 2) batch handling */ - long batch; /* Batch # for current RCU batch */ + /* + * if nxtlist is not NULL, then: + * batch: + * The batch # for the last entry of nxtlist + * [*nxttail[1], NULL = *nxttail[2]): + * Entries that batch # <= batch + * [*nxttail[0], *nxttail[1]): + * Entries that batch # <= batch - 1 + * [nxtlist, *nxttail[0]): + * Entries that batch # <= batch - 2 + * The grace period for these entries has completed, and + * the other grace-period-completed entries may be moved + * here temporarily in rcu_process_callbacks(). + */ + long batch; struct rcu_head *nxtlist; - struct rcu_head **nxttail; + struct rcu_head **nxttail[3]; long qlen; /* # of queued callbacks */ - struct rcu_head *curlist; - struct rcu_head **curtail; struct rcu_head *donelist; struct rcu_head **donetail; long blimit; /* Upper limit on a processed batch */ diff --git a/include/linux/rculist.h b/include/linux/rculist.h index eb4443c7e05..e649bd3f2c9 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -198,20 +198,6 @@ static inline void list_splice_init_rcu(struct list_head *list, at->prev = last; } -/** - * list_for_each_rcu - iterate over an rcu-protected list - * @pos: the &struct list_head to use as a loop cursor. - * @head: the head for your list. - * - * This list-traversal primitive may safely run concurrently with - * the _rcu list-mutation primitives such as list_add_rcu() - * as long as the traversal is guarded by rcu_read_lock(). - */ -#define list_for_each_rcu(pos, head) \ - for (pos = rcu_dereference((head)->next); \ - prefetch(pos->next), pos != (head); \ - pos = rcu_dereference(pos->next)) - #define __list_for_each_rcu(pos, head) \ for (pos = rcu_dereference((head)->next); \ pos != (head); \ diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index e8b4039cfb2..86f1f5e43e3 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -133,6 +133,26 @@ struct rcu_head { #define rcu_read_unlock_bh() __rcu_read_unlock_bh() /** + * rcu_read_lock_sched - mark the beginning of a RCU-classic critical section + * + * Should be used with either + * - synchronize_sched() + * or + * - call_rcu_sched() and rcu_barrier_sched() + * on the write-side to insure proper synchronization. + */ +#define rcu_read_lock_sched() preempt_disable() + +/* + * rcu_read_unlock_sched - marks the end of a RCU-classic critical section + * + * See rcu_read_lock_sched for more information. + */ +#define rcu_read_unlock_sched() preempt_enable() + + + +/** * rcu_dereference - fetch an RCU-protected pointer in an * RCU read-side critical section. This pointer may later * be safely dereferenced. diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h index 0967f03b070..3e05c09b54a 100644 --- a/include/linux/rcupreempt.h +++ b/include/linux/rcupreempt.h @@ -57,7 +57,13 @@ static inline void rcu_qsctr_inc(int cpu) rdssp->sched_qs++; } #define rcu_bh_qsctr_inc(cpu) -#define call_rcu_bh(head, rcu) call_rcu(head, rcu) + +/* + * Someone might want to pass call_rcu_bh as a function pointer. + * So this needs to just be a rename and not a macro function. + * (no parentheses) + */ +#define call_rcu_bh call_rcu /** * call_rcu_sched - Queue RCU callback for invocation after sched grace period. 
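The rcu_read_lock_sched()/rcu_read_unlock_sched() markers added above reduce to preempt_disable()/preempt_enable() and are meant to pair with synchronize_sched() or call_rcu_sched() on the update side. A minimal kernel-context sketch, not part of the patch, with the structure and symbol names invented for illustration:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_cfg {
	int threshold;
};

static struct my_cfg *my_cfg_ptr;	/* written by the updater, read by readers */

static int reader_peek_threshold(void)
{
	struct my_cfg *cfg;
	int val = -1;

	rcu_read_lock_sched();		/* i.e. preempt_disable() */
	cfg = rcu_dereference(my_cfg_ptr);
	if (cfg)
		val = cfg->threshold;
	rcu_read_unlock_sched();
	return val;
}

static void updater_replace(struct my_cfg *newcfg)
{
	struct my_cfg *old = my_cfg_ptr;

	rcu_assign_pointer(my_cfg_ptr, newcfg);
	synchronize_sched();		/* wait for all sched read-side sections */
	kfree(old);
}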
@@ -111,7 +117,6 @@ extern struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu); struct softirq_action; #ifdef CONFIG_NO_HZ -DECLARE_PER_CPU(struct rcu_dyntick_sched, rcu_dyntick_sched); static inline void rcu_enter_nohz(void) { @@ -126,8 +131,8 @@ static inline void rcu_exit_nohz(void) { static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1); - smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */ __get_cpu_var(rcu_dyntick_sched).dynticks++; + smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */ WARN_ON_RATELIMIT(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1), &rs); } diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h index 741d1a62cc3..4cd64b0d982 100644 --- a/include/linux/rfkill.h +++ b/include/linux/rfkill.h @@ -49,6 +49,7 @@ enum rfkill_state { RFKILL_STATE_SOFT_BLOCKED = 0, /* Radio output blocked */ RFKILL_STATE_UNBLOCKED = 1, /* Radio output allowed */ RFKILL_STATE_HARD_BLOCKED = 2, /* Output blocked, non-overrideable */ + RFKILL_STATE_MAX, /* marker for last valid state */ }; /* @@ -110,12 +111,14 @@ struct rfkill { }; #define to_rfkill(d) container_of(d, struct rfkill, dev) -struct rfkill *rfkill_allocate(struct device *parent, enum rfkill_type type); +struct rfkill * __must_check rfkill_allocate(struct device *parent, + enum rfkill_type type); void rfkill_free(struct rfkill *rfkill); -int rfkill_register(struct rfkill *rfkill); +int __must_check rfkill_register(struct rfkill *rfkill); void rfkill_unregister(struct rfkill *rfkill); int rfkill_force_state(struct rfkill *rfkill, enum rfkill_state state); +int rfkill_set_default(enum rfkill_type type, enum rfkill_state state); /** * rfkill_state_complement - return complementar state diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index ca643b13b02..2b3d51c6ec9 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -582,6 +582,10 @@ enum rtnetlink_groups { #define RTNLGRP_IPV6_RULE RTNLGRP_IPV6_RULE RTNLGRP_ND_USEROPT, #define RTNLGRP_ND_USEROPT RTNLGRP_ND_USEROPT + RTNLGRP_PHONET_IFADDR, +#define RTNLGRP_PHONET_IFADDR RTNLGRP_PHONET_IFADDR + RTNLGRP_PHONET_ROUTE, +#define RTNLGRP_PHONET_ROUTE RTNLGRP_PHONET_ROUTE __RTNLGRP_MAX }; #define RTNLGRP_MAX (__RTNLGRP_MAX - 1) diff --git a/include/linux/sched.h b/include/linux/sched.h index 3d9120c5ad1..5d0819ee442 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -451,8 +451,8 @@ struct signal_struct { * - everyone except group_exit_task is stopped during signal delivery * of fatal signals, group_exit_task processes the signal. 
*/ - struct task_struct *group_exit_task; int notify_count; + struct task_struct *group_exit_task; /* thread group stop support, overloads group_exit_code too */ int group_stop_count; @@ -824,6 +824,9 @@ struct sched_domain { unsigned int ttwu_move_affine; unsigned int ttwu_move_balance; #endif +#ifdef CONFIG_SCHED_DEBUG + char *name; +#endif }; extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new, @@ -897,7 +900,7 @@ struct sched_class { void (*yield_task) (struct rq *rq); int (*select_task_rq)(struct task_struct *p, int sync); - void (*check_preempt_curr) (struct rq *rq, struct task_struct *p); + void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int sync); struct task_struct * (*pick_next_task) (struct rq *rq); void (*put_prev_task) (struct rq *rq, struct task_struct *p); @@ -1010,8 +1013,8 @@ struct sched_entity { struct sched_rt_entity { struct list_head run_list; - unsigned int time_slice; unsigned long timeout; + unsigned int time_slice; int nr_cpus_allowed; struct sched_rt_entity *back; diff --git a/include/linux/security.h b/include/linux/security.h index 80c4d002864..f5c4a51eb42 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -1560,11 +1560,6 @@ struct security_operations { extern int security_init(void); extern int security_module_enable(struct security_operations *ops); extern int register_security(struct security_operations *ops); -extern struct dentry *securityfs_create_file(const char *name, mode_t mode, - struct dentry *parent, void *data, - const struct file_operations *fops); -extern struct dentry *securityfs_create_dir(const char *name, struct dentry *parent); -extern void securityfs_remove(struct dentry *dentry); /* Security operations */ int security_ptrace_may_access(struct task_struct *child, unsigned int mode); @@ -2424,25 +2419,6 @@ static inline int security_netlink_recv(struct sk_buff *skb, int cap) return cap_netlink_recv(skb, cap); } -static inline struct dentry *securityfs_create_dir(const char *name, - struct dentry *parent) -{ - return ERR_PTR(-ENODEV); -} - -static inline struct dentry *securityfs_create_file(const char *name, - mode_t mode, - struct dentry *parent, - void *data, - const struct file_operations *fops) -{ - return ERR_PTR(-ENODEV); -} - -static inline void securityfs_remove(struct dentry *dentry) -{ -} - static inline int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen) { return -EOPNOTSUPP; @@ -2806,5 +2782,35 @@ static inline void security_audit_rule_free(void *lsmrule) #endif /* CONFIG_SECURITY */ #endif /* CONFIG_AUDIT */ +#ifdef CONFIG_SECURITYFS + +extern struct dentry *securityfs_create_file(const char *name, mode_t mode, + struct dentry *parent, void *data, + const struct file_operations *fops); +extern struct dentry *securityfs_create_dir(const char *name, struct dentry *parent); +extern void securityfs_remove(struct dentry *dentry); + +#else /* CONFIG_SECURITYFS */ + +static inline struct dentry *securityfs_create_dir(const char *name, + struct dentry *parent) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct dentry *securityfs_create_file(const char *name, + mode_t mode, + struct dentry *parent, + void *data, + const struct file_operations *fops) +{ + return ERR_PTR(-ENODEV); +} + +static inline void securityfs_remove(struct dentry *dentry) +{} + +#endif + #endif /* ! 
__LINUX_SECURITY_H */ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 90992371783..2725f4e5a9b 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -146,8 +146,14 @@ struct skb_shared_info { unsigned short gso_segs; unsigned short gso_type; __be32 ip6_frag_id; +#ifdef CONFIG_HAS_DMA + unsigned int num_dma_maps; +#endif struct sk_buff *frag_list; skb_frag_t frags[MAX_SKB_FRAGS]; +#ifdef CONFIG_HAS_DMA + dma_addr_t dma_maps[MAX_SKB_FRAGS + 1]; +#endif }; /* We divide dataref into two halves. The higher 16 bits hold references @@ -353,6 +359,14 @@ struct sk_buff { #include <asm/system.h> +#ifdef CONFIG_HAS_DMA +#include <linux/dma-mapping.h> +extern int skb_dma_map(struct device *dev, struct sk_buff *skb, + enum dma_data_direction dir); +extern void skb_dma_unmap(struct device *dev, struct sk_buff *skb, + enum dma_data_direction dir); +#endif + extern void kfree_skb(struct sk_buff *skb); extern void __kfree_skb(struct sk_buff *skb); extern struct sk_buff *__alloc_skb(unsigned int size, @@ -369,6 +383,8 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size, return __alloc_skb(size, priority, 1, -1); } +extern int skb_recycle_check(struct sk_buff *skb, int skb_size); + extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src); extern struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority); @@ -459,6 +475,37 @@ static inline int skb_queue_empty(const struct sk_buff_head *list) } /** + * skb_queue_is_last - check if skb is the last entry in the queue + * @list: queue head + * @skb: buffer + * + * Returns true if @skb is the last buffer on the list. + */ +static inline bool skb_queue_is_last(const struct sk_buff_head *list, + const struct sk_buff *skb) +{ + return (skb->next == (struct sk_buff *) list); +} + +/** + * skb_queue_next - return the next packet in the queue + * @list: queue head + * @skb: current buffer + * + * Return the next packet in @list after @skb. It is only valid to + * call this if skb_queue_is_last() evaluates to false. + */ +static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list, + const struct sk_buff *skb) +{ + /* This BUG_ON may seem severe, but if we just return then we + * are going to dereference garbage. + */ + BUG_ON(skb_queue_is_last(list, skb)); + return skb->next; +} + +/** * skb_get - reference buffer * @skb: buffer to reference * @@ -646,6 +693,22 @@ static inline __u32 skb_queue_len(const struct sk_buff_head *list_) return list_->qlen; } +/** + * __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head + * @list: queue to initialize + * + * This initializes only the list and queue length aspects of + * an sk_buff_head object. This allows to initialize the list + * aspects of an sk_buff_head without reinitializing things like + * the spinlock. It can also be used for on-stack sk_buff_head + * objects where the spinlock is known to not be used. 
+ */ +static inline void __skb_queue_head_init(struct sk_buff_head *list) +{ + list->prev = list->next = (struct sk_buff *)list; + list->qlen = 0; +} + /* * This function creates a split out lock class for each invocation; * this is needed for now since a whole lot of users of the skb-queue @@ -657,8 +720,7 @@ static inline __u32 skb_queue_len(const struct sk_buff_head *list_) static inline void skb_queue_head_init(struct sk_buff_head *list) { spin_lock_init(&list->lock); - list->prev = list->next = (struct sk_buff *)list; - list->qlen = 0; + __skb_queue_head_init(list); } static inline void skb_queue_head_init_class(struct sk_buff_head *list, @@ -685,6 +747,83 @@ static inline void __skb_insert(struct sk_buff *newsk, list->qlen++; } +static inline void __skb_queue_splice(const struct sk_buff_head *list, + struct sk_buff *prev, + struct sk_buff *next) +{ + struct sk_buff *first = list->next; + struct sk_buff *last = list->prev; + + first->prev = prev; + prev->next = first; + + last->next = next; + next->prev = last; +} + +/** + * skb_queue_splice - join two skb lists, this is designed for stacks + * @list: the new list to add + * @head: the place to add it in the first list + */ +static inline void skb_queue_splice(const struct sk_buff_head *list, + struct sk_buff_head *head) +{ + if (!skb_queue_empty(list)) { + __skb_queue_splice(list, (struct sk_buff *) head, head->next); + head->qlen += list->qlen; + } +} + +/** + * skb_queue_splice_init - join two skb lists and reinitialise the emptied list + * @list: the new list to add + * @head: the place to add it in the first list + * + * The list at @list is reinitialised + */ +static inline void skb_queue_splice_init(struct sk_buff_head *list, + struct sk_buff_head *head) +{ + if (!skb_queue_empty(list)) { + __skb_queue_splice(list, (struct sk_buff *) head, head->next); + head->qlen += list->qlen; + __skb_queue_head_init(list); + } +} + +/** + * skb_queue_splice_tail - join two skb lists, each list being a queue + * @list: the new list to add + * @head: the place to add it in the first list + */ +static inline void skb_queue_splice_tail(const struct sk_buff_head *list, + struct sk_buff_head *head) +{ + if (!skb_queue_empty(list)) { + __skb_queue_splice(list, head->prev, (struct sk_buff *) head); + head->qlen += list->qlen; + } +} + +/** + * skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list + * @list: the new list to add + * @head: the place to add it in the first list + * + * Each of the lists is a queue.
+ * The list at @list is reinitialised + */ +static inline void skb_queue_splice_tail_init(struct sk_buff_head *list, + struct sk_buff_head *head) +{ + if (!skb_queue_empty(list)) { + __skb_queue_splice(list, head->prev, (struct sk_buff *) head); + head->qlen += list->qlen; + __skb_queue_head_init(list); + } +} + /** * __skb_queue_after - queue a buffer at the list head * @list: list to use @@ -829,6 +968,9 @@ static inline void skb_fill_page_desc(struct sk_buff *skb, int i, skb_shinfo(skb)->nr_frags = i + 1; } +extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, + int off, int size); + #define SKB_PAGE_ASSERT(skb) BUG_ON(skb_shinfo(skb)->nr_frags) #define SKB_FRAG_ASSERT(skb) BUG_ON(skb_shinfo(skb)->frag_list) #define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb)) @@ -1243,6 +1385,26 @@ static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev, return __netdev_alloc_skb(dev, length, GFP_ATOMIC); } +extern struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask); + +/** + * netdev_alloc_page - allocate a page for ps-rx on a specific device + * @dev: network device to receive on + * + * Allocate a new page node local to the specified device. + * + * %NULL is returned if there is no free memory. + */ +static inline struct page *netdev_alloc_page(struct net_device *dev) +{ + return __netdev_alloc_page(dev, GFP_ATOMIC); +} + +static inline void netdev_free_page(struct net_device *dev, struct page *page) +{ + __free_page(page); +} + /** * skb_clone_writable - is the header of a clone writable * @skb: buffer to check @@ -1434,6 +1596,15 @@ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len) skb != (struct sk_buff *)(queue); \ skb = tmp, tmp = skb->next) +#define skb_queue_walk_from(queue, skb) \ + for (; prefetch(skb->next), (skb != (struct sk_buff *)(queue)); \ + skb = skb->next) + +#define skb_queue_walk_from_safe(queue, skb, tmp) \ + for (tmp = skb->next; \ + skb != (struct sk_buff *)(queue); \ + skb = tmp, tmp = skb->next) + #define skb_queue_reverse_walk(queue, skb) \ for (skb = (queue)->prev; \ prefetch(skb->prev), (skb != (struct sk_buff *)(queue)); \ diff --git a/include/linux/smb.h b/include/linux/smb.h index caa43b2370c..82fefddc598 100644 --- a/include/linux/smb.h +++ b/include/linux/smb.h @@ -11,7 +11,9 @@ #include <linux/types.h> #include <linux/magic.h> +#ifdef __KERNEL__ #include <linux/time.h> +#endif enum smb_protocol { SMB_PROTOCOL_NONE, diff --git a/include/linux/socket.h b/include/linux/socket.h index dc5086fe773..20fc4bbfca4 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -190,7 +190,8 @@ struct ucred { #define AF_IUCV 32 /* IUCV sockets */ #define AF_RXRPC 33 /* RxRPC sockets */ #define AF_ISDN 34 /* mISDN sockets */ -#define AF_MAX 35 /* For now.. */ +#define AF_PHONET 35 /* Phonet sockets */ +#define AF_MAX 36 /* For now.. */ /* Protocol families, same as address families. */ #define PF_UNSPEC AF_UNSPEC @@ -227,6 +228,7 @@ struct ucred { #define PF_IUCV AF_IUCV #define PF_RXRPC AF_RXRPC #define PF_ISDN AF_ISDN +#define PF_PHONET AF_PHONET #define PF_MAX AF_MAX /* Maximum queue length specifiable by listen. 
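With PF_PHONET/AF_PHONET added above and the sockaddr_pn helpers from the new <linux/phonet.h> earlier in this patch, a Phonet datagram socket could be opened from userspace roughly as follows. This is an illustrative sketch only, assuming headers and a kernel with this patch applied; the device address and resource number are made-up values:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/phonet.h>

/* From the linux/socket.h hunk above; guarded in case the libc headers
 * in use predate this patch. */
#ifndef AF_PHONET
#define AF_PHONET 35
#define PF_PHONET AF_PHONET
#endif

int main(void)
{
	struct sockaddr_pn spn;
	int fd;

	fd = socket(PF_PHONET, SOCK_DGRAM, PN_PROTO_PHONET);
	if (fd < 0) {
		perror("socket(PF_PHONET)");
		return 1;
	}

	memset(&spn, 0, sizeof(spn));
	spn.spn_family = AF_PHONET;
	pn_sockaddr_set_addr(&spn, 0x10);	/* example device address (high 6 bits of spn_dev) */
	pn_sockaddr_set_resource(&spn, 0x06);	/* example resource number */

	if (bind(fd, (struct sockaddr *)&spn, sizeof(spn)) < 0)
		perror("bind");

	close(fd);
	return 0;
}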
*/ @@ -295,6 +297,7 @@ struct ucred { #define SOL_RXRPC 272 #define SOL_PPPOL2TP 273 #define SOL_BLUETOOTH 274 +#define SOL_PNPIPE 275 /* IPX options */ #define IPX_TYPE 1 diff --git a/include/linux/ssb/ssb_regs.h b/include/linux/ssb/ssb_regs.h index ebad0bac980..99a0f991e85 100644 --- a/include/linux/ssb/ssb_regs.h +++ b/include/linux/ssb/ssb_regs.h @@ -245,8 +245,6 @@ /* SPROM Revision 3 (inherits most data from rev 2) */ #define SSB_SPROM3_IL0MAC 0x104A /* 6 bytes MAC address for 802.11b/g */ -#define SSB_SPROM3_ET0MAC 0x1050 /* 6 bytes MAC address for Ethernet ?? */ -#define SSB_SPROM3_ET1MAC 0x1050 /* 6 bytes MAC address for 802.11a ?? */ #define SSB_SPROM3_OFDMAPO 0x102C /* A-PHY OFDM Mid Power Offset (4 bytes, BigEndian) */ #define SSB_SPROM3_OFDMALPO 0x1030 /* A-PHY OFDM Low Power Offset (4 bytes, BigEndian) */ #define SSB_SPROM3_OFDMAHPO 0x1034 /* A-PHY OFDM High Power Offset (4 bytes, BigEndian) */ @@ -267,8 +265,6 @@ /* SPROM Revision 4 */ #define SSB_SPROM4_IL0MAC 0x104C /* 6 byte MAC address for a/b/g/n */ -#define SSB_SPROM4_ET0MAC 0x1018 /* 6 bytes MAC address for Ethernet ?? */ -#define SSB_SPROM4_ET1MAC 0x1018 /* 6 bytes MAC address for 802.11a ?? */ #define SSB_SPROM4_ETHPHY 0x105A /* Ethernet PHY settings ?? */ #define SSB_SPROM4_ETHPHY_ET0A 0x001F /* MII Address for enet0 */ #define SSB_SPROM4_ETHPHY_ET1A 0x03E0 /* MII Address for enet1 */ @@ -316,6 +312,21 @@ #define SSB_SPROM4_PA1B1 0x1090 #define SSB_SPROM4_PA1B2 0x1092 +/* SPROM Revision 5 (inherits most data from rev 4) */ +#define SSB_SPROM5_BFLLO 0x104A /* Boardflags (low 16 bits) */ +#define SSB_SPROM5_BFLHI 0x104C /* Board Flags Hi */ +#define SSB_SPROM5_IL0MAC 0x1052 /* 6 byte MAC address for a/b/g/n */ +#define SSB_SPROM5_CCODE 0x1044 /* Country Code (2 bytes) */ +#define SSB_SPROM5_GPIOA 0x1076 /* Gen. Purpose IO # 0 and 1 */ +#define SSB_SPROM5_GPIOA_P0 0x00FF /* Pin 0 */ +#define SSB_SPROM5_GPIOA_P1 0xFF00 /* Pin 1 */ +#define SSB_SPROM5_GPIOA_P1_SHIFT 8 +#define SSB_SPROM5_GPIOB 0x1078 /* Gen. 
Purpose IO # 2 and 3 */ +#define SSB_SPROM5_GPIOB_P2 0x00FF /* Pin 2 */ +#define SSB_SPROM5_GPIOB_P3 0xFF00 /* Pin 3 */ +#define SSB_SPROM5_GPIOB_P3_SHIFT 8 + + /* Values for SSB_SPROM1_BINF_CCODE */ enum { SSB_SPROM1CCODE_WORLD = 0, diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h index 5da9794b2d7..b106fd8e0d5 100644 --- a/include/linux/stacktrace.h +++ b/include/linux/stacktrace.h @@ -1,6 +1,8 @@ #ifndef __LINUX_STACKTRACE_H #define __LINUX_STACKTRACE_H +struct task_struct; + #ifdef CONFIG_STACKTRACE struct stack_trace { unsigned int nr_entries, max_entries; diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h new file mode 100644 index 00000000000..a3eb2f65b65 --- /dev/null +++ b/include/linux/string_helpers.h @@ -0,0 +1,16 @@ +#ifndef _LINUX_STRING_HELPERS_H_ +#define _LINUX_STRING_HELPERS_H_ + +#include <linux/types.h> + +/* Descriptions of the types of units to + * print in */ +enum string_size_units { + STRING_UNITS_10, /* use powers of 10^3 (standard SI) */ + STRING_UNITS_2, /* use binary powers of 2^10 */ +}; + +int string_get_size(u64 size, enum string_size_units units, + char *buf, int len); + +#endif diff --git a/include/linux/tc_act/Kbuild b/include/linux/tc_act/Kbuild index 6dac0d7365c..76990937f4c 100644 --- a/include/linux/tc_act/Kbuild +++ b/include/linux/tc_act/Kbuild @@ -3,3 +3,4 @@ header-y += tc_ipt.h header-y += tc_mirred.h header-y += tc_pedit.h header-y += tc_nat.h +header-y += tc_skbedit.h diff --git a/include/linux/tc_act/tc_skbedit.h b/include/linux/tc_act/tc_skbedit.h new file mode 100644 index 00000000000..a14e461a7af --- /dev/null +++ b/include/linux/tc_act/tc_skbedit.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2008, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple + * Place - Suite 330, Boston, MA 02111-1307 USA. + * + * Author: Alexander Duyck <alexander.h.duyck@intel.com> + */ + +#ifndef __LINUX_TC_SKBEDIT_H +#define __LINUX_TC_SKBEDIT_H + +#include <linux/pkt_cls.h> + +#define TCA_ACT_SKBEDIT 11 + +#define SKBEDIT_F_PRIORITY 0x1 +#define SKBEDIT_F_QUEUE_MAPPING 0x2 + +struct tc_skbedit { + tc_gen; +}; + +enum { + TCA_SKBEDIT_UNSPEC, + TCA_SKBEDIT_TM, + TCA_SKBEDIT_PARMS, + TCA_SKBEDIT_PRIORITY, + TCA_SKBEDIT_QUEUE_MAPPING, + __TCA_SKBEDIT_MAX +}; +#define TCA_SKBEDIT_MAX (__TCA_SKBEDIT_MAX - 1) + +#endif diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 2e2557388e3..fe77e1499ab 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -312,8 +312,11 @@ struct tcp_sock { u32 retrans_out; /* Retransmitted packets out */ u16 urg_data; /* Saved octet of OOB data and control flags */ - u8 urg_mode; /* In urgent mode */ u8 ecn_flags; /* ECN status bits. */ + u8 reordering; /* Packet reordering metric. */ + u32 snd_up; /* Urgent pointer */ + + u8 keepalive_probes; /* num of allowed keep alive probes */ /* * Options received (usually on last packet, some only on SYN packets). 
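string_get_size(), declared in the new <linux/string_helpers.h> earlier in this patch, formats a raw byte count using either SI (10^3) or binary (2^10) units. A hypothetical kernel-context caller, not part of the patch:

#include <linux/kernel.h>
#include <linux/string_helpers.h>

/* Made-up example: report a device capacity both as a raw byte count and
 * in binary (2^10) units via string_get_size(). */
static void report_capacity(u64 bytes)
{
	char buf[32];

	string_get_size(bytes, STRING_UNITS_2, buf, sizeof(buf));
	printk(KERN_INFO "capacity: %llu bytes (%s)\n",
	       (unsigned long long)bytes, buf);
}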
*/ @@ -342,7 +345,6 @@ struct tcp_sock { struct sk_buff* lost_skb_hint; struct sk_buff *scoreboard_skb_hint; struct sk_buff *retransmit_skb_hint; - struct sk_buff *forward_skb_hint; struct sk_buff_head out_of_order_queue; /* Out of order segments go here */ @@ -358,12 +360,10 @@ struct tcp_sock { */ int lost_cnt_hint; - int retransmit_cnt_hint; + u32 retransmit_high; /* L-bits may be on up to this seqno */ u32 lost_retrans_low; /* Sent seq after any rxmit (lowest) */ - u8 reordering; /* Packet reordering metric. */ - u8 keepalive_probes; /* num of allowed keep alive probes */ u32 prior_ssthresh; /* ssthresh saved at recovery start */ u32 high_seq; /* snd_nxt at onset of congestion */ @@ -375,8 +375,6 @@ struct tcp_sock { u32 total_retrans; /* Total retransmits for entire connection */ u32 urg_seq; /* Seq of received urgent pointer */ - u32 snd_up; /* Urgent pointer */ - unsigned int keepalive_time; /* time before keep alive takes place */ unsigned int keepalive_intvl; /* time interval between keep alive probes */ diff --git a/include/linux/tick.h b/include/linux/tick.h index 8cf8cfe2cc9..98921a3e1aa 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -126,7 +126,7 @@ static inline ktime_t tick_nohz_get_sleep_length(void) return len; } static inline void tick_nohz_stop_idle(int cpu) { } -static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return 0; } +static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; } # endif /* !NO_HZ */ #endif diff --git a/include/linux/xfrm.h b/include/linux/xfrm.h index fb0c215a305..4bc1e6b86cb 100644 --- a/include/linux/xfrm.h +++ b/include/linux/xfrm.h @@ -279,6 +279,7 @@ enum xfrm_attr_type_t { XFRMA_POLICY_TYPE, /* struct xfrm_userpolicy_type */ XFRMA_MIGRATE, XFRMA_ALG_AEAD, /* struct xfrm_algo_aead */ + XFRMA_KMADDRESS, /* struct xfrm_user_kmaddress */ __XFRMA_MAX #define XFRMA_MAX (__XFRMA_MAX - 1) @@ -415,6 +416,15 @@ struct xfrm_user_report { struct xfrm_selector sel; }; +/* Used by MIGRATE to pass addresses IKE should use to perform + * SA negotiation with the peer */ +struct xfrm_user_kmaddress { + xfrm_address_t local; + xfrm_address_t remote; + __u32 reserved; + __u16 family; +}; + struct xfrm_user_migrate { xfrm_address_t old_daddr; xfrm_address_t old_saddr; |