author		Greg Kroah-Hartman <gregkh@suse.de>	2006-01-06 12:59:59 -0800
committer	Greg Kroah-Hartman <gregkh@suse.de>	2006-01-06 12:59:59 -0800
commit		ccf18968b1bbc2fb117190a1984ac2a826dac228 (patch)
tree		7bc8fbf5722aecf1e84fa50c31c657864cba1daa	/include/linux
parent		e91c021c487110386a07facd0396e6c3b7cf9c1f (diff)
parent		d99cf9d679a520d67f81d805b7cb91c68e1847f0 (diff)
Merge ../torvalds-2.6/
Diffstat (limited to 'include/linux')
-rw-r--r--include/linux/ata.h8
-rw-r--r--include/linux/blkdev.h91
-rw-r--r--include/linux/bootmem.h46
-rw-r--r--include/linux/configfs.h205
-rw-r--r--include/linux/dm-ioctl.h11
-rw-r--r--include/linux/elevator.h1
-rw-r--r--include/linux/fs.h32
-rw-r--r--include/linux/fuse.h24
-rw-r--r--include/linux/hugetlb.h4
-rw-r--r--include/linux/i2o.h976
-rw-r--r--include/linux/irq.h13
-rw-r--r--include/linux/jbd.h8
-rw-r--r--include/linux/key.h8
-rw-r--r--include/linux/libata.h11
-rw-r--r--include/linux/mempolicy.h38
-rw-r--r--include/linux/mm.h55
-rw-r--r--include/linux/mmzone.h44
-rw-r--r--include/linux/nbd.h8
-rw-r--r--include/linux/nfsd/xdr.h3
-rw-r--r--include/linux/nfsd/xdr3.h1
-rw-r--r--include/linux/page-flags.h91
-rw-r--r--include/linux/parport.h1
-rw-r--r--include/linux/parport_pc.h4
-rw-r--r--include/linux/pci_ids.h10
-rw-r--r--include/linux/raid/md.h4
-rw-r--r--include/linux/raid/md_k.h80
-rw-r--r--include/linux/raid/raid1.h14
-rw-r--r--include/linux/raid/raid10.h22
-rw-r--r--include/linux/raid/raid5.h7
-rw-r--r--include/linux/ramfs.h10
-rw-r--r--include/linux/rmap.h1
-rw-r--r--include/linux/sched.h25
-rw-r--r--include/linux/suspend.h8
-rw-r--r--include/linux/swap.h2
-rw-r--r--include/linux/writeback.h6
35 files changed, 1191 insertions, 681 deletions
diff --git a/include/linux/ata.h b/include/linux/ata.h
index d2873b732bb..94f77cce27f 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -129,6 +129,7 @@ enum {
ATA_CMD_READ_EXT = 0x25,
ATA_CMD_WRITE = 0xCA,
ATA_CMD_WRITE_EXT = 0x35,
+ ATA_CMD_WRITE_FUA_EXT = 0x3D,
ATA_CMD_PIO_READ = 0x20,
ATA_CMD_PIO_READ_EXT = 0x24,
ATA_CMD_PIO_WRITE = 0x30,
@@ -137,10 +138,13 @@ enum {
ATA_CMD_READ_MULTI_EXT = 0x29,
ATA_CMD_WRITE_MULTI = 0xC5,
ATA_CMD_WRITE_MULTI_EXT = 0x39,
+ ATA_CMD_WRITE_MULTI_FUA_EXT = 0xCE,
ATA_CMD_SET_FEATURES = 0xEF,
ATA_CMD_PACKET = 0xA0,
ATA_CMD_VERIFY = 0x40,
ATA_CMD_VERIFY_EXT = 0x42,
+ ATA_CMD_STANDBYNOW1 = 0xE0,
+ ATA_CMD_IDLEIMMEDIATE = 0xE1,
ATA_CMD_INIT_DEV_PARAMS = 0x91,
/* SETFEATURES stuff */
@@ -192,6 +196,7 @@ enum {
ATA_TFLAG_DEVICE = (1 << 2), /* enable r/w to device reg */
ATA_TFLAG_WRITE = (1 << 3), /* data dir: host->dev==1 (write) */
ATA_TFLAG_LBA = (1 << 4), /* enable LBA */
+ ATA_TFLAG_FUA = (1 << 5), /* enable FUA */
};
enum ata_tf_protocols {
@@ -245,7 +250,8 @@ struct ata_taskfile {
#define ata_id_is_sata(id) ((id)[93] == 0)
#define ata_id_rahead_enabled(id) ((id)[85] & (1 << 6))
#define ata_id_wcache_enabled(id) ((id)[85] & (1 << 5))
-#define ata_id_has_flush(id) ((id)[83] & (1 << 12))
+#define ata_id_has_fua(id) ((id)[84] & (1 << 6))
+#define ata_id_has_flush(id) ((id)[83] & (1 << 12))
#define ata_id_has_flush_ext(id) ((id)[83] & (1 << 13))
#define ata_id_has_lba48(id) ((id)[83] & (1 << 10))
#define ata_id_has_wcache(id) ((id)[82] & (1 << 5))
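The FUA additions above are driven entirely from the IDENTIFY DEVICE data: word 84 bit 6 advertises the FUA write commands that the new ata_id_has_fua() tests. A minimal sketch of how a driver might pick the write opcode with these macros (function and parameter names are illustrative, not part of this diff):

	/* Sketch: choose a write command from IDENTIFY data. */
	static u8 pick_write_cmd(const u16 *id, int want_fua)
	{
		if (want_fua && ata_id_has_fua(id))
			return ATA_CMD_WRITE_FUA_EXT;	/* 0x3D: LBA48 write + FUA */
		if (ata_id_has_lba48(id))
			return ATA_CMD_WRITE_EXT;	/* 0x35: plain LBA48 write */
		return ATA_CMD_WRITE;			/* 0xCA: LBA28 DMA write */
	}

A caller choosing the FUA opcode would also set ATA_TFLAG_FUA in the taskfile flags.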
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index a18500d196e..fb098537742 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -102,7 +102,7 @@ void copy_io_context(struct io_context **pdst, struct io_context **psrc);
void swap_io_context(struct io_context **ioc1, struct io_context **ioc2);
struct request;
-typedef void (rq_end_io_fn)(struct request *);
+typedef void (rq_end_io_fn)(struct request *, int);
struct request_list {
int count[2];
@@ -207,6 +207,7 @@ enum rq_flag_bits {
__REQ_SORTED, /* elevator knows about this request */
__REQ_SOFTBARRIER, /* may not be passed by ioscheduler */
__REQ_HARDBARRIER, /* may not be passed by drive either */
+ __REQ_FUA, /* forced unit access */
__REQ_CMD, /* is a regular fs rw request */
__REQ_NOMERGE, /* don't touch this for merging */
__REQ_STARTED, /* drive already may have started this one */
@@ -230,9 +231,7 @@ enum rq_flag_bits {
__REQ_PM_SUSPEND, /* suspend request */
__REQ_PM_RESUME, /* resume request */
__REQ_PM_SHUTDOWN, /* shutdown request */
- __REQ_BAR_PREFLUSH, /* barrier pre-flush done */
- __REQ_BAR_POSTFLUSH, /* barrier post-flush */
- __REQ_BAR_FLUSH, /* rq is the flush request */
+ __REQ_ORDERED_COLOR, /* is before or after barrier */
__REQ_NR_BITS, /* stops here */
};
@@ -241,6 +240,7 @@ enum rq_flag_bits {
#define REQ_SORTED (1 << __REQ_SORTED)
#define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER)
#define REQ_HARDBARRIER (1 << __REQ_HARDBARRIER)
+#define REQ_FUA (1 << __REQ_FUA)
#define REQ_CMD (1 << __REQ_CMD)
#define REQ_NOMERGE (1 << __REQ_NOMERGE)
#define REQ_STARTED (1 << __REQ_STARTED)
@@ -260,9 +260,7 @@ enum rq_flag_bits {
#define REQ_PM_SUSPEND (1 << __REQ_PM_SUSPEND)
#define REQ_PM_RESUME (1 << __REQ_PM_RESUME)
#define REQ_PM_SHUTDOWN (1 << __REQ_PM_SHUTDOWN)
-#define REQ_BAR_PREFLUSH (1 << __REQ_BAR_PREFLUSH)
-#define REQ_BAR_POSTFLUSH (1 << __REQ_BAR_POSTFLUSH)
-#define REQ_BAR_FLUSH (1 << __REQ_BAR_FLUSH)
+#define REQ_ORDERED_COLOR (1 << __REQ_ORDERED_COLOR)
/*
* State information carried for REQ_PM_SUSPEND and REQ_PM_RESUME
@@ -292,8 +290,7 @@ struct bio_vec;
typedef int (merge_bvec_fn) (request_queue_t *, struct bio *, struct bio_vec *);
typedef void (activity_fn) (void *data, int rw);
typedef int (issue_flush_fn) (request_queue_t *, struct gendisk *, sector_t *);
-typedef int (prepare_flush_fn) (request_queue_t *, struct request *);
-typedef void (end_flush_fn) (request_queue_t *, struct request *);
+typedef void (prepare_flush_fn) (request_queue_t *, struct request *);
enum blk_queue_state {
Queue_down,
@@ -335,7 +332,6 @@ struct request_queue
activity_fn *activity_fn;
issue_flush_fn *issue_flush_fn;
prepare_flush_fn *prepare_flush_fn;
- end_flush_fn *end_flush_fn;
/*
* Dispatch queue sorting
@@ -420,14 +416,11 @@ struct request_queue
/*
* reserved for flush operations
*/
- struct request *flush_rq;
- unsigned char ordered;
-};
-
-enum {
- QUEUE_ORDERED_NONE,
- QUEUE_ORDERED_TAG,
- QUEUE_ORDERED_FLUSH,
+ unsigned int ordered, next_ordered, ordseq;
+ int orderr, ordcolor;
+ struct request pre_flush_rq, bar_rq, post_flush_rq;
+ struct request *orig_bar_rq;
+ unsigned int bi_size;
};
#define RQ_INACTIVE (-1)
@@ -445,12 +438,51 @@ enum {
#define QUEUE_FLAG_REENTER 6 /* Re-entrancy avoidance */
#define QUEUE_FLAG_PLUGGED 7 /* queue is plugged */
#define QUEUE_FLAG_ELVSWITCH 8 /* don't use elevator, just do FIFO */
-#define QUEUE_FLAG_FLUSH 9 /* doing barrier flush sequence */
+
+enum {
+ /*
+ * Hardbarrier is supported with one of the following methods.
+ *
+ * NONE : hardbarrier unsupported
+ * DRAIN : ordering by draining is enough
+ * DRAIN_FLUSH : ordering by draining w/ pre and post flushes
+ * DRAIN_FUA : ordering by draining w/ pre flush and FUA write
+ * TAG : ordering by tag is enough
+ * TAG_FLUSH : ordering by tag w/ pre and post flushes
+ * TAG_FUA : ordering by tag w/ pre flush and FUA write
+ */
+ QUEUE_ORDERED_NONE = 0x00,
+ QUEUE_ORDERED_DRAIN = 0x01,
+ QUEUE_ORDERED_TAG = 0x02,
+
+ QUEUE_ORDERED_PREFLUSH = 0x10,
+ QUEUE_ORDERED_POSTFLUSH = 0x20,
+ QUEUE_ORDERED_FUA = 0x40,
+
+ QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN |
+ QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
+ QUEUE_ORDERED_DRAIN_FUA = QUEUE_ORDERED_DRAIN |
+ QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
+ QUEUE_ORDERED_TAG_FLUSH = QUEUE_ORDERED_TAG |
+ QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
+ QUEUE_ORDERED_TAG_FUA = QUEUE_ORDERED_TAG |
+ QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
+
+ /*
+ * Ordered operation sequence
+ */
+ QUEUE_ORDSEQ_STARTED = 0x01, /* flushing in progress */
+ QUEUE_ORDSEQ_DRAIN = 0x02, /* waiting for the queue to be drained */
+ QUEUE_ORDSEQ_PREFLUSH = 0x04, /* pre-flushing in progress */
+ QUEUE_ORDSEQ_BAR = 0x08, /* original barrier req in progress */
+ QUEUE_ORDSEQ_POSTFLUSH = 0x10, /* post-flushing in progress */
+ QUEUE_ORDSEQ_DONE = 0x20,
+};
#define blk_queue_plugged(q) test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
-#define blk_queue_flushing(q) test_bit(QUEUE_FLAG_FLUSH, &(q)->queue_flags)
+#define blk_queue_flushing(q) ((q)->ordseq)
#define blk_fs_request(rq) ((rq)->flags & REQ_CMD)
#define blk_pc_request(rq) ((rq)->flags & REQ_BLOCK_PC)
@@ -466,8 +498,7 @@ enum {
#define blk_sorted_rq(rq) ((rq)->flags & REQ_SORTED)
#define blk_barrier_rq(rq) ((rq)->flags & REQ_HARDBARRIER)
-#define blk_barrier_preflush(rq) ((rq)->flags & REQ_BAR_PREFLUSH)
-#define blk_barrier_postflush(rq) ((rq)->flags & REQ_BAR_POSTFLUSH)
+#define blk_fua_rq(rq) ((rq)->flags & REQ_FUA)
#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
@@ -560,7 +591,7 @@ extern void register_disk(struct gendisk *dev);
extern void generic_make_request(struct bio *bio);
extern void blk_put_request(struct request *);
extern void __blk_put_request(request_queue_t *, struct request *);
-extern void blk_end_sync_rq(struct request *rq);
+extern void blk_end_sync_rq(struct request *rq, int error);
extern void blk_attempt_remerge(request_queue_t *, struct request *);
extern struct request *blk_get_request(request_queue_t *, int, gfp_t);
extern void blk_insert_request(request_queue_t *, struct request *, int, void *);
@@ -582,8 +613,7 @@ extern int blk_rq_map_user_iov(request_queue_t *, struct request *, struct sg_io
extern int blk_execute_rq(request_queue_t *, struct gendisk *,
struct request *, int);
extern void blk_execute_rq_nowait(request_queue_t *, struct gendisk *,
- struct request *, int,
- void (*done)(struct request *));
+ struct request *, int, rq_end_io_fn *);
static inline request_queue_t *bdev_get_queue(struct block_device *bdev)
{
@@ -614,7 +644,7 @@ static inline void blk_run_address_space(struct address_space *mapping)
*/
extern int end_that_request_first(struct request *, int, int);
extern int end_that_request_chunk(struct request *, int, int);
-extern void end_that_request_last(struct request *);
+extern void end_that_request_last(struct request *, int);
extern void end_request(struct request *req, int uptodate);
/*
@@ -665,11 +695,12 @@ extern void blk_queue_prep_rq(request_queue_t *, prep_rq_fn *pfn);
extern void blk_queue_merge_bvec(request_queue_t *, merge_bvec_fn *);
extern void blk_queue_dma_alignment(request_queue_t *, int);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
-extern void blk_queue_ordered(request_queue_t *, int);
+extern int blk_queue_ordered(request_queue_t *, unsigned, prepare_flush_fn *);
extern void blk_queue_issue_flush_fn(request_queue_t *, issue_flush_fn *);
-extern struct request *blk_start_pre_flush(request_queue_t *,struct request *);
-extern int blk_complete_barrier_rq(request_queue_t *, struct request *, int);
-extern int blk_complete_barrier_rq_locked(request_queue_t *, struct request *, int);
+extern int blk_do_ordered(request_queue_t *, struct request **);
+extern unsigned blk_ordered_cur_seq(request_queue_t *);
+extern unsigned blk_ordered_req_seq(struct request *);
+extern void blk_ordered_complete_seq(request_queue_t *, unsigned, int);
extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
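The blkdev.h changes replace the old three-flag pre/post-flush state machine with a single ordered-mode declaration plus a sequence tracker (ordseq). A driver now picks one QUEUE_ORDERED_* mode at init time and supplies a prepare_flush_fn; a hedged sketch against the new signature (the my_* names and the SCSI opcode choice are hypothetical):

	/* Turn the block layer's flush request into a device command. */
	static void my_prepare_flush(request_queue_t *q, struct request *rq)
	{
		memset(rq->cmd, 0, sizeof(rq->cmd));
		rq->cmd[0] = 0x35;	/* SYNCHRONIZE CACHE(10), illustrative */
		rq->cmd_len = 10;
	}

	static int my_init_queue(request_queue_t *q, int has_fua)
	{
		unsigned ordered = has_fua ? QUEUE_ORDERED_DRAIN_FUA
					   : QUEUE_ORDERED_DRAIN_FLUSH;
		return blk_queue_ordered(q, ordered, my_prepare_flush);
	}

With a *_FUA mode the post-flush is skipped and the barrier write itself carries REQ_FUA, which is why blk_fua_rq() replaces the old pre/post-flush request tests.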
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 3b03b0b868d..993da8cc970 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -43,50 +43,38 @@ typedef struct bootmem_data {
extern unsigned long __init bootmem_bootmap_pages (unsigned long);
extern unsigned long __init init_bootmem (unsigned long addr, unsigned long memend);
extern void __init free_bootmem (unsigned long addr, unsigned long size);
-extern void * __init __alloc_bootmem_limit (unsigned long size, unsigned long align, unsigned long goal, unsigned long limit);
+extern void * __init __alloc_bootmem (unsigned long size, unsigned long align, unsigned long goal);
+extern void * __init __alloc_bootmem_low(unsigned long size,
+ unsigned long align,
+ unsigned long goal);
+extern void * __init __alloc_bootmem_low_node(pg_data_t *pgdat,
+ unsigned long size,
+ unsigned long align,
+ unsigned long goal);
#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
extern void __init reserve_bootmem (unsigned long addr, unsigned long size);
#define alloc_bootmem(x) \
__alloc_bootmem((x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_low(x) \
- __alloc_bootmem((x), SMP_CACHE_BYTES, 0)
+ __alloc_bootmem_low((x), SMP_CACHE_BYTES, 0)
#define alloc_bootmem_pages(x) \
__alloc_bootmem((x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_low_pages(x) \
- __alloc_bootmem((x), PAGE_SIZE, 0)
-
-#define alloc_bootmem_limit(x, limit) \
- __alloc_bootmem_limit((x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS), (limit))
-#define alloc_bootmem_low_limit(x, limit) \
- __alloc_bootmem_limit((x), SMP_CACHE_BYTES, 0, (limit))
-#define alloc_bootmem_pages_limit(x, limit) \
- __alloc_bootmem_limit((x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS), (limit))
-#define alloc_bootmem_low_pages_limit(x, limit) \
- __alloc_bootmem_limit((x), PAGE_SIZE, 0, (limit))
-
+ __alloc_bootmem_low((x), PAGE_SIZE, 0)
#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
extern unsigned long __init free_all_bootmem (void);
-
+extern void * __init __alloc_bootmem_node (pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal);
extern unsigned long __init init_bootmem_node (pg_data_t *pgdat, unsigned long freepfn, unsigned long startpfn, unsigned long endpfn);
extern void __init reserve_bootmem_node (pg_data_t *pgdat, unsigned long physaddr, unsigned long size);
extern void __init free_bootmem_node (pg_data_t *pgdat, unsigned long addr, unsigned long size);
extern unsigned long __init free_all_bootmem_node (pg_data_t *pgdat);
-extern void * __init __alloc_bootmem_node_limit (pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal, unsigned long limit);
#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
#define alloc_bootmem_node(pgdat, x) \
__alloc_bootmem_node((pgdat), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_pages_node(pgdat, x) \
__alloc_bootmem_node((pgdat), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_low_pages_node(pgdat, x) \
- __alloc_bootmem_node((pgdat), (x), PAGE_SIZE, 0)
-
-#define alloc_bootmem_node_limit(pgdat, x, limit) \
- __alloc_bootmem_node_limit((pgdat), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS), (limit))
-#define alloc_bootmem_pages_node_limit(pgdat, x, limit) \
- __alloc_bootmem_node_limit((pgdat), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS), (limit))
-#define alloc_bootmem_low_pages_node_limit(pgdat, x, limit) \
- __alloc_bootmem_node_limit((pgdat), (x), PAGE_SIZE, 0, (limit))
-
+ __alloc_bootmem_low_node((pgdat), (x), PAGE_SIZE, 0)
#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
#ifdef CONFIG_HAVE_ARCH_ALLOC_REMAP
@@ -123,15 +111,5 @@ extern void *__init alloc_large_system_hash(const char *tablename,
#endif
extern int __initdata hashdist; /* Distribute hashes across NUMA nodes? */
-static inline void *__alloc_bootmem (unsigned long size, unsigned long align, unsigned long goal)
-{
- return __alloc_bootmem_limit(size, align, goal, 0);
-}
-
-static inline void *__alloc_bootmem_node (pg_data_t *pgdat, unsigned long size, unsigned long align,
- unsigned long goal)
-{
- return __alloc_bootmem_node_limit(pgdat, size, align, goal, 0);
-}
#endif /* _LINUX_BOOTMEM_H */
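The net effect of the bootmem.h change is that the *_limit wrappers disappear and the low-memory variants become first-class functions instead of goal=0 calls into __alloc_bootmem(). Callers keep the same macro interface; for example (hypothetical early-boot caller, non-NUMA config assumed):

	void __init my_early_setup(void)
	{
		/* cacheline-aligned allocation above MAX_DMA_ADDRESS */
		void *tbl = alloc_bootmem(4096);

		/* page-aligned allocation that must come from low memory;
		 * now routed through __alloc_bootmem_low() */
		void *buf = alloc_bootmem_low_pages(PAGE_SIZE);

		/* ... initialize tbl and buf ... */
	}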
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
new file mode 100644
index 00000000000..acffb8c9073
--- /dev/null
+++ b/include/linux/configfs.h
@@ -0,0 +1,205 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * configfs.h - definitions for the device driver filesystem
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ *
+ * Based on sysfs:
+ * sysfs is Copyright (C) 2001, 2002, 2003 Patrick Mochel
+ *
+ * Based on kobject.h:
+ * Copyright (c) 2002-2003 Patrick Mochel
+ * Copyright (c) 2002-2003 Open Source Development Labs
+ *
+ * configfs Copyright (C) 2005 Oracle. All rights reserved.
+ *
+ * Please read Documentation/filesystems/configfs.txt before using the
+ * configfs interface, ESPECIALLY the parts about reference counts and
+ * item destructors.
+ */
+
+#ifndef _CONFIGFS_H_
+#define _CONFIGFS_H_
+
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/kref.h>
+
+#include <asm/atomic.h>
+#include <asm/semaphore.h>
+
+#define CONFIGFS_ITEM_NAME_LEN 20
+
+struct module;
+
+struct configfs_item_operations;
+struct configfs_group_operations;
+struct configfs_attribute;
+struct configfs_subsystem;
+
+struct config_item {
+ char *ci_name;
+ char ci_namebuf[CONFIGFS_ITEM_NAME_LEN];
+ struct kref ci_kref;
+ struct list_head ci_entry;
+ struct config_item *ci_parent;
+ struct config_group *ci_group;
+ struct config_item_type *ci_type;
+ struct dentry *ci_dentry;
+};
+
+extern int config_item_set_name(struct config_item *, const char *, ...);
+
+static inline char *config_item_name(struct config_item * item)
+{
+ return item->ci_name;
+}
+
+extern void config_item_init(struct config_item *);
+extern void config_item_init_type_name(struct config_item *item,
+ const char *name,
+ struct config_item_type *type);
+extern void config_item_cleanup(struct config_item *);
+
+extern struct config_item * config_item_get(struct config_item *);
+extern void config_item_put(struct config_item *);
+
+struct config_item_type {
+ struct module *ct_owner;
+ struct configfs_item_operations *ct_item_ops;
+ struct configfs_group_operations *ct_group_ops;
+ struct configfs_attribute **ct_attrs;
+};
+
+
+/**
+ * group - a group of config_items of a specific type, belonging
+ * to a specific subsystem.
+ */
+
+struct config_group {
+ struct config_item cg_item;
+ struct list_head cg_children;
+ struct configfs_subsystem *cg_subsys;
+ struct config_group **default_groups;
+};
+
+
+extern void config_group_init(struct config_group *group);
+extern void config_group_init_type_name(struct config_group *group,
+ const char *name,
+ struct config_item_type *type);
+
+
+static inline struct config_group *to_config_group(struct config_item *item)
+{
+ return item ? container_of(item,struct config_group,cg_item) : NULL;
+}
+
+static inline struct config_group *config_group_get(struct config_group *group)
+{
+ return group ? to_config_group(config_item_get(&group->cg_item)) : NULL;
+}
+
+static inline void config_group_put(struct config_group *group)
+{
+ config_item_put(&group->cg_item);
+}
+
+extern struct config_item *config_group_find_obj(struct config_group *, const char *);
+
+
+struct configfs_attribute {
+ char *ca_name;
+ struct module *ca_owner;
+ mode_t ca_mode;
+};
+
+
+/*
+ * If allow_link() exists, the item can symlink(2) out to other
+ * items. If the item is a group, it may support mkdir(2).
+ * Groups supply one of make_group() and make_item(). If the
+ * group supports make_group(), one can create group children. If it
+ * supports make_item(), one can create config_item children. If it has
+ * default_groups on group->default_groups, it has automatically created
+ * group children. default_groups may coexist alongside make_group() or
+ * make_item(), but if the group wishes to have only default_groups
+ * children (disallowing mkdir(2)), it need not provide either function.
+ * If the group has commit(), it supports pending and committed (active)
+ * items.
+ */
+struct configfs_item_operations {
+ void (*release)(struct config_item *);
+ ssize_t (*show_attribute)(struct config_item *, struct configfs_attribute *,char *);
+ ssize_t (*store_attribute)(struct config_item *,struct configfs_attribute *,const char *, size_t);
+ int (*allow_link)(struct config_item *src, struct config_item *target);
+ int (*drop_link)(struct config_item *src, struct config_item *target);
+};
+
+struct configfs_group_operations {
+ struct config_item *(*make_item)(struct config_group *group, const char *name);
+ struct config_group *(*make_group)(struct config_group *group, const char *name);
+ int (*commit_item)(struct config_item *item);
+ void (*drop_item)(struct config_group *group, struct config_item *item);
+};
+
+
+
+/**
+ * Use these macros to make defining attributes easier. See include/linux/device.h
+ * for examples.
+ */
+
+#if 0
+#define __ATTR(_name,_mode,_show,_store) { \
+ .attr = {.ca_name = __stringify(_name), .ca_mode = _mode, .ca_owner = THIS_MODULE }, \
+ .show = _show, \
+ .store = _store, \
+}
+
+#define __ATTR_RO(_name) { \
+ .attr = { .ca_name = __stringify(_name), .ca_mode = 0444, .ca_owner = THIS_MODULE }, \
+ .show = _name##_show, \
+}
+
+#define __ATTR_NULL { .attr = { .name = NULL } }
+
+#define attr_name(_attr) (_attr).attr.name
+#endif
+
+
+struct configfs_subsystem {
+ struct config_group su_group;
+ struct semaphore su_sem;
+};
+
+static inline struct configfs_subsystem *to_configfs_subsystem(struct config_group *group)
+{
+ return group ?
+ container_of(group, struct configfs_subsystem, su_group) :
+ NULL;
+}
+
+int configfs_register_subsystem(struct configfs_subsystem *subsys);
+void configfs_unregister_subsystem(struct configfs_subsystem *subsys);
+
+#endif /* __KERNEL__ */
+
+#endif /* _CONFIGFS_H_ */
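Taken together, the structures above compose in a fixed pattern: attributes hang off a config_item_type, the type describes the subsystem's root group, and the subsystem is registered once. A hedged sketch of that pattern (all my_* names hypothetical; see Documentation/filesystems/configfs.txt for the full contract):

	static struct configfs_attribute my_attr = {
		.ca_name  = "value",
		.ca_owner = THIS_MODULE,
		.ca_mode  = S_IRUGO | S_IWUSR,
	};

	static struct configfs_attribute *my_attrs[] = {
		&my_attr,
		NULL,
	};

	static struct configfs_item_operations my_item_ops = {
		/* .show_attribute / .store_attribute handlers go here */
	};

	static struct config_item_type my_type = {
		.ct_owner    = THIS_MODULE,
		.ct_item_ops = &my_item_ops,
		.ct_attrs    = my_attrs,
	};

	static struct configfs_subsystem my_subsys = {
		.su_group = {
			.cg_item = {
				.ci_namebuf = "my_subsys",  /* directory name */
				.ci_type    = &my_type,
			},
		},
	};

	static int __init my_init(void)
	{
		config_group_init(&my_subsys.su_group);
		init_MUTEX(&my_subsys.su_sem);
		return configfs_register_subsystem(&my_subsys);
	}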
diff --git a/include/linux/dm-ioctl.h b/include/linux/dm-ioctl.h
index f5eb6b6cd10..fa75ba0d635 100644
--- a/include/linux/dm-ioctl.h
+++ b/include/linux/dm-ioctl.h
@@ -272,9 +272,9 @@ typedef char ioctl_struct[308];
#define DM_TARGET_MSG _IOWR(DM_IOCTL, DM_TARGET_MSG_CMD, struct dm_ioctl)
#define DM_VERSION_MAJOR 4
-#define DM_VERSION_MINOR 4
+#define DM_VERSION_MINOR 5
#define DM_VERSION_PATCHLEVEL 0
-#define DM_VERSION_EXTRA "-ioctl (2005-01-12)"
+#define DM_VERSION_EXTRA "-ioctl (2005-10-04)"
/* Status bits */
#define DM_READONLY_FLAG (1 << 0) /* In/Out */
@@ -301,8 +301,13 @@ typedef char ioctl_struct[308];
#define DM_BUFFER_FULL_FLAG (1 << 8) /* Out */
/*
- * Set this to improve performance when you aren't going to use open_count
+ * Set this to improve performance when you aren't going to use open_count.
*/
#define DM_SKIP_BDGET_FLAG (1 << 9) /* In */
+/*
+ * Set this to avoid attempting to freeze any filesystem when suspending.
+ */
+#define DM_SKIP_LOCKFS_FLAG (1 << 10) /* In */
+
#endif /* _LINUX_DM_IOCTL_H */
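From userspace the new flag goes into the flags word of struct dm_ioctl before the suspend ioctl; a hedged fragment (fd setup and error handling elided, device name hypothetical):

	struct dm_ioctl dmi;

	memset(&dmi, 0, sizeof(dmi));
	dmi.version[0] = DM_VERSION_MAJOR;
	dmi.version[1] = DM_VERSION_MINOR;
	dmi.version[2] = DM_VERSION_PATCHLEVEL;
	dmi.data_size  = sizeof(dmi);
	strncpy(dmi.name, "mydev", sizeof(dmi.name) - 1);
	dmi.flags = DM_SUSPEND_FLAG | DM_SKIP_LOCKFS_FLAG; /* suspend, no fs freeze */

	ioctl(ctl_fd, DM_DEV_SUSPEND, &dmi); /* ctl_fd: /dev/mapper/control */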
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index a74c27e460b..fb80fa44c4d 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -130,6 +130,7 @@ extern int elv_try_last_merge(request_queue_t *, struct bio *);
#define ELEVATOR_INSERT_FRONT 1
#define ELEVATOR_INSERT_BACK 2
#define ELEVATOR_INSERT_SORT 3
+#define ELEVATOR_INSERT_REQUEUE 4
/*
* return values from elevator_may_queue_fn
diff --git a/include/linux/fs.h b/include/linux/fs.h
index cc35b6ac778..115e72be25d 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -302,6 +302,37 @@ struct iattr {
*/
#include <linux/quota.h>
+/**
+ * enum positive_aop_returns - aop return codes with specific semantics
+ *
+ * @AOP_WRITEPAGE_ACTIVATE: Informs the caller that page writeback has
+ * completed, that the page is still locked, and
+ * should be considered active. The VM uses this hint
+ * to return the page to the active list -- it won't
+ * be a candidate for writeback again in the near
+ * future. Other callers must be careful to unlock
+ * the page if they get this return. Returned by
+ * writepage();
+ *
+ * @AOP_TRUNCATED_PAGE: The AOP method that was handed a locked page has
+ * unlocked it and the page might have been truncated.
+ * The caller should back up to acquiring a new page and
+ * trying again. The aop will be taking reasonable
+ * precautions not to livelock. If the caller held a page
+ * reference, it should drop it before retrying. Returned
+ * by readpage(), prepare_write(), and commit_write().
+ *
+ * address_space_operation functions return these large constants to indicate
+ * special semantics to the caller. These are much larger than the bytes in a
+ * page to allow for functions that return the number of bytes operated on in a
+ * given page.
+ */
+
+enum positive_aop_returns {
+ AOP_WRITEPAGE_ACTIVATE = 0x80000,
+ AOP_TRUNCATED_PAGE = 0x80001,
+};
+
/*
* oh the beauties of C type declarations.
*/
@@ -1019,6 +1050,7 @@ struct inode_operations {
ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
ssize_t (*listxattr) (struct dentry *, char *, size_t);
int (*removexattr) (struct dentry *, const char *);
+ void (*truncate_range)(struct inode *, loff_t, loff_t);
};
struct seq_file;
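The AOP_TRUNCATED_PAGE contract documented above translates into a retry loop on the caller's side; a condensed sketch of the pattern (mapping, index and file assumed in scope, locking details elided):

	struct page *page;
	int err;

	retry:
	page = find_or_create_page(mapping, index, GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	err = mapping->a_ops->readpage(file, page);
	if (err == AOP_TRUNCATED_PAGE) {
		/* The aop unlocked the page and it may have been
		 * truncated: drop our reference and start over. */
		page_cache_release(page);
		goto retry;
	}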
diff --git a/include/linux/fuse.h b/include/linux/fuse.h
index b76b558b03d..528959c52f1 100644
--- a/include/linux/fuse.h
+++ b/include/linux/fuse.h
@@ -14,7 +14,7 @@
#define FUSE_KERNEL_VERSION 7
/** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 3
+#define FUSE_KERNEL_MINOR_VERSION 5
/** The node ID of the root inode */
#define FUSE_ROOT_ID 1
@@ -53,6 +53,9 @@ struct fuse_kstatfs {
__u64 ffree;
__u32 bsize;
__u32 namelen;
+ __u32 frsize;
+ __u32 padding;
+ __u32 spare[6];
};
#define FATTR_MODE (1 << 0)
@@ -105,12 +108,8 @@ enum fuse_opcode {
FUSE_CREATE = 35
};
-/* Conservative buffer size for the client */
-#define FUSE_MAX_IN 8192
-
-#define FUSE_NAME_MAX 1024
-#define FUSE_SYMLINK_MAX 4096
-#define FUSE_XATTR_SIZE_MAX 4096
+/* The read buffer is required to be at least 8k, but may be much larger */
+#define FUSE_MIN_READ_BUFFER 8192
struct fuse_entry_out {
__u64 nodeid; /* Inode ID */
@@ -213,6 +212,8 @@ struct fuse_write_out {
__u32 padding;
};
+#define FUSE_COMPAT_STATFS_SIZE 48
+
struct fuse_statfs_out {
struct fuse_kstatfs st;
};
@@ -243,9 +244,16 @@ struct fuse_access_in {
__u32 padding;
};
-struct fuse_init_in_out {
+struct fuse_init_in {
+ __u32 major;
+ __u32 minor;
+};
+
+struct fuse_init_out {
__u32 major;
__u32 minor;
+ __u32 unused[3];
+ __u32 max_write;
};
struct fuse_in_header {
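Splitting fuse_init_in_out lets the two directions grow independently: the kernel sends the small fuse_init_in, and the daemon answers with the larger fuse_init_out carrying max_write. A hedged sketch of a daemon-side reply (not a real libfuse API, handler name hypothetical):

	static void reply_init(const struct fuse_init_in *in,
			       struct fuse_init_out *out)
	{
		memset(out, 0, sizeof(*out));
		out->major     = FUSE_KERNEL_VERSION;       /* 7 */
		out->minor     = FUSE_KERNEL_MINOR_VERSION; /* 5 */
		out->max_write = 4096;  /* max bytes per WRITE request */
	}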
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 1056717ee50..68d82ad6b17 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -22,7 +22,7 @@ int hugetlb_report_meminfo(char *);
int hugetlb_report_node_meminfo(int, char *);
int is_hugepage_mem_enough(size_t);
unsigned long hugetlb_total_pages(void);
-struct page *alloc_huge_page(void);
+struct page *alloc_huge_page(struct vm_area_struct *, unsigned long);
void free_huge_page(struct page *);
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, int write_access);
@@ -97,7 +97,7 @@ static inline unsigned long hugetlb_total_pages(void)
#define is_hugepage_only_range(mm, addr, len) 0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \
do { } while (0)
-#define alloc_huge_page() ({ NULL; })
+#define alloc_huge_page(vma, addr) ({ NULL; })
#define free_huge_page(p) ({ (void)(p); BUG(); })
#define hugetlb_fault(mm, vma, addr, write) ({ BUG(); 0; })
diff --git a/include/linux/i2o.h b/include/linux/i2o.h
index d79c8a4bc4f..9ba80679666 100644
--- a/include/linux/i2o.h
+++ b/include/linux/i2o.h
@@ -30,6 +30,7 @@
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/workqueue.h> /* work_struct */
+#include <linux/mempool.h>
#include <asm/io.h>
#include <asm/semaphore.h> /* Needed for MUTEX init macros */
@@ -38,6 +39,355 @@
#define I2O_QUEUE_EMPTY 0xffffffff
/*
+ * Cache strategies
+ */
+
+/* The NULL strategy leaves everything up to the controller. This tends to be a
+ * pessimal but functional choice.
+ */
+#define CACHE_NULL 0
+/* Prefetch data when reading. We continually attempt to load the next 32 sectors
+ * into the controller cache.
+ */
+#define CACHE_PREFETCH 1
+/* Prefetch data when reading. We sometimes attempt to load the next 32 sectors
+ * into the controller cache. When an I/O is <= 8K we assume it's probably
+ * not sequential and don't prefetch (default)
+ */
+#define CACHE_SMARTFETCH 2
+/* Data is written to the cache and then out on to the disk. The I/O must be
+ * physically on the medium before the write is acknowledged (default without
+ * NVRAM)
+ */
+#define CACHE_WRITETHROUGH 17
+/* Data is written to the cache and then out on to the disk. The controller
+ * is permitted to write back the cache any way it wants. (default if battery
+ * backed NVRAM is present). It can be useful to set this for swap regardless of
+ * battery state.
+ */
+#define CACHE_WRITEBACK 18
+/* Optimise for under powered controllers, especially on RAID1 and RAID0. We
+ * write large I/O's directly to disk bypassing the cache to avoid the extra
+ * memory copy hits. Small writes are writeback cached
+ */
+#define CACHE_SMARTBACK 19
+/* Optimise for under powered controllers, especially on RAID1 and RAID0. We
+ * write large I/O's directly to disk bypassing the cache to avoid the extra
+ * memory copy hits. Small writes are writethrough cached. Suitable for devices
+ * lacking battery backup
+ */
+#define CACHE_SMARTTHROUGH 20
+
+/*
+ * Ioctl structures
+ */
+
+#define BLKI2OGRSTRAT _IOR('2', 1, int)
+#define BLKI2OGWSTRAT _IOR('2', 2, int)
+#define BLKI2OSRSTRAT _IOW('2', 3, int)
+#define BLKI2OSWSTRAT _IOW('2', 4, int)
+
+/*
+ * I2O Function codes
+ */
+
+/*
+ * Executive Class
+ */
+#define I2O_CMD_ADAPTER_ASSIGN 0xB3
+#define I2O_CMD_ADAPTER_READ 0xB2
+#define I2O_CMD_ADAPTER_RELEASE 0xB5
+#define I2O_CMD_BIOS_INFO_SET 0xA5
+#define I2O_CMD_BOOT_DEVICE_SET 0xA7
+#define I2O_CMD_CONFIG_VALIDATE 0xBB
+#define I2O_CMD_CONN_SETUP 0xCA
+#define I2O_CMD_DDM_DESTROY 0xB1
+#define I2O_CMD_DDM_ENABLE 0xD5
+#define I2O_CMD_DDM_QUIESCE 0xC7
+#define I2O_CMD_DDM_RESET 0xD9
+#define I2O_CMD_DDM_SUSPEND 0xAF
+#define I2O_CMD_DEVICE_ASSIGN 0xB7
+#define I2O_CMD_DEVICE_RELEASE 0xB9
+#define I2O_CMD_HRT_GET 0xA8
+#define I2O_CMD_ADAPTER_CLEAR 0xBE
+#define I2O_CMD_ADAPTER_CONNECT 0xC9
+#define I2O_CMD_ADAPTER_RESET 0xBD
+#define I2O_CMD_LCT_NOTIFY 0xA2
+#define I2O_CMD_OUTBOUND_INIT 0xA1
+#define I2O_CMD_PATH_ENABLE 0xD3
+#define I2O_CMD_PATH_QUIESCE 0xC5
+#define I2O_CMD_PATH_RESET 0xD7
+#define I2O_CMD_STATIC_MF_CREATE 0xDD
+#define I2O_CMD_STATIC_MF_RELEASE 0xDF
+#define I2O_CMD_STATUS_GET 0xA0
+#define I2O_CMD_SW_DOWNLOAD 0xA9
+#define I2O_CMD_SW_UPLOAD 0xAB
+#define I2O_CMD_SW_REMOVE 0xAD
+#define I2O_CMD_SYS_ENABLE 0xD1
+#define I2O_CMD_SYS_MODIFY 0xC1
+#define I2O_CMD_SYS_QUIESCE 0xC3
+#define I2O_CMD_SYS_TAB_SET 0xA3
+
+/*
+ * Utility Class
+ */
+#define I2O_CMD_UTIL_NOP 0x00
+#define I2O_CMD_UTIL_ABORT 0x01
+#define I2O_CMD_UTIL_CLAIM 0x09
+#define I2O_CMD_UTIL_RELEASE 0x0B
+#define I2O_CMD_UTIL_PARAMS_GET 0x06
+#define I2O_CMD_UTIL_PARAMS_SET 0x05
+#define I2O_CMD_UTIL_EVT_REGISTER 0x13
+#define I2O_CMD_UTIL_EVT_ACK 0x14
+#define I2O_CMD_UTIL_CONFIG_DIALOG 0x10
+#define I2O_CMD_UTIL_DEVICE_RESERVE 0x0D
+#define I2O_CMD_UTIL_DEVICE_RELEASE 0x0F
+#define I2O_CMD_UTIL_LOCK 0x17
+#define I2O_CMD_UTIL_LOCK_RELEASE 0x19
+#define I2O_CMD_UTIL_REPLY_FAULT_NOTIFY 0x15
+
+/*
+ * SCSI Host Bus Adapter Class
+ */
+#define I2O_CMD_SCSI_EXEC 0x81
+#define I2O_CMD_SCSI_ABORT 0x83
+#define I2O_CMD_SCSI_BUSRESET 0x27
+
+/*
+ * Bus Adapter Class
+ */
+#define I2O_CMD_BUS_ADAPTER_RESET 0x85
+#define I2O_CMD_BUS_RESET 0x87
+#define I2O_CMD_BUS_SCAN 0x89
+#define I2O_CMD_BUS_QUIESCE 0x8b
+
+/*
+ * Random Block Storage Class
+ */
+#define I2O_CMD_BLOCK_READ 0x30
+#define I2O_CMD_BLOCK_WRITE 0x31
+#define I2O_CMD_BLOCK_CFLUSH 0x37
+#define I2O_CMD_BLOCK_MLOCK 0x49
+#define I2O_CMD_BLOCK_MUNLOCK 0x4B
+#define I2O_CMD_BLOCK_MMOUNT 0x41
+#define I2O_CMD_BLOCK_MEJECT 0x43
+#define I2O_CMD_BLOCK_POWER 0x70
+
+#define I2O_CMD_PRIVATE 0xFF
+
+/* Command status values */
+
+#define I2O_CMD_IN_PROGRESS 0x01
+#define I2O_CMD_REJECTED 0x02
+#define I2O_CMD_FAILED 0x03
+#define I2O_CMD_COMPLETED 0x04
+
+/* I2O API function return values */
+
+#define I2O_RTN_NO_ERROR 0
+#define I2O_RTN_NOT_INIT 1
+#define I2O_RTN_FREE_Q_EMPTY 2
+#define I2O_RTN_TCB_ERROR 3
+#define I2O_RTN_TRANSACTION_ERROR 4
+#define I2O_RTN_ADAPTER_ALREADY_INIT 5
+#define I2O_RTN_MALLOC_ERROR 6
+#define I2O_RTN_ADPTR_NOT_REGISTERED 7
+#define I2O_RTN_MSG_REPLY_TIMEOUT 8
+#define I2O_RTN_NO_STATUS 9
+#define I2O_RTN_NO_FIRM_VER 10
+#define I2O_RTN_NO_LINK_SPEED 11
+
+/* Reply message status defines for all messages */
+
+#define I2O_REPLY_STATUS_SUCCESS 0x00
+#define I2O_REPLY_STATUS_ABORT_DIRTY 0x01
+#define I2O_REPLY_STATUS_ABORT_NO_DATA_TRANSFER 0x02
+#define I2O_REPLY_STATUS_ABORT_PARTIAL_TRANSFER 0x03
+#define I2O_REPLY_STATUS_ERROR_DIRTY 0x04
+#define I2O_REPLY_STATUS_ERROR_NO_DATA_TRANSFER 0x05
+#define I2O_REPLY_STATUS_ERROR_PARTIAL_TRANSFER 0x06
+#define I2O_REPLY_STATUS_PROCESS_ABORT_DIRTY 0x08
+#define I2O_REPLY_STATUS_PROCESS_ABORT_NO_DATA_TRANSFER 0x09
+#define I2O_REPLY_STATUS_PROCESS_ABORT_PARTIAL_TRANSFER 0x0A
+#define I2O_REPLY_STATUS_TRANSACTION_ERROR 0x0B
+#define I2O_REPLY_STATUS_PROGRESS_REPORT 0x80
+
+/* Status codes and Error Information for Parameter functions */
+
+#define I2O_PARAMS_STATUS_SUCCESS 0x00
+#define I2O_PARAMS_STATUS_BAD_KEY_ABORT 0x01
+#define I2O_PARAMS_STATUS_BAD_KEY_CONTINUE 0x02
+#define I2O_PARAMS_STATUS_BUFFER_FULL 0x03
+#define I2O_PARAMS_STATUS_BUFFER_TOO_SMALL 0x04
+#define I2O_PARAMS_STATUS_FIELD_UNREADABLE 0x05
+#define I2O_PARAMS_STATUS_FIELD_UNWRITEABLE 0x06
+#define I2O_PARAMS_STATUS_INSUFFICIENT_FIELDS 0x07
+#define I2O_PARAMS_STATUS_INVALID_GROUP_ID 0x08
+#define I2O_PARAMS_STATUS_INVALID_OPERATION 0x09
+#define I2O_PARAMS_STATUS_NO_KEY_FIELD 0x0A
+#define I2O_PARAMS_STATUS_NO_SUCH_FIELD 0x0B
+#define I2O_PARAMS_STATUS_NON_DYNAMIC_GROUP 0x0C
+#define I2O_PARAMS_STATUS_OPERATION_ERROR 0x0D
+#define I2O_PARAMS_STATUS_SCALAR_ERROR 0x0E
+#define I2O_PARAMS_STATUS_TABLE_ERROR 0x0F
+#define I2O_PARAMS_STATUS_WRONG_GROUP_TYPE 0x10
+
+/* DetailedStatusCode defines for Executive, DDM, Util and Transaction error
+ * messages: Table 3-2 Detailed Status Codes.*/
+
+#define I2O_DSC_SUCCESS 0x0000
+#define I2O_DSC_BAD_KEY 0x0002
+#define I2O_DSC_TCL_ERROR 0x0003
+#define I2O_DSC_REPLY_BUFFER_FULL 0x0004
+#define I2O_DSC_NO_SUCH_PAGE 0x0005
+#define I2O_DSC_INSUFFICIENT_RESOURCE_SOFT 0x0006
+#define I2O_DSC_INSUFFICIENT_RESOURCE_HARD 0x0007
+#define I2O_DSC_CHAIN_BUFFER_TOO_LARGE 0x0009
+#define I2O_DSC_UNSUPPORTED_FUNCTION 0x000A
+#define I2O_DSC_DEVICE_LOCKED 0x000B
+#define I2O_DSC_DEVICE_RESET 0x000C
+#define I2O_DSC_INAPPROPRIATE_FUNCTION 0x000D
+#define I2O_DSC_INVALID_INITIATOR_ADDRESS 0x000E
+#define I2O_DSC_INVALID_MESSAGE_FLAGS 0x000F
+#define I2O_DSC_INVALID_OFFSET 0x0010
+#define I2O_DSC_INVALID_PARAMETER 0x0011
+#define I2O_DSC_INVALID_REQUEST 0x0012
+#define I2O_DSC_INVALID_TARGET_ADDRESS 0x0013
+#define I2O_DSC_MESSAGE_TOO_LARGE 0x0014
+#define I2O_DSC_MESSAGE_TOO_SMALL 0x0015
+#define I2O_DSC_MISSING_PARAMETER 0x0016
+#define I2O_DSC_TIMEOUT 0x0017
+#define I2O_DSC_UNKNOWN_ERROR 0x0018
+#define I2O_DSC_UNKNOWN_FUNCTION 0x0019
+#define I2O_DSC_UNSUPPORTED_VERSION 0x001A
+#define I2O_DSC_DEVICE_BUSY 0x001B
+#define I2O_DSC_DEVICE_NOT_AVAILABLE 0x001C
+
+/* DetailedStatusCode defines for Block Storage Operation: Table 6-7 Detailed
+ Status Codes.*/
+
+#define I2O_BSA_DSC_SUCCESS 0x0000
+#define I2O_BSA_DSC_MEDIA_ERROR 0x0001
+#define I2O_BSA_DSC_ACCESS_ERROR 0x0002
+#define I2O_BSA_DSC_DEVICE_FAILURE 0x0003
+#define I2O_BSA_DSC_DEVICE_NOT_READY 0x0004
+#define I2O_BSA_DSC_MEDIA_NOT_PRESENT 0x0005
+#define I2O_BSA_DSC_MEDIA_LOCKED 0x0006
+#define I2O_BSA_DSC_MEDIA_FAILURE 0x0007
+#define I2O_BSA_DSC_PROTOCOL_FAILURE 0x0008
+#define I2O_BSA_DSC_BUS_FAILURE 0x0009
+#define I2O_BSA_DSC_ACCESS_VIOLATION 0x000A
+#define I2O_BSA_DSC_WRITE_PROTECTED 0x000B
+#define I2O_BSA_DSC_DEVICE_RESET 0x000C
+#define I2O_BSA_DSC_VOLUME_CHANGED 0x000D
+#define I2O_BSA_DSC_TIMEOUT 0x000E
+
+/* FailureStatusCodes, Table 3-3 Message Failure Codes */
+
+#define I2O_FSC_TRANSPORT_SERVICE_SUSPENDED 0x81
+#define I2O_FSC_TRANSPORT_SERVICE_TERMINATED 0x82
+#define I2O_FSC_TRANSPORT_CONGESTION 0x83
+#define I2O_FSC_TRANSPORT_FAILURE 0x84
+#define I2O_FSC_TRANSPORT_STATE_ERROR 0x85
+#define I2O_FSC_TRANSPORT_TIME_OUT 0x86
+#define I2O_FSC_TRANSPORT_ROUTING_FAILURE 0x87
+#define I2O_FSC_TRANSPORT_INVALID_VERSION 0x88
+#define I2O_FSC_TRANSPORT_INVALID_OFFSET 0x89
+#define I2O_FSC_TRANSPORT_INVALID_MSG_FLAGS 0x8A
+#define I2O_FSC_TRANSPORT_FRAME_TOO_SMALL 0x8B
+#define I2O_FSC_TRANSPORT_FRAME_TOO_LARGE 0x8C
+#define I2O_FSC_TRANSPORT_INVALID_TARGET_ID 0x8D
+#define I2O_FSC_TRANSPORT_INVALID_INITIATOR_ID 0x8E
+#define I2O_FSC_TRANSPORT_INVALID_INITIATOR_CONTEXT 0x8F
+#define I2O_FSC_TRANSPORT_UNKNOWN_FAILURE 0xFF
+
+/* Device Claim Types */
+#define I2O_CLAIM_PRIMARY 0x01000000
+#define I2O_CLAIM_MANAGEMENT 0x02000000
+#define I2O_CLAIM_AUTHORIZED 0x03000000
+#define I2O_CLAIM_SECONDARY 0x04000000
+
+/* Message header defines for VersionOffset */
+#define I2OVER15 0x0001
+#define I2OVER20 0x0002
+
+/* Default is 1.5 */
+#define I2OVERSION I2OVER15
+
+#define SGL_OFFSET_0 I2OVERSION
+#define SGL_OFFSET_4 (0x0040 | I2OVERSION)
+#define SGL_OFFSET_5 (0x0050 | I2OVERSION)
+#define SGL_OFFSET_6 (0x0060 | I2OVERSION)
+#define SGL_OFFSET_7 (0x0070 | I2OVERSION)
+#define SGL_OFFSET_8 (0x0080 | I2OVERSION)
+#define SGL_OFFSET_9 (0x0090 | I2OVERSION)
+#define SGL_OFFSET_10 (0x00A0 | I2OVERSION)
+#define SGL_OFFSET_11 (0x00B0 | I2OVERSION)
+#define SGL_OFFSET_12 (0x00C0 | I2OVERSION)
+#define SGL_OFFSET(x) (((x)<<4) | I2OVERSION)
+
+/* Transaction Reply Lists (TRL) Control Word structure */
+#define TRL_SINGLE_FIXED_LENGTH 0x00
+#define TRL_SINGLE_VARIABLE_LENGTH 0x40
+#define TRL_MULTIPLE_FIXED_LENGTH 0x80
+
+ /* msg header defines for MsgFlags */
+#define MSG_STATIC 0x0100
+#define MSG_64BIT_CNTXT 0x0200
+#define MSG_MULTI_TRANS 0x1000
+#define MSG_FAIL 0x2000
+#define MSG_FINAL 0x4000
+#define MSG_REPLY 0x8000
+
+ /* minimum size msg */
+#define THREE_WORD_MSG_SIZE 0x00030000
+#define FOUR_WORD_MSG_SIZE 0x00040000
+#define FIVE_WORD_MSG_SIZE 0x00050000
+#define SIX_WORD_MSG_SIZE 0x00060000
+#define SEVEN_WORD_MSG_SIZE 0x00070000
+#define EIGHT_WORD_MSG_SIZE 0x00080000
+#define NINE_WORD_MSG_SIZE 0x00090000
+#define TEN_WORD_MSG_SIZE 0x000A0000
+#define ELEVEN_WORD_MSG_SIZE 0x000B0000
+#define I2O_MESSAGE_SIZE(x) ((x)<<16)
+
+/* special TID assignments */
+#define ADAPTER_TID 0
+#define HOST_TID 1
+
+/* outbound queue defines */
+#define I2O_MAX_OUTBOUND_MSG_FRAMES 128
+#define I2O_OUTBOUND_MSG_FRAME_SIZE 128 /* in 32-bit words */
+
+/* inbound queue definitions */
+#define I2O_MSG_INPOOL_MIN 32
+#define I2O_INBOUND_MSG_FRAME_SIZE 128 /* in 32-bit words */
+
+#define I2O_POST_WAIT_OK 0
+#define I2O_POST_WAIT_TIMEOUT -ETIMEDOUT
+
+#define I2O_CONTEXT_LIST_MIN_LENGTH 15
+#define I2O_CONTEXT_LIST_USED 0x01
+#define I2O_CONTEXT_LIST_DELETED 0x02
+
+/* timeouts */
+#define I2O_TIMEOUT_INIT_OUTBOUND_QUEUE 15
+#define I2O_TIMEOUT_MESSAGE_GET 5
+#define I2O_TIMEOUT_RESET 30
+#define I2O_TIMEOUT_STATUS_GET 5
+#define I2O_TIMEOUT_LCT_GET 360
+#define I2O_TIMEOUT_SCSI_SCB_ABORT 240
+
+/* retries */
+#define I2O_HRT_GET_TRIES 3
+#define I2O_LCT_GET_TRIES 3
+
+/* defines for max_sectors and max_phys_segments */
+#define I2O_MAX_SECTORS 1024
+#define I2O_MAX_SECTORS_LIMITED 128
+#define I2O_MAX_PHYS_SEGMENTS MAX_PHYS_SEGMENTS
+
+/*
* Message structures
*/
struct i2o_message {
@@ -58,6 +408,12 @@ struct i2o_message {
u32 body[0];
};
+/* MFA and I2O message used by mempool */
+struct i2o_msg_mfa {
+ u32 mfa; /* MFA returned by the controller */
+ struct i2o_message msg; /* I2O message */
+};
+
/*
* Each I2O device entity has one of these. There is one per device.
*/
@@ -130,6 +486,15 @@ struct i2o_dma {
};
/*
+ * Contains slab cache and mempool information
+ */
+struct i2o_pool {
+ char *name;
+ kmem_cache_t *slab;
+ mempool_t *mempool;
+};
+
+/*
* Contains IO mapped address information
*/
struct i2o_io {
@@ -174,8 +539,6 @@ struct i2o_controller {
void __iomem *irq_status; /* Interrupt status register address */
void __iomem *irq_mask; /* Interrupt mask register address */
- /* Dynamic LCT related data */
-
struct i2o_dma status; /* IOP status block */
struct i2o_dma hrt; /* HW Resource Table */
@@ -188,6 +551,8 @@ struct i2o_controller {
struct i2o_io in_queue; /* inbound message queue Host->IOP */
struct i2o_dma out_queue; /* outbound message queue IOP->Host */
+ struct i2o_pool in_msg; /* mempool for inbound messages */
+
unsigned int battery:1; /* Has a battery backup */
unsigned int io_alloc:1; /* An I/O resource was allocated */
unsigned int mem_alloc:1; /* A memory resource was allocated */
@@ -196,7 +561,6 @@ struct i2o_controller {
struct resource mem_resource; /* Mem resource allocated to the IOP */
struct device device;
- struct class_device *classdev; /* I2O controller class device */
struct i2o_device *exec; /* Executive */
#if BITS_PER_LONG == 64
spinlock_t context_list_lock; /* lock for context_list */
@@ -247,16 +611,13 @@ struct i2o_sys_tbl {
extern struct list_head i2o_controllers;
/* Message functions */
-static inline u32 i2o_msg_get(struct i2o_controller *,
- struct i2o_message __iomem **);
-extern u32 i2o_msg_get_wait(struct i2o_controller *,
- struct i2o_message __iomem **, int);
-static inline void i2o_msg_post(struct i2o_controller *, u32);
-static inline int i2o_msg_post_wait(struct i2o_controller *, u32,
- unsigned long);
-extern int i2o_msg_post_wait_mem(struct i2o_controller *, u32, unsigned long,
- struct i2o_dma *);
-extern void i2o_msg_nop(struct i2o_controller *, u32);
+static inline struct i2o_message *i2o_msg_get(struct i2o_controller *);
+extern struct i2o_message *i2o_msg_get_wait(struct i2o_controller *, int);
+static inline void i2o_msg_post(struct i2o_controller *, struct i2o_message *);
+static inline int i2o_msg_post_wait(struct i2o_controller *,
+ struct i2o_message *, unsigned long);
+extern int i2o_msg_post_wait_mem(struct i2o_controller *, struct i2o_message *,
+ unsigned long, struct i2o_dma *);
static inline void i2o_flush_reply(struct i2o_controller *, u32);
/* IOP functions */
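The prototypes above show the shape of the reworked message API: callers now handle struct i2o_message pointers and ERR_PTR-encoded failures instead of raw MFA values. A hedged sketch of the new call pattern (the request body is an illustrative UTIL NOP, timeout value arbitrary):

	struct i2o_message *msg;

	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);	/* -EBUSY or -ENOMEM */

	msg->u.head[0] = cpu_to_le32(THREE_WORD_MSG_SIZE | SGL_OFFSET_0);
	msg->u.head[1] = cpu_to_le32(I2O_CMD_UTIL_NOP << 24 |
				     HOST_TID << 12 | ADAPTER_TID);
	msg->u.head[2] = cpu_to_le32(0);	/* initiator context */

	if (i2o_msg_post_wait(c, msg, 60))
		return -EIO;	/* negative error code on failure */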
@@ -384,10 +745,10 @@ static inline u16 i2o_sg_tablesize(struct i2o_controller *c, u16 body_size)
static inline dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr,
size_t size,
enum dma_data_direction direction,
- u32 __iomem ** sg_ptr)
+ u32 ** sg_ptr)
{
u32 sg_flags;
- u32 __iomem *mptr = *sg_ptr;
+ u32 *mptr = *sg_ptr;
dma_addr_t dma_addr;
switch (direction) {
@@ -405,16 +766,16 @@ static inline dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr,
if (!dma_mapping_error(dma_addr)) {
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
- writel(0x7C020002, mptr++);
- writel(PAGE_SIZE, mptr++);
+ *mptr++ = cpu_to_le32(0x7C020002);
+ *mptr++ = cpu_to_le32(PAGE_SIZE);
}
#endif
- writel(sg_flags | size, mptr++);
- writel(i2o_dma_low(dma_addr), mptr++);
+ *mptr++ = cpu_to_le32(sg_flags | size);
+ *mptr++ = cpu_to_le32(i2o_dma_low(dma_addr));
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
if ((sizeof(dma_addr_t) > 4) && c->pae_support)
- writel(i2o_dma_high(dma_addr), mptr++);
+ *mptr++ = cpu_to_le32(i2o_dma_high(dma_addr));
#endif
*sg_ptr = mptr;
}
@@ -439,10 +800,10 @@ static inline dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr,
static inline int i2o_dma_map_sg(struct i2o_controller *c,
struct scatterlist *sg, int sg_count,
enum dma_data_direction direction,
- u32 __iomem ** sg_ptr)
+ u32 ** sg_ptr)
{
u32 sg_flags;
- u32 __iomem *mptr = *sg_ptr;
+ u32 *mptr = *sg_ptr;
switch (direction) {
case DMA_TO_DEVICE:
@@ -461,19 +822,19 @@ static inline int i2o_dma_map_sg(struct i2o_controller *c,
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
- writel(0x7C020002, mptr++);
- writel(PAGE_SIZE, mptr++);
+ *mptr++ = cpu_to_le32(0x7C020002);
+ *mptr++ = cpu_to_le32(PAGE_SIZE);
}
#endif
while (sg_count-- > 0) {
if (!sg_count)
sg_flags |= 0xC0000000;
- writel(sg_flags | sg_dma_len(sg), mptr++);
- writel(i2o_dma_low(sg_dma_address(sg)), mptr++);
+ *mptr++ = cpu_to_le32(sg_flags | sg_dma_len(sg));
+ *mptr++ = cpu_to_le32(i2o_dma_low(sg_dma_address(sg)));
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
if ((sizeof(dma_addr_t) > 4) && c->pae_support)
- writel(i2o_dma_high(sg_dma_address(sg)), mptr++);
+ *mptr++ = cpu_to_le32(i2o_dma_high(sg_dma_address(sg)));
#endif
sg++;
}
@@ -563,6 +924,64 @@ static inline int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr,
return 0;
};
+/*
+ * i2o_pool_alloc - Allocate a slab cache and mempool
+ * @pool: pointer to struct i2o_pool to write data into.
+ * @name: name which is used to identify cache
+ * @size: size of each object
+ * @min_nr: minimum number of objects
+ *
+ * First allocates a slab cache with name and size. Then allocates a
+ * mempool which uses the slab cache for allocation and freeing.
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+static inline int i2o_pool_alloc(struct i2o_pool *pool, const char *name,
+ size_t size, int min_nr)
+{
+ pool->name = kmalloc(strlen(name) + 1, GFP_KERNEL);
+ if (!pool->name)
+ goto exit;
+ strcpy(pool->name, name);
+
+ pool->slab =
+ kmem_cache_create(pool->name, size, 0, SLAB_HWCACHE_ALIGN, NULL,
+ NULL);
+ if (!pool->slab)
+ goto free_name;
+
+ pool->mempool =
+ mempool_create(min_nr, mempool_alloc_slab, mempool_free_slab,
+ pool->slab);
+ if (!pool->mempool)
+ goto free_slab;
+
+ return 0;
+
+ free_slab:
+ kmem_cache_destroy(pool->slab);
+
+ free_name:
+ kfree(pool->name);
+
+ exit:
+ return -ENOMEM;
+};
+
+/*
+ * i2o_pool_free - Free the slab cache and mempool
+ * @pool: pointer to struct i2o_pool which should be freed
+ *
+ * Note that you have to return all objects to the mempool again before
+ * calling i2o_pool_free().
+ */
+static inline void i2o_pool_free(struct i2o_pool *pool)
+{
+ mempool_destroy(pool->mempool);
+ kmem_cache_destroy(pool->slab);
+ kfree(pool->name);
+};
+
/* I2O driver (OSM) functions */
extern int i2o_driver_register(struct i2o_driver *);
extern void i2o_driver_unregister(struct i2o_driver *);
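The pool's lifecycle is symmetric around controller setup and teardown, and every message taken with i2o_msg_get() must have been returned via i2o_msg_post() or i2o_msg_nop() before the pool is freed. Sketch (object sizing illustrative):

	/* controller init */
	if (i2o_pool_alloc(&c->in_msg, "i2o_in_msg",
			   I2O_INBOUND_MSG_FRAME_SIZE * 4 + sizeof(u32),
			   I2O_MSG_INPOOL_MIN) < 0)
		return -ENOMEM;

	/* ... controller in service ... */

	/* controller teardown */
	i2o_pool_free(&c->in_msg);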
@@ -638,39 +1057,89 @@ extern int i2o_exec_lct_get(struct i2o_controller *);
#define kobj_to_i2o_device(kobj) to_i2o_device(container_of(kobj, struct device, kobj))
/**
+ * i2o_msg_out_to_virt - Turn an I2O message into a virtual address
+ * @c: controller
+ * @m: message engine value
+ *
+ * Turn a receive message from an I2O controller bus address into
+ * a Linux virtual address. The shared page frame is a linear block
+ * so we simply have to shift the offset. This function does not
+ * work for sender side messages as they are ioremap objects
+ * provided by the I2O controller.
+ */
+static inline struct i2o_message *i2o_msg_out_to_virt(struct i2o_controller *c,
+ u32 m)
+{
+ BUG_ON(m < c->out_queue.phys
+ || m >= c->out_queue.phys + c->out_queue.len);
+
+ return c->out_queue.virt + (m - c->out_queue.phys);
+};
+
+/**
+ * i2o_msg_in_to_virt - Turn an I2O message into a virtual address
+ * @c: controller
+ * @m: message engine value
+ *
+ * Turn a send message from an I2O controller bus address into
+ * a Linux virtual address. The shared page frame is a linear block
+ * so we simply have to shift the offset. This function does not
+ * work for receive side messages as they are kmalloc objects
+ * in a different pool.
+ */
+static inline struct i2o_message __iomem *i2o_msg_in_to_virt(struct
+ i2o_controller *c,
+ u32 m)
+{
+ return c->in_queue.virt + m;
+};
+
+/**
* i2o_msg_get - obtain an I2O message from the IOP
* @c: I2O controller
- * @msg: pointer to a I2O message pointer
*
- * This function tries to get a message slot. If no message slot is
+ * This function tries to get a message frame. If no message frame is
 * available, it does not wait until one is available (see also i2o_msg_get_wait).
+ * The returned pointer to the message frame is not in I/O memory, it is
+ * allocated from a mempool. But because an MFA is allocated from the
+ * controller too it is guaranteed that i2o_msg_post() will never fail.
*
- * On a success the message is returned and the pointer to the message is
- * set in msg. The returned message is the physical page frame offset
- * address from the read port (see the i2o spec). If no message is
- * available returns I2O_QUEUE_EMPTY and msg is leaved untouched.
+ * On success a pointer to the message frame is returned. If the message
+ * queue is empty -EBUSY is returned and if no memory is available -ENOMEM
+ * is returned.
*/
-static inline u32 i2o_msg_get(struct i2o_controller *c,
- struct i2o_message __iomem ** msg)
+static inline struct i2o_message *i2o_msg_get(struct i2o_controller *c)
{
- u32 m = readl(c->in_port);
-
- if (m != I2O_QUEUE_EMPTY)
- *msg = c->in_queue.virt + m;
+ struct i2o_msg_mfa *mmsg = mempool_alloc(c->in_msg.mempool, GFP_ATOMIC);
+ if (!mmsg)
+ return ERR_PTR(-ENOMEM);
+
+ mmsg->mfa = readl(c->in_port);
+ if (mmsg->mfa == I2O_QUEUE_EMPTY) {
+ mempool_free(mmsg, c->in_msg.mempool);
+ return ERR_PTR(-EBUSY);
+ }
- return m;
+ return &mmsg->msg;
};
/**
* i2o_msg_post - Post I2O message to I2O controller
* @c: I2O controller to which the message should be send
- * @m: the message identifier
+ * @msg: message returned by i2o_msg_get()
*
- * Post the message to the I2O controller.
+ * Post the message to the I2O controller and return immediately.
*/
-static inline void i2o_msg_post(struct i2o_controller *c, u32 m)
+static inline void i2o_msg_post(struct i2o_controller *c,
+ struct i2o_message *msg)
{
- writel(m, c->in_port);
+ struct i2o_msg_mfa *mmsg;
+
+ mmsg = container_of(msg, struct i2o_msg_mfa, msg);
+ memcpy_toio(i2o_msg_in_to_virt(c, mmsg->mfa), msg,
+ (le32_to_cpu(msg->u.head[0]) >> 16) << 2);
+ writel(mmsg->mfa, c->in_port);
+ mempool_free(mmsg, c->in_msg.mempool);
};
/**
@@ -685,62 +1154,66 @@ static inline void i2o_msg_post(struct i2o_controller *c, u32 m)
*
* Returns 0 on success or negative error code on failure.
*/
-static inline int i2o_msg_post_wait(struct i2o_controller *c, u32 m,
+static inline int i2o_msg_post_wait(struct i2o_controller *c,
+ struct i2o_message *msg,
unsigned long timeout)
{
- return i2o_msg_post_wait_mem(c, m, timeout, NULL);
+ return i2o_msg_post_wait_mem(c, msg, timeout, NULL);
};
/**
- * i2o_flush_reply - Flush reply from I2O controller
- * @c: I2O controller
- * @m: the message identifier
+ * i2o_msg_nop_mfa - Returns a fetched MFA back to the controller
+ * @c: I2O controller from which the MFA was fetched
+ * @mfa: MFA which should be returned
*
- * The I2O controller must be informed that the reply message is not needed
- * anymore. If you forget to flush the reply, the message frame can't be
- * used by the controller anymore and is therefore lost.
+ * This function must be used for preserved messages, because i2o_msg_nop()
+ * also returns the allocated memory back to the msg_pool mempool.
*/
-static inline void i2o_flush_reply(struct i2o_controller *c, u32 m)
+static inline void i2o_msg_nop_mfa(struct i2o_controller *c, u32 mfa)
{
- writel(m, c->out_port);
+ struct i2o_message __iomem *msg;
+ u32 nop[3] = {
+ THREE_WORD_MSG_SIZE | SGL_OFFSET_0,
+ I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | ADAPTER_TID,
+ 0x00000000
+ };
+
+ msg = i2o_msg_in_to_virt(c, mfa);
+ memcpy_toio(msg, nop, sizeof(nop));
+ writel(mfa, c->in_port);
};
/**
- * i2o_out_to_virt - Turn an I2O message to a virtual address
- * @c: controller
- * @m: message engine value
+ * i2o_msg_nop - Returns a message which is not used
+ * @c: I2O controller from which the message was created
+ * @msg: message which should be returned
*
- * Turn a receive message from an I2O controller bus address into
- * a Linux virtual address. The shared page frame is a linear block
- * so we simply have to shift the offset. This function does not
- * work for sender side messages as they are ioremap objects
- * provided by the I2O controller.
+ * If you fetch a message via i2o_msg_get, and can't use it, you must
+ * return the message with this function. Otherwise the MFA is lost as well
+ * as the allocated memory from the mempool.
*/
-static inline struct i2o_message *i2o_msg_out_to_virt(struct i2o_controller *c,
- u32 m)
+static inline void i2o_msg_nop(struct i2o_controller *c,
+ struct i2o_message *msg)
{
- BUG_ON(m < c->out_queue.phys
- || m >= c->out_queue.phys + c->out_queue.len);
+ struct i2o_msg_mfa *mmsg;
+ mmsg = container_of(msg, struct i2o_msg_mfa, msg);
- return c->out_queue.virt + (m - c->out_queue.phys);
+ i2o_msg_nop_mfa(c, mmsg->mfa);
+ mempool_free(mmsg, c->in_msg.mempool);
};
/**
- * i2o_msg_in_to_virt - Turn an I2O message to a virtual address
- * @c: controller
- * @m: message engine value
+ * i2o_flush_reply - Flush reply from I2O controller
+ * @c: I2O controller
+ * @m: the message identifier
*
- * Turn a send message from an I2O controller bus address into
- * a Linux virtual address. The shared page frame is a linear block
- * so we simply have to shift the offset. This function does not
- * work for receive side messages as they are kmalloc objects
- * in a different pool.
+ * The I2O controller must be informed that the reply message is not needed
+ * anymore. If you forget to flush the reply, the message frame can't be
+ * used by the controller anymore and is therefore lost.
*/
-static inline struct i2o_message __iomem *i2o_msg_in_to_virt(struct
- i2o_controller *c,
- u32 m)
+static inline void i2o_flush_reply(struct i2o_controller *c, u32 m)
{
- return c->in_queue.virt + m;
+ writel(m, c->out_port);
};
/*
@@ -779,350 +1252,5 @@ extern void i2o_dump_message(struct i2o_message *);
extern void i2o_dump_hrt(struct i2o_controller *c);
extern void i2o_debug_state(struct i2o_controller *c);
-/*
- * Cache strategies
- */
-
-/* The NULL strategy leaves everything up to the controller. This tends to be a
- * pessimal but functional choice.
- */
-#define CACHE_NULL 0
-/* Prefetch data when reading. We continually attempt to load the next 32 sectors
- * into the controller cache.
- */
-#define CACHE_PREFETCH 1
-/* Prefetch data when reading. We sometimes attempt to load the next 32 sectors
- * into the controller cache. When an I/O is less <= 8K we assume its probably
- * not sequential and don't prefetch (default)
- */
-#define CACHE_SMARTFETCH 2
-/* Data is written to the cache and then out on to the disk. The I/O must be
- * physically on the medium before the write is acknowledged (default without
- * NVRAM)
- */
-#define CACHE_WRITETHROUGH 17
-/* Data is written to the cache and then out on to the disk. The controller
- * is permitted to write back the cache any way it wants. (default if battery
- * backed NVRAM is present). It can be useful to set this for swap regardless of
- * battery state.
- */
-#define CACHE_WRITEBACK 18
-/* Optimise for under powered controllers, especially on RAID1 and RAID0. We
- * write large I/O's directly to disk bypassing the cache to avoid the extra
- * memory copy hits. Small writes are writeback cached
- */
-#define CACHE_SMARTBACK 19
-/* Optimise for under powered controllers, especially on RAID1 and RAID0. We
- * write large I/O's directly to disk bypassing the cache to avoid the extra
- * memory copy hits. Small writes are writethrough cached. Suitable for devices
- * lacking battery backup
- */
-#define CACHE_SMARTTHROUGH 20
-
-/*
- * Ioctl structures
- */
-
-#define BLKI2OGRSTRAT _IOR('2', 1, int)
-#define BLKI2OGWSTRAT _IOR('2', 2, int)
-#define BLKI2OSRSTRAT _IOW('2', 3, int)
-#define BLKI2OSWSTRAT _IOW('2', 4, int)
-
-/*
- * I2O Function codes
- */
-
-/*
- * Executive Class
- */
-#define I2O_CMD_ADAPTER_ASSIGN 0xB3
-#define I2O_CMD_ADAPTER_READ 0xB2
-#define I2O_CMD_ADAPTER_RELEASE 0xB5
-#define I2O_CMD_BIOS_INFO_SET 0xA5
-#define I2O_CMD_BOOT_DEVICE_SET 0xA7
-#define I2O_CMD_CONFIG_VALIDATE 0xBB
-#define I2O_CMD_CONN_SETUP 0xCA
-#define I2O_CMD_DDM_DESTROY 0xB1
-#define I2O_CMD_DDM_ENABLE 0xD5
-#define I2O_CMD_DDM_QUIESCE 0xC7
-#define I2O_CMD_DDM_RESET 0xD9
-#define I2O_CMD_DDM_SUSPEND 0xAF
-#define I2O_CMD_DEVICE_ASSIGN 0xB7
-#define I2O_CMD_DEVICE_RELEASE 0xB9
-#define I2O_CMD_HRT_GET 0xA8
-#define I2O_CMD_ADAPTER_CLEAR 0xBE
-#define I2O_CMD_ADAPTER_CONNECT 0xC9
-#define I2O_CMD_ADAPTER_RESET 0xBD
-#define I2O_CMD_LCT_NOTIFY 0xA2
-#define I2O_CMD_OUTBOUND_INIT 0xA1
-#define I2O_CMD_PATH_ENABLE 0xD3
-#define I2O_CMD_PATH_QUIESCE 0xC5
-#define I2O_CMD_PATH_RESET 0xD7
-#define I2O_CMD_STATIC_MF_CREATE 0xDD
-#define I2O_CMD_STATIC_MF_RELEASE 0xDF
-#define I2O_CMD_STATUS_GET 0xA0
-#define I2O_CMD_SW_DOWNLOAD 0xA9
-#define I2O_CMD_SW_UPLOAD 0xAB
-#define I2O_CMD_SW_REMOVE 0xAD
-#define I2O_CMD_SYS_ENABLE 0xD1
-#define I2O_CMD_SYS_MODIFY 0xC1
-#define I2O_CMD_SYS_QUIESCE 0xC3
-#define I2O_CMD_SYS_TAB_SET 0xA3
-
-/*
- * Utility Class
- */
-#define I2O_CMD_UTIL_NOP 0x00
-#define I2O_CMD_UTIL_ABORT 0x01
-#define I2O_CMD_UTIL_CLAIM 0x09
-#define I2O_CMD_UTIL_RELEASE 0x0B
-#define I2O_CMD_UTIL_PARAMS_GET 0x06
-#define I2O_CMD_UTIL_PARAMS_SET 0x05
-#define I2O_CMD_UTIL_EVT_REGISTER 0x13
-#define I2O_CMD_UTIL_EVT_ACK 0x14
-#define I2O_CMD_UTIL_CONFIG_DIALOG 0x10
-#define I2O_CMD_UTIL_DEVICE_RESERVE 0x0D
-#define I2O_CMD_UTIL_DEVICE_RELEASE 0x0F
-#define I2O_CMD_UTIL_LOCK 0x17
-#define I2O_CMD_UTIL_LOCK_RELEASE 0x19
-#define I2O_CMD_UTIL_REPLY_FAULT_NOTIFY 0x15
-
-/*
- * SCSI Host Bus Adapter Class
- */
-#define I2O_CMD_SCSI_EXEC 0x81
-#define I2O_CMD_SCSI_ABORT 0x83
-#define I2O_CMD_SCSI_BUSRESET 0x27
-
-/*
- * Bus Adapter Class
- */
-#define I2O_CMD_BUS_ADAPTER_RESET 0x85
-#define I2O_CMD_BUS_RESET 0x87
-#define I2O_CMD_BUS_SCAN 0x89
-#define I2O_CMD_BUS_QUIESCE 0x8b
-
-/*
- * Random Block Storage Class
- */
-#define I2O_CMD_BLOCK_READ 0x30
-#define I2O_CMD_BLOCK_WRITE 0x31
-#define I2O_CMD_BLOCK_CFLUSH 0x37
-#define I2O_CMD_BLOCK_MLOCK 0x49
-#define I2O_CMD_BLOCK_MUNLOCK 0x4B
-#define I2O_CMD_BLOCK_MMOUNT 0x41
-#define I2O_CMD_BLOCK_MEJECT 0x43
-#define I2O_CMD_BLOCK_POWER 0x70
-
-#define I2O_CMD_PRIVATE 0xFF
-
-/* Command status values */
-
-#define I2O_CMD_IN_PROGRESS 0x01
-#define I2O_CMD_REJECTED 0x02
-#define I2O_CMD_FAILED 0x03
-#define I2O_CMD_COMPLETED 0x04
-
-/* I2O API function return values */
-
-#define I2O_RTN_NO_ERROR 0
-#define I2O_RTN_NOT_INIT 1
-#define I2O_RTN_FREE_Q_EMPTY 2
-#define I2O_RTN_TCB_ERROR 3
-#define I2O_RTN_TRANSACTION_ERROR 4
-#define I2O_RTN_ADAPTER_ALREADY_INIT 5
-#define I2O_RTN_MALLOC_ERROR 6
-#define I2O_RTN_ADPTR_NOT_REGISTERED 7
-#define I2O_RTN_MSG_REPLY_TIMEOUT 8
-#define I2O_RTN_NO_STATUS 9
-#define I2O_RTN_NO_FIRM_VER 10
-#define I2O_RTN_NO_LINK_SPEED 11
-
-/* Reply message status defines for all messages */
-
-#define I2O_REPLY_STATUS_SUCCESS 0x00
-#define I2O_REPLY_STATUS_ABORT_DIRTY 0x01
-#define I2O_REPLY_STATUS_ABORT_NO_DATA_TRANSFER 0x02
-#define I2O_REPLY_STATUS_ABORT_PARTIAL_TRANSFER 0x03
-#define I2O_REPLY_STATUS_ERROR_DIRTY 0x04
-#define I2O_REPLY_STATUS_ERROR_NO_DATA_TRANSFER 0x05
-#define I2O_REPLY_STATUS_ERROR_PARTIAL_TRANSFER 0x06
-#define I2O_REPLY_STATUS_PROCESS_ABORT_DIRTY 0x08
-#define I2O_REPLY_STATUS_PROCESS_ABORT_NO_DATA_TRANSFER 0x09
-#define I2O_REPLY_STATUS_PROCESS_ABORT_PARTIAL_TRANSFER 0x0A
-#define I2O_REPLY_STATUS_TRANSACTION_ERROR 0x0B
-#define I2O_REPLY_STATUS_PROGRESS_REPORT 0x80
-
-/* Status codes and Error Information for Parameter functions */
-
-#define I2O_PARAMS_STATUS_SUCCESS 0x00
-#define I2O_PARAMS_STATUS_BAD_KEY_ABORT 0x01
-#define I2O_PARAMS_STATUS_BAD_KEY_CONTINUE 0x02
-#define I2O_PARAMS_STATUS_BUFFER_FULL 0x03
-#define I2O_PARAMS_STATUS_BUFFER_TOO_SMALL 0x04
-#define I2O_PARAMS_STATUS_FIELD_UNREADABLE 0x05
-#define I2O_PARAMS_STATUS_FIELD_UNWRITEABLE 0x06
-#define I2O_PARAMS_STATUS_INSUFFICIENT_FIELDS 0x07
-#define I2O_PARAMS_STATUS_INVALID_GROUP_ID 0x08
-#define I2O_PARAMS_STATUS_INVALID_OPERATION 0x09
-#define I2O_PARAMS_STATUS_NO_KEY_FIELD 0x0A
-#define I2O_PARAMS_STATUS_NO_SUCH_FIELD 0x0B
-#define I2O_PARAMS_STATUS_NON_DYNAMIC_GROUP 0x0C
-#define I2O_PARAMS_STATUS_OPERATION_ERROR 0x0D
-#define I2O_PARAMS_STATUS_SCALAR_ERROR 0x0E
-#define I2O_PARAMS_STATUS_TABLE_ERROR 0x0F
-#define I2O_PARAMS_STATUS_WRONG_GROUP_TYPE 0x10
-
-/* DetailedStatusCode defines for Executive, DDM, Util and Transaction error
- * messages: Table 3-2 Detailed Status Codes.*/
-
-#define I2O_DSC_SUCCESS 0x0000
-#define I2O_DSC_BAD_KEY 0x0002
-#define I2O_DSC_TCL_ERROR 0x0003
-#define I2O_DSC_REPLY_BUFFER_FULL 0x0004
-#define I2O_DSC_NO_SUCH_PAGE 0x0005
-#define I2O_DSC_INSUFFICIENT_RESOURCE_SOFT 0x0006
-#define I2O_DSC_INSUFFICIENT_RESOURCE_HARD 0x0007
-#define I2O_DSC_CHAIN_BUFFER_TOO_LARGE 0x0009
-#define I2O_DSC_UNSUPPORTED_FUNCTION 0x000A
-#define I2O_DSC_DEVICE_LOCKED 0x000B
-#define I2O_DSC_DEVICE_RESET 0x000C
-#define I2O_DSC_INAPPROPRIATE_FUNCTION 0x000D
-#define I2O_DSC_INVALID_INITIATOR_ADDRESS 0x000E
-#define I2O_DSC_INVALID_MESSAGE_FLAGS 0x000F
-#define I2O_DSC_INVALID_OFFSET 0x0010
-#define I2O_DSC_INVALID_PARAMETER 0x0011
-#define I2O_DSC_INVALID_REQUEST 0x0012
-#define I2O_DSC_INVALID_TARGET_ADDRESS 0x0013
-#define I2O_DSC_MESSAGE_TOO_LARGE 0x0014
-#define I2O_DSC_MESSAGE_TOO_SMALL 0x0015
-#define I2O_DSC_MISSING_PARAMETER 0x0016
-#define I2O_DSC_TIMEOUT 0x0017
-#define I2O_DSC_UNKNOWN_ERROR 0x0018
-#define I2O_DSC_UNKNOWN_FUNCTION 0x0019
-#define I2O_DSC_UNSUPPORTED_VERSION 0x001A
-#define I2O_DSC_DEVICE_BUSY 0x001B
-#define I2O_DSC_DEVICE_NOT_AVAILABLE 0x001C
-
-/* DetailedStatusCode defines for Block Storage Operation: Table 6-7 Detailed
- Status Codes.*/
-
-#define I2O_BSA_DSC_SUCCESS 0x0000
-#define I2O_BSA_DSC_MEDIA_ERROR 0x0001
-#define I2O_BSA_DSC_ACCESS_ERROR 0x0002
-#define I2O_BSA_DSC_DEVICE_FAILURE 0x0003
-#define I2O_BSA_DSC_DEVICE_NOT_READY 0x0004
-#define I2O_BSA_DSC_MEDIA_NOT_PRESENT 0x0005
-#define I2O_BSA_DSC_MEDIA_LOCKED 0x0006
-#define I2O_BSA_DSC_MEDIA_FAILURE 0x0007
-#define I2O_BSA_DSC_PROTOCOL_FAILURE 0x0008
-#define I2O_BSA_DSC_BUS_FAILURE 0x0009
-#define I2O_BSA_DSC_ACCESS_VIOLATION 0x000A
-#define I2O_BSA_DSC_WRITE_PROTECTED 0x000B
-#define I2O_BSA_DSC_DEVICE_RESET 0x000C
-#define I2O_BSA_DSC_VOLUME_CHANGED 0x000D
-#define I2O_BSA_DSC_TIMEOUT 0x000E
-
-/* FailureStatusCodes, Table 3-3 Message Failure Codes */
-
-#define I2O_FSC_TRANSPORT_SERVICE_SUSPENDED 0x81
-#define I2O_FSC_TRANSPORT_SERVICE_TERMINATED 0x82
-#define I2O_FSC_TRANSPORT_CONGESTION 0x83
-#define I2O_FSC_TRANSPORT_FAILURE 0x84
-#define I2O_FSC_TRANSPORT_STATE_ERROR 0x85
-#define I2O_FSC_TRANSPORT_TIME_OUT 0x86
-#define I2O_FSC_TRANSPORT_ROUTING_FAILURE 0x87
-#define I2O_FSC_TRANSPORT_INVALID_VERSION 0x88
-#define I2O_FSC_TRANSPORT_INVALID_OFFSET 0x89
-#define I2O_FSC_TRANSPORT_INVALID_MSG_FLAGS 0x8A
-#define I2O_FSC_TRANSPORT_FRAME_TOO_SMALL 0x8B
-#define I2O_FSC_TRANSPORT_FRAME_TOO_LARGE 0x8C
-#define I2O_FSC_TRANSPORT_INVALID_TARGET_ID 0x8D
-#define I2O_FSC_TRANSPORT_INVALID_INITIATOR_ID 0x8E
-#define I2O_FSC_TRANSPORT_INVALID_INITIATOR_CONTEXT 0x8F
-#define I2O_FSC_TRANSPORT_UNKNOWN_FAILURE 0xFF
-
-/* Device Claim Types */
-#define I2O_CLAIM_PRIMARY 0x01000000
-#define I2O_CLAIM_MANAGEMENT 0x02000000
-#define I2O_CLAIM_AUTHORIZED 0x03000000
-#define I2O_CLAIM_SECONDARY 0x04000000
-
-/* Message header defines for VersionOffset */
-#define I2OVER15 0x0001
-#define I2OVER20 0x0002
-
-/* Default is 1.5 */
-#define I2OVERSION I2OVER15
-
-#define SGL_OFFSET_0 I2OVERSION
-#define SGL_OFFSET_4 (0x0040 | I2OVERSION)
-#define SGL_OFFSET_5 (0x0050 | I2OVERSION)
-#define SGL_OFFSET_6 (0x0060 | I2OVERSION)
-#define SGL_OFFSET_7 (0x0070 | I2OVERSION)
-#define SGL_OFFSET_8 (0x0080 | I2OVERSION)
-#define SGL_OFFSET_9 (0x0090 | I2OVERSION)
-#define SGL_OFFSET_10 (0x00A0 | I2OVERSION)
-#define SGL_OFFSET_11 (0x00B0 | I2OVERSION)
-#define SGL_OFFSET_12 (0x00C0 | I2OVERSION)
-#define SGL_OFFSET(x) (((x)<<4) | I2OVERSION)
-
-/* Transaction Reply Lists (TRL) Control Word structure */
-#define TRL_SINGLE_FIXED_LENGTH 0x00
-#define TRL_SINGLE_VARIABLE_LENGTH 0x40
-#define TRL_MULTIPLE_FIXED_LENGTH 0x80
-
- /* msg header defines for MsgFlags */
-#define MSG_STATIC 0x0100
-#define MSG_64BIT_CNTXT 0x0200
-#define MSG_MULTI_TRANS 0x1000
-#define MSG_FAIL 0x2000
-#define MSG_FINAL 0x4000
-#define MSG_REPLY 0x8000
-
- /* minimum size msg */
-#define THREE_WORD_MSG_SIZE 0x00030000
-#define FOUR_WORD_MSG_SIZE 0x00040000
-#define FIVE_WORD_MSG_SIZE 0x00050000
-#define SIX_WORD_MSG_SIZE 0x00060000
-#define SEVEN_WORD_MSG_SIZE 0x00070000
-#define EIGHT_WORD_MSG_SIZE 0x00080000
-#define NINE_WORD_MSG_SIZE 0x00090000
-#define TEN_WORD_MSG_SIZE 0x000A0000
-#define ELEVEN_WORD_MSG_SIZE 0x000B0000
-#define I2O_MESSAGE_SIZE(x) ((x)<<16)
-
-/* special TID assignments */
-#define ADAPTER_TID 0
-#define HOST_TID 1
-
-/* outbound queue defines */
-#define I2O_MAX_OUTBOUND_MSG_FRAMES 128
-#define I2O_OUTBOUND_MSG_FRAME_SIZE 128 /* in 32-bit words */
-
-#define I2O_POST_WAIT_OK 0
-#define I2O_POST_WAIT_TIMEOUT -ETIMEDOUT
-
-#define I2O_CONTEXT_LIST_MIN_LENGTH 15
-#define I2O_CONTEXT_LIST_USED 0x01
-#define I2O_CONTEXT_LIST_DELETED 0x02
-
-/* timeouts */
-#define I2O_TIMEOUT_INIT_OUTBOUND_QUEUE 15
-#define I2O_TIMEOUT_MESSAGE_GET 5
-#define I2O_TIMEOUT_RESET 30
-#define I2O_TIMEOUT_STATUS_GET 5
-#define I2O_TIMEOUT_LCT_GET 360
-#define I2O_TIMEOUT_SCSI_SCB_ABORT 240
-
-/* retries */
-#define I2O_HRT_GET_TRIES 3
-#define I2O_LCT_GET_TRIES 3
-
-/* defines for max_sectors and max_phys_segments */
-#define I2O_MAX_SECTORS 1024
-#define I2O_MAX_SECTORS_LIMITED 256
-#define I2O_MAX_PHYS_SEGMENTS MAX_PHYS_SEGMENTS
-
#endif /* __KERNEL__ */
#endif /* _I2O_H */
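
The added i2o_flush_reply() is the piece a driver must not forget: once a reply
frame has been consumed, the identifier has to be written back to the
controller's out_port so the frame can be reused. A minimal sketch, assuming a
hypothetical example_handle_reply() callback; the dispatch machinery around it
is not part of this header, only i2o_flush_reply() is:

/* Sketch only: consume a reply frame, then hand it back to the IOP.
 * example_handle_reply() and its calling convention are assumptions
 * for illustration. */
static void example_handle_reply(struct i2o_controller *c, u32 m,
				 struct i2o_message *msg)
{
	/* ... act on the reply pointed to by msg ... */

	/* return the frame; skipping this leaks it to the controller */
	i2o_flush_reply(c, m);
}
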
diff --git a/include/linux/irq.h b/include/linux/irq.h
index f04ba20712a..6c5d4c898cc 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -12,7 +12,7 @@
#include <linux/config.h>
#include <linux/smp.h>
-#if !defined(CONFIG_ARCH_S390)
+#if !defined(CONFIG_S390)
#include <linux/linkage.h>
#include <linux/cache.h>
@@ -221,6 +221,17 @@ extern void note_interrupt(unsigned int irq, irq_desc_t *desc,
extern int can_request_irq(unsigned int irq, unsigned long irqflags);
extern void init_irq_proc(void);
+
+#ifdef CONFIG_AUTO_IRQ_AFFINITY
+extern int select_smp_affinity(unsigned int irq);
+#else
+static inline int
+select_smp_affinity(unsigned int irq)
+{
+ return 1;
+}
+#endif
+
#endif
extern hw_irq_controller no_irq_type; /* needed in every arch ? */
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index dcde7adfdce..558cb4c26ec 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -498,6 +498,12 @@ struct transaction_s
struct journal_head *t_checkpoint_list;
/*
+ * Doubly-linked circular list of all buffers submitted for IO while
+ * checkpointing. [j_list_lock]
+ */
+ struct journal_head *t_checkpoint_io_list;
+
+ /*
* Doubly-linked circular list of temporary buffers currently undergoing
* IO in the log [j_list_lock]
*/
@@ -843,7 +849,7 @@ extern void journal_commit_transaction(journal_t *);
/* Checkpoint list management */
int __journal_clean_checkpoint_list(journal_t *journal);
-void __journal_remove_checkpoint(struct journal_head *);
+int __journal_remove_checkpoint(struct journal_head *);
void __journal_insert_checkpoint(struct journal_head *, transaction_t *);
/* Buffer IO */
diff --git a/include/linux/key.h b/include/linux/key.h
index 53513a3be53..4d189e51bc6 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -193,14 +193,6 @@ struct key_type {
*/
int (*instantiate)(struct key *key, const void *data, size_t datalen);
- /* duplicate a key of this type (optional)
- * - the source key will be locked against change
- * - the new description will be attached
- * - the quota will have been adjusted automatically from
- * source->quotalen
- */
- int (*duplicate)(struct key *key, const struct key *source);
-
/* update a key of this type (optional)
* - this method should call key_payload_reserve() to recalculate the
* quota consumption
diff --git a/include/linux/libata.h b/include/linux/libata.h
index e828e172ccb..a43c95f8f96 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -124,6 +124,8 @@ enum {
ATA_FLAG_DEBUGMSG = (1 << 10),
ATA_FLAG_NO_ATAPI = (1 << 11), /* No ATAPI support */
+ ATA_FLAG_SUSPENDED = (1 << 12), /* port is suspended */
+
ATA_QCFLAG_ACTIVE = (1 << 1), /* cmd not yet ack'd to scsi lyer */
ATA_QCFLAG_SG = (1 << 3), /* have s/g table? */
ATA_QCFLAG_SINGLE = (1 << 4), /* no s/g, just a single buffer */
@@ -436,6 +438,8 @@ extern void ata_std_ports(struct ata_ioports *ioaddr);
extern int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
unsigned int n_ports);
extern void ata_pci_remove_one (struct pci_dev *pdev);
+extern int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state);
+extern int ata_pci_device_resume(struct pci_dev *pdev);
#endif /* CONFIG_PCI */
extern int ata_device_add(const struct ata_probe_ent *ent);
extern void ata_host_set_remove(struct ata_host_set *host_set);
@@ -445,6 +449,10 @@ extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmn
extern int ata_scsi_error(struct Scsi_Host *host);
extern int ata_scsi_release(struct Scsi_Host *host);
extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
+extern int ata_scsi_device_resume(struct scsi_device *);
+extern int ata_scsi_device_suspend(struct scsi_device *);
+extern int ata_device_resume(struct ata_port *, struct ata_device *);
+extern int ata_device_suspend(struct ata_port *, struct ata_device *);
extern int ata_ratelimit(void);
/*
@@ -480,7 +488,8 @@ extern u8 ata_bmdma_status(struct ata_port *ap);
extern void ata_bmdma_irq_clear(struct ata_port *ap);
extern void ata_qc_complete(struct ata_queued_cmd *qc);
extern void ata_eng_timeout(struct ata_port *ap);
-extern void ata_scsi_simulate(u16 *id, struct scsi_cmnd *cmd,
+extern void ata_scsi_simulate(struct ata_port *ap, struct ata_device *dev,
+ struct scsi_cmnd *cmd,
void (*done)(struct scsi_cmnd *));
extern int ata_std_bios_param(struct scsi_device *sdev,
struct block_device *bdev,
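
With ata_pci_device_suspend()/ata_pci_device_resume() exported, a PCI low-level
driver can pick up power management simply by pointing its pci_driver methods
at the new helpers. A sketch under assumed names (mydrv, mydrv_pci_tbl and
mydrv_init_one are placeholders, not libata symbols):

/* Sketch: wiring the new libata PM helpers into a PCI LLDD.
 * All mydrv_* names are hypothetical. */
static int mydrv_init_one(struct pci_dev *pdev,
			  const struct pci_device_id *ent);

static const struct pci_device_id mydrv_pci_tbl[] = {
	{ 0x1234, 0x5678, PCI_ANY_ID, PCI_ANY_ID },	/* made-up IDs */
	{ }	/* terminating entry */
};

static struct pci_driver mydrv_pci_driver = {
	.name		= "mydrv",
	.id_table	= mydrv_pci_tbl,
	.probe		= mydrv_init_one,
	.remove		= ata_pci_remove_one,
	.suspend	= ata_pci_device_suspend,	/* new helper */
	.resume		= ata_pci_device_resume,	/* new helper */
};
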
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 8b67cf837ca..ed00b278cb9 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -110,14 +110,6 @@ static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
#define mpol_set_vma_default(vma) ((vma)->vm_policy = NULL)
/*
- * Hugetlb policy. i386 hugetlb so far works with node numbers
- * instead of zone lists, so give it special interfaces for now.
- */
-extern int mpol_first_node(struct vm_area_struct *vma, unsigned long addr);
-extern int mpol_node_valid(int nid, struct vm_area_struct *vma,
- unsigned long addr);
-
-/*
* Tree of shared policies for a shared memory region.
* Maintain the policies in a pseudo mm that contains vmas. The vmas
* carry the policy. As a special twist the pseudo mm is indexed in pages, not
@@ -156,6 +148,16 @@ extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void numa_policy_rebind(const nodemask_t *old, const nodemask_t *new);
extern struct mempolicy default_policy;
+extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
+ unsigned long addr);
+
+extern int policy_zone;
+
+static inline void check_highest_zone(int k)
+{
+ if (k > policy_zone)
+ policy_zone = k;
+}
#else
@@ -182,17 +184,6 @@ static inline struct mempolicy *mpol_copy(struct mempolicy *old)
return NULL;
}
-static inline int mpol_first_node(struct vm_area_struct *vma, unsigned long a)
-{
- return numa_node_id();
-}
-
-static inline int
-mpol_node_valid(int nid, struct vm_area_struct *vma, unsigned long a)
-{
- return 1;
-}
-
struct shared_policy {};
static inline int mpol_set_shared_policy(struct shared_policy *info,
@@ -232,6 +223,15 @@ static inline void numa_policy_rebind(const nodemask_t *old,
{
}
+static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ return NODE_DATA(0)->node_zonelists + gfp_zone(GFP_HIGHUSER);
+}
+
+static inline void check_highest_zone(int k)
+{
+}
#endif /* CONFIG_NUMA */
#endif /* __KERNEL__ */
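
check_highest_zone() exists so that code assembling zonelists can keep
policy_zone pointing at the highest zone in use, which is what MPOL_BIND
policies are checked against. A sketch of the intended call pattern
(example_note_zones() is hypothetical; the NULL-terminated zones[] layout is
assumed from the zonelist definition of this era):

/* Sketch: record the highest zone present in a zonelist so mempolicy
 * knows which zone class bind policies apply to. */
static void example_note_zones(struct zonelist *zl)
{
	int i;

	for (i = 0; zl->zones[i] != NULL; i++)
		check_highest_zone(zone_idx(zl->zones[i]));
}
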
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a06a84d347f..bc01fff3aa0 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -634,14 +634,38 @@ struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
int shmem_lock(struct file *file, int lock, struct user_struct *user);
#else
#define shmem_nopage filemap_nopage
-#define shmem_lock(a, b, c) ({0;}) /* always in memory, no need to lock */
-#define shmem_set_policy(a, b) (0)
-#define shmem_get_policy(a, b) (NULL)
+
+static inline int shmem_lock(struct file *file, int lock,
+ struct user_struct *user)
+{
+ return 0;
+}
+
+static inline int shmem_set_policy(struct vm_area_struct *vma,
+ struct mempolicy *new)
+{
+ return 0;
+}
+
+static inline struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ return NULL;
+}
#endif
struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags);
+extern int shmem_mmap(struct file *file, struct vm_area_struct *vma);
int shmem_zero_setup(struct vm_area_struct *);
+#ifndef CONFIG_MMU
+extern unsigned long shmem_get_unmapped_area(struct file *file,
+ unsigned long addr,
+ unsigned long len,
+ unsigned long pgoff,
+ unsigned long flags);
+#endif
+
static inline int can_do_mlock(void)
{
if (capable(CAP_IPC_LOCK))
@@ -690,14 +714,31 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
}
extern int vmtruncate(struct inode * inode, loff_t offset);
+extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end);
extern int install_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, struct page *page, pgprot_t prot);
extern int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long pgoff, pgprot_t prot);
-extern int __handle_mm_fault(struct mm_struct *mm,struct vm_area_struct *vma, unsigned long address, int write_access);
-static inline int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, int write_access)
+#ifdef CONFIG_MMU
+extern int __handle_mm_fault(struct mm_struct *mm,struct vm_area_struct *vma,
+ unsigned long address, int write_access);
+
+static inline int handle_mm_fault(struct mm_struct *mm,
+ struct vm_area_struct *vma, unsigned long address,
+ int write_access)
{
- return __handle_mm_fault(mm, vma, address, write_access) & (~VM_FAULT_WRITE);
+ return __handle_mm_fault(mm, vma, address, write_access) &
+ (~VM_FAULT_WRITE);
}
+#else
+static inline int handle_mm_fault(struct mm_struct *mm,
+ struct vm_area_struct *vma, unsigned long address,
+ int write_access)
+{
+ /* should never happen if there's no MMU */
+ BUG();
+ return VM_FAULT_SIGBUS;
+}
+#endif
extern int make_pages_present(unsigned long addr, unsigned long end);
extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
@@ -896,6 +937,8 @@ extern unsigned long do_brk(unsigned long, unsigned long);
/* filemap.c */
extern unsigned long page_unuse(struct page *);
extern void truncate_inode_pages(struct address_space *, loff_t);
+extern void truncate_inode_pages_range(struct address_space *,
+ loff_t lstart, loff_t lend);
/* generic vm_area_ops exported for stackable file systems */
extern struct page *filemap_nopage(struct vm_area_struct *, unsigned long, int *);
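
truncate_inode_pages_range() generalizes truncate_inode_pages() to a sub-range,
which is what the new hole-punching path behind vmtruncate_range() needs. One
would expect the whole-file call to reduce to the range form, roughly:

/* Sketch: truncating everything from lstart onward is the range
 * call with an open-ended end offset. */
static void example_truncate_from(struct address_space *mapping,
				  loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
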
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 9f22090df7d..c34f4a2c62f 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -46,7 +46,6 @@ struct zone_padding {
struct per_cpu_pages {
int count; /* number of pages in the list */
- int low; /* low watermark, refill needed */
int high; /* high watermark, emptying needed */
int batch; /* chunk size for buddy add/remove */
struct list_head list; /* the list of pages */
@@ -389,6 +388,11 @@ static inline struct zone *next_zone(struct zone *zone)
#define for_each_zone(zone) \
for (zone = pgdat_list->node_zones; zone; zone = next_zone(zone))
+static inline int populated_zone(struct zone *zone)
+{
+ return (!!zone->present_pages);
+}
+
static inline int is_highmem_idx(int idx)
{
return (idx == ZONE_HIGHMEM);
@@ -398,6 +402,7 @@ static inline int is_normal_idx(int idx)
{
return (idx == ZONE_NORMAL);
}
+
/**
* is_highmem - helper function to quickly check if a struct zone is a
* highmem zone or not. This is an attempt to keep references
@@ -414,6 +419,16 @@ static inline int is_normal(struct zone *zone)
return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL;
}
+static inline int is_dma32(struct zone *zone)
+{
+ return zone == zone->zone_pgdat->node_zones + ZONE_DMA32;
+}
+
+static inline int is_dma(struct zone *zone)
+{
+ return zone == zone->zone_pgdat->node_zones + ZONE_DMA;
+}
+
/* These two functions are used to setup the per zone pages min values */
struct ctl_table;
struct file;
@@ -435,7 +450,6 @@ extern struct pglist_data contig_page_data;
#define NODE_DATA(nid) (&contig_page_data)
#define NODE_MEM_MAP(nid) mem_map
#define MAX_NODES_SHIFT 1
-#define pfn_to_nid(pfn) (0)
#else /* CONFIG_NEED_MULTIPLE_NODES */
@@ -470,6 +484,10 @@ extern struct pglist_data contig_page_data;
#define early_pfn_to_nid(nid) (0UL)
#endif
+#ifdef CONFIG_FLATMEM
+#define pfn_to_nid(pfn) (0)
+#endif
+
#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)
@@ -564,11 +582,6 @@ static inline int valid_section_nr(unsigned long nr)
return valid_section(__nr_to_section(nr));
}
-/*
- * Given a kernel address, find the home node of the underlying memory.
- */
-#define kvaddr_to_nid(kaddr) pfn_to_nid(__pa(kaddr) >> PAGE_SHIFT)
-
static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
return __nr_to_section(pfn_to_section_nr(pfn));
@@ -598,13 +611,14 @@ static inline int pfn_valid(unsigned long pfn)
* this restriction.
*/
#ifdef CONFIG_NUMA
-#define pfn_to_nid early_pfn_to_nid
-#endif
-
-#define pfn_to_pgdat(pfn) \
+#define pfn_to_nid(pfn) \
({ \
- NODE_DATA(pfn_to_nid(pfn)); \
+ unsigned long __pfn_to_nid_pfn = (pfn); \
+ page_to_nid(pfn_to_page(__pfn_to_nid_pfn)); \
})
+#else
+#define pfn_to_nid(pfn) (0)
+#endif
#define early_pfn_valid(pfn) pfn_valid(pfn)
void sparse_init(void);
@@ -613,12 +627,6 @@ void sparse_init(void);
#define sparse_index_init(_sec, _nid) do {} while (0)
#endif /* CONFIG_SPARSEMEM */
-#ifdef CONFIG_NODES_SPAN_OTHER_NODES
-#define early_pfn_in_nid(pfn, nid) (early_pfn_to_nid(pfn) == (nid))
-#else
-#define early_pfn_in_nid(pfn, nid) (1)
-#endif
-
#ifndef early_pfn_valid
#define early_pfn_valid(pfn) (1)
#endif
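
populated_zone() gives the "does this zone actually contain pages" test a
name; combined with for_each_zone() it replaces open-coded checks of
zone->present_pages. A sketch of the idiom:

/* Sketch: skip empty zones while walking every zone in the system. */
static void example_walk_zones(void)
{
	struct zone *zone;

	for_each_zone(zone) {
		if (!populated_zone(zone))
			continue;
		/* ... work on a zone that really has pages ... */
	}
}
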
diff --git a/include/linux/nbd.h b/include/linux/nbd.h
index 090e210e98f..f95d51fae73 100644
--- a/include/linux/nbd.h
+++ b/include/linux/nbd.h
@@ -37,18 +37,26 @@ enum {
/* userspace doesn't need the nbd_device structure */
#ifdef __KERNEL__
+#include <linux/wait.h>
+
/* values for flags field */
#define NBD_READ_ONLY 0x0001
#define NBD_WRITE_NOCHK 0x0002
+struct request;
+
struct nbd_device {
int flags;
int harderror; /* Code of hard error */
struct socket * sock;
struct file * file; /* If == NULL, device is not ready, yet */
int magic;
+
spinlock_t queue_lock;
struct list_head queue_head;/* Requests are added here... */
+ struct request *active_req;
+ wait_queue_head_t active_wq;
+
struct semaphore tx_lock;
struct gendisk *disk;
int blksize;
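
The new active_req/active_wq pair (hence the added linux/wait.h include) lets
nbd's receive side wait until the send side has finished transmitting a
request before completing it. A sketch of the waiting half, assuming the
obvious protocol on the other side (set active_req while sending, clear it
and wake active_wq when done):

/* Sketch: block until 'req' is no longer being transmitted.  The
 * sender is assumed to clear active_req under queue_lock and then
 * wake_up(&lo->active_wq). */
static void example_wait_sent(struct nbd_device *lo, struct request *req)
{
	wait_event(lo->active_wq, lo->active_req != req);
}
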
diff --git a/include/linux/nfsd/xdr.h b/include/linux/nfsd/xdr.h
index 130d4f588a3..3f4f7142bbe 100644
--- a/include/linux/nfsd/xdr.h
+++ b/include/linux/nfsd/xdr.h
@@ -88,10 +88,12 @@ struct nfsd_readdirargs {
struct nfsd_attrstat {
struct svc_fh fh;
+ struct kstat stat;
};
struct nfsd_diropres {
struct svc_fh fh;
+ struct kstat stat;
};
struct nfsd_readlinkres {
@@ -101,6 +103,7 @@ struct nfsd_readlinkres {
struct nfsd_readres {
struct svc_fh fh;
unsigned long count;
+ struct kstat stat;
};
struct nfsd_readdirres {
diff --git a/include/linux/nfsd/xdr3.h b/include/linux/nfsd/xdr3.h
index 3c2a71b43ba..a4322741f8b 100644
--- a/include/linux/nfsd/xdr3.h
+++ b/include/linux/nfsd/xdr3.h
@@ -126,6 +126,7 @@ struct nfsd3_setaclargs {
struct nfsd3_attrstat {
__u32 status;
struct svc_fh fh;
+ struct kstat stat;
};
/* LOOKUP, CREATE, MKDIR, SYMLINK, MKNOD */
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 343083fec25..d52999c4333 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -79,13 +79,23 @@
/*
* Global page accounting. One instance per CPU. Only unsigned longs are
* allowed.
+ *
+ * - Fields can be modified with xxx_page_state and xxx_page_state_zone at
+ * any time safely (this protects the instance from modification by
+ * interrupt).
+ * - The __xxx_page_state variants can be used safely when interrupts are
+ * disabled.
+ * - The __xxx_page_state variants can also be used if the field is only
+ * modified from process context, or only modified from interrupt context.
+ * In this case, the field should be commented here.
*/
struct page_state {
unsigned long nr_dirty; /* Dirty writeable pages */
unsigned long nr_writeback; /* Pages under writeback */
unsigned long nr_unstable; /* NFS unstable pages */
unsigned long nr_page_table_pages;/* Pages used for pagetables */
- unsigned long nr_mapped; /* mapped into pagetables */
+ unsigned long nr_mapped; /* mapped into pagetables.
+ * only modified from process context */
unsigned long nr_slab; /* In slab */
#define GET_PAGE_STATE_LAST nr_slab
@@ -97,32 +107,40 @@ struct page_state {
unsigned long pgpgout; /* Disk writes */
unsigned long pswpin; /* swap reads */
unsigned long pswpout; /* swap writes */
- unsigned long pgalloc_high; /* page allocations */
+ unsigned long pgalloc_high; /* page allocations */
unsigned long pgalloc_normal;
+ unsigned long pgalloc_dma32;
unsigned long pgalloc_dma;
+
unsigned long pgfree; /* page freeings */
unsigned long pgactivate; /* pages moved inactive->active */
unsigned long pgdeactivate; /* pages moved active->inactive */
unsigned long pgfault; /* faults (major+minor) */
unsigned long pgmajfault; /* faults (major only) */
+
unsigned long pgrefill_high; /* inspected in refill_inactive_zone */
unsigned long pgrefill_normal;
+ unsigned long pgrefill_dma32;
unsigned long pgrefill_dma;
unsigned long pgsteal_high; /* total highmem pages reclaimed */
unsigned long pgsteal_normal;
+ unsigned long pgsteal_dma32;
unsigned long pgsteal_dma;
+
unsigned long pgscan_kswapd_high;/* total highmem pages scanned */
unsigned long pgscan_kswapd_normal;
-
+ unsigned long pgscan_kswapd_dma32;
unsigned long pgscan_kswapd_dma;
+
unsigned long pgscan_direct_high;/* total highmem pages scanned */
unsigned long pgscan_direct_normal;
+ unsigned long pgscan_direct_dma32;
unsigned long pgscan_direct_dma;
- unsigned long pginodesteal; /* pages reclaimed via inode freeing */
+ unsigned long pginodesteal; /* pages reclaimed via inode freeing */
unsigned long slabs_scanned; /* slab objects scanned */
unsigned long kswapd_steal; /* pages reclaimed by kswapd */
unsigned long kswapd_inodesteal;/* reclaimed via kswapd inode freeing */
@@ -136,31 +154,54 @@ struct page_state {
extern void get_page_state(struct page_state *ret);
extern void get_page_state_node(struct page_state *ret, int node);
extern void get_full_page_state(struct page_state *ret);
-extern unsigned long __read_page_state(unsigned long offset);
-extern void __mod_page_state(unsigned long offset, unsigned long delta);
+extern unsigned long read_page_state_offset(unsigned long offset);
+extern void mod_page_state_offset(unsigned long offset, unsigned long delta);
+extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);
#define read_page_state(member) \
- __read_page_state(offsetof(struct page_state, member))
+ read_page_state_offset(offsetof(struct page_state, member))
#define mod_page_state(member, delta) \
- __mod_page_state(offsetof(struct page_state, member), (delta))
-
-#define inc_page_state(member) mod_page_state(member, 1UL)
-#define dec_page_state(member) mod_page_state(member, 0UL - 1)
-#define add_page_state(member,delta) mod_page_state(member, (delta))
-#define sub_page_state(member,delta) mod_page_state(member, 0UL - (delta))
-
-#define mod_page_state_zone(zone, member, delta) \
- do { \
- unsigned offset; \
- if (is_highmem(zone)) \
- offset = offsetof(struct page_state, member##_high); \
- else if (is_normal(zone)) \
- offset = offsetof(struct page_state, member##_normal); \
- else \
- offset = offsetof(struct page_state, member##_dma); \
- __mod_page_state(offset, (delta)); \
- } while (0)
+ mod_page_state_offset(offsetof(struct page_state, member), (delta))
+
+#define __mod_page_state(member, delta) \
+ __mod_page_state_offset(offsetof(struct page_state, member), (delta))
+
+#define inc_page_state(member) mod_page_state(member, 1UL)
+#define dec_page_state(member) mod_page_state(member, 0UL - 1)
+#define add_page_state(member,delta) mod_page_state(member, (delta))
+#define sub_page_state(member,delta) mod_page_state(member, 0UL - (delta))
+
+#define __inc_page_state(member) __mod_page_state(member, 1UL)
+#define __dec_page_state(member) __mod_page_state(member, 0UL - 1)
+#define __add_page_state(member,delta) __mod_page_state(member, (delta))
+#define __sub_page_state(member,delta) __mod_page_state(member, 0UL - (delta))
+
+#define page_state(member) (*__page_state(offsetof(struct page_state, member)))
+
+#define state_zone_offset(zone, member) \
+({ \
+ unsigned offset; \
+ if (is_highmem(zone)) \
+ offset = offsetof(struct page_state, member##_high); \
+ else if (is_normal(zone)) \
+ offset = offsetof(struct page_state, member##_normal); \
+ else if (is_dma32(zone)) \
+ offset = offsetof(struct page_state, member##_dma32); \
+ else \
+ offset = offsetof(struct page_state, member##_dma); \
+ offset; \
+})
+
+#define __mod_page_state_zone(zone, member, delta) \
+ do { \
+ __mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
+ } while (0)
+
+#define mod_page_state_zone(zone, member, delta) \
+ do { \
+ mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
+ } while (0)
/*
* Manipulation of page state flags
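
Taken together, the locked and __ variants support the rules spelled out in
the comment above: use mod_page_state()/mod_page_state_zone() from any
context, and the cheaper __ forms only with interrupts off (or for fields
touched from a single context). A sketch of both, assuming
example_account_alloc() runs with no special context guarantees:

/* Sketch: per-zone and global page accounting with the new helpers. */
static void example_account_alloc(struct zone *zone)
{
	unsigned long flags;

	local_irq_save(flags);
	/* interrupts are off, so the cheap __ variant is safe */
	__mod_page_state_zone(zone, pgalloc, 1);
	local_irq_restore(flags);

	/* callable from any context: does its own protection */
	mod_page_state(pgfree, 1);
}
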
diff --git a/include/linux/parport.h b/include/linux/parport.h
index d2a4d9e1e6d..f7ff0b0c403 100644
--- a/include/linux/parport.h
+++ b/include/linux/parport.h
@@ -242,7 +242,6 @@ enum ieee1284_phase {
IEEE1284_PH_FWD_IDLE,
IEEE1284_PH_TERMINATE,
IEEE1284_PH_NEGOTIATION,
- IEEE1284_PH_HBUSY_DNA,
IEEE1284_PH_REV_IDLE,
IEEE1284_PH_HBUSY_DAVAIL,
IEEE1284_PH_REV_DATA,
diff --git a/include/linux/parport_pc.h b/include/linux/parport_pc.h
index c6f76247087..1cc0f6b1a49 100644
--- a/include/linux/parport_pc.h
+++ b/include/linux/parport_pc.h
@@ -79,13 +79,13 @@ static __inline__ unsigned char parport_pc_read_data(struct parport *p)
}
#ifdef DEBUG_PARPORT
-extern __inline__ void dump_parport_state (char *str, struct parport *p)
+static inline void dump_parport_state (char *str, struct parport *p)
{
/* here's hoping that reading these ports won't side-effect anything underneath */
unsigned char ecr = inb (ECONTROL (p));
unsigned char dcr = inb (CONTROL (p));
unsigned char dsr = inb (STATUS (p));
- static char *ecr_modes[] = {"SPP", "PS2", "PPFIFO", "ECP", "xXx", "yYy", "TST", "CFG"};
+ static const char *const ecr_modes[] = {"SPP", "PS2", "PPFIFO", "ECP", "xXx", "yYy", "TST", "CFG"};
const struct parport_pc_private *priv = p->physport->private_data;
int i;
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 96a0403f61f..a213e999de3 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -394,6 +394,13 @@
#define PCI_DEVICE_ID_NS_87410 0xd001
#define PCI_DEVICE_ID_NS_CS5535_IDE 0x002d
+#define PCI_DEVICE_ID_NS_CS5535_HOST_BRIDGE 0x0028
+#define PCI_DEVICE_ID_NS_CS5535_ISA_BRIDGE 0x002b
+#define PCI_DEVICE_ID_NS_CS5535_IDE 0x002d
+#define PCI_DEVICE_ID_NS_CS5535_AUDIO 0x002e
+#define PCI_DEVICE_ID_NS_CS5535_USB 0x002f
+#define PCI_DEVICE_ID_NS_CS5535_VIDEO 0x0030
+
#define PCI_VENDOR_ID_TSENG 0x100c
#define PCI_DEVICE_ID_TSENG_W32P_2 0x3202
#define PCI_DEVICE_ID_TSENG_W32P_b 0x3205
@@ -496,6 +503,9 @@
#define PCI_DEVICE_ID_AMD_CS5536_IDE 0x209A
+#define PCI_DEVICE_ID_AMD_LX_VIDEO 0x2081
+#define PCI_DEVICE_ID_AMD_LX_AES 0x2082
+
#define PCI_VENDOR_ID_TRIDENT 0x1023
#define PCI_DEVICE_ID_TRIDENT_4DWAVE_DX 0x2000
#define PCI_DEVICE_ID_TRIDENT_4DWAVE_NX 0x2001
diff --git a/include/linux/raid/md.h b/include/linux/raid/md.h
index 13e7c4b6236..b6e0bcad84e 100644
--- a/include/linux/raid/md.h
+++ b/include/linux/raid/md.h
@@ -71,8 +71,8 @@
*/
#define MD_PATCHLEVEL_VERSION 3
-extern int register_md_personality (int p_num, mdk_personality_t *p);
-extern int unregister_md_personality (int p_num);
+extern int register_md_personality (struct mdk_personality *p);
+extern int unregister_md_personality (struct mdk_personality *p);
extern mdk_thread_t * md_register_thread (void (*run) (mddev_t *mddev),
mddev_t *mddev, const char *name);
extern void md_unregister_thread (mdk_thread_t *thread);
diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h
index 46629a275ba..617b9506c76 100644
--- a/include/linux/raid/md_k.h
+++ b/include/linux/raid/md_k.h
@@ -18,62 +18,19 @@
/* and dm-bio-list.h is not under include/linux because.... ??? */
#include "../../../drivers/md/dm-bio-list.h"
-#define MD_RESERVED 0UL
-#define LINEAR 1UL
-#define RAID0 2UL
-#define RAID1 3UL
-#define RAID5 4UL
-#define TRANSLUCENT 5UL
-#define HSM 6UL
-#define MULTIPATH 7UL
-#define RAID6 8UL
-#define RAID10 9UL
-#define FAULTY 10UL
-#define MAX_PERSONALITY 11UL
-
#define LEVEL_MULTIPATH (-4)
#define LEVEL_LINEAR (-1)
#define LEVEL_FAULTY (-5)
+/* We need a value for 'no level specified'; since 0
+ * means 'raid0', it has to be something else. This is
+ * for internal use only.
+ */
+#define LEVEL_NONE (-1000000)
+
#define MaxSector (~(sector_t)0)
#define MD_THREAD_NAME_MAX 14
-static inline int pers_to_level (int pers)
-{
- switch (pers) {
- case FAULTY: return LEVEL_FAULTY;
- case MULTIPATH: return LEVEL_MULTIPATH;
- case HSM: return -3;
- case TRANSLUCENT: return -2;
- case LINEAR: return LEVEL_LINEAR;
- case RAID0: return 0;
- case RAID1: return 1;
- case RAID5: return 5;
- case RAID6: return 6;
- case RAID10: return 10;
- }
- BUG();
- return MD_RESERVED;
-}
-
-static inline int level_to_pers (int level)
-{
- switch (level) {
- case LEVEL_FAULTY: return FAULTY;
- case LEVEL_MULTIPATH: return MULTIPATH;
- case -3: return HSM;
- case -2: return TRANSLUCENT;
- case LEVEL_LINEAR: return LINEAR;
- case 0: return RAID0;
- case 1: return RAID1;
- case 4:
- case 5: return RAID5;
- case 6: return RAID6;
- case 10: return RAID10;
- }
- return MD_RESERVED;
-}
-
typedef struct mddev_s mddev_t;
typedef struct mdk_rdev_s mdk_rdev_t;
@@ -138,14 +95,16 @@ struct mdk_rdev_s
atomic_t read_errors; /* number of consecutive read errors that
* we have tried to ignore.
*/
+ atomic_t corrected_errors; /* number of corrected read errors,
+ * for reporting to userspace and storing
+ * in superblock.
+ */
};
-typedef struct mdk_personality_s mdk_personality_t;
-
struct mddev_s
{
void *private;
- mdk_personality_t *pers;
+ struct mdk_personality *pers;
dev_t unit;
int md_minor;
struct list_head disks;
@@ -164,6 +123,7 @@ struct mddev_s
int chunk_size;
time_t ctime, utime;
int level, layout;
+ char clevel[16];
int raid_disks;
int max_disks;
sector_t size; /* used size of component devices */
@@ -183,6 +143,11 @@ struct mddev_s
sector_t resync_mismatches; /* count of sectors where
* parity/replica mismatch found
*/
+ /* if zero, use the system-wide default */
+ int sync_speed_min;
+ int sync_speed_max;
+
+ int ok_start_degraded;
/* recovery/resync flags
* NEEDED: we might need to start a resync/recover
* RUNNING: a thread is running, or about to be started
@@ -265,9 +230,11 @@ static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sect
atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
}
-struct mdk_personality_s
+struct mdk_personality
{
char *name;
+ int level;
+ struct list_head list;
struct module *owner;
int (*make_request)(request_queue_t *q, struct bio *bio);
int (*run)(mddev_t *mddev);
@@ -305,8 +272,6 @@ static inline char * mdname (mddev_t * mddev)
return mddev->gendisk ? mddev->gendisk->disk_name : "mdX";
}
-extern mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr);
-
/*
* iterates through some rdev ringlist. It's safe to remove the
* current 'rdev'. Dont touch 'tmp' though.
@@ -366,5 +331,10 @@ do { \
__wait_event_lock_irq(wq, condition, lock, cmd); \
} while (0)
+static inline void safe_put_page(struct page *p)
+{
+ if (p) put_page(p);
+}
+
#endif
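
Under the reworked API a personality carries its own raid level and is linked
into md's personality list at registration time, instead of indexing a fixed
table by personality number. A registration sketch with hypothetical
example_* names (the private level value and the .stop hook are assumptions
for illustration):

/* Sketch: registering a personality against the new interface. */
static int example_make_request(request_queue_t *q, struct bio *bio);
static int example_run(mddev_t *mddev);
static int example_stop(mddev_t *mddev);

static struct mdk_personality example_personality = {
	.name		= "example",
	.level		= 99,			/* made-up private level */
	.owner		= THIS_MODULE,
	.make_request	= example_make_request,
	.run		= example_run,
	.stop		= example_stop,
};

static int __init example_init(void)
{
	return register_md_personality(&example_personality);
}

static void __exit example_exit(void)
{
	unregister_md_personality(&example_personality);
}
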
diff --git a/include/linux/raid/raid1.h b/include/linux/raid/raid1.h
index 292b98f2b40..9d5494aaac0 100644
--- a/include/linux/raid/raid1.h
+++ b/include/linux/raid/raid1.h
@@ -45,6 +45,8 @@ struct r1_private_data_s {
spinlock_t resync_lock;
int nr_pending;
+ int nr_waiting;
+ int nr_queued;
int barrier;
sector_t next_resync;
int fullsync; /* set to 1 if a full sync is needed,
@@ -52,11 +54,12 @@ struct r1_private_data_s {
* Cleared when a sync completes.
*/
- wait_queue_head_t wait_idle;
- wait_queue_head_t wait_resume;
+ wait_queue_head_t wait_barrier;
struct pool_info *poolinfo;
+ struct page *tmppage;
+
mempool_t *r1bio_pool;
mempool_t *r1buf_pool;
};
@@ -106,6 +109,13 @@ struct r1bio_s {
/* DO NOT PUT ANY NEW FIELDS HERE - bios array is contiguously alloced*/
};
+/* When we get a read error on a read-only array, we redirect to another
+ * device without failing the first device and without trying to over-write
+ * it to correct the read error. To keep track of bad blocks on a per-bio
+ * level, we store IO_BLOCKED in the appropriate 'bios' pointer.
+ */
+#define IO_BLOCKED ((struct bio*)1)
+
/* bits for r1bio.state */
#define R1BIO_Uptodate 0
#define R1BIO_IsSync 1
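
Since IO_BLOCKED is a sentinel stored in the bios[] slots rather than a real
bio pointer, anything walking those slots has to test for it before
dereferencing. A small sketch of such a guard (the helper name is
illustrative; bios[] is the flexible array at the end of r1bio_s):

/* Sketch: a bios[] slot may hold NULL, IO_BLOCKED, or a real bio. */
static int example_mirror_usable(struct r1bio_s *r1_bio, int mirror)
{
	struct bio *bio = r1_bio->bios[mirror];

	return bio != NULL && bio != IO_BLOCKED;
}
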
diff --git a/include/linux/raid/raid10.h b/include/linux/raid/raid10.h
index 60708789c8f..b1103298a8c 100644
--- a/include/linux/raid/raid10.h
+++ b/include/linux/raid/raid10.h
@@ -35,18 +35,26 @@ struct r10_private_data_s {
sector_t chunk_mask;
struct list_head retry_list;
- /* for use when syncing mirrors: */
+ /* queue pending writes and submit them on unplug */
+ struct bio_list pending_bio_list;
+
spinlock_t resync_lock;
int nr_pending;
+ int nr_waiting;
+ int nr_queued;
int barrier;
sector_t next_resync;
+ int fullsync; /* set to 1 if a full sync is needed,
+ * (fresh device added).
+ * Cleared when a sync completes.
+ */
- wait_queue_head_t wait_idle;
- wait_queue_head_t wait_resume;
+ wait_queue_head_t wait_barrier;
mempool_t *r10bio_pool;
mempool_t *r10buf_pool;
+ struct page *tmppage;
};
typedef struct r10_private_data_s conf_t;
@@ -96,8 +104,16 @@ struct r10bio_s {
} devs[0];
};
+/* When we get a read error on a read-only array, we redirect to another
+ * device without failing the first device and without trying to over-write
+ * it to correct the read error. To keep track of bad blocks on a per-bio
+ * level, we store IO_BLOCKED in the appropriate 'bios' pointer.
+ */
+#define IO_BLOCKED ((struct bio*)1)
+
/* bits for r10bio.state */
#define R10BIO_Uptodate 0
#define R10BIO_IsSync 1
#define R10BIO_IsRecover 2
+#define R10BIO_Degraded 3
#endif
diff --git a/include/linux/raid/raid5.h b/include/linux/raid/raid5.h
index f025ba6fb14..394da8207b3 100644
--- a/include/linux/raid/raid5.h
+++ b/include/linux/raid/raid5.h
@@ -126,7 +126,7 @@
*/
struct stripe_head {
- struct stripe_head *hash_next, **hash_pprev; /* hash pointers */
+ struct hlist_node hash;
struct list_head lru; /* inactive_list or handle_list */
struct raid5_private_data *raid_conf;
sector_t sector; /* sector of this row */
@@ -152,7 +152,6 @@ struct stripe_head {
#define R5_Insync 3 /* rdev && rdev->in_sync at start */
#define R5_Wantread 4 /* want to schedule a read */
#define R5_Wantwrite 5
-#define R5_Syncio 6 /* this io need to be accounted as resync io */
#define R5_Overlap 7 /* There is a pending overlapping request on this block */
#define R5_ReadError 8 /* seen a read error here recently */
#define R5_ReWrite 9 /* have tried to over-write the readerror */
@@ -205,7 +204,7 @@ struct disk_info {
};
struct raid5_private_data {
- struct stripe_head **stripe_hashtbl;
+ struct hlist_head *stripe_hashtbl;
mddev_t *mddev;
struct disk_info *spare;
int chunk_size, level, algorithm;
@@ -228,6 +227,8 @@ struct raid5_private_data {
* Cleared when a sync completes.
*/
+ struct page *spare_page; /* Used when checking P/Q in raid6 */
+
/*
* Free stripes pool
*/
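
Moving the stripe hash from open-coded hash_next/hash_pprev chains to
hlist_head/hlist_node means lookups can use the standard hlist iterators. A
lookup sketch (hash value computation and locking omitted for brevity):

/* Sketch: find a stripe by sector in one hash bucket. */
static struct stripe_head *example_find_stripe(struct raid5_private_data *conf,
					       sector_t sector,
					       unsigned int hval)
{
	struct stripe_head *sh;
	struct hlist_node *hn;

	hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[hval], hash)
		if (sh->sector == sector)
			return sh;
	return NULL;
}
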
diff --git a/include/linux/ramfs.h b/include/linux/ramfs.h
index e0a4faa9610..953b6df5d03 100644
--- a/include/linux/ramfs.h
+++ b/include/linux/ramfs.h
@@ -5,6 +5,16 @@ struct inode *ramfs_get_inode(struct super_block *sb, int mode, dev_t dev);
struct super_block *ramfs_get_sb(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data);
+#ifndef CONFIG_MMU
+extern unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
+ unsigned long addr,
+ unsigned long len,
+ unsigned long pgoff,
+ unsigned long flags);
+
+extern int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma);
+#endif
+
extern struct file_operations ramfs_file_operations;
extern struct vm_operations_struct generic_file_vm_ops;
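
On !CONFIG_MMU kernels the two new hooks are what make shared mappings of
ramfs files work: the filesystem proposes a contiguous region itself and
validates the mmap. A sketch of how they would sit in a file_operations
table (the read/write fields shown are illustrative, not taken from ramfs):

/* Sketch: nommu ramfs file operations using the new hooks. */
#ifndef CONFIG_MMU
static struct file_operations example_ramfs_fops = {
	.read			= generic_file_read,	/* illustrative */
	.write			= generic_file_write,	/* illustrative */
	.mmap			= ramfs_nommu_mmap,
	.get_unmapped_area	= ramfs_nommu_get_unmapped_area,
};
#endif
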
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 33261f1d223..9d6fbeef210 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -71,6 +71,7 @@ void __anon_vma_link(struct vm_area_struct *);
* rmap interfaces called when adding or removing pte of page
*/
void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
+void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_file_rmap(struct page *);
void page_remove_rmap(struct page *);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b0ad6f30679..7da33619d5d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -254,25 +254,12 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
* The mm counters are not protected by its page_table_lock,
* so must be incremented atomically.
*/
-#ifdef ATOMIC64_INIT
-#define set_mm_counter(mm, member, value) atomic64_set(&(mm)->_##member, value)
-#define get_mm_counter(mm, member) ((unsigned long)atomic64_read(&(mm)->_##member))
-#define add_mm_counter(mm, member, value) atomic64_add(value, &(mm)->_##member)
-#define inc_mm_counter(mm, member) atomic64_inc(&(mm)->_##member)
-#define dec_mm_counter(mm, member) atomic64_dec(&(mm)->_##member)
-typedef atomic64_t mm_counter_t;
-#else /* !ATOMIC64_INIT */
-/*
- * The counters wrap back to 0 at 2^32 * PAGE_SIZE,
- * that is, at 16TB if using 4kB page size.
- */
-#define set_mm_counter(mm, member, value) atomic_set(&(mm)->_##member, value)
-#define get_mm_counter(mm, member) ((unsigned long)atomic_read(&(mm)->_##member))
-#define add_mm_counter(mm, member, value) atomic_add(value, &(mm)->_##member)
-#define inc_mm_counter(mm, member) atomic_inc(&(mm)->_##member)
-#define dec_mm_counter(mm, member) atomic_dec(&(mm)->_##member)
-typedef atomic_t mm_counter_t;
-#endif /* !ATOMIC64_INIT */
+#define set_mm_counter(mm, member, value) atomic_long_set(&(mm)->_##member, value)
+#define get_mm_counter(mm, member) ((unsigned long)atomic_long_read(&(mm)->_##member))
+#define add_mm_counter(mm, member, value) atomic_long_add(value, &(mm)->_##member)
+#define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member)
+#define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member)
+typedef atomic_long_t mm_counter_t;
#else /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
/*
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index a61c04f804b..5dc94e777fa 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -14,11 +14,7 @@
typedef struct pbe {
unsigned long address; /* address of the copy */
unsigned long orig_address; /* original address of page */
- swp_entry_t swap_address;
-
- struct pbe *next; /* also used as scratch space at
- * end of page (see link, diskpage)
- */
+ struct pbe *next;
} suspend_pagedir_t;
#define for_each_pbe(pbe, pblist) \
@@ -77,6 +73,6 @@ unsigned long get_safe_page(gfp_t gfp_mask);
* XXX: We try to keep some more pages free so that I/O operations succeed
* without paging. Might this be more?
*/
-#define PAGES_FOR_IO 512
+#define PAGES_FOR_IO 1024
#endif /* _LINUX_SWSUSP_H */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 508668f840b..556617bcf7a 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -172,7 +172,6 @@ extern void swap_setup(void);
/* linux/mm/vmscan.c */
extern int try_to_free_pages(struct zone **, gfp_t);
-extern int zone_reclaim(struct zone *, gfp_t, unsigned int);
extern int shrink_all_memory(int);
extern int vm_swappiness;
@@ -210,6 +209,7 @@ extern unsigned int nr_swapfiles;
extern struct swap_info_struct swap_info[];
extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(void);
+extern swp_entry_t get_swap_page_of_type(int type);
extern int swap_duplicate(swp_entry_t);
extern int valid_swaphandles(swp_entry_t, unsigned long *);
extern void swap_free(swp_entry_t);
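
get_swap_page_of_type() allocates a slot from one specific swap device rather
than whichever currently has the highest priority; the obvious consumer is
swsusp, which must keep its image on the device it will resume from. A usage
sketch (example_alloc_on() is hypothetical):

/* Sketch: grab a swap slot on the swap device with index 'swap_type'. */
static int example_alloc_on(int swap_type)
{
	swp_entry_t entry = get_swap_page_of_type(swap_type);

	if (!entry.val)
		return -ENOSPC;		/* that device is full */
	/* ... write a page to 'entry', remember it for resume ... */
	return 0;
}
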
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 343d883d69c..64a36ba43b2 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -60,12 +60,6 @@ struct writeback_control {
};
/*
- * ->writepage() return values (make these much larger than a pagesize, in
- * case some fs is returning number-of-bytes-written from writepage)
- */
-#define WRITEPAGE_ACTIVATE 0x80000 /* IO was not started: activate page */
-
-/*
* fs/fs-writeback.c
*/
void writeback_inodes(struct writeback_control *wbc);