Diffstat (limited to 'include')
-rw-r--r--  include/linux/auxvec.h             6
-rw-r--r--  include/linux/can/core.h           2
-rw-r--r--  include/linux/cgroup.h            61
-rw-r--r--  include/linux/cpuset.h            10
-rw-r--r--  include/linux/ext2_fs.h           24
-rw-r--r--  include/linux/ext2_fs_sb.h         4
-rw-r--r--  include/linux/ext3_fs.h           24
-rw-r--r--  include/linux/ext3_fs_sb.h         4
-rw-r--r--  include/linux/if_vlan.h           19
-rw-r--r--  include/linux/jbd.h               15
-rw-r--r--  include/linux/kernel.h             6
-rw-r--r--  include/linux/libata.h             3
-rw-r--r--  include/linux/magic.h              1
-rw-r--r--  include/linux/memcontrol.h       154
-rw-r--r--  include/linux/mm_inline.h         22
-rw-r--r--  include/linux/mmzone.h            24
-rw-r--r--  include/linux/netdevice.h          6
-rw-r--r--  include/linux/nwpserial.h         18
-rw-r--r--  include/linux/page_cgroup.h       52
-rw-r--r--  include/linux/pid.h               18
-rw-r--r--  include/linux/pid_namespace.h      6
-rw-r--r--  include/linux/raid/md_k.h         20
-rw-r--r--  include/linux/raid/md_p.h          2
-rw-r--r--  include/linux/raid/raid0.h        10
-rw-r--r--  include/linux/res_counter.h        8
-rw-r--r--  include/linux/serial_core.h        3
-rw-r--r--  include/linux/swap.h              25
-rw-r--r--  include/net/protocol.h             3
-rw-r--r--  include/net/wimax.h                3
-rw-r--r--  include/xen/xenbus.h               2
30 files changed, 448 insertions, 107 deletions
diff --git a/include/linux/auxvec.h b/include/linux/auxvec.h
index d7afa9dd663..f3b5d4e3a2a 100644
--- a/include/linux/auxvec.h
+++ b/include/linux/auxvec.h
@@ -23,16 +23,16 @@
#define AT_PLATFORM 15 /* string identifying CPU for optimizations */
#define AT_HWCAP 16 /* arch dependent hints at CPU capabilities */
#define AT_CLKTCK 17 /* frequency at which times() increments */
-
+/* AT_* values 18 through 22 are reserved */
#define AT_SECURE 23 /* secure mode boolean */
-
#define AT_BASE_PLATFORM 24 /* string identifying real platform, may
* differ from AT_PLATFORM. */
+#define AT_RANDOM 25 /* address of 16 random bytes */
#define AT_EXECFN 31 /* filename of program */
#ifdef __KERNEL__
-#define AT_VECTOR_SIZE_BASE 18 /* NEW_AUX_ENT entries in auxiliary table */
+#define AT_VECTOR_SIZE_BASE 19 /* NEW_AUX_ENT entries in auxiliary table */
/* number of "#define AT_.*" above, minus {AT_NULL, AT_IGNORE, AT_NOTELF} */
#endif
diff --git a/include/linux/can/core.h b/include/linux/can/core.h
index f50785ad478..25085cbadcf 100644
--- a/include/linux/can/core.h
+++ b/include/linux/can/core.h
@@ -19,7 +19,7 @@
#include <linux/skbuff.h>
#include <linux/netdevice.h>
-#define CAN_VERSION "20081130"
+#define CAN_VERSION "20090105"
/* increment this number each time you change some user-space interface */
#define CAN_ABI_VERSION "8"
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 08b78c09b09..e267e62827b 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -52,9 +52,9 @@ struct cgroup_subsys_state {
* hierarchy structure */
struct cgroup *cgroup;
- /* State maintained by the cgroup system to allow
- * subsystems to be "busy". Should be accessed via css_get()
- * and css_put() */
+ /* State maintained by the cgroup system to allow subsystems
+ * to be "busy". Should be accessed via css_get(),
+ * css_tryget() and css_put(). */
atomic_t refcnt;
@@ -64,11 +64,14 @@ struct cgroup_subsys_state {
/* bits in struct cgroup_subsys_state flags field */
enum {
CSS_ROOT, /* This CSS is the root of the subsystem */
+ CSS_REMOVED, /* This CSS is dead */
};
/*
- * Call css_get() to hold a reference on the cgroup;
- *
+ * Call css_get() to hold a reference on the css; it can be used
+ * for a reference obtained via:
+ * - an existing ref-counted reference to the css
+ * - task->cgroups for a locked task
*/
static inline void css_get(struct cgroup_subsys_state *css)
@@ -77,9 +80,32 @@ static inline void css_get(struct cgroup_subsys_state *css)
if (!test_bit(CSS_ROOT, &css->flags))
atomic_inc(&css->refcnt);
}
+
+static inline bool css_is_removed(struct cgroup_subsys_state *css)
+{
+ return test_bit(CSS_REMOVED, &css->flags);
+}
+
+/*
+ * Call css_tryget() to take a reference on a css if your existing
+ * (known-valid) reference isn't already ref-counted. Returns false if
+ * the css has been destroyed.
+ */
+
+static inline bool css_tryget(struct cgroup_subsys_state *css)
+{
+ if (test_bit(CSS_ROOT, &css->flags))
+ return true;
+ while (!atomic_inc_not_zero(&css->refcnt)) {
+ if (test_bit(CSS_REMOVED, &css->flags))
+ return false;
+ }
+ return true;
+}
+
/*
* css_put() should be called to release a reference taken by
- * css_get()
+ * css_get() or css_tryget()
*/
extern void __css_put(struct cgroup_subsys_state *css);
@@ -116,7 +142,7 @@ struct cgroup {
struct list_head children; /* my children */
struct cgroup *parent; /* my parent */
- struct dentry *dentry; /* cgroup fs entry */
+ struct dentry *dentry; /* cgroup fs entry, RCU protected */
/* Private pointers for each registered subsystem */
struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
@@ -145,6 +171,9 @@ struct cgroup {
int pids_use_count;
/* Length of the current tasks_pids array */
int pids_length;
+
+ /* For RCU-protected deletion */
+ struct rcu_head rcu_head;
};
/* A css_set is a structure holding pointers to a set of
@@ -337,9 +366,23 @@ struct cgroup_subsys {
#define MAX_CGROUP_TYPE_NAMELEN 32
const char *name;
- /* Protected by RCU */
- struct cgroupfs_root *root;
+ /*
+ * Protects sibling/children links of cgroups in this
+ * hierarchy, plus protects which hierarchy (or none) the
+ * subsystem is a part of (i.e. root/sibling). To avoid
+ * potential deadlocks, the following operations should not be
+ * undertaken while holding any hierarchy_mutex:
+ *
+ * - allocating memory
+ * - initiating hotplug events
+ */
+ struct mutex hierarchy_mutex;
+ /*
+ * Link to parent, and list entry in parent's children.
+ * Protected by this->hierarchy_mutex and cgroup_lock()
+ */
+ struct cgroupfs_root *root;
struct list_head sibling;
};
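A minimal sketch of how the new css_tryget() is meant to be used: when a css pointer was found through an RCU-protected path rather than via an already-counted reference, css_tryget() either pins it or reports that the css is being torn down. The helper below is illustrative, not part of this patch.

/* Hedged sketch: pin a css found under rcu_read_lock(); not from this patch. */
#include <linux/cgroup.h>
#include <linux/rcupdate.h>

static struct cgroup_subsys_state *pin_css(struct cgroup_subsys_state *css)
{
	rcu_read_lock();
	if (!css_tryget(css))	/* CSS_REMOVED set and refcnt hit zero */
		css = NULL;	/* caller must treat the group as gone */
	rcu_read_unlock();
	return css;		/* balance with css_put(css) when done */
}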
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 51ea2bdea0f..90c6074a36c 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -20,8 +20,9 @@ extern int number_of_cpusets; /* How many cpusets are defined in system? */
extern int cpuset_init_early(void);
extern int cpuset_init(void);
extern void cpuset_init_smp(void);
-extern void cpuset_cpus_allowed(struct task_struct *p, cpumask_t *mask);
-extern void cpuset_cpus_allowed_locked(struct task_struct *p, cpumask_t *mask);
+extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
+extern void cpuset_cpus_allowed_locked(struct task_struct *p,
+ struct cpumask *mask);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
@@ -86,12 +87,13 @@ static inline int cpuset_init_early(void) { return 0; }
static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}
-static inline void cpuset_cpus_allowed(struct task_struct *p, cpumask_t *mask)
+static inline void cpuset_cpus_allowed(struct task_struct *p,
+ struct cpumask *mask)
{
*mask = cpu_possible_map;
}
static inline void cpuset_cpus_allowed_locked(struct task_struct *p,
- cpumask_t *mask)
+ struct cpumask *mask)
{
*mask = cpu_possible_map;
}
diff --git a/include/linux/ext2_fs.h b/include/linux/ext2_fs.h
index 78c775a83f7..121720d74e1 100644
--- a/include/linux/ext2_fs.h
+++ b/include/linux/ext2_fs.h
@@ -194,6 +194,30 @@ struct ext2_group_desc
#define EXT2_FL_USER_VISIBLE FS_FL_USER_VISIBLE /* User visible flags */
#define EXT2_FL_USER_MODIFIABLE FS_FL_USER_MODIFIABLE /* User modifiable flags */
+/* Flags that should be inherited by new inodes from their parent. */
+#define EXT2_FL_INHERITED (EXT2_SECRM_FL | EXT2_UNRM_FL | EXT2_COMPR_FL |\
+ EXT2_SYNC_FL | EXT2_IMMUTABLE_FL | EXT2_APPEND_FL |\
+ EXT2_NODUMP_FL | EXT2_NOATIME_FL | EXT2_COMPRBLK_FL|\
+ EXT2_NOCOMP_FL | EXT2_JOURNAL_DATA_FL |\
+ EXT2_NOTAIL_FL | EXT2_DIRSYNC_FL)
+
+/* Flags that are appropriate for regular files (all but dir-specific ones). */
+#define EXT2_REG_FLMASK (~(EXT2_DIRSYNC_FL | EXT2_TOPDIR_FL))
+
+/* Flags that are appropriate for non-directories/regular files. */
+#define EXT2_OTHER_FLMASK (EXT2_NODUMP_FL | EXT2_NOATIME_FL)
+
+/* Mask out flags that are inappropriate for the given type of inode. */
+static inline __u32 ext2_mask_flags(umode_t mode, __u32 flags)
+{
+ if (S_ISDIR(mode))
+ return flags;
+ else if (S_ISREG(mode))
+ return flags & EXT2_REG_FLMASK;
+ else
+ return flags & EXT2_OTHER_FLMASK;
+}
+
/*
* ioctl commands
*/
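The natural caller of the new ext2_mask_flags() helper is the inode-creation path, where a child inherits flags from its parent directory; the snippet below is a hedged sketch of that pattern (dir_ei/inode_ei are illustrative names for the in-memory ext2 inode structures, not from this diff). The ext3_mask_flags() helper added further down follows the same pattern.

/* Illustrative only: filter inherited flags for a newly created inode. */
__u32 flags = dir_ei->i_flags & EXT2_FL_INHERITED;

inode_ei->i_flags = ext2_mask_flags(inode->i_mode, flags);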
diff --git a/include/linux/ext2_fs_sb.h b/include/linux/ext2_fs_sb.h
index dc541f3653d..1cdb66367c9 100644
--- a/include/linux/ext2_fs_sb.h
+++ b/include/linux/ext2_fs_sb.h
@@ -101,7 +101,7 @@ struct ext2_sb_info {
struct percpu_counter s_freeblocks_counter;
struct percpu_counter s_freeinodes_counter;
struct percpu_counter s_dirs_counter;
- struct blockgroup_lock s_blockgroup_lock;
+ struct blockgroup_lock *s_blockgroup_lock;
/* root of the per fs reservation window tree */
spinlock_t s_rsv_window_lock;
struct rb_root s_rsv_window_root;
@@ -111,7 +111,7 @@ struct ext2_sb_info {
static inline spinlock_t *
sb_bgl_lock(struct ext2_sb_info *sbi, unsigned int block_group)
{
- return bgl_lock_ptr(&sbi->s_blockgroup_lock, block_group);
+ return bgl_lock_ptr(sbi->s_blockgroup_lock, block_group);
}
#endif /* _LINUX_EXT2_FS_SB */
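Turning s_blockgroup_lock into a pointer implies the mount path now allocates the lock array separately instead of embedding it in ext2_sb_info. A hedged sketch of what the fill_super side would have to do under that assumption:

/* Hedged sketch of the mount-time allocation implied by the pointer change. */
sbi->s_blockgroup_lock =
	kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
if (!sbi->s_blockgroup_lock) {
	kfree(sbi);
	return -ENOMEM;
}
bgl_lock_init(sbi->s_blockgroup_lock);
/* ...and kfree(sbi->s_blockgroup_lock) on the unmount and error paths. */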
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
index d14f0291848..d76800f6ecf 100644
--- a/include/linux/ext3_fs.h
+++ b/include/linux/ext3_fs.h
@@ -178,6 +178,30 @@ struct ext3_group_desc
#define EXT3_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */
#define EXT3_FL_USER_MODIFIABLE 0x000380FF /* User modifiable flags */
+/* Flags that should be inherited by new inodes from their parent. */
+#define EXT3_FL_INHERITED (EXT3_SECRM_FL | EXT3_UNRM_FL | EXT3_COMPR_FL |\
+ EXT3_SYNC_FL | EXT3_IMMUTABLE_FL | EXT3_APPEND_FL |\
+ EXT3_NODUMP_FL | EXT3_NOATIME_FL | EXT3_COMPRBLK_FL|\
+ EXT3_NOCOMPR_FL | EXT3_JOURNAL_DATA_FL |\
+ EXT3_NOTAIL_FL | EXT3_DIRSYNC_FL)
+
+/* Flags that are appropriate for regular files (all but dir-specific ones). */
+#define EXT3_REG_FLMASK (~(EXT3_DIRSYNC_FL | EXT3_TOPDIR_FL))
+
+/* Flags that are appropriate for non-directories/regular files. */
+#define EXT3_OTHER_FLMASK (EXT3_NODUMP_FL | EXT3_NOATIME_FL)
+
+/* Mask out flags that are inappropriate for the given type of inode. */
+static inline __u32 ext3_mask_flags(umode_t mode, __u32 flags)
+{
+ if (S_ISDIR(mode))
+ return flags;
+ else if (S_ISREG(mode))
+ return flags & EXT3_REG_FLMASK;
+ else
+ return flags & EXT3_OTHER_FLMASK;
+}
+
/*
* Inode dynamic state flags
*/
diff --git a/include/linux/ext3_fs_sb.h b/include/linux/ext3_fs_sb.h
index e024e38248f..76fdc0f4b02 100644
--- a/include/linux/ext3_fs_sb.h
+++ b/include/linux/ext3_fs_sb.h
@@ -60,7 +60,7 @@ struct ext3_sb_info {
struct percpu_counter s_freeblocks_counter;
struct percpu_counter s_freeinodes_counter;
struct percpu_counter s_dirs_counter;
- struct blockgroup_lock s_blockgroup_lock;
+ struct blockgroup_lock *s_blockgroup_lock;
/* root of the per fs reservation window tree */
spinlock_t s_rsv_window_lock;
@@ -86,7 +86,7 @@ struct ext3_sb_info {
static inline spinlock_t *
sb_bgl_lock(struct ext3_sb_info *sbi, unsigned int block_group)
{
- return bgl_lock_ptr(&sbi->s_blockgroup_lock, block_group);
+ return bgl_lock_ptr(sbi->s_blockgroup_lock, block_group);
}
#endif /* _LINUX_EXT3_FS_SB */
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index a5cb0c3f6dc..f8ff918c208 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -115,6 +115,11 @@ extern u16 vlan_dev_vlan_id(const struct net_device *dev);
extern int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
u16 vlan_tci, int polling);
extern int vlan_hwaccel_do_receive(struct sk_buff *skb);
+extern int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
+ unsigned int vlan_tci, struct sk_buff *skb);
+extern int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
+ unsigned int vlan_tci,
+ struct napi_gro_fraginfo *info);
#else
static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev)
@@ -140,6 +145,20 @@ static inline int vlan_hwaccel_do_receive(struct sk_buff *skb)
{
return 0;
}
+
+static inline int vlan_gro_receive(struct napi_struct *napi,
+ struct vlan_group *grp,
+ unsigned int vlan_tci, struct sk_buff *skb)
+{
+ return NET_RX_DROP;
+}
+
+static inline int vlan_gro_frags(struct napi_struct *napi,
+ struct vlan_group *grp, unsigned int vlan_tci,
+ struct napi_gro_fraginfo *info)
+{
+ return NET_RX_DROP;
+}
#endif
/**
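For drivers, vlan_gro_receive() slots into the NAPI poll loop where __vlan_hwaccel_rx() was used for hardware-stripped tags, but feeds the GRO engine instead. A hedged sketch follows; the adapter fields and the RX_VLAN_TAG_PRESENT status bit are illustrative, not from this patch.

/* Hedged sketch of a NAPI poll path using the new VLAN GRO entry point. */
if (adapter->vlgrp && (status & RX_VLAN_TAG_PRESENT))
	vlan_gro_receive(&adapter->napi, adapter->vlgrp,
			 le16_to_cpu(rx_desc->vlan_tci), skb);
else
	napi_gro_receive(&adapter->napi, skb);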
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 346e2b80be7..6384b19efe6 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -543,6 +543,11 @@ struct transaction_s
unsigned long t_expires;
/*
+ * When this transaction started, in nanoseconds [no locking]
+ */
+ ktime_t t_start_time;
+
+ /*
* How many handles used this transaction? [t_handle_lock]
*/
int t_handle_count;
@@ -798,9 +803,19 @@ struct journal_s
struct buffer_head **j_wbuf;
int j_wbufsize;
+ /*
+ * this is the pid of the last person to run a synchronous operation
+ * through the journal.
+ */
pid_t j_last_sync_writer;
/*
+ * the average amount of time in nanoseconds it takes to commit a
+ * transaction to the disk. [j_state_lock]
+ */
+ u64 j_average_commit_time;
+
+ /*
* An opaque pointer to fs-private information. ext3 puts its
* superblock pointer here
*/
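t_start_time together with j_average_commit_time suggests the commit path times each transaction and keeps a running average (presumably used to decide how long handles may sleep before forcing a commit). One plausible update rule, under j_state_lock, is a simple weighted average; this is an assumption, not code from the patch.

/* Hedged sketch: maintain j_average_commit_time when a commit completes. */
u64 commit_time = ktime_to_ns(ktime_sub(ktime_get(),
					transaction->t_start_time));

spin_lock(&journal->j_state_lock);
journal->j_average_commit_time = journal->j_average_commit_time ?
	(commit_time + 3 * journal->j_average_commit_time) / 4 :
	commit_time;
spin_unlock(&journal->j_state_lock);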
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 6b8e2027165..343df9ef241 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -476,6 +476,12 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
__val = __val < __min ? __min: __val; \
__val > __max ? __max: __val; })
+
+/*
+ * swap - swap value of @a and @b
+ */
+#define swap(a, b) ({ typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; })
+
/**
* container_of - cast a member of a structure out to the containing structure
* @ptr: the pointer to the member.
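The new swap() macro relies on a GCC statement expression and typeof(), so both arguments must have compatible types and be plain lvalues (no side effects). A trivial usage example:

/* Example: order two sector counts in place with the new swap() helper. */
if (start_sector > end_sector)
	swap(start_sector, end_sector);	/* expands to a typeof() temporary */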
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 3449de597ef..4f7c8fb4d3f 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -1518,6 +1518,7 @@ extern void sata_pmp_error_handler(struct ata_port *ap);
extern const struct ata_port_operations ata_sff_port_ops;
extern const struct ata_port_operations ata_bmdma_port_ops;
+extern const struct ata_port_operations ata_bmdma32_port_ops;
/* PIO only, sg_tablesize and dma_boundary limits can be removed */
#define ATA_PIO_SHT(drv_name) \
@@ -1545,6 +1546,8 @@ extern void ata_sff_exec_command(struct ata_port *ap,
const struct ata_taskfile *tf);
extern unsigned int ata_sff_data_xfer(struct ata_device *dev,
unsigned char *buf, unsigned int buflen, int rw);
+extern unsigned int ata_sff_data_xfer32(struct ata_device *dev,
+ unsigned char *buf, unsigned int buflen, int rw);
extern unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev,
unsigned char *buf, unsigned int buflen, int rw);
extern u8 ata_sff_irq_on(struct ata_port *ap);
diff --git a/include/linux/magic.h b/include/linux/magic.h
index f7f3fdddbef..439f6f3cb0c 100644
--- a/include/linux/magic.h
+++ b/include/linux/magic.h
@@ -13,6 +13,7 @@
#define EFS_SUPER_MAGIC 0x414A53
#define EXT2_SUPER_MAGIC 0xEF53
#define EXT3_SUPER_MAGIC 0xEF53
+#define XENFS_SUPER_MAGIC 0xabba1974
#define EXT4_SUPER_MAGIC 0xEF53
#define HPFS_SUPER_MAGIC 0xf995e849
#define ISOFS_SUPER_MAGIC 0x9660
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 1fbe14d3952..326f45c8653 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -19,22 +19,45 @@
#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
-
+#include <linux/cgroup.h>
struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+/*
+ * All "charge" functions with gfp_mask should use GFP_KERNEL or
+ * (gfp_mask & GFP_RECLAIM_MASK). In current implementatin, memcg doesn't
+ * alloc memory but reclaims memory from all available zones. So, "where I want
+ * memory from" bits of gfp_mask has no meaning. So any bits of that field is
+ * available but adding a rule is better. charge functions' gfp_mask should
+ * be set to GFP_KERNEL or gfp_mask & GFP_RECLAIM_MASK for avoiding ambiguous
+ * codes.
+ * (Of course, if memcg does memory allocation in future, GFP_KERNEL is sane.)
+ */
-extern int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
+extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
gfp_t gfp_mask);
+/* for swap handling */
+extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
+ struct page *page, gfp_t mask, struct mem_cgroup **ptr);
+extern void mem_cgroup_commit_charge_swapin(struct page *page,
+ struct mem_cgroup *ptr);
+extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr);
+
extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
gfp_t gfp_mask);
-extern void mem_cgroup_move_lists(struct page *page, enum lru_list lru);
+extern void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru);
+extern void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru);
+extern void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru);
+extern void mem_cgroup_del_lru(struct page *page);
+extern void mem_cgroup_move_lists(struct page *page,
+ enum lru_list from, enum lru_list to);
extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);
-extern int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask);
+extern int mem_cgroup_shrink_usage(struct page *page,
+ struct mm_struct *mm, gfp_t gfp_mask);
extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
struct list_head *dst,
@@ -47,12 +70,20 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
-#define mm_match_cgroup(mm, cgroup) \
- ((cgroup) == mem_cgroup_from_task((mm)->owner))
+static inline
+int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
+{
+ struct mem_cgroup *mem;
+ rcu_read_lock();
+ mem = mem_cgroup_from_task((mm)->owner);
+ rcu_read_unlock();
+ return cgroup == mem;
+}
extern int
-mem_cgroup_prepare_migration(struct page *page, struct page *newpage);
-extern void mem_cgroup_end_migration(struct page *page);
+mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr);
+extern void mem_cgroup_end_migration(struct mem_cgroup *mem,
+ struct page *oldpage, struct page *newpage);
/*
* For memory reclaim.
@@ -65,13 +96,32 @@ extern void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
int priority);
extern void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
int priority);
+int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg);
+unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
+ struct zone *zone,
+ enum lru_list lru);
+struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
+ struct zone *zone);
+struct zone_reclaim_stat*
+mem_cgroup_get_reclaim_stat_from_page(struct page *page);
-extern long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone,
- int priority, enum lru_list lru);
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
+extern int do_swap_account;
+#endif
+static inline bool mem_cgroup_disabled(void)
+{
+ if (mem_cgroup_subsys.disabled)
+ return true;
+ return false;
+}
+
+extern bool mem_cgroup_oom_called(struct task_struct *task);
#else /* CONFIG_CGROUP_MEM_RES_CTLR */
-static inline int mem_cgroup_charge(struct page *page,
+struct mem_cgroup;
+
+static inline int mem_cgroup_newpage_charge(struct page *page,
struct mm_struct *mm, gfp_t gfp_mask)
{
return 0;
@@ -83,6 +133,21 @@ static inline int mem_cgroup_cache_charge(struct page *page,
return 0;
}
+static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
+ struct page *page, gfp_t gfp_mask, struct mem_cgroup **ptr)
+{
+ return 0;
+}
+
+static inline void mem_cgroup_commit_charge_swapin(struct page *page,
+ struct mem_cgroup *ptr)
+{
+}
+
+static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr)
+{
+}
+
static inline void mem_cgroup_uncharge_page(struct page *page)
{
}
@@ -91,12 +156,33 @@ static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}
-static inline int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
+static inline int mem_cgroup_shrink_usage(struct page *page,
+ struct mm_struct *mm, gfp_t gfp_mask)
{
return 0;
}
-static inline void mem_cgroup_move_lists(struct page *page, bool active)
+static inline void mem_cgroup_add_lru_list(struct page *page, int lru)
+{
+}
+
+static inline void mem_cgroup_del_lru_list(struct page *page, int lru)
+{
+ return ;
+}
+
+static inline void mem_cgroup_rotate_lru_list(struct page *page, int lru)
+{
+ return ;
+}
+
+static inline void mem_cgroup_del_lru(struct page *page)
+{
+ return ;
+}
+
+static inline void
+mem_cgroup_move_lists(struct page *page, enum lru_list from, enum lru_list to)
{
}
@@ -112,12 +198,14 @@ static inline int task_in_mem_cgroup(struct task_struct *task,
}
static inline int
-mem_cgroup_prepare_migration(struct page *page, struct page *newpage)
+mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
{
return 0;
}
-static inline void mem_cgroup_end_migration(struct page *page)
+static inline void mem_cgroup_end_migration(struct mem_cgroup *mem,
+ struct page *oldpage,
+ struct page *newpage)
{
}
@@ -146,12 +234,42 @@ static inline void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
{
}
-static inline long mem_cgroup_calc_reclaim(struct mem_cgroup *mem,
- struct zone *zone, int priority,
- enum lru_list lru)
+static inline bool mem_cgroup_disabled(void)
+{
+ return true;
+}
+
+static inline bool mem_cgroup_oom_called(struct task_struct *task)
+{
+ return false;
+}
+
+static inline int
+mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
+{
+ return 1;
+}
+
+static inline unsigned long
+mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, struct zone *zone,
+ enum lru_list lru)
{
return 0;
}
+
+
+static inline struct zone_reclaim_stat*
+mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone)
+{
+ return NULL;
+}
+
+static inline struct zone_reclaim_stat*
+mem_cgroup_get_reclaim_stat_from_page(struct page *page)
+{
+ return NULL;
+}
+
#endif /* CONFIG_CGROUP_MEM_CONT */
#endif /* _LINUX_MEMCONTROL_H */
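The try/commit/cancel trio for swap-in charging exists for callers that must charge before they know whether the fault will actually complete. A hedged sketch of the calling convention; map_page_into_pte() stands in for the real fault-path work and is purely illustrative.

/* Hedged sketch of the two-phase swap-in charge protocol. */
struct mem_cgroup *ptr = NULL;
int err;

if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr))
	return VM_FAULT_OOM;			/* nonzero: charge refused */

err = map_page_into_pte(vma, addr, page);	/* illustrative step */
if (err) {
	mem_cgroup_cancel_charge_swapin(ptr);	/* undo the provisional charge */
	return err;
}
mem_cgroup_commit_charge_swapin(page, ptr);	/* bind the charge to the page */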
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index c948350c378..7fbb9726755 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -28,6 +28,7 @@ add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l)
{
list_add(&page->lru, &zone->lru[l].list);
__inc_zone_state(zone, NR_LRU_BASE + l);
+ mem_cgroup_add_lru_list(page, l);
}
static inline void
@@ -35,6 +36,7 @@ del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list l)
{
list_del(&page->lru);
__dec_zone_state(zone, NR_LRU_BASE + l);
+ mem_cgroup_del_lru_list(page, l);
}
static inline void
@@ -54,6 +56,7 @@ del_page_from_lru(struct zone *zone, struct page *page)
l += page_is_file_cache(page);
}
__dec_zone_state(zone, NR_LRU_BASE + l);
+ mem_cgroup_del_lru_list(page, l);
}
/**
@@ -78,23 +81,4 @@ static inline enum lru_list page_lru(struct page *page)
return lru;
}
-/**
- * inactive_anon_is_low - check if anonymous pages need to be deactivated
- * @zone: zone to check
- *
- * Returns true if the zone does not have enough inactive anon pages,
- * meaning some active anon pages need to be deactivated.
- */
-static inline int inactive_anon_is_low(struct zone *zone)
-{
- unsigned long active, inactive;
-
- active = zone_page_state(zone, NR_ACTIVE_ANON);
- inactive = zone_page_state(zone, NR_INACTIVE_ANON);
-
- if (inactive * zone->inactive_ratio < active)
- return 1;
-
- return 0;
-}
#endif
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 35a7b5e1946..09c14e213b6 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -263,6 +263,19 @@ enum zone_type {
#error ZONES_SHIFT -- too many zones configured adjust calculation
#endif
+struct zone_reclaim_stat {
+ /*
+ * The pageout code in vmscan.c keeps track of how many of the
+ * mem/swap backed and file backed pages are referenced.
+ * The higher the rotated/scanned ratio, the more valuable
+ * that cache is.
+ *
+ * The anon LRU stats live in [0], file LRU stats in [1]
+ */
+ unsigned long recent_rotated[2];
+ unsigned long recent_scanned[2];
+};
+
struct zone {
/* Fields commonly accessed by the page allocator */
unsigned long pages_min, pages_low, pages_high;
@@ -315,16 +328,7 @@ struct zone {
unsigned long nr_scan;
} lru[NR_LRU_LISTS];
- /*
- * The pageout code in vmscan.c keeps track of how many of the
- * mem/swap backed and file backed pages are refeferenced.
- * The higher the rotated/scanned ratio, the more valuable
- * that cache is.
- *
- * The anon LRU stats live in [0], file LRU stats in [1]
- */
- unsigned long recent_rotated[2];
- unsigned long recent_scanned[2];
+ struct zone_reclaim_stat reclaim_stat;
unsigned long pages_scanned; /* since last reclaim */
unsigned long flags; /* zone flags, see below */
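Pulling recent_rotated/recent_scanned into struct zone_reclaim_stat lets the same rotated-per-scanned ratio serve both the global zone LRUs and, via mem_cgroup_get_reclaim_stat(), the per-cgroup LRUs. A hedged sketch of how a reader of the ratio might look (the percentage form is illustrative):

/* Hedged sketch: a higher rotated/scanned ratio means the cache is reused. */
struct zone_reclaim_stat *rs = &zone->reclaim_stat;
unsigned long anon_reuse =
	rs->recent_rotated[0] * 100 / (rs->recent_scanned[0] + 1);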
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index c28bbba3c23..114091be887 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1373,8 +1373,14 @@ extern int netif_rx_ni(struct sk_buff *skb);
#define HAVE_NETIF_RECEIVE_SKB 1
extern int netif_receive_skb(struct sk_buff *skb);
extern void napi_gro_flush(struct napi_struct *napi);
+extern int dev_gro_receive(struct napi_struct *napi,
+ struct sk_buff *skb);
extern int napi_gro_receive(struct napi_struct *napi,
struct sk_buff *skb);
+extern void napi_reuse_skb(struct napi_struct *napi,
+ struct sk_buff *skb);
+extern struct sk_buff * napi_fraginfo_skb(struct napi_struct *napi,
+ struct napi_gro_fraginfo *info);
extern int napi_gro_frags(struct napi_struct *napi,
struct napi_gro_fraginfo *info);
extern void netif_nit_deliver(struct sk_buff *skb);
diff --git a/include/linux/nwpserial.h b/include/linux/nwpserial.h
new file mode 100644
index 00000000000..9acb21572ea
--- /dev/null
+++ b/include/linux/nwpserial.h
@@ -0,0 +1,18 @@
+/*
+ * Serial Port driver for a NWP uart device
+ *
+ * Copyright (C) 2008 IBM Corp., Benjamin Krill <ben@codiert.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+#ifndef _NWPSERIAL_H
+#define _NWPSERIAL_H
+
+int nwpserial_register_port(struct uart_port *port);
+void nwpserial_unregister_port(int line);
+
+#endif /* _NWPSERIAL_H */
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
index 1e6d34bfa09..602cc1fdee9 100644
--- a/include/linux/page_cgroup.h
+++ b/include/linux/page_cgroup.h
@@ -26,10 +26,6 @@ enum {
PCG_LOCK, /* page cgroup is locked */
PCG_CACHE, /* charged as cache */
PCG_USED, /* this object is in use. */
- /* flags for LRU placement */
- PCG_ACTIVE, /* page is active in this cgroup */
- PCG_FILE, /* page is file system backed */
- PCG_UNEVICTABLE, /* page is unevictableable */
};
#define TESTPCGFLAG(uname, lname) \
@@ -50,19 +46,6 @@ TESTPCGFLAG(Cache, CACHE)
TESTPCGFLAG(Used, USED)
CLEARPCGFLAG(Used, USED)
-/* LRU management flags (from global-lru definition) */
-TESTPCGFLAG(File, FILE)
-SETPCGFLAG(File, FILE)
-CLEARPCGFLAG(File, FILE)
-
-TESTPCGFLAG(Active, ACTIVE)
-SETPCGFLAG(Active, ACTIVE)
-CLEARPCGFLAG(Active, ACTIVE)
-
-TESTPCGFLAG(Unevictable, UNEVICTABLE)
-SETPCGFLAG(Unevictable, UNEVICTABLE)
-CLEARPCGFLAG(Unevictable, UNEVICTABLE)
-
static inline int page_cgroup_nid(struct page_cgroup *pc)
{
return page_to_nid(pc->page);
@@ -105,4 +88,39 @@ static inline void page_cgroup_init(void)
}
#endif
+
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
+#include <linux/swap.h>
+extern struct mem_cgroup *
+swap_cgroup_record(swp_entry_t ent, struct mem_cgroup *mem);
+extern struct mem_cgroup *lookup_swap_cgroup(swp_entry_t ent);
+extern int swap_cgroup_swapon(int type, unsigned long max_pages);
+extern void swap_cgroup_swapoff(int type);
+#else
+#include <linux/swap.h>
+
+static inline
+struct mem_cgroup *swap_cgroup_record(swp_entry_t ent, struct mem_cgroup *mem)
+{
+ return NULL;
+}
+
+static inline
+struct mem_cgroup *lookup_swap_cgroup(swp_entry_t ent)
+{
+ return NULL;
+}
+
+static inline int
+swap_cgroup_swapon(int type, unsigned long max_pages)
+{
+ return 0;
+}
+
+static inline void swap_cgroup_swapoff(int type)
+{
+ return;
+}
+
+#endif
#endif
diff --git a/include/linux/pid.h b/include/linux/pid.h
index bb206c56d1f..49f1c2f66e9 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -123,6 +123,24 @@ extern struct pid *alloc_pid(struct pid_namespace *ns);
extern void free_pid(struct pid *pid);
/*
+ * ns_of_pid() returns the pid namespace in which the specified pid was
+ * allocated.
+ *
+ * NOTE:
+ * ns_of_pid() is expected to be called for a process (task) that has
+ * an attached 'struct pid' (see attach_pid(), detach_pid()), i.e. @pid
+ * is expected to be non-NULL. If @pid is NULL, the caller should handle
+ * the resulting NULL pid-ns.
+ */
+static inline struct pid_namespace *ns_of_pid(struct pid *pid)
+{
+ struct pid_namespace *ns = NULL;
+ if (pid)
+ ns = pid->numbers[pid->level].ns;
+ return ns;
+}
+
+/*
* the helpers to get the pid's id seen from different namespaces
*
* pid_nr() : global id, i.e. the id seen from the init namespace;
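ns_of_pid() reads pid->numbers[pid->level].ns, i.e. the namespace the pid was allocated in (its deepest level). A hedged usage sketch:

/* Hedged sketch: find the namespace a task's pid was allocated in. */
struct pid_namespace *ns = ns_of_pid(task_pid(tsk));

if (!ns)		/* tsk has no attached struct pid (already exited) */
	return -ESRCH;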
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
index d82fe825d62..38d10326246 100644
--- a/include/linux/pid_namespace.h
+++ b/include/linux/pid_namespace.h
@@ -79,11 +79,7 @@ static inline void zap_pid_ns_processes(struct pid_namespace *ns)
}
#endif /* CONFIG_PID_NS */
-static inline struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
-{
- return tsk->nsproxy->pid_ns;
-}
-
+extern struct pid_namespace *task_active_pid_ns(struct task_struct *tsk);
void pidhash_init(void);
void pidmap_init(void);
diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h
index 8fc909ef678..9743e4dbc91 100644
--- a/include/linux/raid/md_k.h
+++ b/include/linux/raid/md_k.h
@@ -137,6 +137,9 @@ struct mddev_s
struct gendisk *gendisk;
struct kobject kobj;
+ int hold_active;
+#define UNTIL_IOCTL 1
+#define UNTIL_STOP 2
/* Superblock information */
int major_version,
@@ -215,6 +218,9 @@ struct mddev_s
#define MD_RECOVERY_FROZEN 9
unsigned long recovery;
+ int recovery_disabled; /* if we detect that recovery
+ * will always fail, set this
+ * so we don't loop trying */
int in_sync; /* know to not need resync */
struct mutex reconfig_mutex;
@@ -244,6 +250,9 @@ struct mddev_s
struct sysfs_dirent *sysfs_state; /* handle for 'array_state'
* file in sysfs.
*/
+ struct sysfs_dirent *sysfs_action; /* handle for 'sync_action' */
+
+ struct work_struct del_work; /* used for delayed sysfs removal */
spinlock_t write_lock;
wait_queue_head_t sb_wait; /* for waiting on superblock updates */
@@ -334,17 +343,14 @@ static inline char * mdname (mddev_t * mddev)
* iterates through some rdev ringlist. It's safe to remove the
* current 'rdev'. Dont touch 'tmp' though.
*/
-#define rdev_for_each_list(rdev, tmp, list) \
- \
- for ((tmp) = (list).next; \
- (rdev) = (list_entry((tmp), mdk_rdev_t, same_set)), \
- (tmp) = (tmp)->next, (tmp)->prev != &(list) \
- ; )
+#define rdev_for_each_list(rdev, tmp, head) \
+ list_for_each_entry_safe(rdev, tmp, head, same_set)
+
/*
* iterates through the 'same array disks' ringlist
*/
#define rdev_for_each(rdev, tmp, mddev) \
- rdev_for_each_list(rdev, tmp, (mddev)->disks)
+ list_for_each_entry_safe(rdev, tmp, &((mddev)->disks), same_set)
#define rdev_for_each_rcu(rdev, mddev) \
list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set)
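With the macros rebuilt on list_for_each_entry_safe(), callers iterate an array's component devices exactly as before but get the standard list-helper semantics: the current entry may be removed, 'tmp' may not be touched. A hedged sketch (the callee name is illustrative):

/* Hedged sketch: walk an array's component devices, removal-safe. */
mdk_rdev_t *rdev, *tmp;

rdev_for_each(rdev, tmp, mddev)
	if (test_bit(Faulty, &rdev->flags))
		remove_faulty_rdev(rdev);	/* illustrative callee */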
diff --git a/include/linux/raid/md_p.h b/include/linux/raid/md_p.h
index 8b4de4a41ff..9491026afe6 100644
--- a/include/linux/raid/md_p.h
+++ b/include/linux/raid/md_p.h
@@ -194,6 +194,8 @@ static inline __u64 md_event(mdp_super_t *sb) {
return (ev<<32)| sb->events_lo;
}
+#define MD_SUPERBLOCK_1_TIME_SEC_MASK ((1ULL<<40) - 1)
+
/*
* The version-1 superblock :
* All numeric fields are little-endian.
diff --git a/include/linux/raid/raid0.h b/include/linux/raid/raid0.h
index 1b2dda035f8..fd42aa87c39 100644
--- a/include/linux/raid/raid0.h
+++ b/include/linux/raid/raid0.h
@@ -5,9 +5,9 @@
struct strip_zone
{
- sector_t zone_offset; /* Zone offset in md_dev */
- sector_t dev_offset; /* Zone offset in real dev */
- sector_t size; /* Zone size */
+ sector_t zone_start; /* Zone offset in md_dev (in sectors) */
+ sector_t dev_start; /* Zone offset in real dev (in sectors) */
+ sector_t sectors; /* Zone size in sectors */
int nb_dev; /* # of devices attached to the zone */
mdk_rdev_t **dev; /* Devices attached to the zone */
};
@@ -19,8 +19,8 @@ struct raid0_private_data
mdk_rdev_t **devlist; /* lists of rdevs, pointed to by strip_zone->dev */
int nr_strip_zones;
- sector_t hash_spacing;
- int preshift; /* shift this before divide by hash_spacing */
+ sector_t spacing;
+ int sector_shift; /* shift this before divide by spacing */
};
typedef struct raid0_private_data raid0_conf_t;
diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h
index 271c1c2c9f6..dede0a2cfc4 100644
--- a/include/linux/res_counter.h
+++ b/include/linux/res_counter.h
@@ -43,6 +43,10 @@ struct res_counter {
* the routines below consider this to be IRQ-safe
*/
spinlock_t lock;
+ /*
+ * Parent counter, used for hierarchical resource accounting
+ */
+ struct res_counter *parent;
};
/**
@@ -87,7 +91,7 @@ enum {
* helpers for accounting
*/
-void res_counter_init(struct res_counter *counter);
+void res_counter_init(struct res_counter *counter, struct res_counter *parent);
/*
* charge - try to consume more resource.
@@ -103,7 +107,7 @@ void res_counter_init(struct res_counter *counter);
int __must_check res_counter_charge_locked(struct res_counter *counter,
unsigned long val);
int __must_check res_counter_charge(struct res_counter *counter,
- unsigned long val);
+ unsigned long val, struct res_counter **limit_fail_at);
/*
* uncharge - tell that some portion of the resource is released
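With the parent pointer, a charge walks up the hierarchy, and limit_fail_at tells the caller which ancestor's limit was hit so reclaim can target that level. A hedged sketch of the calling side; mem_cgroup_from_res_counter() and get_swappiness() are assumed helper names, not declared in this diff.

/* Hedged sketch: hierarchical charge, then reclaim against the failing level. */
struct res_counter *fail_res;
int ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res);

if (ret)	/* fail_res points at the ancestor whose limit was hit */
	try_to_free_mem_cgroup_pages(mem_cgroup_from_res_counter(fail_res),
				     GFP_KERNEL, false, get_swappiness(mem));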
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index b4199841f1f..90bbbf0b116 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -161,6 +161,9 @@
#define PORT_S3C6400 84
+/* NWPSERIAL */
+#define PORT_NWPSERIAL 85
+
#ifdef __KERNEL__
#include <linux/compiler.h>
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 91dee50fe26..d3021557887 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -214,7 +214,8 @@ static inline void lru_cache_add_active_file(struct page *page)
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
gfp_t gfp_mask);
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
- gfp_t gfp_mask);
+ gfp_t gfp_mask, bool noswap,
+ unsigned int swappiness);
extern int __isolate_lru_page(struct page *page, int mode, int file);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
@@ -333,6 +334,22 @@ static inline void disable_swap_token(void)
put_swap_token(swap_token_mm);
}
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+extern void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent);
+#else
+static inline void
+mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
+{
+}
+#endif
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
+extern void mem_cgroup_uncharge_swap(swp_entry_t ent);
+#else
+static inline void mem_cgroup_uncharge_swap(swp_entry_t ent)
+{
+}
+#endif
+
#else /* CONFIG_SWAP */
#define nr_swap_pages 0L
@@ -409,6 +426,12 @@ static inline swp_entry_t get_swap_page(void)
#define has_swap_token(x) 0
#define disable_swap_token() do { } while(0)
+static inline int mem_cgroup_cache_charge_swapin(struct page *page,
+ struct mm_struct *mm, gfp_t mask, bool locked)
+{
+ return 0;
+}
+
#endif /* CONFIG_SWAP */
#endif /* __KERNEL__*/
#endif /* _LINUX_SWAP_H */
diff --git a/include/net/protocol.h b/include/net/protocol.h
index cb2965aa1b6..ffa5b8b1f1d 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -59,6 +59,9 @@ struct inet6_protocol
int (*gso_send_check)(struct sk_buff *skb);
struct sk_buff *(*gso_segment)(struct sk_buff *skb,
int features);
+ struct sk_buff **(*gro_receive)(struct sk_buff **head,
+ struct sk_buff *skb);
+ int (*gro_complete)(struct sk_buff *skb);
unsigned int flags; /* INET6_PROTO_xxx */
};
diff --git a/include/net/wimax.h b/include/net/wimax.h
index 1602614fdaf..073809ce94f 100644
--- a/include/net/wimax.h
+++ b/include/net/wimax.h
@@ -323,6 +323,9 @@ struct input_dev;
*
* @rf_hw: [private] State of the hardware radio switch (OFF/ON)
*
+ * @debugfs_dentry: [private] Used to hook up a debugfs entry. This
+ * shows up in the debugfs root as wimax:DEVICENAME.
+ *
* Description:
* This structure defines a common interface to access all WiMAX
* devices from different vendors and provides a common API as well as
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
index 6369d89c25d..f87f9614844 100644
--- a/include/xen/xenbus.h
+++ b/include/xen/xenbus.h
@@ -136,8 +136,6 @@ struct xenbus_transaction
/* Nil transaction ID. */
#define XBT_NIL ((struct xenbus_transaction) { 0 })
-int __init xenbus_dev_init(void);
-
char **xenbus_directory(struct xenbus_transaction t,
const char *dir, const char *node, unsigned int *num);
void *xenbus_read(struct xenbus_transaction t,