Diffstat (limited to 'include')
-rw-r--r--  include/linux/backing-dev.h          |   2
-rw-r--r--  include/linux/backlight.h            |   6
-rw-r--r--  include/linux/cpuset.h               |  27
-rw-r--r--  include/linux/cred.h                 |   2
-rw-r--r--  include/linux/decompress/inflate.h   |   4
-rw-r--r--  include/linux/err.h                  |   7
-rw-r--r--  include/linux/fs.h                   |   1
-rw-r--r--  include/linux/fsnotify_backend.h     |   2
-rw-r--r--  include/linux/hugetlb.h              |  10
-rw-r--r--  include/linux/kmemleak.h             |   2
-rw-r--r--  include/linux/kobject.h              |   1
-rw-r--r--  include/linux/list_lru.h             |   8
-rw-r--r--  include/linux/mfd/pm8xxx/rtc.h       |  25
-rw-r--r--  include/linux/mm.h                   |  11
-rw-r--r--  include/linux/mmzone.h               |   6
-rw-r--r--  include/linux/nilfs2_fs.h            |  52
-rw-r--r--  include/linux/pagemap.h              |  43
-rw-r--r--  include/linux/pagevec.h              |   5
-rw-r--r--  include/linux/printk.h               |  16
-rw-r--r--  include/linux/quotaops.h             |   8
-rw-r--r--  include/linux/radix-tree.h           |  55
-rw-r--r--  include/linux/shmem_fs.h             |   1
-rw-r--r--  include/linux/swap.h                 |  36
-rw-r--r--  include/linux/syscalls.h             |   2
-rw-r--r--  include/linux/vm_event_item.h        |   1
-rw-r--r--  include/linux/vmstat.h               |  17
-rw-r--r--  include/uapi/linux/libc-compat.h     |   9
-rw-r--r--  include/uapi/linux/xattr.h           |   7
28 files changed, 276 insertions(+), 90 deletions(-)
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 24819001f5c..e488e9459a9 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -95,7 +95,7 @@ struct backing_dev_info {
unsigned int max_ratio, max_prop_frac;
struct bdi_writeback wb; /* default writeback info for this bdi */
- spinlock_t wb_lock; /* protects work_list */
+ spinlock_t wb_lock; /* protects work_list & wb.dwork scheduling */
struct list_head work_list;
diff --git a/include/linux/backlight.h b/include/linux/backlight.h
index 5f9cd963213..72647429adf 100644
--- a/include/linux/backlight.h
+++ b/include/linux/backlight.h
@@ -9,6 +9,7 @@
#define _LINUX_BACKLIGHT_H
#include <linux/device.h>
+#include <linux/fb.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
@@ -104,6 +105,11 @@ struct backlight_device {
struct list_head entry;
struct device dev;
+
+ /* Multiple framebuffers may share one backlight device */
+ bool fb_bl_on[FB_MAX];
+
+ int use_count;
};
static inline void backlight_update_status(struct backlight_device *bd)
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 3fe661fe96d..b19d3dc2e65 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -87,25 +87,26 @@ extern void rebuild_sched_domains(void);
extern void cpuset_print_task_mems_allowed(struct task_struct *p);
/*
- * get_mems_allowed is required when making decisions involving mems_allowed
- * such as during page allocation. mems_allowed can be updated in parallel
- * and depending on the new value an operation can fail potentially causing
- * process failure. A retry loop with get_mems_allowed and put_mems_allowed
- * prevents these artificial failures.
+ * read_mems_allowed_begin is required when making decisions involving
+ * mems_allowed such as during page allocation. mems_allowed can be updated in
+ * parallel and depending on the new value an operation can fail potentially
+ * causing process failure. A retry loop with read_mems_allowed_begin and
+ * read_mems_allowed_retry prevents these artificial failures.
*/
-static inline unsigned int get_mems_allowed(void)
+static inline unsigned int read_mems_allowed_begin(void)
{
return read_seqcount_begin(&current->mems_allowed_seq);
}
/*
- * If this returns false, the operation that took place after get_mems_allowed
- * may have failed. It is up to the caller to retry the operation if
+ * If this returns true, the operation that took place after
+ * read_mems_allowed_begin may have failed artificially due to a concurrent
+ * update of mems_allowed. It is up to the caller to retry the operation if
* appropriate.
*/
-static inline bool put_mems_allowed(unsigned int seq)
+static inline bool read_mems_allowed_retry(unsigned int seq)
{
- return !read_seqcount_retry(&current->mems_allowed_seq, seq);
+ return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
static inline void set_mems_allowed(nodemask_t nodemask)
@@ -225,14 +226,14 @@ static inline void set_mems_allowed(nodemask_t nodemask)
{
}
-static inline unsigned int get_mems_allowed(void)
+static inline unsigned int read_mems_allowed_begin(void)
{
return 0;
}
-static inline bool put_mems_allowed(unsigned int seq)
+static inline bool read_mems_allowed_retry(unsigned int seq)
{
- return true;
+ return false;
}
#endif /* !CONFIG_CPUSETS */
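The renamed helpers above are meant to be used as a seqcount retry loop around any mems_allowed-dependent operation. A minimal sketch of that pattern, with do_alloc() as a placeholder for the real allocation step (it is not something introduced by this change):

    /* Sketch of the read_mems_allowed_begin()/_retry() pattern;
     * do_alloc() stands in for any operation whose outcome depends
     * on current->mems_allowed. */
    static struct page *alloc_with_mems_retry(gfp_t gfp)
    {
        struct page *page;
        unsigned int cpuset_mems_cookie;

        do {
            cpuset_mems_cookie = read_mems_allowed_begin();
            page = do_alloc(gfp);
        } while (!page && read_mems_allowed_retry(cpuset_mems_cookie));

        return page;
    }

Note the inverted sense compared with put_mems_allowed(): read_mems_allowed_retry() returns true when mems_allowed changed underneath the operation and a failure should be retried.
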
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 04421e82536..f61d6c8f5ef 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -66,7 +66,7 @@ extern struct group_info *groups_alloc(int);
extern struct group_info init_groups;
extern void groups_free(struct group_info *);
extern int set_current_groups(struct group_info *);
-extern int set_groups(struct cred *, struct group_info *);
+extern void set_groups(struct cred *, struct group_info *);
extern int groups_search(const struct group_info *, kgid_t);
/* access the groups "array" with this macro */
diff --git a/include/linux/decompress/inflate.h b/include/linux/decompress/inflate.h
index 8c0aef1ba5f..1d0aedef982 100644
--- a/include/linux/decompress/inflate.h
+++ b/include/linux/decompress/inflate.h
@@ -1,5 +1,5 @@
-#ifndef INFLATE_H
-#define INFLATE_H
+#ifndef LINUX_DECOMPRESS_INFLATE_H
+#define LINUX_DECOMPRESS_INFLATE_H
int gunzip(unsigned char *inbuf, int len,
int(*fill)(void*, unsigned int),
diff --git a/include/linux/err.h b/include/linux/err.h
index 15f92e07245..a729120644d 100644
--- a/include/linux/err.h
+++ b/include/linux/err.h
@@ -2,12 +2,13 @@
#define _LINUX_ERR_H
#include <linux/compiler.h>
+#include <linux/types.h>
#include <asm/errno.h>
/*
* Kernel pointers have redundant information, so we can use a
- * scheme where we can return either an error code or a dentry
+ * scheme where we can return either an error code or a normal
* pointer with the same return value.
*
* This should be a per-architecture thing, to allow different
@@ -29,12 +30,12 @@ static inline long __must_check PTR_ERR(__force const void *ptr)
return (long) ptr;
}
-static inline long __must_check IS_ERR(__force const void *ptr)
+static inline bool __must_check IS_ERR(__force const void *ptr)
{
return IS_ERR_VALUE((unsigned long)ptr);
}
-static inline long __must_check IS_ERR_OR_NULL(__force const void *ptr)
+static inline bool __must_check IS_ERR_OR_NULL(__force const void *ptr)
{
return !ptr || IS_ERR_VALUE((unsigned long)ptr);
}
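The switch to bool return types does not change how the ERR_PTR scheme is consumed; a purely illustrative caller (struct foo and open_thing() are made up for the sketch):

    /* Illustrative use of the ERR_PTR()/IS_ERR()/PTR_ERR() scheme;
     * open_thing() is a hypothetical provider. */
    static int use_thing(void)
    {
        struct foo *f = open_thing();

        if (IS_ERR(f))
            return PTR_ERR(f);      /* recover the encoded -errno */

        /* ... use f ... */
        return 0;
    }
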
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 6e765d28841..3ca9420f627 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -419,6 +419,7 @@ struct address_space {
struct mutex i_mmap_mutex; /* protect tree, count, list */
/* Protected by tree_lock together with the radix tree */
unsigned long nrpages; /* number of total pages */
+ unsigned long nrshadows; /* number of shadow entries */
pgoff_t writeback_index;/* writeback starts here */
const struct address_space_operations *a_ops; /* methods */
unsigned long flags; /* error bits/gfp mask */
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 64cf3ef5069..fc7718c6bd3 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -178,7 +178,7 @@ struct fsnotify_group {
struct fanotify_group_private_data {
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
/* allows a group to block waiting for a userspace response */
- struct mutex access_mutex;
+ spinlock_t access_lock;
struct list_head access_list;
wait_queue_head_t access_waitq;
atomic_t bypass_perm;
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 8c43cc469d7..5b337cf8fb8 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -6,6 +6,8 @@
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
+#include <linux/list.h>
+#include <linux/kref.h>
struct ctl_table;
struct user_struct;
@@ -23,6 +25,14 @@ struct hugepage_subpool {
long max_hpages, used_hpages;
};
+struct resv_map {
+ struct kref refs;
+ spinlock_t lock;
+ struct list_head regions;
+};
+extern struct resv_map *resv_map_alloc(void);
+void resv_map_release(struct kref *ref);
+
extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
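Exposing struct resv_map together with resv_map_alloc()/resv_map_release() lets reservation maps be shared through the usual kref pattern; a sketch of the expected lifetime handling, assuming the standard kref helpers (not code from this patch):

    /* Sketch: take and drop references on a shared resv_map. */
    static struct resv_map *get_shared_resv_map(void)
    {
        struct resv_map *map = resv_map_alloc();    /* refcount starts at 1 */

        if (!map)
            return NULL;

        kref_get(&map->refs);                       /* a second holder */
        return map;
    }

    /* Each holder eventually drops its reference with
     *     kref_put(&map->refs, resv_map_release);
     * and the map is freed on the final put. */
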
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
index 2a5e5548a1d..5bb424659c0 100644
--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
@@ -30,8 +30,6 @@ extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size) __ref;
extern void kmemleak_free(const void *ptr) __ref;
extern void kmemleak_free_part(const void *ptr, size_t size) __ref;
extern void kmemleak_free_percpu(const void __percpu *ptr) __ref;
-extern void kmemleak_padding(const void *ptr, unsigned long offset,
- size_t size) __ref;
extern void kmemleak_not_leak(const void *ptr) __ref;
extern void kmemleak_ignore(const void *ptr) __ref;
extern void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) __ref;
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index 926afb6f6b5..f896a33e834 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -119,6 +119,7 @@ struct kobj_type {
};
struct kobj_uevent_env {
+ char *argv[3];
char *envp[UEVENT_NUM_ENVP];
int envp_idx;
char buf[UEVENT_BUFFER_SIZE];
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index 3ce541753c8..f3434533fbf 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -13,6 +13,8 @@
/* list_lru_walk_cb has to always return one of those */
enum lru_status {
LRU_REMOVED, /* item removed from list */
+ LRU_REMOVED_RETRY, /* item removed, but lock has been
+ dropped and reacquired */
LRU_ROTATE, /* item referenced, give another pass */
LRU_SKIP, /* item cannot be locked, skip */
LRU_RETRY, /* item not freeable. May drop the lock
@@ -32,7 +34,11 @@ struct list_lru {
};
void list_lru_destroy(struct list_lru *lru);
-int list_lru_init(struct list_lru *lru);
+int list_lru_init_key(struct list_lru *lru, struct lock_class_key *key);
+static inline int list_lru_init(struct list_lru *lru)
+{
+ return list_lru_init_key(lru, NULL);
+}
/**
* list_lru_add: add an element to the lru list's tail
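Turning list_lru_init() into a wrapper keeps existing callers unchanged while letting users hand in their own lockdep class. A short sketch of both call styles (the variable names are illustrative):

    static struct list_lru plain_lru;
    static struct list_lru keyed_lru;
    static struct lock_class_key keyed_lru_key;     /* illustrative lockdep key */

    static int init_example_lrus(void)
    {
        int err;

        err = list_lru_init(&plain_lru);            /* same as passing a NULL key */
        if (err)
            return err;

        /* give this lru's internal locks their own lockdep class */
        return list_lru_init_key(&keyed_lru, &keyed_lru_key);
    }
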
diff --git a/include/linux/mfd/pm8xxx/rtc.h b/include/linux/mfd/pm8xxx/rtc.h
deleted file mode 100644
index 14f1983eaec..00000000000
--- a/include/linux/mfd/pm8xxx/rtc.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __RTC_PM8XXX_H__
-#define __RTC_PM8XXX_H__
-
-#define PM8XXX_RTC_DEV_NAME "rtc-pm8xxx"
-/**
- * struct pm8xxx_rtc_pdata - RTC driver platform data
- * @rtc_write_enable: variable stating RTC write capability
- */
-struct pm8xxx_rtc_platform_data {
- bool rtc_write_enable;
-};
-
-#endif /* __RTC_PM8XXX_H__ */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 2eec61fe75c..35300f390eb 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1041,6 +1041,14 @@ extern void show_free_areas(unsigned int flags);
extern bool skip_free_areas_node(unsigned int flags, int nid);
int shmem_zero_setup(struct vm_area_struct *);
+#ifdef CONFIG_SHMEM
+bool shmem_mapping(struct address_space *mapping);
+#else
+static inline bool shmem_mapping(struct address_space *mapping)
+{
+ return false;
+}
+#endif
extern int can_do_mlock(void);
extern int user_shm_lock(size_t, struct user_struct *);
@@ -1658,10 +1666,8 @@ static inline int __early_pfn_to_nid(unsigned long pfn)
#else
/* please see mm/page_alloc.c */
extern int __meminit early_pfn_to_nid(unsigned long pfn);
-#ifdef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
/* there is a per-arch backend function. */
extern int __meminit __early_pfn_to_nid(unsigned long pfn);
-#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
#endif
extern void set_dma_reserve(unsigned long new_dma_reserve);
@@ -1826,6 +1832,7 @@ vm_unmapped_area(struct vm_unmapped_area_info *info)
extern void truncate_inode_pages(struct address_space *, loff_t);
extern void truncate_inode_pages_range(struct address_space *,
loff_t lstart, loff_t lend);
+extern void truncate_inode_pages_final(struct address_space *);
/* generic vm_area_ops exported for stackable file systems */
extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 9b61b9bf81a..fac5509c18f 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -142,6 +142,9 @@ enum zone_stat_item {
NUMA_LOCAL, /* allocation from local node */
NUMA_OTHER, /* allocation from other node */
#endif
+ WORKINGSET_REFAULT,
+ WORKINGSET_ACTIVATE,
+ WORKINGSET_NODERECLAIM,
NR_ANON_TRANSPARENT_HUGEPAGES,
NR_FREE_CMA_PAGES,
NR_VM_ZONE_STAT_ITEMS };
@@ -392,6 +395,9 @@ struct zone {
spinlock_t lru_lock;
struct lruvec lruvec;
+ /* Evictions & activations on the inactive file list */
+ atomic_long_t inactive_age;
+
unsigned long pages_scanned; /* since last reclaim */
unsigned long flags; /* zone flags, see below */
diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h
index 98755767c7b..ff3fea3194c 100644
--- a/include/linux/nilfs2_fs.h
+++ b/include/linux/nilfs2_fs.h
@@ -82,6 +82,8 @@ struct nilfs_inode {
__le32 i_pad;
};
+#define NILFS_MIN_INODE_SIZE 128
+
/**
* struct nilfs_super_root - structure of super root
* @sr_sum: check sum
@@ -482,6 +484,8 @@ struct nilfs_dat_entry {
__le64 de_rsv;
};
+#define NILFS_MIN_DAT_ENTRY_SIZE 32
+
/**
* struct nilfs_snapshot_list - snapshot list
* @ssl_next: next checkpoint number on snapshot list
@@ -520,6 +524,8 @@ struct nilfs_checkpoint {
struct nilfs_inode cp_ifile_inode;
};
+#define NILFS_MIN_CHECKPOINT_SIZE (64 + NILFS_MIN_INODE_SIZE)
+
/* checkpoint flags */
enum {
NILFS_CHECKPOINT_SNAPSHOT,
@@ -615,6 +621,8 @@ struct nilfs_segment_usage {
__le32 su_flags;
};
+#define NILFS_MIN_SEGMENT_USAGE_SIZE 16
+
/* segment usage flag */
enum {
NILFS_SEGMENT_USAGE_ACTIVE,
@@ -710,6 +718,48 @@ static inline int nilfs_suinfo_clean(const struct nilfs_suinfo *si)
}
/* ioctl */
+/**
+ * nilfs_suinfo_update - segment usage information update
+ * @sup_segnum: segment number
+ * @sup_flags: flags for which fields are active in sup_sui
+ * @sup_reserved: reserved necessary for alignment
+ * @sup_sui: segment usage information
+ */
+struct nilfs_suinfo_update {
+ __u64 sup_segnum;
+ __u32 sup_flags;
+ __u32 sup_reserved;
+ struct nilfs_suinfo sup_sui;
+};
+
+enum {
+ NILFS_SUINFO_UPDATE_LASTMOD,
+ NILFS_SUINFO_UPDATE_NBLOCKS,
+ NILFS_SUINFO_UPDATE_FLAGS,
+ __NR_NILFS_SUINFO_UPDATE_FIELDS,
+};
+
+#define NILFS_SUINFO_UPDATE_FNS(flag, name) \
+static inline void \
+nilfs_suinfo_update_set_##name(struct nilfs_suinfo_update *sup) \
+{ \
+ sup->sup_flags |= 1UL << NILFS_SUINFO_UPDATE_##flag; \
+} \
+static inline void \
+nilfs_suinfo_update_clear_##name(struct nilfs_suinfo_update *sup) \
+{ \
+ sup->sup_flags &= ~(1UL << NILFS_SUINFO_UPDATE_##flag); \
+} \
+static inline int \
+nilfs_suinfo_update_##name(const struct nilfs_suinfo_update *sup) \
+{ \
+ return !!(sup->sup_flags & (1UL << NILFS_SUINFO_UPDATE_##flag));\
+}
+
+NILFS_SUINFO_UPDATE_FNS(LASTMOD, lastmod)
+NILFS_SUINFO_UPDATE_FNS(NBLOCKS, nblocks)
+NILFS_SUINFO_UPDATE_FNS(FLAGS, flags)
+
enum {
NILFS_CHECKPOINT,
NILFS_SNAPSHOT,
@@ -863,5 +913,7 @@ struct nilfs_bdesc {
_IOW(NILFS_IOCTL_IDENT, 0x8B, __u64)
#define NILFS_IOCTL_SET_ALLOC_RANGE \
_IOW(NILFS_IOCTL_IDENT, 0x8C, __u64[2])
+#define NILFS_IOCTL_SET_SUINFO \
+ _IOW(NILFS_IOCTL_IDENT, 0x8D, struct nilfs_argv)
#endif /* _LINUX_NILFS_FS_H */
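The NILFS_SUINFO_UPDATE_FNS() macro expands to set/clear/test helpers per flag, which callers of the new NILFS_IOCTL_SET_SUINFO ioctl use to mark which sup_sui fields are valid. A small sketch of filling one record (the values are arbitrary, and sui_nblocks is assumed to be the field name from struct nilfs_suinfo):

    /* Sketch: request that only the block count of segment @segnum be
     * updated; all values here are examples. */
    static void fill_suinfo_update(struct nilfs_suinfo_update *sup, __u64 segnum)
    {
        *sup = (struct nilfs_suinfo_update){ .sup_segnum = segnum };

        nilfs_suinfo_update_set_nblocks(sup);   /* sets NILFS_SUINFO_UPDATE_NBLOCKS */
        sup->sup_sui.sui_nblocks = 128;         /* example block count */
    }
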
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 1710d1b060b..45598f1e9aa 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -25,6 +25,7 @@ enum mapping_flags {
AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */
AS_UNEVICTABLE = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */
AS_BALLOON_MAP = __GFP_BITS_SHIFT + 4, /* balloon page special map */
+ AS_EXITING = __GFP_BITS_SHIFT + 5, /* final truncate in progress */
};
static inline void mapping_set_error(struct address_space *mapping, int error)
@@ -69,6 +70,16 @@ static inline int mapping_balloon(struct address_space *mapping)
return mapping && test_bit(AS_BALLOON_MAP, &mapping->flags);
}
+static inline void mapping_set_exiting(struct address_space *mapping)
+{
+ set_bit(AS_EXITING, &mapping->flags);
+}
+
+static inline int mapping_exiting(struct address_space *mapping)
+{
+ return test_bit(AS_EXITING, &mapping->flags);
+}
+
static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
@@ -243,12 +254,20 @@ static inline struct page *page_cache_alloc_readahead(struct address_space *x)
typedef int filler_t(void *, struct page *);
-extern struct page * find_get_page(struct address_space *mapping,
- pgoff_t index);
-extern struct page * find_lock_page(struct address_space *mapping,
- pgoff_t index);
-extern struct page * find_or_create_page(struct address_space *mapping,
- pgoff_t index, gfp_t gfp_mask);
+pgoff_t page_cache_next_hole(struct address_space *mapping,
+ pgoff_t index, unsigned long max_scan);
+pgoff_t page_cache_prev_hole(struct address_space *mapping,
+ pgoff_t index, unsigned long max_scan);
+
+struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
+struct page *find_get_page(struct address_space *mapping, pgoff_t offset);
+struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
+struct page *find_lock_page(struct address_space *mapping, pgoff_t offset);
+struct page *find_or_create_page(struct address_space *mapping, pgoff_t index,
+ gfp_t gfp_mask);
+unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
+ unsigned int nr_entries, struct page **entries,
+ pgoff_t *indices);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
@@ -270,8 +289,6 @@ static inline struct page *grab_cache_page(struct address_space *mapping,
extern struct page * grab_cache_page_nowait(struct address_space *mapping,
pgoff_t index);
-extern struct page * read_cache_page_async(struct address_space *mapping,
- pgoff_t index, filler_t *filler, void *data);
extern struct page * read_cache_page(struct address_space *mapping,
pgoff_t index, filler_t *filler, void *data);
extern struct page * read_cache_page_gfp(struct address_space *mapping,
@@ -279,14 +296,6 @@ extern struct page * read_cache_page_gfp(struct address_space *mapping,
extern int read_cache_pages(struct address_space *mapping,
struct list_head *pages, filler_t *filler, void *data);
-static inline struct page *read_mapping_page_async(
- struct address_space *mapping,
- pgoff_t index, void *data)
-{
- filler_t *filler = (filler_t *)mapping->a_ops->readpage;
- return read_cache_page_async(mapping, index, filler, data);
-}
-
static inline struct page *read_mapping_page(struct address_space *mapping,
pgoff_t index, void *data)
{
@@ -539,7 +548,7 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
-extern void __delete_from_page_cache(struct page *page);
+extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
/*
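Unlike find_get_page(), the new find_get_entry() can also return a shadow (exceptional) entry left behind by a previous eviction, so callers have to distinguish the two cases. A sketch of that distinction, using the existing radix_tree_exceptional_entry() test:

    /* Sketch: an entry lookup may return a shadow entry where a page
     * lookup would simply return NULL. */
    static void inspect_entry(struct address_space *mapping, pgoff_t index)
    {
        struct page *page = find_get_entry(mapping, index);

        if (radix_tree_exceptional_entry(page)) {
            void *shadow = page;    /* not a page, no reference taken */

            (void)shadow;           /* e.g. feed it to refault detection */
        } else if (page) {
            /* a real page with a reference held */
            page_cache_release(page);
        }
    }
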
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h
index e4dbfab3772..b45d391b454 100644
--- a/include/linux/pagevec.h
+++ b/include/linux/pagevec.h
@@ -22,6 +22,11 @@ struct pagevec {
void __pagevec_release(struct pagevec *pvec);
void __pagevec_lru_add(struct pagevec *pvec);
+unsigned pagevec_lookup_entries(struct pagevec *pvec,
+ struct address_space *mapping,
+ pgoff_t start, unsigned nr_entries,
+ pgoff_t *indices);
+void pagevec_remove_exceptionals(struct pagevec *pvec);
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
pgoff_t start, unsigned nr_pages);
unsigned pagevec_lookup_tag(struct pagevec *pvec,
diff --git a/include/linux/printk.h b/include/linux/printk.h
index fa47e2708c0..8752f7595b2 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -24,13 +24,9 @@ static inline int printk_get_level(const char *buffer)
static inline const char *printk_skip_level(const char *buffer)
{
- if (printk_get_level(buffer)) {
- switch (buffer[1]) {
- case '0' ... '7':
- case 'd': /* KERN_DEFAULT */
- return buffer + 2;
- }
- }
+ if (printk_get_level(buffer))
+ return buffer + 2;
+
return buffer;
}
@@ -124,9 +120,9 @@ asmlinkage __printf(1, 0)
int vprintk(const char *fmt, va_list args);
asmlinkage __printf(5, 6) __cold
-asmlinkage int printk_emit(int facility, int level,
- const char *dict, size_t dictlen,
- const char *fmt, ...);
+int printk_emit(int facility, int level,
+ const char *dict, size_t dictlen,
+ const char *fmt, ...);
asmlinkage __printf(1, 2) __cold
int printk(const char *fmt, ...);
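The simplified printk_skip_level() relies on printk_get_level() only recognising the two-character KERN_SOH prefixes, which makes the per-character switch redundant. An illustration of the resulting behaviour (the string literals are just examples):

    static void printk_skip_level_example(void)
    {
        /* A recognised level prefix is always exactly two bytes long. */
        const char *a = printk_skip_level(KERN_ERR "oops\n");  /* -> "oops\n" */
        const char *b = printk_skip_level("plain text\n");     /* unchanged   */

        (void)a;
        (void)b;
    }
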
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 6965fe394c3..1d3eee594cd 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -46,6 +46,14 @@ void inode_reclaim_rsv_space(struct inode *inode, qsize_t number);
void dquot_initialize(struct inode *inode);
void dquot_drop(struct inode *inode);
struct dquot *dqget(struct super_block *sb, struct kqid qid);
+static inline struct dquot *dqgrab(struct dquot *dquot)
+{
+ /* Make sure someone else has active reference to dquot */
+ WARN_ON_ONCE(!atomic_read(&dquot->dq_count));
+ WARN_ON_ONCE(!test_bit(DQ_ACTIVE_B, &dquot->dq_flags));
+ atomic_inc(&dquot->dq_count);
+ return dquot;
+}
void dqput(struct dquot *dquot);
int dquot_scan_active(struct super_block *sb,
int (*fn)(struct dquot *dquot, unsigned long priv),
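dqgrab() takes an extra reference on a dquot the caller already knows to be referenced and active, so it skips the hash lookup that dqget() performs. A sketch of the intended use:

    /* Sketch: the caller already holds a reference (e.g. via a list it
     * is walking), so bumping the count is sufficient. */
    static void use_dquot_later(struct dquot *dq)
    {
        dqgrab(dq);     /* just dq_count++, no lookup as in dqget() */

        /* ... work with dq after dropping the lock that protected the list ... */

        dqput(dq);      /* release the extra reference */
    }
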
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 403940787be..33170dbd9db 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -60,6 +60,49 @@ static inline int radix_tree_is_indirect_ptr(void *ptr)
#define RADIX_TREE_MAX_TAGS 3
+#ifdef __KERNEL__
+#define RADIX_TREE_MAP_SHIFT (CONFIG_BASE_SMALL ? 4 : 6)
+#else
+#define RADIX_TREE_MAP_SHIFT 3 /* For more stressful testing */
+#endif
+
+#define RADIX_TREE_MAP_SIZE (1UL << RADIX_TREE_MAP_SHIFT)
+#define RADIX_TREE_MAP_MASK (RADIX_TREE_MAP_SIZE-1)
+
+#define RADIX_TREE_TAG_LONGS \
+ ((RADIX_TREE_MAP_SIZE + BITS_PER_LONG - 1) / BITS_PER_LONG)
+
+#define RADIX_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long))
+#define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \
+ RADIX_TREE_MAP_SHIFT))
+
+/* Height component in node->path */
+#define RADIX_TREE_HEIGHT_SHIFT (RADIX_TREE_MAX_PATH + 1)
+#define RADIX_TREE_HEIGHT_MASK ((1UL << RADIX_TREE_HEIGHT_SHIFT) - 1)
+
+/* Internally used bits of node->count */
+#define RADIX_TREE_COUNT_SHIFT (RADIX_TREE_MAP_SHIFT + 1)
+#define RADIX_TREE_COUNT_MASK ((1UL << RADIX_TREE_COUNT_SHIFT) - 1)
+
+struct radix_tree_node {
+ unsigned int path; /* Offset in parent & height from the bottom */
+ unsigned int count;
+ union {
+ struct {
+ /* Used when ascending tree */
+ struct radix_tree_node *parent;
+ /* For tree user */
+ void *private_data;
+ };
+ /* Used when freeing node */
+ struct rcu_head rcu_head;
+ };
+ /* For tree user */
+ struct list_head private_list;
+ void __rcu *slots[RADIX_TREE_MAP_SIZE];
+ unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
+};
+
/* root tags are stored in gfp_mask, shifted by __GFP_BITS_SHIFT */
struct radix_tree_root {
unsigned int height;
@@ -101,6 +144,7 @@ do { \
* concurrently with other readers.
*
* The notable exceptions to this rule are the following functions:
+ * __radix_tree_lookup
* radix_tree_lookup
* radix_tree_lookup_slot
* radix_tree_tag_get
@@ -216,9 +260,16 @@ static inline void radix_tree_replace_slot(void **pslot, void *item)
rcu_assign_pointer(*pslot, item);
}
+int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
+ struct radix_tree_node **nodep, void ***slotp);
int radix_tree_insert(struct radix_tree_root *, unsigned long, void *);
+void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index,
+ struct radix_tree_node **nodep, void ***slotp);
void *radix_tree_lookup(struct radix_tree_root *, unsigned long);
void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long);
+bool __radix_tree_delete_node(struct radix_tree_root *root,
+ struct radix_tree_node *node);
+void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
void *radix_tree_delete(struct radix_tree_root *, unsigned long);
unsigned int
radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
@@ -226,10 +277,6 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root,
void ***results, unsigned long *indices,
unsigned long first_index, unsigned int max_items);
-unsigned long radix_tree_next_hole(struct radix_tree_root *root,
- unsigned long index, unsigned long max_scan);
-unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
- unsigned long index, unsigned long max_scan);
int radix_tree_preload(gfp_t gfp_mask);
int radix_tree_maybe_preload(gfp_t gfp_mask);
void radix_tree_init(void);
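radix_tree_delete_item() only removes the slot at the given index if it still holds the expected item, which is what lets the page cache race safely against concurrent replacement of a page by a shadow entry. A rough sketch of that conditional-delete usage (locking and return-value handling simplified; comparing the return value against the expected item is used here to detect whether the removal happened):

    /* Sketch: delete the entry at @index only if it is still @expected. */
    static bool delete_if_unchanged(struct address_space *mapping,
                                    pgoff_t index, void *expected)
    {
        void *old;

        spin_lock_irq(&mapping->tree_lock);
        old = radix_tree_delete_item(&mapping->page_tree, index, expected);
        spin_unlock_irq(&mapping->tree_lock);

        return old == expected;     /* true only if we removed it */
    }
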
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 9d55438bc4a..4d1771c2d29 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -51,6 +51,7 @@ extern struct file *shmem_kernel_file_setup(const char *name, loff_t size,
unsigned long flags);
extern int shmem_zero_setup(struct vm_area_struct *);
extern int shmem_lock(struct file *file, int lock, struct user_struct *user);
+extern bool shmem_mapping(struct address_space *mapping);
extern void shmem_unlock_mapping(struct address_space *mapping);
extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 46ba0c6c219..35071156075 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -260,6 +260,42 @@ struct swap_list_t {
int next; /* swapfile to be used next */
};
+/* linux/mm/workingset.c */
+void *workingset_eviction(struct address_space *mapping, struct page *page);
+bool workingset_refault(void *shadow);
+void workingset_activation(struct page *page);
+extern struct list_lru workingset_shadow_nodes;
+
+static inline unsigned int workingset_node_pages(struct radix_tree_node *node)
+{
+ return node->count & RADIX_TREE_COUNT_MASK;
+}
+
+static inline void workingset_node_pages_inc(struct radix_tree_node *node)
+{
+ node->count++;
+}
+
+static inline void workingset_node_pages_dec(struct radix_tree_node *node)
+{
+ node->count--;
+}
+
+static inline unsigned int workingset_node_shadows(struct radix_tree_node *node)
+{
+ return node->count >> RADIX_TREE_COUNT_SHIFT;
+}
+
+static inline void workingset_node_shadows_inc(struct radix_tree_node *node)
+{
+ node->count += 1U << RADIX_TREE_COUNT_SHIFT;
+}
+
+static inline void workingset_node_shadows_dec(struct radix_tree_node *node)
+{
+ node->count -= 1U << RADIX_TREE_COUNT_SHIFT;
+}
+
/* linux/mm/page_alloc.c */
extern unsigned long totalram_pages;
extern unsigned long totalreserve_pages;
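The workingset_node_*() helpers pack two counters into radix_tree_node->count: the low RADIX_TREE_COUNT_SHIFT bits count resident pages, the bits above count shadow entries. A worked example of the packing (values are arbitrary; with RADIX_TREE_MAP_SHIFT == 6 on a non-CONFIG_BASE_SMALL kernel, RADIX_TREE_COUNT_SHIFT is 7):

    static void workingset_count_packing_demo(void)
    {
        /* 3 pages and 2 shadow entries: count = 3 | (2 << 7) = 0x103 */
        struct radix_tree_node node = {
            .count = 3 | (2U << RADIX_TREE_COUNT_SHIFT),
        };

        WARN_ON(workingset_node_pages(&node) != 3);     /* low bits  */
        WARN_ON(workingset_node_shadows(&node) != 2);   /* high bits */
    }
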
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 1e67b7a5968..2aa8b749f13 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -200,6 +200,8 @@ extern struct trace_event_functions exit_syscall_print_funcs;
} \
static inline long SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__))
+asmlinkage long sys32_quotactl(unsigned int cmd, const char __user *special,
+ qid_t id, void __user *addr);
asmlinkage long sys_time(time_t __user *tloc);
asmlinkage long sys_stime(time_t __user *tptr);
asmlinkage long sys_gettimeofday(struct timeval __user *tv,
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 3a712e2e7d7..486c3972c0b 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -37,6 +37,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
PGINODESTEAL, SLABS_SCANNED, KSWAPD_INODESTEAL,
KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
PAGEOUTRUN, ALLOCSTALL, PGROTATED,
+ DROP_PAGECACHE, DROP_SLAB,
#ifdef CONFIG_NUMA_BALANCING
NUMA_PTE_UPDATES,
NUMA_HUGE_PTE_UPDATES,
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 67ce70c8279..ea4476157e0 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -187,8 +187,6 @@ extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);
#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
-extern void inc_zone_state(struct zone *, enum zone_stat_item);
-
#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
@@ -230,18 +228,18 @@ static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
atomic_long_inc(&vm_stat[item]);
}
-static inline void __inc_zone_page_state(struct page *page,
- enum zone_stat_item item)
-{
- __inc_zone_state(page_zone(page), item);
-}
-
static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
atomic_long_dec(&zone->vm_stat[item]);
atomic_long_dec(&vm_stat[item]);
}
+static inline void __inc_zone_page_state(struct page *page,
+ enum zone_stat_item item)
+{
+ __inc_zone_state(page_zone(page), item);
+}
+
static inline void __dec_zone_page_state(struct page *page,
enum zone_stat_item item)
{
@@ -256,6 +254,9 @@ static inline void __dec_zone_page_state(struct page *page,
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state
+#define inc_zone_state __inc_zone_state
+#define dec_zone_state __dec_zone_state
+
#define set_pgdat_percpu_threshold(pgdat, callback) { }
static inline void refresh_cpu_vm_stats(int cpu) { }
diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h
index 335e8a7cad3..c140620dad9 100644
--- a/include/uapi/linux/libc-compat.h
+++ b/include/uapi/linux/libc-compat.h
@@ -85,6 +85,12 @@
#endif /* _NETINET_IN_H */
+/* Definitions for xattr.h */
+#if defined(_SYS_XATTR_H)
+#define __UAPI_DEF_XATTR 0
+#else
+#define __UAPI_DEF_XATTR 1
+#endif
/* If we did not see any headers from any supported C libraries,
* or we are being included in the kernel, then define everything
@@ -98,6 +104,9 @@
#define __UAPI_DEF_IPV6_MREQ 1
#define __UAPI_DEF_IPPROTO_V6 1
+/* Definitions for xattr.h */
+#define __UAPI_DEF_XATTR 1
+
#endif /* __GLIBC__ */
#endif /* _UAPI_LIBC_COMPAT_H */
diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
index 40bbc04b6f8..c38355c1f3c 100644
--- a/include/uapi/linux/xattr.h
+++ b/include/uapi/linux/xattr.h
@@ -7,11 +7,18 @@
Copyright (c) 2001-2002 Silicon Graphics, Inc. All Rights Reserved.
Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
*/
+
+#include <linux/libc-compat.h>
+
#ifndef _UAPI_LINUX_XATTR_H
#define _UAPI_LINUX_XATTR_H
+#ifdef __UAPI_DEF_XATTR
+#define __USE_KERNEL_XATTR_DEFS
+
#define XATTR_CREATE 0x1 /* set value, fail if attr already exists */
#define XATTR_REPLACE 0x2 /* set value, fail if attr does not exist */
+#endif
/* Namespaces */
#define XATTR_OS2_PREFIX "os2."
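Together with the libc-compat.h hunk above, the intent is that a user-space file can include both the glibc and the kernel xattr headers: once <sys/xattr.h> has defined _SYS_XATTR_H, __UAPI_DEF_XATTR is set to 0 so the kernel header's XATTR_CREATE/XATTR_REPLACE definitions stay out of the way. An illustrative translation unit showing the include order this scheme is designed for:

    /* Illustrative user-space file: both headers included together,
     * without duplicate XATTR_CREATE/XATTR_REPLACE definitions. */
    #include <sys/xattr.h>      /* glibc's definitions win ...          */
    #include <linux/xattr.h>    /* ... the kernel's are suppressed here */

    static const int example_flag = XATTR_CREATE;   /* from the glibc header */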