Diffstat (limited to 'mm')
-rw-r--r--  mm/mlock.c            7
-rw-r--r--  mm/page-writeback.c  15
-rw-r--r--  mm/page_alloc.c      27
-rw-r--r--  mm/page_io.c          2
-rw-r--r--  mm/shmem.c           43
-rw-r--r--  mm/slab.c             1
-rw-r--r--  mm/slob.c            44
-rw-r--r--  mm/slub.c            33
-rw-r--r--  mm/swapfile.c         4
-rw-r--r--  mm/util.c            20
-rw-r--r--  mm/vmalloc.c         20
-rw-r--r--  mm/vmscan.c          32
12 files changed, 157 insertions, 91 deletions
diff --git a/mm/mlock.c b/mm/mlock.c
index 037161d61b4..cbe9e0581b7 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -660,7 +660,7 @@ void *alloc_locked_buffer(size_t size)
return buffer;
}
-void free_locked_buffer(void *buffer, size_t size)
+void release_locked_buffer(void *buffer, size_t size)
{
unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
@@ -670,6 +670,11 @@ void free_locked_buffer(void *buffer, size_t size)
current->mm->locked_vm -= pgsz;
up_write(&current->mm->mmap_sem);
+}
+
+void free_locked_buffer(void *buffer, size_t size)
+{
+ release_locked_buffer(buffer, size);
kfree(buffer);
}
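The mlock.c hunks split the old free_locked_buffer() in two, so a caller can drop the locked_vm accounting without also freeing the buffer. A minimal caller sketch under that assumption (the keep-the-buffer condition is hypothetical):

	void *buf = alloc_locked_buffer(PAGE_SIZE);
	if (buf) {
		/* ... use the mlocked buffer ... */
		if (buffer_still_needed)			/* hypothetical flag */
			release_locked_buffer(buf, PAGE_SIZE);	/* undo accounting only */
		else
			free_locked_buffer(buf, PAGE_SIZE);	/* accounting + kfree() */
	}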
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 6106a5c7ed4..74dc57c7434 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -240,7 +240,7 @@ void bdi_writeout_inc(struct backing_dev_info *bdi)
}
EXPORT_SYMBOL_GPL(bdi_writeout_inc);
-static inline void task_dirty_inc(struct task_struct *tsk)
+void task_dirty_inc(struct task_struct *tsk)
{
prop_inc_single(&vm_dirties, &tsk->dirties);
}
@@ -1079,7 +1079,7 @@ continue_unlock:
pagevec_release(&pvec);
cond_resched();
}
- if (!cycled) {
+ if (!cycled && !done) {
/*
* range_cyclic:
* We hit the last page and there is more work to be done: wrap
@@ -1230,6 +1230,7 @@ int __set_page_dirty_nobuffers(struct page *page)
__inc_zone_page_state(page, NR_FILE_DIRTY);
__inc_bdi_stat(mapping->backing_dev_info,
BDI_RECLAIMABLE);
+ task_dirty_inc(current);
task_io_account_write(PAGE_CACHE_SIZE);
}
radix_tree_tag_set(&mapping->page_tree,
@@ -1262,7 +1263,7 @@ EXPORT_SYMBOL(redirty_page_for_writepage);
* If the mapping doesn't provide a set_page_dirty a_op, then
* just fall through and assume that it wants buffer_heads.
*/
-static int __set_page_dirty(struct page *page)
+int set_page_dirty(struct page *page)
{
struct address_space *mapping = page_mapping(page);
@@ -1280,14 +1281,6 @@ static int __set_page_dirty(struct page *page)
}
return 0;
}
-
-int set_page_dirty(struct page *page)
-{
- int ret = __set_page_dirty(page);
- if (ret)
- task_dirty_inc(current);
- return ret;
-}
EXPORT_SYMBOL(set_page_dirty);
/*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5675b307385..5c44ed49ca9 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2989,7 +2989,7 @@ static int __meminit next_active_region_index_in_nid(int index, int nid)
* was used and there are no special requirements, this is a convenient
* alternative
*/
-int __meminit early_pfn_to_nid(unsigned long pfn)
+int __meminit __early_pfn_to_nid(unsigned long pfn)
{
int i;
@@ -3000,10 +3000,33 @@ int __meminit early_pfn_to_nid(unsigned long pfn)
if (start_pfn <= pfn && pfn < end_pfn)
return early_node_map[i].nid;
}
+ /* This is a memory hole */
+ return -1;
+}
+#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
+
+int __meminit early_pfn_to_nid(unsigned long pfn)
+{
+ int nid;
+ nid = __early_pfn_to_nid(pfn);
+ if (nid >= 0)
+ return nid;
+ /* just returns 0 */
return 0;
}
-#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
+
+#ifdef CONFIG_NODES_SPAN_OTHER_NODES
+bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
+{
+ int nid;
+
+ nid = __early_pfn_to_nid(pfn);
+ if (nid >= 0 && nid != node)
+ return false;
+ return true;
+}
+#endif
/* Basic iterator support to walk early_node_map[] */
#define for_each_active_range_index_in_nid(i, nid) \
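With the page_alloc.c split, __early_pfn_to_nid() reports memory holes as -1, early_pfn_to_nid() keeps the old default-to-node-0 behaviour, and early_pfn_in_nid() treats a hole as belonging to any node. A hedged sketch of the distinction a caller can now make:

	int nid = __early_pfn_to_nid(pfn);
	if (nid < 0) {
		/* pfn falls in a hole of early_node_map[]: no node owns it;
		 * early_pfn_to_nid(pfn) would have papered over this with 0 */
	} else {
		/* pfn is backed by memory on node nid */
	}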
diff --git a/mm/page_io.c b/mm/page_io.c
index dc6ce0afbde..3023c475e04 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -111,7 +111,7 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
goto out;
}
if (wbc->sync_mode == WB_SYNC_ALL)
- rw |= (1 << BIO_RW_SYNC);
+ rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
count_vm_event(PSWPOUT);
set_page_writeback(page);
unlock_page(page);
diff --git a/mm/shmem.c b/mm/shmem.c
index 19d566ccdee..4103a239ce8 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -169,13 +169,13 @@ static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
*/
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
- return (flags & VM_ACCOUNT) ?
- security_vm_enough_memory_kern(VM_ACCT(size)) : 0;
+ return (flags & VM_NORESERVE) ?
+ 0 : security_vm_enough_memory_kern(VM_ACCT(size));
}
static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
- if (flags & VM_ACCOUNT)
+ if (!(flags & VM_NORESERVE))
vm_unacct_memory(VM_ACCT(size));
}
@@ -187,13 +187,13 @@ static inline void shmem_unacct_size(unsigned long flags, loff_t size)
*/
static inline int shmem_acct_block(unsigned long flags)
{
- return (flags & VM_ACCOUNT) ?
- 0 : security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE));
+ return (flags & VM_NORESERVE) ?
+ security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE)) : 0;
}
static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
- if (!(flags & VM_ACCOUNT))
+ if (flags & VM_NORESERVE)
vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}
@@ -1515,8 +1515,8 @@ static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
return 0;
}
-static struct inode *
-shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
+static struct inode *shmem_get_inode(struct super_block *sb, int mode,
+ dev_t dev, unsigned long flags)
{
struct inode *inode;
struct shmem_inode_info *info;
@@ -1537,6 +1537,7 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
info = SHMEM_I(inode);
memset(info, 0, (char *)inode - (char *)info);
spin_lock_init(&info->lock);
+ info->flags = flags & VM_NORESERVE;
INIT_LIST_HEAD(&info->swaplist);
switch (mode & S_IFMT) {
@@ -1779,9 +1780,10 @@ static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
static int
shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
{
- struct inode *inode = shmem_get_inode(dir->i_sb, mode, dev);
+ struct inode *inode;
int error = -ENOSPC;
+ inode = shmem_get_inode(dir->i_sb, mode, dev, VM_NORESERVE);
if (inode) {
error = security_inode_init_security(inode, dir, NULL, NULL,
NULL);
@@ -1920,7 +1922,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
if (len > PAGE_CACHE_SIZE)
return -ENAMETOOLONG;
- inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0);
+ inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
if (!inode)
return -ENOSPC;
@@ -2332,7 +2334,7 @@ static int shmem_fill_super(struct super_block *sb,
sb->s_flags |= MS_POSIXACL;
#endif
- inode = shmem_get_inode(sb, S_IFDIR | sbinfo->mode, 0);
+ inode = shmem_get_inode(sb, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
if (!inode)
goto failed;
inode->i_uid = sbinfo->uid;
@@ -2574,12 +2576,12 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
return 0;
}
-#define shmem_file_operations ramfs_file_operations
-#define shmem_vm_ops generic_file_vm_ops
-#define shmem_get_inode ramfs_get_inode
-#define shmem_acct_size(a, b) 0
-#define shmem_unacct_size(a, b) do {} while (0)
-#define SHMEM_MAX_BYTES LLONG_MAX
+#define shmem_vm_ops generic_file_vm_ops
+#define shmem_file_operations ramfs_file_operations
+#define shmem_get_inode(sb, mode, dev, flags) ramfs_get_inode(sb, mode, dev)
+#define shmem_acct_size(flags, size) 0
+#define shmem_unacct_size(flags, size) do {} while (0)
+#define SHMEM_MAX_BYTES LLONG_MAX
#endif /* CONFIG_SHMEM */
@@ -2589,7 +2591,7 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
* shmem_file_setup - get an unlinked file living in tmpfs
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
* @size: size to be set for the file
- * @flags: vm_flags
+ * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
*/
struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
{
@@ -2623,13 +2625,10 @@ struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
goto put_dentry;
error = -ENOSPC;
- inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0);
+ inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0, flags);
if (!inode)
goto close_file;
-#ifdef CONFIG_SHMEM
- SHMEM_I(inode)->flags = (flags & VM_NORESERVE) ? 0 : VM_ACCOUNT;
-#endif
d_instantiate(dentry, inode);
inode->i_size = size;
inode->i_nlink = 0; /* It is unlinked */
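The shmem.c hunks invert the accounting tests from VM_ACCOUNT to VM_NORESERVE and push the flag down into shmem_get_inode(): by default the full object size is charged up front, while VM_NORESERVE switches to per-block charging as pages are instantiated. A hedged sketch of the resulting shmem_file_setup() contract (names and sizes are illustrative):

	/* default: whole size pre-accounted at setup time */
	struct file *f1 = shmem_file_setup("acct", 1 << 20, 0);
	/* VM_NORESERVE: accounted block by block as pages appear */
	struct file *f2 = shmem_file_setup("lazy", 1 << 20, VM_NORESERVE);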
diff --git a/mm/slab.c b/mm/slab.c
index ddc41f337d5..4d00855629c 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -4457,3 +4457,4 @@ size_t ksize(const void *objp)
return obj_size(virt_to_cache(objp));
}
+EXPORT_SYMBOL(ksize);
diff --git a/mm/slob.c b/mm/slob.c
index bf7e8fc3aed..0bfa680a898 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -126,9 +126,9 @@ static LIST_HEAD(free_slob_medium);
static LIST_HEAD(free_slob_large);
/*
- * slob_page: True for all slob pages (false for bigblock pages)
+ * is_slob_page: True for all slob pages (false for bigblock pages)
*/
-static inline int slob_page(struct slob_page *sp)
+static inline int is_slob_page(struct slob_page *sp)
{
return PageSlobPage((struct page *)sp);
}
@@ -143,6 +143,11 @@ static inline void clear_slob_page(struct slob_page *sp)
__ClearPageSlobPage((struct page *)sp);
}
+static inline struct slob_page *slob_page(const void *addr)
+{
+ return (struct slob_page *)virt_to_page(addr);
+}
+
/*
* slob_page_free: true for pages on free_slob_pages list.
*/
@@ -230,7 +235,7 @@ static int slob_last(slob_t *s)
return !((unsigned long)slob_next(s) & ~PAGE_MASK);
}
-static void *slob_new_page(gfp_t gfp, int order, int node)
+static void *slob_new_pages(gfp_t gfp, int order, int node)
{
void *page;
@@ -247,12 +252,17 @@ static void *slob_new_page(gfp_t gfp, int order, int node)
return page_address(page);
}
+static void slob_free_pages(void *b, int order)
+{
+ free_pages((unsigned long)b, order);
+}
+
/*
* Allocate a slob block within a given slob_page sp.
*/
static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
{
- slob_t *prev, *cur, *aligned = 0;
+ slob_t *prev, *cur, *aligned = NULL;
int delta = 0, units = SLOB_UNITS(size);
for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
@@ -349,10 +359,10 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
/* Not enough space: must allocate a new page */
if (!b) {
- b = slob_new_page(gfp & ~__GFP_ZERO, 0, node);
+ b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
if (!b)
- return 0;
- sp = (struct slob_page *)virt_to_page(b);
+ return NULL;
+ sp = slob_page(b);
set_slob_page(sp);
spin_lock_irqsave(&slob_lock, flags);
@@ -384,7 +394,7 @@ static void slob_free(void *block, int size)
return;
BUG_ON(!size);
- sp = (struct slob_page *)virt_to_page(block);
+ sp = slob_page(block);
units = SLOB_UNITS(size);
spin_lock_irqsave(&slob_lock, flags);
@@ -393,10 +403,11 @@ static void slob_free(void *block, int size)
/* Go directly to page allocator. Do not pass slob allocator */
if (slob_page_free(sp))
clear_slob_page_free(sp);
+ spin_unlock_irqrestore(&slob_lock, flags);
clear_slob_page(sp);
free_slob_page(sp);
free_page((unsigned long)b);
- goto out;
+ return;
}
if (!slob_page_free(sp)) {
@@ -476,7 +487,7 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
} else {
void *ret;
- ret = slob_new_page(gfp | __GFP_COMP, get_order(size), node);
+ ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
if (ret) {
struct page *page;
page = virt_to_page(ret);
@@ -494,8 +505,8 @@ void kfree(const void *block)
if (unlikely(ZERO_OR_NULL_PTR(block)))
return;
- sp = (struct slob_page *)virt_to_page(block);
- if (slob_page(sp)) {
+ sp = slob_page(block);
+ if (is_slob_page(sp)) {
int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
unsigned int *m = (unsigned int *)(block - align);
slob_free(m, *m + align);
@@ -513,14 +524,15 @@ size_t ksize(const void *block)
if (unlikely(block == ZERO_SIZE_PTR))
return 0;
- sp = (struct slob_page *)virt_to_page(block);
- if (slob_page(sp)) {
+ sp = slob_page(block);
+ if (is_slob_page(sp)) {
int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
unsigned int *m = (unsigned int *)(block - align);
return SLOB_UNITS(*m) * SLOB_UNIT;
} else
return sp->page.private;
}
+EXPORT_SYMBOL(ksize);
struct kmem_cache {
unsigned int size, align;
@@ -572,7 +584,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
if (c->size < PAGE_SIZE)
b = slob_alloc(c->size, flags, c->align, node);
else
- b = slob_new_page(flags, get_order(c->size), node);
+ b = slob_new_pages(flags, get_order(c->size), node);
if (c->ctor)
c->ctor(b);
@@ -586,7 +598,7 @@ static void __kmem_cache_free(void *b, int size)
if (size < PAGE_SIZE)
slob_free(b, size);
else
- free_pages((unsigned long)b, get_order(size));
+ slob_free_pages(b, get_order(size));
}
static void kmem_rcu_free(struct rcu_head *head)
diff --git a/mm/slub.c b/mm/slub.c
index 77268d18e78..c65a4edafc3 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -374,14 +374,8 @@ static struct track *get_track(struct kmem_cache *s, void *object,
static void set_track(struct kmem_cache *s, void *object,
enum track_item alloc, unsigned long addr)
{
- struct track *p;
+ struct track *p = get_track(s, object, alloc);
- if (s->offset)
- p = object + s->offset + sizeof(void *);
- else
- p = object + s->inuse;
-
- p += alloc;
if (addr) {
p->addr = addr;
p->cpu = smp_processor_id();
@@ -1724,7 +1718,7 @@ static __always_inline void slab_free(struct kmem_cache *s,
c = get_cpu_slab(s, smp_processor_id());
debug_check_no_locks_freed(object, c->objsize);
if (!(s->flags & SLAB_DEBUG_OBJECTS))
- debug_check_no_obj_freed(object, s->objsize);
+ debug_check_no_obj_freed(object, c->objsize);
if (likely(page == c->page && c->node >= 0)) {
object[c->offset] = c->freelist;
c->freelist = object;
@@ -1844,6 +1838,7 @@ static inline int calculate_order(int size)
int order;
int min_objects;
int fraction;
+ int max_objects;
/*
* Attempt to find best configuration for a slab. This
@@ -1856,6 +1851,9 @@ static inline int calculate_order(int size)
min_objects = slub_min_objects;
if (!min_objects)
min_objects = 4 * (fls(nr_cpu_ids) + 1);
+ max_objects = (PAGE_SIZE << slub_max_order)/size;
+ min_objects = min(min_objects, max_objects);
+
while (min_objects > 1) {
fraction = 16;
while (fraction >= 4) {
@@ -1865,7 +1863,7 @@ static inline int calculate_order(int size)
return order;
fraction /= 2;
}
- min_objects /= 2;
+ min_objects --;
}
/*
@@ -2478,7 +2476,7 @@ EXPORT_SYMBOL(kmem_cache_destroy);
* Kmalloc subsystem
*******************************************************************/
-struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT] __cacheline_aligned;
EXPORT_SYMBOL(kmalloc_caches);
static int __init setup_slub_min_order(char *str)
@@ -2540,7 +2538,7 @@ panic:
}
#ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
+static struct kmem_cache *kmalloc_caches_dma[SLUB_PAGE_SHIFT];
static void sysfs_add_func(struct work_struct *w)
{
@@ -2661,7 +2659,7 @@ void *__kmalloc(size_t size, gfp_t flags)
{
struct kmem_cache *s;
- if (unlikely(size > PAGE_SIZE))
+ if (unlikely(size > SLUB_MAX_SIZE))
return kmalloc_large(size, flags);
s = get_slab(size, flags);
@@ -2689,7 +2687,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
struct kmem_cache *s;
- if (unlikely(size > PAGE_SIZE))
+ if (unlikely(size > SLUB_MAX_SIZE))
return kmalloc_large_node(size, flags, node);
s = get_slab(size, flags);
@@ -2739,6 +2737,7 @@ size_t ksize(const void *object)
*/
return s->size;
}
+EXPORT_SYMBOL(ksize);
void kfree(const void *x)
{
@@ -2988,7 +2987,7 @@ void __init kmem_cache_init(void)
caches++;
}
- for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) {
+ for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
create_kmalloc_cache(&kmalloc_caches[i],
"kmalloc", 1 << i, GFP_KERNEL);
caches++;
@@ -3025,7 +3024,7 @@ void __init kmem_cache_init(void)
slab_state = UP;
/* Provide the correct kmalloc names now that the caches are up */
- for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++)
+ for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
kmalloc_caches[i]. name =
kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
@@ -3225,7 +3224,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
{
struct kmem_cache *s;
- if (unlikely(size > PAGE_SIZE))
+ if (unlikely(size > SLUB_MAX_SIZE))
return kmalloc_large(size, gfpflags);
s = get_slab(size, gfpflags);
@@ -3241,7 +3240,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
{
struct kmem_cache *s;
- if (unlikely(size > PAGE_SIZE))
+ if (unlikely(size > SLUB_MAX_SIZE))
return kmalloc_large_node(size, gfpflags, node);
s = get_slab(size, gfpflags);
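Besides raising the kmalloc_large() cutoff from PAGE_SIZE to SLUB_MAX_SIZE (with SLUB_PAGE_SHIFT sizing kmalloc_caches[] to match), the calculate_order() hunk stops the search from demanding more objects than a maximal slab can hold: max_objects caps the CPU-derived min_objects, and the target is then lowered one object at a time instead of halved, so a feasible dense order is not skipped. Worked example, assuming PAGE_SIZE = 4096, slub_max_order = 3 and 16 CPUs:

	min_objects = 4 * (fls(16) + 1) = 24;	/* CPU-derived target */
	max_objects = (4096 << 3) / 4096 = 8;	/* fits in one order-3 slab */
	min_objects = min(24, 8) = 8;		/* clamp before the search */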
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 7e6304dfafa..312fafe0ab6 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -635,7 +635,7 @@ int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
if (!bdev) {
if (bdev_p)
- *bdev_p = sis->bdev;
+ *bdev_p = bdget(sis->bdev->bd_dev);
spin_unlock(&swap_lock);
return i;
@@ -647,7 +647,7 @@ int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
struct swap_extent, list);
if (se->start_block == offset) {
if (bdev_p)
- *bdev_p = sis->bdev;
+ *bdev_p = bdget(sis->bdev->bd_dev);
spin_unlock(&swap_lock);
bdput(bdev);
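The swapfile.c fix makes swap_type_of() hand back a referenced block device via bdget(), so the reference the caller later drops is actually held. A hedged caller sketch matching the hibernation-style usage:

	struct block_device *bdev;
	int type = swap_type_of(device, offset, &bdev);
	if (type >= 0) {
		/* bdev now carries a bdget() reference of its own */
		/* ... use bdev ... */
		bdput(bdev);	/* balanced against the bdget() above */
	}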
diff --git a/mm/util.c b/mm/util.c
index cb00b748ce4..37eaccdf305 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -129,6 +129,26 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags)
}
EXPORT_SYMBOL(krealloc);
+/**
+ * kzfree - like kfree but zero memory
+ * @p: object to free memory of
+ *
+ * The memory of the object @p points to is zeroed before freed.
+ * If @p is %NULL, kzfree() does nothing.
+ */
+void kzfree(const void *p)
+{
+ size_t ks;
+ void *mem = (void *)p;
+
+ if (unlikely(ZERO_OR_NULL_PTR(mem)))
+ return;
+ ks = ksize(mem);
+ memset(mem, 0, ks);
+ kfree(mem);
+}
+EXPORT_SYMBOL(kzfree);
+
/*
* strndup_user - duplicate an existing string from user space
* @s: The string to duplicate
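kzfree() exists for buffers that held secrets: it clears the whole allocation before freeing, using ksize() so any slack the allocator rounded up to is wiped as well, which is why ksize() gains an EXPORT_SYMBOL in slab, slob and slub above. A minimal sketch:

	u8 *key = kmalloc(24, GFP_KERNEL);	/* ksize() may report 32 here */
	if (key) {
		get_random_bytes(key, 24);
		/* ... use the key material ... */
		kzfree(key);	/* memset(key, 0, ksize(key)) + kfree(key) */
	}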
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 75f49d312e8..520a7598026 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -323,6 +323,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
unsigned long addr;
int purged = 0;
+ BUG_ON(!size);
BUG_ON(size & ~PAGE_MASK);
va = kmalloc_node(sizeof(struct vmap_area),
@@ -334,6 +335,9 @@ retry:
addr = ALIGN(vstart, align);
spin_lock(&vmap_area_lock);
+ if (addr + size - 1 < addr)
+ goto overflow;
+
/* XXX: could have a last_hole cache */
n = vmap_area_root.rb_node;
if (n) {
@@ -365,6 +369,8 @@ retry:
while (addr + size > first->va_start && addr + size <= vend) {
addr = ALIGN(first->va_end + PAGE_SIZE, align);
+ if (addr + size - 1 < addr)
+ goto overflow;
n = rb_next(&first->rb_node);
if (n)
@@ -375,6 +381,7 @@ retry:
}
found:
if (addr + size > vend) {
+overflow:
spin_unlock(&vmap_area_lock);
if (!purged) {
purge_vmap_area_lazy();
@@ -498,6 +505,7 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
static DEFINE_SPINLOCK(purge_lock);
LIST_HEAD(valist);
struct vmap_area *va;
+ struct vmap_area *n_va;
int nr = 0;
/*
@@ -537,7 +545,7 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
if (nr) {
spin_lock(&vmap_area_lock);
- list_for_each_entry(va, &valist, purge_list)
+ list_for_each_entry_safe(va, n_va, &valist, purge_list)
__free_vmap_area(va);
spin_unlock(&vmap_area_lock);
}
@@ -1012,6 +1020,8 @@ void __init vmalloc_init(void)
void unmap_kernel_range(unsigned long addr, unsigned long size)
{
unsigned long end = addr + size;
+
+ flush_cache_vunmap(addr, end);
vunmap_page_range(addr, end);
flush_tlb_kernel_range(addr, end);
}
@@ -1106,6 +1116,14 @@ struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
}
EXPORT_SYMBOL_GPL(__get_vm_area);
+struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
+ unsigned long start, unsigned long end,
+ void *caller)
+{
+ return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
+ caller);
+}
+
/**
* get_vm_area - reserve a contiguous kernel virtual area
* @size: size of the area
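The alloc_vmap_area() hunks guard against address wrap-around: on 32-bit, addr + size can wrap past zero and slip under vend, so the new test addr + size - 1 < addr bails out to the purge-and-retry path first. Worked example on a 32-bit unsigned long:

	addr = 0xfffff000; size = 0x2000;	/* two pages near the top */
	addr + size     = 0x00001000;		/* wrapped: looks <= vend */
	addr + size - 1 = 0x00000fff < addr;	/* overflow detected */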
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 9a27c44aa32..56ddf41149e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1262,7 +1262,6 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
* Move the pages to the [file or anon] inactive list.
*/
pagevec_init(&pvec, 1);
- pgmoved = 0;
lru = LRU_BASE + file * LRU_FILE;
spin_lock_irq(&zone->lru_lock);
@@ -1274,6 +1273,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
*/
reclaim_stat->recent_rotated[!!file] += pgmoved;
+ pgmoved = 0;
while (!list_empty(&l_inactive)) {
page = lru_to_page(&l_inactive);
prefetchw_prev_lru_page(page, &l_inactive, flags);
@@ -1469,7 +1469,7 @@ static void shrink_zone(int priority, struct zone *zone,
int file = is_file_lru(l);
int scan;
- scan = zone_page_state(zone, NR_LRU_BASE + l);
+ scan = zone_nr_pages(zone, sc, l);
if (priority) {
scan >>= priority;
scan = (scan * percent[file]) / 100;
@@ -2057,31 +2057,31 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
int pass, struct scan_control *sc)
{
struct zone *zone;
- unsigned long nr_to_scan, ret = 0;
- enum lru_list l;
+ unsigned long ret = 0;
for_each_zone(zone) {
+ enum lru_list l;
if (!populated_zone(zone))
continue;
-
if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY)
continue;
for_each_evictable_lru(l) {
+ enum zone_stat_item ls = NR_LRU_BASE + l;
+ unsigned long lru_pages = zone_page_state(zone, ls);
+
/* For pass = 0, we don't shrink the active list */
- if (pass == 0 &&
- (l == LRU_ACTIVE || l == LRU_ACTIVE_FILE))
+ if (pass == 0 && (l == LRU_ACTIVE_ANON ||
+ l == LRU_ACTIVE_FILE))
continue;
- zone->lru[l].nr_scan +=
- (zone_page_state(zone, NR_LRU_BASE + l)
- >> prio) + 1;
+ zone->lru[l].nr_scan += (lru_pages >> prio) + 1;
if (zone->lru[l].nr_scan >= nr_pages || pass > 3) {
+ unsigned long nr_to_scan;
+
zone->lru[l].nr_scan = 0;
- nr_to_scan = min(nr_pages,
- zone_page_state(zone,
- NR_LRU_BASE + l));
+ nr_to_scan = min(nr_pages, lru_pages);
ret += shrink_list(l, nr_to_scan, zone,
sc, prio);
if (ret >= nr_pages)
@@ -2089,7 +2089,6 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
}
}
}
-
return ret;
}
@@ -2112,7 +2111,6 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
.may_swap = 0,
.swap_cluster_max = nr_pages,
.may_writepage = 1,
- .swappiness = vm_swappiness,
.isolate_pages = isolate_pages_global,
};
@@ -2146,10 +2144,8 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
int prio;
/* Force reclaiming mapped pages in the passes #3 and #4 */
- if (pass > 2) {
+ if (pass > 2)
sc.may_swap = 1;
- sc.swappiness = 100;
- }
for (prio = DEF_PRIORITY; prio >= 0; prio--) {
unsigned long nr_to_scan = nr_pages - ret;