Diffstat (limited to 'mm')
-rw-r--r--  mm/mempolicy.c       9
-rw-r--r--  mm/migrate.c         2
-rw-r--r--  mm/mmap.c            2
-rw-r--r--  mm/page-writeback.c  13
-rw-r--r--  mm/page_alloc.c      2
-rw-r--r--  mm/rmap.c            17
-rw-r--r--  mm/shmem.c           9
-rw-r--r--  mm/slab.c            2
-rw-r--r--  mm/tiny-shmem.c      2
-rw-r--r--  mm/truncate.c        4
-rw-r--r--  mm/vmscan.c          2
11 files changed, 39 insertions, 25 deletions
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index cf2a5381030..d76e8eb342d 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -321,15 +321,6 @@ static inline int check_pgd_range(struct vm_area_struct *vma,
return 0;
}
-/* Check if a vma is migratable */
-static inline int vma_migratable(struct vm_area_struct *vma)
-{
- if (vma->vm_flags & (
- VM_LOCKED|VM_IO|VM_HUGETLB|VM_PFNMAP|VM_RESERVED))
- return 0;
- return 1;
-}
-
/*
* Check if all pages in a range are on a set of nodes.
* If pagelist != NULL then isolate pages from the LRU and
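The helper removed here is not deleted outright: the mm/migrate.c hunk below starts calling vma_migratable(), so the definition presumably moves to a shared header (likely include/linux/mempolicy.h) where both files can see it. As a sketch, the helper as reproduced from the lines removed above:

/* A VMA whose pages are mlocked, I/O-mapped, hugetlb-backed,
 * PFN-mapped or reserved cannot have its pages migrated. */
static inline int vma_migratable(struct vm_area_struct *vma)
{
	if (vma->vm_flags &
	    (VM_LOCKED|VM_IO|VM_HUGETLB|VM_PFNMAP|VM_RESERVED))
		return 0;
	return 1;
}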
diff --git a/mm/migrate.c b/mm/migrate.c
index e9b161bde95..7a66ca25dc8 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -781,7 +781,7 @@ static int do_move_pages(struct mm_struct *mm, struct page_to_node *pm,
err = -EFAULT;
vma = find_vma(mm, pp->addr);
- if (!vma)
+ if (!vma || !vma_migratable(vma))
goto set_status;
page = follow_page(vma, pp->addr, FOLL_GET);
diff --git a/mm/mmap.c b/mm/mmap.c
index eb509ae7655..84f997da78d 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -299,6 +299,8 @@ static int browse_rb(struct rb_root *root)
printk("vm_end %lx < vm_start %lx\n", vma->vm_end, vma->vm_start);
i++;
pn = nd;
+ prev = vma->vm_start;
+ pend = vma->vm_end;
}
j = 0;
for (nd = pn; nd; nd = rb_prev(nd)) {
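Before the two added lines, browse_rb() never advanced prev and pend inside the loop, so its ordering checks compared every VMA against stale values. A sketch of the intended validation after the fix (declarations and the exact printk wording of the surrounding function are assumed):

/* Walk the rbtree in order and check that each VMA starts no earlier
 * than the previous VMA's start and does not overlap its end. */
prev = pend = 0;
for (nd = rb_first(root); nd; nd = rb_next(nd)) {
	struct vm_area_struct *vma;
	vma = rb_entry(nd, struct vm_area_struct, vm_rb);
	if (vma->vm_start < prev)
		printk("vm_start %lx < prev %lx\n", vma->vm_start, prev);
	if (vma->vm_start < pend)
		printk("vm_start %lx < pend %lx\n", vma->vm_start, pend);
	if (vma->vm_end < vma->vm_start)
		printk("vm_end %lx < vm_start %lx\n",
			vma->vm_end, vma->vm_start);
	prev = vma->vm_start;	/* the two lines this hunk adds */
	pend = vma->vm_end;
}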
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index f7e088f5a30..f469e3cd08e 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -296,11 +296,21 @@ void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
}
EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
-void throttle_vm_writeout(void)
+void throttle_vm_writeout(gfp_t gfp_mask)
{
long background_thresh;
long dirty_thresh;
+ if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO)) {
+ /*
+ * The caller might hold locks which can prevent IO completion
+ * or progress in the filesystem. So we cannot just sit here
+ * waiting for IO to complete.
+ */
+ congestion_wait(WRITE, HZ/10);
+ return;
+ }
+
for ( ; ; ) {
get_dirty_limits(&background_thresh, &dirty_thresh, NULL);
@@ -317,7 +327,6 @@ void throttle_vm_writeout(void)
}
}
-
/*
* writeback at least _min_pages, and keep writing until the amount of dirty
* memory is less than the background threshold, or until we're all clean.
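The new gfp_mask parameter lets throttle_vm_writeout() recognize callers that must not re-enter the filesystem or the block layer (GFP_NOFS/GFP_NOIO allocation contexts): blocking such a caller on writeback that it may itself be holding up can deadlock, so it gets a short congestion_wait() instead of the dirty-limit loop. The guard reduces to a mask test, sketched here with an illustrative function name:

/* Only enter the dirty-limit wait loop when the allocation context
 * allows both filesystem activity (__GFP_FS) and block I/O
 * (__GFP_IO); otherwise the caller may hold locks that I/O
 * completion depends on. */
static bool writeout_may_wait(gfp_t gfp_mask)
{
	return (gfp_mask & (__GFP_FS | __GFP_IO)) ==
	       (__GFP_FS | __GFP_IO);
}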
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 41737395bbc..353ce9039a8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -600,7 +600,7 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
1 << PG_referenced | 1 << PG_arch_1 |
- 1 << PG_checked | 1 << PG_mappedtodisk);
+ 1 << PG_owner_priv_1 | 1 << PG_mappedtodisk);
set_page_private(page, 0);
set_page_refcounted(page);
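This hunk only keeps prep_new_page() in step with a flag renumbering elsewhere in the series: PG_checked is presumably redefined as an alias of the new PG_owner_priv_1 bit, along these lines (bit number illustrative):

/* Presumed companion change in include/linux/page-flags.h:
 * filesystems keep using PG_checked, which now maps onto the
 * generic owner-private page flag. */
#define PG_owner_priv_1		17
#define PG_checked		PG_owner_priv_1

Clearing PG_owner_priv_1 here therefore still clears the old PG_checked state.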
diff --git a/mm/rmap.c b/mm/rmap.c
index 669acb22b57..22ed3f71a67 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -183,7 +183,7 @@ void __init anon_vma_init(void)
*/
static struct anon_vma *page_lock_anon_vma(struct page *page)
{
- struct anon_vma *anon_vma = NULL;
+ struct anon_vma *anon_vma;
unsigned long anon_mapping;
rcu_read_lock();
@@ -195,9 +195,16 @@ static struct anon_vma *page_lock_anon_vma(struct page *page)
anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
spin_lock(&anon_vma->lock);
+ return anon_vma;
out:
rcu_read_unlock();
- return anon_vma;
+ return NULL;
+}
+
+static void page_unlock_anon_vma(struct anon_vma *anon_vma)
+{
+ spin_unlock(&anon_vma->lock);
+ rcu_read_unlock();
}
/*
@@ -333,7 +340,8 @@ static int page_referenced_anon(struct page *page)
if (!mapcount)
break;
}
- spin_unlock(&anon_vma->lock);
+
+ page_unlock_anon_vma(anon_vma);
return referenced;
}
@@ -802,7 +810,8 @@ static int try_to_unmap_anon(struct page *page, int migration)
if (ret == SWAP_FAIL || !page_mapped(page))
break;
}
- spin_unlock(&anon_vma->lock);
+
+ page_unlock_anon_vma(anon_vma);
return ret;
}
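page_lock_anon_vma() used to drop the RCU read lock itself on success, leaving callers to release only the spinlock; after this change the lock and unlock operations pair symmetrically. A hypothetical caller illustrating the convention the two call-site hunks above converge on (list field names assumed from this era's struct anon_vma):

/* Both the anon_vma spinlock and the RCU read lock are taken by
 * page_lock_anon_vma() and released by page_unlock_anon_vma(). */
static void example_walk_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma = page_lock_anon_vma(page);
	struct vm_area_struct *vma;

	if (!anon_vma)
		return;	/* page is not anon or is no longer mapped */
	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		/* ... operate on each VMA mapping the page ... */
	}
	page_unlock_anon_vma(anon_vma);
}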
diff --git a/mm/shmem.c b/mm/shmem.c
index 882053031aa..b8c429a2d27 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -175,7 +175,7 @@ static inline void shmem_unacct_blocks(unsigned long flags, long pages)
vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}
-static struct super_operations shmem_ops;
+static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
@@ -1228,7 +1228,8 @@ failed:
return error;
}
-struct page *shmem_nopage(struct vm_area_struct *vma, unsigned long address, int *type)
+static struct page *shmem_nopage(struct vm_area_struct *vma,
+ unsigned long address, int *type)
{
struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
struct page *page = NULL;
@@ -1335,7 +1336,7 @@ out_nomem:
return retval;
}
-int shmem_mmap(struct file *file, struct vm_area_struct *vma)
+static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
file_accessed(file);
vma->vm_ops = &shmem_vm_ops;
@@ -2382,7 +2383,7 @@ static const struct inode_operations shmem_special_inode_operations = {
#endif
};
-static struct super_operations shmem_ops = {
+static const struct super_operations shmem_ops = {
.alloc_inode = shmem_alloc_inode,
.destroy_inode = shmem_destroy_inode,
#ifdef CONFIG_TMPFS
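Marking the ops table const (and the two methods static) is pure hardening: the structure is only read after initialization, so it can be placed in read-only memory and accidental writes become compile errors. The general pattern, with illustrative names:

/* Illustrative only: an operations table that is never written at
 * runtime can be declared both static and const; the methods named
 * here are assumed to be defined in the same file. */
static const struct super_operations example_ops = {
	.alloc_inode	= example_alloc_inode,
	.destroy_inode	= example_destroy_inode,
};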
diff --git a/mm/slab.c b/mm/slab.c
index 8fdaffa717e..57f7aa42006 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -4026,7 +4026,7 @@ void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
/**
* cache_reap - Reclaim memory from caches.
- * @unused: unused parameter
+ * @w: work descriptor
*
* Called from workqueue/eventd every few seconds.
* Purpose:
diff --git a/mm/tiny-shmem.c b/mm/tiny-shmem.c
index c7f6e1914bc..8803471593f 100644
--- a/mm/tiny-shmem.c
+++ b/mm/tiny-shmem.c
@@ -126,6 +126,7 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
return 0;
}
+#if 0
int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
file_accessed(file);
@@ -135,6 +136,7 @@ int shmem_mmap(struct file *file, struct vm_area_struct *vma)
return 0;
#endif
}
+#endif /* 0 */
#ifndef CONFIG_MMU
unsigned long shmem_get_unmapped_area(struct file *file,
diff --git a/mm/truncate.c b/mm/truncate.c
index ebf3fcb4115..0f4b6d18ab0 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -375,10 +375,10 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
pagevec_init(&pvec, 0);
next = start;
- while (next <= end && !ret && !wrapped &&
+ while (next <= end && !wrapped &&
pagevec_lookup(&pvec, mapping, next,
min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
- for (i = 0; !ret && i < pagevec_count(&pvec); i++) {
+ for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *page = pvec.pages[i];
pgoff_t page_index;
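Dropping the !ret tests means a single failed invalidation no longer aborts the scan: the loop now visits every page in the range and reports the recorded error only after the full pass. The control-flow change, sketched with a hypothetical helper name:

/* Record failures but keep scanning, so every page in the range is
 * still processed. */
for (i = 0; i < pagevec_count(&pvec); i++) {
	int err = invalidate_one_page(mapping, pvec.pages[i]);
	if (err)
		ret = err;	/* remember the failure, do not break */
}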
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 0655d5fe73e..db023e2ff38 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -952,7 +952,7 @@ static unsigned long shrink_zone(int priority, struct zone *zone,
}
}
- throttle_vm_writeout();
+ throttle_vm_writeout(sc->gfp_mask);
atomic_dec(&zone->reclaim_in_progress);
return nr_reclaimed;