Diffstat (limited to 'mm')
-rw-r--r--	mm/migrate.c	15
-rw-r--r--	mm/nommu.c	1
-rw-r--r--	mm/oom_kill.c	4
-rw-r--r--	mm/page-writeback.c	4
-rw-r--r--	mm/rmap.c	8
5 files changed, 26 insertions, 6 deletions
diff --git a/mm/migrate.c b/mm/migrate.c
index 7a66ca25dc8..a91ca00abeb 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -297,7 +297,7 @@ static int migrate_page_move_mapping(struct address_space *mapping,
 	void **pslot;
 
 	if (!mapping) {
-		/* Anonymous page */
+		/* Anonymous page without mapping */
 		if (page_count(page) != 1)
 			return -EAGAIN;
 		return 0;
@@ -333,6 +333,19 @@ static int migrate_page_move_mapping(struct address_space *mapping,
 	 */
 	__put_page(page);
 
+	/*
+	 * If moved to a different zone then also account
+	 * the page for that zone. Other VM counters will be
+	 * taken care of when we establish references to the
+	 * new page and drop references to the old page.
+	 *
+	 * Note that anonymous pages are accounted for
+	 * via NR_FILE_PAGES and NR_ANON_PAGES if they
+	 * are mapped to swap space.
+	 */
+	__dec_zone_page_state(page, NR_FILE_PAGES);
+	__inc_zone_page_state(newpage, NR_FILE_PAGES);
+
 	write_unlock_irq(&mapping->tree_lock);
 
 	return 0;
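
The __dec_zone_page_state()/__inc_zone_page_state() pair above transfers the
NR_FILE_PAGES count from the old page's zone to the new page's zone, so the
per-zone statistics stay balanced when a page migrates across zones. A minimal
userspace sketch of the same pairing; the struct and function names here are
hypothetical stand-ins, not kernel API:

	#include <stdio.h>

	struct zone_stats {
		long nr_file_pages;
	};

	/* Mirror the __dec/__inc pair: the decrement on the source zone
	 * and the increment on the destination must happen together, or
	 * the per-zone totals drift apart. */
	static void account_migration(struct zone_stats *from, struct zone_stats *to)
	{
		from->nr_file_pages--;
		to->nr_file_pages++;
	}

	int main(void)
	{
		struct zone_stats normal = { .nr_file_pages = 100 };
		struct zone_stats dma = { .nr_file_pages = 10 };

		account_migration(&normal, &dma);
		printf("normal=%ld dma=%ld\n",
		       normal.nr_file_pages, dma.nr_file_pages);
		return 0;
	}
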
diff --git a/mm/nommu.c b/mm/nommu.c
index cbbc1377481..1f60194d9b9 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -45,6 +45,7 @@ int heap_stack_gap = 0;
 
 EXPORT_SYMBOL(mem_map);
 EXPORT_SYMBOL(__vm_enough_memory);
+EXPORT_SYMBOL(num_physpages);
 
 /* list of shareable VMAs */
 struct rb_root nommu_vma_tree = RB_ROOT;
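
Exporting num_physpages lets loadable modules link against the symbol on nommu
builds, matching what MMU kernels already provide. A hedged sketch of a module
that would need the export; the module name and message are illustrative:

	#include <linux/module.h>
	#include <linux/mm.h>

	/* Without EXPORT_SYMBOL(num_physpages), loading this module fails
	 * with an unresolved-symbol error. */
	static int __init physpages_demo_init(void)
	{
		printk(KERN_INFO "physpages_demo: %lu physical pages\n",
		       num_physpages);
		return 0;
	}

	static void __exit physpages_demo_exit(void)
	{
	}

	module_init(physpages_demo_init);
	module_exit(physpages_demo_exit);
	MODULE_LICENSE("GPL");
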
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 2f3916986ab..3791edfffee 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -176,6 +176,8 @@ static inline int constrained_alloc(struct zonelist *zonelist, gfp_t gfp_mask)
 	struct zone **z;
 	nodemask_t nodes;
 	int node;
+
+	nodes_clear(nodes);
 	/* node has memory ? */
 	for_each_online_node(node)
 		if (NODE_DATA(node)->node_present_pages)
@@ -333,7 +335,7 @@ static int oom_kill_task(struct task_struct *p)
 	 */
 	do_each_thread(g, q) {
 		if (q->mm == mm && q->tgid != p->tgid)
-			force_sig(SIGKILL, p);
+			force_sig(SIGKILL, q);
 	} while_each_thread(g, q);
 
 	return 0;
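
Two fixes here: the on-stack nodemask was never zeroed before bits were OR-ed
into it, and the thread-kill loop signalled the original task p on every
iteration instead of the thread q it had just matched. A userspace sketch of
the nodemask half; the bitmask type and helper are illustrative stand-ins for
the kernel's nodemask API:

	#include <stdio.h>
	#include <string.h>

	#define MAX_NODES 64
	#define BITS_PER_LONG (8 * sizeof(unsigned long))

	struct nodemask {
		unsigned long bits[MAX_NODES / BITS_PER_LONG];
	};

	static void node_set(int node, struct nodemask *m)
	{
		m->bits[node / BITS_PER_LONG] |= 1UL << (node % BITS_PER_LONG);
	}

	int main(void)
	{
		struct nodemask nodes;

		/* The fix: clear the stack garbage first, as nodes_clear()
		 * does, before any bits are set. */
		memset(&nodes, 0, sizeof(nodes));
		node_set(3, &nodes);
		printf("word0 = %#lx\n", nodes.bits[0]);
		return 0;
	}
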
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index f469e3cd08e..a794945fd19 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -67,12 +67,12 @@ static inline long sync_writeback_pages(void)
 /*
  * Start background writeback (via pdflush) at this percentage
  */
-int dirty_background_ratio = 10;
+int dirty_background_ratio = 5;
 
 /*
  * The generator of dirty data starts writeback at this percentage
  */
-int vm_dirty_ratio = 40;
+int vm_dirty_ratio = 10;
 
 /*
  * The interval between `kupdate'-style writebacks, in jiffies
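
Both tunables are percentages of dirtyable memory: pdflush starts background
writeback once dirty_background_ratio percent of pages are dirty, and writers
are throttled at vm_dirty_ratio percent. A sketch of roughly the percentage
arithmetic get_dirty_limits() performs; the memory size and variable names
are illustrative:

	#include <stdio.h>

	int main(void)
	{
		unsigned long available_pages = 262144; /* e.g. 1GB of 4KB pages */
		int dirty_background_ratio = 5;
		int vm_dirty_ratio = 10;

		/* Background writeback kicks in here... */
		unsigned long background =
			available_pages * dirty_background_ratio / 100;
		/* ...and dirtying processes are throttled here. */
		unsigned long throttle = available_pages * vm_dirty_ratio / 100;

		printf("background at %lu pages, throttle at %lu pages\n",
		       background, throttle);
		return 0;
	}
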
diff --git a/mm/rmap.c b/mm/rmap.c
index b82146e6dfc..59da5b734c8 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -498,8 +498,10 @@ int page_mkclean(struct page *page)
 		struct address_space *mapping = page_mapping(page);
 		if (mapping)
 			ret = page_mkclean_file(mapping, page);
-		if (page_test_and_clear_dirty(page))
+		if (page_test_dirty(page)) {
+			page_clear_dirty(page);
 			ret = 1;
+		}
 	}
 
 	return ret;
@@ -605,8 +607,10 @@ void page_remove_rmap(struct page *page, struct vm_area_struct *vma)
 		 * Leaving it set also helps swapoff to reinstate ptes
 		 * faster for those pages still in swapcache.
 		 */
-		if (page_test_and_clear_dirty(page))
+		if (page_test_dirty(page)) {
+			page_clear_dirty(page);
 			set_page_dirty(page);
+		}
 		__dec_zone_page_state(page,
 				PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
 	}
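
Both rmap hunks replace the combined page_test_and_clear_dirty() with separate
page_test_dirty()/page_clear_dirty() calls, so the dirty state is only cleared
once the caller has decided what to do with it. A userspace sketch of the
split's shape; the fake page type and helpers are illustrative:

	#include <stdbool.h>
	#include <stdio.h>

	struct fake_page {
		bool dirty;
	};

	static bool page_test_dirty_sketch(struct fake_page *p)
	{
		return p->dirty;
	}

	static void page_clear_dirty_sketch(struct fake_page *p)
	{
		p->dirty = false;
	}

	int main(void)
	{
		struct fake_page page = { .dirty = true };

		/* Same shape as the patched page_mkclean(): test first,
		 * clear explicitly, then record the result. */
		if (page_test_dirty_sketch(&page)) {
			page_clear_dirty_sketch(&page);
			printf("page was dirty; state transferred\n");
		}
		return 0;
	}
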