author     Thomas Gleixner <tglx@linutronix.de>    2009-05-20 09:02:28 +0200
committer  Thomas Gleixner <tglx@linutronix.de>    2009-05-20 09:02:28 +0200
commit     521c180874dae86f675d23c4eade4dba8b1f2cc8 (patch)
tree       7509303da3a9a1b40a26f6811f321c89cd31737b /mm/vmscan.c
parent     f1a11e0576c7a73d759d05d776692b2b2d37172b (diff)
parent     64d1304a64477629cb16b75491a77bafe6f86963 (diff)
Merge branch 'core/urgent' into core/futexes
Merge reason: this branch was on a pre-rc1 base; merge it up to -rc6+
to get the latest upstream fixes.
Conflicts:
kernel/futex.c
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--   mm/vmscan.c   25
1 file changed, 16 insertions, 9 deletions
```diff
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 425244988bb..5fa3eda1f03 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -63,6 +63,9 @@ struct scan_control {
         /* Can mapped pages be reclaimed? */
         int may_unmap;
 
+        /* Can pages be swapped as part of reclaim? */
+        int may_swap;
+
         /* This context's SWAP_CLUSTER_MAX. If freeing memory for
          * suspend, we effectively ignore SWAP_CLUSTER_MAX.
          * In this context, it doesn't matter that we scan the
@@ -1380,7 +1383,7 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
         struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
 
         /* If we have no swap space, do not bother scanning anon pages. */
-        if (nr_swap_pages <= 0) {
+        if (!sc->may_swap || (nr_swap_pages <= 0)) {
                 percent[0] = 0;
                 percent[1] = 100;
                 return;
@@ -1468,7 +1471,7 @@ static void shrink_zone(int priority, struct zone *zone,
 
         for_each_evictable_lru(l) {
                 int file = is_file_lru(l);
-                int scan;
+                unsigned long scan;
 
                 scan = zone_nr_pages(zone, sc, l);
                 if (priority) {
@@ -1697,6 +1700,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
                 .may_writepage = !laptop_mode,
                 .swap_cluster_max = SWAP_CLUSTER_MAX,
                 .may_unmap = 1,
+                .may_swap = 1,
                 .swappiness = vm_swappiness,
                 .order = order,
                 .mem_cgroup = NULL,
@@ -1717,6 +1721,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
         struct scan_control sc = {
                 .may_writepage = !laptop_mode,
                 .may_unmap = 1,
+                .may_swap = !noswap,
                 .swap_cluster_max = SWAP_CLUSTER_MAX,
                 .swappiness = swappiness,
                 .order = 0,
@@ -1726,9 +1731,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
         };
         struct zonelist *zonelist;
 
-        if (noswap)
-                sc.may_unmap = 0;
-
         sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
                         (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
         zonelist = NODE_DATA(numa_node_id())->node_zonelists;
@@ -1767,6 +1769,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
         struct scan_control sc = {
                 .gfp_mask = GFP_KERNEL,
                 .may_unmap = 1,
+                .may_swap = 1,
                 .swap_cluster_max = SWAP_CLUSTER_MAX,
                 .swappiness = vm_swappiness,
                 .order = order,
@@ -1967,7 +1970,7 @@ static int kswapd(void *p)
         struct reclaim_state reclaim_state = {
                 .reclaimed_slab = 0,
         };
-        node_to_cpumask_ptr(cpumask, pgdat->node_id);
+        const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
 
         lockdep_set_current_reclaim_state(GFP_KERNEL);
 
@@ -2088,13 +2091,13 @@ static void shrink_all_zones(unsigned long nr_pages, int prio,
                                 nr_reclaimed += shrink_list(l, nr_to_scan, zone,
                                                                 sc, prio);
                                 if (nr_reclaimed >= nr_pages) {
-                                        sc->nr_reclaimed = nr_reclaimed;
+                                        sc->nr_reclaimed += nr_reclaimed;
                                         return;
                                 }
                         }
                 }
         }
-        sc->nr_reclaimed = nr_reclaimed;
+        sc->nr_reclaimed += nr_reclaimed;
 }
 
 /*
@@ -2115,6 +2118,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
                 .may_unmap = 0,
                 .may_writepage = 1,
                 .isolate_pages = isolate_pages_global,
+                .nr_reclaimed = 0,
         };
 
         current->reclaim_state = &reclaim_state;
@@ -2204,7 +2208,9 @@ static int __devinit cpu_callback(struct notifier_block *nfb,
         if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
                 for_each_node_state(nid, N_HIGH_MEMORY) {
                         pg_data_t *pgdat = NODE_DATA(nid);
-                        node_to_cpumask_ptr(mask, pgdat->node_id);
+                        const struct cpumask *mask;
+
+                        mask = cpumask_of_node(pgdat->node_id);
 
                         if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
                                 /* One of our CPUs online: restore mask */
@@ -2295,6 +2301,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
         struct scan_control sc = {
                 .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
                 .may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
+                .may_swap = 1,
                 .swap_cluster_max = max_t(unsigned long,
                                         nr_pages, SWAP_CLUSTER_MAX),
                 .gfp_mask = gfp_mask,
```
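For context, the central semantic change pulled in by this merge is that memory-cgroup "noswap" reclaim no longer clears sc.may_unmap (which also blocked reclaim of mapped file pages) and instead sets the new sc.may_swap flag, which get_scan_ratio() now checks alongside nr_swap_pages. The following standalone C sketch is not kernel code: the struct is trimmed to the two flags shown in the diff, nr_swap_pages is a stand-in global, and the 50/50 split stands in for the real recent_scanned/recent_rotated ratio; it only illustrates how the flag gates anonymous-LRU scanning.

```c
/*
 * Simplified, standalone sketch (NOT kernel code) of how the new
 * sc->may_swap flag gates anonymous-page scanning in get_scan_ratio():
 * if the caller forbids swap, or no swap space is left, all reclaim
 * pressure is directed at the file LRU.
 */
#include <stdio.h>

struct scan_control {
        int may_unmap;  /* can mapped pages be reclaimed? */
        int may_swap;   /* can pages be swapped as part of reclaim? (new) */
};

static long nr_swap_pages;      /* stand-in for the global swap counter */

static void get_scan_ratio(const struct scan_control *sc, unsigned int percent[2])
{
        /* Mirrors the changed check: !sc->may_swap || (nr_swap_pages <= 0) */
        if (!sc->may_swap || (nr_swap_pages <= 0)) {
                percent[0] = 0;   /* anon LRU gets no scan pressure */
                percent[1] = 100; /* file LRU gets all of it */
                return;
        }
        /* Placeholder split; the real code derives this from LRU stats. */
        percent[0] = 50;
        percent[1] = 50;
}

int main(void)
{
        unsigned int percent[2];

        /* memcg "noswap" reclaim now sets .may_swap = !noswap instead of
         * clearing .may_unmap, so mapped file pages stay reclaimable. */
        struct scan_control noswap_sc = { .may_unmap = 1, .may_swap = 0 };
        struct scan_control normal_sc = { .may_unmap = 1, .may_swap = 1 };

        nr_swap_pages = 1024;

        get_scan_ratio(&noswap_sc, percent);
        printf("noswap: anon=%u%% file=%u%%\n", percent[0], percent[1]);

        get_scan_ratio(&normal_sc, percent);
        printf("normal: anon=%u%% file=%u%%\n", percent[0], percent[1]);
        return 0;
}
```

Under these assumptions, the noswap case prints anon=0% file=100%, matching the intent of the get_scan_ratio() hunk above.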