author	Hillf Danton <dhillf@gmail.com>	2012-03-21 16:34:02 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-03-21 17:54:57 -0700
commit	d563c0501bf8702b9b683206c09b9defb37d8a8a (patch)
tree	ac3c870c1885337e340d807cd8b5dc0ad8eb0315 /mm
parent	978ea78b65794ef07eb66b9946064dea66b52554 (diff)
vmscan: handle isolated pages with lru lock released
When shrinking the inactive lru list, isolated pages are queued on a locally private list, so the lock hold time can be reduced if pages are counted without lock protection.

To achieve that, first, the reclaim stat update is delayed until the putback stage, after the lru lock has been reacquired. Second, operations on the vm and zone stats are now performed with preemption disabled, since they are per-cpu operations.

Signed-off-by: Hillf Danton <dhillf@gmail.com>
Acked-by: Hugh Dickins <hughd@google.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
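The locking pattern this patch applies is more general than vmscan: isolate work items onto a private list inside the critical section, do the per-item work with the lock released, and reacquire the lock only to publish aggregate statistics. Below is a minimal userspace sketch of that pattern, using a pthread mutex in place of zone->lru_lock and a plain counter in place of the reclaim stats; all names in it are illustrative, not kernel APIs.

/*
 * Illustrative userspace analogue of the pattern in this patch:
 * shorten the critical section by isolating entries to a private
 * batch under the lock, counting them unlocked, and reacquiring the
 * lock only to publish the aggregated statistics.
 */
#include <pthread.h>
#include <stdio.h>

#define BATCH 16

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;
static int lru[64];            /* stand-in for the inactive lru list */
static int lru_len = 64;
static long recent_scanned;    /* stand-in for reclaim_stat->recent_scanned */

static long shrink_inactive(void)
{
	int batch[BATCH];
	int taken = 0, i;
	long scanned = 0;

	/* Critical section #1: only isolate entries to a private list. */
	pthread_mutex_lock(&lru_lock);
	while (lru_len > 0 && taken < BATCH)
		batch[taken++] = lru[--lru_len];
	pthread_mutex_unlock(&lru_lock);

	if (taken == 0)
		return 0;

	/*
	 * Count (and, in the kernel, reclaim) with the lock released.
	 * The kernel additionally wraps its stat updates here in
	 * preempt_disable()/preempt_enable(), because
	 * __mod_zone_page_state() and friends are per-cpu operations
	 * that must not be migrated to another cpu mid-update.
	 */
	for (i = 0; i < taken; i++)
		scanned += (batch[i] >= 0);

	/* Critical section #2: reacquire the lock only to publish the
	 * aggregate, mirroring the delayed recent_scanned[] update. */
	pthread_mutex_lock(&lru_lock);
	recent_scanned += scanned;
	pthread_mutex_unlock(&lru_lock);

	return scanned;
}

int main(void)
{
	long scanned = shrink_inactive();

	printf("scanned %ld, recent_scanned now %ld\n",
	       scanned, recent_scanned);
	return 0;
}

The win mirrors the patch: nothing on the isolated batch needs the lru lock, so holding it while counting only lengthened the critical section for every other CPU contending on it.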
Diffstat (limited to 'mm')
-rw-r--r--	mm/vmscan.c	21
1 file changed, 10 insertions(+), 11 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index ae3bf0a09cd..57d8ef6ee4d 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1413,7 +1413,6 @@ update_isolated_counts(struct mem_cgroup_zone *mz,
 			unsigned long *nr_anon,
 			unsigned long *nr_file)
 {
-	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
 	struct zone *zone = mz->zone;
 	unsigned int count[NR_LRU_LISTS] = { 0, };
 	unsigned long nr_active = 0;
@@ -1434,6 +1433,7 @@ update_isolated_counts(struct mem_cgroup_zone *mz,
 		count[lru] += numpages;
 	}

+	preempt_disable();
 	__count_vm_events(PGDEACTIVATE, nr_active);

 	__mod_zone_page_state(zone, NR_ACTIVE_FILE,
@@ -1448,8 +1448,9 @@ update_isolated_counts(struct mem_cgroup_zone *mz,
 	*nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
 	*nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];

-	reclaim_stat->recent_scanned[0] += *nr_anon;
-	reclaim_stat->recent_scanned[1] += *nr_file;
+	__mod_zone_page_state(zone, NR_ISOLATED_ANON, *nr_anon);
+	__mod_zone_page_state(zone, NR_ISOLATED_FILE, *nr_file);
+	preempt_enable();
 }

 /*
@@ -1511,6 +1512,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
 	unsigned long nr_writeback = 0;
 	isolate_mode_t isolate_mode = ISOLATE_INACTIVE;
 	struct zone *zone = mz->zone;
+	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);

 	while (unlikely(too_many_isolated(zone, file, sc))) {
 		congestion_wait(BLK_RW_ASYNC, HZ/10);
@@ -1544,19 +1546,13 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
 			__count_zone_vm_events(PGSCAN_DIRECT, zone,
 					       nr_scanned);
 	}
+	spin_unlock_irq(&zone->lru_lock);

-	if (nr_taken == 0) {
-		spin_unlock_irq(&zone->lru_lock);
+	if (nr_taken == 0)
 		return 0;
-	}

 	update_isolated_counts(mz, &page_list, &nr_anon, &nr_file);

-	__mod_zone_page_state(zone, NR_ISOLATED_ANON, nr_anon);
-	__mod_zone_page_state(zone, NR_ISOLATED_FILE, nr_file);
-
-	spin_unlock_irq(&zone->lru_lock);
-
 	nr_reclaimed = shrink_page_list(&page_list, mz, sc, priority,
 						&nr_dirty, &nr_writeback);

@@ -1569,6 +1565,9 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,

 	spin_lock_irq(&zone->lru_lock);

+	reclaim_stat->recent_scanned[0] += nr_anon;
+	reclaim_stat->recent_scanned[1] += nr_file;
+
 	if (current_is_kswapd())
 		__count_vm_events(KSWAPD_STEAL, nr_reclaimed);
 	__count_zone_vm_events(PGSTEAL, zone, nr_reclaimed);