author     Rik van Riel <riel@redhat.com>                  2008-10-18 20:26:34 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2008-10-20 08:50:25 -0700
commit     556adecba110bf5f1db6c6b56416cfab5bcab698 (patch)
tree       a721d84d28c4d99a54632b472b452ea3d4b2b137 /include/linux
parent     4f98a2fee8acdb4ac84545df98cccecfd130f8db (diff)
vmscan: second chance replacement for anonymous pages
We avoid evicting and scanning anonymous pages for the most part, but under some workloads we can end up with most of memory filled with anonymous pages. At that point, we suddenly need to clear the referenced bits on all of memory, which can take ages on very large memory systems.

We can reduce the maximum number of pages that need to be scanned by not taking the referenced state into account when deactivating an anonymous page. After all, every anonymous page starts out referenced, so why check?

If an anonymous page gets referenced again before it reaches the end of the inactive list, we move it back to the active list.

To keep the maximum amount of necessary work reasonable, we scale the active to inactive ratio with the size of memory, using the formula active:inactive ratio = sqrt(memory in GB * 10).

Kswapd CPU use now seems to scale by the amount of pageout bandwidth, instead of by the amount of memory present in the system.

[kamezawa.hiroyu@jp.fujitsu.com: fix OOM with memcg]
[kamezawa.hiroyu@jp.fujitsu.com: memcg: lru scan fix]

Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
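Worked numbers (illustration only, not from the commit): with 4 GB of memory the target ratio is sqrt(4 * 10) ≈ 6, so roughly one in seven anonymous pages sits on the inactive list; with 1024 GB it is sqrt(1024 * 10) ≈ 101. The amount of list that has to be rescanned therefore grows with the square root of memory size rather than linearly.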
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/mm_inline.h | 19
-rw-r--r--  include/linux/mmzone.h    |  6
2 files changed, 25 insertions(+), 0 deletions(-)
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 2eb599465d5..f451fedd1e7 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -117,4 +117,23 @@ static inline enum lru_list page_lru(struct page *page)
return lru;
}
+/**
+ * inactive_anon_is_low - check if anonymous pages need to be deactivated
+ * @zone: zone to check
+ *
+ * Returns true if the zone does not have enough inactive anon pages,
+ * meaning some active anon pages need to be deactivated.
+ */
+static inline int inactive_anon_is_low(struct zone *zone)
+{
+ unsigned long active, inactive;
+
+ active = zone_page_state(zone, NR_ACTIVE_ANON);
+ inactive = zone_page_state(zone, NR_INACTIVE_ANON);
+
+ if (inactive * zone->inactive_ratio < active)
+ return 1;
+
+ return 0;
+}
#endif
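For context, the intended caller of this helper lives in mm/vmscan.c and is not part of this diff. Based on the commit description, the reclaim path would consult it roughly as sketched below; the surrounding variable names and the exact placement are assumptions, with shrink_active_list() being the existing vmscan routine that moves pages off an active list:

	/*
	 * Sketch of the expected call site (actual code is in
	 * mm/vmscan.c, outside this diff): when the inactive anon
	 * list falls below the target implied by zone->inactive_ratio,
	 * deactivate some active anon pages instead of scanning the
	 * inactive list further.
	 */
	if (lru == LRU_ACTIVE_ANON && inactive_anon_is_low(zone)) {
		shrink_active_list(nr_to_scan, zone, sc, priority, file);
		return 0;
	}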
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 59a4c8fd6eb..9c5111f49a3 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -323,6 +323,12 @@ struct zone {
*/
int prev_priority;
+ /*
+ * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
+ * this zone's LRU. Maintained by the pageout code.
+ */
+ unsigned int inactive_ratio;
+
ZONE_PADDING(_pad2_)
/* Rarely used or read-mostly fields */
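The routine that fills in the new inactive_ratio field is also outside this diff. A minimal sketch of how the pageout code could maintain it per the commit's formula, ratio = sqrt(memory in GB * 10); the function name is hypothetical, int_sqrt() is the kernel's integer square root, and present_pages is the zone's size in pages:

	#include <linux/kernel.h>	/* int_sqrt() */
	#include <linux/mmzone.h>

	/*
	 * Hypothetical sketch: derive the target active:inactive anon
	 * ratio from the zone size.  The shift converts pages to whole
	 * gigabytes for any page size up to 1GB.
	 */
	static void setup_zone_inactive_ratio(struct zone *zone)
	{
		unsigned int gb, ratio;

		gb = zone->present_pages >> (30 - PAGE_SHIFT);
		if (gb)
			ratio = int_sqrt(10 * gb);
		else
			ratio = 1;	/* sub-1GB zones: keep the lists 1:1 */

		zone->inactive_ratio = ratio;
	}

With this in place, inactive_anon_is_low() reduces to a single multiply and compare on every reclaim pass, which is why the ratio is cached in struct zone rather than recomputed.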