author    | Ingo Molnar <mingo@elte.hu> | 2009-01-11 03:41:39 +0100
committer | Ingo Molnar <mingo@elte.hu> | 2009-01-11 03:41:39 +0100
commit    | abede81c4fb2e3b85d8760f25e3da39d2c69a134 (patch)
tree      | 26c893ec108d837eb9171d678c55a1cea7b22af4 /include/linux/mmzone.h
parent    | c9d557c19f94df42db78d4a5de4d25feee694bad (diff)
parent    | c59765042f53a79a7a65585042ff463b69cb248c (diff)
Merge commit 'v2.6.29-rc1' into core/urgent
Diffstat (limited to 'include/linux/mmzone.h')
-rw-r--r-- | include/linux/mmzone.h | 24
1 file changed, 14 insertions(+), 10 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 35a7b5e1946..09c14e213b6 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -263,6 +263,19 @@ enum zone_type {
 #error ZONES_SHIFT -- too many zones configured adjust calculation
 #endif
 
+struct zone_reclaim_stat {
+	/*
+	 * The pageout code in vmscan.c keeps track of how many of the
+	 * mem/swap backed and file backed pages are refeferenced.
+	 * The higher the rotated/scanned ratio, the more valuable
+	 * that cache is.
+	 *
+	 * The anon LRU stats live in [0], file LRU stats in [1]
+	 */
+	unsigned long recent_rotated[2];
+	unsigned long recent_scanned[2];
+};
+
 struct zone {
 	/* Fields commonly accessed by the page allocator */
 	unsigned long pages_min, pages_low, pages_high;
@@ -315,16 +328,7 @@ struct zone {
 		unsigned long nr_scan;
 	} lru[NR_LRU_LISTS];
 
-	/*
-	 * The pageout code in vmscan.c keeps track of how many of the
-	 * mem/swap backed and file backed pages are refeferenced.
-	 * The higher the rotated/scanned ratio, the more valuable
-	 * that cache is.
-	 *
-	 * The anon LRU stats live in [0], file LRU stats in [1]
-	 */
-	unsigned long recent_rotated[2];
-	unsigned long recent_scanned[2];
+	struct zone_reclaim_stat reclaim_stat;
 
 	unsigned long pages_scanned;	/* since last reclaim */
 	unsigned long flags;		/* zone flags, see below */
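
The comment carried into the new struct states the idea behind it: reclaim keeps per-zone counts of how many recently scanned pages were rotated (re-referenced), separately for the anon LRU ([0]) and the file LRU ([1]), and a higher rotated/scanned ratio means that cache is more valuable. Below is a minimal, standalone C sketch of that ratio; it mirrors the struct from the diff, but the helper name reclaim_value() and the percentage arithmetic are illustrative assumptions, not the kernel's actual vmscan.c logic.

#include <stdio.h>

/* Mirrors the struct introduced by this commit (see diff above). */
struct zone_reclaim_stat {
	/* [0] = anon LRU, [1] = file LRU */
	unsigned long recent_rotated[2];
	unsigned long recent_scanned[2];
};

/*
 * Illustrative helper (not kernel code): express the rotated/scanned
 * ratio for one LRU list as a percentage. The higher the value, the
 * more valuable that cache is to keep in memory.
 */
static unsigned long reclaim_value(const struct zone_reclaim_stat *stat, int lru)
{
	if (stat->recent_scanned[lru] == 0)
		return 0;
	return (100 * stat->recent_rotated[lru]) / stat->recent_scanned[lru];
}

int main(void)
{
	struct zone_reclaim_stat stat = {
		.recent_rotated = { 40, 5 },
		.recent_scanned = { 100, 100 },
	};

	/* Anon pages were re-referenced far more often than file pages,
	 * so a reclaimer using these stats would prefer to scan the
	 * file list first. */
	printf("anon value: %lu%%\n", reclaim_value(&stat, 0));
	printf("file value: %lu%%\n", reclaim_value(&stat, 1));
	return 0;
}

The change itself is mechanical: the two arrays move out of struct zone into struct zone_reclaim_stat, and struct zone gains a single reclaim_stat member, so callers now reach the counters via zone->reclaim_stat.recent_rotated[] instead of zone->recent_rotated[].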