Diffstat (limited to 'include/linux/mmzone.h')
-rw-r--r--  include/linux/mmzone.h  47
1 file changed, 40 insertions(+), 7 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 156e18f3919..59a4c8fd6eb 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -82,21 +82,23 @@ enum zone_stat_item {
/* First 128 byte cacheline (assuming 64 bit words) */
NR_FREE_PAGES,
NR_LRU_BASE,
- NR_INACTIVE = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
- NR_ACTIVE, /* " " " " " */
+ NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
+ NR_ACTIVE_ANON, /* " " " " " */
+ NR_INACTIVE_FILE, /* " " " " " */
+ NR_ACTIVE_FILE, /* " " " " " */
NR_ANON_PAGES, /* Mapped anonymous pages */
NR_FILE_MAPPED, /* pagecache pages mapped into pagetables.
only modified from process context */
NR_FILE_PAGES,
NR_FILE_DIRTY,
NR_WRITEBACK,
- /* Second 128 byte cacheline */
NR_SLAB_RECLAIMABLE,
NR_SLAB_UNRECLAIMABLE,
NR_PAGETABLE, /* used for pagetables */
NR_UNSTABLE_NFS, /* NFS unstable pages */
NR_BOUNCE,
NR_VMSCAN_WRITE,
+ /* Second 128 byte cacheline */
NR_WRITEBACK_TEMP, /* Writeback using temporary buffers */
#ifdef CONFIG_NUMA
NUMA_HIT, /* allocated in intended node */
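
The four counters added above are exported through vmstat_text in mm/vmstat.c, so they show up in /proc/vmstat as nr_inactive_anon, nr_active_anon, nr_inactive_file and nr_active_file (names assumed to track vmstat_text; values are in pages). A minimal userspace sketch that reads them back:

/*
 * Userspace sketch: read the four split-LRU counters out of
 * /proc/vmstat.  Assumes the names match vmstat_text in mm/vmstat.c.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	static const char *keys[] = {
		"nr_inactive_anon", "nr_active_anon",
		"nr_inactive_file", "nr_active_file",
	};
	char name[64];
	unsigned long val;
	FILE *f = fopen("/proc/vmstat", "r");

	if (!f) {
		perror("/proc/vmstat");
		return 1;
	}
	/* each /proc/vmstat line is "name value" */
	while (fscanf(f, "%63s %lu", name, &val) == 2) {
		for (size_t i = 0; i < sizeof(keys) / sizeof(keys[0]); i++)
			if (!strcmp(name, keys[i]))
				printf("%-18s %lu pages\n", name, val);
	}
	fclose(f);
	return 0;
}
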
@@ -108,17 +110,36 @@ enum zone_stat_item {
#endif
NR_VM_ZONE_STAT_ITEMS };
+/*
+ * We do arithmetic on the LRU lists in various places in the code,
+ * so it is important to keep each active list LRU_ACTIVE entries
+ * higher in the array than its corresponding inactive list, and
+ * each *_FILE list LRU_FILE entries higher than its _ANON
+ * counterpart.
+ *
+ * This has to be kept in sync with the statistics in zone_stat_item
+ * above and the descriptions in vmstat_text in mm/vmstat.c.
+ */
+#define LRU_BASE 0
+#define LRU_ACTIVE 1
+#define LRU_FILE 2
+
enum lru_list {
- LRU_BASE,
- LRU_INACTIVE=LRU_BASE, /* must match order of NR_[IN]ACTIVE */
- LRU_ACTIVE, /* " " " " " */
+ LRU_INACTIVE_ANON = LRU_BASE,
+ LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
+ LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
+ LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
NR_LRU_LISTS };
#define for_each_lru(l) for (l = 0; l < NR_LRU_LISTS; l++)
+static inline int is_file_lru(enum lru_list l)
+{
+ return (l == LRU_INACTIVE_FILE || l == LRU_ACTIVE_FILE);
+}
+
static inline int is_active_lru(enum lru_list l)
{
- return (l == LRU_ACTIVE);
+ return (l == LRU_ACTIVE_ANON || l == LRU_ACTIVE_FILE);
}
struct per_cpu_pages {
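
Between the two invariants above, every property of an LRU list can be recovered from its index alone. A small standalone C sketch (userspace only, duplicating the definitions above rather than including the kernel header) that walks the lists the way for_each_lru() does:

/*
 * Standalone sketch, not kernel code: mirrors the definitions above
 * to show the offset arithmetic.  Because zone_stat_item keeps
 * NR_INACTIVE_ANON..NR_ACTIVE_FILE in the same order, NR_LRU_BASE + l
 * gives the zone statistic matching LRU list l.
 */
#include <stdio.h>

#define LRU_BASE   0
#define LRU_ACTIVE 1
#define LRU_FILE   2

enum lru_list {
	LRU_INACTIVE_ANON = LRU_BASE,
	LRU_ACTIVE_ANON   = LRU_BASE + LRU_ACTIVE,
	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
	LRU_ACTIVE_FILE   = LRU_BASE + LRU_FILE + LRU_ACTIVE,
	NR_LRU_LISTS
};

#define for_each_lru(l) for (l = 0; l < NR_LRU_LISTS; l++)

static int is_file_lru(enum lru_list l)
{
	return l == LRU_INACTIVE_FILE || l == LRU_ACTIVE_FILE;
}

static int is_active_lru(enum lru_list l)
{
	return l == LRU_ACTIVE_ANON || l == LRU_ACTIVE_FILE;
}

int main(void)
{
	enum lru_list l;

	for_each_lru(l)
		printf("list %d: file=%d active=%d\n",
		       l, is_file_lru(l), is_active_lru(l));
	return 0;
}

Indexes come out as 0 through 3: each file list sits LRU_FILE (two) entries above its anon counterpart, and each active list LRU_ACTIVE (one) entry above its inactive one, exactly the invariant the comment block demands.
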
@@ -269,6 +290,18 @@ struct zone {
struct list_head list;
unsigned long nr_scan;
} lru[NR_LRU_LISTS];
+
+ /*
+ * The pageout code in vmscan.c keeps track of how many of the
+ * mem/swap-backed and file-backed pages are referenced.
+ * The higher the rotated/scanned ratio, the more valuable
+ * that cache is.
+ *
+ * The anon LRU stats live in [0], file LRU stats in [1]
+ */
+ unsigned long recent_rotated[2];
+ unsigned long recent_scanned[2];
+
unsigned long pages_scanned; /* since last reclaim */
unsigned long flags; /* zone flags, see below */
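
As a rough illustration of what the rotated/scanned ratio means (the real balancing policy lives in mm/vmscan.c and is more involved), the fraction of recently scanned pages that were rotated back approximates how heavily each set of lists is being reused. A hypothetical helper, with illustrative names and numbers:

/*
 * Hedged sketch: one plausible way to turn the recent_rotated /
 * recent_scanned counters into a "value of this cache" signal.
 * Names and weighting here are illustrative, not the kernel's
 * actual policy.
 */
#include <stdio.h>

struct zone_sample {
	unsigned long recent_rotated[2];	/* [0] = anon, [1] = file */
	unsigned long recent_scanned[2];
};

/*
 * Percentage of recently scanned pages on one set of lists that were
 * referenced and rotated back: the higher it is, the more that cache
 * is being reused, so the less aggressively it should be reclaimed.
 */
static unsigned long rotated_pct(const struct zone_sample *z, int file)
{
	unsigned long scanned = z->recent_scanned[file];

	if (!scanned)			/* avoid divide-by-zero */
		return 0;
	return z->recent_rotated[file] * 100 / scanned;
}

int main(void)
{
	struct zone_sample z = {
		.recent_rotated = { 600, 150 },
		.recent_scanned = { 1000, 1000 },
	};

	printf("anon reuse: %lu%%  file reuse: %lu%%\n",
	       rotated_pct(&z, 0), rotated_pct(&z, 1));
	return 0;
}

With these sample numbers the anon lists show 60% reuse against 15% for the file lists, suggesting reclaim should lean on the file lists first.
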