Diffstat (limited to 'include/linux/mmzone.h')
include/linux/mmzone.h | 99
1 file changed, 88 insertions(+), 11 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 59855b8718a..e339a7345f2 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -218,13 +218,9 @@ struct zone {
* under - it drives the swappiness decision: whether to unmap mapped
* pages.
*
- * temp_priority is used to remember the scanning priority at which
- * this zone was successfully refilled to free_pages == pages_high.
- *
- * Access to both these fields is quite racy even on uniprocessor. But
+ * Access to this field is quite racy even on uniprocessor. But
* it is expected to average out OK.
*/
- int temp_priority;
int prev_priority;
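The surviving prev_priority field feeds the swappiness decision mentioned in the comment above. As a rough, hedged illustration only — a userspace sketch whose function name and constants are made up, loosely modelled on a "distress" heuristic rather than taken from this patch:

#include <stdbool.h>
#include <stdio.h>

#define DEF_PRIORITY 12	/* matches the definition later in this header */

/*
 * Illustrative only: the lower the priority value at which the zone last
 * met its reclaim target, the more "distress" we assume, and the more
 * willing we are to unmap mapped pages (100 >> DEF_PRIORITY == 0,
 * 100 >> 0 == 100).
 */
static bool should_unmap_mapped_pages(int prev_priority, int mapped_ratio,
				      int swappiness)
{
	int distress = 100 >> prev_priority;

	return mapped_ratio / 2 + distress + swappiness >= 100;
}

int main(void)
{
	/* Relaxed reclaim: no distress, 30% mapped, swappiness 60. */
	printf("relaxed:  %d\n", should_unmap_mapped_pages(DEF_PRIORITY, 30, 60));
	/* Stressed reclaim: the zone last refilled only at priority 2. */
	printf("stressed: %d\n", should_unmap_mapped_pages(2, 30, 60));
	return 0;
}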
@@ -282,7 +278,7 @@ struct zone {
/*
* rarely used fields:
*/
- char *name;
+ const char *name;
} ____cacheline_internodealigned_in_smp;
/*
@@ -292,19 +288,94 @@ struct zone {
*/
#define DEF_PRIORITY 12
+/* Maximum number of zones on a zonelist */
+#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)
+
+#ifdef CONFIG_NUMA
+/*
+ * We cache key information from each zonelist for smaller cache
+ * footprint when scanning for free pages in get_page_from_freelist().
+ *
+ * 1) The BITMAP fullzones tracks which zones in a zonelist have come
+ * up short of free memory since the last time (last_full_zap)
+ * we zeroed fullzones.
+ * 2) The array z_to_n[] maps each zone in the zonelist to its node
+ * id, so that we can efficiently evaluate whether that node is
+ * set in the current task's mems_allowed.
+ *
+ * Both fullzones and z_to_n[] are one-to-one with the zonelist,
+ * indexed by a zone's offset in the zonelist zones[] array.
+ *
+ * The get_page_from_freelist() routine does two scans. During the
+ * first scan, we skip zones whose corresponding bit in 'fullzones'
+ * is set or whose corresponding node in current->mems_allowed (which
+ * comes from cpusets) is not set. During the second scan, we bypass
+ * this zonelist_cache, to ensure we look methodically at each zone.
+ *
+ * Once per second, we zero out (zap) fullzones, forcing us to
+ * reconsider nodes that might have regained more free memory.
+ * The field last_full_zap is the time we last zapped fullzones.
+ *
+ * This mechanism reduces the amount of time we waste repeatedly
+ * reexamining zones for free memory when they came up low on
+ * memory only a moment ago.
+ *
+ * The zonelist_cache struct members logically belong in struct
+ * zonelist. However, the mempolicy zonelists constructed for
+ * MPOL_BIND are intentionally variable length (and usually much
+ * shorter). A general purpose mechanism for handling structs with
+ * multiple variable length members is more mechanism than we want
+ * here. We resort to some special case hackery instead.
+ *
+ * The MPOL_BIND zonelists don't need this zonelist_cache (in good
+ * part because they are shorter), so we put the fixed length stuff
+ * at the front of the zonelist struct, ending in a variable length
+ * zones[], as is needed by MPOL_BIND.
+ *
+ * Then we put the optional zonelist cache on the end of the zonelist
+ * struct. This optional stuff is found by a 'zlcache_ptr' pointer in
+ * the fixed length portion at the front of the struct. This pointer
+ * both enables us to find the zonelist cache and, in the case of
+ * MPOL_BIND zonelists (which just set zlcache_ptr to NULL), to know
+ * that the zonelist cache is not there.
+ *
+ * The end result is that struct zonelists come in two flavors:
+ * 1) The full, fixed length version, shown below, and
+ * 2) The custom zonelists for MPOL_BIND.
+ * The custom MPOL_BIND zonelists have a NULL zlcache_ptr and no zlcache.
+ *
+ * Even though there may be multiple CPU cores on a node modifying
+ * fullzones or last_full_zap in the same zonelist_cache at the same
+ * time, we don't lock it. This is just hint data - if it is wrong now
+ * and then, the allocator will still function, perhaps a bit slower.
+ */
+
+struct zonelist_cache {
+ unsigned short z_to_n[MAX_ZONES_PER_ZONELIST]; /* zone->nid */
+ DECLARE_BITMAP(fullzones, MAX_ZONES_PER_ZONELIST); /* zone full? */
+ unsigned long last_full_zap; /* when last zap'd (jiffies) */
+};
+#else
+struct zonelist_cache;
+#endif
+
/*
* One allocation request operates on a zonelist. A zonelist
* is a list of zones, the first one is the 'goal' of the
* allocation, the other zones are fallback zones, in decreasing
* priority.
*
- * Right now a zonelist takes up less than a cacheline. We never
- * modify it apart from boot-up, and only a few indices are used,
- * so despite the zonelist table being relatively big, the cache
- * footprint of this construct is very small.
+ * If zlcache_ptr is not NULL, then it is just the address of zlcache,
+ * as explained above. If zlcache_ptr is NULL, there is no zlcache.
*/
+
struct zonelist {
- struct zone *zones[MAX_NUMNODES * MAX_NR_ZONES + 1]; // NULL delimited
+ struct zonelist_cache *zlcache_ptr; // NULL or &zlcache
+ struct zone *zones[MAX_ZONES_PER_ZONELIST + 1]; // NULL delimited
+#ifdef CONFIG_NUMA
+ struct zonelist_cache zlcache; // optional ...
+#endif
};
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
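To make the cached two-pass scan concrete, here is a hedged, self-contained userspace model of the mechanism the comment describes. Every name in it (zone_model, zonelist_model, maybe_zap, alloc_one_page) is invented for illustration; none of this is the kernel's get_page_from_freelist(). A boolean array stands in for the fullzones bitmap, the first pass skips zones hinted as full, the second pass ignores the hints, and the hints are zapped about once per second.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

#define NZONES 4

struct zone_model {
	const char *name;
	long free_pages;
};

struct zonelist_model {
	struct zone_model *zones[NZONES + 1];	/* NULL delimited, like zones[] */
	bool full[NZONES];			/* stands in for the fullzones bitmap */
	time_t last_full_zap;			/* when the hints were last cleared */
};

/* Clear the hints roughly once per second so recovered zones get retried. */
static void maybe_zap(struct zonelist_model *zl)
{
	time_t now = time(NULL);

	if (now != zl->last_full_zap) {
		memset(zl->full, 0, sizeof(zl->full));
		zl->last_full_zap = now;
	}
}

/* Take one page: the first pass honours the hints, the second ignores them. */
static struct zone_model *alloc_one_page(struct zonelist_model *zl)
{
	int pass, i;

	maybe_zap(zl);

	for (pass = 0; pass < 2; pass++) {
		for (i = 0; zl->zones[i]; i++) {
			if (pass == 0 && zl->full[i])
				continue;	/* hint: recently came up empty */
			if (zl->zones[i]->free_pages > 0) {
				zl->zones[i]->free_pages--;
				return zl->zones[i];
			}
			zl->full[i] = true;	/* remember the failure */
		}
	}
	return NULL;
}

int main(void)
{
	struct zone_model z[NZONES] = {
		{ "node0/Normal", 1 }, { "node0/DMA", 0 },
		{ "node1/Normal", 2 }, { "node1/DMA", 0 },
	};
	struct zonelist_model zl = {
		.zones = { &z[0], &z[1], &z[2], &z[3], NULL },
	};
	int i;

	for (i = 0; i < 5; i++) {
		struct zone_model *got = alloc_one_page(&zl);

		printf("allocation %d: %s\n", i, got ? got->name : "failed");
	}
	return 0;
}

Because the hints are only advisory and rechecked on the second pass, a stale full[] bit costs at most a slower scan, which is why the comment above can get away with no locking.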
@@ -674,6 +745,12 @@ void sparse_init(void);
#define sparse_index_init(_sec, _nid) do {} while (0)
#endif /* CONFIG_SPARSEMEM */
+#ifdef CONFIG_NODES_SPAN_OTHER_NODES
+#define early_pfn_in_nid(pfn, nid) (early_pfn_to_nid(pfn) == (nid))
+#else
+#define early_pfn_in_nid(pfn, nid) (1)
+#endif
+
#ifndef early_pfn_valid
#define early_pfn_valid(pfn) (1)
#endif
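For the early_pfn_in_nid() fallback added above, a hedged userspace sketch of the intended use; the helpers model_pfn_to_nid() and model_pfn_in_nid() are made up. When nodes can span each other's ranges, a loop over one node's pfns must skip pages that actually belong to another node.

#include <stdio.h>

/*
 * Made-up stand-in for early_pfn_to_nid(): pretend alternating 64-page
 * blocks belong to node 0 and node 1, as can happen when
 * CONFIG_NODES_SPAN_OTHER_NODES is set and nodes interleave memory.
 */
static int model_pfn_to_nid(unsigned long pfn)
{
	return (pfn / 64) & 1;
}

/* Mirrors the macro above: a pfn "is in" a node if its owner matches. */
static int model_pfn_in_nid(unsigned long pfn, int nid)
{
	return model_pfn_to_nid(pfn) == nid;
}

int main(void)
{
	unsigned long pfn, initialised = 0;
	int nid = 0;

	/* Walk the node's spanned range, skipping pfns owned by other nodes. */
	for (pfn = 0; pfn < 256; pfn++) {
		if (!model_pfn_in_nid(pfn, nid))
			continue;
		initialised++;
	}
	printf("node %d: initialised %lu of 256 pfns\n", nid, initialised);
	return 0;
}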