path: root/include/linux/mmzone.h
author	Bob Picco <bob.picco@hp.com>	2005-09-03 15:54:28 -0700
committer	Linus Torvalds <torvalds@evo.osdl.org>	2005-09-05 00:05:38 -0700
commit	3e347261a80b57df792ab9464b5f0ed59add53a8
tree	047b35e0f9ec82b3beeff882a9af6292a500097c /include/linux/mmzone.h
parent	802f192e4a600f7ef84ca25c8b818c8830acef5a
[PATCH] sparsemem extreme implementation
With cleanups from Dave Hansen <haveblue@us.ibm.com>

SPARSEMEM_EXTREME makes mem_section a one dimensional array of pointers to
mem_sections.  This two level layout scheme is able to achieve smaller
memory requirements for SPARSEMEM with the tradeoff of an additional shift
and load when fetching the memory section.  The current SPARSEMEM
implementation is a one dimensional array of mem_sections, which is the
default SPARSEMEM configuration.  The patch attempts to isolate the
implementation details of the physical layout of the sparsemem section
array.

SPARSEMEM_EXTREME requires bootmem to be functioning at the time of
memory_present() calls.  This is not always feasible, so architectures
which do not need it may allocate everything statically by using
SPARSEMEM_STATIC.

Signed-off-by: Andy Whitcroft <apw@shadowen.org>
Signed-off-by: Bob Picco <bob.picco@hp.com>
Signed-off-by: Dave Hansen <haveblue@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
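For readers who want to see the new layout outside the kernel, here is a minimal
userspace sketch of the two level lookup described above.  It is not kernel code:
PAGE_SIZE, NR_MEM_SECTIONS, main(), nr_to_section() and the calloc() call are
stand-ins chosen for illustration; only the SECTIONS_PER_ROOT /
SECTION_NR_TO_ROOT / SECTION_ROOT_MASK arithmetic mirrors the hunk below, and it
shows where the "additional shift and load" on each lookup comes from.

/*
 * Userspace model of the two-level EXTREME lookup.  The bootmem-backed
 * kernel arrays are replaced by calloc(); only the index arithmetic
 * mirrors the patch.
 */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE		4096UL		/* illustrative value */
#define NR_MEM_SECTIONS		(1UL << 20)	/* illustrative value */

struct mem_section {
	unsigned long section_mem_map;
};

#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof(struct mem_section))
#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS	(NR_MEM_SECTIONS / SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

/* One pointer per root; a root page is only allocated if memory is present. */
static struct mem_section *mem_section[NR_SECTION_ROOTS];

static struct mem_section *nr_to_section(unsigned long nr)
{
	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
		return NULL;
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}

int main(void)
{
	unsigned long nr = 12345;

	/* Populate one root, roughly what sparse_index_init() does for present memory. */
	mem_section[SECTION_NR_TO_ROOT(nr)] =
		calloc(SECTIONS_PER_ROOT, sizeof(struct mem_section));

	printf("roots=%lu sections/root=%lu section %lu -> root %lu slot %lu (%p)\n",
	       NR_SECTION_ROOTS, SECTIONS_PER_ROOT, nr,
	       SECTION_NR_TO_ROOT(nr), nr & SECTION_ROOT_MASK,
	       (void *)nr_to_section(nr));
	return 0;
}

With the 4 KiB page and 8-byte mem_section assumed in this sketch, each root
covers 512 sections, so only 2048 root pointers are needed up front and a full
page of mem_sections is allocated only for roots that actually contain memory.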
Diffstat (limited to 'include/linux/mmzone.h')
-rw-r--r--	include/linux/mmzone.h	40
1 file changed, 15 insertions(+), 25 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index b97054bbc39..79cf578e21b 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -487,39 +487,29 @@ struct mem_section {
 	unsigned long section_mem_map;
 };
 
-#ifdef CONFIG_ARCH_SPARSEMEM_EXTREME
-/*
- * Should we ever require GCC 4 or later then the flat array scheme
- * can be eliminated and a uniform solution for EXTREME and !EXTREME can
- * be arrived at.
- */
-#define SECTION_ROOT_SHIFT	(PAGE_SHIFT-3)
-#define SECTION_ROOT_MASK	((1UL<<SECTION_ROOT_SHIFT) - 1)
-#define SECTION_TO_ROOT(_sec)	((_sec) >> SECTION_ROOT_SHIFT)
-#define NR_SECTION_ROOTS	(NR_MEM_SECTIONS >> SECTION_ROOT_SHIFT)
+#ifdef CONFIG_SPARSEMEM_EXTREME
+#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof (struct mem_section))
+#else
+#define SECTIONS_PER_ROOT	1
+#endif
 
-extern struct mem_section *mem_section[NR_SECTION_ROOTS];
-
-static inline struct mem_section *__nr_to_section(unsigned long nr)
-{
-	if (!mem_section[SECTION_TO_ROOT(nr)])
-		return NULL;
-	return &mem_section[SECTION_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
-}
+#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
+#define NR_SECTION_ROOTS	(NR_MEM_SECTIONS / SECTIONS_PER_ROOT)
+#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)
 
+#ifdef CONFIG_SPARSEMEM_EXTREME
+extern struct mem_section *mem_section[NR_SECTION_ROOTS];
 #else
-
-extern struct mem_section mem_section[NR_MEM_SECTIONS];
+extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
+#endif
 
 static inline struct mem_section *__nr_to_section(unsigned long nr)
 {
-	return &mem_section[nr];
+	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
+		return NULL;
+	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
 }
 
-#define sparse_index_init(_sec, _nid) do {} while (0)
-
-#endif
-
 /*
  * We use the lower bits of the mem_map pointer to store
  * a little bit of information.  There should be at least
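The trailing context above is the start of the comment about packing flags into
the low bits of section_mem_map.  That code lies outside this hunk, so the flag
names in the sketch below (SECTION_MARKED_PRESENT, SECTION_HAS_MEM_MAP,
SECTION_MAP_MASK) are recalled from the surrounding mmzone.h and should be
treated as assumptions; the sketch only illustrates the encode/decode
arithmetic that the alignment of the mem_map pointer makes possible.

/*
 * Sketch of storing flags in the low bits of a mem_map-style pointer.
 * Flag names are assumptions, not part of this hunk; the alignment of
 * struct page is what guarantees the low pointer bits are free.
 */
#include <assert.h>
#include <stdio.h>

#define SECTION_MARKED_PRESENT	(1UL << 0)
#define SECTION_HAS_MEM_MAP	(1UL << 1)
#define SECTION_MAP_LAST_BIT	(1UL << 2)
#define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT - 1))

struct page { unsigned long flags; };

int main(void)
{
	static struct page fake_mem_map[4];	/* aligned backing store */
	unsigned long encoded;

	/* Alignment of struct page leaves the low bits of the address clear. */
	assert(((unsigned long)fake_mem_map & ~SECTION_MAP_MASK) == 0);

	/* Encode: pointer in the high bits, state flags in the low bits. */
	encoded = (unsigned long)fake_mem_map
		| SECTION_MARKED_PRESENT | SECTION_HAS_MEM_MAP;

	/* Decode: mask the flags away to recover the mem_map pointer. */
	printf("present=%lu has_map=%lu map=%p\n",
	       encoded & SECTION_MARKED_PRESENT,
	       (encoded & SECTION_HAS_MEM_MAP) >> 1,
	       (void *)(encoded & SECTION_MAP_MASK));
	return 0;
}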