author    David Woodhouse <dwmw2@infradead.org>    2006-10-01 17:55:53 +0100
committer David Woodhouse <dwmw2@infradead.org>    2006-10-01 17:55:53 +0100
commit    8a84fc15ae5cafcc366dd85cf8e1ab2040679abc (patch)
tree      5d8dce194c9667fa92e9ec9f545cec867a9a1e0d /arch/i386/mm/discontig.c
parent    28b79ff9661b22e4c41c0d00d4ab8503e810f13d (diff)
parent    82965addad66fce61a92c5f03104ea90b0b87124 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6

Manually resolve conflict in include/mtd/Kbuild

Signed-off-by: David Woodhouse <dwmw2@infradead.org>
Diffstat (limited to 'arch/i386/mm/discontig.c')
-rw-r--r--  arch/i386/mm/discontig.c  105
1 file changed, 44 insertions, 61 deletions
diff --git a/arch/i386/mm/discontig.c b/arch/i386/mm/discontig.c
index 7c392dc553b..51e3739dd22 100644
--- a/arch/i386/mm/discontig.c
+++ b/arch/i386/mm/discontig.c
@@ -117,7 +117,8 @@ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
void *node_remap_end_vaddr[MAX_NUMNODES];
void *node_remap_alloc_vaddr[MAX_NUMNODES];
-
+static unsigned long kva_start_pfn;
+static unsigned long kva_pages;
/*
* FLAT - support for basic PC memory model with discontig enabled, essentially
* a single node with all available processors in it with a flat
@@ -156,21 +157,6 @@ static void __init find_max_pfn_node(int nid)
BUG();
}
-/* Find the owning node for a pfn. */
-int early_pfn_to_nid(unsigned long pfn)
-{
- int nid;
-
- for_each_node(nid) {
- if (node_end_pfn[nid] == 0)
- break;
- if (node_start_pfn[nid] <= pfn && node_end_pfn[nid] >= pfn)
- return nid;
- }
-
- return 0;
-}
-
/*
* Allocate memory for the pg_data_t for this node via a crude pre-bootmem
* method. For node zero take this from the bottom of memory, for
@@ -226,6 +212,8 @@ static unsigned long calculate_numa_remap_pages(void)
unsigned long pfn;
for_each_online_node(nid) {
+ unsigned old_end_pfn = node_end_pfn[nid];
+
/*
* The acpi/srat node info can show hot-add memory zones
* where memory could be added but not currently present.
@@ -275,6 +263,7 @@ static unsigned long calculate_numa_remap_pages(void)
node_end_pfn[nid] -= size;
node_remap_start_pfn[nid] = node_end_pfn[nid];
+ shrink_active_range(nid, old_end_pfn, node_end_pfn[nid]);
}
printk("Reserving total of %ld pages for numa KVA remap\n",
reserve_pages);
@@ -286,7 +275,6 @@ unsigned long __init setup_memory(void)
{
int nid;
unsigned long system_start_pfn, system_max_low_pfn;
- unsigned long reserve_pages;
/*
* When mapping a NUMA machine we allocate the node_mem_map arrays
@@ -298,14 +286,23 @@ unsigned long __init setup_memory(void)
find_max_pfn();
get_memcfg_numa();
- reserve_pages = calculate_numa_remap_pages();
+ kva_pages = calculate_numa_remap_pages();
/* partially used pages are not usable - thus round upwards */
system_start_pfn = min_low_pfn = PFN_UP(init_pg_tables_end);
- system_max_low_pfn = max_low_pfn = find_max_low_pfn() - reserve_pages;
- printk("reserve_pages = %ld find_max_low_pfn() ~ %ld\n",
- reserve_pages, max_low_pfn + reserve_pages);
+ kva_start_pfn = find_max_low_pfn() - kva_pages;
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ /* Numa kva area is below the initrd */
+ if (LOADER_TYPE && INITRD_START)
+ kva_start_pfn = PFN_DOWN(INITRD_START) - kva_pages;
+#endif
+ kva_start_pfn -= kva_start_pfn & (PTRS_PER_PTE-1);
+
+ system_max_low_pfn = max_low_pfn = find_max_low_pfn();
+ printk("kva_start_pfn ~ %ld find_max_low_pfn() ~ %ld\n",
+ kva_start_pfn, max_low_pfn);
printk("max_pfn = %ld\n", max_pfn);
#ifdef CONFIG_HIGHMEM
highstart_pfn = highend_pfn = max_pfn;
@@ -313,6 +310,11 @@ unsigned long __init setup_memory(void)
highstart_pfn = system_max_low_pfn;
printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
pages_to_mb(highend_pfn - highstart_pfn));
+ num_physpages = highend_pfn;
+ high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
+#else
+ num_physpages = system_max_low_pfn;
+ high_memory = (void *) __va(system_max_low_pfn * PAGE_SIZE - 1) + 1;
#endif
printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
pages_to_mb(system_max_low_pfn));
@@ -323,7 +325,7 @@ unsigned long __init setup_memory(void)
(ulong) pfn_to_kaddr(max_low_pfn));
for_each_online_node(nid) {
node_remap_start_vaddr[nid] = pfn_to_kaddr(
- highstart_pfn + node_remap_offset[nid]);
+ kva_start_pfn + node_remap_offset[nid]);
/* Init the node remap allocator */
node_remap_end_vaddr[nid] = node_remap_start_vaddr[nid] +
(node_remap_size[nid] * PAGE_SIZE);
@@ -338,7 +340,6 @@ unsigned long __init setup_memory(void)
}
printk("High memory starts at vaddr %08lx\n",
(ulong) pfn_to_kaddr(highstart_pfn));
- vmalloc_earlyreserve = reserve_pages * PAGE_SIZE;
for_each_online_node(nid)
find_max_pfn_node(nid);
@@ -348,48 +349,30 @@ unsigned long __init setup_memory(void)
return max_low_pfn;
}
+void __init numa_kva_reserve(void)
+{
+ reserve_bootmem(PFN_PHYS(kva_start_pfn),PFN_PHYS(kva_pages));
+}
+
void __init zone_sizes_init(void)
{
int nid;
-
-
- for_each_online_node(nid) {
- unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
- unsigned long *zholes_size;
- unsigned int max_dma;
-
- unsigned long low = max_low_pfn;
- unsigned long start = node_start_pfn[nid];
- unsigned long high = node_end_pfn[nid];
-
- max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
-
- if (node_has_online_mem(nid)){
- if (start > low) {
-#ifdef CONFIG_HIGHMEM
- BUG_ON(start > high);
- zones_size[ZONE_HIGHMEM] = high - start;
-#endif
- } else {
- if (low < max_dma)
- zones_size[ZONE_DMA] = low;
- else {
- BUG_ON(max_dma > low);
- BUG_ON(low > high);
- zones_size[ZONE_DMA] = max_dma;
- zones_size[ZONE_NORMAL] = low - max_dma;
-#ifdef CONFIG_HIGHMEM
- zones_size[ZONE_HIGHMEM] = high - low;
-#endif
- }
- }
+ unsigned long max_zone_pfns[MAX_NR_ZONES] = {
+ virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT,
+ max_low_pfn,
+ highend_pfn
+ };
+
+ /* If SRAT has not registered memory, register it now */
+ if (find_max_pfn_with_active_regions() == 0) {
+ for_each_online_node(nid) {
+ if (node_has_online_mem(nid))
+ add_active_range(nid, node_start_pfn[nid],
+ node_end_pfn[nid]);
}
-
- zholes_size = get_zholes_size(nid);
-
- free_area_init_node(nid, NODE_DATA(nid), zones_size, start,
- zholes_size);
}
+
+ free_area_init_nodes(max_zone_pfns);
return;
}
@@ -409,7 +392,7 @@ void __init set_highmem_pages_init(int bad_ppro)
zone_end_pfn = zone_start_pfn + zone->spanned_pages;
printk("Initializing %s for node %d (%08lx:%08lx)\n",
- zone->name, zone->zone_pgdat->node_id,
+ zone->name, zone_to_nid(zone),
zone_start_pfn, zone_end_pfn);
for (node_pfn = zone_start_pfn; node_pfn < zone_end_pfn; node_pfn++) {