author     Yinghai Lu <yhlu.kernel@gmail.com>    2008-06-23 03:05:30 -0700
committer  Ingo Molnar <mingo@elte.hu>           2008-07-08 12:50:20 +0200
commit     2ec65f8b89ea003c27ff7723525a2ee335a2b393 (patch)
tree       9b8718be2017f619b2a0185492315b85ea1731fa  /arch/x86/mm
parent     bef1568d9714f1162086c32583ba7984a7ca8e3e (diff)
x86: clean up using max_low_pfn on 32-bit
Ensure that max_low_pfn is not changed after it is set, so it can be set early and outside of initmem_init(): find_low_pfn_range() can be called right after max_pfn is set. Also move reserve_initrd() out of setup_bootmem_allocator(), so the 32-bit path looks more like the 64-bit one.

Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
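For orientation, here is a minimal hypothetical sketch of the call ordering this cleanup aims for on the caller side. The real call sites live in setup_arch(), which this diff (limited to arch/x86/mm) does not touch, and boot_mem_setup_sketch() below is a made-up stand-in used purely for illustration:

/*
 * Hypothetical sketch, not part of this patch: roughly how the 32-bit
 * boot path is meant to look once max_low_pfn is fixed early.
 */
void __init boot_mem_setup_sketch(void)
{
        /* e820 parsing has already established max_pfn at this point */

        /* set min_low_pfn/max_low_pfn once, early; nothing changes them later */
        find_low_pfn_range();

        /* initrd is reserved by the caller, not by setup_bootmem_allocator() */
        reserve_initrd();

        /*
         * NUMA or flat memory init reads the already-final max_low_pfn and
         * finishes by calling setup_bootmem_allocator() itself.
         */
        initmem_init(0, max_pfn);
}

The exact position of reserve_initrd() relative to initmem_init() is an assumption here; the point is only that it is no longer buried inside setup_bootmem_allocator().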
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/discontig_32.c | 22
-rw-r--r--  arch/x86/mm/init_32.c      | 25
2 files changed, 16 insertions(+), 31 deletions(-)
diff --git a/arch/x86/mm/discontig_32.c b/arch/x86/mm/discontig_32.c
index 3e75be46c4f..1dfff700264 100644
--- a/arch/x86/mm/discontig_32.c
+++ b/arch/x86/mm/discontig_32.c
@@ -309,11 +309,10 @@ static void init_remap_allocator(int nid)
                 (ulong) node_remap_end_vaddr[nid]);
 }
 
-unsigned long __init initmem_init(unsigned long start_pfn,
+void __init initmem_init(unsigned long start_pfn,
                                   unsigned long end_pfn)
 {
         int nid;
-        unsigned long system_start_pfn, system_max_low_pfn;
         long kva_target_pfn;
 
         /*
@@ -324,17 +323,11 @@ unsigned long __init initmem_init(unsigned long start_pfn,
          * and ZONE_HIGHMEM.
          */
 
-        /* call find_max_low_pfn at first, it could update max_pfn */
-        system_max_low_pfn = max_low_pfn = find_max_low_pfn();
-
         remove_all_active_ranges();
         get_memcfg_numa();
 
         kva_pages = round_up(calculate_numa_remap_pages(), PTRS_PER_PTE);
 
-        /* partially used pages are not usable - thus round upwards */
-        system_start_pfn = min_low_pfn = PFN_UP(init_pg_tables_end);
-
         kva_target_pfn = round_down(max_low_pfn - kva_pages, PTRS_PER_PTE);
         do {
                 kva_start_pfn = find_e820_area(kva_target_pfn<<PAGE_SHIFT,
@@ -357,19 +350,19 @@ unsigned long __init initmem_init(unsigned long start_pfn,
                 "KVA PG");
 #ifdef CONFIG_HIGHMEM
         highstart_pfn = highend_pfn = max_pfn;
-        if (max_pfn > system_max_low_pfn)
-                highstart_pfn = system_max_low_pfn;
+        if (max_pfn > max_low_pfn)
+                highstart_pfn = max_low_pfn;
         printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
                pages_to_mb(highend_pfn - highstart_pfn));
         num_physpages = highend_pfn;
         high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
 #else
-        num_physpages = system_max_low_pfn;
-        high_memory = (void *) __va(system_max_low_pfn * PAGE_SIZE - 1) + 1;
+        num_physpages = max_low_pfn;
+        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
 #endif
         printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
-                        pages_to_mb(system_max_low_pfn));
-        printk("min_low_pfn = %ld, max_low_pfn = %ld, highstart_pfn = %ld\n",
+                        pages_to_mb(max_low_pfn));
+        printk("min_low_pfn = %ld, max_low_pfn = %ld, highstart_pfn = %ld\n",
                         min_low_pfn, max_low_pfn, highstart_pfn);
 
         printk("Low memory ends at vaddr %08lx\n",
@@ -387,7 +380,6 @@ unsigned long __init initmem_init(unsigned long start_pfn,
         memset(NODE_DATA(0), 0, sizeof(struct pglist_data));
         NODE_DATA(0)->bdata = &node0_bdata;
         setup_bootmem_allocator();
-        return max_low_pfn;
 }
 
 void __init zone_sizes_init(void)
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index d1017336f1b..27b82931294 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -561,9 +561,15 @@ early_param("highmem", parse_highmem);
 /*
  * Determine low and high memory ranges:
  */
-unsigned long __init find_max_low_pfn(void)
+void __init find_low_pfn_range(void)
 {
-        unsigned long max_low_pfn;
+        /* it could update max_pfn */
+
+        /*
+         * partially used pages are not usable - thus
+         * we are rounding upwards:
+         */
+        min_low_pfn = PFN_UP(init_pg_tables_end);
 
         max_low_pfn = max_pfn;
         if (max_low_pfn > MAXMEM_PFN) {
@@ -625,21 +631,12 @@ unsigned long __init find_max_low_pfn(void)
                                 " kernel!\n");
 #endif
         }
-        return max_low_pfn;
 }
 
 #ifndef CONFIG_NEED_MULTIPLE_NODES
-unsigned long __init initmem_init(unsigned long start_pfn,
+void __init initmem_init(unsigned long start_pfn,
                                   unsigned long end_pfn)
 {
-        /*
-         * partially used pages are not usable - thus
-         * we are rounding upwards:
-         */
-        min_low_pfn = PFN_UP(init_pg_tables_end);
-
-        max_low_pfn = find_max_low_pfn();
-
 #ifdef CONFIG_HIGHMEM
         highstart_pfn = highend_pfn = max_pfn;
         if (max_pfn > max_low_pfn)
@@ -661,8 +658,6 @@ unsigned long __init initmem_init(unsigned long start_pfn,
                 pages_to_mb(max_low_pfn));
 
         setup_bootmem_allocator();
-
-        return max_low_pfn;
 }
 
 void __init zone_sizes_init(void)
@@ -699,8 +694,6 @@ void __init setup_bootmem_allocator(void)
                 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
         reserve_early(bootmap, bootmap + bootmap_size, "BOOTMAP");
 
-        reserve_initrd();
-
         bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, max_low_pfn);
         printk(KERN_INFO " mapped low ram: 0 - %08lx\n",
                 max_pfn_mapped<<PAGE_SHIFT);