author		Yinghai Lu <yhlu.kernel@gmail.com>	2008-06-28 03:30:39 -0700
committer	Ingo Molnar <mingo@elte.hu>	2008-07-08 13:16:06 +0200
commit		7482b0e962e128c5b574aa29761f97164189ef14 (patch)
tree		11f99e89b1957c8b47fc1a17cfafc1899471b112 /arch/x86
parent		df366e9822beca97115ba9745cbe1ea1f26fb111 (diff)
x86: fix init_memory_mapping over boundary v3
Some RAM-end boundaries only have page alignment, instead of 2M alignment.

v2: make init_memory_mapping more solid: start could be any value other than 0
v3: fix non-PAE by handling the leftover range in kernel_physical_mapping_init()

Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
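As a rough illustration of the boundary case above (not part of the patch; the constants and the example end address below are assumptions), splitting a physical range at its last 2M boundary leaves a page-aligned tail that can only be covered with 4K mappings, which is what the kernel_physical_mapping_init() changes below take care of:

/*
 * Illustrative sketch only, not from the patch: split a physical range at
 * its last 2M boundary.  The tail past that boundary cannot be covered by
 * a PSE large page and has to be mapped with individual 4K ptes.
 * PAGE_SHIFT/PMD_SHIFT and the example end address are assumptions.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SHIFT	21			/* 2M large pages */
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE - 1))

int main(void)
{
	unsigned long end = 0x7ff0000UL;	/* page aligned, not 2M aligned */
	unsigned long big_end = end & PMD_MASK;	/* last 2M boundary <= end */
	unsigned long tail = end - big_end;	/* leftover mapped with 4K ptes */

	printf("2M pages cover up to %#lx, leftover %lu x 4K pages\n",
	       big_end, tail >> PAGE_SHIFT);
	return 0;
}

With the example value, 2M pages cover the range up to 0x7e00000 and the remaining 496 pages have to be mapped individually.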
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/mm/init_32.c	24
1 file changed, 14 insertions, 10 deletions
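The find_early_table_space() hunk at the end of this diff sizes the early pte reservation from the same observation; below is a standalone sketch of that arithmetic (the end value is just an example, and the extra 2M added is presumably headroom for a start that is not 2M aligned either):

/*
 * Illustrative arithmetic only: with PSE available, only the sub-2M tail
 * of the range (plus an extra 2M of headroom) ever needs individual 4K
 * pte entries, so only that much pte space is reserved up front.
 * Without PSE every page needs a pte.  The example end address is made up.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long end = 0x7ff0000UL;	/* not 2M aligned */
	unsigned long extra, ptes_pse, ptes_nopse;

	extra = end - ((end >> 21) << 21);	/* bytes past the last 2M boundary */
	extra += (2UL << 20);			/* plus an extra 2M of headroom */
	ptes_pse = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;

	ptes_nopse = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;

	printf("pte entries reserved: %lu with PSE, %lu without\n",
	       ptes_pse, ptes_nopse);
	return 0;
}

For the example end address, that reserves 1008 pte entries instead of the 32752 a non-PSE mapping would need.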
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index b9cf7f70530..90ca67be965 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -195,7 +195,7 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
 	unsigned pages_2m = 0, pages_4k = 0;
 	unsigned limit_pfn = end >> PAGE_SHIFT;
 
-	pgd_idx = pgd_index(PAGE_OFFSET);
+	pgd_idx = pgd_index(start + PAGE_OFFSET);
 	pgd = pgd_base + pgd_idx;
 
 	pfn = start >> PAGE_SHIFT;
@@ -218,7 +218,8 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
 			 * and overlapping MTRRs into large pages can cause
 			 * slowdowns.
 			 */
-			if (cpu_has_pse && !(pgd_idx == 0 && pmd_idx == 0)) {
+			if (cpu_has_pse && !(pgd_idx == 0 && pmd_idx == 0) &&
+			    (pfn + PTRS_PER_PTE) <= limit_pfn) {
 				unsigned int addr2;
 				pgprot_t prot = PAGE_KERNEL_LARGE;
 
@@ -233,13 +234,12 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
 				set_pmd(pmd, pfn_pmd(pfn, prot));
 
 				pfn += PTRS_PER_PTE;
-				max_pfn_mapped = pfn;
 				continue;
 			}
 			pte = one_page_table_init(pmd);
 
 			for (pte_ofs = 0;
-			     pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn;
+			     pte_ofs < PTRS_PER_PTE && pfn < limit_pfn;
 			     pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
 				pgprot_t prot = PAGE_KERNEL;
 
@@ -249,7 +249,6 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
 				pages_4k++;
 				set_pte(pte, pfn_pte(pfn, prot));
 			}
-			max_pfn_mapped = pfn;
 		}
 	}
 	update_page_count(PG_LEVEL_2M, pages_2m);
@@ -729,7 +728,7 @@ void __init setup_bootmem_allocator(void)
 static void __init find_early_table_space(unsigned long end)
 {
-	unsigned long puds, pmds, tables, start;
+	unsigned long puds, pmds, ptes, tables, start;
 
 	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
 	tables = PAGE_ALIGN(puds * sizeof(pud_t));
@@ -737,10 +736,15 @@ static void __init find_early_table_space(unsigned long end)
 	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
 	tables += PAGE_ALIGN(pmds * sizeof(pmd_t));
 
-	if (!cpu_has_pse) {
-		int ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
-		tables += PAGE_ALIGN(ptes * sizeof(pte_t));
-	}
+	if (cpu_has_pse) {
+		unsigned long extra;
+		extra = end - ((end>>21) << 21);
+		extra += (2UL<<20);
+		ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	} else
+		ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+	tables += PAGE_ALIGN(ptes * sizeof(pte_t));
 
 	/*
 	 * RED-PEN putting page tables only on node 0 could