Diffstat (limited to 'arch/sh/mm/init.c')
-rw-r--r-- | arch/sh/mm/init.c | 47
1 files changed, 19 insertions, 28 deletions
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 7154d1ce978..29bd37b1488 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -77,6 +77,7 @@ void show_mem(void)
 	printk("%d pages swap cached\n",cached);
 }
 
+#ifdef CONFIG_MMU
 static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
 {
 	pgd_t *pgd;
@@ -84,30 +85,22 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
 	pmd_t *pmd;
 	pte_t *pte;
 
-	pgd = swapper_pg_dir + pgd_index(addr);
+	pgd = pgd_offset_k(addr);
 	if (pgd_none(*pgd)) {
 		pgd_ERROR(*pgd);
 		return;
 	}
 
-	pud = pud_offset(pgd, addr);
-	if (pud_none(*pud)) {
-		pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC);
-		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
-		if (pmd != pmd_offset(pud, 0)) {
-			pud_ERROR(*pud);
-			return;
-		}
+	pud = pud_alloc(NULL, pgd, addr);
+	if (unlikely(!pud)) {
+		pud_ERROR(*pud);
+		return;
 	}
 
-	pmd = pmd_offset(pud, addr);
-	if (pmd_none(*pmd)) {
-		pte = (pte_t *)get_zeroed_page(GFP_ATOMIC);
-		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
-		if (pte != pte_offset_kernel(pmd, 0)) {
-			pmd_ERROR(*pmd);
-			return;
-		}
+	pmd = pmd_alloc(NULL, pud, addr);
+	if (unlikely(!pmd)) {
+		pmd_ERROR(*pmd);
+		return;
 	}
 
 	pte = pte_offset_kernel(pmd, addr);
@@ -147,6 +140,7 @@ void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
 
 	set_pte_phys(address, phys, prot);
 }
+#endif /* CONFIG_MMU */
 
 /* References to section boundaries */
 
@@ -155,9 +149,6 @@ extern char __init_begin, __init_end;
 
 /*
  * paging_init() sets up the page tables
- *
- * This routines also unmaps the page at virtual kernel address 0, so
- * that we can trap those pesky NULL-reference errors in the kernel.
 */
 void __init paging_init(void)
 {
@@ -180,14 +171,11 @@ void __init paging_init(void)
 	 */
 	{
 		unsigned long max_dma, low, start_pfn;
-		pgd_t *pg_dir;
-		int i;
-
-		/* We don't need kernel mapping as hardware support that. */
-		pg_dir = swapper_pg_dir;
 
-		for (i = 0; i < PTRS_PER_PGD; i++)
-			pgd_val(pg_dir[i]) = 0;
+		/* We don't need to map the kernel through the TLB, as
+		 * it is permanatly mapped using P1. So clear the
+		 * entire pgd. */
+		memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
 
 		/* Turn on the MMU */
 		enable_mmu();
@@ -206,6 +194,10 @@ void __init paging_init(void)
 		}
 	}
 
+	/* Set an initial value for the MMU.TTB so we don't have to
+	 * check for a null value. */
+	set_TTB(swapper_pg_dir);
+
#elif defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4)
 	/*
 	 * If we don't have CONFIG_MMU set and the processor in question
@@ -227,7 +219,6 @@ static struct kcore_list kcore_mem, kcore_vmalloc;
 
 void __init mem_init(void)
 {
-	extern unsigned long empty_zero_page[1024];
 	int codesize, reservedpages, datasize, initsize;
 	int tmp;
 	extern unsigned long memory_start;
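
Note on the set_pte_phys() change above: the patch drops the open-coded get_zeroed_page() + set_pud()/set_pmd() allocation in favour of the generic pud_alloc()/pmd_alloc() helpers, so the function now simply walks pgd -> pud -> pmd, allocating any missing level and bailing out on failure before writing the pte. The sketch below is an illustrative, self-contained userspace model of that allocate-on-demand walk, not kernel code; the names used here (struct level, alloc_level(), ENTRIES) are hypothetical stand-ins invented for the example.

/*
 * Illustrative sketch only: a userspace model of the "walk the page
 * tables, allocating missing levels on demand" pattern that the patch
 * switches set_pte_phys() to. All types and helpers are made up.
 */
#include <stdio.h>
#include <stdlib.h>

#define ENTRIES 4

struct level { struct level *next[ENTRIES]; };

/* Mirror of pud_alloc()/pmd_alloc(): return the existing lower-level
 * table if present, otherwise install a zeroed one and return it
 * (NULL if the allocation fails). */
static struct level *alloc_level(struct level *table, unsigned int idx)
{
	if (!table->next[idx])
		table->next[idx] = calloc(1, sizeof(struct level));
	return table->next[idx];
}

int main(void)
{
	struct level pgd = { { 0 } };	/* top level, like swapper_pg_dir */
	struct level *pud, *pmd;

	/* Walk pgd -> pud -> pmd, allocating missing levels as we go;
	 * each step can fail, so check and bail out as the patch does. */
	pud = alloc_level(&pgd, 1);
	if (!pud) {
		fprintf(stderr, "pud allocation failed\n");
		return 1;
	}

	pmd = alloc_level(pud, 2);
	if (!pmd) {
		fprintf(stderr, "pmd allocation failed\n");
		return 1;
	}

	printf("walked to pmd at %p\n", (void *)pmd);
	return 0;
}

The point of the pattern is that each table level is only populated the first time it is needed and every allocation site has a single failure check, which is what the pud_alloc()/pmd_alloc() conversion buys over the hand-rolled get_zeroed_page() version it replaces.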