author    | Catalin Marinas <catalin.marinas@arm.com> | 2014-07-16 12:07:17 +0100
committer | Catalin Marinas <catalin.marinas@arm.com> | 2014-07-23 15:27:00 +0100
commit    | 7edd88ad7e59c2b7b49da0e00f251884fb785d4f (patch)
tree      | 8e203aaaf6787e4a03d711121ea2166727057504 /arch/arm64
parent    | affeafbb84776b8527591aa83c16be67dfa5c985 (diff)
arm64: Do not initialise the fixmap page tables in head.S
The early_ioremap_init() function already handles fixmap pte
initialisation, so upgrade this to cover all of pud/pmd/pte and remove
one page from swapper_pg_dir.
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Tested-by: Jungseok Lee <jungseoklee85@gmail.com>
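
For readers who want to see the shape of the resulting bootstrap in one place: the sketch below is a small standalone C model of what early_ioremap_init() now does via pud_populate() and pmd_populate_kernel(), namely linking statically allocated next-level tables (standing in for bm_pmd and bm_pte) under the entry that covers the fixmap virtual address. It is not kernel code; the fake_* and *_idx names, the table geometry, and the descriptor bits are assumptions made purely for illustration.

/*
 * Userspace model of the fixmap bootstrap now done entirely by
 * early_ioremap_init(): statically allocated next-level tables
 * (fake_pmd/fake_pte, standing in for the kernel's bm_pmd/bm_pte)
 * are hooked under the top-level entry covering one fixed virtual
 * address. Geometry, names and descriptor bits are simplified
 * assumptions, not arm64's real definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define ENTRIES     512                  /* assumed 512 entries per table */
#define PAGE_SHIFT  12                   /* assumed 4K granule            */
#define PMD_SHIFT   (PAGE_SHIFT + 9)     /* one pte table maps 2M         */
#define PGD_SHIFT   (PMD_SHIFT + 9)      /* one pmd table maps 1G         */
#define TABLE_BIT   0x3ULL               /* pretend "valid table" marker  */

static uint64_t fake_pgd[ENTRIES];       /* top level (pud folded in)     */
static uint64_t fake_pmd[ENTRIES];       /* stands in for bm_pmd          */
static uint64_t fake_pte[ENTRIES];       /* stands in for bm_pte          */

static unsigned int pgd_idx(uint64_t va) { return (va >> PGD_SHIFT) & (ENTRIES - 1); }
static unsigned int pmd_idx(uint64_t va) { return (va >> PMD_SHIFT) & (ENTRIES - 1); }
static unsigned int pte_idx(uint64_t va) { return (va >> PAGE_SHIFT) & (ENTRIES - 1); }

int main(void)
{
	/* Hypothetical fixmap address, standing in for fix_to_virt(FIX_BTMAP_BEGIN). */
	uint64_t addr = 0x0000007fbffff000ULL;

	/* Like pud_populate(): hook the static pmd table under the top-level entry. */
	fake_pgd[pgd_idx(addr)] = (uint64_t)(uintptr_t)fake_pmd | TABLE_BIT;

	/* Like pmd_populate_kernel(): hook the static pte table under the pmd entry. */
	fake_pmd[pmd_idx(addr)] = (uint64_t)(uintptr_t)fake_pte | TABLE_BIT;

	printf("pgd[%u] -> pmd table, pmd[%u] -> pte table, pte slot %u ready\n",
	       pgd_idx(addr), pmd_idx(addr), pte_idx(addr));
	return 0;
}

With the head.S code gone, this runtime wiring is the only place the fixmap tables get hooked up, which is why the swapper reservation can shrink by a page.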
Diffstat (limited to 'arch/arm64')
-rw-r--r-- | arch/arm64/include/asm/page.h |  8
-rw-r--r-- | arch/arm64/kernel/head.S      |  7
-rw-r--r-- | arch/arm64/mm/ioremap.c       | 26
3 files changed, 22 insertions, 19 deletions
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index a6331e6a92b..e84ca637af6 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -33,11 +33,11 @@
 
 /*
  * The idmap and swapper page tables need some space reserved in the kernel
- * image. The idmap only requires a pgd and a next level table to (section) map
- * the kernel, while the swapper also maps the FDT and requires an additional
- * table to map an early UART. See __create_page_tables for more information.
+ * image. Both require a pgd and a next level table to (section) map the
+ * kernel. The the swapper also maps the FDT (see __create_page_tables for
+ * more information).
  */
-#define SWAPPER_DIR_SIZE	(3 * PAGE_SIZE)
+#define SWAPPER_DIR_SIZE	(2 * PAGE_SIZE)
 #define IDMAP_DIR_SIZE		(2 * PAGE_SIZE)
 
 #ifndef __ASSEMBLY__
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 69dafe9621f..fa3b7fb8a77 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -583,13 +583,6 @@ __create_page_tables:
 	create_block_map x0, x7, x3, x5, x6
 1:
 	/*
-	 * Create the pgd entry for the fixed mappings.
-	 */
-	ldr	x5, =FIXADDR_TOP		// Fixed mapping virtual address
-	add	x0, x26, #2 * PAGE_SIZE		// section table address
-	create_pgd_entry x26, x0, x5, x6, x7
-
-	/*
 	 * Since the page tables have been populated with non-cacheable
 	 * accesses (MMU disabled), invalidate the idmap and swapper page
 	 * tables again to remove any speculatively loaded cache lines.
diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
index 7ec328392ae..69000efa015 100644
--- a/arch/arm64/mm/ioremap.c
+++ b/arch/arm64/mm/ioremap.c
@@ -103,19 +103,25 @@ void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
 }
 EXPORT_SYMBOL(ioremap_cache);
 
-#ifndef CONFIG_ARM64_64K_PAGES
 static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
+#ifndef CONFIG_ARM64_64K_PAGES
+static pte_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
 #endif
 
-static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
+static inline pud_t * __init early_ioremap_pud(unsigned long addr)
 {
 	pgd_t *pgd;
-	pud_t *pud;
 
 	pgd = pgd_offset_k(addr);
 	BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));
 
-	pud = pud_offset(pgd, addr);
+	return pud_offset(pgd, addr);
+}
+
+static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
+{
+	pud_t *pud = early_ioremap_pud(addr);
+
 	BUG_ON(pud_none(*pud) || pud_bad(*pud));
 
 	return pmd_offset(pud, addr);
@@ -132,13 +138,17 @@ static inline pte_t * __init early_ioremap_pte(unsigned long addr)
 
 void __init early_ioremap_init(void)
 {
+	pgd_t *pgd;
+	pud_t *pud;
 	pmd_t *pmd;
+	unsigned long addr = fix_to_virt(FIX_BTMAP_BEGIN);
 
-	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
-#ifndef CONFIG_ARM64_64K_PAGES
-	/* need to populate pmd for 4k pagesize only */
+	pgd = pgd_offset_k(addr);
+	pud = pud_offset(pgd, addr);
+	pud_populate(&init_mm, pud, bm_pmd);
+	pmd = pmd_offset(pud, addr);
 	pmd_populate_kernel(&init_mm, pmd, bm_pte);
-#endif
+
 	/*
 	 * The boot-ioremap range spans multiple pmds, for which
 	 * we are not prepared:
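
The page.h hunk is the bookkeeping side of the same change: with the fixmap table no longer pre-built in head.S, swapper_pg_dir only needs a pgd plus one next-level table. A quick standalone sanity check of the arithmetic follows (assuming a 4K granule; the *_OLD/*_NEW names are invented for this illustration, only SWAPPER_DIR_SIZE and IDMAP_DIR_SIZE exist in asm/page.h):

#include <stdio.h>

/* Assumed 4K granule; this is an illustration, not kernel code. */
#define PAGE_SIZE            4096UL
#define SWAPPER_DIR_SIZE_OLD (3 * PAGE_SIZE)   /* pgd + table + fixmap table */
#define SWAPPER_DIR_SIZE_NEW (2 * PAGE_SIZE)   /* pgd + one next-level table */
#define IDMAP_DIR_SIZE       (2 * PAGE_SIZE)

int main(void)
{
	printf("swapper reservation: %lu -> %lu bytes (saves %lu, i.e. one page)\n",
	       SWAPPER_DIR_SIZE_OLD, SWAPPER_DIR_SIZE_NEW,
	       SWAPPER_DIR_SIZE_OLD - SWAPPER_DIR_SIZE_NEW);
	printf("idmap reservation stays at %lu bytes\n", IDMAP_DIR_SIZE);
	return 0;
}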