Diffstat (limited to 'arch/mips/mm')
-rw-r--r--   arch/mips/mm/init.c       |  17
-rw-r--r--   arch/mips/mm/pgtable-64.c |  20
-rw-r--r--   arch/mips/mm/tlbex.c      |  23
3 files changed, 26 insertions, 34 deletions
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 1a85ba92eb5..be9acb2b959 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -469,19 +469,20 @@ void __init_refok free_initmem(void)
 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
 unsigned long pgd_current[NR_CPUS];
 #endif
-/*
- * On 64-bit we've got three-level pagetables with a slightly
- * different layout ...
- */
-#define __page_aligned(order) __attribute__((__aligned__(PAGE_SIZE<<order)))
 /*
  * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER
  * are constants.  So we use the variants from asm-offset.h until that gcc
  * will officially be retired.
+ *
+ * Align swapper_pg_dir in to 64K, allows its address to be loaded
+ * with a single LUI instruction in the TLB handlers.  If we used
+ * __aligned(64K), its size would get rounded up to the alignment
+ * size, and waste space.  So we place it in its own section and align
+ * it in the linker script.
  */
-pgd_t swapper_pg_dir[_PTRS_PER_PGD] __page_aligned(_PGD_ORDER);
+pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
 #ifndef __PAGETABLE_PMD_FOLDED
-pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned(PMD_ORDER);
+pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
 #endif
-pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER);
+pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
diff --git a/arch/mips/mm/pgtable-64.c b/arch/mips/mm/pgtable-64.c
index cda4e300eb0..25407794edb 100644
--- a/arch/mips/mm/pgtable-64.c
+++ b/arch/mips/mm/pgtable-64.c
@@ -26,17 +26,17 @@ void pgd_init(unsigned long page)
 	p = (unsigned long *) page;
 	end = p + PTRS_PER_PGD;
 
-	while (p < end) {
+	do {
 		p[0] = entry;
 		p[1] = entry;
 		p[2] = entry;
 		p[3] = entry;
 		p[4] = entry;
-		p[5] = entry;
-		p[6] = entry;
-		p[7] = entry;
 		p += 8;
-	}
+		p[-3] = entry;
+		p[-2] = entry;
+		p[-1] = entry;
+	} while (p != end);
 }
 
 #ifndef __PAGETABLE_PMD_FOLDED
@@ -47,17 +47,17 @@ void pmd_init(unsigned long addr, unsigned long pagetable)
 	p = (unsigned long *) addr;
 	end = p + PTRS_PER_PMD;
 
-	while (p < end) {
+	do {
 		p[0] = pagetable;
 		p[1] = pagetable;
 		p[2] = pagetable;
 		p[3] = pagetable;
 		p[4] = pagetable;
-		p[5] = pagetable;
-		p[6] = pagetable;
-		p[7] = pagetable;
 		p += 8;
-	}
+		p[-3] = pagetable;
+		p[-2] = pagetable;
+		p[-1] = pagetable;
+	} while (p != end);
 }
 
 #endif
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index e09d4925690..658a520364c 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -599,8 +599,7 @@ static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
 						    unsigned int reg)
 {
 	if (cpu_has_rixi) {
-		UASM_i_SRL(p, reg, reg, ilog2(_PAGE_NO_EXEC));
-		UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+		UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
 	} else {
 #ifdef CONFIG_64BIT_PHYS_ADDR
 		uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL));
@@ -1019,11 +1018,9 @@ static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
 	uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
 	uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
 	if (cpu_has_rixi) {
-		UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
-		UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
-		UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+		UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
 		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
-		UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+		UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL));
 	} else {
 		uasm_i_dsrl_safe(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
 		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
@@ -1046,13 +1043,11 @@ static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
 	if (r45k_bvahwbug())
 		build_tlb_probe_entry(p);
 	if (cpu_has_rixi) {
-		UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
-		UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
-		UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+		UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
 		if (r4k_250MHZhwbug())
 			UASM_i_MTC0(p, 0, C0_ENTRYLO0);
 		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
-		UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+		UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL));
 	} else {
 		UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
 		if (r4k_250MHZhwbug())
@@ -1212,13 +1207,9 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
 		UASM_i_LW(p, odd, sizeof(pte_t), ptr); /* get odd pte */
 	}
 	if (cpu_has_rixi) {
-		uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_NO_EXEC));
-		uasm_i_dsrl_safe(p, odd, odd, ilog2(_PAGE_NO_EXEC));
-		uasm_i_drotr(p, even, even,
-			     ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+		uasm_i_drotr(p, even, even, ilog2(_PAGE_GLOBAL));
 		UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
-		uasm_i_drotr(p, odd, odd,
-			     ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+		uasm_i_drotr(p, odd, odd, ilog2(_PAGE_GLOBAL));
 	} else {
 		uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_GLOBAL));
 		UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
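
Note: every tlbex.c hunk above collapses an SRL/ROTR (or dsrl/drotr) pair into a single rotate. As far as this diff shows, that works because the no-exec/no-read PTE bits now sit directly below _PAGE_GLOBAL (the pgtable-bits.h rearrangement is outside this diffstat), so one ROTR by ilog2(_PAGE_GLOBAL) lands the global bit at bit 0 of EntryLo and wraps the bits below it around to the top of the register, roughly where the RI/XI flags live. The following minimal user-space sketch only illustrates that rotation; the EX_PAGE_* positions and rotr32() helper are invented for the example and are not the kernel's real layout or API.

/*
 * Sketch of the PTE -> EntryLo rotate trick.  Bit positions are
 * invented for illustration; they are NOT the kernel's pgtable-bits.h
 * values.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_NO_EXEC	(1u << 0)	/* assumed: directly below the global bit */
#define EX_PAGE_GLOBAL	(1u << 1)
#define EX_PFN_SHIFT	2		/* assumed: PFN starts right above it */

/* Behaves like the MIPS ROTR instruction on a 32-bit register (0 < n < 32). */
static uint32_t rotr32(uint32_t x, unsigned int n)
{
	return (x >> n) | (x << (32 - n));
}

int main(void)
{
	uint32_t pte = (0x1234u << EX_PFN_SHIFT) | EX_PAGE_GLOBAL | EX_PAGE_NO_EXEC;

	/*
	 * One rotate by ilog2(EX_PAGE_GLOBAL): the global bit lands at
	 * bit 0 (the EntryLo G bit) and the no-exec bit wraps around to
	 * the top of the word, roughly where EntryLo carries RI/XI.
	 */
	uint32_t entrylo = rotr32(pte, 1);

	printf("pte=0x%08x entrylo=0x%08x G=%u XI=%u\n",
	       pte, entrylo, entrylo & 1, entrylo >> 31);
	return 0;
}

Compiled and run, the sketch prints pte=0x000048d3 entrylo=0x80002469 G=1 XI=1, i.e. the global bit migrates to bit 0 and the no-exec bit to the top bit in a single rotate, which is why the separate SRL instructions could be dropped.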