path: root/arch/arm/mm/mm-armv.c
Diffstat (limited to 'arch/arm/mm/mm-armv.c')
-rw-r--r--  arch/arm/mm/mm-armv.c  40
1 file changed, 23 insertions(+), 17 deletions(-)
diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c
index 95273de4f77..38769f5862b 100644
--- a/arch/arm/mm/mm-armv.c
+++ b/arch/arm/mm/mm-armv.c
@@ -9,7 +9,6 @@
*
* Page table sludge for ARM v3 and v4 processor architectures.
*/
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/init.h>
@@ -227,7 +226,7 @@ void free_pgd_slow(pgd_t *pgd)
pte = pmd_page(*pmd);
pmd_clear(pmd);
- dec_page_state(nr_page_table_pages);
+ dec_zone_page_state(virt_to_page((unsigned long *)pgd), NR_PAGETABLE);
pte_lock_deinit(pte);
pte_free(pte);
pmd_free(pmd);
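
This hunk is part of the zoned VM counter conversion: the global nr_page_table_pages counter becomes the per-zone NR_PAGETABLE item, so the decrement now takes the struct page in order to charge the right zone. A minimal sketch of how the two halves of that accounting pair up; the helper names below are hypothetical, and in this era the matching increment lives in the generic page-table allocation paths rather than in this file:

#include <linux/mm.h>
#include <linux/vmstat.h>

/* Hypothetical helpers, for illustration only. */
static inline void example_account_pte_alloc(struct page *pte_page)
{
	/* was: inc_page_state(nr_page_table_pages); */
	inc_zone_page_state(pte_page, NR_PAGETABLE);
}

static inline void example_account_pte_free(struct page *pte_page)
{
	/* was: dec_page_state(nr_page_table_pages); */
	dec_zone_page_state(pte_page, NR_PAGETABLE);
}
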
@@ -303,16 +302,16 @@ static struct mem_types mem_types[] __initdata = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_WRITE,
.prot_l1 = PMD_TYPE_TABLE,
- .prot_sect = PMD_TYPE_SECT | PMD_SECT_UNCACHED |
+ .prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_UNCACHED |
PMD_SECT_AP_WRITE,
.domain = DOMAIN_IO,
},
[MT_CACHECLEAN] = {
- .prot_sect = PMD_TYPE_SECT,
+ .prot_sect = PMD_TYPE_SECT | PMD_BIT4,
.domain = DOMAIN_KERNEL,
},
[MT_MINICLEAN] = {
- .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE,
+ .prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_MINICACHE,
.domain = DOMAIN_KERNEL,
},
[MT_LOW_VECTORS] = {
@@ -328,25 +327,25 @@ static struct mem_types mem_types[] __initdata = {
.domain = DOMAIN_USER,
},
[MT_MEMORY] = {
- .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
+ .prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_AP_WRITE,
.domain = DOMAIN_KERNEL,
},
[MT_ROM] = {
- .prot_sect = PMD_TYPE_SECT,
+ .prot_sect = PMD_TYPE_SECT | PMD_BIT4,
.domain = DOMAIN_KERNEL,
},
[MT_IXP2000_DEVICE] = { /* IXP2400 requires XCB=101 for on-chip I/O */
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_WRITE,
.prot_l1 = PMD_TYPE_TABLE,
- .prot_sect = PMD_TYPE_SECT | PMD_SECT_UNCACHED |
+ .prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_UNCACHED |
PMD_SECT_AP_WRITE | PMD_SECT_BUFFERABLE |
PMD_SECT_TEX(1),
.domain = DOMAIN_IO,
},
[MT_NONSHARED_DEVICE] = {
.prot_l1 = PMD_TYPE_TABLE,
- .prot_sect = PMD_TYPE_SECT | PMD_SECT_NONSHARED_DEV |
+ .prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_NONSHARED_DEV |
PMD_SECT_AP_WRITE,
.domain = DOMAIN_IO,
}
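
For context on PMD_BIT4: in an ARMv4/ARMv5 first-level section descriptor, bit 4 is an implementation-defined bit that these CPUs want set, XScale cores require it to be zero, and on ARMv6 the same bit position is XN (execute never). That is why the hunks above bake PMD_BIT4 into the static mem_types[] table and the boot-time code in the next hunk strips or keeps it per CPU. A small, userspace-compilable sketch of the bits involved (the values mirror asm-arm/pgtable-hwdef.h as I read it, so treat them as illustrative):

#include <stdio.h>

#define PMD_TYPE_SECT		(2 << 0)	/* bits [1:0] = 0b10: 1MB section mapping */
#define PMD_BIT4		(1 << 4)	/* set on ARMv4/v5, must be 0 on XScale, XN on ARMv6 */
#define PMD_SECT_AP_WRITE	(1 << 10)	/* kernel read/write access permission */

int main(void)
{
	unsigned int mt_memory = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_AP_WRITE;

	printf("MT_MEMORY section entry:       %#x\n", mt_memory);
	printf("same entry after XScale fixup: %#x\n", mt_memory & ~PMD_BIT4);
	return 0;
}
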
@@ -376,14 +375,21 @@ void __init build_mem_type_table(void)
ecc_mask = 0;
}
- if (cpu_arch <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale()) {
- for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
+ /*
+ * Xscale must not have PMD bit 4 set for section mappings.
+ */
+ if (cpu_is_xscale())
+ for (i = 0; i < ARRAY_SIZE(mem_types); i++)
+ mem_types[i].prot_sect &= ~PMD_BIT4;
+
+ /*
+ * On ARMv5 and lower, excluding Xscale, bit 4 must be set for
+ * page tables.
+ */
+ if (cpu_arch < CPU_ARCH_ARMv6 && !cpu_is_xscale())
+ for (i = 0; i < ARRAY_SIZE(mem_types); i++)
if (mem_types[i].prot_l1)
mem_types[i].prot_l1 |= PMD_BIT4;
- if (mem_types[i].prot_sect)
- mem_types[i].prot_sect |= PMD_BIT4;
- }
- }
cp = &cache_policies[cachepolicy];
kern_pgprot = user_pgprot = cp->pte;
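
The rewrite here changes the shape of the fixup: previously PMD_BIT4 was OR'd into both prot_l1 and prot_sect at boot for everything up to ARMv5TEJ that was not XScale; now the section entries carry the bit statically (hunks above), XScale strips it, and only the page-table descriptors still get it OR'd in for pre-v6 CPUs. A condensed restatement of the resulting flow, as a sketch rather than the literal kernel code:

	if (cpu_is_xscale())
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_BIT4;	/* XScale: bit 4 must be clear in sections */

	if (cpu_arch < CPU_ARCH_ARMv6 && !cpu_is_xscale())
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			if (mem_types[i].prot_l1)
				mem_types[i].prot_l1 |= PMD_BIT4;	/* pre-v6: set bit 4 in table descriptors */

On ARMv6 the same bit position is interpreted as XN, which is why the final hunk below switches the MT_MEMORY/MT_ROM clearing from PMD_BIT4 to the more descriptive PMD_SECT_XN.
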
@@ -407,8 +413,8 @@ void __init build_mem_type_table(void)
* bit 4 becomes XN which we must clear for the
* kernel memory mapping.
*/
- mem_types[MT_MEMORY].prot_sect &= ~PMD_BIT4;
- mem_types[MT_ROM].prot_sect &= ~PMD_BIT4;
+ mem_types[MT_MEMORY].prot_sect &= ~PMD_SECT_XN;
+ mem_types[MT_ROM].prot_sect &= ~PMD_SECT_XN;
/*
* Mark cache clean areas and XIP ROM read only