author     Ingo Molnar <mingo@elte.hu>    2008-11-26 08:22:50 +0100
committer  Ingo Molnar <mingo@elte.hu>    2008-11-26 08:22:50 +0100
commit     7fbb8759eff9a348efa5f352ffaa51c364837c4b (patch)
tree       d40cd3f47b9f667ba94d9613270132080dcb6a1a /arch/arm/mm/mmu.c
parent     6003ab0bad4cc56f3c4fadf62a0d23a967b9c53b (diff)
parent     13d428afc007fcfcd6deeb215618f54cf9c0cae6 (diff)
Merge commit 'v2.6.28-rc6' into core/debug
Diffstat (limited to 'arch/arm/mm/mmu.c')
-rw-r--r--  arch/arm/mm/mmu.c | 211
1 file changed, 137 insertions(+), 74 deletions(-)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index a713e40e1f1..7f36c825718 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -15,6 +15,7 @@
#include <linux/mman.h>
#include <linux/nodemask.h>
+#include <asm/cputype.h>
#include <asm/mach-types.h>
#include <asm/setup.h>
#include <asm/sizes.h>
@@ -27,9 +28,6 @@
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-extern void _stext, _etext, __data_start, _end;
-extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
-
/*
* empty_zero_page is a special page that is used for
* zero-initialized data and COW.
@@ -68,27 +66,27 @@ static struct cachepolicy cache_policies[] __initdata = {
.policy = "uncached",
.cr_mask = CR_W|CR_C,
.pmd = PMD_SECT_UNCACHED,
- .pte = 0,
+ .pte = L_PTE_MT_UNCACHED,
}, {
.policy = "buffered",
.cr_mask = CR_C,
.pmd = PMD_SECT_BUFFERED,
- .pte = PTE_BUFFERABLE,
+ .pte = L_PTE_MT_BUFFERABLE,
}, {
.policy = "writethrough",
.cr_mask = 0,
.pmd = PMD_SECT_WT,
- .pte = PTE_CACHEABLE,
+ .pte = L_PTE_MT_WRITETHROUGH,
}, {
.policy = "writeback",
.cr_mask = 0,
.pmd = PMD_SECT_WB,
- .pte = PTE_BUFFERABLE|PTE_CACHEABLE,
+ .pte = L_PTE_MT_WRITEBACK,
}, {
.policy = "writealloc",
.cr_mask = 0,
.pmd = PMD_SECT_WBWA,
- .pte = PTE_BUFFERABLE|PTE_CACHEABLE,
+ .pte = L_PTE_MT_WRITEALLOC,
}
};
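
The hunk above reworks cache_policies[] so that each policy's .pte field carries a single symbolic L_PTE_MT_* memory-type value instead of raw PTE_CACHEABLE/PTE_BUFFERABLE bits; the per-CPU set_pte_ext() implementations then translate that value into whatever hardware C/B/TEX encoding the CPU uses. A minimal standalone sketch of that translation step, assuming ARMv6-style extended small pages (B at bit 2, C at bit 3, TEX at bits 8:6) and illustrative index names, not the kernel's actual definitions:

#include <stdio.h>

/* Illustrative memory-type indices, in the spirit of L_PTE_MT_* (names and values assumed). */
enum mem_type_idx {
	MT_IDX_UNCACHED,
	MT_IDX_BUFFERABLE,
	MT_IDX_WRITETHROUGH,
	MT_IDX_WRITEBACK,
	MT_IDX_WRITEALLOC,
};

/* ARMv6-style extended small page attribute bits: B=bit 2, C=bit 3, TEX=bits 8:6. */
#define HW_PTE_B	(1u << 2)
#define HW_PTE_C	(1u << 3)
#define HW_PTE_TEX(x)	((unsigned int)(x) << 6)

/*
 * Translate a symbolic memory-type index into hardware bits, roughly what the
 * per-CPU set_pte_ext() implementations do now that the generic tables store
 * only the index.
 */
static unsigned int mem_type_to_hw(enum mem_type_idx idx)
{
	switch (idx) {
	case MT_IDX_UNCACHED:     return 0;
	case MT_IDX_BUFFERABLE:   return HW_PTE_B;
	case MT_IDX_WRITETHROUGH: return HW_PTE_C;
	case MT_IDX_WRITEBACK:    return HW_PTE_C | HW_PTE_B;
	case MT_IDX_WRITEALLOC:   return HW_PTE_TEX(1) | HW_PTE_C | HW_PTE_B;
	}
	return 0;
}

int main(void)
{
	printf("writeback -> %#x\n", mem_type_to_hw(MT_IDX_WRITEBACK));
	return 0;
}

The point of the indirection is that the generic table no longer needs to know which hardware encoding a given CPU uses for, say, write-allocate memory.
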
@@ -182,39 +180,38 @@ void adjust_cr(unsigned long mask, unsigned long set)
#endif
#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_WRITE
-#define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_XN|PMD_SECT_AP_WRITE
+#define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
static struct mem_type mem_types[] = {
[MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
- .prot_pte = PROT_PTE_DEVICE,
+ .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
+ L_PTE_SHARED,
.prot_l1 = PMD_TYPE_TABLE,
- .prot_sect = PROT_SECT_DEVICE | PMD_SECT_UNCACHED,
+ .prot_sect = PROT_SECT_DEVICE | PMD_SECT_S,
.domain = DOMAIN_IO,
},
[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
- .prot_pte = PROT_PTE_DEVICE,
- .prot_pte_ext = PTE_EXT_TEX(2),
+ .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
.prot_l1 = PMD_TYPE_TABLE,
- .prot_sect = PROT_SECT_DEVICE | PMD_SECT_TEX(2),
+ .prot_sect = PROT_SECT_DEVICE,
.domain = DOMAIN_IO,
},
[MT_DEVICE_CACHED] = { /* ioremap_cached */
- .prot_pte = PROT_PTE_DEVICE | L_PTE_CACHEABLE | L_PTE_BUFFERABLE,
+ .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
.prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PROT_SECT_DEVICE | PMD_SECT_WB,
.domain = DOMAIN_IO,
},
- [MT_DEVICE_IXP2000] = { /* IXP2400 requires XCB=101 for on-chip I/O */
- .prot_pte = PROT_PTE_DEVICE,
+ [MT_DEVICE_WC] = { /* ioremap_wc */
+ .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
.prot_l1 = PMD_TYPE_TABLE,
- .prot_sect = PROT_SECT_DEVICE | PMD_SECT_BUFFERABLE |
- PMD_SECT_TEX(1),
+ .prot_sect = PROT_SECT_DEVICE,
.domain = DOMAIN_IO,
},
- [MT_DEVICE_WC] = { /* ioremap_wc */
+ [MT_UNCACHED] = {
.prot_pte = PROT_PTE_DEVICE,
.prot_l1 = PMD_TYPE_TABLE,
- .prot_sect = PROT_SECT_DEVICE,
+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
.domain = DOMAIN_IO,
},
[MT_CACHECLEAN] = {
@@ -259,7 +256,7 @@ static void __init build_mem_type_table(void)
{
struct cachepolicy *cp;
unsigned int cr = get_cr();
- unsigned int user_pgprot, kern_pgprot;
+ unsigned int user_pgprot, kern_pgprot, vecs_pgprot;
int cpu_arch = cpu_architecture();
int i;
@@ -277,27 +274,28 @@ static void __init build_mem_type_table(void)
cachepolicy = CPOLICY_WRITEBACK;
ecc_mask = 0;
}
+#ifdef CONFIG_SMP
+ cachepolicy = CPOLICY_WRITEALLOC;
+#endif
/*
- * On non-Xscale3 ARMv5-and-older systems, use CB=01
- * (Uncached/Buffered) for ioremap_wc() mappings. On XScale3
- * and ARMv6+, use TEXCB=00100 mappings (Inner/Outer Uncacheable
- * in xsc3 parlance, Uncached Normal in ARMv6 parlance).
+ * Strip out features not present on earlier architectures.
+ * Pre-ARMv5 CPUs don't have TEX bits. Pre-ARMv6 CPUs or those
+ * without extended page tables don't have the 'Shared' bit.
*/
- if (cpu_is_xsc3() || cpu_arch >= CPU_ARCH_ARMv6) {
- mem_types[MT_DEVICE_WC].prot_pte_ext |= PTE_EXT_TEX(1);
- mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
- } else {
- mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_BUFFERABLE;
- mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
- }
+ if (cpu_arch < CPU_ARCH_ARMv5)
+ for (i = 0; i < ARRAY_SIZE(mem_types); i++)
+ mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
+ if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
+ for (i = 0; i < ARRAY_SIZE(mem_types); i++)
+ mem_types[i].prot_sect &= ~PMD_SECT_S;
/*
- * ARMv5 and lower, bit 4 must be set for page tables.
- * (was: cache "update-able on write" bit on ARM610)
- * However, Xscale cores require this bit to be cleared.
+ * ARMv5 and lower, bit 4 must be set for page tables (was: cache
+ * "update-able on write" bit on ARM610). However, Xscale and
+ * Xscale3 require this bit to be cleared.
*/
- if (cpu_is_xscale()) {
+ if (cpu_is_xscale() || cpu_is_xsc3()) {
for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
mem_types[i].prot_sect &= ~PMD_BIT4;
mem_types[i].prot_l1 &= ~PMD_BIT4;
@@ -311,19 +309,81 @@ static void __init build_mem_type_table(void)
}
}
+ /*
+ * Mark the device areas according to the CPU/architecture.
+ */
+ if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
+ if (!cpu_is_xsc3()) {
+ /*
+ * Mark device regions on ARMv6+ as execute-never
+ * to prevent speculative instruction fetches.
+ */
+ mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
+ mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
+ mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
+ mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
+ }
+ if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
+ /*
+ * For ARMv7 with TEX remapping,
+ * - shared device is SXCB=1100
+ * - nonshared device is SXCB=0100
+ * - write combine device mem is SXCB=0001
+ * (Uncached Normal memory)
+ */
+ mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
+ mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
+ mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
+ } else if (cpu_is_xsc3()) {
+ /*
+ * For Xscale3,
+ * - shared device is TEXCB=00101
+ * - nonshared device is TEXCB=01000
+ * - write combine device mem is TEXCB=00100
+ * (Inner/Outer Uncacheable in xsc3 parlance)
+ */
+ mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
+ mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
+ mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
+ } else {
+ /*
+ * For ARMv6 and ARMv7 without TEX remapping,
+ * - shared device is TEXCB=00001
+ * - nonshared device is TEXCB=01000
+ * - write combine device mem is TEXCB=00100
+ * (Uncached Normal in ARMv6 parlance).
+ */
+ mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
+ mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
+ mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
+ }
+ } else {
+ /*
+ * On others, write combining is "Uncached/Buffered"
+ */
+ mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
+ }
+
+ /*
+ * Now deal with the memory-type mappings
+ */
cp = &cache_policies[cachepolicy];
- kern_pgprot = user_pgprot = cp->pte;
+ vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
+
+#ifndef CONFIG_SMP
+ /*
+ * Only use write-through for non-SMP systems
+ */
+ if (cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
+ vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;
+#endif
/*
* Enable CPU-specific coherency if supported.
* (Only available on XSC3 at the moment.)
*/
- if (arch_is_coherent()) {
- if (cpu_is_xsc3()) {
- mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
- mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
- }
- }
+ if (arch_is_coherent() && cpu_is_xsc3())
+ mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
/*
* ARMv6 and above have extended page tables.
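
The comments in the device-mapping block above describe each mapping by its TEXCB encoding. In the ARM short-descriptor section entry, B is bit 2, C is bit 3, TEX occupies bits 14:12 and S is bit 16, which is what the PMD_SECT_* macros encode. A small standalone sketch that recomputes the TEXCB strings quoted in the "ARMv6 and ARMv7 without TEX remapping" branch of the hunk (the bit positions are restated here as assumptions mirroring pgtable-hwdef.h):

#include <stdio.h>

/* Section-descriptor attribute bits, assuming the ARM short-descriptor layout
 * (B=bit 2, C=bit 3, XN=bit 4, TEX=bits 14:12, S=bit 16), mirroring PMD_SECT_*. */
#define SECT_BUFFERABLE	(1u << 2)
#define SECT_CACHEABLE	(1u << 3)
#define SECT_XN		(1u << 4)
#define SECT_TEX(x)	((unsigned int)(x) << 12)
#define SECT_S		(1u << 16)

/* Print the TEXCB field of a section descriptor's attribute bits. */
static void show(const char *name, unsigned int bits)
{
	unsigned int texcb = (((bits >> 12) & 7) << 2) |
			     (((bits >> 3) & 1) << 1) |
			      ((bits >> 2) & 1);

	printf("%-20s TEXCB=%u%u%u%u%u\n", name,
	       (texcb >> 4) & 1, (texcb >> 3) & 1, (texcb >> 2) & 1,
	       (texcb >> 1) & 1, texcb & 1);
}

int main(void)
{
	/* Encodings named in the "ARMv6/ARMv7 without TEX remapping" branch above. */
	show("shared device",    SECT_BUFFERABLE);	/* TEXCB=00001 */
	show("nonshared device", SECT_TEX(2));		/* TEXCB=01000 */
	show("write combine",    SECT_TEX(1));		/* TEXCB=00100 */
	return 0;
}

Running it prints TEXCB=00001, 01000 and 00100 for the shared, nonshared and write-combining device types respectively, matching the comments above.
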
@@ -337,42 +397,24 @@ static void __init build_mem_type_table(void)
mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
- /*
- * Mark the device area as "shared device"
- */
- mem_types[MT_DEVICE].prot_pte |= L_PTE_BUFFERABLE;
- mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
-
#ifdef CONFIG_SMP
/*
* Mark memory with the "shared" attribute for SMP systems
*/
user_pgprot |= L_PTE_SHARED;
kern_pgprot |= L_PTE_SHARED;
+ vecs_pgprot |= L_PTE_SHARED;
mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
#endif
}
for (i = 0; i < 16; i++) {
unsigned long v = pgprot_val(protection_map[i]);
- v = (v & ~(L_PTE_BUFFERABLE|L_PTE_CACHEABLE)) | user_pgprot;
- protection_map[i] = __pgprot(v);
+ protection_map[i] = __pgprot(v | user_pgprot);
}
- mem_types[MT_LOW_VECTORS].prot_pte |= kern_pgprot;
- mem_types[MT_HIGH_VECTORS].prot_pte |= kern_pgprot;
-
- if (cpu_arch >= CPU_ARCH_ARMv5) {
-#ifndef CONFIG_SMP
- /*
- * Only use write-through for non-SMP systems
- */
- mem_types[MT_LOW_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
- mem_types[MT_HIGH_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
-#endif
- } else {
- mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
- }
+ mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
+ mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;
pgprot_user = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
@@ -420,8 +462,7 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
pte = pte_offset_kernel(pmd, addr);
do {
- set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)),
- type->prot_pte_ext);
+ set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
pfn++;
} while (pte++, addr += PAGE_SIZE, addr != end);
}
@@ -588,12 +629,35 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
create_mapping(io_desc + i);
}
+static unsigned long __initdata vmalloc_reserve = SZ_128M;
+
+/*
+ * vmalloc=size forces the vmalloc area to be exactly 'size'
+ * bytes. This can be used to increase (or decrease) the vmalloc
+ * area - the default is 128m.
+ */
+static void __init early_vmalloc(char **arg)
+{
+ vmalloc_reserve = memparse(*arg, arg);
+
+ if (vmalloc_reserve < SZ_16M) {
+ vmalloc_reserve = SZ_16M;
+ printk(KERN_WARNING
+ "vmalloc area too small, limiting to %luMB\n",
+ vmalloc_reserve >> 20);
+ }
+}
+__early_param("vmalloc=", early_vmalloc);
+
+#define VMALLOC_MIN (void *)(VMALLOC_END - vmalloc_reserve)
+
static int __init check_membank_valid(struct membank *mb)
{
/*
- * Check whether this memory region has non-zero size.
+ * Check whether this memory region has non-zero size or
+ * invalid node number.
*/
- if (mb->size == 0)
+ if (mb->size == 0 || mb->node >= MAX_NUMNODES)
return 0;
/*
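
On the new vmalloc= early parameter added above: booting with, for example, vmalloc=256M makes early_vmalloc() set vmalloc_reserve to 256MB (clamped to a 16MB minimum), and VMALLOC_MIN moves down accordingly. memparse() accepts a number with an optional K/M/G suffix; a rough standalone sketch of that style of parsing (not the kernel's implementation) follows:

#include <stdio.h>
#include <stdlib.h>

/* Parse "<number>[K|M|G]", roughly in the spirit of the kernel's memparse(). */
static unsigned long parse_size(const char *s, char **retptr)
{
	char *end;
	unsigned long val = strtoul(s, &end, 0);	/* base 0 also accepts 0x... */

	switch (*end) {
	case 'G': case 'g': val <<= 10; /* fall through */
	case 'M': case 'm': val <<= 10; /* fall through */
	case 'K': case 'k': val <<= 10; end++; break;
	default: break;
	}
	if (retptr)
		*retptr = end;
	return val;
}

int main(void)
{
	char *rest;
	unsigned long reserve = parse_size("256M", &rest);

	if (reserve < (16UL << 20))	/* same 16MB floor that early_vmalloc() applies */
		reserve = 16UL << 20;
	printf("vmalloc reserve: %luMB\n", reserve >> 20);
	return 0;
}
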
@@ -627,8 +691,7 @@ static int __init check_membank_valid(struct membank *mb)
static void __init sanity_check_meminfo(struct meminfo *mi)
{
- int i;
- int j;
+ int i, j;
for (i = 0, j = 0; i < mi->nr_banks; i++) {
if (check_membank_valid(&mi->bank[i]))
@@ -644,7 +707,7 @@ static inline void prepare_page_table(struct meminfo *mi)
/*
* Clear out all the mappings below the kernel image.
*/
- for (addr = 0; addr < MODULE_START; addr += PGDIR_SIZE)
+ for (addr = 0; addr < MODULES_VADDR; addr += PGDIR_SIZE)
pmd_clear(pmd_off_k(addr));
#ifdef CONFIG_XIP_KERNEL
@@ -756,7 +819,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
*/
#ifdef CONFIG_XIP_KERNEL
map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
- map.virtual = MODULE_START;
+ map.virtual = MODULES_VADDR;
map.length = ((unsigned long)&_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
map.type = MT_ROM;
create_mapping(&map);
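
The MT_DEVICE* memory types reworked throughout this patch are the ones platform code selects when it registers static I/O mappings through iotable_init(), whose signature appears in the hunk above. A hedged sketch of a typical board-level mapping table; the device name, addresses and header locations are assumptions for illustration, not taken from any real platform:

#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/sizes.h>
#include <asm/memory.h>
#include <asm/mach/map.h>

/* Hypothetical register block; the name and addresses are illustrative only. */
#define BOARD_UART_PHYS		0x10000000UL
#define BOARD_UART_VIRT		0xf8000000UL

static struct map_desc board_io_desc[] __initdata = {
	{
		.virtual	= BOARD_UART_VIRT,
		.pfn		= __phys_to_pfn(BOARD_UART_PHYS),
		.length		= SZ_4K,
		.type		= MT_DEVICE,	/* strongly-ordered / shared device */
	},
};

/* Map the board's static I/O regions; called early from the machine's map_io hook. */
static void __init board_map_io(void)
{
	iotable_init(board_io_desc, ARRAY_SIZE(board_io_desc));
}

A platform would normally point its machine_desc .map_io hook at a function like board_map_io(), so the mapping is created by devicemaps_init()/paging_init() before any driver touches the registers.
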