Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig        | 18
-rw-r--r--  arch/arm/mm/consistent.c   |  6
-rw-r--r--  arch/arm/mm/copypage-v6.c  | 16
-rw-r--r--  arch/arm/mm/fault-armv.c   |  7
-rw-r--r--  arch/arm/mm/init.c         | 39
-rw-r--r--  arch/arm/mm/ioremap.c      |  4
-rw-r--r--  arch/arm/mm/mm-armv.c      | 23
7 files changed, 62 insertions(+), 51 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index c54e04c995e..e3c14d6b432 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -120,8 +120,8 @@ config CPU_ARM925T
# ARM926T
config CPU_ARM926T
- bool "Support ARM926T processor" if ARCH_INTEGRATOR
- depends on ARCH_INTEGRATOR || ARCH_VERSATILE_PB || MACH_VERSATILE_AB || ARCH_OMAP730 || ARCH_OMAP16XX
+ bool "Support ARM926T processor"
+ depends on ARCH_INTEGRATOR || ARCH_VERSATILE_PB || MACH_VERSATILE_AB || ARCH_OMAP730 || ARCH_OMAP16XX || MACH_REALVIEW_EB
default y if ARCH_VERSATILE_PB || MACH_VERSATILE_AB || ARCH_OMAP730 || ARCH_OMAP16XX
select CPU_32v5
select CPU_ABRT_EV5TJ
@@ -242,7 +242,7 @@ config CPU_XSCALE
# ARMv6
config CPU_V6
bool "Support ARM V6 processor"
- depends on ARCH_INTEGRATOR
+ depends on ARCH_INTEGRATOR || MACH_REALVIEW_EB
select CPU_32v6
select CPU_ABRT_EV6
select CPU_CACHE_V6
@@ -250,6 +250,18 @@ config CPU_V6
select CPU_COPY_V6
select CPU_TLB_V6
+# ARMv6k
+config CPU_32v6K
+ bool "Support ARM V6K processor extensions" if !SMP
+ depends on CPU_V6
+ default y if SMP
+ help
+ Say Y here if your ARMv6 processor supports the 'K' extension.
+ This enables the kernel to use some instructions not present
+ on previous processors, and as such a kernel built with this
+ enabled will not boot on processors which do not support these
+ instructions.
+
# Figure out what processor architecture version we should be using.
# This defines the compiler instruction set which depends on the machine type.
config CPU_32v3
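
The new CPU_32v6K symbol typically appears in arch code as a compile-time switch. As a hedged illustration (not a copy of an in-tree function), the V6K byte-wide exclusives (ldrexb/strexb) let a byte exchange avoid the pre-V6K interrupt-masking fallback:

/*
 * Illustrative sketch only: a typical CONFIG_CPU_32v6K conditional.
 * The 'K' extension adds byte/halfword exclusive accesses, which are
 * absent on plain ARMv6.
 */
static inline unsigned char xchg_byte(volatile unsigned char *p,
				      unsigned char new)
{
	unsigned char old;

#ifdef CONFIG_CPU_32v6K
	unsigned int tmp;

	asm volatile(
	"1:	ldrexb	%0, [%2]\n"
	"	strexb	%1, %3, [%2]\n"
	"	teq	%1, #0\n"
	"	bne	1b"
		: "=&r" (old), "=&r" (tmp)
		: "r" (p), "r" (new)
		: "cc", "memory");
#else
	unsigned long flags;

	/* pre-V6K fallback: no byte exclusives, so mask interrupts */
	local_irq_save(flags);
	old = *p;
	*p = new;
	local_irq_restore(flags);
#endif
	return old;
}

This is why a kernel built with CPU_32v6K refuses to run on plain V6 parts: the ldrexb path is compiled in unconditionally.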
diff --git a/arch/arm/mm/consistent.c b/arch/arm/mm/consistent.c
index 82f4d5e27c5..47b0b767f08 100644
--- a/arch/arm/mm/consistent.c
+++ b/arch/arm/mm/consistent.c
@@ -397,8 +397,6 @@ static int __init consistent_init(void)
pte_t *pte;
int ret = 0;
- spin_lock(&init_mm.page_table_lock);
-
do {
pgd = pgd_offset(&init_mm, CONSISTENT_BASE);
pmd = pmd_alloc(&init_mm, pgd, CONSISTENT_BASE);
@@ -409,7 +407,7 @@ static int __init consistent_init(void)
}
WARN_ON(!pmd_none(*pmd));
- pte = pte_alloc_kernel(&init_mm, pmd, CONSISTENT_BASE);
+ pte = pte_alloc_kernel(pmd, CONSISTENT_BASE);
if (!pte) {
printk(KERN_ERR "%s: no pte tables\n", __func__);
ret = -ENOMEM;
@@ -419,8 +417,6 @@ static int __init consistent_init(void)
consistent_pte = pte;
} while (0);
- spin_unlock(&init_mm.page_table_lock);
-
return ret;
}
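
The hunks above track the 2.6.15 page-table locking rework: pte_alloc_kernel() now takes init_mm.page_table_lock internally, so the caller-side lock/unlock pair goes away. A minimal sketch of the allocation pattern consistent_init() relies on, assuming the post-rework API shown above:

/*
 * Sketch, assuming the reworked API from the hunk above:
 * pte_alloc_kernel() handles init_mm locking itself.
 */
static pte_t *alloc_kernel_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset(&init_mm, addr);
	pmd_t *pmd = pmd_alloc(&init_mm, pgd, addr);	/* may sleep */

	if (!pmd)
		return NULL;

	/* no spin_lock(&init_mm.page_table_lock) around this any more */
	return pte_alloc_kernel(pmd, addr);
}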
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 27d041574ea..269ce6913ee 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -22,9 +22,7 @@
#endif
#define from_address (0xffff8000)
-#define from_pgprot PAGE_KERNEL
#define to_address (0xffffc000)
-#define to_pgprot PAGE_KERNEL
#define TOP_PTE(x) pte_offset_kernel(top_pmd, x)
@@ -34,7 +32,7 @@ static DEFINE_SPINLOCK(v6_lock);
* Copy the user page. No aliasing to deal with so we can just
* attack the kernel's existing mapping of these pages.
*/
-void v6_copy_user_page_nonaliasing(void *kto, const void *kfrom, unsigned long vaddr)
+static void v6_copy_user_page_nonaliasing(void *kto, const void *kfrom, unsigned long vaddr)
{
copy_page(kto, kfrom);
}
@@ -43,7 +41,7 @@ void v6_copy_user_page_nonaliasing(void *kto, const void *kfrom, unsigned long vaddr)
* Clear the user page. No aliasing to deal with so we can just
* attack the kernel's existing mapping of this page.
*/
-void v6_clear_user_page_nonaliasing(void *kaddr, unsigned long vaddr)
+static void v6_clear_user_page_nonaliasing(void *kaddr, unsigned long vaddr)
{
clear_page(kaddr);
}
@@ -51,7 +49,7 @@ void v6_clear_user_page_nonaliasing(void *kaddr, unsigned long vaddr)
/*
* Copy the page, taking account of the cache colour.
*/
-void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vaddr)
+static void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vaddr)
{
unsigned int offset = CACHE_COLOUR(vaddr);
unsigned long from, to;
@@ -72,8 +70,8 @@ void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vaddr)
*/
spin_lock(&v6_lock);
- set_pte(TOP_PTE(from_address) + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, from_pgprot));
- set_pte(TOP_PTE(to_address) + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, to_pgprot));
+ set_pte(TOP_PTE(from_address) + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, PAGE_KERNEL));
+ set_pte(TOP_PTE(to_address) + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, PAGE_KERNEL));
from = from_address + (offset << PAGE_SHIFT);
to = to_address + (offset << PAGE_SHIFT);
@@ -91,7 +89,7 @@ void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vaddr)
* so remap the kernel page into the same cache colour as the user
* page.
*/
-void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
+static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
{
unsigned int offset = CACHE_COLOUR(vaddr);
unsigned long to = to_address + (offset << PAGE_SHIFT);
@@ -112,7 +110,7 @@ void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
*/
spin_lock(&v6_lock);
- set_pte(TOP_PTE(to_address) + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, to_pgprot));
+ set_pte(TOP_PTE(to_address) + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, PAGE_KERNEL));
flush_tlb_kernel_page(to);
clear_page((void *)to);
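
The offset applied to TOP_PTE() in the aliasing paths is the page's cache colour. A runnable user-space sketch of the arithmetic, assuming ARM's SHMLBA of 4 * PAGE_SIZE (four colours, matching the 16KB gap between from_address and to_address):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define SHMLBA		(4 * PAGE_SIZE)		/* assumed, as on ARM */
#define CACHE_COLOUR(v)	(((v) & (SHMLBA - 1)) >> PAGE_SHIFT)

#define from_address	0xffff8000UL
#define to_address	0xffffc000UL

int main(void)
{
	unsigned long vaddr = 0x40003000UL;	/* example user address */
	unsigned long off = CACHE_COLOUR(vaddr);	/* 0..3 */

	/* same slot selection as the aliasing copy/clear paths */
	printf("colour %lu: from %#lx, to %#lx\n", off,
	       from_address + (off << PAGE_SHIFT),
	       to_address + (off << PAGE_SHIFT));
	return 0;
}

Remapping the kernel alias at an address with the same colour as the user mapping keeps both views in the same cache set, so no flush is needed between the copy and the user's next access.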
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index be4ab3d73c9..7fc1b35a674 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -26,6 +26,11 @@ static unsigned long shared_pte_mask = L_PTE_CACHEABLE;
/*
* We take the easy way out of this problem - we make the
* PTE uncacheable. However, we leave the write buffer on.
+ *
+ * Note that the pte lock held when calling update_mmu_cache must also
+ * guard the pte (somewhere else in the same mm) that we modify here.
+ * Therefore those configurations which might call adjust_pte (those
+ * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock.
*/
static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
{
@@ -127,7 +132,7 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page);
* 2. If we have multiple shared mappings of the same space in
* an object, we need to deal with the cache aliasing issues.
*
- * Note that the page_table_lock will be held.
+ * Note that the pte lock will be held.
*/
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
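
The "pte lock" this comment refers to is the one taken via pte_offset_map_lock(). A minimal sketch of that discipline, assuming the 2.6.15-era two-level walk used elsewhere in this file (inspect_pte is a hypothetical name):

/*
 * Sketch of the pte-lock discipline: the same lock serializes
 * update_mmu_cache() callers and any pte that adjust_pte() touches.
 */
static int inspect_pte(struct mm_struct *mm, unsigned long addr)
{
	spinlock_t *ptl;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		return 0;

	pmd = pmd_offset(pgd, addr);	/* two-level ARM: no pud step */
	if (pmd_none(*pmd))
		return 0;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);	/* takes pte lock */
	/* ... examine or modify *pte here ... */
	pte_unmap_unlock(pte, ptl);
	return 1;
}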
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index f4496813615..c168f322ef8 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -363,20 +363,16 @@ static void __init bootmem_init(struct meminfo *mi)
memcpy(&meminfo, mi, sizeof(meminfo));
-#ifdef CONFIG_XIP_KERNEL
-#error needs fixing
- p->pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & PMD_MASK);
- p->virtual = (unsigned long)&_stext & PMD_MASK;
- p->length = ((unsigned long)&_etext - p->virtual + ~PMD_MASK) & PMD_MASK;
- p->type = MT_ROM;
- p ++;
-#endif
-
/*
* Clear out all the mappings below the kernel image.
- * FIXME: what about XIP?
*/
- for (addr = 0; addr < PAGE_OFFSET; addr += PGDIR_SIZE)
+ for (addr = 0; addr < MODULE_START; addr += PGDIR_SIZE)
+ pmd_clear(pmd_off_k(addr));
+#ifdef CONFIG_XIP_KERNEL
+ /* The XIP kernel is mapped in the module area -- skip over it */
+ addr = ((unsigned long)&_etext + PGDIR_SIZE - 1) & PGDIR_MASK;
+#endif
+ for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE)
pmd_clear(pmd_off_k(addr));
/*
@@ -436,6 +432,18 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
pmd_clear(pmd_off_k(addr));
/*
+ * Map the kernel if it is XIP.
+ * It is always first in the module area.
+ */
+#ifdef CONFIG_XIP_KERNEL
+ map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & PGDIR_MASK);
+ map.virtual = MODULE_START;
+ map.length = ((unsigned long)&_etext - map.virtual + ~PGDIR_MASK) & PGDIR_MASK;
+ map.type = MT_ROM;
+ create_mapping(&map);
+#endif
+
+ /*
* Map the cache flushing regions.
*/
#ifdef FLUSH_BASE
@@ -478,10 +486,17 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
/*
* Ask the machine support to map in the statically mapped devices.
- * After this point, we can start to touch devices again.
*/
if (mdesc->map_io)
mdesc->map_io();
+
+ /*
+ * Finally flush the tlb again - this ensures that we're in a
+ * consistent state wrt the writebuffer if the writebuffer needs
+ * draining. After this point, we can start to touch devices
+ * again.
+ */
+ local_flush_tlb_all();
}
/*
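
The XIP mapping length uses a round-up idiom: since ~PGDIR_MASK equals PGDIR_SIZE - 1, the expression (x + ~PGDIR_MASK) & PGDIR_MASK rounds x up to a whole number of pgd entries. A user-space sketch with hypothetical addresses, assuming ARM's PGDIR_SHIFT of 21 (2 MiB per pgd entry):

#include <stdio.h>

#define PGDIR_SHIFT	21		/* assumed, as on 2-level ARM */
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE - 1))

int main(void)
{
	unsigned long virt  = 0xc0000000UL;	/* hypothetical MODULE_START */
	unsigned long etext = 0xc0234567UL;	/* hypothetical &_etext */

	/* same expression as map.length in devicemaps_init() above */
	unsigned long len = (etext - virt + ~PGDIR_MASK) & PGDIR_MASK;

	printf("length %#lx (%lu MiB)\n", len, len >> 20);
	return 0;
}

Here 0x234567 bytes of text round up to 0x400000, i.e. two pgd entries, so the subsequent pmd_clear() loop can safely resume at the first pgd boundary past _etext.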
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 6fb1258df1b..0f128c28fee 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -75,7 +75,7 @@ remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
pgprot = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_WRITE | flags);
do {
- pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
+ pte_t * pte = pte_alloc_kernel(pmd, address);
if (!pte)
return -ENOMEM;
remap_area_pte(pte, address, end - address, address + phys_addr, pgprot);
@@ -97,7 +97,6 @@ remap_area_pages(unsigned long start, unsigned long phys_addr,
phys_addr -= address;
dir = pgd_offset(&init_mm, address);
BUG_ON(address >= end);
- spin_lock(&init_mm.page_table_lock);
do {
pmd_t *pmd = pmd_alloc(&init_mm, dir, address);
if (!pmd) {
@@ -114,7 +113,6 @@ remap_area_pages(unsigned long start, unsigned long phys_addr,
dir++;
} while (address && (address < end));
- spin_unlock(&init_mm.page_table_lock);
flush_cache_vmap(start, end);
return err;
}
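
remap_area_pages() is the worker behind ioremap(). A hedged driver-side sketch of the path it serves; the device address, size, and register offset are hypothetical:

#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/io.h>

static void __init probe_regs(void)
{
	/* hypothetical device: 4K of registers at phys 0x10000000 */
	void __iomem *regs = ioremap(0x10000000, 4096);

	if (!regs)
		return;

	/* hypothetical ID register at offset 0 */
	printk(KERN_INFO "device id: %08x\n", readl(regs));
	iounmap(regs);
}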
diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c
index 61bc2fa0511..fb5b40289de 100644
--- a/arch/arm/mm/mm-armv.c
+++ b/arch/arm/mm/mm-armv.c
@@ -180,11 +180,6 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
if (!vectors_high()) {
/*
- * This lock is here just to satisfy pmd_alloc and pte_lock
- */
- spin_lock(&mm->page_table_lock);
-
- /*
* On ARM, first page must always be allocated since it
* contains the machine vectors.
*/
@@ -201,23 +196,14 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
set_pte(new_pte, *init_pte);
pte_unmap_nested(init_pte);
pte_unmap(new_pte);
-
- spin_unlock(&mm->page_table_lock);
}
return new_pgd;
no_pte:
- spin_unlock(&mm->page_table_lock);
pmd_free(new_pmd);
- free_pages((unsigned long)new_pgd, 2);
- return NULL;
-
no_pmd:
- spin_unlock(&mm->page_table_lock);
free_pages((unsigned long)new_pgd, 2);
- return NULL;
-
no_pgd:
return NULL;
}
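
The error-path rework above collapses three separate unwind-and-return sequences into the usual fall-through goto chain, where each label frees one more resource than the label below it. An illustrative, self-contained skeleton of the idiom (all names are placeholders):

#include <stdlib.h>

struct ctx { void *a, *b; };

struct ctx *setup(void)
{
	struct ctx *ctx = malloc(sizeof(*ctx));

	if (!ctx)
		goto no_ctx;
	ctx->a = malloc(64);
	if (!ctx->a)
		goto no_a;
	ctx->b = malloc(64);
	if (!ctx->b)
		goto no_b;
	return ctx;

	/* each label frees one more resource, then falls through */
no_b:
	free(ctx->a);
no_a:
	free(ctx);
no_ctx:
	return NULL;
}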
@@ -243,6 +229,7 @@ void free_pgd_slow(pgd_t *pgd)
pte = pmd_page(*pmd);
pmd_clear(pmd);
dec_page_state(nr_page_table_pages);
+ pte_lock_deinit(pte);
pte_free(pte);
pmd_free(pmd);
free:
@@ -482,14 +469,14 @@ void __init create_mapping(struct map_desc *md)
if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
printk(KERN_WARNING "BUG: not creating mapping for "
- "0x%016llx at 0x%08lx in user region\n",
+ "0x%08llx at 0x%08lx in user region\n",
__pfn_to_phys((u64)md->pfn), md->virtual);
return;
}
if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
- printk(KERN_WARNING "BUG: mapping for 0x%016llx at 0x%08lx "
+ printk(KERN_WARNING "BUG: mapping for 0x%08llx at 0x%08lx "
"overlaps vmalloc space\n",
__pfn_to_phys((u64)md->pfn), md->virtual);
}
@@ -505,14 +492,14 @@ void __init create_mapping(struct map_desc *md)
if(md->pfn >= 0x100000) {
if(domain) {
printk(KERN_ERR "MM: invalid domain in supersection "
- "mapping for 0x%016llx at 0x%08lx\n",
+ "mapping for 0x%08llx at 0x%08lx\n",
__pfn_to_phys((u64)md->pfn), md->virtual);
return;
}
if((md->virtual | md->length | __pfn_to_phys(md->pfn))
& ~SUPERSECTION_MASK) {
printk(KERN_ERR "MM: cannot create mapping for "
- "0x%016llx at 0x%08lx invalid alignment\n",
+ "0x%08llx at 0x%08lx invalid alignment\n",
__pfn_to_phys((u64)md->pfn), md->virtual);
return;
}
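
For context, the map_desc fields create_mapping() validates above are normally supplied by a machine's map_io hook, typically via iotable_init(). A sketch under that assumption; the virtual and physical addresses are hypothetical:

#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/sizes.h>
#include <asm/mach/map.h>

/* hypothetical static device mapping: 1MB of registers */
static struct map_desc board_io_desc[] __initdata = {
	{
		.virtual = 0xf8000000,			/* hypothetical */
		.pfn	 = __phys_to_pfn(0x10000000),	/* hypothetical */
		.length	 = SZ_1M,
		.type	 = MT_DEVICE,
	},
};

static void __init board_map_io(void)
{
	iotable_init(board_io_desc, ARRAY_SIZE(board_io_desc));
}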