Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/alignment.c          |  3
-rw-r--r--  arch/arm/mm/cache-feroceon-l2.c  |  1
-rw-r--r--  arch/arm/mm/cache-l2x0.c         | 22
-rw-r--r--  arch/arm/mm/cache-tauros2.c      |  1
-rw-r--r--  arch/arm/mm/cache-xsc3l2.c       |  2
-rw-r--r--  arch/arm/mm/copypage-v4mc.c      |  9
-rw-r--r--  arch/arm/mm/copypage-v6.c        | 20
-rw-r--r--  arch/arm/mm/copypage-xscale.c    |  9
-rw-r--r--  arch/arm/mm/dma-mapping.c        | 20
-rw-r--r--  arch/arm/mm/fault.c              |  6
-rw-r--r--  arch/arm/mm/flush.c              | 15
-rw-r--r--  arch/arm/mm/highmem.c            | 21
-rw-r--r--  arch/arm/mm/idmap.c              |  1
-rw-r--r--  arch/arm/mm/init.c               |  4
-rw-r--r--  arch/arm/mm/ioremap.c            | 19
-rw-r--r--  arch/arm/mm/mm.h                 | 26
-rw-r--r--  arch/arm/mm/mmu.c                |  9
-rw-r--r--  arch/arm/mm/nommu.c              |  8
-rw-r--r--  arch/arm/mm/pgd.c                |  1
-rw-r--r--  arch/arm/mm/proc-fa526.S         |  1
-rw-r--r--  arch/arm/mm/vmregion.c           | 76
-rw-r--r--  arch/arm/mm/vmregion.h           |  5
22 files changed, 194 insertions, 85 deletions
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index caf14dc059e..9107231aacc 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -22,7 +22,8 @@
#include <linux/sched.h>
#include <linux/uaccess.h>
-#include <asm/system.h>
+#include <asm/cp15.h>
+#include <asm/system_info.h>
#include <asm/unaligned.h>
#include "fault.h"
diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
index e0b0e7a4ec6..dd3d59122cc 100644
--- a/arch/arm/mm/cache-feroceon-l2.c
+++ b/arch/arm/mm/cache-feroceon-l2.c
@@ -15,6 +15,7 @@
#include <linux/init.h>
#include <linux/highmem.h>
#include <asm/cacheflush.h>
+#include <asm/cp15.h>
#include <plat/cache-feroceon-l2.h>
/*
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index b1e192ba8c2..a53fd2aaa2f 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -30,13 +30,13 @@
static void __iomem *l2x0_base;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
-static uint32_t l2x0_way_mask; /* Bitmask of active ways */
-static uint32_t l2x0_size;
+static u32 l2x0_way_mask; /* Bitmask of active ways */
+static u32 l2x0_size;
struct l2x0_regs l2x0_saved_regs;
struct l2x0_of_data {
- void (*setup)(const struct device_node *, __u32 *, __u32 *);
+ void (*setup)(const struct device_node *, u32 *, u32 *);
void (*save)(void);
void (*resume)(void);
};
@@ -288,7 +288,7 @@ static void l2x0_disable(void)
raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
-static void l2x0_unlock(__u32 cache_id)
+static void l2x0_unlock(u32 cache_id)
{
int lockregs;
int i;
@@ -307,11 +307,11 @@ static void l2x0_unlock(__u32 cache_id)
}
}
-void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
+void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
- __u32 aux;
- __u32 cache_id;
- __u32 way_size = 0;
+ u32 aux;
+ u32 cache_id;
+ u32 way_size = 0;
int ways;
const char *type;
@@ -388,7 +388,7 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
#ifdef CONFIG_OF
static void __init l2x0_of_setup(const struct device_node *np,
- __u32 *aux_val, __u32 *aux_mask)
+ u32 *aux_val, u32 *aux_mask)
{
u32 data[2] = { 0, 0 };
u32 tag = 0;
@@ -422,7 +422,7 @@ static void __init l2x0_of_setup(const struct device_node *np,
}
static void __init pl310_of_setup(const struct device_node *np,
- __u32 *aux_val, __u32 *aux_mask)
+ u32 *aux_val, u32 *aux_mask)
{
u32 data[3] = { 0, 0, 0 };
u32 tag[3] = { 0, 0, 0 };
@@ -548,7 +548,7 @@ static const struct of_device_id l2x0_ids[] __initconst = {
{}
};
-int __init l2x0_of_init(__u32 aux_val, __u32 aux_mask)
+int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
struct device_node *np;
struct l2x0_of_data *data;
diff --git a/arch/arm/mm/cache-tauros2.c b/arch/arm/mm/cache-tauros2.c
index 50868651890..1fbca05fe90 100644
--- a/arch/arm/mm/cache-tauros2.c
+++ b/arch/arm/mm/cache-tauros2.c
@@ -16,6 +16,7 @@
#include <linux/init.h>
#include <asm/cacheflush.h>
+#include <asm/cp15.h>
#include <asm/hardware/cache-tauros2.h>
diff --git a/arch/arm/mm/cache-xsc3l2.c b/arch/arm/mm/cache-xsc3l2.c
index 5a32020471e..6c3edeb66e7 100644
--- a/arch/arm/mm/cache-xsc3l2.c
+++ b/arch/arm/mm/cache-xsc3l2.c
@@ -18,7 +18,7 @@
*/
#include <linux/init.h>
#include <linux/highmem.h>
-#include <asm/system.h>
+#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index ec8c3befb9c..1267e64133b 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -23,10 +23,6 @@
#include "mm.h"
-/*
- * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
- * specific hacks for copying pages efficiently.
- */
#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
L_PTE_MT_MINICACHE)
@@ -78,10 +74,9 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from,
raw_spin_lock(&minicache_lock);
- set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
- flush_tlb_kernel_page(0xffff8000);
+ set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot));
- mc_copy_user_page((void *)0xffff8000, kto);
+ mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
raw_spin_unlock(&minicache_lock);
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 8b03a5814d0..b9bcc9d7917 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -24,9 +24,6 @@
#error FIX ME
#endif
-#define from_address (0xffff8000)
-#define to_address (0xffffc000)
-
static DEFINE_RAW_SPINLOCK(v6_lock);
/*
@@ -90,14 +87,11 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
*/
raw_spin_lock(&v6_lock);
- set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0);
- set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0);
-
- kfrom = from_address + (offset << PAGE_SHIFT);
- kto = to_address + (offset << PAGE_SHIFT);
+ kfrom = COPYPAGE_V6_FROM + (offset << PAGE_SHIFT);
+ kto = COPYPAGE_V6_TO + (offset << PAGE_SHIFT);
- flush_tlb_kernel_page(kfrom);
- flush_tlb_kernel_page(kto);
+ set_top_pte(kfrom, mk_pte(from, PAGE_KERNEL));
+ set_top_pte(kto, mk_pte(to, PAGE_KERNEL));
copy_page((void *)kto, (void *)kfrom);
@@ -111,8 +105,7 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
*/
static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr)
{
- unsigned int offset = CACHE_COLOUR(vaddr);
- unsigned long to = to_address + (offset << PAGE_SHIFT);
+ unsigned long to = COPYPAGE_V6_TO + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
/* FIXME: not highmem safe */
discard_old_kernel_data(page_address(page));
@@ -123,8 +116,7 @@ static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr)
*/
raw_spin_lock(&v6_lock);
- set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0);
- flush_tlb_kernel_page(to);
+ set_top_pte(to, mk_pte(page, PAGE_KERNEL));
clear_page((void *)to);
raw_spin_unlock(&v6_lock);
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index 439d106ae63..0fb85025344 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -23,12 +23,6 @@
#include "mm.h"
-/*
- * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
- * specific hacks for copying pages efficiently.
- */
-#define COPYPAGE_MINICACHE 0xffff8000
-
#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
L_PTE_MT_MINICACHE)
@@ -100,8 +94,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
raw_spin_lock(&minicache_lock);
- set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
- flush_tlb_kernel_page(COPYPAGE_MINICACHE);
+ set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot));
mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 1aa664a1999..db23ae4aaaa 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -214,7 +214,8 @@ static int __init consistent_init(void)
core_initcall(consistent_init);
static void *
-__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
+__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
+ const void *caller)
{
struct arm_vmregion *c;
size_t align;
@@ -241,7 +242,7 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
* Allocate a virtual address in the consistent mapping region.
*/
c = arm_vmregion_alloc(&consistent_head, align, size,
- gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
+ gfp & ~(__GFP_DMA | __GFP_HIGHMEM), caller);
if (c) {
pte_t *pte;
int idx = CONSISTENT_PTE_INDEX(c->vm_start);
@@ -320,14 +321,14 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
#else /* !CONFIG_MMU */
-#define __dma_alloc_remap(page, size, gfp, prot) page_address(page)
+#define __dma_alloc_remap(page, size, gfp, prot, c) page_address(page)
#define __dma_free_remap(addr, size) do { } while (0)
#endif /* CONFIG_MMU */
static void *
__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
- pgprot_t prot)
+ pgprot_t prot, const void *caller)
{
struct page *page;
void *addr;
@@ -349,7 +350,7 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
return NULL;
if (!arch_is_coherent())
- addr = __dma_alloc_remap(page, size, gfp, prot);
+ addr = __dma_alloc_remap(page, size, gfp, prot, caller);
else
addr = page_address(page);
@@ -374,7 +375,8 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
return memory;
return __dma_alloc(dev, size, handle, gfp,
- pgprot_dmacoherent(pgprot_kernel));
+ pgprot_dmacoherent(pgprot_kernel),
+ __builtin_return_address(0));
}
EXPORT_SYMBOL(dma_alloc_coherent);
@@ -386,7 +388,8 @@ void *
dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
return __dma_alloc(dev, size, handle, gfp,
- pgprot_writecombine(pgprot_kernel));
+ pgprot_writecombine(pgprot_kernel),
+ __builtin_return_address(0));
}
EXPORT_SYMBOL(dma_alloc_writecombine);
@@ -723,6 +726,9 @@ EXPORT_SYMBOL(dma_set_mask);
static int __init dma_debug_do_init(void)
{
+#ifdef CONFIG_MMU
+ arm_vmregion_create_proc("dma-mappings", &consistent_head);
+#endif
dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
return 0;
}
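
[Note: the dma-mapping.c changes above thread the allocation site through __dma_alloc() into the vmregion record and, with CONFIG_MMU, register a "dma-mappings" proc file. Callers need no changes, since the return address is captured inside dma_alloc_coherent() itself. A minimal sketch of an unchanged caller; the my_driver_* name, 'dev' and the size are illustrative only:]

/* Hypothetical driver code: the allocation site is recorded inside
 * dma_alloc_coherent() via __builtin_return_address(0), so existing
 * callers need no change. */
static int my_driver_alloc_buffer(struct device *dev)
{
	dma_addr_t handle;
	void *buf = dma_alloc_coherent(dev, SZ_4K, &handle, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	/* ... the mapping is now visible in the "dma-mappings" proc file ... */
	dma_free_coherent(dev, SZ_4K, buf, handle);
	return 0;
}
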
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index bb7eac381a8..9055b5a84ec 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -21,8 +21,9 @@
#include <linux/perf_event.h>
#include <asm/exception.h>
-#include <asm/system.h>
#include <asm/pgtable.h>
+#include <asm/system_misc.h>
+#include <asm/system_info.h>
#include <asm/tlbflush.h>
#include "fault.h"
@@ -164,7 +165,8 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
struct siginfo si;
#ifdef CONFIG_DEBUG_USER
- if (user_debug & UDBG_SEGV) {
+ if (((user_debug & UDBG_SEGV) && (sig == SIGSEGV)) ||
+ ((user_debug & UDBG_BUS) && (sig == SIGBUS))) {
printk(KERN_DEBUG "%s: unhandled page fault (%d) at 0x%08lx, code 0x%03x\n",
tsk->comm, sig, addr, fsr);
show_pte(tsk->mm, addr);
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 1a8d4aa821b..77458548e03 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -16,22 +16,18 @@
#include <asm/cachetype.h>
#include <asm/highmem.h>
#include <asm/smp_plat.h>
-#include <asm/system.h>
#include <asm/tlbflush.h>
#include "mm.h"
#ifdef CONFIG_CPU_CACHE_VIPT
-#define ALIAS_FLUSH_START 0xffff4000
-
static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
- unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
+ unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
const int zero = 0;
- set_pte_ext(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL), 0);
- flush_tlb_kernel_page(to);
+ set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));
asm( "mcrr p15, 0, %1, %0, c14\n"
" mcr p15, 0, %2, c7, c10, 4"
@@ -42,13 +38,12 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
{
- unsigned long colour = CACHE_COLOUR(vaddr);
+ unsigned long va = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
unsigned long offset = vaddr & (PAGE_SIZE - 1);
unsigned long to;
- set_pte_ext(TOP_PTE(ALIAS_FLUSH_START) + colour, pfn_pte(pfn, PAGE_KERNEL), 0);
- to = ALIAS_FLUSH_START + (colour << PAGE_SHIFT) + offset;
- flush_tlb_kernel_page(to);
+ set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));
+ to = va + offset;
flush_icache_range(to, to + len);
}
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 5a21505d755..21b9e1bf9b7 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -69,15 +69,14 @@ void *kmap_atomic(struct page *page)
* With debugging enabled, kunmap_atomic forces that entry to 0.
* Make sure it was indeed properly unmapped.
*/
- BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
+ BUG_ON(!pte_none(get_top_pte(vaddr)));
#endif
- set_pte_ext(TOP_PTE(vaddr), mk_pte(page, kmap_prot), 0);
/*
* When debugging is off, kunmap_atomic leaves the previous mapping
- * in place, so this TLB flush ensures the TLB is updated with the
- * new mapping.
+ * in place, so the contained TLB flush ensures the TLB is updated
+ * with the new mapping.
*/
- local_flush_tlb_kernel_page(vaddr);
+ set_top_pte(vaddr, mk_pte(page, kmap_prot));
return (void *)vaddr;
}
@@ -96,8 +95,7 @@ void __kunmap_atomic(void *kvaddr)
__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
#ifdef CONFIG_DEBUG_HIGHMEM
BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
- set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
- local_flush_tlb_kernel_page(vaddr);
+ set_top_pte(vaddr, __pte(0));
#else
(void) idx; /* to kill a warning */
#endif
@@ -121,10 +119,9 @@ void *kmap_atomic_pfn(unsigned long pfn)
idx = type + KM_TYPE_NR * smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
- BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
+ BUG_ON(!pte_none(get_top_pte(vaddr)));
#endif
- set_pte_ext(TOP_PTE(vaddr), pfn_pte(pfn, kmap_prot), 0);
- local_flush_tlb_kernel_page(vaddr);
+ set_top_pte(vaddr, pfn_pte(pfn, kmap_prot));
return (void *)vaddr;
}
@@ -132,11 +129,9 @@ void *kmap_atomic_pfn(unsigned long pfn)
struct page *kmap_atomic_to_page(const void *ptr)
{
unsigned long vaddr = (unsigned long)ptr;
- pte_t *pte;
if (vaddr < FIXADDR_START)
return virt_to_page(ptr);
- pte = TOP_PTE(vaddr);
- return pte_page(*pte);
+ return pte_page(get_top_pte(vaddr));
}
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
index feacf4c7671..ab88ed4f8e0 100644
--- a/arch/arm/mm/idmap.c
+++ b/arch/arm/mm/idmap.c
@@ -5,6 +5,7 @@
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
+#include <asm/system_info.h>
pgd_t *idmap_pgd;
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 245a55a0a5b..595079fa9d1 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -658,7 +658,9 @@ void __init mem_init(void)
#ifdef CONFIG_HIGHMEM
" pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n"
#endif
+#ifdef CONFIG_MODULES
" modules : 0x%08lx - 0x%08lx (%4ld MB)\n"
+#endif
" .text : 0x%p" " - 0x%p" " (%4d kB)\n"
" .init : 0x%p" " - 0x%p" " (%4d kB)\n"
" .data : 0x%p" " - 0x%p" " (%4d kB)\n"
@@ -677,7 +679,9 @@ void __init mem_init(void)
MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
(PAGE_SIZE)),
#endif
+#ifdef CONFIG_MODULES
MLM(MODULES_VADDR, MODULES_END),
+#endif
MLK_ROUNDUP(_text, _etext),
MLK_ROUNDUP(__init_begin, __init_end),
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 80632e8d753..4f55f5062ab 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -26,12 +26,14 @@
#include <linux/vmalloc.h>
#include <linux/io.h>
+#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>
+#include <asm/system_info.h>
#include <asm/mach/map.h>
#include "mm.h"
@@ -306,11 +308,15 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
}
EXPORT_SYMBOL(__arm_ioremap_pfn);
+void __iomem * (*arch_ioremap_caller)(unsigned long, size_t,
+ unsigned int, void *) =
+ __arm_ioremap_caller;
+
void __iomem *
__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
{
- return __arm_ioremap_caller(phys_addr, size, mtype,
- __builtin_return_address(0));
+ return arch_ioremap_caller(phys_addr, size, mtype,
+ __builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap);
@@ -369,4 +375,11 @@ void __iounmap(volatile void __iomem *io_addr)
vunmap(addr);
}
-EXPORT_SYMBOL(__iounmap);
+
+void (*arch_iounmap)(volatile void __iomem *) = __iounmap;
+
+void __arm_iounmap(volatile void __iomem *io_addr)
+{
+ arch_iounmap(io_addr);
+}
+EXPORT_SYMBOL(__arm_iounmap);
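
[Note: the new arch_ioremap_caller/arch_iounmap function pointers let a platform substitute its own mapping routines while __arm_ioremap()/__arm_iounmap() remain the common entry points. A minimal sketch of how a platform might hook them, assuming a hypothetical SoC with static peripheral mappings; the my_soc_* names are illustrative and not part of this diff:]

static void __iomem *my_soc_ioremap_caller(unsigned long phys, size_t size,
					   unsigned int mtype, void *caller)
{
	/* Return a pre-built static mapping for on-chip peripherals here,
	 * otherwise fall back to the generic implementation. */
	return __arm_ioremap_caller(phys, size, mtype, caller);
}

static void __init my_soc_map_io(void)
{
	/* Installed early, e.g. from the machine's ->map_io hook;
	 * arch_iounmap can be left at its default of __iounmap. */
	arch_ioremap_caller = my_soc_ioremap_caller;
}
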
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 70f6d3ea483..27f4a619b35 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -3,7 +3,31 @@
/* the upper-most page table pointer */
extern pmd_t *top_pmd;
-#define TOP_PTE(x) pte_offset_kernel(top_pmd, x)
+/*
+ * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
+ * specific hacks for copying pages efficiently, while 0xffff4000
+ * is reserved for VIPT aliasing flushing by generic code.
+ *
+ * Note that we don't allow VIPT aliasing caches with SMP.
+ */
+#define COPYPAGE_MINICACHE 0xffff8000
+#define COPYPAGE_V6_FROM 0xffff8000
+#define COPYPAGE_V6_TO 0xffffc000
+/* PFN alias flushing, for VIPT caches */
+#define FLUSH_ALIAS_START 0xffff4000
+
+static inline void set_top_pte(unsigned long va, pte_t pte)
+{
+ pte_t *ptep = pte_offset_kernel(top_pmd, va);
+ set_pte_ext(ptep, pte, 0);
+ local_flush_tlb_kernel_page(va);
+}
+
+static inline pte_t get_top_pte(unsigned long va)
+{
+ pte_t *ptep = pte_offset_kernel(top_pmd, va);
+ return *ptep;
+}
static inline pmd_t *pmd_off_k(unsigned long virt)
{
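
[Note: the helpers added above are the core of this series. set_top_pte() folds the old TOP_PTE() + set_pte_ext() + flush_tlb_kernel_page() sequence into a single call, so the TLB maintenance can no longer be forgotten, and get_top_pte() replaces direct dereferences of TOP_PTE(). A before/after sketch of the caller-side idiom, taken from the copypage changes above:]

	/* Old idiom: install the PTE, then flush the TLB by hand. */
	set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE),
		    pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
	flush_tlb_kernel_page(COPYPAGE_MINICACHE);

	/* New idiom: one helper installs the PTE and performs the (local)
	 * TLB flush itself. */
	set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot));
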
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 94c5a0c94f5..b86f8933ff9 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -17,6 +17,7 @@
#include <linux/fs.h>
#include <linux/vmalloc.h>
+#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/sections.h>
#include <asm/cachetype.h>
@@ -25,6 +26,7 @@
#include <asm/smp_plat.h>
#include <asm/tlb.h>
#include <asm/highmem.h>
+#include <asm/system_info.h>
#include <asm/traps.h>
#include <asm/mach/arch.h>
@@ -997,11 +999,14 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
{
struct map_desc map;
unsigned long addr;
+ void *vectors;
/*
* Allocate the vector page early.
*/
- vectors_page = early_alloc(PAGE_SIZE);
+ vectors = early_alloc(PAGE_SIZE);
+
+ early_trap_init(vectors);
for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
pmd_clear(pmd_off_k(addr));
@@ -1041,7 +1046,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
* location (0xffff0000). If we aren't using high-vectors, also
* create a mapping at the low-vectors virtual address.
*/
- map.pfn = __phys_to_pfn(virt_to_phys(vectors_page));
+ map.pfn = __phys_to_pfn(virt_to_phys(vectors));
map.virtual = 0xffff0000;
map.length = PAGE_SIZE;
map.type = MT_HIGH_VECTORS;
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 4fc6794cca4..6486d2f253c 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -86,13 +86,17 @@ void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size,
}
EXPORT_SYMBOL(__arm_ioremap);
+void __iomem * (*arch_ioremap_caller)(unsigned long, size_t, unsigned int, void *);
+
void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
unsigned int mtype, void *caller)
{
return __arm_ioremap(phys_addr, size, mtype);
}
-void __iounmap(volatile void __iomem *addr)
+void (*arch_iounmap)(volatile void __iomem *);
+
+void __arm_iounmap(volatile void __iomem *addr)
{
}
-EXPORT_SYMBOL(__iounmap);
+EXPORT_SYMBOL(__arm_iounmap);
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index a3e78ccabd6..0acb089d0f7 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -12,6 +12,7 @@
#include <linux/highmem.h>
#include <linux/slab.h>
+#include <asm/cp15.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S
index 272558a133a..d217e9795d7 100644
--- a/arch/arm/mm/proc-fa526.S
+++ b/arch/arm/mm/proc-fa526.S
@@ -22,7 +22,6 @@
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/ptrace.h>
-#include <asm/system.h>
#include "proc-macros.S"
diff --git a/arch/arm/mm/vmregion.c b/arch/arm/mm/vmregion.c
index 036fdbfdd62..a631016e1f8 100644
--- a/arch/arm/mm/vmregion.c
+++ b/arch/arm/mm/vmregion.c
@@ -1,5 +1,8 @@
+#include <linux/fs.h>
#include <linux/spinlock.h>
#include <linux/list.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/slab.h>
#include "vmregion.h"
@@ -36,7 +39,7 @@
struct arm_vmregion *
arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align,
- size_t size, gfp_t gfp)
+ size_t size, gfp_t gfp, const void *caller)
{
unsigned long start = head->vm_start, addr = head->vm_end;
unsigned long flags;
@@ -52,6 +55,8 @@ arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align,
if (!new)
goto out;
+ new->caller = caller;
+
spin_lock_irqsave(&head->vm_lock, flags);
addr = rounddown(addr - size, align);
@@ -129,3 +134,72 @@ void arm_vmregion_free(struct arm_vmregion_head *head, struct arm_vmregion *c)
kfree(c);
}
+
+#ifdef CONFIG_PROC_FS
+static int arm_vmregion_show(struct seq_file *m, void *p)
+{
+ struct arm_vmregion *c = list_entry(p, struct arm_vmregion, vm_list);
+
+ seq_printf(m, "0x%08lx-0x%08lx %7lu", c->vm_start, c->vm_end,
+ c->vm_end - c->vm_start);
+ if (c->caller)
+ seq_printf(m, " %pS", (void *)c->caller);
+ seq_putc(m, '\n');
+ return 0;
+}
+
+static void *arm_vmregion_start(struct seq_file *m, loff_t *pos)
+{
+ struct arm_vmregion_head *h = m->private;
+ spin_lock_irq(&h->vm_lock);
+ return seq_list_start(&h->vm_list, *pos);
+}
+
+static void *arm_vmregion_next(struct seq_file *m, void *p, loff_t *pos)
+{
+ struct arm_vmregion_head *h = m->private;
+ return seq_list_next(p, &h->vm_list, pos);
+}
+
+static void arm_vmregion_stop(struct seq_file *m, void *p)
+{
+ struct arm_vmregion_head *h = m->private;
+ spin_unlock_irq(&h->vm_lock);
+}
+
+static const struct seq_operations arm_vmregion_ops = {
+ .start = arm_vmregion_start,
+ .stop = arm_vmregion_stop,
+ .next = arm_vmregion_next,
+ .show = arm_vmregion_show,
+};
+
+static int arm_vmregion_open(struct inode *inode, struct file *file)
+{
+ struct arm_vmregion_head *h = PDE(inode)->data;
+ int ret = seq_open(file, &arm_vmregion_ops);
+ if (!ret) {
+ struct seq_file *m = file->private_data;
+ m->private = h;
+ }
+ return ret;
+}
+
+static const struct file_operations arm_vmregion_fops = {
+ .open = arm_vmregion_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+int arm_vmregion_create_proc(const char *path, struct arm_vmregion_head *h)
+{
+ proc_create_data(path, S_IRUSR, NULL, &arm_vmregion_fops, h);
+ return 0;
+}
+#else
+int arm_vmregion_create_proc(const char *path, struct arm_vmregion_head *h)
+{
+ return 0;
+}
+#endif
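
[Note: with CONFIG_PROC_FS enabled, arm_vmregion_create_proc() exposes the region list as a read-only proc file; arm_vmregion_show() prints one region per line as start-end, size in bytes, and the recorded caller via %pS. An illustrative output line; the addresses, size and symbol offset are made up:]

	0xffdf8000-0xffdf9000    4096 dma_alloc_coherent+0x30/0x44
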
diff --git a/arch/arm/mm/vmregion.h b/arch/arm/mm/vmregion.h
index 15e9f044db9..162be662c08 100644
--- a/arch/arm/mm/vmregion.h
+++ b/arch/arm/mm/vmregion.h
@@ -19,11 +19,14 @@ struct arm_vmregion {
unsigned long vm_end;
struct page *vm_pages;
int vm_active;
+ const void *caller;
};
-struct arm_vmregion *arm_vmregion_alloc(struct arm_vmregion_head *, size_t, size_t, gfp_t);
+struct arm_vmregion *arm_vmregion_alloc(struct arm_vmregion_head *, size_t, size_t, gfp_t, const void *);
struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *, unsigned long);
struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *, unsigned long);
void arm_vmregion_free(struct arm_vmregion_head *, struct arm_vmregion *);
+int arm_vmregion_create_proc(const char *, struct arm_vmregion_head *);
+
#endif