author	Ingo Molnar <mingo@elte.hu>	2009-09-15 12:18:15 +0200
committer	Ingo Molnar <mingo@elte.hu>	2009-09-15 12:18:15 +0200
commit	dca2d6ac09d9ef59ff46820d4f0c94b08a671202 (patch)
tree	fdec753b842dad09e3a4151954fab3eb5c43500d /arch/arm/mm
parent	d6a65dffb30d8636b1e5d4c201564ef401a246cf (diff)
parent	18240904960a39e582ced8ba8ececb10b8c22dd3 (diff)
Merge branch 'linus' into tracing/hw-breakpoints
Conflicts:
	arch/x86/kernel/process_64.c

Semantic conflict fixed in:
	arch/x86/kvm/x86.c

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig        |  2
-rw-r--r--  arch/arm/mm/alignment.c    | 20
-rw-r--r--  arch/arm/mm/cache-v7.S     | 16
-rw-r--r--  arch/arm/mm/dma-mapping.c  | 94
-rw-r--r--  arch/arm/mm/fault.c        | 24
-rw-r--r--  arch/arm/mm/flush.c        |  9
-rw-r--r--  arch/arm/mm/highmem.c      |  8
-rw-r--r--  arch/arm/mm/init.c         | 32
-rw-r--r--  arch/arm/mm/nommu.c        |  1
-rw-r--r--  arch/arm/mm/proc-macros.S  |  8
-rw-r--r--  arch/arm/mm/proc-v7.S      |  7
11 files changed, 179 insertions(+), 42 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 83c025e72ce..5fe595aeba6 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -758,7 +758,7 @@ config CACHE_FEROCEON_L2_WRITETHROUGH
config CACHE_L2X0
bool "Enable the L2x0 outer cache controller"
depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \
- REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX
+ REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || ARCH_NOMADIK
default y
select OUTER_CACHE
help
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 03cd27d917b..b270d6228fe 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -159,7 +159,9 @@ union offset_union {
#define __get8_unaligned_check(ins,val,addr,err) \
__asm__( \
- "1: "ins" %1, [%2], #1\n" \
+ ARM( "1: "ins" %1, [%2], #1\n" ) \
+ THUMB( "1: "ins" %1, [%2]\n" ) \
+ THUMB( " add %2, %2, #1\n" ) \
"2:\n" \
" .section .fixup,\"ax\"\n" \
" .align 2\n" \
@@ -215,7 +217,9 @@ union offset_union {
do { \
unsigned int err = 0, v = val, a = addr; \
__asm__( FIRST_BYTE_16 \
- "1: "ins" %1, [%2], #1\n" \
+ ARM( "1: "ins" %1, [%2], #1\n" ) \
+ THUMB( "1: "ins" %1, [%2]\n" ) \
+ THUMB( " add %2, %2, #1\n" ) \
" mov %1, %1, "NEXT_BYTE"\n" \
"2: "ins" %1, [%2]\n" \
"3:\n" \
@@ -245,11 +249,17 @@ union offset_union {
do { \
unsigned int err = 0, v = val, a = addr; \
__asm__( FIRST_BYTE_32 \
- "1: "ins" %1, [%2], #1\n" \
+ ARM( "1: "ins" %1, [%2], #1\n" ) \
+ THUMB( "1: "ins" %1, [%2]\n" ) \
+ THUMB( " add %2, %2, #1\n" ) \
" mov %1, %1, "NEXT_BYTE"\n" \
- "2: "ins" %1, [%2], #1\n" \
+ ARM( "2: "ins" %1, [%2], #1\n" ) \
+ THUMB( "2: "ins" %1, [%2]\n" ) \
+ THUMB( " add %2, %2, #1\n" ) \
" mov %1, %1, "NEXT_BYTE"\n" \
- "3: "ins" %1, [%2], #1\n" \
+ ARM( "3: "ins" %1, [%2], #1\n" ) \
+ THUMB( "3: "ins" %1, [%2]\n" ) \
+ THUMB( " add %2, %2, #1\n" ) \
" mov %1, %1, "NEXT_BYTE"\n" \
"4: "ins" %1, [%2]\n" \
"5:\n" \
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index be93ff02a98..bda0ec31a4e 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -21,7 +21,7 @@
*
* Flush the whole D-cache.
*
- * Corrupted registers: r0-r5, r7, r9-r11
+ * Corrupted registers: r0-r7, r9-r11 (r6 only in Thumb mode)
*
* - mm - mm_struct describing address space
*/
@@ -51,8 +51,12 @@ loop1:
loop2:
mov r9, r4 @ create working copy of max way size
loop3:
- orr r11, r10, r9, lsl r5 @ factor way and cache number into r11
- orr r11, r11, r7, lsl r2 @ factor index number into r11
+ ARM( orr r11, r10, r9, lsl r5 ) @ factor way and cache number into r11
+ THUMB( lsl r6, r9, r5 )
+ THUMB( orr r11, r10, r6 ) @ factor way and cache number into r11
+ ARM( orr r11, r11, r7, lsl r2 ) @ factor index number into r11
+ THUMB( lsl r6, r7, r2 )
+ THUMB( orr r11, r11, r6 ) @ factor index number into r11
mcr p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way
subs r9, r9, #1 @ decrement the way
bge loop3
@@ -82,11 +86,13 @@ ENDPROC(v7_flush_dcache_all)
*
*/
ENTRY(v7_flush_kern_cache_all)
- stmfd sp!, {r4-r5, r7, r9-r11, lr}
+ ARM( stmfd sp!, {r4-r5, r7, r9-r11, lr} )
+ THUMB( stmfd sp!, {r4-r7, r9-r11, lr} )
bl v7_flush_dcache_all
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate
- ldmfd sp!, {r4-r5, r7, r9-r11, lr}
+ ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} )
+ THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} )
mov pc, lr
ENDPROC(v7_flush_kern_cache_all)
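Note: Thumb-2 data-processing instructions cannot take a register-controlled shift in the second operand, so "orr r11, r10, r9, lsl r5" has no Thumb-2 encoding; the shift goes through r6 first, which is why r6 is now clobbered in Thumb mode and the stmfd/ldmfd register lists grow. For reference, a hedged C restatement of the operand loop3 composes for the clean-and-invalidate-by-set/way operation (names are illustrative, not kernel API):

/* way_shift = 32 - log2(ways) (computed with clz); line_shift =
 * log2(line size in bytes); level is the cache level being flushed. */
static unsigned int dccisw_operand(unsigned int level, unsigned int way,
				   unsigned int way_shift, unsigned int set,
				   unsigned int line_shift)
{
	return (level << 1) | (way << way_shift) | (set << line_shift);
}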
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 510c179b0ac..b30925fcbcd 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -36,7 +36,34 @@
#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
#define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)
+static u64 get_coherent_dma_mask(struct device *dev)
+{
+ u64 mask = ISA_DMA_THRESHOLD;
+
+ if (dev) {
+ mask = dev->coherent_dma_mask;
+
+ /*
+ * Sanity check the DMA mask - it must be non-zero, and
+ * must be able to be satisfied by a DMA allocation.
+ */
+ if (mask == 0) {
+ dev_warn(dev, "coherent DMA mask is unset\n");
+ return 0;
+ }
+
+ if ((~mask) & ISA_DMA_THRESHOLD) {
+ dev_warn(dev, "coherent DMA mask %#llx is smaller "
+ "than system GFP_DMA mask %#llx\n",
+ mask, (unsigned long long)ISA_DMA_THRESHOLD);
+ return 0;
+ }
+ }
+ return mask;
+}
+
+#ifdef CONFIG_MMU
/*
* These are the page tables (2MB each) covering uncached, DMA consistent allocations
*/
@@ -152,7 +179,8 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
struct page *page;
struct arm_vm_region *c;
unsigned long order;
- u64 mask = ISA_DMA_THRESHOLD, limit;
+ u64 mask = get_coherent_dma_mask(dev);
+ u64 limit;
if (!consistent_pte[0]) {
printk(KERN_ERR "%s: not initialised\n", __func__);
@@ -160,25 +188,8 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
return NULL;
}
- if (dev) {
- mask = dev->coherent_dma_mask;
-
- /*
- * Sanity check the DMA mask - it must be non-zero, and
- * must be able to be satisfied by a DMA allocation.
- */
- if (mask == 0) {
- dev_warn(dev, "coherent DMA mask is unset\n");
- goto no_page;
- }
-
- if ((~mask) & ISA_DMA_THRESHOLD) {
- dev_warn(dev, "coherent DMA mask %#llx is smaller "
- "than system GFP_DMA mask %#llx\n",
- mask, (unsigned long long)ISA_DMA_THRESHOLD);
- goto no_page;
- }
- }
+ if (!mask)
+ goto no_page;
/*
* Sanity check the allocation size.
@@ -267,6 +278,31 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
*handle = ~0;
return NULL;
}
+#else /* !CONFIG_MMU */
+static void *
+__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
+ pgprot_t prot)
+{
+ void *virt;
+ u64 mask = get_coherent_dma_mask(dev);
+
+ if (!mask)
+ goto error;
+
+ if (mask != 0xffffffff)
+ gfp |= GFP_DMA;
+ virt = kmalloc(size, gfp);
+ if (!virt)
+ goto error;
+
+ *handle = virt_to_dma(dev, virt);
+ return virt;
+
+error:
+ *handle = ~0;
+ return NULL;
+}
+#endif /* CONFIG_MMU */
/*
* Allocate DMA-coherent memory space and return both the kernel remapped
@@ -311,9 +347,10 @@ EXPORT_SYMBOL(dma_alloc_writecombine);
static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
+ int ret = -ENXIO;
+#ifdef CONFIG_MMU
unsigned long flags, user_size, kern_size;
struct arm_vm_region *c;
- int ret = -ENXIO;
user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
@@ -334,6 +371,7 @@ static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
vma->vm_page_prot);
}
}
+#endif /* CONFIG_MMU */
return ret;
}
@@ -358,6 +396,7 @@ EXPORT_SYMBOL(dma_mmap_writecombine);
* free a page as defined by the above mapping.
* Must not be called with IRQs disabled.
*/
+#ifdef CONFIG_MMU
void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
{
struct arm_vm_region *c;
@@ -444,6 +483,14 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
__func__, cpu_addr);
dump_stack();
}
+#else /* !CONFIG_MMU */
+void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
+{
+ if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
+ return;
+ kfree(cpu_addr);
+}
+#endif /* CONFIG_MMU */
EXPORT_SYMBOL(dma_free_coherent);
/*
@@ -451,10 +498,12 @@ EXPORT_SYMBOL(dma_free_coherent);
*/
static int __init consistent_init(void)
{
+ int ret = 0;
+#ifdef CONFIG_MMU
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
- int ret = 0, i = 0;
+ int i = 0;
u32 base = CONSISTENT_BASE;
do {
@@ -477,6 +526,7 @@ static int __init consistent_init(void)
consistent_pte[i++] = pte;
base += (1 << PGDIR_SHIFT);
} while (base < CONSISTENT_END);
+#endif /* !CONFIG_MMU */
return ret;
}
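Note: with these hunks, !CONFIG_MMU coherent allocations fall back to kmalloc(), requesting GFP_DMA whenever the device cannot address a full 32 bits, and freeing becomes a kfree(). A hedged usage sketch from a driver's point of view (the call sites themselves are unchanged by this patch):

#include <linux/dma-mapping.h>

dma_addr_t dma_handle;
void *buf = dma_alloc_coherent(dev, PAGE_SIZE, &dma_handle, GFP_KERNEL);
if (!buf)
	return -ENOMEM;		/* allocation failed; handle was set to ~0 */
/* ... program the device with dma_handle, run the transfer ... */
dma_free_coherent(dev, PAGE_SIZE, buf, dma_handle);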
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 6fdcbb70982..cc8829d7e11 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -16,6 +16,8 @@
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/page-flags.h>
+#include <linux/sched.h>
+#include <linux/highmem.h>
#include <asm/system.h>
#include <asm/pgtable.h>
@@ -23,6 +25,7 @@
#include "fault.h"
+#ifdef CONFIG_MMU
#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr)
@@ -97,6 +100,10 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
printk("\n");
}
+#else /* CONFIG_MMU */
+void show_pte(struct mm_struct *mm, unsigned long addr)
+{ }
+#endif /* CONFIG_MMU */
/*
* Oops. The kernel tried to access some page that wasn't present.
@@ -171,6 +178,7 @@ void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
__do_kernel_fault(mm, addr, fsr, regs);
}
+#ifdef CONFIG_MMU
#define VM_FAULT_BADMAP 0x010000
#define VM_FAULT_BADACCESS 0x020000
@@ -322,6 +330,13 @@ no_context:
__do_kernel_fault(mm, addr, fsr, regs);
return 0;
}
+#else /* CONFIG_MMU */
+static int
+do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+{
+ return 0;
+}
+#endif /* CONFIG_MMU */
/*
* First Level Translation Fault Handler
@@ -340,6 +355,7 @@ no_context:
* interrupt or a critical region, and should only copy the information
* from the master page table, nothing more.
*/
+#ifdef CONFIG_MMU
static int __kprobes
do_translation_fault(unsigned long addr, unsigned int fsr,
struct pt_regs *regs)
@@ -378,6 +394,14 @@ bad_area:
do_bad_area(addr, fsr, regs);
return 0;
}
+#else /* CONFIG_MMU */
+static int
+do_translation_fault(unsigned long addr, unsigned int fsr,
+ struct pt_regs *regs)
+{
+ return 0;
+}
+#endif /* CONFIG_MMU */
/*
* Some section permission faults need to be handled gracefully.
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index c07222eb5ce..575f3ad722e 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -144,7 +144,14 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
* page. This ensures that data in the physical page is mutually
* coherent with the kernels mapping.
*/
- __cpuc_flush_dcache_page(page_address(page));
+#ifdef CONFIG_HIGHMEM
+ /*
+ * kmap_atomic() doesn't set the page virtual address, and
+ * kunmap_atomic() takes care of cache flushing already.
+ */
+ if (page_address(page))
+#endif
+ __cpuc_flush_dcache_page(page_address(page));
/*
* If this is a page cache page, and we have an aliasing VIPT cache,
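Note: page_address() returns NULL for a highmem page that currently has no kernel mapping, and for such pages kunmap_atomic() already flushes on teardown, so the flush is only issued when an address exists. A hedged sketch of the equivalent logic folded into plain C:

void *addr = page_address(page);	/* NULL for an unmapped highmem page */

if (addr)	/* always non-NULL without CONFIG_HIGHMEM */
	__cpuc_flush_dcache_page(addr);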
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index a34954d9df7..73cae57fa70 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -40,11 +40,16 @@ void *kmap_atomic(struct page *page, enum km_type type)
{
unsigned int idx;
unsigned long vaddr;
+ void *kmap;
pagefault_disable();
if (!PageHighMem(page))
return page_address(page);
+ kmap = kmap_high_get(page);
+ if (kmap)
+ return kmap;
+
idx = type + KM_TYPE_NR * smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
@@ -80,6 +85,9 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
#else
(void) idx; /* to kill a warning */
#endif
+ } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
+ /* this address was obtained through kmap_high_get() */
+ kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
}
pagefault_enable();
}
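Note: kmap_atomic() now asks kmap_high_get() whether the page already has a permanent kmap and reuses that address, avoiding a second, potentially cache-aliasing mapping of the same page; kunmap_atomic() recognises such an address by the PKMAP range and drops the kmap reference instead of tearing down a fixmap slot. Callers are unaffected — a hedged sketch using this era's km_type API (later kernels dropped the type argument):

#include <linux/highmem.h>

void *vaddr = kmap_atomic(page, KM_USER0);	/* may be a reused pkmap address */
memcpy(vaddr, src, len);
kunmap_atomic(vaddr, KM_USER0);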
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 3a7279c1ce5..ea36186f32c 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -15,6 +15,7 @@
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
+#include <linux/sort.h>
#include <linux/highmem.h>
#include <asm/mach-types.h>
@@ -349,12 +350,43 @@ static void __init bootmem_free_node(int node, struct meminfo *mi)
free_area_init_node(node, zone_size, min, zhole_size);
}
+#ifndef CONFIG_SPARSEMEM
+int pfn_valid(unsigned long pfn)
+{
+ struct meminfo *mi = &meminfo;
+ unsigned int left = 0, right = mi->nr_banks;
+
+ do {
+ unsigned int mid = (right + left) / 2;
+ struct membank *bank = &mi->bank[mid];
+
+ if (pfn < bank_pfn_start(bank))
+ right = mid;
+ else if (pfn >= bank_pfn_end(bank))
+ left = mid + 1;
+ else
+ return 1;
+ } while (left < right);
+ return 0;
+}
+EXPORT_SYMBOL(pfn_valid);
+#endif
+
+static int __init meminfo_cmp(const void *_a, const void *_b)
+{
+ const struct membank *a = _a, *b = _b;
+ long cmp = bank_pfn_start(a) - bank_pfn_start(b);
+ return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
+}
+
void __init bootmem_init(void)
{
struct meminfo *mi = &meminfo;
unsigned long min, max_low, max_high;
int node, initrd_node;
+ sort(&mi->bank, mi->nr_banks, sizeof(mi->bank[0]), meminfo_cmp, NULL);
+
/*
* Locate which node contains the ramdisk image, if any.
*/
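Note: pfn_valid() binary-searches the meminfo banks, which is only correct if the banks are sorted by start pfn — hence the new sort() call at the top of bootmem_init() (meminfo_cmp clamps its long difference to -1/0/1 to avoid truncation through the int return type). A hedged, self-contained restatement of the search with illustrative types (struct membank and friends are kernel-internal):

struct bank { unsigned long start_pfn, end_pfn; };	/* illustrative */

static int bank_contains(const struct bank *banks, unsigned int nr,
			 unsigned long pfn)
{
	unsigned int left = 0, right = nr;

	while (left < right) {
		unsigned int mid = (left + right) / 2;

		if (pfn < banks[mid].start_pfn)
			right = mid;		/* continue in the lower half */
		else if (pfn >= banks[mid].end_pfn)
			left = mid + 1;		/* continue in the upper half */
		else
			return 1;		/* pfn falls inside this bank */
	}
	return 0;				/* pfn lies in a hole */
}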
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index ad7bacc693b..900811cc913 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -12,6 +12,7 @@
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/page.h>
+#include <asm/setup.h>
#include <asm/mach/arch.h>
#include "mm.h"
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index 54b1f721dec..7d63beaf974 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -77,19 +77,15 @@
* Sanity check the PTE configuration for the code below - which makes
* certain assumptions about how these bits are layed out.
*/
+#ifdef CONFIG_MMU
#if L_PTE_SHARED != PTE_EXT_SHARED
#error PTE shared bit mismatch
#endif
-#if L_PTE_BUFFERABLE != PTE_BUFFERABLE
-#error PTE bufferable bit mismatch
-#endif
-#if L_PTE_CACHEABLE != PTE_CACHEABLE
-#error PTE cacheable bit mismatch
-#endif
#if (L_PTE_EXEC+L_PTE_USER+L_PTE_WRITE+L_PTE_DIRTY+L_PTE_YOUNG+\
L_PTE_FILE+L_PTE_PRESENT) > L_PTE_SHARED
#error Invalid Linux PTE bit settings
#endif
+#endif /* CONFIG_MMU */
/*
* The ARMv6 and ARMv7 set_pte_ext translation function.
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 180a08d03a0..f3fa1c32fe9 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -127,7 +127,9 @@ ENDPROC(cpu_v7_switch_mm)
*/
ENTRY(cpu_v7_set_pte_ext)
#ifdef CONFIG_MMU
- str r1, [r0], #-2048 @ linux version
+ ARM( str r1, [r0], #-2048 ) @ linux version
+ THUMB( str r1, [r0] ) @ linux version
+ THUMB( sub r0, r0, #2048 )
bic r3, r1, #0x000003f0
bic r3, r3, #PTE_TYPE_MASK
@@ -232,7 +234,6 @@ __v7_setup:
mcr p15, 0, r4, c2, c0, 1 @ load TTB1
mov r10, #0x1f @ domains 0, 1 = manager
mcr p15, 0, r10, c3, c0, 0 @ load domain access register
-#endif
/*
* Memory region attributes with SCTLR.TRE=1
*
@@ -265,6 +266,7 @@ __v7_setup:
ldr r6, =0x40e040e0 @ NMRR
mcr p15, 0, r5, c10, c2, 0 @ write PRRR
mcr p15, 0, r6, c10, c2, 1 @ write NMRR
+#endif
adr r5, v7_crval
ldmia r5, {r5, r6}
#ifdef CONFIG_CPU_ENDIAN_BE8
@@ -273,6 +275,7 @@ __v7_setup:
mrc p15, 0, r0, c1, c0, 0 @ read control register
bic r0, r0, r5 @ clear bits them
orr r0, r0, r6 @ set them
+ THUMB( orr r0, r0, #1 << 30 ) @ Thumb exceptions
mov pc, lr @ return to head.S:__ret
ENDPROC(__v7_setup)
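Note: two Thumb-2 details in this file. First, "str r1, [r0], #-2048" stores the Linux view of the PTE and then steps r0 back to the hardware entry 2048 bytes below it; Thumb-2 post-indexed stores only encode an 8-bit immediate, so the store and the sub must be split. Second, bit 30 of the control register is SCTLR.TE (Thumb Exception enable), which makes the core enter exception handlers in Thumb state, as a CONFIG_THUMB2_KERNEL build requires. A hedged C restatement (names illustrative, not kernel API):

#define LINUX_TO_HW_PTE	2048		/* byte offset between the two PTE copies */
#define SCTLR_TE	(1u << 30)	/* take exceptions in Thumb state */

static inline void set_pte_pair(unsigned long *linux_pte,
				unsigned long linux_val, unsigned long hw_val)
{
	linux_pte[0] = linux_val;	/* "str r1, [r0]" */
	/* equivalent of the pointer rewind ("sub r0, r0, #2048") plus store: */
	*(unsigned long *)((char *)linux_pte - LINUX_TO_HW_PTE) = hw_val;
}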