author		Linus Torvalds <torvalds@linux-foundation.org>	2012-07-01 11:02:25 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-07-01 11:02:25 -0700
commit		ca24a145573124732152daff105ba68cc9a2b545
tree		14c32a9f4d73228aa10d58776286a31d55a62f9e /arch
parent		6887a4131da3adaab011613776d865f4bcfb5678
parent		19b52abe3c5d759661500a1dc810924369b2ad46
Merge branch 'fixes' of git://git.linaro.org/people/rmk/linux-arm
Pull two ARM fixes from Russell King:
 "It's been fairly quiet with the fixes.  Just two this time.  One fixes
  a long standing problem with KALLSYMS needing an additional pass, and
  the other sorts a problem with the vmalloc space interacting with
  static IO mappings."

* 'fixes' of git://git.linaro.org/people/rmk/linux-arm:
  ARM: 7438/1: fill possible PMD empty section gaps
  ARM: 7428/1: Prevent KALLSYM size mismatch on ARM.
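For context on the second fix: on 2-level (non-LPAE) ARM, one Linux PMD covers 2MB and is backed by two consecutive 1MB hardware section entries. A standalone sketch of that layout, using the same SECTION_SIZE/PMD_SIZE arithmetic as the mmu.c patch below (the sample address is hypothetical, not from the patch):

#include <stdio.h>

#define SECTION_SIZE	(1UL << 20)		/* 1MB hardware section */
#define PMD_SIZE	(2 * SECTION_SIZE)	/* 2MB Linux PMD */
#define PMD_MASK	(~(PMD_SIZE - 1))

int main(void)
{
	unsigned long io = 0xf8100000UL;	/* hypothetical 1MB static mapping */

	printf("mapping %#lx lives in PMD %#lx, section entry %lu of 2\n",
	       io, io & PMD_MASK, (io & ~PMD_MASK) / SECTION_SIZE + 1);
	/* If create_mapping() writes only this one section entry, the other
	 * 1MB half of the PMD still looks free to vmalloc()/ioremap() even
	 * though the PMD itself is half-populated -- the gap that the
	 * ARM 7438/1 patch below plugs with dummy vm entries. */
	return 0;
}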
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm/kernel/vmlinux.lds.S	 2
-rw-r--r--	arch/arm/mm/mmu.c		74
2 files changed, 76 insertions(+), 0 deletions(-)
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 43a31fb0631..36ff15bbfdd 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -183,7 +183,9 @@ SECTIONS
 	}
 #endif
 
+#ifdef CONFIG_SMP
 	PERCPU_SECTION(L1_CACHE_BYTES)
+#endif
 
 #ifdef CONFIG_XIP_KERNEL
 	__data_loc = ALIGN(4);		/* location in binary */
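The guard above is the whole of the first fix: a uniprocessor kernel no longer emits a .data..percpu output section at all. A minimal sketch of the idea, assuming a section-attribute scheme in the spirit of (but not identical to) the kernel's DEFINE_PER_CPU:

/* Sketch only: on SMP a per-CPU variable is placed in the .data..percpu
 * section so it can be replicated per CPU at boot; on UP it is just an
 * ordinary global, so the section need not exist. */
#ifdef CONFIG_SMP
#define DEFINE_PER_CPU_SKETCH(type, name) \
	__attribute__((section(".data..percpu"))) type name
#else
#define DEFINE_PER_CPU_SKETCH(type, name) type name
#endif

DEFINE_PER_CPU_SKETCH(unsigned long, irq_count);

With no per-CPU copies to lay out, the UP linker script drops that section entirely, which, per the 7428/1 title above, is what was producing the KALLSYM size mismatch.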
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index e5dad60b558..cf4528d5177 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -791,6 +791,79 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 	}
 }
 
+#ifndef CONFIG_ARM_LPAE
+
+/*
+ * The Linux PMD is made of two consecutive section entries covering 2MB
+ * (see definition in include/asm/pgtable-2level.h).  However a call to
+ * create_mapping() may optimize static mappings by using individual
+ * 1MB section mappings.  This leaves the actual PMD potentially half
+ * initialized if the top or bottom section entry isn't used, leaving it
+ * open to problems if a subsequent ioremap() or vmalloc() tries to use
+ * the virtual space left free by that unused section entry.
+ *
+ * Let's avoid the issue by inserting dummy vm entries covering the unused
+ * PMD halves once the static mappings are in place.
+ */
+
+static void __init pmd_empty_section_gap(unsigned long addr)
+{
+	struct vm_struct *vm;
+
+	vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
+	vm->addr = (void *)addr;
+	vm->size = SECTION_SIZE;
+	vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
+	vm->caller = pmd_empty_section_gap;
+	vm_area_add_early(vm);
+}
+
+static void __init fill_pmd_gaps(void)
+{
+	struct vm_struct *vm;
+	unsigned long addr, next = 0;
+	pmd_t *pmd;
+
+	/* we're still single threaded hence no lock needed here */
+	for (vm = vmlist; vm; vm = vm->next) {
+		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+			continue;
+		addr = (unsigned long)vm->addr;
+		if (addr < next)
+			continue;
+
+		/*
+		 * Check if this vm starts on an odd section boundary.
+		 * If so and the first section entry for this PMD is free
+		 * then we block the corresponding virtual address.
+		 */
+		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
+			pmd = pmd_off_k(addr);
+			if (pmd_none(*pmd))
+				pmd_empty_section_gap(addr & PMD_MASK);
+		}
+
+		/*
+		 * Then check if this vm ends on an odd section boundary.
+		 * If so and the second section entry for this PMD is empty
+		 * then we block the corresponding virtual address.
+		 */
+		addr += vm->size;
+		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
+			pmd = pmd_off_k(addr) + 1;
+			if (pmd_none(*pmd))
+				pmd_empty_section_gap(addr);
+		}
+
+		/* no need to look at any vm entry until we hit the next PMD */
+		next = (addr + PMD_SIZE - 1) & PMD_MASK;
+	}
+}
+
+#else
+#define fill_pmd_gaps() do { } while (0)
+#endif
+
 static void * __initdata vmalloc_min =
 	(void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
 
@@ -1072,6 +1145,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	 */
 	if (mdesc->map_io)
 		mdesc->map_io();
+	fill_pmd_gaps();
 
 	/*
 	 * Finally flush the caches and tlb to ensure that we're in a
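To see the two boundary checks in fill_pmd_gaps() at work, a user-space dry run under the same 1MB/2MB constants (the mapping addresses and names are made up, and the pmd_none() test is omitted since there is no page table here):

#include <stdio.h>

#define SECTION_SIZE	(1UL << 20)		/* 1MB section */
#define PMD_SIZE	(2 * SECTION_SIZE)	/* 2MB Linux PMD */
#define PMD_MASK	(~(PMD_SIZE - 1))

struct mapping {
	unsigned long addr, size;
	const char *name;
};

int main(void)
{
	/* hypothetical static IO mappings */
	struct mapping maps[] = {
		{ 0xf8100000UL, SECTION_SIZE, "uart" },	/* starts odd */
		{ 0xfa000000UL, SECTION_SIZE, "gic" },	/* ends odd */
	};
	unsigned i;

	for (i = 0; i < sizeof(maps) / sizeof(maps[0]); i++) {
		unsigned long start = maps[i].addr;
		unsigned long end = start + maps[i].size;

		printf("%s: %#lx-%#lx\n", maps[i].name, start, end);
		/* starts on an odd section boundary: bottom half of PMD free */
		if ((start & ~PMD_MASK) == SECTION_SIZE)
			printf("  block bottom half at %#lx\n",
			       start & PMD_MASK);
		/* ends on an odd section boundary: top half of PMD free */
		if ((end & ~PMD_MASK) == SECTION_SIZE)
			printf("  block top half at %#lx\n", end);
	}
	return 0;
}

Here the "uart" mapping starts on an odd 1MB boundary, so the dry run blocks the bottom half of its PMD at 0xf8000000; the "gic" mapping ends on one, so the top half at 0xfa100000 gets blocked instead, just as the patch does with dummy vm entries.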