author    Thomas Gleixner <tglx@linutronix.de>  2010-12-09 18:17:25 +0100
committer Thomas Gleixner <tglx@linutronix.de>  2010-12-09 18:17:25 +0100
commit    d834a9dcecae834cd6b2bc5e50e1907738d9cf6a (patch)
tree      0589d753465d3fe359ba451ba6cb7798df03aaa2 /arch/mn10300/mm/init.c
parent    a38c5380ef9f088be9f49b6e4c5d80af8b1b5cd4 (diff)
parent    f658bcfb2607bf0808966a69cf74135ce98e5c2d (diff)
Merge branch 'x86/amd-nb' into x86/apic-cleanups
Reason: apic cleanup series depends on x86/apic, x86/amd-nb and x86/platform

Conflicts:
	arch/x86/include/asm/io_apic.h

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/mn10300/mm/init.c')
-rw-r--r--  arch/mn10300/mm/init.c  26
1 file changed, 23 insertions, 3 deletions
diff --git a/arch/mn10300/mm/init.c b/arch/mn10300/mm/init.c
index 6e6bc0e5152..48907cc3bdb 100644
--- a/arch/mn10300/mm/init.c
+++ b/arch/mn10300/mm/init.c
@@ -41,6 +41,10 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+static struct vm_struct user_iomap_vm;
+#endif
+
/*
* set up paging
*/
@@ -73,7 +77,24 @@ void __init paging_init(void)
/* pass the memory from the bootmem allocator to the main allocator */
free_area_init(zones_size);
- __flush_tlb_all();
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+ /* The Atomic Operation Unit registers need to be mapped to userspace
+ * for all processes. The following uses vm_area_register_early() to
+ * reserve the first page of the vmalloc area and sets the pte for that
+ * page.
+ *
+ * glibc hardcodes this virtual mapping, so we're pretty much stuck with
+ * it from now on.
+ */
+ user_iomap_vm.flags = VM_USERMAP;
+ user_iomap_vm.size = 1 << PAGE_SHIFT;
+ vm_area_register_early(&user_iomap_vm, PAGE_SIZE);
+ ppte = kernel_vmalloc_ptes;
+ set_pte(ppte, pfn_pte(USER_ATOMIC_OPS_PAGE_ADDR >> PAGE_SHIFT,
+ PAGE_USERIO));
+#endif
+
+ local_flush_tlb_all();
}
/*
@@ -84,8 +105,7 @@ void __init mem_init(void)
int codesize, reservedpages, datasize, initsize;
int tmp;
- if (!mem_map)
- BUG();
+ BUG_ON(!mem_map);
#define START_PFN (contig_page_data.bdata->node_min_pfn)
#define MAX_LOW_PFN (contig_page_data.bdata->node_low_pfn)