Diffstat (limited to 'arch/mn10300/mm/init.c')
 arch/mn10300/mm/init.c | 26 +++++++++++++++++++++++---
 1 file changed, 23 insertions(+), 3 deletions(-)
diff --git a/arch/mn10300/mm/init.c b/arch/mn10300/mm/init.c
index 6e6bc0e5152..48907cc3bdb 100644
--- a/arch/mn10300/mm/init.c
+++ b/arch/mn10300/mm/init.c
@@ -41,6 +41,10 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 unsigned long highstart_pfn, highend_pfn;
 
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+static struct vm_struct user_iomap_vm;
+#endif
+
 /*
  * set up paging
  */
 void __init paging_init(void)
@@ -73,7 +77,24 @@ void __init paging_init(void)
 	/* pass the memory from the bootmem allocator to the main allocator */
 	free_area_init(zones_size);
 
-	__flush_tlb_all();
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+	/* The Atomic Operation Unit registers need to be mapped to userspace
+	 * for all processes. The following uses vm_area_register_early() to
+	 * reserve the first page of the vmalloc area and sets the pte for that
+	 * page.
+	 *
+	 * glibc hardcodes this virtual mapping, so we're pretty much stuck with
+	 * it from now on.
+	 */
+	user_iomap_vm.flags = VM_USERMAP;
+	user_iomap_vm.size = 1 << PAGE_SHIFT;
+	vm_area_register_early(&user_iomap_vm, PAGE_SIZE);
+	ppte = kernel_vmalloc_ptes;
+	set_pte(ppte, pfn_pte(USER_ATOMIC_OPS_PAGE_ADDR >> PAGE_SHIFT,
+		      PAGE_USERIO));
+#endif
+
+	local_flush_tlb_all();
 }
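
For readers unfamiliar with the idiom in the hunk above: vm_area_register_early() carves early vm_struct registrations out of the vmalloc area in call order, starting at VMALLOC_START, so the first PAGE_SIZE-aligned registration is guaranteed a fixed, compile-time-predictable address. A minimal sketch of the pattern, using hypothetical names (example_vm, reserve_fixed_user_page) rather than the mn10300-specific ones:

#include <linux/init.h>
#include <linux/vmalloc.h>
#include <asm/page.h>

/* Hypothetical sketch, not part of this patch. */
static struct vm_struct example_vm;

static void __init reserve_fixed_user_page(void)
{
	example_vm.flags = VM_USERMAP;		/* intended to be visible to userspace */
	example_vm.size  = 1 << PAGE_SHIFT;	/* exactly one page */

	/* Early registrations are handed out sequentially from
	 * VMALLOC_START; being the first caller with PAGE_SIZE
	 * alignment pins example_vm.addr to the start of the
	 * vmalloc area. */
	vm_area_register_early(&example_vm, PAGE_SIZE);

	/* The caller can then install a PTE for example_vm.addr by
	 * hand -- the patch does this with set_pte()/pfn_pte() and a
	 * user-accessible protection (PAGE_USERIO) so every process
	 * sees the device page at the same virtual address. */
}

The comment about glibc is the crucial constraint: once userspace hardcodes the resulting address, that first vmalloc page becomes part of the kernel/userspace ABI and can never be moved or reused.
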
 /*
@@ -84,8 +105,7 @@ void __init mem_init(void)
 	int codesize, reservedpages, datasize, initsize;
 	int tmp;
 
-	if (!mem_map)
-		BUG();
+	BUG_ON(!mem_map);
 
 #define START_PFN	(contig_page_data.bdata->node_min_pfn)
 #define MAX_LOW_PFN	(contig_page_data.bdata->node_low_pfn)
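
The mem_init() hunk is a simple cleanup: the open-coded test-and-BUG() is replaced by the equivalent BUG_ON() macro, whose generic definition (from include/asm-generic/bug.h, slightly simplified) is:

#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0)

Besides being shorter, BUG_ON() keeps the condition and the assertion on one line and lets an architecture supply an optimised implementation.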