author    Arnd Bergmann <arnd@arndb.de>  2012-10-04 22:57:00 +0200
committer Arnd Bergmann <arnd@arndb.de>  2012-10-04 22:57:51 +0200
commit    c37d6154c0b9163c27e53cc1d0be3867b4abd760 (patch)
tree      7a24522c56d1cb284dff1d3c225bbdaba0901bb5 /arch/arm64/mm
parent    e7a570ff7dff9af6e54ff5e580a61ec7652137a0 (diff)
parent    8a1ab3155c2ac7fbe5f2038d6e26efeb607a1498 (diff)
Merge branch 'disintegrate-asm-generic' of git://git.infradead.org/users/dhowells/linux-headers into asm-generic
Patches from David Howells <dhowells@redhat.com>:

This is to complete part of the UAPI disintegration for which the preparatory
patches were pulled recently. Note that there are some fixup patches which are
at the base of the branch aimed at you, plus all arches get the asm-generic
branch merged in too.

* 'disintegrate-asm-generic' of git://git.infradead.org/users/dhowells/linux-headers:
  UAPI: (Scripted) Disintegrate include/asm-generic
  UAPI: Fix conditional header installation handling (notably kvm_para.h on m68k)
  c6x: remove c6x signal.h
  UAPI: Split compound conditionals containing __KERNEL__ in Arm64
  UAPI: Fix the guards on various asm/unistd.h files

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
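For readers unfamiliar with the "Split compound conditionals containing __KERNEL__" step listed above, the fragment below is a purely illustrative sketch of what such a split looks like so the UAPI scripts can move the exported half of a header verbatim; the macro names are invented and not taken from this merge.

/* Before: one compound conditional mixes the kernel-only test with another guard */
#if defined(__KERNEL__) && !defined(EXAMPLE_COMPAT)	/* EXAMPLE_COMPAT is a made-up symbol */
#define EXAMPLE_FLAG 1
#endif

/* After: the __KERNEL__ test is split out, so the inner block can be kept in the
 * kernel-private header while everything outside it moves to uapi/. */
#ifdef __KERNEL__
#ifndef EXAMPLE_COMPAT
#define EXAMPLE_FLAG 1
#endif
#endif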
Diffstat (limited to 'arch/arm64/mm')
-rw-r--r--   arch/arm64/mm/Makefile          4
-rw-r--r--   arch/arm64/mm/cache.S         168
-rw-r--r--   arch/arm64/mm/context.c       159
-rw-r--r--   arch/arm64/mm/copypage.c       34
-rw-r--r--   arch/arm64/mm/dma-mapping.c    79
-rw-r--r--   arch/arm64/mm/extable.c        17
-rw-r--r--   arch/arm64/mm/fault.c         534
-rw-r--r--   arch/arm64/mm/flush.c         135
-rw-r--r--   arch/arm64/mm/init.c          437
-rw-r--r--   arch/arm64/mm/ioremap.c        84
-rw-r--r--   arch/arm64/mm/mm.h              2
-rw-r--r--   arch/arm64/mm/mmap.c          144
-rw-r--r--   arch/arm64/mm/mmu.c           395
-rw-r--r--   arch/arm64/mm/pgd.c            54
-rw-r--r--   arch/arm64/mm/proc-macros.S    55
-rw-r--r--   arch/arm64/mm/proc.S          175
-rw-r--r--   arch/arm64/mm/tlb.S            71
17 files changed, 2547 insertions, 0 deletions
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile
new file mode 100644
index 00000000000..3140a2abcdc
--- /dev/null
+++ b/arch/arm64/mm/Makefile
@@ -0,0 +1,4 @@
+obj-y := dma-mapping.o extable.o fault.o init.o \
+ cache.o copypage.o flush.o \
+ ioremap.o mmap.o pgd.o mmu.o \
+ context.o tlb.o proc.o
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
new file mode 100644
index 00000000000..abe69b80cf7
--- /dev/null
+++ b/arch/arm64/mm/cache.S
@@ -0,0 +1,168 @@
+/*
+ * Cache maintenance
+ *
+ * Copyright (C) 2001 Deep Blue Solutions Ltd.
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <asm/assembler.h>
+
+#include "proc-macros.S"
+
+/*
+ * __flush_dcache_all()
+ *
+ * Flush the whole D-cache.
+ *
+ * Corrupted registers: x0-x7, x9-x11
+ */
+ENTRY(__flush_dcache_all)
+ dsb sy // ensure ordering with previous memory accesses
+ mrs x0, clidr_el1 // read clidr
+ and x3, x0, #0x7000000 // extract loc from clidr
+ lsr x3, x3, #23 // left align loc bit field
+ cbz x3, finished // if loc is 0, then no need to clean
+ mov x10, #0 // start clean at cache level 0
+loop1:
+ add x2, x10, x10, lsr #1 // work out 3x current cache level
+ lsr x1, x0, x2 // extract cache type bits from clidr
+ and x1, x1, #7 // mask off the bits for the current cache only
+ cmp x1, #2 // see what cache we have at this level
+ b.lt skip // skip if no cache, or just i-cache
+ save_and_disable_irqs x9 // make CSSELR and CCSIDR access atomic
+ msr csselr_el1, x10 // select current cache level in csselr
+ isb // isb to sync the new csselr & ccsidr
+ mrs x1, ccsidr_el1 // read the new ccsidr
+ restore_irqs x9
+ and x2, x1, #7 // extract the length of the cache lines
+ add x2, x2, #4 // add 4 (line length offset)
+ mov x4, #0x3ff
+ and x4, x4, x1, lsr #3 // find the maximum way number (ways - 1)
+ clz x5, x4 // find bit position of way size increment
+ mov x7, #0x7fff
+ and x7, x7, x1, lsr #13 // extract the maximum set (index) number
+loop2:
+ mov x9, x4 // create working copy of max way size
+loop3:
+ lsl x6, x9, x5
+ orr x11, x10, x6 // factor way and cache number into x11
+ lsl x6, x7, x2
+ orr x11, x11, x6 // factor index number into x11
+ dc cisw, x11 // clean & invalidate by set/way
+ subs x9, x9, #1 // decrement the way
+ b.ge loop3
+ subs x7, x7, #1 // decrement the index
+ b.ge loop2
+skip:
+ add x10, x10, #2 // increment cache number
+ cmp x3, x10
+ b.gt loop1
+finished:
+ mov x10, #0 // switch back to cache level 0
+ msr csselr_el1, x10 // select current cache level in csselr
+ dsb sy
+ isb
+ ret
+ENDPROC(__flush_dcache_all)
+
+/*
+ * flush_cache_all()
+ *
+ * Flush the entire cache system. The data cache flush is now achieved
+ * using atomic clean / invalidates working outwards from L1 cache. This
+ * is done using Set/Way based cache maintenance instructions. The
+ * instruction cache can still be invalidated back to the point of
+ * unification in a single instruction.
+ */
+ENTRY(flush_cache_all)
+ mov x12, lr
+ bl __flush_dcache_all
+ mov x0, #0
+ ic ialluis // I+BTB cache invalidate
+ ret x12
+ENDPROC(flush_cache_all)
+
+/*
+ * flush_icache_range(start,end)
+ *
+ * Ensure that the I and D caches are coherent within specified region.
+ * This is typically used when code has been written to a memory region,
+ * and will be executed.
+ *
+ * - start - virtual start address of region
+ * - end - virtual end address of region
+ */
+ENTRY(flush_icache_range)
+ /* FALLTHROUGH */
+
+/*
+ * __flush_cache_user_range(start,end)
+ *
+ * Ensure that the I and D caches are coherent within specified region.
+ * This is typically used when code has been written to a memory region,
+ * and will be executed.
+ *
+ * - start - virtual start address of region
+ * - end - virtual end address of region
+ */
+ENTRY(__flush_cache_user_range)
+ dcache_line_size x2, x3
+ sub x3, x2, #1
+ bic x4, x0, x3
+1:
+USER(9f, dc cvau, x4 ) // clean D line to PoU
+ add x4, x4, x2
+ cmp x4, x1
+ b.lo 1b
+ dsb sy
+
+ icache_line_size x2, x3
+ sub x3, x2, #1
+ bic x4, x0, x3
+1:
+USER(9f, ic ivau, x4 ) // invalidate I line to PoU
+ add x4, x4, x2
+ cmp x4, x1
+ b.lo 1b
+9: // ignore any faulting cache operation
+ dsb sy
+ isb
+ ret
+ENDPROC(flush_icache_range)
+ENDPROC(__flush_cache_user_range)
+
+/*
+ * __flush_dcache_area(kaddr, size)
+ *
+ * Ensure that the data held in the area described by kaddr and size is
+ * cleaned and invalidated back to memory.
+ *
+ * - kaddr - kernel start address of the area
+ * - size - size of the area in bytes
+ */
+ENTRY(__flush_dcache_area)
+ dcache_line_size x2, x3
+ add x1, x0, x1
+ sub x3, x2, #1
+ bic x0, x0, x3
+1: dc civac, x0 // clean & invalidate D line / unified line
+ add x0, x0, x2
+ cmp x0, x1
+ b.lo 1b
+ dsb sy
+ ret
+ENDPROC(__flush_dcache_area)
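As a reading aid for the set/way loop in __flush_dcache_all above, the small userspace program below decodes the same CCSIDR_EL1 fields (line-length offset, ways, sets) that the #0x3ff, #0x7fff and "add #4" constants extract; the register value used is invented for illustration, not read from any particular CPU.

#include <stdio.h>

/* Decode CCSIDR_EL1 the way the assembly does; the sample value is hypothetical. */
int main(void)
{
	unsigned long ccsidr = 0x701fe00a;			/* made-up example encoding */
	unsigned int line_shift = (ccsidr & 0x7) + 4;		/* log2(bytes per line) */
	unsigned int ways = ((ccsidr >> 3) & 0x3ff) + 1;	/* associativity */
	unsigned int sets = ((ccsidr >> 13) & 0x7fff) + 1;	/* number of sets */

	printf("line size %u bytes, %u ways, %u sets (%lu KiB)\n",
	       1u << line_shift, ways, sets,
	       ((unsigned long)sets * ways << line_shift) >> 10);
	return 0;
}

The set/way word passed to "dc cisw" is then built by placing the way number in the top bits (hence the clz of the maximum way number), the set number at the line-length offset, and the cache level in bits [3:1].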
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
new file mode 100644
index 00000000000..baa758d3702
--- /dev/null
+++ b/arch/arm64/mm/context.c
@@ -0,0 +1,159 @@
+/*
+ * Based on arch/arm/mm/context.c
+ *
+ * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/percpu.h>
+
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
+#include <asm/cachetype.h>
+
+#define asid_bits(reg) \
+ (((read_cpuid(ID_AA64MMFR0_EL1) & 0xf0) >> 2) + 8)
+
+#define ASID_FIRST_VERSION (1 << MAX_ASID_BITS)
+
+static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
+unsigned int cpu_last_asid = ASID_FIRST_VERSION;
+
+/*
+ * We fork()ed a process, and we need a new context for the child to run in.
+ */
+void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+ mm->context.id = 0;
+ raw_spin_lock_init(&mm->context.id_lock);
+}
+
+static void flush_context(void)
+{
+ /* set the reserved TTBR0 before flushing the TLB */
+ cpu_set_reserved_ttbr0();
+ flush_tlb_all();
+ if (icache_is_aivivt())
+ __flush_icache_all();
+}
+
+#ifdef CONFIG_SMP
+
+static void set_mm_context(struct mm_struct *mm, unsigned int asid)
+{
+ unsigned long flags;
+
+ /*
+ * Locking needed for multi-threaded applications where the same
+ * mm->context.id could be set from different CPUs during the
+ * broadcast. This function is also called via IPI so the
+ * mm->context.id_lock has to be IRQ-safe.
+ */
+ raw_spin_lock_irqsave(&mm->context.id_lock, flags);
+ if (likely((mm->context.id ^ cpu_last_asid) >> MAX_ASID_BITS)) {
+ /*
+ * Old version of ASID found. Set the new one and reset
+ * mm_cpumask(mm).
+ */
+ mm->context.id = asid;
+ cpumask_clear(mm_cpumask(mm));
+ }
+ raw_spin_unlock_irqrestore(&mm->context.id_lock, flags);
+
+ /*
+ * Set the mm_cpumask(mm) bit for the current CPU.
+ */
+ cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+}
+
+/*
+ * Reset the ASID on the current CPU. This function call is broadcast from the
+ * CPU handling the ASID rollover and holding cpu_asid_lock.
+ */
+static void reset_context(void *info)
+{
+ unsigned int asid;
+ unsigned int cpu = smp_processor_id();
+ struct mm_struct *mm = current->active_mm;
+
+ smp_rmb();
+ asid = cpu_last_asid + cpu;
+
+ flush_context();
+ set_mm_context(mm, asid);
+
+ /* set the new ASID */
+ cpu_switch_mm(mm->pgd, mm);
+}
+
+#else
+
+static inline void set_mm_context(struct mm_struct *mm, unsigned int asid)
+{
+ mm->context.id = asid;
+ cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
+}
+
+#endif
+
+void __new_context(struct mm_struct *mm)
+{
+ unsigned int asid;
+ unsigned int bits = asid_bits();
+
+ raw_spin_lock(&cpu_asid_lock);
+#ifdef CONFIG_SMP
+ /*
+ * Check the ASID again, in case the change was broadcast from another
+ * CPU before we acquired the lock.
+ */
+ if (!unlikely((mm->context.id ^ cpu_last_asid) >> MAX_ASID_BITS)) {
+ cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+ raw_spin_unlock(&cpu_asid_lock);
+ return;
+ }
+#endif
+ /*
+ * At this point, it is guaranteed that the current mm (with an old
+ * ASID) isn't active on any other CPU since the ASIDs are changed
+ * simultaneously via IPI.
+ */
+ asid = ++cpu_last_asid;
+
+ /*
+ * If we've used up all our ASIDs, we need to start a new version and
+ * flush the TLB.
+ */
+ if (unlikely((asid & ((1 << bits) - 1)) == 0)) {
+ /* increment the ASID version */
+ cpu_last_asid += (1 << MAX_ASID_BITS) - (1 << bits);
+ if (cpu_last_asid == 0)
+ cpu_last_asid = ASID_FIRST_VERSION;
+ asid = cpu_last_asid + smp_processor_id();
+ flush_context();
+#ifdef CONFIG_SMP
+ smp_wmb();
+ smp_call_function(reset_context, NULL, 1);
+#endif
+ cpu_last_asid += NR_CPUS - 1;
+ }
+
+ set_mm_context(mm, asid);
+ raw_spin_unlock(&cpu_asid_lock);
+}
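To make the version/ASID split used by __new_context() concrete, the toy program below replays the generation arithmetic with assumed values (MAX_ASID_BITS of 16 and an 8-bit hardware ASID field); it deliberately ignores the SMP broadcast, the per-CPU reservation via NR_CPUS and the TLB flush, and the constants are placeholders rather than values read from hardware.

#include <stdio.h>

#define MAX_ASID_BITS		16			/* assumed for illustration */
#define ASID_FIRST_VERSION	(1u << MAX_ASID_BITS)

int main(void)
{
	unsigned int bits = 8;			/* pretend the CPU implements 8-bit ASIDs */
	unsigned int cpu_last_asid = ASID_FIRST_VERSION;
	unsigned int asid;

	for (int i = 0; i < 300; i++) {
		asid = ++cpu_last_asid;
		if ((asid & ((1u << bits) - 1)) == 0) {
			/* hardware ASIDs exhausted: bump the version, as in __new_context() */
			cpu_last_asid += (1u << MAX_ASID_BITS) - (1u << bits);
			asid = cpu_last_asid;	/* CPU 0's ASID after the rollover */
		}
		if (i < 3 || (asid & ((1u << bits) - 1)) < 3)
			printf("context.id 0x%x = version %u, hw ASID %u\n",
			       asid, asid >> MAX_ASID_BITS,
			       asid & ((1u << bits) - 1));
	}
	return 0;
}

The upper bits act as a generation counter: a context whose version no longer matches cpu_last_asid is stale and must take the slow path, which is exactly the check done under cpu_asid_lock above.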
diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c
new file mode 100644
index 00000000000..9aecbace412
--- /dev/null
+++ b/arch/arm64/mm/copypage.c
@@ -0,0 +1,34 @@
+/*
+ * Based on arch/arm/mm/copypage.c
+ *
+ * Copyright (C) 2002 Deep Blue Solutions Ltd, All Rights Reserved.
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/mm.h>
+
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+
+void __cpu_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
+{
+ copy_page(kto, kfrom);
+ __flush_dcache_area(kto, PAGE_SIZE);
+}
+
+void __cpu_clear_user_page(void *kaddr, unsigned long vaddr)
+{
+ clear_page(kaddr);
+}
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
new file mode 100644
index 00000000000..5eb244453a5
--- /dev/null
+++ b/arch/arm64/mm/dma-mapping.c
@@ -0,0 +1,79 @@
+/*
+ * SWIOTLB-based DMA API implementation
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Catalin Marinas <catalin.marinas@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/gfp.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/swiotlb.h>
+
+#include <asm/cacheflush.h>
+
+struct dma_map_ops *dma_ops;
+EXPORT_SYMBOL(dma_ops);
+
+static void *arm64_swiotlb_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags,
+ struct dma_attrs *attrs)
+{
+ if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
+ dev->coherent_dma_mask <= DMA_BIT_MASK(32))
+ flags |= GFP_DMA32;
+ return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
+}
+
+static void arm64_swiotlb_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
+{
+ swiotlb_free_coherent(dev, size, vaddr, dma_handle);
+}
+
+static struct dma_map_ops arm64_swiotlb_dma_ops = {
+ .alloc = arm64_swiotlb_alloc_coherent,
+ .free = arm64_swiotlb_free_coherent,
+ .map_page = swiotlb_map_page,
+ .unmap_page = swiotlb_unmap_page,
+ .map_sg = swiotlb_map_sg_attrs,
+ .unmap_sg = swiotlb_unmap_sg_attrs,
+ .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
+ .sync_single_for_device = swiotlb_sync_single_for_device,
+ .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
+ .sync_sg_for_device = swiotlb_sync_sg_for_device,
+ .dma_supported = swiotlb_dma_supported,
+ .mapping_error = swiotlb_dma_mapping_error,
+};
+
+void __init swiotlb_init_with_default_size(size_t default_size, int verbose);
+
+void __init arm64_swiotlb_init(size_t max_size)
+{
+ dma_ops = &arm64_swiotlb_dma_ops;
+ swiotlb_init_with_default_size(min((size_t)SZ_64M, max_size), 1);
+}
+
+#define PREALLOC_DMA_DEBUG_ENTRIES 4096
+
+static int __init dma_debug_do_init(void)
+{
+ dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
+ return 0;
+}
+fs_initcall(dma_debug_do_init);
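For context, a hypothetical driver fragment is sketched below (it is not part of this patch, and the probe function is made up): a plain dma_alloc_coherent() call resolves through the registered dma_map_ops to arm64_swiotlb_alloc_coherent() above, which decides whether GFP_DMA32 is needed before handing off to swiotlb.

#include <linux/dma-mapping.h>

static int example_probe(struct device *dev)		/* hypothetical probe */
{
	dma_addr_t bus;
	void *cpu = dma_alloc_coherent(dev, 4096, &bus, GFP_KERNEL);

	if (!cpu)
		return -ENOMEM;
	/* ... program the (imaginary) device with 'bus', touch the buffer via 'cpu' ... */
	dma_free_coherent(dev, 4096, cpu, bus);
	return 0;
}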
diff --git a/arch/arm64/mm/extable.c b/arch/arm64/mm/extable.c
new file mode 100644
index 00000000000..79444279ba8
--- /dev/null
+++ b/arch/arm64/mm/extable.c
@@ -0,0 +1,17 @@
+/*
+ * Based on arch/arm/mm/extable.c
+ */
+
+#include <linux/module.h>
+#include <linux/uaccess.h>
+
+int fixup_exception(struct pt_regs *regs)
+{
+ const struct exception_table_entry *fixup;
+
+ fixup = search_exception_tables(instruction_pointer(regs));
+ if (fixup)
+ regs->pc = fixup->fixup;
+
+ return fixup != NULL;
+}
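fixup_exception() cooperates with the USER() annotations seen in cache.S: each annotated instruction contributes an entry pairing its address with a fixup label, and search_exception_tables() looks the faulting PC up in that table. The sketch below is conceptual only; the field layout stands in for struct exception_table_entry and the real lookup is a sorted binary search, not a linear scan.

struct example_extable_entry {		/* stand-in for struct exception_table_entry */
	unsigned long insn;	/* address of the instruction allowed to fault */
	unsigned long fixup;	/* where fixup_exception() points regs->pc */
};

static unsigned long example_search(const struct example_extable_entry *tbl,
				    int n, unsigned long pc)
{
	for (int i = 0; i < n; i++)	/* illustrative linear scan */
		if (tbl[i].insn == pc)
			return tbl[i].fixup;
	return 0;
}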
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
new file mode 100644
index 00000000000..1909a69983c
--- /dev/null
+++ b/arch/arm64/mm/fault.c
@@ -0,0 +1,534 @@
+/*
+ * Based on arch/arm/mm/fault.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ * Copyright (C) 1995-2004 Russell King
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/signal.h>
+#include <linux/mm.h>
+#include <linux/hardirq.h>
+#include <linux/init.h>
+#include <linux/kprobes.h>
+#include <linux/uaccess.h>
+#include <linux/page-flags.h>
+#include <linux/sched.h>
+#include <linux/highmem.h>
+#include <linux/perf_event.h>
+
+#include <asm/exception.h>
+#include <asm/debug-monitors.h>
+#include <asm/system_misc.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+/*
+ * Dump out the page tables associated with 'addr' in mm 'mm'.
+ */
+void show_pte(struct mm_struct *mm, unsigned long addr)
+{
+ pgd_t *pgd;
+
+ if (!mm)
+ mm = &init_mm;
+
+ pr_alert("pgd = %p\n", mm->pgd);
+ pgd = pgd_offset(mm, addr);
+ pr_alert("[%08lx] *pgd=%016llx", addr, pgd_val(*pgd));
+
+ do {
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ if (pgd_none_or_clear_bad(pgd))
+ break;
+
+ pud = pud_offset(pgd, addr);
+ if (pud_none_or_clear_bad(pud))
+ break;
+
+ pmd = pmd_offset(pud, addr);
+ printk(", *pmd=%016llx", pmd_val(*pmd));
+ if (pmd_none_or_clear_bad(pmd))
+ break;
+
+ pte = pte_offset_map(pmd, addr);
+ printk(", *pte=%016llx", pte_val(*pte));
+ pte_unmap(pte);
+ } while(0);
+
+ printk("\n");
+}
+
+/*
+ * The kernel tried to access some page that wasn't present.
+ */
+static void __do_kernel_fault(struct mm_struct *mm, unsigned long addr,
+ unsigned int esr, struct pt_regs *regs)
+{
+ /*
+ * Are we prepared to handle this kernel fault?
+ */
+ if (fixup_exception(regs))
+ return;
+
+ /*
+ * No handler, we'll have to terminate things with extreme prejudice.
+ */
+ bust_spinlocks(1);
+ pr_alert("Unable to handle kernel %s at virtual address %08lx\n",
+ (addr < PAGE_SIZE) ? "NULL pointer dereference" :
+ "paging request", addr);
+
+ show_pte(mm, addr);
+ die("Oops", regs, esr);
+ bust_spinlocks(0);
+ do_exit(SIGKILL);
+}
+
+/*
+ * Something tried to access memory that isn't in our memory map. User mode
+ * accesses just cause a SIGSEGV
+ */
+static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
+ unsigned int esr, unsigned int sig, int code,
+ struct pt_regs *regs)
+{
+ struct siginfo si;
+
+ if (show_unhandled_signals) {
+ pr_info("%s[%d]: unhandled page fault (%d) at 0x%08lx, code 0x%03x\n",
+ tsk->comm, task_pid_nr(tsk), sig, addr, esr);
+ show_pte(tsk->mm, addr);
+ show_regs(regs);
+ }
+
+ tsk->thread.fault_address = addr;
+ si.si_signo = sig;
+ si.si_errno = 0;
+ si.si_code = code;
+ si.si_addr = (void __user *)addr;
+ force_sig_info(sig, &si, tsk);
+}
+
+void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
+{
+ struct task_struct *tsk = current;
+ struct mm_struct *mm = tsk->active_mm;
+
+ /*
+ * If we are in kernel mode at this point, we have no context to
+ * handle this fault with.
+ */
+ if (user_mode(regs))
+ __do_user_fault(tsk, addr, esr, SIGSEGV, SEGV_MAPERR, regs);
+ else
+ __do_kernel_fault(mm, addr, esr, regs);
+}
+
+#define VM_FAULT_BADMAP 0x010000
+#define VM_FAULT_BADACCESS 0x020000
+
+#define ESR_WRITE (1 << 6)
+#define ESR_LNX_EXEC (1 << 24)
+
+/*
+ * Check that the permissions on the VMA allow for the fault which occurred.
+ * If we encountered a write fault, we must have write permission, otherwise
+ * we allow any permission.
+ */
+static inline bool access_error(unsigned int esr, struct vm_area_struct *vma)
+{
+ unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
+
+ if (esr & ESR_WRITE)
+ mask = VM_WRITE;
+ if (esr & ESR_LNX_EXEC)
+ mask = VM_EXEC;
+
+ return vma->vm_flags & mask ? false : true;
+}
+
+static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
+ unsigned int esr, unsigned int flags,
+ struct task_struct *tsk)
+{
+ struct vm_area_struct *vma;
+ int fault;
+
+ vma = find_vma(mm, addr);
+ fault = VM_FAULT_BADMAP;
+ if (unlikely(!vma))
+ goto out;
+ if (unlikely(vma->vm_start > addr))
+ goto check_stack;
+
+ /*
+ * Ok, we have a good vm_area for this memory access, so we can handle
+ * it.
+ */
+good_area:
+ if (access_error(esr, vma)) {
+ fault = VM_FAULT_BADACCESS;
+ goto out;
+ }
+
+ return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
+
+check_stack:
+ if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
+ goto good_area;
+out:
+ return fault;
+}
+
+static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
+ struct pt_regs *regs)
+{
+ struct task_struct *tsk;
+ struct mm_struct *mm;
+ int fault, sig, code;
+ int write = esr & ESR_WRITE;
+ unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+ (write ? FAULT_FLAG_WRITE : 0);
+
+ tsk = current;
+ mm = tsk->mm;
+
+ /* Enable interrupts if they were enabled in the parent context. */
+ if (interrupts_enabled(regs))
+ local_irq_enable();
+
+ /*
+ * If we're in an interrupt or have no user context, we must not take
+ * the fault.
+ */
+ if (in_atomic() || !mm)
+ goto no_context;
+
+ /*
+ * As per x86, we may deadlock here. However, since the kernel only
+ * validly references user space from well defined areas of the code,
+ * we can bug out early if this is from code which shouldn't.
+ */
+ if (!down_read_trylock(&mm->mmap_sem)) {
+ if (!user_mode(regs) && !search_exception_tables(regs->pc))
+ goto no_context;
+retry:
+ down_read(&mm->mmap_sem);
+ } else {
+ /*
+ * The above down_read_trylock() might have succeeded in which
+ * case, we'll have missed the might_sleep() from down_read().
+ */
+ might_sleep();
+#ifdef CONFIG_DEBUG_VM
+ if (!user_mode(regs) && !search_exception_tables(regs->pc))
+ goto no_context;
+#endif
+ }
+
+ fault = __do_page_fault(mm, addr, esr, flags, tsk);
+
+ /*
+ * If we need to retry but a fatal signal is pending, handle the
+ * signal first. We do not need to release the mmap_sem because it
+ * would already be released in __lock_page_or_retry in mm/filemap.c.
+ */
+ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ return 0;
+
+ /*
+ * Major/minor page fault accounting is only done on the initial
+ * attempt. If we go through a retry, it is extremely likely that the
+ * page will be found in page cache at that point.
+ */
+
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
+ if (flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (fault & VM_FAULT_MAJOR) {
+ tsk->maj_flt++;
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,
+ addr);
+ } else {
+ tsk->min_flt++;
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs,
+ addr);
+ }
+ if (fault & VM_FAULT_RETRY) {
+ /*
+ * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of
+ * starvation.
+ */
+ flags &= ~FAULT_FLAG_ALLOW_RETRY;
+ goto retry;
+ }
+ }
+
+ up_read(&mm->mmap_sem);
+
+ /*
+ * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
+ */
+ if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP |
+ VM_FAULT_BADACCESS))))
+ return 0;
+
+ if (fault & VM_FAULT_OOM) {
+ /*
+ * We ran out of memory, call the OOM killer, and return to
+ * userspace (which will retry the fault, or kill us if we got
+ * oom-killed).
+ */
+ pagefault_out_of_memory();
+ return 0;
+ }
+
+ /*
+ * If we are in kernel mode at this point, we have no context to
+ * handle this fault with.
+ */
+ if (!user_mode(regs))
+ goto no_context;
+
+ if (fault & VM_FAULT_SIGBUS) {
+ /*
+ * We had some memory, but were unable to successfully fix up
+ * this page fault.
+ */
+ sig = SIGBUS;
+ code = BUS_ADRERR;
+ } else {
+ /*
+ * Something tried to access memory that isn't in our memory
+ * map.
+ */
+ sig = SIGSEGV;
+ code = fault == VM_FAULT_BADACCESS ?
+ SEGV_ACCERR : SEGV_MAPERR;
+ }
+
+ __do_user_fault(tsk, addr, esr, sig, code, regs);
+ return 0;
+
+no_context:
+ __do_kernel_fault(mm, addr, esr, regs);
+ return 0;
+}
+
+/*
+ * First Level Translation Fault Handler
+ *
+ * We enter here because the first level page table doesn't contain a valid
+ * entry for the address.
+ *
+ * If the address is in kernel space (>= TASK_SIZE), then we are probably
+ * faulting in the vmalloc() area.
+ *
+ * If the init_task's first level page tables contain the relevant entry, we
+ * copy it to this task. If not, we send the process a signal, fix up the
+ * exception, or oops the kernel.
+ *
+ * NOTE! We MUST NOT take any locks for this case. We may be in an interrupt
+ * or a critical region, and should only copy the information from the master
+ * page table, nothing more.
+ */
+static int __kprobes do_translation_fault(unsigned long addr,
+ unsigned int esr,
+ struct pt_regs *regs)
+{
+ if (addr < TASK_SIZE)
+ return do_page_fault(addr, esr, regs);
+
+ do_bad_area(addr, esr, regs);
+ return 0;
+}
+
+/*
+ * Some section permission faults need to be handled gracefully. They can
+ * happen due to a __{get,put}_user during an oops.
+ */
+static int do_sect_fault(unsigned long addr, unsigned int esr,
+ struct pt_regs *regs)
+{
+ do_bad_area(addr, esr, regs);
+ return 0;
+}
+
+/*
+ * This abort handler always returns "fault".
+ */
+static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs)
+{
+ return 1;
+}
+
+static struct fault_info {
+ int (*fn)(unsigned long addr, unsigned int esr, struct pt_regs *regs);
+ int sig;
+ int code;
+ const char *name;
+} fault_info[] = {
+ { do_bad, SIGBUS, 0, "ttbr address size fault" },
+ { do_bad, SIGBUS, 0, "level 1 address size fault" },
+ { do_bad, SIGBUS, 0, "level 2 address size fault" },
+ { do_bad, SIGBUS, 0, "level 3 address size fault" },
+ { do_translation_fault, SIGSEGV, SEGV_MAPERR, "input address range fault" },
+ { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" },
+ { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" },
+ { do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" },
+ { do_bad, SIGBUS, 0, "reserved access flag fault" },
+ { do_bad, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" },
+ { do_bad, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" },
+ { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" },
+ { do_bad, SIGBUS, 0, "reserved permission fault" },
+ { do_bad, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" },
+ { do_sect_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" },
+ { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 permission fault" },
+ { do_bad, SIGBUS, 0, "synchronous external abort" },
+ { do_bad, SIGBUS, 0, "asynchronous external abort" },
+ { do_bad, SIGBUS, 0, "unknown 18" },
+ { do_bad, SIGBUS, 0, "unknown 19" },
+ { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
+ { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
+ { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
+ { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
+ { do_bad, SIGBUS, 0, "synchronous parity error" },
+ { do_bad, SIGBUS, 0, "asynchronous parity error" },
+ { do_bad, SIGBUS, 0, "unknown 26" },
+ { do_bad, SIGBUS, 0, "unknown 27" },
+ { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" },
+ { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" },
+ { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" },
+ { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" },
+ { do_bad, SIGBUS, 0, "unknown 32" },
+ { do_bad, SIGBUS, BUS_ADRALN, "alignment fault" },
+ { do_bad, SIGBUS, 0, "debug event" },
+ { do_bad, SIGBUS, 0, "unknown 35" },
+ { do_bad, SIGBUS, 0, "unknown 36" },
+ { do_bad, SIGBUS, 0, "unknown 37" },
+ { do_bad, SIGBUS, 0, "unknown 38" },
+ { do_bad, SIGBUS, 0, "unknown 39" },
+ { do_bad, SIGBUS, 0, "unknown 40" },
+ { do_bad, SIGBUS, 0, "unknown 41" },
+ { do_bad, SIGBUS, 0, "unknown 42" },
+ { do_bad, SIGBUS, 0, "unknown 43" },
+ { do_bad, SIGBUS, 0, "unknown 44" },
+ { do_bad, SIGBUS, 0, "unknown 45" },
+ { do_bad, SIGBUS, 0, "unknown 46" },
+ { do_bad, SIGBUS, 0, "unknown 47" },
+ { do_bad, SIGBUS, 0, "unknown 48" },
+ { do_bad, SIGBUS, 0, "unknown 49" },
+ { do_bad, SIGBUS, 0, "unknown 50" },
+ { do_bad, SIGBUS, 0, "unknown 51" },
+ { do_bad, SIGBUS, 0, "implementation fault (lockdown abort)" },
+ { do_bad, SIGBUS, 0, "unknown 53" },
+ { do_bad, SIGBUS, 0, "unknown 54" },
+ { do_bad, SIGBUS, 0, "unknown 55" },
+ { do_bad, SIGBUS, 0, "unknown 56" },
+ { do_bad, SIGBUS, 0, "unknown 57" },
+ { do_bad, SIGBUS, 0, "implementation fault (coprocessor abort)" },
+ { do_bad, SIGBUS, 0, "unknown 59" },
+ { do_bad, SIGBUS, 0, "unknown 60" },
+ { do_bad, SIGBUS, 0, "unknown 61" },
+ { do_bad, SIGBUS, 0, "unknown 62" },
+ { do_bad, SIGBUS, 0, "unknown 63" },
+};
+
+/*
+ * Dispatch a data abort to the relevant handler.
+ */
+asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
+ struct pt_regs *regs)
+{
+ const struct fault_info *inf = fault_info + (esr & 63);
+ struct siginfo info;
+
+ if (!inf->fn(addr, esr, regs))
+ return;
+
+ pr_alert("Unhandled fault: %s (0x%08x) at 0x%016lx\n",
+ inf->name, esr, addr);
+
+ info.si_signo = inf->sig;
+ info.si_errno = 0;
+ info.si_code = inf->code;
+ info.si_addr = (void __user *)addr;
+ arm64_notify_die("", regs, &info, esr);
+}
+
+/*
+ * Handle stack alignment exceptions.
+ */
+asmlinkage void __exception do_sp_pc_abort(unsigned long addr,
+ unsigned int esr,
+ struct pt_regs *regs)
+{
+ struct siginfo info;
+
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = BUS_ADRALN;
+ info.si_addr = (void __user *)addr;
+ arm64_notify_die("", regs, &info, esr);
+}
+
+static struct fault_info debug_fault_info[] = {
+ { do_bad, SIGTRAP, TRAP_HWBKPT, "hardware breakpoint" },
+ { do_bad, SIGTRAP, TRAP_HWBKPT, "hardware single-step" },
+ { do_bad, SIGTRAP, TRAP_HWBKPT, "hardware watchpoint" },
+ { do_bad, SIGBUS, 0, "unknown 3" },
+ { do_bad, SIGTRAP, TRAP_BRKPT, "aarch32 BKPT" },
+ { do_bad, SIGTRAP, 0, "aarch32 vector catch" },
+ { do_bad, SIGTRAP, TRAP_BRKPT, "aarch64 BRK" },
+ { do_bad, SIGBUS, 0, "unknown 7" },
+};
+
+void __init hook_debug_fault_code(int nr,
+ int (*fn)(unsigned long, unsigned int, struct pt_regs *),
+ int sig, int code, const char *name)
+{
+ BUG_ON(nr < 0 || nr >= ARRAY_SIZE(debug_fault_info));
+
+ debug_fault_info[nr].fn = fn;
+ debug_fault_info[nr].sig = sig;
+ debug_fault_info[nr].code = code;
+ debug_fault_info[nr].name = name;
+}
+
+asmlinkage int __exception do_debug_exception(unsigned long addr,
+ unsigned int esr,
+ struct pt_regs *regs)
+{
+ const struct fault_info *inf = debug_fault_info + DBG_ESR_EVT(esr);
+ struct siginfo info;
+
+ if (!inf->fn(addr, esr, regs))
+ return 1;
+
+ pr_alert("Unhandled debug exception: %s (0x%08x) at 0x%016lx\n",
+ inf->name, esr, addr);
+
+ info.si_signo = inf->sig;
+ info.si_errno = 0;
+ info.si_code = inf->code;
+ info.si_addr = (void __user *)addr;
+ arm64_notify_die("", regs, &info, esr);
+
+ return 0;
+}
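The debug_fault_info table above is populated at runtime via hook_debug_fault_code(); the fragment below is a hypothetical registration (the handler name is invented) showing how a debug facility would claim slot 1, the "hardware single-step" entry, so that do_debug_exception() dispatches to it.

static int example_single_step_handler(unsigned long addr, unsigned int esr,
				       struct pt_regs *regs)
{
	/* consume the step; returning 0 reports the exception as handled */
	return 0;
}

static void __init example_register_step_hook(void)
{
	/* slot 1 corresponds to DBG_ESR_EVT(esr) == 1, "hardware single-step" */
	hook_debug_fault_code(1, example_single_step_handler, SIGTRAP,
			      TRAP_HWBKPT, "single-step handler");
}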
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
new file mode 100644
index 00000000000..c144adb1682
--- /dev/null
+++ b/arch/arm64/mm/flush.c
@@ -0,0 +1,135 @@
+/*
+ * Based on arch/arm/mm/flush.c
+ *
+ * Copyright (C) 1995-2002 Russell King
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+
+#include <asm/cacheflush.h>
+#include <asm/cachetype.h>
+#include <asm/tlbflush.h>
+
+#include "mm.h"
+
+void flush_cache_mm(struct mm_struct *mm)
+{
+}
+
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ if (vma->vm_flags & VM_EXEC)
+ __flush_icache_all();
+}
+
+void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr,
+ unsigned long pfn)
+{
+}
+
+static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
+ unsigned long uaddr, void *kaddr,
+ unsigned long len)
+{
+ if (vma->vm_flags & VM_EXEC) {
+ unsigned long addr = (unsigned long)kaddr;
+ if (icache_is_aliasing()) {
+ __flush_dcache_area(kaddr, len);
+ __flush_icache_all();
+ } else {
+ flush_icache_range(addr, addr + len);
+ }
+ }
+}
+
+/*
+ * Copy user data from/to a page which is mapped into a different process's
+ * address space. Really, we want to allow our "user space" model to handle
+ * this.
+ *
+ * Note that this code needs to run on the current CPU.
+ */
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long uaddr, void *dst, const void *src,
+ unsigned long len)
+{
+#ifdef CONFIG_SMP
+ preempt_disable();
+#endif
+ memcpy(dst, src, len);
+ flush_ptrace_access(vma, page, uaddr, dst, len);
+#ifdef CONFIG_SMP
+ preempt_enable();
+#endif
+}
+
+void __flush_dcache_page(struct page *page)
+{
+ __flush_dcache_area(page_address(page), PAGE_SIZE);
+}
+
+void __sync_icache_dcache(pte_t pte, unsigned long addr)
+{
+ unsigned long pfn;
+ struct page *page;
+
+ pfn = pte_pfn(pte);
+ if (!pfn_valid(pfn))
+ return;
+
+ page = pfn_to_page(pfn);
+ if (!test_and_set_bit(PG_dcache_clean, &page->flags)) {
+ __flush_dcache_page(page);
+ __flush_icache_all();
+ } else if (icache_is_aivivt()) {
+ __flush_icache_all();
+ }
+}
+
+/*
+ * Ensure cache coherency between kernel mapping and userspace mapping of this
+ * page.
+ */
+void flush_dcache_page(struct page *page)
+{
+ struct address_space *mapping;
+
+ /*
+ * The zero page is never written to, so never has any dirty cache
+ * lines, and therefore never needs to be flushed.
+ */
+ if (page == ZERO_PAGE(0))
+ return;
+
+ mapping = page_mapping(page);
+ if (mapping && mapping_mapped(mapping)) {
+ __flush_dcache_page(page);
+ __flush_icache_all();
+ set_bit(PG_dcache_clean, &page->flags);
+ } else {
+ clear_bit(PG_dcache_clean, &page->flags);
+ }
+}
+EXPORT_SYMBOL(flush_dcache_page);
+
+/*
+ * Additional functions defined in assembly.
+ */
+EXPORT_SYMBOL(flush_cache_all);
+EXPORT_SYMBOL(flush_icache_range);
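A typical caller of flush_dcache_page() is sketched below as a hypothetical driver or file-system fragment (not from this patch): after the kernel writes into a page that user space may have mapped, the call lets the logic above choose between an immediate flush and the deferred PG_dcache_clean handling.

#include <linux/highmem.h>
#include <linux/string.h>

static void example_fill_page(struct page *page, const void *src, size_t len)
{
	void *dst = kmap(page);		/* linear mapping on arm64 */

	memcpy(dst, src, len);
	kunmap(page);
	flush_dcache_page(page);	/* keep user-space D-/I-cache views coherent */
}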
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
new file mode 100644
index 00000000000..5f719ba949b
--- /dev/null
+++ b/arch/arm64/mm/init.c
@@ -0,0 +1,437 @@
+/*
+ * Based on arch/arm/mm/init.c
+ *
+ * Copyright (C) 1995-2005 Russell King
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/errno.h>
+#include <linux/swap.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/mman.h>
+#include <linux/nodemask.h>
+#include <linux/initrd.h>
+#include <linux/gfp.h>
+#include <linux/memblock.h>
+#include <linux/sort.h>
+#include <linux/of_fdt.h>
+
+#include <asm/prom.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+#include <asm/sizes.h>
+#include <asm/tlb.h>
+
+#include "mm.h"
+
+static unsigned long phys_initrd_start __initdata = 0;
+static unsigned long phys_initrd_size __initdata = 0;
+
+phys_addr_t memstart_addr __read_mostly = 0;
+
+void __init early_init_dt_setup_initrd_arch(unsigned long start,
+ unsigned long end)
+{
+ phys_initrd_start = start;
+ phys_initrd_size = end - start;
+}
+
+static int __init early_initrd(char *p)
+{
+ unsigned long start, size;
+ char *endp;
+
+ start = memparse(p, &endp);
+ if (*endp == ',') {
+ size = memparse(endp + 1, NULL);
+
+ phys_initrd_start = start;
+ phys_initrd_size = size;
+ }
+ return 0;
+}
+early_param("initrd", early_initrd);
+
+#define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT)
+
+static void __init zone_sizes_init(unsigned long min, unsigned long max)
+{
+ struct memblock_region *reg;
+ unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
+ unsigned long max_dma32 = min;
+
+ memset(zone_size, 0, sizeof(zone_size));
+
+#ifdef CONFIG_ZONE_DMA32
+ /* 4GB maximum for 32-bit only capable devices */
+ max_dma32 = min(max, MAX_DMA32_PFN);
+ zone_size[ZONE_DMA32] = max_dma32 - min;
+#endif
+ zone_size[ZONE_NORMAL] = max - max_dma32;
+
+ memcpy(zhole_size, zone_size, sizeof(zhole_size));
+
+ for_each_memblock(memory, reg) {
+ unsigned long start = memblock_region_memory_base_pfn(reg);
+ unsigned long end = memblock_region_memory_end_pfn(reg);
+
+ if (start >= max)
+ continue;
+#ifdef CONFIG_ZONE_DMA32
+ if (start < max_dma32) {
+ unsigned long dma_end = min(end, max_dma32);
+ zhole_size[ZONE_DMA32] -= dma_end - start;
+ }
+#endif
+ if (end > max_dma32) {
+ unsigned long normal_end = min(end, max);
+ unsigned long normal_start = max(start, max_dma32);
+ zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
+ }
+ }
+
+ free_area_init_node(0, zone_size, min, zhole_size);
+}
+
+#ifdef CONFIG_HAVE_ARCH_PFN_VALID
+int pfn_valid(unsigned long pfn)
+{
+ return memblock_is_memory(pfn << PAGE_SHIFT);
+}
+EXPORT_SYMBOL(pfn_valid);
+#endif
+
+#ifndef CONFIG_SPARSEMEM
+static void arm64_memory_present(void)
+{
+}
+#else
+static void arm64_memory_present(void)
+{
+ struct memblock_region *reg;
+
+ for_each_memblock(memory, reg)
+ memory_present(0, memblock_region_memory_base_pfn(reg),
+ memblock_region_memory_end_pfn(reg));
+}
+#endif
+
+void __init arm64_memblock_init(void)
+{
+ u64 *reserve_map, base, size;
+
+ /* Register the kernel text, kernel data and initrd with memblock */
+ memblock_reserve(__pa(_text), _end - _text);
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (phys_initrd_size) {
+ memblock_reserve(phys_initrd_start, phys_initrd_size);
+
+ /* Now convert initrd to virtual addresses */
+ initrd_start = __phys_to_virt(phys_initrd_start);
+ initrd_end = initrd_start + phys_initrd_size;
+ }
+#endif
+
+ /*
+ * Reserve the page tables. These are already in use,
+ * and can only be in node 0.
+ */
+ memblock_reserve(__pa(swapper_pg_dir), SWAPPER_DIR_SIZE);
+ memblock_reserve(__pa(idmap_pg_dir), IDMAP_DIR_SIZE);
+
+ /* Reserve the dtb region */
+ memblock_reserve(virt_to_phys(initial_boot_params),
+ be32_to_cpu(initial_boot_params->totalsize));
+
+ /*
+ * Process the reserve map. This will probably overlap the initrd
+ * and dtb locations which are already reserved, but overlapping
+ * doesn't hurt anything
+ */
+ reserve_map = ((void*)initial_boot_params) +
+ be32_to_cpu(initial_boot_params->off_mem_rsvmap);
+ while (1) {
+ base = be64_to_cpup(reserve_map++);
+ size = be64_to_cpup(reserve_map++);
+ if (!size)
+ break;
+ memblock_reserve(base, size);
+ }
+
+ memblock_allow_resize();
+ memblock_dump_all();
+}
+
+void __init bootmem_init(void)
+{
+ unsigned long min, max;
+
+ min = PFN_UP(memblock_start_of_DRAM());
+ max = PFN_DOWN(memblock_end_of_DRAM());
+
+ /*
+ * Sparsemem tries to allocate bootmem in memory_present(), so must be
+ * done after the fixed reservations.
+ */
+ arm64_memory_present();
+
+ sparse_init();
+ zone_sizes_init(min, max);
+
+ high_memory = __va((max << PAGE_SHIFT) - 1) + 1;
+ max_pfn = max_low_pfn = max;
+}
+
+static inline int free_area(unsigned long pfn, unsigned long end, char *s)
+{
+ unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);
+
+ for (; pfn < end; pfn++) {
+ struct page *page = pfn_to_page(pfn);
+ ClearPageReserved(page);
+ init_page_count(page);
+ __free_page(page);
+ pages++;
+ }
+
+ if (size && s)
+ pr_info("Freeing %s memory: %dK\n", s, size);
+
+ return pages;
+}
+
+/*
+ * Poison init memory with an undefined instruction (0x0).
+ */
+static inline void poison_init_mem(void *s, size_t count)
+{
+ memset(s, 0, count);
+}
+
+#ifndef CONFIG_SPARSEMEM_VMEMMAP
+static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
+{
+ struct page *start_pg, *end_pg;
+ unsigned long pg, pgend;
+
+ /*
+ * Convert start_pfn/end_pfn to a struct page pointer.
+ */
+ start_pg = pfn_to_page(start_pfn - 1) + 1;
+ end_pg = pfn_to_page(end_pfn - 1) + 1;
+
+ /*
+ * Convert to physical addresses, and round start upwards and end
+ * downwards.
+ */
+ pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
+ pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;
+
+ /*
+ * If there are free pages between these, free the section of the
+ * memmap array.
+ */
+ if (pg < pgend)
+ free_bootmem(pg, pgend - pg);
+}
+
+/*
+ * The mem_map array can get very big. Free the unused area of the memory map.
+ */
+static void __init free_unused_memmap(void)
+{
+ unsigned long start, prev_end = 0;
+ struct memblock_region *reg;
+
+ for_each_memblock(memory, reg) {
+ start = __phys_to_pfn(reg->base);
+
+#ifdef CONFIG_SPARSEMEM
+ /*
+ * Take care not to free memmap entries that don't exist due
+ * to SPARSEMEM sections which aren't present.
+ */
+ start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
+#endif
+ /*
+ * If we had a previous bank, and there is a space between the
+ * current bank and the previous, free it.
+ */
+ if (prev_end && prev_end < start)
+ free_memmap(prev_end, start);
+
+ /*
+ * Align up here since the VM subsystem insists that the
+ * memmap entries are valid from the bank end aligned to
+ * MAX_ORDER_NR_PAGES.
+ */
+ prev_end = ALIGN(start + __phys_to_pfn(reg->size),
+ MAX_ORDER_NR_PAGES);
+ }
+
+#ifdef CONFIG_SPARSEMEM
+ if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
+ free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
+#endif
+}
+#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
+
+/*
+ * mem_init() marks the free areas in the mem_map and tells us how much memory
+ * is free. This is done after various parts of the system have claimed their
+ * memory after the kernel image.
+ */
+void __init mem_init(void)
+{
+ unsigned long reserved_pages, free_pages;
+ struct memblock_region *reg;
+
+#ifdef CONFIG_SWIOTLB
+ extern void __init arm64_swiotlb_init(size_t max_size);
+ arm64_swiotlb_init(max_pfn << (PAGE_SHIFT - 1));
+#endif
+
+ max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
+
+#ifndef CONFIG_SPARSEMEM_VMEMMAP
+ /* this will put all unused low memory onto the freelists */
+ free_unused_memmap();
+#endif
+
+ totalram_pages += free_all_bootmem();
+
+ reserved_pages = free_pages = 0;
+
+ for_each_memblock(memory, reg) {
+ unsigned int pfn1, pfn2;
+ struct page *page, *end;
+
+ pfn1 = __phys_to_pfn(reg->base);
+ pfn2 = pfn1 + __phys_to_pfn(reg->size);
+
+ page = pfn_to_page(pfn1);
+ end = pfn_to_page(pfn2 - 1) + 1;
+
+ do {
+ if (PageReserved(page))
+ reserved_pages++;
+ else if (!page_count(page))
+ free_pages++;
+ page++;
+ } while (page < end);
+ }
+
+ /*
+ * Since our memory may not be contiguous, calculate the real number
+ * of pages we have in this system.
+ */
+ pr_info("Memory:");
+ num_physpages = 0;
+ for_each_memblock(memory, reg) {
+ unsigned long pages = memblock_region_memory_end_pfn(reg) -
+ memblock_region_memory_base_pfn(reg);
+ num_physpages += pages;
+ printk(" %ldMB", pages >> (20 - PAGE_SHIFT));
+ }
+ printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
+
+ pr_notice("Memory: %luk/%luk available, %luk reserved\n",
+ nr_free_pages() << (PAGE_SHIFT-10),
+ free_pages << (PAGE_SHIFT-10),
+ reserved_pages << (PAGE_SHIFT-10));
+
+#define MLK(b, t) b, t, ((t) - (b)) >> 10
+#define MLM(b, t) b, t, ((t) - (b)) >> 20
+#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
+
+ pr_notice("Virtual kernel memory layout:\n"
+ " vmalloc : 0x%16lx - 0x%16lx (%6ld MB)\n"
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+ " vmemmap : 0x%16lx - 0x%16lx (%6ld MB)\n"
+#endif
+ " modules : 0x%16lx - 0x%16lx (%6ld MB)\n"
+ " memory : 0x%16lx - 0x%16lx (%6ld MB)\n"
+ " .init : 0x%p" " - 0x%p" " (%6ld kB)\n"
+ " .text : 0x%p" " - 0x%p" " (%6ld kB)\n"
+ " .data : 0x%p" " - 0x%p" " (%6ld kB)\n",
+ MLM(VMALLOC_START, VMALLOC_END),
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+ MLM((unsigned long)virt_to_page(PAGE_OFFSET),
+ (unsigned long)virt_to_page(high_memory)),
+#endif
+ MLM(MODULES_VADDR, MODULES_END),
+ MLM(PAGE_OFFSET, (unsigned long)high_memory),
+
+ MLK_ROUNDUP(__init_begin, __init_end),
+ MLK_ROUNDUP(_text, _etext),
+ MLK_ROUNDUP(_sdata, _edata));
+
+#undef MLK
+#undef MLM
+#undef MLK_ROUNDUP
+
+ /*
+ * Check boundaries twice: Some fundamental inconsistencies can be
+ * detected at build time already.
+ */
+#ifdef CONFIG_COMPAT
+ BUILD_BUG_ON(TASK_SIZE_32 > TASK_SIZE_64);
+#endif
+ BUILD_BUG_ON(TASK_SIZE_64 > MODULES_VADDR);
+ BUG_ON(TASK_SIZE_64 > MODULES_VADDR);
+
+ if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
+ extern int sysctl_overcommit_memory;
+ /*
+ * On a machine this small we won't get anywhere without
+ * overcommit, so turn it on by default.
+ */
+ sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
+ }
+}
+
+void free_initmem(void)
+{
+ poison_init_mem(__init_begin, __init_end - __init_begin);
+ totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
+ __phys_to_pfn(__pa(__init_end)),
+ "init");
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+
+static int keep_initrd;
+
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+ if (!keep_initrd) {
+ poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
+ totalram_pages += free_area(__phys_to_pfn(__pa(start)),
+ __phys_to_pfn(__pa(end)),
+ "initrd");
+ }
+}
+
+static int __init keepinitrd_setup(char *__unused)
+{
+ keep_initrd = 1;
+ return 1;
+}
+
+__setup("keepinitrd", keepinitrd_setup);
+#endif
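The early_initrd() parser above splits a boot argument of the form "initrd=<start>,<size>" with memparse(). The userspace toy below mimics that split; the physical address is invented, and real memparse() also understands K/M/G suffixes beyond the single case handled here.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *arg = "0x82000000,8M";	/* hypothetical initrd= value */
	char *endp;
	unsigned long long start = strtoull(arg, &endp, 0);
	unsigned long long size = 0;

	if (*endp == ',') {
		size = strtoull(endp + 1, &endp, 0);
		if (*endp == 'M')
			size <<= 20;		/* only the M suffix handled in this toy */
	}
	printf("phys_initrd_start=%#llx phys_initrd_size=%llu\n", start, size);
	return 0;
}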
diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
new file mode 100644
index 00000000000..1725cd6db37
--- /dev/null
+++ b/arch/arm64/mm/ioremap.c
@@ -0,0 +1,84 @@
+/*
+ * Based on arch/arm/mm/ioremap.c
+ *
+ * (C) Copyright 1995 1996 Linus Torvalds
+ * Hacked for ARM by Phil Blundell <philb@gnu.org>
+ * Hacked to allow all architectures to build, and various cleanups
+ * by Russell King
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/io.h>
+
+static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size,
+ pgprot_t prot, void *caller)
+{
+ unsigned long last_addr;
+ unsigned long offset = phys_addr & ~PAGE_MASK;
+ int err;
+ unsigned long addr;
+ struct vm_struct *area;
+
+ /*
+ * Page align the mapping address and size, taking account of any
+ * offset.
+ */
+ phys_addr &= PAGE_MASK;
+ size = PAGE_ALIGN(size + offset);
+
+ /*
+ * Don't allow wraparound, zero size or outside PHYS_MASK.
+ */
+ last_addr = phys_addr + size - 1;
+ if (!size || last_addr < phys_addr || (last_addr & ~PHYS_MASK))
+ return NULL;
+
+ /*
+ * Don't allow RAM to be mapped.
+ */
+ if (WARN_ON(pfn_valid(__phys_to_pfn(phys_addr))))
+ return NULL;
+
+ area = get_vm_area_caller(size, VM_IOREMAP, caller);
+ if (!area)
+ return NULL;
+ addr = (unsigned long)area->addr;
+
+ err = ioremap_page_range(addr, addr + size, phys_addr, prot);
+ if (err) {
+ vunmap((void *)addr);
+ return NULL;
+ }
+
+ return (void __iomem *)(offset + addr);
+}
+
+void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot)
+{
+ return __ioremap_caller(phys_addr, size, prot,
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL(__ioremap);
+
+void __iounmap(volatile void __iomem *io_addr)
+{
+ void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
+
+ vunmap(addr);
+}
+EXPORT_SYMBOL(__iounmap);
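A hypothetical device driver fragment is shown below (the addresses and register offset are invented): an ordinary ioremap() call funnels into __ioremap_caller() above, which rejects RAM pages and sets up a VM_IOREMAP area with the requested attributes; the mapping is later released with iounmap().

#include <linux/io.h>

#define EXAMPLE_UART_BASE	0x1c090000UL	/* made-up physical address */
#define EXAMPLE_UART_SIZE	0x1000

static void __iomem *example_map_uart(void)
{
	void __iomem *regs = ioremap(EXAMPLE_UART_BASE, EXAMPLE_UART_SIZE);

	if (regs)
		writel(0, regs + 0x30);		/* e.g. quiesce the (imaginary) device */
	return regs;				/* caller unmaps with iounmap(regs) */
}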
diff --git a/arch/arm64/mm/mm.h b/arch/arm64/mm/mm.h
new file mode 100644
index 00000000000..d8d6e7851c1
--- /dev/null
+++ b/arch/arm64/mm/mm.h
@@ -0,0 +1,2 @@
+extern void __flush_dcache_page(struct page *page);
+extern void __init bootmem_init(void);
diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
new file mode 100644
index 00000000000..7c7be785563
--- /dev/null
+++ b/arch/arm64/mm/mmap.c
@@ -0,0 +1,144 @@
+/*
+ * Based on arch/arm/mm/mmap.c
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/elf.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/export.h>
+#include <linux/shm.h>
+#include <linux/sched.h>
+#include <linux/io.h>
+#include <linux/personality.h>
+#include <linux/random.h>
+
+#include <asm/cputype.h>
+
+/*
+ * Leave enough space between the mmap area and the stack to honour ulimit in
+ * the face of randomisation.
+ */
+#define MIN_GAP (SZ_128M + ((STACK_RND_MASK << PAGE_SHIFT) + 1))
+#define MAX_GAP (STACK_TOP/6*5)
+
+static int mmap_is_legacy(void)
+{
+ if (current->personality & ADDR_COMPAT_LAYOUT)
+ return 1;
+
+ if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
+ return 1;
+
+ return sysctl_legacy_va_layout;
+}
+
+/*
+ * Since get_random_int() returns the same value within a 1 jiffy window, we
+ * will almost always get the same randomisation for the stack and mmap
+ * region. This will mean the relative distance between stack and mmap will be
+ * the same.
+ *
+ * To avoid this we can shift the randomness by 1 bit.
+ */
+static unsigned long mmap_rnd(void)
+{
+ unsigned long rnd = 0;
+
+ if (current->flags & PF_RANDOMIZE)
+ rnd = (long)get_random_int() & (STACK_RND_MASK >> 1);
+
+ return rnd << (PAGE_SHIFT + 1);
+}
+
+static unsigned long mmap_base(void)
+{
+ unsigned long gap = rlimit(RLIMIT_STACK);
+
+ if (gap < MIN_GAP)
+ gap = MIN_GAP;
+ else if (gap > MAX_GAP)
+ gap = MAX_GAP;
+
+ return PAGE_ALIGN(STACK_TOP - gap - mmap_rnd());
+}
+
+/*
+ * This function, called very early during the creation of a new process VM
+ * image, sets up which VM layout function to use:
+ */
+void arch_pick_mmap_layout(struct mm_struct *mm)
+{
+ /*
+ * Fall back to the standard layout if the personality bit is set, or
+ * if the expected stack growth is unlimited:
+ */
+ if (mmap_is_legacy()) {
+ mm->mmap_base = TASK_UNMAPPED_BASE;
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+ } else {
+ mm->mmap_base = mmap_base();
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ mm->unmap_area = arch_unmap_area_topdown;
+ }
+}
+EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);
+
+
+/*
+ * You really shouldn't be using read() or write() on /dev/mem. This might go
+ * away in the future.
+ */
+int valid_phys_addr_range(unsigned long addr, size_t size)
+{
+ if (addr < PHYS_OFFSET)
+ return 0;
+ if (addr + size > __pa(high_memory - 1) + 1)
+ return 0;
+
+ return 1;
+}
+
+/*
+ * Do not allow /dev/mem mappings beyond the supported physical range.
+ */
+int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
+{
+ return !(((pfn << PAGE_SHIFT) + size) & ~PHYS_MASK);
+}
+
+#ifdef CONFIG_STRICT_DEVMEM
+
+#include <linux/ioport.h>
+
+/*
+ * devmem_is_allowed() checks to see if /dev/mem access to a certain address
+ * is valid. The argument is a physical page number. We mimic x86 here by
+ * disallowing access to system RAM as well as device-exclusive MMIO regions.
+ * This effectively disables read()/write() on /dev/mem.
+ */
+int devmem_is_allowed(unsigned long pfn)
+{
+ if (iomem_is_exclusive(pfn << PAGE_SHIFT))
+ return 0;
+ if (!page_is_ram(pfn))
+ return 1;
+ return 0;
+}
+
+#endif
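To make the MIN_GAP/MAX_GAP arithmetic in mmap_base() concrete, the toy below recomputes the base with invented numbers: STACK_TOP, STACK_RND_MASK and the stack rlimit are placeholders, and the random offset from mmap_rnd() is taken as zero.

#include <stdio.h>

int main(void)
{
	unsigned long page_shift = 12, page_size = 1UL << page_shift;
	unsigned long stack_top = 0x8000000000UL;	/* assumed STACK_TOP */
	unsigned long stack_rnd_mask = 0x3ffffUL;	/* assumed STACK_RND_MASK */
	unsigned long min_gap = (128UL << 20) + (stack_rnd_mask << page_shift) + 1;
	unsigned long max_gap = stack_top / 6 * 5;
	unsigned long gap = 8UL << 20;			/* pretend RLIMIT_STACK is 8MB */

	if (gap < min_gap)
		gap = min_gap;
	else if (gap > max_gap)
		gap = max_gap;

	/* PAGE_ALIGN(stack_top - gap - rnd) with rnd == 0 */
	printf("mmap_base = %#lx\n",
	       (stack_top - gap + page_size - 1) & ~(page_size - 1));
	return 0;
}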
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
new file mode 100644
index 00000000000..a6885d896ab
--- /dev/null
+++ b/arch/arm64/mm/mmu.c
@@ -0,0 +1,395 @@
+/*
+ * Based on arch/arm/mm/mmu.c
+ *
+ * Copyright (C) 1995-2005 Russell King
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/mman.h>
+#include <linux/nodemask.h>
+#include <linux/memblock.h>
+#include <linux/fs.h>
+
+#include <asm/cputype.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+#include <asm/sizes.h>
+#include <asm/tlb.h>
+#include <asm/mmu_context.h>
+
+#include "mm.h"
+
+/*
+ * Empty_zero_page is a special page that is used for zero-initialized data
+ * and COW.
+ */
+struct page *empty_zero_page;
+EXPORT_SYMBOL(empty_zero_page);
+
+pgprot_t pgprot_default;
+EXPORT_SYMBOL(pgprot_default);
+
+static pmdval_t prot_sect_kernel;
+
+struct cachepolicy {
+ const char policy[16];
+ u64 mair;
+ u64 tcr;
+};
+
+static struct cachepolicy cache_policies[] __initdata = {
+ {
+ .policy = "uncached",
+ .mair = 0x44, /* inner, outer non-cacheable */
+ .tcr = TCR_IRGN_NC | TCR_ORGN_NC,
+ }, {
+ .policy = "writethrough",
+ .mair = 0xaa, /* inner, outer write-through, read-allocate */
+ .tcr = TCR_IRGN_WT | TCR_ORGN_WT,
+ }, {
+ .policy = "writeback",
+ .mair = 0xee, /* inner, outer write-back, read-allocate */
+ .tcr = TCR_IRGN_WBnWA | TCR_ORGN_WBnWA,
+ }
+};
+
+/*
+ * These are useful for identifying cache coherency problems by allowing the
+ * cache or the cache and writebuffer to be turned off. It changes the Normal
+ * memory caching attributes in the MAIR_EL1 register.
+ */
+static int __init early_cachepolicy(char *p)
+{
+ int i;
+ u64 tmp;
+
+ for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
+ int len = strlen(cache_policies[i].policy);
+
+ if (memcmp(p, cache_policies[i].policy, len) == 0)
+ break;
+ }
+ if (i == ARRAY_SIZE(cache_policies)) {
+ pr_err("ERROR: unknown or unsupported cache policy: %s\n", p);
+ return 0;
+ }
+
+ flush_cache_all();
+
+ /*
+ * Modify MT_NORMAL attributes in MAIR_EL1.
+ */
+ asm volatile(
+ " mrs %0, mair_el1\n"
+ " bfi %0, %1, #%2, #8\n"
+ " msr mair_el1, %0\n"
+ " isb\n"
+ : "=&r" (tmp)
+ : "r" (cache_policies[i].mair), "i" (MT_NORMAL * 8));
+
+ /*
+ * Modify TCR PTW cacheability attributes.
+ */
+ asm volatile(
+ " mrs %0, tcr_el1\n"
+ " bic %0, %0, %2\n"
+ " orr %0, %0, %1\n"
+ " msr tcr_el1, %0\n"
+ " isb\n"
+ : "=&r" (tmp)
+ : "r" (cache_policies[i].tcr), "r" (TCR_IRGN_MASK | TCR_ORGN_MASK));
+
+ flush_cache_all();
+
+ return 0;
+}
+early_param("cachepolicy", early_cachepolicy);
+
+/*
+ * Adjust the PMD section entries according to the CPU in use.
+ */
+static void __init init_mem_pgprot(void)
+{
+ pteval_t default_pgprot;
+ int i;
+
+ default_pgprot = PTE_ATTRINDX(MT_NORMAL);
+ prot_sect_kernel = PMD_TYPE_SECT | PMD_SECT_AF | PMD_ATTRINDX(MT_NORMAL);
+
+#ifdef CONFIG_SMP
+ /*
+ * Mark memory with the "shared" attribute for SMP systems
+ */
+ default_pgprot |= PTE_SHARED;
+ prot_sect_kernel |= PMD_SECT_S;
+#endif
+
+ for (i = 0; i < 16; i++) {
+ unsigned long v = pgprot_val(protection_map[i]);
+ protection_map[i] = __pgprot(v | default_pgprot);
+ }
+
+ pgprot_default = __pgprot(PTE_TYPE_PAGE | PTE_AF | default_pgprot);
+}
+
+pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ unsigned long size, pgprot_t vma_prot)
+{
+ if (!pfn_valid(pfn))
+ return pgprot_noncached(vma_prot);
+ else if (file->f_flags & O_SYNC)
+ return pgprot_writecombine(vma_prot);
+ return vma_prot;
+}
+EXPORT_SYMBOL(phys_mem_access_prot);
+
+static void __init *early_alloc(unsigned long sz)
+{
+ void *ptr = __va(memblock_alloc(sz, sz));
+ memset(ptr, 0, sz);
+ return ptr;
+}
+
+static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
+ unsigned long end, unsigned long pfn)
+{
+ pte_t *pte;
+
+ if (pmd_none(*pmd)) {
+ pte = early_alloc(PTRS_PER_PTE * sizeof(pte_t));
+ __pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE);
+ }
+ BUG_ON(pmd_bad(*pmd));
+
+ pte = pte_offset_kernel(pmd, addr);
+ do {
+ set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
+ pfn++;
+ } while (pte++, addr += PAGE_SIZE, addr != end);
+}
+
+static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
+ unsigned long end, phys_addr_t phys)
+{
+ pmd_t *pmd;
+ unsigned long next;
+
+ /*
+ * If the pgd/pud entry is empty or holds an initial section mapping,
+ * allocate a pmd table to replace it.
+ */
+ if (pud_none(*pud) || pud_bad(*pud)) {
+ pmd = early_alloc(PTRS_PER_PMD * sizeof(pmd_t));
+ pud_populate(&init_mm, pud, pmd);
+ }
+
+ pmd = pmd_offset(pud, addr);
+ do {
+ next = pmd_addr_end(addr, end);
+ /* try section mapping first */
+ if (((addr | next | phys) & ~SECTION_MASK) == 0)
+ set_pmd(pmd, __pmd(phys | prot_sect_kernel));
+ else
+ alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys));
+ phys += next - addr;
+ } while (pmd++, addr = next, addr != end);
+}
+
+static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
+ unsigned long end, unsigned long phys)
+{
+ pud_t *pud = pud_offset(pgd, addr);
+ unsigned long next;
+
+ do {
+ next = pud_addr_end(addr, end);
+ alloc_init_pmd(pud, addr, next, phys);
+ phys += next - addr;
+ } while (pud++, addr = next, addr != end);
+}
+
+/*
+ * Create the page directory entries and any necessary page tables for the
+ * mapping of the given physical range at the given virtual address.
+ */
+static void __init create_mapping(phys_addr_t phys, unsigned long virt,
+ phys_addr_t size)
+{
+ unsigned long addr, length, end, next;
+ pgd_t *pgd;
+
+ if (virt < VMALLOC_START) {
+ pr_warning("BUG: not creating mapping for 0x%016llx at 0x%016lx - outside kernel range\n",
+ phys, virt);
+ return;
+ }
+
+ addr = virt & PAGE_MASK;
+ length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));
+
+ pgd = pgd_offset_k(addr);
+ end = addr + length;
+ do {
+ next = pgd_addr_end(addr, end);
+ alloc_init_pud(pgd, addr, next, phys);
+ phys += next - addr;
+ } while (pgd++, addr = next, addr != end);
+}
+
+static void __init map_mem(void)
+{
+ struct memblock_region *reg;
+
+ /* map all the memory banks */
+ for_each_memblock(memory, reg) {
+ phys_addr_t start = reg->base;
+ phys_addr_t end = start + reg->size;
+
+ if (start >= end)
+ break;
+
+ create_mapping(start, __phys_to_virt(start), end - start);
+ }
+}
+
+/*
+ * paging_init() sets up the page tables, initialises the zone memory
+ * maps and sets up the zero page.
+ */
+void __init paging_init(void)
+{
+ void *zero_page;
+
+ /*
+ * At most PGDIR_SIZE can be addressed via the initial direct kernel
+ * mapping in swapper_pg_dir, so limit memblock allocations accordingly.
+ */
+ memblock_set_current_limit((PHYS_OFFSET & PGDIR_MASK) + PGDIR_SIZE);
+
+ init_mem_pgprot();
+ map_mem();
+
+ /*
+ * Finally flush the caches and tlb to ensure that we're in a
+ * consistent state.
+ */
+ flush_cache_all();
+ flush_tlb_all();
+
+ /* allocate the zero page. */
+ zero_page = early_alloc(PAGE_SIZE);
+
+ bootmem_init();
+
+ empty_zero_page = virt_to_page(zero_page);
+ __flush_dcache_page(empty_zero_page);
+
+ /*
+ * TTBR0 is only used for the identity mapping at this stage. Make it
+ * point to zero page to avoid speculatively fetching new entries.
+ */
+ cpu_set_reserved_ttbr0();
+ flush_tlb_all();
+}
+
+/*
+ * Enable the identity mapping so that the MMU can be disabled.
+ */
+void setup_mm_for_reboot(void)
+{
+ cpu_switch_mm(idmap_pg_dir, &init_mm);
+ flush_tlb_all();
+}
+
+/*
+ * Check whether a kernel address is valid (derived from arch/x86/).
+ */
+int kern_addr_valid(unsigned long addr)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ if ((((long)addr) >> VA_BITS) != -1UL)
+ return 0;
+
+ pgd = pgd_offset_k(addr);
+ if (pgd_none(*pgd))
+ return 0;
+
+ pud = pud_offset(pgd, addr);
+ if (pud_none(*pud))
+ return 0;
+
+ pmd = pmd_offset(pud, addr);
+ if (pmd_none(*pmd))
+ return 0;
+
+ pte = pte_offset_kernel(pmd, addr);
+ if (pte_none(*pte))
+ return 0;
+
+ return pfn_valid(pte_pfn(*pte));
+}
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+#ifdef CONFIG_ARM64_64K_PAGES
+int __meminit vmemmap_populate(struct page *start_page,
+ unsigned long size, int node)
+{
+ return vmemmap_populate_basepages(start_page, size, node);
+}
+#else /* !CONFIG_ARM64_64K_PAGES */
+int __meminit vmemmap_populate(struct page *start_page,
+ unsigned long size, int node)
+{
+ unsigned long addr = (unsigned long)start_page;
+ unsigned long end = (unsigned long)(start_page + size);
+ unsigned long next;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+
+ do {
+ next = pmd_addr_end(addr, end);
+
+ pgd = vmemmap_pgd_populate(addr, node);
+ if (!pgd)
+ return -ENOMEM;
+
+ pud = vmemmap_pud_populate(pgd, addr, node);
+ if (!pud)
+ return -ENOMEM;
+
+ pmd = pmd_offset(pud, addr);
+ if (pmd_none(*pmd)) {
+ void *p = NULL;
+
+ p = vmemmap_alloc_block_buf(PMD_SIZE, node);
+ if (!p)
+ return -ENOMEM;
+
+ set_pmd(pmd, __pmd(__pa(p) | prot_sect_kernel));
+ } else
+ vmemmap_verify((pte_t *)pmd, node, addr, next);
+ } while (addr = next, addr != end);
+
+ return 0;
+}
+#endif /* CONFIG_ARM64_64K_PAGES */
+#endif /* CONFIG_SPARSEMEM_VMEMMAP */
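
One detail worth pulling out of mmu.c above is the section-mapping test in alloc_init_pmd(): a whole block can be mapped only when the start, end and physical addresses of the step are all section-aligned, which is what ((addr | next | phys) & ~SECTION_MASK) == 0 expresses. A small C sketch of that predicate follows; SECTION_SHIFT is assumed to be 21 (2MB sections with 4K pages) purely for illustration.

#include <stdint.h>
#include <stdio.h>

#define SECTION_SHIFT 21                        /* assumption: 2MB sections */
#define SECTION_SIZE  (1ULL << SECTION_SHIFT)
#define SECTION_MASK  (~(SECTION_SIZE - 1))

/* Returns non-zero when a whole section (block) mapping can be used. */
static int can_use_section(uint64_t addr, uint64_t next, uint64_t phys)
{
        return ((addr | next | phys) & ~SECTION_MASK) == 0;
}

int main(void)
{
        /* Aligned on both sides: a section mapping is possible (prints 1). */
        printf("%d\n", can_use_section(0xffffffc000200000ULL,
                                       0xffffffc000400000ULL, 0x80200000ULL));
        /* Physical address only 4K-aligned: fall back to ptes (prints 0). */
        printf("%d\n", can_use_section(0xffffffc000200000ULL,
                                       0xffffffc000400000ULL, 0x80201000ULL));
        return 0;
}

When the test fails, alloc_init_pmd() falls back to alloc_init_pte() and maps the range with individual page entries instead.
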
diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c
new file mode 100644
index 00000000000..7083cdada65
--- /dev/null
+++ b/arch/arm64/mm/pgd.c
@@ -0,0 +1,54 @@
+/*
+ * PGD allocation/freeing
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Catalin Marinas <catalin.marinas@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/mm.h>
+#include <linux/gfp.h>
+#include <linux/highmem.h>
+#include <linux/slab.h>
+
+#include <asm/pgalloc.h>
+#include <asm/page.h>
+#include <asm/tlbflush.h>
+
+#include "mm.h"
+
+#define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t))
+
+pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ pgd_t *new_pgd;
+
+ if (PGD_SIZE == PAGE_SIZE)
+ new_pgd = (pgd_t *)get_zeroed_page(GFP_KERNEL);
+ else
+ new_pgd = kzalloc(PGD_SIZE, GFP_KERNEL);
+
+ if (!new_pgd)
+ return NULL;
+
+ return new_pgd;
+}
+
+void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+ if (PGD_SIZE == PAGE_SIZE)
+ free_page((unsigned long)pgd);
+ else
+ kfree(pgd);
+}
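
pgd_alloc() above uses get_zeroed_page() when the pgd is exactly one page and kzalloc() for smaller pgds, with pgd_free() mirroring the choice. As a quick sanity check of the page-sized case, the arithmetic for the common 4K-page, 39-bit VA configuration is sketched below; the constants are restated assumptions, not values read from the kernel headers.

#include <stdio.h>

/* Illustrative values for a 4K-page, 39-bit VA configuration. */
#define VA_BITS         39
#define PGDIR_SHIFT     30      /* each pgd entry covers 1GB with 4K pages */
#define PTRS_PER_PGD    (1UL << (VA_BITS - PGDIR_SHIFT))
#define PGD_SIZE        (PTRS_PER_PGD * 8)      /* 8-byte descriptors */

int main(void)
{
        /* 512 entries * 8 bytes = 4096 bytes: exactly one 4K page, so the
         * whole-page allocation path in pgd_alloc() would be taken. */
        printf("PTRS_PER_PGD = %lu, PGD_SIZE = %lu bytes\n",
               PTRS_PER_PGD, PGD_SIZE);
        return 0;
}
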
diff --git a/arch/arm64/mm/proc-macros.S b/arch/arm64/mm/proc-macros.S
new file mode 100644
index 00000000000..8957b822010
--- /dev/null
+++ b/arch/arm64/mm/proc-macros.S
@@ -0,0 +1,55 @@
+/*
+ * Based on arch/arm/mm/proc-macros.S
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <asm/asm-offsets.h>
+#include <asm/thread_info.h>
+
+/*
+ * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
+ */
+ .macro vma_vm_mm, rd, rn
+ ldr \rd, [\rn, #VMA_VM_MM]
+ .endm
+
+/*
+ * mmid - get context id from mm pointer (mm->context.id)
+ */
+ .macro mmid, rd, rn
+ ldr \rd, [\rn, #MM_CONTEXT_ID]
+ .endm
+
+/*
+ * dcache_line_size - get the minimum D-cache line size from the CTR register.
+ */
+ .macro dcache_line_size, reg, tmp
+ mrs \tmp, ctr_el0 // read CTR
+ lsr \tmp, \tmp, #16
+ and \tmp, \tmp, #0xf // cache line size encoding
+ mov \reg, #4 // bytes per word
+ lsl \reg, \reg, \tmp // actual cache line size
+ .endm
+
+/*
+ * icache_line_size - get the minimum I-cache line size from the CTR register.
+ */
+ .macro icache_line_size, reg, tmp
+ mrs \tmp, ctr_el0 // read CTR
+ and \tmp, \tmp, #0xf // cache line size encoding
+ mov \reg, #4 // bytes per word
+ lsl \reg, \reg, \tmp // actual cache line size
+ .endm
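
Both macros above decode CTR_EL0: IminLine lives in bits [3:0] and DminLine in bits [19:16], each expressed as log2 of the line size in 4-byte words, hence the "mov #4; lsl" sequence. A C sketch of the same decode, using an example register value that is assumed rather than read from hardware:

#include <stdint.h>
#include <stdio.h>

/* Minimum D-cache line size in bytes: DminLine is CTR_EL0[19:16]. */
static unsigned int dcache_line_bytes(uint64_t ctr)
{
        return 4U << ((ctr >> 16) & 0xf);
}

/* Minimum I-cache line size in bytes: IminLine is CTR_EL0[3:0]. */
static unsigned int icache_line_bytes(uint64_t ctr)
{
        return 4U << (ctr & 0xf);
}

int main(void)
{
        /* Example value (assumed): 64-byte D-lines, 32-byte I-lines. */
        uint64_t ctr = (4ULL << 16) | 3ULL;
        printf("D: %u bytes, I: %u bytes\n",
               dcache_line_bytes(ctr), icache_line_bytes(ctr));
        return 0;
}
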
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
new file mode 100644
index 00000000000..f1d8b9bbfda
--- /dev/null
+++ b/arch/arm64/mm/proc.S
@@ -0,0 +1,175 @@
+/*
+ * Based on arch/arm/mm/proc.S
+ *
+ * Copyright (C) 2001 Deep Blue Solutions Ltd.
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Catalin Marinas <catalin.marinas@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/asm-offsets.h>
+#include <asm/hwcap.h>
+#include <asm/pgtable-hwdef.h>
+#include <asm/pgtable.h>
+
+#include "proc-macros.S"
+
+#ifndef CONFIG_SMP
+/* PTWs cacheable, inner/outer WBWA not shareable */
+#define TCR_FLAGS TCR_IRGN_WBWA | TCR_ORGN_WBWA
+#else
+/* PTWs cacheable, inner/outer WBWA shareable */
+#define TCR_FLAGS TCR_IRGN_WBWA | TCR_ORGN_WBWA | TCR_SHARED
+#endif
+
+#define MAIR(attr, mt) ((attr) << ((mt) * 8))
+
+/*
+ * cpu_cache_off()
+ *
+ * Turn the CPU D-cache off.
+ */
+ENTRY(cpu_cache_off)
+ mrs x0, sctlr_el1
+ bic x0, x0, #1 << 2 // clear SCTLR.C
+ msr sctlr_el1, x0
+ isb
+ ret
+ENDPROC(cpu_cache_off)
+
+/*
+ * cpu_reset(loc)
+ *
+ * Perform a soft reset of the system. Put the CPU into the same state
+ * as it would be if it had been reset, and branch to what would be the
+ * reset vector. It must be executed with the flat identity mapping.
+ *
+ * - loc - location to jump to for soft reset
+ */
+ .align 5
+ENTRY(cpu_reset)
+ mrs x1, sctlr_el1
+ bic x1, x1, #1
+ msr sctlr_el1, x1 // disable the MMU
+ isb
+ ret x0
+ENDPROC(cpu_reset)
+
+/*
+ * cpu_do_idle()
+ *
+ * Idle the processor (wait for interrupt).
+ */
+ENTRY(cpu_do_idle)
+ dsb sy // WFI may enter a low-power mode
+ wfi
+ ret
+ENDPROC(cpu_do_idle)
+
+/*
+ * cpu_do_switch_mm(pgd_phys, tsk)
+ *
+ * Set the translation table base pointer to be pgd_phys.
+ *
+ * - pgd_phys - physical address of new TTB
+ */
+ENTRY(cpu_do_switch_mm)
+ mmid w1, x1 // get mm->context.id
+ bfi x0, x1, #48, #16 // set the ASID
+ msr ttbr0_el1, x0 // set TTBR0
+ isb
+ ret
+ENDPROC(cpu_do_switch_mm)
+
+cpu_name:
+ .ascii "AArch64 Processor"
+ .align
+
+ .section ".text.init", #alloc, #execinstr
+
+/*
+ * __cpu_setup
+ *
+ * Initialise the processor for turning the MMU on. Return in x0 the
+ * value to be written to the SCTLR_EL1 register.
+ */
+ENTRY(__cpu_setup)
+ /*
+ * Preserve the link register across the function call.
+ */
+ mov x28, lr
+ bl __flush_dcache_all
+ mov lr, x28
+ ic iallu // I+BTB cache invalidate
+ dsb sy
+
+ mov x0, #3 << 20
+ msr cpacr_el1, x0 // Enable FP/ASIMD
+ mov x0, #1
+ msr oslar_el1, x0 // Set the debug OS lock
+ tlbi vmalle1is // invalidate I + D TLBs
+ /*
+ * Memory region attributes for LPAE:
+ *
+ * n = AttrIndx[2:0]
+ * n MAIR
+ * DEVICE_nGnRnE 000 00000000
+ * DEVICE_nGnRE 001 00000100
+ * DEVICE_GRE 010 00001100
+ * NORMAL_NC 011 01000100
+ * NORMAL 100 11111111
+ */
+ ldr x5, =MAIR(0x00, MT_DEVICE_nGnRnE) | \
+ MAIR(0x04, MT_DEVICE_nGnRE) | \
+ MAIR(0x0c, MT_DEVICE_GRE) | \
+ MAIR(0x44, MT_NORMAL_NC) | \
+ MAIR(0xff, MT_NORMAL)
+ msr mair_el1, x5
+ /*
+ * Prepare SCTLR
+ */
+ adr x5, crval
+ ldp w5, w6, [x5]
+ mrs x0, sctlr_el1
+ bic x0, x0, x5 // clear bits
+ orr x0, x0, x6 // set bits
+ /*
+ * Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for
+ * both user and kernel.
+ */
+ ldr x10, =TCR_TxSZ(VA_BITS) | TCR_FLAGS | TCR_IPS_40BIT | \
+ TCR_ASID16 | (1 << 31)
+#ifdef CONFIG_ARM64_64K_PAGES
+ orr x10, x10, TCR_TG0_64K
+ orr x10, x10, TCR_TG1_64K
+#endif
+ msr tcr_el1, x10
+ ret // return to head.S
+ENDPROC(__cpu_setup)
+
+ /*
+ *                 n n            T
+ *       U E      WT T UD     US IHBS
+ *       CE0      XWHW CZ     ME TEEA   S
+ * .... .IEE .... NEAI TE.I ..AD DEN0 ACAM
+ * 0011 0... 1101 ..0. ..0. 10.. .... .... < hardware reserved
+ * .... .100 .... 01.1 11.1 ..01 0001 1101 < software settings
+ */
+ .type crval, #object
+crval:
+ .word 0x030802e2 // clear
+ .word 0x0405d11d // set
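
A point that is easy to miss in proc.S above: cpu_do_switch_mm() never writes a separate ASID register; the 16-bit ASID is inserted into TTBR0_EL1[63:48] alongside the pgd physical address (bfi x0, x1, #48, #16), which works because __cpu_setup selects 16-bit ASIDs via TCR_ASID16. A small C sketch of that packing, with example inputs that are assumptions only:

#include <stdint.h>
#include <stdio.h>

/* Compose a TTBR0_EL1 value as cpu_do_switch_mm does: pgd physical address
 * in the low 48 bits, 16-bit ASID in bits [63:48]. */
static uint64_t make_ttbr0(uint64_t pgd_phys, uint16_t asid)
{
        return (pgd_phys & 0x0000ffffffffffffULL) | ((uint64_t)asid << 48);
}

int main(void)
{
        uint64_t ttbr0 = make_ttbr0(0x80201000ULL, 0x0042);
        printf("TTBR0_EL1 = 0x%016llx\n", (unsigned long long)ttbr0);
        return 0;
}
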
diff --git a/arch/arm64/mm/tlb.S b/arch/arm64/mm/tlb.S
new file mode 100644
index 00000000000..8ae80a18e8e
--- /dev/null
+++ b/arch/arm64/mm/tlb.S
@@ -0,0 +1,71 @@
+/*
+ * Based on arch/arm/mm/tlb.S
+ *
+ * Copyright (C) 1997-2002 Russell King
+ * Copyright (C) 2012 ARM Ltd.
+ * Written by Catalin Marinas <catalin.marinas@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/asm-offsets.h>
+#include <asm/page.h>
+#include <asm/tlbflush.h>
+#include "proc-macros.S"
+
+/*
+ * __cpu_flush_user_tlb_range(start, end, vma)
+ *
+ * Invalidate a range of TLB entries in the specified address space.
+ *
+ * - start - start address (may not be aligned)
+ * - end - end address (exclusive, may not be aligned)
+ * - vma - vma_struct describing address range
+ */
+ENTRY(__cpu_flush_user_tlb_range)
+ vma_vm_mm x3, x2 // get vma->vm_mm
+ mmid x3, x3 // get vm_mm->context.id
+ dsb sy
+ lsr x0, x0, #12 // align address
+ lsr x1, x1, #12
+ bfi x0, x3, #48, #16 // start VA and ASID
+ bfi x1, x3, #48, #16 // end VA and ASID
+1: tlbi vae1is, x0 // TLB invalidate by address and ASID
+ add x0, x0, #1
+ cmp x0, x1
+ b.lo 1b
+ dsb sy
+ ret
+ENDPROC(__cpu_flush_user_tlb_range)
+
+/*
+ * __cpu_flush_kern_tlb_range(start,end)
+ *
+ * Invalidate a range of kernel TLB entries.
+ *
+ * - start - start address (may not be aligned)
+ * - end - end address (exclusive, may not be aligned)
+ */
+ENTRY(__cpu_flush_kern_tlb_range)
+ dsb sy
+ lsr x0, x0, #12 // align address
+ lsr x1, x1, #12
+1: tlbi vaae1is, x0 // TLB invalidate by address
+ add x0, x0, #1
+ cmp x0, x1
+ b.lo 1b
+ dsb sy
+ isb
+ ret
+ENDPROC(__cpu_flush_kern_tlb_range)
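
The tlbi operand format used above is the same in both routines: the virtual address is pre-shifted right by 12 bits (one step per 4K page) and, for the user-space variant (vae1is), the 16-bit ASID occupies bits [63:48]; the kernel variant (vaae1is) simply omits the ASID. A C sketch of how such an operand is composed and stepped over a range, with example addresses that are assumptions only:

#include <stdint.h>
#include <stdio.h>

/* Build the operand for a by-VA, by-ASID invalidation: VA >> 12 in the low
 * bits, ASID in bits [63:48], mirroring the lsr/bfi sequence above. */
static uint64_t tlbi_user_op(uint64_t va, uint16_t asid)
{
        return ((va >> 12) & 0x0000ffffffffffffULL) | ((uint64_t)asid << 48);
}

int main(void)
{
        uint64_t start = 0x0000007fb7e00000ULL; /* example range (assumed) */
        uint64_t end   = 0x0000007fb7e03000ULL;
        uint16_t asid  = 0x0042;
        uint64_t op;

        /* One invalidation per 4K page, like the loop at label 1: above. */
        for (op = tlbi_user_op(start, asid); op < tlbi_user_op(end, asid); op++)
                printf("tlbi vae1is, 0x%016llx\n", (unsigned long long)op);
        return 0;
}
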