Diffstat (limited to 'arch/arm/kernel'):

 arch/arm/kernel/Makefile       |   2
 arch/arm/kernel/calls.S        |   2
 arch/arm/kernel/entry-common.S |  10
 arch/arm/kernel/head.S         |  14
 arch/arm/kernel/hyp-stub.S     | 223
 arch/arm/kernel/process.c      |  74
 arch/arm/kernel/setup.c        |  20
 arch/arm/kernel/signal.c       |   1
 arch/arm/kernel/smp.c          |   8
 arch/arm/kernel/suspend.c      |  17
 arch/arm/kernel/sys_arm.c      |  63
 11 files changed, 296 insertions(+), 138 deletions(-)
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 5dfef9d97ed..5bbec7b8183 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -81,4 +81,6 @@ head-y := head$(MMUEXT).o
obj-$(CONFIG_DEBUG_LL) += debug.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o
+
extra-y := $(head-y) vmlinux.lds
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index e337879595e..831cd38c8d9 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -20,7 +20,7 @@
CALL(sys_creat)
CALL(sys_link)
/* 10 */ CALL(sys_unlink)
- CALL(sys_execve_wrapper)
+ CALL(sys_execve)
CALL(sys_chdir)
CALL(OBSOLETE(sys_time)) /* used by libc4 */
CALL(sys_mknod)
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index f45987037bf..417bac1846b 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -86,8 +86,11 @@ ENDPROC(ret_to_user)
*/
ENTRY(ret_from_fork)
bl schedule_tail
+ cmp r5, #0
+ movne r0, r4
+ movne lr, pc
+ movne pc, r5
get_thread_info tsk
- mov why, #1
b ret_slow_syscall
ENDPROC(ret_from_fork)
@@ -517,11 +520,6 @@ sys_vfork_wrapper:
b sys_vfork
ENDPROC(sys_vfork_wrapper)
-sys_execve_wrapper:
- add r3, sp, #S_OFF
- b sys_execve
-ENDPROC(sys_execve_wrapper)
-
sys_clone_wrapper:
add ip, sp, #S_OFF
str ip, [sp, #4]
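
ret_from_fork now doubles as the kernel-thread trampoline, replacing the
kernel_thread_helper code removed from process.c further down: copy_thread()
leaves the thread function in r5 and its argument in r4, and a zero r5 marks
an ordinary user fork. A rough C paraphrase of the new flow, for illustration
only (the helper names are stand-ins for the real entry-path symbols):

	void ret_from_fork_sketch(struct task_struct *prev,
				  int (*fn)(void *), void *arg)
	{
		schedule_tail(prev);	/* finish the context switch */
		if (fn)			/* r5 != 0: kernel thread */
			fn(arg);	/* r4 carries the argument */
		ret_slow_syscall();	/* fall into the user-return path */
	}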
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 9874d074119..4eee351f466 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -83,8 +83,12 @@ ENTRY(stext)
THUMB( .thumb ) @ switch to Thumb now.
THUMB(1: )
- setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
- @ and irqs disabled
+#ifdef CONFIG_ARM_VIRT_EXT
+ bl __hyp_stub_install
+#endif
+ @ ensure svc mode and all interrupts masked
+ safe_svcmode_maskall r9
+
mrc p15, 0, r9, c0, c0 @ get processor id
bl __lookup_processor_type @ r5=procinfo r9=cpuid
movs r10, r5 @ invalid processor (r5=0)?
@@ -326,7 +330,11 @@ ENTRY(secondary_startup)
* the processor type - there is no need to check the machine type
* as it has already been validated by the primary processor.
*/
- setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9
+#ifdef CONFIG_ARM_VIRT_EXT
+ bl __hyp_stub_install
+#endif
+ safe_svcmode_maskall r9
+
mrc p15, 0, r9, c0, c0 @ get processor id
bl __lookup_processor_type
movs r10, r5 @ invalid processor?
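
Both boot paths now enter through safe_svcmode_maskall rather than a bare
setmode, because a CPU started in HYP mode cannot leave it with a plain
"msr cpsr" write; it has to take an exception return. Pseudocode for what the
macro does (the real implementation is an assembly macro in
arch/arm/include/asm/assembler.h; the helpers below are illustrative
stand-ins):

	u32 psr = read_cpsr();				/* mrs */
	if ((psr & MODE_MASK) == HYP_MODE) {
		/* Drop to SVC via exception return, everything masked. */
		psr &= ~MODE_MASK;
		psr |= SVC_MODE | PSR_I_BIT | PSR_F_BIT | PSR_A_BIT;
		exception_return_to(psr);	/* spsr_hyp + elr_hyp + eret */
	} else {
		write_cpsr_c((psr & ~MODE_MASK)
			     | SVC_MODE | PSR_I_BIT | PSR_F_BIT);
	}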
diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
new file mode 100644
index 00000000000..65b2417aebc
--- /dev/null
+++ b/arch/arm/kernel/hyp-stub.S
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2012 Linaro Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/virt.h>
+
+#ifndef ZIMAGE
+/*
+ * For the kernel proper, we need to find out the CPU boot mode long after
+ * boot, so we need to store it in a writable variable.
+ *
+ * This is not in .bss, because we set it sufficiently early that the boot-time
+ * zeroing of .bss would clobber it.
+ */
+.data
+ENTRY(__boot_cpu_mode)
+ .long 0
+.text
+
+ /*
+ * Save the primary CPU boot mode. Requires 3 scratch registers.
+ */
+ .macro store_primary_cpu_mode reg1, reg2, reg3
+ mrs \reg1, cpsr
+ and \reg1, \reg1, #MODE_MASK
+ adr \reg2, .L__boot_cpu_mode_offset
+ ldr \reg3, [\reg2]
+ str \reg1, [\reg2, \reg3]
+ .endm
+
+ /*
+ * Compare the current mode with the one saved on the primary CPU.
+ * If they don't match, record that fact. The Z bit indicates
+ * if there's a match or not.
+ * Requires 3 additional scratch registers.
+ */
+ .macro compare_cpu_mode_with_primary mode, reg1, reg2, reg3
+ adr \reg2, .L__boot_cpu_mode_offset
+ ldr \reg3, [\reg2]
+ ldr \reg1, [\reg2, \reg3]
+ cmp \mode, \reg1 @ matches primary CPU boot mode?
+ orrne \reg1, \reg1, #BOOT_CPU_MODE_MISMATCH
+ strne \reg1, [\reg2, \reg3] @ record what happened and give up
+ .endm
+
+#else /* ZIMAGE */
+
+ .macro store_primary_cpu_mode reg1:req, reg2:req, reg3:req
+ .endm
+
+/*
+ * The zImage loader only runs on one CPU, so we don't bother with multi-CPU
+ * consistency checking:
+ */
+ .macro compare_cpu_mode_with_primary mode, reg1, reg2, reg3
+ cmp \mode, \mode
+ .endm
+
+#endif /* ZIMAGE */
+
+/*
+ * Hypervisor stub installation functions.
+ *
+ * These must be called with the MMU and D-cache off.
+ * They are not ABI compliant and are only intended to be called from the kernel
+ * entry points in head.S.
+ */
+@ Call this from the primary CPU
+ENTRY(__hyp_stub_install)
+ store_primary_cpu_mode r4, r5, r6
+ENDPROC(__hyp_stub_install)
+
+ @ fall through...
+
+@ Secondary CPUs should call here
+ENTRY(__hyp_stub_install_secondary)
+ mrs r4, cpsr
+ and r4, r4, #MODE_MASK
+
+ /*
+ * If the secondary has booted with a different mode, give up
+ * immediately.
+ */
+ compare_cpu_mode_with_primary r4, r5, r6, r7
+ bxne lr
+
+ /*
+ * Once we have given up on one CPU, we do not try to install the
+ * stub hypervisor on the remaining ones: because the saved boot mode
+ * is modified, it can't compare equal to the CPSR mode field any
+ * more.
+ *
+ * Otherwise...
+ */
+
+ cmp r4, #HYP_MODE
+ bxne lr @ give up if the CPU is not in HYP mode
+
+/*
+ * Configure HSCTLR to set correct exception endianness/instruction set
+ * state etc.
+ * Turn off all traps
+ * Eventually, CPU-specific code might be needed -- assume not for now
+ *
+ * This code relies on the "eret" instruction to synchronize the
+ * various coprocessor accesses.
+ */
+ @ Now install the hypervisor stub:
+ adr r7, __hyp_stub_vectors
+ mcr p15, 4, r7, c12, c0, 0 @ set hypervisor vector base (HVBAR)
+
+ @ Disable all traps, so we don't get any nasty surprise
+ mov r7, #0
+ mcr p15, 4, r7, c1, c1, 0 @ HCR
+ mcr p15, 4, r7, c1, c1, 2 @ HCPTR
+ mcr p15, 4, r7, c1, c1, 3 @ HSTR
+
+THUMB( orr r7, #(1 << 30) ) @ HSCTLR.TE
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ orr r7, #(1 << 9) @ HSCTLR.EE
+#endif
+ mcr p15, 4, r7, c1, c0, 0 @ HSCTLR
+
+ mrc p15, 4, r7, c1, c1, 1 @ HDCR
+ and r7, #0x1f @ Preserve HPMN
+ mcr p15, 4, r7, c1, c1, 1 @ HDCR
+
+#if !defined(ZIMAGE) && defined(CONFIG_ARM_ARCH_TIMER)
+ @ make CNTP_* and CNTPCT accessible from PL1
+ mrc p15, 0, r7, c0, c1, 1 @ ID_PFR1
+ lsr r7, #16
+ and r7, #0xf
+ cmp r7, #1
+ bne 1f
+ mrc p15, 4, r7, c14, c1, 0 @ CNTHCTL
+ orr r7, r7, #3 @ PL1PCEN | PL1PCTEN
+ mcr p15, 4, r7, c14, c1, 0 @ CNTHCTL
+1:
+#endif
+
+ bic r7, r4, #MODE_MASK
+ orr r7, r7, #SVC_MODE
+THUMB( orr r7, r7, #PSR_T_BIT )
+ msr spsr_cxsf, r7 @ This is SPSR_hyp.
+
+ __MSR_ELR_HYP(14) @ msr elr_hyp, lr
+ __ERET @ return, switching to SVC mode
+ @ The boot CPU mode is left in r4.
+ENDPROC(__hyp_stub_install_secondary)
+
+__hyp_stub_do_trap:
+ cmp r0, #-1
+ mrceq p15, 4, r0, c12, c0, 0 @ get HVBAR
+ mcrne p15, 4, r0, c12, c0, 0 @ set HVBAR
+ __ERET
+ENDPROC(__hyp_stub_do_trap)
+
+/*
+ * __hyp_set_vectors: Call this after boot to set the initial hypervisor
+ * vectors as part of hypervisor installation. On an SMP system, this should
+ * be called on each CPU.
+ *
+ * r0 must be the physical address of the new vector table (which must lie in
+ * the bottom 4GB of physical address space).
+ *
+ * r0 must be 32-byte aligned.
+ *
+ * Before calling this, you must check that the stub hypervisor is installed
+ * everywhere, by waiting for any secondary CPUs to be brought up and then
+ * checking that BOOT_CPU_MODE_HAVE_HYP(__boot_cpu_mode) is true.
+ *
+ * If not, there is a pre-existing hypervisor, some CPUs failed to boot, or
+ * something else went wrong... in such cases, trying to install a new
+ * hypervisor is unlikely to work as desired.
+ *
+ * When you call into your shiny new hypervisor, sp_hyp will contain junk,
+ * so you will need to set that to something sensible at the new hypervisor's
+ * initialisation entry point.
+ */
+ENTRY(__hyp_get_vectors)
+ mov r0, #-1
+ENDPROC(__hyp_get_vectors)
+ @ fall through
+ENTRY(__hyp_set_vectors)
+ __HVC(0)
+ bx lr
+ENDPROC(__hyp_set_vectors)
+
+#ifndef ZIMAGE
+.align 2
+.L__boot_cpu_mode_offset:
+ .long __boot_cpu_mode - .
+#endif
+
+.align 5
+__hyp_stub_vectors:
+__hyp_stub_reset: W(b) .
+__hyp_stub_und: W(b) .
+__hyp_stub_svc: W(b) .
+__hyp_stub_pabort: W(b) .
+__hyp_stub_dabort: W(b) .
+__hyp_stub_trap: W(b) __hyp_stub_do_trap
+__hyp_stub_irq: W(b) .
+__hyp_stub_fiq: W(b) .
+ENDPROC(__hyp_stub_vectors)
+
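
The stub's only runtime service is the trap handler above: an HVC with
r0 == -1 reads HVBAR back, any other value installs it. A hedged usage sketch
from C, assuming the prototypes that <asm/virt.h> declares for these symbols
(the vector table below is hypothetical):

	#include <asm/virt.h>

	extern char my_hyp_vectors[];	/* hypothetical table: 32-byte
					 * aligned, below 4GB physical */

	static void install_hyp(void)
	{
		unsigned long old = __hyp_get_vectors();  /* hvc, r0 == -1 */

		/* Redirect HVBAR; later hvc calls enter the new table.
		 * The new hypervisor must set up its own sp_hyp. */
		__hyp_set_vectors(virt_to_phys(my_hyp_vectors));
		(void)old;
	}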
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 04eea22d795..90084a6de35 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -381,13 +381,20 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
struct thread_info *thread = task_thread_info(p);
struct pt_regs *childregs = task_pt_regs(p);
- *childregs = *regs;
- childregs->ARM_r0 = 0;
- childregs->ARM_sp = stack_start;
-
memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
- thread->cpu_context.sp = (unsigned long)childregs;
+
+ if (likely(regs)) {
+ *childregs = *regs;
+ childregs->ARM_r0 = 0;
+ childregs->ARM_sp = stack_start;
+ } else {
+ memset(childregs, 0, sizeof(struct pt_regs));
+ thread->cpu_context.r4 = stk_sz;
+ thread->cpu_context.r5 = stack_start;
+ childregs->ARM_cpsr = SVC_MODE;
+ }
thread->cpu_context.pc = (unsigned long)ret_from_fork;
+ thread->cpu_context.sp = (unsigned long)childregs;
clear_ptrace_hw_breakpoint(p);
@@ -423,63 +430,6 @@ int dump_fpu (struct pt_regs *regs, struct user_fp *fp)
}
EXPORT_SYMBOL(dump_fpu);
-/*
- * Shuffle the argument into the correct register before calling the
- * thread function. r4 is the thread argument, r5 is the pointer to
- * the thread function, and r6 points to the exit function.
- */
-extern void kernel_thread_helper(void);
-asm( ".pushsection .text\n"
-" .align\n"
-" .type kernel_thread_helper, #function\n"
-"kernel_thread_helper:\n"
-#ifdef CONFIG_TRACE_IRQFLAGS
-" bl trace_hardirqs_on\n"
-#endif
-" msr cpsr_c, r7\n"
-" mov r0, r4\n"
-" mov lr, r6\n"
-" mov pc, r5\n"
-" .size kernel_thread_helper, . - kernel_thread_helper\n"
-" .popsection");
-
-#ifdef CONFIG_ARM_UNWIND
-extern void kernel_thread_exit(long code);
-asm( ".pushsection .text\n"
-" .align\n"
-" .type kernel_thread_exit, #function\n"
-"kernel_thread_exit:\n"
-" .fnstart\n"
-" .cantunwind\n"
-" bl do_exit\n"
-" nop\n"
-" .fnend\n"
-" .size kernel_thread_exit, . - kernel_thread_exit\n"
-" .popsection");
-#else
-#define kernel_thread_exit do_exit
-#endif
-
-/*
- * Create a kernel thread.
- */
-pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
-{
- struct pt_regs regs;
-
- memset(&regs, 0, sizeof(regs));
-
- regs.ARM_r4 = (unsigned long)arg;
- regs.ARM_r5 = (unsigned long)fn;
- regs.ARM_r6 = (unsigned long)kernel_thread_exit;
- regs.ARM_r7 = SVC_MODE | PSR_ENDSTATE | PSR_ISETSTATE;
- regs.ARM_pc = (unsigned long)kernel_thread_helper;
- regs.ARM_cpsr = regs.ARM_r7 | PSR_I_BIT;
-
- return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
-}
-EXPORT_SYMBOL(kernel_thread);
-
unsigned long get_wchan(struct task_struct *p)
{
struct stackframe frame;
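
Deleting the arch-specific kernel_thread() works because the generic
implementation smuggles the function and argument through do_fork()'s
stack_start/stack_size parameters, which is exactly what the new
copy_thread() branch above unpacks into r5/r4. Roughly, for that kernel
generation (a sketch, not verbatim):

	pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
	{
		/* fn travels as "stack start", arg as "stack size";
		 * copy_thread() sees them as stack_start and stk_sz. */
		return do_fork(flags | CLONE_VM | CLONE_UNTRACED,
			       (unsigned long)fn, NULL,
			       (unsigned long)arg, NULL, NULL);
	}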
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index febafa0f552..da1d1aa20ad 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -53,6 +53,7 @@
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
+#include <asm/virt.h>
#include "atags.h"
#include "tcm.h"
@@ -703,6 +704,21 @@ static int __init meminfo_cmp(const void *_a, const void *_b)
return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}
+void __init hyp_mode_check(void)
+{
+#ifdef CONFIG_ARM_VIRT_EXT
+ if (is_hyp_mode_available()) {
+ pr_info("CPU: All CPU(s) started in HYP mode.\n");
+ pr_info("CPU: Virtualization extensions available.\n");
+ } else if (is_hyp_mode_mismatched()) {
+ pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
+ __boot_cpu_mode & MODE_MASK);
+ pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
+ } else
+ pr_info("CPU: All CPU(s) started in SVC mode.\n");
+#endif
+}
+
void __init setup_arch(char **cmdline_p)
{
struct machine_desc *mdesc;
@@ -748,6 +764,10 @@ void __init setup_arch(char **cmdline_p)
smp_init_cpus();
}
#endif
+
+ if (!is_smp())
+ hyp_mode_check();
+
reserve_crashkernel();
tcm_init();
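
Since hyp_mode_check() runs only once all CPUs are up (directly here on UP
systems; via smp_cpus_done() below on SMP), the boot log now records the
entry mode. For example, a platform whose firmware enters the kernel in HYP
mode on every CPU would log:

	CPU: All CPU(s) started in HYP mode.
	CPU: Virtualization extensions available.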
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index f27789e4e38..56f72d257eb 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -10,7 +10,6 @@
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/personality.h>
-#include <linux/freezer.h>
#include <linux/uaccess.h>
#include <linux/tracehook.h>
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index d100eacdb79..8e20754dd31 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -43,6 +43,7 @@
#include <asm/ptrace.h>
#include <asm/localtimer.h>
#include <asm/smp_plat.h>
+#include <asm/virt.h>
#include <asm/mach/arch.h>
/*
@@ -202,8 +203,11 @@ int __cpuinit __cpu_disable(void)
/*
* Flush user cache and TLB mappings, and then remove this CPU
* from the vm mask set of all processes.
+ *
+ * Caches are flushed to the Level of Unification Inner Shareable
+ * to write-back dirty lines to unified caches shared by all CPUs.
*/
- flush_cache_all();
+ flush_cache_louis();
local_flush_tlb_all();
clear_tasks_mm_cpumask(cpu);
@@ -355,6 +359,8 @@ void __init smp_cpus_done(unsigned int max_cpus)
num_online_cpus(),
bogosum / (500000/HZ),
(bogosum / (5000/HZ)) % 100);
+
+ hyp_mode_check();
}
void __init smp_prepare_boot_cpu(void)
diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c
index 1794cc3b0f1..358bca3a995 100644
--- a/arch/arm/kernel/suspend.c
+++ b/arch/arm/kernel/suspend.c
@@ -17,6 +17,8 @@ extern void cpu_resume_mmu(void);
*/
void __cpu_suspend_save(u32 *ptr, u32 ptrsz, u32 sp, u32 *save_ptr)
{
+ u32 *ctx = ptr;
+
*save_ptr = virt_to_phys(ptr);
/* This must correspond to the LDM in cpu_resume() assembly */
@@ -26,7 +28,20 @@ void __cpu_suspend_save(u32 *ptr, u32 ptrsz, u32 sp, u32 *save_ptr)
cpu_do_suspend(ptr);
- flush_cache_all();
+ flush_cache_louis();
+
+ /*
+ * flush_cache_louis does not guarantee that
+ * save_ptr and ptr are cleaned to main memory,
+ * just up to the Level of Unification Inner Shareable.
+ * Since the context pointer and context itself
+ * are to be retrieved with the MMU off, that
+ * data must be cleaned from all cache levels
+ * to main memory using "area" cache primitives.
+ */
+ __cpuc_flush_dcache_area(ctx, ptrsz);
+ __cpuc_flush_dcache_area(save_ptr, sizeof(*save_ptr));
+
outer_clean_range(*save_ptr, *save_ptr + ptrsz);
outer_clean_range(virt_to_phys(save_ptr),
virt_to_phys(save_ptr) + sizeof(*save_ptr));
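
The suspend path thus cleans in three stages of increasing reach, summarized
here from the calls above (a recap, not new code):

	flush_cache_louis();			/* inner caches to LoUIS:
						 * visible to other CPUs */
	__cpuc_flush_dcache_area(ctx, ptrsz);	/* context to the PoC, for
						 * the MMU-off resume path */
	__cpuc_flush_dcache_area(save_ptr, sizeof(*save_ptr));
	outer_clean_range(...);			/* and past any outer (e.g.
						 * PL310) cache */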
diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c
index 76cbb055dd0..c2a898aa57a 100644
--- a/arch/arm/kernel/sys_arm.c
+++ b/arch/arm/kernel/sys_arm.c
@@ -59,69 +59,6 @@ asmlinkage int sys_vfork(struct pt_regs *regs)
return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->ARM_sp, regs, 0, NULL, NULL);
}
-/* sys_execve() executes a new program.
- * This is called indirectly via a small wrapper
- */
-asmlinkage int sys_execve(const char __user *filenamei,
- const char __user *const __user *argv,
- const char __user *const __user *envp, struct pt_regs *regs)
-{
- int error;
- char * filename;
-
- filename = getname(filenamei);
- error = PTR_ERR(filename);
- if (IS_ERR(filename))
- goto out;
- error = do_execve(filename, argv, envp, regs);
- putname(filename);
-out:
- return error;
-}
-
-int kernel_execve(const char *filename,
- const char *const argv[],
- const char *const envp[])
-{
- struct pt_regs regs;
- int ret;
-
- memset(&regs, 0, sizeof(struct pt_regs));
- ret = do_execve(filename,
- (const char __user *const __user *)argv,
- (const char __user *const __user *)envp, &regs);
- if (ret < 0)
- goto out;
-
- /*
- * Save argc to the register structure for userspace.
- */
- regs.ARM_r0 = ret;
-
- /*
- * We were successful. We won't be returning to our caller, but
- * instead to user space by manipulating the kernel stack.
- */
- asm( "add r0, %0, %1\n\t"
- "mov r1, %2\n\t"
- "mov r2, %3\n\t"
- "bl memmove\n\t" /* copy regs to top of stack */
- "mov r8, #0\n\t" /* not a syscall */
- "mov r9, %0\n\t" /* thread structure */
- "mov sp, r0\n\t" /* reposition stack pointer */
- "b ret_to_user"
- :
- : "r" (current_thread_info()),
- "Ir" (THREAD_START_SP - sizeof(regs)),
- "r" (&regs),
- "Ir" (sizeof(regs))
- : "r0", "r1", "r2", "r3", "r8", "r9", "ip", "lr", "memory");
-
- out:
- return ret;
-}
-EXPORT_SYMBOL(kernel_execve);
-
/*
* Since loff_t is a 64 bit type we avoid a lot of ABI hassle
* with a different argument ordering.
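
The execve paths go the same way as kernel_thread(): together with the
wrapper removed from entry-common.S and the calls.S retarget, ARM now uses
the generic sys_execve()/kernel_execve() from fs/exec.c. For reference, the
generic syscall of that era looked roughly like this (a sketch, not verbatim;
do_execve()'s signature has since changed):

	SYSCALL_DEFINE3(execve,
			const char __user *, filename,
			const char __user *const __user *, argv,
			const char __user *const __user *, envp)
	{
		const char *path = getname(filename);
		int error = PTR_ERR(path);

		if (!IS_ERR(path)) {
			error = do_execve(path, argv, envp,
					  current_pt_regs());
			putname(path);
		}
		return error;
	}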