Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r--  arch/arm64/kernel/entry.S      |  17
-rw-r--r--  arch/arm64/kernel/perf_event.c |  10
-rw-r--r--  arch/arm64/kernel/process.c    | 101
-rw-r--r--  arch/arm64/kernel/ptrace.c     |  73
-rw-r--r--  arch/arm64/kernel/setup.c      |  12
-rw-r--r--  arch/arm64/kernel/smp.c        |   4
-rw-r--r--  arch/arm64/kernel/sys.c        |  76
-rw-r--r--  arch/arm64/kernel/sys32.S      |  19
-rw-r--r--  arch/arm64/kernel/sys_compat.c |  38
-rw-r--r--  arch/arm64/kernel/vdso.c       |  20
10 files changed, 119 insertions, 251 deletions
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index a6f3f7da688..cbfa4d28100 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -594,7 +594,7 @@ work_resched:
 /*
  * "slow" syscall return path.
  */
-ENTRY(ret_to_user)
+ret_to_user:
 	disable_irq				// disable interrupts
 	ldr	x1, [tsk, #TI_FLAGS]
 	and	x2, x1, #_TIF_WORK_MASK
@@ -611,7 +611,10 @@ ENDPROC(ret_to_user)
  */
 ENTRY(ret_from_fork)
 	bl	schedule_tail
-	get_thread_info tsk
+	cbz	x19, 1f				// not a kernel thread
+	mov	x0, x20
+	blr	x19
+1:	get_thread_info tsk
 	b	ret_to_user
 ENDPROC(ret_from_fork)
 
@@ -673,16 +676,6 @@ __sys_trace_return:
 /*
  * Special system call wrappers.
  */
-ENTRY(sys_execve_wrapper)
-	mov	x3, sp
-	b	sys_execve
-ENDPROC(sys_execve_wrapper)
-
-ENTRY(sys_clone_wrapper)
-	mov	x5, sp
-	b	sys_clone
-ENDPROC(sys_clone_wrapper)
-
 ENTRY(sys_rt_sigreturn_wrapper)
 	mov	x0, sp
 	b	sys_rt_sigreturn
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index ecbf2d81ec5..c76c7241125 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -613,17 +613,11 @@ enum armv8_pmuv3_perf_types {
 	ARMV8_PMUV3_PERFCTR_BUS_ACCESS			= 0x19,
 	ARMV8_PMUV3_PERFCTR_MEM_ERROR			= 0x1A,
 	ARMV8_PMUV3_PERFCTR_BUS_CYCLES			= 0x1D,
-
-	/*
-	 * This isn't an architected event.
-	 * We detect this event number and use the cycle counter instead.
-	 */
-	ARMV8_PMUV3_PERFCTR_CPU_CYCLES			= 0xFF,
 };
 
 /* PMUv3 HW events mapping. */
 static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
-	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV8_PMUV3_PERFCTR_CPU_CYCLES,
+	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
 	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
 	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
 	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
@@ -1106,7 +1100,7 @@ static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
 	unsigned long evtype = event->config_base & ARMV8_EVTYPE_EVENT;
 
 	/* Always place a cycle counter into the cycle counter. */
-	if (evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) {
+	if (evtype == ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES) {
 		if (test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER,
 					cpuc->used_mask))
 			return -EAGAIN;
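[Editor's note] The armv8_pmuv3_perf_map table above is what backs the generic perf hardware event numbers, so PERF_COUNT_HW_CPU_CYCLES now resolves to the architected CLOCK_CYCLES event (and armv8pmu_get_event_idx() still steers it onto the dedicated cycle counter). For context only — this is not part of the patch — a minimal userspace sketch that exercises that mapping via the standard perf_event_open(2) interface:

/* Illustrative only: count CPU cycles over a busy loop using the generic
 * PERF_COUNT_HW_CPU_CYCLES event that the table above maps for PMUv3. */
#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t cycles;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_CPU_CYCLES;	/* mapped by armv8_pmuv3_perf_map */
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	for (volatile int i = 0; i < 1000000; i++)
		;				/* work being measured */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &cycles, sizeof(cycles)) == sizeof(cycles))
		printf("cycles: %llu\n", (unsigned long long)cycles);
	close(fd);
	return 0;
}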
*/ - asm("mrs %0, tpidr_el0" : "=r" (tls)); - childregs->sp = stack_start; + if (clone_flags & CLONE_SETTLS) + tls = regs->regs[3]; + } else { + memset(childregs, 0, sizeof(struct pt_regs)); + childregs->pstate = PSR_MODE_EL1h; + p->thread.cpu_context.x19 = stack_start; + p->thread.cpu_context.x20 = stk_sz; } - - memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context)); - p->thread.cpu_context.sp = (unsigned long)childregs; p->thread.cpu_context.pc = (unsigned long)ret_from_fork; - - /* If a TLS pointer was passed to clone, use that for the new thread. */ - if (clone_flags & CLONE_SETTLS) - tls = regs->regs[3]; + p->thread.cpu_context.sp = (unsigned long)childregs; p->thread.tp_value = tls; ptrace_hw_copy_thread(p); @@ -309,61 +323,6 @@ struct task_struct *__switch_to(struct task_struct *prev, return last; } -/* - * Fill in the task's elfregs structure for a core dump. - */ -int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs) -{ - elf_core_copy_regs(elfregs, task_pt_regs(t)); - return 1; -} - -/* - * fill in the fpe structure for a core dump... - */ -int dump_fpu (struct pt_regs *regs, struct user_fp *fp) -{ - return 0; -} -EXPORT_SYMBOL(dump_fpu); - -/* - * Shuffle the argument into the correct register before calling the - * thread function. x1 is the thread argument, x2 is the pointer to - * the thread function, and x3 points to the exit function. - */ -extern void kernel_thread_helper(void); -asm( ".section .text\n" -" .align\n" -" .type kernel_thread_helper, #function\n" -"kernel_thread_helper:\n" -" mov x0, x1\n" -" mov x30, x3\n" -" br x2\n" -" .size kernel_thread_helper, . - kernel_thread_helper\n" -" .previous"); - -#define kernel_thread_exit do_exit - -/* - * Create a kernel thread. - */ -pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) -{ - struct pt_regs regs; - - memset(®s, 0, sizeof(regs)); - - regs.regs[1] = (unsigned long)arg; - regs.regs[2] = (unsigned long)fn; - regs.regs[3] = (unsigned long)kernel_thread_exit; - regs.pc = (unsigned long)kernel_thread_helper; - regs.pstate = PSR_MODE_EL1h; - - return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, ®s, 0, NULL, NULL); -} -EXPORT_SYMBOL(kernel_thread); - unsigned long get_wchan(struct task_struct *p) { struct stackframe frame; diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 2ea3968367c..6e1e77f1831 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -234,28 +234,33 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type, struct arch_hw_breakpoint_ctrl ctrl, struct perf_event_attr *attr) { - int err, len, type; + int err, len, type, disabled = !ctrl.enabled; - err = arch_bp_generic_fields(ctrl, &len, &type); - if (err) - return err; - - switch (note_type) { - case NT_ARM_HW_BREAK: - if ((type & HW_BREAKPOINT_X) != type) - return -EINVAL; - break; - case NT_ARM_HW_WATCH: - if ((type & HW_BREAKPOINT_RW) != type) + if (disabled) { + len = 0; + type = HW_BREAKPOINT_EMPTY; + } else { + err = arch_bp_generic_fields(ctrl, &len, &type); + if (err) + return err; + + switch (note_type) { + case NT_ARM_HW_BREAK: + if ((type & HW_BREAKPOINT_X) != type) + return -EINVAL; + break; + case NT_ARM_HW_WATCH: + if ((type & HW_BREAKPOINT_RW) != type) + return -EINVAL; + break; + default: return -EINVAL; - break; - default: - return -EINVAL; + } } attr->bp_len = len; attr->bp_type = type; - attr->disabled = !ctrl.enabled; + attr->disabled = disabled; return 0; } @@ -372,7 +377,7 @@ static int ptrace_hbp_set_addr(unsigned int note_type, #define 
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 2ea3968367c..6e1e77f1831 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -234,28 +234,33 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
 				     struct arch_hw_breakpoint_ctrl ctrl,
 				     struct perf_event_attr *attr)
 {
-	int err, len, type;
+	int err, len, type, disabled = !ctrl.enabled;
 
-	err = arch_bp_generic_fields(ctrl, &len, &type);
-	if (err)
-		return err;
-
-	switch (note_type) {
-	case NT_ARM_HW_BREAK:
-		if ((type & HW_BREAKPOINT_X) != type)
-			return -EINVAL;
-		break;
-	case NT_ARM_HW_WATCH:
-		if ((type & HW_BREAKPOINT_RW) != type)
+	if (disabled) {
+		len = 0;
+		type = HW_BREAKPOINT_EMPTY;
+	} else {
+		err = arch_bp_generic_fields(ctrl, &len, &type);
+		if (err)
+			return err;
+
+		switch (note_type) {
+		case NT_ARM_HW_BREAK:
+			if ((type & HW_BREAKPOINT_X) != type)
+				return -EINVAL;
+			break;
+		case NT_ARM_HW_WATCH:
+			if ((type & HW_BREAKPOINT_RW) != type)
+				return -EINVAL;
+			break;
+		default:
 			return -EINVAL;
-		break;
-	default:
-		return -EINVAL;
+		}
 	}
 
 	attr->bp_len	= len;
 	attr->bp_type	= type;
-	attr->disabled	= !ctrl.enabled;
+	attr->disabled	= disabled;
 
 	return 0;
 }
@@ -372,7 +377,7 @@ static int ptrace_hbp_set_addr(unsigned int note_type,
 
 #define PTRACE_HBP_ADDR_SZ	sizeof(u64)
 #define PTRACE_HBP_CTRL_SZ	sizeof(u32)
-#define PTRACE_HBP_REG_OFF	sizeof(u32)
+#define PTRACE_HBP_PAD_SZ	sizeof(u32)
 
 static int hw_break_get(struct task_struct *target,
 			const struct user_regset *regset,
@@ -380,7 +385,7 @@ static int hw_break_get(struct task_struct *target,
 			void *kbuf, void __user *ubuf)
 {
 	unsigned int note_type = regset->core_note_type;
-	int ret, idx = 0, offset = PTRACE_HBP_REG_OFF, limit;
+	int ret, idx = 0, offset, limit;
 	u32 info, ctrl;
 	u64 addr;
 
@@ -389,11 +394,20 @@ static int hw_break_get(struct task_struct *target,
 	if (ret)
 		return ret;
 
-	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &info, 0, 4);
+	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &info, 0,
+				  sizeof(info));
+	if (ret)
+		return ret;
+
+	/* Pad */
+	offset = offsetof(struct user_hwdebug_state, pad);
+	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, offset,
+				       offset + PTRACE_HBP_PAD_SZ);
 	if (ret)
 		return ret;
 
 	/* (address, ctrl) registers */
+	offset = offsetof(struct user_hwdebug_state, dbg_regs);
 	limit = regset->n * regset->size;
 	while (count && offset < limit) {
 		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
@@ -413,6 +427,13 @@ static int hw_break_get(struct task_struct *target,
 		if (ret)
 			return ret;
 		offset += PTRACE_HBP_CTRL_SZ;
+
+		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+					       offset,
+					       offset + PTRACE_HBP_PAD_SZ);
+		if (ret)
+			return ret;
+		offset += PTRACE_HBP_PAD_SZ;
 		idx++;
 	}
 
@@ -425,12 +446,13 @@ static int hw_break_set(struct task_struct *target,
 			const void *kbuf, const void __user *ubuf)
 {
 	unsigned int note_type = regset->core_note_type;
-	int ret, idx = 0, offset = PTRACE_HBP_REG_OFF, limit;
+	int ret, idx = 0, offset, limit;
 	u32 ctrl;
 	u64 addr;
 
-	/* Resource info */
-	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, 4);
+	/* Resource info and pad */
+	offset = offsetof(struct user_hwdebug_state, dbg_regs);
+	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
 	if (ret)
 		return ret;
 
@@ -454,6 +476,13 @@ static int hw_break_set(struct task_struct *target,
 		if (ret)
 			return ret;
 		offset += PTRACE_HBP_CTRL_SZ;
+
+		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+						offset,
+						offset + PTRACE_HBP_PAD_SZ);
+		if (ret)
+			return ret;
+		offset += PTRACE_HBP_PAD_SZ;
 		idx++;
 	}
 
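[Editor's note] The offsetof() calls and PTRACE_HBP_PAD_SZ above only make sense against the user-visible regset layout. As a point of reference — paraphrased from memory, not part of the patch; the authoritative definition is in the arm64 uapi ptrace header — the structure being marshalled looks roughly like this:

/* Rough sketch of the layout hw_break_get()/hw_break_set() iterate over. */
struct user_hwdebug_state {
	__u32		dbg_info;	/* resource info, copied out first */
	__u32		pad;		/* skipped via PTRACE_HBP_PAD_SZ */
	struct {
		__u64	addr;		/* PTRACE_HBP_ADDR_SZ */
		__u32	ctrl;		/* PTRACE_HBP_CTRL_SZ */
		__u32	pad;		/* trailing pad per slot */
	}		dbg_regs[16];
};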
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 48ffb9fb3fe..7665a9bfdb1 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -170,7 +170,19 @@ static void __init setup_machine_fdt(phys_addr_t dt_phys)
 
 void __init early_init_dt_add_memory_arch(u64 base, u64 size)
 {
+	base &= PAGE_MASK;
 	size &= PAGE_MASK;
+	if (base + size < PHYS_OFFSET) {
+		pr_warning("Ignoring memory block 0x%llx - 0x%llx\n",
+			   base, base + size);
+		return;
+	}
+	if (base < PHYS_OFFSET) {
+		pr_warning("Ignoring memory range 0x%llx - 0x%llx\n",
+			   base, PHYS_OFFSET);
+		size -= PHYS_OFFSET - base;
+		base = PHYS_OFFSET;
+	}
 	memblock_add(base, size);
 }
 
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index b711525be21..538300f2273 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -46,7 +46,6 @@
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
 #include <asm/ptrace.h>
-#include <asm/mmu_context.h>
 
 /*
  * as from 2.5, kernels no longer have an init_tasks structure
@@ -212,8 +211,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	 * before we continue.
 	 */
 	set_cpu_online(cpu, true);
-	while (!cpu_active(cpu))
-		cpu_relax();
+	complete(&cpu_running);
 
 	/*
 	 * OK, it's off to the idle thread for us
diff --git a/arch/arm64/kernel/sys.c b/arch/arm64/kernel/sys.c
index b120df37de3..4364df85050 100644
--- a/arch/arm64/kernel/sys.c
+++ b/arch/arm64/kernel/sys.c
@@ -31,79 +31,11 @@
  */
 asmlinkage long sys_clone(unsigned long clone_flags, unsigned long newsp,
 			  int __user *parent_tidptr, unsigned long tls_val,
-			  int __user *child_tidptr, struct pt_regs *regs)
+			  int __user *child_tidptr)
 {
-	if (!newsp)
-		newsp = regs->sp;
-	/* 16-byte aligned stack mandatory on AArch64 */
-	if (newsp & 15)
-		return -EINVAL;
-	return do_fork(clone_flags, newsp, regs, 0, parent_tidptr, child_tidptr);
-}
-
-/*
- * sys_execve() executes a new program.
- */
-asmlinkage long sys_execve(const char __user *filenamei,
-			   const char __user *const __user *argv,
-			   const char __user *const __user *envp,
-			   struct pt_regs *regs)
-{
-	long error;
-	struct filename *filename;
-
-	filename = getname(filenamei);
-	error = PTR_ERR(filename);
-	if (IS_ERR(filename))
-		goto out;
-	error = do_execve(filename->name, argv, envp, regs);
-	putname(filename);
-out:
-	return error;
-}
-
-int kernel_execve(const char *filename,
-		  const char *const argv[],
-		  const char *const envp[])
-{
-	struct pt_regs regs;
-	int ret;
-
-	memset(&regs, 0, sizeof(struct pt_regs));
-	ret = do_execve(filename,
-			(const char __user *const __user *)argv,
-			(const char __user *const __user *)envp, &regs);
-	if (ret < 0)
-		goto out;
-
-	/*
-	 * Save argc to the register structure for userspace.
-	 */
-	regs.regs[0] = ret;
-
-	/*
-	 * We were successful.  We won't be returning to our caller, but
-	 * instead to user space by manipulating the kernel stack.
-	 */
-	asm(	"add	x0, %0, %1\n\t"
-		"mov	x1, %2\n\t"
-		"mov	x2, %3\n\t"
-		"bl	memmove\n\t"	/* copy regs to top of stack */
-		"mov	x27, #0\n\t"	/* not a syscall */
-		"mov	x28, %0\n\t"	/* thread structure */
-		"mov	sp, x0\n\t"	/* reposition stack pointer */
-		"b	ret_to_user"
-		:
-		: "r" (current_thread_info()),
-		  "Ir" (THREAD_START_SP - sizeof(regs)),
-		  "r" (&regs),
-		  "Ir" (sizeof(regs))
-		: "x0", "x1", "x2", "x27", "x28", "x30", "memory");
-
- out:
-	return ret;
+	return do_fork(clone_flags, newsp, current_pt_regs(), 0,
+		       parent_tidptr, child_tidptr);
 }
-EXPORT_SYMBOL(kernel_execve);
 
 asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
 			 unsigned long prot, unsigned long flags,
@@ -118,8 +50,6 @@ asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
 /*
  * Wrappers to pass the pt_regs argument.
  */
-#define sys_execve		sys_execve_wrapper
-#define sys_clone		sys_clone_wrapper
 #define sys_rt_sigreturn	sys_rt_sigreturn_wrapper
 #define sys_sigaltstack		sys_sigaltstack_wrapper
 
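[Editor's note] sys_clone() (and the compat fork paths below) can drop the explicit struct pt_regs * argument — and the assembly wrappers that used to pass sp — because current_pt_regs() recovers the saved user register frame from the current task's kernel stack. A hedged, paraphrased sketch of the idea; the exact definitions live in the generic ptrace header and the arm64 processor/thread_info headers, and may differ in detail:

/* Sketch only: how a syscall can reach its own saved user registers
 * without having them passed in explicitly. */
#define current_pt_regs()	task_pt_regs(current)

/* On arm64 the user pt_regs sit at the top of the task's kernel stack: */
#define task_pt_regs(p)							\
	((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)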
diff --git a/arch/arm64/kernel/sys32.S b/arch/arm64/kernel/sys32.S
index 54c4aec47a0..7ef59e9245e 100644
--- a/arch/arm64/kernel/sys32.S
+++ b/arch/arm64/kernel/sys32.S
@@ -26,25 +26,6 @@
 /*
  * System call wrappers for the AArch32 compatibility layer.
  */
-compat_sys_fork_wrapper:
-	mov	x0, sp
-	b	compat_sys_fork
-ENDPROC(compat_sys_fork_wrapper)
-
-compat_sys_vfork_wrapper:
-	mov	x0, sp
-	b	compat_sys_vfork
-ENDPROC(compat_sys_vfork_wrapper)
-
-compat_sys_execve_wrapper:
-	mov	x3, sp
-	b	compat_sys_execve
-ENDPROC(compat_sys_execve_wrapper)
-
-compat_sys_clone_wrapper:
-	mov	x5, sp
-	b	compat_sys_clone
-ENDPROC(compat_sys_clone_wrapper)
 
 compat_sys_sigreturn_wrapper:
 	mov	x0, sp
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
index 906e3bd270b..6fabc1912da 100644
--- a/arch/arm64/kernel/sys_compat.c
+++ b/arch/arm64/kernel/sys_compat.c
@@ -28,43 +28,15 @@
 #include <asm/cacheflush.h>
 #include <asm/unistd32.h>
 
-asmlinkage int compat_sys_fork(struct pt_regs *regs)
+asmlinkage int compat_sys_fork(void)
 {
-	return do_fork(SIGCHLD, regs->compat_sp, regs, 0, NULL, NULL);
+	return do_fork(SIGCHLD, 0, current_pt_regs(), 0, NULL, NULL);
 }
 
-asmlinkage int compat_sys_clone(unsigned long clone_flags, unsigned long newsp,
-			     int __user *parent_tidptr, int tls_val,
-			     int __user *child_tidptr, struct pt_regs *regs)
+asmlinkage int compat_sys_vfork(void)
 {
-	if (!newsp)
-		newsp = regs->compat_sp;
-
-	return do_fork(clone_flags, newsp, regs, 0, parent_tidptr, child_tidptr);
-}
-
-asmlinkage int compat_sys_vfork(struct pt_regs *regs)
-{
-	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->compat_sp,
-		       regs, 0, NULL, NULL);
-}
-
-asmlinkage int compat_sys_execve(const char __user *filenamei,
-				 compat_uptr_t argv, compat_uptr_t envp,
-				 struct pt_regs *regs)
-{
-	int error;
-	struct filename *filename;
-
-	filename = getname(filenamei);
-	error = PTR_ERR(filename);
-	if (IS_ERR(filename))
-		goto out;
-	error = compat_do_execve(filename->name, compat_ptr(argv),
-				 compat_ptr(envp), regs);
-	putname(filename);
-out:
-	return error;
+	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, 0,
+		       current_pt_regs(), 0, NULL, NULL);
 }
 
 asmlinkage int compat_sys_sched_rr_get_interval(compat_pid_t pid,
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 17948fc7d66..ba457943a16 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -28,6 +28,7 @@
 #include <linux/sched.h>
 #include <linux/signal.h>
 #include <linux/slab.h>
+#include <linux/timekeeper_internal.h>
 #include <linux/vmalloc.h>
 
 #include <asm/cacheflush.h>
@@ -222,11 +223,10 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
 /*
  * Update the vDSO data page to keep in sync with kernel timekeeping.
  */
-void update_vsyscall(struct timespec *ts, struct timespec *wtm,
-		     struct clocksource *clock, u32 mult)
+void update_vsyscall(struct timekeeper *tk)
 {
 	struct timespec xtime_coarse;
-	u32 use_syscall = strcmp(clock->name, "arch_sys_counter");
+	u32 use_syscall = strcmp(tk->clock->name, "arch_sys_counter");
 
 	++vdso_data->tb_seq_count;
 	smp_wmb();
@@ -237,13 +237,13 @@ void update_vsyscall(struct timespec *ts, struct timespec *wtm,
 	vdso_data->xtime_coarse_nsec	= xtime_coarse.tv_nsec;
 
 	if (!use_syscall) {
-		vdso_data->cs_cycle_last	= clock->cycle_last;
-		vdso_data->xtime_clock_sec	= ts->tv_sec;
-		vdso_data->xtime_clock_nsec	= ts->tv_nsec;
-		vdso_data->cs_mult		= mult;
-		vdso_data->cs_shift		= clock->shift;
-		vdso_data->wtm_clock_sec	= wtm->tv_sec;
-		vdso_data->wtm_clock_nsec	= wtm->tv_nsec;
+		vdso_data->cs_cycle_last	= tk->clock->cycle_last;
+		vdso_data->xtime_clock_sec	= tk->xtime_sec;
+		vdso_data->xtime_clock_nsec	= tk->xtime_nsec >> tk->shift;
+		vdso_data->cs_mult		= tk->mult;
+		vdso_data->cs_shift		= tk->shift;
+		vdso_data->wtm_clock_sec	= tk->wall_to_monotonic.tv_sec;
+		vdso_data->wtm_clock_nsec	= tk->wall_to_monotonic.tv_nsec;
 	}
 
 	smp_wmb();
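[Editor's note] update_vsyscall() above publishes the timekeeper fields under a sequence counter: tb_seq_count is bumped before and after the update, with write barriers in between. For context, here is a hedged sketch of the matching reader-side loop a vDSO gettimeofday implementation would use — illustrative C only (the real arm64 reader is assembly), with helper names invented here and the vdso_data layout assumed from the arm64 vdso data page header:

/* Illustrative reader for the vdso_data sequence counter written above:
 * retry whenever a concurrent update is in flight or completed mid-read. */
static u32 vdso_read_begin(const struct vdso_data *vd)
{
	u32 seq;

	do {
		seq = vd->tb_seq_count;
	} while (seq & 1);		/* odd: writer is mid-update */
	smp_rmb();			/* pairs with the writer's smp_wmb() */
	return seq;
}

static int vdso_read_retry(const struct vdso_data *vd, u32 start)
{
	smp_rmb();
	return vd->tb_seq_count != start;	/* changed: data was torn, retry */
}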