Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--   arch/arm/kernel/asm-offsets.c    |  2
-rw-r--r--   arch/arm/kernel/calls.S          |  1
-rw-r--r--   arch/arm/kernel/entry-common.S   |  9
-rw-r--r--   arch/arm/kernel/hw_breakpoint.c  | 62
-rw-r--r--   arch/arm/kernel/machine_kexec.c  | 29
-rw-r--r--   arch/arm/kernel/ptrace.c         | 19
-rw-r--r--   arch/arm/kernel/sched_clock.c    |  8
-rw-r--r--   arch/arm/kernel/smp.c            | 13
-rw-r--r--   arch/arm/kernel/smp_twd.c        | 48
-rw-r--r--   arch/arm/kernel/traps.c          | 11
10 files changed, 167 insertions, 35 deletions
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 1429d8989fb..c985b481192 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -59,10 +59,12 @@ int main(void)
   DEFINE(TI_USED_CP,		offsetof(struct thread_info, used_cp));
   DEFINE(TI_TP_VALUE,		offsetof(struct thread_info, tp_value));
   DEFINE(TI_FPSTATE,		offsetof(struct thread_info, fpstate));
+#ifdef CONFIG_VFP
   DEFINE(TI_VFPSTATE,		offsetof(struct thread_info, vfpstate));
 #ifdef CONFIG_SMP
   DEFINE(VFP_CPU,		offsetof(union vfp_state, hard.cpu));
 #endif
+#endif
 #ifdef CONFIG_ARM_THUMBEE
   DEFINE(TI_THUMBEE_STATE,	offsetof(struct thread_info, thumbee_state));
 #endif
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 463ff4a0ec8..e337879595e 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -387,6 +387,7 @@
 /* 375 */	CALL(sys_setns)
 		CALL(sys_process_vm_readv)
 		CALL(sys_process_vm_writev)
+		CALL(sys_ni_syscall)	/* reserved for sys_kcmp */
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 978eac57e04..f45987037bf 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -94,6 +94,15 @@ ENDPROC(ret_from_fork)
 
 	.equ NR_syscalls,0
 #define CALL(x) .equ NR_syscalls,NR_syscalls+1
 #include "calls.S"
+
+/*
+ * Ensure that the system call table is equal to __NR_syscalls,
+ * which is the value the rest of the system sees
+ */
+.ifne NR_syscalls - __NR_syscalls
+.error "__NR_syscalls is not equal to the size of the syscall table"
+.endif
+
 #undef CALL
 #define CALL(x) .long x
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index ba386bd9410..281bf330124 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -159,6 +159,12 @@ static int debug_arch_supported(void)
 		arch >= ARM_DEBUG_ARCH_V7_1;
 }
 
+/* Can we determine the watchpoint access type from the fsr? */
+static int debug_exception_updates_fsr(void)
+{
+	return 0;
+}
+
 /* Determine number of WRP registers available. */
 static int get_num_wrp_resources(void)
 {
@@ -604,13 +610,14 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
 		/* Aligned */
 		break;
 	case 1:
-		/* Allow single byte watchpoint. */
-		if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
-			break;
 	case 2:
 		/* Allow halfword watchpoints and breakpoints. */
 		if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
 			break;
+	case 3:
+		/* Allow single byte watchpoint. */
+		if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
+			break;
 	default:
 		ret = -EINVAL;
 		goto out;
@@ -619,18 +626,35 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
 	info->address &= ~alignment_mask;
 	info->ctrl.len <<= offset;
 
-	/*
-	 * Currently we rely on an overflow handler to take
-	 * care of single-stepping the breakpoint when it fires.
-	 * In the case of userspace breakpoints on a core with V7 debug,
-	 * we can use the mismatch feature as a poor-man's hardware
-	 * single-step, but this only works for per-task breakpoints.
-	 */
-	if (!bp->overflow_handler && (arch_check_bp_in_kernelspace(bp) ||
-	    !core_has_mismatch_brps() || !bp->hw.bp_target)) {
-		pr_warning("overflow handler required but none found\n");
-		ret = -EINVAL;
+	if (!bp->overflow_handler) {
+		/*
+		 * Mismatch breakpoints are required for single-stepping
+		 * breakpoints.
+		 */
+		if (!core_has_mismatch_brps())
+			return -EINVAL;
+
+		/* We don't allow mismatch breakpoints in kernel space. */
+		if (arch_check_bp_in_kernelspace(bp))
+			return -EPERM;
+
+		/*
+		 * Per-cpu breakpoints are not supported by our stepping
+		 * mechanism.
+		 */
+		if (!bp->hw.bp_target)
+			return -EINVAL;
+
+		/*
+		 * We only support specific access types if the fsr
+		 * reports them.
+		 */
+		if (!debug_exception_updates_fsr() &&
+		    (info->ctrl.type == ARM_BREAKPOINT_LOAD ||
+		     info->ctrl.type == ARM_BREAKPOINT_STORE))
+			return -EINVAL;
 	}
+
 out:
 	return ret;
 }
@@ -706,10 +730,12 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
 			goto unlock;
 
 		/* Check that the access type matches. */
-		access = (fsr & ARM_FSR_ACCESS_MASK) ? HW_BREAKPOINT_W :
-			 HW_BREAKPOINT_R;
-		if (!(access & hw_breakpoint_type(wp)))
-			goto unlock;
+		if (debug_exception_updates_fsr()) {
+			access = (fsr & ARM_FSR_ACCESS_MASK) ?
+				  HW_BREAKPOINT_W : HW_BREAKPOINT_R;
+			if (!(access & hw_breakpoint_type(wp)))
+				goto unlock;
+		}
 
 		/* We have a winner. */
 		info->trigger = addr;
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index dfcdb9f7c12..e29c3337ca8 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -8,7 +8,9 @@
 #include <linux/reboot.h>
 #include <linux/io.h>
 #include <linux/irq.h>
+#include <linux/memblock.h>
 #include <asm/pgtable.h>
+#include <linux/of_fdt.h>
 #include <asm/pgalloc.h>
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
@@ -32,6 +34,29 @@ static atomic_t waiting_for_crash_ipi;
 
 int machine_kexec_prepare(struct kimage *image)
 {
+	struct kexec_segment *current_segment;
+	__be32 header;
+	int i, err;
+
+	/*
+	 * No segment at default ATAGs address. try to locate
+	 * a dtb using magic.
+	 */
+	for (i = 0; i < image->nr_segments; i++) {
+		current_segment = &image->segment[i];
+
+		err = memblock_is_region_memory(current_segment->mem,
+						current_segment->memsz);
+		if (err)
+			return - EINVAL;
+
+		err = get_user(header, (__be32*)current_segment->buf);
+		if (err)
+			return err;
+
+		if (be32_to_cpu(header) == OF_DT_HEADER)
+			kexec_boot_atags = current_segment->mem;
+	}
 	return 0;
 }
@@ -122,7 +147,9 @@ void machine_kexec(struct kimage *image)
 	kexec_start_address = image->start;
 	kexec_indirection_page = page_list;
 	kexec_mach_type = machine_arch_type;
-	kexec_boot_atags = image->start - KEXEC_ARM_ZIMAGE_OFFSET + KEXEC_ARM_ATAGS_OFFSET;
+	if (!kexec_boot_atags)
+		kexec_boot_atags = image->start - KEXEC_ARM_ZIMAGE_OFFSET + KEXEC_ARM_ATAGS_OFFSET;
+
 
 	/* copy our kernel relocation code to the control code page */
 	memcpy(reboot_code_buffer,
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 3e0fc5f7ed4..739db3a1b2d 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -30,6 +30,9 @@
 #include <asm/pgtable.h>
 #include <asm/traps.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/syscalls.h>
+
 #define REG_PC	15
 #define REG_PSR	16
 /*
@@ -918,11 +921,11 @@ static int ptrace_syscall_trace(struct pt_regs *regs, int scno,
 {
 	unsigned long ip;
 
+	current_thread_info()->syscall = scno;
+
 	if (!test_thread_flag(TIF_SYSCALL_TRACE))
 		return scno;
 
-	current_thread_info()->syscall = scno;
-
 	/*
 	 * IP is used to denote syscall entry/exit:
 	 * IP = 0 -> entry, =1 -> exit
@@ -941,15 +944,19 @@ static int ptrace_syscall_trace(struct pt_regs *regs, int scno,
 
 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
 {
-	int ret = ptrace_syscall_trace(regs, scno, PTRACE_SYSCALL_ENTER);
+	scno = ptrace_syscall_trace(regs, scno, PTRACE_SYSCALL_ENTER);
+	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
+		trace_sys_enter(regs, scno);
 	audit_syscall_entry(AUDIT_ARCH_ARM, scno, regs->ARM_r0, regs->ARM_r1,
 			    regs->ARM_r2, regs->ARM_r3);
-	return ret;
+	return scno;
 }
 
 asmlinkage int syscall_trace_exit(struct pt_regs *regs, int scno)
 {
-	int ret = ptrace_syscall_trace(regs, scno, PTRACE_SYSCALL_EXIT);
+	scno = ptrace_syscall_trace(regs, scno, PTRACE_SYSCALL_EXIT);
+	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
+		trace_sys_exit(regs, scno);
 	audit_syscall_exit(regs);
-	return ret;
+	return scno;
 }
diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c
index f4515393248..e21bac20d90 100644
--- a/arch/arm/kernel/sched_clock.c
+++ b/arch/arm/kernel/sched_clock.c
@@ -9,6 +9,7 @@
 #include <linux/init.h>
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
+#include <linux/moduleparam.h>
 #include <linux/sched.h>
 #include <linux/syscore_ops.h>
 #include <linux/timer.h>
@@ -27,6 +28,9 @@ struct clock_data {
 
 static void sched_clock_poll(unsigned long wrap_ticks);
 static DEFINE_TIMER(sched_clock_timer, sched_clock_poll, 0, 0);
+static int irqtime = -1;
+
+core_param(irqtime, irqtime, int, 0400);
 
 static struct clock_data cd = {
 	.mult	= NSEC_PER_SEC / HZ,
@@ -157,6 +161,10 @@ void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
 	 */
 	cd.epoch_ns = 0;
 
+	/* Enable IRQ time accounting if we have a fast enough sched_clock */
+	if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
+		enable_sched_clock_irqtime();
+
 	pr_debug("Registered %pF as sched_clock source\n", read);
 }
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index ebd8ad274d7..d98c37e97d9 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -51,7 +51,8 @@ struct secondary_data secondary_data;
 
 enum ipi_msg_type {
-	IPI_TIMER = 2,
+	IPI_WAKEUP,
+	IPI_TIMER,
 	IPI_RESCHEDULE,
 	IPI_CALL_FUNC,
 	IPI_CALL_FUNC_SINGLE,
@@ -347,7 +348,8 @@ void arch_send_call_function_single_ipi(int cpu)
 }
 
 static const char *ipi_types[NR_IPI] = {
-#define S(x,s)	[x - IPI_TIMER] = s
+#define S(x,s)	[x] = s
+	S(IPI_WAKEUP, "CPU wakeup interrupts"),
 	S(IPI_TIMER, "Timer broadcast interrupts"),
 	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
 	S(IPI_CALL_FUNC, "Function call interrupts"),
@@ -500,10 +502,13 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 	unsigned int cpu = smp_processor_id();
 	struct pt_regs *old_regs = set_irq_regs(regs);
 
-	if (ipinr >= IPI_TIMER && ipinr < IPI_TIMER + NR_IPI)
-		__inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_TIMER]);
+	if (ipinr < NR_IPI)
+		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
 
 	switch (ipinr) {
+	case IPI_WAKEUP:
+		break;
+
 	case IPI_TIMER:
 		irq_enter();
 		ipi_timer();
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index fef42b21cec..e1f906989bb 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -11,7 +11,6 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/clk.h>
-#include <linux/cpufreq.h>
 #include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/err.h>
@@ -96,7 +95,52 @@ static void twd_timer_stop(struct clock_event_device *clk)
 	disable_percpu_irq(clk->irq);
 }
 
-#ifdef CONFIG_CPU_FREQ
+#ifdef CONFIG_COMMON_CLK
+
+/*
+ * Updates clockevent frequency when the cpu frequency changes.
+ * Called on the cpu that is changing frequency with interrupts disabled.
+ */
+static void twd_update_frequency(void *new_rate)
+{
+	twd_timer_rate = *((unsigned long *) new_rate);
+
+	clockevents_update_freq(*__this_cpu_ptr(twd_evt), twd_timer_rate);
+}
+
+static int twd_rate_change(struct notifier_block *nb,
+	unsigned long flags, void *data)
+{
+	struct clk_notifier_data *cnd = data;
+
+	/*
+	 * The twd clock events must be reprogrammed to account for the new
+	 * frequency. The timer is local to a cpu, so cross-call to the
+	 * changing cpu.
+	 */
+	if (flags == POST_RATE_CHANGE)
+		smp_call_function(twd_update_frequency,
+				  (void *)&cnd->new_rate, 1);
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block twd_clk_nb = {
+	.notifier_call = twd_rate_change,
+};
+
+static int twd_clk_init(void)
+{
+	if (twd_evt && *__this_cpu_ptr(twd_evt) && !IS_ERR(twd_clk))
+		return clk_notifier_register(twd_clk, &twd_clk_nb);
+
+	return 0;
+}
+core_initcall(twd_clk_init);
+
+#elif defined (CONFIG_CPU_FREQ)
+
+#include <linux/cpufreq.h>
 
 /*
  * Updates clockevent frequency when the cpu frequency changes.
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index f7945218b8c..b0179b89a04 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -420,20 +420,23 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
 #endif
 		instr = *(u32 *) pc;
 	} else if (thumb_mode(regs)) {
-		get_user(instr, (u16 __user *)pc);
+		if (get_user(instr, (u16 __user *)pc))
+			goto die_sig;
 		if (is_wide_instruction(instr)) {
 			unsigned int instr2;
-			get_user(instr2, (u16 __user *)pc+1);
+			if (get_user(instr2, (u16 __user *)pc+1))
+				goto die_sig;
 			instr <<= 16;
 			instr |= instr2;
 		}
-	} else {
-		get_user(instr, (u32 __user *)pc);
+	} else if (get_user(instr, (u32 __user *)pc)) {
+		goto die_sig;
 	}
 
 	if (call_undef_hook(regs, instr) == 0)
 		return;
 
+die_sig:
 #ifdef CONFIG_DEBUG_USER
 	if (user_debug & UDBG_UNDEFINED) {
 		printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n",