diff options
Diffstat (limited to 'arch/sh/kernel/ptrace_32.c')
-rw-r--r-- | arch/sh/kernel/ptrace_32.c | 183
1 files changed, 116 insertions, 67 deletions
diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c
index 3392e835a37..90a15d29fee 100644
--- a/arch/sh/kernel/ptrace_32.c
+++ b/arch/sh/kernel/ptrace_32.c
@@ -2,7 +2,7 @@
  * SuperH process tracing
  *
  * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
- * Copyright (C) 2002 - 2008 Paul Mundt
+ * Copyright (C) 2002 - 2009 Paul Mundt
  *
  * Audit support by Yuichi Nakamura <ynakam@hitachisoft.jp>
  *
@@ -17,7 +17,6 @@
 #include <linux/errno.h>
 #include <linux/ptrace.h>
 #include <linux/user.h>
-#include <linux/slab.h>
 #include <linux/security.h>
 #include <linux/signal.h>
 #include <linux/io.h>
@@ -26,6 +25,7 @@
 #include <linux/tracehook.h>
 #include <linux/elf.h>
 #include <linux/regset.h>
+#include <linux/hw_breakpoint.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/system.h>
@@ -34,6 +34,9 @@
 #include <asm/syscalls.h>
 #include <asm/fpu.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/syscalls.h>
+
 /*
  * This routine will get a word off of the process kernel stack.
  */
@@ -60,33 +63,64 @@ static inline int put_stack_long(struct task_struct *task, int offset,
 	return 0;
 }
 
-void user_enable_single_step(struct task_struct *child)
+void ptrace_triggered(struct perf_event *bp, int nmi,
+		      struct perf_sample_data *data, struct pt_regs *regs)
 {
-	/* Next scheduling will set up UBC */
-	if (child->thread.ubc_pc == 0)
-		ubc_usercnt += 1;
+	struct perf_event_attr attr;
 
-	child->thread.ubc_pc = get_stack_long(child,
-				offsetof(struct pt_regs, pc));
+	/*
+	 * Disable the breakpoint request here since ptrace has defined a
+	 * one-shot behaviour for breakpoint exceptions.
+	 */
+	attr = bp->attr;
+	attr.disabled = true;
+	modify_user_hw_breakpoint(bp, &attr);
+}
+
+static int set_single_step(struct task_struct *tsk, unsigned long addr)
+{
+	struct thread_struct *thread = &tsk->thread;
+	struct perf_event *bp;
+	struct perf_event_attr attr;
+
+	bp = thread->ptrace_bps[0];
+	if (!bp) {
+		ptrace_breakpoint_init(&attr);
+
+		attr.bp_addr = addr;
+		attr.bp_len = HW_BREAKPOINT_LEN_2;
+		attr.bp_type = HW_BREAKPOINT_R;
+
+		bp = register_user_hw_breakpoint(&attr, ptrace_triggered, tsk);
+		if (IS_ERR(bp))
+			return PTR_ERR(bp);
+
+		thread->ptrace_bps[0] = bp;
+	} else {
+		int err;
+
+		attr = bp->attr;
+		attr.bp_addr = addr;
+		err = modify_user_hw_breakpoint(bp, &attr);
+		if (unlikely(err))
+			return err;
+	}
+
+	return 0;
+}
+
+void user_enable_single_step(struct task_struct *child)
+{
+	unsigned long pc = get_stack_long(child, offsetof(struct pt_regs, pc));
 
 	set_tsk_thread_flag(child, TIF_SINGLESTEP);
+
+	set_single_step(child, pc);
 }
 
 void user_disable_single_step(struct task_struct *child)
 {
 	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
-
-	/*
-	 * Ensure the UBC is not programmed at the next context switch.
-	 *
-	 * Normally this is not needed but there are sequences such as
-	 * singlestep, signal delivery, and continue that leave the
-	 * ubc_pc non-zero leading to spurious SIGTRAPs.
-	 */
-	if (child->thread.ubc_pc != 0) {
-		ubc_usercnt -= 1;
-		child->thread.ubc_pc = 0;
-	}
 }
 
 /*
@@ -160,10 +194,10 @@ int fpregs_get(struct task_struct *target,
 
 	if ((boot_cpu_data.flags & CPU_HAS_FPU))
 		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-					   &target->thread.fpu.hard, 0, -1);
+					   &target->thread.xstate->hardfpu, 0, -1);
 
 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-				   &target->thread.fpu.soft, 0, -1);
+				   &target->thread.xstate->softfpu, 0, -1);
 }
 
 static int fpregs_set(struct task_struct *target,
@@ -181,10 +215,10 @@ static int fpregs_set(struct task_struct *target,
 
 	if ((boot_cpu_data.flags & CPU_HAS_FPU))
 		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-					  &target->thread.fpu.hard, 0, -1);
+					  &target->thread.xstate->hardfpu, 0, -1);
 
 	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-				  &target->thread.fpu.soft, 0, -1);
+				  &target->thread.xstate->softfpu, 0, -1);
 }
 
 static int fpregs_active(struct task_struct *target,
@@ -240,6 +274,33 @@ static int dspregs_active(struct task_struct *target,
 }
 #endif
 
+const struct pt_regs_offset regoffset_table[] = {
+	REGS_OFFSET_NAME(0),
+	REGS_OFFSET_NAME(1),
+	REGS_OFFSET_NAME(2),
+	REGS_OFFSET_NAME(3),
+	REGS_OFFSET_NAME(4),
+	REGS_OFFSET_NAME(5),
+	REGS_OFFSET_NAME(6),
+	REGS_OFFSET_NAME(7),
+	REGS_OFFSET_NAME(8),
+	REGS_OFFSET_NAME(9),
+	REGS_OFFSET_NAME(10),
+	REGS_OFFSET_NAME(11),
+	REGS_OFFSET_NAME(12),
+	REGS_OFFSET_NAME(13),
+	REGS_OFFSET_NAME(14),
+	REGS_OFFSET_NAME(15),
+	REG_OFFSET_NAME(pc),
+	REG_OFFSET_NAME(pr),
+	REG_OFFSET_NAME(sr),
+	REG_OFFSET_NAME(gbr),
+	REG_OFFSET_NAME(mach),
+	REG_OFFSET_NAME(macl),
+	REG_OFFSET_NAME(tra),
+	REG_OFFSET_END,
+};
+
 /*
  * These are our native regset flavours.
  */
@@ -304,9 +365,9 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
 	return &user_sh_native_view;
 }
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+		 unsigned long addr, unsigned long data)
 {
-	struct user * dummy = NULL;
 	unsigned long __user *datap = (unsigned long __user *)data;
 	int ret;
 
@@ -322,17 +383,20 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 		ret = -EIO;
 		if (addr < sizeof(struct pt_regs))
 			tmp = get_stack_long(child, addr);
-		else if (addr >= (long) &dummy->fpu &&
-			 addr < (long) &dummy->u_fpvalid) {
+		else if (addr >= offsetof(struct user, fpu) &&
+			 addr < offsetof(struct user, u_fpvalid)) {
 			if (!tsk_used_math(child)) {
-				if (addr == (long)&dummy->fpu.fpscr)
+				if (addr == offsetof(struct user, fpu.fpscr))
 					tmp = FPSCR_INIT;
 				else
 					tmp = 0;
-			} else
-				tmp = ((long *)&child->thread.fpu)
-					[(addr - (long)&dummy->fpu) >> 2];
-		} else if (addr == (long) &dummy->u_fpvalid)
+			} else {
+				unsigned long index;
+				index = addr - offsetof(struct user, fpu);
+				tmp = ((unsigned long *)child->thread.xstate)
+					[index >> 2];
+			}
+		} else if (addr == offsetof(struct user, u_fpvalid))
 			tmp = !!tsk_used_math(child);
 		else if (addr == PT_TEXT_ADDR)
 			tmp = child->mm->start_code;
@@ -356,13 +420,15 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 		ret = -EIO;
 		if (addr < sizeof(struct pt_regs))
 			ret = put_stack_long(child, addr, data);
-		else if (addr >= (long) &dummy->fpu &&
-			 addr < (long) &dummy->u_fpvalid) {
+		else if (addr >= offsetof(struct user, fpu) &&
+			 addr < offsetof(struct user, u_fpvalid)) {
+			unsigned long index;
+			index = addr - offsetof(struct user, fpu);
 			set_stopped_child_used_math(child);
-			((long *)&child->thread.fpu)
-				[(addr - (long)&dummy->fpu) >> 2] = data;
+			((unsigned long *)child->thread.xstate)
+				[index >> 2] = data;
 			ret = 0;
-		} else if (addr == (long) &dummy->u_fpvalid) {
+		} else if (addr == offsetof(struct user, u_fpvalid)) {
 			conditional_stopped_child_used_math(data, child);
 			ret = 0;
 		}
@@ -372,58 +438,35 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 		return copy_regset_to_user(child, &user_sh_native_view,
 					   REGSET_GENERAL,
 					   0, sizeof(struct pt_regs),
-					   (void __user *)data);
+					   datap);
 	case PTRACE_SETREGS:
 		return copy_regset_from_user(child, &user_sh_native_view,
 					     REGSET_GENERAL,
 					     0, sizeof(struct pt_regs),
-					     (const void __user *)data);
+					     datap);
 #ifdef CONFIG_SH_FPU
 	case PTRACE_GETFPREGS:
 		return copy_regset_to_user(child, &user_sh_native_view,
 					   REGSET_FPU,
 					   0, sizeof(struct user_fpu_struct),
-					   (void __user *)data);
+					   datap);
 	case PTRACE_SETFPREGS:
 		return copy_regset_from_user(child, &user_sh_native_view,
 					     REGSET_FPU,
 					     0, sizeof(struct user_fpu_struct),
-					     (const void __user *)data);
+					     datap);
 #endif
 #ifdef CONFIG_SH_DSP
 	case PTRACE_GETDSPREGS:
 		return copy_regset_to_user(child, &user_sh_native_view,
 					   REGSET_DSP, 0,
					   sizeof(struct pt_dspregs),
-					   (void __user *)data);
+					   datap);
 	case PTRACE_SETDSPREGS:
 		return copy_regset_from_user(child, &user_sh_native_view,
 					     REGSET_DSP, 0,
 					     sizeof(struct pt_dspregs),
-					     (const void __user *)data);
-#endif
-#ifdef CONFIG_BINFMT_ELF_FDPIC
-	case PTRACE_GETFDPIC: {
-		unsigned long tmp = 0;
-
-		switch (addr) {
-		case PTRACE_GETFDPIC_EXEC:
-			tmp = child->mm->context.exec_fdpic_loadmap;
-			break;
-		case PTRACE_GETFDPIC_INTERP:
-			tmp = child->mm->context.interp_fdpic_loadmap;
-			break;
-		default:
-			break;
-		}
-
-		ret = 0;
-		if (put_user(tmp, datap)) {
-			ret = -EFAULT;
-			break;
-		}
-		break;
-	}
+					     datap);
 #endif
 	default:
 		ret = ptrace_request(child, request, addr, data);
@@ -459,6 +502,9 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
 		 */
 		ret = -1L;
 
+	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+		trace_sys_enter(regs, regs->regs[0]);
+
 	if (unlikely(current->audit_context))
 		audit_syscall_entry(audit_arch(), regs->regs[3],
 				    regs->regs[4], regs->regs[5],
@@ -475,6 +521,9 @@ asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
 		audit_syscall_exit(AUDITSC_RESULT(regs->regs[0]),
 				   regs->regs[0]);
 
+	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+		trace_sys_exit(regs, regs->regs[0]);
+
 	step = test_thread_flag(TIF_SINGLESTEP);
 	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
 		tracehook_report_syscall_exit(regs, step);
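
Illustrative note, not part of the diff above: the single-step rework keeps the userspace interface unchanged. A tracer still issues PTRACE_SINGLESTEP; it is user_enable_single_step() that now arms a one-shot perf hw_breakpoint (disarmed again in ptrace_triggered()) instead of programming the UBC via thread.ubc_pc. The sketch below shows a minimal tracer that exercises this path; the choice of /bin/true as the tracee and the fixed step count are arbitrary.

/* Minimal tracer sketch (userspace, illustrative only). */
#include <stdio.h>
#include <unistd.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

int main(void)
{
	int status = 0;
	pid_t child = fork();

	if (child < 0)
		return 1;

	if (child == 0) {
		/* Tracee: request tracing, then exec an arbitrary target. */
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		execl("/bin/true", "true", (char *)NULL);
		_exit(127);
	}

	/* Tracer: wait for the initial exec stop. */
	waitpid(child, &status, 0);

	/* Step a handful of instructions; each request lands in
	 * user_enable_single_step() on the kernel side. */
	for (int i = 0; i < 5 && !WIFEXITED(status); i++) {
		if (ptrace(PTRACE_SINGLESTEP, child, NULL, NULL) < 0) {
			perror("PTRACE_SINGLESTEP");
			break;
		}
		if (waitpid(child, &status, 0) < 0)
			break;
	}

	if (!WIFEXITED(status))
		ptrace(PTRACE_DETACH, child, NULL, NULL);

	return 0;
}

Each PTRACE_SINGLESTEP resumes the child for one instruction and reports the resulting SIGTRAP stop back to the tracer; with this change the trap comes from the one-shot hardware breakpoint rather than from the old UBC bookkeeping.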