Diffstat (limited to 'arch/i386/kernel/kprobes.c')
-rw-r--r--  arch/i386/kernel/kprobes.c  176
1 file changed, 160 insertions(+), 16 deletions(-)
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index 59ff9b45506..3762f6b35ab 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -23,6 +23,9 @@
* Rusty Russell).
* 2004-July Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
* interface to access function arguments.
+ * 2005-May Hien Nguyen <hien@us.ibm.com>, Jim Keniston
+ * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
+ * <prasanna@in.ibm.com> added function-return probes.
*/
#include <linux/config.h>
@@ -30,15 +33,14 @@
#include <linux/ptrace.h>
#include <linux/spinlock.h>
#include <linux/preempt.h>
+#include <asm/cacheflush.h>
#include <asm/kdebug.h>
#include <asm/desc.h>
-/* kprobe_status settings */
-#define KPROBE_HIT_ACTIVE 0x00000001
-#define KPROBE_HIT_SS 0x00000002
-
static struct kprobe *current_kprobe;
static unsigned long kprobe_status, kprobe_old_eflags, kprobe_saved_eflags;
+static struct kprobe *kprobe_prev;
+static unsigned long kprobe_status_prev, kprobe_old_eflags_prev, kprobe_saved_eflags_prev;
static struct pt_regs jprobe_saved_regs;
static long *jprobe_saved_esp;
/* copy of the kernel stack at the probe fire time */
@@ -68,16 +70,50 @@ int arch_prepare_kprobe(struct kprobe *p)
void arch_copy_kprobe(struct kprobe *p)
{
memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+ p->opcode = *p->addr;
}
-void arch_remove_kprobe(struct kprobe *p)
+void arch_arm_kprobe(struct kprobe *p)
{
+ *p->addr = BREAKPOINT_INSTRUCTION;
+ flush_icache_range((unsigned long) p->addr,
+ (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}
-static inline void disarm_kprobe(struct kprobe *p, struct pt_regs *regs)
+void arch_disarm_kprobe(struct kprobe *p)
{
*p->addr = p->opcode;
- regs->eip = (unsigned long)p->addr;
+ flush_icache_range((unsigned long) p->addr,
+ (unsigned long) p->addr + sizeof(kprobe_opcode_t));
+}
+
+void arch_remove_kprobe(struct kprobe *p)
+{
+}
+
+static inline void save_previous_kprobe(void)
+{
+ kprobe_prev = current_kprobe;
+ kprobe_status_prev = kprobe_status;
+ kprobe_old_eflags_prev = kprobe_old_eflags;
+ kprobe_saved_eflags_prev = kprobe_saved_eflags;
+}
+
+static inline void restore_previous_kprobe(void)
+{
+ current_kprobe = kprobe_prev;
+ kprobe_status = kprobe_status_prev;
+ kprobe_old_eflags = kprobe_old_eflags_prev;
+ kprobe_saved_eflags = kprobe_saved_eflags_prev;
+}
+
+static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs)
+{
+ current_kprobe = p;
+ kprobe_saved_eflags = kprobe_old_eflags
+ = (regs->eflags & (TF_MASK | IF_MASK));
+ if (is_IF_modifier(p->opcode))
+ kprobe_saved_eflags &= ~IF_MASK;
}
static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
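
For orientation (not part of the patch): the arch hooks above are driven by the generic kprobes API, where register_kprobe() prepares and copies the probed instruction and then plants the int3 breakpoint via arch_arm_kprobe(). A minimal, hypothetical module using it might look like the sketch below; the probe address is a placeholder and all names are invented.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>

/* Illustrative pre_handler: runs when the int3 planted by
 * arch_arm_kprobe() fires, before the original instruction is
 * single-stepped out of p->ainsn.insn. */
static int sample_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	printk("kprobe hit: addr=%p eip=%lx eflags=%lx\n",
	       p->addr, regs->eip, regs->eflags);
	return 0;	/* 0 => let prepare_singlestep() run the saved insn */
}

static struct kprobe sample_kp = {
	.pre_handler = sample_pre_handler,
};

static int __init sample_kp_init(void)
{
	/* Placeholder: resolve the probed function's address yourself,
	 * e.g. from System.map; this constant is purely illustrative. */
	sample_kp.addr = (kprobe_opcode_t *) 0xc0123456;
	return register_kprobe(&sample_kp);
}

static void __exit sample_kp_exit(void)
{
	unregister_kprobe(&sample_kp);
}

module_init(sample_kp_init);
module_exit(sample_kp_exit);
MODULE_LICENSE("GPL");
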
@@ -91,6 +127,50 @@ static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
regs->eip = (unsigned long)&p->ainsn.insn;
}
+struct task_struct *arch_get_kprobe_task(void *ptr)
+{
+ return ((struct thread_info *) (((unsigned long) ptr) &
+ (~(THREAD_SIZE -1))))->task;
+}
+
+void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs)
+{
+ unsigned long *sara = (unsigned long *)&regs->esp;
+ struct kretprobe_instance *ri;
+ static void *orig_ret_addr;
+
+ /*
+ * Save the return address when the return probe hits
+ * the first time, and use it to populate the (kretprobe
+ * instance)->ret_addr for subsequent return probes at
+ * the same address, since the stack slot will already
+ * hold the address of kretprobe_trampoline by then.
+ */
+ if (((void*) *sara) != kretprobe_trampoline)
+ orig_ret_addr = (void*) *sara;
+
+ if ((ri = get_free_rp_inst(rp)) != NULL) {
+ ri->rp = rp;
+ ri->stack_addr = sara;
+ ri->ret_addr = orig_ret_addr;
+ add_rp_inst(ri);
+ /* Replace the return addr with trampoline addr */
+ *sara = (unsigned long) &kretprobe_trampoline;
+ } else {
+ rp->nmissed++;
+ }
+}
+
+void arch_kprobe_flush_task(struct task_struct *tk)
+{
+ struct kretprobe_instance *ri;
+ while ((ri = get_rp_inst_tsk(tk)) != NULL) {
+ *((unsigned long *)(ri->stack_addr)) =
+ (unsigned long) ri->ret_addr;
+ recycle_rp_inst(ri);
+ }
+}
+
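
A note on the "sara" ("stack address of the return address") pointer used above: it relies on an i386 trap-frame quirk. The hypothetical helper below (not in the patch) spells out the assumption.

#include <linux/ptrace.h>

/* Illustration only.  A trap taken in kernel mode on i386 does not push
 * SS/ESP, so struct pt_regs ends exactly where the interrupted kernel
 * stack begins: &regs->esp is the address of the interrupted stack's
 * top word.  When the entry kprobe fires, that word is the probed
 * function's return address, which arch_prepare_kretprobe() overwrites
 * with &kretprobe_trampoline.  By the time trampoline_probe_handler()
 * runs, the "ret" has already popped that word, hence the "- 1" used
 * there to find the slot again. */
static inline unsigned long *stack_addr_of_return_addr(struct pt_regs *regs)
{
	return (unsigned long *) &regs->esp;
}
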
/*
* Interrupts are disabled on entry as trap3 is an interrupt gate and they
* remain disabled throughout this function.
@@ -127,8 +207,18 @@ static int kprobe_handler(struct pt_regs *regs)
unlock_kprobes();
goto no_kprobe;
}
- disarm_kprobe(p, regs);
- ret = 1;
+ /* We have re-entered kprobe_handler() because another
+ * probe was hit while we were still inside a handler.
+ * Save the original kprobe's state and just single-step
+ * the new probe's instruction without calling any of its
+ * user handlers.
+ */
+ save_previous_kprobe();
+ set_current_kprobe(p, regs);
+ p->nmissed++;
+ prepare_singlestep(p, regs);
+ kprobe_status = KPROBE_REENTER;
+ return 1;
} else {
p = current_kprobe;
if (p->break_handler && p->break_handler(p, regs)) {
@@ -163,11 +253,7 @@ static int kprobe_handler(struct pt_regs *regs)
}
kprobe_status = KPROBE_HIT_ACTIVE;
- current_kprobe = p;
- kprobe_saved_eflags = kprobe_old_eflags
- = (regs->eflags & (TF_MASK | IF_MASK));
- if (is_IF_modifier(p->opcode))
- kprobe_saved_eflags &= ~IF_MASK;
+ set_current_kprobe(p, regs);
if (p->pre_handler && p->pre_handler(p, regs))
/* handler has already set things up, so skip ss setup */
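
The re-entry path added above is easiest to see with a hypothetical handler (not in the patch) whose own work can hit another probe:

#include <linux/kernel.h>
#include <linux/kprobes.h>

/* If printk, or anything it calls, carries its own kprobe, that inner
 * breakpoint fires while we are still inside this handler.
 * kprobe_handler() then re-enters, stashes the outer probe's state with
 * save_previous_kprobe(), single-steps the inner instruction with no
 * user callbacks (only bumping its nmissed count), and
 * post_kprobe_handler() later calls restore_previous_kprobe(). */
static int noisy_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	printk("probe at %p, eip=%lx\n", p->addr, regs->eip);
	return 0;
}
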
@@ -184,6 +270,55 @@ no_kprobe:
}
/*
+ * For function-return probes, init_kprobes() establishes a probepoint
+ * here. When a retprobed function returns, this probe is hit and
+ * trampoline_probe_handler() runs, calling the kretprobe's handler.
+ */
+void kretprobe_trampoline_holder(void)
+{
+ asm volatile ( ".global kretprobe_trampoline\n"
+ "kretprobe_trampoline: \n"
+ "nop\n");
+}
+
+/*
+ * Called when we hit the probe point at kretprobe_trampoline
+ */
+int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ struct task_struct *tsk;
+ struct kretprobe_instance *ri;
+ struct hlist_head *head;
+ struct hlist_node *node;
+ unsigned long *sara = ((unsigned long *) &regs->esp) - 1;
+
+ tsk = arch_get_kprobe_task(sara);
+ head = kretprobe_inst_table_head(tsk);
+
+ hlist_for_each_entry(ri, node, head, hlist) {
+ if (ri->stack_addr == sara && ri->rp) {
+ if (ri->rp->handler)
+ ri->rp->handler(ri, regs);
+ }
+ }
+ return 0;
+}
+
+void trampoline_post_handler(struct kprobe *p, struct pt_regs *regs,
+ unsigned long flags)
+{
+ struct kretprobe_instance *ri;
+ /* RA already popped */
+ unsigned long *sara = ((unsigned long *)&regs->esp) - 1;
+
+ while ((ri = get_rp_inst(sara))) {
+ regs->eip = (unsigned long)ri->ret_addr;
+ recycle_rp_inst(ri);
+ }
+ regs->eflags &= ~TF_MASK;
+}
+
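
Putting the return-probe pieces together, a hedged sketch of a user of the new interface (names and the probed address are placeholders): the handler runs from trampoline_probe_handler() above, at which point regs describes the return site, so on i386 an integer return value is in regs->eax and the original return address is in ri->ret_addr.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>

static int sample_ret_handler(struct kretprobe_instance *ri,
			      struct pt_regs *regs)
{
	printk("returning to %p, retval=%lx\n", ri->ret_addr, regs->eax);
	return 0;
}

static struct kretprobe sample_rp = {
	.handler   = sample_ret_handler,
	.maxactive = 4,	/* kretprobe_instances get_free_rp_inst() may hand out */
};

static int __init sample_rp_init(void)
{
	/* Placeholder address of the function whose returns we probe. */
	sample_rp.kp.addr = (kprobe_opcode_t *) 0xc0123456;
	return register_kretprobe(&sample_rp);
}

static void __exit sample_rp_exit(void)
{
	unregister_kretprobe(&sample_rp);
}

module_init(sample_rp_init);
module_exit(sample_rp_exit);
MODULE_LICENSE("GPL");
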
+/*
* Called after single-stepping. p->addr is the address of the
* instruction whose first byte has been replaced by the "int 3"
* instruction. To avoid the SMP problems that can occur when we
@@ -263,13 +398,22 @@ static inline int post_kprobe_handler(struct pt_regs *regs)
if (!kprobe_running())
return 0;
- if (current_kprobe->post_handler)
+ if ((kprobe_status != KPROBE_REENTER) && current_kprobe->post_handler) {
+ kprobe_status = KPROBE_HIT_SSDONE;
current_kprobe->post_handler(current_kprobe, regs, 0);
+ }
- resume_execution(current_kprobe, regs);
+ if (current_kprobe->post_handler != trampoline_post_handler)
+ resume_execution(current_kprobe, regs);
regs->eflags |= kprobe_saved_eflags;
+ /* Restore the previously saved kprobe state and continue. */
+ if (kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe();
+ goto out;
+ }
unlock_kprobes();
+out:
preempt_enable_no_resched();
/*