author     Ingo Molnar <mingo@kernel.org>  2013-11-21 09:59:27 +0100
committer  Ingo Molnar <mingo@kernel.org>  2013-11-21 09:59:27 +0100
commit     09897d78dbc3a544426f2272b5601c62922ccab9 (patch)
tree       4c10b34aa8cbac758e5fe094a79687a9a763e28c
parent     e98a6e59dff885eb387163b1a7abe019a44ba90b (diff)
parent     ad439356ae5ae7688b39f1107fd5b874850fec18 (diff)
Merge branch 'uprobes/core' of git://git.kernel.org/pub/scm/linux/kernel/git/oleg/misc into perf/core
Pull uprobes cleanups from Oleg Nesterov.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--  arch/powerpc/include/asm/uprobes.h  |  5
-rw-r--r--  arch/powerpc/kernel/uprobes.c       |  2
-rw-r--r--  include/linux/uprobes.h             | 52
-rw-r--r--  kernel/events/uprobes.c             | 60
4 files changed, 66 insertions, 53 deletions
diff --git a/arch/powerpc/include/asm/uprobes.h b/arch/powerpc/include/asm/uprobes.h
index 75c6ecdb8f3..7422a999a39 100644
--- a/arch/powerpc/include/asm/uprobes.h
+++ b/arch/powerpc/include/asm/uprobes.h
@@ -36,9 +36,8 @@ typedef ppc_opcode_t uprobe_opcode_t;
struct arch_uprobe {
union {
- u8 insn[MAX_UINSN_BYTES];
- u8 ixol[MAX_UINSN_BYTES];
- u32 ainsn;
+ u32 insn;
+ u32 ixol;
};
};
diff --git a/arch/powerpc/kernel/uprobes.c b/arch/powerpc/kernel/uprobes.c
index 59f419b935f..003b20964ea 100644
--- a/arch/powerpc/kernel/uprobes.c
+++ b/arch/powerpc/kernel/uprobes.c
@@ -186,7 +186,7 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
* emulate_step() returns 1 if the insn was successfully emulated.
* For all other cases, we need to single-step in hardware.
*/
- ret = emulate_step(regs, auprobe->ainsn);
+ ret = emulate_step(regs, auprobe->insn);
if (ret > 0)
return true;
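
With 'ainsn' gone, powerpc can make both union members a plain u32: the generic uprobes code never relies on the concrete type of arch_uprobe.insn/ixol, it only takes their address and sizeof() (which is what the &auprobe->insn and sizeof(uprobe->arch.insn) changes later in this merge depend on). A minimal sketch of that access pattern; copy_original_insn() is a hypothetical helper, not part of the patch:

#include <linux/string.h>
#include <linux/uprobes.h>

/*
 * Sketch only: the generic side treats arch_uprobe.insn as an opaque
 * blob, so the same code works whether the arch defines it as
 * u8 insn[MAX_UINSN_BYTES] or as a single u32.
 */
static void copy_original_insn(struct arch_uprobe *auprobe, const void *src)
{
	memcpy(&auprobe->insn, src, sizeof(auprobe->insn));
}
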
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index 319eae70fe8..e32251e00e6 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -26,16 +26,13 @@
#include <linux/errno.h>
#include <linux/rbtree.h>
+#include <linux/types.h>
struct vm_area_struct;
struct mm_struct;
struct inode;
struct notifier_block;
-#ifdef CONFIG_ARCH_SUPPORTS_UPROBES
-# include <asm/uprobes.h>
-#endif
-
#define UPROBE_HANDLER_REMOVE 1
#define UPROBE_HANDLER_MASK 1
@@ -60,6 +57,8 @@ struct uprobe_consumer {
};
#ifdef CONFIG_UPROBES
+#include <asm/uprobes.h>
+
enum uprobe_task_state {
UTASK_RUNNING,
UTASK_SSTEP,
@@ -72,35 +71,28 @@ enum uprobe_task_state {
*/
struct uprobe_task {
enum uprobe_task_state state;
- struct arch_uprobe_task autask;
- struct return_instance *return_instances;
- unsigned int depth;
- struct uprobe *active_uprobe;
+ union {
+ struct {
+ struct arch_uprobe_task autask;
+ unsigned long vaddr;
+ };
+ struct {
+ struct callback_head dup_xol_work;
+ unsigned long dup_xol_addr;
+ };
+ };
+
+ struct uprobe *active_uprobe;
unsigned long xol_vaddr;
- unsigned long vaddr;
-};
-/*
- * On a breakpoint hit, thread contests for a slot. It frees the
- * slot after singlestep. Currently a fixed number of slots are
- * allocated.
- */
-struct xol_area {
- wait_queue_head_t wq; /* if all slots are busy */
- atomic_t slot_count; /* number of in-use slots */
- unsigned long *bitmap; /* 0 = free slot */
- struct page *page;
-
- /*
- * We keep the vma's vm_start rather than a pointer to the vma
- * itself. The probed process or a naughty kernel module could make
- * the vma go away, and we must handle that reasonably gracefully.
- */
- unsigned long vaddr; /* Page(s) of instruction slots */
+ struct return_instance *return_instances;
+ unsigned int depth;
};
+struct xol_area;
+
struct uprobes_state {
struct xol_area *xol_area;
};
@@ -109,6 +101,7 @@ extern int __weak set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsign
extern int __weak set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
extern bool __weak is_swbp_insn(uprobe_opcode_t *insn);
extern bool __weak is_trap_insn(uprobe_opcode_t *insn);
+extern unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs);
extern int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t);
extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
extern int uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool);
@@ -120,7 +113,6 @@ extern void uprobe_end_dup_mmap(void);
extern void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm);
extern void uprobe_free_utask(struct task_struct *t);
extern void uprobe_copy_process(struct task_struct *t, unsigned long flags);
-extern unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs);
extern int uprobe_post_sstep_notifier(struct pt_regs *regs);
extern int uprobe_pre_sstep_notifier(struct pt_regs *regs);
extern void uprobe_notify_resume(struct pt_regs *regs);
@@ -176,10 +168,6 @@ static inline bool uprobe_deny_signal(void)
{
return false;
}
-static inline unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
-{
- return 0;
-}
static inline void uprobe_free_utask(struct task_struct *t)
{
}
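
The two halves of the new uprobe_task union are never in use at the same time: autask/vaddr matter only while the task single-steps over a probed instruction, while dup_xol_work/dup_xol_addr are used only by uprobe_copy_process() to schedule xol-area creation in a freshly forked child. A stand-alone toy model (simplified user-space stand-ins, purely for illustration) showing that the second pair therefore costs no extra space:

#include <stdio.h>
#include <stddef.h>

/* Illustrative stand-ins, not the kernel structures. */
struct toy_autask { unsigned long scratch[2]; };
struct toy_callback_head { void *next; void (*func)(void *); };

struct toy_utask {
	int state;
	union {
		struct {
			struct toy_autask autask;
			unsigned long vaddr;
		};
		struct {
			struct toy_callback_head dup_xol_work;
			unsigned long dup_xol_addr;
		};
	};
};

int main(void)
{
	/* Both pairs start at the same offset: they share the storage. */
	printf("autask @ %zu, dup_xol_work @ %zu, total size %zu\n",
	       offsetof(struct toy_utask, autask),
	       offsetof(struct toy_utask, dup_xol_work),
	       sizeof(struct toy_utask));
	return 0;
}
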
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 24b7d6ca871..b886a5e7d4f 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -73,6 +73,17 @@ struct uprobe {
struct inode *inode; /* Also hold a ref to inode */
loff_t offset;
unsigned long flags;
+
+ /*
+ * The generic code assumes that it has two members of unknown type
+ * owned by the arch-specific code:
+ *
+ * insn - copy_insn() saves the original instruction here for
+ * arch_uprobe_analyze_insn().
+ *
+ * ixol - potentially modified instruction to execute out of
+ * line, copied to xol_area by xol_get_insn_slot().
+ */
struct arch_uprobe arch;
};
@@ -86,6 +97,29 @@ struct return_instance {
};
/*
+ * Execute out of line area: anonymous executable mapping installed
+ * by the probed task to execute the copy of the original instruction
+ * mangled by set_swbp().
+ *
+ * On a breakpoint hit, thread contests for a slot. It frees the
+ * slot after singlestep. Currently a fixed number of slots are
+ * allocated.
+ */
+struct xol_area {
+ wait_queue_head_t wq; /* if all slots are busy */
+ atomic_t slot_count; /* number of in-use slots */
+ unsigned long *bitmap; /* 0 = free slot */
+ struct page *page;
+
+ /*
+ * We keep the vma's vm_start rather than a pointer to the vma
+ * itself. The probed process or a naughty kernel module could make
+ * the vma go away, and we must handle that reasonably gracefully.
+ */
+ unsigned long vaddr; /* Page(s) of instruction slots */
+};
+
+/*
* valid_vma: Verify if the specified vma is an executable vma
* Relax restrictions while unregistering: vm_flags might have
* changed after breakpoint was inserted.
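
With the structure moved here, next to its only users, the slot handling itself is unchanged: a thread grabs a free slot from the bitmap, single-steps, and releases it, sleeping on the wait queue when every slot is busy. A simplified sketch of the claim path, loosely modeled on the existing xol_take_insn_slot() helper (claim_xol_slot() is a hypothetical name, error handling omitted):

/* Sketch, not the exact kernel code: claim one free slot or sleep. */
static unsigned long claim_xol_slot(struct xol_area *area)
{
	int slot;

	for (;;) {
		slot = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
		if (slot < UINSNS_PER_PAGE &&
		    !test_and_set_bit(slot, area->bitmap))
			break;
		/* all slots busy (or we lost the race): wait and retry */
		wait_event(area->wq,
			   atomic_read(&area->slot_count) < UINSNS_PER_PAGE);
	}

	atomic_inc(&area->slot_count);
	return area->vaddr + slot * UPROBE_XOL_SLOT_BYTES;
}
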
@@ -330,7 +364,7 @@ int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned
int __weak
set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
- return uprobe_write_opcode(mm, vaddr, *(uprobe_opcode_t *)auprobe->insn);
+ return uprobe_write_opcode(mm, vaddr, *(uprobe_opcode_t *)&auprobe->insn);
}
static int match_uprobe(struct uprobe *l, struct uprobe *r)
@@ -529,8 +563,8 @@ static int copy_insn(struct uprobe *uprobe, struct file *filp)
{
struct address_space *mapping = uprobe->inode->i_mapping;
loff_t offs = uprobe->offset;
- void *insn = uprobe->arch.insn;
- int size = MAX_UINSN_BYTES;
+ void *insn = &uprobe->arch.insn;
+ int size = sizeof(uprobe->arch.insn);
int len, err = -EIO;
/* Copy only available bytes, -EIO if nothing was read */
@@ -569,7 +603,7 @@ static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
goto out;
ret = -ENOTSUPP;
- if (is_trap_insn((uprobe_opcode_t *)uprobe->arch.insn))
+ if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn))
goto out;
ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
@@ -1264,7 +1298,7 @@ static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
/* Initialize the slot */
copy_to_page(area->page, xol_vaddr,
- uprobe->arch.ixol, sizeof(uprobe->arch.ixol));
+ &uprobe->arch.ixol, sizeof(uprobe->arch.ixol));
/*
* We probably need flush_icache_user_range() but it needs vma.
* This should work on supported architectures too.
@@ -1403,12 +1437,10 @@ static void uprobe_warn(struct task_struct *t, const char *msg)
static void dup_xol_work(struct callback_head *work)
{
- kfree(work);
-
if (current->flags & PF_EXITING)
return;
- if (!__create_xol_area(current->utask->vaddr))
+ if (!__create_xol_area(current->utask->dup_xol_addr))
uprobe_warn(current, "dup xol area");
}
@@ -1419,7 +1451,6 @@ void uprobe_copy_process(struct task_struct *t, unsigned long flags)
{
struct uprobe_task *utask = current->utask;
struct mm_struct *mm = current->mm;
- struct callback_head *work;
struct xol_area *area;
t->utask = NULL;
@@ -1441,14 +1472,9 @@ void uprobe_copy_process(struct task_struct *t, unsigned long flags)
if (mm == t->mm)
return;
- /* TODO: move it into the union in uprobe_task */
- work = kmalloc(sizeof(*work), GFP_KERNEL);
- if (!work)
- return uprobe_warn(t, "dup xol area");
-
- t->utask->vaddr = area->vaddr;
- init_task_work(work, dup_xol_work);
- task_work_add(t, work, true);
+ t->utask->dup_xol_addr = area->vaddr;
+ init_task_work(&t->utask->dup_xol_work, dup_xol_work);
+ task_work_add(t, &t->utask->dup_xol_work, true);
}
/*
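
Read together with the union added in include/linux/uprobes.h, the resulting child-side flow is: uprobe_copy_process() records the parent's xol vaddr in utask->dup_xol_addr and queues the embedded utask->dup_xol_work, and dup_xol_work() later recreates the area in the child's mm. A condensed view of that flow as it reads after the patch; uprobe_copy_process_tail() is only an illustrative wrapper for the end of uprobe_copy_process():

/* Condensed from the hunks above, not a drop-in replacement. */
static void dup_xol_work(struct callback_head *work)
{
	if (current->flags & PF_EXITING)
		return;

	if (!__create_xol_area(current->utask->dup_xol_addr))
		uprobe_warn(current, "dup xol area");
}

static void uprobe_copy_process_tail(struct task_struct *t, struct xol_area *area)
{
	/* no kmalloc(): the callback_head now lives inside uprobe_task */
	t->utask->dup_xol_addr = area->vaddr;
	init_task_work(&t->utask->dup_xol_work, dup_xol_work);
	task_work_add(t, &t->utask->dup_xol_work, true);
}
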