author     Christoph Hellwig <hch@lst.de>                        2007-05-08 00:34:14 -0700
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-05-08 11:15:19 -0700
commit     4c4308cb93450989846ac49faeb6dab943e7657e (patch)
tree       c06092cae6f95a243cdd758d07491cf5fa24a1dd /kernel
parent     6f716acd5fa20ae6a35ab29ae37fa9189e839ed5 (diff)
kprobes: kretprobes simplifications
- consolidate duplicate code in all arch_prepare_kretprobe instances
  into common code

- replace various odd helpers that use hlist_for_each_entry to get
  the first element of a list with either a hlist_for_each_entry_safe
  or an opencoded access to the first element in the caller

- inline add_rp_inst into its only remaining caller

- use kretprobe_inst_table_head instead of opencoding it

Signed-off-by: Christoph Hellwig <hch@lst.de>
Cc: Prasanna S Panchamukhi <prasanna@in.ibm.com>
Acked-by: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
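For readers less familiar with the hlist idioms this message refers to, here is a minimal userspace sketch of the two patterns involved. It deliberately does not use the kernel's <linux/list.h>; every name in it (struct inst, struct inst_list, get_first_inst, push_inst) is invented for illustration. It contrasts the removed style of helper, a loop that only ever takes its first iteration in order to fetch the head of a list, with the opencoded first-element access and the save-the-next-pointer traversal used instead.

#include <stdio.h>
#include <stdlib.h>

/* Toy singly linked list standing in for a kernel hlist. */
struct inst {
        int id;
        struct inst *next;
};

struct inst_list {
        struct inst *first;
};

/*
 * The style of helper the commit removes: a loop that only ever takes
 * its first iteration, used purely to fetch the head of the list.
 */
static struct inst *get_first_inst(struct inst_list *list)
{
        struct inst *i;

        for (i = list->first; i; i = i->next)
                return i;
        return NULL;
}

static void push_inst(struct inst_list *list, int id)
{
        struct inst *i = malloc(sizeof(*i));

        if (!i)
                exit(1);
        i->id = id;
        i->next = list->first;
        list->first = i;
}

int main(void)
{
        struct inst_list free_insts = { NULL };
        struct inst *i, *next;

        push_inst(&free_insts, 1);
        push_inst(&free_insts, 2);

        /*
         * Opencoded first-element access, the replacement for the
         * get_free_rp_inst()-style helpers.
         */
        if (free_insts.first)
                printf("first free instance: %d\n", free_insts.first->id);

        /*
         * "Safe" traversal: remember the next pointer before freeing the
         * current node, which is why destructive walks switch to
         * hlist_for_each_entry_safe() in the patch.
         */
        for (i = free_insts.first; i; i = next) {
                next = i->next;
                free(i);
        }
        free_insts.first = NULL;

        if (!get_first_inst(&free_insts))
                printf("old-style helper on an empty list returns NULL\n");
        return 0;
}

In the diff below, pre_handler_kretprobe() takes the first branch of this trade-off (it reads rp->free_instances.first directly), while free_rp_inst() and unregister_kretprobe() take the second, because they unlink entries while walking the list.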
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/kprobes.c  64
1 files changed, 21 insertions, 43 deletions
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 22857003a65..f58f171bd65 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -358,46 +358,6 @@ void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
 }
 
 /* Called with kretprobe_lock held */
-struct kretprobe_instance __kprobes *get_free_rp_inst(struct kretprobe *rp)
-{
-        struct hlist_node *node;
-        struct kretprobe_instance *ri;
-        hlist_for_each_entry(ri, node, &rp->free_instances, uflist)
-                return ri;
-        return NULL;
-}
-
-/* Called with kretprobe_lock held */
-static struct kretprobe_instance __kprobes *get_used_rp_inst(struct kretprobe
-                                                              *rp)
-{
-        struct hlist_node *node;
-        struct kretprobe_instance *ri;
-        hlist_for_each_entry(ri, node, &rp->used_instances, uflist)
-                return ri;
-        return NULL;
-}
-
-/* Called with kretprobe_lock held */
-void __kprobes add_rp_inst(struct kretprobe_instance *ri)
-{
-        /*
-         * Remove rp inst off the free list -
-         * Add it back when probed function returns
-         */
-        hlist_del(&ri->uflist);
-
-        /* Add rp inst onto table */
-        INIT_HLIST_NODE(&ri->hlist);
-        hlist_add_head(&ri->hlist,
-                        &kretprobe_inst_table[hash_ptr(ri->task, KPROBE_HASH_BITS)]);
-
-        /* Also add this rp inst to the used list. */
-        INIT_HLIST_NODE(&ri->uflist);
-        hlist_add_head(&ri->uflist, &ri->rp->used_instances);
-}
-
 void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
                                struct hlist_head *head)
 {
@@ -450,7 +410,9 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
 static inline void free_rp_inst(struct kretprobe *rp)
 {
         struct kretprobe_instance *ri;
-        while ((ri = get_free_rp_inst(rp)) != NULL) {
+        struct hlist_node *pos, *next;
+
+        hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, uflist) {
                 hlist_del(&ri->uflist);
                 kfree(ri);
         }
@@ -732,7 +694,21 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
 
         /*TODO: consider to only swap the RA after the last pre_handler fired */
         spin_lock_irqsave(&kretprobe_lock, flags);
-        arch_prepare_kretprobe(rp, regs);
+        if (!hlist_empty(&rp->free_instances)) {
+                struct kretprobe_instance *ri;
+
+                ri = hlist_entry(rp->free_instances.first,
+                                 struct kretprobe_instance, uflist);
+                ri->rp = rp;
+                ri->task = current;
+                arch_prepare_kretprobe(ri, regs);
+
+                /* XXX(hch): why is there no hlist_move_head? */
+                hlist_del(&ri->uflist);
+                hlist_add_head(&ri->uflist, &ri->rp->used_instances);
+                hlist_add_head(&ri->hlist, kretprobe_inst_table_head(ri->task));
+        } else
+                rp->nmissed++;
         spin_unlock_irqrestore(&kretprobe_lock, flags);
         return 0;
 }
@@ -795,11 +771,13 @@ void __kprobes unregister_kretprobe(struct kretprobe *rp)
 {
         unsigned long flags;
         struct kretprobe_instance *ri;
+        struct hlist_node *pos, *next;
 
         unregister_kprobe(&rp->kp);
+
         /* No race here */
         spin_lock_irqsave(&kretprobe_lock, flags);
-        while ((ri = get_used_rp_inst(rp)) != NULL) {
+        hlist_for_each_entry_safe(ri, pos, next, &rp->used_instances, uflist) {
                 ri->rp = NULL;
                 hlist_del(&ri->uflist);
         }
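A side note on the XXX(hch) question in the pre_handler_kretprobe() hunk: moving a single node between list heads is spelled as the hlist_del()/hlist_add_head() pair seen there. A dedicated helper would only need to wrap those two calls; the sketch below is hypothetical, assumes the kernel's struct hlist_node/hlist_head and the existing hlist primitives, and no such helper exists in <linux/list.h> as of this commit.

/*
 * Hypothetical convenience wrapper, not part of the kernel tree at the
 * time of this commit: unlink @n from whatever hlist it is on and put
 * it at the head of @h.  Equivalent to the open-coded pair used in
 * pre_handler_kretprobe() above.
 */
static inline void hlist_move_head(struct hlist_node *n, struct hlist_head *h)
{
        hlist_del(n);
        hlist_add_head(n, h);
}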