uprobes: Introduce get_uprobe()

Cosmetic. Add a new trivial helper, get_uprobe(). It matches the
put_uprobe() we already have and lets us simplify a couple of its
users.
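
For illustration only (not part of this patch), here is a minimal,
self-contained user-space sketch of the same get/put refcounting pattern,
with a stand-in "struct obj" and C11 atomics in place of the kernel's
struct uprobe and atomic_t; the names get_obj()/put_obj()/main() are purely
hypothetical:

#include <stdatomic.h>
#include <stdlib.h>

/* Stand-in for struct uprobe; only the reference count matters here. */
struct obj {
	atomic_int ref;
};

/* Same shape as get_uprobe(): take a reference and return the pointer,
 * so a caller can write "ri->uprobe = get_uprobe(uprobe);" in one step. */
static struct obj *get_obj(struct obj *o)
{
	atomic_fetch_add(&o->ref, 1);
	return o;
}

/* Same shape as put_uprobe(): drop a reference, free on the last put. */
static void put_obj(struct obj *o)
{
	if (atomic_fetch_sub(&o->ref, 1) == 1)
		free(o);
}

int main(void)
{
	struct obj *shared = calloc(1, sizeof(*shared));

	if (!shared)
		return 1;
	atomic_store(&shared->ref, 1);		/* creation reference */
	struct obj *mine = get_obj(shared);	/* was: inc ref, then assign */
	put_obj(mine);				/* drop the temporary reference */
	put_obj(shared);			/* drop the creation reference; frees */
	return 0;
}

Having the "get" helper return its argument is what lets a caller such as
prepare_uretprobe() collapse the old two-line "atomic_inc(); ri->uprobe =
uprobe;" sequence into a single assignment in the diff below.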

Tested-by: Pratyush Anand <panand@redhat.com>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Acked-by: Anton Arapov <arapov@gmail.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20150721134003.GA4736@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>

--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -366,6 +366,18 @@ set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
 	return uprobe_write_opcode(mm, vaddr, *(uprobe_opcode_t *)&auprobe->insn);
 }
 
+static struct uprobe *get_uprobe(struct uprobe *uprobe)
+{
+	atomic_inc(&uprobe->ref);
+	return uprobe;
+}
+
+static void put_uprobe(struct uprobe *uprobe)
+{
+	if (atomic_dec_and_test(&uprobe->ref))
+		kfree(uprobe);
+}
+
 static int match_uprobe(struct uprobe *l, struct uprobe *r)
 {
 	if (l->inode < r->inode)
@@ -393,10 +405,8 @@ static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
 	while (n) {
 		uprobe = rb_entry(n, struct uprobe, rb_node);
 		match = match_uprobe(&u, uprobe);
-		if (!match) {
-			atomic_inc(&uprobe->ref);
-			return uprobe;
-		}
+		if (!match)
+			return get_uprobe(uprobe);
 
 		if (match < 0)
 			n = n->rb_left;
@@ -432,10 +442,8 @@ static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
 		parent = *p;
 		u = rb_entry(parent, struct uprobe, rb_node);
 		match = match_uprobe(uprobe, u);
-		if (!match) {
-			atomic_inc(&u->ref);
-			return u;
-		}
+		if (!match)
+			return get_uprobe(u);
 
 		if (match < 0)
 			p = &parent->rb_left;
@@ -472,12 +480,6 @@ static struct uprobe *insert_uprobe(struct uprobe *uprobe)
 	return u;
 }
 
-static void put_uprobe(struct uprobe *uprobe)
-{
-	if (atomic_dec_and_test(&uprobe->ref))
-		kfree(uprobe);
-}
-
 static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
 {
 	struct uprobe *uprobe, *cur_uprobe;
@@ -1039,14 +1041,14 @@ static void build_probe_list(struct inode *inode,
 			if (u->inode != inode || u->offset < min)
 				break;
 			list_add(&u->pending_list, head);
-			atomic_inc(&u->ref);
+			get_uprobe(u);
 		}
 		for (t = n; (t = rb_next(t)); ) {
 			u = rb_entry(t, struct uprobe, rb_node);
 			if (u->inode != inode || u->offset > max)
 				break;
 			list_add(&u->pending_list, head);
-			atomic_inc(&u->ref);
+			get_uprobe(u);
 		}
 	}
 	spin_unlock(&uprobes_treelock);
@@ -1437,7 +1439,7 @@ static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)
 			return -ENOMEM;
 
 		*n = *o;
-		atomic_inc(&n->uprobe->ref);
+		get_uprobe(n->uprobe);
 		n->next = NULL;
 
 		*p = n;
@@ -1565,8 +1567,7 @@ static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
 		orig_ret_vaddr = utask->return_instances->orig_ret_vaddr;
 	}
 
-	atomic_inc(&uprobe->ref);
-	ri->uprobe = uprobe;
+	ri->uprobe = get_uprobe(uprobe);
 	ri->func = instruction_pointer(regs);
 	ri->orig_ret_vaddr = orig_ret_vaddr;
 	ri->chained = chained;