[PATCH] kretprobe spinlock deadlock patch
kprobe_flush_task() may call kfree() while holding the kretprobe_lock spinlock; if kfree() is itself probed by a kretprobe, this leads to a spinlock deadlock. This patch moves the kfree() call out of the scope of kretprobe_lock. Signed-off-by: bibo, mao <bibo.mao@intel.com> Signed-off-by: Ananth N Mavinakayanahalli <ananth@in.ibm.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
Родитель
f2aa85a0cc
Коммит
99219a3fbc
|
@ -396,11 +396,12 @@ no_kprobe:
|
||||||
fastcall void *__kprobes trampoline_handler(struct pt_regs *regs)
|
fastcall void *__kprobes trampoline_handler(struct pt_regs *regs)
|
||||||
{
|
{
|
||||||
struct kretprobe_instance *ri = NULL;
|
struct kretprobe_instance *ri = NULL;
|
||||||
struct hlist_head *head;
|
struct hlist_head *head, empty_rp;
|
||||||
struct hlist_node *node, *tmp;
|
struct hlist_node *node, *tmp;
|
||||||
unsigned long flags, orig_ret_address = 0;
|
unsigned long flags, orig_ret_address = 0;
|
||||||
unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
|
unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
|
||||||
|
|
||||||
|
INIT_HLIST_HEAD(&empty_rp);
|
||||||
spin_lock_irqsave(&kretprobe_lock, flags);
|
spin_lock_irqsave(&kretprobe_lock, flags);
|
||||||
head = kretprobe_inst_table_head(current);
|
head = kretprobe_inst_table_head(current);
|
||||||
|
|
||||||
|
@ -429,7 +430,7 @@ fastcall void *__kprobes trampoline_handler(struct pt_regs *regs)
|
||||||
}
|
}
|
||||||
|
|
||||||
orig_ret_address = (unsigned long)ri->ret_addr;
|
orig_ret_address = (unsigned long)ri->ret_addr;
|
||||||
recycle_rp_inst(ri);
|
recycle_rp_inst(ri, &empty_rp);
|
||||||
|
|
||||||
if (orig_ret_address != trampoline_address)
|
if (orig_ret_address != trampoline_address)
|
||||||
/*
|
/*
|
||||||
|
@ -444,6 +445,10 @@ fastcall void *__kprobes trampoline_handler(struct pt_regs *regs)
|
||||||
|
|
||||||
spin_unlock_irqrestore(&kretprobe_lock, flags);
|
spin_unlock_irqrestore(&kretprobe_lock, flags);
|
||||||
|
|
||||||
|
hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
|
||||||
|
hlist_del(&ri->hlist);
|
||||||
|
kfree(ri);
|
||||||
|
}
|
||||||
return (void*)orig_ret_address;
|
return (void*)orig_ret_address;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -338,12 +338,13 @@ static void kretprobe_trampoline(void)
|
||||||
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
|
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
|
||||||
{
|
{
|
||||||
struct kretprobe_instance *ri = NULL;
|
struct kretprobe_instance *ri = NULL;
|
||||||
struct hlist_head *head;
|
struct hlist_head *head, empty_rp;
|
||||||
struct hlist_node *node, *tmp;
|
struct hlist_node *node, *tmp;
|
||||||
unsigned long flags, orig_ret_address = 0;
|
unsigned long flags, orig_ret_address = 0;
|
||||||
unsigned long trampoline_address =
|
unsigned long trampoline_address =
|
||||||
((struct fnptr *)kretprobe_trampoline)->ip;
|
((struct fnptr *)kretprobe_trampoline)->ip;
|
||||||
|
|
||||||
|
INIT_HLIST_HEAD(&empty_rp);
|
||||||
spin_lock_irqsave(&kretprobe_lock, flags);
|
spin_lock_irqsave(&kretprobe_lock, flags);
|
||||||
head = kretprobe_inst_table_head(current);
|
head = kretprobe_inst_table_head(current);
|
||||||
|
|
||||||
|
@ -369,7 +370,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
|
||||||
ri->rp->handler(ri, regs);
|
ri->rp->handler(ri, regs);
|
||||||
|
|
||||||
orig_ret_address = (unsigned long)ri->ret_addr;
|
orig_ret_address = (unsigned long)ri->ret_addr;
|
||||||
recycle_rp_inst(ri);
|
recycle_rp_inst(ri, &empty_rp);
|
||||||
|
|
||||||
if (orig_ret_address != trampoline_address)
|
if (orig_ret_address != trampoline_address)
|
||||||
/*
|
/*
|
||||||
|
@ -387,6 +388,10 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
|
||||||
spin_unlock_irqrestore(&kretprobe_lock, flags);
|
spin_unlock_irqrestore(&kretprobe_lock, flags);
|
||||||
preempt_enable_no_resched();
|
preempt_enable_no_resched();
|
||||||
|
|
||||||
|
hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
|
||||||
|
hlist_del(&ri->hlist);
|
||||||
|
kfree(ri);
|
||||||
|
}
|
||||||
/*
|
/*
|
||||||
* By returning a non-zero value, we are telling
|
* By returning a non-zero value, we are telling
|
||||||
* kprobe_handler() that we don't want the post_handler
|
* kprobe_handler() that we don't want the post_handler
|
||||||
|
|
|
@ -260,11 +260,12 @@ void kretprobe_trampoline_holder(void)
|
||||||
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
|
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
|
||||||
{
|
{
|
||||||
struct kretprobe_instance *ri = NULL;
|
struct kretprobe_instance *ri = NULL;
|
||||||
struct hlist_head *head;
|
struct hlist_head *head, empty_rp;
|
||||||
struct hlist_node *node, *tmp;
|
struct hlist_node *node, *tmp;
|
||||||
unsigned long flags, orig_ret_address = 0;
|
unsigned long flags, orig_ret_address = 0;
|
||||||
unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
|
unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
|
||||||
|
|
||||||
|
INIT_HLIST_HEAD(&empty_rp);
|
||||||
spin_lock_irqsave(&kretprobe_lock, flags);
|
spin_lock_irqsave(&kretprobe_lock, flags);
|
||||||
head = kretprobe_inst_table_head(current);
|
head = kretprobe_inst_table_head(current);
|
||||||
|
|
||||||
|
@ -290,7 +291,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
|
||||||
ri->rp->handler(ri, regs);
|
ri->rp->handler(ri, regs);
|
||||||
|
|
||||||
orig_ret_address = (unsigned long)ri->ret_addr;
|
orig_ret_address = (unsigned long)ri->ret_addr;
|
||||||
recycle_rp_inst(ri);
|
recycle_rp_inst(ri, &empty_rp);
|
||||||
|
|
||||||
if (orig_ret_address != trampoline_address)
|
if (orig_ret_address != trampoline_address)
|
||||||
/*
|
/*
|
||||||
|
@ -308,6 +309,10 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
|
||||||
spin_unlock_irqrestore(&kretprobe_lock, flags);
|
spin_unlock_irqrestore(&kretprobe_lock, flags);
|
||||||
preempt_enable_no_resched();
|
preempt_enable_no_resched();
|
||||||
|
|
||||||
|
hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
|
||||||
|
hlist_del(&ri->hlist);
|
||||||
|
kfree(ri);
|
||||||
|
}
|
||||||
/*
|
/*
|
||||||
* By returning a non-zero value, we are telling
|
* By returning a non-zero value, we are telling
|
||||||
* kprobe_handler() that we don't want the post_handler
|
* kprobe_handler() that we don't want the post_handler
|
||||||
|
|
|
@ -369,11 +369,12 @@ void __kprobes kretprobe_trampoline_holder(void)
|
||||||
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
|
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
|
||||||
{
|
{
|
||||||
struct kretprobe_instance *ri = NULL;
|
struct kretprobe_instance *ri = NULL;
|
||||||
struct hlist_head *head;
|
struct hlist_head *head, empty_rp;
|
||||||
struct hlist_node *node, *tmp;
|
struct hlist_node *node, *tmp;
|
||||||
unsigned long flags, orig_ret_address = 0;
|
unsigned long flags, orig_ret_address = 0;
|
||||||
unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
|
unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
|
||||||
|
|
||||||
|
INIT_HLIST_HEAD(&empty_rp);
|
||||||
spin_lock_irqsave(&kretprobe_lock, flags);
|
spin_lock_irqsave(&kretprobe_lock, flags);
|
||||||
head = kretprobe_inst_table_head(current);
|
head = kretprobe_inst_table_head(current);
|
||||||
|
|
||||||
|
@ -399,7 +400,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
|
||||||
ri->rp->handler(ri, regs);
|
ri->rp->handler(ri, regs);
|
||||||
|
|
||||||
orig_ret_address = (unsigned long)ri->ret_addr;
|
orig_ret_address = (unsigned long)ri->ret_addr;
|
||||||
recycle_rp_inst(ri);
|
recycle_rp_inst(ri, &empty_rp);
|
||||||
|
|
||||||
if (orig_ret_address != trampoline_address) {
|
if (orig_ret_address != trampoline_address) {
|
||||||
/*
|
/*
|
||||||
|
@ -417,6 +418,10 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
|
||||||
spin_unlock_irqrestore(&kretprobe_lock, flags);
|
spin_unlock_irqrestore(&kretprobe_lock, flags);
|
||||||
preempt_enable_no_resched();
|
preempt_enable_no_resched();
|
||||||
|
|
||||||
|
hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
|
||||||
|
hlist_del(&ri->hlist);
|
||||||
|
kfree(ri);
|
||||||
|
}
|
||||||
/*
|
/*
|
||||||
* By returning a non-zero value, we are telling
|
* By returning a non-zero value, we are telling
|
||||||
* kprobe_handler() that we don't want the post_handler
|
* kprobe_handler() that we don't want the post_handler
|
||||||
|
|
|
@ -405,11 +405,12 @@ no_kprobe:
|
||||||
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
|
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
|
||||||
{
|
{
|
||||||
struct kretprobe_instance *ri = NULL;
|
struct kretprobe_instance *ri = NULL;
|
||||||
struct hlist_head *head;
|
struct hlist_head *head, empty_rp;
|
||||||
struct hlist_node *node, *tmp;
|
struct hlist_node *node, *tmp;
|
||||||
unsigned long flags, orig_ret_address = 0;
|
unsigned long flags, orig_ret_address = 0;
|
||||||
unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
|
unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
|
||||||
|
|
||||||
|
INIT_HLIST_HEAD(&empty_rp);
|
||||||
spin_lock_irqsave(&kretprobe_lock, flags);
|
spin_lock_irqsave(&kretprobe_lock, flags);
|
||||||
head = kretprobe_inst_table_head(current);
|
head = kretprobe_inst_table_head(current);
|
||||||
|
|
||||||
|
@ -435,7 +436,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
|
||||||
ri->rp->handler(ri, regs);
|
ri->rp->handler(ri, regs);
|
||||||
|
|
||||||
orig_ret_address = (unsigned long)ri->ret_addr;
|
orig_ret_address = (unsigned long)ri->ret_addr;
|
||||||
recycle_rp_inst(ri);
|
recycle_rp_inst(ri, &empty_rp);
|
||||||
|
|
||||||
if (orig_ret_address != trampoline_address)
|
if (orig_ret_address != trampoline_address)
|
||||||
/*
|
/*
|
||||||
|
@ -453,6 +454,10 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
|
||||||
spin_unlock_irqrestore(&kretprobe_lock, flags);
|
spin_unlock_irqrestore(&kretprobe_lock, flags);
|
||||||
preempt_enable_no_resched();
|
preempt_enable_no_resched();
|
||||||
|
|
||||||
|
hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
|
||||||
|
hlist_del(&ri->hlist);
|
||||||
|
kfree(ri);
|
||||||
|
}
|
||||||
/*
|
/*
|
||||||
* By returning a non-zero value, we are telling
|
* By returning a non-zero value, we are telling
|
||||||
* kprobe_handler() that we don't want the post_handler
|
* kprobe_handler() that we don't want the post_handler
|
||||||
|
|
|
@ -202,7 +202,7 @@ void unregister_kretprobe(struct kretprobe *rp);
|
||||||
struct kretprobe_instance *get_free_rp_inst(struct kretprobe *rp);
|
struct kretprobe_instance *get_free_rp_inst(struct kretprobe *rp);
|
||||||
void add_rp_inst(struct kretprobe_instance *ri);
|
void add_rp_inst(struct kretprobe_instance *ri);
|
||||||
void kprobe_flush_task(struct task_struct *tk);
|
void kprobe_flush_task(struct task_struct *tk);
|
||||||
void recycle_rp_inst(struct kretprobe_instance *ri);
|
void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head);
|
||||||
#else /* CONFIG_KPROBES */
|
#else /* CONFIG_KPROBES */
|
||||||
|
|
||||||
#define __kprobes /**/
|
#define __kprobes /**/
|
||||||
|
|
|
@ -319,7 +319,8 @@ void __kprobes add_rp_inst(struct kretprobe_instance *ri)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Called with kretprobe_lock held */
|
/* Called with kretprobe_lock held */
|
||||||
void __kprobes recycle_rp_inst(struct kretprobe_instance *ri)
|
void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
|
||||||
|
struct hlist_head *head)
|
||||||
{
|
{
|
||||||
/* remove rp inst off the rprobe_inst_table */
|
/* remove rp inst off the rprobe_inst_table */
|
||||||
hlist_del(&ri->hlist);
|
hlist_del(&ri->hlist);
|
||||||
|
@ -331,7 +332,7 @@ void __kprobes recycle_rp_inst(struct kretprobe_instance *ri)
|
||||||
hlist_add_head(&ri->uflist, &ri->rp->free_instances);
|
hlist_add_head(&ri->uflist, &ri->rp->free_instances);
|
||||||
} else
|
} else
|
||||||
/* Unregistering */
|
/* Unregistering */
|
||||||
kfree(ri);
|
hlist_add_head(&ri->hlist, head);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk)
|
struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk)
|
||||||
|
@ -348,17 +349,23 @@ struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk)
|
||||||
void __kprobes kprobe_flush_task(struct task_struct *tk)
|
void __kprobes kprobe_flush_task(struct task_struct *tk)
|
||||||
{
|
{
|
||||||
struct kretprobe_instance *ri;
|
struct kretprobe_instance *ri;
|
||||||
struct hlist_head *head;
|
struct hlist_head *head, empty_rp;
|
||||||
struct hlist_node *node, *tmp;
|
struct hlist_node *node, *tmp;
|
||||||
unsigned long flags = 0;
|
unsigned long flags = 0;
|
||||||
|
|
||||||
|
INIT_HLIST_HEAD(&empty_rp);
|
||||||
spin_lock_irqsave(&kretprobe_lock, flags);
|
spin_lock_irqsave(&kretprobe_lock, flags);
|
||||||
head = kretprobe_inst_table_head(tk);
|
head = kretprobe_inst_table_head(tk);
|
||||||
hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
|
hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
|
||||||
if (ri->task == tk)
|
if (ri->task == tk)
|
||||||
recycle_rp_inst(ri);
|
recycle_rp_inst(ri, &empty_rp);
|
||||||
}
|
}
|
||||||
spin_unlock_irqrestore(&kretprobe_lock, flags);
|
spin_unlock_irqrestore(&kretprobe_lock, flags);
|
||||||
|
|
||||||
|
hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
|
||||||
|
hlist_del(&ri->hlist);
|
||||||
|
kfree(ri);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void free_rp_inst(struct kretprobe *rp)
|
static inline void free_rp_inst(struct kretprobe *rp)
|
||||||
|
|
Загрузка…
Ссылка в новой задаче