Probes fixes and clean ups for v6.5-rc1:
- Fix fprobe's rethook release timing issue (1): release the rethook only
  after ftrace_ops is unregistered, so that the rethook is not accessed
  after free.

- Fix fprobe's rethook access timing issue (2): stop the rethook before
  ftrace_ops is unregistered, so that the rethook is no longer used after
  unregister_fprobe() returns.

- Fix eprobe cleanup logic: if it attaches to multiple events and fails to
  enable one of them, roll back all enabled events correctly.

- Fix fprobe to unlock the ftrace recursion lock correctly when the handler
  is missed because another kprobe is running.

- Clean up kprobes to remove an unnecessary NULL initialization.

- Clean up kprobes to remove unnecessary 0 initializations.

-----BEGIN PGP SIGNATURE-----

iQFPBAABCgA5FiEEh7BulGwFlgAOi5DV2/sHvwUrPxsFAmStawEbHG1hc2FtaS5o
aXJhbWF0c3VAZ21haWwuY29tAAoJENv7B78FKz8bMBkIAJYun4zeXsFeUUNVZMP8
UlcyBt/uiB1Ch/t1T1wc55plWIDAvUfN+FEltwhb6MJsQgWEjKJxNcH+oquQeqSH
OkUvV6a8BR73FWbCt1Tm2MQKEG1RHC1R4JCj5GCzP93rQSCnmvr0c1yb6+JKQRx4
aPgWUjDm9vhYlOXS6heHo0hf0MbXDl1kqHjvMU2MXUMk7NtQ2JEo1Whikf7Drl6D
ufNqV54GmtuJhgIWAqSk+qWresRvXy0/5i4ONK1Kmq+pdhssjZ4KFsWFQTIkQzdU
nP8t2EfOp3aglx7ANvEJ+COfFNi0aMnHVBo7BbzsOy7Cq7IZTfD6zOTYSfylGGdA
Uw4=
=RBbc
-----END PGP SIGNATURE-----

Merge tag 'probes-fixes-v6.5-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace

Pull probes fixes from Masami Hiramatsu:

 - Fix fprobe's rethook release issues:

     - Release the rethook after ftrace_ops is unregistered so that the
       rethook is not accessed after free.

     - Stop the rethook before ftrace_ops is unregistered so that the
       rethook is not used after unregister_fprobe() returns.

 - Fix eprobe cleanup logic: if it attaches to multiple events and fails
   to enable one of them, roll back all enabled events correctly.

 - Fix fprobe to unlock the ftrace recursion lock correctly when the
   handler is missed because another kprobe is running.

 - Clean up kprobes to remove an unnecessary NULL initialization.

 - Clean up kprobes to remove unnecessary 0 initializations.

* tag 'probes-fixes-v6.5-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace:
  fprobe: Ensure running fprobe_exit_handler() finished before calling rethook_free()
  kernel: kprobes: Remove unnecessary ‘0’ values
  kprobes: Remove unnecessary ‘NULL’ values from correct_ret_addr
  fprobe: add unlock to match a succeeded ftrace_test_recursion_trylock
  kernel/trace: Fix cleanup logic of enable_trace_eprobe
  fprobe: Release rethook after the ftrace_ops is unregistered
commit 9a3236ce48
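The two fprobe/rethook bullets above describe an ordering problem in probe
teardown. The sketch below summarizes the corrected sequence; it is a
simplified illustration using the real kernel APIs (rethook_stop(),
unregister_ftrace_function(), rethook_free()), not the exact
unregister_fprobe() body, which appears in the diff further down.

#include <linux/fprobe.h>
#include <linux/ftrace.h>
#include <linux/rethook.h>

/* Illustrative only: a teardown helper mirroring the fixed ordering. */
static int example_fprobe_teardown(struct fprobe *fp)
{
        int ret;

        /* 1) Stop the rethook so no new exit handlers can start. */
        if (fp->rethook)
                rethook_stop(fp->rethook);

        /* 2) Unregister the ftrace_ops; this waits until handlers that are
         *    still running on other CPUs have finished.
         */
        ret = unregister_ftrace_function(&fp->ops);
        if (ret < 0)
                return ret;

        /* 3) Only now free the rethook. Freeing it before step 2 could let a
         *    still-running handler touch freed memory (use after free).
         */
        if (fp->rethook)
                rethook_free(fp->rethook);

        return 0;
}
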
--- a/include/linux/rethook.h
+++ b/include/linux/rethook.h
@@ -59,6 +59,7 @@ struct rethook_node {
 };
 
 struct rethook *rethook_alloc(void *data, rethook_handler_t handler);
+void rethook_stop(struct rethook *rh);
 void rethook_free(struct rethook *rh);
 void rethook_add_node(struct rethook *rh, struct rethook_node *node);
 struct rethook_node *rethook_try_get(struct rethook *rh);

--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1072,7 +1072,7 @@ static int kprobe_ftrace_enabled;
 static int __arm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
                                int *cnt)
 {
-        int ret = 0;
+        int ret;
 
         lockdep_assert_held(&kprobe_mutex);
 
@@ -1110,7 +1110,7 @@ static int arm_kprobe_ftrace(struct kprobe *p)
 static int __disarm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
                                   int *cnt)
 {
-        int ret = 0;
+        int ret;
 
         lockdep_assert_held(&kprobe_mutex);
 
@@ -2007,9 +2007,9 @@ void __weak arch_kretprobe_fixup_return(struct pt_regs *regs,
 unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs,
                                              void *frame_pointer)
 {
-        kprobe_opcode_t *correct_ret_addr = NULL;
         struct kretprobe_instance *ri = NULL;
         struct llist_node *first, *node = NULL;
+        kprobe_opcode_t *correct_ret_addr;
         struct kretprobe *rp;
 
         /* Find correct address and all nodes for this frame. */
@@ -2693,7 +2693,7 @@ void kprobe_free_init_mem(void)
 
 static int __init init_kprobes(void)
 {
-        int i, err = 0;
+        int i, err;
 
         /* FIXME allocate the probe table, currently defined statically */
         /* initialize all list heads */

--- a/kernel/trace/fprobe.c
+++ b/kernel/trace/fprobe.c
@@ -102,12 +102,14 @@ static void fprobe_kprobe_handler(unsigned long ip, unsigned long parent_ip,
         if (unlikely(kprobe_running())) {
                 fp->nmissed++;
-                return;
+                goto recursion_unlock;
         }
 
         kprobe_busy_begin();
         __fprobe_handler(ip, parent_ip, ops, fregs);
         kprobe_busy_end();
+
+recursion_unlock:
         ftrace_test_recursion_unlock(bit);
 }
 
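The hunk above restores the rule that a successful
ftrace_test_recursion_trylock() must be paired with
ftrace_test_recursion_unlock() on every exit path, including early exits.
A minimal sketch of that pattern follows; my_ftrace_callback(), do_work()
and do_more_work() are hypothetical, while the two ftrace_test_recursion_*()
calls are the real API from <linux/trace_recursion.h>.

#include <linux/trace_recursion.h>

static bool do_work(unsigned long ip);          /* hypothetical */
static void do_more_work(unsigned long ip);     /* hypothetical */

static void my_ftrace_callback(unsigned long ip, unsigned long parent_ip)
{
        int bit;

        bit = ftrace_test_recursion_trylock(ip, parent_ip);
        if (bit < 0)
                return;         /* recursion detected: nothing was locked */

        if (!do_work(ip))
                goto out;       /* early exit still has to unlock */

        do_more_work(parent_ip);
out:
        ftrace_test_recursion_unlock(bit);      /* pairs with the trylock above */
}
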
@@ -371,19 +373,16 @@ int unregister_fprobe(struct fprobe *fp)
         if (!fprobe_is_registered(fp))
                 return -EINVAL;
 
-        /*
-         * rethook_free() starts disabling the rethook, but the rethook handlers
-         * may be running on other processors at this point. To make sure that all
-         * current running handlers are finished, call unregister_ftrace_function()
-         * after this.
-         */
         if (fp->rethook)
-                rethook_free(fp->rethook);
+                rethook_stop(fp->rethook);
 
         ret = unregister_ftrace_function(&fp->ops);
         if (ret < 0)
                 return ret;
 
+        if (fp->rethook)
+                rethook_free(fp->rethook);
+
         ftrace_free_filter(&fp->ops);
 
         return ret;

--- a/kernel/trace/rethook.c
+++ b/kernel/trace/rethook.c
@@ -53,6 +53,19 @@ static void rethook_free_rcu(struct rcu_head *head)
         kfree(rh);
 }
 
+/**
+ * rethook_stop() - Stop using a rethook.
+ * @rh: the struct rethook to stop.
+ *
+ * Stop using a rethook to prepare for freeing it. If you want to wait for
+ * all running rethook handler before calling rethook_free(), you need to
+ * call this first and wait RCU, and call rethook_free().
+ */
+void rethook_stop(struct rethook *rh)
+{
+        WRITE_ONCE(rh->handler, NULL);
+}
+
 /**
  * rethook_free() - Free struct rethook.
  * @rh: the struct rethook to be freed.

--- a/kernel/trace/trace_eprobe.c
+++ b/kernel/trace/trace_eprobe.c
@@ -644,6 +644,7 @@ static int enable_trace_eprobe(struct trace_event_call *call,
         struct trace_eprobe *ep;
         bool enabled;
         int ret = 0;
+        int cnt = 0;
 
         tp = trace_probe_primary_from_call(call);
         if (WARN_ON_ONCE(!tp))
@@ -667,12 +668,25 @@ static int enable_trace_eprobe(struct trace_event_call *call,
                 if (ret)
                         break;
                 enabled = true;
+                cnt++;
         }
 
         if (ret) {
                 /* Failed to enable one of them. Roll back all */
-                if (enabled)
-                        disable_eprobe(ep, file->tr);
+                if (enabled) {
+                        /*
+                         * It's a bug if one failed for something other than memory
+                         * not being available but another eprobe succeeded.
+                         */
+                        WARN_ON_ONCE(ret != -ENOMEM);
+
+                        list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
+                                ep = container_of(pos, struct trace_eprobe, tp);
+                                disable_eprobe(ep, file->tr);
+                                if (!--cnt)
+                                        break;
+                        }
+                }
                 if (file)
                         trace_probe_remove_file(tp, file);
                 else
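The rollback in the hunk above uses a counter so that only the eprobes that
were actually enabled get disabled again. The same pattern in isolation, as a
small self-contained userspace sketch (all names are hypothetical stand-ins,
not the eprobe code):

/* Generic "enable all or roll back what succeeded" pattern, mirroring the
 * counting logic in the enable_trace_eprobe() fix above.
 * Build with: cc -o rollback rollback.c
 */
#include <stdio.h>

#define N 4

/* Pretend item 2 fails to enable, like one eprobe failing to attach. */
static int item_enable(int i)
{
        if (i == 2)
                return -1;
        printf("enabled %d\n", i);
        return 0;
}

static void item_disable(int i)
{
        printf("disabled %d\n", i);
}

int main(void)
{
        int i, cnt = 0, ret = 0;

        for (i = 0; i < N; i++) {
                ret = item_enable(i);
                if (ret)
                        break;
                cnt++;                  /* count successful enables */
        }

        if (ret) {
                /* Roll back only the 'cnt' items that were actually enabled. */
                for (i = 0; i < cnt; i++)
                        item_disable(i);
        }
        return ret ? 1 : 0;
}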