tracing/kprobes: Kill probe_enable_lock

enable_trace_probe() and disable_trace_probe() should not worry about
serialization; the caller (perf_trace_init() or __ftrace_set_clr_event())
holds event_mutex.

They are also called by kprobe_trace_self_tests_init(), but this __init
function can't race with itself or with trace_events.c.

And note that this code depended on event_mutex even before commit
41a7dd420c, which introduced probe_enable_lock. In fact, it assumes that
the caller, kprobe_register(), can never race with itself; otherwise,
say, the tp->flags manipulations would be racy.

Link: http://lkml.kernel.org/r/20130620173809.GA13158@redhat.com

Acked-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
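
To make the serialization argument concrete, here is a minimal userspace
model (not kernel code) of the rule this patch relies on: every updater
path into enable_trace_probe()/disable_trace_probe() already runs under
one caller-held mutex, so the probe code itself takes no lock. The names
probe_register(), probe_enable(), and probe_disable() are illustrative
stand-ins for kprobe_register() and friends, not the kernel API.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t event_mutex = PTHREAD_MUTEX_INITIALIZER;

struct probe { int flags; };

/* Like enable_trace_probe()/disable_trace_probe(): assumes the caller
 * holds event_mutex, so the flags update needs no lock of its own. */
static void probe_enable(struct probe *p)  { p->flags |= 1; }
static void probe_disable(struct probe *p) { p->flags &= ~1; }

/* Like kprobe_register(): the only non-__init entry point, and both of
 * its callers (perf_trace_init()/__ftrace_set_clr_event() in the kernel)
 * take event_mutex first. */
static void probe_register(struct probe *p, int enable)
{
	pthread_mutex_lock(&event_mutex);
	if (enable)
		probe_enable(p);
	else
		probe_disable(p);
	pthread_mutex_unlock(&event_mutex);
}

int main(void)
{
	struct probe p = { 0 };

	probe_register(&p, 1);
	printf("flags after enable:  %d\n", p.flags);
	probe_register(&p, 0);
	printf("flags after disable: %d\n", p.flags);
	return 0;
}

Build with gcc -pthread; the only point is that the mutex lives in the
caller, mirroring how event_mutex serializes kprobe_register().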
Oleg Nesterov, 2013-06-20 19:38:09 +02:00; committed by Steven Rostedt
Parent: 288e984e62
Commit: 3fe3d6193e
1 file changed, 20 insertions(+), 23 deletions(-)

--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -183,16 +183,15 @@ static struct trace_probe *find_trace_probe(const char *event,
 	return NULL;
 }
 
+/*
+ * This and enable_trace_probe/disable_trace_probe rely on event_mutex
+ * held by the caller, __ftrace_set_clr_event().
+ */
 static int trace_probe_nr_files(struct trace_probe *tp)
 {
-	struct ftrace_event_file **file;
+	struct ftrace_event_file **file = rcu_dereference_raw(tp->files);
 	int ret = 0;
 
-	/*
-	 * Since all tp->files updater is protected by probe_enable_lock,
-	 * we don't need to lock an rcu_read_lock.
-	 */
-	file = rcu_dereference_raw(tp->files);
 	if (file)
 		while (*(file++))
 			ret++;
@@ -200,8 +199,6 @@ static int trace_probe_nr_files(struct trace_probe *tp)
 	return ret;
 }
 
-static DEFINE_MUTEX(probe_enable_lock);
-
 /*
  * Enable trace_probe
  * if the file is NULL, enable "perf" handler, or enable "trace" handler.
@@ -211,8 +208,6 @@ enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
 {
 	int ret = 0;
 
-	mutex_lock(&probe_enable_lock);
-
 	if (file) {
 		struct ftrace_event_file **new, **old;
 		int n = trace_probe_nr_files(tp);
@@ -223,7 +218,7 @@ enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
 			      GFP_KERNEL);
 		if (!new) {
 			ret = -ENOMEM;
-			goto out_unlock;
+			goto out;
 		}
 		memcpy(new, old, n * sizeof(struct ftrace_event_file *));
 		new[n] = file;
@@ -246,10 +241,7 @@ enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
 		else
 			ret = enable_kprobe(&tp->rp.kp);
 	}
-
- out_unlock:
-	mutex_unlock(&probe_enable_lock);
-
+ out:
 	return ret;
 }
 
@@ -282,8 +274,6 @@ disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
 {
 	int ret = 0;
 
-	mutex_lock(&probe_enable_lock);
-
 	if (file) {
 		struct ftrace_event_file **new, **old;
 		int n = trace_probe_nr_files(tp);
@@ -292,7 +282,7 @@ disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
 		old = rcu_dereference_raw(tp->files);
 		if (n == 0 || trace_probe_file_index(tp, file) < 0) {
 			ret = -EINVAL;
-			goto out_unlock;
+			goto out;
 		}
 
 		if (n == 1) {	/* Remove the last file */
@@ -303,7 +293,7 @@ disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
 				      GFP_KERNEL);
 			if (!new) {
 				ret = -ENOMEM;
-				goto out_unlock;
+				goto out;
 			}
 
 			/* This copy & check loop copies the NULL stopper too */
@@ -326,10 +316,7 @@ disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
 		else
 			disable_kprobe(&tp->rp.kp);
 	}
-
- out_unlock:
-	mutex_unlock(&probe_enable_lock);
-
+ out:
 	return ret;
 }
 
@@ -1214,6 +1201,12 @@ kretprobe_perf_func(struct trace_probe *tp, struct kretprobe_instance *ri,
 }
 #endif	/* CONFIG_PERF_EVENTS */
 
+/*
+ * called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
+ *
+ * kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe
+ * lockless, but we can't race with this __init function.
+ */
 static __kprobes
 int kprobe_register(struct ftrace_event_call *event,
 		    enum trace_reg type, void *data)
@@ -1379,6 +1372,10 @@ find_trace_probe_file(struct trace_probe *tp, struct trace_array *tr)
 	return NULL;
 }
 
+/*
+ * Nobody but us can call enable_trace_probe/disable_trace_probe at this
+ * stage, we can do this lockless.
+ */
 static __init int kprobe_trace_self_tests_init(void)
 {
 	int ret, warn = 0;
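
The open-coded tp->files update above (count, allocate n+2 slots, copy,
append, publish) is what event_mutex protects. Below is a minimal
userspace sketch of that copy-and-publish pattern; all names (file_list,
add_file, nr_files) are illustrative rather than kernel API, and the RCU
publish/free steps are reduced to a plain pointer swap.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct file_list {
	const char **files;	/* NULL-terminated array, read locklessly */
};

/* Like trace_probe_nr_files(): count entries up to the NULL stopper. */
static int nr_files(const char **files)
{
	int n = 0;

	if (files)
		while (*(files++))
			n++;
	return n;
}

/* Like the enable path: build a new array and publish it with a single
 * pointer store. The caller must hold the one updater lock (event_mutex
 * in the kernel); there the store is rcu_assign_pointer() and the old
 * array is freed only after an RCU grace period. */
static int add_file(struct file_list *l, const char *file)
{
	const char **old = l->files;
	int n = nr_files(old);
	const char **new = malloc((n + 2) * sizeof(*new));

	if (!new)
		return -1;
	if (n)
		memcpy(new, old, n * sizeof(*new));
	new[n] = file;
	new[n + 1] = NULL;	/* stopper */
	l->files = new;
	free(old);
	return 0;
}

int main(void)
{
	struct file_list l = { NULL };

	add_file(&l, "events/kprobes/p1/enable");
	add_file(&l, "instances/foo/events/kprobes/p1/enable");
	printf("%d files registered\n", nr_files(l.files));
	return 0;
}

Two unserialized callers of add_file() could both read the same old array
and each publish a copy missing the other's entry; that lost-update race
is exactly what the caller-held event_mutex rules out.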