tracing: Add support for SOFT_DISABLE to syscall events

The original SOFT_DISABLE patches didn't add support for soft disable
of syscall events; this adds it.

Add an array of ftrace_event_file pointers indexed by syscall number
to the trace array and remove the existing enabled bitmaps, which as a
result are now redundant.  The ftrace_event_file structs in turn
contain the soft disable flags we need for per-syscall soft disable
accounting.

Adding ftrace_event_files also means we can remove the USE_CALL_FILTER
bit, thus enabling multibuffer filter support for syscall events.

Link: http://lkml.kernel.org/r/6e72b566e85d8df8042f133efbc6c30e21fb017e.1382620672.git.tom.zanussi@linux.intel.com

Signed-off-by: Tom Zanussi <tom.zanussi@linux.intel.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
This commit is contained in:
Tom Zanussi 2013-10-24 08:34:19 -05:00 committed by Steven Rostedt
Parent 38de93abec
Commit d562aff93b
3 changed files with 36 additions and 14 deletions

View file

@@ -120,7 +120,7 @@ extern struct trace_event_functions exit_syscall_print_funcs;
.class = &event_class_syscall_enter, \ .class = &event_class_syscall_enter, \
.event.funcs = &enter_syscall_print_funcs, \ .event.funcs = &enter_syscall_print_funcs, \
.data = (void *)&__syscall_meta_##sname,\ .data = (void *)&__syscall_meta_##sname,\
.flags = TRACE_EVENT_FL_CAP_ANY | TRACE_EVENT_FL_USE_CALL_FILTER,\ .flags = TRACE_EVENT_FL_CAP_ANY, \
}; \ }; \
static struct ftrace_event_call __used \ static struct ftrace_event_call __used \
__attribute__((section("_ftrace_events"))) \ __attribute__((section("_ftrace_events"))) \
@@ -134,7 +134,7 @@ extern struct trace_event_functions exit_syscall_print_funcs;
.class = &event_class_syscall_exit, \ .class = &event_class_syscall_exit, \
.event.funcs = &exit_syscall_print_funcs, \ .event.funcs = &exit_syscall_print_funcs, \
.data = (void *)&__syscall_meta_##sname,\ .data = (void *)&__syscall_meta_##sname,\
.flags = TRACE_EVENT_FL_CAP_ANY | TRACE_EVENT_FL_USE_CALL_FILTER,\ .flags = TRACE_EVENT_FL_CAP_ANY, \
}; \ }; \
static struct ftrace_event_call __used \ static struct ftrace_event_call __used \
__attribute__((section("_ftrace_events"))) \ __attribute__((section("_ftrace_events"))) \

View file

@@ -192,8 +192,8 @@ struct trace_array {
#ifdef CONFIG_FTRACE_SYSCALLS #ifdef CONFIG_FTRACE_SYSCALLS
int sys_refcount_enter; int sys_refcount_enter;
int sys_refcount_exit; int sys_refcount_exit;
DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls); struct ftrace_event_file *enter_syscall_files[NR_syscalls];
DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls); struct ftrace_event_file *exit_syscall_files[NR_syscalls];
#endif #endif
int stop_count; int stop_count;
int clock_id; int clock_id;

View file

@@ -302,6 +302,7 @@ static int __init syscall_exit_define_fields(struct ftrace_event_call *call)
static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id) static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
{ {
struct trace_array *tr = data; struct trace_array *tr = data;
struct ftrace_event_file *ftrace_file;
struct syscall_trace_enter *entry; struct syscall_trace_enter *entry;
struct syscall_metadata *sys_data; struct syscall_metadata *sys_data;
struct ring_buffer_event *event; struct ring_buffer_event *event;
@@ -314,7 +315,13 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
syscall_nr = trace_get_syscall_nr(current, regs); syscall_nr = trace_get_syscall_nr(current, regs);
if (syscall_nr < 0) if (syscall_nr < 0)
return; return;
if (!test_bit(syscall_nr, tr->enabled_enter_syscalls))
/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */
ftrace_file = rcu_dereference_sched(tr->enter_syscall_files[syscall_nr]);
if (!ftrace_file)
return;
if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
return; return;
sys_data = syscall_nr_to_meta(syscall_nr); sys_data = syscall_nr_to_meta(syscall_nr);
@@ -336,8 +343,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
entry->nr = syscall_nr; entry->nr = syscall_nr;
syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args); syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);
if (!call_filter_check_discard(sys_data->enter_event, entry, if (!filter_check_discard(ftrace_file, entry, buffer, event))
buffer, event))
trace_current_buffer_unlock_commit(buffer, event, trace_current_buffer_unlock_commit(buffer, event,
irq_flags, pc); irq_flags, pc);
} }
@@ -345,6 +351,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret) static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
{ {
struct trace_array *tr = data; struct trace_array *tr = data;
struct ftrace_event_file *ftrace_file;
struct syscall_trace_exit *entry; struct syscall_trace_exit *entry;
struct syscall_metadata *sys_data; struct syscall_metadata *sys_data;
struct ring_buffer_event *event; struct ring_buffer_event *event;
@@ -356,7 +363,13 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
syscall_nr = trace_get_syscall_nr(current, regs); syscall_nr = trace_get_syscall_nr(current, regs);
if (syscall_nr < 0) if (syscall_nr < 0)
return; return;
if (!test_bit(syscall_nr, tr->enabled_exit_syscalls))
/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */
ftrace_file = rcu_dereference_sched(tr->exit_syscall_files[syscall_nr]);
if (!ftrace_file)
return;
if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
return; return;
sys_data = syscall_nr_to_meta(syscall_nr); sys_data = syscall_nr_to_meta(syscall_nr);
@@ -377,8 +390,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
entry->nr = syscall_nr; entry->nr = syscall_nr;
entry->ret = syscall_get_return_value(current, regs); entry->ret = syscall_get_return_value(current, regs);
if (!call_filter_check_discard(sys_data->exit_event, entry, if (!filter_check_discard(ftrace_file, entry, buffer, event))
buffer, event))
trace_current_buffer_unlock_commit(buffer, event, trace_current_buffer_unlock_commit(buffer, event,
irq_flags, pc); irq_flags, pc);
} }
@@ -397,7 +409,7 @@ static int reg_event_syscall_enter(struct ftrace_event_file *file,
if (!tr->sys_refcount_enter) if (!tr->sys_refcount_enter)
ret = register_trace_sys_enter(ftrace_syscall_enter, tr); ret = register_trace_sys_enter(ftrace_syscall_enter, tr);
if (!ret) { if (!ret) {
set_bit(num, tr->enabled_enter_syscalls); rcu_assign_pointer(tr->enter_syscall_files[num], file);
tr->sys_refcount_enter++; tr->sys_refcount_enter++;
} }
mutex_unlock(&syscall_trace_lock); mutex_unlock(&syscall_trace_lock);
@@ -415,10 +427,15 @@ static void unreg_event_syscall_enter(struct ftrace_event_file *file,
return; return;
mutex_lock(&syscall_trace_lock); mutex_lock(&syscall_trace_lock);
tr->sys_refcount_enter--; tr->sys_refcount_enter--;
clear_bit(num, tr->enabled_enter_syscalls); rcu_assign_pointer(tr->enter_syscall_files[num], NULL);
if (!tr->sys_refcount_enter) if (!tr->sys_refcount_enter)
unregister_trace_sys_enter(ftrace_syscall_enter, tr); unregister_trace_sys_enter(ftrace_syscall_enter, tr);
mutex_unlock(&syscall_trace_lock); mutex_unlock(&syscall_trace_lock);
/*
* Callers expect the event to be completely disabled on
* return, so wait for current handlers to finish.
*/
synchronize_sched();
} }
static int reg_event_syscall_exit(struct ftrace_event_file *file, static int reg_event_syscall_exit(struct ftrace_event_file *file,
@@ -435,7 +452,7 @@ static int reg_event_syscall_exit(struct ftrace_event_file *file,
if (!tr->sys_refcount_exit) if (!tr->sys_refcount_exit)
ret = register_trace_sys_exit(ftrace_syscall_exit, tr); ret = register_trace_sys_exit(ftrace_syscall_exit, tr);
if (!ret) { if (!ret) {
set_bit(num, tr->enabled_exit_syscalls); rcu_assign_pointer(tr->exit_syscall_files[num], file);
tr->sys_refcount_exit++; tr->sys_refcount_exit++;
} }
mutex_unlock(&syscall_trace_lock); mutex_unlock(&syscall_trace_lock);
@@ -453,10 +470,15 @@ static void unreg_event_syscall_exit(struct ftrace_event_file *file,
return; return;
mutex_lock(&syscall_trace_lock); mutex_lock(&syscall_trace_lock);
tr->sys_refcount_exit--; tr->sys_refcount_exit--;
clear_bit(num, tr->enabled_exit_syscalls); rcu_assign_pointer(tr->exit_syscall_files[num], NULL);
if (!tr->sys_refcount_exit) if (!tr->sys_refcount_exit)
unregister_trace_sys_exit(ftrace_syscall_exit, tr); unregister_trace_sys_exit(ftrace_syscall_exit, tr);
mutex_unlock(&syscall_trace_lock); mutex_unlock(&syscall_trace_lock);
/*
* Callers expect the event to be completely disabled on
* return, so wait for current handlers to finish.
*/
synchronize_sched();
} }
static int __init init_syscall_trace(struct ftrace_event_call *call) static int __init init_syscall_trace(struct ftrace_event_call *call)