The majority of these changes are from Masami Hiramatsu bringing kprobes
up to par with the latest changes to ftrace (multi buffering and the new
function probes). He also discovered and fixed some bugs in doing so.
When pulling in his patches, I also found a few minor bugs as well and
fixed them.

This also includes a compile fix for some archs that select the ring
buffer but not tracing.

I based this off of the last patch you took from me that fixed the merge
conflict error, as that was the commit that had all the changes I needed
for this set of changes.
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.12 (GNU/Linux)

iQEcBAABAgAGBQJRjYnJAAoJEOdOSU1xswtMg9EH/iFs438FgrNMk2ZdQftmqcqA
cqcactHo1mmoHjAoLZT/oDBjEThhVUuqzMXrFRutSYcTh4PsQEC3arX0mpsC+T12
UEEV/tZS3TXH+GXEyrOit/O3kzntQcDHwJDV4+0n80IrJmw4IDZbnV3R8DWjS6wp
so+dq0A1pwehcG/upgpw1oTKsGv1G/p6vyf968B6W44icHEClLiph4JE2kzE6D3r
fzSpOLaQoBEvwIRf6xRKxi240VqIItXwfG7pwNpPpSC37gRLzm74zGr+Sj93/k1y
pARbZ/5XO7/pcVYQYupErRAoV5in+QMZ67k5G1vQIvyOS9r039catbQf/7PkzcI=
=EZCE
-----END PGP SIGNATURE-----

Merge tag 'trace-fixes-v3.10' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing/kprobes update from Steven Rostedt:
 "The majority of these changes are from Masami Hiramatsu bringing
  kprobes up to par with the latest changes to ftrace (multi buffering
  and the new function probes). He also discovered and fixed some bugs
  in doing so. When pulling in his patches, I also found a few minor
  bugs as well and fixed them.

  This also includes a compile fix for some archs that select the ring
  buffer but not tracing.

  I based this off of the last patch you took from me that fixed the
  merge conflict error, as that was the commit that had all the changes
  I needed for this set of changes."

* tag 'trace-fixes-v3.10' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  tracing/kprobes: Support soft-mode disabling
  tracing/kprobes: Support ftrace_event_file base multibuffer
  tracing/kprobes: Pass trace_probe directly from dispatcher
  tracing/kprobes: Increment probe hit-count even if it is used by perf
  tracing/kprobes: Use bool for retprobe checker
  ftrace: Fix function probe when more than one probe is added
  ftrace: Fix the output of enabled_functions debug file
  ftrace: Fix locking in register_ftrace_function_probe()
  tracing: Add helper function trace_create_new_event() to remove duplicate code
  tracing: Modify soft-mode only if there's no other referrer
  tracing: Indicate enabled soft-mode in enable file
  tracing/kprobes: Fix to increment return event probe hit-count
  ftrace: Cleanup regex_lock and ftrace_lock around hash updating
  ftrace, kprobes: Fix a deadlock on ftrace_regex_lock
  ftrace: Have ftrace_regex_write() return either read or error
  tracing: Return error if register_ftrace_function_probe() fails for event_enable_func()
  tracing: Don't succeed if event_enable_func did not register anything
  ring-buffer: Select IRQ_WORK
commit 26b840ae5d
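To make the multi-buffer and soft-mode work concrete, here is a usage
sketch (hedged: the probe name "myprobe" and the instance name "foo" are
made up, and it assumes a kernel carrying this pull with debugfs mounted
at /sys/kernel/debug):

    cd /sys/kernel/debug/tracing

    # Define a kprobe event (this syntax predates the series).
    echo 'p:myprobe do_sys_open' >> kprobe_events

    # With the ftrace_event_file based multi-buffer support, the same
    # kprobe event can now be enabled in a sub-buffer (instance)
    # independently of the top-level buffer.
    mkdir instances/foo
    echo 1 > instances/foo/events/kprobes/myprobe/enable
    cat instances/foo/trace

    # The per-event enable file now also reports soft-mode: "1*" means
    # enabled and in soft mode, "0*" means soft-disabled.
    cat events/kprobes/myprobe/enable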
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -90,6 +90,8 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
  * not set this, then the ftrace infrastructure will add recursion
  * protection for the caller.
  * STUB - The ftrace_ops is just a place holder.
+ * INITIALIZED - The ftrace_ops has already been initialized (first use time
+ *            register_ftrace_function() is called, it will initialized the ops)
  */
 enum {
         FTRACE_OPS_FL_ENABLED = 1 << 0,
@@ -100,6 +102,7 @@ enum {
         FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 5,
         FTRACE_OPS_FL_RECURSION_SAFE = 1 << 6,
         FTRACE_OPS_FL_STUB = 1 << 7,
+        FTRACE_OPS_FL_INITIALIZED = 1 << 8,
 };
 
 struct ftrace_ops {
@@ -110,6 +113,7 @@ struct ftrace_ops {
 #ifdef CONFIG_DYNAMIC_FTRACE
         struct ftrace_hash *notrace_hash;
         struct ftrace_hash *filter_hash;
+        struct mutex regex_lock;
 #endif
 };
 
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -293,6 +293,7 @@ struct ftrace_event_file {
          * caching and such. Which is mostly OK ;-)
          */
         unsigned long flags;
+        atomic_t sm_ref;        /* soft-mode reference counter */
 };
 
 #define __TRACE_EVENT_FLAGS(name, value) \
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -71,6 +71,7 @@ config TRACE_CLOCK
 config RING_BUFFER
         bool
         select TRACE_CLOCK
+        select IRQ_WORK
 
 config FTRACE_NMI_ENTER
         bool
@@ -107,7 +108,6 @@ config TRACING
         select BINARY_PRINTF
         select EVENT_TRACING
         select TRACE_CLOCK
-        select IRQ_WORK
 
 config GENERIC_TRACER
         bool
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -64,6 +64,13 @@
 
 #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+#define INIT_REGEX_LOCK(opsname) \
+        .regex_lock = __MUTEX_INITIALIZER(opsname.regex_lock),
+#else
+#define INIT_REGEX_LOCK(opsname)
+#endif
+
 static struct ftrace_ops ftrace_list_end __read_mostly = {
         .func = ftrace_stub,
         .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
@@ -131,6 +138,16 @@ static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
         while (likely(op = rcu_dereference_raw((op)->next)) && \
                unlikely((op) != &ftrace_list_end))
 
+static inline void ftrace_ops_init(struct ftrace_ops *ops)
+{
+#ifdef CONFIG_DYNAMIC_FTRACE
+        if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
+                mutex_init(&ops->regex_lock);
+                ops->flags |= FTRACE_OPS_FL_INITIALIZED;
+        }
+#endif
+}
+
 /**
  * ftrace_nr_registered_ops - return number of ops registered
  *
@@ -907,7 +924,8 @@ static void unregister_ftrace_profiler(void)
 #else
 static struct ftrace_ops ftrace_profile_ops __read_mostly = {
         .func = function_profile_call,
-        .flags = FTRACE_OPS_FL_RECURSION_SAFE,
+        .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+        INIT_REGEX_LOCK(ftrace_profile_ops)
 };
 
 static int register_ftrace_profiler(void)
@@ -1103,11 +1121,10 @@ static struct ftrace_ops global_ops = {
         .func = ftrace_stub,
         .notrace_hash = EMPTY_HASH,
         .filter_hash = EMPTY_HASH,
-        .flags = FTRACE_OPS_FL_RECURSION_SAFE,
+        .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+        INIT_REGEX_LOCK(global_ops)
 };
 
-static DEFINE_MUTEX(ftrace_regex_lock);
-
 struct ftrace_page {
         struct ftrace_page *next;
         struct dyn_ftrace *records;
@@ -1247,6 +1264,7 @@ static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
 
 void ftrace_free_filter(struct ftrace_ops *ops)
 {
+        ftrace_ops_init(ops);
         free_ftrace_hash(ops->filter_hash);
         free_ftrace_hash(ops->notrace_hash);
 }
@@ -2441,7 +2459,7 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
              !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
 
             ((iter->flags & FTRACE_ITER_ENABLED) &&
-             !(rec->flags & ~FTRACE_FL_MASK))) {
+             !(rec->flags & FTRACE_FL_ENABLED))) {
 
                 rec = NULL;
                 goto retry;
@@ -2624,6 +2642,8 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
         struct ftrace_hash *hash;
         int ret = 0;
 
+        ftrace_ops_init(ops);
+
         if (unlikely(ftrace_disabled))
                 return -ENODEV;
 
@@ -2636,28 +2656,26 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
                 return -ENOMEM;
         }
 
+        iter->ops = ops;
+        iter->flags = flag;
+
+        mutex_lock(&ops->regex_lock);
+
         if (flag & FTRACE_ITER_NOTRACE)
                 hash = ops->notrace_hash;
         else
                 hash = ops->filter_hash;
 
-        iter->ops = ops;
-        iter->flags = flag;
-
         if (file->f_mode & FMODE_WRITE) {
-                mutex_lock(&ftrace_lock);
                 iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
-                mutex_unlock(&ftrace_lock);
-
                 if (!iter->hash) {
                         trace_parser_put(&iter->parser);
                         kfree(iter);
-                        return -ENOMEM;
+                        ret = -ENOMEM;
+                        goto out_unlock;
                 }
         }
 
-        mutex_lock(&ftrace_regex_lock);
-
         if ((file->f_mode & FMODE_WRITE) &&
             (file->f_flags & O_TRUNC))
                 ftrace_filter_reset(iter->hash);
@@ -2677,7 +2695,9 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
                 }
         } else
                 file->private_data = iter;
-        mutex_unlock(&ftrace_regex_lock);
+
+ out_unlock:
+        mutex_unlock(&ops->regex_lock);
 
         return ret;
 }
@@ -2910,6 +2930,8 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
 static struct ftrace_ops trace_probe_ops __read_mostly =
 {
         .func = function_trace_probe_call,
+        .flags = FTRACE_OPS_FL_INITIALIZED,
+        INIT_REGEX_LOCK(trace_probe_ops)
 };
 
 static int ftrace_probe_registered;
@@ -2919,8 +2941,12 @@ static void __enable_ftrace_function_probe(void)
         int ret;
         int i;
 
-        if (ftrace_probe_registered)
+        if (ftrace_probe_registered) {
+                /* still need to update the function call sites */
+                if (ftrace_enabled)
+                        ftrace_run_update_code(FTRACE_UPDATE_CALLS);
                 return;
+        }
 
         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
                 struct hlist_head *hhd = &ftrace_func_hash[i];
@@ -2990,19 +3016,21 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
         if (WARN_ON(not))
                 return -EINVAL;
 
-        mutex_lock(&ftrace_lock);
+        mutex_lock(&trace_probe_ops.regex_lock);
 
         hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
         if (!hash) {
                 count = -ENOMEM;
-                goto out_unlock;
+                goto out;
         }
 
         if (unlikely(ftrace_disabled)) {
                 count = -ENODEV;
-                goto out_unlock;
+                goto out;
         }
 
+        mutex_lock(&ftrace_lock);
+
         do_for_each_ftrace_rec(pg, rec) {
 
                 if (!ftrace_match_record(rec, NULL, search, len, type))
@@ -3056,6 +3084,8 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 
  out_unlock:
         mutex_unlock(&ftrace_lock);
+ out:
+        mutex_unlock(&trace_probe_ops.regex_lock);
         free_ftrace_hash(hash);
 
         return count;
@@ -3095,7 +3125,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                 return;
         }
 
-        mutex_lock(&ftrace_lock);
+        mutex_lock(&trace_probe_ops.regex_lock);
 
         hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
         if (!hash)
@@ -3133,6 +3163,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                         list_add(&entry->free_list, &free_list);
                 }
         }
+        mutex_lock(&ftrace_lock);
         __disable_ftrace_function_probe();
         /*
          * Remove after the disable is called. Otherwise, if the last
@@ -3144,9 +3175,10 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                 list_del(&entry->free_list);
                 ftrace_free_entry(entry);
         }
+        mutex_unlock(&ftrace_lock);
 
  out_unlock:
-        mutex_unlock(&ftrace_lock);
+        mutex_unlock(&trace_probe_ops.regex_lock);
         free_ftrace_hash(hash);
 }
 
@@ -3256,18 +3288,17 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
         if (!cnt)
                 return 0;
 
-        mutex_lock(&ftrace_regex_lock);
-
-        ret = -ENODEV;
-        if (unlikely(ftrace_disabled))
-                goto out_unlock;
-
         if (file->f_mode & FMODE_READ) {
                 struct seq_file *m = file->private_data;
                 iter = m->private;
         } else
                 iter = file->private_data;
 
+        if (unlikely(ftrace_disabled))
+                return -ENODEV;
+
+        /* iter->hash is a local copy, so we don't need regex_lock */
+
         parser = &iter->parser;
         read = trace_get_user(parser, ubuf, cnt, ppos);
 
@@ -3276,14 +3307,12 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
                 ret = ftrace_process_regex(iter->hash, parser->buffer,
                                            parser->idx, enable);
                 trace_parser_clear(parser);
-                if (ret)
-                        goto out_unlock;
+                if (ret < 0)
+                        goto out;
         }
 
         ret = read;
- out_unlock:
-        mutex_unlock(&ftrace_regex_lock);
-
+ out:
         return ret;
 }
 
@@ -3335,16 +3364,19 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
         if (unlikely(ftrace_disabled))
                 return -ENODEV;
 
+        mutex_lock(&ops->regex_lock);
+
         if (enable)
                 orig_hash = &ops->filter_hash;
         else
                 orig_hash = &ops->notrace_hash;
 
         hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
-        if (!hash)
-                return -ENOMEM;
+        if (!hash) {
+                ret = -ENOMEM;
+                goto out_regex_unlock;
+        }
 
-        mutex_lock(&ftrace_regex_lock);
         if (reset)
                 ftrace_filter_reset(hash);
         if (buf && !ftrace_match_records(hash, buf, len)) {
@@ -3366,7 +3398,7 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
         mutex_unlock(&ftrace_lock);
 
  out_regex_unlock:
-        mutex_unlock(&ftrace_regex_lock);
+        mutex_unlock(&ops->regex_lock);
 
         free_ftrace_hash(hash);
         return ret;
@@ -3392,6 +3424,7 @@ ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
                          int remove, int reset)
 {
+        ftrace_ops_init(ops);
         return ftrace_set_addr(ops, ip, remove, reset, 1);
 }
 EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
@@ -3416,6 +3449,7 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
                       int len, int reset)
 {
+        ftrace_ops_init(ops);
         return ftrace_set_regex(ops, buf, len, reset, 1);
 }
 EXPORT_SYMBOL_GPL(ftrace_set_filter);
@@ -3434,6 +3468,7 @@ EXPORT_SYMBOL_GPL(ftrace_set_filter);
 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
                        int len, int reset)
 {
+        ftrace_ops_init(ops);
         return ftrace_set_regex(ops, buf, len, reset, 0);
 }
 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
@@ -3524,6 +3559,8 @@ ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
 {
         char *func;
 
+        ftrace_ops_init(ops);
+
         while (buf) {
                 func = strsep(&buf, ",");
                 ftrace_set_regex(ops, func, strlen(func), 0, enable);
@@ -3551,10 +3588,8 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
         int filter_hash;
         int ret;
 
-        mutex_lock(&ftrace_regex_lock);
         if (file->f_mode & FMODE_READ) {
                 iter = m->private;
-
                 seq_release(inode, file);
         } else
                 iter = file->private_data;
@@ -3567,6 +3602,8 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
 
         trace_parser_put(parser);
 
+        mutex_lock(&iter->ops->regex_lock);
+
         if (file->f_mode & FMODE_WRITE) {
                 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
 
@@ -3584,10 +3621,11 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
 
                 mutex_unlock(&ftrace_lock);
         }
+
+        mutex_unlock(&iter->ops->regex_lock);
         free_ftrace_hash(iter->hash);
         kfree(iter);
 
-        mutex_unlock(&ftrace_regex_lock);
         return 0;
 }
 
@@ -4126,7 +4164,8 @@ void __init ftrace_init(void)
 
 static struct ftrace_ops global_ops = {
         .func = ftrace_stub,
-        .flags = FTRACE_OPS_FL_RECURSION_SAFE,
+        .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+        INIT_REGEX_LOCK(global_ops)
 };
 
 static int __init ftrace_nodyn_init(void)
@@ -4180,8 +4219,9 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
 }
 
 static struct ftrace_ops control_ops = {
         .func = ftrace_ops_control_func,
-        .flags = FTRACE_OPS_FL_RECURSION_SAFE,
+        .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+        INIT_REGEX_LOCK(control_ops)
 };
 
 static inline void
@@ -4539,6 +4579,8 @@ int register_ftrace_function(struct ftrace_ops *ops)
 {
         int ret = -1;
 
+        ftrace_ops_init(ops);
+
         mutex_lock(&ftrace_lock);
 
         ret = __register_ftrace_function(ops);
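For context on the regex_lock changes above: dynamically allocated
ftrace_ops now get their mutex set up lazily by ftrace_ops_init() on first
use, while ftrace.c's own statically defined ops are initialized at build
time. A minimal sketch of that static pattern, modeled on global_ops and
control_ops (my_ops and my_callback are hypothetical names, and
INIT_REGEX_LOCK() is local to kernel/trace/ftrace.c):

    /* Callback invoked for every function the filter hash selects. */
    static void my_callback(unsigned long ip, unsigned long parent_ip,
                            struct ftrace_ops *op, struct pt_regs *regs)
    {
    }

    static struct ftrace_ops my_ops = {
        .func  = my_callback,
        /* FTRACE_OPS_FL_INITIALIZED tells ftrace_ops_init() to skip the
         * lazy mutex_init(), because INIT_REGEX_LOCK() already did it. */
        .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
        INIT_REGEX_LOCK(my_ops)
    };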
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -251,7 +251,8 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
         switch (enable) {
         case 0:
                 /*
-                 * When soft_disable is set and enable is cleared, we want
+                 * When soft_disable is set and enable is cleared, the sm_ref
+                 * reference counter is decremented. If it reaches 0, we want
                  * to clear the SOFT_DISABLED flag but leave the event in the
                  * state that it was. That is, if the event was enabled and
                  * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
@@ -263,6 +264,8 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
                  * "soft enable"s (clearing the SOFT_DISABLED bit) wont work.
                  */
                 if (soft_disable) {
+                        if (atomic_dec_return(&file->sm_ref) > 0)
+                                break;
                         disable = file->flags & FTRACE_EVENT_FL_SOFT_DISABLED;
                         clear_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
                 } else
@@ -291,8 +294,11 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
                  */
                 if (!soft_disable)
                         clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
-                else
+                else {
+                        if (atomic_inc_return(&file->sm_ref) > 1)
+                                break;
                         set_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
+                }
 
                 if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) {
 
@@ -623,6 +629,8 @@ event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
         if (file->flags & FTRACE_EVENT_FL_ENABLED) {
                 if (file->flags & FTRACE_EVENT_FL_SOFT_DISABLED)
                         buf = "0*\n";
+                else if (file->flags & FTRACE_EVENT_FL_SOFT_MODE)
+                        buf = "1*\n";
                 else
                         buf = "1\n";
         } else
@@ -1521,6 +1529,24 @@ __register_event(struct ftrace_event_call *call, struct module *mod)
         return 0;
 }
 
+static struct ftrace_event_file *
+trace_create_new_event(struct ftrace_event_call *call,
+                       struct trace_array *tr)
+{
+        struct ftrace_event_file *file;
+
+        file = kmem_cache_alloc(file_cachep, GFP_TRACE);
+        if (!file)
+                return NULL;
+
+        file->event_call = call;
+        file->tr = tr;
+        atomic_set(&file->sm_ref, 0);
+        list_add(&file->list, &tr->events);
+
+        return file;
+}
+
 /* Add an event to a trace directory */
 static int
 __trace_add_new_event(struct ftrace_event_call *call,
@@ -1532,14 +1558,10 @@ __trace_add_new_event(struct ftrace_event_call *call,
 {
         struct ftrace_event_file *file;
 
-        file = kmem_cache_alloc(file_cachep, GFP_TRACE);
+        file = trace_create_new_event(call, tr);
         if (!file)
                 return -ENOMEM;
 
-        file->event_call = call;
-        file->tr = tr;
-        list_add(&file->list, &tr->events);
-
         return event_create_dir(tr->event_dir, file, id, enable, filter, format);
 }
 
@@ -1554,14 +1576,10 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
 {
         struct ftrace_event_file *file;
 
-        file = kmem_cache_alloc(file_cachep, GFP_TRACE);
+        file = trace_create_new_event(call, tr);
         if (!file)
                 return -ENOMEM;
 
-        file->event_call = call;
-        file->tr = tr;
-        list_add(&file->list, &tr->events);
-
         return 0;
 }
 
@@ -2061,8 +2079,18 @@ event_enable_func(struct ftrace_hash *hash,
         if (ret < 0)
                 goto out_put;
         ret = register_ftrace_function_probe(glob, ops, data);
-        if (!ret)
+        /*
+         * The above returns on success the # of functions enabled,
+         * but if it didn't find any functions it returns zero.
+         * Consider no functions a failure too.
+         */
+        if (!ret) {
+                ret = -ENOENT;
                 goto out_disable;
+        } else if (ret < 0)
+                goto out_disable;
+        /* Just return zero, not the number of enabled functions */
+        ret = 0;
  out:
         mutex_unlock(&event_mutex);
         return ret;
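The sm_ref counter added above matters once several users soft-enable the
same event. A sketch using the enable_event function probes that this
series also fixes (the function and event names are illustrative):

    cd /sys/kernel/debug/tracing

    # Two function probes that each put the same event into soft mode:
    echo 'do_sys_open:enable_event:sched:sched_switch' >> set_ftrace_filter
    echo 'vfs_read:enable_event:sched:sched_switch' >> set_ftrace_filter

    # "0*" = registered but soft-disabled, waiting for a probe to fire.
    cat events/sched/sched_switch/enable

    # With sm_ref, removing one probe no longer drops the event out of
    # soft mode while the other probe still references it.
    echo '!do_sys_open:enable_event:sched:sched_switch' >> set_ftrace_filter
    cat events/sched/sched_switch/enable    # still "0*"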
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -27,7 +27,6 @@
 /**
  * Kprobe event core functions
  */
-
 struct trace_probe {
         struct list_head list;
         struct kretprobe rp;    /* Use rp.kp for kprobe use */
@@ -36,6 +35,7 @@ struct trace_probe {
         const char *symbol;     /* symbol name */
         struct ftrace_event_class class;
         struct ftrace_event_call call;
+        struct ftrace_event_file **files;
         ssize_t size;           /* trace entry size */
         unsigned int nr_args;
         struct probe_arg args[];
@@ -46,7 +46,7 @@ struct trace_probe {
         (sizeof(struct probe_arg) * (n)))
 
 
-static __kprobes int trace_probe_is_return(struct trace_probe *tp)
+static __kprobes bool trace_probe_is_return(struct trace_probe *tp)
 {
         return tp->rp.handler != NULL;
 }
@@ -183,12 +183,57 @@ static struct trace_probe *find_trace_probe(const char *event,
         return NULL;
 }
 
-/* Enable trace_probe - @flag must be TP_FLAG_TRACE or TP_FLAG_PROFILE */
-static int enable_trace_probe(struct trace_probe *tp, int flag)
+static int trace_probe_nr_files(struct trace_probe *tp)
+{
+        struct ftrace_event_file **file = tp->files;
+        int ret = 0;
+
+        if (file)
+                while (*(file++))
+                        ret++;
+
+        return ret;
+}
+
+static DEFINE_MUTEX(probe_enable_lock);
+
+/*
+ * Enable trace_probe
+ * if the file is NULL, enable "perf" handler, or enable "trace" handler.
+ */
+static int
+enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
 {
         int ret = 0;
 
-        tp->flags |= flag;
+        mutex_lock(&probe_enable_lock);
+
+        if (file) {
+                struct ftrace_event_file **new, **old = tp->files;
+                int n = trace_probe_nr_files(tp);
+
+                /* 1 is for new one and 1 is for stopper */
+                new = kzalloc((n + 2) * sizeof(struct ftrace_event_file *),
+                              GFP_KERNEL);
+                if (!new) {
+                        ret = -ENOMEM;
+                        goto out_unlock;
+                }
+                memcpy(new, old, n * sizeof(struct ftrace_event_file *));
+                new[n] = file;
+                /* The last one keeps a NULL */
+
+                rcu_assign_pointer(tp->files, new);
+                tp->flags |= TP_FLAG_TRACE;
+
+                if (old) {
+                        /* Make sure the probe is done with old files */
+                        synchronize_sched();
+                        kfree(old);
+                }
+        } else
+                tp->flags |= TP_FLAG_PROFILE;
+
         if (trace_probe_is_enabled(tp) && trace_probe_is_registered(tp) &&
             !trace_probe_has_gone(tp)) {
                 if (trace_probe_is_return(tp))
@@ -197,19 +242,83 @@ static int enable_trace_probe(struct trace_probe *tp, int flag)
                         ret = enable_kprobe(&tp->rp.kp);
         }
 
+ out_unlock:
+        mutex_unlock(&probe_enable_lock);
+
         return ret;
 }
 
-/* Disable trace_probe - @flag must be TP_FLAG_TRACE or TP_FLAG_PROFILE */
-static void disable_trace_probe(struct trace_probe *tp, int flag)
+static int
+trace_probe_file_index(struct trace_probe *tp, struct ftrace_event_file *file)
 {
-        tp->flags &= ~flag;
+        int i;
+
+        if (tp->files) {
+                for (i = 0; tp->files[i]; i++)
+                        if (tp->files[i] == file)
+                                return i;
+        }
+
+        return -1;
+}
+
+/*
+ * Disable trace_probe
+ * if the file is NULL, disable "perf" handler, or disable "trace" handler.
+ */
+static int
+disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
+{
+        int ret = 0;
+
+        mutex_lock(&probe_enable_lock);
+
+        if (file) {
+                struct ftrace_event_file **new, **old = tp->files;
+                int n = trace_probe_nr_files(tp);
+                int i, j;
+
+                if (n == 0 || trace_probe_file_index(tp, file) < 0) {
+                        ret = -EINVAL;
+                        goto out_unlock;
+                }
+
+                if (n == 1) {   /* Remove the last file */
+                        tp->flags &= ~TP_FLAG_TRACE;
+                        new = NULL;
+                } else {
+                        new = kzalloc(n * sizeof(struct ftrace_event_file *),
+                                      GFP_KERNEL);
+                        if (!new) {
+                                ret = -ENOMEM;
+                                goto out_unlock;
+                        }
+
+                        /* This copy & check loop copies the NULL stopper too */
+                        for (i = 0, j = 0; j < n && i < n + 1; i++)
+                                if (old[i] != file)
+                                        new[j++] = old[i];
+                }
+
+                rcu_assign_pointer(tp->files, new);
+
+                /* Make sure the probe is done with old files */
+                synchronize_sched();
+                kfree(old);
+        } else
+                tp->flags &= ~TP_FLAG_PROFILE;
+
         if (!trace_probe_is_enabled(tp) && trace_probe_is_registered(tp)) {
                 if (trace_probe_is_return(tp))
                         disable_kretprobe(&tp->rp);
                 else
                         disable_kprobe(&tp->rp.kp);
         }
+
+ out_unlock:
+        mutex_unlock(&probe_enable_lock);
+
+        return ret;
 }
 
 /* Internal register function - just handle k*probes and flags */
@@ -723,9 +832,10 @@ static __kprobes void store_trace_args(int ent_size, struct trace_probe *tp,
 }
 
 /* Kprobe handler */
-static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
+static __kprobes void
+__kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs,
+                    struct ftrace_event_file *ftrace_file)
 {
-        struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
         struct kprobe_trace_entry_head *entry;
         struct ring_buffer_event *event;
         struct ring_buffer *buffer;
@@ -733,7 +843,10 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
         unsigned long irq_flags;
         struct ftrace_event_call *call = &tp->call;
 
-        tp->nhit++;
+        WARN_ON(call != ftrace_file->event_call);
+
+        if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
+                return;
 
         local_save_flags(irq_flags);
         pc = preempt_count();
@@ -741,13 +854,14 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
         dsize = __get_data_size(tp, regs);
         size = sizeof(*entry) + tp->size + dsize;
 
-        event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
-                                                  size, irq_flags, pc);
+        event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
+                                                call->event.type,
+                                                size, irq_flags, pc);
         if (!event)
                 return;
 
         entry = ring_buffer_event_data(event);
-        entry->ip = (unsigned long)kp->addr;
+        entry->ip = (unsigned long)tp->rp.kp.addr;
         store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
 
         if (!filter_current_check_discard(buffer, call, entry, event))
@@ -755,11 +869,24 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
                                            irq_flags, pc, regs);
 }
 
-/* Kretprobe handler */
-static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
-                                           struct pt_regs *regs)
+static __kprobes void
+kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs)
+{
+        struct ftrace_event_file **file = tp->files;
+
+        /* Note: preempt is already disabled around the kprobe handler */
+        while (*file) {
+                __kprobe_trace_func(tp, regs, *file);
+                file++;
+        }
+}
+
+/* Kretprobe handler */
+static __kprobes void
+__kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
+                       struct pt_regs *regs,
+                       struct ftrace_event_file *ftrace_file)
 {
-        struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
         struct kretprobe_trace_entry_head *entry;
         struct ring_buffer_event *event;
         struct ring_buffer *buffer;
@@ -767,14 +894,20 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
         unsigned long irq_flags;
         struct ftrace_event_call *call = &tp->call;
 
+        WARN_ON(call != ftrace_file->event_call);
+
+        if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
+                return;
+
         local_save_flags(irq_flags);
         pc = preempt_count();
 
         dsize = __get_data_size(tp, regs);
         size = sizeof(*entry) + tp->size + dsize;
 
-        event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
-                                                  size, irq_flags, pc);
+        event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
+                                                call->event.type,
+                                                size, irq_flags, pc);
         if (!event)
                 return;
 
@@ -788,6 +921,19 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
                                      irq_flags, pc, regs);
 }
 
+static __kprobes void
+kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
+                     struct pt_regs *regs)
+{
+        struct ftrace_event_file **file = tp->files;
+
+        /* Note: preempt is already disabled around the kprobe handler */
+        while (*file) {
+                __kretprobe_trace_func(tp, ri, regs, *file);
+                file++;
+        }
+}
+
 /* Event entry printers */
 enum print_line_t
 print_kprobe_event(struct trace_iterator *iter, int flags,
@@ -975,10 +1121,9 @@ static int set_print_fmt(struct trace_probe *tp)
 #ifdef CONFIG_PERF_EVENTS
 
 /* Kprobe profile handler */
-static __kprobes void kprobe_perf_func(struct kprobe *kp,
-                                       struct pt_regs *regs)
+static __kprobes void
+kprobe_perf_func(struct trace_probe *tp, struct pt_regs *regs)
 {
-        struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
         struct ftrace_event_call *call = &tp->call;
         struct kprobe_trace_entry_head *entry;
         struct hlist_head *head;
@@ -997,7 +1142,7 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp,
         if (!entry)
                 return;
 
-        entry->ip = (unsigned long)kp->addr;
+        entry->ip = (unsigned long)tp->rp.kp.addr;
         memset(&entry[1], 0, dsize);
         store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
 
@@ -1007,10 +1152,10 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp,
 }
 
 /* Kretprobe profile handler */
-static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
-                                          struct pt_regs *regs)
+static __kprobes void
+kretprobe_perf_func(struct trace_probe *tp, struct kretprobe_instance *ri,
+                    struct pt_regs *regs)
 {
-        struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
         struct ftrace_event_call *call = &tp->call;
         struct kretprobe_trace_entry_head *entry;
         struct hlist_head *head;
@@ -1044,20 +1189,19 @@ int kprobe_register(struct ftrace_event_call *event,
                     enum trace_reg type, void *data)
 {
         struct trace_probe *tp = (struct trace_probe *)event->data;
+        struct ftrace_event_file *file = data;
 
         switch (type) {
         case TRACE_REG_REGISTER:
-                return enable_trace_probe(tp, TP_FLAG_TRACE);
+                return enable_trace_probe(tp, file);
         case TRACE_REG_UNREGISTER:
-                disable_trace_probe(tp, TP_FLAG_TRACE);
-                return 0;
+                return disable_trace_probe(tp, file);
 
 #ifdef CONFIG_PERF_EVENTS
         case TRACE_REG_PERF_REGISTER:
-                return enable_trace_probe(tp, TP_FLAG_PROFILE);
+                return enable_trace_probe(tp, NULL);
         case TRACE_REG_PERF_UNREGISTER:
-                disable_trace_probe(tp, TP_FLAG_PROFILE);
-                return 0;
+                return disable_trace_probe(tp, NULL);
         case TRACE_REG_PERF_OPEN:
         case TRACE_REG_PERF_CLOSE:
         case TRACE_REG_PERF_ADD:
@@ -1073,11 +1217,13 @@ int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
 {
         struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
 
+        tp->nhit++;
+
         if (tp->flags & TP_FLAG_TRACE)
-                kprobe_trace_func(kp, regs);
+                kprobe_trace_func(tp, regs);
 #ifdef CONFIG_PERF_EVENTS
         if (tp->flags & TP_FLAG_PROFILE)
-                kprobe_perf_func(kp, regs);
+                kprobe_perf_func(tp, regs);
 #endif
         return 0;       /* We don't tweek kernel, so just return 0 */
 }
@@ -1087,11 +1233,13 @@ int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
 {
         struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
 
+        tp->nhit++;
+
         if (tp->flags & TP_FLAG_TRACE)
-                kretprobe_trace_func(ri, regs);
+                kretprobe_trace_func(tp, ri, regs);
 #ifdef CONFIG_PERF_EVENTS
         if (tp->flags & TP_FLAG_PROFILE)
-                kretprobe_perf_func(ri, regs);
+                kretprobe_perf_func(tp, ri, regs);
 #endif
         return 0;       /* We don't tweek kernel, so just return 0 */
 }
@@ -1189,11 +1337,24 @@ static __used int kprobe_trace_selftest_target(int a1, int a2, int a3,
         return a1 + a2 + a3 + a4 + a5 + a6;
 }
 
+static struct ftrace_event_file *
+find_trace_probe_file(struct trace_probe *tp, struct trace_array *tr)
+{
+        struct ftrace_event_file *file;
+
+        list_for_each_entry(file, &tr->events, list)
+                if (file->event_call == &tp->call)
+                        return file;
+
+        return NULL;
+}
+
 static __init int kprobe_trace_self_tests_init(void)
 {
         int ret, warn = 0;
         int (*target)(int, int, int, int, int, int);
         struct trace_probe *tp;
+        struct ftrace_event_file *file;
 
         target = kprobe_trace_selftest_target;
 
@@ -1203,31 +1364,43 @@ static __init int kprobe_trace_self_tests_init(void)
                                   "$stack $stack0 +0($stack)",
                                   create_trace_probe);
         if (WARN_ON_ONCE(ret)) {
-                pr_warning("error on probing function entry.\n");
+                pr_warn("error on probing function entry.\n");
                 warn++;
         } else {
                 /* Enable trace point */
                 tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
                 if (WARN_ON_ONCE(tp == NULL)) {
-                        pr_warning("error on getting new probe.\n");
+                        pr_warn("error on getting new probe.\n");
                         warn++;
-                } else
-                        enable_trace_probe(tp, TP_FLAG_TRACE);
+                } else {
+                        file = find_trace_probe_file(tp, top_trace_array());
+                        if (WARN_ON_ONCE(file == NULL)) {
+                                pr_warn("error on getting probe file.\n");
+                                warn++;
+                        } else
+                                enable_trace_probe(tp, file);
+                }
         }
 
         ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target "
                                  "$retval", create_trace_probe);
         if (WARN_ON_ONCE(ret)) {
-                pr_warning("error on probing function return.\n");
+                pr_warn("error on probing function return.\n");
                 warn++;
         } else {
                 /* Enable trace point */
                 tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
                 if (WARN_ON_ONCE(tp == NULL)) {
-                        pr_warning("error on getting new probe.\n");
+                        pr_warn("error on getting 2nd new probe.\n");
                         warn++;
-                } else
-                        enable_trace_probe(tp, TP_FLAG_TRACE);
+                } else {
+                        file = find_trace_probe_file(tp, top_trace_array());
+                        if (WARN_ON_ONCE(file == NULL)) {
+                                pr_warn("error on getting probe file.\n");
+                                warn++;
+                        } else
+                                enable_trace_probe(tp, file);
+                }
         }
 
         if (warn)
@@ -1238,27 +1411,39 @@ static __init int kprobe_trace_self_tests_init(void)
         /* Disable trace points before removing it */
         tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
         if (WARN_ON_ONCE(tp == NULL)) {
-                pr_warning("error on getting test probe.\n");
+                pr_warn("error on getting test probe.\n");
                 warn++;
-        } else
-                disable_trace_probe(tp, TP_FLAG_TRACE);
+        } else {
+                file = find_trace_probe_file(tp, top_trace_array());
+                if (WARN_ON_ONCE(file == NULL)) {
+                        pr_warn("error on getting probe file.\n");
+                        warn++;
+                } else
+                        disable_trace_probe(tp, file);
+        }
 
         tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
         if (WARN_ON_ONCE(tp == NULL)) {
-                pr_warning("error on getting 2nd test probe.\n");
+                pr_warn("error on getting 2nd test probe.\n");
                 warn++;
-        } else
-                disable_trace_probe(tp, TP_FLAG_TRACE);
+        } else {
+                file = find_trace_probe_file(tp, top_trace_array());
+                if (WARN_ON_ONCE(file == NULL)) {
+                        pr_warn("error on getting probe file.\n");
+                        warn++;
+                } else
+                        disable_trace_probe(tp, file);
+        }
 
         ret = traceprobe_command("-:testprobe", create_trace_probe);
         if (WARN_ON_ONCE(ret)) {
-                pr_warning("error on deleting a probe.\n");
+                pr_warn("error on deleting a probe.\n");
                 warn++;
         }
 
         ret = traceprobe_command("-:testprobe2", create_trace_probe);
         if (WARN_ON_ONCE(ret)) {
-                pr_warning("error on deleting a probe.\n");
+                pr_warn("error on deleting a probe.\n");
                 warn++;
         }
 
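enable_trace_probe() and disable_trace_probe() above both lean on one
lock-free reader idiom: the probe handler runs with preemption disabled and
walks a NULL-terminated array, while writers publish a fresh copy under a
mutex and reclaim the old array only after synchronize_sched(). A
standalone sketch of the writer side (all names here are mine, not the
kernel's):

    /* Append an entry to a NULL-terminated, RCU-published array.
     * Readers must run with preemption off (as kprobe handlers do). */
    static int publish_entry(void ***slot, void *entry, struct mutex *lock)
    {
        void **new, **old;
        int n = 0;

        mutex_lock(lock);
        old = *slot;
        if (old)
            while (old[n])      /* count up to the NULL stopper */
                n++;

        /* one slot for the new entry, one for the NULL stopper */
        new = kzalloc((n + 2) * sizeof(*new), GFP_KERNEL);
        if (!new) {
            mutex_unlock(lock);
            return -ENOMEM;
        }
        memcpy(new, old, n * sizeof(*new));   /* n == 0 when old is NULL */
        new[n] = entry;                       /* new[n + 1] stays NULL */

        rcu_assign_pointer(*slot, new);  /* readers see old or new */
        synchronize_sched();             /* wait out preempt-off readers */
        kfree(old);                      /* kfree(NULL) is a no-op */
        mutex_unlock(lock);
        return 0;
    }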