tracing/fgraph: Have wakeup and irqsoff tracers ignore graph functions too
Currently both the wakeup and irqsoff tracers do not handle set_graph_notrace well. The ftrace infrastructure will ignore the return paths of all functions, leaving them hanging without an end:

  # echo '*spin*' > set_graph_notrace
  # cat trace
  [...]
          _raw_spin_lock() {
            preempt_count_add() {
            do_raw_spin_lock() {
          update_rq_clock();

Where the '*spin*' functions should have looked like this:

          _raw_spin_lock() {
            preempt_count_add();
            do_raw_spin_lock();
          }
          update_rq_clock();

Instead, have the wakeup and irqsoff tracers ignore the functions that are set by set_graph_notrace, like the function_graph tracer does. Move the logic in the function_graph tracer into a header to allow the wakeup and irqsoff tracers to use it as well.

Cc: Namhyung Kim <namhyung.kim@lge.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Parent: 794de08a16
Commit: 1a41442864
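The change can be exercised from tracefs in the same way the example above exercises set_graph_notrace. A minimal sketch, assuming tracefs is mounted at /sys/kernel/tracing, the irqsoff tracer is built into the kernel, and its display-graph option is used to get function-graph style output:

  # cd /sys/kernel/tracing
  # echo '*spin*' > set_graph_notrace
  # echo irqsoff > current_tracer
  # echo 1 > options/display-graph
  # cat trace

With this patch the '*spin*' functions are skipped by the wakeup and irqsoff graph entry callbacks instead of being left as unclosed entries in the output.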
@@ -846,6 +846,17 @@ static inline int ftrace_graph_notrace_addr(unsigned long addr)
 	return 0;
 }
 #endif /* CONFIG_DYNAMIC_FTRACE */
+
+extern unsigned int fgraph_max_depth;
+
+static inline bool ftrace_graph_ignore_func(struct ftrace_graph_ent *trace)
+{
+	/* trace it when it is-nested-in or is a function enabled. */
+	return !(trace->depth || ftrace_graph_addr(trace->func)) ||
+		(trace->depth < 0) ||
+		(fgraph_max_depth && trace->depth >= fgraph_max_depth);
+}
+
 #else /* CONFIG_FUNCTION_GRAPH_TRACER */
 static inline enum print_line_t
 print_graph_function_flags(struct trace_iterator *iter, u32 flags)

@@ -65,7 +65,7 @@ struct fgraph_data {
 
 #define TRACE_GRAPH_INDENT	2
 
-static unsigned int max_depth;
+unsigned int fgraph_max_depth;
 
 static struct tracer_opt trace_opts[] = {
 	/* Display overruns? (for self-debug purpose) */

@@ -384,10 +384,10 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 	if (!ftrace_trace_task(tr))
 		return 0;
 
-	/* trace it when it is-nested-in or is a function enabled. */
-	if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
-	     ftrace_graph_ignore_irqs()) || (trace->depth < 0) ||
-	    (max_depth && trace->depth >= max_depth))
+	if (ftrace_graph_ignore_func(trace))
+		return 0;
+
+	if (ftrace_graph_ignore_irqs())
 		return 0;
 
 	/*

@@ -1500,7 +1500,7 @@ graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
 	if (ret)
 		return ret;
 
-	max_depth = val;
+	fgraph_max_depth = val;
 
 	*ppos += cnt;
 

@@ -1514,7 +1514,7 @@ graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
 	char buf[15]; /* More than enough to hold UINT_MAX + "\n"*/
 	int n;
 
-	n = sprintf(buf, "%d\n", max_depth);
+	n = sprintf(buf, "%d\n", fgraph_max_depth);
 
 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
 }

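Only the internal symbol is renamed here; graph_depth_write() and graph_depth_read() keep serving the same user-visible knob. A small usage sketch, assuming that knob is exposed as the max_graph_depth file in the tracefs directory (writing 0 removes the depth limit):

  # echo 3 > max_graph_depth
  # cat max_graph_depth
  3
  # echo 0 > max_graph_depth

Because fgraph_max_depth is now a global symbol, the same limit is consulted by ftrace_graph_ignore_func() for the wakeup and irqsoff tracers as well.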
@@ -175,6 +175,18 @@ static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
 	int ret;
 	int pc;
 
+	if (ftrace_graph_ignore_func(trace))
+		return 0;
+	/*
+	 * Do not trace a function if it's filtered by set_graph_notrace.
+	 * Make the index of ret stack negative to indicate that it should
+	 * ignore further functions. But it needs its own ret stack entry
+	 * to recover the original index in order to continue tracing after
+	 * returning from the function.
+	 */
+	if (ftrace_graph_notrace_addr(trace->func))
+		return 1;
+
 	if (!func_prolog_dec(tr, &data, &flags))
 		return 0;
 

@@ -239,6 +239,18 @@ static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
 	unsigned long flags;
 	int pc, ret = 0;
 
+	if (ftrace_graph_ignore_func(trace))
+		return 0;
+	/*
+	 * Do not trace a function if it's filtered by set_graph_notrace.
+	 * Make the index of ret stack negative to indicate that it should
+	 * ignore further functions. But it needs its own ret stack entry
+	 * to recover the original index in order to continue tracing after
+	 * returning from the function.
+	 */
+	if (ftrace_graph_notrace_addr(trace->func))
+		return 1;
+
 	if (!func_prolog_preempt_disable(tr, &data, &pc))
 		return 0;
 

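As a quick sanity check of the new behavior (a sketch, assuming the wakeup tracer and its display-graph option are available), the commands from the commit message can be rerun with the wakeup tracer selected:

  # echo '*spin*' > set_graph_notrace
  # echo wakeup > current_tracer
  # echo 1 > options/display-graph
  # cat trace

The '*spin*' functions should now be filtered the same way the function_graph tracer filters them, rather than appearing as the dangling, never-closed entries shown in the first trace excerpt above.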