ftrace: Create new ftrace_internal.h header

In order to move the function graph infrastructure into its own file
(fgraph.c), it needs access to various functions and variables in ftrace.c
that are currently static. Create a new header called ftrace_internal.h
that holds the function prototypes and the extern declarations of the
variables needed by fgraph.c, and make them global in ftrace.c so that
they can be used outside that file.

Reviewed-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Parent: 761efe8a94
Commit: 3306fc4aff
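The change is mechanically uniform: every symbol that fgraph.c will need
loses its "static" qualifier in ftrace.c and gains an extern declaration
(or a prototype) in the new header. A minimal sketch of the pattern, using
hypothetical foo_* names rather than the real ftrace symbols:

    /* foo_internal.h -- internal header shared between foo.c and bar.c */
    #ifndef _FOO_INTERNAL_H
    #define _FOO_INTERNAL_H

    #include <linux/mutex.h>

    extern struct mutex foo_lock;   /* defined in foo.c, no longer static */
    int foo_startup(int command);   /* implemented in foo.c */

    #endif

    /* foo.c -- dropping "static" gives the symbols external linkage */
    DEFINE_MUTEX(foo_lock);

    int foo_startup(int command)
    {
            return 0;               /* illustrative body */
    }

    /* bar.c -- the newly split-out file can now reach into foo.c */
    #include "foo_internal.h"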
kernel/trace/ftrace.c
@@ -40,6 +40,7 @@
 #include <asm/sections.h>
 #include <asm/setup.h>

+#include "ftrace_internal.h"
 #include "trace_output.h"
 #include "trace_stat.h"

@@ -77,7 +78,7 @@
 #define ASSIGN_OPS_HASH(opsname, val)
 #endif

-static struct ftrace_ops ftrace_list_end __read_mostly = {
+struct ftrace_ops ftrace_list_end __read_mostly = {
        .func           = ftrace_stub,
        .flags          = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
        INIT_OPS_HASH(ftrace_list_end)
@@ -112,11 +113,11 @@ static void ftrace_update_trampoline(struct ftrace_ops *ops);
  */
 static int ftrace_disabled __read_mostly;

-static DEFINE_MUTEX(ftrace_lock);
+DEFINE_MUTEX(ftrace_lock);

-static struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end;
+struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
-static struct ftrace_ops global_ops;
+struct ftrace_ops global_ops;

 #if ARCH_SUPPORTS_FTRACE_OPS
 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
@@ -127,26 +128,6 @@ static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
 #define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
 #endif

-/*
- * Traverse the ftrace_global_list, invoking all entries. The reason that we
- * can use rcu_dereference_raw_notrace() is that elements removed from this list
- * are simply leaked, so there is no need to interact with a grace-period
- * mechanism. The rcu_dereference_raw_notrace() calls are needed to handle
- * concurrent insertions into the ftrace_global_list.
- *
- * Silly Alpha and silly pointer-speculation compiler optimizations!
- */
-#define do_for_each_ftrace_op(op, list)                        \
-       op = rcu_dereference_raw_notrace(list);                 \
-       do
-
-/*
- * Optimized for just a single item in the list (as that is the normal case).
- */
-#define while_for_each_ftrace_op(op)                           \
-       while (likely(op = rcu_dereference_raw_notrace((op)->next)) && \
-              unlikely((op) != &ftrace_list_end))
-
 static inline void ftrace_ops_init(struct ftrace_ops *ops)
 {
 #ifdef CONFIG_DYNAMIC_FTRACE
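These iteration macros are moved rather than deleted; they reappear verbatim
in ftrace_internal.h below so that fgraph.c can also walk the ops list. The
two macros are designed to be composed into a single do/while loop at the
call site, which is how ftrace.c itself uses them. A sketch of the calling
pattern, assuming ip, parent_ip and regs come from the enclosing handler
(the loop body is illustrative, not part of this commit):

    struct ftrace_ops *op;

    /* do_for_each_ftrace_op() opens the loop and primes "op" from the
     * RCU-protected list head; while_for_each_ftrace_op() closes it,
     * advancing until the ftrace_list_end sentinel is reached. */
    do_for_each_ftrace_op(op, ftrace_ops_list) {
            if (ftrace_ops_test(op, ip, regs))
                    op->func(ip, parent_ip, op, regs);
    } while_for_each_ftrace_op(op);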
@@ -187,17 +168,11 @@ static void ftrace_sync_ipi(void *data)
 }

 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static void update_function_graph_func(void);
-
 /* Both enabled by default (can be cleared by function_graph tracer flags */
 static bool fgraph_sleep_time = true;
 static bool fgraph_graph_time = true;
-
-#else
-static inline void update_function_graph_func(void) { }
 #endif

-
 static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
 {
        /*
@@ -334,7 +309,7 @@ static int remove_ftrace_ops(struct ftrace_ops __rcu **list,

 static void ftrace_update_trampoline(struct ftrace_ops *ops);

-static int __register_ftrace_function(struct ftrace_ops *ops)
+int __register_ftrace_function(struct ftrace_ops *ops)
 {
        if (ops->flags & FTRACE_OPS_FL_DELETED)
                return -EINVAL;
@@ -375,7 +350,7 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
        return 0;
 }

-static int __unregister_ftrace_function(struct ftrace_ops *ops)
+int __unregister_ftrace_function(struct ftrace_ops *ops)
 {
        int ret;

@@ -1022,9 +997,7 @@ static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
 #endif /* CONFIG_FUNCTION_PROFILER */

 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static int ftrace_graph_active;
-#else
-# define ftrace_graph_active 0
+int ftrace_graph_active;
 #endif

 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -1067,7 +1040,7 @@ static const struct ftrace_hash empty_hash = {
 };
 #define EMPTY_HASH     ((struct ftrace_hash *)&empty_hash)

-static struct ftrace_ops global_ops = {
+struct ftrace_ops global_ops = {
        .func                           = ftrace_stub,
        .local_hash.notrace_hash        = EMPTY_HASH,
        .local_hash.filter_hash         = EMPTY_HASH,
@@ -1503,7 +1476,7 @@ static bool hash_contains_ip(unsigned long ip,
  * This needs to be called with preemption disabled as
  * the hashes are freed with call_rcu_sched().
  */
-static int
+int
 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
 {
        struct ftrace_ops_hash hash;
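Since ftrace_ops_test() is now global, callers outside ftrace.c must also
honor the comment above it and disable preemption around the call. A hedged
sketch of the calling convention (the wrapper name is hypothetical):

    static bool example_ip_is_traced(struct ftrace_ops *ops, unsigned long ip)
    {
            int ret;

            preempt_disable_notrace();  /* hashes freed via call_rcu_sched() */
            ret = ftrace_ops_test(ops, ip, NULL);
            preempt_enable_notrace();

            return ret != 0;
    }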
@@ -2682,7 +2655,7 @@ static void ftrace_startup_all(int command)
        update_all_ops = false;
 }

-static int ftrace_startup(struct ftrace_ops *ops, int command)
+int ftrace_startup(struct ftrace_ops *ops, int command)
 {
        int ret;

@@ -2724,7 +2697,7 @@ static int ftrace_startup(struct ftrace_ops *ops, int command)
        return 0;
 }

-static int ftrace_shutdown(struct ftrace_ops *ops, int command)
+int ftrace_shutdown(struct ftrace_ops *ops, int command)
 {
        int ret;

@@ -6177,7 +6150,7 @@ void ftrace_init_trace_array(struct trace_array *tr)
 }
 #else

-static struct ftrace_ops global_ops = {
+struct ftrace_ops global_ops = {
        .func                   = ftrace_stub,
        .flags                  = FTRACE_OPS_FL_RECURSION_SAFE |
                                  FTRACE_OPS_FL_INITIALIZED |
@@ -6194,31 +6167,10 @@ core_initcall(ftrace_nodyn_init);
 static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; }
 static inline void ftrace_startup_enable(int command) { }
 static inline void ftrace_startup_all(int command) { }
-/* Keep as macros so we do not need to define the commands */
-# define ftrace_startup(ops, command)                          \
-       ({                                                      \
-               int ___ret = __register_ftrace_function(ops);   \
-               if (!___ret)                                    \
-                       (ops)->flags |= FTRACE_OPS_FL_ENABLED;  \
-               ___ret;                                         \
-       })
-# define ftrace_shutdown(ops, command)                         \
-       ({                                                      \
-               int ___ret = __unregister_ftrace_function(ops); \
-               if (!___ret)                                    \
-                       (ops)->flags &= ~FTRACE_OPS_FL_ENABLED; \
-               ___ret;                                         \
-       })

 # define ftrace_startup_sysctl()       do { } while (0)
 # define ftrace_shutdown_sysctl()      do { } while (0)

-static inline int
-ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
-{
-       return 1;
-}
-
 static void ftrace_update_trampoline(struct ftrace_ops *ops)
 {
 }
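The ftrace_startup()/ftrace_shutdown() macros removed here (and re-added
unchanged in ftrace_internal.h below) rely on the GCC statement-expression
extension: a ({ ... }) block evaluates to its last expression, which lets a
macro both update ops->flags and hand back an error code. A standalone
userspace illustration of the construct (not kernel code; the macro name is
hypothetical):

    #include <stdio.h>

    /* The ({ ... }) block runs its statements, then evaluates to ___ret,
     * mirroring how ftrace_startup() yields the return value of
     * __register_ftrace_function() while also setting a flag on success. */
    #define try_enable(flagsp, bit)                 \
            ({                                      \
                    int ___ret = 0;                 \
                    if (!___ret)                    \
                            *(flagsp) |= (bit);     \
                    ___ret;                         \
            })

    int main(void)
    {
            unsigned long flags = 0;
            int err = try_enable(&flags, 1UL << 0);

            printf("err=%d flags=%#lx\n", err, flags);
            return 0;
    }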
@@ -6930,7 +6882,7 @@ static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
  * function against the global ops, and not just trace any function
  * that any ftrace_ops registered.
  */
-static void update_function_graph_func(void)
+void update_function_graph_func(void)
 {
        struct ftrace_ops *op;
        bool do_test = false;
kernel/trace/ftrace_internal.h (new file)
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_KERNEL_FTRACE_INTERNAL_H
+#define _LINUX_KERNEL_FTRACE_INTERNAL_H
+
+#ifdef CONFIG_FUNCTION_TRACER
+
+/*
+ * Traverse the ftrace_global_list, invoking all entries. The reason that we
+ * can use rcu_dereference_raw_notrace() is that elements removed from this list
+ * are simply leaked, so there is no need to interact with a grace-period
+ * mechanism. The rcu_dereference_raw_notrace() calls are needed to handle
+ * concurrent insertions into the ftrace_global_list.
+ *
+ * Silly Alpha and silly pointer-speculation compiler optimizations!
+ */
+#define do_for_each_ftrace_op(op, list)                        \
+       op = rcu_dereference_raw_notrace(list);                 \
+       do
+
+/*
+ * Optimized for just a single item in the list (as that is the normal case).
+ */
+#define while_for_each_ftrace_op(op)                           \
+       while (likely(op = rcu_dereference_raw_notrace((op)->next)) && \
+              unlikely((op) != &ftrace_list_end))
+
+extern struct ftrace_ops __rcu *ftrace_ops_list;
+extern struct ftrace_ops ftrace_list_end;
+extern struct mutex ftrace_lock;
+extern struct ftrace_ops global_ops;
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+int ftrace_startup(struct ftrace_ops *ops, int command);
+int ftrace_shutdown(struct ftrace_ops *ops, int command);
+int ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs);
+
+#else /* !CONFIG_DYNAMIC_FTRACE */
+
+int __register_ftrace_function(struct ftrace_ops *ops);
+int __unregister_ftrace_function(struct ftrace_ops *ops);
+/* Keep as macros so we do not need to define the commands */
+# define ftrace_startup(ops, command)                          \
+       ({                                                      \
+               int ___ret = __register_ftrace_function(ops);   \
+               if (!___ret)                                    \
+                       (ops)->flags |= FTRACE_OPS_FL_ENABLED;  \
+               ___ret;                                         \
+       })
+# define ftrace_shutdown(ops, command)                         \
+       ({                                                      \
+               int ___ret = __unregister_ftrace_function(ops); \
+               if (!___ret)                                    \
+                       (ops)->flags &= ~FTRACE_OPS_FL_ENABLED; \
+               ___ret;                                         \
+       })
+static inline int
+ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
+{
+       return 1;
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+extern int ftrace_graph_active;
+void update_function_graph_func(void);
+#else /* !CONFIG_FUNCTION_GRAPH_TRACER */
+# define ftrace_graph_active 0
+static inline void update_function_graph_func(void) { }
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+#else /* !CONFIG_FUNCTION_TRACER */
+#endif /* CONFIG_FUNCTION_TRACER */
+
+#endif
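With these declarations in place, the follow-up split becomes possible: a
separate fgraph.c can take ftrace_lock, bump ftrace_graph_active, and call
ftrace_startup() directly. A rough sketch of such a consumer, assuming the
later file layout (the ops definition and function body are illustrative,
not part of this commit):

    /* fgraph.c -- hypothetical consumer of ftrace_internal.h */
    #include <linux/ftrace.h>

    #include "ftrace_internal.h"

    static struct ftrace_ops graph_ops = {
            .func   = ftrace_stub,
            .flags  = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
    };

    int example_graph_start(void)
    {
            int ret;

            mutex_lock(&ftrace_lock);       /* visible via ftrace_internal.h */
            ftrace_graph_active++;
            update_function_graph_func();
            ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
            if (ret)
                    ftrace_graph_active--;
            mutex_unlock(&ftrace_lock);

            return ret;
    }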