ftrace: mcount call site on boot nops core

This is the infrastructure for converting the mcount call sites
recorded by the __mcount_loc section into nops on boot. It also allows
for using these sites to enable tracing as normal. When the __mcount_loc
section is used, the "ftraced" kernel thread is disabled.

This uses the current infrastructure to record the mcount call sites
as well as convert them to nops. The mcount function is kept as a stub
on boot up and not converted to the ftrace_record_ip function. We use the
ftrace_record_ip to only record from the table.

This patch does not handle modules. That comes with a later patch.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
Steven Rostedt 2008-08-14 15:45:08 -04:00 committed by Ingo Molnar
Parent 8da3821ba5
Commit 68bf21aa15
4 changed files with 124 additions and 43 deletions

View file

@ -7,6 +7,16 @@
#ifndef __ASSEMBLY__
extern void mcount(void);
/*
 * Adjust a recorded mcount call-site address so it points at the
 * start of the call instruction.
 *
 * A "call mcount" is encoded as "e8 <4 byte offset>".  The recorded
 * address is that of the 4-byte offset; callers want the address of
 * the e8 opcode itself, which sits one byte earlier.
 */
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
	/* step back over the e8 opcode byte */
	return --addr;
}
#endif
#endif /* CONFIG_FTRACE */

View file

@ -162,4 +162,10 @@ static inline void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
#endif
/*
 * ftrace_init(): at boot, process the mcount call sites recorded in
 * the __mcount_loc section.  Without CONFIG_FTRACE_MCOUNT_RECORD
 * there is no recorded table, so the call collapses to an empty
 * inline stub and costs nothing.
 */
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
#else
static inline void ftrace_init(void) { }
#endif
#endif /* _LINUX_FTRACE_H */

View file

@ -60,6 +60,7 @@
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/idr.h>
#include <linux/ftrace.h>
#include <asm/io.h>
#include <asm/bugs.h>
@ -687,6 +688,8 @@ asmlinkage void __init start_kernel(void)
acpi_early_init(); /* before LAPIC and SMP init */
ftrace_init();
/* Do the rest non-__init'ed, we're now alive */
rest_init();
}

View file

@ -792,47 +792,7 @@ static int ftrace_update_code(void)
return 1;
}
/*
 * ftraced - kernel thread that periodically converts newly recorded
 * mcount call sites.
 *
 * Once a second (unless suspended or stopped) it takes the ftrace
 * locks and runs ftrace_update_code().  If an implausibly large
 * number of updates accumulates (> 100000 total) dynamic ftrace is
 * assumed to be misbehaving and is disabled for good.
 */
static int ftraced(void *ignore)
{
	unsigned long usecs;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* check once a second */
		schedule_timeout(HZ);

		if (unlikely(ftrace_disabled))
			continue;

		/* sysctl lock first, then ftraced lock - keep this order */
		mutex_lock(&ftrace_sysctl_lock);
		mutex_lock(&ftraced_lock);
		if (!ftraced_suspend && !ftraced_stop &&
		    ftrace_update_code()) {
			usecs = nsecs_to_usecs(ftrace_update_time);
			if (ftrace_update_tot_cnt > 100000) {
				ftrace_update_tot_cnt = 0;
				pr_info("hm, dftrace overflow: %lu change%s"
					" (%lu total) in %lu usec%s\n",
					ftrace_update_cnt,
					ftrace_update_cnt != 1 ? "s" : "",
					ftrace_update_tot_cnt,
					usecs, usecs != 1 ? "s" : "");
				/* runaway updates: shut dynamic ftrace down */
				ftrace_disabled = 1;
				WARN_ON_ONCE(1);
			}
		}
		mutex_unlock(&ftraced_lock);
		mutex_unlock(&ftrace_sysctl_lock);

		/* presumably refills the free-record pool - done outside the locks */
		ftrace_shutdown_replenish();
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
static int __init ftrace_dyn_table_alloc(void)
static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
struct ftrace_page *pg;
int cnt;
@ -859,7 +819,9 @@ static int __init ftrace_dyn_table_alloc(void)
pg = ftrace_pages = ftrace_pages_start;
cnt = NR_TO_INIT / ENTRIES_PER_PAGE;
cnt = num_to_init / ENTRIES_PER_PAGE;
pr_info("ftrace: allocating %ld hash entries in %d pages\n",
num_to_init, cnt);
for (i = 0; i < cnt; i++) {
pg->next = (void *)get_zeroed_page(GFP_KERNEL);
@ -1556,6 +1518,104 @@ static __init int ftrace_init_debugfs(void)
fs_initcall(ftrace_init_debugfs);
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
/*
 * Record every mcount call-site address listed in [start, end), then
 * run the code updater over the recorded sites.  Each raw table entry
 * is adjusted with ftrace_call_adjust() before being handed to
 * ftrace_record_ip().
 */
static int ftrace_convert_nops(unsigned long *start,
			       unsigned long *end)
{
	unsigned long flags;
	unsigned long *entry;

	for (entry = start; entry < end; ) {
		unsigned long ip = ftrace_call_adjust(*entry++);

		ftrace_record_ip(ip);
		/* keep the free-record pool topped up as we consume it */
		ftrace_shutdown_replenish();
	}

	/* the pointer argument is ignored by __ftrace_update_code() */
	local_irq_save(flags);
	__ftrace_update_code(entry);
	local_irq_restore(flags);

	return 0;
}
extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];
/*
 * Boot-time ftrace initialization: let the architecture prepare its
 * dynamic-ftrace state, size the record table from the __mcount_loc
 * section, and convert every recorded call site.  On any failure,
 * dynamic ftrace is disabled entirely.
 */
void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	/* arch init must not be interrupted while it patches code */
	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	/* number of entries recorded in the __mcount_loc section */
	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	/* NOTE(review): ret is not checked here - confirm intentional */
	ret = ftrace_convert_nops(__start_mcount_loc,
				  __stop_mcount_loc);

	return;

 failed:
	ftrace_disabled = 1;
}
#else /* CONFIG_FTRACE_MCOUNT_RECORD */
/*
 * ftraced - kernel thread that periodically converts newly recorded
 * mcount call sites (used only when there is no __mcount_loc record
 * table; see the surrounding #else branch).
 *
 * Once a second (unless suspended or stopped) it takes the ftrace
 * locks and runs ftrace_update_code().  If an implausibly large
 * number of updates accumulates (> 100000 total) dynamic ftrace is
 * assumed to be misbehaving and is disabled for good.
 */
static int ftraced(void *ignore)
{
	unsigned long usecs;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* check once a second */
		schedule_timeout(HZ);

		if (unlikely(ftrace_disabled))
			continue;

		/* sysctl lock first, then ftraced lock - keep this order */
		mutex_lock(&ftrace_sysctl_lock);
		mutex_lock(&ftraced_lock);
		if (!ftraced_suspend && !ftraced_stop &&
		    ftrace_update_code()) {
			usecs = nsecs_to_usecs(ftrace_update_time);
			if (ftrace_update_tot_cnt > 100000) {
				ftrace_update_tot_cnt = 0;
				pr_info("hm, dftrace overflow: %lu change%s"
					" (%lu total) in %lu usec%s\n",
					ftrace_update_cnt,
					ftrace_update_cnt != 1 ? "s" : "",
					ftrace_update_tot_cnt,
					usecs, usecs != 1 ? "s" : "");
				/* runaway updates: shut dynamic ftrace down */
				ftrace_disabled = 1;
				WARN_ON_ONCE(1);
			}
		}
		mutex_unlock(&ftraced_lock);
		mutex_unlock(&ftrace_sysctl_lock);

		/* presumably refills the free-record pool - done outside the locks */
		ftrace_shutdown_replenish();
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
static int __init ftrace_dynamic_init(void)
{
struct task_struct *p;
@ -1572,7 +1632,7 @@ static int __init ftrace_dynamic_init(void)
goto failed;
}
ret = ftrace_dyn_table_alloc();
ret = ftrace_dyn_table_alloc(NR_TO_INIT);
if (ret)
goto failed;
@ -1593,6 +1653,8 @@ static int __init ftrace_dynamic_init(void)
}
core_initcall(ftrace_dynamic_init);
#endif /* CONFIG_FTRACE_MCOUNT_RECORD */
#else
/*
 * Config option not set (the governing #ifdef is outside this view):
 * the start/stop hooks compile away to no-ops.
 */
# define ftrace_startup() do { } while (0)
# define ftrace_shutdown() do { } while (0)