This patch adds full ftrace support for PowerPC, both 64-bit and 32-bit.
This includes dynamic tracing and function filtering.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Author: Steven Rostedt, 2008-05-14 23:49:44 -04:00; committed by Thomas Gleixner
Parent: e0eca07bad
Commit: 4e491d14f2
11 changed files with 405 additions and 10 deletions
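
As orientation before the per-file changes, here is a hedged sketch (not part of
the patch) of how a tracer consumes the hooks this patch wires up: once the
architecture provides _mcount, the generic ftrace core hands every traced call
to the registered callback as an (ip, parent_ip) pair. The names follow the
ftrace API of this era; treat the details as illustrative.

#include <linux/ftrace.h>

/* reached from the new _mcount stubs via ftrace_trace_function */
static void example_trace_call(unsigned long ip, unsigned long parent_ip)
{
	/* ip = traced function, parent_ip = the function that called it */
}

static struct ftrace_ops example_ops = {
	.func = example_trace_call,
};

static int __init example_tracer_init(void)
{
	return register_ftrace_function(&example_ops);
}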


@@ -105,11 +105,12 @@ config ARCH_NO_VIRT_TO_BUS
 config PPC
 	bool
 	default y
+	select HAVE_FTRACE
 	select HAVE_IDE
-	select HAVE_OPROFILE
 	select HAVE_KPROBES
 	select HAVE_KRETPROBES
 	select HAVE_LMB
+	select HAVE_OPROFILE
 
 config EARLY_PRINTK
 	bool


@@ -12,6 +12,18 @@ CFLAGS_prom_init.o += -fPIC
 CFLAGS_btext.o		+= -fPIC
 endif
 
+ifdef CONFIG_FTRACE
+# Do not trace early boot code
+CFLAGS_REMOVE_cputable.o = -pg
+CFLAGS_REMOVE_prom_init.o = -pg
+
+ifdef CONFIG_DYNAMIC_FTRACE
+# dynamic ftrace setup.
+CFLAGS_REMOVE_ftrace.o = -pg
+endif
+
+endif
+
 obj-y				:= cputable.o ptrace.o syscalls.o \
 				   irq.o align.o signal_32.o pmc.o vdso.o \
 				   init_task.o process.o systbl.o idle.o \
@@ -78,6 +90,8 @@ obj-$(CONFIG_KEXEC)		+= machine_kexec.o crash.o \
 obj-$(CONFIG_AUDIT)		+= audit.o
 obj64-$(CONFIG_AUDIT)		+= compat_audit.o
 
+obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o
+
 obj-$(CONFIG_8XX_MINIMAL_FPEMU)	+= softemu8xx.o
 
 ifneq ($(CONFIG_PPC_INDIRECT_IO),y)
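
The CFLAGS_REMOVE_* lines above matter because building with -pg makes GCC emit
a profiling call at the entry of every function; code that runs before the MMU
and the ftrace machinery are up must not make that call. A rough C-level picture
of the instrumentation, with an invented function name (the real call is emitted
by the compiler and passes the caller's link register in a machine-specific way):

extern void _mcount(void);

void some_boot_function(void)
{
	_mcount();	/* inserted by -pg; CFLAGS_REMOVE_*.o = -pg suppresses it */
	/* ... original function body ... */
}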


@@ -1035,3 +1035,133 @@ machine_check_in_rtas:
 	/* XXX load up BATs and panic */
 
 #endif /* CONFIG_PPC_RTAS */
+
+#ifdef CONFIG_FTRACE
+#ifdef CONFIG_DYNAMIC_FTRACE
+_GLOBAL(mcount)
+_GLOBAL(_mcount)
+	stwu	r1,-48(r1)
+	stw	r3, 12(r1)
+	stw	r4, 16(r1)
+	stw	r5, 20(r1)
+	stw	r6, 24(r1)
+	mflr	r3
+	stw	r7, 28(r1)
+	mfcr	r5
+	stw	r8, 32(r1)
+	stw	r9, 36(r1)
+	stw	r10,40(r1)
+	stw	r3, 44(r1)
+	stw	r5, 8(r1)
+
+	.globl mcount_call
+mcount_call:
+	bl	ftrace_stub
+	nop
+
+	lwz	r6, 8(r1)
+	lwz	r0, 44(r1)
+	lwz	r3, 12(r1)
+	mtctr	r0
+	lwz	r4, 16(r1)
+	mtcr	r6
+	lwz	r5, 20(r1)
+	lwz	r6, 24(r1)
+	lwz	r0, 52(r1)
+	lwz	r7, 28(r1)
+	lwz	r8, 32(r1)
+	mtlr	r0
+	lwz	r9, 36(r1)
+	lwz	r10,40(r1)
+	addi	r1, r1, 48
+	bctr
+
+_GLOBAL(ftrace_caller)
+	/* Based off of objdump output from glibc */
+	stwu	r1,-48(r1)
+	stw	r3, 12(r1)
+	stw	r4, 16(r1)
+	stw	r5, 20(r1)
+	stw	r6, 24(r1)
+	mflr	r3
+	lwz	r4, 52(r1)
+	mfcr	r5
+	stw	r7, 28(r1)
+	stw	r8, 32(r1)
+	stw	r9, 36(r1)
+	stw	r10,40(r1)
+	stw	r3, 44(r1)
+	stw	r5, 8(r1)
+
+	.globl ftrace_call
+ftrace_call:
+	bl	ftrace_stub
+	nop
+
+	lwz	r6, 8(r1)
+	lwz	r0, 44(r1)
+	lwz	r3, 12(r1)
+	mtctr	r0
+	lwz	r4, 16(r1)
+	mtcr	r6
+	lwz	r5, 20(r1)
+	lwz	r6, 24(r1)
+	lwz	r0, 52(r1)
+	lwz	r7, 28(r1)
+	lwz	r8, 32(r1)
+	mtlr	r0
+	lwz	r9, 36(r1)
+	lwz	r10,40(r1)
+	addi	r1, r1, 48
+	bctr
+#else
+_GLOBAL(mcount)
+_GLOBAL(_mcount)
+	stwu	r1,-48(r1)
+	stw	r3, 12(r1)
+	stw	r4, 16(r1)
+	stw	r5, 20(r1)
+	stw	r6, 24(r1)
+	mflr	r3
+	lwz	r4, 52(r1)
+	mfcr	r5
+	stw	r7, 28(r1)
+	stw	r8, 32(r1)
+	stw	r9, 36(r1)
+	stw	r10,40(r1)
+	stw	r3, 44(r1)
+	stw	r5, 8(r1)
+
+	LOAD_REG_ADDR(r5, ftrace_trace_function)
+#if 0
+	mtctr	r3
+	mr	r1, r5
+	bctrl
+#endif
+	lwz	r5,0(r5)
+#if 1
+	mtctr	r5
+	bctrl
+#else
+	bl	ftrace_stub
+#endif
+	nop
+
+	lwz	r6, 8(r1)
+	lwz	r0, 44(r1)
+	lwz	r3, 12(r1)
+	mtctr	r0
+	lwz	r4, 16(r1)
+	mtcr	r6
+	lwz	r5, 20(r1)
+	lwz	r6, 24(r1)
+	lwz	r0, 52(r1)
+	lwz	r7, 28(r1)
+	lwz	r8, 32(r1)
+	mtlr	r0
+	lwz	r9, 36(r1)
+	lwz	r10,40(r1)
+	addi	r1, r1, 48
+	bctr
+#endif
+
+_GLOBAL(ftrace_stub)
+	blr
+
+#endif /* CONFIG_FTRACE */
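
In the non-dynamic variant above, the stub saves the argument registers r3-r10,
CR and LR, loads ftrace_trace_function, and calls it with two arguments: the
address _mcount will return to (taken from LR) and the traced function's caller
(the LR value reloaded from 52(r1)); it then restores everything and branches
back via CTR. A hedged C paraphrase (the real code must stay in assembly so that
the traced function's registers survive the detour):

extern void (*ftrace_trace_function)(unsigned long ip, unsigned long parent_ip);

/* ip: address just past the compiler-inserted call to _mcount
 * parent_ip: return address of the traced function, i.e. its caller */
static void mcount_in_c(unsigned long ip, unsigned long parent_ip)
{
	ftrace_trace_function(ip, parent_ip);
	/* ... then put r3-r10, CR and LR back exactly as they were */
}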


@@ -870,3 +870,65 @@ _GLOBAL(enter_prom)
 	ld	r0,16(r1)
 	mtlr	r0
 	blr
+
+#ifdef CONFIG_FTRACE
+#ifdef CONFIG_DYNAMIC_FTRACE
+_GLOBAL(mcount)
+_GLOBAL(_mcount)
+	/* Taken from output of objdump from lib64/glibc */
+	mflr	r3
+	stdu	r1, -112(r1)
+	std	r3, 128(r1)
+	.globl mcount_call
+mcount_call:
+	bl	ftrace_stub
+	nop
+	ld	r0, 128(r1)
+	mtlr	r0
+	addi	r1, r1, 112
+	blr
+
+_GLOBAL(ftrace_caller)
+	/* Taken from output of objdump from lib64/glibc */
+	mflr	r3
+	ld	r11, 0(r1)
+	stdu	r1, -112(r1)
+	std	r3, 128(r1)
+	ld	r4, 16(r11)
+
+	.globl ftrace_call
+ftrace_call:
+	bl	ftrace_stub
+	nop
+
+	ld	r0, 128(r1)
+	mtlr	r0
+	addi	r1, r1, 112
+_GLOBAL(ftrace_stub)
+	blr
+#else
+_GLOBAL(mcount)
+	blr
+
+_GLOBAL(_mcount)
+	/* Taken from output of objdump from lib64/glibc */
+	mflr	r3
+	ld	r11, 0(r1)
+	stdu	r1, -112(r1)
+	std	r3, 128(r1)
+	ld	r4, 16(r11)
+
+	LOAD_REG_ADDR(r5,ftrace_trace_function)
+	ld	r5,0(r5)
+	ld	r5,0(r5)
+	mtctr	r5
+	bctrl
+
+	nop
+	ld	r0, 128(r1)
+	mtlr	r0
+	addi	r1, r1, 112
+_GLOBAL(ftrace_stub)
+	blr
+#endif
+#endif


@@ -0,0 +1,165 @@
+/*
+ * Code for replacing ftrace calls with jumps.
+ *
+ * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
+ *
+ * Thanks goes out to P.A. Semi, Inc for supplying me with a PPC64 box.
+ *
+ */
+
+#include <linux/spinlock.h>
+#include <linux/hardirq.h>
+#include <linux/ftrace.h>
+#include <linux/percpu.h>
+#include <linux/init.h>
+#include <linux/list.h>
+
+#include <asm/cacheflush.h>
+
+#define CALL_BACK	4
+
+static unsigned int ftrace_nop = 0x60000000;
+
+#ifdef CONFIG_PPC32
+# define GET_ADDR(addr) addr
+#else
+/* PowerPC64's functions are data that points to the functions */
+# define GET_ADDR(addr) *(unsigned long *)addr
+#endif
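
The GET_ADDR() distinction (and the double "ld r5,0(r5)" in the 64-bit _mcount
earlier) comes from the 64-bit PowerPC ELFv1 ABI, where a function symbol names
a descriptor rather than the code itself. A sketch of the layout, with an
illustrative struct name:

/* what a PPC64 "function pointer" really points at (name invented) */
struct func_desc_sketch {
	unsigned long entry;	/* address of the actual instructions */
	unsigned long toc;	/* TOC (r2) value the function expects */
	unsigned long env;	/* environment pointer, unused by C */
};

/* hence GET_ADDR(addr) == ((struct func_desc_sketch *)addr)->entry on PPC64 */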
+notrace int ftrace_ip_converted(unsigned long ip)
+{
+	unsigned int save;
+
+	ip -= CALL_BACK;
+	save = *(unsigned int *)ip;
+
+	return save == ftrace_nop;
+}
+
+static unsigned int notrace ftrace_calc_offset(long ip, long addr)
+{
+	return (int)((addr + CALL_BACK) - ip);
+}
+
+notrace unsigned char *ftrace_nop_replace(void)
+{
+	return (char *)&ftrace_nop;
+}
+
+notrace unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
+{
+	static unsigned int op;
+
+	addr = GET_ADDR(addr);
+
+	/* Set to "bl addr" */
+	op = 0x48000001 | (ftrace_calc_offset(ip, addr) & 0x03fffffe);
+
+	/*
+	 * No locking needed, this must be called via kstop_machine
+	 * which in essence is like running on a uniprocessor machine.
+	 */
+	return (unsigned char *)&op;
+}
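
To make the constants in ftrace_call_replace() concrete: 0x48000001 is the
PowerPC branch-and-link ("bl") instruction with a zero displacement, and the
signed, word-aligned 26-bit displacement is OR'd into it. A small user-space
worked example with invented addresses (the CALL_BACK arithmetic above only
changes where the displacement is measured from):

#include <stdio.h>

int main(void)
{
	unsigned long insn_addr = 0xc0001000UL;	/* invented call-site address */
	unsigned long target    = 0xc0001200UL;	/* invented branch target */

	/* the low two bits of the offset are always zero for 4-byte-aligned
	 * code, so masking with 0x03fffffc or 0x03fffffe gives the same bits */
	unsigned int op = 0x48000001u |
			  ((unsigned int)(target - insn_addr) & 0x03fffffc);

	printf("bl encoding: 0x%08x\n", op);	/* prints 0x48000201 */
	return 0;
}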
+#ifdef CONFIG_PPC64
+# define _ASM_ALIGN	" .align 3 "
+# define _ASM_PTR	" .llong "
+#else
+# define _ASM_ALIGN	" .align 2 "
+# define _ASM_PTR	" .long "
+#endif
+
+notrace int
+ftrace_modify_code(unsigned long ip, unsigned char *old_code,
+		   unsigned char *new_code)
+{
+	unsigned replaced;
+	unsigned old = *(unsigned *)old_code;
+	unsigned new = *(unsigned *)new_code;
+	int faulted = 0;
+
+	/* move the IP back to the start of the call */
+	ip -= CALL_BACK;
+
+	/*
+	 * Note: Due to modules and __init, code can
+	 * disappear and change, we need to protect against faulting
+	 * as well as code changing.
+	 *
+	 * No real locking needed, this code is run through
+	 * kstop_machine.
+	 */
+	asm volatile (
+		"1: lwz %1, 0(%2)\n"
+		" cmpw %1, %5\n"
+		" bne 2f\n"
+		" stwu %3, 0(%2)\n"
+		"2:\n"
+		".section .fixup, \"ax\"\n"
+		"3: li %0, 1\n"
+		" b 2b\n"
+		".previous\n"
+		".section __ex_table,\"a\"\n"
+		_ASM_ALIGN "\n"
+		_ASM_PTR "1b, 3b\n"
+		".previous"
+		: "=r"(faulted), "=r"(replaced)
+		: "r"(ip), "r"(new),
+		  "0"(faulted), "r"(old)
+		: "memory");
+
+	if (replaced != old && replaced != new)
+		faulted = 2;
+
+	if (!faulted)
+		flush_icache_range(ip, ip + 8);
+
+	return faulted;
+}
+
+notrace int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+	unsigned long ip = (unsigned long)(&ftrace_call);
+	unsigned char old[4], *new;
+	int ret;
+
+	ip += CALL_BACK;
+
+	memcpy(old, &ftrace_call, 4);
+	new = ftrace_call_replace(ip, (unsigned long)func);
+	ret = ftrace_modify_code(ip, old, new);
+
+	return ret;
+}
+
+notrace int ftrace_mcount_set(unsigned long *data)
+{
+	unsigned long ip = (long)(&mcount_call);
+	unsigned long *addr = data;
+	unsigned char old[4], *new;
+
+	/* ip is at the location, but modify code will subtract this */
+	ip += CALL_BACK;
+
+	/*
+	 * Replace the mcount stub with a pointer to the
+	 * ip recorder function.
+	 */
+	memcpy(old, &mcount_call, 4);
+	new = ftrace_call_replace(ip, *addr);
+	*addr = ftrace_modify_code(ip, old, new);
+
+	return 0;
+}
+
+int __init ftrace_dyn_arch_init(void *data)
+{
+	/* This is running in kstop_machine */
+
+	ftrace_mcount_set(data);
+
+	return 0;
+}
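
For context, a simplified sketch of how the generic dynamic-ftrace core
(kernel/trace/ftrace.c) is expected to drive the helpers above when it turns a
recorded call site back into a nop; in reality this runs under kstop_machine
and loops over every recorded site:

/* hedged sketch, not part of the patch: disable one mcount call site */
static int example_nop_call_site(unsigned long call_site_ip,
				 unsigned long tracer_addr)
{
	unsigned char *expect = ftrace_call_replace(call_site_ip, tracer_addr);
	unsigned char *nop = ftrace_nop_replace();

	/* non-zero return means the site faulted or held unexpected code */
	return ftrace_modify_code(call_site_ip, expect, nop);
}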


@@ -120,7 +120,8 @@ EXPORT_SYMBOL(_outsl_ns);
 
 #define IO_CHECK_ALIGN(v,a) ((((unsigned long)(v)) & ((a) - 1)) == 0)
 
-void _memset_io(volatile void __iomem *addr, int c, unsigned long n)
+notrace void
+_memset_io(volatile void __iomem *addr, int c, unsigned long n)
 {
 	void *p = (void __force *)addr;
 	u32 lc = c;
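
The notrace annotations added here and in the following files are the
per-function counterpart of the Makefile's CFLAGS_REMOVE_* trick: the kernel
defines notrace as a GCC attribute that suppresses the profiling call, so these
low-level helpers stay callable from inside the tracing path itself. Roughly
(the exact header and spelling may vary by kernel version):

/* effectively what the kernel's 'notrace' expands to */
#define notrace __attribute__((no_instrument_function))

static notrace void safe_inside_the_tracer(void)
{
	/* no _mcount call is emitted for this function */
}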


@@ -98,7 +98,7 @@ EXPORT_SYMBOL(irq_desc);
 
 int distribute_irqs = 1;
 
-static inline unsigned long get_hard_enabled(void)
+static inline notrace unsigned long get_hard_enabled(void)
 {
 	unsigned long enabled;
 
@@ -108,13 +108,13 @@ static inline unsigned long get_hard_enabled(void)
 	return enabled;
 }
 
-static inline void set_soft_enabled(unsigned long enable)
+static inline notrace void set_soft_enabled(unsigned long enable)
 {
 	__asm__ __volatile__("stb %0,%1(13)"
 	: : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
 }
 
-void raw_local_irq_restore(unsigned long en)
+notrace void raw_local_irq_restore(unsigned long en)
 {
 	/*
 	 * get_paca()->soft_enabled = en;


@@ -47,6 +47,11 @@
 #include <asm/kgdb.h>
 #endif
 
+#ifdef CONFIG_FTRACE
+extern void _mcount(void);
+EXPORT_SYMBOL(_mcount);
+#endif
+
 extern void bootx_init(unsigned long r4, unsigned long phys);
 
 int boot_cpuid;
@@ -81,7 +86,7 @@ int ucache_bsize;
  * from the address that it was linked at, so we must use RELOC/PTRRELOC
  * to access static data (including strings). -- paulus
  */
-unsigned long __init early_init(unsigned long dt_ptr)
+notrace unsigned long __init early_init(unsigned long dt_ptr)
 {
 	unsigned long offset = reloc_offset();
 	struct cpu_spec *spec;
@@ -111,7 +116,7 @@ unsigned long __init early_init(unsigned long dt_ptr)
  * This is called very early on the boot process, after a minimal
  * MMU environment has been set up but before MMU_init is called.
  */
-void __init machine_init(unsigned long dt_ptr, unsigned long phys)
+notrace void __init machine_init(unsigned long dt_ptr, unsigned long phys)
 {
 	/* Enable early debugging if any specified (see udbg.h) */
 	udbg_early_init();
@@ -133,7 +138,7 @@ void __init machine_init(unsigned long dt_ptr, unsigned long phys)
 
 #ifdef CONFIG_BOOKE_WDT
 /* Checks wdt=x and wdt_period=xx command-line option */
-int __init early_parse_wdt(char *p)
+notrace int __init early_parse_wdt(char *p)
 {
 	if (p && strncmp(p, "0", 1) != 0)
 		booke_wdt_enabled = 1;


@@ -85,6 +85,11 @@ struct ppc64_caches ppc64_caches = {
 };
 EXPORT_SYMBOL_GPL(ppc64_caches);
 
+#ifdef CONFIG_FTRACE
+extern void _mcount(void);
+EXPORT_SYMBOL(_mcount);
+#endif
+
 /*
  * These are used in binfmt_elf.c to put aux entries on the stack
  * for each elf executable being started.


@@ -1,5 +1,10 @@
 CFLAGS_bootx_init.o		+= -fPIC
 
+ifdef CONFIG_FTRACE
+# Do not trace early boot code
+CFLAGS_REMOVE_bootx_init.o = -pg
+endif
+
 obj-y				+= pic.o setup.o time.o feature.o pci.o \
 				   sleep.o low_i2c.o cache.o pfunc_core.o \
 				   pfunc_base.o


@@ -123,6 +123,7 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
 	int ret;
 	int save_ftrace_enabled = ftrace_enabled;
 	int save_tracer_enabled = tracer_enabled;
+	char *func_name;
 
 	/* The ftrace test PASSED */
 	printk(KERN_CONT "PASSED\n");
@@ -142,9 +143,15 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
 		return ret;
 	}
 
+	/*
+	 * Some archs *cough*PowerPC*cough* add characters to the
+	 * start of the function names. We simply put a '*' to
+	 * accommodate them.
+	 */
+	func_name = "*" STR(DYN_FTRACE_TEST_NAME);
+
 	/* filter only on our function */
-	ftrace_set_filter(STR(DYN_FTRACE_TEST_NAME),
-			  sizeof(STR(DYN_FTRACE_TEST_NAME)), 1);
+	ftrace_set_filter(func_name, strlen(func_name), 1);
 
 	/* enable tracing */
 	tr->ctrl = 1;
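
The '*' prefix is needed because on 64-bit PowerPC the symbol recorded for a C
function carries a leading dot (".foo" for foo), so an exact-match filter would
never hit. A hedged sketch of building such a pattern for an arbitrary name
(helper name and buffer size invented; the ftrace_set_filter() call mirrors the
one in the hunk above):

static void filter_one_function(const char *c_name)
{
	char pattern[128];

	/* "*foo" matches both "foo" and the dot-prefixed ".foo" form */
	snprintf(pattern, sizeof(pattern), "*%s", c_name);
	ftrace_set_filter(pattern, strlen(pattern), 1);	/* 1 = reset old filter */
}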