jump label: Make dynamic no-op selection available outside of ftrace
Move Steve's code for finding the best 5-byte no-op from ftrace.c to alternative.c. The idea is that other consumers (in this case jump label) want to make use of that code. Signed-off-by: Jason Baron <jbaron@redhat.com> LKML-Reference: <96259ae74172dcac99c0020c249743c523a92e18.1284733808.git.jbaron@redhat.com> Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
This commit is contained in:
Parent
e9d2b06414
Commit
f49aa44856
|
@ -180,4 +180,12 @@ static inline void apply_paravirt(struct paravirt_patch_site *start,
|
||||||
extern void *text_poke(void *addr, const void *opcode, size_t len);
|
extern void *text_poke(void *addr, const void *opcode, size_t len);
|
||||||
extern void *text_poke_smp(void *addr, const void *opcode, size_t len);
|
extern void *text_poke_smp(void *addr, const void *opcode, size_t len);
|
||||||
|
|
||||||
|
#if defined(CONFIG_DYNAMIC_FTRACE)

/*
 * Dynamic selection of the "ideal" 5-byte nop for this CPU.  The nop is
 * probed once at boot by arch_init_ideal_nop5() and stored in
 * ideal_nop5[], so that both dynamic ftrace and jump labels can patch
 * call sites with the same instruction.
 */
#define IDEAL_NOP_SIZE_5 5
extern unsigned char ideal_nop5[IDEAL_NOP_SIZE_5];
extern void arch_init_ideal_nop5(void);

#else

/* No dynamic patching configured: keep the boot-time call site valid. */
static inline void arch_init_ideal_nop5(void) {}

#endif
|
||||||
|
|
||||||
#endif /* _ASM_X86_ALTERNATIVE_H */
|
#endif /* _ASM_X86_ALTERNATIVE_H */
|
||||||
|
|
|
@ -641,3 +641,67 @@ void *__kprobes text_poke_smp(void *addr, const void *opcode, size_t len)
|
||||||
return addr;
|
return addr;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#if defined(CONFIG_DYNAMIC_FTRACE)
|
||||||
|
|
||||||
|
unsigned char ideal_nop5[IDEAL_NOP_SIZE_5];
|
||||||
|
|
||||||
|
void __init arch_init_ideal_nop5(void)
|
||||||
|
{
|
||||||
|
extern const unsigned char ftrace_test_p6nop[];
|
||||||
|
extern const unsigned char ftrace_test_nop5[];
|
||||||
|
extern const unsigned char ftrace_test_jmp[];
|
||||||
|
int faulted = 0;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* There is no good nop for all x86 archs.
|
||||||
|
* We will default to using the P6_NOP5, but first we
|
||||||
|
* will test to make sure that the nop will actually
|
||||||
|
* work on this CPU. If it faults, we will then
|
||||||
|
* go to a lesser efficient 5 byte nop. If that fails
|
||||||
|
* we then just use a jmp as our nop. This isn't the most
|
||||||
|
* efficient nop, but we can not use a multi part nop
|
||||||
|
* since we would then risk being preempted in the middle
|
||||||
|
* of that nop, and if we enabled tracing then, it might
|
||||||
|
* cause a system crash.
|
||||||
|
*
|
||||||
|
* TODO: check the cpuid to determine the best nop.
|
||||||
|
*/
|
||||||
|
asm volatile (
|
||||||
|
"ftrace_test_jmp:"
|
||||||
|
"jmp ftrace_test_p6nop\n"
|
||||||
|
"nop\n"
|
||||||
|
"nop\n"
|
||||||
|
"nop\n" /* 2 byte jmp + 3 bytes */
|
||||||
|
"ftrace_test_p6nop:"
|
||||||
|
P6_NOP5
|
||||||
|
"jmp 1f\n"
|
||||||
|
"ftrace_test_nop5:"
|
||||||
|
".byte 0x66,0x66,0x66,0x66,0x90\n"
|
||||||
|
"1:"
|
||||||
|
".section .fixup, \"ax\"\n"
|
||||||
|
"2: movl $1, %0\n"
|
||||||
|
" jmp ftrace_test_nop5\n"
|
||||||
|
"3: movl $2, %0\n"
|
||||||
|
" jmp 1b\n"
|
||||||
|
".previous\n"
|
||||||
|
_ASM_EXTABLE(ftrace_test_p6nop, 2b)
|
||||||
|
_ASM_EXTABLE(ftrace_test_nop5, 3b)
|
||||||
|
: "=r"(faulted) : "0" (faulted));
|
||||||
|
|
||||||
|
switch (faulted) {
|
||||||
|
case 0:
|
||||||
|
pr_info("converting mcount calls to 0f 1f 44 00 00\n");
|
||||||
|
memcpy(ideal_nop5, ftrace_test_p6nop, IDEAL_NOP_SIZE_5);
|
||||||
|
break;
|
||||||
|
case 1:
|
||||||
|
pr_info("converting mcount calls to 66 66 66 66 90\n");
|
||||||
|
memcpy(ideal_nop5, ftrace_test_nop5, IDEAL_NOP_SIZE_5);
|
||||||
|
break;
|
||||||
|
case 2:
|
||||||
|
pr_info("converting mcount calls to jmp . + 5\n");
|
||||||
|
memcpy(ideal_nop5, ftrace_test_jmp, IDEAL_NOP_SIZE_5);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
|
@ -257,14 +257,9 @@ do_ftrace_mod_code(unsigned long ip, void *new_code)
|
||||||
return mod_code_status;
|
return mod_code_status;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
/*
 * Return the 5-byte nop written over an mcount call site when tracing
 * is disabled.  The buffer is the boot-probed ideal_nop5[] from
 * alternative.c, shared with jump labels (the former file-local
 * ftrace_nop[] buffer is gone).
 */
static unsigned char *ftrace_nop_replace(void)
{
	return ideal_nop5;
}
||||||
|
|
||||||
static int
|
static int
|
||||||
|
@ -338,62 +333,6 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
|
||||||
|
|
||||||
int __init ftrace_dyn_arch_init(void *data)
|
int __init ftrace_dyn_arch_init(void *data)
|
||||||
{
|
{
|
||||||
extern const unsigned char ftrace_test_p6nop[];
|
|
||||||
extern const unsigned char ftrace_test_nop5[];
|
|
||||||
extern const unsigned char ftrace_test_jmp[];
|
|
||||||
int faulted = 0;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* There is no good nop for all x86 archs.
|
|
||||||
* We will default to using the P6_NOP5, but first we
|
|
||||||
* will test to make sure that the nop will actually
|
|
||||||
* work on this CPU. If it faults, we will then
|
|
||||||
* go to a lesser efficient 5 byte nop. If that fails
|
|
||||||
* we then just use a jmp as our nop. This isn't the most
|
|
||||||
* efficient nop, but we can not use a multi part nop
|
|
||||||
* since we would then risk being preempted in the middle
|
|
||||||
* of that nop, and if we enabled tracing then, it might
|
|
||||||
* cause a system crash.
|
|
||||||
*
|
|
||||||
* TODO: check the cpuid to determine the best nop.
|
|
||||||
*/
|
|
||||||
asm volatile (
|
|
||||||
"ftrace_test_jmp:"
|
|
||||||
"jmp ftrace_test_p6nop\n"
|
|
||||||
"nop\n"
|
|
||||||
"nop\n"
|
|
||||||
"nop\n" /* 2 byte jmp + 3 bytes */
|
|
||||||
"ftrace_test_p6nop:"
|
|
||||||
P6_NOP5
|
|
||||||
"jmp 1f\n"
|
|
||||||
"ftrace_test_nop5:"
|
|
||||||
".byte 0x66,0x66,0x66,0x66,0x90\n"
|
|
||||||
"1:"
|
|
||||||
".section .fixup, \"ax\"\n"
|
|
||||||
"2: movl $1, %0\n"
|
|
||||||
" jmp ftrace_test_nop5\n"
|
|
||||||
"3: movl $2, %0\n"
|
|
||||||
" jmp 1b\n"
|
|
||||||
".previous\n"
|
|
||||||
_ASM_EXTABLE(ftrace_test_p6nop, 2b)
|
|
||||||
_ASM_EXTABLE(ftrace_test_nop5, 3b)
|
|
||||||
: "=r"(faulted) : "0" (faulted));
|
|
||||||
|
|
||||||
switch (faulted) {
|
|
||||||
case 0:
|
|
||||||
pr_info("converting mcount calls to 0f 1f 44 00 00\n");
|
|
||||||
memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
|
|
||||||
break;
|
|
||||||
case 1:
|
|
||||||
pr_info("converting mcount calls to 66 66 66 66 90\n");
|
|
||||||
memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
|
|
||||||
break;
|
|
||||||
case 2:
|
|
||||||
pr_info("converting mcount calls to jmp . + 5\n");
|
|
||||||
memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* The return code is returned via data */
|
/* The return code is returned via data */
|
||||||
*(unsigned long *)data = 0;
|
*(unsigned long *)data = 0;
|
||||||
|
|
||||||
|
|
|
@ -112,6 +112,7 @@
|
||||||
#include <asm/numa_64.h>
|
#include <asm/numa_64.h>
|
||||||
#endif
|
#endif
|
||||||
#include <asm/mce.h>
|
#include <asm/mce.h>
|
||||||
|
#include <asm/alternative.h>
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries.
|
* end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries.
|
||||||
|
@ -726,6 +727,7 @@ void __init setup_arch(char **cmdline_p)
|
||||||
{
|
{
|
||||||
int acpi = 0;
|
int acpi = 0;
|
||||||
int k8 = 0;
|
int k8 = 0;
|
||||||
|
unsigned long flags;
|
||||||
|
|
||||||
#ifdef CONFIG_X86_32
|
#ifdef CONFIG_X86_32
|
||||||
memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
|
memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
|
||||||
|
@ -1071,6 +1073,10 @@ void __init setup_arch(char **cmdline_p)
|
||||||
x86_init.oem.banner();
|
x86_init.oem.banner();
|
||||||
|
|
||||||
mcheck_init();
|
mcheck_init();
|
||||||
|
|
||||||
|
local_irq_save(flags);
|
||||||
|
arch_init_ideal_nop5();
|
||||||
|
local_irq_restore(flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef CONFIG_X86_32
|
#ifdef CONFIG_X86_32
|
||||||
|
|
Loading…
Reference in new issue