Merge 'tip/perf/urgent' into perf/core to pick up fixes

Needed to make perf/core buildable in some cases.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>

Commit 33be4ef116
@@ -223,27 +223,48 @@ static unsigned long
 __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
 {
 	struct kprobe *kp;
+	unsigned long faddr;
 
 	kp = get_kprobe((void *)addr);
-	/* There is no probe, return original address */
-	if (!kp)
+	faddr = ftrace_location(addr);
+	/*
+	 * Addresses inside the ftrace location are refused by
+	 * arch_check_ftrace_location(). Something went terribly wrong
+	 * if such an address is checked here.
+	 */
+	if (WARN_ON(faddr && faddr != addr))
+		return 0UL;
+	/*
+	 * Use the current code if it is not modified by Kprobe
+	 * and it cannot be modified by ftrace.
+	 */
+	if (!kp && !faddr)
 		return addr;
 
 	/*
 	 * Basically, kp->ainsn.insn has an original instruction.
 	 * However, RIP-relative instruction can not do single-stepping
 	 * at different place, __copy_instruction() tweaks the displacement of
 	 * that instruction. In that case, we can't recover the instruction
 	 * from the kp->ainsn.insn.
 	 *
-	 * On the other hand, kp->opcode has a copy of the first byte of
-	 * the probed instruction, which is overwritten by int3. And
-	 * the instruction at kp->addr is not modified by kprobes except
-	 * for the first byte, we can recover the original instruction
-	 * from it and kp->opcode.
+	 * On the other hand, in case on normal Kprobe, kp->opcode has a copy
+	 * of the first byte of the probed instruction, which is overwritten
+	 * by int3. And the instruction at kp->addr is not modified by kprobes
+	 * except for the first byte, we can recover the original instruction
+	 * from it and kp->opcode.
+	 *
+	 * In case of Kprobes using ftrace, we do not have a copy of
+	 * the original instruction. In fact, the ftrace location might
+	 * be modified at anytime and even could be in an inconsistent state.
+	 * Fortunately, we know that the original code is the ideal 5-byte
+	 * long NOP.
 	 */
-	memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
-	buf[0] = kp->opcode;
+	memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+	if (faddr)
+		memcpy(buf, ideal_nops[NOP_ATOMIC5], 5);
+	else
+		buf[0] = kp->opcode;
 	return (unsigned long)buf;
 }
@@ -251,6 +272,7 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
  * Recover the probed instruction at addr for further analysis.
  * Caller must lock kprobes by kprobe_mutex, or disable preemption
  * for preventing to release referencing kprobes.
+ * Returns zero if the instruction can not get recovered.
  */
 unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
 {
@@ -285,6 +307,8 @@ static int can_probe(unsigned long paddr)
 		 * normally used, we just go through if there is no kprobe.
 		 */
 		__addr = recover_probed_instruction(buf, addr);
+		if (!__addr)
+			return 0;
 		kernel_insn_init(&insn, (void *)__addr, MAX_INSN_SIZE);
 		insn_get_length(&insn);
@@ -333,6 +357,8 @@ int __copy_instruction(u8 *dest, u8 *src)
 	unsigned long recovered_insn =
 		recover_probed_instruction(buf, (unsigned long)src);
 
+	if (!recovered_insn)
+		return 0;
 	kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
 	insn_get_length(&insn);
 	/* Another subsystem puts a breakpoint, failed to recover */
@@ -259,6 +259,8 @@ static int can_optimize(unsigned long paddr)
 			 */
 			return 0;
 		recovered_insn = recover_probed_instruction(buf, addr);
+		if (!recovered_insn)
+			return 0;
 		kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
 		insn_get_length(&insn);
 		/* Another subsystem puts a breakpoint */
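The kprobes hunks above all feed the same recovery path: callers now have to handle a zero return, and __recover_probed_insn() picks between three sources for the original bytes (the live text, the saved first byte of a normal kprobe, or ftrace's 5-byte NOP). Below is a minimal user-space model of that decision, for illustration only: fake_probe, recover() and nop5 are stand-ins invented here, not kernel APIs, and the NOP bytes are just one common encoding of ideal_nops[NOP_ATOMIC5].

#include <stdio.h>
#include <string.h>

#define MAX_INSN 16

/* Stand-in for a registered kprobe: it saves the byte that int3 replaced. */
struct fake_probe {
	unsigned char saved_first_byte;
};

/* One common encoding of the "ideal" 5-byte NOP that ftrace patches in. */
static const unsigned char nop5[5] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };

/*
 * Model of the decision in __recover_probed_insn(): rebuild the original
 * instruction bytes at 'text', given whether a probe and/or an ftrace
 * call site covers the address.
 */
static void recover(unsigned char *buf, const unsigned char *text,
		    const struct fake_probe *kp, int is_ftrace_site)
{
	/*
	 * Start from the live bytes; they are already correct when
	 * neither kprobes nor ftrace has patched the location.
	 */
	memcpy(buf, text, MAX_INSN);

	if (is_ftrace_site)
		memcpy(buf, nop5, sizeof(nop5));	/* original code was the NOP */
	else if (kp)
		buf[0] = kp->saved_first_byte;		/* undo the int3 (0xcc) byte */
}

int main(void)
{
	/* 0xcc (int3) overwrote the original 0x55 (push %rbp). */
	unsigned char text[MAX_INSN] = { 0xcc, 0x89, 0xe5 };
	struct fake_probe kp = { .saved_first_byte = 0x55 };
	unsigned char buf[MAX_INSN];

	recover(buf, text, &kp, 0);
	printf("recovered first byte: 0x%02x\n", buf[0]);	/* prints 0x55 */
	return 0;
}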
@@ -289,7 +289,7 @@ static u64 do_memcpy_cycle(const struct routine *r, size_t len, bool prefault)
 	memcpy_t fn = r->fn.memcpy;
 	int i;
 
-	memcpy_alloc_mem(&src, &dst, len);
+	memcpy_alloc_mem(&dst, &src, len);
 
 	if (prefault)
 		fn(dst, src, len);
@@ -312,7 +312,7 @@ static double do_memcpy_gettimeofday(const struct routine *r, size_t len,
 	void *src = NULL, *dst = NULL;
 	int i;
 
-	memcpy_alloc_mem(&src, &dst, len);
+	memcpy_alloc_mem(&dst, &src, len);
 
 	if (prefault)
 		fn(dst, src, len);
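The two perf bench hunks above only swap the first two arguments, but the order matters: the helper is expected to pre-fill the buffer passed in its source slot, and with the arguments reversed the benchmark's source stayed as untouched zero pages (unrealistically cacheable) while the destination got filled instead. A small stand-alone sketch of the same pitfall; alloc_pair() is a simplified stand-in written for this example, not perf's memcpy_alloc_mem():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Simplified stand-in for perf's memcpy_alloc_mem(): allocate both buffers
 * and pre-fill only the *source*, so the copy reads real data instead of
 * shared zero pages.
 */
static void alloc_pair(void **dst, void **src, size_t len)
{
	*dst = calloc(1, len);
	*src = calloc(1, len);
	memset(*src, 0x5a, len);	/* make the source really backed by memory */
}

int main(void)
{
	void *src, *dst;
	size_t len = 4096;

	/* Buggy order: the helper fills the destination, the source stays zero. */
	alloc_pair(&src, &dst, len);
	printf("swapped args: src[0] = 0x%02x\n", ((unsigned char *)src)[0]);
	free(src);
	free(dst);

	/* Fixed order, as in the hunks above: the source gets filled. */
	alloc_pair(&dst, &src, len);
	printf("correct args: src[0] = 0x%02x\n", ((unsigned char *)src)[0]);
	free(src);
	free(dst);
	return 0;
}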
@@ -21,6 +21,10 @@ ifeq ($(RAW_ARCH),x86_64)
 endif
 endif
 
+ifeq ($(RAW_ARCH),sparc64)
+  ARCH ?= sparc
+endif
+
 ARCH ?= $(RAW_ARCH)
 
 LP64 := $(shell echo __LP64__ | ${CC} ${CFLAGS} -E -x c - | tail -n 1)
@@ -5,10 +5,11 @@ int main(void)
 {
 	int ret = 0;
 	pthread_attr_t thread_attr;
+	cpu_set_t cs;
 
 	pthread_attr_init(&thread_attr);
 	/* don't care abt exact args, just the API itself in libpthread */
-	ret = pthread_attr_setaffinity_np(&thread_attr, 0, NULL);
+	ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cs), &cs);
 
 	return ret;
 }
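The feature-detection hunk above now passes a real cpu_set_t and its size rather than 0 and NULL, presumably because newer glibc declares pthread_attr_setaffinity_np with compile-time argument checks, so the dummy arguments made the probe fail to build even where the API exists. Not part of the patch, but for reference, a self-contained illustration of the API the probe is testing, assuming glibc's GNU extensions (compile with -pthread):

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static void *worker(void *arg)
{
	(void)arg;
	printf("running on CPU %d\n", sched_getcpu());
	return NULL;
}

int main(void)
{
	pthread_attr_t attr;
	cpu_set_t cs;
	pthread_t tid;

	pthread_attr_init(&attr);
	CPU_ZERO(&cs);
	CPU_SET(0, &cs);	/* pin the new thread to CPU 0 */
	pthread_attr_setaffinity_np(&attr, sizeof(cs), &cs);

	pthread_create(&tid, &attr, worker, NULL);
	pthread_join(tid, NULL);
	pthread_attr_destroy(&attr);
	return 0;
}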
@@ -25,6 +25,10 @@ static int perf_flag_probe(void)
 	if (cpu < 0)
 		cpu = 0;
 
+	/*
+	 * Using -1 for the pid is a workaround to avoid gratuitous jump label
+	 * changes.
+	 */
 	while (1) {
 		/* check cloexec flag */
 		fd = sys_perf_event_open(&attr, pid, cpu, -1,
@@ -47,16 +51,24 @@ static int perf_flag_probe(void)
 		  err, strerror_r(err, sbuf, sizeof(sbuf)));
 
 	/* not supported, confirm error related to PERF_FLAG_FD_CLOEXEC */
-	fd = sys_perf_event_open(&attr, pid, cpu, -1, 0);
+	while (1) {
+		fd = sys_perf_event_open(&attr, pid, cpu, -1, 0);
+		if (fd < 0 && pid == -1 && errno == EACCES) {
+			pid = 0;
+			continue;
+		}
+		break;
+	}
 	err = errno;
 
+	if (fd >= 0)
+		close(fd);
+
 	if (WARN_ONCE(fd < 0 && err != EBUSY,
 		      "perf_event_open(..., 0) failed unexpectedly with error %d (%s)\n",
 		      err, strerror_r(err, sbuf, sizeof(sbuf))))
 		return -1;
 
-	close(fd);
-
 	return 0;
 }
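The probe above tries the system-wide form (pid == -1) first and falls back to a per-process event when that is not permitted. The following stand-alone sketch shows the same fallback pattern against the raw syscall instead of perf's internal sys_perf_event_open() wrapper; the function name, the fixed cpu 0, and the return convention are choices made for this example only:

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

#ifndef PERF_FLAG_FD_CLOEXEC
#define PERF_FLAG_FD_CLOEXEC (1UL << 3)	/* for older UAPI headers */
#endif

/*
 * Probe whether the running kernel accepts PERF_FLAG_FD_CLOEXEC, trying
 * pid == -1 first and falling back to pid == 0 on EACCES (for example when
 * perf_event_paranoid forbids system-wide events for this user).
 */
static int probe_cloexec(void)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_CPU_CLOCK,
		.exclude_kernel = 1,
	};
	pid_t pid = -1;
	int fd;

	while (1) {
		fd = syscall(__NR_perf_event_open, &attr, pid, 0, -1,
			     PERF_FLAG_FD_CLOEXEC);
		if (fd < 0 && pid == -1 && errno == EACCES) {
			pid = 0;	/* retry as a per-process event */
			continue;
		}
		break;
	}
	if (fd >= 0) {
		close(fd);
		return 1;	/* flag supported */
	}
	return 0;
}

int main(void)
{
	printf("PERF_FLAG_FD_CLOEXEC supported: %s\n",
	       probe_cloexec() ? "yes" : "no");
	return 0;
}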
@@ -28,7 +28,7 @@ struct perf_mmap {
 	int mask;
 	int refcnt;
 	unsigned int prev;
-	char event_copy[PERF_SAMPLE_MAX_SIZE];
+	char event_copy[PERF_SAMPLE_MAX_SIZE] __attribute__((aligned(8)));
 };
 
 struct perf_evlist {
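The alignment attribute is the whole change above: event_copy receives records copied out of the perf ring buffer whose fields are then read as 64-bit values, so forcing the array onto an 8-byte boundary keeps those reads naturally aligned on strict-alignment architectures. A small demonstration of what the attribute does to the member offset; the struct names and the buffer size are illustrative, not taken from perf:

#include <stdio.h>
#include <stddef.h>

#define SAMPLE_MAX 64	/* illustrative size, not PERF_SAMPLE_MAX_SIZE */

struct copy_unaligned {
	int mask;
	int refcnt;
	unsigned int prev;
	char event_copy[SAMPLE_MAX];
};

struct copy_aligned {
	int mask;
	int refcnt;
	unsigned int prev;
	char event_copy[SAMPLE_MAX] __attribute__((aligned(8)));
};

int main(void)
{
	/* Without the attribute the array can start on a 4-byte boundary... */
	printf("unaligned offset: %zu\n", offsetof(struct copy_unaligned, event_copy));
	/*
	 * ...with it, the copied record always starts 8-byte aligned, so any
	 * 64-bit fields read out of it are naturally aligned too.
	 */
	printf("aligned offset:   %zu\n", offsetof(struct copy_aligned, event_copy));
	return 0;
}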
@@ -11,6 +11,11 @@
 #include <symbol/kallsyms.h>
 #include "debug.h"
 
+#ifndef EM_AARCH64
+#define EM_AARCH64 183 /* ARM 64 bit */
+#endif
+
+
 #ifdef HAVE_CPLUS_DEMANGLE_SUPPORT
 extern char *cplus_demangle(const char *, int);