Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf updates from Ingo Molnar:
 "Mostly tooling and PMU driver fixes, but also a number of late
  updates such as the reworking of the call-chain size limiting logic
  to make call-graph recording more robust, plus tooling side changes
  for the new 'backwards ring-buffer' extension to the perf ring-buffer"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (34 commits)
  perf record: Read from backward ring buffer
  perf record: Rename variable to make code clear
  perf record: Prevent reading invalid data in record__mmap_read
  perf evlist: Add API to pause/resume
  perf trace: Use the ptr->name beautifier as default for "filename" args
  perf trace: Use the fd->name beautifier as default for "fd" args
  perf report: Add srcline_from/to branch sort keys
  perf evsel: Record fd into perf_mmap
  perf evsel: Add overwrite attribute and check write_backward
  perf tools: Set buildid dir under symfs when --symfs is provided
  perf trace: Only auto set call-graph to "dwarf" when syscalls are being traced
  perf annotate: Sort list of recognised instructions
  perf annotate: Fix identification of ARM blt and bls instructions
  perf tools: Fix usage of max_stack sysctl
  perf callchain: Stop validating callchains by the max_stack sysctl
  perf trace: Fix exit_group() formatting
  perf top: Use machine->kptr_restrict_warned
  perf trace: Warn when trying to resolve kernel addresses with kptr_restrict=1
  perf machine: Do not bail out if not managing to read ref reloc symbol
  perf/x86/intel/p4: Trival indentation fix, remove space
  ...
Commit bdc6b758e4
@@ -61,6 +61,7 @@ show up in /proc/sys/kernel:
 - perf_cpu_time_max_percent
 - perf_event_paranoid
 - perf_event_max_stack
+- perf_event_max_contexts_per_stack
 - pid_max
 - powersave-nap [ PPC only ]
 - printk

@@ -668,6 +669,19 @@ The default value is 127.

 ==============================================================

+perf_event_max_contexts_per_stack:
+
+Controls maximum number of stack frame context entries for
+(attr.sample_type & PERF_SAMPLE_CALLCHAIN) configured events, for
+instance, when using 'perf record -g' or 'perf trace --call-graph fp'.
+
+This can only be done when no events are in use that have callchains
+enabled, otherwise writing to this file will return -EBUSY.
+
+The default value is 8.
+
+==============================================================
+
 pid_max:

 PID allocation wrap value. When the kernel's next PID value
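For orientation, the knobs documented above are plain procfs files; a quick, illustrative way to inspect and tune them (values are examples only, and the write fails with EBUSY while events using callchains are active):

    cat /proc/sys/kernel/perf_event_max_stack                  # default 127
    cat /proc/sys/kernel/perf_event_max_contexts_per_stack     # default 8
    echo 256 | sudo tee /proc/sys/kernel/perf_event_max_stack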
@@ -8881,6 +8881,7 @@ F:	arch/*/kernel/*/perf_event*.c
 F:	arch/*/kernel/*/*/perf_event*.c
 F:	arch/*/include/asm/perf_event.h
 F:	arch/*/kernel/perf_callchain.c
+F:	arch/*/events/*
 F:	tools/perf/

 PERSONALITY HANDLING
@@ -48,7 +48,7 @@ struct arc_callchain_trace {
 static int callchain_trace(unsigned int addr, void *data)
 {
 	struct arc_callchain_trace *ctrl = data;
-	struct perf_callchain_entry *entry = ctrl->perf_stuff;
+	struct perf_callchain_entry_ctx *entry = ctrl->perf_stuff;
 	perf_callchain_store(entry, addr);

 	if (ctrl->depth++ < 3)

@@ -58,7 +58,7 @@ static int callchain_trace(unsigned int addr, void *data)
 }

 void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
 	struct arc_callchain_trace ctrl = {
 		.depth = 0,

@@ -69,7 +69,7 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 }

 void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
 	/*
 	 * User stack can't be unwound trivially with kernel dwarf unwinder

@@ -31,7 +31,7 @@ struct frame_tail {
  */
 static struct frame_tail __user *
 user_backtrace(struct frame_tail __user *tail,
-	       struct perf_callchain_entry *entry)
+	       struct perf_callchain_entry_ctx *entry)
 {
 	struct frame_tail buftail;
 	unsigned long err;

@@ -59,7 +59,7 @@ user_backtrace(struct frame_tail __user *tail,
 }

 void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
 	struct frame_tail __user *tail;


@@ -75,7 +75,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)

 	tail = (struct frame_tail __user *)regs->ARM_fp - 1;

-	while ((entry->nr < sysctl_perf_event_max_stack) &&
+	while ((entry->nr < entry->max_stack) &&
 	       tail && !((unsigned long)tail & 0x3))
 		tail = user_backtrace(tail, entry);
 }

@@ -89,13 +89,13 @@ static int
 callchain_trace(struct stackframe *fr,
 		void *data)
 {
-	struct perf_callchain_entry *entry = data;
+	struct perf_callchain_entry_ctx *entry = data;
 	perf_callchain_store(entry, fr->pc);
 	return 0;
 }

 void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
 	struct stackframe fr;


@@ -31,7 +31,7 @@ struct frame_tail {
  */
 static struct frame_tail __user *
 user_backtrace(struct frame_tail __user *tail,
-	       struct perf_callchain_entry *entry)
+	       struct perf_callchain_entry_ctx *entry)
 {
 	struct frame_tail buftail;
 	unsigned long err;

@@ -76,7 +76,7 @@ struct compat_frame_tail {

 static struct compat_frame_tail __user *
 compat_user_backtrace(struct compat_frame_tail __user *tail,
-		      struct perf_callchain_entry *entry)
+		      struct perf_callchain_entry_ctx *entry)
 {
 	struct compat_frame_tail buftail;
 	unsigned long err;

@@ -106,7 +106,7 @@ compat_user_backtrace(struct compat_frame_tail __user *tail,
 }
 #endif /* CONFIG_COMPAT */

-void perf_callchain_user(struct perf_callchain_entry *entry,
+void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
 			 struct pt_regs *regs)
 {
 	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {

@@ -122,7 +122,7 @@ void perf_callchain_user(struct perf_callchain_entry *entry,

 		tail = (struct frame_tail __user *)regs->regs[29];

-		while (entry->nr < sysctl_perf_event_max_stack &&
+		while (entry->nr < entry->max_stack &&
 		       tail && !((unsigned long)tail & 0xf))
 			tail = user_backtrace(tail, entry);
 	} else {

@@ -132,7 +132,7 @@ void perf_callchain_user(struct perf_callchain_entry *entry,

 		tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;

-		while ((entry->nr < sysctl_perf_event_max_stack) &&
+		while ((entry->nr < entry->max_stack) &&
 			tail && !((unsigned long)tail & 0x3))
 			tail = compat_user_backtrace(tail, entry);
 #endif

@@ -146,12 +146,12 @@ void perf_callchain_user(struct perf_callchain_entry *entry,
  */
 static int callchain_trace(struct stackframe *frame, void *data)
 {
-	struct perf_callchain_entry *entry = data;
+	struct perf_callchain_entry_ctx *entry = data;
 	perf_callchain_store(entry, frame->pc);
 	return 0;
 }

-void perf_callchain_kernel(struct perf_callchain_entry *entry,
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 			   struct pt_regs *regs)
 {
 	struct stackframe frame;
@@ -29,7 +29,7 @@ static bool is_valid_call(unsigned long calladdr)

 static struct metag_frame __user *
 user_backtrace(struct metag_frame __user *user_frame,
-	       struct perf_callchain_entry *entry)
+	       struct perf_callchain_entry_ctx *entry)
 {
 	struct metag_frame frame;
 	unsigned long calladdr;

@@ -56,7 +56,7 @@ user_backtrace(struct metag_frame __user *user_frame,
 }

 void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
 	unsigned long sp = regs->ctx.AX[0].U0;
 	struct metag_frame __user *frame;

@@ -65,7 +65,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)

 	--frame;

-	while ((entry->nr < sysctl_perf_event_max_stack) && frame)
+	while ((entry->nr < entry->max_stack) && frame)
 		frame = user_backtrace(frame, entry);
 }


@@ -78,13 +78,13 @@ static int
 callchain_trace(struct stackframe *fr,
 		void *data)
 {
-	struct perf_callchain_entry *entry = data;
+	struct perf_callchain_entry_ctx *entry = data;
 	perf_callchain_store(entry, fr->pc);
 	return 0;
 }

 void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
 	struct stackframe fr;


@@ -25,7 +25,7 @@
  * the user stack callchains, we will add it here.
  */

-static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
+static void save_raw_perf_callchain(struct perf_callchain_entry_ctx *entry,
 				    unsigned long reg29)
 {
 	unsigned long *sp = (unsigned long *)reg29;

@@ -35,13 +35,13 @@ static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
 		addr = *sp++;
 		if (__kernel_text_address(addr)) {
 			perf_callchain_store(entry, addr);
-			if (entry->nr >= sysctl_perf_event_max_stack)
+			if (entry->nr >= entry->max_stack)
 				break;
 		}
 	}
 }

-void perf_callchain_kernel(struct perf_callchain_entry *entry,
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 			   struct pt_regs *regs)
 {
 	unsigned long sp = regs->regs[29];

@@ -59,7 +59,7 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry,
 	}
 	do {
 		perf_callchain_store(entry, pc);
-		if (entry->nr >= sysctl_perf_event_max_stack)
+		if (entry->nr >= entry->max_stack)
 			break;
 		pc = unwind_stack(current, &sp, pc, &ra);
 	} while (pc);

@@ -47,7 +47,7 @@ static int valid_next_sp(unsigned long sp, unsigned long prev_sp)
 }

 void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
 	unsigned long sp, next_sp;
 	unsigned long next_ip;

@@ -76,7 +76,7 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 			next_ip = regs->nip;
 			lr = regs->link;
 			level = 0;
-			perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
+			perf_callchain_store_context(entry, PERF_CONTEXT_KERNEL);

 		} else {
 			if (level == 0)

@@ -232,7 +232,7 @@ static int sane_signal_64_frame(unsigned long sp)
 		puc == (unsigned long) &sf->uc;
 }

-static void perf_callchain_user_64(struct perf_callchain_entry *entry,
+static void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
 				   struct pt_regs *regs)
 {
 	unsigned long sp, next_sp;

@@ -247,7 +247,7 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
 	sp = regs->gpr[1];
 	perf_callchain_store(entry, next_ip);

-	while (entry->nr < sysctl_perf_event_max_stack) {
+	while (entry->nr < entry->max_stack) {
 		fp = (unsigned long __user *) sp;
 		if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp))
 			return;

@@ -274,7 +274,7 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
 			    read_user_stack_64(&uregs[PT_R1], &sp))
 				return;
 			level = 0;
-			perf_callchain_store(entry, PERF_CONTEXT_USER);
+			perf_callchain_store_context(entry, PERF_CONTEXT_USER);
 			perf_callchain_store(entry, next_ip);
 			continue;
 		}

@@ -319,7 +319,7 @@ static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
 	return rc;
 }

-static inline void perf_callchain_user_64(struct perf_callchain_entry *entry,
+static inline void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
 					  struct pt_regs *regs)
 {
 }

@@ -439,7 +439,7 @@ static unsigned int __user *signal_frame_32_regs(unsigned int sp,
 	return mctx->mc_gregs;
 }

-static void perf_callchain_user_32(struct perf_callchain_entry *entry,
+static void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry,
 				   struct pt_regs *regs)
 {
 	unsigned int sp, next_sp;

@@ -453,7 +453,7 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
 	sp = regs->gpr[1];
 	perf_callchain_store(entry, next_ip);

-	while (entry->nr < sysctl_perf_event_max_stack) {
+	while (entry->nr < entry->max_stack) {
 		fp = (unsigned int __user *) (unsigned long) sp;
 		if (!valid_user_sp(sp, 0) || read_user_stack_32(fp, &next_sp))
 			return;

@@ -473,7 +473,7 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
 			    read_user_stack_32(&uregs[PT_R1], &sp))
 				return;
 			level = 0;
-			perf_callchain_store(entry, PERF_CONTEXT_USER);
+			perf_callchain_store_context(entry, PERF_CONTEXT_USER);
 			perf_callchain_store(entry, next_ip);
 			continue;
 		}

@@ -487,7 +487,7 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
 }

 void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
 	if (current_is_64bit())
 		perf_callchain_user_64(entry, regs);
@@ -224,13 +224,13 @@ arch_initcall(service_level_perf_register);

 static int __perf_callchain_kernel(void *data, unsigned long address)
 {
-	struct perf_callchain_entry *entry = data;
+	struct perf_callchain_entry_ctx *entry = data;

 	perf_callchain_store(entry, address);
 	return 0;
 }

-void perf_callchain_kernel(struct perf_callchain_entry *entry,
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 			   struct pt_regs *regs)
 {
 	if (user_mode(regs))

@@ -21,7 +21,7 @@ static int callchain_stack(void *data, char *name)

 static void callchain_address(void *data, unsigned long addr, int reliable)
 {
-	struct perf_callchain_entry *entry = data;
+	struct perf_callchain_entry_ctx *entry = data;

 	if (reliable)
 		perf_callchain_store(entry, addr);

@@ -33,7 +33,7 @@ static const struct stacktrace_ops callchain_ops = {
 };

 void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
 	perf_callchain_store(entry, regs->pc);


@@ -1711,7 +1711,7 @@ static int __init init_hw_perf_events(void)
 }
 pure_initcall(init_hw_perf_events);

-void perf_callchain_kernel(struct perf_callchain_entry *entry,
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 			   struct pt_regs *regs)
 {
 	unsigned long ksp, fp;

@@ -1756,7 +1756,7 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry,
 		}
 	}
 #endif
-	} while (entry->nr < sysctl_perf_event_max_stack);
+	} while (entry->nr < entry->max_stack);
 }

 static inline int

@@ -1769,7 +1769,7 @@ valid_user_frame(const void __user *fp, unsigned long size)
 	return (__range_not_ok(fp, size, TASK_SIZE) == 0);
 }

-static void perf_callchain_user_64(struct perf_callchain_entry *entry,
+static void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
 				   struct pt_regs *regs)
 {
 	unsigned long ufp;

@@ -1790,10 +1790,10 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
 		pc = sf.callers_pc;
 		ufp = (unsigned long)sf.fp + STACK_BIAS;
 		perf_callchain_store(entry, pc);
-	} while (entry->nr < sysctl_perf_event_max_stack);
+	} while (entry->nr < entry->max_stack);
 }

-static void perf_callchain_user_32(struct perf_callchain_entry *entry,
+static void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry,
 				   struct pt_regs *regs)
 {
 	unsigned long ufp;

@@ -1822,11 +1822,11 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
 			ufp = (unsigned long)sf.fp;
 		}
 		perf_callchain_store(entry, pc);
-	} while (entry->nr < sysctl_perf_event_max_stack);
+	} while (entry->nr < entry->max_stack);
 }

 void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
 	u64 saved_fault_address = current_thread_info()->fault_address;
 	u8 saved_fault_code = get_thread_fault_code();

@@ -941,7 +941,7 @@ arch_initcall(init_hw_perf_events);
 /*
  * Tile specific backtracing code for perf_events.
  */
-static inline void perf_callchain(struct perf_callchain_entry *entry,
+static inline void perf_callchain(struct perf_callchain_entry_ctx *entry,
 				  struct pt_regs *regs)
 {
 	struct KBacktraceIterator kbt;

@@ -992,13 +992,13 @@ static inline void perf_callchain(struct perf_callchain_entry *entry,
 	}
 }

-void perf_callchain_user(struct perf_callchain_entry *entry,
+void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
 			 struct pt_regs *regs)
 {
 	perf_callchain(entry, regs);
 }

-void perf_callchain_kernel(struct perf_callchain_entry *entry,
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 			   struct pt_regs *regs)
 {
 	perf_callchain(entry, regs);
@@ -2202,7 +2202,7 @@ static int backtrace_stack(void *data, char *name)

 static int backtrace_address(void *data, unsigned long addr, int reliable)
 {
-	struct perf_callchain_entry *entry = data;
+	struct perf_callchain_entry_ctx *entry = data;

 	return perf_callchain_store(entry, addr);
 }

@@ -2214,7 +2214,7 @@ static const struct stacktrace_ops backtrace_ops = {
 };

 void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
 	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
 		/* TODO: We don't support guest os callchain now */

@@ -2268,7 +2268,7 @@ static unsigned long get_segment_base(unsigned int segment)
 #include <asm/compat.h>

 static inline int
-perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
+perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry)
 {
 	/* 32-bit process in 64-bit kernel. */
 	unsigned long ss_base, cs_base;

@@ -2283,7 +2283,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)

 	fp = compat_ptr(ss_base + regs->bp);
 	pagefault_disable();
-	while (entry->nr < sysctl_perf_event_max_stack) {
+	while (entry->nr < entry->max_stack) {
 		unsigned long bytes;
 		frame.next_frame = 0;
 		frame.return_address = 0;

@@ -2309,14 +2309,14 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
 }
 #else
 static inline int
-perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
+perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry)
 {
 	return 0;
 }
 #endif

 void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
 	struct stack_frame frame;
 	const void __user *fp;

@@ -2343,7 +2343,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 		return;

 	pagefault_disable();
-	while (entry->nr < sysctl_perf_event_max_stack) {
+	while (entry->nr < entry->max_stack) {
 		unsigned long bytes;
 		frame.next_frame = NULL;
 		frame.return_address = 0;

@@ -891,7 +891,7 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		return -ENODEV;

 	pkg = topology_phys_to_logical_pkg(phys_id);
-	if (WARN_ON_ONCE(pkg < 0))
+	if (pkg < 0)
 		return -EINVAL;

 	if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {

@@ -323,23 +323,23 @@ static void xtensa_pmu_read(struct perf_event *event)

 static int callchain_trace(struct stackframe *frame, void *data)
 {
-	struct perf_callchain_entry *entry = data;
+	struct perf_callchain_entry_ctx *entry = data;

 	perf_callchain_store(entry, frame->pc);
 	return 0;
 }

-void perf_callchain_kernel(struct perf_callchain_entry *entry,
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 			   struct pt_regs *regs)
 {
-	xtensa_backtrace_kernel(regs, sysctl_perf_event_max_stack,
+	xtensa_backtrace_kernel(regs, entry->max_stack,
 				callchain_trace, NULL, entry);
 }

-void perf_callchain_user(struct perf_callchain_entry *entry,
+void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
 			 struct pt_regs *regs)
 {
-	xtensa_backtrace_user(regs, sysctl_perf_event_max_stack,
+	xtensa_backtrace_user(regs, entry->max_stack,
 			      callchain_trace, entry);
 }
@@ -61,6 +61,14 @@ struct perf_callchain_entry {
 	__u64				ip[0]; /* /proc/sys/kernel/perf_event_max_stack */
 };

+struct perf_callchain_entry_ctx {
+	struct perf_callchain_entry *entry;
+	u32			    max_stack;
+	u32			    nr;
+	short			    contexts;
+	bool			    contexts_maxed;
+};
+
 struct perf_raw_record {
 	u32				size;
 	void				*data;

@@ -1061,20 +1069,36 @@ extern void perf_event_fork(struct task_struct *tsk);
 /* Callchains */
 DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);

-extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
-extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);
+extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
+extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
 extern struct perf_callchain_entry *
 get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
-		   bool crosstask, bool add_mark);
+		   u32 max_stack, bool crosstask, bool add_mark);
 extern int get_callchain_buffers(void);
 extern void put_callchain_buffers(void);

 extern int sysctl_perf_event_max_stack;
+extern int sysctl_perf_event_max_contexts_per_stack;

-static inline int perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
+static inline int perf_callchain_store_context(struct perf_callchain_entry_ctx *ctx, u64 ip)
 {
-	if (entry->nr < sysctl_perf_event_max_stack) {
+	if (ctx->contexts < sysctl_perf_event_max_contexts_per_stack) {
+		struct perf_callchain_entry *entry = ctx->entry;
 		entry->ip[entry->nr++] = ip;
+		++ctx->contexts;
+		return 0;
+	} else {
+		ctx->contexts_maxed = true;
+		return -1; /* no more room, stop walking the stack */
+	}
+}
+
+static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64 ip)
+{
+	if (ctx->nr < ctx->max_stack && !ctx->contexts_maxed) {
+		struct perf_callchain_entry *entry = ctx->entry;
+		entry->ip[entry->nr++] = ip;
+		++ctx->nr;
 		return 0;
 	} else {
 		return -1; /* no more room, stop walking the stack */
@@ -862,6 +862,7 @@ enum perf_event_type {
 };

 #define PERF_MAX_STACK_DEPTH		127
+#define PERF_MAX_CONTEXTS_PER_STACK	  8

 enum perf_callchain_context {
 	PERF_CONTEXT_HV			= (__u64)-32,

@@ -136,7 +136,8 @@ u64 bpf_get_stackid(u64 r1, u64 r2, u64 flags, u64 r4, u64 r5)
 		       BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
 		return -EINVAL;

-	trace = get_perf_callchain(regs, init_nr, kernel, user, false, false);
+	trace = get_perf_callchain(regs, init_nr, kernel, user,
+				   sysctl_perf_event_max_stack, false, false);

 	if (unlikely(!trace))
 		/* couldn't fetch the stack trace */
@@ -19,11 +19,13 @@ struct callchain_cpus_entries {
 };

 int sysctl_perf_event_max_stack __read_mostly = PERF_MAX_STACK_DEPTH;
+int sysctl_perf_event_max_contexts_per_stack __read_mostly = PERF_MAX_CONTEXTS_PER_STACK;

 static inline size_t perf_callchain_entry__sizeof(void)
 {
 	return (sizeof(struct perf_callchain_entry) +
-		sizeof(__u64) * sysctl_perf_event_max_stack);
+		sizeof(__u64) * (sysctl_perf_event_max_stack +
+				 sysctl_perf_event_max_contexts_per_stack));
 }

 static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);

@@ -32,12 +34,12 @@ static DEFINE_MUTEX(callchain_mutex);
 static struct callchain_cpus_entries *callchain_cpus_entries;


-__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
+__weak void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 				  struct pt_regs *regs)
 {
 }

-__weak void perf_callchain_user(struct perf_callchain_entry *entry,
+__weak void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
 				struct pt_regs *regs)
 {
 }

@@ -176,14 +178,15 @@ perf_callchain(struct perf_event *event, struct pt_regs *regs)
 	if (!kernel && !user)
 		return NULL;

-	return get_perf_callchain(regs, 0, kernel, user, crosstask, true);
+	return get_perf_callchain(regs, 0, kernel, user, sysctl_perf_event_max_stack, crosstask, true);
 }

 struct perf_callchain_entry *
 get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
-		   bool crosstask, bool add_mark)
+		   u32 max_stack, bool crosstask, bool add_mark)
 {
 	struct perf_callchain_entry *entry;
+	struct perf_callchain_entry_ctx ctx;
 	int rctx;

 	entry = get_callchain_entry(&rctx);

@@ -193,12 +196,16 @@ get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
 	if (!entry)
 		goto exit_put;

-	entry->nr = init_nr;
+	ctx.entry = entry;
+	ctx.max_stack = max_stack;
+	ctx.nr = entry->nr = init_nr;
+	ctx.contexts = 0;
+	ctx.contexts_maxed = false;

 	if (kernel && !user_mode(regs)) {
 		if (add_mark)
-			perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
-		perf_callchain_kernel(entry, regs);
+			perf_callchain_store_context(&ctx, PERF_CONTEXT_KERNEL);
+		perf_callchain_kernel(&ctx, regs);
 	}

 	if (user) {

@@ -214,8 +221,8 @@ get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
 			goto exit_put;

 		if (add_mark)
-			perf_callchain_store(entry, PERF_CONTEXT_USER);
-		perf_callchain_user(entry, regs);
+			perf_callchain_store_context(&ctx, PERF_CONTEXT_USER);
+		perf_callchain_user(&ctx, regs);
 	}
 }

@@ -225,10 +232,15 @@ exit_put:
 	return entry;
 }

+/*
+ * Used for sysctl_perf_event_max_stack and
+ * sysctl_perf_event_max_contexts_per_stack.
+ */
 int perf_event_max_stack_handler(struct ctl_table *table, int write,
 				 void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-	int new_value = sysctl_perf_event_max_stack, ret;
+	int *value = table->data;
+	int new_value = *value, ret;
 	struct ctl_table new_table = *table;

 	new_table.data = &new_value;

@@ -240,7 +252,7 @@ int perf_event_max_stack_handler(struct ctl_table *table, int write,
 	if (atomic_read(&nr_callchain_events))
 		ret = -EBUSY;
 	else
-		sysctl_perf_event_max_stack = new_value;
+		*value = new_value;

 	mutex_unlock(&callchain_mutex);
@@ -1149,13 +1149,22 @@ static struct ctl_table kern_table[] = {
 	},
 	{
 		.procname	= "perf_event_max_stack",
-		.data		= NULL, /* filled in by handler */
+		.data		= &sysctl_perf_event_max_stack,
 		.maxlen		= sizeof(sysctl_perf_event_max_stack),
 		.mode		= 0644,
 		.proc_handler	= perf_event_max_stack_handler,
 		.extra1		= &zero,
 		.extra2		= &six_hundred_forty_kb,
 	},
+	{
+		.procname	= "perf_event_max_contexts_per_stack",
+		.data		= &sysctl_perf_event_max_contexts_per_stack,
+		.maxlen		= sizeof(sysctl_perf_event_max_contexts_per_stack),
+		.mode		= 0644,
+		.proc_handler	= perf_event_max_stack_handler,
+		.extra1		= &zero,
+		.extra2		= &one_thousand,
+	},
 #endif
 #ifdef CONFIG_KMEMCHECK
 	{
@@ -103,12 +103,13 @@ OPTIONS

 	If --branch-stack option is used, following sort keys are also
 	available:
-	dso_from, dso_to, symbol_from, symbol_to, mispredict.

 	- dso_from: name of library or module branched from
 	- dso_to: name of library or module branched to
 	- symbol_from: name of function branched from
 	- symbol_to: name of function branched to
+	- srcline_from: source file and line branched from
+	- srcline_to: source file and line branched to
 	- mispredict: "N" for predicted branch, "Y" for mispredicted branch
 	- in_tx: branch in TSX transaction
 	- abort: TSX transaction abort.
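As a sketch of how the new srcline_from/srcline_to keys can be exercised (the workload name is a placeholder; branch-stack sampling support is assumed):

    perf record -b -- ./workload
    perf report --branch-stack --sort srcline_from,srcline_to,mispredict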
@@ -248,7 +249,7 @@ OPTIONS
 	Note that when using the --itrace option the synthesized callchain size
 	will override this value if the synthesized callchain size is bigger.

-	Default: /proc/sys/kernel/perf_event_max_stack when present, 127 otherwise.
+	Default: 127

 -G::
 --inverted::
@@ -267,7 +267,7 @@ include::itrace.txt[]
 	Note that when using the --itrace option the synthesized callchain size
 	will override this value if the synthesized callchain size is bigger.

-	Default: /proc/sys/kernel/perf_event_max_stack when present, 127 otherwise.
+	Default: 127

 --ns::
 	Use 9 decimal places when displaying time (i.e. show the nanoseconds)

@@ -143,7 +143,8 @@ the thread executes on the designated CPUs. Default is to monitor all CPUs.
 	Implies '--call-graph dwarf' when --call-graph not present on the
 	command line, on systems where DWARF unwinding was built in.

-	Default: /proc/sys/kernel/perf_event_max_stack when present, 127 otherwise.
+	Default: /proc/sys/kernel/perf_event_max_stack when present for
+		 live sessions (without --input/-i), 127 otherwise.

 --min-stack::
 	Set the stack depth limit when parsing the callchain, anything
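A usage sketch for the options described above (illustrative values; --call-graph, --max-stack and --input are the documented flags):

    perf trace --call-graph dwarf --max-stack 16 -- ls
    perf trace -i perf.data --max-stack 8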
@@ -324,8 +324,9 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __maybe_unused)
 	OPT_BOOLEAN(0, "skip-missing", &annotate.skip_missing,
 		    "Skip symbols that cannot be annotated"),
 	OPT_STRING('C', "cpu", &annotate.cpu_list, "cpu", "list of cpus to profile"),
-	OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
-		   "Look for files with symbols relative to this directory"),
+	OPT_CALLBACK(0, "symfs", NULL, "directory",
+		     "Look for files with symbols relative to this directory",
+		     symbol__config_symfs),
 	OPT_BOOLEAN(0, "source", &symbol_conf.annotate_src,
 		    "Interleave source code with assembly code (default)"),
 	OPT_BOOLEAN(0, "asm-raw", &symbol_conf.annotate_asm_raw,

@@ -119,8 +119,8 @@ static int build_id_cache__add_kcore(const char *filename, bool force)
 	if (build_id_cache__kcore_buildid(from_dir, sbuildid) < 0)
 		return -1;

-	scnprintf(to_dir, sizeof(to_dir), "%s/[kernel.kcore]/%s",
-		  buildid_dir, sbuildid);
+	scnprintf(to_dir, sizeof(to_dir), "%s/%s/%s",
+		  buildid_dir, DSO__NAME_KCORE, sbuildid);

 	if (!force &&
 	    !build_id_cache__kcore_existing(from_dir, to_dir, sizeof(to_dir))) {

@@ -131,8 +131,8 @@ static int build_id_cache__add_kcore(const char *filename, bool force)
 	if (build_id_cache__kcore_dir(dir, sizeof(dir)))
 		return -1;

-	scnprintf(to_dir, sizeof(to_dir), "%s/[kernel.kcore]/%s/%s",
-		  buildid_dir, sbuildid, dir);
+	scnprintf(to_dir, sizeof(to_dir), "%s/%s/%s/%s",
+		  buildid_dir, DSO__NAME_KCORE, sbuildid, dir);

 	if (mkdir_p(to_dir, 0755))
 		return -1;

@@ -812,8 +812,9 @@ static const struct option options[] = {
 	OPT_STRING_NOEMPTY('t', "field-separator", &symbol_conf.field_sep, "separator",
 		   "separator for columns, no spaces will be added between "
 		   "columns '.' is reserved."),
-	OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
-		   "Look for files with symbols relative to this directory"),
+	OPT_CALLBACK(0, "symfs", NULL, "directory",
+		     "Look for files with symbols relative to this directory",
+		     symbol__config_symfs),
 	OPT_UINTEGER('o', "order", &sort_compute, "Specify compute sorting."),
 	OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
 		     "How to display percentage of filtered entries", parse_filter_percentage),
@@ -40,6 +40,7 @@
 #include <unistd.h>
 #include <sched.h>
 #include <sys/mman.h>
+#include <asm/bug.h>


 struct record {

@@ -82,27 +83,87 @@ static int process_synthesized_event(struct perf_tool *tool,
 	return record__write(rec, event, event->header.size);
 }

+static int
+backward_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64 *end)
+{
+	struct perf_event_header *pheader;
+	u64 evt_head = head;
+	int size = mask + 1;
+
+	pr_debug2("backward_rb_find_range: buf=%p, head=%"PRIx64"\n", buf, head);
+	pheader = (struct perf_event_header *)(buf + (head & mask));
+	*start = head;
+	while (true) {
+		if (evt_head - head >= (unsigned int)size) {
+			pr_debug("Finshed reading backward ring buffer: rewind\n");
+			if (evt_head - head > (unsigned int)size)
+				evt_head -= pheader->size;
+			*end = evt_head;
+			return 0;
+		}
+
+		pheader = (struct perf_event_header *)(buf + (evt_head & mask));
+
+		if (pheader->size == 0) {
+			pr_debug("Finshed reading backward ring buffer: get start\n");
+			*end = evt_head;
+			return 0;
+		}
+
+		evt_head += pheader->size;
+		pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
+	}
+	WARN_ONCE(1, "Shouldn't get here\n");
+	return -1;
+}
+
+static int
+rb_find_range(struct perf_evlist *evlist,
+	      void *data, int mask, u64 head, u64 old,
+	      u64 *start, u64 *end)
+{
+	if (!evlist->backward) {
+		*start = old;
+		*end = head;
+		return 0;
+	}
+
+	return backward_rb_find_range(data, mask, head, start, end);
+}
+
 static int record__mmap_read(struct record *rec, int idx)
 {
 	struct perf_mmap *md = &rec->evlist->mmap[idx];
 	u64 head = perf_mmap__read_head(md);
 	u64 old = md->prev;
+	u64 end = head, start = old;
 	unsigned char *data = md->base + page_size;
 	unsigned long size;
 	void *buf;
 	int rc = 0;

-	if (old == head)
+	if (rb_find_range(rec->evlist, data, md->mask, head,
+			  old, &start, &end))
+		return -1;
+
+	if (start == end)
 		return 0;

 	rec->samples++;

-	size = head - old;
+	size = end - start;
+	if (size > (unsigned long)(md->mask) + 1) {
+		WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");

-	if ((old & md->mask) + size != (head & md->mask)) {
-		buf = &data[old & md->mask];
-		size = md->mask + 1 - (old & md->mask);
-		old += size;
+		md->prev = head;
+		perf_evlist__mmap_consume(rec->evlist, idx);
+		return 0;
+	}
+
+	if ((start & md->mask) + size != (end & md->mask)) {
+		buf = &data[start & md->mask];
+		size = md->mask + 1 - (start & md->mask);
+		start += size;

 		if (record__write(rec, buf, size) < 0) {
 			rc = -1;

@@ -110,16 +171,16 @@ static int record__mmap_read(struct record *rec, int idx)
 		}
 	}

-	buf = &data[old & md->mask];
-	size = head - old;
-	old += size;
+	buf = &data[start & md->mask];
+	size = end - start;
+	start += size;

 	if (record__write(rec, buf, size) < 0) {
 		rc = -1;
 		goto out;
 	}

-	md->prev = old;
+	md->prev = head;
 	perf_evlist__mmap_consume(rec->evlist, idx);
 out:
 	return rc;
@@ -691,7 +691,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
 			.ordered_events	 = true,
 			.ordering_requires_timestamps = true,
 		},
-		.max_stack		 = sysctl_perf_event_max_stack,
+		.max_stack		 = PERF_MAX_STACK_DEPTH,
 		.pretty_printing_style	 = "normal",
 		.socket_filter		 = -1,
 	};

@@ -770,8 +770,9 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
 		   "columns '.' is reserved."),
 	OPT_BOOLEAN('U', "hide-unresolved", &symbol_conf.hide_unresolved,
 		    "Only display entries resolved to a symbol"),
-	OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
-		   "Look for files with symbols relative to this directory"),
+	OPT_CALLBACK(0, "symfs", NULL, "directory",
+		     "Look for files with symbols relative to this directory",
+		     symbol__config_symfs),
 	OPT_STRING('C', "cpu", &report.cpu_list, "cpu",
 		   "list of cpus to profile"),
 	OPT_BOOLEAN('I', "show-info", &report.show_full_info,

@@ -2010,8 +2010,9 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
 		   "file", "kallsyms pathname"),
 	OPT_BOOLEAN('G', "hide-call-graph", &no_callchain,
 		    "When printing symbols do not display call chain"),
-	OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
-		   "Look for files with symbols relative to this directory"),
+	OPT_CALLBACK(0, "symfs", NULL, "directory",
+		     "Look for files with symbols relative to this directory",
+		     symbol__config_symfs),
 	OPT_CALLBACK('F', "fields", NULL, "str",
 		     "comma separated output fields prepend with 'type:'. "
 		     "Valid types: hw,sw,trace,raw. "

@@ -2067,8 +2068,6 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
 		NULL
 	};

-	scripting_max_stack = sysctl_perf_event_max_stack;
-
 	setup_scripting();

 	argc = parse_options_subcommand(argc, argv, options, script_subcommands, script_usage,
@@ -66,6 +66,7 @@
 #include <stdlib.h>
 #include <sys/prctl.h>
 #include <locale.h>
+#include <math.h>

 #define DEFAULT_SEPARATOR " "
 #define CNTR_NOT_SUPPORTED "<not supported>"

@@ -991,12 +992,12 @@ static void abs_printout(int id, int nr, struct perf_evsel *evsel, double avg)
 	const char *fmt;

 	if (csv_output) {
-		fmt = sc != 1.0 ? "%.2f%s" : "%.0f%s";
+		fmt = floor(sc) != sc ? "%.2f%s" : "%.0f%s";
 	} else {
 		if (big_num)
-			fmt = sc != 1.0 ? "%'18.2f%s" : "%'18.0f%s";
+			fmt = floor(sc) != sc ? "%'18.2f%s" : "%'18.0f%s";
 		else
-			fmt = sc != 1.0 ? "%18.2f%s" : "%18.0f%s";
+			fmt = floor(sc) != sc ? "%18.2f%s" : "%18.0f%s";
 	}

 	aggr_printout(evsel, id, nr);

@@ -1909,6 +1910,9 @@ static int add_default_attributes(void)
 	}

 	if (!evsel_list->nr_entries) {
+		if (target__has_cpu(&target))
+			default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK;
+
 		if (perf_evlist__add_default_attrs(evsel_list, default_attrs0) < 0)
 			return -1;
 		if (pmu_have_event("cpu", "stalled-cycles-frontend")) {

@@ -2000,7 +2004,7 @@ static int process_stat_round_event(struct perf_tool *tool __maybe_unused,
 				     union perf_event *event,
 				     struct perf_session *session)
 {
-	struct stat_round_event *round = &event->stat_round;
+	struct stat_round_event *stat_round = &event->stat_round;
 	struct perf_evsel *counter;
 	struct timespec tsh, *ts = NULL;
 	const char **argv = session->header.env.cmdline_argv;

@@ -2009,12 +2013,12 @@ static int process_stat_round_event(struct perf_tool *tool __maybe_unused,
 	evlist__for_each(evsel_list, counter)
 		perf_stat_process_counter(&stat_config, counter);

-	if (round->type == PERF_STAT_ROUND_TYPE__FINAL)
-		update_stats(&walltime_nsecs_stats, round->time);
+	if (stat_round->type == PERF_STAT_ROUND_TYPE__FINAL)
+		update_stats(&walltime_nsecs_stats, stat_round->time);

-	if (stat_config.interval && round->time) {
-		tsh.tv_sec = round->time / NSECS_PER_SEC;
-		tsh.tv_nsec = round->time % NSECS_PER_SEC;
+	if (stat_config.interval && stat_round->time) {
+		tsh.tv_sec = stat_round->time / NSECS_PER_SEC;
+		tsh.tv_nsec = stat_round->time % NSECS_PER_SEC;
 		ts = &tsh;
 	}

@ -1945,8 +1945,9 @@ int cmd_timechart(int argc, const char **argv,
|
||||||
OPT_CALLBACK('p', "process", NULL, "process",
|
OPT_CALLBACK('p', "process", NULL, "process",
|
||||||
"process selector. Pass a pid or process name.",
|
"process selector. Pass a pid or process name.",
|
||||||
parse_process),
|
parse_process),
|
||||||
OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
|
OPT_CALLBACK(0, "symfs", NULL, "directory",
|
||||||
"Look for files with symbols relative to this directory"),
|
"Look for files with symbols relative to this directory",
|
||||||
|
symbol__config_symfs),
|
||||||
OPT_INTEGER('n', "proc-num", &tchart.proc_num,
|
OPT_INTEGER('n', "proc-num", &tchart.proc_num,
|
||||||
"min. number of tasks to print"),
|
"min. number of tasks to print"),
|
||||||
OPT_BOOLEAN('t', "topology", &tchart.topology,
|
OPT_BOOLEAN('t', "topology", &tchart.topology,
|
||||||
|
|
|
@ -732,7 +732,7 @@ static void perf_event__process_sample(struct perf_tool *tool,
|
||||||
if (machine__resolve(machine, &al, sample) < 0)
|
if (machine__resolve(machine, &al, sample) < 0)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
if (!top->kptr_restrict_warned &&
|
if (!machine->kptr_restrict_warned &&
|
||||||
symbol_conf.kptr_restrict &&
|
symbol_conf.kptr_restrict &&
|
||||||
al.cpumode == PERF_RECORD_MISC_KERNEL) {
|
al.cpumode == PERF_RECORD_MISC_KERNEL) {
|
||||||
ui__warning(
|
ui__warning(
|
||||||
|
@ -743,7 +743,7 @@ static void perf_event__process_sample(struct perf_tool *tool,
|
||||||
" modules" : "");
|
" modules" : "");
|
||||||
if (use_browser <= 0)
|
if (use_browser <= 0)
|
||||||
sleep(5);
|
sleep(5);
|
||||||
top->kptr_restrict_warned = true;
|
machine->kptr_restrict_warned = true;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (al.sym == NULL) {
|
if (al.sym == NULL) {
|
||||||
|
@ -759,7 +759,7 @@ static void perf_event__process_sample(struct perf_tool *tool,
|
||||||
* --hide-kernel-symbols, even if the user specifies an
|
* --hide-kernel-symbols, even if the user specifies an
|
||||||
* invalid --vmlinux ;-)
|
* invalid --vmlinux ;-)
|
||||||
*/
|
*/
|
||||||
if (!top->kptr_restrict_warned && !top->vmlinux_warned &&
|
if (!machine->kptr_restrict_warned && !top->vmlinux_warned &&
|
||||||
al.map == machine->vmlinux_maps[MAP__FUNCTION] &&
|
al.map == machine->vmlinux_maps[MAP__FUNCTION] &&
|
||||||
RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION])) {
|
RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION])) {
|
||||||
if (symbol_conf.vmlinux_name) {
|
if (symbol_conf.vmlinux_name) {
|
||||||
|
|
|
@@ -576,84 +576,54 @@ static struct syscall_fmt {
 	bool	   hexret;
 } syscall_fmts[] = {
 	{ .name = "access", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */
-			     [1] = SCA_ACCMODE, /* mode */ }, },
+	  .arg_scnprintf = { [1] = SCA_ACCMODE, /* mode */ }, },
 	{ .name = "arch_prctl", .errmsg = true, .alias = "prctl", },
 	{ .name = "bpf", .errmsg = true, STRARRAY(0, cmd, bpf_cmd), },
 	{ .name = "brk", .hexret = true,
 	  .arg_scnprintf = { [0] = SCA_HEX, /* brk */ }, },
-	{ .name = "chdir", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
-	{ .name = "chmod", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
-	{ .name = "chroot", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
+	{ .name = "chdir", .errmsg = true, },
+	{ .name = "chmod", .errmsg = true, },
+	{ .name = "chroot", .errmsg = true, },
 	{ .name = "clock_gettime", .errmsg = true, STRARRAY(0, clk_id, clockid), },
 	{ .name = "clone", .errpid = true, },
 	{ .name = "close", .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_CLOSE_FD, /* fd */ }, },
 	{ .name = "connect", .errmsg = true, },
-	{ .name = "creat", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
-	{ .name = "dup", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
-	{ .name = "dup2", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
-	{ .name = "dup3", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+	{ .name = "creat", .errmsg = true, },
+	{ .name = "dup", .errmsg = true, },
+	{ .name = "dup2", .errmsg = true, },
+	{ .name = "dup3", .errmsg = true, },
 	{ .name = "epoll_ctl", .errmsg = true, STRARRAY(1, op, epoll_ctl_ops), },
 	{ .name = "eventfd2", .errmsg = true,
 	  .arg_scnprintf = { [1] = SCA_EFD_FLAGS, /* flags */ }, },
-	{ .name = "faccessat", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
-			     [1] = SCA_FILENAME, /* filename */ }, },
-	{ .name = "fadvise64", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
-	{ .name = "fallocate", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
-	{ .name = "fchdir", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
-	{ .name = "fchmod", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+	{ .name = "faccessat", .errmsg = true, },
+	{ .name = "fadvise64", .errmsg = true, },
+	{ .name = "fallocate", .errmsg = true, },
+	{ .name = "fchdir", .errmsg = true, },
+	{ .name = "fchmod", .errmsg = true, },
 	{ .name = "fchmodat", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */
-			     [1] = SCA_FILENAME, /* filename */ }, },
-	{ .name = "fchown", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
+	{ .name = "fchown", .errmsg = true, },
 	{ .name = "fchownat", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */
-			     [1] = SCA_FILENAME, /* filename */ }, },
+	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
 	{ .name = "fcntl", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */
-			     [1] = SCA_STRARRAY, /* cmd */ },
+	  .arg_scnprintf = { [1] = SCA_STRARRAY, /* cmd */ },
 	  .arg_parm = { [1] = &strarray__fcntl_cmds, /* cmd */ }, },
-	{ .name = "fdatasync", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+	{ .name = "fdatasync", .errmsg = true, },
 	{ .name = "flock", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */
-			     [1] = SCA_FLOCK, /* cmd */ }, },
-	{ .name = "fsetxattr", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
-	{ .name = "fstat", .errmsg = true, .alias = "newfstat",
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
-	{ .name = "fstatat", .errmsg = true, .alias = "newfstatat",
-	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
-			     [1] = SCA_FILENAME, /* filename */ }, },
-	{ .name = "fstatfs", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
-	{ .name = "fsync", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
-	{ .name = "ftruncate", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+	  .arg_scnprintf = { [1] = SCA_FLOCK, /* cmd */ }, },
+	{ .name = "fsetxattr", .errmsg = true, },
+	{ .name = "fstat", .errmsg = true, .alias = "newfstat", },
+	{ .name = "fstatat", .errmsg = true, .alias = "newfstatat", },
+	{ .name = "fstatfs", .errmsg = true, },
+	{ .name = "fsync", .errmsg = true, },
+	{ .name = "ftruncate", .errmsg = true, },
 	{ .name = "futex", .errmsg = true,
 	  .arg_scnprintf = { [1] = SCA_FUTEX_OP, /* op */ }, },
 	{ .name = "futimesat", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */
-			     [1] = SCA_FILENAME, /* filename */ }, },
-	{ .name = "getdents", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
-	{ .name = "getdents64", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
+	{ .name = "getdents", .errmsg = true, },
+	{ .name = "getdents64", .errmsg = true, },
 	{ .name = "getitimer", .errmsg = true, STRARRAY(0, which, itimers), },
 	{ .name = "getpid", .errpid = true, },
 	{ .name = "getpgid", .errpid = true, },
@@ -661,12 +631,10 @@ static struct syscall_fmt {
 	{ .name = "getrandom", .errmsg = true,
 	  .arg_scnprintf = { [2] = SCA_GETRANDOM_FLAGS, /* flags */ }, },
 	{ .name = "getrlimit", .errmsg = true, STRARRAY(0, resource, rlimit_resources), },
-	{ .name = "getxattr", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
-	{ .name = "inotify_add_watch", .errmsg = true,
-	  .arg_scnprintf = { [1] = SCA_FILENAME, /* pathname */ }, },
+	{ .name = "getxattr", .errmsg = true, },
+	{ .name = "inotify_add_watch", .errmsg = true, },
 	{ .name = "ioctl", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */
+	  .arg_scnprintf = {
 #if defined(__i386__) || defined(__x86_64__)
 /*
  * FIXME: Make this available to all arches.
@@ -680,41 +648,28 @@ static struct syscall_fmt {
 	{ .name = "keyctl", .errmsg = true, STRARRAY(0, option, keyctl_options), },
 	{ .name = "kill", .errmsg = true,
 	  .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
-	{ .name = "lchown", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
-	{ .name = "lgetxattr", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
+	{ .name = "lchown", .errmsg = true, },
+	{ .name = "lgetxattr", .errmsg = true, },
 	{ .name = "linkat", .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
-	{ .name = "listxattr", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
-	{ .name = "llistxattr", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
-	{ .name = "lremovexattr", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
+	{ .name = "listxattr", .errmsg = true, },
+	{ .name = "llistxattr", .errmsg = true, },
+	{ .name = "lremovexattr", .errmsg = true, },
 	{ .name = "lseek", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */
-			     [2] = SCA_STRARRAY, /* whence */ },
+	  .arg_scnprintf = { [2] = SCA_STRARRAY, /* whence */ },
 	  .arg_parm = { [2] = &strarray__whences, /* whence */ }, },
-	{ .name = "lsetxattr", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
-	{ .name = "lstat", .errmsg = true, .alias = "newlstat",
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
-	{ .name = "lsxattr", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
+	{ .name = "lsetxattr", .errmsg = true, },
+	{ .name = "lstat", .errmsg = true, .alias = "newlstat", },
+	{ .name = "lsxattr", .errmsg = true, },
 	{ .name = "madvise", .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_HEX, /* start */
 			     [2] = SCA_MADV_BHV, /* behavior */ }, },
-	{ .name = "mkdir", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
+	{ .name = "mkdir", .errmsg = true, },
 	{ .name = "mkdirat", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */
-			     [1] = SCA_FILENAME, /* pathname */ }, },
-	{ .name = "mknod", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
+	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
+	{ .name = "mknod", .errmsg = true, },
 	{ .name = "mknodat", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */
-			     [1] = SCA_FILENAME, /* filename */ }, },
+	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
 	{ .name = "mlock", .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
 	{ .name = "mlockall", .errmsg = true,
@@ -722,8 +677,7 @@ static struct syscall_fmt {
 	{ .name = "mmap", .hexret = true,
 	  .arg_scnprintf = { [0] = SCA_HEX, /* addr */
 			     [2] = SCA_MMAP_PROT, /* prot */
-			     [3] = SCA_MMAP_FLAGS, /* flags */
-			     [4] = SCA_FD, /* fd */ }, },
+			     [3] = SCA_MMAP_FLAGS, /* flags */ }, },
 	{ .name = "mprotect", .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_HEX, /* start */
 			     [2] = SCA_MMAP_PROT, /* prot */ }, },
@@ -740,17 +694,14 @@ static struct syscall_fmt {
 	{ .name = "name_to_handle_at", .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
 	{ .name = "newfstatat", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
-			     [1] = SCA_FILENAME, /* filename */ }, },
+	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
 	{ .name = "open", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */
-			     [1] = SCA_OPEN_FLAGS, /* flags */ }, },
+	  .arg_scnprintf = { [1] = SCA_OPEN_FLAGS, /* flags */ }, },
 	{ .name = "open_by_handle_at", .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
 			     [2] = SCA_OPEN_FLAGS, /* flags */ }, },
 	{ .name = "openat", .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
-			     [1] = SCA_FILENAME, /* filename */
 			     [2] = SCA_OPEN_FLAGS, /* flags */ }, },
 	{ .name = "perf_event_open", .errmsg = true,
 	  .arg_scnprintf = { [2] = SCA_INT, /* cpu */
@@ -760,39 +711,26 @@ static struct syscall_fmt {
 	  .arg_scnprintf = { [1] = SCA_PIPE_FLAGS, /* flags */ }, },
 	{ .name = "poll", .errmsg = true, .timeout = true, },
 	{ .name = "ppoll", .errmsg = true, .timeout = true, },
-	{ .name = "pread", .errmsg = true, .alias = "pread64",
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
-	{ .name = "preadv", .errmsg = true, .alias = "pread",
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+	{ .name = "pread", .errmsg = true, .alias = "pread64", },
+	{ .name = "preadv", .errmsg = true, .alias = "pread", },
 	{ .name = "prlimit64", .errmsg = true, STRARRAY(1, resource, rlimit_resources), },
-	{ .name = "pwrite", .errmsg = true, .alias = "pwrite64",
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
-	{ .name = "pwritev", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
-	{ .name = "read", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
-	{ .name = "readlink", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* path */ }, },
+	{ .name = "pwrite", .errmsg = true, .alias = "pwrite64", },
+	{ .name = "pwritev", .errmsg = true, },
+	{ .name = "read", .errmsg = true, },
+	{ .name = "readlink", .errmsg = true, },
 	{ .name = "readlinkat", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
-			     [1] = SCA_FILENAME, /* pathname */ }, },
-	{ .name = "readv", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
+	{ .name = "readv", .errmsg = true, },
 	{ .name = "recvfrom", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */
-			     [3] = SCA_MSG_FLAGS, /* flags */ }, },
+	  .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, },
 	{ .name = "recvmmsg", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */
-			     [3] = SCA_MSG_FLAGS, /* flags */ }, },
+	  .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, },
 	{ .name = "recvmsg", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */
-			     [2] = SCA_MSG_FLAGS, /* flags */ }, },
-	{ .name = "removexattr", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
+	  .arg_scnprintf = { [2] = SCA_MSG_FLAGS, /* flags */ }, },
+	{ .name = "removexattr", .errmsg = true, },
 	{ .name = "renameat", .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
-	{ .name = "rmdir", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
+	{ .name = "rmdir", .errmsg = true, },
 	{ .name = "rt_sigaction", .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_SIGNUM, /* sig */ }, },
 	{ .name = "rt_sigprocmask", .errmsg = true, STRARRAY(0, how, sighow), },
@@ -807,22 +745,17 @@ static struct syscall_fmt {
 			     [1] = SCA_SECCOMP_FLAGS, /* flags */ }, },
 	{ .name = "select", .errmsg = true, .timeout = true, },
 	{ .name = "sendmmsg", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */
-			     [3] = SCA_MSG_FLAGS, /* flags */ }, },
+	  .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, },
 	{ .name = "sendmsg", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */
-			     [2] = SCA_MSG_FLAGS, /* flags */ }, },
+	  .arg_scnprintf = { [2] = SCA_MSG_FLAGS, /* flags */ }, },
 	{ .name = "sendto", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */
-			     [3] = SCA_MSG_FLAGS, /* flags */ }, },
+	  .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, },
 	{ .name = "set_tid_address", .errpid = true, },
 	{ .name = "setitimer", .errmsg = true, STRARRAY(0, which, itimers), },
 	{ .name = "setpgid", .errmsg = true, },
 	{ .name = "setrlimit", .errmsg = true, STRARRAY(0, resource, rlimit_resources), },
-	{ .name = "setxattr", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
-	{ .name = "shutdown", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+	{ .name = "setxattr", .errmsg = true, },
+	{ .name = "shutdown", .errmsg = true, },
 	{ .name = "socket", .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_STRARRAY, /* family */
 			     [1] = SCA_SK_TYPE, /* type */ },
@@ -831,10 +764,8 @@ static struct syscall_fmt {
 	  .arg_scnprintf = { [0] = SCA_STRARRAY, /* family */
 			     [1] = SCA_SK_TYPE, /* type */ },
 	  .arg_parm = { [0] = &strarray__socket_families, /* family */ }, },
-	{ .name = "stat", .errmsg = true, .alias = "newstat",
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
-	{ .name = "statfs", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
+	{ .name = "stat", .errmsg = true, .alias = "newstat", },
+	{ .name = "statfs", .errmsg = true, },
 	{ .name = "swapoff", .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_FILENAME, /* specialfile */ }, },
 	{ .name = "swapon", .errmsg = true,
@@ -845,29 +776,21 @@ static struct syscall_fmt {
 	  .arg_scnprintf = { [2] = SCA_SIGNUM, /* sig */ }, },
 	{ .name = "tkill", .errmsg = true,
 	  .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
-	{ .name = "truncate", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* path */ }, },
+	{ .name = "truncate", .errmsg = true, },
 	{ .name = "uname", .errmsg = true, .alias = "newuname", },
 	{ .name = "unlinkat", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
-			     [1] = SCA_FILENAME, /* pathname */ }, },
-	{ .name = "utime", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
+	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
+	{ .name = "utime", .errmsg = true, },
 	{ .name = "utimensat", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FDAT, /* dirfd */
-			     [1] = SCA_FILENAME, /* filename */ }, },
-	{ .name = "utimes", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
-	{ .name = "vmsplice", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+	  .arg_scnprintf = { [0] = SCA_FDAT, /* dirfd */ }, },
+	{ .name = "utimes", .errmsg = true, },
+	{ .name = "vmsplice", .errmsg = true, },
 	{ .name = "wait4", .errpid = true,
 	  .arg_scnprintf = { [2] = SCA_WAITID_OPTIONS, /* options */ }, },
 	{ .name = "waitid", .errpid = true,
 	  .arg_scnprintf = { [3] = SCA_WAITID_OPTIONS, /* options */ }, },
-	{ .name = "write", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
-	{ .name = "writev", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+	{ .name = "write", .errmsg = true, },
+	{ .name = "writev", .errmsg = true, },
 };
 
 static int syscall_fmt__cmp(const void *name, const void *fmtp)
@@ -1160,6 +1083,24 @@ static int trace__tool_process(struct perf_tool *tool,
 	return trace__process_event(trace, machine, event, sample);
 }
 
+static char *trace__machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
+{
+	struct machine *machine = vmachine;
+
+	if (machine->kptr_restrict_warned)
+		return NULL;
+
+	if (symbol_conf.kptr_restrict) {
+		pr_warning("Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
+			   "Check /proc/sys/kernel/kptr_restrict.\n\n"
+			   "Kernel samples will not be resolved.\n");
+		machine->kptr_restrict_warned = true;
+		return NULL;
+	}
+
+	return machine__resolve_kernel_addr(vmachine, addrp, modp);
+}
+
 static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
 {
 	int err = symbol__init(NULL);
@@ -1171,7 +1112,7 @@ static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
 	if (trace->host == NULL)
 		return -ENOMEM;
 
-	if (trace_event__register_resolver(trace->host, machine__resolve_kernel_addr) < 0)
+	if (trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr) < 0)
 		return -errno;
 
 	err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
@@ -1186,7 +1127,7 @@ static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
 static int syscall__set_arg_fmts(struct syscall *sc)
 {
 	struct format_field *field;
-	int idx = 0;
+	int idx = 0, len;
 
 	sc->arg_scnprintf = calloc(sc->nr_args, sizeof(void *));
 	if (sc->arg_scnprintf == NULL)
@@ -1198,12 +1139,31 @@ static int syscall__set_arg_fmts(struct syscall *sc)
 	for (field = sc->args; field; field = field->next) {
 		if (sc->fmt && sc->fmt->arg_scnprintf[idx])
 			sc->arg_scnprintf[idx] = sc->fmt->arg_scnprintf[idx];
+		else if (strcmp(field->type, "const char *") == 0 &&
+			 (strcmp(field->name, "filename") == 0 ||
+			  strcmp(field->name, "path") == 0 ||
+			  strcmp(field->name, "pathname") == 0))
+			sc->arg_scnprintf[idx] = SCA_FILENAME;
 		else if (field->flags & FIELD_IS_POINTER)
 			sc->arg_scnprintf[idx] = syscall_arg__scnprintf_hex;
 		else if (strcmp(field->type, "pid_t") == 0)
 			sc->arg_scnprintf[idx] = SCA_PID;
 		else if (strcmp(field->type, "umode_t") == 0)
 			sc->arg_scnprintf[idx] = SCA_MODE_T;
+		else if ((strcmp(field->type, "int") == 0 ||
+			  strcmp(field->type, "unsigned int") == 0 ||
+			  strcmp(field->type, "long") == 0) &&
+			 (len = strlen(field->name)) >= 2 &&
+			 strcmp(field->name + len - 2, "fd") == 0) {
+			/*
+			 * /sys/kernel/tracing/events/syscalls/sys_enter*
+			 * egrep 'field:.*fd;' .../format|sed -r 's/.*field:([a-z ]+) [a-z_]*fd.+/\1/g'|sort|uniq -c
+			 * 65 int
+			 * 23 unsigned int
+			 * 7 unsigned long
+			 */
+			sc->arg_scnprintf[idx] = SCA_FD;
+		}
 		++idx;
 	}
 
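
The two new fallbacks above replace most of the per-syscall entries deleted from syscall_fmts: any "const char *" field named filename/path/pathname gets the filename beautifier, and any integer field whose name ends in "fd" gets the fd beautifier. A rough standalone sketch of the suffix test, with a hypothetical field struct standing in for the real struct format_field:

	#include <stdbool.h>
	#include <string.h>

	struct field { const char *type; const char *name; };	/* stand-in, not the real format_field */

	static bool field_is_fd(const struct field *f)
	{
		size_t len = strlen(f->name);

		if (strcmp(f->type, "int") && strcmp(f->type, "unsigned int") && strcmp(f->type, "long"))
			return false;
		/* "fd", "oldfd", "out_fd", ... all end in "fd" */
		return len >= 2 && strcmp(f->name + len - 2, "fd") == 0;
	}
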
@@ -1534,7 +1494,7 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
 	if (sc->is_exit) {
 		if (!(trace->duration_filter || trace->summary_only || trace->min_stack)) {
 			trace__fprintf_entry_head(trace, thread, 1, sample->time, trace->output);
-			fprintf(trace->output, "%-70s\n", ttrace->entry_str);
+			fprintf(trace->output, "%-70s)\n", ttrace->entry_str);
 		}
 	} else {
 		ttrace->entry_pending = true;
@@ -2887,12 +2847,12 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
 		mmap_pages_user_set = false;
 
 	if (trace.max_stack == UINT_MAX) {
-		trace.max_stack = sysctl_perf_event_max_stack;
+		trace.max_stack = input_name ? PERF_MAX_STACK_DEPTH : sysctl_perf_event_max_stack;
 		max_stack_user_set = false;
 	}
 
 #ifdef HAVE_DWARF_UNWIND_SUPPORT
-	if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled)
+	if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled && trace.trace_syscalls)
 		record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false);
 #endif
 
@@ -549,6 +549,9 @@ int main(int argc, const char **argv)
 	if (sysctl__read_int("kernel/perf_event_max_stack", &value) == 0)
 		sysctl_perf_event_max_stack = value;
 
+	if (sysctl__read_int("kernel/perf_event_max_contexts_per_stack", &value) == 0)
+		sysctl_perf_event_max_contexts_per_stack = value;
+
 	cmd = extract_argv0_path(argv[0]);
 	if (!cmd)
 		cmd = "perf-help";
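
perf now also snapshots kernel.perf_event_max_contexts_per_stack at start-up, next to the existing perf_event_max_stack read, so the tool side agrees with the running kernel's limits. A hedged sketch of reading such a sysctl from procfs with a fallback default, using plain stdio rather than perf's sysctl__read_int() helper:

	#include <stdio.h>

	static int read_sysctl_int(const char *path, int def)
	{
		FILE *f = fopen(path, "r");
		int val = def;

		if (f) {
			if (fscanf(f, "%d", &val) != 1)
				val = def;
			fclose(f);
		}
		return val;
	}

	/* e.g. read_sysctl_int("/proc/sys/kernel/perf_event_max_contexts_per_stack", 8) */
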
@@ -354,9 +354,6 @@ static struct ins_ops nop_ops = {
 	.scnprintf = nop__scnprintf,
 };
 
-/*
- * Must be sorted by name!
- */
 static struct ins instructions[] = {
 	{ .name = "add", .ops = &mov_ops, },
 	{ .name = "addl", .ops = &mov_ops, },
@@ -372,8 +369,8 @@ static struct ins instructions[] = {
 	{ .name = "bgt", .ops = &jump_ops, },
 	{ .name = "bhi", .ops = &jump_ops, },
 	{ .name = "bl", .ops = &call_ops, },
-	{ .name = "blt", .ops = &jump_ops, },
 	{ .name = "bls", .ops = &jump_ops, },
+	{ .name = "blt", .ops = &jump_ops, },
 	{ .name = "blx", .ops = &call_ops, },
 	{ .name = "bne", .ops = &jump_ops, },
 #endif
@@ -449,18 +446,39 @@ static struct ins instructions[] = {
 	{ .name = "xbeginq", .ops = &jump_ops, },
 };
 
-static int ins__cmp(const void *name, const void *insp)
+static int ins__key_cmp(const void *name, const void *insp)
 {
 	const struct ins *ins = insp;
 
 	return strcmp(name, ins->name);
 }
 
-static struct ins *ins__find(const char *name)
+static int ins__cmp(const void *a, const void *b)
+{
+	const struct ins *ia = a;
+	const struct ins *ib = b;
+
+	return strcmp(ia->name, ib->name);
+}
+
+static void ins__sort(void)
 {
 	const int nmemb = ARRAY_SIZE(instructions);
 
-	return bsearch(name, instructions, nmemb, sizeof(struct ins), ins__cmp);
+	qsort(instructions, nmemb, sizeof(struct ins), ins__cmp);
+}
+
+static struct ins *ins__find(const char *name)
+{
+	const int nmemb = ARRAY_SIZE(instructions);
+	static bool sorted;
+
+	if (!sorted) {
+		ins__sort();
+		sorted = true;
+	}
+
+	return bsearch(name, instructions, nmemb, sizeof(struct ins), ins__key_cmp);
 }
 
 int symbol__annotate_init(struct map *map __maybe_unused, struct symbol *sym)
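
bsearch() only works on a sorted array, and the old table relied on a "must be sorted by name" comment that the misplaced ARM blt entry shows was easy to violate; the change sorts the table lazily on first lookup instead. The same pattern in a self-contained sketch (note the two comparators: element-vs-element for qsort, key-vs-element for bsearch):

	#include <stdbool.h>
	#include <stdlib.h>
	#include <string.h>

	struct ins { const char *name; };

	static struct ins table[] = { { "mov" }, { "add" }, { "jmp" } };
	#define NMEMB (sizeof(table) / sizeof(table[0]))

	static int ins_cmp(const void *a, const void *b)		/* element vs element, for qsort */
	{
		return strcmp(((const struct ins *)a)->name, ((const struct ins *)b)->name);
	}

	static int ins_key_cmp(const void *key, const void *elem)	/* key vs element, for bsearch */
	{
		return strcmp(key, ((const struct ins *)elem)->name);
	}

	static struct ins *ins_find(const char *name)
	{
		static bool sorted;

		if (!sorted) {					/* sort once, on first use */
			qsort(table, NMEMB, sizeof(table[0]), ins_cmp);
			sorted = true;
		}
		return bsearch(name, table, NMEMB, sizeof(table[0]), ins_key_cmp);
	}
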
@@ -1122,7 +1140,7 @@ int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize)
 	} else if (dso__is_kcore(dso)) {
 		goto fallback;
 	} else if (readlink(symfs_filename, command, sizeof(command)) < 0 ||
-		   strstr(command, "[kernel.kallsyms]") ||
+		   strstr(command, DSO__NAME_KALLSYMS) ||
 		   access(symfs_filename, R_OK)) {
 		free(filename);
 fallback:
@@ -256,7 +256,7 @@ static int machine__write_buildid_table(struct machine *machine, int fd)
 		size_t name_len;
 		bool in_kernel = false;
 
-		if (!pos->hit)
+		if (!pos->hit && !dso__is_vdso(pos))
 			continue;
 
 		if (dso__is_vdso(pos)) {
@@ -298,8 +298,7 @@ static struct call_path *call_path_from_sample(struct db_export *dbe,
 	 */
 	callchain_param.order = ORDER_CALLER;
 	err = thread__resolve_callchain(thread, &callchain_cursor, evsel,
-					sample, NULL, NULL,
-					sysctl_perf_event_max_stack);
+					sample, NULL, NULL, PERF_MAX_STACK_DEPTH);
 	if (err) {
 		callchain_param.order = saved_order;
 		return NULL;
@@ -7,6 +7,7 @@
 #include "auxtrace.h"
 #include "util.h"
 #include "debug.h"
+#include "vdso.h"
 
 char dso__symtab_origin(const struct dso *dso)
 {
@@ -62,9 +63,7 @@ int dso__read_binary_type_filename(const struct dso *dso,
 		}
 		break;
 	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
-		/* skip the locally configured cache if a symfs is given */
-		if (symbol_conf.symfs[0] ||
-		    (dso__build_id_filename(dso, filename, size) == NULL))
+		if (dso__build_id_filename(dso, filename, size) == NULL)
 			ret = -1;
 		break;
 
@@ -1169,7 +1168,7 @@ bool __dsos__read_build_ids(struct list_head *head, bool with_hits)
 	struct dso *pos;
 
 	list_for_each_entry(pos, head, node) {
-		if (with_hits && !pos->hit)
+		if (with_hits && !pos->hit && !dso__is_vdso(pos))
 			continue;
 		if (pos->has_build_id) {
 			have_build_id = true;
@@ -44,6 +44,7 @@ void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
 	perf_evlist__set_maps(evlist, cpus, threads);
 	fdarray__init(&evlist->pollfd, 64);
 	evlist->workload.pid = -1;
+	evlist->backward = false;
 }
 
 struct perf_evlist *perf_evlist__new(void)
@@ -679,6 +680,33 @@ static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
 	return NULL;
 }
 
+static int perf_evlist__set_paused(struct perf_evlist *evlist, bool value)
+{
+	int i;
+
+	for (i = 0; i < evlist->nr_mmaps; i++) {
+		int fd = evlist->mmap[i].fd;
+		int err;
+
+		if (fd < 0)
+			continue;
+		err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
+int perf_evlist__pause(struct perf_evlist *evlist)
+{
+	return perf_evlist__set_paused(evlist, true);
+}
+
+int perf_evlist__resume(struct perf_evlist *evlist)
+{
+	return perf_evlist__set_paused(evlist, false);
+}
+
 /* When check_messup is true, 'end' must points to a good entry */
 static union perf_event *
 perf_mmap__read(struct perf_mmap *md, bool check_messup, u64 start,
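
perf_evlist__pause()/resume() simply issue PERF_EVENT_IOC_PAUSE_OUTPUT on every mmap'ed event fd; pausing lets perf record read a backward (overwrite) ring buffer without the kernel moving the write head underneath it. A hedged sketch of the same ioctl on a single event fd, assuming a kernel new enough to define PERF_EVENT_IOC_PAUSE_OUTPUT:

	#include <sys/ioctl.h>
	#include <linux/perf_event.h>

	/* Pause (value = 1) or resume (value = 0) output to the event's ring buffer. */
	static int event_pause_output(int event_fd, int value)
	{
		return ioctl(event_fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value);
	}
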
@@ -881,6 +909,7 @@ static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
 	if (evlist->mmap[idx].base != NULL) {
 		munmap(evlist->mmap[idx].base, evlist->mmap_len);
 		evlist->mmap[idx].base = NULL;
+		evlist->mmap[idx].fd = -1;
 		atomic_set(&evlist->mmap[idx].refcnt, 0);
 	}
 	auxtrace_mmap__munmap(&evlist->mmap[idx].auxtrace_mmap);
@@ -901,10 +930,14 @@ void perf_evlist__munmap(struct perf_evlist *evlist)
 
 static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
 {
+	int i;
+
 	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
 	if (cpu_map__empty(evlist->cpus))
 		evlist->nr_mmaps = thread_map__nr(evlist->threads);
 	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
+	for (i = 0; i < evlist->nr_mmaps; i++)
+		evlist->mmap[i].fd = -1;
 	return evlist->mmap != NULL ? 0 : -ENOMEM;
 }
 
@@ -941,6 +974,7 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx,
 		evlist->mmap[idx].base = NULL;
 		return -1;
 	}
+	evlist->mmap[idx].fd = fd;
 
 	if (auxtrace_mmap__mmap(&evlist->mmap[idx].auxtrace_mmap,
 				&mp->auxtrace_mp, evlist->mmap[idx].base, fd))
@@ -28,6 +28,7 @@ struct record_opts;
 struct perf_mmap {
 	void		 *base;
 	int		 mask;
+	int		 fd;
 	atomic_t	 refcnt;
 	u64		 prev;
 	struct auxtrace_mmap auxtrace_mmap;
@@ -43,6 +44,7 @@ struct perf_evlist {
 	bool		 overwrite;
 	bool		 enabled;
 	bool		 has_user_cpus;
+	bool		 backward;
 	size_t		 mmap_len;
 	int		 id_pos;
 	int		 is_pos;
@@ -135,6 +137,8 @@ void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx);
 
 void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx);
 
+int perf_evlist__pause(struct perf_evlist *evlist);
+int perf_evlist__resume(struct perf_evlist *evlist);
 int perf_evlist__open(struct perf_evlist *evlist);
 void perf_evlist__close(struct perf_evlist *evlist);
 
@@ -37,6 +37,7 @@ static struct {
 	bool clockid;
 	bool clockid_wrong;
 	bool lbr_flags;
+	bool write_backward;
 } perf_missing_features;
 
 static clockid_t clockid;
@@ -1376,6 +1377,8 @@ fallback_missing_features:
 	if (perf_missing_features.lbr_flags)
 		evsel->attr.branch_sample_type &= ~(PERF_SAMPLE_BRANCH_NO_FLAGS |
 				     PERF_SAMPLE_BRANCH_NO_CYCLES);
+	if (perf_missing_features.write_backward)
+		evsel->attr.write_backward = false;
 retry_sample_id:
 	if (perf_missing_features.sample_id_all)
 		evsel->attr.sample_id_all = 0;
@@ -1438,6 +1441,12 @@ retry_open:
 				err = -EINVAL;
 				goto out_close;
 			}
+
+			if (evsel->overwrite &&
+			    perf_missing_features.write_backward) {
+				err = -EINVAL;
+				goto out_close;
+			}
 		}
 	}
 
@@ -1500,6 +1509,10 @@ try_fallback:
 			  PERF_SAMPLE_BRANCH_NO_FLAGS))) {
 		perf_missing_features.lbr_flags = true;
 		goto fallback_missing_features;
+	} else if (!perf_missing_features.write_backward &&
+		   evsel->attr.write_backward) {
+		perf_missing_features.write_backward = true;
+		goto fallback_missing_features;
 	}
 
 out_close:
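
write_backward joins the set of attributes that perf_evsel__open() probes for at run time: the first failing open marks the feature missing, the attribute bit is cleared, and the open is retried, so new attr bits degrade gracefully on older kernels. A generic sketch of that probe-and-retry shape, with try_open() as a hypothetical stand-in for the real perf_event_open() call:

	#include <stdbool.h>
	#include <errno.h>

	struct attr { bool write_backward; };
	static bool missing_write_backward;		/* learned at run time, like perf_missing_features */

	extern int try_open(const struct attr *attr);	/* hypothetical; returns fd or -errno */

	static int open_with_fallback(struct attr *attr)
	{
		int fd;
	retry:
		if (missing_write_backward)
			attr->write_backward = false;	/* drop the unsupported bit */

		fd = try_open(attr);
		if (fd == -EINVAL && attr->write_backward && !missing_write_backward) {
			missing_write_backward = true;	/* remember, then retry without it */
			goto retry;
		}
		return fd;
	}
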
@@ -112,6 +112,7 @@ struct perf_evsel {
 	bool			tracking;
 	bool			per_pkg;
 	bool			precise_max;
+	bool			overwrite;
 	/* parse modifier helper */
 	int			exclude_GH;
 	int			nr_members;
@@ -117,6 +117,13 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
 			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
 			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
 		}
+
+		if (h->branch_info->srcline_from)
+			hists__new_col_len(hists, HISTC_SRCLINE_FROM,
+					strlen(h->branch_info->srcline_from));
+		if (h->branch_info->srcline_to)
+			hists__new_col_len(hists, HISTC_SRCLINE_TO,
+					strlen(h->branch_info->srcline_to));
 	}
 
 	if (h->mem_info) {
@@ -1042,6 +1049,8 @@ void hist_entry__delete(struct hist_entry *he)
 	if (he->branch_info) {
 		map__zput(he->branch_info->from.map);
 		map__zput(he->branch_info->to.map);
+		free_srcline(he->branch_info->srcline_from);
+		free_srcline(he->branch_info->srcline_to);
 		zfree(&he->branch_info);
 	}
 
@@ -52,6 +52,8 @@ enum hist_column {
 	HISTC_MEM_IADDR_SYMBOL,
 	HISTC_TRANSACTION,
 	HISTC_CYCLES,
+	HISTC_SRCLINE_FROM,
+	HISTC_SRCLINE_TO,
 	HISTC_TRACE,
 	HISTC_NR_COLS, /* Last entry */
 };
@@ -43,6 +43,7 @@ int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
 
 	machine->symbol_filter = NULL;
 	machine->id_hdr_size = 0;
+	machine->kptr_restrict_warned = false;
 	machine->comm_exec = false;
 	machine->kernel_start = 0;
 
@@ -709,7 +710,7 @@ static struct dso *machine__get_kernel(struct machine *machine)
 	if (machine__is_host(machine)) {
 		vmlinux_name = symbol_conf.vmlinux_name;
 		if (!vmlinux_name)
-			vmlinux_name = "[kernel.kallsyms]";
+			vmlinux_name = DSO__NAME_KALLSYMS;
 
 		kernel = machine__findnew_kernel(machine, vmlinux_name,
 						 "[kernel]", DSO_TYPE_KERNEL);
@@ -1135,10 +1136,10 @@ int machine__create_kernel_maps(struct machine *machine)
 {
 	struct dso *kernel = machine__get_kernel(machine);
 	const char *name;
-	u64 addr = machine__get_running_kernel_start(machine, &name);
+	u64 addr;
 	int ret;
 
-	if (!addr || kernel == NULL)
+	if (kernel == NULL)
 		return -1;
 
 	ret = __machine__create_kernel_maps(machine, kernel);
@@ -1160,8 +1161,9 @@ int machine__create_kernel_maps(struct machine *machine)
 	 */
 	map_groups__fixup_end(&machine->kmaps);
 
-	if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name,
-					     addr)) {
+	addr = machine__get_running_kernel_start(machine, &name);
+	if (!addr) {
+	} else if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name, addr)) {
 		machine__destroy_kernel_maps(machine);
 		return -1;
 	}
@@ -1769,11 +1771,6 @@ static int resolve_lbr_callchain_sample(struct thread *thread,
 		 */
 		int mix_chain_nr = i + 1 + lbr_nr + 1;
 
-		if (mix_chain_nr > (int)sysctl_perf_event_max_stack + PERF_MAX_BRANCH_DEPTH) {
-			pr_warning("corrupted callchain. skipping...\n");
-			return 0;
-		}
-
 		for (j = 0; j < mix_chain_nr; j++) {
 			if (callchain_param.order == ORDER_CALLEE) {
 				if (j < i + 1)
@@ -1811,9 +1808,9 @@ static int thread__resolve_callchain_sample(struct thread *thread,
 {
 	struct branch_stack *branch = sample->branch_stack;
 	struct ip_callchain *chain = sample->callchain;
-	int chain_nr = min(max_stack, (int)chain->nr);
+	int chain_nr = chain->nr;
 	u8 cpumode = PERF_RECORD_MISC_USER;
-	int i, j, err;
+	int i, j, err, nr_entries;
 	int skip_idx = -1;
 	int first_call = 0;
 
@@ -1828,7 +1825,6 @@ static int thread__resolve_callchain_sample(struct thread *thread,
 	 * Based on DWARF debug information, some architectures skip
 	 * a callchain entry saved by the kernel.
 	 */
-	if (chain->nr < sysctl_perf_event_max_stack)
 	skip_idx = arch_skip_callchain_idx(thread, chain);
 
 	/*
@@ -1889,12 +1885,8 @@ static int thread__resolve_callchain_sample(struct thread *thread,
 	}
 
 check_calls:
-	if (chain->nr > sysctl_perf_event_max_stack && (int)chain->nr > max_stack) {
-		pr_warning("corrupted callchain. skipping...\n");
-		return 0;
-	}
-
-	for (i = first_call; i < chain_nr; i++) {
+	for (i = first_call, nr_entries = 0;
+	     i < chain_nr && nr_entries < max_stack; i++) {
 		u64 ip;
 
 		if (callchain_param.order == ORDER_CALLEE)
@@ -1908,6 +1900,9 @@ check_calls:
 #endif
 		ip = chain->ips[j];
 
+		if (ip < PERF_CONTEXT_MAX)
+			++nr_entries;
+
 		err = add_callchain_ip(thread, cursor, parent, root_al, &cpumode, ip);
 
 		if (err)
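
Instead of rejecting whole callchains that exceed kernel.perf_event_max_stack, the resolver now walks the sample and charges only real frames against max_stack: PERF_CONTEXT_* markers (values at or above PERF_CONTEXT_MAX) no longer consume the budget, which is the quantity the new perf_event_max_contexts_per_stack knob bounds on the kernel side. A small sketch of that counting rule over a raw ip array:

	typedef unsigned long long u64;

	#define PERF_CONTEXT_MAX ((u64)-4095)	/* matches include/uapi/linux/perf_event.h */

	/* Count how many ips are real frames, stopping once max_stack of them are seen. */
	static int count_frames(const u64 *ips, int nr, int max_stack)
	{
		int i, nr_entries = 0;

		for (i = 0; i < nr && nr_entries < max_stack; i++) {
			if (ips[i] < PERF_CONTEXT_MAX)
				++nr_entries;	/* context markers are not charged */
		}
		return nr_entries;
	}
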
@ -28,6 +28,7 @@ struct machine {
|
||||||
pid_t pid;
|
pid_t pid;
|
||||||
u16 id_hdr_size;
|
u16 id_hdr_size;
|
||||||
bool comm_exec;
|
bool comm_exec;
|
||||||
|
bool kptr_restrict_warned;
|
||||||
char *root_dir;
|
char *root_dir;
|
||||||
struct rb_root threads;
|
struct rb_root threads;
|
||||||
pthread_rwlock_t threads_lock;
|
pthread_rwlock_t threads_lock;
|
||||||
|
|
|
@ -264,8 +264,7 @@ static SV *perl_process_callchain(struct perf_sample *sample,
|
||||||
goto exit;
|
goto exit;
|
||||||
|
|
||||||
if (thread__resolve_callchain(al->thread, &callchain_cursor, evsel,
|
if (thread__resolve_callchain(al->thread, &callchain_cursor, evsel,
|
||||||
sample, NULL, NULL,
|
sample, NULL, NULL, scripting_max_stack) != 0) {
|
||||||
sysctl_perf_event_max_stack) != 0) {
|
|
||||||
pr_err("Failed to resolve callchain. Skipping\n");
|
pr_err("Failed to resolve callchain. Skipping\n");
|
||||||
goto exit;
|
goto exit;
|
||||||
}
|
}
|
||||||
|
|
|
@@ -353,6 +353,88 @@ struct sort_entry sort_srcline = {
 	.se_width_idx	= HISTC_SRCLINE,
 };
 
+/* --sort srcline_from */
+
+static int64_t
+sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+	if (!left->branch_info->srcline_from) {
+		struct map *map = left->branch_info->from.map;
+		if (!map)
+			left->branch_info->srcline_from = SRCLINE_UNKNOWN;
+		else
+			left->branch_info->srcline_from = get_srcline(map->dso,
+					   map__rip_2objdump(map,
+							     left->branch_info->from.al_addr),
+					   left->branch_info->from.sym, true);
+	}
+	if (!right->branch_info->srcline_from) {
+		struct map *map = right->branch_info->from.map;
+		if (!map)
+			right->branch_info->srcline_from = SRCLINE_UNKNOWN;
+		else
+			right->branch_info->srcline_from = get_srcline(map->dso,
+					     map__rip_2objdump(map,
+							       right->branch_info->from.al_addr),
+					     right->branch_info->from.sym, true);
+	}
+	return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
+}
+
+static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf,
+					size_t size, unsigned int width)
+{
+	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from);
+}
+
+struct sort_entry sort_srcline_from = {
+	.se_header	= "From Source:Line",
+	.se_cmp		= sort__srcline_from_cmp,
+	.se_snprintf	= hist_entry__srcline_from_snprintf,
+	.se_width_idx	= HISTC_SRCLINE_FROM,
+};
+
+/* --sort srcline_to */
+
+static int64_t
+sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+	if (!left->branch_info->srcline_to) {
+		struct map *map = left->branch_info->to.map;
+		if (!map)
+			left->branch_info->srcline_to = SRCLINE_UNKNOWN;
+		else
+			left->branch_info->srcline_to = get_srcline(map->dso,
+					   map__rip_2objdump(map,
+							     left->branch_info->to.al_addr),
+					   left->branch_info->from.sym, true);
+	}
+	if (!right->branch_info->srcline_to) {
+		struct map *map = right->branch_info->to.map;
+		if (!map)
+			right->branch_info->srcline_to = SRCLINE_UNKNOWN;
+		else
+			right->branch_info->srcline_to = get_srcline(map->dso,
+					     map__rip_2objdump(map,
+							       right->branch_info->to.al_addr),
+					     right->branch_info->to.sym, true);
+	}
+	return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
+}
+
+static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf,
+					size_t size, unsigned int width)
+{
+	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to);
+}
+
+struct sort_entry sort_srcline_to = {
+	.se_header	= "To Source:Line",
+	.se_cmp		= sort__srcline_to_cmp,
+	.se_snprintf	= hist_entry__srcline_to_snprintf,
+	.se_width_idx	= HISTC_SRCLINE_TO,
+};
+
 /* --sort srcfile */
 
 static char no_srcfile[1];
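Aside, not from the diff: the hist_entry__srcline_*_snprintf() helpers above hand the srcline string to "%-*.*s", i.e. left-justified, padded and truncated to whatever column width the UI chose. A tiny standalone illustration; the srcline value is made up:

#include <stdio.h>

int main(void)
{
	const char *srcline = "hists.c:1472";	/* hypothetical "file:line" value */
	int width = 20;				/* column width picked by the UI */

	/* prints "[hists.c:1472        ]": padded to 20 columns, truncated at 20 */
	printf("[%-*.*s]\n", width, width, srcline);
	return 0;
}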
@@ -1347,6 +1429,8 @@ static struct sort_dimension bstack_sort_dimensions[] = {
 	DIM(SORT_IN_TX, "in_tx", sort_in_tx),
 	DIM(SORT_ABORT, "abort", sort_abort),
 	DIM(SORT_CYCLES, "cycles", sort_cycles),
+	DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
+	DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
 };
 
 #undef DIM
@@ -215,6 +215,8 @@ enum sort_type {
 	SORT_ABORT,
 	SORT_IN_TX,
 	SORT_CYCLES,
+	SORT_SRCLINE_FROM,
+	SORT_SRCLINE_TO,
 
 	/* memory mode specific sort keys */
 	__SORT_MEMORY_MODE,
@@ -94,7 +94,8 @@ void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 *count,
 {
 	int ctx = evsel_context(counter);
 
-	if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK))
+	if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK) ||
+	    perf_evsel__match(counter, SOFTWARE, SW_CPU_CLOCK))
 		update_stats(&runtime_nsecs_stats[cpu], count[0]);
 	else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
 		update_stats(&runtime_cycles_stats[ctx][cpu], count[0]);
@@ -188,7 +189,7 @@ static void print_stalled_cycles_backend(int cpu,
 
 	color = get_ratio_color(GRC_STALLED_CYCLES_BE, ratio);
 
-	out->print_metric(out->ctx, color, "%6.2f%%", "backend cycles idle", ratio);
+	out->print_metric(out->ctx, color, "%7.2f%%", "backend cycles idle", ratio);
 }
 
 static void print_branch_misses(int cpu,
@@ -444,7 +445,8 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
 			ratio = total / avg;
 
 		print_metric(ctxp, NULL, "%8.0f", "cycles / elision", ratio);
-	} else if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK)) {
+	} else if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK) ||
+		   perf_evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK)) {
 		if ((ratio = avg_stats(&walltime_nsecs_stats)) != 0)
 			print_metric(ctxp, NULL, "%8.3f", "CPUs utilized",
 				     avg / ratio);
@@ -1662,8 +1662,8 @@ static char *dso__find_kallsyms(struct dso *dso, struct map *map)
 
 	build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
 
-	scnprintf(path, sizeof(path), "%s/[kernel.kcore]/%s", buildid_dir,
-		  sbuild_id);
+	scnprintf(path, sizeof(path), "%s/%s/%s", buildid_dir,
+		  DSO__NAME_KCORE, sbuild_id);
 
 	/* Use /proc/kallsyms if possible */
 	if (is_host) {
@@ -1699,8 +1699,8 @@ static char *dso__find_kallsyms(struct dso *dso, struct map *map)
 		if (!find_matching_kcore(map, path, sizeof(path)))
 			return strdup(path);
 
-		scnprintf(path, sizeof(path), "%s/[kernel.kallsyms]/%s",
-			  buildid_dir, sbuild_id);
+		scnprintf(path, sizeof(path), "%s/%s/%s",
+			  buildid_dir, DSO__NAME_KALLSYMS, sbuild_id);
 
 		if (access(path, F_OK)) {
 			pr_err("No kallsyms or vmlinux with build-id %s was found\n",
@@ -1769,7 +1769,7 @@ do_kallsyms:
 
 	if (err > 0 && !dso__is_kcore(dso)) {
 		dso->binary_type = DSO_BINARY_TYPE__KALLSYMS;
-		dso__set_long_name(dso, "[kernel.kallsyms]", false);
+		dso__set_long_name(dso, DSO__NAME_KALLSYMS, false);
 		map__fixup_start(map);
 		map__fixup_end(map);
 	}
@@ -2033,3 +2033,26 @@ void symbol__exit(void)
 	symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
 	symbol_conf.initialized = false;
 }
+
+int symbol__config_symfs(const struct option *opt __maybe_unused,
+			 const char *dir, int unset __maybe_unused)
+{
+	char *bf = NULL;
+	int ret;
+
+	symbol_conf.symfs = strdup(dir);
+	if (symbol_conf.symfs == NULL)
+		return -ENOMEM;
+
+	/* skip the locally configured cache if a symfs is given, and
+	 * config buildid dir to symfs/.debug
+	 */
+	ret = asprintf(&bf, "%s/%s", dir, ".debug");
+	if (ret < 0)
+		return -ENOMEM;
+
+	set_buildid_dir(bf);
+
+	free(bf);
+	return 0;
+}
@@ -44,6 +44,9 @@ Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
 #define DMGL_ANSI (1 << 1) /* Include const, volatile, etc */
 #endif
 
+#define DSO__NAME_KALLSYMS	"[kernel.kallsyms]"
+#define DSO__NAME_KCORE		"[kernel.kcore]"
+
 /** struct symbol - symtab entry
  *
  * @ignore - resolvable but tools ignore it (e.g. idle routines)
@@ -183,6 +186,8 @@ struct branch_info {
 	struct addr_map_symbol from;
 	struct addr_map_symbol to;
 	struct branch_flags flags;
+	char *srcline_from;
+	char *srcline_to;
 };
 
 struct mem_info {
@@ -287,6 +292,8 @@ bool symbol_type__is_a(char symbol_type, enum map_type map_type);
 bool symbol__restricted_filename(const char *filename,
 				 const char *restricted_filename);
 bool symbol__is_idle(struct symbol *sym);
+int symbol__config_symfs(const struct option *opt __maybe_unused,
+			 const char *dir, int unset __maybe_unused);
 
 int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
 		  struct symsrc *runtime_ss, symbol_filter_t filter,
@@ -27,7 +27,6 @@ struct perf_top {
 	int max_stack;
 	bool hide_kernel_symbols, hide_user_symbols, zero;
 	bool use_tui, use_stdio;
-	bool kptr_restrict_warned;
 	bool vmlinux_warned;
 	bool dump_symtab;
 	struct hist_entry *sym_filter_entry;
@@ -33,7 +33,8 @@ struct callchain_param callchain_param = {
 unsigned int page_size;
 int cacheline_size;
 
-unsigned int sysctl_perf_event_max_stack = PERF_MAX_STACK_DEPTH;
+int sysctl_perf_event_max_stack = PERF_MAX_STACK_DEPTH;
+int sysctl_perf_event_max_contexts_per_stack = PERF_MAX_CONTEXTS_PER_STACK;
 
 bool test_attr__enabled;
@@ -261,7 +261,8 @@ void sighandler_dump_stack(int sig);
 
 extern unsigned int page_size;
 extern int cacheline_size;
-extern unsigned int sysctl_perf_event_max_stack;
+extern int sysctl_perf_event_max_stack;
+extern int sysctl_perf_event_max_contexts_per_stack;
 
 struct parse_tag {
 	char tag;
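Aside, not part of the commit: with the tools' sysctl_perf_event_max_stack now a plain int, user space can still read the current limit straight from procfs. A self-contained sketch; the 127 fallback matches PERF_MAX_STACK_DEPTH:

#include <stdio.h>

int main(void)
{
	FILE *fp = fopen("/proc/sys/kernel/perf_event_max_stack", "r");
	int max_stack = 127;	/* PERF_MAX_STACK_DEPTH fallback if the file is unreadable */

	if (fp) {
		if (fscanf(fp, "%d", &max_stack) != 1)
			max_stack = 127;
		fclose(fp);
	}
	printf("kernel.perf_event_max_stack = %d\n", max_stack);
	return 0;
}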