Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf changes from Ingo Molnar:
 "Lots of changes:

   - (much) improved assembly annotation support in perf report, with
     jump visualization, searching, navigation, visual output
     improvements and more.

    - kernel support for AMD IBS PMU hardware features.  Notably 'perf
      record -e cycles:p' and 'perf top -e cycles:p' should work without
      skid now, like PEBS does on the Intel side, because it takes
      advantage of IBS transparently.

    - the libtracevents library: it is the first step towards unifying
      tracing tooling and perf, and it also gives a tracing library for
      external tools like powertop to rely on.

    - infrastructure: various improvements and refactoring of the UI
      modules and related code

    - infrastructure: cleanup and simplification of the profiling
      targets code (--uid, --pid, --tid, --cpu, --all-cpus, etc.)

    - tons of robustness fixes all around

    - various ftrace updates: speedups, cleanups, robustness
      improvements.

    - typing 'make' in tools/ will now give you a menu of projects to
      build and a short help text to explain what each does.

    - ... and lots of other changes I forgot to list.

  The perf record make bzImage + perf report regression you reported
  should be fixed."
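
The precise-sampling support described above is reached through the stock ':p' event modifier; a usage sketch (event syntax as quoted in the message; './workload' is a placeholder):

    perf record -e cycles:p ./workload   # skid-free cycles, transparently backed by IBS on AMD
    perf top -e cycles:p                 # same events, as a live view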

* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (166 commits)
  tracing: Remove kernel_lock annotations
  tracing: Fix initial buffer_size_kb state
  ring-buffer: Merge separate resize loops
  perf evsel: Create events initially disabled -- again
  perf tools: Split term type into value type and term type
  perf hists: Fix callchain ip printf format
  perf target: Add uses_mmap field
  ftrace: Remove selecting FRAME_POINTER with FUNCTION_TRACER
  ftrace/x86: Have x86 ftrace use the ftrace_modify_all_code()
  ftrace: Make ftrace_modify_all_code() global for archs to use
  ftrace: Return record ip addr for ftrace_location()
  ftrace: Consolidate ftrace_location() and ftrace_text_reserved()
  ftrace: Speed up search by skipping pages by address
  ftrace: Remove extra helper functions
  ftrace: Sort all function addresses, not just per page
  tracing: change CPU ring buffer state from tracing_cpumask
  tracing: Check return value of tracing_dentry_percpu()
  ring-buffer: Reset head page before running self test
  ring-buffer: Add integrity check at end of iter read
  ring-buffer: Make addition of pages in ring buffer atomic
  ...
This commit is contained in:
Linus Torvalds 2012-05-22 18:18:55 -07:00
Parents 88d6ae8dc3 73787190d0
Commit 2ff2b289a6
114 changed files: 13443 additions and 5008 deletions


@@ -1471,6 +1471,13 @@ kernelrelease:
kernelversion:
	@echo $(KERNELVERSION)

+# Clear a bunch of variables before executing the submake
+tools/: FORCE
+	$(Q)$(MAKE) LDFLAGS= MAKEFLAGS= -C $(src)/tools/
+
+tools/%: FORCE
+	$(Q)$(MAKE) LDFLAGS= MAKEFLAGS= -C $(src)/tools/ $*
+
# Single targets
# ---------------------------------------------------------------------------
# Single targets are compatible with:
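
The two forwarding rules added above make the tool builds reachable from the top-level tree; a usage sketch (target names are whatever tools/Makefile provides, per the 'make help' menu mentioned in the pull message):

    cd tools && make         # prints the menu of buildable projects plus short help text
    make tools/perf          # from the top level, forwards to: make -C tools/ perf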


@@ -824,7 +824,6 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,

	idx = la_ptr;

-	perf_sample_data_init(&data, 0);
	for (j = 0; j < cpuc->n_events; j++) {
		if (cpuc->current_idx[j] == idx)
			break;
@@ -848,7 +847,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,

	hwc = &event->hw;
	alpha_perf_event_update(event, hwc, idx, alpha_pmu->pmc_max_period[idx]+1);
-	data.period = event->hw.last_period;
+	perf_sample_data_init(&data, 0, hwc->last_period);

	if (alpha_perf_event_set_period(event, hwc, idx)) {
		if (perf_event_overflow(event, &data, regs)) {


@@ -11,7 +11,7 @@ CONFIG_KALLSYMS_EXTRA_PASS=y
# CONFIG_TIMERFD is not set
# CONFIG_EVENTFD is not set
# CONFIG_AIO is not set
-CONFIG_PERF_COUNTERS=y
+CONFIG_PERF_EVENTS=y
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set


@@ -489,8 +489,6 @@ armv6pmu_handle_irq(int irq_num,
	 */
	armv6_pmcr_write(pmcr);

-	perf_sample_data_init(&data, 0);
-
	cpuc = &__get_cpu_var(cpu_hw_events);
	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
@@ -509,7 +507,7 @@ armv6pmu_handle_irq(int irq_num,

		hwc = &event->hw;
		armpmu_event_update(event, hwc, idx);
-		data.period = event->hw.last_period;
+		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event, hwc, idx))
			continue;


@@ -1077,8 +1077,6 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
	 */
	regs = get_irq_regs();

-	perf_sample_data_init(&data, 0);
-
	cpuc = &__get_cpu_var(cpu_hw_events);
	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
@@ -1097,7 +1095,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)

		hwc = &event->hw;
		armpmu_event_update(event, hwc, idx);
-		data.period = event->hw.last_period;
+		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event, hwc, idx))
			continue;


@@ -248,8 +248,6 @@ xscale1pmu_handle_irq(int irq_num, void *dev)

	regs = get_irq_regs();

-	perf_sample_data_init(&data, 0);
-
	cpuc = &__get_cpu_var(cpu_hw_events);
	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
@@ -263,7 +261,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)

		hwc = &event->hw;
		armpmu_event_update(event, hwc, idx);
-		data.period = event->hw.last_period;
+		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event, hwc, idx))
			continue;
@@ -588,8 +586,6 @@ xscale2pmu_handle_irq(int irq_num, void *dev)

	regs = get_irq_regs();

-	perf_sample_data_init(&data, 0);
-
	cpuc = &__get_cpu_var(cpu_hw_events);
	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
@@ -603,7 +599,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)

		hwc = &event->hw;
		armpmu_event_update(event, hwc, idx);
-		data.period = event->hw.last_period;
+		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event, hwc, idx))
			continue;


@@ -1325,7 +1325,7 @@ static int mipsxx_pmu_handle_shared_irq(void)

	regs = get_irq_regs();

-	perf_sample_data_init(&data, 0);
+	perf_sample_data_init(&data, 0, 0);

	switch (counters) {
#define HANDLE_COUNTER(n)	\


@@ -32,7 +32,7 @@ CONFIG_RD_LZMA=y
CONFIG_INITRAMFS_COMPRESSION_GZIP=y
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
-CONFIG_PERF_COUNTERS=y
+CONFIG_PERF_EVENTS=y
CONFIG_PROFILING=y
CONFIG_OPROFILE=y
CONFIG_KPROBES=y


@@ -8,7 +8,7 @@ CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
# CONFIG_ELF_CORE is not set
-CONFIG_PERF_COUNTERS=y
+CONFIG_PERF_EVENTS=y
# CONFIG_VM_EVENT_COUNTERS is not set
CONFIG_SLAB=y
CONFIG_MODULES=y


@@ -9,7 +9,7 @@ CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
# CONFIG_ELF_CORE is not set
-CONFIG_PERF_COUNTERS=y
+CONFIG_PERF_EVENTS=y
# CONFIG_VM_EVENT_COUNTERS is not set
CONFIG_SLAB=y
CONFIG_MODULES=y


@@ -1299,8 +1299,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
	if (record) {
		struct perf_sample_data data;

-		perf_sample_data_init(&data, ~0ULL);
-		data.period = event->hw.last_period;
+		perf_sample_data_init(&data, ~0ULL, event->hw.last_period);

		if (event->attr.sample_type & PERF_SAMPLE_ADDR)
			perf_get_data_addr(regs, &data.addr);


@@ -613,8 +613,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
	if (record) {
		struct perf_sample_data data;

-		perf_sample_data_init(&data, 0);
-		data.period = event->hw.last_period;
+		perf_sample_data_init(&data, 0, event->hw.last_period);

		if (perf_event_overflow(event, &data, regs))
			fsl_emb_pmu_stop(event, 0);


@@ -5,7 +5,7 @@ CONFIG_BSD_PROCESS_ACCT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=16
-CONFIG_PERF_COUNTERS=y
+CONFIG_PERF_EVENTS=y
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB=y
CONFIG_PROFILING=y


@@ -5,7 +5,7 @@ CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=18
CONFIG_BLK_DEV_INITRD=y
-CONFIG_PERF_COUNTERS=y
+CONFIG_PERF_EVENTS=y
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB=y
CONFIG_PROFILING=y


@@ -1296,8 +1296,6 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,

	regs = args->regs;

-	perf_sample_data_init(&data, 0);
-
	cpuc = &__get_cpu_var(cpu_hw_events);

	/* If the PMU has the TOE IRQ enable bits, we need to do a
@@ -1321,7 +1319,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
		if (val & (1ULL << 31))
			continue;

-		data.period = event->hw.last_period;
+		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!sparc_perf_event_set_period(event, hwc, idx))
			continue;


@@ -40,7 +40,6 @@ config X86
	select HAVE_FUNCTION_GRAPH_TRACER
	select HAVE_FUNCTION_GRAPH_FP_TEST
	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
-	select HAVE_FTRACE_NMI_ENTER if DYNAMIC_FTRACE
	select HAVE_SYSCALL_TRACEPOINTS
	select HAVE_KVM
	select HAVE_ARCH_KGDB


@@ -34,6 +34,7 @@

#ifndef __ASSEMBLY__
extern void mcount(void);
+extern int modifying_ftrace_code;

static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
@@ -50,6 +51,8 @@ struct dyn_arch_ftrace {
	/* No extra data needed for x86 */
};

+int ftrace_int3_handler(struct pt_regs *regs);
+
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_FUNCTION_TRACER */


@@ -134,6 +134,8 @@
#define MSR_AMD64_IBSFETCHCTL		0xc0011030
#define MSR_AMD64_IBSFETCHLINAD		0xc0011031
#define MSR_AMD64_IBSFETCHPHYSAD	0xc0011032
+#define MSR_AMD64_IBSFETCH_REG_COUNT	3
+#define MSR_AMD64_IBSFETCH_REG_MASK	((1UL<<MSR_AMD64_IBSFETCH_REG_COUNT)-1)
#define MSR_AMD64_IBSOPCTL		0xc0011033
#define MSR_AMD64_IBSOPRIP		0xc0011034
#define MSR_AMD64_IBSOPDATA		0xc0011035
@@ -141,8 +143,11 @@
#define MSR_AMD64_IBSOPDATA3		0xc0011037
#define MSR_AMD64_IBSDCLINAD		0xc0011038
#define MSR_AMD64_IBSDCPHYSAD		0xc0011039
+#define MSR_AMD64_IBSOP_REG_COUNT	7
+#define MSR_AMD64_IBSOP_REG_MASK	((1UL<<MSR_AMD64_IBSOP_REG_COUNT)-1)
#define MSR_AMD64_IBSCTL		0xc001103a
#define MSR_AMD64_IBSBRTARGET		0xc001103b
+#define MSR_AMD64_IBS_REG_COUNT_MAX	8 /* includes MSR_AMD64_IBSBRTARGET */

/* Fam 15h MSRs */
#define MSR_F15H_PERF_CTL		0xc0010200


@@ -158,6 +158,7 @@ struct x86_pmu_capability {
#define IBS_CAPS_OPCNT			(1U<<4)
#define IBS_CAPS_BRNTRGT		(1U<<5)
#define IBS_CAPS_OPCNTEXT		(1U<<6)
+#define IBS_CAPS_RIPINVALIDCHK		(1U<<7)

#define IBS_CAPS_DEFAULT		(IBS_CAPS_AVAIL		\
					 | IBS_CAPS_FETCHSAM	\
@@ -170,21 +171,28 @@ struct x86_pmu_capability {
#define IBSCTL_LVT_OFFSET_VALID		(1ULL<<8)
#define IBSCTL_LVT_OFFSET_MASK		0x0F

-/* IbsFetchCtl bits/masks */
+/* ibs fetch bits/masks */
#define IBS_FETCH_RAND_EN	(1ULL<<57)
#define IBS_FETCH_VAL		(1ULL<<49)
#define IBS_FETCH_ENABLE	(1ULL<<48)
#define IBS_FETCH_CNT		0xFFFF0000ULL
#define IBS_FETCH_MAX_CNT	0x0000FFFFULL

-/* IbsOpCtl bits */
+/* ibs op bits/masks */
+/* lower 4 bits of the current count are ignored: */
+#define IBS_OP_CUR_CNT		(0xFFFF0ULL<<32)
#define IBS_OP_CNT_CTL		(1ULL<<19)
#define IBS_OP_VAL		(1ULL<<18)
#define IBS_OP_ENABLE		(1ULL<<17)
#define IBS_OP_MAX_CNT		0x0000FFFFULL
#define IBS_OP_MAX_CNT_EXT	0x007FFFFFULL	/* not a register bit mask */
+#define IBS_RIP_INVALID		(1ULL<<38)

+#ifdef CONFIG_X86_LOCAL_APIC
extern u32 get_ibs_caps(void);
+#else
+static inline u32 get_ibs_caps(void) { return 0; }
+#endif

#ifdef CONFIG_PERF_EVENTS
extern void perf_events_lapic_init(void);


@@ -484,9 +484,6 @@ static int __x86_pmu_event_init(struct perf_event *event)
	/* mark unused */
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
-
-	/* mark not used */
-	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;

	return x86_pmu.hw_config(event);
@@ -1186,8 +1183,6 @@ int x86_pmu_handle_irq(struct pt_regs *regs)
	int idx, handled = 0;
	u64 val;

-	perf_sample_data_init(&data, 0);
-
	cpuc = &__get_cpu_var(cpu_hw_events);

	/*
@@ -1222,7 +1217,7 @@ int x86_pmu_handle_irq(struct pt_regs *regs)
		 * event overflow
		 */
		handled++;
-		data.period = event->hw.last_period;
+		perf_sample_data_init(&data, 0, event->hw.last_period);

		if (!x86_perf_event_set_period(event))
			continue;


@@ -134,8 +134,13 @@ static u64 amd_pmu_event_map(int hw_event)

static int amd_pmu_hw_config(struct perf_event *event)
{
-	int ret = x86_pmu_hw_config(event);
+	int ret;
+
+	/* pass precise event sampling to ibs: */
+	if (event->attr.precise_ip && get_ibs_caps())
+		return -ENOENT;

+	ret = x86_pmu_hw_config(event);
	if (ret)
		return ret;
@@ -205,10 +210,8 @@ static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
	 * when we come here
	 */
	for (i = 0; i < x86_pmu.num_counters; i++) {
-		if (nb->owners[i] == event) {
-			cmpxchg(nb->owners+i, event, NULL);
+		if (cmpxchg(nb->owners + i, event, NULL) == event)
			break;
-		}
	}
}


@@ -9,6 +9,7 @@
#include <linux/perf_event.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/ptrace.h>

#include <asm/apic.h>
@@ -16,36 +17,591 @@ static u32 ibs_caps;

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)

-static struct pmu perf_ibs;
#include <linux/kprobes.h>
#include <linux/hardirq.h>
#include <asm/nmi.h>
#define IBS_FETCH_CONFIG_MASK (IBS_FETCH_RAND_EN | IBS_FETCH_MAX_CNT)
#define IBS_OP_CONFIG_MASK IBS_OP_MAX_CNT
enum ibs_states {
IBS_ENABLED = 0,
IBS_STARTED = 1,
IBS_STOPPING = 2,
IBS_MAX_STATES,
};
struct cpu_perf_ibs {
struct perf_event *event;
unsigned long state[BITS_TO_LONGS(IBS_MAX_STATES)];
};
struct perf_ibs {
struct pmu pmu;
unsigned int msr;
u64 config_mask;
u64 cnt_mask;
u64 enable_mask;
u64 valid_mask;
u64 max_period;
unsigned long offset_mask[1];
int offset_max;
struct cpu_perf_ibs __percpu *pcpu;
u64 (*get_count)(u64 config);
};
struct perf_ibs_data {
u32 size;
union {
u32 data[0]; /* data buffer starts here */
u32 caps;
};
u64 regs[MSR_AMD64_IBS_REG_COUNT_MAX];
};
static int
perf_event_set_period(struct hw_perf_event *hwc, u64 min, u64 max, u64 *hw_period)
{
s64 left = local64_read(&hwc->period_left);
s64 period = hwc->sample_period;
int overflow = 0;
/*
* If we are way outside a reasonable range then just skip forward:
*/
if (unlikely(left <= -period)) {
left = period;
local64_set(&hwc->period_left, left);
hwc->last_period = period;
overflow = 1;
}
if (unlikely(left < (s64)min)) {
left += period;
local64_set(&hwc->period_left, left);
hwc->last_period = period;
overflow = 1;
}
/*
* If the hw period that triggers the sw overflow is too short
* we might hit the irq handler. This biases the results.
* Thus we shorten the next-to-last period and set the last
* period to the max period.
*/
if (left > max) {
left -= max;
if (left > max)
left = max;
else if (left < min)
left = min;
}
*hw_period = (u64)left;
return overflow;
}
static int
perf_event_try_update(struct perf_event *event, u64 new_raw_count, int width)
{
struct hw_perf_event *hwc = &event->hw;
int shift = 64 - width;
u64 prev_raw_count;
u64 delta;
/*
* Careful: an NMI might modify the previous event value.
*
* Our tactic to handle this is to first atomically read and
* exchange a new raw count - then add that new-prev delta
* count to the generic event atomically:
*/
prev_raw_count = local64_read(&hwc->prev_count);
if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
new_raw_count) != prev_raw_count)
return 0;
/*
* Now we have the new raw value and have updated the prev
* timestamp already. We can now calculate the elapsed delta
* (event-)time and add that to the generic event.
*
* Careful, not all hw sign-extends above the physical width
* of the count.
*/
delta = (new_raw_count << shift) - (prev_raw_count << shift);
delta >>= shift;
local64_add(delta, &event->count);
local64_sub(delta, &hwc->period_left);
return 1;
}
static struct perf_ibs perf_ibs_fetch;
static struct perf_ibs perf_ibs_op;
static struct perf_ibs *get_ibs_pmu(int type)
{
if (perf_ibs_fetch.pmu.type == type)
return &perf_ibs_fetch;
if (perf_ibs_op.pmu.type == type)
return &perf_ibs_op;
return NULL;
}
/*
* Use IBS for precise event sampling:
*
* perf record -a -e cpu-cycles:p ... # use ibs op counting cycle count
* perf record -a -e r076:p ... # same as -e cpu-cycles:p
* perf record -a -e r0C1:p ... # use ibs op counting micro-ops
*
* IbsOpCntCtl (bit 19) of IBS Execution Control Register (IbsOpCtl,
* MSRC001_1033) is used to select either cycle or micro-ops counting
* mode.
*
* The rip of IBS samples has skid 0. Thus, IBS supports precise
* levels 1 and 2 and the PERF_EFLAGS_EXACT is set. In rare cases the
* rip is invalid when IBS was not able to record the rip correctly.
* We clear PERF_EFLAGS_EXACT and take the rip from pt_regs then.
*
*/
static int perf_ibs_precise_event(struct perf_event *event, u64 *config)
{
switch (event->attr.precise_ip) {
case 0:
return -ENOENT;
case 1:
case 2:
break;
default:
return -EOPNOTSUPP;
}
switch (event->attr.type) {
case PERF_TYPE_HARDWARE:
switch (event->attr.config) {
case PERF_COUNT_HW_CPU_CYCLES:
*config = 0;
return 0;
}
break;
case PERF_TYPE_RAW:
switch (event->attr.config) {
case 0x0076:
*config = 0;
return 0;
case 0x00C1:
*config = IBS_OP_CNT_CTL;
return 0;
}
break;
default:
return -ENOENT;
}
return -EOPNOTSUPP;
}
static int perf_ibs_init(struct perf_event *event)
{
-	if (perf_ibs.type != event->attr.type)
	struct hw_perf_event *hwc = &event->hw;
struct perf_ibs *perf_ibs;
u64 max_cnt, config;
int ret;
perf_ibs = get_ibs_pmu(event->attr.type);
if (perf_ibs) {
config = event->attr.config;
} else {
perf_ibs = &perf_ibs_op;
ret = perf_ibs_precise_event(event, &config);
if (ret)
return ret;
}
if (event->pmu != &perf_ibs->pmu)
		return -ENOENT;
if (config & ~perf_ibs->config_mask)
return -EINVAL;
if (hwc->sample_period) {
if (config & perf_ibs->cnt_mask)
/* raw max_cnt may not be set */
return -EINVAL;
if (!event->attr.sample_freq && hwc->sample_period & 0x0f)
/*
* lower 4 bits can not be set in ibs max cnt,
* but allowing it in case we adjust the
* sample period to set a frequency.
*/
return -EINVAL;
hwc->sample_period &= ~0x0FULL;
if (!hwc->sample_period)
hwc->sample_period = 0x10;
} else {
max_cnt = config & perf_ibs->cnt_mask;
config &= ~perf_ibs->cnt_mask;
event->attr.sample_period = max_cnt << 4;
hwc->sample_period = event->attr.sample_period;
}
if (!hwc->sample_period)
return -EINVAL;
/*
* If we modify hwc->sample_period, we also need to update
* hwc->last_period and hwc->period_left.
*/
hwc->last_period = hwc->sample_period;
local64_set(&hwc->period_left, hwc->sample_period);
hwc->config_base = perf_ibs->msr;
hwc->config = config;
	return 0;
}
static int perf_ibs_set_period(struct perf_ibs *perf_ibs,
struct hw_perf_event *hwc, u64 *period)
{
int overflow;
/* ignore lower 4 bits in min count: */
overflow = perf_event_set_period(hwc, 1<<4, perf_ibs->max_period, period);
local64_set(&hwc->prev_count, 0);
return overflow;
}
static u64 get_ibs_fetch_count(u64 config)
{
return (config & IBS_FETCH_CNT) >> 12;
}
static u64 get_ibs_op_count(u64 config)
{
u64 count = 0;
if (config & IBS_OP_VAL)
count += (config & IBS_OP_MAX_CNT) << 4; /* cnt rolled over */
if (ibs_caps & IBS_CAPS_RDWROPCNT)
count += (config & IBS_OP_CUR_CNT) >> 32;
return count;
}
static void
perf_ibs_event_update(struct perf_ibs *perf_ibs, struct perf_event *event,
u64 *config)
{
u64 count = perf_ibs->get_count(*config);
/*
* Set width to 64 since we do not overflow on max width but
* instead on max count. In perf_ibs_set_period() we clear
* prev count manually on overflow.
*/
while (!perf_event_try_update(event, count, 64)) {
rdmsrl(event->hw.config_base, *config);
count = perf_ibs->get_count(*config);
}
}
static inline void perf_ibs_enable_event(struct perf_ibs *perf_ibs,
struct hw_perf_event *hwc, u64 config)
{
wrmsrl(hwc->config_base, hwc->config | config | perf_ibs->enable_mask);
}
/*
* Erratum #420 Instruction-Based Sampling Engine May Generate
* Interrupt that Cannot Be Cleared:
*
* Must clear counter mask first, then clear the enable bit. See
* Revision Guide for AMD Family 10h Processors, Publication #41322.
*/
static inline void perf_ibs_disable_event(struct perf_ibs *perf_ibs,
struct hw_perf_event *hwc, u64 config)
{
config &= ~perf_ibs->cnt_mask;
wrmsrl(hwc->config_base, config);
config &= ~perf_ibs->enable_mask;
wrmsrl(hwc->config_base, config);
}
/*
 * We cannot restore the ibs pmu state, so we always need to update
* the event while stopping it and then reset the state when starting
* again. Thus, ignoring PERF_EF_RELOAD and PERF_EF_UPDATE flags in
* perf_ibs_start()/perf_ibs_stop() and instead always do it.
*/
static void perf_ibs_start(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
u64 period;
if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
return;
WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
hwc->state = 0;
perf_ibs_set_period(perf_ibs, hwc, &period);
set_bit(IBS_STARTED, pcpu->state);
perf_ibs_enable_event(perf_ibs, hwc, period >> 4);
perf_event_update_userpage(event);
}
static void perf_ibs_stop(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
u64 config;
int stopping;
stopping = test_and_clear_bit(IBS_STARTED, pcpu->state);
if (!stopping && (hwc->state & PERF_HES_UPTODATE))
return;
rdmsrl(hwc->config_base, config);
if (stopping) {
set_bit(IBS_STOPPING, pcpu->state);
perf_ibs_disable_event(perf_ibs, hwc, config);
WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
hwc->state |= PERF_HES_STOPPED;
}
if (hwc->state & PERF_HES_UPTODATE)
return;
/*
* Clear valid bit to not count rollovers on update, rollovers
* are only updated in the irq handler.
*/
config &= ~perf_ibs->valid_mask;
perf_ibs_event_update(perf_ibs, event, &config);
hwc->state |= PERF_HES_UPTODATE;
}
static int perf_ibs_add(struct perf_event *event, int flags)
{
struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
if (test_and_set_bit(IBS_ENABLED, pcpu->state))
return -ENOSPC;
event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
pcpu->event = event;
if (flags & PERF_EF_START)
perf_ibs_start(event, PERF_EF_RELOAD);
	return 0;
}
static void perf_ibs_del(struct perf_event *event, int flags)
{
struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
if (!test_and_clear_bit(IBS_ENABLED, pcpu->state))
return;
perf_ibs_stop(event, PERF_EF_UPDATE);
pcpu->event = NULL;
perf_event_update_userpage(event);
}

-static struct pmu perf_ibs = {
-	.event_init	= perf_ibs_init,
-	.add		= perf_ibs_add,
-	.del		= perf_ibs_del,

static void perf_ibs_read(struct perf_event *event) { }

static struct perf_ibs perf_ibs_fetch = {
	.pmu = {
.task_ctx_nr = perf_invalid_context,
.event_init = perf_ibs_init,
.add = perf_ibs_add,
.del = perf_ibs_del,
.start = perf_ibs_start,
.stop = perf_ibs_stop,
.read = perf_ibs_read,
},
.msr = MSR_AMD64_IBSFETCHCTL,
.config_mask = IBS_FETCH_CONFIG_MASK,
.cnt_mask = IBS_FETCH_MAX_CNT,
.enable_mask = IBS_FETCH_ENABLE,
.valid_mask = IBS_FETCH_VAL,
.max_period = IBS_FETCH_MAX_CNT << 4,
.offset_mask = { MSR_AMD64_IBSFETCH_REG_MASK },
.offset_max = MSR_AMD64_IBSFETCH_REG_COUNT,
.get_count = get_ibs_fetch_count,
};
static struct perf_ibs perf_ibs_op = {
.pmu = {
.task_ctx_nr = perf_invalid_context,
.event_init = perf_ibs_init,
.add = perf_ibs_add,
.del = perf_ibs_del,
.start = perf_ibs_start,
.stop = perf_ibs_stop,
.read = perf_ibs_read,
},
.msr = MSR_AMD64_IBSOPCTL,
.config_mask = IBS_OP_CONFIG_MASK,
.cnt_mask = IBS_OP_MAX_CNT,
.enable_mask = IBS_OP_ENABLE,
.valid_mask = IBS_OP_VAL,
.max_period = IBS_OP_MAX_CNT << 4,
.offset_mask = { MSR_AMD64_IBSOP_REG_MASK },
.offset_max = MSR_AMD64_IBSOP_REG_COUNT,
.get_count = get_ibs_op_count,
};
static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
{
struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
struct perf_event *event = pcpu->event;
struct hw_perf_event *hwc = &event->hw;
struct perf_sample_data data;
struct perf_raw_record raw;
struct pt_regs regs;
struct perf_ibs_data ibs_data;
int offset, size, check_rip, offset_max, throttle = 0;
unsigned int msr;
u64 *buf, *config, period;
if (!test_bit(IBS_STARTED, pcpu->state)) {
/*
* Catch spurious interrupts after stopping IBS: After
		 * disabling IBS there could still be incoming NMIs
		 * with samples that even have the valid bit cleared.
		 * Mark all these NMIs as handled.
*/
return test_and_clear_bit(IBS_STOPPING, pcpu->state) ? 1 : 0;
}
msr = hwc->config_base;
buf = ibs_data.regs;
rdmsrl(msr, *buf);
if (!(*buf++ & perf_ibs->valid_mask))
return 0;
config = &ibs_data.regs[0];
perf_ibs_event_update(perf_ibs, event, config);
perf_sample_data_init(&data, 0, hwc->last_period);
if (!perf_ibs_set_period(perf_ibs, hwc, &period))
goto out; /* no sw counter overflow */
ibs_data.caps = ibs_caps;
size = 1;
offset = 1;
check_rip = (perf_ibs == &perf_ibs_op && (ibs_caps & IBS_CAPS_RIPINVALIDCHK));
if (event->attr.sample_type & PERF_SAMPLE_RAW)
offset_max = perf_ibs->offset_max;
else if (check_rip)
offset_max = 2;
else
offset_max = 1;
do {
rdmsrl(msr + offset, *buf++);
size++;
offset = find_next_bit(perf_ibs->offset_mask,
perf_ibs->offset_max,
offset + 1);
} while (offset < offset_max);
ibs_data.size = sizeof(u64) * size;
regs = *iregs;
if (check_rip && (ibs_data.regs[2] & IBS_RIP_INVALID)) {
regs.flags &= ~PERF_EFLAGS_EXACT;
} else {
instruction_pointer_set(&regs, ibs_data.regs[1]);
regs.flags |= PERF_EFLAGS_EXACT;
}
if (event->attr.sample_type & PERF_SAMPLE_RAW) {
raw.size = sizeof(u32) + ibs_data.size;
raw.data = ibs_data.data;
data.raw = &raw;
}
throttle = perf_event_overflow(event, &data, &regs);
out:
if (throttle)
perf_ibs_disable_event(perf_ibs, hwc, *config);
else
perf_ibs_enable_event(perf_ibs, hwc, period >> 4);
perf_event_update_userpage(event);
return 1;
}
static int __kprobes
perf_ibs_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
int handled = 0;
handled += perf_ibs_handle_irq(&perf_ibs_fetch, regs);
handled += perf_ibs_handle_irq(&perf_ibs_op, regs);
if (handled)
inc_irq_stat(apic_perf_irqs);
return handled;
}
static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name)
{
struct cpu_perf_ibs __percpu *pcpu;
int ret;
pcpu = alloc_percpu(struct cpu_perf_ibs);
if (!pcpu)
return -ENOMEM;
perf_ibs->pcpu = pcpu;
ret = perf_pmu_register(&perf_ibs->pmu, name, -1);
if (ret) {
perf_ibs->pcpu = NULL;
free_percpu(pcpu);
}
return ret;
}
static __init int perf_event_ibs_init(void)
{
	if (!ibs_caps)
		return -ENODEV;	/* ibs not supported by the cpu */

-	perf_pmu_register(&perf_ibs, "ibs", -1);
	perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");

	if (ibs_caps & IBS_CAPS_OPCNT)
		perf_ibs_op.config_mask |= IBS_OP_CNT_CTL;
	perf_ibs_pmu_init(&perf_ibs_op, "ibs_op");

	register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs");
	printk(KERN_INFO "perf: AMD IBS detected (0x%08x)\n", ibs_caps);

	return 0;


@@ -1027,8 +1027,6 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
	u64 status;
	int handled;

-	perf_sample_data_init(&data, 0);
-
	cpuc = &__get_cpu_var(cpu_hw_events);

	/*
@@ -1082,7 +1080,7 @@ again:
		if (!intel_pmu_save_and_restart(event))
			continue;

-		data.period = event->hw.last_period;
+		perf_sample_data_init(&data, 0, event->hw.last_period);

		if (has_branch_stack(event))
			data.br_stack = &cpuc->lbr_stack;


@@ -316,8 +316,7 @@ int intel_pmu_drain_bts_buffer(void)

	ds->bts_index = ds->bts_buffer_base;

-	perf_sample_data_init(&data, 0);
-	data.period = event->hw.last_period;
+	perf_sample_data_init(&data, 0, event->hw.last_period);
	regs.ip = 0;

	/*
@@ -564,8 +563,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
	if (!intel_pmu_save_and_restart(event))
		return;

-	perf_sample_data_init(&data, 0);
-	data.period = event->hw.last_period;
+	perf_sample_data_init(&data, 0, event->hw.last_period);

	/*
	 * We use the interrupt regs as a base because the PEBS record


@@ -1005,8 +1005,6 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
	int idx, handled = 0;
	u64 val;

-	perf_sample_data_init(&data, 0);
-
	cpuc = &__get_cpu_var(cpu_hw_events);

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
@@ -1034,10 +1032,12 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
		handled += overflow;

		/* event overflow for sure */
-		data.period = event->hw.last_period;
+		perf_sample_data_init(&data, 0, hwc->last_period);

		if (!x86_perf_event_set_period(event))
			continue;
+
		if (perf_event_overflow(event, &data, regs))
			x86_pmu_stop(event, 0);
	}


@@ -24,40 +24,21 @@
#include <trace/syscall.h>

#include <asm/cacheflush.h>
+#include <asm/kprobes.h>
#include <asm/ftrace.h>
#include <asm/nops.h>
-#include <asm/nmi.h>

#ifdef CONFIG_DYNAMIC_FTRACE

-/*
- * modifying_code is set to notify NMIs that they need to use
- * memory barriers when entering or exiting. But we don't want
- * to burden NMIs with unnecessary memory barriers when code
- * modification is not being done (which is most of the time).
- *
- * A mutex is already held when ftrace_arch_code_modify_prepare
- * and post_process are called. No locks need to be taken here.
- *
- * Stop machine will make sure currently running NMIs are done
- * and new NMIs will see the updated variable before we need
- * to worry about NMIs doing memory barriers.
- */
-static int modifying_code __read_mostly;
-static DEFINE_PER_CPU(int, save_modifying_code);
-
int ftrace_arch_code_modify_prepare(void)
{
	set_kernel_text_rw();
	set_all_modules_text_rw();
-	modifying_code = 1;
	return 0;
}

int ftrace_arch_code_modify_post_process(void)
{
-	modifying_code = 0;
	set_all_modules_text_ro();
	set_kernel_text_ro();
	return 0;
@@ -90,134 +71,6 @@ static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
	return calc.code;
}

-/*
- * Modifying code must take extra care. On an SMP machine, if
- * the code being modified is also being executed on another CPU
- * that CPU will have undefined results and possibly take a GPF.
- * We use kstop_machine to stop other CPUS from exectuing code.
- * But this does not stop NMIs from happening. We still need
- * to protect against that. We separate out the modification of
- * the code to take care of this.
- *
- * Two buffers are added: An IP buffer and a "code" buffer.
- *
- * 1) Put the instruction pointer into the IP buffer
- *    and the new code into the "code" buffer.
- * 2) Wait for any running NMIs to finish and set a flag that says
- *    we are modifying code, it is done in an atomic operation.
- * 3) Write the code
- * 4) clear the flag.
- * 5) Wait for any running NMIs to finish.
- *
- * If an NMI is executed, the first thing it does is to call
- * "ftrace_nmi_enter". This will check if the flag is set to write
- * and if it is, it will write what is in the IP and "code" buffers.
- *
- * The trick is, it does not matter if everyone is writing the same
- * content to the code location. Also, if a CPU is executing code
- * it is OK to write to that code location if the contents being written
- * are the same as what exists.
- */
-#define MOD_CODE_WRITE_FLAG (1 << 31)	/* set when NMI should do the write */
-static atomic_t nmi_running = ATOMIC_INIT(0);
-static int mod_code_status;		/* holds return value of text write */
-static void *mod_code_ip;		/* holds the IP to write to */
-static const void *mod_code_newcode;	/* holds the text to write to the IP */
-
-static unsigned nmi_wait_count;
-static atomic_t nmi_update_count = ATOMIC_INIT(0);
-
-int ftrace_arch_read_dyn_info(char *buf, int size)
-{
-	int r;
-
-	r = snprintf(buf, size, "%u %u",
-		     nmi_wait_count,
-		     atomic_read(&nmi_update_count));
-	return r;
-}
-
-static void clear_mod_flag(void)
-{
-	int old = atomic_read(&nmi_running);
-
-	for (;;) {
-		int new = old & ~MOD_CODE_WRITE_FLAG;
-
-		if (old == new)
-			break;
-
-		old = atomic_cmpxchg(&nmi_running, old, new);
-	}
-}
-
-static void ftrace_mod_code(void)
-{
-	/*
-	 * Yes, more than one CPU process can be writing to mod_code_status.
-	 * (and the code itself)
-	 * But if one were to fail, then they all should, and if one were
-	 * to succeed, then they all should.
-	 */
-	mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode,
-					     MCOUNT_INSN_SIZE);
-
-	/* if we fail, then kill any new writers */
-	if (mod_code_status)
-		clear_mod_flag();
-}
-
-void ftrace_nmi_enter(void)
-{
-	__this_cpu_write(save_modifying_code, modifying_code);
-
-	if (!__this_cpu_read(save_modifying_code))
-		return;
-
-	if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
-		smp_rmb();
-		ftrace_mod_code();
-		atomic_inc(&nmi_update_count);
-	}
-	/* Must have previous changes seen before executions */
-	smp_mb();
-}
-
-void ftrace_nmi_exit(void)
-{
-	if (!__this_cpu_read(save_modifying_code))
-		return;
-
-	/* Finish all executions before clearing nmi_running */
-	smp_mb();
-	atomic_dec(&nmi_running);
-}
-
-static void wait_for_nmi_and_set_mod_flag(void)
-{
-	if (!atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG))
-		return;
-
-	do {
-		cpu_relax();
-	} while (atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG));
-
-	nmi_wait_count++;
-}
-
-static void wait_for_nmi(void)
-{
-	if (!atomic_read(&nmi_running))
-		return;
-
-	do {
-		cpu_relax();
-	} while (atomic_read(&nmi_running));
-
-	nmi_wait_count++;
-}
-
static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
@@ -238,26 +91,7 @@ do_ftrace_mod_code(unsigned long ip, const void *new_code)
	if (within(ip, (unsigned long)_text, (unsigned long)_etext))
		ip = (unsigned long)__va(__pa(ip));

-	mod_code_ip = (void *)ip;
-	mod_code_newcode = new_code;
-
-	/* The buffers need to be visible before we let NMIs write them */
-	smp_mb();
-
-	wait_for_nmi_and_set_mod_flag();
-
-	/* Make sure all running NMIs have finished before we write the code */
-	smp_mb();
-
-	ftrace_mod_code();
-
-	/* Make sure the write happens before clearing the bit */
-	smp_mb();
-
-	clear_mod_flag();
-	wait_for_nmi();
-
-	return mod_code_status;
+	return probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE);
}

static const unsigned char *ftrace_nop_replace(void)
@@ -334,6 +168,336 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
	return ret;
}
int modifying_ftrace_code __read_mostly;
/*
* A breakpoint was added to the code address we are about to
* modify, and this is the handle that will just skip over it.
* We are either changing a nop into a trace call, or a trace
* call to a nop. While the change is taking place, we treat
* it just like it was a nop.
*/
int ftrace_int3_handler(struct pt_regs *regs)
{
if (WARN_ON_ONCE(!regs))
return 0;
if (!ftrace_location(regs->ip - 1))
return 0;
regs->ip += MCOUNT_INSN_SIZE - 1;
return 1;
}
static int ftrace_write(unsigned long ip, const char *val, int size)
{
/*
* On x86_64, kernel text mappings are mapped read-only with
* CONFIG_DEBUG_RODATA. So we use the kernel identity mapping instead
* of the kernel text mapping to modify the kernel text.
*
* For 32bit kernels, these mappings are same and we can use
* kernel identity mapping to modify code.
*/
if (within(ip, (unsigned long)_text, (unsigned long)_etext))
ip = (unsigned long)__va(__pa(ip));
return probe_kernel_write((void *)ip, val, size);
}
static int add_break(unsigned long ip, const char *old)
{
unsigned char replaced[MCOUNT_INSN_SIZE];
unsigned char brk = BREAKPOINT_INSTRUCTION;
if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
return -EFAULT;
/* Make sure it is what we expect it to be */
if (memcmp(replaced, old, MCOUNT_INSN_SIZE) != 0)
return -EINVAL;
if (ftrace_write(ip, &brk, 1))
return -EPERM;
return 0;
}
static int add_brk_on_call(struct dyn_ftrace *rec, unsigned long addr)
{
unsigned const char *old;
unsigned long ip = rec->ip;
old = ftrace_call_replace(ip, addr);
return add_break(rec->ip, old);
}
static int add_brk_on_nop(struct dyn_ftrace *rec)
{
unsigned const char *old;
old = ftrace_nop_replace();
return add_break(rec->ip, old);
}
static int add_breakpoints(struct dyn_ftrace *rec, int enable)
{
unsigned long ftrace_addr;
int ret;
ret = ftrace_test_record(rec, enable);
ftrace_addr = (unsigned long)FTRACE_ADDR;
switch (ret) {
case FTRACE_UPDATE_IGNORE:
return 0;
case FTRACE_UPDATE_MAKE_CALL:
/* converting nop to call */
return add_brk_on_nop(rec);
case FTRACE_UPDATE_MAKE_NOP:
/* converting a call to a nop */
return add_brk_on_call(rec, ftrace_addr);
}
return 0;
}
/*
* On error, we need to remove breakpoints. This needs to
 * be done carefully. If the address does not currently have a
* breakpoint, we know we are done. Otherwise, we look at the
* remaining 4 bytes of the instruction. If it matches a nop
* we replace the breakpoint with the nop. Otherwise we replace
* it with the call instruction.
*/
static int remove_breakpoint(struct dyn_ftrace *rec)
{
unsigned char ins[MCOUNT_INSN_SIZE];
unsigned char brk = BREAKPOINT_INSTRUCTION;
const unsigned char *nop;
unsigned long ftrace_addr;
unsigned long ip = rec->ip;
/* If we fail the read, just give up */
if (probe_kernel_read(ins, (void *)ip, MCOUNT_INSN_SIZE))
return -EFAULT;
/* If this does not have a breakpoint, we are done */
if (ins[0] != brk)
return -1;
nop = ftrace_nop_replace();
/*
* If the last 4 bytes of the instruction do not match
* a nop, then we assume that this is a call to ftrace_addr.
*/
if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0) {
/*
* For extra paranoidism, we check if the breakpoint is on
* a call that would actually jump to the ftrace_addr.
		 * If not, don't touch the breakpoint, we may just create
* a disaster.
*/
ftrace_addr = (unsigned long)FTRACE_ADDR;
nop = ftrace_call_replace(ip, ftrace_addr);
if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0)
return -EINVAL;
}
return probe_kernel_write((void *)ip, &nop[0], 1);
}
static int add_update_code(unsigned long ip, unsigned const char *new)
{
/* skip breakpoint */
ip++;
new++;
if (ftrace_write(ip, new, MCOUNT_INSN_SIZE - 1))
return -EPERM;
return 0;
}
static int add_update_call(struct dyn_ftrace *rec, unsigned long addr)
{
unsigned long ip = rec->ip;
unsigned const char *new;
new = ftrace_call_replace(ip, addr);
return add_update_code(ip, new);
}
static int add_update_nop(struct dyn_ftrace *rec)
{
unsigned long ip = rec->ip;
unsigned const char *new;
new = ftrace_nop_replace();
return add_update_code(ip, new);
}
static int add_update(struct dyn_ftrace *rec, int enable)
{
unsigned long ftrace_addr;
int ret;
ret = ftrace_test_record(rec, enable);
ftrace_addr = (unsigned long)FTRACE_ADDR;
switch (ret) {
case FTRACE_UPDATE_IGNORE:
return 0;
case FTRACE_UPDATE_MAKE_CALL:
/* converting nop to call */
return add_update_call(rec, ftrace_addr);
case FTRACE_UPDATE_MAKE_NOP:
/* converting a call to a nop */
return add_update_nop(rec);
}
return 0;
}
static int finish_update_call(struct dyn_ftrace *rec, unsigned long addr)
{
unsigned long ip = rec->ip;
unsigned const char *new;
new = ftrace_call_replace(ip, addr);
if (ftrace_write(ip, new, 1))
return -EPERM;
return 0;
}
static int finish_update_nop(struct dyn_ftrace *rec)
{
unsigned long ip = rec->ip;
unsigned const char *new;
new = ftrace_nop_replace();
if (ftrace_write(ip, new, 1))
return -EPERM;
return 0;
}
static int finish_update(struct dyn_ftrace *rec, int enable)
{
unsigned long ftrace_addr;
int ret;
ret = ftrace_update_record(rec, enable);
ftrace_addr = (unsigned long)FTRACE_ADDR;
switch (ret) {
case FTRACE_UPDATE_IGNORE:
return 0;
case FTRACE_UPDATE_MAKE_CALL:
/* converting nop to call */
return finish_update_call(rec, ftrace_addr);
case FTRACE_UPDATE_MAKE_NOP:
/* converting a call to a nop */
return finish_update_nop(rec);
}
return 0;
}
static void do_sync_core(void *data)
{
sync_core();
}
static void run_sync(void)
{
int enable_irqs = irqs_disabled();
	/* We may be called with interrupts disabled (on bootup). */
if (enable_irqs)
local_irq_enable();
on_each_cpu(do_sync_core, NULL, 1);
if (enable_irqs)
local_irq_disable();
}
void ftrace_replace_code(int enable)
{
struct ftrace_rec_iter *iter;
struct dyn_ftrace *rec;
const char *report = "adding breakpoints";
int count = 0;
int ret;
for_ftrace_rec_iter(iter) {
rec = ftrace_rec_iter_record(iter);
ret = add_breakpoints(rec, enable);
if (ret)
goto remove_breakpoints;
count++;
}
run_sync();
report = "updating code";
for_ftrace_rec_iter(iter) {
rec = ftrace_rec_iter_record(iter);
ret = add_update(rec, enable);
if (ret)
goto remove_breakpoints;
}
run_sync();
report = "removing breakpoints";
for_ftrace_rec_iter(iter) {
rec = ftrace_rec_iter_record(iter);
ret = finish_update(rec, enable);
if (ret)
goto remove_breakpoints;
}
run_sync();
return;
remove_breakpoints:
ftrace_bug(ret, rec ? rec->ip : 0);
printk(KERN_WARNING "Failed on %s (%d):\n", report, count);
for_ftrace_rec_iter(iter) {
rec = ftrace_rec_iter_record(iter);
remove_breakpoint(rec);
}
}
void arch_ftrace_update_code(int command)
{
modifying_ftrace_code++;
ftrace_modify_all_code(command);
modifying_ftrace_code--;
}
int __init ftrace_dyn_arch_init(void *data)
{
	/* The return code is returned via data */


@@ -84,7 +84,7 @@ __setup("unknown_nmi_panic", setup_unknown_nmi_panic);

#define nmi_to_desc(type) (&nmi_desc[type])

-static int notrace __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
+static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
{
	struct nmi_desc *desc = nmi_to_desc(type);
	struct nmiaction *a;
@@ -166,7 +166,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
}
EXPORT_SYMBOL_GPL(unregister_nmi_handler);

-static notrace __kprobes void
+static __kprobes void
pci_serr_error(unsigned char reason, struct pt_regs *regs)
{
	/* check to see if anyone registered against these types of errors */
@@ -197,7 +197,7 @@ pci_serr_error(unsigned char reason, struct pt_regs *regs)
	outb(reason, NMI_REASON_PORT);
}

-static notrace __kprobes void
+static __kprobes void
io_check_error(unsigned char reason, struct pt_regs *regs)
{
	unsigned long i;
@@ -228,7 +228,7 @@ io_check_error(unsigned char reason, struct pt_regs *regs)
	outb(reason, NMI_REASON_PORT);
}

-static notrace __kprobes void
+static __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
	int handled;
@@ -270,7 +270,7 @@ unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
static DEFINE_PER_CPU(bool, swallow_nmi);
static DEFINE_PER_CPU(unsigned long, last_nmi_rip);

-static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
+static __kprobes void default_do_nmi(struct pt_regs *regs)
{
	unsigned char reason = 0;
	int handled;


@@ -50,6 +50,7 @@
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <linux/atomic.h>
+#include <asm/ftrace.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/i387.h>
@@ -303,8 +304,13 @@ gp_in_kernel:
}

/* May run on IST stack. */
-dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
+dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_code)
{
+#ifdef CONFIG_DYNAMIC_FTRACE
+	/* ftrace must be first, everything else may cause a recursive crash */
+	if (unlikely(modifying_ftrace_code) && ftrace_int3_handler(regs))
+		return;
+#endif
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
			SIGTRAP) == NOTIFY_STOP)


@@ -486,8 +486,8 @@
	CPU_DISCARD(init.data)						\
	MEM_DISCARD(init.data)						\
	KERNEL_CTORS()							\
-	*(.init.rodata)							\
	MCOUNT_REC()							\
+	*(.init.rodata)							\
	FTRACE_EVENTS()							\
	TRACE_SYSCALLS()						\
	DEV_DISCARD(init.rodata)					\


@@ -286,10 +286,16 @@ struct ftrace_rec_iter *ftrace_rec_iter_start(void);
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);

+#define for_ftrace_rec_iter(iter)		\
+	for (iter = ftrace_rec_iter_start();	\
+	     iter;				\
+	     iter = ftrace_rec_iter_next(iter))
+
int ftrace_update_record(struct dyn_ftrace *rec, int enable);
int ftrace_test_record(struct dyn_ftrace *rec, int enable);
void ftrace_run_stop_machine(int command);
-int ftrace_location(unsigned long ip);
+unsigned long ftrace_location(unsigned long ip);

extern ftrace_func_t ftrace_trace_function;

@@ -308,11 +314,14 @@ ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);
/* defined in arch */
extern int ftrace_ip_converted(unsigned long ip);
extern int ftrace_dyn_arch_init(void *data);
+extern void ftrace_replace_code(int enable);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_call(void);
extern void mcount_call(void);

+void ftrace_modify_all_code(int command);
+
#ifndef FTRACE_ADDR
#define FTRACE_ADDR ((unsigned long)ftrace_caller)
#endif
@@ -485,8 +494,12 @@ static inline void __ftrace_enabled_restore(int enabled)
extern void trace_preempt_on(unsigned long a0, unsigned long a1);
extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
-static inline void trace_preempt_on(unsigned long a0, unsigned long a1) { }
-static inline void trace_preempt_off(unsigned long a0, unsigned long a1) { }
+/*
+ * Use defines instead of static inlines because some arches will make code out
+ * of the CALLER_ADDR, when we really want these to be a real nop.
+ */
+# define trace_preempt_on(a0, a1) do { } while (0)
+# define trace_preempt_off(a0, a1) do { } while (0)
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD


@@ -480,15 +480,16 @@ do {									\

#define trace_printk(fmt, args...)					\
do {									\
-	__trace_printk_check_format(fmt, ##args);			\
-	if (__builtin_constant_p(fmt)) {				\
-		static const char *trace_printk_fmt			\
-		__attribute__((section("__trace_printk_fmt"))) =	\
-		__builtin_constant_p(fmt) ? fmt : NULL;			\
+	static const char *trace_printk_fmt				\
+		__attribute__((section("__trace_printk_fmt"))) =	\
+		__builtin_constant_p(fmt) ? fmt : NULL;			\
									\
+	__trace_printk_check_format(fmt, ##args);			\
+									\
+	if (__builtin_constant_p(fmt))					\
		__trace_bprintk(_THIS_IP_, trace_printk_fmt, ##args);	\
-	} else								\
+	else								\
		__trace_printk(_THIS_IP_, fmt, ##args);			\
} while (0)

extern __printf(2, 3)


@ -1084,10 +1084,8 @@ extern void perf_pmu_unregister(struct pmu *pmu);
extern int perf_num_counters(void); extern int perf_num_counters(void);
extern const char *perf_pmu_name(void); extern const char *perf_pmu_name(void);
extern void __perf_event_task_sched_in(struct task_struct *prev, extern void __perf_event_task_sched(struct task_struct *prev,
struct task_struct *task); struct task_struct *next);
extern void __perf_event_task_sched_out(struct task_struct *prev,
struct task_struct *next);
extern int perf_event_init_task(struct task_struct *child); extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child); extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task); extern void perf_event_free_task(struct task_struct *task);
@ -1132,11 +1130,14 @@ struct perf_sample_data {
struct perf_branch_stack *br_stack; struct perf_branch_stack *br_stack;
}; };
static inline void perf_sample_data_init(struct perf_sample_data *data, u64 addr) static inline void perf_sample_data_init(struct perf_sample_data *data,
u64 addr, u64 period)
{ {
/* remaining struct members initialized in perf_prepare_sample() */
data->addr = addr; data->addr = addr;
data->raw = NULL; data->raw = NULL;
data->br_stack = NULL; data->br_stack = NULL;
data->period = period;
} }
extern void perf_output_sample(struct perf_output_handle *handle, extern void perf_output_sample(struct perf_output_handle *handle,
@ -1204,20 +1205,13 @@ perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
extern struct static_key_deferred perf_sched_events; extern struct static_key_deferred perf_sched_events;
static inline void perf_event_task_sched_in(struct task_struct *prev, static inline void perf_event_task_sched(struct task_struct *prev,
struct task_struct *task) struct task_struct *task)
{
if (static_key_false(&perf_sched_events.key))
__perf_event_task_sched_in(prev, task);
}
static inline void perf_event_task_sched_out(struct task_struct *prev,
struct task_struct *next)
{ {
perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0); perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);
if (static_key_false(&perf_sched_events.key)) if (static_key_false(&perf_sched_events.key))
__perf_event_task_sched_out(prev, next); __perf_event_task_sched(prev, task);
} }
extern void perf_event_mmap(struct vm_area_struct *vma); extern void perf_event_mmap(struct vm_area_struct *vma);
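The two scheduler hooks collapse into one inline that bumps the context-switch software event and then takes a rarely-true branch into the slow path. A hypothetical user-space sketch of that shape, with a plain flag plus __builtin_expect standing in for the kernel's patched static_key jump label:

#include <stdio.h>

static int sched_events_enabled;    /* static_key_false() stand-in */

static void __sched_hook(int prev, int next)
{
        printf("switch %d -> %d: sched out, then sched in\n", prev, next);
}

static inline void sched_hook(int prev, int next)
{
        /* count the software event here, then the rarely-taken branch */
        if (__builtin_expect(sched_events_enabled, 0))
                __sched_hook(prev, next);
}

int main(void)
{
        sched_hook(1, 2);           /* no per-event work: key disabled */
        sched_events_enabled = 1;
        sched_hook(2, 3);           /* slow path taken */
        return 0;
}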
@ -1292,11 +1286,8 @@ extern void perf_event_disable(struct perf_event *event);
extern void perf_event_task_tick(void); extern void perf_event_task_tick(void);
#else #else
static inline void static inline void
perf_event_task_sched_in(struct task_struct *prev, perf_event_task_sched(struct task_struct *prev,
struct task_struct *task) { } struct task_struct *task) { }
static inline void
perf_event_task_sched_out(struct task_struct *prev,
struct task_struct *next) { }
static inline int perf_event_init_task(struct task_struct *child) { return 0; } static inline int perf_event_init_task(struct task_struct *child) { return 0; }
static inline void perf_event_exit_task(struct task_struct *child) { } static inline void perf_event_exit_task(struct task_struct *child) { }
static inline void perf_event_free_task(struct task_struct *task) { } static inline void perf_event_free_task(struct task_struct *task) { }


@ -96,9 +96,11 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
__ring_buffer_alloc((size), (flags), &__key); \ __ring_buffer_alloc((size), (flags), &__key); \
}) })
#define RING_BUFFER_ALL_CPUS -1
void ring_buffer_free(struct ring_buffer *buffer); void ring_buffer_free(struct ring_buffer *buffer);
int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size); int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size, int cpu);
void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val); void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val);
@ -129,7 +131,7 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts);
void ring_buffer_iter_reset(struct ring_buffer_iter *iter); void ring_buffer_iter_reset(struct ring_buffer_iter *iter);
int ring_buffer_iter_empty(struct ring_buffer_iter *iter); int ring_buffer_iter_empty(struct ring_buffer_iter *iter);
unsigned long ring_buffer_size(struct ring_buffer *buffer); unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu);
void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu); void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu);
void ring_buffer_reset(struct ring_buffer *buffer); void ring_buffer_reset(struct ring_buffer *buffer);


@ -1198,7 +1198,7 @@ menu "Kernel Performance Events And Counters"
config PERF_EVENTS config PERF_EVENTS
bool "Kernel performance events and counters" bool "Kernel performance events and counters"
default y if (PROFILING || PERF_COUNTERS) default y if PROFILING
depends on HAVE_PERF_EVENTS depends on HAVE_PERF_EVENTS
select ANON_INODES select ANON_INODES
select IRQ_WORK select IRQ_WORK
@ -1225,18 +1225,6 @@ config PERF_EVENTS
Say Y if unsure. Say Y if unsure.
config PERF_COUNTERS
bool "Kernel performance counters (old config option)"
depends on HAVE_PERF_EVENTS
help
This config has been obsoleted by the PERF_EVENTS
config option - please see that one for details.
It has no effect on the kernel whether you enable
it or not, it is a compatibility placeholder.
Say N if unsure.
config DEBUG_PERF_USE_VMALLOC config DEBUG_PERF_USE_VMALLOC
default n default n
bool "Debug: use vmalloc to back perf mmap() buffers" bool "Debug: use vmalloc to back perf mmap() buffers"


@ -2039,8 +2039,8 @@ static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
* accessing the event control register. If a NMI hits, then it will * accessing the event control register. If a NMI hits, then it will
* not restart the event. * not restart the event.
*/ */
void __perf_event_task_sched_out(struct task_struct *task, static void __perf_event_task_sched_out(struct task_struct *task,
struct task_struct *next) struct task_struct *next)
{ {
int ctxn; int ctxn;
@ -2279,8 +2279,8 @@ static void perf_branch_stack_sched_in(struct task_struct *prev,
* accessing the event control register. If a NMI hits, then it will * accessing the event control register. If a NMI hits, then it will
* keep the event running. * keep the event running.
*/ */
void __perf_event_task_sched_in(struct task_struct *prev, static void __perf_event_task_sched_in(struct task_struct *prev,
struct task_struct *task) struct task_struct *task)
{ {
struct perf_event_context *ctx; struct perf_event_context *ctx;
int ctxn; int ctxn;
@ -2305,6 +2305,12 @@ void __perf_event_task_sched_in(struct task_struct *prev,
perf_branch_stack_sched_in(prev, task); perf_branch_stack_sched_in(prev, task);
} }
void __perf_event_task_sched(struct task_struct *prev, struct task_struct *next)
{
__perf_event_task_sched_out(prev, next);
__perf_event_task_sched_in(prev, next);
}
static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
{ {
u64 frequency = event->attr.sample_freq; u64 frequency = event->attr.sample_freq;
@ -4957,7 +4963,7 @@ void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
if (rctx < 0) if (rctx < 0)
return; return;
perf_sample_data_init(&data, addr); perf_sample_data_init(&data, addr, 0);
do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs); do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
@ -5215,7 +5221,7 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
.data = record, .data = record,
}; };
perf_sample_data_init(&data, addr); perf_sample_data_init(&data, addr, 0);
data.raw = &raw; data.raw = &raw;
hlist_for_each_entry_rcu(event, node, head, hlist_entry) { hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
@ -5318,7 +5324,7 @@ void perf_bp_event(struct perf_event *bp, void *data)
struct perf_sample_data sample; struct perf_sample_data sample;
struct pt_regs *regs = data; struct pt_regs *regs = data;
perf_sample_data_init(&sample, bp->attr.bp_addr); perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
if (!bp->hw.state && !perf_exclude_event(bp, regs)) if (!bp->hw.state && !perf_exclude_event(bp, regs))
perf_swevent_event(bp, 1, &sample, regs); perf_swevent_event(bp, 1, &sample, regs);
@ -5344,13 +5350,12 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
event->pmu->read(event); event->pmu->read(event);
perf_sample_data_init(&data, 0); perf_sample_data_init(&data, 0, event->hw.last_period);
data.period = event->hw.last_period;
regs = get_irq_regs(); regs = get_irq_regs();
if (regs && !perf_exclude_event(event, regs)) { if (regs && !perf_exclude_event(event, regs)) {
if (!(event->attr.exclude_idle && is_idle_task(current))) if (!(event->attr.exclude_idle && is_idle_task(current)))
if (perf_event_overflow(event, &data, regs)) if (__perf_event_overflow(event, 1, &data, regs))
ret = HRTIMER_NORESTART; ret = HRTIMER_NORESTART;
} }


@ -1914,7 +1914,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
struct task_struct *next) struct task_struct *next)
{ {
sched_info_switch(prev, next); sched_info_switch(prev, next);
perf_event_task_sched_out(prev, next); perf_event_task_sched(prev, next);
fire_sched_out_preempt_notifiers(prev, next); fire_sched_out_preempt_notifiers(prev, next);
prepare_lock_switch(rq, next); prepare_lock_switch(rq, next);
prepare_arch_switch(next); prepare_arch_switch(next);
@ -1957,13 +1957,6 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
*/ */
prev_state = prev->state; prev_state = prev->state;
finish_arch_switch(prev); finish_arch_switch(prev);
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
local_irq_disable();
#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
perf_event_task_sched_in(prev, current);
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
local_irq_enable();
#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
finish_lock_switch(rq, prev); finish_lock_switch(rq, prev);
finish_arch_post_lock_switch(); finish_arch_post_lock_switch();


@ -141,7 +141,6 @@ if FTRACE
config FUNCTION_TRACER config FUNCTION_TRACER
bool "Kernel Function Tracer" bool "Kernel Function Tracer"
depends on HAVE_FUNCTION_TRACER depends on HAVE_FUNCTION_TRACER
select FRAME_POINTER if !ARM_UNWIND && !PPC && !S390 && !MICROBLAZE
select KALLSYMS select KALLSYMS
select GENERIC_TRACER select GENERIC_TRACER
select CONTEXT_SWITCH_TRACER select CONTEXT_SWITCH_TRACER


@ -1383,13 +1383,36 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
static int ftrace_cmp_recs(const void *a, const void *b) static int ftrace_cmp_recs(const void *a, const void *b)
{ {
const struct dyn_ftrace *reca = a; const struct dyn_ftrace *key = a;
const struct dyn_ftrace *recb = b; const struct dyn_ftrace *rec = b;
if (reca->ip > recb->ip) if (key->flags < rec->ip)
return 1;
if (reca->ip < recb->ip)
return -1; return -1;
if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
return 1;
return 0;
}
static unsigned long ftrace_location_range(unsigned long start, unsigned long end)
{
struct ftrace_page *pg;
struct dyn_ftrace *rec;
struct dyn_ftrace key;
key.ip = start;
key.flags = end; /* overload flags, as it is unsigned long */
for (pg = ftrace_pages_start; pg; pg = pg->next) {
if (end < pg->records[0].ip ||
start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
continue;
rec = bsearch(&key, pg->records, pg->index,
sizeof(struct dyn_ftrace),
ftrace_cmp_recs);
if (rec)
return rec->ip;
}
return 0; return 0;
} }
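The comparator now treats the key as a [start, end] range, with the end smuggled through the otherwise unused flags field, so a single bsearch per page answers both point and range queries. A user-space sketch of the range comparator, where INSN_SIZE is a stand-in for MCOUNT_INSN_SIZE:

#include <stdio.h>
#include <stdlib.h>

#define INSN_SIZE 5

struct rec { unsigned long ip; unsigned long flags; };

static int cmp_recs(const void *a, const void *b)
{
        const struct rec *key = a;   /* key->ip = start, key->flags = end */
        const struct rec *rec = b;

        if (key->flags < rec->ip)                 /* range ends below rec */
                return -1;
        if (key->ip >= rec->ip + INSN_SIZE)       /* range starts above rec */
                return 1;
        return 0;                                 /* overlap: found it */
}

int main(void)
{
        struct rec recs[] = { {100}, {200}, {300} };  /* sorted by ip */
        struct rec key = { .ip = 203, .flags = 210 }; /* range [203, 210] */
        struct rec *hit = bsearch(&key, recs, 3, sizeof(recs[0]), cmp_recs);

        printf("hit: %lu\n", hit ? hit->ip : 0UL);    /* prints 200 */
        return 0;
}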
@ -1397,28 +1420,34 @@ static int ftrace_cmp_recs(const void *a, const void *b)
* ftrace_location - return true if the ip giving is a traced location * ftrace_location - return true if the ip giving is a traced location
* @ip: the instruction pointer to check * @ip: the instruction pointer to check
* *
* Returns 1 if @ip given is a pointer to a ftrace location. * Returns rec->ip if @ip given is a pointer to a ftrace location.
* That is, the instruction that is either a NOP or call to * That is, the instruction that is either a NOP or call to
* the function tracer. It checks the ftrace internal tables to * the function tracer. It checks the ftrace internal tables to
* determine if the address belongs or not. * determine if the address belongs or not.
*/ */
int ftrace_location(unsigned long ip) unsigned long ftrace_location(unsigned long ip)
{ {
struct ftrace_page *pg; return ftrace_location_range(ip, ip);
struct dyn_ftrace *rec; }
struct dyn_ftrace key;
key.ip = ip; /**
* ftrace_text_reserved - return true if range contains an ftrace location
* @start: start of range to search
* @end: end of range to search (inclusive). @end points to the last byte to check.
*
* Returns 1 if the range from @start to @end contains an ftrace location.
* That is, the instruction that is either a NOP or call to
* the function tracer. It checks the ftrace internal tables to
* determine if the address belongs or not.
*/
int ftrace_text_reserved(void *start, void *end)
{
unsigned long ret;
for (pg = ftrace_pages_start; pg; pg = pg->next) { ret = ftrace_location_range((unsigned long)start,
rec = bsearch(&key, pg->records, pg->index, (unsigned long)end);
sizeof(struct dyn_ftrace),
ftrace_cmp_recs);
if (rec)
return 1;
}
return 0; return (int)!!ret;
} }
static void __ftrace_hash_rec_update(struct ftrace_ops *ops, static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
@ -1520,35 +1549,6 @@ static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
__ftrace_hash_rec_update(ops, filter_hash, 1); __ftrace_hash_rec_update(ops, filter_hash, 1);
} }
static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
if (ftrace_pages->index == ftrace_pages->size) {
/* We should have allocated enough */
if (WARN_ON(!ftrace_pages->next))
return NULL;
ftrace_pages = ftrace_pages->next;
}
return &ftrace_pages->records[ftrace_pages->index++];
}
static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
struct dyn_ftrace *rec;
if (ftrace_disabled)
return NULL;
rec = ftrace_alloc_dyn_node(ip);
if (!rec)
return NULL;
rec->ip = ip;
return rec;
}
static void print_ip_ins(const char *fmt, unsigned char *p) static void print_ip_ins(const char *fmt, unsigned char *p)
{ {
int i; int i;
@ -1598,21 +1598,6 @@ void ftrace_bug(int failed, unsigned long ip)
} }
} }
/* Return 1 if the address range is reserved for ftrace */
int ftrace_text_reserved(void *start, void *end)
{
struct dyn_ftrace *rec;
struct ftrace_page *pg;
do_for_each_ftrace_rec(pg, rec) {
if (rec->ip <= (unsigned long)end &&
rec->ip + MCOUNT_INSN_SIZE > (unsigned long)start)
return 1;
} while_for_each_ftrace_rec();
return 0;
}
static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update) static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
{ {
unsigned long flag = 0UL; unsigned long flag = 0UL;
@ -1698,7 +1683,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
return -1; /* unknown ftrace bug */ return -1; /* unknown ftrace bug */
} }
static void ftrace_replace_code(int update) void __weak ftrace_replace_code(int enable)
{ {
struct dyn_ftrace *rec; struct dyn_ftrace *rec;
struct ftrace_page *pg; struct ftrace_page *pg;
@ -1708,7 +1693,7 @@ static void ftrace_replace_code(int update)
return; return;
do_for_each_ftrace_rec(pg, rec) { do_for_each_ftrace_rec(pg, rec) {
failed = __ftrace_replace_code(rec, update); failed = __ftrace_replace_code(rec, enable);
if (failed) { if (failed) {
ftrace_bug(failed, rec->ip); ftrace_bug(failed, rec->ip);
/* Stop processing */ /* Stop processing */
@ -1826,22 +1811,27 @@ int __weak ftrace_arch_code_modify_post_process(void)
return 0; return 0;
} }
void ftrace_modify_all_code(int command)
{
if (command & FTRACE_UPDATE_CALLS)
ftrace_replace_code(1);
else if (command & FTRACE_DISABLE_CALLS)
ftrace_replace_code(0);
if (command & FTRACE_UPDATE_TRACE_FUNC)
ftrace_update_ftrace_func(ftrace_trace_function);
if (command & FTRACE_START_FUNC_RET)
ftrace_enable_ftrace_graph_caller();
else if (command & FTRACE_STOP_FUNC_RET)
ftrace_disable_ftrace_graph_caller();
}
static int __ftrace_modify_code(void *data) static int __ftrace_modify_code(void *data)
{ {
int *command = data; int *command = data;
if (*command & FTRACE_UPDATE_CALLS) ftrace_modify_all_code(*command);
ftrace_replace_code(1);
else if (*command & FTRACE_DISABLE_CALLS)
ftrace_replace_code(0);
if (*command & FTRACE_UPDATE_TRACE_FUNC)
ftrace_update_ftrace_func(ftrace_trace_function);
if (*command & FTRACE_START_FUNC_RET)
ftrace_enable_ftrace_graph_caller();
else if (*command & FTRACE_STOP_FUNC_RET)
ftrace_disable_ftrace_graph_caller();
return 0; return 0;
} }
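Marking the generic loop __weak lets an architecture ship its own ftrace_replace_code() while every other arch links against this one, and ftrace_modify_all_code() gives them the command decoding for free. The weak-symbol mechanism itself, sketched in user-space C with a made-up name:

#include <stdio.h>

/* Default, overridable definition; if another translation unit defines
 * a strong replace_code(), the linker picks that one instead, which is
 * exactly how an arch overrides the kernel's __weak ftrace_replace_code. */
void __attribute__((weak)) replace_code(int enable)
{
        printf("generic replace_code(%d)\n", enable);
}

int main(void)
{
        replace_code(1);
        return 0;
}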
@ -2469,57 +2459,35 @@ static int
ftrace_avail_open(struct inode *inode, struct file *file) ftrace_avail_open(struct inode *inode, struct file *file)
{ {
struct ftrace_iterator *iter; struct ftrace_iterator *iter;
int ret;
if (unlikely(ftrace_disabled)) if (unlikely(ftrace_disabled))
return -ENODEV; return -ENODEV;
iter = kzalloc(sizeof(*iter), GFP_KERNEL); iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
if (!iter) if (iter) {
return -ENOMEM; iter->pg = ftrace_pages_start;
iter->ops = &global_ops;
iter->pg = ftrace_pages_start;
iter->ops = &global_ops;
ret = seq_open(file, &show_ftrace_seq_ops);
if (!ret) {
struct seq_file *m = file->private_data;
m->private = iter;
} else {
kfree(iter);
} }
return ret; return iter ? 0 : -ENOMEM;
} }
static int static int
ftrace_enabled_open(struct inode *inode, struct file *file) ftrace_enabled_open(struct inode *inode, struct file *file)
{ {
struct ftrace_iterator *iter; struct ftrace_iterator *iter;
int ret;
if (unlikely(ftrace_disabled)) if (unlikely(ftrace_disabled))
return -ENODEV; return -ENODEV;
iter = kzalloc(sizeof(*iter), GFP_KERNEL); iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
if (!iter) if (iter) {
return -ENOMEM; iter->pg = ftrace_pages_start;
iter->flags = FTRACE_ITER_ENABLED;
iter->pg = ftrace_pages_start; iter->ops = &global_ops;
iter->flags = FTRACE_ITER_ENABLED;
iter->ops = &global_ops;
ret = seq_open(file, &show_ftrace_seq_ops);
if (!ret) {
struct seq_file *m = file->private_data;
m->private = iter;
} else {
kfree(iter);
} }
return ret; return iter ? 0 : -ENOMEM;
} }
static void ftrace_filter_reset(struct ftrace_hash *hash) static void ftrace_filter_reset(struct ftrace_hash *hash)
@ -3688,22 +3656,36 @@ static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
return 0; return 0;
} }
static void ftrace_swap_recs(void *a, void *b, int size) static int ftrace_cmp_ips(const void *a, const void *b)
{ {
struct dyn_ftrace *reca = a; const unsigned long *ipa = a;
struct dyn_ftrace *recb = b; const unsigned long *ipb = b;
struct dyn_ftrace t;
t = *reca; if (*ipa > *ipb)
*reca = *recb; return 1;
*recb = t; if (*ipa < *ipb)
return -1;
return 0;
}
static void ftrace_swap_ips(void *a, void *b, int size)
{
unsigned long *ipa = a;
unsigned long *ipb = b;
unsigned long t;
t = *ipa;
*ipa = *ipb;
*ipb = t;
} }
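Sorting the whole mcount address array once up front, rather than each page of records after the fact, is what lets ftrace_location_range() skip entire pages by address. A user-space sketch with libc qsort (the kernel's sort() additionally takes the swap callback above); the comparator branches instead of subtracting because subtracting unsigned long addresses can overflow the int return:

#include <stdio.h>
#include <stdlib.h>

static int cmp_ips(const void *a, const void *b)
{
        const unsigned long *ipa = a, *ipb = b;

        if (*ipa > *ipb)
                return 1;
        if (*ipa < *ipb)
                return -1;
        return 0;
}

int main(void)
{
        unsigned long ips[] = { 0x400, 0x100, 0x300, 0x200 };
        size_t i;

        qsort(ips, 4, sizeof(ips[0]), cmp_ips);
        for (i = 0; i < 4; i++)
                printf("%#lx\n", ips[i]);   /* 0x100 0x200 0x300 0x400 */
        return 0;
}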
static int ftrace_process_locs(struct module *mod, static int ftrace_process_locs(struct module *mod,
unsigned long *start, unsigned long *start,
unsigned long *end) unsigned long *end)
{ {
struct ftrace_page *start_pg;
struct ftrace_page *pg; struct ftrace_page *pg;
struct dyn_ftrace *rec;
unsigned long count; unsigned long count;
unsigned long *p; unsigned long *p;
unsigned long addr; unsigned long addr;
@ -3715,8 +3697,11 @@ static int ftrace_process_locs(struct module *mod,
if (!count) if (!count)
return 0; return 0;
pg = ftrace_allocate_pages(count); sort(start, count, sizeof(*start),
if (!pg) ftrace_cmp_ips, ftrace_swap_ips);
start_pg = ftrace_allocate_pages(count);
if (!start_pg)
return -ENOMEM; return -ENOMEM;
mutex_lock(&ftrace_lock); mutex_lock(&ftrace_lock);
@ -3729,7 +3714,7 @@ static int ftrace_process_locs(struct module *mod,
if (!mod) { if (!mod) {
WARN_ON(ftrace_pages || ftrace_pages_start); WARN_ON(ftrace_pages || ftrace_pages_start);
/* First initialization */ /* First initialization */
ftrace_pages = ftrace_pages_start = pg; ftrace_pages = ftrace_pages_start = start_pg;
} else { } else {
if (!ftrace_pages) if (!ftrace_pages)
goto out; goto out;
@ -3740,11 +3725,11 @@ static int ftrace_process_locs(struct module *mod,
ftrace_pages = ftrace_pages->next; ftrace_pages = ftrace_pages->next;
} }
ftrace_pages->next = pg; ftrace_pages->next = start_pg;
ftrace_pages = pg;
} }
p = start; p = start;
pg = start_pg;
while (p < end) { while (p < end) {
addr = ftrace_call_adjust(*p++); addr = ftrace_call_adjust(*p++);
/* /*
@ -3755,17 +3740,26 @@ static int ftrace_process_locs(struct module *mod,
*/ */
if (!addr) if (!addr)
continue; continue;
if (!ftrace_record_ip(addr))
break; if (pg->index == pg->size) {
/* We should have allocated enough */
if (WARN_ON(!pg->next))
break;
pg = pg->next;
}
rec = &pg->records[pg->index++];
rec->ip = addr;
} }
/* These new locations need to be initialized */ /* We should have used all pages */
ftrace_new_pgs = pg; WARN_ON(pg->next);
/* Make each individual set of pages sorted by ips */ /* Assign the last page to ftrace_pages */
for (; pg; pg = pg->next) ftrace_pages = pg;
sort(pg->records, pg->index, sizeof(struct dyn_ftrace),
ftrace_cmp_recs, ftrace_swap_recs); /* These new locations need to be initialized */
ftrace_new_pgs = start_pg;
/* /*
* We only need to disable interrupts on start up * We only need to disable interrupts on start up


@ -23,6 +23,8 @@
#include <asm/local.h> #include <asm/local.h>
#include "trace.h" #include "trace.h"
static void update_pages_handler(struct work_struct *work);
/* /*
* The ring buffer header is special. We must manually up keep it. * The ring buffer header is special. We must manually up keep it.
*/ */
@ -449,6 +451,7 @@ struct ring_buffer_per_cpu {
raw_spinlock_t reader_lock; /* serialize readers */ raw_spinlock_t reader_lock; /* serialize readers */
arch_spinlock_t lock; arch_spinlock_t lock;
struct lock_class_key lock_key; struct lock_class_key lock_key;
unsigned int nr_pages;
struct list_head *pages; struct list_head *pages;
struct buffer_page *head_page; /* read from head */ struct buffer_page *head_page; /* read from head */
struct buffer_page *tail_page; /* write to tail */ struct buffer_page *tail_page; /* write to tail */
@ -466,13 +469,18 @@ struct ring_buffer_per_cpu {
unsigned long read_bytes; unsigned long read_bytes;
u64 write_stamp; u64 write_stamp;
u64 read_stamp; u64 read_stamp;
/* ring buffer pages to update, > 0 to add, < 0 to remove */
int nr_pages_to_update;
struct list_head new_pages; /* new pages to add */
struct work_struct update_pages_work;
struct completion update_done;
}; };
struct ring_buffer { struct ring_buffer {
unsigned pages;
unsigned flags; unsigned flags;
int cpus; int cpus;
atomic_t record_disabled; atomic_t record_disabled;
atomic_t resize_disabled;
cpumask_var_t cpumask; cpumask_var_t cpumask;
struct lock_class_key *reader_lock_key; struct lock_class_key *reader_lock_key;
@ -937,6 +945,10 @@ static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
struct list_head *head = cpu_buffer->pages; struct list_head *head = cpu_buffer->pages;
struct buffer_page *bpage, *tmp; struct buffer_page *bpage, *tmp;
/* Reset the head page if it exists */
if (cpu_buffer->head_page)
rb_set_head_page(cpu_buffer);
rb_head_page_deactivate(cpu_buffer); rb_head_page_deactivate(cpu_buffer);
if (RB_WARN_ON(cpu_buffer, head->next->prev != head)) if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
@ -963,14 +975,10 @@ static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
return 0; return 0;
} }
static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, static int __rb_allocate_pages(int nr_pages, struct list_head *pages, int cpu)
unsigned nr_pages)
{ {
int i;
struct buffer_page *bpage, *tmp; struct buffer_page *bpage, *tmp;
LIST_HEAD(pages);
unsigned i;
WARN_ON(!nr_pages);
for (i = 0; i < nr_pages; i++) { for (i = 0; i < nr_pages; i++) {
struct page *page; struct page *page;
@ -981,15 +989,13 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
*/ */
bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()), bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
GFP_KERNEL | __GFP_NORETRY, GFP_KERNEL | __GFP_NORETRY,
cpu_to_node(cpu_buffer->cpu)); cpu_to_node(cpu));
if (!bpage) if (!bpage)
goto free_pages; goto free_pages;
rb_check_bpage(cpu_buffer, bpage); list_add(&bpage->list, pages);
list_add(&bpage->list, &pages); page = alloc_pages_node(cpu_to_node(cpu),
page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu),
GFP_KERNEL | __GFP_NORETRY, 0); GFP_KERNEL | __GFP_NORETRY, 0);
if (!page) if (!page)
goto free_pages; goto free_pages;
@ -997,6 +1003,27 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
rb_init_page(bpage->page); rb_init_page(bpage->page);
} }
return 0;
free_pages:
list_for_each_entry_safe(bpage, tmp, pages, list) {
list_del_init(&bpage->list);
free_buffer_page(bpage);
}
return -ENOMEM;
}
static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
unsigned nr_pages)
{
LIST_HEAD(pages);
WARN_ON(!nr_pages);
if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu))
return -ENOMEM;
/* /*
* The ring buffer page list is a circular list that does not * The ring buffer page list is a circular list that does not
* start and end with a list head. All page list items point to * start and end with a list head. All page list items point to
@ -1005,20 +1032,15 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
cpu_buffer->pages = pages.next; cpu_buffer->pages = pages.next;
list_del(&pages); list_del(&pages);
cpu_buffer->nr_pages = nr_pages;
rb_check_pages(cpu_buffer); rb_check_pages(cpu_buffer);
return 0; return 0;
free_pages:
list_for_each_entry_safe(bpage, tmp, &pages, list) {
list_del_init(&bpage->list);
free_buffer_page(bpage);
}
return -ENOMEM;
} }
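__rb_allocate_pages() keeps the all-or-nothing contract: pages are linked onto a caller-supplied list, and the first failure unwinds everything already built, which is what lets the resize path preallocate safely. A hypothetical user-space sketch of the same roll-back pattern:

#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; char *payload; };

static struct node *alloc_nodes(int n)
{
        struct node *head = NULL, *nd;

        while (n--) {
                nd = malloc(sizeof(*nd));
                if (!nd || !(nd->payload = malloc(64))) {
                        free(nd);               /* nd may be NULL: fine */
                        goto free_nodes;
                }
                nd->next = head;                /* push onto local list */
                head = nd;
        }
        return head;

free_nodes:
        while (head) {                          /* roll back partial work */
                nd = head;
                head = head->next;
                free(nd->payload);
                free(nd);
        }
        return NULL;
}

int main(void)
{
        struct node *list = alloc_nodes(4);

        printf("%s\n", list ? "allocated" : "rolled back");
        return 0;                               /* success-path free omitted */
}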
static struct ring_buffer_per_cpu * static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu) rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
{ {
struct ring_buffer_per_cpu *cpu_buffer; struct ring_buffer_per_cpu *cpu_buffer;
struct buffer_page *bpage; struct buffer_page *bpage;
@ -1035,6 +1057,8 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
raw_spin_lock_init(&cpu_buffer->reader_lock); raw_spin_lock_init(&cpu_buffer->reader_lock);
lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
init_completion(&cpu_buffer->update_done);
bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()), bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
GFP_KERNEL, cpu_to_node(cpu)); GFP_KERNEL, cpu_to_node(cpu));
@ -1052,7 +1076,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
INIT_LIST_HEAD(&cpu_buffer->reader_page->list); INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
ret = rb_allocate_pages(cpu_buffer, buffer->pages); ret = rb_allocate_pages(cpu_buffer, nr_pages);
if (ret < 0) if (ret < 0)
goto fail_free_reader; goto fail_free_reader;
@ -1113,7 +1137,7 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
{ {
struct ring_buffer *buffer; struct ring_buffer *buffer;
int bsize; int bsize;
int cpu; int cpu, nr_pages;
/* keep it in its own cache line */ /* keep it in its own cache line */
buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()), buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
@ -1124,14 +1148,14 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL)) if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
goto fail_free_buffer; goto fail_free_buffer;
buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
buffer->flags = flags; buffer->flags = flags;
buffer->clock = trace_clock_local; buffer->clock = trace_clock_local;
buffer->reader_lock_key = key; buffer->reader_lock_key = key;
/* need at least two pages */ /* need at least two pages */
if (buffer->pages < 2) if (nr_pages < 2)
buffer->pages = 2; nr_pages = 2;
/* /*
* In case of non-hotplug cpu, if the ring-buffer is allocated * In case of non-hotplug cpu, if the ring-buffer is allocated
@ -1154,7 +1178,7 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
for_each_buffer_cpu(buffer, cpu) { for_each_buffer_cpu(buffer, cpu) {
buffer->buffers[cpu] = buffer->buffers[cpu] =
rb_allocate_cpu_buffer(buffer, cpu); rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
if (!buffer->buffers[cpu]) if (!buffer->buffers[cpu])
goto fail_free_buffers; goto fail_free_buffers;
} }
@ -1222,58 +1246,222 @@ void ring_buffer_set_clock(struct ring_buffer *buffer,
static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer); static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
static void static inline unsigned long rb_page_entries(struct buffer_page *bpage)
rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
{ {
struct buffer_page *bpage; return local_read(&bpage->entries) & RB_WRITE_MASK;
struct list_head *p;
unsigned i;
raw_spin_lock_irq(&cpu_buffer->reader_lock);
rb_head_page_deactivate(cpu_buffer);
for (i = 0; i < nr_pages; i++) {
if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
goto out;
p = cpu_buffer->pages->next;
bpage = list_entry(p, struct buffer_page, list);
list_del_init(&bpage->list);
free_buffer_page(bpage);
}
if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
goto out;
rb_reset_cpu(cpu_buffer);
rb_check_pages(cpu_buffer);
out:
raw_spin_unlock_irq(&cpu_buffer->reader_lock);
} }
static void static inline unsigned long rb_page_write(struct buffer_page *bpage)
rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
struct list_head *pages, unsigned nr_pages)
{ {
struct buffer_page *bpage; return local_read(&bpage->write) & RB_WRITE_MASK;
struct list_head *p; }
unsigned i;
static int
rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
{
struct list_head *tail_page, *to_remove, *next_page;
struct buffer_page *to_remove_page, *tmp_iter_page;
struct buffer_page *last_page, *first_page;
unsigned int nr_removed;
unsigned long head_bit;
int page_entries;
head_bit = 0;
raw_spin_lock_irq(&cpu_buffer->reader_lock); raw_spin_lock_irq(&cpu_buffer->reader_lock);
rb_head_page_deactivate(cpu_buffer); atomic_inc(&cpu_buffer->record_disabled);
/*
* We don't race with the readers since we have acquired the reader
* lock. We also don't race with writers after disabling recording.
* This makes it easy to figure out the first and the last page to be
* removed from the list. We unlink all the pages in between including
* the first and last pages. This is done in a busy loop so that we
* lose the least number of traces.
* The pages are freed after we restart recording and unlock readers.
*/
tail_page = &cpu_buffer->tail_page->list;
for (i = 0; i < nr_pages; i++) { /*
if (RB_WARN_ON(cpu_buffer, list_empty(pages))) * tail page might be on reader page, we remove the next page
goto out; * from the ring buffer
p = pages->next; */
bpage = list_entry(p, struct buffer_page, list); if (cpu_buffer->tail_page == cpu_buffer->reader_page)
list_del_init(&bpage->list); tail_page = rb_list_head(tail_page->next);
list_add_tail(&bpage->list, cpu_buffer->pages); to_remove = tail_page;
/* start of pages to remove */
first_page = list_entry(rb_list_head(to_remove->next),
struct buffer_page, list);
for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
to_remove = rb_list_head(to_remove)->next;
head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
} }
rb_reset_cpu(cpu_buffer);
rb_check_pages(cpu_buffer);
out: next_page = rb_list_head(to_remove)->next;
/*
* Now we remove all pages between tail_page and next_page.
* Make sure that we have head_bit value preserved for the
* next page
*/
tail_page->next = (struct list_head *)((unsigned long)next_page |
head_bit);
next_page = rb_list_head(next_page);
next_page->prev = tail_page;
/* make sure pages points to a valid page in the ring buffer */
cpu_buffer->pages = next_page;
/* update head page */
if (head_bit)
cpu_buffer->head_page = list_entry(next_page,
struct buffer_page, list);
/*
* change read pointer to make sure any read iterators reset
* themselves
*/
cpu_buffer->read = 0;
/* pages are removed, resume tracing and then free the pages */
atomic_dec(&cpu_buffer->record_disabled);
raw_spin_unlock_irq(&cpu_buffer->reader_lock); raw_spin_unlock_irq(&cpu_buffer->reader_lock);
RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));
/* last buffer page to remove */
last_page = list_entry(rb_list_head(to_remove), struct buffer_page,
list);
tmp_iter_page = first_page;
do {
to_remove_page = tmp_iter_page;
rb_inc_page(cpu_buffer, &tmp_iter_page);
/* update the counters */
page_entries = rb_page_entries(to_remove_page);
if (page_entries) {
/*
* If something was added to this page, it was full
* since it is not the tail page. So we deduct the
* bytes consumed in ring buffer from here.
* No need to update overruns, since this page is
* deleted from ring buffer and its entries are
* already accounted for.
*/
local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
}
/*
* We have already removed references to this list item, just
* free up the buffer_page and its page
*/
free_buffer_page(to_remove_page);
nr_removed--;
} while (to_remove_page != last_page);
RB_WARN_ON(cpu_buffer, nr_removed);
return nr_removed == 0;
}
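The removal loop goes to some trouble to preserve head_bit because the ring buffer stores its HEAD marker in the low bits of the ->next pointer; rb_list_head() masks it off, and the splice above ORs it back in. A minimal user-space illustration of low-bit pointer tagging, assuming at least 4-byte alignment:

#include <stdio.h>
#include <stdint.h>

#define PAGE_HEAD 1UL   /* stand-in for RB_PAGE_HEAD */

static void *untag(const void *p)          /* kernel: rb_list_head() */
{
        return (void *)((uintptr_t)p & ~3UL);
}

int main(void)
{
        int page = 42;                     /* at least 4-byte aligned */
        void *next = (void *)((uintptr_t)&page | PAGE_HEAD);

        printf("head flag: %lu, value: %d\n",
               (unsigned long)((uintptr_t)next & PAGE_HEAD),
               *(int *)untag(next));
        return 0;
}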
static int
rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
struct list_head *pages = &cpu_buffer->new_pages;
int retries, success;
raw_spin_lock_irq(&cpu_buffer->reader_lock);
/*
* We are holding the reader lock, so the reader page won't be swapped
* in the ring buffer. Now we are racing with the writer trying to
* move head page and the tail page.
* We are going to adapt the reader page update process where:
* 1. We first splice the start and end of list of new pages between
* the head page and its previous page.
* 2. We cmpxchg the prev_page->next to point from head page to the
* start of new pages list.
* 3. Finally, we update the head->prev to the end of new list.
*
* We will try this process 10 times, to make sure that we don't keep
* spinning.
*/
retries = 10;
success = 0;
while (retries--) {
struct list_head *head_page, *prev_page, *r;
struct list_head *last_page, *first_page;
struct list_head *head_page_with_bit;
head_page = &rb_set_head_page(cpu_buffer)->list;
prev_page = head_page->prev;
first_page = pages->next;
last_page = pages->prev;
head_page_with_bit = (struct list_head *)
((unsigned long)head_page | RB_PAGE_HEAD);
last_page->next = head_page_with_bit;
first_page->prev = prev_page;
r = cmpxchg(&prev_page->next, head_page_with_bit, first_page);
if (r == head_page_with_bit) {
/*
* yay, we replaced the page pointer to our new list,
* now, we just have to update to head page's prev
* pointer to point to end of list
*/
head_page->prev = last_page;
success = 1;
break;
}
}
if (success)
INIT_LIST_HEAD(pages);
/*
* If we weren't successful in adding in new pages, warn and stop
* tracing
*/
RB_WARN_ON(cpu_buffer, !success);
raw_spin_unlock_irq(&cpu_buffer->reader_lock);
/* free pages if they weren't inserted */
if (!success) {
struct buffer_page *bpage, *tmp;
list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
list) {
list_del_init(&bpage->list);
free_buffer_page(bpage);
}
}
return success;
}
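Insertion races with the writer using a bounded compare-and-swap: splice the new sublist in front of the head page, and retry up to ten times if the writer moved the head first. A user-space sketch with C11 atomics, where a single atomic pointer stands in for prev_page->next:

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
        int old_head = 1, new_first = 2;
        _Atomic(int *) prev_next;
        int retries = 10, success = 0;

        atomic_store(&prev_next, &old_head);

        while (retries--) {
                int *expected = &old_head;         /* re-sample each try */

                if (atomic_compare_exchange_strong(&prev_next, &expected,
                                                   &new_first)) {
                        success = 1;               /* splice landed */
                        break;
                }
                /* someone moved the head: loop and try again */
        }

        printf("%s\n", success ? "spliced" : "gave up, warn and bail");
        return 0;
}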
static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
int success;
if (cpu_buffer->nr_pages_to_update > 0)
success = rb_insert_pages(cpu_buffer);
else
success = rb_remove_pages(cpu_buffer,
-cpu_buffer->nr_pages_to_update);
if (success)
cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
}
static void update_pages_handler(struct work_struct *work)
{
struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
struct ring_buffer_per_cpu, update_pages_work);
rb_update_pages(cpu_buffer);
complete(&cpu_buffer->update_done);
} }
/** /**
@ -1283,16 +1471,14 @@ out:
* *
* Minimum size is 2 * BUF_PAGE_SIZE. * Minimum size is 2 * BUF_PAGE_SIZE.
* *
* Returns -1 on failure. * Returns 0 on success and < 0 on failure.
*/ */
int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size) int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
int cpu_id)
{ {
struct ring_buffer_per_cpu *cpu_buffer; struct ring_buffer_per_cpu *cpu_buffer;
unsigned nr_pages, rm_pages, new_pages; unsigned nr_pages;
struct buffer_page *bpage, *tmp; int cpu, err = 0;
unsigned long buffer_size;
LIST_HEAD(pages);
int i, cpu;
/* /*
* Always succeed at resizing a non-existent buffer: * Always succeed at resizing a non-existent buffer:
@ -1302,113 +1488,154 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
size = DIV_ROUND_UP(size, BUF_PAGE_SIZE); size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
size *= BUF_PAGE_SIZE; size *= BUF_PAGE_SIZE;
buffer_size = buffer->pages * BUF_PAGE_SIZE;
/* we need a minimum of two pages */ /* we need a minimum of two pages */
if (size < BUF_PAGE_SIZE * 2) if (size < BUF_PAGE_SIZE * 2)
size = BUF_PAGE_SIZE * 2; size = BUF_PAGE_SIZE * 2;
if (size == buffer_size)
return size;
atomic_inc(&buffer->record_disabled);
/* Make sure all writers are done with this buffer. */
synchronize_sched();
mutex_lock(&buffer->mutex);
get_online_cpus();
nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
if (size < buffer_size) { /*
* Don't succeed if resizing is disabled, as a reader might be
* manipulating the ring buffer and is expecting a sane state while
* this is true.
*/
if (atomic_read(&buffer->resize_disabled))
return -EBUSY;
/* easy case, just free pages */ /* prevent another thread from changing buffer sizes */
if (RB_WARN_ON(buffer, nr_pages >= buffer->pages)) mutex_lock(&buffer->mutex);
goto out_fail;
rm_pages = buffer->pages - nr_pages;
if (cpu_id == RING_BUFFER_ALL_CPUS) {
/* calculate the pages to update */
for_each_buffer_cpu(buffer, cpu) { for_each_buffer_cpu(buffer, cpu) {
cpu_buffer = buffer->buffers[cpu]; cpu_buffer = buffer->buffers[cpu];
rb_remove_pages(cpu_buffer, rm_pages);
}
goto out;
}
/* cpu_buffer->nr_pages_to_update = nr_pages -
* This is a bit more difficult. We only want to add pages cpu_buffer->nr_pages;
* when we can allocate enough for all CPUs. We do this
* by allocating all the pages and storing them on a local
* link list. If we succeed in our allocation, then we
* add these pages to the cpu_buffers. Otherwise we just free
* them all and return -ENOMEM;
*/
if (RB_WARN_ON(buffer, nr_pages <= buffer->pages))
goto out_fail;
new_pages = nr_pages - buffer->pages;
for_each_buffer_cpu(buffer, cpu) {
for (i = 0; i < new_pages; i++) {
struct page *page;
/* /*
* __GFP_NORETRY flag makes sure that the allocation * nothing more to do for removing pages or no update
* fails gracefully without invoking oom-killer and
* the system is not destabilized.
*/ */
bpage = kzalloc_node(ALIGN(sizeof(*bpage), if (cpu_buffer->nr_pages_to_update <= 0)
cache_line_size()), continue;
GFP_KERNEL | __GFP_NORETRY, /*
cpu_to_node(cpu)); * to add pages, make sure all new pages can be
if (!bpage) * allocated without receiving ENOMEM
goto free_pages; */
list_add(&bpage->list, &pages); INIT_LIST_HEAD(&cpu_buffer->new_pages);
page = alloc_pages_node(cpu_to_node(cpu), if (__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
GFP_KERNEL | __GFP_NORETRY, 0); &cpu_buffer->new_pages, cpu)) {
if (!page) /* not enough memory for new pages */
goto free_pages; err = -ENOMEM;
bpage->page = page_address(page); goto out_err;
rb_init_page(bpage->page); }
} }
}
for_each_buffer_cpu(buffer, cpu) { get_online_cpus();
cpu_buffer = buffer->buffers[cpu]; /*
rb_insert_pages(cpu_buffer, &pages, new_pages); * Fire off all the required work handlers
} * We can't schedule on offline CPUs, but it's not necessary
* since we can change their buffer sizes without any race.
*/
for_each_buffer_cpu(buffer, cpu) {
cpu_buffer = buffer->buffers[cpu];
if (!cpu_buffer->nr_pages_to_update)
continue;
if (RB_WARN_ON(buffer, !list_empty(&pages))) if (cpu_online(cpu))
goto out_fail; schedule_work_on(cpu,
&cpu_buffer->update_pages_work);
else
rb_update_pages(cpu_buffer);
}
/* wait for all the updates to complete */
for_each_buffer_cpu(buffer, cpu) {
cpu_buffer = buffer->buffers[cpu];
if (!cpu_buffer->nr_pages_to_update)
continue;
if (cpu_online(cpu))
wait_for_completion(&cpu_buffer->update_done);
cpu_buffer->nr_pages_to_update = 0;
}
put_online_cpus();
} else {
cpu_buffer = buffer->buffers[cpu_id];
if (nr_pages == cpu_buffer->nr_pages)
goto out;
cpu_buffer->nr_pages_to_update = nr_pages -
cpu_buffer->nr_pages;
INIT_LIST_HEAD(&cpu_buffer->new_pages);
if (cpu_buffer->nr_pages_to_update > 0 &&
__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
&cpu_buffer->new_pages, cpu_id)) {
err = -ENOMEM;
goto out_err;
}
get_online_cpus();
if (cpu_online(cpu_id)) {
schedule_work_on(cpu_id,
&cpu_buffer->update_pages_work);
wait_for_completion(&cpu_buffer->update_done);
} else
rb_update_pages(cpu_buffer);
cpu_buffer->nr_pages_to_update = 0;
put_online_cpus();
}
out: out:
buffer->pages = nr_pages; /*
put_online_cpus(); * The ring buffer resize can happen with the ring buffer
* enabled, so that the update disturbs the tracing as little
* as possible. But if the buffer is disabled, we do not need
* to worry about that, and we can take the time to verify
* that the buffer is not corrupt.
*/
if (atomic_read(&buffer->record_disabled)) {
atomic_inc(&buffer->record_disabled);
/*
* Even though the buffer was disabled, we must make sure
* that it is truly disabled before calling rb_check_pages.
* There could have been a race between checking
* record_disable and incrementing it.
*/
synchronize_sched();
for_each_buffer_cpu(buffer, cpu) {
cpu_buffer = buffer->buffers[cpu];
rb_check_pages(cpu_buffer);
}
atomic_dec(&buffer->record_disabled);
}
mutex_unlock(&buffer->mutex); mutex_unlock(&buffer->mutex);
atomic_dec(&buffer->record_disabled);
return size; return size;
free_pages: out_err:
list_for_each_entry_safe(bpage, tmp, &pages, list) { for_each_buffer_cpu(buffer, cpu) {
list_del_init(&bpage->list); struct buffer_page *bpage, *tmp;
free_buffer_page(bpage);
}
put_online_cpus();
mutex_unlock(&buffer->mutex);
atomic_dec(&buffer->record_disabled);
return -ENOMEM;
/* cpu_buffer = buffer->buffers[cpu];
* Something went totally wrong, and we are too paranoid cpu_buffer->nr_pages_to_update = 0;
* to even clean up the mess.
*/ if (list_empty(&cpu_buffer->new_pages))
out_fail: continue;
put_online_cpus();
list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
list) {
list_del_init(&bpage->list);
free_buffer_page(bpage);
}
}
mutex_unlock(&buffer->mutex); mutex_unlock(&buffer->mutex);
atomic_dec(&buffer->record_disabled); return err;
return -1;
} }
EXPORT_SYMBOL_GPL(ring_buffer_resize); EXPORT_SYMBOL_GPL(ring_buffer_resize);
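The resize is now fanned out: each online CPU runs its own update via schedule_work_on(), and the caller joins on one completion per buffer, so no buffer is touched from a foreign CPU while it is live. A rough pthreads sketch of that fan-out/fan-in shape (threads stand in for work items, join for wait_for_completion):

#include <pthread.h>
#include <stdio.h>

#define NBUF 4

static int nr_pages_to_update[NBUF] = { 2, 0, -1, 3 };

static void *update_pages(void *arg)
{
        int i = *(int *)arg;

        if (nr_pages_to_update[i])      /* > 0 adds pages, < 0 removes */
                printf("buffer %d: adjust by %d pages\n",
                       i, nr_pages_to_update[i]);
        return NULL;
}

int main(void)
{
        pthread_t workers[NBUF];
        int ids[NBUF];
        int i;

        for (i = 0; i < NBUF; i++) {    /* "schedule_work_on" each buffer */
                ids[i] = i;
                pthread_create(&workers[i], NULL, update_pages, &ids[i]);
        }
        for (i = 0; i < NBUF; i++)      /* "wait_for_completion" */
                pthread_join(workers[i], NULL);
        return 0;
}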
@ -1447,21 +1674,11 @@ rb_iter_head_event(struct ring_buffer_iter *iter)
return __rb_page_index(iter->head_page, iter->head); return __rb_page_index(iter->head_page, iter->head);
} }
static inline unsigned long rb_page_write(struct buffer_page *bpage)
{
return local_read(&bpage->write) & RB_WRITE_MASK;
}
static inline unsigned rb_page_commit(struct buffer_page *bpage) static inline unsigned rb_page_commit(struct buffer_page *bpage)
{ {
return local_read(&bpage->page->commit); return local_read(&bpage->page->commit);
} }
static inline unsigned long rb_page_entries(struct buffer_page *bpage)
{
return local_read(&bpage->entries) & RB_WRITE_MASK;
}
/* Size is determined by what has been committed */ /* Size is determined by what has been committed */
static inline unsigned rb_page_size(struct buffer_page *bpage) static inline unsigned rb_page_size(struct buffer_page *bpage)
{ {
@ -1510,7 +1727,7 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
* assign the commit to the tail. * assign the commit to the tail.
*/ */
again: again:
max_count = cpu_buffer->buffer->pages * 100; max_count = cpu_buffer->nr_pages * 100;
while (cpu_buffer->commit_page != cpu_buffer->tail_page) { while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
if (RB_WARN_ON(cpu_buffer, !(--max_count))) if (RB_WARN_ON(cpu_buffer, !(--max_count)))
@ -3486,6 +3703,7 @@ ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
iter->cpu_buffer = cpu_buffer; iter->cpu_buffer = cpu_buffer;
atomic_inc(&buffer->resize_disabled);
atomic_inc(&cpu_buffer->record_disabled); atomic_inc(&cpu_buffer->record_disabled);
return iter; return iter;
@ -3548,7 +3766,14 @@ ring_buffer_read_finish(struct ring_buffer_iter *iter)
{ {
struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
/*
* Ring buffer is disabled from recording, here's a good place
* to check the integrity of the ring buffer.
*/
rb_check_pages(cpu_buffer);
atomic_dec(&cpu_buffer->record_disabled); atomic_dec(&cpu_buffer->record_disabled);
atomic_dec(&cpu_buffer->buffer->resize_disabled);
kfree(iter); kfree(iter);
} }
EXPORT_SYMBOL_GPL(ring_buffer_read_finish); EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
@ -3588,9 +3813,18 @@ EXPORT_SYMBOL_GPL(ring_buffer_read);
* ring_buffer_size - return the size of the ring buffer (in bytes) * ring_buffer_size - return the size of the ring buffer (in bytes)
* @buffer: The ring buffer. * @buffer: The ring buffer.
*/ */
unsigned long ring_buffer_size(struct ring_buffer *buffer) unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu)
{ {
return BUF_PAGE_SIZE * buffer->pages; /*
* Earlier, this method returned
* BUF_PAGE_SIZE * buffer->nr_pages
* Since the nr_pages field is now removed, we have converted this to
* return the per cpu buffer value.
*/
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return 0;
return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages;
} }
EXPORT_SYMBOL_GPL(ring_buffer_size); EXPORT_SYMBOL_GPL(ring_buffer_size);
@ -3611,6 +3845,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer->commit_page = cpu_buffer->head_page; cpu_buffer->commit_page = cpu_buffer->head_page;
INIT_LIST_HEAD(&cpu_buffer->reader_page->list); INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
INIT_LIST_HEAD(&cpu_buffer->new_pages);
local_set(&cpu_buffer->reader_page->write, 0); local_set(&cpu_buffer->reader_page->write, 0);
local_set(&cpu_buffer->reader_page->entries, 0); local_set(&cpu_buffer->reader_page->entries, 0);
local_set(&cpu_buffer->reader_page->page->commit, 0); local_set(&cpu_buffer->reader_page->page->commit, 0);
@ -3647,8 +3882,12 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
if (!cpumask_test_cpu(cpu, buffer->cpumask)) if (!cpumask_test_cpu(cpu, buffer->cpumask))
return; return;
atomic_inc(&buffer->resize_disabled);
atomic_inc(&cpu_buffer->record_disabled); atomic_inc(&cpu_buffer->record_disabled);
/* Make sure all commits have finished */
synchronize_sched();
raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
@ -3664,6 +3903,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
atomic_dec(&cpu_buffer->record_disabled); atomic_dec(&cpu_buffer->record_disabled);
atomic_dec(&buffer->resize_disabled);
} }
EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu); EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
@ -3765,8 +4005,11 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
!cpumask_test_cpu(cpu, buffer_b->cpumask)) !cpumask_test_cpu(cpu, buffer_b->cpumask))
goto out; goto out;
cpu_buffer_a = buffer_a->buffers[cpu];
cpu_buffer_b = buffer_b->buffers[cpu];
/* At least make sure the two buffers are somewhat the same */ /* At least make sure the two buffers are somewhat the same */
if (buffer_a->pages != buffer_b->pages) if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages)
goto out; goto out;
ret = -EAGAIN; ret = -EAGAIN;
@ -3780,9 +4023,6 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
if (atomic_read(&buffer_b->record_disabled)) if (atomic_read(&buffer_b->record_disabled))
goto out; goto out;
cpu_buffer_a = buffer_a->buffers[cpu];
cpu_buffer_b = buffer_b->buffers[cpu];
if (atomic_read(&cpu_buffer_a->record_disabled)) if (atomic_read(&cpu_buffer_a->record_disabled))
goto out; goto out;
@ -4071,6 +4311,8 @@ static int rb_cpu_notify(struct notifier_block *self,
struct ring_buffer *buffer = struct ring_buffer *buffer =
container_of(self, struct ring_buffer, cpu_notify); container_of(self, struct ring_buffer, cpu_notify);
long cpu = (long)hcpu; long cpu = (long)hcpu;
int cpu_i, nr_pages_same;
unsigned int nr_pages;
switch (action) { switch (action) {
case CPU_UP_PREPARE: case CPU_UP_PREPARE:
@ -4078,8 +4320,23 @@ static int rb_cpu_notify(struct notifier_block *self,
if (cpumask_test_cpu(cpu, buffer->cpumask)) if (cpumask_test_cpu(cpu, buffer->cpumask))
return NOTIFY_OK; return NOTIFY_OK;
nr_pages = 0;
nr_pages_same = 1;
/* check if all cpu sizes are same */
for_each_buffer_cpu(buffer, cpu_i) {
/* fill in the size from first enabled cpu */
if (nr_pages == 0)
nr_pages = buffer->buffers[cpu_i]->nr_pages;
if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
nr_pages_same = 0;
break;
}
}
/* allocate minimum pages, user can later expand it */
if (!nr_pages_same)
nr_pages = 2;
buffer->buffers[cpu] = buffer->buffers[cpu] =
rb_allocate_cpu_buffer(buffer, cpu); rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
if (!buffer->buffers[cpu]) { if (!buffer->buffers[cpu]) {
WARN(1, "failed to allocate ring buffer on CPU %ld\n", WARN(1, "failed to allocate ring buffer on CPU %ld\n",
cpu); cpu);


@ -87,18 +87,6 @@ static int tracing_disabled = 1;
DEFINE_PER_CPU(int, ftrace_cpu_disabled); DEFINE_PER_CPU(int, ftrace_cpu_disabled);
static inline void ftrace_disable_cpu(void)
{
preempt_disable();
__this_cpu_inc(ftrace_cpu_disabled);
}
static inline void ftrace_enable_cpu(void)
{
__this_cpu_dec(ftrace_cpu_disabled);
preempt_enable();
}
cpumask_var_t __read_mostly tracing_buffer_mask; cpumask_var_t __read_mostly tracing_buffer_mask;
/* /*
@ -629,7 +617,6 @@ ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt) static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{ {
int len; int len;
void *ret;
if (s->len <= s->readpos) if (s->len <= s->readpos)
return -EBUSY; return -EBUSY;
@ -637,9 +624,7 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
len = s->len - s->readpos; len = s->len - s->readpos;
if (cnt > len) if (cnt > len)
cnt = len; cnt = len;
ret = memcpy(buf, s->buffer + s->readpos, cnt); memcpy(buf, s->buffer + s->readpos, cnt);
if (!ret)
return -EFAULT;
s->readpos += cnt; s->readpos += cnt;
return cnt; return cnt;
@ -751,8 +736,6 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
arch_spin_lock(&ftrace_max_lock); arch_spin_lock(&ftrace_max_lock);
ftrace_disable_cpu();
ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu); ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);
if (ret == -EBUSY) { if (ret == -EBUSY) {
@ -766,8 +749,6 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
"Failed to swap buffers due to commit in progress\n"); "Failed to swap buffers due to commit in progress\n");
} }
ftrace_enable_cpu();
WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY); WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
__update_max_tr(tr, tsk, cpu); __update_max_tr(tr, tsk, cpu);
@ -782,8 +763,6 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
* Register a new plugin tracer. * Register a new plugin tracer.
*/ */
int register_tracer(struct tracer *type) int register_tracer(struct tracer *type)
__releases(kernel_lock)
__acquires(kernel_lock)
{ {
struct tracer *t; struct tracer *t;
int ret = 0; int ret = 0;
@ -841,7 +820,8 @@ __acquires(kernel_lock)
/* If we expanded the buffers, make sure the max is expanded too */ /* If we expanded the buffers, make sure the max is expanded too */
if (ring_buffer_expanded && type->use_max_tr) if (ring_buffer_expanded && type->use_max_tr)
ring_buffer_resize(max_tr.buffer, trace_buf_size); ring_buffer_resize(max_tr.buffer, trace_buf_size,
RING_BUFFER_ALL_CPUS);
/* the test is responsible for initializing and enabling */ /* the test is responsible for initializing and enabling */
pr_info("Testing tracer %s: ", type->name); pr_info("Testing tracer %s: ", type->name);
@ -857,7 +837,8 @@ __acquires(kernel_lock)
/* Shrink the max buffer again */ /* Shrink the max buffer again */
if (ring_buffer_expanded && type->use_max_tr) if (ring_buffer_expanded && type->use_max_tr)
ring_buffer_resize(max_tr.buffer, 1); ring_buffer_resize(max_tr.buffer, 1,
RING_BUFFER_ALL_CPUS);
printk(KERN_CONT "PASSED\n"); printk(KERN_CONT "PASSED\n");
} }
@ -917,13 +898,6 @@ out:
mutex_unlock(&trace_types_lock); mutex_unlock(&trace_types_lock);
} }
static void __tracing_reset(struct ring_buffer *buffer, int cpu)
{
ftrace_disable_cpu();
ring_buffer_reset_cpu(buffer, cpu);
ftrace_enable_cpu();
}
void tracing_reset(struct trace_array *tr, int cpu) void tracing_reset(struct trace_array *tr, int cpu)
{ {
struct ring_buffer *buffer = tr->buffer; struct ring_buffer *buffer = tr->buffer;
@ -932,7 +906,7 @@ void tracing_reset(struct trace_array *tr, int cpu)
/* Make sure all commits have finished */ /* Make sure all commits have finished */
synchronize_sched(); synchronize_sched();
__tracing_reset(buffer, cpu); ring_buffer_reset_cpu(buffer, cpu);
ring_buffer_record_enable(buffer); ring_buffer_record_enable(buffer);
} }
@ -950,7 +924,7 @@ void tracing_reset_online_cpus(struct trace_array *tr)
tr->time_start = ftrace_now(tr->cpu); tr->time_start = ftrace_now(tr->cpu);
for_each_online_cpu(cpu) for_each_online_cpu(cpu)
__tracing_reset(buffer, cpu); ring_buffer_reset_cpu(buffer, cpu);
ring_buffer_record_enable(buffer); ring_buffer_record_enable(buffer);
} }
@ -1498,25 +1472,119 @@ static void __trace_userstack(struct trace_array *tr, unsigned long flags)
#endif /* CONFIG_STACKTRACE */ #endif /* CONFIG_STACKTRACE */
/* created for use with alloc_percpu */
struct trace_buffer_struct {
char buffer[TRACE_BUF_SIZE];
};
static struct trace_buffer_struct *trace_percpu_buffer;
static struct trace_buffer_struct *trace_percpu_sirq_buffer;
static struct trace_buffer_struct *trace_percpu_irq_buffer;
static struct trace_buffer_struct *trace_percpu_nmi_buffer;
/*
* The buffer used is dependent on the context. There is a per cpu
* buffer for normal context, softirq context, hard irq context and
* for NMI context. This allows for lockless recording.
*
* Note, if the buffers failed to be allocated, then this returns NULL
*/
static char *get_trace_buf(void)
{
struct trace_buffer_struct *percpu_buffer;
struct trace_buffer_struct *buffer;
/*
* If we have allocated per cpu buffers, then we do not
* need to do any locking.
*/
if (in_nmi())
percpu_buffer = trace_percpu_nmi_buffer;
else if (in_irq())
percpu_buffer = trace_percpu_irq_buffer;
else if (in_softirq())
percpu_buffer = trace_percpu_sirq_buffer;
else
percpu_buffer = trace_percpu_buffer;
if (!percpu_buffer)
return NULL;
buffer = per_cpu_ptr(percpu_buffer, smp_processor_id());
return buffer->buffer;
}
static int alloc_percpu_trace_buffer(void)
{
struct trace_buffer_struct *buffers;
struct trace_buffer_struct *sirq_buffers;
struct trace_buffer_struct *irq_buffers;
struct trace_buffer_struct *nmi_buffers;
buffers = alloc_percpu(struct trace_buffer_struct);
if (!buffers)
goto err_warn;
sirq_buffers = alloc_percpu(struct trace_buffer_struct);
if (!sirq_buffers)
goto err_sirq;
irq_buffers = alloc_percpu(struct trace_buffer_struct);
if (!irq_buffers)
goto err_irq;
nmi_buffers = alloc_percpu(struct trace_buffer_struct);
if (!nmi_buffers)
goto err_nmi;
trace_percpu_buffer = buffers;
trace_percpu_sirq_buffer = sirq_buffers;
trace_percpu_irq_buffer = irq_buffers;
trace_percpu_nmi_buffer = nmi_buffers;
return 0;
err_nmi:
free_percpu(irq_buffers);
err_irq:
free_percpu(sirq_buffers);
err_sirq:
free_percpu(buffers);
err_warn:
WARN(1, "Could not allocate percpu trace_printk buffer");
return -ENOMEM;
}
void trace_printk_init_buffers(void)
{
static int buffers_allocated;
if (buffers_allocated)
return;
if (alloc_percpu_trace_buffer())
return;
pr_info("ftrace: Allocated trace_printk buffers\n");
buffers_allocated = 1;
}
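One buffer per context level is the whole locking story: a handler that interrupts trace_printk() mid-copy picks a different buffer, so no lock is needed. A sketch of the idea with thread-local storage standing in for the percpu allocation (ctx names are illustrative):

#include <stdio.h>

enum ctx { CTX_NORMAL, CTX_SOFTIRQ, CTX_IRQ, CTX_NMI, CTX_MAX };

/* one buffer per context per thread; the kernel uses alloc_percpu() */
static _Thread_local char trace_buf[CTX_MAX][128];

static char *get_trace_buf(enum ctx c)
{
        return trace_buf[c];    /* lockless: nothing else uses this slot */
}

int main(void)
{
        snprintf(get_trace_buf(CTX_NORMAL), 128, "task-level message");
        snprintf(get_trace_buf(CTX_IRQ), 128, "irq-level message");

        printf("%s / %s\n",
               get_trace_buf(CTX_NORMAL), get_trace_buf(CTX_IRQ));
        return 0;
}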
 /**
  * trace_vbprintk - write binary msg to tracing buffer
  *
  */
 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 {
-        static arch_spinlock_t trace_buf_lock =
-                (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
-        static u32 trace_buf[TRACE_BUF_SIZE];
         struct ftrace_event_call *call = &event_bprint;
         struct ring_buffer_event *event;
         struct ring_buffer *buffer;
         struct trace_array *tr = &global_trace;
-        struct trace_array_cpu *data;
         struct bprint_entry *entry;
         unsigned long flags;
-        int disable;
-        int cpu, len = 0, size, pc;
+        char *tbuffer;
+        int len = 0, size, pc;
         if (unlikely(tracing_selftest_running || tracing_disabled))
                 return 0;
@@ -1526,43 +1594,36 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
         pc = preempt_count();
         preempt_disable_notrace();
-        cpu = raw_smp_processor_id();
-        data = tr->data[cpu];
-        disable = atomic_inc_return(&data->disabled);
-        if (unlikely(disable != 1))
+        tbuffer = get_trace_buf();
+        if (!tbuffer) {
+                len = 0;
+                goto out;
+        }
+        len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
+        if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
                 goto out;
-        /* Lockdep uses trace_printk for lock tracing */
-        local_irq_save(flags);
-        arch_spin_lock(&trace_buf_lock);
-        len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);
-        if (len > TRACE_BUF_SIZE || len < 0)
-                goto out_unlock;
+        local_save_flags(flags);
         size = sizeof(*entry) + sizeof(u32) * len;
         buffer = tr->buffer;
         event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
                                           flags, pc);
         if (!event)
-                goto out_unlock;
+                goto out;
         entry = ring_buffer_event_data(event);
         entry->ip = ip;
         entry->fmt = fmt;
-        memcpy(entry->buf, trace_buf, sizeof(u32) * len);
+        memcpy(entry->buf, tbuffer, sizeof(u32) * len);
         if (!filter_check_discard(call, entry, buffer, event)) {
                 ring_buffer_unlock_commit(buffer, event);
                 ftrace_trace_stack(buffer, flags, 6, pc);
         }
-out_unlock:
-        arch_spin_unlock(&trace_buf_lock);
-        local_irq_restore(flags);
 out:
-        atomic_dec_return(&data->disabled);
         preempt_enable_notrace();
         unpause_graph_tracing();
@@ -1588,58 +1649,53 @@ int trace_array_printk(struct trace_array *tr,
 int trace_array_vprintk(struct trace_array *tr,
                         unsigned long ip, const char *fmt, va_list args)
 {
-        static arch_spinlock_t trace_buf_lock = __ARCH_SPIN_LOCK_UNLOCKED;
-        static char trace_buf[TRACE_BUF_SIZE];
         struct ftrace_event_call *call = &event_print;
         struct ring_buffer_event *event;
         struct ring_buffer *buffer;
-        struct trace_array_cpu *data;
-        int cpu, len = 0, size, pc;
+        int len = 0, size, pc;
         struct print_entry *entry;
-        unsigned long irq_flags;
-        int disable;
+        unsigned long flags;
+        char *tbuffer;
         if (tracing_disabled || tracing_selftest_running)
                 return 0;
+        /* Don't pollute graph traces with trace_vprintk internals */
+        pause_graph_tracing();
         pc = preempt_count();
         preempt_disable_notrace();
-        cpu = raw_smp_processor_id();
-        data = tr->data[cpu];
-        disable = atomic_inc_return(&data->disabled);
-        if (unlikely(disable != 1))
+        tbuffer = get_trace_buf();
+        if (!tbuffer) {
+                len = 0;
+                goto out;
+        }
+        len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
+        if (len > TRACE_BUF_SIZE)
                 goto out;
-        pause_graph_tracing();
-        raw_local_irq_save(irq_flags);
-        arch_spin_lock(&trace_buf_lock);
-        len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
+        local_save_flags(flags);
         size = sizeof(*entry) + len + 1;
         buffer = tr->buffer;
         event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
-                                          irq_flags, pc);
+                                          flags, pc);
         if (!event)
-                goto out_unlock;
+                goto out;
         entry = ring_buffer_event_data(event);
         entry->ip = ip;
-        memcpy(&entry->buf, trace_buf, len);
+        memcpy(&entry->buf, tbuffer, len);
         entry->buf[len] = '\0';
         if (!filter_check_discard(call, entry, buffer, event)) {
                 ring_buffer_unlock_commit(buffer, event);
-                ftrace_trace_stack(buffer, irq_flags, 6, pc);
+                ftrace_trace_stack(buffer, flags, 6, pc);
         }
-out_unlock:
-        arch_spin_unlock(&trace_buf_lock);
-        raw_local_irq_restore(irq_flags);
-        unpause_graph_tracing();
 out:
-        atomic_dec_return(&data->disabled);
         preempt_enable_notrace();
+        unpause_graph_tracing();
         return len;
 }
@@ -1652,14 +1708,9 @@ EXPORT_SYMBOL_GPL(trace_vprintk);
 static void trace_iterator_increment(struct trace_iterator *iter)
 {
-        /* Don't allow ftrace to trace into the ring buffers */
-        ftrace_disable_cpu();
         iter->idx++;
         if (iter->buffer_iter[iter->cpu])
                 ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
-        ftrace_enable_cpu();
 }
 static struct trace_entry *
@@ -1669,17 +1720,12 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
         struct ring_buffer_event *event;
         struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];
-        /* Don't allow ftrace to trace into the ring buffers */
-        ftrace_disable_cpu();
         if (buf_iter)
                 event = ring_buffer_iter_peek(buf_iter, ts);
         else
                 event = ring_buffer_peek(iter->tr->buffer, cpu, ts,
                                          lost_events);
-        ftrace_enable_cpu();
         if (event) {
                 iter->ent_size = ring_buffer_event_length(event);
                 return ring_buffer_event_data(event);
@@ -1769,11 +1815,8 @@ void *trace_find_next_entry_inc(struct trace_iterator *iter)
 static void trace_consume(struct trace_iterator *iter)
 {
-        /* Don't allow ftrace to trace into the ring buffers */
-        ftrace_disable_cpu();
         ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts,
                             &iter->lost_events);
-        ftrace_enable_cpu();
 }
 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
@@ -1862,16 +1905,12 @@ static void *s_start(struct seq_file *m, loff_t *pos)
                 iter->cpu = 0;
                 iter->idx = -1;
-                ftrace_disable_cpu();
                 if (cpu_file == TRACE_PIPE_ALL_CPU) {
                         for_each_tracing_cpu(cpu)
                                 tracing_iter_reset(iter, cpu);
                 } else
                         tracing_iter_reset(iter, cpu_file);
-                ftrace_enable_cpu();
                 iter->leftover = 0;
                 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
                         ;
@@ -2332,15 +2371,13 @@ static struct trace_iterator *
 __tracing_open(struct inode *inode, struct file *file)
 {
         long cpu_file = (long) inode->i_private;
-        void *fail_ret = ERR_PTR(-ENOMEM);
         struct trace_iterator *iter;
-        struct seq_file *m;
-        int cpu, ret;
+        int cpu;
         if (tracing_disabled)
                 return ERR_PTR(-ENODEV);
-        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
+        iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
         if (!iter)
                 return ERR_PTR(-ENOMEM);
@@ -2397,32 +2434,15 @@ __tracing_open(struct inode *inode, struct file *file)
                 tracing_iter_reset(iter, cpu);
         }
-        ret = seq_open(file, &tracer_seq_ops);
-        if (ret < 0) {
-                fail_ret = ERR_PTR(ret);
-                goto fail_buffer;
-        }
-        m = file->private_data;
-        m->private = iter;
         mutex_unlock(&trace_types_lock);
         return iter;
-fail_buffer:
-        for_each_tracing_cpu(cpu) {
-                if (iter->buffer_iter[cpu])
-                        ring_buffer_read_finish(iter->buffer_iter[cpu]);
-        }
-        free_cpumask_var(iter->started);
-        tracing_start();
 fail:
         mutex_unlock(&trace_types_lock);
         kfree(iter->trace);
-        kfree(iter);
-        return fail_ret;
+        seq_release_private(inode, file);
+        return ERR_PTR(-ENOMEM);
 }
 int tracing_open_generic(struct inode *inode, struct file *filp)
@@ -2458,11 +2478,10 @@ static int tracing_release(struct inode *inode, struct file *file)
         tracing_start();
         mutex_unlock(&trace_types_lock);
-        seq_release(inode, file);
         mutex_destroy(&iter->mutex);
         free_cpumask_var(iter->started);
         kfree(iter->trace);
-        kfree(iter);
+        seq_release_private(inode, file);
         return 0;
 }
@@ -2648,10 +2667,12 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
                 if (cpumask_test_cpu(cpu, tracing_cpumask) &&
                                 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
                         atomic_inc(&global_trace.data[cpu]->disabled);
+                        ring_buffer_record_disable_cpu(global_trace.buffer, cpu);
                 }
                 if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
                                 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
                         atomic_dec(&global_trace.data[cpu]->disabled);
+                        ring_buffer_record_enable_cpu(global_trace.buffer, cpu);
                 }
         }
         arch_spin_unlock(&ftrace_max_lock);
@@ -2974,7 +2995,14 @@ int tracer_init(struct tracer *t, struct trace_array *tr)
         return t->init(tr);
 }
-static int __tracing_resize_ring_buffer(unsigned long size)
+static void set_buffer_entries(struct trace_array *tr, unsigned long val)
+{
+        int cpu;
+        for_each_tracing_cpu(cpu)
+                tr->data[cpu]->entries = val;
+}
+static int __tracing_resize_ring_buffer(unsigned long size, int cpu)
 {
         int ret;
@@ -2985,19 +3013,32 @@
          */
         ring_buffer_expanded = 1;
-        ret = ring_buffer_resize(global_trace.buffer, size);
+        ret = ring_buffer_resize(global_trace.buffer, size, cpu);
         if (ret < 0)
                 return ret;
         if (!current_trace->use_max_tr)
                 goto out;
-        ret = ring_buffer_resize(max_tr.buffer, size);
+        ret = ring_buffer_resize(max_tr.buffer, size, cpu);
         if (ret < 0) {
-                int r;
+                int r = 0;
+                if (cpu == RING_BUFFER_ALL_CPUS) {
+                        int i;
+                        for_each_tracing_cpu(i) {
+                                r = ring_buffer_resize(global_trace.buffer,
+                                                global_trace.data[i]->entries,
+                                                i);
+                                if (r < 0)
+                                        break;
+                        }
+                } else {
+                        r = ring_buffer_resize(global_trace.buffer,
+                                        global_trace.data[cpu]->entries,
+                                        cpu);
+                }
-                r = ring_buffer_resize(global_trace.buffer,
-                                global_trace.entries);
                 if (r < 0) {
                         /*
                          * AARGH! We are left with different
@@ -3019,43 +3060,39 @@ static int __tracing_resize_ring_buffer(unsigned long size)
                 return ret;
         }
-        max_tr.entries = size;
+        if (cpu == RING_BUFFER_ALL_CPUS)
+                set_buffer_entries(&max_tr, size);
+        else
+                max_tr.data[cpu]->entries = size;
 out:
-        global_trace.entries = size;
+        if (cpu == RING_BUFFER_ALL_CPUS)
+                set_buffer_entries(&global_trace, size);
+        else
+                global_trace.data[cpu]->entries = size;
         return ret;
 }
-static ssize_t tracing_resize_ring_buffer(unsigned long size)
+static ssize_t tracing_resize_ring_buffer(unsigned long size, int cpu_id)
 {
-        int cpu, ret = size;
+        int ret = size;
         mutex_lock(&trace_types_lock);
-        tracing_stop();
-        /* disable all cpu buffers */
-        for_each_tracing_cpu(cpu) {
-                if (global_trace.data[cpu])
-                        atomic_inc(&global_trace.data[cpu]->disabled);
-                if (max_tr.data[cpu])
-                        atomic_inc(&max_tr.data[cpu]->disabled);
+        if (cpu_id != RING_BUFFER_ALL_CPUS) {
+                /* make sure, this cpu is enabled in the mask */
+                if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
+                        ret = -EINVAL;
+                        goto out;
+                }
         }
-        if (size != global_trace.entries)
-                ret = __tracing_resize_ring_buffer(size);
+        ret = __tracing_resize_ring_buffer(size, cpu_id);
         if (ret < 0)
                 ret = -ENOMEM;
-        for_each_tracing_cpu(cpu) {
-                if (global_trace.data[cpu])
-                        atomic_dec(&global_trace.data[cpu]->disabled);
-                if (max_tr.data[cpu])
-                        atomic_dec(&max_tr.data[cpu]->disabled);
-        }
-        tracing_start();
+out:
         mutex_unlock(&trace_types_lock);
         return ret;
@@ -3078,7 +3115,8 @@ int tracing_update_buffers(void)
         mutex_lock(&trace_types_lock);
         if (!ring_buffer_expanded)
-                ret = __tracing_resize_ring_buffer(trace_buf_size);
+                ret = __tracing_resize_ring_buffer(trace_buf_size,
+                                                RING_BUFFER_ALL_CPUS);
         mutex_unlock(&trace_types_lock);
         return ret;
@@ -3102,7 +3140,8 @@ static int tracing_set_tracer(const char *buf)
         mutex_lock(&trace_types_lock);
         if (!ring_buffer_expanded) {
-                ret = __tracing_resize_ring_buffer(trace_buf_size);
+                ret = __tracing_resize_ring_buffer(trace_buf_size,
+                                                RING_BUFFER_ALL_CPUS);
                 if (ret < 0)
                         goto out;
                 ret = 0;
@@ -3128,8 +3167,8 @@ static int tracing_set_tracer(const char *buf)
          * The max_tr ring buffer has some state (e.g. ring->clock) and
          * we want preserve it.
          */
-                ring_buffer_resize(max_tr.buffer, 1);
-                max_tr.entries = 1;
+                ring_buffer_resize(max_tr.buffer, 1, RING_BUFFER_ALL_CPUS);
+                set_buffer_entries(&max_tr, 1);
         }
         destroy_trace_option_files(topts);
@@ -3137,10 +3176,17 @@ static int tracing_set_tracer(const char *buf)
         topts = create_trace_option_files(current_trace);
         if (current_trace->use_max_tr) {
-                ret = ring_buffer_resize(max_tr.buffer, global_trace.entries);
-                if (ret < 0)
-                        goto out;
-                max_tr.entries = global_trace.entries;
+                int cpu;
+                /* we need to make per cpu buffer sizes equivalent */
+                for_each_tracing_cpu(cpu) {
+                        ret = ring_buffer_resize(max_tr.buffer,
+                                        global_trace.data[cpu]->entries,
+                                        cpu);
+                        if (ret < 0)
+                                goto out;
+                        max_tr.data[cpu]->entries =
+                                global_trace.data[cpu]->entries;
+                }
         }
         if (t->init) {
@@ -3642,30 +3688,82 @@ out_err:
         goto out;
 }
+struct ftrace_entries_info {
+        struct trace_array      *tr;
+        int                     cpu;
+};
+static int tracing_entries_open(struct inode *inode, struct file *filp)
+{
+        struct ftrace_entries_info *info;
+        if (tracing_disabled)
+                return -ENODEV;
+        info = kzalloc(sizeof(*info), GFP_KERNEL);
+        if (!info)
+                return -ENOMEM;
+        info->tr = &global_trace;
+        info->cpu = (unsigned long)inode->i_private;
+        filp->private_data = info;
+        return 0;
+}
 static ssize_t
 tracing_entries_read(struct file *filp, char __user *ubuf,
                      size_t cnt, loff_t *ppos)
 {
-        struct trace_array *tr = filp->private_data;
-        char buf[96];
-        int r;
+        struct ftrace_entries_info *info = filp->private_data;
+        struct trace_array *tr = info->tr;
+        char buf[64];
+        int r = 0;
+        ssize_t ret;
         mutex_lock(&trace_types_lock);
-        if (!ring_buffer_expanded)
-                r = sprintf(buf, "%lu (expanded: %lu)\n",
-                            tr->entries >> 10,
-                            trace_buf_size >> 10);
-        else
-                r = sprintf(buf, "%lu\n", tr->entries >> 10);
+        if (info->cpu == RING_BUFFER_ALL_CPUS) {
+                int cpu, buf_size_same;
+                unsigned long size;
+                size = 0;
+                buf_size_same = 1;
+                /* check if all cpu sizes are same */
+                for_each_tracing_cpu(cpu) {
+                        /* fill in the size from first enabled cpu */
+                        if (size == 0)
+                                size = tr->data[cpu]->entries;
+                        if (size != tr->data[cpu]->entries) {
+                                buf_size_same = 0;
+                                break;
+                        }
+                }
+                if (buf_size_same) {
+                        if (!ring_buffer_expanded)
+                                r = sprintf(buf, "%lu (expanded: %lu)\n",
+                                            size >> 10,
+                                            trace_buf_size >> 10);
+                        else
+                                r = sprintf(buf, "%lu\n", size >> 10);
+                } else
+                        r = sprintf(buf, "X\n");
+        } else
+                r = sprintf(buf, "%lu\n", tr->data[info->cpu]->entries >> 10);
         mutex_unlock(&trace_types_lock);
-        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+        ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+        return ret;
 }
 static ssize_t
 tracing_entries_write(struct file *filp, const char __user *ubuf,
                       size_t cnt, loff_t *ppos)
 {
+        struct ftrace_entries_info *info = filp->private_data;
         unsigned long val;
         int ret;
@@ -3680,7 +3778,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
         /* value is in KB */
         val <<= 10;
-        ret = tracing_resize_ring_buffer(val);
+        ret = tracing_resize_ring_buffer(val, info->cpu);
         if (ret < 0)
                 return ret;
@@ -3689,6 +3787,16 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
         return cnt;
 }
+static int
+tracing_entries_release(struct inode *inode, struct file *filp)
+{
+        struct ftrace_entries_info *info = filp->private_data;
+        kfree(info);
+        return 0;
+}
 static ssize_t
 tracing_total_entries_read(struct file *filp, char __user *ubuf,
                                 size_t cnt, loff_t *ppos)
@@ -3700,7 +3808,7 @@ tracing_total_entries_read(struct file *filp, char __user *ubuf,
         mutex_lock(&trace_types_lock);
         for_each_tracing_cpu(cpu) {
-                size += tr->entries >> 10;
+                size += tr->data[cpu]->entries >> 10;
                 if (!ring_buffer_expanded)
                         expanded_size += trace_buf_size >> 10;
         }
@@ -3734,7 +3842,7 @@ tracing_free_buffer_release(struct inode *inode, struct file *filp)
         if (trace_flags & TRACE_ITER_STOP_ON_FREE)
                 tracing_off();
         /* resize the ring buffer to 0 */
-        tracing_resize_ring_buffer(0);
+        tracing_resize_ring_buffer(0, RING_BUFFER_ALL_CPUS);
         return 0;
 }
@@ -3749,14 +3857,14 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
         struct print_entry *entry;
         unsigned long irq_flags;
         struct page *pages[2];
+        void *map_page[2];
         int nr_pages = 1;
         ssize_t written;
-        void *page1;
-        void *page2;
         int offset;
         int size;
         int len;
         int ret;
+        int i;
         if (tracing_disabled)
                 return -EINVAL;
@@ -3795,9 +3903,8 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
                 goto out;
         }
-        page1 = kmap_atomic(pages[0]);
-        if (nr_pages == 2)
-                page2 = kmap_atomic(pages[1]);
+        for (i = 0; i < nr_pages; i++)
+                map_page[i] = kmap_atomic(pages[i]);
         local_save_flags(irq_flags);
         size = sizeof(*entry) + cnt + 2; /* possible \n added */
@@ -3815,10 +3922,10 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
         if (nr_pages == 2) {
                 len = PAGE_SIZE - offset;
-                memcpy(&entry->buf, page1 + offset, len);
-                memcpy(&entry->buf[len], page2, cnt - len);
+                memcpy(&entry->buf, map_page[0] + offset, len);
+                memcpy(&entry->buf[len], map_page[1], cnt - len);
         } else
-                memcpy(&entry->buf, page1 + offset, cnt);
+                memcpy(&entry->buf, map_page[0] + offset, cnt);
         if (entry->buf[cnt - 1] != '\n') {
                 entry->buf[cnt] = '\n';
@@ -3833,11 +3940,10 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
         *fpos += written;
 out_unlock:
-        if (nr_pages == 2)
-                kunmap_atomic(page2);
-        kunmap_atomic(page1);
-        while (nr_pages > 0)
-                put_page(pages[--nr_pages]);
+        for (i = 0; i < nr_pages; i++){
+                kunmap_atomic(map_page[i]);
+                put_page(pages[i]);
+        }
 out:
         return written;
 }
@@ -3933,9 +4039,10 @@ static const struct file_operations tracing_pipe_fops = {
 };
 static const struct file_operations tracing_entries_fops = {
-        .open           = tracing_open_generic,
+        .open           = tracing_entries_open,
         .read           = tracing_entries_read,
         .write          = tracing_entries_write,
+        .release        = tracing_entries_release,
         .llseek         = generic_file_llseek,
 };
@@ -4367,6 +4474,9 @@ static void tracing_init_debugfs_percpu(long cpu)
         struct dentry *d_cpu;
         char cpu_dir[30]; /* 30 characters should be more than enough */
+        if (!d_percpu)
+                return;
         snprintf(cpu_dir, 30, "cpu%ld", cpu);
         d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
         if (!d_cpu) {
@@ -4387,6 +4497,9 @@ static void tracing_init_debugfs_percpu(long cpu)
         trace_create_file("stats", 0444, d_cpu,
                         (void *) cpu, &tracing_stats_fops);
+        trace_create_file("buffer_size_kb", 0444, d_cpu,
+                        (void *) cpu, &tracing_entries_fops);
 }
 #ifdef CONFIG_FTRACE_SELFTEST
@@ -4718,7 +4831,7 @@ static __init int tracer_init_debugfs(void)
                         (void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops);
         trace_create_file("buffer_size_kb", 0644, d_tracer,
-                        &global_trace, &tracing_entries_fops);
+                        (void *) RING_BUFFER_ALL_CPUS, &tracing_entries_fops);
         trace_create_file("buffer_total_size_kb", 0444, d_tracer,
                         &global_trace, &tracing_total_entries_fops);
@@ -4957,6 +5070,10 @@ __init static int tracer_alloc_buffers(void)
         if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
                 goto out_free_buffer_mask;
+        /* Only allocate trace_printk buffers if a trace_printk exists */
+        if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
+                trace_printk_init_buffers();
         /* To save memory, keep the ring buffer size to its minimum */
         if (ring_buffer_expanded)
                 ring_buf_size = trace_buf_size;
@@ -4975,7 +5092,6 @@ __init static int tracer_alloc_buffers(void)
                 WARN_ON(1);
                 goto out_free_cpumask;
         }
-        global_trace.entries = ring_buffer_size(global_trace.buffer);
         if (global_trace.buffer_disabled)
                 tracing_off();
@@ -4988,7 +5104,6 @@ __init static int tracer_alloc_buffers(void)
                 ring_buffer_free(global_trace.buffer);
                 goto out_free_cpumask;
         }
-        max_tr.entries = 1;
 #endif
         /* Allocate the first page for all buffers */
@@ -4997,6 +5112,12 @@ __init static int tracer_alloc_buffers(void)
                 max_tr.data[i] = &per_cpu(max_tr_data, i);
         }
+        set_buffer_entries(&global_trace,
+                           ring_buffer_size(global_trace.buffer, 0));
+#ifdef CONFIG_TRACER_MAX_TRACE
+        set_buffer_entries(&max_tr, 1);
+#endif
         trace_init_cmdlines();
         register_tracer(&nop_trace);


@@ -131,6 +131,7 @@ struct trace_array_cpu {
         atomic_t                disabled;
         void                    *buffer_page;   /* ring buffer spare */
+        unsigned long           entries;
         unsigned long           saved_latency;
         unsigned long           critical_start;
         unsigned long           critical_end;
@@ -152,7 +153,6 @@ struct trace_array_cpu {
  */
 struct trace_array {
         struct ring_buffer      *buffer;
-        unsigned long           entries;
         int                     cpu;
         int                     buffer_disabled;
         cycle_t                 time_start;
@@ -826,6 +826,8 @@ extern struct list_head ftrace_events;
 extern const char *__start___trace_bprintk_fmt[];
 extern const char *__stop___trace_bprintk_fmt[];
+void trace_printk_init_buffers(void);
 #undef FTRACE_ENTRY
 #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter)    \
 extern struct ftrace_event_call                                        \


@@ -51,6 +51,10 @@ void hold_module_trace_bprintk_format(const char **start, const char **end)
         const char **iter;
         char *fmt;
+        /* allocate the trace_printk per cpu buffers */
+        if (start != end)
+                trace_printk_init_buffers();
         mutex_lock(&btrace_mutex);
         for (iter = start; iter < end; iter++) {
                 struct trace_bprintk_fmt *tb_fmt = lookup_format(*iter);

tools/Makefile (new file, 77 lines)

@@ -0,0 +1,77 @@
include scripts/Makefile.include
help:
@echo 'Possible targets:'
@echo ''
@echo ' cpupower - a tool for all things x86 CPU power'
@echo ' firewire - the userspace part of nosy, an IEEE-1394 traffic sniffer'
@echo ' lguest - a minimal 32-bit x86 hypervisor'
@echo ' perf - Linux performance measurement and analysis tool'
@echo ' selftests - various kernel selftests'
@echo ' turbostat - Intel CPU idle stats and freq reporting tool'
@echo ' usb - USB testing tools'
@echo ' virtio - vhost test module'
@echo ' vm - misc vm tools'
@echo ' x86_energy_perf_policy - Intel energy policy tool'
@echo ''
@echo 'You can do:'
@echo ' $$ make -C tools/ <tool>_install'
@echo ''
@echo ' from the kernel command line to build and install one of'
@echo ' the tools above'
@echo ''
@echo ' $$ make tools/install'
@echo ''
@echo ' installs all tools.'
@echo ''
@echo 'Cleaning targets:'
@echo ''
@echo ' all of the above with the "_clean" string appended cleans'
@echo ' the respective build directory.'
@echo ' clean: a summary clean target to clean _all_ folders'
cpupower: FORCE
$(QUIET_SUBDIR0)power/$@/ $(QUIET_SUBDIR1)
firewire lguest perf usb virtio vm: FORCE
$(QUIET_SUBDIR0)$@/ $(QUIET_SUBDIR1)
selftests: FORCE
$(QUIET_SUBDIR0)testing/$@/ $(QUIET_SUBDIR1)
turbostat x86_energy_perf_policy: FORCE
$(QUIET_SUBDIR0)power/x86/$@/ $(QUIET_SUBDIR1)
cpupower_install:
$(QUIET_SUBDIR0)power/$(@:_install=)/ $(QUIET_SUBDIR1) install
firewire_install lguest_install perf_install usb_install virtio_install vm_install:
$(QUIET_SUBDIR0)$(@:_install=)/ $(QUIET_SUBDIR1) install
selftests_install:
$(QUIET_SUBDIR0)testing/$(@:_install=)/ $(QUIET_SUBDIR1) install
turbostat_install x86_energy_perf_policy_install:
$(QUIET_SUBDIR0)power/x86/$(@:_install=)/ $(QUIET_SUBDIR1) install
install: cpupower_install firewire_install lguest_install perf_install \
selftests_install turbostat_install usb_install virtio_install \
vm_install x86_energy_perf_policy_install
cpupower_clean:
$(QUIET_SUBDIR0)power/cpupower/ $(QUIET_SUBDIR1) clean
firewire_clean lguest_clean perf_clean usb_clean virtio_clean vm_clean:
$(QUIET_SUBDIR0)$(@:_clean=)/ $(QUIET_SUBDIR1) clean
selftests_clean:
$(QUIET_SUBDIR0)testing/$(@:_clean=)/ $(QUIET_SUBDIR1) clean
turbostat_clean x86_energy_perf_policy_clean:
$(QUIET_SUBDIR0)power/x86/$(@:_clean=)/ $(QUIET_SUBDIR1) clean
clean: cpupower_clean firewire_clean lguest_clean perf_clean selftests_clean \
turbostat_clean usb_clean virtio_clean vm_clean \
x86_energy_perf_policy_clean
.PHONY: FORCE

tools/lib/traceevent/Makefile (new file)

@@ -0,0 +1,303 @@
# trace-cmd version
EP_VERSION = 1
EP_PATCHLEVEL = 1
EP_EXTRAVERSION = 0
# file format version
FILE_VERSION = 6
MAKEFLAGS += --no-print-directory
# Makefiles suck: This macro sets a default value of $(2) for the
# variable named by $(1), unless the variable has been set by
# environment or command line. This is necessary for CC and AR
# because make sets default values, so the simpler ?= approach
# won't work as expected.
define allow-override
$(if $(or $(findstring environment,$(origin $(1))),\
$(findstring command line,$(origin $(1)))),,\
$(eval $(1) = $(2)))
endef
# Allow setting CC and AR, or setting CROSS_COMPILE as a prefix.
$(call allow-override,CC,$(CROSS_COMPILE)gcc)
$(call allow-override,AR,$(CROSS_COMPILE)ar)
EXT = -std=gnu99
INSTALL = install
# Use DESTDIR for installing into a different root directory.
# This is useful for building a package. The program will be
# installed in this directory as if it was the root directory.
# Then the build tool can move it later.
DESTDIR ?=
DESTDIR_SQ = '$(subst ','\'',$(DESTDIR))'
prefix ?= /usr/local
bindir_relative = bin
bindir = $(prefix)/$(bindir_relative)
man_dir = $(prefix)/share/man
man_dir_SQ = '$(subst ','\'',$(man_dir))'
html_install = $(prefix)/share/kernelshark/html
html_install_SQ = '$(subst ','\'',$(html_install))'
img_install = $(prefix)/share/kernelshark/html/images
img_install_SQ = '$(subst ','\'',$(img_install))'
export man_dir man_dir_SQ html_install html_install_SQ INSTALL
export img_install img_install_SQ
export DESTDIR DESTDIR_SQ
# copy a bit from Linux kbuild
ifeq ("$(origin V)", "command line")
VERBOSE = $(V)
endif
ifndef VERBOSE
VERBOSE = 0
endif
ifeq ("$(origin O)", "command line")
BUILD_OUTPUT := $(O)
endif
ifeq ($(BUILD_SRC),)
ifneq ($(BUILD_OUTPUT),)
define build_output
$(if $(VERBOSE:1=),@)$(MAKE) -C $(BUILD_OUTPUT) \
BUILD_SRC=$(CURDIR) -f $(CURDIR)/Makefile $1
endef
saved-output := $(BUILD_OUTPUT)
BUILD_OUTPUT := $(shell cd $(BUILD_OUTPUT) && /bin/pwd)
$(if $(BUILD_OUTPUT),, \
$(error output directory "$(saved-output)" does not exist))
all: sub-make
gui: force
$(call build_output, all_cmd)
$(filter-out gui,$(MAKECMDGOALS)): sub-make
sub-make: force
$(call build_output, $(MAKECMDGOALS))
# Leave processing to above invocation of make
skip-makefile := 1
endif # BUILD_OUTPUT
endif # BUILD_SRC
# We process the rest of the Makefile if this is the final invocation of make
ifeq ($(skip-makefile),)
srctree := $(if $(BUILD_SRC),$(BUILD_SRC),$(CURDIR))
objtree := $(CURDIR)
src := $(srctree)
obj := $(objtree)
export prefix bindir src obj
# Shell quotes
bindir_SQ = $(subst ','\'',$(bindir))
bindir_relative_SQ = $(subst ','\'',$(bindir_relative))
LIB_FILE = libtraceevent.a libtraceevent.so
CONFIG_INCLUDES =
CONFIG_LIBS =
CONFIG_FLAGS =
VERSION = $(EP_VERSION)
PATCHLEVEL = $(EP_PATCHLEVEL)
EXTRAVERSION = $(EP_EXTRAVERSION)
OBJ = $@
N =
export Q VERBOSE
EVENT_PARSE_VERSION = $(EP_VERSION).$(EP_PATCHLEVEL).$(EP_EXTRAVERSION)
INCLUDES = -I. -I/usr/local/include $(CONFIG_INCLUDES)
# Set compile option CFLAGS if not set elsewhere
CFLAGS ?= -g -Wall
# Append required CFLAGS
override CFLAGS += $(CONFIG_FLAGS) $(INCLUDES) $(PLUGIN_DIR_SQ)
override CFLAGS += $(udis86-flags)
ifeq ($(VERBOSE),1)
Q =
print_compile =
print_app_build =
print_fpic_compile =
print_shared_lib_compile =
print_plugin_obj_compile =
print_plugin_build =
print_install =
else
Q = @
print_compile = echo ' CC '$(OBJ);
print_app_build = echo ' BUILD '$(OBJ);
print_fpic_compile = echo ' CC FPIC '$(OBJ);
print_shared_lib_compile = echo ' BUILD SHARED LIB '$(OBJ);
print_plugin_obj_compile = echo ' CC PLUGIN OBJ '$(OBJ);
print_plugin_build = echo ' CC PLUGIN '$(OBJ);
print_static_lib_build = echo ' BUILD STATIC LIB '$(OBJ);
print_install = echo ' INSTALL '$1' to $(DESTDIR_SQ)$2';
endif
do_fpic_compile = \
($(print_fpic_compile) \
$(CC) -c $(CFLAGS) $(EXT) -fPIC $< -o $@)
do_app_build = \
($(print_app_build) \
$(CC) $^ -rdynamic -o $@ $(CONFIG_LIBS) $(LIBS))
do_compile_shared_library = \
($(print_shared_lib_compile) \
$(CC) --shared $^ -o $@)
do_compile_plugin_obj = \
($(print_plugin_obj_compile) \
$(CC) -c $(CFLAGS) -fPIC -o $@ $<)
do_plugin_build = \
($(print_plugin_build) \
$(CC) $(CFLAGS) -shared -nostartfiles -o $@ $<)
do_build_static_lib = \
($(print_static_lib_build) \
$(RM) $@; $(AR) rcs $@ $^)
define do_compile
$(print_compile) \
$(CC) -c $(CFLAGS) $(EXT) $< -o $(obj)/$@;
endef
$(obj)/%.o: $(src)/%.c
$(Q)$(call do_compile)
%.o: $(src)/%.c
$(Q)$(call do_compile)
PEVENT_LIB_OBJS = event-parse.o trace-seq.o parse-filter.o parse-utils.o
ALL_OBJS = $(PEVENT_LIB_OBJS)
CMD_TARGETS = $(LIB_FILE)
TARGETS = $(CMD_TARGETS)
all: all_cmd
all_cmd: $(CMD_TARGETS)
libtraceevent.so: $(PEVENT_LIB_OBJS)
$(Q)$(do_compile_shared_library)
libtraceevent.a: $(PEVENT_LIB_OBJS)
$(Q)$(do_build_static_lib)
$(PEVENT_LIB_OBJS): %.o: $(src)/%.c
$(Q)$(do_fpic_compile)
define make_version.h
(echo '/* This file is automatically generated. Do not modify. */'; \
echo \#define VERSION_CODE $(shell \
expr $(VERSION) \* 256 + $(PATCHLEVEL)); \
echo '#define EXTRAVERSION ' $(EXTRAVERSION); \
echo '#define VERSION_STRING "'$(VERSION).$(PATCHLEVEL).$(EXTRAVERSION)'"'; \
echo '#define FILE_VERSION '$(FILE_VERSION); \
) > $1
endef
define update_version.h
($(call make_version.h, $@.tmp); \
if [ -r $@ ] && cmp -s $@ $@.tmp; then \
rm -f $@.tmp; \
else \
echo ' UPDATE $@'; \
mv -f $@.tmp $@; \
fi);
endef
ep_version.h: force
$(Q)$(N)$(call update_version.h)
VERSION_FILES = ep_version.h
define update_dir
(echo $1 > $@.tmp; \
if [ -r $@ ] && cmp -s $@ $@.tmp; then \
rm -f $@.tmp; \
else \
echo ' UPDATE $@'; \
mv -f $@.tmp $@; \
fi);
endef
## make deps
all_objs := $(sort $(ALL_OBJS))
all_deps := $(all_objs:%.o=.%.d)
define check_deps
$(CC) -M $(CFLAGS) $< > $@;
endef
$(gui_deps): ks_version.h
$(non_gui_deps): tc_version.h
$(all_deps): .%.d: $(src)/%.c
$(Q)$(call check_deps)
$(all_objs) : %.o : .%.d
dep_includes := $(wildcard $(all_deps))
ifneq ($(dep_includes),)
include $(dep_includes)
endif
tags: force
$(RM) tags
find . -name '*.[ch]' | xargs ctags --extra=+f --c-kinds=+px
TAGS: force
$(RM) TAGS
find . -name '*.[ch]' | xargs etags
define do_install
$(print_install) \
if [ ! -d '$(DESTDIR_SQ)$2' ]; then \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$2'; \
fi; \
$(INSTALL) $1 '$(DESTDIR_SQ)$2'
endef
install_lib: all_cmd install_plugins install_python
$(Q)$(call do_install,$(LIB_FILE),$(bindir_SQ))
install: install_lib
clean:
$(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d
$(RM) tags TAGS
endif # skip-makefile
PHONY += force
force:
# Declare the contents of the .PHONY variable as phony. We keep that
# information in a variable so we can use it in if_changed and friends.
.PHONY: $(PHONY)

tools/lib/traceevent/event-parse.c: diff not shown because of its size.

tools/lib/traceevent/event-parse.h (new file)

@@ -0,0 +1,804 @@
/*
* Copyright (C) 2009, 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation;
* version 2.1 of the License (not later!)
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#ifndef _PARSE_EVENTS_H
#define _PARSE_EVENTS_H
#include <stdarg.h>
#include <regex.h>
#ifndef __unused
#define __unused __attribute__ ((unused))
#endif
/* ----------------------- trace_seq ----------------------- */
#ifndef TRACE_SEQ_BUF_SIZE
#define TRACE_SEQ_BUF_SIZE 4096
#endif
#ifndef DEBUG_RECORD
#define DEBUG_RECORD 0
#endif
struct pevent_record {
unsigned long long ts;
unsigned long long offset;
long long missed_events; /* buffer dropped events before */
int record_size; /* size of binary record */
int size; /* size of data */
void *data;
int cpu;
int ref_count;
int locked; /* Do not free, even if ref_count is zero */
void *private;
#if DEBUG_RECORD
struct pevent_record *prev;
struct pevent_record *next;
long alloc_addr;
#endif
};
/*
* Trace sequences are used to allow a function to call several other functions
* to create a string of data to use (up to a max of PAGE_SIZE).
*/
struct trace_seq {
char *buffer;
unsigned int buffer_size;
unsigned int len;
unsigned int readpos;
};
void trace_seq_init(struct trace_seq *s);
void trace_seq_destroy(struct trace_seq *s);
extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
__attribute__ ((format (printf, 2, 3)));
extern int trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args)
__attribute__ ((format (printf, 2, 0)));
extern int trace_seq_puts(struct trace_seq *s, const char *str);
extern int trace_seq_putc(struct trace_seq *s, unsigned char c);
extern void trace_seq_terminate(struct trace_seq *s);
extern int trace_seq_do_printf(struct trace_seq *s);
/* ----------------------- pevent ----------------------- */
struct pevent;
struct event_format;
typedef int (*pevent_event_handler_func)(struct trace_seq *s,
struct pevent_record *record,
struct event_format *event,
void *context);
typedef int (*pevent_plugin_load_func)(struct pevent *pevent);
typedef int (*pevent_plugin_unload_func)(void);
struct plugin_option {
struct plugin_option *next;
void *handle;
char *file;
char *name;
char *plugin_alias;
char *description;
char *value;
void *private;
int set;
};
/*
* Plugin hooks that can be called:
*
* PEVENT_PLUGIN_LOADER: (required)
 * The function name to initialize the plugin.
*
* int PEVENT_PLUGIN_LOADER(struct pevent *pevent)
*
* PEVENT_PLUGIN_UNLOADER: (optional)
* The function called just before unloading
*
* int PEVENT_PLUGIN_UNLOADER(void)
*
* PEVENT_PLUGIN_OPTIONS: (optional)
* Plugin options that can be set before loading
*
* struct plugin_option PEVENT_PLUGIN_OPTIONS[] = {
* {
* .name = "option-name",
* .plugin_alias = "overide-file-name", (optional)
* .description = "description of option to show users",
* },
* {
* .name = NULL,
* },
* };
*
* Array must end with .name = NULL;
*
*
* .plugin_alias is used to give a shorter name to access
 * the variable. Useful if a plugin handles more than one event.
*
* PEVENT_PLUGIN_ALIAS: (optional)
* The name to use for finding options (uses filename if not defined)
*/
#define PEVENT_PLUGIN_LOADER pevent_plugin_loader
#define PEVENT_PLUGIN_UNLOADER pevent_plugin_unloader
#define PEVENT_PLUGIN_OPTIONS pevent_plugin_options
#define PEVENT_PLUGIN_ALIAS pevent_plugin_alias
#define _MAKE_STR(x) #x
#define MAKE_STR(x) _MAKE_STR(x)
#define PEVENT_PLUGIN_LOADER_NAME MAKE_STR(PEVENT_PLUGIN_LOADER)
#define PEVENT_PLUGIN_UNLOADER_NAME MAKE_STR(PEVENT_PLUGIN_UNLOADER)
#define PEVENT_PLUGIN_OPTIONS_NAME MAKE_STR(PEVENT_PLUGIN_OPTIONS)
#define PEVENT_PLUGIN_ALIAS_NAME MAKE_STR(PEVENT_PLUGIN_ALIAS)
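For reference, a minimal plugin built against the hooks documented above could look like the sketch below (an editor's illustration, not shipped with this commit; the sched/sched_switch event, its next_pid field, and the use of -1 as a match-by-name id are assumptions):

#include "event-parse.h"

/* Illustrative handler: render one assumed field of sched_switch. */
static int sched_switch_handler(struct trace_seq *s,
                                struct pevent_record *record,
                                struct event_format *event,
                                void *context)
{
        /* next_pid is assumed to exist in this event's format */
        pevent_print_num_field(s, "next_pid=%llu", event, "next_pid",
                               record, 1);
        return 0;
}

int PEVENT_PLUGIN_LOADER(struct pevent *pevent)
{
        /* an id of -1 is assumed here to mean "look the event up by name" */
        return pevent_register_event_handler(pevent, -1,
                                             (char *)"sched",
                                             (char *)"sched_switch",
                                             sched_switch_handler, NULL);
}

int PEVENT_PLUGIN_UNLOADER(void)
{
        return 0;
}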
#define NSECS_PER_SEC 1000000000ULL
#define NSECS_PER_USEC 1000ULL
enum format_flags {
FIELD_IS_ARRAY = 1,
FIELD_IS_POINTER = 2,
FIELD_IS_SIGNED = 4,
FIELD_IS_STRING = 8,
FIELD_IS_DYNAMIC = 16,
FIELD_IS_LONG = 32,
FIELD_IS_FLAG = 64,
FIELD_IS_SYMBOLIC = 128,
};
struct format_field {
struct format_field *next;
struct event_format *event;
char *type;
char *name;
int offset;
int size;
unsigned int arraylen;
unsigned int elementsize;
unsigned long flags;
};
struct format {
int nr_common;
int nr_fields;
struct format_field *common_fields;
struct format_field *fields;
};
struct print_arg_atom {
char *atom;
};
struct print_arg_string {
char *string;
int offset;
};
struct print_arg_field {
char *name;
struct format_field *field;
};
struct print_flag_sym {
struct print_flag_sym *next;
char *value;
char *str;
};
struct print_arg_typecast {
char *type;
struct print_arg *item;
};
struct print_arg_flags {
struct print_arg *field;
char *delim;
struct print_flag_sym *flags;
};
struct print_arg_symbol {
struct print_arg *field;
struct print_flag_sym *symbols;
};
struct print_arg_dynarray {
struct format_field *field;
struct print_arg *index;
};
struct print_arg;
struct print_arg_op {
char *op;
int prio;
struct print_arg *left;
struct print_arg *right;
};
struct pevent_function_handler;
struct print_arg_func {
struct pevent_function_handler *func;
struct print_arg *args;
};
enum print_arg_type {
PRINT_NULL,
PRINT_ATOM,
PRINT_FIELD,
PRINT_FLAGS,
PRINT_SYMBOL,
PRINT_TYPE,
PRINT_STRING,
PRINT_BSTRING,
PRINT_DYNAMIC_ARRAY,
PRINT_OP,
PRINT_FUNC,
};
struct print_arg {
struct print_arg *next;
enum print_arg_type type;
union {
struct print_arg_atom atom;
struct print_arg_field field;
struct print_arg_typecast typecast;
struct print_arg_flags flags;
struct print_arg_symbol symbol;
struct print_arg_func func;
struct print_arg_string string;
struct print_arg_op op;
struct print_arg_dynarray dynarray;
};
};
struct print_fmt {
char *format;
struct print_arg *args;
};
struct event_format {
struct pevent *pevent;
char *name;
int id;
int flags;
struct format format;
struct print_fmt print_fmt;
char *system;
pevent_event_handler_func handler;
void *context;
};
enum {
EVENT_FL_ISFTRACE = 0x01,
EVENT_FL_ISPRINT = 0x02,
EVENT_FL_ISBPRINT = 0x04,
EVENT_FL_ISFUNCENT = 0x10,
EVENT_FL_ISFUNCRET = 0x20,
EVENT_FL_FAILED = 0x80000000
};
enum event_sort_type {
EVENT_SORT_ID,
EVENT_SORT_NAME,
EVENT_SORT_SYSTEM,
};
enum event_type {
EVENT_ERROR,
EVENT_NONE,
EVENT_SPACE,
EVENT_NEWLINE,
EVENT_OP,
EVENT_DELIM,
EVENT_ITEM,
EVENT_DQUOTE,
EVENT_SQUOTE,
};
typedef unsigned long long (*pevent_func_handler)(struct trace_seq *s,
unsigned long long *args);
enum pevent_func_arg_type {
PEVENT_FUNC_ARG_VOID,
PEVENT_FUNC_ARG_INT,
PEVENT_FUNC_ARG_LONG,
PEVENT_FUNC_ARG_STRING,
PEVENT_FUNC_ARG_PTR,
PEVENT_FUNC_ARG_MAX_TYPES
};
enum pevent_flag {
PEVENT_NSEC_OUTPUT = 1, /* output in NSECS */
};
struct cmdline;
struct cmdline_list;
struct func_map;
struct func_list;
struct event_handler;
struct pevent {
int ref_count;
int header_page_ts_offset;
int header_page_ts_size;
int header_page_size_offset;
int header_page_size_size;
int header_page_data_offset;
int header_page_data_size;
int header_page_overwrite;
int file_bigendian;
int host_bigendian;
int latency_format;
int old_format;
int cpus;
int long_size;
struct cmdline *cmdlines;
struct cmdline_list *cmdlist;
int cmdline_count;
struct func_map *func_map;
struct func_list *funclist;
unsigned int func_count;
struct printk_map *printk_map;
struct printk_list *printklist;
unsigned int printk_count;
struct event_format **events;
int nr_events;
struct event_format **sort_events;
enum event_sort_type last_type;
int type_offset;
int type_size;
int pid_offset;
int pid_size;
int pc_offset;
int pc_size;
int flags_offset;
int flags_size;
int ld_offset;
int ld_size;
int print_raw;
int test_filters;
int flags;
struct format_field *bprint_ip_field;
struct format_field *bprint_fmt_field;
struct format_field *bprint_buf_field;
struct event_handler *handlers;
struct pevent_function_handler *func_handlers;
/* cache */
struct event_format *last_event;
};
static inline void pevent_set_flag(struct pevent *pevent, int flag)
{
pevent->flags |= flag;
}
static inline unsigned short
__data2host2(struct pevent *pevent, unsigned short data)
{
unsigned short swap;
if (pevent->host_bigendian == pevent->file_bigendian)
return data;
swap = ((data & 0xffULL) << 8) |
((data & (0xffULL << 8)) >> 8);
return swap;
}
static inline unsigned int
__data2host4(struct pevent *pevent, unsigned int data)
{
unsigned int swap;
if (pevent->host_bigendian == pevent->file_bigendian)
return data;
swap = ((data & 0xffULL) << 24) |
((data & (0xffULL << 8)) << 8) |
((data & (0xffULL << 16)) >> 8) |
((data & (0xffULL << 24)) >> 24);
return swap;
}
static inline unsigned long long
__data2host8(struct pevent *pevent, unsigned long long data)
{
unsigned long long swap;
if (pevent->host_bigendian == pevent->file_bigendian)
return data;
swap = ((data & 0xffULL) << 56) |
((data & (0xffULL << 8)) << 40) |
((data & (0xffULL << 16)) << 24) |
((data & (0xffULL << 24)) << 8) |
((data & (0xffULL << 32)) >> 8) |
((data & (0xffULL << 40)) >> 24) |
((data & (0xffULL << 48)) >> 40) |
((data & (0xffULL << 56)) >> 56);
return swap;
}
#define data2host2(pevent, ptr) __data2host2(pevent, *(unsigned short *)(ptr))
#define data2host4(pevent, ptr) __data2host4(pevent, *(unsigned int *)(ptr))
#define data2host8(pevent, ptr) \
({ \
unsigned long long __val; \
\
memcpy(&__val, (ptr), sizeof(unsigned long long)); \
__data2host8(pevent, __val); \
})
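The byte-swap arithmetic can be checked in isolation; this small standalone program (an editor's sketch, independent of struct pevent) applies the same expression as __data2host2():

#include <stdio.h>

int main(void)
{
        unsigned short data = 0x1234;
        unsigned short swap = ((data & 0xffULL) << 8) |
                              ((data & (0xffULL << 8)) >> 8);

        /* prints 0x1234 -> 0x3412: the two bytes change places */
        printf("%#x -> %#x\n", data, swap);
        return 0;
}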
/* taken from kernel/trace/trace.h */
enum trace_flag_type {
TRACE_FLAG_IRQS_OFF = 0x01,
TRACE_FLAG_IRQS_NOSUPPORT = 0x02,
TRACE_FLAG_NEED_RESCHED = 0x04,
TRACE_FLAG_HARDIRQ = 0x08,
TRACE_FLAG_SOFTIRQ = 0x10,
};
int pevent_register_comm(struct pevent *pevent, const char *comm, int pid);
int pevent_register_function(struct pevent *pevent, char *name,
unsigned long long addr, char *mod);
int pevent_register_print_string(struct pevent *pevent, char *fmt,
unsigned long long addr);
int pevent_pid_is_registered(struct pevent *pevent, int pid);
void pevent_print_event(struct pevent *pevent, struct trace_seq *s,
struct pevent_record *record);
int pevent_parse_header_page(struct pevent *pevent, char *buf, unsigned long size,
int long_size);
int pevent_parse_event(struct pevent *pevent, const char *buf,
unsigned long size, const char *sys);
void *pevent_get_field_raw(struct trace_seq *s, struct event_format *event,
const char *name, struct pevent_record *record,
int *len, int err);
int pevent_get_field_val(struct trace_seq *s, struct event_format *event,
const char *name, struct pevent_record *record,
unsigned long long *val, int err);
int pevent_get_common_field_val(struct trace_seq *s, struct event_format *event,
const char *name, struct pevent_record *record,
unsigned long long *val, int err);
int pevent_get_any_field_val(struct trace_seq *s, struct event_format *event,
const char *name, struct pevent_record *record,
unsigned long long *val, int err);
int pevent_print_num_field(struct trace_seq *s, const char *fmt,
struct event_format *event, const char *name,
struct pevent_record *record, int err);
int pevent_register_event_handler(struct pevent *pevent, int id, char *sys_name, char *event_name,
pevent_event_handler_func func, void *context);
int pevent_register_print_function(struct pevent *pevent,
pevent_func_handler func,
enum pevent_func_arg_type ret_type,
char *name, ...);
struct format_field *pevent_find_common_field(struct event_format *event, const char *name);
struct format_field *pevent_find_field(struct event_format *event, const char *name);
struct format_field *pevent_find_any_field(struct event_format *event, const char *name);
const char *pevent_find_function(struct pevent *pevent, unsigned long long addr);
unsigned long long
pevent_find_function_address(struct pevent *pevent, unsigned long long addr);
unsigned long long pevent_read_number(struct pevent *pevent, const void *ptr, int size);
int pevent_read_number_field(struct format_field *field, const void *data,
unsigned long long *value);
struct event_format *pevent_find_event(struct pevent *pevent, int id);
struct event_format *
pevent_find_event_by_name(struct pevent *pevent, const char *sys, const char *name);
void pevent_data_lat_fmt(struct pevent *pevent,
struct trace_seq *s, struct pevent_record *record);
int pevent_data_type(struct pevent *pevent, struct pevent_record *rec);
struct event_format *pevent_data_event_from_type(struct pevent *pevent, int type);
int pevent_data_pid(struct pevent *pevent, struct pevent_record *rec);
const char *pevent_data_comm_from_pid(struct pevent *pevent, int pid);
void pevent_event_info(struct trace_seq *s, struct event_format *event,
struct pevent_record *record);
struct event_format **pevent_list_events(struct pevent *pevent, enum event_sort_type);
struct format_field **pevent_event_common_fields(struct event_format *event);
struct format_field **pevent_event_fields(struct event_format *event);
static inline int pevent_get_cpus(struct pevent *pevent)
{
return pevent->cpus;
}
static inline void pevent_set_cpus(struct pevent *pevent, int cpus)
{
pevent->cpus = cpus;
}
static inline int pevent_get_long_size(struct pevent *pevent)
{
return pevent->long_size;
}
static inline void pevent_set_long_size(struct pevent *pevent, int long_size)
{
pevent->long_size = long_size;
}
static inline int pevent_is_file_bigendian(struct pevent *pevent)
{
return pevent->file_bigendian;
}
static inline void pevent_set_file_bigendian(struct pevent *pevent, int endian)
{
pevent->file_bigendian = endian;
}
static inline int pevent_is_host_bigendian(struct pevent *pevent)
{
return pevent->host_bigendian;
}
static inline void pevent_set_host_bigendian(struct pevent *pevent, int endian)
{
pevent->host_bigendian = endian;
}
static inline int pevent_is_latency_format(struct pevent *pevent)
{
return pevent->latency_format;
}
static inline void pevent_set_latency_format(struct pevent *pevent, int lat)
{
pevent->latency_format = lat;
}
struct pevent *pevent_alloc(void);
void pevent_free(struct pevent *pevent);
void pevent_ref(struct pevent *pevent);
void pevent_unref(struct pevent *pevent);
/* access to the internal parser */
void pevent_buffer_init(const char *buf, unsigned long long size);
enum event_type pevent_read_token(char **tok);
void pevent_free_token(char *token);
int pevent_peek_char(void);
const char *pevent_get_input_buf(void);
unsigned long long pevent_get_input_buf_ptr(void);
/* for debugging */
void pevent_print_funcs(struct pevent *pevent);
void pevent_print_printk(struct pevent *pevent);
/* ----------------------- filtering ----------------------- */
enum filter_boolean_type {
FILTER_FALSE,
FILTER_TRUE,
};
enum filter_op_type {
FILTER_OP_AND = 1,
FILTER_OP_OR,
FILTER_OP_NOT,
};
enum filter_cmp_type {
FILTER_CMP_NONE,
FILTER_CMP_EQ,
FILTER_CMP_NE,
FILTER_CMP_GT,
FILTER_CMP_LT,
FILTER_CMP_GE,
FILTER_CMP_LE,
FILTER_CMP_MATCH,
FILTER_CMP_NOT_MATCH,
FILTER_CMP_REGEX,
FILTER_CMP_NOT_REGEX,
};
enum filter_exp_type {
FILTER_EXP_NONE,
FILTER_EXP_ADD,
FILTER_EXP_SUB,
FILTER_EXP_MUL,
FILTER_EXP_DIV,
FILTER_EXP_MOD,
FILTER_EXP_RSHIFT,
FILTER_EXP_LSHIFT,
FILTER_EXP_AND,
FILTER_EXP_OR,
FILTER_EXP_XOR,
FILTER_EXP_NOT,
};
enum filter_arg_type {
FILTER_ARG_NONE,
FILTER_ARG_BOOLEAN,
FILTER_ARG_VALUE,
FILTER_ARG_FIELD,
FILTER_ARG_EXP,
FILTER_ARG_OP,
FILTER_ARG_NUM,
FILTER_ARG_STR,
};
enum filter_value_type {
FILTER_NUMBER,
FILTER_STRING,
FILTER_CHAR
};
struct filter_arg;
struct filter_arg_boolean {
enum filter_boolean_type value;
};
struct filter_arg_field {
struct format_field *field;
};
struct filter_arg_value {
enum filter_value_type type;
union {
char *str;
unsigned long long val;
};
};
struct filter_arg_op {
enum filter_op_type type;
struct filter_arg *left;
struct filter_arg *right;
};
struct filter_arg_exp {
enum filter_exp_type type;
struct filter_arg *left;
struct filter_arg *right;
};
struct filter_arg_num {
enum filter_cmp_type type;
struct filter_arg *left;
struct filter_arg *right;
};
struct filter_arg_str {
enum filter_cmp_type type;
struct format_field *field;
char *val;
char *buffer;
regex_t reg;
};
struct filter_arg {
enum filter_arg_type type;
union {
struct filter_arg_boolean boolean;
struct filter_arg_field field;
struct filter_arg_value value;
struct filter_arg_op op;
struct filter_arg_exp exp;
struct filter_arg_num num;
struct filter_arg_str str;
};
};
struct filter_type {
int event_id;
struct event_format *event;
struct filter_arg *filter;
};
struct event_filter {
struct pevent *pevent;
int filters;
struct filter_type *event_filters;
};
struct event_filter *pevent_filter_alloc(struct pevent *pevent);
#define FILTER_NONE -2
#define FILTER_NOEXIST -1
#define FILTER_MISS 0
#define FILTER_MATCH 1
enum filter_trivial_type {
FILTER_TRIVIAL_FALSE,
FILTER_TRIVIAL_TRUE,
FILTER_TRIVIAL_BOTH,
};
int pevent_filter_add_filter_str(struct event_filter *filter,
const char *filter_str,
char **error_str);
int pevent_filter_match(struct event_filter *filter,
struct pevent_record *record);
int pevent_event_filtered(struct event_filter *filter,
int event_id);
void pevent_filter_reset(struct event_filter *filter);
void pevent_filter_clear_trivial(struct event_filter *filter,
enum filter_trivial_type type);
void pevent_filter_free(struct event_filter *filter);
char *pevent_filter_make_string(struct event_filter *filter, int event_id);
int pevent_filter_remove_event(struct event_filter *filter,
int event_id);
int pevent_filter_event_has_trivial(struct event_filter *filter,
int event_id,
enum filter_trivial_type type);
int pevent_filter_copy(struct event_filter *dest, struct event_filter *source);
int pevent_update_trivial(struct event_filter *dest, struct event_filter *source,
enum filter_trivial_type type);
int pevent_filter_compare(struct event_filter *filter1, struct event_filter *filter2);
#endif /* _PARSE_EVENTS_H */
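A hypothetical end-to-end use of the filter API might look as follows; the filter-string syntax, the event name, and an already-initialized pevent handle and record are all assumptions, not something this commit documents:

#include <stdio.h>
#include "event-parse.h"

/* 'pevent' is assumed to have the sched_switch format parsed into it,
 * and 'record' is a raw event read from a trace buffer. */
void show_idle_switches(struct pevent *pevent, struct pevent_record *record)
{
        struct event_filter *filter = pevent_filter_alloc(pevent);
        char *error_str = NULL;

        /* "sys/event: expression" syntax is assumed here */
        if (pevent_filter_add_filter_str(filter,
                                         "sched/sched_switch:next_pid==0",
                                         &error_str) < 0)
                printf("bad filter\n");
        else if (pevent_filter_match(filter, record) == FILTER_MATCH)
                printf("switch to idle\n");

        pevent_filter_free(filter);
}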

tools/lib/traceevent/event-utils.h (new file)

@@ -0,0 +1,80 @@
/*
* Copyright (C) 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation;
* version 2.1 of the License (not later!)
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#ifndef __UTIL_H
#define __UTIL_H
#include <ctype.h>
#include <stdarg.h>	/* for va_list in the declarations below */
#include <string.h>	/* for strlen() in strim() */
/* Can be overridden */
void die(const char *fmt, ...);
void *malloc_or_die(unsigned int size);
void warning(const char *fmt, ...);
void pr_stat(const char *fmt, ...);
void vpr_stat(const char *fmt, va_list ap);
/* Always available */
void __die(const char *fmt, ...);
void __warning(const char *fmt, ...);
void __pr_stat(const char *fmt, ...);
void __vdie(const char *fmt, va_list ap);
void __vwarning(const char *fmt, va_list ap);
void __vpr_stat(const char *fmt, va_list ap);
static inline char *strim(char *string)
{
char *ret;
if (!string)
return NULL;
while (*string) {
if (!isspace(*string))
break;
string++;
}
ret = string;
string = ret + strlen(ret) - 1;
while (string > ret) {
if (!isspace(*string))
break;
string--;
}
string[1] = 0;
return ret;
}
static inline int has_text(const char *text)
{
if (!text)
return 0;
while (*text) {
if (!isspace(*text))
return 1;
text++;
}
return 0;
}
#endif
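A short usage sketch for these helpers (an editor's illustration; it assumes the header above is on the include path):

#include <stdio.h>
#include "event-utils.h"

int main(void)
{
        char buf[] = "   trimmed text   ";

        printf("[%s]\n", strim(buf));        /* prints: [trimmed text] */
        printf("%d\n", has_text("  \t "));   /* prints: 0 (only whitespace) */
        printf("%d\n", has_text(" x "));     /* prints: 1 */
        return 0;
}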

tools/lib/traceevent/parse-filter.c: diff not shown because of its size.

tools/lib/traceevent/parse-utils.c (new file)

@@ -0,0 +1,110 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#define __weak __attribute__((weak))
void __vdie(const char *fmt, va_list ap)
{
int ret = errno;
if (errno)
perror("trace-cmd");
else
ret = -1;
fprintf(stderr, " ");
vfprintf(stderr, fmt, ap);
fprintf(stderr, "\n");
exit(ret);
}
void __die(const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
__vdie(fmt, ap);
va_end(ap);
}
void __weak die(const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
__vdie(fmt, ap);
va_end(ap);
}
void __vwarning(const char *fmt, va_list ap)
{
if (errno)
perror("trace-cmd");
errno = 0;
fprintf(stderr, " ");
vfprintf(stderr, fmt, ap);
fprintf(stderr, "\n");
}
void __warning(const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
__vwarning(fmt, ap);
va_end(ap);
}
void __weak warning(const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
__vwarning(fmt, ap);
va_end(ap);
}
void __vpr_stat(const char *fmt, va_list ap)
{
vprintf(fmt, ap);
printf("\n");
}
void __pr_stat(const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
__vpr_stat(fmt, ap);
va_end(ap);
}
void __weak vpr_stat(const char *fmt, va_list ap)
{
__vpr_stat(fmt, ap);
}
void __weak pr_stat(const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
__vpr_stat(fmt, ap);
va_end(ap);
}
void __weak *malloc_or_die(unsigned int size)
{
void *data;
data = malloc(size);
if (!data)
die("malloc");
return data;
}
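Since die(), warning(), pr_stat() and malloc_or_die() are defined __weak above, a tool linking this file can supply its own versions; a hypothetical override could be:

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

/* Replaces the __weak die() above; "mytool" is an illustrative name. */
void die(const char *fmt, ...)
{
        va_list ap;

        va_start(ap, fmt);
        fprintf(stderr, "mytool: fatal: ");
        vfprintf(stderr, fmt, ap);
        fprintf(stderr, "\n");
        va_end(ap);
        exit(1);
}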

tools/lib/traceevent/trace-seq.c (new file)

@@ -0,0 +1,200 @@
/*
* Copyright (C) 2009 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation;
* version 2.1 of the License (not later!)
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include "event-parse.h"
#include "event-utils.h"
/*
* TRACE_SEQ_POISON is used to catch any use of
* a trace_seq structure after it has been destroyed.
*/
#define TRACE_SEQ_POISON ((void *)0xdeadbeef)
#define TRACE_SEQ_CHECK(s) \
do { \
if ((s)->buffer == TRACE_SEQ_POISON) \
die("Usage of trace_seq after it was destroyed"); \
} while (0)
/**
* trace_seq_init - initialize the trace_seq structure
* @s: a pointer to the trace_seq structure to initialize
*/
void trace_seq_init(struct trace_seq *s)
{
s->len = 0;
s->readpos = 0;
s->buffer_size = TRACE_SEQ_BUF_SIZE;
s->buffer = malloc_or_die(s->buffer_size);
}
/**
* trace_seq_destroy - free up memory of a trace_seq
* @s: a pointer to the trace_seq to free the buffer
*
* Only frees the buffer, not the trace_seq struct itself.
*/
void trace_seq_destroy(struct trace_seq *s)
{
if (!s)
return;
TRACE_SEQ_CHECK(s);
free(s->buffer);
s->buffer = TRACE_SEQ_POISON;
}
static void expand_buffer(struct trace_seq *s)
{
s->buffer_size += TRACE_SEQ_BUF_SIZE;
s->buffer = realloc(s->buffer, s->buffer_size);
if (!s->buffer)
die("Can't allocate trace_seq buffer memory");
}
/**
* trace_seq_printf - sequence printing of trace information
* @s: trace sequence descriptor
* @fmt: printf format string
*
* Returns 1. If the formatted string would overflow the buffer's
* free space, the buffer is grown and the write is retried.
*
* The tracer may use either sequence operations or its own
* copy to user routines. To simplify formatting of a trace,
* trace_seq_printf is used to store strings into a special
* buffer (@s). Then the output may be either used by
* the sequencer or pulled into another buffer.
*/
int
trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
{
va_list ap;
int len;
int ret;
TRACE_SEQ_CHECK(s);
try_again:
len = (s->buffer_size - 1) - s->len;
va_start(ap, fmt);
ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
va_end(ap);
if (ret >= len) {
expand_buffer(s);
goto try_again;
}
s->len += ret;
return 1;
}
/**
* trace_seq_vprintf - sequence printing of trace information
* @s: trace sequence descriptor
* @fmt: printf format string
*
* The tracer may use either sequence operations or its own
* copy to user routines. To simplify formatting of a trace,
* trace_seq_printf is used to store strings into a special
* buffer (@s). Then the output may be either used by
* the sequencer or pulled into another buffer.
*/
int
trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args)
{
int len;
int ret;
va_list ap;
TRACE_SEQ_CHECK(s);
try_again:
len = (s->buffer_size - 1) - s->len;
/*
* vsnprintf() consumes its va_list, so work on a copy in case
* the buffer must be expanded and the write retried.
*/
va_copy(ap, args);
ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
va_end(ap);
if (ret >= len) {
expand_buffer(s);
goto try_again;
}
s->len += ret;
return len;
}
/**
* trace_seq_puts - trace sequence printing of simple string
* @s: trace sequence descriptor
* @str: simple string to record
*
* The tracer may use either the sequence operations or its own
* copy to user routines. This function records a simple string
* into a special buffer (@s) for later retrieval by a sequencer
* or other mechanism.
*/
int trace_seq_puts(struct trace_seq *s, const char *str)
{
int len;
TRACE_SEQ_CHECK(s);
len = strlen(str);
while (len > ((s->buffer_size - 1) - s->len))
expand_buffer(s);
memcpy(s->buffer + s->len, str, len);
s->len += len;
return len;
}
int trace_seq_putc(struct trace_seq *s, unsigned char c)
{
TRACE_SEQ_CHECK(s);
while (s->len >= (s->buffer_size - 1))
expand_buffer(s);
s->buffer[s->len++] = c;
return 1;
}
void trace_seq_terminate(struct trace_seq *s)
{
TRACE_SEQ_CHECK(s);
/* There is always at least one character of space left in the buffer */
s->buffer[s->len] = 0;
}
int trace_seq_do_printf(struct trace_seq *s)
{
TRACE_SEQ_CHECK(s);
return printf("%.*s", s->len, s->buffer);
}
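Taken together, the usual trace_seq lifecycle is init, print, terminate, output, destroy. A short sketch, assuming event-parse.h declares struct trace_seq and the functions above:

static void trace_seq_example(void)
{
	struct trace_seq s;

	trace_seq_init(&s);			/* allocates TRACE_SEQ_BUF_SIZE bytes */
	trace_seq_printf(&s, "pid=%d ", 1234);	/* buffer grows on demand */
	trace_seq_puts(&s, "comm=sh");
	trace_seq_terminate(&s);		/* NUL-terminate before printing */
	trace_seq_do_printf(&s);		/* flush the contents to stdout */
	trace_seq_destroy(&s);			/* frees the buffer and poisons s */
	/*
	 * Any further trace_seq_*() call on &s would now trip
	 * TRACE_SEQ_CHECK() and die().
	 */
}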

View file

@ -6,6 +6,7 @@
normal = black, lightgray normal = black, lightgray
selected = lightgray, magenta selected = lightgray, magenta
code = blue, lightgray code = blue, lightgray
addr = magenta, lightgray
[tui] [tui]

View file

@ -1,18 +1,10 @@
ifeq ("$(origin O)", "command line") include ../scripts/Makefile.include
OUTPUT := $(O)/
endif
# The default target of this Makefile is... # The default target of this Makefile is...
all: all:
include config/utilities.mak include config/utilities.mak
ifneq ($(OUTPUT),)
# check that the output directory actually exists
OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd)
$(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist))
endif
# Define V to have a more verbose compile. # Define V to have a more verbose compile.
# #
# Define O to save output files in a separate directory. # Define O to save output files in a separate directory.
@ -84,31 +76,6 @@ ifneq ($(WERROR),0)
CFLAGS_WERROR := -Werror CFLAGS_WERROR := -Werror
endif endif
#
# Include saner warnings here, which can catch bugs:
#
EXTRA_WARNINGS := -Wformat
EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wformat-security
EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wformat-y2k
EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wshadow
EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Winit-self
EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wpacked
EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wredundant-decls
EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wstrict-aliasing=3
EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wswitch-default
EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wswitch-enum
EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wno-system-headers
EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wundef
EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wwrite-strings
EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wbad-function-cast
EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wmissing-declarations
EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wmissing-prototypes
EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wnested-externs
EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wold-style-definition
EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wstrict-prototypes
EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wdeclaration-after-statement
ifeq ("$(origin DEBUG)", "command line") ifeq ("$(origin DEBUG)", "command line")
PERF_DEBUG = $(DEBUG) PERF_DEBUG = $(DEBUG)
endif endif
@ -182,7 +149,7 @@ endif
### --- END CONFIGURATION SECTION --- ### --- END CONFIGURATION SECTION ---
BASIC_CFLAGS = -Iutil/include -Iarch/$(ARCH)/include -I$(OUTPUT)/util -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE BASIC_CFLAGS = -Iutil/include -Iarch/$(ARCH)/include -I$(OUTPUT)/util -I$(EVENT_PARSE_DIR) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
BASIC_LDFLAGS = BASIC_LDFLAGS =
# Guard against environment variables # Guard against environment variables
@ -211,6 +178,17 @@ $(OUTPUT)python/perf.so: $(PYRF_OBJS) $(PYTHON_EXT_SRCS) $(PYTHON_EXT_DEPS)
SCRIPTS = $(patsubst %.sh,%,$(SCRIPT_SH)) SCRIPTS = $(patsubst %.sh,%,$(SCRIPT_SH))
EVENT_PARSE_DIR = ../lib/traceevent/
ifeq ("$(origin O)", "command line")
EP_PATH=$(OUTPUT)/
else
EP_PATH=$(EVENT_PARSE_DIR)/
endif
LIBPARSEVENT = $(EP_PATH)libtraceevent.a
EP_LIB := -L$(EP_PATH) -ltraceevent
# #
# Single 'perf' binary right now: # Single 'perf' binary right now:
# #
@ -333,6 +311,8 @@ LIB_H += util/cpumap.h
LIB_H += util/top.h LIB_H += util/top.h
LIB_H += $(ARCH_INCLUDE) LIB_H += $(ARCH_INCLUDE)
LIB_H += util/cgroup.h LIB_H += util/cgroup.h
LIB_H += $(EVENT_PARSE_DIR)event-parse.h
LIB_H += util/target.h
LIB_OBJS += $(OUTPUT)util/abspath.o LIB_OBJS += $(OUTPUT)util/abspath.o
LIB_OBJS += $(OUTPUT)util/alias.o LIB_OBJS += $(OUTPUT)util/alias.o
@ -394,6 +374,7 @@ LIB_OBJS += $(OUTPUT)util/util.o
LIB_OBJS += $(OUTPUT)util/xyarray.o LIB_OBJS += $(OUTPUT)util/xyarray.o
LIB_OBJS += $(OUTPUT)util/cpumap.o LIB_OBJS += $(OUTPUT)util/cpumap.o
LIB_OBJS += $(OUTPUT)util/cgroup.o LIB_OBJS += $(OUTPUT)util/cgroup.o
LIB_OBJS += $(OUTPUT)util/target.o
BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o
@ -429,7 +410,7 @@ BUILTIN_OBJS += $(OUTPUT)builtin-kvm.o
BUILTIN_OBJS += $(OUTPUT)builtin-test.o BUILTIN_OBJS += $(OUTPUT)builtin-test.o
BUILTIN_OBJS += $(OUTPUT)builtin-inject.o BUILTIN_OBJS += $(OUTPUT)builtin-inject.o
PERFLIBS = $(LIB_FILE) PERFLIBS = $(LIB_FILE) $(LIBPARSEVENT)
# Files needed for the python binding, perf.so # Files needed for the python binding, perf.so
# pyrf is just an internal name needed for all those wrappers. # pyrf is just an internal name needed for all those wrappers.
@ -506,22 +487,23 @@ else
# Fedora has /usr/include/slang/slang.h, but ubuntu /usr/include/slang.h # Fedora has /usr/include/slang/slang.h, but ubuntu /usr/include/slang.h
BASIC_CFLAGS += -I/usr/include/slang BASIC_CFLAGS += -I/usr/include/slang
EXTLIBS += -lnewt -lslang EXTLIBS += -lnewt -lslang
LIB_OBJS += $(OUTPUT)util/ui/setup.o LIB_OBJS += $(OUTPUT)ui/setup.o
LIB_OBJS += $(OUTPUT)util/ui/browser.o LIB_OBJS += $(OUTPUT)ui/browser.o
LIB_OBJS += $(OUTPUT)util/ui/browsers/annotate.o LIB_OBJS += $(OUTPUT)ui/browsers/annotate.o
LIB_OBJS += $(OUTPUT)util/ui/browsers/hists.o LIB_OBJS += $(OUTPUT)ui/browsers/hists.o
LIB_OBJS += $(OUTPUT)util/ui/browsers/map.o LIB_OBJS += $(OUTPUT)ui/browsers/map.o
LIB_OBJS += $(OUTPUT)util/ui/helpline.o LIB_OBJS += $(OUTPUT)ui/helpline.o
LIB_OBJS += $(OUTPUT)util/ui/progress.o LIB_OBJS += $(OUTPUT)ui/progress.o
LIB_OBJS += $(OUTPUT)util/ui/util.o LIB_OBJS += $(OUTPUT)ui/util.o
LIB_H += util/ui/browser.h LIB_OBJS += $(OUTPUT)ui/tui/setup.o
LIB_H += util/ui/browsers/map.h LIB_H += ui/browser.h
LIB_H += util/ui/helpline.h LIB_H += ui/browsers/map.h
LIB_H += util/ui/keysyms.h LIB_H += ui/helpline.h
LIB_H += util/ui/libslang.h LIB_H += ui/keysyms.h
LIB_H += util/ui/progress.h LIB_H += ui/libslang.h
LIB_H += util/ui/util.h LIB_H += ui/progress.h
LIB_H += util/ui/ui.h LIB_H += ui/util.h
LIB_H += ui/ui.h
endif endif
endif endif
@ -535,7 +517,12 @@ else
else else
BASIC_CFLAGS += $(shell pkg-config --cflags gtk+-2.0) BASIC_CFLAGS += $(shell pkg-config --cflags gtk+-2.0)
EXTLIBS += $(shell pkg-config --libs gtk+-2.0) EXTLIBS += $(shell pkg-config --libs gtk+-2.0)
LIB_OBJS += $(OUTPUT)util/gtk/browser.o LIB_OBJS += $(OUTPUT)ui/gtk/browser.o
LIB_OBJS += $(OUTPUT)ui/gtk/setup.o
# Make sure that it is included only once.
ifneq ($(findstring -DNO_NEWT_SUPPORT,$(BASIC_CFLAGS)),)
LIB_OBJS += $(OUTPUT)ui/setup.o
endif
endif endif
endif endif
@ -678,18 +665,6 @@ else
endif endif
endif endif
ifneq ($(findstring $(MAKEFLAGS),s),s)
ifndef V
QUIET_CC = @echo ' ' CC $@;
QUIET_AR = @echo ' ' AR $@;
QUIET_LINK = @echo ' ' LINK $@;
QUIET_MKDIR = @echo ' ' MKDIR $@;
QUIET_GEN = @echo ' ' GEN $@;
QUIET_FLEX = @echo ' ' FLEX $@;
QUIET_BISON = @echo ' ' BISON $@;
endif
endif
ifdef ASCIIDOC8 ifdef ASCIIDOC8
export ASCIIDOC8 export ASCIIDOC8
endif endif
@ -800,16 +775,16 @@ $(OUTPUT)util/exec_cmd.o: util/exec_cmd.c $(OUTPUT)PERF-CFLAGS
$(OUTPUT)util/config.o: util/config.c $(OUTPUT)PERF-CFLAGS $(OUTPUT)util/config.o: util/config.c $(OUTPUT)PERF-CFLAGS
$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
$(OUTPUT)util/ui/browser.o: util/ui/browser.c $(OUTPUT)PERF-CFLAGS $(OUTPUT)ui/browser.o: ui/browser.c $(OUTPUT)PERF-CFLAGS
$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DENABLE_SLFUTURE_CONST $< $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DENABLE_SLFUTURE_CONST $<
$(OUTPUT)util/ui/browsers/annotate.o: util/ui/browsers/annotate.c $(OUTPUT)PERF-CFLAGS $(OUTPUT)ui/browsers/annotate.o: ui/browsers/annotate.c $(OUTPUT)PERF-CFLAGS
$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DENABLE_SLFUTURE_CONST $< $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DENABLE_SLFUTURE_CONST $<
$(OUTPUT)util/ui/browsers/hists.o: util/ui/browsers/hists.c $(OUTPUT)PERF-CFLAGS $(OUTPUT)ui/browsers/hists.o: ui/browsers/hists.c $(OUTPUT)PERF-CFLAGS
$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DENABLE_SLFUTURE_CONST $< $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DENABLE_SLFUTURE_CONST $<
$(OUTPUT)util/ui/browsers/map.o: util/ui/browsers/map.c $(OUTPUT)PERF-CFLAGS $(OUTPUT)ui/browsers/map.o: ui/browsers/map.c $(OUTPUT)PERF-CFLAGS
$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DENABLE_SLFUTURE_CONST $< $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DENABLE_SLFUTURE_CONST $<
$(OUTPUT)util/rbtree.o: ../../lib/rbtree.c $(OUTPUT)PERF-CFLAGS $(OUTPUT)util/rbtree.o: ../../lib/rbtree.c $(OUTPUT)PERF-CFLAGS
@ -844,6 +819,10 @@ $(sort $(dir $(DIRECTORY_DEPS))):
$(LIB_FILE): $(LIB_OBJS) $(LIB_FILE): $(LIB_OBJS)
$(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(LIB_OBJS) $(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(LIB_OBJS)
# libparsevent.a
$(LIBPARSEVENT):
make -C $(EVENT_PARSE_DIR) $(COMMAND_O) libtraceevent.a
help: help:
@echo 'Perf make targets:' @echo 'Perf make targets:'
@echo ' doc - make *all* documentation (see below)' @echo ' doc - make *all* documentation (see below)'

View file

@ -192,7 +192,7 @@ static void insert_caller_stat(unsigned long call_site,
} }
static void process_alloc_event(void *data, static void process_alloc_event(void *data,
struct event *event, struct event_format *event,
int cpu, int cpu,
u64 timestamp __used, u64 timestamp __used,
struct thread *thread __used, struct thread *thread __used,
@ -253,7 +253,7 @@ static struct alloc_stat *search_alloc_stat(unsigned long ptr,
} }
static void process_free_event(void *data, static void process_free_event(void *data,
struct event *event, struct event_format *event,
int cpu, int cpu,
u64 timestamp __used, u64 timestamp __used,
struct thread *thread __used) struct thread *thread __used)
@ -281,7 +281,7 @@ static void process_free_event(void *data,
static void process_raw_event(union perf_event *raw_event __used, void *data, static void process_raw_event(union perf_event *raw_event __used, void *data,
int cpu, u64 timestamp, struct thread *thread) int cpu, u64 timestamp, struct thread *thread)
{ {
struct event *event; struct event_format *event;
int type; int type;
type = trace_parse_common_type(data); type = trace_parse_common_type(data);

View file

@ -356,25 +356,25 @@ struct trace_release_event {
struct trace_lock_handler { struct trace_lock_handler {
void (*acquire_event)(struct trace_acquire_event *, void (*acquire_event)(struct trace_acquire_event *,
struct event *, struct event_format *,
int cpu, int cpu,
u64 timestamp, u64 timestamp,
struct thread *thread); struct thread *thread);
void (*acquired_event)(struct trace_acquired_event *, void (*acquired_event)(struct trace_acquired_event *,
struct event *, struct event_format *,
int cpu, int cpu,
u64 timestamp, u64 timestamp,
struct thread *thread); struct thread *thread);
void (*contended_event)(struct trace_contended_event *, void (*contended_event)(struct trace_contended_event *,
struct event *, struct event_format *,
int cpu, int cpu,
u64 timestamp, u64 timestamp,
struct thread *thread); struct thread *thread);
void (*release_event)(struct trace_release_event *, void (*release_event)(struct trace_release_event *,
struct event *, struct event_format *,
int cpu, int cpu,
u64 timestamp, u64 timestamp,
struct thread *thread); struct thread *thread);
@ -416,7 +416,7 @@ enum acquire_flags {
static void static void
report_lock_acquire_event(struct trace_acquire_event *acquire_event, report_lock_acquire_event(struct trace_acquire_event *acquire_event,
struct event *__event __used, struct event_format *__event __used,
int cpu __used, int cpu __used,
u64 timestamp __used, u64 timestamp __used,
struct thread *thread __used) struct thread *thread __used)
@ -480,7 +480,7 @@ end:
static void static void
report_lock_acquired_event(struct trace_acquired_event *acquired_event, report_lock_acquired_event(struct trace_acquired_event *acquired_event,
struct event *__event __used, struct event_format *__event __used,
int cpu __used, int cpu __used,
u64 timestamp __used, u64 timestamp __used,
struct thread *thread __used) struct thread *thread __used)
@ -536,7 +536,7 @@ end:
static void static void
report_lock_contended_event(struct trace_contended_event *contended_event, report_lock_contended_event(struct trace_contended_event *contended_event,
struct event *__event __used, struct event_format *__event __used,
int cpu __used, int cpu __used,
u64 timestamp __used, u64 timestamp __used,
struct thread *thread __used) struct thread *thread __used)
@ -583,7 +583,7 @@ end:
static void static void
report_lock_release_event(struct trace_release_event *release_event, report_lock_release_event(struct trace_release_event *release_event,
struct event *__event __used, struct event_format *__event __used,
int cpu __used, int cpu __used,
u64 timestamp __used, u64 timestamp __used,
struct thread *thread __used) struct thread *thread __used)
@ -647,7 +647,7 @@ static struct trace_lock_handler *trace_handler;
static void static void
process_lock_acquire_event(void *data, process_lock_acquire_event(void *data,
struct event *event __used, struct event_format *event __used,
int cpu __used, int cpu __used,
u64 timestamp __used, u64 timestamp __used,
struct thread *thread __used) struct thread *thread __used)
@ -666,7 +666,7 @@ process_lock_acquire_event(void *data,
static void static void
process_lock_acquired_event(void *data, process_lock_acquired_event(void *data,
struct event *event __used, struct event_format *event __used,
int cpu __used, int cpu __used,
u64 timestamp __used, u64 timestamp __used,
struct thread *thread __used) struct thread *thread __used)
@ -684,7 +684,7 @@ process_lock_acquired_event(void *data,
static void static void
process_lock_contended_event(void *data, process_lock_contended_event(void *data,
struct event *event __used, struct event_format *event __used,
int cpu __used, int cpu __used,
u64 timestamp __used, u64 timestamp __used,
struct thread *thread __used) struct thread *thread __used)
@ -702,7 +702,7 @@ process_lock_contended_event(void *data,
static void static void
process_lock_release_event(void *data, process_lock_release_event(void *data,
struct event *event __used, struct event_format *event __used,
int cpu __used, int cpu __used,
u64 timestamp __used, u64 timestamp __used,
struct thread *thread __used) struct thread *thread __used)
@ -721,7 +721,7 @@ process_lock_release_event(void *data,
static void static void
process_raw_event(void *data, int cpu, u64 timestamp, struct thread *thread) process_raw_event(void *data, int cpu, u64 timestamp, struct thread *thread)
{ {
struct event *event; struct event_format *event;
int type; int type;
type = trace_parse_common_type(data); type = trace_parse_common_type(data);

View file

@ -44,7 +44,6 @@ struct perf_record {
struct perf_evlist *evlist; struct perf_evlist *evlist;
struct perf_session *session; struct perf_session *session;
const char *progname; const char *progname;
const char *uid_str;
int output; int output;
unsigned int page_size; unsigned int page_size;
int realtime_prio; int realtime_prio;
@ -218,7 +217,7 @@ try_again:
if (err == EPERM || err == EACCES) { if (err == EPERM || err == EACCES) {
ui__error_paranoid(); ui__error_paranoid();
exit(EXIT_FAILURE); exit(EXIT_FAILURE);
} else if (err == ENODEV && opts->cpu_list) { } else if (err == ENODEV && opts->target.cpu_list) {
die("No such device - did you specify" die("No such device - did you specify"
" an out-of-range profile CPU?\n"); " an out-of-range profile CPU?\n");
} else if (err == EINVAL) { } else if (err == EINVAL) {
@ -243,9 +242,13 @@ try_again:
/* /*
* If it's cycles then fall back to hrtimer * If it's cycles then fall back to hrtimer
* based cpu-clock-tick sw counter, which * based cpu-clock-tick sw counter, which
* is always available even if no PMU support: * is always available even if no PMU support.
*
* PPC returns ENXIO until 2.6.37 (behavior changed
* with commit b0a873e).
*/ */
if (attr->type == PERF_TYPE_HARDWARE if ((err == ENOENT || err == ENXIO)
&& attr->type == PERF_TYPE_HARDWARE
&& attr->config == PERF_COUNT_HW_CPU_CYCLES) { && attr->config == PERF_COUNT_HW_CPU_CYCLES) {
if (verbose) if (verbose)
@ -253,6 +256,10 @@ try_again:
"trying to fall back to cpu-clock-ticks\n"); "trying to fall back to cpu-clock-ticks\n");
attr->type = PERF_TYPE_SOFTWARE; attr->type = PERF_TYPE_SOFTWARE;
attr->config = PERF_COUNT_SW_CPU_CLOCK; attr->config = PERF_COUNT_SW_CPU_CLOCK;
if (pos->name) {
free(pos->name);
pos->name = NULL;
}
goto try_again; goto try_again;
} }
@ -578,7 +585,7 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
perf_session__process_machines(session, tool, perf_session__process_machines(session, tool,
perf_event__synthesize_guest_os); perf_event__synthesize_guest_os);
if (!opts->system_wide) if (!opts->target.system_wide)
perf_event__synthesize_thread_map(tool, evsel_list->threads, perf_event__synthesize_thread_map(tool, evsel_list->threads,
process_synthesized_event, process_synthesized_event,
machine); machine);
@ -747,6 +754,9 @@ static struct perf_record record = {
.user_freq = UINT_MAX, .user_freq = UINT_MAX,
.user_interval = ULLONG_MAX, .user_interval = ULLONG_MAX,
.freq = 1000, .freq = 1000,
.target = {
.uses_mmap = true,
},
}, },
.write_mode = WRITE_FORCE, .write_mode = WRITE_FORCE,
.file_new = true, .file_new = true,
@ -765,9 +775,9 @@ const struct option record_options[] = {
parse_events_option), parse_events_option),
OPT_CALLBACK(0, "filter", &record.evlist, "filter", OPT_CALLBACK(0, "filter", &record.evlist, "filter",
"event filter", parse_filter), "event filter", parse_filter),
OPT_STRING('p', "pid", &record.opts.target_pid, "pid", OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
"record events on existing process id"), "record events on existing process id"),
OPT_STRING('t', "tid", &record.opts.target_tid, "tid", OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
"record events on existing thread id"), "record events on existing thread id"),
OPT_INTEGER('r', "realtime", &record.realtime_prio, OPT_INTEGER('r', "realtime", &record.realtime_prio,
"collect data with this RT SCHED_FIFO priority"), "collect data with this RT SCHED_FIFO priority"),
@ -775,11 +785,11 @@ const struct option record_options[] = {
"collect data without buffering"), "collect data without buffering"),
OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples, OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
"collect raw sample records from all opened counters"), "collect raw sample records from all opened counters"),
OPT_BOOLEAN('a', "all-cpus", &record.opts.system_wide, OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
"system-wide collection from all CPUs"), "system-wide collection from all CPUs"),
OPT_BOOLEAN('A', "append", &record.append_file, OPT_BOOLEAN('A', "append", &record.append_file,
"append to the output file to do incremental profiling"), "append to the output file to do incremental profiling"),
OPT_STRING('C', "cpu", &record.opts.cpu_list, "cpu", OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
"list of cpus to monitor"), "list of cpus to monitor"),
OPT_BOOLEAN('f', "force", &record.force, OPT_BOOLEAN('f', "force", &record.force,
"overwrite existing data file (deprecated)"), "overwrite existing data file (deprecated)"),
@ -813,7 +823,8 @@ const struct option record_options[] = {
OPT_CALLBACK('G', "cgroup", &record.evlist, "name", OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
"monitor event in cgroup name only", "monitor event in cgroup name only",
parse_cgroups), parse_cgroups),
OPT_STRING('u', "uid", &record.uid_str, "user", "user to profile"), OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
"user to profile"),
OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack, OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
"branch any", "sample any taken branches", "branch any", "sample any taken branches",
@ -831,6 +842,7 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
struct perf_evsel *pos; struct perf_evsel *pos;
struct perf_evlist *evsel_list; struct perf_evlist *evsel_list;
struct perf_record *rec = &record; struct perf_record *rec = &record;
char errbuf[BUFSIZ];
perf_header__set_cmdline(argc, argv); perf_header__set_cmdline(argc, argv);
@ -842,8 +854,7 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
argc = parse_options(argc, argv, record_options, record_usage, argc = parse_options(argc, argv, record_options, record_usage,
PARSE_OPT_STOP_AT_NON_OPTION); PARSE_OPT_STOP_AT_NON_OPTION);
if (!argc && !rec->opts.target_pid && !rec->opts.target_tid && if (!argc && perf_target__none(&rec->opts.target))
!rec->opts.system_wide && !rec->opts.cpu_list && !rec->uid_str)
usage_with_options(record_usage, record_options); usage_with_options(record_usage, record_options);
if (rec->force && rec->append_file) { if (rec->force && rec->append_file) {
@ -856,7 +867,7 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
rec->write_mode = WRITE_FORCE; rec->write_mode = WRITE_FORCE;
} }
if (nr_cgroups && !rec->opts.system_wide) { if (nr_cgroups && !rec->opts.target.system_wide) {
fprintf(stderr, "cgroup monitoring only available in" fprintf(stderr, "cgroup monitoring only available in"
" system-wide mode\n"); " system-wide mode\n");
usage_with_options(record_usage, record_options); usage_with_options(record_usage, record_options);
@ -883,17 +894,25 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
goto out_symbol_exit; goto out_symbol_exit;
} }
rec->opts.uid = parse_target_uid(rec->uid_str, rec->opts.target_tid, err = perf_target__validate(&rec->opts.target);
rec->opts.target_pid); if (err) {
if (rec->uid_str != NULL && rec->opts.uid == UINT_MAX - 1) perf_target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
ui__warning("%s", errbuf);
}
err = perf_target__parse_uid(&rec->opts.target);
if (err) {
int saved_errno = errno;
perf_target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
ui__warning("%s", errbuf);
err = -saved_errno;
goto out_free_fd; goto out_free_fd;
}
if (rec->opts.target_pid) err = -ENOMEM;
rec->opts.target_tid = rec->opts.target_pid; if (perf_evlist__create_maps(evsel_list, &rec->opts.target) < 0)
if (perf_evlist__create_maps(evsel_list, rec->opts.target_pid,
rec->opts.target_tid, rec->opts.uid,
rec->opts.cpu_list) < 0)
usage_with_options(record_usage, record_options); usage_with_options(record_usage, record_options);
list_for_each_entry(pos, &evsel_list->entries, node) { list_for_each_entry(pos, &evsel_list->entries, node) {

View file

@ -296,12 +296,15 @@ static size_t hists__fprintf_nr_sample_events(struct hists *self,
{ {
size_t ret; size_t ret;
char unit; char unit;
unsigned long nr_events = self->stats.nr_events[PERF_RECORD_SAMPLE]; unsigned long nr_samples = self->stats.nr_events[PERF_RECORD_SAMPLE];
u64 nr_events = self->stats.total_period;
nr_events = convert_unit(nr_events, &unit); nr_samples = convert_unit(nr_samples, &unit);
ret = fprintf(fp, "# Events: %lu%c", nr_events, unit); ret = fprintf(fp, "# Samples: %lu%c", nr_samples, unit);
if (evname != NULL) if (evname != NULL)
ret += fprintf(fp, " %s", evname); ret += fprintf(fp, " of event '%s'", evname);
ret += fprintf(fp, "\n# Event count (approx.): %" PRIu64, nr_events);
return ret + fprintf(fp, "\n#\n"); return ret + fprintf(fp, "\n#\n");
} }
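With this change the report header distinguishes the raw sample count from the summed event period, e.g. (numbers invented):

# Samples: 8K of event 'cycles'
# Event count (approx.): 9265212554
#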
@ -680,14 +683,10 @@ int cmd_report(int argc, const char **argv, const char *prefix __used)
} }
if (strcmp(report.input_name, "-") != 0) { if (strcmp(report.input_name, "-") != 0)
if (report.use_gtk) setup_browser(true);
perf_gtk_setup_browser(argc, argv, true); else
else
setup_browser(true);
} else {
use_browser = 0; use_browser = 0;
}
/* /*
* Only in the newt browser we are doing integrated annotation, * Only in the newt browser we are doing integrated annotation,

View file

@ -728,34 +728,34 @@ struct trace_migrate_task_event {
struct trace_sched_handler { struct trace_sched_handler {
void (*switch_event)(struct trace_switch_event *, void (*switch_event)(struct trace_switch_event *,
struct machine *, struct machine *,
struct event *, struct event_format *,
int cpu, int cpu,
u64 timestamp, u64 timestamp,
struct thread *thread); struct thread *thread);
void (*runtime_event)(struct trace_runtime_event *, void (*runtime_event)(struct trace_runtime_event *,
struct machine *, struct machine *,
struct event *, struct event_format *,
int cpu, int cpu,
u64 timestamp, u64 timestamp,
struct thread *thread); struct thread *thread);
void (*wakeup_event)(struct trace_wakeup_event *, void (*wakeup_event)(struct trace_wakeup_event *,
struct machine *, struct machine *,
struct event *, struct event_format *,
int cpu, int cpu,
u64 timestamp, u64 timestamp,
struct thread *thread); struct thread *thread);
void (*fork_event)(struct trace_fork_event *, void (*fork_event)(struct trace_fork_event *,
struct event *, struct event_format *,
int cpu, int cpu,
u64 timestamp, u64 timestamp,
struct thread *thread); struct thread *thread);
void (*migrate_task_event)(struct trace_migrate_task_event *, void (*migrate_task_event)(struct trace_migrate_task_event *,
struct machine *machine, struct machine *machine,
struct event *, struct event_format *,
int cpu, int cpu,
u64 timestamp, u64 timestamp,
struct thread *thread); struct thread *thread);
@ -765,7 +765,7 @@ struct trace_sched_handler {
static void static void
replay_wakeup_event(struct trace_wakeup_event *wakeup_event, replay_wakeup_event(struct trace_wakeup_event *wakeup_event,
struct machine *machine __used, struct machine *machine __used,
struct event *event, struct event_format *event,
int cpu __used, int cpu __used,
u64 timestamp __used, u64 timestamp __used,
struct thread *thread __used) struct thread *thread __used)
@ -792,7 +792,7 @@ static u64 cpu_last_switched[MAX_CPUS];
static void static void
replay_switch_event(struct trace_switch_event *switch_event, replay_switch_event(struct trace_switch_event *switch_event,
struct machine *machine __used, struct machine *machine __used,
struct event *event, struct event_format *event,
int cpu, int cpu,
u64 timestamp, u64 timestamp,
struct thread *thread __used) struct thread *thread __used)
@ -835,7 +835,7 @@ replay_switch_event(struct trace_switch_event *switch_event,
static void static void
replay_fork_event(struct trace_fork_event *fork_event, replay_fork_event(struct trace_fork_event *fork_event,
struct event *event, struct event_format *event,
int cpu __used, int cpu __used,
u64 timestamp __used, u64 timestamp __used,
struct thread *thread __used) struct thread *thread __used)
@ -944,7 +944,7 @@ static void thread_atoms_insert(struct thread *thread)
static void static void
latency_fork_event(struct trace_fork_event *fork_event __used, latency_fork_event(struct trace_fork_event *fork_event __used,
struct event *event __used, struct event_format *event __used,
int cpu __used, int cpu __used,
u64 timestamp __used, u64 timestamp __used,
struct thread *thread __used) struct thread *thread __used)
@ -1026,7 +1026,7 @@ add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
static void static void
latency_switch_event(struct trace_switch_event *switch_event, latency_switch_event(struct trace_switch_event *switch_event,
struct machine *machine, struct machine *machine,
struct event *event __used, struct event_format *event __used,
int cpu, int cpu,
u64 timestamp, u64 timestamp,
struct thread *thread __used) struct thread *thread __used)
@ -1079,7 +1079,7 @@ latency_switch_event(struct trace_switch_event *switch_event,
static void static void
latency_runtime_event(struct trace_runtime_event *runtime_event, latency_runtime_event(struct trace_runtime_event *runtime_event,
struct machine *machine, struct machine *machine,
struct event *event __used, struct event_format *event __used,
int cpu, int cpu,
u64 timestamp, u64 timestamp,
struct thread *this_thread __used) struct thread *this_thread __used)
@ -1102,7 +1102,7 @@ latency_runtime_event(struct trace_runtime_event *runtime_event,
static void static void
latency_wakeup_event(struct trace_wakeup_event *wakeup_event, latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
struct machine *machine, struct machine *machine,
struct event *__event __used, struct event_format *__event __used,
int cpu __used, int cpu __used,
u64 timestamp, u64 timestamp,
struct thread *thread __used) struct thread *thread __used)
@ -1150,7 +1150,7 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
static void static void
latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event, latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
struct machine *machine, struct machine *machine,
struct event *__event __used, struct event_format *__event __used,
int cpu __used, int cpu __used,
u64 timestamp, u64 timestamp,
struct thread *thread __used) struct thread *thread __used)
@ -1361,7 +1361,7 @@ static struct trace_sched_handler *trace_handler;
static void static void
process_sched_wakeup_event(struct perf_tool *tool __used, process_sched_wakeup_event(struct perf_tool *tool __used,
struct event *event, struct event_format *event,
struct perf_sample *sample, struct perf_sample *sample,
struct machine *machine, struct machine *machine,
struct thread *thread) struct thread *thread)
@ -1398,7 +1398,7 @@ static char next_shortname2 = '0';
static void static void
map_switch_event(struct trace_switch_event *switch_event, map_switch_event(struct trace_switch_event *switch_event,
struct machine *machine, struct machine *machine,
struct event *event __used, struct event_format *event __used,
int this_cpu, int this_cpu,
u64 timestamp, u64 timestamp,
struct thread *thread __used) struct thread *thread __used)
@ -1476,7 +1476,7 @@ map_switch_event(struct trace_switch_event *switch_event,
static void static void
process_sched_switch_event(struct perf_tool *tool __used, process_sched_switch_event(struct perf_tool *tool __used,
struct event *event, struct event_format *event,
struct perf_sample *sample, struct perf_sample *sample,
struct machine *machine, struct machine *machine,
struct thread *thread) struct thread *thread)
@ -1512,7 +1512,7 @@ process_sched_switch_event(struct perf_tool *tool __used,
static void static void
process_sched_runtime_event(struct perf_tool *tool __used, process_sched_runtime_event(struct perf_tool *tool __used,
struct event *event, struct event_format *event,
struct perf_sample *sample, struct perf_sample *sample,
struct machine *machine, struct machine *machine,
struct thread *thread) struct thread *thread)
@ -1532,7 +1532,7 @@ process_sched_runtime_event(struct perf_tool *tool __used,
static void static void
process_sched_fork_event(struct perf_tool *tool __used, process_sched_fork_event(struct perf_tool *tool __used,
struct event *event, struct event_format *event,
struct perf_sample *sample, struct perf_sample *sample,
struct machine *machine __used, struct machine *machine __used,
struct thread *thread) struct thread *thread)
@ -1554,7 +1554,7 @@ process_sched_fork_event(struct perf_tool *tool __used,
static void static void
process_sched_exit_event(struct perf_tool *tool __used, process_sched_exit_event(struct perf_tool *tool __used,
struct event *event, struct event_format *event,
struct perf_sample *sample __used, struct perf_sample *sample __used,
struct machine *machine __used, struct machine *machine __used,
struct thread *thread __used) struct thread *thread __used)
@ -1565,7 +1565,7 @@ process_sched_exit_event(struct perf_tool *tool __used,
static void static void
process_sched_migrate_task_event(struct perf_tool *tool __used, process_sched_migrate_task_event(struct perf_tool *tool __used,
struct event *event, struct event_format *event,
struct perf_sample *sample, struct perf_sample *sample,
struct machine *machine, struct machine *machine,
struct thread *thread) struct thread *thread)
@ -1586,7 +1586,7 @@ process_sched_migrate_task_event(struct perf_tool *tool __used,
sample->time, thread); sample->time, thread);
} }
typedef void (*tracepoint_handler)(struct perf_tool *tool, struct event *event, typedef void (*tracepoint_handler)(struct perf_tool *tool, struct event_format *event,
struct perf_sample *sample, struct perf_sample *sample,
struct machine *machine, struct machine *machine,
struct thread *thread); struct thread *thread);

View file

@ -261,7 +261,7 @@ static void print_sample_start(struct perf_sample *sample,
struct perf_event_attr *attr) struct perf_event_attr *attr)
{ {
int type; int type;
struct event *event; struct event_format *event;
const char *evname = NULL; const char *evname = NULL;
unsigned long secs; unsigned long secs;
unsigned long usecs; unsigned long usecs;

View file

@ -173,24 +173,23 @@ static struct perf_event_attr very_very_detailed_attrs[] = {
struct perf_evlist *evsel_list; static struct perf_evlist *evsel_list;
static struct perf_target target = {
.uid = UINT_MAX,
};
static bool system_wide = false;
static int run_idx = 0; static int run_idx = 0;
static int run_count = 1; static int run_count = 1;
static bool no_inherit = false; static bool no_inherit = false;
static bool scale = true; static bool scale = true;
static bool no_aggr = false; static bool no_aggr = false;
static const char *target_pid;
static const char *target_tid;
static pid_t child_pid = -1; static pid_t child_pid = -1;
static bool null_run = false; static bool null_run = false;
static int detailed_run = 0; static int detailed_run = 0;
static bool sync_run = false; static bool sync_run = false;
static bool big_num = true; static bool big_num = true;
static int big_num_opt = -1; static int big_num_opt = -1;
static const char *cpu_list;
static const char *csv_sep = NULL; static const char *csv_sep = NULL;
static bool csv_output = false; static bool csv_output = false;
static bool group = false; static bool group = false;
@ -265,18 +264,18 @@ static double stddev_stats(struct stats *stats)
return sqrt(variance_mean); return sqrt(variance_mean);
} }
struct stats runtime_nsecs_stats[MAX_NR_CPUS]; static struct stats runtime_nsecs_stats[MAX_NR_CPUS];
struct stats runtime_cycles_stats[MAX_NR_CPUS]; static struct stats runtime_cycles_stats[MAX_NR_CPUS];
struct stats runtime_stalled_cycles_front_stats[MAX_NR_CPUS]; static struct stats runtime_stalled_cycles_front_stats[MAX_NR_CPUS];
struct stats runtime_stalled_cycles_back_stats[MAX_NR_CPUS]; static struct stats runtime_stalled_cycles_back_stats[MAX_NR_CPUS];
struct stats runtime_branches_stats[MAX_NR_CPUS]; static struct stats runtime_branches_stats[MAX_NR_CPUS];
struct stats runtime_cacherefs_stats[MAX_NR_CPUS]; static struct stats runtime_cacherefs_stats[MAX_NR_CPUS];
struct stats runtime_l1_dcache_stats[MAX_NR_CPUS]; static struct stats runtime_l1_dcache_stats[MAX_NR_CPUS];
struct stats runtime_l1_icache_stats[MAX_NR_CPUS]; static struct stats runtime_l1_icache_stats[MAX_NR_CPUS];
struct stats runtime_ll_cache_stats[MAX_NR_CPUS]; static struct stats runtime_ll_cache_stats[MAX_NR_CPUS];
struct stats runtime_itlb_cache_stats[MAX_NR_CPUS]; static struct stats runtime_itlb_cache_stats[MAX_NR_CPUS];
struct stats runtime_dtlb_cache_stats[MAX_NR_CPUS]; static struct stats runtime_dtlb_cache_stats[MAX_NR_CPUS];
struct stats walltime_nsecs_stats; static struct stats walltime_nsecs_stats;
static int create_perf_stat_counter(struct perf_evsel *evsel, static int create_perf_stat_counter(struct perf_evsel *evsel,
struct perf_evsel *first) struct perf_evsel *first)
@ -299,15 +298,15 @@ retry:
if (exclude_guest_missing) if (exclude_guest_missing)
evsel->attr.exclude_guest = evsel->attr.exclude_host = 0; evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
if (system_wide) { if (perf_target__has_cpu(&target)) {
ret = perf_evsel__open_per_cpu(evsel, evsel_list->cpus, ret = perf_evsel__open_per_cpu(evsel, evsel_list->cpus,
group, group_fd); group, group_fd);
if (ret) if (ret)
goto check_ret; goto check_ret;
return 0; return 0;
} }
if (!target_pid && !target_tid && (!group || evsel == first)) { if (!perf_target__has_task(&target) && (!group || evsel == first)) {
attr->disabled = 1; attr->disabled = 1;
attr->enable_on_exec = 1; attr->enable_on_exec = 1;
} }
@ -471,7 +470,7 @@ static int run_perf_stat(int argc __used, const char **argv)
exit(-1); exit(-1);
} }
if (!target_tid && !target_pid && !system_wide) if (perf_target__none(&target))
evsel_list->threads->map[0] = child_pid; evsel_list->threads->map[0] = child_pid;
/* /*
@ -506,7 +505,7 @@ static int run_perf_stat(int argc __used, const char **argv)
error("You may not have permission to collect %sstats.\n" error("You may not have permission to collect %sstats.\n"
"\t Consider tweaking" "\t Consider tweaking"
" /proc/sys/kernel/perf_event_paranoid or running as root.", " /proc/sys/kernel/perf_event_paranoid or running as root.",
system_wide ? "system-wide " : ""); target.system_wide ? "system-wide " : "");
} else { } else {
error("open_counter returned with %d (%s). " error("open_counter returned with %d (%s). "
"/bin/dmesg may provide additional information.\n", "/bin/dmesg may provide additional information.\n",
@ -998,14 +997,14 @@ static void print_stat(int argc, const char **argv)
if (!csv_output) { if (!csv_output) {
fprintf(output, "\n"); fprintf(output, "\n");
fprintf(output, " Performance counter stats for "); fprintf(output, " Performance counter stats for ");
if (!target_pid && !target_tid) { if (!perf_target__has_task(&target)) {
fprintf(output, "\'%s", argv[0]); fprintf(output, "\'%s", argv[0]);
for (i = 1; i < argc; i++) for (i = 1; i < argc; i++)
fprintf(output, " %s", argv[i]); fprintf(output, " %s", argv[i]);
} else if (target_pid) } else if (target.pid)
fprintf(output, "process id \'%s", target_pid); fprintf(output, "process id \'%s", target.pid);
else else
fprintf(output, "thread id \'%s", target_tid); fprintf(output, "thread id \'%s", target.tid);
fprintf(output, "\'"); fprintf(output, "\'");
if (run_count > 1) if (run_count > 1)
@ -1079,11 +1078,11 @@ static const struct option options[] = {
"event filter", parse_filter), "event filter", parse_filter),
OPT_BOOLEAN('i', "no-inherit", &no_inherit, OPT_BOOLEAN('i', "no-inherit", &no_inherit,
"child tasks do not inherit counters"), "child tasks do not inherit counters"),
OPT_STRING('p', "pid", &target_pid, "pid", OPT_STRING('p', "pid", &target.pid, "pid",
"stat events on existing process id"), "stat events on existing process id"),
OPT_STRING('t', "tid", &target_tid, "tid", OPT_STRING('t', "tid", &target.tid, "tid",
"stat events on existing thread id"), "stat events on existing thread id"),
OPT_BOOLEAN('a', "all-cpus", &system_wide, OPT_BOOLEAN('a', "all-cpus", &target.system_wide,
"system-wide collection from all CPUs"), "system-wide collection from all CPUs"),
OPT_BOOLEAN('g', "group", &group, OPT_BOOLEAN('g', "group", &group,
"put the counters into a counter group"), "put the counters into a counter group"),
@ -1102,7 +1101,7 @@ static const struct option options[] = {
OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL, OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL,
"print large numbers with thousands\' separators", "print large numbers with thousands\' separators",
stat__set_big_num), stat__set_big_num),
OPT_STRING('C', "cpu", &cpu_list, "cpu", OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
"list of cpus to monitor in system-wide"), "list of cpus to monitor in system-wide"),
OPT_BOOLEAN('A', "no-aggr", &no_aggr, OPT_BOOLEAN('A', "no-aggr", &no_aggr,
"disable CPU count aggregation"), "disable CPU count aggregation"),
@ -1220,13 +1219,13 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
} else if (big_num_opt == 0) /* User passed --no-big-num */ } else if (big_num_opt == 0) /* User passed --no-big-num */
big_num = false; big_num = false;
if (!argc && !target_pid && !target_tid) if (!argc && !perf_target__has_task(&target))
usage_with_options(stat_usage, options); usage_with_options(stat_usage, options);
if (run_count <= 0) if (run_count <= 0)
usage_with_options(stat_usage, options); usage_with_options(stat_usage, options);
/* no_aggr, cgroup are for system-wide only */ /* no_aggr, cgroup are for system-wide only */
if ((no_aggr || nr_cgroups) && !system_wide) { if ((no_aggr || nr_cgroups) && !perf_target__has_cpu(&target)) {
fprintf(stderr, "both cgroup and no-aggregation " fprintf(stderr, "both cgroup and no-aggregation "
"modes only available in system-wide mode\n"); "modes only available in system-wide mode\n");
@ -1236,23 +1235,14 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
if (add_default_attributes()) if (add_default_attributes())
goto out; goto out;
if (target_pid) perf_target__validate(&target);
target_tid = target_pid;
evsel_list->threads = thread_map__new_str(target_pid, if (perf_evlist__create_maps(evsel_list, &target) < 0) {
target_tid, UINT_MAX); if (perf_target__has_task(&target))
if (evsel_list->threads == NULL) { pr_err("Problems finding threads of monitor\n");
pr_err("Problems finding threads of monitor\n"); if (perf_target__has_cpu(&target))
usage_with_options(stat_usage, options); perror("failed to parse CPUs map");
}
if (system_wide)
evsel_list->cpus = cpu_map__new(cpu_list);
else
evsel_list->cpus = cpu_map__dummy_new();
if (evsel_list->cpus == NULL) {
perror("failed to parse CPUs map");
usage_with_options(stat_usage, options); usage_with_options(stat_usage, options);
return -1; return -1;
} }
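The perf_target__has_task()/has_cpu()/none() predicates used throughout this hunk classify the target once instead of testing individual flags. Reconstructed from how they are used here (a sketch, not a verbatim quote of util/target.h):

static inline bool perf_target__has_task(struct perf_target *target)
{
	return target->tid || target->pid || target->uid_str;
}

static inline bool perf_target__has_cpu(struct perf_target *target)
{
	return target->system_wide || target->cpu_list;
}

static inline bool perf_target__none(struct perf_target *target)
{
	return !perf_target__has_task(target) && !perf_target__has_cpu(target);
}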

View file

@ -1195,6 +1195,10 @@ realloc:
static int test__PERF_RECORD(void) static int test__PERF_RECORD(void)
{ {
struct perf_record_opts opts = { struct perf_record_opts opts = {
.target = {
.uid = UINT_MAX,
.uses_mmap = true,
},
.no_delay = true, .no_delay = true,
.freq = 10, .freq = 10,
.mmap_pages = 256, .mmap_pages = 256,
@ -1237,8 +1241,7 @@ static int test__PERF_RECORD(void)
* perf_evlist__prepare_workload we'll fill in the only thread * perf_evlist__prepare_workload we'll fill in the only thread
* we're monitoring, the one forked there. * we're monitoring, the one forked there.
*/ */
err = perf_evlist__create_maps(evlist, opts.target_pid, err = perf_evlist__create_maps(evlist, &opts.target);
opts.target_tid, UINT_MAX, opts.cpu_list);
if (err < 0) { if (err < 0) {
pr_debug("Not enough memory to create thread/cpu maps\n"); pr_debug("Not enough memory to create thread/cpu maps\n");
goto out_delete_evlist; goto out_delete_evlist;
@ -1579,8 +1582,6 @@ static int __test__rdpmc(void)
sa.sa_sigaction = segfault_handler; sa.sa_sigaction = segfault_handler;
sigaction(SIGSEGV, &sa, NULL); sigaction(SIGSEGV, &sa, NULL);
fprintf(stderr, "\n\n");
fd = sys_perf_event_open(&attr, 0, -1, -1, 0); fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
if (fd < 0) { if (fd < 0) {
die("Error: sys_perf_event_open() syscall returned " die("Error: sys_perf_event_open() syscall returned "
@ -1605,7 +1606,7 @@ static int __test__rdpmc(void)
loops *= 10; loops *= 10;
delta = now - stamp; delta = now - stamp;
fprintf(stderr, "%14d: %14Lu\n", n, (long long)delta); pr_debug("%14d: %14Lu\n", n, (long long)delta);
delta_sum += delta; delta_sum += delta;
} }
@ -1613,7 +1614,7 @@ static int __test__rdpmc(void)
munmap(addr, page_size); munmap(addr, page_size);
close(fd); close(fd);
fprintf(stderr, " "); pr_debug(" ");
if (!delta_sum) if (!delta_sum)
return -1; return -1;

View file

@ -588,7 +588,7 @@ static void *display_thread_tui(void *arg)
* via --uid. * via --uid.
*/ */
list_for_each_entry(pos, &top->evlist->entries, node) list_for_each_entry(pos, &top->evlist->entries, node)
pos->hists.uid_filter_str = top->uid_str; pos->hists.uid_filter_str = top->target.uid_str;
perf_evlist__tui_browse_hists(top->evlist, help, perf_evlist__tui_browse_hists(top->evlist, help,
perf_top__sort_new_samples, perf_top__sort_new_samples,
@ -948,6 +948,10 @@ try_again:
attr->type = PERF_TYPE_SOFTWARE; attr->type = PERF_TYPE_SOFTWARE;
attr->config = PERF_COUNT_SW_CPU_CLOCK; attr->config = PERF_COUNT_SW_CPU_CLOCK;
if (counter->name) {
free(counter->name);
counter->name = strdup(event_name(counter));
}
goto try_again; goto try_again;
} }
@ -1016,7 +1020,7 @@ static int __cmd_top(struct perf_top *top)
if (ret) if (ret)
goto out_delete; goto out_delete;
if (top->target_tid || top->uid != UINT_MAX) if (perf_target__has_task(&top->target))
perf_event__synthesize_thread_map(&top->tool, top->evlist->threads, perf_event__synthesize_thread_map(&top->tool, top->evlist->threads,
perf_event__process, perf_event__process,
&top->session->host_machine); &top->session->host_machine);
@ -1150,14 +1154,17 @@ static const char * const top_usage[] = {
int cmd_top(int argc, const char **argv, const char *prefix __used) int cmd_top(int argc, const char **argv, const char *prefix __used)
{ {
struct perf_evsel *pos; struct perf_evsel *pos;
int status = -ENOMEM; int status;
char errbuf[BUFSIZ];
struct perf_top top = { struct perf_top top = {
.count_filter = 5, .count_filter = 5,
.delay_secs = 2, .delay_secs = 2,
.uid = UINT_MAX,
.freq = 1000, /* 1 KHz */ .freq = 1000, /* 1 KHz */
.mmap_pages = 128, .mmap_pages = 128,
.sym_pcnt_filter = 5, .sym_pcnt_filter = 5,
.target = {
.uses_mmap = true,
},
}; };
char callchain_default_opt[] = "fractal,0.5,callee"; char callchain_default_opt[] = "fractal,0.5,callee";
const struct option options[] = { const struct option options[] = {
@ -1166,13 +1173,13 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
parse_events_option), parse_events_option),
OPT_INTEGER('c', "count", &top.default_interval, OPT_INTEGER('c', "count", &top.default_interval,
"event period to sample"), "event period to sample"),
OPT_STRING('p', "pid", &top.target_pid, "pid", OPT_STRING('p', "pid", &top.target.pid, "pid",
"profile events on existing process id"), "profile events on existing process id"),
OPT_STRING('t', "tid", &top.target_tid, "tid", OPT_STRING('t', "tid", &top.target.tid, "tid",
"profile events on existing thread id"), "profile events on existing thread id"),
OPT_BOOLEAN('a', "all-cpus", &top.system_wide, OPT_BOOLEAN('a', "all-cpus", &top.target.system_wide,
"system-wide collection from all CPUs"), "system-wide collection from all CPUs"),
OPT_STRING('C', "cpu", &top.cpu_list, "cpu", OPT_STRING('C', "cpu", &top.target.cpu_list, "cpu",
"list of cpus to monitor"), "list of cpus to monitor"),
OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name, OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
"file", "vmlinux pathname"), "file", "vmlinux pathname"),
@ -1227,7 +1234,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
"Display raw encoding of assembly instructions (default)"), "Display raw encoding of assembly instructions (default)"),
OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style", OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
"Specify disassembler style (e.g. -M intel for intel syntax)"), "Specify disassembler style (e.g. -M intel for intel syntax)"),
OPT_STRING('u', "uid", &top.uid_str, "user", "user to profile"), OPT_STRING('u', "uid", &top.target.uid_str, "user", "user to profile"),
OPT_END() OPT_END()
}; };
@ -1253,22 +1260,27 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
setup_browser(false); setup_browser(false);
top.uid = parse_target_uid(top.uid_str, top.target_tid, top.target_pid); status = perf_target__validate(&top.target);
if (top.uid_str != NULL && top.uid == UINT_MAX - 1) if (status) {
goto out_delete_evlist; perf_target__strerror(&top.target, status, errbuf, BUFSIZ);
ui__warning("%s", errbuf);
/* CPU and PID are mutually exclusive */
if (top.target_tid && top.cpu_list) {
printf("WARNING: PID switch overriding CPU\n");
sleep(1);
top.cpu_list = NULL;
} }
if (top.target_pid) status = perf_target__parse_uid(&top.target);
top.target_tid = top.target_pid; if (status) {
int saved_errno = errno;
if (perf_evlist__create_maps(top.evlist, top.target_pid, perf_target__strerror(&top.target, status, errbuf, BUFSIZ);
top.target_tid, top.uid, top.cpu_list) < 0) ui__warning("%s", errbuf);
status = -saved_errno;
goto out_delete_evlist;
}
if (perf_target__none(&top.target))
top.target.system_wide = true;
if (perf_evlist__create_maps(top.evlist, &top.target) < 0)
usage_with_options(top_usage, options); usage_with_options(top_usage, options);
if (!top.evlist->nr_entries && if (!top.evlist->nr_entries &&

View file

@ -207,10 +207,10 @@ extern const char perf_version_string[];
void pthread__unblock_sigwinch(void); void pthread__unblock_sigwinch(void);
#include "util/target.h"
struct perf_record_opts { struct perf_record_opts {
const char *target_pid; struct perf_target target;
const char *target_tid;
uid_t uid;
bool call_graph; bool call_graph;
bool group; bool group;
bool inherit_stat; bool inherit_stat;
@ -223,7 +223,6 @@ struct perf_record_opts {
bool sample_time; bool sample_time;
bool sample_id_all_missing; bool sample_id_all_missing;
bool exclude_guest_missing; bool exclude_guest_missing;
bool system_wide;
bool period; bool period;
unsigned int freq; unsigned int freq;
unsigned int mmap_pages; unsigned int mmap_pages;
@ -231,7 +230,6 @@ struct perf_record_opts {
int branch_stack; int branch_stack;
u64 default_interval; u64 default_interval;
u64 user_interval; u64 user_interval;
const char *cpu_list;
}; };
#endif #endif

View file

@ -27,9 +27,12 @@ static int ui_browser__percent_color(struct ui_browser *browser,
return HE_COLORSET_NORMAL; return HE_COLORSET_NORMAL;
} }
void ui_browser__set_color(struct ui_browser *self __used, int color) int ui_browser__set_color(struct ui_browser *browser, int color)
{ {
int ret = browser->current_color;
browser->current_color = color;
SLsmg_set_color(color); SLsmg_set_color(color);
return ret;
} }
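Returning the previous color lets a caller bracket a single write in another color without tracking state itself; the intended pattern is presumably:

int prev = ui_browser__set_color(browser, HE_COLORSET_ADDR);
/* ... write the address field ... */
ui_browser__set_color(browser, prev);	/* restore the caller's color */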
void ui_browser__set_percent_color(struct ui_browser *self, void ui_browser__set_percent_color(struct ui_browser *self,
@ -502,6 +505,12 @@ static struct ui_browser__colorset {
.fg = "blue", .fg = "blue",
.bg = "default", .bg = "default",
}, },
{
.colorset = HE_COLORSET_ADDR,
.name = "addr",
.fg = "magenta",
.bg = "default",
},
{ {
.name = NULL, .name = NULL,
} }
@ -584,6 +593,111 @@ unsigned int ui_browser__argv_refresh(struct ui_browser *browser)
return row; return row;
} }
void __ui_browser__vline(struct ui_browser *browser, unsigned int column,
u16 start, u16 end)
{
SLsmg_set_char_set(1);
ui_browser__gotorc(browser, start, column);
SLsmg_draw_vline(end - start + 1);
SLsmg_set_char_set(0);
}
void ui_browser__write_graph(struct ui_browser *browser __used, int graph)
{
SLsmg_set_char_set(1);
SLsmg_write_char(graph);
SLsmg_set_char_set(0);
}
static void __ui_browser__line_arrow_up(struct ui_browser *browser,
unsigned int column,
u64 start, u64 end)
{
unsigned int row, end_row;
SLsmg_set_char_set(1);
if (start < browser->top_idx + browser->height) {
row = start - browser->top_idx;
ui_browser__gotorc(browser, row, column);
SLsmg_write_char(SLSMG_LLCORN_CHAR);
ui_browser__gotorc(browser, row, column + 1);
SLsmg_draw_hline(2);
if (row-- == 0)
goto out;
} else
row = browser->height - 1;
if (end > browser->top_idx)
end_row = end - browser->top_idx;
else
end_row = 0;
ui_browser__gotorc(browser, end_row, column);
SLsmg_draw_vline(row - end_row + 1);
ui_browser__gotorc(browser, end_row, column);
if (end >= browser->top_idx) {
SLsmg_write_char(SLSMG_ULCORN_CHAR);
ui_browser__gotorc(browser, end_row, column + 1);
SLsmg_write_char(SLSMG_HLINE_CHAR);
ui_browser__gotorc(browser, end_row, column + 2);
SLsmg_write_char(SLSMG_RARROW_CHAR);
}
out:
SLsmg_set_char_set(0);
}
static void __ui_browser__line_arrow_down(struct ui_browser *browser,
unsigned int column,
u64 start, u64 end)
{
unsigned int row, end_row;
SLsmg_set_char_set(1);
if (start >= browser->top_idx) {
row = start - browser->top_idx;
ui_browser__gotorc(browser, row, column);
SLsmg_write_char(SLSMG_ULCORN_CHAR);
ui_browser__gotorc(browser, row, column + 1);
SLsmg_draw_hline(2);
if (row++ == 0)
goto out;
} else
row = 0;
if (end >= browser->top_idx + browser->height)
end_row = browser->height - 1;
else
end_row = end - browser->top_idx;
ui_browser__gotorc(browser, row, column);
SLsmg_draw_vline(end_row - row + 1);
ui_browser__gotorc(browser, end_row, column);
if (end < browser->top_idx + browser->height) {
SLsmg_write_char(SLSMG_LLCORN_CHAR);
ui_browser__gotorc(browser, end_row, column + 1);
SLsmg_write_char(SLSMG_HLINE_CHAR);
ui_browser__gotorc(browser, end_row, column + 2);
SLsmg_write_char(SLSMG_RARROW_CHAR);
}
out:
SLsmg_set_char_set(0);
}
void __ui_browser__line_arrow(struct ui_browser *browser, unsigned int column,
u64 start, u64 end)
{
if (start > end)
__ui_browser__line_arrow_up(browser, column, start, end);
else
__ui_browser__line_arrow_down(browser, column, start, end);
}
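__ui_browser__line_arrow() picks the drawing direction from the operand order, so callers pass the source and target entries of a jump directly; for example (entry indices hypothetical):

/* jump at entry 42 targets entry 17: start > end, so the arrow points up */
__ui_browser__line_arrow(browser, 1, 42, 17);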
void ui_browser__init(void) void ui_browser__init(void)
{ {
int i = 0; int i = 0;

View file

@ -10,11 +10,13 @@
#define HE_COLORSET_NORMAL 52 #define HE_COLORSET_NORMAL 52
#define HE_COLORSET_SELECTED 53 #define HE_COLORSET_SELECTED 53
#define HE_COLORSET_CODE 54 #define HE_COLORSET_CODE 54
#define HE_COLORSET_ADDR 55
struct ui_browser { struct ui_browser {
u64 index, top_idx; u64 index, top_idx;
void *top, *entries; void *top, *entries;
u16 y, x, width, height; u16 y, x, width, height;
int current_color;
void *priv; void *priv;
const char *title; const char *title;
char *helpline; char *helpline;
@ -27,7 +29,7 @@ struct ui_browser {
bool use_navkeypressed; bool use_navkeypressed;
}; };
void ui_browser__set_color(struct ui_browser *self, int color); int ui_browser__set_color(struct ui_browser *browser, int color);
void ui_browser__set_percent_color(struct ui_browser *self, void ui_browser__set_percent_color(struct ui_browser *self,
double percent, bool current); double percent, bool current);
bool ui_browser__is_current_entry(struct ui_browser *self, unsigned row); bool ui_browser__is_current_entry(struct ui_browser *self, unsigned row);
@ -35,6 +37,9 @@ void ui_browser__refresh_dimensions(struct ui_browser *self);
void ui_browser__reset_index(struct ui_browser *self); void ui_browser__reset_index(struct ui_browser *self);
void ui_browser__gotorc(struct ui_browser *self, int y, int x); void ui_browser__gotorc(struct ui_browser *self, int y, int x);
void ui_browser__write_graph(struct ui_browser *browser, int graph);
void __ui_browser__line_arrow(struct ui_browser *browser, unsigned int column,
u64 start, u64 end);
void __ui_browser__show_title(struct ui_browser *browser, const char *title); void __ui_browser__show_title(struct ui_browser *browser, const char *title);
void ui_browser__show_title(struct ui_browser *browser, const char *title); void ui_browser__show_title(struct ui_browser *browser, const char *title);
int ui_browser__show(struct ui_browser *self, const char *title, int ui_browser__show(struct ui_browser *self, const char *title,
@ -44,6 +49,8 @@ int ui_browser__refresh(struct ui_browser *self);
int ui_browser__run(struct ui_browser *browser, int delay_secs); int ui_browser__run(struct ui_browser *browser, int delay_secs);
void ui_browser__update_nr_entries(struct ui_browser *browser, u32 nr_entries); void ui_browser__update_nr_entries(struct ui_browser *browser, u32 nr_entries);
void ui_browser__handle_resize(struct ui_browser *browser); void ui_browser__handle_resize(struct ui_browser *browser);
void __ui_browser__vline(struct ui_browser *browser, unsigned int column,
u16 start, u16 end);
int ui_browser__warning(struct ui_browser *browser, int timeout, int ui_browser__warning(struct ui_browser *browser, int timeout,
const char *format, ...); const char *format, ...);


@ -0,0 +1,867 @@
#include "../../util/util.h"
#include "../browser.h"
#include "../helpline.h"
#include "../libslang.h"
#include "../ui.h"
#include "../util.h"
#include "../../util/annotate.h"
#include "../../util/hist.h"
#include "../../util/sort.h"
#include "../../util/symbol.h"
#include <pthread.h>
#include <newt.h>
struct browser_disasm_line {
struct rb_node rb_node;
double percent;
u32 idx;
int idx_asm;
int jump_sources;
};
struct annotate_browser {
struct ui_browser b;
struct rb_root entries;
struct rb_node *curr_hot;
struct disasm_line *selection;
struct disasm_line **offsets;
u64 start;
int nr_asm_entries;
int nr_entries;
int max_jump_sources;
int nr_jumps;
bool hide_src_code;
bool use_offset;
bool jump_arrows;
bool show_nr_jumps;
bool searching_backwards;
u8 addr_width;
u8 jumps_width;
u8 target_width;
u8 min_addr_width;
u8 max_addr_width;
char search_bf[128];
};
static inline struct browser_disasm_line *disasm_line__browser(struct disasm_line *dl)
{
return (struct browser_disasm_line *)(dl + 1);
}
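The private area is not a second allocation: disasm_line__new(), further down in this diff, over-allocates the struct disasm_line with a privsize tail, so the browser's per-line state lives immediately behind the line it annotates (annotate_browser__set_rb_top() walks the same layout backwards with ((struct disasm_line *)bpos) - 1). A minimal sketch of the pattern, with hypothetical names:

/* Hypothetical sketch of the single-allocation layout (not perf code). */
#include <stdlib.h>

struct line { int offset; };           /* stands in for struct disasm_line */
struct line_priv { double percent; };  /* stands in for struct browser_disasm_line */

static struct line *line__new(size_t privsize)
{
	/* one zeroed block: [struct line][privsize bytes of private data] */
	return calloc(1, sizeof(struct line) + privsize);
}

static struct line_priv *line__priv(struct line *l)
{
	return (struct line_priv *)(l + 1); /* same cast as disasm_line__browser() */
}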
static bool disasm_line__filter(struct ui_browser *browser, void *entry)
{
struct annotate_browser *ab = container_of(browser, struct annotate_browser, b);
if (ab->hide_src_code) {
struct disasm_line *dl = list_entry(entry, struct disasm_line, node);
return dl->offset == -1;
}
return false;
}
static int annotate_browser__jumps_percent_color(struct annotate_browser *browser,
int nr, bool current)
{
if (current && (!browser->b.use_navkeypressed || browser->b.navkeypressed))
return HE_COLORSET_SELECTED;
if (nr == browser->max_jump_sources)
return HE_COLORSET_TOP;
if (nr > 1)
return HE_COLORSET_MEDIUM;
return HE_COLORSET_NORMAL;
}
static int annotate_browser__set_jumps_percent_color(struct annotate_browser *browser,
int nr, bool current)
{
int color = annotate_browser__jumps_percent_color(browser, nr, current);
return ui_browser__set_color(&browser->b, color);
}
static void annotate_browser__write(struct ui_browser *self, void *entry, int row)
{
struct annotate_browser *ab = container_of(self, struct annotate_browser, b);
struct disasm_line *dl = list_entry(entry, struct disasm_line, node);
struct browser_disasm_line *bdl = disasm_line__browser(dl);
bool current_entry = ui_browser__is_current_entry(self, row);
bool change_color = (!ab->hide_src_code &&
(!current_entry || (self->use_navkeypressed &&
!self->navkeypressed)));
int width = self->width, printed;
char bf[256];
if (dl->offset != -1 && bdl->percent != 0.0) {
ui_browser__set_percent_color(self, bdl->percent, current_entry);
slsmg_printf("%6.2f ", bdl->percent);
} else {
ui_browser__set_percent_color(self, 0, current_entry);
slsmg_write_nstring(" ", 7);
}
SLsmg_write_char(' ');
/* The scroll bar isn't being used */
if (!self->navkeypressed)
width += 1;
if (!*dl->line)
slsmg_write_nstring(" ", width - 7);
else if (dl->offset == -1) {
printed = scnprintf(bf, sizeof(bf), "%*s ",
ab->addr_width, " ");
slsmg_write_nstring(bf, printed);
slsmg_write_nstring(dl->line, width - printed - 6);
} else {
u64 addr = dl->offset;
int color = -1;
if (!ab->use_offset)
addr += ab->start;
if (!ab->use_offset) {
printed = scnprintf(bf, sizeof(bf), "%" PRIx64 ": ", addr);
} else {
if (bdl->jump_sources) {
if (ab->show_nr_jumps) {
int prev;
printed = scnprintf(bf, sizeof(bf), "%*d ",
ab->jumps_width,
bdl->jump_sources);
prev = annotate_browser__set_jumps_percent_color(ab, bdl->jump_sources,
current_entry);
slsmg_write_nstring(bf, printed);
ui_browser__set_color(self, prev);
}
printed = scnprintf(bf, sizeof(bf), "%*" PRIx64 ": ",
ab->target_width, addr);
} else {
printed = scnprintf(bf, sizeof(bf), "%*s ",
ab->addr_width, " ");
}
}
if (change_color)
color = ui_browser__set_color(self, HE_COLORSET_ADDR);
slsmg_write_nstring(bf, printed);
if (change_color)
ui_browser__set_color(self, color);
if (dl->ins && dl->ins->ops->scnprintf) {
if (ins__is_jump(dl->ins)) {
bool fwd = dl->ops.target.offset > (u64)dl->offset;
ui_browser__write_graph(self, fwd ? SLSMG_DARROW_CHAR :
SLSMG_UARROW_CHAR);
SLsmg_write_char(' ');
} else if (ins__is_call(dl->ins)) {
ui_browser__write_graph(self, SLSMG_RARROW_CHAR);
SLsmg_write_char(' ');
} else {
slsmg_write_nstring(" ", 2);
}
} else {
if (strcmp(dl->name, "retq")) {
slsmg_write_nstring(" ", 2);
} else {
ui_browser__write_graph(self, SLSMG_LARROW_CHAR);
SLsmg_write_char(' ');
}
}
disasm_line__scnprintf(dl, bf, sizeof(bf), !ab->use_offset);
slsmg_write_nstring(bf, width - 10 - printed);
}
if (current_entry)
ab->selection = dl;
}
static void annotate_browser__draw_current_jump(struct ui_browser *browser)
{
struct annotate_browser *ab = container_of(browser, struct annotate_browser, b);
struct disasm_line *cursor = ab->selection, *target;
struct browser_disasm_line *btarget, *bcursor;
unsigned int from, to;
if (!cursor->ins || !ins__is_jump(cursor->ins) ||
!disasm_line__has_offset(cursor))
return;
target = ab->offsets[cursor->ops.target.offset];
if (!target)
return;
bcursor = disasm_line__browser(cursor);
btarget = disasm_line__browser(target);
if (ab->hide_src_code) {
from = bcursor->idx_asm;
to = btarget->idx_asm;
} else {
from = (u64)bcursor->idx;
to = (u64)btarget->idx;
}
ui_browser__set_color(browser, HE_COLORSET_CODE);
__ui_browser__line_arrow(browser, 9 + ab->addr_width, from, to);
}
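Note that from/to here are browser rows, not byte offsets: when source lines are hidden the endpoints come from idx_asm (assembly-only numbering), otherwise from idx, so the arrow always lands on the rows currently displayed. The column argument, 9 + ab->addr_width, places the vertical segment just right of the percentage and address columns.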
static unsigned int annotate_browser__refresh(struct ui_browser *browser)
{
struct annotate_browser *ab = container_of(browser, struct annotate_browser, b);
int ret = ui_browser__list_head_refresh(browser);
if (ab->jump_arrows)
annotate_browser__draw_current_jump(browser);
ui_browser__set_color(browser, HE_COLORSET_NORMAL);
__ui_browser__vline(browser, 7, 0, browser->height - 1);
return ret;
}
static double disasm_line__calc_percent(struct disasm_line *dl, struct symbol *sym, int evidx)
{
double percent = 0.0;
if (dl->offset != -1) {
int len = sym->end - sym->start;
unsigned int hits = 0;
struct annotation *notes = symbol__annotation(sym);
struct source_line *src_line = notes->src->lines;
struct sym_hist *h = annotation__histogram(notes, evidx);
s64 offset = dl->offset;
struct disasm_line *next;
next = disasm__get_next_ip_line(&notes->src->source, dl);
while (offset < (s64)len &&
(next == NULL || offset < next->offset)) {
if (src_line) {
percent += src_line[offset].percent;
} else
hits += h->addr[offset];
++offset;
}
/*
* If the percentage wasn't already calculated in
* symbol__get_source_line, do it now:
*/
if (src_line == NULL && h->sum)
percent = 100.0 * hits / h->sum;
}
return percent;
}
static void disasm_rb_tree__insert(struct rb_root *root, struct browser_disasm_line *bdl)
{
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
struct browser_disasm_line *l;
while (*p != NULL) {
parent = *p;
l = rb_entry(parent, struct browser_disasm_line, rb_node);
if (bdl->percent < l->percent)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
}
rb_link_node(&bdl->rb_node, parent, p);
rb_insert_color(&bdl->rb_node, root);
}
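This is the canonical linux/rbtree.h insertion walk: descend **p to the leaf slot ordered by percent, then rb_link_node() followed by rb_insert_color() to rebalance. Equal percentages simply go right, and since the tree is ordered ascending, rb_last() (used below for curr_hot) yields the hottest line.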
static void annotate_browser__set_top(struct annotate_browser *self,
struct disasm_line *pos, u32 idx)
{
unsigned back;
ui_browser__refresh_dimensions(&self->b);
back = self->b.height / 2;
self->b.top_idx = self->b.index = idx;
while (self->b.top_idx != 0 && back != 0) {
pos = list_entry(pos->node.prev, struct disasm_line, node);
if (disasm_line__filter(&self->b, &pos->node))
continue;
--self->b.top_idx;
--back;
}
self->b.top = pos;
self->b.navkeypressed = true;
}
static void annotate_browser__set_rb_top(struct annotate_browser *browser,
struct rb_node *nd)
{
struct browser_disasm_line *bpos;
struct disasm_line *pos;
bpos = rb_entry(nd, struct browser_disasm_line, rb_node);
pos = ((struct disasm_line *)bpos) - 1;
annotate_browser__set_top(browser, pos, bpos->idx);
browser->curr_hot = nd;
}
static void annotate_browser__calc_percent(struct annotate_browser *browser,
int evidx)
{
struct map_symbol *ms = browser->b.priv;
struct symbol *sym = ms->sym;
struct annotation *notes = symbol__annotation(sym);
struct disasm_line *pos;
browser->entries = RB_ROOT;
pthread_mutex_lock(&notes->lock);
list_for_each_entry(pos, &notes->src->source, node) {
struct browser_disasm_line *bpos = disasm_line__browser(pos);
bpos->percent = disasm_line__calc_percent(pos, sym, evidx);
if (bpos->percent < 0.01) {
RB_CLEAR_NODE(&bpos->rb_node);
continue;
}
disasm_rb_tree__insert(&browser->entries, bpos);
}
pthread_mutex_unlock(&notes->lock);
browser->curr_hot = rb_last(&browser->entries);
}
static bool annotate_browser__toggle_source(struct annotate_browser *browser)
{
struct disasm_line *dl;
struct browser_disasm_line *bdl;
off_t offset = browser->b.index - browser->b.top_idx;
browser->b.seek(&browser->b, offset, SEEK_CUR);
dl = list_entry(browser->b.top, struct disasm_line, node);
bdl = disasm_line__browser(dl);
if (browser->hide_src_code) {
if (bdl->idx_asm < offset)
offset = bdl->idx;
browser->b.nr_entries = browser->nr_entries;
browser->hide_src_code = false;
browser->b.seek(&browser->b, -offset, SEEK_CUR);
browser->b.top_idx = bdl->idx - offset;
browser->b.index = bdl->idx;
} else {
if (bdl->idx_asm < 0) {
ui_helpline__puts("Only available for assembly lines.");
browser->b.seek(&browser->b, -offset, SEEK_CUR);
return false;
}
if (bdl->idx_asm < offset)
offset = bdl->idx_asm;
browser->b.nr_entries = browser->nr_asm_entries;
browser->hide_src_code = true;
browser->b.seek(&browser->b, -offset, SEEK_CUR);
browser->b.top_idx = bdl->idx_asm - offset;
browser->b.index = bdl->idx_asm;
}
return true;
}
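The offset bookkeeping keeps the cursor on the same disassembly line across the toggle: the browser first seeks its top forward to the entry under the cursor, switches nr_entries to the other index space (idx for the full listing, idx_asm for assembly only, clamping offset when the cursor sat on a source line), then seeks back so top_idx and index are consistent again.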
static bool annotate_browser__callq(struct annotate_browser *browser,
int evidx, void (*timer)(void *arg),
void *arg, int delay_secs)
{
struct map_symbol *ms = browser->b.priv;
struct disasm_line *dl = browser->selection;
struct symbol *sym = ms->sym;
struct annotation *notes;
struct symbol *target;
u64 ip;
if (!ins__is_call(dl->ins))
return false;
ip = ms->map->map_ip(ms->map, dl->ops.target.addr);
target = map__find_symbol(ms->map, ip, NULL);
if (target == NULL) {
ui_helpline__puts("The called function was not found.");
return true;
}
notes = symbol__annotation(target);
pthread_mutex_lock(&notes->lock);
if (notes->src == NULL && symbol__alloc_hist(target) < 0) {
pthread_mutex_unlock(&notes->lock);
ui__warning("Not enough memory for annotating '%s' symbol!\n",
target->name);
return true;
}
pthread_mutex_unlock(&notes->lock);
symbol__tui_annotate(target, ms->map, evidx, timer, arg, delay_secs);
ui_browser__show_title(&browser->b, sym->name);
return true;
}
static
struct disasm_line *annotate_browser__find_offset(struct annotate_browser *browser,
s64 offset, s64 *idx)
{
struct map_symbol *ms = browser->b.priv;
struct symbol *sym = ms->sym;
struct annotation *notes = symbol__annotation(sym);
struct disasm_line *pos;
*idx = 0;
list_for_each_entry(pos, &notes->src->source, node) {
if (pos->offset == offset)
return pos;
if (!disasm_line__filter(&browser->b, &pos->node))
++*idx;
}
return NULL;
}
static bool annotate_browser__jump(struct annotate_browser *browser)
{
struct disasm_line *dl = browser->selection;
s64 idx;
if (!ins__is_jump(dl->ins))
return false;
dl = annotate_browser__find_offset(browser, dl->ops.target.offset, &idx);
if (dl == NULL) {
ui_helpline__puts("Invalid jump offset");
return true;
}
annotate_browser__set_top(browser, dl, idx);
return true;
}
static
struct disasm_line *annotate_browser__find_string(struct annotate_browser *browser,
char *s, s64 *idx)
{
struct map_symbol *ms = browser->b.priv;
struct symbol *sym = ms->sym;
struct annotation *notes = symbol__annotation(sym);
struct disasm_line *pos = browser->selection;
*idx = browser->b.index;
list_for_each_entry_continue(pos, &notes->src->source, node) {
if (disasm_line__filter(&browser->b, &pos->node))
continue;
++*idx;
if (pos->line && strstr(pos->line, s) != NULL)
return pos;
}
return NULL;
}
static bool __annotate_browser__search(struct annotate_browser *browser)
{
struct disasm_line *dl;
s64 idx;
dl = annotate_browser__find_string(browser, browser->search_bf, &idx);
if (dl == NULL) {
ui_helpline__puts("String not found!");
return false;
}
annotate_browser__set_top(browser, dl, idx);
browser->searching_backwards = false;
return true;
}
static
struct disasm_line *annotate_browser__find_string_reverse(struct annotate_browser *browser,
char *s, s64 *idx)
{
struct map_symbol *ms = browser->b.priv;
struct symbol *sym = ms->sym;
struct annotation *notes = symbol__annotation(sym);
struct disasm_line *pos = browser->selection;
*idx = browser->b.index;
list_for_each_entry_continue_reverse(pos, &notes->src->source, node) {
if (disasm_line__filter(&browser->b, &pos->node))
continue;
--*idx;
if (pos->line && strstr(pos->line, s) != NULL)
return pos;
}
return NULL;
}
static bool __annotate_browser__search_reverse(struct annotate_browser *browser)
{
struct disasm_line *dl;
s64 idx;
dl = annotate_browser__find_string_reverse(browser, browser->search_bf, &idx);
if (dl == NULL) {
ui_helpline__puts("String not found!");
return false;
}
annotate_browser__set_top(browser, dl, idx);
browser->searching_backwards = true;
return true;
}
static bool annotate_browser__search_window(struct annotate_browser *browser,
int delay_secs)
{
if (ui_browser__input_window("Search", "String: ", browser->search_bf,
"ENTER: OK, ESC: Cancel",
delay_secs * 2) != K_ENTER ||
!*browser->search_bf)
return false;
return true;
}
static bool annotate_browser__search(struct annotate_browser *browser, int delay_secs)
{
if (annotate_browser__search_window(browser, delay_secs))
return __annotate_browser__search(browser);
return false;
}
static bool annotate_browser__continue_search(struct annotate_browser *browser,
int delay_secs)
{
if (!*browser->search_bf)
return annotate_browser__search(browser, delay_secs);
return __annotate_browser__search(browser);
}
static bool annotate_browser__search_reverse(struct annotate_browser *browser,
int delay_secs)
{
if (annotate_browser__search_window(browser, delay_secs))
return __annotate_browser__search_reverse(browser);
return false;
}
static
bool annotate_browser__continue_search_reverse(struct annotate_browser *browser,
int delay_secs)
{
if (!*browser->search_bf)
return annotate_browser__search_reverse(browser, delay_secs);
return __annotate_browser__search_reverse(browser);
}
static int annotate_browser__run(struct annotate_browser *self, int evidx,
void(*timer)(void *arg),
void *arg, int delay_secs)
{
struct rb_node *nd = NULL;
struct map_symbol *ms = self->b.priv;
struct symbol *sym = ms->sym;
const char *help = "Press 'h' for help on key bindings";
int key;
if (ui_browser__show(&self->b, sym->name, help) < 0)
return -1;
annotate_browser__calc_percent(self, evidx);
if (self->curr_hot) {
annotate_browser__set_rb_top(self, self->curr_hot);
self->b.navkeypressed = false;
}
nd = self->curr_hot;
while (1) {
key = ui_browser__run(&self->b, delay_secs);
if (delay_secs != 0) {
annotate_browser__calc_percent(self, evidx);
/*
* Current line focus got out of the list of most active
* lines, NULL it so that if TAB|UNTAB is pressed, we
* move to curr_hot (current hottest line).
*/
if (nd != NULL && RB_EMPTY_NODE(nd))
nd = NULL;
}
switch (key) {
case K_TIMER:
if (timer != NULL)
timer(arg);
if (delay_secs != 0)
symbol__annotate_decay_histogram(sym, evidx);
continue;
case K_TAB:
if (nd != NULL) {
nd = rb_prev(nd);
if (nd == NULL)
nd = rb_last(&self->entries);
} else
nd = self->curr_hot;
break;
case K_UNTAB:
if (nd != NULL) {
nd = rb_next(nd);
if (nd == NULL)
nd = rb_first(&self->entries);
} else
nd = self->curr_hot;
break;
case K_F1:
case 'h':
ui_browser__help_window(&self->b,
"UP/DOWN/PGUP\n"
"PGDN/SPACE Navigate\n"
"q/ESC/CTRL+C Exit\n\n"
"-> Go to target\n"
"<- Exit\n"
"h Cycle thru hottest instructions\n"
"j Toggle showing jump to target arrows\n"
"J Toggle showing number of jump sources on targets\n"
"n Search next string\n"
"o Toggle disassembler output/simplified view\n"
"s Toggle source code view\n"
"/ Search string\n"
"? Search previous string\n");
continue;
case 'H':
nd = self->curr_hot;
break;
case 's':
if (annotate_browser__toggle_source(self))
ui_helpline__puts(help);
continue;
case 'o':
self->use_offset = !self->use_offset;
if (self->use_offset)
self->target_width = self->min_addr_width;
else
self->target_width = self->max_addr_width;
update_addr_width:
self->addr_width = self->target_width;
if (self->show_nr_jumps)
self->addr_width += self->jumps_width + 1;
continue;
case 'j':
self->jump_arrows = !self->jump_arrows;
continue;
case 'J':
self->show_nr_jumps = !self->show_nr_jumps;
goto update_addr_width;
case '/':
if (annotate_browser__search(self, delay_secs)) {
show_help:
ui_helpline__puts(help);
}
continue;
case 'n':
if (self->searching_backwards ?
annotate_browser__continue_search_reverse(self, delay_secs) :
annotate_browser__continue_search(self, delay_secs))
goto show_help;
continue;
case '?':
if (annotate_browser__search_reverse(self, delay_secs))
goto show_help;
continue;
case K_ENTER:
case K_RIGHT:
if (self->selection == NULL)
ui_helpline__puts("Huh? No selection. Report to linux-kernel@vger.kernel.org");
else if (self->selection->offset == -1)
ui_helpline__puts("Actions are only available for assembly lines.");
else if (!self->selection->ins) {
if (strcmp(self->selection->name, "retq"))
goto show_sup_ins;
goto out;
} else if (!(annotate_browser__jump(self) ||
annotate_browser__callq(self, evidx, timer, arg, delay_secs))) {
show_sup_ins:
ui_helpline__puts("Actions are only available for 'callq', 'retq' & jump instructions.");
}
continue;
case K_LEFT:
case K_ESC:
case 'q':
case CTRL('c'):
goto out;
default:
continue;
}
if (nd != NULL)
annotate_browser__set_rb_top(self, nd);
}
out:
ui_browser__hide(&self->b);
return key;
}
int hist_entry__tui_annotate(struct hist_entry *he, int evidx,
void(*timer)(void *arg), void *arg, int delay_secs)
{
return symbol__tui_annotate(he->ms.sym, he->ms.map, evidx,
timer, arg, delay_secs);
}
static void annotate_browser__mark_jump_targets(struct annotate_browser *browser,
size_t size)
{
u64 offset;
for (offset = 0; offset < size; ++offset) {
struct disasm_line *dl = browser->offsets[offset], *dlt;
struct browser_disasm_line *bdlt;
if (!dl || !dl->ins || !ins__is_jump(dl->ins) ||
!disasm_line__has_offset(dl))
continue;
if (dl->ops.target.offset >= size) {
ui__error("jump to after symbol!\n"
"size: %zx, jump target: %" PRIx64,
size, dl->ops.target.offset);
continue;
}
dlt = browser->offsets[dl->ops.target.offset];
/*
* FIXME: Oops, no jump target? Buggy disassembler? Or do we
* have to adjust to the previous offset?
*/
if (dlt == NULL)
continue;
bdlt = disasm_line__browser(dlt);
if (++bdlt->jump_sources > browser->max_jump_sources)
browser->max_jump_sources = bdlt->jump_sources;
++browser->nr_jumps;
}
}
static inline int width_jumps(int n)
{
if (n >= 100)
return 5;
if (n / 10)
return 2;
return 1;
}
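The jump-sources column is therefore one character wide for up to 9 sources, two for up to 99, and a fixed five for anything larger:

width_jumps(7)    == 1
width_jumps(42)   == 2
width_jumps(1234) == 5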
int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
void(*timer)(void *arg), void *arg,
int delay_secs)
{
struct disasm_line *pos, *n;
struct annotation *notes;
const size_t size = symbol__size(sym);
struct map_symbol ms = {
.map = map,
.sym = sym,
};
struct annotate_browser browser = {
.b = {
.refresh = annotate_browser__refresh,
.seek = ui_browser__list_head_seek,
.write = annotate_browser__write,
.filter = disasm_line__filter,
.priv = &ms,
.use_navkeypressed = true,
},
.use_offset = true,
.jump_arrows = true,
};
int ret = -1;
if (sym == NULL)
return -1;
if (map->dso->annotate_warned)
return -1;
browser.offsets = zalloc(size * sizeof(struct disasm_line *));
if (browser.offsets == NULL) {
ui__error("Not enough memory!");
return -1;
}
if (symbol__annotate(sym, map, sizeof(struct browser_disasm_line)) < 0) {
ui__error("%s", ui_helpline__last_msg);
goto out_free_offsets;
}
ui_helpline__push("Press <- or ESC to exit");
notes = symbol__annotation(sym);
browser.start = map__rip_2objdump(map, sym->start);
list_for_each_entry(pos, &notes->src->source, node) {
struct browser_disasm_line *bpos;
size_t line_len = strlen(pos->line);
if (browser.b.width < line_len)
browser.b.width = line_len;
bpos = disasm_line__browser(pos);
bpos->idx = browser.nr_entries++;
if (pos->offset != -1) {
bpos->idx_asm = browser.nr_asm_entries++;
/*
* FIXME: short term bandaid to cope with assembly
* routines that come with labels in the same column
* as the address in objdump, sigh.
*
* E.g. copy_user_generic_unrolled
*/
if (pos->offset < (s64)size)
browser.offsets[pos->offset] = pos;
} else
bpos->idx_asm = -1;
}
annotate_browser__mark_jump_targets(&browser, size);
browser.addr_width = browser.target_width = browser.min_addr_width = hex_width(size);
browser.max_addr_width = hex_width(sym->end);
browser.jumps_width = width_jumps(browser.max_jump_sources);
browser.b.nr_entries = browser.nr_entries;
browser.b.entries = &notes->src->source;
browser.b.width += 18; /* Percentage */
ret = annotate_browser__run(&browser, evidx, timer, arg, delay_secs);
list_for_each_entry_safe(pos, n, &notes->src->source, node) {
list_del(&pos->node);
disasm_line__free(pos);
}
out_free_offsets:
free(browser.offsets);
return ret;
}


@ -5,12 +5,12 @@
#include <newt.h> #include <newt.h>
#include <linux/rbtree.h> #include <linux/rbtree.h>
#include "../../evsel.h" #include "../../util/evsel.h"
#include "../../evlist.h" #include "../../util/evlist.h"
#include "../../hist.h" #include "../../util/hist.h"
#include "../../pstack.h" #include "../../util/pstack.h"
#include "../../sort.h" #include "../../util/sort.h"
#include "../../util.h" #include "../../util/util.h"
#include "../browser.h" #include "../browser.h"
#include "../helpline.h" #include "../helpline.h"
@ -840,10 +840,14 @@ static int hists__browser_title(struct hists *self, char *bf, size_t size,
int printed; int printed;
const struct dso *dso = self->dso_filter; const struct dso *dso = self->dso_filter;
const struct thread *thread = self->thread_filter; const struct thread *thread = self->thread_filter;
unsigned long nr_events = self->stats.nr_events[PERF_RECORD_SAMPLE]; unsigned long nr_samples = self->stats.nr_events[PERF_RECORD_SAMPLE];
u64 nr_events = self->stats.total_period;
nr_samples = convert_unit(nr_samples, &unit);
printed = scnprintf(bf, size,
"Samples: %lu%c of event '%s', Event count (approx.): %" PRIu64,
nr_samples, unit, ev_name, nr_events);
nr_events = convert_unit(nr_events, &unit);
printed = scnprintf(bf, size, "Events: %lu%c %s", nr_events, unit, ev_name);
if (self->uid_filter_str) if (self->uid_filter_str)
printed += snprintf(bf + printed, size - printed, printed += snprintf(bf + printed, size - printed,
@ -937,7 +941,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
goto zoom_dso; goto zoom_dso;
case 't': case 't':
goto zoom_thread; goto zoom_thread;
case 's': case '/':
if (ui_browser__input_window("Symbol to show", if (ui_browser__input_window("Symbol to show",
"Please enter the name of symbol you want to see", "Please enter the name of symbol you want to see",
buf, "ENTER: OK, ESC: Cancel", buf, "ENTER: OK, ESC: Cancel",
@ -965,7 +969,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
"E Expand all callchains\n" "E Expand all callchains\n"
"d Zoom into current DSO\n" "d Zoom into current DSO\n"
"t Zoom into current Thread\n" "t Zoom into current Thread\n"
"s Filter symbol by name"); "/ Filter symbol by name");
continue; continue;
case K_ENTER: case K_ENTER:
case K_RIGHT: case K_RIGHT:


@ -5,9 +5,9 @@
#include <sys/ttydefaults.h> #include <sys/ttydefaults.h>
#include <string.h> #include <string.h>
#include <linux/bitops.h> #include <linux/bitops.h>
#include "../../util.h" #include "../../util/util.h"
#include "../../debug.h" #include "../../util/debug.h"
#include "../../symbol.h" #include "../../util/symbol.h"
#include "../browser.h" #include "../browser.h"
#include "../helpline.h" #include "../helpline.h"
#include "map.h" #include "map.h"


@ -9,24 +9,13 @@
#define MAX_COLUMNS 32 #define MAX_COLUMNS 32
void perf_gtk_setup_browser(int argc, const char *argv[], static void perf_gtk__signal(int sig)
bool fallback_to_pager __used)
{
gtk_init(&argc, (char ***)&argv);
}
void perf_gtk_exit_browser(bool wait_for_ok __used)
{
gtk_main_quit();
}
static void perf_gtk_signal(int sig)
{ {
psignal(sig, "perf"); psignal(sig, "perf");
gtk_main_quit(); gtk_main_quit();
} }
static void perf_gtk_resize_window(GtkWidget *window) static void perf_gtk__resize_window(GtkWidget *window)
{ {
GdkRectangle rect; GdkRectangle rect;
GdkScreen *screen; GdkScreen *screen;
@ -46,7 +35,7 @@ static void perf_gtk_resize_window(GtkWidget *window)
gtk_window_resize(GTK_WINDOW(window), width, height); gtk_window_resize(GTK_WINDOW(window), width, height);
} }
static void perf_gtk_show_hists(GtkWidget *window, struct hists *hists) static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists)
{ {
GType col_types[MAX_COLUMNS]; GType col_types[MAX_COLUMNS];
GtkCellRenderer *renderer; GtkCellRenderer *renderer;
@ -142,11 +131,11 @@ int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist,
GtkWidget *notebook; GtkWidget *notebook;
GtkWidget *window; GtkWidget *window;
signal(SIGSEGV, perf_gtk_signal); signal(SIGSEGV, perf_gtk__signal);
signal(SIGFPE, perf_gtk_signal); signal(SIGFPE, perf_gtk__signal);
signal(SIGINT, perf_gtk_signal); signal(SIGINT, perf_gtk__signal);
signal(SIGQUIT, perf_gtk_signal); signal(SIGQUIT, perf_gtk__signal);
signal(SIGTERM, perf_gtk_signal); signal(SIGTERM, perf_gtk__signal);
window = gtk_window_new(GTK_WINDOW_TOPLEVEL); window = gtk_window_new(GTK_WINDOW_TOPLEVEL);
@ -168,7 +157,7 @@ int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist,
GTK_POLICY_AUTOMATIC, GTK_POLICY_AUTOMATIC,
GTK_POLICY_AUTOMATIC); GTK_POLICY_AUTOMATIC);
perf_gtk_show_hists(scrolled_window, hists); perf_gtk__show_hists(scrolled_window, hists);
tab_label = gtk_label_new(evname); tab_label = gtk_label_new(evname);
@ -179,7 +168,7 @@ int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist,
gtk_widget_show_all(window); gtk_widget_show_all(window);
perf_gtk_resize_window(window); perf_gtk__resize_window(window);
gtk_window_set_position(GTK_WINDOW(window), GTK_WIN_POS_CENTER); gtk_window_set_position(GTK_WINDOW(window), GTK_WIN_POS_CENTER);

tools/perf/ui/gtk/setup.c (new file)

@ -0,0 +1,12 @@
#include "gtk.h"
#include "../../util/cache.h"
int perf_gtk__init(void)
{
return gtk_init_check(NULL, NULL) ? 0 : -1;
}
void perf_gtk__exit(bool wait_for_ok __used)
{
gtk_main_quit();
}


tools/perf/ui/setup.c (new file)

@ -0,0 +1,45 @@
#include "../cache.h"
#include "../debug.h"
void setup_browser(bool fallback_to_pager)
{
if (!isatty(1) || dump_trace)
use_browser = 0;
/* default to TUI */
if (use_browser < 0)
use_browser = 1;
switch (use_browser) {
case 2:
if (perf_gtk__init() == 0)
break;
/* fall through */
case 1:
use_browser = 1;
if (ui__init() == 0)
break;
/* fall through */
default:
if (fallback_to_pager)
setup_pager();
break;
}
}
void exit_browser(bool wait_for_ok)
{
switch (use_browser) {
case 2:
perf_gtk__exit(wait_for_ok);
break;
case 1:
ui__exit(wait_for_ok);
break;
default:
break;
}
}
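The switch relies on deliberate fall-through: use_browser follows perf's usual convention (0 = stdio, 1 = TUI, 2 = GTK), so a failed perf_gtk__init() degrades to the TUI and a failed ui__init() to the plain pager, rather than aborting.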


@ -2,14 +2,14 @@
#include <signal.h> #include <signal.h>
#include <stdbool.h> #include <stdbool.h>
#include "../cache.h" #include "../../util/cache.h"
#include "../debug.h" #include "../../util/debug.h"
#include "browser.h" #include "../browser.h"
#include "helpline.h" #include "../helpline.h"
#include "ui.h" #include "../ui.h"
#include "util.h" #include "../util.h"
#include "libslang.h" #include "../libslang.h"
#include "keysyms.h" #include "../keysyms.h"
pthread_mutex_t ui__lock = PTHREAD_MUTEX_INITIALIZER; pthread_mutex_t ui__lock = PTHREAD_MUTEX_INITIALIZER;
@ -93,45 +93,26 @@ static void newt_suspend(void *d __used)
newtResume(); newtResume();
} }
static int ui__init(void)
{
int err = SLkp_init();
if (err < 0)
goto out;
SLkp_define_keysym((char *)"^(kB)", SL_KEY_UNTAB);
out:
return err;
}
static void ui__exit(void)
{
SLtt_set_cursor_visibility(1);
SLsmg_refresh();
SLsmg_reset_smg();
SLang_reset_tty();
}
static void ui__signal(int sig) static void ui__signal(int sig)
{ {
ui__exit(); ui__exit(false);
psignal(sig, "perf"); psignal(sig, "perf");
exit(0); exit(0);
} }
void setup_browser(bool fallback_to_pager) int ui__init(void)
{ {
if (!isatty(1) || !use_browser || dump_trace) { int err;
use_browser = 0;
if (fallback_to_pager) newtInit();
setup_pager(); err = SLkp_init();
return; if (err < 0) {
pr_err("TUI initialization failed.\n");
goto out;
} }
use_browser = 1; SLkp_define_keysym((char *)"^(kB)", SL_KEY_UNTAB);
newtInit();
ui__init();
newtSetSuspendCallback(newt_suspend, NULL); newtSetSuspendCallback(newt_suspend, NULL);
ui_helpline__init(); ui_helpline__init();
ui_browser__init(); ui_browser__init();
@ -141,15 +122,19 @@ void setup_browser(bool fallback_to_pager)
signal(SIGINT, ui__signal); signal(SIGINT, ui__signal);
signal(SIGQUIT, ui__signal); signal(SIGQUIT, ui__signal);
signal(SIGTERM, ui__signal); signal(SIGTERM, ui__signal);
out:
return err;
} }
void exit_browser(bool wait_for_ok) void ui__exit(bool wait_for_ok)
{ {
if (use_browser > 0) { if (wait_for_ok)
if (wait_for_ok) ui__question_window("Fatal Error",
ui__question_window("Fatal Error", ui_helpline__last_msg,
ui_helpline__last_msg, "Press any key...", 0);
"Press any key...", 0);
ui__exit(); SLtt_set_cursor_visibility(1);
} SLsmg_refresh();
SLsmg_reset_smg();
SLang_reset_tty();
} }


@ -18,6 +18,403 @@
const char *disassembler_style; const char *disassembler_style;
static struct ins *ins__find(const char *name);
static int disasm_line__parse(char *line, char **namep, char **rawp);
static void ins__delete(struct ins_operands *ops)
{
free(ops->source.raw);
free(ops->source.name);
free(ops->target.raw);
free(ops->target.name);
}
static int ins__raw_scnprintf(struct ins *ins, char *bf, size_t size,
struct ins_operands *ops)
{
return scnprintf(bf, size, "%-6.6s %s", ins->name, ops->raw);
}
int ins__scnprintf(struct ins *ins, char *bf, size_t size,
struct ins_operands *ops)
{
if (ins->ops->scnprintf)
return ins->ops->scnprintf(ins, bf, size, ops);
return ins__raw_scnprintf(ins, bf, size, ops);
}
static int call__parse(struct ins_operands *ops)
{
char *endptr, *tok, *name;
ops->target.addr = strtoull(ops->raw, &endptr, 16);
name = strchr(endptr, '<');
if (name == NULL)
goto indirect_call;
name++;
tok = strchr(name, '>');
if (tok == NULL)
return -1;
*tok = '\0';
ops->target.name = strdup(name);
*tok = '>';
return ops->target.name == NULL ? -1 : 0;
indirect_call:
tok = strchr(endptr, '(');
if (tok != NULL) {
ops->target.addr = 0;
return 0;
}
tok = strchr(endptr, '*');
if (tok == NULL)
return -1;
ops->target.addr = strtoull(tok + 1, NULL, 16);
return 0;
}
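Worked examples of what call__parse() extracts, assuming objdump's usual operand forms (hypothetical values):

/* "callq 4003e0 <getenv@plt>"  -> target.addr = 0x4003e0, target.name = "getenv@plt"
 * "callq *(%rax)"              -> target.addr = 0 (printed back as raw text)
 * "callq *0x4183c0"            -> target.addr = 0x4183c0 via the '*' branch
 */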
static int call__scnprintf(struct ins *ins, char *bf, size_t size,
struct ins_operands *ops)
{
if (ops->target.name)
return scnprintf(bf, size, "%-6.6s %s", ins->name, ops->target.name);
if (ops->target.addr == 0)
return ins__raw_scnprintf(ins, bf, size, ops);
return scnprintf(bf, size, "%-6.6s *%" PRIx64, ins->name, ops->target.addr);
}
static struct ins_ops call_ops = {
.parse = call__parse,
.scnprintf = call__scnprintf,
};
bool ins__is_call(const struct ins *ins)
{
return ins->ops == &call_ops;
}
static int jump__parse(struct ins_operands *ops)
{
const char *s = strchr(ops->raw, '+');
ops->target.addr = strtoll(ops->raw, NULL, 16);
if (s++ != NULL)
ops->target.offset = strtoll(s, NULL, 16);
else
ops->target.offset = UINT64_MAX;
return 0;
}
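And for jumps, assuming the usual "addr <symbol+offset>" operand form:

/* "jne 4004ae <main+0x8>" -> target.addr = 0x4004ae, target.offset = 0x8.
 * Without a "+offset" part, target.offset stays UINT64_MAX, which is the
 * sentinel disasm_line__has_offset() (in annotate.h below) tests for.
 */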
static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
struct ins_operands *ops)
{
return scnprintf(bf, size, "%-6.6s %" PRIx64, ins->name, ops->target.offset);
}
static struct ins_ops jump_ops = {
.parse = jump__parse,
.scnprintf = jump__scnprintf,
};
bool ins__is_jump(const struct ins *ins)
{
return ins->ops == &jump_ops;
}
static int comment__symbol(char *raw, char *comment, u64 *addrp, char **namep)
{
char *endptr, *name, *t;
if (strstr(raw, "(%rip)") == NULL)
return 0;
*addrp = strtoull(comment, &endptr, 16);
name = strchr(endptr, '<');
if (name == NULL)
return -1;
name++;
t = strchr(name, '>');
if (t == NULL)
return 0;
*t = '\0';
*namep = strdup(name);
*t = '>';
return 0;
}
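comment__symbol() resolves the symbolic comment objdump appends to RIP-relative operands; a worked example with hypothetical values:

/* For a line like "mov 0x2004c2(%rip),%rax   # 601040 <stdout>",
 * comment__symbol("0x2004c2(%rip)", "601040 <stdout>", &addr, &name)
 * leaves addr == 0x601040 and name == strdup("stdout"); operands without
 * "(%rip)" return early and keep their raw form.
 */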
static int lock__parse(struct ins_operands *ops)
{
char *name;
ops->locked.ops = zalloc(sizeof(*ops->locked.ops));
if (ops->locked.ops == NULL)
return 0;
if (disasm_line__parse(ops->raw, &name, &ops->locked.ops->raw) < 0)
goto out_free_ops;
ops->locked.ins = ins__find(name);
if (ops->locked.ins == NULL)
goto out_free_ops;
if (!ops->locked.ins->ops)
return 0;
if (ops->locked.ins->ops->parse)
ops->locked.ins->ops->parse(ops->locked.ops);
return 0;
out_free_ops:
free(ops->locked.ops);
ops->locked.ops = NULL;
return 0;
}
static int lock__scnprintf(struct ins *ins, char *bf, size_t size,
struct ins_operands *ops)
{
int printed;
if (ops->locked.ins == NULL)
return ins__raw_scnprintf(ins, bf, size, ops);
printed = scnprintf(bf, size, "%-6.6s ", ins->name);
return printed + ins__scnprintf(ops->locked.ins, bf + printed,
size - printed, ops->locked.ops);
}
static void lock__delete(struct ins_operands *ops)
{
free(ops->locked.ops);
free(ops->target.raw);
free(ops->target.name);
}
static struct ins_ops lock_ops = {
.free = lock__delete,
.parse = lock__parse,
.scnprintf = lock__scnprintf,
};
static int mov__parse(struct ins_operands *ops)
{
char *s = strchr(ops->raw, ','), *target, *comment, prev;
if (s == NULL)
return -1;
*s = '\0';
ops->source.raw = strdup(ops->raw);
*s = ',';
if (ops->source.raw == NULL)
return -1;
target = ++s;
while (s[0] != '\0' && !isspace(s[0]))
++s;
prev = *s;
*s = '\0';
ops->target.raw = strdup(target);
*s = prev;
if (ops->target.raw == NULL)
goto out_free_source;
comment = strchr(s, '#');
if (comment == NULL)
return 0;
while (comment[0] != '\0' && isspace(comment[0]))
++comment;
comment__symbol(ops->source.raw, comment, &ops->source.addr, &ops->source.name);
comment__symbol(ops->target.raw, comment, &ops->target.addr, &ops->target.name);
return 0;
out_free_source:
free(ops->source.raw);
ops->source.raw = NULL;
return -1;
}
static int mov__scnprintf(struct ins *ins, char *bf, size_t size,
struct ins_operands *ops)
{
return scnprintf(bf, size, "%-6.6s %s,%s", ins->name,
ops->source.name ?: ops->source.raw,
ops->target.name ?: ops->target.raw);
}
static struct ins_ops mov_ops = {
.parse = mov__parse,
.scnprintf = mov__scnprintf,
};
static int dec__parse(struct ins_operands *ops)
{
char *target, *comment, *s, prev;
target = s = ops->raw;
while (s[0] != '\0' && !isspace(s[0]))
++s;
prev = *s;
*s = '\0';
ops->target.raw = strdup(target);
*s = prev;
if (ops->target.raw == NULL)
return -1;
comment = strchr(s, '#');
if (comment == NULL)
return 0;
while (comment[0] != '\0' && isspace(comment[0]))
++comment;
comment__symbol(ops->target.raw, comment, &ops->target.addr, &ops->target.name);
return 0;
}
static int dec__scnprintf(struct ins *ins, char *bf, size_t size,
struct ins_operands *ops)
{
return scnprintf(bf, size, "%-6.6s %s", ins->name,
ops->target.name ?: ops->target.raw);
}
static struct ins_ops dec_ops = {
.parse = dec__parse,
.scnprintf = dec__scnprintf,
};
static int nop__scnprintf(struct ins *ins __used, char *bf, size_t size,
struct ins_operands *ops __used)
{
return scnprintf(bf, size, "%-6.6s", "nop");
}
static struct ins_ops nop_ops = {
.scnprintf = nop__scnprintf,
};
/*
* Must be sorted by name!
*/
static struct ins instructions[] = {
{ .name = "add", .ops = &mov_ops, },
{ .name = "addl", .ops = &mov_ops, },
{ .name = "addq", .ops = &mov_ops, },
{ .name = "addw", .ops = &mov_ops, },
{ .name = "and", .ops = &mov_ops, },
{ .name = "bts", .ops = &mov_ops, },
{ .name = "call", .ops = &call_ops, },
{ .name = "callq", .ops = &call_ops, },
{ .name = "cmp", .ops = &mov_ops, },
{ .name = "cmpb", .ops = &mov_ops, },
{ .name = "cmpl", .ops = &mov_ops, },
{ .name = "cmpq", .ops = &mov_ops, },
{ .name = "cmpw", .ops = &mov_ops, },
{ .name = "cmpxch", .ops = &mov_ops, },
{ .name = "dec", .ops = &dec_ops, },
{ .name = "decl", .ops = &dec_ops, },
{ .name = "imul", .ops = &mov_ops, },
{ .name = "inc", .ops = &dec_ops, },
{ .name = "incl", .ops = &dec_ops, },
{ .name = "ja", .ops = &jump_ops, },
{ .name = "jae", .ops = &jump_ops, },
{ .name = "jb", .ops = &jump_ops, },
{ .name = "jbe", .ops = &jump_ops, },
{ .name = "jc", .ops = &jump_ops, },
{ .name = "jcxz", .ops = &jump_ops, },
{ .name = "je", .ops = &jump_ops, },
{ .name = "jecxz", .ops = &jump_ops, },
{ .name = "jg", .ops = &jump_ops, },
{ .name = "jge", .ops = &jump_ops, },
{ .name = "jl", .ops = &jump_ops, },
{ .name = "jle", .ops = &jump_ops, },
{ .name = "jmp", .ops = &jump_ops, },
{ .name = "jmpq", .ops = &jump_ops, },
{ .name = "jna", .ops = &jump_ops, },
{ .name = "jnae", .ops = &jump_ops, },
{ .name = "jnb", .ops = &jump_ops, },
{ .name = "jnbe", .ops = &jump_ops, },
{ .name = "jnc", .ops = &jump_ops, },
{ .name = "jne", .ops = &jump_ops, },
{ .name = "jng", .ops = &jump_ops, },
{ .name = "jnge", .ops = &jump_ops, },
{ .name = "jnl", .ops = &jump_ops, },
{ .name = "jnle", .ops = &jump_ops, },
{ .name = "jno", .ops = &jump_ops, },
{ .name = "jnp", .ops = &jump_ops, },
{ .name = "jns", .ops = &jump_ops, },
{ .name = "jnz", .ops = &jump_ops, },
{ .name = "jo", .ops = &jump_ops, },
{ .name = "jp", .ops = &jump_ops, },
{ .name = "jpe", .ops = &jump_ops, },
{ .name = "jpo", .ops = &jump_ops, },
{ .name = "jrcxz", .ops = &jump_ops, },
{ .name = "js", .ops = &jump_ops, },
{ .name = "jz", .ops = &jump_ops, },
{ .name = "lea", .ops = &mov_ops, },
{ .name = "lock", .ops = &lock_ops, },
{ .name = "mov", .ops = &mov_ops, },
{ .name = "movb", .ops = &mov_ops, },
{ .name = "movdqa",.ops = &mov_ops, },
{ .name = "movl", .ops = &mov_ops, },
{ .name = "movq", .ops = &mov_ops, },
{ .name = "movslq", .ops = &mov_ops, },
{ .name = "movzbl", .ops = &mov_ops, },
{ .name = "movzwl", .ops = &mov_ops, },
{ .name = "nop", .ops = &nop_ops, },
{ .name = "nopl", .ops = &nop_ops, },
{ .name = "nopw", .ops = &nop_ops, },
{ .name = "or", .ops = &mov_ops, },
{ .name = "orl", .ops = &mov_ops, },
{ .name = "test", .ops = &mov_ops, },
{ .name = "testb", .ops = &mov_ops, },
{ .name = "testl", .ops = &mov_ops, },
{ .name = "xadd", .ops = &mov_ops, },
};
static int ins__cmp(const void *name, const void *insp)
{
const struct ins *ins = insp;
return strcmp(name, ins->name);
}
static struct ins *ins__find(const char *name)
{
const int nmemb = ARRAY_SIZE(instructions);
return bsearch(name, instructions, nmemb, sizeof(struct ins), ins__cmp);
}
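ins__find() is the reason for the "Must be sorted by name!" comment above: bsearch() assumes the array is ordered by the same strcmp the comparator uses. A hypothetical helper showing the lookup:

/* Hypothetical use within this file: classify a parsed mnemonic. */
static bool mnemonic_is_jump(const char *name)
{
	struct ins *ins = ins__find(name); /* O(log n) strcmp probes */

	return ins != NULL && ins__is_jump(ins);
}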
int symbol__annotate_init(struct map *map __used, struct symbol *sym) int symbol__annotate_init(struct map *map __used, struct symbol *sym)
{ {
struct annotation *notes = symbol__annotation(sym); struct annotation *notes = symbol__annotation(sym);
@ -28,7 +425,7 @@ int symbol__annotate_init(struct map *map __used, struct symbol *sym)
int symbol__alloc_hist(struct symbol *sym) int symbol__alloc_hist(struct symbol *sym)
{ {
struct annotation *notes = symbol__annotation(sym); struct annotation *notes = symbol__annotation(sym);
const size_t size = sym->end - sym->start + 1; const size_t size = symbol__size(sym);
size_t sizeof_sym_hist = (sizeof(struct sym_hist) + size * sizeof(u64)); size_t sizeof_sym_hist = (sizeof(struct sym_hist) + size * sizeof(u64));
notes->src = zalloc(sizeof(*notes->src) + symbol_conf.nr_events * sizeof_sym_hist); notes->src = zalloc(sizeof(*notes->src) + symbol_conf.nr_events * sizeof_sym_hist);
@ -78,31 +475,110 @@ int symbol__inc_addr_samples(struct symbol *sym, struct map *map,
return 0; return 0;
} }
static struct objdump_line *objdump_line__new(s64 offset, char *line, size_t privsize) static void disasm_line__init_ins(struct disasm_line *dl)
{ {
struct objdump_line *self = malloc(sizeof(*self) + privsize); dl->ins = ins__find(dl->name);
if (self != NULL) { if (dl->ins == NULL)
self->offset = offset; return;
self->line = line;
if (!dl->ins->ops)
return;
if (dl->ins->ops->parse)
dl->ins->ops->parse(&dl->ops);
}
static int disasm_line__parse(char *line, char **namep, char **rawp)
{
char *name = line, tmp;
while (isspace(name[0]))
++name;
if (name[0] == '\0')
return -1;
*rawp = name + 1;
while ((*rawp)[0] != '\0' && !isspace((*rawp)[0]))
++*rawp;
tmp = (*rawp)[0];
(*rawp)[0] = '\0';
*namep = strdup(name);
if (*namep == NULL)
goto out_free_name;
(*rawp)[0] = tmp;
if ((*rawp)[0] != '\0') {
(*rawp)++;
while (isspace((*rawp)[0]))
++(*rawp);
} }
return self; return 0;
out_free_name:
free(*namep);
*namep = NULL;
return -1;
} }
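disasm_line__parse() splits a mnemonic line in place: the name is heap-copied while the operand pointer stays inside the caller's buffer. A sketch of the expected behaviour on a typical objdump line (hypothetical input):

char buf[] = "  mov    %eax,%ebx";
char *name, *raw;

if (disasm_line__parse(buf, &name, &raw) == 0) {
	/* name is a copy ("mov" here); raw points into buf at "%eax,%ebx" */
	free(name);
}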
void objdump_line__free(struct objdump_line *self) static struct disasm_line *disasm_line__new(s64 offset, char *line, size_t privsize)
{ {
free(self->line); struct disasm_line *dl = zalloc(sizeof(*dl) + privsize);
free(self);
if (dl != NULL) {
dl->offset = offset;
dl->line = strdup(line);
if (dl->line == NULL)
goto out_delete;
if (offset != -1) {
if (disasm_line__parse(dl->line, &dl->name, &dl->ops.raw) < 0)
goto out_free_line;
disasm_line__init_ins(dl);
}
}
return dl;
out_free_line:
free(dl->line);
out_delete:
free(dl);
return NULL;
} }
static void objdump__add_line(struct list_head *head, struct objdump_line *line) void disasm_line__free(struct disasm_line *dl)
{
free(dl->line);
free(dl->name);
if (dl->ins && dl->ins->ops->free)
dl->ins->ops->free(&dl->ops);
else
ins__delete(&dl->ops);
free(dl);
}
int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw)
{
if (raw || !dl->ins)
return scnprintf(bf, size, "%-6.6s %s", dl->name, dl->ops.raw);
return ins__scnprintf(dl->ins, bf, size, &dl->ops);
}
static void disasm__add(struct list_head *head, struct disasm_line *line)
{ {
list_add_tail(&line->node, head); list_add_tail(&line->node, head);
} }
struct objdump_line *objdump__get_next_ip_line(struct list_head *head, struct disasm_line *disasm__get_next_ip_line(struct list_head *head, struct disasm_line *pos)
struct objdump_line *pos)
{ {
list_for_each_entry_continue(pos, head, node) list_for_each_entry_continue(pos, head, node)
if (pos->offset >= 0) if (pos->offset >= 0)
@ -111,15 +587,14 @@ struct objdump_line *objdump__get_next_ip_line(struct list_head *head,
return NULL; return NULL;
} }
static int objdump_line__print(struct objdump_line *oline, struct symbol *sym, static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 start,
int evidx, u64 len, int min_pcnt, int evidx, u64 len, int min_pcnt, int printed,
int printed, int max_lines, int max_lines, struct disasm_line *queue)
struct objdump_line *queue)
{ {
static const char *prev_line; static const char *prev_line;
static const char *prev_color; static const char *prev_color;
if (oline->offset != -1) { if (dl->offset != -1) {
const char *path = NULL; const char *path = NULL;
unsigned int hits = 0; unsigned int hits = 0;
double percent = 0.0; double percent = 0.0;
@ -127,10 +602,11 @@ static int objdump_line__print(struct objdump_line *oline, struct symbol *sym,
struct annotation *notes = symbol__annotation(sym); struct annotation *notes = symbol__annotation(sym);
struct source_line *src_line = notes->src->lines; struct source_line *src_line = notes->src->lines;
struct sym_hist *h = annotation__histogram(notes, evidx); struct sym_hist *h = annotation__histogram(notes, evidx);
s64 offset = oline->offset; s64 offset = dl->offset;
struct objdump_line *next; const u64 addr = start + offset;
struct disasm_line *next;
next = objdump__get_next_ip_line(&notes->src->source, oline); next = disasm__get_next_ip_line(&notes->src->source, dl);
while (offset < (s64)len && while (offset < (s64)len &&
(next == NULL || offset < next->offset)) { (next == NULL || offset < next->offset)) {
@ -155,9 +631,9 @@ static int objdump_line__print(struct objdump_line *oline, struct symbol *sym,
if (queue != NULL) { if (queue != NULL) {
list_for_each_entry_from(queue, &notes->src->source, node) { list_for_each_entry_from(queue, &notes->src->source, node) {
if (queue == oline) if (queue == dl)
break; break;
objdump_line__print(queue, sym, evidx, len, disasm_line__print(queue, sym, start, evidx, len,
0, 0, 1, NULL); 0, 0, 1, NULL);
} }
} }
@ -180,17 +656,18 @@ static int objdump_line__print(struct objdump_line *oline, struct symbol *sym,
color_fprintf(stdout, color, " %7.2f", percent); color_fprintf(stdout, color, " %7.2f", percent);
printf(" : "); printf(" : ");
color_fprintf(stdout, PERF_COLOR_BLUE, "%s\n", oline->line); color_fprintf(stdout, PERF_COLOR_MAGENTA, " %" PRIx64 ":", addr);
color_fprintf(stdout, PERF_COLOR_BLUE, "%s\n", dl->line);
} else if (max_lines && printed >= max_lines) } else if (max_lines && printed >= max_lines)
return 1; return 1;
else { else {
if (queue) if (queue)
return -1; return -1;
if (!*oline->line) if (!*dl->line)
printf(" :\n"); printf(" :\n");
else else
printf(" : %s\n", oline->line); printf(" : %s\n", dl->line);
} }
return 0; return 0;
@ -200,8 +677,8 @@ static int symbol__parse_objdump_line(struct symbol *sym, struct map *map,
FILE *file, size_t privsize) FILE *file, size_t privsize)
{ {
struct annotation *notes = symbol__annotation(sym); struct annotation *notes = symbol__annotation(sym);
struct objdump_line *objdump_line; struct disasm_line *dl;
char *line = NULL, *tmp, *tmp2, *c; char *line = NULL, *parsed_line, *tmp, *tmp2, *c;
size_t line_len; size_t line_len;
s64 line_ip, offset = -1; s64 line_ip, offset = -1;
@ -219,6 +696,7 @@ static int symbol__parse_objdump_line(struct symbol *sym, struct map *map,
*c = 0; *c = 0;
line_ip = -1; line_ip = -1;
parsed_line = line;
/* /*
* Strip leading spaces: * Strip leading spaces:
@ -246,14 +724,17 @@ static int symbol__parse_objdump_line(struct symbol *sym, struct map *map,
offset = line_ip - start; offset = line_ip - start;
if (offset < 0 || (u64)line_ip > end) if (offset < 0 || (u64)line_ip > end)
offset = -1; offset = -1;
else
parsed_line = tmp2 + 1;
} }
objdump_line = objdump_line__new(offset, line, privsize); dl = disasm_line__new(offset, parsed_line, privsize);
if (objdump_line == NULL) { free(line);
free(line);
if (dl == NULL)
return -1; return -1;
}
objdump__add_line(&notes->src->source, objdump_line); disasm__add(&notes->src->source, dl);
return 0; return 0;
} }
@ -476,7 +957,7 @@ static void symbol__annotate_hits(struct symbol *sym, int evidx)
{ {
struct annotation *notes = symbol__annotation(sym); struct annotation *notes = symbol__annotation(sym);
struct sym_hist *h = annotation__histogram(notes, evidx); struct sym_hist *h = annotation__histogram(notes, evidx);
u64 len = sym->end - sym->start, offset; u64 len = symbol__size(sym), offset;
for (offset = 0; offset < len; ++offset) for (offset = 0; offset < len; ++offset)
if (h->addr[offset] != 0) if (h->addr[offset] != 0)
@ -492,7 +973,8 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx,
struct dso *dso = map->dso; struct dso *dso = map->dso;
const char *filename = dso->long_name, *d_filename; const char *filename = dso->long_name, *d_filename;
struct annotation *notes = symbol__annotation(sym); struct annotation *notes = symbol__annotation(sym);
struct objdump_line *pos, *queue = NULL; struct disasm_line *pos, *queue = NULL;
u64 start = map__rip_2objdump(map, sym->start);
int printed = 2, queue_len = 0; int printed = 2, queue_len = 0;
int more = 0; int more = 0;
u64 len; u64 len;
@ -502,7 +984,7 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx,
else else
d_filename = basename(filename); d_filename = basename(filename);
len = sym->end - sym->start; len = symbol__size(sym);
printf(" Percent | Source code & Disassembly of %s\n", d_filename); printf(" Percent | Source code & Disassembly of %s\n", d_filename);
printf("------------------------------------------------\n"); printf("------------------------------------------------\n");
@ -516,8 +998,9 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx,
queue_len = 0; queue_len = 0;
} }
switch (objdump_line__print(pos, sym, evidx, len, min_pcnt, switch (disasm_line__print(pos, sym, start, evidx, len,
printed, max_lines, queue)) { min_pcnt, printed, max_lines,
queue)) {
case 0: case 0:
++printed; ++printed;
if (context) { if (context) {
@ -561,7 +1044,7 @@ void symbol__annotate_decay_histogram(struct symbol *sym, int evidx)
{ {
struct annotation *notes = symbol__annotation(sym); struct annotation *notes = symbol__annotation(sym);
struct sym_hist *h = annotation__histogram(notes, evidx); struct sym_hist *h = annotation__histogram(notes, evidx);
int len = sym->end - sym->start, offset; int len = symbol__size(sym), offset;
h->sum = 0; h->sum = 0;
for (offset = 0; offset < len; ++offset) { for (offset = 0; offset < len; ++offset) {
@ -570,16 +1053,44 @@ void symbol__annotate_decay_histogram(struct symbol *sym, int evidx)
} }
} }
void objdump_line_list__purge(struct list_head *head) void disasm__purge(struct list_head *head)
{ {
struct objdump_line *pos, *n; struct disasm_line *pos, *n;
list_for_each_entry_safe(pos, n, head, node) { list_for_each_entry_safe(pos, n, head, node) {
list_del(&pos->node); list_del(&pos->node);
objdump_line__free(pos); disasm_line__free(pos);
} }
} }
static size_t disasm_line__fprintf(struct disasm_line *dl, FILE *fp)
{
size_t printed;
if (dl->offset == -1)
return fprintf(fp, "%s\n", dl->line);
printed = fprintf(fp, "%#" PRIx64 " %s", dl->offset, dl->name);
if (dl->ops.raw[0] != '\0') {
printed += fprintf(fp, "%.*s %s\n", 6 - (int)printed, " ",
dl->ops.raw);
}
return printed + fprintf(fp, "\n");
}
size_t disasm__fprintf(struct list_head *head, FILE *fp)
{
struct disasm_line *pos;
size_t printed = 0;
list_for_each_entry(pos, head, node)
printed += disasm_line__fprintf(pos, fp);
return printed;
}
int symbol__tty_annotate(struct symbol *sym, struct map *map, int evidx, int symbol__tty_annotate(struct symbol *sym, struct map *map, int evidx,
bool print_lines, bool full_paths, int min_pcnt, bool print_lines, bool full_paths, int min_pcnt,
int max_lines) int max_lines)
@ -592,7 +1103,7 @@ int symbol__tty_annotate(struct symbol *sym, struct map *map, int evidx,
if (symbol__annotate(sym, map, 0) < 0) if (symbol__annotate(sym, map, 0) < 0)
return -1; return -1;
len = sym->end - sym->start; len = symbol__size(sym);
if (print_lines) { if (print_lines) {
symbol__get_source_line(sym, map, evidx, &source_line, symbol__get_source_line(sym, map, evidx, &source_line,
@ -605,7 +1116,7 @@ int symbol__tty_annotate(struct symbol *sym, struct map *map, int evidx,
if (print_lines) if (print_lines)
symbol__free_source_line(sym, len); symbol__free_source_line(sym, len);
objdump_line_list__purge(&symbol__annotation(sym)->src->source); disasm__purge(&symbol__annotation(sym)->src->source);
return 0; return 0;
} }


@ -2,20 +2,69 @@
#define __PERF_ANNOTATE_H #define __PERF_ANNOTATE_H
#include <stdbool.h> #include <stdbool.h>
#include <stdint.h>
#include "types.h" #include "types.h"
#include "symbol.h" #include "symbol.h"
#include <linux/list.h> #include <linux/list.h>
#include <linux/rbtree.h> #include <linux/rbtree.h>
struct objdump_line { struct ins;
struct list_head node;
s64 offset; struct ins_operands {
char *line; char *raw;
struct {
char *raw;
char *name;
u64 addr;
u64 offset;
} target;
union {
struct {
char *raw;
char *name;
u64 addr;
} source;
struct {
struct ins *ins;
struct ins_operands *ops;
} locked;
};
}; };
void objdump_line__free(struct objdump_line *self); struct ins_ops {
struct objdump_line *objdump__get_next_ip_line(struct list_head *head, void (*free)(struct ins_operands *ops);
struct objdump_line *pos); int (*parse)(struct ins_operands *ops);
int (*scnprintf)(struct ins *ins, char *bf, size_t size,
struct ins_operands *ops);
};
struct ins {
const char *name;
struct ins_ops *ops;
};
bool ins__is_jump(const struct ins *ins);
bool ins__is_call(const struct ins *ins);
int ins__scnprintf(struct ins *ins, char *bf, size_t size, struct ins_operands *ops);
struct disasm_line {
struct list_head node;
s64 offset;
char *line;
char *name;
struct ins *ins;
struct ins_operands ops;
};
static inline bool disasm_line__has_offset(const struct disasm_line *dl)
{
return dl->ops.target.offset != UINT64_MAX;
}
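UINT64_MAX is the sentinel jump__parse() stores when the operand carried no "+offset" suffix, so this distinguishes jumps the annotate browser can resolve to an in-function line from those it cannot draw an arrow for.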
void disasm_line__free(struct disasm_line *dl);
struct disasm_line *disasm__get_next_ip_line(struct list_head *head, struct disasm_line *pos);
int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw);
size_t disasm__fprintf(struct list_head *head, FILE *fp);
struct sym_hist { struct sym_hist {
u64 sum; u64 sum;
@ -32,7 +81,7 @@ struct source_line {
* *
* @histogram: Array of addr hit histograms per event being monitored * @histogram: Array of addr hit histograms per event being monitored
* @lines: If 'print_lines' is specified, per source code line percentages * @lines: If 'print_lines' is specified, per source code line percentages
* @source: source parsed from objdump -dS * @source: source parsed from a disassembler like objdump -dS
* *
* lines is allocated, percentages calculated and all sorted by percentage * lines is allocated, percentages calculated and all sorted by percentage
* when the annotation is about to be presented, so the percentages are for * when the annotation is about to be presented, so the percentages are for
@ -82,7 +131,7 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx,
int context); int context);
void symbol__annotate_zero_histogram(struct symbol *sym, int evidx); void symbol__annotate_zero_histogram(struct symbol *sym, int evidx);
void symbol__annotate_decay_histogram(struct symbol *sym, int evidx); void symbol__annotate_decay_histogram(struct symbol *sym, int evidx);
void objdump_line_list__purge(struct list_head *head); void disasm__purge(struct list_head *head);
int symbol__tty_annotate(struct symbol *sym, struct map *map, int evidx, int symbol__tty_annotate(struct symbol *sym, struct map *map, int evidx,
bool print_lines, bool full_paths, int min_pcnt, bool print_lines, bool full_paths, int min_pcnt,


@@ -33,7 +33,7 @@ extern int pager_use_color;
 
 extern int use_browser;
 
-#ifdef NO_NEWT_SUPPORT
+#if defined(NO_NEWT_SUPPORT) && defined(NO_GTK2_SUPPORT)
 static inline void setup_browser(bool fallback_to_pager)
 {
 	if (fallback_to_pager)
@@ -43,19 +43,29 @@ static inline void exit_browser(bool wait_for_ok __used) {}
 #else
 void setup_browser(bool fallback_to_pager);
 void exit_browser(bool wait_for_ok);
+
+#ifdef NO_NEWT_SUPPORT
+static inline int ui__init(void)
+{
+	return -1;
+}
+static inline void ui__exit(bool wait_for_ok __used) {}
+#else
+int ui__init(void);
+void ui__exit(bool wait_for_ok);
 #endif
 
 #ifdef NO_GTK2_SUPPORT
-static inline void perf_gtk_setup_browser(int argc __used, const char *argv[] __used, bool fallback_to_pager)
+static inline int perf_gtk__init(void)
 {
-	if (fallback_to_pager)
-		setup_pager();
+	return -1;
 }
-static inline void perf_gtk_exit_browser(bool wait_for_ok __used) {}
+static inline void perf_gtk__exit(bool wait_for_ok __used) {}
 #else
-void perf_gtk_setup_browser(int argc, const char *argv[], bool fallback_to_pager);
-void perf_gtk_exit_browser(bool wait_for_ok);
+int perf_gtk__init(void);
+void perf_gtk__exit(bool wait_for_ok);
 #endif
+#endif /* NO_NEWT_SUPPORT && NO_GTK2_SUPPORT */
 
 char *alias_lookup(const char *alias);
 int split_cmdline(char *cmdline, const char ***argv);
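The reshuffled #ifdef ladder above is the compile-time stub pattern: when a UI backend is configured out, static inline no-ops stand in for the real entry points, so call sites need no conditionals of their own and still link. A toy version of the idea (NO_FANCY_UI is a made-up switch, not a perf build flag):

#include <stdio.h>

#define NO_FANCY_UI	/* pretend the backend was configured out */

#ifdef NO_FANCY_UI
/* Stubs: same signatures, no-op bodies, -1 meaning "unavailable". */
static inline int ui__init(void) { return -1; }
static inline void ui__exit(void) {}
#else
int ui__init(void);	/* real versions would live in the UI objects */
void ui__exit(void);
#endif

int main(void)
{
	if (ui__init() < 0)
		puts("no UI backend, falling back to plain output");
	ui__exit();
	return 0;
}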

View file

@@ -11,6 +11,7 @@
 #include "event.h"
 #include "debug.h"
 #include "util.h"
+#include "target.h"
 
 int verbose;
 bool dump_trace = false, quiet = false;

View file

@@ -26,7 +26,7 @@ static inline void ui_progress__update(u64 curr __used, u64 total __used,
 #else
 extern char ui_helpline__last_msg[];
 int ui_helpline__show_help(const char *format, va_list ap);
-#include "ui/progress.h"
+#include "../ui/progress.h"
 int ui__error(const char *format, ...) __attribute__((format(printf, 1, 2)));
 #endif

View file

@@ -11,6 +11,7 @@
 #include <poll.h>
 #include "cpumap.h"
 #include "thread_map.h"
+#include "target.h"
 #include "evlist.h"
 #include "evsel.h"
 #include <unistd.h>
@@ -599,18 +600,21 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
 	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
 }
 
-int perf_evlist__create_maps(struct perf_evlist *evlist, const char *target_pid,
-			     const char *target_tid, uid_t uid, const char *cpu_list)
+int perf_evlist__create_maps(struct perf_evlist *evlist,
+			     struct perf_target *target)
 {
-	evlist->threads = thread_map__new_str(target_pid, target_tid, uid);
+	evlist->threads = thread_map__new_str(target->pid, target->tid,
+					      target->uid);
 
 	if (evlist->threads == NULL)
 		return -1;
 
-	if (uid != UINT_MAX || (cpu_list == NULL && target_tid))
+	if (perf_target__has_task(target))
+		evlist->cpus = cpu_map__dummy_new();
+	else if (!perf_target__has_cpu(target) && !target->uses_mmap)
 		evlist->cpus = cpu_map__dummy_new();
 	else
-		evlist->cpus = cpu_map__new(cpu_list);
+		evlist->cpus = cpu_map__new(target->cpu_list);
 
 	if (evlist->cpus == NULL)
 		goto out_delete_threads;
@@ -827,7 +831,7 @@ int perf_evlist__prepare_workload(struct perf_evlist *evlist,
 		exit(-1);
 	}
 
-	if (!opts->system_wide && !opts->target_tid && !opts->target_pid)
+	if (perf_target__none(&opts->target))
 		evlist->threads->map[0] = evlist->workload.pid;
 
 	close(child_ready_pipe[1]);
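The perf_target__* predicates replace the old open-coded uid/cpu_list/tid tests with named questions about the profiling target. A rough, self-contained sketch of the semantics these call sites imply (the struct fields mirror the diff; the helper bodies are an assumption, not the actual target.h):

#include <stdbool.h>
#include <stdio.h>

struct perf_target {
	const char *pid, *tid, *cpu_list, *uid_str;
	bool system_wide;
	bool uses_mmap;
};

/* Plausible helper bodies, inferred from how the hunks use them. */
static bool perf_target__has_task(struct perf_target *t)
{
	return t->tid || t->pid || t->uid_str;
}

static bool perf_target__has_cpu(struct perf_target *t)
{
	return t->system_wide || t->cpu_list;
}

static bool perf_target__none(struct perf_target *t)
{
	return !perf_target__has_task(t) && !perf_target__has_cpu(t);
}

int main(void)
{
	struct perf_target t = { .pid = "1234" };	/* like --pid 1234 */

	printf("has_task=%d has_cpu=%d none=%d\n",
	       perf_target__has_task(&t),
	       perf_target__has_cpu(&t),
	       perf_target__none(&t));	/* 1 0 0 */
	return 0;
}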

View file

@@ -106,8 +106,8 @@ static inline void perf_evlist__set_maps(struct perf_evlist *evlist,
 	evlist->threads = threads;
 }
 
-int perf_evlist__create_maps(struct perf_evlist *evlist, const char *target_pid,
-			     const char *tid, uid_t uid, const char *cpu_list);
+int perf_evlist__create_maps(struct perf_evlist *evlist,
+			     struct perf_target *target);
 void perf_evlist__delete_maps(struct perf_evlist *evlist);
 int perf_evlist__set_filters(struct perf_evlist *evlist);

View file

@@ -14,6 +14,7 @@
 #include "util.h"
 #include "cpumap.h"
 #include "thread_map.h"
+#include "target.h"
 
 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
 #define GROUP_FD(group_fd, cpu) (*(int *)xyarray__entry(group_fd, cpu, 0))
@@ -69,6 +70,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts,
 	struct perf_event_attr *attr = &evsel->attr;
 	int track = !evsel->idx; /* only the first counter needs these */
 
+	attr->disabled = 1;
 	attr->sample_id_all = opts->sample_id_all_missing ? 0 : 1;
 	attr->inherit	    = !opts->no_inherit;
 
 	attr->read_format   = PERF_FORMAT_TOTAL_TIME_ENABLED |
@@ -106,15 +108,15 @@ void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts,
 	if (opts->call_graph)
 		attr->sample_type	|= PERF_SAMPLE_CALLCHAIN;
 
-	if (opts->system_wide)
+	if (opts->target.system_wide)
 		attr->sample_type	|= PERF_SAMPLE_CPU;
 
 	if (opts->period)
 		attr->sample_type	|= PERF_SAMPLE_PERIOD;
 
 	if (!opts->sample_id_all_missing &&
-	    (opts->sample_time || opts->system_wide ||
-	     !opts->no_inherit || opts->cpu_list))
+	    (opts->sample_time || !opts->no_inherit ||
+	     perf_target__has_cpu(&opts->target)))
 		attr->sample_type	|= PERF_SAMPLE_TIME;
 
 	if (opts->raw_samples) {
@@ -135,9 +137,8 @@ void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts,
 	attr->mmap = track;
 	attr->comm = track;
 
-	if (!opts->target_pid && !opts->target_tid && !opts->system_wide &&
+	if (perf_target__none(&opts->target) &&
 	    (!opts->group || evsel == first)) {
-		attr->disabled = 1;
 		attr->enable_on_exec = 1;
 	}
 }

View file

@@ -31,21 +31,16 @@ static const char **header_argv;
 
 int perf_header__push_event(u64 id, const char *name)
 {
+	struct perf_trace_event_type *nevents;
+
 	if (strlen(name) > MAX_EVENT_NAME)
 		pr_warning("Event %s will be truncated\n", name);
 
-	if (!events) {
-		events = malloc(sizeof(struct perf_trace_event_type));
-		if (events == NULL)
-			return -ENOMEM;
-	} else {
-		struct perf_trace_event_type *nevents;
-
-		nevents = realloc(events, (event_count + 1) * sizeof(*events));
-		if (nevents == NULL)
-			return -ENOMEM;
-		events = nevents;
-	}
+	nevents = realloc(events, (event_count + 1) * sizeof(*events));
+	if (nevents == NULL)
+		return -ENOMEM;
+	events = nevents;
+
 	memset(&events[event_count], 0, sizeof(struct perf_trace_event_type));
 	events[event_count].event_id = id;
 	strncpy(events[event_count].name, name, MAX_EVENT_NAME - 1);
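The collapsed branches work because C guarantees that realloc(NULL, size) behaves exactly like malloc(size), so a growing array never needs a first-allocation special case. A minimal standalone illustration of the idiom:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int *vec = NULL;	/* start empty; no malloc special case */
	size_t count = 0;

	for (int i = 0; i < 4; i++) {
		/* realloc(NULL, n) == malloc(n), so this single call
		 * path grows both an empty and a non-empty array. */
		int *nvec = realloc(vec, (count + 1) * sizeof(*vec));
		if (nvec == NULL) {
			free(vec);	/* old block survives a failed realloc */
			return 1;
		}
		vec = nvec;
		vec[count++] = i * i;
	}

	for (size_t i = 0; i < count; i++)
		printf("vec[%zu] = %d\n", i, vec[i]);
	free(vec);
	return 0;
}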

View file

@@ -599,7 +599,7 @@ static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain,
 	if (chain->ms.sym)
 		ret += fprintf(fp, "%s\n", chain->ms.sym->name);
 	else
-		ret += fprintf(fp, "%p\n", (void *)(long)chain->ip);
+		ret += fprintf(fp, "0x%0" PRIx64 "\n", chain->ip);
 
 	return ret;
 }
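The %p version was subtly wrong: chain->ip is a u64, and the (void *)(long) cast chops it to 32 bits on ILP32 hosts, while the inttypes.h macros pick the right conversion everywhere. A small self-contained demonstration (the address is made up):

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t ip = 0xffffffff81012345ULL;	/* made-up kernel address */

	/* What the old (void *)(long) cast does on a 32-bit host:
	 * only the low word survives. */
	printf("truncated: %#lx\n", (unsigned long)(uint32_t)ip);

	/* PRIx64 expands to the correct conversion for uint64_t,
	 * so the full value is printed on every host. */
	printf("full:      0x%0" PRIx64 "\n", ip);
	return 0;
}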

View file

@@ -138,7 +138,7 @@ static inline int hist_entry__tui_annotate(struct hist_entry *self __used,
 #define K_LEFT  -1
 #define K_RIGHT -2
 #else
-#include "ui/keysyms.h"
+#include "../ui/keysyms.h"
 int hist_entry__tui_annotate(struct hist_entry *he, int evidx,
 			     void(*timer)(void *arg), void *arg, int delay_secs);

View file

@@ -593,17 +593,27 @@ int parse_events_add_breakpoint(struct list_head *list, int *idx,
 static int config_term(struct perf_event_attr *attr,
 		       struct parse_events__term *term)
 {
-	switch (term->type) {
+#define CHECK_TYPE_VAL(type)						\
+do {									\
+	if (PARSE_EVENTS__TERM_TYPE_ ## type != term->type_val)		\
+		return -EINVAL;						\
+} while (0)
+
+	switch (term->type_term) {
 	case PARSE_EVENTS__TERM_TYPE_CONFIG:
+		CHECK_TYPE_VAL(NUM);
 		attr->config = term->val.num;
 		break;
 	case PARSE_EVENTS__TERM_TYPE_CONFIG1:
+		CHECK_TYPE_VAL(NUM);
 		attr->config1 = term->val.num;
 		break;
 	case PARSE_EVENTS__TERM_TYPE_CONFIG2:
+		CHECK_TYPE_VAL(NUM);
 		attr->config2 = term->val.num;
 		break;
 	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
+		CHECK_TYPE_VAL(NUM);
 		attr->sample_period = term->val.num;
 		break;
 	case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
@@ -615,7 +625,9 @@ static int config_term(struct perf_event_attr *attr,
 	default:
 		return -EINVAL;
 	}
+
 	return 0;
+#undef CHECK_TYPE_VAL
 }
 
 static int config_attr(struct perf_event_attr *attr,
@@ -1015,11 +1027,12 @@ void print_events(const char *event_glob)
 
 int parse_events__is_hardcoded_term(struct parse_events__term *term)
 {
-	return term->type <= PARSE_EVENTS__TERM_TYPE_HARDCODED_MAX;
+	return term->type_term != PARSE_EVENTS__TERM_TYPE_USER;
 }
 
-int parse_events__new_term(struct parse_events__term **_term, int type,
-			   char *config, char *str, long num)
+static int new_term(struct parse_events__term **_term, int type_val,
+		    int type_term, char *config,
+		    char *str, long num)
 {
 	struct parse_events__term *term;
 
@@ -1028,15 +1041,11 @@
 		return -ENOMEM;
 
 	INIT_LIST_HEAD(&term->list);
-	term->type = type;
+	term->type_val  = type_val;
+	term->type_term = type_term;
 	term->config = config;
 
-	switch (type) {
-	case PARSE_EVENTS__TERM_TYPE_CONFIG:
-	case PARSE_EVENTS__TERM_TYPE_CONFIG1:
-	case PARSE_EVENTS__TERM_TYPE_CONFIG2:
-	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
-	case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
+	switch (type_val) {
 	case PARSE_EVENTS__TERM_TYPE_NUM:
 		term->val.num = num;
 		break;
@@ -1051,6 +1060,20 @@
 	return 0;
 }
 
+int parse_events__term_num(struct parse_events__term **term,
+			   int type_term, char *config, long num)
+{
+	return new_term(term, PARSE_EVENTS__TERM_TYPE_NUM, type_term,
+			config, NULL, num);
+}
+
+int parse_events__term_str(struct parse_events__term **term,
+			   int type_term, char *config, char *str)
+{
+	return new_term(term, PARSE_EVENTS__TERM_TYPE_STR, type_term,
+			config, str, 0);
+}
+
 void parse_events__free_terms(struct list_head *terms)
 {
 	struct parse_events__term *term, *h;
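CHECK_TYPE_VAL relies on ## token pasting: the case's expected kind becomes an enum constant at expansion time, and the function bails out early when the parsed union holds the other member. The same pattern in isolation (simplified names, not the perf code):

#include <stdio.h>

enum { TYPE_NUM, TYPE_STR };

struct term {
	int type_val;				/* which union member is live */
	union { long num; const char *str; } val;
};

/* TYPE_ ## type pastes into TYPE_NUM or TYPE_STR; returns from the
 * enclosing function when the live member is not the expected one. */
#define CHECK_TYPE_VAL(type)			\
do {						\
	if (TYPE_ ## type != term->type_val)	\
		return -1;			\
} while (0)

static int read_num(struct term *term, long *out)
{
	CHECK_TYPE_VAL(NUM);
	*out = term->val.num;
	return 0;
}

int main(void)
{
	struct term bad  = { .type_val = TYPE_STR, .val.str = "oops" };
	struct term good = { .type_val = TYPE_NUM, .val.num = 42 };
	long n;

	printf("bad:  %d\n", read_num(&bad, &n));		/* -1: wrong member */
	printf("good: %d (n=%ld)\n", read_num(&good, &n), n);	/* 0, n=42 */
	return 0;
}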

View file

@@ -4,7 +4,9 @@
  * Parse symbolic events/counts passed in as options:
  */
 
+#include <stdbool.h>
 #include "../../../include/linux/perf_event.h"
+#include "types.h"
 
 struct list_head;
 struct perf_evsel;
@@ -34,16 +36,17 @@ extern int parse_filter(const struct option *opt, const char *str, int unset);
 #define EVENTS_HELP_MAX (128*1024)
 
 enum {
+	PARSE_EVENTS__TERM_TYPE_NUM,
+	PARSE_EVENTS__TERM_TYPE_STR,
+};
+
+enum {
+	PARSE_EVENTS__TERM_TYPE_USER,
 	PARSE_EVENTS__TERM_TYPE_CONFIG,
 	PARSE_EVENTS__TERM_TYPE_CONFIG1,
 	PARSE_EVENTS__TERM_TYPE_CONFIG2,
 	PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD,
 	PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE,
-	PARSE_EVENTS__TERM_TYPE_NUM,
-	PARSE_EVENTS__TERM_TYPE_STR,
-
-	PARSE_EVENTS__TERM_TYPE_HARDCODED_MAX =
-		PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE,
 };
 
 struct parse_events__term {
@@ -52,14 +55,16 @@ struct parse_events__term {
 		char *str;
 		long num;
 	} val;
-	int type;
+	int type_val;
+	int type_term;
 	struct list_head list;
 };
 
 int parse_events__is_hardcoded_term(struct parse_events__term *term);
-int parse_events__new_term(struct parse_events__term **term, int type,
-			   char *config, char *str, long num);
+int parse_events__term_num(struct parse_events__term **_term,
+			   int type_term, char *config, long num);
+int parse_events__term_str(struct parse_events__term **_term,
+			   int type_term, char *config, char *str);
 void parse_events__free_terms(struct list_head *terms);
 int parse_events_modifier(struct list_head *list __used, char *str __used);
 int parse_events_add_tracepoint(struct list_head *list, int *idx,

View file

@@ -176,8 +176,8 @@ PE_NAME '=' PE_NAME
 {
 	struct parse_events__term *term;
 
-	ABORT_ON(parse_events__new_term(&term, PARSE_EVENTS__TERM_TYPE_STR,
-		 $1, $3, 0));
+	ABORT_ON(parse_events__term_str(&term, PARSE_EVENTS__TERM_TYPE_USER,
+					$1, $3));
 	$$ = term;
 }
 |
@@ -185,8 +185,8 @@ PE_NAME '=' PE_VALUE
 {
 	struct parse_events__term *term;
 
-	ABORT_ON(parse_events__new_term(&term, PARSE_EVENTS__TERM_TYPE_NUM,
-		 $1, NULL, $3));
+	ABORT_ON(parse_events__term_num(&term, PARSE_EVENTS__TERM_TYPE_USER,
+					$1, $3));
 	$$ = term;
 }
 |
@@ -194,8 +194,8 @@ PE_NAME
 {
 	struct parse_events__term *term;
 
-	ABORT_ON(parse_events__new_term(&term, PARSE_EVENTS__TERM_TYPE_NUM,
-		 $1, NULL, 1));
+	ABORT_ON(parse_events__term_num(&term, PARSE_EVENTS__TERM_TYPE_USER,
+					$1, 1));
 	$$ = term;
 }
 |
@@ -203,7 +203,7 @@ PE_TERM '=' PE_VALUE
 {
 	struct parse_events__term *term;
 
-	ABORT_ON(parse_events__new_term(&term, $1, NULL, NULL, $3));
+	ABORT_ON(parse_events__term_num(&term, $1, NULL, $3));
 	$$ = term;
 }
 |
@@ -211,7 +211,7 @@ PE_TERM
 {
 	struct parse_events__term *term;
 
-	ABORT_ON(parse_events__new_term(&term, $1, NULL, NULL, 1));
+	ABORT_ON(parse_events__term_num(&term, $1, NULL, 1));
 	$$ = term;
 }

View file

@@ -225,7 +225,7 @@ static int pmu_config_term(struct list_head *formats,
 	if (parse_events__is_hardcoded_term(term))
 		return 0;
 
-	if (term->type != PARSE_EVENTS__TERM_TYPE_NUM)
+	if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM)
 		return -EINVAL;
 
 	format = pmu_find_format(formats, term->config);
@@ -246,6 +246,11 @@ static int pmu_config_term(struct list_head *formats,
 		return -EINVAL;
 	}
 
+	/*
+	 * XXX If we ever decide to go with string values for
+	 * non-hardcoded terms, here's the place to translate
+	 * them into value.
+	 */
 	*vp |= pmu_format_value(format->bits, term->val.num);
 	return 0;
 }
@@ -324,49 +329,58 @@ static struct test_format {
 /* Simulated users input. */
 static struct parse_events__term test_terms[] = {
 	{
 		.config    = (char *) "krava01",
 		.val.num   = 15,
-		.type      = PARSE_EVENTS__TERM_TYPE_NUM,
+		.type_val  = PARSE_EVENTS__TERM_TYPE_NUM,
+		.type_term = PARSE_EVENTS__TERM_TYPE_USER,
 	},
 	{
 		.config    = (char *) "krava02",
 		.val.num   = 170,
-		.type      = PARSE_EVENTS__TERM_TYPE_NUM,
+		.type_val  = PARSE_EVENTS__TERM_TYPE_NUM,
+		.type_term = PARSE_EVENTS__TERM_TYPE_USER,
 	},
 	{
 		.config    = (char *) "krava03",
 		.val.num   = 1,
-		.type      = PARSE_EVENTS__TERM_TYPE_NUM,
+		.type_val  = PARSE_EVENTS__TERM_TYPE_NUM,
+		.type_term = PARSE_EVENTS__TERM_TYPE_USER,
 	},
 	{
 		.config    = (char *) "krava11",
 		.val.num   = 27,
-		.type      = PARSE_EVENTS__TERM_TYPE_NUM,
+		.type_val  = PARSE_EVENTS__TERM_TYPE_NUM,
+		.type_term = PARSE_EVENTS__TERM_TYPE_USER,
 	},
 	{
 		.config    = (char *) "krava12",
 		.val.num   = 1,
-		.type      = PARSE_EVENTS__TERM_TYPE_NUM,
+		.type_val  = PARSE_EVENTS__TERM_TYPE_NUM,
+		.type_term = PARSE_EVENTS__TERM_TYPE_USER,
 	},
 	{
 		.config    = (char *) "krava13",
 		.val.num   = 2,
-		.type      = PARSE_EVENTS__TERM_TYPE_NUM,
+		.type_val  = PARSE_EVENTS__TERM_TYPE_NUM,
+		.type_term = PARSE_EVENTS__TERM_TYPE_USER,
 	},
 	{
 		.config    = (char *) "krava21",
 		.val.num   = 119,
-		.type      = PARSE_EVENTS__TERM_TYPE_NUM,
+		.type_val  = PARSE_EVENTS__TERM_TYPE_NUM,
+		.type_term = PARSE_EVENTS__TERM_TYPE_USER,
 	},
 	{
 		.config    = (char *) "krava22",
 		.val.num   = 11,
-		.type      = PARSE_EVENTS__TERM_TYPE_NUM,
+		.type_val  = PARSE_EVENTS__TERM_TYPE_NUM,
+		.type_term = PARSE_EVENTS__TERM_TYPE_USER,
 	},
 	{
 		.config    = (char *) "krava23",
 		.val.num   = 2,
-		.type      = PARSE_EVENTS__TERM_TYPE_NUM,
+		.type_val  = PARSE_EVENTS__TERM_TYPE_NUM,
+		.type_term = PARSE_EVENTS__TERM_TYPE_USER,
 	},
 };
 
 #define TERMS_CNT (sizeof(test_terms) / sizeof(struct parse_events__term))

View file

@@ -37,7 +37,7 @@ PyMODINIT_FUNC initperf_trace_context(void);
 #define FTRACE_MAX_EVENT				\
 	((1 << (sizeof(unsigned short) * 8)) - 1)
 
-struct event *events[FTRACE_MAX_EVENT];
+struct event_format *events[FTRACE_MAX_EVENT];
 
 #define MAX_FIELDS	64
 #define N_COMMON_FIELDS	7
@@ -136,7 +136,7 @@ static void define_field(enum print_arg_type field_type,
 	Py_DECREF(t);
 }
 
-static void define_event_symbols(struct event *event,
+static void define_event_symbols(struct event_format *event,
 				 const char *ev_name,
 				 struct print_arg *args)
 {
@@ -178,6 +178,10 @@ static void define_event_symbols(struct event *event,
 		define_event_symbols(event, ev_name, args->op.right);
 		break;
 	default:
+		/* gcc warns for these? */
+	case PRINT_BSTRING:
+	case PRINT_DYNAMIC_ARRAY:
+	case PRINT_FUNC:
 		/* we should warn... */
 		return;
 	}
@@ -186,10 +190,10 @@ static void define_event_symbols(struct event *event,
 		define_event_symbols(event, ev_name, args->next);
 }
 
-static inline struct event *find_cache_event(int type)
+static inline struct event_format *find_cache_event(int type)
 {
 	static char ev_name[256];
-	struct event *event;
+	struct event_format *event;
 
 	if (events[type])
 		return events[type];
@@ -216,7 +220,7 @@ static void python_process_event(union perf_event *pevent __unused,
 	struct format_field *field;
 	unsigned long long val;
 	unsigned long s, ns;
-	struct event *event;
+	struct event_format *event;
 	unsigned n = 0;
 	int type;
 	int pid;
@@ -436,7 +440,7 @@ out:
 static int python_generate_script(const char *outfile)
 {
-	struct event *event = NULL;
+	struct event_format *event = NULL;
 	struct format_field *f;
 	char fname[PATH_MAX];
 	int not_first, count;

View file

@@ -1108,16 +1108,10 @@ more:
 	}
 
 	if ((skip = perf_session__process_event(self, &event, tool, head)) < 0) {
-		dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n",
-			    head, event.header.size, event.header.type);
-		/*
-		 * assume we lost track of the stream, check alignment, and
-		 * increment a single u64 in the hope to catch on again 'soon'.
-		 */
-		if (unlikely(head & 7))
-			head &= ~7ULL;
-
-		size = 8;
+		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
+		       head, event.header.size, event.header.type);
+		err = -EINVAL;
+		goto out_err;
 	}
 
 	head += size;
@@ -1226,17 +1220,11 @@ more:
 	if (size == 0 ||
 	    perf_session__process_event(session, event, tool, file_pos) < 0) {
-		dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n",
-			    file_offset + head, event->header.size,
-			    event->header.type);
-		/*
-		 * assume we lost track of the stream, check alignment, and
-		 * increment a single u64 in the hope to catch on again 'soon'.
-		 */
-		if (unlikely(head & 7))
-			head &= ~7ULL;
-
-		size = 8;
+		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
+		       file_offset + head, event->header.size,
+		       event->header.type);
+		err = -EINVAL;
+		goto out_err;
 	}
 
 	head += size;
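Both hunks drop the old resync heuristic (realign head and hope to land on a record boundary) in favor of failing fast with err/goto, the kernel's usual single-exit cleanup style. A toy version of that control flow (the record format here is invented):

#include <stdio.h>
#include <stdlib.h>

/* Pretend 0xff marks a record the parser cannot process. */
static int process_record(unsigned char rec)
{
	return rec == 0xff ? -1 : 0;
}

int main(void)
{
	unsigned char stream[] = { 1, 2, 0xff, 4 };
	char *state = malloc(64);	/* stands in for session state */
	int err = 0;

	if (state == NULL)
		return 1;

	for (size_t head = 0; head < sizeof(stream); head++) {
		if (process_record(stream[head]) < 0) {
			fprintf(stderr, "%zu: failed to process record\n", head);
			err = -1;
			goto out_err;	/* fail fast, one cleanup path */
		}
	}
out_err:
	free(state);
	return err ? 1 : 0;
}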

View file

@@ -65,6 +65,11 @@ struct symbol {
 
 void symbol__delete(struct symbol *sym);
 
+static inline size_t symbol__size(const struct symbol *sym)
+{
+	return sym->end - sym->start + 1;
+}
+
 struct strlist;
 
 struct symbol_conf {
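The +1 in symbol__size only makes sense because these symbol ranges carry an inclusive end address, i.e. [start, end] rather than the half-open [start, end). A two-line check of the convention (addresses are made up):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Inclusive [start, end] range, as the new helper implies. */
struct sym { uint64_t start, end; };

static inline size_t sym_size(const struct sym *s)
{
	return s->end - s->start + 1;
}

int main(void)
{
	struct sym s = { .start = 0x1000, .end = 0x10ff };

	printf("size = %zu bytes\n", sym_size(&s));	/* 256 */
	return 0;
}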

Some files were not shown because too many files changed in this diff. Show more