Merge tag 'perf-core-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core

Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

New features:

- Allow using trace event fields as sort order keys, making 'perf evlist --trace-fields'
  show those, so that the user can select a subset and use them like:

    perf top -e sched:sched_switch -s prev_comm,next_comm

  That works as well in 'perf report' when handling files containing
  tracepoints.

  When only tracepoint events are found in a perf.data file, the default is to
  format the output like ftrace does, using the libtraceevent formatters, plugins, etc. (Namhyung Kim)

- Add support in 'perf script' to process 'perf stat record' generated files,
  culminating in a python perf script that calculates CPI (Cycles per
  Instruction) (Jiri Olsa)
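
  A rough usage sketch (the in-tree script path and the use of interval mode
  are assumptions for illustration, not taken from this changelog):

    # perf stat record -e cycles,instructions -I 1000 sleep 3
    # perf script -s scripts/python/stat-cpi.py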

- Show random perf tool tips in the 'perf report' bottom line (Namhyung Kim)

- 'perf report' now defaults to --group if the perf.data file has grouped events; try it with:

  # perf record -e '{cycles,instructions}' -a sleep 1
  [ perf record: Woken up 1 times to write data ]
  [ perf record: Captured and wrote 1.093 MB perf.data (1247 samples) ]
  # perf report
  # Samples: 1K of event 'anon group { cycles, instructions }'
  # Event count (approx.): 1955219195
  #
  #       Overhead  Command     Shared Object      Symbol

     2.86%   0.22%  swapper     [kernel.kallsyms]  [k] intel_idle
     1.05%   0.33%  firefox     libxul.so          [.] js::SetObjectElement
     1.05%   0.00%  kworker/0:3 [kernel.kallsyms]  [k] gen6_ring_get_seqno
     0.88%   0.17%  chrome      chrome             [.] 0x0000000000ee27ab
     0.65%   0.86%  firefox     libxul.so          [.] js::ValueToId<(js::AllowGC)1>
     0.64%   0.23%  JS Helper   libxul.so          [.] js::SplayTree<js::jit::LiveRange*, js::jit::LiveRange>::splay
     0.62%   1.27%  firefox     libxul.so          [.] js::GetIterator
     0.61%   1.74%  firefox     libxul.so          [.] js::NativeSetProperty
     0.61%   0.31%  firefox     libxul.so          [.] js::SetPropertyByDefining
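
  The tips.txt file added in this series also suggests a related recipe for
  sampling grouped events, using leader sampling:

    # perf record -e '{cycles,instructions}:S' -a sleep 1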

User visible fixes:

- Collect data mmaps so that the DWARF unwinder can handle use cases needing them,
  like softice (Jiri Olsa)

- Decay callchains in fractal mode, fixing up cases where 'perf top -g' would
  show entries with more than 100% (Namhyung Kim)

Infrastructure changes:

- Sync tools/lib with the lib/ in the kernel sources for find_bit.c and
  move bitmap.[ch] from tools/perf/util/ to tools/lib/ (Arnaldo Carvalho de Melo)

- No need to set attr.sample_freq in some 'perf test' entries that only
  want to deal with PERF_RECORD_ meta-events; also slightly improve the error
  output for the Intel CQM test (Arnaldo Carvalho de Melo)

- Fix the python binding build, adding some missing object files now required
  because cpumap uses the find_bit facilities (Arnaldo Carvalho de Melo)

- tools/build improvements (Jiri Olsa)

- Add more files to cscope/ctags databases (Jiri Olsa)

- Do not show 'trace' in 'perf help' if it is not compiled in (Jiri Olsa)

- Make perf_evlist__open() open evsels with their cpus and threads,
  like perf record does, making them consistent (Adrian Hunter)

- Fix pmu snapshot initialization bug (Stephane Eranian)

- Add missing headers in perf's MANIFEST (Wang Nan)

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar 2016-01-09 17:17:33 +01:00
Parents: 9cc2617de5 775d8a1b0d
Commit: 3eb9ede23b
65 changed files, 1558 insertions and 349 deletions


@ -122,13 +122,31 @@ define feature_print_text_code
MSG = $(shell printf '...%30s: %s' $(1) $(2))
endef
FEATURE_DUMP_FILENAME = $(OUTPUT)FEATURE-DUMP$(FEATURE_USER)
FEATURE_DUMP := $(foreach feat,$(FEATURE_DISPLAY),feature-$(feat)($(feature-$(feat))))
FEATURE_DUMP_FILE := $(shell touch $(FEATURE_DUMP_FILENAME); cat $(FEATURE_DUMP_FILENAME))
#
# generates feature value assignment for name, like:
# $(call feature_assign,dwarf) == feature-dwarf=1
#
feature_assign = feature-$(1)=$(feature-$(1))
ifeq ($(dwarf-post-unwind),1)
FEATURE_DUMP += dwarf-post-unwind($(dwarf-post-unwind-text))
endif
FEATURE_DUMP_FILENAME = $(OUTPUT)FEATURE-DUMP$(FEATURE_USER)
FEATURE_DUMP := $(shell touch $(FEATURE_DUMP_FILENAME); cat $(FEATURE_DUMP_FILENAME))
feature_dump_check = $(eval $(feature_dump_check_code))
define feature_dump_check_code
ifeq ($(findstring $(1),$(FEATURE_DUMP)),)
$(2) := 1
endif
endef
#
# First check if any test from FEATURE_DISPLAY
# and set feature_display := 1 if it does
$(foreach feat,$(FEATURE_DISPLAY),$(call feature_dump_check,$(call feature_assign,$(feat)),feature_display))
#
# Now also check if any other test changed,
# so we force FEATURE-DUMP generation
$(foreach feat,$(FEATURE_TESTS),$(call feature_dump_check,$(call feature_assign,$(feat)),feature_dump_changed))
# The $(feature_display) controls the default detection message
# output. It's set if:
@ -137,13 +155,13 @@ endif
# - one of the $(FEATURE_DISPLAY) is not detected
# - VF is enabled
ifneq ("$(FEATURE_DUMP)","$(FEATURE_DUMP_FILE)")
$(shell echo "$(FEATURE_DUMP)" > $(FEATURE_DUMP_FILENAME))
feature_display := 1
ifeq ($(feature_dump_changed),1)
$(shell rm -f $(FEATURE_DUMP_FILENAME))
$(foreach feat,$(FEATURE_TESTS),$(shell echo "$(call feature_assign,$(feat))" >> $(FEATURE_DUMP_FILENAME)))
endif
feature_display_check = $(eval $(feature_check_display_code))
define feature_display_check_code
define feature_check_display_code
ifneq ($(feature-$(1)), 1)
feature_display := 1
endif
@ -160,11 +178,6 @@ ifeq ($(feature_display),1)
$(info )
$(info Auto-detecting system features:)
$(foreach feat,$(FEATURE_DISPLAY),$(call feature_print_status,$(feat),))
ifeq ($(dwarf-post-unwind),1)
$(call feature_print_text,"DWARF post unwind library", $(dwarf-post-unwind-text))
endif
ifneq ($(feature_verbose),1)
$(info )
endif


@ -11,6 +11,8 @@ int __bitmap_weight(const unsigned long *bitmap, int bits);
void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
#define BITMAP_LAST_WORD_MASK(nbits) \
( \
((nbits) % BITS_PER_LONG) ? \


@ -80,7 +80,11 @@ endif
endif
ifeq ($(check_feat),1)
ifeq ($(FEATURES_DUMP),)
include $(srctree)/tools/build/Makefile.feature
else
include $(FEATURES_DUMP)
endif
endif
export prefix libdir src obj

tools/lib/find_bit.c (new file, 84 lines)

@ -0,0 +1,84 @@
/* bit search implementation
*
* Copied from lib/find_bit.c to tools/lib/find_bit.c
*
* Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* Copyright (C) 2008 IBM Corporation
* 'find_last_bit' is written by Rusty Russell <rusty@rustcorp.com.au>
* (Inspired by David Howell's find_next_bit implementation)
*
* Rewritten by Yury Norov <yury.norov@gmail.com> to decrease
* size and improve performance, 2015.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/bitops.h>
#include <linux/bitmap.h>
#include <linux/kernel.h>
#if !defined(find_next_bit)
/*
* This is a common helper function for find_next_bit and
* find_next_zero_bit. The difference is the "invert" argument, which
* is XORed with each fetched word before searching it for one bits.
*/
static unsigned long _find_next_bit(const unsigned long *addr,
unsigned long nbits, unsigned long start, unsigned long invert)
{
unsigned long tmp;
if (!nbits || start >= nbits)
return nbits;
tmp = addr[start / BITS_PER_LONG] ^ invert;
/* Handle 1st word. */
tmp &= BITMAP_FIRST_WORD_MASK(start);
start = round_down(start, BITS_PER_LONG);
while (!tmp) {
start += BITS_PER_LONG;
if (start >= nbits)
return nbits;
tmp = addr[start / BITS_PER_LONG] ^ invert;
}
return min(start + __ffs(tmp), nbits);
}
#endif
#ifndef find_next_bit
/*
* Find the next set bit in a memory region.
*/
unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
unsigned long offset)
{
return _find_next_bit(addr, size, offset, 0UL);
}
#endif
#ifndef find_first_bit
/*
* Find the first set bit in a memory region.
*/
unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
{
unsigned long idx;
for (idx = 0; idx * BITS_PER_LONG < size; idx++) {
if (addr[idx])
return min(idx * BITS_PER_LONG + __ffs(addr[idx]), size);
}
return size;
}
#endif


@ -4735,73 +4735,80 @@ static int is_printable_array(char *p, unsigned int len)
return 1;
}
static void print_event_fields(struct trace_seq *s, void *data,
int size __maybe_unused,
struct event_format *event)
void pevent_print_field(struct trace_seq *s, void *data,
struct format_field *field)
{
struct format_field *field;
unsigned long long val;
unsigned int offset, len, i;
struct pevent *pevent = field->event->pevent;
if (field->flags & FIELD_IS_ARRAY) {
offset = field->offset;
len = field->size;
if (field->flags & FIELD_IS_DYNAMIC) {
val = pevent_read_number(pevent, data + offset, len);
offset = val;
len = offset >> 16;
offset &= 0xffff;
}
if (field->flags & FIELD_IS_STRING &&
is_printable_array(data + offset, len)) {
trace_seq_printf(s, "%s", (char *)data + offset);
} else {
trace_seq_puts(s, "ARRAY[");
for (i = 0; i < len; i++) {
if (i)
trace_seq_puts(s, ", ");
trace_seq_printf(s, "%02x",
*((unsigned char *)data + offset + i));
}
trace_seq_putc(s, ']');
field->flags &= ~FIELD_IS_STRING;
}
} else {
val = pevent_read_number(pevent, data + field->offset,
field->size);
if (field->flags & FIELD_IS_POINTER) {
trace_seq_printf(s, "0x%llx", val);
} else if (field->flags & FIELD_IS_SIGNED) {
switch (field->size) {
case 4:
/*
* If field is long then print it in hex.
* A long usually stores pointers.
*/
if (field->flags & FIELD_IS_LONG)
trace_seq_printf(s, "0x%x", (int)val);
else
trace_seq_printf(s, "%d", (int)val);
break;
case 2:
trace_seq_printf(s, "%2d", (short)val);
break;
case 1:
trace_seq_printf(s, "%1d", (char)val);
break;
default:
trace_seq_printf(s, "%lld", val);
}
} else {
if (field->flags & FIELD_IS_LONG)
trace_seq_printf(s, "0x%llx", val);
else
trace_seq_printf(s, "%llu", val);
}
}
}
void pevent_print_fields(struct trace_seq *s, void *data,
int size __maybe_unused, struct event_format *event)
{
struct format_field *field;
field = event->format.fields;
while (field) {
trace_seq_printf(s, " %s=", field->name);
if (field->flags & FIELD_IS_ARRAY) {
offset = field->offset;
len = field->size;
if (field->flags & FIELD_IS_DYNAMIC) {
val = pevent_read_number(event->pevent, data + offset, len);
offset = val;
len = offset >> 16;
offset &= 0xffff;
}
if (field->flags & FIELD_IS_STRING &&
is_printable_array(data + offset, len)) {
trace_seq_printf(s, "%s", (char *)data + offset);
} else {
trace_seq_puts(s, "ARRAY[");
for (i = 0; i < len; i++) {
if (i)
trace_seq_puts(s, ", ");
trace_seq_printf(s, "%02x",
*((unsigned char *)data + offset + i));
}
trace_seq_putc(s, ']');
field->flags &= ~FIELD_IS_STRING;
}
} else {
val = pevent_read_number(event->pevent, data + field->offset,
field->size);
if (field->flags & FIELD_IS_POINTER) {
trace_seq_printf(s, "0x%llx", val);
} else if (field->flags & FIELD_IS_SIGNED) {
switch (field->size) {
case 4:
/*
* If field is long then print it in hex.
* A long usually stores pointers.
*/
if (field->flags & FIELD_IS_LONG)
trace_seq_printf(s, "0x%x", (int)val);
else
trace_seq_printf(s, "%d", (int)val);
break;
case 2:
trace_seq_printf(s, "%2d", (short)val);
break;
case 1:
trace_seq_printf(s, "%1d", (char)val);
break;
default:
trace_seq_printf(s, "%lld", val);
}
} else {
if (field->flags & FIELD_IS_LONG)
trace_seq_printf(s, "0x%llx", val);
else
trace_seq_printf(s, "%llu", val);
}
}
pevent_print_field(s, data, field);
field = field->next;
}
}
@ -4827,7 +4834,7 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
if (event->flags & EVENT_FL_FAILED) {
trace_seq_printf(s, "[FAILED TO PARSE]");
print_event_fields(s, data, size, event);
pevent_print_fields(s, data, size, event);
return;
}
@ -5301,7 +5308,7 @@ void pevent_event_info(struct trace_seq *s, struct event_format *event,
int print_pretty = 1;
if (event->pevent->print_raw || (event->flags & EVENT_FL_PRINTRAW))
print_event_fields(s, record->data, record->size, event);
pevent_print_fields(s, record->data, record->size, event);
else {
if (event->handler && !(event->flags & EVENT_FL_NOHANDLE))


@ -705,6 +705,10 @@ struct cmdline *pevent_data_pid_from_comm(struct pevent *pevent, const char *com
struct cmdline *next);
int pevent_cmdline_pid(struct pevent *pevent, struct cmdline *cmdline);
void pevent_print_field(struct trace_seq *s, void *data,
struct format_field *field);
void pevent_print_fields(struct trace_seq *s, void *data,
int size __maybe_unused, struct event_format *event);
void pevent_event_info(struct trace_seq *s, struct event_format *event,
struct pevent_record *record);
int pevent_strerror(struct pevent *pevent, enum pevent_errno errnum,


@ -1,89 +0,0 @@
/* find_next_bit.c: fallback find next bit implementation
*
* Copied from lib/find_next_bit.c to tools/lib/next_bit.c
*
* Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/bitops.h>
#include <asm/types.h>
#include <asm/byteorder.h>
#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
#ifndef find_next_bit
/*
* Find the next set bit in a memory region.
*/
unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
unsigned long offset)
{
const unsigned long *p = addr + BITOP_WORD(offset);
unsigned long result = offset & ~(BITS_PER_LONG-1);
unsigned long tmp;
if (offset >= size)
return size;
size -= result;
offset %= BITS_PER_LONG;
if (offset) {
tmp = *(p++);
tmp &= (~0UL << offset);
if (size < BITS_PER_LONG)
goto found_first;
if (tmp)
goto found_middle;
size -= BITS_PER_LONG;
result += BITS_PER_LONG;
}
while (size & ~(BITS_PER_LONG-1)) {
if ((tmp = *(p++)))
goto found_middle;
result += BITS_PER_LONG;
size -= BITS_PER_LONG;
}
if (!size)
return result;
tmp = *p;
found_first:
tmp &= (~0UL >> (BITS_PER_LONG - size));
if (tmp == 0UL) /* Are any bits set? */
return result + size; /* Nope. */
found_middle:
return result + __ffs(tmp);
}
#endif
#ifndef find_first_bit
/*
* Find the first set bit in a memory region.
*/
unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
{
const unsigned long *p = addr;
unsigned long result = 0;
unsigned long tmp;
while (size & ~(BITS_PER_LONG-1)) {
if ((tmp = *(p++)))
goto found;
result += BITS_PER_LONG;
size -= BITS_PER_LONG;
}
if (!size)
return result;
tmp = (*p) & (~0UL >> (BITS_PER_LONG - size));
if (tmp == 0UL) /* Are any bits set? */
return result + size; /* Nope. */
found:
return result + __ffs(tmp);
}
#endif


@ -41,6 +41,7 @@ CFLAGS_perf.o += -DPERF_HTML_PATH="BUILD_STR($(htmldir_SQ))" \
-DPREFIX="BUILD_STR($(prefix_SQ))" \
-include $(OUTPUT)PERF-VERSION-FILE
CFLAGS_builtin-trace.o += -DSTRACE_GROUPS_DIR="BUILD_STR($(STRACE_GROUPS_DIR_SQ))"
CFLAGS_builtin-report.o += -DTIPDIR="BUILD_STR($(tipdir_SQ))"
libperf-y += util/
libperf-y += arch/


@ -32,6 +32,9 @@ OPTIONS
--group::
Show event group information.
--trace-fields::
Show tracepoint field names.
SEE ALSO
--------
linkperf:perf-record[1], linkperf:perf-list[1],


@ -117,6 +117,30 @@ OPTIONS
And default sort keys are changed to comm, dso_from, symbol_from, dso_to
and symbol_to, see '--branch-stack'.
If the data file has tracepoint event(s), following (dynamic) sort keys
are also available:
trace, trace_fields, [<event>.]<field>[/raw]
- trace: pretty printed trace output in a single column
- trace_fields: fields in tracepoints in separate columns
- <field name>: optional event and field name for a specific field
The last form consists of event and field names. If event name is
omitted, it searches all events for matching field name. The matched
field will be shown only for the event has the field. The event name
supports substring match so user doesn't need to specify full subsystem
and event name everytime. For example, 'sched:sched_switch' event can
be shortened to 'switch' as long as it's not ambiguous. Also event can
be specified by its index (starting from 1) preceded by the '%'.
So '%1' is the first event, '%2' is the second, and so on.
The field name can have '/raw' suffix which disables pretty printing
and shows raw field value like hex numbers. The --raw-trace option
has the same effect for all dynamic sort keys.
The default sort keys are changed to 'trace' if all events in the data
file are tracepoint.
-F::
--fields=::
Specify output field - multiple keys can be specified in CSV format.
@ -371,6 +395,9 @@ include::itrace.txt[]
--socket-filter::
Only report the samples on the processor socket that match with this filter
--raw-trace::
When displaying traceevent output, do not use print fmt or plugins.
include::callchain-overhead-calculation.txt[]
SEE ALSO


@ -230,6 +230,9 @@ Default is to monitor all CPUS.
The various filters must be specified as a comma separated list: --branch-filter any_ret,u,k
Note that this feature may not be available on all processors.
--raw-trace::
When displaying traceevent output, do not use print fmt or plugins.
INTERACTIVE PROMPTING KEYS
--------------------------


@ -0,0 +1,14 @@
For a higher level overview, try: perf report --sort comm,dso
Sample related events with: perf record -e '{cycles,instructions}:S'
Compare performance results with: perf diff [<old file> <new file>]
Boolean options have negative forms, e.g.: perf report --no-children
Customize output of perf script with: perf script -F event,ip,sym
Generate a script for your data: perf script -g <lang>
Save output of perf stat using: perf stat record <target workload>
Create an archive with symtabs to analyse on other machine: perf archive
Search options using a keyword: perf report -h <keyword>
Use parent filter to see specific call path: perf report -p <regex>
List events using substring match: perf list <keyword>
To see list of saved events and attributes: perf evlist -v
Use --symfs <dir> if your symbol files are in non-standard locations
To see callchains in a more compact form: perf report -g folded


@ -1,6 +1,7 @@
tools/perf
tools/arch/alpha/include/asm/barrier.h
tools/arch/arm/include/asm/barrier.h
tools/arch/arm64/include/asm/barrier.h
tools/arch/ia64/include/asm/barrier.h
tools/arch/mips/include/asm/barrier.h
tools/arch/powerpc/include/asm/barrier.h
@ -26,10 +27,11 @@ tools/lib/rbtree.c
tools/lib/string.c
tools/lib/symbol/kallsyms.c
tools/lib/symbol/kallsyms.h
tools/lib/util/find_next_bit.c
tools/lib/find_bit.c
tools/include/asm/atomic.h
tools/include/asm/barrier.h
tools/include/asm/bug.h
tools/include/asm-generic/atomic-gcc.h
tools/include/asm-generic/barrier.h
tools/include/asm-generic/bitops/arch_hweight.h
tools/include/asm-generic/bitops/atomic.h


@ -436,7 +436,7 @@ $(LIBAPI)-clean:
$(Q)$(MAKE) -C $(LIB_DIR) O=$(OUTPUT) clean >/dev/null
$(LIBBPF): fixdep FORCE
$(Q)$(MAKE) -C $(BPF_DIR) O=$(OUTPUT) $(OUTPUT)libbpf.a
$(Q)$(MAKE) -C $(BPF_DIR) O=$(OUTPUT) $(OUTPUT)libbpf.a FEATURES_DUMP=$(realpath $(OUTPUT)FEATURE-DUMP)
$(LIBBPF)-clean:
$(call QUIET_CLEAN, libbpf)
@ -488,7 +488,7 @@ INSTALL_DOC_TARGETS += quick-install-doc quick-install-man quick-install-html
$(DOC_TARGETS):
$(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) $(@:doc=all)
TAG_FOLDERS= . ../lib/traceevent ../lib/api ../lib/symbol ../include ../lib/bpf
TAG_FOLDERS= . ../lib ../include
TAG_FILES= ../../include/uapi/linux/perf_event.h
TAGS:
@ -567,6 +567,9 @@ endif
$(call QUIET_INSTALL, perf_completion-script) \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(sysconfdir_SQ)/bash_completion.d'; \
$(INSTALL) perf-completion.sh '$(DESTDIR_SQ)$(sysconfdir_SQ)/bash_completion.d/perf'
$(call QUIET_INSTALL, perf-tip) \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(tip_instdir_SQ)'; \
$(INSTALL) Documentation/tips.txt -t '$(DESTDIR_SQ)$(tip_instdir_SQ)'
install-tests: all install-gtk
$(call QUIET_INSTALL, tests) \


@ -54,7 +54,7 @@ int test__intel_cqm_count_nmi_context(int subtest __maybe_unused)
ret = parse_events(evlist, "intel_cqm/llc_occupancy/", NULL);
if (ret) {
pr_debug("parse_events failed\n");
pr_debug("parse_events failed, is \"intel_cqm/llc_occupancy/\" available?\n");
err = TEST_SKIP;
goto out;
}


@ -41,7 +41,6 @@ int test__perf_time_to_tsc(int subtest __maybe_unused)
.mmap_pages = UINT_MAX,
.user_freq = UINT_MAX,
.user_interval = ULLONG_MAX,
.freq = 4000,
.target = {
.uses_mmap = true,
},


@ -327,7 +327,7 @@ static int intel_bts_snapshot_start(struct auxtrace_record *itr)
evlist__for_each(btsr->evlist, evsel) {
if (evsel->attr.type == btsr->intel_bts_pmu->type)
return perf_evlist__disable_event(btsr->evlist, evsel);
return perf_evsel__disable(evsel);
}
return -EINVAL;
}
@ -340,7 +340,7 @@ static int intel_bts_snapshot_finish(struct auxtrace_record *itr)
evlist__for_each(btsr->evlist, evsel) {
if (evsel->attr.type == btsr->intel_bts_pmu->type)
return perf_evlist__enable_event(btsr->evlist, evsel);
return perf_evsel__enable(evsel);
}
return -EINVAL;
}


@ -725,7 +725,7 @@ static int intel_pt_snapshot_start(struct auxtrace_record *itr)
evlist__for_each(ptr->evlist, evsel) {
if (evsel->attr.type == ptr->intel_pt_pmu->type)
return perf_evlist__disable_event(ptr->evlist, evsel);
return perf_evsel__disable(evsel);
}
return -EINVAL;
}
@ -738,7 +738,7 @@ static int intel_pt_snapshot_finish(struct auxtrace_record *itr)
evlist__for_each(ptr->evlist, evsel) {
if (evsel->attr.type == ptr->intel_pt_pmu->type)
return perf_evlist__enable_event(ptr->evlist, evsel);
return perf_evsel__enable(evsel);
}
return -EINVAL;
}


@ -47,7 +47,7 @@ struct perf_annotate {
};
static int perf_evsel__add_sample(struct perf_evsel *evsel,
struct perf_sample *sample __maybe_unused,
struct perf_sample *sample,
struct addr_location *al,
struct perf_annotate *ann)
{
@ -72,7 +72,10 @@ static int perf_evsel__add_sample(struct perf_evsel *evsel,
return 0;
}
he = __hists__add_entry(hists, al, NULL, NULL, NULL, 1, 1, 0, true);
sample->period = 1;
sample->weight = 1;
he = __hists__add_entry(hists, al, NULL, NULL, NULL, sample, true);
if (he == NULL)
return -ENOMEM;
@ -367,7 +370,7 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __maybe_unused)
if (ret < 0)
goto out_delete;
if (setup_sorting() < 0)
if (setup_sorting(NULL) < 0)
usage_with_options(annotate_usage, options);
if (annotate.use_stdio)


@ -311,11 +311,11 @@ static int formula_fprintf(struct hist_entry *he, struct hist_entry *pair,
}
static int hists__add_entry(struct hists *hists,
struct addr_location *al, u64 period,
u64 weight, u64 transaction)
struct addr_location *al,
struct perf_sample *sample)
{
if (__hists__add_entry(hists, al, NULL, NULL, NULL, period, weight,
transaction, true) != NULL)
if (__hists__add_entry(hists, al, NULL, NULL, NULL,
sample, true) != NULL)
return 0;
return -ENOMEM;
}
@ -336,8 +336,7 @@ static int diff__process_sample_event(struct perf_tool *tool __maybe_unused,
return -1;
}
if (hists__add_entry(hists, &al, sample->period,
sample->weight, sample->transaction)) {
if (hists__add_entry(hists, &al, sample)) {
pr_warning("problem incrementing symbol period, skipping event\n");
goto out_put;
}
@ -1208,7 +1207,7 @@ static int ui_init(void)
BUG_ON(1);
}
list_add(&fmt->sort_list, &perf_hpp__sort_list);
perf_hpp__register_sort_field(fmt);
return 0;
}
@ -1280,7 +1279,7 @@ int cmd_diff(int argc, const char **argv, const char *prefix __maybe_unused)
sort__mode = SORT_MODE__DIFF;
if (setup_sorting() < 0)
if (setup_sorting(NULL) < 0)
usage_with_options(diff_usage, options);
setup_pager();


@ -26,14 +26,22 @@ static int __cmd_evlist(const char *file_name, struct perf_attr_details *details
.mode = PERF_DATA_MODE_READ,
.force = details->force,
};
bool has_tracepoint = false;
session = perf_session__new(&file, 0, NULL);
if (session == NULL)
return -1;
evlist__for_each(session->evlist, pos)
evlist__for_each(session->evlist, pos) {
perf_evsel__fprintf(pos, details, stdout);
if (pos->attr.type == PERF_TYPE_TRACEPOINT)
has_tracepoint = true;
}
if (has_tracepoint && !details->trace_fields)
printf("# Tip: use 'perf evlist --trace-fields' to show fields for tracepoint events\n");
perf_session__delete(session);
return 0;
}
@ -49,6 +57,7 @@ int cmd_evlist(int argc, const char **argv, const char *prefix __maybe_unused)
OPT_BOOLEAN('g', "group", &details.event_group,
"Show event group information"),
OPT_BOOLEAN('f', "force", &details.force, "don't complain, do it"),
OPT_BOOLEAN(0, "trace-fields", &details.trace_fields, "Show tracepoint fields"),
OPT_END()
};
const char * const evlist_usage[] = {


@ -815,8 +815,12 @@ int record_parse_callchain_opt(const struct option *opt,
}
ret = parse_callchain_record_opt(arg, &callchain_param);
if (!ret)
if (!ret) {
/* Enable data address sampling for DWARF unwind. */
if (callchain_param.record_mode == CALLCHAIN_DWARF)
record->sample_address = true;
callchain_debug();
}
return ret;
}


@ -433,7 +433,7 @@ static int report__browse_hists(struct report *rep)
int ret;
struct perf_session *session = rep->session;
struct perf_evlist *evlist = session->evlist;
const char *help = "For a higher level overview, try: perf report --sort comm,dso";
const char *help = perf_tip(TIPDIR);
switch (use_browser) {
case 1:
@ -788,6 +788,8 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
"Show callgraph from reference event"),
OPT_INTEGER(0, "socket-filter", &report.socket_filter,
"only show processor socket that match with this filter"),
OPT_BOOLEAN(0, "raw-trace", &symbol_conf.raw_trace,
"Show raw trace event output (do not use print fmt or plugins)"),
OPT_END()
};
struct perf_data_file file = {
@ -897,7 +899,7 @@ repeat:
symbol_conf.cumulate_callchain = false;
}
if (setup_sorting() < 0) {
if (setup_sorting(session->evlist) < 0) {
if (sort_order)
parse_options_usage(report_usage, options, "s", 1);
if (field_order)


@ -18,7 +18,11 @@
#include "util/sort.h"
#include "util/data.h"
#include "util/auxtrace.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/stat.h"
#include <linux/bitmap.h>
#include "asm/bug.h"
static char const *script_name;
static char const *generate_script_lang;
@ -32,6 +36,7 @@ static bool print_flags;
static bool nanosecs;
static const char *cpu_list;
static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
static struct perf_stat_config stat_config;
unsigned int scripting_max_stack = PERF_MAX_STACK_DEPTH;
@ -216,6 +221,9 @@ static int perf_evsel__check_attr(struct perf_evsel *evsel,
struct perf_event_attr *attr = &evsel->attr;
bool allow_user_set;
if (perf_header__has_feat(&session->header, HEADER_STAT))
return 0;
allow_user_set = perf_header__has_feat(&session->header,
HEADER_AUXTRACE);
@ -606,9 +614,27 @@ struct perf_script {
bool show_task_events;
bool show_mmap_events;
bool show_switch_events;
bool allocated;
struct cpu_map *cpus;
struct thread_map *threads;
int name_width;
};
static void process_event(struct perf_script *script __maybe_unused, union perf_event *event,
static int perf_evlist__max_name_len(struct perf_evlist *evlist)
{
struct perf_evsel *evsel;
int max = 0;
evlist__for_each(evlist, evsel) {
int len = strlen(perf_evsel__name(evsel));
max = MAX(len, max);
}
return max;
}
static void process_event(struct perf_script *script, union perf_event *event,
struct perf_sample *sample, struct perf_evsel *evsel,
struct addr_location *al)
{
@ -625,7 +651,12 @@ static void process_event(struct perf_script *script __maybe_unused, union perf_
if (PRINT_FIELD(EVNAME)) {
const char *evname = perf_evsel__name(evsel);
printf("%s: ", evname ? evname : "[unknown]");
if (!script->name_width)
script->name_width = perf_evlist__max_name_len(script->session->evlist);
printf("%*s: ", script->name_width,
evname ? evname : "[unknown]");
}
if (print_flags)
@ -666,6 +697,54 @@ static void process_event(struct perf_script *script __maybe_unused, union perf_
static struct scripting_ops *scripting_ops;
static void __process_stat(struct perf_evsel *counter, u64 tstamp)
{
int nthreads = thread_map__nr(counter->threads);
int ncpus = perf_evsel__nr_cpus(counter);
int cpu, thread;
static int header_printed;
if (counter->system_wide)
nthreads = 1;
if (!header_printed) {
printf("%3s %8s %15s %15s %15s %15s %s\n",
"CPU", "THREAD", "VAL", "ENA", "RUN", "TIME", "EVENT");
header_printed = 1;
}
for (thread = 0; thread < nthreads; thread++) {
for (cpu = 0; cpu < ncpus; cpu++) {
struct perf_counts_values *counts;
counts = perf_counts(counter->counts, cpu, thread);
printf("%3d %8d %15" PRIu64 " %15" PRIu64 " %15" PRIu64 " %15" PRIu64 " %s\n",
counter->cpus->map[cpu],
thread_map__pid(counter->threads, thread),
counts->val,
counts->ena,
counts->run,
tstamp,
perf_evsel__name(counter));
}
}
}
static void process_stat(struct perf_evsel *counter, u64 tstamp)
{
if (scripting_ops && scripting_ops->process_stat)
scripting_ops->process_stat(&stat_config, counter, tstamp);
else
__process_stat(counter, tstamp);
}
static void process_stat_interval(u64 tstamp)
{
if (scripting_ops && scripting_ops->process_stat_interval)
scripting_ops->process_stat_interval(tstamp);
}
static void setup_scripting(void)
{
setup_perl_scripting();
@ -1682,6 +1761,87 @@ static void script__setup_sample_type(struct perf_script *script)
}
}
static int process_stat_round_event(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_session *session)
{
struct stat_round_event *round = &event->stat_round;
struct perf_evsel *counter;
evlist__for_each(session->evlist, counter) {
perf_stat_process_counter(&stat_config, counter);
process_stat(counter, round->time);
}
process_stat_interval(round->time);
return 0;
}
static int process_stat_config_event(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_session *session __maybe_unused)
{
perf_event__read_stat_config(&stat_config, &event->stat_config);
return 0;
}
static int set_maps(struct perf_script *script)
{
struct perf_evlist *evlist = script->session->evlist;
if (!script->cpus || !script->threads)
return 0;
if (WARN_ONCE(script->allocated, "stats double allocation\n"))
return -EINVAL;
perf_evlist__set_maps(evlist, script->cpus, script->threads);
if (perf_evlist__alloc_stats(evlist, true))
return -ENOMEM;
script->allocated = true;
return 0;
}
static
int process_thread_map_event(struct perf_tool *tool,
union perf_event *event,
struct perf_session *session __maybe_unused)
{
struct perf_script *script = container_of(tool, struct perf_script, tool);
if (script->threads) {
pr_warning("Extra thread map event, ignoring.\n");
return 0;
}
script->threads = thread_map__new_event(&event->thread_map);
if (!script->threads)
return -ENOMEM;
return set_maps(script);
}
static
int process_cpu_map_event(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_session *session __maybe_unused)
{
struct perf_script *script = container_of(tool, struct perf_script, tool);
if (script->cpus) {
pr_warning("Extra cpu map event, ignoring.\n");
return 0;
}
script->cpus = cpu_map__new_data(&event->cpu_map.data);
if (!script->cpus)
return -ENOMEM;
return set_maps(script);
}
int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
{
bool show_full_info = false;
@ -1710,6 +1870,11 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
.auxtrace_info = perf_event__process_auxtrace_info,
.auxtrace = perf_event__process_auxtrace,
.auxtrace_error = perf_event__process_auxtrace_error,
.stat = perf_event__process_stat_event,
.stat_round = process_stat_round_event,
.stat_config = process_stat_config_event,
.thread_map = process_thread_map_event,
.cpu_map = process_cpu_map_event,
.ordered_events = true,
.ordering_requires_timestamps = true,
},
@ -2063,6 +2228,7 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
flush_scripting();
out_delete:
perf_evlist__free_stats(session->evlist);
perf_session__delete(session);
if (script_started)


@ -184,11 +184,18 @@ static int create_perf_stat_counter(struct perf_evsel *evsel)
* like tracepoints. Clear it up for counting.
*/
attr->sample_period = 0;
/*
* But set sample_type to PERF_SAMPLE_IDENTIFIER, which should be harmless
* while avoiding that older tools show confusing messages.
*
* However for pipe sessions we need to keep it zero,
* because script's perf_evsel__check_attr is triggered
* by attr->sample_type != 0, and we can't run it on
* stat sessions.
*/
attr->sample_type = PERF_SAMPLE_IDENTIFIER;
if (!(STAT_RECORD && perf_stat.file.is_pipe))
attr->sample_type = PERF_SAMPLE_IDENTIFIER;
/*
* Disabling all counters initially, they will be enabled


@ -1210,6 +1210,8 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
OPT_CALLBACK('j', "branch-filter", &opts->branch_stack,
"branch filter mask", "branch stack filter modes",
parse_branch_stack),
OPT_BOOLEAN(0, "raw-trace", &symbol_conf.raw_trace,
"Show raw trace event output (do not use print fmt or plugins)"),
OPT_END()
};
const char * const top_usage[] = {
@ -1231,11 +1233,17 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
if (argc)
usage_with_options(top_usage, options);
if (!top.evlist->nr_entries &&
perf_evlist__add_default(top.evlist) < 0) {
pr_err("Not enough memory for event selector list\n");
goto out_delete_evlist;
}
sort__mode = SORT_MODE__TOP;
/* display thread wants entries to be collapsed in a different tree */
sort__need_collapse = 1;
if (setup_sorting() < 0) {
if (setup_sorting(top.evlist) < 0) {
if (sort_order)
parse_options_usage(top_usage, options, "s", 1);
if (field_order)
@ -1277,12 +1285,6 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
goto out_delete_evlist;
}
if (!top.evlist->nr_entries &&
perf_evlist__add_default(top.evlist) < 0) {
ui__error("Not enough memory for event selector list\n");
goto out_delete_evlist;
}
symbol_conf.nr_events = top.evlist->nr_entries;
if (top.delay_secs < 1)


@ -26,4 +26,4 @@ perf-stat mainporcelain common
perf-test mainporcelain common
perf-timechart mainporcelain common
perf-top mainporcelain common
perf-trace mainporcelain common
perf-trace mainporcelain audit


@ -691,6 +691,7 @@ sharedir = $(prefix)/share
template_dir = share/perf-core/templates
STRACE_GROUPS_DIR = share/perf-core/strace/groups
htmldir = share/doc/perf-doc
tipdir = share/doc/perf-tip
ifeq ($(prefix),/usr)
sysconfdir = /etc
ETC_PERFCONFIG = $(sysconfdir)/perfconfig
@ -717,6 +718,7 @@ infodir_SQ = $(subst ','\'',$(infodir))
perfexecdir_SQ = $(subst ','\'',$(perfexecdir))
template_dir_SQ = $(subst ','\'',$(template_dir))
htmldir_SQ = $(subst ','\'',$(htmldir))
tipdir_SQ = $(subst ','\'',$(tipdir))
prefix_SQ = $(subst ','\'',$(prefix))
sysconfdir_SQ = $(subst ','\'',$(sysconfdir))
libdir_SQ = $(subst ','\'',$(libdir))
@ -724,12 +726,15 @@ libdir_SQ = $(subst ','\'',$(libdir))
ifneq ($(filter /%,$(firstword $(perfexecdir))),)
perfexec_instdir = $(perfexecdir)
STRACE_GROUPS_INSTDIR = $(STRACE_GROUPS_DIR)
tip_instdir = $(tipdir)
else
perfexec_instdir = $(prefix)/$(perfexecdir)
STRACE_GROUPS_INSTDIR = $(prefix)/$(STRACE_GROUPS_DIR)
tip_instdir = $(prefix)/$(tipdir)
endif
perfexec_instdir_SQ = $(subst ','\'',$(perfexec_instdir))
STRACE_GROUPS_INSTDIR_SQ = $(subst ','\'',$(STRACE_GROUPS_INSTDIR))
tip_instdir_SQ = $(subst ','\'',$(tip_instdir))
# If we install to $(HOME) we keep the traceevent default:
# $(HOME)/.traceevent/plugins
@ -751,6 +756,10 @@ ifeq ($(VF),1)
$(call print_var,sysconfdir)
$(call print_var,LIBUNWIND_DIR)
$(call print_var,LIBDW_DIR)
ifeq ($(dwarf-post-unwind),1)
$(call feature_print_text,"DWARF post unwind library", $(dwarf-post-unwind-text))
endif
$(info )
endif
@ -766,6 +775,7 @@ $(call detected_var,ETC_PERFCONFIG_SQ)
$(call detected_var,STRACE_GROUPS_DIR_SQ)
$(call detected_var,prefix_SQ)
$(call detected_var,perfexecdir_SQ)
$(call detected_var,tipdir_SQ)
$(call detected_var,LIBDIR)
$(call detected_var,GTK_CFLAGS)
$(call detected_var,PERL_EMBED_CCOPTS)


@ -19,6 +19,8 @@
#include "util/debug.h"
#include <api/fs/tracing_path.h>
#include <pthread.h>
#include <stdlib.h>
#include <time.h>
const char perf_usage_string[] =
"perf [--version] [--help] [OPTIONS] COMMAND [ARGS]";
@ -542,6 +544,8 @@ int main(int argc, const char **argv)
if (!cmd)
cmd = "perf-help";
srandom(time(NULL));
/* get debugfs/tracefs mount point from /proc/mounts */
tracing_path_mount();


@ -0,0 +1,77 @@
#!/usr/bin/env python
data = {}
times = []
threads = []
cpus = []
def get_key(time, event, cpu, thread):
return "%d-%s-%d-%d" % (time, event, cpu, thread)
def store_key(time, cpu, thread):
if (time not in times):
times.append(time)
if (cpu not in cpus):
cpus.append(cpu)
if (thread not in threads):
threads.append(thread)
def store(time, event, cpu, thread, val, ena, run):
#print "event %s cpu %d, thread %d, time %d, val %d, ena %d, run %d" % \
# (event, cpu, thread, time, val, ena, run)
store_key(time, cpu, thread)
key = get_key(time, event, cpu, thread)
data[key] = [ val, ena, run]
def get(time, event, cpu, thread):
key = get_key(time, event, cpu, thread)
return data[key][0]
def stat__cycles_k(cpu, thread, time, val, ena, run):
store(time, "cycles", cpu, thread, val, ena, run);
def stat__instructions_k(cpu, thread, time, val, ena, run):
store(time, "instructions", cpu, thread, val, ena, run);
def stat__cycles_u(cpu, thread, time, val, ena, run):
store(time, "cycles", cpu, thread, val, ena, run);
def stat__instructions_u(cpu, thread, time, val, ena, run):
store(time, "instructions", cpu, thread, val, ena, run);
def stat__cycles(cpu, thread, time, val, ena, run):
store(time, "cycles", cpu, thread, val, ena, run);
def stat__instructions(cpu, thread, time, val, ena, run):
store(time, "instructions", cpu, thread, val, ena, run);
def stat__interval(time):
for cpu in cpus:
for thread in threads:
cyc = get(time, "cycles", cpu, thread)
ins = get(time, "instructions", cpu, thread)
cpi = 0
if ins != 0:
cpi = cyc/float(ins)
print "%15f: cpu %d, thread %d -> cpi %f (%d/%d)" % (time/(float(1000000000)), cpu, thread, cpi, cyc, ins)
def trace_end():
pass
# XXX trace_end callback could be used as an alternative place
# to compute same values as in the script above:
#
# for time in times:
# for cpu in cpus:
# for thread in threads:
# cyc = get(time, "cycles", cpu, thread)
# ins = get(time, "instructions", cpu, thread)
#
# if ins != 0:
# cpi = cyc/float(ins)
#
# print "time %.9f, cpu %d, thread %d -> cpi %f" % (time/(float(1000000000)), cpu, thread, cpi)


@ -281,7 +281,7 @@ static int test1(struct perf_evsel *evsel, struct machine *machine)
symbol_conf.cumulate_callchain = false;
perf_evsel__reset_sample_bit(evsel, CALLCHAIN);
setup_sorting();
setup_sorting(NULL);
callchain_register_param(&callchain_param);
err = add_hist_entries(hists, machine);
@ -428,7 +428,7 @@ static int test2(struct perf_evsel *evsel, struct machine *machine)
symbol_conf.cumulate_callchain = false;
perf_evsel__set_sample_bit(evsel, CALLCHAIN);
setup_sorting();
setup_sorting(NULL);
callchain_register_param(&callchain_param);
err = add_hist_entries(hists, machine);
@ -486,7 +486,7 @@ static int test3(struct perf_evsel *evsel, struct machine *machine)
symbol_conf.cumulate_callchain = true;
perf_evsel__reset_sample_bit(evsel, CALLCHAIN);
setup_sorting();
setup_sorting(NULL);
callchain_register_param(&callchain_param);
err = add_hist_entries(hists, machine);
@ -670,7 +670,7 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
symbol_conf.cumulate_callchain = true;
perf_evsel__set_sample_bit(evsel, CALLCHAIN);
setup_sorting();
setup_sorting(NULL);
callchain_register_param(&callchain_param);
err = add_hist_entries(hists, machine);


@ -122,7 +122,7 @@ int test__hists_filter(int subtest __maybe_unused)
goto out;
/* default sort order (comm,dso,sym) will be used */
if (setup_sorting() < 0)
if (setup_sorting(NULL) < 0)
goto out;
machines__init(&machines);


@ -64,7 +64,7 @@ static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine)
struct perf_evsel *evsel;
struct addr_location al;
struct hist_entry *he;
struct perf_sample sample = { .period = 1, };
struct perf_sample sample = { .period = 1, .weight = 1, };
size_t i = 0, k;
/*
@ -90,7 +90,7 @@ static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine)
goto out;
he = __hists__add_entry(hists, &al, NULL,
NULL, NULL, 1, 1, 0, true);
NULL, NULL, &sample, true);
if (he == NULL) {
addr_location__put(&al);
goto out;
@ -116,7 +116,7 @@ static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine)
goto out;
he = __hists__add_entry(hists, &al, NULL,
NULL, NULL, 1, 1, 0, true);
NULL, NULL, &sample, true);
if (he == NULL) {
addr_location__put(&al);
goto out;
@ -294,7 +294,7 @@ int test__hists_link(int subtest __maybe_unused)
goto out;
/* default sort order (comm,dso,sym) will be used */
if (setup_sorting() < 0)
if (setup_sorting(NULL) < 0)
goto out;
machines__init(&machines);


@ -134,7 +134,7 @@ static int test1(struct perf_evsel *evsel, struct machine *machine)
field_order = NULL;
sort_order = NULL; /* equivalent to sort_order = "comm,dso,sym" */
setup_sorting();
setup_sorting(NULL);
/*
* expected output:
@ -236,7 +236,7 @@ static int test2(struct perf_evsel *evsel, struct machine *machine)
field_order = "overhead,cpu";
sort_order = "pid";
setup_sorting();
setup_sorting(NULL);
/*
* expected output:
@ -292,7 +292,7 @@ static int test3(struct perf_evsel *evsel, struct machine *machine)
field_order = "comm,overhead,dso";
sort_order = NULL;
setup_sorting();
setup_sorting(NULL);
/*
* expected output:
@ -366,7 +366,7 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
field_order = "dso,sym,comm,overhead,dso";
sort_order = "sym";
setup_sorting();
setup_sorting(NULL);
/*
* expected output:
@ -468,7 +468,7 @@ static int test5(struct perf_evsel *evsel, struct machine *machine)
field_order = "cpu,pid,comm,dso,sym";
sort_order = "dso,pid";
setup_sorting();
setup_sorting(NULL);
/*
* expected output:


@ -55,7 +55,6 @@ int test__keep_tracking(int subtest __maybe_unused)
.mmap_pages = UINT_MAX,
.user_freq = UINT_MAX,
.user_interval = ULLONG_MAX,
.freq = 4000,
.target = {
.uses_mmap = true,
},
@ -124,7 +123,7 @@ int test__keep_tracking(int subtest __maybe_unused)
evsel = perf_evlist__last(evlist);
CHECK__(perf_evlist__disable_event(evlist, evsel));
CHECK__(perf_evsel__disable(evsel));
comm = "Test COMM 2";
CHECK__(prctl(PR_SET_NAME, (unsigned long)comm, 0, 0, 0));


@ -40,12 +40,11 @@ int test__PERF_RECORD(int subtest __maybe_unused)
.uses_mmap = true,
},
.no_buffering = true,
.freq = 10,
.mmap_pages = 256,
};
cpu_set_t cpu_mask;
size_t cpu_mask_size = sizeof(cpu_mask);
struct perf_evlist *evlist = perf_evlist__new_default();
struct perf_evlist *evlist = perf_evlist__new_dummy();
struct perf_evsel *evsel;
struct perf_sample sample;
const char *cmd = "sleep";
@ -61,6 +60,9 @@ int test__PERF_RECORD(int subtest __maybe_unused)
int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, };
char sbuf[STRERR_BUFSIZE];
if (evlist == NULL) /* Fallback for kernels lacking PERF_COUNT_SW_DUMMY */
evlist = perf_evlist__new_default();
if (evlist == NULL || argv == NULL) {
pr_debug("Not enough memory to create evlist\n");
goto out;


@ -455,7 +455,7 @@ int test__switch_tracking(int subtest __maybe_unused)
perf_evlist__enable(evlist);
err = perf_evlist__disable_event(evlist, cpu_clocks_evsel);
err = perf_evsel__disable(cpu_clocks_evsel);
if (err) {
pr_debug("perf_evlist__disable_event failed!\n");
goto out_err;
@ -474,7 +474,7 @@ int test__switch_tracking(int subtest __maybe_unused)
goto out_err;
}
err = perf_evlist__disable_event(evlist, cycles_evsel);
err = perf_evsel__disable(cycles_evsel);
if (err) {
pr_debug("perf_evlist__disable_event failed!\n");
goto out_err;
@ -500,7 +500,7 @@ int test__switch_tracking(int subtest __maybe_unused)
goto out_err;
}
err = perf_evlist__enable_event(evlist, cycles_evsel);
err = perf_evsel__enable(cycles_evsel);
if (err) {
pr_debug("perf_evlist__disable_event failed!\n");
goto out_err;


@ -1041,7 +1041,8 @@ static int hist_browser__show_entry(struct hist_browser *browser,
hist_browser__gotorc(browser, row, 0);
perf_hpp__for_each_format(fmt) {
if (perf_hpp__should_skip(fmt) || column++ < browser->b.horiz_scroll)
if (perf_hpp__should_skip(fmt, entry->hists) ||
column++ < browser->b.horiz_scroll)
continue;
if (current_entry && browser->b.navkeypressed) {
@ -1144,7 +1145,7 @@ static int hists_browser__scnprintf_headers(struct hist_browser *browser, char *
}
perf_hpp__for_each_format(fmt) {
if (perf_hpp__should_skip(fmt) || column++ < browser->b.horiz_scroll)
if (perf_hpp__should_skip(fmt, hists) || column++ < browser->b.horiz_scroll)
continue;
ret = fmt->header(fmt, &dummy_hpp, hists_to_evsel(hists));
@ -1414,7 +1415,7 @@ static int hist_browser__fprintf_entry(struct hist_browser *browser,
printed += fprintf(fp, "%c ", folded_sign);
perf_hpp__for_each_format(fmt) {
if (perf_hpp__should_skip(fmt))
if (perf_hpp__should_skip(fmt, he->hists))
continue;
if (!first) {


@ -318,7 +318,7 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists,
col_idx = 0;
perf_hpp__for_each_format(fmt) {
if (perf_hpp__should_skip(fmt))
if (perf_hpp__should_skip(fmt, hists))
continue;
/*
@ -368,7 +368,7 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists,
col_idx = 0;
perf_hpp__for_each_format(fmt) {
if (perf_hpp__should_skip(fmt))
if (perf_hpp__should_skip(fmt, h->hists))
continue;
if (fmt->color)


@ -443,7 +443,6 @@ LIST_HEAD(perf_hpp__sort_list);
void perf_hpp__init(void)
{
struct list_head *list;
int i;
for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
@ -484,17 +483,6 @@ void perf_hpp__init(void)
if (symbol_conf.show_total_period)
hpp_dimension__add_output(PERF_HPP__PERIOD);
/* prepend overhead field for backward compatiblity. */
list = &perf_hpp__format[PERF_HPP__OVERHEAD].sort_list;
if (list_empty(list))
list_add(list, &perf_hpp__sort_list);
if (symbol_conf.cumulate_callchain) {
list = &perf_hpp__format[PERF_HPP__OVERHEAD_ACC].sort_list;
if (list_empty(list))
list_add(list, &perf_hpp__sort_list);
}
}
void perf_hpp__column_register(struct perf_hpp_fmt *format)
@ -619,7 +607,7 @@ unsigned int hists__sort_list_width(struct hists *hists)
struct perf_hpp dummy_hpp;
perf_hpp__for_each_format(fmt) {
if (perf_hpp__should_skip(fmt))
if (perf_hpp__should_skip(fmt, hists))
continue;
if (first)


@ -385,7 +385,7 @@ static int hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp)
return 0;
perf_hpp__for_each_format(fmt) {
if (perf_hpp__should_skip(fmt))
if (perf_hpp__should_skip(fmt, he->hists))
continue;
/*
@ -464,7 +464,7 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
fprintf(fp, "# ");
perf_hpp__for_each_format(fmt) {
if (perf_hpp__should_skip(fmt))
if (perf_hpp__should_skip(fmt, hists))
continue;
if (!first)
@ -490,7 +490,7 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
perf_hpp__for_each_format(fmt) {
unsigned int i;
if (perf_hpp__should_skip(fmt))
if (perf_hpp__should_skip(fmt, hists))
continue;
if (!first)


@ -9,7 +9,7 @@ libperf-y += env.o
libperf-y += event.o
libperf-y += evlist.o
libperf-y += evsel.o
libperf-y += find_next_bit.o
libperf-y += find_bit.o
libperf-y += kallsyms.o
libperf-y += levenshtein.o
libperf-y += llvm-utils.o
@ -132,7 +132,8 @@ CFLAGS_pmu-bison.o += -DYYENABLE_NLS=0 -DYYLTYPE_IS_TRIVIAL=0 -w
$(OUTPUT)util/parse-events.o: $(OUTPUT)util/parse-events-flex.c $(OUTPUT)util/parse-events-bison.c
$(OUTPUT)util/pmu.o: $(OUTPUT)util/pmu-flex.c $(OUTPUT)util/pmu-bison.c
CFLAGS_find_next_bit.o += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))"
CFLAGS_bitmap.o += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))"
CFLAGS_find_bit.o += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))"
CFLAGS_rbtree.o += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))"
CFLAGS_libstring.o += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))"
CFLAGS_hweight.o += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))"
@ -142,7 +143,11 @@ $(OUTPUT)util/kallsyms.o: ../lib/symbol/kallsyms.c FORCE
$(call rule_mkdir)
$(call if_changed_dep,cc_o_c)
$(OUTPUT)util/find_next_bit.o: ../lib/util/find_next_bit.c FORCE
$(OUTPUT)util/bitmap.o: ../lib/bitmap.c FORCE
$(call rule_mkdir)
$(call if_changed_dep,cc_o_c)
$(OUTPUT)util/find_bit.o: ../lib/find_bit.c FORCE
$(call rule_mkdir)
$(call if_changed_dep,cc_o_c)


@ -925,6 +925,34 @@ void free_callchain(struct callchain_root *root)
free_callchain_node(&root->node);
}
static u64 decay_callchain_node(struct callchain_node *node)
{
struct callchain_node *child;
struct rb_node *n;
u64 child_hits = 0;
n = rb_first(&node->rb_root_in);
while (n) {
child = container_of(n, struct callchain_node, rb_node_in);
child_hits += decay_callchain_node(child);
n = rb_next(n);
}
node->hit = (node->hit * 7) / 8;
node->children_hit = child_hits;
return node->hit;
}
void decay_callchain(struct callchain_root *root)
{
if (!symbol_conf.use_callchain)
return;
decay_callchain_node(&root->node);
}
int callchain_node__make_parent_list(struct callchain_node *node)
{
struct callchain_node *parent = node->parent;


@ -253,6 +253,7 @@ int callchain_node__fprintf_value(struct callchain_node *node,
FILE *fp, u64 total);
void free_callchain(struct callchain_root *root);
void decay_callchain(struct callchain_root *root);
int callchain_node__make_parent_list(struct callchain_node *node);
#endif /* __PERF_CALLCHAIN_H */


@ -188,8 +188,17 @@ static struct cpu_map *cpu_map__from_entries(struct cpu_map_entries *cpus)
if (map) {
unsigned i;
for (i = 0; i < cpus->nr; i++)
map->map[i] = (int)cpus->cpu[i];
for (i = 0; i < cpus->nr; i++) {
/*
* Special treatment for -1, which is not real cpu number,
* and we need to use (int) -1 to initialize map[i],
* otherwise it would become 65535.
*/
if (cpus->cpu[i] == (u16) -1)
map->map[i] = -1;
else
map->map[i] = (int) cpus->cpu[i];
}
}
return map;


@ -68,6 +68,18 @@ struct perf_evlist *perf_evlist__new_default(void)
return evlist;
}
struct perf_evlist *perf_evlist__new_dummy(void)
{
struct perf_evlist *evlist = perf_evlist__new();
if (evlist && perf_evlist__add_dummy(evlist)) {
perf_evlist__delete(evlist);
evlist = NULL;
}
return evlist;
}
/**
* perf_evlist__set_id_pos - set the positions of event ids.
* @evlist: selected event list
@ -248,6 +260,22 @@ error:
return -ENOMEM;
}
int perf_evlist__add_dummy(struct perf_evlist *evlist)
{
struct perf_event_attr attr = {
.type = PERF_TYPE_SOFTWARE,
.config = PERF_COUNT_SW_DUMMY,
.size = sizeof(attr), /* to capture ABI version */
};
struct perf_evsel *evsel = perf_evsel__new(&attr);
if (evsel == NULL)
return -ENOMEM;
perf_evlist__add(evlist, evsel);
return 0;
}
static int perf_evlist__add_attrs(struct perf_evlist *evlist,
struct perf_event_attr *attrs, size_t nr_attrs)
{
@ -365,48 +393,6 @@ void perf_evlist__toggle_enable(struct perf_evlist *evlist)
(evlist->enabled ? perf_evlist__disable : perf_evlist__enable)(evlist);
}
int perf_evlist__disable_event(struct perf_evlist *evlist,
struct perf_evsel *evsel)
{
int cpu, thread, err;
int nr_cpus = cpu_map__nr(evlist->cpus);
int nr_threads = perf_evlist__nr_threads(evlist, evsel);
if (!evsel->fd)
return 0;
for (cpu = 0; cpu < nr_cpus; cpu++) {
for (thread = 0; thread < nr_threads; thread++) {
err = ioctl(FD(evsel, cpu, thread),
PERF_EVENT_IOC_DISABLE, 0);
if (err)
return err;
}
}
return 0;
}
int perf_evlist__enable_event(struct perf_evlist *evlist,
struct perf_evsel *evsel)
{
int cpu, thread, err;
int nr_cpus = cpu_map__nr(evlist->cpus);
int nr_threads = perf_evlist__nr_threads(evlist, evsel);
if (!evsel->fd)
return -EINVAL;
for (cpu = 0; cpu < nr_cpus; cpu++) {
for (thread = 0; thread < nr_threads; thread++) {
err = ioctl(FD(evsel, cpu, thread),
PERF_EVENT_IOC_ENABLE, 0);
if (err)
return err;
}
}
return 0;
}
static int perf_evlist__enable_event_cpu(struct perf_evlist *evlist,
struct perf_evsel *evsel, int cpu)
{
@ -1470,7 +1456,7 @@ int perf_evlist__open(struct perf_evlist *evlist)
perf_evlist__update_id_pos(evlist);
evlist__for_each(evlist, evsel) {
err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
err = perf_evsel__open(evsel, evsel->cpus, evsel->threads);
if (err < 0)
goto out_err;
}
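
Roughly what the new dummy evsel amounts to at the perf_event_open() level -- a simplified sketch with error handling omitted; open_dummy_event() is a hypothetical helper, only the attr fields mirror perf_evlist__add_dummy() above:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <string.h>
#include <unistd.h>

static int open_dummy_event(pid_t pid, int cpu)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type   = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_DUMMY;	/* counts nothing by itself */
	attr.size   = sizeof(attr);		/* tells the kernel which ABI version we speak */

	return syscall(__NR_perf_event_open, &attr, pid, cpu,
		       -1 /* group_fd */, 0 /* flags */);
}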


@ -67,6 +67,7 @@ struct perf_evsel_str_handler {
struct perf_evlist *perf_evlist__new(void);
struct perf_evlist *perf_evlist__new_default(void);
struct perf_evlist *perf_evlist__new_dummy(void);
void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
struct thread_map *threads);
void perf_evlist__exit(struct perf_evlist *evlist);
@ -81,6 +82,8 @@ int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
#define perf_evlist__add_default_attrs(evlist, array) \
__perf_evlist__add_default_attrs(evlist, array, ARRAY_SIZE(array))
int perf_evlist__add_dummy(struct perf_evlist *evlist);
int perf_evlist__add_newtp(struct perf_evlist *evlist,
const char *sys, const char *name, void *handler);
@ -152,10 +155,6 @@ void perf_evlist__disable(struct perf_evlist *evlist);
void perf_evlist__enable(struct perf_evlist *evlist);
void perf_evlist__toggle_enable(struct perf_evlist *evlist);
int perf_evlist__disable_event(struct perf_evlist *evlist,
struct perf_evsel *evsel);
int perf_evlist__enable_event(struct perf_evlist *evlist,
struct perf_evsel *evsel);
int perf_evlist__enable_event_idx(struct perf_evlist *evlist,
struct perf_evsel *evsel, int idx);


@ -2298,6 +2298,29 @@ int perf_evsel__fprintf(struct perf_evsel *evsel,
printed += comma_fprintf(fp, &first, " %s=%" PRIu64,
term, (u64)evsel->attr.sample_freq);
}
if (details->trace_fields) {
struct format_field *field;
if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
printed += comma_fprintf(fp, &first, " (not a tracepoint)");
goto out;
}
field = evsel->tp_format->format.fields;
if (field == NULL) {
printed += comma_fprintf(fp, &first, " (no trace field)");
goto out;
}
printed += comma_fprintf(fp, &first, " trace_fields: %s", field->name);
field = field->next;
while (field) {
printed += comma_fprintf(fp, &first, "%s", field->name);
field = field->next;
}
}
out:
fputc('\n', fp);
return ++printed;


@ -369,6 +369,7 @@ struct perf_attr_details {
bool verbose;
bool event_group;
bool force;
bool trace_fields;
};
int perf_evsel__fprintf(struct perf_evsel *evsel,


@ -36,4 +36,19 @@ do
}' "Documentation/perf-$cmd.txt"
done
echo "#endif /* HAVE_LIBELF_SUPPORT */"
echo "#ifdef HAVE_LIBAUDIT_SUPPORT"
sed -n -e 's/^perf-\([^ ]*\)[ ].* audit*/\1/p' command-list.txt |
sort |
while read cmd
do
sed -n '
/^NAME/,/perf-'"$cmd"'/H
${
x
s/.*perf-'"$cmd"' - \(.*\)/ {"'"$cmd"'", "\1"},/
p
}' "Documentation/perf-$cmd.txt"
done
echo "#endif /* HAVE_LIBELF_SUPPORT */"
echo "};"


@ -254,6 +254,7 @@ static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
he_stat__decay(&he->stat);
if (symbol_conf.cumulate_callchain)
he_stat__decay(he->stat_acc);
decay_callchain(he->callchain);
diff = prev_period - he->stat.period;
@ -369,6 +370,25 @@ static struct hist_entry *hist_entry__new(struct hist_entry *template,
if (symbol_conf.use_callchain)
callchain_init(he->callchain);
if (he->raw_data) {
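/*
 * the raw sample data is not owned by the hist entry, so keep a
 * private copy here (freed again in hist_entry__delete())
 */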
he->raw_data = memdup(he->raw_data, he->raw_size);
if (he->raw_data == NULL) {
map__put(he->ms.map);
if (he->branch_info) {
map__put(he->branch_info->from.map);
map__put(he->branch_info->to.map);
free(he->branch_info);
}
if (he->mem_info) {
map__put(he->mem_info->iaddr.map);
map__put(he->mem_info->daddr.map);
}
free(he->stat_acc);
free(he);
return NULL;
}
}
INIT_LIST_HEAD(&he->pairs.node);
thread__get(he->thread);
}
@ -461,7 +481,7 @@ struct hist_entry *__hists__add_entry(struct hists *hists,
struct symbol *sym_parent,
struct branch_info *bi,
struct mem_info *mi,
u64 period, u64 weight, u64 transaction,
struct perf_sample *sample,
bool sample_self)
{
struct hist_entry entry = {
@ -478,15 +498,17 @@ struct hist_entry *__hists__add_entry(struct hists *hists,
.level = al->level,
.stat = {
.nr_events = 1,
.period = period,
.weight = weight,
.period = sample->period,
.weight = sample->weight,
},
.parent = sym_parent,
.filtered = symbol__parent_filter(sym_parent) | al->filtered,
.hists = hists,
.branch_info = bi,
.mem_info = mi,
.transaction = transaction,
.transaction = sample->transaction,
.raw_data = sample->raw_data,
.raw_size = sample->raw_size,
};
return hists__findnew_entry(hists, &entry, al, sample_self);
@ -526,12 +548,13 @@ iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al
u64 cost;
struct mem_info *mi = iter->priv;
struct hists *hists = evsel__hists(iter->evsel);
struct perf_sample *sample = iter->sample;
struct hist_entry *he;
if (mi == NULL)
return -EINVAL;
cost = iter->sample->weight;
cost = sample->weight;
if (!cost)
cost = 1;
@ -542,8 +565,10 @@ iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al
* and this is indirectly achieved by passing period=weight here
* and the he_stat__add_period() function.
*/
sample->period = cost;
he = __hists__add_entry(hists, al, iter->parent, NULL, mi,
cost, cost, 0, true);
sample, true);
if (!he)
return -ENOMEM;
@ -630,6 +655,7 @@ iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *a
struct branch_info *bi;
struct perf_evsel *evsel = iter->evsel;
struct hists *hists = evsel__hists(evsel);
struct perf_sample *sample = iter->sample;
struct hist_entry *he = NULL;
int i = iter->curr;
int err = 0;
@ -643,9 +669,11 @@ iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *a
* The report shows the percentage of total branches captured
* and not events sampled. Thus we use a pseudo period of 1.
*/
sample->period = 1;
sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;
he = __hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
1, bi->flags.cycles ? bi->flags.cycles : 1,
0, true);
sample, true);
if (he == NULL)
return -ENOMEM;
@ -682,8 +710,7 @@ iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location
struct hist_entry *he;
he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
sample->period, sample->weight,
sample->transaction, true);
sample, true);
if (he == NULL)
return -ENOMEM;
@ -744,8 +771,7 @@ iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
int err = 0;
he = __hists__add_entry(hists, al, iter->parent, NULL, NULL,
sample->period, sample->weight,
sample->transaction, true);
sample, true);
if (he == NULL)
return -ENOMEM;
@ -797,6 +823,8 @@ iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
.sym = al->sym,
},
.parent = iter->parent,
.raw_data = sample->raw_data,
.raw_size = sample->raw_size,
};
int i;
struct callchain_cursor cursor;
@ -818,8 +846,7 @@ iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
}
he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
sample->period, sample->weight,
sample->transaction, false);
sample, false);
if (he == NULL)
return -ENOMEM;
@ -971,6 +998,8 @@ void hist_entry__delete(struct hist_entry *he)
if (he->srcfile && he->srcfile[0])
free(he->srcfile);
free_callchain(he->callchain);
free(he->trace_output);
free(he->raw_data);
free(he);
}
@ -978,9 +1007,8 @@ void hist_entry__delete(struct hist_entry *he)
* collapse the histogram
*/
static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
struct rb_root *root,
struct hist_entry *he)
bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
struct rb_root *root, struct hist_entry *he)
{
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
@ -1020,7 +1048,7 @@ static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
return true;
}
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
struct rb_root *root;
@ -1084,7 +1112,7 @@ static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
int64_t cmp = 0;
perf_hpp__for_each_sort_list(fmt) {
if (perf_hpp__should_skip(fmt))
if (perf_hpp__should_skip(fmt, a->hists))
continue;
cmp = fmt->sort(fmt, a, b);
@ -1555,10 +1583,8 @@ int perf_hist_config(const char *var, const char *value)
return 0;
}
static int hists_evsel__init(struct perf_evsel *evsel)
int __hists__init(struct hists *hists)
{
struct hists *hists = evsel__hists(evsel);
memset(hists, 0, sizeof(*hists));
hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
hists->entries_in = &hists->entries_in_array[0];
@ -1598,6 +1624,14 @@ static void hists_evsel__exit(struct perf_evsel *evsel)
hists__delete_all_entries(hists);
}
static int hists_evsel__init(struct perf_evsel *evsel)
{
struct hists *hists = evsel__hists(evsel);
__hists__init(hists);
return 0;
}
/*
* XXX We probably need a hists_evsel__exit() to free the hist_entries
* stored in the rbtree...


@ -52,6 +52,7 @@ enum hist_column {
HISTC_MEM_IADDR_SYMBOL,
HISTC_TRANSACTION,
HISTC_CYCLES,
HISTC_TRACE,
HISTC_NR_COLS, /* Last entry */
};
@ -114,8 +115,8 @@ struct hist_entry *__hists__add_entry(struct hists *hists,
struct addr_location *al,
struct symbol *parent,
struct branch_info *bi,
struct mem_info *mi, u64 period,
u64 weight, u64 transaction,
struct mem_info *mi,
struct perf_sample *sample,
bool sample_self);
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
int max_stack_depth, void *arg);
@ -184,6 +185,11 @@ static inline struct hists *evsel__hists(struct perf_evsel *evsel)
}
int hists__init(void);
int __hists__init(struct hists *hists);
struct rb_root *hists__get_rotate_entries_in(struct hists *hists);
bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
struct rb_root *root, struct hist_entry *he);
struct perf_hpp {
char *buf;
@ -261,10 +267,20 @@ void perf_hpp__append_sort_keys(void);
bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format);
bool perf_hpp__same_sort_entry(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b);
bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *format);
bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists);
static inline bool perf_hpp__should_skip(struct perf_hpp_fmt *format)
static inline bool perf_hpp__should_skip(struct perf_hpp_fmt *format,
struct hists *hists)
{
return format->elide;
if (format->elide)
return true;
if (perf_hpp__is_dynamic_entry(format) &&
!perf_hpp__defined_dynamic_entry(format, hists))
return true;
return false;
}
void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists);


@ -220,6 +220,7 @@ static int __perf_pmu__new_alias(struct list_head *list, char *dir, char *name,
alias->scale = 1.0;
alias->unit[0] = '\0';
alias->per_pkg = false;
alias->snapshot = false;
ret = parse_events_terms(&alias->terms, val);
if (ret) {


@ -10,6 +10,8 @@ util/ctype.c
util/evlist.c
util/evsel.c
util/cpumap.c
../lib/bitmap.c
../lib/find_bit.c
../lib/hweight.c
util/thread_map.c
util/util.c


@ -41,6 +41,9 @@
#include "../thread-stack.h"
#include "../trace-event.h"
#include "../machine.h"
#include "thread_map.h"
#include "cpumap.h"
#include "stat.h"
PyMODINIT_FUNC initperf_trace_context(void);
@ -859,6 +862,104 @@ static void python_process_event(union perf_event *event,
}
}
static void get_handler_name(char *str, size_t size,
struct perf_evsel *evsel)
{
char *p = str;
scnprintf(str, size, "stat__%s", perf_evsel__name(evsel));
while ((p = strchr(p, ':'))) {
*p = '_';
p++;
}
}
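/*
 * e.g. (hypothetical script): stats for an evsel named "cycles:u" are
 * dispatched to a python function stat__cycles_u(cpu, thread, time, val,
 * ena, run), matching the tuple built in process_stat() below.
 */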
static void
process_stat(struct perf_evsel *counter, int cpu, int thread, u64 tstamp,
struct perf_counts_values *count)
{
PyObject *handler, *t;
static char handler_name[256];
int n = 0;
t = PyTuple_New(MAX_FIELDS);
if (!t)
Py_FatalError("couldn't create Python tuple");
get_handler_name(handler_name, sizeof(handler_name),
counter);
handler = get_handler(handler_name);
if (!handler) {
pr_debug("can't find python handler %s\n", handler_name);
return;
}
PyTuple_SetItem(t, n++, PyInt_FromLong(cpu));
PyTuple_SetItem(t, n++, PyInt_FromLong(thread));
tuple_set_u64(t, n++, tstamp);
tuple_set_u64(t, n++, count->val);
tuple_set_u64(t, n++, count->ena);
tuple_set_u64(t, n++, count->run);
if (_PyTuple_Resize(&t, n) == -1)
Py_FatalError("error resizing Python tuple");
call_object(handler, t, handler_name);
Py_DECREF(t);
}
static void python_process_stat(struct perf_stat_config *config,
struct perf_evsel *counter, u64 tstamp)
{
struct thread_map *threads = counter->threads;
struct cpu_map *cpus = counter->cpus;
int cpu, thread;
if (config->aggr_mode == AGGR_GLOBAL) {
process_stat(counter, -1, -1, tstamp,
&counter->counts->aggr);
return;
}
for (thread = 0; thread < threads->nr; thread++) {
for (cpu = 0; cpu < cpus->nr; cpu++) {
process_stat(counter, cpus->map[cpu],
thread_map__pid(threads, thread), tstamp,
perf_counts(counter->counts, cpu, thread));
}
}
}
static void python_process_stat_interval(u64 tstamp)
{
PyObject *handler, *t;
static const char handler_name[] = "stat__interval";
int n = 0;
t = PyTuple_New(MAX_FIELDS);
if (!t)
Py_FatalError("couldn't create Python tuple");
handler = get_handler(handler_name);
if (!handler) {
pr_debug("can't find python handler %s\n", handler_name);
return;
}
tuple_set_u64(t, n++, tstamp);
if (_PyTuple_Resize(&t, n) == -1)
Py_FatalError("error resizing Python tuple");
call_object(handler, t, handler_name);
Py_DECREF(t);
}
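/* the per-interval hook on the script side is a function named "stat__interval"
 * taking a single timestamp argument, matching the tuple built above */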
static int run_start_sub(void)
{
main_module = PyImport_AddModule("__main__");
@ -1201,10 +1302,12 @@ static int python_generate_script(struct pevent *pevent, const char *outfile)
}
struct scripting_ops python_scripting_ops = {
.name = "Python",
.start_script = python_start_script,
.flush_script = python_flush_script,
.stop_script = python_stop_script,
.process_event = python_process_event,
.generate_script = python_generate_script,
.name = "Python",
.start_script = python_start_script,
.flush_script = python_flush_script,
.stop_script = python_stop_script,
.process_event = python_process_event,
.process_stat = python_process_stat,
.process_stat_interval = python_process_stat_interval,
.generate_script = python_generate_script,
};


@ -4,6 +4,8 @@
#include "comm.h"
#include "symbol.h"
#include "evsel.h"
#include "evlist.h"
#include <traceevent/event-parse.h>
regex_t parent_regex;
const char default_parent_pattern[] = "^sys_|^do_page_fault";
@ -13,6 +15,7 @@ const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cy
const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
const char default_top_sort_order[] = "dso,symbol";
const char default_diff_sort_order[] = "dso,symbol";
const char default_tracepoint_sort_order[] = "trace";
const char *sort_order;
const char *field_order;
regex_t ignore_callees_regex;
@ -443,6 +446,70 @@ struct sort_entry sort_socket = {
.se_width_idx = HISTC_SOCKET,
};
/* --sort trace */
static char *get_trace_output(struct hist_entry *he)
{
struct trace_seq seq;
struct perf_evsel *evsel;
struct pevent_record rec = {
.data = he->raw_data,
.size = he->raw_size,
};
evsel = hists_to_evsel(he->hists);
trace_seq_init(&seq);
if (symbol_conf.raw_trace) {
pevent_print_fields(&seq, he->raw_data, he->raw_size,
evsel->tp_format);
} else {
pevent_event_info(&seq, evsel->tp_format, &rec);
}
return seq.buffer;
}
static int64_t
sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
{
struct perf_evsel *evsel;
evsel = hists_to_evsel(left->hists);
if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
return 0;
if (left->trace_output == NULL)
left->trace_output = get_trace_output(left);
if (right->trace_output == NULL)
right->trace_output = get_trace_output(right);
hists__new_col_len(left->hists, HISTC_TRACE, strlen(left->trace_output));
hists__new_col_len(right->hists, HISTC_TRACE, strlen(right->trace_output));
return strcmp(right->trace_output, left->trace_output);
}
static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
struct perf_evsel *evsel;
evsel = hists_to_evsel(he->hists);
if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
return scnprintf(bf, size, "%-*.*s", width, width, "N/A");
if (he->trace_output == NULL)
he->trace_output = get_trace_output(he);
return repsep_snprintf(bf, size, "%-*.*s", width, width, he->trace_output);
}
struct sort_entry sort_trace = {
.se_header = "Trace output",
.se_cmp = sort__trace_cmp,
.se_snprintf = hist_entry__trace_snprintf,
.se_width_idx = HISTC_TRACE,
};
/* sort keys for branch stacks */
static int64_t
@ -1312,6 +1379,7 @@ static struct sort_dimension common_sort_dimensions[] = {
DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
DIM(SORT_TRANSACTION, "transaction", sort_transaction),
DIM(SORT_TRACE, "trace", sort_trace),
};
#undef DIM
@ -1529,6 +1597,455 @@ static int __sort_dimension__add_hpp_output(struct sort_dimension *sd)
return 0;
}
struct hpp_dynamic_entry {
struct perf_hpp_fmt hpp;
struct perf_evsel *evsel;
struct format_field *field;
unsigned dynamic_len;
bool raw_trace;
};
static int hde_width(struct hpp_dynamic_entry *hde)
{
if (!hde->hpp.len) {
int len = hde->dynamic_len;
int namelen = strlen(hde->field->name);
int fieldlen = hde->field->size;
if (namelen > len)
len = namelen;
if (!(hde->field->flags & FIELD_IS_STRING)) {
/* width needed to print the field as a hex number (2 digits per byte plus a "0x" prefix) */
fieldlen = hde->field->size * 2 + 2;
}
if (fieldlen > len)
len = fieldlen;
hde->hpp.len = len;
}
return hde->hpp.len;
}
static void update_dynamic_len(struct hpp_dynamic_entry *hde,
struct hist_entry *he)
{
char *str, *pos;
struct format_field *field = hde->field;
size_t namelen;
bool last = false;
if (hde->raw_trace)
return;
/* parse pretty print result and update max length */
if (!he->trace_output)
he->trace_output = get_trace_output(he);
namelen = strlen(field->name);
str = he->trace_output;
while (str) {
pos = strchr(str, ' ');
if (pos == NULL) {
last = true;
pos = str + strlen(str);
}
if (!strncmp(str, field->name, namelen)) {
size_t len;
str += namelen + 1;
len = pos - str;
if (len > hde->dynamic_len)
hde->dynamic_len = len;
break;
}
if (last)
str = NULL;
else
str = pos + 1;
}
}
static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct perf_evsel *evsel __maybe_unused)
{
struct hpp_dynamic_entry *hde;
size_t len = fmt->user_len;
hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
if (!len)
len = hde_width(hde);
return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
}
static int __sort__hde_width(struct perf_hpp_fmt *fmt,
struct perf_hpp *hpp __maybe_unused,
struct perf_evsel *evsel __maybe_unused)
{
struct hpp_dynamic_entry *hde;
size_t len = fmt->user_len;
hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
if (!len)
len = hde_width(hde);
return len;
}
bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
{
struct hpp_dynamic_entry *hde;
hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
return hists_to_evsel(hists) == hde->evsel;
}
static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he)
{
struct hpp_dynamic_entry *hde;
size_t len = fmt->user_len;
char *str, *pos;
struct format_field *field;
size_t namelen;
bool last = false;
int ret;
hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
if (!len)
len = hde_width(hde);
if (hde->raw_trace)
goto raw_field;
field = hde->field;
namelen = strlen(field->name);
str = he->trace_output;
while (str) {
pos = strchr(str, ' ');
if (pos == NULL) {
last = true;
pos = str + strlen(str);
}
if (!strncmp(str, field->name, namelen)) {
str += namelen + 1;
str = strndup(str, pos - str);
if (str == NULL)
return scnprintf(hpp->buf, hpp->size,
"%*.*s", len, len, "ERROR");
break;
}
if (last)
str = NULL;
else
str = pos + 1;
}
if (str == NULL) {
struct trace_seq seq;
raw_field:
trace_seq_init(&seq);
pevent_print_field(&seq, he->raw_data, hde->field);
str = seq.buffer;
}
ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
free(str);
return ret;
}
static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
struct hist_entry *a, struct hist_entry *b)
{
struct hpp_dynamic_entry *hde;
struct format_field *field;
unsigned offset, size;
hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
field = hde->field;
if (field->flags & FIELD_IS_DYNAMIC) {
unsigned long long dyn;
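/* a dynamic (__data_loc) field stores its payload location in the
 * record: offset in the low 16 bits, length in the high 16 bits */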
pevent_read_number_field(field, a->raw_data, &dyn);
offset = dyn & 0xffff;
size = (dyn >> 16) & 0xffff;
/* record max width for output */
if (size > hde->dynamic_len)
hde->dynamic_len = size;
} else {
offset = field->offset;
size = field->size;
update_dynamic_len(hde, a);
update_dynamic_len(hde, b);
}
return memcmp(a->raw_data + offset, b->raw_data + offset, size);
}
bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
{
return fmt->cmp == __sort__hde_cmp;
}
static struct hpp_dynamic_entry *
__alloc_dynamic_entry(struct perf_evsel *evsel, struct format_field *field)
{
struct hpp_dynamic_entry *hde;
hde = malloc(sizeof(*hde));
if (hde == NULL) {
pr_debug("Memory allocation failed\n");
return NULL;
}
hde->evsel = evsel;
hde->field = field;
hde->dynamic_len = 0;
hde->hpp.name = field->name;
hde->hpp.header = __sort__hde_header;
hde->hpp.width = __sort__hde_width;
hde->hpp.entry = __sort__hde_entry;
hde->hpp.color = NULL;
hde->hpp.cmp = __sort__hde_cmp;
hde->hpp.collapse = __sort__hde_cmp;
hde->hpp.sort = __sort__hde_cmp;
INIT_LIST_HEAD(&hde->hpp.list);
INIT_LIST_HEAD(&hde->hpp.sort_list);
hde->hpp.elide = false;
hde->hpp.len = 0;
hde->hpp.user_len = 0;
return hde;
}
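/*
 * e.g. "sched:sched_switch.prev_comm/raw" is split into
 * event="sched:sched_switch", field="prev_comm", opt="raw"; the event part
 * is optional (a bare field name is later matched against every tracepoint
 * event in the evlist).
 */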
static int parse_field_name(char *str, char **event, char **field, char **opt)
{
char *event_name, *field_name, *opt_name;
event_name = str;
field_name = strchr(str, '.');
if (field_name) {
*field_name++ = '\0';
} else {
event_name = NULL;
field_name = str;
}
opt_name = strchr(field_name, '/');
if (opt_name)
*opt_name++ = '\0';
*event = event_name;
*field = field_name;
*opt = opt_name;
return 0;
}
/* find a matching evsel using a given event name. The event name can be:
* 1. '%' + event index (e.g. '%1' for first event)
* 2. full event name (e.g. sched:sched_switch)
* 3. partial event name (should not contain ':')
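*
* e.g. '%1', 'sched:sched_switch' and a bare 'sched_switch' all resolve to
* the same evsel when that is the only tracepoint in the evlist.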
*/
static struct perf_evsel *find_evsel(struct perf_evlist *evlist, char *event_name)
{
struct perf_evsel *evsel = NULL;
struct perf_evsel *pos;
bool full_name;
/* case 1 */
if (event_name[0] == '%') {
int nr = strtol(event_name+1, NULL, 0);
if (nr > evlist->nr_entries)
return NULL;
evsel = perf_evlist__first(evlist);
while (--nr > 0)
evsel = perf_evsel__next(evsel);
return evsel;
}
full_name = !!strchr(event_name, ':');
evlist__for_each(evlist, pos) {
/* case 2 */
if (full_name && !strcmp(pos->name, event_name))
return pos;
/* case 3 */
if (!full_name && strstr(pos->name, event_name)) {
if (evsel) {
pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
event_name, evsel->name, pos->name);
return NULL;
}
evsel = pos;
}
}
return evsel;
}
static int __dynamic_dimension__add(struct perf_evsel *evsel,
struct format_field *field,
bool raw_trace)
{
struct hpp_dynamic_entry *hde;
hde = __alloc_dynamic_entry(evsel, field);
if (hde == NULL)
return -ENOMEM;
hde->raw_trace = raw_trace;
perf_hpp__register_sort_field(&hde->hpp);
return 0;
}
static int add_evsel_fields(struct perf_evsel *evsel, bool raw_trace)
{
int ret;
struct format_field *field;
field = evsel->tp_format->format.fields;
while (field) {
ret = __dynamic_dimension__add(evsel, field, raw_trace);
if (ret < 0)
return ret;
field = field->next;
}
return 0;
}
static int add_all_dynamic_fields(struct perf_evlist *evlist, bool raw_trace)
{
int ret;
struct perf_evsel *evsel;
evlist__for_each(evlist, evsel) {
if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
continue;
ret = add_evsel_fields(evsel, raw_trace);
if (ret < 0)
return ret;
}
return 0;
}
static int add_all_matching_fields(struct perf_evlist *evlist,
char *field_name, bool raw_trace)
{
int ret = -ESRCH;
struct perf_evsel *evsel;
struct format_field *field;
evlist__for_each(evlist, evsel) {
if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
continue;
field = pevent_find_any_field(evsel->tp_format, field_name);
if (field == NULL)
continue;
ret = __dynamic_dimension__add(evsel, field, raw_trace);
if (ret < 0)
break;
}
return ret;
}
static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok)
{
char *str, *event_name, *field_name, *opt_name;
struct perf_evsel *evsel;
struct format_field *field;
bool raw_trace = symbol_conf.raw_trace;
int ret = 0;
if (evlist == NULL)
return -ENOENT;
str = strdup(tok);
if (str == NULL)
return -ENOMEM;
if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
ret = -EINVAL;
goto out;
}
if (opt_name) {
if (strcmp(opt_name, "raw")) {
pr_debug("unsupported field option %s\n", opt_name);
ret = -EINVAL;
goto out;
}
raw_trace = true;
}
if (!strcmp(field_name, "trace_fields")) {
ret = add_all_dynamic_fields(evlist, raw_trace);
goto out;
}
if (event_name == NULL) {
ret = add_all_matching_fields(evlist, field_name, raw_trace);
goto out;
}
evsel = find_evsel(evlist, event_name);
if (evsel == NULL) {
pr_debug("Cannot find event: %s\n", event_name);
ret = -ENOENT;
goto out;
}
if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
pr_debug("%s is not a tracepoint event\n", event_name);
ret = -EINVAL;
goto out;
}
if (!strcmp(field_name, "*")) {
ret = add_evsel_fields(evsel, raw_trace);
} else {
field = pevent_find_any_field(evsel->tp_format, field_name);
if (field == NULL) {
pr_debug("Cannot find event field for %s.%s\n",
event_name, field_name);
ret = -ENOENT;
goto out;
}
ret = __dynamic_dimension__add(evsel, field, raw_trace);
}
out:
free(str);
return ret;
}
static int __sort_dimension__add(struct sort_dimension *sd)
{
if (sd->taken)
@ -1583,7 +2100,8 @@ int hpp_dimension__add_output(unsigned col)
return __hpp_dimension__add_output(&hpp_sort_dimensions[col]);
}
int sort_dimension__add(const char *tok)
static int sort_dimension__add(const char *tok,
struct perf_evlist *evlist __maybe_unused)
{
unsigned int i;
@ -1664,10 +2182,13 @@ int sort_dimension__add(const char *tok)
return 0;
}
if (!add_dynamic_entry(evlist, tok))
return 0;
return -ESRCH;
}
static const char *get_default_sort_order(void)
static const char *get_default_sort_order(struct perf_evlist *evlist)
{
const char *default_sort_orders[] = {
default_sort_order,
@ -1675,14 +2196,33 @@ static const char *get_default_sort_order(void)
default_mem_sort_order,
default_top_sort_order,
default_diff_sort_order,
default_tracepoint_sort_order,
};
bool use_trace = true;
struct perf_evsel *evsel;
BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));
if (evlist == NULL)
goto out_no_evlist;
evlist__for_each(evlist, evsel) {
if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
use_trace = false;
break;
}
}
if (use_trace) {
sort__mode = SORT_MODE__TRACEPOINT;
if (symbol_conf.raw_trace)
return "trace_fields";
}
out_no_evlist:
return default_sort_orders[sort__mode];
}
static int setup_sort_order(void)
static int setup_sort_order(struct perf_evlist *evlist)
{
char *new_sort_order;
@ -1703,7 +2243,7 @@ static int setup_sort_order(void)
* because it's checked over the rest of the code.
*/
if (asprintf(&new_sort_order, "%s,%s",
get_default_sort_order(), sort_order + 1) < 0) {
get_default_sort_order(evlist), sort_order + 1) < 0) {
error("Not enough memory to set up --sort");
return -ENOMEM;
}
@ -1712,13 +2252,41 @@ static int setup_sort_order(void)
return 0;
}
static int __setup_sorting(void)
/*
* Adds the 'pre,' prefix to 'str' if 'pre' is
* not already part of 'str'.
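* e.g. setup_overhead() below turns "comm,dso" into "overhead,comm,dso"
* (with "overhead_children" prepended as well when cumulation is enabled).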
*/
static char *prefix_if_not_in(const char *pre, char *str)
{
char *n;
if (!str || strstr(str, pre))
return str;
if (asprintf(&n, "%s,%s", pre, str) < 0)
return NULL;
free(str);
return n;
}
static char *setup_overhead(char *keys)
{
keys = prefix_if_not_in("overhead", keys);
if (symbol_conf.cumulate_callchain)
keys = prefix_if_not_in("overhead_children", keys);
return keys;
}
static int __setup_sorting(struct perf_evlist *evlist)
{
char *tmp, *tok, *str;
const char *sort_keys;
int ret = 0;
ret = setup_sort_order();
ret = setup_sort_order(evlist);
if (ret)
return ret;
@ -1732,7 +2300,7 @@ static int __setup_sorting(void)
return 0;
}
sort_keys = get_default_sort_order();
sort_keys = get_default_sort_order(evlist);
}
str = strdup(sort_keys);
@ -1741,9 +2309,20 @@ static int __setup_sorting(void)
return -ENOMEM;
}
/*
* Prepend overhead fields for backward compatibility.
*/
if (!is_strict_order(field_order)) {
str = setup_overhead(str);
if (str == NULL) {
error("Not enough memory to setup overhead keys");
return -ENOMEM;
}
}
for (tok = strtok_r(str, ", ", &tmp);
tok; tok = strtok_r(NULL, ", ", &tmp)) {
ret = sort_dimension__add(tok);
ret = sort_dimension__add(tok, evlist);
if (ret == -EINVAL) {
error("Invalid --sort key: `%s'", tok);
break;
@ -1954,16 +2533,16 @@ out:
return ret;
}
int setup_sorting(void)
int setup_sorting(struct perf_evlist *evlist)
{
int err;
err = __setup_sorting();
err = __setup_sorting(evlist);
if (err < 0)
return err;
if (parent_pattern != default_parent_pattern) {
err = sort_dimension__add("parent");
err = sort_dimension__add("parent", evlist);
if (err < 0)
return err;
}


@ -122,6 +122,9 @@ struct hist_entry {
struct branch_info *branch_info;
struct hists *hists;
struct mem_info *mem_info;
void *raw_data;
u32 raw_size;
void *trace_output;
struct callchain_root callchain[0]; /* must be last member */
};
@ -164,6 +167,7 @@ enum sort_mode {
SORT_MODE__MEMORY,
SORT_MODE__TOP,
SORT_MODE__DIFF,
SORT_MODE__TRACEPOINT,
};
enum sort_type {
@ -180,6 +184,7 @@ enum sort_type {
SORT_LOCAL_WEIGHT,
SORT_GLOBAL_WEIGHT,
SORT_TRANSACTION,
SORT_TRACE,
/* branch stack specific sort keys */
__SORT_BRANCH_STACK,
@ -209,8 +214,6 @@ enum sort_type {
*/
struct sort_entry {
struct list_head list;
const char *se_header;
int64_t (*se_cmp)(struct hist_entry *, struct hist_entry *);
@ -224,10 +227,11 @@ struct sort_entry {
extern struct sort_entry sort_thread;
extern struct list_head hist_entry__sort_list;
int setup_sorting(void);
struct perf_evlist;
struct pevent;
int setup_sorting(struct perf_evlist *evlist);
int setup_output_field(void);
void reset_output_field(void);
extern int sort_dimension__add(const char *);
void sort__setup_elide(FILE *fp);
void perf_hpp__set_elide(int idx, bool elide);


@ -39,6 +39,7 @@ struct symbol_conf symbol_conf = {
.cumulate_callchain = true,
.show_hist_headers = true,
.symfs = "",
.event_group = true,
};
static enum dso_binary_type binary_type_symtab[] = {


@ -109,7 +109,8 @@ struct symbol_conf {
branch_callstack,
has_filter,
show_ref_callgraph,
hide_unresolved;
hide_unresolved,
raw_trace;
const char *vmlinux_name,
*kallsyms_name,
*source_prefix,


@ -65,6 +65,7 @@ int tracing_data_put(struct tracing_data *tdata);
struct addr_location;
struct perf_session;
struct perf_stat_config;
struct scripting_ops {
const char *name;
@ -75,6 +76,9 @@ struct scripting_ops {
struct perf_sample *sample,
struct perf_evsel *evsel,
struct addr_location *al);
void (*process_stat)(struct perf_stat_config *config,
struct perf_evsel *evsel, u64 tstamp);
void (*process_stat_interval)(u64 tstamp);
int (*generate_script) (struct pevent *pevent, const char *outfile);
};


@ -95,6 +95,16 @@ static int access_dso_mem(struct unwind_info *ui, Dwarf_Addr addr,
thread__find_addr_map(ui->thread, PERF_RECORD_MISC_USER,
MAP__FUNCTION, addr, &al);
if (!al.map) {
/*
* We've seen cases (softice) where the DWARF unwinder went
* through non-executable mmaps, which we need to look up
* in the MAP__VARIABLE tree.
*/
thread__find_addr_map(ui->thread, PERF_RECORD_MISC_USER,
MAP__VARIABLE, addr, &al);
}
if (!al.map) {
pr_debug("unwind: no map for %lx\n", (unsigned long)addr);
return -1;


@ -319,6 +319,15 @@ static struct map *find_map(unw_word_t ip, struct unwind_info *ui)
thread__find_addr_map(ui->thread, PERF_RECORD_MISC_USER,
MAP__FUNCTION, ip, &al);
if (!al.map) {
/*
* We've seen cases (softice) where the DWARF unwinder went
* through non-executable mmaps, which we need to look up
* in the MAP__VARIABLE tree.
*/
thread__find_addr_map(ui->thread, PERF_RECORD_MISC_USER,
MAP__VARIABLE, ip, &al);
}
return al.map;
}
@ -416,20 +425,19 @@ get_proc_name(unw_addr_space_t __maybe_unused as,
static int access_dso_mem(struct unwind_info *ui, unw_word_t addr,
unw_word_t *data)
{
struct addr_location al;
struct map *map;
ssize_t size;
thread__find_addr_map(ui->thread, PERF_RECORD_MISC_USER,
MAP__FUNCTION, addr, &al);
if (!al.map) {
map = find_map(addr, ui);
if (!map) {
pr_debug("unwind: no map for %lx\n", (unsigned long)addr);
return -1;
}
if (!al.map->dso)
if (!map->dso)
return -1;
size = dso__data_read_addr(al.map->dso, al.map, ui->machine,
size = dso__data_read_addr(map->dso, map, ui->machine,
addr, (u8 *) data, sizeof(*data));
return !(size == sizeof(*data));


@ -16,6 +16,8 @@
#include <linux/kernel.h>
#include <unistd.h>
#include "callchain.h"
#include "strlist.h"
#include <subcmd/exec-cmd.h>
struct callchain_param callchain_param = {
.mode = CHAIN_GRAPH_ABS,
@ -663,3 +665,28 @@ fetch_kernel_version(unsigned int *puint, char *str,
*puint = (version << 16) + (patchlevel << 8) + sublevel;
return 0;
}
const char *perf_tip(const char *dirpath)
{
struct strlist *tips;
struct str_node *node;
char *tip = NULL;
struct strlist_config conf = {
.dirname = system_path(dirpath),
};
tips = strlist__new("tips.txt", &conf);
if (tips == NULL || strlist__nr_entries(tips) == 1) {
tip = (char *)"Cannot find tips.txt file";
goto out;
}
node = strlist__entry(tips, random() % strlist__nr_entries(tips));
if (asprintf(&tip, "Tip: %s", node->s) < 0)
tip = (char *)"Tip: get more memory! ;-)";
out:
strlist__delete(tips);
return tip;
}


@ -342,4 +342,6 @@ int fetch_kernel_version(unsigned int *puint,
#define KVER_FMT "%d.%d.%d"
#define KVER_PARAM(x) KVER_VERSION(x), KVER_PATCHLEVEL(x), KVER_SUBLEVEL(x)
const char *perf_tip(const char *dirpath);
#endif /* GIT_COMPAT_UTIL_H */