perf/core improvements and fixes:

Intel PT:
 
 - Support "ptwrite" instructio, a way to stuff 32 or 64 bit values into
   the Intel PT trace (Adrian Hunter)
 
 - Support power events in Intel PT to report changes to C-state (Adrian
   Hunter)
 
 - Synthesize Intel PT events as PERF_RECORD_SAMPLE records with a
   perf_event_attr.type (PERF_TYPE_SYNTH) just after the range used by the
   kernel, i.e. right after what is allocated for PMUs, at INT_MAX + 1U,
   attr.config will have the identification for the synthesized event and
   the PERF_SAMPLE_RAW payload will have its fields (Adrian Hunter)
 
 Infrastructure:
 
 - Remove warning() and error(), using instead pr_warning() and
   pr_err(), consolidating error reporting (Arnaldo Carvalho de Melo)
 
 - Add platform dependency to 'perf test 15' (Thomas Richter)
 
 Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v2
 
 iQIcBAABCAAGBQJZVsurAAoJENZQFvNTUqpAnYYP/i44/Y99vfN751fuTlJYci2g
 u1VVRsd0GC8OnFIZKRzFumAd+IXRUXiLp25nP36yvsXNOMHGU1O/SQmRRHOC6zTY
 ffPmnlHeUT8LOVX82GiiG6E6rzE2KHuAbgILvzswelPoyT6/91mysoZMu2xHpy3f
 sLUtjN7gAZqy6nMNTiGgItUDyFIAl4c2iQf5v8YkxfM0UxekXt/XIj2Zn5uUXTIW
 q9B0po9/MneI+7Fqtj3YTN7owY0YhXmynKHzE7CseNyGFFbtIzoTLW3qgtz+Ld3M
 ip0QcsRiV6hbgEkPsi6nwOAF1EABlsHb4QHwFifVqzWCPwqeLmI3rd7FsONDNcCZ
 TVoHfm1wlgqtQw6KVQodIrTKCq7DOpjTIzk6AX980vJ8yp2KtWf2DB0AqwpJ/7R2
 2nqTsLm9iWbPOTA0mp/7au/WbNDcgL9jv2yqU8/UGBg92tVlVN5IiAVVpnsdBJgi
 VjEeUdqbvs9aw//+L1uN0N7Y22zqpQAm/eomd9wwXzDHCeWjIcrIR4tDA5i22waH
 4XFJLgJhfbTZsSGonpQ+7GVPzFru3rz56wAM4UbD3BRtVCj+EMPu0/mb9u3URgjp
 1iJdOm7WY/XH7AYV5dXnZyR+o4VDHwuziw5yxvoR3RNpARxAjVFGzXfq6Q5DbHPS
 mycD8rcoQp+3IeyA/IEN
 =tvJF
 -----END PGP SIGNATURE-----

Merge tag 'perf-core-for-mingo-4.13-20170630' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core

Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

Intel PT enhancements:

 - Support "ptwrite" instruction, a way to stuff 32 or 64 bit values into
   the Intel PT trace (Adrian Hunter)

 - Support power events in Intel PT to report changes to C-state (Adrian
   Hunter)

 - Synthesize Intel PT events as PERF_RECORD_SAMPLE records with a
   perf_event_attr.type (PERF_TYPE_SYNTH) just after the range used by the
   kernel, i.e. right after what is allocated for PMUs, at INT_MAX + 1U,
   attr.config will have the identification for the synthesized event and
   the PERF_SAMPLE_RAW payload will have its fields (Adrian Hunter)
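
   In practical terms (a minimal sketch, using the names this series adds in
   the util/event.h hunk further down in the diff):

   #include <limits.h>

   #define PERF_TYPE_SYNTH (INT_MAX + 1U)  /* first id after the kernel's PMU range */

   enum perf_synth_id {                    /* carried in perf_event_attr.config */
           PERF_SYNTH_INTEL_PTWRITE,
           PERF_SYNTH_INTEL_MWAIT,
           PERF_SYNTH_INTEL_PWRE,
           PERF_SYNTH_INTEL_EXSTOP,
           PERF_SYNTH_INTEL_PWRX,
           PERF_SYNTH_INTEL_CBR,
   };

   The synthesized record itself is an ordinary PERF_RECORD_SAMPLE whose
   PERF_SAMPLE_RAW data carries one of the perf_synth_intel_* payload
   structures defined in that same hunk.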

Infrastructure changes:

 - Remove warning() and error(), using instead pr_warning() and
   pr_err(), consolidating error reporting (Arnaldo Carvalho de Melo)

 - Add platform dependency to 'perf test 15' (Thomas Richter)

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar 2017-07-01 10:39:25 +02:00
Parent e91c8d97ea 644e0840ad
Commit 23acd3e1a0
40 changed files with 1231 additions and 372 deletions


@ -1009,7 +1009,7 @@ GrpTable: Grp15
1: fxstor | RDGSBASE Ry (F3),(11B)
2: vldmxcsr Md (v1) | WRFSBASE Ry (F3),(11B)
3: vstmxcsr Md (v1) | WRGSBASE Ry (F3),(11B)
4: XSAVE
4: XSAVE | ptwrite Ey (F3),(11B)
5: XRSTOR | lfence (11B)
6: XSAVEOPT | clwb (66) | mfence (11B)
7: clflush | clflushopt (66) | sfence (11B)
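
The new Grp15 entry above lets the decoder recognize the instruction; on the
emitting side, user code stuffs a value into the trace with inline asm along
these lines (a minimal sketch; pt_write64() is a hypothetical helper, and it
assumes an assembler that knows the mnemonic, a CPU with PTWRITE, and an
intel_pt session recorded with the "ptw" config term, see the Intel PT
documentation hunk below):

#include <stdint.h>

/* 64-bit form; 32-bit code would use ptwritel.  The value reaches the trace
 * as a PTWRITE packet only while Intel PT is actively tracing. */
static inline void pt_write64(uint64_t value)
{
        asm volatile("ptwriteq %0" : : "m" (value));
}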


@ -5,6 +5,8 @@
#include <stddef.h>
#include <assert.h>
#include <linux/compiler.h>
#include <endian.h>
#include <byteswap.h>
#ifndef UINT_MAX
#define UINT_MAX (~0U)
@ -67,12 +69,33 @@
#endif
#endif
/*
* Both need more care to handle endianness
* (Don't use bitmap_copy_le() for now)
*/
#define cpu_to_le64(x) (x)
#define cpu_to_le32(x) (x)
#if __BYTE_ORDER == __BIG_ENDIAN
#define cpu_to_le16 bswap_16
#define cpu_to_le32 bswap_32
#define cpu_to_le64 bswap_64
#define le16_to_cpu bswap_16
#define le32_to_cpu bswap_32
#define le64_to_cpu bswap_64
#define cpu_to_be16
#define cpu_to_be32
#define cpu_to_be64
#define be16_to_cpu
#define be32_to_cpu
#define be64_to_cpu
#else
#define cpu_to_le16
#define cpu_to_le32
#define cpu_to_le64
#define le16_to_cpu
#define le32_to_cpu
#define le64_to_cpu
#define cpu_to_be16 bswap_16
#define cpu_to_be32 bswap_32
#define cpu_to_be64 bswap_64
#define be16_to_cpu bswap_16
#define be32_to_cpu bswap_32
#define be64_to_cpu bswap_64
#endif
int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
int scnprintf(char * buf, size_t size, const char * fmt, ...);
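
With the identity definitions replaced, conversions behave correctly on both
host byte orders; a minimal sketch of the intended use (assuming the patched
tools copy of kernel.h above is on the include path, mirroring the
le64_to_cpu() calls on Intel PT payloads in builtin-script.c further down):

#include <string.h>
#include <linux/kernel.h>       /* the tools copy patched above */

/* Read a little-endian 64-bit payload from raw sample data. */
static inline unsigned long long read_le64(const void *p)
{
        unsigned long long v;

        memcpy(&v, p, sizeof(v));       /* unaligned-safe copy of the LE bytes */
        return le64_to_cpu(v);          /* no-op on LE hosts, bswap_64 on BE */
}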


@ -1009,7 +1009,7 @@ GrpTable: Grp15
1: fxstor | RDGSBASE Ry (F3),(11B)
2: vldmxcsr Md (v1) | WRFSBASE Ry (F3),(11B)
3: vstmxcsr Md (v1) | WRGSBASE Ry (F3),(11B)
4: XSAVE
4: XSAVE | ptwrite Ey (F3),(11B)
5: XRSTOR | lfence (11B)
6: XSAVEOPT | clwb (66) | mfence (11B)
7: clflush | clflushopt (66) | sfence (11B)


@ -108,6 +108,9 @@ approach is available to export the data to a postgresql database. Refer to
script export-to-postgresql.py for more details, and to script
call-graph-from-postgresql.py for an example of using the database.
There is also script intel-pt-events.py which provides an example of how to
unpack the raw data for power events and PTWRITE.
As mentioned above, it is easy to capture too much data. One way to limit the
data captured is to use 'snapshot' mode which is explained further below.
Refer to 'new snapshot option' and 'Intel PT modes of operation' further below.
@ -710,13 +713,15 @@ Having no option is the same as
which, in turn, is the same as
--itrace=ibxe
--itrace=ibxwpe
The letters are:
i synthesize "instructions" events
b synthesize "branches" events
x synthesize "transactions" events
w synthesize "ptwrite" events
p synthesize "power" events
c synthesize branches events (calls only)
r synthesize branches events (returns only)
e synthesize tracing error events
@ -735,7 +740,40 @@ and "r" can be combined to get calls and returns.
'flags' field can be used in perf script to determine whether the event is a
transaction start, commit or abort.
Error events are new. They show where the decoder lost the trace. Error events
Note that "instructions", "branches" and "transactions" events depend on code
flow packets which can be disabled by using the config term "branch=0". Refer
to the config terms section above.
"ptwrite" events record the payload of the ptwrite instruction and whether
"fup_on_ptw" was used. "ptwrite" events depend on PTWRITE packets which are
recorded only if the "ptw" config term was used. Refer to the config terms
section above. perf script "synth" field displays "ptwrite" information like
this: "ip: 0 payload: 0x123456789abcdef0" where "ip" is 1 if "fup_on_ptw" was
used.
"Power" events correspond to power event packets and CBR (core-to-bus ratio)
packets. While CBR packets are always recorded when tracing is enabled, power
event packets are recorded only if the "pwr_evt" config term was used. Refer to
the config terms section above. The power events record information about
C-state changes, whereas CBR is indicative of CPU frequency. perf script
"event,synth" fields display information like this:
cbr: cbr: 22 freq: 2189 MHz (200%)
mwait: hints: 0x60 extensions: 0x1
pwre: hw: 0 cstate: 2 sub-cstate: 0
exstop: ip: 1
pwrx: deepest cstate: 2 last cstate: 2 wake reason: 0x4
Where:
"cbr" includes the frequency and the percentage of maximum non-turbo
"mwait" shows mwait hints and extensions
"pwre" shows C-state transitions (to a C-state deeper than C0) and
whether initiated by hardware
"exstop" indicates execution stopped and whether the IP was recorded
exactly,
"pwrx" indicates return to C0
For more details refer to the Intel 64 and IA-32 Architectures Software
Developer Manuals.
Error events show where the decoder lost the trace. Error events
are quite important. Users must know if what they are seeing is a complete
picture or not.
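
The "cbr" numbers above come from simple fixed-point arithmetic; a minimal
sketch of the computation (print_cbr_line() is a hypothetical helper that
follows intel_pt_synth_cbr_sample() and print_sample_synth_cbr() later in this
diff, where cbr2khz is the bus frequency in kHz and max_nonturbo the maximum
non-turbo core-to-bus ratio):

#include <stdio.h>

static void print_cbr_line(unsigned int cbr, unsigned int cbr2khz,
                           unsigned int max_nonturbo)
{
        unsigned int freq_mhz = (cbr * cbr2khz + 500) / 1000;  /* rounded */

        printf("cbr: %2u freq: %4u MHz ", cbr, freq_mhz);
        if (max_nonturbo)       /* percentage of maximum non-turbo */
                printf("(%3u%%)", (5 + (1000 * cbr) / max_nonturbo) / 10);
        printf("\n");
}

For instance, a cbr of 22 with a maximum non-turbo ratio of 11 works out to
200%, matching the example output above.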


@ -3,13 +3,15 @@
c synthesize branches events (calls only)
r synthesize branches events (returns only)
x synthesize transactions events
w synthesize ptwrite events
p synthesize power events
e synthesize error events
d create a debug log
g synthesize a call chain (use with i or x)
l synthesize last branch entries (use with i or x)
s skip initial number of events
The default is all events i.e. the same as --itrace=ibxe
The default is all events i.e. the same as --itrace=ibxwpe
In addition, the period (default 100000) for instructions events
can be specified in units of:
@ -26,8 +28,8 @@
Also the number of last branch entries (default 64, max. 1024) for
instructions or transactions events can be specified.
It is also possible to skip events generated (instructions, branches, transactions)
at the beginning. This is useful to ignore initialization code.
It is also possible to skip events generated (instructions, branches, transactions,
ptwrite, power) at the beginning. This is useful to ignore initialization code.
--itrace=i0nss1000000


@ -117,7 +117,8 @@ OPTIONS
Comma separated list of fields to print. Options are:
comm, tid, pid, time, cpu, event, trace, ip, sym, dso, addr, symoff,
srcline, period, iregs, brstack, brstacksym, flags, bpf-output, brstackinsn, brstackoff,
callindent, insn, insnlen. Field list can be prepended with the type, trace, sw or hw,
callindent, insn, insnlen, synth.
Field list can be prepended with the type, trace, sw or hw,
to indicate to which event type the field list applies.
e.g., -F sw:comm,tid,time,ip,sym and -F trace:time,cpu,trace
@ -193,6 +194,9 @@ OPTIONS
instruction bytes and the instruction length of the current
instruction.
The synth field is used by synthesized events which may be created when
doing Instruction Trace decoding.
Finally, a user may not set fields to none for all event types.
i.e., -F "" is not allowed.


@ -1664,3 +1664,15 @@
"0f c7 1d 78 56 34 12 \txrstors 0x12345678",},
{{0x0f, 0xc7, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
"0f c7 9c c8 78 56 34 12 \txrstors 0x12345678(%eax,%ecx,8)",},
{{0xf3, 0x0f, 0xae, 0x20, }, 4, 0, "", "",
"f3 0f ae 20 \tptwritel (%eax)",},
{{0xf3, 0x0f, 0xae, 0x25, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
"f3 0f ae 25 78 56 34 12 \tptwritel 0x12345678",},
{{0xf3, 0x0f, 0xae, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
"f3 0f ae a4 c8 78 56 34 12 \tptwritel 0x12345678(%eax,%ecx,8)",},
{{0xf3, 0x0f, 0xae, 0x20, }, 4, 0, "", "",
"f3 0f ae 20 \tptwritel (%eax)",},
{{0xf3, 0x0f, 0xae, 0x25, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
"f3 0f ae 25 78 56 34 12 \tptwritel 0x12345678",},
{{0xf3, 0x0f, 0xae, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
"f3 0f ae a4 c8 78 56 34 12 \tptwritel 0x12345678(%eax,%ecx,8)",},


@ -1696,3 +1696,33 @@
"0f c7 9c c8 78 56 34 12 \txrstors 0x12345678(%rax,%rcx,8)",},
{{0x41, 0x0f, 0xc7, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
"41 0f c7 9c c8 78 56 34 12 \txrstors 0x12345678(%r8,%rcx,8)",},
{{0xf3, 0x0f, 0xae, 0x20, }, 4, 0, "", "",
"f3 0f ae 20 \tptwritel (%rax)",},
{{0xf3, 0x41, 0x0f, 0xae, 0x20, }, 5, 0, "", "",
"f3 41 0f ae 20 \tptwritel (%r8)",},
{{0xf3, 0x0f, 0xae, 0x24, 0x25, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
"f3 0f ae 24 25 78 56 34 12 \tptwritel 0x12345678",},
{{0xf3, 0x0f, 0xae, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
"f3 0f ae a4 c8 78 56 34 12 \tptwritel 0x12345678(%rax,%rcx,8)",},
{{0xf3, 0x41, 0x0f, 0xae, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
"f3 41 0f ae a4 c8 78 56 34 12 \tptwritel 0x12345678(%r8,%rcx,8)",},
{{0xf3, 0x0f, 0xae, 0x20, }, 4, 0, "", "",
"f3 0f ae 20 \tptwritel (%rax)",},
{{0xf3, 0x41, 0x0f, 0xae, 0x20, }, 5, 0, "", "",
"f3 41 0f ae 20 \tptwritel (%r8)",},
{{0xf3, 0x0f, 0xae, 0x24, 0x25, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
"f3 0f ae 24 25 78 56 34 12 \tptwritel 0x12345678",},
{{0xf3, 0x0f, 0xae, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
"f3 0f ae a4 c8 78 56 34 12 \tptwritel 0x12345678(%rax,%rcx,8)",},
{{0xf3, 0x41, 0x0f, 0xae, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
"f3 41 0f ae a4 c8 78 56 34 12 \tptwritel 0x12345678(%r8,%rcx,8)",},
{{0xf3, 0x48, 0x0f, 0xae, 0x20, }, 5, 0, "", "",
"f3 48 0f ae 20 \tptwriteq (%rax)",},
{{0xf3, 0x49, 0x0f, 0xae, 0x20, }, 5, 0, "", "",
"f3 49 0f ae 20 \tptwriteq (%r8)",},
{{0xf3, 0x48, 0x0f, 0xae, 0x24, 0x25, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
"f3 48 0f ae 24 25 78 56 34 12 \tptwriteq 0x12345678",},
{{0xf3, 0x48, 0x0f, 0xae, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
"f3 48 0f ae a4 c8 78 56 34 12 \tptwriteq 0x12345678(%rax,%rcx,8)",},
{{0xf3, 0x49, 0x0f, 0xae, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
"f3 49 0f ae a4 c8 78 56 34 12 \tptwriteq 0x12345678(%r8,%rcx,8)",},


@ -1343,6 +1343,26 @@ int main(void)
asm volatile("xrstors 0x12345678(%rax,%rcx,8)");
asm volatile("xrstors 0x12345678(%r8,%rcx,8)");
/* ptwrite */
asm volatile("ptwrite (%rax)");
asm volatile("ptwrite (%r8)");
asm volatile("ptwrite (0x12345678)");
asm volatile("ptwrite 0x12345678(%rax,%rcx,8)");
asm volatile("ptwrite 0x12345678(%r8,%rcx,8)");
asm volatile("ptwritel (%rax)");
asm volatile("ptwritel (%r8)");
asm volatile("ptwritel (0x12345678)");
asm volatile("ptwritel 0x12345678(%rax,%rcx,8)");
asm volatile("ptwritel 0x12345678(%r8,%rcx,8)");
asm volatile("ptwriteq (%rax)");
asm volatile("ptwriteq (%r8)");
asm volatile("ptwriteq (0x12345678)");
asm volatile("ptwriteq 0x12345678(%rax,%rcx,8)");
asm volatile("ptwriteq 0x12345678(%r8,%rcx,8)");
#else /* #ifdef __x86_64__ */
/* bound r32, mem (same op code as EVEX prefix) */
@ -2653,6 +2673,16 @@ int main(void)
asm volatile("xrstors (0x12345678)");
asm volatile("xrstors 0x12345678(%eax,%ecx,8)");
/* ptwrite */
asm volatile("ptwrite (%eax)");
asm volatile("ptwrite (0x12345678)");
asm volatile("ptwrite 0x12345678(%eax,%ecx,8)");
asm volatile("ptwritel (%eax)");
asm volatile("ptwritel (0x12345678)");
asm volatile("ptwritel 0x12345678(%eax,%ecx,8)");
#endif /* #ifndef __x86_64__ */
/* Following line is a marker for the awk script - do not change */


@ -1725,10 +1725,10 @@ static int c2c_hists__init_sort(struct perf_hpp_list *hpp_list, char *name)
tok; tok = strtok_r(NULL, ", ", &tmp)) { \
ret = _fn(hpp_list, tok); \
if (ret == -EINVAL) { \
error("Invalid --fields key: `%s'", tok); \
pr_err("Invalid --fields key: `%s'", tok); \
break; \
} else if (ret == -ESRCH) { \
error("Unknown --fields key: `%s'", tok); \
pr_err("Unknown --fields key: `%s'", tok); \
break; \
} \
} \


@ -1302,7 +1302,10 @@ static int diff__config(const char *var, const char *value,
void *cb __maybe_unused)
{
if (!strcmp(var, "diff.order")) {
sort_compute = perf_config_int(var, value);
int ret;
if (perf_config_int(&ret, var, value) < 0)
return -1;
sort_compute = ret;
return 0;
}
if (!strcmp(var, "diff.compute")) {


@ -108,10 +108,14 @@ out:
return ret;
}
static void exec_woman_emacs(const char *path, const char *page)
static void exec_failed(const char *cmd)
{
char sbuf[STRERR_BUFSIZE];
pr_warning("failed to exec '%s': %s", cmd, str_error_r(errno, sbuf, sizeof(sbuf)));
}
static void exec_woman_emacs(const char *path, const char *page)
{
if (!check_emacsclient_version()) {
/* This works only with emacsclient version >= 22. */
char *man_page;
@ -122,8 +126,7 @@ static void exec_woman_emacs(const char *path, const char *page)
execlp(path, "emacsclient", "-e", man_page, NULL);
free(man_page);
}
warning("failed to exec '%s': %s", path,
str_error_r(errno, sbuf, sizeof(sbuf)));
exec_failed(path);
}
}
@ -134,7 +137,6 @@ static void exec_man_konqueror(const char *path, const char *page)
if (display && *display) {
char *man_page;
const char *filename = "kfmclient";
char sbuf[STRERR_BUFSIZE];
/* It's simpler to launch konqueror using kfmclient. */
if (path) {
@ -155,33 +157,27 @@ static void exec_man_konqueror(const char *path, const char *page)
execlp(path, filename, "newTab", man_page, NULL);
free(man_page);
}
warning("failed to exec '%s': %s", path,
str_error_r(errno, sbuf, sizeof(sbuf)));
exec_failed(path);
}
}
static void exec_man_man(const char *path, const char *page)
{
char sbuf[STRERR_BUFSIZE];
if (!path)
path = "man";
execlp(path, "man", page, NULL);
warning("failed to exec '%s': %s", path,
str_error_r(errno, sbuf, sizeof(sbuf)));
exec_failed(path);
}
static void exec_man_cmd(const char *cmd, const char *page)
{
char sbuf[STRERR_BUFSIZE];
char *shell_cmd;
if (asprintf(&shell_cmd, "%s %s", cmd, page) > 0) {
execl("/bin/sh", "sh", "-c", shell_cmd, NULL);
free(shell_cmd);
}
warning("failed to exec '%s': %s", cmd,
str_error_r(errno, sbuf, sizeof(sbuf)));
exec_failed(cmd);
}
static void add_man_viewer(const char *name)
@ -214,6 +210,12 @@ static void do_add_man_viewer_info(const char *name,
man_viewer_info_list = new;
}
static void unsupported_man_viewer(const char *name, const char *var)
{
pr_warning("'%s': path for unsupported man viewer.\n"
"Please consider using 'man.<tool>.%s' instead.", name, var);
}
static int add_man_viewer_path(const char *name,
size_t len,
const char *value)
@ -221,9 +223,7 @@ static int add_man_viewer_path(const char *name,
if (supported_man_viewer(name, len))
do_add_man_viewer_info(name, len, value);
else
warning("'%s': path for unsupported man viewer.\n"
"Please consider using 'man.<tool>.cmd' instead.",
name);
unsupported_man_viewer(name, "cmd");
return 0;
}
@ -233,9 +233,7 @@ static int add_man_viewer_cmd(const char *name,
const char *value)
{
if (supported_man_viewer(name, len))
warning("'%s': cmd for supported man viewer.\n"
"Please consider using 'man.<tool>.path' instead.",
name);
unsupported_man_viewer(name, "path");
else
do_add_man_viewer_info(name, len, value);
@ -247,8 +245,10 @@ static int add_man_viewer_info(const char *var, const char *value)
const char *name = var + 4;
const char *subkey = strrchr(name, '.');
if (!subkey)
return error("Config with no key for man viewer: %s", name);
if (!subkey) {
pr_err("Config with no key for man viewer: %s", name);
return -1;
}
if (!strcmp(subkey, ".path")) {
if (!value)
@ -261,7 +261,7 @@ static int add_man_viewer_info(const char *var, const char *value)
return add_man_viewer_cmd(name, subkey - name, value);
}
warning("'%s': unsupported man viewer sub key.", subkey);
pr_warning("'%s': unsupported man viewer sub key.", subkey);
return 0;
}
@ -332,7 +332,7 @@ static void setup_man_path(void)
setenv("MANPATH", new_path, 1);
free(new_path);
} else {
error("Unable to setup man path");
pr_err("Unable to setup man path");
}
}
@ -349,7 +349,7 @@ static void exec_viewer(const char *name, const char *page)
else if (info)
exec_man_cmd(info, page);
else
warning("'%s': unknown man viewer.", name);
pr_warning("'%s': unknown man viewer.", name);
}
static int show_man_page(const char *perf_cmd)


@ -1715,7 +1715,7 @@ static int setup_slab_sorting(struct list_head *sort_list, const char *arg)
if (!tok)
break;
if (slab_sort_dimension__add(tok, sort_list) < 0) {
error("Unknown slab --sort key: '%s'", tok);
pr_err("Unknown slab --sort key: '%s'", tok);
free(str);
return -1;
}
@ -1741,7 +1741,7 @@ static int setup_page_sorting(struct list_head *sort_list, const char *arg)
if (!tok)
break;
if (page_sort_dimension__add(tok, sort_list) < 0) {
error("Unknown page --sort key: '%s'", tok);
pr_err("Unknown page --sort key: '%s'", tok);
free(str);
return -1;
}


@ -453,7 +453,7 @@ try_again:
}
if (perf_evlist__apply_filters(evlist, &pos)) {
error("failed to set filter \"%s\" on event %s with %d (%s)\n",
pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
pos->filter, perf_evsel__name(pos), errno,
str_error_r(errno, msg, sizeof(msg)));
rc = -1;
@ -461,7 +461,7 @@ try_again:
}
if (perf_evlist__apply_drv_configs(evlist, &pos, &err_term)) {
error("failed to set config \"%s\" on event %s with %d (%s)\n",
pr_err("failed to set config \"%s\" on event %s with %d (%s)\n",
err_term->val.drv_cfg, perf_evsel__name(pos), errno,
str_error_r(errno, msg, sizeof(msg)));
rc = -1;


@ -94,10 +94,9 @@ static int report__config(const char *var, const char *value, void *cb)
symbol_conf.cumulate_callchain = perf_config_bool(var, value);
return 0;
}
if (!strcmp(var, "report.queue-size")) {
rep->queue_size = perf_config_u64(var, value);
return 0;
}
if (!strcmp(var, "report.queue-size"))
return perf_config_u64(&rep->queue_size, var, value);
if (!strcmp(var, "report.sort_order")) {
default_sort_order = strdup(value);
return 0;
@ -558,6 +557,7 @@ static int __cmd_report(struct report *rep)
ui__error("failed to set cpu bitmap\n");
return ret;
}
session->itrace_synth_opts->cpu_bitmap = rep->cpu_bitmap;
}
if (rep->show_threads) {


@ -2066,7 +2066,7 @@ static void save_task_callchain(struct perf_sched *sched,
if (thread__resolve_callchain(thread, cursor, evsel, sample,
NULL, NULL, sched->max_stack + 2) != 0) {
if (verbose > 0)
error("Failed to resolve callchain. Skipping\n");
pr_err("Failed to resolve callchain. Skipping\n");
return;
}


@ -86,6 +86,7 @@ enum perf_output_field {
PERF_OUTPUT_INSNLEN = 1U << 22,
PERF_OUTPUT_BRSTACKINSN = 1U << 23,
PERF_OUTPUT_BRSTACKOFF = 1U << 24,
PERF_OUTPUT_SYNTH = 1U << 25,
};
struct output_option {
@ -117,6 +118,12 @@ struct output_option {
{.str = "insnlen", .field = PERF_OUTPUT_INSNLEN},
{.str = "brstackinsn", .field = PERF_OUTPUT_BRSTACKINSN},
{.str = "brstackoff", .field = PERF_OUTPUT_BRSTACKOFF},
{.str = "synth", .field = PERF_OUTPUT_SYNTH},
};
enum {
OUTPUT_TYPE_SYNTH = PERF_TYPE_MAX,
OUTPUT_TYPE_MAX
};
/* default set to maintain compatibility with current format */
@ -126,7 +133,7 @@ static struct {
unsigned int print_ip_opts;
u64 fields;
u64 invalid_fields;
} output[PERF_TYPE_MAX] = {
} output[OUTPUT_TYPE_MAX] = {
[PERF_TYPE_HARDWARE] = {
.user_set = false,
@ -184,12 +191,44 @@ static struct {
.invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
},
[OUTPUT_TYPE_SYNTH] = {
.user_set = false,
.fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
PERF_OUTPUT_SYM | PERF_OUTPUT_DSO |
PERF_OUTPUT_SYNTH,
.invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
},
};
static inline int output_type(unsigned int type)
{
switch (type) {
case PERF_TYPE_SYNTH:
return OUTPUT_TYPE_SYNTH;
default:
return type;
}
}
static inline unsigned int attr_type(unsigned int type)
{
switch (type) {
case OUTPUT_TYPE_SYNTH:
return PERF_TYPE_SYNTH;
default:
return type;
}
}
static bool output_set_by_user(void)
{
int j;
for (j = 0; j < PERF_TYPE_MAX; ++j) {
for (j = 0; j < OUTPUT_TYPE_MAX; ++j) {
if (output[j].user_set)
return true;
}
@ -210,7 +249,7 @@ static const char *output_field2str(enum perf_output_field field)
return str;
}
#define PRINT_FIELD(x) (output[attr->type].fields & PERF_OUTPUT_##x)
#define PRINT_FIELD(x) (output[output_type(attr->type)].fields & PERF_OUTPUT_##x)
static int perf_evsel__do_check_stype(struct perf_evsel *evsel,
u64 sample_type, const char *sample_msg,
@ -218,7 +257,7 @@ static int perf_evsel__do_check_stype(struct perf_evsel *evsel,
bool allow_user_set)
{
struct perf_event_attr *attr = &evsel->attr;
int type = attr->type;
int type = output_type(attr->type);
const char *evname;
if (attr->sample_type & sample_type)
@ -348,7 +387,7 @@ static int perf_evsel__check_attr(struct perf_evsel *evsel,
static void set_print_ip_opts(struct perf_event_attr *attr)
{
unsigned int type = attr->type;
unsigned int type = output_type(attr->type);
output[type].print_ip_opts = 0;
if (PRINT_FIELD(IP))
@ -376,14 +415,15 @@ static int perf_session__check_output_opt(struct perf_session *session)
unsigned int j;
struct perf_evsel *evsel;
for (j = 0; j < PERF_TYPE_MAX; ++j) {
evsel = perf_session__find_first_evtype(session, j);
for (j = 0; j < OUTPUT_TYPE_MAX; ++j) {
evsel = perf_session__find_first_evtype(session, attr_type(j));
/*
* even if fields is set to 0 (ie., show nothing) event must
* exist if user explicitly includes it on the command line
*/
if (!evsel && output[j].user_set && !output[j].wildcard_set) {
if (!evsel && output[j].user_set && !output[j].wildcard_set &&
j != OUTPUT_TYPE_SYNTH) {
pr_err("%s events do not exist. "
"Remove corresponding -F option to proceed.\n",
event_type(j));
@ -989,6 +1029,7 @@ static void print_sample_bts(struct perf_sample *sample,
struct machine *machine)
{
struct perf_event_attr *attr = &evsel->attr;
unsigned int type = output_type(attr->type);
bool print_srcline_last = false;
if (PRINT_FIELD(CALLINDENT))
@ -996,7 +1037,7 @@ static void print_sample_bts(struct perf_sample *sample,
/* print branch_from information */
if (PRINT_FIELD(IP)) {
unsigned int print_opts = output[attr->type].print_ip_opts;
unsigned int print_opts = output[type].print_ip_opts;
struct callchain_cursor *cursor = NULL;
if (symbol_conf.use_callchain && sample->callchain &&
@ -1019,7 +1060,7 @@ static void print_sample_bts(struct perf_sample *sample,
/* print branch_to information */
if (PRINT_FIELD(ADDR) ||
((evsel->attr.sample_type & PERF_SAMPLE_ADDR) &&
!output[attr->type].user_set)) {
!output[type].user_set)) {
printf(" => ");
print_sample_addr(sample, thread, attr);
}
@ -1162,6 +1203,127 @@ static void print_sample_bpf_output(struct perf_sample *sample)
(char *)(sample->raw_data));
}
static void print_sample_spacing(int len, int spacing)
{
if (len > 0 && len < spacing)
printf("%*s", spacing - len, "");
}
static void print_sample_pt_spacing(int len)
{
print_sample_spacing(len, 34);
}
static void print_sample_synth_ptwrite(struct perf_sample *sample)
{
struct perf_synth_intel_ptwrite *data = perf_sample__synth_ptr(sample);
int len;
if (perf_sample__bad_synth_size(sample, *data))
return;
len = printf(" IP: %u payload: %#" PRIx64 " ",
data->ip, le64_to_cpu(data->payload));
print_sample_pt_spacing(len);
}
static void print_sample_synth_mwait(struct perf_sample *sample)
{
struct perf_synth_intel_mwait *data = perf_sample__synth_ptr(sample);
int len;
if (perf_sample__bad_synth_size(sample, *data))
return;
len = printf(" hints: %#x extensions: %#x ",
data->hints, data->extensions);
print_sample_pt_spacing(len);
}
static void print_sample_synth_pwre(struct perf_sample *sample)
{
struct perf_synth_intel_pwre *data = perf_sample__synth_ptr(sample);
int len;
if (perf_sample__bad_synth_size(sample, *data))
return;
len = printf(" hw: %u cstate: %u sub-cstate: %u ",
data->hw, data->cstate, data->subcstate);
print_sample_pt_spacing(len);
}
static void print_sample_synth_exstop(struct perf_sample *sample)
{
struct perf_synth_intel_exstop *data = perf_sample__synth_ptr(sample);
int len;
if (perf_sample__bad_synth_size(sample, *data))
return;
len = printf(" IP: %u ", data->ip);
print_sample_pt_spacing(len);
}
static void print_sample_synth_pwrx(struct perf_sample *sample)
{
struct perf_synth_intel_pwrx *data = perf_sample__synth_ptr(sample);
int len;
if (perf_sample__bad_synth_size(sample, *data))
return;
len = printf(" deepest cstate: %u last cstate: %u wake reason: %#x ",
data->deepest_cstate, data->last_cstate,
data->wake_reason);
print_sample_pt_spacing(len);
}
static void print_sample_synth_cbr(struct perf_sample *sample)
{
struct perf_synth_intel_cbr *data = perf_sample__synth_ptr(sample);
unsigned int percent, freq;
int len;
if (perf_sample__bad_synth_size(sample, *data))
return;
freq = (le32_to_cpu(data->freq) + 500) / 1000;
len = printf(" cbr: %2u freq: %4u MHz ", data->cbr, freq);
if (data->max_nonturbo) {
percent = (5 + (1000 * data->cbr) / data->max_nonturbo) / 10;
len += printf("(%3u%%) ", percent);
}
print_sample_pt_spacing(len);
}
static void print_sample_synth(struct perf_sample *sample,
struct perf_evsel *evsel)
{
switch (evsel->attr.config) {
case PERF_SYNTH_INTEL_PTWRITE:
print_sample_synth_ptwrite(sample);
break;
case PERF_SYNTH_INTEL_MWAIT:
print_sample_synth_mwait(sample);
break;
case PERF_SYNTH_INTEL_PWRE:
print_sample_synth_pwre(sample);
break;
case PERF_SYNTH_INTEL_EXSTOP:
print_sample_synth_exstop(sample);
break;
case PERF_SYNTH_INTEL_PWRX:
print_sample_synth_pwrx(sample);
break;
case PERF_SYNTH_INTEL_CBR:
print_sample_synth_cbr(sample);
break;
default:
break;
}
}
struct perf_script {
struct perf_tool tool;
struct perf_session *session;
@ -1215,8 +1377,9 @@ static void process_event(struct perf_script *script,
{
struct thread *thread = al->thread;
struct perf_event_attr *attr = &evsel->attr;
unsigned int type = output_type(attr->type);
if (output[attr->type].fields == 0)
if (output[type].fields == 0)
return;
print_sample_start(sample, thread, evsel);
@ -1245,6 +1408,10 @@ static void process_event(struct perf_script *script,
if (PRINT_FIELD(TRACE))
event_format__print(evsel->tp_format, sample->cpu,
sample->raw_data, sample->raw_size);
if (attr->type == PERF_TYPE_SYNTH && PRINT_FIELD(SYNTH))
print_sample_synth(sample, evsel);
if (PRINT_FIELD(ADDR))
print_sample_addr(sample, thread, attr);
@ -1263,7 +1430,7 @@ static void process_event(struct perf_script *script,
cursor = &callchain_cursor;
putchar(cursor ? '\n' : ' ');
sample__fprintf_sym(sample, al, 0, output[attr->type].print_ip_opts, cursor, stdout);
sample__fprintf_sym(sample, al, 0, output[type].print_ip_opts, cursor, stdout);
}
if (PRINT_FIELD(IREGS))
@ -1410,7 +1577,8 @@ static int process_attr(struct perf_tool *tool, union perf_event *event,
evlist = *pevlist;
evsel = perf_evlist__last(*pevlist);
if (evsel->attr.type >= PERF_TYPE_MAX)
if (evsel->attr.type >= PERF_TYPE_MAX &&
evsel->attr.type != PERF_TYPE_SYNTH)
return 0;
evlist__for_each_entry(evlist, pos) {
@ -1835,6 +2003,8 @@ static int parse_output_fields(const struct option *opt __maybe_unused,
type = PERF_TYPE_RAW;
else if (!strcmp(str, "break"))
type = PERF_TYPE_BREAKPOINT;
else if (!strcmp(str, "synth"))
type = OUTPUT_TYPE_SYNTH;
else {
fprintf(stderr, "Invalid event type in field string.\n");
rc = -EINVAL;
@ -1865,7 +2035,7 @@ static int parse_output_fields(const struct option *opt __maybe_unused,
if (output_set_by_user())
pr_warning("Overriding previous field request for all events.\n");
for (j = 0; j < PERF_TYPE_MAX; ++j) {
for (j = 0; j < OUTPUT_TYPE_MAX; ++j) {
output[j].fields = 0;
output[j].user_set = true;
output[j].wildcard_set = true;
@ -1908,7 +2078,7 @@ parse:
/* add user option to all events types for
* which it is valid
*/
for (j = 0; j < PERF_TYPE_MAX; ++j) {
for (j = 0; j < OUTPUT_TYPE_MAX; ++j) {
if (output[j].invalid_fields & all_output_options[i].field) {
pr_warning("\'%s\' not valid for %s events. Ignoring.\n",
all_output_options[i].str, event_type(j));
@ -2560,10 +2730,10 @@ int cmd_script(int argc, const char **argv)
OPT_CALLBACK('F', "fields", NULL, "str",
"comma separated output fields prepend with 'type:'. "
"+field to add and -field to remove."
"Valid types: hw,sw,trace,raw. "
"Valid types: hw,sw,trace,raw,synth. "
"Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso,"
"addr,symoff,period,iregs,brstack,brstacksym,flags,"
"bpf-output,callindent,insn,insnlen,brstackinsn",
"bpf-output,callindent,insn,insnlen,brstackinsn,synth",
parse_output_fields),
OPT_BOOLEAN('a', "all-cpus", &system_wide,
"system-wide collection from all CPUs"),
@ -2822,6 +2992,7 @@ int cmd_script(int argc, const char **argv)
err = perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap);
if (err < 0)
goto out_delete;
itrace_synth_opts.cpu_bitmap = cpu_bitmap;
}
if (!no_callchain)


@ -636,14 +636,14 @@ try_again:
}
if (perf_evlist__apply_filters(evsel_list, &counter)) {
error("failed to set filter \"%s\" on event %s with %d (%s)\n",
pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
counter->filter, perf_evsel__name(counter), errno,
str_error_r(errno, msg, sizeof(msg)));
return -1;
}
if (perf_evlist__apply_drv_configs(evsel_list, &counter, &err_term)) {
error("failed to set config \"%s\" on event %s with %d (%s)\n",
pr_err("failed to set config \"%s\" on event %s with %d (%s)\n",
err_term->val.drv_cfg, perf_evsel__name(counter), errno,
str_error_r(errno, msg, sizeof(msg)));
return -1;


@ -958,7 +958,7 @@ static int __cmd_top(struct perf_top *top)
ret = perf_evlist__apply_drv_configs(evlist, &pos, &err_term);
if (ret) {
error("failed to set config \"%s\" on event %s with %d (%s)\n",
pr_err("failed to set config \"%s\" on event %s with %d (%s)\n",
err_term->val.drv_cfg, perf_evsel__name(pos), errno,
str_error_r(errno, msg, sizeof(msg)));
goto out_delete;


@ -304,7 +304,7 @@ jvmti_close(void *agent)
FILE *fp = agent;
if (!fp) {
warnx("jvmti: incalid fd in close_agent");
warnx("jvmti: invalid fd in close_agent");
return -1;
}


@ -0,0 +1,13 @@
#!/bin/bash
#
# print Intel PT Power Events and PTWRITE. The intel_pt PMU event needs
# to be specified with appropriate config terms.
#
if ! echo "$@" | grep -q intel_pt ; then
echo "Options must include the Intel PT event e.g. -e intel_pt/pwr_evt,ptw/"
echo "and for power events it probably needs to be system wide i.e. -a option"
echo "For example: -a -e intel_pt/pwr_evt,branch=0/ sleep 1"
exit 1
fi
perf record $@


@ -0,0 +1,3 @@
#!/bin/bash
# description: print Intel PT Power Events and PTWRITE
perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/intel-pt-events.py


@ -0,0 +1,128 @@
# intel-pt-events.py: Print Intel PT Power Events and PTWRITE
# Copyright (c) 2017, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
import os
import sys
import struct
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
# These perf imports are not used at present
#from perf_trace_context import *
#from Core import *
def trace_begin():
print "Intel PT Power Events and PTWRITE"
def trace_end():
print "End"
def trace_unhandled(event_name, context, event_fields_dict):
print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
def print_ptwrite(raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
flags = data[0]
payload = data[1]
exact_ip = flags & 1
print "IP: %u payload: %#x" % (exact_ip, payload),
def print_cbr(raw_buf):
data = struct.unpack_from("<BBBBII", raw_buf)
cbr = data[0]
f = (data[4] + 500) / 1000
p = ((cbr * 1000 / data[2]) + 5) / 10
print "%3u freq: %4u MHz (%3u%%)" % (cbr, f, p),
def print_mwait(raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
payload = data[1]
hints = payload & 0xff
extensions = (payload >> 32) & 0x3
print "hints: %#x extensions: %#x" % (hints, extensions),
def print_pwre(raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
payload = data[1]
hw = (payload >> 7) & 1
cstate = (payload >> 12) & 0xf
subcstate = (payload >> 8) & 0xf
print "hw: %u cstate: %u sub-cstate: %u" % (hw, cstate, subcstate),
def print_exstop(raw_buf):
data = struct.unpack_from("<I", raw_buf)
flags = data[0]
exact_ip = flags & 1
print "IP: %u" % (exact_ip),
def print_pwrx(raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
payload = data[1]
deepest_cstate = payload & 0xf
last_cstate = (payload >> 4) & 0xf
wake_reason = (payload >> 8) & 0xf
print "deepest cstate: %u last cstate: %u wake reason: %#x" % (deepest_cstate, last_cstate, wake_reason),
def print_common_start(comm, sample, name):
ts = sample["time"]
cpu = sample["cpu"]
pid = sample["pid"]
tid = sample["tid"]
print "%16s %5u/%-5u [%03u] %9u.%09u %7s:" % (comm, pid, tid, cpu, ts / 1000000000, ts %1000000000, name),
def print_common_ip(sample, symbol, dso):
ip = sample["ip"]
print "%16x %s (%s)" % (ip, symbol, dso)
def process_event(param_dict):
event_attr = param_dict["attr"]
sample = param_dict["sample"]
raw_buf = param_dict["raw_buf"]
comm = param_dict["comm"]
name = param_dict["ev_name"]
# Symbol and dso info are not always resolved
if (param_dict.has_key("dso")):
dso = param_dict["dso"]
else:
dso = "[unknown]"
if (param_dict.has_key("symbol")):
symbol = param_dict["symbol"]
else:
symbol = "[unknown]"
if name == "ptwrite":
print_common_start(comm, sample, name)
print_ptwrite(raw_buf)
print_common_ip(sample, symbol, dso)
elif name == "cbr":
print_common_start(comm, sample, name)
print_cbr(raw_buf)
print_common_ip(sample, symbol, dso)
elif name == "mwait":
print_common_start(comm, sample, name)
print_mwait(raw_buf)
print_common_ip(sample, symbol, dso)
elif name == "pwre":
print_common_start(comm, sample, name)
print_pwre(raw_buf)
print_common_ip(sample, symbol, dso)
elif name == "exstop":
print_common_start(comm, sample, name)
print_exstop(raw_buf)
print_common_ip(sample, symbol, dso)
elif name == "pwrx":
print_common_start(comm, sample, name)
print_pwrx(raw_buf)
print_common_ip(sample, symbol, dso)


@ -18,6 +18,7 @@
* permissions. All the event text files are stored there.
*/
#include <debug.h>
#include <errno.h>
#include <inttypes.h>
#include <stdlib.h>
@ -29,14 +30,11 @@
#include <sys/stat.h>
#include <unistd.h>
#include "../perf.h"
#include "util.h"
#include <subcmd/exec-cmd.h>
#include "tests.h"
#define ENV "PERF_TEST_ATTR"
extern int verbose;
static char *dir;
void test_attr__init(void)
@ -138,8 +136,10 @@ void test_attr__open(struct perf_event_attr *attr, pid_t pid, int cpu,
{
int errno_saved = errno;
if (store_event(attr, pid, cpu, fd, group_fd, flags))
die("test attr FAILED");
if (store_event(attr, pid, cpu, fd, group_fd, flags)) {
pr_err("test attr FAILED");
exit(128);
}
errno = errno_saved;
}


@ -16,6 +16,13 @@ class Fail(Exception):
def getMsg(self):
return '\'%s\' - %s' % (self.test.path, self.msg)
class Notest(Exception):
def __init__(self, test, arch):
self.arch = arch
self.test = test
def getMsg(self):
return '[%s] \'%s\'' % (self.arch, self.test.path)
class Unsup(Exception):
def __init__(self, test):
self.test = test
@ -112,6 +119,9 @@ class Event(dict):
# 'command' - perf command name
# 'args' - special command arguments
# 'ret' - expected command return value (0 by default)
# 'arch' - architecture specific test (optional)
# comma separated list, ! at the beginning
# negates it.
#
# [eventX:base]
# - one or multiple instances in file
@ -134,6 +144,12 @@ class Test(object):
except:
self.ret = 0
try:
self.arch = parser.get('config', 'arch')
log.warning("test limitation '%s'" % self.arch)
except:
self.arch = ''
self.expect = {}
self.result = {}
log.debug(" loading expected events");
@ -145,6 +161,31 @@ class Test(object):
else:
return True
def skip_test(self, myarch):
# If architecture not set always run test
if self.arch == '':
# log.warning("test for arch %s is ok" % myarch)
return False
# Allow multiple values in assignment separated by ','
arch_list = self.arch.split(',')
# Handle negated list such as !s390x,ppc
if arch_list[0][0] == '!':
arch_list[0] = arch_list[0][1:]
log.warning("excluded architecture list %s" % arch_list)
for arch_item in arch_list:
# log.warning("test for %s arch is %s" % (arch_item, myarch))
if arch_item == myarch:
return True
return False
for arch_item in arch_list:
# log.warning("test for architecture '%s' current '%s'" % (arch_item, myarch))
if arch_item == myarch:
return False
return True
def load_events(self, path, events):
parser_event = ConfigParser.SafeConfigParser()
parser_event.read(path)
@ -168,6 +209,11 @@ class Test(object):
events[section] = e
def run_cmd(self, tempdir):
junk1, junk2, junk3, junk4, myarch = (os.uname())
if self.skip_test(myarch):
raise Notest(self, myarch)
cmd = "PERF_TEST_ATTR=%s %s %s -o %s/perf.data %s" % (tempdir,
self.perf, self.command, tempdir, self.args)
ret = os.WEXITSTATUS(os.system(cmd))
@ -265,6 +311,8 @@ def run_tests(options):
Test(f, options).run()
except Unsup, obj:
log.warning("unsupp %s" % obj.getMsg())
except Notest, obj:
log.warning("skipped %s" % obj.getMsg())
def setup_log(verbose):
global log


@ -1810,17 +1810,6 @@ static int test_pmu_events(void)
return ret;
}
static void debug_warn(const char *warn, va_list params)
{
char msg[1024];
if (verbose <= 0)
return;
vsnprintf(msg, sizeof(msg), warn, params);
fprintf(stderr, " Warning: %s\n", msg);
}
int test__parse_events(int subtest __maybe_unused)
{
int ret1, ret2 = 0;
@ -1832,8 +1821,6 @@ do { \
ret2 = ret1; \
} while (0)
set_warning_routine(debug_warn);
TEST_EVENTS(test__events);
if (test_pmu())


@ -322,6 +322,13 @@ static int auxtrace_queues__add_event_buffer(struct auxtrace_queues *queues,
return auxtrace_queues__add_buffer(queues, idx, buffer);
}
static bool filter_cpu(struct perf_session *session, int cpu)
{
unsigned long *cpu_bitmap = session->itrace_synth_opts->cpu_bitmap;
return cpu_bitmap && cpu != -1 && !test_bit(cpu, cpu_bitmap);
}
int auxtrace_queues__add_event(struct auxtrace_queues *queues,
struct perf_session *session,
union perf_event *event, off_t data_offset,
@ -331,6 +338,9 @@ int auxtrace_queues__add_event(struct auxtrace_queues *queues,
unsigned int idx;
int err;
if (filter_cpu(session, event->auxtrace.cpu))
return 0;
buffer = zalloc(sizeof(struct auxtrace_buffer));
if (!buffer)
return -ENOMEM;
@ -947,6 +957,8 @@ void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts)
synth_opts->instructions = true;
synth_opts->branches = true;
synth_opts->transactions = true;
synth_opts->ptwrites = true;
synth_opts->pwr_events = true;
synth_opts->errors = true;
synth_opts->period_type = PERF_ITRACE_DEFAULT_PERIOD_TYPE;
synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
@ -1030,6 +1042,12 @@ int itrace_parse_synth_opts(const struct option *opt, const char *str,
case 'x':
synth_opts->transactions = true;
break;
case 'w':
synth_opts->ptwrites = true;
break;
case 'p':
synth_opts->pwr_events = true;
break;
case 'e':
synth_opts->errors = true;
break;


@ -59,6 +59,8 @@ enum itrace_period_type {
* @instructions: whether to synthesize 'instructions' events
* @branches: whether to synthesize 'branches' events
* @transactions: whether to synthesize events for transactions
* @ptwrites: whether to synthesize events for ptwrites
* @pwr_events: whether to synthesize power events
* @errors: whether to synthesize decoder error events
* @dont_decode: whether to skip decoding entirely
* @log: write a decoding log
@ -72,6 +74,7 @@ enum itrace_period_type {
* @period: 'instructions' events period
* @period_type: 'instructions' events period type
* @initial_skip: skip N events at the beginning.
* @cpu_bitmap: CPUs for which to synthesize events, or NULL for all
*/
struct itrace_synth_opts {
bool set;
@ -79,6 +82,8 @@ struct itrace_synth_opts {
bool instructions;
bool branches;
bool transactions;
bool ptwrites;
bool pwr_events;
bool errors;
bool dont_decode;
bool log;
@ -92,6 +97,7 @@ struct itrace_synth_opts {
unsigned long long period;
enum itrace_period_type period_type;
unsigned long initial_skip;
unsigned long *cpu_bitmap;
};
/**

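A minimal sketch of how a tool wires up the new fields (perf tree; the
setup_itrace() helper is hypothetical, following what builtin-script.c and
builtin-report.c do elsewhere in this series):

#include "util/session.h"       /* perf tree include paths assumed */
#include "util/auxtrace.h"

static void setup_itrace(struct perf_session *session,
                         unsigned long *cpu_bitmap)
{
        static struct itrace_synth_opts opts;

        /* The default now also enables ptwrites and pwr_events ("ibxwpe"). */
        itrace_synth_opts__set_default(&opts);
        /* NULL means synthesize for all CPUs; otherwise only those set. */
        opts.cpu_bitmap = cpu_bitmap;
        session->itrace_synth_opts = &opts;
}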

@ -335,32 +335,42 @@ static int perf_parse_long(const char *value, long *ret)
return 0;
}
static void die_bad_config(const char *name)
static void bad_config(const char *name)
{
if (config_file_name)
die("bad config value for '%s' in %s", name, config_file_name);
die("bad config value for '%s'", name);
pr_warning("bad config value for '%s' in %s, ignoring...\n", name, config_file_name);
else
pr_warning("bad config value for '%s', ignoring...\n", name);
}
u64 perf_config_u64(const char *name, const char *value)
int perf_config_u64(u64 *dest, const char *name, const char *value)
{
long long ret = 0;
if (!perf_parse_llong(value, &ret))
die_bad_config(name);
return (u64) ret;
if (!perf_parse_llong(value, &ret)) {
bad_config(name);
return -1;
}
*dest = ret;
return 0;
}
int perf_config_int(const char *name, const char *value)
int perf_config_int(int *dest, const char *name, const char *value)
{
long ret = 0;
if (!perf_parse_long(value, &ret))
die_bad_config(name);
return ret;
if (!perf_parse_long(value, &ret)) {
bad_config(name);
return -1;
}
*dest = ret;
return 0;
}
static int perf_config_bool_or_int(const char *name, const char *value, int *is_bool)
{
int ret;
*is_bool = 1;
if (!value)
return 1;
@ -371,7 +381,7 @@ static int perf_config_bool_or_int(const char *name, const char *value, int *is_
if (!strcasecmp(value, "false") || !strcasecmp(value, "no") || !strcasecmp(value, "off"))
return 0;
*is_bool = 0;
return perf_config_int(name, value);
return perf_config_int(&ret, name, value) < 0 ? -1 : ret;
}
int perf_config_bool(const char *name, const char *value)
@ -657,8 +667,7 @@ static int perf_config_set__init(struct perf_config_set *set)
user_config = strdup(mkpath("%s/.perfconfig", home));
if (user_config == NULL) {
warning("Not enough memory to process %s/.perfconfig, "
"ignoring it.", home);
pr_warning("Not enough memory to process %s/.perfconfig, ignoring it.", home);
goto out;
}
@ -671,8 +680,7 @@ static int perf_config_set__init(struct perf_config_set *set)
ret = 0;
if (st.st_uid && (st.st_uid != geteuid())) {
warning("File %s not owned by current user or root, "
"ignoring it.", user_config);
pr_warning("File %s not owned by current user or root, ignoring it.", user_config);
goto out_free;
}
@ -795,7 +803,8 @@ void perf_config_set__delete(struct perf_config_set *set)
*/
int config_error_nonbool(const char *var)
{
return error("Missing value for '%s'", var);
pr_err("Missing value for '%s'", var);
return -1;
}
void set_buildid_dir(const char *dir)


@ -27,8 +27,8 @@ extern const char *config_exclusive_filename;
typedef int (*config_fn_t)(const char *, const char *, void *);
int perf_default_config(const char *, const char *, void *);
int perf_config(config_fn_t fn, void *);
int perf_config_int(const char *, const char *);
u64 perf_config_u64(const char *, const char *);
int perf_config_int(int *dest, const char *, const char *);
int perf_config_u64(u64 *dest, const char *, const char *);
int perf_config_bool(const char *, const char *);
int config_error_nonbool(const char *);
const char *perf_etc_perfconfig(void);
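
The new signatures return an error instead of dying on a bad value; a minimal
sketch of a config callback using them (example__config() is hypothetical,
perf tree, mirroring the convert__config() and report__config() changes
elsewhere in this series):

#include <string.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include "util/config.h"        /* perf tree include paths assumed */

static u64 queue_size;

static int example__config(const char *var, const char *value,
                           void *cb __maybe_unused)
{
        /* A malformed value now warns and returns -1 instead of die()ing. */
        if (!strcmp(var, "example.queue-size"))
                return perf_config_u64(&queue_size, var, value);
        return 0;
}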


@ -1444,10 +1444,8 @@ static int convert__config(const char *var, const char *value, void *cb)
{
struct convert *c = cb;
if (!strcmp(var, "convert.queue-size")) {
c->queue_size = perf_config_u64(var, value);
return 0;
}
if (!strcmp(var, "convert.queue-size"))
return perf_config_u64(&c->queue_size, var, value);
return 0;
}


@ -252,6 +252,127 @@ enum auxtrace_error_type {
PERF_AUXTRACE_ERROR_MAX
};
/* Attribute type for custom synthesized events */
#define PERF_TYPE_SYNTH (INT_MAX + 1U)
/* Attribute config for custom synthesized events */
enum perf_synth_id {
PERF_SYNTH_INTEL_PTWRITE,
PERF_SYNTH_INTEL_MWAIT,
PERF_SYNTH_INTEL_PWRE,
PERF_SYNTH_INTEL_EXSTOP,
PERF_SYNTH_INTEL_PWRX,
PERF_SYNTH_INTEL_CBR,
};
/*
* Raw data formats for synthesized events. Note that 4 bytes of padding are
* present to match the 'size' member of PERF_SAMPLE_RAW data which is always
* 8-byte aligned. That means we must dereference raw_data with an offset of 4.
* Refer perf_sample__synth_ptr() and perf_synth__raw_data(). It also means the
* structure sizes are 4 bytes bigger than the raw_size, refer
* perf_synth__raw_size().
*/
struct perf_synth_intel_ptwrite {
u32 padding;
union {
struct {
u32 ip : 1,
reserved : 31;
};
u32 flags;
};
u64 payload;
};
struct perf_synth_intel_mwait {
u32 padding;
u32 reserved;
union {
struct {
u64 hints : 8,
reserved1 : 24,
extensions : 2,
reserved2 : 30;
};
u64 payload;
};
};
struct perf_synth_intel_pwre {
u32 padding;
u32 reserved;
union {
struct {
u64 reserved1 : 7,
hw : 1,
subcstate : 4,
cstate : 4,
reserved2 : 48;
};
u64 payload;
};
};
struct perf_synth_intel_exstop {
u32 padding;
union {
struct {
u32 ip : 1,
reserved : 31;
};
u32 flags;
};
};
struct perf_synth_intel_pwrx {
u32 padding;
u32 reserved;
union {
struct {
u64 deepest_cstate : 4,
last_cstate : 4,
wake_reason : 4,
reserved1 : 52;
};
u64 payload;
};
};
struct perf_synth_intel_cbr {
u32 padding;
union {
struct {
u32 cbr : 8,
reserved1 : 8,
max_nonturbo : 8,
reserved2 : 8;
};
u32 flags;
};
u32 freq;
u32 reserved3;
};
/*
* raw_data is always 4 bytes from an 8-byte boundary, so subtract 4 to get
* 8-byte alignment.
*/
static inline void *perf_sample__synth_ptr(struct perf_sample *sample)
{
return sample->raw_data - 4;
}
static inline void *perf_synth__raw_data(void *p)
{
return p + 4;
}
#define perf_synth__raw_size(d) (sizeof(d) - 4)
#define perf_sample__bad_synth_size(s, d) ((s)->raw_size < sizeof(d) - 4)
/*
* The kernel collects the number of events it couldn't send in a stretch and
* when possible sends this number in a PERF_RECORD_LOST event. The number of

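A short illustration of the helpers above (sample__ptw_payload() is a
hypothetical perf-tree function): for the ptwrite record,
sizeof(struct perf_synth_intel_ptwrite) is 16, so the PERF_SAMPLE_RAW size is
12 and raw_data points at the 'flags' member, 4 bytes into the structure.

/* Needs util/event.h and the byte-order helpers from the tools kernel.h. */
static u64 sample__ptw_payload(struct perf_sample *sample)
{
        struct perf_synth_intel_ptwrite *raw = perf_sample__synth_ptr(sample);

        if (perf_sample__bad_synth_size(sample, *raw))
                return 0;
        return le64_to_cpu(raw->payload);
}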

@ -12,7 +12,7 @@ static int perf_unknown_cmd_config(const char *var, const char *value,
void *cb __maybe_unused)
{
if (!strcmp(var, "help.autocorrect"))
autocorrect = perf_config_int(var,value);
return perf_config_int(&autocorrect, var,value);
return 0;
}


@ -711,6 +711,12 @@ static int intel_pt_calc_cyc_cb(struct intel_pt_pkt_info *pkt_info)
break;
case INTEL_PT_TSC:
/*
* For now, do not support using TSC packets - refer
* intel_pt_calc_cyc_to_tsc().
*/
if (data->from_mtc)
return 1;
timestamp = pkt_info->packet.payload |
(data->timestamp & (0xffULL << 56));
if (data->from_mtc && timestamp < data->timestamp &&
@ -828,6 +834,14 @@ static void intel_pt_calc_cyc_to_tsc(struct intel_pt_decoder *decoder,
.cbr_cyc_to_tsc = 0,
};
/*
* For now, do not support using TSC packets for at least the reasons:
* 1) timing might have stopped
* 2) TSC packets within PSB+ can slip against CYC packets
*/
if (!from_mtc)
return;
intel_pt_pkt_lookahead(decoder, intel_pt_calc_cyc_cb, &data);
}


@ -1009,7 +1009,7 @@ GrpTable: Grp15
1: fxstor | RDGSBASE Ry (F3),(11B)
2: vldmxcsr Md (v1) | WRFSBASE Ry (F3),(11B)
3: vstmxcsr Md (v1) | WRGSBASE Ry (F3),(11B)
4: XSAVE
4: XSAVE | ptwrite Ey (F3),(11B)
5: XRSTOR | lfence (11B)
6: XSAVEOPT | clwb (66) | mfence (11B)
7: clflush | clflushopt (66) | sfence (11B)


@ -81,7 +81,6 @@ struct intel_pt {
bool sample_instructions;
u64 instructions_sample_type;
u64 instructions_sample_period;
u64 instructions_id;
bool sample_branches;
@ -93,6 +92,18 @@ struct intel_pt {
u64 transactions_sample_type;
u64 transactions_id;
bool sample_ptwrites;
u64 ptwrites_sample_type;
u64 ptwrites_id;
bool sample_pwr_events;
u64 pwr_events_sample_type;
u64 mwait_id;
u64 pwre_id;
u64 exstop_id;
u64 pwrx_id;
u64 cbr_id;
bool synth_needs_swap;
u64 tsc_bit;
@ -103,6 +114,7 @@ struct intel_pt {
u64 cyc_bit;
u64 noretcomp_bit;
unsigned max_non_turbo_ratio;
unsigned cbr2khz;
unsigned long num_events;
@ -1058,6 +1070,36 @@ static void intel_pt_update_last_branch_rb(struct intel_pt_queue *ptq)
bs->nr += 1;
}
static inline bool intel_pt_skip_event(struct intel_pt *pt)
{
return pt->synth_opts.initial_skip &&
pt->num_events++ < pt->synth_opts.initial_skip;
}
static void intel_pt_prep_b_sample(struct intel_pt *pt,
struct intel_pt_queue *ptq,
union perf_event *event,
struct perf_sample *sample)
{
event->sample.header.type = PERF_RECORD_SAMPLE;
event->sample.header.misc = PERF_RECORD_MISC_USER;
event->sample.header.size = sizeof(struct perf_event_header);
if (!pt->timeless_decoding)
sample->time = tsc_to_perf_time(ptq->timestamp, &pt->tc);
sample->cpumode = PERF_RECORD_MISC_USER;
sample->ip = ptq->state->from_ip;
sample->pid = ptq->pid;
sample->tid = ptq->tid;
sample->addr = ptq->state->to_ip;
sample->period = 1;
sample->cpu = ptq->cpu;
sample->flags = ptq->flags;
sample->insn_len = ptq->insn_len;
memcpy(sample->insn, ptq->insn, INTEL_PT_INSN_BUF_SZ);
}
static int intel_pt_inject_event(union perf_event *event,
struct perf_sample *sample, u64 type,
bool swapped)
@ -1066,9 +1108,35 @@ static int intel_pt_inject_event(union perf_event *event,
return perf_event__synthesize_sample(event, type, 0, sample, swapped);
}
static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
static inline int intel_pt_opt_inject(struct intel_pt *pt,
union perf_event *event,
struct perf_sample *sample, u64 type)
{
if (!pt->synth_opts.inject)
return 0;
return intel_pt_inject_event(event, sample, type, pt->synth_needs_swap);
}
static int intel_pt_deliver_synth_b_event(struct intel_pt *pt,
union perf_event *event,
struct perf_sample *sample, u64 type)
{
int ret;
ret = intel_pt_opt_inject(pt, event, sample, type);
if (ret)
return ret;
ret = perf_session__deliver_synth_event(pt->session, event, sample);
if (ret)
pr_err("Intel PT: failed to deliver event, error %d\n", ret);
return ret;
}
static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
{
struct intel_pt *pt = ptq->pt;
union perf_event *event = ptq->event_buf;
struct perf_sample sample = { .ip = 0, };
@ -1080,29 +1148,13 @@ static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
if (pt->branches_filter && !(pt->branches_filter & ptq->flags))
return 0;
if (pt->synth_opts.initial_skip &&
pt->num_events++ < pt->synth_opts.initial_skip)
if (intel_pt_skip_event(pt))
return 0;
event->sample.header.type = PERF_RECORD_SAMPLE;
event->sample.header.misc = PERF_RECORD_MISC_USER;
event->sample.header.size = sizeof(struct perf_event_header);
intel_pt_prep_b_sample(pt, ptq, event, &sample);
if (!pt->timeless_decoding)
sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc);
sample.cpumode = PERF_RECORD_MISC_USER;
sample.ip = ptq->state->from_ip;
sample.pid = ptq->pid;
sample.tid = ptq->tid;
sample.addr = ptq->state->to_ip;
sample.id = ptq->pt->branches_id;
sample.stream_id = ptq->pt->branches_id;
sample.period = 1;
sample.cpu = ptq->cpu;
sample.flags = ptq->flags;
sample.insn_len = ptq->insn_len;
memcpy(sample.insn, ptq->insn, INTEL_PT_INSN_BUF_SZ);
/*
* perf report cannot handle events without a branch stack when using
@ -1119,144 +1171,251 @@ static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
sample.branch_stack = (struct branch_stack *)&dummy_bs;
}
if (pt->synth_opts.inject) {
ret = intel_pt_inject_event(event, &sample,
pt->branches_sample_type,
pt->synth_needs_swap);
if (ret)
return ret;
return intel_pt_deliver_synth_b_event(pt, event, &sample,
pt->branches_sample_type);
}
static void intel_pt_prep_sample(struct intel_pt *pt,
struct intel_pt_queue *ptq,
union perf_event *event,
struct perf_sample *sample)
{
intel_pt_prep_b_sample(pt, ptq, event, sample);
if (pt->synth_opts.callchain) {
thread_stack__sample(ptq->thread, ptq->chain,
pt->synth_opts.callchain_sz, sample->ip);
sample->callchain = ptq->chain;
}
ret = perf_session__deliver_synth_event(pt->session, event, &sample);
if (ret)
pr_err("Intel Processor Trace: failed to deliver branch event, error %d\n",
ret);
if (pt->synth_opts.last_branch) {
intel_pt_copy_last_branch_rb(ptq);
sample->branch_stack = ptq->last_branch;
}
}
static inline int intel_pt_deliver_synth_event(struct intel_pt *pt,
struct intel_pt_queue *ptq,
union perf_event *event,
struct perf_sample *sample,
u64 type)
{
int ret;
ret = intel_pt_deliver_synth_b_event(pt, event, sample, type);
if (pt->synth_opts.last_branch)
intel_pt_reset_last_branch_rb(ptq);
return ret;
}
static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq)
{
int ret;
struct intel_pt *pt = ptq->pt;
union perf_event *event = ptq->event_buf;
struct perf_sample sample = { .ip = 0, };
if (pt->synth_opts.initial_skip &&
pt->num_events++ < pt->synth_opts.initial_skip)
if (intel_pt_skip_event(pt))
return 0;
event->sample.header.type = PERF_RECORD_SAMPLE;
event->sample.header.misc = PERF_RECORD_MISC_USER;
event->sample.header.size = sizeof(struct perf_event_header);
intel_pt_prep_sample(pt, ptq, event, &sample);
if (!pt->timeless_decoding)
sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc);
sample.cpumode = PERF_RECORD_MISC_USER;
sample.ip = ptq->state->from_ip;
sample.pid = ptq->pid;
sample.tid = ptq->tid;
sample.addr = ptq->state->to_ip;
sample.id = ptq->pt->instructions_id;
sample.stream_id = ptq->pt->instructions_id;
sample.period = ptq->state->tot_insn_cnt - ptq->last_insn_cnt;
sample.cpu = ptq->cpu;
sample.flags = ptq->flags;
sample.insn_len = ptq->insn_len;
memcpy(sample.insn, ptq->insn, INTEL_PT_INSN_BUF_SZ);
ptq->last_insn_cnt = ptq->state->tot_insn_cnt;
if (pt->synth_opts.callchain) {
thread_stack__sample(ptq->thread, ptq->chain,
pt->synth_opts.callchain_sz, sample.ip);
sample.callchain = ptq->chain;
}
if (pt->synth_opts.last_branch) {
intel_pt_copy_last_branch_rb(ptq);
sample.branch_stack = ptq->last_branch;
}
if (pt->synth_opts.inject) {
ret = intel_pt_inject_event(event, &sample,
pt->instructions_sample_type,
pt->synth_needs_swap);
if (ret)
return ret;
}
ret = perf_session__deliver_synth_event(pt->session, event, &sample);
if (ret)
pr_err("Intel Processor Trace: failed to deliver instruction event, error %d\n",
ret);
if (pt->synth_opts.last_branch)
intel_pt_reset_last_branch_rb(ptq);
return ret;
return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
pt->instructions_sample_type);
}
static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
{
int ret;
struct intel_pt *pt = ptq->pt;
union perf_event *event = ptq->event_buf;
struct perf_sample sample = { .ip = 0, };
if (pt->synth_opts.initial_skip &&
pt->num_events++ < pt->synth_opts.initial_skip)
if (intel_pt_skip_event(pt))
return 0;
event->sample.header.type = PERF_RECORD_SAMPLE;
event->sample.header.misc = PERF_RECORD_MISC_USER;
event->sample.header.size = sizeof(struct perf_event_header);
intel_pt_prep_sample(pt, ptq, event, &sample);
if (!pt->timeless_decoding)
sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc);
sample.cpumode = PERF_RECORD_MISC_USER;
sample.ip = ptq->state->from_ip;
sample.pid = ptq->pid;
sample.tid = ptq->tid;
sample.addr = ptq->state->to_ip;
sample.id = ptq->pt->transactions_id;
sample.stream_id = ptq->pt->transactions_id;
sample.period = 1;
sample.cpu = ptq->cpu;
sample.flags = ptq->flags;
sample.insn_len = ptq->insn_len;
memcpy(sample.insn, ptq->insn, INTEL_PT_INSN_BUF_SZ);
if (pt->synth_opts.callchain) {
thread_stack__sample(ptq->thread, ptq->chain,
pt->synth_opts.callchain_sz, sample.ip);
sample.callchain = ptq->chain;
}
return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
pt->transactions_sample_type);
}
if (pt->synth_opts.last_branch) {
intel_pt_copy_last_branch_rb(ptq);
sample.branch_stack = ptq->last_branch;
}
static void intel_pt_prep_p_sample(struct intel_pt *pt,
struct intel_pt_queue *ptq,
union perf_event *event,
struct perf_sample *sample)
{
intel_pt_prep_sample(pt, ptq, event, sample);
if (pt->synth_opts.inject) {
ret = intel_pt_inject_event(event, &sample,
pt->transactions_sample_type,
pt->synth_needs_swap);
if (ret)
return ret;
}
/*
* Zero IP is used to mean "trace start" but that is not the case for
* power or PTWRITE events with no IP, so clear the flags.
*/
if (!sample->ip)
sample->flags = 0;
}
ret = perf_session__deliver_synth_event(pt->session, event, &sample);
if (ret)
pr_err("Intel Processor Trace: failed to deliver transaction event, error %d\n",
ret);
static int intel_pt_synth_ptwrite_sample(struct intel_pt_queue *ptq)
{
struct intel_pt *pt = ptq->pt;
union perf_event *event = ptq->event_buf;
struct perf_sample sample = { .ip = 0, };
struct perf_synth_intel_ptwrite raw;
if (pt->synth_opts.last_branch)
intel_pt_reset_last_branch_rb(ptq);
if (intel_pt_skip_event(pt))
return 0;
return ret;
intel_pt_prep_p_sample(pt, ptq, event, &sample);
sample.id = ptq->pt->ptwrites_id;
sample.stream_id = ptq->pt->ptwrites_id;
raw.flags = 0;
raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
raw.payload = cpu_to_le64(ptq->state->ptw_payload);
sample.raw_size = perf_synth__raw_size(raw);
sample.raw_data = perf_synth__raw_data(&raw);
return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
pt->ptwrites_sample_type);
}
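
For readers of the resulting perf.data, the interesting part of a ptwrite sample is the PERF_SAMPLE_RAW payload filled in above. The sketch below decodes such a payload under the assumption of a little-endian u32 flags word (bit 0 set when the IP comes from an associated FUP packet) immediately followed by the u64 PTWRITE operand; the authoritative layout is struct perf_synth_intel_ptwrite together with perf_synth__raw_data()/perf_synth__raw_size() in perf's headers, so treat the offsets here as illustrative only.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void show_ptwrite(const void *raw_data, size_t raw_size)
{
        uint32_t flags;
        uint64_t payload;

        if (raw_size < sizeof(flags) + sizeof(payload))
                return;
        memcpy(&flags, raw_data, sizeof(flags));
        memcpy(&payload, (const char *)raw_data + sizeof(flags), sizeof(payload));
        printf("ptwrite: ip=%u payload=%#llx\n",
               (unsigned int)(flags & 1), (unsigned long long)payload);
}

int main(void)
{
        /* 4-byte flags (ip=1) followed by an 8-byte payload, little-endian. */
        unsigned char buf[12] = {
                0x01, 0x00, 0x00, 0x00,
                0xef, 0xbe, 0xad, 0xde, 0x00, 0x00, 0x00, 0x00,
        };

        show_ptwrite(buf, sizeof(buf)); /* prints ip=1 payload=0xdeadbeef on LE hosts */
        return 0;
}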
static int intel_pt_synth_cbr_sample(struct intel_pt_queue *ptq)
{
struct intel_pt *pt = ptq->pt;
union perf_event *event = ptq->event_buf;
struct perf_sample sample = { .ip = 0, };
struct perf_synth_intel_cbr raw;
u32 flags;
if (intel_pt_skip_event(pt))
return 0;
intel_pt_prep_p_sample(pt, ptq, event, &sample);
sample.id = ptq->pt->cbr_id;
sample.stream_id = ptq->pt->cbr_id;
flags = (u16)ptq->state->cbr_payload | (pt->max_non_turbo_ratio << 16);
raw.flags = cpu_to_le32(flags);
raw.freq = cpu_to_le32(raw.cbr * pt->cbr2khz);
raw.reserved3 = 0;
sample.raw_size = perf_synth__raw_size(raw);
sample.raw_data = perf_synth__raw_data(&raw);
return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
pt->pwr_events_sample_type);
}
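
For context on the numbers involved: the frequency above scales the 8-bit core-to-bus ratio carried in the CBR payload by cbr2khz, which is computed further down (in intel_pt_process_auxtrace_info()) as tsc_freq / max_non_turbo_ratio / 1000. A worked example with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t tsc_freq = 2100000000ULL;      /* 2.1 GHz TSC, hypothetical */
        uint32_t max_non_turbo_ratio = 21;      /* so one ratio step == 100 MHz */
        uint32_t cbr2khz = tsc_freq / max_non_turbo_ratio / 1000; /* 100000 kHz per step */
        uint32_t cbr = 30;                      /* core-to-bus ratio from the trace */

        printf("freq = %u kHz\n", (unsigned int)(cbr * cbr2khz)); /* 3000000 kHz, i.e. 3.0 GHz */
        return 0;
}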
static int intel_pt_synth_mwait_sample(struct intel_pt_queue *ptq)
{
struct intel_pt *pt = ptq->pt;
union perf_event *event = ptq->event_buf;
struct perf_sample sample = { .ip = 0, };
struct perf_synth_intel_mwait raw;
if (intel_pt_skip_event(pt))
return 0;
intel_pt_prep_p_sample(pt, ptq, event, &sample);
sample.id = ptq->pt->mwait_id;
sample.stream_id = ptq->pt->mwait_id;
raw.reserved = 0;
raw.payload = cpu_to_le64(ptq->state->mwait_payload);
sample.raw_size = perf_synth__raw_size(raw);
sample.raw_data = perf_synth__raw_data(&raw);
return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
pt->pwr_events_sample_type);
}
static int intel_pt_synth_pwre_sample(struct intel_pt_queue *ptq)
{
struct intel_pt *pt = ptq->pt;
union perf_event *event = ptq->event_buf;
struct perf_sample sample = { .ip = 0, };
struct perf_synth_intel_pwre raw;
if (intel_pt_skip_event(pt))
return 0;
intel_pt_prep_p_sample(pt, ptq, event, &sample);
sample.id = ptq->pt->pwre_id;
sample.stream_id = ptq->pt->pwre_id;
raw.reserved = 0;
raw.payload = cpu_to_le64(ptq->state->pwre_payload);
sample.raw_size = perf_synth__raw_size(raw);
sample.raw_data = perf_synth__raw_data(&raw);
return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
pt->pwr_events_sample_type);
}
static int intel_pt_synth_exstop_sample(struct intel_pt_queue *ptq)
{
struct intel_pt *pt = ptq->pt;
union perf_event *event = ptq->event_buf;
struct perf_sample sample = { .ip = 0, };
struct perf_synth_intel_exstop raw;
if (intel_pt_skip_event(pt))
return 0;
intel_pt_prep_p_sample(pt, ptq, event, &sample);
sample.id = ptq->pt->exstop_id;
sample.stream_id = ptq->pt->exstop_id;
raw.flags = 0;
raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
sample.raw_size = perf_synth__raw_size(raw);
sample.raw_data = perf_synth__raw_data(&raw);
return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
pt->pwr_events_sample_type);
}
static int intel_pt_synth_pwrx_sample(struct intel_pt_queue *ptq)
{
struct intel_pt *pt = ptq->pt;
union perf_event *event = ptq->event_buf;
struct perf_sample sample = { .ip = 0, };
struct perf_synth_intel_pwrx raw;
if (intel_pt_skip_event(pt))
return 0;
intel_pt_prep_p_sample(pt, ptq, event, &sample);
sample.id = ptq->pt->pwrx_id;
sample.stream_id = ptq->pt->pwrx_id;
raw.reserved = 0;
raw.payload = cpu_to_le64(ptq->state->pwrx_payload);
sample.raw_size = perf_synth__raw_size(raw);
sample.raw_data = perf_synth__raw_data(&raw);
return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
pt->pwr_events_sample_type);
}
static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu,
@ -1310,6 +1469,10 @@ static inline bool intel_pt_is_switch_ip(struct intel_pt_queue *ptq, u64 ip)
PERF_IP_FLAG_INTERRUPT | PERF_IP_FLAG_TX_ABORT));
}
#define INTEL_PT_PWR_EVT (INTEL_PT_MWAIT_OP | INTEL_PT_PWR_ENTRY | \
INTEL_PT_EX_STOP | INTEL_PT_PWR_EXIT | \
INTEL_PT_CBR_CHG)
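
INTEL_PT_PWR_EVT simply ORs the individual power-event bits together so that intel_pt_sample() below can gate the five per-bit checks behind one cheap test. The same idiom in miniature, using invented flag values that stand in for the decoder's INTEL_PT_* bits:

#include <stdio.h>

#define EVT_MWAIT       (1u << 0)       /* placeholder bits, not the real values */
#define EVT_PWRE        (1u << 1)
#define EVT_EXSTOP      (1u << 2)
#define EVT_PWRX        (1u << 3)
#define EVT_CBR         (1u << 4)
#define EVT_PWR_ANY     (EVT_MWAIT | EVT_PWRE | EVT_EXSTOP | EVT_PWRX | EVT_CBR)

static void handle(unsigned int type)
{
        if (!(type & EVT_PWR_ANY))      /* one test covers all five bits */
                return;
        if (type & EVT_CBR)
                puts("cbr");
        if (type & EVT_MWAIT)
                puts("mwait");
        /* ... the remaining bits are handled the same way ... */
}

int main(void)
{
        handle(EVT_CBR | EVT_MWAIT);    /* prints "cbr" then "mwait" */
        handle(0);                      /* filtered out by the combined mask */
        return 0;
}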
static int intel_pt_sample(struct intel_pt_queue *ptq)
{
const struct intel_pt_state *state = ptq->state;
@ -1321,20 +1484,52 @@ static int intel_pt_sample(struct intel_pt_queue *ptq)
ptq->have_sample = false;
if (pt->sample_instructions &&
(state->type & INTEL_PT_INSTRUCTION)) {
if (pt->sample_pwr_events && (state->type & INTEL_PT_PWR_EVT)) {
if (state->type & INTEL_PT_CBR_CHG) {
err = intel_pt_synth_cbr_sample(ptq);
if (err)
return err;
}
if (state->type & INTEL_PT_MWAIT_OP) {
err = intel_pt_synth_mwait_sample(ptq);
if (err)
return err;
}
if (state->type & INTEL_PT_PWR_ENTRY) {
err = intel_pt_synth_pwre_sample(ptq);
if (err)
return err;
}
if (state->type & INTEL_PT_EX_STOP) {
err = intel_pt_synth_exstop_sample(ptq);
if (err)
return err;
}
if (state->type & INTEL_PT_PWR_EXIT) {
err = intel_pt_synth_pwrx_sample(ptq);
if (err)
return err;
}
}
if (pt->sample_instructions && (state->type & INTEL_PT_INSTRUCTION)) {
err = intel_pt_synth_instruction_sample(ptq);
if (err)
return err;
}
if (pt->sample_transactions &&
(state->type & INTEL_PT_TRANSACTION)) {
if (pt->sample_transactions && (state->type & INTEL_PT_TRANSACTION)) {
err = intel_pt_synth_transaction_sample(ptq);
if (err)
return err;
}
if (pt->sample_ptwrites && (state->type & INTEL_PT_PTW)) {
err = intel_pt_synth_ptwrite_sample(ptq);
if (err)
return err;
}
if (!(state->type & INTEL_PT_BRANCH))
return 0;
@ -1935,36 +2130,65 @@ static int intel_pt_event_synth(struct perf_tool *tool,
NULL);
}
static int intel_pt_synth_event(struct perf_session *session,
static int intel_pt_synth_event(struct perf_session *session, const char *name,
struct perf_event_attr *attr, u64 id)
{
struct intel_pt_synth intel_pt_synth;
int err;
pr_debug("Synthesizing '%s' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
name, id, (u64)attr->sample_type);
memset(&intel_pt_synth, 0, sizeof(struct intel_pt_synth));
intel_pt_synth.session = session;
return perf_event__synthesize_attr(&intel_pt_synth.dummy_tool, attr, 1,
&id, intel_pt_event_synth);
err = perf_event__synthesize_attr(&intel_pt_synth.dummy_tool, attr, 1,
&id, intel_pt_event_synth);
if (err)
pr_err("%s: failed to synthesize '%s' event type\n",
__func__, name);
return err;
}
static void intel_pt_set_event_name(struct perf_evlist *evlist, u64 id,
const char *name)
{
struct perf_evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
if (evsel->id && evsel->id[0] == id) {
if (evsel->name)
zfree(&evsel->name);
evsel->name = strdup(name);
break;
}
}
}
static struct perf_evsel *intel_pt_evsel(struct intel_pt *pt,
struct perf_evlist *evlist)
{
struct perf_evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
if (evsel->attr.type == pt->pmu_type && evsel->ids)
return evsel;
}
return NULL;
}
static int intel_pt_synth_events(struct intel_pt *pt,
struct perf_session *session)
{
struct perf_evlist *evlist = session->evlist;
struct perf_evsel *evsel;
struct perf_evsel *evsel = intel_pt_evsel(pt, evlist);
struct perf_event_attr attr;
bool found = false;
u64 id;
int err;
evlist__for_each_entry(evlist, evsel) {
if (evsel->attr.type == pt->pmu_type && evsel->ids) {
found = true;
break;
}
}
if (!found) {
if (!evsel) {
pr_debug("There are no selected events with Intel Processor Trace data\n");
return 0;
}
@ -1993,6 +2217,25 @@ static int intel_pt_synth_events(struct intel_pt *pt,
if (!id)
id = 1;
if (pt->synth_opts.branches) {
attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
attr.sample_period = 1;
attr.sample_type |= PERF_SAMPLE_ADDR;
err = intel_pt_synth_event(session, "branches", &attr, id);
if (err)
return err;
pt->sample_branches = true;
pt->branches_sample_type = attr.sample_type;
pt->branches_id = id;
id += 1;
attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
}
if (pt->synth_opts.callchain)
attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
if (pt->synth_opts.last_branch)
attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
if (pt->synth_opts.instructions) {
attr.config = PERF_COUNT_HW_INSTRUCTIONS;
if (pt->synth_opts.period_type == PERF_ITRACE_PERIOD_NANOSECS)
@ -2000,71 +2243,90 @@ static int intel_pt_synth_events(struct intel_pt *pt,
intel_pt_ns_to_ticks(pt, pt->synth_opts.period);
else
attr.sample_period = pt->synth_opts.period;
pt->instructions_sample_period = attr.sample_period;
if (pt->synth_opts.callchain)
attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
if (pt->synth_opts.last_branch)
attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
pr_debug("Synthesizing 'instructions' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
id, (u64)attr.sample_type);
err = intel_pt_synth_event(session, &attr, id);
if (err) {
pr_err("%s: failed to synthesize 'instructions' event type\n",
__func__);
err = intel_pt_synth_event(session, "instructions", &attr, id);
if (err)
return err;
}
pt->sample_instructions = true;
pt->instructions_sample_type = attr.sample_type;
pt->instructions_id = id;
id += 1;
}
attr.sample_type &= ~(u64)PERF_SAMPLE_PERIOD;
attr.sample_period = 1;
if (pt->synth_opts.transactions) {
attr.config = PERF_COUNT_HW_INSTRUCTIONS;
attr.sample_period = 1;
if (pt->synth_opts.callchain)
attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
if (pt->synth_opts.last_branch)
attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
pr_debug("Synthesizing 'transactions' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
id, (u64)attr.sample_type);
err = intel_pt_synth_event(session, &attr, id);
if (err) {
pr_err("%s: failed to synthesize 'transactions' event type\n",
__func__);
err = intel_pt_synth_event(session, "transactions", &attr, id);
if (err)
return err;
}
pt->sample_transactions = true;
pt->transactions_sample_type = attr.sample_type;
pt->transactions_id = id;
intel_pt_set_event_name(evlist, id, "transactions");
id += 1;
evlist__for_each_entry(evlist, evsel) {
if (evsel->id && evsel->id[0] == pt->transactions_id) {
if (evsel->name)
zfree(&evsel->name);
evsel->name = strdup("transactions");
break;
}
}
}
if (pt->synth_opts.branches) {
attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
attr.sample_period = 1;
attr.sample_type |= PERF_SAMPLE_ADDR;
attr.sample_type &= ~(u64)PERF_SAMPLE_CALLCHAIN;
attr.sample_type &= ~(u64)PERF_SAMPLE_BRANCH_STACK;
pr_debug("Synthesizing 'branches' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
id, (u64)attr.sample_type);
err = intel_pt_synth_event(session, &attr, id);
if (err) {
pr_err("%s: failed to synthesize 'branches' event type\n",
__func__);
attr.type = PERF_TYPE_SYNTH;
attr.sample_type |= PERF_SAMPLE_RAW;
if (pt->synth_opts.ptwrites) {
attr.config = PERF_SYNTH_INTEL_PTWRITE;
err = intel_pt_synth_event(session, "ptwrite", &attr, id);
if (err)
return err;
}
pt->sample_branches = true;
pt->branches_sample_type = attr.sample_type;
pt->branches_id = id;
pt->sample_ptwrites = true;
pt->ptwrites_sample_type = attr.sample_type;
pt->ptwrites_id = id;
intel_pt_set_event_name(evlist, id, "ptwrite");
id += 1;
}
if (pt->synth_opts.pwr_events) {
pt->sample_pwr_events = true;
pt->pwr_events_sample_type = attr.sample_type;
attr.config = PERF_SYNTH_INTEL_CBR;
err = intel_pt_synth_event(session, "cbr", &attr, id);
if (err)
return err;
pt->cbr_id = id;
intel_pt_set_event_name(evlist, id, "cbr");
id += 1;
}
if (pt->synth_opts.pwr_events && (evsel->attr.config & 0x10)) {
attr.config = PERF_SYNTH_INTEL_MWAIT;
err = intel_pt_synth_event(session, "mwait", &attr, id);
if (err)
return err;
pt->mwait_id = id;
intel_pt_set_event_name(evlist, id, "mwait");
id += 1;
attr.config = PERF_SYNTH_INTEL_PWRE;
err = intel_pt_synth_event(session, "pwre", &attr, id);
if (err)
return err;
pt->pwre_id = id;
intel_pt_set_event_name(evlist, id, "pwre");
id += 1;
attr.config = PERF_SYNTH_INTEL_EXSTOP;
err = intel_pt_synth_event(session, "exstop", &attr, id);
if (err)
return err;
pt->exstop_id = id;
intel_pt_set_event_name(evlist, id, "exstop");
id += 1;
attr.config = PERF_SYNTH_INTEL_PWRX;
err = intel_pt_synth_event(session, "pwrx", &attr, id);
if (err)
return err;
pt->pwrx_id = id;
intel_pt_set_event_name(evlist, id, "pwrx");
id += 1;
}
pt->synth_needs_swap = evsel->needs_swap;
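
The ptwrite and power events synthesized above are published with attr.type set to PERF_TYPE_SYNTH and attr.config set to one of the PERF_SYNTH_INTEL_* identifiers, so a consumer can tell the records apart without matching on event names. A rough, self-contained sketch of that dispatch; the numeric values below are placeholders only, the real PERF_TYPE_SYNTH and PERF_SYNTH_INTEL_* definitions come from perf's headers:

#include <stdint.h>
#include <stdio.h>

#define SYNTH_TYPE      0x80000000u     /* placeholder for PERF_TYPE_SYNTH */

enum {                                  /* placeholders for PERF_SYNTH_INTEL_* */
        SYNTH_PTWRITE,
        SYNTH_MWAIT,
        SYNTH_PWRE,
        SYNTH_EXSTOP,
        SYNTH_PWRX,
        SYNTH_CBR,
};

static const char *synth_name(uint32_t type, uint64_t config)
{
        if (type != SYNTH_TYPE)
                return "not a synthesized event";
        switch (config) {
        case SYNTH_PTWRITE:     return "ptwrite";
        case SYNTH_MWAIT:       return "mwait";
        case SYNTH_PWRE:        return "pwre";
        case SYNTH_EXSTOP:      return "exstop";
        case SYNTH_PWRX:        return "pwrx";
        case SYNTH_CBR:         return "cbr";
        default:                return "unknown synth config";
        }
}

int main(void)
{
        printf("%s\n", synth_name(SYNTH_TYPE, SYNTH_CBR));     /* "cbr" */
        return 0;
}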
@ -2333,6 +2595,7 @@ int intel_pt_process_auxtrace_info(union perf_event *event,
intel_pt_log("TSC frequency %"PRIu64"\n", tsc_freq);
intel_pt_log("Maximum non-turbo ratio %u\n",
pt->max_non_turbo_ratio);
pt->cbr2khz = tsc_freq / pt->max_non_turbo_ratio / 1000;
}
if (pt->synth_opts.calls)


@ -2532,12 +2532,12 @@ static int setup_sort_list(struct perf_hpp_list *list, char *str,
ret = sort_dimension__add(list, tok, evlist, level);
if (ret == -EINVAL) {
if (!cacheline_size && !strncasecmp(tok, "dcacheline", strlen(tok)))
error("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
pr_err("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
else
error("Invalid --sort key: `%s'", tok);
pr_err("Invalid --sort key: `%s'", tok);
break;
} else if (ret == -ESRCH) {
error("Unknown --sort key: `%s'", tok);
pr_err("Unknown --sort key: `%s'", tok);
break;
}
}
@ -2594,7 +2594,7 @@ static int setup_sort_order(struct perf_evlist *evlist)
return 0;
if (sort_order[1] == '\0') {
error("Invalid --sort key: `+'");
pr_err("Invalid --sort key: `+'");
return -EINVAL;
}
@ -2604,7 +2604,7 @@ static int setup_sort_order(struct perf_evlist *evlist)
*/
if (asprintf(&new_sort_order, "%s,%s",
get_default_sort_order(evlist), sort_order + 1) < 0) {
error("Not enough memory to set up --sort");
pr_err("Not enough memory to set up --sort");
return -ENOMEM;
}
@ -2668,7 +2668,7 @@ static int __setup_sorting(struct perf_evlist *evlist)
str = strdup(sort_keys);
if (str == NULL) {
error("Not enough memory to setup sort keys");
pr_err("Not enough memory to setup sort keys");
return -ENOMEM;
}
@ -2678,7 +2678,7 @@ static int __setup_sorting(struct perf_evlist *evlist)
if (!is_strict_order(field_order)) {
str = setup_overhead(str);
if (str == NULL) {
error("Not enough memory to setup overhead keys");
pr_err("Not enough memory to setup overhead keys");
return -ENOMEM;
}
}
@ -2834,10 +2834,10 @@ static int setup_output_list(struct perf_hpp_list *list, char *str)
tok; tok = strtok_r(NULL, ", ", &tmp)) {
ret = output_field_add(list, tok);
if (ret == -EINVAL) {
error("Invalid --fields key: `%s'", tok);
pr_err("Invalid --fields key: `%s'", tok);
break;
} else if (ret == -ESRCH) {
error("Unknown --fields key: `%s'", tok);
pr_err("Unknown --fields key: `%s'", tok);
break;
}
}
@ -2877,7 +2877,7 @@ static int __setup_output_field(void)
strp = str = strdup(field_order);
if (str == NULL) {
error("Not enough memory to setup output fields");
pr_err("Not enough memory to setup output fields");
return -ENOMEM;
}
@ -2885,7 +2885,7 @@ static int __setup_output_field(void)
strp++;
if (!strlen(strp)) {
error("Invalid --fields key: `+'");
pr_err("Invalid --fields key: `+'");
goto out;
}
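
The error() to pr_err() conversions in this file (and the warning() to pr_warning() one further down) funnel diagnostics through perf's single leveled printer instead of a separate reporting path, so verbosity and formatting are handled in one place. As a rough illustration of why that consolidation is convenient, here is a minimal, hypothetical leveled-logging stand-in; it is not perf's actual pr_err()/eprintf() implementation.

#include <stdarg.h>
#include <stdio.h>

static int log_level;   /* raised by -v, lowered by -q in this hypothetical setup */

static void pr_level(int level, const char *fmt, ...)
{
        va_list ap;

        if (level > log_level)
                return;
        va_start(ap, fmt);
        vfprintf(stderr, fmt, ap);
        va_end(ap);
}

#define my_pr_err(fmt, ...)     pr_level(0, fmt "\n", ##__VA_ARGS__)
#define my_pr_warning(fmt, ...) pr_level(0, fmt "\n", ##__VA_ARGS__)
#define my_pr_debug(fmt, ...)   pr_level(1, fmt "\n", ##__VA_ARGS__)

int main(void)
{
        my_pr_err("Invalid --sort key: `%s'", "foo");
        my_pr_debug("only shown when verbose");         /* suppressed at level 0 */
        return 0;
}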


@ -24,7 +24,7 @@
#include <errno.h>
#include "../perf.h"
#include "util.h"
#include "debug.h"
#include "trace-event.h"
#include "sane_ctype.h"
@ -150,7 +150,7 @@ void parse_ftrace_printk(struct pevent *pevent,
while (line) {
addr_str = strtok_r(line, ":", &fmt);
if (!addr_str) {
warning("printk format with empty entry");
pr_warning("printk format with empty entry");
break;
}
addr = strtoull(addr_str, NULL, 16);


@ -9,75 +9,17 @@
#include "util.h"
#include "debug.h"
static void report(const char *prefix, const char *err, va_list params)
{
char msg[1024];
vsnprintf(msg, sizeof(msg), err, params);
fprintf(stderr, " %s%s\n", prefix, msg);
}
static __noreturn void usage_builtin(const char *err)
{
fprintf(stderr, "\n Usage: %s\n", err);
exit(129);
}
static __noreturn void die_builtin(const char *err, va_list params)
{
report(" Fatal: ", err, params);
exit(128);
}
static void error_builtin(const char *err, va_list params)
{
report(" Error: ", err, params);
}
static void warn_builtin(const char *warn, va_list params)
{
report(" Warning: ", warn, params);
}
/* If we are in a dlopen()ed .so write to a global variable would segfault
* (ugh), so keep things static. */
static void (*usage_routine)(const char *err) __noreturn = usage_builtin;
static void (*error_routine)(const char *err, va_list params) = error_builtin;
static void (*warn_routine)(const char *err, va_list params) = warn_builtin;
void set_warning_routine(void (*routine)(const char *err, va_list params))
{
warn_routine = routine;
}
void usage(const char *err)
{
usage_routine(err);
}
void die(const char *err, ...)
{
va_list params;
va_start(params, err);
die_builtin(err, params);
va_end(params);
}
int error(const char *err, ...)
{
va_list params;
va_start(params, err);
error_routine(err, params);
va_end(params);
return -1;
}
void warning(const char *warn, ...)
{
va_list params;
va_start(params, warn);
warn_routine(warn, params);
va_end(params);
}


@ -16,10 +16,6 @@
/* General helper functions */
void usage(const char *err) __noreturn;
void die(const char *err, ...) __noreturn __printf(1, 2);
int error(const char *err, ...) __printf(1, 2);
void warning(const char *err, ...) __printf(1, 2);
void set_warning_routine(void (*routine)(const char *err, va_list params));
static inline void *zalloc(size_t size)
{