2010-05-13 21:47:16 +04:00
|
|
|
#include "util.h"
|
2010-05-21 14:48:39 +04:00
|
|
|
#include "build-id.h"
|
2009-09-28 17:32:55 +04:00
|
|
|
#include "hist.h"
|
2009-12-14 18:10:39 +03:00
|
|
|
#include "session.h"
|
|
|
|
#include "sort.h"
|
2014-10-10 22:49:21 +04:00
|
|
|
#include "evlist.h"
|
2013-01-22 13:09:33 +04:00
|
|
|
#include "evsel.h"
|
2013-10-30 04:40:34 +04:00
|
|
|
#include "annotate.h"
|
2014-12-22 07:44:10 +03:00
|
|
|
#include "ui/progress.h"
|
2009-12-16 19:31:49 +03:00
|
|
|
#include <math.h>
|
2009-09-28 17:32:55 +04:00
|
|
|
|
2011-10-19 19:09:10 +04:00
|
|
|
static bool hists__filter_entry_by_dso(struct hists *hists,
|
|
|
|
struct hist_entry *he);
|
|
|
|
static bool hists__filter_entry_by_thread(struct hists *hists,
|
|
|
|
struct hist_entry *he);
|
2012-03-16 12:50:51 +04:00
|
|
|
static bool hists__filter_entry_by_symbol(struct hists *hists,
|
|
|
|
struct hist_entry *he);
|
2011-10-19 19:09:10 +04:00
|
|
|
|
2011-09-26 19:33:28 +04:00
|
|
|
/* Return the current display width (in chars) of column @col. */
u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}
|
|
|
|
|
2011-09-26 19:33:28 +04:00
|
|
|
/* Set the display width of column @col unconditionally. */
void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}
|
|
|
|
|
2011-09-26 19:33:28 +04:00
|
|
|
/*
 * Widen column @col to @len if @len exceeds the current width.
 * Returns true when the column width was actually enlarged.
 */
bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len <= hists__col_len(hists, col))
		return false;

	hists__set_col_len(hists, col, len);
	return true;
}
|
|
|
|
|
2012-08-20 08:52:05 +04:00
|
|
|
/* Zero every column width so the next calc pass starts fresh. */
void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}
|
|
|
|
|
2012-02-10 02:21:01 +04:00
|
|
|
/*
 * Reserve enough width in DSO column @dso to print an unresolved raw
 * address (one hex digit per 4 bits of a long).  Skipped when the user
 * explicitly controls column widths/separators or filters by DSO,
 * since those settings take precedence over auto-sizing.
 */
static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}
|
|
|
|
|
2012-08-20 08:52:05 +04:00
|
|
|
/*
 * Grow the per-hists column widths so that every field of entry @h
 * fits.  Called once per (unfiltered) entry; widths only ever grow
 * within a pass (see hists__new_col_len).
 */
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		/* Unresolved: shown as a raw 0x-prefixed address. */
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	/* THREAD column shows "comm:pid", hence the extra 6 chars. */
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			/* NOTE(review): from.map is dereferenced without a
			 * NULL check here (sym implies map was resolved —
			 * TODO confirm that invariant holds for all callers).
			 */
			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen + 1);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		}
		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		/* No mem info at all: size both columns for raw addresses. */
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	/* Fixed-width memory-event columns (widest expected string). */
	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

	if (h->srcline)
		hists__new_col_len(hists, HISTC_SRCLINE, strlen(h->srcline));

	if (h->srcfile)
		hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());
}
|
|
|
|
|
2012-08-20 08:52:05 +04:00
|
|
|
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
|
|
|
|
{
|
|
|
|
struct rb_node *next = rb_first(&hists->entries);
|
|
|
|
struct hist_entry *n;
|
|
|
|
int row = 0;
|
|
|
|
|
|
|
|
hists__reset_col_len(hists);
|
|
|
|
|
|
|
|
while (next && row++ < max_rows) {
|
|
|
|
n = rb_entry(next, struct hist_entry, rb_node);
|
|
|
|
if (!n->filtered)
|
|
|
|
hists__calc_col_len(hists, n);
|
|
|
|
next = rb_next(&n->rb_node);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-01-14 09:25:37 +04:00
|
|
|
/*
 * Accumulate @period into the bucket of @he_stat matching the sample's
 * cpumode (PERF_RECORD_MISC_*).  Unknown cpumodes are silently ignored.
 */
static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}
|
|
|
|
|
2013-01-24 19:10:29 +04:00
|
|
|
/* Fold one sample (@period, @weight) into @he_stat's totals. */
static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period += period;
	he_stat->weight += weight;
	he_stat->nr_events += 1;
}
|
|
|
|
|
|
|
|
/* Merge all accumulated counters of @src into @dest (used when
 * collapsing duplicate entries). */
static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period += src->period;
	dest->period_sys += src->period_sys;
	dest->period_us += src->period_us;
	dest->period_guest_sys += src->period_guest_sys;
	dest->period_guest_us += src->period_guest_us;
	dest->nr_events += src->nr_events;
	dest->weight += src->weight;
}
|
|
|
|
|
2014-01-14 09:25:37 +04:00
|
|
|
/*
 * Exponentially decay an entry's counters (multiply by 7/8), used by
 * 'perf top' so that stale entries fade out over refresh intervals.
 * Integer division means small values eventually reach zero.
 */
static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}
|
|
|
|
|
|
|
|
/*
 * Decay @he's counters and adjust the hists-wide period totals by the
 * amount that decayed away.  Returns true when the entry's period has
 * dropped to zero, i.e. the caller may delete it.
 */
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	/* Already fully decayed: nothing to subtract, just signal removal. */
	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);

	/* Amount lost to decay; keep the global totals consistent. */
	diff = prev_period - he->stat.period;

	hists->stats.total_period -= diff;
	if (!he->filtered)
		hists->stats.total_non_filtered_period -= diff;

	return he->stat.period == 0;
}
|
|
|
|
|
2014-12-19 18:41:28 +03:00
|
|
|
/*
 * Unlink @he from both rbtrees it may live on (output tree, and the
 * collapsed tree when collapsing is in effect), fix up the entry
 * counters, and free it.
 */
static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
	rb_erase(&he->rb_node, &hists->entries);

	if (sort__need_collapse)
		rb_erase(&he->rb_node_in, &hists->entries_collapsed);

	--hists->nr_entries;
	if (!he->filtered)
		--hists->nr_non_filtered_entries;

	hist_entry__delete(he);
}
|
|
|
|
|
2013-05-14 06:09:01 +04:00
|
|
|
/*
 * Walk the output tree decaying every entry; delete entries that have
 * decayed to nothing, plus any the caller wants zapped outright
 * (@zap_user for userspace-level entries, @zap_kernel for the rest).
 */
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *node = rb_first(&hists->entries);

	while (node != NULL) {
		struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

		/* Advance before a potential delete invalidates the node. */
		node = rb_next(&he->rb_node);

		if ((zap_user && he->level == '.') ||
		    (zap_kernel && he->level != '.') ||
		    hists__decay_entry(hists, he))
			hists__delete_entry(hists, he);
	}
}
|
|
|
|
|
2014-08-12 12:16:05 +04:00
|
|
|
void hists__delete_entries(struct hists *hists)
|
|
|
|
{
|
|
|
|
struct rb_node *next = rb_first(&hists->entries);
|
|
|
|
struct hist_entry *n;
|
|
|
|
|
|
|
|
while (next) {
|
|
|
|
n = rb_entry(next, struct hist_entry, rb_node);
|
|
|
|
next = rb_next(&n->rb_node);
|
|
|
|
|
2014-12-19 18:41:28 +03:00
|
|
|
hists__delete_entry(hists, n);
|
2014-08-12 12:16:05 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-09-28 17:32:55 +04:00
|
|
|
/*
|
2010-05-14 21:19:35 +04:00
|
|
|
* histogram, sorted on item, collects periods
|
2009-09-28 17:32:55 +04:00
|
|
|
*/
|
|
|
|
|
2012-09-11 08:34:27 +04:00
|
|
|
/*
 * Allocate a new hist_entry as a deep-enough copy of @template.
 * Callchain storage (when enabled) is co-allocated after the struct.
 * @sample_self: false when the entry is created only to accumulate
 * cumulative (children) periods — its own stat starts zeroed then.
 *
 * Takes its own references on every map/thread it keeps; branch_info
 * is copied because the template's copy is freed by the caller.
 * Returns NULL on allocation failure (nothing leaked).
 */
static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	size_t callchain_size = 0;
	struct hist_entry *he;

	if (symbol_conf.use_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = zalloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (symbol_conf.cumulate_callchain) {
			/* Accumulated stat gets its own copy of the
			 * initial counters. */
			he->stat_acc = malloc(sizeof(he->stat));
			if (he->stat_acc == NULL) {
				free(he);
				return NULL;
			}
			memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
			if (!sample_self)
				memset(&he->stat, 0, sizeof(he->stat));
		}

		map__get(he->ms.map);

		if (he->branch_info) {
			/*
			 * This branch info is (a part of) allocated from
			 * sample__resolve_bstack() and will be freed after
			 * adding new entries. So we need to save a copy.
			 */
			he->branch_info = malloc(sizeof(*he->branch_info));
			if (he->branch_info == NULL) {
				/* Unwind everything taken so far. */
				map__zput(he->ms.map);
				free(he->stat_acc);
				free(he);
				return NULL;
			}

			memcpy(he->branch_info, template->branch_info,
			       sizeof(*he->branch_info));

			map__get(he->branch_info->from.map);
			map__get(he->branch_info->to.map);
		}

		if (he->mem_info) {
			map__get(he->mem_info->iaddr.map);
			map__get(he->mem_info->daddr.map);
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		INIT_LIST_HEAD(&he->pairs.node);
		thread__get(he->thread);
	}

	return he;
}
|
|
|
|
|
2010-07-21 16:19:41 +04:00
|
|
|
static u8 symbol__parent_filter(const struct symbol *parent)
|
|
|
|
{
|
|
|
|
if (symbol_conf.exclude_other && parent == NULL)
|
|
|
|
return 1 << HIST_FILTER__PARENT;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-05-19 17:31:22 +03:00
|
|
|
/*
 * Find the hist entry matching @entry in the input rbtree, merging the
 * sample's period/weight into it; create and insert a new entry when no
 * match exists.  @sample_self=false accumulates only into stat_acc
 * (cumulative/children mode).  Returns the (found or new) entry, or
 * NULL on allocation failure.
 */
static struct hist_entry *hists__findnew_entry(struct hists *hists,
					       struct hist_entry *entry,
					       struct addr_location *al,
					       bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in a same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			if (sample_self)
				he_stat__add_period(&he->stat, period, weight);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period, weight);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			zfree(&entry->mem_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				map__put(he->ms.map);
				he->ms.map = map__get(entry->ms.map);
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	/* Per-cpumode accounting happens for both found and new entries. */
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}
|
|
|
|
|
2013-10-23 02:01:31 +04:00
|
|
|
/*
 * Build a template hist_entry on the stack from a resolved sample
 * location and hand it to hists__findnew_entry() for lookup/insert.
 * Ownership of @bi and @mi follows hists__findnew_entry() /
 * hist_entry__new() semantics (mem_info may be freed on merge).
 */
struct hist_entry *__hists__add_entry(struct hists *hists,
				      struct addr_location *al,
				      struct symbol *sym_parent,
				      struct branch_info *bi,
				      struct mem_info *mi,
				      u64 period, u64 weight, u64 transaction,
				      bool sample_self)
{
	struct hist_entry entry = {
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.cpu	 = al->cpu,
		.cpumode = al->cpumode,
		.ip	 = al->addr,
		.level	 = al->level,
		.stat = {
			.nr_events = 1,
			.period	= period,
			.weight = weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists	= hists,
		.branch_info = bi,
		.mem_info = mi,
		.transaction = transaction,
	};

	return hists__findnew_entry(hists, &entry, al, sample_self);
}
|
|
|
|
|
2013-10-30 04:40:34 +04:00
|
|
|
/* No-op 'next' callback for iterators with a single entry per sample. */
static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}
|
|
|
|
|
|
|
|
/* No-op 'add next' callback: nothing beyond the single entry to add. */
static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
|
|
|
|
{
|
|
|
|
struct perf_sample *sample = iter->sample;
|
|
|
|
struct mem_info *mi;
|
|
|
|
|
|
|
|
mi = sample__resolve_mem(sample, al);
|
|
|
|
if (mi == NULL)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
iter->priv = mi;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Add one hist entry for a memory-access sample, using the sample's
 * weight (cost) as the period so sorting reflects total access cost.
 * Stores the created entry in iter->he for the finish callback.
 */
static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->priv;
	struct hists *hists = evsel__hists(iter->evsel);
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	/* A zero weight would make the entry invisible; count it as 1. */
	cost = iter->sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * must pass period=weight in order to get the correct
	 * sorting from hists__collapse_resort() which is solely
	 * based on periods. We want sorting be done on nr_events * weight
	 * and this is indirectly achieved by passing period=weight here
	 * and the he_stat__add_period() function.
	 */
	he = __hists__add_entry(hists, al, iter->parent, NULL, mi,
				cost, cost, 0, true);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Finish a mem-mode iteration: account the sample against the entry
 * created earlier, append its callchain, and reset iterator state.
 * Returns -EINVAL if no entry was created, otherwise the result of
 * appending the callchain.
 */
static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
		      struct addr_location *al __maybe_unused)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = iter->he;
	int err = -EINVAL;

	if (he == NULL)
		goto out;

	hists__inc_nr_samples(hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

out:
	/*
	 * We don't need to free iter->priv (mem_info) here since the mem info
	 * was either already freed in hists__findnew_entry() or passed to a
	 * new hist entry by hist_entry__new().
	 */
	iter->priv = NULL;

	iter->he = NULL;
	return err;
}
|
|
|
|
|
|
|
|
/*
 * Prepare a branch-mode iteration: resolve the sample's branch stack
 * into an array of branch_info and set up the curr/total cursor used by
 * iter_next_branch_entry().  Returns 0 or -ENOMEM.
 */
static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return -ENOMEM;

	iter->curr = 0;
	iter->total = sample->branch_stack->nr;

	/* bi array is freed in iter_finish_branch_entry() via zfree */
	iter->priv = bi;
	return 0;
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
|
|
|
|
struct addr_location *al __maybe_unused)
|
|
|
|
{
|
2014-01-07 12:02:25 +04:00
|
|
|
/* to avoid calling callback function */
|
|
|
|
iter->he = NULL;
|
|
|
|
|
2013-10-30 04:40:34 +04:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Advance branch-mode iteration: point *al at the branch target of the
 * current branch stack slot.  Returns 1 while entries remain, 0 when
 * exhausted or when no branch info was resolved.
 */
static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi = iter->priv;
	int i = iter->curr;

	if (bi == NULL)
		return 0;

	if (iter->curr >= iter->total)
		return 0;

	/* redirect the addr_location to the branch target ("to" side) */
	al->map = bi[i].to.map;
	al->sym = bi[i].to.sym;
	al->addr = bi[i].to.addr;

	return 1;
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
|
|
|
|
{
|
2014-01-07 12:02:25 +04:00
|
|
|
struct branch_info *bi;
|
2013-10-30 04:40:34 +04:00
|
|
|
struct perf_evsel *evsel = iter->evsel;
|
2014-10-09 20:13:41 +04:00
|
|
|
struct hists *hists = evsel__hists(evsel);
|
2013-10-30 04:40:34 +04:00
|
|
|
struct hist_entry *he = NULL;
|
|
|
|
int i = iter->curr;
|
|
|
|
int err = 0;
|
|
|
|
|
|
|
|
bi = iter->priv;
|
|
|
|
|
|
|
|
if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The report shows the percentage of total branches captured
|
|
|
|
* and not events sampled. Thus we use a pseudo period of 1.
|
|
|
|
*/
|
2014-10-09 20:13:41 +04:00
|
|
|
he = __hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
|
2015-07-18 18:24:46 +03:00
|
|
|
1, bi->flags.cycles ? bi->flags.cycles : 1,
|
|
|
|
0, true);
|
2013-10-30 04:40:34 +04:00
|
|
|
if (he == NULL)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
2014-10-09 20:13:41 +04:00
|
|
|
hists__inc_nr_samples(hists, he->filtered);
|
2013-10-30 04:40:34 +04:00
|
|
|
|
|
|
|
out:
|
|
|
|
iter->he = he;
|
|
|
|
iter->curr++;
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Finish a branch-mode iteration: free the branch_info array and report
 * an error if the loop terminated before consuming the whole stack
 * (i.e. an add_next callback failed).
 */
static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	/* non-zero means the iteration was cut short by an error */
	return iter->curr >= iter->total ? 0 : -1;
}
|
|
|
|
|
|
|
|
/*
 * Normal (non-mem, non-branch) samples need no per-iteration state;
 * preparation is a no-op.
 */
static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Add the single hist entry for a normal sample using its real period,
 * weight and transaction flags.  Returns 0 or -ENOMEM.
 */
static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Finish a normal-sample iteration: account the sample and append the
 * sample's callchain to the entry created earlier.  Silently succeeds
 * when no entry was created.
 */
static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

	return hist_entry__append_callchain(he, sample);
}
|
|
|
|
|
2012-09-11 09:13:04 +04:00
|
|
|
static int
|
|
|
|
iter_prepare_cumulative_entry(struct hist_entry_iter *iter __maybe_unused,
|
|
|
|
struct addr_location *al __maybe_unused)
|
|
|
|
{
|
2013-10-31 05:05:29 +04:00
|
|
|
struct hist_entry **he_cache;
|
|
|
|
|
2012-09-11 09:13:04 +04:00
|
|
|
callchain_cursor_commit(&callchain_cursor);
|
2013-10-31 05:05:29 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* This is for detecting cycles or recursions so that they're
|
|
|
|
* cumulated only one time to prevent entries more than 100%
|
|
|
|
* overhead.
|
|
|
|
*/
|
|
|
|
he_cache = malloc(sizeof(*he_cache) * (PERF_MAX_STACK_DEPTH + 1));
|
|
|
|
if (he_cache == NULL)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
iter->priv = he_cache;
|
|
|
|
iter->curr = 0;
|
|
|
|
|
2012-09-11 09:13:04 +04:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Add the "self" entry for a cumulative-mode sample, cache it for
 * duplicate detection, and attach the full callchain.  The cursor is
 * re-committed afterwards so the caller-walk starts from the top.
 */
static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	int err = 0;

	he = __hists__add_entry(hists, al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	hist_entry__append_callchain(he, sample);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(&callchain_cursor);

	hists__inc_nr_samples(hists, he->filtered);

	return err;
}
|
|
|
|
|
|
|
|
/*
 * Advance cumulative-mode iteration: fill *al from the current
 * callchain cursor node.  Returns 0 when the chain is exhausted,
 * otherwise fill_callchain_info()'s result (which may skip unresolved
 * frames when hide_unresolved is set).
 */
static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
{
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(&callchain_cursor);
	if (node == NULL)
		return 0;

	return fill_callchain_info(al, node, iter->hide_unresolved);
}
|
|
|
|
|
|
|
|
/*
 * Add a cumulative ("children") entry for one caller in the callchain.
 * A snapshot of the cursor is taken BEFORE advancing so the callchain
 * attached to the new entry starts at this caller; duplicate callers
 * already in he_cache (cycles/recursion) are skipped so no entry is
 * cumulated twice.
 */
static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	/* stack-local key entry used only for duplicate comparison */
	struct hist_entry he_tmp = {
		.hists = evsel__hists(evsel),
		.cpu = al->cpu,
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ip = al->addr,
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.parent = iter->parent,
	};
	int i;
	struct callchain_cursor cursor;

	callchain_cursor_snapshot(&cursor, &callchain_cursor);

	callchain_cursor_advance(&callchain_cursor);

	/*
	 * Check if there's duplicate entries in the callchain.
	 * It's possible that it has cycles or recursive calls.
	 */
	for (i = 0; i < iter->curr; i++) {
		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling callback function */
			iter->he = NULL;
			return 0;
		}
	}

	/* sample_self=false: period is accounted to stat_acc only */
	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, false);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	if (symbol_conf.use_callchain)
		callchain_append(he->callchain, &cursor, sample->period);
	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Finish a cumulative-mode iteration: free the duplicate-detection
 * cache and reset iterator state.
 */
static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return 0;
}
|
|
|
|
|
2013-10-30 04:40:34 +04:00
|
|
|
/* Iterator ops for memory-access (perf mem) samples: one entry per sample. */
const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry 		= iter_prepare_mem_entry,
	.add_single_entry 	= iter_add_single_mem_entry,
	.next_entry 		= iter_next_nop_entry,
	.add_next_entry 	= iter_add_next_nop_entry,
	.finish_entry 		= iter_finish_mem_entry,
};
|
|
|
|
|
|
|
|
/* Iterator ops for branch-stack samples: one entry per branch stack slot. */
const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry 		= iter_prepare_branch_entry,
	.add_single_entry 	= iter_add_single_branch_entry,
	.next_entry 		= iter_next_branch_entry,
	.add_next_entry 	= iter_add_next_branch_entry,
	.finish_entry 		= iter_finish_branch_entry,
};
|
|
|
|
|
|
|
|
/* Iterator ops for ordinary samples: one entry per sample. */
const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry 		= iter_prepare_normal_entry,
	.add_single_entry 	= iter_add_single_normal_entry,
	.next_entry 		= iter_next_nop_entry,
	.add_next_entry 	= iter_add_next_nop_entry,
	.finish_entry 		= iter_finish_normal_entry,
};
|
|
|
|
|
2012-09-11 09:13:04 +04:00
|
|
|
/* Iterator ops for cumulative ("children") mode: self entry + one per caller. */
const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry 		= iter_prepare_cumulative_entry,
	.add_single_entry 	= iter_add_single_cumulative_entry,
	.next_entry 		= iter_next_cumulative_entry,
	.add_next_entry 	= iter_add_next_cumulative_entry,
	.finish_entry 		= iter_finish_cumulative_entry,
};
|
|
|
|
|
2013-10-30 04:40:34 +04:00
|
|
|
/*
 * Drive one sample through the iterator state machine: resolve the
 * callchain, then prepare / add-single / (next, add-next)* / finish via
 * iter->ops.  The optional add_entry_cb is invoked for every entry
 * created (single=true for the first).  finish_entry always runs, even
 * on error, so per-iteration resources are released; its error is
 * reported only if no earlier error occurred.
 */
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 int max_stack_depth, void *arg)
{
	int err, err2;

	err = sample__resolve_callchain(iter->sample, &iter->parent,
					iter->evsel, al, max_stack_depth);
	if (err)
		return err;

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	return err;
}
|
|
|
|
|
2009-09-28 17:32:55 +04:00
|
|
|
/*
 * Compare two hist entries using the configured sort-key list (->cmp of
 * each format), stopping at the first key that differs.  Returns 0 when
 * the entries are equal on every (non-skipped) key.
 */
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->cmp(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}
|
|
|
|
|
|
|
|
/*
 * Like hist_entry__cmp() but uses each format's ->collapse comparator,
 * which determines whether two entries merge during collapse-resort.
 */
int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->collapse(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}
|
|
|
|
|
2014-12-19 18:31:40 +03:00
|
|
|
/*
 * Release a hist entry: drop the references it holds (thread, maps —
 * including the branch/mem from/to maps), free its owned sub-objects
 * and finally the entry itself.
 */
void hist_entry__delete(struct hist_entry *he)
{
	thread__zput(he->thread);
	map__zput(he->ms.map);

	if (he->branch_info) {
		map__zput(he->branch_info->from.map);
		map__zput(he->branch_info->to.map);
		zfree(&he->branch_info);
	}

	if (he->mem_info) {
		map__zput(he->mem_info->iaddr.map);
		map__zput(he->mem_info->daddr.map);
		zfree(&he->mem_info);
	}

	zfree(&he->stat_acc);
	free_srcline(he->srcline);
	/* srcfile may point at a shared empty string; only free real ones */
	if (he->srcfile && he->srcfile[0])
		free(he->srcfile);
	free_callchain(he->callchain);
	free(he);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* collapse the histogram
|
|
|
|
*/
|
|
|
|
|
2012-09-11 02:15:03 +04:00
|
|
|
static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
|
2011-01-14 06:51:58 +03:00
|
|
|
struct rb_root *root,
|
|
|
|
struct hist_entry *he)
|
2009-09-28 17:32:55 +04:00
|
|
|
{
|
2009-12-14 16:37:11 +03:00
|
|
|
struct rb_node **p = &root->rb_node;
|
2009-09-28 17:32:55 +04:00
|
|
|
struct rb_node *parent = NULL;
|
|
|
|
struct hist_entry *iter;
|
|
|
|
int64_t cmp;
|
|
|
|
|
|
|
|
while (*p != NULL) {
|
|
|
|
parent = *p;
|
2011-10-06 00:50:23 +04:00
|
|
|
iter = rb_entry(parent, struct hist_entry, rb_node_in);
|
2009-09-28 17:32:55 +04:00
|
|
|
|
|
|
|
cmp = hist_entry__collapse(iter, he);
|
|
|
|
|
|
|
|
if (!cmp) {
|
2012-10-04 16:49:43 +04:00
|
|
|
he_stat__add_stat(&iter->stat, &he->stat);
|
2012-09-11 08:15:07 +04:00
|
|
|
if (symbol_conf.cumulate_callchain)
|
|
|
|
he_stat__add_stat(iter->stat_acc, he->stat_acc);
|
2012-09-26 11:47:28 +04:00
|
|
|
|
2011-01-14 06:51:58 +03:00
|
|
|
if (symbol_conf.use_callchain) {
|
2012-05-31 09:43:26 +04:00
|
|
|
callchain_cursor_reset(&callchain_cursor);
|
|
|
|
callchain_merge(&callchain_cursor,
|
|
|
|
iter->callchain,
|
2011-01-14 06:51:58 +03:00
|
|
|
he->callchain);
|
|
|
|
}
|
2014-12-19 18:31:40 +03:00
|
|
|
hist_entry__delete(he);
|
2010-05-10 20:57:51 +04:00
|
|
|
return false;
|
2009-09-28 17:32:55 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
if (cmp < 0)
|
|
|
|
p = &(*p)->rb_left;
|
|
|
|
else
|
|
|
|
p = &(*p)->rb_right;
|
|
|
|
}
|
2014-12-22 07:44:10 +03:00
|
|
|
hists->nr_entries++;
|
2009-09-28 17:32:55 +04:00
|
|
|
|
2011-10-06 00:50:23 +04:00
|
|
|
rb_link_node(&he->rb_node_in, parent, p);
|
|
|
|
rb_insert_color(&he->rb_node_in, root);
|
2010-05-10 20:57:51 +04:00
|
|
|
return true;
|
2009-09-28 17:32:55 +04:00
|
|
|
}
|
|
|
|
|
2011-10-06 00:50:23 +04:00
|
|
|
/*
 * Atomically hand out the current input tree and rotate to the other
 * one of the two entries_in_array buffers, so new samples can keep
 * being inserted while the returned tree is collapsed.
 */
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	/* flip between entries_in_array[0] and [1] */
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}
|
|
|
|
|
2011-10-19 19:09:10 +04:00
|
|
|
/*
 * Apply all currently-configured filters (dso, thread, symbol) to a
 * freshly collapsed entry, setting its ->filtered bits as appropriate.
 */
static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}
|
|
|
|
|
2013-10-11 09:15:38 +04:00
|
|
|
/*
 * Move every entry from the (rotated) input tree into the collapsed
 * tree, merging duplicates per the collapse sort keys.  No-op when the
 * sort keys don't require a collapse pass.  @prog, when non-NULL, is
 * advanced once per entry; the loop bails early on session_done().
 */
void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	/* recounted by hists__collapse_insert_entry() below */
	hists->nr_entries = 0;

	root = hists__get_rotate_entries_in(hists);

	next = rb_first(root);

	while (next) {
		if (session_done())
			break;
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
}
|
2009-12-14 16:37:11 +03:00
|
|
|
|
2014-03-03 09:18:00 +04:00
|
|
|
/*
 * Compare two entries for output ordering using each format's ->sort
 * comparator, stopping at the first key that differs.
 */
static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->sort(fmt, a, b);
		if (cmp)
			break;
	}

	return cmp;
}
|
|
|
|
|
2014-04-24 11:37:26 +04:00
|
|
|
/* Reset the counters that track only unfiltered entries/period. */
static void hists__reset_filter_stats(struct hists *hists)
{
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;
}
|
|
|
|
|
|
|
|
/* Reset entry/period counters (both total and filter-aware) before a resort. */
void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}
|
|
|
|
|
|
|
|
/* Account entry @h toward the unfiltered entry count and period total. */
static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}
|
|
|
|
|
|
|
|
/*
 * Account entry @h in the hists totals; filtered entries count toward
 * the overall totals but not the non-filtered ones.
 */
void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}
|
|
|
|
|
perf hist: Introduce hists class and move lots of methods to it
In cbbc79a we introduced support for multiple events by introducing a
new "event_stat_id" struct and then made several perf_session methods
receive a point to it instead of a pointer to perf_session, and kept the
event_stats and hists rb_tree in perf_session.
While working on the new newt based browser, I realised that it would be
better to introduce a new class, "hists" (short for "histograms"),
renaming the "event_stat_id" struct and the perf_session methods that
were really "hists" methods, as they manipulate only struct hists
members, not touching anything in the other perf_session members.
Other optimizations, such as calculating the maximum lenght of a symbol
name present in an hists instance will be possible as we add them,
avoiding a re-traversal just for finding that information.
The rationale for the name "hists" to replace "event_stat_id" is that we
may have multiple sets of hists for the same event_stat id, as, for
instance, the 'perf diff' tool has, so event stat id is not what
characterizes what this struct and the functions that manipulate it do.
Cc: Eric B Munson <ebmunson@us.ibm.com>
Cc: Frédéric Weisbecker <fweisbec@gmail.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Tom Zanussi <tzanussi@gmail.com>
LKML-Reference: <new-submission>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2010-05-10 20:04:11 +04:00
|
|
|
/*
 * Insert @he into the output rbtree in display order (hist_entry__sort).
 * If callchains are enabled, the entry's chain is sorted/pruned first,
 * dropping branches below @min_callchain_hits.
 */
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				      min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		/* bigger entries go left so rb_first() yields the top hit */
		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}
|
|
|
|
|
2014-12-22 07:44:10 +03:00
|
|
|
/*
 * Rebuild the output tree (hists->entries) from the collapsed (or raw
 * input) tree, recomputing stats and column widths as entries are
 * inserted in display order.  @prog, when non-NULL, is advanced once
 * per entry.
 */
void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	/* callchain branches below this share of total period are pruned */
	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);

		if (prog)
			ui_progress__update(prog, 1);
	}
}
|
2009-12-14 16:37:11 +03:00
|
|
|
|
2011-09-26 19:33:28 +04:00
|
|
|
/*
 * Clear one filter bit on entry @h.  If the entry is now completely
 * unfiltered, reset its browser display state and fold it back into
 * the non-filtered sample/entry statistics and column widths.
 */
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->unfolded = false;
	h->row_offset = 0;
	h->nr_rows = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}
|
|
|
|
|
2011-10-19 19:09:10 +04:00
|
|
|
|
|
|
|
static bool hists__filter_entry_by_dso(struct hists *hists,
|
|
|
|
struct hist_entry *he)
|
|
|
|
{
|
|
|
|
if (hists->dso_filter != NULL &&
|
|
|
|
(he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
|
|
|
|
he->filtered |= (1 << HIST_FILTER__DSO);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2011-10-19 01:07:34 +04:00
|
|
|
/*
 * Re-evaluate the DSO filter over every entry in the output tree,
 * rebuilding the non-filtered statistics and column widths for the
 * entries that pass.
 */
void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		/* parentless entries are hidden in exclude-other mode */
		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}
|
|
|
|
|
2011-10-19 19:09:10 +04:00
|
|
|
static bool hists__filter_entry_by_thread(struct hists *hists,
|
|
|
|
struct hist_entry *he)
|
|
|
|
{
|
|
|
|
if (hists->thread_filter != NULL &&
|
|
|
|
he->thread != hists->thread_filter) {
|
|
|
|
he->filtered |= (1 << HIST_FILTER__THREAD);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2011-10-19 01:07:34 +04:00
|
|
|
/*
 * Re-evaluate the thread filter over every entry in the output tree,
 * rebuilding the non-filtered statistics and column widths for the
 * entries that pass.
 */
void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}
|
2010-05-12 06:18:06 +04:00
|
|
|
|
2012-03-16 12:50:51 +04:00
|
|
|
static bool hists__filter_entry_by_symbol(struct hists *hists,
|
|
|
|
struct hist_entry *he)
|
|
|
|
{
|
|
|
|
if (hists->symbol_filter_str != NULL &&
|
|
|
|
(!he->ms.sym || strstr(he->ms.sym->name,
|
|
|
|
hists->symbol_filter_str) == NULL)) {
|
|
|
|
he->filtered |= (1 << HIST_FILTER__SYMBOL);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
void hists__filter_by_symbol(struct hists *hists)
|
|
|
|
{
|
|
|
|
struct rb_node *nd;
|
|
|
|
|
2013-12-26 10:11:52 +04:00
|
|
|
hists->stats.nr_non_filtered_samples = 0;
|
2014-04-24 11:37:26 +04:00
|
|
|
|
|
|
|
hists__reset_filter_stats(hists);
|
2012-03-16 12:50:51 +04:00
|
|
|
hists__reset_col_len(hists);
|
|
|
|
|
|
|
|
for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
|
|
|
|
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
|
|
|
|
|
|
|
|
if (hists__filter_entry_by_symbol(hists, h))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-12-18 23:24:46 +04:00
|
|
|
/*
 * Count one event of the given @type in @stats.
 * Slot 0 holds the grand total across all event types, so every event
 * bumps both the total and its per-type counter.
 */
void events_stats__inc(struct events_stats *stats, u32 type)
{
	stats->nr_events[0]++;
	stats->nr_events[type]++;
}
|
|
|
|
|
2011-09-26 19:33:28 +04:00
|
|
|
/*
 * Count one event of the given @type against this hists' own statistics.
 * Thin convenience wrapper around events_stats__inc().
 */
void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}
|
2012-11-09 00:54:33 +04:00
|
|
|
|
2014-05-28 09:12:18 +04:00
|
|
|
void hists__inc_nr_samples(struct hists *hists, bool filtered)
|
|
|
|
{
|
|
|
|
events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
|
|
|
|
if (!filtered)
|
|
|
|
hists->stats.nr_non_filtered_samples++;
|
|
|
|
}
|
|
|
|
|
2012-11-09 01:03:09 +04:00
|
|
|
/*
 * Find the entry in @hists that collapses equal to @pair, or, when none
 * exists, insert a new zero-stat "dummy" placeholder entry for it.
 *
 * Returns the matching/new entry, or NULL if allocation failed.
 * Used by hists__link() so the leader hists has a bucket for every entry
 * present in the other hists.
 */
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	/* pick the tree that hists__find_entry() also searches */
	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	/* standard rbtree descent; remember the link slot for insertion */
	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		/* already present: return the existing entry as-is */
		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair, true);
	if (he) {
		/* dummy entries carry no period/counts of their own */
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_stats(hists, he);
		he->dummy = true;
	}
out:
	return he;
}
|
|
|
|
|
2012-11-09 00:54:33 +04:00
|
|
|
static struct hist_entry *hists__find_entry(struct hists *hists,
|
|
|
|
struct hist_entry *he)
|
|
|
|
{
|
2012-12-10 12:29:55 +04:00
|
|
|
struct rb_node *n;
|
|
|
|
|
|
|
|
if (sort__need_collapse)
|
|
|
|
n = hists->entries_collapsed.rb_node;
|
|
|
|
else
|
|
|
|
n = hists->entries_in->rb_node;
|
2012-11-09 00:54:33 +04:00
|
|
|
|
|
|
|
while (n) {
|
2012-12-10 12:29:55 +04:00
|
|
|
struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
|
|
|
|
int64_t cmp = hist_entry__collapse(iter, he);
|
2012-11-09 00:54:33 +04:00
|
|
|
|
|
|
|
if (cmp < 0)
|
|
|
|
n = n->rb_left;
|
|
|
|
else if (cmp > 0)
|
|
|
|
n = n->rb_right;
|
|
|
|
else
|
|
|
|
return iter;
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Look for pairs to link to the leader buckets (hist_entries):
|
|
|
|
*/
|
|
|
|
void hists__match(struct hists *leader, struct hists *other)
|
|
|
|
{
|
2012-12-10 12:29:55 +04:00
|
|
|
struct rb_root *root;
|
2012-11-09 00:54:33 +04:00
|
|
|
struct rb_node *nd;
|
|
|
|
struct hist_entry *pos, *pair;
|
|
|
|
|
2012-12-10 12:29:55 +04:00
|
|
|
if (sort__need_collapse)
|
|
|
|
root = &leader->entries_collapsed;
|
|
|
|
else
|
|
|
|
root = leader->entries_in;
|
|
|
|
|
|
|
|
for (nd = rb_first(root); nd; nd = rb_next(nd)) {
|
|
|
|
pos = rb_entry(nd, struct hist_entry, rb_node_in);
|
2012-11-09 00:54:33 +04:00
|
|
|
pair = hists__find_entry(other, pos);
|
|
|
|
|
|
|
|
if (pair)
|
2012-11-29 10:38:34 +04:00
|
|
|
hist_entry__add_pair(pair, pos);
|
2012-11-09 00:54:33 +04:00
|
|
|
}
|
|
|
|
}
|
2012-11-09 01:03:09 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Look for entries in the other hists that are not present in the leader, if
|
|
|
|
* we find them, just add a dummy entry on the leader hists, with period=0,
|
|
|
|
* nr_events=0, to serve as the list header.
|
|
|
|
*/
|
|
|
|
int hists__link(struct hists *leader, struct hists *other)
|
|
|
|
{
|
2012-12-10 12:29:55 +04:00
|
|
|
struct rb_root *root;
|
2012-11-09 01:03:09 +04:00
|
|
|
struct rb_node *nd;
|
|
|
|
struct hist_entry *pos, *pair;
|
|
|
|
|
2012-12-10 12:29:55 +04:00
|
|
|
if (sort__need_collapse)
|
|
|
|
root = &other->entries_collapsed;
|
|
|
|
else
|
|
|
|
root = other->entries_in;
|
|
|
|
|
|
|
|
for (nd = rb_first(root); nd; nd = rb_next(nd)) {
|
|
|
|
pos = rb_entry(nd, struct hist_entry, rb_node_in);
|
2012-11-09 01:03:09 +04:00
|
|
|
|
|
|
|
if (!hist_entry__has_pairs(pos)) {
|
|
|
|
pair = hists__add_dummy_entry(leader, pos);
|
|
|
|
if (pair == NULL)
|
|
|
|
return -1;
|
2012-11-29 10:38:34 +04:00
|
|
|
hist_entry__add_pair(pos, pair);
|
2012-11-09 01:03:09 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
2014-01-14 06:52:48 +04:00
|
|
|
|
2015-07-18 18:24:49 +03:00
|
|
|
/*
 * Account per-branch cycle counts from a sample's branch stack so they can
 * be shown by annotation.
 *
 * Only does work when the branch stack exists and its first entry carries a
 * cycles value. Resolves the branch stack to symbols, then walks it in
 * reverse (perf stores branches newest-first, i.e. reversed from program
 * order) feeding each branch's cycles to addr_map_symbol__account_cycles(),
 * threading the previous branch target through @prev so consecutive
 * branches can be related; resolution errors for individual entries are
 * deliberately ignored.
 */
void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
			  struct perf_sample *sample, bool nonany_branch_mode)
{
	struct branch_info *bi;

	/* If we have branch cycles always annotate them. */
	if (bs && bs->nr && bs->entries[0].flags.cycles) {
		int i;

		bi = sample__resolve_bstack(sample, al);
		if (bi) {
			struct addr_map_symbol *prev = NULL;

			/*
			 * Ignore errors, still want to process the
			 * other entries.
			 *
			 * For non standard branch modes always
			 * force no IPC (prev == NULL)
			 *
			 * Note that perf stores branches reversed from
			 * program order!
			 */
			for (i = bs->nr - 1; i >= 0; i--) {
				addr_map_symbol__account_cycles(&bi[i].from,
					nonany_branch_mode ? NULL : prev,
					bi[i].flags.cycles);
				/* remember this target for the next (later) branch */
				prev = &bi[i].to;
			}
			/* bi was allocated by sample__resolve_bstack() */
			free(bi);
		}
	}
}
|
2014-10-10 22:49:21 +04:00
|
|
|
|
|
|
|
size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
|
|
|
|
{
|
|
|
|
struct perf_evsel *pos;
|
|
|
|
size_t ret = 0;
|
|
|
|
|
|
|
|
evlist__for_each(evlist, pos) {
|
|
|
|
ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
|
|
|
|
ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2014-01-14 06:52:48 +04:00
|
|
|
u64 hists__total_period(struct hists *hists)
|
|
|
|
{
|
|
|
|
return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
|
|
|
|
hists->stats.total_period;
|
|
|
|
}
|
2014-02-07 07:06:07 +04:00
|
|
|
|
|
|
|
/*
 * Option/config parser for the filter percentage mode.
 * Accepts "relative" or "absolute" and updates
 * symbol_conf.filter_relative accordingly.
 *
 * Returns 0 on success, -1 on a missing or unrecognized argument.
 *
 * Fix: guard against @arg being NULL before calling strcmp() — passing
 * NULL to strcmp() is undefined behavior, and perf_hist_config() forwards
 * a config value that can be NULL for a valueless key.
 */
int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (arg == NULL)
		return -1;

	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else
		return -1;

	return 0;
}
|
2014-01-14 07:02:15 +04:00
|
|
|
|
|
|
|
/*
 * perf config callback for hist.* settings. Currently only
 * "hist.percentage" is recognized; any other variable is ignored.
 */
int perf_hist_config(const char *var, const char *value)
{
	if (strcmp(var, "hist.percentage") == 0)
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}
|
2014-10-09 23:16:00 +04:00
|
|
|
|
|
|
|
static int hists_evsel__init(struct perf_evsel *evsel)
|
|
|
|
{
|
|
|
|
struct hists *hists = evsel__hists(evsel);
|
|
|
|
|
|
|
|
memset(hists, 0, sizeof(*hists));
|
|
|
|
hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
|
|
|
|
hists->entries_in = &hists->entries_in_array[0];
|
|
|
|
hists->entries_collapsed = RB_ROOT;
|
|
|
|
hists->entries = RB_ROOT;
|
|
|
|
pthread_mutex_init(&hists->lock, NULL);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* XXX We probably need a hists_evsel__exit() to free the hist_entries
|
|
|
|
* stored in the rbtree...
|
|
|
|
*/
|
|
|
|
|
|
|
|
int hists__init(void)
|
|
|
|
{
|
|
|
|
int err = perf_evsel__object_config(sizeof(struct hists_evsel),
|
|
|
|
hists_evsel__init, NULL);
|
|
|
|
if (err)
|
|
|
|
fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|