perf stat: Skip evlist__[enable|disable] when all events use BPF
When all events of a perf-stat session use BPF, it is not necessary to call evlist__enable() and evlist__disable(). Skip them when all_counters_use_bpf is true. Signed-off-by: Song Liu <song@kernel.org> Reported-by: Jiri Olsa <jolsa@redhat.com> Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
This commit is contained in:
Parent
f42907e8a4
Commit
f8b61bd204
|
@ -572,7 +572,8 @@ static int enable_counters(void)
|
|||
* - we have initial delay configured
|
||||
*/
|
||||
if (!target__none(&target) || stat_config.initial_delay) {
|
||||
evlist__enable(evsel_list);
|
||||
if (!all_counters_use_bpf)
|
||||
evlist__enable(evsel_list);
|
||||
if (stat_config.initial_delay > 0)
|
||||
pr_info(EVLIST_ENABLED_MSG);
|
||||
}
|
||||
|
@ -581,13 +582,19 @@ static int enable_counters(void)
|
|||
|
||||
static void disable_counters(void)
|
||||
{
|
||||
struct evsel *counter;
|
||||
|
||||
/*
|
||||
* If we don't have tracee (attaching to task or cpu), counters may
|
||||
* still be running. To get accurate group ratios, we must stop groups
|
||||
* from counting before reading their constituent counters.
|
||||
*/
|
||||
if (!target__none(&target))
|
||||
evlist__disable(evsel_list);
|
||||
if (!target__none(&target)) {
|
||||
evlist__for_each_entry(evsel_list, counter)
|
||||
bpf_counter__disable(counter);
|
||||
if (!all_counters_use_bpf)
|
||||
evlist__disable(evsel_list);
|
||||
}
|
||||
}
|
||||
|
||||
static volatile int workload_exec_errno;
|
||||
|
|
|
@ -425,9 +425,6 @@ static void __evlist__disable(struct evlist *evlist, char *evsel_name)
|
|||
if (affinity__setup(&affinity) < 0)
|
||||
return;
|
||||
|
||||
evlist__for_each_entry(evlist, pos)
|
||||
bpf_counter__disable(pos);
|
||||
|
||||
/* Disable 'immediate' events last */
|
||||
for (imm = 0; imm <= 1; imm++) {
|
||||
evlist__for_each_cpu(evlist, i, cpu) {
|
||||
|
|
Loading…
Link in new issue