perf stat: Skip evlist__[enable|disable] when all events uses BPF

When all events of a perf-stat session use BPF, it is not necessary to
call evlist__enable() and evlist__disable(). Skip them when
all_counters_use_bpf is true.

Signed-off-by: Song Liu <song@kernel.org>
Reported-by: Jiri Olsa <jolsa@redhat.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
This commit is contained in:
Song Liu 2021-05-11 23:51:16 -07:00 committed by Arnaldo Carvalho de Melo
Parent f42907e8a4
Commit f8b61bd204
2 changed files: 10 additions and 6 deletions

View file

@@ -572,7 +572,8 @@ static int enable_counters(void)
	 * - we have initial delay configured
	 */
	if (!target__none(&target) || stat_config.initial_delay) {
-		evlist__enable(evsel_list);
+		if (!all_counters_use_bpf)
+			evlist__enable(evsel_list);
		if (stat_config.initial_delay > 0)
			pr_info(EVLIST_ENABLED_MSG);
	}
@@ -581,13 +582,19 @@ static int enable_counters(void)

 static void disable_counters(void)
 {
+	struct evsel *counter;
+
	/*
	 * If we don't have tracee (attaching to task or cpu), counters may
	 * still be running. To get accurate group ratios, we must stop groups
	 * from counting before reading their constituent counters.
	 */
-	if (!target__none(&target))
-		evlist__disable(evsel_list);
+	if (!target__none(&target)) {
+		evlist__for_each_entry(evsel_list, counter)
+			bpf_counter__disable(counter);
+		if (!all_counters_use_bpf)
+			evlist__disable(evsel_list);
+	}
 }

 static volatile int workload_exec_errno;

View file

@@ -425,9 +425,6 @@ static void __evlist__disable(struct evlist *evlist, char *evsel_name)
	if (affinity__setup(&affinity) < 0)
		return;

-	evlist__for_each_entry(evlist, pos)
-		bpf_counter__disable(pos);
-
	/* Disable 'immediate' events last */
	for (imm = 0; imm <= 1; imm++) {
		evlist__for_each_cpu(evlist, i, cpu) {