perf stat: Delay metric parsing

Having metric parsing as part of argument processing causes issues, as
flags like --metric-no-group may be specified later on the command
line. It also denies the opportunity to optimize the events on SMT
systems, where fewer events may be needed if we know the target is
system-wide. Move metric parsing to after command-line option parsing.
Because of how stat runs, this moves the parsing after record/report,
which currently fail to work with metrics anyway.
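
To illustrate the ordering issue (a hedged example, not taken from the
commit; the metric group name is only a placeholder), consider an
invocation such as:

    perf stat -M TopdownL1 --metric-no-group -a -- sleep 1

With eager parsing, the -M metrics were parsed while the arguments were
still being processed, before --metric-no-group had been seen, so the
later flag could not affect how the metric's events were set up. With
parsing deferred until after all options are handled, both flags, and
the knowledge that -a makes the target system-wide, are available when
the metrics are finally parsed.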

Signed-off-by: Ian Rogers <irogers@google.com>
Cc: Ahmad Yasin <ahmad.yasin@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Caleb Biggers <caleb.biggers@intel.com>
Cc: Florian Fischer <florian.fischer@muhq.space>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Clark <james.clark@arm.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: John Garry <john.garry@huawei.com>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Kshipra Bopardikar <kshipra.bopardikar@intel.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Miaoqian Lin <linmq006@gmail.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Perry Taylor <perry.taylor@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Richter <tmricht@linux.ibm.com>
Cc: Xing Zhengjun <zhengjun.xing@linux.intel.com>
Link: https://lore.kernel.org/r/20220831174926.579643-6-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Ian Rogers 2022-08-31 10:49:24 -07:00, committed by Arnaldo Carvalho de Melo
Parent cc2c4e26ec
Commit a4b8cfcabb
3 changed files: 39 additions and 18 deletions

tools/perf/builtin-stat.c

@@ -191,6 +191,7 @@ static bool append_file;
 static bool interval_count;
 static const char *output_name;
 static int output_fd;
+static char *metrics;

 struct perf_stat {
         bool record;
@@ -1148,14 +1149,23 @@ static int enable_metric_only(const struct option *opt __maybe_unused,
         return 0;
 }

-static int parse_metric_groups(const struct option *opt,
+static int append_metric_groups(const struct option *opt __maybe_unused,
                                const char *str,
                                int unset __maybe_unused)
 {
-        return metricgroup__parse_groups(opt, str,
-                                         stat_config.metric_no_group,
-                                         stat_config.metric_no_merge,
-                                         &stat_config.metric_events);
+        if (metrics) {
+                char *tmp;
+
+                if (asprintf(&tmp, "%s,%s", metrics, str) < 0)
+                        return -ENOMEM;
+                free(metrics);
+                metrics = tmp;
+        } else {
+                metrics = strdup(str);
+                if (!metrics)
+                        return -ENOMEM;
+        }
+        return 0;
 }

 static int parse_control_option(const struct option *opt,
@@ -1299,7 +1309,7 @@ static struct option stat_options[] = {
                        "measure SMI cost"),
         OPT_CALLBACK('M', "metrics", &evsel_list, "metric/metric group list",
                      "monitor specified metrics or metric groups (separated by ,)",
-                     parse_metric_groups),
+                     append_metric_groups),
         OPT_BOOLEAN_FLAG(0, "all-kernel", &stat_config.all_kernel,
                          "Configure all used events to run in kernel space.",
                          PARSE_OPT_EXCLUSIVE),
@@ -1792,11 +1802,9 @@ static int add_default_attributes(void)
                  * on an architecture test for such a metric name.
                  */
                 if (metricgroup__has_metric("transaction")) {
-                        struct option opt = { .value = &evsel_list };
-
-                        return metricgroup__parse_groups(&opt, "transaction",
+                        return metricgroup__parse_groups(evsel_list, "transaction",
                                                          stat_config.metric_no_group,
-                                                        stat_config.metric_no_merge,
+                                                         stat_config.metric_no_merge,
                                                          &stat_config.metric_events);
                 }
@@ -2183,6 +2191,8 @@ static int __cmd_report(int argc, const char **argv)
                         input_name = "perf.data";
         }

+        perf_stat__init_shadow_stats();
+
         perf_stat.data.path = input_name;
         perf_stat.data.mode = PERF_DATA_MODE_READ;
@@ -2262,8 +2272,6 @@ int cmd_stat(int argc, const char **argv)
         argc = parse_options_subcommand(argc, argv, stat_options, stat_subcommands,
                                         (const char **) stat_usage,
                                         PARSE_OPT_STOP_AT_NON_OPTION);
-        perf_stat__collect_metric_expr(evsel_list);
-        perf_stat__init_shadow_stats();

         if (stat_config.csv_sep) {
                 stat_config.csv_output = true;
@@ -2430,6 +2438,23 @@ int cmd_stat(int argc, const char **argv)
                         target.system_wide = true;
         }

+        if ((stat_config.aggr_mode == AGGR_THREAD) && (target.system_wide))
+                target.per_thread = true;
+
+        /*
+         * Metric parsing needs to be delayed as metrics may optimize events
+         * knowing the target is system-wide.
+         */
+        if (metrics) {
+                metricgroup__parse_groups(evsel_list, metrics,
+                                          stat_config.metric_no_group,
+                                          stat_config.metric_no_merge,
+                                          &stat_config.metric_events);
+                zfree(&metrics);
+        }
+        perf_stat__collect_metric_expr(evsel_list);
+        perf_stat__init_shadow_stats();
+
         if (add_default_attributes())
                 goto out;
@@ -2449,9 +2474,6 @@ int cmd_stat(int argc, const char **argv)
                 }
         }

-        if ((stat_config.aggr_mode == AGGR_THREAD) && (target.system_wide))
-                target.per_thread = true;
-
         if (evlist__fix_hybrid_cpus(evsel_list, target.cpu_list)) {
                 pr_err("failed to use cpu list %s\n", target.cpu_list);
                 goto out;

tools/perf/util/metricgroup.c

@@ -1646,13 +1646,12 @@ out:
         return ret;
 }

-int metricgroup__parse_groups(const struct option *opt,
+int metricgroup__parse_groups(struct evlist *perf_evlist,
                               const char *str,
                               bool metric_no_group,
                               bool metric_no_merge,
                               struct rblist *metric_events)
 {
-        struct evlist *perf_evlist = *(struct evlist **)opt->value;
         const struct pmu_events_table *table = pmu_events_table__find();

         if (!table)

tools/perf/util/metricgroup.h

@@ -64,7 +64,7 @@ struct metric_expr {
 struct metric_event *metricgroup__lookup(struct rblist *metric_events,
                                          struct evsel *evsel,
                                          bool create);
-int metricgroup__parse_groups(const struct option *opt,
+int metricgroup__parse_groups(struct evlist *perf_evlist,
                               const char *str,
                               bool metric_no_group,
                               bool metric_no_merge,