perf x86: Iterate hybrid PMUs as core PMUs
Rather than iterating over a separate hybrid list, iterate over all
PMUs, with the hybrid ones identified by is_core being true.

Reviewed-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Ian Rogers <irogers@google.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Ali Saidi <alisaidi@amazon.com>
Cc: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
Cc: Dmitrii Dolgov <9erthalion6@gmail.com>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Clark <james.clark@arm.com>
Cc: Jing Zhang <renyu.zj@linux.alibaba.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: John Garry <john.g.garry@oracle.com>
Cc: Kajol Jain <kjain@linux.ibm.com>
Cc: Kang Minchul <tegongkang@gmail.com>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mike Leach <mike.leach@linaro.org>
Cc: Ming Wang <wangming01@loongson.cn>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ravi Bangoria <ravi.bangoria@amd.com>
Cc: Rob Herring <robh@kernel.org>
Cc: Sandipan Das <sandipan.das@amd.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Suzuki Poulouse <suzuki.poulose@arm.com>
Cc: Thomas Richter <tmricht@linux.ibm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Xing Zhengjun <zhengjun.xing@linux.intel.com>
Cc: coresight@lists.linaro.org
Cc: linux-arm-kernel@lists.infradead.org
Link: https://lore.kernel.org/r/20230527072210.2900565-18-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Parent: ab1a1c77a3
Commit: dd64647ecb
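The change in a nutshell: the dedicated hybrid-list walker (perf_pmu__for_each_hybrid_pmu() over perf_pmu__hybrid_pmus) is dropped in favour of the generic perf_pmu__scan() loop plus an is_core check. A minimal sketch of the new iteration pattern, assuming the perf tools tree (the visit_core_pmus() wrapper and its callback are illustrative only, not part of this patch):

/* Sketch only: assumes the perf tools tree, where "util/pmu.h" declares
 * struct perf_pmu (with the new 'is_core' flag) and perf_pmu__scan(). */
#include "util/pmu.h"

/* Hypothetical helper: visit every core PMU. */
static void visit_core_pmus(void (*cb)(struct perf_pmu *pmu))
{
	struct perf_pmu *pmu = NULL;

	while ((pmu = perf_pmu__scan(pmu)) != NULL) {
		if (!pmu->is_core)
			continue;	/* skip non-core (e.g. uncore, software) PMUs */
		cb(pmu);
	}
}

On hybrid x86 systems such a loop visits each core PMU (e.g. cpu_core and cpu_atom); on non-hybrid systems it visits the single core PMU, so callers no longer need a separate hybrid-only code path.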
--- a/tools/perf/arch/x86/tests/hybrid.c
+++ b/tools/perf/arch/x86/tests/hybrid.c
@@ -3,7 +3,7 @@
 #include "debug.h"
 #include "evlist.h"
 #include "evsel.h"
-#include "pmu-hybrid.h"
+#include "pmu.h"
 #include "tests/tests.h"
 
 static bool test_config(const struct evsel *evsel, __u64 expected_config)
--- a/tools/perf/arch/x86/util/evlist.c
+++ b/tools/perf/arch/x86/util/evlist.c
@@ -4,7 +4,6 @@
 #include "util/evlist.h"
 #include "util/parse-events.h"
 #include "util/event.h"
-#include "util/pmu-hybrid.h"
 #include "topdown.h"
 #include "evsel.h"
 
@@ -12,9 +11,6 @@ static int ___evlist__add_default_attrs(struct evlist *evlist,
 					    struct perf_event_attr *attrs,
 					    size_t nr_attrs)
 {
-	struct perf_cpu_map *cpus;
-	struct evsel *evsel, *n;
-	struct perf_pmu *pmu;
 	LIST_HEAD(head);
 	size_t i = 0;
 
@@ -25,15 +21,24 @@ static int ___evlist__add_default_attrs(struct evlist *evlist,
 		return evlist__add_attrs(evlist, attrs, nr_attrs);
 
 	for (i = 0; i < nr_attrs; i++) {
+		struct perf_pmu *pmu = NULL;
+
 		if (attrs[i].type == PERF_TYPE_SOFTWARE) {
-			evsel = evsel__new(attrs + i);
+			struct evsel *evsel = evsel__new(attrs + i);
+
 			if (evsel == NULL)
 				goto out_delete_partial_list;
 			list_add_tail(&evsel->core.node, &head);
 			continue;
 		}
 
-		perf_pmu__for_each_hybrid_pmu(pmu) {
+		while ((pmu = perf_pmu__scan(pmu)) != NULL) {
+			struct perf_cpu_map *cpus;
+			struct evsel *evsel;
+
+			if (!pmu->is_core)
+				continue;
+
 			evsel = evsel__new(attrs + i);
 			if (evsel == NULL)
 				goto out_delete_partial_list;
@@ -51,8 +56,12 @@ static int ___evlist__add_default_attrs(struct evlist *evlist,
 	return 0;
 
 out_delete_partial_list:
-	__evlist__for_each_entry_safe(&head, n, evsel)
-		evsel__delete(evsel);
+	{
+		struct evsel *evsel, *n;
+
+		__evlist__for_each_entry_safe(&head, n, evsel)
+			evsel__delete(evsel);
+	}
 	return -1;
 }
 
--- a/tools/perf/arch/x86/util/perf_regs.c
+++ b/tools/perf/arch/x86/util/perf_regs.c
@@ -10,7 +10,6 @@
 #include "../../../util/debug.h"
 #include "../../../util/event.h"
 #include "../../../util/pmu.h"
-#include "../../../util/pmu-hybrid.h"
 
 const struct sample_reg sample_reg_masks[] = {
 	SMPL_REG(AX, PERF_REG_X86_AX),
@@ -286,7 +285,6 @@ uint64_t arch__intr_reg_mask(void)
 		.disabled		= 1,
 		.exclude_kernel		= 1,
 	};
-	struct perf_pmu *pmu;
 	int fd;
 	/*
 	 * In an unnamed union, init it here to build on older gcc versions
@@ -294,12 +292,20 @@ uint64_t arch__intr_reg_mask(void)
 	attr.sample_period = 1;
 
 	if (perf_pmu__has_hybrid()) {
+		struct perf_pmu *pmu = NULL;
+		__u64 type = PERF_TYPE_RAW;
+
 		/*
 		 * The same register set is supported among different hybrid PMUs.
 		 * Only check the first available one.
 		 */
-		pmu = list_first_entry(&perf_pmu__hybrid_pmus, typeof(*pmu), hybrid_list);
-		attr.config |= (__u64)pmu->type << PERF_PMU_TYPE_SHIFT;
+		while ((pmu = perf_pmu__scan(pmu)) != NULL) {
+			if (pmu->is_core) {
+				type = pmu->type;
+				break;
+			}
+		}
+		attr.config |= type << PERF_PMU_TYPE_SHIFT;
 	}
 
 	event_attr_init(&attr);