perf pmus: Prefer perf_pmu__scan over perf_pmus__for_each_pmu

perf_pmus__for_each_pmu() doesn't lazily initialize the pmus list, making
its use error prone. Just use perf_pmu__scan() instead, as this only
impacts non-performance-critical tests.
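
For reference, the replacement idiom used throughout the diff below is the
lazy scan loop: calling perf_pmu__scan() with NULL starts a fresh scan and
populates the pmus list on first use, and each later call returns the next
PMU until NULL marks the end. A minimal sketch, with an illustrative loop
body only:

	struct perf_pmu *pmu = NULL;

	/*
	 * The first call (pmu == NULL) lazily initializes the pmus list;
	 * subsequent calls walk it and return NULL when it is exhausted.
	 */
	while ((pmu = perf_pmu__scan(pmu)) != NULL)
		pr_debug("found pmu %s\n", pmu->name); /* illustrative body */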

Reviewed-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Ian Rogers <irogers@google.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Ali Saidi <alisaidi@amazon.com>
Cc: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
Cc: Dmitrii Dolgov <9erthalion6@gmail.com>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Clark <james.clark@arm.com>
Cc: Jing Zhang <renyu.zj@linux.alibaba.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: John Garry <john.g.garry@oracle.com>
Cc: Kajol Jain <kjain@linux.ibm.com>
Cc: Kang Minchul <tegongkang@gmail.com>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mike Leach <mike.leach@linaro.org>
Cc: Ming Wang <wangming01@loongson.cn>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ravi Bangoria <ravi.bangoria@amd.com>
Cc: Rob Herring <robh@kernel.org>
Cc: Sandipan Das <sandipan.das@amd.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Suzuki Poulouse <suzuki.poulose@arm.com>
Cc: Thomas Richter <tmricht@linux.ibm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Xing Zhengjun <zhengjun.xing@linux.intel.com>
Cc: coresight@lists.linaro.org
Cc: linux-arm-kernel@lists.infradead.org
Link: https://lore.kernel.org/r/20230527072210.2900565-26-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Ian Rogers 2023-05-27 00:22:01 -07:00 committed by Arnaldo Carvalho de Melo
Parent: 597a4276fb
Commit: f24ebe8053
4 changed files with 8 additions and 18 deletions


@@ -40,13 +40,11 @@ static struct pmu_scan_result *results;
 
 static int save_result(void)
 {
-	struct perf_pmu *pmu;
+	struct perf_pmu *pmu = NULL;
 	struct list_head *list;
 	struct pmu_scan_result *r;
 
-	perf_pmu__scan(NULL);
-
-	perf_pmus__for_each_pmu(pmu) {
+	while ((pmu = perf_pmu__scan(pmu)) != NULL) {
 		r = realloc(results, (nr_pmus + 1) * sizeof(*r));
 		if (r == NULL)
 			return -ENOMEM;


@@ -50,13 +50,10 @@ static int event_open(int type, unsigned long config, int group_fd)
 
 static int setup_uncore_event(void)
 {
-	struct perf_pmu *pmu;
+	struct perf_pmu *pmu = NULL;
 	int i, fd;
 
-	if (list_empty(&pmus))
-		perf_pmu__scan(NULL);
-
-	perf_pmus__for_each_pmu(pmu) {
+	while ((pmu = perf_pmu__scan(pmu)) != NULL) {
 		for (i = 0; i < NR_UNCORE_PMUS; i++) {
 			if (!strcmp(uncore_pmus[i].name, pmu->name)) {
 				pr_debug("Using %s for uncore pmu event\n", pmu->name);


@@ -108,11 +108,11 @@ static int test__checkevent_raw(struct evlist *evlist)
 	TEST_ASSERT_VAL("wrong number of entries", 0 != evlist->core.nr_entries);
 
 	perf_evlist__for_each_evsel(&evlist->core, evsel) {
-		struct perf_pmu *pmu;
+		struct perf_pmu *pmu = NULL;
 		bool type_matched = false;
 
 		TEST_ASSERT_VAL("wrong config", test_perf_config(evsel, 0x1a));
-		perf_pmus__for_each_pmu(pmu) {
+		while ((pmu = perf_pmu__scan(pmu)) != NULL) {
 			if (pmu->type == evsel->attr.type) {
 				TEST_ASSERT_VAL("PMU type expected once", !type_matched);
 				type_matched = true;
@@ -2243,13 +2243,10 @@ static int test__terms2(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
 
 static int test__pmu_events(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
 {
-	struct perf_pmu *pmu;
+	struct perf_pmu *pmu = NULL;
 	int ret = TEST_OK;
 
-	if (list_empty(&pmus))
-		perf_pmu__scan(NULL);
-
-	perf_pmus__for_each_pmu(pmu) {
+	while ((pmu = perf_pmu__scan(pmu)) != NULL) {
 		struct stat st;
 		char path[PATH_MAX];
 		struct dirent *ent;


@@ -5,8 +5,6 @@
 extern struct list_head pmus;
 struct perf_pmu;
 
-#define perf_pmus__for_each_pmu(pmu) list_for_each_entry(pmu, &pmus, list)
-
 const struct perf_pmu *perf_pmus__pmu_for_pmu_filter(const char *str);
 
 #endif /* __PMUS_H */