perf test: Add skip list for metrics known to fail

Some metrics are very likely to fail due to multiplexing or other errors,
so their test results are not reliable. Add all of these known-flaky
metrics to a skip list in the rule file so they are excluded from
validation.
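
For reference, the filtering behaves like this minimal standalone sketch
of the new remove_unsupported_rules() logic (the metric names and rule
entries below are illustrative stand-ins, not the shipped rule file):

    # Standalone sketch of the skip-list filtering added by this patch.
    collected = {"tma_frontend_bound", "tma_backend_bound", "tma_false_sharing"}
    skiplist = {"tma_false_sharing", "tma_remote_cache"}

    rules = [
        {"RuleIndex": 1, "Metrics": [{"Name": "tma_frontend_bound"},
                                     {"Name": "tma_backend_bound"}]},
        {"RuleIndex": 2, "Metrics": [{"Name": "tma_false_sharing"}]},
    ]

    def remove_unsupported_rules(rules, metrics, skiplist):
        # Drop skipped metrics first; keep only rules whose metrics all remain.
        for m in skiplist:
            metrics.discard(m)
        return [r for r in rules
                if all(m["Name"] in metrics for m in r["Metrics"])]

    print(remove_unsupported_rules(rules, collected, skiplist))
    # Only the RuleIndex 1 rule survives; rule 2 uses a skipped metric.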

Signed-off-by: Weilin Wang <weilin.wang@intel.com>
Tested-by: Namhyung Kim <namhyung@kernel.org>
Cc: ravi.bangoria@amd.com
Cc: Ian Rogers <irogers@google.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Caleb Biggers <caleb.biggers@intel.com>
Cc: Perry Taylor <perry.taylor@intel.com>
Cc: Samantha Alt <samantha.alt@intel.com>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Link: https://lore.kernel.org/r/20230620170027.1861012-3-weilin.wang@intel.com
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Authored by Weilin Wang on 2023-06-20 10:00:26 -07:00; committed by Namhyung Kim
Parent: 3ad7092f51
Commit: a0f1cc18f9
2 changed files: 38 additions and 4 deletions

tools/perf/tests/shell/lib/perf_metric_validation.py

@@ -12,7 +12,7 @@ class Validator:
         self.reportfname = reportfname
         self.rules = None
         self.collectlist=metrics
-        self.metrics = set()
+        self.metrics = set(metrics)
         self.tolerance = t
 
         self.workloads = [x for x in workload.split(",") if x]
@@ -148,6 +148,7 @@ class Validator:
                 self.errlist.append("Metric '%s' is not collected"%(name))
             elif val < 0:
                 negmetric.add("{0}(={1:.4f})".format(name, val))
+                self.collectlist[0].append(name)
             else:
                 pcnt += 1
             tcnt += 1
@@ -266,6 +267,7 @@ class Validator:
                 passcnt += 1
             else:
                 faillist.append({'MetricName':m['Name'], 'CollectedValue':result})
+                self.collectlist[0].append(m['Name'])
 
         self.totalcnt += totalcnt
         self.passedcnt += passcnt
@@ -348,7 +350,7 @@ class Validator:
             if rule["TestType"] == "RelationshipTest":
                 metrics = [m["Name"] for m in rule["Metrics"]]
                 if not any(m not in collectlist[0] for m in metrics):
-                    collectlist[rule["RuleIndex"]] = set(metrics)
+                    collectlist[rule["RuleIndex"]] = [",".join(list(set(metrics)))]
 
         for idx, metrics in collectlist.items():
             if idx == 0: wl = "sleep 0.5".split()
@@ -356,9 +358,12 @@ class Validator:
             for metric in metrics:
                 command = [tool, 'stat', '-j', '-M', f"{metric}", "-a"]
                 command.extend(wl)
+                print(" ".join(command))
                 cmd = subprocess.run(command, stderr=subprocess.PIPE, encoding='utf-8')
                 data = [x+'}' for x in cmd.stderr.split('}\n') if x]
                 self.convert(data, idx)
+        self.collectlist = dict()
+        self.collectlist[0] = list()
     # End of Collector and Converter
 
     # Start of Rule Generator
@@ -386,6 +391,20 @@ class Validator:
         return
 
+    def remove_unsupported_rules(self, rules, skiplist: set = None):
+        for m in skiplist:
+            self.metrics.discard(m)
+        new_rules = []
+        for rule in rules:
+            add_rule = True
+            for m in rule["Metrics"]:
+                if m["Name"] not in self.metrics:
+                    add_rule = False
+                    break
+            if add_rule:
+                new_rules.append(rule)
+        return new_rules
+
     def create_rules(self):
         """
         Create full rules which includes:
@@ -394,7 +413,10 @@ class Validator:
         Reindex all the rules to avoid repeated RuleIndex
         """
-        self.rules = self.read_json(self.rulefname)['RelationshipRules']
+        data = self.read_json(self.rulefname)
+        rules = data['RelationshipRules']
+        skiplist = set(data['SkipList'])
+        self.rules = self.remove_unsupported_rules(rules, skiplist)
 
         pctgrule = {'RuleIndex':0,
                     'TestType':'SingleMetricTest',
                     'RangeLower':'0',
@@ -453,7 +475,8 @@ class Validator:
         The final report is written into a JSON file.
         '''
-        self.parse_perf_metrics()
+        if not self.collectlist:
+            self.parse_perf_metrics()
         self.create_rules()
         for i in range(0, len(self.workloads)):
             self._init_data()

tools/perf/tests/shell/lib/perf_metric_validation_rules.json

@@ -1,4 +1,15 @@
 {
+    "SkipList": [
+        "tsx_aborted_cycles",
+        "tsx_transactional_cycles",
+        "C2_Pkg_Residency",
+        "C6_Pkg_Residency",
+        "C1_Core_Residency",
+        "C6_Core_Residency",
+        "tma_false_sharing",
+        "tma_remote_cache",
+        "tma_contested_accesses"
+    ],
     "RelationshipRules": [
     {
         "RuleIndex": 1,