Bug 1184966 - perfherder should let harness do summarization

Joel Maher 2015-08-06 05:18:20 -04:00 committed by William Lachance
Parent 6130e1f6eb
Commit 61a4291292
3 changed files with 59 additions and 2 deletions

View file

@@ -3,6 +3,7 @@
# file, you can obtain one at http://mozilla.org/MPL/2.0/.
import json
import zlib

from tests.sampledata import SampleData
from treeherder.etl.perf_data_adapters import TalosDataAdapter
@@ -50,4 +51,27 @@ def test_adapt_and_load():
        datum['blob'] = json.dumps({'talos_data': [datum['blob']]})
        tda.adapt_and_load(reference_data, job_data, datum)

        # we upload a summary with a suite and subtest values, +1 for suite
        if 'summary' in datum['blob']:
            results = json.loads(zlib.decompress(tda.performance_artifact_placeholders[-1][4]))
            data = json.loads(datum['blob'])['talos_data'][0]
            assert results["blob"]["performance_series"]["geomean"] == data['summary']['suite']

            # deal with the subtests now
            subtests = len(data['summary']['subtests'])
            for iter in range(0, subtests):
                subresults = json.loads(zlib.decompress(tda.performance_artifact_placeholders[-1 - iter][4]))
                if 'subtest_signatures' in subresults["blob"]['signature_properties']:
                    # ignore summary signatures
                    continue

                subdata = data['summary']['subtests'][subresults["blob"]['signature_properties']['test']]
                for datatype in ['min', 'max', 'mean', 'median', 'std']:
                    print datatype
                    assert subdata[datatype] == subresults["blob"]["performance_series"][datatype]
        else:
            # FIXME: the talos data blob we're currently using contains
            # datums with summaries and those without; we should probably
            # test non-summarized data as well
            pass

    assert result_count == len(tda.performance_artifact_placeholders)
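
Note for readers of the test above: every row the adapter queues in performance_artifact_placeholders carries its payload as zlib-compressed JSON in column 4, which is why each assertion goes through json.loads(zlib.decompress(...)). A minimal sketch of that decode step, with a hypothetical row for illustration:

    import json
    import zlib

    def decode_artifact(row):
        # column 4 holds the zlib-compressed JSON payload
        return json.loads(zlib.decompress(row[4]))

    # hypothetical placeholder row, shaped like the ones the test indexes
    payload = zlib.compress(json.dumps(
        {"blob": {"performance_series": {"geomean": 3141.0}}}).encode("utf-8"))
    row = ("job_guid", "signature", "talos_data", "performance", payload)
    assert decode_artifact(row)["blob"]["performance_series"]["geomean"] == 3141.0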

View file

@@ -26,6 +26,13 @@
            "rss": false
        }
    },
    "summary": {
        "suite": 3141.00,
        "subtests": {
            "dhtml.html": {"min": 1, "max": 100, "std": 0.75, "mean": 50, "median": 50},
            "tablemutation.html": {"min": 1, "max": 100, "std": 0.75, "mean": 50, "median": 50}
        }
    },
    "results": {
        "dhtml.html": [
            1273,
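
The sample blob above is the heart of the change: the harness (Talos) now ships a precomputed summary, a suite-level value plus min/max/std/mean/median per subtest, alongside the raw replicates in results. The adapter changes below prefer these harness numbers over its own calculations. A sketch of a datum carrying such a summary (values copied from the sample; the replicate lists are truncated here):

    talos_datum = {
        "results": {
            "dhtml.html": [1273],          # truncated replicate list
            "tablemutation.html": [1273],  # truncated replicate list
        },
        "summary": {
            "suite": 3141.00,  # harness-computed suite value
            "subtests": {
                "dhtml.html": {"min": 1, "max": 100, "std": 0.75,
                               "mean": 50, "median": 50},
                "tablemutation.html": {"min": 1, "max": 100, "std": 0.75,
                                       "mean": 50, "median": 50},
            },
        },
    }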

View file

@@ -89,6 +89,11 @@ class PerformanceDataAdapter(object):
        # that inflate the size of the stored data structure
        return round(num, 2)

    @staticmethod
    def _extract_summary_data(suite_data, summary):
        suite_data["geomean"] = summary["suite"]
        return suite_data

    @staticmethod
    def _calculate_summary_data(job_id, result_set_id, push_timestamp, results):
        values = []
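
_extract_summary_data does one thing: it overwrites the geomean the adapter computed itself with the suite value the harness supplied. A quick illustration (the dict contents here are hypothetical):

    suite_data = {"geomean": 123.45}   # adapter-computed
    summary = {"suite": 3141.00}       # harness-supplied
    suite_data = PerformanceDataAdapter._extract_summary_data(suite_data, summary)
    assert suite_data["geomean"] == 3141.00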
@@ -108,6 +113,19 @@ class PerformanceDataAdapter(object):
            "geomean": PerformanceDataAdapter._round(geomean)
        }

    @staticmethod
    def _extract_test_data(series_data, summary):
        if not isinstance(summary, dict):
            return series_data

        series_data["min"] = PerformanceDataAdapter._round(summary["min"])
        series_data["max"] = PerformanceDataAdapter._round(summary["max"])
        series_data["std"] = PerformanceDataAdapter._round(summary["std"])
        series_data["median"] = PerformanceDataAdapter._round(summary["median"])
        series_data["mean"] = PerformanceDataAdapter._round(summary["mean"])
        return series_data

    @staticmethod
    def _calculate_test_data(job_id, result_set_id, push_timestamp,
                             replicates):
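
_extract_test_data is the per-subtest counterpart: when the harness supplies a stats dict, its min/max/std/median/mean replace the adapter's own values (rounded to two decimals via _round), while any non-dict summary leaves the series untouched. For example (hypothetical inputs):

    series_data = {"min": 0.9, "max": 101.3, "std": 1.1,
                   "median": 49.8, "mean": 50.2}             # adapter-computed
    harness = {"min": 1, "max": 100, "std": 0.754,
               "mean": 50, "median": 50}                     # harness-supplied
    series_data = PerformanceDataAdapter._extract_test_data(series_data, harness)
    assert series_data["std"] == 0.75  # harness value, rounded by _round
    # a non-dict summary (e.g. None) returns series_data unchanged
    assert PerformanceDataAdapter._extract_test_data(series_data, None) is series_data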
@@ -328,8 +346,12 @@ class TalosDataAdapter(PerformanceDataAdapter):
        series_data = self._calculate_test_data(
            job_id, result_set_id, push_timestamp,
            talos_datum["results"][_test]
        )
            talos_datum["results"][_test])

        if "summary" in talos_datum and talos_datum["summary"]["subtests"][_test]:
            summary_data = talos_datum["summary"]["subtests"][_test]
            series_data = self._extract_test_data(series_data,
                                                  summary_data)

        obj = self._get_base_perf_obj(_job_guid, _name, _type,
                                      talos_datum,
@@ -357,6 +379,10 @@ class TalosDataAdapter(PerformanceDataAdapter):
        summary_data = self._calculate_summary_data(
            job_id, result_set_id, push_timestamp, talos_datum["results"])

        if "summary" in talos_datum and "suite" in talos_datum["summary"]:
            summary_data = self._extract_summary_data(summary_data,
                                                      talos_datum["summary"])

        obj = self._get_base_perf_obj(_job_guid, _name, _type,
                                      talos_datum,
                                      summary_signature,
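
Taken together, both call sites follow the same pattern this commit introduces: calculate locally as before, then let harness-provided numbers win when present. A simplified, standalone restatement of that precedence rule (the function name is hypothetical, and the real helpers above also rename the suite key to "geomean" and round subtest values):

    def prefer_harness(calculated, harness_summary):
        # harness-supplied summary values override locally calculated ones
        if isinstance(harness_summary, dict):
            calculated.update(harness_summary)
        return calculated

    assert prefer_harness({"geomean": 123.45}, {"geomean": 3141.00}) == {"geomean": 3141.00}
    assert prefer_harness({"geomean": 123.45}, None) == {"geomean": 123.45}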