Benchmarks: Code Revision - change 'reduce' to 'reduce_op' (#156)

**Description**
Change the field name `reduce` to `reduce_op`.
guoshzhao committed 2021-08-16 11:33:39 +08:00, committed by GitHub
Parent 783c91258d
Commit 7293e783f1
No key found matching this signature
GPG key ID: 4AEE18F83AFDEB23
5 changed files: 20 additions and 15 deletions

View file

@@ -219,7 +219,7 @@ result = {
         ...
         'metricsM': List[Number],
     },
-    'reduce': {
+    'reduce_op': {
         'metrics1': ReduceType,
         ...
         'metricsM': ReduceType,
@@ -249,7 +249,7 @@ result = {
         'throughput-inference-float32': [avg_throughput1, ..., avg_throughputN],
         'throughput-inference-float16': [avg_throughput1, ..., avg_throughputN],
     },
-    'reduce': {
+    'reduce_op': {
         'throughput-train-float32': 'min',
         'throughput-train-float16': 'min',
         'throughput-inference-float32': None,
@@ -272,7 +272,7 @@ result = {
     'result': {    # Key is metrics
         'overhead': [overhead1, ..., overheadN],
     },
-    'reduce': {
+    'reduce_op': {
         'overhead': None,
     },
 }
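For readers of the documentation hunk above, a minimal sketch of what a populated result dictionary looks like after this rename; the metric names and values are illustrative only and do not come from this commit:

```python
# Illustrative only: a result dictionary following the documented layout,
# with the renamed 'reduce_op' field replacing the old 'reduce' field.
result = {
    'name': 'pytorch-fake-model',
    'type': 'model',
    'run_count': 1,
    'return_code': 0,
    'raw_data': {
        # one list of per-step samples per run
        'throughput_train_float32': [[16000.0, 16000.0, 16000.0]],
    },
    'result': {
        # one summarized number per run
        'throughput_train_float32': [16000.0],
    },
    'reduce_op': {
        # ReduceType value as a string, or None when no reduction applies
        'throughput_train_float32': 'min',
    },
}
```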

View file

@@ -31,7 +31,7 @@ class BenchmarkResult():
         self.__end_time = None
         self.__raw_data = dict()
         self.__result = dict()
-        self.__reduce = dict()
+        self.__reduce_op = dict()

     def __eq__(self, rhs):
         """Override equal function for deep comparison.
@@ -89,7 +89,7 @@ class BenchmarkResult():
             if metric not in self.__result:
                 self.__result[metric] = list()
-                self.__reduce[metric] = reduce_type.value if isinstance(reduce_type, Enum) else None
+                self.__reduce_op[metric] = reduce_type.value if isinstance(reduce_type, Enum) else None
             self.__result[metric].append(value)

         return True
@@ -177,3 +177,8 @@ class BenchmarkResult():
     def result(self):
         """Decoration function to access __result."""
         return self.__result
+
+    @property
+    def reduce_op(self):
+        """Decoration function to access __reduce_op."""
+        return self.__reduce_op
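To see how the renamed private attribute and the new property fit together outside the diff context, here is a self-contained sketch; the class is heavily simplified, and the recording method name `add_result` is an assumption for illustration, not something this commit defines:

```python
from enum import Enum


class ReduceType(Enum):
    """Subset of reduce operations mentioned in the result documentation."""
    MIN = 'min'
    MAX = 'max'
    AVG = 'avg'


class BenchmarkResult:
    """Simplified stand-in for the real class touched by this commit."""
    def __init__(self):
        self.__result = dict()
        self.__reduce_op = dict()    # renamed from self.__reduce

    def add_result(self, metric, value, reduce_type=None):
        """Hypothetical recording method mirroring the hunk around line 89."""
        if metric not in self.__result:
            self.__result[metric] = list()
            # Store the reduce op once per metric; None when no reduction applies.
            self.__reduce_op[metric] = reduce_type.value if isinstance(reduce_type, Enum) else None
        self.__result[metric].append(value)
        return True

    @property
    def reduce_op(self):
        """Decoration function to access __reduce_op."""
        return self.__reduce_op


result = BenchmarkResult()
result.add_result('throughput_train_float32', 16000.0, ReduceType.MIN)
print(result.reduce_op)    # {'throughput_train_float32': 'min'}
```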

View file

@@ -220,7 +220,7 @@ def test_train():
         '"steptime_train_float32": [[2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0]], '
         '"throughput_train_float32": [[16000.0, 16000.0, 16000.0, 16000.0, 16000.0, 16000.0, 16000.0, 16000.0]]}, '
         '"result": {"steptime_train_float32": [2.0], "throughput_train_float32": [16000.0]}, '
-        '"reduce": {"steptime_train_float32": "max", "throughput_train_float32": "min"}}'
+        '"reduce_op": {"steptime_train_float32": "max", "throughput_train_float32": "min"}}'
     )
     assert (benchmark._preprocess())
     assert (benchmark._ModelBenchmark__train(Precision.FLOAT32))
@@ -230,7 +230,7 @@ def test_train():
     benchmark = create_benchmark('--num_steps 0')
     expected_result = (
         '{"name": "pytorch-fake-model", "type": "model", "run_count": 1, "return_code": 3, '
-        '"start_time": null, "end_time": null, "raw_data": {}, "result": {}, "reduce": {}}'
+        '"start_time": null, "end_time": null, "raw_data": {}, "result": {}, "reduce_op": {}}'
     )
     assert (benchmark._preprocess())
     assert (benchmark._ModelBenchmark__train(Precision.FLOAT32) is False)
@@ -246,7 +246,7 @@ def test_inference():
         '"steptime_inference_float16": [[4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0]], '
         '"throughput_inference_float16": [[8000.0, 8000.0, 8000.0, 8000.0, 8000.0, 8000.0, 8000.0, 8000.0]]}, '
         '"result": {"steptime_inference_float16": [4.0], "throughput_inference_float16": [8000.0]}, '
-        '"reduce": {"steptime_inference_float16": null, "throughput_inference_float16": null}}'
+        '"reduce_op": {"steptime_inference_float16": null, "throughput_inference_float16": null}}'
     )
     assert (benchmark._preprocess())
     assert (benchmark._ModelBenchmark__inference(Precision.FLOAT16))
@@ -256,7 +256,7 @@ def test_inference():
     benchmark = create_benchmark('--num_steps 0')
     expected_result = (
         '{"name": "pytorch-fake-model", "type": "model", "run_count": 1, "return_code": 3, '
-        '"start_time": null, "end_time": null, "raw_data": {}, "result": {}, "reduce": {}}'
+        '"start_time": null, "end_time": null, "raw_data": {}, "result": {}, "reduce_op": {}}'
     )
     assert (benchmark._preprocess())
     assert (benchmark._ModelBenchmark__inference(Precision.FLOAT16) is False)
@@ -296,7 +296,7 @@ def test_benchmark():
         '"throughput_train_float16": [[16000.0, 16000.0, 16000.0, 16000.0, 16000.0, 16000.0, 16000.0, 16000.0]]}, '
         '"result": {"steptime_train_float32": [2.0], "throughput_train_float32": [16000.0], '
         '"steptime_train_float16": [2.0], "throughput_train_float16": [16000.0]}, '
-        '"reduce": {"steptime_train_float32": "max", "throughput_train_float32": "min", '
+        '"reduce_op": {"steptime_train_float32": "max", "throughput_train_float32": "min", '
         '"steptime_train_float16": "max", "throughput_train_float16": "min"}}'
     )
     assert (benchmark.serialized_result == expected_serialized_result)

View file

@@ -149,7 +149,7 @@ def test_launch_benchmark():
         '"return_code": 0, "start_time": null, "end_time": null, '
         '"raw_data": {"accumulation_result": ["1,3,6,10"]}, '
         '"result": {"accumulation_result": [10]}, '
-        '"reduce": {"accumulation_result": null}}'
+        '"reduce_op": {"accumulation_result": null}}'
     )
     assert (result == expected)
@@ -173,7 +173,7 @@ def test_launch_benchmark():
         '"return_code": 0, "start_time": null, "end_time": null, '
         '"raw_data": {"accumulation_result": ["1,3,6"]}, '
         '"result": {"accumulation_result": [6]}, '
-        '"reduce": {"accumulation_result": null}}'
+        '"reduce_op": {"accumulation_result": null}}'
     )
     assert (result == expected)

View file

@@ -83,6 +83,6 @@ def test_serialize_deserialize():
         '"start_time": "2021-02-03 16:59:49", "end_time": "2021-02-03 17:00:08", '
         '"raw_data": {"metric1": [[1, 2, 3], [4, 5, 6], [7, 8, 9]]}, '
         '"result": {"metric1": [300, 200], "metric2": [100]}, '
-        '"reduce": {"metric1": "max", "metric2": "avg"}}'
+        '"reduce_op": {"metric1": "max", "metric2": "avg"}}'
     )
     assert (result.to_string() == expected)