Benchmarks: Add Benchmark - Add FAMBench based on docker benchmark (#338)

**Description**
Integrate FAMBench into SuperBench as a Docker-based benchmark, following the upstream implementation:
https://github.com/facebookresearch/FAMBench

The script to run all benchmarks is:
https://github.com/facebookresearch/FAMBench/blob/main/benchmarks/run_all.sh
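Conceptually, the docker-benchmark wrapper pulls a pinned image, runs its entrypoint, and parses the stdout. A minimal sketch of that flow, assuming a standard Docker CLI with GPU support (the real command line is assembled by the DockerBenchmark base class in docker_base.py, so the exact flags may differ):

import subprocess

# Image and entrypoint values are taken from the FAMBenchBenchmark class in
# this commit; the docker invocation itself is an illustrative assumption,
# not the exact command built by superbench.
image = 'superbench/benchmark:cuda11.1.1-fambench'
entrypoint = '/workspace/FAMBench/benchmarks/run_all_benchmarks.sh'

subprocess.run(['docker', 'pull', image], check=True)
proc = subprocess.run(
    ['docker', 'run', '--rm', '--gpus', 'all', '--entrypoint', entrypoint, image],
    capture_output=True, text=True, check=True
)
print(proc.stdout)  # this raw output is what _process_raw_result() parses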
This commit is contained in:
guoshzhao 2022-04-11 15:31:07 +08:00 committed by GitHub
Parent 8dc19ca4af
Commit 80dcc8aaec
No known key found for this signature
GPG key ID: 4AEE18F83AFDEB23
4 changed files with 147 additions and 1 deletion

examples/benchmarks/fambench.py

@@ -0,0 +1,22 @@
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

"""Docker benchmark example for FAMBench.

Commands to run:
    python3 examples/benchmarks/fambench.py
"""

from superbench.benchmarks import BenchmarkRegistry, Platform
from superbench.common.utils import logger

if __name__ == '__main__':
    context = BenchmarkRegistry.create_benchmark_context('fambench', platform=Platform.CUDA)

    benchmark = BenchmarkRegistry.launch_benchmark(context)
    if benchmark:
        logger.info(
            'benchmark: {}, return code: {}, result: {}'.format(
                benchmark.name, benchmark.return_code, benchmark.result
            )
        )

superbench/benchmarks/docker_benchmarks/__init__.py

@@ -6,5 +6,9 @@
 from superbench.benchmarks.docker_benchmarks.docker_base import DockerBenchmark, CudaDockerBenchmark, \
     RocmDockerBenchmark
 from superbench.benchmarks.docker_benchmarks.rocm_onnxruntime_performance import RocmOnnxRuntimeModelBenchmark
+from superbench.benchmarks.docker_benchmarks.fambench import FAMBenchBenchmark

-__all__ = ['DockerBenchmark', 'CudaDockerBenchmark', 'RocmDockerBenchmark', 'RocmOnnxRuntimeModelBenchmark']
+__all__ = [
+    'DockerBenchmark', 'CudaDockerBenchmark', 'RocmDockerBenchmark', 'RocmOnnxRuntimeModelBenchmark',
+    'FAMBenchBenchmark'
+]
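With this change the new class is re-exported at the package level, so callers can import it directly; a trivial usage sketch:

from superbench.benchmarks.docker_benchmarks import FAMBenchBenchmark

benchmark = FAMBenchBenchmark('fambench')
print(benchmark._image_uri)  # superbench/benchmark:cuda11.1.1-fambench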

superbench/benchmarks/docker_benchmarks/fambench.py

@@ -0,0 +1,82 @@
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

"""Module of the FAMBench benchmarks.

Including:
  DLRM
  XLMR
"""

from superbench.common.utils import logger
from superbench.benchmarks import BenchmarkRegistry, Platform
from superbench.benchmarks.docker_benchmarks.docker_base import CudaDockerBenchmark


class FAMBenchBenchmark(CudaDockerBenchmark):
    """The FAMBench E2E model benchmark class."""
    def __init__(self, name, parameters=''):
        """Constructor.

        Args:
            name (str): benchmark name.
            parameters (str): benchmark parameters.
        """
        super().__init__(name, parameters)

        # Image URI of the current docker-benchmark.
        self._image_uri = 'superbench/benchmark:cuda11.1.1-fambench'

        # Image digest of the current docker-benchmark.
        self._digest = 'b7b0d07270055287129e8b4b32be0863cbc3cc061610fcfaccf3a7450906e36f'

        # Container name of the current docker-benchmark.
        self._container_name = 'fambench-benchmarks'

        # Entrypoint option of the current docker-benchmark.
        self._entrypoint = '/workspace/FAMBench/benchmarks/run_all_benchmarks.sh'

        # CMD option of the current docker-benchmark.
        self._cmd = None

    def _process_raw_result(self, cmd_idx, raw_output):
        """Function to parse raw results and save the summarized results.

        self._result.add_raw_data() and self._result.add_result() need to be called to save the results.

        Args:
            cmd_idx (int): the index of command corresponding with the raw_output.
            raw_output (str): raw output string of the micro-benchmark.

        Return:
            True if the raw output string is valid and result can be extracted.
        """
        self._result.add_raw_data('raw_output', raw_output, self._args.log_raw_data)

        content = raw_output.splitlines(False)
        try:
            result_header = 'benchmark implementation mode config score'
            found = False
            for line in content:
                if result_header in line:
                    # Lines after the header hold the per-benchmark results.
                    found = True
                elif found:
                    items = line.split(' ')
                    if len(items) == 7:
                        # Metric name: benchmark, implementation, mode, config and units.
                        name = '_'.join(items[0:4] + [items[5]])
                        for char in ['-', ' ', '=', '/']:
                            name = name.replace(char, '_')
                        score = float(items[4])
                        self._result.add_result(name.lower(), score)
        except BaseException as e:
            logger.error(
                'The result format is invalid - round: {}, benchmark: {}, message: {}.'.format(
                    self._curr_run_index, self._name, str(e)
                )
            )
            return False

        return True


BenchmarkRegistry.register_benchmark('fambench', FAMBenchBenchmark, platform=Platform.CUDA)
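To make the metric naming concrete, here is the transformation applied to one result line (the sample line is taken from the unit test below; this standalone snippet mirrors the parser's logic):

# One data line following the 'benchmark implementation mode config score' header.
line = 'DLRM OOTB eval tiny 152.800399 ex/s 0.515052'
items = line.split(' ')
# Name = benchmark, implementation, mode, config, units; '-', ' ', '=' and '/'
# are normalized to underscores and the result is lower-cased.
name = '_'.join(items[0:4] + [items[5]])
for char in ['-', ' ', '=', '/']:
    name = name.replace(char, '_')
print(name.lower(), float(items[4]))  # dlrm_ootb_eval_tiny_ex_s 152.800399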

tests/benchmarks/docker_benchmarks/test_fambench.py

@@ -0,0 +1,38 @@
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.

"""Tests for FAMBench modules."""

from types import SimpleNamespace

from superbench.benchmarks import BenchmarkRegistry, BenchmarkType, Platform, ReturnCode
from superbench.benchmarks.result import BenchmarkResult


def test_fambench():
    """Test FAMBench benchmarks."""
    benchmark_name = 'fambench'
    (benchmark_class,
     predefine_params) = BenchmarkRegistry._BenchmarkRegistry__select_benchmark(benchmark_name, Platform.CUDA)
    assert (benchmark_class)

    benchmark = benchmark_class(benchmark_name)
    assert (benchmark._benchmark_type == BenchmarkType.DOCKER)
    assert (benchmark._image_uri == 'superbench/benchmark:cuda11.1.1-fambench')
    assert (benchmark._container_name == 'fambench-benchmarks')
    assert (benchmark._entrypoint == '/workspace/FAMBench/benchmarks/run_all_benchmarks.sh')
    assert (benchmark._cmd is None)

    benchmark._result = BenchmarkResult(benchmark._name, benchmark._benchmark_type, ReturnCode.SUCCESS)
    benchmark._args = SimpleNamespace(log_raw_data=False)

    raw_output = """
benchmark implementation mode config score units batch_latency_95_sec
DLRM OOTB eval tiny 152.800399 ex/s 0.515052
DLRM OOTB train tiny 35.483686 ex/s None
DLRM UBENCH train linear_[(2,2,2,2,2)] 3.679281e-07 TF/s None
XLMR OOTB eval default-config 1.015586 ex/s 16.463461
"""
    assert (benchmark._process_raw_result(0, raw_output))
    assert (benchmark.result['dlrm_ootb_eval_tiny_ex_s'][0] == 152.800399)
    assert (benchmark.result['dlrm_ootb_train_tiny_ex_s'][0] == 35.483686)
    assert (benchmark.result['dlrm_ubench_train_linear_[(2,2,2,2,2)]_tf_s'][0] == 3.679281e-07)
    assert (benchmark.result['xlmr_ootb_eval_default_config_ex_s'][0] == 1.015586)