unify arguments format by using whitespace. (#50)
Co-authored-by: Guoshuai Zhao <guzhao@microsoft.com>
Co-authored-by: Yifan Xiong <yifan.xiong@microsoft.com>
Parent: 3296265cfa
Commit: 4664019ac3
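Note on the change: for single-value options, argparse treats '--flag value' and '--flag=value' identically, so switching every parameter string to the whitespace form is a stylistic unification rather than a behavioral change. A minimal standalone sketch (illustrative only, not SuperBench code; the '--num_steps' option mirrors the examples below):

import argparse

# Toy parser; both argument spellings parse to the same value.
parser = argparse.ArgumentParser()
parser.add_argument('--num_steps', type=int, default=8)

equals_form, _ = parser.parse_known_args('--num_steps=20'.split())
space_form, _ = parser.parse_known_args('--num_steps 20'.split())
assert equals_form.num_steps == space_form.num_steps == 20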
@@ -12,7 +12,7 @@ from superbench.common.utils import logger
 
 if __name__ == '__main__':
     context = BenchmarkRegistry.create_benchmark_context(
-        'matmul', parameters='--num_steps=20', framework=Framework.PYTORCH
+        'matmul', parameters='--num_steps 20', framework=Framework.PYTORCH
     )
 
     benchmark = BenchmarkRegistry.launch_benchmark(context)
@@ -15,7 +15,7 @@ if __name__ == '__main__':
     # Create context for bert-large benchmark and run it for 120 * 2 seconds.
     context = BenchmarkRegistry.create_benchmark_context(
         'bert-large',
-        parameters='--batch_size=1 --duration=120 --seq_len=512 --precision=float32 --run_count=2',
+        parameters='--batch_size 1 --duration 120 --seq_len 8 --precision float32 --run_count 2',
         framework=Framework.PYTORCH
     )
 
@@ -12,7 +12,7 @@ from superbench.common.utils import logger
 
 if __name__ == '__main__':
     context = BenchmarkRegistry.create_benchmark_context(
-        'sharding-matmul', parameters='--num_steps=20', framework=Framework.PYTORCH
+        'sharding-matmul', parameters='--num_steps 20', framework=Framework.PYTORCH
     )
 
     benchmark = BenchmarkRegistry.launch_benchmark(context)
@@ -101,13 +101,13 @@ class FakeModelBenchmark(ModelBenchmark):
         return 200
 
 
-def create_benchmark(params='--num_steps=8'):
+def create_benchmark(params='--num_steps 8'):
     """Register and create benchmark."""
     # Register the FakeModelBenchmark benchmark.
     BenchmarkRegistry.register_benchmark(
         'pytorch-fake-model',
         FakeModelBenchmark,
-        parameters='--hidden_size=2',
+        parameters='--hidden_size 2',
         platform=Platform.CUDA,
     )
     context = BenchmarkRegistry.create_benchmark_context(
@@ -128,13 +128,13 @@ def test_arguments_related_interfaces():
     Benchmark.get_configurable_settings()
     """
     # Positive case for parse_args().
-    benchmark = create_benchmark('--num_steps=9')
+    benchmark = create_benchmark('--num_steps 9')
     benchmark.add_parser_arguments()
     (ret, args, unknown) = benchmark.parse_args()
     assert (ret and args.num_steps == 9)
 
     # Negative case for parse_args() - invalid precision.
-    benchmark = create_benchmark('--num_steps=8 --precision=fp32')
+    benchmark = create_benchmark('--num_steps 8 --precision fp32')
     benchmark.add_parser_arguments()
     (ret, args, unknown) = benchmark.parse_args()
     assert (ret is False)
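The (ret, args, unknown) triple asserted above can be reproduced with plain argparse; a rough sketch, assuming a parser roughly equivalent to what add_parser_arguments() builds (the option set and the 'float32'/'float16' choices are illustrative, not the actual Benchmark implementation):

import argparse

# Hypothetical stand-in for Benchmark.parse_args(); only the return shape is mirrored here.
parser = argparse.ArgumentParser(allow_abbrev=False)
parser.add_argument('--num_steps', type=int, default=8)
parser.add_argument('--precision', type=str, choices=['float32', 'float16'])

def parse_args(params):
    try:
        args, unknown = parser.parse_known_args(params.split())
        return True, args, unknown
    except SystemExit:
        # argparse prints a usage error and exits on invalid values such as '--precision fp32'.
        return False, None, []

ret, args, unknown = parse_args('--num_steps 9')
assert ret and args.num_steps == 9

ret, args, unknown = parse_args('--num_steps 8 --precision fp32')
assert ret is False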
@@ -169,7 +169,7 @@ def test_arguments_related_interfaces():
 def test_preprocess():
     """Test interface Benchmark._preprocess()."""
     # Positive case for _preprocess().
-    benchmark = create_benchmark('--num_steps=8')
+    benchmark = create_benchmark('--num_steps 8')
     assert (benchmark._preprocess())
     assert (benchmark.return_code == ReturnCode.SUCCESS)
     settings = benchmark.get_configurable_settings()
@@ -199,12 +199,12 @@ def test_preprocess():
     assert (settings == expected_settings)
 
     # Negative case for _preprocess() - invalid precision.
-    benchmark = create_benchmark('--num_steps=8 --precision=fp32')
+    benchmark = create_benchmark('--num_steps 8 --precision fp32')
     assert (benchmark._preprocess() is False)
     assert (benchmark.return_code == ReturnCode.INVALID_ARGUMENT)
 
     # Negative case for _preprocess() - invalid benchmark type.
-    benchmark = create_benchmark('--num_steps=8 --precision=float32')
+    benchmark = create_benchmark('--num_steps 8 --precision float32')
     benchmark._benchmark_type = Platform.CUDA
     assert (benchmark._preprocess() is False)
     assert (benchmark.return_code == ReturnCode.INVALID_BENCHMARK_TYPE)
@@ -224,7 +224,7 @@ def test_train():
     assert (benchmark.serialized_result == expected_result)
 
     # Step time list is empty (simulate training failure).
-    benchmark = create_benchmark('--num_steps=0')
+    benchmark = create_benchmark('--num_steps 0')
     expected_result = (
         '{"name": "pytorch-fake-model", "type": "model", "run_count": 1, "return_code": 0, '
         '"start_time": null, "end_time": null, "raw_data": {}, "result": {}}'
@@ -248,7 +248,7 @@ def test_inference():
     assert (benchmark.serialized_result == expected_result)
 
     # Step time list is empty (simulate inference failure).
-    benchmark = create_benchmark('--num_steps=0')
+    benchmark = create_benchmark('--num_steps 0')
     expected_result = (
         '{"name": "pytorch-fake-model", "type": "model", "run_count": 1, "return_code": 0, '
         '"start_time": null, "end_time": null, "raw_data": {}, "result": {}}'
@@ -295,19 +295,19 @@ def test_benchmark():
     assert (benchmark.serialized_result == expected_serialized_result)
 
     # Negative case for _benchmark() - no supported precision found.
-    benchmark = create_benchmark('--precision=int16')
+    benchmark = create_benchmark('--precision int16')
     assert (benchmark._preprocess())
     assert (benchmark._benchmark() is False)
     assert (benchmark.return_code == ReturnCode.NO_SUPPORTED_PRECISION)
 
     # Negative case for _benchmark() - model train failure, step time list is empty.
-    benchmark = create_benchmark('--num_steps=0')
+    benchmark = create_benchmark('--num_steps 0')
     assert (benchmark._preprocess())
     assert (benchmark._benchmark() is False)
     assert (benchmark.return_code == ReturnCode.MODEL_TRAIN_FAILURE)
 
     # Negative case for _benchmark() - model inference failure, step time list is empty.
-    benchmark = create_benchmark('--model_action=inference --num_steps=0')
+    benchmark = create_benchmark('--model_action inference --num_steps 0')
     assert (benchmark._preprocess())
     assert (benchmark._benchmark() is False)
     assert (benchmark.return_code == ReturnCode.MODEL_INFERENCE_FAILURE)
@@ -179,7 +179,7 @@ def test_pytorch_base():
     # Launch benchmark with --no_gpu for testing.
     context = BenchmarkRegistry.create_benchmark_context(
         'pytorch-mnist',
-        parameters='--batch_size=32 --num_warmup=8 --num_steps=64 --model_action train inference --no_gpu'
+        parameters='--batch_size 32 --num_warmup 8 --num_steps 64 --model_action train inference --no_gpu'
     )
 
     benchmark = BenchmarkRegistry.launch_benchmark(context)
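The old parameter string above already mixed the two styles ('--batch_size=32 ... --model_action train inference'), because a multi-value option like '--model_action train inference' only works in the whitespace form; with '=', argparse binds a single value and leaves the rest unrecognized. A small sketch, assuming '--model_action' is declared with nargs='+' (an assumption about the benchmark's parser, not confirmed by this diff):

import argparse

# Illustrative parser; nargs='+' lets '--model_action' take several whitespace-separated values.
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', type=int, default=1)
parser.add_argument('--model_action', type=str, nargs='+', default=['train'])

args, unknown = parser.parse_known_args('--batch_size 32 --model_action train inference'.split())
assert args.batch_size == 32
assert args.model_action == ['train', 'inference']
assert unknown == []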
@@ -15,7 +15,7 @@ def test_pytorch_bert_base():
     context = BenchmarkRegistry.create_benchmark_context(
         'bert-base',
         platform=Platform.CUDA,
-        parameters='--batch_size=32 --num_classes=5 --seq_len=512',
+        parameters='--batch_size 32 --num_classes 5 --seq_len 512',
         framework=Framework.PYTORCH
     )
 
@@ -61,7 +61,7 @@ def test_pytorch_bert_large():
     context = BenchmarkRegistry.create_benchmark_context(
         'bert-large',
         platform=Platform.CUDA,
-        parameters='--batch_size=32 --num_classes=5 --seq_len=512',
+        parameters='--batch_size 32 --num_classes 5 --seq_len 512',
         framework=Framework.PYTORCH
     )
 
@@ -8,8 +8,8 @@ from superbench.benchmarks import BenchmarkContext, Platform, Framework
 
 def test_benchmark_context():
     """Test BenchmarkContext class."""
-    context = BenchmarkContext('pytorch-bert-large', Platform.CUDA, 'batch_size=8', framework=Framework.PYTORCH)
+    context = BenchmarkContext('pytorch-bert-large', Platform.CUDA, '--batch_size 8', framework=Framework.PYTORCH)
     assert (context.name == 'pytorch-bert-large')
     assert (context.platform == Platform.CUDA)
-    assert (context.parameters == 'batch_size=8')
+    assert (context.parameters == '--batch_size 8')
     assert (context.framework == Framework.PYTORCH)
@@ -124,12 +124,12 @@ def test_launch_benchmark():
     """Test interface BenchmarkRegistry.launch_benchmark()."""
    # Register benchmarks for testing.
     BenchmarkRegistry.register_benchmark(
-        'accumulation', AccumulationBenchmark, parameters='--upper_bound=5', platform=Platform.CPU
+        'accumulation', AccumulationBenchmark, parameters='--upper_bound 5', platform=Platform.CPU
     )
 
     # Launch benchmark.
     context = BenchmarkRegistry.create_benchmark_context(
-        'accumulation', platform=Platform.CPU, parameters='--lower_bound=1'
+        'accumulation', platform=Platform.CPU, parameters='--lower_bound 1'
     )
 
     benchmark = BenchmarkRegistry.launch_benchmark(context)
@@ -153,7 +153,7 @@ def test_launch_benchmark():
 
     # Launch benchmark with overridden parameters.
     context = BenchmarkRegistry.create_benchmark_context(
-        'accumulation', platform=Platform.CPU, parameters='--lower_bound=1 --upper_bound=4'
+        'accumulation', platform=Platform.CPU, parameters='--lower_bound 1 --upper_bound 4'
     )
     benchmark = BenchmarkRegistry.launch_benchmark(context)
     assert (benchmark)
@@ -176,14 +176,14 @@ def test_launch_benchmark():
 
     # Failed to launch benchmark due to 'benchmark not found'.
     context = BenchmarkRegistry.create_benchmark_context(
-        'accumulation-fail', Platform.CPU, parameters='--lower_bound=1 --upper_bound=4', framework=Framework.PYTORCH
+        'accumulation-fail', Platform.CPU, parameters='--lower_bound 1 --upper_bound 4', framework=Framework.PYTORCH
     )
     benchmark = BenchmarkRegistry.launch_benchmark(context)
     assert (benchmark is None)
 
     # Failed to launch benchmark due to 'unknown arguments'.
     context = BenchmarkRegistry.create_benchmark_context(
-        'accumulation', platform=Platform.CPU, parameters='--lower_bound=1 --test=4'
+        'accumulation', platform=Platform.CPU, parameters='--lower_bound 1 --test 4'
     )
     benchmark = BenchmarkRegistry.launch_benchmark(context)
     assert (benchmark)
@@ -191,7 +191,7 @@ def test_launch_benchmark():
 
     # Failed to launch benchmark due to 'invalid arguments'.
     context = BenchmarkRegistry.create_benchmark_context(
-        'accumulation', platform=Platform.CPU, parameters='--lower_bound=1 --upper_bound=x'
+        'accumulation', platform=Platform.CPU, parameters='--lower_bound 1 --upper_bound x'
     )
     benchmark = BenchmarkRegistry.launch_benchmark(context)
     assert (benchmark)