Benchmarks: Add Test - Add tests for matmul and sharding-matmul benchmarks. (#41)

* add tests for matmul and sharding-matmul benchmarks.

* add decorator for sharding_matmul tests.

* add __init__.py for the utils module of benchmarks tests.

* disable GPU tests for CPU platform validation.

* fix typo

Co-authored-by: Guoshuai Zhao <guzhao@microsoft.com>
Co-authored-by: Peng Cheng <chengpeng5555@outlook.com>
Co-authored-by: Yifan Xiong <yifan.xiong@microsoft.com>
This commit is contained in:
guoshzhao 2021-04-12 22:52:14 +08:00 committed by GitHub
Parent 4664019ac3
Commit 485800268b
No key found matching this signature
GPG key ID: 4AEE18F83AFDEB23
5 changed files with 118 additions and 1 deletion

View file

@@ -26,4 +26,4 @@ RUN python3 -m pip install --upgrade pip setuptools && \
RUN python3 setup.py lint
# Test code
-RUN python3 setup.py test
+RUN SB_TEST_CUDA=0 python3 setup.py test
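
The `cuda_test` and `pytorch_test` decorators imported from `tests.helper` by the new tests are not part of this diff. As a rough sketch of how such gating could work (an assumption, not the actual helper: the module path `tests/helper/decorator.py`, the default value of `'1'`, and the use of `pytest.mark.skipif` are all guesses, and the real module may use `unittest.skipIf` instead), CUDA-dependent tests would be skipped whenever `SB_TEST_CUDA=0` is exported, which is exactly what the Dockerfile change above sets for CPU-only validation:

# Hypothetical sketch of a tests/helper/decorator.py module; not code from this commit.
import os

import pytest

try:
    import torch  # noqa: F401
    _torch_available = True
except ImportError:
    _torch_available = False

# Skip CUDA-dependent tests when SB_TEST_CUDA=0 (as the Dockerfile above sets).
cuda_test = pytest.mark.skipif(
    os.environ.get('SB_TEST_CUDA', '1') == '0',
    reason='CUDA tests are disabled via SB_TEST_CUDA=0.',
)

# Skip PyTorch-framework tests when torch cannot be imported.
pytorch_test = pytest.mark.skipif(
    not _torch_available,
    reason='PyTorch is not installed.',
)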

View file

@@ -0,0 +1,4 @@
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.

"""Utilities module for benchmarks tests."""

View file

@@ -0,0 +1,40 @@
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.

"""Tests for matmul benchmark."""

from tests.helper import decorator
from superbench.benchmarks import BenchmarkRegistry, Platform, Framework, BenchmarkType, ReturnCode
from superbench.benchmarks.micro_benchmarks.sharding_matmul import ShardingMode


@decorator.cuda_test
@decorator.pytorch_test
def test_pytorch_matmul():
    """Test pytorch-matmul benchmark."""
    context = BenchmarkRegistry.create_benchmark_context(
        'matmul', platform=Platform.CUDA, parameters='--run_count 2 --num_steps 20', framework=Framework.PYTORCH
    )

    assert (BenchmarkRegistry.is_benchmark_context_valid(context))

    benchmark = BenchmarkRegistry.launch_benchmark(context)

    # Check basic information.
    assert (benchmark)
    assert (benchmark.name == 'pytorch-matmul')
    assert (benchmark.type == BenchmarkType.MICRO)

    # Check predefined parameters of sharding-matmul benchmark.
    assert (benchmark._args.mode == [ShardingMode.NOSHARDING])

    # Check parameters specified in BenchmarkContext.
    assert (benchmark._args.run_count == 2)
    assert (benchmark._args.num_steps == 20)

    # Check results and metrics.
    assert (benchmark.run_count == 2)
    assert (benchmark.return_code == ReturnCode.SUCCESS)
    assert (len(benchmark.raw_data['nosharding']) == benchmark.run_count)
    assert (len(benchmark.raw_data['nosharding'][0]) == benchmark._args.num_steps)
    assert (len(benchmark.result['nosharding']) == benchmark.run_count)
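
Note that the matmul test above only exercises the NOSHARDING mode, so no process group is needed; the sharding-matmul test below runs the ALLREDUCE and ALLGATHER modes and therefore brackets the benchmark launch with the simulated single-process DDP environment from tests/benchmarks/utils.py.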

View file

@@ -0,0 +1,49 @@
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.

"""Tests for sharding-matmul benchmark."""

import tests.benchmarks.utils as utils
from tests.helper import decorator
from superbench.benchmarks import BenchmarkRegistry, Platform, Framework, BenchmarkType, ReturnCode
from superbench.benchmarks.micro_benchmarks.sharding_matmul import ShardingMatmul, ShardingMode


@decorator.cuda_test
@decorator.pytorch_test
def test_pytorch_sharding_matmul():
    """Test pytorch-sharding-matmul benchmark."""
    context = BenchmarkRegistry.create_benchmark_context(
        'sharding-matmul',
        platform=Platform.CUDA,
        parameters='--run_count 2 --num_steps 20',
        framework=Framework.PYTORCH
    )

    assert (BenchmarkRegistry.is_benchmark_context_valid(context))

    utils.setup_simulated_ddp_distributed_env()
    benchmark = BenchmarkRegistry.launch_benchmark(context)

    # Check basic information.
    assert (benchmark)
    assert (isinstance(benchmark, ShardingMatmul))
    assert (benchmark.name == 'pytorch-sharding-matmul')
    assert (benchmark.type == BenchmarkType.MICRO)

    # Check predefined parameters of sharding-matmul benchmark.
    assert (benchmark._args.mode == [ShardingMode.ALLREDUCE, ShardingMode.ALLGATHER])

    # Check parameters specified in BenchmarkContext.
    assert (benchmark._args.run_count == 2)
    assert (benchmark._args.num_steps == 20)

    # Check results and metrics.
    assert (benchmark.run_count == 2)
    assert (benchmark.return_code == ReturnCode.SUCCESS)
    for metric in ['allreduce', 'allgather']:
        assert (len(benchmark.raw_data[metric]) == benchmark.run_count)
        assert (len(benchmark.raw_data[metric][0]) == benchmark._args.num_steps)
        assert (len(benchmark.result[metric]) == benchmark.run_count)

    utils.clean_simulated_ddp_distributed_env()

tests/benchmarks/utils.py (new file, 24 additions)

View file

@@ -0,0 +1,24 @@
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.

"""Utilities for benchmark tests."""

import os


def setup_simulated_ddp_distributed_env():
    """Function to set up the simulated DDP distributed environment variables."""
    os.environ['WORLD_SIZE'] = '1'
    os.environ['RANK'] = '0'
    os.environ['LOCAL_RANK'] = '0'
    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_PORT'] = '12345'


def clean_simulated_ddp_distributed_env():
    """Function to clean up the simulated DDP distributed environment variables."""
    os.environ.pop('WORLD_SIZE')
    os.environ.pop('RANK')
    os.environ.pop('LOCAL_RANK')
    os.environ.pop('MASTER_ADDR')
    os.environ.pop('MASTER_PORT')
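
These variables mimic what a DDP launcher would export for a one-process job, so torch.distributed can form a world of size 1 on a single machine. Below is a minimal illustration of how the standard env:// init method consumes them; this is a generic PyTorch example rather than code from this commit, and the gloo backend is chosen here only so it runs on CPU, while the benchmark itself presumably uses a GPU backend.

# Illustration only (not part of this commit): with the env:// init method,
# torch.distributed reads MASTER_ADDR, MASTER_PORT, WORLD_SIZE and RANK
# from the environment, so WORLD_SIZE=1 lets a single process form the group.
import torch.distributed as dist

from tests.benchmarks import utils

utils.setup_simulated_ddp_distributed_env()
dist.init_process_group(backend='gloo', init_method='env://')
assert dist.get_world_size() == 1
assert dist.get_rank() == 0
dist.destroy_process_group()
utils.clean_simulated_ddp_distributed_env()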