Update omegaconf version to 2.3.0 (#631)
Update `omegaconf` version to [2.3.0](https://pypi.org/project/omegaconf/2.3.0/), since omegaconf 2.0.6 declares the non-standard dependency specifier `PyYAML>=5.1.*`. pip 24.1 will enforce this behaviour change and reject such specifiers. Discussion can be found at https://github.com/pypa/pip/issues/12063.
Parent: 7435f10a22
Commit: 9a3ce39d5a
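For context, a minimal sketch (not part of this commit) of why the old pin breaks under pip 24.1: with a recent `packaging` release (22 or newer, which newer pip versions vendor), the `.*` wildcard suffix is only legal with `==` and `!=`, so the `PyYAML>=5.1.*` requirement published by omegaconf 2.0.6 no longer parses.

```python
# Sketch only: demonstrates the specifier rejection, assuming packaging >= 22.
from packaging.specifiers import InvalidSpecifier, SpecifierSet

for spec in ('>=5.1', '>=5.1.*'):
    try:
        SpecifierSet(spec)    # parses the version specifier per PEP 440
        print(f'{spec!r}: valid PEP 440 specifier')
    except InvalidSpecifier as err:
        print(f'{spec!r}: rejected -> {err}')
```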
setup.py (4 changed lines)
@@ -164,7 +164,7 @@ setup(
         'natsort>=7.1.1',
         'networkx>=2.5',
         'numpy>=1.19.2',
-        'omegaconf==2.0.6',
+        'omegaconf==2.3.0',
         'openpyxl>=3.0.7',
         'packaging>=21.0',
         'pandas>=1.1.5',
@@ -198,7 +198,7 @@ setup(
         'pydocstyle>=5.1.1',
         'pytest-cov>=2.11.1',
         'pytest-subtests>=0.4.0',
-        'pytest>=6.2.2',
+        'pytest>=6.2.2, <=7.4.4',
         'types-markdown',
         'types-pkg_resources',
         'types-pyyaml',
@@ -71,13 +71,13 @@ class SuperBenchExecutor():
         Return:
             list: List of benchmarks which will be executed.
         """
-        if self._sb_config.superbench.enable:
+        if 'enable' in self._sb_config.superbench and self._sb_config.superbench.enable:
             if isinstance(self._sb_config.superbench.enable, str):
                 return [self._sb_config.superbench.enable]
             elif isinstance(self._sb_config.superbench.enable, (list, ListConfig)):
                 return list(self._sb_config.superbench.enable)
         # TODO: may exist order issue
-        return [k for k, v in self._sb_benchmarks.items() if v.enable]
+        return [k for k, v in self._sb_benchmarks.items() if 'enable' in v and v.enable]
 
     def __get_platform(self):
         """Detect runninng platform by environment."""
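The guards above follow one pattern repeated throughout this commit: test key membership before touching the value. A minimal sketch under assumed settings (a struct-mode config, which OmegaConf uses for structured schemas; not project code) shows why plain attribute access is risky when a key such as `enable` is absent, while a membership test is not:

```python
# Sketch only: illustrates the membership-check pattern with omegaconf 2.3.
from omegaconf import OmegaConf

cfg = OmegaConf.create({'superbench': {}})    # no 'enable' key at all
OmegaConf.set_struct(cfg, True)               # structured schemas are struct by default

print('enable' in cfg.superbench)             # False, membership test never raises
if 'enable' in cfg.superbench and cfg.superbench.enable:
    print('run only:', list(cfg.superbench.enable))
else:
    print('fall back to per-benchmark enable flags')
# cfg.superbench.enable                       # with struct mode on, this would raise ConfigAttributeError
```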
@@ -228,32 +228,37 @@ class SuperBenchExecutor():
             logger.warning('Monitor can not support CPU platform.')
 
         benchmark_real_name = benchmark_name.split(':')[0]
-        for framework in benchmark_config.frameworks or [Framework.NONE.value]:
-            if benchmark_real_name == 'model-benchmarks' or (
-                ':' not in benchmark_name and benchmark_name.endswith('_models')
-            ):
-                for model in benchmark_config.models:
-                    full_name = f'{benchmark_name}/{framework}-{model}'
-                    logger.info('Executor is going to execute %s.', full_name)
-                    context = BenchmarkRegistry.create_benchmark_context(
-                        model,
-                        platform=self.__get_platform(),
-                        framework=Framework(framework.lower()),
-                        parameters=self.__get_arguments(benchmark_config.parameters)
-                    )
-                    result = self.__exec_benchmark(full_name, context)
-                    benchmark_results.append(result)
-            else:
-                full_name = benchmark_name
-                logger.info('Executor is going to execute %s.', full_name)
-                context = BenchmarkRegistry.create_benchmark_context(
-                    benchmark_real_name,
-                    platform=self.__get_platform(),
-                    framework=Framework(framework.lower()),
-                    parameters=self.__get_arguments(benchmark_config.parameters)
-                )
-                result = self.__exec_benchmark(full_name, context)
-                benchmark_results.append(result)
+        if 'frameworks' in benchmark_config:
+            for framework in benchmark_config.frameworks or [Framework.NONE.value]:
+                if benchmark_real_name == 'model-benchmarks' or (
+                    ':' not in benchmark_name and benchmark_name.endswith('_models')
+                ):
+                    for model in benchmark_config.models:
+                        full_name = f'{benchmark_name}/{framework}-{model}'
+                        logger.info('Executor is going to execute %s.', full_name)
+                        context = BenchmarkRegistry.create_benchmark_context(
+                            model,
+                            platform=self.__get_platform(),
+                            framework=Framework(framework.lower()),
+                            parameters=self.__get_arguments(
+                                {} if 'parameters' not in benchmark_config else benchmark_config.parameters
+                            )
+                        )
+                        result = self.__exec_benchmark(full_name, context)
+                        benchmark_results.append(result)
+                else:
+                    full_name = benchmark_name
+                    logger.info('Executor is going to execute %s.', full_name)
+                    context = BenchmarkRegistry.create_benchmark_context(
+                        benchmark_real_name,
+                        platform=self.__get_platform(),
+                        framework=Framework(framework.lower()),
+                        parameters=self.__get_arguments(
+                            {} if 'parameters' not in benchmark_config else benchmark_config.parameters
+                        )
+                    )
+                    result = self.__exec_benchmark(full_name, context)
+                    benchmark_results.append(result)
 
         if monitor:
             monitor.stop()
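A simplified, hypothetical mirror of the new control flow (it drops the `model-benchmarks` name check and the non-model `else` branch) shows the effect of the added guards: a benchmark config without a `frameworks` key yields no model runs, and a missing `parameters` key falls back to `{}`:

```python
# Sketch only: names and inputs here are illustrative, not SuperBench internals.
from omegaconf import OmegaConf

def resolve_model_runs(benchmark_name, benchmark_config):
    """Yield (full_name, parameters) pairs, mirroring the guarded lookups above."""
    if 'frameworks' not in benchmark_config:
        return    # same effect as the new `if 'frameworks' in benchmark_config:` wrapper
    parameters = {} if 'parameters' not in benchmark_config else benchmark_config.parameters
    models = benchmark_config.models if 'models' in benchmark_config else []
    for framework in benchmark_config.frameworks or ['none']:
        for model in models:
            yield f'{benchmark_name}/{framework}-{model}', parameters

cfg = OmegaConf.create({'frameworks': ['pytorch'], 'models': ['resnet50'], 'parameters': {'duration': 60}})
print(list(resolve_model_runs('model-benchmarks', cfg)))
# [('model-benchmarks/pytorch-resnet50', {'duration': 60})]
print(list(resolve_model_runs('kernel-launch', OmegaConf.create({}))))
# [] -- no 'frameworks' key, so the loop is skipped entirely
```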
@@ -67,24 +67,24 @@ class SuperBenchRunner():
             InvalidConfigError: If input config is invalid.
         """
         # TODO: add validation and defaulting
-        if not self._sb_config.superbench.env:
+        if 'env' not in self._sb_config.superbench:
             self._sb_config.superbench.env = {}
         for name in self._sb_benchmarks:
-            if not self._sb_benchmarks[name].modes:
+            if 'modes' not in self._sb_benchmarks[name]:
                 self._sb_benchmarks[name].modes = []
             for idx, mode in enumerate(self._sb_benchmarks[name].modes):
-                if not mode.env:
+                if 'env' not in mode:
                     self._sb_benchmarks[name].modes[idx].env = {}
                 if mode.name == 'local':
-                    if not mode.proc_num:
+                    if 'proc_num' not in mode:
                         self._sb_benchmarks[name].modes[idx].proc_num = 1
-                    if not mode.prefix:
+                    if 'prefix' not in mode:
                         self._sb_benchmarks[name].modes[idx].prefix = ''
                 elif mode.name == 'torch.distributed':
-                    if not mode.proc_num:
+                    if 'proc_num' not in mode:
                         self._sb_benchmarks[name].modes[idx].proc_num = 8
                 elif mode.name == 'mpi':
-                    if not mode.mca:
+                    if 'machinefile' not in mode:
                         self._sb_benchmarks[name].modes[idx].mca = {
                             'pml': 'ob1',
                             'btl': '^openib',
@@ -93,8 +93,8 @@ class SuperBenchRunner():
                         }
                     for key in ['PATH', 'LD_LIBRARY_PATH', 'SB_MICRO_PATH', 'SB_WORKSPACE']:
                         self._sb_benchmarks[name].modes[idx].env.setdefault(key, None)
-                if mode.pattern:
-                    if mode.pattern.type == 'topo-aware' and not mode.pattern.ibstat:
+                if 'pattern' in mode:
+                    if mode.pattern.type == 'topo-aware' and 'ibstat' not in mode.pattern:
                         self._sb_benchmarks[name].modes[idx].pattern.ibstat = gen_ibstat(
                             self._ansible_config, str(self._output_path / 'ibstate_file.txt')
                         )
@@ -105,12 +105,12 @@ class SuperBenchRunner():
         Return:
             list: List of benchmarks which will be executed.
         """
-        if self._sb_config.superbench.enable:
+        if 'enable' in self._sb_config.superbench and self._sb_config.superbench.enable:
             if isinstance(self._sb_config.superbench.enable, str):
                 return [self._sb_config.superbench.enable]
             elif isinstance(self._sb_config.superbench.enable, (list, ListConfig)):
                 return list(self._sb_config.superbench.enable)
-        return [k for k, v in self._sb_benchmarks.items() if v.enable]
+        return [k for k, v in self._sb_benchmarks.items() if 'enable' in v and v.enable]
 
     def __get_mode_command(self, benchmark_name, mode, timeout=None):
         """Get runner command for given mode.
@@ -141,7 +141,7 @@ class SuperBenchRunner():
         elif mode.name == 'torch.distributed':
             # TODO: replace with torch.distributed.run in v1.9
             # TODO: only supports node_num=1 and node_num=all currently
-            torch_dist_params = '' if mode.node_num == 1 else \
+            torch_dist_params = '' if 'node_num' in mode and mode.node_num == 1 else \
                 '--nnodes=$NNODES --node_rank=$NODE_RANK --master_addr=$MASTER_ADDR --master_port=$MASTER_PORT '
             mode_command = (
                 f'torchrun'
@@ -158,8 +158,8 @@ class SuperBenchRunner():
                 '-bind-to numa '    # bind processes to numa
                 '{mca_list} {env_list} {command}'
             ).format(
-                host_list=f'-host localhost:{mode.proc_num}' if mode.node_num == 1 else
-                f'-hostfile hostfile -map-by ppr:{mode.proc_num}:node' if mode.host_list is None else '-host ' +
+                host_list=f'-host localhost:{mode.proc_num}' if 'node_num' in mode and mode.node_num == 1 else
+                f'-hostfile hostfile -map-by ppr:{mode.proc_num}:node' if 'host_list' not in mode else '-host ' +
                 ','.join(f'{host}:{mode.proc_num}' for host in mode.host_list),
                 mca_list=' '.join(f'-mca {k} {v}' for k, v in mode.mca.items()),
                 env_list=' '.join(
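The nested conditional expression that builds `host_list` is dense; the following illustrative helper (not in the repo) unrolls the same three-way logic over plain dicts standing in for the mode config:

```python
# Sketch only: unrolls the host_list conditional above with hypothetical inputs.
def build_host_arg(mode):
    if 'node_num' in mode and mode['node_num'] == 1:
        return f"-host localhost:{mode['proc_num']}"          # single-node run
    if 'host_list' not in mode:
        return f"-hostfile hostfile -map-by ppr:{mode['proc_num']}:node"  # use generated hostfile
    return '-host ' + ','.join(f"{host}:{mode['proc_num']}" for host in mode['host_list'])

print(build_host_arg({'proc_num': 8, 'node_num': 1}))                    # -host localhost:8
print(build_host_arg({'proc_num': 8}))                                   # -hostfile hostfile -map-by ppr:8:node
print(build_host_arg({'proc_num': 8, 'host_list': ['node0', 'node1']}))  # -host node0:8,node1:8
```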
@@ -206,6 +206,9 @@ class SuperBenchRunner():
         logger.info('Runner is going to get node system info.')
 
         fcmd = "docker exec sb-workspace bash -c '{command}'"
+
+        if 'skip' not in self._docker_config:
+            self._docker_config.skip = False
         if self._docker_config.skip:
             fcmd = "bash -c 'cd $SB_WORKSPACE && {command}'"
         ansible_runner_config = self._ansible_client.get_shell_config(
@@ -225,7 +228,7 @@ class SuperBenchRunner():
             self._ansible_client.get_playbook_config(
                 'check_env.yaml',
                 extravars={
-                    'no_docker': bool(self._docker_config.skip),
+                    'no_docker': False if 'skip' not in self._docker_config else self._docker_config.skip,
                     'output_dir': str(self._output_path),
                     'env': '\n'.join(f'{k}={v}' for k, v in self._sb_config.superbench.env.items()),
                 }
@@ -441,15 +444,17 @@ class SuperBenchRunner():
             int: Process return code.
         """
         mode.update(vars)
-        if mode.name == 'mpi' and mode.pattern:
+        if mode.name == 'mpi' and 'pattern' in mode:
             mode.env.update({'SB_MODE_SERIAL_INDEX': mode.serial_index, 'SB_MODE_PARALLEL_INDEX': mode.parallel_index})
         logger.info('Runner is going to run %s in %s mode, proc rank %d.', benchmark_name, mode.name, mode.proc_rank)
 
-        timeout = self._sb_benchmarks[benchmark_name].timeout
+        timeout = self._sb_benchmarks[benchmark_name].get('timeout', 60)
         if isinstance(timeout, int):
             timeout = max(timeout, 60)
 
         env_list = '--env-file /tmp/sb.env'
+        if 'skip' not in self._docker_config:
+            self._docker_config.skip = False
         if self._docker_config.skip:
             env_list = 'set -o allexport && source /tmp/sb.env && set +o allexport'
         for k, v in mode.env.items():
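The timeout change swaps direct attribute access for `DictConfig.get()` with a default; a small sketch with assumed values shows the resulting behaviour (a missing or small timeout is floored at 60 seconds):

```python
# Sketch only: demonstrates the get()-with-default plus floor logic above.
from omegaconf import OmegaConf

for raw in (OmegaConf.create({}), OmegaConf.create({'timeout': 30}), OmegaConf.create({'timeout': 300})):
    timeout = raw.get('timeout', 60)    # default when the key is absent
    if isinstance(timeout, int):
        timeout = max(timeout, 60)      # never run with less than a 60s timeout
    print(timeout)                      # prints 60, 60, 300
```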
@@ -463,7 +468,7 @@ class SuperBenchRunner():
         ansible_runner_config = self._ansible_client.get_shell_config(
             fcmd.format(env_list=env_list, command=self.__get_mode_command(benchmark_name, mode, timeout))
         )
-        if mode.name == 'mpi' and mode.node_num != 1:
+        if mode.name == 'mpi' and 'node_num' in mode and mode.node_num != 1:
             ansible_runner_config = self._ansible_client.update_mpi_config(ansible_runner_config)
 
         if isinstance(timeout, int):
@@ -495,7 +500,7 @@ class SuperBenchRunner():
             )
             ansible_rc = sum(rc_list)
         elif mode.name == 'torch.distributed' or mode.name == 'mpi':
-            if not mode.pattern:
+            if 'pattern' not in mode:
                 ansible_rc = self._run_proc(benchmark_name, mode, {'proc_rank': 0})
             else:
                 if not os.path.exists(self._output_path / 'hostfile'):
@@ -44,7 +44,7 @@ class ExecutorTestCase(unittest.TestCase):
     def test_get_enabled_benchmarks_enable_none(self):
         """Test enabled benchmarks when superbench.enable is none."""
         benchmarks = self.default_config.superbench.benchmarks
-        expected_enabled_benchmarks = [x for x in benchmarks if benchmarks[x]['enable']]
+        expected_enabled_benchmarks = [x for x in benchmarks if 'enable' in benchmarks[x] and benchmarks[x]['enable']]
         self.assertListEqual(self.executor._sb_enabled, expected_enabled_benchmarks)
 
     def test_get_enabled_benchmarks_enable_str(self):