CLI - Add non-zero return code for `sb [deploy,run]` (#425)
Add a non-zero return code for the `sb deploy` and `sb run` commands when there are Ansible failures in the control plane. The return code is set to the count of failures. For failures caused by benchmarks, the return code is still set per benchmark in the results JSON file.
This commit is contained in:
Родитель
d7bb8303fb
Коммит
1b86503d1e
|
@ -3,6 +3,7 @@
|
||||||
|
|
||||||
"""SuperBench CLI command handler."""
|
"""SuperBench CLI command handler."""
|
||||||
|
|
||||||
|
import sys
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from importlib_metadata import version, PackageNotFoundError
|
from importlib_metadata import version, PackageNotFoundError
|
||||||
|
|
||||||
|
@ -252,6 +253,8 @@ def deploy_command_handler(
|
||||||
|
|
||||||
runner = SuperBenchRunner(sb_config, docker_config, ansible_config, sb_output_dir)
|
runner = SuperBenchRunner(sb_config, docker_config, ansible_config, sb_output_dir)
|
||||||
runner.deploy()
|
runner.deploy()
|
||||||
|
if runner.get_failure_count() != 0:
|
||||||
|
sys.exit(runner.get_failure_count())
|
||||||
|
|
||||||
|
|
||||||
def run_command_handler(
|
def run_command_handler(
|
||||||
|
@ -307,3 +310,5 @@ def run_command_handler(
|
||||||
|
|
||||||
runner = SuperBenchRunner(sb_config, docker_config, ansible_config, sb_output_dir)
|
runner = SuperBenchRunner(sb_config, docker_config, ansible_config, sb_output_dir)
|
||||||
runner.run()
|
runner.run()
|
||||||
|
if runner.get_failure_count() != 0:
|
||||||
|
sys.exit(runner.get_failure_count())
|
||||||
|
|
|
@ -27,6 +27,7 @@ class AnsibleClient():
|
||||||
'cmdline': '--forks 128',
|
'cmdline': '--forks 128',
|
||||||
}
|
}
|
||||||
self._head_host = None
|
self._head_host = None
|
||||||
|
self.failure_count = 0
|
||||||
if config:
|
if config:
|
||||||
inventory_file = getattr(config, 'host_file', None)
|
inventory_file = getattr(config, 'host_file', None)
|
||||||
inventory_list = getattr(config, 'host_list', None)
|
inventory_list = getattr(config, 'host_list', None)
|
||||||
|
@ -77,6 +78,7 @@ class AnsibleClient():
|
||||||
if r.rc == 0:
|
if r.rc == 0:
|
||||||
logger.info('Run succeed, return code {}.'.format(r.rc))
|
logger.info('Run succeed, return code {}.'.format(r.rc))
|
||||||
else:
|
else:
|
||||||
|
self.failure_count += 1
|
||||||
logger.warning('Run failed, return code {}.'.format(r.rc))
|
logger.warning('Run failed, return code {}.'.format(r.rc))
|
||||||
return r.rc
|
return r.rc
|
||||||
|
|
||||||
|
|
|
@ -160,6 +160,14 @@ class SuperBenchRunner():
|
||||||
logger.warning('Unknown mode %s.', mode.name)
|
logger.warning('Unknown mode %s.', mode.name)
|
||||||
return mode_command.strip()
|
return mode_command.strip()
|
||||||
|
|
||||||
|
def get_failure_count(self):
|
||||||
|
"""Get failure count during Ansible run.
|
||||||
|
|
||||||
|
Return:
|
||||||
|
int: Failure count.
|
||||||
|
"""
|
||||||
|
return self._ansible_client.failure_count
|
||||||
|
|
||||||
def deploy(self): # pragma: no cover
|
def deploy(self): # pragma: no cover
|
||||||
"""Deploy SuperBench environment."""
|
"""Deploy SuperBench environment."""
|
||||||
logger.info('Preparing SuperBench environment.')
|
logger.info('Preparing SuperBench environment.')
|
||||||
|
|
|
@ -8,6 +8,7 @@ import contextlib
|
||||||
from functools import wraps
|
from functools import wraps
|
||||||
from knack.testsdk import ScenarioTest, StringContainCheck, NoneCheck, JMESPathCheck
|
from knack.testsdk import ScenarioTest, StringContainCheck, NoneCheck, JMESPathCheck
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
|
from unittest import mock
|
||||||
|
|
||||||
import superbench
|
import superbench
|
||||||
from superbench.cli import SuperBenchCLI
|
from superbench.cli import SuperBenchCLI
|
||||||
|
@ -53,8 +54,10 @@ class SuperBenchCLIScenarioTest(ScenarioTest):
|
||||||
"""Test sb version."""
|
"""Test sb version."""
|
||||||
self.cmd('sb version', checks=[StringContainCheck(superbench.__version__)])
|
self.cmd('sb version', checks=[StringContainCheck(superbench.__version__)])
|
||||||
|
|
||||||
def test_sb_deploy(self):
|
@mock.patch('superbench.runner.SuperBenchRunner.get_failure_count')
|
||||||
|
def test_sb_deploy(self, mocked_failure_count):
|
||||||
"""Test sb deploy."""
|
"""Test sb deploy."""
|
||||||
|
mocked_failure_count.return_value = 0
|
||||||
self.cmd('sb deploy --host-list localhost', checks=[NoneCheck()])
|
self.cmd('sb deploy --host-list localhost', checks=[NoneCheck()])
|
||||||
|
|
||||||
def test_sb_deploy_no_host(self):
|
def test_sb_deploy_no_host(self):
|
||||||
|
@ -65,12 +68,16 @@ class SuperBenchCLIScenarioTest(ScenarioTest):
|
||||||
"""Test sb exec."""
|
"""Test sb exec."""
|
||||||
self.cmd('sb exec --config-override superbench.enable=["none"]', checks=[NoneCheck()])
|
self.cmd('sb exec --config-override superbench.enable=["none"]', checks=[NoneCheck()])
|
||||||
|
|
||||||
def test_sb_run(self):
|
@mock.patch('superbench.runner.SuperBenchRunner.get_failure_count')
|
||||||
|
def test_sb_run(self, mocked_failure_count):
|
||||||
"""Test sb run."""
|
"""Test sb run."""
|
||||||
|
mocked_failure_count.return_value = 0
|
||||||
self.cmd('sb run --host-list localhost --config-override superbench.enable=none', checks=[NoneCheck()])
|
self.cmd('sb run --host-list localhost --config-override superbench.enable=none', checks=[NoneCheck()])
|
||||||
|
|
||||||
def test_sb_run_skipdocker(self):
|
@mock.patch('superbench.runner.SuperBenchRunner.get_failure_count')
|
||||||
|
def test_sb_run_skipdocker(self, mocked_failure_count):
|
||||||
"""Test sb run without docker."""
|
"""Test sb run without docker."""
|
||||||
|
mocked_failure_count.return_value = 0
|
||||||
self.cmd('sb run -l localhost -C superbench.enable=none --no-docker', checks=[NoneCheck()])
|
self.cmd('sb run -l localhost -C superbench.enable=none --no-docker', checks=[NoneCheck()])
|
||||||
|
|
||||||
def test_sb_run_no_docker_auth(self):
|
def test_sb_run_no_docker_auth(self):
|
||||||
|
|
|
@ -41,6 +41,10 @@ class RunnerTestCase(unittest.TestCase):
|
||||||
expected_log_file = Path(self.runner._sb_output_dir) / 'sb-run.log'
|
expected_log_file = Path(self.runner._sb_output_dir) / 'sb-run.log'
|
||||||
self.assertTrue(expected_log_file.is_file())
|
self.assertTrue(expected_log_file.is_file())
|
||||||
|
|
||||||
|
def test_get_failure_count(self):
|
||||||
|
"""Test get_failure_count."""
|
||||||
|
self.assertEqual(0, self.runner.get_failure_count())
|
||||||
|
|
||||||
def test_get_mode_command(self):
|
def test_get_mode_command(self):
|
||||||
"""Test __get_mode_command."""
|
"""Test __get_mode_command."""
|
||||||
test_cases = [
|
test_cases = [
|
||||||
|
|
Загрузка…
Ссылка в новой задаче