Benchmarks: Add Benchmark - Add disk performance benchmark (#132)

**Description**
Add disk performance microbenchmark.

**Major Revision**
- Add the microbenchmark, example, unit test, and default config for disk performance.

**Minor Revision**
- Fix bugs in the executor unit test related to tests enabled by default.
Ziyue Yang 2021-07-23 14:49:05 +08:00 committed by GitHub
Parent 702fb1eb37
Commit db297fb4ed
No known key found for this signature
GPG key ID: 4AEE18F83AFDEB23
5 changed files with 792 additions and 1 deletion

View File

@@ -0,0 +1,24 @@
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""Micro benchmark example for disk performance.

Commands to run:
    python3 examples/benchmarks/disk_performance.py
"""
from superbench.benchmarks import BenchmarkRegistry, Platform
from superbench.common.utils import logger


if __name__ == '__main__':
context = BenchmarkRegistry.create_benchmark_context(
'disk-benchmark', platform=Platform.CPU, parameters='--block_devices /dev/nvme0n1'
)
benchmark = BenchmarkRegistry.launch_benchmark(context)
if benchmark:
logger.info(
'benchmark: {}, return code: {}, result: {}'.format(
benchmark.name, benchmark.return_code, benchmark.result
)
)
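Note: if `/dev/nvme0n1` is a valid block device and the `fio` binary is available under `SB_MICRO_PATH`, this example logs the benchmark name, return code, and a result dict whose keys follow the `disk_performance:<device>:<job>:<metric>` pattern produced by the module below.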

View File

@@ -11,8 +11,9 @@ from superbench.benchmarks.micro_benchmarks.cublas_function import CublasBenchmark
from superbench.benchmarks.micro_benchmarks.cudnn_function import CudnnBenchmark
from superbench.benchmarks.micro_benchmarks.gemm_flops_performance import GemmFlopsCuda
from superbench.benchmarks.micro_benchmarks.cuda_memory_bw_performance import CudaMemBwBenchmark
from superbench.benchmarks.micro_benchmarks.disk_performance import DiskBenchmark

__all__ = [
'MicroBenchmark', 'MicroBenchmarkWithInvoke', 'ShardingMatmul', 'ComputationCommunicationOverlap', 'KernelLaunch',
'CublasBenchmark', 'CudnnBenchmark', 'GemmFlopsCuda', 'CudaMemBwBenchmark'
'CublasBenchmark', 'CudnnBenchmark', 'GemmFlopsCuda', 'CudaMemBwBenchmark', 'DiskBenchmark'
]

View File

@@ -0,0 +1,220 @@
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""Module of the Disk Performance benchmarks."""
from pathlib import Path
import json
import os
from superbench.common.utils import logger
from superbench.benchmarks import BenchmarkRegistry, ReturnCode
from superbench.benchmarks.micro_benchmarks import MicroBenchmarkWithInvoke


class DiskBenchmark(MicroBenchmarkWithInvoke):
"""The disk performance benchmark class."""
def __init__(self, name, parameters=''):
"""Constructor.
Args:
name (str): benchmark name.
parameters (str): benchmark parameters.
"""
super().__init__(name, parameters)
self._bin_name = 'fio'
self.__io_patterns = ['seq', 'rand']
self.__io_types = ['read', 'write']
self.__rand_block_size = 4 * 1024 # 4KiB
self.__seq_block_size = 128 * 1024 # 128KiB
self.__default_iodepth = 64
self.__default_ramp_time = 10
self.__default_runtime = 60
self.__default_numjobs_for_rand = 4
self.__common_fio_args =\
' --randrepeat=1 --thread=1 --ioengine=libaio --direct=1'\
' --norandommap=1 --lat_percentiles=1 --group_reporting=1'\
' --output-format=json'
self.__fio_args = {}
# Sequentially write 128KiB to the device twice
self.__fio_args['seq_precond'] = self.__common_fio_args +\
' --name=seq_precond --rw=write --bs=%d --iodepth=%d --numjobs=1 --loops=2' %\
(self.__seq_block_size, self.__default_iodepth)
# Randomly write 4KiB to the device
self.__fio_args['rand_precond'] = self.__common_fio_args +\
' --name=rand_precond --rw=randwrite --bs=%d --iodepth=%d --numjobs=%d --time_based=1' %\
(self.__rand_block_size, self.__default_iodepth, self.__default_numjobs_for_rand)
# Seq/rand read/write tests
for io_pattern in self.__io_patterns:
for io_type in self.__io_types:
io_str = '%s_%s' % (io_pattern, io_type)
fio_rw = io_type if io_pattern == 'seq' else io_pattern + io_type
fio_bs = self.__seq_block_size if io_pattern == 'seq' else self.__rand_block_size
self.__fio_args[io_str] = self.__common_fio_args +\
' --name=%s --rw=%s --bs=%d --time_based=1' % (io_str, fio_rw, fio_bs)
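        # For reference, with the defaults above the 'seq_read' entry expands to:
        #   ' --randrepeat=1 --thread=1 --ioengine=libaio --direct=1'
        #   ' --norandommap=1 --lat_percentiles=1 --group_reporting=1'
        #   ' --output-format=json --name=seq_read --rw=read --bs=131072 --time_based=1'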

    def add_parser_arguments(self):
"""Add the specified arguments."""
super().add_parser_arguments()
self._parser.add_argument(
'--block_devices',
type=str,
nargs='*',
default=[],
required=False,
help='Disk block device(s) to be tested.',
)
# Disable precondition by default
self._parser.add_argument(
'--enable_seq_precond',
action='store_true',
help='Enable seq write precondition.',
)
self._parser.add_argument(
'--rand_precond_time',
type=int,
default=0,
required=False,
help='Time in seconds to run rand write precondition. Set to 0 to disable this test.',
)
for io_pattern in self.__io_patterns:
for io_type in self.__io_types:
io_str = '%s_%s' % (io_pattern, io_type)
self._parser.add_argument(
'--%s_ramp_time' % io_str,
type=int,
default=self.__default_ramp_time,
required=False,
help='Time in seconds to warm up %s test.' % io_str,
)
# Disable write tests by default
default_runtime = 0 if io_type == 'write' else self.__default_runtime
self._parser.add_argument(
'--%s_runtime' % io_str,
type=int,
default=default_runtime,
required=False,
help='Time in seconds to run %s test. Set to 0 to disable this test.' % io_str,
)
self._parser.add_argument(
'--%s_iodepth' % io_str,
type=int,
default=self.__default_iodepth,
required=False,
help='Queue depth for each thread in %s test.' % io_str,
)
default_numjobs = 1 if io_pattern == 'seq' else self.__default_numjobs_for_rand
self._parser.add_argument(
'--%s_numjobs' % io_str,
type=int,
default=default_numjobs,
required=False,
help='Number of threads in %s test.' % io_str,
)

    def _preprocess(self):
"""Preprocess/preparation operations before the benchmarking.
Return:
            True if _preprocess() succeeds.
"""
if not super()._preprocess():
return False
fio_path = os.path.join(self._args.bin_dir, self._bin_name)
for block_device in self._args.block_devices:
if not Path(block_device).is_block_device():
self._result.set_return_code(ReturnCode.INVALID_ARGUMENT)
logger.error('Invalid block device: {}.'.format(block_device))
return False
if self._args.enable_seq_precond:
command = fio_path +\
' --filename=%s' % block_device +\
self.__fio_args['seq_precond']
self._commands.append(command)
if self._args.rand_precond_time > 0:
command = fio_path +\
' --filename=%s' % block_device +\
' --runtime=%ds' % self._args.rand_precond_time +\
self.__fio_args['rand_precond']
self._commands.append(command)
for io_pattern in self.__io_patterns:
for io_type in self.__io_types:
io_str = '%s_%s' % (io_pattern, io_type)
runtime = getattr(self._args, '%s_runtime' % io_str)
if runtime > 0:
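                        # Assembled command, e.g. for seq_read with default arguments:
                        #   fio --filename=/dev/nvme0n1 --ramp_time=10s --runtime=60s
                        #   --iodepth=64 --numjobs=1 [common fio args] --name=seq_read
                        #   --rw=read --bs=131072 --time_based=1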
command = fio_path +\
' --filename=%s' % block_device +\
' --ramp_time=%ds' % getattr(self._args, '%s_ramp_time' % io_str) +\
' --runtime=%ds' % runtime +\
' --iodepth=%d' % getattr(self._args, '%s_iodepth' % io_str) +\
' --numjobs=%d' % getattr(self._args, '%s_numjobs' % io_str) +\
self.__fio_args[io_str]
self._commands.append(command)
return True

    def _process_raw_result(self, cmd_idx, raw_output):
"""Function to parse raw results and save the summarized results.
self._result.add_raw_data() and self._result.add_result() need to be called to save the results.
Args:
cmd_idx (int): the index of command corresponding with the raw_output.
raw_output (str): raw output string of the micro-benchmark.
Return:
True if the raw output string is valid and result can be extracted.
"""
self._result.add_raw_data('raw_output_' + str(cmd_idx), raw_output)
try:
fio_output = json.loads(raw_output)
jobname = fio_output['jobs'][0]['jobname']
block_device = fio_output['global options']['filename']
jobname_prefix = 'disk_performance:%s:%s' % (block_device, jobname)
lat_units = ['lat_ns', 'lat_us', 'lat_ms']
bs = fio_output['jobs'][0]['job options']['bs']
self._result.add_result('%s:bs' % jobname_prefix, float(bs))
for io_type in ['read', 'write']:
io_type_prefix = '%s:%s' % (jobname_prefix, io_type)
iops = fio_output['jobs'][0][io_type]['iops']
self._result.add_result('%s:iops' % io_type_prefix, float(iops))
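                # Take the first latency-unit section found (fio emits the
                # percentile table under one of these keys) and stop.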
for lat_unit in lat_units:
if lat_unit in fio_output['jobs'][0][io_type]:
lat_unit_prefix = '%s:%s' % (io_type_prefix, lat_unit)
for lat_percentile in ['95.000000', '99.000000', '99.900000']:
lat = fio_output['jobs'][0][io_type][lat_unit]['percentile'][lat_percentile]
self._result.add_result('%s:%s' % (lat_unit_prefix, lat_percentile), float(lat))
break
except BaseException as e:
self._result.set_return_code(ReturnCode.MICROBENCHMARK_RESULT_PARSING_FAILURE)
logger.error(
'The result format is invalid - round: {}, benchmark: {}, raw output: {}, message: {}.'.format(
self._curr_run_index, self._name, raw_output, str(e)
)
)
return False
return True


BenchmarkRegistry.register_benchmark('disk-benchmark', DiskBenchmark)
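
A minimal usage sketch (assuming fio is installed under SB_MICRO_PATH and /dev/nvme0n1 exists; the flag names are those defined in add_parser_arguments above):

from superbench.benchmarks import BenchmarkRegistry, Platform

# Write tests and preconditioning are disabled by default; read tests run
# with their default runtimes unless overridden.
context = BenchmarkRegistry.create_benchmark_context(
    'disk-benchmark',
    platform=Platform.CPU,
    parameters='--block_devices /dev/nvme0n1 --seq_read_runtime 60 --rand_read_runtime 60'
)
benchmark = BenchmarkRegistry.launch_benchmark(context)
# Result keys are flattened as 'disk_performance:<device>:<job>:<metric>', e.g.
#   disk_performance:/dev/nvme0n1:seq_read:read:iops
#   disk_performance:/dev/nvme0n1:seq_read:read:lat_ns:99.000000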

View File

@@ -28,6 +28,14 @@ superbench:
model_action:
- train
benchmarks:
disk-benchmark:
enable: false
modes:
- proc_num: 1
parallel: no
parameters:
block_devices:
- /dev/nvme0n1
mem-bw:
enable: true
modes:

View File

@@ -0,0 +1,538 @@
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
"""Tests for disk-performance benchmark."""
from pathlib import Path
from unittest import mock
import os
import unittest
from superbench.benchmarks import BenchmarkRegistry, BenchmarkType, ReturnCode, Platform


class DiskBenchmarkTest(unittest.TestCase):
"""Test class for disk-performance benchmark."""

    def setUp(self):
"""Method called to prepare the test fixture."""
# Create fake binary file just for testing.
os.environ['SB_MICRO_PATH'] = '/tmp/superbench/'
binary_path = Path(os.getenv('SB_MICRO_PATH'), 'bin')
binary_path.mkdir(parents=True, exist_ok=True)
self.__binary_file = binary_path / 'fio'
self.__binary_file.touch(mode=0o755, exist_ok=True)

    def tearDown(self):
"""Method called after the test method has been called and the result recorded."""
self.__binary_file.unlink()

    def test_disk_performance_empty_param(self):
"""Test disk-performance benchmark command generation with empty parameter."""
benchmark_name = 'disk-benchmark'
(benchmark_class,
predefine_params) = BenchmarkRegistry._BenchmarkRegistry__select_benchmark(benchmark_name, Platform.CPU)
assert (benchmark_class)
benchmark = benchmark_class(benchmark_name, parameters='')
# Check basic information
assert (benchmark)
ret = benchmark._preprocess()
assert (ret is True)
assert (benchmark.return_code == ReturnCode.SUCCESS)
assert (benchmark.name == 'disk-benchmark')
assert (benchmark.type == BenchmarkType.MICRO)
# Command list should be empty
assert (0 == len(benchmark._commands))

    @mock.patch('pathlib.Path.is_block_device')
def test_disk_performance_invalid_block_device(self, mock_is_block_device):
"""Test disk-performance benchmark command generation with invalid block device."""
mock_is_block_device.return_value = False
benchmark_name = 'disk-benchmark'
(benchmark_class,
predefine_params) = BenchmarkRegistry._BenchmarkRegistry__select_benchmark(benchmark_name, Platform.CPU)
assert (benchmark_class)
block_devices = ['mock_block_device_0']
block_device_option = '--block_devices ' + ' '.join(block_devices)
benchmark = benchmark_class(benchmark_name, parameters=block_device_option)
# Check basic information
assert (benchmark)
ret = benchmark._preprocess()
assert (ret is False)
assert (benchmark.return_code == ReturnCode.INVALID_ARGUMENT)
assert (benchmark.name == 'disk-benchmark')
assert (benchmark.type == BenchmarkType.MICRO)

    @mock.patch('pathlib.Path.is_block_device')
def test_disk_performance_benchmark_disabled(self, mock_is_block_device):
"""Test disk-performance benchmark command generation with all benchmarks disabled."""
mock_is_block_device.return_value = True
benchmark_name = 'disk-benchmark'
(benchmark_class,
predefine_params) = BenchmarkRegistry._BenchmarkRegistry__select_benchmark(benchmark_name, Platform.CPU)
assert (benchmark_class)
block_devices = ['/dev/nvme0n1', '/dev/nvme1n1']
block_device_option = '--block_devices ' + ' '.join(block_devices)
param_str = block_device_option
param_str += ' --rand_precond_time=0'
param_str += ' --seq_read_runtime=0'
param_str += ' --seq_write_runtime=0'
param_str += ' --rand_read_runtime=0'
param_str += ' --rand_write_runtime=0'
benchmark = benchmark_class(benchmark_name, parameters=param_str)
# Check basic information
assert (benchmark)
ret = benchmark._preprocess()
assert (ret is True)
assert (benchmark.return_code == ReturnCode.SUCCESS)
assert (benchmark.name == 'disk-benchmark')
assert (benchmark.type == BenchmarkType.MICRO)
# Command list should be empty
assert (0 == len(benchmark._commands))

    @mock.patch('pathlib.Path.is_block_device')
def test_disk_performance_benchmark_enabled(self, mock_is_block_device):
"""Test disk-performance benchmark command generation with all benchmarks enabled."""
mock_is_block_device.return_value = True
benchmark_name = 'disk-benchmark'
(benchmark_class,
predefine_params) = BenchmarkRegistry._BenchmarkRegistry__select_benchmark(benchmark_name, Platform.CPU)
assert (benchmark_class)
block_devices = ['mock_block_device_0', 'mock_block_device_1']
block_device_option = '--block_devices ' + ' '.join(block_devices)
init_test_magic = 45
curr_test_magic = init_test_magic
param_str = block_device_option
# Sequential precondition
param_str += ' --enable_seq_precond'
# Random precondition
param_str += ' --rand_precond_time=%d' % curr_test_magic
curr_test_magic += 1
# Seq/rand read/write
for io_pattern in ['seq', 'rand']:
for io_type in ['read', 'write']:
io_str = '%s_%s' % (io_pattern, io_type)
param_str += ' --%s_ramp_time=%d' % (io_str, curr_test_magic)
curr_test_magic += 1
param_str += ' --%s_runtime=%d' % (io_str, curr_test_magic)
curr_test_magic += 1
param_str += ' --%s_iodepth=%d' % (io_str, curr_test_magic)
curr_test_magic += 1
param_str += ' --%s_numjobs=%d' % (io_str, curr_test_magic)
curr_test_magic += 1
benchmark = benchmark_class(benchmark_name, parameters=param_str)
# Check basic information
assert (benchmark)
ret = benchmark._preprocess()
assert (ret is True)
assert (benchmark.return_code == ReturnCode.SUCCESS)
assert (benchmark.name == 'disk-benchmark')
assert (benchmark.type == BenchmarkType.MICRO)
# Check command list
# 2 files * (2 preconditions + 2 io_patterns * 2 io_types) = 12 commands
assert (12 == len(benchmark._commands))
# Check parameter assignments
command_idx = 0
for block_device in block_devices:
curr_test_magic = init_test_magic
# Sequential precondition
assert ('--filename=%s' % block_device in benchmark._commands[command_idx])
command_idx += 1
# Random precondition
assert ('--filename=%s' % block_device in benchmark._commands[command_idx])
assert ('--runtime=%d' % curr_test_magic in benchmark._commands[command_idx])
curr_test_magic += 1
command_idx += 1
# Seq/rand read/write
for io_pattern in ['seq', 'rand']:
for io_type in ['read', 'write']:
assert ('--filename=%s' % block_device in benchmark._commands[command_idx])
fio_rw = '%s%s' % (io_pattern if io_pattern == 'rand' else '', io_type)
assert ('--rw=%s' % fio_rw in benchmark._commands[command_idx])
assert ('--ramp_time=%d' % curr_test_magic in benchmark._commands[command_idx])
curr_test_magic += 1
assert ('--runtime=%d' % curr_test_magic in benchmark._commands[command_idx])
curr_test_magic += 1
assert ('--iodepth=%d' % curr_test_magic in benchmark._commands[command_idx])
curr_test_magic += 1
assert ('--numjobs=%d' % curr_test_magic in benchmark._commands[command_idx])
curr_test_magic += 1
command_idx += 1

    def test_disk_performance_result_parsing(self):
"""Test disk-performance benchmark result parsing."""
benchmark_name = 'disk-benchmark'
(benchmark_class,
predefine_params) = BenchmarkRegistry._BenchmarkRegistry__select_benchmark(benchmark_name, Platform.CPU)
assert (benchmark_class)
benchmark = benchmark_class(benchmark_name, parameters='')
assert (benchmark)
ret = benchmark._preprocess()
assert (ret is True)
assert (benchmark.return_code == ReturnCode.SUCCESS)
assert (benchmark.name == 'disk-benchmark')
assert (benchmark.type == BenchmarkType.MICRO)
# Positive case - valid raw output.
test_raw_output = """
{
"fio version" : "fio-3.16",
"timestamp" : 1626763278,
"timestamp_ms" : 1626763278577,
"time" : "Tue Jul 20 06:41:18 2021",
"global options" : {
"filename" : "/dev/nvme0n1",
"ramp_time" : "10s",
"runtime" : "30s",
"iodepth" : "64",
"numjobs" : "4",
"randrepeat" : "1",
"thread" : "1",
"ioengine" : "libaio",
"direct" : "1",
"norandommap" : "1",
"lat_percentiles" : "1",
"group_reporting" : "1"
},
"jobs" : [
{
"jobname" : "rand_read_write",
"groupid" : 0,
"error" : 0,
"eta" : 0,
"elapsed" : 41,
"job options" : {
"name" : "rand_read",
"rw" : "randrw",
"bs" : "4096",
"time_based" : "1"
},
"read" : {
"io_bytes" : 10463010816,
"io_kbytes" : 10217784,
"bw_bytes" : 348743777,
"bw" : 340570,
"iops" : 85138.890741,
"runtime" : 30002,
"total_ios" : 2554337,
"short_ios" : 0,
"drop_ios" : 0,
"slat_ns" : {
"min" : 1332,
"max" : 48691,
"mean" : 2032.588341,
"stddev" : 864.921965
},
"clat_ns" : {
"min" : 278533,
"max" : 10175655,
"mean" : 1444476.063469,
"stddev" : 300748.583131
},
"lat_ns" : {
"min" : 280646,
"max" : 10177629,
"mean" : 1446562.147113,
"stddev" : 300723.879349,
"percentile" : {
"1.000000" : 872448,
"5.000000" : 1036288,
"10.000000" : 1122304,
"20.000000" : 1220608,
"30.000000" : 1286144,
"40.000000" : 1351680,
"50.000000" : 1417216,
"60.000000" : 1482752,
"70.000000" : 1564672,
"80.000000" : 1662976,
"90.000000" : 1810432,
"95.000000" : 1941504,
"99.000000" : 2244608,
"99.500000" : 2408448,
"99.900000" : 3620864,
"99.950000" : 4358144,
"99.990000" : 6062080
}
},
"bw_min" : 291288,
"bw_max" : 380288,
"bw_agg" : 99.999134,
"bw_mean" : 340567.050000,
"bw_dev" : 6222.338382,
"bw_samples" : 240,
"iops_min" : 72822,
"iops_max" : 95072,
"iops_mean" : 85141.733333,
"iops_stddev" : 1555.582888,
"iops_samples" : 240
},
"write" : {
"io_bytes" : 10454208512,
"io_kbytes" : 10209188,
"bw_bytes" : 348450387,
"bw" : 340283,
"iops" : 85066.128925,
"runtime" : 30002,
"total_ios" : 2552154,
"short_ios" : 0,
"drop_ios" : 0,
"slat_ns" : {
"min" : 1383,
"max" : 315361,
"mean" : 2182.824623,
"stddev" : 919.625590
},
"clat_ns" : {
"min" : 433904,
"max" : 6300941,
"mean" : 1558511.433458,
"stddev" : 207734.850159
},
"lat_ns" : {
"min" : 441909,
"max" : 6302845,
"mean" : 1560749.444938,
"stddev" : 207695.144244,
"percentile" : {
"1.000000" : 1155072,
"5.000000" : 1269760,
"10.000000" : 1318912,
"20.000000" : 1384448,
"30.000000" : 1449984,
"40.000000" : 1499136,
"50.000000" : 1531904,
"60.000000" : 1597440,
"70.000000" : 1646592,
"80.000000" : 1728512,
"90.000000" : 1826816,
"95.000000" : 1908736,
"99.000000" : 2072576,
"99.500000" : 2179072,
"99.900000" : 2605056,
"99.950000" : 3031040,
"99.990000" : 4358144
}
},
"bw_min" : 288464,
"bw_max" : 380080,
"bw_agg" : 99.998134,
"bw_mean" : 340276.650000,
"bw_dev" : 6293.894521,
"bw_samples" : 240,
"iops_min" : 72116,
"iops_max" : 95020,
"iops_mean" : 85069.133333,
"iops_stddev" : 1573.475038,
"iops_samples" : 240
},
"trim" : {
"io_bytes" : 0,
"io_kbytes" : 0,
"bw_bytes" : 0,
"bw" : 0,
"iops" : 0.000000,
"runtime" : 0,
"total_ios" : 0,
"short_ios" : 0,
"drop_ios" : 0,
"slat_ns" : {
"min" : 0,
"max" : 0,
"mean" : 0.000000,
"stddev" : 0.000000
},
"clat_ns" : {
"min" : 0,
"max" : 0,
"mean" : 0.000000,
"stddev" : 0.000000
},
"lat_ns" : {
"min" : 0,
"max" : 0,
"mean" : 0.000000,
"stddev" : 0.000000,
"percentile" : {
"1.000000" : 0,
"5.000000" : 0,
"10.000000" : 0,
"20.000000" : 0,
"30.000000" : 0,
"40.000000" : 0,
"50.000000" : 0,
"60.000000" : 0,
"70.000000" : 0,
"80.000000" : 0,
"90.000000" : 0,
"95.000000" : 0,
"99.000000" : 0,
"99.500000" : 0,
"99.900000" : 0,
"99.950000" : 0,
"99.990000" : 0
}
},
"bw_min" : 0,
"bw_max" : 0,
"bw_agg" : 0.000000,
"bw_mean" : 0.000000,
"bw_dev" : 0.000000,
"bw_samples" : 0,
"iops_min" : 0,
"iops_max" : 0,
"iops_mean" : 0.000000,
"iops_stddev" : 0.000000,
"iops_samples" : 0
},
"sync" : {
"lat_ns" : {
"min" : 0,
"max" : 0,
"mean" : 0.000000,
"stddev" : 0.000000
},
"total_ios" : 0
},
"job_runtime" : 120004,
"usr_cpu" : 4.833172,
"sys_cpu" : 20.800973,
"ctx" : 3542118,
"majf" : 0,
"minf" : 1263,
"iodepth_level" : {
"1" : 0.000000,
"2" : 0.000000,
"4" : 0.000000,
"8" : 0.000000,
"16" : 0.000000,
"32" : 0.000000,
">=64" : 100.000000
},
"iodepth_submit" : {
"0" : 0.000000,
"4" : 100.000000,
"8" : 0.000000,
"16" : 0.000000,
"32" : 0.000000,
"64" : 0.000000,
">=64" : 0.000000
},
"iodepth_complete" : {
"0" : 0.000000,
"4" : 99.999922,
"8" : 0.000000,
"16" : 0.000000,
"32" : 0.000000,
"64" : 0.100000,
">=64" : 0.000000
},
"latency_ns" : {
"2" : 0.000000,
"4" : 0.000000,
"10" : 0.000000,
"20" : 0.000000,
"50" : 0.000000,
"100" : 0.000000,
"250" : 0.000000,
"500" : 0.000000,
"750" : 0.000000,
"1000" : 0.000000
},
"latency_us" : {
"2" : 0.000000,
"4" : 0.000000,
"10" : 0.000000,
"20" : 0.000000,
"50" : 0.000000,
"100" : 0.000000,
"250" : 0.000000,
"500" : 0.010000,
"750" : 0.070126,
"1000" : 1.756079
},
"latency_ms" : {
"2" : 95.414131,
"4" : 2.722457,
"10" : 0.040830,
"20" : 0.010000,
"50" : 0.000000,
"100" : 0.000000,
"250" : 0.000000,
"500" : 0.000000,
"750" : 0.000000,
"1000" : 0.000000,
"2000" : 0.000000,
">=2000" : 0.000000
},
"latency_depth" : 64,
"latency_target" : 0,
"latency_percentile" : 100.000000,
"latency_window" : 0
}
],
"disk_util" : [
{
"name" : "nvme0n1",
"read_ios" : 3004914,
"write_ios" : 3003760,
"read_merges" : 0,
"write_merges" : 0,
"read_ticks" : 4269143,
"write_ticks" : 4598453,
"in_queue" : 11104,
"util" : 99.840351
}
]
}
"""
jobname_prefix = 'disk_performance:/dev/nvme0n1:rand_read_write'
assert (benchmark._process_raw_result(0, test_raw_output))
assert (benchmark.return_code == ReturnCode.SUCCESS)
# bs + <read, write> x <iops, 95th, 99th, 99.9th>
assert (9 == len(benchmark.result.keys()))
assert (1 == len(benchmark.result[jobname_prefix + ':bs']))
assert (4096 == benchmark.result[jobname_prefix + ':bs'][0])
assert (1 == len(benchmark.result[jobname_prefix + ':read:iops']))
assert (85138.890741 == benchmark.result[jobname_prefix + ':read:iops'][0])
assert (1 == len(benchmark.result[jobname_prefix + ':write:iops']))
assert (85066.128925 == benchmark.result[jobname_prefix + ':write:iops'][0])
assert (1 == len(benchmark.result[jobname_prefix + ':read:lat_ns:95.000000']))
assert (1941504 == benchmark.result[jobname_prefix + ':read:lat_ns:95.000000'][0])
assert (1 == len(benchmark.result[jobname_prefix + ':read:lat_ns:99.000000']))
assert (2244608 == benchmark.result[jobname_prefix + ':read:lat_ns:99.000000'][0])
assert (1 == len(benchmark.result[jobname_prefix + ':read:lat_ns:99.900000']))
assert (3620864 == benchmark.result[jobname_prefix + ':read:lat_ns:99.900000'][0])
assert (1 == len(benchmark.result[jobname_prefix + ':write:lat_ns:95.000000']))
assert (1908736 == benchmark.result[jobname_prefix + ':write:lat_ns:95.000000'][0])
assert (1 == len(benchmark.result[jobname_prefix + ':write:lat_ns:99.000000']))
assert (2072576 == benchmark.result[jobname_prefix + ':write:lat_ns:99.000000'][0])
assert (1 == len(benchmark.result[jobname_prefix + ':write:lat_ns:99.900000']))
assert (2605056 == benchmark.result[jobname_prefix + ':write:lat_ns:99.900000'][0])
# Negative case - invalid raw output.
assert (benchmark._process_raw_result(1, 'Invalid raw output') is False)
assert (benchmark.return_code == ReturnCode.MICROBENCHMARK_RESULT_PARSING_FAILURE)