Mirror of https://github.com/microsoft/MLOS.git
Add basic support for repeating a trial config. (#642)
Adds very rudimentary support for repeating configs across multiple trials. We fully expect to expand on more advanced support for this once we add a proper scheduler (#463). Additional tests forthcoming with #633 and related PRs.
This commit is contained in:
Parent
b531400556
Commit
e2a0e10ab5
|
@ -67,6 +67,13 @@
|
|||
"$ref": "#/$defs/json_config_path"
|
||||
},
|
||||
|
||||
"trial_config_repeat_count": {
|
||||
"description": "Number of times to repeat a config.",
|
||||
"type": "integer",
|
||||
"minimum": 1,
|
||||
"examples": [3, 5]
|
||||
},
|
||||
|
||||
"storage": {
|
||||
"description": "Path to the json config describing the storage backend to use.",
|
||||
"$ref": "#/$defs/json_config_path"
|
||||
|
|
|
@ -75,6 +75,8 @@ class Launcher:
|
|||
else:
|
||||
config = {}
|
||||
|
||||
self.trial_config_repeat_count = args.trial_config_repeat_count or config.get("trial_config_repeat_count", 1)
|
||||
|
||||
log_level = args.log_level or config.get("log_level", _LOG_LEVEL)
|
||||
try:
|
||||
log_level = int(log_level)
|
||||
|
@ -195,6 +197,10 @@ class Launcher:
|
|||
help='Path to the optimizer configuration file. If omitted, run' +
|
||||
' a single trial with default (or specified in --tunable_values).')
|
||||
|
||||
parser.add_argument(
|
||||
'--trial_config_repeat_count', '--trial-config-repeat-count', required=False, type=int, default=1,
|
||||
help='Number of times to repeat each config. Default is 1 trial per config, though more may be advised.')
|
||||
|
||||
parser.add_argument(
|
||||
'--storage', required=False,
|
||||
help='Path to the storage configuration file.' +
|
||||
|
|
|
@ -36,7 +36,8 @@ def _main() -> None:
|
|||
storage=launcher.storage,
|
||||
root_env_config=launcher.root_env_config,
|
||||
global_config=launcher.global_config,
|
||||
do_teardown=launcher.teardown
|
||||
do_teardown=launcher.teardown,
|
||||
trial_config_repeat_count=launcher.trial_config_repeat_count,
|
||||
)
|
||||
|
||||
_LOG.info("Final result: %s", result)
|
||||
|
@ -48,7 +49,9 @@ def _optimize(*,
|
|||
storage: Storage,
|
||||
root_env_config: str,
|
||||
global_config: Dict[str, Any],
|
||||
do_teardown: bool) -> Tuple[Optional[float], Optional[TunableGroups]]:
|
||||
do_teardown: bool,
|
||||
trial_config_repeat_count: int = 1,
|
||||
) -> Tuple[Optional[float], Optional[TunableGroups]]:
|
||||
"""
|
||||
Main optimization loop.
|
||||
|
||||
|
@ -66,8 +69,13 @@ def _optimize(*,
|
|||
Global configuration parameters.
|
||||
do_teardown : bool
|
||||
If True, teardown the environment at the end of the experiment
|
||||
trial_config_repeat_count : int
|
||||
How many trials to repeat for the same configuration.
|
||||
"""
|
||||
# pylint: disable=too-many-locals
|
||||
if trial_config_repeat_count <= 0:
|
||||
raise ValueError(f"Invalid trial_config_repeat_count: {trial_config_repeat_count}")
|
||||
|
||||
if _LOG.isEnabledFor(logging.INFO):
|
||||
_LOG.info("Root Environment:\n%s", env.pprint())
|
||||
|
||||
|
@ -118,16 +126,18 @@ def _optimize(*,
|
|||
config_id, json.dumps(tunable_values, indent=2))
|
||||
config_id = -1
|
||||
|
||||
trial = exp.new_trial(tunables, config={
|
||||
# Add some additional metadata to track for the trial such as the
|
||||
# optimizer config used.
|
||||
# TODO: Improve for supporting multi-objective
|
||||
# (e.g., opt_target_1, opt_target_2, ... and opt_direction_1, opt_direction_2, ...)
|
||||
"optimizer": opt.name,
|
||||
"opt_target": opt.target,
|
||||
"opt_direction": opt.direction,
|
||||
})
|
||||
_run(env_context, opt_context, trial, global_config)
|
||||
for repeat_i in range(1, trial_config_repeat_count + 1):
|
||||
trial = exp.new_trial(tunables, config={
|
||||
# Add some additional metadata to track for the trial such as the
|
||||
# optimizer config used.
|
||||
# TODO: Improve for supporting multi-objective
|
||||
# (e.g., opt_target_1, opt_target_2, ... and opt_direction_1, opt_direction_2, ...)
|
||||
"optimizer": opt.name,
|
||||
"opt_target": opt.target,
|
||||
"opt_direction": opt.direction,
|
||||
"repeat_i": repeat_i,
|
||||
})
|
||||
_run(env_context, opt_context, trial, global_config)
|
||||
|
||||
if do_teardown:
|
||||
env_context.teardown()
|
||||
|
|
|
@ -15,6 +15,8 @@
|
|||
"optimizer": "optimizers/one_shot_opt.jsonc",
|
||||
"storage": "storage/sqlite.jsonc",
|
||||
|
||||
"trial_config_repeat_count": 3,
|
||||
|
||||
"random_init": true,
|
||||
"random_seed": 42,
|
||||
|
||||
|
|
|
@ -93,7 +93,7 @@ def test_launch_main_app_opt(root_path: str, local_exec_service: LocalExecServic
|
|||
"""
|
||||
_launch_main_app(
|
||||
root_path, local_exec_service,
|
||||
"--config mlos_bench/mlos_bench/tests/config/cli/mock-opt.jsonc --max_iterations 3",
|
||||
"--config mlos_bench/mlos_bench/tests/config/cli/mock-opt.jsonc --trial_config_repeat_count 3 --max_iterations 3",
|
||||
[
|
||||
# Iteration 1: Expect first value to be the baseline
|
||||
f"^{_RE_DATE} mlos_core_optimizer\\.py:\\d+ " +
|
||||
|
@ -106,6 +106,6 @@ def test_launch_main_app_opt(root_path: str, local_exec_service: LocalExecServic
|
|||
r"register DEBUG Score: \d+\.\d+ Dataframe:\s*$",
|
||||
# Final result: baseline is the optimum for the mock environment
|
||||
f"^{_RE_DATE} run\\.py:\\d+ " +
|
||||
r"_optimize INFO Env: Mock environment best score: 64\.88\d+\s*$",
|
||||
r"_optimize INFO Env: Mock environment best score: 64\.53\d+\s*$",
|
||||
]
|
||||
)
|
||||
|
|
Loading…
Reference in a new issue