Mirror of https://github.com/microsoft/MLOS.git
Rename `max_iterations` to `max_suggestions` and track in Optimizer `.suggest()` instead of `.register()` (#713)
Makes the optimizers and schedulers a bit simpler. Part of issue #715. Closes #711. Note: the move from `--max_iterations` to `--max_suggestions` is a breaking change, so we will need to cut a new release for this.

Co-authored-by: Brian Kroth <bpkroth@users.noreply.github.com>
Parent: 647e3a2f3c
Commit: d2e7f056aa
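The core of the change, sketched below as a minimal, hypothetical example (simplified names, not the actual `mlos_bench` classes): the suggestion counter is advanced in `.suggest()` rather than `.register()`, so bulk-registered results from earlier experiments and repeated trials of the same configuration no longer consume the `max_suggestions` budget.

```python
# Minimal sketch of the new accounting, assuming a simplified optimizer interface
# (hypothetical class; the real mlos_bench Optimizer has many more responsibilities).
class BudgetedOptimizer:

    def __init__(self, max_suggestions: int = 100):
        self._max_suggestions = max_suggestions
        self._iter = 0  # number of configurations suggested so far

    def suggest(self) -> dict:
        """Each *new* suggestion consumes one unit of the budget."""
        self._iter += 1
        return {"example_param": self._iter}

    def register(self, config: dict, score: float) -> None:
        """Registering results (including repeated trials) no longer advances the counter."""
        print(f"registered {config} -> {score}")

    def not_converged(self) -> bool:
        return self._iter < self._max_suggestions
```

Under this accounting, options like `--trial-config-repeat-count` can re-run each suggested configuration without eating into the suggestion budget.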
@@ -195,10 +195,12 @@ Searching for an optimal set of tunable parameters is very similar to running a
 All we have to do is specifying the [`Optimizer`](./mlos_bench/optimizers/) in the top-level configuration, like in our [`azure-redis-opt.jsonc`](./mlos_bench/config/cli/azure-redis-opt.jsonc) example.

 ```sh
-mlos_bench --config "./mlos_bench/mlos_bench/config/cli/azure-redis-opt.jsonc" --globals "experiment_MyBenchmark.jsonc" --max_iterations 10
+mlos_bench --config "./mlos_bench/mlos_bench/config/cli/azure-redis-opt.jsonc" --globals "experiment_MyBenchmark.jsonc" --max_suggestions 10 --trial-config-repeat-count 3
 ```

-Note that again we use the command line option `--max_iterations` to override the default value from [`mlos_core_flaml.jsonc`](./mlos_bench/config/optimizers/mlos_core_flaml.jsonc).
+Note that again we use the command line option `--max_suggestions` to override the max. number of suggested configurations to trial from [`mlos_core_flaml.jsonc`](./mlos_bench/config/optimizers/mlos_core_flaml.jsonc).
+We also use `--trial-config-repeat-count` to benchmark each suggested configuration 3 times.
+That means, we will run 30 trials in total, 3 for each of the 10 suggested configurations.

 We don't have to specify the `"tunable_values"` for the optimization: the optimizer will suggest new values on each iteration and the framework will feed this data into the benchmarking environment.
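As a quick illustration of the trial-count arithmetic described in the README excerpt above (plain Python, not part of the repo):

```python
# 10 suggested configurations, each benchmarked 3 times -> 30 trials in total.
max_suggestions = 10
trial_config_repeat_count = 3
total_trials = max_suggestions * trial_config_repeat_count
assert total_trials == 30
```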
@@ -3,7 +3,7 @@
 // Licensed under the MIT License.
 //
 // Run:
-// mlos_bench --config mlos_bench/mlos_bench/config/cli/azure-redis-opt.jsonc --globals experiment_RedisBench.jsonc --max_iterations 10
+// mlos_bench --config mlos_bench/mlos_bench/config/cli/azure-redis-opt.jsonc --globals experiment_RedisBench.jsonc --max_suggestions 10
 {
     "config_path": [
         "mlos_bench/mlos_bench/config",
@@ -92,7 +92,7 @@ will be pushed down to the `Optimizer` configuration, e.g., [`mlos_core_flaml.js
 > NOTE: it is perfectly ok to have several files with the experiment-specific parameters (say, one for Azure, another one for Storage, and so on) and either include them in the `"globals"` section of the CLI config, and/or specify them in the command line when running the experiment, e.g.
 >
 > ```bash
-> mlos_bench --config mlos_bench/mlos_bench/config/cli/azure-redis-opt.jsonc --globals experiment_Redis_Azure.jsonc experiment_Redis_Tunables.jsonc --max_iterations 10
+> mlos_bench --config mlos_bench/mlos_bench/config/cli/azure-redis-opt.jsonc --globals experiment_Redis_Azure.jsonc experiment_Redis_Tunables.jsonc --max_suggestions 10
 > ```
 >
 > (Note several files after the `--globals` option).
@@ -7,6 +7,6 @@
     "config": {
         "optimization_target": "score",
         "optimization_direction": "min",
-        "max_iterations": 100
+        "max_suggestions": 100
     }
 }

@@ -7,7 +7,7 @@
     "config": {
         "optimization_target": "score",
         "optimization_direction": "min",
-        "max_iterations": 100,
+        "max_suggestions": 100,
         "optimizer_type": "FLAML"
     }
 }

@@ -7,7 +7,7 @@
     "config": {
         "optimization_target": "score",
         "optimization_direction": "min",
-        "max_iterations": 100,
+        "max_suggestions": 100,
         "optimizer_type": "SMAC",
         "output_directory": null // Override to have a permanent output with SMAC history etc.
     }

@@ -6,7 +6,7 @@

     "config": {
         "optimization_target": "score",
-        "max_iterations": 5,
+        "max_suggestions": 5,
         "seed": 42
     }
 }
@@ -22,8 +22,8 @@
             "enum": ["min", "max"],
             "example": "min"
         },
-        "max_iterations": {
-            "description": "The maximum number of additional (in the case of merging experiment data or resuming experiments) iterations to run when we launch the app.",
+        "max_suggestions": {
+            "description": "The maximum number of additional (in the case of merging experiment data or resuming experiments) config suggestions to run when we launch the app, or no limit if 0 is provided. Note: configs may be repeated in more than one trial.",
             "type": "integer",
             "minimum": 0,
             "example": 100
@@ -35,7 +35,7 @@ class Optimizer(metaclass=ABCMeta):     # pylint: disable=too-many-instance-attr
     BASE_SUPPORTED_CONFIG_PROPS = {
         "optimization_target",
         "optimization_direction",
-        "max_iterations",
+        "max_suggestions",
         "seed",
         "start_with_defaults",
     }

@@ -71,12 +71,12 @@ class Optimizer(metaclass=ABCMeta):     # pylint: disable=too-many-instance-attr
         experiment_id = self._global_config.get('experiment_id')
         self.experiment_id = str(experiment_id).strip() if experiment_id else None

-        self._iter = 1
+        self._iter = 0
         # If False, use the optimizer to suggest the initial configuration;
         # if True (default), use the already initialized values for the first iteration.
         self._start_with_defaults: bool = bool(
             strtobool(str(self._config.pop('start_with_defaults', True))))
-        self._max_iter = int(self._config.pop('max_iterations', 100))
+        self._max_iter = int(self._config.pop('max_suggestions', 100))
         self._opt_target = str(self._config.pop('optimization_target', 'score'))
         self._opt_sign = {"min": 1, "max": -1}[self._config.pop('optimization_direction', 'min')]
@@ -224,7 +224,7 @@ class Optimizer(metaclass=ABCMeta):     # pylint: disable=too-many-instance-attr

     @abstractmethod
     def bulk_register(self, configs: Sequence[dict], scores: Sequence[Optional[float]],
-                      status: Optional[Sequence[Status]] = None, is_warm_up: bool = False) -> bool:
+                      status: Optional[Sequence[Status]] = None) -> bool:
         """
         Pre-load the optimizer with the bulk data from previous experiments.

@@ -236,16 +236,13 @@ class Optimizer(metaclass=ABCMeta):     # pylint: disable=too-many-instance-attr
             Benchmark results from experiments that correspond to `configs`.
         status : Optional[Sequence[float]]
             Status of the experiments that correspond to `configs`.
-        is_warm_up : bool
-            True for the initial load, False for subsequent calls.

         Returns
         -------
         is_not_empty : bool
             True if there is data to register, false otherwise.
         """
-        _LOG.info("%s the optimizer with: %d configs, %d scores, %d status values",
-                  "Warm-up" if is_warm_up else "Load",
+        _LOG.info("Update the optimizer with: %d configs, %d scores, %d status values",
                   len(configs or []), len(scores or []), len(status or []))
         if len(configs or []) != len(scores or []):
             raise ValueError("Numbers of configs and scores do not match.")
@@ -257,10 +254,11 @@ class Optimizer(metaclass=ABCMeta):     # pylint: disable=too-many-instance-attr
             self._start_with_defaults = False
         return has_data

-    @abstractmethod
     def suggest(self) -> TunableGroups:
         """
         Generate the next suggestion.
+        Base class' implementation increments the iteration count
+        and returns the current values of the tunables.

         Returns
         -------

@@ -269,13 +267,15 @@ class Optimizer(metaclass=ABCMeta):     # pylint: disable=too-many-instance-attr
             These are the same tunables we pass to the constructor,
             but with the values set to the next suggestion.
         """
+        self._iter += 1
+        _LOG.debug("Iteration %d :: Suggest", self._iter)
+        return self._tunables.copy()

     @abstractmethod
     def register(self, tunables: TunableGroups, status: Status,
                  score: Optional[Union[float, Dict[str, float]]] = None) -> Optional[float]:
         """
         Register the observation for the given configuration.
-        Base class' implementations logs and increments the iteration count.

         Parameters
         ----------
@@ -295,7 +295,6 @@ class Optimizer(metaclass=ABCMeta):     # pylint: disable=too-many-instance-attr
         """
         _LOG.info("Iteration %d :: Register: %s = %s score: %s",
                   self._iter, tunables, status, score)
-        self._iter += 1
         if status.is_succeeded() == (score is None):    # XOR
             raise ValueError("Status and score must be consistent.")
         return self._get_score(status, score)

@@ -336,7 +335,7 @@ class Optimizer(metaclass=ABCMeta):     # pylint: disable=too-many-instance-attr
         Return True if not converged, False otherwise.
         Base implementation just checks the iteration count.
         """
-        return self._iter <= self._max_iter
+        return self._iter < self._max_iter

     @abstractmethod
     def get_best_observation(self) -> Union[Tuple[float, TunableGroups], Tuple[None, None]]:
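With `_iter` now starting at 0 and advancing only in `suggest()`, the strict `<` comparison caps a run at exactly `max_suggestions` suggestions. A small, illustrative check of that invariant (assumed simplification, not the MLOS source):

```python
# Illustrative check of the budget invariant after the change:
# the counter starts at 0, suggest() increments it, and not_converged() uses "<".
max_suggestions = 3
_iter = 0
num_suggested = 0
while _iter < max_suggestions:  # not_converged()
    _iter += 1                  # suggest() advances the counter
    num_suggested += 1
assert num_suggested == max_suggestions
```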
@@ -109,27 +109,24 @@ class GridSearchOptimizer(TrackBestOptimizer):
         return (dict(zip(self._config_keys, config)) for config in self._suggested_configs)

     def bulk_register(self, configs: Sequence[dict], scores: Sequence[Optional[float]],
-                      status: Optional[Sequence[Status]] = None, is_warm_up: bool = False) -> bool:
-        if not super().bulk_register(configs, scores, status, is_warm_up):
+                      status: Optional[Sequence[Status]] = None) -> bool:
+        if not super().bulk_register(configs, scores, status):
             return False
         if status is None:
             status = [Status.SUCCEEDED] * len(configs)
         for (params, score, trial_status) in zip(configs, scores, status):
             tunables = self._tunables.copy().assign(params)
             self.register(tunables, trial_status, nullable(float, score))
-        if is_warm_up:
-            # Do not advance the iteration counter during warm-up.
-            self._iter -= 1
         if _LOG.isEnabledFor(logging.DEBUG):
             (score, _) = self.get_best_observation()
-            _LOG.debug("%s end: %s = %s", "Warm-up" if is_warm_up else "Update", self.target, score)
+            _LOG.debug("Update end: %s = %s", self.target, score)
         return True

     def suggest(self) -> TunableGroups:
         """
         Generate the next grid search suggestion.
         """
-        tunables = self._tunables.copy()
+        tunables = super().suggest()
         if self._start_with_defaults:
             _LOG.info("Use default values for the first trial")
             self._start_with_defaults = False
@@ -91,8 +91,8 @@ class MlosCoreOptimizer(Optimizer):
         return f"{self.__class__.__name__}:{self._opt.__class__.__name__}"

     def bulk_register(self, configs: Sequence[dict], scores: Sequence[Optional[float]],
-                      status: Optional[Sequence[Status]] = None, is_warm_up: bool = False) -> bool:
-        if not super().bulk_register(configs, scores, status, is_warm_up):
+                      status: Optional[Sequence[Status]] = None) -> bool:
+        if not super().bulk_register(configs, scores, status):
             return False
         df_configs = self._to_df(configs)   # Impute missing values, if necessary
         df_scores = pd.Series(scores, dtype=float) * self._opt_sign

@@ -103,8 +103,6 @@ class MlosCoreOptimizer(Optimizer):
             df_configs = df_configs[df_status_completed]
             df_scores = df_scores[df_status_completed]
         self._opt.register(df_configs, df_scores)
-        if not is_warm_up:
-            self._iter += len(df_scores)
         if _LOG.isEnabledFor(logging.DEBUG):
             (score, _) = self.get_best_observation()
             _LOG.debug("Warm-up end: %s = %s", self.target, score)

@@ -154,12 +152,13 @@ class MlosCoreOptimizer(Optimizer):
         return df_configs

     def suggest(self) -> TunableGroups:
+        tunables = super().suggest()
         if self._start_with_defaults:
             _LOG.info("Use default values for the first trial")
         df_config = self._opt.suggest(defaults=self._start_with_defaults)
         self._start_with_defaults = False
         _LOG.info("Iteration %d :: Suggest:\n%s", self._iter, df_config)
-        return self._tunables.copy().assign(
+        return tunables.assign(
             configspace_data_to_tunable_values(df_config.loc[0].to_dict()))

     def register(self, tunables: TunableGroups, status: Status,
@@ -41,27 +41,24 @@ class MockOptimizer(TrackBestOptimizer):
         }

     def bulk_register(self, configs: Sequence[dict], scores: Sequence[Optional[float]],
-                      status: Optional[Sequence[Status]] = None, is_warm_up: bool = False) -> bool:
-        if not super().bulk_register(configs, scores, status, is_warm_up):
+                      status: Optional[Sequence[Status]] = None) -> bool:
+        if not super().bulk_register(configs, scores, status):
             return False
         if status is None:
             status = [Status.SUCCEEDED] * len(configs)
         for (params, score, trial_status) in zip(configs, scores, status):
             tunables = self._tunables.copy().assign(params)
             self.register(tunables, trial_status, nullable(float, score))
-        if is_warm_up:
-            # Do not advance the iteration counter during warm-up.
-            self._iter -= 1
         if _LOG.isEnabledFor(logging.DEBUG):
             (score, _) = self.get_best_observation()
-            _LOG.debug("Warm-up end: %s = %s", self.target, score)
+            _LOG.debug("Bulk register end: %s = %s", self.target, score)
         return True

     def suggest(self) -> TunableGroups:
         """
         Generate the next (random) suggestion.
         """
-        tunables = self._tunables.copy()
+        tunables = super().suggest()
         if self._start_with_defaults:
             _LOG.info("Use default values for the first trial")
             self._start_with_defaults = False
@@ -36,7 +36,3 @@ class OneShotOptimizer(MockOptimizer):
     @property
     def supports_preload(self) -> bool:
         return False
-
-    def suggest(self) -> TunableGroups:
-        _LOG.info("Suggest: %s", self._tunables)
-        return self._tunables.copy()
@@ -79,6 +79,7 @@ class Scheduler(metaclass=ABCMeta):
         self.optimizer = optimizer
         self.storage = storage
         self._root_env_config = root_env_config
+        self._last_trial_id = -1

         _LOG.debug("Scheduler instantiated: %s :: %s", self, config)

@@ -179,21 +180,24 @@ class Scheduler(metaclass=ABCMeta):
         _LOG.debug("Config %d ::\n%s", config_id, json.dumps(tunable_values, indent=2))
         return tunables

-    def _get_optimizer_suggestions(self, last_trial_id: int = -1, is_warm_up: bool = False) -> int:
+    def _schedule_new_optimizer_suggestions(self) -> bool:
         """
         Optimizer part of the loop. Load the results of the executed trials
         into the optimizer, suggest new configurations, and add them to the queue.
-        Return the last trial ID processed by the optimizer.
+        Return True if optimization is not over, False otherwise.
         """
         assert self.experiment is not None
-        (trial_ids, configs, scores, status) = self.experiment.load(last_trial_id)
+        (trial_ids, configs, scores, status) = self.experiment.load(self._last_trial_id)
         _LOG.info("QUEUE: Update the optimizer with trial results: %s", trial_ids)
-        self.optimizer.bulk_register(configs, scores, status, is_warm_up)
+        self.optimizer.bulk_register(configs, scores, status)
+        self._last_trial_id = max(trial_ids, default=self._last_trial_id)

-        tunables = self.optimizer.suggest()
-        self.schedule_trial(tunables)
+        not_converged = self.optimizer.not_converged()
+        if not_converged:
+            tunables = self.optimizer.suggest()
+            self.schedule_trial(tunables)

-        return max(trial_ids, default=last_trial_id)
+        return not_converged

     def schedule_trial(self, tunables: TunableGroups) -> None:
         """
@@ -29,16 +29,15 @@ class SyncScheduler(Scheduler):
         """
         super().start()

-        last_trial_id = -1
         is_warm_up = self.optimizer.supports_preload
         if not is_warm_up:
             _LOG.warning("Skip pending trials and warm-up: %s", self.optimizer)

-        while self.optimizer.not_converged():
-            _LOG.info("Optimization loop: %s Last trial ID: %d",
-                      "Warm-up" if is_warm_up else "Run", last_trial_id)
+        not_converged = True
+        while not_converged:
+            _LOG.info("Optimization loop: Last trial ID: %d", self._last_trial_id)
             self._run_schedule(is_warm_up)
-            last_trial_id = self._get_optimizer_suggestions(last_trial_id, is_warm_up)
+            not_converged = self._schedule_new_optimizer_suggestions()
             is_warm_up = False

     def run_trial(self, trial: Storage.Trial) -> None:
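Putting the scheduler changes together, the synchronous loop now runs roughly as in the following sketch (hypothetical stand-in functions and parameters; the real `Scheduler` uses its `Storage.Experiment` and `Optimizer` members rather than plain callables):

```python
# Rough sketch of the refactored synchronous loop (hypothetical stand-ins, not the
# actual mlos_bench Scheduler API).
def optimization_loop(optimizer, experiment, run_schedule, schedule_trial):
    last_trial_id = -1
    not_converged = True
    while not_converged:
        run_schedule()  # execute the trials currently in the queue
        # Feed the finished trials back into the optimizer.
        (trial_ids, configs, scores, status) = experiment.load(last_trial_id)
        optimizer.bulk_register(configs, scores, status)
        last_trial_id = max(trial_ids, default=last_trial_id)
        # Ask for the next suggestion only while the budget allows it.
        not_converged = optimizer.not_converged()
        if not_converged:
            schedule_trial(optimizer.suggest())
    return last_trial_id
```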
@@ -22,7 +22,7 @@
     "pathVarWithEnvVarRef": "$CUSTOM_PATH_FROM_ENV/foo",
     "varWithEnvVarRef": "user:$USER",

-    // Override the default value of the "max_iterations" parameter
+    // Override the default value of the "max_suggestions" parameter
     // of the optimizer when running local tests:
-    "max_iterations": 5
+    "max_suggestions": 5
 }
@@ -1,6 +1,6 @@
 {
     "class": "mlos_bench.optimizers.grid_search_optimizer.GridSearchOptimizer",
     "config": {
-        "max_iterations": null,
+        "max_suggestions": null,
     }
 }

@@ -2,7 +2,7 @@

     "config": {
         "optimization_target": "score",
-        "max_iterations": 20,
+        "max_suggestions": 20,
         "seed": 12345,
         "start_with_defaults": false
     }

@@ -3,7 +3,7 @@
     "config": {
         // Here we do our best to list the exhaustive set of full configs available for the base optimizer config.
         "optimization_target": "score",
-        "max_iterations": 20,
+        "max_suggestions": 20,
         "seed": 12345,
         "start_with_defaults": false,
         "optimizer_type": "SMAC",

@@ -3,7 +3,7 @@

     "config": {
         "optimization_target": "score",
-        "max_iterations": 20,
+        "max_suggestions": 20,
         "seed": 12345,
         "start_with_defaults": false,
         "optimizer_type": "SMAC",

@@ -6,7 +6,7 @@

     "config": {
         "optimization_target": "score",
-        "max_iterations": 20,
+        "max_suggestions": 20,
         "seed": 12345,
         "start_with_defaults": false
     }
@@ -5,7 +5,7 @@
         "some/path/to/tunables.jsonc"
     ],
     "config": {
-        "max_iterations": 100,
+        "max_suggestions": 100,
         "optimization_direction": "max",
         "optimization_target": "score",
         "seed": 12345,

@@ -4,7 +4,7 @@
     "config": {
         // Here we do our best to list the exhaustive set of full configs available for the base optimizer config.
         "optimization_target": "score",
-        "max_iterations": 10,
+        "max_suggestions": 10,
         "seed": 12345,
         "start_with_defaults": false,
         "optimizer_type": "FLAML",

@@ -4,7 +4,7 @@
     "config": {
         // Here we do our best to list the exhaustive set of full configs available for the base optimizer config.
         "optimization_target": "score",
-        "max_iterations": 20,
+        "max_suggestions": 20,
         "seed": 12345,
         "start_with_defaults": false,
         "optimizer_type": "RANDOM",

@@ -4,7 +4,7 @@
     "config": {
         // Here we do our best to list the exhaustive set of full configs available for the base optimizer config.
         "optimization_target": "score",
-        "max_iterations": 20,
+        "max_suggestions": 20,
         "seed": 12345,
         "start_with_defaults": false,
         "optimizer_type": "SMAC",

@@ -3,7 +3,7 @@
     "config": {
         // Here we do our best to list the exhaustive set of full configs available for the base optimizer config.
         "optimization_target": "score",
-        "max_iterations": 20,
+        "max_suggestions": 20,
         "seed": 12345,
         "start_with_defaults": false,
         "optimizer_type": "SMAC",

@@ -4,7 +4,7 @@
     "config": {
         // Here we do our best to list the exhaustive set of full configs available for the base optimizer config.
         "optimization_target": "score",
-        "max_iterations": 20,
+        "max_suggestions": 20,
         "seed": 12345,
         "start_with_defaults": false
     }

@@ -11,7 +11,7 @@
     "config": {
         // Here we do our best to list the exhaustive set of full configs available for the base optimizer config.
         "optimization_target": "score",
-        "max_iterations": 20,
+        "max_suggestions": 20,
         "seed": 12345,
         "start_with_defaults": false
     }

@@ -4,7 +4,7 @@

     "config": {
         "optimization_target": "score",
-        "max_iterations": 20,
+        "max_suggestions": 20,
         "seed": 12345,
         "optimizer_type": "SMAC",
         "space_adapter_type": "LLAMATUNE"
@@ -21,7 +21,7 @@ from mlos_bench.run import _main
         ([
             "--config", "mlos_bench/mlos_bench/tests/config/cli/mock-opt.jsonc",
             "--trial_config_repeat_count", "3",
-            "--max_iterations", "3",
+            "--max_suggestions", "3",
         ], 64.53897),
     ]
 )

@@ -171,10 +171,10 @@ def test_launcher_args_parse_2(config_paths: List[str]) -> None:
     globals_file_config = launcher.config_loader.load_config(globals_file, ConfigSchema.GLOBALS)
     # The actual global_config gets overwritten as a part of processing, so to test
     # this we read the original value out of the source files.
-    orig_max_iters = globals_file_config.get('max_iterations', opt_config.get('config', {}).get('max_iterations', 100))
+    orig_max_iters = globals_file_config.get('max_suggestions', opt_config.get('config', {}).get('max_suggestions', 100))
     assert launcher.optimizer.max_iterations \
         == orig_max_iters \
-        == launcher.global_config['max_iterations']
+        == launcher.global_config['max_suggestions']

     # Check that the optimizer got initialized with random values instead of the defaults.
     # Note: the environment doesn't get updated until suggest() is called to

@@ -93,8 +93,7 @@ def test_launch_main_app_opt(root_path: str, local_exec_service: LocalExecServic
     """
     _launch_main_app(
         root_path, local_exec_service,
-        # TODO: Reset --max_iterations to 3 after fixing the optimizer
-        "--config mlos_bench/mlos_bench/tests/config/cli/mock-opt.jsonc --trial_config_repeat_count 3 --max_iterations 9",
+        "--config mlos_bench/mlos_bench/tests/config/cli/mock-opt.jsonc --trial_config_repeat_count 3 --max_suggestions 3",
         [
             # Iteration 1: Expect first value to be the baseline
             f"^{_RE_DATE} mlos_core_optimizer\\.py:\\d+ " +
@@ -61,7 +61,7 @@ def mock_opt_no_defaults(tunable_groups: TunableGroups) -> MockOptimizer:
         config={
             "optimization_target": "score",
             "optimization_direction": "min",
-            "max_iterations": 5,
+            "max_suggestions": 5,
             "start_with_defaults": False,
             "seed": SEED
         },

@@ -79,7 +79,7 @@ def mock_opt(tunable_groups: TunableGroups) -> MockOptimizer:
         config={
             "optimization_target": "score",
             "optimization_direction": "min",
-            "max_iterations": 5,
+            "max_suggestions": 5,
             "seed": SEED
         },
     )

@@ -96,7 +96,7 @@ def mock_opt_max(tunable_groups: TunableGroups) -> MockOptimizer:
         config={
             "optimization_target": "score",
             "optimization_direction": "max",
-            "max_iterations": 10,
+            "max_suggestions": 10,
             "seed": SEED
         },
     )

@@ -113,7 +113,7 @@ def flaml_opt(tunable_groups: TunableGroups) -> MlosCoreOptimizer:
         config={
             "optimization_target": "score",
             "optimization_direction": "min",
-            "max_iterations": 15,
+            "max_suggestions": 15,
             "optimizer_type": "FLAML",
             "seed": SEED,
         },

@@ -131,7 +131,7 @@ def flaml_opt_max(tunable_groups: TunableGroups) -> MlosCoreOptimizer:
         config={
             "optimization_target": "score",
             "optimization_direction": "max",
-            "max_iterations": 15,
+            "max_suggestions": 15,
             "optimizer_type": "FLAML",
             "seed": SEED,
         },

@@ -157,7 +157,7 @@ def smac_opt(tunable_groups: TunableGroups) -> MlosCoreOptimizer:
         config={
             "optimization_target": "score",
             "optimization_direction": "min",
-            "max_iterations": SMAC_ITERATIONS,
+            "max_suggestions": SMAC_ITERATIONS,
             "optimizer_type": "SMAC",
             "seed": SEED,
             "output_directory": None,

@@ -179,7 +179,7 @@ def smac_opt_max(tunable_groups: TunableGroups) -> MlosCoreOptimizer:
         config={
             "optimization_target": "score",
             "optimization_direction": "max",
-            "max_iterations": SMAC_ITERATIONS,
+            "max_suggestions": SMAC_ITERATIONS,
             "optimizer_type": "SMAC",
             "seed": SEED,
             "output_directory": None,
@@ -82,7 +82,7 @@ def grid_search_opt(grid_search_tunables: TunableGroups,
     # multiple of the number of elements in the grid.
     max_iterations = len(grid_search_tunables_grid) * 2 - 3
     return GridSearchOptimizer(tunables=grid_search_tunables, config={
-        "max_iterations": max_iterations,
+        "max_suggestions": max_iterations,
         "optimization_direction": "max",
     })

@@ -161,7 +161,7 @@ def test_grid_search(grid_search_opt: GridSearchOptimizer,
     assert all(config in grid_search_tunables_grid for config in grid_search_opt.pending_configs)
     assert all(config in list(grid_search_opt.pending_configs) for config in grid_search_tunables_grid)

-    # FIXME: Should we consider not_converged as the "max_iterations", an empty grid, or both?
+    # We consider not_converged as either having reached "max_suggestions" or an empty grid?

     # Try to empty the rest of the grid.
     while grid_search_opt.not_converged():
@@ -31,7 +31,7 @@ def llamatune_opt(tunable_groups: TunableGroups) -> MlosCoreOptimizer:
                 "num_low_dims": 2,
             },
             "optimization_target": "score",
-            "max_iterations": 10,
+            "max_suggestions": 10,
             "optimizer_type": "SMAC",
             "seed": SEED,
             # "start_with_defaults": False,

@@ -26,7 +26,7 @@ def mlos_core_optimizer(tunable_groups: TunableGroups) -> MlosCoreOptimizer:
     """
     test_opt_config = {
         'optimizer_type': 'FLAML',
-        'max_iterations': 10,
+        'max_suggestions': 10,
         'seed': SEED,
     }
     return MlosCoreOptimizer(tunable_groups, test_opt_config)

@@ -30,7 +30,7 @@ def test_init_mlos_core_smac_opt_bad_trial_count(tunable_groups: TunableGroups) -> None:
     test_opt_config = {
         'optimizer_type': 'SMAC',
         'max_trials': 10,
-        'max_iterations': 11,
+        'max_suggestions': 11,
         'seed': SEED,
     }
     with pytest.raises(AssertionError):
@@ -44,13 +44,13 @@ def test_init_mlos_core_smac_opt_max_trials(tunable_groups: TunableGroups) -> No
     """
     test_opt_config = {
         'optimizer_type': 'SMAC',
-        'max_iterations': 123,
+        'max_suggestions': 123,
         'seed': SEED,
     }
     opt = MlosCoreOptimizer(tunable_groups, test_opt_config)
     # pylint: disable=protected-access
     assert isinstance(opt._opt, SmacOptimizer)
-    assert opt._opt.base_optimizer.scenario.n_trials == test_opt_config['max_iterations']
+    assert opt._opt.base_optimizer.scenario.n_trials == test_opt_config['max_suggestions']


 def test_init_mlos_core_smac_absolute_output_directory(tunable_groups: TunableGroups) -> None: