Mirror of https://github.com/microsoft/MLOS.git
Merged PR 560: Merging initial os_autotune_main work to main
We will do our OS autotuning related work on mlos-bench there. Main prior to this has been tagged as v0.0.3 for servicing Spark autotune needs if need be. The plan is to reorganize the source tree after this so we can generate separate `mlos-core` and `mlos-bench` packages.
This commit is contained in:
Parent
a3d659402e
Commit
d9224875c5
@@ -139,6 +139,9 @@ cython_debug/

# vim swap files
.*.swp
.*.swo

junit/test-results.xml

junit/test-results.xml

@@ -147,3 +150,12 @@ junit/test-results.xml
.doc-prereqs.build-stamp
.pylint.build-stamp
.pytest.build-stamp

.vs/
.vscode/
!.vscode/settings.json
!.vscode/extensions.json

# Test config. May contain sensitive information
# like Azure credentials etc.
/config.json
@@ -0,0 +1,11 @@
{
  "recommendations": [
    "EditorConfig.EditorConfig",
    "streetsidesoftware.code-spell-checker",
    "ms-python.vscode-pylance",
    "ms-python.python",
    "donjayamanne.python-environment-manager",
    "lextudio.restructuredtext",
    "trond-snekvik.simple-rst"
  ]
}
@@ -1,5 +1,6 @@
{
  "python.defaultInterpreterPath": "${env:HOME}/.conda/envs/mlos_core/bin/python",
  "makefile.extensionOutputFolder": "./.vscode",
  "python.defaultInterpreterPath": "${env:HOME}${env:USERPROFILE}/.conda/envs/mlos_core/bin/python",
  "python.linting.enabled": true,
  "python.linting.pylintEnabled": true,
  "python.testing.pytestEnabled": true,
@@ -7,6 +8,7 @@
    "Skopt",
    "conda",
    "configspace",
    "dataframe",
    "emukit",
    "gpbo",
    "ipykernel",
@@ -26,5 +28,10 @@
    "tolist",
    "xlabel",
    "ylabel"
  ]
  ],
  "restructuredtext.linter.doc8.extraArgs": [
    "--ignore D001"
  ],
  "esbonio.sphinx.confDir": "${workspaceFolder}/doc/source",
  "esbonio.sphinx.buildDir": "${workspaceFolder}/doc/build/"
}
@@ -0,0 +1,21 @@
MIT License

Copyright (c) Microsoft Corporation.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Makefile

@@ -22,7 +22,7 @@ check: pylint
pylint: conda-env .pylint.build-stamp

.pylint.build-stamp: $(PYTHON_FILES) .pylintrc
	conda run -n ${CONDA_DEFAULT_ENV} pylint -j0 mlos_core
	conda run -n ${CONDA_DEFAULT_ENV} pylint -j0 mlos_core mlos_bench
	touch .pylint.build-stamp

.PHONY: test

@@ -32,17 +32,17 @@ test: pytest
pytest: conda-env .pytest.build-stamp

.pytest.build-stamp: $(PYTHON_FILES) pytest.ini
	#conda run -n ${CONDA_DEFAULT_ENV} pytest -n auto --cov=mlos_core --cov-report=xml mlos_core/
	conda run -n ${CONDA_DEFAULT_ENV} pytest --cov=mlos_core --cov-report=xml mlos_core/ --junitxml=junit/test-results.xml
	#conda run -n ${CONDA_DEFAULT_ENV} pytest -n auto --cov=mlos_core --cov-report=xml mlos_core/ mlos_bench/
	conda run -n ${CONDA_DEFAULT_ENV} pytest --cov --cov-report=xml mlos_core/ mlos_bench/ --junitxml=junit/test-results.xml
	touch .pytest.build-stamp

.PHONY: dist
dist: bdist_wheel

.PHONY: bdist_wheel
bdist_wheel: conda-env dist/mlos_core-*-py3-none-any.whl
bdist_wheel: conda-env dist/mlos_core-*-py3-none-any.whl dist/mlos_bench-*-py3-none-any.whl

dist/mlos_core-*-py3-none-any.whl: setup.py $(PYTHON_FILES)
dist/mlos_bench-*-py3-none-any.whl dist/mlos_core-*-py3-none-any.whl: setup.py $(PYTHON_FILES)
	conda run -n ${CONDA_DEFAULT_ENV} python3 setup.py bdist_wheel

.doc-prereqs.build-stamp: doc/requirements.txt

@@ -54,8 +54,9 @@ doc-prereqs: .doc-prereqs.build-stamp

.PHONY: doc
doc: conda-env doc-prereqs
	cd doc/ && conda run -n ${CONDA_DEFAULT_ENV} sphinx-apidoc -f -e -o source/api ../mlos_core
	conda run -n ${CONDA_DEFAULT_ENV} make -C doc/ html
	rm -f doc/build/html/index.html
	cd doc/ && conda run -n ${CONDA_DEFAULT_ENV} sphinx-apidoc -f -e -M -o source/api .. ../setup.py ../pytest_configure.py
	conda run -n ${CONDA_DEFAULT_ENV} make -j -C doc/ html
	test -s doc/build/html/index.html
	cp doc/staticwebapp.config.json doc/build/html/

@@ -80,3 +81,4 @@ dist-clean:
clean: clean-check clean-test dist-clean clean-doc
	rm -f .conda-env.build-stamp .conda-env.*.build-stamp
	rm -rf mlos_core.egg-info
	rm -rf mlos_bench.egg-info
@ -0,0 +1,262 @@
|
|||
{
|
||||
"$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#",
|
||||
"contentVersion": "1.0.0.0",
|
||||
"parameters": {
|
||||
"vmName": {
|
||||
"type": "string",
|
||||
"defaultValue": "osat-linux-vm",
|
||||
"metadata": {
|
||||
"description": "OS Autotune Linux VM"
|
||||
}
|
||||
},
|
||||
"adminUsername": {
|
||||
"type": "string",
|
||||
"metadata": {
|
||||
"description": "Username for the Virtual Machine."
|
||||
}
|
||||
},
|
||||
"authenticationType": {
|
||||
"type": "string",
|
||||
"defaultValue": "sshPublicKey",
|
||||
"allowedValues": [
|
||||
"sshPublicKey",
|
||||
"password"
|
||||
],
|
||||
"metadata": {
|
||||
"description": "Type of authentication to use on the Virtual Machine. SSH key is recommended."
|
||||
}
|
||||
},
|
||||
"adminPasswordOrKey": {
|
||||
"type": "secureString",
|
||||
"metadata": {
|
||||
"description": "SSH Key or password for the Virtual Machine. SSH key is recommended."
|
||||
}
|
||||
},
|
||||
"dnsLabelPrefix": {
|
||||
"type": "string",
|
||||
"defaultValue": "[toLower(format('{0}-{1}', parameters('vmName'), uniqueString(resourceGroup().id)))]",
|
||||
"metadata": {
|
||||
"description": "Unique DNS Name for the Public IP used to access the Virtual Machine."
|
||||
}
|
||||
},
|
||||
"ubuntuOSVersion": {
|
||||
"type": "string",
|
||||
"defaultValue": "18.04-LTS",
|
||||
"allowedValues": [
|
||||
"12.04.5-LTS",
|
||||
"14.04.5-LTS",
|
||||
"16.04.0-LTS",
|
||||
"18.04-LTS"
|
||||
],
|
||||
"metadata": {
|
||||
"description": "The Ubuntu version for the VM. This will pick a fully patched image of this given Ubuntu version."
|
||||
}
|
||||
},
|
||||
"location": {
|
||||
"type": "string",
|
||||
"defaultValue": "[resourceGroup().location]",
|
||||
"metadata": {
|
||||
"description": "Location for all resources."
|
||||
}
|
||||
},
|
||||
"vmSize": {
|
||||
"type": "string",
|
||||
"defaultValue": "Standard_B2s",
|
||||
"metadata": {
|
||||
"description": "The size of the VM"
|
||||
}
|
||||
},
|
||||
"virtualNetworkName": {
|
||||
"type": "string",
|
||||
"defaultValue": "osat-vnet",
|
||||
"metadata": {
|
||||
"description": "Name of the VNET"
|
||||
}
|
||||
},
|
||||
"subnetName": {
|
||||
"type": "string",
|
||||
"defaultValue": "osat-subnet",
|
||||
"metadata": {
|
||||
"description": "Name of the subnet in the virtual network"
|
||||
}
|
||||
},
|
||||
"networkSecurityGroupName": {
|
||||
"type": "string",
|
||||
"defaultValue": "osat-sg",
|
||||
"metadata": {
|
||||
"description": "Name of the Network Security Group"
|
||||
}
|
||||
}
|
||||
},
|
||||
"variables": {
|
||||
"publicIPAddressName": "[format('{0}PublicIP', parameters('vmName'))]",
|
||||
"networkInterfaceName": "[format('{0}NetInt', parameters('vmName'))]",
|
||||
"osDiskType": "Standard_LRS",
|
||||
"subnetAddressPrefix": "10.1.0.0/24",
|
||||
"addressPrefix": "10.1.0.0/16",
|
||||
"linuxConfiguration": {
|
||||
"disablePasswordAuthentication": true,
|
||||
"ssh": {
|
||||
"publicKeys": [
|
||||
{
|
||||
"path": "[format('/home/{0}/.ssh/authorized_keys', parameters('adminUsername'))]",
|
||||
"keyData": "[parameters('adminPasswordOrKey')]"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"resources": [
|
||||
{
|
||||
"type": "Microsoft.Network/networkInterfaces",
|
||||
"apiVersion": "2021-05-01",
|
||||
"name": "[variables('networkInterfaceName')]",
|
||||
"location": "[parameters('location')]",
|
||||
"properties": {
|
||||
"ipConfigurations": [
|
||||
{
|
||||
"name": "ipconfig1",
|
||||
"properties": {
|
||||
"subnet": {
|
||||
"id": "[resourceId('Microsoft.Network/virtualNetworks/subnets', parameters('virtualNetworkName'), parameters('subnetName'))]"
|
||||
},
|
||||
"privateIPAllocationMethod": "Dynamic",
|
||||
"publicIPAddress": {
|
||||
"id": "[resourceId('Microsoft.Network/publicIPAddresses', variables('publicIPAddressName'))]"
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"networkSecurityGroup": {
|
||||
"id": "[resourceId('Microsoft.Network/networkSecurityGroups', parameters('networkSecurityGroupName'))]"
|
||||
}
|
||||
},
|
||||
"dependsOn": [
|
||||
"[resourceId('Microsoft.Network/networkSecurityGroups', parameters('networkSecurityGroupName'))]",
|
||||
"[resourceId('Microsoft.Network/publicIPAddresses', variables('publicIPAddressName'))]",
|
||||
"[resourceId('Microsoft.Network/virtualNetworks/subnets', parameters('virtualNetworkName'), parameters('subnetName'))]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"type": "Microsoft.Network/networkSecurityGroups",
|
||||
"apiVersion": "2021-05-01",
|
||||
"name": "[parameters('networkSecurityGroupName')]",
|
||||
"location": "[parameters('location')]",
|
||||
"properties": {
|
||||
"securityRules": [
|
||||
{
|
||||
"name": "SSH",
|
||||
"properties": {
|
||||
"priority": 1000,
|
||||
"protocol": "Tcp",
|
||||
"access": "Allow",
|
||||
"direction": "Inbound",
|
||||
"sourceAddressPrefix": "*",
|
||||
"sourcePortRange": "*",
|
||||
"destinationAddressPrefix": "*",
|
||||
"destinationPortRange": "22"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "Microsoft.Network/virtualNetworks",
|
||||
"apiVersion": "2021-05-01",
|
||||
"name": "[parameters('virtualNetworkName')]",
|
||||
"location": "[parameters('location')]",
|
||||
"properties": {
|
||||
"addressSpace": {
|
||||
"addressPrefixes": [
|
||||
"[variables('addressPrefix')]"
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "Microsoft.Network/virtualNetworks/subnets",
|
||||
"apiVersion": "2021-05-01",
|
||||
"name": "[format('{0}/{1}', parameters('virtualNetworkName'), parameters('subnetName'))]",
|
||||
"properties": {
|
||||
"addressPrefix": "[variables('subnetAddressPrefix')]",
|
||||
"privateEndpointNetworkPolicies": "Enabled",
|
||||
"privateLinkServiceNetworkPolicies": "Enabled"
|
||||
},
|
||||
"dependsOn": [
|
||||
"[resourceId('Microsoft.Network/virtualNetworks', parameters('virtualNetworkName'))]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"type": "Microsoft.Network/publicIPAddresses",
|
||||
"apiVersion": "2021-05-01",
|
||||
"name": "[variables('publicIPAddressName')]",
|
||||
"location": "[parameters('location')]",
|
||||
"sku": {
|
||||
"name": "Basic"
|
||||
},
|
||||
"properties": {
|
||||
"publicIPAllocationMethod": "Dynamic",
|
||||
"publicIPAddressVersion": "IPv4",
|
||||
"dnsSettings": {
|
||||
"domainNameLabel": "[parameters('dnsLabelPrefix')]"
|
||||
},
|
||||
"idleTimeoutInMinutes": 4
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "Microsoft.Compute/virtualMachines",
|
||||
"apiVersion": "2021-11-01",
|
||||
"name": "[parameters('vmName')]",
|
||||
"location": "[parameters('location')]",
|
||||
"properties": {
|
||||
"hardwareProfile": {
|
||||
"vmSize": "[parameters('vmSize')]"
|
||||
},
|
||||
"storageProfile": {
|
||||
"osDisk": {
|
||||
"createOption": "FromImage",
|
||||
"managedDisk": {
|
||||
"storageAccountType": "[variables('osDiskType')]"
|
||||
}
|
||||
},
|
||||
"imageReference": {
|
||||
"publisher": "Canonical",
|
||||
"offer": "UbuntuServer",
|
||||
"sku": "[parameters('ubuntuOSVersion')]",
|
||||
"version": "latest"
|
||||
}
|
||||
},
|
||||
"networkProfile": {
|
||||
"networkInterfaces": [
|
||||
{
|
||||
"id": "[resourceId('Microsoft.Network/networkInterfaces', variables('networkInterfaceName'))]"
|
||||
}
|
||||
]
|
||||
},
|
||||
"osProfile": {
|
||||
"computerName": "[parameters('vmName')]",
|
||||
"adminUsername": "[parameters('adminUsername')]",
|
||||
"adminPassword": "[parameters('adminPasswordOrKey')]",
|
||||
"linuxConfiguration": "[if(equals(parameters('authenticationType'), 'password'), null(), variables('linuxConfiguration'))]"
|
||||
}
|
||||
},
|
||||
"dependsOn": [
|
||||
"[resourceId('Microsoft.Network/networkInterfaces', variables('networkInterfaceName'))]"
|
||||
]
|
||||
}
|
||||
],
|
||||
"outputs": {
|
||||
"adminUsername": {
|
||||
"type": "string",
|
||||
"value": "[parameters('adminUsername')]"
|
||||
},
|
||||
"hostname": {
|
||||
"type": "string",
|
||||
"value": "[reference(resourceId('Microsoft.Network/publicIPAddresses', variables('publicIPAddressName'))).dnsSettings.fqdn]"
|
||||
},
|
||||
"sshCommand": {
|
||||
"type": "string",
|
||||
"value": "[format('ssh {0}@{1}', parameters('adminUsername'), reference(resourceId('Microsoft.Network/publicIPAddresses', variables('publicIPAddressName'))).dnsSettings.fqdn)]"
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,15 @@
{
  "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentParameters.json#",
  "contentVersion": "1.0.0.0",
  "parameters": {
    "adminUsername": {
      "value": "GEN-UNIQUE"
    },
    "adminPasswordOrKey": {
      "value": "GEN-PASSWORD"
    },
    "dnsLabelPrefix": {
      "value": "GEN-UNIQUE"
    }
  }
}
@@ -0,0 +1,104 @@
{
  "name": "Azure VM Ubuntu Redis",
  "class": "mlos_bench.environment.CompositeEnv",

  "config": {

    "services": [
      {
        "class": "mlos_bench.environment.azure.AzureVMService",

        "config": {
          "template_path": "./config/azure/azuredeploy.json",

          "subscription": "...",
          "resource_group": "sergiym-os-autotune",
          "deployment_name": "sergiym-os-autotune-001",
          "vmName": "osat-linux-vm",

          "accessToken": "AZURE ACCESS TOKEN (e.g., from `az account get-access-token`)"
        }
      }
    ],

    "children": [
      {
        "name": "Deploy Ubuntu VM on Azure",
        "class": "mlos_bench.environment.azure.VMEnv",

        "config": {

          "cost": 1000,

          "tunable_params": {
            "vmSize": {
              "type": "categorical",
              "default": "Standard_B4ms",
              "values": ["Standard_B2s", "Standard_B2ms", "Standard_B4ms"]
            }
          },

          "const_args": {

            "adminUsername": "sergiym",
            "authenticationType": "sshPublicKey",
            "adminPasswordOrKey": "SSH PUBLIC KEY (e.g., from id_rsa.pub)",

            "virtualNetworkName": "sergiym-osat-vnet",
            "subnetName": "sergiym-osat-subnet",
            "networkSecurityGroupName": "sergiym-osat-sg",

            "ubuntuOSVersion": "18.04-LTS"
          }
        }
      },
      {
        "name": "Boot Ubuntu VM on Azure",
        "class": "mlos_bench.environment.azure.OSEnv",

        "config": {

          "cost": 300,

          "tunable_params": {
            "rootfs": {
              "type": "categorical",
              "default": "xfs",
              "values": ["xfs", "ext4", "ext2"]
            }
          },

          "const_args": {
          }
        }
      },
      {
        "name": "Redis on Linux",
        "class": "mlos_bench.environment.AppEnv",

        "config": {

          "cost": 1,

          "tunable_params": {
            "kernel.sched_migration_cost_ns": {
              "type": "int",
              "default": -1,
              "range": [0, 500000],
              "special": [-1]
            }
          },

          "const_args": {
            "commandId": "RunBenchmark",
            "script": [
              "ls -l /",
              "uname -a",
              "sysctl kernel.sched_migration_cost_ns=${kernel.sched_migration_cost_ns}"
            ]
          }
        }
      }
    ]
  }
}
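For orientation, here is a minimal sketch (not part of this commit) of how a composite config like the one above could be driven with the `Environment.from_config()`, `submit()`, and `result()` APIs introduced later in this change; the config path and the tunable values are illustrative assumptions.

```python
# Sketch only: load a composite environment config like the one above and
# run a single benchmark with it. The path and tunable values are made up.
import json

from mlos_bench.environment import Environment

with open("config/azure/azure-redis.json") as fh:   # hypothetical file name
    config = json.load(fh)

# CompositeEnv builds its own services (e.g. AzureVMService) from the
# "services" section of the config, so no parent service is passed here.
env = Environment.from_config(config)

tunables = {
    "vmSize": "Standard_B2ms",
    "rootfs": "ext4",
    "kernel.sched_migration_cost_ns": 250000,
}

if env.submit(tunables):               # setup() + run() on every child env
    (status, run_time) = env.result()  # blocking; (Status, float) pair
    print(status, run_time)
```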
@@ -0,0 +1,92 @@
DROP TABLE IF EXISTS benchmark CASCADE;
DROP TABLE IF EXISTS benchmark_status CASCADE;
DROP TABLE IF EXISTS environment CASCADE;
DROP TABLE IF EXISTS composite_environment CASCADE;
DROP TABLE IF EXISTS experiment CASCADE;
DROP TABLE IF EXISTS tunable_parameter CASCADE;
DROP TABLE IF EXISTS tunable_value CASCADE;

DROP TYPE IF EXISTS string_id_t CASCADE;
DROP TYPE IF EXISTS benchmark_status_t CASCADE;

CREATE DOMAIN string_id_t AS varchar(255) NOT NULL;

CREATE TYPE benchmark_status_t AS ENUM (
    'pending', 'running', 'failed', 'canceled', 'completed');

-- Each environment is a collection of scripts and configuration templates
-- required to run an experiment, along with a (Python) class name that
-- contains the code that actually launches the scripts and runs the benchmarks.
CREATE TABLE environment (
    id string_id_t PRIMARY KEY,
    name text NOT NULL,
    class string_id_t,      -- Python class that implements the experiment
    config_path text,       -- (relative) git path to scripts and config templates
    config_version text,    -- git branch or commit id of the scripts
    parameters json,        -- Static parameters to plug into the config
    cost float              -- Cost of changing the parameters' values.
);

-- Composite environments are trees of environment instances.
-- environment.class of the root environment is a (Python) class that
-- implements the composition.
CREATE TABLE composite_environment (
    root_id string_id_t REFERENCES environment(id),
    parent_id string_id_t REFERENCES environment(id),
    child_id string_id_t REFERENCES environment(id),

    PRIMARY KEY (root_id, parent_id, child_id)
);

-- An experiment is a series of benchmarks for the given environment.
CREATE TABLE experiment (
    id string_id_t PRIMARY KEY,
    environment_id string_id_t REFERENCES environment(id),
    ts timestamp NOT NULL DEFAULT now(),
    parameters json         -- Parameters to plug into the environment config
);

CREATE TABLE benchmark (
    id serial NOT NULL PRIMARY KEY,
    experiment_id string_id_t REFERENCES experiment(id),
    ts timestamp NOT NULL DEFAULT now(),
    parameters json,        -- Benchmark-specific parameters, e.g., VM id (NOT tunables!)
    final_status benchmark_status_t,
    final_result float
);

CREATE TABLE benchmark_status (
    benchmark_id integer NOT NULL REFERENCES benchmark(id),
    ts timestamp NOT NULL DEFAULT now(),
    status benchmark_status_t NOT NULL,
    result float,
    telemetry json,

    PRIMARY KEY (benchmark_id, ts)
);

-- Tunable parameters' descriptions.
-- Should be deserializable into ConfigSpace.
CREATE TABLE tunable_parameter (
    environment_id string_id_t REFERENCES environment(id),
    name string_id_t,
    type string_id_t,
    range json,
    default_value json,

    PRIMARY KEY (environment_id, name)
);

-- Values of the tunables for a given benchmark.
-- (Maybe, store as JSON column in the benchmark table?)
CREATE TABLE tunable_value (
    benchmark_id integer NOT NULL REFERENCES benchmark(id),
    environment_id string_id_t REFERENCES environment(id),
    name string_id_t,
    value json,

    PRIMARY KEY (benchmark_id, name),
    FOREIGN KEY (environment_id, name)
        REFERENCES tunable_parameter(environment_id, name)
);
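As a small illustration (not part of the commit), this is how one environment, one experiment, and one benchmark run might be recorded against the schema above from Python; the DSN and identifiers are placeholders, and `psycopg2` is just one possible client.

```python
# Sketch only: record an environment, an experiment, and one benchmark run
# against the schema above. The connection string and ids are placeholders.
import json

import psycopg2

conn = psycopg2.connect("dbname=mlos_bench user=mlos")  # placeholder DSN
with conn, conn.cursor() as cur:
    cur.execute(
        "INSERT INTO environment (id, name, class) VALUES (%s, %s, %s)",
        ("azure-ubuntu-redis", "Azure VM Ubuntu Redis",
         "mlos_bench.environment.CompositeEnv"))
    cur.execute(
        "INSERT INTO experiment (id, environment_id, parameters) VALUES (%s, %s, %s)",
        ("exp-001", "azure-ubuntu-redis", json.dumps({"vmSize": "Standard_B2ms"})))
    cur.execute(
        "INSERT INTO benchmark (experiment_id, parameters) VALUES (%s, %s) RETURNING id",
        ("exp-001", json.dumps({"vm_id": "osat-linux-vm"})))
    benchmark_id = cur.fetchone()[0]
    cur.execute(
        "INSERT INTO benchmark_status (benchmark_id, status, result) VALUES (%s, %s, %s)",
        (benchmark_id, "completed", 123.456))
```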
@@ -0,0 +1,15 @@
# Documentation Generation

Documentation is generated using [`sphinx`](https://www.sphinx-doc.org/).

```sh
make -C .. doc
```

## Testing with Docker

```sh
./nginx-docker.sh restart
```

> Now browse to `http://localhost`
@ -0,0 +1,49 @@
|
|||
# vim: set ft=nginx:
|
||||
server {
|
||||
listen 80;
|
||||
listen [::]:80;
|
||||
server_name localhost;
|
||||
|
||||
#access_log /var/log/nginx/host.access.log main;
|
||||
|
||||
# Expects ./doc to be mapped to /doc in the nginx container.
|
||||
location / {
|
||||
#root /usr/share/nginx/html;
|
||||
root /doc/build/html/;
|
||||
autoindex on;
|
||||
index index.html index.htm;
|
||||
}
|
||||
|
||||
#error_page 404 /404.html;
|
||||
|
||||
# redirect server error pages to the static page /50x.html
|
||||
#
|
||||
error_page 500 502 503 504 /50x.html;
|
||||
location = /50x.html {
|
||||
root /usr/share/nginx/html;
|
||||
}
|
||||
|
||||
# proxy the PHP scripts to Apache listening on 127.0.0.1:80
|
||||
#
|
||||
#location ~ \.php$ {
|
||||
# proxy_pass http://127.0.0.1;
|
||||
#}
|
||||
|
||||
# pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
|
||||
#
|
||||
#location ~ \.php$ {
|
||||
# root html;
|
||||
# fastcgi_pass 127.0.0.1:9000;
|
||||
# fastcgi_index index.php;
|
||||
# fastcgi_param SCRIPT_FILENAME /scripts$fastcgi_script_name;
|
||||
# include fastcgi_params;
|
||||
#}
|
||||
|
||||
# deny access to .htaccess files, if Apache's document root
|
||||
# concurs with nginx's one
|
||||
#
|
||||
#location ~ /\.ht {
|
||||
# deny all;
|
||||
#}
|
||||
}
|
||||
|
|
@@ -0,0 +1,20 @@
#!/bin/bash

# A quick script to start a local webserver for testing the sphinx documentation.

scriptdir=$(dirname "$(readlink -f "$0")")
cd "$scriptdir"

if [ "$1" == 'start' ]; then
    docker run -d --name mlos-doc-nginx -v "$PWD/nginx-default.conf:/etc/nginx/conf.d/default.conf" -v "$PWD:/doc" -p 80:80 nginx
elif [ "$1" == 'stop' ]; then
    docker stop mlos-doc-nginx || true
    docker rm mlos-doc-nginx || true
elif [ "$1" == 'restart' ]; then
    "$0" 'stop'
    "$0" 'start'
else
    echo "ERROR: Invalid argument: $1." >&2
    echo "Usage: $0 [start|stop|restart]"
    exit 1
fi
@ -1,32 +0,0 @@
|
|||
#############################
|
||||
API
|
||||
#############################
|
||||
|
||||
This is a list of all functions and classes provided by mlos_core.
|
||||
|
||||
.. currentmodule:: mlos_core
|
||||
|
||||
Optimizers
|
||||
==============
|
||||
.. currentmodule:: mlos_core.optimizers
|
||||
|
||||
.. autosummary::
|
||||
:toctree: generated/
|
||||
:template: class.rst
|
||||
|
||||
RandomOptimizer
|
||||
EmukitOptimizer
|
||||
SkoptOptimizer
|
||||
|
||||
|
||||
Spaces
|
||||
=========
|
||||
|
||||
|
||||
.. currentmodule:: mlos_core.spaces
|
||||
.. autosummary::
|
||||
:toctree: generated/
|
||||
:template: function.rst
|
||||
|
||||
configspace_to_emukit_space
|
||||
configspace_to_skopt_space
|
|
@@ -21,7 +21,7 @@ sys.path.insert(0, os.path.abspath('../..'))

# -- Project information -----------------------------------------------------

project = 'mlos-core'
project = 'MlosCore'
copyright = '2022, GSL'
author = 'GSL'
@@ -1,7 +1,7 @@
Welcome to mlos-core's documentation!
=====================================
Welcome to MlosCore's documentation!
====================================

This repository contains a stripped down implementation of essentially just the core optimizer and config space description APIs from the original `MLOS <https://github.com/microsoft/MLOS>`_.
This repository contains a stripped down implementation of essentially just the core optimizer and config space description APIs from the original `MLOS <https://github.com/microsoft/MLOS>`_ as well as the `mlos-bench` module intended to help automate and manage running experiments for autotuning systems with `mlos-core`.

It is intended to provide a simplified, easier to consume (e.g. via ``pip``), lower-dependency abstraction to

@@ -9,7 +9,7 @@ It is intended to provide a simplified, easier to consume (e.g. via ``pip``), wi
- an "optimizer" service abstraction (e.g. ``register()`` and ``suggest()``) so we can easily swap out different implementation methods of searching (e.g. random, BO, etc.)
- provide some helpers for automating optimization experiment runner loops and data collection

For these design requirements we intend to reuse as much from existing OSS libraries as possible.
For these design requirements we intend to reuse as much from existing OSS libraries as possible and layer policies and optimizations specifically geared towards autotuning on top.

.. toctree::
   :hidden:

@@ -17,7 +17,14 @@ For these design requirements we intend to reuse as much from existing OSS libra
   :caption: Documentation

   installation
   api
   overview

.. toctree::
   :hidden:
   :maxdepth: 4
   :caption: API Reference

   api/modules

.. toctree::
   :maxdepth: 2
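To make the `register()` / `suggest()` abstraction mentioned above concrete, here is a toy sketch of the loop shape it is meant to support; this is deliberately not the real `mlos_core` API, just an illustration.

```python
# Toy illustration of an optimizer exposing register()/suggest(); the real
# mlos_core optimizers wrap ConfigSpace/emukit/skopt instead.
import random


class ToyRandomOptimizer:
    "Suggests random configs and remembers the scores it has seen."

    def __init__(self, vm_sizes):
        self._vm_sizes = vm_sizes
        self._observations = []

    def suggest(self):
        # Pick the next configuration to try.
        return {"vmSize": random.choice(self._vm_sizes)}

    def register(self, config, score):
        # Feed the measured score back so a smarter optimizer could learn.
        self._observations.append((config, score))

    def best(self):
        return min(self._observations, key=lambda obs: obs[1])


def run_benchmark(config):
    # Stand-in for a real benchmark run; returns a fake latency in seconds.
    return {"Standard_B2s": 12.0, "Standard_B2ms": 9.5, "Standard_B4ms": 8.1}[config["vmSize"]]


opt = ToyRandomOptimizer(["Standard_B2s", "Standard_B2ms", "Standard_B4ms"])
for _ in range(10):
    suggestion = opt.suggest()
    opt.register(suggestion, run_benchmark(suggestion))
print(opt.best())
```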
@@ -6,13 +6,13 @@ Development

0. Create the `mlos_core` Conda environment.

.. code-block:: shell
   .. code-block:: shell

      conda env create -f conda-envs/mlos_core.yml

or
   or

.. code-block:: shell
   .. code-block:: shell

      # This will also ensure the environment is up to date using "conda env update -f conda-envs/mlos_core.yml"
      make conda-env

@@ -20,7 +20,7 @@ or

1. Initialize the shell environment.

.. code-block:: shell
   .. code-block:: shell

      conda activate mlos_core

@@ -31,13 +31,13 @@ Distributing

1. Build the *wheel* file.

.. code-block:: shell
   .. code-block:: shell

      make dist

2. Install it (e.g. after copying it somewhere else).

.. code-block:: shell
   .. code-block:: shell

      # this will install it with emukit support:
      pip install dist/mlos_core-0.0.3-py3-none-any.whl[emukit]
@ -0,0 +1,81 @@
|
|||
#############################
|
||||
mlos-core API
|
||||
#############################
|
||||
|
||||
This is a list of major functions and classes provided by `mlos_core`.
|
||||
|
||||
.. currentmodule:: mlos_core
|
||||
|
||||
Optimizers
|
||||
==============
|
||||
.. currentmodule:: mlos_core.optimizers
|
||||
.. autosummary::
|
||||
:toctree: generated/
|
||||
:template: class.rst
|
||||
|
||||
BaseOptimizer
|
||||
RandomOptimizer
|
||||
EmukitOptimizer
|
||||
SkoptOptimizer
|
||||
|
||||
|
||||
Spaces
|
||||
=========
|
||||
.. currentmodule:: mlos_core.spaces
|
||||
.. autosummary::
|
||||
:toctree: generated/
|
||||
:template: function.rst
|
||||
|
||||
configspace_to_emukit_space
|
||||
configspace_to_skopt_space
|
||||
|
||||
#############################
|
||||
mlos-bench API
|
||||
#############################
|
||||
|
||||
This is a list of major functions and classes provided by `mlos_bench`.
|
||||
|
||||
.. currentmodule:: mlos_bench
|
||||
|
||||
Main
|
||||
====
|
||||
.. currentmodule:: mlos_bench.main
|
||||
.. autosummary::
|
||||
:toctree: generated/
|
||||
:template: functions.rst
|
||||
|
||||
optimize
|
||||
|
||||
Optimizer
|
||||
=========
|
||||
.. currentmodule:: mlos_bench.opt
|
||||
.. autosummary::
|
||||
:toctree: generated/
|
||||
:template: class.rst
|
||||
|
||||
Optimizer
|
||||
|
||||
Environments
|
||||
============
|
||||
.. currentmodule:: mlos_bench.environment
|
||||
.. autosummary::
|
||||
:toctree: generated/
|
||||
:template: class.rst
|
||||
|
||||
Environment
|
||||
AppEnv
|
||||
CompositeEnv
|
||||
Service
|
||||
Status
|
||||
|
||||
Azure
|
||||
-----
|
||||
|
||||
.. currentmodule:: mlos_bench.environment.azure
|
||||
.. autosummary::
|
||||
:toctree: generated/
|
||||
:template: class.rst
|
||||
|
||||
OSEnv
|
||||
VMEnv
|
||||
AzureVMService
|
|
@@ -0,0 +1,3 @@
"""
OS Autotune project.
"""
@@ -0,0 +1,21 @@
"""
Benchmarking environments for OS Autotune.
"""

from mlos_bench.environment.status import Status
from mlos_bench.environment.base_svc import Service
from mlos_bench.environment.base_env import Environment

from mlos_bench.environment.app import AppEnv
from mlos_bench.environment.composite import CompositeEnv
from mlos_bench.environment import azure


__all__ = [
    'Status',
    'Service',
    'Environment',
    'AppEnv',
    'CompositeEnv',
    'azure',
]
@ -0,0 +1,74 @@
|
|||
"Application-specific benchmark environment."
|
||||
|
||||
import json
|
||||
import logging
|
||||
|
||||
from mlos_bench.environment import Environment, Status
|
||||
|
||||
_LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class AppEnv(Environment):
|
||||
"Application-level benchmark environment."
|
||||
|
||||
def setup(self):
|
||||
"""
|
||||
Check if the environment is ready and set up the application
|
||||
and benchmarks, if necessary.
|
||||
|
||||
Returns
|
||||
-------
|
||||
is_success : bool
|
||||
True if operation is successful, false otherwise.
|
||||
"""
|
||||
_LOG.info("Set up")
|
||||
return True
|
||||
|
||||
def run(self, tunables):
|
||||
"""
|
||||
Submit a new experiment to the application environment.
|
||||
(Re)configure an application and launch the benchmark.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
tunables : dict
|
||||
Flat dictionary of (key, value) of the OS and application
|
||||
parameters. Setting these parameters should not require an
|
||||
OS reboot.
|
||||
|
||||
Returns
|
||||
-------
|
||||
is_success : bool
|
||||
True if operation is successful, false otherwise.
|
||||
"""
|
||||
_LOG.info("Run: %s", tunables)
|
||||
|
||||
# FIXME: Plug in the tunables into the script for remote execution
|
||||
# params = self._combine_tunables(tunables)
|
||||
params = self._const_args
|
||||
|
||||
if _LOG.isEnabledFor(logging.DEBUG):
|
||||
_LOG.debug("Benchmark:\n%s", json.dumps(params, indent=2))
|
||||
|
||||
# TODO: Configure the application and start the benchmark
|
||||
(status, _output) = self._service.remote_exec(params)
|
||||
return status in {Status.PENDING, Status.READY}
|
||||
|
||||
def result(self):
|
||||
"""
|
||||
Get the results of the benchmark. This is a blocking call that waits
|
||||
for the completion of the benchmark. It can have PENDING status only if
|
||||
the environment object has been read from the storage and not updated
|
||||
with the actual status yet.
|
||||
|
||||
Returns
|
||||
-------
|
||||
(benchmark_status, benchmark_result) : (enum, float)
|
||||
A pair of (benchmark status, benchmark result) values.
|
||||
benchmark_status is of type mlos_bench.environment.Status.
|
||||
benchmark_result is a floating point time of the benchmark in
|
||||
seconds or None if the status is not COMPLETED.
|
||||
"""
|
||||
self._result = (Status.COMPLETED, 123.456)
|
||||
_LOG.info("Benchmark result: %s", self._result)
|
||||
return self._result
|
|
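As a rough usage sketch (not part of the commit), `AppEnv` only needs a `Service` that exposes `remote_exec()`, so a stub service can stand in for `AzureVMService` when poking at the class locally; the class names below come from this change, the stub behavior is made up.

```python
# Sketch: wire a stub Service providing remote_exec() into AppEnv.
from mlos_bench.environment import AppEnv, Service, Status


def remote_exec(params):
    "Stub remote execution: accept the request and report READY."
    print("would run:", params.get("script", []))
    return (Status.READY, {})


service = Service()
service.register([remote_exec])   # exposed as service.remote_exec(...)

app_env = AppEnv(
    "Redis on Linux (stub)",
    {
        "tunable_params": {},
        "const_args": {"commandId": "RunBenchmark", "script": ["uname -a"]},
    },
    service,
)

assert app_env.submit({})   # setup() then run(); True means the request was accepted
print(app_env.result())     # (Status.COMPLETED, 123.456) placeholder result for now
```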
@ -0,0 +1,14 @@
|
|||
"""
|
||||
Azure-specific benchmark environments for OS Autotune.
|
||||
"""
|
||||
|
||||
from mlos_bench.environment.azure.azure_vm import VMEnv
|
||||
from mlos_bench.environment.azure.azure_os import OSEnv
|
||||
from mlos_bench.environment.azure.azure_services import AzureVMService
|
||||
|
||||
|
||||
__all__ = [
|
||||
'VMEnv',
|
||||
'OSEnv',
|
||||
'AzureVMService'
|
||||
]
|
|
@ -0,0 +1,60 @@
|
|||
"OS-level benchmark environment on Azure."
|
||||
|
||||
import json
|
||||
import logging
|
||||
|
||||
from mlos_bench.environment import Environment, Status
|
||||
|
||||
_LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class OSEnv(Environment):
|
||||
"Boot-time environment for Azure VM."
|
||||
|
||||
def setup(self):
|
||||
"""
|
||||
Check if the Azure VM is provisioned and can be booted.
|
||||
|
||||
Returns
|
||||
-------
|
||||
is_success : bool
|
||||
True if operation is successful, false otherwise.
|
||||
"""
|
||||
_LOG.info("Set up")
|
||||
return True
|
||||
|
||||
def teardown(self):
|
||||
"""
|
||||
Clean up and shut down the VM without deprovisioning it.
|
||||
|
||||
Returns
|
||||
-------
|
||||
is_success : bool
|
||||
True if operation is successful, false otherwise.
|
||||
"""
|
||||
_LOG.info("Tear down")
|
||||
return True
|
||||
|
||||
def run(self, tunables):
|
||||
"""
|
||||
Check if Azure VM is up and running. (Re)boot it, if necessary.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
tunables : dict
|
||||
Flat dictionary of (key, value) of the OS boot-time parameters.
|
||||
|
||||
Returns
|
||||
-------
|
||||
is_success : bool
|
||||
True if operation is successful, false otherwise.
|
||||
"""
|
||||
_LOG.info("Run: %s", tunables)
|
||||
params = self._combine_tunables(tunables)
|
||||
|
||||
if _LOG.isEnabledFor(logging.DEBUG):
|
||||
_LOG.debug("Start VM:\n%s", json.dumps(params, indent=2))
|
||||
|
||||
# TODO: Reboot the OS when config parameters change
|
||||
(status, _output) = self._service.vm_start(params)
|
||||
return status in {Status.PENDING, Status.READY}
|
|
@ -0,0 +1,234 @@
|
|||
"OS-level benchmark environment on Azure."
|
||||
|
||||
import json
|
||||
import logging
|
||||
import requests
|
||||
|
||||
from mlos_bench.environment.status import Status
|
||||
from mlos_bench.environment.base_svc import Service
|
||||
|
||||
|
||||
_LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class AzureVMService(Service):
|
||||
"Helper methods to manage VMs on Azure."
|
||||
|
||||
# Azure REST API calls as described in
|
||||
# https://docs.microsoft.com/en-us/rest/api/compute/virtual-machines
|
||||
|
||||
_URL_DEPLOY = "https://management.azure.com" \
|
||||
"/subscriptions/%s" \
|
||||
"/resourceGroups/%s" \
|
||||
"/providers/Microsoft.Resources" \
|
||||
"/deployments/%s" \
|
||||
"?api-version=2022-05-01"
|
||||
|
||||
_URL_START = "https://management.azure.com" \
|
||||
"/subscriptions/%s" \
|
||||
"/resourceGroups/%s" \
|
||||
"/providers/Microsoft.Compute" \
|
||||
"/virtualMachines/%s" \
|
||||
"/start?api-version=2022-03-01"
|
||||
|
||||
_URL_RUN = "https://management.azure.com/" \
|
||||
"/subscriptions/%s" \
|
||||
"/resourceGroups/%s" \
|
||||
"/providers/Microsoft.Compute" \
|
||||
"/virtualMachines/%s" \
|
||||
"/runCommand?api-version=2022-03-01"
|
||||
|
||||
def __init__(self, config):
|
||||
"""
|
||||
Create a new instance of Azure services proxy.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
config : dict
|
||||
Free-format dictionary that contains the benchmark environment
|
||||
configuration.
|
||||
"""
|
||||
super().__init__(config)
|
||||
self.register([self.vm_deploy, self.vm_start, self.remote_exec])
|
||||
|
||||
with open(config['template_path']) as fh_json:
|
||||
self._template = json.load(fh_json)
|
||||
|
||||
self._url_deploy = AzureVMService._URL_DEPLOY % (
|
||||
config["subscription"],
|
||||
config["resource_group"],
|
||||
config["deployment_name"]
|
||||
)
|
||||
|
||||
self._headers = {
|
||||
# Access token from `az account get-access-token`:
|
||||
"Authorization": "Bearer " + config["accessToken"]
|
||||
}
|
||||
|
||||
self._url_start = AzureVMService._URL_START % (
|
||||
config["subscription"],
|
||||
config["resource_group"],
|
||||
config["vmName"]
|
||||
)
|
||||
|
||||
self._url_run = AzureVMService._URL_RUN % (
|
||||
config["subscription"],
|
||||
config["resource_group"],
|
||||
config["vmName"]
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def _build_parameters(tunables):
|
||||
"""
|
||||
Merge tunables with other parameters and convert into
|
||||
ARM Template format.
|
||||
"""
|
||||
return {key: {"value": val} for (key, val) in tunables.items()}
|
||||
|
||||
@staticmethod
|
||||
def _extract_parameters(json_data):
|
||||
"""
|
||||
Extract parameters from the ARM Template REST response JSON.
|
||||
|
||||
Returns
|
||||
-------
|
||||
parameters : dict
|
||||
Flat dictionary of parameters and their values.
|
||||
"""
|
||||
return {
|
||||
key: val.get("value")
|
||||
for (key, val) in json_data.get(
|
||||
"properties", {}).get("parameters", {}).items()
|
||||
}
|
||||
|
||||
def vm_deploy(self, tunables):
|
||||
"""
|
||||
Check if Azure VM is ready. (Re)provision it, if necessary.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
tunables : dict
|
||||
Flat dictionary of (key, value) pairs of tunable parameters.
|
||||
VMEnv tunables are variable parameters that, together with the
|
||||
VMEnv configuration, are sufficient to provision a VM.
|
||||
|
||||
Returns
|
||||
-------
|
||||
result : (Status, dict={})
|
||||
A pair of Status and result. The result is always {}.
|
||||
Status is one of {PENDING, READY, FAILED}
|
||||
"""
|
||||
_LOG.info("Deploy VM: %s :: %s", self.config["vmName"], tunables)
|
||||
|
||||
json_req = {
|
||||
"properties": {
|
||||
"mode": "Incremental",
|
||||
"template": self._template,
|
||||
"parameters": AzureVMService._build_parameters(tunables)
|
||||
}
|
||||
}
|
||||
|
||||
if _LOG.isEnabledFor(logging.DEBUG):
|
||||
_LOG.debug("Request: PUT %s\n%s",
|
||||
self._url_deploy, json.dumps(json_req, indent=2))
|
||||
|
||||
response = requests.put(
|
||||
self._url_deploy, headers=self._headers, json=json_req)
|
||||
|
||||
if _LOG.isEnabledFor(logging.DEBUG):
|
||||
_LOG.debug("Response: %s\n%s", response,
|
||||
json.dumps(response.json(), indent=2))
|
||||
else:
|
||||
_LOG.info("Response: %s", response)
|
||||
|
||||
if response.status_code == 200:
|
||||
params = AzureVMService._extract_parameters(response.json())
|
||||
_LOG.info("Extracted parameters: %s", params)
|
||||
return (Status.READY, params)
|
||||
elif response.status_code == 201:
|
||||
return (Status.PENDING, {})
|
||||
else:
|
||||
_LOG.error("Response: %s :: %s", response, response.text)
|
||||
# _LOG.error("Bad Request:\n%s", response.request.body)
|
||||
return (Status.FAILED, {})
|
||||
|
||||
def vm_start(self, tunables):
|
||||
"""
|
||||
Start the VM on Azure.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
tunables : dict
|
||||
Flat dictionary of (key, value) pairs of tunable parameters.
|
||||
|
||||
Returns
|
||||
-------
|
||||
result : (Status, dict={})
|
||||
A pair of Status and result. The result is always {}.
|
||||
Status is one of {PENDING, READY, FAILED}
|
||||
"""
|
||||
_LOG.info("Start VM: %s :: %s", self.config["vmName"], tunables)
|
||||
_LOG.debug("Request: POST %s", self._url_start)
|
||||
|
||||
response = requests.post(self._url_start, headers=self._headers)
|
||||
_LOG.info("Response: %s", response)
|
||||
|
||||
if response.status_code == 200:
|
||||
return (Status.PENDING, {})
|
||||
elif response.status_code == 202:
|
||||
return (Status.READY, {})
|
||||
else:
|
||||
_LOG.error("Response: %s :: %s", response, response.text)
|
||||
# _LOG.error("Bad Request:\n%s", response.request.body)
|
||||
return (Status.FAILED, {})
|
||||
|
||||
def remote_exec(self, tunables):
|
||||
"""
|
||||
Run a command on Azure VM.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
tunables : dict
|
||||
Flat dictionary of (key, value) pairs of tunable parameters.
|
||||
Must have "commandId", "parameters", or "script" keys.
|
||||
|
||||
Returns
|
||||
-------
|
||||
result : (Status, dict)
|
||||
A pair of Status and result.
|
||||
Status is one of {PENDING, READY, FAILED}
|
||||
"""
|
||||
|
||||
_LOG.info("Run a command on VM: %s :: %s %s %s",
|
||||
self.config["vmName"], tunables["commandId"],
|
||||
tunables.get("parameters", []),
|
||||
tunables.get("script", []))
|
||||
|
||||
json_req = tunables # Pass to REST request as-is.
|
||||
if _LOG.isEnabledFor(logging.DEBUG):
|
||||
_LOG.debug("Request: POST %s\n%s",
|
||||
self._url_run, json.dumps(json_req, indent=2))
|
||||
|
||||
response = requests.post(
|
||||
self._url_run, headers=self._headers, json=json_req)
|
||||
|
||||
if _LOG.isEnabledFor(logging.DEBUG):
|
||||
_LOG.debug("Response: %s\n%s", response,
|
||||
json.dumps(response.json(), indent=2))
|
||||
else:
|
||||
_LOG.info("Response: %s", response)
|
||||
|
||||
if response.status_code == 200:
|
||||
# TODO: extract the results from JSON response
|
||||
return (Status.READY, {})
|
||||
elif response.status_code == 202:
|
||||
return (Status.PENDING, {})
|
||||
else:
|
||||
_LOG.error("Response: %s :: %s", response, response.text)
|
||||
# _LOG.error("Bad Request:\n%s", response.request.body)
|
||||
return (Status.FAILED, {})
|
|
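A hedged sketch of how the `AzureVMService` config might be assembled, fetching the bearer token with the same `az account get-access-token` call referenced in the comments above; the subscription, resource group, and deployment names are placeholders.

```python
# Sketch only: build the service config and provision/start the VM.
import json
import subprocess

from mlos_bench.environment import Status
from mlos_bench.environment.azure import AzureVMService

token = json.loads(
    subprocess.check_output(["az", "account", "get-access-token"]))["accessToken"]

service = AzureVMService({
    "template_path": "./config/azure/azuredeploy.json",
    "subscription": "00000000-0000-0000-0000-000000000000",  # placeholder
    "resource_group": "my-os-autotune-rg",                   # placeholder
    "deployment_name": "my-os-autotune-001",                 # placeholder
    "vmName": "osat-linux-vm",
    "accessToken": token,
})

# Provision (or update) the deployment, then power the VM on.
(status, params) = service.vm_deploy({"vmSize": "Standard_B2s"})
if status in {Status.PENDING, Status.READY}:
    service.vm_start(params)
```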
@ -0,0 +1,61 @@
|
|||
"VM-level benchmark environment on Azure."
|
||||
|
||||
import json
|
||||
import logging
|
||||
|
||||
from mlos_bench.environment import Environment, Status
|
||||
|
||||
_LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class VMEnv(Environment):
|
||||
"Azure VM environment."
|
||||
|
||||
def setup(self):
|
||||
"""
|
||||
Check if the Azure VM can be provisioned.
|
||||
|
||||
Returns
|
||||
-------
|
||||
is_success : bool
|
||||
True if operation is successful, false otherwise.
|
||||
"""
|
||||
_LOG.info("Set up")
|
||||
return True
|
||||
|
||||
def teardown(self):
|
||||
"""
|
||||
Shut down the VM and release it.
|
||||
|
||||
Returns
|
||||
-------
|
||||
is_success : bool
|
||||
True if operation is successful, false otherwise.
|
||||
"""
|
||||
_LOG.info("Tear down")
|
||||
return True
|
||||
|
||||
def run(self, tunables):
|
||||
"""
|
||||
Check if Azure VM is ready. (Re)provision and start it, if necessary.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
tunables : dict
|
||||
Flat dictionary of (key, value) pairs of tunable parameters.
|
||||
VMEnv tunables are variable parameters that, together with the
|
||||
VMEnv configuration, are sufficient to provision and start a VM.
|
||||
|
||||
Returns
|
||||
-------
|
||||
is_success : bool
|
||||
True if operation is successful, false otherwise.
|
||||
"""
|
||||
_LOG.info("Run: %s", tunables)
|
||||
params = self._combine_tunables(tunables)
|
||||
|
||||
if _LOG.isEnabledFor(logging.DEBUG):
|
||||
_LOG.debug("Deploy VM:\n%s", json.dumps(params, indent=2))
|
||||
|
||||
(status, _output) = self._service.vm_deploy(params)
|
||||
return status in {Status.PENDING, Status.READY}
|
|
@ -0,0 +1,260 @@
|
|||
"A hierarchy of benchmark environments."
|
||||
|
||||
import abc
|
||||
import json
|
||||
import logging
|
||||
import importlib
|
||||
|
||||
from mlos_bench.environment import Status
|
||||
|
||||
_LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Environment(metaclass=abc.ABCMeta):
|
||||
"An abstract base of all benchmark environments."
|
||||
|
||||
@staticmethod
|
||||
def from_config(config, service=None):
|
||||
"""
|
||||
Factory method for a new environment with a given config.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
config : dict
|
||||
A dictionary with three mandatory fields:
|
||||
"name": Human-readable string describing the environment;
|
||||
"class": FQN of a Python class to instantiate;
|
||||
"config": Free-format dictionary to pass to the constructor.
|
||||
service: Service
|
||||
An optional service object (e.g., providing methods to
|
||||
deploy or reboot a VM, etc.).
|
||||
|
||||
Returns
|
||||
-------
|
||||
env : Environment
|
||||
An instance of the `Environment` class initialized with `config`.
|
||||
"""
|
||||
env_name = config["name"]
|
||||
env_class = config["class"]
|
||||
env_config = config["config"]
|
||||
_LOG.debug("Creating env: %s :: %s", env_name, env_class)
|
||||
env = Environment.new(env_name, env_class, env_config, service)
|
||||
_LOG.info("Created env: %s :: %s", env_name, env)
|
||||
return env
|
||||
|
||||
@classmethod
|
||||
def new(cls, env_name, class_name, config, service=None):
|
||||
"""
|
||||
Factory method for a new environment with a given config.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
env_name: str
|
||||
Human-readable name of the environment.
|
||||
class_name: str
|
||||
FQN of a Python class to instantiate, e.g.,
|
||||
"mlos_bench.environment.azure.VMEnv".
|
||||
Must be derived from the `Environment` class.
|
||||
config : dict
|
||||
Free-format dictionary that contains the benchmark environment
|
||||
configuration. It will be passed as a constructor parameter of
|
||||
the class specified by `name`.
|
||||
service: Service
|
||||
An optional service object (e.g., providing methods to
|
||||
deploy or reboot a VM, etc.).
|
||||
|
||||
Returns
|
||||
-------
|
||||
env : Environment
|
||||
An instance of the `Environment` class initialized with `config`.
|
||||
"""
|
||||
# We need to import mlos_bench to make the factory methods
|
||||
# like `Environment.new()` work.
|
||||
class_name_split = class_name.split(".")
|
||||
module_name = ".".join(class_name_split[:-1])
|
||||
class_id = class_name_split[-1]
|
||||
|
||||
env_module = importlib.import_module(module_name)
|
||||
env_class = getattr(env_module, class_id)
|
||||
|
||||
_LOG.info("Instantiating: %s :: class %s = %s",
|
||||
env_name, class_name, env_class)
|
||||
|
||||
assert issubclass(env_class, cls)
|
||||
return env_class(env_name, config, service)
|
||||
|
||||
def __init__(self, name, config, service=None):
|
||||
"""
|
||||
Create a new environment with a given config.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
name: str
|
||||
Human-readable name of the environment.
|
||||
config : dict
|
||||
Free-format dictionary that contains the benchmark environment
|
||||
configuration. Each config must have at least the "tunable_params"
|
||||
and the "const_args" sections; the "cost" field can be omitted
|
||||
and is 0 by default.
|
||||
service: Service
|
||||
An optional service object (e.g., providing methods to
|
||||
deploy or reboot a VM, etc.).
|
||||
"""
|
||||
self.name = name
|
||||
self.config = config
|
||||
self._service = service
|
||||
self._result = (Status.PENDING, None)
|
||||
|
||||
self._const_args = config.get("const_args", {})
|
||||
self._tunable_params = self._parse_tunables(
|
||||
config.get("tunable_params", {}), config.get("cost", 0))
|
||||
|
||||
if _LOG.isEnabledFor(logging.DEBUG):
|
||||
_LOG.debug("Config for: %s\n%s",
|
||||
name, json.dumps(self.config, indent=2))
|
||||
|
||||
def __str__(self):
|
||||
return self.name
|
||||
|
||||
def __repr__(self):
|
||||
return "Env: %s :: '%s'" % (self.__class__, self.name)
|
||||
|
||||
def _parse_tunables(self, tunables, cost=0):
|
||||
"Augment tunables with the cost."
|
||||
tunables_cost = {}
|
||||
for (key, val) in tunables.items():
|
||||
tunables_cost[key] = val.copy()
|
||||
tunables_cost[key]["cost"] = cost
|
||||
return tunables_cost
|
||||
|
||||
def _combine_tunables(self, tunables):
|
||||
"""
|
||||
Plug tunable values into the base config. If the tunable is unknown,
|
||||
ignore it (it might belong to another environment). This method should
|
||||
never mutate the original config or the tunables.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
tunables : dict
|
||||
Flat dictionary of (key, value) pairs of tunable parameters.
|
||||
|
||||
Returns
|
||||
-------
|
||||
config : dict
|
||||
Free-format dictionary that contains the new environment
|
||||
configuration.
|
||||
"""
|
||||
new_config = self._const_args.copy()
|
||||
for (key, val) in tunables.items():
|
||||
if key in self._tunable_params:
|
||||
new_config[key] = val
|
||||
return new_config
|
||||
|
||||
def tunable_params(self):
|
||||
"""
|
||||
Get the configuration space of the given environment.
|
||||
|
||||
Returns
|
||||
-------
|
||||
tunables : dict
|
||||
Flat dictionary of (key, value) pairs of tunable parameters.
|
||||
"""
|
||||
return self._tunable_params
|
||||
|
||||
def setup(self):
|
||||
"""
|
||||
Set up a new benchmark environment, if necessary. This method must be
|
||||
idempotent, i.e., calling it several times in a row should be
|
||||
equivalent to a single call.
|
||||
|
||||
Returns
|
||||
-------
|
||||
is_success : bool
|
||||
True if operation is successful, false otherwise.
|
||||
"""
|
||||
return True
|
||||
|
||||
def teardown(self):
|
||||
"""
|
||||
Tear down the benchmark environment. This method must be idempotent,
|
||||
i.e., calling it several times in a row should be equivalent to a
|
||||
single call.
|
||||
|
||||
Returns
|
||||
-------
|
||||
is_success : bool
|
||||
True if operation is successful, false otherwise.
|
||||
"""
|
||||
return True
|
||||
|
||||
@abc.abstractmethod
|
||||
def run(self, tunables):
|
||||
"""
|
||||
Submit a new experiment to the environment.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
tunables : dict
|
||||
Flat dictionary of (key, value) pairs of tunable parameters.
|
||||
|
||||
Returns
|
||||
-------
|
||||
is_success : bool
|
||||
True if operation is successful, false otherwise.
|
||||
"""
|
||||
|
||||
def submit(self, tunables):
|
||||
"""
|
||||
Submit a new experiment to the environment. Set up the environment,
|
||||
if necessary.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
tunables : dict
|
||||
Flat dictionary of (key, value) pairs of tunable parameters.
|
||||
|
||||
Returns
|
||||
-------
|
||||
is_success : bool
|
||||
True if operation is successful, false otherwise.
|
||||
"""
|
||||
_LOG.info("Submit: %s", tunables)
|
||||
if self.setup():
|
||||
return self.run(tunables)
|
||||
return False
|
||||
|
||||
def status(self):
|
||||
"""
|
||||
Get the status of the environment.
|
||||
|
||||
Returns
|
||||
-------
|
||||
status : mlos_bench.environment.Status
|
||||
Current status of the benchmark environment.
|
||||
"""
|
||||
return self._result[0]
|
||||
|
||||
def result(self):
|
||||
"""
|
||||
Get the results of the benchmark. This is a blocking call that waits
|
||||
for the completion of the benchmark. It can have PENDING status only if
|
||||
the environment object has been read from the storage and not updated
|
||||
with the actual status yet.
|
||||
|
||||
Base implementation returns the results of the last .update() call.
|
||||
|
||||
Returns
|
||||
-------
|
||||
(benchmark_status, benchmark_result) : (enum, float)
|
||||
A pair of (benchmark status, benchmark result) values.
|
||||
benchmark_status is one of:
|
||||
PENDING
|
||||
RUNNING
|
||||
COMPLETED
|
||||
CANCELED
|
||||
FAILED
|
||||
benchmark_result is a floating point time of the benchmark in
|
||||
seconds or None if the status is not COMPLETED.
|
||||
"""
|
||||
_LOG.info("Result: %s", self._result)
|
||||
return self._result
|
|
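For illustration (not part of the commit), a minimal `Environment` subclass only has to implement `run()`; in a real setup the class would live in an importable module and be referenced by its fully qualified name in the "class" field of a JSON config, so that `Environment.new()` can load it via `importlib` as shown above.

```python
# Sketch: the smallest possible Environment subclass.
from mlos_bench.environment import Environment


class LocalEnv(Environment):
    "Trivial environment that just echoes the tunables it receives."

    def run(self, tunables):
        print("running with:", self._combine_tunables(tunables))
        return True


env = LocalEnv(
    "local-echo",
    {"tunable_params": {}, "const_args": {"workload": "redis"}},
)
env.submit({})   # setup() is a no-op here, then run() prints the merged config
```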
@ -0,0 +1,139 @@
|
|||
"Base class for the service mix-ins."
|
||||
|
||||
import json
|
||||
import logging
|
||||
import importlib
|
||||
|
||||
_LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Service:
|
||||
"An abstract base of all environment services."
|
||||
|
||||
@staticmethod
|
||||
def from_config(config):
|
||||
"""
|
||||
Factory method for a new service with a given config.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
config : dict
|
||||
A dictionary with two mandatory fields:
|
||||
"class": FQN of a Python class to instantiate;
|
||||
"config": Free-format dictionary to pass to the constructor.
|
||||
|
||||
Returns
|
||||
-------
|
||||
svc : Service
|
||||
An instance of the `Service` class initialized with `config`.
|
||||
"""
|
||||
svc_class = config["class"]
|
||||
svc_config = config["config"]
|
||||
_LOG.debug("Creating service: %s", svc_class)
|
||||
service = Service.new(svc_class, svc_config)
|
||||
_LOG.info("Created service: %s", service)
|
||||
return service
|
||||
|
||||
@staticmethod
|
||||
def from_config_list(config_list, parent=None):
|
||||
"""
|
||||
Factory method for a new service with a given config.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
config_list : a list of dict
|
||||
A list where each element is a dictionary with 2 mandatory fields:
|
||||
"class": FQN of a Python class to instantiate;
|
||||
"config": Free-format dictionary to pass to the constructor.
|
||||
parent: Service
|
||||
An optional reference of the parent service to mix in.
|
||||
|
||||
Returns
|
||||
-------
|
||||
svc : Service
|
||||
An instance of the `Service` class that is a combination of all
|
||||
services from the list plus the parent mix-in.
|
||||
"""
|
||||
service = Service()
|
||||
if parent:
|
||||
service.register(parent.export())
|
||||
for config in config_list:
|
||||
service.register(Service.from_config(config).export())
|
||||
_LOG.info("Created mix-in service: %s", service.export())
|
||||
return service
|
||||
|
||||
@classmethod
|
||||
def new(cls, class_name, config):
|
||||
"""
|
||||
Factory method for a new service with a given config.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
class_name: str
|
||||
FQN of a Python class to instantiate, e.g.,
|
||||
"mlos_bench.environment.azure.AzureVMService".
|
||||
Must be derived from the `Service` class.
|
||||
config : dict
|
||||
Free-format dictionary that contains the service configuration.
|
||||
It will be passed as a constructor parameter of the class
|
||||
specified by `class_name`.
|
||||
|
||||
Returns
|
||||
-------
|
||||
svc : Service
|
||||
An instance of the `Service` class initialized with `config`.
|
||||
"""
|
||||
# We need to import mlos_bench to make the factory methods
|
||||
# like `Service.new()` work.
|
||||
class_name_split = class_name.split(".")
|
||||
module_name = ".".join(class_name_split[:-1])
|
||||
class_id = class_name_split[-1]
|
||||
|
||||
env_module = importlib.import_module(module_name)
|
||||
svc_class = getattr(env_module, class_id)
|
||||
_LOG.info("Instantiating: %s :: %s", class_name, svc_class)
|
||||
|
||||
assert issubclass(svc_class, cls)
|
||||
return svc_class(config)
|
||||
|
||||
def __init__(self, config=None):
|
||||
"""
|
||||
Create a new service with a given config.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
config : dict
|
||||
Free-format dictionary that contains the service configuration.
|
||||
It will be passed as a constructor parameter of the class
|
||||
specified by `class_name`.
|
||||
"""
|
||||
self.config = config or {}
|
||||
self._services = {}
|
||||
|
||||
if _LOG.isEnabledFor(logging.DEBUG):
|
||||
_LOG.debug("Config:\n%s", json.dumps(self.config, indent=2))
|
||||
|
||||
def register(self, services):
|
||||
"""
|
||||
Register new mix-in services.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
services : dict or list
|
||||
A dictionary of string -> function pairs.
|
||||
"""
|
||||
if not isinstance(services, dict):
|
||||
services = {svc.__name__: svc for svc in services}
|
||||
self._services.update(services)
|
||||
self.__dict__.update(self._services)
|
||||
|
||||
def export(self):
|
||||
"""
|
||||
Return a dictionary of functions available in this service.
|
||||
|
||||
Returns
|
||||
-------
|
||||
services : dict
|
||||
A dictionary of string -> function pairs.
|
||||
"""
|
||||
return self._services
|
|
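A short sketch (not from the commit) of the mix-in mechanics above: `register()` merges plain callables into a `Service`, and `export()` hands them to another service, which is how `Service.from_config_list()` stacks a parent service together with child services.

```python
# Sketch: compose two stub services the same way from_config_list() does.
from mlos_bench.environment import Service


def vm_start(params):
    "Stub VM start."
    return ("READY", {})


def remote_exec(params):
    "Stub remote command execution."
    return ("READY", {})


parent = Service()
parent.register([vm_start])

child = Service()
child.register([remote_exec])

combined = Service()
combined.register(parent.export())   # inherit the parent's methods
combined.register(child.export())    # then mix in the child's

print(sorted(combined.export()))     # ['remote_exec', 'vm_start']
combined.vm_start({})                # callable directly as an attribute
```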
@ -0,0 +1,106 @@
"Composite benchmark environment."

import logging

from mlos_bench.environment import Environment, Service

_LOG = logging.getLogger(__name__)


class CompositeEnv(Environment):
    "Composite benchmark environment."

    def __init__(self, name, config, service=None):
        """
        Create a new environment with a given config.

        Parameters
        ----------
        name: str
            Human-readable name of the environment.
        config : dict
            Free-format dictionary that contains the environment
            configuration. Must have a "children" section.
        service: Service
            An optional service object (e.g., providing methods to
            deploy or reboot a VM, etc.).
        """
        super().__init__(name, config, service)

        # Propagate all config parameters except "children" and "services"
        # to every child config.
        shared_config = config.copy()
        del shared_config["children"]
        shared_config.pop("services", None)  # "services" is optional

        self._service = Service.from_config_list(
            config.get("services", []), parent=service)

        self._children = []
        for child_config in config["children"]:
            child_config["config"].update(shared_config)
            env = Environment.from_config(child_config, self._service)
            self._children.append(env)
            self._tunable_params.update(env.tunable_params())

    def setup(self):
        """
        Set up the children environments.

        Returns
        -------
        is_success : bool
            True if all children setup() operations are successful,
            false otherwise.
        """
        _LOG.debug("Set up: %s", self._children)
        return all(env.setup() for env in self._children)

    def teardown(self):
        """
        Tear down the children environments in reverse order.

        Returns
        -------
        is_success : bool
            True if all children operations are successful, false otherwise.
        """
        # Note: list.reverse() reverses in place and returns None,
        # so use reversed() to iterate over the children in reverse order.
        reverse_children = list(reversed(self._children))
        _LOG.debug("Tear down: %s", reverse_children)
        return all(env.teardown() for env in reverse_children)

    def run(self, tunables):
        """
        Submit a new experiment to all children environments.

        Parameters
        ----------
        tunables : dict
            Flat dictionary of (key, value) of the parameters from all
            children environments.

        Returns
        -------
        is_success : bool
            True if the operation is successful, false otherwise.
        """
        _LOG.debug("Run: %s with %s", self._children, tunables)
        return all(env.run(tunables) for env in self._children)

    def result(self):
        """
        Get the results of the benchmark.

        Returns
        -------
        (benchmark_status, benchmark_result) : (enum, float)
            A pair of (benchmark status, benchmark result) values.
            benchmark_status is of type mlos_bench.environment.Status.
            benchmark_result is a floating point time of the benchmark in
            seconds or None if the status is not COMPLETED.
        """
        # For now, we just return the result of the last child environment
        # in the sequence. TODO: have a way to select the right result from
        # the children, or identify which environment actually provides the
        # final result that will be used in the optimization.
        return self._children[-1].result()
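A sketch of the kind of config the `CompositeEnv` constructor above consumes. The exact schema of each child entry is whatever `Environment.from_config()` accepts (not shown here), so the "name"/"class" keys and the `experimentId` parameter below are assumptions; only the "children" and "services" sections and the shared-parameter propagation come from the constructor.

# Hypothetical composite environment config (Python dict form of the JSON).
composite_config = {
    "children": [
        {"name": "ProvisionVM", "class": "mlos_bench.environment.azure.VMEnv", "config": {}},
        {"name": "RunBenchmark", "class": "mlos_bench.environment.RemoteEnv", "config": {}},
    ],
    "services": [],              # optional; merged via Service.from_config_list()
    "experimentId": "demo-001",  # shared parameter, copied into every child "config"
}

env = CompositeEnv("os_autotune_demo", composite_config)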
@ -0,0 +1,20 @@
"""
Enum for the status of the benchmark.
"""

import enum


class Status(enum.Enum):
    "Enum for the status of the benchmark."
    PENDING = 1
    READY = 2
    RUNNING = 3
    COMPLETED = 4
    CANCELED = 5
    FAILED = 6

    @staticmethod
    def is_good(status):
        "Check if the status is not failed or canceled."
        return status not in {Status.CANCELED, Status.FAILED}
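A small usage sketch for the enum above, assuming a `(status, result)` pair of the kind returned by `Environment.result()` (the numbers are made up):

(status, bench_time) = (Status.COMPLETED, 123.4)
if Status.is_good(status):
    print("Benchmark finished in %.1f s with status %s" % (bench_time, status.name))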
@ -0,0 +1,59 @@
"""
OS Autotune main optimization loop.
"""

import sys
import json
import logging

from mlos_bench.opt import Optimizer
from mlos_bench.environment import Environment


def optimize(config):
    "Main optimization loop."

    env = Environment.from_config(config)

    opt = Optimizer(env.tunable_params())
    _LOG.info("Env: %s Optimizer: %s", env, opt)

    while opt.not_converged():

        tunable_values = opt.suggest()
        _LOG.info("Suggestion: %s", tunable_values)
        env.submit(tunable_values)

        bench_result = env.result()  # Block and wait for the final result
        _LOG.info("Result: %s = %s", tunable_values, bench_result)
        opt.register(tunable_values, bench_result)

    best = opt.get_best_observation()
    _LOG.info("Env: %s best result: %s", env, best)
    return best

###############################################################


def _main():

    with open(sys.argv[1]) as fh_json:
        config = json.load(fh_json)

    if _LOG.isEnabledFor(logging.DEBUG):
        _LOG.debug("Config:\n%s", json.dumps(config, indent=2))

    result = optimize(config)
    _LOG.info("Final result: %s", result)


logging.basicConfig(
    level=logging.DEBUG,
    format='%(asctime)s %(pathname)s:%(lineno)d %(levelname)s %(message)s',
    datefmt='%H:%M:%S'
)

_LOG = logging.getLogger(__name__)

if __name__ == "__main__":
    _main()
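The script above is driven by a JSON config file passed as its first command-line argument (something like `python <this script> config.json`; the file name is illustrative). Calling `optimize()` directly looks roughly like the sketch below; the config keys are placeholders, since the actual schema is whatever `Environment.from_config()` accepts.

demo_config = {
    "name": "demo_env",
    "class": "mlos_bench.environment.RemoteEnv",  # hypothetical environment class
    "config": {},
}
best = optimize(demo_config)  # returns (best tunable values, best score)
print(best)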
@ -0,0 +1,46 @@
"""
Toy optimizer for testing the OS Autotune Environment API.
"""

import logging

_LOG = logging.getLogger(__name__)


class Optimizer:
    "Toy random optimizer to test out the Environment API."

    _MAX_ITER = 1

    def __init__(self, tunables):
        _LOG.info("Create: %s", tunables)
        self._iter_left = Optimizer._MAX_ITER
        self._tunables = tunables
        self._last_values = None

    def suggest(self):
        "Generate the next suggestion."
        # For now, get just the default values.
        # FIXME: Need to iterate over the actual values.
        tunables = {
            key: val.get("default") for (key, val) in self._tunables.items()
        }
        # TODO: Populate the tunables with some random values
        _LOG.info("Suggest: %s", tunables)
        return tunables

    def register(self, tunables, bench):
        "Register the observation for the given configuration."
        (bench_status, bench_result) = bench
        _LOG.info("Register: %s = %s %s", tunables, bench_status, bench_result)
        self._last_values = tunables
        self._iter_left -= 1

    def not_converged(self):
        "Return True if not converged, False otherwise."
        return self._iter_left > 0

    def get_best_observation(self):
        "Get the best observation so far."
        # FIXME: Use the tunables' values, as passed into .register()
        return (self._last_values, 0.0)
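The call sequence the toy optimizer expects can be exercised on its own, as in this sketch. The tunables schema (a dict mapping a parameter name to a spec with a "default" value) is inferred from `suggest()`; the parameter name and the `Status` import path are assumptions.

from mlos_bench.environment import Status  # path assumed from the docstrings above

opt = Optimizer({"kernel.sched_migration_cost_ns": {"default": 500000}})
while opt.not_converged():
    values = opt.suggest()                        # returns the defaults for now
    opt.register(values, (Status.COMPLETED, 123.4))
print(opt.get_best_observation())                 # (last registered values, 0.0)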
setup.py
@ -1,12 +1,15 @@
"""
Setup instructions for the mlos_core package.
Setup instructions for the mlos_core and mlos_bench packages.
"""

from setuptools import setup, find_packages

version='0.0.4'

# TODO: Create separate whl packages for mlos-core and mlos-bench?
setup(
    name="mlos-core",
    version="0.0.3",
    name='mlos-core',
    version=version,
    packages=find_packages(),
    install_requires=[
        'scikit-learn>=0.22.1',
@ -19,10 +22,10 @@ setup(
        'emukit': 'emukit',
        'skopt': 'scikit-optimize',
    },
    author="Microsoft",
    author_email="amueller@microsoft.com",
    description=("MLOS Core Python interface for parameter optimization."),
    license="",
    keywords="",
    #python_requires='>=3.7',
    author='Microsoft',
    author_email='mlos-maintainers@service.microsoft.com',
    description=('MLOS Core Python interface for parameter optimization.'),
    license='MIT',
    keywords='',
    # python_requires='>=3.7',
)
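For context: the 'emukit' and 'skopt' entries above are optional-dependency groups (presumably under `extras_require`), so an editable install with one of the optional optimizer backends would likely look like `pip install -e ".[skopt]"`; the exact command is illustrative rather than documented here.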