Co-authored-by: Sergiy Matusevych <sergiy.matusevych@gmail.com>
Brian Kroth 2024-01-09 16:40:15 -06:00 committed by GitHub
Parent 3415bcc93d
Commit c7df7ff99a
No key found matching this signature
GPG key ID: 4AEE18F83AFDEB23
15 changed files with 321 additions and 62 deletions


@@ -1,12 +1,8 @@
[bumpversion]
current_version = 0.1.0
current_version = 0.2.4
commit = True
tag = True
[bumpversion:file:README.md]
[bumpversion:file:doc/source/installation.rst]
[bumpversion:file:doc/source/conf.py]
[bumpversion:file:mlos_core/_version.py]

.github/workflows/devcontainer.yml vendored

@@ -11,6 +11,7 @@ on:
default: false
required: false
push:
tags: ["v*"]
branches: [ main ]
pull_request:
branches: [ main ]
@@ -46,6 +47,17 @@ jobs:
echo COMMIT_MESSAGES_EOF
} >> $GITHUB_OUTPUT
- name: Validate tag
if: github.ref_type == 'tag'
# Note: May need to update this for release branches in the future too.
run: |
set -x
git fetch --deepen=100
if ! git branch -a --contains ${{ github.ref_name }} | grep origin/main; then
echo "ERROR: tag ${{ github.ref_name }} doesn't appear to be included in the main branch." >&2
exit 1
fi
- name: Set NO_CACHE variable based on commit messages and for nightly builds
if: github.event_name == 'schedule' || contains(steps.get-commit-messages.outputs.COMMIT_MESSAGES, 'NO_CACHE=true') || github.event.inputs.NO_CACHE == 'true'
run: |
@@ -144,7 +156,9 @@ jobs:
docker exec --user vscode --env USER=vscode mlos-devcontainer make CONDA_INFO_LEVEL=-v dist dist-test
- name: Test rebuilding the devcontainer in the devcontainer
timeout-minutes: 3
# FIXME:
# timeout-minutes: 3
timeout-minutes: 10
run: |
set -x
git --no-pager diff --exit-code
@@ -157,6 +171,24 @@ jobs:
# Make sure we can publish the coverage report.
rm -f doc/build/html/htmlcov/.gitignore
- name: Publish package to Test PyPi
if: github.ref_type == 'tag'
run: |
if [ -n "${{ secrets.PYPI_TEST_USERNAME }}" ]; then
docker exec --user vscode --env USER=vscode --env MAKEFLAGS=-Oline \
--env TWINE_USERNAME=${{ secrets.PYPI_TEST_USERNAME }} --env TWINE_PASSWORD=${{ secrets.PYPI_TEST_PASSWORD }} \
mlos-devcontainer make CONDA_INFO_LEVEL=-v publish-test-pypi
fi
- name: Publish package to PyPi
if: github.repository == 'microsoft/mlos' && github.ref_type == 'tag'
run: |
if [ -n "${{ secrets.PYPI_USERNAME }}" ]; then
docker exec --user vscode --env USER=vscode --env MAKEFLAGS=-Oline \
--env TWINE_USERNAME=${{ secrets.PYPI_USERNAME }} --env TWINE_PASSWORD=${{ secrets.PYPI_PASSWORD }} \
mlos-devcontainer make CONDA_INFO_LEVEL=-v publish-pypi
fi
- name: Deploy to GitHub pages
if: github.ref == 'refs/heads/main'
uses: JamesIves/github-pages-deploy-action@v4
@@ -172,20 +204,34 @@ jobs:
docker rm --force mlos-devcontainer || true
- name: Container Registry Login
if: github.repository == 'microsoft/mlos' && github.ref == 'refs/heads/main'
if: (github.repository == 'microsoft/mlos') && (github.ref == 'refs/heads/main' || github.ref_type == 'tag')
uses: docker/login-action@v3
with:
# This is the URL of the container registry, which is configured in Github
# Settings and currently corresponds to the mlos-core ACR.
registry: ${{ secrets.ACR_LOGINURL }}
username: ${{ secrets.ACR_USERNAME }}
# This secret is configured in Github Settings.
# It can also be obtained in a keyvault in the Azure portal alongside the
# other resources used.
password: ${{ secrets.ACR_PASSWORD }}
- name: Publish the container images
if: github.repository == 'microsoft/mlos' && github.ref == 'refs/heads/main'
if: (github.repository == 'microsoft/mlos') && (github.ref == 'refs/heads/main' || github.ref_type == 'tag')
timeout-minutes: 15
# Push the :latest tag for main branch builds and the version tag for releases.
# (We avoid per-commit tags since there's currently no API to clean up the registry manually.)
run: |
set -x
docker tag devcontainer-cli:latest ${{ secrets.ACR_LOGINURL }}/devcontainer-cli:latest
docker push ${{ secrets.ACR_LOGINURL }}/devcontainer-cli:latest
docker tag mlos-devcontainer:latest ${{ secrets.ACR_LOGINURL }}/mlos-devcontainer:latest
docker push ${{ secrets.ACR_LOGINURL }}/mlos-devcontainer:latest
image_tag=''
if [ "${{ github.ref }}" == 'refs/heads/main' ]; then
image_tag='latest'
elif [ "${{ github.ref_type }}" == 'tag' ]; then
image_tag="${{ github.ref_name }}"
fi
if [ -z "$image_tag" ]; then
echo "ERROR: Unhandled event condition or ref: event=${{ github.event}}, ref=${{ github.ref }}, ref_type=${{ github.ref_type }}"
exit 1
fi
docker tag devcontainer-cli:latest ${{ secrets.ACR_LOGINURL }}/devcontainer-cli:$image_tag
docker push ${{ secrets.ACR_LOGINURL }}/devcontainer-cli:$image_tag
docker tag mlos-devcontainer:latest ${{ secrets.ACR_LOGINURL }}/mlos-devcontainer:$image_tag
docker push ${{ secrets.ACR_LOGINURL }}/mlos-devcontainer:$image_tag
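The new `Validate tag` step above only accepts tags that are reachable from `origin/main`. A maintainer can run roughly the same check locally before pushing a release tag; the sketch below mirrors that step, with `v0.2.4` standing in for whatever tag is being released:

```sh
# Local pre-check (sketch): confirm the release tag sits on a commit that main contains,
# the same condition the workflow's "Validate tag" step enforces.
git fetch origin main --tags
if git branch -a --contains v0.2.4 | grep -q origin/main; then
    echo "tag is reachable from main; safe to push"
else
    echo "tag is NOT on main; the workflow would reject it" >&2
fi
```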


@@ -55,7 +55,7 @@ Simply open the project in VSCode and follow the prompts to build and open the d
conda activate mlos
```
## Details
### Details
[`main`](https://github.com/microsoft/MLOS/tree/main) is considered the primary development branch.
@@ -74,6 +74,31 @@ We expect development to follow a typical "forking" style workflow:
4. PRs are associated with [Github Issues](https://github.com/microsoft/MLOS/issues) and need [MLOS-committers](https://github.com/orgs/microsoft/teams/MLOS-committers) to sign off (in addition to other CI pipeline checks like tests and lint passing).
5. Once approved, the PR can be completed using a squash merge in order to keep a nice linear history.
## Distributing
You can also locally build and install from wheels like so:
1. Build the *wheel* file(s)
```sh
make dist
```
2. Install it.
```sh
# this will install just the optimizer component with SMAC support:
pip install "dist/tmp/mlos_core-latest-py3-none-any.whl[smac]"
```
```sh
# this will install both the optimizer and the experiment runner:
pip install "dist/mlos_bench-latest-py3-none-any.whl[azure]"
```
> Note: exact versions may differ due to automatic versioning, so the `-latest-` part is a symlink.
> If distributing elsewhere, adjust for the current version number in the module's `dist` directory.
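A quick sanity check after installing from the local wheels (a sketch; package names follow the examples above):

```sh
# Confirm the locally built wheels are installed and importable, and report their versions.
pip show mlos-core mlos-bench
python3 -c "import mlos_core, mlos_bench; print('import ok')"
```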
### See Also
- <https://docs.github.com/en/get-started/quickstart/fork-a-repo>

MAINTAINING.md (new file)

@@ -0,0 +1,55 @@
# Maintaining
Some notes for maintainers.
## Releasing
1. Bump the version using the [`update-version.sh`](./scripts/update-version.sh) script:
```sh
git checkout -b bump-version main
./scripts/update-version.sh patch # or minor or major
```
> This will create a commit and local git tag for that version.
> You won't be able to create a release from that, so don't push it.
2. Test it!
```sh
make dist-test
# Make sure that the version number on the wheels looks correct.
ls */dist/*.whl
```
3. Make and merge a PR.
4. Once the PR with the new version files is merged, update the tag locally.
```sh
git checkout main
git pull
git tag vM.m.p
```
> Note: `M.m.p` is the version number you just bumped to above.
5. Retest!
```sh
make dist-clean
make dist-test
```
6. Update the tag remotely.
```sh
git push --tags
```
> Once this is done, the rules in [`.github/workflows/devcontainer.yml`](./.github/workflows/devcontainer.yml) will automatically publish the wheels to [pypi](https://pypi.org/project/mlos-core/) and tagged docker images to ACR.
> \
> Note: This may fail if the version number is already published to pypi, in which case start from the beginning.
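After the workflow finishes, one quick way to confirm the release actually landed on pypi (a sketch; `pip index` is still marked experimental by pip, and `M.m.p` above is the version just tagged):

```sh
# List the versions pypi knows about; the newly tagged M.m.p release should appear first.
pip index versions mlos-core
pip index versions mlos-bench
```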


@@ -232,6 +232,8 @@ mlos_bench/dist/tmp/mlos-bench-latest.tar: PACKAGE_NAME := mlos-bench
# Check to make sure the mlos_bench module has the config directory.
[ "$(MODULE_NAME)" != "mlos_bench" ] || unzip -t $(MODULE_NAME)/dist/$(MODULE_NAME)-*-py3-none-any.whl | grep -m1 mlos_bench/config/
cd $(MODULE_NAME)/dist/tmp && ln -s ../$(MODULE_NAME)-*-py3-none-any.whl $(MODULE_NAME)-latest-py3-none-any.whl
# Check to make sure the README contents made it into the package metadata.
unzip -p $(MODULE_NAME)/dist/tmp/$(MODULE_NAME)-latest-py3-none-any.whl */METADATA | egrep -v '^[A-Z][a-zA-Z-]+:' | grep -q -i '^# mlos'
.PHONY: dist-test-env-clean
dist-test-env-clean:
@@ -279,6 +281,27 @@ dist-test-clean: dist-test-env-clean
rm -f build/dist-test-env.$(PYTHON_VERSION).build-stamp
.PHONY: publish
publish: publish-pypi
.PHONY: publish-pypi-deps
publish-pypi-deps: build/publish-pypi-deps.build-stamp
build/publish-pypi-deps.${CONDA_ENV_NAME}.build-stamp: build/conda-env.${CONDA_ENV_NAME}.build-stamp
conda run -n ${CONDA_ENV_NAME} pip install -U twine
touch $@
build/publish.%.py.build-stamp: build/publish-pypi-deps.${CONDA_ENV_NAME}.build-stamp build/pytest.${CONDA_ENV_NAME}.build-stamp build/dist-test.$(PYTHON_VERSION).build-stamp build/check-doc.build-stamp build/linklint-doc.build-stamp
rm -f mlos_*/dist/*.tar.gz
ls mlos_*/dist/*.tar | xargs -I% gzip -k %
repo_name=`echo "$@" | sed -e 's|build/publish\.||' -e 's|\.py\.build-stamp||'` \
&& conda run -n ${CONDA_ENV_NAME} python3 -m twine upload --repository $$repo_name \
mlos_*/dist/mlos*-*.tar.gz mlos_*/dist/mlos*-*.whl
touch $@
publish-pypi: build/publish.pypi.py.build-stamp
publish-test-pypi: build/publish.testpypi.py.build-stamp
build/doc-prereqs.${CONDA_ENV_NAME}.build-stamp: build/conda-env.${CONDA_ENV_NAME}.build-stamp
build/doc-prereqs.${CONDA_ENV_NAME}.build-stamp: doc/requirements.txt
conda run -n ${CONDA_ENV_NAME} pip install -U -r doc/requirements.txt
@@ -340,7 +363,10 @@ doc/build/html/index.html: $(SPHINX_API_RST_FILES) doc/Makefile doc/copy-source-
# See check-doc
.PHONY: doc
doc: doc/build/html/.nojekyll build/check-doc.build-stamp build/linklint-doc.build-stamp
doc: doc/build/html/.nojekyll doc-test
.PHONY: doc-test
doc-test: build/check-doc.build-stamp build/linklint-doc.build-stamp
doc/build/html/htmlcov/index.html: doc/build/html/index.html
# Make the codecov html report available for the site.
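With the new `publish-test-pypi` / `publish-pypi` targets above, publishing can also be driven from a local checkout instead of the devcontainer workflow. A rough sketch, assuming the `mlos` conda environment is already set up and twine credentials are supplied via the standard `TWINE_USERNAME`/`TWINE_PASSWORD` environment variables (the token below is a placeholder):

```sh
# Upload to the test index first; the target builds, runs the dist tests, and then invokes twine.
export TWINE_USERNAME=__token__
export TWINE_PASSWORD='pypi-<redacted-test-token>'
make CONDA_INFO_LEVEL=-v publish-test-pypi

# Once that looks good, publish to the real index (separate credentials apply).
make CONDA_INFO_LEVEL=-v publish-pypi
```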


@@ -21,7 +21,7 @@ MLOS is a project to enable autotuning for systems.
- [Usage Examples](#usage-examples)
- [mlos-core](#mlos-core)
- [mlos-bench](#mlos-bench)
- [Distributing](#distributing)
- [Installation](#installation)
- [See Also](#see-also)
- [Examples](#examples)
@@ -127,35 +127,30 @@ See Also:
- [mlos_bench/config](./mlos_bench/mlos_bench/config/) for additional configuration details.
- [sqlite-autotuning](https://github.com/Microsoft-CISL/sqlite-autotuning) for a complete external example of using MLOS to tune `sqlite`.
## Distributing
## Installation
MLOS is not [*yet*](https://github.com/microsoft/MLOS/issues/547) published on `pypi`, so until then here are some instructions for installation for use in production or other environments.
The MLOS modules are published to [pypi](https://pypi.org) when new tags/releases are made.
1. Build the *wheel* file(s)
To install the latest release, simply run:
```sh
make dist
```
```sh
# this will install just the optimizer component with SMAC support:
pip install -U "mlos-core[smac]"
2. Install it (e.g. after copying it somewhere else).
# this will install just the optimizer component with flaml support:
pip install -U "mlos-core[flaml]"
```sh
# this will install just the optimizer component with SMAC support:
pip install dist/mlos_core-0.1.0-py3-none-any.whl[smac]
# this will install just the optimizer component with smac and flaml support:
pip install -U "mlos-core[smac,flaml]"
# this will install just the optimizer component with flaml support:
pip install dist/mlos_core-0.1.0-py3-none-any.whl[flaml]
# this will install both the flaml optimizer and the experiment runner with azure support:
pip install -U "mlos-bench[flaml,azure]"
# this will install just the optimizer component with smac and flaml support:
pip install dist/mlos_core-0.1.0-py3-none-any.whl[smac,flaml]
```
# this will install both the smac optimizer and the experiment runner with ssh support:
pip install -U "mlos-bench[smac,ssh]"
```
```sh
# this will install both the optimizer and the experiment runner:
pip install dist/mlos_bench-0.1.0-py3-none-any.whl
```
> Note: exact versions may differ due to automatic versioning.
Details on using a local version from git are available in [CONTRIBUTING.md](./CONTRIBUTING.md).
## See Also


@@ -33,12 +33,15 @@ def pytest_configure(config: pytest.Config) -> None:
"""
# Workaround some issues loading emukit in certain environments.
if os.environ.get('DISPLAY', None):
import matplotlib # pylint: disable=import-outside-toplevel
matplotlib.rcParams['backend'] = 'agg'
if is_master(config) or dict(getattr(config, 'workerinput', {}))['workerid'] == 'gw0':
# Only warn once.
warn(UserWarning('DISPLAY environment variable is set, which can cause problems in some setups (e.g. WSL). '
+ f'Adjusting matplotlib backend to "{matplotlib.rcParams["backend"]}" to compensate.'))
try:
import matplotlib # pylint: disable=import-outside-toplevel
matplotlib.rcParams['backend'] = 'agg'
if is_master(config) or dict(getattr(config, 'workerinput', {}))['workerid'] == 'gw0':
# Only warn once.
warn(UserWarning('DISPLAY environment variable is set, which can cause problems in some setups (e.g. WSL). '
+ f'Adjusting matplotlib backend to "{matplotlib.rcParams["backend"]}" to compensate.'))
except ImportError:
pass
# Create a temporary directory for sharing files between master and worker nodes.
if is_master(config):
@@ -72,8 +75,9 @@ def pytest_unconfigure(config: pytest.Config) -> None:
Called after all tests have completed.
"""
if is_master(config):
shared_tmp_dir = str(getattr(config, "shared_temp_dir"))
shutil.rmtree(shared_tmp_dir)
shared_tmp_dir = getattr(config, "shared_temp_dir", None)
if shared_tmp_dir:
shutil.rmtree(str(shared_tmp_dir))
@pytest.fixture(scope="session")


@@ -35,7 +35,7 @@ copyright = '2022, GSL'
author = 'GSL'
# The full version, including alpha/beta/rc tags
release = '0.1.0'
release = '0.2.4'
try:
from setuptools_scm import get_version


@@ -2,7 +2,7 @@
This [directory](./) contains the code for the `mlos-bench` experiment runner package.
It makes use of the `mlos-core` package for its optimizer.
It makes use of the [`mlos-core`](../mlos_core/) package for its optimizer.
## Table of Contents


@@ -7,4 +7,4 @@ Version number for the mlos_core package.
"""
# NOTE: This should be managed by bumpversion.
_VERSION = '0.1.0'
_VERSION = '0.2.4'


@@ -6,14 +6,46 @@
Setup instructions for the mlos_bench package.
"""
# pylint: disable=duplicate-code
from logging import warning
from itertools import chain
from typing import Dict, List
import os
import re
from setuptools import setup, find_packages
from _version import _VERSION # pylint: disable=import-private-name
# A simple routine to read and adjust the README.md for this module into a format
# suitable for packaging.
# See Also: copy-source-tree-docs.sh
# Unfortunately we can't use that directly due to the way packaging happens inside a
# temp directory.
# Similarly, we can't use a utility script outside this module, so this code has to
# be duplicated for now.
def _get_long_desc_from_readme(base_url: str) -> dict:
pkg_dir = os.path.dirname(__file__)
readme_path = os.path.join(pkg_dir, 'README.md')
if not os.path.isfile(readme_path):
return {}
jsonc_re = re.compile(r'```jsonc')
link_re = re.compile(r'\]\(([^:#)]+)(#[a-zA-Z0-9_-]+)?\)')
with open(readme_path, mode='r', encoding='utf-8') as readme_fh:
lines = readme_fh.readlines()
# Tweak relative source code links to point at the upstream repo.
lines = [link_re.sub(f"]({base_url}" + r'/\1\2)', line) for line in lines]
# Tweak the lexers for local expansion by pygments instead of github's.
lines = [jsonc_re.sub(r'```json', line) for line in lines]
return {
'long_description': ''.join(lines),
'long_description_content_type': 'text/markdown',
}
try:
from setuptools_scm import get_version
version = get_version(root='..', relative_to=__file__)
@@ -85,10 +117,28 @@ setup(
] + extra_requires['storage-sql-sqlite'], # NOTE: For now sqlite is a fallback storage backend, so we always install it.
extras_require=extra_requires,
author='Microsoft',
license='MIT',
**_get_long_desc_from_readme('https://github.com/microsoft/MLOS/tree/main/mlos_bench'),
author_email='mlos-maintainers@service.microsoft.com',
description=('MLOS Bench Python interface for benchmark automation and optimization.'),
license='MIT',
keywords='',
url='https://aka.ms/mlos-core',
url='https://github.com/microsoft/MLOS',
project_urls={
'Documentation': 'https://microsoft.github.io/MLOS',
'Package Source': 'https://github.com/microsoft/MLOS/tree/main/mlos_bench/',
},
python_requires='>=3.8',
keywords=[
'autotuning',
'benchmarking',
'optimization',
'systems',
],
classifiers=[
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Intended Audience :: System Administrators",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
],
)


@@ -4,18 +4,22 @@ This [directory](./) contains the code for the `mlos-core` optimizer package.
## Description
`mlos-core` is an optimizer package, using Bayesian optimization to identify & sample tunable configuration parameters and propose optimal parameter values.
These are evaluated by `mlos-bench`, generating and tracking experiment results (proposed parameters, benchmark results & telemetry) to update the optimization loop.
`mlos-core` is an optimizer package, wrapping other libraries like FLAML and SMAC to use techniques like Bayesian optimization and others to identify & sample tunable configuration parameters and propose optimal parameter values with a consistent API: `suggest` and `register`.
These can be evaluated by [`mlos-bench`](../mlos_bench/), generating and tracking experiment results (proposed parameters, benchmark results & telemetry) to update the optimization loop, or used independently.
## Features
Since the tunable OS kernel parameter search space is extremely large, `mlos-core` automates the following steps to efficiently generate optimal task-specific kernel configurations.
Since the tunable parameter search space is often extremely large, `mlos-core` automates the following steps to efficiently generate optimal task-specific kernel and application configurations.
1. Reduce the search space by identifying a promising set of tunable parameters
- Map out the configuration search space: Automatically track and manage the discovery of new Linux kernel parameters and their default values across versions. Filter out non-tunable parameters (e.g., not writable) and track which kernel parameters exist for a given kernel version.
- Leverage parameter knowledge for optimization: Information on ranges, sampling intervals, parameter correlations, workload type sensitivities for tunable parameters are tracked and currently manually curated. In the future, this can be automatically maintained by scraping documentation pages on kernel parameters.
- Map out the configuration search space: Automatically track and manage the discovery of new Linux kernel parameters and their default values across versions.
Filter out non-tunable parameters (e.g., not writable) and track which kernel parameters exist for a given kernel version.
- Leverage parameter knowledge for optimization: Information on ranges, sampling intervals, parameter correlations, and workload type sensitivities for tunable parameters is tracked and currently manually curated.
In the future, this can be automatically maintained by scraping documentation pages on kernel parameters.
- Tailored to application: Consider prior knowledge of the parameter's impact & an application's workload profile (e.g. network heavy, disk heavy, CPU bound, multi-threaded, latency sensitive, throughput oriented, etc.) to identify likely impactful candidates of tunable parameters, specific to a particular application.
2. Sampling to warm-start optimization in a high dimensional search space
3. Produce optimal configurations through Bayesian optimization
- Support for various optimizer algorithms (default Bayesian optimizer, Flaml, SMAC, and random for baseline comparison), that handle multiple types of constraints. This includes cost-aware optimization, that considers experiment costs given current tunable parameters.
- Support for various optimizer algorithms (default Bayesian optimizer, Flaml, SMAC, and random for baseline comparison) that handle multiple types of constraints.
This includes cost-aware optimization, which considers experiment costs given current tunable parameters.
- Integrated with `mlos-bench`, proposed configurations are logged and evaluated.


@@ -7,4 +7,4 @@ Version number for the mlos_core package.
"""
# NOTE: This should be managed by bumpversion.
_VERSION = '0.1.0'
_VERSION = '0.2.4'


@@ -6,10 +6,15 @@
Setup instructions for the mlos_core package.
"""
# pylint: disable=duplicate-code
from itertools import chain
from logging import warning
from typing import Dict, List
import os
import re
from setuptools import setup, find_packages
from _version import _VERSION # pylint: disable=import-private-name
@@ -25,6 +30,34 @@ except LookupError as e:
warning(f"setuptools_scm failed to find git version, using version from _version.py: {e}")
# A simple routine to read and adjust the README.md for this module into a format
# suitable for packaging.
# See Also: copy-source-tree-docs.sh
# Unfortunately we can't use that directly due to the way packaging happens inside a
# temp directory.
# Similarly, we can't use a utility script outside this module, so this code has to
# be duplicated for now.
# Also, to avoid caching issues when calculating dependencies for the devcontainer,
# we return nothing when the file is not available.
def _get_long_desc_from_readme(base_url: str) -> dict:
pkg_dir = os.path.dirname(__file__)
readme_path = os.path.join(pkg_dir, 'README.md')
if not os.path.isfile(readme_path):
return {}
jsonc_re = re.compile(r'```jsonc')
link_re = re.compile(r'\]\(([^:#)]+)(#[a-zA-Z0-9_-]+)?\)')
with open(readme_path, mode='r', encoding='utf-8') as readme_fh:
lines = readme_fh.readlines()
# Tweak relative source code links to point at the upstream repo.
lines = [link_re.sub(f"]({base_url}" + r'/\1\2)', line) for line in lines]
# Tweak the lexers for local expansion by pygments instead of github's.
lines = [jsonc_re.sub(r'```json', line) for line in lines]
return {
'long_description': ''.join(lines),
'long_description_content_type': 'text/markdown',
}
extra_requires: Dict[str, List[str]] = { # pylint: disable=consider-using-namedtuple-or-dataclass
'flaml': ['flaml[blendsearch]'],
'smac': ['smac>=2.0.0'], # NOTE: Major refactoring on SMAC starting from v2.0.0
@@ -62,9 +95,25 @@ setup(
extras_require=extra_requires,
author='Microsoft',
author_email='mlos-maintainers@service.microsoft.com',
description=('MLOS Core Python interface for parameter optimization.'),
license='MIT',
keywords='',
url='https://aka.ms/mlos-core',
**_get_long_desc_from_readme('https://github.com/microsoft/MLOS/tree/main/mlos_core'),
description=('MLOS Core Python interface for parameter optimization.'),
url='https://github.com/microsoft/MLOS',
project_urls={
'Documentation': 'https://microsoft.github.io/MLOS',
'Package Source': 'https://github.com/microsoft/MLOS/tree/main/mlos_core/',
},
python_requires='>=3.8',
keywords=[
'autotuning',
'optimization',
],
classifiers=[
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Intended Audience :: System Administrators",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
],
)
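The `_get_long_desc_from_readme` helper above exists so that the README's relative links and `jsonc` fences render correctly on pypi. A rough shell approximation of what its two regexes do, useful for eyeballing the result (a sketch only; the packaging itself uses the Python code above, and GNU `sed -E` is assumed):

```sh
# Rewrite relative markdown links to absolute GitHub URLs and downgrade jsonc fences to json,
# then show a few of the rewritten links.
sed -E \
    -e 's|\]\(([^:#)]+)(#[a-zA-Z0-9_-]+)?\)|](https://github.com/microsoft/MLOS/tree/main/mlos_core/\1\2)|g' \
    -e 's|```jsonc|```json|g' \
    mlos_core/README.md | grep -n 'github.com/microsoft/MLOS' | head
```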


@@ -13,5 +13,14 @@ scriptdir=$(dirname "$(readlink -f "$0")")
cd "$scriptdir/.."
set -x
# Example usage: "./update-version.sh --dry-run patch" to bump v0.0.4 -> v0.0.5, for instance.
# Example usage: "./update-version.sh --dry-run minor" to bump v0.0.4 -> v0.1.0, for instance.
# Note: the tag generated locally can be used for testing, but needs to be reset
# to the upstream commit once the PR to bump the version is merged.
#
# Pushing that tag upstream constitutes a release per the github action rules
# and will generate a new package on pypi and a new docker image tag.
#
conda run -n ${CONDA_ENV_NAME:-mlos} bumpversion --verbose $*