This commit is contained in:
Marvin Buss 2020-07-15 21:49:38 +02:00
Parent e21e4d8685 d3444baeb4
Commit 9491ab7969
10 changed files: 303 additions and 188 deletions

View file

@@ -19,6 +19,7 @@
"delete_service_after_deployment": false,
"profiling_enabled": false,
"profiling_dataset": "<your-dataset-for-model-profiling>",
"skip_deployment": false,
"tags": {"<your-webservice-tag-key>": "<your-webservice-tag-value>"},
"properties": {"<your-webservice-property-key>": "<your-webservice-property-value>"},
"description": "<your-webservice-description>",

View file

@@ -21,6 +21,7 @@
"delete_service_after_deployment": false,
"profiling_enabled": false,
"profiling_dataset": "<your-dataset-for-model-profiling>",
"skip_deployment": false,
"tags": {"<your-webservice-tag-key>": "<your-webservice-tag-value>"},
"properties": {"<your-webservice-property-key>": "<your-webservice-property-value>"},
"description": "<your-webservice-description>",

View file

@@ -20,6 +20,8 @@
"delete_service_after_deployment": false,
"profiling_enabled": false,
"profiling_dataset": "<your-dataset-for-model-profiling>",
"skip_deployment": false,
"create_image": "<'docker', 'function_blob', 'function_http' or 'function_service_bus_queue' >",
"tags": {"<your-webservice-tag-key>": "<your-webservice-tag-value>"},
"properties": {"<your-webservice-property-key>": "<your-webservice-property-value>"},
"description": "<your-webservice-description>",

.cloud/.azure/docker.json (new file)
View file

@@ -0,0 +1,13 @@
{
"inference_source_directory": "<your-inference-source-directory>",
"inference_entry_script": "<your-inference-entry-script>",
"conda_file": "<your-conda-environment-file-path>",
"extra_docker_file_steps": "<your-extra-docker-steps-file-path>",
"runtime": "<'python' or 'spark-py'>",
"enable_gpu": false,
"cuda_version": "<your-cuda-version>",
"custom_base_image": "<your-custom-docker-base-image>",
"description": "<your-webservice-description>",
"skip_deployment": true,
"create_image": "<'docker', 'function_blob', 'function_http' or 'function_service_bus_queue' >"
}
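
The action reads a parameter file like this one and falls back to built-in defaults for any key that is left out, via `parameters.get(...)` as in `code/main.py` further down in this diff. A minimal sketch of that pattern, assuming the template above is checked out to `.cloud/.azure/docker.json` (the default values shown are illustrative, not authoritative):

```python
import json

# Load the parameter template added in this commit.
with open(".cloud/.azure/docker.json") as f:
    parameters = json.load(f)

# Missing keys fall back to defaults, mirroring main.py's parameters.get(...) calls.
source_directory = parameters.get("inference_source_directory", "code/deploy/")
enable_gpu = parameters.get("enable_gpu", None)
create_image = parameters.get("create_image", None)  # "docker", "function_*", or None
skip_deployment = parameters.get("skip_deployment", False)
```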

.github/workflows/python.yml (vendored)
View file

@@ -25,5 +25,5 @@ jobs:
- name: Test
id: python_test
run: |
- pip install pytest jsonschema azureml-sdk
+ pip install pytest jsonschema azureml-sdk azureml-contrib-functions
pytest

View file

@@ -1,5 +1,5 @@
- FROM marvinbuss/aml-docker:1.7.0
+ FROM marvinbuss/aml-docker:1.9.0
LABEL maintainer="azure/gh_aml"

View file

@@ -148,6 +148,8 @@ A sample file can be found in this repository in the folder `.cloud/.azure`. The
| test_file_function_name | | str | `"main"` | Name of the function in the Python script in your repository in which you define your own tests to run against the webservice endpoint. The function gets the webservice object injected, which allows you to run tests against the scoring URI. The GitHub Action fails if your script fails. |
| profiling_enabled | | bool | false | Whether or not to profile this model for an optimal combination of CPU and memory. To use this functionality, you also have to provide a model profiling dataset (`profiling_dataset`). If the parameter is not specified, the Action tries to use the sample input dataset that the model was registered with. Note that profiling is a long-running operation and can take up to 25 minutes, depending on the size of the dataset. More details can be found [here](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/deployment/production-deploy-to-aks/production-deploy-to-aks.ipynb). |
| profiling_dataset | | str | null | Name of the dataset that should be used for model profiling. |
+ | skip_deployment | | bool | false | Indicates whether the deployment to ACI or AKS should be skipped. This can be used in combination with `create_image` to only create a Docker image that can be used for further deployment. |
+ | create_image | | str: `"docker"`, `"function_blob"`, `"function_http"` or `"function_service_bus_queue"` | null | Specifies the type of Docker image that should be created, which can then be used for further deployment. |
Please visit [this website](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.model.inferenceconfig?view=azure-ml-py) and [this website](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.model(class)?view=azure-ml-py#deploy-workspace--name--models--inference-config-none--deployment-config-none--deployment-target-none--overwrite-false-) for more details.
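
Taken together, `skip_deployment` and `create_image` split the action into two independent stages: an optional deployment to ACI/AKS and an optional image build. A condensed sketch of the control flow this commit implements in `code/main.py` (heavily simplified; `ws`, `model`, `parameters`, and the config objects come from the surrounding script, and error handling is omitted):

```python
from azureml.core import Model

# Stage 1: deploy to ACI/AKS unless skip_deployment is set.
if not parameters.get("skip_deployment", False):
    service = Model.deploy(
        workspace=ws,
        name=service_name,
        models=[model],
        inference_config=inference_config,
        deployment_config=deployment_config,
        deployment_target=deployment_target,
        overwrite=True
    )
    service.wait_for_deployment(show_output=True)

# Stage 2: optionally package the model as a (plain) Docker image.
if parameters.get("create_image", None) == "docker":
    package = Model.package(
        workspace=ws,
        models=[model],
        inference_config=inference_config,
        generate_dockerfile=False
    )
    package.wait_for_creation(show_output=True)
```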
@@ -196,8 +198,12 @@ Please visit [this website](https://docs.microsoft.com/en-us/python/api/azureml-
| Output | Description |
| ------------------- | ------------------------------- |
- | service_scoring_uri | Scoring URI of the webservice that was created (only provided if delete_service_after_test is set to False). |
- | service_swagger_uri | Swagger Uri of the webservice that was created (only provided if delete_service_after_test is set to False). |
+ | service_scoring_uri | Scoring URI of the webservice that was created (only provided if `delete_service_after_deployment` is set to False). |
+ | service_swagger_uri | Swagger URI of the webservice that was created (only provided if `delete_service_after_deployment` is set to False). |
+ | acr_address | The DNS name or IP address (e.g. myacr.azurecr.io) of the Azure Container Registry (ACR) (only provided if `create_image` is not None). |
+ | acr_username | The username for ACR (only provided if `create_image` is not None). |
+ | acr_password | The password for ACR (only provided if `create_image` is not None). |
+ | package_location | Full URI of the Docker image (e.g. myacr.azurecr.io/azureml/azureml_*) (only provided if `create_image` is not None). |
| profiling_details | Dictionary of details of the model profiling result. This will only be provided if the model profiling method is used and successfully executed. |
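
These outputs are emitted from `code/main.py` with GitHub's `::set-output` workflow command, so a downstream step can read them as `steps.<step_id>.outputs.<name>`. A minimal sketch of the emitting side (the `service`, `acr`, and `package` objects come from the deployment and packaging code later in this diff; `mask_parameter` is the action's own masking helper):

```python
# Registry credentials are masked in the workflow log before being emitted.
mask_parameter(parameter=acr.password)
print(f"::set-output name=service_scoring_uri::{service.scoring_uri}")
print(f"::set-output name=acr_address::{acr.address}")
print(f"::set-output name=acr_password::{acr.password}")
print(f"::set-output name=package_location::{package.location}")
```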
### Environment variables

View file

@@ -17,9 +17,17 @@ inputs:
default: "deploy.json"
outputs:
service_scoring_uri:
description: "Scoring URI of the webservice that was created (only provided if delete_service_after_test is set to False)"
description: "Scoring URI of the webservice that was created (only provided if delete_service_after_deployment is set to False)"
service_swagger_uri:
description: "Swagger Uri of the webservice that was created (only provided if delete_service_after_test is set to False)"
description: "Swagger URI of the webservice that was created (only provided if delete_service_after_deployment is set to False)"
acr_address:
description: "The DNS name or IP address (e.g. myacr.azurecr.io) of the Azure Container Registry (ACR) (only provided if create_image is not None)"
acr_username:
description: "The username for ACR (only provided if create_image is not None)"
acr_password:
description: "The password for ACR (only provided if create_image is not None)"
package_location:
description: "Full URI of the docker image (e.g. myacr.azurecr.io/azureml/azureml_*) (only provided if create_image is not None)"
profiling_details:
description: "Dictionary of details of the model profiling result. This will only be provided, if the model profiling method is used and successfully executed."
branding:

View file

@@ -3,6 +3,7 @@ import sys
import json
import importlib
+ from azureml.contrib.functions import package_http, package_blob, package_service_bus_queue
from azureml.core import Workspace, Model, ContainerRegistry
from azureml.core.compute import ComputeTarget, AksCompute
from azureml.core.model import InferenceConfig
@@ -76,12 +77,21 @@
input_name="PARAMETERS_FILE"
)
+ # Define target cloud
+ if azure_credentials.get("resourceManagerEndpointUrl", "").startswith("https://management.usgovcloudapi.net"):
+ cloud = "AzureUSGovernment"
+ elif azure_credentials.get("resourceManagerEndpointUrl", "").startswith("https://management.chinacloudapi.cn"):
+ cloud = "AzureChinaCloud"
+ else:
+ cloud = "AzureCloud"
# Loading Workspace
print("::debug::Loading AML Workspace")
sp_auth = ServicePrincipalAuthentication(
tenant_id=azure_credentials.get("tenantId", ""),
service_principal_id=azure_credentials.get("clientId", ""),
- service_principal_password=azure_credentials.get("clientSecret", "")
+ service_principal_password=azure_credentials.get("clientSecret", ""),
+ cloud=cloud
)
config_file_path = os.environ.get("GITHUB_WORKSPACE", default=".cloud/.azure")
config_file_name = "aml_arm_config.json"
@@ -104,18 +114,6 @@
print(f"::error::Workspace authorizationfailed: {exception}")
raise ProjectSystemException
- # Loading deployment target
- print("::debug::Loading deployment target")
- try:
- deployment_target = ComputeTarget(
- workspace=ws,
- name=parameters.get("deployment_compute_target", "")
- )
- except ComputeTargetException:
- deployment_target = None
- except TypeError:
- deployment_target = None
# Loading model
print("::debug::Loading model")
try:
@@ -147,7 +145,7 @@
source_directory=parameters.get("inference_source_directory", "code/deploy/"),
enable_gpu=parameters.get("enable_gpu", None),
description=parameters.get("description", None),
base_image=parameters.get("base_image", None),
base_image=parameters.get("custom_base_image", None),
base_image_registry=container_registry,
cuda_version=parameters.get("cuda_version", None)
)
@@ -158,192 +156,269 @@
print(f"::debug::Failed to create InferenceConfig. Trying to create no code deployment: {exception}")
inference_config = None
# Default service name
repository_name = os.environ.get("GITHUB_REPOSITORY").split("/")[-1]
branch_name = os.environ.get("GITHUB_REF").split("/")[-1]
default_service_name = f"{repository_name}-{branch_name}".lower().replace("_", "-")
service_name = parameters.get("name", default_service_name)[:32]
# Skip deployment if only Docker image should be created
if not parameters.get("skip_deployment", False):
# Default service name
repository_name = os.environ.get("GITHUB_REPOSITORY").split("/")[-1]
branch_name = os.environ.get("GITHUB_REF").split("/")[-1]
default_service_name = f"{repository_name}-{branch_name}".lower().replace("_", "-")
service_name = parameters.get("name", default_service_name)[:32]
# Loading resource configuration
print("::debug::Loading resource configuration")
model_resource_config = model.resource_configuration
cpu_cores = get_resource_config(
config=parameters.get("cpu_cores", None),
resource_config=model_resource_config,
config_name="cpu"
)
memory_gb = get_resource_config(
config=parameters.get("memory_gb", None),
resource_config=model_resource_config,
config_name="memory_in_gb"
)
gpu_cores = get_resource_config(
config=parameters.get("gpu_cores", None),
resource_config=model_resource_config,
config_name="gpu"
)
# Profiling model
print("::debug::Profiling model")
if parameters.get("profiling_enabled", False):
# Getting profiling dataset
profiling_dataset = get_dataset(
workspace=ws,
name=parameters.get("profiling_dataset", None)
# Loading run config
print("::debug::Loading run config")
model_resource_config = model.resource_configuration
cpu_cores = get_resource_config(
config=parameters.get("cpu_cores", None),
resource_config=model_resource_config,
config_name="cpu"
)
memory_gb = get_resource_config(
config=parameters.get("memory_gb", None),
resource_config=model_resource_config,
config_name="memory_in_gb"
)
gpu_cores = get_resource_config(
config=parameters.get("gpu_cores", None),
resource_config=model_resource_config,
config_name="gpu"
)
if profiling_dataset is None:
profiling_dataset = model.sample_input_dataset
# Profiling model
try:
model_profile = Model.profile(
print("::debug::Profiling model")
if parameters.get("profiling_enabled", False):
# Getting profiling dataset
profiling_dataset = get_dataset(
workspace=ws,
profile_name=f"{service_name}-profile"[:32],
name=parameters.get("profiling_dataset", None)
)
if profiling_dataset is None:
profiling_dataset = model.sample_input_dataset
# Profiling model
try:
model_profile = Model.profile(
workspace=ws,
profile_name=f"{service_name}-profile"[:32],
models=[model],
inference_config=inference_config,
input_dataset=profiling_dataset
)
model_profile.wait_for_completion(show_output=True)
# Overwriting resource configuration
cpu_cores = model_profile.recommended_cpu
memory_gb = model_profile.recommended_memory
# Setting output
profiling_details = model_profile.get_details()
print(f"::set-output name=profiling_details::{profiling_details}")
except Exception as exception:
print(f"::warning::Failed to profile model. Skipping profiling and moving on to deployment: {exception}")
# Loading deployment target
print("::debug::Loading deployment target")
try:
deployment_target = ComputeTarget(
workspace=ws,
name=parameters.get("deployment_compute_target", "")
)
except ComputeTargetException:
deployment_target = None
except TypeError:
deployment_target = None
# Creating deployment config
print("::debug::Creating deployment config")
if type(deployment_target) is AksCompute:
deployment_config = AksWebservice.deploy_configuration(
autoscale_enabled=parameters.get("autoscale_enabled", None),
autoscale_min_replicas=parameters.get("autoscale_min_replicas", None),
autoscale_max_replicas=parameters.get("autoscale_max_replicas", None),
autoscale_refresh_seconds=parameters.get("autoscale_refresh_seconds", None),
autoscale_target_utilization=parameters.get("autoscale_target_utilization", None),
collect_model_data=parameters.get("model_data_collection_enabled", None),
auth_enabled=parameters.get("authentication_enabled", None),
cpu_cores=cpu_cores,
memory_gb=memory_gb,
enable_app_insights=parameters.get("app_insights_enabled", None),
scoring_timeout_ms=parameters.get("scoring_timeout_ms", None),
replica_max_concurrent_requests=parameters.get("replica_max_concurrent_requests", None),
max_request_wait_time=parameters.get("max_request_wait_time", None),
num_replicas=parameters.get("num_replicas", None),
primary_key=os.environ.get("PRIMARY_KEY", None),
secondary_key=os.environ.get("SECONDARY_KEY", None),
tags=parameters.get("tags", None),
properties=parameters.get("properties", None),
description=parameters.get("description", None),
gpu_cores=gpu_cores,
period_seconds=parameters.get("period_seconds", None),
initial_delay_seconds=parameters.get("initial_delay_seconds", None),
timeout_seconds=parameters.get("timeout_seconds", None),
success_threshold=parameters.get("success_threshold", None),
failure_threshold=parameters.get("failure_threshold", None),
namespace=parameters.get("namespace", None),
token_auth_enabled=parameters.get("token_auth_enabled", None)
)
else:
deployment_config = AciWebservice.deploy_configuration(
cpu_cores=cpu_cores,
memory_gb=memory_gb,
tags=parameters.get("tags", None),
properties=parameters.get("properties", None),
description=parameters.get("description", None),
location=parameters.get("location", None),
auth_enabled=parameters.get("authentication_enabled", None),
ssl_enabled=parameters.get("ssl_enabled", None),
enable_app_insights=parameters.get("app_insights_enabled", None),
ssl_cert_pem_file=parameters.get("ssl_cert_pem_file", None),
ssl_key_pem_file=parameters.get("ssl_key_pem_file", None),
ssl_cname=parameters.get("ssl_cname", None),
dns_name_label=parameters.get("dns_name_label", None),
primary_key=os.environ.get("PRIMARY_KEY", None),
secondary_key=os.environ.get("SECONDARY_KEY", None),
collect_model_data=parameters.get("model_data_collection_enabled", None),
cmk_vault_base_url=os.environ.get("CMK_VAULT_BASE_URL", None),
cmk_key_name=os.environ.get("CMK_KEY_NAME", None),
cmk_key_version=os.environ.get("CMK_KEY_VERSION", None)
)
# Deploying model
print("::debug::Deploying model")
try:
# Default service name
repository_name = os.environ.get("GITHUB_REPOSITORY").split("/")[-1]
branch_name = os.environ.get("GITHUB_REF").split("/")[-1]
default_service_name = f"{repository_name}-{branch_name}".lower().replace("_", "-")[:32]
service = Model.deploy(
workspace=ws,
name=service_name,
models=[model],
inference_config=inference_config,
input_dataset=profiling_dataset
deployment_config=deployment_config,
deployment_target=deployment_target,
overwrite=True
)
model_profile.wait_for_completion(show_output=True)
service.wait_for_deployment(show_output=True)
except WebserviceException as exception:
print(f"::error::Model deployment failed with exception: {exception}")
service_logs = service.get_logs()
raise AMLDeploymentException(f"Model deployment failed logs: {service_logs} \nexception: {exception}")
# Overwriting resource configuration
cpu_cores = model_profile.recommended_cpu
memory_gb = model_profile.recommended_memory
# Checking status of service
print("::debug::Checking status of service")
if service.state != "Healthy":
service_logs = service.get_logs()
print(f"::error::Model deployment failed with state '{service.state}': {service_logs}")
raise AMLDeploymentException(f"Model deployment failed with state '{service.state}': {service_logs}")
# Setting output
profiling_details = model_profile.get_details()
print(f"::set-output name=profiling_details::{profiling_details}")
except Exception as exception:
print(f"::warning::Failed to profile model. Skipping profiling and moving on to deployment: {exception}")
if parameters.get("test_enabled", False):
# Testing service
print("::debug::Testing service")
root = os.environ.get("GITHUB_WORKSPACE", default=None)
test_file_path = parameters.get("test_file_path", "code/test/test.py")
test_file_function_name = parameters.get("test_file_function_name", "main")
# Creating deployment config
print("::debug::Creating deployment config")
if type(deployment_target) is AksCompute:
deployment_config = AksWebservice.deploy_configuration(
autoscale_enabled=parameters.get("autoscale_enabled", None),
autoscale_min_replicas=parameters.get("autoscale_min_replicas", None),
autoscale_max_replicas=parameters.get("autoscale_max_replicas", None),
autoscale_refresh_seconds=parameters.get("autoscale_refresh_seconds", None),
autoscale_target_utilization=parameters.get("autoscale_target_utilization", None),
collect_model_data=parameters.get("model_data_collection_enabled", None),
auth_enabled=parameters.get("authentication_enabled", None),
cpu_cores=cpu_cores,
memory_gb=memory_gb,
enable_app_insights=parameters.get("app_insights_enabled", None),
scoring_timeout_ms=parameters.get("scoring_timeout_ms", None),
replica_max_concurrent_requests=parameters.get("replica_max_concurrent_requests", None),
max_request_wait_time=parameters.get("max_request_wait_time", None),
num_replicas=parameters.get("num_replicas", None),
primary_key=os.environ.get("PRIMARY_KEY", None),
secondary_key=os.environ.get("SECONDARY_KEY", None),
tags=parameters.get("tags", None),
properties=parameters.get("properties", None),
description=parameters.get("description", None),
gpu_cores=gpu_cores,
period_seconds=parameters.get("period_seconds", None),
initial_delay_seconds=parameters.get("initial_delay_seconds", None),
timeout_seconds=parameters.get("timeout_seconds", None),
success_threshold=parameters.get("success_threshold", None),
failure_threshold=parameters.get("failure_threshold", None),
namespace=parameters.get("namespace", None),
token_auth_enabled=parameters.get("token_auth_enabled", None)
)
else:
deployment_config = AciWebservice.deploy_configuration(
cpu_cores=cpu_cores,
memory_gb=memory_gb,
tags=parameters.get("tags", None),
properties=parameters.get("properties", None),
description=parameters.get("description", None),
location=parameters.get("location", None),
auth_enabled=parameters.get("authentication_enabled", None),
ssl_enabled=parameters.get("ssl_enabled", None),
enable_app_insights=parameters.get("app_insights_enabled", None),
ssl_cert_pem_file=parameters.get("ssl_cert_pem_file", None),
ssl_key_pem_file=parameters.get("ssl_key_pem_file", None),
ssl_cname=parameters.get("ssl_cname", None),
dns_name_label=parameters.get("dns_name_label", None),
primary_key=os.environ.get("PRIMARY_KEY", None),
secondary_key=os.environ.get("SECONDARY_KEY", None),
collect_model_data=parameters.get("model_data_collection_enabled", None),
cmk_vault_base_url=os.environ.get("CMK_VAULT_BASE_URL", None),
cmk_key_name=os.environ.get("CMK_KEY_NAME", None),
cmk_key_version=os.environ.get("CMK_KEY_VERSION", None)
)
print("::debug::Adding root to system path")
sys.path.insert(1, f"{root}")
# Deploying model
print("::debug::Deploying model")
try:
service = Model.deploy(
workspace=ws,
name=service_name,
models=[model],
inference_config=inference_config,
deployment_config=deployment_config,
deployment_target=deployment_target,
overwrite=True
)
service.wait_for_deployment(show_output=True)
except WebserviceException as exception:
print(f"::error::Model deployment failed with exception: {exception}")
service_logs = service.get_logs()
raise AMLDeploymentException(f"Model deployment failedlogs: {service_logs} \nexception: {exception}")
print("::debug::Importing module")
test_file_path = f"{test_file_path}.py" if not test_file_path.endswith(".py") else test_file_path
try:
test_spec = importlib.util.spec_from_file_location(
name="testmodule",
location=test_file_path
)
test_module = importlib.util.module_from_spec(spec=test_spec)
test_spec.loader.exec_module(test_module)
test_function = getattr(test_module, test_file_function_name, None)
except ModuleNotFoundError as exception:
print(f"::error::Could not load python script in your repository which defines theweb service tests (Script: /{test_file_path}, Function: {test_file_function_name}()): {exception}")
raise AMLConfigurationException(f"Could not load python script in your repository which defines the web service tests (Script: /{test_file_path}, Function: {test_file_function_name}()): {exception}")
except FileNotFoundError as exception:
print(f"::error::Could not load python script or function in your repository which defines the web service tests (Script: /{test_file_path}, Function: {test_file_function_name}()): {exception}")
raise AMLConfigurationException(f"Could not load python script or function in your repository which defines the web service tests (Script: /{test_file_path}, Function: {test_file_function_name}()): {exception}")
except AttributeError as exception:
print(f"::error::Could not load python script or function in your repository which defines the web service tests (Script: /{test_file_path}, Function: {test_file_function_name}()): {exception}")
raise AMLConfigurationException(f"Could not load python script or function in your repository which defines the web service tests (Script: /{test_file_path}, Function: {test_file_function_name}()): {exception}")
# Checking status of service
print("::debug::Checking status of service")
if service.state != "Healthy":
service_logs = service.get_logs()
print(f"::error::Model deployment failed with state '{service.state}': {service_logs}")
raise AMLDeploymentException(f"Model deployment failed with state '{service.state}': {service_logs}")
# Load experiment config
print("::debug::Loading experiment config")
try:
test_function(service)
except TypeError as exception:
print(f"::error::Could not load experiment config from your module (Script: /{test_file_path}, Function: {test_file_function_name}()): {exception}")
raise AMLConfigurationException(f"Could not load experiment config from your module (Script: /{test_file_path}, Function: {test_file_function_name}()): {exception}")
except Exception as exception:
print(f"::error::The webservice tests did not complete successfully: {exception}")
raise AMLDeploymentException(f"The webservice tests did not complete successfully: {exception}")
if parameters.get("test_enabled", False):
# Testing service
print("::debug::Testing service")
root = os.environ.get("GITHUB_WORKSPACE", default=None)
test_file_path = parameters.get("test_file_path", "code/test/test.py")
test_file_function_name = parameters.get("test_file_function_name", "main")
# Deleting service if desired
if parameters.get("delete_service_after_deployment", False):
service.delete()
else:
# Creating outputs
print("::debug::Creating outputs")
print(f"::set-output name=service_scoring_uri::{service.scoring_uri}")
print(f"::set-output name=service_swagger_uri::{service.swagger_uri}")
print("::debug::Adding root to system path")
sys.path.insert(1, f"{root}")
print("::debug::Importing module")
test_file_path = f"{test_file_path}.py" if not test_file_path.endswith(".py") else test_file_path
# Creating Docker image
if parameters.get("create_image", None) is not None:
try:
test_spec = importlib.util.spec_from_file_location(
name="testmodule",
location=test_file_path
)
test_module = importlib.util.module_from_spec(spec=test_spec)
test_spec.loader.exec_module(test_module)
test_function = getattr(test_module, test_file_function_name, None)
except ModuleNotFoundError as exception:
print(f"::error::Could not load python script in your repository which defines theweb service tests (Script: /{test_file_path}, Function: {test_file_function_name}()): {exception}")
raise AMLConfigurationException(f"Could not load python script in your repository which defines the web service tests (Script: /{test_file_path}, Function: {test_file_function_name}()): {exception}")
except FileNotFoundError as exception:
print(f"::error::Could not load python script or function in your repository which defines the web service tests (Script: /{test_file_path}, Function: {test_file_function_name}()): {exception}")
raise AMLConfigurationException(f"Could not load python script or function in your repository which defines the web service tests (Script: /{test_file_path}, Function: {test_file_function_name}()): {exception}")
except AttributeError as exception:
print(f"::error::Could not load python script or function in your repository which defines the web service tests (Script: /{test_file_path}, Function: {test_file_function_name}()): {exception}")
raise AMLConfigurationException(f"Could not load python script or function in your repository which defines the web service tests (Script: /{test_file_path}, Function: {test_file_function_name}()): {exception}")
# Packaging model
if parameters.get("create_image", None) == "docker":
package = Model.package(
workspace=ws,
models=[model],
inference_config=inference_config,
generate_dockerfile=False
)
if parameters.get("create_image", None) == "function_blob":
package = package_blob(
workspace=ws,
models=[model],
inference_config=inference_config,
generate_dockerfile=False,
input_path=os.environ.get("FUNCTION_BLOB_INPUT"),
output_path=os.environ.get("FUNCTION_BLOB_OUTPUT")
)
if parameters.get("create_image", None) == "function_http":
package = package_http(
workspace=ws,
models=[model],
inference_config=inference_config,
generate_dockerfile=False,
auth_level=os.environ.get("FUNCTION_HTTP_AUTH_LEVEL")
)
if parameters.get("create_image", None) == "function_service_bus_queue":
package = package_service_bus_queue(
workspace=ws,
models=[model],
inference_config=inference_config,
generate_dockerfile=False,
input_queue_name=os.environ.get("FUNCTION_SERVICE_BUS_QUEUE_INPUT"),
output_queue_name=os.environ.get("FUNCTION_SERVICE_BUS_QUEUE_OUTPUT")
)
# Load experiment config
print("::debug::Loading experiment config")
try:
test_function(service)
except TypeError as exception:
print(f"::error::Could not load experiment config from your module (Script: /{test_file_path}, Function: {test_file_function_name}()): {exception}")
raise AMLConfigurationException(f"Could not load experiment config from your module (Script: /{test_file_path}, Function: {test_file_function_name}()): {exception}")
except Exception as exception:
print(f"::error::The webservice tests did not complete successfully: {exception}")
raise AMLDeploymentException(f"The webservice tests did not complete successfully: {exception}")
# Getting container registry details
acr = package.get_container_registry()
mask_parameter(parameter=acr.address)
mask_parameter(parameter=acr.username)
mask_parameter(parameter=acr.password)
# Deleting service if desired
if parameters.get("delete_service_after_deployment", False):
service.delete()
else:
# Create outputs
print("::debug::Creating outputs")
print(f"::set-output name=service_scoring_uri::{service.scoring_uri}")
print(f"::set-output name=service_swagger_uri::{service.swagger_uri}")
# Wait for completion and pull image
package.wait_for_creation(show_output=True)
# Creating additional outputs
print("::debug::Creating outputs")
print(f"::set-output name=acr_address::{acr.address}")
print(f"::set-output name=acr_username::{acr.username}")
print(f"::set-output name=acr_password::{acr.password}")
print(f"::set-output name=package_location::{package.location}")
except WebserviceException as exception:
print(f"::error::Image creation failed with exception: {exception}")
package_logs = package.get_logs()
raise AMLDeploymentException(f"Image creation failed with logs: {package_logs}")
print("::debug::Successfully finished Azure Machine Learning Deploy Action")

View file

@@ -121,6 +121,15 @@ parameters_schema = {
"type": "boolean",
"description": "Indicates whether the service gets deleted after the deployment completed successfully."
},
"skip_deployment": {
"type": "boolean",
"description": "Indicates whether the deployment to ACI or AKS should be skipped. This can be used in combination with `create_image` to only create a Docker image that can be used for further deployment."
},
"create_image": {
"type": "string",
"description": "Indicates whether a Docker image should be created which can be used for further deployment.",
"pattern": "docker|function_blob|function_http|function_service_bus_queue"
},
"tags": {
"type": "object",
"description": "Dictionary of key value tags to give this Webservice."