update samples from Release-115 as part of SDK release
This commit is contained in:
Parent: e2dddfde85
Commit: 6529298c24
@@ -103,7 +103,7 @@
    "source": [
     "import azureml.core\n",
     "\n",
-    "print(\"This notebook was created using version 1.34.0 of the Azure ML SDK\")\n",
+    "print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
     "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
    ]
   },
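An editorial aside on the version bump above: the sample cell only prints the expected and installed versions. If you want the check to fail loudly instead, a minimal sketch along these lines would do it (this uses the third-party `packaging` library, an assumption on our part, not something the commit adds):

```python
import azureml.core
from packaging.version import Version  # third-party "packaging" library

EXPECTED = "1.35.0"  # the release these samples were validated against

if Version(azureml.core.VERSION) < Version(EXPECTED):
    print("You are on {}; these samples were created with {}, "
          "so consider upgrading the Azure ML SDK.".format(azureml.core.VERSION, EXPECTED))
```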
@@ -6,4 +6,4 @@ dependencies:
   - fairlearn>=0.6.2
   - joblib
   - liac-arff
-  - raiwidgets~=0.7.0
+  - raiwidgets~=0.11.0
@@ -6,4 +6,4 @@ dependencies:
   - fairlearn>=0.6.2
   - joblib
   - liac-arff
-  - raiwidgets~=0.7.0
+  - raiwidgets~=0.11.0
@@ -22,9 +22,9 @@ dependencies:
 
   - pip:
     # Required packages for AzureML execution, history, and data preparation.
-    - azureml-widgets~=1.34.0
+    - azureml-widgets~=1.35.0
     - pytorch-transformers==1.0.0
     - spacy==2.1.8
     - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
-    - -r https://automlresources-prod.azureedge.net/validated-requirements/1.34.0/validated_win32_requirements.txt [--no-deps]
+    - -r https://automlresources-prod.azureedge.net/validated-requirements/1.35.0/validated_win32_requirements.txt [--no-deps]
     - arch==4.14
@@ -22,9 +22,9 @@ dependencies:
 
   - pip:
     # Required packages for AzureML execution, history, and data preparation.
-    - azureml-widgets~=1.34.0
+    - azureml-widgets~=1.35.0
    - pytorch-transformers==1.0.0
     - spacy==2.1.8
     - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
-    - -r https://automlresources-prod.azureedge.net/validated-requirements/1.34.0/validated_linux_requirements.txt [--no-deps]
+    - -r https://automlresources-prod.azureedge.net/validated-requirements/1.35.0/validated_linux_requirements.txt [--no-deps]
     - arch==4.14
@@ -23,9 +23,9 @@ dependencies:
 
   - pip:
     # Required packages for AzureML execution, history, and data preparation.
-    - azureml-widgets~=1.34.0
+    - azureml-widgets~=1.35.0
     - pytorch-transformers==1.0.0
     - spacy==2.1.8
     - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
-    - -r https://automlresources-prod.azureedge.net/validated-requirements/1.34.0/validated_darwin_requirements.txt [--no-deps]
+    - -r https://automlresources-prod.azureedge.net/validated-requirements/1.35.0/validated_darwin_requirements.txt [--no-deps]
     - arch==4.14
@@ -3,7 +3,7 @@ import platform
 
 try:
     import conda
-except:
+except Exception:
     print('Failed to import conda.')
     print('This setup is usually run from the base conda environment.')
     print('You can activate the base environment using the command "conda activate base"')
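For context on this change (our sketch, not part of the commit): a bare `except:` catches everything, including `KeyboardInterrupt` and `SystemExit`, whereas `except Exception:` lets those control-flow exceptions propagate:

```python
import sys

try:
    import conda  # typically absent outside the base conda environment
except Exception:
    # Unlike a bare "except:", Ctrl+C (KeyboardInterrupt) and sys.exit()
    # (SystemExit) still propagate instead of being swallowed here.
    print('Failed to import conda.', file=sys.stderr)
```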
@@ -104,7 +104,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "print(\"This notebook was created using version 1.34.0 of the Azure ML SDK\")\n",
+    "print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
     "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
    ]
   },
@@ -703,7 +703,7 @@
    "from azureml.core.webservice import AciWebservice\n",
    "from azureml.core.model import Model\n",
    "\n",
-    "inference_config = InferenceConfig(entry_script=script_file_name)\n",
+    "inference_config = InferenceConfig(environment = best_run.get_environment(), entry_script=script_file_name)\n",
    "\n",
    "aciconfig = AciWebservice.deploy_configuration(cpu_cores = 2, \n",
    "                                               memory_gb = 2, \n",
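The change above reflects that an `InferenceConfig` should carry an explicit environment; reusing the best run's training environment keeps scoring dependencies in sync with training. A minimal end-to-end sketch, assuming `ws`, `model`, `best_run`, and `script_file_name` are defined in earlier notebook cells (the service name is illustrative):

```python
from azureml.core.model import InferenceConfig, Model
from azureml.core.webservice import AciWebservice

# "ws", "model", "best_run" and "script_file_name" are assumed to come
# from earlier notebook cells. Score with the same package versions the
# model was trained with by reusing the run's environment.
inference_config = InferenceConfig(environment=best_run.get_environment(),
                                   entry_script=script_file_name)

aci_config = AciWebservice.deploy_configuration(cpu_cores=2, memory_gb=2)
service = Model.deploy(ws, 'automl-sample-service', [model],
                       inference_config, aci_config)
service.wait_for_deployment(show_output=True)
```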
@@ -93,7 +93,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "print(\"This notebook was created using version 1.34.0 of the Azure ML SDK\")\n",
+    "print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
     "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
    ]
   },
@@ -96,7 +96,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "print(\"This notebook was created using version 1.34.0 of the Azure ML SDK\")\n",
+    "print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
     "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
    ]
   },
@@ -81,7 +81,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "print(\"This notebook was created using version 1.34.0 of the Azure ML SDK\")\n",
+    "print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
     "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
    ]
   },
@@ -31,7 +31,7 @@ try:
     model = Model(ws, args.model_name)
     last_train_time = model.created_time
     print("Model was last trained on {0}.".format(last_train_time))
-except Exception as e:
+except Exception:
     print("Could not get last model train time.")
     last_train_time = datetime.min.replace(tzinfo=pytz.UTC)
 
@@ -92,7 +92,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "print(\"This notebook was created using version 1.34.0 of the Azure ML SDK\")\n",
+    "print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
     "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
    ]
   },
@@ -91,7 +91,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "print(\"This notebook was created using version 1.34.0 of the Azure ML SDK\")\n",
+    "print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
     "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
    ]
   },
@@ -113,7 +113,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "print(\"This notebook was created using version 1.34.0 of the Azure ML SDK\")\n",
+    "print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
     "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
    ]
   },
@@ -88,7 +88,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "print(\"This notebook was created using version 1.34.0 of the Azure ML SDK\")\n",
+    "print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
     "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
    ]
   },
@@ -99,7 +99,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "print(\"This notebook was created using version 1.34.0 of the Azure ML SDK\")\n",
+    "print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
     "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
    ]
   },
@@ -5,55 +5,10 @@ compute instance.
 """
 
 import argparse
 import pandas as pd
 import numpy as np
 from azureml.core import Dataset, Run
 from azureml.automl.core.shared.constants import TimeSeriesInternal
 from sklearn.externals import joblib
 from pandas.tseries.frequencies import to_offset
 
 
-def align_outputs(y_predicted, X_trans, X_test, y_test, target_column_name,
-                  predicted_column_name='predicted',
-                  horizon_colname='horizon_origin'):
-    """
-    Demonstrates how to get the output aligned to the inputs
-    using pandas indexes. Helps understand what happened if
-    the output's shape differs from the input shape, or if
-    the data got re-sorted by time and grain during forecasting.
-
-    Typical causes of misalignment are:
-    * we predicted some periods that were missing in actuals -> drop from eval
-    * model was asked to predict past max_horizon -> increase max horizon
-    * data at start of X_test was needed for lags -> provide previous periods
-    """
-
-    if (horizon_colname in X_trans):
-        df_fcst = pd.DataFrame({predicted_column_name: y_predicted,
-                                horizon_colname: X_trans[horizon_colname]})
-    else:
-        df_fcst = pd.DataFrame({predicted_column_name: y_predicted})
-
-    # y and X outputs are aligned by forecast() function contract
-    df_fcst.index = X_trans.index
-
-    # align original X_test to y_test
-    X_test_full = X_test.copy()
-    X_test_full[target_column_name] = y_test
-
-    # X_test_full's index does not include origin, so reset for merge
-    df_fcst.reset_index(inplace=True)
-    X_test_full = X_test_full.reset_index().drop(columns='index')
-    together = df_fcst.merge(X_test_full, how='right')
-
-    # drop rows where prediction or actuals are nan
-    # happens because of missing actuals
-    # or at edges of time due to lags/rolling windows
-    clean = together[together[[target_column_name,
-                               predicted_column_name]].notnull().all(axis=1)]
-    return(clean)
-
-
 parser = argparse.ArgumentParser()
 parser.add_argument(
     '--target_column_name', type=str, dest='target_column_name',
@@ -77,13 +32,25 @@ y_test = X_test.pop(target_column_name).values
 
 # generate forecast
 fitted_model = joblib.load('model.pkl')
-y_predictions, X_trans = fitted_model.forecast(X_test)
-
-# align output
-df_all = align_outputs(y_predictions, X_trans, X_test, y_test, target_column_name)
+# We have default quantile values set as below (95th percentile)
+quantiles = [0.025, 0.5, 0.975]
+predicted_column_name = 'predicted'
+PI = 'prediction_interval'
+fitted_model.quantiles = quantiles
+pred_quantiles = fitted_model.forecast_quantiles(X_test)
+pred_quantiles[PI] = pred_quantiles[[min(quantiles), max(quantiles)]].apply(lambda x: '[{}, {}]'.format(x[0],
+                                                                                                        x[1]), axis=1)
+X_test[target_column_name] = y_test
+X_test[PI] = pred_quantiles[PI]
+X_test[predicted_column_name] = pred_quantiles[0.5]
+# drop rows where prediction or actuals are nan
+# happens because of missing actuals
+# or at edges of time due to lags/rolling windows
+clean = X_test[X_test[[target_column_name,
+                       predicted_column_name]].notnull().all(axis=1)]
 
 file_name = 'outputs/predictions.csv'
-export_csv = df_all.to_csv(file_name, header=True, index=False)  # added Index
+export_csv = clean.to_csv(file_name, header=True, index=False)  # added Index
 
 # Upload the predictions into artifacts
 run.upload_file(name=file_name, path_or_stream=file_name)
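A note on the rewrite above (our reading, not part of the commit): `forecast_quantiles()` returns a frame aligned to the input rows with one column per requested quantile, keyed by the float value, which is why the manual `align_outputs` helper deleted in the previous hunk is no longer needed. A small sketch of the pattern, assuming a fitted AutoML forecasting model and a prepared `X_test` frame:

```python
quantiles = [0.025, 0.5, 0.975]   # median plus the bounds of a 95% interval
fitted_model.quantiles = quantiles
pred = fitted_model.forecast_quantiles(X_test)

point_forecast = pred[0.5]        # each quantile is a column keyed by its value
interval = pred[[0.025, 0.975]].apply(
    lambda row: '[{}, {}]'.format(row[0.025], row[0.975]), axis=1)
```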
@@ -94,7 +94,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "print(\"This notebook was created using version 1.34.0 of the Azure ML SDK\")\n",
+    "print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
     "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
    ]
   },
(Diff for this file not shown because it is too large.)
(Diff for this file not shown because it is too large.)
|
@@ -0,0 +1,648 @@
|
|||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||
"\n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/forecasting-hierarchical-timeseries/auto-ml-forecasting-hierarchical-timeseries.png)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Hierarchical Time Series - Automated ML\n",
|
||||
"**_Generate hierarchical time series forecasts with Automated Machine Learning_**\n",
|
||||
"\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"For this notebook we are using a synthetic dataset portraying sales data to predict the the quantity of a vartiety of product skus across several states, stores, and product categories.\n",
|
||||
"\n",
|
||||
"**NOTE: There are limits on how many runs we can do in parallel per workspace, and we currently recommend to set the parallelism to maximum of 320 runs per experiment per workspace. If users want to have more parallelism and increase this limit they might encounter Too Many Requests errors (HTTP 429).**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Prerequisites\n",
|
||||
"You'll need to create a compute Instance by following the instructions in the [EnvironmentSetup.md](../Setup_Resources/EnvironmentSetup.md)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 1.0 Set up workspace, datastore, experiment"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"gather": {
|
||||
"logged": 1613003526897
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import azureml.core\n",
|
||||
"from azureml.core import Workspace, Datastore\n",
|
||||
"import pandas as pd\n",
|
||||
"\n",
|
||||
"# Set up your workspace\n",
|
||||
"ws = Workspace.from_config()\n",
|
||||
"ws.get_details()\n",
|
||||
"\n",
|
||||
"# Set up your datastores\n",
|
||||
"dstore = ws.get_default_datastore()\n",
|
||||
"\n",
|
||||
"output = {}\n",
|
||||
"output['SDK version'] = azureml.core.VERSION\n",
|
||||
"output['Subscription ID'] = ws.subscription_id\n",
|
||||
"output['Workspace'] = ws.name\n",
|
||||
"output['Resource Group'] = ws.resource_group\n",
|
||||
"output['Location'] = ws.location\n",
|
||||
"output['Default datastore name'] = dstore.name\n",
|
||||
"pd.set_option('display.max_colwidth', -1)\n",
|
||||
"outputDf = pd.DataFrame(data = output, index = [''])\n",
|
||||
"outputDf.T"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Choose an experiment"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"gather": {
|
||||
"logged": 1613003540729
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core import Experiment\n",
|
||||
"\n",
|
||||
"experiment = Experiment(ws, 'automl-hts')\n",
|
||||
"\n",
|
||||
"print('Experiment name: ' + experiment.name)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 2.0 Data\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"nteract": {
|
||||
"transient": {
|
||||
"deleting": false
|
||||
}
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"### Upload local csv files to datastore\n",
|
||||
"You can upload your train and inference csv files to the default datastore in your workspace. \n",
|
||||
"\n",
|
||||
"A Datastore is a place where data can be stored that is then made accessible to a compute either by means of mounting or copying the data to the compute target.\n",
|
||||
"Please refer to [Datastore](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.datastore.datastore?view=azure-ml-py) documentation on how to access data from Datastore."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"datastore_path = \"hts-sample\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"datastore = ws.get_default_datastore()\n",
|
||||
"datastore"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"gather": {
|
||||
"logged": 1613005886349
|
||||
},
|
||||
"jupyter": {
|
||||
"outputs_hidden": false,
|
||||
"source_hidden": false
|
||||
},
|
||||
"nteract": {
|
||||
"transient": {
|
||||
"deleting": false
|
||||
}
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"datastore.upload(src_dir='./Data/', target_path=datastore_path, overwrite=True, show_progress=True) "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Create the TabularDatasets \n",
|
||||
"\n",
|
||||
"Datasets in Azure Machine Learning are references to specific data in a Datastore. The data can be retrieved as a [TabularDatasets](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset?view=azure-ml-py)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"gather": {
|
||||
"logged": 1613007017296
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.dataset import Dataset\n",
|
||||
"train_ds = Dataset.Tabular.from_delimited_files(path=datastore.path(\"hts-sample/hts-sample-train.csv\"), validate=False) \n",
|
||||
"inference_ds = Dataset.Tabular.from_delimited_files(path=datastore.path(\"hts-sample/hts-sample-test.csv\"), validate=False)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Register the TabularDatasets to the Workspace \n",
|
||||
"Finally, register the dataset to your Workspace so it can be called as an input into the training pipeline in the next notebook. We will use the inference dataset as part of the forecasting pipeline. The step need only be completed once."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"registered_train = train_ds.register(ws, \"hts-sales-train\")\n",
|
||||
"registered_inference = inference_ds.register(ws, \"hts-sales-test\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 3.0 Build the training pipeline\n",
|
||||
"Now that the dataset, WorkSpace, and datastore are set up, we can put together a pipeline for training.\n",
|
||||
"\n",
|
||||
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Choose a compute target\n",
|
||||
"\n",
|
||||
"You will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-set-up-training-targets#amlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
|
||||
"\n",
|
||||
"\\*\\*Creation of AmlCompute takes approximately 5 minutes.**\n",
|
||||
"\n",
|
||||
"If the AmlCompute with that name is already in your workspace this code will skip the creation process. As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read this [article](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-manage-quotas) on the default limits and how to request more quota."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"gather": {
|
||||
"logged": 1613007037308
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
|
||||
"\n",
|
||||
"# Name your cluster\n",
|
||||
"compute_name = \"hts-compute\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"if compute_name in ws.compute_targets:\n",
|
||||
" compute_target = ws.compute_targets[compute_name]\n",
|
||||
" if compute_target and type(compute_target) is AmlCompute:\n",
|
||||
" print('Found compute target: ' + compute_name)\n",
|
||||
"else:\n",
|
||||
" print('Creating a new compute target...')\n",
|
||||
" provisioning_config = AmlCompute.provisioning_configuration(vm_size= \"STANDARD_D16S_V3\",\n",
|
||||
" max_nodes=20)\n",
|
||||
" # Create the compute target\n",
|
||||
" compute_target = ComputeTarget.create(\n",
|
||||
" ws, compute_name, provisioning_config)\n",
|
||||
"\n",
|
||||
" # Can poll for a minimum number of nodes and for a specific timeout.\n",
|
||||
" # If no min node count is provided it will use the scale settings for the cluster\n",
|
||||
" compute_target.wait_for_completion(\n",
|
||||
" show_output=True, min_node_count=None, timeout_in_minutes=20)\n",
|
||||
"\n",
|
||||
" # For a more detailed view of current cluster status, use the 'status' property\n",
|
||||
" print(compute_target.status.serialize())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Set up training parameters\n",
|
||||
"\n",
|
||||
"This dictionary defines the AutoML and hierarchy settings. For this forecasting task we need to define several settings inncluding the name of the time column, the maximum forecast horizon, the hierarchy definition, and the level of the hierarchy at which to train.\n",
|
||||
"\n",
|
||||
"| Property | Description|\n",
|
||||
"| :--------------- | :------------------- |\n",
|
||||
"| **task** | forecasting |\n",
|
||||
"| **primary_metric** | This is the metric that you want to optimize.<br> Forecasting supports the following primary metrics <br><i>spearman_correlation</i><br><i>normalized_root_mean_squared_error</i><br><i>r2_score</i><br><i>normalized_mean_absolute_error</i> |\n",
|
||||
"| **blocked_models** | Blocked models won't be used by AutoML. |\n",
|
||||
"| **iteration_timeout_minutes** | Maximum amount of time in minutes that the model can train. This is optional but provides customers with greater control on exit criteria. |\n",
|
||||
"| **iterations** | Number of models to train. This is optional but provides customers with greater control on exit criteria. |\n",
|
||||
"| **experiment_timeout_hours** | Maximum amount of time in hours that the experiment can take before it terminates. This is optional but provides customers with greater control on exit criteria. |\n",
|
||||
"| **label_column_name** | The name of the label column. |\n",
|
||||
"| **forecast_horizon** | The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly). Periods are inferred from your data. |\n",
|
||||
"| **n_cross_validations** | Number of cross validation splits. Rolling Origin Validation is used to split time-series in a temporally consistent way. |\n",
|
||||
"| **enable_early_stopping** | Flag to enable early termination if the score is not improving in the short term. |\n",
|
||||
"| **time_column_name** | The name of your time column. |\n",
|
||||
"| **hierarchy_column_names** | The names of columns that define the hierarchical structure of the data from highest level to most granular. |\n",
|
||||
"| **training_level** | The level of the hierarchy to be used for training models. |\n",
|
||||
"| **enable_engineered_explanations** | Engineered feature explanations will be downloaded if enable_engineered_explanations flag is set to True. By default it is set to False to save storage space. |\n",
|
||||
"| **time_series_id_column_name** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. |\n",
|
||||
"| **track_child_runs** | Flag to disable tracking of child runs. Only best run is tracked if the flag is set to False (this includes the model and metrics of the run). |\n",
|
||||
"| **pipeline_fetch_max_batch_size** | Determines how many pipelines (training algorithms) to fetch at a time for training, this helps reduce throttling when training at large scale. |\n",
|
||||
"| **model_explainability** | Flag to disable explaining the best automated ML model at the end of all training iterations. The default is True and will block non-explainable models which may impact the forecast accuracy. For more information, see [Interpretability: model explanations in automated machine learning](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-machine-learning-interpretability-automl). |"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"gather": {
|
||||
"logged": 1613007061544
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.train.automl.runtime._hts.hts_parameters import HTSTrainParameters\n",
|
||||
"\n",
|
||||
"model_explainability = True\n",
|
||||
"\n",
|
||||
"engineered_explanations = False\n",
|
||||
"# Define your hierarchy. Adjust the settings below based on your dataset.\n",
|
||||
"hierarchy = [\"state\", \"store_id\", \"product_category\", \"SKU\"]\n",
|
||||
"training_level = \"SKU\"\n",
|
||||
"\n",
|
||||
"# Set your forecast parameters. Adjust the settings below based on your dataset.\n",
|
||||
"time_column_name = \"date\"\n",
|
||||
"label_column_name = \"quantity\"\n",
|
||||
"forecast_horizon = 7\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"automl_settings = {\n",
|
||||
" \"task\" : \"forecasting\",\n",
|
||||
" \"primary_metric\" : \"normalized_root_mean_squared_error\",\n",
|
||||
" \"label_column_name\": label_column_name,\n",
|
||||
" \"time_column_name\": time_column_name,\n",
|
||||
" \"forecast_horizon\": forecast_horizon,\n",
|
||||
" \"hierarchy_column_names\": hierarchy,\n",
|
||||
" \"hierarchy_training_level\": training_level,\n",
|
||||
" \"track_child_runs\": False,\n",
|
||||
" \"pipeline_fetch_max_batch_size\": 15,\n",
|
||||
" \"model_explainability\": model_explainability,\n",
|
||||
" # The following settings are specific to this sample and should be adjusted according to your own needs.\n",
|
||||
" \"iteration_timeout_minutes\" : 10,\n",
|
||||
" \"iterations\" : 10,\n",
|
||||
" \"n_cross_validations\": 2\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"hts_parameters = HTSTrainParameters(\n",
|
||||
" automl_settings=automl_settings,\n",
|
||||
" hierarchy_column_names=hierarchy,\n",
|
||||
" training_level=training_level,\n",
|
||||
" enable_engineered_explanations=engineered_explanations\n",
|
||||
")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Set up hierarchy training pipeline"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Parallel run step is leveraged to train the hierarchy. To configure the ParallelRunConfig you will need to determine the appropriate number of workers and nodes for your use case. The `process_count_per_node` is based off the number of cores of the compute VM. The node_count will determine the number of master nodes to use, increasing the node count will speed up the training process.\n",
|
||||
"\n",
|
||||
"* **experiment:** The experiment used for training.\n",
|
||||
"* **train_data:** The tabular dataset to be used as input to the training run.\n",
|
||||
"* **node_count:** The number of compute nodes to be used for running the user script. We recommend to start with 3 and increase the node_count if the training time is taking too long.\n",
|
||||
"* **process_count_per_node:** Process count per node, we recommend 2:1 ratio for number of cores: number of processes per node. eg. If node has 16 cores then configure 8 or less process count per node or optimal performance.\n",
|
||||
"* **train_pipeline_parameters:** The set of configuration parameters defined in the previous section. \n",
|
||||
"\n",
|
||||
"Calling this method will create a new aggregated dataset which is generated dynamically on pipeline execution."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"training_pipeline_steps = AutoMLPipelineBuilder.get_many_models_train_steps(\n",
|
||||
" experiment=experiment,\n",
|
||||
" train_data=registered_train,\n",
|
||||
" compute_target=compute_target,\n",
|
||||
" node_count=2,\n",
|
||||
" process_count_per_node=8,\n",
|
||||
" train_pipeline_parameters=hts_parameters,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.pipeline.core import Pipeline\n",
|
||||
"\n",
|
||||
"training_pipeline = Pipeline(ws, steps=training_pipeline_steps)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Submit the pipeline to run\n",
|
||||
"Next we submit our pipeline to run. The whole training pipeline takes about 1h 11m using a Standard_D12_V2 VM with our current ParallelRunConfig setting."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"training_run = experiment.submit(training_pipeline)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"training_run.wait_for_completion(show_output=False)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Check the run status, if training_run is in completed state, continue to forecasting. If training_run is in another state, check the portal for failures."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### [Optional] Get the explanations\n",
|
||||
"First we need to download the explanations to the local disk."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"if model_explainability:\n",
|
||||
" expl_output = training_run.get_pipeline_output(\"explanations\")\n",
|
||||
" expl_output.download(\"training_explanations\")\n",
|
||||
"else:\n",
|
||||
" print(\"Model explanations are available only if model_explainability is set to True.\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The explanations are downloaded to the \"training_explanations/azureml\" directory."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"if model_explainability:\n",
|
||||
" explanations_dirrectory = os.listdir(os.path.join('training_explanations', 'azureml'))\n",
|
||||
" if len(explanations_dirrectory) > 1:\n",
|
||||
" print(\"Warning! The directory contains multiple explanations, only the first one will be displayed.\")\n",
|
||||
" print('The explanations are located at {}.'.format(explanations_dirrectory[0]))\n",
|
||||
" # Now we will list all the explanations.\n",
|
||||
" explanation_path = os.path.join('training_explanations', 'azureml', explanations_dirrectory[0], 'training_explanations')\n",
|
||||
" print(\"Available explanations\")\n",
|
||||
" print(\"==============================\")\n",
|
||||
" print(\"\\n\".join(os.listdir(explanation_path)))\n",
|
||||
"else:\n",
|
||||
" print(\"Model explanations are available only if model_explainability is set to True.\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"View the explanations on \"state\" level."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from IPython.display import display\n",
|
||||
"\n",
|
||||
"explanation_type = 'raw'\n",
|
||||
"level = 'state'\n",
|
||||
"\n",
|
||||
"if model_explainability:\n",
|
||||
" display(pd.read_csv(os.path.join(explanation_path, \"{}_explanations_{}.csv\").format(explanation_type, level)))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 5.0 Forecasting\n",
|
||||
"For hierarchical forecasting we need to provide the HTSInferenceParameters object.\n",
|
||||
"#### HTSInferenceParameters arguments\n",
|
||||
"* **hierarchy_forecast_level:** The default level of the hierarchy to produce prediction/forecast on.\n",
|
||||
"* **allocation_method:** \\[Optional] The disaggregation method to use if the hierarchy forecast level specified is below the define hierarchy training level. <br><i>(average historical proportions) 'average_historical_proportions'</i><br><i>(proportions of the historical averages) 'proportions_of_historical_average'</i>\n",
|
||||
"\n",
|
||||
"#### get_many_models_batch_inference_steps arguments\n",
|
||||
"* **experiment:** The experiment used for inference run.\n",
|
||||
"* **inference_data:** The data to use for inferencing. It should be the same schema as used for training.\n",
|
||||
"* **compute_target:** The compute target that runs the inference pipeline.\n",
|
||||
"* **node_count:** The number of compute nodes to be used for running the user script. We recommend to start with the number of cores per node (varies by compute sku).\n",
|
||||
"* **process_count_per_node:** The number of processes per node.\n",
|
||||
"* **train_run_id:** \\[Optional] The run id of the hierarchy training, by default it is the latest successful training hts run in the experiment.\n",
|
||||
"* **train_experiment_name:** \\[Optional] The train experiment that contains the train pipeline. This one is only needed when the train pipeline is not in the same experiement as the inference pipeline.\n",
|
||||
"* **process_count_per_node:** \\[Optional] The number of processes per node, by default it's 4."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.train.automl.runtime._hts.hts_parameters import HTSInferenceParameters\n",
|
||||
"\n",
|
||||
"inference_parameters = HTSInferenceParameters(\n",
|
||||
" hierarchy_forecast_level=\"store_id\", # The setting is specific to this dataset and should be changed based on your dataset.\n",
|
||||
" allocation_method=\"proportions_of_historical_average\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"steps = AutoMLPipelineBuilder.get_many_models_batch_inference_steps(\n",
|
||||
" experiment=experiment,\n",
|
||||
" inference_data=registered_inference,\n",
|
||||
" compute_target=compute_target,\n",
|
||||
" inference_pipeline_parameters=inference_parameters,\n",
|
||||
" node_count=2,\n",
|
||||
" process_count_per_node=8\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.pipeline.core import Pipeline\n",
|
||||
"\n",
|
||||
"inference_pipeline = Pipeline(ws, steps=steps)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"inference_run = experiment.submit(inference_pipeline)\n",
|
||||
"inference_run.wait_for_completion(show_output=False)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Retrieve results\n",
|
||||
"\n",
|
||||
"Forecast results can be retrieved through the following code. The prediction results summary and the actual predictions are downloaded the \"forecast_results\" folder"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"forecasts = inference_run.get_pipeline_output(\"forecasts\")\n",
|
||||
"forecasts.download(\"forecast_results\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Resbumit the Pipeline\n",
|
||||
"\n",
|
||||
"The inference pipeline can be submitted with different configurations."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"inference_run = experiment.submit(inference_pipeline, pipeline_parameters={\"hierarchy_forecast_level\": \"state\"})\n",
|
||||
"inference_run.wait_for_completion(show_output=False)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"authors": [
|
||||
{
|
||||
"name": "jialiu"
|
||||
}
|
||||
],
|
||||
"categories": [
|
||||
"how-to-use-azureml",
|
||||
"automated-machine-learning"
|
||||
],
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.6",
|
||||
"language": "python",
|
||||
"name": "python36"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.8"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
|
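After the hierarchical notebook's `forecasts.download("forecast_results")` step above, one way to load what was fetched is a recursive CSV read. A sketch only; the exact file layout under the download folder is an assumption:

```python
import glob

import pandas as pd

# Collect every CSV the inference pipeline wrote under the download folder.
paths = glob.glob('forecast_results/**/*.csv', recursive=True)
all_forecasts = pd.concat((pd.read_csv(p) for p in paths), ignore_index=True)
print(all_forecasts.head())
```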
@@ -0,0 +1,4 @@
+name: auto-ml-forecasting-hierarchical-timeseries
+dependencies:
+- pip:
+  - azureml-sdk
@@ -0,0 +1,717 @@
|
|||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||
"\n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/forecasting-hierarchical-timeseries/auto-ml-forecasting-hierarchical-timeseries.png)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Many Models - Automated ML\n",
|
||||
"**_Generate many models time series forecasts with Automated Machine Learning_**\n",
|
||||
"\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"For this notebook we are using a synthetic dataset portraying sales data to predict the the quantity of a vartiety of product skus across several states, stores, and product categories.\n",
|
||||
"\n",
|
||||
"**NOTE: There are limits on how many runs we can do in parallel per workspace, and we currently recommend to set the parallelism to maximum of 320 runs per experiment per workspace. If users want to have more parallelism and increase this limit they might encounter Too Many Requests errors (HTTP 429).**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Prerequisites\n",
|
||||
"You'll need to create a compute Instance by following the instructions in the [EnvironmentSetup.md](../Setup_Resources/EnvironmentSetup.md)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 1.0 Set up workspace, datastore, experiment"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"gather": {
|
||||
"logged": 1613003526897
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import azureml.core\n",
|
||||
"from azureml.core import Workspace, Datastore\n",
|
||||
"import pandas as pd\n",
|
||||
"\n",
|
||||
"# Set up your workspace\n",
|
||||
"ws = Workspace.from_config()\n",
|
||||
"ws.get_details()\n",
|
||||
"\n",
|
||||
"# Set up your datastores\n",
|
||||
"dstore = ws.get_default_datastore()\n",
|
||||
"\n",
|
||||
"output = {}\n",
|
||||
"output['SDK version'] = azureml.core.VERSION\n",
|
||||
"output['Subscription ID'] = ws.subscription_id\n",
|
||||
"output['Workspace'] = ws.name\n",
|
||||
"output['Resource Group'] = ws.resource_group\n",
|
||||
"output['Location'] = ws.location\n",
|
||||
"output['Default datastore name'] = dstore.name\n",
|
||||
"pd.set_option('display.max_colwidth', -1)\n",
|
||||
"outputDf = pd.DataFrame(data = output, index = [''])\n",
|
||||
"outputDf.T"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Choose an experiment"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"gather": {
|
||||
"logged": 1613003540729
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core import Experiment\n",
|
||||
"\n",
|
||||
"experiment = Experiment(ws, 'automl-many-models')\n",
|
||||
"\n",
|
||||
"print('Experiment name: ' + experiment.name)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 2.0 Data\n",
|
||||
"\n",
|
||||
"This notebook uses simulated orange juice sales data to walk you through the process of training many models on Azure Machine Learning using Automated ML. \n",
|
||||
"\n",
|
||||
"The time series data used in this example was simulated based on the University of Chicago's Dominick's Finer Foods dataset which featured two years of sales of 3 different orange juice brands for individual stores. The full simulated dataset includes 3,991 stores with 3 orange juice brands each thus allowing 11,973 models to be trained to showcase the power of the many models pattern.\n",
|
||||
"\n",
|
||||
" \n",
|
||||
"In this notebook, two datasets will be created: one with all 11,973 files and one with only 10 files that can be used to quickly test and debug. For each dataset, you'll be walked through the process of:\n",
|
||||
"\n",
|
||||
"1. Registering the blob container as a Datastore to the Workspace\n",
|
||||
"2. Registering a tabular dataset to the Workspace"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"nteract": {
|
||||
"transient": {
|
||||
"deleting": false
|
||||
}
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"### 2.1 Data Preparation\n",
|
||||
"The OJ data is available in the public blob container. The data is split to be used for training and for inferencing. For the current dataset, the data was split on time column ('WeekStarting') before and after '1992-5-28' .\n",
|
||||
"\n",
|
||||
"The container has\n",
|
||||
"<ol>\n",
|
||||
" <li><b>'oj-data-tabular'</b> and <b>'oj-inference-tabular'</b> folders that contains training and inference data respectively for the 11,973 models. </li>\n",
|
||||
" <li>It also has <b>'oj-data-small-tabular'</b> and <b>'oj-inference-small-tabular'</b> folders that has training and inference data for 10 models.</li>\n",
|
||||
"</ol>\n",
|
||||
"\n",
|
||||
"To create the [TabularDataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabular_dataset.tabulardataset?view=azure-ml-py) needed for the ParallelRunStep, you first need to register the blob container to the workspace."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"nteract": {
|
||||
"transient": {
|
||||
"deleting": false
|
||||
}
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"<b> To use your own data, put your own data in a blobstore folder. As shown it can be one file or multiple files. We can then register datastore using that blob as shown below.\n",
|
||||
" \n",
|
||||
"<h3> How sample data in blob store looks like</h3>\n",
|
||||
"\n",
|
||||
"['oj-data-tabular'](https://ms.portal.azure.com/#blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)</b>\n",
|
||||
"![image-4.png](mm-1.png)\n",
|
||||
"\n",
|
||||
"['oj-inference-tabular'](https://ms.portal.azure.com/#blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)\n",
|
||||
"![image-3.png](mm-2.png)\n",
|
||||
"\n",
|
||||
"['oj-data-small-tabular'](https://ms.portal.azure.com/#blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)\n",
|
||||
"\n",
|
||||
"![image-5.png](mm-3.png)\n",
|
||||
"\n",
|
||||
"['oj-inference-small-tabular'](https://ms.portal.azure.com/#blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)\n",
|
||||
"![image-6.png](mm-4.png)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### 2.2 Register the blob container as DataStore\n",
|
||||
"\n",
|
||||
"A Datastore is a place where data can be stored that is then made accessible to a compute either by means of mounting or copying the data to the compute target.\n",
|
||||
"\n",
|
||||
"Please refer to [Datastore](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.datastore(class)?view=azure-ml-py) documentation on how to access data from Datastore.\n",
|
||||
"\n",
|
||||
"In this next step, we will be registering blob storage as datastore to the Workspace."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core import Datastore\n",
|
||||
"\n",
|
||||
"# Please change the following to point to your own blob container and pass in account_key\n",
|
||||
"blob_datastore_name = \"automl_many_models\"\n",
|
||||
"container_name = \"automl-sample-notebook-data\"\n",
|
||||
"account_name = \"automlsamplenotebookdata\"\n",
|
||||
"\n",
|
||||
"oj_datastore = Datastore.register_azure_blob_container(workspace=ws, \n",
|
||||
" datastore_name=blob_datastore_name, \n",
|
||||
" container_name=container_name,\n",
|
||||
" account_name=account_name,\n",
|
||||
" create_if_not_exists=True) "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### 2.3 Using tabular datasets \n",
|
||||
"\n",
|
||||
"Now that the datastore is available from the Workspace, [TabularDataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabular_dataset.tabulardataset?view=azure-ml-py) can be created. Datasets in Azure Machine Learning are references to specific data in a Datastore. We are using TabularDataset, so that users who have their data which can be in one or many files (*.parquet or *.csv) and have not split up data according to group columns needed for training, can do so using out of box support for 'partiion_by' feature of TabularDataset shown in section 5.0 below."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"gather": {
|
||||
"logged": 1613007017296
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core import Dataset\n",
|
||||
"\n",
|
||||
"ds_name_small = 'oj-data-small-tabular'\n",
|
||||
"input_ds_small = Dataset.Tabular.from_delimited_files(path=oj_datastore.path(ds_name_small + '/'), validate=False)\n",
|
||||
"\n",
|
||||
"inference_name_small = 'oj-inference-small-tabular'\n",
|
||||
"inference_ds_small = Dataset.Tabular.from_delimited_files(path=oj_datastore.path(inference_name_small + '/'), validate=False)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 3.0 Build the training pipeline\n",
|
||||
"Now that the dataset, WorkSpace, and datastore are set up, we can put together a pipeline for training.\n",
|
||||
"\n",
|
||||
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Choose a compute target\n",
|
||||
"\n",
|
||||
"You will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-set-up-training-targets#amlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
|
||||
"\n",
|
||||
"\\*\\*Creation of AmlCompute takes approximately 5 minutes.**\n",
|
||||
"\n",
|
||||
"If the AmlCompute with that name is already in your workspace this code will skip the creation process. As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read this [article](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-manage-quotas) on the default limits and how to request more quota."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"gather": {
|
||||
"logged": 1613007037308
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
|
||||
"\n",
|
||||
"# Name your cluster\n",
|
||||
"compute_name = \"mm-compute\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"if compute_name in ws.compute_targets:\n",
|
||||
" compute_target = ws.compute_targets[compute_name]\n",
|
||||
" if compute_target and type(compute_target) is AmlCompute:\n",
|
||||
" print('Found compute target: ' + compute_name)\n",
|
||||
"else:\n",
|
||||
" print('Creating a new compute target...')\n",
|
||||
" provisioning_config = AmlCompute.provisioning_configuration(vm_size= \"STANDARD_D16S_V3\",\n",
|
||||
" max_nodes=20)\n",
|
||||
" # Create the compute target\n",
|
||||
" compute_target = ComputeTarget.create(\n",
|
||||
" ws, compute_name, provisioning_config)\n",
|
||||
"\n",
|
||||
" # Can poll for a minimum number of nodes and for a specific timeout.\n",
|
||||
" # If no min node count is provided it will use the scale settings for the cluster\n",
|
||||
" compute_target.wait_for_completion(\n",
|
||||
" show_output=True, min_node_count=None, timeout_in_minutes=20)\n",
|
||||
"\n",
|
||||
" # For a more detailed view of current cluster status, use the 'status' property\n",
|
||||
" print(compute_target.status.serialize())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Set up training parameters\n",
|
||||
"\n",
|
||||
"This dictionary defines the AutoML and many models settings. For this forecasting task we need to define several settings inncluding the name of the time column, the maximum forecast horizon, and the partition column name definition.\n",
|
||||
"\n",
|
||||
"| Property | Description|\n",
|
||||
"| :--------------- | :------------------- |\n",
|
||||
"| **task** | forecasting |\n",
|
||||
"| **primary_metric** | This is the metric that you want to optimize.<br> Forecasting supports the following primary metrics <br><i>spearman_correlation</i><br><i>normalized_root_mean_squared_error</i><br><i>r2_score</i><br><i>normalized_mean_absolute_error</i> |\n",
|
||||
"| **blocked_models** | Blocked models won't be used by AutoML. |\n",
|
||||
"| **iteration_timeout_minutes** | Maximum amount of time in minutes that the model can train. This is optional but provides customers with greater control on exit criteria. |\n",
|
||||
"| **iterations** | Number of models to train. This is optional but provides customers with greater control on exit criteria. |\n",
|
||||
"| **experiment_timeout_hours** | Maximum amount of time in hours that the experiment can take before it terminates. This is optional but provides customers with greater control on exit criteria. |\n",
|
||||
"| **label_column_name** | The name of the label column. |\n",
|
||||
"| **forecast_horizon** | The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly). Periods are inferred from your data. |\n",
|
||||
"| **n_cross_validations** | Number of cross validation splits. Rolling Origin Validation is used to split time-series in a temporally consistent way. |\n",
|
||||
"| **enable_early_stopping** | Flag to enable early termination if the score is not improving in the short term. |\n",
|
||||
"| **time_column_name** | The name of your time column. |\n",
|
||||
"| **enable_engineered_explanations** | Engineered feature explanations will be downloaded if enable_engineered_explanations flag is set to True. By default it is set to False to save storage space. |\n",
|
||||
"| **time_series_id_column_name** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. |\n",
|
||||
"| **track_child_runs** | Flag to disable tracking of child runs. Only best run is tracked if the flag is set to False (this includes the model and metrics of the run). |\n",
|
||||
"| **pipeline_fetch_max_batch_size** | Determines how many pipelines (training algorithms) to fetch at a time for training, this helps reduce throttling when training at large scale. |\n",
|
||||
"| **partition_column_names** | The names of columns used to group your models. For timeseries, the groups must not split up individual time-series. That is, each group must contain one or more whole time-series. |"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"gather": {
|
||||
"logged": 1613007061544
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.train.automl.runtime._many_models.many_models_parameters import ManyModelsTrainParameters\n",
|
||||
"\n",
|
||||
"partition_column_names = ['Store', 'Brand']\n",
|
||||
"automl_settings = {\n",
|
||||
" \"task\" : 'forecasting',\n",
|
||||
" \"primary_metric\" : 'normalized_root_mean_squared_error',\n",
|
||||
" \"iteration_timeout_minutes\" : 10, # This needs to be changed based on the dataset. We ask customer to explore how long training is taking before settings this value\n",
|
||||
" \"iterations\" : 15,\n",
|
||||
" \"experiment_timeout_hours\" : 0.25,\n",
|
||||
" \"label_column_name\" : 'Quantity',\n",
|
||||
" \"n_cross_validations\" : 3,\n",
|
||||
" \"time_column_name\": 'WeekStarting',\n",
|
||||
" \"drop_column_names\": 'Revenue',\n",
|
||||
" \"max_horizon\" : 6,\n",
|
||||
" \"grain_column_names\": partition_column_names,\n",
|
||||
" \"track_child_runs\": False,\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"mm_paramters = ManyModelsTrainParameters(automl_settings=automl_settings, partition_column_names=partition_column_names)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Set up many models pipeline"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Parallel run step is leveraged to train multiple models at once. To configure the ParallelRunConfig you will need to determine the appropriate number of workers and nodes for your use case. The process_count_per_node is based off the number of cores of the compute VM. The node_count will determine the number of master nodes to use, increasing the node count will speed up the training process.\n",
|
||||
"\n",
|
||||
"| Property | Description|\n",
|
||||
"| :--------------- | :------------------- |\n",
|
||||
"| **experiment** | The experiment used for training. |\n",
|
||||
"| **train_data** | The file dataset to be used as input to the training run. |\n",
|
||||
"| **node_count** | The number of compute nodes to be used for running the user script. We recommend to start with 3 and increase the node_count if the training time is taking too long. |\n",
|
||||
"| **process_count_per_node** | Process count per node, we recommend 2:1 ratio for number of cores: number of processes per node. eg. If node has 16 cores then configure 8 or less process count per node or optimal performance. |\n",
|
||||
"| **train_pipeline_parameters** | The set of configuration parameters defined in the previous section. |\n",
|
||||
"\n",
|
||||
"Calling this method will create a new aggregated dataset which is generated dynamically on pipeline execution."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder\n",
"\n",
"\n",
"training_pipeline_steps = AutoMLPipelineBuilder.get_many_models_train_steps(\n",
"    experiment=experiment,\n",
"    train_data=input_ds_small,\n",
"    compute_target=compute_target,\n",
"    node_count=2,\n",
"    process_count_per_node=8,\n",
"    run_invocation_timeout=920,\n",
"    train_pipeline_parameters=mm_parameters,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.core import Pipeline\n",
"\n",
"training_pipeline = Pipeline(ws, steps=training_pipeline_steps)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Submit the pipeline to run\n",
"Next we submit our pipeline to run. The whole training pipeline takes about 40 minutes using a STANDARD_D16S_V3 VM with our current ParallelRunConfig settings."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"training_run = experiment.submit(training_pipeline)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"training_run.wait_for_completion(show_output=False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Check the run status. If training_run is in a completed state, continue to forecasting; if it is in another state, check the portal for failures."
]
},
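For readers following along outside the notebook, here is a minimal sketch of that status check, assuming the `training_run` object created by the submit cell above:

```python
# Minimal sketch: inspect the pipeline run state programmatically
# (assumes `training_run` was returned by experiment.submit above).
status = training_run.get_status()
if status == "Completed":
    print("Training finished; continue to forecasting.")
else:
    print("Run is in state '%s'; check the portal for failures." % status)
```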
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 5.0 Publish and schedule the train pipeline (Optional)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 5.1 Publish the pipeline\n",
"\n",
"Once you have a pipeline you're happy with, you can publish it so you can call it programmatically later on. See this [tutorial](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-create-your-first-pipeline#publish-a-pipeline) for additional information on publishing and calling pipelines."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# published_pipeline = training_pipeline.publish(name = 'automl_train_many_models',\n",
"#    description = 'train many models',\n",
"#    version = '1',\n",
"#    continue_on_step_failure = False)"
]
},
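Once published, the pipeline exposes a REST endpoint. A hypothetical invocation sketch, assuming the `published_pipeline` from the commented cell above has actually been created:

```python
# Hypothetical sketch: trigger a published pipeline over REST.
# Assumes `published_pipeline` exists (uncomment the cell above first).
import requests
from azureml.core.authentication import InteractiveLoginAuthentication

auth_header = InteractiveLoginAuthentication().get_authentication_header()
response = requests.post(published_pipeline.endpoint,
                         headers=auth_header,
                         json={"ExperimentName": experiment.name})
print("Submitted pipeline run id:", response.json().get("Id"))
```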
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 5.2 Schedule the pipeline\n",
"You can also [schedule the pipeline](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-schedule-pipelines) to run on a time-based or change-based schedule. This could be used to automatically retrain models every month or based on another trigger such as data drift."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# from azureml.pipeline.core import Schedule, ScheduleRecurrence\n",
" \n",
"# training_pipeline_id = published_pipeline.id\n",
"\n",
"# recurrence = ScheduleRecurrence(frequency=\"Month\", interval=1, start_time=\"2020-01-01T09:00:00\")\n",
"# recurring_schedule = Schedule.create(ws, name=\"automl_training_recurring_schedule\", \n",
"# description=\"Schedule Training Pipeline to run on the first day of every month\",\n",
"# pipeline_id=training_pipeline_id, \n",
"# experiment_name=experiment.name, \n",
"# recurrence=recurrence)"
]
},
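Schedules keep firing until they are disabled. A small sketch, assuming a schedule like the one above already exists in the workspace:

```python
# Sketch: list active schedules and disable one by name
# (assumes `ws` and a previously created schedule).
from azureml.pipeline.core import Schedule

for schedule in Schedule.list(ws):
    if schedule.name == "automl_training_recurring_schedule":
        schedule.disable(wait_for_provisioning=True)
        print("Disabled schedule:", schedule.id)
```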
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 6.0 Forecasting"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Set up output dataset for inference data\n",
"The output of inference can be represented as an [OutputFileDatasetConfig](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.output_dataset_config.outputdatasetconfig?view=azure-ml-py) object, and an OutputFileDatasetConfig can be registered as a dataset."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.data import OutputFileDatasetConfig\n",
"output_inference_data_ds = OutputFileDatasetConfig(name='many_models_inference_output', destination=(dstore, 'oj/inference_data/')).register_on_complete(name='oj_inference_data_ds')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"For many models we need to provide the ManyModelsInferenceParameters object.\n",
"\n",
"#### ManyModelsInferenceParameters arguments\n",
"| Property | Description|\n",
"| :--------------- | :------------------- |\n",
"| **partition_column_names** | List of column names that identify groups. |\n",
"| **target_column_name** | \\[Optional] Column name only if the inference dataset has the target. |\n",
"| **time_column_name** | \\[Optional] Column name only if it is timeseries. |\n",
"| **many_models_run_id** | \\[Optional] Many models run id where models were trained. |\n",
"\n",
"#### get_many_models_batch_inference_steps arguments\n",
"| Property | Description|\n",
"| :--------------- | :------------------- |\n",
"| **experiment** | The experiment used for the inference run. |\n",
"| **inference_data** | The data to use for inferencing. It should have the same schema as used for training. |\n",
"| **compute_target** | The compute target that runs the inference pipeline. |\n",
"| **node_count** | The number of compute nodes to be used for running the user script. We recommend starting with the number of cores per node (varies by compute SKU). |\n",
"| **train_run_id** | \\[Optional] The run id of the hierarchy training; by default it is the latest successful many models training run in the experiment. |\n",
"| **train_experiment_name** | \\[Optional] The train experiment that contains the train pipeline. This is only needed when the train pipeline is not in the same experiment as the inference pipeline. |\n",
"| **process_count_per_node** | \\[Optional] The number of processes per node; by default it is 4. |"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder\n",
"from azureml.train.automl.runtime._many_models.many_models_parameters import ManyModelsInferenceParameters\n",
"\n",
"mm_parameters = ManyModelsInferenceParameters(\n",
"    partition_column_names=['Store', 'Brand'],\n",
"    time_column_name=\"WeekStarting\",\n",
"    target_column_name=\"Quantity\"\n",
")\n",
"\n",
"inference_steps = AutoMLPipelineBuilder.get_many_models_batch_inference_steps(\n",
"    experiment=experiment,\n",
"    inference_data=inference_ds_small,\n",
"    node_count=2,\n",
"    process_count_per_node=8,\n",
"    compute_target=compute_target,\n",
"    run_invocation_timeout=300,\n",
"    output_datastore=output_inference_data_ds,\n",
"    train_run_id=training_run.id,\n",
"    train_experiment_name=training_run.experiment.name,\n",
"    inference_pipeline_parameters=mm_parameters,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.core import Pipeline\n",
"\n",
"inference_pipeline = Pipeline(ws, steps=inference_steps)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"inference_run = experiment.submit(inference_pipeline)\n",
"inference_run.wait_for_completion(show_output=False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Retrieve results\n",
"\n",
"The forecasting pipeline forecasts the orange juice quantity for a Store by Brand. The pipeline returns one file with the predictions for each store and outputs the result to the forecasting_output Blob container. The details of the blob container are listed in 'forecasting_output.txt' under Outputs+logs.\n",
"\n",
"The following code snippet:\n",
"1. Downloads the contents of the output folder that is passed in the parallel run step \n",
"2. Reads the parallel_run_step.txt file that has the predictions into a pandas dataframe, and \n",
"3. Displays the top 10 rows of the predictions"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.contrib.automl.pipeline.steps.utilities import get_output_from_mm_pipeline\n",
"\n",
"forecasting_results_name = \"forecasting_results\"\n",
"forecasting_output_name = \"many_models_inference_output\"\n",
"forecast_file = get_output_from_mm_pipeline(inference_run, forecasting_results_name, forecasting_output_name)\n",
"df = pd.read_csv(forecast_file, delimiter=\" \", header=None)\n",
"df.columns = [\"Week Starting\", \"Store\", \"Brand\", \"Quantity\", \"Advert\", \"Price\", \"Revenue\", \"Predicted\"]\n",
"print(\"Prediction has \", df.shape[0], \" rows. Here the first 10 rows are being displayed.\")\n",
"df.head(10)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 7.0 Publish and schedule the inference pipeline (Optional)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 7.1 Publish the pipeline\n",
"\n",
"Once you have a pipeline you're happy with, you can publish it so you can call it programmatically later on. See this [tutorial](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-create-your-first-pipeline#publish-a-pipeline) for additional information on publishing and calling pipelines."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# published_pipeline_inf = inference_pipeline.publish(name = 'automl_forecast_many_models',\n",
"#    description = 'forecast many models',\n",
"#    version = '1',\n",
"#    continue_on_step_failure = False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 7.2 Schedule the pipeline\n",
"You can also [schedule the pipeline](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-schedule-pipelines) to run on a time-based or change-based schedule. This could be used to automatically retrain or forecast models every month or based on another trigger such as data drift."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# from azureml.pipeline.core import Schedule, ScheduleRecurrence\n",
" \n",
"# forecasting_pipeline_id = published_pipeline_inf.id\n",
"\n",
"# recurrence = ScheduleRecurrence(frequency=\"Month\", interval=1, start_time=\"2020-01-01T09:00:00\")\n",
"# recurring_schedule = Schedule.create(ws, name=\"automl_forecasting_recurring_schedule\", \n",
"# description=\"Schedule Forecasting Pipeline to run on the first day of every month\",\n",
"# pipeline_id=forecasting_pipeline_id, \n",
"# experiment_name=experiment.name, \n",
"# recurrence=recurrence)"
]
}
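A change-based alternative, sketched under the assumption that new inference data lands in the datastore registered earlier (`dstore` and an actual `published_pipeline_inf` are assumed to exist):

```python
# Hypothetical sketch: trigger the forecasting pipeline when new files
# appear on the datastore instead of on a timer.
from azureml.pipeline.core import Schedule

reactive_schedule = Schedule.create(ws, name="automl_forecasting_on_new_data",
                                    pipeline_id=published_pipeline_inf.id,
                                    experiment_name=experiment.name,
                                    datastore=dstore,
                                    path_on_datastore="oj/inference_data/",
                                    polling_interval=60)  # minutes
```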
],
"metadata": {
"authors": [
{
"name": "jialiu"
}
],
"categories": [
"how-to-use-azureml",
"automated-machine-learning"
],
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.8"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

@ -0,0 +1,4 @@

name: auto-ml-forecasting-many-models
dependencies:
- pip:
  - azureml-sdk
Binary data: how-to-use-azureml/automated-machine-learning/forecasting-many-models/mm-1.png (new file, 176 KiB; binary file not shown)
Binary data: how-to-use-azureml/automated-machine-learning/forecasting-many-models/mm-2.png (new file, 165 KiB; binary file not shown)
Binary data: how-to-use-azureml/automated-machine-learning/forecasting-many-models/mm-3.png (new file, 162 KiB; binary file not shown)
Binary data: how-to-use-azureml/automated-machine-learning/forecasting-many-models/mm-4.png (new file, 166 KiB; binary file not shown)
@ -81,7 +81,7 @@

"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.34.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

@ -728,14 +728,16 @@

"X_query[time_column_name] = X_query[time_column_name].astype(str)\n",
"# The Service object accepts a complex dictionary, which is internally converted to a JSON string.\n",
"# The section 'data' contains the data frame in the form of a dictionary.\n",
"test_sample = json.dumps({\"data\": json.loads(X_query.to_json(orient=\"records\"))})\n",
"sample_quantiles=[0.025,0.975]\n",
"test_sample = json.dumps({'data': X_query.to_dict(orient='records'), 'quantiles': sample_quantiles})\n",
"response = aci_service.run(input_data = test_sample)\n",
"# translate from networkese to datascientese\n",
"try: \n",
"    res_dict = json.loads(response)\n",
"    y_fcst_all = pd.DataFrame(res_dict['index'])\n",
"    y_fcst_all[time_column_name] = pd.to_datetime(y_fcst_all[time_column_name], unit = 'ms')\n",
"    y_fcst_all['forecast'] = res_dict['forecast'] \n",
"    y_fcst_all['forecast'] = res_dict['forecast']\n",
"    y_fcst_all['prediction_interval'] = res_dict['prediction_interval']\n",
"except:\n",
"    print(res_dict)"
]
@ -5,55 +5,10 @@ compute instance.

"""

import argparse
import pandas as pd
import numpy as np
from azureml.core import Dataset, Run
from azureml.automl.core.shared.constants import TimeSeriesInternal
from sklearn.externals import joblib
from pandas.tseries.frequencies import to_offset


def align_outputs(y_predicted, X_trans, X_test, y_test, target_column_name,
                  predicted_column_name='predicted',
                  horizon_colname='horizon_origin'):
    """
    Demonstrates how to get the output aligned to the inputs
    using pandas indexes. Helps understand what happened if
    the output's shape differs from the input shape, or if
    the data got re-sorted by time and grain during forecasting.

    Typical causes of misalignment are:
    * we predicted some periods that were missing in actuals -> drop from eval
    * model was asked to predict past max_horizon -> increase max horizon
    * data at start of X_test was needed for lags -> provide previous periods
    """

    if (horizon_colname in X_trans):
        df_fcst = pd.DataFrame({predicted_column_name: y_predicted,
                                horizon_colname: X_trans[horizon_colname]})
    else:
        df_fcst = pd.DataFrame({predicted_column_name: y_predicted})

    # y and X outputs are aligned by forecast() function contract
    df_fcst.index = X_trans.index

    # align original X_test to y_test
    X_test_full = X_test.copy()
    X_test_full[target_column_name] = y_test

    # X_test_full's index does not include origin, so reset for merge
    df_fcst.reset_index(inplace=True)
    X_test_full = X_test_full.reset_index().drop(columns='index')
    together = df_fcst.merge(X_test_full, how='right')

    # drop rows where prediction or actuals are nan
    # happens because of missing actuals
    # or at edges of time due to lags/rolling windows
    clean = together[together[[target_column_name,
                               predicted_column_name]].notnull().all(axis=1)]
    return(clean)


parser = argparse.ArgumentParser()
parser.add_argument(
    '--target_column_name', type=str, dest='target_column_name',

@ -77,13 +32,25 @@ y_test = X_test.pop(target_column_name).values

# generate forecast
fitted_model = joblib.load('model.pkl')
y_predictions, X_trans = fitted_model.forecast(X_test)

# align output
df_all = align_outputs(y_predictions, X_trans, X_test, y_test, target_column_name)
# We have default quantile values set as below (95th percentile)
quantiles = [0.025, 0.5, 0.975]
predicted_column_name = 'predicted'
PI = 'prediction_interval'
fitted_model.quantiles = quantiles
pred_quantiles = fitted_model.forecast_quantiles(X_test)
pred_quantiles[PI] = pred_quantiles[[min(quantiles), max(quantiles)]].apply(lambda x: '[{}, {}]'.format(x[0],
                                                                                                        x[1]), axis=1)
X_test[target_column_name] = y_test
X_test[PI] = pred_quantiles[PI]
X_test[predicted_column_name] = pred_quantiles[0.5]
# drop rows where prediction or actuals are nan
# happens because of missing actuals
# or at edges of time due to lags/rolling windows
clean = X_test[X_test[[target_column_name,
                       predicted_column_name]].notnull().all(axis=1)]

file_name = 'outputs/predictions.csv'
export_csv = df_all.to_csv(file_name, header=True, index=False)  # added Index
export_csv = clean.to_csv(file_name, header=True, index=False)  # added Index

# Upload the predictions into artifacts
run.upload_file(name=file_name, path_or_stream=file_name)

@ -27,20 +27,31 @@ ws = run.experiment.workspace

# get the input dataset by id
test_dataset = Dataset.get_by_id(ws, id=test_dataset_id)

X_test_df = test_dataset.drop_columns(columns=[target_column_name]).to_pandas_dataframe().reset_index(drop=True)
X_test = test_dataset.drop_columns(columns=[target_column_name]).to_pandas_dataframe().reset_index(drop=True)
y_test_df = test_dataset.with_timestamp_columns(None).keep_columns(columns=[target_column_name]).to_pandas_dataframe()

# generate forecast
fitted_model = joblib.load('model.pkl')
y_pred, X_trans = fitted_model.forecast(X_test_df)

# rename target column
X_trans.reset_index(drop=False, inplace=True)
X_trans.rename(columns={TimeSeriesInternal.DUMMY_TARGET_COLUMN: 'predicted'}, inplace=True)
X_trans['actual'] = y_test_df[target_column_name].values
# We have default quantile values set as below (95th percentile)
quantiles = [0.025, 0.5, 0.975]
predicted_column_name = 'predicted'
PI = 'prediction_interval'
fitted_model.quantiles = quantiles
pred_quantiles = fitted_model.forecast_quantiles(X_test)
pred_quantiles[PI] = pred_quantiles[[min(quantiles), max(quantiles)]].apply(lambda x: '[{}, {}]'.format(x[0],
                                                                                                        x[1]), axis=1)
X_test[target_column_name] = y_test_df[target_column_name]
X_test[PI] = pred_quantiles[PI]
X_test[predicted_column_name] = pred_quantiles[0.5]
# drop rows where prediction or actuals are nan
# happens because of missing actuals
# or at edges of time due to lags/rolling windows
clean = X_test[X_test[[target_column_name,
                       predicted_column_name]].notnull().all(axis=1)]
clean.rename(columns={target_column_name: 'actual'}, inplace=True)

file_name = 'outputs/predictions.csv'
export_csv = X_trans.to_csv(file_name, header=True, index=False)  # added Index
export_csv = clean.to_csv(file_name, header=True, index=False)  # added Index

# Upload the predictions into artifacts
run.upload_file(name=file_name, path_or_stream=file_name)
@ -96,7 +96,7 @@

"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.34.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

@ -95,7 +95,7 @@

"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.34.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

@ -92,7 +92,7 @@

"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.34.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
@ -0,0 +1,44 @@

# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license.

from azureml.core.run import Run
import joblib
import os
import shap
import xgboost

OUTPUT_DIR = './outputs/'
os.makedirs(OUTPUT_DIR, exist_ok=True)

run = Run.get_context()

# get a dataset on income prediction
X, y = shap.datasets.adult()

# train an XGBoost model (but any other tree model type should work)
model = xgboost.XGBClassifier()
model.fit(X, y)

explainer = shap.explainers.GPUTree(model, X)
X_shap = X[:100]
shap_values = explainer(X_shap)

print("computed shap values:")
print(shap_values)

# write X_shap out as a pickle file for later visualization
x_shap_pkl = 'x_shap.pkl'
with open(x_shap_pkl, 'wb') as file:
    joblib.dump(value=X_shap, filename=os.path.join(OUTPUT_DIR, x_shap_pkl))
run.upload_file('x_shap_adult_census.pkl', os.path.join(OUTPUT_DIR, x_shap_pkl))

model_file_name = 'xgboost_.pkl'
# save model in the outputs folder so it automatically gets uploaded
with open(model_file_name, 'wb') as file:
    joblib.dump(value=model, filename=os.path.join(OUTPUT_DIR,
                                                   model_file_name))

# register the model
run.upload_file('xgboost_model.pkl', os.path.join('./outputs/', model_file_name))
original_model = run.register_model(model_name='xgboost_with_gpu_tree_explainer',
                                    model_path='xgboost_model.pkl')
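After the remote run completes, the uploaded artifacts can be pulled back locally. A small sketch, assuming a completed `run` object for this script (file names match the upload_file calls above):

```python
# Sketch: download an artifact uploaded by the script above
# and re-load it for local inspection.
import joblib

run.download_file('x_shap_adult_census.pkl', 'x_shap_adult_census.pkl')
X_shap_local = joblib.load('x_shap_adult_census.pkl')
print(X_shap_local.shape)
```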
@ -0,0 +1,297 @@

{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/classification-text-dnn/auto-ml-classification-text-dnn.png)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Explain tree-based models on GPU using GPUTreeExplainer\n",
"\n",
"_**This notebook illustrates how to use shap's GPUTreeExplainer on an Azure GPU machine.**_\n",
"\n",
"Problem: Train a tree-based model and explain the model on an Azure GPU machine using the GPUTreeExplainer.\n",
"\n",
"---\n",
"\n",
"## Table of Contents\n",
"\n",
"1. [Introduction](#Introduction)\n",
"1. [Setup](#Setup)\n",
"1. [Run model explainer locally at training time](#Explain)\n",
"    1. Apply feature transformations\n",
"    1. Train a binary classification model\n",
"    1. Explain the model on raw features\n",
"    1. Generate global explanations\n",
"    1. Generate local explanations\n",
"1. [Visualize explanations](#Visualize)\n",
"1. [Deploy model and scoring explainer](#Deploy)\n",
"1. [Next steps](#Next)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Introduction\n",
"This notebook demonstrates how to use the GPUTreeExplainer on some simple datasets. Like the TreeExplainer, the GPUTreeExplainer is specifically designed for tree-based machine learning models, but it accelerates the computations using NVIDIA GPUs.\n",
"\n",
"Make sure you have executed the [configuration](../../../configuration.ipynb) before running this notebook.\n",
"\n",
"Notebook synopsis:\n",
"\n",
"1. Creating an Experiment in an existing Workspace\n",
"2. Configuration and remote run with a GPU machine"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Setup"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import logging\n",
"import os\n",
"import shutil\n",
"\n",
"import pandas as pd\n",
"\n",
"import azureml.core\n",
"from azureml.core.experiment import Experiment\n",
"from azureml.core.workspace import Workspace\n",
"from azureml.core.dataset import Dataset\n",
"from azureml.core.compute import AmlCompute\n",
"from azureml.core.compute import ComputeTarget\n",
"from azureml.core.run import Run\n",
"from azureml.core.model import Model"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"As part of the setup you have already created a <b>Workspace</b>. To run the script, you also need to create an <b>Experiment</b>. An Experiment corresponds to a prediction problem you are trying to solve, while a Run corresponds to a specific approach to the problem."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ws = Workspace.from_config()\n",
"\n",
"# Choose an experiment name.\n",
"experiment_name = 'gpu-tree-explainer'\n",
"\n",
"experiment = Experiment(ws, experiment_name)\n",
"\n",
"output = {}\n",
"output['Subscription ID'] = ws.subscription_id\n",
"output['Workspace Name'] = ws.name\n",
"output['Resource Group'] = ws.resource_group\n",
"output['Location'] = ws.location\n",
"output['Experiment Name'] = experiment.name\n",
"pd.set_option('display.max_colwidth', -1)\n",
"outputDf = pd.DataFrame(data = output, index = [''])\n",
"outputDf.T"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create project directory\n",
"\n",
"Create a directory that will contain all the necessary code from your local machine that you will need access to on the remote resource. This includes the training script, and any additional files your training script depends on."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import shutil\n",
"\n",
"project_folder = './azureml-shap-gpu-tree-explainer'\n",
"os.makedirs(project_folder, exist_ok=True)\n",
"shutil.copy('gpu_tree_explainer.py', project_folder)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Set up a compute cluster\n",
"This section uses a user-provided compute cluster (named \"gpu-shap-cluster\" in this example). If a cluster with this name does not exist in the user's workspace, the code below will create a new cluster. You can choose the parameters of the cluster as mentioned in the comments."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
"from azureml.core.compute_target import ComputeTargetException\n",
"\n",
"num_nodes = 1\n",
"\n",
"# Choose a name for your cluster.\n",
"amlcompute_cluster_name = \"gpu-shap-cluster\"\n",
"\n",
"# Verify that cluster does not exist already\n",
"try:\n",
"    compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
"    print('Found existing cluster, use it.')\n",
"except ComputeTargetException:\n",
"    compute_config = AmlCompute.provisioning_configuration(vm_size = \"STANDARD_NC6\",\n",
"                                                           # To use GPUTreeExplainer, select a GPU such as \"STANDARD_NC6\" \n",
"                                                           # or similar GPU option\n",
"                                                           # available in your workspace\n",
"                                                           max_nodes = num_nodes)\n",
"    compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
"\n",
"compute_target.wait_for_completion(show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Configure & Run"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.runconfig import RunConfiguration\n",
"from azureml.core.conda_dependencies import CondaDependencies\n",
"\n",
"# Create a new RunConfig object\n",
"run_config = RunConfiguration(framework=\"python\")\n",
"\n",
"# Set compute target to AmlCompute target created in previous step\n",
"run_config.target = amlcompute_cluster_name\n",
"\n",
"from azureml.core import Environment\n",
"\n",
"environment_name = \"shap-gpu-tree\"\n",
"\n",
"env = Environment(environment_name)\n",
"\n",
"env.docker.enabled = True\n",
"env.docker.base_image = None\n",
"env.docker.base_dockerfile = \"\"\"\n",
"FROM rapidsai/rapidsai:cuda10.0-devel-ubuntu18.04\n",
"RUN apt-get update && \\\n",
"apt-get install -y fuse && \\\n",
"apt-get install -y build-essential && \\\n",
"apt-get install -y python3-dev && \\\n",
"source activate rapids && \\\n",
"apt-get install -y g++ && \\\n",
"printenv && \\\n",
"echo \"which nvcc: \" && \\\n",
"which nvcc && \\\n",
"pip install azureml-defaults && \\\n",
"pip install azureml-telemetry && \\\n",
"cd /usr/local/src && \\\n",
"git clone https://github.com/slundberg/shap && \\\n",
"cd shap && \\\n",
"mkdir build && \\\n",
"python setup.py install --user && \\\n",
"pip uninstall -y xgboost && \\\n",
"rm /conda/envs/rapids/lib/libxgboost.so && \\\n",
"pip install xgboost==1.4.2\n",
"\"\"\"\n",
"\n",
"env.python.user_managed_dependencies = True\n",
"\n",
"from azureml.core import Run\n",
"from azureml.core import ScriptRunConfig\n",
"\n",
"src = ScriptRunConfig(source_directory=project_folder, \n",
"                      script='gpu_tree_explainer.py', \n",
"                      compute_target=amlcompute_cluster_name,\n",
"                      environment=env) \n",
"run = experiment.submit(config=src)\n",
"run"
]
}
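A short follow-up sketch (assuming the `run` object submitted above): block until the remote explanation run finishes and list the files it produced.

```python
# Sketch: wait for the remote run and inspect its outputs
# (assumes `run` from the submit cell above).
run.wait_for_completion(show_output=True)
print(run.get_file_names())
```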
],
"metadata": {
"authors": [
{
"name": "ilmat"
}
],
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.8"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

@ -0,0 +1,5 @@

name: train-explain-model-gpu-tree-explainer
dependencies:
- pip:
  - azureml-sdk
  - azureml-interpret

@ -11,4 +11,4 @@ dependencies:

- matplotlib
- azureml-dataset-runtime
- ipywidgets
- raiwidgets~=0.7.0
- raiwidgets~=0.10.0

@ -10,4 +10,4 @@ dependencies:

- ipython
- matplotlib
- ipywidgets
- raiwidgets~=0.7.0
- raiwidgets~=0.10.0

@ -10,4 +10,4 @@ dependencies:

- ipython
- matplotlib
- ipywidgets
- raiwidgets~=0.7.0
- raiwidgets~=0.10.0

@ -12,4 +12,4 @@ dependencies:

- azureml-dataset-runtime
- azureml-core
- ipywidgets
- raiwidgets~=0.7.0
- raiwidgets~=0.10.0
@ -27,6 +27,7 @@

"2. Running an arbitrary Python script that the customer has in DBFS\n",
"3. Running an arbitrary Python script that is available on local computer (will upload to DBFS, and then run in Databricks) \n",
"4. Running a JAR job that the customer has in DBFS.\n",
"5. How to get run context in a Databricks interactive cluster\n",
"\n",
"## Before you begin:\n",
"\n",

@ -699,14 +700,14 @@

]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 5. Running demo notebook already added to the Databricks workspace using existing cluster\n",
"First you need to register the DBFS datastore and make sure path_on_datastore exists in the Databricks file system; you can browse the files by referring to [this](https://docs.azuredatabricks.net/user-guide/dbfs-databricks-file-system.html).\n",
"\n",
"Find existing_cluster_id by opening the Azure Databricks UI on the Clusters page; in the URL you will find a string connected with '-' right after \"clusters/\"."
],
"cell_type": "markdown",
"metadata": {}
]
},
{
"cell_type": "code",

@ -745,11 +746,11 @@

]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Build and submit the Experiment"
],
"cell_type": "markdown",
"metadata": {}
]
},
{
"cell_type": "code",

@ -764,11 +765,11 @@

]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### View Run Details"
],
"cell_type": "markdown",
"metadata": {}
]
},
{
"cell_type": "code",

@ -781,14 +782,14 @@

]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 6. Running a Python script in Databricks that currenlty is in local computer with existing cluster\n",
"### 6. Running a Python script in Databricks that is currently in local computer with existing cluster\n",
"When you access azure blob or data lake storage from an existing (interactive) cluster, you need to ensure the Spark configuration is set up correctly to access this storage and this set up may require the cluster to be restarted.\n",
"\n",
"If you set permit_cluster_restart to True, AML will check if the spark configuration needs to be updated and restart the cluster for you if required. This will ensure that the storage can be correctly accessed from the Databricks cluster."
],
"cell_type": "markdown",
"metadata": {}
]
},
{
"cell_type": "code",

@ -813,11 +814,11 @@

]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Build and submit the Experiment"
],
"cell_type": "markdown",
"metadata": {}
]
},
{
"cell_type": "code",

@ -832,11 +833,11 @@

]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### View Run Details"
],
"cell_type": "markdown",
"metadata": {}
]
},
{
"cell_type": "code",

@ -849,18 +850,71 @@

]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### How to get run context in a Databricks interactive cluster\n",
"\n",
"Users are accustomed to calling Run.get_context() to retrieve the parent_run_id for a given run_id. In DatabricksStep, however, a little more work is required to achieve this.\n",
"\n",
"The solution is to parse the script arguments and set corresponding environment variables to access the run context from within Databricks.\n",
"Note that this workaround is not required for job clusters. \n",
"\n",
"Here is a code sample:"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"```python\n",
"from azureml.core import Run\n",
"import argparse\n",
"import os\n",
"\n",
"\n",
"def populate_environ():\n",
"    parser = argparse.ArgumentParser(description='Process arguments passed to script')\n",
"    parser.add_argument('--AZUREML_SCRIPT_DIRECTORY_NAME')\n",
"    parser.add_argument('--AZUREML_RUN_TOKEN')\n",
"    parser.add_argument('--AZUREML_RUN_TOKEN_EXPIRY')\n",
"    parser.add_argument('--AZUREML_RUN_ID')\n",
"    parser.add_argument('--AZUREML_ARM_SUBSCRIPTION')\n",
"    parser.add_argument('--AZUREML_ARM_RESOURCEGROUP')\n",
"    parser.add_argument('--AZUREML_ARM_WORKSPACE_NAME')\n",
"    parser.add_argument('--AZUREML_ARM_PROJECT_NAME')\n",
"    parser.add_argument('--AZUREML_SERVICE_ENDPOINT')\n",
"\n",
"    args = parser.parse_args()\n",
"    os.environ['AZUREML_SCRIPT_DIRECTORY_NAME'] = args.AZUREML_SCRIPT_DIRECTORY_NAME\n",
"    os.environ['AZUREML_RUN_TOKEN'] = args.AZUREML_RUN_TOKEN\n",
"    os.environ['AZUREML_RUN_TOKEN_EXPIRY'] = args.AZUREML_RUN_TOKEN_EXPIRY\n",
"    os.environ['AZUREML_RUN_ID'] = args.AZUREML_RUN_ID\n",
"    os.environ['AZUREML_ARM_SUBSCRIPTION'] = args.AZUREML_ARM_SUBSCRIPTION\n",
"    os.environ['AZUREML_ARM_RESOURCEGROUP'] = args.AZUREML_ARM_RESOURCEGROUP\n",
"    os.environ['AZUREML_ARM_WORKSPACE_NAME'] = args.AZUREML_ARM_WORKSPACE_NAME\n",
"    os.environ['AZUREML_ARM_PROJECT_NAME'] = args.AZUREML_ARM_PROJECT_NAME\n",
"    os.environ['AZUREML_SERVICE_ENDPOINT'] = args.AZUREML_SERVICE_ENDPOINT\n",
"\n",
"populate_environ()\n",
"run = Run.get_context(allow_offline=False)\n",
"print(run._run_dto[\"parent_run_id\"])\n",
"```"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Next: ADLA as a Compute Target\n",
"To use ADLA as a compute target from Azure Machine Learning Pipeline, an AdlaStep is used. This [notebook](https://aka.ms/pl-adla) demonstrates the use of AdlaStep in Azure Machine Learning Pipeline."
],
"cell_type": "markdown",
"metadata": {}
]
}
],
"metadata": {
"authors": [
{
"name": "sanpil"
"name": "shbijlan"
}
],
"category": "tutorial",
@ -40,7 +40,7 @@ def get_num(arg_num, file_num):


def write_num_to_file(num, file_path):
    if file_path is not None and file_path is not '':
    if file_path is not None and file_path != '':
        output_dir = file_path
    else:
        output_dir = '.'

@ -258,7 +258,7 @@

"    - azureml-defaults\n",
"    - azureml-opendatasets\n",
"    - chainer==5.1.0\n",
"    - cupy-cuda90==5.1.0\n",
"    - cupy-cuda100==5.1.0\n",
"    - mpi4py==3.0.0\n",
"    - pytest"
]

@ -275,7 +275,7 @@

"chainer_env = Environment.from_conda_specification(name = 'chainer-5.1.0-gpu', file_path = './conda_dependencies.yml')\n",
"\n",
"# Specify a GPU base image\n",
"chainer_env.docker.base_image = 'mcr.microsoft.com/azureml/intelmpi2018.3-cuda9.0-cudnn7-ubuntu16.04'\n",
"chainer_env.docker.base_image = 'mcr.microsoft.com/azureml/openmpi3.1.2-cuda10.0-cudnn7-ubuntu18.04'\n",
"\n",
"docker_config = DockerConfiguration(use_docker=True)"
]

@ -95,7 +95,7 @@

"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.34.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

@ -386,10 +386,9 @@

"\n",
"# Set compute target to AmlCompute\n",
"conda_run_config.target = compute_target\n",
"conda_run_config.environment.docker.enabled = True\n",
"\n",
"# specify CondaDependencies obj\n",
"conda_run_config.environment.python.conda_dependencies = automl_run.get_environment().python.conda_dependencies"
"conda_run_config.environment = automl_run.get_environment()"
]
},
{

@ -589,7 +588,7 @@

"from azureml.responsibleai.tools.model_analysis.counterfactual_config import CounterfactualConfig\n",
"\n",
"cf_config = CounterfactualConfig(model_analysis_run, conda_run_config)\n",
"cf_config.add_request(total_CFs=10, desired_range=[10, 300], feature_importance=False)\n",
"cf_config.add_request(total_CFs=10, desired_range=[10, 300])\n",
"cf_run = model_analysis_run.submit_child(cf_config)\n",
"cf_run.wait_for_completion(raise_on_error=True, wait_post_processing=True)"
]

@ -630,6 +629,22 @@

"source": [
"counterfactual_object.visualize_as_dataframe(show_only_changes=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Visualize counterfactual feature importance"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"counterfactual_object.summary_importance"
]
}
],
"metadata": {

@ -8,5 +8,5 @@ dependencies:

- matplotlib
- azureml-dataset-runtime
- ipywidgets
- raiwidgets~=0.7.0
- raiwidgets~=0.11.0
- liac-arff

@ -100,7 +100,7 @@

"\n",
"# Check core SDK version number\n",
"\n",
"print(\"This notebook was created using SDK version 1.34.0, you are currently running version\", azureml.core.VERSION)"
"print(\"This notebook was created using SDK version 1.35.0, you are currently running version\", azureml.core.VERSION)"
]
},
{

@ -225,9 +225,8 @@

"\n",
"Try out these notebooks to learn more about MLflow-Azure Machine Learning integration:\n",
"\n",
" * [Train a model using remote compute on Azure Cloud](../train-on-remote/train-on-remote.ipynb)\n",
" * [Deploy the model as a web service](../deploy-model/deploy-model.ipynb)\n",
" * [Train a model using Pytorch and MLflow](../../ml-frameworks/using-mlflow/train-and-deploy-pytorch)\n",
" * [Train a model using remote compute on Azure Cloud](../train-remote/train-remote.ipynb)\n",
" * [Train a model using Pytorch and MLflow](../../../ml-frameworks/using-mlflow/train-and-deploy-pytorch)\n",
"\n"
]
}

@ -1,6 +1,7 @@

# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license.

import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_diabetes
from sklearn.linear_model import Ridge

@ -11,7 +12,6 @@ import mlflow.sklearn

import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt

with mlflow.start_run():
    X, y = load_diabetes(return_X_y=True)

@ -4,15 +4,15 @@ import sys


def convert(imgf, labelf, outf, n):
    f = open(imgf, "rb")
    l = open(labelf, "rb")
    temp = open(labelf, "rb")
    o = open(outf, "w")

    f.read(16)
    l.read(8)
    temp.read(8)
    images = []

    for i in range(n):
        image = [ord(l.read(1))]
        image = [ord(temp.read(1))]
        for j in range(28 * 28):
            image.append(ord(f.read(1)))
        images.append(image)

@ -21,7 +21,7 @@ def convert(imgf, labelf, outf, n):

        o.write(",".join(str(pix) for pix in image) + "\n")
    f.close()
    o.close()
    l.close()
    temp.close()


mounted_input_path = sys.argv[1]
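For context, a hypothetical invocation of the convert() function above on the standard MNIST file names (the script itself derives its paths from sys.argv, so these names are illustrative only):

```python
# Hypothetical usage of convert(): turn the raw MNIST training archives
# into a CSV with one "label,pixels..." row per image.
convert("train-images-idx3-ubyte", "train-labels-idx1-ubyte",
        "mnist_train.csv", 60000)
```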
index.md

@ -27,7 +27,7 @@ Machine Learning notebook samples and encourage efficient retrieval of topics an

| [Classification of credit card fraudulent transactions using Automated ML](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/classification-credit-card-fraud/auto-ml-classification-credit-card-fraud.ipynb) | Classification | Creditcard | AML Compute | None | None | remote_run, AutomatedML |
| [Classification of credit card fraudulent transactions using Automated ML](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/experimental/classification-credit-card-fraud-local-managed/auto-ml-classification-credit-card-fraud-local-managed.ipynb) | Classification | Creditcard | AML Compute | None | None | AutomatedML |
| [Automated ML run with featurization and model explainability.](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/regression-explanation-featurization/auto-ml-regression-explanation-featurization.ipynb) | Regression | MachineData | AML | ACI | None | featurization, explainability, remote_run, AutomatedML |
| [Automated ML run with featurization and model explainability.](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/responsible-ai/auto-ml-regresion-responsibleai/auto-ml-regression-responsibleai.ipynb) | Regression | MachineData | AML | ACI | None | featurization, explainability, remote_run, AutomatedML |
| [Automated ML run with featurization and model explainability.](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/responsible-ai/auto-ml-regression-responsibleai/auto-ml-regression-responsibleai.ipynb) | Regression | MachineData | AML | ACI | None | featurization, explainability, remote_run, AutomatedML |
| :star:[Azure Machine Learning Pipeline with DataTranferStep](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-data-transfer.ipynb) | Demonstrates the use of DataTranferStep | Custom | ADF | None | Azure ML | None |
| [Getting Started with Azure Machine Learning Pipelines](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-getting-started.ipynb) | Getting Started notebook for AML Pipelines | Custom | AML Compute | None | Azure ML | None |
| [Azure Machine Learning Pipeline with AzureBatchStep](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-how-to-use-azurebatch-to-run-a-windows-executable.ipynb) | Demonstrates the use of AzureBatchStep | Custom | Azure Batch | None | Azure ML | None |

@ -108,6 +108,8 @@ Machine Learning notebook samples and encourage efficient retrieval of topics an

| [auto-ml-regression-model-proxy](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/experimental/regression-model-proxy/auto-ml-regression-model-proxy.ipynb) | | | | | | |
| [auto-ml-forecasting-beer-remote](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/forecasting-beer-remote/auto-ml-forecasting-beer-remote.ipynb) | | | | | | |
| [auto-ml-forecasting-energy-demand](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/forecasting-energy-demand/auto-ml-forecasting-energy-demand.ipynb) | | | | | | |
| [auto-ml-forecasting-hierarchical-timeseries](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/forecasting-hierarchical-timeseries/auto-ml-forecasting-hierarchical-timeseries.ipynb) | | | | | | |
| [auto-ml-forecasting-many-models](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/forecasting-many-models/auto-ml-forecasting-many-models.ipynb) | | | | | | |
| [auto-ml-forecasting-univariate-recipe-experiment-settings](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/forecasting-recipes-univariate/auto-ml-forecasting-univariate-recipe-experiment-settings.ipynb) | | | | | | |
| [auto-ml-forecasting-univariate-recipe-run-experiment](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/forecasting-recipes-univariate/auto-ml-forecasting-univariate-recipe-run-experiment.ipynb) | | | | | | |
| [auto-ml-regression](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/regression/auto-ml-regression.ipynb) | | | | | | |

@ -124,6 +126,7 @@ Machine Learning notebook samples and encourage efficient retrieval of topics an

| [production-deploy-to-aks-ssl](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/deployment/production-deploy-to-aks/production-deploy-to-aks-ssl.ipynb) | | | | | | |
| [production-deploy-to-aks](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/deployment/production-deploy-to-aks/production-deploy-to-aks.ipynb) | | | | | | |
| [production-deploy-to-aks-gpu](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/deployment/production-deploy-to-aks-gpu/production-deploy-to-aks-gpu.ipynb) | | | | | | |
| [train-explain-model-gpu-tree-explainer](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/explain-model/azure-integration/gpu-explanation/train-explain-model-gpu-tree-explainer.ipynb) | | | | | | |
| [explain-model-on-amlcompute](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/explain-model/azure-integration/remote-explanation/explain-model-on-amlcompute.ipynb) | | | | | | |
| [save-retrieve-explanations-run-history](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/explain-model/azure-integration/run-history/save-retrieve-explanations-run-history.ipynb) | | | | | | |
| [train-explain-model-locally-and-deploy](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/explain-model/azure-integration/scoring-time/train-explain-model-locally-and-deploy.ipynb) | | | | | | |

@ -102,7 +102,7 @@

"source": [
"import azureml.core\n",
"\n",
"print(\"This notebook was created using version 1.34.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

@ -25,8 +25,8 @@ def get_class_label_dict(labels_dir):

    label = []
    labels_path = os.path.join(labels_dir, 'labels.txt')
    proto_as_ascii_lines = tf.gfile.GFile(labels_path).readlines()
    for l in proto_as_ascii_lines:
        label.append(l.rstrip())
    for temp in proto_as_ascii_lines:
        label.append(temp.rstrip())
    return label

@ -60,13 +60,6 @@

"## Download and prepare data"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Import the necessary packages. The Open Datasets package contains a class representing each data source (`NycTlcGreen` for example) to easily filter date parameters before downloading."
]
},
{
"cell_type": "code",
"execution_count": null,

@ -2,4 +2,3 @@ name: regression-automated-ml

dependencies:
- pip:
  - azureml-sdk
  - azureml-opendatasets