This commit is contained in:
Bala P V 2022-02-24 18:36:40 -08:00
Parent 7609af6a85
Commit 468438ec4f
23 changed files: 16 additions and 802 deletions

View file

@@ -1,7 +0,0 @@
$schema: https://azuremlsdk2.blob.core.windows.net/latest/asset.schema.json
name: sampledata1
version: 3
description: sample dataset
datastore: azureml:workspaceblobstore
local_path: ./data
path: /sample/data/path
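For reference, the same asset could be registered from Python instead of YAML. A minimal sketch, assuming the private-preview client exposes a datasets operations group with create_or_update (mirroring the components.create_or_update calls later in this commit) and that Dataset accepts the same fields as the YAML above:

#import required libraries (as in the sample notebooks below)
from azure.ml import MLClient
from azure.ml.entities import Dataset
from azure.identity import InteractiveBrowserCredential

#get a handle to the workspace (placeholders as in the notebooks)
ml_client = MLClient(InteractiveBrowserCredential(), '<SUBSCRIPTION_ID>', '<RESOURCE_GROUP>', '<AML_WORKSPACE_NAME>')

#register the dataset; datasets.create_or_update is an assumption here,
#mirroring ml_client.components.create_or_update used elsewhere in this commit
sample_data = Dataset(
    name="sampledata1",
    version="3",
    description="sample dataset",
    local_path="./data",
)
ml_client.datasets.create_or_update(sample_data)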

View file

@@ -1,10 +0,0 @@
"Month", "Average", "2005", "2006", "2007", "2008", "2009", "2010", "2011", "2012", "2013", "2014", "2015"
"May", 0.1, 0, 0, 1, 1, 0, 0, 0, 2, 0, 0, 0
"Jun", 0.5, 2, 1, 1, 0, 0, 1, 1, 2, 2, 0, 1
"Jul", 0.7, 5, 1, 1, 2, 0, 1, 3, 0, 2, 2, 1
"Aug", 2.3, 6, 3, 2, 4, 4, 4, 7, 8, 2, 2, 3
"Sep", 3.5, 6, 4, 7, 4, 2, 8, 5, 2, 5, 2, 5
"Oct", 2.0, 8, 0, 1, 3, 2, 5, 1, 5, 2, 3, 0
"Nov", 0.5, 3, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1
"Dec", 0.0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1

View file

@@ -1,257 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"\n",
"# Prerequisite"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"\n",
"#import required libraries\n",
"from azure.ml import MLClient\n",
"from azure.ml.entities import Code, Dataset"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"\n",
"#Enter details of your AML workspace\n",
"subscription_id = '<SUBSCRIPTION_ID>'\n",
"resource_group = '<RESOURCE_GROUP>'\n",
"workspace = '<AML_WORKSPACE_NAME>'"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#get a handle to the workspace\n",
"ml_client = MLClient(subscription_id, resource_group, workspace)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Pipeline job with registered component\n",
"## Register components"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azure.ml.entities import CommandComponent\n",
"parent_dir = '.'\n",
"environment = \"AzureML-sklearn-0.24-ubuntu18.04-py37-cpu:5\"\n",
"\n",
"train_component = CommandComponent(\n",
" name=\"Train\",\n",
" version=\"32\",\n",
" inputs=dict(\n",
" training_data=dict(type=\"path\"),\n",
" max_epocs=dict(type=\"integer\"),\n",
" learning_rate=dict(type=\"number\", default=0.01),\n",
" learning_rate_schedule=dict(type=\"string\", default=\"time-based\")\n",
" ),\n",
" outputs=dict(\n",
" model_output=dict(type=\"path\")\n",
" ),\n",
" code=Code(local_path=parent_dir + \"/train_src\"),\n",
" environment=environment,\n",
" command=\"python train.py --training_data ${{inputs.training_data}} --max_epocs ${{inputs.max_epocs}} \"\n",
" \"--learning_rate ${{inputs.learning_rate}} --learning_rate_schedule ${{\"\n",
" \"inputs.learning_rate_schedule}} --model_output ${{outputs.model_output}} \"\n",
")\n",
"ml_client.components.create_or_update(train_component)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"score_component = CommandComponent(\n",
" name=\"Score\", \n",
" version=\"32\",\n",
" inputs=dict(\n",
" model_input=dict(type=\"path\"),\n",
" test_data=dict(type=\"path\"),\n",
" ),\n",
" outputs=dict(\n",
" score_output=dict(type=\"path\")\n",
" ),\n",
" code=Code(local_path=parent_dir + \"/score_src\"),\n",
" environment=environment,\n",
" command=\"python score.py --model_input ${{inputs.model_input}} --test_data ${{inputs.test_data}} \"\n",
" \"--score_output ${{outputs.score_output}} \"\n",
")\n",
"ml_client.components.create_or_update(score_component)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"eval_component = CommandComponent(\n",
" name=\"Eval\",\n",
" version=\"32\",\n",
" inputs=dict(\n",
" scoring_result=dict(type=\"path\"),\n",
" ),\n",
" outputs=dict(\n",
" eval_output=dict(type=\"path\")\n",
" ),\n",
" code=Code(local_path=parent_dir + \"/eval_src\"),\n",
" environment=environment,\n",
" command=\"python eval.py --scoring_result ${{inputs.scoring_result}} --eval_output ${{outputs.eval_output}}\"\n",
")\n",
"ml_client.components.create_or_update(eval_component)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Build pipeline"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azure.ml import dsl, MLClient\n",
"from azure.ml.dsl import Pipeline\n",
"from azure.ml.entities import Component as ComponentEntity, Dataset\n",
"from pathlib import Path\n",
"\n",
"def generate_dsl_pipeline(\n",
" client: MLClient,\n",
" train_component: ComponentEntity,\n",
" score_component: ComponentEntity,\n",
" eval_component: ComponentEntity,\n",
" ) -> Pipeline:\n",
" # 1. Load component funcs\n",
" train_func = dsl.load_component(\n",
" client=client,\n",
" name=train_component.name,\n",
" version=train_component.version,\n",
" )\n",
" score_func = dsl.load_component(\n",
" client=client,\n",
" name=score_component.name,\n",
" version=score_component.version,\n",
" )\n",
" eval_func = dsl.load_component(\n",
" client=client,\n",
" name=eval_component.name,\n",
" version=eval_component.version,\n",
" )\n",
"\n",
" # 2. Construct pipeline\n",
" @dsl.pipeline(\n",
" compute=\"cpu-cluster\",\n",
" description=\"E2E dummy train-score-eval pipeline with registered components\",\n",
" )\n",
" def sample_pipeline(\n",
" pipeline_job_training_input,\n",
" pipeline_job_test_input,\n",
" pipeline_job_training_max_epocs,\n",
" pipeline_job_training_learning_rate,\n",
" pipeline_job_learning_rate_schedule,\n",
" ):\n",
" train_job = train_func(\n",
" training_data=pipeline_job_training_input,\n",
" max_epocs=pipeline_job_training_max_epocs,\n",
" learning_rate=pipeline_job_training_learning_rate,\n",
" learning_rate_schedule=pipeline_job_learning_rate_schedule,\n",
" )\n",
" score_job = score_func(model_input=train_job.outputs.model_output, test_data=pipeline_job_test_input)\n",
" score_job.outputs.score_output.mode = \"upload\"\n",
" evaluate_job = eval_func(scoring_result=score_job.outputs.score_output)\n",
" return {\n",
" \"pipeline_job_trained_model\": train_job.outputs.model_output,\n",
" \"pipeline_job_scored_data\": score_job.outputs.score_output,\n",
" \"pipeline_job_evaluation_report\": evaluate_job.outputs.eval_output,\n",
" }\n",
"\n",
" pipeline = sample_pipeline(\n",
" Dataset(local_path=parent_dir + \"/data/\"),\n",
" Dataset(local_path=parent_dir + \"/data/\"),\n",
" 20,\n",
" 1.8,\n",
" \"time-based\",\n",
" )\n",
" pipeline.outputs.pipeline_job_trained_model.mode = \"upload\"\n",
" pipeline.outputs.pipeline_job_scored_data.mode = \"upload\"\n",
" pipeline.outputs.pipeline_job_evaluation_report.mode = \"upload\"\n",
" return pipeline"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Submit pipeline job"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# create pipeline instance\n",
"pipeline = generate_dsl_pipeline(ml_client, train_component, score_component, eval_component)\n",
"# submit job to workspace\n",
"ml_client.jobs.create_or_update(pipeline, experiment_name=\"e2e_registered_components\", continue_run_on_step_failure=True)"
]
}
],
"metadata": {
"interpreter": {
"hash": "3e9e0e270b75c5e6da2e22113ba4f77b864d68f95da6601809c29e46c73ae6bb"
},
"kernelspec": {
"display_name": "Python 3.7.8 64-bit",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.8"
},
"orig_nbformat": 4
},
"nbformat": 4,
"nbformat_minor": 2
}

View file

@@ -1,17 +0,0 @@
name: Eval
version: 32
display_name: Eval
type: command
inputs:
  scoring_result:
    type: path
outputs:
  eval_output:
    type: path
environment: azureml:AzureML-sklearn-0.24-ubuntu18.04-py37-cpu:5
code:
  local_path: ./eval_src
command: >-
  python eval.py
  --scoring_result ${{inputs.scoring_result}}
  --eval_output ${{outputs.eval_output}}

View file

@@ -1,25 +0,0 @@
import argparse
from pathlib import Path
from datetime import datetime

parser = argparse.ArgumentParser("eval")
parser.add_argument("--scoring_result", type=str, help="Path of scoring result")
parser.add_argument("--eval_output", type=str, help="Path of output evaluation result")
args = parser.parse_args()

print("hello evaluation world...")
lines = [
    f'Scoring result path: {args.scoring_result}',
    f'Evaluation output path: {args.eval_output}',
]
for line in lines:
    print(line)

# Evaluate the incoming scoring result and output evaluation result.
# Here only output a dummy file for demo.
curtime = datetime.now().strftime("%b-%d-%Y %H:%M:%S")
eval_msg = f"Eval done at {curtime}\n"
(Path(args.eval_output) / 'eval_result.txt').write_text(eval_msg)

View file

@@ -1,69 +0,0 @@
from azure.ml import dsl, MLClient
from azure.ml.dsl import Pipeline
from azure.ml.entities import Component as ComponentEntity, Dataset
from pathlib import Path

parent_dir = str(Path(__file__).parent)


def generate_dsl_pipeline(
    client: MLClient,
    pipeline_samples_e2e_registered_train_components: ComponentEntity,
    pipeline_samples_e2e_registered_score_components: ComponentEntity,
    pipeline_samples_e2e_registered_eval_components: ComponentEntity,
) -> Pipeline:
    # 1. Load component funcs
    train_func = dsl.load_component(
        client=client,
        name=pipeline_samples_e2e_registered_train_components.name,
        version=pipeline_samples_e2e_registered_train_components.version,
    )
    score_func = dsl.load_component(
        client=client,
        name=pipeline_samples_e2e_registered_score_components.name,
        version=pipeline_samples_e2e_registered_score_components.version,
    )
    eval_func = dsl.load_component(
        client=client,
        name=pipeline_samples_e2e_registered_eval_components.name,
        version=pipeline_samples_e2e_registered_eval_components.version,
    )

    # 2. Construct pipeline
    @dsl.pipeline(
        compute="cpu-cluster",
        description="E2E dummy train-score-eval pipeline with registered components",
    )
    def sample_pipeline(
        pipeline_job_training_input,
        pipeline_job_test_input,
        pipeline_job_training_max_epocs,
        pipeline_job_training_learning_rate,
        pipeline_job_learning_rate_schedule,
    ):
        train_job = train_func(
            training_data=pipeline_job_training_input,
            max_epocs=pipeline_job_training_max_epocs,
            learning_rate=pipeline_job_training_learning_rate,
            learning_rate_schedule=pipeline_job_learning_rate_schedule,
        )
        score_job = score_func(model_input=train_job.outputs.model_output, test_data=pipeline_job_test_input)
        score_job.outputs.score_output.mode = "upload"
        evaluate_job = eval_func(scoring_result=score_job.outputs.score_output)
        return {
            "pipeline_job_trained_model": train_job.outputs.model_output,
            "pipeline_job_scored_data": score_job.outputs.score_output,
            "pipeline_job_evaluation_report": evaluate_job.outputs.eval_output,
        }

    pipeline = sample_pipeline(
        Dataset(local_path=parent_dir + "/data/"),
        Dataset(local_path=parent_dir + "/data/"),
        20,
        1.8,
        "time-based",
    )
    pipeline.outputs.pipeline_job_trained_model.mode = "upload"
    pipeline.outputs.pipeline_job_scored_data.mode = "upload"
    pipeline.outputs.pipeline_job_evaluation_report.mode = "upload"
    return pipeline

View file

@@ -1,55 +0,0 @@
type: pipeline
description: "E2E dummy train-score-eval pipeline with registered components"
inputs:
  pipeline_job_training_input:
    dataset:
      local_path: ./data
  pipeline_job_test_input:
    dataset:
      local_path: ./data
  pipeline_job_training_max_epocs: 20
  pipeline_job_training_learning_rate: 1.8
  pipeline_job_learning_rate_schedule: 'time-based'
outputs:
  pipeline_job_trained_model:
    mode: upload
  pipeline_job_scored_data:
    mode: upload
  pipeline_job_evaluation_report:
    mode: upload
#settings:
#  datastore: azureml:workspaceblobstore
compute: azureml:cpu-cluster
jobs:
  train_job:
    type: component
    component: azureml:Train:31
    inputs:
      training_data: ${{inputs.pipeline_job_training_input}}
      max_epocs: ${{inputs.pipeline_job_training_max_epocs}}
      learning_rate: ${{inputs.pipeline_job_training_learning_rate}}
      learning_rate_schedule: ${{inputs.pipeline_job_learning_rate_schedule}}
    outputs:
      model_output: ${{outputs.pipeline_job_trained_model}}
  score_job:
    type: component
    component: azureml:Score:31
    inputs:
      model_input: ${{jobs.train_job.outputs.model_output}}
      test_data: ${{inputs.pipeline_job_test_input}}
    outputs:
      score_output: ${{outputs.pipeline_job_scored_data}}
  evaluate_job:
    type: component
    component: azureml:Eval:31
    inputs:
      scoring_result: ${{jobs.score_job.outputs.score_output}}
    outputs:
      eval_output: ${{outputs.pipeline_job_evaluation_report}}

View file

@@ -1,20 +0,0 @@
name: Score
version: 32
display_name: Score
type: command
inputs:
  model_input:
    type: path
  test_data:
    type: path
outputs:
  score_output:
    type: path
environment: azureml:AzureML-sklearn-0.24-ubuntu18.04-py37-cpu:5
code:
  local_path: ./score_src
command: >-
  python score.py
  --model_input ${{inputs.model_input}}
  --test_data ${{inputs.test_data}}
  --score_output ${{outputs.score_output}}

View file

@@ -1,29 +0,0 @@
import argparse
from pathlib import Path

parser = argparse.ArgumentParser("score")
parser.add_argument("--model_input", type=str, help="Path of input model")
parser.add_argument("--test_data", type=str, help="Path to test data")
parser.add_argument("--score_output", type=str, help="Path of scoring output")
args = parser.parse_args()

print("hello scoring world...")
lines = [
    f'Model path: {args.model_input}',
    f'Test data path: {args.test_data}',
    f'Scoring output path: {args.score_output}',
]
for line in lines:
    print(line)

# Load the model from input port
# Here only print the model as text since it is a dummy one
model = (Path(args.model_input) / 'model.txt').read_text()
print('Model: ', model)

# Do scoring with the input model
# Here only print text to output file as demo
(Path(args.score_output) / 'score.txt').write_text('Scored with the following model:\n{}'.format(model))

View file

@@ -1,28 +0,0 @@
name: Train
display_name: Train
version: 32
type: command
inputs:
  training_data:
    type: path
  max_epocs:
    type: integer
  learning_rate:
    type: number
    default: 0.01
  learning_rate_schedule:
    type: string
    default: time-based
outputs:
  model_output:
    type: path
code:
  local_path: ./train_src
environment: azureml:AzureML-sklearn-0.24-ubuntu18.04-py37-cpu:5
command: >-
  python train.py
  --training_data ${{inputs.training_data}}
  --max_epocs ${{inputs.max_epocs}}
  --learning_rate ${{inputs.learning_rate}}
  --learning_rate_schedule ${{inputs.learning_rate_schedule}}
  --model_output ${{outputs.model_output}}

View file

@@ -1,43 +0,0 @@
import argparse
import os
from pathlib import Path
from uuid import uuid4
from datetime import datetime

parser = argparse.ArgumentParser("train")
parser.add_argument("--training_data", type=str, help="Path to training data")
parser.add_argument("--max_epocs", type=int, help="Max # of epochs for the training")
parser.add_argument("--learning_rate", type=float, help="Learning rate")
parser.add_argument("--learning_rate_schedule", type=str, help="Learning rate schedule")
parser.add_argument("--model_output", type=str, help="Path of output model")
args = parser.parse_args()

print("hello training world...")
lines = [
    f'Training data path: {args.training_data}',
    f'Max epocs: {args.max_epocs}',
    f'Learning rate: {args.learning_rate}',
    f'Learning rate schedule: {args.learning_rate_schedule}',
    f'Model output path: {args.model_output}',
]
for line in lines:
    print(line)

print("mounted_path files: ")
arr = os.listdir(args.training_data)
print(arr)
for filename in arr:
    print("reading file: %s ..." % filename)
    with open(os.path.join(args.training_data, filename), 'r') as handle:
        print(handle.read())

# Do the train and save the trained model as a file into the output folder.
# Here only output a dummy data for demo.
curtime = datetime.now().strftime("%b-%d-%Y %H:%M:%S")
model = f"This is a dummy model with id: {str(uuid4())} generated at: {curtime}\n"
(Path(args.model_output) / 'model.txt').write_text(model)
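Because the three dummy scripts above only pass files through mounted folders, they can be smoke-tested locally before submitting the pipeline. A minimal sketch, assuming it is run from the sample folder containing data/, train_src/, score_src/ and eval_src/ (the out_* folder names are made up for this test):

# chain train -> score -> eval locally, using plain folders in place of mounted paths
import subprocess
from pathlib import Path

for d in ("out_model", "out_score", "out_eval"):
    Path(d).mkdir(exist_ok=True)

subprocess.run(["python", "train_src/train.py", "--training_data", "data",
                "--max_epocs", "20", "--learning_rate", "1.8",
                "--learning_rate_schedule", "time-based",
                "--model_output", "out_model"], check=True)
subprocess.run(["python", "score_src/score.py", "--model_input", "out_model",
                "--test_data", "data", "--score_output", "out_score"], check=True)
subprocess.run(["python", "eval_src/eval.py", "--scoring_result", "out_score",
                "--eval_output", "out_eval"], check=True)
print((Path("out_eval") / "eval_result.txt").read_text())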

View file

@@ -1,133 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"\n",
"# Prerequisite"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"\n",
"#import required libraries\n",
"from azure.ml import MLClient\n",
"from azure.ml.entities import Code, Dataset"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"\n",
"#Enter details of your AML workspace\n",
"subscription_id = '<SUBSCRIPTION_ID>'\n",
"resource_group = '<RESOURCE_GROUP>'\n",
"workspace = '<AML_WORKSPACE_NAME>'"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#get a handle to the workspace\n",
"ml_client = MLClient(subscription_id, resource_group, workspace)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Basic pipeline job\n",
"\n",
"## Build pipeline"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azure.ml import dsl\n",
"from azure.ml.dsl import Pipeline\n",
"from pathlib import Path\n",
"\n",
"parent_dir = '.'\n",
"\n",
"\n",
"def generate_dsl_pipeline() -> Pipeline:\n",
" # 1. Load component funcs\n",
" a_func = dsl.load_component(yaml_file=parent_dir + \"/componentA.yml\")\n",
" b_func = dsl.load_component(yaml_file=parent_dir + \"/componentB.yml\")\n",
" c_func = dsl.load_component(yaml_file=parent_dir + \"/componentC.yml\")\n",
"\n",
" # 2. Construct pipeline\n",
" @dsl.pipeline(\n",
" compute=\"cpu-cluster\",\n",
" description=\"Basic Pipeline Job with 3 Hello World components\",\n",
" )\n",
" def sample_pipeline():\n",
" componentA_job = a_func()\n",
" componentB_job = b_func()\n",
" componentC_job = c_func()\n",
"\n",
" pipeline = sample_pipeline()\n",
" return pipeline"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Submit pipeline job"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# create pipeline instance\n",
"pipeline = generate_dsl_pipeline()\n",
"# submit job to workspace\n",
"ml_client.jobs.create_or_update(pipeline, experiment_name=\"basic_pipeline\", continue_run_on_step_failure=True)"
]
}
],
"metadata": {
"interpreter": {
"hash": "3e9e0e270b75c5e6da2e22113ba4f77b864d68f95da6601809c29e46c73ae6bb"
},
"kernelspec": {
"display_name": "Python 3.7.8 64-bit",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.8"
},
"orig_nbformat": 4
},
"nbformat": 4,
"nbformat_minor": 2
}

View file

@@ -1,14 +0,0 @@
type: command
name: componentA
display_name: componentA
version: 1
code:
  local_path: ./componentA_src
environment:
  image: docker.io/python
command: >-
  python hello.py

View file

@@ -1,3 +0,0 @@
print ("Hello Python World...\nI'm component A :-)")

View file

@@ -1,14 +0,0 @@
type: command
name: componentB
display_name: componentB
version: 1
code:
  local_path: ./componentB_src
environment:
  image: docker.io/python
command: >-
  python hello.py

View file

@@ -1,2 +0,0 @@
print ("Hello Python World...\nI'm component B :-)")

View file

@@ -1,14 +0,0 @@
type: command
name: componentC
display_name: componentC
version: 1
code:
  local_path: ./componentC_src
environment:
  image: docker.io/python
command: >-
  python hello.py

View file

@@ -1,3 +0,0 @@
print ("Hello Python World...\nI'm component C :-)")

View file

@@ -1,25 +0,0 @@
from azure.ml import dsl
from azure.ml.dsl import Pipeline
from pathlib import Path

parent_dir = str(Path(__file__).parent)


def generate_dsl_pipeline() -> Pipeline:
    # 1. Load component funcs
    a_func = dsl.load_component(yaml_file=parent_dir + "/componentA.yml")
    b_func = dsl.load_component(yaml_file=parent_dir + "/componentB.yml")
    c_func = dsl.load_component(yaml_file=parent_dir + "/componentC.yml")

    # 2. Construct pipeline
    @dsl.pipeline(
        compute="cpu-cluster",
        description="Basic Pipeline Job with 3 Hello World components",
    )
    def sample_pipeline():
        componentA_job = a_func()
        componentB_job = b_func()
        componentC_job = c_func()

    pipeline = sample_pipeline()
    return pipeline

View file

@@ -1,16 +0,0 @@
type: pipeline
description: "Basic Pipeline Job with 3 Hello World components"
compute: azureml:cpu-cluster
jobs:
  componentA_job:
    type: component
    component: file:./componentA.yml
  componentB_job:
    type: component
    component: file:./componentB.yml
  componentC_job:
    type: component
    component: file:./componentC.yml

View file

@@ -17,7 +17,8 @@
 "\n",
 "#import required libraries\n",
 "from azure.ml import MLClient\n",
-"from azure.ml.entities import Code, Dataset"
+"from azure.ml.entities import Code, Dataset\n",
+"from azure.identity import InteractiveBrowserCredential"
 ]
 },
 {
@@ -40,7 +41,7 @@
 "outputs": [],
 "source": [
 "#get a handle to the workspace\n",
-"ml_client = MLClient(subscription_id, resource_group, workspace)"
+"ml_client = MLClient(InteractiveBrowserCredential(), subscription_id, resource_group, workspace)"
 ]
 },
 {
@@ -158,7 +159,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.7.8"
+"version": "3.7.12"
 },
 "orig_nbformat": 4
 },

View file

@@ -102,7 +102,7 @@
 "# create pipeline instance\n",
 "pipeline = generate_dsl_pipeline()\n",
 "# submit job to workspace\n",
-"ml_client.create_or_update(pipeline, experiment_name=\"basic_pipeline\", continue_run_on_step_failure=True)"
+"ml_client.jobs.create_or_update(pipeline, experiment_name=\"basic_pipeline\", continue_run_on_step_failure=True)"
 ]
 },
 {
@@ -141,7 +141,7 @@
 " \"--learning_rate ${{inputs.learning_rate}} --learning_rate_schedule ${{\"\n",
 " \"inputs.learning_rate_schedule}} --model_output ${{outputs.model_output}} \"\n",
 ")\n",
-"ml_client.create_or_update(train_component)\n"
+"ml_client.components.create_or_update(train_component)\n"
 ]
 },
 {
@@ -165,7 +165,7 @@
 " command=\"python score.py --model_input ${{inputs.model_input}} --test_data ${{inputs.test_data}} \"\n",
 " \"--score_output ${{outputs.score_output}} \"\n",
 ")\n",
-"ml_client.create_or_update(score_component)"
+"ml_client.components.create_or_update(score_component)"
 ]
 },
 {
@@ -187,7 +187,7 @@
 " environment=environment,\n",
 " command=\"python eval.py --scoring_result ${{inputs.scoring_result}} --eval_output ${{outputs.eval_output}}\"\n",
 ")\n",
-"ml_client.create_or_update(eval_component)"
+"ml_client.components.create_or_update(eval_component)"
 ]
 },
 {
@@ -287,7 +287,7 @@
 "# create pipeline instance\n",
 "pipeline = generate_dsl_pipeline(ml_client, train_component, score_component, eval_component)\n",
 "# submit job to workspace\n",
-"ml_client.create_or_update(pipeline, experiment_name=\"e2e_registered_components\", continue_run_on_step_failure=True)"
+"ml_client.jobs.create_or_update(pipeline, experiment_name=\"e2e_registered_components\", continue_run_on_step_failure=True)"
 ]
 },
 {
@@ -329,7 +329,7 @@
 "# create pipeline instance\n",
 "pipeline = generate_dsl_pipeline()\n",
 "# submit job to workspace\n",
-"ml_client.create_or_update(pipeline, experiment_name=\"nyc_taxi_data_regression\", continue_run_on_step_failure=True)"
+"ml_client.jobs.create_or_update(pipeline, experiment_name=\"nyc_taxi_data_regression\", continue_run_on_step_failure=True)"
 ]
 }
 ],
@@ -352,7 +352,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.8.5"
+"version": "3.7.12"
 }
 },
 "nbformat": 4,

View file

@@ -17,7 +17,8 @@
 "\n",
 "#import required libraries\n",
 "from azure.ml import MLClient\n",
-"from azure.ml.entities import Code, Dataset"
+"from azure.ml.entities import Code, Dataset\n",
+"from azure.identity import InteractiveBrowserCredential"
 ]
 },
 {
@@ -30,11 +31,7 @@
 "#Enter details of your AML workspace\n",
 "subscription_id = '<SUBSCRIPTION_ID>'\n",
 "resource_group = '<RESOURCE_GROUP>'\n",
-"workspace = '<AML_WORKSPACE_NAME>'\n",
-"\n",
-"subscription_id = '4aaa645c-5ae2-4ae9-a17a-84b9023bc56a'\n",
-"resource_group = 'itp-pilot-ResGrp'\n",
-"workspace = 'itp-pilot-eastus'"
+"workspace = '<AML_WORKSPACE_NAME>'"
 ]
 },
 {
@@ -44,7 +41,7 @@
 "outputs": [],
 "source": [
 "#get a handle to the workspace\n",
-"ml_client = MLClient(subscription_id, resource_group, workspace)"
+"ml_client = MLClient(InteractiveBrowserCredential(), subscription_id, resource_group, workspace)"
 ]
 },
 {
@@ -128,7 +125,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.7.8"
+"version": "3.7.12"
 },
 "orig_nbformat": 4
 },
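Taken together, the notebook hunks above converge on a single workspace-connection preamble: credentials are now passed explicitly via azure.identity. A consolidated sketch of the pattern after this commit (placeholders as in the notebooks):

#import required libraries
from azure.ml import MLClient
from azure.identity import InteractiveBrowserCredential

#Enter details of your AML workspace
subscription_id = '<SUBSCRIPTION_ID>'
resource_group = '<RESOURCE_GROUP>'
workspace = '<AML_WORKSPACE_NAME>'

#get a handle to the workspace
ml_client = MLClient(InteractiveBrowserCredential(), subscription_id, resource_group, workspace)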