This commit is contained in:
YanZhangADS 2018-12-20 12:33:28 +00:00
Родитель 2efe572a93
Коммит 5472817c25
5 изменённых файлов: 0 добавлений и 2479 удалений

Просмотреть файл

@ -1,772 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Deploying a DL model to AKS CPU cluster\n",
"This notebook shows the steps for deploying a service: registering a model, creating an image, provisioning a cluster (one time action), and deploying a service to it. We then test and delete the service, image and model."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Workspace\n",
"from azureml.core.compute import AksCompute, ComputeTarget\n",
"from azureml.core.webservice import Webservice, AksWebservice\n",
"from azureml.core.image import Image\n",
"from azureml.core.model import Model"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import azureml.core\n",
"print(azureml.core.VERSION)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"print(os.getcwd())\n",
"print(os.listdir(os.getcwd()))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Get workspace\n",
"Load existing workspace from the config file info."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.workspace import Workspace\n",
"\n",
"ws = Workspace.from_config()\n",
"print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\\n')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create the model"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#Creating the model pickle file\n",
"import tensorflow as tf\n",
"from resnet152 import ResNet152\n",
"from keras.preprocessing import image\n",
"from keras.applications.imagenet_utils import preprocess_input, decode_predictions\n",
"\n",
"model = ResNet152(weights='imagenet')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model.save_weights(\"model_resnet_weights.h5\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model.load_weights('model_resnet_weights.h5')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Register the model\n",
"Register an existing trained model, add description and tags."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#Register the model\n",
"from azureml.core.model import Model\n",
"model = Model.register(model_path = \"model_resnet_weights.h5\", # this points to a local file\n",
" model_name = \"resnet_model\", # this is the name the model is registered as\n",
" tags = {'model': \"dl\", 'framework': \"resnet\"},\n",
" description = \"resnet 152 model\",\n",
" workspace = ws)\n",
"\n",
"print(model.name, model.description, model.version)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Test the scoring script at local host"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#define init() function\n",
"def init():\n",
" import tensorflow as tf\n",
" from resnet152 import ResNet152\n",
" from keras.preprocessing import image\n",
" from keras.applications.imagenet_utils import preprocess_input, decode_predictions\n",
"\n",
" import numpy as np\n",
" import timeit as t\n",
" import base64\n",
" import json\n",
" from PIL import Image, ImageOps\n",
" from io import BytesIO\n",
" import logging\n",
"\n",
" global model\n",
" model = ResNet152(weights='imagenet')\n",
" print('Model loaded')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"init()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#define run() function \n",
"def run(inputString):\n",
" \n",
" import tensorflow as tf\n",
" from resnet152 import ResNet152\n",
" from keras.preprocessing import image\n",
" from keras.applications.imagenet_utils import preprocess_input, decode_predictions\n",
"\n",
" import numpy as np\n",
" import timeit as t\n",
" import base64\n",
" import json\n",
" from PIL import Image, ImageOps\n",
" from io import BytesIO\n",
" import logging \n",
" \n",
" model = ResNet152(weights='imagenet')\n",
" print('Model loaded')\n",
" \n",
" \n",
" \n",
" responses = []\n",
" base64Dict = json.loads(inputString)\n",
"\n",
" for k, v in base64Dict.items():\n",
" img_file_name, base64Img = k, v\n",
" decoded_img = base64.b64decode(base64Img)\n",
" img_buffer = BytesIO(decoded_img)\n",
" imageData = Image.open(img_buffer).convert(\"RGB\")\n",
" \n",
" # Evaluate the model using the input data\n",
" img = ImageOps.fit(imageData, (224,224), Image.ANTIALIAS)\n",
" img = np.array(img) # shape: (224, 224, 3)\n",
" \n",
" img = np.expand_dims(img, axis=0)\n",
" img = preprocess_input(img)\n",
" \n",
" preds = model.predict(img)\n",
" print('Predicted:', decode_predictions(preds, top=3))\n",
" resp = {img_file_name: str(decode_predictions(preds, top=3))}\n",
"\n",
" responses.append(resp)\n",
" return json.dumps(responses)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from io import BytesIO\n",
"from PIL import Image, ImageOps\n",
"import base64\n",
"import json\n",
"\n",
"img_path = '220px-Lynx_lynx_poing.jpg'\n",
"encoded = None\n",
"with open(img_path, 'rb') as file:\n",
" encoded = base64.b64encode(file.read())\n",
"img_dict = {img_path: encoded.decode('utf-8')}\n",
"body = json.dumps(img_dict)\n",
"resp = run(body)\n",
"print(resp)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Write and save scoring script"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%%writefile score.py\n",
"def init():\n",
" import tensorflow as tf\n",
" from resnet152 import ResNet152\n",
" from keras.preprocessing import image\n",
" from keras.applications.imagenet_utils import preprocess_input, decode_predictions\n",
"\n",
" import numpy as np\n",
" import timeit as t\n",
" import base64\n",
" import json\n",
" from PIL import Image, ImageOps\n",
" from io import BytesIO\n",
" import logging\n",
"\n",
" global model\n",
" model = ResNet152(weights='imagenet')\n",
" print('Model loaded')\n",
" \n",
"def run(inputString):\n",
" \n",
" import tensorflow as tf\n",
" from resnet152 import ResNet152\n",
" from keras.preprocessing import image\n",
" from keras.applications.imagenet_utils import preprocess_input, decode_predictions\n",
"\n",
" import numpy as np\n",
" import timeit as t\n",
" import base64\n",
" import json\n",
" from PIL import Image, ImageOps\n",
" from io import BytesIO\n",
" import logging \n",
" \n",
" model = ResNet152(weights='imagenet')\n",
" print('Model loaded')\n",
" \n",
" responses = []\n",
" base64Dict = json.loads(inputString)\n",
"\n",
" for k, v in base64Dict.items():\n",
" img_file_name, base64Img = k, v\n",
" decoded_img = base64.b64decode(base64Img)\n",
" img_buffer = BytesIO(decoded_img)\n",
" imageData = Image.open(img_buffer).convert(\"RGB\")\n",
" \n",
" # Evaluate the model using the input data\n",
" img = ImageOps.fit(imageData, (224,224), Image.ANTIALIAS)\n",
" img = np.array(img) # shape: (224, 224, 3)\n",
" \n",
" img = np.expand_dims(img, axis=0)\n",
" img = preprocess_input(img)\n",
" \n",
" preds = model.predict(img)\n",
" print('Predicted:', decode_predictions(preds, top=3))\n",
" resp = {img_file_name: str(decode_predictions(preds, top=3))}\n",
"\n",
" responses.append(resp)\n",
" return json.dumps(responses) "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create an Image\n",
"Create an image using the registered model and the script that will load and run the model."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.image import ContainerImage\n",
"\n",
"image_config = ContainerImage.image_configuration(execution_script = \"score.py\",\n",
" runtime = \"python\",\n",
" conda_file = \"myenv_yz.yml\",\n",
" docker_file = \"mydockerfile\",\n",
" description = \"Image for AKS Deployment Tutorial\",\n",
" tags = {\"name\":\"AKS\",\"project\":\"AML\"}, \n",
" dependencies = [\"resnet152.py\"] \n",
" )\n",
"\n",
"image = ContainerImage.create(name = \"myimage12\",\n",
" # this is the model object\n",
" models = [], \n",
" image_config = image_config,\n",
" workspace = ws)\n",
"\n",
"image.wait_for_creation(show_output = True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"image.wait_for_creation(show_output = True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Provision the AKS Cluster¶ \n",
"This is a one time setup. You can reuse this cluster for multiple deployments after it has been created. If you delete the cluster or the resource group that contains it, then you would have to recreate it."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#Provision AKS cluster\n",
"# Use the default configuration (can also provide parameters to customize)\n",
"prov_config = AksCompute.provisioning_configuration()\n",
"\n",
"aks_name = 'yanz-aks-cpu' \n",
"# Create the cluster\n",
"aks_target = ComputeTarget.create(workspace = ws, \n",
" name = aks_name, \n",
" provisioning_configuration = prov_config)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"'''\n",
"#Provision AKS cluster with GPU machine\n",
"# Use the default configuration (can also provide parameters to customize)\n",
"prov_config = AksCompute.provisioning_configuration(vm_size='Standard_NC6')\n",
"\n",
"aks_name = 'yanz-aks-1' \n",
"# Create the cluster\n",
"aks_target = ComputeTarget.create(workspace = ws, \n",
" name = aks_name, \n",
" provisioning_configuration = prov_config)\n",
"'''"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Optional step: Attach existing AKS cluster¶"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Attach an existing AKS cluster\n",
"# Use the default configuration (can also provide parameters to customize)\n",
"resource_id = '/subscriptions/edf507a2-6235-46c5-b560-fd463ba2e771/resourcegroups/yanzamlworkspace/providers/Microsoft.ContainerService/managedClusters/yanz-aks-1c6750233554'\n",
"\n",
"create_name='my-existing-aks' \n",
"# Create the cluster\n",
"aks_target = AksCompute.attach(workspace=ws, name=create_name, resource_id=resource_id)\n",
"# Wait for the operation to complete\n",
"aks_target.wait_for_completion(True)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"\n",
"# Attach an existing AKS cluster (CPU)\n",
"# Use the default configuration (can also provide parameters to customize)\n",
"resource_id = '/subscriptions/edf507a2-6235-46c5-b560-fd463ba2e771/resourcegroups/yanzamlworkspace/providers/Microsoft.ContainerService/managedClusters/yanz-aks-cpu1549812594'\n",
"\n",
"create_name='my-cpu-aks' \n",
"# Create the cluster\n",
"aks_target = AksCompute.attach(workspace=ws, name=create_name, resource_id=resource_id)\n",
"# Wait for the operation to complete\n",
"aks_target.wait_for_completion(True)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"aks_target"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#list images\n",
"images = ws.images()\n",
"images"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#for img in ws.images():\n",
"# if img.name == 'myimage1': img.delete()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%%time\n",
"aks_target.wait_for_completion(show_output = True)\n",
"print(aks_target.provisioning_state)\n",
"print(aks_target.provisioning_errors)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!az aks get-credentials -n 'yanz-aks-cpu1549812594' -g yanzamlworkspace -a -f config_cpuaks"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!kubectl --kubeconfig config get services"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Deploy web service to AKS¶ "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"'''\n",
"#Deploy web service to AKS\n",
"#Set the web service configuration (using default here)\n",
"aks_config = AksWebservice.deploy_configuration()\n",
"print(aks_config)\n",
"'''"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#Deploy web service to AKS\n",
"#Set the web service configuration (using customized configuration)\n",
"aks_config = AksWebservice.deploy_configuration(memory_gb=2.0, enable_app_insights=True)\n",
"print(aks_config)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# by default the 500MB -- memory_gb; up to 1.4 GB - manually - profiling ; cluster capacity is 24G\n",
"help(AksWebservice)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"image"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%%time\n",
"aks_service_name ='yanz-aks-service-14'\n",
"\n",
"aks_service = Webservice.deploy_from_image(workspace = ws, \n",
" name = aks_service_name,\n",
" image = image,\n",
" deployment_config = aks_config,\n",
" deployment_target = aks_target)\n",
"aks_service.wait_for_deployment(show_output = True)\n",
"print(aks_service.state)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Test the web service\n",
"We test the web service by passing data."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from io import BytesIO\n",
"from PIL import Image, ImageOps\n",
"import base64\n",
"import json\n",
"\n",
"img_path = '220px-Lynx_lynx_poing.jpg'\n",
"encoded = None\n",
"with open(img_path, 'rb') as file:\n",
" encoded = base64.b64encode(file.read())\n",
"img_dict = {img_path: encoded.decode('utf-8')}\n",
"body = json.dumps(img_dict)\n",
"resp = aks_service.run(input_data = body)\n",
"print(resp)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"aks_service.update(enable_app_insights=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(aks_service.state)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"aks_service.update_deployment_state()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!kubectl --kubeconfig config_cpuaks proxy --port 8011"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"log = aks_service.get_logs(5000)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"with open(\"servicelog_ws14\", \"w\") as json_file:\n",
" json_file.write(log)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#debug web service failure\n",
"print(ws.webservices()['yanz-aks-service-11'].get_logs())"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#clean up resources\n",
"#aks_target = AksCompute(name='jaya-aks-1',workspace=ws)\n",
"#aks_target.delete()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#alternate code to clean up resources\n",
"#!az aks delete --resource-group jayavienna --name jaya-aks-2 --yes"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#for s in ws.webservices():\n",
"# print(s.name)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#s = Webservice(ws, 'jaya-aks-service-2')\n",
"#s.delete()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#from azureml.core import Workspace\n",
"#from azureml.core.compute import AksCompute, ComputeTarget\n",
"\n",
"#ws = Workspace.from_config()\n",
"\n",
"#for c in ws.compute_targets():\n",
"# print(c.name)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.6"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

Просмотреть файл

@ -1,342 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Test web application locally¶ \n",
"This notebook pulls some images and tests them against the local web app running inside the Docker container we made previously."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"from testing_utilities import to_img, img_url_to_json, plot_predictions\n",
"import requests\n",
"from dotenv import get_key, find_dotenv\n",
"from azureml._model_management._constants import MMS_WORKSPACE_API_VERSION\n",
"from azureml._model_management._util import (get_docker_client, pull_docker_image, get_docker_port, \n",
" container_scoring_call, cleanup_container)\n",
"from azureml.core import Workspace\n",
"from testing_utilities import img_url_to_json\n",
"import json\n",
"import docker\n",
"%matplotlib inline"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import azureml.core\n",
"print(azureml.core.VERSION)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"my_workspace = Workspace.from_config()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"image = my_workspace.images['image1']"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"image_name = image.image_location"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"registry_name = my_workspace.get_details()['containerRegistry'].split('/')[-1]\n",
"query_results = !az acr show -n {registry_name} -g {my_workspace.resource_group} --query loginServer -o tsv\n",
"acr_server = query_results.s\n",
"query_results = !az acr credential show -n {registry_name} -g {my_workspace.resource_group}\n",
"acr_credentials = json.loads(query_results.s)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"dc = get_docker_client(acr_credentials['username'], \n",
" acr_credentials['passwords'][0]['value'], \n",
" acr_server)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"pull_docker_image(dc, image_name, acr_credentials['username'], acr_credentials['passwords'][0]['value'])"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Run the Docker container in the background and open port 80. Notice we are using the nvidia-docker command and not the docker command."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"container_labels = {'containerName': 'pytorchgpu'}\n",
"container = dc.containers.run(image_name, \n",
" detach=True, \n",
" ports={'5001/tcp': 80},\n",
" labels=container_labels,\n",
" runtime='nvidia' )"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# TODO Add wait function\n",
"container_logs = container.logs().decode('UTF8')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%time container_logs.find(\"Users's init has completed successfully\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(container.logs().decode('UTF8'))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"client = docker.APIClient()\n",
"details = client.inspect_container(container.id)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"service_ip = details['NetworkSettings']['Ports']['5001/tcp'][0]['HostIp']\n",
"service_port = details['NetworkSettings']['Ports']['5001/tcp'][0]['HostPort']"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Wait a few seconds for the application to spin up and then check that everything works."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print('Checking service on {} port {}'.format(service_ip, service_port))\n",
"!curl 'http://{service_ip}:{service_port}/'"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"IMAGEURL = \"https://upload.wikimedia.org/wikipedia/commons/thumb/6/68/Lynx_lynx_poing.jpg/220px-Lynx_lynx_poing.jpg\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"plt.imshow(to_img(IMAGEURL))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"jsonimg = img_url_to_json(IMAGEURL)\n",
"jsonimg[:100]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"headers = {\"content-type\": \"application/json\"}\n",
"%time r = requests.post('http://0.0.0.0:80/score', data=jsonimg, headers=headers)\n",
"print(r)\n",
"r.json()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Let's try a few more images."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"images = (\n",
" \"https://upload.wikimedia.org/wikipedia/commons/thumb/6/68/Lynx_lynx_poing.jpg/220px-Lynx_lynx_poing.jpg\",\n",
" \"https://upload.wikimedia.org/wikipedia/commons/3/3a/Roadster_2.5_windmills_trimmed.jpg\",\n",
" \"https://upload.wikimedia.org/wikipedia/commons/thumb/e/e6/Harmony_of_the_Seas_%28ship%2C_2016%29_001.jpg/1920px-Harmony_of_the_Seas_%28ship%2C_2016%29_001.jpg\",\n",
" \"http://yourshot.nationalgeographic.com/u/ss/fQYSUbVfts-T7pS2VP2wnKyN8wxywmXtY0-FwsgxpiZv_E9ZfPsNV5B0ER8-bOdruvNfMD5EbP4SznWz4PYn/\",\n",
" \"https://cdn.arstechnica.net/wp-content/uploads/2012/04/bohol_tarsier_wiki-4f88309-intro.jpg\",\n",
" \"http://i.telegraph.co.uk/multimedia/archive/03233/BIRDS-ROBIN_3233998b.jpg\",\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"url = \"http://0.0.0.0:80/score\"\n",
"results = [\n",
" requests.post(url, data=img_url_to_json(img), headers=headers) for img in images\n",
"]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"plot_predictions(images, results)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"image_data = list(map(img_url_to_json, images)) # Retrieve the images and data"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"timer_results = list()\n",
"for img in image_data:\n",
" res=%timeit -r 1 -o -q requests.post(url, data=img, headers=headers)\n",
" timer_results.append(res.best)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"timer_results"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(\"Average time taken: {0:4.2f} ms\".format(10 ** 3 * np.mean(timer_results)))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"container.stop()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.6"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

Различия файлов скрыты, потому что одна или несколько строк слишком длинны

Просмотреть файл

@ -1,229 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Test Pretrained Model in a Separate Conda Environment from Local Host\n",
"\n",
"Objective:\n",
"- Produce the yml file to be used to create the docker image\n",
"- Test the pretrained model in the conda environment generated from this yml file"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import azureml.core\n",
"from azureml.core.workspace import Workspace\n",
"from azureml.core import Experiment\n",
"from azureml.core import ScriptRunConfig"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Check core SDK version number\n",
"print(\"SDK version:\", azureml.core.VERSION)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# define variables\n",
"experiment_name = 'train-on-local'\n",
"env_name = 'myenv.yml'\n",
"sample_projects_folder = os.path.join(os.getcwd(),'sample_projects')\n",
"project_folder = os.path.join(sample_projects_folder, 'train-on-local')\n",
"script_name = 'predict.py'\n",
"script_path = os.path.join(project_folder, script_name)\n",
"deploy_folder = os.path.join(os.getcwd(), 'deploy')\n",
"\n",
"os.makedirs(project_folder, exist_ok=True)\n",
"print('Sample projects will be created in {}.'.format(sample_projects_folder))\n",
"\n",
"os.makedirs(deploy_folder, exist_ok=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ws = Workspace.from_config()\n",
"print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\\n')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# create an azureml experiment\n",
"exp = Experiment(workspace=ws, name=experiment_name)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%%writefile $script_path\n",
"\n",
"#Develop the model\n",
"import tensorflow as tf\n",
"import keras\n",
"from resnet152 import ResNet152\n",
"from keras.preprocessing import image\n",
"from keras.applications.imagenet_utils import preprocess_input, decode_predictions\n",
"import numpy as np\n",
"from PIL import Image\n",
"import wget\n",
"\n",
"model = ResNet152(weights='imagenet')\n",
"\n",
"#model.summary( )\n",
"\n",
"wget.download('https://upload.wikimedia.org/wikipedia/commons/thumb/6/68/Lynx_lynx_poing.jpg/220px-Lynx_lynx_poing.jpg')\n",
"\n",
"img_path = '220px-Lynx_lynx_poing.jpg'\n",
"#print(Image.open(img_path).size)\n",
"Image.open(img_path)\n",
"\n",
"img = image.load_img(img_path, target_size=(224, 224))\n",
"img = image.img_to_array(img)\n",
"img = np.expand_dims(img, axis=0)\n",
"img = preprocess_input(img)\n",
"\n",
"preds = model.predict(img)\n",
"print('Predicted:', decode_predictions(preds, top=3))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create environment yml file"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# System managed runs\n",
"from azureml.core.runconfig import RunConfiguration\n",
"from azureml.core.conda_dependencies import CondaDependencies\n",
"\n",
"# Editing a run configuration property on-fly.\n",
"#run_config = RunConfiguration.load(project_object = project, run_config_name = \"local\")\n",
"run_config_system_managed = RunConfiguration()\n",
"\n",
"# Use a new conda environment that is to be created from the conda_dependencies.yml file\n",
"#run_config.environment.python.user_managed_dependencies = False\n",
"\n",
"# Automatically create the conda environment before the run\n",
"#run_config.prepare_environment = True\n",
"\n",
"run_config_system_managed.environment.python.user_managed_dependencies = False\n",
"run_config_system_managed.auto_prepare_environment = True\n",
"\n",
"# add scikit-learn to the conda_dependencies.yml file\n",
"cd = CondaDependencies()\n",
"cd.add_conda_package('scikit-learn')\n",
"cd.add_conda_package('python=3.6')\n",
"cd.add_conda_package('tensorflow')\n",
"cd.add_conda_package('tornado==4.5.3')\n",
"\n",
"cd.add_pip_package('papermill==0.14.1')\n",
"cd.add_pip_package('python-dotenv==0.9.0')\n",
"cd.add_pip_package('Pillow==5.2.0')\n",
"cd.add_pip_package('wget==3.2')\n",
"cd.add_pip_package('aiohttp==3.3.2')\n",
"cd.add_pip_package('toolz==0.9.0')\n",
"cd.add_pip_package('tqdm==4.23.4')\n",
"cd.add_pip_package('azure-cli==2.0.41')\n",
"#cd.add_pip_package('tensorflow')\n",
"cd.add_pip_package('keras==2.2.0')\n",
"\n",
"#cd.save_to_file(project_dir = project_folder, conda_file_path = run_config.environment.python.conda_dependencies_file)\n",
"\n",
"run_config_system_managed.environment.python.conda_dependencies = cd"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# produce the yml file for deploying to AKS cluster\n",
"with open(os.path.join(deploy_folder, env_name),\"w\") as f:\n",
" f.write(cd.serialize_to_string())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## System managed runs\n",
"The objective is to test the script in a conda environment configured using the yml file."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"src = ScriptRunConfig(source_directory=project_folder, script=script_name, run_config=run_config_system_managed)\n",
"run = exp.submit(src)\n",
"run.wait_for_completion(show_output = True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"run"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.6"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

Просмотреть файл

@ -1,2 +0,0 @@
# make sure the image has GLIBCXX_3.4.22
RUN add-apt-repository ppa:ubuntu-toolchain-r/test && apt-get update && apt-get install -y gcc-4.9 && apt-get upgrade libstdc++6 -y