This commit is contained in:
fboylu 2019-01-25 19:27:00 +00:00
Родитель 8c4ef33fa0
Коммит 77c036064c
6 изменённых файлов: 1715 добавлений и 5 удалений

Различия файлов скрыты, потому что одна или несколько строк слишком длинны

Просмотреть файл

@ -0,0 +1,302 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Develop Model Driver\n",
"\n",
"In this notebook, we will develop the API that will call our model. This module initializes the model, transforms the input so that it is in the appropriate format and defines the scoring method that will produce the predictions. The API will expect the input to be in JSON format. Once a request is received, the API will convert the JSON-encoded request body into the image format. There are two main functions in the API: init() and run(). The init() function loads the model and returns a scoring function. The run() function processes the images and uses the first function to score them.\n",
"\n",
" Note: Always make sure you don't have any lingering notebooks running (Shutdown previous notebooks). Otherwise it may cause GPU memory issue."
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Workspace\n",
"from azureml.core.compute import AksCompute, ComputeTarget\n",
"from azureml.core.webservice import Webservice, AksWebservice\n",
"from azureml.core.image import Image\n",
"from azureml.core.model import Model\n",
"from dotenv import set_key, get_key, find_dotenv\n",
"import logging\n",
"from testing_utilities import img_url_to_json"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import keras\n",
"import tensorflow\n",
"print(\"Keras: \", keras.__version__)\n",
"print(\"Tensorflow: \", tensorflow.__version__)"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"env_path = find_dotenv(raise_error_if_not_found=True)"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"_MODEL_NAME = get_key(env_path, 'model_name')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Write and save driver script"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Overwriting driver.py\n"
]
}
],
"source": [
"%%writefile driver.py\n",
"\n",
"import tensorflow as tf\n",
"from resnet152 import ResNet152\n",
"from keras.preprocessing import image\n",
"from keras.applications.imagenet_utils import preprocess_input, decode_predictions\n",
"\n",
"from azureml.core.model import Model\n",
"\n",
"import numpy as np\n",
"import timeit as t\n",
"import base64\n",
"import json\n",
"from PIL import Image, ImageOps\n",
"from io import BytesIO\n",
"import logging\n",
"\n",
"number_results = 3\n",
"logger = logging.getLogger(\"model_driver\")\n",
"\n",
"def _base64img_to_numpy(base64_img_string):\n",
"    \"\"\"Decode a base64-encoded image string into a 224x224 RGB numpy array.\"\"\"\n",
"    decoded_img = base64.b64decode(base64_img_string)\n",
"    img_buffer = BytesIO(decoded_img)\n",
"    imageData = Image.open(img_buffer).convert(\"RGB\")\n",
"    img = ImageOps.fit(imageData, (224, 224), Image.ANTIALIAS)\n",
"    img = image.img_to_array(img)\n",
"    return img\n",
"\n",
"def create_scoring_func():\n",
"    \"\"\" Initialize ResNet 152 Model \n",
"    \"\"\" \n",
"    start = t.default_timer()\n",
"    model_name = 'resnet_model'\n",
"    model_path = Model.get_model_path(model_name)\n",
"    model = ResNet152()\n",
"    model.load_weights(model_path)\n",
"    end = t.default_timer()\n",
"    \n",
"    loadTimeMsg = \"Model loading time: {0} ms\".format(round((end-start)*1000, 2))\n",
"    logger.info(loadTimeMsg)\n",
"    \n",
"    def call_model(img_array):\n",
"        img_array = np.expand_dims(img_array, axis=0)\n",
"        img_array = preprocess_input(img_array)\n",
"        preds = model.predict(img_array)\n",
"        preds = decode_predictions(preds, top=number_results)[0] \n",
"        return preds\n",
"    \n",
"    return call_model \n",
"\n",
"\n",
"def tuple_float_to_str(x):\n",
"    \"\"\"tuple x = ('n02127052', 'lynx', 0.9816483) convert the 3rd element type numpy.float32 to string\n",
"    return: ('n02127052', 'lynx', '0.9816483')\n",
"    \"\"\"\n",
"    a = list(x)\n",
"    a[2] = str(a[2])\n",
"    b = tuple(a)\n",
"    return b\n",
"    \n",
"def get_model_api():\n",
"    logger = logging.getLogger(\"model_driver\")\n",
"    scoring_func = create_scoring_func()\n",
"    \n",
"    def process_and_score(inputString):\n",
"        \"\"\" Classify the input using the loaded model\n",
"        \"\"\"\n",
"        start = t.default_timer()\n",
"\n",
"        base64Dict = json.loads(inputString)\n",
"        # Unpack the single {file_name: base64_image} entry of the request\n",
"        # into the names used below (fixes NameError on base64Img/img_file_name).\n",
"        img_file_name, base64Img = next(iter(base64Dict.items()))\n",
"        img_array = _base64img_to_numpy(base64Img)\n",
"        preds = scoring_func(img_array)\n",
"        \n",
"        # convert the 3rd element type of preds items from numpy.float32 to string\n",
"        for i, s in enumerate(preds):\n",
"            preds[i] = tuple_float_to_str(s)\n",
"\n",
"        responses = {img_file_name: preds}\n",
"        \n",
"        end = t.default_timer()\n",
"        \n",
"        logger.info(\"Predictions: {0}\".format(responses))\n",
"        logger.info(\"Predictions took {0} ms\".format(round((end-start)*1000, 2)))\n",
"        return (responses, \"Computed in {0} ms\".format(round((end-start)*1000, 2)))\n",
"    return process_and_score\n",
"\n",
"def init():\n",
"    \"\"\" Initialise the model and scoring function\n",
"    \"\"\"\n",
"    global process_and_score\n",
"    process_and_score = get_model_api()\n",
"    \n",
"def run(raw_data):\n",
"    \"\"\" Make a prediction based on the data passed in using the preloaded model\n",
"    \"\"\"\n",
"    return process_and_score(json.dumps(json.loads(raw_data)['input']))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Test the driver\n",
"We test the driver by passing data."
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"logging.basicConfig(level=logging.DEBUG)"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"%run driver.py"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Let's load the workspace."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ws = Workspace.from_config()\n",
"print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep=\"\\n\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model_path = Model.get_model_path(_MODEL_NAME, _workspace=ws)"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
"IMAGEURL = \"https://upload.wikimedia.org/wikipedia/commons/thumb/6/68/Lynx_lynx_poing.jpg/220px-Lynx_lynx_poing.jpg\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Always make sure you don't have any lingering notebooks running. Otherwise it may cause GPU memory issue.\n",
"init()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"jsonimg = img_url_to_json(IMAGEURL)\n",
"resp = run(jsonimg)"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [],
"source": [
"# Clear GPU memory\n",
"from keras import backend as K\n",
"K.clear_session()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Next, we will [build a docker image with this model driver and other supporting files](03_BuildImage.ipynb)."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.14"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

Различия файлов скрыты, потому что одна или несколько строк слишком длинны

Различия файлов скрыты, потому что одна или несколько строк слишком длинны

Просмотреть файл

@ -0,0 +1,131 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Tear it all down\n",
"Once you are done with your cluster you can use the following two commands to destroy it all."
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Workspace\n",
"from azureml.core.compute import AksCompute, ComputeTarget\n",
"from dotenv import set_key, get_key, find_dotenv"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ws = Workspace.from_config()\n",
"print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep=\"\\n\")"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"env_path = find_dotenv(raise_error_if_not_found=True)\n",
"resource_group = get_key(env_path, 'resource_group')\n",
"aks_name = get_key(env_path, 'aks_name')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Once you are done with your cluster you can use the following command to delete the AKS cluster. This step may take a few minutes."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"aks_target = AksCompute(name=aks_name,workspace=ws)\n",
"aks_aml_name = aks_target.cluster_resource_id.rsplit(\"/\")[-1]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!az aks delete -n $aks_aml_name -g $resource_group -y"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Finally, you should delete the resource group. This also deletes the AKS cluster and can be used instead of the above command if the resource group is only used for this purpose."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!az group delete --name $resource_group -y"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[31mThe scope '/subscriptions/edf507a2-6235-46c5-b560-fd463ba2e771/resourcegroups/yanzbaimm' cannot perform delete operation because following scope(s) are locked: '/subscriptions/edf507a2-6235-46c5-b560-fd463ba2e771/resourceGroups/yanzbaimm'. Please remove the lock and try again.\u001b[0m\r\n"
]
}
],
"source": [
"!az group delete --name yanzbaimm -y"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.14"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

Просмотреть файл

@ -149,8 +149,7 @@
" start = t.default_timer()\n",
"\n",
" base64Dict = json.loads(inputString) \n",
" for k, v in base64Dict.items():\n",
" img_file_name, base64Img = k, v \n",
" k, v = next(iter(base64Dict.items())) \n",
" img_array = _base64img_to_numpy(base64Img)\n",
" preds = scoring_func(img_array)\n",
" \n",
@ -288,14 +287,14 @@
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.7"
"pygments_lexer": "ipython2",
"version": "2.7.14"
}
},
"nbformat": 4,