Improves formatting based on black, consolidates imports

This commit is contained in:
MSalvaris 2018-10-08 11:14:27 +01:00
Parent 05ce5d72f4
Commit 9c3cea0aac
6 changed files with 119 additions and 93 deletions

View file

@@ -23,16 +23,15 @@
# In this notebook, we will go through the steps to load the ResNet152 model, pre-process the images into the required format, and call the model to find the top predictions.
# +
import PIL
import numpy as np
import torch
import torch.nn as nn
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import numpy as np
import PIL
from PIL import Image
import wget
from PIL import Image
from torchvision import models, transforms
# -
print(torch.__version__)
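For context, a minimal CPU-only sketch of what the consolidated imports above support; it is not part of the commit, the image path is a placeholder, and it mirrors the preprocessing pipeline that appears later in driver.py (the driver itself runs on CUDA):

import PIL
import torch
import torch.nn as nn
import torchvision
from PIL import Image
from torchvision import models, transforms

# Load the pretrained ResNet152 in eval mode and build the same transform
# pipeline the driver uses.
model = models.resnet152(pretrained=True).eval()
preprocess = transforms.Compose(
    [
        torchvision.transforms.Resize((224, 224), interpolation=PIL.Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ]
)
img = Image.open("lynx.jpg").convert("RGB")  # placeholder local file
with torch.no_grad():
    batch = preprocess(img).unsqueeze(0)  # shape (1, 3, 224, 224)
    probs = nn.Softmax(dim=1)(model(batch))
print(probs.topk(3))  # top-3 class probabilities and indices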

View file

@@ -22,11 +22,6 @@
# In this notebook, we will develop the API that will call our model. This module initializes the model, transforms the input so that it is in the appropriate format, and defines the scoring method that will produce the predictions. The API will expect the input to be in JSON format. Once a request is received, the API will convert the JSON-encoded request body into the image format. There are two main functions in the API. The first function loads the model and returns a scoring function. The second function processes the images and uses the first function to score them.
import logging
from testing_utilities import img_url_to_json
# We use the writefile magic to write the contents of the cell below to driver.py, which includes the driver methods.
# +
# %%writefile driver.py
import base64
@@ -36,21 +31,24 @@ import os
import timeit as t
from io import BytesIO
from pprint import pprint
import PIL
import numpy as np
import torch
import torch.nn as nn
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import PIL
from PIL import Image, ImageOps
from PIL import Image
from testing_utilities import img_url_to_json
from torchvision import models, transforms
_LABEL_FILE = os.getenv('LABEL_FILE', "synset.txt")
# We use the writefile magic to write the contents of the cell below to driver.py, which includes the driver methods.
_LABEL_FILE = os.getenv("LABEL_FILE", "synset.txt")
_NUMBER_RESULTS = 3
def _create_label_lookup(label_path):
with open(label_path, 'r') as f:
with open(label_path, "r") as f:
label_list = [l.rstrip() for l in f]
def _label_lookup(*label_locks):
@@ -66,16 +64,18 @@ def _load_model():
softmax = nn.Softmax(dim=1).cuda()
model = model.eval()
preprocess_input = transforms.Compose([
preprocess_input = transforms.Compose(
[
torchvision.transforms.Resize((224, 224), interpolation=PIL.Image.BICUBIC),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
)
def predict_for(image):
image = preprocess_input(image)
with torch.no_grad():
image = image.expand(1,3,224,224)
image = image.expand(1, 3, 224, 224)
image_gpu = image.type(torch.float).cuda()
outputs = model(image_gpu)
pred_proba = softmax(outputs)
@@ -85,17 +85,16 @@ def _load_model():
def _base64img_to_pil_image(base64_img_string):
if base64_img_string.startswith('b\''):
if base64_img_string.startswith("b'"):
base64_img_string = base64_img_string[2:-1]
base64Img = base64_img_string.encode('utf-8')
base64Img = base64_img_string.encode("utf-8")
# Preprocess the input data
startPreprocess = t.default_timer()
decoded_img = base64.b64decode(base64Img)
img_buffer = BytesIO(decoded_img)
# Load image with PIL (RGB)
pil_img = Image.open(img_buffer).convert('RGB')
pil_img = Image.open(img_buffer).convert("RGB")
return pil_img
@@ -107,7 +106,7 @@ def create_scoring_func(label_path=_LABEL_FILE):
predict_for = _load_model()
end = t.default_timer()
loadTimeMsg = "Model loading time: {0} ms".format(round((end-start)*1000, 2))
loadTimeMsg = "Model loading time: {0} ms".format(round((end - start) * 1000, 2))
logger.info(loadTimeMsg)
def call_model(image, number_results=_NUMBER_RESULTS):
@@ -115,6 +114,7 @@ def create_scoring_func(label_path=_LABEL_FILE):
selected_results = np.flip(np.argsort(pred_proba), 0)[:number_results]
labels = labels_for(*selected_results)
return list(zip(labels, pred_proba[selected_results].astype(np.float64)))
return call_model
@@ -128,17 +128,21 @@ def get_model_api():
results = {}
for key, base64_img_string in images_dict.items():
rgb_image = _base64img_to_pil_image(base64_img_string)
results[key]=scoring_func(rgb_image, number_results=_NUMBER_RESULTS)
results[key] = scoring_func(rgb_image, number_results=number_results)
end = t.default_timer()
logger.info("Predictions: {0}".format(results))
logger.info("Predictions took {0} ms".format(round((end-start)*1000, 2)))
return (results, 'Computed in {0} ms'.format(round((end-start)*1000, 2)))
logger.info("Predictions took {0} ms".format(round((end - start) * 1000, 2)))
return (results, "Computed in {0} ms".format(round((end - start) * 1000, 2)))
return process_and_score
def version():
return torch.__version__
# -
# Let's test the module.
@@ -157,7 +161,7 @@ predict_for = get_model_api()
jsonimg = img_url_to_json(IMAGEURL)
json_load_img = json.loads(jsonimg)
body = json_load_img['input']
body = json_load_img["input"]
resp = predict_for(body)
pprint(resp[0])
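For reference, a hedged sketch of building a request body by hand; the exact schema emitted by img_url_to_json is not shown in this diff, so the shape below is inferred from body = json_load_img["input"] and from _base64img_to_pil_image above:

import base64
import json

# Assumed request schema: {"input": {"<key>": "<base64-encoded image bytes>"}}
with open("lynx.jpg", "rb") as f:  # placeholder local file
    b64 = base64.b64encode(f.read()).decode("utf-8")
jsonimg = json.dumps({"input": {"image": b64}})
body = json.loads(jsonimg)["input"]
# resp = predict_for(body)  # then score exactly as in the cell above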

View file

@@ -25,15 +25,15 @@
# %load_ext autoreload
# %autoreload 2
import os
import matplotlib.pyplot as plt
import numpy as np
from testing_utilities import to_img, img_url_to_json, plot_predictions
import requests
from dotenv import get_key
# %matplotlib inline
image_name = get_key('.env', 'docker_login') + '/' +get_key('.env', 'image_repo')
image_name = get_key(".env", "docker_login") + "/" + get_key(".env", "image_repo")
image_name
# Run the Docker container in the background and open port 80. Notice that we are using the nvidia-docker command and not docker.
@@ -57,22 +57,26 @@ plt.imshow(to_img(IMAGEURL))
jsonimg = img_url_to_json(IMAGEURL)
jsonimg[:100]
headers = {'content-type': 'application/json'}
headers = {"content-type": "application/json"}
# %time r = requests.post('http://0.0.0.0:80/score', data=jsonimg, headers=headers)
print(r)
r.json()
# Let's try a few more images.
images = ('https://upload.wikimedia.org/wikipedia/commons/thumb/6/68/Lynx_lynx_poing.jpg/220px-Lynx_lynx_poing.jpg',
'https://upload.wikimedia.org/wikipedia/commons/3/3a/Roadster_2.5_windmills_trimmed.jpg',
'https://upload.wikimedia.org/wikipedia/commons/thumb/e/e6/Harmony_of_the_Seas_%28ship%2C_2016%29_001.jpg/1920px-Harmony_of_the_Seas_%28ship%2C_2016%29_001.jpg',
'http://yourshot.nationalgeographic.com/u/ss/fQYSUbVfts-T7pS2VP2wnKyN8wxywmXtY0-FwsgxpiZv_E9ZfPsNV5B0ER8-bOdruvNfMD5EbP4SznWz4PYn/',
'https://cdn.arstechnica.net/wp-content/uploads/2012/04/bohol_tarsier_wiki-4f88309-intro.jpg',
'http://i.telegraph.co.uk/multimedia/archive/03233/BIRDS-ROBIN_3233998b.jpg')
images = (
"https://upload.wikimedia.org/wikipedia/commons/thumb/6/68/Lynx_lynx_poing.jpg/220px-Lynx_lynx_poing.jpg",
"https://upload.wikimedia.org/wikipedia/commons/3/3a/Roadster_2.5_windmills_trimmed.jpg",
"https://upload.wikimedia.org/wikipedia/commons/thumb/e/e6/Harmony_of_the_Seas_%28ship%2C_2016%29_001.jpg/1920px-Harmony_of_the_Seas_%28ship%2C_2016%29_001.jpg",
"http://yourshot.nationalgeographic.com/u/ss/fQYSUbVfts-T7pS2VP2wnKyN8wxywmXtY0-FwsgxpiZv_E9ZfPsNV5B0ER8-bOdruvNfMD5EbP4SznWz4PYn/",
"https://cdn.arstechnica.net/wp-content/uploads/2012/04/bohol_tarsier_wiki-4f88309-intro.jpg",
"http://i.telegraph.co.uk/multimedia/archive/03233/BIRDS-ROBIN_3233998b.jpg",
)
url = 'http://0.0.0.0:80/score'
results = [requests.post(url, data=img_url_to_json(img), headers=headers) for img in images]
url = "http://0.0.0.0:80/score"
results = [
requests.post(url, data=img_url_to_json(img), headers=headers) for img in images
]
plot_predictions(images, results)
@@ -87,7 +91,7 @@ for img in image_data:
timer_results
print('Average time taken: {0:4.2f} ms'.format(10**3 * np.mean(timer_results)))
print("Average time taken: {0:4.2f} ms".format(10 ** 3 * np.mean(timer_results)))
# + {"active": "ipynb", "language": "bash"}
# docker stop $(docker ps -q)
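The loop that fills timer_results is elided from the hunk above; a hypothetical reconstruction (image_data, the endpoint, and the loop body are all assumptions) would look roughly like:

import requests
from timeit import default_timer

url = "http://0.0.0.0:80/score"
headers = {"content-type": "application/json"}
timer_results = []
for img in image_data:  # image_data: assumed list of JSON request bodies
    start = default_timer()
    requests.post(url, data=img, headers=headers)
    timer_results.append(default_timer() - start)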

View file

@@ -31,7 +31,6 @@
#
# This guide is designed to be run on Linux and requires that the Azure CLI is installed.
import os
import json
from testing_utilities import write_json_to_file
from dotenv import set_key, get_key
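The dotenv helpers imported here read and write keys in a .env file, which is how these notebooks pass settings between steps; a minimal sketch (the key name and value are hypothetical, and the .env file is assumed to exist already):

from dotenv import set_key, get_key

set_key(".env", "image_repo", "pytorch-gpu")  # writes image_repo="pytorch-gpu"
print(get_key(".env", "image_repo"))  # -> pytorch-gpu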

View file

@@ -27,14 +27,15 @@ import numpy as np
from testing_utilities import img_url_to_json, to_img, plot_predictions
import requests
import json
# %matplotlib inline
service_json = !kubectl get service azure-dl -o json
service_dict = json.loads(''.join(service_json))
app_url = service_dict['status']['loadBalancer']['ingress'][0]['ip']
# service_json = !kubectl get service azure-dl -o json
service_dict = json.loads("".join(service_json))
app_url = service_dict["status"]["loadBalancer"]["ingress"][0]["ip"]
scoring_url = 'http://{}/score'.format(app_url)
version_url = 'http://{}/version'.format(app_url)
scoring_url = "http://{}/score".format(app_url)
version_url = "http://{}/version".format(app_url)
# Quickly check the web application is working
@@ -47,10 +48,12 @@ IMAGEURL = "https://upload.wikimedia.org/wikipedia/commons/thumb/6/68/Lynx_lynx_
plt.imshow(to_img(IMAGEURL))
# headers = {'content-type': 'application/json','X-Marathon-App-Id': app_id}
headers = {'content-type': 'application/json'}
headers = {"content-type": "application/json"}
jsonimg = img_url_to_json(IMAGEURL)
r = requests.post(scoring_url, data=jsonimg, headers=headers) # Run the request twice since the first time takes a
# little longer due to the loading of the model
r = requests.post(
scoring_url, data=jsonimg, headers=headers
) # Run the request twice since the first time takes a
# little longer due to the loading of the model
# %time r = requests.post(scoring_url, data=jsonimg, headers=headers)
r.json()
@@ -59,14 +62,19 @@ r.json()
# Let's try a few more images
images = ('https://upload.wikimedia.org/wikipedia/commons/thumb/6/68/Lynx_lynx_poing.jpg/220px-Lynx_lynx_poing.jpg',
'https://upload.wikimedia.org/wikipedia/commons/3/3a/Roadster_2.5_windmills_trimmed.jpg',
'https://upload.wikimedia.org/wikipedia/commons/thumb/e/e6/Harmony_of_the_Seas_%28ship%2C_2016%29_001.jpg/1920px-Harmony_of_the_Seas_%28ship%2C_2016%29_001.jpg',
'http://yourshot.nationalgeographic.com/u/ss/fQYSUbVfts-T7pS2VP2wnKyN8wxywmXtY0-FwsgxpiZv_E9ZfPsNV5B0ER8-bOdruvNfMD5EbP4SznWz4PYn/',
'https://cdn.arstechnica.net/wp-content/uploads/2012/04/bohol_tarsier_wiki-4f88309-intro.jpg',
'http://i.telegraph.co.uk/multimedia/archive/03233/BIRDS-ROBIN_3233998b.jpg')
images = (
"https://upload.wikimedia.org/wikipedia/commons/thumb/6/68/Lynx_lynx_poing.jpg/220px-Lynx_lynx_poing.jpg",
"https://upload.wikimedia.org/wikipedia/commons/3/3a/Roadster_2.5_windmills_trimmed.jpg",
"https://upload.wikimedia.org/wikipedia/commons/thumb/e/e6/Harmony_of_the_Seas_%28ship%2C_2016%29_001.jpg/1920px-Harmony_of_the_Seas_%28ship%2C_2016%29_001.jpg",
"http://yourshot.nationalgeographic.com/u/ss/fQYSUbVfts-T7pS2VP2wnKyN8wxywmXtY0-FwsgxpiZv_E9ZfPsNV5B0ER8-bOdruvNfMD5EbP4SznWz4PYn/",
"https://cdn.arstechnica.net/wp-content/uploads/2012/04/bohol_tarsier_wiki-4f88309-intro.jpg",
"http://i.telegraph.co.uk/multimedia/archive/03233/BIRDS-ROBIN_3233998b.jpg",
)
results = [requests.post(scoring_url, data=img_url_to_json(img), headers=headers) for img in images]
results = [
requests.post(scoring_url, data=img_url_to_json(img), headers=headers)
for img in images
]
plot_predictions(images, results)
@@ -83,6 +91,6 @@ for img in image_data:
timer_results
print('Average time taken: {0:4.2f} ms'.format(10**3 * np.mean(timer_results)))
print("Average time taken: {0:4.2f} ms".format(10 ** 3 * np.mean(timer_results)))
# We have tested that the model works and we can move on to the [next notebook to get a sense of its throughput](06_SpeedTestWebApp.ipynb)
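The bang-line parsing above depends on the notebook shell; an equivalent plain-Python sketch (assuming kubectl is on the PATH and the azure-dl service exposes a LoadBalancer ingress):

import json
import subprocess

# Equivalent to the notebook's `!kubectl get service azure-dl -o json`.
raw = subprocess.check_output(["kubectl", "get", "service", "azure-dl", "-o", "json"])
service_dict = json.loads(raw)
app_url = service_dict["status"]["loadBalancer"]["ingress"][0]["ip"]
scoring_url = "http://{}/score".format(app_url)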

View file

@@ -25,7 +25,6 @@
# +
import asyncio
import json
import urllib.request
from timeit import default_timer
import aiohttp
@@ -46,11 +45,11 @@ CONCURRENT_REQUESTS = 4 # Number of requests at a time
# Get the IP address of our service
service_json = !kubectl get service azure-dl -o json
service_dict = json.loads(''.join(service_json))
app_url = service_dict['status']['loadBalancer']['ingress'][0]['ip']
service_dict = json.loads("".join(service_json))
app_url = service_dict["status"]["loadBalancer"]["ingress"][0]["ip"]
scoring_url = 'http://{}/score'.format(app_url)
version_url = 'http://{}/version'.format(app_url)
scoring_url = "http://{}/score".format(app_url)
version_url = "http://{}/version".format(app_url)
!curl $version_url # Reports the PyTorch version
@@ -59,32 +58,40 @@ plt.imshow(to_img(IMAGEURL))
# Here, we use variations of the same image to test the service.
url_list = [[scoring_url, jsonimg] for jsonimg in gen_variations_of_one_image(IMAGEURL, NUMBER_OF_REQUESTS)]
url_list = [
[scoring_url, jsonimg]
for jsonimg in gen_variations_of_one_image(IMAGEURL, NUMBER_OF_REQUESTS)
]
def decode(result):
return json.loads(result.decode("utf-8"))
async def fetch(url, session, data, headers):
start_time = default_timer()
async with session.request('post', url, data=data, headers=headers) as response:
async with session.request("post", url, data=data, headers=headers) as response:
resp = await response.read()
elapsed = default_timer() - start_time
return resp, elapsed
async def bound_fetch(sem, url, session, data, headers):
# Getter function with semaphore.
async with sem:
return await fetch(url, session, data, headers)
async def await_with_progress(coros):
results=[]
results = []
for f in tqdm(asyncio.as_completed(coros), total=len(coros)):
result = await f
results.append((decode(result[0]),result[1]))
results.append((decode(result[0]), result[1]))
return results
async def run(url_list, num_concurrent=CONCURRENT_REQUESTS):
headers = {'content-type': 'application/json'}
headers = {"content-type": "application/json"}
tasks = []
# create instance of Semaphore
sem = asyncio.Semaphore(num_concurrent)
@@ -98,21 +105,26 @@ async def run(url_list, num_concurrent=CONCURRENT_REQUESTS):
tasks.append(task)
return await await_with_progress(tasks)
# Below we run the 100 requests against our deployed service
loop = asyncio.get_event_loop()
start_time = default_timer()
complete_responses = loop.run_until_complete(asyncio.ensure_future(run(url_list, num_concurrent=CONCURRENT_REQUESTS)))
complete_responses = loop.run_until_complete(
asyncio.ensure_future(run(url_list, num_concurrent=CONCURRENT_REQUESTS))
)
elapsed = default_timer() - start_time
print('Total Elapsed {}'.format(elapsed))
print('Avg time taken {0:4.2f} ms'.format(1000*elapsed/len(url_list)))
print("Total Elapsed {}".format(elapsed))
print("Avg time taken {0:4.2f} ms".format(1000 * elapsed / len(url_list)))
# Below we can see the output of some of our calls
complete_responses[:3]
num_successful=[i[0]['result'][0]['image'][0][0] for i in complete_responses].count('n02127052 lynx, catamount')
print('Successful {} out of {}'.format(num_successful, len(url_list)))
num_successful = [i[0]["result"][0]["image"][0][0] for i in complete_responses].count(
"n02127052 lynx, catamount"
)
print("Successful {} out of {}".format(num_successful, len(url_list)))
# Example response
plt.imshow(to_img(IMAGEURL))
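The core pattern of the load test above is semaphore-bounded fan-out; a self-contained sketch, using a stand-in coroutine in place of the real aiohttp POST (the names and the sleep duration below are illustrative):

import asyncio
from timeit import default_timer

CONCURRENT_REQUESTS = 4

async def fake_score(i):
    await asyncio.sleep(0.1)  # stand-in for the aiohttp POST round trip
    return i

async def bound_fetch(sem, coro):
    async with sem:  # at most CONCURRENT_REQUESTS coroutines run at once
        return await coro

async def run(n, num_concurrent=CONCURRENT_REQUESTS):
    sem = asyncio.Semaphore(num_concurrent)
    tasks = [bound_fetch(sem, fake_score(i)) for i in range(n)]
    return await asyncio.gather(*tasks)

start = default_timer()
asyncio.get_event_loop().run_until_complete(run(16))
print("Elapsed {0:4.2f} s".format(default_timer() - start))  # ~0.4 s: 16 requests / 4 concurrent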