Updates notebooks with final run through
This commit is contained in:
Parent
d16ad0afeb
Commit
572abd8888

@@ -22,7 +22,6 @@
# In this notebook, we will go through the steps to load the ResNet152 model, pre-process the images to the required format and call the model to find the top predictions.

# +
import PIL
import numpy as np
import torch
@@ -32,14 +31,14 @@ import wget
from PIL import Image
from torchvision import models, transforms

# -

print(torch.__version__)
print(torchvision.__version__)

# We download the synset for the model. This translates the output of the model to a specific label.

!wget "http://data.dmlc.ml/mxnet/models/imagenet/synset.txt"

# We first load the model which we imported from the resnet152 module. This can take about 10s.
# We first load the model which we imported from torchvision. This can take about 10s.

# %%time
model = models.resnet152(pretrained=True)
@@ -49,6 +48,7 @@ model = models.resnet152(pretrained=True)
model=model.cuda()

print(model)
print('Number of parameters {}'.format(sum([param.view(-1).size()[0] for param in model.parameters()])))

# Let's test our model with an image of a Lynx.

@@ -58,7 +58,7 @@ img_path = '220px-Lynx_lynx_poing.jpg'
print(Image.open(img_path).size)
Image.open(img_path)

# Below, we load the image by resizing to (224, 224) and then preprocessing using the methods from keras preprocessing and imagenet utilities.
# Below, we load the image. Then we compose a transformation which resizes the image to (224, 224), converts it to a PyTorch tensor, and normalizes the pixel values.

img = Image.open(img_path).convert('RGB')
@@ -71,7 +71,7 @@ preprocess_input = transforms.Compose([
img = Image.open(img_path)
img = preprocess_input(img)
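
# The body of the Compose above is elided in this hunk. As a hedged sketch, the
# usual torchvision preprocessing for ImageNet-pretrained models looks like this
# (preprocess_sketch and the statistics below are illustrative, not copied from
# the hidden cell; the mean/std values are the standard ImageNet statistics):
from torchvision import transforms

preprocess_sketch = transforms.Compose([
    transforms.Resize((224, 224)),                    # resize to the network input size
    transforms.ToTensor(),                            # PIL image -> float tensor in [0, 1]
    transforms.Normalize(mean=[0.485, 0.456, 0.406],  # per-channel ImageNet mean
                         std=[0.229, 0.224, 0.225]),  # per-channel ImageNet std
])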

# Now, let's call the model on our image to predict the top 3 labels. This will take a few seconds.
# Let's create a label lookup function to make it easy to look up the classes from the synset file.

def create_label_lookup():
    with open('synset.txt', 'r') as f:
@@ -82,8 +82,12 @@ def create_label_lookup():

label_lookup = create_label_lookup()
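
# The loop that fills the lookup is elided above. A minimal sketch, assuming each
# line of synset.txt has the form "n01440764 tench, Tinca tinca" (a wordnet id
# followed by the human-readable labels); the function name marks it as a sketch:
def create_label_lookup_sketch():
    labels = []
    with open('synset.txt', 'r') as f:
        for line in f:
            labels.append(line.rstrip().split(' ', 1)[1])  # drop the wordnet id
    return labels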

# We will apply softmax to the output of the model to get probabilities for each label

softmax = nn.Softmax(dim=1).cuda()

# Now, let's call the model on our image to predict the top 3 labels. This will take a few seconds.

model = model.eval()

# %%time
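
# The prediction cell is elided here. A hedged sketch of the top-3 lookup, reusing
# model, softmax and label_lookup from above (img is assumed to be the preprocessed
# tensor produced by preprocess_input; .cuda() matches the GPU model):
with torch.no_grad():
    logits = model(img.unsqueeze(0).cuda())  # add a batch dimension and score
probs = softmax(logits)                      # logits -> probabilities
top_probs, top_idx = probs.topk(3)           # three most likely classes
print([(label_lookup[int(i)], float(p)) for i, p in zip(top_idx[0], top_probs[0])])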

@@ -30,7 +30,6 @@ import logging
import os
import timeit as t
from io import BytesIO
from pprint import pprint

import PIL
import numpy as np
@@ -38,7 +37,6 @@ import torch
import torch.nn as nn
import torchvision
from PIL import Image
from testing_utilities import img_url_to_json
from torchvision import models, transforms

# We use the writefile magic to write the contents of the cell below to driver.py which includes the driver methods.
@@ -147,6 +145,10 @@ def version():

# Let's test the module.

import logging
from testing_utilities import img_url_to_json
from pprint import pprint

logging.basicConfig(level=logging.DEBUG)

# We run the file driver.py which will bring everything into the context of the notebook.
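
# The cell itself is elided; a minimal sketch of what it typically looks like.
# version() is one of the driver methods defined above, so after %run it is
# available in the notebook namespace:
# %run driver.py
pprint(version())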

@@ -3,9 +3,9 @@
#   jupytext_format_version: '1.3'
#   jupytext_formats: py:light
#   kernelspec:
#     display_name: Python [conda env:AKSDeploymentKeras]
#     display_name: Python [conda env:AKSDeploymentPytorch]
#     language: python
#     name: conda-env-AKSDeploymentKeras-py
#     name: conda-env-AKSDeploymentPytorch-py
#   language_info:
#     codemirror_mode:
#       name: ipython
@@ -15,7 +15,7 @@
#     name: python
#     nbconvert_exporter: python
#     pygments_lexer: ipython3
#     version: 3.5.5
#     version: 3.6.6
# ---

# # Build Docker image

@@ -27,36 +27,33 @@ import os
from os import path
import json
import shutil
from dotenv import set_key, get_key
from dotenv import set_key, get_key, find_dotenv
from pathlib import Path

# We will be using the following Docker information to push the image to Docker Hub.

env_path = find_dotenv()
if env_path=='':
    Path('.env').touch()
    env_path = find_dotenv()

# +
# "YOUR_DOCKER_LOGIN"

# + {"tags": ["parameters"]}
# %%writefile .env
# This cell is tagged `parameters`
# Please modify the values below as you see fit

# Your docker login and image repository name

# -

set_key('.env', 'docker_login', 'masalvar')
set_key(env_path, "docker_login", "masalvar")

set_key('.env', 'image_repo', 'pytorch-gpu')
set_key(env_path, "image_repo", "pytorch-gpu")

!cat .env

os.makedirs('flaskwebapp', exist_ok=True)
os.makedirs(os.path.join('flaskwebapp', 'nginx'), exist_ok=True)
os.makedirs(os.path.join('flaskwebapp', 'etc'), exist_ok=True)
os.makedirs("flaskwebapp", exist_ok=True)
os.makedirs(os.path.join("flaskwebapp", "nginx"), exist_ok=True)
os.makedirs(os.path.join("flaskwebapp", "etc"), exist_ok=True)

shutil.copy('synset.txt', 'flaskwebapp')
shutil.copy('driver.py', 'flaskwebapp')
os.listdir('flaskwebapp')
shutil.copy("synset.txt", "flaskwebapp")
shutil.copy("driver.py", "flaskwebapp")
os.listdir("flaskwebapp")

# Below, we create the module for the Flask web application.
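
# The %%writefile cell that defines the Flask module is elided. A heavily hedged,
# minimal sketch of the general shape such a scoring app takes (the route names,
# helpers and response format here are illustrative assumptions, not the
# repository's actual driver code):
import json
from flask import Flask, request

app = Flask(__name__)

@app.route("/version", methods=["GET"])
def version():
    import torch
    return torch.__version__

@app.route("/score", methods=["POST"])
def score():
    payload = request.get_json()  # the driver would decode the image from JSON,
    # run the model, and return the top predictions
    return json.dumps({"received_keys": list(payload.keys())})

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=5000)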

@@ -274,13 +271,13 @@ CMD ["supervisord", "-c", "/code/etc/supervisord.conf"]

# The image name below refers to our Docker Hub account. If you wish to push the image to your account make sure you change the docker login.

image_name = get_key('.env', 'docker_login') + '/' +get_key('.env', 'image_repo')
image_name = get_key(env_path, 'docker_login') + '/' +get_key(env_path, 'image_repo')
application_path = 'flaskwebapp'
docker_file_location = path.join(application_path, 'dockerfile')

# Next, we build our Docker image. The output of this cell is cleared from this notebook as it is quite long due to all the installations required to build the image. However, you should make sure you see 'Successfully built' and 'Successfully tagged' messages in the last line of the output when you run the cell.

!docker build -t $image_name -f $docker_file_location $application_path --no-cache
!docker build -t $image_name -f $docker_file_location $application_path

# Below we will push the image created to our Docker Hub registry. Make sure you have already logged in to the appropriate Docker Hub account using the docker login command. If you haven't logged in to the appropriate account, you will get an error.
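
# The push cell is elided; it is typically just the standard CLI call, assuming
# docker login has already been run:
!docker push $image_name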

@@ -23,22 +23,20 @@

# This notebook pulls some images and tests them against the local web app running inside the Docker container we made previously.

# %load_ext autoreload
# %autoreload 2
import matplotlib.pyplot as plt
import numpy as np
from testing_utilities import to_img, img_url_to_json, plot_predictions
import requests
from dotenv import get_key, find_dotenv

# %matplotlib inline

image_name = get_key(".env", "docker_login") + "/" + get_key(".env", "image_repo")
env_path = find_dotenv(raise_error_if_not_found=True)
image_name = get_key(env_path, "docker_login") + "/" + get_key(env_path, "image_repo")
image_name

# Run the Docker container in the background and open port 80. Notice we are using the nvidia-docker command and not docker.

# + {"active": "ipynb", "language": "bash"}
# + {"active": "ipynb"}
# nvidia-docker run -p 80:80 $1
# -

@@ -46,7 +44,7 @@ image_name

!curl 'http://0.0.0.0:80/'

!curl 'http://0.0.0.0:80/version' #reports tensorflow version
!curl 'http://0.0.0.0:80/version' #reports pytorch version

# Pull an image of a Lynx to test our local web app with.
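
# The test cell is elided; a hedged sketch of scoring the local container with
# requests and img_url_to_json. The URL is an assumption based on the
# 220px-Lynx_lynx_poing.jpg file used earlier, and /score is an assumed route:
lynx_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/6/68/Lynx_lynx_poing.jpg/220px-Lynx_lynx_poing.jpg"
jsonimg = img_url_to_json(lynx_url)
headers = {"content-type": "application/json"}
r = requests.post("http://0.0.0.0:80/score", data=jsonimg, headers=headers)
r.json()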

@@ -93,7 +91,7 @@ timer_results

print("Average time taken: {0:4.2f} ms".format(10 ** 3 * np.mean(timer_results)))

# + {"active": "ipynb", "language": "bash"}
# + {"active": "ipynb"}
# docker stop $(docker ps -q)
# -

@@ -33,38 +33,26 @@

import json
from testing_utilities import write_json_to_file
from dotenv import set_key, get_key
from dotenv import set_key, get_key, find_dotenv

# <a id='section1'></a>
# ## Setup
# Below are the various name definitions for the resources needed to set up AKS.

set_key('.env', 'selected_subscription', 'Team Danielle Internal')
set_key('.env', 'resource_group', 'msaksrg')
set_key('.env', 'aks_name', 'msaks')
set_key('.env', 'location', 'eastus')
env_path = find_dotenv(raise_error_if_not_found=True)

# + {"tags": ["parameters"]}
# # %%writefile --append .env
# # This cell is tagged `parameters`
# # Please modify the values below as you see fit
set_key(env_path, 'selected_subscription', 'Team Danielle Internal')
set_key(env_path, 'resource_group', 'msaksrg')
set_key(env_path, 'aks_name', 'msaks')
set_key(env_path, 'location', 'eastus')

# # If you have multiple subscriptions select the subscription you want to use
# selected_subscription = "Team Danielle Internal"

# # Resource group, name and location for AKS cluster.
# resource_group = "mabouaks"
# aks_name = "mabouaks"
# location = "eastus"
# -

image_name = get_key('.env', 'docker_login') + '/' +get_key('.env', 'image_repo')
image_name = get_key(env_path, 'docker_login') + '/' +get_key(env_path, 'image_repo')

# <a id='section2'></a>
# ## Azure account login
# If you are not already logged in to an Azure account, the command below will initiate a login. It will pop up a browser where you can select an Azure account.

# + {"active": "ipynb", "language": "bash"}
# + {"active": "ipynb"}
# list=`az account list -o table`
# if [ "$list" == '[]' ] || [ "$list" == '' ]; then
#     az login -o table
@@ -73,7 +61,7 @@ image_name = get_key('.env', 'docker_login') + '/' +get_key('.env', 'image_repo'
# fi
# -

!az account set --subscription "{get_key('.env', 'selected_subscription')}"
!az account set --subscription "{get_key(env_path, 'selected_subscription')}"

!az account show

@@ -87,13 +75,13 @@ image_name = get_key('.env', 'docker_login') + '/' +get_key('.env', 'image_repo'
# ### Create resource group
# Azure encourages the use of groups to organise all the Azure components you deploy. That way it is easier to find them, and we can also delete a number of resources simply by deleting the group.

!az group create --name {get_key('.env', 'resource_group')} \
                 --location {get_key('.env', 'location')}
!az group create --name {get_key(env_path, 'resource_group')} \
                 --location {get_key(env_path, 'location')}

# Below, we create the AKS cluster in the resource group we created earlier. This can take up to 15 minutes.

!az aks create --resource-group {get_key('.env', 'resource_group')} \
               --name {get_key('.env', 'aks_name')} \
!az aks create --resource-group {get_key(env_path, 'resource_group')} \
               --name {get_key(env_path, 'aks_name')} \
               --node-count 1 \
               --generate-ssh-keys \
               -s Standard_NC6
@@ -109,7 +97,8 @@ image_name = get_key('.env', 'docker_login') + '/' +get_key('.env', 'image_repo'
#
# To configure kubectl to connect to the Kubernetes cluster, run the following command:

!az aks get-credentials --resource-group $resource_group --name $aks_name
!az aks get-credentials --resource-group {get_key(env_path, 'resource_group')}\
                        --name {get_key(env_path, 'aks_name')}

# Let's verify the connection by listing the nodes.
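
# The cell is elided here; listing the nodes is the standard kubectl call:
!kubectl get nodes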

@@ -27,10 +27,9 @@ import numpy as np
from testing_utilities import img_url_to_json, to_img, plot_predictions
import requests
import json

# %matplotlib inline

# service_json = !kubectl get service azure-dl -o json
service_json = !kubectl get service azure-dl -o json
service_dict = json.loads("".join(service_json))
app_url = service_dict["status"]["loadBalancer"]["ingress"][0]["ip"]
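
# scoring_url is used below but its construction is elided; a hedged sketch,
# assuming the service exposes a /score route on port 80:
scoring_url = "http://{}/score".format(app_url)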

@@ -47,13 +46,12 @@ IMAGEURL = "https://upload.wikimedia.org/wikipedia/commons/thumb/6/68/Lynx_lynx_

plt.imshow(to_img(IMAGEURL))

# headers = {'content-type': 'application/json','X-Marathon-App-Id': app_id}
headers = {"content-type": "application/json"}
jsonimg = img_url_to_json(IMAGEURL)
r = requests.post(
    scoring_url, data=jsonimg, headers=headers
)  # Run the request twice since the first time takes a
# little longer due to the loading of the model
# little longer due to the loading of the model
# %time r = requests.post(scoring_url, data=jsonimg, headers=headers)
r.json()

@@ -21,7 +21,9 @@
# # Tear it all down
# Once you are done with your cluster you can use the following two commands to destroy it all.

from dotenv import get_key
from dotenv import get_key, find_dotenv

env_path = find_dotenv(raise_error_if_not_found=True)

# Once you are done with your cluster you can use the following two commands to destroy it all. First, delete the application.

@@ -29,12 +31,10 @@ from dotenv import get_key

# Next, you delete the AKS cluster. This step may take a few minutes.

get_key('.env', 'resource_group')

!az aks delete -n {get_key('.env', 'aks_name')} \
               -g {get_key('.env', 'resource_group')} \
!az aks delete -n {get_key(env_path, 'aks_name')} \
               -g {get_key(env_path, 'resource_group')} \
               -y

# Finally, you should delete the resource group. This also deletes the AKS cluster and can be used instead of the above command if the resource group is only used for this purpose.

!az group delete --name {get_key('.env', 'resource_group')} -y
!az group delete --name {get_key(env_path, 'resource_group')} -y

@@ -1,372 +0,0 @@
# -*- coding: utf-8 -*-
"""ResNet152 model for Keras.

# Reference:

- [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385)

Adaptation of code from flyyufelix, mvoelk, BigMoyan, fchollet at https://github.com/adamcasson/resnet152

"""

import numpy as np
import warnings

from keras.layers import Input
from keras.layers import Dense
from keras.layers import Activation
from keras.layers import Flatten
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import GlobalMaxPooling2D
from keras.layers import ZeroPadding2D
from keras.layers import AveragePooling2D
from keras.layers import GlobalAveragePooling2D
from keras.layers import BatchNormalization
from keras.layers import add
from keras.models import Model
import keras.backend as K
from keras.engine.topology import get_source_inputs
from keras.utils import layer_utils
from keras import initializers
from keras.engine import Layer, InputSpec
from keras.preprocessing import image
from keras.utils.data_utils import get_file
from keras.applications.imagenet_utils import decode_predictions
from keras.applications.imagenet_utils import preprocess_input
from keras.applications.imagenet_utils import _obtain_input_shape

import sys
sys.setrecursionlimit(3000)

WEIGHTS_PATH = 'https://github.com/adamcasson/resnet152/releases/download/v0.1/resnet152_weights_tf.h5'
WEIGHTS_PATH_NO_TOP = 'https://github.com/adamcasson/resnet152/releases/download/v0.1/resnet152_weights_tf_notop.h5'

class Scale(Layer):
    """Custom Layer for ResNet used for BatchNormalization.

    Learns a set of weights and biases used for scaling the input data.
    The output consists simply of an element-wise multiplication of the input
    and a sum of a set of constants:

        out = in * gamma + beta,

    where 'gamma' and 'beta' are the weights and biases learned.

    Keyword arguments:
    axis -- integer, axis along which to normalize in mode 0. For instance,
        if your input tensor has shape (samples, channels, rows, cols),
        set axis to 1 to normalize per feature map (channels axis).
    momentum -- momentum in the computation of the exponential average
        of the mean and standard deviation of the data, for
        feature-wise normalization.
    weights -- Initialization weights.
        List of 2 Numpy arrays, with shapes:
        `[(input_shape,), (input_shape,)]`
    beta_init -- name of initialization function for shift parameter
        (see [initializers](../initializers.md)), or alternatively,
        Theano/TensorFlow function to use for weights initialization.
        This parameter is only relevant if you don't pass a `weights` argument.
    gamma_init -- name of initialization function for scale parameter (see
        [initializers](../initializers.md)), or alternatively,
        Theano/TensorFlow function to use for weights initialization.
        This parameter is only relevant if you don't pass a `weights` argument.

    """
    def __init__(self, weights=None, axis=-1, momentum = 0.9, beta_init='zero', gamma_init='one', **kwargs):
        self.momentum = momentum
        self.axis = axis
        self.beta_init = initializers.get(beta_init)
        self.gamma_init = initializers.get(gamma_init)
        self.initial_weights = weights
        super(Scale, self).__init__(**kwargs)

    def build(self, input_shape):
        self.input_spec = [InputSpec(shape=input_shape)]
        shape = (int(input_shape[self.axis]),)

        self.gamma = K.variable(self.gamma_init(shape), name='%s_gamma'%self.name)
        self.beta = K.variable(self.beta_init(shape), name='%s_beta'%self.name)
        self.trainable_weights = [self.gamma, self.beta]

        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights

    def call(self, x, mask=None):
        input_shape = self.input_spec[0].shape
        broadcast_shape = [1] * len(input_shape)
        broadcast_shape[self.axis] = input_shape[self.axis]

        out = K.reshape(self.gamma, broadcast_shape) * x + K.reshape(self.beta, broadcast_shape)
        return out

    def get_config(self):
        config = {"momentum": self.momentum, "axis": self.axis}
        base_config = super(Scale, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

def identity_block(input_tensor, kernel_size, filters, stage, block):
    """The identity_block is the block that has no conv layer at shortcut

    Keyword arguments:
    input_tensor -- input tensor
    kernel_size -- default 3, the kernel size of middle conv layer at main path
    filters -- list of integers, the nb_filters of 3 conv layer at main path
    stage -- integer, current stage label, used for generating layer names
    block -- 'a','b'..., current block label, used for generating layer names

    """
    eps = 1.1e-5

    if K.image_dim_ordering() == 'tf':
        bn_axis = 3
    else:
        bn_axis = 1

    nb_filter1, nb_filter2, nb_filter3 = filters
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    scale_name_base = 'scale' + str(stage) + block + '_branch'

    x = Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a', use_bias=False)(input_tensor)
    x = BatchNormalization(epsilon=eps, axis=bn_axis, name=bn_name_base + '2a')(x)
    x = Scale(axis=bn_axis, name=scale_name_base + '2a')(x)
    x = Activation('relu', name=conv_name_base + '2a_relu')(x)

    x = ZeroPadding2D((1, 1), name=conv_name_base + '2b_zeropadding')(x)
    x = Conv2D(nb_filter2, (kernel_size, kernel_size), name=conv_name_base + '2b', use_bias=False)(x)
    x = BatchNormalization(epsilon=eps, axis=bn_axis, name=bn_name_base + '2b')(x)
    x = Scale(axis=bn_axis, name=scale_name_base + '2b')(x)
    x = Activation('relu', name=conv_name_base + '2b_relu')(x)

    x = Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c', use_bias=False)(x)
    x = BatchNormalization(epsilon=eps, axis=bn_axis, name=bn_name_base + '2c')(x)
    x = Scale(axis=bn_axis, name=scale_name_base + '2c')(x)

    x = add([x, input_tensor], name='res' + str(stage) + block)
    x = Activation('relu', name='res' + str(stage) + block + '_relu')(x)
    return x

def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):
    """conv_block is the block that has a conv layer at shortcut

    Keyword arguments:
    input_tensor -- input tensor
    kernel_size -- default 3, the kernel size of middle conv layer at main path
    filters -- list of integers, the nb_filters of 3 conv layer at main path
    stage -- integer, current stage label, used for generating layer names
    block -- 'a','b'..., current block label, used for generating layer names

    Note that from stage 3, the first conv layer at main path is with subsample=(2,2)
    and the shortcut should have subsample=(2,2) as well

    """
    eps = 1.1e-5

    if K.image_dim_ordering() == 'tf':
        bn_axis = 3
    else:
        bn_axis = 1

    nb_filter1, nb_filter2, nb_filter3 = filters
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    scale_name_base = 'scale' + str(stage) + block + '_branch'

    x = Conv2D(nb_filter1, (1, 1), strides=strides, name=conv_name_base + '2a', use_bias=False)(input_tensor)
    x = BatchNormalization(epsilon=eps, axis=bn_axis, name=bn_name_base + '2a')(x)
    x = Scale(axis=bn_axis, name=scale_name_base + '2a')(x)
    x = Activation('relu', name=conv_name_base + '2a_relu')(x)

    x = ZeroPadding2D((1, 1), name=conv_name_base + '2b_zeropadding')(x)
    x = Conv2D(nb_filter2, (kernel_size, kernel_size),
               name=conv_name_base + '2b', use_bias=False)(x)
    x = BatchNormalization(epsilon=eps, axis=bn_axis, name=bn_name_base + '2b')(x)
    x = Scale(axis=bn_axis, name=scale_name_base + '2b')(x)
    x = Activation('relu', name=conv_name_base + '2b_relu')(x)

    x = Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c', use_bias=False)(x)
    x = BatchNormalization(epsilon=eps, axis=bn_axis, name=bn_name_base + '2c')(x)
    x = Scale(axis=bn_axis, name=scale_name_base + '2c')(x)

    shortcut = Conv2D(nb_filter3, (1, 1), strides=strides,
                      name=conv_name_base + '1', use_bias=False)(input_tensor)
    shortcut = BatchNormalization(epsilon=eps, axis=bn_axis, name=bn_name_base + '1')(shortcut)
    shortcut = Scale(axis=bn_axis, name=scale_name_base + '1')(shortcut)

    x = add([x, shortcut], name='res' + str(stage) + block)
    x = Activation('relu', name='res' + str(stage) + block + '_relu')(x)
    return x

def ResNet152(include_top=True, weights=None,
              input_tensor=None, input_shape=None,
              large_input=False, pooling=None,
              classes=1000):
    """Instantiate the ResNet152 architecture.

    Keyword arguments:
    include_top -- whether to include the fully-connected layer at the
        top of the network. (default True)
    weights -- one of `None` (random initialization) or "imagenet"
        (pre-training on ImageNet). (default None)
    input_tensor -- optional Keras tensor (i.e. output of `layers.Input()`)
        to use as image input for the model. (default None)
    input_shape -- optional shape tuple, only to be specified if
        `include_top` is False (otherwise the input shape has to be
        `(224, 224, 3)` (with `channels_last` data format) or
        `(3, 224, 224)` (with `channels_first` data format). It should
        have exactly 3 input channels, and width and height should be
        no smaller than 197. E.g. `(200, 200, 3)` would be one valid value.
        (default None)
    large_input -- if True, then the input shape expected will be
        `(448, 448, 3)` (with `channels_last` data format) or
        `(3, 448, 448)` (with `channels_first` data format). (default False)
    pooling -- Optional pooling mode for feature extraction when
        `include_top` is `False`.
        - `None` means that the output of the model will be the 4D
          tensor output of the last convolutional layer.
        - `avg` means that global average pooling will be applied to
          the output of the last convolutional layer, and thus
          the output of the model will be a 2D tensor.
        - `max` means that global max pooling will be applied.
        (default None)
    classes -- optional number of classes to classify image into, only
        to be specified if `include_top` is True, and if no `weights`
        argument is specified. (default 1000)

    Returns:
    A Keras model instance.

    Raises:
    ValueError: in case of invalid argument for `weights`,
        or invalid input shape.
    """
    if weights not in {'imagenet', None}:
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization) or `imagenet` '
                         '(pre-training on ImageNet).')

    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError('If using `weights` as imagenet with `include_top`'
                         ' as true, `classes` should be 1000')

    eps = 1.1e-5

    if large_input:
        img_size = 448
    else:
        img_size = 224

    # Determine proper input shape
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=img_size,
                                      min_size=197,
                                      data_format=K.image_data_format(),
                                      require_flatten=include_top)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    # handle dimension ordering for different backends
    if K.image_dim_ordering() == 'tf':
        bn_axis = 3
    else:
        bn_axis = 1

    x = ZeroPadding2D((3, 3), name='conv1_zeropadding')(img_input)
    x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=False)(x)
    x = BatchNormalization(epsilon=eps, axis=bn_axis, name='bn_conv1')(x)
    x = Scale(axis=bn_axis, name='scale_conv1')(x)
    x = Activation('relu', name='conv1_relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), name='pool1')(x)

    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')

    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
    for i in range(1,8):
        x = identity_block(x, 3, [128, 128, 512], stage=3, block='b'+str(i))

    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
    for i in range(1,36):
        x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b'+str(i))

    x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')

    if large_input:
        x = AveragePooling2D((14, 14), name='avg_pool')(x)
    else:
        x = AveragePooling2D((7, 7), name='avg_pool')(x)

    # include classification layer by default, not included for feature extraction
    if include_top:
        x = Flatten()(x)
        x = Dense(classes, activation='softmax', name='fc1000')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = Model(inputs, x, name='resnet152')

    # load weights
    if weights == 'imagenet':
        if include_top:
            weights_path = get_file('resnet152_weights_tf.h5',
                                    WEIGHTS_PATH,
                                    cache_subdir='models',
                                    md5_hash='cdb18a2158b88e392c0905d47dcef965')
        else:
            weights_path = get_file('resnet152_weights_tf_notop.h5',
                                    WEIGHTS_PATH_NO_TOP,
                                    cache_subdir='models',
                                    md5_hash='4a90dcdafacbd17d772af1fb44fc2660')
        model.load_weights(weights_path, by_name=True)
        if K.backend() == 'theano':
            layer_utils.convert_all_kernels_in_model(model)
            if include_top:
                maxpool = model.get_layer(name='avg_pool')
                shape = maxpool.output_shape[1:]
                dense = model.get_layer(name='fc1000')
                layer_utils.convert_dense_weights_data_format(dense, shape, 'channels_first')

        if K.image_data_format() == 'channels_first' and K.backend() == 'tensorflow':
            warnings.warn('You are using the TensorFlow backend, yet you '
                          'are using the Theano '
                          'image data format convention '
                          '(`image_data_format="channels_first"`). '
                          'For best performance, set '
                          '`image_data_format="channels_last"` in '
                          'your Keras config '
                          'at ~/.keras/keras.json.')
    return model

if __name__ == '__main__':
    model = ResNet152(include_top=True, weights='imagenet')

    img_path = 'elephant.jpg'
    img = image.load_img(img_path, target_size=(224,224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    print('Input image shape:', x.shape)

    preds = model.predict(x)
    print('Predicted:', decode_predictions(preds))