Preparing for model release
Parent: e97d8372e9
Commit: 53a39c1f1f
@ -0,0 +1,61 @@
################
#
# classify_image.py
#
# Test driver for running inference with the ClassificationModel class in models.py.
#
################


#%% Imports

from models import ClassificationModel

import argparse
import sys


#%% Functions

def run_classification_model(model_path,image_path):

    # [560, 560] for ensemble, [560] or [448] for single models like ResNeXt
    model_input_sizes = [560,560]

    model = ClassificationModel(model_path, model_input_sizes, useGPU=True)
    species, vals = model.predict_image(image_path, 3)

    for i in range(0, len(species)):
        print('%d) %s\tlikelihood: %f' % (i+1, species[i], vals[i]))


#%% Command-line driver

def main():

    parser = argparse.ArgumentParser()
    parser.add_argument('model_path', type=str, help='Model filename')
    parser.add_argument('image_path', type=str, help='Image filename')

    if len(sys.argv[1:])==0:
        parser.print_help()
        parser.exit()

    args = parser.parse_args()
    run_classification_model(args.model_path,args.image_path)


if __name__ == '__main__':

    main()


#%% Interactive driver

if False:

    #%%

    model_path = r"D:\temp\species_classification\sc_all_extended_ensemble_resnext_inceptionV4_560_2019.08.27_model.pytorch"
    image_path = r"D:\temp\species_classification\190215-meer-full.jpg"
    run_classification_model(model_path,image_path)
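# Editor's note -- not part of this commit.  With the argparse driver above, the new
# script is invoked from the command line as:
#
#   python classify_image.py <model_path> <image_path>
#
# where <model_path> is the .pytorch model file and <image_path> is the image to classify.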
@ -11,10 +11,6 @@
################


import os
import shutil
import time
import copy
from enum import Enum
import numpy as np
import pretrainedmodels

@ -1,17 +1,28 @@
#%% Constants and imports

# Note to self:
###
#
# onnx_test_driver.py
#
# Example driver to run an image through the .onnx version of our species
# classification model.
#
# Installation notes:
#
# conda install pytorch-nightly -c pytorch
# pip install future
# pip install opencv-python
# pip install onnx
# pip install caffe2
#
# Useful links:
#
# https://caffe2.ai/docs/tutorial-loading-pre-trained-models.html
# https://github.com/onnx/tutorials/blob/master/tutorials/OnnxCaffe2Import.ipynb
#
###

#%% Constants and imports

import cv2
from caffe2.python import core, workspace
import onnx
import caffe2.python.onnx.backend
import numpy as np

@ -20,15 +31,15 @@ from operator import itemgetter
TOP_K = 5

IMAGE_FILENAME = '/data/images/lion.jpg'
MODEL_FILENAME = '/data/models/exported_model.onnx'
CLASSLIST_FILENAME = '/data/models/classlist.txt'
IMAGE_FILENAME = r"D:\temp\species_classification\190215-meer-full.jpg"
MODEL_FILENAME = r"D:\temp\species_classification\sc_all_extended_ensemble_resnext_inceptionV4_560_2019.08.27_model.onnx"
CLASSLIST_FILENAME = r"D:\temp\species_classification\sc_all_extended_ensemble_resnext_inceptionV4_560_2019.08.27_classes.txt"

# Target mean / std; should match the values used at training time
MODEL_IMAGE_SIZE = 224
MODEL_RESIZE_SIZE = 256
OVERSIZE_FACTOR = 1.3


#%% Load and prepare image (using cv2)

# Load image
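# Editor's note -- the actual image loading/preprocessing code is in the portion of
# this file that the diff does not show.  The lines below are only an illustrative
# sketch (hypothetical variable names) of how constants such as MODEL_RESIZE_SIZE and
# MODEL_IMAGE_SIZE are typically used in a resize-then-center-crop pipeline.
img_raw = cv2.imread(IMAGE_FILENAME)                            # HxWxC, BGR, uint8
img_resized = cv2.resize(img_raw, (MODEL_RESIZE_SIZE, MODEL_RESIZE_SIZE))
offset = (MODEL_RESIZE_SIZE - MODEL_IMAGE_SIZE) // 2
img_cropped = img_resized[offset:offset + MODEL_IMAGE_SIZE,
                          offset:offset + MODEL_IMAGE_SIZE, :]
img_chw = np.transpose(img_cropped[:, :, ::-1], (2, 0, 1))      # BGR -> RGB, HWC -> CHW
img_batch = np.expand_dims(img_chw, 0).astype('float32')        # add a batch dimension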
@ -65,15 +76,19 @@ imgExpanded = np.expand_dims(imgFinal,0).astype('float32')
# cv2.imshow('image',imgCropped); cv2.waitKey(0); cv2.destroyAllWindows()


#%% Load model

model = onnx.load(MODEL_FILENAME)


#%% Run model

# Run the ONNX model with Caffe2
outputs = caffe2.python.onnx.backend.run_model(model, [imgExpanded])
outputs_softmax = outputs[0][0].astype(np.float64)


#%% Print top K class names

# Load class names
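# Editor's note -- the class-name loading and top-K printing code is in the portion of
# this file that the diff does not show.  The lines below are only an illustrative
# sketch (hypothetical variable names), assuming CLASSLIST_FILENAME holds one class
# name per line.
with open(CLASSLIST_FILENAME, 'r') as f:
    class_names = [line.strip() for line in f if len(line.strip()) > 0]

top_k_indices = np.argsort(-outputs_softmax)[:TOP_K]
for rank, i_class in enumerate(top_k_indices):
    print('{}) {}: {:.4f}'.format(rank + 1, class_names[i_class], outputs_softmax[i_class]))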
@ -1,25 +0,0 @@
################
#
# test.py
#
# Test driver for running inference with the ClassificationModel class in models.py.
#
################

from models import *

def main():

    image_path = '/path/to/image.jpg'
    model_path = '/path/to/model.pth.tar'
    # [560, 560] for ensemble, [560] or [448] for single models like ResNeXt
    model_input_sizes = [560]

    model = ClassificationModel(model_path, model_input_sizes, useGPU = True)
    species, vals = model.predict_image(image_path, 3)

    for i in range(0, len(species)):
        print('%d) %s\tlikelihood: %f' % (i+1, species[i], vals[i]))

if __name__ == '__main__':
    main()

@ -15,9 +15,6 @@
# * a taxonomy file, so the scientific names used in the training data can
# be mapped to common names.
#
# We are currently testing against PyTorch 0.4.1 and Cuda 9.0, and we have tested on
# both Linux and Windows.
#
#######


@ -35,15 +32,14 @@ API_ROOT = r'd:\git\SpeciesClassification'
# Path to taxa.csv, for latin --> common mapping
#
# Set to None to disable latin --> common mapping
TAXONOMY_PATH = r'd:\temp\taxa.csv' # None
TAXONOMY_PATH = r'd:\temp\species_classification\taxa.19.08.28.0536.csv' # None

IMAGES_TO_CLASSIFY = [
    r"D:\temp\animals\African_Elephant\30651.ngsversion.1421960098780.jpg",
    r"D:\temp\animals\Alligator\Alligator_mississippiensis_01.JPG"
    r"D:\temp\species_classification\190215-meer-full.jpg",
    r"D:\temp\species_classification\African_Bush_Elephant.jpg"
    ]

# CLASSIFICATION_MODEL_PATH = r'd:\temp\models\inc4-incres2-560-78.5\model_deploy.pth.tar'
CLASSIFICATION_MODEL_PATH = r"D:\temp\models\resnext-448-78.8\model_best.pth.tar"
CLASSIFICATION_MODEL_PATH = r"D:\temp\species_classification\sc_all_extended_ensemble_resnext_inceptionV4_560_2019.08.27_model.pytorch"

# Detection (i.e., bounding box generation) is optional; set to None
# to disable detection

@ -61,8 +57,8 @@ USE_GPU = True
# We typically specify [560,560] if we're loading our Inception/InceptionResnet
# ensemble. For ResNeXt, we typically specify [448].
#
# IMAGE_SIZES = [560, 560]
IMAGE_SIZES = [448]
IMAGE_SIZES = [560, 560]
# IMAGE_SIZES = [448]


#%% Path setup to import the classification code
