This commit is contained in:
namizzz 2018-08-24 19:16:19 +08:00
Родитель 5cab1be0ec 4028767d0a
Коммит ccd2843678
7 изменённых файлов: 215 добавлений и 90 удалений

69
.travis.yml Normal file
Просмотреть файл

@ -0,0 +1,69 @@
sudo: required
dist: xenial
cache:
directories:
- $HOME/.cache/pip
before_install:
- export PATH=$PATH
before_script: true
after_failure: true
after_success: true
after_script: true
script: bash test.sh
notifications:
email:
on_success: never
on_failure: never
matrix:
include:
- name: "MMdnn no-onnx test with py2/ubuntu 16.04"
language: python
python: "2.7"
env: TEST_ONNX=false
install:
- pip install -q -r requirements.txt
- pip install -q http://download.pytorch.org/whl/cpu/torch-0.4.0-cp27-cp27mu-linux_x86_64.whl
- pip install -q torchvision
# - pip install -q paddlepaddle==0.11.0
- name: "MMdnn onnx test with py2/ubuntu 16.04"
language: python
python: "2.7"
env: TEST_ONNX=true
install:
- pip install -q -r requirements.txt
- pip install -q http://download.pytorch.org/whl/cpu/torch-0.4.0-cp27-cp27mu-linux_x86_64.whl
- pip install -q torchvision
# - pip install -q paddlepaddle==0.11.0
- pip install -q onnx==1.2.1
- pip install -q onnx-tf==1.1.2
- name: "MMdnn no-onnx test with py3/ubuntu 16.04"
language: python
python: "3.6"
env: TEST_ONNX=false
install:
- pip install -q -r requirements.txt
- pip install -q http://download.pytorch.org/whl/cpu/torch-0.4.0-cp36-cp36m-linux_x86_64.whl
- pip install -q torchvision
- name: "MMdnn onnx test with py3/ubuntu 16.04"
language: python
python: "3.6"
env: TEST_ONNX=true
install:
- pip install -q -r requirements.txt
- pip install -q http://download.pytorch.org/whl/cpu/torch-0.4.0-cp36-cp36m-linux_x86_64.whl
- pip install -q torchvision
- pip install -q onnx==1.2.1
- pip install -q onnx-tf==1.1.2

Просмотреть файл

@ -1,6 +1,7 @@
# ![MMdnn](https://ndqzpq.dm2304.livefilestore.com/y4mF9ON1vKrSy0ew9dM3Fw6KAvLzQza2nL9JiMSIfgfKLbqJPvuxwOC2VIur_Ycz4TvVpkibMkvKXrX-N9QOkyh0AaUW4qhWDak8cyM0UoLLxc57apyhfDaxflLlZrGqiJgzn1ztsxiaZMzglaIMhoo8kjPuZ5-vY7yoWXqJuhC1BDHOwgNPwIgzpxV1H4k1oQzmewThpAJ_w_fUHzianZtMw?width=35&height=35&cropmode=none) MMdnn
[![License](https://img.shields.io/badge/license-MIT-blue.svg)](LICENSE)
[![Linux](https://travis-ci.org/Microsoft/MMdnn.svg?branch=master)](https://travis-ci.org/Microsoft/MMdnn)
A comprehensive, cross-framework solution to convert, visualize and diagnosis deep neural network models. The "MM" in MMdnn stands for model management and "dnn" is an acronym for deep neural network.

Просмотреть файл

@ -146,12 +146,26 @@ if __name__=='__main__':
self.save_weights(self.weights_dict, dstWeightPath)
@staticmethod
def _shapeToStr(shapes):
return [dim.size if dim.size > 0 else 1 for dim in shapes.dim]
def _get_symmetric_padding(self, IR_node):
stride_h = IR_node.get_attr('strides')[1]
stride_w = IR_node.get_attr('strides')[2]
# check if have pad layer
IR_parent_node = self.IR_graph.get_parent(IR_node.name, [0])
if IR_parent_node.type == 'Pad':
pads = IR_parent_node.get_attr('pads')
else:
pads = IR_node.get_attr('pads')
pad_h = pads[1] + (0 if pads[1] == pads[5] else stride_h)
pad_w = pads[2] + (0 if pads[2] == pads[6] else stride_w)
return pad_h, pad_w
def check_if_need_transpose(self, IR_node):
parent = self.IR_graph.get_parent(IR_node.name, [0])
while parent.type == 'Flatten' or parent.type == 'Dropout' or parent.type == 'Reshape':
@ -166,17 +180,8 @@ if __name__=='__main__':
def emit_Conv(self, IR_node):
# check if have pad layer
pad_h = 0
pad_w = 0
IR_parent_node = self.IR_graph.get_parent(IR_node.name, [0])
if IR_parent_node.type == 'Pad':
pad_h = IR_parent_node.get_attr('pads')[1]
pad_w = IR_parent_node.get_attr('pads')[2]
else:
pad_h = IR_node.get_attr('pads')[1]
pad_w = IR_node.get_attr('pads')[2]
# implement asymmetric paddings by applying symmetric padding then cropping
pad_h, pad_w = self._get_symmetric_padding(IR_node)
num_output = IR_node.get_attr('kernel_shape')[-1]
if IR_node.type == "DepthwiseConv":
@ -186,7 +191,7 @@ if __name__=='__main__':
num_group = IR_node.get_attr("group", 1)
self.add_body(1, "n.{:<15} = L.Convolution(n.{}, kernel_h={}, kernel_w={}, stride={}, num_output={}, pad_h={}, pad_w={}, group={}, \
bias_term={}, ntop=1)".format(
bias_term={}, ntop=1)".format(
IR_node.variable_name,
self.parent_variable_name(IR_node),
IR_node.get_attr('kernel_shape')[0],
@ -198,8 +203,6 @@ bias_term={}, ntop=1)".format(
num_group,
IR_node.get_attr('use_bias', False)))
self.check_if_need_crop(IR_node)
dim = len(IR_node.get_attr('strides')) - 2
if self.weight_loaded:
if IR_node.type == "DepthwiseConv":
@ -207,6 +210,7 @@ bias_term={}, ntop=1)".format(
self.weights_dict[IR_node.name]['weights'] = np.transpose(self.weights_dict[IR_node.name]['weights'], [dim + 1, dim] + list(range(0, dim)))
self.weights_dict[IR_node.variable_name] = self.weights_dict.pop(IR_node.name)
self.check_if_need_crop(IR_node)
# keys = []
# for key in self.weights_dict[IR_node.name].keys():
# keys.append(key)
@ -220,10 +224,7 @@ bias_term={}, ntop=1)".format(
shape = shape_to_list(shape)
h_i = shape[1]
w_i = shape[2]
pad_h = IR_node.get_attr('pads')[1]
pad_w = IR_node.get_attr('pads')[2]
pad_h, pad_w = self._get_symmetric_padding(IR_node)
stride_h = IR_node.get_attr('strides')[1]
stride_w = IR_node.get_attr('strides')[2]
@ -253,14 +254,21 @@ bias_term={}, ntop=1)".format(
k_w = IR_node.get_attr('kernel_shape')[1]
caffe_ho, caffe_wo = self.compute_output_shape(IR_node, k_h, k_w)
# if asymmetric padding, set offset to 1
pads = IR_node.get_attr('pads')
offset = [0 if pads[1] == pads[5] else 1,
0 if pads[2] == pads[6] else 1]
if caffe_ho > ir_ho or caffe_wo > ir_wo:
crop_layer_variable_name = IR_node.variable_name + "_crop"
self.add_body(1, "n.{:<15} = L.Crop(n.{}, L.DummyData(shape=[dict(dim=[1, {}, {}, {}])], ntop=1), ntop=1)".format(
self.add_body(1, "n.{:<15} = L.Crop(n.{}, L.DummyData(shape=[dict(dim=[1, {}, {}, {}])], \
ntop=1), ntop=1, offset={})".format(
crop_layer_variable_name,
IR_node.variable_name,
shape[3],
ir_ho,
ir_wo
ir_wo,
offset
))
# Change the layer name
IR_node.real_name = IR_node.real_name + "_crop"
@ -284,13 +292,14 @@ bias_term={}, ntop=1)".format(
pooling_type,
IR_node.get_attr('strides')[1]))
else:
pad_h, pad_w = self._get_symmetric_padding(IR_node)
self.add_body(1, "n.{:<15} = L.Pooling(n.{}, pool={}, kernel_size={}, pad_h={}, pad_w={}, stride={}, ntop=1)".format(
IR_node.variable_name,
self.parent_variable_name(IR_node),
pooling_type,
IR_node.get_attr('kernel_shape')[1],
IR_node.get_attr('pads')[1],
IR_node.get_attr('pads')[2],
pad_h,
pad_w,
IR_node.get_attr('strides')[1]))
# check if need crop output shape

Просмотреть файл

@ -51,7 +51,6 @@ from keras.models import Model
from keras import layers
import keras.backend as K
import numpy as np
import keras_applications
def load_weights_from_file(weight_file):
@ -547,6 +546,7 @@ def KitModel(weight_file = None):
from keras.applications.mobilenet import relu6
str_relu6 = 'keras.applications.mobilenet.relu6'
except:
import keras_applications
from keras_applications import mobilenet_v2
mobilenet_v2.layers.ReLU
str_relu6 = "keras_applications.mobilenet_v2.layers.ReLU(6, name='relu6')"

10
requirements.txt Normal file
Просмотреть файл

@ -0,0 +1,10 @@
six==1.10.0
numpy==1.14.5
protobuf==3.6.1
pillow == 5.2.0
setuptools==39.1.0
tensorflow==1.10.0
keras==2.1.6
coremltools==0.8
mxnet==1.1.0.post0
cntk==2.5

36
test.sh Executable file
Просмотреть файл

@ -0,0 +1,36 @@
#!/bin/bash
# Abort on Error
set -e
export PING_SLEEP=60s
export WORKDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
export BUILD_OUTPUT=$WORKDIR/build.out
touch $BUILD_OUTPUT
dump_output() {
echo Tailing the last 100 lines of output:
tail -100 $BUILD_OUTPUT
}
error_handler() {
echo ERROR: An error was encountered with the build.
dump_output
exit 1
}
# If an error occurs, run our error handler to output a tail of the build
trap 'error_handler' ERR
# Set up a repeating loop to send some output to Travis.
bash -c "while true; do echo \$(date) - building ...; sleep $PING_SLEEP; done" &
PING_LOOP_PID=$!
# My build is using maven, but you could build anything with this, E.g.
python -m pytest -s -q tests/ >> $BUILD_OUTPUT 2>&1
# The build finished without returning an error so dump a tail of the output
dump_output
# nicely terminate the ping output loop
kill $PING_LOOP_PID

Просмотреть файл

@ -564,7 +564,7 @@ class TestModels(CorrectnessTest):
input_data = np.expand_dims(img, 0)
model_converted.blobs[model_converted._layer_names[0]].data[...] = input_data
predict = model_converted.forward()[model_converted._layer_names[-1]][0]
predict = model_converted.forward()[model_converted._blob_names[-1]][0]
converted_predict = np.squeeze(predict)
del model_converted
@ -783,23 +783,23 @@ class TestModels(CorrectnessTest):
'tensorflow' : {
'vgg19' : [OnnxEmit],
'inception_v1' : [OnnxEmit],
'inception_v3' : [OnnxEmit],
# 'resnet_v1_50' : [OnnxEmit], # POOL: strides > window_shape not supported due to inconsistency between CPU and GPU implementations
# 'resnet_v1_152' : [OnnxEmit], # POOL: strides > window_shape not supported due to inconsistency between CPU and GPU implementations
# 'resnet_v2_50' : [OnnxEmit], # POOL: strides > window_shape not supported due to inconsistency between CPU and GPU implementations
# 'resnet_v2_152' : [OnnxEmit], # POOL: strides > window_shape not supported due to inconsistency between CPU and GPU implementations
'mobilenet_v1_1.0' : [OnnxEmit],
'mobilenet_v2_1.0_224' : [OnnxEmit],
# 'nasnet-a_large' : [OnnxEmit], # POOL: strides > window_shape not supported due to inconsistency between CPU and GPU implementations
'inception_resnet_v2' : [OnnxEmit],
# 'inception_v1' : [OnnxEmit],
# 'inception_v3' : [OnnxEmit],
# # 'resnet_v1_50' : [OnnxEmit], # POOL: strides > window_shape not supported due to inconsistency between CPU and GPU implementations
# # 'resnet_v1_152' : [OnnxEmit], # POOL: strides > window_shape not supported due to inconsistency between CPU and GPU implementations
# # 'resnet_v2_50' : [OnnxEmit], # POOL: strides > window_shape not supported due to inconsistency between CPU and GPU implementations
# # 'resnet_v2_152' : [OnnxEmit], # POOL: strides > window_shape not supported due to inconsistency between CPU and GPU implementations
# 'mobilenet_v1_1.0' : [OnnxEmit],
# 'mobilenet_v2_1.0_224' : [OnnxEmit],
# # 'nasnet-a_large' : [OnnxEmit], # POOL: strides > window_shape not supported due to inconsistency between CPU and GPU implementations
# 'inception_resnet_v2' : [OnnxEmit],
},
'tensorflow_frozen' : {
'inception_v1' : [OnnxEmit],
'inception_v3' : [OnnxEmit],
'mobilenet_v1_1.0' : [OnnxEmit],
},
# 'tensorflow_frozen' : {
# 'inception_v1' : [OnnxEmit],
# 'inception_v3' : [OnnxEmit],
# 'mobilenet_v1_1.0' : [OnnxEmit],
# },
'coreml' : {
'inception_v3' : [OnnxEmit],
@ -868,23 +868,23 @@ class TestModels(CorrectnessTest):
},
'tensorflow' : {
'vgg19' : [CaffeEmit, CoreMLEmit, CntkEmit, KerasEmit, MXNetEmit, PytorchEmit, TensorflowEmit],
'inception_v1' : [CaffeEmit, CoreMLEmit, KerasEmit, MXNetEmit, PytorchEmit, TensorflowEmit], # TODO: CntkEmit
'inception_v3' : [CaffeEmit, CoreMLEmit, CntkEmit, KerasEmit, MXNetEmit, PytorchEmit, TensorflowEmit],
'resnet_v1_50' : [CaffeEmit, CoreMLEmit, KerasEmit, MXNetEmit, PytorchEmit, TensorflowEmit], # TODO: CntkEmit
'resnet_v1_152' : [CaffeEmit, CoreMLEmit, KerasEmit, MXNetEmit, PytorchEmit, TensorflowEmit], # TODO: CntkEmit
'resnet_v2_50' : [CaffeEmit, CoreMLEmit, KerasEmit, MXNetEmit, PytorchEmit, TensorflowEmit], # TODO: CntkEmit
'resnet_v2_152' : [CaffeEmit, CoreMLEmit, CntkEmit, KerasEmit, MXNetEmit, PytorchEmit, TensorflowEmit],
'mobilenet_v1_1.0' : [CoreMLEmit, CntkEmit, KerasEmit, MXNetEmit, PytorchEmit, TensorflowEmit], # TODO: CaffeEmit(Crash)
'mobilenet_v2_1.0_224' : [CoreMLEmit, CntkEmit, KerasEmit, MXNetEmit, PytorchEmit, TensorflowEmit], # TODO: CaffeEmit(Crash)
'nasnet-a_large' : [MXNetEmit, PytorchEmit, TensorflowEmit], # TODO: KerasEmit(Slice Layer: https://blog.csdn.net/lujiandong1/article/details/54936185)
'inception_resnet_v2' : [CaffeEmit, KerasEmit, MXNetEmit, PytorchEmit, TensorflowEmit], # CoremlEmit worked once, then always crashed
'vgg19' : [CoreMLEmit, KerasEmit, MXNetEmit, PytorchEmit, TensorflowEmit],
# 'inception_v1' : [CaffeEmit, CoreMLEmit, KerasEmit, MXNetEmit, PytorchEmit, TensorflowEmit], # TODO: CntkEmit
# 'inception_v3' : [CaffeEmit, CoreMLEmit, CntkEmit, KerasEmit, MXNetEmit, PytorchEmit, TensorflowEmit],
# 'resnet_v1_50' : [CaffeEmit, CoreMLEmit, KerasEmit, MXNetEmit, PytorchEmit, TensorflowEmit], # TODO: CntkEmit
# 'resnet_v1_152' : [CaffeEmit, CoreMLEmit, KerasEmit, MXNetEmit, PytorchEmit, TensorflowEmit], # TODO: CntkEmit
# 'resnet_v2_50' : [CaffeEmit, CoreMLEmit, KerasEmit, MXNetEmit, PytorchEmit, TensorflowEmit], # TODO: CntkEmit
# 'resnet_v2_152' : [CaffeEmit, CoreMLEmit, CntkEmit, KerasEmit, MXNetEmit, PytorchEmit, TensorflowEmit],
# 'mobilenet_v1_1.0' : [CoreMLEmit, CntkEmit, KerasEmit, MXNetEmit, PytorchEmit, TensorflowEmit], # TODO: CaffeEmit(Crash)
# 'mobilenet_v2_1.0_224' : [CoreMLEmit, CntkEmit, KerasEmit, MXNetEmit, PytorchEmit, TensorflowEmit], # TODO: CaffeEmit(Crash)
# 'nasnet-a_large' : [MXNetEmit, PytorchEmit, TensorflowEmit], # TODO: KerasEmit(Slice Layer: https://blog.csdn.net/lujiandong1/article/details/54936185)
# 'inception_resnet_v2' : [CaffeEmit, KerasEmit, MXNetEmit, PytorchEmit, TensorflowEmit], # CoremlEmit worked once, then always crashed
},
'tensorflow_frozen' : {
'inception_v1' : [TensorflowEmit, KerasEmit, MXNetEmit, CoreMLEmit], # TODO: CntkEmit
'inception_v3' : [TensorflowEmit, KerasEmit, MXNetEmit, CoreMLEmit], # TODO: CntkEmit
'mobilenet_v1_1.0' : [TensorflowEmit, KerasEmit, MXNetEmit, CoreMLEmit]
# 'inception_v1' : [TensorflowEmit, KerasEmit, MXNetEmit, CoreMLEmit], # TODO: CntkEmit
# 'inception_v3' : [TensorflowEmit, KerasEmit, MXNetEmit, CoreMLEmit], # TODO: CntkEmit
# 'mobilenet_v1_1.0' : [TensorflowEmit, KerasEmit, MXNetEmit, CoreMLEmit]
},
'coreml' : {
@ -979,59 +979,59 @@ class TestModels(CorrectnessTest):
def test_caffe(self):
try:
import caffe
self._test_function('caffe', self.CaffeParse)
except ImportError:
print('Please install caffe! Or caffe is not supported in your platform.', file=sys.stderr)
# def test_caffe(self):
# try:
# import caffe
# self._test_function('caffe', self.CaffeParse)
# except ImportError:
# print('Please install caffe! Or caffe is not supported in your platform.', file=sys.stderr)
def test_cntk(self):
try:
import cntk
self._test_function('cntk', self.CntkParse)
except ImportError:
print('Please install cntk! Or cntk is not supported in your platform.', file=sys.stderr)
# def test_cntk(self):
# try:
# import cntk
# self._test_function('cntk', self.CntkParse)
# except ImportError:
# print('Please install cntk! Or cntk is not supported in your platform.', file=sys.stderr)
def test_coreml(self):
from coremltools.models.utils import macos_version
if macos_version() < (10, 13):
print('Coreml is not supported in your platform.', file=sys.stderr)
else:
self._test_function('coreml', self.CoremlParse)
# def test_coreml(self):
# from coremltools.models.utils import macos_version
# if macos_version() < (10, 13):
# print('Coreml is not supported in your platform.', file=sys.stderr)
# else:
# self._test_function('coreml', self.CoremlParse)
def test_keras(self):
self._test_function('keras', self.KerasParse)
# def test_keras(self):
# self._test_function('keras', self.KerasParse)
def test_mxnet(self):
self._test_function('mxnet', self.MXNetParse)
# def test_mxnet(self):
# self._test_function('mxnet', self.MXNetParse)
def test_darknet(self):
self._test_function('darknet', self.DarknetParse)
# def test_darknet(self):
# self._test_function('darknet', self.DarknetParse)
def test_paddle(self):
# omit tensorflow lead to crash
import tensorflow as tf
try:
import paddle.v2 as paddle
self._test_function('paddle', self.PaddleParse)
except ImportError:
print('Please install Paddlepaddle! Or Paddlepaddle is not supported in your platform.', file=sys.stderr)
# def test_paddle(self):
# # omit tensorflow lead to crash
# import tensorflow as tf
# try:
# import paddle.v2 as paddle
# self._test_function('paddle', self.PaddleParse)
# except ImportError:
# print('Please install Paddlepaddle! Or Paddlepaddle is not supported in your platform.', file=sys.stderr)
def test_pytorch(self):
self._test_function('pytorch', self.PytorchParse)
# def test_pytorch(self):
# self._test_function('pytorch', self.PytorchParse)
def test_tensorflow(self):
self._test_function('tensorflow', self.TensorFlowParse)
def test_tensorflow_frozen(self):
self._test_function('tensorflow_frozen', self.TensorFlowFrozenParse)
# def test_tensorflow_frozen(self):
# self._test_function('tensorflow_frozen', self.TensorFlowFrozenParse)