From 1088c1044abd5c939082ba381f9de8ee9c9bb65d Mon Sep 17 00:00:00 2001
From: Jiahao Yao
Date: Fri, 11 May 2018 15:01:54 +0800
Subject: [PATCH 1/3] add coreml_extractor

---
 mmdnn/conversion/examples/cntk/extract_model.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mmdnn/conversion/examples/cntk/extract_model.py b/mmdnn/conversion/examples/cntk/extract_model.py
index 6d2b258..e48453b 100644
--- a/mmdnn/conversion/examples/cntk/extract_model.py
+++ b/mmdnn/conversion/examples/cntk/extract_model.py
@@ -34,7 +34,7 @@ def _main():
         type=_text_type, help='Test Image Path')
 
     parser.add_argument('-o', '--output_dir', default='./',
-                        type=_text_type, help='Caffe Checkpoint file name')
+                        type=_text_type, help='CNTK Checkpoint file name')
 
     args = parser.parse_args()
 

From 57f512610d97aea9e8e192ad882b6101d9352262 Mon Sep 17 00:00:00 2001
From: Jiahao Yao
Date: Fri, 11 May 2018 17:59:10 +0800
Subject: [PATCH 2/3] coreml to pytorch passed

---
 mmdnn/conversion/coreml/README.md           |  19 ++-
 mmdnn/conversion/coreml/coreml_parser.py    |  42 +----
 mmdnn/conversion/pytorch/pytorch_emitter.py | 154 ++++++++++++++++++--
 tests/test_conversion_imagenet.py           |  10 +-
 4 files changed, 166 insertions(+), 59 deletions(-)

diff --git a/mmdnn/conversion/coreml/README.md b/mmdnn/conversion/coreml/README.md
index 2fa4ab5..23aee24 100644
--- a/mmdnn/conversion/coreml/README.md
+++ b/mmdnn/conversion/coreml/README.md
@@ -1,6 +1,21 @@
 # CoreML README
 
-Currently we only implemented the CoreML emitter (IR -> CoreML) part. Any contribution to CoreML model parser (CoreML -> IR) part or emitter part is welcome.
+
+We tested the [Awesome-CoreML-Models](https://github.com/likedan/Awesome-CoreML-Models) and the parser works. Any contribution is welcome.
+
+Models | Caffe | CoreML | CNTK | Keras | MXNet | PyTorch | TensorFlow
+:-----------------------:|:-----:|:------:|:----:|:-----:|:-----:|:-------:|:------:|
+Vgg16 | √ | √ | | √ | √ | √ | √
+Inception_v3 | √ | √ | | √ | √ | √ | √
+ResNet 50 | √ | √ | | √ | √ | √ | √
+MobileNet V1 | √ | √ | | √ | √ | √ | √
+Tiny-yolo | | √ | | √ | √ | √ | √
+
+**√** - Correctness tested
+
+**o** - Some difference after conversion
+
+**space** - not tested
 
 ---
 
@@ -90,7 +105,7 @@
 The inference result is slightly different from the original keras model.
 
 ## Develop version
 
-macOS High Sierra 10.13.2 (17C205)
+macOS High Sierra 10.13.3 (17C205)
 
 @ 2018/01/10
 
diff --git a/mmdnn/conversion/coreml/coreml_parser.py b/mmdnn/conversion/coreml/coreml_parser.py
index 97ed04a..63ecf85 100644
--- a/mmdnn/conversion/coreml/coreml_parser.py
+++ b/mmdnn/conversion/coreml/coreml_parser.py
@@ -490,47 +490,6 @@ class CoremlParser(Parser):
 
 
 
-
-    def _convert_pooling(self, source_node, dim, pooling_type, is_global):
-        source_node_layer = source_node.layer
-        IR_node = self.IR_graph.node.add()
-
-        # name, op
-        CoremlParser._copy_and_repo(resource_node, IR_node, "Pool")
-
-        # input edge
-        self.convert_inedge(source_node, IR_node)
-
-        kwargs = {}
-
-        kwargs['pooling_type'] = pooling_type
-
-        if is_global:
-            kwargs['global_pooling'] = True
-            kwargs['strides'] = [1] * (dim + 2)
-        else:
-
-            # padding
-            self._convert_padding(source_node, IR_node)
-
-            # strides
-            # [1, sd, sh, sw, 1]
-            kwargs['strides'] = [1, 1] + list(source_node) + [1]
-
-            # window_shape
-            # [1, pd, ph, pw, 1]
-            kwagrs['kernel_shape'] = [1,1] + list(source_node_layer.kernelSize) + [1]
-
-        assign_IRnode_values(IR_node, kwargs)
-
-        if is_global:
-            flatten_node = self.IR_graph.node.add()
-            flatten_node.name = source_node_layer.name + "_flatten"
-            flatten_node.op = 'Flatten'
-            flatten_node.input.append(source_node_layer.name)
-            CoremlParser._set_output_shape(source_node, flatten_node)
-            source_node.real_name = flatten_node_layer.name
-
     def _convert_merge(self, source_node, new_name = None):
         IR_node = self.IR_graph.node.add()
 
@@ -865,6 +824,7 @@ class CoremlParser(Parser):
             # [1, sd, sh, sw, 1]
             kwargs['strides'] = [1] + list(coreml_node_pool.stride) + [1]
 
+            # window_shape
            # [1, pd, ph, pw, 1]
             kwargs['kernel_shape'] = [1] + list(coreml_node_pool.kernelSize) + [1]
 
diff --git a/mmdnn/conversion/pytorch/pytorch_emitter.py b/mmdnn/conversion/pytorch/pytorch_emitter.py
index 8d982a7..e9bdf69 100644
--- a/mmdnn/conversion/pytorch/pytorch_emitter.py
+++ b/mmdnn/conversion/pytorch/pytorch_emitter.py
@@ -201,27 +201,45 @@ class KitModel(nn.Module):
                 ))
 
         else:
+            # for e in IR_node.get_attr('dilations', []):
+            #     assert e == 1
+
+            # pool_size = IR_node.get_attr('kernel_shape')[1:-1]
+            # strides = IR_node.get_attr('strides')[1:-1]
+
+            # print(IR_node.get_attr('pads'))
+            # padding = IR_node.get_attr('pads')[1:dim]
+            # ceil_mode = self.is_ceil_mode(IR_node.get_attr('pads'))
+
+            # # input_node = self._defuse_padding(IR_node, exstr)
+            # self.add_body(2, "{:<15} = F.{}({}, kernel_size={}, stride={}, padding={}, ceil_mode={})".format(
+            #     IR_node.variable_name,
+            #     pool_name,
+            #     self.parent_variable_name(IR_node),
+            #     tuple(pool_size),
+            #     tuple(strides),
+            #     tuple(padding),
+            #     ceil_mode
+            # ))
+
+            # Change to padding defuse
+            input_node = self._defuse_padding(IR_node)
             for e in IR_node.get_attr('dilations', []):
                 assert e == 1
 
             pool_size = IR_node.get_attr('kernel_shape')[1:-1]
             strides = IR_node.get_attr('strides')[1:-1]
-            padding = IR_node.get_attr('pads')[1:dim]
-            ceil_mode = self.is_ceil_mode(IR_node.get_attr('pads'))
-
-            # input_node = self._defuse_padding(IR_node, exstr)
             self.add_body(2, "{:<15} = F.{}({}, kernel_size={}, stride={}, padding={}, ceil_mode={})".format(
                 IR_node.variable_name,
                 pool_name,
-                self.parent_variable_name(IR_node),
+                input_node,
                 tuple(pool_size),
                 tuple(strides),
-                tuple(padding),
-                ceil_mode
+                0,
+                False
             ))
 
-
     def emit_UNKNOWN(self, IR_node):
         print(IR_node.name)
 
@@ -306,6 +324,14 @@ class KitModel(nn.Module):
             self.IR_graph.get_parent(IR_node.name, [0]).real_variable_name))
 
 
+
+    def emit_LeakyRelu(self, IR_node):
+        self.add_body(2, "{:<15} = F.leaky_relu({}, negative_slope={})".format(
+            IR_node.variable_name,
+            self.IR_graph.get_parent(IR_node.name, [0]).real_variable_name,
+            IR_node.get_attr('alpha')))
+
+
     def emit_Relu6(self, IR_node):
         self.add_body(2, "{:<15} = F.relu6({})".format(
             IR_node.variable_name,
 
@@ -432,6 +458,23 @@ class KitModel(nn.Module):
             self.parent_variable_name(IR_node)
         ))
 
+    def emit_Scale(self, IR_node):
+        self.used_layers.add(IR_node.type)
+        dim = len(IR_node.layer.attr['_output_shapes'].list.shape[0].dim) - 2
+
+        self.add_init(2, "self.{} = self.__scale({}, '{}', num_features={})".format(
+            IR_node.variable_name,
+            dim,
+            IR_node.name,
+            IR_node.layer.attr['_output_shapes'].list.shape[0].dim[-1].size
+        ))
+
+        self.add_body(2, "{:<15} = self.{}({})".format(
+            IR_node.variable_name,
+            IR_node.variable_name,
+            self.parent_variable_name(IR_node)
+        ))
+
 
     def emit_Squeeze(self, IR_node):
         self.add_body(2, "{:<15} = torch.squeeze({})".format(
 
@@ -450,11 +493,11 @@ class KitModel(nn.Module):
 
 
     def emit_Pad(self, IR_node):
-        if IR_node.get_attr('mode') == 'constant':
+        if IR_node.get_attr('mode').lower() == 'constant':
             mode = "mode = 'constant', value = {}".format(0)
-        elif IR_node.get_attr('mode') == 'reflect':
+        elif IR_node.get_attr('mode').lower() == 'reflect':
             mode = "mode = 'reflect'"
-        elif IR_node.get_attr('mode') == 'SYMMETRIC':
+        elif IR_node.get_attr('mode').upper() == 'SYMMETRIC':
             mode = "mode = 'replicate'"
         else:
             assert False
 
@@ -565,6 +608,95 @@ class KitModel(nn.Module):
 
         return layer""")
 
+
+    def _layer_Scale(self):
+        self.add_body(0, """
+    # from torch.nn.parameter import Parameter
+
+    class _Scale(nn.Module):
+
+        def __init__(self, num_features, affine=True):
+            super(KitModel._Scale, self).__init__()
+            self.num_features = num_features
+            self.affine = affine
+
+            self.running_mean = torch.zeros(num_features)
+            self.running_var = torch.ones(num_features)
+            self.training = False
+            self.eps = 1e-5
+            if self.affine:
+                self.weight = nn.Parameter(torch.Tensor(num_features))
+                self.bias = nn.Parameter(torch.Tensor(num_features))
+            else:
+                self.register_parameter('weight', None)
+                self.register_parameter('bias', None)
+            self.reset_parameters()
+
+
+        def reset_parameters(self):
+            if self.affine:
+                self.weight.data.uniform_()
+                self.bias.data.zero_()
+
+        def _check_input_dim(self, input):
+            raise NotImplementedError
+
+        def forward(self, input):
+            self._check_input_dim(input)
+
+            return F.batch_norm(
+                input, self.running_mean, self.running_var, self.weight, self.bias,
+                self.training,
+                0, self.eps)
+
+
+    class Scale1d(_Scale):
+
+        def _check_input_dim(self, input):
+            if input.dim() != 2 and input.dim() != 3:
+                raise ValueError('expected 2D or 3D input (got {}D input)'
+                                 .format(input.dim()))
+
+
+    class Scale2d(_Scale):
+
+        def _check_input_dim(self, input):
+            if input.dim() != 4:
+                raise ValueError('expected 4D input (got {}D input)'
+                                 .format(input.dim()))
+
+
+    class Scale3d(_Scale):
+
+        def _check_input_dim(self, input):
+            if input.dim() != 5:
+                raise ValueError('expected 5D input (got {}D input)'
+                                 .format(input.dim()))
+
+
+    @staticmethod
+    def __scale(dim, name, **kwargs):
+        if dim == 1:    layer = KitModel.Scale1d(**kwargs)
+        elif dim == 2:  layer = KitModel.Scale2d(**kwargs)
+        elif dim == 3:  layer = KitModel.Scale3d(**kwargs)
+        else:           raise NotImplementedError()
+
+        if 'scale' in __weights_dict[name]:
+            layer.state_dict()['weight'].copy_(torch.from_numpy(__weights_dict[name]['scale']))
+        else:
+            layer.weight.data.fill_(1)
+
+        if 'bias' in __weights_dict[name]:
+            layer.state_dict()['bias'].copy_(torch.from_numpy(__weights_dict[name]['bias']))
+        else:
+            layer.bias.data.fill_(0)
+
+        return layer""")
+
+
+
     def _layer_LRN(self):
         self.add_body(0, """
     class LRN(nn.Module):
diff --git a/tests/test_conversion_imagenet.py b/tests/test_conversion_imagenet.py
index 85ce64f..ddb9167 100644
--- a/tests/test_conversion_imagenet.py
+++ b/tests/test_conversion_imagenet.py
@@ -739,11 +739,11 @@ class TestModels(CorrectnessTest):
         },
 
         'coreml' : {
-            'inception_v3' : [CaffeEmit, CoreMLEmit, KerasEmit, MXNetEmit, TensorflowEmit],
-            'mobilenet' : [CaffeEmit, CoreMLEmit, KerasEmit, MXNetEmit, TensorflowEmit],
-            'resnet50' : [CaffeEmit, CoreMLEmit, KerasEmit, MXNetEmit, TensorflowEmit],
-            'tinyyolo' : [CoreMLEmit, KerasEmit, MXNetEmit, TensorflowEmit],
-            'vgg16' : [CaffeEmit, CoreMLEmit, KerasEmit, MXNetEmit, TensorflowEmit],
+            'inception_v3' : [CaffeEmit, CoreMLEmit, KerasEmit, MXNetEmit, PytorchEmit, TensorflowEmit],
+            'mobilenet' : [CaffeEmit, CoreMLEmit, KerasEmit, MXNetEmit, PytorchEmit, TensorflowEmit],
+            'resnet50' : [CaffeEmit, CoreMLEmit, KerasEmit, MXNetEmit, PytorchEmit, TensorflowEmit],
+            'tinyyolo' : [CoreMLEmit, KerasEmit, MXNetEmit, PytorchEmit, TensorflowEmit],
+            'vgg16' : [CaffeEmit, CoreMLEmit, KerasEmit, MXNetEmit, PytorchEmit, TensorflowEmit],
         },
 
         'darknet' : {

From e9eb39f65547f7db822871a9d411c20a8ce98160 Mon Sep 17 00:00:00 2001
From: Jiahao Yao
Date: Thu, 17 May 2018 14:45:04 +0800
Subject: [PATCH 3/3] Update pytorch_emitter.py

different solution according to 'max' and 'avg'
---
 mmdnn/conversion/pytorch/pytorch_emitter.py | 71 +++++++++++----------
 1 file changed, 39 insertions(+), 32 deletions(-)

diff --git a/mmdnn/conversion/pytorch/pytorch_emitter.py b/mmdnn/conversion/pytorch/pytorch_emitter.py
index e9bdf69..60fccd0 100644
--- a/mmdnn/conversion/pytorch/pytorch_emitter.py
+++ b/mmdnn/conversion/pytorch/pytorch_emitter.py
@@ -201,27 +201,44 @@ class KitModel(nn.Module):
                 ))
 
         else:
-            # for e in IR_node.get_attr('dilations', []):
-            #     assert e == 1
 
-            # pool_size = IR_node.get_attr('kernel_shape')[1:-1]
-            # strides = IR_node.get_attr('strides')[1:-1]
+            if IR_node.get_attr('pooling_type') == "MAX":
+                # Change to padding defuse
+                input_node = self._defuse_padding(IR_node,", value=float('-inf')")
+                for e in IR_node.get_attr('dilations', []):
+                    assert e == 1
 
-            # print(IR_node.get_attr('pads'))
-            # padding = IR_node.get_attr('pads')[1:dim]
-            # ceil_mode = self.is_ceil_mode(IR_node.get_attr('pads'))
+                pool_size = IR_node.get_attr('kernel_shape')[1:-1]
+                strides = IR_node.get_attr('strides')[1:-1]
 
-            # # input_node = self._defuse_padding(IR_node, exstr)
-            # self.add_body(2, "{:<15} = F.{}({}, kernel_size={}, stride={}, padding={}, ceil_mode={})".format(
-            #     IR_node.variable_name,
-            #     pool_name,
-            #     self.parent_variable_name(IR_node),
-            #     tuple(pool_size),
-            #     tuple(strides),
-            #     tuple(padding),
-            #     ceil_mode
-            # ))
+                self.add_body(2, "{:<15} = F.{}({}, kernel_size={}, stride={}, padding={}, ceil_mode={})".format(
+                    IR_node.variable_name,
+                    pool_name,
+                    input_node,
+                    tuple(pool_size),
+                    tuple(strides),
+                    0,
+                    False
+                ))
 
-            # Change to padding defuse
-            input_node = self._defuse_padding(IR_node)
-            for e in IR_node.get_attr('dilations', []):
-                assert e == 1
+            elif IR_node.get_attr('pooling_type') == "AVG":
 
-            pool_size = IR_node.get_attr('kernel_shape')[1:-1]
-            strides = IR_node.get_attr('strides')[1:-1]
+
+                for e in IR_node.get_attr('dilations', []):
+                    assert e == 1
+
+                pool_size = IR_node.get_attr('kernel_shape')[1:-1]
+                strides = IR_node.get_attr('strides')[1:-1]
+
+                padding = IR_node.get_attr('pads')[1:dim]
+                ceil_mode = self.is_ceil_mode(IR_node.get_attr('pads'))
+
+                # input_node = self._defuse_padding(IR_node, exstr)
+                self.add_body(2, "{:<15} = F.{}({}, kernel_size={}, stride={}, padding={}, ceil_mode={})".format(
+                    IR_node.variable_name,
+                    pool_name,
+                    self.parent_variable_name(IR_node),
+                    tuple(pool_size),
+                    tuple(strides),
+                    tuple(padding),
+                    ceil_mode
+                ))
+
+            else:
+                raise ValueError()
 
-            self.add_body(2, "{:<15} = F.{}({}, kernel_size={}, stride={}, padding={}, ceil_mode={})".format(
-                IR_node.variable_name,
-                pool_name,
-                input_node,
-                tuple(pool_size),
-                tuple(strides),
-                0,
-                False
-            ))
 
     def emit_UNKNOWN(self, IR_node):
         print(IR_node.name)
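
A note on the pooling rework in PATCH 3/3: for max pooling the emitter now defuses the IR padding into an explicit pad filled with `float('-inf')` and calls the pooling function with `padding=0`, while average pooling keeps the emitted `padding=` and `ceil_mode=` arguments. The sketch below is not part of the patches; it only illustrates, with made-up tensor sizes, why the `-inf` fill is the right choice for max pooling in PyTorch.

```python
# Illustrative only: compare built-in max-pool padding with an explicit
# -inf pad, the same defuse strategy the emitter uses for MAX pooling.
import torch
import torch.nn.functional as F

x = -torch.rand(1, 1, 5, 5)  # all-negative activations make the difference visible

# Reference: F.max_pool2d pads implicitly with negative infinity.
ref = F.max_pool2d(x, kernel_size=3, stride=2, padding=1)

# Defused padding: pad explicitly with -inf, then pool with padding=0.
out = F.max_pool2d(F.pad(x, (1, 1, 1, 1), value=float('-inf')),
                   kernel_size=3, stride=2, padding=0)
print(torch.allclose(ref, out))    # True

# Padding with zeros instead would let the pad values win the max at the borders.
bad = F.max_pool2d(F.pad(x, (1, 1, 1, 1), value=0.0),
                   kernel_size=3, stride=2, padding=0)
print(torch.allclose(ref, bad))    # False for inputs like this one
```

Average pooling cannot be defused the same way, because explicitly padded values would be counted in the window average; that is why the AVG branch keeps the `padding` and `ceil_mode` arguments instead of calling `_defuse_padding`.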
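The `_Scale` layer that PATCH 2/3 adds to the emitted model applies a per-channel affine transform by calling `F.batch_norm` with frozen zero mean and unit variance. A standalone sketch of that identity follows, with shapes and values chosen only for illustration (not taken from the patch).

```python
# Illustrative only: a per-channel scale/shift expressed through F.batch_norm
# with frozen statistics, the same trick the generated _Scale layer uses.
import torch
import torch.nn.functional as F

x = torch.randn(2, 3, 8, 8)               # NCHW input with 3 channels
weight = torch.tensor([0.5, 2.0, -1.0])   # per-channel scale
bias = torch.tensor([0.1, 0.0, 0.3])      # per-channel shift

out = F.batch_norm(
    x,
    torch.zeros(3),   # running_mean = 0
    torch.ones(3),    # running_var = 1
    weight, bias,
    training=False,
    momentum=0.0,
    eps=0.0,          # the emitted layer uses eps=1e-5; 0 here gives an exact match
)

expected = x * weight.view(1, 3, 1, 1) + bias.view(1, 3, 1, 1)
print(torch.allclose(out, expected, atol=1e-6))   # True
```

With `running_mean = 0`, `running_var = 1` and `training=False`, batch normalization reduces to `weight * x + bias` per channel, which is what lets `__scale` load the Caffe-style 'scale' and 'bias' weights directly.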