Fix bug in import of Conv op in ONNX.

Spandan Tiwari 2018-08-14 16:00:38 -07:00
Parent ceaec5636f
Commit 6547e2ce7f
2 changed files: 27 additions and 1 deletion

View file

@@ -3176,7 +3176,7 @@ FunctionPtr ONNXToCNTKHelper::CreateCNTKConvTransposeNode(const Node *node, cons
FunctionPtr ONNXToCNTKHelper::CreateCNTKConvNode(const Node *node, const std::vector<Variable> &inputs)
{
    Variable convolutionMap = inputs[1];
-    size_t numSpatialDim = convolutionMap.Shape().Rank() - 1; // This is conv op dimension, i.e. 2 for 2D conv, 3 for 3D conv.
+    size_t numSpatialDim = convolutionMap.Shape().Rank() - 2; // This is conv op dimension, i.e. 2 for 2D conv, 3 for 3D conv.
    NDShape strides = GetNamedAttributeAsShape(node, "strides", false, NDShape(std::vector<size_t>(numSpatialDim, 1u)));
    NDShape dilation = GetNamedAttributeAsShape(node, "dilations", false, NDShape(std::vector<size_t>(numSpatialDim, 1u)));
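For context on the one-character fix above: the convolution kernel tensor carries two channel axes (output channels and input channels) in addition to its spatial axes, so the number of spatial dimensions is the kernel rank minus 2, not minus 1. A minimal Python sketch of that relationship (illustrative only, not part of the commit):

# The kernel rank includes the output-channel and input-channel axes on top of
# the spatial axes, hence "rank - 2" in the fix above.
def num_spatial_dims(kernel_shape):
    return len(kernel_shape) - 2

assert num_spatial_dims((64, 3, 3, 3)) == 2     # 2D conv kernel (O x I x H x W)
assert num_spatial_dims((64, 3, 3, 3, 3)) == 3  # 3D conv kernel (O x I x D x H x W)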
@@ -3189,6 +3189,14 @@ FunctionPtr ONNXToCNTKHelper::CreateCNTKConvNode(const Node *node, const std::ve
    std::vector<bool> cntkConvAutoPadding;
    auto convOperand = GetNodeOperandWithPaddingResolved(/*output arg first*/ cntkConvAutoPadding, strides, node, inputs[0]);
+    // At this point length of vectors strides, dilation, and cntkConvAutoPadding must be equal to
+    // number of spatial dimensions (2 for 2D conv, 3 for 3D conv).
+    // In order to match the expected input for CNTK Convolution API we will append one more element
+    // in each for the "channel" axis.
+    strides = strides.AppendShape({ 1 });
+    dilation = dilation.AppendShape({ 1 });
+    cntkConvAutoPadding.push_back(false);
    auto operandPlaceholder = PlaceholderVariable(convOperand.Shape(), L"operand", {});
    auto convmapPlaceholder = PlaceholderVariable(convolutionMap.Shape(), L"convolutionMap", {});
    FunctionPtr operandWithBatchAxis = ToBatch(operandPlaceholder);
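The lines added above extend the per-spatial-axis attributes with one trailing entry for the channel axis, which the CNTK Convolution API also expects to be covered by strides, dilations, and auto-padding. A rough Python rendering of the same adjustment (values are illustrative, not part of the commit):

# Per-spatial-axis attributes read from the ONNX node (2D example)...
strides = (2, 2)
dilation = (1, 1)
auto_padding = [True, True]

# ...get one extra entry for the channel axis before the convolution is built:
strides = strides + (1,)      # stride 1 over channels
dilation = dilation + (1,)    # no dilation over channels
auto_padding.append(False)    # never auto-pad the channel axis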

View file

@@ -425,6 +425,24 @@ def test_Concat(tmpdir, dtype):
    verify_one_input(model, data1, tmpdir, 'Concat_1')
+@pytest.mark.parametrize("dtype", DType_Config)
+def test_Conv(tmpdir, dtype, device_id):
+    if device_id == -1 and dtype == np.float16:
+        pytest.skip('Test is skipped on CPU with float16 data')
+    device = cntk_device(device_id)
+    with C.default_options(dtype=dtype):
+        input_shape = (3, 20, 32)
+        img = np.reshape(np.arange(np.prod(input_shape), dtype = dtype), input_shape)
+        x = C.input_variable(input_shape)
+        kernel_shape = (64, 3, 3, 3) # For convolution the shape is (O x I x W x H)
+        kernel = C.constant(value = np.ones(shape=(kernel_shape), dtype = dtype))
+        conv_model = C.convolution(kernel, x, auto_padding = [False, True, True])
+        verify_one_input(conv_model, img, tmpdir, 'Conv_0', device)
@pytest.mark.parametrize("dtype", DType_Config)
def test_ConvTranspose(tmpdir, dtype, device_id):
    if device_id == -1 and dtype == np.float16:
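The new test_Conv above exercises only the 2D case with a rank-4 kernel; a 3D variant with a rank-5 kernel would also cover the corrected numSpatialDim computation. A hypothetical extension along the same lines as the existing test (not part of this commit):

# Hypothetical 3D-convolution variant; kernel laid out as (O x I x D x H x W).
input_shape = (3, 8, 20, 32)
img = np.reshape(np.arange(np.prod(input_shape), dtype=dtype), input_shape)
x = C.input_variable(input_shape)
kernel = C.constant(value=np.ones(shape=(64, 3, 3, 3, 3), dtype=dtype))
conv_model = C.convolution(kernel, x, auto_padding=[False, True, True, True])
verify_one_input(conv_model, img, tmpdir, 'Conv_3d', device)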