Fix reduce ops for ONNX export/import with all_static_axes() and all_axes().
Bowen Bao 2018-08-15 10:21:33 -07:00
Parent ee2fa5e70d
Commit 0e57d20b2c
2 changed files with 106 additions and 4 deletions
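The fix targets reductions taken over Axis.all_static_axes() and Axis.all_axes(), which previously did not survive ONNX export/import. A minimal sketch of the behavior being fixed, using the CNTK Python API (the output file names are illustrative):

import cntk as C

# Reduce over every static axis: the dynamic (batch) axis survives,
# so the exported ONNX 'axes' must list only the static-axis indices.
x = C.input_variable((2, 3))
model_static = C.reduce_sum(x, C.Axis.all_static_axes())

# Reduce over all axes, dynamic ones included: the ONNX 'axes' must
# also cover the leading dynamic-axis indices.
model_all = C.reduce_sum(x, C.Axis.all_axes())

# With this commit, both variants export to valid ONNX reduce nodes.
model_static.save('reduce_static.onnx', format=C.ModelFormat.ONNX)
model_all.save('reduce_all.onnx', format=C.ModelFormat.ONNX)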


@@ -1377,9 +1377,17 @@ int64_t CNTKToONNXHelper::ConvertAxisToOnnx(const Axis &axis, const Variable &operand)
 std::vector<int64_t> CNTKToONNXHelper::ConvertAxesToOnnx(const std::vector<Axis> &axes, const Variable &operand)
 {
-    if (std::any_of(axes.cbegin(), axes.cend(), [](const Axis &axis) {return axis == Axis::AllStaticAxes(); }))
+    if (std::any_of(axes.cbegin(), axes.cend(), [](const Axis &axis) {return axis == Axis::AllStaticAxes() || axis == Axis::AllAxes(); }))
     {
         std::vector<int64_t> onnxAxes;
+        if (std::any_of(axes.cbegin(), axes.cend(), [](const Axis &axis) {return axis == Axis::AllAxes(); }))
+        {
+            for (int i = 0; i < operand.DynamicAxes().size(); i++)
+            {
+                onnxAxes.push_back(i);
+            }
+        }
         for (int i = 0; i < operand.Shape().Rank(); i++)
         {
             onnxAxes.push_back(i + operand.DynamicAxes().size());
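The conversion above relies on the exported tensor layout: dynamic (batch/sequence) axes come first, followed by CNTK's static axes, so static axis i maps to ONNX axis i + operand.DynamicAxes().size(), and AllAxes additionally covers the leading dynamic indices. A hypothetical Python rendering of that mapping (convert_all_axes_to_onnx is illustrative, not a CNTK API):

# Mirror of the ConvertAxesToOnnx branch above, in plain Python.
def convert_all_axes_to_onnx(num_dynamic_axes, static_rank, include_dynamic):
    onnx_axes = []
    if include_dynamic:                      # Axis.all_axes()
        onnx_axes.extend(range(num_dynamic_axes))
    # Axis.all_static_axes(): static axis i -> ONNX axis i + num_dynamic_axes
    onnx_axes.extend(i + num_dynamic_axes for i in range(static_rank))
    return onnx_axes

# A [batch] x (2, 3) operand: one dynamic axis, two static axes.
assert convert_all_axes_to_onnx(1, 2, include_dynamic=False) == [1, 2]
assert convert_all_axes_to_onnx(1, 2, include_dynamic=True) == [0, 1, 2]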
@@ -2994,6 +3002,15 @@ void CNTKToONNXHelper::CopyAttributes(const FunctionPtr& src, LotusIR::Node* node)
         else if (src->Attributes().Contains(L"axis"))
             reductionAxes.push_back((Axis)(src->Attributes()[L"axis"].Value<Axis>()));
 
+        // Reduction on the batch axis in CNTK removes the batch axis, even if keepdims is true.
+        // For ONNX export we need to make sure we export keepdims as 0 (false).
+        // The same applies to AllStaticAxes and AllAxes.
+        if (reductionAxes.size() == 1
+            && (reductionAxes[0] == Axis::DefaultBatchAxis()
+                || reductionAxes[0] == Axis::AllStaticAxes()
+                || reductionAxes[0] == Axis::AllAxes()))
+            keepReducedDimensions = 0;
+
         node->AddAttribute(attributesMap[L"keepdims"], keepReducedDimensions);
         std::vector<int64_t> axes = ConvertAxesToOnnx(reductionAxes, src->Inputs()[0]);
@@ -3364,7 +3381,11 @@ void CNTKToONNXHelper::CopyAttributes(const FunctionPtr& src, LotusIR::Node* node)
             reductionAxes = AsVector<Axis>(src->Attributes()[L"axisVec"].Value<std::vector<DictionaryValue>>());
 
             // Reduction on the batch axis in CNTK removes the batch axis, even if keepdims is true.
             // For ONNX export we need to make sure we export keepdims as 0 (false).
-            if (reductionAxes.size() == 1 && (reductionAxes[0] == Axis::DefaultBatchAxis()))
+            // The same applies to AllStaticAxes and AllAxes.
+            if (reductionAxes.size() == 1
+                && (reductionAxes[0] == Axis::DefaultBatchAxis()
+                    || reductionAxes[0] == Axis::AllStaticAxes()
+                    || reductionAxes[0] == Axis::AllAxes()))
                 keepReducedDimensions = 0;
 
             std::vector<int64_t> axes = ConvertAxesToOnnx(reductionAxes, src->Inputs()[0]);
             node->AddAttribute("axes", axes);
@@ -3380,7 +3401,7 @@ void CNTKToONNXHelper::CopyAttributes(const FunctionPtr& src, LotusIR::Node* node)
                 int64_t ax = ConvertAxisToOnnx(axis, src->Inputs()[0]);
                 node->AddAttribute("axis", ax);
             }
-        }
+            node->AddAttribute("keepdims", keepReducedDimensions);
         }
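The keepdims adjustments above reconcile a shape mismatch between the two frameworks: CNTK removes the batch axis when reducing over it even if keepdims is true, and likewise collapses the shape for AllStaticAxes/AllAxes, whereas ONNX with keepdims=1 keeps a size-1 dimension for every reduced axis. A short numpy sketch of the two conventions the exporter must match:

import numpy as np

data = np.arange(6, dtype=np.float32).reshape(1, 2, 3)  # [batch=1] x (2, 3)

# ONNX semantics with keepdims=1: every reduced axis survives as size 1.
print(data.sum(axis=(0, 1, 2), keepdims=True).shape)   # (1, 1, 1)

# CNTK semantics for batch/all-axes reductions: the reduced axes vanish
# regardless of the keepdims flag, so the exporter writes keepdims=0.
print(data.sum(axis=(0, 1, 2), keepdims=False).shape)  # ()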


@@ -1172,6 +1172,16 @@ def test_ReduceL1(tmpdir, dtype):
         model = C.reduce_l1(x, 1)
         verify_one_input(model, data, tmpdir, 'ReduceL1_1')
 
+        model = C.reduce_l1(data, C.Axis.all_static_axes())
+        verify_no_input(model, tmpdir, 'ReduceL1_2')
+
+        x = C.input_variable(data.shape)
+        model = C.reduce_l1(x, C.Axis.default_batch_axis())
+        verify_one_input(model, [data], tmpdir, 'ReduceL1_3')
+
+        model = C.reduce_l1(x, C.Axis.all_axes())
+        verify_one_input(model, [data], tmpdir, 'ReduceL1_4')
+
 @pytest.mark.parametrize("dtype", DType_Config)
 def test_ReduceL2(tmpdir, dtype):
     with C.default_options(dtype = dtype):
@@ -1179,6 +1189,13 @@ def test_ReduceL2(tmpdir, dtype):
         model = C.reduce_l2(data, 0)
         verify_no_input(model, tmpdir, 'ReduceL2_0')
 
+        model = C.reduce_l2(data, C.Axis.all_static_axes())
+        verify_no_input(model, tmpdir, 'ReduceL2_1')
+
+        x = C.input_variable(data.shape)
+        model = C.reduce_l2(x, C.Axis.default_batch_axis())
+        verify_one_input(model, [data], tmpdir, 'ReduceL2_2')
+
 @pytest.mark.parametrize("dtype", DType_Config)
 def test_ReduceSumSquare(tmpdir, dtype):
     with C.default_options(dtype = dtype):
@@ -1186,6 +1203,16 @@ def test_ReduceSumSquare(tmpdir, dtype):
         model = C.reduce_sum_square(data, 0)
         verify_no_input(model, tmpdir, 'ReduceSumSquare_0')
 
+        model = C.reduce_sum_square(data, C.Axis.all_static_axes())
+        verify_no_input(model, tmpdir, 'ReduceSumSquare_1')
+
+        x = C.input_variable(data.shape)
+        model = C.reduce_sum_square(x, C.Axis.default_batch_axis())
+        verify_one_input(model, [data], tmpdir, 'ReduceSumSquare_2')
+
+        model = C.reduce_sum_square(x, C.Axis.all_axes())
+        verify_one_input(model, [data], tmpdir, 'ReduceSumSquare_3')
+
 #ReduceLogSum
 @pytest.mark.parametrize("dtype", DType_Config)
 def test_ReduceLogSum(tmpdir, dtype):
@@ -1193,7 +1220,14 @@ def test_ReduceLogSum(tmpdir, dtype):
         data = np.array([[[5,1], [20,2]],[[30,1], [40,2]],[[55,1], [60,2]]], dtype=dtype)
         model = C.reduce_log_sum_exp(data, axis=0)
         verify_no_input(model, tmpdir, 'ReduceLogSum_0')
 
+        model = C.reduce_log_sum_exp(data, C.Axis.all_static_axes())
+        verify_no_input(model, tmpdir, 'ReduceLogSum_1')
+
+        x = C.input_variable(data.shape)
+        model = C.reduce_log_sum_exp(x, C.Axis.default_batch_axis())
+        verify_one_input(model, [data], tmpdir, 'ReduceLogSum_2')
+
 #ReduceMax
 @pytest.mark.parametrize("dtype", DType_Config)
@@ -1203,6 +1237,13 @@ def test_ReduceMax(tmpdir, dtype):
         model = C.reduce_max(data, 0)
         verify_no_input(model, tmpdir, 'ReduceMax_0')
 
+        model = C.reduce_max(data, C.Axis.all_static_axes())
+        verify_no_input(model, tmpdir, 'ReduceMax_1')
+
+        x = C.input_variable(data.shape)
+        model = C.reduce_max(x, C.Axis.default_batch_axis())
+        verify_one_input(model, [data], tmpdir, 'ReduceMax_2')
+
 #ReduceMean
 @pytest.mark.parametrize("dtype", DType_Config)
 def test_ReduceMean(tmpdir, dtype):
@@ -1211,6 +1252,13 @@ def test_ReduceMean(tmpdir, dtype):
         model = C.reduce_mean(data, 0)
         verify_no_input(model, tmpdir, 'ReduceMean_0')
 
+        model = C.reduce_mean(data, C.Axis.all_static_axes())
+        verify_no_input(model, tmpdir, 'ReduceMean_1')
+
+        x = C.input_variable(data.shape)
+        model = C.reduce_mean(x, C.Axis.default_batch_axis())
+        verify_one_input(model, [data], tmpdir, 'ReduceMean_2')
+
 #ReduceMin
 @pytest.mark.parametrize("dtype", DType_Config)
 def test_ReduceMin(tmpdir, dtype):
@@ -1219,6 +1267,13 @@ def test_ReduceMin(tmpdir, dtype):
         model = C.reduce_min(data, 0)
         verify_no_input(model, tmpdir, 'ReduceMin_0')
 
+        model = C.reduce_min(data, C.Axis.all_static_axes())
+        verify_no_input(model, tmpdir, 'ReduceMin_1')
+
+        x = C.input_variable(data.shape)
+        model = C.reduce_min(x, C.Axis.default_batch_axis())
+        verify_one_input(model, [data], tmpdir, 'ReduceMin_2')
+
 #ReduceProd
 @pytest.mark.parametrize("dtype", DType_Config)
 def test_ReduceProd(tmpdir, dtype):
@@ -1227,6 +1282,13 @@ def test_ReduceProd(tmpdir, dtype):
         model = C.reduce_prod(data, 0)
         verify_no_input(model, tmpdir, 'ReduceProd_0')
 
+        model = C.reduce_prod(data, C.Axis.all_static_axes())
+        verify_no_input(model, tmpdir, 'ReduceProd_1')
+
+        x = C.input_variable(data.shape)
+        model = C.reduce_prod(x, C.Axis.default_batch_axis())
+        verify_one_input(model, [data], tmpdir, 'ReduceProd_2')
+
 #ReduceSum
 @pytest.mark.parametrize("dtype", DType_Config)
 def test_ReduceSum(tmpdir, dtype):
@@ -1235,6 +1297,25 @@ def test_ReduceSum(tmpdir, dtype):
         model = C.reduce_sum(data, 0)
         verify_no_input(model, tmpdir, 'ReduceSum_0')
 
+        model = C.reduce_sum(data, [0, 1, 2])
+        verify_no_input(model, tmpdir, 'ReduceSum_1')
+
+        model = C.reduce_sum(data, [0, 2])
+        verify_no_input(model, tmpdir, 'ReduceSum_2')
+
+        model = C.reduce_sum(data, [0, 2], keepdims=False)
+        verify_no_input(model, tmpdir, 'ReduceSum_3')
+
+        model = C.reduce_sum(data, C.Axis.all_static_axes())
+        verify_no_input(model, tmpdir, 'ReduceSum_4')
+
+        x = C.input_variable(data.shape)
+        model = C.reduce_sum(x, C.Axis.default_batch_axis())
+        verify_one_input(model, [data], tmpdir, 'ReduceSum_5')
+
+        model = C.reduce_sum(x, C.Axis.all_axes())
+        verify_one_input(model, [data], tmpdir, 'ReduceSum_6')
+
 #Relu
 @pytest.mark.parametrize("dtype", DType_Config)
 def test_Relu(tmpdir, dtype):