Add ONNX export support for ones_like, zeros_like, and eye_like ops.
This commit is contained in:
Parent: c2072cc4ab
Commit: 27f47bba83

@@ -4172,6 +4172,12 @@ namespace CNTK
     ///
     CNTK_API FunctionPtr OnesLike(const Variable& operand, const std::wstring& name = L"");
 
+    ///
+    /// Create an instance of a constant-like operation. This produces a constant tensor with the specified fill value
+    /// and with the shape and dynamic axes of the operand.
+    ///
+    CNTK_API FunctionPtr ConstantLike(const Variable& operand, const double fillValue = 0.0, const std::wstring& name = L"");
+
     ///
     /// Create an instance of an eye-like operation. This produces a tensor with ones on the main diagonal and zeros elsewhere, with the shape and dynamic axes specified by the operand.
     ///
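
For orientation, here is a minimal usage sketch of the like-ops this commit wires up for export, using the CNTK Python API that the tests below exercise (shapes include the default batch axis):

import cntk as C
import numpy as np

x = C.input_variable((2, 3), dtype=np.float32)
ones = C.ones_like(x)    # all ones, same shape and dynamic axes as x
zeros = C.zeros_like(x)  # all zeros, same shape and dynamic axes as x

data = np.arange(6, dtype=np.float32).reshape(1, 2, 3)  # batch of one sample
print(ones.eval({x: data}))   # array of ones with shape (1, 2, 3)
print(zeros.eval({x: data}))  # array of zeros with shape (1, 2, 3)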
@@ -1716,16 +1716,18 @@ namespace CNTK
 
     FunctionPtr ZerosLike(const Variable& operand, const std::wstring& name)
     {
-        auto additionalProperties = Dictionary();
-        additionalProperties[PrimitiveFunctionAttribute::AttributeNameFillValue] = 0.0;
-
-        return UnaryOp(PrimitiveOpType::ConstantOp, operand, std::move(additionalProperties), name);
+        return ConstantLike(operand, 0.0, name);
     }
 
     FunctionPtr OnesLike(const Variable& operand, const std::wstring& name)
     {
+        return ConstantLike(operand, 1.0, name);
+    }
+
+    FunctionPtr ConstantLike(const Variable& operand, const double fillValue, const std::wstring& name)
+    {
         auto additionalProperties = Dictionary();
-        additionalProperties[PrimitiveFunctionAttribute::AttributeNameFillValue] = 1.0;
+        additionalProperties[PrimitiveFunctionAttribute::AttributeNameFillValue] = fillValue;
 
         return UnaryOp(PrimitiveOpType::ConstantOp, operand, std::move(additionalProperties), name);
     }
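
The refactoring makes ZerosLike and OnesLike thin wrappers over the new ConstantLike, which records the fill value in the fillValue attribute. The intended semantics match numpy's full_like; the helper below is a hypothetical reference, not part of CNTK:

import numpy as np

def constant_like_ref(operand, fill_value=0.0):
    # Same shape and dtype as the operand, every element set to fill_value.
    return np.full_like(operand, fill_value)

x = np.arange(6, dtype=np.float32).reshape(2, 3)
assert np.array_equal(constant_like_ref(x, 0.0), np.zeros_like(x))  # ZerosLike
assert np.array_equal(constant_like_ref(x, 1.0), np.ones_like(x))   # OnesLike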
@@ -3876,6 +3876,23 @@ void CNTKToONNXHelper::CopyAttributes(const FunctionPtr& src, onnxruntime::Node*
             size_t k = src->Attributes()[L"numItems"].Value<size_t>();
             node->AddAttribute(attributesMap[L"numItems"], static_cast<int64_t>(k));
         }
+        else if (src->OpName() == L"ConstantOp")
+        {
+            float value = 0.0f;
+            if (src->Attributes().Contains(L"fillValue"))
+                value = (float)src->Attributes()[L"fillValue"].Value<double>();
+            node->AddAttribute("value", value);
+        }
+        else if (src->OpName() == L"EyeLikeOp")
+        {
+            if (src->Attributes().Contains(L"OutputSparse"))
+            {
+                bool value = src->Attributes()[L"OutputSparse"].Value<bool>();
+                if (value)
+                    fprintf(stderr, "Warning: EyeLikeOp - Op is configured for sparse output. Sparse is not supported in ONNX. Exporting as dense.\n");
+            }
+            node->AddAttribute("k", static_cast<int64_t>(0));
+        }
     }
     else
     {
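
To see what the exporter writes, the resulting graph can be inspected with the onnx Python package. A minimal sketch, assuming 'model.onnx' is a placeholder path to a model exported from a CNTK graph containing one of these ops:

import onnx

m = onnx.load('model.onnx')
for node in m.graph.node:
    if node.op_type in ('ConstantLike', 'EyeLike'):
        # 'value' for ConstantLike, 'k' for EyeLike, as set in CopyAttributes above.
        attrs = {a.name: onnx.helper.get_attribute_value(a) for a in node.attribute}
        print(node.op_type, attrs)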
@@ -4468,7 +4485,8 @@ onnxruntime::Node* CNTKToONNXHelper::CreateONNXNodesForStraightThrough(const Fun
     const std::unordered_map<Variable, Variable>& compositeOutputsMap)
 {
     // This method exports CNTK's StraightThrough estimator op through an ONNX sub-graph.
-    // ONNX subgraph consists of Greater + Cast + Mul + Sub ops.
+    // ONNX subgraph consists of Greater + Cast + Mul + Sub ops. It is essentially doing:
+    // Output = Cast(Input > 0)*2 - 1;
     std::vector<onnxruntime::NodeArg*> inputs;
     ProcessInputs(src, graph, functionNodes, variableNodes, compositeOutputsMap, inputs);
 
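
The formula in the new comment is easy to sanity-check outside the graph; a numpy sketch of the sub-graph's forward computation:

import numpy as np

def straight_through_forward(x):
    # Cast(Input > 0) * 2 - 1: maps positives to +1 and non-positives to -1.
    return (x > 0).astype(x.dtype) * 2 - 1

print(straight_through_forward(np.array([-1.5, 0.0, 2.0], dtype=np.float32)))
# -> [-1. -1.  1.]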
@@ -2930,6 +2930,47 @@ FunctionPtr ONNXToCNTKHelper::CreateFunction(const Node *node, const std::vector
         FunctionPtr cntkFunction = TopK(inputs[0], k, axis, ToFixedWStringFromMultiByte(node->Name()));
         return cntkFunction;
     }
+    else if (onnxOpName == "ConstantLike")
+    {
+        // We only support limited scenarios for ConstantLike in CNTK importer.
+        // Creating the output tensor from 'shape' attribute is not supported.
+        // Also, 'dtype' attribute is ignored (limitations of Cast op in CNTK),
+        // and the output tensor type is always the same as the input tensor type.
+        if (inputs.size() == 0)
+        {
+            if (!HasNamedAttribute(node, "shape"))
+                LogicError("ConstantLike: Either input tensor or 'shape' attribute must be provided.");
+            else
+                RuntimeError("ConstantLike: 'shape' attribute is not supported in CNTK importer. Only tensor input is supported.");
+        }
+        if (HasNamedAttribute(node, "dtype"))
+            fprintf(stderr, "Warning: ConstantLike - 'dtype' attribute is not supported in CNTK importer. Datatype of the input tensor is used for the output type.\n");
+
+        float value = GetNamedAttributeAsFloat(node, "value", 0.0f);
+        return ConstantLike(inputs[0], value, ToFixedWStringFromMultiByte(node->Name()));
+    }
+    else if (onnxOpName == "EyeLike")
+    {
+        // We only support limited scenarios for EyeLike in CNTK importer.
+        // Only k=0 (main diagonal) is supported.
+        // Also, 'dtype' attribute is ignored (limitations of Cast op in CNTK),
+        // and the output tensor type is always the same as the input tensor type.
+        if (inputs[0].Shape().Rank() != 2)
+            LogicError("EyeLike: Input tensor must be a 2D tensor.");
+        if (HasNamedAttribute(node, "k"))
+        {
+            int64_t k = GetNamedAttributeAsInt64(node, "k");
+            if (k != 0)
+                NOT_IMPLEMENTED;
+        }
+        if (HasNamedAttribute(node, "dtype"))
+            fprintf(stderr, "Warning: EyeLike - 'dtype' attribute is not supported in CNTK importer. Datatype of the input tensor is used for the output type.\n");
+
+        // Note that we create EyeLike op with isOutputSparse=true (default).
+        // ONNX does not have any explicit control on this, so just for efficiency
+        // we choose sparse output.
+        return EyeLike(inputs[0], true, ToFixedWStringFromMultiByte(node->Name()));
+    }
     else
     {
         LogicError("ONNX (%s) is not supported in CNTK", onnxOpName.c_str());
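
The importer's k=0 restriction corresponds to the main diagonal. A numpy sketch of the only EyeLike case it accepts (eye_like_ref is illustrative, not CNTK API):

import numpy as np

def eye_like_ref(x):
    # Same 2-D shape and dtype as x; ones on the main diagonal (k=0), zeros elsewhere.
    return np.eye(x.shape[0], x.shape[1], dtype=x.dtype)

x = np.arange(12, dtype=np.float32).reshape(3, 4)
print(eye_like_ref(x))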
@@ -443,6 +443,12 @@ namespace ONNX
         { L"StraightThrough",{ {
             { L"StraightThrough", "StraightThrough" },
         } } },
+        { L"ConstantOp",{ {
+            { L"ConstantOp", "ConstantLike" },
+        } } },
+        { L"EyeLikeOp",{ {
+            { L"EyeLikeOp", "EyeLike" },
+        } } },
     };
 
     // given a cntkOpName and cntk attribute OpName which is saved in CNTK::Function's attribute,
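
The table maps a CNTK op name (and the attribute OpName stored on the Function) to the ONNX op type. Flattened to a plain dict for illustration only:

cntk_to_onnx_op = {
    'StraightThrough': 'StraightThrough',
    'ConstantOp': 'ConstantLike',
    'EyeLikeOp': 'EyeLike',
}
assert cntk_to_onnx_op['EyeLikeOp'] == 'EyeLike'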
@@ -660,6 +660,32 @@ def test_Exp(tmpdir, dtype):
         model = C.exp(x)
         verify_one_input(model, data, tmpdir, 'Exp_1')
 
+#EyeLike
+@pytest.mark.parametrize("dtype", DType_Config)
+def test_EyeLike(tmpdir, dtype):
+    dim_size = 4
+    with C.default_options(dtype = dtype):
+        data = np.arange(dim_size*dim_size, dtype=dtype).reshape((dim_size, dim_size))
+        x = C.input_variable((dim_size, dim_size), dtype=dtype, dynamic_axes=[])
+        model = C.eye_like(x, sparse_output=False)
+        output_ref = model.eval({x:data})
+
+        # For this op, we use custom verification because the output is sparse.
+        name = 'EyeLike_0'
+        test_model_path = os.path.join(str(tmpdir), R'test_' + name)
+        os.mkdir(test_model_path)
+        test_data_path = os.path.join(str(test_model_path), R'test_data_set_0')
+        os.mkdir(test_data_path)
+        filename = os.path.join(str(test_model_path), name + R'.onnx')
+        model.save(filename, format=C.ModelFormat.ONNX)
+        loaded_model = C.Function.load(filename, format=C.ModelFormat.ONNX)
+        onnx_model = onnx.load(filename)
+        # Below is the trick to convert the sparse tensor to dense.
+        z = C.times(loaded_model, np.eye(dim_size))
+        output_test = z.eval({z.arguments[0]:data})
+
+        assert np.allclose(output_test, output_ref, 1e-05, 1e-08)
+
 #Flatten
 @pytest.mark.parametrize("dtype", DType_Config)
 def test_Flatten(tmpdir, dtype):
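
Why the times-with-identity trick in the test densifies the output: right-multiplying any matrix by the identity leaves its values unchanged, so z evaluates to the same numbers as the sparse EyeLike result, returned as a dense array. In numpy terms:

import numpy as np

A = np.eye(4, dtype=np.float32)  # what eye_like produces for a 4x4 input
assert np.array_equal(A @ np.eye(4, dtype=np.float32), A)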
@@ -1192,6 +1218,17 @@ def test_Neg(tmpdir, dtype):
     model = C.negate(data0)
     verify_no_input(model, tmpdir, 'Neg_0')
 
+#OnesLike
+@pytest.mark.parametrize("dtype", DType_Config)
+def test_OnesLike(tmpdir, dtype):
+    if dtype == np.float16:
+        pytest.skip('Test is skipped with float16 data due to ONNX spec type inference bug.') # Can be removed when ONNX bug fix PR is merged.
+    with C.default_options(dtype = dtype):
+        data = np.arange(24, dtype=dtype).reshape((6, 4))
+        x = C.input_variable((6, 4), dtype=dtype)
+        model = C.ones_like(x)
+        verify_one_input(model, data, tmpdir, 'OnesLike_0')
+
 #OptimizedRNNStack
 OPTIM_RNN_STACK_CONFIGS = ((True, 1, 2, 3, 'lstm'), (False, 1, 4, 8, 'lstm'),
                            (True, 2, 2, 3, 'lstm'), (True, 2, 4, 8, 'lstm'), (True, 2, 6, 8, 'lstm'),
@@ -1772,6 +1809,17 @@ def test_Select(flag, if_true, if_false, tmpdir):
     model = C.element_select(flag, if_true, if_false_var)
     verify_one_input(model, if_false, tmpdir, 'Select_1_if_false')
 
+#ZerosLike
+@pytest.mark.parametrize("dtype", DType_Config)
+def test_ZerosLike(tmpdir, dtype):
+    if dtype == np.float16:
+        pytest.skip('Test is skipped with float16 data due to ONNX spec type inference bug.') # Can be removed when ONNX bug fix PR is merged.
+    with C.default_options(dtype = dtype):
+        data = np.arange(24, dtype=dtype).reshape((6, 4))
+        x = C.input_variable((6, 4), dtype=dtype)
+        model = C.zeros_like(x)
+        verify_one_input(model, data, tmpdir, 'ZerosLike_0')
+
 # Cos
 @pytest.mark.parametrize("dtype", DType_Config)
 def test_Cos(tmpdir, dtype):