diff --git a/CNTK.sln b/CNTK.sln
index ecd82e9de..cb1846746 100644
--- a/CNTK.sln
+++ b/CNTK.sln
@@ -1452,6 +1452,9 @@ Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "PerformanceProfilerDll", "S
 	EndProjectSection
 EndProject
 Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "CNTKLibraryCSEvalExamplesTest", "Tests\EndToEndTests\EvalClientTests\CNTKLibraryCSEvalExamplesTest\CNTKLibraryCSEvalExamplesTest.csproj", "{3500A847-E024-4E7D-92DD-CC587C17460B}"
+	ProjectSection(ProjectDependencies) = postProject
+		{50EF9EE6-5018-453E-A063-F77044EF1A97} = {50EF9EE6-5018-453E-A063-F77044EF1A97}
+	EndProjectSection
 EndProject
 Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "GoogLeNet", "GoogLeNet", "{789B4AB8-40F1-4A37-823A-BC20D80C8BF1}"
 	ProjectSection(SolutionItems) = preProject
diff --git a/Makefile b/Makefile
index 38e5b29b3..5af3ef239 100644
--- a/Makefile
+++ b/Makefile
@@ -506,6 +506,7 @@ CNTKLIBRARY_TESTS_SRC =\
 	$(CNTKLIBRARY_TESTS_SRC_PATH)/BlockTests.cpp \
 	$(CNTKLIBRARY_TESTS_SRC_PATH)/TensorTests.cpp \
 	$(CNTKLIBRARY_TESTS_SRC_PATH)/ValueTests.cpp \
+	$(CNTKLIBRARY_TESTS_SRC_PATH)/LoadLegacyModelTests.cpp \
 	$(CNTKLIBRARY_TESTS_SRC_PATH)/TrainerTests.cpp \
 	$(CNTKLIBRARY_TESTS_SRC_PATH)/CifarResNet.cpp \
 	$(CNTKLIBRARY_TESTS_SRC_PATH)/SerializationTests.cpp \
diff --git a/Source/CNTKv2LibraryDll/API/CNTKLibrary.h b/Source/CNTKv2LibraryDll/API/CNTKLibrary.h
index e1c1380f5..1b5eb7792 100644
--- a/Source/CNTKv2LibraryDll/API/CNTKLibrary.h
+++ b/Source/CNTKv2LibraryDll/API/CNTKLibrary.h
@@ -1861,8 +1861,11 @@ private:
     private:
         explicit Parameter(const NDArrayViewPtr& value, const std::wstring& name, const std::wstring& uid)
-            : Variable(value->Shape(), VariableKind::Parameter, value->GetDataType(), value->DeepClone(false), true, {}, name, uid)
-        {}
+            : Variable(value->Shape(), VariableKind::Parameter, value->GetDataType(), value, true, {}, name, uid)
+        {
+            if (value->IsReadOnly())
+                InvalidArgument("Parameter cannot be constructed from a read-only NDArrayView value; create a non-read-only clone of the value and use that instead!");
+        }
     };

 // Implementation note: The Variable type is a value type and not polymorphic in nature.
@@ -1943,7 +1946,7 @@ private:
     private:
         Constant(const NDArrayViewPtr& value, const std::wstring& name, const std::wstring& uid)
-            : Variable(value->Shape(), VariableKind::Constant, value->GetDataType(), value->DeepClone(), false, {}, name, uid)
+            : Variable(value->Shape(), VariableKind::Constant, value->GetDataType(), value, false, {}, name, uid)
         {}

     ///
@@ -2603,6 +2606,7 @@ namespace CNTK
         CNTK_API virtual void Backward(const BackPropStatePtr& state,
                                        const std::unordered_map<Variable, ValuePtr>& rootGradientValues,
                                        std::unordered_map<Variable, ValuePtr>& backPropagatedGradientValuesForInputs);
+
         ///
         /// Returns the name of the operation that this Function denotes
         ///
@@ -2631,8 +2635,11 @@ namespace CNTK
         ///
         /// Infers the shape, data type and dynamic axes of the outputs of 'this' function based on the
         /// Function's inputs, and returns Output Variable objects containing the inferred information
+        /// The number of outputs cannot exceed the maximum number of outputs (Function::MaxNumOutputs).
+        /// The passed 'outputs' vector should also reserve Function::MaxNumOutputs elements, to avoid
+        /// memory allocation when crossing the DLL boundary.
         ///
-        CNTK_API virtual std::vector<Variable> InferOutputs() = 0;
+        CNTK_API virtual void InferOutputs(std::vector<Variable>& outputs) = 0;

     public:

@@ -2737,9 +2744,9 @@ namespace CNTK
         ///
         /// Returns all Input variables of 'this' Function.
         ///
-        std::vector<Variable> Inputs() const
+        std::vector<Variable> Inputs(bool pythonOperandOrder = false) const
         {
-            return *(InputsImpl().get());
+            return *(InputsImpl(pythonOperandOrder).get());
         }

@@ -2837,6 +2844,11 @@ namespace CNTK
         ///
         CNTK_API void PrintGraph() const;

+        ///
+        /// Maximum number of outputs that is currently supported.
+        ///
+        static const int MaxNumOutputs = 64;
+
     protected:
         ///
         /// Protected constructor for derived 'Function' types to specify the actual input and output variables for the (primitive) Function instance.
@@ -2858,6 +2870,7 @@ namespace CNTK
         // Returns the outputs without ref-counting the owner.
         CNTK_API std::vector<Variable>& RawOutputs() const;

+
     private:
         CNTK_API std::shared_ptr<std::vector<std::pair<Variable, Variable>>> BlockArgumentsMappingImpl() const;

@@ -2882,10 +2895,10 @@ namespace CNTK
             return filteredInputs;
         }

-        CNTK_API std::shared_ptr<std::vector<Variable>> InputsImpl() const;
+        CNTK_API std::shared_ptr<std::vector<Variable>> InputsImpl(bool pythonOperandOrder = false) const;
         CNTK_API std::shared_ptr<std::vector<Variable>> OutputsImpl() const;

-        void ValidateOrUpdateOutputs(std::unordered_map<const Function*, size_t>& visitedFunctions, bool& recurrentNodeOutputModified);
+        void ValidateOrUpdateOutputs(std::unordered_map<const Function*, size_t>& visitedFunctions, bool& recurrentNodeOutputModified, std::vector<Variable>& buffer);

         static void ReplacePlaceholderInPlace(Variable& var,
                                               const std::unordered_map<Variable, Variable>& placeholderReplacements,
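To make the constructor change concrete: a Parameter built from an NDArrayView now aliases the view's storage instead of deep-cloning it, and read-only views are rejected up front. A minimal sketch (my illustration, not code from this PR), assuming the public v2 Parameter(NDArrayViewPtr, name) factory and NDArrayView::Alias/DeepClone:

    #include "CNTKLibrary.h"

    using namespace CNTK;

    int main()
    {
        // Writable view: with the change above, the Parameter aliases this buffer
        // instead of deep-cloning it, so in-place updates to the Parameter's value
        // remain visible through 'view'.
        auto view = MakeSharedObject<NDArrayView>(0.5, DataType::Float, NDShape({ 3, 4 }), DeviceDescriptor::CPUDevice());
        Parameter sharedParam(view, L"sharedParam");

        // Read-only views now fail fast; clone into a writable view first.
        auto readOnlyView = view->Alias(/*readOnly =*/ true);
        Parameter clonedParam(readOnlyView->DeepClone(/*readOnly =*/ false), L"clonedParam");

        return 0;
    }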
diff --git a/Source/CNTKv2LibraryDll/BackCompat.cpp b/Source/CNTKv2LibraryDll/BackCompat.cpp
index f2ce2ce4b..13cd57c21 100644
--- a/Source/CNTKv2LibraryDll/BackCompat.cpp
+++ b/Source/CNTKv2LibraryDll/BackCompat.cpp
@@ -20,6 +20,7 @@
 #include "ReshapingNodes.h"
 #include "DeprecatedNodes.h"
 #include "RNNNodes.h"
+#include "PreComputeNodes.h"

 using namespace Microsoft::MSR::CNTK;

@@ -49,9 +51,7 @@ namespace CNTK
             Variable var;
             if (node->IsLeaf())
-            {
                 var = ResolveLeaf<ElementType>(node);
-            }
             else
             {
                 // This is a non-leaf node and maps to a primitive Function
@@ -78,6 +78,22 @@ namespace CNTK
         }

     private:
+
+        template <typename ElementType>
+        Variable CreateParameterOrConstantFromNodeValue(const ComputationNodeBasePtr& node, bool isConstant)
+        {
+            auto& matrix = node->As<ComputationNode<ElementType>>()->Value();
+            auto tensorView = new TensorView<ElementType>(std::make_shared<Matrix<ElementType>>(matrix.AsReference()), AsTensorViewShape(node->GetSampleLayout()));
+            NDArrayViewPtr value = MakeSharedObject<NDArrayView>(AsDataType<ElementType>(), AsDeviceDescriptor(matrix.GetDeviceId()), AsStorageFormat(matrix.GetFormat()), AsNDShape(node->GetSampleLayout()), false, tensorView);
+
+            auto kind = isConstant ? VariableKind::Constant : VariableKind::Parameter;
+
+            std::wstring varUid, varName;
+            std::tie(varUid, varName) = UidAndNameFromCNTKInternalNodeName(node->NodeName(), kind);
+
+            return isConstant ? (Variable)Constant(value, varName, varUid) : Parameter(value, varName, varUid);
+        }
+
         template <typename ElementType>
         Variable ResolveLeaf(const ComputationNodeBasePtr& node)
         {
@@ -104,13 +120,7 @@ namespace CNTK
             if (node->Is<LearnableParameter<ElementType>>())
             {
                 bool isConstant = (node->GetLearningRateMultiplier() == 0);
-                auto& matrix = node->As<ComputationNode<ElementType>>()->Value();
-                auto tensorView = new TensorView<ElementType>(std::make_shared<Matrix<ElementType>>(matrix.AsReference()), AsTensorViewShape(node->GetSampleLayout()));
-                NDArrayViewPtr value = MakeSharedObject<NDArrayView>(AsDataType<ElementType>(), AsDeviceDescriptor(matrix.GetDeviceId()), AsStorageFormat(matrix.GetFormat()), variableShape, false, tensorView);
-
-                auto kind = isConstant ? VariableKind::Constant : VariableKind::Parameter;
-                std::tie(varUid, varName) = UidAndNameFromCNTKInternalNodeName(node->NodeName(), kind);
-                return isConstant ? (Variable)Constant(value, varName, varUid) : Parameter(value, varName, varUid);
+                return CreateParameterOrConstantFromNodeValue<ElementType>(node, isConstant);
             }

             LogicError("CNTK::LoadLegacyModel: Unsupported legacy CNTK node named '%S'", node->NodeName().c_str());
@@ -311,7 +321,7 @@ namespace CNTK
                     // tensor dimensions flattened into the column dimension of the 2D parameter matrix.
                     // We need to recover the actual tensor shape of the parameter in this case
                     auto& convolutionMapVar = inputVars[0];
-                    if (convolutionNode->IsConvolution2D())
+                    if (convolutionNode->IsConvolution2D() || (convolutionMapVar.Shape().Rank() == 2))
                     {
                         assert(convolutionMapVar.Shape().Rank() == 2);
                         assert(convolutionMapVar.IsConstant() || convolutionMapVar.IsParameter());
@@ -444,6 +454,24 @@ namespace CNTK
                     opType = PrimitiveOpType::EditDistanceError;
                 }
+                else if ((node->OperationName() == OperationNameOf(MeanNode)) || (node->OperationName() == OperationNameOf(InvStdDevNode)))
+                {
+                    auto precomputeNode = node->As<MeanInvStdDevNodeBase<ElementType>>();
+                    if (!precomputeNode->HasComputed())
+                        InvalidArgument("Loading a CNTK legacy V1 model containing a Mean/InvStdDev precompute node whose computation is unfinished is not supported!");
+
+                    return CreateParameterOrConstantFromNodeValue<ElementType>(node, /* isConstant = */ true);
+                }
+                else if (node->OperationName() == OperationNameOf(PerDimMeanVarNormalizationNode))
+                {
+                    auto meanValue = Constant(inputVars[1]).Value();
+                    auto invStdDevValue = Constant(inputVars[2]).Value();
+
+                    std::wstring uid, name;
+                    std::tie(uid, name) = UidAndNameFromCNTKInternalNodeName(node->NodeName());
+
+                    return PerDimMeanVarianceNormalize(inputVars[0], meanValue, invStdDevValue, name);
+                }
                 else
                     LogicError("Unsupported ComputationNode with OperationName='%S' found when loading legacy CNTK model", node->OperationName().c_str());
@@ -500,16 +528,16 @@ namespace CNTK
             if (ComputationNetwork::IsNodePtr<ComputationNode<float>>(rootNode))
             {
-                rootVariables.push_back(resolver.GetVariable(rootNode).Owner());
+                auto var = resolver.GetVariable(rootNode);
+                rootVariables.push_back(var.IsOutput() ? (Variable)var.Owner() : var);
             }
             else if (ComputationNetwork::IsNodePtr<ComputationNode<double>>(rootNode))
             {
-                rootVariables.push_back(resolver.GetVariable(rootNode).Owner());
+                auto var = resolver.GetVariable(rootNode);
+                rootVariables.push_back(var.IsOutput() ? (Variable)var.Owner() : var);
             }
             else
-            {
                 LogicError("LoadLegacyModel(): invalid computation node element type.");
-            }
         }

         auto rootComposite = Combine(rootVariables);
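Taken together, these loader changes let a fully precomputed legacy V1 speech model load as a pure V2 graph. A hedged usage sketch (my illustration, not code from this PR; it assumes the era's LoadLegacyModel(modelFile, device) overload and the model file the run-test change below produces):

    #include "CNTKLibrary.h"
    #include <iostream>

    using namespace CNTK;

    int main()
    {
        // Load a V1 model whose Mean/InvStdDev precomputation has completed; the
        // loader folds those nodes into Constants and rebuilds
        // PerDimMeanVarNormalization as a PerDimMeanVarianceNormalize block.
        // An unfinished precompute node now raises InvalidArgument instead.
        auto model = LoadLegacyModel(L"cntkSpeechFF.dnn", DeviceDescriptor::CPUDevice());

        // The folded precompute nodes surface as ordinary Constant inputs.
        for (const auto& input : model->Inputs())
            if (input.IsConstant())
                std::wcout << input.Name() << std::endl;

        return 0;
    }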
diff --git a/Source/CNTKv2LibraryDll/BlockFunction.h b/Source/CNTKv2LibraryDll/BlockFunction.h
index 5df80da78..ff3d5ebb3 100644
--- a/Source/CNTKv2LibraryDll/BlockFunction.h
+++ b/Source/CNTKv2LibraryDll/BlockFunction.h
@@ -131,7 +131,7 @@ namespace CNTK
             return blockFunctionInputs;
         }

-        virtual std::vector<Variable> InferOutputs() override
+        void InferOutputs(std::vector<Variable>& outputs) override
         {
             // We determine the outputs by replacing the arguments of the composite with new placeholders with updated
             // shape etc. information matching the corresponding mapped input
@@ -148,17 +148,14 @@ namespace CNTK

             m_composite->ReplacePlaceholders(replacementMap);

-            std::vector<Variable> blockFunctionOutputs;
             auto compositeOutputs = m_composite->RawOutputs();
             for (auto compositeOutput : compositeOutputs)
             {
                 auto output = OutputVariable(compositeOutput.Shape(), compositeOutput.GetDataType(), compositeOutput.DynamicAxes(), Name());
                 output.m_dataFields->m_blockFunctionVariableMapping = compositeOutput;

-                blockFunctionOutputs.push_back(output);
+                outputs.push_back(output);
             }
-
-            return blockFunctionOutputs;
         }

     private:
diff --git a/Source/CNTKv2LibraryDll/CompositeFunction.h b/Source/CNTKv2LibraryDll/CompositeFunction.h
index 2dc7deb24..7dc054637 100644
--- a/Source/CNTKv2LibraryDll/CompositeFunction.h
+++ b/Source/CNTKv2LibraryDll/CompositeFunction.h
@@ -93,9 +93,10 @@ namespace CNTK
             NOT_IMPLEMENTED;
         }

-        virtual std::vector<Variable> InferOutputs() override
+        void InferOutputs(std::vector<Variable>& outputs) override
         {
-            return m_rootFunction->InitOutputs();
+            auto& inferred = m_rootFunction->InitOutputs();
+            outputs.assign(inferred.begin(), inferred.end());
         }

         virtual void Backward(const BackPropStatePtr& state,
@@ -146,29 +147,29 @@ namespace CNTK
         }

         template <typename FunctionType>
-        static void PreorderTraverseVariables(const FunctionPtr& rootFunction, const FunctionType& functor)
+        static void PreorderTraverseVariables(const FunctionPtr& rootFunction, const FunctionType& functor, bool pythonOperandOrder = false)
         {
             std::unordered_set<FunctionPtr> visitedFunctions;
-            PreorderTraverseVariables(rootFunction, visitedFunctions, functor);
+            PreorderTraverseVariables(rootFunction, visitedFunctions, functor, pythonOperandOrder);
         }

         // Recursively traverses the Function graph underlying the 'rootFunction' invoking the provided functor for all visited nodes in the graph.
         template <typename FunctionType>
-        static void PreorderTraverseVariables(const FunctionPtr& rootFunction, std::unordered_set<FunctionPtr>& visitedFunctions, const FunctionType& functor)
+        static void PreorderTraverseVariables(const FunctionPtr& rootFunction, std::unordered_set<FunctionPtr>& visitedFunctions, const FunctionType& functor, bool pythonOperandOrder = false)
         {
             visitedFunctions.insert(rootFunction);
             auto rootFunctionOutputs = rootFunction->InitOutputs();
             for (const auto& rootOutput : rootFunctionOutputs)
                 functor(rootOutput);

-            auto rootFunctionInputs = rootFunction->Inputs();
+            auto rootFunctionInputs = rootFunction->Inputs(pythonOperandOrder);
             for (const auto& rootInput : rootFunctionInputs)
             {
                 functor(rootInput);
                 if (rootInput.IsOutput() && visitedFunctions.find(rootInput.Owner()) == visitedFunctions.end())
                 {
                     const auto& function = rootInput.Owner();
-                    PreorderTraverseVariables(function, visitedFunctions, functor);
+                    PreorderTraverseVariables(function, visitedFunctions, functor, pythonOperandOrder);
                 }
             }
         }
@@ -201,11 +202,11 @@ namespace CNTK
             m_allPrimitiveFunctions(std::move(allPrimitiveFunctions)), m_networkMatricesAllocated(false)
         {}

-        std::vector<Variable> DetermineInputs() const
+        std::vector<Variable> DetermineInputs(bool pythonOperandOrder = false) const
        {
             const auto& root = RootFunction();
             std::unordered_set<FunctionPtr> visitedFunctions;
-            return DetermineInputs(root, visitedFunctions);
+            return DetermineInputs(root, visitedFunctions, pythonOperandOrder);
         }

         // Recursively traverses the Function graph and populates the provided set of functions.
@@ -216,7 +217,7 @@ namespace CNTK
         }

         // Recursively traverses the Function graph underlying the 'rootFunction' to determine all the leaves (aka inputs) of the graph
-        static std::vector<Variable> DetermineInputs(const FunctionPtr& rootFunction, std::unordered_set<FunctionPtr>& visitedFunctions)
+        static std::vector<Variable> DetermineInputs(const FunctionPtr& rootFunction, std::unordered_set<FunctionPtr>& visitedFunctions, bool pythonOperandOrder = false)
         {
             vector<FunctionPtr> functions;
             std::vector<Variable> inputs;
@@ -227,7 +228,7 @@ namespace CNTK
                     inputs.push_back(var);
                     uniqueInputs.insert(var);
                 }
-            });
+            }, pythonOperandOrder);

             return inputs;
         }
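The new pythonOperandOrder flag threads from Inputs() through the traversal and DetermineInputs above. Its observable effect: for Times/TransposeTimes the C++ API stores the weight operand first, while the Python API writes times(data, weight). A small sketch (my illustration, assuming the public v2 Parameter/InputVariable/Times factories):

    #include "CNTKLibrary.h"
    #include <cassert>

    using namespace CNTK;

    int main()
    {
        auto device = DeviceDescriptor::CPUDevice();
        auto W = Parameter(NDShape({ 5, 3 }), DataType::Float, 0.1, device, L"W");
        auto x = InputVariable(NDShape({ 3 }), DataType::Float, L"x");
        auto times = Times(W, x); // C++ convention: weights first, data second

        auto cppOrder = times->Inputs();    // { W, x }: C++ operand order
        auto pyOrder = times->Inputs(true); // { x, W }: Python operand order

        assert(cppOrder.front().Name() == L"W");
        assert(pyOrder.front().Name() == L"x");
        return 0;
    }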
diff --git a/Source/CNTKv2LibraryDll/Function.cpp b/Source/CNTKv2LibraryDll/Function.cpp
index 0cddbb203..7d8ef07ca 100644
--- a/Source/CNTKv2LibraryDll/Function.cpp
+++ b/Source/CNTKv2LibraryDll/Function.cpp
@@ -16,7 +16,9 @@ namespace CNTK
     std::vector<Variable>& Function::InitOutputs()
     {
         std::call_once(m_outputsInitFlag, [this]() {
-            auto outputs = InferOutputs();
+            std::vector<Variable> outputs;
+            outputs.reserve(Function::MaxNumOutputs);
+            InferOutputs(outputs);
             std::unordered_set<Variable> uniqueOutputs;
             for (auto outputVar : outputs)
             {
@@ -51,14 +53,26 @@ namespace CNTK
         return const_cast<Function*>(this)->InitOutputs();
     }

-    std::shared_ptr<std::vector<Variable>> Function::InputsImpl() const
+    std::shared_ptr<std::vector<Variable>> Function::InputsImpl(bool pythonOperandOrder) const
     {
-        const CompositeFunction* compositeFunction = dynamic_cast<const CompositeFunction*>(this);
         std::vector<Variable> inputs;
+
+        const CompositeFunction* compositeFunction = dynamic_cast<const CompositeFunction*>(this);
         if (compositeFunction == nullptr)
-            inputs = m_inputs;
+        {
+            // For the Times and TransposeTimes primitive functions, if we want the Python operand order
+            // then we need to reorder the operands as stored in m_inputs
+            const PrimitiveFunction* primitiveFunction = dynamic_cast<const PrimitiveFunction*>(this);
+            if (pythonOperandOrder && primitiveFunction && ((primitiveFunction->OpType() == PrimitiveOpType::Times) || (primitiveFunction->OpType() == PrimitiveOpType::TransposeTimes)))
+            {
+                assert(m_inputs.size() == 2);
+                inputs = { m_inputs[1], m_inputs[0] };
+            }
+            else
+                inputs = m_inputs;
+        }
         else
-            inputs = compositeFunction->DetermineInputs();
+            inputs = compositeFunction->DetermineInputs(pythonOperandOrder);

         return std::shared_ptr<std::vector<Variable>>(new std::vector<Variable>(std::move(inputs)), [](std::vector<Variable>* ptr) { delete ptr; });
     }
@@ -289,7 +303,7 @@ namespace CNTK
         return updated;
     }

-    void Function::ValidateOrUpdateOutputs(std::unordered_map<const Function*, size_t>& visitedFunctions, bool& recurrentNodeOutputModified)
+    void Function::ValidateOrUpdateOutputs(std::unordered_map<const Function*, size_t>& visitedFunctions, bool& recurrentNodeOutputModified, std::vector<Variable>& outputsUsingNewInputs)
     {
         assert(visitedFunctions.find(this) == visitedFunctions.end());
         visitedFunctions[this] = 1;
@@ -301,13 +315,17 @@ namespace CNTK
             {
                 auto owner = input.Owner().get();
                 if (visitedFunctions.find(owner) == visitedFunctions.end())
-                    owner->ValidateOrUpdateOutputs(visitedFunctions, recurrentNodeOutputModified);
+                {
+                    outputsUsingNewInputs.clear();
+                    owner->ValidateOrUpdateOutputs(visitedFunctions, recurrentNodeOutputModified, outputsUsingNewInputs);
+                }
                 else
                     visitedFunctions[owner]++;
             }
         }

-        auto outputsUsingNewInputs = this->InferOutputs();
+        outputsUsingNewInputs.clear();
+        this->InferOutputs(outputsUsingNewInputs);
         auto currentOutputs = RawOutputs();
         for (size_t i = 0; i < currentOutputs.size(); ++i)
         {
@@ -468,11 +486,13 @@ namespace CNTK
         const size_t maxNumValidationPassesAllowed = 128;
         bool recurrentNodeOutputModified = false;
         size_t numValidationPasses = 0;
+        std::vector<Variable> outputVarBuffer;
+        outputVarBuffer.reserve(Function::MaxNumOutputs);
         do
         {
             recurrentNodeOutputModified = false;
             functionVisitCounts.clear();
-            RootFunction()->ValidateOrUpdateOutputs(functionVisitCounts, recurrentNodeOutputModified);
+            RootFunction()->ValidateOrUpdateOutputs(functionVisitCounts, recurrentNodeOutputModified, outputVarBuffer);
             numValidationPasses++;
         } while (recurrentNodeOutputModified && (numValidationPasses < maxNumValidationPassesAllowed));
@@ -1120,12 +1140,11 @@ namespace CNTK
     FunctionPtr PerDimMeanVarianceNormalize(const Variable& operand, const NDArrayViewPtr& mean, const NDArrayViewPtr& invStdDev, const std::wstring& name)
     {
-        // TODO: Should this too be encapsulated as a block?
-
+        auto operandPlaceholder = PlaceholderVariable(L"operand");
         Constant meanVar(mean);
         Constant invStdDevVar(invStdDev);

-        return ElementTimes(Minus(operand, meanVar), invStdDevVar, name);
+        return AsBlock(std::move(ElementTimes(Minus(operandPlaceholder, meanVar), invStdDevVar)), { { operandPlaceholder, operand } }, L"PerDimMeanVarianceNormalize", name);
     }

     FunctionPtr Convolution(const Variable& convolutionMap,
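With the AsBlock change, the removed TODO is answered: the normalization is now a named block in the graph rather than loose Minus/ElementTimes primitives at the top level. A sketch of the observable difference (my illustration; it assumes the v2 RootFunction()/IsBlock() accessors):

    #include "CNTKLibrary.h"
    #include <iostream>

    using namespace CNTK;

    int main()
    {
        auto device = DeviceDescriptor::CPUDevice();
        auto features = InputVariable(NDShape({ 3 }), DataType::Float, L"features");
        auto mean = MakeSharedObject<NDArrayView>(0.0, DataType::Float, NDShape({ 3 }), device);
        auto invStdDev = MakeSharedObject<NDArrayView>(1.0, DataType::Float, NDShape({ 3 }), device);

        auto norm = PerDimMeanVarianceNormalize(features, mean, invStdDev, L"norm");

        // The root of the returned composite is now a single block function.
        std::wcout << std::boolalpha << norm->RootFunction()->IsBlock() << std::endl;
        return 0;
    }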
diff --git a/Source/CNTKv2LibraryDll/PrimitiveFunction.cpp b/Source/CNTKv2LibraryDll/PrimitiveFunction.cpp
index d29a71ea6..d091b78d9 100644
--- a/Source/CNTKv2LibraryDll/PrimitiveFunction.cpp
+++ b/Source/CNTKv2LibraryDll/PrimitiveFunction.cpp
@@ -216,10 +216,13 @@ namespace CNTK
         return outputDynamicAxes;
     }

-    /*virtual*/ std::vector<Variable> PrimitiveFunction::InferOutputs() /*override*/
+    void PrimitiveFunction::InferOutputs(std::vector<Variable>& outputs)
     {
         if (m_op == PrimitiveOpType::Combine)
-            return m_inputs;
+        {
+            outputs.assign(m_inputs.begin(), m_inputs.end());
+            return;
+        }

         DataType outputDataType = GetOutputDataType(m_op, m_inputs, true);
         std::vector<Axis> outputDynamicAxes = GetOutputDynamicAxes(m_op, m_inputs, m_attributes);
@@ -682,7 +685,7 @@ namespace CNTK
             }
         }

-        return { OutputVariable(outputShape, outputDataType, outputDynamicAxes, Name().empty() ? L"" : Name()) };
+        outputs.push_back(OutputVariable(outputShape, outputDataType, outputDynamicAxes, Name().empty() ? L"" : Name()));
     }

     static const std::wstring s_primitiveFunctionTypeValue = L"PrimitiveFunction";
diff --git a/Source/CNTKv2LibraryDll/PrimitiveFunction.h b/Source/CNTKv2LibraryDll/PrimitiveFunction.h
index 2ec056199..8d677bb78 100644
--- a/Source/CNTKv2LibraryDll/PrimitiveFunction.h
+++ b/Source/CNTKv2LibraryDll/PrimitiveFunction.h
@@ -700,7 +700,7 @@ namespace CNTK
         static DataType GetOutputDataType(PrimitiveOpType op, std::vector<Variable>& inputs, bool inferDimensions);
         static std::vector<Axis> GetOutputDynamicAxes(PrimitiveOpType op, std::vector<Variable>& inputs, Dictionary& functionConfig);

-        virtual std::vector<Variable> InferOutputs() override;
+        void InferOutputs(std::vector<Variable>& outputs) override;

     private:
         PrimitiveOpType m_op;
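The mechanical pattern repeated across these files (a return-by-value InferOutputs replaced by filling a caller-owned vector that InitOutputs pre-reserves to Function::MaxNumOutputs) keeps vector allocation on the caller's side of the DLL boundary. A self-contained mock of the idiom, with no CNTK dependency (all names here are hypothetical):

    #include <cassert>
    #include <string>
    #include <vector>

    // Hypothetical stand-in for CNTK::Function; only the allocation idiom matters.
    struct MockFunction
    {
        static constexpr int MaxNumOutputs = 64;

        // New-style InferOutputs: append into a caller-owned, pre-reserved buffer.
        void InferOutputs(std::vector<std::string>& outputs) const
        {
            outputs.push_back("output0");
        }

        std::vector<std::string> InitOutputs() const
        {
            std::vector<std::string> outputs;
            outputs.reserve(MaxNumOutputs);    // capacity fixed on the caller's side
            const auto* storage = outputs.data();
            InferOutputs(outputs);             // callee only appends
            assert(outputs.data() == storage); // no reallocation happened
            return outputs;
        }
    };

    int main()
    {
        MockFunction f;
        assert(f.InitOutputs().size() == 1);
        return 0;
    }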
diff --git a/Tests/EndToEndTests/CNTKv2Library/UnitTests/run-test b/Tests/EndToEndTests/CNTKv2Library/UnitTests/run-test
index 1dfde3aad..c16e23047 100755
--- a/Tests/EndToEndTests/CNTKv2Library/UnitTests/run-test
+++ b/Tests/EndToEndTests/CNTKv2Library/UnitTests/run-test
@@ -44,6 +44,20 @@ DeleteModelsAfterTest=0
 cntkrun 01_OneHidden.cntk "stderr=- command=trainNetwork trainNetwork=[SGD=[maxEpochs=1]]" || exit $?
 cp $OutputDir/Models/01_OneHidden $TestDataDir || exit $?

+# Train the model used by the LoadLegacyModel tests.
+# Set up the predefined variables $DataDir, $ConfigDir, and $OutputDir that are required by cntkrun.
+ConfigDir=$TEST_ROOT_DIR/../../Examples/Speech/AN4/Config
+OutputDir=$ConfigDir/../Output
+DataDir=$ConfigDir/../Data
+
+[ -d $OutputDir ] || mkdir $OutputDir || exit $?
+[ -d $OutputDir/Models ] && rm -rf $OutputDir/Models
+DeleteModelsAfterTest=0
+[ -f $ConfigDir/FeedForward.cntk ] || exit 1
+cntkrun FeedForward.cntk "stderr=- command=speechTrain parallelTrain=false speechTrain=[SGD=[maxEpochs=1]]" || exit $?
+cp $OutputDir/Models/cntkSpeechFF.dnn $TestDataDir || exit $?
+
 # Set CUDA_VISIBLE_DEVICES to exclude all GPUs if running on the cpu device
 if [ "$TEST_DEVICE" == "cpu" ]; then
   export CUDA_VISIBLE_DEVICES=-1
diff --git a/Tests/EndToEndTests/EvalClientTests/CNTKLibraryCSEvalExamplesTest/CNTKLibraryCSEvalExamplesTest.csproj b/Tests/EndToEndTests/EvalClientTests/CNTKLibraryCSEvalExamplesTest/CNTKLibraryCSEvalExamplesTest.csproj
index 27357a01c..0bc03b943 100644
--- a/Tests/EndToEndTests/EvalClientTests/CNTKLibraryCSEvalExamplesTest/CNTKLibraryCSEvalExamplesTest.csproj
+++ b/Tests/EndToEndTests/EvalClientTests/CNTKLibraryCSEvalExamplesTest/CNTKLibraryCSEvalExamplesTest.csproj
[The XML hunks of this file (@@ -1,5 +1,5 @@ and @@ -60,21 +60,16 @@) were garbled in extraction and cannot be fully reconstructed. The recoverable change: the in-project <ProjectReference> to CNTKLibraryManagedDll ({50ef9ee6-5018-453e-a063-f77044ef1a97}) is removed, matching the build-order dependency that the CNTK.sln hunk above now declares at the solution level.]