renamed ComputationNode::m_children to m_inputs
This commit is contained in:
Parent
f8ac1eae70
Commit
9d0bc1b15e
@@ -139,7 +139,7 @@ namespace Microsoft { namespace MSR { namespace BS {
     typedef shared_ptr<ComputationNode> ComputationNodePtr;

     // inputs and output
-    vector<ComputationNodePtr> m_children; // these are the inputs
+    vector<ComputationNodePtr> m_inputs;   // these are the inputs
     MatrixPtr m_functionValue;             // this is the result

     // other
@@ -183,29 +183,29 @@ namespace Microsoft { namespace MSR { namespace BS {

     virtual void AttachInputs(ComputationNodePtr arg)
     {
-        m_children.resize(1);
-        m_children[0] = arg;
+        m_inputs.resize(1);
+        m_inputs[0] = arg;
     }
     virtual void AttachInputs(ComputationNodePtr leftNode, ComputationNodePtr rightNode)
     {
-        m_children.resize(2);
-        m_children[0] = leftNode;
-        m_children[1] = rightNode;
+        m_inputs.resize(2);
+        m_inputs[0] = leftNode;
+        m_inputs[1] = rightNode;
     }
     virtual void AttachInputs(ComputationNodePtr arg1, ComputationNodePtr arg2, ComputationNodePtr arg3)
     {
-        m_children.resize(3);
-        m_children[0] = arg1;
-        m_children[1] = arg2;
-        m_children[2] = arg3;
+        m_inputs.resize(3);
+        m_inputs[0] = arg1;
+        m_inputs[1] = arg2;
+        m_inputs[2] = arg3;
     }
     void AttachInputs(vector<ComputationNodePtr> && inputs, size_t num = 0/*0 means all OK*/)
     {
         if (num != 0 && inputs.size() != num)
             LogicError("AttachInputs: called with incorrect number of arguments");
-        m_children = inputs;
+        m_inputs = inputs;
     }
-    const std::vector<ComputationNodePtr> & GetChildren() const { return m_children; }
+    const std::vector<ComputationNodePtr> & GetChildren() const { return m_inputs; }

     /*HasToString::*/ wstring ToString() const
     {
@@ -213,12 +213,12 @@ namespace Microsoft { namespace MSR { namespace BS {
         wstring result = TidyName(NodeName()) + L" : " + wstring(OperationName());
         if (!m_tag.empty())
             result += L" {tag: " + m_tag + L"}";
-        if (m_children.empty()) result.append(L"()");
+        if (m_inputs.empty()) result.append(L"()");
         else
         {
             wstring args;
             bool first = true;
-            for (auto & child : m_children)
+            for (auto & child : m_inputs)
             {
                 if (first)
                     first = false;
@@ -3032,17 +3032,17 @@ nNodePtr Value)

\begin_layout Plain Layout

-m_children.resize(2);
+m_inputs.resize(2);
\end_layout

\begin_layout Plain Layout

-m_children[0] = scalarValue;
+m_inputs[0] = scalarValue;
\end_layout

\begin_layout Plain Layout

-m_children[1] = Value;
+m_inputs[1] = Value;
\end_layout

\begin_layout Plain Layout
@@ -3149,7 +3149,7 @@ virtual void Validate()

\begin_layout Plain Layout

-if (m_children.size() != 2)
+if (m_inputs.size() != 2)
\end_layout

\begin_layout Plain Layout
@@ -114,13 +114,6 @@ namespace Microsoft { namespace MSR { namespace CNTK {
         InferImageDimsFromInput(0);
     }

-    //virtual void AttachInputs(const ComputationNodePtr c1, const ComputationNodePtr c2)
-    //{
-    //    m_children.resize(2);
-    //    m_children[0] = c1;
-    //    m_children[1] = c2;
-    //}
-
 public:
     virtual bool UnitTest() {
         size_t nT = 3;
@@ -63,7 +63,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
        m_cacheGradientCalcOrders.clear();
        m_cachedOuterLoopNodes.clear();

-       m_inputs.clear();
+       m_inputValues.clear();
        m_learnableParameters.clear();

        m_nameToNodeMap.clear(); // will also deref and likely deallocate all nodes we hold in here
@@ -551,12 +551,12 @@ namespace Microsoft { namespace MSR { namespace CNTK {
        m_cachedOuterLoopNodes.clear();
    }

-   // lazily reate the m_inputs[] and m_learnableParameters lists
+   // lazily reate the m_inputValues[] and m_learnableParameters lists
    // The only other side effect is to call GetEvalOrder(), which will cache the evaluation order for the given root node.
    void ComputationNetwork::CollectInputAndLearnableParameters(const ComputationNodeBasePtr& rootNode)
    {
        //not found
-       if (m_inputs.find(rootNode) == m_inputs.end())
+       if (m_inputValues.find(rootNode) == m_inputValues.end())
        {
            list<ComputationNodeBasePtr> inputs;

@@ -570,7 +570,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
                    inputs.push_back(node);
                }
            }
-           m_inputs[rootNode] = inputs;
+           m_inputValues[rootNode] = inputs;
        }

        //not found
@@ -20,7 +20,6 @@
 // EvaluateThisNode() -> ForwardProp()  // the familiar names
 // ComputeInputPartial() -> BackpropTo()
 // OnEvaluateBeginIteration() -> BeginForwardProp()  // and similar functions likewise
-// m_children -> m_inputs  // likewise related functions
 // Inputs() -> Input()  // or In()? or GetInput()?
 // Children() -> Inputs()
 // ChildrenSize() -> NumInputs()
@@ -573,7 +572,7 @@ public:
    void ClearCaches()
    {
        m_built.clear();
-       m_inputs.clear();
+       m_inputValues.clear();
        m_learnableParameters.clear();
        ClearCalcOrderCaches();
    }
@@ -594,7 +593,7 @@ public:
    {
        if (bNoBuild == false)
            BuildAndValidateSubNetwork(rootNode);
-       return m_inputs[rootNode];
+       return m_inputValues[rootNode];
    }

    std::list<ComputationNodeBasePtr>& LearnableNodes(const ComputationNodeBasePtr& rootNode)
@@ -990,7 +989,7 @@ private: // TODO: make all private that can be made private
    std::map<const ComputationNodeBasePtr, std::list<ComputationNodeBasePtr>> m_cacheGradientCalcOrders;
    std::map<const ComputationNodeBasePtr, ComputationNodeBasePtr> m_cachedOuterLoopNodes;

-   std::map<const ComputationNodeBasePtr, std::list<ComputationNodeBasePtr>> m_inputs;                // [out node] -> all input nodes feeding into out node
+   std::map<const ComputationNodeBasePtr, std::list<ComputationNodeBasePtr>> m_inputValues;           // [out node] -> all input nodes feeding into out node
    std::map<const ComputationNodeBasePtr, std::list<ComputationNodeBasePtr>> m_learnableParameters;   // [out node] -> all parameter nodes feeding into out node

    // pool for matrices that can be shared across nodes
@@ -581,7 +581,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
        // TODO: not nice--why not always call this in ValidateSubNetwork() only?
        FormRecurrentLoops(rootNode);

-       // for the m_inputs and m_learnableParameters sets for this rootNode
+       // for the m_inputValues and m_learnableParameters sets for this rootNode
        CollectInputAndLearnableParameters(rootNode);

        // validate the rootNode and all nodes it depends on, in evaluation order
@@ -29,7 +29,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
        //wstring name = NodeName(); name;
        //fprintf(stderr, "\nDetermining Layout --> %ls:", name.c_str());
        MBLayoutPtr pMBLayout;  // starts with NULL layout
-       for (auto child : m_children)
+       for (auto child : m_inputs)
        {
            //wstring cname = child->NodeName(); cname;
            //fprintf(stderr, " %ls(%s)", cname.c_str(), child->m_pMBLayout ? "." : "NULL");
@@ -54,10 +54,10 @@ namespace Microsoft { namespace MSR { namespace CNTK {
    // single input that maps its input element-wise (e.g. Sigmoid)
    void ComputationNodeBase::ValidateUnaryMap(bool isFinalValidationPass)
    {
-       assert(m_children.size() == 1);
+       assert(m_inputs.size() == 1);
        ComputationNodeBase::Validate(isFinalValidationPass);
        InferMBLayoutFromInputsForStandardCase();
-       SetDims(m_children[0]->GetNumRows(), DetermineNumCols(m_children[0]));
+       SetDims(m_inputs[0]->GetNumRows(), DetermineNumCols(m_inputs[0]));
        InferImageDimsFromInputs();
    }
    // binary zip operation, e.g. Plus
@@ -65,7 +65,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
    // This also helpfully resizes the children if not yet sized.
    void ComputationNodeBase::ValidateBinaryZip(bool isFinalValidationPass, bool allowMultiples)
    {
-       assert(m_children.size() == 2);
+       assert(m_inputs.size() == 2);
        ComputationNodeBase::Validate(isFinalValidationPass);
        InferMBLayoutFromInputsForStandardCase();

@@ -89,7 +89,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
    // unary reduce-to-(1,1) operation, e.g. MatrixL1RegNode
    void ComputationNodeBase::ValidateUnaryReduce(bool isFinalValidationPass)
    {
-       assert(m_children.size() == 1);
+       assert(m_inputs.size() == 1);
        ComputationNodeBase::Validate(isFinalValidationPass);
        m_pMBLayout = nullptr;  // this node does not hold mini-batch data
        SetDims(1, 1);
@@ -121,7 +121,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
        // if dimension not specified we assume two operands' dimensions should be the same
        // NOTE: The assert is set to check if >= 2 since this is called from nodes which have more than two children.
        //       The number of children is formally verified elsewhere, so this will not break consistency.
-       assert(m_children.size() >= 2);
+       assert(m_inputs.size() >= 2);
        for (size_t index = 0; index < 2; index++)
        {
            auto in = Inputs(index);
@@ -242,7 +242,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
                RuntimeError("Cannot copy from one node type to another node type");
            if (flags & CopyNodeFlags::copyNodeChildren)
            {
-               node->m_children = m_children;
+               node->m_inputs = m_inputs;
            }
            if (flags & CopyNodeFlags::copyNodeValue)
            {
@@ -308,15 +308,15 @@ namespace Microsoft { namespace MSR { namespace CNTK {
        virtual void Validate(bool isFinalValidationPass)   // main base validation function
        {
            // check for NULL pointers
-           for (size_t i = 0; i < m_children.size(); i++)
+           for (size_t i = 0; i < m_inputs.size(); i++)
            {
-               if (!m_children[i])
+               if (!m_inputs[i])
                    RuntimeError("Validate: Input [%d] of %ls node '%ls' is empty (NULL, not connected).", (int)i, OperationName().c_str(), NodeName().c_str());
            }
            // check for empty inputs
            if (isFinalValidationPass)
            {
-               for (const auto & child : m_children)
+               for (const auto & child : m_inputs)
                {
                    if (child->GetNumRows() == 0 || (!child->HasMBLayout() && child->GetNumCols() == 0))
                        RuntimeError("%ls %ls operation: input %ls %ls has 0 elements.",
@@ -356,7 +356,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
        void AttachInputs(const ComputationNodeBasePtr& firstInput, const ComputationNodeBasePtr& secondInput, const ComputationNodeBasePtr &thirdInput, const ComputationNodeBasePtr& fourthInput, const ComputationNodeBasePtr& fifthInput) { AttachInputs(std::vector<ComputationNodeBasePtr> { firstInput, secondInput, thirdInput, fourthInput, fifthInput } ); }
        void AttachInputs(const ComputationNodeBasePtr& firstInput, const ComputationNodeBasePtr& secondInput, const ComputationNodeBasePtr &thirdInput, const ComputationNodeBasePtr& fourthInput, const ComputationNodeBasePtr& fifthInput, const ComputationNodeBasePtr& sixthInput) { AttachInputs(std::vector<ComputationNodeBasePtr> { firstInput, secondInput, thirdInput, fourthInput, fifthInput, sixthInput } ); }

-       virtual void DetachInputs() { m_children.clear(); }
+       virtual void DetachInputs() { m_inputs.clear(); }

        // helper for the factory function for ComputationNodes
        static vector<ComputationNodeBasePtr> GetInputsFromConfig(const ScriptableObjects::IConfigRecordPtr configp)
@@ -378,8 +378,8 @@ namespace Microsoft { namespace MSR { namespace CNTK {
            return inputs;
        }

-       const std::vector<ComputationNodeBasePtr> & GetChildren() const { return m_children; }
-       ComputationNodeBasePtr Inputs(size_t index) const { return m_children[index]; } // TODO: delete this; change to m_children
+       const std::vector<ComputationNodeBasePtr> & GetChildren() const { return m_inputs; }
+       ComputationNodeBasePtr Inputs(size_t index) const { return m_inputs[index]; } // TODO: delete this; change to m_inputs

        //return true if the node's value should be computed before the normal training. e.g., mean and invStd of input features.
        virtual bool /*IComputationNode::*/RequiresPreCompute() const { return false; }
@@ -452,7 +452,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
            fprintf(stderr, "(");
            for (size_t i = 0; i<ChildrenSize(); i++)
            {
-               const auto & child = m_children[i];
+               const auto & child = m_inputs[i];
                if (i > 0)
                    fprintf(stderr, ", ");

@@ -502,14 +502,14 @@ namespace Microsoft { namespace MSR { namespace CNTK {

        bool IsChildAnImage(const size_t index) const
        {
-           return m_children[index]->m_imageLayout.GetWidth() != 1 || m_children[index]->m_imageLayout.GetNumChannels() != 1;
+           return m_inputs[index]->m_imageLayout.GetWidth() != 1 || m_inputs[index]->m_imageLayout.GetNumChannels() != 1;
        }

        const ImageLayout & GetImageLayout() const { return m_imageLayout; }

        pair<ImageLayout, ImageLayout> GetImageLayouts() const { return make_pair(m_inputImageLayout, m_imageLayout); }   // helper for Validate()

-       const size_t ChildrenSize() const { return m_children.size(); }   // TODO: rename to NumChildren() or NumInputs(); and inside here where we use m_children, use m_children.size() as well
+       const size_t ChildrenSize() const { return m_inputs.size(); }     // TODO: rename to NumChildren() or NumInputs(); and inside here where we use m_inputs, use m_inputs.size() as well

        virtual void SetInput(const size_t childIndex, const ComputationNodeBasePtr& node) = 0;

@@ -555,7 +555,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
            if (index >= ChildrenSize())
                InvalidArgument("InferImageDimsFromInput: output index");

-           const auto & child = m_children[index];
+           const auto & child = m_inputs[index];
            if (child != nullptr)
                m_inputImageLayout = child->m_imageLayout;
            if (outputSameAsInput)
@@ -573,7 +573,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {

        bool IsEqualTo(const ComputationNodeBasePtr& other) const //this will be used to determine whehter two nodes are the same
        {
-           if (OperationName() != other->OperationName() || m_children.size() != other->m_children.size())
+           if (OperationName() != other->OperationName() || m_inputs.size() != other->m_inputs.size())
                return false;

            if (NodeName() == other->NodeName()) //assume names are unique in the system
@@ -582,8 +582,8 @@ namespace Microsoft { namespace MSR { namespace CNTK {
            if (IsLeaf() && other->IsLeaf()) //since names are not equal otherwise will return above
                return false;

-           for (size_t i=0; i<m_children.size(); i++)
-               if (!(m_children[i] == other->m_children[i]))
+           for (size_t i=0; i<m_inputs.size(); i++)
+               if (!(m_inputs[i] == other->m_inputs[i]))
                    return false;

            return true;
@@ -619,10 +619,10 @@ namespace Microsoft { namespace MSR { namespace CNTK {
            // children first for function evaluation
            if (OperationName() != L"PairNetwork" || !skipPairNetwork)   // (don't step through network-pair boundary if called from FormRecurrentLoops())
            {
-               for (int i = 0; i < m_children.size(); i++)
+               for (int i = 0; i < m_inputs.size(); i++)
                {
-                   if (m_children[i])
-                       m_children[i]->EnumerateNodesRec(visited, result, skipPairNetwork);
+                   if (m_inputs[i])
+                       m_inputs[i]->EnumerateNodesRec(visited, result, skipPairNetwork);
                }
            }

@@ -641,7 +641,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
            // TODO: use range-based for
            for (size_t i = 0; i < ChildrenSize(); i++)
            {
-               if (IsOlderThan(*m_children[i]))
+               if (IsOlderThan(*m_inputs[i]))
                    return true;
            }

@@ -668,12 +668,12 @@ namespace Microsoft { namespace MSR { namespace CNTK {

                if (visited.find(curNode) == visited.end())
                {
-                   for (size_t i = 0; i < curNode->m_children.size(); i++)
+                   for (size_t i = 0; i < curNode->m_inputs.size(); i++)
                    {
-                       arcs.push_back(ComputationArc(curNode, curNode->m_children[i]));
+                       arcs.push_back(ComputationArc(curNode, curNode->m_inputs[i]));

-                       if (visited.find(curNode->m_children[i]) == visited.end()) // this children has not been visited before
-                           tovisit.push_front(curNode->m_children[i]);            // going to visit each of the children
+                       if (visited.find(curNode->m_inputs[i]) == visited.end())   // this children has not been visited before
+                           tovisit.push_front(curNode->m_inputs[i]);              // going to visit each of the children
                    }
                    visited.insert(curNode);
                }
@@ -715,7 +715,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
        std::wstring m_nodeName;

        // inputs
-       std::vector<ComputationNodeBasePtr> m_children;
+       std::vector<ComputationNodeBasePtr> m_inputs;

        // dimensions and layout
        // Data is stored as a matrix, but often it is interpreted as a more complex structure.
@@ -829,12 +829,12 @@ namespace Microsoft { namespace MSR { namespace CNTK {
            const auto * pNumInputs = dynamic_cast<INumInputs*>(this);  // if this class also derives from NumInputs<N> then N is the expected number of inputs
            if (pNumInputs && pNumInputs->GetExpectedNumInputs() != inputs.size())
                RuntimeError("%ls operation '%ls' expects %d inputs (given: %d)", OperationName().c_str(), NodeName().c_str(), (int)pNumInputs->GetExpectedNumInputs(), (int)inputs.size());
-           m_children.resize(inputs.size());
-           for (size_t i = 0; i < m_children.size(); i++)
+           m_inputs.resize(inputs.size());
+           for (size_t i = 0; i < m_inputs.size(); i++)
                if (inputs[i])
-                   m_children[i] = UpCast(inputs[i]);  // (UpCast() checks the type; the assignment then downcasts it again)
+                   m_inputs[i] = UpCast(inputs[i]);    // (UpCast() checks the type; the assignment then downcasts it again)
                else
-                   m_children[i] = nullptr;            // during network creation, nullpts are possible
+                   m_inputs[i] = nullptr;              // during network creation, nullpts are possible
        }

    protected:
@@ -878,10 +878,10 @@ namespace Microsoft { namespace MSR { namespace CNTK {

        virtual void AllocateGradientMatricesForChildren(MatrixPool& matrixPool) override
        {
-           for (int i = 0; i < m_children.size(); i++)
+           for (int i = 0; i < m_inputs.size(); i++)
            {
-               if (m_children[i]->NeedGradient())
-                   m_children[i]->RequestMatricesBeforeGradientComp(matrixPool);
+               if (m_inputs[i]->NeedGradient())
+                   m_inputs[i]->RequestMatricesBeforeGradientComp(matrixPool);
            }
        }

@@ -911,12 +911,12 @@ namespace Microsoft { namespace MSR { namespace CNTK {
            // we format it like "name : type rows x cols ( args )"
            wstring result = /*TidyName*/(NodeName()) + L" : " + OperationName();
            result.append(msra::strfun::wstrprintf(L" %d x %d", (int)m_functionValues->GetNumRows(), (int)m_functionValues->GetNumCols()));
-           if (m_children.empty()) result.append(L" ()");
+           if (m_inputs.empty()) result.append(L" ()");
            else
            {
                wstring args;
                bool first = true;
-               for (auto & child : m_children)
+               for (auto & child : m_inputs)
                {
                    if (first)
                        first = false;
@@ -1006,7 +1006,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
            {
                if (i > 0)
                    fprintf(stderr, ", ");
-               fprintf(stderr, "%ls[%lu, %lu]", m_children[i] ? m_children[i]->NodeName().c_str():L"NULL", m_children[i]->GetNumRows(), m_children[i]->GetNumCols());
+               fprintf(stderr, "%ls[%lu, %lu]", m_inputs[i] ? m_inputs[i]->NodeName().c_str():L"NULL", m_inputs[i]->GetNumRows(), m_inputs[i]->GetNumCols());
            }
            fprintf(stderr, ")");
        }
@@ -1033,10 +1033,10 @@ namespace Microsoft { namespace MSR { namespace CNTK {
        inline ComputationNodePtr Inputs(const size_t childIndex) const   // TODO: rename to Input
        {
 #ifdef _DEBUG // profile shows this is range check very expensive in release mode, skip it
-           if (childIndex >= m_children.size())
+           if (childIndex >= m_inputs.size())
                LogicError("Inputs: childIndex is out of range.");
 #endif
-           return UpCast(m_children[childIndex]);
+           return UpCast(m_inputs[childIndex]);
        }

        void /*ComputationNodeBase::*/SetInput(const size_t childIndex, const ComputationNodeBasePtr& inode) override
@@ -1044,15 +1044,15 @@ namespace Microsoft { namespace MSR { namespace CNTK {
            const ComputationNodePtr node = UpCast(inode);

            //require first nodes specified before the second to avoid null nodes condition.
-           if (childIndex > m_children.size())
+           if (childIndex > m_inputs.size())
                InvalidArgument("SetInput: You must specify the input for children with index less than this one first.");

            // expand the inputs to exist up to the desired index
-           while (childIndex >= m_children.size())
-               m_children.push_back(nullptr);
+           while (childIndex >= m_inputs.size())
+               m_inputs.push_back(nullptr);

            // set the input value
-           m_children[childIndex] = node;
+           m_inputs[childIndex] = node;
        }

        const Matrix<ElemType>& FunctionValues() const { return *m_functionValues; }
@@ -1154,10 +1154,10 @@ namespace Microsoft { namespace MSR { namespace CNTK {
                MaskMissingGradientColumnsToZero(FrameRange(m_pMBLayout));
            }
            bool anyChildNeedsGradient = false;
-           for (size_t i = 0; i < m_children.size(); i++)
+           for (size_t i = 0; i < m_inputs.size(); i++)
                anyChildNeedsGradient |= Inputs(i)->m_needsGradient;
            if (anyChildNeedsGradient)
-               for (size_t i = 0; i < m_children.size(); i++)
+               for (size_t i = 0; i < m_inputs.size(); i++)
                    Inputs(i)->MaskMissingValuesColumnsToZero(FrameRange(Inputs(i)->GetMBLayout()));
 #endif
        }
@@ -1167,7 +1167,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
        {
            Base::OnComputeGradientEndIteration();
 #ifdef TRACK_GAP_NANS
-           for (size_t i = 0; i < m_children.size(); i++)
+           for (size_t i = 0; i < m_inputs.size(); i++)
            {
                ComputationNodePtr child = Inputs(i);
                if (child->m_needsGradient)
@@ -1188,7 +1188,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
            if (frameRange.IsAllFrames() && IsPartOfLoop() && childrenInThisLoop)
                LogicError("%ls %ls operation: ComputeGradientForChildren called with whole-batch FrameRange on node that participates in a loop", NodeName().c_str(), OperationName().c_str());

-           for (size_t i = 0; i < m_children.size(); i++)
+           for (size_t i = 0; i < m_inputs.size(); i++)
            {
                ComputationNodePtr child = Inputs(i);
                if (child->m_needsGradient &&
@@ -1234,7 +1234,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {

        void /*ComputationNodeBase::*/ClearGradientForChildren() override   // TODO: bad naming--this just clears the lazy flags, whereas LazyZeroGradient() actually clears the values
        {
-           for (size_t i = 0; i < m_children.size(); i++)
+           for (size_t i = 0; i < m_inputs.size(); i++)
                Inputs(i)->m_gradientInitialized = false;
        }

@@ -1494,7 +1494,7 @@ protected: \
    using Base::MaskMissingColumnsToZero; using Base::MaskMissingValuesColumnsToZero; using Base::MaskMissingGradientColumnsToZero; using Base::InvalidateMissingValuesColumns; using Base::InvalidateMissingGradientColumns; \
    using Base::DataSlice; using Base::ValueSlice; using Base::GradientValues; using Base::GradientValuesPtr; using Base::GradientSlice; using Base::MaskedValueSlice; using Base::MaskedGradientSlice; \
    using Base::EvaluateThisNode; using Base::ComputeInputPartial; \
-   using Base::m_children; using Base::m_deviceId; using Base::m_functionValues; using Base::m_gradientValues; \
+   using Base::m_inputs; using Base::m_deviceId; using Base::m_functionValues; using Base::m_gradientValues; \
    using Base::m_inputImageLayout; using Base::m_imageLayout; \
    using Base::m_parameterUpdateRequired; using Base::m_nodeName; \
    using Base::CreateMatrixIfNull; using Base::RequestMatrixFromPool; using Base::ReleaseMatrixToPool; \
@@ -334,7 +334,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
                ValidateInferChildDims(0, m_imageLayout.GetNumChannels(), weightCols);

            if (isFinalValidationPass && (Inputs(0)->GetNumCols() != weightCols || Inputs(0)->GetNumRows() != m_imageLayout.GetNumChannels()))
-               LogicError("convolutionWeight matrix %ls should have dimension [%d, %d] which is [outputChannels, kernelWidth * kernelHeight * inputChannels]", m_children[0]->NodeName().c_str(), (int)m_imageLayout.GetNumChannels(), (int)weightCols);
+               LogicError("convolutionWeight matrix %ls should have dimension [%d, %d] which is [outputChannels, kernelWidth * kernelHeight * inputChannels]", m_inputs[0]->NodeName().c_str(), (int)m_imageLayout.GetNumChannels(), (int)weightCols);

            size_t inputDim = m_inputImageLayout.GetWidth() * m_inputImageLayout.GetHeight() * m_inputImageLayout.GetNumChannels();
            if (Inputs(1)->GetNumRows() == 0)
@@ -78,7 +78,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {

            m_topK = 1;
            // TODO: Make topK a constructor parameter
-           if (m_children.size() == 3)
+           if (m_inputs.size() == 3)
            {
                if (Inputs(2)->GetNumRows() != 1 || Inputs(2)->GetNumCols() != 1)
                    throw std::logic_error("TopK in ErrorPredictionNode must be a scalar value.");
@@ -484,7 +484,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
        virtual void AllocateGradientMatricesForChildren(MatrixPool& matrixPool) override
        {
            //this is a special handling case. We need to allocate sparse matrix directly instead of from pool.
-           if (m_children[0]->NeedGradient() && Inputs(1)->FunctionValues().GetMatrixType() == SPARSE)
+           if (m_inputs[0]->NeedGradient() && Inputs(1)->FunctionValues().GetMatrixType() == SPARSE)
            {
                CreateMatrixIfNull(Inputs(0)->GradientValuesPtr());
                Inputs(0)->GradientValues().SwitchToMatrixType(SPARSE, MatrixFormat::matrixFormatSparseBlockCol, false);
@@ -1393,16 +1393,6 @@ namespace Microsoft { namespace MSR { namespace CNTK {
            InferImageDimsFromInput(1, false);
        }

-       //virtual void AttachInputs(const ComputationNodePtr obs, const ComputationNodePtr inputGate, const ComputationNodePtr forgetGate, const ComputationNodePtr outputGate, const ComputationNodePtr memoryCellWgt)
-       //{
-       //    m_children.resize(5);
-       //    m_children[0] = obs;
-       //    m_children[1] = inputGate;
-       //    m_children[2] = forgetGate;
-       //    m_children[3] = outputGate;
-       //    m_children[4] = memoryCellWgt;
-       //}
-
        virtual void DumpNodeInfo(const bool printValues, File& fstream) const override
        {
            Base::DumpNodeInfo(printValues, fstream);
@@ -1528,7 +1528,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
            m_temp->AssignDifferenceOf(Inputs(0)->ValueSlice(frameRange), *m_classZeroLabels);   // TODO: need a slice for m_classZeroLabels?

            // Multiply the vector by the Inputs(2)->FunctionValues()
-           if (m_children.size() == 3) // without weight
+           if (m_inputs.size() == 3)   // without weight
                m_temp->AssignElementProductOf(*m_temp, Inputs(2)->ValueSlice(frameRange));      // TODO: is Inputs(2) minibatch data? Confirm

            // divide class by p (class 1) or (1-p) (class 0)
@@ -1579,7 +1579,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
            m_temp->AssignLogOf(*m_result);

            // The error is the negative of the sum of the result
-           if (m_children.size() == 2)
+           if (m_inputs.size() == 2)
                FunctionValues().AssignSumOfElements(*m_temp);
            else
                FunctionValues().AssignInnerProductOf(Inputs(2)->ValueSlice(frameRange), *m_temp, false);
@@ -1588,27 +1588,29 @@ namespace Microsoft { namespace MSR { namespace CNTK {

        virtual void /*ComputationNodeBase::*/Validate(bool isFinalValidationPass) override
        {
-           if (m_children.size() != 2 && m_children.size() != 3)
-               InvalidArgument("%ls %ls operation requires two or three inputs.", NodeName().c_str(), OperationName().c_str());
+           if (m_inputs.size() != 2 && m_inputs.size() != 3)
+               InvalidArgument("%ls %ls operation requires two or three inputs.", NodeName().c_str(), OperationName().c_str());

-           ValidateBinaryReduce(isFinalValidationPass);
+           ValidateBinaryReduce(isFinalValidationPass);

-           /* Note that this is the same as ValidateInferBinaryChildrenDims, but done for the 3rd child if it exists */
-           if (m_children.size() == 3)
-           {
-               auto in = Inputs(2);
-               auto other = Inputs(1);
-               // borrow any unset dimension on one input from the other input
-               size_t rows = in->GetNumRows() == 0 ? other->GetNumRows()/*borrow from peer*/ : in->GetNumRows()/*keep as is*/;
-               size_t cols = (!in->HasMBLayout() && in->GetNumCols() == 0) ? other->GetNumCols()/*borrow from peer*/ : in->GetNumCols()/*keep as is*/;
+           /* Note that this is the same as ValidateInferBinaryChildrenDims, but done for the 3rd child if it exists */
+           if (m_inputs.size() == 3)
+           {
+               auto in = Inputs(2);
+               auto other = Inputs(1);
+               // borrow any unset dimension on one input from the other input
+               size_t rows = in->GetNumRows() == 0 ? other->GetNumRows()/*borrow from peer*/ : in->GetNumRows()/*keep as is*/;
+               size_t cols = (!in->HasMBLayout() && in->GetNumCols() == 0) ? other->GetNumCols()/*borrow from peer*/ : in->GetNumCols()/*keep as is*/;

-               ValidateInferChildDims(2, rows, cols);
+               ValidateInferChildDims(2, rows, cols);

-               if (isFinalValidationPass &&
-                   !(Inputs(0)->GetNumRows() == Inputs(2)->GetNumRows() &&
-                   (Inputs(0)->HasMBLayout() || (Inputs(0)->GetNumCols() == Inputs(2)->GetNumCols()))))
-               {
-                   LogicError("The Matrix dimensions of the second argument in the %ls %ls operation do not match.", NodeName().c_str(), OperationName().c_str());
-               }
-           }
+               if (isFinalValidationPass &&
+                   !(Inputs(0)->GetNumRows() == Inputs(2)->GetNumRows() &&
+                   (Inputs(0)->HasMBLayout() || (Inputs(0)->GetNumCols() == Inputs(2)->GetNumCols()))))
+               {
+                   LogicError("The Matrix dimensions of the second argument in the %ls %ls operation do not match.", NodeName().c_str(), OperationName().c_str());
+               }
+           }
        }

        //request matrices needed to do node function value evaluation