Integrate fmegen/vs2015/possible-issues into master

This commit is contained in:
Project Philly 2016-11-28 11:08:07 -08:00
Родитель dc09fece3e ffb06d8355
Коммит 58d19a66a9
25 изменённых файлов: 182 добавлений и 166 удалений

Просмотреть файл

@ -290,13 +290,13 @@ inline FunctionPtr SetupFullyConnectedDNNLayer(Variable input, size_t outputDim,
void OutputFunctionInfo(FunctionPtr func)
{
auto inputVariables = func->Arguments();
fprintf(stderr, "Function '%S': Input Variables (count=%lu)\n", func->Name().c_str(), inputVariables.size());
fprintf(stderr, "Function '%S': Input Variables (count=%lu)\n", func->Name().c_str(), (unsigned long)inputVariables.size());
for_each(inputVariables.begin(), inputVariables.end(), [](const Variable v) {
fprintf(stderr, " name=%S, kind=%d\n", v.Name().c_str(), static_cast<int>(v.Kind()));
});
auto outputVariables = func->Outputs();
fprintf(stderr, "Function '%S': Output Variables (count=%lu)\n", func->Name().c_str(), outputVariables.size());
fprintf(stderr, "Function '%S': Output Variables (count=%lu)\n", func->Name().c_str(), (unsigned long)outputVariables.size());
for_each(outputVariables.begin(), outputVariables.end(), [](const Variable v) {
fprintf(stderr, " name=%S, kind=%d\n", v.Name().c_str(), static_cast<int>(v.Kind()));
});
@ -379,7 +379,7 @@ void RunEvaluationClassifier(FunctionPtr evalFunc, const DeviceDescriptor& devic
auto outputDim = outputVar.Shape()[0];
for (size_t i = 0; i < numSamples; i++)
{
fprintf(stderr, "Iteration:%lu, Sample %lu:\n", t, i);
fprintf(stderr, "Iteration:%lu, Sample %lu:\n", (unsigned long)t, (unsigned long)i);
fprintf(stderr, " ");
dataIndex = i * outputDim;
for (size_t j = 0; j < std::min((size_t)10, outputDim); j++)
@ -444,7 +444,7 @@ void RunEvaluationOneHidden(FunctionPtr evalFunc, const DeviceDescriptor& device
auto outputDim = outputVar.Shape()[0];
for (size_t i = 0; i < numSamples; i++)
{
fprintf(stderr, "Iteration:%lu, Sample %lu:\n", t, i);
fprintf(stderr, "Iteration:%lu, Sample %lu:\n", (unsigned long)t, (unsigned long)i);
fprintf(stderr, "Ouput:");
for (size_t j = 0; j < outputDim; j++)
{

Просмотреть файл

@ -762,10 +762,10 @@ public:
// next try the globals, this includes macros and global constants
if (!localOnly)
{
auto found = s_global.FindSymbol(name);
if (found != NULL)
auto found2 = s_global.FindSymbol(name);
if (found2 != NULL)
{
NDLNode<ElemType>* node = found;
NDLNode<ElemType>* node = found2;
if (node->GetType() == ndlTypeMacro)
{
// if we are calling a macro we need to keep track of formal parameters,

Просмотреть файл

@ -1470,8 +1470,8 @@ ComputationNetworkPtr SimpleNetworkBuilder<ElemType>::BuildNetworkFromDbnFile(co
// move to CPU since element-wise operation is expensive and can go wrong in GPU
int curDevId = globalStdDev.GetDeviceId();
globalStdDev.TransferFromDeviceToDevice(curDevId, CPUDEVICE, true, false, false);
for (int i = 0; i < globalStdDev.GetNumRows(); i++)
globalStdDev(i, 0) = (ElemType) 1.0 / (const ElemType) globalStdDev(i, 0);
for (int i2 = 0; i2 < globalStdDev.GetNumRows(); i2++)
globalStdDev(i2, 0) = (ElemType) 1.0 / (const ElemType) globalStdDev(i2, 0);
globalStdDev.TransferFromDeviceToDevice(CPUDEVICE, curDevId, true, false, false);
if (!CheckDbnTag(fstream, "BNET"))

Просмотреть файл

@ -317,11 +317,11 @@ void MELScript<ElemType>::CallFunction(const std::string& p_name, const ConfigPa
RuntimeError("CopyInputs requires two symbols from the same network, %s and %s belong to different networks", params[0].c_str(), params[1].c_str());
ProcessNDLScript(netNdlFrom, ndlPassAll);
for (GenNameValue name : names)
for (GenNameValue name2 : names)
{
auto& node = name.first;
auto& node = name2.first;
std::wstring nodeName = node->NodeName();
std::wstring toNodeName = name.second;
std::wstring toNodeName = name2.second;
netNdlTo->cn->CopyNode(*netNdlFrom->cn, nodeName, toNodeName, CopyNodeFlags::copyNodeInputLinks);
}

Просмотреть файл

@ -319,11 +319,11 @@ namespace CNTK
if (replacingVariable.IsOutput())
{
auto ownerFunc = replacingVariable.Owner();
std::unordered_set<FunctionPtr> visitedFunctions;
Collect(ownerFunc, visitedFunctions);
std::unordered_set<FunctionPtr> visitedFunctions2;
Collect(ownerFunc, visitedFunctions2);
// Add the newly visited functions to 'm_allPrimitiveFunctions' set
m_allPrimitiveFunctions.insert(visitedFunctions.begin(), visitedFunctions.end());
m_allPrimitiveFunctions.insert(visitedFunctions2.begin(), visitedFunctions2.end());
}
}
std::unordered_map<const Function*, size_t> functionVisitCounts;

Просмотреть файл

@ -145,8 +145,8 @@ namespace CNTK
outputDynamicAxes.push_back(Axis::NewUniqueDynamicAxis(L"whereNodeDynamicAxis"));
}
for (size_t i = 1; i < inputs[0].DynamicAxes().size(); ++i)
outputDynamicAxes.push_back(inputs[0].DynamicAxes()[i]);
for (size_t i2 = 1; i2 < inputs[0].DynamicAxes().size(); ++i2)
outputDynamicAxes.push_back(inputs[0].DynamicAxes()[i2]);
functionConfig[PrimitiveFunction::AttributeNameNewDynamicAxes] = AsDictionaryValueVector(outputDynamicAxes);
}
@ -407,8 +407,8 @@ namespace CNTK
outputShape = ConvolutionOpOutputShape(op, inputs[1].Shape(), kernelShape, outputMapCount, strides, sharing, autoPadding, lowerPad, upperPad, transpose, inferDimensions);
if (originalKernelShape != kernelShape)
{
for (size_t i = 0; i < kernelShape.Rank(); ++i)
inputs[0].m_dataFields->m_shape[i] = kernelShape[i];
for (size_t i2 = 0; i2 < kernelShape.Rank(); ++i2)
inputs[0].m_dataFields->m_shape[i2] = kernelShape[i2];
}
functionConfig[PrimitiveFunction::AttributeNameSharing] = AsDictionaryValueVector(sharing);
@ -435,8 +435,8 @@ namespace CNTK
RuntimeError("Prediction output operand's shape %S is incompatible with label operand's shape %S for the %S operation", AsStringForErrorReporting(predictionShape).c_str(), AsStringForErrorReporting(labelsShape).c_str(), PrimitiveOpTypeName(op).c_str());
std::vector<int> reductionAxes;
for (int i = 0; i < (int)inputs[0].Shape().Rank(); ++i)
reductionAxes.push_back(i);
for (int i3 = 0; i3 < (int)inputs[0].Shape().Rank(); ++i3)
reductionAxes.push_back(i3);
outputShape = ReductionOpOutputShape(op, predictionShape, reductionAxes, /*preserveReductionAxes =*/ false);
break;

Просмотреть файл

@ -401,8 +401,8 @@ public:
// found another opening brace, push it on the stack
else
{
const auto braceFound = openBraces.find(brace); // index of brace
braceStack.push_back(closingBraces[braceFound]); // closing symbol for current
const auto braceFound2 = openBraces.find(brace); // index of brace
braceStack.push_back(closingBraces[braceFound2]); // closing symbol for current
}
}
// hit end before everything was closed: error
@ -558,13 +558,13 @@ public:
// now look for contained braces before the next break
if (tokenEnd != npos)
{
const auto braceEndFound = FindBraces(stringParse, tokenEnd);
const auto braceEndFound2 = FindBraces(stringParse, tokenEnd);
// found an embedded brace, extend token to the end of the braces
if (braceEndFound != npos)
if (braceEndFound2 != npos)
{
// token includes the closing brace
tokenEnd = braceEndFound + 1;
tokenEnd = braceEndFound2 + 1;
}
}

Просмотреть файл

@ -282,10 +282,10 @@ public:
{
resize(n, m);
}
void resize(size_t n, size_t m)
void resize(size_t n2, size_t m)
{
numcols = m;
fixed_vector<T>::resize(n * m);
fixed_vector<T>::resize(n2 * m);
}
size_t cols() const
{

Просмотреть файл

@ -9,7 +9,6 @@
#define _FILEUTIL_
#include "Basics.h"
#include <stdio.h>
#ifdef __WINDOWS__
#define NOMINMAX
#include "Windows.h" // for mmreg.h and FILETIME
@ -702,18 +701,8 @@ class auto_file_ptr
{
if (f && f != stdin && f != stdout && f != stderr)
{
bool readMode = false;
#ifdef _WIN32
if ((f->_flag&_IOREAD) == _IOREAD)
readMode = true;
#else
int mode = fcntl(fileno(f), F_GETFL);
if ((mode & O_ACCMODE) == O_RDONLY)
readMode = true;
#endif
int rc = ::fclose(f);
if (!readMode && (rc != FCLOSE_SUCCESS) && !std::uncaught_exception())
if ((rc != FCLOSE_SUCCESS) && !std::uncaught_exception())
RuntimeError("auto_file_ptr: failed to close file: %s", strerror(errno));
f = NULL;

Просмотреть файл

@ -557,7 +557,7 @@ uint64_t fgetpos(FILE* f)
void fsetpos(FILE* f, uint64_t reqpos)
{
#ifdef _MSC_VER // standard does not allow to cast between fpos_t and integer numbers, and indeed it does not work on Linux (but on Windows and GCC)
#ifdef _MSC_VER // special hack for VS CRT
#if (_MSC_VER <= 1800) // Note: this does not trigger if loaded in vs2013 mode in vs2015!
// Visual Studio's ::fsetpos() flushes the read buffer. This conflicts with a situation where
// we generally read linearly but skip a few bytes or KB occasionally, as is
// the case in speech recognition tools. This requires a number of optimizations.
@ -580,6 +580,34 @@ void fsetpos(FILE* f, uint64_t reqpos)
if (curpos != fgetpos(f) || curpos + f->_cnt != cureob)
break; // oops
}
#else
// special hack for VS CRT (for VS2015)
// Visual Studio's ::fsetpos() flushes the read buffer. This conflicts with a situation where
// we generally read linearly but skip a few bytes or KB occasionally, as is
// the case in speech recognition tools. This requires a number of optimizations.
#define MAX_FREAD_SKIP 65536
// forward seeks up to 64KiB are simulated
// through a dummy read instead of fsetpos to
// the new position.
uint64_t curpos = fgetpos(f);
size_t n = min((size_t)reqpos - (size_t)curpos, (size_t)MAX_FREAD_SKIP);
// TODO: if we only skip a limited number of bytes, fread() them
// instead of fsetpos() to the new position since the vs2015
// libraries might drop the internal buffer and thus have to re-read
// from the new position, something that costs performance.
if (n < MAX_FREAD_SKIP)
{
// in case we stay in the internal buffer, no fileio is needed for this operation.
char buf[MAX_FREAD_SKIP];
fread(buf, sizeof(buf[0]), n, f); // (this may fail, but really shouldn't)
// if we made it then do not call fsetpos()
if (reqpos == fgetpos(f))
return;
}
#undef MAX_FREAD_SKIP
#endif // end special hack for VS CRT
// actually perform the seek

Просмотреть файл

@ -331,21 +331,21 @@ void ComputationNetwork::DetermineSCCsR(ComputationNodeBasePtr cur,
#if 1
if (loopId != m_allSEQNodes.size())
LogicError("DetermineSCCsR: %ls %ls operation has inconsistent loopId (%d) vs. m_allSEQNodes.size() (%d)", cur->NodeName().c_str(), cur->OperationName().c_str(), (int)loopId, (int)m_allSEQNodes.size());
SEQTraversalFlowControlNode rInfo(m_allSEQNodes.size(), cur);
SEQTraversalFlowControlNode rInfo2(m_allSEQNodes.size(), cur);
#else
assert(loopId == m_allSEQNodes.size()); // BUGBUG: Only true if all loops are shared among roots. Fix: use m_allSEQNodes.size() instead
SEQTraversalFlowControlNode rInfo(loopId, cur);
SEQTraversalFlowControlNode rInfo2(loopId, cur);
#endif
// TODO: can we prove that 'cur' == nestedNodes.front()? If so, we won't need to store it separately.
rInfo.m_nestedNodes = move(nestedNodes); // TODO: make these two part of the constructor
for (auto node : rInfo.m_nestedNodes)
rInfo2.m_nestedNodes = move(nestedNodes); // TODO: make these two part of the constructor
for (auto node : rInfo2.m_nestedNodes)
{
node->m_isPartOfLoop = true; // this is the only flag in ComputationNode that escapes FormRecurrentLoops()!
// TODO: ^^ We should instead remember a pointer to our loop sentinel
node->m_loopId = rInfo.m_loopId; // Note: m_loopId is only used inside this source file, and only for reordering
node->m_loopId = rInfo2.m_loopId; // Note: m_loopId is only used inside this source file, and only for reordering
}
rInfo.m_steppingDirection = DetermineLoopDirection(rInfo.m_nestedNodes);
m_allSEQNodes.push_back(make_shared<SEQTraversalFlowControlNode>(move(rInfo)));
rInfo2.m_steppingDirection = DetermineLoopDirection(rInfo2.m_nestedNodes);
m_allSEQNodes.push_back(make_shared<SEQTraversalFlowControlNode>(move(rInfo2)));
loopId++; // and count it TODO: may be removed
}
}

Просмотреть файл

@ -643,16 +643,16 @@ private:
for (let& clonedNodesKV : clonedNodes)
{
let& node = clonedNodesKV.second;
let& inputs = node->GetInputs();
for (size_t i = 0; i < inputs.size(); i++)
let& inputs2 = node->GetInputs();
for (size_t i = 0; i < inputs2.size(); i++)
{
fprintf(stderr, "%ls.inputs[%d] = %ls (%d)", node->NodeName().c_str(), (int)i, inputs[i]->NodeName().c_str(), (int)inputs[i]->m_uniqueNumericId);
let iter = clonedNodes.find(inputs[i]);
fprintf(stderr, "%ls.inputs[%d] = %ls (%d)", node->NodeName().c_str(), (int)i, inputs2[i]->NodeName().c_str(), (int)inputs2[i]->m_uniqueNumericId);
let iter = clonedNodes.find(inputs2[i]);
if (iter == clonedNodes.end())
continue;
// input is also a cloned node: relink
node->SetInput(i, iter->second);
fprintf(stderr, " ==> %ls (%d)\n", inputs[i]->NodeName().c_str(), (int)inputs[i]->m_uniqueNumericId);
fprintf(stderr, " ==> %ls (%d)\n", inputs2[i]->NodeName().c_str(), (int)inputs2[i]->m_uniqueNumericId);
numRelinks++;
}
}

Просмотреть файл

@ -377,9 +377,9 @@ void CNTKEvalExtended<ElemType>::ForwardPassT(const std::vector<ValueBuffer<Elem
ComputationNetwork::BumpEvalTimeStamp(m_inputNodes);
for (size_t i = 0; i < m_outputNodes.size(); ++i)
for (size_t i2 = 0; i2 < m_outputNodes.size(); ++i2)
{
auto node = m_outputNodes[i];
auto node = m_outputNodes[i2];
this->m_net->ForwardProp(node);
shared_ptr<Matrix<ElemType>> outputMatrix = dynamic_pointer_cast<Matrix<ElemType>>(node->ValuePtr());
auto pMBLayout = node->GetMBLayout();
@ -393,7 +393,7 @@ void CNTKEvalExtended<ElemType>::ForwardPassT(const std::vector<ValueBuffer<Elem
if (seq.size() != 1)
RuntimeError("Only 1 output sequence supported by this API");
ValueContainer<ElemType>& vec = outputs[i].m_buffer;
ValueContainer<ElemType>& vec = outputs[i2].m_buffer;
size_t numElements = outputMatrix->GetNumElements();

Просмотреть файл

@ -237,8 +237,8 @@ Section* BinaryWriter<ElemType>::CreateSection(const ConfigParameters& config, S
break;
case sectionTypeLabel: // label data
{
size_t elementSize = sizeof(LabelIdType);
dataSize = records * elementSize + sectionHeaderMin;
size_t elementSize2 = sizeof(LabelIdType);
dataSize = records * elementSize2 + sectionHeaderMin;
auto sectionLabel = new SectionLabel(file, parentSection, filePositionNext, mappingMain, dataSize);
SectionData dataType = sectionDataInt;
LabelKind labelKind = labelCategory; // default
@ -246,7 +246,7 @@ Section* BinaryWriter<ElemType>::CreateSection(const ConfigParameters& config, S
{
labelKind = labelRegression;
dataType = sectionDataFloat;
elementSize = sizeof(ElemType);
elementSize2 = sizeof(ElemType);
}
else if (config.Match(L"labelType", L"Category"))
{
@ -258,7 +258,7 @@ Section* BinaryWriter<ElemType>::CreateSection(const ConfigParameters& config, S
}
// initialize the section header
sectionLabel->InitHeader(sectionTypeLabel, sectionName + ":Labels", dataType, (WORD) elementSize);
sectionLabel->InitHeader(sectionTypeLabel, sectionName + ":Labels", dataType, (WORD) elementSize2);
// initialize the special label header items
sectionLabel->SetLabelKind(labelKind);

Просмотреть файл

@ -53,10 +53,10 @@ TextConfigHelper::TextConfigHelper(const ConfigParameters& config)
map<string, wstring> aliasToInputMap;
for (const pair<string, ConfigParameters>& section : input)
{
ConfigParameters input = section.second;
ConfigParameters input2 = section.second;
wstring name = msra::strfun::utf16(section.first);
if (!input.ExistsCurrent(L"dim") || !input.ExistsCurrent(L"format"))
if (!input2.ExistsCurrent(L"dim") || !input2.ExistsCurrent(L"format"))
{
RuntimeError("Input section for input '%ls' does not specify all the required parameters, "
"\"dim\" and \"format\".", name.c_str());
@ -65,8 +65,8 @@ TextConfigHelper::TextConfigHelper(const ConfigParameters& config)
StreamDescriptor stream;
stream.m_id = id++;
stream.m_name = name;
stream.m_sampleDimension = input(L"dim");
string type = input(L"format");
stream.m_sampleDimension = input2(L"dim");
string type = input2(L"format");
if (AreEqualIgnoreCase(type, "dense"))
{
@ -88,9 +88,9 @@ TextConfigHelper::TextConfigHelper(const ConfigParameters& config)
}
// alias is optional
if (input.ExistsCurrent(L"alias"))
if (input2.ExistsCurrent(L"alias"))
{
stream.m_alias = input(L"alias");
stream.m_alias = input2(L"alias");
if (stream.m_alias.empty())
{
RuntimeError("Alias value for input '%ls' is empty.", name.c_str());

Просмотреть файл

@ -956,7 +956,6 @@ bool HTKMLFReader<ElemType>::GetMinibatchToTrainOrTest(StreamMinibatchInputs& ma
bool skip = false;
// on first minibatch, make sure we can supply data for requested nodes
std::map<std::wstring, size_t>::iterator iter;
if (m_checkDictionaryKeys)
{
for (auto iter = matrices.begin(); iter != matrices.end(); iter++)
@ -1132,21 +1131,21 @@ bool HTKMLFReader<ElemType>::GetMinibatchToTrainOrTest(StreamMinibatchInputs& ma
m_pMBLayout->AddGap(i, m_numValidFrames[i], m_mbNumTimeSteps);
} // if (!frameMode)
for (auto iter = matrices.begin(); iter != matrices.end(); iter++)
for (auto iter2 = matrices.begin(); iter2 != matrices.end(); iter2++)
{
// dereference matrix that corresponds to key (input/output name) and
// populate based on whether its a feature or a label
Matrix<ElemType>& data = matrices.GetInputMatrix<ElemType>(iter->first); // can be features or labels
if (m_nameToTypeMap[iter->first] == InputOutputTypes::real)
Matrix<ElemType>& data = matrices.GetInputMatrix<ElemType>(iter2->first); // can be features or labels
if (m_nameToTypeMap[iter2->first] == InputOutputTypes::real)
{
id = m_featureNameToIdMap[iter->first];
dim = m_featureNameToDimMap[iter->first];
id = m_featureNameToIdMap[iter2->first];
dim = m_featureNameToDimMap[iter2->first];
data.SetValue(dim, m_mbNumTimeSteps * m_numSeqsPerMB, data.GetDeviceId(), m_featuresBufferMultiIO[id].get(), matrixFlagNormal);
}
else if (m_nameToTypeMap[iter->first] == InputOutputTypes::category)
else if (m_nameToTypeMap[iter2->first] == InputOutputTypes::category)
{
id = m_labelNameToIdMap[iter->first];
dim = m_labelNameToDimMap[iter->first];
id = m_labelNameToIdMap[iter2->first];
dim = m_labelNameToDimMap[iter2->first];
data.SetValue(dim, m_mbNumTimeSteps * m_numSeqsPerMB, data.GetDeviceId(), m_labelsBufferMultiIO[id].get(), matrixFlagNormal);
}
}
@ -1209,16 +1208,16 @@ bool HTKMLFReader<ElemType>::GetMinibatchToTrainOrTest(StreamMinibatchInputs& ma
}
actualmbsize[i] = m_mbNumTimeSteps;
const size_t endFr = startFr + actualmbsize[i]; // actual end frame index of this segment
for (auto iter = matrices.begin(); iter != matrices.end(); iter++)
for (auto iter3 = matrices.begin(); iter3 != matrices.end(); iter3++)
{
// dereference matrix that corresponds to key (input/output name) and
// populate based on whether its a feature or a label
Matrix<ElemType>& data = matrices.GetInputMatrix<ElemType>(iter->first); // can be features or labels
Matrix<ElemType>& data = matrices.GetInputMatrix<ElemType>(iter3->first); // can be features or labels
if (m_nameToTypeMap[iter->first] == InputOutputTypes::real)
if (m_nameToTypeMap[iter3->first] == InputOutputTypes::real)
{
id = m_featureNameToIdMap[iter->first];
dim = m_featureNameToDimMap[iter->first];
id = m_featureNameToIdMap[iter3->first];
dim = m_featureNameToDimMap[iter3->first];
if ((m_featuresBufferMultiIO[id] == nullptr) ||
(m_featuresBufferAllocatedMultiIO[id] < (dim * m_mbNumTimeSteps * m_numSeqsPerMB)) /*buffer size changed. can be partial minibatch*/)
@ -1250,10 +1249,10 @@ bool HTKMLFReader<ElemType>::GetMinibatchToTrainOrTest(StreamMinibatchInputs& ma
}
}
}
else if (m_nameToTypeMap[iter->first] == InputOutputTypes::category)
else if (m_nameToTypeMap[iter3->first] == InputOutputTypes::category)
{
id = m_labelNameToIdMap[iter->first];
dim = m_labelNameToDimMap[iter->first];
id = m_labelNameToIdMap[iter3->first];
dim = m_labelNameToDimMap[iter3->first];
if ((m_labelsBufferMultiIO[id] == nullptr) ||
(m_labelsBufferAllocatedMultiIO[id] < (dim * m_mbNumTimeSteps * m_numSeqsPerMB)))
{
@ -1282,16 +1281,16 @@ bool HTKMLFReader<ElemType>::GetMinibatchToTrainOrTest(StreamMinibatchInputs& ma
assert(endFr == m_numFramesToProcess[i]); // we are at the end
// fill frames for the tail of this utterance
for (auto iter = matrices.begin(); iter != matrices.end(); iter++)
for (auto iter4 = matrices.begin(); iter4 != matrices.end(); iter4++)
{
// dereference matrix that corresponds to key (input/output name) and
// populate based on whether its a feature or a label
Matrix<ElemType>& data = matrices.GetInputMatrix<ElemType>(iter->first); // can be features or labels
Matrix<ElemType>& data = matrices.GetInputMatrix<ElemType>(iter4->first); // can be features or labels
if (m_nameToTypeMap[iter->first] == InputOutputTypes::real)
if (m_nameToTypeMap[iter4->first] == InputOutputTypes::real)
{
id = m_featureNameToIdMap[iter->first];
dim = m_featureNameToDimMap[iter->first];
id = m_featureNameToIdMap[iter4->first];
dim = m_featureNameToDimMap[iter4->first];
if ((m_featuresBufferMultiIO[id] == nullptr) ||
(m_featuresBufferAllocatedMultiIO[id] < (dim * m_mbNumTimeSteps * m_numSeqsPerMB)) /*buffer size changed. can be partial minibatch*/)
@ -1323,10 +1322,10 @@ bool HTKMLFReader<ElemType>::GetMinibatchToTrainOrTest(StreamMinibatchInputs& ma
}
}
}
else if (m_nameToTypeMap[iter->first] == InputOutputTypes::category)
else if (m_nameToTypeMap[iter4->first] == InputOutputTypes::category)
{
id = m_labelNameToIdMap[iter->first];
dim = m_labelNameToDimMap[iter->first];
id = m_labelNameToIdMap[iter4->first];
dim = m_labelNameToDimMap[iter4->first];
if ((m_labelsBufferMultiIO[id] == nullptr) ||
(m_labelsBufferAllocatedMultiIO[id] < (dim * m_mbNumTimeSteps * m_numSeqsPerMB)))
{
@ -1367,16 +1366,16 @@ bool HTKMLFReader<ElemType>::GetMinibatchToTrainOrTest(StreamMinibatchInputs& ma
m_pMBLayout->AddSequence(NEW_SEQUENCE_ID, i, startT, startT + m_numFramesToProcess[i]);
// copy the data
for (auto iter = matrices.begin(); iter != matrices.end(); iter++)
for (auto iter5 = matrices.begin(); iter5 != matrices.end(); iter5++)
{
// dereference matrix that corresponds to key (input/output name) and
// populate based on whether its a feature or a label
// Matrix<ElemType>& data = *matrices[iter->first]; // can be features or labels
// Matrix<ElemType>& data = *matrices[iter5->first]; // can be features or labels
if (m_nameToTypeMap[iter->first] == InputOutputTypes::real)
if (m_nameToTypeMap[iter5->first] == InputOutputTypes::real)
{
id = m_featureNameToIdMap[iter->first];
dim = m_featureNameToDimMap[iter->first];
id = m_featureNameToIdMap[iter5->first];
dim = m_featureNameToDimMap[iter5->first];
if (sizeof(ElemType) == sizeof(float))
{
for (size_t t = startT, fr = 0; t < endT; t++, fr++) // column major, so iterate columns
@ -1400,10 +1399,10 @@ bool HTKMLFReader<ElemType>::GetMinibatchToTrainOrTest(StreamMinibatchInputs& ma
}
}
}
else if (m_nameToTypeMap[iter->first] == InputOutputTypes::category)
else if (m_nameToTypeMap[iter5->first] == InputOutputTypes::category)
{
id = m_labelNameToIdMap[iter->first];
dim = m_labelNameToDimMap[iter->first];
id = m_labelNameToIdMap[iter5->first];
dim = m_labelNameToDimMap[iter5->first];
for (size_t t = startT, fr = 0; t < endT; t++, fr++)
{
for (int d = 0; d < dim; d++)
@ -1441,21 +1440,21 @@ bool HTKMLFReader<ElemType>::GetMinibatchToTrainOrTest(StreamMinibatchInputs& ma
} // for (size_t i = 0; i < m_numSeqsPerMB; i++)
// we are done filling all parallel sequences
for (auto iter = matrices.begin(); iter != matrices.end(); iter++)
for (auto iter6 = matrices.begin(); iter6 != matrices.end(); iter6++)
{
// dereference matrix that corresponds to key (input/output name) and
// populate based on whether its a feature or a label
Matrix<ElemType>& data = matrices.GetInputMatrix<ElemType>(iter->first); // can be features or labels
if (m_nameToTypeMap[iter->first] == InputOutputTypes::real)
Matrix<ElemType>& data = matrices.GetInputMatrix<ElemType>(iter6->first); // can be features or labels
if (m_nameToTypeMap[iter6->first] == InputOutputTypes::real)
{
id = m_featureNameToIdMap[iter->first];
dim = m_featureNameToDimMap[iter->first];
id = m_featureNameToIdMap[iter6->first];
dim = m_featureNameToDimMap[iter6->first];
data.SetValue(dim, m_mbNumTimeSteps * m_numSeqsPerMB, data.GetDeviceId(), m_featuresBufferMultiIO[id].get(), matrixFlagNormal);
}
else if (m_nameToTypeMap[iter->first] == InputOutputTypes::category)
else if (m_nameToTypeMap[iter6->first] == InputOutputTypes::category)
{
id = m_labelNameToIdMap[iter->first];
dim = m_labelNameToDimMap[iter->first];
id = m_labelNameToIdMap[iter6->first];
dim = m_labelNameToDimMap[iter6->first];
data.SetValue(dim, m_mbNumTimeSteps * m_numSeqsPerMB, data.GetDeviceId(), m_labelsBufferMultiIO[id].get(), matrixFlagNormal);
}
}
@ -1547,7 +1546,6 @@ void HTKMLFReader<ElemType>::fillOneUttDataforParallelmode(StreamMinibatchInputs
template <class ElemType>
bool HTKMLFReader<ElemType>::GetMinibatchToWrite(StreamMinibatchInputs& matrices)
{
std::map<std::wstring, size_t>::iterator iter;
if (m_checkDictionaryKeys)
{
for (auto iter = m_featureNameToIdMap.begin(); iter != m_featureNameToIdMap.end(); iter++)
@ -1598,9 +1596,9 @@ bool HTKMLFReader<ElemType>::GetMinibatchToWrite(StreamMinibatchInputs& matrices
// This broadcasts a vector to be multiple columns, as needed for i-vector support
msra::dbn::matrix feat_col(feat);
feat.resize(feat.rows(), nfr);
for (size_t i = 0; i < feat.rows(); i++)
for (size_t i2 = 0; i2 < feat.rows(); i2++)
for (size_t j = 0; j < feat.cols(); j++)
feat(i, j) = feat_col(i, 0);
feat(i2, j) = feat_col(i2, 0);
}
fprintf(stderr, "evaluate: reading %d frames of %ls\n", (int) feat.cols(), ((wstring) path).c_str());
@ -1613,16 +1611,16 @@ bool HTKMLFReader<ElemType>::GetMinibatchToWrite(StreamMinibatchInputs& matrices
// populate input matrices
bool first = true;
for (auto iter = matrices.begin(); iter != matrices.end(); iter++)
for (auto iter2 = matrices.begin(); iter2 != matrices.end(); iter2++)
{
// dereference matrix that corresponds to key (input/output name) and
// populate based on whether its a feature or a label
if (m_nameToTypeMap.find(iter->first) != m_nameToTypeMap.end() && m_nameToTypeMap[iter->first] == InputOutputTypes::real)
if (m_nameToTypeMap.find(iter2->first) != m_nameToTypeMap.end() && m_nameToTypeMap[iter2->first] == InputOutputTypes::real)
{
Matrix<ElemType>& data = matrices.GetInputMatrix<ElemType>(iter->first); // can be features or labels (TODO: Really? Didn't we just ^^^ check that it is 'real'?)
size_t id = m_featureNameToIdMap[iter->first];
size_t dim = m_featureNameToDimMap[iter->first];
Matrix<ElemType>& data = matrices.GetInputMatrix<ElemType>(iter2->first); // can be features or labels (TODO: Really? Didn't we just ^^^ check that it is 'real'?)
size_t id = m_featureNameToIdMap[iter2->first];
size_t dim = m_featureNameToDimMap[iter2->first];
const msra::dbn::matrix feat = m_fileEvalSource->ChunkOfFrames(id);

Просмотреть файл

@ -1849,20 +1849,20 @@ bool BatchSequenceReader<ElemType>::GetMinibatchData(size_t& /*out*/ firstPosInS
// generate the output label token
if (labelOut.type != labelNone)
{
const auto& labelValue = m_labelTemp[pos];
const auto& labelValue2 = m_labelTemp[pos];
LabelIdType labelId;
if (labelOut.type == labelCategory)
{
pos++; // consume it --TODO: value is not used after this
labelId = GetIdFromLabel(labelValue, labelOut);
labelId = GetIdFromLabel(labelValue2, labelOut);
}
else if (nextWord)
{
// this is the next word (pos was already incremented above when reading out labelValue)
if (EqualCI(labelValue, labelIn.endSequence)) // end symbol may differ between input and output
if (EqualCI(labelValue2, labelIn.endSequence)) // end symbol may differ between input and output
labelId = GetIdFromLabel(labelIn.endSequence, labelIn);
else
labelId = GetIdFromLabel(labelValue, labelIn);
labelId = GetIdFromLabel(labelValue2, labelIn);
}
else
LogicError("Unexpected output label type."); // should never get here

Просмотреть файл

@ -1087,9 +1087,9 @@ bool BatchLUSequenceReader<ElemType>::GetFrame(StreamMinibatchInputs& matrices,
assert((jj == m_wordContext.size() - 1) ? true : cxt > m_wordContext[jj + 1]);
size_t hidx;
size_t hlength = history.size();
if (hlength + cxt > 0)
hidx = history[hlength + cxt - 1];
size_t hlength2 = history.size();
if (hlength2 + cxt > 0)
hidx = history[hlength2 + cxt - 1];
else
hidx = history[0];
@ -1174,7 +1174,6 @@ template <class ElemType>
bool MultiIOBatchLUSequenceReader<ElemType>::TryGetMinibatch(StreamMinibatchInputs& matrices)
{
// on first iteration, need to check if all requested data matrices are available
std::map<std::wstring, size_t>::iterator iter;
if (mCheckDictionaryKeys)
{
for (auto iter = matrices.begin(); iter != matrices.end(); iter++) // TODO: range-based for

Просмотреть файл

@ -2481,11 +2481,11 @@ template <class ElemType>
bool SGD<ElemType>::GradientCheck(ComputationNetworkPtr net,
const std::vector<ComputationNodeBasePtr>& criterionNodes,
const std::list<ComputationNodeBasePtr>& learnableNodes,
int npos)
int npos2)
{
ScopedNetworkOperationMode modeGuard(net, NetworkOperationMode::training);
net->StartEvaluateMinibatchLoop(criterionNodes[npos]);
net->StartEvaluateMinibatchLoop(criterionNodes[npos2]);
vector<string> errMsgs; // TODO: These are created but actually not returned, only their count is checked.
@ -2511,8 +2511,8 @@ bool SGD<ElemType>::GradientCheck(ComputationNetworkPtr net,
node->BumpEvalTimeStamp();
net->ForwardProp(criterionNodes[npos]);
net->Backprop(criterionNodes[npos]);
net->ForwardProp(criterionNodes[npos2]);
net->Backprop(criterionNodes[npos2]);
if (node->Gradient().GetMatrixType() == MatrixType::SPARSE)
{
@ -2522,7 +2522,7 @@ bool SGD<ElemType>::GradientCheck(ComputationNetworkPtr net,
// double mbEvalCri =
// criterionNode should be a scalar
// TODO: why is this value not used?
criterionNodes[npos]->Get00Element();
criterionNodes[npos2]->Get00Element();
double eGradErr = node->Gradient()(irow, icol);
node->Gradient().TransferToDeviceIfNotThere(net->GetDeviceId(), true);
@ -2533,19 +2533,19 @@ bool SGD<ElemType>::GradientCheck(ComputationNetworkPtr net,
node->Value().TransferToDeviceIfNotThere(net->GetDeviceId(), true);
node->BumpEvalTimeStamp();
net->ForwardProp(criterionNodes[npos]);
net->ForwardProp(criterionNodes[npos2]);
// criterionNode should be a scalar
double mbEvalCriPos = criterionNodes[npos]->Get00Element(); // TODO: make Get00Element() a function of ComputationNodeBase
double mbEvalCriPos = criterionNodes[npos2]->Get00Element(); // TODO: make Get00Element() a function of ComputationNodeBase
node->Value()(irow, icol) = (ElemType) eNeg;
node->Value().TransferToDeviceIfNotThere(net->GetDeviceId(), true);
node->BumpEvalTimeStamp();
net->ForwardProp(criterionNodes[npos]);
net->ForwardProp(criterionNodes[npos2]);
// criterionNode should be a scalar
double mbEvalCriNeg = criterionNodes[npos]->Get00Element();
double mbEvalCriNeg = criterionNodes[npos2]->Get00Element();
// back to its original parameter value
node->Value()(irow, icol) = (ElemType) eOrg;

Просмотреть файл

@ -22,9 +22,11 @@ struct ReaderFixture
{
BOOST_TEST_MESSAGE("Setup fixture");
#ifdef _WIN32
#if (_MSC_VER <= 1800) // Note: this does not trigger if loaded in vs2013 mode in vs2015!
BOOST_TEST_MESSAGE("Set two-digit format of exponent number");
// Todo: According to MSDN, the following function is obsolete and not available in the CRT from VS2015.
_set_output_format(_TWO_DIGIT_EXPONENT);
#endif
#endif
m_initialWorkingPath = boost::filesystem::current_path().generic_string();
BOOST_TEST_MESSAGE("Current working directory: " + m_initialWorkingPath);

Просмотреть файл

@ -436,9 +436,9 @@ void BlockRandomizerOneEpochTest(bool prefetch)
BOOST_CHECK_EQUAL(sequences.m_data.size(), 1 - (i / data.size()));
if (i < data.size())
{
auto& data = reinterpret_cast<DenseSequenceData&>(*sequences.m_data[0][0]);
BOOST_CHECK_EQUAL(data.m_numberOfSamples, 1u);
actual.push_back(*((float*)data.GetDataBuffer()));
auto& data2 = reinterpret_cast<DenseSequenceData&>(*sequences.m_data[0][0]);
BOOST_CHECK_EQUAL(data2.m_numberOfSamples, 1u);
actual.push_back(*((float*)data2.GetDataBuffer()));
}
BOOST_CHECK_EQUAL(sequences.m_endOfEpoch, (data.size() <= i));
}
@ -477,9 +477,9 @@ void BlockRandomizerOneEpochWithChunks1Test(bool prefetch)
BOOST_CHECK_EQUAL(sequences.m_data.size(), 1 - (i / data.size()));
if (i < data.size())
{
auto& data = reinterpret_cast<DenseSequenceData&>(*sequences.m_data[0][0]);
BOOST_CHECK_EQUAL(data.m_numberOfSamples, 1u);
actual.push_back(*((float*)data.GetDataBuffer()));
auto& data2 = reinterpret_cast<DenseSequenceData&>(*sequences.m_data[0][0]);
BOOST_CHECK_EQUAL(data2.m_numberOfSamples, 1u);
actual.push_back(*((float*)data2.GetDataBuffer()));
}
BOOST_CHECK_EQUAL(sequences.m_endOfEpoch, (data.size() <= i));
}
@ -522,9 +522,9 @@ void BlockRandomizerOneEpochWithChunks2Test(bool prefetch)
BOOST_CHECK_EQUAL(sequences.m_data.size(), 1 - (i / data.size()));
if (i < data.size())
{
auto& data = reinterpret_cast<DenseSequenceData&>(*sequences.m_data[0][0]);
BOOST_CHECK_EQUAL(data.m_numberOfSamples, 1u);
actual.push_back(*((float*)data.GetDataBuffer()));
auto& data2 = reinterpret_cast<DenseSequenceData&>(*sequences.m_data[0][0]);
BOOST_CHECK_EQUAL(data2.m_numberOfSamples, 1u);
actual.push_back(*((float*)data2.GetDataBuffer()));
}
BOOST_CHECK_EQUAL(sequences.m_endOfEpoch, (data.size() <= i));
}
@ -628,9 +628,9 @@ void BlockRandomizerOneEpochLegacyRandomizationTest(bool prefetch)
BOOST_CHECK_EQUAL(sequences.m_data.size(), 1 - (i / data.size()));
if (i < 10)
{
auto& data = reinterpret_cast<DenseSequenceData&>(*sequences.m_data[0][0]);
BOOST_CHECK_EQUAL(data.m_numberOfSamples, 1u);
actual.push_back(*((float*)data.GetDataBuffer()));
auto& data2 = reinterpret_cast<DenseSequenceData&>(*sequences.m_data[0][0]);
BOOST_CHECK_EQUAL(data2.m_numberOfSamples, 1u);
actual.push_back(*((float*)data2.GetDataBuffer()));
}
BOOST_CHECK_EQUAL(sequences.m_endOfEpoch, (data.size() <= i));
@ -670,9 +670,9 @@ BOOST_AUTO_TEST_CASE(NoRandomizerOneEpoch)
BOOST_CHECK_EQUAL(sequences.m_data.size(), 1 - (i / data.size()));
if (i < data.size())
{
auto& data = reinterpret_cast<DenseSequenceData&>(*sequences.m_data[0][0]);
BOOST_CHECK_EQUAL(data.m_numberOfSamples, 1u);
actual.push_back(*((float*)data.GetDataBuffer()));
auto& data2 = reinterpret_cast<DenseSequenceData&>(*sequences.m_data[0][0]);
BOOST_CHECK_EQUAL(data2.m_numberOfSamples, 1u);
actual.push_back(*((float*)data2.GetDataBuffer()));
}
BOOST_CHECK_EQUAL(sequences.m_endOfEpoch, (data.size() <= i));

Просмотреть файл

@ -207,13 +207,13 @@ std::pair<CNTK::FunctionPtr, CNTK::FunctionPtr> LSTMPCellWithSelfStabilization(C
return CNTK::Parameter({ dim }, (ElementType)0.0, device);
};
unsigned long seed = 1;
auto createProjectionParam = [device, &seed](size_t outputDim) {
return CNTK::Parameter({ outputDim, CNTK::NDShape::InferredDimension }, CNTK::AsDataType<ElementType>(), CNTK::GlorotUniformInitializer(1, 0, 1, seed++), device);
unsigned long seed2 = 1;
auto createProjectionParam = [device, &seed2](size_t outputDim) {
return CNTK::Parameter({ outputDim, CNTK::NDShape::InferredDimension }, CNTK::AsDataType<ElementType>(), CNTK::GlorotUniformInitializer(1, 0, 1, seed2++), device);
};
auto createDiagWeightParam = [device, &seed](size_t dim) {
return CNTK::Parameter({ dim }, CNTK::AsDataType<ElementType>(), CNTK::GlorotUniformInitializer(1, 0, 1, seed++), device);
auto createDiagWeightParam = [device, &seed2](size_t dim) {
return CNTK::Parameter({ dim }, CNTK::AsDataType<ElementType>(), CNTK::GlorotUniformInitializer(1, 0, 1, seed2++), device);
};
auto stabilizedPrevOutput = Stabilize<ElementType>(prevOutput, device);

Просмотреть файл

@ -65,15 +65,15 @@ void TestFeedForwardNetworkCreation(const DeviceDescriptor& device, bool testSav
for (size_t i = 0; i < iterationCount; ++i)
{
std::vector<float> inputData(inputDim * numSamples);
for (size_t i = 0; i < inputData.size(); ++i)
inputData[i] = ((float)rand()) / RAND_MAX;
for (size_t i2 = 0; i2 < inputData.size(); ++i2)
inputData[i2] = ((float)rand()) / RAND_MAX;
NDShape inputShape = inputVar.Shape().AppendShape({ 1, numSamples });
ValuePtr inputValue = MakeSharedObject<Value>(MakeSharedObject<NDArrayView>(inputShape, inputData.data(), inputData.size(), DeviceDescriptor::CPUDevice(), true));
std::vector<float> labelData(numOutputClasses * numSamples, 0);
for (size_t i = 0; i < numSamples; ++i)
labelData[(i*numOutputClasses) + (rand() % numOutputClasses)] = 1;
for (size_t i3 = 0; i3 < numSamples; ++i3)
labelData[(i3*numOutputClasses) + (rand() % numOutputClasses)] = 1;
NDShape labelShape = labelsVar.Shape().AppendShape({ 1, numSamples });
ValuePtr labelValue = MakeSharedObject<Value>(MakeSharedObject<NDArrayView>(labelShape, labelData.data(), labelData.size(), DeviceDescriptor::CPUDevice(), true));

Просмотреть файл

@ -87,10 +87,10 @@ void TestRecurrentNetworkCreation(const DeviceDescriptor& device, bool testSaveA
ValuePtr inputValue = GenerateSequences<ElementType>(sequenceLengths, { inputDim }, device, false);
std::vector<std::vector<ElementType>> labelsData;
for (size_t i = 0; i < numSequences; ++i)
for (size_t i2 = 0; i2 < numSequences; ++i2)
{
std::vector<ElementType> currentSequence(numOutputClasses * sequenceLengths[i]);
for (size_t j = 0; j < sequenceLengths[i]; ++j)
std::vector<ElementType> currentSequence(numOutputClasses * sequenceLengths[i2]);
for (size_t j = 0; j < sequenceLengths[i2]; ++j)
currentSequence[(j * numOutputClasses) + (rand() % numOutputClasses)] = 1;
labelsData.push_back(std::move(currentSequence));

Просмотреть файл

@ -118,12 +118,12 @@ DictionaryValue CreateDictionaryValue(DictionaryValue::Type type, size_t maxSize
}
case DictionaryValue::Type::Vector:
{
auto type = GetType();
auto type2 = GetType();
size_t size = rng() % maxSize + 1;
vector<DictionaryValue> vector(size);
for (auto i = 0; i < size; i++)
{
vector[i] = CreateDictionaryValue(type, maxSize-1, maxDepth-1);
vector[i] = CreateDictionaryValue(type2, maxSize-1, maxDepth-1);
}
return DictionaryValue(vector);
}
@ -449,21 +449,21 @@ void TestFunctionSerializationDuringTraining(const FunctionPtr& function, const
for (int i = 0; i < 3; ++i)
{
Dictionary model = classifierOutput1->Serialize();
Dictionary model2 = classifierOutput1->Serialize();
auto classifierOutput2 = Function::Deserialize(model, device);
auto classifierOutput3 = Function::Deserialize(model2, device);
if (!AreEqual(classifierOutput1, classifierOutput2))
if (!AreEqual(classifierOutput1, classifierOutput3))
{
throw std::runtime_error("TestModelSerialization: original and reloaded functions are not identical.");
}
Trainer trainer2 = BuildTrainer(classifierOutput2, labels);
Trainer trainer2 = BuildTrainer(classifierOutput3, labels);
for (int j = 0; j < 3; ++j)
{
trainer1.TrainMinibatch({ { classifierOutput1->Arguments()[0], minibatchData[featureStreamInfo].m_data }, { labels, minibatchData[labelStreamInfo].m_data } }, device);
trainer2.TrainMinibatch({ { classifierOutput2->Arguments()[0], minibatchData[featureStreamInfo].m_data }, { labels, minibatchData[labelStreamInfo].m_data } }, device);
trainer2.TrainMinibatch({ { classifierOutput3->Arguments()[0], minibatchData[featureStreamInfo].m_data }, { labels, minibatchData[labelStreamInfo].m_data } }, device);
double mbLoss1 = trainer1.PreviousMinibatchLossAverage();
double mbLoss2 = trainer2.PreviousMinibatchLossAverage();