Use %zd for printing size_t,
    %ls for wide strings.
Use SGDBase as the name for the base class instead of redefining the templated base.
Scott Cyphers 2015-08-03 11:05:33 -04:00
Parent 765a68f5d8
Commit e633191da1
4 changed files with 45 additions and 45 deletions
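
As a quick illustration of the two format-specifier fixes in this commit, here is a standalone sketch (the variable names are hypothetical, echoing those in the diff):

#include <cstdio>
#include <string>

int main()
{
    size_t totalEpochSamples = 1024;
    std::wstring nodeName = L"CrossEntropy";

    // size_t takes the 'z' length modifier. Strictly, %zu is the unsigned
    // form; %zd (as used in this commit) is the signed variant, which
    // common compilers accept for in-range values.
    fprintf(stderr, "Samples seen = %zd\n", totalEpochSamples);

    // %ls is the standard conversion for wchar_t* strings; %ws is a
    // Microsoft-only extension, and plain %s would be wrong here.
    fprintf(stderr, "Unit test node %ls\n", nodeName.c_str());
    return 0;
}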

View file

@@ -1455,7 +1455,7 @@ public:
ComputationNodePtr newNode(new PairNetworkNode<ElemType>(m_deviceId, nodeName));
if (this->GetNodeFromName(a->NodeName(), nullptr, false) != nullptr)
{
-fprintf(stderr, "PairNetwork : asked to pair a node with name l%s in another network.However, this network has already a node with the same name.Should avoid this case.\n", a->NodeName().c_str());
+fprintf(stderr, "PairNetwork : asked to pair a node with name %ls in another network.However, this network has already a node with the same name.Should avoid this case.\n", a->NodeName().c_str());
throw std::runtime_error("PairNetwork : asked to pair a node with name in another network.However, this network has already a node with the same name.Should avoid this case.\n");
}
newNode->AttachInputs(a);
@@ -3243,7 +3243,7 @@ public:
bool UnitTest(const ComputationNodePtr rootNode)
{
-fprintf(stderr, "\n\n Unit test node %ws \n", rootNode->NodeName().c_str());
+fprintf(stderr, "\n\n Unit test node %ls \n", rootNode->NodeName().c_str());
std::list<ComputationNodePtr>& nodes = GetEvalOrder(rootNode);

View file

@@ -38,36 +38,36 @@ namespace Microsoft {
{
ElemType m_default_activity;
-typedef SGD<ElemType> SGD;
+using SGDBase = SGD<ElemType>;
public:
-using SGD::m_modelPath;
-using SGD::m_maxEpochs;
-using SGD::m_doUnitTest;
-using SGD::m_learnRateAdjustInterval;
-using SGD::m_mbSize;
-using SGD::m_momentumPerSample;
-using SGD::m_learningRatesPerSample;
-using SGD::m_dropoutRates;
-using SGD::m_autoLearnRateSearchType;
-using SGD::m_minLearnRate;
-using SGD::m_loadBestModel;
-using SGD::m_validateAfterModelReloading;
-using SGD::m_continueReduce;
-using SGD::m_reduceLearnRateIfImproveLessThan;
-using SGD::m_epochSize;
-using SGD::m_learnRateDecreaseFactor;
-using SGD::m_increaseLearnRateIfImproveMoreThan;
-using SGD::m_learnRateIncreaseFactor;
-using SGD::m_keepCheckPointFiles;
-using SGD::m_doGradientCheck;
-using SGD::m_L2RegWeight;
-using SGD::m_L1RegWeight;
-using SGD::m_needAveMultiplier;
-using SGD::m_traceLevel;
-using SGD::m_numMBsToShowResult;
-using SGD::m_gradientCheckSigDigit;
-using SGD::m_prevChosenMinibatchSize;
+using SGDBase::m_modelPath;
+using SGDBase::m_maxEpochs;
+using SGDBase::m_doUnitTest;
+using SGDBase::m_learnRateAdjustInterval;
+using SGDBase::m_mbSize;
+using SGDBase::m_momentumPerSample;
+using SGDBase::m_learningRatesPerSample;
+using SGDBase::m_dropoutRates;
+using SGDBase::m_autoLearnRateSearchType;
+using SGDBase::m_minLearnRate;
+using SGDBase::m_loadBestModel;
+using SGDBase::m_validateAfterModelReloading;
+using SGDBase::m_continueReduce;
+using SGDBase::m_reduceLearnRateIfImproveLessThan;
+using SGDBase::m_epochSize;
+using SGDBase::m_learnRateDecreaseFactor;
+using SGDBase::m_increaseLearnRateIfImproveMoreThan;
+using SGDBase::m_learnRateIncreaseFactor;
+using SGDBase::m_keepCheckPointFiles;
+using SGDBase::m_doGradientCheck;
+using SGDBase::m_L2RegWeight;
+using SGDBase::m_L1RegWeight;
+using SGDBase::m_needAveMultiplier;
+using SGDBase::m_traceLevel;
+using SGDBase::m_numMBsToShowResult;
+using SGDBase::m_gradientCheckSigDigit;
+using SGDBase::m_prevChosenMinibatchSize;
typedef ComputationNode<ElemType>* ComputationNodePtr;
@@ -80,7 +80,7 @@ namespace Microsoft {
list<pair<ComputationNodePtr, ComputationNodePtr>> m_lst_pair_encoder_decoder_nodes;
public:
-MultiNetworksSGD(const ConfigParameters& configSGD) : SGD(configSGD)
+MultiNetworksSGD(const ConfigParameters& configSGD) : SGDBase(configSGD)
{
}
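
The rename works because a using-declaration may reach base members through an alias of the base class, and it avoids re-declaring the template's own name inside the derived class. A minimal standalone sketch of the pattern, with Base/Derived as hypothetical stand-ins for SGD/MultiNetworksSGD:

#include <cstdio>

template <class ElemType>
class Base
{
protected:
    int m_traceLevel = 0;
};

template <class ElemType>
class Derived : public Base<ElemType>
{
    // Alias the dependent base once, under a distinct name; the old
    // "typedef SGD<ElemType> SGD;" re-declared the template's own name.
    using BaseAlias = Base<ElemType>;

public:
    // Members of a dependent base are invisible to unqualified lookup
    // inside a template, so each one is pulled in explicitly.
    using BaseAlias::m_traceLevel;

    void Bump() { m_traceLevel++; fprintf(stderr, "%d\n", m_traceLevel); }
};

int main()
{
    Derived<float> d;
    d.Bump(); // prints 1
    return 0;
}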
@@ -663,7 +663,7 @@ namespace Microsoft {
if (learnRatePerSample < m_minLearnRate)
{
-fprintf(stderr, "Learn Rate Per Sample for Epoch[%lu] = %.8g is less than minLearnRate %.8g. Training stops.\n", i + 1, learnRatePerSample, m_minLearnRate);
+fprintf(stderr, "Learn Rate Per Sample for Epoch[%d] = %.8g is less than minLearnRate %.8g. Training stops.\n", i + 1, learnRatePerSample, m_minLearnRate);
break;
}
@@ -692,7 +692,7 @@ namespace Microsoft {
IDataReader<ElemType>* decoderTrainSetDataReader = trainDataReader[decoderIdx];
ComputationNetwork<ElemType>* decoderNet = nets[decoderIdx];
-fprintf(stderr, "Finished Epoch[%lu]: [Training Set] Decoder Train Loss Per Sample = %.8g ", i + 1, epochCriterion);
+fprintf(stderr, "Finished Epoch[%d]: [Training Set] Decoder Train Loss Per Sample = %.8g ", i + 1, epochCriterion);
if (epochEvalErrors.size() == 1)
{
fprintf(stderr, "EvalErr Per Sample = %.8g Ave Learn Rate Per Sample = %.10g Epoch Time=%.8g\n", epochEvalErrors[0], learnRatePerSample, epochTime);
@@ -703,9 +703,9 @@ namespace Microsoft {
for (size_t j = 0; j<epochEvalErrors.size(); j++)
fprintf(stderr, "[%lu]=%.8g ", j, epochEvalErrors[j]);
fprintf(stderr, "Ave Learn Rate Per Sample = %.10g Epoch Time=%.8g\n", learnRatePerSample, epochTime);
-fprintf(stderr, "Finished Epoch[%lu]: Criterion Node Per Sample = %.8g\n", i + 1, epochCriterion);
+fprintf(stderr, "Finished Epoch[%d]: Criterion Node Per Sample = %.8g\n", i + 1, epochCriterion);
for (size_t j = 0; j<epochEvalErrors.size(); j++)
-fprintf(stderr, "Finished Epoch[%lu]: Evaluation Node [%ws] Per Sample = %.8g\n", i + 1, evalNodeNames[j].c_str(), epochEvalErrors[j]);
+fprintf(stderr, "Finished Epoch[%d]: Evaluation Node [%ls] Per Sample = %.8g\n", i + 1, evalNodeNames[j].c_str(), epochEvalErrors[j]);
}
if (decoderValidationSetDataReader != decoderTrainSetDataReader && decoderValidationSetDataReader != nullptr)
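
The %lu -> %d changes above matter because fprintf is variadic: no conversions are applied to the trailing arguments, so each specifier must match the promoted argument type, and printing an int epoch index with %lu is undefined behavior on LP64 platforms. A minimal sketch:

#include <cstdio>

int main()
{
    int i = 4; // epoch loop index, as in the code above

    fprintf(stderr, "Finished Epoch[%d]\n", i + 1);                   // int matches %d
    fprintf(stderr, "Finished Epoch[%lu]\n", (unsigned long)(i + 1)); // or cast to match %lu
    return 0;
}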
@@ -717,7 +717,7 @@ namespace Microsoft {
validationDataReader,
m_mbSize[i]);
-fprintf(stderr, "Finished Epoch[%lu]: [Validation Set] Loss Per Sample = %.8g \n ", vScore );
+fprintf(stderr, "Finished Epoch[%d]: [Validation Set] Loss Per Sample = %.8g \n ", i+1, vScore );
epochCriterion = vScore;
}
@@ -1013,7 +1013,7 @@ namespace Microsoft {
{
epochEvalErrors[i] = (const ElemType)localEpochEvalErrors(0, i);
}
-fprintf(stderr, "total samples in epoch[%d] = %d\n", epochNumber, totalEpochSamples);
+fprintf(stderr, "total samples in epoch[%d] = %zd\n", epochNumber, totalEpochSamples);
}
bool EncoderDecoderGradientCheck(
@@ -1053,7 +1053,7 @@ namespace Microsoft {
irow = max(0, irow);
icol = max(0, icol);
-fprintf(stderr, "\n###### d%ws######\n", node->NodeName().c_str());
+fprintf(stderr, "\n###### d%ls######\n", node->NodeName().c_str());
deviceId = node->FunctionValues().GetDeviceId(); // original device id
node->FunctionValues().TransferFromDeviceToDevice(deviceId, CPUDEVICE, true, false, false);
@@ -1124,7 +1124,7 @@ namespace Microsoft {
if (wrong)
{
char serr[2048];
-sprintf_s((char*)serr, 2048, "Decoder %ws Numeric gradient = %e, Error BP gradient = %e", node->NodeName().c_str(), grdNum, grdErr);
+sprintf_s((char*)serr, 2048, "Decoder %ls Numeric gradient = %e, Error BP gradient = %e", node->NodeName().c_str(), grdNum, grdErr);
fprintf(stdout, "%s\n", serr);
verror_msgs.push_back(serr);
}

View file

@@ -1532,7 +1532,7 @@ protected:
if (epochNumber < 2 && m_prevChosenMinibatchSize != 0)
{
// newly started training: any previous MB size stored in the model is to be ignored
-fprintf(stderr, "before epoch .2, previous minibatchSize %d is "
+fprintf(stderr, "before epoch .2, previous minibatchSize %zd is "
"considered invalid -> resetting\n", m_prevChosenMinibatchSize);
m_prevChosenMinibatchSize = 0;
}
@@ -1543,7 +1543,7 @@ protected:
(epochNumber + 1) % m_minibatchSizeTuningFrequency != 0)
{
fprintf(stderr, "AdaptiveMinibatchSearch: Search for a better minibatchSize "
-"in epoch %d skipped, keeping minibatchSize of %d\n",
+"in epoch %d skipped, keeping minibatchSize of %zd\n",
epochNumber + 1, m_prevChosenMinibatchSize);
chosenMinibatchSize = m_prevChosenMinibatchSize;
}
@@ -1568,7 +1568,7 @@ protected:
assert(m_prevChosenMinibatchSize >= chosenMinibatchSize);
fprintf(stderr, "AdaptiveMinibatchSearch: Limiting maxMinibatchSize to "
-"previous minibatchSize %d*2\n", m_prevChosenMinibatchSize);
+"previous minibatchSize %zd*2\n", m_prevChosenMinibatchSize);
maxMinibatchSize = min(maxMinibatchSize, m_prevChosenMinibatchSize * 2);
}
@@ -1634,7 +1634,7 @@ protected:
// round mbsize to something meaningful
trialMinibatchSize = RoundToMultipleOf64(trialMinibatchSizeFloat);
-fprintf(stderr, "\nAdaptiveMinibatchSearch: Evaluating trial minibatchSize=%d out of range %d..%d ...\n\n",
+fprintf(stderr, "\nAdaptiveMinibatchSearch: Evaluating trial minibatchSize=%zd out of range %zd..%zd ...\n\n",
trialMinibatchSize, RoundToMultipleOf64(minMinibatchSize), RoundToMultipleOf64(maxMinibatchSize));
size_t totalSamplesSeen;
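
For context, the rounding helper named in this hunk can be a one-liner; this is a hypothetical sketch, not necessarily CNTK's exact definition:

#include <cstddef>

// Round a candidate minibatch size to the nearest multiple of 64, so
// trial sizes land on hardware-friendly boundaries.
static size_t RoundToMultipleOf64(float val)
{
    return 64 * static_cast<size_t>((val + 32) / 64);
}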

View file

@@ -911,7 +911,7 @@ namespace Microsoft {
ComputeTimeInMBs += MBComputeTime;
-fprintf(stderr, "Sentenes Seen = %d; Samples seen = %d; Total Compute Time = %.8g ; Time Per Sample=%.8g\n", numMBsRun, totalEpochSamples, ComputeTimeInMBs, ComputeTimeInMBs / totalEpochSamples);
+fprintf(stderr, "Sentences Seen = %zd; Samples seen = %zd; Total Compute Time = %.8g ; Time Per Sample=%.8g\n", numMBsRun, totalEpochSamples, ComputeTimeInMBs, ComputeTimeInMBs / totalEpochSamples);
}
startReadMBTime = clock();
@@ -1229,7 +1229,7 @@ namespace Microsoft {
{
ElemType score = result_queue.top().score;
best_score = score;
-fprintf(stderr, "best[%d] score = %.4e\t", ibest, score);
+fprintf(stderr, "best[%zd] score = %.4e\t", ibest, score);
if (best_path.size() > 0)
WriteNbest(ibest, best_path, outputNodes, dataWriter);
}