Fixed some more warnings and size_t/int issues

Frank Seide 2014-10-14 16:41:06 -07:00
Parent 0ea819a1c8
Commit 3c888b1d75
7 changed files: 65 additions and 66 deletions
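Context note (not part of the diff): the int → size_t changes in this commit target MSVC's truncation warnings such as C4267 ("conversion from 'size_t' to 'int', possible loss of data"). A minimal sketch of the pattern, with hypothetical names:

    #include <vector>
    #include <cstddef>

    void Example(const std::vector<float>& rhs)
    {
        // int n = rhs.size();          // C4267: conversion from 'size_t' to 'int', possible loss of data
        size_t n = rhs.size();          // keep the count in size_t, as done throughout this commit
        for (size_t i = 0; i < n; i++)  // size_t vs. size_t: no signed/unsigned comparison warning (C4018)
        {
            // ...
        }
    }

Keeping counts in size_t also sidesteps C4018 when they are compared against .size()- or GetNumRows()-style values.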

View file

@@ -1959,7 +1959,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
rInfo.m_loopId = loopId;
rInfo.m_sourceNode = cur;
size_t sccSize = 0;
while(true)
for (;;)
{
ComputationNodePtr w = sccStack.back();
sccStack.pop_back();
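The while(true) → for (;;) change above is a standard way to silence MSVC warning C4127 ("conditional expression is constant") at /W4: a for loop with an empty condition expresses the same infinite loop without a constant test. Illustrative sketch (the Done() helper is hypothetical, not CNTK code):

    bool Done();   // hypothetical exit test, for illustration only

    void DrainStack()
    {
        // while (true)   // C4127: conditional expression is constant
        for (;;)          // same infinite loop, no constant condition to warn about
        {
            if (Done())
                break;
        }
    }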

View file

@@ -78,4 +78,4 @@ namespace Microsoft { namespace MSR { namespace CNTK {
}
}
};
}}}
}}}

View file

@@ -211,8 +211,6 @@ namespace Microsoft { namespace MSR { namespace CNTK {
{
if (m_net->GetTotalNumberOfNodes() < 1) //not built yet
{
ULONG randomSeed = 1;
LoadNetworkFromConfig(m_networkConfig);
}
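If the ULONG randomSeed local above is one of the two lines this hunk removes, the likely motivation is MSVC's C4189 ("local variable is initialized but not referenced"): the value is never read, so the declaration can simply be deleted. Minimal illustration, not taken from the commit:

    void Sketch()
    {
        // ULONG randomSeed = 1;   // C4189: 'randomSeed': local variable is initialized but not referenced
        // ...the variable was never used, so removing the line is the whole fix.
    }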

View file

@@ -195,7 +195,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
}
//returns error rate
ElemType EvaluateUnroll(IDataReader<ElemType>& dataReader, const size_t mbSize, ElemType &evalSetCrossEntropy, const wchar_t* output=nullptr, const size_t testSize=requestDataSize)
ElemType EvaluateUnroll(IDataReader<ElemType>& dataReader, const size_t mbSize, ElemType &evalSetCrossEntropy, const wchar_t* output = nullptr, const size_t testSize = requestDataSize)
{
std::vector<ComputationNodePtr> FeatureNodes = m_net.FeatureNodes();
@@ -203,14 +203,14 @@ namespace Microsoft { namespace MSR { namespace CNTK {
std::vector<ComputationNodePtr> criterionNodes = m_net.FinalCriterionNodes();
std::vector<ComputationNodePtr> evaluationNodes = m_net.EvaluationNodes();
if (criterionNodes.size()==0)
{
throw new runtime_error("No CrossEntropyWithSoftmax node found\n");
}
if (evaluationNodes.size()==0)
{
throw new runtime_error("No Evaluation node found\n");
}
if (criterionNodes.size()==0)
{
throw new runtime_error("No CrossEntropyWithSoftmax node found\n");
}
if (evaluationNodes.size()==0)
{
throw new runtime_error("No Evaluation node found\n");
}
std::map<std::wstring, Matrix<ElemType>*> inputMatrices;
for (size_t i=0; i<FeatureNodes.size(); i++)
@@ -228,13 +228,13 @@ namespace Microsoft { namespace MSR { namespace CNTK {
ElemType epochEvalError = 0;
ElemType epochCrossEntropy = 0;
size_t totalEpochSamples = 0;
ElemType prevEpochEvalError = 0;
ElemType prevEpochCrossEntropy = 0;
ElemType prevEpochEvalError = 0;
ElemType prevEpochCrossEntropy = 0;
size_t prevTotalEpochSamples = 0;
size_t prevStart = 1;
size_t numSamples = 0;
ElemType crossEntropy = 0;
ElemType evalError = 0;
size_t prevStart = 1;
size_t numSamples = 0;
ElemType crossEntropy = 0;
ElemType evalError = 0;
ofstream outputStream;
if (output)
@@ -246,27 +246,27 @@ namespace Microsoft { namespace MSR { namespace CNTK {
size_t actualMBSize = 0;
while (dataReader.GetMinibatch(inputMatrices))
{
size_t nbrSamples = (size_t)(*inputMatrices[L"numberobs"])(0,0);
size_t nbrSamples = (size_t)(*inputMatrices[L"numberobs"])(0, 0);
actualMBSize = nbrSamples;
for (int npos = 0; npos < nbrSamples ; npos++)
{
FeatureNodes[npos]->UpdateEvalTimeStamp();
labelNodes[npos]->UpdateEvalTimeStamp();
{
FeatureNodes[npos]->UpdateEvalTimeStamp();
labelNodes[npos]->UpdateEvalTimeStamp();
m_net.Evaluate(criterionNodes[npos]); //use only the first criterion. Is there any possibility to use more?
m_net.Evaluate(evaluationNodes[npos]);
ElemType mbCrossEntropy = criterionNodes[npos]->FunctionValues().Get00Element(); // criterionNode should be a scalar
epochCrossEntropy += mbCrossEntropy;
ElemType mbCrossEntropy = criterionNodes[npos]->FunctionValues().Get00Element(); // criterionNode should be a scalar
epochCrossEntropy += mbCrossEntropy;
ElemType mbEvalError = evaluationNodes[npos]->FunctionValues().Get00Element(); //criterionNode should be a scalar
epochEvalError += mbEvalError;
}
totalEpochSamples += actualMBSize;
totalEpochSamples += actualMBSize;
if (outputStream.is_open())
{
@@ -282,43 +282,43 @@ namespace Microsoft { namespace MSR { namespace CNTK {
}
}
numMBsRun++;
if (numMBsRun % m_numMBsToShowResult == 0)
{
numSamples = (totalEpochSamples-prevTotalEpochSamples);
crossEntropy = epochCrossEntropy - prevEpochCrossEntropy;
evalError = epochEvalError - prevEpochEvalError;
numMBsRun++;
if (numMBsRun % m_numMBsToShowResult == 0)
{
numSamples = (totalEpochSamples - prevTotalEpochSamples);
crossEntropy = epochCrossEntropy - prevEpochCrossEntropy;
evalError = epochEvalError - prevEpochEvalError;
fprintf(stderr,"Minibatch[%lu-%lu]: Samples Evaluated = %lu EvalErr Per Sample = %.8g Loss Per Sample = %.8g\n",
prevStart, numMBsRun, numSamples, evalError/numSamples, crossEntropy/numSamples);
prevTotalEpochSamples = totalEpochSamples;
prevEpochCrossEntropy = epochCrossEntropy;
prevEpochEvalError = epochEvalError;
prevStart = numMBsRun + 1;
}
fprintf(stderr, "Minibatch[%lu-%lu]: Samples Evaluated = %lu EvalErr Per Sample = %.8g Loss Per Sample = %.8g\n",
prevStart, numMBsRun, numSamples, evalError / numSamples, crossEntropy / numSamples);
prevTotalEpochSamples = totalEpochSamples;
prevEpochCrossEntropy = epochCrossEntropy;
prevEpochEvalError = epochEvalError;
prevStart = numMBsRun + 1;
}
}
// show final grouping of output
numSamples = totalEpochSamples-prevTotalEpochSamples;
// show final grouping of output
numSamples = totalEpochSamples - prevTotalEpochSamples;
if (numSamples > 0)
{
crossEntropy = epochCrossEntropy - prevEpochCrossEntropy;
evalError = epochEvalError - prevEpochEvalError;
fprintf(stderr,"Minibatch[%lu-%lu]: Samples Evaluated = %lu EvalErr Per Sample = %.8g Loss Per Sample = %.8g\n",
prevStart, numMBsRun, numSamples, evalError/numSamples, crossEntropy/numSamples);
crossEntropy = epochCrossEntropy - prevEpochCrossEntropy;
evalError = epochEvalError - prevEpochEvalError;
fprintf(stderr, "Minibatch[%lu-%lu]: Samples Evaluated = %lu EvalErr Per Sample = %.8g Loss Per Sample = %.8g\n",
prevStart, numMBsRun, numSamples, evalError / numSamples, crossEntropy / numSamples);
}
//final statistics
epochEvalError /= (ElemType)totalEpochSamples;
epochCrossEntropy /= (ElemType)totalEpochSamples;
fprintf(stderr,"Overall: Samples Evaluated = %lu EvalErr Per Sample = %.8g Loss Per Sample = %.8g\n", totalEpochSamples, epochEvalError,epochCrossEntropy);
epochEvalError /= (ElemType)totalEpochSamples;
epochCrossEntropy /= (ElemType)totalEpochSamples;
fprintf(stderr, "Overall: Samples Evaluated = %lu EvalErr Per Sample = %.8g Loss Per Sample = %.8g\n", totalEpochSamples, epochEvalError, epochCrossEntropy);
if (outputStream.is_open())
{
outputStream.close();
}
evalSetCrossEntropy = epochCrossEntropy;
evalSetCrossEntropy = epochCrossEntropy;
return epochEvalError;
}
@@ -340,9 +340,9 @@ namespace Microsoft { namespace MSR { namespace CNTK {
ComputationNetwork<ElemType>& m_net;
size_t m_numMBsToShowResult;
UINT16 m_traceLevel;
//void operator=(const SimpleEvaluator&) { throw std::logic_error("operator= not available"); }
};
template class SimpleEvaluator<float>;
template class SimpleEvaluator<double>;
}}}
}}}
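Most of the SimpleEvaluator changes above are whitespace normalization; the surrounding EvaluateUnroll loop accumulates epoch totals and, every m_numMBsToShowResult minibatches, reports per-sample averages by differencing against the totals saved at the previous report. A stripped-down sketch of that reporting pattern (hypothetical names and placeholder per-minibatch values, not the CNTK API):

    #include <cstdio>
    #include <cstddef>

    // Report per-sample loss/error every `reportEvery` minibatches by differencing
    // running totals against the totals recorded at the previous report.
    void ReportLoop(size_t numMinibatches, size_t samplesPerMB, size_t reportEvery)
    {
        double totalLoss = 0, totalErr = 0;
        double prevLoss = 0, prevErr = 0;
        size_t totalSamples = 0, prevSamples = 0, prevStart = 1;

        for (size_t mb = 1; mb <= numMinibatches; mb++)
        {
            totalLoss    += 0.5 * samplesPerMB;   // stand-in for the minibatch cross-entropy sum
            totalErr     += 0.1 * samplesPerMB;   // stand-in for the minibatch error count
            totalSamples += samplesPerMB;

            if (mb % reportEvery == 0)
            {
                size_t n = totalSamples - prevSamples;
                fprintf(stderr, "Minibatch[%lu-%lu]: Samples Evaluated = %lu EvalErr Per Sample = %.8g Loss Per Sample = %.8g\n",
                        (unsigned long)prevStart, (unsigned long)mb, (unsigned long)n,
                        (totalErr - prevErr) / n, (totalLoss - prevLoss) / n);
                prevSamples = totalSamples; prevLoss = totalLoss; prevErr = totalErr;
                prevStart = mb + 1;
            }
        }
    }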

View file

@@ -208,6 +208,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
private:
ComputationNetwork<ElemType>& m_net;
int m_verbosity;
//void operator=(const SimpleOutputWriter&) { throw std::logic_error("operator= not available"); }
};
template class SimpleOutputWriter<float>;
template class SimpleOutputWriter<double>;

View file

@@ -707,15 +707,15 @@ public:
virtual ~SynchronousNodeEvaluator()
{
}
private:
ComputationNetwork<ElemType>& m_net;
typedef ComputationNode<ElemType>* ComputationNodePtr;
//void operator=(const SynchronousNodeEvaluator&) { throw std::logic_error("operator= not available"); }
};
template class SynchronousNodeEvaluator<float>;
template class SynchronousNodeEvaluator<double>;
//template class SynchronousNodeEvaluator<float>;
//template class SynchronousNodeEvaluator<double>;
// SynchronousExecutionEngine
// TODO JC Refactor eligible methods and members into abstract base class.
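The classes touched above (SimpleEvaluator, SimpleOutputWriter, SynchronousNodeEvaluator) each hold a reference member such as ComputationNetwork<ElemType>& m_net, which makes MSVC emit C4512 ("assignment operator could not be generated"); the commented-out operator= lines appear to be related to that warning. The classic pre-C++11 suppression is to declare a private assignment operator and never define it. Sketch under that assumption (Net is a stand-in class, not the CNTK type):

    template <class ElemType>
    class Net;   // stand-in for ComputationNetwork<ElemType>, for illustration only

    template <class ElemType>
    class Evaluator
    {
    public:
        explicit Evaluator(Net<ElemType>& net) : m_net(net) { }

    private:
        Net<ElemType>& m_net;                    // reference member => compiler cannot generate operator=
        Evaluator& operator=(const Evaluator&);  // declared, never defined: suppresses C4512 pre-C++11
        // C++11 alternative: Evaluator& operator=(const Evaluator&) = delete;
    };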

View file

@@ -612,8 +612,8 @@ namespace Microsoft { namespace MSR { namespace CNTK {
c *= beta;
}
int blocksPerGrid = rhs.m_nz;
int p = (threadsPerBlock < lhs.GetNumRows())? threadsPerBlock : lhs.GetNumRows();
size_t blocksPerGrid = rhs.m_nz;
size_t p = (threadsPerBlock < lhs.GetNumRows())? threadsPerBlock : lhs.GetNumRows();
if (!transposeA && !transposeB)
{
@@ -684,7 +684,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
{
cudaEvent_t done;
CUDACALL(cudaEventCreate(&done));
int blocksPerGrid =rhs.GetNZElements();
size_t blocksPerGrid =rhs.GetNZElements();
_denseMulSparseToSparse<ElemType><<<blocksPerGrid, threadsPerBlock>>>(
lhs.BufferPointer(),
lhs.GetNumRows(),
@@ -720,7 +720,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
cudaEvent_t done;
CUDACALL(cudaEventCreate(&done));
int blocksPerGrid =lhs.m_blockSize;
size_t blocksPerGrid = lhs.m_blockSize;
_scaleAndAdd<ElemType><<<blocksPerGrid, threadsPerBlock>>>(
alpha,
blockCol,
@@ -772,7 +772,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
cudaEvent_t done;
CUDACALL(cudaEventCreate(&done));
int blocksPerGrid = label.m_expandedSize;
size_t blocksPerGrid = label.m_expandedSize;
//_computePrediction<ElemType><<<blocksPerGrid, threadsPerBlock>>>(
_computePrediction<ElemType><<<blocksPerGrid, 20>>>(
@@ -834,7 +834,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
cudaEvent_t done;
CUDACALL(cudaEventCreate(&done));
int blocksPerGrid =grd.GetNumElements();
size_t blocksPerGrid = grd.GetNumElements();
//_computeGradientOfInput<ElemType><<<blocksPerGrid, threadsPerBlock>>>(
_computeGradientOfInput<ElemType><<<blocksPerGrid, 20>>>(
error.m_val,
@@ -872,7 +872,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
cudaEvent_t done;
CUDACALL(cudaEventCreate(&done));
int blocksPerGrid =error.m_nz;
size_t blocksPerGrid = error.m_nz;
_computeGradientOfWeight<ElemType><<<blocksPerGrid, threadsPerBlock>>>(
error.m_val,
error.m_row,
@@ -925,7 +925,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
if(m_format == MatrixFormat::matrixFormatSparseBlockCol || m_format == MatrixFormat::matrixFormatSparseBlockRow)
{
int blocksPerGrid = m_blockSize;
size_t blocksPerGrid = m_blockSize;
bool isBlockCol = (m_format == MatrixFormat::matrixFormatSparseBlockCol);
size_t len = isBlockCol ? GetNumRows(): GetNumCols();
cudaEvent_t done;
@@ -1636,9 +1636,9 @@ namespace Microsoft { namespace MSR { namespace CNTK {
CUDACALL(cudaMemcpy(&h_sum,d_sum,sizeof(ElemType),cudaMemcpyDeviceToHost));
CUDACALL(cudaFree(d_sum));
if (sizeof(ElemType)==sizeof(float))
return (ElemType)sqrtf(h_sum);
return (ElemType)sqrtf((float)h_sum);
else
return (ElemType)sqrt(h_sum);
return (ElemType)sqrt((double)h_sum);
}
template<class ElemType>
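The sqrtf/sqrt casts in the last hunk address a warning that is specific to templates: both branches of the if are compiled for every ElemType, so when ElemType is double the sqrtf(h_sum) call implicitly narrows double to float and MSVC reports C4244. The explicit (float)/(double) casts keep the behavior but make the narrowing intentional. A reduced sketch outside of CNTK:

    #include <math.h>

    template <class ElemType>
    ElemType RootOfSum(ElemType h_sum)
    {
        // Both branches compile for every instantiation; without the cast the sqrtf()
        // call warns (C4244, double -> float) whenever ElemType is double.
        if (sizeof(ElemType) == sizeof(float))
            return (ElemType) sqrtf((float) h_sum);
        else
            return (ElemType) sqrt((double) h_sum);
    }

In C++17 an if constexpr would avoid compiling the unused branch altogether; with 2014-era compilers the explicit cast is the straightforward fix.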