Missing tabs-to-spaces conversion

This commit is contained in:
Mark Hillebrand 2016-01-18 09:40:10 +01:00
Родитель 6c5aa5809e
Коммит f1c3232024
5 изменённых файлов: 20 добавлений и 20 удалений

Просмотреть файл

@ -148,7 +148,7 @@ template void DoCreateLabelMap<double>(const ConfigParameters& config);
// 1) modelPath -- path to the existing model
// 2) outputmodelPath -- where to write the transformed model
// 3) KeepRatio -- how many percentage of energy we want to keep
// 4) AlignedSize -- the resultant number of singular values is aligned to e.g., 32 or 64
// 4) AlignedSize -- the resultant number of singular values is aligned to e.g., 32 or 64
// 5) ParameterName -- name (regex) of the parameter node we want to perform a SVD decomposition
//
//////////////////////////////////////////////////////////////////////////

Просмотреть файл

@ -4481,7 +4481,7 @@ GPUMatrix<ElemType>& GPUMatrix<ElemType>::AssignElementProductOfWithShiftNeg(con
if (do_sync)
CUDA_CALL(cudaEventCreate(&done));
_assignElementProductOfWithShiftNeg<ElemType><<<block_tail, thread_tail, 0, t_stream>>>(m_pArray, a.m_pArray, b.m_pArray, shift, nt + 1, BS);
// _assignElementProductOf<ElemType> << <block_tail, thread_tail, 0, t_stream >> >(m_pArray, a.m_pArray, b.m_pArray, nt);
// _assignElementProductOf<ElemType> << <block_tail, thread_tail, 0, t_stream >> >(m_pArray, a.m_pArray, b.m_pArray, nt);
if (do_sync)
CUDA_CALL(cudaEventRecord(done));
@ -4556,7 +4556,7 @@ GPUMatrix<ElemType>& GPUMatrix<ElemType>::GetARowByIndex(const GPUMatrix<ElemTyp
if (do_sync)
CUDA_CALL(cudaEventCreate(&done));
_getARowByIndex<ElemType><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, t_stream>>>(m_pArray, a.m_pArray, n, P, m);
// _assignElementProductOf<ElemType> << <block_tail, thread_tail, 0, t_stream >> >(m_pArray, a.m_pArray, b.m_pArray, nt);
// _assignElementProductOf<ElemType> << <block_tail, thread_tail, 0, t_stream >> >(m_pArray, a.m_pArray, b.m_pArray, nt);
if (do_sync)
CUDA_CALL(cudaEventRecord(done));

Просмотреть файл

@ -423,12 +423,12 @@ struct latticefunctionskernels
pathscore0 += getlogtransp(transP,-1,0) + logLLs(senoneid0,ts);
pathscore2 += getlogtransp(transP,-1,2) + logLLs(senoneid2,ts);
}
else //for others, only -1 to 0 is permitted
{
pathscore0 += getlogtransp(transP, -1, 0) + logLLs(senoneid0, ts);
pathscore1 += getlogtransp(transP, -1, 1) + logLLs(senoneid1, ts);
else //for others, only -1 to 0 is permitted
{
pathscore0 += getlogtransp(transP, -1, 0) + logLLs(senoneid0, ts);
pathscore1 += getlogtransp(transP, -1, 1) + logLLs(senoneid1, ts);
}*/
}*/
pathscore2 += getlogtransp(transP, -1, 2) + logLLs(senoneid2, ts);
pathscore1 += getlogtransp(transP, -1, 1) + logLLs(senoneid1, ts);
//state1stepm1to1 = ts;

Просмотреть файл

@ -282,7 +282,7 @@ public:
delete m_labelsIdBufferRow;
delete m_labelsBlock2Id;
delete m_labelsBlock2UniqId;
*/
*/
}
virtual ~SequenceReader();
virtual void StartMinibatchLoop(size_t mbSize, size_t epoch, size_t requestedEpochSamples = requestDataSize);
@ -336,8 +336,8 @@ public:
using SequenceReader<ElemType>::m_traceLevel;
using SequenceReader<ElemType>::m_featureCount;
using SequenceReader<ElemType>::m_endReached;
// using IDataReader<ElemType>::labelIn;
// using IDataReader<ElemType>::labelOut;
// using IDataReader<ElemType>::labelIn;
// using IDataReader<ElemType>::labelOut;
using SequenceReader<ElemType>::InitCache;
using SequenceReader<ElemType>::m_readerConfig;
using SequenceReader<ElemType>::ReleaseMemory;
@ -345,8 +345,8 @@ public:
using SequenceReader<ElemType>::m_featuresBufferRow;
using SequenceReader<ElemType>::m_labelsBuffer;
using SequenceReader<ElemType>::m_labelsIdBuffer;
// using IDataReader<ElemType>::labelInfo;
// using SequenceReader<ElemType>::m_featuresBufferRowIndex;
// using IDataReader<ElemType>::labelInfo;
// using SequenceReader<ElemType>::m_featuresBufferRowIndex;
using SequenceReader<ElemType>::m_labelsIdBufferRow;
using SequenceReader<ElemType>::m_labelsBlock2Id;
using SequenceReader<ElemType>::m_labelsBlock2UniqId;

Просмотреть файл

@ -349,7 +349,7 @@ bool SparseBinaryInput<ElemType>::Randomize()
return true;
}
return false;
*/
*/
}
template <class ElemType>
@ -548,14 +548,14 @@ size_t SparseBinaryInput<ElemType>::ReadMinibatch(void* data_buffer, std::map<st
//fprintf(stderr, "start read minibatch.\n");
/*
size_t readSize = m_offsets[cur_batch + 1] - m_offsets[cur_batch];
void* data_buffer = GetTempDataPointer(readSize);
void* data_buffer = GetTempDataPointer(readSize);
fprintf(stderr, "start reading data.\n");
fprintf(stderr, "start reading data.\n");
m_inFile.clear();
m_inFile.seekg(m_dataStart + m_offsets[cur_batch], ios::beg);
m_inFile.read((char*)data_buffer, readSize);
fprintf(stderr, "done reading data.\n");
*/
m_inFile.read((char*)data_buffer, readSize);
fprintf(stderr, "done reading data.\n");
*/
int32_t nnz;
int32_t curMBSize;
@ -850,7 +850,7 @@ bool LibSVMBinaryReader<ElemType>::GetMinibatch(std::map<std::wstring, Matrix<El
timer = clock() - timer;
fprintf(stderr, "It took me %d clicks (%f seconds).\n", timer, ((float)timer) / CLOCKS_PER_SEC);
*/
*/
//fprintf(stderr, "done\n");
return true;
}