Merge branch 'master' into deepbliscore/vnext
This commit is contained in:
Commit 49a1d3bb4c
@@ -1,5 +1,18 @@
Dockerfile-CPU text
Dockerfile-GPU text

*.ini text
*.simple text
*.counts text
*.labels text
*.feats text
*.post text
*.cpu text
*.gpu text

.gitattributes text
.gitignore text
.gitmodules text

.clang-format text

@@ -15,6 +28,8 @@ Readme text
*.py text
*.pl text
*.ps1 text
*.ps text

*.sh text eol=lf
build-and-test text eol=lf
configure text eol=lf
@@ -14,7 +14,7 @@ imageLayout = "cudnn"
# override the above as follows when running on CPU:
# deviceId = -1

command = MNISTtrain:MNISTtest
command = train:test

precision = "float"
modelPath = "$ModelDir$/01_OneHidden"

@@ -25,11 +25,14 @@ ndlMacros = "$ConfigDir$/Macros.ndl"
traceLevel=1
numMBsToShowResult=500

# If set to true, always initialize the network on CPU, making initialization consistent across CPU and GPU targets (for testing).
initOnCPUOnly=true

#######################################
# TRAINING CONFIG #
#######################################

MNISTtrain = [
train = [
action = "train"

NDLNetworkBuilder = [

@@ -44,6 +47,7 @@ MNISTtrain = [
maxEpochs = 30
]

# Note: this reader crashes if randomization is turned on.
reader = [
readerType = "UCIFastReader"
# To get the data (Train-28x28.txt) please run `python mnist_convert.py`

@@ -68,7 +72,7 @@ MNISTtrain = [
# TEST CONFIG #
#######################################

MNISTtest = [
test = [
action = "test"
minibatchSize = 16
@@ -11,8 +11,9 @@ ModelDir = "$OutputDir$/Models"

deviceId = 0
imageLayout = "cudnn"
# override the above as follows when running on CPU:
# Override the above as follows when running on CPU:
# deviceId = -1
# Note: Compared to GPU, this runs very slow.

command = train:test

@@ -27,6 +28,9 @@ numMBsToShowResult=500

prefetch=true

# If set to true, always initialize the network on CPU, making initialization consistent across CPU and GPU targets (for testing).
initOnCPUOnly=true

#######################################
# TRAINING CONFIG #
#######################################

@@ -46,6 +50,7 @@ train = [
maxEpochs = 15
]

# Note: this reader crashes if randomization is turned on.
reader = [
readerType = "UCIFastReader"
# To get the data (Train-28x28.txt) please run `python mnist_convert.py`
@@ -11,7 +11,8 @@ ModelDir = "$OutputDir$/Models"

deviceId = 0
imageLayout = "cudnn"
# override the above as follows when running on CPU:
# Note: Batch normalization training on CPU is not yet implemented.
# When it is, override the above as follows when running on CPU:
# deviceId = -1

command = train:test

@@ -23,6 +24,12 @@ ndlMacros = "$ConfigDir$/Macros.ndl"
# uncomment the following line to write logs to a file
# stderr = "$OutputDir$/03_ConvBatchNorm_out"

traceLevel=1
numMBsToShowResult=500

# If set to true, always initialize the network on CPU, making initialization consistent across CPU and GPU targets (for testing).
initOnCPUOnly=true

#######################################
# TRAINING CONFIG #
#######################################

@@ -44,6 +51,7 @@ train = [
batchNormalizationBlendTimeConstant=0:1#INF
]

# Note: this reader crashes if randomization is turned on.
reader = [
readerType = "UCIFastReader"
# To get the data (Train-28x28.txt) please run `python mnist_convert.py`
The diff for this file is not shown because it is too large.
The diff for this file is not shown because it is too large.
@@ -63,7 +63,7 @@ Train=[
height=224
channels=3
# Below are the optional parameters.
# Possible values: Center, Random. Default: Center
# Possible values: Center, Random, MultiView10. Default: Center
cropType="Random"
# Horizontal random flip, will be enabled by default if cropType=Random
#hflip="true"

@@ -105,7 +105,7 @@ Test=[
width=224
height=224
channels=3
cropType="Center"
cropType="Center" # Use MultiView10 to enable 10-view testing.
meanFile="$ConfigDir$/ImageNet1K_mean.xml"
]
labels=[
@@ -67,7 +67,7 @@ Train=[
height=224
channels=3
# Below are the optional parameters.
# Possible values: Center, Random. Default: Center
# Possible values: Center, Random, MultiView10. Default: Center
cropType="Random"
# Horizontal random flip, will be enabled by default if cropType=Random
#hflip="true"

@@ -109,7 +109,7 @@ Test=[
width=224
height=224
channels=3
cropType="Center"
cropType="Center" # Use MultiView10 to enable 10-view testing.
meanFile="$ConfigDir$/ImageNet1K_mean.xml"
]
labels=[
README.md
@@ -1,15 +1,17 @@
# CNTK

## Latest news
*2016-04-12.* CNTK is available as [Azure Virtual Machines](https://github.com/Microsoft/CNTK/wiki/CNTK-on-Azure) and [Docker Containers](https://github.com/Microsoft/CNTK/wiki/CNTK-Docker-Containers)

*2016-04-12.* Added support for ND convolution and ND pooling and CPU support for `cudnn` layout in convolution, pooling and batch normalization nodes.
Read [documentation](https://github.com/Microsoft/CNTK/wiki/Full-NDL-Function-Reference) on convolution, pooling and batch normalization nodes.

*2016-04-05.* CUDA7.5 support for Windows Build: Windows project files have been updated to automatically utilize CUDA 7.5 if present

## March 2016
*2016-03-24.* New Text Reader (CNTKTextFormatReader) is available
Read description here https://github.com/Microsoft/CNTK/wiki/CNTKTextFormat-Reader

*2016-02-29.* Added ZIP files support to the ImageReader
Examples: https://github.com/Microsoft/CNTK/wiki/Image-reader
Updated build steps at https://github.com/Microsoft/CNTK/wiki/Setup-CNTK-on-your-machine

See [all news](https://github.com/Microsoft/CNTK/wiki/News).

## What is CNTK
@@ -47,21 +47,21 @@
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="$(DebugBuild)">
<LinkIncremental>true</LinkIncremental>
<IncludePath>..\SequenceTrainingLib;..\SGDLib;..\ComputationNetworkLib;..\CNTK;..\Math;..\Common\Include;..\CNTK\BrainScript;$(MSMPI_INC);$(VCInstallDir)include;$(WindowsSDK_IncludePath)</IncludePath>
<LibraryPath>$(MSMPI_LIB64);$(SolutionDir)$(Platform)\$(Configuration);$(VCInstallDir)lib\amd64;$(WindowsSDK_LibraryPath_x64)</LibraryPath>
<PreBuildEventUseInBuild>false</PreBuildEventUseInBuild>
</PropertyGroup>
<PropertyGroup Condition="$(ReleaseBuild)">
<LinkIncremental>false</LinkIncremental>
<IncludePath>..\SequenceTrainingLib;..\SGDLib;..\ComputationNetworkLib;..\CNTK;..\Math;..\Common\Include;..\CNTK\BrainScript;$(MSMPI_INC);$(VCInstallDir)include;$(WindowsSDK_IncludePath)</IncludePath>
<LibraryPath>$(MSMPI_LIB64);$(SolutionDir)$(Platform)\$(Configuration);$(VCInstallDir)lib\amd64;$(WindowsSDK_LibraryPath_x64)</LibraryPath>
<ExecutablePath>$(ExecutablePath)</ExecutablePath>
<PreBuildEventUseInBuild>false</PreBuildEventUseInBuild>
</PropertyGroup>
<PropertyGroup Condition="$(GpuBuild)">
<IncludePath>$(IncludePath);$(CudaInclude)</IncludePath>
<LibraryPath>$(LibraryPath);$(CudaLibPath)</LibraryPath>
</PropertyGroup>
<ItemDefinitionGroup>
<ClCompile>
<AdditionalIncludeDirectories>$(SolutionDir)Source\SequenceTrainingLib;$(SolutionDir)Source\SGDLib;$(SolutionDir)Source\ComputationNetworkLib;$(SolutionDir)Source\CNTK;$(SolutionDir)Source\Math;$(SolutionDir)Source\Common\Include;$(SolutionDir)Source\CNTK\BrainScript;$(MSMPI_INC);$(NvmlInclude)</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<AdditionalLibraryDirectories>$(MSMPI_LIB64);$(OutDir);$(NvmlLibPath)</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="$(DebugBuild)">
<ClCompile>
<PrecompiledHeader>

@@ -73,13 +73,11 @@
<OpenMPSupport>true</OpenMPSupport>
<TreatWarningAsError>true</TreatWarningAsError>
<AdditionalOptions>/bigobj %(AdditionalOptions)</AdditionalOptions>
<AdditionalIncludeDirectories>$(NvmlInclude)</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>Math.lib; kernel32.lib; user32.lib; shell32.lib; %(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>$(NvmlLibPath)</AdditionalLibraryDirectories>
<DelayLoadDLLs>Math.dll; nvml.dll</DelayLoadDLLs>
<StackReserveSize>100000000</StackReserveSize>
</Link>

@@ -97,7 +95,6 @@
<FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>
<AdditionalOptions>/d2Zi+ %(AdditionalOptions)</AdditionalOptions>
<TreatWarningAsError>true</TreatWarningAsError>
<AdditionalIncludeDirectories>$(NvmlInclude)</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>

@@ -107,7 +104,6 @@
<AdditionalDependencies>Math.lib; kernel32.lib; user32.lib; shell32.lib; %(AdditionalDependencies)</AdditionalDependencies>
<Profile>true</Profile>
<DelayLoadDLLs>Math.dll; nvml.dll</DelayLoadDLLs>
<AdditionalLibraryDirectories>$(NvmlLibPath)</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="$(CpuOnlyBuild)">

@@ -119,6 +115,12 @@
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="$(GpuBuild)">
<ClCompile>
<AdditionalIncludeDirectories>%(AdditionalIncludeDirectories);$(CudaInclude)</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<AdditionalLibraryDirectories>%(AdditionalLibraryDirectories);$(CudaLibPath)</AdditionalLibraryDirectories>
</Link>
<PostBuildEvent>
<Command>if exist "%ProgramW6432%\NVIDIA Corporation\NVSMI" xcopy /I /D /Y "%ProgramW6432%\NVIDIA Corporation\NVSMI\nvml*.dll" $(TargetDir)</Command>
<Message>Copying NVidia GDK extension DLL to target folder</Message>
@@ -47,19 +47,19 @@
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="$(DebugBuild)">
<LinkIncremental>true</LinkIncremental>
<IncludePath>..\ActionsLib;..\SequenceTrainingLib;..\SGDLib;..\ComputationNetworkLib;..\Math;..\Common\Include;..\CNTK\BrainScript;$(MSMPI_INC);$(VCInstallDir)include;$(WindowsSDK_IncludePath);</IncludePath>
<LibraryPath>$(MSMPI_LIB64);$(SolutionDir)$(Platform)\$(Configuration);$(VCInstallDir)lib\amd64;$(WindowsSDK_LibraryPath_x64)</LibraryPath>
</PropertyGroup>
<PropertyGroup Condition="$(ReleaseBuild)">
<LinkIncremental>false</LinkIncremental>
<IncludePath>..\ActionsLib;..\SequenceTrainingLib;..\SGDLib;..\ComputationNetworkLib;..\Math;..\Common\Include;..\CNTK\BrainScript;$(MSMPI_INC);$(VCInstallDir)include;$(WindowsSDK_IncludePath);</IncludePath>
<LibraryPath>$(MSMPI_LIB64);$(SolutionDir)$(Platform)\$(Configuration);$(VCInstallDir)lib\amd64;$(WindowsSDK_LibraryPath_x64)</LibraryPath>
<ExecutablePath>$(ExecutablePath)</ExecutablePath>
</PropertyGroup>
<PropertyGroup Condition="$(GpuBuild)">
<IncludePath>$(IncludePath);$(CudaInclude)</IncludePath>
<LibraryPath>$(LibraryPath);$(CudaLibPath)</LibraryPath>
</PropertyGroup>
<ItemDefinitionGroup>
<ClCompile>
<AdditionalIncludeDirectories>$(SolutionDir)Source\ActionsLib;$(SolutionDir)Source\SequenceTrainingLib;$(SolutionDir)Source\SGDLib;$(SolutionDir)Source\ComputationNetworkLib;$(SolutionDir)Source\Math;$(SolutionDir)Source\Common\Include;$(SolutionDir)Source\CNTK\BrainScript;$(MSMPI_INC);$(NvmlInclude)</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<AdditionalLibraryDirectories>$(MSMPI_LIB64);$(OutDir);$(NvmlLibPath)</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="$(DebugBuild)">
<ClCompile>
<PrecompiledHeader>

@@ -71,13 +71,11 @@
<OpenMPSupport>true</OpenMPSupport>
<TreatWarningAsError>true</TreatWarningAsError>
<AdditionalOptions>/bigobj %(AdditionalOptions)</AdditionalOptions>
<AdditionalIncludeDirectories>$(NvmlInclude)</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>ActionsLib.lib; SGDLib.lib; ComputationNetworkLib.lib; Math.lib; kernel32.lib; user32.lib; shell32.lib; SequenceTrainingLib.lib; %(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>$(NvmlLibPath)</AdditionalLibraryDirectories>
<DelayLoadDLLs>Math.dll; msmpi.dll; nvml.dll; $(CudaRuntimeDll)</DelayLoadDLLs>
<StackReserveSize>100000000</StackReserveSize>
</Link>

@@ -98,7 +96,6 @@
<FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>
<AdditionalOptions>/d2Zi+ %(AdditionalOptions)</AdditionalOptions>
<TreatWarningAsError>true</TreatWarningAsError>
<AdditionalIncludeDirectories>$(NvmlInclude)</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>

@@ -108,7 +105,6 @@
<AdditionalDependencies>ActionsLib.lib; SGDLib.lib; ComputationNetworkLib.lib; Math.lib; kernel32.lib; user32.lib; shell32.lib; SequenceTrainingLib.lib; %(AdditionalDependencies)</AdditionalDependencies>
<Profile>true</Profile>
<DelayLoadDLLs>Math.dll; msmpi.dll; nvml.dll; $(CudaRuntimeDll)</DelayLoadDLLs>
<AdditionalLibraryDirectories>$(NvmlLibPath)</AdditionalLibraryDirectories>
<StackReserveSize>100000000</StackReserveSize>
</Link>
<PreBuildEvent>

@@ -132,6 +128,12 @@
</PostBuildEvent>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="$(GpuBuild)">
<ClCompile>
<AdditionalIncludeDirectories>%(AdditionalIncludeDirectories);$(CudaInclude)</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<AdditionalLibraryDirectories>%(AdditionalLibraryDirectories);$(CudaLibPath)</AdditionalLibraryDirectories>
</Link>
<PostBuildEvent>
<Command>xcopy /I /D /Y $(ProjectDir)BrainScript\CNTKCoreLib\CNTK.core.bs $(TargetDir) && if exist "%ProgramW6432%\NVIDIA Corporation\NVSMI" xcopy /I /D /Y "%ProgramW6432%\NVIDIA Corporation\NVSMI\nvml*.dll" $(TargetDir)</Command>
<Message>Copying dependencies</Message>

@@ -225,4 +227,4 @@
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets" />
</Project>
</Project>
@@ -148,7 +148,7 @@ void File::Init(const wchar_t* filename, int fileOptions)
// (wstring only for now; feel free to make this a template if needed)
/*static*/ wstring File::DirectoryPathOf(wstring path)
{
#if WIN32
#ifdef WIN32
if (IsWindows8OrGreater())
{
typedef HRESULT(*PathCchRemoveFileSpecProc)(_Inout_updates_(_Inexpressible_(cchPath)) PWSTR, _In_ size_t);

@@ -203,7 +203,7 @@ void File::Init(const wchar_t* filename, int fileOptions)
// (wstring only for now; feel free to make this a template if needed)
/*static*/ wstring File::FileNameOf(wstring path)
{
#if WIN32
#ifdef WIN32
static const wstring delim = L"\\:/";
#else
static const wstring delim = L"/";

@@ -218,7 +218,7 @@ void File::Init(const wchar_t* filename, int fileOptions)
// get path of current executable
/*static*/ wstring File::GetExecutablePath()
{
#if WIN32
#ifdef WIN32
wchar_t path[33000];
if (GetModuleFileNameW(NULL, path, _countof(path)) == 0)
LogicError("GetExecutablePath: GetModuleFileNameW() unexpectedly failed.");

@@ -264,7 +264,7 @@ File::~File(void)
{
if (m_pcloseNeeded)
{
// TODO: Check for error code and throw if !std::uncaught_exception()
// TODO: Check for error code and throw if !std::uncaught_exception()
_pclose(m_file);
}
else if (m_file != stdin && m_file != stdout && m_file != stderr)
@@ -119,7 +119,7 @@ void ComputationNetwork::SaveToFileImpl(const wstring& fileName, const FileOptio
{
ComputationNodeBasePtr nodePtr = nodeIter->second;
// type
#if CURRENT_CNTK_MODEL_VERSION >= CNTK_MODEL_VERSION_5
#if CURRENT_CNTK_MODEL_VERSION >= CNTK_MODEL_VERSION_7
wstring precision;
if (nodePtr->Is<ComputationNode<float>>())
precision = ElemTypeName<float>();

@@ -195,7 +195,7 @@ void ComputationNetwork::SaveToFileImpl(const wstring& fileName, const FileOptio
// load the section of nodes that contain persistable parameters
// This is also used for reloading a model without recreating it, e.g. during training.
// TODO: Why not just reload it? Because SGD::Train() holds pointers to the parameters directly? That should be fixed.
template <class ElemType> // ElemType is the default for models prior to CNTK_MODEL_VERSION_5; after that, it is serialized, and ElemType is ignored
template <class ElemType> // ElemType is the default for models prior to CNTK_MODEL_VERSION_7; after that, it is serialized, and ElemType is ignored
void ComputationNetwork::ReadPersistableParameters(File& fstream, bool create)
{
fstream.GetMarker(FileMarker::fileMarkerBeginSection, L"BCN");

@@ -218,7 +218,7 @@ void ComputationNetwork::ReadPersistableParameters(File& fstream, bool create)
for (size_t i = 0; i < numNodes; i++)
{
wstring precision;
if (modelVersion >= CNTK_MODEL_VERSION_5)
if (modelVersion >= CNTK_MODEL_VERSION_7)
fstream >> precision; // "float" or "double"; default is "" meaning <ElemType> as passed in from outside

wstring opName, nodeName;
@@ -47,21 +47,21 @@
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="$(DebugBuild)">
<LinkIncremental>true</LinkIncremental>
<IncludePath>..\SequenceTrainingLib;..\Math;..\Common\Include;..\CNTK\BrainScript;..\ActionsLib;$(MSMPI_INC);$(VCInstallDir)include;$(WindowsSDK_IncludePath)</IncludePath>
<LibraryPath>$(MSMPI_LIB64);$(SolutionDir)$(Platform)\$(Configuration);$(VCInstallDir)lib\amd64;$(WindowsSDK_LibraryPath_x64)</LibraryPath>
<PreBuildEventUseInBuild>false</PreBuildEventUseInBuild>
</PropertyGroup>
<PropertyGroup Condition="$(ReleaseBuild)">
<LinkIncremental>false</LinkIncremental>
<IncludePath>..\SequenceTrainingLib;..\Math;..\Common\Include;..\CNTK\BrainScript;..\ActionsLib;$(MSMPI_INC);$(VCInstallDir)include;$(WindowsSDK_IncludePath)</IncludePath>
<LibraryPath>$(MSMPI_LIB64);$(SolutionDir)$(Platform)\$(Configuration);$(VCInstallDir)lib\amd64;$(WindowsSDK_LibraryPath_x64)</LibraryPath>
<ExecutablePath>$(ExecutablePath)</ExecutablePath>
<PreBuildEventUseInBuild>false</PreBuildEventUseInBuild>
</PropertyGroup>
<PropertyGroup Condition="$(GpuBuild)">
<IncludePath>$(IncludePath);$(CudaInclude)</IncludePath>
<LibraryPath>$(LibraryPath);$(CudaLibPath)</LibraryPath>
</PropertyGroup>
<ItemDefinitionGroup>
<ClCompile>
<AdditionalIncludeDirectories>$(SolutionDir)Source\SequenceTrainingLib;$(SolutionDir)Source\Math;$(SolutionDir)Source\Common\Include;$(SolutionDir)Source\CNTK\BrainScript;$(SolutionDir)Source\ActionsLib;$(MSMPI_INC);$(NvmlInclude)</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<AdditionalLibraryDirectories>$(MSMPI_LIB64);$(OutDir);$(NvmlLib)</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="$(DebugBuild)">
<ClCompile>
<PrecompiledHeader>

@@ -73,13 +73,11 @@
<OpenMPSupport>true</OpenMPSupport>
<TreatWarningAsError>true</TreatWarningAsError>
<AdditionalOptions>/bigobj %(AdditionalOptions)</AdditionalOptions>
<AdditionalIncludeDirectories>$(NvmlInclude)</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>Math.lib; kernel32.lib; user32.lib; shell32.lib; %(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>$(NvmlLib)</AdditionalLibraryDirectories>
<DelayLoadDLLs>Math.dll; nvml.dll; $(CudaRuntimeDll)</DelayLoadDLLs>
<StackReserveSize>100000000</StackReserveSize>
</Link>

@@ -97,7 +95,6 @@
<FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>
<AdditionalOptions>/d2Zi+ %(AdditionalOptions)</AdditionalOptions>
<TreatWarningAsError>true</TreatWarningAsError>
<AdditionalIncludeDirectories>$(NvmlInclude)</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>

@@ -107,7 +104,6 @@
<AdditionalDependencies>Math.lib; kernel32.lib; user32.lib; shell32.lib; %(AdditionalDependencies)</AdditionalDependencies>
<Profile>true</Profile>
<DelayLoadDLLs>Math.dll; nvml.dll; $(CudaRuntimeDll)</DelayLoadDLLs>
<AdditionalLibraryDirectories>$(NvmlLibPath)</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="$(CpuOnlyBuild)">

@@ -119,6 +115,12 @@
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="$(GpuBuild)">
<ClCompile>
<AdditionalIncludeDirectories>%(AdditionalIncludeDirectories);$(CudaInclude)</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<AdditionalLibraryDirectories>%(AdditionalLibraryDirectories);$(CudaLibPath)</AdditionalLibraryDirectories>
</Link>
<PostBuildEvent>
<Command>if exist "%ProgramW6432%\NVIDIA Corporation\NVSMI" xcopy /I /D /Y "%ProgramW6432%\NVIDIA Corporation\NVSMI\nvml*.dll" $(TargetDir)</Command>
<Message>Copying NVidia GDK extension DLL to target folder</Message>
@@ -199,7 +199,7 @@ public:

wstring pathName = config[L"pathName"];
fprintf(stderr, "Load: Loading model file: %ls", pathName.c_str());
Load<ElemType>(pathName); // note that for CNTK_MODEL_VERSION_5 and above, 'ElemType' is ignored
Load<ElemType>(pathName); // note that for CNTK_MODEL_VERSION_7 and above, 'ElemType' is ignored
}
};
@@ -48,20 +48,20 @@
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="$(DebugBuild)">
<LinkIncremental>true</LinkIncremental>
<IncludePath>..\SGDLib;..\ComputationNetworkLib;..\SequenceTrainingLib;..\Math;..\Common\Include;..\CNTK\BrainScript;..\ActionsLib;$(MSMPI_INC);$(VCInstallDir)include;$(WindowsSDK_IncludePath)</IncludePath>
<LibraryPath>..\ComputationNetworkLib;..\Math;$(MSMPI_LIB64);$(VCInstallDir)lib\amd64;$(WindowsSDK_LibraryPath_x64);$(SolutionDir)$(Platform)\$(Configuration)\</LibraryPath>
<TargetName>EvalDll</TargetName>
</PropertyGroup>
<PropertyGroup Condition="$(ReleaseBuild)">
<LinkIncremental>false</LinkIncremental>
<IncludePath>..\SGDLib;..\ComputationNetworkLib;..\SequenceTrainingLib;..\Math;..\Common\Include;..\CNTK\BrainScript;..\ActionsLib;$(MSMPI_INC);$(VCInstallDir)include;$(WindowsSDK_IncludePath)</IncludePath>
<LibraryPath>..\ComputationNetworkLib;..\Math;$(MSMPI_LIB64);$(VCInstallDir)lib\amd64;$(WindowsSDK_LibraryPath_x64);$(SolutionDir)$(Platform)\$(Configuration)\</LibraryPath>
<TargetName>EvalDll</TargetName>
</PropertyGroup>
<PropertyGroup Condition="$(GpuBuild)">
<IncludePath>$(IncludePath);$(CudaInclude)</IncludePath>
<LibraryPath>$(LibraryPath);$(CudaLibPath)</LibraryPath>
</PropertyGroup>
<ItemDefinitionGroup>
<ClCompile>
<AdditionalIncludeDirectories>$(SolutionDir)Source\SGDLib;$(SolutionDir)Source\ComputationNetworkLib;$(SolutionDir)Source\SequenceTrainingLib;$(SolutionDir)Source\Math;$(SolutionDir)Source\Common\Include;$(SolutionDir)Source\CNTK\BrainScript;$(SolutionDir)Source\ActionsLib;$(MSMPI_INC);$(NvmlInclude)</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<AdditionalLibraryDirectories>$(SolutionDir)Source\ComputationNetworkLib;$(SolutionDir)Source\Math;$(MSMPI_LIB64);$(SolutionDir)$(Platform)\$(Configuration);$(NvmlLibPath)</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="$(DebugBuild)">
<ClCompile>
<PrecompiledHeader>NotUsing</PrecompiledHeader>

@@ -69,7 +69,6 @@
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>EVALDLL;WIN32;_DEBUG;_WINDOWS;_USRDLL;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
<AdditionalIncludeDirectories>$(NvmlInclude)</AdditionalIncludeDirectories>
<TreatWarningAsError>true</TreatWarningAsError>
<AdditionalOptions>/bigobj %(AdditionalOptions)</AdditionalOptions>
</ClCompile>

@@ -77,7 +76,6 @@
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>ComputationNetworkLib.lib; Math.lib; ActionsLib.lib; kernel32.lib; user32.lib; shell32.lib; SequenceTrainingLib.lib; %(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>$(NvmlLibPath)</AdditionalLibraryDirectories>
<DelayLoadDLLs>Math.dll; nvml.dll; $(CudaRuntimeDll)</DelayLoadDLLs>
</Link>
</ItemDefinitionGroup>

@@ -90,7 +88,6 @@
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>EVALDLL;WIN32;NDEBUG;_WINDOWS;_USRDLL;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
<AdditionalIncludeDirectories>$(NvmlInclude)</AdditionalIncludeDirectories>
<OpenMPSupport>false</OpenMPSupport>
<AdditionalOptions>/d2Zi+ /bigobj %(AdditionalOptions)</AdditionalOptions>
<RuntimeLibrary>MultiThreadedDLL</RuntimeLibrary>

@@ -103,7 +100,6 @@
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalDependencies>ComputationNetworkLib.lib; Math.lib; ActionsLib.lib; kernel32.lib; user32.lib; shell32.lib; SequenceTrainingLib.lib; %(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>$(NvmlLibPath)</AdditionalLibraryDirectories>
<Profile>true</Profile>
<DelayLoadDLLs>Math.dll; nvml.dll; $(CudaRuntimeDll)</DelayLoadDLLs>
</Link>

@@ -111,16 +107,18 @@
<ItemDefinitionGroup Condition="$(CpuOnlyBuild)">
<ClCompile>
<PreprocessorDefinitions>CPUONLY;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>
</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<DelayLoadDLLs>Math.dll</DelayLoadDLLs>
<AdditionalLibraryDirectories>
</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="$(GpuBuild)">
<ClCompile>
<AdditionalIncludeDirectories>%(AdditionalIncludeDirectories);$(CudaInclude)</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<AdditionalLibraryDirectories>%(AdditionalLibraryDirectories);$(CudaLibPath)</AdditionalLibraryDirectories>
</Link>
<PostBuildEvent>
<Command>if exist "%ProgramW6432%\NVIDIA Corporation\NVSMI" xcopy /I /D /Y "%ProgramW6432%\NVIDIA Corporation\NVSMI\nvml*.dll" $(TargetDir)</Command>
<Message>Copying NVidia GDK extension DLL to target folder</Message>
@@ -47,18 +47,17 @@
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="$(DebugBuild)">
<LinkIncremental>true</LinkIncremental>
<IncludePath>$(VC_IncludePath);$(WindowsSDK_IncludePath);$(SolutionDir)Source\Common\Include</IncludePath>
<TargetExt>.dll</TargetExt>
<LibraryPath>$(SolutionDir)$(Platform)\$(Configuration)\;$(LibraryPath)</LibraryPath>
</PropertyGroup>
<PropertyGroup Condition="$(ReleaseBuild)">
<LinkIncremental>false</LinkIncremental>
<IncludePath>$(VC_IncludePath);$(WindowsSDK_IncludePath);$(SolutionDir)Source\Common\Include</IncludePath>
<TargetExt>.dll</TargetExt>
<LibraryPath>$(SolutionDir)$(Platform)\$(Configuration)\;$(LibraryPath)</LibraryPath>
<PropertyGroup>
<LinkIncremental>$(DebugBuild)</LinkIncremental>
</PropertyGroup>
<ItemDefinitionGroup>
<ClCompile>
<AdditionalIncludeDirectories>$(SolutionDir)Source\Common\Include</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<AdditionalLibraryDirectories>$(OutDir)</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="$(DebugBuild)">
<ClCompile>
<WarningLevel>Level3</WarningLevel>

@@ -67,7 +66,6 @@
</ClCompile>
<Link>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>%(AdditionalDependencies)</AdditionalDependencies>
<DelayLoadDLLs>
</DelayLoadDLLs>
</Link>

@@ -79,7 +77,6 @@
</ClCompile>
<Link>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>%(AdditionalDependencies)</AdditionalDependencies>
<DelayLoadDLLs>
</DelayLoadDLLs>
</Link>
@@ -51,21 +51,22 @@
<PropertyGroup>
<!-- TODO intentional for all? -->
<LinkIncremental>false</LinkIncremental>
<IncludePath>..\Common\include;$(ACML_PATH)\include;$(IncludePath)</IncludePath>
<LibraryPath>$(SolutionDir)$(Platform)\$(Configuration);$(ACML_PATH)\lib;$(LibraryPath)</LibraryPath>
<TargetName>Math</TargetName>
</PropertyGroup>
<PropertyGroup Condition="$(GpuBuild)">
<IncludePath>$(IncludePath);$(CudaInclude)</IncludePath>
<LibraryPath>$(LibraryPath);$(CudaLibPath)</LibraryPath>
</PropertyGroup>
<ItemDefinitionGroup>
<ClCompile>
<AdditionalIncludeDirectories>$(ACML_PATH)\include;$(SolutionDir)Source\Common\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<AdditionalLibraryDirectories>$(ACML_PATH)\lib;$(OutDir)</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="$(DebugBuild)">
<ClCompile>
<PrecompiledHeader>NotUsing</PrecompiledHeader>
<WarningLevel>Level4</WarningLevel>
<PreprocessorDefinitions>USE_ACML; NO_SYNC; WIN32; _DEBUG; _WINDOWS; _USRDLL; MATH_EXPORTS; %(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
<AdditionalIncludeDirectories>..\Common\include\;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<MultiProcessorCompilation>true</MultiProcessorCompilation>
<Optimization>Disabled</Optimization>
<FloatingPointModel>Fast</FloatingPointModel>

@@ -77,7 +78,6 @@
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>libacml_mp_dll.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories>
<DelayLoadDLLs>libacml_mp_dll.dll; $(CudaDlls); %(DelayLoadDLLs)</DelayLoadDLLs>
<Profile>true</Profile>
</Link>

@@ -105,7 +105,6 @@
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>USE_ACML; NO_SYNC; WIN32; NDEBUG; _WINDOWS; _USRDLL; MATH_EXPORTS; %(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
<AdditionalIncludeDirectories>..\Common\include\;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<MultiProcessorCompilation>true</MultiProcessorCompilation>
<FloatingPointModel>Fast</FloatingPointModel>
<OpenMPSupport>true</OpenMPSupport>

@@ -120,10 +119,9 @@
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories>
<AdditionalDependencies>libacml_mp_dll.lib;%(AdditionalDependencies)</AdditionalDependencies>
<Profile>true</Profile>
<DelayLoadDLLs>libacml_dll.dll; libacml_mp_dll.dll; $(CudaDlls); %(DelayLoadDLLs)</DelayLoadDLLs>
<DelayLoadDLLs>libacml_mp_dll.dll; $(CudaDlls); %(DelayLoadDLLs)</DelayLoadDLLs>
</Link>
<PostBuildEvent>
<Command>xcopy /D /I /Y "$(ACML_PATH)\lib\*.dll" $(OutputPath)</Command>

@@ -144,12 +142,20 @@
<LinkLibraryDependencies>true</LinkLibraryDependencies>
</ProjectReference>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="$(GpuBuild)">
<ClCompile>
<AdditionalIncludeDirectories>%(AdditionalIncludeDirectories);$(CudaInclude)</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<AdditionalLibraryDirectories>%(AdditionalLibraryDirectories);$(CudaLibPath)</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="$(CpuOnlyBuild)">
<ClCompile>
<PreprocessorDefinitions>CPUONLY;%(PreprocessorDefinitions)</PreprocessorDefinitions>
</ClCompile>
<Link>
<DelayLoadDLLs>libacml_dll.dll; libacml_mp_dll.dll</DelayLoadDLLs>
<DelayLoadDLLs>libacml_mp_dll.dll</DelayLoadDLLs>
</Link>
</ItemDefinitionGroup>
<ItemGroup>
@@ -49,8 +49,6 @@
</Choose>
<PropertyGroup>
<CudaToolkitCustomDir>$(CudaPath)</CudaToolkitCustomDir>
<IncludePath>..\Common\include;$(ACML_PATH)\include;$(CudaInclude);$(CUB_PATH);$(CuDnnIncPath);$(IncludePath)</IncludePath>
<LibraryPath>$(SolutionDir)$(Platform)\$(Configuration);$(ACML_PATH)\lib;$(CudaLibPath);$(CuDnnLibPath);$(LibraryPath)</LibraryPath>
<IntDir>$(IntDir)\MathCUDA\</IntDir>
</PropertyGroup>
<ImportGroup Label="ExtensionSettings">

@@ -76,7 +74,7 @@
<WarningLevel>Level4</WarningLevel>
<PreprocessorDefinitions>NO_SYNC; WIN32; _WINDOWS; _USRDLL; MATH_EXPORTS; $(CuDnnDefine); %(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
<AdditionalIncludeDirectories>..\Common\include\;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<AdditionalIncludeDirectories>$(SolutionDir)Source\Common\include;$(ACML_PATH)\include;$(CudaInclude);$(CUB_PATH);$(CuDnnIncPath)</AdditionalIncludeDirectories>
<MultiProcessorCompilation>true</MultiProcessorCompilation>
<FloatingPointModel>Fast</FloatingPointModel>
<OpenMPSupport>true</OpenMPSupport>

@@ -90,7 +88,7 @@
<DelayLoadDLLs>$(CudaDlls);%(DelayLoadDLLs)</DelayLoadDLLs>
</Link>
<Lib>
<AdditionalLibraryDirectories>$(CuDnnLibPath);%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
<AdditionalLibraryDirectories>$(OutDir);$(ACML_PATH)\lib;$(CudaLibPath);$(CuDnnLibPath)</AdditionalLibraryDirectories>
<AdditionalDependencies>$(CuDnnLib)</AdditionalDependencies>
</Lib>
<CudaCompile>
@@ -47,14 +47,18 @@
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="$(DebugBuild)">
<LinkIncremental>true</LinkIncremental>
<IncludePath>..\..\common\include;..\..\Math;$(VCInstallDir)include;$(WindowsSDK_IncludePath);</IncludePath>
<LibraryPath>$(SolutionDir)$(Platform)\$(Configuration);$(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(WindowsSDK_LibraryPath_x64);</LibraryPath>
</PropertyGroup>
<PropertyGroup Condition="$(ReleaseBuild)">
<LinkIncremental>false</LinkIncremental>
<IncludePath>..\..\common\include;..\..\Math;$(VCInstallDir)include;$(WindowsSDK_IncludePath);</IncludePath>
<LibraryPath>$(SolutionDir)$(Platform)\$(Configuration);$(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(WindowsSDK_LibraryPath_x64);</LibraryPath>
</PropertyGroup>
<ItemDefinitionGroup>
<ClCompile>
<AdditionalIncludeDirectories>$(SolutionDir)Source\Common\Include;$(SolutionDir)Source\Math</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<AdditionalLibraryDirectories>$(OutDir)</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="$(DebugBuild)">
<ClCompile>
<PrecompiledHeader>Use</PrecompiledHeader>

@@ -62,14 +66,12 @@
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>WIN32;_DEBUG;_WINDOWS;_USRDLL;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
<AdditionalIncludeDirectories>..\..\common\include;..\..\Math</AdditionalIncludeDirectories>
<TreatWarningAsError>true</TreatWarningAsError>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>Math.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="$(ReleaseBuild)">

@@ -81,7 +83,6 @@
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>WIN32;NDEBUG;_WINDOWS;_USRDLL;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
<AdditionalIncludeDirectories>..\..\common\include;..\..\Math</AdditionalIncludeDirectories>
<OpenMPSupport>false</OpenMPSupport>
<AdditionalOptions>/d2Zi+ %(AdditionalOptions)</AdditionalOptions>
<TreatWarningAsError>true</TreatWarningAsError>

@@ -92,7 +93,6 @@
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalDependencies>Math.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories>
<Profile>true</Profile>
</Link>
</ItemDefinitionGroup>
@@ -43,10 +43,6 @@
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup>
<IncludePath>..\..\common\include;..\..\math;$(IncludePath);</IncludePath>
<LibraryPath>$(SolutionDir)$(Platform)\$(Configuration);$(LibraryPath);</LibraryPath>
</PropertyGroup>
<PropertyGroup Condition="$(DebugBuild)">
<LinkIncremental>true</LinkIncremental>
</PropertyGroup>

@@ -61,12 +57,13 @@
<SDLCheck>true</SDLCheck>
<TreatWarningAsError>true</TreatWarningAsError>
<OpenMPSupport>true</OpenMPSupport>
<AdditionalIncludeDirectories>../ReaderLib</AdditionalIncludeDirectories>
<AdditionalIncludeDirectories>$(SolutionDir)Source\Common\Include;$(SolutionDir)Source\Math;$(SolutionDir)Source\Readers\ReaderLib</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>ReaderLib.lib;Math.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>$(OutDir)</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="$(DebugBuild)">
@@ -45,16 +45,17 @@
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="$(DebugBuild)">
<LinkIncremental>true</LinkIncremental>
<IncludePath>..\..\common\include;..\..\Math;$(VCInstallDir)include;$(WindowsSDK_IncludePath);</IncludePath>
<LibraryPath>$(SolutionDir)$(Platform)\$(Configuration);$(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(WindowsSDK_LibraryPath_x64);</LibraryPath>
</PropertyGroup>
<PropertyGroup Condition="$(ReleaseBuild)">
<LinkIncremental>false</LinkIncremental>
<IncludePath>$(MSMPI_INC);..\..\common\include;..\..\Math;$(VCInstallDir)include;$(WindowsSDK_IncludePath);</IncludePath>
<LibraryPath>$(MSMPI_LIB64);$(SolutionDir)$(Platform)\$(Configuration);$(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(WindowsSDK_LibraryPath_x64);</LibraryPath>
<PropertyGroup>
<LinkIncremental>$(DebugBuild)</LinkIncremental>
</PropertyGroup>
<ItemDefinitionGroup>
<ClCompile>
<AdditionalIncludeDirectories>$(SolutionDir)Source\Common\Include;$(SolutionDir)Source\Math</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<AdditionalLibraryDirectories>$(OutDir)</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="$(DebugBuild)">
<ClCompile>
<PrecompiledHeader>NotUsing</PrecompiledHeader>

@@ -62,7 +63,6 @@
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;WIN32;_DEBUG;_WINDOWS;_USRDLL;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
<AdditionalIncludeDirectories>..\..\common\include;..\..\Math</AdditionalIncludeDirectories>
<TreatWarningAsError>true</TreatWarningAsError>
<AdditionalOptions>/bigobj %(AdditionalOptions)</AdditionalOptions>
</ClCompile>

@@ -70,7 +70,6 @@
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>Math.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="$(ReleaseBuild)">

@@ -82,7 +81,6 @@
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;WIN32;NDEBUG;_WINDOWS;_USRDLL;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
<AdditionalIncludeDirectories>..\..\common\include;..\..\Math</AdditionalIncludeDirectories>
<OpenMPSupport>false</OpenMPSupport>
<AdditionalOptions>/d2Zi+ %(AdditionalOptions)</AdditionalOptions>
<TreatWarningAsError>true</TreatWarningAsError>

@@ -93,7 +91,6 @@
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalDependencies>Math.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories>
<Profile>true</Profile>
</Link>
</ItemDefinitionGroup>
@@ -39,10 +39,6 @@
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup>
<IncludePath>$(VCInstallDir)include;$(WindowsSDK_IncludePath);</IncludePath>
<LibraryPath>$(SolutionDir)$(Platform)\$(Configuration);$(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(WindowsSDK_LibraryPath_x64);</LibraryPath>
</PropertyGroup>
<PropertyGroup>
<LinkIncremental>$(DebugBuild)</LinkIncremental>
</PropertyGroup>

@@ -53,12 +49,13 @@
<PreprocessorDefinitions>WIN32;_WINDOWS;_USRDLL;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
<TreatWarningAsError>true</TreatWarningAsError>
<AdditionalIncludeDirectories>..\..\Common\Include;..\..\Math;..\ReaderLib</AdditionalIncludeDirectories>
<AdditionalIncludeDirectories>$(SolutionDir)Source\Common\Include;$(SolutionDir)Source\Math;$(SolutionDir)Source\Readers\ReaderLib</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>ReaderLib.lib;Math.lib;kernel32.lib;user32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>$(OutDir)</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="$(DebugBuild)">
@@ -45,16 +45,17 @@
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="$(DebugBuild)">
<LinkIncremental>true</LinkIncremental>
<IncludePath>..\..\common\include;..\..\Math;$(VCInstallDir)include;$(WindowsSDK_IncludePath);</IncludePath>
<LibraryPath>$(SolutionDir)$(Platform)\$(Configuration);$(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(WindowsSDK_LibraryPath_x64);</LibraryPath>
</PropertyGroup>
<PropertyGroup Condition="$(ReleaseBuild)">
<LinkIncremental>false</LinkIncremental>
<IncludePath>..\..\common\include;..\..\Math;$(VCInstallDir)include;$(WindowsSDK_IncludePath);</IncludePath>
<LibraryPath>$(SolutionDir)$(Platform)\$(Configuration);$(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(WindowsSDK_LibraryPath_x64);</LibraryPath>
<PropertyGroup>
<LinkIncremental>$(DebugBuild)</LinkIncremental>
</PropertyGroup>
<ItemDefinitionGroup>
<ClCompile>
<AdditionalIncludeDirectories>$(SolutionDir)Source\Common\Include;$(SolutionDir)Source\Math</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<AdditionalLibraryDirectories>$(OutDir)</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="$(DebugBuild)">
<ClCompile>
<PrecompiledHeader>Use</PrecompiledHeader>

@@ -130,11 +131,8 @@
<ClCompile Include="Exports.cpp" />
<ClCompile Include="DataWriterLocal.cpp" />
<ClCompile Include="dllmain.cpp">
<CompileAsManaged Condition="$(DebugBuild)">false</CompileAsManaged>
<PrecompiledHeader Condition="$(DebugBuild)">
</PrecompiledHeader>
<CompileAsManaged Condition="$(ReleaseBuild)">false</CompileAsManaged>
<PrecompiledHeader Condition="$(ReleaseBuild)">
<CompileAsManaged>false</CompileAsManaged>
<PrecompiledHeader>
</PrecompiledHeader>
</ClCompile>
<ClCompile Include="..\..\Common\fileutil.cpp">

@@ -144,8 +142,7 @@
<ClCompile Include="HTKMLFWriter.cpp" />
<ClCompile Include="latticearchive.cpp" />
<ClCompile Include="stdafx.cpp">
<PrecompiledHeader Condition="$(DebugBuild)">Create</PrecompiledHeader>
<PrecompiledHeader Condition="$(ReleaseBuild)">Create</PrecompiledHeader>
<PrecompiledHeader>Create</PrecompiledHeader>
</ClCompile>
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
@ -9,128 +9,130 @@
|
|||
|
||||
namespace Microsoft { namespace MSR { namespace CNTK {
|
||||
|
||||
std::vector<std::string> GetSectionsWithParameter(const ConfigParameters& config, const std::string& parameterName)
|
||||
std::vector<std::string> GetSectionsWithParameter(const ConfigParameters& config, const std::string& parameterName)
|
||||
{
|
||||
std::vector<std::string> sectionNames;
|
||||
for (const std::pair<std::string, ConfigParameters>& section : config)
|
||||
{
|
||||
std::vector<std::string> sectionNames;
|
||||
for (const std::pair<std::string, ConfigParameters>& section : config)
|
||||
if (section.second.ExistsCurrent(parameterName))
|
||||
{
|
||||
if (section.second.ExistsCurrent(parameterName))
|
||||
{
|
||||
sectionNames.push_back(section.first);
|
||||
}
|
||||
sectionNames.push_back(section.first);
|
||||
}
|
||||
|
||||
if (sectionNames.empty())
|
||||
{
|
||||
RuntimeError("ImageReader requires %s parameter.", parameterName.c_str());
|
||||
}
|
||||
|
||||
return sectionNames;
|
||||
}
|
||||
|
||||
ImageConfigHelper::ImageConfigHelper(const ConfigParameters& config)
|
||||
: m_dataFormat(CHW)
|
||||
if (sectionNames.empty())
|
||||
{
|
||||
std::vector<std::string> featureNames = GetSectionsWithParameter(config, "width");
|
||||
std::vector<std::string> labelNames = GetSectionsWithParameter(config, "labelDim");
|
||||
|
||||
// REVIEW alexeyk: currently support only one feature and label section.
|
||||
if (featureNames.size() != 1 || labelNames.size() != 1)
|
||||
{
|
||||
RuntimeError(
|
||||
"ImageReader currently supports a single feature and label stream. '%d' features , '%d' labels found.",
|
||||
static_cast<int>(featureNames.size()),
|
||||
static_cast<int>(labelNames.size()));
|
||||
}
|
||||
|
||||
ConfigParameters featureSection = config(featureNames[0]);
|
||||
size_t w = featureSection("width");
|
||||
size_t h = featureSection("height");
|
||||
size_t c = featureSection("channels");
|
||||
|
||||
std::string mbFmt = featureSection("mbFormat", "nchw");
|
||||
if (AreEqualIgnoreCase(mbFmt, "nhwc") || AreEqualIgnoreCase(mbFmt, "legacy"))
|
||||
{
|
||||
m_dataFormat = HWC;
|
||||
}
|
||||
else if (!AreEqualIgnoreCase(mbFmt, "nchw") || AreEqualIgnoreCase(mbFmt, "cudnn"))
|
||||
{
|
||||
RuntimeError("ImageReader does not support the sample format '%s', only 'nchw' and 'nhwc' are supported.", mbFmt.c_str());
|
||||
}
|
||||
|
||||
auto features = std::make_shared<StreamDescription>();
|
||||
features->m_id = 0;
|
||||
features->m_name = msra::strfun::utf16(featureSection.ConfigName());
|
||||
features->m_sampleLayout = std::make_shared<TensorShape>(ImageDimensions(w, h, c).AsTensorShape(m_dataFormat));
|
||||
features->m_storageType = StorageType::dense;
|
||||
m_streams.push_back(features);
|
||||
|
||||
ConfigParameters label = config(labelNames[0]);
|
||||
size_t labelDimension = label("labelDim");
|
||||
|
||||
auto labelSection = std::make_shared<StreamDescription>();
|
||||
labelSection->m_id = 1;
|
||||
labelSection->m_name = msra::strfun::utf16(label.ConfigName());
|
||||
labelSection->m_sampleLayout = std::make_shared<TensorShape>(labelDimension);
|
||||
labelSection->m_storageType = StorageType::dense;
|
||||
        RuntimeError("ImageReader requires %s parameter.", parameterName.c_str());
    }

    return sectionNames;
}

ImageConfigHelper::ImageConfigHelper(const ConfigParameters& config)
    : m_dataFormat(CHW)
{
    std::vector<std::string> featureNames = GetSectionsWithParameter(config, "width");
    std::vector<std::string> labelNames = GetSectionsWithParameter(config, "labelDim");

    // REVIEW alexeyk: currently support only one feature and label section.
    if (featureNames.size() != 1 || labelNames.size() != 1)
    {
        RuntimeError(
            "ImageReader currently supports a single feature and label stream. '%d' features , '%d' labels found.",
            static_cast<int>(featureNames.size()),
            static_cast<int>(labelNames.size()));
    }

    ConfigParameters featureSection = config(featureNames[0]);
    size_t w = featureSection("width");
    size_t h = featureSection("height");
    size_t c = featureSection("channels");

    std::string mbFmt = featureSection("mbFormat", "nchw");
    if (AreEqualIgnoreCase(mbFmt, "nhwc") || AreEqualIgnoreCase(mbFmt, "legacy"))
    {
        m_dataFormat = HWC;
    }
    else if (!AreEqualIgnoreCase(mbFmt, "nchw") || AreEqualIgnoreCase(mbFmt, "cudnn"))
    {
        RuntimeError("ImageReader does not support the sample format '%s', only 'nchw' and 'nhwc' are supported.", mbFmt.c_str());
    }

    auto features = std::make_shared<StreamDescription>();
    features->m_id = 0;
    features->m_name = msra::strfun::utf16(featureSection.ConfigName());
    features->m_sampleLayout = std::make_shared<TensorShape>(ImageDimensions(w, h, c).AsTensorShape(m_dataFormat));
    features->m_storageType = StorageType::dense;
    m_streams.push_back(features);

    ConfigParameters label = config(labelNames[0]);
    size_t labelDimension = label("labelDim");

    auto labelSection = std::make_shared<StreamDescription>();
    labelSection->m_id = 1;
    labelSection->m_name = msra::strfun::utf16(label.ConfigName());
    labelSection->m_sampleLayout = std::make_shared<TensorShape>(labelDimension);
    labelSection->m_storageType = StorageType::dense;
    m_streams.push_back(labelSection);

    m_mapPath = config(L"file");

    std::string rand = config(L"randomize", "auto");

    if (AreEqualIgnoreCase(rand, "auto"))
    {
        m_randomize = true;
    }
    else if (AreEqualIgnoreCase(rand, "none"))
    {
        m_randomize = false;
    }
    else
    {
        RuntimeError("'randomize' parameter must be set to 'auto' or 'none'");
    }

    // Identify precision
    string precision = config.Find("precision", "float");
    if (AreEqualIgnoreCase(precision, "float"))
    {
        features->m_elementType = ElementType::tfloat;
        labelSection->m_elementType = ElementType::tfloat;
    }
    else if (AreEqualIgnoreCase(precision, "double"))
    {
        features->m_elementType = ElementType::tdouble;
        labelSection->m_elementType = ElementType::tdouble;
    }
    else
    {
        RuntimeError("Not supported precision '%s'. Expected 'double' or 'float'.", precision.c_str());
    }

    m_cpuThreadCount = config(L"numCPUThreads", 0);

    m_multiViewCrop = AreEqualIgnoreCase((string)featureSection(L"cropType", ""), "multiview10");
}

std::vector<StreamDescriptionPtr> ImageConfigHelper::GetStreams() const
{
    return m_streams;
}

size_t ImageConfigHelper::GetFeatureStreamId() const
{
    // Currently we only support a single feature/label stream, so the index is hard-wired.
    return 0;
}

size_t ImageConfigHelper::GetLabelStreamId() const
{
    // Currently we only support a single feature/label stream, so the index is hard-wired.
    return 1;
}

std::string ImageConfigHelper::GetMapPath() const
{
    return m_mapPath;
}
}}}
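The constructor above locates its feature and label sections by scanning the top-level configuration for sections that define a given parameter ("width" for features, "labelDim" for labels); the helper whose tail appears at the top of this file performs that scan. A minimal standalone sketch of the same lookup, using a plain std::map stand-in instead of CNTK's ConfigParameters (the container types and the FindSectionsWithParameter name are illustrative, not the reader's API):

#include <iostream>
#include <map>
#include <stdexcept>
#include <string>
#include <vector>

// Stand-in for a parsed config: section name -> (parameter name -> value).
using Section = std::map<std::string, std::string>;
using Config  = std::map<std::string, Section>;

// Return the names of all sections that define the given parameter,
// mirroring how the reader discovers its feature and label sections.
std::vector<std::string> FindSectionsWithParameter(const Config& config, const std::string& parameterName)
{
    std::vector<std::string> sectionNames;
    for (const auto& section : config)
    {
        if (section.second.count(parameterName) != 0)
            sectionNames.push_back(section.first);
    }
    if (sectionNames.empty())
        throw std::runtime_error("ImageReader requires " + parameterName + " parameter.");
    return sectionNames;
}

int main()
{
    Config config = {
        {"features", {{"width", "224"}, {"height", "224"}, {"channels", "3"}}},
        {"labels",   {{"labelDim", "1000"}}},
    };

    for (const auto& name : FindSectionsWithParameter(config, "width"))
        std::cout << "feature section: " << name << "\n";   // features
    for (const auto& name : FindSectionsWithParameter(config, "labelDim"))
        std::cout << "label section: " << name << "\n";     // labels
}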
@ -46,6 +46,11 @@ public:
        return m_randomize;
    }

    bool IsMultiViewCrop() const
    {
        return m_multiViewCrop;
    }

private:
    ImageConfigHelper(const ImageConfigHelper&) = delete;
    ImageConfigHelper& operator=(const ImageConfigHelper&) = delete;

@ -55,6 +60,7 @@ private:
    ImageLayoutKind m_dataFormat;
    int m_cpuThreadCount;
    bool m_randomize;
    bool m_multiViewCrop;
};

typedef std::shared_ptr<ImageConfigHelper> ImageConfigHelperPtr;
@ -74,7 +74,6 @@ public:
    virtual void GetSequence(size_t sequenceId, std::vector<SequenceDataPtr>& result) override
    {
        assert(sequenceId == m_description.m_id);
        UNUSED(sequenceId);
        const auto& imageSequence = m_description;

        auto image = std::make_shared<DeserializedImage>();

@ -102,6 +101,7 @@ public:
        image->m_data = image->m_image.data;
        ImageDimensions dimensions(cvImage.cols, cvImage.rows, cvImage.channels());
        image->m_sampleLayout = std::make_shared<TensorShape>(dimensions.AsTensorShape(HWC));
        image->m_id = sequenceId;
        image->m_numberOfSamples = 1;
        image->m_chunk = shared_from_this();
        result.push_back(image);

@ -145,7 +145,7 @@ ImageDataDeserializer::ImageDataDeserializer(const ConfigParameters& config)
        RuntimeError("Unsupported label element type '%d'.", (int)label->m_elementType);
    }

    CreateSequenceDescriptions(configHelper.GetMapPath(), labelDimension);
    CreateSequenceDescriptions(configHelper.GetMapPath(), labelDimension, configHelper);
}

// Descriptions of chunks exposed by the image reader.

@ -171,21 +171,22 @@ void ImageDataDeserializer::GetSequencesForChunk(size_t chunkId, std::vector<Seq
    result.push_back(m_imageSequences[chunkId]);
}

void ImageDataDeserializer::CreateSequenceDescriptions(std::string mapPath, size_t labelDimension)
void ImageDataDeserializer::CreateSequenceDescriptions(std::string mapPath, size_t labelDimension, const ImageConfigHelper& config)
{
    UNUSED(labelDimension);

    std::ifstream mapFile(mapPath);
    if (!mapFile)
    {
        RuntimeError("Could not open %s for reading.", mapPath.c_str());
    }

    size_t itemsPerLine = config.IsMultiViewCrop() ? 10 : 1;
    size_t curId = 0;
    std::string line;
    PathReaderMap knownReaders;
    ImageSequenceDescription description;
    description.m_numberOfSamples = 1;
    description.m_isValid = true;
    PathReaderMap knownReaders;

    for (size_t lineIndex = 0; std::getline(mapFile, line); ++lineIndex)
    {
        std::stringstream ss(line);

@ -194,26 +195,31 @@ void ImageDataDeserializer::CreateSequenceDescriptions(std::string mapPath, size
        if (!std::getline(ss, imagePath, '\t') || !std::getline(ss, classId, '\t'))
            RuntimeError("Invalid map file format, must contain 2 tab-delimited columns, line %" PRIu64 " in file %s.", lineIndex, mapPath.c_str());

        description.m_id = lineIndex;
        description.m_chunkId = lineIndex;
        description.m_path = imagePath;
        char* eptr;
        errno = 0;
        size_t cid = strtoull(classId.c_str(), &eptr, 10);
        if (classId.c_str() == eptr || errno == ERANGE)
            RuntimeError("Cannot parse label value on line %" PRIu64 ", second column, in file %s.", lineIndex, mapPath.c_str());
        description.m_classId = cid;
        description.m_key.m_major = description.m_id;
        description.m_key.m_minor = 0;

        if (description.m_classId >= labelDimension)
        if (cid >= labelDimension)
        {
            RuntimeError(
                "Image '%s' has invalid class id '%" PRIu64 "'. Expected label dimension is '%" PRIu64 "'. Line %" PRIu64 " in file %s.",
                imagePath.c_str(), description.m_classId, labelDimension, lineIndex, mapPath.c_str());
                imagePath.c_str(), cid, labelDimension, lineIndex, mapPath.c_str());
        }

        for (size_t start = curId; curId < start + itemsPerLine; curId++)
        {
            description.m_id = curId;
            description.m_chunkId = curId;
            description.m_path = imagePath;
            description.m_classId = cid;
            description.m_key.m_major = description.m_id;
            description.m_key.m_minor = 0;

            m_imageSequences.push_back(description);
            RegisterByteReader(description.m_id, description.m_path, knownReaders);
        }
        m_imageSequences.push_back(description);
        RegisterByteReader(description.m_id, description.m_path, knownReaders);
    }
}
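CreateSequenceDescriptions reads a two-column, tab-delimited map file (image path, numeric class id) and, when multi-view cropping is enabled, registers ten sequence descriptions per line so every view gets its own id. A standalone sketch of that parsing and expansion under those assumptions (the Entry struct, function name, and sample paths are illustrative, not the reader's types):

#include <cerrno>
#include <cstdlib>
#include <iostream>
#include <sstream>
#include <stdexcept>
#include <string>
#include <vector>

struct Entry
{
    size_t id;        // one id per generated view
    std::string path;
    size_t classId;
};

// Parse "path\tlabel" lines and emit itemsPerLine entries per line
// (10 when multi-view cropping is enabled, otherwise 1).
std::vector<Entry> ParseMapFile(std::istream& mapFile, size_t labelDimension, size_t itemsPerLine)
{
    std::vector<Entry> entries;
    std::string line;
    size_t curId = 0;
    for (size_t lineIndex = 0; std::getline(mapFile, line); ++lineIndex)
    {
        std::stringstream ss(line);
        std::string imagePath, classId;
        if (!std::getline(ss, imagePath, '\t') || !std::getline(ss, classId, '\t'))
            throw std::runtime_error("Invalid map file format on line " + std::to_string(lineIndex));

        char* eptr = nullptr;
        errno = 0;
        size_t cid = std::strtoull(classId.c_str(), &eptr, 10);
        if (classId.c_str() == eptr || errno == ERANGE || cid >= labelDimension)
            throw std::runtime_error("Bad label on line " + std::to_string(lineIndex));

        // Expand one map line into itemsPerLine descriptions with consecutive ids.
        for (size_t start = curId; curId < start + itemsPerLine; curId++)
            entries.push_back({curId, imagePath, cid});
    }
    return entries;
}

int main()
{
    std::istringstream mapFile("images/cat.jpg\t3\nimages/dog.jpg\t5\n");
    auto entries = ParseMapFile(mapFile, /*labelDimension=*/10, /*itemsPerLine=*/10);
    std::cout << entries.size() << " descriptions\n";                        // 20
    std::cout << entries[10].path << " view id " << entries[10].id << "\n";  // images/dog.jpg view id 10
}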
|
|
@ -8,6 +8,7 @@
#include "DataDeserializerBase.h"
#include "Config.h"
#include "ByteReader.h"
#include "ImageConfigHelper.h"
#include <unordered_map>

namespace Microsoft { namespace MSR { namespace CNTK {

@ -33,7 +34,7 @@ public:

private:
    // Creates a set of sequence descriptions.
    void CreateSequenceDescriptions(std::string mapPath, size_t labelDimension);
    void CreateSequenceDescriptions(std::string mapPath, size_t labelDimension, const ImageConfigHelper& config);

    // Image sequence descriptions. Currently, a sequence contains a single sample only.
    struct ImageSequenceDescription : public SequenceDescription
|
|
@ -55,8 +55,6 @@
|
|||
</ImportGroup>
|
||||
<PropertyGroup Label="UserMacros" />
|
||||
<PropertyGroup>
|
||||
<IncludePath>..\..\common\include;..\..\math;$(OPENCV_PATH)\include;$(ZipInclude);$(IncludePath);</IncludePath>
|
||||
<LibraryPath>$(SolutionDir)$(Platform)\$(Configuration);$(OPENCV_PATH)\x64\vc12\lib;$(ZipLibPath);$(LibraryPath);</LibraryPath>
|
||||
<OpenCVLib Condition="$(HasOpenCV)">opencv_world300.lib</OpenCVLib>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="$(DebugBuild)">
|
||||
|
@ -87,11 +85,18 @@ if "$(UseZip)" == "true" if exist "$(ZLIB_PATH)\bin\zlib1.dll" (xcopy /I /D /Y "
|
|||
<Message>Copying dependencies</Message>
|
||||
</PostBuildEvent>
|
||||
</ItemDefinitionGroup>
|
||||
<ItemDefinitionGroup>
|
||||
<ClCompile>
|
||||
<AdditionalIncludeDirectories>$(SolutionDir)Source\Common\Include;$(SolutionDir)Source\Math;$(OPENCV_PATH)\include;$(ZipInclude);$(SolutionDir)Source\Readers\ReaderLib</AdditionalIncludeDirectories>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<AdditionalLibraryDirectories>$(OutDir);$(OPENCV_PATH)\x64\vc12\lib;$(ZipLibPath)</AdditionalLibraryDirectories>
|
||||
</Link>
|
||||
</ItemDefinitionGroup>
|
||||
<ItemDefinitionGroup Condition="$(DebugBuild)">
|
||||
<ClCompile>
|
||||
<Optimization>Disabled</Optimization>
|
||||
<PreprocessorDefinitions>_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<AdditionalIncludeDirectories>../ReaderLib</AdditionalIncludeDirectories>
|
||||
</ClCompile>
|
||||
</ItemDefinitionGroup>
|
||||
<ItemDefinitionGroup Condition="$(ReleaseBuild)">
|
||||
|
@ -101,7 +106,6 @@ if "$(UseZip)" == "true" if exist "$(ZLIB_PATH)\bin\zlib1.dll" (xcopy /I /D /Y "
|
|||
<IntrinsicFunctions>true</IntrinsicFunctions>
|
||||
<PreprocessorDefinitions>NDEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<AdditionalOptions>/d2Zi+ %(AdditionalOptions)</AdditionalOptions>
|
||||
<AdditionalIncludeDirectories>../ReaderLib</AdditionalIncludeDirectories>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<EnableCOMDATFolding>true</EnableCOMDATFolding>
|
||||
|
|
|
@ -68,7 +68,7 @@ ImageTransformerBase::Apply(SequenceDataPtr sequence,
    auto result = std::make_shared<ImageSequenceData>();
    int type = CV_MAKETYPE(typeId, channels);
    cv::Mat buffer = cv::Mat(rows, columns, type, inputSequence.m_data);
    Apply(buffer);
    Apply(sequence->m_id, buffer);
    if (!buffer.isContinuous())
    {
        buffer = buffer.clone();

@ -131,7 +131,7 @@ void CropTransformer::InitFromConfig(const ConfigParameters &config)
    }
}

void CropTransformer::Apply(cv::Mat &mat)
void CropTransformer::Apply(size_t id, cv::Mat &mat)
{
    auto seed = GetSeed();
    auto rng = m_rngs.pop_or_create(

@ -161,8 +161,11 @@ void CropTransformer::Apply(cv::Mat &mat)
        RuntimeError("Jitter type currently not implemented.");
    }

    mat = mat(GetCropRect(m_cropType, mat.rows, mat.cols, ratio, *rng));
    if (m_hFlip && std::bernoulli_distribution()(*rng))
    int viewIndex = m_cropType == CropType::MultiView10 ? (int)(id % 10) : 0;

    mat = mat(GetCropRect(m_cropType, viewIndex, mat.rows, mat.cols, ratio, *rng));
    if ((m_hFlip && std::bernoulli_distribution()(*rng)) ||
        viewIndex >= 5)
    {
        cv::flip(mat, mat, 1);
    }

@ -183,6 +186,11 @@ CropTransformer::ParseCropType(const std::string &src)
        return CropType::Random;
    }

    if (AreEqualIgnoreCase(src, "multiview10"))
    {
        return CropType::MultiView10;
    }

    RuntimeError("Invalid crop type: %s.", src.c_str());
}

@ -212,7 +220,7 @@ CropTransformer::ParseJitterType(const std::string &src)
    RuntimeError("Invalid jitter type: %s.", src.c_str());
}

cv::Rect CropTransformer::GetCropRect(CropType type, int crow, int ccol,
cv::Rect CropTransformer::GetCropRect(CropType type, int viewIndex, int crow, int ccol,
                                      double cropRatio, std::mt19937 &rng)
{
    assert(crow > 0);

@ -225,13 +233,50 @@ cv::Rect CropTransformer::GetCropRect(CropType type, int crow, int ccol,
    switch (type)
    {
    case CropType::Center:
        assert(viewIndex == 0);
        xOff = (ccol - cropSize) / 2;
        yOff = (crow - cropSize) / 2;
        break;
    case CropType::Random:
        assert(viewIndex == 0);
        xOff = UniIntT(0, ccol - cropSize)(rng);
        yOff = UniIntT(0, crow - cropSize)(rng);
        break;
    case CropType::MultiView10:
    {
        assert(0 <= viewIndex && viewIndex < 10);
        // 0 - 4: 4 corners + center crop. 5 - 9: same, but with a flip.
        int isubView = viewIndex % 5;
        switch (isubView)
        {
        // top-left
        case 0:
            xOff = 0;
            yOff = 0;
            break;
        // top-right
        case 1:
            xOff = ccol - cropSize;
            yOff = 0;
            break;
        // bottom-left
        case 2:
            xOff = 0;
            yOff = crow - cropSize;
            break;
        // bottom-right
        case 3:
            xOff = ccol - cropSize;
            yOff = crow - cropSize;
            break;
        // center
        case 4:
            xOff = (ccol - cropSize) / 2;
            yOff = (crow - cropSize) / 2;
            break;
        }
        break;
    }
    default:
        assert(false);
    }

@ -291,8 +336,9 @@ void ScaleTransformer::InitFromConfig(const ConfigParameters &config)
    m_interp.push_back(cv::INTER_LINEAR);
}

void ScaleTransformer::Apply(cv::Mat &mat)
void ScaleTransformer::Apply(size_t id, cv::Mat &mat)
{
    UNUSED(id);
    // If matrix has not been converted to the right type, do it now as rescaling
    // requires floating point type.
    //

@ -364,8 +410,9 @@ void MeanTransformer::InitFromConfig(const ConfigParameters &config)
    }
}

void MeanTransformer::Apply(cv::Mat &mat)
void MeanTransformer::Apply(size_t id, cv::Mat &mat)
{
    UNUSED(id);
    assert(m_meanImg.size() == cv::Size(0, 0) ||
           (m_meanImg.size() == mat.size() &&
            m_meanImg.channels() == mat.channels()));

@ -439,28 +486,25 @@ TransposeTransformer::TypedApply(SequenceDataPtr sequence,
    assert(inputStream.m_storageType == StorageType::dense);
    auto inputSequence = static_cast<DenseSequenceData&>(*sequence.get());
    assert(inputSequence.m_numberOfSamples == 1);
    assert(inputStream.m_sampleLayout->GetNumElements() ==
           outputStream.m_sampleLayout->GetNumElements());
    assert(inputStream.m_sampleLayout->GetNumElements() == outputStream.m_sampleLayout->GetNumElements());

    size_t count = inputStream.m_sampleLayout->GetNumElements() * GetSizeByType(inputStream.m_elementType);

    auto result = std::make_shared<DenseSequenceWithBuffer>();
    result->m_buffer.resize(count);

    TElemType* typedBuffer = reinterpret_cast<TElemType*>(result->m_buffer.data());
    ImageDimensions dimensions(*inputStream.m_sampleLayout, ImageLayoutKind::HWC);

    size_t rowCount = dimensions.m_height * dimensions.m_width;
    size_t channelCount = dimensions.m_numChannels;
    TElemType* data = reinterpret_cast<TElemType*>(inputSequence.m_data);

    for (size_t rowIndex = 0; rowIndex < rowCount; rowIndex++)
    auto src = reinterpret_cast<TElemType*>(inputSequence.m_data);
    auto dst = reinterpret_cast<TElemType*>(result->m_buffer.data());

    for (size_t irow = 0; irow < rowCount; irow++)
    {
        for (size_t columnIndex = 0; columnIndex < channelCount;
             columnIndex++)
        for (size_t icol = 0; icol < channelCount; icol++)
        {
            typedBuffer[columnIndex * rowCount + rowIndex] =
                data[rowIndex * channelCount + columnIndex];
            dst[icol * rowCount + irow] = src[irow * channelCount + icol];
        }
    }
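The rewritten TypedApply loop above converts a packed HWC sample to CHW by copying element (row, channel) of the source to (channel, row) of a channels-by-(height*width) destination. A tiny self-contained check of that index arithmetic on a 2x2 RGB sample, using plain arrays rather than the reader's sequence types:

#include <cassert>
#include <cstddef>
#include <vector>

int main()
{
    const size_t height = 2, width = 2, channels = 3;
    const size_t rowCount = height * width;          // number of spatial positions
    // HWC source: each value encodes 10*position + channel for easy checking.
    std::vector<float> src(rowCount * channels), dst(rowCount * channels);
    for (size_t irow = 0; irow < rowCount; irow++)
        for (size_t icol = 0; icol < channels; icol++)
            src[irow * channels + icol] = float(10 * irow + icol);

    // Same loop structure as the TypedApply transpose above (HWC -> CHW).
    for (size_t irow = 0; irow < rowCount; irow++)
        for (size_t icol = 0; icol < channels; icol++)
            dst[icol * rowCount + irow] = src[irow * channels + icol];

    // In CHW, each channel plane is contiguous: plane c holds the values for channel c.
    for (size_t c = 0; c < channels; c++)
        for (size_t p = 0; p < rowCount; p++)
            assert(dst[c * rowCount + p] == float(10 * p + c));
    return 0;
}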
|
|
|
@ -53,7 +53,7 @@ protected:
                      const StreamDescription &outputStream) override;

    // The only function that should be redefined by the inherited classes.
    virtual void Apply(cv::Mat &from) = 0;
    virtual void Apply(size_t id, cv::Mat &from) = 0;

private:
    std::vector<StreamDescriptionPtr> m_outputStreams;

@ -70,13 +70,14 @@ public:
                        const ConfigParameters &readerConfig) override;

protected:
    virtual void Apply(cv::Mat &mat) override;
    virtual void Apply(size_t id, cv::Mat &mat) override;

private:
    enum class CropType
    {
        Center = 0,
        Random = 1
        Random = 1,
        MultiView10 = 2
    };
    enum class RatioJitterType
    {

@ -89,7 +90,7 @@ private:
    void InitFromConfig(const ConfigParameters &config);
    CropType ParseCropType(const std::string &src);
    RatioJitterType ParseJitterType(const std::string &src);
    cv::Rect GetCropRect(CropType type, int crow, int ccol, double cropRatio,
    cv::Rect GetCropRect(CropType type, int viewIndex, int crow, int ccol, double cropRatio,
                         std::mt19937 &rng);

    conc_stack<std::unique_ptr<std::mt19937>> m_rngs;

@ -110,7 +111,7 @@ public:

private:
    void InitFromConfig(const ConfigParameters &config);
    virtual void Apply(cv::Mat &mat) override;
    virtual void Apply(size_t id, cv::Mat &mat) override;

    using StrToIntMapT = std::unordered_map<std::string, int>;
    StrToIntMapT m_interpMap;

@ -131,7 +132,7 @@ public:
                        const ConfigParameters &readerConfig) override;

private:
    virtual void Apply(cv::Mat &mat) override;
    virtual void Apply(size_t id, cv::Mat &mat) override;
    void InitFromConfig(const ConfigParameters &config);

    cv::Mat m_meanImg;
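With CropType::MultiView10 the sequence id selects one of ten deterministic views: sub-views 0-4 are the four corners plus the centre crop, and views 5-9 repeat them with a horizontal flip (hence the viewIndex >= 5 flip condition in CropTransformer::Apply). A standalone sketch of the resulting offset table for a given image and crop size; the struct and function names here are illustrative, not CNTK's:

#include <cassert>
#include <iostream>

struct CropView
{
    int xOff;
    int yOff;
    bool flip;
};

// Mirrors the MultiView10 branch of GetCropRect: viewIndex % 5 picks
// top-left, top-right, bottom-left, bottom-right or centre, and
// viewIndex >= 5 requests an additional horizontal flip.
CropView MultiView10Crop(int viewIndex, int crow, int ccol, int cropSize)
{
    assert(0 <= viewIndex && viewIndex < 10);
    CropView v{0, 0, viewIndex >= 5};
    switch (viewIndex % 5)
    {
    case 0: v.xOff = 0;                     v.yOff = 0;                     break; // top-left
    case 1: v.xOff = ccol - cropSize;       v.yOff = 0;                     break; // top-right
    case 2: v.xOff = 0;                     v.yOff = crow - cropSize;       break; // bottom-left
    case 3: v.xOff = ccol - cropSize;       v.yOff = crow - cropSize;       break; // bottom-right
    case 4: v.xOff = (ccol - cropSize) / 2; v.yOff = (crow - cropSize) / 2; break; // center
    }
    return v;
}

int main()
{
    // Example: a 256x256 image with a 224x224 crop.
    for (int view = 0; view < 10; view++)
    {
        CropView v = MultiView10Crop(view, /*crow=*/256, /*ccol=*/256, /*cropSize=*/224);
        std::cout << "view " << view << ": xOff=" << v.xOff << " yOff=" << v.yOff
                  << (v.flip ? " flipped" : "") << "\n";
    }
}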
|
|
@ -46,16 +46,17 @@
|
|||
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
|
||||
</ImportGroup>
|
||||
<PropertyGroup Label="UserMacros" />
|
||||
<PropertyGroup Condition="$(DebugBuild)">
|
||||
<LinkIncremental>true</LinkIncremental>
|
||||
<IncludePath>..\..\common\include;..\..\Math;$(VCInstallDir)include;$(WindowsSDK_IncludePath);</IncludePath>
|
||||
<LibraryPath>$(SolutionDir)$(Platform)\$(Configuration);$(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(WindowsSDK_LibraryPath_x64);</LibraryPath>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="$(ReleaseBuild)">
|
||||
<LinkIncremental>false</LinkIncremental>
|
||||
<IncludePath>..\..\common\include;..\..\Math;$(VCInstallDir)include;$(WindowsSDK_IncludePath);</IncludePath>
|
||||
<LibraryPath>$(SolutionDir)$(Platform)\$(Configuration);$(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(WindowsSDK_LibraryPath_x64);</LibraryPath>
|
||||
<PropertyGroup>
|
||||
<LinkIncremental>$(DebugBuild)</LinkIncremental>
|
||||
</PropertyGroup>
|
||||
<ItemDefinitionGroup>
|
||||
<ClCompile>
|
||||
<AdditionalIncludeDirectories>$(SolutionDir)Source\Common\Include;$(SolutionDir)Source\Math</AdditionalIncludeDirectories>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<AdditionalLibraryDirectories>$(OutDir)</AdditionalLibraryDirectories>
|
||||
</Link>
|
||||
</ItemDefinitionGroup>
|
||||
<ItemDefinitionGroup Condition="$(DebugBuild)">
|
||||
<ClCompile>
|
||||
<PrecompiledHeader>Use</PrecompiledHeader>
|
||||
|
@ -63,14 +64,12 @@
|
|||
<Optimization>Disabled</Optimization>
|
||||
<PreprocessorDefinitions>WIN32;_DEBUG;_WINDOWS;_USRDLL;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<SDLCheck>true</SDLCheck>
|
||||
<AdditionalIncludeDirectories>..\..\common\include;..\..\Math</AdditionalIncludeDirectories>
|
||||
<TreatWarningAsError>true</TreatWarningAsError>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Console</SubSystem>
|
||||
<GenerateDebugInformation>true</GenerateDebugInformation>
|
||||
<AdditionalDependencies>Math.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
|
||||
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories>
|
||||
</Link>
|
||||
</ItemDefinitionGroup>
|
||||
<ItemDefinitionGroup Condition="$(ReleaseBuild)">
|
||||
|
@ -82,7 +81,6 @@
|
|||
<IntrinsicFunctions>true</IntrinsicFunctions>
|
||||
<PreprocessorDefinitions>WIN32;NDEBUG;_WINDOWS;_USRDLL;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<SDLCheck>true</SDLCheck>
|
||||
<AdditionalIncludeDirectories>..\..\common\include;..\..\Math</AdditionalIncludeDirectories>
|
||||
<OpenMPSupport>false</OpenMPSupport>
|
||||
<AdditionalOptions>/d2Zi+ %(AdditionalOptions)</AdditionalOptions>
|
||||
<TreatWarningAsError>true</TreatWarningAsError>
|
||||
|
@ -93,7 +91,6 @@
|
|||
<EnableCOMDATFolding>true</EnableCOMDATFolding>
|
||||
<OptimizeReferences>true</OptimizeReferences>
|
||||
<AdditionalDependencies>Math.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
|
||||
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories>
|
||||
<Profile>true</Profile>
|
||||
</Link>
|
||||
</ItemDefinitionGroup>
|
||||
|
|
|
@ -48,14 +48,18 @@
|
|||
<PropertyGroup Label="UserMacros" />
|
||||
<PropertyGroup Condition="$(DebugBuild)">
|
||||
<LinkIncremental>true</LinkIncremental>
|
||||
<IncludePath>..\..\common\include;..\..\Math;$(VCInstallDir)include;$(WindowsSDK_IncludePath);</IncludePath>
|
||||
<LibraryPath>$(SolutionDir)$(Platform)\$(Configuration);$(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(WindowsSDK_LibraryPath_x64);</LibraryPath>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="$(ReleaseBuild)">
|
||||
<LinkIncremental>false</LinkIncremental>
|
||||
<IncludePath>..\..\common\include;..\..\Math;$(VCInstallDir)include;$(WindowsSDK_IncludePath);</IncludePath>
|
||||
<LibraryPath>$(SolutionDir)$(Platform)\$(Configuration);$(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(WindowsSDK_LibraryPath_x64);</LibraryPath>
|
||||
</PropertyGroup>
|
||||
<ItemDefinitionGroup>
|
||||
<ClCompile>
|
||||
<AdditionalIncludeDirectories>$(SolutionDir)Source\Common\Include;$(SolutionDir)Source\Math</AdditionalIncludeDirectories>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<AdditionalLibraryDirectories>$(OutDir)</AdditionalLibraryDirectories>
|
||||
</Link>
|
||||
</ItemDefinitionGroup>
|
||||
<ItemDefinitionGroup Condition="$(DebugBuild)">
|
||||
<ClCompile>
|
||||
<PrecompiledHeader>Use</PrecompiledHeader>
|
||||
|
@ -63,14 +67,12 @@
|
|||
<Optimization>Disabled</Optimization>
|
||||
<PreprocessorDefinitions>WIN32;_DEBUG;_WINDOWS;_USRDLL;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<SDLCheck>true</SDLCheck>
|
||||
<AdditionalIncludeDirectories>..\..\common\include;..\..\Math</AdditionalIncludeDirectories>
|
||||
<TreatWarningAsError>true</TreatWarningAsError>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Console</SubSystem>
|
||||
<GenerateDebugInformation>true</GenerateDebugInformation>
|
||||
<AdditionalDependencies>Math.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
|
||||
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories>
|
||||
</Link>
|
||||
</ItemDefinitionGroup>
|
||||
<ItemDefinitionGroup Condition="$(ReleaseBuild)">
|
||||
|
@ -82,7 +84,6 @@
|
|||
<IntrinsicFunctions>true</IntrinsicFunctions>
|
||||
<PreprocessorDefinitions>WIN32;NDEBUG;_WINDOWS;_USRDLL;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<SDLCheck>true</SDLCheck>
|
||||
<AdditionalIncludeDirectories>..\..\common\include;..\..\Math</AdditionalIncludeDirectories>
|
||||
<OpenMPSupport>false</OpenMPSupport>
|
||||
<AdditionalOptions>/d2Zi+ %(AdditionalOptions)</AdditionalOptions>
|
||||
<TreatWarningAsError>true</TreatWarningAsError>
|
||||
|
@ -93,7 +94,6 @@
|
|||
<EnableCOMDATFolding>true</EnableCOMDATFolding>
|
||||
<OptimizeReferences>true</OptimizeReferences>
|
||||
<AdditionalDependencies>Math.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
|
||||
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories>
|
||||
<Profile>true</Profile>
|
||||
</Link>
|
||||
</ItemDefinitionGroup>
|
||||
|
|
|
@ -45,16 +45,17 @@
|
|||
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
|
||||
</ImportGroup>
|
||||
<PropertyGroup Label="UserMacros" />
|
||||
<PropertyGroup Condition="$(DebugBuild)">
|
||||
<LinkIncremental>true</LinkIncremental>
|
||||
<IncludePath>..\..\common\include;..\..\Math;$(VCInstallDir)include;$(WindowsSDK_IncludePath);</IncludePath>
|
||||
<LibraryPath>$(SolutionDir)$(Platform)\$(Configuration);$(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(WindowsSDK_LibraryPath_x64);</LibraryPath>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="$(ReleaseBuild)">
|
||||
<LinkIncremental>false</LinkIncremental>
|
||||
<IncludePath>$(MSMPI_INC);..\..\common\include;..\..\Math;$(VCInstallDir)include;$(WindowsSDK_IncludePath);</IncludePath>
|
||||
<LibraryPath>$(MSMPI_LIB64);$(SolutionDir)$(Platform)\$(Configuration);$(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(WindowsSDK_LibraryPath_x64);</LibraryPath>
|
||||
<PropertyGroup>
|
||||
<LinkIncremental>$(DebugBuild)</LinkIncremental>
|
||||
</PropertyGroup>
|
||||
<ItemDefinitionGroup>
|
||||
<ClCompile>
|
||||
<AdditionalIncludeDirectories>$(SolutionDir)Source\common\include;$(SolutionDir)Source\Math</AdditionalIncludeDirectories>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<AdditionalLibraryDirectories>$(OutDir)</AdditionalLibraryDirectories>
|
||||
</Link>
|
||||
</ItemDefinitionGroup>
|
||||
<ItemDefinitionGroup Condition="$(DebugBuild)">
|
||||
<ClCompile>
|
||||
<PrecompiledHeader>Use</PrecompiledHeader>
|
||||
|
@ -62,7 +63,6 @@
|
|||
<Optimization>Disabled</Optimization>
|
||||
<PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;WIN32;_DEBUG;_WINDOWS;_USRDLL;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<SDLCheck>true</SDLCheck>
|
||||
<AdditionalIncludeDirectories>..\..\common\include;..\..\Math</AdditionalIncludeDirectories>
|
||||
<TreatWarningAsError>false</TreatWarningAsError>
|
||||
<AdditionalOptions>/bigobj %(AdditionalOptions)</AdditionalOptions>
|
||||
</ClCompile>
|
||||
|
@ -70,7 +70,6 @@
|
|||
<SubSystem>Console</SubSystem>
|
||||
<GenerateDebugInformation>true</GenerateDebugInformation>
|
||||
<AdditionalDependencies>Math.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
|
||||
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories>
|
||||
</Link>
|
||||
</ItemDefinitionGroup>
|
||||
<ItemDefinitionGroup Condition="$(ReleaseBuild)">
|
||||
|
@ -82,7 +81,6 @@
|
|||
<IntrinsicFunctions>true</IntrinsicFunctions>
|
||||
<PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;WIN32;NDEBUG;_WINDOWS;_USRDLL;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<SDLCheck>true</SDLCheck>
|
||||
<AdditionalIncludeDirectories>..\..\common\include;..\..\Math</AdditionalIncludeDirectories>
|
||||
<OpenMPSupport>false</OpenMPSupport>
|
||||
<AdditionalOptions>/d2Zi+ %(AdditionalOptions)</AdditionalOptions>
|
||||
<TreatWarningAsError>true</TreatWarningAsError>
|
||||
|
@ -93,7 +91,6 @@
|
|||
<EnableCOMDATFolding>true</EnableCOMDATFolding>
|
||||
<OptimizeReferences>true</OptimizeReferences>
|
||||
<AdditionalDependencies>Math.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
|
||||
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories>
|
||||
<Profile>true</Profile>
|
||||
</Link>
|
||||
</ItemDefinitionGroup>
|
||||
|
|
|
@ -48,7 +48,7 @@
|
|||
<WarningLevel>Level4</WarningLevel>
|
||||
<SDLCheck>true</SDLCheck>
|
||||
<TreatWarningAsError>true</TreatWarningAsError>
|
||||
<AdditionalIncludeDirectories>..\..\common\include;..\..\math</AdditionalIncludeDirectories>
|
||||
<AdditionalIncludeDirectories>$(SolutionDir)Source\Common\Include;$(SolutionDir)Source\Math</AdditionalIncludeDirectories>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<GenerateDebugInformation>true</GenerateDebugInformation>
|
||||
|
|
|
@ -47,14 +47,18 @@
|
|||
<PropertyGroup Label="UserMacros" />
|
||||
<PropertyGroup Condition="$(DebugBuild)">
|
||||
<LinkIncremental>true</LinkIncremental>
|
||||
<IncludePath>..\..\common\include;..\..\Math;$(VCInstallDir)include;$(WindowsSDK_IncludePath);</IncludePath>
|
||||
<LibraryPath>$(SolutionDir)$(Platform)\$(Configuration);$(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(WindowsSDK_LibraryPath_x64);</LibraryPath>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="$(ReleaseBuild)">
|
||||
<LinkIncremental>false</LinkIncremental>
|
||||
<IncludePath>$(MSMPI_INC);..\..\common\include;..\..\Math;$(VCInstallDir)include;$(WindowsSDK_IncludePath);</IncludePath>
|
||||
<LibraryPath>$(MSMPI_LIB64);$(SolutionDir)$(Platform)\$(Configuration);$(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(WindowsSDK_LibraryPath_x64);</LibraryPath>
|
||||
</PropertyGroup>
|
||||
<ItemDefinitionGroup>
|
||||
<ClCompile>
|
||||
<AdditionalIncludeDirectories>$(SolutionDir)Source\Common\Include;$(SolutionDir)Source\Math</AdditionalIncludeDirectories>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<AdditionalLibraryDirectories>$(OutDir)</AdditionalLibraryDirectories>
|
||||
</Link>
|
||||
</ItemDefinitionGroup>
|
||||
<ItemDefinitionGroup Condition="$(DebugBuild)">
|
||||
<ClCompile>
|
||||
<PrecompiledHeader>NotUsing</PrecompiledHeader>
|
||||
|
@ -62,7 +66,6 @@
|
|||
<Optimization>Disabled</Optimization>
|
||||
<PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;WIN32;_DEBUG;_WINDOWS;_USRDLL;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<SDLCheck>true</SDLCheck>
|
||||
<AdditionalIncludeDirectories>..\..\common\include;..\..\Math</AdditionalIncludeDirectories>
|
||||
<TreatWarningAsError>true</TreatWarningAsError>
|
||||
<AdditionalOptions>/bigobj %(AdditionalOptions)</AdditionalOptions>
|
||||
</ClCompile>
|
||||
|
@ -70,7 +73,6 @@
|
|||
<SubSystem>Console</SubSystem>
|
||||
<GenerateDebugInformation>true</GenerateDebugInformation>
|
||||
<AdditionalDependencies>Math.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
|
||||
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories>
|
||||
</Link>
|
||||
</ItemDefinitionGroup>
|
||||
<ItemDefinitionGroup Condition="$(ReleaseBuild)">
|
||||
|
@ -82,7 +84,6 @@
|
|||
<IntrinsicFunctions>true</IntrinsicFunctions>
|
||||
<PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;WIN32;NDEBUG;_WINDOWS;_USRDLL;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<SDLCheck>true</SDLCheck>
|
||||
<AdditionalIncludeDirectories>..\..\common\include;..\..\Math</AdditionalIncludeDirectories>
|
||||
<OpenMPSupport>false</OpenMPSupport>
|
||||
<AdditionalOptions>/d2Zi+ %(AdditionalOptions)</AdditionalOptions>
|
||||
<TreatWarningAsError>true</TreatWarningAsError>
|
||||
|
@ -93,7 +94,6 @@
|
|||
<EnableCOMDATFolding>true</EnableCOMDATFolding>
|
||||
<OptimizeReferences>true</OptimizeReferences>
|
||||
<AdditionalDependencies>Math.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
|
||||
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories>
|
||||
<Profile>true</Profile>
|
||||
</Link>
|
||||
</ItemDefinitionGroup>
|
||||
|
|
|
@ -47,14 +47,18 @@
|
|||
<PropertyGroup Label="UserMacros" />
|
||||
<PropertyGroup Condition="$(DebugBuild)">
|
||||
<LinkIncremental>true</LinkIncremental>
|
||||
<IncludePath>..\..\common\include;..\..\Math;$(VCInstallDir)include;$(WindowsSDK_IncludePath);</IncludePath>
|
||||
<LibraryPath>$(SolutionDir)$(Platform)\$(Configuration);$(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(WindowsSDK_LibraryPath_x64);</LibraryPath>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="$(ReleaseBuild)">
|
||||
<LinkIncremental>false</LinkIncremental>
|
||||
<IncludePath>..\..\common\include;..\..\Math;$(VCInstallDir)include;$(WindowsSDK_IncludePath);</IncludePath>
|
||||
<LibraryPath>$(SolutionDir)$(Platform)\$(Configuration);$(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(WindowsSDK_LibraryPath_x64);</LibraryPath>
|
||||
</PropertyGroup>
|
||||
<ItemDefinitionGroup>
|
||||
<ClCompile>
|
||||
<AdditionalIncludeDirectories>$(SolutionDir)Source\Common\Include;$(SolutionDir)Source\Math</AdditionalIncludeDirectories>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<AdditionalLibraryDirectories>$(OutDir)</AdditionalLibraryDirectories>
|
||||
</Link>
|
||||
</ItemDefinitionGroup>
|
||||
<ItemDefinitionGroup Condition="$(DebugBuild)">
|
||||
<ClCompile>
|
||||
<PrecompiledHeader>Use</PrecompiledHeader>
|
||||
|
@ -62,14 +66,12 @@
|
|||
<Optimization>Disabled</Optimization>
|
||||
<PreprocessorDefinitions>WIN32;_DEBUG;_WINDOWS;_USRDLL;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<SDLCheck>true</SDLCheck>
|
||||
<AdditionalIncludeDirectories>..\..\common\include;..\..\Math</AdditionalIncludeDirectories>
|
||||
<TreatWarningAsError>true</TreatWarningAsError>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Console</SubSystem>
|
||||
<GenerateDebugInformation>true</GenerateDebugInformation>
|
||||
<AdditionalDependencies>Math.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
|
||||
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories>
|
||||
</Link>
|
||||
</ItemDefinitionGroup>
|
||||
<ItemDefinitionGroup Condition="$(ReleaseBuild)">
|
||||
|
@ -81,7 +83,6 @@
|
|||
<IntrinsicFunctions>true</IntrinsicFunctions>
|
||||
<PreprocessorDefinitions>WIN32;NDEBUG;_WINDOWS;_USRDLL;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<SDLCheck>true</SDLCheck>
|
||||
<AdditionalIncludeDirectories>..\..\common\include;..\..\Math</AdditionalIncludeDirectories>
|
||||
<OpenMPSupport>false</OpenMPSupport>
|
||||
<AdditionalOptions>/d2Zi+ %(AdditionalOptions)</AdditionalOptions>
|
||||
<TreatWarningAsError>true</TreatWarningAsError>
|
||||
|
@ -92,7 +93,6 @@
|
|||
<EnableCOMDATFolding>true</EnableCOMDATFolding>
|
||||
<OptimizeReferences>true</OptimizeReferences>
|
||||
<AdditionalDependencies>Math.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
|
||||
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories>
|
||||
<Profile>true</Profile>
|
||||
</Link>
|
||||
</ItemDefinitionGroup>
|
||||
|
|
|
@ -46,22 +46,25 @@
|
|||
</ImportGroup>
|
||||
<PropertyGroup Label="UserMacros" />
|
||||
<PropertyGroup>
|
||||
<IncludePath>..\SequenceTrainingLib;..\ComputationNetworkLib;..\Math;..\Common\Include;..\CNTK\BrainScript;$(MSMPI_INC);$(VCInstallDir)include;$(WindowsSDK_IncludePath)</IncludePath>
|
||||
<IncludePath Condition="'$(CNTK_ENABLE_1BitSGD)'=='true'">..\1BitSGD;$(IncludePath)</IncludePath>
|
||||
<LibraryPath>$(MSMPI_LIB64);$(SolutionDir)$(Platform)\$(Configuration);$(VCInstallDir)lib\amd64;$(WindowsSDK_LibraryPath_x64)</LibraryPath>
|
||||
<PreBuildEventUseInBuild>false</PreBuildEventUseInBuild>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="$(ReleaseBuild)">
|
||||
<LinkIncremental>false</LinkIncremental>
|
||||
<ExecutablePath>$(ExecutablePath)</ExecutablePath>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="$(DebugBuild)">
|
||||
<LinkIncremental>true</LinkIncremental>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="$(GpuBuild)">
|
||||
<IncludePath>$(IncludePath);$(CudaInclude)</IncludePath>
|
||||
<LibraryPath>$(LibraryPath);$(CudaLibPath)</LibraryPath>
|
||||
<PropertyGroup>
|
||||
<LinkIncremental>$(DebugBuild)</LinkIncremental>
|
||||
</PropertyGroup>
|
||||
<ItemDefinitionGroup>
|
||||
<ClCompile>
|
||||
<AdditionalIncludeDirectories>$(SolutionDir)Source\SequenceTrainingLib;$(SolutionDir)Source\ComputationNetworkLib;$(SolutionDir)Source\Math;$(SolutionDir)Source\Common\Include;$(SolutionDir)Source\CNTK\BrainScript;$(MSMPI_INC);$(NvmlInclude)</AdditionalIncludeDirectories>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<AdditionalLibraryDirectories>$(MSMPI_LIB64);$(OutDir);$(NvmlLibPath)</AdditionalLibraryDirectories>
|
||||
</Link>
|
||||
</ItemDefinitionGroup>
|
||||
<!-- TODO can we merge with above? -->
|
||||
<ItemDefinitionGroup Condition="'$(CNTK_ENABLE_1BitSGD)'=='true'">
|
||||
<ClCompile>
|
||||
<AdditionalIncludeDirectories>$(SolutionDir)Source\1BitSGD;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
|
||||
</ClCompile>
|
||||
</ItemDefinitionGroup>
|
||||
<ItemDefinitionGroup Condition="$(DebugBuild)">
|
||||
<ClCompile>
|
||||
<PrecompiledHeader>
|
||||
|
@ -74,13 +77,11 @@
|
|||
<OpenMPSupport>true</OpenMPSupport>
|
||||
<TreatWarningAsError>true</TreatWarningAsError>
|
||||
<AdditionalOptions>/bigobj %(AdditionalOptions)</AdditionalOptions>
|
||||
<AdditionalIncludeDirectories>$(NvmlInclude)</AdditionalIncludeDirectories>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Console</SubSystem>
|
||||
<GenerateDebugInformation>true</GenerateDebugInformation>
|
||||
<AdditionalDependencies>ComputationNetworkLib.lib; Math.lib; kernel32.lib; user32.lib; shell32.lib; %(AdditionalDependencies)</AdditionalDependencies>
|
||||
<AdditionalLibraryDirectories>$(NvmlLibPath)</AdditionalLibraryDirectories>
|
||||
<DelayLoadDLLs>Math.dll; nvml.dll; $(CudaRuntimeDll)</DelayLoadDLLs>
|
||||
<StackReserveSize>100000000</StackReserveSize>
|
||||
</Link>
|
||||
|
@ -99,7 +100,6 @@
|
|||
<FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>
|
||||
<AdditionalOptions>/d2Zi+ %(AdditionalOptions)</AdditionalOptions>
|
||||
<TreatWarningAsError>true</TreatWarningAsError>
|
||||
<AdditionalIncludeDirectories>$(NvmlInclude)</AdditionalIncludeDirectories>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Console</SubSystem>
|
||||
|
@ -109,7 +109,6 @@
|
|||
<AdditionalDependencies>ComputationNetworkLib.lib; Math.lib; kernel32.lib; user32.lib; shell32.lib; %(AdditionalDependencies)</AdditionalDependencies>
|
||||
<Profile>true</Profile>
|
||||
<DelayLoadDLLs>Math.dll; nvml.dll; cudart64_70.dll</DelayLoadDLLs>
|
||||
<AdditionalLibraryDirectories>$(NvmlLibPath)</AdditionalLibraryDirectories>
|
||||
</Link>
|
||||
</ItemDefinitionGroup>
|
||||
<ItemDefinitionGroup Condition="$(CpuOnlyBuild)">
|
||||
|
@ -121,6 +120,12 @@
|
|||
</Link>
|
||||
</ItemDefinitionGroup>
|
||||
<ItemDefinitionGroup Condition="$(GpuBuild)">
|
||||
<ClCompile>
|
||||
<AdditionalIncludeDirectories>%(AdditionalIncludeDirectories);$(CudaInclude)</AdditionalIncludeDirectories>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<AdditionalLibraryDirectories>%(AdditionalLibraryDirectories);$(CudaLibPath)</AdditionalLibraryDirectories>
|
||||
</Link>
|
||||
<PostBuildEvent>
|
||||
<Command>if exist "%ProgramW6432%\NVIDIA Corporation\NVSMI" xcopy /I /D /Y "%ProgramW6432%\NVIDIA Corporation\NVSMI\nvml*.dll" $(TargetDir)</Command>
|
||||
<Message>Copying NVidia GDK extension DLL to target folder</Message>
|
||||
|
|
|
@ -42,15 +42,13 @@
|
|||
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
|
||||
</ImportGroup>
|
||||
<PropertyGroup Label="UserMacros" />
|
||||
<PropertyGroup>
|
||||
<IncludePath>$(VC_IncludePath);$(WindowsSDK_IncludePath);..\Common\Include;..\Math</IncludePath>
|
||||
</PropertyGroup>
|
||||
<ItemDefinitionGroup>
|
||||
<ClCompile>
|
||||
<WarningLevel>Level4</WarningLevel>
|
||||
<TreatWarningAsError>true</TreatWarningAsError>
|
||||
<PreprocessorDefinitions>WIN32;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<SDLCheck>true</SDLCheck>
|
||||
<AdditionalIncludeDirectories>$(SolutionDir)Source\Common\Include;$(SolutionDir)Source\Math</AdditionalIncludeDirectories>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Console</SubSystem>
|
||||
|
|
|
@ -1 +0,0 @@
__COMPLETED__
@ -1 +0,0 @@
__COMPLETED__
@ -1 +0,0 @@
__COMPLETED__
@ -1 +0,0 @@
__COMPLETED__
|
|
@ -0,0 +1,814 @@
|
|||
=== Running /home/mahilleb/CNTK/build/gpu/release/bin/cntk configFile=/home/mahilleb/CNTK/Tests/EndToEndTests/Examples/Image/MNIST/01_OneHidden/../../../../../../Examples/Image/MNIST/Config/01_OneHidden.cntk currentDirectory=/home/mahilleb/CNTK/Examples/Image/MNIST/Data RunDir=/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_01_OneHidden@release_gpu DataDir=/home/mahilleb/CNTK/Examples/Image/MNIST/Data ConfigDir=/home/mahilleb/CNTK/Tests/EndToEndTests/Examples/Image/MNIST/01_OneHidden/../../../../../../Examples/Image/MNIST/Config OutputDir=/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_01_OneHidden@release_gpu DeviceId=0 timestamping=true MNISTtrain=[reader=[randomize=none]] imageLayout="cudnn"
|
||||
-------------------------------------------------------------------
|
||||
Build info:
|
||||
|
||||
Built time: Apr 6 2016 15:52:46
|
||||
Last modified date: Tue Apr 5 14:19:05 2016
|
||||
Build type: release
|
||||
Build target: GPU
|
||||
With 1bit-SGD: no
|
||||
Math lib: acml
|
||||
CUDA_PATH: /usr/local/cuda-7.0
|
||||
CUB_PATH: /usr/local/cub-1.4.1
|
||||
CUDNN_PATH: /usr/local/cudnn-4.0
|
||||
Build Branch: mahilleb/MNISTLinux
|
||||
Build SHA1: ad5c8cd8002553a87d462a9a1ddcdabf2c84f519 (modified)
|
||||
Built by mahilleb on atleneu04
|
||||
Build Path: /home/mahilleb/CNTK
|
||||
-------------------------------------------------------------------
|
||||
Changed current directory to /home/mahilleb/CNTK/Examples/Image/MNIST/Data
|
||||
04/07/2016 14:04:38: -------------------------------------------------------------------
|
||||
04/07/2016 14:04:38: Build info:
|
||||
|
||||
04/07/2016 14:04:38: Built time: Apr 6 2016 15:52:46
|
||||
04/07/2016 14:04:38: Last modified date: Tue Apr 5 14:19:05 2016
|
||||
04/07/2016 14:04:38: Build type: release
|
||||
04/07/2016 14:04:38: Build target: GPU
|
||||
04/07/2016 14:04:38: With 1bit-SGD: no
|
||||
04/07/2016 14:04:38: Math lib: acml
|
||||
04/07/2016 14:04:38: CUDA_PATH: /usr/local/cuda-7.0
|
||||
04/07/2016 14:04:38: CUB_PATH: /usr/local/cub-1.4.1
|
||||
04/07/2016 14:04:38: CUDNN_PATH: /usr/local/cudnn-4.0
|
||||
04/07/2016 14:04:38: Build Branch: mahilleb/MNISTLinux
|
||||
04/07/2016 14:04:38: Build SHA1: ad5c8cd8002553a87d462a9a1ddcdabf2c84f519 (modified)
|
||||
04/07/2016 14:04:38: Built by mahilleb on atleneu04
|
||||
04/07/2016 14:04:38: Build Path: /home/mahilleb/CNTK
|
||||
04/07/2016 14:04:38: -------------------------------------------------------------------
|
||||
|
||||
04/07/2016 14:04:38: Running on localhost at 2016/04/07 14:04:38
|
||||
04/07/2016 14:04:38: Command line:
|
||||
/home/mahilleb/CNTK/build/gpu/release/bin/cntk configFile=/home/mahilleb/CNTK/Tests/EndToEndTests/Examples/Image/MNIST/01_OneHidden/../../../../../../Examples/Image/MNIST/Config/01_OneHidden.cntk currentDirectory=/home/mahilleb/CNTK/Examples/Image/MNIST/Data RunDir=/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_01_OneHidden@release_gpu DataDir=/home/mahilleb/CNTK/Examples/Image/MNIST/Data ConfigDir=/home/mahilleb/CNTK/Tests/EndToEndTests/Examples/Image/MNIST/01_OneHidden/../../../../../../Examples/Image/MNIST/Config OutputDir=/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_01_OneHidden@release_gpu DeviceId=0 timestamping=true MNISTtrain=[reader=[randomize=none]] imageLayout="cudnn"
|
||||
|
||||
|
||||
|
||||
04/07/2016 14:04:38: >>>>>>>>>>>>>>>>>>>> RAW CONFIG (VARIABLES NOT RESOLVED) >>>>>>>>>>>>>>>>>>>>
|
||||
04/07/2016 14:04:38: RootDir = ".."
|
||||
ConfigDir = "$RootDir$/Config"
|
||||
DataDir = "$RootDir$/Data"
|
||||
OutputDir = "$RootDir$/Output"
|
||||
ModelDir = "$OutputDir$/Models"
|
||||
deviceId = 0
|
||||
imageLayout = "cudnn"
|
||||
command = MNISTtrain:MNISTtest
|
||||
precision = "float"
|
||||
modelPath = "$ModelDir$/01_OneHidden"
|
||||
ndlMacros = "$ConfigDir$/Macros.ndl"
|
||||
traceLevel=1
|
||||
numMBsToShowResult=500
|
||||
initOnCPUOnly=true
|
||||
MNISTtrain = [
|
||||
action = "train"
|
||||
NDLNetworkBuilder = [
|
||||
networkDescription = "$ConfigDir$/01_OneHidden.ndl"
|
||||
]
|
||||
SGD = [
|
||||
epochSize = 60000
|
||||
minibatchSize = 32
|
||||
learningRatesPerMB = 0.1
|
||||
momentumPerMB = 0
|
||||
maxEpochs = 30
|
||||
]
|
||||
reader = [
|
||||
readerType = "UCIFastReader"
|
||||
file = "$DataDir$/Train-28x28.txt"
|
||||
features = [
|
||||
dim = 784
|
||||
start = 1
|
||||
]
|
||||
labels = [
|
||||
dim = 1
|
||||
start = 0
|
||||
labelDim = 10
|
||||
labelMappingFile = "$DataDir$/labelsmap.txt"
|
||||
]
|
||||
]
|
||||
]
|
||||
MNISTtest = [
|
||||
action = "test"
|
||||
minibatchSize = 16
|
||||
reader = [
|
||||
readerType = "UCIFastReader"
|
||||
file = "$DataDir$/Test-28x28.txt"
|
||||
features = [
|
||||
dim = 784
|
||||
start = 1
|
||||
]
|
||||
labels = [
|
||||
dim = 1
|
||||
start = 0
|
||||
labelDim = 10
|
||||
labelMappingFile = "$DataDir$/labelsmap.txt"
|
||||
]
|
||||
]
|
||||
]
|
||||
currentDirectory=/home/mahilleb/CNTK/Examples/Image/MNIST/Data
|
||||
RunDir=/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_01_OneHidden@release_gpu
|
||||
DataDir=/home/mahilleb/CNTK/Examples/Image/MNIST/Data
|
||||
ConfigDir=/home/mahilleb/CNTK/Tests/EndToEndTests/Examples/Image/MNIST/01_OneHidden/../../../../../../Examples/Image/MNIST/Config
|
||||
OutputDir=/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_01_OneHidden@release_gpu
|
||||
DeviceId=0
|
||||
timestamping=true
|
||||
MNISTtrain=[reader=[randomize=none]]
|
||||
imageLayout="cudnn"
|
||||
|
||||
04/07/2016 14:04:38: <<<<<<<<<<<<<<<<<<<< RAW CONFIG (VARIABLES NOT RESOLVED) <<<<<<<<<<<<<<<<<<<<
|
||||
|
||||
04/07/2016 14:04:38: >>>>>>>>>>>>>>>>>>>> RAW CONFIG WITH ALL VARIABLES RESOLVED >>>>>>>>>>>>>>>>>>>>
|
||||
04/07/2016 14:04:38: RootDir = ".."
|
||||
ConfigDir = "../Config"
|
||||
DataDir = "../Data"
|
||||
OutputDir = "../Output"
|
||||
ModelDir = "/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_01_OneHidden@release_gpu/Models"
|
||||
deviceId = 0
|
||||
imageLayout = "cudnn"
|
||||
command = MNISTtrain:MNISTtest
|
||||
precision = "float"
|
||||
modelPath = "/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_01_OneHidden@release_gpu/Models/01_OneHidden"
|
||||
ndlMacros = "/home/mahilleb/CNTK/Tests/EndToEndTests/Examples/Image/MNIST/01_OneHidden/../../../../../../Examples/Image/MNIST/Config/Macros.ndl"
|
||||
traceLevel=1
|
||||
numMBsToShowResult=500
|
||||
initOnCPUOnly=true
|
||||
MNISTtrain = [
|
||||
action = "train"
|
||||
NDLNetworkBuilder = [
|
||||
networkDescription = "/home/mahilleb/CNTK/Tests/EndToEndTests/Examples/Image/MNIST/01_OneHidden/../../../../../../Examples/Image/MNIST/Config/01_OneHidden.ndl"
|
||||
]
|
||||
SGD = [
|
||||
epochSize = 60000
|
||||
minibatchSize = 32
|
||||
learningRatesPerMB = 0.1
|
||||
momentumPerMB = 0
|
||||
maxEpochs = 30
|
||||
]
|
||||
reader = [
|
||||
readerType = "UCIFastReader"
|
||||
file = "/home/mahilleb/CNTK/Examples/Image/MNIST/Data/Train-28x28.txt"
|
||||
features = [
|
||||
dim = 784
|
||||
start = 1
|
||||
]
|
||||
labels = [
|
||||
dim = 1
|
||||
start = 0
|
||||
labelDim = 10
|
||||
labelMappingFile = "/home/mahilleb/CNTK/Examples/Image/MNIST/Data/labelsmap.txt"
|
||||
]
|
||||
]
|
||||
]
|
||||
MNISTtest = [
|
||||
action = "test"
|
||||
minibatchSize = 16
|
||||
reader = [
|
||||
readerType = "UCIFastReader"
|
||||
file = "/home/mahilleb/CNTK/Examples/Image/MNIST/Data/Test-28x28.txt"
|
||||
features = [
|
||||
dim = 784
|
||||
start = 1
|
||||
]
|
||||
labels = [
|
||||
dim = 1
|
||||
start = 0
|
||||
labelDim = 10
|
||||
labelMappingFile = "/home/mahilleb/CNTK/Examples/Image/MNIST/Data/labelsmap.txt"
|
||||
]
|
||||
]
|
||||
]
|
||||
currentDirectory=/home/mahilleb/CNTK/Examples/Image/MNIST/Data
|
||||
RunDir=/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_01_OneHidden@release_gpu
|
||||
DataDir=/home/mahilleb/CNTK/Examples/Image/MNIST/Data
|
||||
ConfigDir=/home/mahilleb/CNTK/Tests/EndToEndTests/Examples/Image/MNIST/01_OneHidden/../../../../../../Examples/Image/MNIST/Config
|
||||
OutputDir=/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_01_OneHidden@release_gpu
|
||||
DeviceId=0
|
||||
timestamping=true
|
||||
MNISTtrain=[reader=[randomize=none]]
|
||||
imageLayout="cudnn"
|
||||
|
||||
04/07/2016 14:04:38: <<<<<<<<<<<<<<<<<<<< RAW CONFIG WITH ALL VARIABLES RESOLVED <<<<<<<<<<<<<<<<<<<<
|
||||
|
||||
04/07/2016 14:04:38: >>>>>>>>>>>>>>>>>>>> PROCESSED CONFIG WITH ALL VARIABLES RESOLVED >>>>>>>>>>>>>>>>>>>>
|
||||
configparameters: 01_OneHidden.cntk:command=MNISTtrain:MNISTtest
|
||||
configparameters: 01_OneHidden.cntk:ConfigDir=/home/mahilleb/CNTK/Tests/EndToEndTests/Examples/Image/MNIST/01_OneHidden/../../../../../../Examples/Image/MNIST/Config
|
||||
configparameters: 01_OneHidden.cntk:currentDirectory=/home/mahilleb/CNTK/Examples/Image/MNIST/Data
|
||||
configparameters: 01_OneHidden.cntk:DataDir=/home/mahilleb/CNTK/Examples/Image/MNIST/Data
|
||||
configparameters: 01_OneHidden.cntk:deviceId=0
|
||||
configparameters: 01_OneHidden.cntk:imageLayout=cudnn
|
||||
configparameters: 01_OneHidden.cntk:initOnCPUOnly=true
|
||||
configparameters: 01_OneHidden.cntk:MNISTtest=[
|
||||
action = "test"
|
||||
minibatchSize = 16
|
||||
reader = [
|
||||
readerType = "UCIFastReader"
|
||||
file = "/home/mahilleb/CNTK/Examples/Image/MNIST/Data/Test-28x28.txt"
|
||||
features = [
|
||||
dim = 784
|
||||
start = 1
|
||||
]
|
||||
labels = [
|
||||
dim = 1
|
||||
start = 0
|
||||
labelDim = 10
|
||||
labelMappingFile = "/home/mahilleb/CNTK/Examples/Image/MNIST/Data/labelsmap.txt"
|
||||
]
|
||||
]
|
||||
]
|
||||
|
||||
configparameters: 01_OneHidden.cntk:MNISTtrain=[
|
||||
action = "train"
|
||||
NDLNetworkBuilder = [
|
||||
networkDescription = "/home/mahilleb/CNTK/Tests/EndToEndTests/Examples/Image/MNIST/01_OneHidden/../../../../../../Examples/Image/MNIST/Config/01_OneHidden.ndl"
|
||||
]
|
||||
SGD = [
|
||||
epochSize = 60000
|
||||
minibatchSize = 32
|
||||
learningRatesPerMB = 0.1
|
||||
momentumPerMB = 0
|
||||
maxEpochs = 30
|
||||
]
|
||||
reader = [
|
||||
readerType = "UCIFastReader"
|
||||
file = "/home/mahilleb/CNTK/Examples/Image/MNIST/Data/Train-28x28.txt"
|
||||
features = [
|
||||
dim = 784
|
||||
start = 1
|
||||
]
|
||||
labels = [
|
||||
dim = 1
|
||||
start = 0
|
||||
labelDim = 10
|
||||
labelMappingFile = "/home/mahilleb/CNTK/Examples/Image/MNIST/Data/labelsmap.txt"
|
||||
]
|
||||
]
|
||||
] [reader=[randomize=none]]
|
||||
|
||||
configparameters: 01_OneHidden.cntk:ModelDir=/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_01_OneHidden@release_gpu/Models
|
||||
configparameters: 01_OneHidden.cntk:modelPath=/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_01_OneHidden@release_gpu/Models/01_OneHidden
|
||||
configparameters: 01_OneHidden.cntk:ndlMacros=/home/mahilleb/CNTK/Tests/EndToEndTests/Examples/Image/MNIST/01_OneHidden/../../../../../../Examples/Image/MNIST/Config/Macros.ndl
|
||||
configparameters: 01_OneHidden.cntk:numMBsToShowResult=500
|
||||
configparameters: 01_OneHidden.cntk:OutputDir=/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_01_OneHidden@release_gpu
|
||||
configparameters: 01_OneHidden.cntk:precision=float
|
||||
configparameters: 01_OneHidden.cntk:RootDir=..
|
||||
configparameters: 01_OneHidden.cntk:RunDir=/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_01_OneHidden@release_gpu
|
||||
configparameters: 01_OneHidden.cntk:timestamping=true
|
||||
configparameters: 01_OneHidden.cntk:traceLevel=1
|
||||
04/07/2016 14:04:38: <<<<<<<<<<<<<<<<<<<< PROCESSED CONFIG WITH ALL VARIABLES RESOLVED <<<<<<<<<<<<<<<<<<<<
|
||||
04/07/2016 14:04:38: Commands: MNISTtrain MNISTtest
|
||||
04/07/2016 14:04:38: Precision = "float"
|
||||
04/07/2016 14:04:38: CNTKModelPath: /tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_01_OneHidden@release_gpu/Models/01_OneHidden
|
||||
04/07/2016 14:04:38: CNTKCommandTrainInfo: MNISTtrain : 30
|
||||
04/07/2016 14:04:38: CNTKCommandTrainInfo: CNTKNoMoreCommands_Total : 30
|
||||
|
||||
04/07/2016 14:04:38: ##############################################################################
|
||||
04/07/2016 14:04:38: # #
|
||||
04/07/2016 14:04:38: # Action "train" #
|
||||
04/07/2016 14:04:38: # #
|
||||
04/07/2016 14:04:38: ##############################################################################
|
||||
|
||||
04/07/2016 14:04:38: CNTKCommandTrainBegin: MNISTtrain
|
||||
NDLBuilder Using GPU 0
|
||||
Reading UCI file /home/mahilleb/CNTK/Examples/Image/MNIST/Data/Train-28x28.txt
|
||||
|
||||
04/07/2016 14:04:38: Creating virgin network.
|
||||
|
||||
Post-processing network...
|
||||
|
||||
4 roots:
|
||||
ce = CrossEntropyWithSoftmax()
|
||||
err = ErrorPrediction()
|
||||
errTop5 = ErrorPrediction()
|
||||
ol.z = Plus()
|
||||
|
||||
Validating network. 17 nodes to process in pass 1.
|
||||
|
||||
|
||||
Validating network. 9 nodes to process in pass 2.
|
||||
|
||||
|
||||
Validating network, final pass.
|
||||
|
||||
Validating --> labels = InputValue() : -> [10 x *]
|
||||
Validating --> ol.W = LearnableParameter() : -> [10 x 200]
|
||||
Validating --> h1.W = LearnableParameter() : -> [200 x 784]
|
||||
Validating --> featScale = LearnableParameter() : -> [1 x 1]
|
||||
Validating --> features = InputValue() : -> [784 x *]
|
||||
Validating --> featScaled = ElementTimes (featScale, features) : [1 x 1], [784 x *] -> [784 x 1 x *]
|
||||
Validating --> h1.t = Times (h1.W, featScaled) : [200 x 784], [784 x 1 x *] -> [200 x 1 x *]
|
||||
Validating --> h1.b = LearnableParameter() : -> [200 x 1]
|
||||
Validating --> h1.z = Plus (h1.t, h1.b) : [200 x 1 x *], [200 x 1] -> [200 x 1 x *]
|
||||
Validating --> h1.y = Sigmoid (h1.z) : [200 x 1 x *] -> [200 x 1 x *]
|
||||
Validating --> ol.t = Times (ol.W, h1.y) : [10 x 200], [200 x 1 x *] -> [10 x 1 x *]
|
||||
Validating --> ol.b = LearnableParameter() : -> [10 x 1]
|
||||
Validating --> ol.z = Plus (ol.t, ol.b) : [10 x 1 x *], [10 x 1] -> [10 x 1 x *]
|
||||
Validating --> ce = CrossEntropyWithSoftmax (labels, ol.z) : [10 x *], [10 x 1 x *] -> [1]
|
||||
Validating --> err = ErrorPrediction (labels, ol.z) : [10 x *], [10 x 1 x *] -> [1]
|
||||
Validating --> unnamed81 = LearnableParameter() : -> [1 x 1]
|
||||
Validating --> errTop5 = ErrorPrediction (labels, ol.z, unnamed81) : [10 x *], [10 x 1 x *], [1 x 1] -> [1]
|
||||
|
||||
|
||||
9 out of 17 nodes do not share the minibatch layout with the input data.
|
||||
|
||||
Post-processing network complete.
|
||||
|
||||
04/07/2016 14:04:38: Created model with 17 nodes on GPU 0.
|
||||
|
||||
04/07/2016 14:04:38: Training criterion node(s):
|
||||
04/07/2016 14:04:38: ce = CrossEntropyWithSoftmax
|
||||
|
||||
04/07/2016 14:04:38: Evaluation criterion node(s):
|
||||
|
||||
04/07/2016 14:04:38: errTop5 = ErrorPrediction
|
||||
04/07/2016 14:04:38: err = ErrorPrediction
|
||||
|
||||
|
||||
Allocating matrices for forward and/or backward propagation.
|
||||
04/07/2016 14:04:38: No PreCompute nodes found, skipping PreCompute step.
|
||||
|
||||
04/07/2016 14:04:38: Starting Epoch 1: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
UCIFastReader: Starting at epoch 0, counting lines to determine record count...
|
||||
60000 records found.
|
||||
starting epoch 0 at record count 0, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:04:39: Starting minibatch loop.
|
||||
04/07/2016 14:04:39: Epoch[ 1 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 1.29554907; EvalErr[0]PerSample = 0.38125000; EvalErr[1]PerSample = 0.38125000; TotalTime = 0.5578s; SamplesPerSecond = 28684.9
|
||||
04/07/2016 14:04:40: Epoch[ 1 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.50022791; EvalErr[0]PerSample = 0.13212500; EvalErr[1]PerSample = 0.13212500; TotalTime = 0.4482s; SamplesPerSecond = 35702.0
|
||||
04/07/2016 14:04:40: Epoch[ 1 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.39194543; EvalErr[0]PerSample = 0.11150000; EvalErr[1]PerSample = 0.11150000; TotalTime = 0.4494s; SamplesPerSecond = 35601.4
|
||||
04/07/2016 14:04:41: Finished Epoch[ 1 of 30]: [Training Set] TrainLossPerSample = 0.64949685; TotalSamplesSeen = 60000; EvalErrPerSample [0]=0.18576667; [1]=0.18576667; AvgLearningRatePerSample = 0.003125; EpochTime=2.30671
|
||||
04/07/2016 14:04:41: Finished Epoch[ 1 of 30]: Criterion Node [ce] Per Sample = 0.64949685
|
||||
04/07/2016 14:04:41: Finished Epoch[ 1 of 30]: Evaluation Node [errTop5] Per Sample = 0.18576667
|
||||
04/07/2016 14:04:41: Finished Epoch[ 1 of 30]: Evaluation Node [err] Per Sample = 0.18576667
|
||||
04/07/2016 14:04:41: SGD: Saving checkpoint model '/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.1'
|
||||
|
||||
04/07/2016 14:04:41: Starting Epoch 2: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 1 at record count 60000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:04:41: Starting minibatch loop.
|
||||
04/07/2016 14:04:41: Epoch[ 2 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.33514655; EvalErr[0]PerSample = 0.09568750; EvalErr[1]PerSample = 0.09568750; TotalTime = 0.2391s; SamplesPerSecond = 66912.0
|
||||
04/07/2016 14:04:41: Epoch[ 2 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.31925513; EvalErr[0]PerSample = 0.09193750; EvalErr[1]PerSample = 0.09193750; TotalTime = 0.2393s; SamplesPerSecond = 66873.1
|
||||
04/07/2016 14:04:41: Epoch[ 2 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.30763525; EvalErr[0]PerSample = 0.09056250; EvalErr[1]PerSample = 0.09056250; TotalTime = 0.2392s; SamplesPerSecond = 66884.0
|
||||
04/07/2016 14:04:42: Finished Epoch[ 2 of 30]: [Training Set] TrainLossPerSample = 0.31140831; TotalSamplesSeen = 120000; EvalErrPerSample [0]=0.089983337; [1]=0.089983337; AvgLearningRatePerSample = 0.003125; EpochTime=0.898468
|
||||
04/07/2016 14:04:42: Finished Epoch[ 2 of 30]: Criterion Node [ce] Per Sample = 0.31140831
|
||||
04/07/2016 14:04:42: Finished Epoch[ 2 of 30]: Evaluation Node [errTop5] Per Sample = 0.089983337
|
||||
04/07/2016 14:04:42: Finished Epoch[ 2 of 30]: Evaluation Node [err] Per Sample = 0.089983337
|
||||
04/07/2016 14:04:42: SGD: Saving checkpoint model '/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.2'
|
||||
|
||||
04/07/2016 14:04:42: Starting Epoch 3: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 2 at record count 120000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:04:42: Starting minibatch loop.
|
||||
04/07/2016 14:04:42: Epoch[ 3 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.28740411; EvalErr[0]PerSample = 0.08375000; EvalErr[1]PerSample = 0.08375000; TotalTime = 0.2393s; SamplesPerSecond = 66858.0
|
||||
04/07/2016 14:04:42: Epoch[ 3 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.27846802; EvalErr[0]PerSample = 0.07981250; EvalErr[1]PerSample = 0.07981250; TotalTime = 0.2400s; SamplesPerSecond = 66672.5
|
||||
04/07/2016 14:04:42: Epoch[ 3 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.27320044; EvalErr[0]PerSample = 0.08025000; EvalErr[1]PerSample = 0.08025000; TotalTime = 0.2390s; SamplesPerSecond = 66941.4
|
||||
04/07/2016 14:04:43: Finished Epoch[ 3 of 30]: [Training Set] TrainLossPerSample = 0.2724613; TotalSamplesSeen = 180000; EvalErrPerSample [0]=0.078916669; [1]=0.078916669; AvgLearningRatePerSample = 0.003125; EpochTime=0.899276
|
||||
04/07/2016 14:04:43: Finished Epoch[ 3 of 30]: Criterion Node [ce] Per Sample = 0.2724613
|
||||
04/07/2016 14:04:43: Finished Epoch[ 3 of 30]: Evaluation Node [errTop5] Per Sample = 0.078916669
|
||||
04/07/2016 14:04:43: Finished Epoch[ 3 of 30]: Evaluation Node [err] Per Sample = 0.078916669
|
||||
04/07/2016 14:04:43: SGD: Saving checkpoint model '/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.3'
|
||||
|
||||
04/07/2016 14:04:43: Starting Epoch 4: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 3 at record count 180000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:04:43: Starting minibatch loop.
|
||||
04/07/2016 14:04:43: Epoch[ 4 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.25527621; EvalErr[0]PerSample = 0.07356250; EvalErr[1]PerSample = 0.07356250; TotalTime = 0.2391s; SamplesPerSecond = 66918.4
|
||||
04/07/2016 14:04:43: Epoch[ 4 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.24833566; EvalErr[0]PerSample = 0.07106250; EvalErr[1]PerSample = 0.07106250; TotalTime = 0.2393s; SamplesPerSecond = 66856.4
|
||||
04/07/2016 14:04:43: Epoch[ 4 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.24453772; EvalErr[0]PerSample = 0.07056250; EvalErr[1]PerSample = 0.07056250; TotalTime = 0.2391s; SamplesPerSecond = 66919.0
|
||||
04/07/2016 14:04:43: Finished Epoch[ 4 of 30]: [Training Set] TrainLossPerSample = 0.24299194; TotalSamplesSeen = 240000; EvalErrPerSample [0]=0.069650002; [1]=0.069650002; AvgLearningRatePerSample = 0.003125; EpochTime=0.898466
|
||||
04/07/2016 14:04:43: Finished Epoch[ 4 of 30]: Criterion Node [ce] Per Sample = 0.24299194
|
||||
04/07/2016 14:04:43: Finished Epoch[ 4 of 30]: Evaluation Node [errTop5] Per Sample = 0.069650002
|
||||
04/07/2016 14:04:43: Finished Epoch[ 4 of 30]: Evaluation Node [err] Per Sample = 0.069650002
|
||||
04/07/2016 14:04:43: SGD: Saving checkpoint model '/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.4'
|
||||
|
||||
04/07/2016 14:04:43: Starting Epoch 5: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 4 at record count 240000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:04:43: Starting minibatch loop.
|
||||
04/07/2016 14:04:44: Epoch[ 5 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.22738316; EvalErr[0]PerSample = 0.06581250; EvalErr[1]PerSample = 0.06581250; TotalTime = 0.2392s; SamplesPerSecond = 66894.1
|
||||
04/07/2016 14:04:44: Epoch[ 5 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.22237688; EvalErr[0]PerSample = 0.06400000; EvalErr[1]PerSample = 0.06400000; TotalTime = 0.2392s; SamplesPerSecond = 66875.9
|
||||
04/07/2016 14:04:44: Epoch[ 5 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.21983353; EvalErr[0]PerSample = 0.06381250; EvalErr[1]PerSample = 0.06381250; TotalTime = 0.2402s; SamplesPerSecond = 66605.3
|
||||
04/07/2016 14:04:44: Finished Epoch[ 5 of 30]: [Training Set] TrainLossPerSample = 0.21759067; TotalSamplesSeen = 300000; EvalErrPerSample [0]=0.062466666; [1]=0.062466666; AvgLearningRatePerSample = 0.003125; EpochTime=0.89968
|
||||
04/07/2016 14:04:44: Finished Epoch[ 5 of 30]: Criterion Node [ce] Per Sample = 0.21759067
|
||||
04/07/2016 14:04:44: Finished Epoch[ 5 of 30]: Evaluation Node [errTop5] Per Sample = 0.062466666
|
||||
04/07/2016 14:04:44: Finished Epoch[ 5 of 30]: Evaluation Node [err] Per Sample = 0.062466666
|
||||
04/07/2016 14:04:44: SGD: Saving checkpoint model '/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.5'
|
||||
|
||||
04/07/2016 14:04:44: Starting Epoch 6: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 5 at record count 300000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:04:44: Starting minibatch loop.
|
||||
04/07/2016 14:04:45: Epoch[ 6 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.20367961; EvalErr[0]PerSample = 0.05812500; EvalErr[1]PerSample = 0.05812500; TotalTime = 0.2392s; SamplesPerSecond = 66900.3
|
||||
04/07/2016 14:04:45: Epoch[ 6 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.20047520; EvalErr[0]PerSample = 0.05706250; EvalErr[1]PerSample = 0.05706250; TotalTime = 0.2390s; SamplesPerSecond = 66950.9
|
||||
04/07/2016 14:04:45: Epoch[ 6 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.19919839; EvalErr[0]PerSample = 0.05768750; EvalErr[1]PerSample = 0.05768750; TotalTime = 0.2390s; SamplesPerSecond = 66948.4
|
||||
04/07/2016 14:04:45: Finished Epoch[ 6 of 30]: [Training Set] TrainLossPerSample = 0.19621238; TotalSamplesSeen = 360000; EvalErrPerSample [0]=0.056000002; [1]=0.056000002; AvgLearningRatePerSample = 0.003125; EpochTime=0.897957
|
||||
04/07/2016 14:04:45: Finished Epoch[ 6 of 30]: Criterion Node [ce] Per Sample = 0.19621238
|
||||
04/07/2016 14:04:45: Finished Epoch[ 6 of 30]: Evaluation Node [errTop5] Per Sample = 0.056000002
|
||||
04/07/2016 14:04:45: Finished Epoch[ 6 of 30]: Evaluation Node [err] Per Sample = 0.056000002
|
||||
04/07/2016 14:04:45: SGD: Saving checkpoint model '/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.6'
|
||||
|
||||
04/07/2016 14:04:45: Starting Epoch 7: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 6 at record count 360000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:04:45: Starting minibatch loop.
|
||||
04/07/2016 14:04:45: Epoch[ 7 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.18383334; EvalErr[0]PerSample = 0.05268750; EvalErr[1]PerSample = 0.05268750; TotalTime = 0.2395s; SamplesPerSecond = 66799.4
|
||||
04/07/2016 14:04:46: Epoch[ 7 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.18212634; EvalErr[0]PerSample = 0.05206250; EvalErr[1]PerSample = 0.05206250; TotalTime = 0.2391s; SamplesPerSecond = 66930.5
|
||||
04/07/2016 14:04:46: Epoch[ 7 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.18186630; EvalErr[0]PerSample = 0.05231250; EvalErr[1]PerSample = 0.05231250; TotalTime = 0.2393s; SamplesPerSecond = 66855.8
|
||||
04/07/2016 14:04:46: Finished Epoch[ 7 of 30]: [Training Set] TrainLossPerSample = 0.17827822; TotalSamplesSeen = 420000; EvalErrPerSample [0]=0.050650001; [1]=0.050650001; AvgLearningRatePerSample = 0.003125; EpochTime=0.898836
|
||||
04/07/2016 14:04:46: Finished Epoch[ 7 of 30]: Criterion Node [ce] Per Sample = 0.17827822
|
||||
04/07/2016 14:04:46: Finished Epoch[ 7 of 30]: Evaluation Node [errTop5] Per Sample = 0.050650001
|
||||
04/07/2016 14:04:46: Finished Epoch[ 7 of 30]: Evaluation Node [err] Per Sample = 0.050650001
|
||||
04/07/2016 14:04:46: SGD: Saving checkpoint model '/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.7'
|
||||
|
||||
04/07/2016 14:04:46: Starting Epoch 8: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 7 at record count 420000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:04:46: Starting minibatch loop.
|
||||
04/07/2016 14:04:46: Epoch[ 8 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.16707426; EvalErr[0]PerSample = 0.04881250; EvalErr[1]PerSample = 0.04881250; TotalTime = 0.2393s; SamplesPerSecond = 66849.9
|
||||
04/07/2016 14:04:47: Epoch[ 8 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.16657964; EvalErr[0]PerSample = 0.04762500; EvalErr[1]PerSample = 0.04762500; TotalTime = 0.2396s; SamplesPerSecond = 66789.9
|
||||
04/07/2016 14:04:47: Epoch[ 8 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.16704306; EvalErr[0]PerSample = 0.04750000; EvalErr[1]PerSample = 0.04750000; TotalTime = 0.2393s; SamplesPerSecond = 66860.6
|
||||
04/07/2016 14:04:47: Finished Epoch[ 8 of 30]: [Training Set] TrainLossPerSample = 0.16303664; TotalSamplesSeen = 480000; EvalErrPerSample [0]=0.046483334; [1]=0.046483334; AvgLearningRatePerSample = 0.003125; EpochTime=0.899093
|
||||
04/07/2016 14:04:47: Finished Epoch[ 8 of 30]: Criterion Node [ce] Per Sample = 0.16303664
|
||||
04/07/2016 14:04:47: Finished Epoch[ 8 of 30]: Evaluation Node [errTop5] Per Sample = 0.046483334
|
||||
04/07/2016 14:04:47: Finished Epoch[ 8 of 30]: Evaluation Node [err] Per Sample = 0.046483334
|
||||
04/07/2016 14:04:47: SGD: Saving checkpoint model '/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.8'
|
||||
|
||||
04/07/2016 14:04:47: Starting Epoch 9: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 8 at record count 480000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:04:47: Starting minibatch loop.
|
||||
04/07/2016 14:04:47: Epoch[ 9 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.15274779; EvalErr[0]PerSample = 0.04450000; EvalErr[1]PerSample = 0.04450000; TotalTime = 0.2391s; SamplesPerSecond = 66912.6
|
||||
04/07/2016 14:04:48: Epoch[ 9 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.15323347; EvalErr[0]PerSample = 0.04450000; EvalErr[1]PerSample = 0.04450000; TotalTime = 0.2392s; SamplesPerSecond = 66896.6
|
||||
04/07/2016 14:04:48: Epoch[ 9 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.15418118; EvalErr[0]PerSample = 0.04425000; EvalErr[1]PerSample = 0.04425000; TotalTime = 0.2393s; SamplesPerSecond = 66865.9
|
||||
04/07/2016 14:04:48: Finished Epoch[ 9 of 30]: [Training Set] TrainLossPerSample = 0.14991474; TotalSamplesSeen = 540000; EvalErrPerSample [0]=0.04315; [1]=0.04315; AvgLearningRatePerSample = 0.003125; EpochTime=0.898354
|
||||
04/07/2016 14:04:48: Finished Epoch[ 9 of 30]: Criterion Node [ce] Per Sample = 0.14991474
|
||||
04/07/2016 14:04:48: Finished Epoch[ 9 of 30]: Evaluation Node [errTop5] Per Sample = 0.04315
|
||||
04/07/2016 14:04:48: Finished Epoch[ 9 of 30]: Evaluation Node [err] Per Sample = 0.04315
|
||||
04/07/2016 14:04:48: SGD: Saving checkpoint model '/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.9'
|
||||
|
||||
04/07/2016 14:04:48: Starting Epoch 10: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 9 at record count 540000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:04:48: Starting minibatch loop.
|
||||
04/07/2016 14:04:48: Epoch[10 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.14038245; EvalErr[0]PerSample = 0.04062500; EvalErr[1]PerSample = 0.04062500; TotalTime = 0.2393s; SamplesPerSecond = 66875.1
|
||||
04/07/2016 14:04:48: Epoch[10 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.14165695; EvalErr[0]PerSample = 0.04100000; EvalErr[1]PerSample = 0.04100000; TotalTime = 0.2391s; SamplesPerSecond = 66926.8
|
||||
04/07/2016 14:04:49: Epoch[10 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.14291498; EvalErr[0]PerSample = 0.04093750; EvalErr[1]PerSample = 0.04093750; TotalTime = 0.2389s; SamplesPerSecond = 66971.1
|
||||
04/07/2016 14:04:49: Finished Epoch[10 of 30]: [Training Set] TrainLossPerSample = 0.13850868; TotalSamplesSeen = 600000; EvalErrPerSample [0]=0.039583333; [1]=0.039583333; AvgLearningRatePerSample = 0.003125; EpochTime=0.897973
|
||||
04/07/2016 14:04:49: Finished Epoch[10 of 30]: Criterion Node [ce] Per Sample = 0.13850868
|
||||
04/07/2016 14:04:49: Finished Epoch[10 of 30]: Evaluation Node [errTop5] Per Sample = 0.039583333
|
||||
04/07/2016 14:04:49: Finished Epoch[10 of 30]: Evaluation Node [err] Per Sample = 0.039583333
|
||||
04/07/2016 14:04:49: SGD: Saving checkpoint model '/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.10'
|
||||
|
||||
04/07/2016 14:04:49: Starting Epoch 11: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 10 at record count 600000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:04:49: Starting minibatch loop.
|
||||
04/07/2016 14:04:49: Epoch[11 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.12962636; EvalErr[0]PerSample = 0.03731250; EvalErr[1]PerSample = 0.03731250; TotalTime = 0.2391s; SamplesPerSecond = 66928.0
|
||||
04/07/2016 14:04:49: Epoch[11 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.13153072; EvalErr[0]PerSample = 0.03775000; EvalErr[1]PerSample = 0.03775000; TotalTime = 0.2390s; SamplesPerSecond = 66945.6
|
||||
04/07/2016 14:04:50: Epoch[11 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.13298419; EvalErr[0]PerSample = 0.03900000; EvalErr[1]PerSample = 0.03900000; TotalTime = 0.2390s; SamplesPerSecond = 66943.6
|
||||
04/07/2016 14:04:50: Finished Epoch[11 of 30]: [Training Set] TrainLossPerSample = 0.12852019; TotalSamplesSeen = 660000; EvalErrPerSample [0]=0.036699999; [1]=0.036699999; AvgLearningRatePerSample = 0.003125; EpochTime=0.897779
|
||||
04/07/2016 14:04:50: Finished Epoch[11 of 30]: Criterion Node [ce] Per Sample = 0.12852019
|
||||
04/07/2016 14:04:50: Finished Epoch[11 of 30]: Evaluation Node [errTop5] Per Sample = 0.036699999
|
||||
04/07/2016 14:04:50: Finished Epoch[11 of 30]: Evaluation Node [err] Per Sample = 0.036699999
|
||||
04/07/2016 14:04:50: SGD: Saving checkpoint model '/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.11'
|
||||
|
||||
04/07/2016 14:04:50: Starting Epoch 12: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 11 at record count 660000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:04:50: Starting minibatch loop.
|
||||
04/07/2016 14:04:50: Epoch[12 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.12020341; EvalErr[0]PerSample = 0.03381250; EvalErr[1]PerSample = 0.03381250; TotalTime = 0.2393s; SamplesPerSecond = 66849.1
|
||||
04/07/2016 14:04:50: Epoch[12 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.12260921; EvalErr[0]PerSample = 0.03587500; EvalErr[1]PerSample = 0.03587500; TotalTime = 0.2390s; SamplesPerSecond = 66936.1
|
||||
04/07/2016 14:04:51: Epoch[12 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.12419101; EvalErr[0]PerSample = 0.03618750; EvalErr[1]PerSample = 0.03618750; TotalTime = 0.2391s; SamplesPerSecond = 66907.0
|
||||
04/07/2016 14:04:51: Finished Epoch[12 of 30]: [Training Set] TrainLossPerSample = 0.11971864; TotalSamplesSeen = 720000; EvalErrPerSample [0]=0.034066666; [1]=0.034066666; AvgLearningRatePerSample = 0.003125; EpochTime=0.898274
|
||||
04/07/2016 14:04:51: Finished Epoch[12 of 30]: Criterion Node [ce] Per Sample = 0.11971864
|
||||
04/07/2016 14:04:51: Finished Epoch[12 of 30]: Evaluation Node [errTop5] Per Sample = 0.034066666
|
||||
04/07/2016 14:04:51: Finished Epoch[12 of 30]: Evaluation Node [err] Per Sample = 0.034066666
|
||||
04/07/2016 14:04:51: SGD: Saving checkpoint model '/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.12'
|
||||
|
||||
04/07/2016 14:04:51: Starting Epoch 13: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 12 at record count 720000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:04:51: Starting minibatch loop.
|
||||
04/07/2016 14:04:51: Epoch[13 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.11189415; EvalErr[0]PerSample = 0.03093750; EvalErr[1]PerSample = 0.03093750; TotalTime = 0.2393s; SamplesPerSecond = 66865.0
|
||||
04/07/2016 14:04:51: Epoch[13 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.11469778; EvalErr[0]PerSample = 0.03400000; EvalErr[1]PerSample = 0.03400000; TotalTime = 0.2393s; SamplesPerSecond = 66860.8
|
||||
04/07/2016 14:04:51: Epoch[13 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.11637517; EvalErr[0]PerSample = 0.03412500; EvalErr[1]PerSample = 0.03412500; TotalTime = 0.2393s; SamplesPerSecond = 66862.2
|
||||
04/07/2016 14:04:52: Finished Epoch[13 of 30]: [Training Set] TrainLossPerSample = 0.11191925; TotalSamplesSeen = 780000; EvalErrPerSample [0]=0.0319; [1]=0.0319; AvgLearningRatePerSample = 0.003125; EpochTime=0.898649
|
||||
04/07/2016 14:04:52: Finished Epoch[13 of 30]: Criterion Node [ce] Per Sample = 0.11191925
|
||||
04/07/2016 14:04:52: Finished Epoch[13 of 30]: Evaluation Node [errTop5] Per Sample = 0.0319
|
||||
04/07/2016 14:04:52: Finished Epoch[13 of 30]: Evaluation Node [err] Per Sample = 0.0319
|
||||
04/07/2016 14:04:52: SGD: Saving checkpoint model '/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.13'
|
||||
|
||||
04/07/2016 14:04:52: Starting Epoch 14: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 13 at record count 780000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:04:52: Starting minibatch loop.
|
||||
04/07/2016 14:04:52: Epoch[14 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.10452200; EvalErr[0]PerSample = 0.02825000; EvalErr[1]PerSample = 0.02825000; TotalTime = 0.2393s; SamplesPerSecond = 66868.7
|
||||
04/07/2016 14:04:52: Epoch[14 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.10763933; EvalErr[0]PerSample = 0.03143750; EvalErr[1]PerSample = 0.03143750; TotalTime = 0.2391s; SamplesPerSecond = 66914.2
|
||||
04/07/2016 14:04:52: Epoch[14 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.10939818; EvalErr[0]PerSample = 0.03187500; EvalErr[1]PerSample = 0.03187500; TotalTime = 0.2391s; SamplesPerSecond = 66917.0
|
||||
04/07/2016 14:04:53: Finished Epoch[14 of 30]: [Training Set] TrainLossPerSample = 0.1049702; TotalSamplesSeen = 840000; EvalErrPerSample [0]=0.029516667; [1]=0.029516667; AvgLearningRatePerSample = 0.003125; EpochTime=0.898446
|
||||
04/07/2016 14:04:53: Finished Epoch[14 of 30]: Criterion Node [ce] Per Sample = 0.1049702
|
||||
04/07/2016 14:04:53: Finished Epoch[14 of 30]: Evaluation Node [errTop5] Per Sample = 0.029516667
|
||||
04/07/2016 14:04:53: Finished Epoch[14 of 30]: Evaluation Node [err] Per Sample = 0.029516667
|
||||
04/07/2016 14:04:53: SGD: Saving checkpoint model '/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.14'
|
||||
|
||||
04/07/2016 14:04:53: Starting Epoch 15: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 14 at record count 840000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:04:53: Starting minibatch loop.
|
||||
04/07/2016 14:04:53: Epoch[15 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.09794547; EvalErr[0]PerSample = 0.02681250; EvalErr[1]PerSample = 0.02681250; TotalTime = 0.2392s; SamplesPerSecond = 66879.0
|
||||
04/07/2016 14:04:53: Epoch[15 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.10130624; EvalErr[0]PerSample = 0.02906250; EvalErr[1]PerSample = 0.02906250; TotalTime = 0.2394s; SamplesPerSecond = 66839.1
|
||||
04/07/2016 14:04:53: Epoch[15 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.10314120; EvalErr[0]PerSample = 0.02937500; EvalErr[1]PerSample = 0.02937500; TotalTime = 0.2391s; SamplesPerSecond = 66928.0
|
||||
04/07/2016 14:04:53: Finished Epoch[15 of 30]: [Training Set] TrainLossPerSample = 0.098746225; TotalSamplesSeen = 900000; EvalErrPerSample [0]=0.027566668; [1]=0.027566668; AvgLearningRatePerSample = 0.003125; EpochTime=0.898655
|
||||
04/07/2016 14:04:53: Finished Epoch[15 of 30]: Criterion Node [ce] Per Sample = 0.098746225
|
||||
04/07/2016 14:04:53: Finished Epoch[15 of 30]: Evaluation Node [errTop5] Per Sample = 0.027566668
|
||||
04/07/2016 14:04:53: Finished Epoch[15 of 30]: Evaluation Node [err] Per Sample = 0.027566668
|
||||
04/07/2016 14:04:53: SGD: Saving checkpoint model '/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.15'
|
||||
|
||||
04/07/2016 14:04:53: Starting Epoch 16: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 15 at record count 900000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:04:53: Starting minibatch loop.
|
||||
04/07/2016 14:04:54: Epoch[16 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.09205003; EvalErr[0]PerSample = 0.02506250; EvalErr[1]PerSample = 0.02506250; TotalTime = 0.2395s; SamplesPerSecond = 66804.5
|
||||
04/07/2016 14:04:54: Epoch[16 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.09559438; EvalErr[0]PerSample = 0.02725000; EvalErr[1]PerSample = 0.02725000; TotalTime = 0.2394s; SamplesPerSecond = 66847.2
|
||||
04/07/2016 14:04:54: Epoch[16 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.09750049; EvalErr[0]PerSample = 0.02762500; EvalErr[1]PerSample = 0.02762500; TotalTime = 0.2391s; SamplesPerSecond = 66930.5
|
||||
04/07/2016 14:04:54: Finished Epoch[16 of 30]: [Training Set] TrainLossPerSample = 0.093143202; TotalSamplesSeen = 960000; EvalErrPerSample [0]=0.025933333; [1]=0.025933333; AvgLearningRatePerSample = 0.003125; EpochTime=0.898779
|
||||
04/07/2016 14:04:54: Finished Epoch[16 of 30]: Criterion Node [ce] Per Sample = 0.093143202
|
||||
04/07/2016 14:04:54: Finished Epoch[16 of 30]: Evaluation Node [errTop5] Per Sample = 0.025933333
|
||||
04/07/2016 14:04:54: Finished Epoch[16 of 30]: Evaluation Node [err] Per Sample = 0.025933333
|
||||
04/07/2016 14:04:54: SGD: Saving checkpoint model '/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.16'
|
||||
|
||||
04/07/2016 14:04:54: Starting Epoch 17: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 16 at record count 960000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:04:54: Starting minibatch loop.
|
||||
04/07/2016 14:04:55: Epoch[17 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.08674107; EvalErr[0]PerSample = 0.02350000; EvalErr[1]PerSample = 0.02350000; TotalTime = 0.2393s; SamplesPerSecond = 66849.4
|
||||
04/07/2016 14:04:55: Epoch[17 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.09041695; EvalErr[0]PerSample = 0.02531250; EvalErr[1]PerSample = 0.02531250; TotalTime = 0.2393s; SamplesPerSecond = 66860.0
|
||||
04/07/2016 14:04:55: Epoch[17 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.09238812; EvalErr[0]PerSample = 0.02618750; EvalErr[1]PerSample = 0.02618750; TotalTime = 0.2391s; SamplesPerSecond = 66914.8
|
||||
04/07/2016 14:04:55: Finished Epoch[17 of 30]: [Training Set] TrainLossPerSample = 0.088073827; TotalSamplesSeen = 1020000; EvalErrPerSample [0]=0.024383334; [1]=0.024383334; AvgLearningRatePerSample = 0.003125; EpochTime=0.898655
|
||||
04/07/2016 14:04:55: Finished Epoch[17 of 30]: Criterion Node [ce] Per Sample = 0.088073827
|
||||
04/07/2016 14:04:55: Finished Epoch[17 of 30]: Evaluation Node [errTop5] Per Sample = 0.024383334
|
||||
04/07/2016 14:04:55: Finished Epoch[17 of 30]: Evaluation Node [err] Per Sample = 0.024383334
|
||||
04/07/2016 14:04:55: SGD: Saving checkpoint model '/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.17'
|
||||
|
||||
04/07/2016 14:04:55: Starting Epoch 18: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 17 at record count 1020000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:04:55: Starting minibatch loop.
|
||||
04/07/2016 14:04:55: Epoch[18 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.08193969; EvalErr[0]PerSample = 0.02212500; EvalErr[1]PerSample = 0.02212500; TotalTime = 0.2393s; SamplesPerSecond = 66849.1
|
||||
04/07/2016 14:04:56: Epoch[18 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.08570138; EvalErr[0]PerSample = 0.02375000; EvalErr[1]PerSample = 0.02375000; TotalTime = 0.2394s; SamplesPerSecond = 66836.5
|
||||
04/07/2016 14:04:56: Epoch[18 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.08772934; EvalErr[0]PerSample = 0.02425000; EvalErr[1]PerSample = 0.02425000; TotalTime = 0.2392s; SamplesPerSecond = 66901.9
|
||||
04/07/2016 14:04:56: Finished Epoch[18 of 30]: [Training Set] TrainLossPerSample = 0.083464988; TotalSamplesSeen = 1080000; EvalErrPerSample [0]=0.022849999; [1]=0.022849999; AvgLearningRatePerSample = 0.003125; EpochTime=0.898846
|
||||
04/07/2016 14:04:56: Finished Epoch[18 of 30]: Criterion Node [ce] Per Sample = 0.083464988
|
||||
04/07/2016 14:04:56: Finished Epoch[18 of 30]: Evaluation Node [errTop5] Per Sample = 0.022849999
|
||||
04/07/2016 14:04:56: Finished Epoch[18 of 30]: Evaluation Node [err] Per Sample = 0.022849999
|
||||
04/07/2016 14:04:56: SGD: Saving checkpoint model '/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.18'
|
||||
|
||||
04/07/2016 14:04:56: Starting Epoch 19: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 18 at record count 1080000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:04:56: Starting minibatch loop.
|
||||
04/07/2016 14:04:56: Epoch[19 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.07757853; EvalErr[0]PerSample = 0.02068750; EvalErr[1]PerSample = 0.02068750; TotalTime = 0.2393s; SamplesPerSecond = 66848.6
|
||||
04/07/2016 14:04:57: Epoch[19 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.08138667; EvalErr[0]PerSample = 0.02262500; EvalErr[1]PerSample = 0.02262500; TotalTime = 0.2393s; SamplesPerSecond = 66852.7
|
||||
04/07/2016 14:04:57: Epoch[19 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.08346284; EvalErr[0]PerSample = 0.02318750; EvalErr[1]PerSample = 0.02318750; TotalTime = 0.2391s; SamplesPerSecond = 66921.2
|
||||
04/07/2016 14:04:57: Finished Epoch[19 of 30]: [Training Set] TrainLossPerSample = 0.079255536; TotalSamplesSeen = 1140000; EvalErrPerSample [0]=0.0217; [1]=0.0217; AvgLearningRatePerSample = 0.003125; EpochTime=0.898689
|
||||
04/07/2016 14:04:57: Finished Epoch[19 of 30]: Criterion Node [ce] Per Sample = 0.079255536
|
||||
04/07/2016 14:04:57: Finished Epoch[19 of 30]: Evaluation Node [errTop5] Per Sample = 0.0217
|
||||
04/07/2016 14:04:57: Finished Epoch[19 of 30]: Evaluation Node [err] Per Sample = 0.0217
|
||||
04/07/2016 14:04:57: SGD: Saving checkpoint model '/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.19'
|
||||
|
||||
04/07/2016 14:04:57: Starting Epoch 20: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 19 at record count 1140000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:04:57: Starting minibatch loop.
|
||||
04/07/2016 14:04:57: Epoch[20 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.07360051; EvalErr[0]PerSample = 0.01900000; EvalErr[1]PerSample = 0.01900000; TotalTime = 0.2391s; SamplesPerSecond = 66914.5
|
||||
04/07/2016 14:04:58: Epoch[20 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.07742140; EvalErr[0]PerSample = 0.02206250; EvalErr[1]PerSample = 0.02206250; TotalTime = 0.2392s; SamplesPerSecond = 66896.6
|
||||
04/07/2016 14:04:58: Epoch[20 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.07953708; EvalErr[0]PerSample = 0.02212500; EvalErr[1]PerSample = 0.02212500; TotalTime = 0.2391s; SamplesPerSecond = 66912.9
|
||||
04/07/2016 14:04:58: Finished Epoch[20 of 30]: [Training Set] TrainLossPerSample = 0.07539349; TotalSamplesSeen = 1200000; EvalErrPerSample [0]=0.020666666; [1]=0.020666666; AvgLearningRatePerSample = 0.003125; EpochTime=0.898458
|
||||
04/07/2016 14:04:58: Finished Epoch[20 of 30]: Criterion Node [ce] Per Sample = 0.07539349
|
||||
04/07/2016 14:04:58: Finished Epoch[20 of 30]: Evaluation Node [errTop5] Per Sample = 0.020666666
|
||||
04/07/2016 14:04:58: Finished Epoch[20 of 30]: Evaluation Node [err] Per Sample = 0.020666666
|
||||
04/07/2016 14:04:58: SGD: Saving checkpoint model '/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.20'
|
||||
|
||||
04/07/2016 14:04:58: Starting Epoch 21: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 20 at record count 1200000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:04:58: Starting minibatch loop.
|
||||
04/07/2016 14:04:58: Epoch[21 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.06995720; EvalErr[0]PerSample = 0.01831250; EvalErr[1]PerSample = 0.01831250; TotalTime = 0.2394s; SamplesPerSecond = 66824.0
|
||||
04/07/2016 14:04:58: Epoch[21 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.07376222; EvalErr[0]PerSample = 0.02068750; EvalErr[1]PerSample = 0.02068750; TotalTime = 0.2393s; SamplesPerSecond = 66849.1
|
||||
04/07/2016 14:04:59: Epoch[21 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.07590971; EvalErr[0]PerSample = 0.02131250; EvalErr[1]PerSample = 0.02131250; TotalTime = 0.2391s; SamplesPerSecond = 66912.6
|
||||
04/07/2016 14:04:59: Finished Epoch[21 of 30]: [Training Set] TrainLossPerSample = 0.071835473; TotalSamplesSeen = 1260000; EvalErrPerSample [0]=0.019650001; [1]=0.019650001; AvgLearningRatePerSample = 0.003125; EpochTime=0.898979
|
||||
04/07/2016 14:04:59: Finished Epoch[21 of 30]: Criterion Node [ce] Per Sample = 0.071835473
|
||||
04/07/2016 14:04:59: Finished Epoch[21 of 30]: Evaluation Node [errTop5] Per Sample = 0.019650001
|
||||
04/07/2016 14:04:59: Finished Epoch[21 of 30]: Evaluation Node [err] Per Sample = 0.019650001
|
||||
04/07/2016 14:04:59: SGD: Saving checkpoint model '/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.21'
|
||||
|
||||
04/07/2016 14:04:59: Starting Epoch 22: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 21 at record count 1260000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:04:59: Starting minibatch loop.
|
||||
04/07/2016 14:04:59: Epoch[22 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.06660743; EvalErr[0]PerSample = 0.01756250; EvalErr[1]PerSample = 0.01756250; TotalTime = 0.2392s; SamplesPerSecond = 66895.2
|
||||
04/07/2016 14:04:59: Epoch[22 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.07037196; EvalErr[0]PerSample = 0.01937500; EvalErr[1]PerSample = 0.01937500; TotalTime = 0.2392s; SamplesPerSecond = 66896.6
|
||||
04/07/2016 14:05:00: Epoch[22 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.07254503; EvalErr[0]PerSample = 0.01981250; EvalErr[1]PerSample = 0.01981250; TotalTime = 0.2391s; SamplesPerSecond = 66929.6
|
||||
04/07/2016 14:05:00: Finished Epoch[22 of 30]: [Training Set] TrainLossPerSample = 0.068544805; TotalSamplesSeen = 1320000; EvalErrPerSample [0]=0.018616667; [1]=0.018616667; AvgLearningRatePerSample = 0.003125; EpochTime=0.89819
|
||||
04/07/2016 14:05:00: Finished Epoch[22 of 30]: Criterion Node [ce] Per Sample = 0.068544805
|
||||
04/07/2016 14:05:00: Finished Epoch[22 of 30]: Evaluation Node [errTop5] Per Sample = 0.018616667
|
||||
04/07/2016 14:05:00: Finished Epoch[22 of 30]: Evaluation Node [err] Per Sample = 0.018616667
|
||||
04/07/2016 14:05:00: SGD: Saving checkpoint model '/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.22'
|
||||
|
||||
04/07/2016 14:05:00: Starting Epoch 23: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 22 at record count 1320000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:05:00: Starting minibatch loop.
|
||||
04/07/2016 14:05:00: Epoch[23 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.06351610; EvalErr[0]PerSample = 0.01668750; EvalErr[1]PerSample = 0.01668750; TotalTime = 0.2392s; SamplesPerSecond = 66901.4
|
||||
04/07/2016 14:05:00: Epoch[23 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.06721899; EvalErr[0]PerSample = 0.01825000; EvalErr[1]PerSample = 0.01825000; TotalTime = 0.2393s; SamplesPerSecond = 66862.8
|
||||
04/07/2016 14:05:01: Epoch[23 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.06941310; EvalErr[0]PerSample = 0.01856250; EvalErr[1]PerSample = 0.01856250; TotalTime = 0.2393s; SamplesPerSecond = 66869.5
|
||||
04/07/2016 14:05:01: Finished Epoch[23 of 30]: [Training Set] TrainLossPerSample = 0.065490127; TotalSamplesSeen = 1380000; EvalErrPerSample [0]=0.017633334; [1]=0.017633334; AvgLearningRatePerSample = 0.003125; EpochTime=0.898821
|
||||
04/07/2016 14:05:01: Finished Epoch[23 of 30]: Criterion Node [ce] Per Sample = 0.065490127
|
||||
04/07/2016 14:05:01: Finished Epoch[23 of 30]: Evaluation Node [errTop5] Per Sample = 0.017633334
|
||||
04/07/2016 14:05:01: Finished Epoch[23 of 30]: Evaluation Node [err] Per Sample = 0.017633334
|
||||
04/07/2016 14:05:01: SGD: Saving checkpoint model '/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.23'
|
||||
|
||||
04/07/2016 14:05:01: Starting Epoch 24: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 23 at record count 1380000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:05:01: Starting minibatch loop.
|
||||
04/07/2016 14:05:01: Epoch[24 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.06065337; EvalErr[0]PerSample = 0.01587500; EvalErr[1]PerSample = 0.01587500; TotalTime = 0.2392s; SamplesPerSecond = 66901.1
|
||||
04/07/2016 14:05:01: Epoch[24 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.06427705; EvalErr[0]PerSample = 0.01768750; EvalErr[1]PerSample = 0.01768750; TotalTime = 0.2395s; SamplesPerSecond = 66800.5
|
||||
04/07/2016 14:05:01: Epoch[24 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.06648905; EvalErr[0]PerSample = 0.01750000; EvalErr[1]PerSample = 0.01750000; TotalTime = 0.2393s; SamplesPerSecond = 66875.7
|
||||
04/07/2016 14:05:02: Finished Epoch[24 of 30]: [Training Set] TrainLossPerSample = 0.062645398; TotalSamplesSeen = 1440000; EvalErrPerSample [0]=0.016716667; [1]=0.016716667; AvgLearningRatePerSample = 0.003125; EpochTime=0.89884
|
||||
04/07/2016 14:05:02: Finished Epoch[24 of 30]: Criterion Node [ce] Per Sample = 0.062645398
|
||||
04/07/2016 14:05:02: Finished Epoch[24 of 30]: Evaluation Node [errTop5] Per Sample = 0.016716667
|
||||
04/07/2016 14:05:02: Finished Epoch[24 of 30]: Evaluation Node [err] Per Sample = 0.016716667
|
||||
04/07/2016 14:05:02: SGD: Saving checkpoint model '/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.24'
|
||||
|
||||
04/07/2016 14:05:02: Starting Epoch 25: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 24 at record count 1440000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:05:02: Starting minibatch loop.
|
||||
04/07/2016 14:05:02: Epoch[25 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.05799355; EvalErr[0]PerSample = 0.01562500; EvalErr[1]PerSample = 0.01562500; TotalTime = 0.2393s; SamplesPerSecond = 66869.2
|
||||
04/07/2016 14:05:02: Epoch[25 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.06152334; EvalErr[0]PerSample = 0.01718750; EvalErr[1]PerSample = 0.01718750; TotalTime = 0.2392s; SamplesPerSecond = 66896.9
|
||||
04/07/2016 14:05:02: Epoch[25 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.06375144; EvalErr[0]PerSample = 0.01687500; EvalErr[1]PerSample = 0.01687500; TotalTime = 0.2393s; SamplesPerSecond = 66862.0
|
||||
04/07/2016 14:05:03: Finished Epoch[25 of 30]: [Training Set] TrainLossPerSample = 0.059987996; TotalSamplesSeen = 1500000; EvalErrPerSample [0]=0.016233334; [1]=0.016233334; AvgLearningRatePerSample = 0.003125; EpochTime=0.898668
|
||||
04/07/2016 14:05:03: Finished Epoch[25 of 30]: Criterion Node [ce] Per Sample = 0.059987996
|
||||
04/07/2016 14:05:03: Finished Epoch[25 of 30]: Evaluation Node [errTop5] Per Sample = 0.016233334
|
||||
04/07/2016 14:05:03: Finished Epoch[25 of 30]: Evaluation Node [err] Per Sample = 0.016233334
|
||||
04/07/2016 14:05:03: SGD: Saving checkpoint model '/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.25'
|
||||
|
||||
04/07/2016 14:05:03: Starting Epoch 26: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 25 at record count 1500000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:05:03: Starting minibatch loop.
|
||||
04/07/2016 14:05:03: Epoch[26 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.05551456; EvalErr[0]PerSample = 0.01475000; EvalErr[1]PerSample = 0.01475000; TotalTime = 0.2391s; SamplesPerSecond = 66915.9
|
||||
04/07/2016 14:05:03: Epoch[26 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.05893863; EvalErr[0]PerSample = 0.01668750; EvalErr[1]PerSample = 0.01668750; TotalTime = 0.2390s; SamplesPerSecond = 66933.3
|
||||
04/07/2016 14:05:03: Epoch[26 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.06118142; EvalErr[0]PerSample = 0.01625000; EvalErr[1]PerSample = 0.01625000; TotalTime = 0.2391s; SamplesPerSecond = 66920.7
|
||||
04/07/2016 14:05:03: Finished Epoch[26 of 30]: [Training Set] TrainLossPerSample = 0.0574985; TotalSamplesSeen = 1560000; EvalErrPerSample [0]=0.015600001; [1]=0.015600001; AvgLearningRatePerSample = 0.003125; EpochTime=0.898136
|
||||
04/07/2016 14:05:03: Finished Epoch[26 of 30]: Criterion Node [ce] Per Sample = 0.0574985
|
||||
04/07/2016 14:05:03: Finished Epoch[26 of 30]: Evaluation Node [errTop5] Per Sample = 0.015600001
|
||||
04/07/2016 14:05:03: Finished Epoch[26 of 30]: Evaluation Node [err] Per Sample = 0.015600001
|
||||
04/07/2016 14:05:03: SGD: Saving checkpoint model '/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.26'
|
||||
|
||||
04/07/2016 14:05:03: Starting Epoch 27: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 26 at record count 1560000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:05:03: Starting minibatch loop.
|
||||
04/07/2016 14:05:04: Epoch[27 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.05319750; EvalErr[0]PerSample = 0.01393750; EvalErr[1]PerSample = 0.01393750; TotalTime = 0.2389s; SamplesPerSecond = 66970.3
|
||||
04/07/2016 14:05:04: Epoch[27 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.05650659; EvalErr[0]PerSample = 0.01600000; EvalErr[1]PerSample = 0.01600000; TotalTime = 0.2392s; SamplesPerSecond = 66879.6
|
||||
04/07/2016 14:05:04: Epoch[27 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.05876301; EvalErr[0]PerSample = 0.01531250; EvalErr[1]PerSample = 0.01531250; TotalTime = 0.2393s; SamplesPerSecond = 66865.0
|
||||
04/07/2016 14:05:04: Finished Epoch[27 of 30]: [Training Set] TrainLossPerSample = 0.055160314; TotalSamplesSeen = 1620000; EvalErrPerSample [0]=0.014816667; [1]=0.014816667; AvgLearningRatePerSample = 0.003125; EpochTime=0.898545
|
||||
04/07/2016 14:05:04: Finished Epoch[27 of 30]: Criterion Node [ce] Per Sample = 0.055160314
|
||||
04/07/2016 14:05:04: Finished Epoch[27 of 30]: Evaluation Node [errTop5] Per Sample = 0.014816667
|
||||
04/07/2016 14:05:04: Finished Epoch[27 of 30]: Evaluation Node [err] Per Sample = 0.014816667
|
||||
04/07/2016 14:05:04: SGD: Saving checkpoint model '/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.27'
|
||||
|
||||
04/07/2016 14:05:04: Starting Epoch 28: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 27 at record count 1620000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:05:04: Starting minibatch loop.
|
||||
04/07/2016 14:05:05: Epoch[28 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.05102567; EvalErr[0]PerSample = 0.01300000; EvalErr[1]PerSample = 0.01300000; TotalTime = 0.2394s; SamplesPerSecond = 66845.2
|
||||
04/07/2016 14:05:05: Epoch[28 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.05421332; EvalErr[0]PerSample = 0.01500000; EvalErr[1]PerSample = 0.01500000; TotalTime = 0.2391s; SamplesPerSecond = 66911.2
|
||||
04/07/2016 14:05:05: Epoch[28 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.05648208; EvalErr[0]PerSample = 0.01443750; EvalErr[1]PerSample = 0.01443750; TotalTime = 0.2391s; SamplesPerSecond = 66924.9
|
||||
04/07/2016 14:05:05: Finished Epoch[28 of 30]: [Training Set] TrainLossPerSample = 0.052959003; TotalSamplesSeen = 1680000; EvalErrPerSample [0]=0.013950001; [1]=0.013950001; AvgLearningRatePerSample = 0.003125; EpochTime=0.898578
|
||||
04/07/2016 14:05:05: Finished Epoch[28 of 30]: Criterion Node [ce] Per Sample = 0.052959003
|
||||
04/07/2016 14:05:05: Finished Epoch[28 of 30]: Evaluation Node [errTop5] Per Sample = 0.013950001
|
||||
04/07/2016 14:05:05: Finished Epoch[28 of 30]: Evaluation Node [err] Per Sample = 0.013950001
|
||||
04/07/2016 14:05:05: SGD: Saving checkpoint model '/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.28'
|
||||
|
||||
04/07/2016 14:05:05: Starting Epoch 29: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 28 at record count 1680000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:05:05: Starting minibatch loop.
|
||||
04/07/2016 14:05:05: Epoch[29 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.04898481; EvalErr[0]PerSample = 0.01231250; EvalErr[1]PerSample = 0.01231250; TotalTime = 0.2392s; SamplesPerSecond = 66899.7
|
||||
04/07/2016 14:05:06: Epoch[29 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.05204683; EvalErr[0]PerSample = 0.01406250; EvalErr[1]PerSample = 0.01406250; TotalTime = 0.2393s; SamplesPerSecond = 66867.8
|
||||
04/07/2016 14:05:06: Epoch[29 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.05432639; EvalErr[0]PerSample = 0.01393750; EvalErr[1]PerSample = 0.01393750; TotalTime = 0.2393s; SamplesPerSecond = 66863.6
|
||||
04/07/2016 14:05:06: Finished Epoch[29 of 30]: [Training Set] TrainLossPerSample = 0.05088209; TotalSamplesSeen = 1740000; EvalErrPerSample [0]=0.013283334; [1]=0.013283334; AvgLearningRatePerSample = 0.003125; EpochTime=0.898595
|
||||
04/07/2016 14:05:06: Finished Epoch[29 of 30]: Criterion Node [ce] Per Sample = 0.05088209
|
||||
04/07/2016 14:05:06: Finished Epoch[29 of 30]: Evaluation Node [errTop5] Per Sample = 0.013283334
|
||||
04/07/2016 14:05:06: Finished Epoch[29 of 30]: Evaluation Node [err] Per Sample = 0.013283334
|
||||
04/07/2016 14:05:06: SGD: Saving checkpoint model '/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.29'
|
||||
|
||||
04/07/2016 14:05:06: Starting Epoch 30: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 29 at record count 1740000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:05:06: Starting minibatch loop.
|
||||
04/07/2016 14:05:06: Epoch[30 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.04706239; EvalErr[0]PerSample = 0.01168750; EvalErr[1]PerSample = 0.01168750; TotalTime = 0.2395s; SamplesPerSecond = 66812.8
|
||||
04/07/2016 14:05:07: Epoch[30 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.04999670; EvalErr[0]PerSample = 0.01318750; EvalErr[1]PerSample = 0.01318750; TotalTime = 0.2391s; SamplesPerSecond = 66925.4
|
||||
04/07/2016 14:05:07: Epoch[30 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.05228509; EvalErr[0]PerSample = 0.01331250; EvalErr[1]PerSample = 0.01331250; TotalTime = 0.2396s; SamplesPerSecond = 66786.3
|
||||
04/07/2016 14:05:07: Finished Epoch[30 of 30]: [Training Set] TrainLossPerSample = 0.048918661; TotalSamplesSeen = 1800000; EvalErrPerSample [0]=0.0125; [1]=0.0125; AvgLearningRatePerSample = 0.003125; EpochTime=0.899362
|
||||
04/07/2016 14:05:07: Finished Epoch[30 of 30]: Criterion Node [ce] Per Sample = 0.048918661
|
||||
04/07/2016 14:05:07: Finished Epoch[30 of 30]: Evaluation Node [errTop5] Per Sample = 0.0125
|
||||
04/07/2016 14:05:07: Finished Epoch[30 of 30]: Evaluation Node [err] Per Sample = 0.0125
|
||||
04/07/2016 14:05:07: SGD: Saving checkpoint model '/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_01_OneHidden@release_gpu/Models/01_OneHidden'
|
||||
04/07/2016 14:05:07: CNTKCommandTrainEnd: MNISTtrain

04/07/2016 14:05:07: Action "train" complete.


04/07/2016 14:05:07: ##############################################################################
04/07/2016 14:05:07: # #
04/07/2016 14:05:07: # Action "test" #
04/07/2016 14:05:07: # #
04/07/2016 14:05:07: ##############################################################################

Reading UCI file /home/mahilleb/CNTK/Examples/Image/MNIST/Data/Test-28x28.txt

Post-processing network...
|
||||
|
||||
4 roots:
|
||||
ce = CrossEntropyWithSoftmax()
|
||||
err = ErrorPrediction()
|
||||
errTop5 = ErrorPrediction()
|
||||
ol.z = Plus()
|
||||
|
||||
Validating network. 17 nodes to process in pass 1.
|
||||
|
||||
|
||||
Validating network. 9 nodes to process in pass 2.
|
||||
|
||||
|
||||
Validating network, final pass.
|
||||
|
||||
Validating --> labels = InputValue() : -> [10 x *]
|
||||
Validating --> ol.W = LearnableParameter() : -> [10 x 200]
|
||||
Validating --> h1.W = LearnableParameter() : -> [200 x 784]
|
||||
Validating --> featScale = LearnableParameter() : -> [1 x 1]
|
||||
Validating --> features = InputValue() : -> [784 x *]
|
||||
Validating --> featScaled = ElementTimes (featScale, features) : [1 x 1], [784 x *] -> [784 x 1 x *]
|
||||
Validating --> h1.t = Times (h1.W, featScaled) : [200 x 784], [784 x 1 x *] -> [200 x 1 x *]
|
||||
Validating --> h1.b = LearnableParameter() : -> [200 x 1]
|
||||
Validating --> h1.z = Plus (h1.t, h1.b) : [200 x 1 x *], [200 x 1] -> [200 x 1 x *]
|
||||
Validating --> h1.y = Sigmoid (h1.z) : [200 x 1 x *] -> [200 x 1 x *]
|
||||
Validating --> ol.t = Times (ol.W, h1.y) : [10 x 200], [200 x 1 x *] -> [10 x 1 x *]
|
||||
Validating --> ol.b = LearnableParameter() : -> [10 x 1]
|
||||
Validating --> ol.z = Plus (ol.t, ol.b) : [10 x 1 x *], [10 x 1] -> [10 x 1 x *]
|
||||
Validating --> ce = CrossEntropyWithSoftmax (labels, ol.z) : [10 x *], [10 x 1 x *] -> [1]
|
||||
Validating --> err = ErrorPrediction (labels, ol.z) : [10 x *], [10 x 1 x *] -> [1]
|
||||
Validating --> unnamed81 = LearnableParameter() : -> [1 x 1]
|
||||
Validating --> errTop5 = ErrorPrediction (labels, ol.z, unnamed81) : [10 x *], [10 x 1 x *], [1 x 1] -> [1]
|
||||
|
||||
|
||||
9 out of 17 nodes do not share the minibatch layout with the input data.
|
||||
|
||||
Post-processing network complete.
|
||||
|
||||
evalNodeNames are not specified, using all the default evalnodes and training criterion nodes.


Allocating matrices for forward and/or backward propagation.

UCIFastReader: Starting at epoch 0, counting lines to determine record count...
10000 records found.
starting epoch 0 at record count 0, and file position 0
already there from last epoch
RandomOrdering: 2036 retries for 10000 elements (20.4%) to ensure window condition
RandomOrdering: recached sequence for seed 0: 2009, 1524, ...
Minibatch[1-500]: SamplesSeen = 8000 errTop5: ErrorPrediction/Sample = 0.02575 err: ErrorPrediction/Sample = 0.02575 ce: CrossEntropyWithSoftmax/Sample = 0.084545252
Minibatch[501-625]: SamplesSeen = 2000 errTop5: ErrorPrediction/Sample = 0.013 err: ErrorPrediction/Sample = 0.013 ce: CrossEntropyWithSoftmax/Sample = 0.047279389
Final Results: Minibatch[1-625]: SamplesSeen = 10000 errTop5: ErrorPrediction/Sample = 0.0232 err: ErrorPrediction/Sample = 0.0232 ce: CrossEntropyWithSoftmax/Sample = 0.07709208 Perplexity = 1.0801415
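Two quick consistency checks on the final results above, for anyone comparing against this baseline: the aggregate error is the sample-weighted average of the two reported minibatch ranges, and the reported perplexity is the exponential of the per-sample cross entropy:

\[ \frac{8000 \cdot 0.02575 + 2000 \cdot 0.013}{10000} = \frac{206 + 26}{10000} = 0.0232, \qquad e^{0.07709208} \approx 1.0801415 \]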

04/07/2016 14:05:07: Action "test" complete.

04/07/2016 14:05:07: __COMPLETED__
@ -1 +0,0 @@
__COMPLETED__
@ -1 +0,0 @@
__COMPLETED__
@ -1 +0,0 @@
__COMPLETED__
@ -1 +0,0 @@
__COMPLETED__
@ -0,0 +1,812 @@
=== Running /cygdrive/c/R/CNTK3/x64/release/cntk.exe configFile=C:\R\CNTK3\Examples\Image\MNIST\Config/01_OneHidden.cntk currentDirectory=C:\R\CNTK3\Examples\Image\MNIST\Data RunDir=C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_01_OneHidden@release_gpu DataDir=C:\R\CNTK3\Examples\Image\MNIST\Data ConfigDir=C:\R\CNTK3\Examples\Image\MNIST\Config OutputDir=C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_01_OneHidden@release_gpu DeviceId=0 timestamping=true MNISTtrain=[reader=[randomize=none]] imageLayout="cudnn"
-------------------------------------------------------------------
Build info:

Built time: Apr 7 2016 15:32:16
Last modified date: Thu Apr 7 09:19:53 2016
Build type: Release
Build target: GPU
With 1bit-SGD: yes
CUDA_PATH: C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v7.5
CUB_PATH: C:\R\cub-1.4.1
CUDNN_PATH: C:\R\cudnn-7.0-win-x64-v4.0-prod\cuda
Build Branch: mahilleb/MNISTLinux
Build SHA1: 5161c21b466987a144f96bad84f8763b08b05c40
Built by mahilleb on mahilleb57
Build Path: C:\R\CNTK3\Source\CNTK\
-------------------------------------------------------------------
Changed current directory to C:\R\CNTK3\Examples\Image\MNIST\Data
04/07/2016 14:48:56: -------------------------------------------------------------------
04/07/2016 14:48:56: Build info:

04/07/2016 14:48:56: Built time: Apr 7 2016 15:32:16
04/07/2016 14:48:56: Last modified date: Thu Apr 7 09:19:53 2016
04/07/2016 14:48:56: Build type: Release
04/07/2016 14:48:56: Build target: GPU
04/07/2016 14:48:56: With 1bit-SGD: yes
04/07/2016 14:48:56: CUDA_PATH: C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v7.5
04/07/2016 14:48:56: CUB_PATH: C:\R\cub-1.4.1
04/07/2016 14:48:56: CUDNN_PATH: C:\R\cudnn-7.0-win-x64-v4.0-prod\cuda
04/07/2016 14:48:56: Build Branch: mahilleb/MNISTLinux
04/07/2016 14:48:56: Build SHA1: 5161c21b466987a144f96bad84f8763b08b05c40
04/07/2016 14:48:56: Built by mahilleb on mahilleb57
04/07/2016 14:48:56: Build Path: C:\R\CNTK3\Source\CNTK\
04/07/2016 14:48:56: -------------------------------------------------------------------

04/07/2016 14:48:56: Running on mahilleb57 at 2016/04/07 14:48:56
04/07/2016 14:48:56: Command line:
C:\R\CNTK3\x64\release\cntk.exe configFile=C:\R\CNTK3\Examples\Image\MNIST\Config/01_OneHidden.cntk currentDirectory=C:\R\CNTK3\Examples\Image\MNIST\Data RunDir=C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_01_OneHidden@release_gpu DataDir=C:\R\CNTK3\Examples\Image\MNIST\Data ConfigDir=C:\R\CNTK3\Examples\Image\MNIST\Config OutputDir=C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_01_OneHidden@release_gpu DeviceId=0 timestamping=true MNISTtrain=[reader=[randomize=none]] imageLayout="cudnn"

04/07/2016 14:48:56: >>>>>>>>>>>>>>>>>>>> RAW CONFIG (VARIABLES NOT RESOLVED) >>>>>>>>>>>>>>>>>>>>
04/07/2016 14:48:56: RootDir = ".."
ConfigDir = "$RootDir$/Config"
DataDir = "$RootDir$/Data"
OutputDir = "$RootDir$/Output"
ModelDir = "$OutputDir$/Models"
deviceId = 0
imageLayout = "cudnn"
command = MNISTtrain:MNISTtest
precision = "float"
modelPath = "$ModelDir$/01_OneHidden"
ndlMacros = "$ConfigDir$/Macros.ndl"
traceLevel=1
numMBsToShowResult=500
initOnCPUOnly=true
MNISTtrain = [
action = "train"
NDLNetworkBuilder = [
networkDescription = "$ConfigDir$/01_OneHidden.ndl"
]
SGD = [
epochSize = 60000
minibatchSize = 32
learningRatesPerMB = 0.1
momentumPerMB = 0
maxEpochs = 30
]
reader = [
readerType = "UCIFastReader"
file = "$DataDir$/Train-28x28.txt"
features = [
dim = 784
start = 1
]
labels = [
dim = 1
start = 0
labelDim = 10
labelMappingFile = "$DataDir$/labelsmap.txt"
]
]
]
MNISTtest = [
action = "test"
minibatchSize = 16
reader = [
readerType = "UCIFastReader"
file = "$DataDir$/Test-28x28.txt"
features = [
dim = 784
start = 1
]
labels = [
dim = 1
start = 0
labelDim = 10
labelMappingFile = "$DataDir$/labelsmap.txt"
]
]
]
currentDirectory=C:\R\CNTK3\Examples\Image\MNIST\Data
RunDir=C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_01_OneHidden@release_gpu
DataDir=C:\R\CNTK3\Examples\Image\MNIST\Data
ConfigDir=C:\R\CNTK3\Examples\Image\MNIST\Config
OutputDir=C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_01_OneHidden@release_gpu
DeviceId=0
timestamping=true
MNISTtrain=[reader=[randomize=none]]
imageLayout="cudnn"

04/07/2016 14:48:56: <<<<<<<<<<<<<<<<<<<< RAW CONFIG (VARIABLES NOT RESOLVED) <<<<<<<<<<<<<<<<<<<<
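The "RAW CONFIG WITH ALL VARIABLES RESOLVED" dump that follows is the same configuration after every $Name$ placeholder has been substituted, with command-line overrides taking precedence (which is why ModelDir resolves into the temporary RunDir rather than ../Output). A minimal sketch of that substitution step, assuming plain repeated string replacement rather than CNTK's actual config parser; the variable values below are hypothetical:

    import re

    def resolve(text, variables):
        # Substitute $Name$ placeholders repeatedly until none remain,
        # so values that themselves contain placeholders also get resolved.
        pattern = re.compile(r"\$(\w+)\$")
        while pattern.search(text):
            text = pattern.sub(lambda m: variables[m.group(1)], text)
        return text

    variables = {"RootDir": "..", "OutputDir": "../Output"}
    print(resolve('modelPath = "$OutputDir$/Models/01_OneHidden"', variables))
    # -> modelPath = "../Output/Models/01_OneHidden"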
04/07/2016 14:48:56: >>>>>>>>>>>>>>>>>>>> RAW CONFIG WITH ALL VARIABLES RESOLVED >>>>>>>>>>>>>>>>>>>>
|
||||
04/07/2016 14:48:56: RootDir = ".."
|
||||
ConfigDir = "../Config"
|
||||
DataDir = "../Data"
|
||||
OutputDir = "../Output"
|
||||
ModelDir = "C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_01_OneHidden@release_gpu/Models"
|
||||
deviceId = 0
|
||||
imageLayout = "cudnn"
|
||||
command = MNISTtrain:MNISTtest
|
||||
precision = "float"
|
||||
modelPath = "C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_01_OneHidden@release_gpu/Models/01_OneHidden"
|
||||
ndlMacros = "C:\R\CNTK3\Examples\Image\MNIST\Config/Macros.ndl"
|
||||
traceLevel=1
|
||||
numMBsToShowResult=500
|
||||
initOnCPUOnly=true
|
||||
MNISTtrain = [
|
||||
action = "train"
|
||||
NDLNetworkBuilder = [
|
||||
networkDescription = "C:\R\CNTK3\Examples\Image\MNIST\Config/01_OneHidden.ndl"
|
||||
]
|
||||
SGD = [
|
||||
epochSize = 60000
|
||||
minibatchSize = 32
|
||||
learningRatesPerMB = 0.1
|
||||
momentumPerMB = 0
|
||||
maxEpochs = 30
|
||||
]
|
||||
reader = [
|
||||
readerType = "UCIFastReader"
|
||||
file = "C:\R\CNTK3\Examples\Image\MNIST\Data/Train-28x28.txt"
|
||||
features = [
|
||||
dim = 784
|
||||
start = 1
|
||||
]
|
||||
labels = [
|
||||
dim = 1
|
||||
start = 0
|
||||
labelDim = 10
|
||||
labelMappingFile = "C:\R\CNTK3\Examples\Image\MNIST\Data/labelsmap.txt"
|
||||
]
|
||||
]
|
||||
]
|
||||
MNISTtest = [
|
||||
action = "test"
|
||||
minibatchSize = 16
|
||||
reader = [
|
||||
readerType = "UCIFastReader"
|
||||
file = "C:\R\CNTK3\Examples\Image\MNIST\Data/Test-28x28.txt"
|
||||
features = [
|
||||
dim = 784
|
||||
start = 1
|
||||
]
|
||||
labels = [
|
||||
dim = 1
|
||||
start = 0
|
||||
labelDim = 10
|
||||
labelMappingFile = "C:\R\CNTK3\Examples\Image\MNIST\Data/labelsmap.txt"
|
||||
]
|
||||
]
|
||||
]
|
||||
currentDirectory=C:\R\CNTK3\Examples\Image\MNIST\Data
|
||||
RunDir=C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_01_OneHidden@release_gpu
|
||||
DataDir=C:\R\CNTK3\Examples\Image\MNIST\Data
|
||||
ConfigDir=C:\R\CNTK3\Examples\Image\MNIST\Config
|
||||
OutputDir=C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_01_OneHidden@release_gpu
|
||||
DeviceId=0
|
||||
timestamping=true
|
||||
MNISTtrain=[reader=[randomize=none]]
|
||||
imageLayout="cudnn"
|
||||
|
||||
04/07/2016 14:48:56: <<<<<<<<<<<<<<<<<<<< RAW CONFIG WITH ALL VARIABLES RESOLVED <<<<<<<<<<<<<<<<<<<<
|
||||
|
||||
04/07/2016 14:48:56: >>>>>>>>>>>>>>>>>>>> PROCESSED CONFIG WITH ALL VARIABLES RESOLVED >>>>>>>>>>>>>>>>>>>>
|
||||
configparameters: 01_OneHidden.cntk:command=MNISTtrain:MNISTtest
|
||||
configparameters: 01_OneHidden.cntk:ConfigDir=C:\R\CNTK3\Examples\Image\MNIST\Config
|
||||
configparameters: 01_OneHidden.cntk:currentDirectory=C:\R\CNTK3\Examples\Image\MNIST\Data
|
||||
configparameters: 01_OneHidden.cntk:DataDir=C:\R\CNTK3\Examples\Image\MNIST\Data
|
||||
configparameters: 01_OneHidden.cntk:deviceId=0
|
||||
configparameters: 01_OneHidden.cntk:imageLayout=cudnn
|
||||
configparameters: 01_OneHidden.cntk:initOnCPUOnly=true
|
||||
configparameters: 01_OneHidden.cntk:MNISTtest=[
|
||||
action = "test"
|
||||
minibatchSize = 16
|
||||
reader = [
|
||||
readerType = "UCIFastReader"
|
||||
file = "C:\R\CNTK3\Examples\Image\MNIST\Data/Test-28x28.txt"
|
||||
features = [
|
||||
dim = 784
|
||||
start = 1
|
||||
]
|
||||
labels = [
|
||||
dim = 1
|
||||
start = 0
|
||||
labelDim = 10
|
||||
labelMappingFile = "C:\R\CNTK3\Examples\Image\MNIST\Data/labelsmap.txt"
|
||||
]
|
||||
]
|
||||
]
|
||||
|
||||
configparameters: 01_OneHidden.cntk:MNISTtrain=[
|
||||
action = "train"
|
||||
NDLNetworkBuilder = [
|
||||
networkDescription = "C:\R\CNTK3\Examples\Image\MNIST\Config/01_OneHidden.ndl"
|
||||
]
|
||||
SGD = [
|
||||
epochSize = 60000
|
||||
minibatchSize = 32
|
||||
learningRatesPerMB = 0.1
|
||||
momentumPerMB = 0
|
||||
maxEpochs = 30
|
||||
]
|
||||
reader = [
|
||||
readerType = "UCIFastReader"
|
||||
file = "C:\R\CNTK3\Examples\Image\MNIST\Data/Train-28x28.txt"
|
||||
features = [
|
||||
dim = 784
|
||||
start = 1
|
||||
]
|
||||
labels = [
|
||||
dim = 1
|
||||
start = 0
|
||||
labelDim = 10
|
||||
labelMappingFile = "C:\R\CNTK3\Examples\Image\MNIST\Data/labelsmap.txt"
|
||||
]
|
||||
]
|
||||
] [reader=[randomize=none]]
|
||||
|
||||
configparameters: 01_OneHidden.cntk:ModelDir=C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_01_OneHidden@release_gpu/Models
|
||||
configparameters: 01_OneHidden.cntk:modelPath=C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_01_OneHidden@release_gpu/Models/01_OneHidden
|
||||
configparameters: 01_OneHidden.cntk:ndlMacros=C:\R\CNTK3\Examples\Image\MNIST\Config/Macros.ndl
|
||||
configparameters: 01_OneHidden.cntk:numMBsToShowResult=500
|
||||
configparameters: 01_OneHidden.cntk:OutputDir=C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_01_OneHidden@release_gpu
|
||||
configparameters: 01_OneHidden.cntk:precision=float
|
||||
configparameters: 01_OneHidden.cntk:RootDir=..
|
||||
configparameters: 01_OneHidden.cntk:RunDir=C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_01_OneHidden@release_gpu
|
||||
configparameters: 01_OneHidden.cntk:timestamping=true
|
||||
configparameters: 01_OneHidden.cntk:traceLevel=1
|
||||
04/07/2016 14:48:56: <<<<<<<<<<<<<<<<<<<< PROCESSED CONFIG WITH ALL VARIABLES RESOLVED <<<<<<<<<<<<<<<<<<<<
04/07/2016 14:48:56: Commands: MNISTtrain MNISTtest
04/07/2016 14:48:56: Precision = "float"
04/07/2016 14:48:56: CNTKModelPath: C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_01_OneHidden@release_gpu/Models/01_OneHidden
04/07/2016 14:48:56: CNTKCommandTrainInfo: MNISTtrain : 30
04/07/2016 14:48:56: CNTKCommandTrainInfo: CNTKNoMoreCommands_Total : 30

04/07/2016 14:48:56: ##############################################################################
04/07/2016 14:48:56: # #
04/07/2016 14:48:56: # Action "train" #
04/07/2016 14:48:56: # #
04/07/2016 14:48:56: ##############################################################################

04/07/2016 14:48:56: CNTKCommandTrainBegin: MNISTtrain
NDLBuilder Using GPU 0
Reading UCI file C:\R\CNTK3\Examples\Image\MNIST\Data/Train-28x28.txt

04/07/2016 14:48:56: Creating virgin network.

Post-processing network...

4 roots:
ce = CrossEntropyWithSoftmax()
err = ErrorPrediction()
errTop5 = ErrorPrediction()
ol.z = Plus()

Validating network. 17 nodes to process in pass 1.

Validating network. 9 nodes to process in pass 2.

Validating network, final pass.

Validating --> labels = InputValue() : -> [10 x *]
Validating --> ol.W = LearnableParameter() : -> [10 x 200]
Validating --> h1.W = LearnableParameter() : -> [200 x 784]
Validating --> featScale = LearnableParameter() : -> [1 x 1]
Validating --> features = InputValue() : -> [784 x *]
Validating --> featScaled = ElementTimes (featScale, features) : [1 x 1], [784 x *] -> [784 x 1 x *]
Validating --> h1.t = Times (h1.W, featScaled) : [200 x 784], [784 x 1 x *] -> [200 x 1 x *]
Validating --> h1.b = LearnableParameter() : -> [200 x 1]
Validating --> h1.z = Plus (h1.t, h1.b) : [200 x 1 x *], [200 x 1] -> [200 x 1 x *]
Validating --> h1.y = Sigmoid (h1.z) : [200 x 1 x *] -> [200 x 1 x *]
Validating --> ol.t = Times (ol.W, h1.y) : [10 x 200], [200 x 1 x *] -> [10 x 1 x *]
Validating --> ol.b = LearnableParameter() : -> [10 x 1]
Validating --> ol.z = Plus (ol.t, ol.b) : [10 x 1 x *], [10 x 1] -> [10 x 1 x *]
Validating --> ce = CrossEntropyWithSoftmax (labels, ol.z) : [10 x *], [10 x 1 x *] -> [1]
Validating --> err = ErrorPrediction (labels, ol.z) : [10 x *], [10 x 1 x *] -> [1]
Validating --> unnamed81 = LearnableParameter() : -> [1 x 1]
Validating --> errTop5 = ErrorPrediction (labels, ol.z, unnamed81) : [10 x *], [10 x 1 x *], [1 x 1] -> [1]

9 out of 17 nodes do not share the minibatch layout with the input data.

Post-processing network complete.

04/07/2016 14:48:57: Created model with 17 nodes on GPU 0.

04/07/2016 14:48:57: Training criterion node(s):
04/07/2016 14:48:57: ce = CrossEntropyWithSoftmax

04/07/2016 14:48:57: Evaluation criterion node(s):

04/07/2016 14:48:57: errTop5 = ErrorPrediction
04/07/2016 14:48:57: err = ErrorPrediction

Allocating matrices for forward and/or backward propagation.
04/07/2016 14:48:57: No PreCompute nodes found, skipping PreCompute step.

04/07/2016 14:48:57: Starting Epoch 1: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
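The learning rate per sample logged here follows from the SGD settings in the config above: learningRatesPerMB = 0.1 spread over minibatchSize = 32 samples. A quick check (plain Python, illustrative only):

    learning_rate_per_mb = 0.1   # SGD: learningRatesPerMB
    minibatch_size = 32          # SGD: minibatchSize
    print(learning_rate_per_mb / minibatch_size)   # 0.003125, as logged
    # Likewise, the 26.67% progress marks below are 16000 of the 60000-sample epoch.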
UCIFastReader: Starting at epoch 0, counting lines to determine record count...
|
||||
60000 records found.
|
||||
starting epoch 0 at record count 0, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:48:58: Starting minibatch loop.
|
||||
04/07/2016 14:48:59: Epoch[ 1 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 1.28930847; EvalErr[0]PerSample = 0.37568750; EvalErr[1]PerSample = 0.37568750; TotalTime = 0.8859s; SamplesPerSecond = 18061.6
|
||||
04/07/2016 14:48:59: Epoch[ 1 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.50096338; EvalErr[0]PerSample = 0.13118750; EvalErr[1]PerSample = 0.13118750; TotalTime = 0.7452s; SamplesPerSecond = 21472.0
|
||||
04/07/2016 14:49:00: Epoch[ 1 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.39348669; EvalErr[0]PerSample = 0.11293750; EvalErr[1]PerSample = 0.11293750; TotalTime = 0.6847s; SamplesPerSecond = 23367.7
|
||||
04/07/2016 14:49:01: Finished Epoch[ 1 of 30]: [Training Set] TrainLossPerSample = 0.64864165; TotalSamplesSeen = 60000; EvalErrPerSample [0]=0.18426667; [1]=0.18426667; AvgLearningRatePerSample = 0.003125; EpochTime=3.40798
|
||||
04/07/2016 14:49:01: Finished Epoch[ 1 of 30]: Criterion Node [ce] Per Sample = 0.64864165
|
||||
04/07/2016 14:49:01: Finished Epoch[ 1 of 30]: Evaluation Node [errTop5] Per Sample = 0.18426667
|
||||
04/07/2016 14:49:01: Finished Epoch[ 1 of 30]: Evaluation Node [err] Per Sample = 0.18426667
|
||||
04/07/2016 14:49:01: SGD: Saving checkpoint model 'C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.1'
|
||||
|
||||
04/07/2016 14:49:01: Starting Epoch 2: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 1 at record count 60000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:49:01: Starting minibatch loop.
|
||||
04/07/2016 14:49:01: Epoch[ 2 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.33642883; EvalErr[0]PerSample = 0.09581250; EvalErr[1]PerSample = 0.09581250; TotalTime = 0.4110s; SamplesPerSecond = 38925.4
|
||||
04/07/2016 14:49:01: Epoch[ 2 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.31968060; EvalErr[0]PerSample = 0.09237500; EvalErr[1]PerSample = 0.09237500; TotalTime = 0.3961s; SamplesPerSecond = 40392.4
|
||||
04/07/2016 14:49:02: Epoch[ 2 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.30829694; EvalErr[0]PerSample = 0.09081250; EvalErr[1]PerSample = 0.09081250; TotalTime = 0.4017s; SamplesPerSecond = 39828.9
|
||||
04/07/2016 14:49:02: Finished Epoch[ 2 of 30]: [Training Set] TrainLossPerSample = 0.31219226; TotalSamplesSeen = 120000; EvalErrPerSample [0]=0.090116665; [1]=0.090116665; AvgLearningRatePerSample = 0.003125; EpochTime=1.50626
|
||||
04/07/2016 14:49:02: Finished Epoch[ 2 of 30]: Criterion Node [ce] Per Sample = 0.31219226
|
||||
04/07/2016 14:49:02: Finished Epoch[ 2 of 30]: Evaluation Node [errTop5] Per Sample = 0.090116665
|
||||
04/07/2016 14:49:02: Finished Epoch[ 2 of 30]: Evaluation Node [err] Per Sample = 0.090116665
|
||||
04/07/2016 14:49:02: SGD: Saving checkpoint model 'C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.2'
|
||||
|
||||
04/07/2016 14:49:02: Starting Epoch 3: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 2 at record count 120000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:49:02: Starting minibatch loop.
|
||||
04/07/2016 14:49:02: Epoch[ 3 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.28846320; EvalErr[0]PerSample = 0.08293750; EvalErr[1]PerSample = 0.08293750; TotalTime = 0.3928s; SamplesPerSecond = 40736.8
|
||||
04/07/2016 14:49:03: Epoch[ 3 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.27885980; EvalErr[0]PerSample = 0.07968750; EvalErr[1]PerSample = 0.07968750; TotalTime = 0.4249s; SamplesPerSecond = 37659.1
|
||||
04/07/2016 14:49:03: Epoch[ 3 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.27336786; EvalErr[0]PerSample = 0.08018750; EvalErr[1]PerSample = 0.08018750; TotalTime = 0.4066s; SamplesPerSecond = 39353.7
|
||||
04/07/2016 14:49:04: Finished Epoch[ 3 of 30]: [Training Set] TrainLossPerSample = 0.27297238; TotalSamplesSeen = 180000; EvalErrPerSample [0]=0.078716666; [1]=0.078716666; AvgLearningRatePerSample = 0.003125; EpochTime=1.52499
|
||||
04/07/2016 14:49:04: Finished Epoch[ 3 of 30]: Criterion Node [ce] Per Sample = 0.27297238
|
||||
04/07/2016 14:49:04: Finished Epoch[ 3 of 30]: Evaluation Node [errTop5] Per Sample = 0.078716666
|
||||
04/07/2016 14:49:04: Finished Epoch[ 3 of 30]: Evaluation Node [err] Per Sample = 0.078716666
|
||||
04/07/2016 14:49:04: SGD: Saving checkpoint model 'C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.3'
|
||||
|
||||
04/07/2016 14:49:04: Starting Epoch 4: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 3 at record count 180000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:49:04: Starting minibatch loop.
|
||||
04/07/2016 14:49:04: Epoch[ 4 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.25608041; EvalErr[0]PerSample = 0.07331250; EvalErr[1]PerSample = 0.07331250; TotalTime = 0.4032s; SamplesPerSecond = 39686.1
|
||||
04/07/2016 14:49:04: Epoch[ 4 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.24877383; EvalErr[0]PerSample = 0.07125000; EvalErr[1]PerSample = 0.07125000; TotalTime = 0.4093s; SamplesPerSecond = 39093.1
|
||||
04/07/2016 14:49:05: Epoch[ 4 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.24436157; EvalErr[0]PerSample = 0.07162500; EvalErr[1]PerSample = 0.07162500; TotalTime = 0.4462s; SamplesPerSecond = 35858.4
|
||||
04/07/2016 14:49:05: Finished Epoch[ 4 of 30]: [Training Set] TrainLossPerSample = 0.24334691; TotalSamplesSeen = 240000; EvalErrPerSample [0]=0.069933333; [1]=0.069933333; AvgLearningRatePerSample = 0.003125; EpochTime=1.59941
|
||||
04/07/2016 14:49:05: Finished Epoch[ 4 of 30]: Criterion Node [ce] Per Sample = 0.24334691
|
||||
04/07/2016 14:49:05: Finished Epoch[ 4 of 30]: Evaluation Node [errTop5] Per Sample = 0.069933333
|
||||
04/07/2016 14:49:05: Finished Epoch[ 4 of 30]: Evaluation Node [err] Per Sample = 0.069933333
|
||||
04/07/2016 14:49:05: SGD: Saving checkpoint model 'C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.4'
|
||||
|
||||
04/07/2016 14:49:05: Starting Epoch 5: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 4 at record count 240000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:49:05: Starting minibatch loop.
|
||||
04/07/2016 14:49:06: Epoch[ 5 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.22827538; EvalErr[0]PerSample = 0.06593750; EvalErr[1]PerSample = 0.06593750; TotalTime = 0.4427s; SamplesPerSecond = 36145.2
|
||||
04/07/2016 14:49:06: Epoch[ 5 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.22302122; EvalErr[0]PerSample = 0.06368750; EvalErr[1]PerSample = 0.06368750; TotalTime = 0.4201s; SamplesPerSecond = 38090.3
|
||||
04/07/2016 14:49:07: Epoch[ 5 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.21961777; EvalErr[0]PerSample = 0.06381250; EvalErr[1]PerSample = 0.06381250; TotalTime = 0.4232s; SamplesPerSecond = 37806.1
|
||||
04/07/2016 14:49:07: Finished Epoch[ 5 of 30]: [Training Set] TrainLossPerSample = 0.2180291; TotalSamplesSeen = 300000; EvalErrPerSample [0]=0.0623; [1]=0.0623; AvgLearningRatePerSample = 0.003125; EpochTime=1.63154
|
||||
04/07/2016 14:49:07: Finished Epoch[ 5 of 30]: Criterion Node [ce] Per Sample = 0.2180291
|
||||
04/07/2016 14:49:07: Finished Epoch[ 5 of 30]: Evaluation Node [errTop5] Per Sample = 0.0623
|
||||
04/07/2016 14:49:07: Finished Epoch[ 5 of 30]: Evaluation Node [err] Per Sample = 0.0623
|
||||
04/07/2016 14:49:07: SGD: Saving checkpoint model 'C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.5'
|
||||
|
||||
04/07/2016 14:49:07: Starting Epoch 6: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 5 at record count 300000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:49:07: Starting minibatch loop.
|
||||
04/07/2016 14:49:07: Epoch[ 6 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.20467020; EvalErr[0]PerSample = 0.05868750; EvalErr[1]PerSample = 0.05868750; TotalTime = 0.4071s; SamplesPerSecond = 39298.3
|
||||
04/07/2016 14:49:08: Epoch[ 6 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.20118964; EvalErr[0]PerSample = 0.05825000; EvalErr[1]PerSample = 0.05825000; TotalTime = 0.4105s; SamplesPerSecond = 38974.6
|
||||
04/07/2016 14:49:08: Epoch[ 6 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.19883932; EvalErr[0]PerSample = 0.05775000; EvalErr[1]PerSample = 0.05775000; TotalTime = 0.4028s; SamplesPerSecond = 39719.1
|
||||
04/07/2016 14:49:08: Finished Epoch[ 6 of 30]: [Training Set] TrainLossPerSample = 0.19664152; TotalSamplesSeen = 360000; EvalErrPerSample [0]=0.05635; [1]=0.05635; AvgLearningRatePerSample = 0.003125; EpochTime=1.54017
|
||||
04/07/2016 14:49:08: Finished Epoch[ 6 of 30]: Criterion Node [ce] Per Sample = 0.19664152
|
||||
04/07/2016 14:49:08: Finished Epoch[ 6 of 30]: Evaluation Node [errTop5] Per Sample = 0.05635
|
||||
04/07/2016 14:49:08: Finished Epoch[ 6 of 30]: Evaluation Node [err] Per Sample = 0.05635
|
||||
04/07/2016 14:49:09: SGD: Saving checkpoint model 'C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.6'
|
||||
|
||||
04/07/2016 14:49:09: Starting Epoch 7: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 6 at record count 360000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:49:09: Starting minibatch loop.
|
||||
04/07/2016 14:49:09: Epoch[ 7 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.18476917; EvalErr[0]PerSample = 0.05306250; EvalErr[1]PerSample = 0.05306250; TotalTime = 0.4088s; SamplesPerSecond = 39141.9
|
||||
04/07/2016 14:49:09: Epoch[ 7 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.18272620; EvalErr[0]PerSample = 0.05325000; EvalErr[1]PerSample = 0.05325000; TotalTime = 0.3998s; SamplesPerSecond = 40015.8
|
||||
04/07/2016 14:49:10: Epoch[ 7 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.18130524; EvalErr[0]PerSample = 0.05287500; EvalErr[1]PerSample = 0.05287500; TotalTime = 0.4255s; SamplesPerSecond = 37601.1
|
||||
04/07/2016 14:49:10: Finished Epoch[ 7 of 30]: [Training Set] TrainLossPerSample = 0.17858277; TotalSamplesSeen = 420000; EvalErrPerSample [0]=0.051366668; [1]=0.051366668; AvgLearningRatePerSample = 0.003125; EpochTime=1.55222
|
||||
04/07/2016 14:49:10: Finished Epoch[ 7 of 30]: Criterion Node [ce] Per Sample = 0.17858277
|
||||
04/07/2016 14:49:10: Finished Epoch[ 7 of 30]: Evaluation Node [errTop5] Per Sample = 0.051366668
|
||||
04/07/2016 14:49:10: Finished Epoch[ 7 of 30]: Evaluation Node [err] Per Sample = 0.051366668
|
||||
04/07/2016 14:49:10: SGD: Saving checkpoint model 'C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.7'
|
||||
|
||||
04/07/2016 14:49:10: Starting Epoch 8: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 7 at record count 420000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:49:10: Starting minibatch loop.
|
||||
04/07/2016 14:49:11: Epoch[ 8 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.16791026; EvalErr[0]PerSample = 0.04856250; EvalErr[1]PerSample = 0.04856250; TotalTime = 0.4023s; SamplesPerSecond = 39773.9
|
||||
04/07/2016 14:49:11: Epoch[ 8 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.16702440; EvalErr[0]PerSample = 0.04756250; EvalErr[1]PerSample = 0.04756250; TotalTime = 0.4014s; SamplesPerSecond = 39855.8
|
||||
04/07/2016 14:49:11: Epoch[ 8 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.16637323; EvalErr[0]PerSample = 0.04706250; EvalErr[1]PerSample = 0.04706250; TotalTime = 0.4006s; SamplesPerSecond = 39935.7
|
||||
04/07/2016 14:49:12: Finished Epoch[ 8 of 30]: [Training Set] TrainLossPerSample = 0.16323005; TotalSamplesSeen = 480000; EvalErrPerSample [0]=0.046350002; [1]=0.046350002; AvgLearningRatePerSample = 0.003125; EpochTime=1.50533
|
||||
04/07/2016 14:49:12: Finished Epoch[ 8 of 30]: Criterion Node [ce] Per Sample = 0.16323005
|
||||
04/07/2016 14:49:12: Finished Epoch[ 8 of 30]: Evaluation Node [errTop5] Per Sample = 0.046350002
|
||||
04/07/2016 14:49:12: Finished Epoch[ 8 of 30]: Evaluation Node [err] Per Sample = 0.046350002
|
||||
04/07/2016 14:49:12: SGD: Saving checkpoint model 'C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.8'
|
||||
|
||||
04/07/2016 14:49:12: Starting Epoch 9: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 8 at record count 480000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:49:12: Starting minibatch loop.
|
||||
04/07/2016 14:49:12: Epoch[ 9 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.15350847; EvalErr[0]PerSample = 0.04406250; EvalErr[1]PerSample = 0.04406250; TotalTime = 0.4002s; SamplesPerSecond = 39978.2
|
||||
04/07/2016 14:49:12: Epoch[ 9 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.15356984; EvalErr[0]PerSample = 0.04381250; EvalErr[1]PerSample = 0.04381250; TotalTime = 0.4039s; SamplesPerSecond = 39612.3
|
||||
04/07/2016 14:49:13: Epoch[ 9 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.15354349; EvalErr[0]PerSample = 0.04325000; EvalErr[1]PerSample = 0.04325000; TotalTime = 0.4011s; SamplesPerSecond = 39890.7
|
||||
04/07/2016 14:49:13: Finished Epoch[ 9 of 30]: [Training Set] TrainLossPerSample = 0.15006728; TotalSamplesSeen = 540000; EvalErrPerSample [0]=0.042533334; [1]=0.042533334; AvgLearningRatePerSample = 0.003125; EpochTime=1.50807
|
||||
04/07/2016 14:49:13: Finished Epoch[ 9 of 30]: Criterion Node [ce] Per Sample = 0.15006728
|
||||
04/07/2016 14:49:13: Finished Epoch[ 9 of 30]: Evaluation Node [errTop5] Per Sample = 0.042533334
|
||||
04/07/2016 14:49:13: Finished Epoch[ 9 of 30]: Evaluation Node [err] Per Sample = 0.042533334
|
||||
04/07/2016 14:49:13: SGD: Saving checkpoint model 'C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.9'
|
||||
|
||||
04/07/2016 14:49:13: Starting Epoch 10: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 9 at record count 540000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:49:13: Starting minibatch loop.
|
||||
04/07/2016 14:49:14: Epoch[10 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.14108638; EvalErr[0]PerSample = 0.04043750; EvalErr[1]PerSample = 0.04043750; TotalTime = 0.4020s; SamplesPerSecond = 39805.4
|
||||
04/07/2016 14:49:14: Epoch[10 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.14194075; EvalErr[0]PerSample = 0.04112500; EvalErr[1]PerSample = 0.04112500; TotalTime = 0.3985s; SamplesPerSecond = 40155.4
|
||||
04/07/2016 14:49:14: Epoch[10 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.14241907; EvalErr[0]PerSample = 0.04043750; EvalErr[1]PerSample = 0.04043750; TotalTime = 0.3966s; SamplesPerSecond = 40343.1
|
||||
04/07/2016 14:49:15: Finished Epoch[10 of 30]: [Training Set] TrainLossPerSample = 0.13867831; TotalSamplesSeen = 600000; EvalErrPerSample [0]=0.039433334; [1]=0.039433334; AvgLearningRatePerSample = 0.003125; EpochTime=1.49635
|
||||
04/07/2016 14:49:15: Finished Epoch[10 of 30]: Criterion Node [ce] Per Sample = 0.13867831
|
||||
04/07/2016 14:49:15: Finished Epoch[10 of 30]: Evaluation Node [errTop5] Per Sample = 0.039433334
|
||||
04/07/2016 14:49:15: Finished Epoch[10 of 30]: Evaluation Node [err] Per Sample = 0.039433334
|
||||
04/07/2016 14:49:15: SGD: Saving checkpoint model 'C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.10'
|
||||
|
||||
04/07/2016 14:49:15: Starting Epoch 11: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 10 at record count 600000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:49:15: Starting minibatch loop.
|
||||
04/07/2016 14:49:15: Epoch[11 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.13026932; EvalErr[0]PerSample = 0.03768750; EvalErr[1]PerSample = 0.03768750; TotalTime = 0.4008s; SamplesPerSecond = 39923.9
|
||||
04/07/2016 14:49:16: Epoch[11 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.13180487; EvalErr[0]PerSample = 0.03762500; EvalErr[1]PerSample = 0.03762500; TotalTime = 0.4001s; SamplesPerSecond = 39991.1
|
||||
04/07/2016 14:49:16: Epoch[11 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.13268649; EvalErr[0]PerSample = 0.03768750; EvalErr[1]PerSample = 0.03768750; TotalTime = 0.4010s; SamplesPerSecond = 39898.8
|
||||
04/07/2016 14:49:16: Finished Epoch[11 of 30]: [Training Set] TrainLossPerSample = 0.1287349; TotalSamplesSeen = 660000; EvalErrPerSample [0]=0.036533333; [1]=0.036533333; AvgLearningRatePerSample = 0.003125; EpochTime=1.50423
|
||||
04/07/2016 14:49:16: Finished Epoch[11 of 30]: Criterion Node [ce] Per Sample = 0.1287349
|
||||
04/07/2016 14:49:16: Finished Epoch[11 of 30]: Evaluation Node [errTop5] Per Sample = 0.036533333
|
||||
04/07/2016 14:49:16: Finished Epoch[11 of 30]: Evaluation Node [err] Per Sample = 0.036533333
|
||||
04/07/2016 14:49:16: SGD: Saving checkpoint model 'C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.11'
|
||||
|
||||
04/07/2016 14:49:16: Starting Epoch 12: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 11 at record count 660000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:49:16: Starting minibatch loop.
|
||||
04/07/2016 14:49:17: Epoch[12 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.12077362; EvalErr[0]PerSample = 0.03375000; EvalErr[1]PerSample = 0.03375000; TotalTime = 0.3991s; SamplesPerSecond = 40094.1
|
||||
04/07/2016 14:49:17: Epoch[12 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.12290393; EvalErr[0]PerSample = 0.03518750; EvalErr[1]PerSample = 0.03518750; TotalTime = 0.4015s; SamplesPerSecond = 39850.5
|
||||
04/07/2016 14:49:17: Epoch[12 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.12410324; EvalErr[0]PerSample = 0.03525000; EvalErr[1]PerSample = 0.03525000; TotalTime = 0.4013s; SamplesPerSecond = 39872.8
|
||||
04/07/2016 14:49:18: Finished Epoch[12 of 30]: [Training Set] TrainLossPerSample = 0.11998519; TotalSamplesSeen = 720000; EvalErrPerSample [0]=0.033800002; [1]=0.033800002; AvgLearningRatePerSample = 0.003125; EpochTime=1.50444
|
||||
04/07/2016 14:49:18: Finished Epoch[12 of 30]: Criterion Node [ce] Per Sample = 0.11998519
|
||||
04/07/2016 14:49:18: Finished Epoch[12 of 30]: Evaluation Node [errTop5] Per Sample = 0.033800002
|
||||
04/07/2016 14:49:18: Finished Epoch[12 of 30]: Evaluation Node [err] Per Sample = 0.033800002
|
||||
04/07/2016 14:49:18: SGD: Saving checkpoint model 'C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.12'
|
||||
|
||||
04/07/2016 14:49:18: Starting Epoch 13: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 12 at record count 720000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:49:18: Starting minibatch loop.
|
||||
04/07/2016 14:49:18: Epoch[13 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.11238457; EvalErr[0]PerSample = 0.03125000; EvalErr[1]PerSample = 0.03125000; TotalTime = 0.4007s; SamplesPerSecond = 39930.7
|
||||
04/07/2016 14:49:19: Epoch[13 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.11503398; EvalErr[0]PerSample = 0.03281250; EvalErr[1]PerSample = 0.03281250; TotalTime = 0.4017s; SamplesPerSecond = 39835.7
|
||||
04/07/2016 14:49:19: Epoch[13 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.11648001; EvalErr[0]PerSample = 0.03312500; EvalErr[1]PerSample = 0.03312500; TotalTime = 0.3994s; SamplesPerSecond = 40064.2
|
||||
04/07/2016 14:49:19: Finished Epoch[13 of 30]: [Training Set] TrainLossPerSample = 0.11223369; TotalSamplesSeen = 780000; EvalErrPerSample [0]=0.031550001; [1]=0.031550001; AvgLearningRatePerSample = 0.003125; EpochTime=1.50349
|
||||
04/07/2016 14:49:19: Finished Epoch[13 of 30]: Criterion Node [ce] Per Sample = 0.11223369
|
||||
04/07/2016 14:49:19: Finished Epoch[13 of 30]: Evaluation Node [errTop5] Per Sample = 0.031550001
|
||||
04/07/2016 14:49:19: Finished Epoch[13 of 30]: Evaluation Node [err] Per Sample = 0.031550001
|
||||
04/07/2016 14:49:19: SGD: Saving checkpoint model 'C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.13'
|
||||
|
||||
04/07/2016 14:49:19: Starting Epoch 14: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 13 at record count 780000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:49:19: Starting minibatch loop.
|
||||
04/07/2016 14:49:20: Epoch[14 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.10493344; EvalErr[0]PerSample = 0.02881250; EvalErr[1]PerSample = 0.02881250; TotalTime = 0.4009s; SamplesPerSecond = 39908.0
|
||||
04/07/2016 14:49:20: Epoch[14 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.10803001; EvalErr[0]PerSample = 0.03037500; EvalErr[1]PerSample = 0.03037500; TotalTime = 0.4019s; SamplesPerSecond = 39812.9
|
||||
04/07/2016 14:49:21: Epoch[14 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.10966496; EvalErr[0]PerSample = 0.03131250; EvalErr[1]PerSample = 0.03131250; TotalTime = 0.3999s; SamplesPerSecond = 40009.5
|
||||
04/07/2016 14:49:21: Finished Epoch[14 of 30]: [Training Set] TrainLossPerSample = 0.10532398; TotalSamplesSeen = 840000; EvalErrPerSample [0]=0.029366666; [1]=0.029366666; AvgLearningRatePerSample = 0.003125; EpochTime=1.50479
|
||||
04/07/2016 14:49:21: Finished Epoch[14 of 30]: Criterion Node [ce] Per Sample = 0.10532398
|
||||
04/07/2016 14:49:21: Finished Epoch[14 of 30]: Evaluation Node [errTop5] Per Sample = 0.029366666
|
||||
04/07/2016 14:49:21: Finished Epoch[14 of 30]: Evaluation Node [err] Per Sample = 0.029366666
|
||||
04/07/2016 14:49:21: SGD: Saving checkpoint model 'C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.14'
|
||||
|
||||
04/07/2016 14:49:21: Starting Epoch 15: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 14 at record count 840000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:49:21: Starting minibatch loop.
|
||||
04/07/2016 14:49:21: Epoch[15 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.09828265; EvalErr[0]PerSample = 0.02781250; EvalErr[1]PerSample = 0.02781250; TotalTime = 0.4005s; SamplesPerSecond = 39951.0
|
||||
04/07/2016 14:49:22: Epoch[15 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.10175670; EvalErr[0]PerSample = 0.02887500; EvalErr[1]PerSample = 0.02887500; TotalTime = 0.3984s; SamplesPerSecond = 40164.4
|
||||
04/07/2016 14:49:22: Epoch[15 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.10353468; EvalErr[0]PerSample = 0.02975000; EvalErr[1]PerSample = 0.02975000; TotalTime = 0.3979s; SamplesPerSecond = 40211.4
|
||||
04/07/2016 14:49:22: Finished Epoch[15 of 30]: [Training Set] TrainLossPerSample = 0.099128082; TotalSamplesSeen = 900000; EvalErrPerSample [0]=0.027900001; [1]=0.027900001; AvgLearningRatePerSample = 0.003125; EpochTime=1.4965
|
||||
04/07/2016 14:49:22: Finished Epoch[15 of 30]: Criterion Node [ce] Per Sample = 0.099128082
|
||||
04/07/2016 14:49:22: Finished Epoch[15 of 30]: Evaluation Node [errTop5] Per Sample = 0.027900001
|
||||
04/07/2016 14:49:22: Finished Epoch[15 of 30]: Evaluation Node [err] Per Sample = 0.027900001
|
||||
04/07/2016 14:49:22: SGD: Saving checkpoint model 'C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.15'
|
||||
|
||||
04/07/2016 14:49:22: Starting Epoch 16: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 15 at record count 900000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:49:22: Starting minibatch loop.
|
||||
04/07/2016 14:49:23: Epoch[16 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.09231853; EvalErr[0]PerSample = 0.02643750; EvalErr[1]PerSample = 0.02643750; TotalTime = 0.4014s; SamplesPerSecond = 39863.1
|
||||
04/07/2016 14:49:23: Epoch[16 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.09610316; EvalErr[0]PerSample = 0.02700000; EvalErr[1]PerSample = 0.02700000; TotalTime = 0.4001s; SamplesPerSecond = 39993.7
|
||||
04/07/2016 14:49:24: Epoch[16 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.09798691; EvalErr[0]PerSample = 0.02768750; EvalErr[1]PerSample = 0.02768750; TotalTime = 0.4012s; SamplesPerSecond = 39878.7
|
||||
04/07/2016 14:49:24: Finished Epoch[16 of 30]: [Training Set] TrainLossPerSample = 0.09354043; TotalSamplesSeen = 960000; EvalErrPerSample [0]=0.0262; [1]=0.0262; AvgLearningRatePerSample = 0.003125; EpochTime=1.50569
|
||||
04/07/2016 14:49:24: Finished Epoch[16 of 30]: Criterion Node [ce] Per Sample = 0.09354043
|
||||
04/07/2016 14:49:24: Finished Epoch[16 of 30]: Evaluation Node [errTop5] Per Sample = 0.0262
|
||||
04/07/2016 14:49:24: Finished Epoch[16 of 30]: Evaluation Node [err] Per Sample = 0.0262
|
||||
04/07/2016 14:49:24: SGD: Saving checkpoint model 'C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.16'
|
||||
|
||||
04/07/2016 14:49:24: Starting Epoch 17: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 16 at record count 960000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:49:24: Starting minibatch loop.
|
||||
04/07/2016 14:49:24: Epoch[17 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.08694670; EvalErr[0]PerSample = 0.02512500; EvalErr[1]PerSample = 0.02512500; TotalTime = 0.4001s; SamplesPerSecond = 39986.7
|
||||
04/07/2016 14:49:25: Epoch[17 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.09097736; EvalErr[0]PerSample = 0.02537500; EvalErr[1]PerSample = 0.02537500; TotalTime = 0.4006s; SamplesPerSecond = 39935.5
|
||||
04/07/2016 14:49:25: Epoch[17 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.09293706; EvalErr[0]PerSample = 0.02662500; EvalErr[1]PerSample = 0.02662500; TotalTime = 0.4002s; SamplesPerSecond = 39975.6
|
||||
04/07/2016 14:49:25: Finished Epoch[17 of 30]: [Training Set] TrainLossPerSample = 0.088473409; TotalSamplesSeen = 1020000; EvalErrPerSample [0]=0.024916667; [1]=0.024916667; AvgLearningRatePerSample = 0.003125; EpochTime=1.50249
|
||||
04/07/2016 14:49:25: Finished Epoch[17 of 30]: Criterion Node [ce] Per Sample = 0.088473409
|
||||
04/07/2016 14:49:25: Finished Epoch[17 of 30]: Evaluation Node [errTop5] Per Sample = 0.024916667
|
||||
04/07/2016 14:49:25: Finished Epoch[17 of 30]: Evaluation Node [err] Per Sample = 0.024916667
|
||||
04/07/2016 14:49:25: SGD: Saving checkpoint model 'C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.17'
|
||||
|
||||
04/07/2016 14:49:25: Starting Epoch 18: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 17 at record count 1020000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:49:25: Starting minibatch loop.
|
||||
04/07/2016 14:49:26: Epoch[18 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.08208772; EvalErr[0]PerSample = 0.02268750; EvalErr[1]PerSample = 0.02268750; TotalTime = 0.4007s; SamplesPerSecond = 39931.9
|
||||
04/07/2016 14:49:26: Epoch[18 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.08630401; EvalErr[0]PerSample = 0.02412500; EvalErr[1]PerSample = 0.02412500; TotalTime = 0.4005s; SamplesPerSecond = 39951.7
|
||||
04/07/2016 14:49:27: Epoch[18 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.08831522; EvalErr[0]PerSample = 0.02512500; EvalErr[1]PerSample = 0.02512500; TotalTime = 0.4077s; SamplesPerSecond = 39240.1
|
||||
04/07/2016 14:49:27: Finished Epoch[18 of 30]: [Training Set] TrainLossPerSample = 0.083854452; TotalSamplesSeen = 1080000; EvalErrPerSample [0]=0.023250001; [1]=0.023250001; AvgLearningRatePerSample = 0.003125; EpochTime=1.52544
|
||||
04/07/2016 14:49:27: Finished Epoch[18 of 30]: Criterion Node [ce] Per Sample = 0.083854452
|
||||
04/07/2016 14:49:27: Finished Epoch[18 of 30]: Evaluation Node [errTop5] Per Sample = 0.023250001
|
||||
04/07/2016 14:49:27: Finished Epoch[18 of 30]: Evaluation Node [err] Per Sample = 0.023250001
|
||||
04/07/2016 14:49:27: SGD: Saving checkpoint model 'C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.18'
|
||||
|
||||
04/07/2016 14:49:27: Starting Epoch 19: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 18 at record count 1080000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:49:27: Starting minibatch loop.
|
||||
04/07/2016 14:49:27: Epoch[19 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.07767484; EvalErr[0]PerSample = 0.02112500; EvalErr[1]PerSample = 0.02112500; TotalTime = 0.4860s; SamplesPerSecond = 32919.6
|
||||
04/07/2016 14:49:28: Epoch[19 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.08202056; EvalErr[0]PerSample = 0.02268750; EvalErr[1]PerSample = 0.02268750; TotalTime = 0.4952s; SamplesPerSecond = 32309.7
|
||||
04/07/2016 14:49:28: Epoch[19 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.08406421; EvalErr[0]PerSample = 0.02356250; EvalErr[1]PerSample = 0.02356250; TotalTime = 0.4924s; SamplesPerSecond = 32494.2
|
||||
04/07/2016 14:49:29: Finished Epoch[19 of 30]: [Training Set] TrainLossPerSample = 0.079624176; TotalSamplesSeen = 1140000; EvalErrPerSample [0]=0.021883333; [1]=0.021883333; AvgLearningRatePerSample = 0.003125; EpochTime=1.82496
|
||||
04/07/2016 14:49:29: Finished Epoch[19 of 30]: Criterion Node [ce] Per Sample = 0.079624176
|
||||
04/07/2016 14:49:29: Finished Epoch[19 of 30]: Evaluation Node [errTop5] Per Sample = 0.021883333
|
||||
04/07/2016 14:49:29: Finished Epoch[19 of 30]: Evaluation Node [err] Per Sample = 0.021883333
|
||||
04/07/2016 14:49:29: SGD: Saving checkpoint model 'C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.19'
|
||||
|
||||
04/07/2016 14:49:29: Starting Epoch 20: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 19 at record count 1140000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:49:29: Starting minibatch loop.
|
||||
04/07/2016 14:49:29: Epoch[20 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.07365132; EvalErr[0]PerSample = 0.01987500; EvalErr[1]PerSample = 0.01987500; TotalTime = 0.4603s; SamplesPerSecond = 34759.9
|
||||
04/07/2016 14:49:30: Epoch[20 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.07807613; EvalErr[0]PerSample = 0.02200000; EvalErr[1]PerSample = 0.02200000; TotalTime = 0.4763s; SamplesPerSecond = 33593.6
|
||||
04/07/2016 14:49:30: Epoch[20 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.08013644; EvalErr[0]PerSample = 0.02243750; EvalErr[1]PerSample = 0.02243750; TotalTime = 0.4585s; SamplesPerSecond = 34893.4
|
||||
04/07/2016 14:49:31: Finished Epoch[20 of 30]: [Training Set] TrainLossPerSample = 0.075732835; TotalSamplesSeen = 1200000; EvalErrPerSample [0]=0.020933334; [1]=0.020933334; AvgLearningRatePerSample = 0.003125; EpochTime=1.7424
|
||||
04/07/2016 14:49:31: Finished Epoch[20 of 30]: Criterion Node [ce] Per Sample = 0.075732835
|
||||
04/07/2016 14:49:31: Finished Epoch[20 of 30]: Evaluation Node [errTop5] Per Sample = 0.020933334
|
||||
04/07/2016 14:49:31: Finished Epoch[20 of 30]: Evaluation Node [err] Per Sample = 0.020933334
|
||||
04/07/2016 14:49:31: SGD: Saving checkpoint model 'C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.20'
|
||||
|
||||
04/07/2016 14:49:31: Starting Epoch 21: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 20 at record count 1200000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:49:31: Starting minibatch loop.
|
||||
04/07/2016 14:49:31: Epoch[21 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.06996849; EvalErr[0]PerSample = 0.01900000; EvalErr[1]PerSample = 0.01900000; TotalTime = 0.4846s; SamplesPerSecond = 33014.6
|
||||
04/07/2016 14:49:32: Epoch[21 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.07442801; EvalErr[0]PerSample = 0.02100000; EvalErr[1]PerSample = 0.02100000; TotalTime = 0.4574s; SamplesPerSecond = 34981.2
|
||||
04/07/2016 14:49:32: Epoch[21 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.07649426; EvalErr[0]PerSample = 0.02143750; EvalErr[1]PerSample = 0.02143750; TotalTime = 0.4427s; SamplesPerSecond = 36140.6
|
||||
04/07/2016 14:49:32: Finished Epoch[21 of 30]: [Training Set] TrainLossPerSample = 0.072139725; TotalSamplesSeen = 1260000; EvalErrPerSample [0]=0.019916667; [1]=0.019916667; AvgLearningRatePerSample = 0.003125; EpochTime=1.71984
|
||||
04/07/2016 14:49:32: Finished Epoch[21 of 30]: Criterion Node [ce] Per Sample = 0.072139725
|
||||
04/07/2016 14:49:32: Finished Epoch[21 of 30]: Evaluation Node [errTop5] Per Sample = 0.019916667
|
||||
04/07/2016 14:49:32: Finished Epoch[21 of 30]: Evaluation Node [err] Per Sample = 0.019916667
|
||||
04/07/2016 14:49:32: SGD: Saving checkpoint model 'C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.21'
|
||||
|
||||
04/07/2016 14:49:32: Starting Epoch 22: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 21 at record count 1260000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:49:32: Starting minibatch loop.
|
||||
04/07/2016 14:49:33: Epoch[22 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.06658476; EvalErr[0]PerSample = 0.01781250; EvalErr[1]PerSample = 0.01781250; TotalTime = 0.4218s; SamplesPerSecond = 37928.7
|
||||
04/07/2016 14:49:33: Epoch[22 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.07104192; EvalErr[0]PerSample = 0.01968750; EvalErr[1]PerSample = 0.01968750; TotalTime = 0.4471s; SamplesPerSecond = 35785.3
|
||||
04/07/2016 14:49:34: Epoch[22 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.07310529; EvalErr[0]PerSample = 0.02025000; EvalErr[1]PerSample = 0.02025000; TotalTime = 0.4293s; SamplesPerSecond = 37272.8
|
||||
04/07/2016 14:49:34: Finished Epoch[22 of 30]: [Training Set] TrainLossPerSample = 0.068810955; TotalSamplesSeen = 1320000; EvalErrPerSample [0]=0.0188; [1]=0.0188; AvgLearningRatePerSample = 0.003125; EpochTime=1.61324
|
||||
04/07/2016 14:49:34: Finished Epoch[22 of 30]: Criterion Node [ce] Per Sample = 0.068810955
|
||||
04/07/2016 14:49:34: Finished Epoch[22 of 30]: Evaluation Node [errTop5] Per Sample = 0.0188
|
||||
04/07/2016 14:49:34: Finished Epoch[22 of 30]: Evaluation Node [err] Per Sample = 0.0188
|
||||
04/07/2016 14:49:34: SGD: Saving checkpoint model 'C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.22'
|
||||
|
||||
04/07/2016 14:49:34: Starting Epoch 23: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 22 at record count 1320000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:49:34: Starting minibatch loop.
|
||||
04/07/2016 14:49:34: Epoch[23 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.06346445; EvalErr[0]PerSample = 0.01668750; EvalErr[1]PerSample = 0.01668750; TotalTime = 0.4318s; SamplesPerSecond = 37052.0
|
||||
04/07/2016 14:49:35: Epoch[23 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.06788847; EvalErr[0]PerSample = 0.01906250; EvalErr[1]PerSample = 0.01906250; TotalTime = 0.4527s; SamplesPerSecond = 35346.5
|
||||
04/07/2016 14:49:35: Epoch[23 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.06994334; EvalErr[0]PerSample = 0.01962500; EvalErr[1]PerSample = 0.01962500; TotalTime = 0.4259s; SamplesPerSecond = 37570.6
|
||||
04/07/2016 14:49:36: Finished Epoch[23 of 30]: [Training Set] TrainLossPerSample = 0.065717839; TotalSamplesSeen = 1380000; EvalErrPerSample [0]=0.018066667; [1]=0.018066667; AvgLearningRatePerSample = 0.003125; EpochTime=1.62343
|
||||
04/07/2016 14:49:36: Finished Epoch[23 of 30]: Criterion Node [ce] Per Sample = 0.065717839
|
||||
04/07/2016 14:49:36: Finished Epoch[23 of 30]: Evaluation Node [errTop5] Per Sample = 0.018066667
|
||||
04/07/2016 14:49:36: Finished Epoch[23 of 30]: Evaluation Node [err] Per Sample = 0.018066667
|
||||
04/07/2016 14:49:36: SGD: Saving checkpoint model 'C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.23'
|
||||
|
||||
04/07/2016 14:49:36: Starting Epoch 24: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 23 at record count 1380000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:49:36: Starting minibatch loop.
|
||||
04/07/2016 14:49:36: Epoch[24 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.06057711; EvalErr[0]PerSample = 0.01612500; EvalErr[1]PerSample = 0.01612500; TotalTime = 0.4343s; SamplesPerSecond = 36837.6
|
||||
04/07/2016 14:49:37: Epoch[24 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.06494395; EvalErr[0]PerSample = 0.01787500; EvalErr[1]PerSample = 0.01787500; TotalTime = 0.4401s; SamplesPerSecond = 36357.6
|
||||
04/07/2016 14:49:37: Epoch[24 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.06698516; EvalErr[0]PerSample = 0.01881250; EvalErr[1]PerSample = 0.01881250; TotalTime = 0.4206s; SamplesPerSecond = 38040.4
|
||||
04/07/2016 14:49:37: Finished Epoch[24 of 30]: [Training Set] TrainLossPerSample = 0.062835939; TotalSamplesSeen = 1440000; EvalErrPerSample [0]=0.017183334; [1]=0.017183334; AvgLearningRatePerSample = 0.003125; EpochTime=1.63118
|
||||
04/07/2016 14:49:37: Finished Epoch[24 of 30]: Criterion Node [ce] Per Sample = 0.062835939
|
||||
04/07/2016 14:49:37: Finished Epoch[24 of 30]: Evaluation Node [errTop5] Per Sample = 0.017183334
|
||||
04/07/2016 14:49:37: Finished Epoch[24 of 30]: Evaluation Node [err] Per Sample = 0.017183334
|
||||
04/07/2016 14:49:37: SGD: Saving checkpoint model 'C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.24'
|
||||
|
||||
04/07/2016 14:49:37: Starting Epoch 25: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 24 at record count 1440000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:49:37: Starting minibatch loop.
|
||||
04/07/2016 14:49:38: Epoch[25 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.05789647; EvalErr[0]PerSample = 0.01525000; EvalErr[1]PerSample = 0.01525000; TotalTime = 0.4240s; SamplesPerSecond = 37739.9
|
||||
04/07/2016 14:49:38: Epoch[25 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.06218763; EvalErr[0]PerSample = 0.01693750; EvalErr[1]PerSample = 0.01693750; TotalTime = 0.4049s; SamplesPerSecond = 39519.1
|
||||
04/07/2016 14:49:39: Epoch[25 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.06421169; EvalErr[0]PerSample = 0.01775000; EvalErr[1]PerSample = 0.01775000; TotalTime = 0.3995s; SamplesPerSecond = 40050.9
|
||||
04/07/2016 14:49:39: Finished Epoch[25 of 30]: [Training Set] TrainLossPerSample = 0.060144316; TotalSamplesSeen = 1500000; EvalErrPerSample [0]=0.016216667; [1]=0.016216667; AvgLearningRatePerSample = 0.003125; EpochTime=1.55418
|
||||
04/07/2016 14:49:39: Finished Epoch[25 of 30]: Criterion Node [ce] Per Sample = 0.060144316
|
||||
04/07/2016 14:49:39: Finished Epoch[25 of 30]: Evaluation Node [errTop5] Per Sample = 0.016216667
|
||||
04/07/2016 14:49:39: Finished Epoch[25 of 30]: Evaluation Node [err] Per Sample = 0.016216667
|
||||
04/07/2016 14:49:39: SGD: Saving checkpoint model 'C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.25'
|
||||
|
||||
04/07/2016 14:49:39: Starting Epoch 26: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 25 at record count 1500000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:49:39: Starting minibatch loop.
|
||||
04/07/2016 14:49:39: Epoch[26 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.05540020; EvalErr[0]PerSample = 0.01456250; EvalErr[1]PerSample = 0.01456250; TotalTime = 0.4228s; SamplesPerSecond = 37840.8
|
||||
04/07/2016 14:49:40: Epoch[26 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.05960179; EvalErr[0]PerSample = 0.01625000; EvalErr[1]PerSample = 0.01625000; TotalTime = 0.4376s; SamplesPerSecond = 36564.9
|
||||
04/07/2016 14:49:40: Epoch[26 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.06160596; EvalErr[0]PerSample = 0.01693750; EvalErr[1]PerSample = 0.01693750; TotalTime = 0.4307s; SamplesPerSecond = 37152.7
|
||||
04/07/2016 14:49:41: Finished Epoch[26 of 30]: [Training Set] TrainLossPerSample = 0.05762472; TotalSamplesSeen = 1560000; EvalErrPerSample [0]=0.015433334; [1]=0.015433334; AvgLearningRatePerSample = 0.003125; EpochTime=1.61449
|
||||
04/07/2016 14:49:41: Finished Epoch[26 of 30]: Criterion Node [ce] Per Sample = 0.05762472
|
||||
04/07/2016 14:49:41: Finished Epoch[26 of 30]: Evaluation Node [errTop5] Per Sample = 0.015433334
|
||||
04/07/2016 14:49:41: Finished Epoch[26 of 30]: Evaluation Node [err] Per Sample = 0.015433334
|
||||
04/07/2016 14:49:41: SGD: Saving checkpoint model 'C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.26'
|
||||
|
||||
04/07/2016 14:49:41: Starting Epoch 27: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 26 at record count 1560000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:49:41: Starting minibatch loop.
|
||||
04/07/2016 14:49:41: Epoch[27 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.05306902; EvalErr[0]PerSample = 0.01381250; EvalErr[1]PerSample = 0.01381250; TotalTime = 0.4103s; SamplesPerSecond = 38991.7
|
||||
04/07/2016 14:49:41: Epoch[27 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.05717074; EvalErr[0]PerSample = 0.01531250; EvalErr[1]PerSample = 0.01531250; TotalTime = 0.4080s; SamplesPerSecond = 39212.8
|
||||
04/07/2016 14:49:42: Epoch[27 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.05915296; EvalErr[0]PerSample = 0.01600000; EvalErr[1]PerSample = 0.01600000; TotalTime = 0.4068s; SamplesPerSecond = 39331.4
|
||||
04/07/2016 14:49:42: Finished Epoch[27 of 30]: [Training Set] TrainLossPerSample = 0.05526093; TotalSamplesSeen = 1620000; EvalErrPerSample [0]=0.0146; [1]=0.0146; AvgLearningRatePerSample = 0.003125; EpochTime=1.53385
|
||||
04/07/2016 14:49:42: Finished Epoch[27 of 30]: Criterion Node [ce] Per Sample = 0.05526093
|
||||
04/07/2016 14:49:42: Finished Epoch[27 of 30]: Evaluation Node [errTop5] Per Sample = 0.0146
|
||||
04/07/2016 14:49:42: Finished Epoch[27 of 30]: Evaluation Node [err] Per Sample = 0.0146
|
||||
04/07/2016 14:49:42: SGD: Saving checkpoint model 'C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.27'
|
||||
|
||||
04/07/2016 14:49:42: Starting Epoch 28: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 27 at record count 1620000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:49:42: Starting minibatch loop.
|
||||
04/07/2016 14:49:43: Epoch[28 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.05088624; EvalErr[0]PerSample = 0.01306250; EvalErr[1]PerSample = 0.01306250; TotalTime = 0.4000s; SamplesPerSecond = 39995.6
|
||||
04/07/2016 14:49:43: Epoch[28 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.05488034; EvalErr[0]PerSample = 0.01493750; EvalErr[1]PerSample = 0.01493750; TotalTime = 0.4062s; SamplesPerSecond = 39388.1
|
||||
04/07/2016 14:49:43: Epoch[28 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.05683998; EvalErr[0]PerSample = 0.01481250; EvalErr[1]PerSample = 0.01481250; TotalTime = 0.4049s; SamplesPerSecond = 39512.5
|
||||
04/07/2016 14:49:44: Finished Epoch[28 of 30]: [Training Set] TrainLossPerSample = 0.053038687; TotalSamplesSeen = 1680000; EvalErrPerSample [0]=0.013816667; [1]=0.013816667; AvgLearningRatePerSample = 0.003125; EpochTime=1.51778
|
||||
04/07/2016 14:49:44: Finished Epoch[28 of 30]: Criterion Node [ce] Per Sample = 0.053038687
|
||||
04/07/2016 14:49:44: Finished Epoch[28 of 30]: Evaluation Node [errTop5] Per Sample = 0.013816667
|
||||
04/07/2016 14:49:44: Finished Epoch[28 of 30]: Evaluation Node [err] Per Sample = 0.013816667
|
||||
04/07/2016 14:49:44: SGD: Saving checkpoint model 'C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.28'
|
||||
|
||||
04/07/2016 14:49:44: Starting Epoch 29: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 28 at record count 1680000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:49:44: Starting minibatch loop.
|
||||
04/07/2016 14:49:44: Epoch[29 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.04883759; EvalErr[0]PerSample = 0.01231250; EvalErr[1]PerSample = 0.01231250; TotalTime = 0.4081s; SamplesPerSecond = 39202.2
|
||||
04/07/2016 14:49:45: Epoch[29 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.05271794; EvalErr[0]PerSample = 0.01462500; EvalErr[1]PerSample = 0.01462500; TotalTime = 0.4544s; SamplesPerSecond = 35210.6
|
||||
04/07/2016 14:49:45: Epoch[29 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.05465497; EvalErr[0]PerSample = 0.01431250; EvalErr[1]PerSample = 0.01431250; TotalTime = 0.4455s; SamplesPerSecond = 35918.0
|
||||
04/07/2016 14:49:45: Finished Epoch[29 of 30]: [Training Set] TrainLossPerSample = 0.050945017; TotalSamplesSeen = 1740000; EvalErrPerSample [0]=0.013233334; [1]=0.013233334; AvgLearningRatePerSample = 0.003125; EpochTime=1.64353
|
||||
04/07/2016 14:49:45: Finished Epoch[29 of 30]: Criterion Node [ce] Per Sample = 0.050945017
|
||||
04/07/2016 14:49:45: Finished Epoch[29 of 30]: Evaluation Node [errTop5] Per Sample = 0.013233334
|
||||
04/07/2016 14:49:45: Finished Epoch[29 of 30]: Evaluation Node [err] Per Sample = 0.013233334
|
||||
04/07/2016 14:49:45: SGD: Saving checkpoint model 'C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_01_OneHidden@release_gpu/Models/01_OneHidden.29'
|
||||
|
||||
04/07/2016 14:49:45: Starting Epoch 30: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 29 at record count 1740000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:49:45: Starting minibatch loop.
|
||||
04/07/2016 14:49:46: Epoch[30 of 30]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.04691060; EvalErr[0]PerSample = 0.01168750; EvalErr[1]PerSample = 0.01168750; TotalTime = 0.4178s; SamplesPerSecond = 38293.3
|
||||
04/07/2016 14:49:46: Epoch[30 of 30]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.05067239; EvalErr[0]PerSample = 0.01362500; EvalErr[1]PerSample = 0.01362500; TotalTime = 0.4200s; SamplesPerSecond = 38092.6
|
||||
04/07/2016 14:49:47: Epoch[30 of 30]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.05258816; EvalErr[0]PerSample = 0.01350000; EvalErr[1]PerSample = 0.01350000; TotalTime = 0.4476s; SamplesPerSecond = 35746.8
|
||||
04/07/2016 14:49:47: Finished Epoch[30 of 30]: [Training Set] TrainLossPerSample = 0.048968919; TotalSamplesSeen = 1800000; EvalErrPerSample [0]=0.01245; [1]=0.01245; AvgLearningRatePerSample = 0.003125; EpochTime=1.60951
|
||||
04/07/2016 14:49:47: Finished Epoch[30 of 30]: Criterion Node [ce] Per Sample = 0.048968919
|
||||
04/07/2016 14:49:47: Finished Epoch[30 of 30]: Evaluation Node [errTop5] Per Sample = 0.01245
|
||||
04/07/2016 14:49:47: Finished Epoch[30 of 30]: Evaluation Node [err] Per Sample = 0.01245
|
||||
04/07/2016 14:49:47: SGD: Saving checkpoint model 'C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_01_OneHidden@release_gpu/Models/01_OneHidden'
|
||||
04/07/2016 14:49:47: CNTKCommandTrainEnd: MNISTtrain
|
||||
|
||||
04/07/2016 14:49:47: Action "train" complete.
|
||||
|
||||
|
||||
04/07/2016 14:49:47: ##############################################################################
|
||||
04/07/2016 14:49:47: # #
|
||||
04/07/2016 14:49:47: # Action "test" #
|
||||
04/07/2016 14:49:47: # #
|
||||
04/07/2016 14:49:47: ##############################################################################
|
||||
|
||||
Reading UCI file C:\R\CNTK3\Examples\Image\MNIST\Data/Test-28x28.txt
|
||||
|
||||
Post-processing network...
|
||||
|
||||
4 roots:
|
||||
ce = CrossEntropyWithSoftmax()
|
||||
err = ErrorPrediction()
|
||||
errTop5 = ErrorPrediction()
|
||||
ol.z = Plus()
|
||||
|
||||
Validating network. 17 nodes to process in pass 1.
|
||||
|
||||
|
||||
Validating network. 9 nodes to process in pass 2.
|
||||
|
||||
|
||||
Validating network, final pass.
|
||||
|
||||
Validating --> labels = InputValue() : -> [10 x *]
|
||||
Validating --> ol.W = LearnableParameter() : -> [10 x 200]
|
||||
Validating --> h1.W = LearnableParameter() : -> [200 x 784]
|
||||
Validating --> featScale = LearnableParameter() : -> [1 x 1]
|
||||
Validating --> features = InputValue() : -> [784 x *]
|
||||
Validating --> featScaled = ElementTimes (featScale, features) : [1 x 1], [784 x *] -> [784 x 1 x *]
|
||||
Validating --> h1.t = Times (h1.W, featScaled) : [200 x 784], [784 x 1 x *] -> [200 x 1 x *]
|
||||
Validating --> h1.b = LearnableParameter() : -> [200 x 1]
|
||||
Validating --> h1.z = Plus (h1.t, h1.b) : [200 x 1 x *], [200 x 1] -> [200 x 1 x *]
|
||||
Validating --> h1.y = Sigmoid (h1.z) : [200 x 1 x *] -> [200 x 1 x *]
|
||||
Validating --> ol.t = Times (ol.W, h1.y) : [10 x 200], [200 x 1 x *] -> [10 x 1 x *]
|
||||
Validating --> ol.b = LearnableParameter() : -> [10 x 1]
|
||||
Validating --> ol.z = Plus (ol.t, ol.b) : [10 x 1 x *], [10 x 1] -> [10 x 1 x *]
|
||||
Validating --> ce = CrossEntropyWithSoftmax (labels, ol.z) : [10 x *], [10 x 1 x *] -> [1]
|
||||
Validating --> err = ErrorPrediction (labels, ol.z) : [10 x *], [10 x 1 x *] -> [1]
|
||||
Validating --> unnamed81 = LearnableParameter() : -> [1 x 1]
|
||||
Validating --> errTop5 = ErrorPrediction (labels, ol.z, unnamed81) : [10 x *], [10 x 1 x *], [1 x 1] -> [1]
|
||||
|
||||
|
||||
9 out of 17 nodes do not share the minibatch layout with the input data.
|
||||
|
||||
Post-processing network complete.
|
||||
|
||||
evalNodeNames are not specified, using all the default evalnodes and training criterion nodes.
|
||||
|
||||
|
||||
Allocating matrices for forward and/or backward propagation.
|
||||
UCIFastReader: Starting at epoch 0, counting lines to determine record count...
|
||||
10000 records found.
|
||||
starting epoch 0 at record count 0, and file position 0
|
||||
already there from last epoch
|
||||
RandomOrdering: 1989 retries for 10000 elements (19.9%) to ensure window condition
|
||||
RandomOrdering: recached sequence for seed 0: 2334, 3830, ...
|
||||
Minibatch[1-500]: SamplesSeen = 8000 errTop5: ErrorPrediction/Sample = 0.024625 err: ErrorPrediction/Sample = 0.024625 ce: CrossEntropyWithSoftmax/Sample = 0.079777001
|
||||
Minibatch[501-625]: SamplesSeen = 2000 errTop5: ErrorPrediction/Sample = 0.0165 err: ErrorPrediction/Sample = 0.0165 ce: CrossEntropyWithSoftmax/Sample = 0.065022096
|
||||
Final Results: Minibatch[1-625]: SamplesSeen = 10000 errTop5: ErrorPrediction/Sample = 0.023 err: ErrorPrediction/Sample = 0.023 ce: CrossEntropyWithSoftmax/Sample = 0.07682602 Perplexity = 1.0798542
|
||||
|
||||
04/07/2016 14:49:48: Action "test" complete.
|
||||
|
||||
04/07/2016 14:49:48: __COMPLETED__
@ -3,14 +3,35 @@
. $TEST_ROOT_DIR/run-test-common

ConfigDir=$TEST_DIR/../../../../../../Examples/Image/MNIST/Config
if [ "$OS" == "Windows_NT" ]; then
  CleanDataDir=$(cygpath -aw $DataDir)
else
  CleanDataDir=$DataDir
fi

if [[ ! -d $TEST_DATA_DIR || ! -e $TEST_DATA_DIR/Train-28x28.txt || ! -e $TEST_DATA_DIR/Test-28x28.txt ]]; then
  # Cannot find test data locally.
  # Try external test data directory (not part of the CNTK repository) as an alternative.
  if [[ -d "$CNTK_EXTERNAL_TESTDATA_SOURCE_DIRECTORY" ]]; then
    if [ "$OS" == "Windows_NT" ]; then
      DataSourceDir=`cygpath -au $CNTK_EXTERNAL_TESTDATA_SOURCE_DIRECTORY`/Image/MNIST/v0
    else
      DataSourceDir=$CNTK_EXTERNAL_TESTDATA_SOURCE_DIRECTORY/Image/MNIST/v0
    fi

    # Copy the test data to the test run directory
    DataDir=$TEST_RUN_DIR/TestData
    mkdir $DataDir
    cp -R $DataSourceDir/* $DataDir || exit $?
    Copied=1
  else
    echo Error: cannot find data. Please see Examples/Image/MNIST/README.md for instructions to get it.
    exit 1
  fi
fi

# cntkrun <CNTK config file name> <additional CNTK args>
imageLayout=cudnn

cntkrun 01_OneHidden.cntk "MNISTtrain=[reader=[file=$CleanDataDir/Train.txt]] MNISTtest=[reader=[file=$CleanDataDir/Test.txt]] MNISTtrain=[SGD=[maxEpochs=1]] MNISTtrain=[SGD=[epochSize=100]] MNISTtrain=[reader=[randomize=none]] imageLayout=\"$imageLayout\"" || exit $?
# Note: explicitly turn off randomization, as it crashes the reader.
cntkrun 01_OneHidden.cntk "train=[reader=[randomize=none]] imageLayout=\"$imageLayout\""
ExitCode=$?

# Delete the test data if copied
[[ "$Copied" -eq "1" ]] && rm -rf "$DataDir"

exit $ExitCode
@ -1,13 +1,33 @@
dataDir: ../../../../Image/Data
dataDir: ../../../../../../Examples/Image/MNIST/Data

tags:
  # running on every BVT job in 'E' (Examples) leg
  - bvt-e ((flavor=='debug') ^ (device=='cpu'))
  # running every Nightly job in 'E' (Examples) leg
  - nightly-e
  # In BVT, run Release GPU (~ 30 - 60 sec)
  - bvt-e (build_sku=='gpu') and (device=='gpu') and (flavor=='release')
  # In Nightly on Linux, additionally run Debug GPU and Release CPU (~ 30 - 120 sec)
  - nightly-e (build_sku=='gpu') and (((device=='gpu') and (flavor=='release')) or (os=='linux' and ((flavor=='debug') ^ (device=='cpu'))))

testCases:
  CNTK Run must be completed:
    patterns:
      - __COMPLETED__

  Must train epochs in exactly same order and parameters:
    patterns:
      - Starting Epoch {{integer}}
      - learning rate per sample = {{float}}
      - momentum = {{float}}

  Epochs must be finished with expected results:
    patterns:
      - Finished Epoch[{{integer}} of {{integer}}]
      - TrainLossPerSample = {{float,tolerance=.1%}}
      - EvalErrPerSample [0]={{float,tolerance=.1%}}
      - AvgLearningRatePerSample = {{float,tolerance=0.001%}}

  Per-minibatch training results must match:
    patterns:
      - Epoch[{{integer}} of {{integer}}]-Minibatch[{{integer}}-{{integer}}
      - SamplesSeen = {{integer}}
      - TrainLossPerSample = {{float,tolerance=.1%}}
      - EvalErr[0]PerSample = {{float,tolerance=.1%}}
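Note on the tolerance patterns above: a pattern such as {{float,tolerance=.1%}} asks the test harness to accept a value from a new run when it lies within the stated relative tolerance of the baseline value, instead of requiring an exact match. The Python sketch below only illustrates that kind of relative-tolerance comparison; the function names and the tolerance parsing are hypothetical and are not the harness's actual implementation.

# Illustrative sketch only: a relative-tolerance check in the spirit of {{float,tolerance=.1%}}.
# parse_tolerance and values_match are hypothetical helpers, not part of CNTK.
def parse_tolerance(spec):
    """Turn a string like '.1%' into a fraction (0.001)."""
    spec = spec.strip()
    if spec.endswith('%'):
        return float(spec[:-1]) / 100.0
    return float(spec)

def values_match(baseline, actual, tolerance_spec='.1%'):
    """Accept 'actual' if it lies within the relative tolerance of 'baseline'."""
    tol = parse_tolerance(tolerance_spec)
    if baseline == 0.0:
        # Fall back to an absolute comparison when the baseline is zero.
        return abs(actual) <= tol
    return abs(actual - baseline) <= tol * abs(baseline)

# Example: the epoch-24 loss 0.062835939 from the baseline above vs. a hypothetical new value.
print(values_match(0.062835939, 0.062850))  # True  (within .1%)
print(values_match(0.062835939, 0.064000))  # False (off by roughly 1.9%)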
@ -1 +0,0 @@
__COMPLETED__

@ -1 +0,0 @@
__COMPLETED__

@ -1 +0,0 @@
__COMPLETED__

@ -1 +0,0 @@
__COMPLETED__
@ -0,0 +1,604 @@
|
|||
=== Running /home/mahilleb/CNTK/build/gpu/release/bin/cntk configFile=/home/mahilleb/CNTK/Tests/EndToEndTests/Examples/Image/MNIST/02_Convolution/../../../../../../Examples/Image/MNIST/Config/02_Convolution.cntk currentDirectory=/home/mahilleb/CNTK/Examples/Image/MNIST/Data RunDir=/tmp/cntk-test-20160407141655.895430/Examples/Image/MNIST_02_Convolution@release_gpu DataDir=/home/mahilleb/CNTK/Examples/Image/MNIST/Data ConfigDir=/home/mahilleb/CNTK/Tests/EndToEndTests/Examples/Image/MNIST/02_Convolution/../../../../../../Examples/Image/MNIST/Config OutputDir=/tmp/cntk-test-20160407141655.895430/Examples/Image/MNIST_02_Convolution@release_gpu DeviceId=0 timestamping=true train=[reader=[randomize=none]] imageLayout="cudnn"
|
||||
-------------------------------------------------------------------
|
||||
Build info:
|
||||
|
||||
Built time: Apr 6 2016 15:52:46
|
||||
Last modified date: Tue Apr 5 14:19:05 2016
|
||||
Build type: release
|
||||
Build target: GPU
|
||||
With 1bit-SGD: no
|
||||
Math lib: acml
|
||||
CUDA_PATH: /usr/local/cuda-7.0
|
||||
CUB_PATH: /usr/local/cub-1.4.1
|
||||
CUDNN_PATH: /usr/local/cudnn-4.0
|
||||
Build Branch: mahilleb/MNISTLinux
|
||||
Build SHA1: ad5c8cd8002553a87d462a9a1ddcdabf2c84f519 (modified)
|
||||
Built by mahilleb on atleneu04
|
||||
Build Path: /home/mahilleb/CNTK
|
||||
-------------------------------------------------------------------
|
||||
Changed current directory to /home/mahilleb/CNTK/Examples/Image/MNIST/Data
|
||||
04/07/2016 14:16:55: -------------------------------------------------------------------
|
||||
04/07/2016 14:16:55: Build info:
|
||||
|
||||
04/07/2016 14:16:55: Built time: Apr 6 2016 15:52:46
|
||||
04/07/2016 14:16:55: Last modified date: Tue Apr 5 14:19:05 2016
|
||||
04/07/2016 14:16:55: Build type: release
|
||||
04/07/2016 14:16:55: Build target: GPU
|
||||
04/07/2016 14:16:55: With 1bit-SGD: no
|
||||
04/07/2016 14:16:55: Math lib: acml
|
||||
04/07/2016 14:16:55: CUDA_PATH: /usr/local/cuda-7.0
|
||||
04/07/2016 14:16:55: CUB_PATH: /usr/local/cub-1.4.1
|
||||
04/07/2016 14:16:55: CUDNN_PATH: /usr/local/cudnn-4.0
|
||||
04/07/2016 14:16:55: Build Branch: mahilleb/MNISTLinux
|
||||
04/07/2016 14:16:55: Build SHA1: ad5c8cd8002553a87d462a9a1ddcdabf2c84f519 (modified)
|
||||
04/07/2016 14:16:55: Built by mahilleb on atleneu04
|
||||
04/07/2016 14:16:55: Build Path: /home/mahilleb/CNTK
|
||||
04/07/2016 14:16:55: -------------------------------------------------------------------
|
||||
|
||||
04/07/2016 14:16:55: Running on localhost at 2016/04/07 14:16:55
|
||||
04/07/2016 14:16:55: Command line:
|
||||
/home/mahilleb/CNTK/build/gpu/release/bin/cntk configFile=/home/mahilleb/CNTK/Tests/EndToEndTests/Examples/Image/MNIST/02_Convolution/../../../../../../Examples/Image/MNIST/Config/02_Convolution.cntk currentDirectory=/home/mahilleb/CNTK/Examples/Image/MNIST/Data RunDir=/tmp/cntk-test-20160407141655.895430/Examples/Image/MNIST_02_Convolution@release_gpu DataDir=/home/mahilleb/CNTK/Examples/Image/MNIST/Data ConfigDir=/home/mahilleb/CNTK/Tests/EndToEndTests/Examples/Image/MNIST/02_Convolution/../../../../../../Examples/Image/MNIST/Config OutputDir=/tmp/cntk-test-20160407141655.895430/Examples/Image/MNIST_02_Convolution@release_gpu DeviceId=0 timestamping=true train=[reader=[randomize=none]] imageLayout="cudnn"
|
||||
|
||||
|
||||
|
||||
04/07/2016 14:16:55: >>>>>>>>>>>>>>>>>>>> RAW CONFIG (VARIABLES NOT RESOLVED) >>>>>>>>>>>>>>>>>>>>
|
||||
04/07/2016 14:16:55: RootDir = ".."
|
||||
ConfigDir = "$RootDir$/Config"
|
||||
DataDir = "$RootDir$/Data"
|
||||
OutputDir = "$RootDir$/Output"
|
||||
ModelDir = "$OutputDir$/Models"
|
||||
deviceId = 0
|
||||
imageLayout = "cudnn"
|
||||
command = train:test
|
||||
precision = "float"
|
||||
modelPath = "$ModelDir$/02_Convolution"
|
||||
ndlMacros = "$ConfigDir$/Macros.ndl"
|
||||
traceLevel=1
|
||||
numMBsToShowResult=500
|
||||
prefetch=true
|
||||
initOnCPUOnly=true
|
||||
train = [
|
||||
action = "train"
|
||||
NDLNetworkBuilder = [
|
||||
networkDescription = "$ConfigDir$/02_Convolution.ndl"
|
||||
]
|
||||
SGD = [
|
||||
epochSize = 60000
|
||||
minibatchSize = 32
|
||||
learningRatesPerMB = 0.1*5:0.3
|
||||
momentumPerMB = 0*10:0.7
|
||||
maxEpochs = 15
|
||||
]
|
||||
reader = [
|
||||
readerType = "UCIFastReader"
|
||||
file = "$DataDir$/Train-28x28.txt"
|
||||
features = [
|
||||
dim = 784
|
||||
start = 1
|
||||
]
|
||||
labels = [
|
||||
dim = 1
|
||||
start = 0
|
||||
labelDim = 10
|
||||
labelMappingFile = "$DataDir$/labelsmap.txt"
|
||||
]
|
||||
]
|
||||
]
|
||||
test = [
|
||||
action = test
|
||||
minibatchSize = 16
|
||||
NDLNetworkBuilder = [
|
||||
networkDescription = "$ConfigDir$/02_Convolution.ndl"
|
||||
]
|
||||
reader = [
|
||||
readerType = "UCIFastReader"
|
||||
file = "$DataDir$/Test-28x28.txt"
|
||||
features = [
|
||||
dim = 784
|
||||
start = 1
|
||||
]
|
||||
labels = [
|
||||
dim = 1
|
||||
start = 0
|
||||
labelDim = 10
|
||||
labelMappingFile = "$DataDir$/labelsmap.txt"
|
||||
]
|
||||
]
|
||||
]
|
||||
currentDirectory=/home/mahilleb/CNTK/Examples/Image/MNIST/Data
|
||||
RunDir=/tmp/cntk-test-20160407141655.895430/Examples/Image/MNIST_02_Convolution@release_gpu
|
||||
DataDir=/home/mahilleb/CNTK/Examples/Image/MNIST/Data
|
||||
ConfigDir=/home/mahilleb/CNTK/Tests/EndToEndTests/Examples/Image/MNIST/02_Convolution/../../../../../../Examples/Image/MNIST/Config
|
||||
OutputDir=/tmp/cntk-test-20160407141655.895430/Examples/Image/MNIST_02_Convolution@release_gpu
|
||||
DeviceId=0
|
||||
timestamping=true
|
||||
train=[reader=[randomize=none]]
|
||||
imageLayout="cudnn"
|
||||
|
||||
04/07/2016 14:16:55: <<<<<<<<<<<<<<<<<<<< RAW CONFIG (VARIABLES NOT RESOLVED) <<<<<<<<<<<<<<<<<<<<
|
||||
|
||||
04/07/2016 14:16:55: >>>>>>>>>>>>>>>>>>>> RAW CONFIG WITH ALL VARIABLES RESOLVED >>>>>>>>>>>>>>>>>>>>
|
||||
04/07/2016 14:16:55: RootDir = ".."
|
||||
ConfigDir = "../Config"
|
||||
DataDir = "../Data"
|
||||
OutputDir = "../Output"
|
||||
ModelDir = "/tmp/cntk-test-20160407141655.895430/Examples/Image/MNIST_02_Convolution@release_gpu/Models"
|
||||
deviceId = 0
|
||||
imageLayout = "cudnn"
|
||||
command = train:test
|
||||
precision = "float"
|
||||
modelPath = "/tmp/cntk-test-20160407141655.895430/Examples/Image/MNIST_02_Convolution@release_gpu/Models/02_Convolution"
|
||||
ndlMacros = "/home/mahilleb/CNTK/Tests/EndToEndTests/Examples/Image/MNIST/02_Convolution/../../../../../../Examples/Image/MNIST/Config/Macros.ndl"
|
||||
traceLevel=1
|
||||
numMBsToShowResult=500
|
||||
prefetch=true
|
||||
initOnCPUOnly=true
|
||||
train = [
|
||||
action = "train"
|
||||
NDLNetworkBuilder = [
|
||||
networkDescription = "/home/mahilleb/CNTK/Tests/EndToEndTests/Examples/Image/MNIST/02_Convolution/../../../../../../Examples/Image/MNIST/Config/02_Convolution.ndl"
|
||||
]
|
||||
SGD = [
|
||||
epochSize = 60000
|
||||
minibatchSize = 32
|
||||
learningRatesPerMB = 0.1*5:0.3
|
||||
momentumPerMB = 0*10:0.7
|
||||
maxEpochs = 15
|
||||
]
|
||||
reader = [
|
||||
readerType = "UCIFastReader"
|
||||
file = "/home/mahilleb/CNTK/Examples/Image/MNIST/Data/Train-28x28.txt"
|
||||
features = [
|
||||
dim = 784
|
||||
start = 1
|
||||
]
|
||||
labels = [
|
||||
dim = 1
|
||||
start = 0
|
||||
labelDim = 10
|
||||
labelMappingFile = "/home/mahilleb/CNTK/Examples/Image/MNIST/Data/labelsmap.txt"
|
||||
]
|
||||
]
|
||||
]
|
||||
test = [
|
||||
action = test
|
||||
minibatchSize = 16
|
||||
NDLNetworkBuilder = [
|
||||
networkDescription = "/home/mahilleb/CNTK/Tests/EndToEndTests/Examples/Image/MNIST/02_Convolution/../../../../../../Examples/Image/MNIST/Config/02_Convolution.ndl"
|
||||
]
|
||||
reader = [
|
||||
readerType = "UCIFastReader"
|
||||
file = "/home/mahilleb/CNTK/Examples/Image/MNIST/Data/Test-28x28.txt"
|
||||
features = [
|
||||
dim = 784
|
||||
start = 1
|
||||
]
|
||||
labels = [
|
||||
dim = 1
|
||||
start = 0
|
||||
labelDim = 10
|
||||
labelMappingFile = "/home/mahilleb/CNTK/Examples/Image/MNIST/Data/labelsmap.txt"
|
||||
]
|
||||
]
|
||||
]
|
||||
currentDirectory=/home/mahilleb/CNTK/Examples/Image/MNIST/Data
|
||||
RunDir=/tmp/cntk-test-20160407141655.895430/Examples/Image/MNIST_02_Convolution@release_gpu
|
||||
DataDir=/home/mahilleb/CNTK/Examples/Image/MNIST/Data
|
||||
ConfigDir=/home/mahilleb/CNTK/Tests/EndToEndTests/Examples/Image/MNIST/02_Convolution/../../../../../../Examples/Image/MNIST/Config
|
||||
OutputDir=/tmp/cntk-test-20160407141655.895430/Examples/Image/MNIST_02_Convolution@release_gpu
|
||||
DeviceId=0
|
||||
timestamping=true
|
||||
train=[reader=[randomize=none]]
|
||||
imageLayout="cudnn"
|
||||
|
||||
04/07/2016 14:16:55: <<<<<<<<<<<<<<<<<<<< RAW CONFIG WITH ALL VARIABLES RESOLVED <<<<<<<<<<<<<<<<<<<<
|
||||
|
||||
04/07/2016 14:16:55: >>>>>>>>>>>>>>>>>>>> PROCESSED CONFIG WITH ALL VARIABLES RESOLVED >>>>>>>>>>>>>>>>>>>>
|
||||
configparameters: 02_Convolution.cntk:command=train:test
|
||||
configparameters: 02_Convolution.cntk:ConfigDir=/home/mahilleb/CNTK/Tests/EndToEndTests/Examples/Image/MNIST/02_Convolution/../../../../../../Examples/Image/MNIST/Config
|
||||
configparameters: 02_Convolution.cntk:currentDirectory=/home/mahilleb/CNTK/Examples/Image/MNIST/Data
|
||||
configparameters: 02_Convolution.cntk:DataDir=/home/mahilleb/CNTK/Examples/Image/MNIST/Data
|
||||
configparameters: 02_Convolution.cntk:deviceId=0
|
||||
configparameters: 02_Convolution.cntk:imageLayout=cudnn
|
||||
configparameters: 02_Convolution.cntk:initOnCPUOnly=true
|
||||
configparameters: 02_Convolution.cntk:ModelDir=/tmp/cntk-test-20160407141655.895430/Examples/Image/MNIST_02_Convolution@release_gpu/Models
|
||||
configparameters: 02_Convolution.cntk:modelPath=/tmp/cntk-test-20160407141655.895430/Examples/Image/MNIST_02_Convolution@release_gpu/Models/02_Convolution
|
||||
configparameters: 02_Convolution.cntk:ndlMacros=/home/mahilleb/CNTK/Tests/EndToEndTests/Examples/Image/MNIST/02_Convolution/../../../../../../Examples/Image/MNIST/Config/Macros.ndl
|
||||
configparameters: 02_Convolution.cntk:numMBsToShowResult=500
|
||||
configparameters: 02_Convolution.cntk:OutputDir=/tmp/cntk-test-20160407141655.895430/Examples/Image/MNIST_02_Convolution@release_gpu
|
||||
configparameters: 02_Convolution.cntk:precision=float
|
||||
configparameters: 02_Convolution.cntk:prefetch=true
|
||||
configparameters: 02_Convolution.cntk:RootDir=..
|
||||
configparameters: 02_Convolution.cntk:RunDir=/tmp/cntk-test-20160407141655.895430/Examples/Image/MNIST_02_Convolution@release_gpu
|
||||
configparameters: 02_Convolution.cntk:test=[
|
||||
action = test
|
||||
minibatchSize = 16
|
||||
NDLNetworkBuilder = [
|
||||
networkDescription = "/home/mahilleb/CNTK/Tests/EndToEndTests/Examples/Image/MNIST/02_Convolution/../../../../../../Examples/Image/MNIST/Config/02_Convolution.ndl"
|
||||
]
|
||||
reader = [
|
||||
readerType = "UCIFastReader"
|
||||
file = "/home/mahilleb/CNTK/Examples/Image/MNIST/Data/Test-28x28.txt"
|
||||
features = [
|
||||
dim = 784
|
||||
start = 1
|
||||
]
|
||||
labels = [
|
||||
dim = 1
|
||||
start = 0
|
||||
labelDim = 10
|
||||
labelMappingFile = "/home/mahilleb/CNTK/Examples/Image/MNIST/Data/labelsmap.txt"
|
||||
]
|
||||
]
|
||||
]
|
||||
|
||||
configparameters: 02_Convolution.cntk:timestamping=true
|
||||
configparameters: 02_Convolution.cntk:traceLevel=1
|
||||
configparameters: 02_Convolution.cntk:train=[
|
||||
action = "train"
|
||||
NDLNetworkBuilder = [
|
||||
networkDescription = "/home/mahilleb/CNTK/Tests/EndToEndTests/Examples/Image/MNIST/02_Convolution/../../../../../../Examples/Image/MNIST/Config/02_Convolution.ndl"
|
||||
]
|
||||
SGD = [
|
||||
epochSize = 60000
|
||||
minibatchSize = 32
|
||||
learningRatesPerMB = 0.1*5:0.3
|
||||
momentumPerMB = 0*10:0.7
|
||||
maxEpochs = 15
|
||||
]
|
||||
reader = [
|
||||
readerType = "UCIFastReader"
|
||||
file = "/home/mahilleb/CNTK/Examples/Image/MNIST/Data/Train-28x28.txt"
|
||||
features = [
|
||||
dim = 784
|
||||
start = 1
|
||||
]
|
||||
labels = [
|
||||
dim = 1
|
||||
start = 0
|
||||
labelDim = 10
|
||||
labelMappingFile = "/home/mahilleb/CNTK/Examples/Image/MNIST/Data/labelsmap.txt"
|
||||
]
|
||||
]
|
||||
] [reader=[randomize=none]]
|
||||
|
||||
04/07/2016 14:16:55: <<<<<<<<<<<<<<<<<<<< PROCESSED CONFIG WITH ALL VARIABLES RESOLVED <<<<<<<<<<<<<<<<<<<<
|
||||
04/07/2016 14:16:55: Commands: train test
|
||||
04/07/2016 14:16:55: Precision = "float"
|
||||
04/07/2016 14:16:55: CNTKModelPath: /tmp/cntk-test-20160407141655.895430/Examples/Image/MNIST_02_Convolution@release_gpu/Models/02_Convolution
|
||||
04/07/2016 14:16:55: CNTKCommandTrainInfo: train : 15
|
||||
04/07/2016 14:16:55: CNTKCommandTrainInfo: CNTKNoMoreCommands_Total : 15
|
||||
|
||||
04/07/2016 14:16:55: ##############################################################################
|
||||
04/07/2016 14:16:55: # #
|
||||
04/07/2016 14:16:55: # Action "train" #
|
||||
04/07/2016 14:16:55: # #
|
||||
04/07/2016 14:16:55: ##############################################################################
|
||||
|
||||
04/07/2016 14:16:55: CNTKCommandTrainBegin: train
|
||||
NDLBuilder Using GPU 0
|
||||
Reading UCI file /home/mahilleb/CNTK/Examples/Image/MNIST/Data/Train-28x28.txt
|
||||
|
||||
04/07/2016 14:16:55: Creating virgin network.
|
||||
|
||||
Post-processing network...
|
||||
|
||||
3 roots:
|
||||
ce = CrossEntropyWithSoftmax()
|
||||
err = ErrorPrediction()
|
||||
ol.z = Plus()
|
||||
|
||||
Validating network. 27 nodes to process in pass 1.
|
||||
|
||||
|
||||
Validating network. 16 nodes to process in pass 2.
|
||||
|
||||
|
||||
Validating network, final pass.
|
||||
|
||||
Validating --> labels = InputValue() : -> [10 x *]
|
||||
Validating --> ol.W = LearnableParameter() : -> [10 x 128]
|
||||
Validating --> h1.W = LearnableParameter() : -> [128 x 7 x 7 x 32]
|
||||
Validating --> conv2.w.W = LearnableParameter() : -> [32 x 400]
|
||||
Validating --> conv1.w.W = LearnableParameter() : -> [16 x 25]
|
||||
Validating --> featScale = LearnableParameter() : -> [1 x 1]
|
||||
Validating --> features = InputValue() : -> [28 x 28 x 1 x *]
|
||||
Validating --> featScaled = ElementTimes (featScale, features) : [1 x 1], [28 x 28 x 1 x *] -> [28 x 28 x 1 x *]
|
||||
|
||||
Using cuDNN convolution engine for geometry: Input: 28 x 28 x 1, Output: 28 x 28 x 16, Kernel: 5 x 5 x 1, Map: 1 x 1 x 16, Stride: 1 x 1 x 1, Sharing: (1), AutoPad: (1), LowerPad: 0, UpperPad: 0.
|
||||
Validating --> conv1.c.c = Convolution (conv1.w.W, featScaled) : [16 x 25], [28 x 28 x 1 x *] -> [28 x 28 x 16 x *]
|
||||
Validating --> conv1.b.b = LearnableParameter() : -> [1 x 1 x 16]
|
||||
Validating --> conv1.cpb = Plus (conv1.c.c, conv1.b.b) : [28 x 28 x 16 x *], [1 x 1 x 16] -> [28 x 28 x 16 x *]
|
||||
Validating --> conv1.out = RectifiedLinear (conv1.cpb) : [28 x 28 x 16 x *] -> [28 x 28 x 16 x *]
|
||||
|
||||
Using cuDNN convolution engine for geometry: Input: 28 x 28 x 16, Output: 14 x 14 x 16, Kernel: 2 x 2 x 1, Map: 1, Stride: 2 x 2 x 1, Sharing: (1), AutoPad: (0), LowerPad: 0, UpperPad: 0.
|
||||
Validating --> pool1 = MaxPooling (conv1.out) : [28 x 28 x 16 x *] -> [14 x 14 x 16 x *]
|
||||
|
||||
Using cuDNN convolution engine for geometry: Input: 14 x 14 x 16, Output: 14 x 14 x 32, Kernel: 5 x 5 x 16, Map: 32, Stride: 1 x 1 x 16, Sharing: (1, 1, 1), AutoPad: (1, 1, 0), LowerPad: 0, UpperPad: 0.
|
||||
Validating --> conv2.c.c = Convolution (conv2.w.W, pool1) : [32 x 400], [14 x 14 x 16 x *] -> [14 x 14 x 32 x *]
|
||||
Validating --> conv2.b.b = LearnableParameter() : -> [1 x 1 x 32]
|
||||
Validating --> conv2.cpb = Plus (conv2.c.c, conv2.b.b) : [14 x 14 x 32 x *], [1 x 1 x 32] -> [14 x 14 x 32 x *]
|
||||
Validating --> conv2.out = RectifiedLinear (conv2.cpb) : [14 x 14 x 32 x *] -> [14 x 14 x 32 x *]
|
||||
|
||||
Using cuDNN convolution engine for geometry: Input: 14 x 14 x 32, Output: 7 x 7 x 32, Kernel: 2 x 2 x 1, Map: 1, Stride: 2 x 2 x 1, Sharing: (1), AutoPad: (1, 1, 0), LowerPad: 0, UpperPad: 0.
|
||||
Validating --> pool2.p = Pooling (conv2.out) : [14 x 14 x 32 x *] -> [7 x 7 x 32 x *]
|
||||
Validating --> h1.t = Times (h1.W, pool2.p) : [128 x 7 x 7 x 32], [7 x 7 x 32 x *] -> [128 x *]
|
||||
Validating --> h1.b = LearnableParameter() : -> [128 x 1]
|
||||
Validating --> h1.z = Plus (h1.t, h1.b) : [128 x *], [128 x 1] -> [128 x 1 x *]
|
||||
Validating --> h1.y = Sigmoid (h1.z) : [128 x 1 x *] -> [128 x 1 x *]
|
||||
Validating --> ol.t = Times (ol.W, h1.y) : [10 x 128], [128 x 1 x *] -> [10 x 1 x *]
|
||||
Validating --> ol.b = LearnableParameter() : -> [10 x 1]
|
||||
Validating --> ol.z = Plus (ol.t, ol.b) : [10 x 1 x *], [10 x 1] -> [10 x 1 x *]
|
||||
Validating --> ce = CrossEntropyWithSoftmax (labels, ol.z) : [10 x *], [10 x 1 x *] -> [1]
|
||||
Validating --> err = ErrorPrediction (labels, ol.z) : [10 x *], [10 x 1 x *] -> [1]
|
||||
|
||||
|
||||
11 out of 27 nodes do not share the minibatch layout with the input data.
|
||||
|
||||
Post-processing network complete.
|
||||
|
||||
04/07/2016 14:16:56: Created model with 27 nodes on GPU 0.
|
||||
|
||||
04/07/2016 14:16:56: Training criterion node(s):
|
||||
04/07/2016 14:16:56: ce = CrossEntropyWithSoftmax
|
||||
|
||||
04/07/2016 14:16:56: Evaluation criterion node(s):
|
||||
|
||||
04/07/2016 14:16:56: err = ErrorPrediction
|
||||
|
||||
|
||||
Allocating matrices for forward and/or backward propagation.
|
||||
04/07/2016 14:16:56: No PreCompute nodes found, skipping PreCompute step.
|
||||
|
||||
04/07/2016 14:16:56: Starting Epoch 1: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
UCIFastReader: Starting at epoch 0, counting lines to determine record count...
|
||||
60000 records found.
|
||||
starting epoch 0 at record count 0, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:16:57: Starting minibatch loop.
|
||||
04/07/2016 14:16:58: Epoch[ 1 of 15]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.98526776; EvalErr[0]PerSample = 0.32500000; TotalTime = 1.3030s; SamplesPerSecond = 12279.5
|
||||
04/07/2016 14:16:59: Epoch[ 1 of 15]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.15344269; EvalErr[0]PerSample = 0.04256250; TotalTime = 1.1170s; SamplesPerSecond = 14324.7
|
||||
04/07/2016 14:17:00: Epoch[ 1 of 15]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.11083789; EvalErr[0]PerSample = 0.03312500; TotalTime = 1.1154s; SamplesPerSecond = 14344.5
|
||||
04/07/2016 14:17:01: Finished Epoch[ 1 of 15]: [Training Set] TrainLossPerSample = 0.35035855; TotalSamplesSeen = 60000; EvalErrPerSample = 0.11196667; AvgLearningRatePerSample = 0.003125; EpochTime=4.88531
|
||||
04/07/2016 14:17:01: SGD: Saving checkpoint model '/tmp/cntk-test-20160407141655.895430/Examples/Image/MNIST_02_Convolution@release_gpu/Models/02_Convolution.1'
|
||||
|
||||
04/07/2016 14:17:01: Starting Epoch 2: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 1 at record count 60000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:17:01: Starting minibatch loop.
|
||||
04/07/2016 14:17:02: Epoch[ 2 of 15]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.07656574; EvalErr[0]PerSample = 0.02256250; TotalTime = 1.1022s; SamplesPerSecond = 14516.7
|
||||
04/07/2016 14:17:03: Epoch[ 2 of 15]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.06332232; EvalErr[0]PerSample = 0.01906250; TotalTime = 1.0978s; SamplesPerSecond = 14574.4
|
||||
04/07/2016 14:17:04: Epoch[ 2 of 15]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.06096477; EvalErr[0]PerSample = 0.01843750; TotalTime = 1.1001s; SamplesPerSecond = 14544.3
|
||||
04/07/2016 14:17:05: Finished Epoch[ 2 of 15]: [Training Set] TrainLossPerSample = 0.063830577; TotalSamplesSeen = 120000; EvalErrPerSample = 0.019066667; AvgLearningRatePerSample = 0.003125; EpochTime=4.13708
|
||||
04/07/2016 14:17:05: SGD: Saving checkpoint model '/tmp/cntk-test-20160407141655.895430/Examples/Image/MNIST_02_Convolution@release_gpu/Models/02_Convolution.2'
|
||||
|
||||
04/07/2016 14:17:05: Starting Epoch 3: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 2 at record count 120000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:17:05: Starting minibatch loop.
|
||||
04/07/2016 14:17:06: Epoch[ 3 of 15]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.04828593; EvalErr[0]PerSample = 0.01356250; TotalTime = 1.0697s; SamplesPerSecond = 14957.2
|
||||
04/07/2016 14:17:07: Epoch[ 3 of 15]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.04083206; EvalErr[0]PerSample = 0.01162500; TotalTime = 1.0691s; SamplesPerSecond = 14966.1
|
||||
04/07/2016 14:17:08: Epoch[ 3 of 15]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.04202024; EvalErr[0]PerSample = 0.01231250; TotalTime = 1.0707s; SamplesPerSecond = 14943.5
|
||||
04/07/2016 14:17:09: Finished Epoch[ 3 of 15]: [Training Set] TrainLossPerSample = 0.042137712; TotalSamplesSeen = 180000; EvalErrPerSample = 0.01205; AvgLearningRatePerSample = 0.003125; EpochTime=4.0138
|
||||
04/07/2016 14:17:09: SGD: Saving checkpoint model '/tmp/cntk-test-20160407141655.895430/Examples/Image/MNIST_02_Convolution@release_gpu/Models/02_Convolution.3'
|
||||
|
||||
04/07/2016 14:17:09: Starting Epoch 4: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 3 at record count 180000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:17:09: Starting minibatch loop.
|
||||
04/07/2016 14:17:10: Epoch[ 4 of 15]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.03436259; EvalErr[0]PerSample = 0.00925000; TotalTime = 1.0653s; SamplesPerSecond = 15019.7
|
||||
04/07/2016 14:17:11: Epoch[ 4 of 15]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.02914092; EvalErr[0]PerSample = 0.00862500; TotalTime = 1.0646s; SamplesPerSecond = 15029.5
|
||||
04/07/2016 14:17:12: Epoch[ 4 of 15]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.03001163; EvalErr[0]PerSample = 0.00825000; TotalTime = 1.0670s; SamplesPerSecond = 14995.9
|
||||
04/07/2016 14:17:13: Finished Epoch[ 4 of 15]: [Training Set] TrainLossPerSample = 0.030067476; TotalSamplesSeen = 240000; EvalErrPerSample = 0.0083999997; AvgLearningRatePerSample = 0.003125; EpochTime=3.99878
|
||||
04/07/2016 14:17:13: SGD: Saving checkpoint model '/tmp/cntk-test-20160407141655.895430/Examples/Image/MNIST_02_Convolution@release_gpu/Models/02_Convolution.4'
|
||||
|
||||
04/07/2016 14:17:13: Starting Epoch 5: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 4 at record count 240000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:17:13: Starting minibatch loop.
|
||||
04/07/2016 14:17:14: Epoch[ 5 of 15]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.02486609; EvalErr[0]PerSample = 0.00643750; TotalTime = 1.0664s; SamplesPerSecond = 15003.8
|
||||
04/07/2016 14:17:15: Epoch[ 5 of 15]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.02056633; EvalErr[0]PerSample = 0.00468750; TotalTime = 1.0666s; SamplesPerSecond = 15000.6
|
||||
04/07/2016 14:17:16: Epoch[ 5 of 15]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.02225651; EvalErr[0]PerSample = 0.00568750; TotalTime = 1.0919s; SamplesPerSecond = 14653.7
|
||||
04/07/2016 14:17:17: Finished Epoch[ 5 of 15]: [Training Set] TrainLossPerSample = 0.021797771; TotalSamplesSeen = 300000; EvalErrPerSample = 0.0053333333; AvgLearningRatePerSample = 0.003125; EpochTime=4.07281
|
||||
04/07/2016 14:17:17: SGD: Saving checkpoint model '/tmp/cntk-test-20160407141655.895430/Examples/Image/MNIST_02_Convolution@release_gpu/Models/02_Convolution.5'
|
||||
|
||||
04/07/2016 14:17:17: Starting Epoch 6: learning rate per sample = 0.009375 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 5 at record count 300000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:17:17: Starting minibatch loop.
|
||||
04/07/2016 14:17:18: Epoch[ 6 of 15]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.05357927; EvalErr[0]PerSample = 0.01725000; TotalTime = 1.0933s; SamplesPerSecond = 14634.1
|
||||
04/07/2016 14:17:19: Epoch[ 6 of 15]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.04825898; EvalErr[0]PerSample = 0.01512500; TotalTime = 1.0932s; SamplesPerSecond = 14636.5
|
||||
04/07/2016 14:17:21: Epoch[ 6 of 15]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.05669657; EvalErr[0]PerSample = 0.01856250; TotalTime = 1.1126s; SamplesPerSecond = 14380.9
|
||||
04/07/2016 14:17:21: Finished Epoch[ 6 of 15]: [Training Set] TrainLossPerSample = 0.051842291; TotalSamplesSeen = 360000; EvalErrPerSample = 0.01675; AvgLearningRatePerSample = 0.0093750004; EpochTime=4.13201
|
||||
04/07/2016 14:17:21: SGD: Saving checkpoint model '/tmp/cntk-test-20160407141655.895430/Examples/Image/MNIST_02_Convolution@release_gpu/Models/02_Convolution.6'
|
||||
|
||||
04/07/2016 14:17:21: Starting Epoch 7: learning rate per sample = 0.009375 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 6 at record count 360000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:17:21: Starting minibatch loop.
|
||||
04/07/2016 14:17:23: Epoch[ 7 of 15]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.03755896; EvalErr[0]PerSample = 0.01231250; TotalTime = 1.0725s; SamplesPerSecond = 14919.0
|
||||
04/07/2016 14:17:24: Epoch[ 7 of 15]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.03454590; EvalErr[0]PerSample = 0.01106250; TotalTime = 1.0694s; SamplesPerSecond = 14961.5
|
||||
04/07/2016 14:17:25: Epoch[ 7 of 15]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.03838315; EvalErr[0]PerSample = 0.01231250; TotalTime = 1.0800s; SamplesPerSecond = 14815.0
|
||||
04/07/2016 14:17:25: Finished Epoch[ 7 of 15]: [Training Set] TrainLossPerSample = 0.036162037; TotalSamplesSeen = 420000; EvalErrPerSample = 0.01165; AvgLearningRatePerSample = 0.0093750004; EpochTime=4.04887
|
||||
04/07/2016 14:17:25: SGD: Saving checkpoint model '/tmp/cntk-test-20160407141655.895430/Examples/Image/MNIST_02_Convolution@release_gpu/Models/02_Convolution.7'
|
||||
|
||||
04/07/2016 14:17:26: Starting Epoch 8: learning rate per sample = 0.009375 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 7 at record count 420000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:17:26: Starting minibatch loop.
|
||||
04/07/2016 14:17:27: Epoch[ 8 of 15]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.02838500; EvalErr[0]PerSample = 0.00900000; TotalTime = 1.0700s; SamplesPerSecond = 14953.3
|
||||
04/07/2016 14:17:28: Epoch[ 8 of 15]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.02249695; EvalErr[0]PerSample = 0.00693750; TotalTime = 1.0673s; SamplesPerSecond = 14991.7
|
||||
04/07/2016 14:17:29: Epoch[ 8 of 15]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.02647878; EvalErr[0]PerSample = 0.00787500; TotalTime = 1.0675s; SamplesPerSecond = 14987.6
|
||||
04/07/2016 14:17:30: Finished Epoch[ 8 of 15]: [Training Set] TrainLossPerSample = 0.025751172; TotalSamplesSeen = 480000; EvalErrPerSample = 0.0080000004; AvgLearningRatePerSample = 0.0093750004; EpochTime=4.00719
|
||||
04/07/2016 14:17:30: SGD: Saving checkpoint model '/tmp/cntk-test-20160407141655.895430/Examples/Image/MNIST_02_Convolution@release_gpu/Models/02_Convolution.8'
|
||||
|
||||
04/07/2016 14:17:30: Starting Epoch 9: learning rate per sample = 0.009375 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 8 at record count 480000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:17:30: Starting minibatch loop.
|
||||
04/07/2016 14:17:31: Epoch[ 9 of 15]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.01875125; EvalErr[0]PerSample = 0.00568750; TotalTime = 1.0679s; SamplesPerSecond = 14983.3
|
||||
04/07/2016 14:17:32: Epoch[ 9 of 15]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.01544093; EvalErr[0]PerSample = 0.00493750; TotalTime = 1.1085s; SamplesPerSecond = 14433.3
|
||||
04/07/2016 14:17:33: Epoch[ 9 of 15]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.02129173; EvalErr[0]PerSample = 0.00618750; TotalTime = 1.1186s; SamplesPerSecond = 14303.5
|
||||
04/07/2016 14:17:34: Finished Epoch[ 9 of 15]: [Training Set] TrainLossPerSample = 0.018342821; TotalSamplesSeen = 540000; EvalErrPerSample = 0.0054833335; AvgLearningRatePerSample = 0.0093750004; EpochTime=4.13339
|
||||
04/07/2016 14:17:34: SGD: Saving checkpoint model '/tmp/cntk-test-20160407141655.895430/Examples/Image/MNIST_02_Convolution@release_gpu/Models/02_Convolution.9'
|
||||
|
||||
04/07/2016 14:17:34: Starting Epoch 10: learning rate per sample = 0.009375 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 9 at record count 540000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:17:34: Starting minibatch loop.
|
||||
04/07/2016 14:17:35: Epoch[10 of 15]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.01390128; EvalErr[0]PerSample = 0.00431250; TotalTime = 1.0825s; SamplesPerSecond = 14780.8
|
||||
04/07/2016 14:17:36: Epoch[10 of 15]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.01073189; EvalErr[0]PerSample = 0.00318750; TotalTime = 1.0816s; SamplesPerSecond = 14792.4
|
||||
04/07/2016 14:17:37: Epoch[10 of 15]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.01273914; EvalErr[0]PerSample = 0.00350000; TotalTime = 1.0743s; SamplesPerSecond = 14893.1
|
||||
04/07/2016 14:17:38: Finished Epoch[10 of 15]: [Training Set] TrainLossPerSample = 0.011971561; TotalSamplesSeen = 600000; EvalErrPerSample = 0.0033500001; AvgLearningRatePerSample = 0.0093750004; EpochTime=4.04162
|
||||
04/07/2016 14:17:38: SGD: Saving checkpoint model '/tmp/cntk-test-20160407141655.895430/Examples/Image/MNIST_02_Convolution@release_gpu/Models/02_Convolution.10'
|
||||
|
||||
04/07/2016 14:17:38: Starting Epoch 11: learning rate per sample = 0.009375 effective momentum = 0.700000 momentum as time constant = 89.7 samples
|
||||
starting epoch 10 at record count 600000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:17:38: Starting minibatch loop.
|
||||
04/07/2016 14:17:39: Epoch[11 of 15]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.00943945; EvalErr[0]PerSample = 0.00268750; TotalTime = 1.0811s; SamplesPerSecond = 14800.1
|
||||
04/07/2016 14:17:40: Epoch[11 of 15]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.00678514; EvalErr[0]PerSample = 0.00150000; TotalTime = 1.1282s; SamplesPerSecond = 14181.4
|
||||
04/07/2016 14:17:41: Epoch[11 of 15]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.00904479; EvalErr[0]PerSample = 0.00287500; TotalTime = 1.1270s; SamplesPerSecond = 14197.6
|
||||
04/07/2016 14:17:42: Finished Epoch[11 of 15]: [Training Set] TrainLossPerSample = 0.0083944919; TotalSamplesSeen = 660000; EvalErrPerSample = 0.0022333334; AvgLearningRatePerSample = 0.0093750004; EpochTime=4.16931
|
||||
04/07/2016 14:17:42: SGD: Saving checkpoint model '/tmp/cntk-test-20160407141655.895430/Examples/Image/MNIST_02_Convolution@release_gpu/Models/02_Convolution.11'
|
||||
|
||||
04/07/2016 14:17:42: Starting Epoch 12: learning rate per sample = 0.009375 effective momentum = 0.700000 momentum as time constant = 89.7 samples
|
||||
starting epoch 11 at record count 660000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:17:42: Starting minibatch loop.
|
||||
04/07/2016 14:17:43: Epoch[12 of 15]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.00509959; EvalErr[0]PerSample = 0.00100000; TotalTime = 1.1282s; SamplesPerSecond = 14182.0
|
||||
04/07/2016 14:17:44: Epoch[12 of 15]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.00453443; EvalErr[0]PerSample = 0.00106250; TotalTime = 1.1145s; SamplesPerSecond = 14356.5
|
||||
04/07/2016 14:17:45: Epoch[12 of 15]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.00516278; EvalErr[0]PerSample = 0.00137500; TotalTime = 1.1098s; SamplesPerSecond = 14417.2
|
||||
04/07/2016 14:17:46: Finished Epoch[12 of 15]: [Training Set] TrainLossPerSample = 0.0050358889; TotalSamplesSeen = 720000; EvalErrPerSample = 0.0011; AvgLearningRatePerSample = 0.0093750004; EpochTime=4.18719
|
||||
04/07/2016 14:17:46: SGD: Saving checkpoint model '/tmp/cntk-test-20160407141655.895430/Examples/Image/MNIST_02_Convolution@release_gpu/Models/02_Convolution.12'
|
||||
|
||||
04/07/2016 14:17:46: Starting Epoch 13: learning rate per sample = 0.009375 effective momentum = 0.700000 momentum as time constant = 89.7 samples
|
||||
starting epoch 12 at record count 720000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:17:46: Starting minibatch loop.
|
||||
04/07/2016 14:17:47: Epoch[13 of 15]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.00354859; EvalErr[0]PerSample = 0.00075000; TotalTime = 1.0754s; SamplesPerSecond = 14878.3
|
||||
04/07/2016 14:17:48: Epoch[13 of 15]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.00238765; EvalErr[0]PerSample = 0.00018750; TotalTime = 1.1305s; SamplesPerSecond = 14153.6
|
||||
04/07/2016 14:17:49: Epoch[13 of 15]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.00274582; EvalErr[0]PerSample = 0.00043750; TotalTime = 1.1121s; SamplesPerSecond = 14387.6
|
||||
04/07/2016 14:17:50: Finished Epoch[13 of 15]: [Training Set] TrainLossPerSample = 0.0030327758; TotalSamplesSeen = 780000; EvalErrPerSample = 0.00044999999; AvgLearningRatePerSample = 0.0093750004; EpochTime=4.1546
|
||||
04/07/2016 14:17:50: SGD: Saving checkpoint model '/tmp/cntk-test-20160407141655.895430/Examples/Image/MNIST_02_Convolution@release_gpu/Models/02_Convolution.13'
|
||||
|
||||
04/07/2016 14:17:50: Starting Epoch 14: learning rate per sample = 0.009375 effective momentum = 0.700000 momentum as time constant = 89.7 samples
|
||||
starting epoch 13 at record count 780000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:17:50: Starting minibatch loop.
|
||||
04/07/2016 14:17:51: Epoch[14 of 15]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.00180821; EvalErr[0]PerSample = 6.25000000e-05; TotalTime = 1.0882s; SamplesPerSecond = 14703.4
|
||||
04/07/2016 14:17:53: Epoch[14 of 15]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.00134340; EvalErr[0]PerSample = 0.00000000; TotalTime = 1.0883s; SamplesPerSecond = 14702.0
|
||||
04/07/2016 14:17:54: Epoch[14 of 15]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.00173738; EvalErr[0]PerSample = 0.00012500; TotalTime = 1.0890s; SamplesPerSecond = 14692.6
|
||||
04/07/2016 14:17:54: Finished Epoch[14 of 15]: [Training Set] TrainLossPerSample = 0.0018632461; TotalSamplesSeen = 840000; EvalErrPerSample = 0.00013333333; AvgLearningRatePerSample = 0.0093750004; EpochTime=4.08432
|
||||
04/07/2016 14:17:54: SGD: Saving checkpoint model '/tmp/cntk-test-20160407141655.895430/Examples/Image/MNIST_02_Convolution@release_gpu/Models/02_Convolution.14'
|
||||
|
||||
04/07/2016 14:17:54: Starting Epoch 15: learning rate per sample = 0.009375 effective momentum = 0.700000 momentum as time constant = 89.7 samples
|
||||
starting epoch 14 at record count 840000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:17:54: Starting minibatch loop.
|
||||
04/07/2016 14:17:56: Epoch[15 of 15]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.00128256; EvalErr[0]PerSample = 6.25000000e-05; TotalTime = 1.0746s; SamplesPerSecond = 14889.1
|
||||
04/07/2016 14:17:57: Epoch[15 of 15]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.00096656; EvalErr[0]PerSample = 0.00000000; TotalTime = 1.0755s; SamplesPerSecond = 14876.4
|
||||
04/07/2016 14:17:58: Epoch[15 of 15]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.00122244; EvalErr[0]PerSample = 6.25000000e-05; TotalTime = 1.0757s; SamplesPerSecond = 14873.7
|
||||
04/07/2016 14:17:58: Finished Epoch[15 of 15]: [Training Set] TrainLossPerSample = 0.0013114499; TotalSamplesSeen = 900000; EvalErrPerSample = 6.6666667e-05; AvgLearningRatePerSample = 0.0093750004; EpochTime=4.03499
|
||||
04/07/2016 14:17:59: SGD: Saving checkpoint model '/tmp/cntk-test-20160407141655.895430/Examples/Image/MNIST_02_Convolution@release_gpu/Models/02_Convolution'
|
||||
04/07/2016 14:17:59: CNTKCommandTrainEnd: train
|
||||
|
||||
04/07/2016 14:17:59: Action "train" complete.
|
||||
|
||||
|
||||
04/07/2016 14:17:59: ##############################################################################
|
||||
04/07/2016 14:17:59: # #
|
||||
04/07/2016 14:17:59: # Action "test" #
|
||||
04/07/2016 14:17:59: # #
|
||||
04/07/2016 14:17:59: ##############################################################################
|
||||
|
||||
Reading UCI file /home/mahilleb/CNTK/Examples/Image/MNIST/Data/Test-28x28.txt
|
||||
|
||||
Post-processing network...
|
||||
|
||||
3 roots:
|
||||
ce = CrossEntropyWithSoftmax()
|
||||
err = ErrorPrediction()
|
||||
ol.z = Plus()
|
||||
|
||||
Validating network. 27 nodes to process in pass 1.
|
||||
|
||||
|
||||
Validating network. 16 nodes to process in pass 2.
|
||||
|
||||
|
||||
Validating network, final pass.
|
||||
|
||||
Validating --> labels = InputValue() : -> [10 x *]
Validating --> ol.W = LearnableParameter() : -> [10 x 128]
Validating --> h1.W = LearnableParameter() : -> [128 x 7 x 7 x 32]
Validating --> conv2.w.W = LearnableParameter() : -> [32 x 400]
Validating --> conv1.w.W = LearnableParameter() : -> [16 x 25]
Validating --> featScale = LearnableParameter() : -> [1 x 1]
Validating --> features = InputValue() : -> [28 x 28 x 1 x *]
Validating --> featScaled = ElementTimes (featScale, features) : [1 x 1], [28 x 28 x 1 x *] -> [28 x 28 x 1 x *]

Using cuDNN convolution engine for geometry: Input: 28 x 28 x 1, Output: 28 x 28 x 16, Kernel: 5 x 5 x 1, Map: 1 x 1 x 16, Stride: 1 x 1 x 1, Sharing: (1), AutoPad: (1), LowerPad: 0, UpperPad: 0.
Validating --> conv1.c.c = Convolution (conv1.w.W, featScaled) : [16 x 25], [28 x 28 x 1 x *] -> [28 x 28 x 16 x *]
Validating --> conv1.b.b = LearnableParameter() : -> [1 x 1 x 16]
Validating --> conv1.cpb = Plus (conv1.c.c, conv1.b.b) : [28 x 28 x 16 x *], [1 x 1 x 16] -> [28 x 28 x 16 x *]
Validating --> conv1.out = RectifiedLinear (conv1.cpb) : [28 x 28 x 16 x *] -> [28 x 28 x 16 x *]

Using cuDNN convolution engine for geometry: Input: 28 x 28 x 16, Output: 14 x 14 x 16, Kernel: 2 x 2 x 1, Map: 1, Stride: 2 x 2 x 1, Sharing: (1), AutoPad: (0), LowerPad: 0, UpperPad: 0.
Validating --> pool1 = MaxPooling (conv1.out) : [28 x 28 x 16 x *] -> [14 x 14 x 16 x *]

Using cuDNN convolution engine for geometry: Input: 14 x 14 x 16, Output: 14 x 14 x 32, Kernel: 5 x 5 x 16, Map: 32, Stride: 1 x 1 x 16, Sharing: (1, 1, 1), AutoPad: (1, 1, 0), LowerPad: 0, UpperPad: 0.
Validating --> conv2.c.c = Convolution (conv2.w.W, pool1) : [32 x 400], [14 x 14 x 16 x *] -> [14 x 14 x 32 x *]
Validating --> conv2.b.b = LearnableParameter() : -> [1 x 1 x 32]
Validating --> conv2.cpb = Plus (conv2.c.c, conv2.b.b) : [14 x 14 x 32 x *], [1 x 1 x 32] -> [14 x 14 x 32 x *]
Validating --> conv2.out = RectifiedLinear (conv2.cpb) : [14 x 14 x 32 x *] -> [14 x 14 x 32 x *]

Using cuDNN convolution engine for geometry: Input: 14 x 14 x 32, Output: 7 x 7 x 32, Kernel: 2 x 2 x 1, Map: 1, Stride: 2 x 2 x 1, Sharing: (1), AutoPad: (1, 1, 0), LowerPad: 0, UpperPad: 0.
Validating --> pool2.p = Pooling (conv2.out) : [14 x 14 x 32 x *] -> [7 x 7 x 32 x *]
Validating --> h1.t = Times (h1.W, pool2.p) : [128 x 7 x 7 x 32], [7 x 7 x 32 x *] -> [128 x *]
Validating --> h1.b = LearnableParameter() : -> [128 x 1]
Validating --> h1.z = Plus (h1.t, h1.b) : [128 x *], [128 x 1] -> [128 x 1 x *]
Validating --> h1.y = Sigmoid (h1.z) : [128 x 1 x *] -> [128 x 1 x *]
Validating --> ol.t = Times (ol.W, h1.y) : [10 x 128], [128 x 1 x *] -> [10 x 1 x *]
Validating --> ol.b = LearnableParameter() : -> [10 x 1]
Validating --> ol.z = Plus (ol.t, ol.b) : [10 x 1 x *], [10 x 1] -> [10 x 1 x *]
Validating --> ce = CrossEntropyWithSoftmax (labels, ol.z) : [10 x *], [10 x 1 x *] -> [1]
Validating --> err = ErrorPrediction (labels, ol.z) : [10 x *], [10 x 1 x *] -> [1]

11 out of 27 nodes do not share the minibatch layout with the input data.

Post-processing network complete.

evalNodeNames are not specified, using all the default evalnodes and training criterion nodes.

Allocating matrices for forward and/or backward propagation.
UCIFastReader: Starting at epoch 0, counting lines to determine record count...
10000 records found.
starting epoch 0 at record count 0, and file position 0
already there from last epoch
RandomOrdering: 2036 retries for 10000 elements (20.4%) to ensure window condition
RandomOrdering: recached sequence for seed 0: 2009, 1524, ...
Minibatch[1-500]: SamplesSeen = 8000 err: ErrorPrediction/Sample = 0.00925 ce: CrossEntropyWithSoftmax/Sample = 0.034084008
Minibatch[501-625]: SamplesSeen = 2000 err: ErrorPrediction/Sample = 0.0065 ce: CrossEntropyWithSoftmax/Sample = 0.019834762
Final Results: Minibatch[1-625]: SamplesSeen = 10000 err: ErrorPrediction/Sample = 0.0087 ce: CrossEntropyWithSoftmax/Sample = 0.031234159 Perplexity = 1.0317271

04/07/2016 14:17:59: Action "test" complete.

04/07/2016 14:17:59: __COMPLETED__

@@ -1 +0,0 @@
__COMPLETED__

@@ -1 +0,0 @@
__COMPLETED__

@@ -1 +0,0 @@
__COMPLETED__

@@ -1 +0,0 @@
__COMPLETED__

@@ -0,0 +1,602 @@
=== Running /cygdrive/c/R/CNTK3/x64/release/cntk.exe configFile=C:\R\CNTK3\Examples\Image\MNIST\Config/02_Convolution.cntk currentDirectory=C:\R\CNTK3\Examples\Image\MNIST\Data RunDir=C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_02_Convolution@release_gpu DataDir=C:\R\CNTK3\Examples\Image\MNIST\Data ConfigDir=C:\R\CNTK3\Examples\Image\MNIST\Config OutputDir=C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_02_Convolution@release_gpu DeviceId=0 timestamping=true train=[reader=[randomize=none]] imageLayout="cudnn"
|
||||
-------------------------------------------------------------------
|
||||
Build info:
|
||||
|
||||
Built time: Apr 7 2016 15:32:16
|
||||
Last modified date: Thu Apr 7 09:19:53 2016
|
||||
Build type: Release
|
||||
Build target: GPU
|
||||
With 1bit-SGD: yes
|
||||
CUDA_PATH: C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v7.5
|
||||
CUB_PATH: C:\R\cub-1.4.1
|
||||
CUDNN_PATH: C:\R\cudnn-7.0-win-x64-v4.0-prod\cuda
|
||||
Build Branch: mahilleb/MNISTLinux
|
||||
Build SHA1: 5161c21b466987a144f96bad84f8763b08b05c40
|
||||
Built by mahilleb on mahilleb57
|
||||
Build Path: C:\R\CNTK3\Source\CNTK\
|
||||
-------------------------------------------------------------------
|
||||
Changed current directory to C:\R\CNTK3\Examples\Image\MNIST\Data
|
||||
04/07/2016 14:49:49: -------------------------------------------------------------------
|
||||
04/07/2016 14:49:49: Build info:
|
||||
|
||||
04/07/2016 14:49:49: Built time: Apr 7 2016 15:32:16
|
||||
04/07/2016 14:49:49: Last modified date: Thu Apr 7 09:19:53 2016
|
||||
04/07/2016 14:49:49: Build type: Release
|
||||
04/07/2016 14:49:49: Build target: GPU
|
||||
04/07/2016 14:49:49: With 1bit-SGD: yes
|
||||
04/07/2016 14:49:49: CUDA_PATH: C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v7.5
|
||||
04/07/2016 14:49:49: CUB_PATH: C:\R\cub-1.4.1
|
||||
04/07/2016 14:49:49: CUDNN_PATH: C:\R\cudnn-7.0-win-x64-v4.0-prod\cuda
|
||||
04/07/2016 14:49:49: Build Branch: mahilleb/MNISTLinux
|
||||
04/07/2016 14:49:49: Build SHA1: 5161c21b466987a144f96bad84f8763b08b05c40
|
||||
04/07/2016 14:49:49: Built by mahilleb on mahilleb57
|
||||
04/07/2016 14:49:49: Build Path: C:\R\CNTK3\Source\CNTK\
|
||||
04/07/2016 14:49:49: -------------------------------------------------------------------
|
||||
|
||||
04/07/2016 14:49:49: Running on mahilleb57 at 2016/04/07 14:49:49
|
||||
04/07/2016 14:49:49: Command line:
|
||||
C:\R\CNTK3\x64\release\cntk.exe configFile=C:\R\CNTK3\Examples\Image\MNIST\Config/02_Convolution.cntk currentDirectory=C:\R\CNTK3\Examples\Image\MNIST\Data RunDir=C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_02_Convolution@release_gpu DataDir=C:\R\CNTK3\Examples\Image\MNIST\Data ConfigDir=C:\R\CNTK3\Examples\Image\MNIST\Config OutputDir=C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_02_Convolution@release_gpu DeviceId=0 timestamping=true train=[reader=[randomize=none]] imageLayout="cudnn"
04/07/2016 14:49:49: >>>>>>>>>>>>>>>>>>>> RAW CONFIG (VARIABLES NOT RESOLVED) >>>>>>>>>>>>>>>>>>>>
|
||||
04/07/2016 14:49:49: RootDir = ".."
|
||||
ConfigDir = "$RootDir$/Config"
|
||||
DataDir = "$RootDir$/Data"
|
||||
OutputDir = "$RootDir$/Output"
|
||||
ModelDir = "$OutputDir$/Models"
|
||||
deviceId = 0
|
||||
imageLayout = "cudnn"
|
||||
command = train:test
|
||||
precision = "float"
|
||||
modelPath = "$ModelDir$/02_Convolution"
|
||||
ndlMacros = "$ConfigDir$/Macros.ndl"
|
||||
traceLevel=1
|
||||
numMBsToShowResult=500
|
||||
prefetch=true
|
||||
initOnCPUOnly=true
|
||||
train = [
|
||||
action = "train"
|
||||
NDLNetworkBuilder = [
|
||||
networkDescription = "$ConfigDir$/02_Convolution.ndl"
|
||||
]
|
||||
SGD = [
|
||||
epochSize = 60000
|
||||
minibatchSize = 32
|
||||
learningRatesPerMB = 0.1*5:0.3
|
||||
momentumPerMB = 0*10:0.7
|
||||
maxEpochs = 15
|
||||
]
|
||||
reader = [
|
||||
readerType = "UCIFastReader"
|
||||
file = "$DataDir$/Train-28x28.txt"
|
||||
features = [
|
||||
dim = 784
|
||||
start = 1
|
||||
]
|
||||
labels = [
|
||||
dim = 1
|
||||
start = 0
|
||||
labelDim = 10
|
||||
labelMappingFile = "$DataDir$/labelsmap.txt"
|
||||
]
|
||||
]
|
||||
]
|
||||
test = [
|
||||
action = test
|
||||
minibatchSize = 16
|
||||
NDLNetworkBuilder = [
|
||||
networkDescription = "$ConfigDir$/02_Convolution.ndl"
|
||||
]
|
||||
reader = [
|
||||
readerType = "UCIFastReader"
|
||||
file = "$DataDir$/Test-28x28.txt"
|
||||
features = [
|
||||
dim = 784
|
||||
start = 1
|
||||
]
|
||||
labels = [
|
||||
dim = 1
|
||||
start = 0
|
||||
labelDim = 10
|
||||
labelMappingFile = "$DataDir$/labelsmap.txt"
|
||||
]
|
||||
]
|
||||
]
|
||||
currentDirectory=C:\R\CNTK3\Examples\Image\MNIST\Data
|
||||
RunDir=C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_02_Convolution@release_gpu
|
||||
DataDir=C:\R\CNTK3\Examples\Image\MNIST\Data
|
||||
ConfigDir=C:\R\CNTK3\Examples\Image\MNIST\Config
|
||||
OutputDir=C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_02_Convolution@release_gpu
|
||||
DeviceId=0
|
||||
timestamping=true
|
||||
train=[reader=[randomize=none]]
|
||||
imageLayout="cudnn"
|
||||
|
||||
04/07/2016 14:49:49: <<<<<<<<<<<<<<<<<<<< RAW CONFIG (VARIABLES NOT RESOLVED) <<<<<<<<<<<<<<<<<<<<
|
||||
|
||||
04/07/2016 14:49:49: >>>>>>>>>>>>>>>>>>>> RAW CONFIG WITH ALL VARIABLES RESOLVED >>>>>>>>>>>>>>>>>>>>
|
||||
04/07/2016 14:49:49: RootDir = ".."
|
||||
ConfigDir = "../Config"
|
||||
DataDir = "../Data"
|
||||
OutputDir = "../Output"
|
||||
ModelDir = "C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_02_Convolution@release_gpu/Models"
|
||||
deviceId = 0
|
||||
imageLayout = "cudnn"
|
||||
command = train:test
|
||||
precision = "float"
|
||||
modelPath = "C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_02_Convolution@release_gpu/Models/02_Convolution"
|
||||
ndlMacros = "C:\R\CNTK3\Examples\Image\MNIST\Config/Macros.ndl"
|
||||
traceLevel=1
|
||||
numMBsToShowResult=500
|
||||
prefetch=true
|
||||
initOnCPUOnly=true
|
||||
train = [
|
||||
action = "train"
|
||||
NDLNetworkBuilder = [
|
||||
networkDescription = "C:\R\CNTK3\Examples\Image\MNIST\Config/02_Convolution.ndl"
|
||||
]
|
||||
SGD = [
|
||||
epochSize = 60000
|
||||
minibatchSize = 32
|
||||
learningRatesPerMB = 0.1*5:0.3
|
||||
momentumPerMB = 0*10:0.7
|
||||
maxEpochs = 15
|
||||
]
|
||||
reader = [
|
||||
readerType = "UCIFastReader"
|
||||
file = "C:\R\CNTK3\Examples\Image\MNIST\Data/Train-28x28.txt"
|
||||
features = [
|
||||
dim = 784
|
||||
start = 1
|
||||
]
|
||||
labels = [
|
||||
dim = 1
|
||||
start = 0
|
||||
labelDim = 10
|
||||
labelMappingFile = "C:\R\CNTK3\Examples\Image\MNIST\Data/labelsmap.txt"
|
||||
]
|
||||
]
|
||||
]
|
||||
test = [
|
||||
action = test
|
||||
minibatchSize = 16
|
||||
NDLNetworkBuilder = [
|
||||
networkDescription = "C:\R\CNTK3\Examples\Image\MNIST\Config/02_Convolution.ndl"
|
||||
]
|
||||
reader = [
|
||||
readerType = "UCIFastReader"
|
||||
file = "C:\R\CNTK3\Examples\Image\MNIST\Data/Test-28x28.txt"
|
||||
features = [
|
||||
dim = 784
|
||||
start = 1
|
||||
]
|
||||
labels = [
|
||||
dim = 1
|
||||
start = 0
|
||||
labelDim = 10
|
||||
labelMappingFile = "C:\R\CNTK3\Examples\Image\MNIST\Data/labelsmap.txt"
|
||||
]
|
||||
]
|
||||
]
|
||||
currentDirectory=C:\R\CNTK3\Examples\Image\MNIST\Data
|
||||
RunDir=C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_02_Convolution@release_gpu
|
||||
DataDir=C:\R\CNTK3\Examples\Image\MNIST\Data
|
||||
ConfigDir=C:\R\CNTK3\Examples\Image\MNIST\Config
|
||||
OutputDir=C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_02_Convolution@release_gpu
|
||||
DeviceId=0
|
||||
timestamping=true
|
||||
train=[reader=[randomize=none]]
|
||||
imageLayout="cudnn"
|
||||
|
||||
04/07/2016 14:49:49: <<<<<<<<<<<<<<<<<<<< RAW CONFIG WITH ALL VARIABLES RESOLVED <<<<<<<<<<<<<<<<<<<<
|
||||
|
||||
04/07/2016 14:49:49: >>>>>>>>>>>>>>>>>>>> PROCESSED CONFIG WITH ALL VARIABLES RESOLVED >>>>>>>>>>>>>>>>>>>>
|
||||
configparameters: 02_Convolution.cntk:command=train:test
|
||||
configparameters: 02_Convolution.cntk:ConfigDir=C:\R\CNTK3\Examples\Image\MNIST\Config
|
||||
configparameters: 02_Convolution.cntk:currentDirectory=C:\R\CNTK3\Examples\Image\MNIST\Data
|
||||
configparameters: 02_Convolution.cntk:DataDir=C:\R\CNTK3\Examples\Image\MNIST\Data
|
||||
configparameters: 02_Convolution.cntk:deviceId=0
|
||||
configparameters: 02_Convolution.cntk:imageLayout=cudnn
|
||||
configparameters: 02_Convolution.cntk:initOnCPUOnly=true
|
||||
configparameters: 02_Convolution.cntk:ModelDir=C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_02_Convolution@release_gpu/Models
|
||||
configparameters: 02_Convolution.cntk:modelPath=C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_02_Convolution@release_gpu/Models/02_Convolution
|
||||
configparameters: 02_Convolution.cntk:ndlMacros=C:\R\CNTK3\Examples\Image\MNIST\Config/Macros.ndl
|
||||
configparameters: 02_Convolution.cntk:numMBsToShowResult=500
|
||||
configparameters: 02_Convolution.cntk:OutputDir=C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_02_Convolution@release_gpu
|
||||
configparameters: 02_Convolution.cntk:precision=float
|
||||
configparameters: 02_Convolution.cntk:prefetch=true
|
||||
configparameters: 02_Convolution.cntk:RootDir=..
|
||||
configparameters: 02_Convolution.cntk:RunDir=C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_02_Convolution@release_gpu
|
||||
configparameters: 02_Convolution.cntk:test=[
|
||||
action = test
|
||||
minibatchSize = 16
|
||||
NDLNetworkBuilder = [
|
||||
networkDescription = "C:\R\CNTK3\Examples\Image\MNIST\Config/02_Convolution.ndl"
|
||||
]
|
||||
reader = [
|
||||
readerType = "UCIFastReader"
|
||||
file = "C:\R\CNTK3\Examples\Image\MNIST\Data/Test-28x28.txt"
|
||||
features = [
|
||||
dim = 784
|
||||
start = 1
|
||||
]
|
||||
labels = [
|
||||
dim = 1
|
||||
start = 0
|
||||
labelDim = 10
|
||||
labelMappingFile = "C:\R\CNTK3\Examples\Image\MNIST\Data/labelsmap.txt"
|
||||
]
|
||||
]
|
||||
]
|
||||
|
||||
configparameters: 02_Convolution.cntk:timestamping=true
|
||||
configparameters: 02_Convolution.cntk:traceLevel=1
|
||||
configparameters: 02_Convolution.cntk:train=[
|
||||
action = "train"
|
||||
NDLNetworkBuilder = [
|
||||
networkDescription = "C:\R\CNTK3\Examples\Image\MNIST\Config/02_Convolution.ndl"
|
||||
]
|
||||
SGD = [
|
||||
epochSize = 60000
|
||||
minibatchSize = 32
|
||||
learningRatesPerMB = 0.1*5:0.3
|
||||
momentumPerMB = 0*10:0.7
|
||||
maxEpochs = 15
|
||||
]
|
||||
reader = [
|
||||
readerType = "UCIFastReader"
|
||||
file = "C:\R\CNTK3\Examples\Image\MNIST\Data/Train-28x28.txt"
|
||||
features = [
|
||||
dim = 784
|
||||
start = 1
|
||||
]
|
||||
labels = [
|
||||
dim = 1
|
||||
start = 0
|
||||
labelDim = 10
|
||||
labelMappingFile = "C:\R\CNTK3\Examples\Image\MNIST\Data/labelsmap.txt"
|
||||
]
|
||||
]
|
||||
] [reader=[randomize=none]]
|
||||
|
||||
04/07/2016 14:49:49: <<<<<<<<<<<<<<<<<<<< PROCESSED CONFIG WITH ALL VARIABLES RESOLVED <<<<<<<<<<<<<<<<<<<<
|
||||
04/07/2016 14:49:49: Commands: train test
|
||||
04/07/2016 14:49:49: Precision = "float"
|
||||
04/07/2016 14:49:49: CNTKModelPath: C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_02_Convolution@release_gpu/Models/02_Convolution
|
||||
04/07/2016 14:49:49: CNTKCommandTrainInfo: train : 15
|
||||
04/07/2016 14:49:49: CNTKCommandTrainInfo: CNTKNoMoreCommands_Total : 15
|
||||
|
||||
04/07/2016 14:49:49: ##############################################################################
|
||||
04/07/2016 14:49:49: # #
|
||||
04/07/2016 14:49:49: # Action "train" #
|
||||
04/07/2016 14:49:49: # #
|
||||
04/07/2016 14:49:49: ##############################################################################
|
||||
|
||||
04/07/2016 14:49:49: CNTKCommandTrainBegin: train
|
||||
NDLBuilder Using GPU 0
|
||||
Reading UCI file C:\R\CNTK3\Examples\Image\MNIST\Data/Train-28x28.txt
|
||||
|
||||
04/07/2016 14:49:49: Creating virgin network.
|
||||
|
||||
Post-processing network...
|
||||
|
||||
3 roots:
|
||||
ce = CrossEntropyWithSoftmax()
|
||||
err = ErrorPrediction()
|
||||
ol.z = Plus()
|
||||
|
||||
Validating network. 27 nodes to process in pass 1.
|
||||
|
||||
|
||||
Validating network. 16 nodes to process in pass 2.
|
||||
|
||||
|
||||
Validating network, final pass.
|
||||
|
||||
Validating --> labels = InputValue() : -> [10 x *]
|
||||
Validating --> ol.W = LearnableParameter() : -> [10 x 128]
|
||||
Validating --> h1.W = LearnableParameter() : -> [128 x 7 x 7 x 32]
|
||||
Validating --> conv2.w.W = LearnableParameter() : -> [32 x 400]
|
||||
Validating --> conv1.w.W = LearnableParameter() : -> [16 x 25]
|
||||
Validating --> featScale = LearnableParameter() : -> [1 x 1]
|
||||
Validating --> features = InputValue() : -> [28 x 28 x 1 x *]
|
||||
Validating --> featScaled = ElementTimes (featScale, features) : [1 x 1], [28 x 28 x 1 x *] -> [28 x 28 x 1 x *]
|
||||
|
||||
Using cuDNN convolution engine for geometry: Input: 28 x 28 x 1, Output: 28 x 28 x 16, Kernel: 5 x 5 x 1, Map: 1 x 1 x 16, Stride: 1 x 1 x 1, Sharing: (1), AutoPad: (1), LowerPad: 0, UpperPad: 0.
|
||||
Validating --> conv1.c.c = Convolution (conv1.w.W, featScaled) : [16 x 25], [28 x 28 x 1 x *] -> [28 x 28 x 16 x *]
|
||||
Validating --> conv1.b.b = LearnableParameter() : -> [1 x 1 x 16]
|
||||
Validating --> conv1.cpb = Plus (conv1.c.c, conv1.b.b) : [28 x 28 x 16 x *], [1 x 1 x 16] -> [28 x 28 x 16 x *]
|
||||
Validating --> conv1.out = RectifiedLinear (conv1.cpb) : [28 x 28 x 16 x *] -> [28 x 28 x 16 x *]
|
||||
|
||||
Using cuDNN convolution engine for geometry: Input: 28 x 28 x 16, Output: 14 x 14 x 16, Kernel: 2 x 2 x 1, Map: 1, Stride: 2 x 2 x 1, Sharing: (1), AutoPad: (0), LowerPad: 0, UpperPad: 0.
|
||||
Validating --> pool1 = MaxPooling (conv1.out) : [28 x 28 x 16 x *] -> [14 x 14 x 16 x *]
|
||||
|
||||
Using cuDNN convolution engine for geometry: Input: 14 x 14 x 16, Output: 14 x 14 x 32, Kernel: 5 x 5 x 16, Map: 32, Stride: 1 x 1 x 16, Sharing: (1, 1, 1), AutoPad: (1, 1, 0), LowerPad: 0, UpperPad: 0.
|
||||
Validating --> conv2.c.c = Convolution (conv2.w.W, pool1) : [32 x 400], [14 x 14 x 16 x *] -> [14 x 14 x 32 x *]
|
||||
Validating --> conv2.b.b = LearnableParameter() : -> [1 x 1 x 32]
|
||||
Validating --> conv2.cpb = Plus (conv2.c.c, conv2.b.b) : [14 x 14 x 32 x *], [1 x 1 x 32] -> [14 x 14 x 32 x *]
|
||||
Validating --> conv2.out = RectifiedLinear (conv2.cpb) : [14 x 14 x 32 x *] -> [14 x 14 x 32 x *]
|
||||
|
||||
Using cuDNN convolution engine for geometry: Input: 14 x 14 x 32, Output: 7 x 7 x 32, Kernel: 2 x 2 x 1, Map: 1, Stride: 2 x 2 x 1, Sharing: (1), AutoPad: (1, 1, 0), LowerPad: 0, UpperPad: 0.
|
||||
Validating --> pool2.p = Pooling (conv2.out) : [14 x 14 x 32 x *] -> [7 x 7 x 32 x *]
|
||||
Validating --> h1.t = Times (h1.W, pool2.p) : [128 x 7 x 7 x 32], [7 x 7 x 32 x *] -> [128 x *]
|
||||
Validating --> h1.b = LearnableParameter() : -> [128 x 1]
|
||||
Validating --> h1.z = Plus (h1.t, h1.b) : [128 x *], [128 x 1] -> [128 x 1 x *]
|
||||
Validating --> h1.y = Sigmoid (h1.z) : [128 x 1 x *] -> [128 x 1 x *]
|
||||
Validating --> ol.t = Times (ol.W, h1.y) : [10 x 128], [128 x 1 x *] -> [10 x 1 x *]
|
||||
Validating --> ol.b = LearnableParameter() : -> [10 x 1]
|
||||
Validating --> ol.z = Plus (ol.t, ol.b) : [10 x 1 x *], [10 x 1] -> [10 x 1 x *]
|
||||
Validating --> ce = CrossEntropyWithSoftmax (labels, ol.z) : [10 x *], [10 x 1 x *] -> [1]
|
||||
Validating --> err = ErrorPrediction (labels, ol.z) : [10 x *], [10 x 1 x *] -> [1]
|
||||
|
||||
|
||||
11 out of 27 nodes do not share the minibatch layout with the input data.
|
||||
|
||||
Post-processing network complete.
|
||||
|
||||
04/07/2016 14:49:50: Created model with 27 nodes on GPU 0.
|
||||
|
||||
04/07/2016 14:49:50: Training criterion node(s):
|
||||
04/07/2016 14:49:50: ce = CrossEntropyWithSoftmax
|
||||
|
||||
04/07/2016 14:49:50: Evaluation criterion node(s):
|
||||
|
||||
04/07/2016 14:49:50: err = ErrorPrediction
|
||||
|
||||
|
||||
Allocating matrices for forward and/or backward propagation.
|
||||
04/07/2016 14:49:50: No PreCompute nodes found, skipping PreCompute step.
|
||||
|
||||
04/07/2016 14:49:50: Starting Epoch 1: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
UCIFastReader: Starting at epoch 0, counting lines to determine record count...
|
||||
60000 records found.
|
||||
starting epoch 0 at record count 0, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:49:50: Starting minibatch loop.
|
||||
04/07/2016 14:49:52: Epoch[ 1 of 15]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 1.42179163; EvalErr[0]PerSample = 0.49431250; TotalTime = 1.5069s; SamplesPerSecond = 10617.9
|
||||
04/07/2016 14:49:53: Epoch[ 1 of 15]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.18478955; EvalErr[0]PerSample = 0.05068750; TotalTime = 1.2586s; SamplesPerSecond = 12712.4
|
||||
04/07/2016 14:49:54: Epoch[ 1 of 15]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.11844775; EvalErr[0]PerSample = 0.03412500; TotalTime = 1.2703s; SamplesPerSecond = 12595.7
|
||||
04/07/2016 14:49:55: Finished Epoch[ 1 of 15]: [Training Set] TrainLossPerSample = 0.47771728; TotalSamplesSeen = 60000; EvalErrPerSample = 0.15943334; AvgLearningRatePerSample = 0.003125; EpochTime=5.44822
|
||||
04/07/2016 14:49:55: SGD: Saving checkpoint model 'C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_02_Convolution@release_gpu/Models/02_Convolution.1'
|
||||
|
||||
04/07/2016 14:49:55: Starting Epoch 2: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 1 at record count 60000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:49:55: Starting minibatch loop.
|
||||
04/07/2016 14:49:56: Epoch[ 2 of 15]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.08068558; EvalErr[0]PerSample = 0.02362500; TotalTime = 1.2619s; SamplesPerSecond = 12678.8
|
||||
04/07/2016 14:49:58: Epoch[ 2 of 15]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.06578558; EvalErr[0]PerSample = 0.01918750; TotalTime = 1.2875s; SamplesPerSecond = 12427.1
|
||||
04/07/2016 14:49:59: Epoch[ 2 of 15]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.06249409; EvalErr[0]PerSample = 0.01900000; TotalTime = 1.2899s; SamplesPerSecond = 12403.7
|
||||
04/07/2016 14:50:00: Finished Epoch[ 2 of 15]: [Training Set] TrainLossPerSample = 0.06600941; TotalSamplesSeen = 120000; EvalErrPerSample = 0.019433334; AvgLearningRatePerSample = 0.003125; EpochTime=4.7917
|
||||
04/07/2016 14:50:00: SGD: Saving checkpoint model 'C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_02_Convolution@release_gpu/Models/02_Convolution.2'
|
||||
|
||||
04/07/2016 14:50:00: Starting Epoch 3: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 2 at record count 120000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:50:00: Starting minibatch loop.
|
||||
04/07/2016 14:50:01: Epoch[ 3 of 15]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.04787527; EvalErr[0]PerSample = 0.01381250; TotalTime = 1.2953s; SamplesPerSecond = 12352.6
|
||||
04/07/2016 14:50:02: Epoch[ 3 of 15]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.04153992; EvalErr[0]PerSample = 0.01150000; TotalTime = 1.2605s; SamplesPerSecond = 12693.2
|
||||
04/07/2016 14:50:04: Epoch[ 3 of 15]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.04213457; EvalErr[0]PerSample = 0.01162500; TotalTime = 1.2599s; SamplesPerSecond = 12699.4
|
||||
04/07/2016 14:50:05: Finished Epoch[ 3 of 15]: [Training Set] TrainLossPerSample = 0.042023789; TotalSamplesSeen = 180000; EvalErrPerSample = 0.0117; AvgLearningRatePerSample = 0.003125; EpochTime=4.77046
|
||||
04/07/2016 14:50:05: SGD: Saving checkpoint model 'C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_02_Convolution@release_gpu/Models/02_Convolution.3'
|
||||
|
||||
04/07/2016 14:50:05: Starting Epoch 4: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 3 at record count 180000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:50:05: Starting minibatch loop.
|
||||
04/07/2016 14:50:06: Epoch[ 4 of 15]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.03172888; EvalErr[0]PerSample = 0.00912500; TotalTime = 1.2684s; SamplesPerSecond = 12613.9
|
||||
04/07/2016 14:50:07: Epoch[ 4 of 15]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.02865261; EvalErr[0]PerSample = 0.00756250; TotalTime = 1.2594s; SamplesPerSecond = 12704.4
|
||||
04/07/2016 14:50:08: Epoch[ 4 of 15]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.02999757; EvalErr[0]PerSample = 0.00762500; TotalTime = 1.2632s; SamplesPerSecond = 12666.5
|
||||
04/07/2016 14:50:09: Finished Epoch[ 4 of 15]: [Training Set] TrainLossPerSample = 0.029084617; TotalSamplesSeen = 240000; EvalErrPerSample = 0.0077166669; AvgLearningRatePerSample = 0.003125; EpochTime=4.73903
|
||||
04/07/2016 14:50:09: SGD: Saving checkpoint model 'C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_02_Convolution@release_gpu/Models/02_Convolution.4'
|
||||
|
||||
04/07/2016 14:50:09: Starting Epoch 5: learning rate per sample = 0.003125 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 4 at record count 240000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:50:09: Starting minibatch loop.
|
||||
04/07/2016 14:50:11: Epoch[ 5 of 15]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.02264893; EvalErr[0]PerSample = 0.00525000; TotalTime = 1.2622s; SamplesPerSecond = 12676.0
|
||||
04/07/2016 14:50:12: Epoch[ 5 of 15]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.02050629; EvalErr[0]PerSample = 0.00512500; TotalTime = 1.2595s; SamplesPerSecond = 12703.9
|
||||
04/07/2016 14:50:13: Epoch[ 5 of 15]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.02201180; EvalErr[0]PerSample = 0.00581250; TotalTime = 1.2882s; SamplesPerSecond = 12420.5
|
||||
04/07/2016 14:50:14: Finished Epoch[ 5 of 15]: [Training Set] TrainLossPerSample = 0.020945026; TotalSamplesSeen = 300000; EvalErrPerSample = 0.0052; AvgLearningRatePerSample = 0.003125; EpochTime=4.76145
|
||||
04/07/2016 14:50:14: SGD: Saving checkpoint model 'C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_02_Convolution@release_gpu/Models/02_Convolution.5'
|
||||
|
||||
04/07/2016 14:50:14: Starting Epoch 6: learning rate per sample = 0.009375 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 5 at record count 300000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:50:14: Starting minibatch loop.
|
||||
04/07/2016 14:50:16: Epoch[ 6 of 15]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.04591298; EvalErr[0]PerSample = 0.01350000; TotalTime = 1.2638s; SamplesPerSecond = 12660.3
|
||||
04/07/2016 14:50:17: Epoch[ 6 of 15]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.04733852; EvalErr[0]PerSample = 0.01593750; TotalTime = 1.2608s; SamplesPerSecond = 12690.8
|
||||
04/07/2016 14:50:18: Epoch[ 6 of 15]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.05172458; EvalErr[0]PerSample = 0.01600000; TotalTime = 1.2651s; SamplesPerSecond = 12647.6
|
||||
04/07/2016 14:50:19: Finished Epoch[ 6 of 15]: [Training Set] TrainLossPerSample = 0.047511656; TotalSamplesSeen = 360000; EvalErrPerSample = 0.014916667; AvgLearningRatePerSample = 0.0093750004; EpochTime=4.73228
|
||||
04/07/2016 14:50:19: SGD: Saving checkpoint model 'C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_02_Convolution@release_gpu/Models/02_Convolution.6'
|
||||
|
||||
04/07/2016 14:50:19: Starting Epoch 7: learning rate per sample = 0.009375 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 6 at record count 360000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:50:19: Starting minibatch loop.
|
||||
04/07/2016 14:50:20: Epoch[ 7 of 15]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.03389177; EvalErr[0]PerSample = 0.01050000; TotalTime = 1.2654s; SamplesPerSecond = 12644.1
|
||||
04/07/2016 14:50:22: Epoch[ 7 of 15]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.03008663; EvalErr[0]PerSample = 0.00906250; TotalTime = 1.2581s; SamplesPerSecond = 12717.1
|
||||
04/07/2016 14:50:23: Epoch[ 7 of 15]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.03362006; EvalErr[0]PerSample = 0.01062500; TotalTime = 1.2675s; SamplesPerSecond = 12623.0
|
||||
04/07/2016 14:50:24: Finished Epoch[ 7 of 15]: [Training Set] TrainLossPerSample = 0.031670555; TotalSamplesSeen = 420000; EvalErrPerSample = 0.0098333331; AvgLearningRatePerSample = 0.0093750004; EpochTime=4.73697
|
||||
04/07/2016 14:50:24: SGD: Saving checkpoint model 'C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_02_Convolution@release_gpu/Models/02_Convolution.7'
|
||||
|
||||
04/07/2016 14:50:24: Starting Epoch 8: learning rate per sample = 0.009375 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 7 at record count 420000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:50:24: Starting minibatch loop.
|
||||
04/07/2016 14:50:25: Epoch[ 8 of 15]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.02153152; EvalErr[0]PerSample = 0.00656250; TotalTime = 1.2871s; SamplesPerSecond = 12431.0
|
||||
04/07/2016 14:50:26: Epoch[ 8 of 15]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.01910104; EvalErr[0]PerSample = 0.00575000; TotalTime = 1.3312s; SamplesPerSecond = 12019.5
|
||||
04/07/2016 14:50:28: Epoch[ 8 of 15]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.02056895; EvalErr[0]PerSample = 0.00625000; TotalTime = 1.2556s; SamplesPerSecond = 12742.9
|
||||
04/07/2016 14:50:29: Finished Epoch[ 8 of 15]: [Training Set] TrainLossPerSample = 0.020226961; TotalSamplesSeen = 480000; EvalErrPerSample = 0.0060999999; AvgLearningRatePerSample = 0.0093750004; EpochTime=4.82154
|
||||
04/07/2016 14:50:29: SGD: Saving checkpoint model 'C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_02_Convolution@release_gpu/Models/02_Convolution.8'
|
||||
|
||||
04/07/2016 14:50:29: Starting Epoch 9: learning rate per sample = 0.009375 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 8 at record count 480000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:50:29: Starting minibatch loop.
|
||||
04/07/2016 14:50:30: Epoch[ 9 of 15]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.01654039; EvalErr[0]PerSample = 0.00437500; TotalTime = 1.2656s; SamplesPerSecond = 12641.9
|
||||
04/07/2016 14:50:31: Epoch[ 9 of 15]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.01310685; EvalErr[0]PerSample = 0.00400000; TotalTime = 1.2678s; SamplesPerSecond = 12620.6
|
||||
04/07/2016 14:50:32: Epoch[ 9 of 15]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.01517984; EvalErr[0]PerSample = 0.00481250; TotalTime = 1.2629s; SamplesPerSecond = 12669.3
|
||||
04/07/2016 14:50:33: Finished Epoch[ 9 of 15]: [Training Set] TrainLossPerSample = 0.014533882; TotalSamplesSeen = 540000; EvalErrPerSample = 0.0042166668; AvgLearningRatePerSample = 0.0093750004; EpochTime=4.74395
|
||||
04/07/2016 14:50:33: SGD: Saving checkpoint model 'C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_02_Convolution@release_gpu/Models/02_Convolution.9'
|
||||
|
||||
04/07/2016 14:50:33: Starting Epoch 10: learning rate per sample = 0.009375 effective momentum = 0.000000 momentum as time constant = 0.0 samples
|
||||
starting epoch 9 at record count 540000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:50:33: Starting minibatch loop.
|
||||
04/07/2016 14:50:35: Epoch[10 of 15]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.00918645; EvalErr[0]PerSample = 0.00231250; TotalTime = 1.2623s; SamplesPerSecond = 12675.5
|
||||
04/07/2016 14:50:36: Epoch[10 of 15]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.00747875; EvalErr[0]PerSample = 0.00187500; TotalTime = 1.2588s; SamplesPerSecond = 12710.3
|
||||
04/07/2016 14:50:37: Epoch[10 of 15]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.00868891; EvalErr[0]PerSample = 0.00225000; TotalTime = 1.2623s; SamplesPerSecond = 12675.1
|
||||
04/07/2016 14:50:38: Finished Epoch[10 of 15]: [Training Set] TrainLossPerSample = 0.0081688892; TotalSamplesSeen = 600000; EvalErrPerSample = 0.0019833334; AvgLearningRatePerSample = 0.0093750004; EpochTime=4.73994
|
||||
04/07/2016 14:50:38: SGD: Saving checkpoint model 'C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_02_Convolution@release_gpu/Models/02_Convolution.10'
|
||||
|
||||
04/07/2016 14:50:38: Starting Epoch 11: learning rate per sample = 0.009375 effective momentum = 0.700000 momentum as time constant = 89.7 samples
|
||||
starting epoch 10 at record count 600000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:50:38: Starting minibatch loop.
|
||||
04/07/2016 14:50:39: Epoch[11 of 15]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.00705737; EvalErr[0]PerSample = 0.00168750; TotalTime = 1.2620s; SamplesPerSecond = 12678.8
|
||||
04/07/2016 14:50:41: Epoch[11 of 15]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.00511601; EvalErr[0]PerSample = 0.00075000; TotalTime = 1.2656s; SamplesPerSecond = 12641.7
|
||||
04/07/2016 14:50:42: Epoch[11 of 15]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.00515049; EvalErr[0]PerSample = 0.00093750; TotalTime = 1.2622s; SamplesPerSecond = 12675.9
|
||||
04/07/2016 14:50:43: Finished Epoch[11 of 15]: [Training Set] TrainLossPerSample = 0.0054711187; TotalSamplesSeen = 660000; EvalErrPerSample = 0.001; AvgLearningRatePerSample = 0.0093750004; EpochTime=4.74728
|
||||
04/07/2016 14:50:43: SGD: Saving checkpoint model 'C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_02_Convolution@release_gpu/Models/02_Convolution.11'
|
||||
|
||||
04/07/2016 14:50:43: Starting Epoch 12: learning rate per sample = 0.009375 effective momentum = 0.700000 momentum as time constant = 89.7 samples
|
||||
starting epoch 11 at record count 660000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:50:43: Starting minibatch loop.
|
||||
04/07/2016 14:50:44: Epoch[12 of 15]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.00363765; EvalErr[0]PerSample = 0.00062500; TotalTime = 1.2713s; SamplesPerSecond = 12585.3
|
||||
04/07/2016 14:50:46: Epoch[12 of 15]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.00304792; EvalErr[0]PerSample = 0.00031250; TotalTime = 1.2644s; SamplesPerSecond = 12654.7
|
||||
04/07/2016 14:50:47: Epoch[12 of 15]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.00272178; EvalErr[0]PerSample = 0.00043750; TotalTime = 1.2657s; SamplesPerSecond = 12641.2
|
||||
04/07/2016 14:50:48: Finished Epoch[12 of 15]: [Training Set] TrainLossPerSample = 0.0031371212; TotalSamplesSeen = 720000; EvalErrPerSample = 0.00046666668; AvgLearningRatePerSample = 0.0093750004; EpochTime=4.7532
|
||||
04/07/2016 14:50:48: SGD: Saving checkpoint model 'C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_02_Convolution@release_gpu/Models/02_Convolution.12'
|
||||
|
||||
04/07/2016 14:50:48: Starting Epoch 13: learning rate per sample = 0.009375 effective momentum = 0.700000 momentum as time constant = 89.7 samples
|
||||
starting epoch 12 at record count 720000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:50:48: Starting minibatch loop.
|
||||
04/07/2016 14:50:49: Epoch[13 of 15]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.00198168; EvalErr[0]PerSample = 0.00025000; TotalTime = 1.2648s; SamplesPerSecond = 12650.1
|
||||
04/07/2016 14:50:50: Epoch[13 of 15]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.00189558; EvalErr[0]PerSample = 0.00018750; TotalTime = 1.2649s; SamplesPerSecond = 12649.3
|
||||
04/07/2016 14:50:52: Epoch[13 of 15]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.00163649; EvalErr[0]PerSample = 0.00012500; TotalTime = 1.2776s; SamplesPerSecond = 12523.6
|
||||
04/07/2016 14:50:53: Finished Epoch[13 of 15]: [Training Set] TrainLossPerSample = 0.001889063; TotalSamplesSeen = 780000; EvalErrPerSample = 0.00018333334; AvgLearningRatePerSample = 0.0093750004; EpochTime=4.7567
|
||||
04/07/2016 14:50:53: SGD: Saving checkpoint model 'C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_02_Convolution@release_gpu/Models/02_Convolution.13'
|
||||
|
||||
04/07/2016 14:50:53: Starting Epoch 14: learning rate per sample = 0.009375 effective momentum = 0.700000 momentum as time constant = 89.7 samples
|
||||
starting epoch 13 at record count 780000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:50:53: Starting minibatch loop.
|
||||
04/07/2016 14:50:54: Epoch[14 of 15]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.00127319; EvalErr[0]PerSample = 0.00012500; TotalTime = 1.2743s; SamplesPerSecond = 12555.9
|
||||
04/07/2016 14:50:55: Epoch[14 of 15]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.00130068; EvalErr[0]PerSample = 0.00012500; TotalTime = 1.2652s; SamplesPerSecond = 12646.6
|
||||
04/07/2016 14:50:56: Epoch[14 of 15]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.00113766; EvalErr[0]PerSample = 6.25000000e-005; TotalTime = 1.2664s; SamplesPerSecond = 12633.9
|
||||
04/07/2016 14:50:57: Finished Epoch[14 of 15]: [Training Set] TrainLossPerSample = 0.0012903105; TotalSamplesSeen = 840000; EvalErrPerSample = 0.0001; AvgLearningRatePerSample = 0.0093750004; EpochTime=4.76122
|
||||
04/07/2016 14:50:57: SGD: Saving checkpoint model 'C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_02_Convolution@release_gpu/Models/02_Convolution.14'
|
||||
|
||||
04/07/2016 14:50:57: Starting Epoch 15: learning rate per sample = 0.009375 effective momentum = 0.700000 momentum as time constant = 89.7 samples
|
||||
starting epoch 14 at record count 840000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:50:57: Starting minibatch loop.
|
||||
04/07/2016 14:50:59: Epoch[15 of 15]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.00087188; EvalErr[0]PerSample = 6.25000000e-005; TotalTime = 1.2677s; SamplesPerSecond = 12621.3
|
||||
04/07/2016 14:51:00: Epoch[15 of 15]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.00092434; EvalErr[0]PerSample = 6.25000000e-005; TotalTime = 1.2763s; SamplesPerSecond = 12535.7
|
||||
04/07/2016 14:51:01: Epoch[15 of 15]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.00089293; EvalErr[0]PerSample = 6.25000000e-005; TotalTime = 1.2731s; SamplesPerSecond = 12568.1
|
||||
04/07/2016 14:51:02: Finished Epoch[15 of 15]: [Training Set] TrainLossPerSample = 0.00097708357; TotalSamplesSeen = 900000; EvalErrPerSample = 6.6666667e-005; AvgLearningRatePerSample = 0.0093750004; EpochTime=4.77523
|
||||
04/07/2016 14:51:02: SGD: Saving checkpoint model 'C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_02_Convolution@release_gpu/Models/02_Convolution'
|
||||
04/07/2016 14:51:02: CNTKCommandTrainEnd: train
|
||||
|
||||
04/07/2016 14:51:02: Action "train" complete.
|
||||
|
||||
|
||||
04/07/2016 14:51:02: ##############################################################################
|
||||
04/07/2016 14:51:02: # #
|
||||
04/07/2016 14:51:02: # Action "test" #
|
||||
04/07/2016 14:51:02: # #
|
||||
04/07/2016 14:51:02: ##############################################################################
|
||||
|
||||
Reading UCI file C:\R\CNTK3\Examples\Image\MNIST\Data/Test-28x28.txt
|
||||
|
||||
Post-processing network...
|
||||
|
||||
3 roots:
|
||||
ce = CrossEntropyWithSoftmax()
|
||||
err = ErrorPrediction()
|
||||
ol.z = Plus()
|
||||
|
||||
Validating network. 27 nodes to process in pass 1.
|
||||
|
||||
|
||||
Validating network. 16 nodes to process in pass 2.
|
||||
|
||||
|
||||
Validating network, final pass.
|
||||
|
||||
Validating --> labels = InputValue() : -> [10 x *]
|
||||
Validating --> ol.W = LearnableParameter() : -> [10 x 128]
|
||||
Validating --> h1.W = LearnableParameter() : -> [128 x 7 x 7 x 32]
|
||||
Validating --> conv2.w.W = LearnableParameter() : -> [32 x 400]
|
||||
Validating --> conv1.w.W = LearnableParameter() : -> [16 x 25]
|
||||
Validating --> featScale = LearnableParameter() : -> [1 x 1]
|
||||
Validating --> features = InputValue() : -> [28 x 28 x 1 x *]
|
||||
Validating --> featScaled = ElementTimes (featScale, features) : [1 x 1], [28 x 28 x 1 x *] -> [28 x 28 x 1 x *]
|
||||
|
||||
Using cuDNN convolution engine for geometry: Input: 28 x 28 x 1, Output: 28 x 28 x 16, Kernel: 5 x 5 x 1, Map: 1 x 1 x 16, Stride: 1 x 1 x 1, Sharing: (1), AutoPad: (1), LowerPad: 0, UpperPad: 0.
|
||||
Validating --> conv1.c.c = Convolution (conv1.w.W, featScaled) : [16 x 25], [28 x 28 x 1 x *] -> [28 x 28 x 16 x *]
|
||||
Validating --> conv1.b.b = LearnableParameter() : -> [1 x 1 x 16]
|
||||
Validating --> conv1.cpb = Plus (conv1.c.c, conv1.b.b) : [28 x 28 x 16 x *], [1 x 1 x 16] -> [28 x 28 x 16 x *]
|
||||
Validating --> conv1.out = RectifiedLinear (conv1.cpb) : [28 x 28 x 16 x *] -> [28 x 28 x 16 x *]
|
||||
|
||||
Using cuDNN convolution engine for geometry: Input: 28 x 28 x 16, Output: 14 x 14 x 16, Kernel: 2 x 2 x 1, Map: 1, Stride: 2 x 2 x 1, Sharing: (1), AutoPad: (0), LowerPad: 0, UpperPad: 0.
|
||||
Validating --> pool1 = MaxPooling (conv1.out) : [28 x 28 x 16 x *] -> [14 x 14 x 16 x *]
|
||||
|
||||
Using cuDNN convolution engine for geometry: Input: 14 x 14 x 16, Output: 14 x 14 x 32, Kernel: 5 x 5 x 16, Map: 32, Stride: 1 x 1 x 16, Sharing: (1, 1, 1), AutoPad: (1, 1, 0), LowerPad: 0, UpperPad: 0.
|
||||
Validating --> conv2.c.c = Convolution (conv2.w.W, pool1) : [32 x 400], [14 x 14 x 16 x *] -> [14 x 14 x 32 x *]
|
||||
Validating --> conv2.b.b = LearnableParameter() : -> [1 x 1 x 32]
|
||||
Validating --> conv2.cpb = Plus (conv2.c.c, conv2.b.b) : [14 x 14 x 32 x *], [1 x 1 x 32] -> [14 x 14 x 32 x *]
|
||||
Validating --> conv2.out = RectifiedLinear (conv2.cpb) : [14 x 14 x 32 x *] -> [14 x 14 x 32 x *]
|
||||
|
||||
Using cuDNN convolution engine for geometry: Input: 14 x 14 x 32, Output: 7 x 7 x 32, Kernel: 2 x 2 x 1, Map: 1, Stride: 2 x 2 x 1, Sharing: (1), AutoPad: (1, 1, 0), LowerPad: 0, UpperPad: 0.
|
||||
Validating --> pool2.p = Pooling (conv2.out) : [14 x 14 x 32 x *] -> [7 x 7 x 32 x *]
|
||||
Validating --> h1.t = Times (h1.W, pool2.p) : [128 x 7 x 7 x 32], [7 x 7 x 32 x *] -> [128 x *]
|
||||
Validating --> h1.b = LearnableParameter() : -> [128 x 1]
|
||||
Validating --> h1.z = Plus (h1.t, h1.b) : [128 x *], [128 x 1] -> [128 x 1 x *]
|
||||
Validating --> h1.y = Sigmoid (h1.z) : [128 x 1 x *] -> [128 x 1 x *]
|
||||
Validating --> ol.t = Times (ol.W, h1.y) : [10 x 128], [128 x 1 x *] -> [10 x 1 x *]
|
||||
Validating --> ol.b = LearnableParameter() : -> [10 x 1]
|
||||
Validating --> ol.z = Plus (ol.t, ol.b) : [10 x 1 x *], [10 x 1] -> [10 x 1 x *]
|
||||
Validating --> ce = CrossEntropyWithSoftmax (labels, ol.z) : [10 x *], [10 x 1 x *] -> [1]
|
||||
Validating --> err = ErrorPrediction (labels, ol.z) : [10 x *], [10 x 1 x *] -> [1]
|
||||
|
||||
|
||||
11 out of 27 nodes do not share the minibatch layout with the input data.
|
||||
|
||||
Post-processing network complete.
|
||||
|
||||
evalNodeNames are not specified, using all the default evalnodes and training criterion nodes.
|
||||
|
||||
|
||||
Allocating matrices for forward and/or backward propagation.
|
||||
UCIFastReader: Starting at epoch 0, counting lines to determine record count...
|
||||
10000 records found.
|
||||
starting epoch 0 at record count 0, and file position 0
|
||||
already there from last epoch
|
||||
RandomOrdering: 1989 retries for 10000 elements (19.9%) to ensure window condition
|
||||
RandomOrdering: recached sequence for seed 0: 2334, 3830, ...
|
||||
Minibatch[1-500]: SamplesSeen = 8000 err: ErrorPrediction/Sample = 0.01125 ce: CrossEntropyWithSoftmax/Sample = 0.038305461
|
||||
Minibatch[501-625]: SamplesSeen = 2000 err: ErrorPrediction/Sample = 0.0055 ce: CrossEntropyWithSoftmax/Sample = 0.021902326
|
||||
Final Results: Minibatch[1-625]: SamplesSeen = 10000 err: ErrorPrediction/Sample = 0.0101 ce: CrossEntropyWithSoftmax/Sample = 0.035024834 Perplexity = 1.0356454
|
||||
|
||||
04/07/2016 14:51:03: Action "test" complete.
|
||||
|
||||
04/07/2016 14:51:03: __COMPLETED__

@@ -3,13 +3,35 @@
. $TEST_ROOT_DIR/run-test-common

ConfigDir=$TEST_DIR/../../../../../../Examples/Image/MNIST/Config
if [ "$OS" == "Windows_NT" ]; then
  CleanDataDir=$(cygpath -aw $DataDir)
else
  CleanDataDir=$DataDir
fi

if [[ ! -d $TEST_DATA_DIR || ! -e $TEST_DATA_DIR/Train-28x28.txt || ! -e $TEST_DATA_DIR/Test-28x28.txt ]]; then
  # Cannot find test data locally.
  # Try external test data directory (not part of the CNTK repository) as an alternative.
  if [[ -d "$CNTK_EXTERNAL_TESTDATA_SOURCE_DIRECTORY" ]]; then
    if [ "$OS" == "Windows_NT" ]; then
      DataSourceDir=`cygpath -au $CNTK_EXTERNAL_TESTDATA_SOURCE_DIRECTORY`/Image/MNIST/v0
    else
      DataSourceDir=$CNTK_EXTERNAL_TESTDATA_SOURCE_DIRECTORY/Image/MNIST/v0
    fi

    # Copy the test data to the test run directory
    DataDir=$TEST_RUN_DIR/TestData
    mkdir $DataDir
    cp -R $DataSourceDir/* $DataDir || exit $?
    Copied=1
  else
    echo Error: cannot find data. Please see Examples/Image/MNIST/README.md for instructions to get it.
    exit 1
  fi
fi

# cntkrun <CNTK config file name> <additional CNTK args>
imageLayout=cudnn

cntkrun 02_Convolution.cntk "train=[reader=[file=$CleanDataDir/Train.txt]] test=[reader=[file=$CleanDataDir/Test.txt]] train=[SGD=[maxEpochs=1]] train=[SGD=[epochSize=128]] train=[reader=[randomize=none]] imageLayout=\"$imageLayout\"" || exit $?
# Note: explicitly turn off randomization, as it crashes the reader.
cntkrun 02_Convolution.cntk "train=[reader=[randomize=none]] imageLayout=\"$imageLayout\""
ExitCode=$?

# Delete the test data if copied
[[ "$Copied" -eq "1" ]] && rm -rf "$DataDir"

exit $ExitCode

@@ -1,15 +1,36 @@
dataDir: ../../../../Image/Data
dataDir: ../../../../../../Examples/Image/MNIST/Data

# Note: Windows temporarily disabled because of instability (pending investigation; perhaps
# alexeyk/batch_norm fixes it)
tags:
  # running on every BVT job in 'E' (Examples) leg
  - bvt-e ((flavor=='debug') ^ (device=='cpu')) and (os=='linux')
  # running every Nightly job in 'E' (Examples) leg
  - nightly-e (os=='linux')
  # In BVT, run Release GPU (~ 60 - 90 sec)
  - bvt-e (build_sku=='gpu') and (device=='gpu') and (flavor=='release')
  # In Nightly on Linux, run Debug GPU in addition (~ 100 - 240 sec).
  - nightly-e (build_sku=='gpu') and (device=='gpu')

testCases:
  CNTK Run must be completed:
    patterns:
      - __COMPLETED__

  Must train epochs in exactly same order and parameters:
    patterns:
      - Starting Epoch {{integer}}
      - learning rate per sample = {{float}}
      - momentum = {{float}}

  Epochs (with low train loss) must be finished with expected results:
    patterns:
      - Finished Epoch[{{integer}} of {{integer}}]
      - TrainLossPerSample = 0.0
      - TrainLossPerSample = {{float,tolerance=0.05}}
      - EvalErrPerSample = {{float,tolerance=0.005}}
      - AvgLearningRatePerSample = {{float,tolerance=0.1%}}

  Per-minibatch (with low train loss) training results must match:
    patterns:
      # Ignores first set of minibatches at start of epoch, which sometimes has a larger deviation:
      - 01-
      - Epoch[{{integer}} of {{integer}}]-Minibatch[{{integer}}-{{integer}}
      - SamplesSeen = {{integer}}
      - TrainLossPerSample = 0.0
      - TrainLossPerSample = {{float,tolerance=0.05}}
      - EvalErr[0]PerSample = {{float,tolerance=0.005}}

@@ -1 +0,0 @@
__COMPLETED__

@@ -1 +0,0 @@
__COMPLETED__

@@ -1 +0,0 @@
__COMPLETED__

@@ -1 +0,0 @@
__COMPLETED__

@@ -0,0 +1,496 @@
=== Running /home/mahilleb/CNTK/build/gpu/release/bin/cntk configFile=/home/mahilleb/CNTK/Tests/EndToEndTests/Examples/Image/MNIST/03_ConvBatchNorm/../../../../../../Examples/Image/MNIST/Config/03_ConvBatchNorm.cntk currentDirectory=/home/mahilleb/CNTK/Examples/Image/MNIST/Data RunDir=/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_03_ConvBatchNorm@release_gpu DataDir=/home/mahilleb/CNTK/Examples/Image/MNIST/Data ConfigDir=/home/mahilleb/CNTK/Tests/EndToEndTests/Examples/Image/MNIST/03_ConvBatchNorm/../../../../../../Examples/Image/MNIST/Config OutputDir=/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_03_ConvBatchNorm@release_gpu DeviceId=0 timestamping=true train=[reader=[randomize=none]] imageLayout="cudnn"
|
||||
-------------------------------------------------------------------
|
||||
Build info:
|
||||
|
||||
Built time: Apr 6 2016 15:52:46
|
||||
Last modified date: Tue Apr 5 14:19:05 2016
|
||||
Build type: release
|
||||
Build target: GPU
|
||||
With 1bit-SGD: no
|
||||
Math lib: acml
|
||||
CUDA_PATH: /usr/local/cuda-7.0
|
||||
CUB_PATH: /usr/local/cub-1.4.1
|
||||
CUDNN_PATH: /usr/local/cudnn-4.0
|
||||
Build Branch: mahilleb/MNISTLinux
|
||||
Build SHA1: ad5c8cd8002553a87d462a9a1ddcdabf2c84f519 (modified)
|
||||
Built by mahilleb on atleneu04
|
||||
Build Path: /home/mahilleb/CNTK
|
||||
-------------------------------------------------------------------
|
||||
Changed current directory to /home/mahilleb/CNTK/Examples/Image/MNIST/Data
|
||||
04/07/2016 14:05:08: -------------------------------------------------------------------
|
||||
04/07/2016 14:05:08: Build info:
|
||||
|
||||
04/07/2016 14:05:08: Built time: Apr 6 2016 15:52:46
|
||||
04/07/2016 14:05:08: Last modified date: Tue Apr 5 14:19:05 2016
|
||||
04/07/2016 14:05:08: Build type: release
|
||||
04/07/2016 14:05:08: Build target: GPU
|
||||
04/07/2016 14:05:08: With 1bit-SGD: no
|
||||
04/07/2016 14:05:08: Math lib: acml
|
||||
04/07/2016 14:05:08: CUDA_PATH: /usr/local/cuda-7.0
|
||||
04/07/2016 14:05:08: CUB_PATH: /usr/local/cub-1.4.1
|
||||
04/07/2016 14:05:08: CUDNN_PATH: /usr/local/cudnn-4.0
|
||||
04/07/2016 14:05:08: Build Branch: mahilleb/MNISTLinux
|
||||
04/07/2016 14:05:08: Build SHA1: ad5c8cd8002553a87d462a9a1ddcdabf2c84f519 (modified)
|
||||
04/07/2016 14:05:08: Built by mahilleb on atleneu04
|
||||
04/07/2016 14:05:08: Build Path: /home/mahilleb/CNTK
|
||||
04/07/2016 14:05:08: -------------------------------------------------------------------
|
||||
|
||||
04/07/2016 14:05:08: Running on localhost at 2016/04/07 14:05:08
|
||||
04/07/2016 14:05:08: Command line:
|
||||
/home/mahilleb/CNTK/build/gpu/release/bin/cntk configFile=/home/mahilleb/CNTK/Tests/EndToEndTests/Examples/Image/MNIST/03_ConvBatchNorm/../../../../../../Examples/Image/MNIST/Config/03_ConvBatchNorm.cntk currentDirectory=/home/mahilleb/CNTK/Examples/Image/MNIST/Data RunDir=/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_03_ConvBatchNorm@release_gpu DataDir=/home/mahilleb/CNTK/Examples/Image/MNIST/Data ConfigDir=/home/mahilleb/CNTK/Tests/EndToEndTests/Examples/Image/MNIST/03_ConvBatchNorm/../../../../../../Examples/Image/MNIST/Config OutputDir=/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_03_ConvBatchNorm@release_gpu DeviceId=0 timestamping=true train=[reader=[randomize=none]] imageLayout="cudnn"
04/07/2016 14:05:08: >>>>>>>>>>>>>>>>>>>> RAW CONFIG (VARIABLES NOT RESOLVED) >>>>>>>>>>>>>>>>>>>>
|
||||
04/07/2016 14:05:08: RootDir = ".."
|
||||
ConfigDir = "$RootDir$/Config"
|
||||
DataDir = "$RootDir$/Data"
|
||||
OutputDir = "$RootDir$/Output"
|
||||
ModelDir = "$OutputDir$/Models"
|
||||
deviceId = 0
|
||||
imageLayout = "cudnn"
|
||||
command = train:test
|
||||
precision = "float"
|
||||
modelPath = "$ModelDir$/03_ConvBatchNorm"
|
||||
ndlMacros = "$ConfigDir$/Macros.ndl"
|
||||
traceLevel=1
|
||||
numMBsToShowResult=500
|
||||
initOnCPUOnly=true
|
||||
train = [
|
||||
action = "train"
|
||||
NDLNetworkBuilder = [
|
||||
networkDescription = "$ConfigDir$/03_ConvBatchNorm.ndl"
|
||||
]
|
||||
SGD = [
|
||||
epochSize = 60000
|
||||
minibatchSize = 32
|
||||
learningRatesPerMB = 0.5:0.1
|
||||
momentumPerMB = 0.9
|
||||
maxEpochs = 2
|
||||
batchNormalizationBlendTimeConstant=0:1#INF
|
||||
]
|
||||
reader = [
|
||||
readerType = "UCIFastReader"
|
||||
file = "$DataDir$/Train-28x28.txt"
|
||||
features = [
|
||||
dim = 784
|
||||
start = 1
|
||||
]
|
||||
labels = [
|
||||
dim = 1
|
||||
start = 0
|
||||
labelDim = 10
|
||||
labelMappingFile = "$DataDir$/labelsmap.txt"
|
||||
]
|
||||
]
|
||||
]
|
||||
test = [
|
||||
action = "test"
|
||||
minibatchSize = 32
|
||||
modelPath=$ModelDir$/03_ConvBatchNorm
|
||||
NDLNetworkBuilder = [
|
||||
networkDescription = "$ConfigDir$/03_ConvBatchNorm.ndl"
|
||||
]
|
||||
reader = [
|
||||
readerType = "UCIFastReader"
|
||||
file = "$DataDir$/Test-28x28.txt"
|
||||
features = [
|
||||
dim = 784
|
||||
start = 1
|
||||
]
|
||||
labels = [
|
||||
dim = 1
|
||||
start = 0
|
||||
labelDim = 10
|
||||
labelMappingFile = "$DataDir$/labelsmap.txt"
|
||||
]
|
||||
]
|
||||
]
|
||||
currentDirectory=/home/mahilleb/CNTK/Examples/Image/MNIST/Data
|
||||
RunDir=/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_03_ConvBatchNorm@release_gpu
|
||||
DataDir=/home/mahilleb/CNTK/Examples/Image/MNIST/Data
|
||||
ConfigDir=/home/mahilleb/CNTK/Tests/EndToEndTests/Examples/Image/MNIST/03_ConvBatchNorm/../../../../../../Examples/Image/MNIST/Config
|
||||
OutputDir=/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_03_ConvBatchNorm@release_gpu
|
||||
DeviceId=0
|
||||
timestamping=true
|
||||
train=[reader=[randomize=none]]
|
||||
imageLayout="cudnn"
|
||||
|
||||
04/07/2016 14:05:08: <<<<<<<<<<<<<<<<<<<< RAW CONFIG (VARIABLES NOT RESOLVED) <<<<<<<<<<<<<<<<<<<<
|
||||
|
||||
04/07/2016 14:05:08: >>>>>>>>>>>>>>>>>>>> RAW CONFIG WITH ALL VARIABLES RESOLVED >>>>>>>>>>>>>>>>>>>>
|
||||
04/07/2016 14:05:08: RootDir = ".."
|
||||
ConfigDir = "../Config"
|
||||
DataDir = "../Data"
|
||||
OutputDir = "../Output"
|
||||
ModelDir = "/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_03_ConvBatchNorm@release_gpu/Models"
|
||||
deviceId = 0
|
||||
imageLayout = "cudnn"
|
||||
command = train:test
|
||||
precision = "float"
|
||||
modelPath = "/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_03_ConvBatchNorm@release_gpu/Models/03_ConvBatchNorm"
|
||||
ndlMacros = "/home/mahilleb/CNTK/Tests/EndToEndTests/Examples/Image/MNIST/03_ConvBatchNorm/../../../../../../Examples/Image/MNIST/Config/Macros.ndl"
|
||||
traceLevel=1
|
||||
numMBsToShowResult=500
|
||||
initOnCPUOnly=true
|
||||
train = [
|
||||
action = "train"
|
||||
NDLNetworkBuilder = [
|
||||
networkDescription = "/home/mahilleb/CNTK/Tests/EndToEndTests/Examples/Image/MNIST/03_ConvBatchNorm/../../../../../../Examples/Image/MNIST/Config/03_ConvBatchNorm.ndl"
|
||||
]
|
||||
SGD = [
|
||||
epochSize = 60000
|
||||
minibatchSize = 32
|
||||
learningRatesPerMB = 0.5:0.1
|
||||
momentumPerMB = 0.9
|
||||
maxEpochs = 2
|
||||
batchNormalizationBlendTimeConstant=0:1#INF
|
||||
]
|
||||
reader = [
|
||||
readerType = "UCIFastReader"
|
||||
file = "/home/mahilleb/CNTK/Examples/Image/MNIST/Data/Train-28x28.txt"
|
||||
features = [
|
||||
dim = 784
|
||||
start = 1
|
||||
]
|
||||
labels = [
|
||||
dim = 1
|
||||
start = 0
|
||||
labelDim = 10
|
||||
labelMappingFile = "/home/mahilleb/CNTK/Examples/Image/MNIST/Data/labelsmap.txt"
|
||||
]
|
||||
]
|
||||
]
|
||||
test = [
|
||||
action = "test"
|
||||
minibatchSize = 32
|
||||
modelPath=/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_03_ConvBatchNorm@release_gpu/Models/03_ConvBatchNorm
|
||||
NDLNetworkBuilder = [
|
||||
networkDescription = "/home/mahilleb/CNTK/Tests/EndToEndTests/Examples/Image/MNIST/03_ConvBatchNorm/../../../../../../Examples/Image/MNIST/Config/03_ConvBatchNorm.ndl"
|
||||
]
|
||||
reader = [
|
||||
readerType = "UCIFastReader"
|
||||
file = "/home/mahilleb/CNTK/Examples/Image/MNIST/Data/Test-28x28.txt"
|
||||
features = [
|
||||
dim = 784
|
||||
start = 1
|
||||
]
|
||||
labels = [
|
||||
dim = 1
|
||||
start = 0
|
||||
labelDim = 10
|
||||
labelMappingFile = "/home/mahilleb/CNTK/Examples/Image/MNIST/Data/labelsmap.txt"
|
||||
]
|
||||
]
|
||||
]
|
||||
currentDirectory=/home/mahilleb/CNTK/Examples/Image/MNIST/Data
|
||||
RunDir=/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_03_ConvBatchNorm@release_gpu
|
||||
DataDir=/home/mahilleb/CNTK/Examples/Image/MNIST/Data
|
||||
ConfigDir=/home/mahilleb/CNTK/Tests/EndToEndTests/Examples/Image/MNIST/03_ConvBatchNorm/../../../../../../Examples/Image/MNIST/Config
|
||||
OutputDir=/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_03_ConvBatchNorm@release_gpu
|
||||
DeviceId=0
|
||||
timestamping=true
|
||||
train=[reader=[randomize=none]]
|
||||
imageLayout="cudnn"
|
||||
|
||||
04/07/2016 14:05:08: <<<<<<<<<<<<<<<<<<<< RAW CONFIG WITH ALL VARIABLES RESOLVED <<<<<<<<<<<<<<<<<<<<
|
||||
|
||||
04/07/2016 14:05:08: >>>>>>>>>>>>>>>>>>>> PROCESSED CONFIG WITH ALL VARIABLES RESOLVED >>>>>>>>>>>>>>>>>>>>
|
||||
configparameters: 03_ConvBatchNorm.cntk:command=train:test
|
||||
configparameters: 03_ConvBatchNorm.cntk:ConfigDir=/home/mahilleb/CNTK/Tests/EndToEndTests/Examples/Image/MNIST/03_ConvBatchNorm/../../../../../../Examples/Image/MNIST/Config
|
||||
configparameters: 03_ConvBatchNorm.cntk:currentDirectory=/home/mahilleb/CNTK/Examples/Image/MNIST/Data
|
||||
configparameters: 03_ConvBatchNorm.cntk:DataDir=/home/mahilleb/CNTK/Examples/Image/MNIST/Data
|
||||
configparameters: 03_ConvBatchNorm.cntk:deviceId=0
|
||||
configparameters: 03_ConvBatchNorm.cntk:imageLayout=cudnn
|
||||
configparameters: 03_ConvBatchNorm.cntk:initOnCPUOnly=true
|
||||
configparameters: 03_ConvBatchNorm.cntk:ModelDir=/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_03_ConvBatchNorm@release_gpu/Models
|
||||
configparameters: 03_ConvBatchNorm.cntk:modelPath=/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_03_ConvBatchNorm@release_gpu/Models/03_ConvBatchNorm
|
||||
configparameters: 03_ConvBatchNorm.cntk:ndlMacros=/home/mahilleb/CNTK/Tests/EndToEndTests/Examples/Image/MNIST/03_ConvBatchNorm/../../../../../../Examples/Image/MNIST/Config/Macros.ndl
|
||||
configparameters: 03_ConvBatchNorm.cntk:numMBsToShowResult=500
|
||||
configparameters: 03_ConvBatchNorm.cntk:OutputDir=/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_03_ConvBatchNorm@release_gpu
|
||||
configparameters: 03_ConvBatchNorm.cntk:precision=float
|
||||
configparameters: 03_ConvBatchNorm.cntk:RootDir=..
|
||||
configparameters: 03_ConvBatchNorm.cntk:RunDir=/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_03_ConvBatchNorm@release_gpu
|
||||
configparameters: 03_ConvBatchNorm.cntk:test=[
|
||||
action = "test"
|
||||
minibatchSize = 32
|
||||
modelPath=/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_03_ConvBatchNorm@release_gpu/Models/03_ConvBatchNorm
|
||||
NDLNetworkBuilder = [
|
||||
networkDescription = "/home/mahilleb/CNTK/Tests/EndToEndTests/Examples/Image/MNIST/03_ConvBatchNorm/../../../../../../Examples/Image/MNIST/Config/03_ConvBatchNorm.ndl"
|
||||
]
|
||||
reader = [
|
||||
readerType = "UCIFastReader"
|
||||
file = "/home/mahilleb/CNTK/Examples/Image/MNIST/Data/Test-28x28.txt"
|
||||
features = [
|
||||
dim = 784
|
||||
start = 1
|
||||
]
|
||||
labels = [
|
||||
dim = 1
|
||||
start = 0
|
||||
labelDim = 10
|
||||
labelMappingFile = "/home/mahilleb/CNTK/Examples/Image/MNIST/Data/labelsmap.txt"
|
||||
]
|
||||
]
|
||||
]
|
||||
|
||||
configparameters: 03_ConvBatchNorm.cntk:timestamping=true
|
||||
configparameters: 03_ConvBatchNorm.cntk:traceLevel=1
|
||||
configparameters: 03_ConvBatchNorm.cntk:train=[
|
||||
action = "train"
|
||||
NDLNetworkBuilder = [
|
||||
networkDescription = "/home/mahilleb/CNTK/Tests/EndToEndTests/Examples/Image/MNIST/03_ConvBatchNorm/../../../../../../Examples/Image/MNIST/Config/03_ConvBatchNorm.ndl"
|
||||
]
|
||||
SGD = [
|
||||
epochSize = 60000
|
||||
minibatchSize = 32
|
||||
learningRatesPerMB = 0.5:0.1
|
||||
momentumPerMB = 0.9
|
||||
maxEpochs = 2
|
||||
batchNormalizationBlendTimeConstant=0:1#INF
|
||||
]
|
||||
reader = [
|
||||
readerType = "UCIFastReader"
|
||||
file = "/home/mahilleb/CNTK/Examples/Image/MNIST/Data/Train-28x28.txt"
|
||||
features = [
|
||||
dim = 784
|
||||
start = 1
|
||||
]
|
||||
labels = [
|
||||
dim = 1
|
||||
start = 0
|
||||
labelDim = 10
|
||||
labelMappingFile = "/home/mahilleb/CNTK/Examples/Image/MNIST/Data/labelsmap.txt"
|
||||
]
|
||||
]
|
||||
] [reader=[randomize=none]]
|
||||
|
||||
04/07/2016 14:05:08: <<<<<<<<<<<<<<<<<<<< PROCESSED CONFIG WITH ALL VARIABLES RESOLVED <<<<<<<<<<<<<<<<<<<<
|
||||
04/07/2016 14:05:08: Commands: train test
|
||||
04/07/2016 14:05:08: Precision = "float"
|
||||
04/07/2016 14:05:08: CNTKModelPath: /tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_03_ConvBatchNorm@release_gpu/Models/03_ConvBatchNorm
|
||||
04/07/2016 14:05:08: CNTKCommandTrainInfo: train : 2
|
||||
04/07/2016 14:05:08: CNTKCommandTrainInfo: CNTKNoMoreCommands_Total : 2
|
||||
|
||||
04/07/2016 14:05:08: ##############################################################################
|
||||
04/07/2016 14:05:08: # #
|
||||
04/07/2016 14:05:08: # Action "train" #
|
||||
04/07/2016 14:05:08: # #
|
||||
04/07/2016 14:05:08: ##############################################################################
|
||||
|
||||
04/07/2016 14:05:08: CNTKCommandTrainBegin: train
|
||||
NDLBuilder Using GPU 0
|
||||
Reading UCI file /home/mahilleb/CNTK/Examples/Image/MNIST/Data/Train-28x28.txt
|
||||
|
||||
04/07/2016 14:05:08: Creating virgin network.
|
||||
|
||||
Post-processing network...
|
||||
|
||||
3 roots:
|
||||
ce = CrossEntropyWithSoftmax()
|
||||
err = ErrorPrediction()
|
||||
ol.z = Plus()
|
||||
|
||||
Validating network. 36 nodes to process in pass 1.
|
||||
|
||||
|
||||
h1.t Times operation: For legacy compatibility, the sample layout of left input (h1.W LearnableParameter operation) was patched to [128 x 7 x 7 x 32] (from [128 x 1568])
|
||||
|
||||
Validating network. 16 nodes to process in pass 2.
|
||||
|
||||
|
||||
Validating network, final pass.
|
||||
|
||||
Validating --> labels = InputValue() : -> [10 x *]
|
||||
Validating --> ol.W = LearnableParameter() : -> [10 x 128]
|
||||
Validating --> h1.W = LearnableParameter() : -> [128 x 7 x 7 x 32]
|
||||
Validating --> conv2.c.W = LearnableParameter() : -> [32 x 400]
|
||||
Validating --> conv1.c.W = LearnableParameter() : -> [16 x 25]
|
||||
Validating --> featScale = LearnableParameter() : -> [1 x 1]
|
||||
Validating --> features = InputValue() : -> [28 x 28 x 1 x *]
|
||||
Validating --> featScaled = ElementTimes (featScale, features) : [1 x 1], [28 x 28 x 1 x *] -> [28 x 28 x 1 x *]
|
||||
|
||||
Using cuDNN convolution engine for geometry: Input: 28 x 28 x 1, Output: 28 x 28 x 16, Kernel: 5 x 5 x 1, Map: 1 x 1 x 16, Stride: 1 x 1 x 1, Sharing: (1), AutoPad: (1), LowerPad: 0, UpperPad: 0.
|
||||
Validating --> conv1.c.c.c = Convolution (conv1.c.W, featScaled) : [16 x 25], [28 x 28 x 1 x *] -> [28 x 28 x 16 x *]
|
||||
Validating --> conv1.c.c.sc = LearnableParameter() : -> [16 x 1]
|
||||
Validating --> conv1.c.c.b = LearnableParameter() : -> [16 x 1]
|
||||
Validating --> conv1.c.c.m = LearnableParameter() : -> [16 x 1]
|
||||
Validating --> conv1.c.c.isd = LearnableParameter() : -> [16 x 1]
|
||||
|
||||
Using CNTK batch normalization engine.
|
||||
Validating --> conv1.c.c.y = BatchNormalization (conv1.c.c.c, conv1.c.c.sc, conv1.c.c.b, conv1.c.c.m, conv1.c.c.isd) : [28 x 28 x 16 x *], [16 x 1], [16 x 1], [16 x 1], [16 x 1] -> [28 x 28 x 16 x *]
|
||||
Validating --> conv1.y = RectifiedLinear (conv1.c.c.y) : [28 x 28 x 16 x *] -> [28 x 28 x 16 x *]
|
||||
|
||||
Using cuDNN convolution engine for geometry: Input: 28 x 28 x 16, Output: 14 x 14 x 16, Kernel: 2 x 2 x 1, Map: 1, Stride: 2 x 2 x 1, Sharing: (1), AutoPad: (0), LowerPad: 0, UpperPad: 0.
|
||||
Validating --> pool1 = MaxPooling (conv1.y) : [28 x 28 x 16 x *] -> [14 x 14 x 16 x *]
|
||||
|
||||
Using cuDNN convolution engine for geometry: Input: 14 x 14 x 16, Output: 14 x 14 x 32, Kernel: 5 x 5 x 16, Map: 1 x 1 x 32, Stride: 1 x 1 x 16, Sharing: (1), AutoPad: (1), LowerPad: 0, UpperPad: 0.
|
||||
Validating --> conv2.c.c.c = Convolution (conv2.c.W, pool1) : [32 x 400], [14 x 14 x 16 x *] -> [14 x 14 x 32 x *]
|
||||
Validating --> conv2.c.c.sc = LearnableParameter() : -> [32 x 1]
|
||||
Validating --> conv2.c.c.b = LearnableParameter() : -> [32 x 1]
|
||||
Validating --> conv2.c.c.m = LearnableParameter() : -> [32 x 1]
|
||||
Validating --> conv2.c.c.isd = LearnableParameter() : -> [32 x 1]
|
||||
|
||||
Using CNTK batch normalization engine.
|
||||
Validating --> conv2.c.c.y = BatchNormalization (conv2.c.c.c, conv2.c.c.sc, conv2.c.c.b, conv2.c.c.m, conv2.c.c.isd) : [14 x 14 x 32 x *], [32 x 1], [32 x 1], [32 x 1], [32 x 1] -> [14 x 14 x 32 x *]
|
||||
Validating --> conv2.y = RectifiedLinear (conv2.c.c.y) : [14 x 14 x 32 x *] -> [14 x 14 x 32 x *]
|
||||
|
||||
Using cuDNN convolution engine for geometry: Input: 14 x 14 x 32, Output: 7 x 7 x 32, Kernel: 2 x 2 x 1, Map: 1, Stride: 2 x 2 x 1, Sharing: (1), AutoPad: (0), LowerPad: 0, UpperPad: 0.
|
||||
Validating --> pool2 = MaxPooling (conv2.y) : [14 x 14 x 32 x *] -> [7 x 7 x 32 x *]
|
||||
Validating --> h1.t = Times (h1.W, pool2) : [128 x 7 x 7 x 32], [7 x 7 x 32 x *] -> [128 x *]
|
||||
Validating --> h1.sc = LearnableParameter() : -> [128 x 1]
|
||||
Validating --> h1.b = LearnableParameter() : -> [128 x 1]
|
||||
Validating --> h1.m = LearnableParameter() : -> [128 x 1]
|
||||
Validating --> h1.isd = LearnableParameter() : -> [128 x 1]
|
||||
|
||||
Using CNTK batch normalization engine.
|
||||
Validating --> h1.bn = BatchNormalization (h1.t, h1.sc, h1.b, h1.m, h1.isd) : [128 x *], [128 x 1], [128 x 1], [128 x 1], [128 x 1] -> [128 x *]
|
||||
Validating --> h1.y = RectifiedLinear (h1.bn) : [128 x *] -> [128 x *]
|
||||
Validating --> ol.t = Times (ol.W, h1.y) : [10 x 128], [128 x *] -> [10 x *]
|
||||
Validating --> ol.b = LearnableParameter() : -> [10 x 1]
|
||||
Validating --> ol.z = Plus (ol.t, ol.b) : [10 x *], [10 x 1] -> [10 x 1 x *]
|
||||
Validating --> ce = CrossEntropyWithSoftmax (labels, ol.z) : [10 x *], [10 x 1 x *] -> [1]
|
||||
Validating --> err = ErrorPrediction (labels, ol.z) : [10 x *], [10 x 1 x *] -> [1]
|
||||
|
||||
|
||||
20 out of 36 nodes do not share the minibatch layout with the input data.
|
||||
|
||||
Post-processing network complete.
|
||||
|
||||
04/07/2016 14:05:09: Created model with 36 nodes on GPU 0.
|
||||
|
||||
04/07/2016 14:05:09: Training criterion node(s):
|
||||
04/07/2016 14:05:09: ce = CrossEntropyWithSoftmax
|
||||
|
||||
04/07/2016 14:05:09: Evaluation criterion node(s):
|
||||
|
||||
04/07/2016 14:05:09: err = ErrorPrediction
|
||||
|
||||
|
||||
Allocating matrices for forward and/or backward propagation.
|
||||
04/07/2016 14:05:09: No PreCompute nodes found, skipping PreCompute step.
|
||||
|
||||
04/07/2016 14:05:09: Starting Epoch 1: learning rate per sample = 0.015625 effective momentum = 0.900000 momentum as time constant = 303.7 samples
|
||||
UCIFastReader: Starting at epoch 0, counting lines to determine record count...
|
||||
60000 records found.
|
||||
starting epoch 0 at record count 0, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:05:09: Starting minibatch loop.
|
||||
04/07/2016 14:05:11: Epoch[ 1 of 2]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.18024605; EvalErr[0]PerSample = 0.05475000; TotalTime = 1.3570s; SamplesPerSecond = 11790.6
|
||||
04/07/2016 14:05:12: Epoch[ 1 of 2]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.07408237; EvalErr[0]PerSample = 0.02306250; TotalTime = 1.1839s; SamplesPerSecond = 13515.0
|
||||
04/07/2016 14:05:13: Epoch[ 1 of 2]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.07168071; EvalErr[0]PerSample = 0.02187500; TotalTime = 1.1835s; SamplesPerSecond = 13519.7
|
||||
04/07/2016 14:05:14: Finished Epoch[ 1 of 2]: [Training Set] TrainLossPerSample = 0.09788157; TotalSamplesSeen = 60000; EvalErrPerSample = 0.029816667; AvgLearningRatePerSample = 0.015625; EpochTime=5.13037
|
||||
04/07/2016 14:05:14: SGD: Saving checkpoint model '/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_03_ConvBatchNorm@release_gpu/Models/03_ConvBatchNorm.1'
|
||||
Setting batch normalization blend time constant to inf.
|
||||
|
||||
04/07/2016 14:05:14: Starting Epoch 2: learning rate per sample = 0.003125 effective momentum = 0.900000 momentum as time constant = 303.7 samples
|
||||
starting epoch 1 at record count 60000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:05:14: Starting minibatch loop.
|
||||
04/07/2016 14:05:15: Epoch[ 2 of 2]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.02897029; EvalErr[0]PerSample = 0.00837500; TotalTime = 1.1826s; SamplesPerSecond = 13530.0
|
||||
04/07/2016 14:05:16: Epoch[ 2 of 2]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.02011888; EvalErr[0]PerSample = 0.00631250; TotalTime = 1.1831s; SamplesPerSecond = 13523.8
|
||||
04/07/2016 14:05:18: Epoch[ 2 of 2]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.02228251; EvalErr[0]PerSample = 0.00625000; TotalTime = 1.1831s; SamplesPerSecond = 13523.5
|
||||
04/07/2016 14:05:18: Finished Epoch[ 2 of 2]: [Training Set] TrainLossPerSample = 0.021928078; TotalSamplesSeen = 120000; EvalErrPerSample = 0.0064333333; AvgLearningRatePerSample = 0.003125; EpochTime=4.43763
|
||||
04/07/2016 14:05:18: SGD: Saving checkpoint model '/tmp/cntk-test-20160407140437.931634/Examples/Image/MNIST_03_ConvBatchNorm@release_gpu/Models/03_ConvBatchNorm'
|
||||
04/07/2016 14:05:18: CNTKCommandTrainEnd: train
|
||||
|
||||
04/07/2016 14:05:18: Action "train" complete.
|
||||
|
||||
|
||||
04/07/2016 14:05:18: ##############################################################################
|
||||
04/07/2016 14:05:18: # #
|
||||
04/07/2016 14:05:18: # Action "test" #
|
||||
04/07/2016 14:05:18: # #
|
||||
04/07/2016 14:05:18: ##############################################################################
|
||||
|
||||
Reading UCI file /home/mahilleb/CNTK/Examples/Image/MNIST/Data/Test-28x28.txt
|
||||
|
||||
Post-processing network...
|
||||
|
||||
3 roots:
|
||||
ce = CrossEntropyWithSoftmax()
|
||||
err = ErrorPrediction()
|
||||
ol.z = Plus()
|
||||
|
||||
Validating network. 36 nodes to process in pass 1.
|
||||
|
||||
|
||||
Validating network. 16 nodes to process in pass 2.
|
||||
|
||||
|
||||
Validating network, final pass.
|
||||
|
||||
Validating --> labels = InputValue() : -> [10 x *]
|
||||
Validating --> ol.W = LearnableParameter() : -> [10 x 128]
|
||||
Validating --> h1.W = LearnableParameter() : -> [128 x 7 x 7 x 32]
|
||||
Validating --> conv2.c.W = LearnableParameter() : -> [32 x 400]
|
||||
Validating --> conv1.c.W = LearnableParameter() : -> [16 x 25]
|
||||
Validating --> featScale = LearnableParameter() : -> [1 x 1]
|
||||
Validating --> features = InputValue() : -> [28 x 28 x 1 x *]
|
||||
Validating --> featScaled = ElementTimes (featScale, features) : [1 x 1], [28 x 28 x 1 x *] -> [28 x 28 x 1 x *]
|
||||
|
||||
Using cuDNN convolution engine for geometry: Input: 28 x 28 x 1, Output: 28 x 28 x 16, Kernel: 5 x 5 x 1, Map: 1 x 1 x 16, Stride: 1 x 1 x 1, Sharing: (1), AutoPad: (1), LowerPad: 0, UpperPad: 0.
|
||||
Validating --> conv1.c.c.c = Convolution (conv1.c.W, featScaled) : [16 x 25], [28 x 28 x 1 x *] -> [28 x 28 x 16 x *]
|
||||
Validating --> conv1.c.c.sc = LearnableParameter() : -> [16 x 1]
|
||||
Validating --> conv1.c.c.b = LearnableParameter() : -> [16 x 1]
|
||||
Validating --> conv1.c.c.m = LearnableParameter() : -> [16 x 1]
|
||||
Validating --> conv1.c.c.isd = LearnableParameter() : -> [16 x 1]
|
||||
|
||||
Using CNTK batch normalization engine.
|
||||
Validating --> conv1.c.c.y = BatchNormalization (conv1.c.c.c, conv1.c.c.sc, conv1.c.c.b, conv1.c.c.m, conv1.c.c.isd) : [28 x 28 x 16 x *], [16 x 1], [16 x 1], [16 x 1], [16 x 1] -> [28 x 28 x 16 x *]
|
||||
Validating --> conv1.y = RectifiedLinear (conv1.c.c.y) : [28 x 28 x 16 x *] -> [28 x 28 x 16 x *]
|
||||
|
||||
Using cuDNN convolution engine for geometry: Input: 28 x 28 x 16, Output: 14 x 14 x 16, Kernel: 2 x 2 x 1, Map: 1, Stride: 2 x 2 x 1, Sharing: (1), AutoPad: (0), LowerPad: 0, UpperPad: 0.
|
||||
Validating --> pool1 = MaxPooling (conv1.y) : [28 x 28 x 16 x *] -> [14 x 14 x 16 x *]
|
||||
|
||||
Using cuDNN convolution engine for geometry: Input: 14 x 14 x 16, Output: 14 x 14 x 32, Kernel: 5 x 5 x 16, Map: 1 x 1 x 32, Stride: 1 x 1 x 16, Sharing: (1), AutoPad: (1), LowerPad: 0, UpperPad: 0.
|
||||
Validating --> conv2.c.c.c = Convolution (conv2.c.W, pool1) : [32 x 400], [14 x 14 x 16 x *] -> [14 x 14 x 32 x *]
|
||||
Validating --> conv2.c.c.sc = LearnableParameter() : -> [32 x 1]
|
||||
Validating --> conv2.c.c.b = LearnableParameter() : -> [32 x 1]
|
||||
Validating --> conv2.c.c.m = LearnableParameter() : -> [32 x 1]
|
||||
Validating --> conv2.c.c.isd = LearnableParameter() : -> [32 x 1]
|
||||
|
||||
Using CNTK batch normalization engine.
|
||||
Validating --> conv2.c.c.y = BatchNormalization (conv2.c.c.c, conv2.c.c.sc, conv2.c.c.b, conv2.c.c.m, conv2.c.c.isd) : [14 x 14 x 32 x *], [32 x 1], [32 x 1], [32 x 1], [32 x 1] -> [14 x 14 x 32 x *]
|
||||
Validating --> conv2.y = RectifiedLinear (conv2.c.c.y) : [14 x 14 x 32 x *] -> [14 x 14 x 32 x *]
|
||||
|
||||
Using cuDNN convolution engine for geometry: Input: 14 x 14 x 32, Output: 7 x 7 x 32, Kernel: 2 x 2 x 1, Map: 1, Stride: 2 x 2 x 1, Sharing: (1), AutoPad: (0), LowerPad: 0, UpperPad: 0.
|
||||
Validating --> pool2 = MaxPooling (conv2.y) : [14 x 14 x 32 x *] -> [7 x 7 x 32 x *]
|
||||
Validating --> h1.t = Times (h1.W, pool2) : [128 x 7 x 7 x 32], [7 x 7 x 32 x *] -> [128 x *]
|
||||
Validating --> h1.sc = LearnableParameter() : -> [128 x 1]
|
||||
Validating --> h1.b = LearnableParameter() : -> [128 x 1]
|
||||
Validating --> h1.m = LearnableParameter() : -> [128 x 1]
|
||||
Validating --> h1.isd = LearnableParameter() : -> [128 x 1]
|
||||
|
||||
Using CNTK batch normalization engine.
|
||||
Validating --> h1.bn = BatchNormalization (h1.t, h1.sc, h1.b, h1.m, h1.isd) : [128 x *], [128 x 1], [128 x 1], [128 x 1], [128 x 1] -> [128 x *]
|
||||
Validating --> h1.y = RectifiedLinear (h1.bn) : [128 x *] -> [128 x *]
|
||||
Validating --> ol.t = Times (ol.W, h1.y) : [10 x 128], [128 x *] -> [10 x *]
|
||||
Validating --> ol.b = LearnableParameter() : -> [10 x 1]
|
||||
Validating --> ol.z = Plus (ol.t, ol.b) : [10 x *], [10 x 1] -> [10 x 1 x *]
|
||||
Validating --> ce = CrossEntropyWithSoftmax (labels, ol.z) : [10 x *], [10 x 1 x *] -> [1]
|
||||
Validating --> err = ErrorPrediction (labels, ol.z) : [10 x *], [10 x 1 x *] -> [1]
|
||||
|
||||
|
||||
20 out of 36 nodes do not share the minibatch layout with the input data.
|
||||
|
||||
Post-processing network complete.
|
||||
|
||||
evalNodeNames are not specified, using all the default evalnodes and training criterion nodes.
|
||||
|
||||
|
||||
Allocating matrices for forward and/or backward propagation.
|
||||
UCIFastReader: Starting at epoch 0, counting lines to determine record count...
|
||||
10000 records found.
|
||||
starting epoch 0 at record count 0, and file position 0
|
||||
already there from last epoch
|
||||
RandomOrdering: 2036 retries for 10000 elements (20.4%) to ensure window condition
|
||||
RandomOrdering: recached sequence for seed 0: 2009, 1524, ...
|
||||
Minibatch[1-313]: SamplesSeen = 10000 err: ErrorPrediction/Sample = 0.009 ce: CrossEntropyWithSoftmax/Sample = 0.026436486
|
||||
Final Results: Minibatch[1-313]: SamplesSeen = 10000 err: ErrorPrediction/Sample = 0.009 ce: CrossEntropyWithSoftmax/Sample = 0.026436486 Perplexity = 1.026789
|
||||
|
||||
04/07/2016 14:05:19: Action "test" complete.
|
||||
|
||||
04/07/2016 14:05:19: __COMPLETED__
|
|
@ -1 +0,0 @@
|
|||
__COMPLETED__
|
|
@ -1 +0,0 @@
|
|||
__COMPLETED__
|
|
@ -1 +0,0 @@
|
|||
__COMPLETED__
|
|
@ -1 +0,0 @@
|
|||
__COMPLETED__
|
|
@ -0,0 +1,494 @@
|
|||
=== Running /cygdrive/c/R/CNTK3/x64/release/cntk.exe configFile=C:\R\CNTK3\Examples\Image\MNIST\Config/03_ConvBatchNorm.cntk currentDirectory=C:\R\CNTK3\Examples\Image\MNIST\Data RunDir=C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_03_ConvBatchNorm@release_gpu DataDir=C:\R\CNTK3\Examples\Image\MNIST\Data ConfigDir=C:\R\CNTK3\Examples\Image\MNIST\Config OutputDir=C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_03_ConvBatchNorm@release_gpu DeviceId=0 timestamping=true train=[reader=[randomize=none]] imageLayout="cudnn"
|
||||
-------------------------------------------------------------------
|
||||
Build info:
|
||||
|
||||
Built time: Apr 7 2016 15:32:16
|
||||
Last modified date: Thu Apr 7 09:19:53 2016
|
||||
Build type: Release
|
||||
Build target: GPU
|
||||
With 1bit-SGD: yes
|
||||
CUDA_PATH: C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v7.5
|
||||
CUB_PATH: C:\R\cub-1.4.1
|
||||
CUDNN_PATH: C:\R\cudnn-7.0-win-x64-v4.0-prod\cuda
|
||||
Build Branch: mahilleb/MNISTLinux
|
||||
Build SHA1: 5161c21b466987a144f96bad84f8763b08b05c40
|
||||
Built by mahilleb on mahilleb57
|
||||
Build Path: C:\R\CNTK3\Source\CNTK\
|
||||
-------------------------------------------------------------------
|
||||
Changed current directory to C:\R\CNTK3\Examples\Image\MNIST\Data
|
||||
04/07/2016 14:51:04: -------------------------------------------------------------------
|
||||
04/07/2016 14:51:04: Build info:
|
||||
|
||||
04/07/2016 14:51:04: Built time: Apr 7 2016 15:32:16
|
||||
04/07/2016 14:51:04: Last modified date: Thu Apr 7 09:19:53 2016
|
||||
04/07/2016 14:51:04: Build type: Release
|
||||
04/07/2016 14:51:04: Build target: GPU
|
||||
04/07/2016 14:51:04: With 1bit-SGD: yes
|
||||
04/07/2016 14:51:04: CUDA_PATH: C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v7.5
|
||||
04/07/2016 14:51:04: CUB_PATH: C:\R\cub-1.4.1
|
||||
04/07/2016 14:51:04: CUDNN_PATH: C:\R\cudnn-7.0-win-x64-v4.0-prod\cuda
|
||||
04/07/2016 14:51:04: Build Branch: mahilleb/MNISTLinux
|
||||
04/07/2016 14:51:04: Build SHA1: 5161c21b466987a144f96bad84f8763b08b05c40
|
||||
04/07/2016 14:51:04: Built by mahilleb on mahilleb57
|
||||
04/07/2016 14:51:04: Build Path: C:\R\CNTK3\Source\CNTK\
|
||||
04/07/2016 14:51:04: -------------------------------------------------------------------
|
||||
|
||||
04/07/2016 14:51:04: Running on mahilleb57 at 2016/04/07 14:51:04
|
||||
04/07/2016 14:51:04: Command line:
|
||||
C:\R\CNTK3\x64\release\cntk.exe configFile=C:\R\CNTK3\Examples\Image\MNIST\Config/03_ConvBatchNorm.cntk currentDirectory=C:\R\CNTK3\Examples\Image\MNIST\Data RunDir=C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_03_ConvBatchNorm@release_gpu DataDir=C:\R\CNTK3\Examples\Image\MNIST\Data ConfigDir=C:\R\CNTK3\Examples\Image\MNIST\Config OutputDir=C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_03_ConvBatchNorm@release_gpu DeviceId=0 timestamping=true train=[reader=[randomize=none]] imageLayout="cudnn"
|
||||
|
||||
|
||||
|
||||
04/07/2016 14:51:04: >>>>>>>>>>>>>>>>>>>> RAW CONFIG (VARIABLES NOT RESOLVED) >>>>>>>>>>>>>>>>>>>>
|
||||
04/07/2016 14:51:04: RootDir = ".."
|
||||
ConfigDir = "$RootDir$/Config"
|
||||
DataDir = "$RootDir$/Data"
|
||||
OutputDir = "$RootDir$/Output"
|
||||
ModelDir = "$OutputDir$/Models"
|
||||
deviceId = 0
|
||||
imageLayout = "cudnn"
|
||||
command = train:test
|
||||
precision = "float"
|
||||
modelPath = "$ModelDir$/03_ConvBatchNorm"
|
||||
ndlMacros = "$ConfigDir$/Macros.ndl"
|
||||
traceLevel=1
|
||||
numMBsToShowResult=500
|
||||
initOnCPUOnly=true
|
||||
train = [
|
||||
action = "train"
|
||||
NDLNetworkBuilder = [
|
||||
networkDescription = "$ConfigDir$/03_ConvBatchNorm.ndl"
|
||||
]
|
||||
SGD = [
|
||||
epochSize = 60000
|
||||
minibatchSize = 32
|
||||
learningRatesPerMB = 0.5:0.1
|
||||
momentumPerMB = 0.9
|
||||
maxEpochs = 2
|
||||
batchNormalizationBlendTimeConstant=0:1#INF
|
||||
]
|
||||
reader = [
|
||||
readerType = "UCIFastReader"
|
||||
file = "$DataDir$/Train-28x28.txt"
|
||||
features = [
|
||||
dim = 784
|
||||
start = 1
|
||||
]
|
||||
labels = [
|
||||
dim = 1
|
||||
start = 0
|
||||
labelDim = 10
|
||||
labelMappingFile = "$DataDir$/labelsmap.txt"
|
||||
]
|
||||
]
|
||||
]
|
||||
test = [
|
||||
action = "test"
|
||||
minibatchSize = 32
|
||||
modelPath=$ModelDir$/03_ConvBatchNorm
|
||||
NDLNetworkBuilder = [
|
||||
networkDescription = "$ConfigDir$/03_ConvBatchNorm.ndl"
|
||||
]
|
||||
reader = [
|
||||
readerType = "UCIFastReader"
|
||||
file = "$DataDir$/Test-28x28.txt"
|
||||
features = [
|
||||
dim = 784
|
||||
start = 1
|
||||
]
|
||||
labels = [
|
||||
dim = 1
|
||||
start = 0
|
||||
labelDim = 10
|
||||
labelMappingFile = "$DataDir$/labelsmap.txt"
|
||||
]
|
||||
]
|
||||
]
|
||||
currentDirectory=C:\R\CNTK3\Examples\Image\MNIST\Data
|
||||
RunDir=C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_03_ConvBatchNorm@release_gpu
|
||||
DataDir=C:\R\CNTK3\Examples\Image\MNIST\Data
|
||||
ConfigDir=C:\R\CNTK3\Examples\Image\MNIST\Config
|
||||
OutputDir=C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_03_ConvBatchNorm@release_gpu
|
||||
DeviceId=0
|
||||
timestamping=true
|
||||
train=[reader=[randomize=none]]
|
||||
imageLayout="cudnn"
|
||||
|
||||
04/07/2016 14:51:04: <<<<<<<<<<<<<<<<<<<< RAW CONFIG (VARIABLES NOT RESOLVED) <<<<<<<<<<<<<<<<<<<<
|
||||
|
||||
04/07/2016 14:51:04: >>>>>>>>>>>>>>>>>>>> RAW CONFIG WITH ALL VARIABLES RESOLVED >>>>>>>>>>>>>>>>>>>>
|
||||
04/07/2016 14:51:04: RootDir = ".."
|
||||
ConfigDir = "../Config"
|
||||
DataDir = "../Data"
|
||||
OutputDir = "../Output"
|
||||
ModelDir = "C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_03_ConvBatchNorm@release_gpu/Models"
|
||||
deviceId = 0
|
||||
imageLayout = "cudnn"
|
||||
command = train:test
|
||||
precision = "float"
|
||||
modelPath = "C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_03_ConvBatchNorm@release_gpu/Models/03_ConvBatchNorm"
|
||||
ndlMacros = "C:\R\CNTK3\Examples\Image\MNIST\Config/Macros.ndl"
|
||||
traceLevel=1
|
||||
numMBsToShowResult=500
|
||||
initOnCPUOnly=true
|
||||
train = [
|
||||
action = "train"
|
||||
NDLNetworkBuilder = [
|
||||
networkDescription = "C:\R\CNTK3\Examples\Image\MNIST\Config/03_ConvBatchNorm.ndl"
|
||||
]
|
||||
SGD = [
|
||||
epochSize = 60000
|
||||
minibatchSize = 32
|
||||
learningRatesPerMB = 0.5:0.1
|
||||
momentumPerMB = 0.9
|
||||
maxEpochs = 2
|
||||
batchNormalizationBlendTimeConstant=0:1#INF
|
||||
]
|
||||
reader = [
|
||||
readerType = "UCIFastReader"
|
||||
file = "C:\R\CNTK3\Examples\Image\MNIST\Data/Train-28x28.txt"
|
||||
features = [
|
||||
dim = 784
|
||||
start = 1
|
||||
]
|
||||
labels = [
|
||||
dim = 1
|
||||
start = 0
|
||||
labelDim = 10
|
||||
labelMappingFile = "C:\R\CNTK3\Examples\Image\MNIST\Data/labelsmap.txt"
|
||||
]
|
||||
]
|
||||
]
|
||||
test = [
|
||||
action = "test"
|
||||
minibatchSize = 32
|
||||
modelPath=C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_03_ConvBatchNorm@release_gpu/Models/03_ConvBatchNorm
|
||||
NDLNetworkBuilder = [
|
||||
networkDescription = "C:\R\CNTK3\Examples\Image\MNIST\Config/03_ConvBatchNorm.ndl"
|
||||
]
|
||||
reader = [
|
||||
readerType = "UCIFastReader"
|
||||
file = "C:\R\CNTK3\Examples\Image\MNIST\Data/Test-28x28.txt"
|
||||
features = [
|
||||
dim = 784
|
||||
start = 1
|
||||
]
|
||||
labels = [
|
||||
dim = 1
|
||||
start = 0
|
||||
labelDim = 10
|
||||
labelMappingFile = "C:\R\CNTK3\Examples\Image\MNIST\Data/labelsmap.txt"
|
||||
]
|
||||
]
|
||||
]
|
||||
currentDirectory=C:\R\CNTK3\Examples\Image\MNIST\Data
|
||||
RunDir=C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_03_ConvBatchNorm@release_gpu
|
||||
DataDir=C:\R\CNTK3\Examples\Image\MNIST\Data
|
||||
ConfigDir=C:\R\CNTK3\Examples\Image\MNIST\Config
|
||||
OutputDir=C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_03_ConvBatchNorm@release_gpu
|
||||
DeviceId=0
|
||||
timestamping=true
|
||||
train=[reader=[randomize=none]]
|
||||
imageLayout="cudnn"
|
||||
|
||||
04/07/2016 14:51:04: <<<<<<<<<<<<<<<<<<<< RAW CONFIG WITH ALL VARIABLES RESOLVED <<<<<<<<<<<<<<<<<<<<
|
||||
|
||||
04/07/2016 14:51:04: >>>>>>>>>>>>>>>>>>>> PROCESSED CONFIG WITH ALL VARIABLES RESOLVED >>>>>>>>>>>>>>>>>>>>
|
||||
configparameters: 03_ConvBatchNorm.cntk:command=train:test
|
||||
configparameters: 03_ConvBatchNorm.cntk:ConfigDir=C:\R\CNTK3\Examples\Image\MNIST\Config
|
||||
configparameters: 03_ConvBatchNorm.cntk:currentDirectory=C:\R\CNTK3\Examples\Image\MNIST\Data
|
||||
configparameters: 03_ConvBatchNorm.cntk:DataDir=C:\R\CNTK3\Examples\Image\MNIST\Data
|
||||
configparameters: 03_ConvBatchNorm.cntk:deviceId=0
|
||||
configparameters: 03_ConvBatchNorm.cntk:imageLayout=cudnn
|
||||
configparameters: 03_ConvBatchNorm.cntk:initOnCPUOnly=true
|
||||
configparameters: 03_ConvBatchNorm.cntk:ModelDir=C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_03_ConvBatchNorm@release_gpu/Models
|
||||
configparameters: 03_ConvBatchNorm.cntk:modelPath=C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_03_ConvBatchNorm@release_gpu/Models/03_ConvBatchNorm
|
||||
configparameters: 03_ConvBatchNorm.cntk:ndlMacros=C:\R\CNTK3\Examples\Image\MNIST\Config/Macros.ndl
|
||||
configparameters: 03_ConvBatchNorm.cntk:numMBsToShowResult=500
|
||||
configparameters: 03_ConvBatchNorm.cntk:OutputDir=C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_03_ConvBatchNorm@release_gpu
|
||||
configparameters: 03_ConvBatchNorm.cntk:precision=float
|
||||
configparameters: 03_ConvBatchNorm.cntk:RootDir=..
|
||||
configparameters: 03_ConvBatchNorm.cntk:RunDir=C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_03_ConvBatchNorm@release_gpu
|
||||
configparameters: 03_ConvBatchNorm.cntk:test=[
|
||||
action = "test"
|
||||
minibatchSize = 32
|
||||
modelPath=C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_03_ConvBatchNorm@release_gpu/Models/03_ConvBatchNorm
|
||||
NDLNetworkBuilder = [
|
||||
networkDescription = "C:\R\CNTK3\Examples\Image\MNIST\Config/03_ConvBatchNorm.ndl"
|
||||
]
|
||||
reader = [
|
||||
readerType = "UCIFastReader"
|
||||
file = "C:\R\CNTK3\Examples\Image\MNIST\Data/Test-28x28.txt"
|
||||
features = [
|
||||
dim = 784
|
||||
start = 1
|
||||
]
|
||||
labels = [
|
||||
dim = 1
|
||||
start = 0
|
||||
labelDim = 10
|
||||
labelMappingFile = "C:\R\CNTK3\Examples\Image\MNIST\Data/labelsmap.txt"
|
||||
]
|
||||
]
|
||||
]
|
||||
|
||||
configparameters: 03_ConvBatchNorm.cntk:timestamping=true
|
||||
configparameters: 03_ConvBatchNorm.cntk:traceLevel=1
|
||||
configparameters: 03_ConvBatchNorm.cntk:train=[
|
||||
action = "train"
|
||||
NDLNetworkBuilder = [
|
||||
networkDescription = "C:\R\CNTK3\Examples\Image\MNIST\Config/03_ConvBatchNorm.ndl"
|
||||
]
|
||||
SGD = [
|
||||
epochSize = 60000
|
||||
minibatchSize = 32
|
||||
learningRatesPerMB = 0.5:0.1
|
||||
momentumPerMB = 0.9
|
||||
maxEpochs = 2
|
||||
batchNormalizationBlendTimeConstant=0:1#INF
|
||||
]
|
||||
reader = [
|
||||
readerType = "UCIFastReader"
|
||||
file = "C:\R\CNTK3\Examples\Image\MNIST\Data/Train-28x28.txt"
|
||||
features = [
|
||||
dim = 784
|
||||
start = 1
|
||||
]
|
||||
labels = [
|
||||
dim = 1
|
||||
start = 0
|
||||
labelDim = 10
|
||||
labelMappingFile = "C:\R\CNTK3\Examples\Image\MNIST\Data/labelsmap.txt"
|
||||
]
|
||||
]
|
||||
] [reader=[randomize=none]]
|
||||
|
||||
04/07/2016 14:51:04: <<<<<<<<<<<<<<<<<<<< PROCESSED CONFIG WITH ALL VARIABLES RESOLVED <<<<<<<<<<<<<<<<<<<<
|
||||
04/07/2016 14:51:04: Commands: train test
|
||||
04/07/2016 14:51:04: Precision = "float"
|
||||
04/07/2016 14:51:04: CNTKModelPath: C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_03_ConvBatchNorm@release_gpu/Models/03_ConvBatchNorm
|
||||
04/07/2016 14:51:04: CNTKCommandTrainInfo: train : 2
|
||||
04/07/2016 14:51:04: CNTKCommandTrainInfo: CNTKNoMoreCommands_Total : 2
|
||||
|
||||
04/07/2016 14:51:04: ##############################################################################
|
||||
04/07/2016 14:51:04: # #
|
||||
04/07/2016 14:51:04: # Action "train" #
|
||||
04/07/2016 14:51:04: # #
|
||||
04/07/2016 14:51:04: ##############################################################################
|
||||
|
||||
04/07/2016 14:51:04: CNTKCommandTrainBegin: train
|
||||
NDLBuilder Using GPU 0
|
||||
Reading UCI file C:\R\CNTK3\Examples\Image\MNIST\Data/Train-28x28.txt
|
||||
|
||||
04/07/2016 14:51:04: Creating virgin network.
|
||||
|
||||
Post-processing network...
|
||||
|
||||
3 roots:
|
||||
ce = CrossEntropyWithSoftmax()
|
||||
err = ErrorPrediction()
|
||||
ol.z = Plus()
|
||||
|
||||
Validating network. 36 nodes to process in pass 1.
|
||||
|
||||
|
||||
h1.t Times operation: For legacy compatibility, the sample layout of left input (h1.W LearnableParameter operation) was patched to [128 x 7 x 7 x 32] (from [128 x 1568])
|
||||
|
||||
Validating network. 16 nodes to process in pass 2.
|
||||
|
||||
|
||||
Validating network, final pass.
|
||||
|
||||
Validating --> labels = InputValue() : -> [10 x *]
|
||||
Validating --> ol.W = LearnableParameter() : -> [10 x 128]
|
||||
Validating --> h1.W = LearnableParameter() : -> [128 x 7 x 7 x 32]
|
||||
Validating --> conv2.c.W = LearnableParameter() : -> [32 x 400]
|
||||
Validating --> conv1.c.W = LearnableParameter() : -> [16 x 25]
|
||||
Validating --> featScale = LearnableParameter() : -> [1 x 1]
|
||||
Validating --> features = InputValue() : -> [28 x 28 x 1 x *]
|
||||
Validating --> featScaled = ElementTimes (featScale, features) : [1 x 1], [28 x 28 x 1 x *] -> [28 x 28 x 1 x *]
|
||||
|
||||
Using cuDNN convolution engine for geometry: Input: 28 x 28 x 1, Output: 28 x 28 x 16, Kernel: 5 x 5 x 1, Map: 1 x 1 x 16, Stride: 1 x 1 x 1, Sharing: (1), AutoPad: (1), LowerPad: 0, UpperPad: 0.
|
||||
Validating --> conv1.c.c.c = Convolution (conv1.c.W, featScaled) : [16 x 25], [28 x 28 x 1 x *] -> [28 x 28 x 16 x *]
|
||||
Validating --> conv1.c.c.sc = LearnableParameter() : -> [16 x 1]
|
||||
Validating --> conv1.c.c.b = LearnableParameter() : -> [16 x 1]
|
||||
Validating --> conv1.c.c.m = LearnableParameter() : -> [16 x 1]
|
||||
Validating --> conv1.c.c.isd = LearnableParameter() : -> [16 x 1]
|
||||
|
||||
Using CNTK batch normalization engine.
|
||||
Validating --> conv1.c.c.y = BatchNormalization (conv1.c.c.c, conv1.c.c.sc, conv1.c.c.b, conv1.c.c.m, conv1.c.c.isd) : [28 x 28 x 16 x *], [16 x 1], [16 x 1], [16 x 1], [16 x 1] -> [28 x 28 x 16 x *]
|
||||
Validating --> conv1.y = RectifiedLinear (conv1.c.c.y) : [28 x 28 x 16 x *] -> [28 x 28 x 16 x *]
|
||||
|
||||
Using cuDNN convolution engine for geometry: Input: 28 x 28 x 16, Output: 14 x 14 x 16, Kernel: 2 x 2 x 1, Map: 1, Stride: 2 x 2 x 1, Sharing: (1), AutoPad: (0), LowerPad: 0, UpperPad: 0.
|
||||
Validating --> pool1 = MaxPooling (conv1.y) : [28 x 28 x 16 x *] -> [14 x 14 x 16 x *]
|
||||
|
||||
Using cuDNN convolution engine for geometry: Input: 14 x 14 x 16, Output: 14 x 14 x 32, Kernel: 5 x 5 x 16, Map: 1 x 1 x 32, Stride: 1 x 1 x 16, Sharing: (1), AutoPad: (1), LowerPad: 0, UpperPad: 0.
|
||||
Validating --> conv2.c.c.c = Convolution (conv2.c.W, pool1) : [32 x 400], [14 x 14 x 16 x *] -> [14 x 14 x 32 x *]
|
||||
Validating --> conv2.c.c.sc = LearnableParameter() : -> [32 x 1]
|
||||
Validating --> conv2.c.c.b = LearnableParameter() : -> [32 x 1]
|
||||
Validating --> conv2.c.c.m = LearnableParameter() : -> [32 x 1]
|
||||
Validating --> conv2.c.c.isd = LearnableParameter() : -> [32 x 1]
|
||||
|
||||
Using CNTK batch normalization engine.
|
||||
Validating --> conv2.c.c.y = BatchNormalization (conv2.c.c.c, conv2.c.c.sc, conv2.c.c.b, conv2.c.c.m, conv2.c.c.isd) : [14 x 14 x 32 x *], [32 x 1], [32 x 1], [32 x 1], [32 x 1] -> [14 x 14 x 32 x *]
|
||||
Validating --> conv2.y = RectifiedLinear (conv2.c.c.y) : [14 x 14 x 32 x *] -> [14 x 14 x 32 x *]
|
||||
|
||||
Using cuDNN convolution engine for geometry: Input: 14 x 14 x 32, Output: 7 x 7 x 32, Kernel: 2 x 2 x 1, Map: 1, Stride: 2 x 2 x 1, Sharing: (1), AutoPad: (0), LowerPad: 0, UpperPad: 0.
|
||||
Validating --> pool2 = MaxPooling (conv2.y) : [14 x 14 x 32 x *] -> [7 x 7 x 32 x *]
|
||||
Validating --> h1.t = Times (h1.W, pool2) : [128 x 7 x 7 x 32], [7 x 7 x 32 x *] -> [128 x *]
|
||||
Validating --> h1.sc = LearnableParameter() : -> [128 x 1]
|
||||
Validating --> h1.b = LearnableParameter() : -> [128 x 1]
|
||||
Validating --> h1.m = LearnableParameter() : -> [128 x 1]
|
||||
Validating --> h1.isd = LearnableParameter() : -> [128 x 1]
|
||||
|
||||
Using CNTK batch normalization engine.
|
||||
Validating --> h1.bn = BatchNormalization (h1.t, h1.sc, h1.b, h1.m, h1.isd) : [128 x *], [128 x 1], [128 x 1], [128 x 1], [128 x 1] -> [128 x *]
|
||||
Validating --> h1.y = RectifiedLinear (h1.bn) : [128 x *] -> [128 x *]
|
||||
Validating --> ol.t = Times (ol.W, h1.y) : [10 x 128], [128 x *] -> [10 x *]
|
||||
Validating --> ol.b = LearnableParameter() : -> [10 x 1]
|
||||
Validating --> ol.z = Plus (ol.t, ol.b) : [10 x *], [10 x 1] -> [10 x 1 x *]
|
||||
Validating --> ce = CrossEntropyWithSoftmax (labels, ol.z) : [10 x *], [10 x 1 x *] -> [1]
|
||||
Validating --> err = ErrorPrediction (labels, ol.z) : [10 x *], [10 x 1 x *] -> [1]
|
||||
|
||||
|
||||
20 out of 36 nodes do not share the minibatch layout with the input data.
|
||||
|
||||
Post-processing network complete.
|
||||
|
||||
04/07/2016 14:51:05: Created model with 36 nodes on GPU 0.
|
||||
|
||||
04/07/2016 14:51:05: Training criterion node(s):
|
||||
04/07/2016 14:51:05: ce = CrossEntropyWithSoftmax
|
||||
|
||||
04/07/2016 14:51:05: Evaluation criterion node(s):
|
||||
|
||||
04/07/2016 14:51:05: err = ErrorPrediction
|
||||
|
||||
|
||||
Allocating matrices for forward and/or backward propagation.
|
||||
04/07/2016 14:51:05: No PreCompute nodes found, skipping PreCompute step.
|
||||
|
||||
04/07/2016 14:51:05: Starting Epoch 1: learning rate per sample = 0.015625 effective momentum = 0.900000 momentum as time constant = 303.7 samples
|
||||
UCIFastReader: Starting at epoch 0, counting lines to determine record count...
|
||||
60000 records found.
|
||||
starting epoch 0 at record count 0, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:51:05: Starting minibatch loop.
|
||||
04/07/2016 14:51:07: Epoch[ 1 of 2]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.17553700; EvalErr[0]PerSample = 0.05406250; TotalTime = 1.5797s; SamplesPerSecond = 10128.8
|
||||
04/07/2016 14:51:08: Epoch[ 1 of 2]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.07510864; EvalErr[0]PerSample = 0.02443750; TotalTime = 1.2863s; SamplesPerSecond = 12438.4
|
||||
04/07/2016 14:51:10: Epoch[ 1 of 2]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.06946776; EvalErr[0]PerSample = 0.02143750; TotalTime = 1.2994s; SamplesPerSecond = 12313.4
|
||||
04/07/2016 14:51:11: Finished Epoch[ 1 of 2]: [Training Set] TrainLossPerSample = 0.096543156; TotalSamplesSeen = 60000; EvalErrPerSample = 0.03005; AvgLearningRatePerSample = 0.015625; EpochTime=5.72121
|
||||
04/07/2016 14:51:11: SGD: Saving checkpoint model 'C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_03_ConvBatchNorm@release_gpu/Models/03_ConvBatchNorm.1'
|
||||
Setting batch normalization blend time constant to 1.#INF.
|
||||
|
||||
04/07/2016 14:51:11: Starting Epoch 2: learning rate per sample = 0.003125 effective momentum = 0.900000 momentum as time constant = 303.7 samples
|
||||
starting epoch 1 at record count 60000, and file position 0
|
||||
already there from last epoch
|
||||
|
||||
04/07/2016 14:51:11: Starting minibatch loop.
|
||||
04/07/2016 14:51:12: Epoch[ 2 of 2]-Minibatch[ 1- 500, 26.67%]: SamplesSeen = 16000; TrainLossPerSample = 0.02842559; EvalErr[0]PerSample = 0.00812500; TotalTime = 1.3239s; SamplesPerSecond = 12085.6
|
||||
04/07/2016 14:51:13: Epoch[ 2 of 2]-Minibatch[ 501-1000, 53.33%]: SamplesSeen = 16000; TrainLossPerSample = 0.01900919; EvalErr[0]PerSample = 0.00600000; TotalTime = 1.2822s; SamplesPerSecond = 12479.0
|
||||
04/07/2016 14:51:15: Epoch[ 2 of 2]-Minibatch[1001-1500, 80.00%]: SamplesSeen = 16000; TrainLossPerSample = 0.02238147; EvalErr[0]PerSample = 0.00650000; TotalTime = 1.3364s; SamplesPerSecond = 11972.6
|
||||
04/07/2016 14:51:16: Finished Epoch[ 2 of 2]: [Training Set] TrainLossPerSample = 0.02144276; TotalSamplesSeen = 120000; EvalErrPerSample = 0.0062833335; AvgLearningRatePerSample = 0.003125; EpochTime=4.92934
|
||||
04/07/2016 14:51:16: SGD: Saving checkpoint model 'C:\cygwin64\tmp\cntk-test-20160407154856.106529\Examples\Image\MNIST_03_ConvBatchNorm@release_gpu/Models/03_ConvBatchNorm'
|
||||
04/07/2016 14:51:16: CNTKCommandTrainEnd: train
|
||||
|
||||
04/07/2016 14:51:16: Action "train" complete.
|
||||
|
||||
|
||||
04/07/2016 14:51:16: ##############################################################################
|
||||
04/07/2016 14:51:16: # #
|
||||
04/07/2016 14:51:16: # Action "test" #
|
||||
04/07/2016 14:51:16: # #
|
||||
04/07/2016 14:51:16: ##############################################################################
|
||||
|
||||
Reading UCI file C:\R\CNTK3\Examples\Image\MNIST\Data/Test-28x28.txt
|
||||
|
||||
Post-processing network...
|
||||
|
||||
3 roots:
|
||||
ce = CrossEntropyWithSoftmax()
|
||||
err = ErrorPrediction()
|
||||
ol.z = Plus()
|
||||
|
||||
Validating network. 36 nodes to process in pass 1.
|
||||
|
||||
|
||||
Validating network. 16 nodes to process in pass 2.
|
||||
|
||||
|
||||
Validating network, final pass.
|
||||
|
||||
Validating --> labels = InputValue() : -> [10 x *]
|
||||
Validating --> ol.W = LearnableParameter() : -> [10 x 128]
|
||||
Validating --> h1.W = LearnableParameter() : -> [128 x 7 x 7 x 32]
|
||||
Validating --> conv2.c.W = LearnableParameter() : -> [32 x 400]
|
||||
Validating --> conv1.c.W = LearnableParameter() : -> [16 x 25]
|
||||
Validating --> featScale = LearnableParameter() : -> [1 x 1]
|
||||
Validating --> features = InputValue() : -> [28 x 28 x 1 x *]
|
||||
Validating --> featScaled = ElementTimes (featScale, features) : [1 x 1], [28 x 28 x 1 x *] -> [28 x 28 x 1 x *]
|
||||
|
||||
Using cuDNN convolution engine for geometry: Input: 28 x 28 x 1, Output: 28 x 28 x 16, Kernel: 5 x 5 x 1, Map: 1 x 1 x 16, Stride: 1 x 1 x 1, Sharing: (1), AutoPad: (1), LowerPad: 0, UpperPad: 0.
|
||||
Validating --> conv1.c.c.c = Convolution (conv1.c.W, featScaled) : [16 x 25], [28 x 28 x 1 x *] -> [28 x 28 x 16 x *]
|
||||
Validating --> conv1.c.c.sc = LearnableParameter() : -> [16 x 1]
|
||||
Validating --> conv1.c.c.b = LearnableParameter() : -> [16 x 1]
|
||||
Validating --> conv1.c.c.m = LearnableParameter() : -> [16 x 1]
|
||||
Validating --> conv1.c.c.isd = LearnableParameter() : -> [16 x 1]
|
||||
|
||||
Using CNTK batch normalization engine.
|
||||
Validating --> conv1.c.c.y = BatchNormalization (conv1.c.c.c, conv1.c.c.sc, conv1.c.c.b, conv1.c.c.m, conv1.c.c.isd) : [28 x 28 x 16 x *], [16 x 1], [16 x 1], [16 x 1], [16 x 1] -> [28 x 28 x 16 x *]
|
||||
Validating --> conv1.y = RectifiedLinear (conv1.c.c.y) : [28 x 28 x 16 x *] -> [28 x 28 x 16 x *]
|
||||
|
||||
Using cuDNN convolution engine for geometry: Input: 28 x 28 x 16, Output: 14 x 14 x 16, Kernel: 2 x 2 x 1, Map: 1, Stride: 2 x 2 x 1, Sharing: (1), AutoPad: (0), LowerPad: 0, UpperPad: 0.
|
||||
Validating --> pool1 = MaxPooling (conv1.y) : [28 x 28 x 16 x *] -> [14 x 14 x 16 x *]
|
||||
|
||||
Using cuDNN convolution engine for geometry: Input: 14 x 14 x 16, Output: 14 x 14 x 32, Kernel: 5 x 5 x 16, Map: 1 x 1 x 32, Stride: 1 x 1 x 16, Sharing: (1), AutoPad: (1), LowerPad: 0, UpperPad: 0.
|
||||
Validating --> conv2.c.c.c = Convolution (conv2.c.W, pool1) : [32 x 400], [14 x 14 x 16 x *] -> [14 x 14 x 32 x *]
|
||||
Validating --> conv2.c.c.sc = LearnableParameter() : -> [32 x 1]
|
||||
Validating --> conv2.c.c.b = LearnableParameter() : -> [32 x 1]
|
||||
Validating --> conv2.c.c.m = LearnableParameter() : -> [32 x 1]
|
||||
Validating --> conv2.c.c.isd = LearnableParameter() : -> [32 x 1]
|
||||
|
||||
Using CNTK batch normalization engine.
|
||||
Validating --> conv2.c.c.y = BatchNormalization (conv2.c.c.c, conv2.c.c.sc, conv2.c.c.b, conv2.c.c.m, conv2.c.c.isd) : [14 x 14 x 32 x *], [32 x 1], [32 x 1], [32 x 1], [32 x 1] -> [14 x 14 x 32 x *]
|
||||
Validating --> conv2.y = RectifiedLinear (conv2.c.c.y) : [14 x 14 x 32 x *] -> [14 x 14 x 32 x *]
|
||||
|
||||
Using cuDNN convolution engine for geometry: Input: 14 x 14 x 32, Output: 7 x 7 x 32, Kernel: 2 x 2 x 1, Map: 1, Stride: 2 x 2 x 1, Sharing: (1), AutoPad: (0), LowerPad: 0, UpperPad: 0.
|
||||
Validating --> pool2 = MaxPooling (conv2.y) : [14 x 14 x 32 x *] -> [7 x 7 x 32 x *]
|
||||
Validating --> h1.t = Times (h1.W, pool2) : [128 x 7 x 7 x 32], [7 x 7 x 32 x *] -> [128 x *]
|
||||
Validating --> h1.sc = LearnableParameter() : -> [128 x 1]
|
||||
Validating --> h1.b = LearnableParameter() : -> [128 x 1]
|
||||
Validating --> h1.m = LearnableParameter() : -> [128 x 1]
|
||||
Validating --> h1.isd = LearnableParameter() : -> [128 x 1]
|
||||
|
||||
Using CNTK batch normalization engine.
|
||||
Validating --> h1.bn = BatchNormalization (h1.t, h1.sc, h1.b, h1.m, h1.isd) : [128 x *], [128 x 1], [128 x 1], [128 x 1], [128 x 1] -> [128 x *]
|
||||
Validating --> h1.y = RectifiedLinear (h1.bn) : [128 x *] -> [128 x *]
|
||||
Validating --> ol.t = Times (ol.W, h1.y) : [10 x 128], [128 x *] -> [10 x *]
|
||||
Validating --> ol.b = LearnableParameter() : -> [10 x 1]
|
||||
Validating --> ol.z = Plus (ol.t, ol.b) : [10 x *], [10 x 1] -> [10 x 1 x *]
|
||||
Validating --> ce = CrossEntropyWithSoftmax (labels, ol.z) : [10 x *], [10 x 1 x *] -> [1]
|
||||
Validating --> err = ErrorPrediction (labels, ol.z) : [10 x *], [10 x 1 x *] -> [1]
|
||||
|
||||
|
||||
20 out of 36 nodes do not share the minibatch layout with the input data.
|
||||
|
||||
Post-processing network complete.
|
||||
|
||||
evalNodeNames are not specified, using all the default evalnodes and training criterion nodes.
|
||||
|
||||
|
||||
Allocating matrices for forward and/or backward propagation.
|
||||
UCIFastReader: Starting at epoch 0, counting lines to determine record count...
|
||||
10000 records found.
|
||||
starting epoch 0 at record count 0, and file position 0
|
||||
already there from last epoch
|
||||
RandomOrdering: 1989 retries for 10000 elements (19.9%) to ensure window condition
|
||||
RandomOrdering: recached sequence for seed 0: 2334, 3830, ...
|
||||
Minibatch[1-313]: SamplesSeen = 10000 err: ErrorPrediction/Sample = 0.0077 ce: CrossEntropyWithSoftmax/Sample = 0.023818888
|
||||
Final Results: Minibatch[1-313]: SamplesSeen = 10000 err: ErrorPrediction/Sample = 0.0077 ce: CrossEntropyWithSoftmax/Sample = 0.023818888 Perplexity = 1.0241048
|
||||
|
||||
04/07/2016 14:51:16: Action "test" complete.
|
||||
|
||||
04/07/2016 14:51:16: __COMPLETED__
|
|
@ -3,14 +3,35 @@
|
|||
. $TEST_ROOT_DIR/run-test-common
|
||||
|
||||
ConfigDir=$TEST_DIR/../../../../../../Examples/Image/MNIST/Config
|
||||
if [ "$OS" == "Windows_NT" ]; then
|
||||
CleanDataDir=$(cygpath -aw $DataDir)
|
||||
else
|
||||
CleanDataDir=$DataDir
fi
|
||||
|
||||
if [[ ! -d $TEST_DATA_DIR || ! -e $TEST_DATA_DIR/Train-28x28.txt || ! -e $TEST_DATA_DIR/Test-28x28.txt ]]; then
|
||||
# Cannot find test data locally.
|
||||
# Try external test data directory (not part of the CNTK repository) as an alternative.
|
||||
if [[ -d "$CNTK_EXTERNAL_TESTDATA_SOURCE_DIRECTORY" ]]; then
|
||||
if [ "$OS" == "Windows_NT" ]; then
|
||||
DataSourceDir=`cygpath -au $CNTK_EXTERNAL_TESTDATA_SOURCE_DIRECTORY`/Image/MNIST/v0
|
||||
else
|
||||
DataSourceDir=$CNTK_EXTERNAL_TESTDATA_SOURCE_DIRECTORY/Image/MNIST/v0
|
||||
fi
|
||||
|
||||
# Copy the test data to the test run directory
|
||||
DataDir=$TEST_RUN_DIR/TestData
|
||||
mkdir $DataDir
|
||||
cp -R $DataSourceDir/* $DataDir || exit $?
|
||||
Copied=1
|
||||
else
|
||||
echo Error: cannot find data. Please see Examples/Image/MNIST/README.md for instructions to get it.
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# cntkrun <CNTK config file name> <additional CNTK args>
|
||||
imageLayout=cudnn
|
||||
|
||||
cntkrun 03_ConvBatchNorm.cntk "train=[reader=[file=$CleanDataDir/Train.txt]] test=[reader=[file=$CleanDataDir/Test.txt]] train=[SGD=[maxEpochs=1]] train=[SGD=[epochSize=128]] train=[reader=[randomize=none]] imageLayout=\"$imageLayout\"" || exit $?
|
||||
# Note: explicitly turn off randomization, as it crashes the reader.
|
||||
cntkrun 03_ConvBatchNorm.cntk "train=[reader=[randomize=none]] imageLayout=\"$imageLayout\""
|
||||
ExitCode=$?
|
||||
|
||||
# Delete the test data if copied
|
||||
[[ "$Copied" -eq "1" ]] && rm -rf "$DataDir"
|
||||
|
||||
exit $ExitCode
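# Rough sketch of what the cntkrun helper (defined in run-test-common, not shown in this
# diff) appears to expand to, based on the command line recorded in the baseline above;
# the variable names used here are assumptions, not the actual helper implementation:
#   cntk configFile=$ConfigDir/03_ConvBatchNorm.cntk currentDirectory=$DataDir \
#        RunDir=$RunDir DataDir=$DataDir ConfigDir=$ConfigDir OutputDir=$RunDir \
#        DeviceId=$DeviceId timestamping=true \
#        train=[reader=[randomize=none]] imageLayout="cudnn"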
|
||||
|
|
|
@ -1,13 +1,35 @@
|
|||
dataDir: ../../../../Image/Data
|
||||
dataDir: ../../../../../../Examples/Image/MNIST/Data
|
||||
|
||||
tags:
|
||||
# running on every BVT job in 'E' (Examples) leg
|
||||
#- bvt-e ((flavor=='debug') ^ (device=='cpu'))
|
||||
# running every Nightly job in 'E' (Examples) leg
|
||||
#- nightly-e
|
||||
# In BVT, run Release GPU (~ 30 - 60 sec)
|
||||
- bvt-e (build_sku=='gpu') and (device=='gpu') and (flavor=='release')
|
||||
# In Nightly, additionally run Debug GPU
|
||||
# Note: Batch normalization training on CPU is not yet implemented.
|
||||
- nightly-e (build_sku=='gpu') and (device=='gpu')
|
||||
|
||||
testCases:
|
||||
CNTK Run must be completed:
|
||||
patterns:
|
||||
- __COMPLETED__
|
||||
|
||||
Must train epochs in exactly same order and parameters:
|
||||
patterns:
|
||||
- Starting Epoch {{integer}}
|
||||
- learning rate per sample = {{float}}
|
||||
- momentum = {{float}}
|
||||
|
||||
Epochs (with low train loss) must be finished with expected results:
|
||||
patterns:
|
||||
- Finished Epoch[{{integer}} of {{integer}}]
|
||||
- TrainLossPerSample = 0.0
|
||||
- TrainLossPerSample = {{float,tolerance=0.05}}
|
||||
- EvalErrPerSample = {{float,tolerance=0.005}}
|
||||
- AvgLearningRatePerSample = {{float,tolerance=0.1%}}
|
||||
|
||||
Per-minibatch (with low train loss) training results must match:
|
||||
patterns:
|
||||
- Epoch[{{integer}} of {{integer}}]-Minibatch[{{integer}}-{{integer}}
|
||||
- SamplesSeen = {{integer}}
|
||||
- TrainLossPerSample = 0.0
|
||||
- TrainLossPerSample = {{float,tolerance=0.05}}
|
||||
- EvalErr[0]PerSample = {{float,tolerance=0.005}}
|
||||
|
|
|
@ -0,0 +1,21 @@
|
|||
Test runtimes
|
||||
|
||||
Windows: 24 x Intel(R) Xeon(R) CPU E5-2620 v3 @ 2.40GHz, GeForce GTX 960, 2 GB
|
||||
Linux: 12 x Intel(R) Xeon(R) CPU E5-1650 0 @ 3.20GHz, GeForce GTX 960, 2 GB
|
||||
|
||||
Windows: Running test Examples/Image/MNIST/01_OneHidden (debug gpu) - [OK] 212.24 sec
|
||||
Linux: Running test Examples/Image/MNIST/01_OneHidden (debug gpu) - [OK] 91.29 sec
|
||||
Windows: Running test Examples/Image/MNIST/01_OneHidden (release cpu) - [OK] 870.41 sec
|
||||
Linux: Running test Examples/Image/MNIST/01_OneHidden (release cpu) - [OK] 48.72 sec
|
||||
Windows: Running test Examples/Image/MNIST/01_OneHidden (release gpu) - [OK] 47.53 sec
|
||||
Linux: Running test Examples/Image/MNIST/01_OneHidden (release gpu) - [OK] 29.88 sec
|
||||
Windows: Running test Examples/Image/MNIST/02_Convolution (debug gpu) - [OK] 195.76 sec
|
||||
Linux: Running test Examples/Image/MNIST/02_Convolution (debug gpu) - [OK] 107.40 sec
|
||||
Windows: Running test Examples/Image/MNIST/02_Convolution (release cpu) - [OK] 8557.53 sec
|
||||
Linux: Running test Examples/Image/MNIST/02_Convolution (release cpu) - [OK] 8417.79 sec
|
||||
Windows: Running test Examples/Image/MNIST/02_Convolution (release gpu) - [OK] 72.53 sec
|
||||
Linux: Running test Examples/Image/MNIST/02_Convolution (release gpu) - [OK] 65.87 sec
|
||||
Windows: Running test Examples/Image/MNIST/03_ConvBatchNorm (debug gpu) - [OK] 49.76 sec
|
||||
Linux: Running test Examples/Image/MNIST/03_ConvBatchNorm (debug gpu) - [OK] 22.06 sec
|
||||
Linux: Running test Examples/Image/MNIST/03_ConvBatchNorm (release gpu) - [OK] 11.43 sec
|
||||
Windows: Running test Examples/Image/MNIST/03_ConvBatchNorm (release gpu) - [OK] 12.87 sec
|
|
@ -52,9 +52,9 @@ COMMAND: currentDirectory=\\storage.ccp.philly.selfhost.corp.microsoft.com\pu
|
|||
|
||||
--- MNIST:
|
||||
|
||||
COMMAND: configFile=$(SolutionDir)Examples/Image/MNIST/Config/01_OneHidden.cntk currentDirectory=$(SolutionDir)Tests/EndToEndTests/Image/Data RunDir=$(SolutionDir)Tests/EndToEndTests/RunDir/Image/MNIST_01_OneHidden DataDir=$(SolutionDir)Tests/EndToEndTests/Image/Data ConfigDir=$(SolutionDir)Examples/Image/MNIST/Config OutputDir=$(SolutionDir)Tests/EndToEndTests/RunDir/Image/MNIST_01_OneHidden DeviceId=0 MNISTtrain=[reader=[file=$(SolutionDir)Tests/EndToEndTests/Image/Data/Train.txt]] MNISTtest=[reader=[file=$(SolutionDir)Tests/EndToEndTests/Image/Data/Test.txt]] MNISTtrain=[SGD=[maxEpochs=1]] MNISTtrain=[SGD=[epochSize=100]] MNISTtrain=[reader=[randomize=none]] imageLayout="cudnn" makeMode=false
|
||||
COMMAND: configFile=$(SolutionDir)Examples/Image/MNIST/Config/01_OneHidden.cntk currentDirectory=$(SolutionDir)Tests/EndToEndTests/Image/Data RunDir=$(SolutionDir)Tests/EndToEndTests/RunDir/Image/MNIST_01_OneHidden DataDir=$(SolutionDir)Tests/EndToEndTests/Image/Data ConfigDir=$(SolutionDir)Examples/Image/MNIST/Config OutputDir=$(SolutionDir)Tests/EndToEndTests/RunDir/Image/MNIST_01_OneHidden DeviceId=0 train=[reader=[file=$(SolutionDir)Tests/EndToEndTests/Image/Data/Train.txt]] test=[reader=[file=$(SolutionDir)Tests/EndToEndTests/Image/Data/Test.txt]] train=[SGD=[maxEpochs=1]] train=[SGD=[epochSize=100]] train=[reader=[randomize=none]] imageLayout="cudnn" makeMode=false
|
||||
|
||||
COMMAND: configFile=$(SolutionDir)Examples/Image/MNIST/Config/02_Convolution.cntk currentDirectory=$(SolutionDir)Tests/EndToEndTests/Image/Data RunDir=$(SolutionDir)Tests/EndToEndTests/RunDir/Image/MNIST_02_Convolution DataDir=$(SolutionDir)Tests/EndToEndTests/Image/Data ConfigDir=$(SolutionDir)Examples/Image/MNIST/Config OutputDir=$(SolutionDir)Tests/EndToEndTests/RunDir/Image/MNIST_02_Convolution DeviceId=0 train=[reader=[file=$(SolutionDir)Tests/EndToEndTests/Image/Data/Train.txt]] MNISTtest=[reader=[file=$(SolutionDir)Tests/EndToEndTests/Image/Data/Test.txt]] train=[SGD=[maxEpochs=1]] train=[SGD=[epochSize=100]] train=[reader=[randomize=none]] imageLayout="cudnn" makeMode=false
|
||||
COMMAND: configFile=$(SolutionDir)Examples/Image/MNIST/Config/02_Convolution.cntk currentDirectory=$(SolutionDir)Tests/EndToEndTests/Image/Data RunDir=$(SolutionDir)Tests/EndToEndTests/RunDir/Image/MNIST_02_Convolution DataDir=$(SolutionDir)Tests/EndToEndTests/Image/Data ConfigDir=$(SolutionDir)Examples/Image/MNIST/Config OutputDir=$(SolutionDir)Tests/EndToEndTests/RunDir/Image/MNIST_02_Convolution DeviceId=0 train=[reader=[file=$(SolutionDir)Tests/EndToEndTests/Image/Data/Train.txt]] test=[reader=[file=$(SolutionDir)Tests/EndToEndTests/Image/Data/Test.txt]] train=[SGD=[maxEpochs=1]] train=[SGD=[epochSize=100]] train=[reader=[randomize=none]] imageLayout="cudnn" makeMode=false
|
||||
|
||||
TODO out-of-date:
|
||||
COMMAND: currentDirectory=$(SolutionDir)ExampleSetups\Image\MNIST configFile=02_Conv.cntk configName=02_Conv
|
||||
|
|
|
@ -106,6 +106,17 @@ except ImportError:
|
|||
thisDir = os.path.dirname(os.path.realpath(__file__))
|
||||
windows = os.getenv("OS")=="Windows_NT"
|
||||
|
||||
def cygpath(path, relative=False):
|
||||
if windows:
|
||||
if path.startswith('/'):
|
||||
return path
|
||||
path = os.path.abspath(path)
|
||||
if not relative and path[1]==':': # Windows drive
|
||||
path = '/cygdrive/' + path[0] + path[2:]
|
||||
path = path.replace('\\','/')
|
||||
|
||||
return path
|
||||
|
||||
# This class encapsulates an instance of the test
|
||||
class Test:
|
||||
# "Suite/TestName" => instance of Test
|
||||
|
@ -120,7 +131,7 @@ class Test:
|
|||
self.fullName = suite + "/" + name
|
||||
|
||||
# computing location of test directory (yml file directory)
|
||||
self.testDir = os.path.dirname(pathToYmlFile)
|
||||
self.testDir = cygpath(os.path.dirname(pathToYmlFile), relative=True)
|
||||
|
||||
# parsing yml file with testcases
|
||||
with open(pathToYmlFile, "r") as f:
|
||||
|
@@ -229,6 +240,7 @@ class Test:

    # preparing run directory
    runDir = os.path.join(args.run_dir, "{0}_{1}@{2}_{3}".format(self.suite, self.name, flavor, device))
    runDir = cygpath(runDir)
    if not os.path.isdir(runDir):
      os.makedirs(runDir)
@@ -321,21 +333,21 @@ class Test:
      for testCase in self.testCases:
        testCaseRunResult = testCase.processBaseline(allLines)
        if not testCaseRunResult.succeeded:
          result.succeeded = False
          result.succeeded = False
        result.testCaseRunResults.append(testCaseRunResult)

    if baselineFile == None:
      baselineFile = self.newBaselineFilePath(device)

    if result.succeeded:
      if args.verbose:
      if args.update_baseline:
        if args.verbose:
        if args.update_baseline:
          six.print_("Updating baseline file " + baselineFile)
        else:
        else:
          six.print_("Creating baseline file " + baselineFile)

        with open(baselineFile, "w") as f:
          f.write("\n".join(allLines))
      with open(baselineFile, "w") as f:
        f.write("\n".join(allLines))

    return result
@@ -362,7 +374,7 @@ class Test:
    for f in ["." + flavor.lower(), ""]:
      for d in ["." + device.lower(), ""]:
        candidateName = "baseline" + o + f + d + ".txt"
        fullPath = os.path.join(self.testDir, candidateName)
        fullPath = cygpath(os.path.join(self.testDir, candidateName), relative=True)
        if os.path.isfile(fullPath):
          return fullPath
    return None
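For orientation, the loops above try progressively less specific baseline file names and return the first one that exists. Below is a hedged sketch of the resulting candidate order, assuming an outer loop over an OS suffix (which sits outside this hunk) and flavor=debug, device=cpu; the ".windows" suffix is an assumption:

# Illustration only; the ".windows" OS suffix and the outer "o" loop are assumptions.
flavor, device = "debug", "cpu"
candidates = ["baseline" + o + f + d + ".txt"
              for o in [".windows", ""]
              for f in ["." + flavor.lower(), ""]
              for d in ["." + device.lower(), ""]]
print(candidates)
# ['baseline.windows.debug.cpu.txt', 'baseline.windows.debug.txt',
#  'baseline.windows.cpu.txt', 'baseline.windows.txt',
#  'baseline.debug.cpu.txt', 'baseline.debug.txt',
#  'baseline.cpu.txt', 'baseline.txt']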
@@ -2,8 +2,8 @@ dataDir: ./
tags:
  # running on every BVT job in 'S' (Speech) leg in Debug-GPU and Release-CPU configurations:
  - bvt-s (build_sku == 'gpu') and ((flavor=='debug') ^ (device=='cpu'))
  # running unconditionally on every Nightly job in 'S' leg
  - nightly-s (build_sku == 'gpu')
  # For Nightly, run everything except for Windows Debug-CPU (which runs ~ 7200 sec)
  - nightly-s (build_sku == 'gpu') and ((os=='linux') or (device=='gpu') or (flavor=='release'))

testCases:
  Must train epochs in exactly same order and parameters:
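Each tag line above pairs a tag name (bvt-s, nightly-s) with a Python-style predicate over the build and run dimensions. The sketch below only illustrates the expression semantics, assuming build_sku, flavor, device, and os are available as plain strings; it is not the test driver's actual implementation:

# Hedged sketch: evaluate a tag predicate for one configuration.
def tag_applies(predicate, build_sku, flavor, device, os):
    env = {"build_sku": build_sku, "flavor": flavor, "device": device, "os": os}
    # The yml predicates use only comparisons, and/or, and ^ (xor on booleans).
    return bool(eval(predicate, {"__builtins__": {}}, env))

pred = "(build_sku == 'gpu') and ((flavor=='debug') ^ (device=='cpu'))"
print(tag_applies(pred, "gpu", "debug", "gpu", "windows"))    # True: Debug-GPU
print(tag_applies(pred, "gpu", "release", "gpu", "windows"))  # False: Release-GPU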
@@ -45,16 +45,17 @@
    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
  </ImportGroup>
  <PropertyGroup Label="UserMacros" />
  <PropertyGroup Condition="$(DebugBuild)">
    <LinkIncremental>true</LinkIncremental>
    <IncludePath>..\..\..\Source\Math;..\..\..\Source\Common\include;$(VCInstallDir)include;$(WindowsSDK_IncludePath);</IncludePath>
    <LibraryPath>$(SolutionDir)$(Platform)\$(Configuration);$(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(WindowsSDK_LibraryPath_x64);</LibraryPath>
  </PropertyGroup>
  <PropertyGroup Condition="$(ReleaseBuild)">
    <LinkIncremental>false</LinkIncremental>
    <IncludePath>..\..\..\Source\Math;..\..\..\Source\Common\include;$(VCInstallDir)include;$(WindowsSDK_IncludePath);</IncludePath>
    <LibraryPath>$(SolutionDir)$(Platform)\$(Configuration);$(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(WindowsSDK_LibraryPath_x64);</LibraryPath>
  <PropertyGroup>
    <LinkIncremental>$(DebugBuild)</LinkIncremental>
  </PropertyGroup>
  <ItemDefinitionGroup>
    <ClCompile>
      <AdditionalIncludeDirectories>$(SolutionDir)Source\Math;$(SolutionDir)Source\Common\Include</AdditionalIncludeDirectories>
    </ClCompile>
    <Link>
      <AdditionalLibraryDirectories>$(OutDir)</AdditionalLibraryDirectories>
    </Link>
  </ItemDefinitionGroup>
  <ItemDefinitionGroup Condition="$(DebugBuild)">
    <ClCompile>
      <PrecompiledHeader>Use</PrecompiledHeader>
@@ -100,24 +101,20 @@
  </ItemGroup>
  <ItemGroup>
    <ClCompile Include="..\..\..\Source\Common\Config.cpp">
      <PrecompiledHeader Condition="$(DebugBuild)">NotUsing</PrecompiledHeader>
      <PrecompiledHeader Condition="$(ReleaseBuild)">NotUsing</PrecompiledHeader>
      <PrecompiledHeader>NotUsing</PrecompiledHeader>
    </ClCompile>
    <ClCompile Include="..\..\..\Source\Common\DataReader.cpp" />
    <ClCompile Include="..\..\..\Source\Common\Eval.cpp" />
    <ClCompile Include="..\..\..\Source\Common\ExceptionWithCallStack.cpp" />
    <ClCompile Include="..\..\..\Source\Common\File.cpp">
      <PrecompiledHeader Condition="$(DebugBuild)">NotUsing</PrecompiledHeader>
      <PrecompiledHeader Condition="$(ReleaseBuild)">NotUsing</PrecompiledHeader>
      <PrecompiledHeader>NotUsing</PrecompiledHeader>
    </ClCompile>
    <ClCompile Include="..\..\..\Source\Common\fileutil.cpp">
      <PrecompiledHeader Condition="$(DebugBuild)">NotUsing</PrecompiledHeader>
      <PrecompiledHeader Condition="$(ReleaseBuild)">NotUsing</PrecompiledHeader>
      <PrecompiledHeader>NotUsing</PrecompiledHeader>
    </ClCompile>
    <ClCompile Include="CNTKEvalTest.cpp" />
    <ClCompile Include="stdafx.cpp">
      <PrecompiledHeader Condition="$(DebugBuild)">Create</PrecompiledHeader>
      <PrecompiledHeader Condition="$(ReleaseBuild)">Create</PrecompiledHeader>
      <PrecompiledHeader>Create</PrecompiledHeader>
    </ClCompile>
  </ItemGroup>
  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
@@ -72,17 +72,18 @@
  </PropertyGroup>
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
    <LinkIncremental>true</LinkIncremental>
    <IncludePath>..\..\..\Source\Math;$(VCInstallDir)include;$(WindowsSDK_IncludePath);</IncludePath>
    <LibraryPath>$(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(WindowsSDK_LibraryPath_x64);</LibraryPath>
  </PropertyGroup>
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
    <LinkIncremental>false</LinkIncremental>
  </PropertyGroup>
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
    <LinkIncremental>false</LinkIncremental>
    <IncludePath>..\..\..\Source\Math;$(VCInstallDir)include;$(WindowsSDK_IncludePath);</IncludePath>
    <LibraryPath>$(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(WindowsSDK_LibraryPath_x64);</LibraryPath>
  </PropertyGroup>
  <ItemDefinitionGroup>
    <ClCompile>
      <AdditionalIncludeDirectories>$(SolutionDir)Source\Common\Include;$(SolutionDir)Source\Math</AdditionalIncludeDirectories>
    </ClCompile>
  </ItemDefinitionGroup>
  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
    <ClCompile>
      <PrecompiledHeader>NotUsing</PrecompiledHeader>
@@ -90,7 +91,6 @@
      <Optimization>Disabled</Optimization>
      <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
      <SDLCheck>true</SDLCheck>
      <AdditionalIncludeDirectories>..\..\..\Source\Common\include</AdditionalIncludeDirectories>
    </ClCompile>
    <Link>
      <SubSystem>Console</SubSystem>
@@ -104,7 +104,6 @@
      <Optimization>Disabled</Optimization>
      <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
      <SDLCheck>true</SDLCheck>
      <AdditionalIncludeDirectories>..\..\..\Source\Common\include</AdditionalIncludeDirectories>
    </ClCompile>
    <Link>
      <SubSystem>Console</SubSystem>
@@ -121,7 +120,6 @@
      <IntrinsicFunctions>true</IntrinsicFunctions>
      <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
      <SDLCheck>true</SDLCheck>
      <AdditionalIncludeDirectories>..\..\..\Source\Common\include</AdditionalIncludeDirectories>
    </ClCompile>
    <Link>
      <SubSystem>Console</SubSystem>
@@ -139,7 +137,6 @@
      <IntrinsicFunctions>true</IntrinsicFunctions>
      <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
      <SDLCheck>true</SDLCheck>
      <AdditionalIncludeDirectories>..\..\..\Source\Common\include</AdditionalIncludeDirectories>
    </ClCompile>
    <Link>
      <SubSystem>Console</SubSystem>
@@ -43,13 +43,17 @@
    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
  </ImportGroup>
  <PropertyGroup Label="UserMacros" />
  <PropertyGroup Condition="$(DebugBuild)">
    <LinkIncremental>true</LinkIncremental>
    <IncludePath>..\..\..\Source\Common\include\;$(VCInstallDir)include;$(WindowsSDK_IncludePath);</IncludePath>
  </PropertyGroup>
  <PropertyGroup Condition="$(ReleaseBuild)">
    <LinkIncremental>false</LinkIncremental>
  <PropertyGroup>
    <LinkIncremental>$(DebugBuild)</LinkIncremental>
  </PropertyGroup>
  <ItemDefinitionGroup>
    <ClCompile>
      <AdditionalIncludeDirectories>$(SolutionDir)Source\Math;$(SolutionDir)Source\Common\Include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
    </ClCompile>
    <Link>
      <AdditionalLibraryDirectories>$(OutDir)</AdditionalLibraryDirectories>
    </Link>
  </ItemDefinitionGroup>
  <ItemDefinitionGroup Condition="$(DebugBuild)">
    <ClCompile>
      <PrecompiledHeader>Use</PrecompiledHeader>
@@ -58,13 +62,11 @@
      <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
      <SDLCheck>true</SDLCheck>
      <TreatWarningAsError>true</TreatWarningAsError>
      <AdditionalIncludeDirectories>..\..\..\Source\Math; ..\..\..\Source\Common\Include; %(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
    </ClCompile>
    <Link>
      <SubSystem>Console</SubSystem>
      <GenerateDebugInformation>true</GenerateDebugInformation>
      <AdditionalDependencies>Math.lib;%(AdditionalDependencies)</AdditionalDependencies>
      <AdditionalLibraryDirectories>$(OutDir);%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
    </Link>
    <CudaCompile>
      <TargetMachinePlatform>64</TargetMachinePlatform>
@@ -81,7 +83,6 @@
      <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
      <SDLCheck>true</SDLCheck>
      <OpenMPSupport>true</OpenMPSupport>
      <AdditionalIncludeDirectories>..\..\..\Source\Math; ..\..\..\Source\Common\Include; %(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
      <TreatWarningAsError>true</TreatWarningAsError>
    </ClCompile>
    <Link>
@@ -89,7 +90,6 @@
      <GenerateDebugInformation>true</GenerateDebugInformation>
      <EnableCOMDATFolding>true</EnableCOMDATFolding>
      <OptimizeReferences>true</OptimizeReferences>
      <AdditionalLibraryDirectories>$(OutDir);%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
      <AdditionalDependencies>Math.lib;%(AdditionalDependencies)</AdditionalDependencies>
    </Link>
  </ItemDefinitionGroup>
@@ -100,7 +100,7 @@
  </ItemDefinitionGroup>
  <ItemDefinitionGroup Condition="$(GpuBuild)">
    <ClCompile>
      <AdditionalIncludeDirectories>$(CudaToolkitIncludeDir); %(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
      <AdditionalIncludeDirectories>$(CudaToolkitIncludeDir);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
    </ClCompile>
  </ItemDefinitionGroup>
  <ImportGroup Condition="$(GpuBuild)" Label="ExtensionSettings">
@@ -61,17 +61,22 @@
  </ImportGroup>
  <PropertyGroup Label="UserMacros" />
  <PropertyGroup>
    <LinkIncremental>true</LinkIncremental>
    <IncludePath>$(IncludePath)</IncludePath>
    <LibraryPath>$(LibraryPath)</LibraryPath>
    <LinkIncremental>$(DebugBuild)</LinkIncremental>
    <OutDir>$(OutDir)\UnitTests\</OutDir>
  </PropertyGroup>
  <ItemDefinitionGroup>
    <ClCompile>
      <AdditionalIncludeDirectories>$(BOOST_INCLUDE_PATH);$(SolutionDir)Source\Common\Include</AdditionalIncludeDirectories>
    </ClCompile>
    <Link>
      <AdditionalLibraryDirectories>$(OutDir)..;$(BOOST_LIB_PATH)</AdditionalLibraryDirectories>
    </Link>
  </ItemDefinitionGroup>
  <ItemDefinitionGroup Condition="$(DebugBuild)">
    <ClCompile>
      <PrecompiledHeader>NotUsing</PrecompiledHeader>
      <WarningLevel>Level4</WarningLevel>
      <Optimization>Disabled</Optimization>
      <AdditionalIncludeDirectories>$(BOOST_INCLUDE_PATH);..\..\..\Source\Common\include\;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
      <PreprocessorDefinitions>WIN32;_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
      <UseFullPaths>true</UseFullPaths>
      <OpenMPSupport>true</OpenMPSupport>
@@ -80,7 +85,6 @@
    <Link>
      <SubSystem>Console</SubSystem>
      <GenerateDebugInformation>true</GenerateDebugInformation>
      <AdditionalLibraryDirectories>$(OutDir)..\;$(BOOST_LIB_PATH);%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
      <AdditionalDependencies>Math.lib;%(AdditionalDependencies)</AdditionalDependencies>
    </Link>
    <CudaCompile>
@@ -95,8 +99,6 @@
      <Optimization>MaxSpeed</Optimization>
      <FunctionLevelLinking>true</FunctionLevelLinking>
      <IntrinsicFunctions>true</IntrinsicFunctions>
      <AdditionalIncludeDirectories>$(BOOST_INCLUDE_PATH);..\..\..\Source\Common\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
      <PreprocessorDefinitions>WIN32;NDEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
      <UseFullPaths>true</UseFullPaths>
      <MultiProcessorCompilation>true</MultiProcessorCompilation>
      <TreatWarningAsError>true</TreatWarningAsError>
@@ -108,10 +110,19 @@
      <GenerateDebugInformation>true</GenerateDebugInformation>
      <EnableCOMDATFolding>true</EnableCOMDATFolding>
      <OptimizeReferences>true</OptimizeReferences>
      <AdditionalLibraryDirectories>$(OutDir)..\;$(BOOST_LIB_PATH);%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
      <PreprocessorDefinitions>WIN32;NDEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
      <AdditionalDependencies>Math.lib;%(AdditionalDependencies)</AdditionalDependencies>
    </Link>
  </ItemDefinitionGroup>
  <ItemDefinitionGroup Condition="$(GpuBuild)">
    <ClCompile>
      <AdditionalIncludeDirectories>%(AdditionalIncludeDirectories);$(CudaInclude)</AdditionalIncludeDirectories>
    </ClCompile>
    <Link>
      <AdditionalLibraryDirectories>%(AdditionalLibraryDirectories);$(CudaLibPath)</AdditionalLibraryDirectories>
      <DelayLoadDLLs>%(DelayLoadDLLs);nvml.dll;$(CudaRuntimeDll)</DelayLoadDLLs>
    </Link>
  </ItemDefinitionGroup>
  <ItemDefinitionGroup Condition="$(CpuOnlyBuild)">
    <ClCompile>
      <PreprocessorDefinitions>CPUONLY;%(PreprocessorDefinitions)</PreprocessorDefinitions>
@@ -36,20 +36,8 @@
    <CharacterSet>Unicode</CharacterSet>
    <UseOfMfc>false</UseOfMfc>
  </PropertyGroup>
  <PropertyGroup Condition="$(DebugBuild)" Label="Configuration">
    <UseDebugLibraries>true</UseDebugLibraries>
    <IncludePath>$(MSMPI_INC);$(WindowsSDK_IncludePath);</IncludePath>
    <LibraryPath>$(MSMPI_LIB64);$(SolutionDir)$(Platform)\$(Configuration);$(WindowsSDK_LibraryPath_x64)</LibraryPath>
  </PropertyGroup>
  <PropertyGroup Condition="$(ReleaseBuild)" Label="Configuration">
    <UseDebugLibraries>false</UseDebugLibraries>
    <WholeProgramOptimization>true</WholeProgramOptimization>
    <IncludePath>$(MSMPI_INC);$(WindowsSDK_IncludePath);</IncludePath>
    <LibraryPath>$(MSMPI_LIB64);$(SolutionDir)$(Platform)\$(Configuration);$(WindowsSDK_LibraryPath_x64)</LibraryPath>
  </PropertyGroup>
  <PropertyGroup Condition="$(GpuBuild)">
    <IncludePath>$(IncludePath);$(CudaInclude)</IncludePath>
    <LibraryPath>$(LibraryPath);$(CudaLibPath)</LibraryPath>
  <PropertyGroup Label="Configuration">
    <UseDebugLibraries>$(DebugBuild)</UseDebugLibraries>
  </PropertyGroup>
  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
  <ImportGroup Label="ExtensionSettings" />
@@ -59,8 +47,6 @@
  <PropertyGroup Label="UserMacros" />
  <PropertyGroup>
    <LinkIncremental>false</LinkIncremental>
    <IncludePath>$(VC_IncludePath);$(WindowsSDK_IncludePath);..\..\..\Source\Common\include;..\..\..\Source\Math;..\..\..\Source\ActionsLib;..\..\..\Source\ComputationNetworkLib;..\..\..\Source\CNTK\BrainScript;$(IncludePath)</IncludePath>
    <LibraryPath>$(OutDir);$(VC_LibraryPath_x64);$(WindowsSDK_LibraryPath_x64);$(LibraryPath)</LibraryPath>
    <OutDir>$(OutDir)\UnitTests\</OutDir>
  </PropertyGroup>
  <ItemDefinitionGroup>
@@ -69,28 +55,24 @@
      <WarningLevel>Level4</WarningLevel>
      <TreatWarningAsError>true</TreatWarningAsError>
      <PreprocessorDefinitions>WIN32;%(PreprocessorDefinitions)</PreprocessorDefinitions>
      <AdditionalIncludeDirectories>$(BOOST_INCLUDE_PATH);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
      <UseFullPaths>true</UseFullPaths>
      <OpenMPSupport>true</OpenMPSupport>
      <AdditionalIncludeDirectories>$(MSMPI_INC);$(SolutionDir)Source\Common\Include;$(SolutionDir)Source\Math;$(SolutionDir)Source\ActionsLib;$(SolutionDir)Source\ComputationNetworkLib;$(SolutionDir)Source\CNTK\BrainScript;$(BOOST_INCLUDE_PATH)</AdditionalIncludeDirectories>
    </ClCompile>
    <Link>
      <SubSystem>Console</SubSystem>
      <GenerateDebugInformation>true</GenerateDebugInformation>
      <AdditionalLibraryDirectories>$(BOOST_LIB_PATH);$(OutDir)..\;</AdditionalLibraryDirectories>
      <AdditionalDependencies>math.lib;actionslib.lib;computationnetworklib.lib;sequencetraininglib.lib;%(AdditionalDependencies)</AdditionalDependencies>
      <OptimizeReferences>true</OptimizeReferences>
      <AdditionalLibraryDirectories>$(MSMPI_LIB64);$(OutDir)..;$(BOOST_LIB_PATH);$(NvmlLibPath)</AdditionalLibraryDirectories>
      <DelayLoadDLLs>math.dll;msmpi.dll</DelayLoadDLLs>
    </Link>
  </ItemDefinitionGroup>
  <ItemDefinitionGroup Condition="$(DebugBuild)">
    <ClCompile>
      <Optimization>Disabled</Optimization>
      <PreprocessorDefinitions>_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
      <AdditionalIncludeDirectories>$(BOOST_INCLUDE_PATH);$(NvmlInclude)</AdditionalIncludeDirectories>
    </ClCompile>
    <Link>
      <DelayLoadDLLs>math.dll;msmpi.dll; nvml.dll; $(CudaRuntimeDll)</DelayLoadDLLs>
      <AdditionalLibraryDirectories>$(BOOST_LIB_PATH);$(OutDir);$(NvmlLibPath)</AdditionalLibraryDirectories>
    </Link>
  </ItemDefinitionGroup>
  <ItemDefinitionGroup Condition="$(ReleaseBuild)">
    <ClCompile>
@@ -100,12 +82,18 @@
      <PreprocessorDefinitions>NDEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
      <MultiProcessorCompilation>true</MultiProcessorCompilation>
      <AdditionalOptions>/d2Zi+ %(AdditionalOptions)</AdditionalOptions>
      <AdditionalIncludeDirectories>$(BOOST_INCLUDE_PATH);$(NvmlInclude)</AdditionalIncludeDirectories>
    </ClCompile>
    <Link>
      <EnableCOMDATFolding>true</EnableCOMDATFolding>
      <DelayLoadDLLs>math.dll;msmpi.dll; nvml.dll; $(CudaRuntimeDll)</DelayLoadDLLs>
      <AdditionalLibraryDirectories>$(BOOST_LIB_PATH);$(OutDir);$(NvmlLibPath)</AdditionalLibraryDirectories>
    </Link>
  </ItemDefinitionGroup>
  <ItemDefinitionGroup Condition="$(GpuBuild)">
    <ClCompile>
      <AdditionalIncludeDirectories>%(AdditionalIncludeDirectories);$(CudaInclude)</AdditionalIncludeDirectories>
    </ClCompile>
    <Link>
      <AdditionalLibraryDirectories>%(AdditionalLibraryDirectories);$(CudaLibPath)</AdditionalLibraryDirectories>
      <DelayLoadDLLs>%(DelayLoadDLLs);nvml.dll;$(CudaRuntimeDll)</DelayLoadDLLs>
    </Link>
  </ItemDefinitionGroup>
  <ItemDefinitionGroup Condition="$(CpuOnlyBuild)">
@@ -0,0 +1,40 @@
RootDir = .
ModelDir = "models"
command = "MultiView_Test"

precision = "float"

modelPath = "$ModelDir$/ImageReaderMultiView_Model.dnn"

# deviceId = -1 for CPU, >= 0 for GPU devices
deviceId = -1

outputNodeNames = "Dummy"
traceLevel = 1

MultiView_Test = [
    # Parameter values for the reader
    reader = [
        # reader to use
        readerType = "ImageReader"
        file = "$RootDir$/ImageReaderMultiView_map.txt"

        randomize = "none"
        verbosity = 1

        numCPUThreads = 1
        features=[
            width=2
            height=2
            channels=3
            cropType=multiview10
            cropRatio=0.5
            jitterType=UniRatio
            interpolations=Linear
            #meanFile=$RootDir$/ImageReaderMultiView_mean.xml
        ]
        labels=[
            labelDim=4
        ]
    ]
]
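As a sanity check on this config: each view is width x height x channels = 2 x 2 x 3 = 12 values, and cropType=multiview10 yields the standard 10-crop set (typically the four corners plus the center, each also flipped), so one source image produces 10 rows of 12 numbers, which matches the shape of the control file that follows. A small sketch of that arithmetic:

# Expected feature dimensions for the ImageReaderMultiView test above.
width, height, channels = 2, 2, 3
views_per_image = 10                      # cropType = multiview10
values_per_view = width * height * channels
print(values_per_view)                    # 12 -> one row of the control file
print(views_per_image * values_per_view)  # 120 values in total per source image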
@@ -0,0 +1,10 @@
255 0 255 255 255 0 0 255 255 0 0 255
0 0 128 0 0 255 128 0 255 0 128 255
0 255 0 0 255 0 0 255 0 0 255 0
255 0 255 255 255 0 0 255 255 0 0 255
255 128 255 255 255 128 0 255 255 128 0 255
0 255 255 255 0 255 255 0 0 255 255 0
0 0 0 128 255 0 0 128 0 255 255 128
255 0 0 0 0 255 255 0 0 0 0 255
0 255 255 255 0 255 255 0 0 255 255 0
128 255 255 255 128 255 255 0 128 255 255 0
@@ -0,0 +1 @@
images\multi.png 0

Binary file not shown.
After | Width: | Height: | Size: 151 B
@@ -133,5 +133,22 @@ BOOST_AUTO_TEST_CASE(ImageReaderZipMissingFile)
        [](std::runtime_error const& ex) { return string("Failed to get file info of missing.jpg, zip library error: Unknown error -1") == ex.what(); });
}

BOOST_AUTO_TEST_CASE(ImageReaderMultiView)
{
    HelperRunReaderTest<float>(
        testDataPath() + "/Config/ImageReaderMultiView_Config.cntk",
        testDataPath() + "/Control/ImageReaderMultiView_Control.txt",
        testDataPath() + "/Control/ImageReaderMultiView_Output.txt",
        "MultiView_Test",
        "reader",
        10,
        10,
        1,
        1,
        0,
        0,
        1);
}

BOOST_AUTO_TEST_SUITE_END()
} } } }
@@ -57,24 +57,28 @@
  <PropertyGroup Label="UserMacros" />
  <PropertyGroup>
    <LinkIncremental>false</LinkIncremental>
    <IncludePath>..\..\..\Source\Common\include;..\..\..\Source\Math;$(IncludePath)</IncludePath>
    <LibraryPath>$(OutDir);$(LibraryPath)</LibraryPath>
    <OutDir>$(OutDir)\UnitTests\</OutDir>
  </PropertyGroup>
  <ItemDefinitionGroup>
    <ClCompile>
      <AdditionalIncludeDirectories>$(SolutionDir)Source\Common\Include;$(SolutionDir)Source\Math;$(SolutionDir)Source\Readers\ReaderLib;$(BOOST_INCLUDE_PATH)</AdditionalIncludeDirectories>
    </ClCompile>
    <Link>
      <AdditionalLibraryDirectories>$(OutDir);$(OutDir)..;$(BOOST_LIB_PATH)</AdditionalLibraryDirectories>
    </Link>
  </ItemDefinitionGroup>
  <ItemDefinitionGroup>
    <ClCompile>
      <PrecompiledHeader>NotUsing</PrecompiledHeader>
      <WarningLevel>Level4</WarningLevel>
      <TreatWarningAsError>true</TreatWarningAsError>
      <PreprocessorDefinitions>WIN32;$(ImageReaderDefine);$(ZipDefine);%(PreprocessorDefinitions)</PreprocessorDefinitions>
      <AdditionalIncludeDirectories>..\..\..\Source\Readers\ReaderLib;$(BOOST_INCLUDE_PATH);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
      <UseFullPaths>true</UseFullPaths>
      <OpenMPSupport>true</OpenMPSupport>
    </ClCompile>
    <Link>
      <SubSystem>Console</SubSystem>
      <GenerateDebugInformation>true</GenerateDebugInformation>
      <AdditionalLibraryDirectories>$(BOOST_LIB_PATH);$(OutDir)..\;</AdditionalLibraryDirectories>
      <AdditionalDependencies>htkmlfreader.lib;experimentalhtkmlfreader.lib;Math.lib;ReaderLib.lib;%(AdditionalDependencies)</AdditionalDependencies>
      <OptimizeReferences>true</OptimizeReferences>
    </Link>
@@ -168,6 +172,7 @@
    <Text Include="Control\HTKMLFReaderSimpleDataLoop6_16_17_Control.txt" />
    <Text Include="Control\HTKMLFReaderSimpleDataLoop7_Control.txt" />
    <Text Include="Control\HTKMLFReaderSimpleDataLoop9_19_Control.txt" />
    <Text Include="Control\ImageReaderMultiView_Control.txt" />
    <Text Include="Control\ImageReaderSimple_Control.txt" />
    <Text Include="Control\ImageReaderZip_Control.txt" />
    <Text Include="Control\UCIFastReaderSimpleDataLoop_Control.txt" />
@@ -180,6 +185,7 @@
    <Text Include="Data\ImageReaderBadLabel_map.txt" />
    <Text Include="Data\ImageReaderBadMap_map.txt" />
    <Text Include="Data\ImageReaderLabelOutOfRange_map.txt" />
    <Text Include="Data\ImageReaderMultiView_map.txt" />
    <Text Include="Data\ImageReaderSimple_map.txt" />
    <Text Include="Data\ImageReaderZip_map.txt" />
    <Text Include="Data\UCIFastReaderSimpleDataLoop_Mapping.txt" />
@@ -189,6 +195,7 @@
    <Image Include="Data\images\black.jpg" />
    <Image Include="Data\images\blue.jpg" />
    <Image Include="Data\images\green.jpg" />
    <Image Include="Data\images\multi.png" />
    <Image Include="Data\images\red.jpg" />
  </ItemGroup>
  <ItemGroup>
@@ -197,6 +204,7 @@
    <None Include="Config\ImageReaderBadLabel_Config.cntk" />
    <None Include="Config\ImageReaderBadMap_Config.cntk" />
    <None Include="Config\ImageReaderLabelOutOfRange_Config.cntk" />
    <None Include="Config\ImageReaderMultiView_Config.cntk" />
    <None Include="Config\ImageReaderZip_Config.cntk" />
    <None Include="Data\images\chunk0.zip" />
    <None Include="Data\images\chunk1.zip" />
@@ -246,6 +246,12 @@
    <Text Include="Data\CNTKTextFormatReader\100x1_dense.txt">
      <Filter>Data\CNTKTextFormatReader</Filter>
    </Text>
    <Text Include="Data\ImageReaderMultiView_map.txt">
      <Filter>Data</Filter>
    </Text>
    <Text Include="Control\ImageReaderMultiView_Control.txt">
      <Filter>Control</Filter>
    </Text>
  </ItemGroup>
  <ItemGroup>
    <Image Include="Data\images\black.jpg">
@@ -260,6 +266,9 @@
    <Image Include="Data\images\red.jpg">
      <Filter>Data\images</Filter>
    </Image>
    <Image Include="Data\images\multi.png">
      <Filter>Data\images</Filter>
    </Image>
  </ItemGroup>
  <ItemGroup>
    <None Include="Data\images\chunk0.zip">
@@ -286,5 +295,8 @@
    <None Include="Config\CNTKTextFormatReader\sparse.cntk">
      <Filter>Config\CNTKTextFormatReader</Filter>
    </None>
    <None Include="Config\ImageReaderMultiView_Config.cntk">
      <Filter>Config</Filter>
    </None>
  </ItemGroup>
</Project>