Merge branch 'master' of https://git01.codeplex.com/cntk into amitaga/bufferedAsyncGradientAggregation

This commit is contained in:
Amit Agarwal 2015-12-05 01:45:32 -08:00
Родитель 388a3579c2 8bcc6b2530
Коммит cc4db8d994
99 изменённых файлов: 3711 добавлений и 3981 удалений

Просмотреть файл

@ -84,6 +84,9 @@ Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Linux build files", "Linux
EndProjectSection
EndProject
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Documentation", "Documentation", "{065AF55D-AF02-448B-BFCD-52619FDA4BD0}"
ProjectSection(SolutionItems) = preProject
README = README
EndProjectSection
EndProject
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Tutorial", "Tutorial", "{98D2C32B-0C1F-4E19-A626-65F7BA4600CF}"
ProjectSection(SolutionItems) = preProject

Просмотреть файл

@ -16,7 +16,7 @@
#define _CRT_SECURE_NO_WARNINGS // "secure" CRT not available on all platforms --add this at the top of all CPP files that give "function or variable may be unsafe" warnings
#include "Platform.h"
#include "BestGpu.h"
#include "commandArgUtil.h" // for ConfigParameters
#include "Config.h" // for ConfigParameters
#include "ScriptableObjects.h"
#include "DebugUtil.h"
#ifndef CPUONLY

Просмотреть файл

@ -5,12 +5,13 @@
//
// ConfigFile.cpp : Defines the configuration file loader.
//
#ifndef _CRT_SECURE_NO_WARNINGS
#define _CRT_SECURE_NO_WARNINGS // "secure" CRT not available on all platforms --add this at the top of all CPP files that give "function or variable may be unsafe" warnings
#endif
#include "File.h"
#include "commandArgUtil.h"
#include "Config.h"
#include "ScriptableObjects.h"
namespace Microsoft { namespace MSR { namespace CNTK {

Просмотреть файл

@ -10,7 +10,7 @@
#define DATAREADER_LOCAL
#include "Basics.h"
#include "DataReader.h"
#include "commandArgUtil.h"
#include "Config.h"
#include "ScriptableObjects.h"
using namespace std;

Просмотреть файл

@ -23,6 +23,8 @@
#define UNUSED(x) (void)(x) // for variables that are, e.g., only used in _DEBUG builds
#pragma warning (disable: 4702) // disable some incorrect unreachable-code warnings
namespace Microsoft { namespace MSR { namespace CNTK {
using namespace std;
@ -83,6 +85,15 @@ namespace Microsoft { namespace MSR { namespace CNTK {
};
#pragma warning(pop)
static inline void Warning(const string & message) { Warning("%s", message.c_str()); }
#ifndef NOT_IMPLEMENTED
#define NOT_IMPLEMENTED \
{ \
fprintf(stderr, "Inside File: %s Line: %d Function: %s -> Feature Not Implemented.\n", __FILE__, __LINE__, __FUNCTION__); \
LogicError("Inside File: %s Line: %d Function: %s -> Feature Not Implemented.\n", __FILE__, __LINE__, __FUNCTION__); \
}
#endif
}}}
#ifndef _MSC_VER

Просмотреть файл

@ -9,7 +9,7 @@
// #define CPUONLY // #define this to build without GPU support nor needing the SDK installed
#include "CommonMatrix.h"
// define IConfigRecord and ConfigParameters as incomplete types, in order to avoid having to include "ScriptableObjects.h" and "commandArgUtil.h", as that confuses some .CU code
// define IConfigRecord and ConfigParameters as incomplete types, in order to avoid having to include "ScriptableObjects.h" and "Config.h", as that confuses some .CU code
namespace Microsoft { namespace MSR { namespace ScriptableObjects {
struct IConfigRecord;
}}}

Просмотреть файл

@ -1,5 +1,5 @@
//
// <copyright file="commandArgUtil.h" company="Microsoft">
// <copyright file="Config.h" company="Microsoft">
// Copyright (c) Microsoft Corporation. All rights reserved.
// </copyright>
//

Просмотреть файл

@ -26,7 +26,7 @@
#include "Basics.h"
#include "Matrix.h"
#include "Sequences.h"
#include "commandArgUtil.h" // for ConfigParameters
#include "Config.h" // for ConfigParameters
#include "ScriptableObjects.h"
#include <map>
#include <string>

Просмотреть файл

@ -13,9 +13,9 @@
namespace Microsoft { namespace MSR { namespace CNTK {
// -----------------------------------------------------------------------
// TensorShape -- tensor descriptor to describe the inner layout of a data vector that holds a tensor
// TensorShape -- tensor descriptor to describe the inner layout of a sample vector that holds a tensor
//
// Minibatches are stored as Matrices. While the column dimension represents multiple data vectors, and may have
// Minibatches are stored as Matrix objects. While the column dimension represents multiple sample vectors, and may have
// an inner structure (time, parallel sequences) described by the MBLayout, the row dimension represents data
// vectors that hold tensors of data.
//
@ -23,14 +23,9 @@ namespace Microsoft { namespace MSR { namespace CNTK {
//
// Specifically, when the image is an image, then this is a 3-dimensional tensor with dimensions ( channels, width, height ),
// which represents the column-major interpretation of a transposed row-by-row-scanned image where each pixel stores (R,G,B) as a float3.
//
// BUGBUG: Tensors with other than 3 dimensions can currently not be used because they cannot be serialized with the current file format.
// -----------------------------------------------------------------------
// TODO: really support lengths other than 3, e.g. fix serialization code to handle variable-length descriptors
// TODO: rename to DataLayout
// TODO: must match ComputationNode::m_numRows; or, rather, the TensorShape is how m_numRows is stored??
// TODO: move this elsewhere, maybe a separate header Tensors.h?
struct TensorShape
{
public:
@ -113,6 +108,9 @@ namespace Microsoft { namespace MSR { namespace CNTK {
// When constructing an image tensor with the usual W, H, C format, use the following function instead.
// This will sort the three parameters into the correct order.
// BUGBUG: at several places, a comment says "after multiplication the structure is lost" and the vector dimension
// is set as the image height. However, the image height is actually the wrong dimension since images are assumed transposed.
// This will get fixed once we get more complete arbitrary tensor support throughout, including better-defined inference rules.
static inline TensorShape ImageLayoutWHC(size_t width, size_t height, size_t channels)
{
return TensorShape(std::vector<size_t> { channels, width, height });
@ -122,6 +120,6 @@ namespace Microsoft { namespace MSR { namespace CNTK {
{
return TensorShape(std::vector<size_t> { 1, 1, n }); // for now storing it as a 3D object as well --TODO: fix this
}
// TODO: we need a constructor from config; that will generalize
// TODO: we need a constructor from config; that will allow us to generalize
}}}

Просмотреть файл

@ -25,7 +25,7 @@
#include "Basics.h"
#include "Matrix.h"
#include "commandArgUtil.h" // for ConfigParameters
#include "Config.h" // for ConfigParameters
#include "ScriptableObjects.h"
#include <map>
#include <string>

Просмотреть файл

@ -20,7 +20,7 @@ enum class MinibatchPackingFlags : char // (note: not using unsigned char be
NoFeature = 1 << 2, // binary 0100 frame has no feature (e.g. a gap due to BPTT)
NoLabel = 1 << 3, // binary 1000 frame has no label
NoInput = NoFeature | NoLabel, // when we refactorize reader, NoInput will no longer needed
NoInput = NoFeature | NoLabel, // Note: Once we refactorized the reader, NoInput will no longer needed.
SequenceStartOrNoFeature = SequenceStart | NoFeature,
SequenceEndOrNoFeature = SequenceEnd | NoFeature,
SequenceStartOrEndOrNoFeature = SequenceStart | SequenceEnd | NoFeature,
@ -92,9 +92,8 @@ namespace Microsoft { namespace MSR { namespace CNTK {
// - if a node has no MBLayout, m_{function,gradient}Values are not samples (they are not activations or input data), but e.g. model parameters
// - ComputationNode::GetNumCols() == MBLayout::GetNumTimeSteps() * MBLayout::GetNumParallelSequences()
// - ComputationNetwork ensures that m_{function,gradient}Values are allocated correctly before calling ForwardProp() on a node
// NOTE: This class represents an ongoing abstraction of an originally distributed/code-duped way of defining and accessing the MB layout.
// NOTE: Parts of this class represents the result of refactoring code, including a few irregular edge cases.
// Some code below represents the actual use cases I encountered. Not all are, I believe, needed to be as they are; this class could be simplified/streamlined much further.
// Some wackiness below is explained by this.
struct MBLayout
{
@ -348,7 +347,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
other->m_minibatchPackingFlags.begin() + startTimeStep + numTimeSteps);
}
shared_ptr<Matrix<char>> GetColumnsValidityMask(const FrameRange& frameRange, DEVICEID_TYPE deviceId) const;
shared_ptr<Matrix<char>> GetColumnsValidityMask(const FrameRange& fr, DEVICEID_TYPE deviceId) const;
};
typedef MBLayout::MBLayoutPtr MBLayoutPtr;
@ -375,7 +374,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
// TODO: Where this design currently breaks: // <- BUGBUG: I think these are outdated
// - BatchModeNodes must access GetNumParallelSequences(), yet operate on the whole sequence
// - likewise, LSTMNode does its own iteration, hence needs access to GetNumParallelSequences() or NumCols() in the whole-batch iterator
// BUGBUG: These are currently broken and will need to be fixed:
// BUGBUG: These nodes are currently broken and will need to be fixed:
// - CRFNode does not support > 1 parallel sequence
class FrameRange
{
@ -437,31 +436,11 @@ namespace Microsoft { namespace MSR { namespace CNTK {
// code that can only handle single-frame ranges will call t() to get the time index, which will throw if numFrames != 1
// Some functions need just the time index, e.g. for looking up stuff in m_boundaryInfo. That's where an unscaled index is needed (as opposed to startColumn()).
// Really only used in RecurrentNodes(), where it will be replaced by FrameRange::WithDelay() which allows to access delayed frames through the FrameRange object.
size_t t() const { EnsureNotAllFrames(); return timeIdxInSeq; }
// multi-frame slice case: these two get startFrame and numFrames
//size_t StartColumn() const { EnsureNotAllFrames(); return timeIdxInSeq * samplesInRecurrentStep; }
//size_t NumCols() const { EnsureNotAllFrames(); return samplesInRecurrentStep; }
// TODO: remove these ^^ two in favor of these vv
size_t StartColumn(const shared_ptr<MBLayout> & pMBLayout) const { EnsureNotAllFrames(); return timeIdxInSeq * pMBLayout->GetNumParallelSequences(); }
size_t NumCols(const shared_ptr<MBLayout> & pMBLayout) const { EnsureNotAllFrames(); return pMBLayout->GetNumParallelSequences(); }
bool IsAllFrames() const { return timeIdxInSeq == SIZE_MAX; } // if true then above functions may not be called; caller must use entire batch instead (PAR mode)
const FrameRange & Check(size_t expectedStartColumn, size_t expectedNumCols, const shared_ptr<MBLayout> & pMBLayout) const
{
if (!IsAllFrames() && (expectedStartColumn != StartColumn(pMBLayout) || expectedNumCols != NumCols(pMBLayout)))
LogicError("FrameRange::Check: FrameRange object gives different range than original explicit code. Logic is borked.");
return *this;
}
const FrameRange & Check_t(size_t expectedNumCols, const shared_ptr<MBLayout> & pMBLayout) const
{
#if 1 // temporary workaround
if (expectedNumCols == SIZE_MAX || !pMBLayout)
return *this;
#endif
if (!IsAllFrames())
Check(t() * expectedNumCols, expectedNumCols, pMBLayout);
return *this;
}
private:
void EnsureNotAllFrames() const
{
@ -470,7 +449,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
}
};
inline shared_ptr<Matrix<char>> MBLayout::GetColumnsValidityMask(const FrameRange& frameRange, DEVICEID_TYPE deviceId) const
inline shared_ptr<Matrix<char>> MBLayout::GetColumnsValidityMask(const FrameRange& fr, DEVICEID_TYPE deviceId) const
{
// lazily compute the validity mask
if (m_columnsValidityMask == nullptr)
@ -478,7 +457,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
Lock();
m_columnsValidityMask.reset(new Matrix<char>(deviceId));
// Determine indices of all invalid columns in the specified frameRange
// Determine indices of all invalid columns in the specified fr
if (!IsAllNone()) // TODO: use HasGaps() (but currently that would mean a second linear scan, which is not efficient)
{
size_t nT = GetNumTimeSteps();
@ -509,26 +488,26 @@ namespace Microsoft { namespace MSR { namespace CNTK {
return nullptr;
// we have a validity mask: decide what to return
if (frameRange.IsAllFrames())
if (fr.IsAllFrames())
return m_columnsValidityMask;
// Check if there are any invalid frames in the specified frameRange
// Check if there are any invalid frames in the specified fr
bool foundInvalidColumnsInRange = false;
if (frameRange.seqIndex == SIZE_MAX)
if (fr.seqIndex == SIZE_MAX)
{
foundInvalidColumnsInRange = Is(frameRange.t(), MinibatchPackingFlags::NoInput);
foundInvalidColumnsInRange = Is(fr.t(), MinibatchPackingFlags::NoInput);
}
else
{
foundInvalidColumnsInRange = Is(frameRange.seqIndex, frameRange.t(), MinibatchPackingFlags::NoInput);
foundInvalidColumnsInRange = Is(fr.seqIndex, fr.t(), MinibatchPackingFlags::NoInput);
}
if (!foundInvalidColumnsInRange)
return nullptr;
// we get here if there is an actual validity mask and there are invalid frames in its range
size_t startColumn = (frameRange.t() * GetNumParallelSequences()) + ((frameRange.seqIndex == SIZE_MAX) ? 0 : frameRange.seqIndex);
size_t numColumns = (frameRange.seqIndex == SIZE_MAX) ? GetNumParallelSequences() : 1;
size_t startColumn = (fr.t() * GetNumParallelSequences()) + ((fr.seqIndex == SIZE_MAX) ? 0 : fr.seqIndex);
size_t numColumns = (fr.seqIndex == SIZE_MAX) ? GetNumParallelSequences() : 1;
// TODO: why use ColumnSlice() and not DataFor()?
return make_shared<Matrix<char>>(m_columnsValidityMask->ColumnSlice(startColumn, numColumns));
@ -595,31 +574,31 @@ namespace Microsoft { namespace MSR { namespace CNTK {
template<class ElemType>
static inline Matrix<ElemType> DataWithMBLayoutFor(Matrix<ElemType> & data,
const FrameRange & frameRange/*select frame or entire batch*/,
const FrameRange & fr/*select frame or entire batch*/,
const MBLayoutPtr & pMBLayout/*the MB layout of 'data'*/)
{
// MBLayout of data and of FrameRange must be identical pointers,
// or in case of broadcasting, respective parent pointers.
// MBLayouts that are identical in content but not object identity (pointer) are not admissible.
// For those cases, use a ReconcileMBLayout node.
if (frameRange.m_pMBLayout != pMBLayout)
if (fr.m_pMBLayout != pMBLayout)
{
// if broadcast allowed then it is allowed to broadcast from an outer-loop value
// Currently, the only 'outer' loop we have is to have no layout.
if (frameRange.m_broadcastAllowed && !pMBLayout && data.GetNumCols() == 1)
if (fr.m_broadcastAllowed && !pMBLayout && data.GetNumCols() == 1)
return data.AsReference();
if (frameRange.m_pMBLayout && pMBLayout && *frameRange.m_pMBLayout == *pMBLayout)
LogicError("DataFor: frameRange's MBLayout inconsistent with matrix. They are compatible though--are you missing a ReconcileMBLayout operation?");
if (fr.m_pMBLayout && pMBLayout && *fr.m_pMBLayout == *pMBLayout)
LogicError("DataFor: fr's MBLayout inconsistent with matrix. They are compatible though--are you missing a ReconcileMBLayout operation?");
else
LogicError("DataFor: frameRange's MBLayout inconsistent with matrix");
LogicError("DataFor: fr's MBLayout inconsistent with matrix");
}
// if FrameRange refers to whole minibatch (map mode)
// or if we don't even have a layout
// then return the whole matrix
// but as a reference (e.g. it cannot be resized)
if (!pMBLayout || frameRange.IsAllFrames())
if (!pMBLayout || fr.IsAllFrames())
{
if (frameRange.seqIndex == SIZE_MAX)
if (fr.seqIndex == SIZE_MAX)
return data.AsReference();
else
{
@ -632,7 +611,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
// get a reshaped view that stacks all sequences into T long vectors
auto mat = data.ColumnSlice(0, data.GetNumCols());
mat.Resize(data.GetNumRows() * pMBLayout->GetNumParallelSequences(), data.GetNumRows() / pMBLayout->GetNumParallelSequences());
return mat; // .RowSlice(frameRange.seqIndex * data.GetNumRows());
return mat; // .RowSlice(fr.seqIndex * data.GetNumRows());
// TODO: Why does RowSlice() not exist? Seems simple. Is there a hidden assumption of contiguous memory?#endif
#endif
}
@ -641,11 +620,11 @@ namespace Microsoft { namespace MSR { namespace CNTK {
else
{
size_t numParallelSequences = pMBLayout->GetNumParallelSequences();
size_t startColumn = frameRange.t() * numParallelSequences;
if (frameRange.seqIndex == SIZE_MAX)
size_t startColumn = fr.t() * numParallelSequences;
if (fr.seqIndex == SIZE_MAX)
return data.ColumnSlice(startColumn, numParallelSequences);
else
return data.ColumnSlice(startColumn + frameRange.seqIndex, 1);
return data.ColumnSlice(startColumn + fr.seqIndex, 1);
}
}
@ -663,7 +642,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
// Note that existing 'reduce' style operations--the criterion nodes and gradient computation--already call this. --BUGBUG: They can't, wrong layout!
// Warning: The layout used here must match the matrix. E.g. don't pass a child's matrix from a criterion node (use Input(x)->MaskMissing{Values,Gradient}ColumnsToZero() instead.
template<class ElemType>
static inline bool MaskMissingColumnsTo(Matrix<ElemType>& matrixToBeMasked, const MBLayoutPtr & pMBLayout, const FrameRange & frameRange, ElemType val)
static inline bool MaskMissingColumnsTo(Matrix<ElemType>& matrixToBeMasked, const MBLayoutPtr & pMBLayout, const FrameRange & fr, ElemType val)
{
bool foundLabelOrFeatureMissing = false; // return value: set to true if either nolabel or feature missing is processed
@ -675,10 +654,10 @@ namespace Microsoft { namespace MSR { namespace CNTK {
if (matrixToBeMasked.GetNumCols() != nT * nS)
LogicError("MaskMissingColumnsToZero: pMBLayout->m_minibatchPackingFlags should have one element for each timestep of all streams. Check feature reader. ");
shared_ptr<Matrix<char>> columnsValidityMask = pMBLayout->GetColumnsValidityMask(frameRange, matrixToBeMasked.GetDeviceId());
shared_ptr<Matrix<char>> columnsValidityMask = pMBLayout->GetColumnsValidityMask(fr, matrixToBeMasked.GetDeviceId());
if (columnsValidityMask != nullptr)
{
auto matrixSliceToMask = DataWithMBLayoutFor(matrixToBeMasked, frameRange, pMBLayout);
auto matrixSliceToMask = DataWithMBLayoutFor(matrixToBeMasked, fr, pMBLayout);
foundLabelOrFeatureMissing = true;
matrixSliceToMask.MaskColumnsValue(*columnsValidityMask, val);
}

Просмотреть файл

@ -6,7 +6,7 @@
#pragma once
#include "DataReader.h"
#include "DataWriter.h"
#include "commandArgUtil.h"
#include "Config.h"
#include <string>
#include <map>
#include <vector>

Просмотреть файл

@ -1,153 +1,154 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="12.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|x64">
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|x64">
<Configuration>Release</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGuid>{1D5787D4-52E4-45DB-951B-82F220EE0C6A}</ProjectGuid>
<SccProjectName>
</SccProjectName>
<SccAuxPath>
</SccAuxPath>
<SccLocalPath>
</SccLocalPath>
<SccProvider>
</SccProvider>
<Keyword>Win32Proj</Keyword>
<RootNamespace>UCIReader</RootNamespace>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<CharacterSet>Unicode</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>Unicode</CharacterSet>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<LinkIncremental>true</LinkIncremental>
<IncludePath>..\..\common\include;..\..\math\math;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath);</IncludePath>
<LibraryPath>$(SolutionDir)$(Platform)\$(Configuration);$(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(WindowsSDK_LibraryPath_x64);</LibraryPath>
<IntDir>$(Platform)\$(Configuration)\$(ProjectName)\</IntDir>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<LinkIncremental>false</LinkIncremental>
<IncludePath>..\..\common\include;..\..\math\math;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath);</IncludePath>
<LibraryPath>$(SolutionDir)$(Platform)\$(Configuration);$(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(WindowsSDK_LibraryPath_x64);</LibraryPath>
<IntDir>$(Platform)\$(Configuration)\$(ProjectName)\</IntDir>
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<ClCompile>
<PrecompiledHeader>Use</PrecompiledHeader>
<WarningLevel>Level4</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>WIN32;_DEBUG;_WINDOWS;_USRDLL;UCIREADER_EXPORTS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
<AdditionalIncludeDirectories>..\..\common\include;..\..\math\math</AdditionalIncludeDirectories>
<TreatWarningAsError>true</TreatWarningAsError>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>CNTKMath.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\;..\..\math\$(Platform)\$(Configuration);..\$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<ClCompile>
<WarningLevel>Level4</WarningLevel>
<PrecompiledHeader>Use</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>WIN32;NDEBUG;_WINDOWS;_USRDLL;UCIREADER_EXPORTS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
<AdditionalIncludeDirectories>..\..\common\include;..\..\math\math</AdditionalIncludeDirectories>
<OpenMPSupport>false</OpenMPSupport>
<AdditionalOptions>/d2Zi+ %(AdditionalOptions)</AdditionalOptions>
<TreatWarningAsError>true</TreatWarningAsError>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalDependencies>CNTKMath.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>..\..\math\$(Platform)\$(Configuration);$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories>
<Profile>true</Profile>
</Link>
</ItemDefinitionGroup>
<ItemGroup>
<ClInclude Include="..\..\Common\Include\basetypes.h" />
<ClInclude Include="..\..\Common\Include\DataReader.h" />
<ClInclude Include="..\..\Common\Include\DataWriter.h" />
<ClInclude Include="..\..\Common\Include\File.h" />
<ClInclude Include="..\..\Common\Include\fileutil.h" />
<ClInclude Include="..\..\Common\Include\DebugUtil.h" />
<ClInclude Include="BinaryReader.h" />
<ClInclude Include="stdafx.h" />
<ClInclude Include="targetver.h" />
</ItemGroup>
<ItemGroup>
<ClCompile Include="..\..\Common\ConfigFile.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="..\..\Common\DataReader.cpp" />
<ClCompile Include="..\..\Common\DataWriter.cpp" />
<ClCompile Include="..\..\Common\File.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="..\..\Common\fileutil.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="..\..\Common\DebugUtil.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="BinaryFile.cpp" />
<ClCompile Include="BinaryReader.cpp" />
<ClCompile Include="BinaryWriter.cpp" />
<ClCompile Include="Exports.cpp" />
<ClCompile Include="dllmain.cpp">
<CompileAsManaged Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">false</CompileAsManaged>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
</PrecompiledHeader>
<CompileAsManaged Condition="'$(Configuration)|$(Platform)'=='Release|x64'">false</CompileAsManaged>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
</PrecompiledHeader>
</ClCompile>
<ClCompile Include="stdafx.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">Create</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">Create</PrecompiledHeader>
</ClCompile>
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="12.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|x64">
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|x64">
<Configuration>Release</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGuid>{1D5787D4-52E4-45DB-951B-82F220EE0C6A}</ProjectGuid>
<SccProjectName>
</SccProjectName>
<SccAuxPath>
</SccAuxPath>
<SccLocalPath>
</SccLocalPath>
<SccProvider>
</SccProvider>
<Keyword>Win32Proj</Keyword>
<RootNamespace>UCIReader</RootNamespace>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<CharacterSet>Unicode</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>Unicode</CharacterSet>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<LinkIncremental>true</LinkIncremental>
<IncludePath>..\..\common\include;..\..\math\math;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath);</IncludePath>
<LibraryPath>$(SolutionDir)$(Platform)\$(Configuration);$(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(WindowsSDK_LibraryPath_x64);</LibraryPath>
<IntDir>$(Platform)\$(Configuration)\$(ProjectName)\</IntDir>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<LinkIncremental>false</LinkIncremental>
<IncludePath>..\..\common\include;..\..\math\math;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath);</IncludePath>
<LibraryPath>$(SolutionDir)$(Platform)\$(Configuration);$(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(WindowsSDK_LibraryPath_x64);</LibraryPath>
<IntDir>$(Platform)\$(Configuration)\$(ProjectName)\</IntDir>
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<ClCompile>
<PrecompiledHeader>Use</PrecompiledHeader>
<WarningLevel>Level4</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>WIN32;_DEBUG;_WINDOWS;_USRDLL;UCIREADER_EXPORTS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
<AdditionalIncludeDirectories>..\..\common\include;..\..\math\math</AdditionalIncludeDirectories>
<TreatWarningAsError>true</TreatWarningAsError>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>CNTKMath.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\;..\..\math\$(Platform)\$(Configuration);..\$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<ClCompile>
<WarningLevel>Level4</WarningLevel>
<PrecompiledHeader>Use</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>WIN32;NDEBUG;_WINDOWS;_USRDLL;UCIREADER_EXPORTS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
<AdditionalIncludeDirectories>..\..\common\include;..\..\math\math</AdditionalIncludeDirectories>
<OpenMPSupport>false</OpenMPSupport>
<AdditionalOptions>/d2Zi+ %(AdditionalOptions)</AdditionalOptions>
<TreatWarningAsError>true</TreatWarningAsError>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalDependencies>CNTKMath.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>..\..\math\$(Platform)\$(Configuration);$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories>
<Profile>true</Profile>
</Link>
</ItemDefinitionGroup>
<ItemGroup>
<ClInclude Include="..\..\Common\Include\basetypes.h" />
<ClInclude Include="..\..\Common\Include\DataReader.h" />
<ClInclude Include="..\..\Common\Include\DataWriter.h" />
<ClInclude Include="..\..\Common\Include\File.h" />
<ClInclude Include="..\..\Common\Include\fileutil.h" />
<ClInclude Include="..\..\Common\Include\DebugUtil.h" />
<ClInclude Include="BinaryReader.h" />
<ClInclude Include="C:\work\cntk-public\Common\Include\Config.h" />
<ClInclude Include="stdafx.h" />
<ClInclude Include="targetver.h" />
</ItemGroup>
<ItemGroup>
<ClCompile Include="..\..\Common\DataReader.cpp" />
<ClCompile Include="..\..\Common\DataWriter.cpp" />
<ClCompile Include="..\..\Common\File.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="..\..\Common\fileutil.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="..\..\Common\DebugUtil.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="BinaryFile.cpp" />
<ClCompile Include="BinaryReader.cpp" />
<ClCompile Include="BinaryWriter.cpp" />
<ClCompile Include="..\..\Common\Config.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="Exports.cpp" />
<ClCompile Include="dllmain.cpp">
<CompileAsManaged Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">false</CompileAsManaged>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
</PrecompiledHeader>
<CompileAsManaged Condition="'$(Configuration)|$(Platform)'=='Release|x64'">false</CompileAsManaged>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
</PrecompiledHeader>
</ClCompile>
<ClCompile Include="stdafx.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">Create</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">Create</PrecompiledHeader>
</ClCompile>
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
</Project>

Просмотреть файл

@ -13,9 +13,6 @@
<ClCompile Include="..\..\Common\DataReader.cpp">
<Filter>Common</Filter>
</ClCompile>
<ClCompile Include="..\..\Common\ConfigFile.cpp">
<Filter>Common</Filter>
</ClCompile>
<ClCompile Include="..\..\Common\fileutil.cpp">
<Filter>Common</Filter>
</ClCompile>
@ -25,6 +22,9 @@
<ClCompile Include="..\..\Common\File.cpp">
<Filter>Common</Filter>
</ClCompile>
    <ClCompile Include="..\..\Common\Config.cpp">
<Filter>Common</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="BinaryReader.h" />
@ -48,6 +48,9 @@
<ClInclude Include="..\..\Common\Include\DebugUtil.h">
<Filter>Common\Include</Filter>
</ClInclude>
    <ClInclude Include="..\..\Common\Include\Config.h">
<Filter>Common\Include</Filter>
</ClInclude>
</ItemGroup>
<ItemGroup>
<Filter Include="Common">

Просмотреть файл

@ -7,7 +7,7 @@
#pragma once
#include "DataReader.h"
#include "DataWriter.h"
#include "commandArgUtil.h"
#include "Config.h"
#include "RandomOrdering.h"
#include <string>
#include <map>

Просмотреть файл

@ -1,154 +1,152 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="12.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|x64">
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|x64">
<Configuration>Release</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGuid>{014DA766-B37B-4581-BC26-963EA5507931}</ProjectGuid>
<SccProjectName>
</SccProjectName>
<SccAuxPath>
</SccAuxPath>
<SccLocalPath>
</SccLocalPath>
<SccProvider>
</SccProvider>
<Keyword>Win32Proj</Keyword>
<RootNamespace>DSSMReader</RootNamespace>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<CharacterSet>Unicode</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>Unicode</CharacterSet>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
  <PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<LinkIncremental>true</LinkIncremental>
<IncludePath>..\..\common\include;..\..\math\math;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath);</IncludePath>
<LibraryPath>$(SolutionDir)$(Platform)\$(Configuration);$(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(WindowsSDK_LibraryPath_x64);</LibraryPath>
<IntDir>$(Platform)\$(Configuration)\$(ProjectName)\</IntDir>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<LinkIncremental>false</LinkIncremental>
<IncludePath>c:\Program Files\Microsoft MPI\Inc;..\..\common\include;..\..\math\math;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath);</IncludePath>
<LibraryPath>c:\Program Files\Microsoft MPI\Lib\amd64;$(SolutionDir)$(Platform)\$(Configuration);$(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(WindowsSDK_LibraryPath_x64);</LibraryPath>
<IntDir>$(Platform)\$(Configuration)\$(ProjectName)\</IntDir>
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<ClCompile>
<PrecompiledHeader>NotUsing</PrecompiledHeader>
<WarningLevel>Level4</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;WIN32;_DEBUG;_WINDOWS;_USRDLL;DSSMREADER_EXPORTS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
<AdditionalIncludeDirectories>..\..\common\include;..\..\math\math</AdditionalIncludeDirectories>
<TreatWarningAsError>true</TreatWarningAsError>
<AdditionalOptions>/bigobj %(AdditionalOptions)</AdditionalOptions>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>CNTKMath.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\;..\..\math\$(Platform)\$(Configuration);..\$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<ClCompile>
<WarningLevel>Level4</WarningLevel>
<PrecompiledHeader>NotUsing</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;WIN32;NDEBUG;_WINDOWS;_USRDLL;DSSMREADER_EXPORTS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
<AdditionalIncludeDirectories>..\..\common\include;..\..\math\math</AdditionalIncludeDirectories>
<OpenMPSupport>false</OpenMPSupport>
<AdditionalOptions>/d2Zi+ %(AdditionalOptions)</AdditionalOptions>
<TreatWarningAsError>true</TreatWarningAsError>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalDependencies>CNTKMath.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>..\..\math\$(Platform)\$(Configuration);$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories>
<Profile>true</Profile>
</Link>
</ItemDefinitionGroup>
<ItemGroup>
<ClInclude Include="..\..\Common\Include\basetypes.h" />
<ClInclude Include="..\..\Common\Include\DataReader.h" />
<ClInclude Include="..\..\Common\Include\DataWriter.h">
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">false</ExcludedFromBuild>
</ClInclude>
<ClInclude Include="..\..\Common\Include\File.h">
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">false</ExcludedFromBuild>
</ClInclude>
<ClInclude Include="..\..\Common\Include\fileutil.h">
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">false</ExcludedFromBuild>
</ClInclude>
<ClInclude Include="..\..\Common\Include\DebugUtil.h">
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">false</ExcludedFromBuild>
</ClInclude>
<ClInclude Include="..\..\Common\Include\RandomOrdering.h" />
<ClInclude Include="DSSMReader.h" />
<ClInclude Include="stdafx.h" />
<ClInclude Include="targetver.h" />
</ItemGroup>
<ItemGroup>
<ClCompile Include="..\..\Common\ConfigFile.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="..\..\Common\DataReader.cpp" />
<ClCompile Include="..\..\Common\DataWriter.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="..\..\Common\File.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="..\..\Common\fileutil.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="..\..\Common\DebugUtil.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="dllmain.cpp" />
<ClCompile Include="DSSMReader.cpp" />
<ClCompile Include="Exports.cpp" />
<ClCompile Include="stdafx.cpp" />
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="12.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|x64">
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|x64">
<Configuration>Release</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGuid>{014DA766-B37B-4581-BC26-963EA5507931}</ProjectGuid>
<SccProjectName>
</SccProjectName>
<SccAuxPath>
</SccAuxPath>
<SccLocalPath>
</SccLocalPath>
<SccProvider>
</SccProvider>
<Keyword>Win32Proj</Keyword>
<RootNamespace>DSSMReader</RootNamespace>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<CharacterSet>Unicode</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>Unicode</CharacterSet>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
  <PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<LinkIncremental>true</LinkIncremental>
<IncludePath>..\..\common\include;..\..\math\math;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath);</IncludePath>
<LibraryPath>$(SolutionDir)$(Platform)\$(Configuration);$(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(WindowsSDK_LibraryPath_x64);</LibraryPath>
<IntDir>$(Platform)\$(Configuration)\$(ProjectName)\</IntDir>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<LinkIncremental>false</LinkIncremental>
<IncludePath>c:\Program Files\Microsoft MPI\Inc;..\..\common\include;..\..\math\math;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath);</IncludePath>
<LibraryPath>c:\Program Files\Microsoft MPI\Lib\amd64;$(SolutionDir)$(Platform)\$(Configuration);$(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(WindowsSDK_LibraryPath_x64);</LibraryPath>
<IntDir>$(Platform)\$(Configuration)\$(ProjectName)\</IntDir>
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<ClCompile>
<PrecompiledHeader>NotUsing</PrecompiledHeader>
<WarningLevel>Level4</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;WIN32;_DEBUG;_WINDOWS;_USRDLL;DSSMREADER_EXPORTS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
<AdditionalIncludeDirectories>..\..\common\include;..\..\math\math</AdditionalIncludeDirectories>
<TreatWarningAsError>true</TreatWarningAsError>
<AdditionalOptions>/bigobj %(AdditionalOptions)</AdditionalOptions>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>CNTKMath.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\;..\..\math\$(Platform)\$(Configuration);..\$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<ClCompile>
<WarningLevel>Level4</WarningLevel>
<PrecompiledHeader>NotUsing</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;WIN32;NDEBUG;_WINDOWS;_USRDLL;DSSMREADER_EXPORTS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
<AdditionalIncludeDirectories>..\..\common\include;..\..\math\math</AdditionalIncludeDirectories>
<OpenMPSupport>false</OpenMPSupport>
<AdditionalOptions>/d2Zi+ %(AdditionalOptions)</AdditionalOptions>
<TreatWarningAsError>true</TreatWarningAsError>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalDependencies>CNTKMath.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>..\..\math\$(Platform)\$(Configuration);$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories>
<Profile>true</Profile>
</Link>
</ItemDefinitionGroup>
<ItemGroup>
<ClInclude Include="..\..\Common\Include\basetypes.h" />
<ClInclude Include="..\..\Common\Include\DataReader.h" />
<ClInclude Include="..\..\Common\Include\DataWriter.h">
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">false</ExcludedFromBuild>
</ClInclude>
<ClInclude Include="..\..\Common\Include\File.h">
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">false</ExcludedFromBuild>
</ClInclude>
<ClInclude Include="..\..\Common\Include\fileutil.h">
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">false</ExcludedFromBuild>
</ClInclude>
<ClInclude Include="..\..\Common\Include\DebugUtil.h">
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">false</ExcludedFromBuild>
</ClInclude>
<ClInclude Include="..\..\Common\Include\RandomOrdering.h" />
    <ClInclude Include="..\..\Common\Include\Config.h" />
<ClInclude Include="DSSMReader.h" />
<ClInclude Include="stdafx.h" />
<ClInclude Include="targetver.h" />
</ItemGroup>
<ItemGroup>
<ClCompile Include="..\..\Common\DataReader.cpp" />
<ClCompile Include="..\..\Common\DataWriter.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="..\..\Common\File.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="..\..\Common\fileutil.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="..\..\Common\DebugUtil.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="..\..\Common\Config.cpp" />
<ClCompile Include="dllmain.cpp" />
<ClCompile Include="DSSMReader.cpp" />
<ClCompile Include="Exports.cpp" />
<ClCompile Include="stdafx.cpp" />
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
</Project>

Просмотреть файл

@ -17,9 +17,6 @@
</Filter>
</ItemGroup>
<ItemGroup>
<ClCompile Include="..\..\Common\ConfigFile.cpp">
<Filter>Common</Filter>
</ClCompile>
<ClCompile Include="..\..\Common\DataReader.cpp">
<Filter>Common</Filter>
</ClCompile>
@ -47,6 +44,9 @@
<ClCompile Include="..\..\Common\DebugUtil.cpp">
<Filter>Source Files</Filter>
</ClCompile>
    <ClCompile Include="..\..\Common\Config.cpp">
<Filter>Common</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="..\..\Common\Include\basetypes.h">
@ -79,5 +79,8 @@
<ClInclude Include="..\..\Common\Include\RandomOrdering.h">
<Filter>Common\Include</Filter>
</ClInclude>
    <ClInclude Include="..\..\Common\Include\Config.h">
<Filter>Common\Include</Filter>
</ClInclude>
</ItemGroup>
</Project>

Просмотреть файл

@ -21,7 +21,7 @@
#define DATAREADER_EXPORTS
#include "DataReader.h"
#include "HTKMLFReader.h"
#include "commandArgUtil.h"
#include "Config.h"
namespace Microsoft { namespace MSR { namespace CNTK {

Просмотреть файл

@ -23,7 +23,7 @@
#include "minibatchiterator.h"
#define DATAREADER_EXPORTS // creating the exports here
#include "DataReader.h"
#include "commandArgUtil.h"
#include "Config.h"
#include "ScriptableObjects.h"
#include "HTKMLFReader.h"
#include "TimerUtility.h"

Просмотреть файл

@ -6,7 +6,7 @@
// HTKMLFReader.h - Include file for the MTK and MLF format of features and samples
#pragma once
#include "DataReader.h"
#include "commandArgUtil.h" // for intargvector
#include "Config.h" // for intargvector
#include "CUDAPageLockedMemAllocator.h"
namespace Microsoft { namespace MSR { namespace CNTK {

Просмотреть файл

@ -105,6 +105,7 @@
<ClInclude Include="..\..\Common\Include\ssematrix.h" />
<ClInclude Include="basetypes.h" />
<ClInclude Include="biggrowablevectors.h" />
    <ClInclude Include="..\..\Common\Include\Config.h" />
<ClInclude Include="chunkevalsource.h" />
<ClInclude Include="..\..\Common\Include\fileutil.h" />
<ClInclude Include="..\..\Common\Include\DebugUtil.h" />

Просмотреть файл

@ -50,6 +50,9 @@
<ClInclude Include="..\..\Common\Include\ssematrix.h">
<Filter>Common\Include</Filter>
</ClInclude>
    <ClInclude Include="..\..\Common\Include\Config.h">
<Filter>Common\Include</Filter>
</ClInclude>
</ItemGroup>
<ItemGroup>
<Text Include="ReadMe.txt" />

Просмотреть файл

@ -17,7 +17,7 @@
#define DATAWRITER_EXPORTS // creating the exports here
#include "DataWriter.h"
#include "commandArgUtil.h"
#include "Config.h"
#include "HTKMLFWriter.h"
#ifdef LEAKDETECT
#include <vld.h> // for memory leak detection

Просмотреть файл

@ -8,7 +8,7 @@
#define DATAREADER_EXPORTS // creating the exports here
#include "DataReader.h"
#include "ImageReader.h"
#include "commandArgUtil.h"
#include "Config.h"
#include "ScriptableObjects.h"
#include "ConcStack.h"
#include <algorithm>

Просмотреть файл

@ -108,14 +108,12 @@
<ClInclude Include="..\..\Common\Include\DataReader.h" />
<ClInclude Include="..\..\Common\Include\File.h" />
<ClInclude Include="..\..\Common\Include\fileutil.h" />
    <ClInclude Include="..\..\Common\Include\Config.h" />
<ClInclude Include="ImageReader.h" />
<ClInclude Include="stdafx.h" />
<ClInclude Include="targetver.h" />
</ItemGroup>
<ItemGroup>
<ClCompile Include="..\..\Common\ConfigFile.cpp">
<PrecompiledHeader>NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="..\..\Common\DataReader.cpp" />
<ClCompile Include="..\..\Common\File.cpp">
<PrecompiledHeader>NotUsing</PrecompiledHeader>
@ -126,6 +124,10 @@
<ClCompile Include="..\..\Common\fileutil.cpp">
<PrecompiledHeader>NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="..\..\Common\Config.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="dllmain.cpp" />
<ClCompile Include="Exports.cpp">
<ExcludedFromBuild Condition="!$(HasOpenCV)">true</ExcludedFromBuild>
@ -141,7 +143,6 @@
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
<Target Name="CheckDependencies">
<Warning Condition="!$(HasOpenCV)"
Text="ImageReader requires OpenCV library v3.0 or higher to build. Please install the library from http://opencv.org/downloads.html and set OPENCV_PATH environment variable to OpenCV build folder (e.g. C:\src\opencv\build)." />
<Warning Condition="!$(HasOpenCV)" Text="ImageReader requires OpenCV library v3.0 or higher to build. Please install the library from http://opencv.org/downloads.html and set OPENCV_PATH environment variable to OpenCV build folder (e.g. C:\src\opencv\build)." />
</Target>
</Project>

Просмотреть файл

@ -6,9 +6,6 @@
<ClCompile Include="..\..\Common\DataReader.cpp">
<Filter>Common</Filter>
</ClCompile>
<ClCompile Include="..\..\Common\ConfigFile.cpp">
<Filter>Common</Filter>
</ClCompile>
<ClCompile Include="..\..\Common\fileutil.cpp">
<Filter>Common</Filter>
</ClCompile>
@ -17,6 +14,12 @@
</ClCompile>
<ClCompile Include="dllmain.cpp" />
<ClCompile Include="ImageReader.cpp" />
    <ClCompile Include="..\..\Common\Config.cpp">
<Filter>Common</Filter>
</ClCompile>
<ClCompile Include="..\..\Common\DebugUtil.cpp">
<Filter>Common</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="stdafx.h" />
@ -34,6 +37,9 @@
<Filter>Common\Include</Filter>
</ClInclude>
<ClInclude Include="ImageReader.h" />
    <ClInclude Include="..\..\Common\Include\Config.h">
<Filter>Common\Include</Filter>
</ClInclude>
</ItemGroup>
<ItemGroup>
<Filter Include="Common">

Просмотреть файл

@ -20,7 +20,7 @@
#define DATAREADER_EXPORTS
#include "DataReader.h"
#include "HTKMLFReader.h"
#include "commandArgUtil.h"
#include "Config.h"
namespace Microsoft { namespace MSR { namespace CNTK {

Просмотреть файл

@ -21,7 +21,7 @@
#define DATAREADER_EXPORTS // creating the exports here
#include "DataReader.h"
#include "HTKMLFReader.h"
#include "commandArgUtil.h"
#include "Config.h"
#ifdef LEAKDETECT
#include <vld.h> // for memory leak detection
#endif

Просмотреть файл

@ -8,7 +8,7 @@
#include "DataReader.h"
#include "KaldiSequenceTrainingDerivative.h"
#include "UtteranceDerivativeBuffer.h"
#include "commandArgUtil.h" // for intargvector
#include "Config.h" // for intargvector
namespace Microsoft { namespace MSR { namespace CNTK {

Просмотреть файл

@ -15,9 +15,9 @@
#define DATAWRITER_EXPORTS // creating the exports here
#include "DataWriter.h"
#include "commandArgUtil.h"
#include "Config.h"
#include "HTKMLFWriter.h"
#include "commandArgUtil.h"
#include "Config.h"
#ifdef LEAKDETECT
#include <vld.h> // for memory leak detection
#endif

Просмотреть файл

@ -24,7 +24,7 @@
#define DATAREADER_EXPORTS
#include "DataReader.h"
#include "HTKMLFReader.h"
#include "commandArgUtil.h"
#include "Config.h"
namespace Microsoft { namespace MSR { namespace CNTK {

Просмотреть файл

@ -6,7 +6,7 @@
// HTKMLFReader.h - Include file for the MTK and MLF format of features and samples
#pragma once
#include "DataReader.h"
#include "commandArgUtil.h" // for intargvector
#include "Config.h" // for intargvector
namespace Microsoft { namespace MSR { namespace CNTK {

Просмотреть файл

@ -26,10 +26,10 @@
#define DATAWRITER_EXPORTS // creating the exports here
#include "DataWriter.h"
#include "commandArgUtil.h"
#include "Config.h"
#include "HTKMLFWriter.h"
#include "commandArgUtil.h"
#include "Config.h"
#ifdef LEAKDETECT
#include <vld.h> // for memory leak detection
#endif

Просмотреть файл

@ -109,6 +109,7 @@
<ClInclude Include="..\..\Common\Include\fileutil.h" />
<ClInclude Include="..\..\Common\Include\DebugUtil.h" />
<ClInclude Include="..\..\Common\Include\RandomOrdering.h" />
    <ClInclude Include="..\..\Common\Include\Config.h" />
<ClInclude Include="SequenceWriter.h" />
<ClInclude Include="stdafx.h" />
<ClInclude Include="targetver.h" />
@ -116,10 +117,6 @@
<ClInclude Include="SequenceParser.h" />
</ItemGroup>
<ItemGroup>
<ClCompile Include="..\..\Common\ConfigFile.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="..\..\Common\DataReader.cpp" />
<ClCompile Include="..\..\Common\DataWriter.cpp" />
<ClCompile Include="..\..\Common\File.cpp">
@ -134,6 +131,10 @@
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="..\..\Common\Config.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="Exports.cpp" />
<ClCompile Include="dllmain.cpp">
<CompileAsManaged Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">false</CompileAsManaged>

Просмотреть файл

@ -9,7 +9,7 @@
#include "DataReader.h"
#include "DataWriter.h"
#include "commandArgUtil.h"
#include "Config.h"
#include "SequenceParser.h"
#include "RandomOrdering.h"
#include <string>

Просмотреть файл

@ -16,7 +16,7 @@
#include "DataWriter.h"
#include "SequenceReader.h"
#include "SequenceWriter.h"
#include "commandArgUtil.h"
#include "Config.h"
#ifdef LEAKDETECT
#include <vld.h> // for memory leak detection
#endif

Просмотреть файл

@ -11,7 +11,7 @@
#include "DataReader.h"
#include "DataWriter.h"
#include "LUSequenceParser.h"
#include "commandArgUtil.h" // for intargvector
#include "Config.h" // for intargvector
#include "ScriptableObjects.h"
#include <string>
#include <map>

Просмотреть файл

@ -1,160 +1,161 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="12.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|x64">
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|x64">
<Configuration>Release</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGuid>{62836DC1-DF77-4B98-BF2D-45C943B7DDC6}</ProjectGuid>
<SccProjectName>
</SccProjectName>
<SccAuxPath>
</SccAuxPath>
<SccLocalPath>
</SccLocalPath>
<SccProvider>
</SccProvider>
<Keyword>Win32Proj</Keyword>
<RootNamespace>UCIReader</RootNamespace>
<ProjectName>LUSequenceReader</ProjectName>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<CharacterSet>Unicode</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>Unicode</CharacterSet>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<LinkIncremental>true</LinkIncremental>
<IncludePath>..\..\common\include;..\..\math\math;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath);</IncludePath>
<LibraryPath>$(SolutionDir)$(Platform)\$(Configuration);$(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(WindowsSDK_LibraryPath_x64);</LibraryPath>
<IntDir>$(Platform)\$(Configuration)\$(ProjectName)\</IntDir>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<LinkIncremental>false</LinkIncremental>
<IncludePath>..\..\common\include;..\..\math\math;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath);</IncludePath>
<LibraryPath>$(SolutionDir)$(Platform)\$(Configuration);$(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(WindowsSDK_LibraryPath_x64);</LibraryPath>
<IntDir>$(Platform)\$(Configuration)\$(ProjectName)\</IntDir>
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<ClCompile>
<PrecompiledHeader>Use</PrecompiledHeader>
<WarningLevel>Level4</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>WIN32;_DEBUG;_WINDOWS;_USRDLL;UCIREADER_EXPORTS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
<AdditionalIncludeDirectories>..\..\common\include;..\..\math\math</AdditionalIncludeDirectories>
<TreatWarningAsError>true</TreatWarningAsError>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>CNTKMath.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\;..\..\math\$(Platform)\$(Configuration);..\$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<ClCompile>
<WarningLevel>Level4</WarningLevel>
<PrecompiledHeader>Use</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>WIN32;NDEBUG;_WINDOWS;_USRDLL;UCIREADER_EXPORTS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
<AdditionalIncludeDirectories>..\..\common\include;..\..\math\math</AdditionalIncludeDirectories>
<OpenMPSupport>false</OpenMPSupport>
<AdditionalOptions>/d2Zi+ %(AdditionalOptions)</AdditionalOptions>
<TreatWarningAsError>true</TreatWarningAsError>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalDependencies>CNTKMath.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>..\..\math\$(Platform)\$(Configuration);$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories>
<Profile>true</Profile>
</Link>
</ItemDefinitionGroup>
<ItemGroup>
<ClInclude Include="..\..\Common\Include\basetypes.h" />
<ClInclude Include="..\..\Common\Include\DataReader.h" />
<ClInclude Include="..\..\Common\Include\DataWriter.h" />
<ClInclude Include="..\..\Common\Include\File.h" />
<ClInclude Include="..\..\Common\Include\fileutil.h" />
<ClInclude Include="..\..\Common\Include\DebugUtil.h" />
<ClInclude Include="LUSequenceWriter.h" />
<ClInclude Include="minibatchsourcehelpers.h" />
<ClInclude Include="stdafx.h" />
<ClInclude Include="targetver.h" />
<ClInclude Include="LUSequenceReader.h" />
<ClInclude Include="LUSequenceParser.h" />
</ItemGroup>
<ItemGroup>
<ClCompile Include="..\..\Common\ConfigFile.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="..\..\Common\DataReader.cpp" />
<ClCompile Include="..\..\Common\File.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="..\..\Common\fileutil.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="..\..\Common\DebugUtil.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="DataWriter.cpp" />
<ClCompile Include="Exports.cpp" />
<ClCompile Include="dllmain.cpp">
<CompileAsManaged Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">false</CompileAsManaged>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
</PrecompiledHeader>
<CompileAsManaged Condition="'$(Configuration)|$(Platform)'=='Release|x64'">false</CompileAsManaged>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
</PrecompiledHeader>
</ClCompile>
<ClCompile Include="LUSequenceWriter.cpp" />
<ClCompile Include="stdafx.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">Create</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">Create</PrecompiledHeader>
</ClCompile>
<ClCompile Include="LUSequenceReader.cpp" />
<ClCompile Include="LUSequenceParser.cpp" />
</ItemGroup>
<ItemGroup>
<Text Include="SequenceTest.txt" />
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="12.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|x64">
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|x64">
<Configuration>Release</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGuid>{62836DC1-DF77-4B98-BF2D-45C943B7DDC6}</ProjectGuid>
<SccProjectName>
</SccProjectName>
<SccAuxPath>
</SccAuxPath>
<SccLocalPath>
</SccLocalPath>
<SccProvider>
</SccProvider>
<Keyword>Win32Proj</Keyword>
<RootNamespace>UCIReader</RootNamespace>
<ProjectName>LUSequenceReader</ProjectName>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<CharacterSet>Unicode</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>Unicode</CharacterSet>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<LinkIncremental>true</LinkIncremental>
<IncludePath>..\..\common\include;..\..\math\math;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath);</IncludePath>
<LibraryPath>$(SolutionDir)$(Platform)\$(Configuration);$(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(WindowsSDK_LibraryPath_x64);</LibraryPath>
<IntDir>$(Platform)\$(Configuration)\$(ProjectName)\</IntDir>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<LinkIncremental>false</LinkIncremental>
<IncludePath>..\..\common\include;..\..\math\math;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath);</IncludePath>
<LibraryPath>$(SolutionDir)$(Platform)\$(Configuration);$(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(WindowsSDK_LibraryPath_x64);</LibraryPath>
<IntDir>$(Platform)\$(Configuration)\$(ProjectName)\</IntDir>
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<ClCompile>
<PrecompiledHeader>Use</PrecompiledHeader>
<WarningLevel>Level4</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>WIN32;_DEBUG;_WINDOWS;_USRDLL;UCIREADER_EXPORTS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
<AdditionalIncludeDirectories>..\..\common\include;..\..\math\math</AdditionalIncludeDirectories>
<TreatWarningAsError>true</TreatWarningAsError>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>CNTKMath.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\;..\..\math\$(Platform)\$(Configuration);..\$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<ClCompile>
<WarningLevel>Level4</WarningLevel>
<PrecompiledHeader>Use</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>WIN32;NDEBUG;_WINDOWS;_USRDLL;UCIREADER_EXPORTS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
<AdditionalIncludeDirectories>..\..\common\include;..\..\math\math</AdditionalIncludeDirectories>
<OpenMPSupport>false</OpenMPSupport>
<AdditionalOptions>/d2Zi+ %(AdditionalOptions)</AdditionalOptions>
<TreatWarningAsError>true</TreatWarningAsError>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalDependencies>CNTKMath.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>..\..\math\$(Platform)\$(Configuration);$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories>
<Profile>true</Profile>
</Link>
</ItemDefinitionGroup>
<ItemGroup>
<ClInclude Include="..\..\Common\Include\basetypes.h" />
<ClInclude Include="..\..\Common\Include\DataReader.h" />
<ClInclude Include="..\..\Common\Include\DataWriter.h" />
<ClInclude Include="..\..\Common\Include\File.h" />
<ClInclude Include="..\..\Common\Include\fileutil.h" />
<ClInclude Include="..\..\Common\Include\DebugUtil.h" />
    <ClInclude Include="..\..\Common\Include\Config.h" />
<ClInclude Include="LUSequenceWriter.h" />
<ClInclude Include="minibatchsourcehelpers.h" />
<ClInclude Include="stdafx.h" />
<ClInclude Include="targetver.h" />
<ClInclude Include="LUSequenceReader.h" />
<ClInclude Include="LUSequenceParser.h" />
</ItemGroup>
<ItemGroup>
<ClCompile Include="..\..\Common\DataReader.cpp" />
<ClCompile Include="..\..\Common\File.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="..\..\Common\fileutil.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="..\..\Common\DebugUtil.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="..\..\Common\Config.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="DataWriter.cpp" />
<ClCompile Include="Exports.cpp" />
<ClCompile Include="dllmain.cpp">
<CompileAsManaged Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">false</CompileAsManaged>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
</PrecompiledHeader>
<CompileAsManaged Condition="'$(Configuration)|$(Platform)'=='Release|x64'">false</CompileAsManaged>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
</PrecompiledHeader>
</ClCompile>
<ClCompile Include="LUSequenceWriter.cpp" />
<ClCompile Include="stdafx.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">Create</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">Create</PrecompiledHeader>
</ClCompile>
<ClCompile Include="LUSequenceReader.cpp" />
<ClCompile Include="LUSequenceParser.cpp" />
</ItemGroup>
<ItemGroup>
<Text Include="SequenceTest.txt" />
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
</Project>

Просмотреть файл

@ -7,9 +7,6 @@
<ClCompile Include="stdafx.cpp" />
<ClCompile Include="LUSequenceReader.cpp" />
<ClCompile Include="LUSequenceParser.cpp" />
<ClCompile Include="..\..\Common\ConfigFile.cpp">
<Filter>Common</Filter>
</ClCompile>
<ClCompile Include="..\..\Common\DataReader.cpp">
<Filter>Common</Filter>
</ClCompile>
@ -22,6 +19,10 @@
<ClCompile Include="..\..\Common\File.cpp">
<Filter>Common</Filter>
</ClCompile>
<ClCompile Include="..\..\Common\DebugUtil.cpp" />
    <ClCompile Include="..\..\Common\Config.cpp">
<Filter>Common</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="LUSequenceWriter.h" />
@ -47,6 +48,10 @@
<ClInclude Include="minibatchsourcehelpers.h">
<Filter>Duplicates to remove</Filter>
</ClInclude>
<ClInclude Include="..\..\Common\Include\DebugUtil.h" />
    <ClInclude Include="..\..\Common\Include\Config.h">
<Filter>Common\Include</Filter>
</ClInclude>
</ItemGroup>
<ItemGroup>
<Text Include="SequenceTest.txt" />

Просмотреть файл

@ -15,7 +15,7 @@
#define DATAWRITER_EXPORTS // creating the exports here
#include "DataWriter.h"
#include "LUSequenceWriter.h"
#include "commandArgUtil.h"
#include "Config.h"
#ifdef LEAKDETECT
#include <vld.h> // for memory leak detection
#endif

Просмотреть файл

@ -7,7 +7,7 @@
#pragma once
#include "DataReader.h"
#include "DataWriter.h"
#include "commandArgUtil.h"
#include "Config.h"
#include "RandomOrdering.h"
#include <map>
#include <vector>

Просмотреть файл

@ -1,157 +1,158 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="12.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|x64">
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|x64">
<Configuration>Release</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGuid>{D667AF32-028A-4A5D-BE19-F46776F0F6B2}</ProjectGuid>
<SccProjectName>
</SccProjectName>
<SccAuxPath>
</SccAuxPath>
<SccLocalPath>
</SccLocalPath>
<SccProvider>
</SccProvider>
<Keyword>Win32Proj</Keyword>
<RootNamespace>LibSVMBinaryReader</RootNamespace>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<CharacterSet>Unicode</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>Unicode</CharacterSet>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
  <PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<LinkIncremental>true</LinkIncremental>
<IncludePath>..\..\common\include;..\..\math\math;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath);</IncludePath>
<LibraryPath>$(SolutionDir)$(Platform)\$(Configuration);$(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(WindowsSDK_LibraryPath_x64);</LibraryPath>
<IntDir>$(Platform)\$(Configuration)\$(ProjectName)\</IntDir>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<LinkIncremental>false</LinkIncremental>
<IncludePath>c:\Program Files\Microsoft MPI\Inc;..\..\common\include;..\..\math\math;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath);</IncludePath>
<LibraryPath>c:\Program Files\Microsoft MPI\Lib\amd64;$(SolutionDir)$(Platform)\$(Configuration);$(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(WindowsSDK_LibraryPath_x64);</LibraryPath>
<IntDir>$(Platform)\$(Configuration)\$(ProjectName)\</IntDir>
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<ClCompile>
<PrecompiledHeader>Use</PrecompiledHeader>
<WarningLevel>Level4</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;WIN32;_DEBUG;_WINDOWS;_USRDLL;DSSMREADER_EXPORTS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
<AdditionalIncludeDirectories>..\..\common\include;..\..\math\math</AdditionalIncludeDirectories>
<TreatWarningAsError>false</TreatWarningAsError>
<AdditionalOptions>/bigobj %(AdditionalOptions)</AdditionalOptions>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>CNTKMath.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\;..\..\math\$(Platform)\$(Configuration);..\$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<ClCompile>
<WarningLevel>Level4</WarningLevel>
<PrecompiledHeader>Use</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;WIN32;NDEBUG;_WINDOWS;_USRDLL;LIBSVMBINARYREADER_EXPORTS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
<AdditionalIncludeDirectories>..\..\common\include;..\..\math\math</AdditionalIncludeDirectories>
<OpenMPSupport>false</OpenMPSupport>
<AdditionalOptions>/d2Zi+ %(AdditionalOptions)</AdditionalOptions>
<TreatWarningAsError>true</TreatWarningAsError>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalDependencies>CNTKMath.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>..\..\math\$(Platform)\$(Configuration);$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories>
<Profile>true</Profile>
</Link>
</ItemDefinitionGroup>
<ItemGroup>
<ClInclude Include="..\..\Common\Include\basetypes.h" />
<ClInclude Include="..\..\Common\Include\DataReader.h" />
<ClInclude Include="..\..\Common\Include\DataWriter.h" />
<ClInclude Include="..\..\Common\Include\File.h" />
<ClInclude Include="..\..\Common\Include\fileutil.h" />
<ClInclude Include="..\..\Common\Include\DebugUtil.h" />
<ClInclude Include="..\..\Common\Include\RandomOrdering.h" />
<ClInclude Include="LibSVMBinaryReader.h" />
<ClInclude Include="stdafx.h" />
<ClInclude Include="targetver.h" />
</ItemGroup>
<ItemGroup>
<ClCompile Include="..\..\Common\ConfigFile.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="..\..\Common\DataReader.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="..\..\Common\DataWriter.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="..\..\Common\File.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="..\..\Common\fileutil.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="..\..\Common\DebugUtil.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="dllmain.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="Exports.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="LibSVMBinaryReader.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="stdafx.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">Create</PrecompiledHeader>
</ClCompile>
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="12.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|x64">
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|x64">
<Configuration>Release</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGuid>{D667AF32-028A-4A5D-BE19-F46776F0F6B2}</ProjectGuid>
<SccProjectName>
</SccProjectName>
<SccAuxPath>
</SccAuxPath>
<SccLocalPath>
</SccLocalPath>
<SccProvider>
</SccProvider>
<Keyword>Win32Proj</Keyword>
<RootNamespace>LibSVMBinaryReader</RootNamespace>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<CharacterSet>Unicode</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>Unicode</CharacterSet>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
  <PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<LinkIncremental>true</LinkIncremental>
<IncludePath>..\..\common\include;..\..\math\math;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath);</IncludePath>
<LibraryPath>$(SolutionDir)$(Platform)\$(Configuration);$(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(WindowsSDK_LibraryPath_x64);</LibraryPath>
<IntDir>$(Platform)\$(Configuration)\$(ProjectName)\</IntDir>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<LinkIncremental>false</LinkIncremental>
<IncludePath>c:\Program Files\Microsoft MPI\Inc;..\..\common\include;..\..\math\math;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath);</IncludePath>
<LibraryPath>c:\Program Files\Microsoft MPI\Lib\amd64;$(SolutionDir)$(Platform)\$(Configuration);$(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(WindowsSDK_LibraryPath_x64);</LibraryPath>
<IntDir>$(Platform)\$(Configuration)\$(ProjectName)\</IntDir>
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<ClCompile>
<PrecompiledHeader>Use</PrecompiledHeader>
<WarningLevel>Level4</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;WIN32;_DEBUG;_WINDOWS;_USRDLL;DSSMREADER_EXPORTS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
<AdditionalIncludeDirectories>..\..\common\include;..\..\math\math</AdditionalIncludeDirectories>
<TreatWarningAsError>false</TreatWarningAsError>
<AdditionalOptions>/bigobj %(AdditionalOptions)</AdditionalOptions>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>CNTKMath.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\;..\..\math\$(Platform)\$(Configuration);..\$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<ClCompile>
<WarningLevel>Level4</WarningLevel>
<PrecompiledHeader>Use</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;WIN32;NDEBUG;_WINDOWS;_USRDLL;LIBSVMBINARYREADER_EXPORTS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
<AdditionalIncludeDirectories>..\..\common\include;..\..\math\math</AdditionalIncludeDirectories>
<OpenMPSupport>false</OpenMPSupport>
<AdditionalOptions>/d2Zi+ %(AdditionalOptions)</AdditionalOptions>
<TreatWarningAsError>true</TreatWarningAsError>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalDependencies>CNTKMath.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>..\..\math\$(Platform)\$(Configuration);$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories>
<Profile>true</Profile>
</Link>
</ItemDefinitionGroup>
<ItemGroup>
<ClInclude Include="..\..\Common\Include\basetypes.h" />
<ClInclude Include="..\..\Common\Include\DataReader.h" />
<ClInclude Include="..\..\Common\Include\DataWriter.h" />
<ClInclude Include="..\..\Common\Include\File.h" />
<ClInclude Include="..\..\Common\Include\fileutil.h" />
<ClInclude Include="..\..\Common\Include\DebugUtil.h" />
<ClInclude Include="..\..\Common\Include\RandomOrdering.h" />
    <ClInclude Include="..\..\Common\Include\Config.h" />
<ClInclude Include="LibSVMBinaryReader.h" />
<ClInclude Include="stdafx.h" />
<ClInclude Include="targetver.h" />
</ItemGroup>
<ItemGroup>
<ClCompile Include="..\..\Common\DataReader.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="..\..\Common\DataWriter.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="..\..\Common\File.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="..\..\Common\fileutil.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="..\..\Common\DebugUtil.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="..\..\Common\Config.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="dllmain.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="Exports.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="LibSVMBinaryReader.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="stdafx.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">Create</PrecompiledHeader>
</ClCompile>
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
</Project>

Просмотреть файл

@ -13,9 +13,6 @@
</Filter>
</ItemGroup>
<ItemGroup>
<ClCompile Include="..\..\Common\ConfigFile.cpp">
<Filter>Common</Filter>
</ClCompile>
<ClCompile Include="..\..\Common\DataReader.cpp">
<Filter>Common</Filter>
</ClCompile>
@ -35,6 +32,9 @@
<ClCompile Include="Exports.cpp" />
<ClCompile Include="LibSVMBinaryReader.cpp" />
<ClCompile Include="stdafx.cpp" />
<ClCompile Include="C:\work\cntk-public\Common\Config.cpp">
<Filter>Common</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="..\..\Common\Include\basetypes.h">
@ -61,5 +61,8 @@
<ClInclude Include="..\..\Common\Include\RandomOrdering.h">
<Filter>Common\Include</Filter>
</ClInclude>
<ClInclude Include="C:\work\cntk-public\Common\Include\Config.h">
<Filter>Common\Include</Filter>
</ClInclude>
</ItemGroup>
</Project>

Просмотреть файл

@ -7,7 +7,7 @@
#pragma once
#include "DataReader.h"
#include "DataWriter.h"
#include "commandArgUtil.h"
#include "Config.h"
#include "RandomOrdering.h"
#include <string>
#include <map>

Просмотреть файл

@ -118,15 +118,12 @@
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">false</ExcludedFromBuild>
</ClInclude>
<ClInclude Include="..\..\Common\Include\RandomOrdering.h" />
<ClInclude Include="C:\work\cntk-public\Common\Include\Config.h" />
<ClInclude Include="SparsePCReader.h" />
<ClInclude Include="stdafx.h" />
<ClInclude Include="targetver.h" />
</ItemGroup>
<ItemGroup>
<ClCompile Include="..\..\Common\ConfigFile.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="..\..\Common\DataReader.cpp" />
<ClCompile Include="..\..\Common\DataWriter.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
@ -143,6 +140,9 @@
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="..\..\Common\Config.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="dllmain.cpp" />
<ClCompile Include="SparsePCReader.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">Use</PrecompiledHeader>

Просмотреть файл

@ -21,9 +21,6 @@
</Filter>
</ItemGroup>
<ItemGroup>
<ClCompile Include="..\..\Common\ConfigFile.cpp">
<Filter>Common</Filter>
</ClCompile>
<ClCompile Include="..\..\Common\DataReader.cpp">
<Filter>Common</Filter>
</ClCompile>
@ -51,6 +48,9 @@
<ClCompile Include="stdafx.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="C:\work\cntk-public\Common\Config.cpp">
<Filter>Common</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="..\..\Common\Include\basetypes.h">
@ -83,5 +83,8 @@
<ClInclude Include="..\..\Common\Include\RandomOrdering.h">
<Filter>Common\Include</Filter>
</ClInclude>
<ClInclude Include="C:\work\cntk-public\Common\Include\Config.h">
<Filter>Common\Include</Filter>
</ClInclude>
</ItemGroup>
</Project>

Просмотреть файл

@ -8,7 +8,7 @@
#include "stdafx.h"
#include "DataReader.h"
#include "DataWriter.h"
#include "commandArgUtil.h"
#include "Config.h"
#include "RandomOrdering.h"
#include <future>
#include "UCIParser.h"

Просмотреть файл

@ -1,153 +1,154 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="12.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|x64">
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|x64">
<Configuration>Release</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGuid>{E6646FFE-3588-4276-8A15-8D65C22711C1}</ProjectGuid>
<SccProjectName>
</SccProjectName>
<SccAuxPath>
</SccAuxPath>
<SccLocalPath>
</SccLocalPath>
<SccProvider>
</SccProvider>
<Keyword>Win32Proj</Keyword>
<RootNamespace>UCIReader</RootNamespace>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<CharacterSet>Unicode</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>Unicode</CharacterSet>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<LinkIncremental>true</LinkIncremental>
<IncludePath>..\..\common\include;..\..\math\math;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath);</IncludePath>
<LibraryPath>$(SolutionDir)$(Platform)\$(Configuration);$(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(WindowsSDK_LibraryPath_x64);</LibraryPath>
<IntDir>$(Platform)\$(Configuration)\$(ProjectName)\</IntDir>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<LinkIncremental>false</LinkIncremental>
<IncludePath>..\..\common\include;..\..\math\math;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath);</IncludePath>
<LibraryPath>$(SolutionDir)$(Platform)\$(Configuration);$(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(WindowsSDK_LibraryPath_x64);</LibraryPath>
<IntDir>$(Platform)\$(Configuration)\$(ProjectName)\</IntDir>
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<ClCompile>
<PrecompiledHeader>Use</PrecompiledHeader>
<WarningLevel>Level4</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>WIN32;_DEBUG;_WINDOWS;_USRDLL;UCIREADER_EXPORTS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
<AdditionalIncludeDirectories>..\..\common\include;..\..\math\math</AdditionalIncludeDirectories>
<TreatWarningAsError>true</TreatWarningAsError>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>CNTKMath.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\;..\..\math\$(Platform)\$(Configuration);..\$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<ClCompile>
<WarningLevel>Level4</WarningLevel>
<PrecompiledHeader>Use</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>WIN32;NDEBUG;_WINDOWS;_USRDLL;UCIREADER_EXPORTS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
<AdditionalIncludeDirectories>..\..\common\include;..\..\math\math</AdditionalIncludeDirectories>
<OpenMPSupport>false</OpenMPSupport>
<AdditionalOptions>/d2Zi+ %(AdditionalOptions)</AdditionalOptions>
<TreatWarningAsError>true</TreatWarningAsError>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalDependencies>CNTKMath.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>..\..\math\$(Platform)\$(Configuration);$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories>
<Profile>true</Profile>
</Link>
</ItemDefinitionGroup>
<ItemGroup>
<ClInclude Include="..\..\Common\Include\DataReader.h" />
<ClInclude Include="..\..\Common\Include\DataWriter.h" />
<ClInclude Include="..\..\Common\Include\File.h" />
<ClInclude Include="..\..\Common\Include\fileutil.h" />
<ClInclude Include="..\..\Common\Include\DebugUtil.h" />
<ClInclude Include="..\..\Common\Include\RandomOrdering.h" />
<ClInclude Include="stdafx.h" />
<ClInclude Include="targetver.h" />
<ClInclude Include="UCIFastReader.h" />
<ClInclude Include="UCIParser.h" />
</ItemGroup>
<ItemGroup>
<ClCompile Include="..\..\Common\ConfigFile.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="..\..\Common\DataReader.cpp" />
<ClCompile Include="..\..\Common\DataWriter.cpp" />
<ClCompile Include="..\..\Common\File.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="..\..\Common\fileutil.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="..\..\Common\DebugUtil.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="Exports.cpp" />
<ClCompile Include="dllmain.cpp">
<CompileAsManaged Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">false</CompileAsManaged>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
</PrecompiledHeader>
<CompileAsManaged Condition="'$(Configuration)|$(Platform)'=='Release|x64'">false</CompileAsManaged>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
</PrecompiledHeader>
</ClCompile>
<ClCompile Include="stdafx.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">Create</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">Create</PrecompiledHeader>
</ClCompile>
<ClCompile Include="UCIFastReader.cpp" />
<ClCompile Include="UCIParser.cpp" />
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="12.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|x64">
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|x64">
<Configuration>Release</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGuid>{E6646FFE-3588-4276-8A15-8D65C22711C1}</ProjectGuid>
<SccProjectName>
</SccProjectName>
<SccAuxPath>
</SccAuxPath>
<SccLocalPath>
</SccLocalPath>
<SccProvider>
</SccProvider>
<Keyword>Win32Proj</Keyword>
<RootNamespace>UCIReader</RootNamespace>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<CharacterSet>Unicode</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>Unicode</CharacterSet>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<LinkIncremental>true</LinkIncremental>
<IncludePath>..\..\common\include;..\..\math\math;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath);</IncludePath>
<LibraryPath>$(SolutionDir)$(Platform)\$(Configuration);$(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(WindowsSDK_LibraryPath_x64);</LibraryPath>
<IntDir>$(Platform)\$(Configuration)\$(ProjectName)\</IntDir>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<LinkIncremental>false</LinkIncremental>
<IncludePath>..\..\common\include;..\..\math\math;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath);</IncludePath>
<LibraryPath>$(SolutionDir)$(Platform)\$(Configuration);$(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(WindowsSDK_LibraryPath_x64);</LibraryPath>
<IntDir>$(Platform)\$(Configuration)\$(ProjectName)\</IntDir>
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<ClCompile>
<PrecompiledHeader>Use</PrecompiledHeader>
<WarningLevel>Level4</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>WIN32;_DEBUG;_WINDOWS;_USRDLL;UCIREADER_EXPORTS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
<AdditionalIncludeDirectories>..\..\common\include;..\..\math\math</AdditionalIncludeDirectories>
<TreatWarningAsError>true</TreatWarningAsError>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>CNTKMath.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\;..\..\math\$(Platform)\$(Configuration);..\$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<ClCompile>
<WarningLevel>Level4</WarningLevel>
<PrecompiledHeader>Use</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>WIN32;NDEBUG;_WINDOWS;_USRDLL;UCIREADER_EXPORTS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
<AdditionalIncludeDirectories>..\..\common\include;..\..\math\math</AdditionalIncludeDirectories>
<OpenMPSupport>false</OpenMPSupport>
<AdditionalOptions>/d2Zi+ %(AdditionalOptions)</AdditionalOptions>
<TreatWarningAsError>true</TreatWarningAsError>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalDependencies>CNTKMath.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>..\..\math\$(Platform)\$(Configuration);$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories>
<Profile>true</Profile>
</Link>
</ItemDefinitionGroup>
<ItemGroup>
<ClInclude Include="..\..\Common\Include\DataReader.h" />
<ClInclude Include="..\..\Common\Include\DataWriter.h" />
<ClInclude Include="..\..\Common\Include\File.h" />
<ClInclude Include="..\..\Common\Include\fileutil.h" />
<ClInclude Include="..\..\Common\Include\DebugUtil.h" />
<ClInclude Include="..\..\Common\Include\RandomOrdering.h" />
<ClInclude Include="C:\work\cntk-public\Common\Include\Config.h" />
<ClInclude Include="stdafx.h" />
<ClInclude Include="targetver.h" />
<ClInclude Include="UCIFastReader.h" />
<ClInclude Include="UCIParser.h" />
</ItemGroup>
<ItemGroup>
<ClCompile Include="..\..\Common\DataReader.cpp" />
<ClCompile Include="..\..\Common\DataWriter.cpp" />
<ClCompile Include="..\..\Common\File.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="..\..\Common\fileutil.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="..\..\Common\DebugUtil.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="..\..\Common\Config.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="Exports.cpp" />
<ClCompile Include="dllmain.cpp">
<CompileAsManaged Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">false</CompileAsManaged>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
</PrecompiledHeader>
<CompileAsManaged Condition="'$(Configuration)|$(Platform)'=='Release|x64'">false</CompileAsManaged>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
</PrecompiledHeader>
</ClCompile>
<ClCompile Include="stdafx.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">Create</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">Create</PrecompiledHeader>
</ClCompile>
<ClCompile Include="UCIFastReader.cpp" />
<ClCompile Include="UCIParser.cpp" />
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
</Project>

Просмотреть файл

@ -6,9 +6,6 @@
<ClCompile Include="stdafx.cpp" />
<ClCompile Include="UCIFastReader.cpp" />
<ClCompile Include="UCIParser.cpp" />
<ClCompile Include="..\..\Common\ConfigFile.cpp">
<Filter>Common</Filter>
</ClCompile>
<ClCompile Include="..\..\Common\DataReader.cpp">
<Filter>Common</Filter>
</ClCompile>
@ -24,6 +21,9 @@
<ClCompile Include="..\..\Common\DebugUtil.cpp">
<Filter>Common</Filter>
</ClCompile>
<ClCompile Include="C:\work\cntk-public\Common\Config.cpp">
<Filter>Common</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="stdafx.h" />
@ -48,6 +48,9 @@
<ClInclude Include="..\..\Common\Include\RandomOrdering.h">
<Filter>Common\Include</Filter>
</ClInclude>
<ClInclude Include="C:\work\cntk-public\Common\Include\Config.h">
<Filter>Common\Include</Filter>
</ClInclude>
</ItemGroup>
<ItemGroup>
<Filter Include="Common">

Просмотреть файл

@ -2291,7 +2291,7 @@ status open
\begin_layout Plain Layout
#include "commandArgUtil.h"
#include "Config.h"
\end_layout
\begin_layout Plain Layout

Просмотреть файл

@ -41,7 +41,7 @@
#include "CPUMatrix.h" // used for SetNumThreads()
#include "SGD.h"
#include "MPIWrapper.h"
#include "commandArgUtil.h"
#include "Config.h"
#include "MultiNetworksSGD.h"
#include "SimpleEvaluator.h"
#include "SimpleOutputWriter.h"

Просмотреть файл

@ -214,7 +214,10 @@
<ClCompile Include="..\..\BrainScript\BrainScriptEvaluator.cpp" />
<ClCompile Include="..\..\BrainScript\BrainScriptParser.cpp" />
<ClCompile Include="..\..\BrainScript\BrainScriptTest.cpp" />
<ClCompile Include="..\..\Common\ConfigFile.cpp" />
<ClCompile Include="..\..\Common\Config.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="..\..\Common\DataReader.cpp" />
<ClCompile Include="..\..\Common\DataWriter.cpp" />
<ClCompile Include="..\..\Common\File.cpp">

Просмотреть файл

@ -1,9 +1,6 @@
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup>
<ClCompile Include="..\..\Common\ConfigFile.cpp">
<Filter>Common</Filter>
</ClCompile>
<ClCompile Include="..\..\Common\DataReader.cpp">
<Filter>Common</Filter>
</ClCompile>
@ -59,6 +56,9 @@
<ClCompile Include="..\..\Common\Include\ConcStack.h">
<Filter>Common\Include</Filter>
</ClCompile>
<ClCompile Include="..\..\Common\Config.cpp">
<Filter>Common</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="..\..\Common\Include\commandArgUtil.h">

Просмотреть файл

@ -6,7 +6,7 @@
#pragma once
#include "Basics.h"
#include "commandArgUtil.h"
#include "Config.h"
#include "ComputationNetwork.h"
#include "ComputationNetworkBuilder.h"
#include "NetworkDescriptionLanguage.h"

Просмотреть файл

@ -9,7 +9,6 @@
#include "IExecutionEngine.h"
#include "Basics.h"
#include <string>
//#include "commandArgUtil.h"
#include "DataReader.h"
#include "Matrix.h"
#include "NDLUtil.h"

Просмотреть файл

@ -11,7 +11,7 @@
#include "ComputationNetwork.h"
#include "SynchronousExecutionEngine.h"
#include <string>
#include "commandArgUtil.h"
#include "Config.h"
#include <stdexcept>
using namespace std;

Просмотреть файл

@ -1837,6 +1837,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
for (size_t sidx = 0; sidx < inputObs.size(); sidx++)
{
input = inputObs[sidx];
#if 0
if (inputWeightSparse)
{
Wxo = builder.CreateSparseLearnableParameter(msra::strfun::wstrprintf(L"WXO%dI%d", iLayer, sidx), outputDim, inputDim[sidx]);
@ -1845,6 +1846,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
Wxc = builder.CreateSparseLearnableParameter(msra::strfun::wstrprintf(L"WXC%dI%d", iLayer, sidx), outputDim, inputDim[sidx]);
}
else
#endif
{
Wxo = builder.CreateLearnableParameter(msra::strfun::wstrprintf(L"WXO%dI%d", iLayer, sidx), outputDim, inputDim[sidx]);
Wxi = builder.CreateLearnableParameter(msra::strfun::wstrprintf(L"WXI%dI%d", iLayer, sidx), outputDim, inputDim[sidx]);

Просмотреть файл

@ -10,7 +10,7 @@
#include "BestGpu.h"
#include "ComputationNetwork.h"
#include "commandArgUtil.h"
#include "Config.h"
// TODO: giving up moving stuff for now, running out of time. The following #includes should not be necessary once the hard-working code in here gets moved to .cpp
#include "InputAndParamNodes.h"

Просмотреть файл

@ -179,6 +179,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
RuntimeError("'init' must be one of the values of [ uniform | gaussian | fixedValue ]");
}
}
#if 0 // not functional at present
else if (OperationNameOf(SparseLearnableParameter) == cnNodeType)
{
if (parameter.size() < 1 || parameter.size() > 2)
@ -226,6 +227,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
RuntimeError("init must be one of the values of [ uniform | gaussian | fixedValue ]");
}
}
#endif
else if (cnNodeType == L"Constant")
{
if (parameter.size() != 1)

Просмотреть файл

@ -5,6 +5,8 @@
//
#pragma once
// This file represents the beginning of moving actions out from CNTK.cpp to make them accessible as a library. To be continued...
#include "ScriptableObjects.h"
#include "File.h"
@ -16,7 +18,7 @@
namespace Microsoft { namespace MSR { namespace CNTK {
class ActionsBase : public ScriptableObjects::Object //, public ScriptableObjects::CanDo --we call Do() method on actions
class ActionsBase : public ScriptableObjects::Object
{
};

Просмотреть файл

@ -0,0 +1 @@
// This file represents the beginning of moving actions out from CNTK.cpp to make them accessible as a library. To be continued...

Просмотреть файл

@ -0,0 +1 @@
// This file represents the beginning of moving actions out from CNTK.cpp to make them accessible as a library. To be continued...

Просмотреть файл

@ -0,0 +1 @@
// This file represents the beginning of moving actions out from CNTK.cpp to make them accessible as a library. To be continued...

Просмотреть файл

@ -4,6 +4,8 @@
// </copyright>
//
// This file represents the beginning of moving actions out from CNTK.cpp to make them accessible as a library. To be continued...
#include "Actions.h"
#include "ScriptableObjects.h"
#include "File.h"
@ -16,8 +18,7 @@
namespace Microsoft { namespace MSR { namespace CNTK {
// TODO: MakeRuntimeObject will just call procedures, and return a dummy 'bool = true'
class TrainAction : public ActionsBase
class TrainAction : public ActionsBase // TODO: to be continued...
{
void Do()
{

Просмотреть файл

@ -155,6 +155,7 @@
<ClInclude Include="..\..\Common\Include\basetypes.h" />
<ClInclude Include="..\..\Common\Include\Basics.h" />
<ClInclude Include="..\..\Common\Include\BestGpu.h" />
<ClInclude Include="..\..\Common\Include\Config.h" />
<ClInclude Include="..\..\Common\Include\DataTensor.h" />
<ClInclude Include="..\..\Common\Include\File.h" />
<ClInclude Include="..\..\Common\Include\fileutil.h" />
@ -197,7 +198,6 @@
<ClCompile Include="ComputationNetworkEvaluation.cpp" />
<ClCompile Include="ComputationNetworkScripting.cpp" />
<ClCompile Include="ComputationNode.cpp" />
<ClCompile Include="NetworkBuilderFromConfig.cpp" />
<ClCompile Include="stdafx.cpp" />
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />

Просмотреть файл

@ -25,9 +25,6 @@
<ClCompile Include="ComputationNetwork.cpp">
<Filter>Network</Filter>
</ClCompile>
<ClCompile Include="NetworkBuilderFromConfig.cpp">
<Filter>Scripting</Filter>
</ClCompile>
<ClCompile Include="ComputationNetworkEvaluation.cpp">
<Filter>Network</Filter>
</ClCompile>
@ -129,6 +126,9 @@
<ClInclude Include="..\..\Common\Include\DataTensor.h">
<Filter>Common\Include</Filter>
</ClInclude>
<ClInclude Include="..\..\Common\Include\Config.h">
<Filter>Common\Include</Filter>
</ClInclude>
</ItemGroup>
<ItemGroup>
<Filter Include="Common">
@ -152,8 +152,5 @@
<Filter Include="from CNTKMath">
<UniqueIdentifier>{7d838fa4-b5a1-4b8a-b37d-823fb026055b}</UniqueIdentifier>
</Filter>
<Filter Include="Scripting">
<UniqueIdentifier>{fe2443a1-6323-449f-96be-cbd0f608f382}</UniqueIdentifier>
</Filter>
</ItemGroup>
</Project>

Просмотреть файл

@ -389,7 +389,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
virtual void /*ComputationNodeNonLooping::*/ForwardPropNonLooping() override
{
FrameRange frameRange(Input(0)->GetMBLayout());
FrameRange fr(Input(0)->GetMBLayout());
if (m_hasComputed)
return; // not accumulating
@ -397,12 +397,12 @@ namespace Microsoft { namespace MSR { namespace CNTK {
LogicError("%ls %ls operation: MarkComputed(false) has not been called.", NodeName().c_str(), OperationName().c_str());
// set gaps to zero, since we are reducing in time
Input(0)->MaskMissingValuesColumnsToZero(frameRange);
Input(0)->MaskMissingValuesColumnsToZero(fr);
auto & samples = Input(0)->Output();
auto & avg = Output();
#if 1//NANCHECK
#if NANCHECK
samples.HasNan("Mean-Samples");
#endif
size_t numNewSamples = Input(0)->GetMBLayout()->DetermineActualNumSamples();
@ -411,7 +411,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
Matrix<ElemType>::MultiplyAndWeightedAdd(1.0f / totalNumSamples, samples, false,
ConstOnes(samples.GetNumCols(), 1, samples.GetDeviceId()),
false, (ElemType)m_numSamples / totalNumSamples, avg);
#if 1//NANCHECK
#if NANCHECK
avg.HasNan("Mean-avg");
#endif
@ -458,12 +458,12 @@ namespace Microsoft { namespace MSR { namespace CNTK {
{
ElemType sqrtFloor = 1e-10f;
m_var.InplaceTruncateBottom(sqrtFloor); // prevent too small variance (and negative square roots due to numeric inaccuracy)
#if 1//NANCHECK
#if NANCHECK
m_var.HasNan("MarkComputed-InplaceTruncateBottom");
#endif
m_var.InplaceSqrt();
#if 1//NANCHECK
#if NANCHECK
m_var.HasNan("MarkComputed-InplaceSqrt");
#endif
m_var.ElementInverse();
@ -477,7 +477,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
virtual void /*ComputationNodeNonLooping::*/ForwardPropNonLooping() override
{
FrameRange frameRange(Input(0)->GetMBLayout());
FrameRange fr(Input(0)->GetMBLayout());
if (m_hasComputed)
return; // not accumulating
@ -485,10 +485,10 @@ namespace Microsoft { namespace MSR { namespace CNTK {
LogicError("%ls %ls operation: MarkComputed(false) has not been called.", NodeName().c_str(), OperationName().c_str());
// set gaps to zero, since we are reducing in time
Input(0)->MaskMissingValuesColumnsToZero(frameRange);
Input(0)->MaskMissingValuesColumnsToZero(fr);
auto & samples = Input(0)->Output();
#if 1//NANCHECK
#if NANCHECK
samples.HasNan("InvStdDev-Samples");
#endif
m_temp.SetValue(m_mean);
@ -510,7 +510,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
ConstOnes(samples.GetNumCols(), 1, samples.GetDeviceId()),
false, (ElemType)m_numSamples / totalNumSamples, m_var);
#if 1//NANCHECK
#if NANCHECK
m_var.HasNan("InvStdDev-m_var");
#endif
@ -557,11 +557,11 @@ namespace Microsoft { namespace MSR { namespace CNTK {
InvalidArgument("PerDimMeanVarNormalizationNode should only be called in the evaluation stage.");
}
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & frameRange) override
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & fr) override
{
//only feature (input0) and output needs to be sliced
Matrix<ElemType> sliceInput0Value = Input(0)->OutputFor(frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> sliceOutputValue = OutputFor(frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> sliceInput0Value = Input(0)->OutputFor(fr);
Matrix<ElemType> sliceOutputValue = OutputFor(fr);
ForwardPropS(sliceOutputValue, sliceInput0Value, Input(1)->Output(), Input(2)->Output());
}
@ -668,11 +668,11 @@ namespace Microsoft { namespace MSR { namespace CNTK {
}
//(feature-mean).*InvStdDev
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & frameRange) override
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & fr) override
{
//only feature (input0) and output needs to be sliced
Matrix<ElemType> sliceInput0Value = Input(0)->OutputFor(frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> sliceOutputValue = OutputFor(frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> sliceInput0Value = Input(0)->OutputFor(fr);
Matrix<ElemType> sliceOutputValue = OutputFor(fr);
ForwardPropS(sliceOutputValue, sliceInput0Value, Input(1)->Output(), Input(2)->Output());
}

Просмотреть файл

@ -598,8 +598,9 @@ namespace Microsoft { namespace MSR { namespace CNTK {
for (auto nodeIter = nodes.begin(); nodeIter != nodes.end(); nodeIter++)
{
ComputationNodeBasePtr node = *nodeIter;
if ((node->OperationName() == OperationNameOf(LearnableParameter) && node->IsParameterUpdateRequired()) ||
(node->OperationName() == OperationNameOf(SparseLearnableParameter) && node->IsParameterUpdateRequired()))
if ((node->OperationName() == OperationNameOf(LearnableParameter) && node->IsParameterUpdateRequired())
//|| (node->OperationName() == OperationNameOf(SparseLearnableParameter) && node->IsParameterUpdateRequired())
)
{
learnableParameterNames.push_back(node->NodeName());
}

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -17,10 +17,10 @@
#include "ConvolutionalNodes.h"
#include "RecurrentNodes.h"
#include "ReshapingNodes.h"
#include "EsotericNodes.h"
#include "TrainingCriterionNodes.h"
#include "CompositeComputationNodes.h"
#include "EvaluationCriterionNodes.h"
#include "EsotericNodes.h"
#include <string>
@ -114,7 +114,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
else if (nodeType == OperationNameOf(InputValue)) return New<InputValue<ElemType>>(forward<_Types>(_Args)...);
else if (nodeType == OperationNameOf(LearnableParameter)) return New<LearnableParameter<ElemType>>(forward<_Types>(_Args)...);
else if (nodeType == OperationNameOf(MaxPoolingNode)) return New<MaxPoolingNode<ElemType>>(forward<_Types>(_Args)...);
else if (nodeType == OperationNameOf(SparseLearnableParameter)) return New<SparseLearnableParameter<ElemType>>(forward<_Types>(_Args)...);
//else if (nodeType == OperationNameOf(SparseLearnableParameter)) return New<SparseLearnableParameter<ElemType>>(forward<_Types>(_Args)...);
else return nullptr;
}
@ -166,11 +166,13 @@ namespace Microsoft { namespace MSR { namespace CNTK {
return net.AddNodeToNetWithElemType(New<LearnableParameter<ElemType>>(net.GetDeviceId(), paramName, rows, cols));
}
#if 0 // not functional at present
//sparse matrix size is optionally specified
template<class ElemType> shared_ptr<ComputationNode<ElemType>> ComputationNetworkBuilder<ElemType>::CreateSparseLearnableParameter(const std::wstring & paramName, const size_t rows, const size_t cols, const size_t size)
{
return net.AddNodeToNetWithElemType(New<SparseLearnableParameter<ElemType>>(net.GetDeviceId(), paramName, rows, cols, size));
}
#endif
template<class ElemType> shared_ptr<ComputationNode<ElemType>> ComputationNetworkBuilder<ElemType>::CreateInputNode(const std::wstring & inputName, const size_t rows, const size_t cols)
{

Просмотреть файл

@ -1,5 +1,7 @@
// ComputationNetworkBuilder -- helper class for constructing ComputationNetworks and ComputationNodes from C++ (internal and external)
// This is used by NDL and the SimpleNetworkBuilder. It will not be used by BrainScript except for New{Standard}Node().
#pragma once
#include "Basics.h"
@ -27,6 +29,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
// -----------------------------------------------------------------------
// TODO: can these be changed to ComputationNodeBasePtr?
// TODO: move into a separate header/class, to decouple from this class which would then be only used by old NDL and SimpleNetworkBuilder.
static ComputationNodePtr NewStandardNode(const std::wstring & nodeType, DEVICEID_TYPE deviceId, const wstring & name);
static ComputationNodePtr NewNode(const std::wstring & nodeType, DEVICEID_TYPE deviceId, const wstring & name);
@ -37,7 +40,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
ComputationNodePtr CreateLearnableParameter(const std::wstring & paramName, const size_t rows, const size_t cols);
//sparse matrix size is optionally specified
ComputationNodePtr CreateSparseLearnableParameter(const std::wstring & paramName, const size_t rows, const size_t cols, const size_t size = 0);
//ComputationNodePtr CreateSparseLearnableParameter(const std::wstring & paramName, const size_t rows, const size_t cols, const size_t size = 0);
ComputationNodePtr CreateInputNode(const std::wstring & inputName, const size_t rows, const size_t cols);
ComputationNodePtr CreateSparseInputNode(const std::wstring & inputName, const size_t rows, const size_t cols);
ComputationNodePtr CreateInputNode(const std::wstring & inputName, const TensorShape & imageLayout, const size_t numImages);

Просмотреть файл

@ -18,7 +18,7 @@ using namespace std;
namespace Microsoft { namespace MSR { namespace CNTK {
// This source file contains files related to model editing.
// This source file contains files related to model editing with MEL. Future BrainScript editing will not modify nodes in-place.
// -----------------------------------------------------------------------
// network editing

Просмотреть файл

@ -22,23 +22,20 @@ namespace Microsoft { namespace MSR { namespace CNTK {
// This source file contains methods related to evaluation (forward prop, backprop), network validation, and matrix memory allocation (memory sharing).
// -----------------------------------------------------------------------
// evaluation
// forward and backward propagation
// -----------------------------------------------------------------------
// MAIN ENTRY POINT for evaluating one minibatch (forward prop)
// TODO: pass a set of nodes instead of only one
// TODO: rename to ForwardProp()? To make it very clear?
// This calls ForwardProp() on all nodes in order of data flow through the network.
// By default, the network is applied concurrently on all frames in a minibatch in parallel (PAR mode, a "map" operation)
// Recurrent loops deviate:
// Recurrent loops must be treated differently:
// - a recurrent loop is the loop of nodes that make up computation for one time step (e.g. Times -> Plus -> Sigmoid -> Delay)
// - these must be executed frame by frame rather than as a map
// - these must be executed frame by frame (SEQuential) rather than as a map
// - such a loop is treated as if they were a little nested network; this is done inside SEQTraversalFlowControlNodes
// - these little nested networks are defined in m_recurrentInfo[]
void ComputationNetwork::ForwardProp(const ComputationNodeBasePtr & rootNode)
// - these little nested networks are defined in the execution network in the form of nested sentinel nodes of type SEQTraversalFlowControlNode
void ComputationNetwork::ForwardProp(const ComputationNodeBasePtr rootNode)
{
// caller must call BuildAndValidateSubNetwork() before
// TODO: Some places are hard to fix, e.g. encoder-decoder best-path functions. Those may be broken; this message will tell you.
if (!BuiltAndValidatedSubNetwork(rootNode))
LogicError("Evaluate for node %ls %ls: BuildAndValidateSubNetwork() has not been called on this node.", rootNode->NodeName().c_str(), rootNode->OperationName().c_str());
@ -47,49 +44,43 @@ namespace Microsoft { namespace MSR { namespace CNTK {
}
// MAIN ENTRY POINT for evaluation followed by gradient computation (forward prop then back prop)
// TODO: pass a set of nodes instead of only one?
// The actual call pattern is
// The typical calling pattern is:
// - ForwardProp() for eval nodes
// - ForwardProp() for the training criterion
// - ForwardProp() for the training criterion (which will reuse computation results from the previous step)
// - Backprop() for the training criterion
// I.e. we must call Evaluate() inside here as well, but it will typically only evaluate the training criterion bits because the eval nodes already require most of the network to be computed.
template<class ElemType>
void ComputationNetwork::Backprop(const ComputationNodeBasePtr rootNode, // training criterion to compute the gradients for
bool bResetToOne, // true if reset the gradient of rootnode to 1.0 --This is the default.
const Matrix<ElemType>* rootGradientInitValue, // if given then this is the starting gradient from the top --is this ever used?? f not, we can get rid of <ElemType>
bool bClearGradient, // if false then gradients are not cleared --TODO: When does that happen?
bool resetTimeStampAfterComputation)
void ComputationNetwork::Backprop(const ComputationNodeBasePtr rootNode) // training criterion to compute the gradients for
{
// TODO: can we check whether criterion node has been computed actually? IsOutputValue...?
ZeroGradients(rootNode); // reset the flags that will trigger lazy resetting of gradients to zero
// TODO: comment what the purpose/condition of this is
if (bClearGradient)
ClearGradientOfAllNodes(rootNode); // reset m_completedGradient, which is meant to make sure each gradient is computed only once. Only used for recurrence, actually.
// TODO: do a runtime check for float vs. double. Also use the Is/AsPtr macros
// The normal case is with the top root with a scalar gradient value of 1.0. This assumes a single and closure network.
// Allowing to not initialize to 1 allows network to be open to accept gradients from somewhere.
// TODO: aren't these two mechanisms mutually exclusive?
if (bResetToOne)
// initialize root gradient with a scalar gradient value of 1.0
auto nodeFloat = dynamic_pointer_cast<ComputationNode<float>>(rootNode);
if (nodeFloat)
{
dynamic_pointer_cast<ComputationNode<ElemType>>(rootNode)->GradientValues().Resize(1, 1); // TODO: make this a function of ComputationNode; but first need to get rid of Matrix<ElemType> here, or make it a local template parameter
dynamic_pointer_cast<ComputationNode<ElemType>>(rootNode)->GradientValues().SetValue(1); // TODO: is there not a single SetValue() call that also takes dimensions?
nodeFloat->GradientValues().Resize(1, 1);
nodeFloat->GradientValues().SetValue(1.0f);
}
else
{
auto nodeDouble = dynamic_pointer_cast<ComputationNode<double>>(rootNode);
if (nodeDouble)
{
nodeDouble->GradientValues().Resize(1, 1);
nodeDouble->GradientValues().SetValue(1.0);
}
else
LogicError("Backprop: Training criterion is neither ComputationNode<float> nor ComputationNode<double>.");
}
if (rootGradientInitValue != nullptr) // user-specified gradient to start with
dynamic_pointer_cast<ComputationNode<ElemType>>(rootNode)->GradientValues().SetValue(*rootGradientInitValue);
// backpropagate through the network
GetOuterLoopNode(rootNode)->Backprop(FrameRange(nullptr), true, true);
// Since we allow sharing of the matrix for function value and gradient value. the function values are destroyed
// after gradient computation and need to be recomputed. This is indicated by the timestamp updated using this function
// resetTimeStampAfterComputation is by default false because Backprop in normal case is followed by new batch of input
if (resetTimeStampAfterComputation)
ResetEvalTimeStamp();
}
template void ComputationNetwork::Backprop<float>(const ComputationNodeBasePtr rootNode, bool bResetToOne, const Matrix<float>* rootGradientInitValue, bool bClearGradient, bool resetTimeStampAfterComputation);
template void ComputationNetwork::Backprop<double>(const ComputationNodeBasePtr rootNode, bool bResetToOne, const Matrix<double>* rootGradientInitValue, bool bClearGradient, bool resetTimeStampAfterComputation);
ComputationNodeBasePtr ComputationNetwork::GetOuterLoopNode(const ComputationNodeBasePtr& rootNode)
{
if (m_cachedOuterLoopNodes.find(rootNode) == m_cachedOuterLoopNodes.end())
m_cachedOuterLoopNodes[rootNode] = make_shared<PARTraversalFlowControlNode>(m_recurrentInfo, GetEvalOrder(rootNode, false));
return m_cachedOuterLoopNodes[rootNode];
}
// -----------------------------------------------------------------------
// PARTraversalFlowControlNode methods -- implements PAR traversal
@ -124,7 +115,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
}
}
}
/*virtual*/ void ComputationNetwork::PARTraversalFlowControlNode::ForwardProp(const FrameRange & frameRange) /*override*/
/*virtual*/ void ComputationNetwork::PARTraversalFlowControlNode::ForwardProp(const FrameRange & fr) /*override*/
{
for (auto & node : m_nestedNodes)
{
@ -135,7 +126,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
assert(recInfo->m_sourceNode->GetMBLayout() == node->GetMBLayout());
node->BeginForwardProp();
node->ForwardProp(frameRange.WithLayout(node->GetMBLayout()));
node->ForwardProp(fr.WithLayout(node->GetMBLayout()));
node->EndForwardProp();
node->UpdateEvalTimeStamp();
@ -147,7 +138,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
}
}
/*virtual*/ void ComputationNetwork::PARTraversalFlowControlNode::Backprop(const FrameRange & frameRange, bool childrenInThisLoop, bool childrenInOuterLoop) /*override*/
/*virtual*/ void ComputationNetwork::PARTraversalFlowControlNode::Backprop(const FrameRange & fr, bool childrenInThisLoop, bool childrenInOuterLoop) /*override*/
{
childrenInThisLoop, childrenInOuterLoop; // TODO: think through what these mean when coming from PAR mode
// process nodes in pre-determined order
@ -156,15 +147,15 @@ namespace Microsoft { namespace MSR { namespace CNTK {
auto & node = *pnode;
node->BeginBackprop();
node->Backprop(frameRange.WithLayout(node->GetMBLayout()), true/*childrenInThisLoop*/, true/*childrenInOuterLoop*/);
node->Backprop(fr.WithLayout(node->GetMBLayout()), true/*childrenInThisLoop*/, true/*childrenInOuterLoop*/);
node->EndBackprop();
}
}
/*virtual*/ void ComputationNetwork::PARTraversalFlowControlNode::RequestMatricesBeforeEval(MatrixPool& matrixPool) /*override*/ { }
/*virtual*/ void ComputationNetwork::PARTraversalFlowControlNode::ReleaseMatricesAfterEval(MatrixPool& matrixPool) /*override*/ { }
/*virtual*/ void ComputationNetwork::PARTraversalFlowControlNode::AllocateGradientMatricesForChildren(MatrixPool& matrixPool) /*override*/ { }
/*virtual*/ void ComputationNetwork::PARTraversalFlowControlNode::RequestMatricesBeforeGradientComp(MatrixPool& matrixPool) /*override*/ { }
/*virtual*/ void ComputationNetwork::PARTraversalFlowControlNode::ReleaseMatricesAfterGradientComp(MatrixPool& matrixPool) /*override*/ { }
/*virtual*/ void ComputationNetwork::PARTraversalFlowControlNode::RequestMatricesBeforeForwardProp(MatrixPool& matrixPool) /*override*/ { }
/*virtual*/ void ComputationNetwork::PARTraversalFlowControlNode::ReleaseMatricesAfterForwardProp(MatrixPool& matrixPool) /*override*/ { }
/*virtual*/ void ComputationNetwork::PARTraversalFlowControlNode::AllocateGradientMatricesForInputs(MatrixPool& matrixPool) /*override*/ { }
/*virtual*/ void ComputationNetwork::PARTraversalFlowControlNode::RequestMatricesBeforeBackprop(MatrixPool& matrixPool) /*override*/ { }
/*virtual*/ void ComputationNetwork::PARTraversalFlowControlNode::ReleaseMatricesAfterBackprop(MatrixPool& matrixPool) /*override*/ { }
// -----------------------------------------------------------------------
// SEQTraversalFlowControlNode methods -- implements SEQ traversal (loop unrolling)
@ -262,27 +253,27 @@ namespace Microsoft { namespace MSR { namespace CNTK {
node2->EndBackprop();
}
/*virtual*/ void ComputationNetwork::SEQTraversalFlowControlNode::RequestMatricesBeforeEval(MatrixPool& matrixPool) /*override*/
/*virtual*/ void ComputationNetwork::SEQTraversalFlowControlNode::RequestMatricesBeforeForwardProp(MatrixPool& matrixPool) /*override*/
{
for (auto & nodeLoopIter : m_nestedNodes)
nodeLoopIter->RequestMatricesBeforeEval(matrixPool);
nodeLoopIter->RequestMatricesBeforeForwardProp(matrixPool);
}
/*virtual*/ void ComputationNetwork::SEQTraversalFlowControlNode::ReleaseMatricesAfterEval(MatrixPool& matrixPool) /*override*/ { }
/*virtual*/ void ComputationNetwork::SEQTraversalFlowControlNode::AllocateGradientMatricesForChildren(MatrixPool& matrixPool) /*override*/
/*virtual*/ void ComputationNetwork::SEQTraversalFlowControlNode::ReleaseMatricesAfterForwardProp(MatrixPool& matrixPool) /*override*/ { }
/*virtual*/ void ComputationNetwork::SEQTraversalFlowControlNode::AllocateGradientMatricesForInputs(MatrixPool& matrixPool) /*override*/
{
// TODO: should we deallocate in opposite order?
for (auto nodeIter = m_nestedNodes.rbegin(); nodeIter != m_nestedNodes.rend(); ++nodeIter)
{
(*nodeIter)->AllocateGradientMatricesForChildren(matrixPool);
(*nodeIter)->AllocateGradientMatricesForInputs(matrixPool);
}
}
/*virtual*/ void ComputationNetwork::SEQTraversalFlowControlNode::RequestMatricesBeforeGradientComp(MatrixPool& matrixPool) /*override*/ { }
/*virtual*/ void ComputationNetwork::SEQTraversalFlowControlNode::ReleaseMatricesAfterGradientComp(MatrixPool& matrixPool) /*override*/
/*virtual*/ void ComputationNetwork::SEQTraversalFlowControlNode::RequestMatricesBeforeBackprop(MatrixPool& matrixPool) /*override*/ { }
/*virtual*/ void ComputationNetwork::SEQTraversalFlowControlNode::ReleaseMatricesAfterBackprop(MatrixPool& matrixPool) /*override*/
{
for (auto nodeIter = m_nestedNodes.rbegin(); nodeIter != m_nestedNodes.rend(); ++nodeIter)
{
if ((*nodeIter)->NeedGradient())
(*nodeIter)->ReleaseMatricesAfterGradientComp(matrixPool);
(*nodeIter)->ReleaseMatricesAfterBackprop(matrixPool);
}
}
@ -646,11 +637,11 @@ namespace Microsoft { namespace MSR { namespace CNTK {
if (completedEvaluate.insert(recInfo).second)
{
#if 1
recInfo->RequestMatricesBeforeEval(m_matrixPool);
recInfo->RequestMatricesBeforeForwardProp(m_matrixPool);
#else
for (auto &nodeLoopIter : recInfo->m_nestedNodes)
{
nodeLoopIter->RequestMatricesBeforeEval(m_matrixPool);
nodeLoopIter->RequestMatricesBeforeForwardProp(m_matrixPool);
}
#endif
@ -662,7 +653,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
}
else
{
nodeIter->RequestMatricesBeforeEval(m_matrixPool);
nodeIter->RequestMatricesBeforeForwardProp(m_matrixPool);
//we only release matrices for the children since the root node's informatioin will be used and should not be shared
//with others
ReleaseMatricesAfterEvalForChildren(nodeIter, parentCount);
@ -677,7 +668,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
ComputationNodeBasePtr pNode = n->GetInputs()[i];
parentCount[pNode]--;
if (parentCount[pNode] == 0)
pNode->ReleaseMatricesAfterEval(m_matrixPool);
pNode->ReleaseMatricesAfterForwardProp(m_matrixPool);
}
}
@ -691,7 +682,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
set<ComputationNodeBasePtr> completedGradient;
//we need to call it here since we always compute gradients for children and root node is not children of other node
rootNode->RequestMatricesBeforeGradientComp(m_matrixPool);
rootNode->RequestMatricesBeforeBackprop(m_matrixPool);
for (auto &n : allNodes)
{
@ -702,24 +693,24 @@ namespace Microsoft { namespace MSR { namespace CNTK {
if (completedGradient.insert(recInfo).second)
{
// SEQ mode: allocate all in loop first, then deallocate again
#if 1 // TODO: next step: use PARTraversalFlowControlNode::AllocateGradientMatricesForChildren() and ReleaseMatricesAfterGradientComp()...
#if 1 // TODO: next step: use PARTraversalFlowControlNode::AllocateGradientMatricesForInputs() and ReleaseMatricesAfterBackprop()...
// BUGBUG: naw, ^^ would not work! Wrong order! Need to rethink this. Need to make AllocateEvalMatrices() and AllocateGradientMatrices() the virtual functions.
recInfo->AllocateGradientMatricesForChildren(m_matrixPool);
recInfo->AllocateGradientMatricesForInputs(m_matrixPool);
//loops are computed sample by sample so we have to allocate them all
recInfo->ReleaseMatricesAfterGradientComp(m_matrixPool);
recInfo->ReleaseMatricesAfterBackprop(m_matrixPool);
#else
const auto & recurrentNodes = recInfo->m_nestedNodes;
//loops are computed sample by sample so we have to allocate them all
for (auto nodeIter = recurrentNodes.rbegin(); nodeIter != recurrentNodes.rend(); ++nodeIter)
{
(*nodeIter)->AllocateGradientMatricesForChildren(m_matrixPool);
(*nodeIter)->AllocateGradientMatricesForInputs(m_matrixPool);
}
recInfo->m_completedGradient = true;
for (auto nodeIter = recurrentNodes.rbegin(); nodeIter != recurrentNodes.rend(); ++nodeIter)
{
if ((*nodeIter)->NeedGradient())
{
(*nodeIter)->ReleaseMatricesAfterGradientComp(m_matrixPool);
(*nodeIter)->ReleaseMatricesAfterBackprop(m_matrixPool);
}
}
#endif
@ -728,9 +719,9 @@ namespace Microsoft { namespace MSR { namespace CNTK {
else
{
// PAR mode: we can allocate and immediately deallocate one by one
n->AllocateGradientMatricesForChildren(m_matrixPool);
n->AllocateGradientMatricesForInputs(m_matrixPool);
if ((n != rootNode) && n->NeedGradient()) //root node's information will be used and should not be shared with others, also it's small (1x1)
n->ReleaseMatricesAfterGradientComp(m_matrixPool);
n->ReleaseMatricesAfterBackprop(m_matrixPool);
}
}
}

Просмотреть файл

@ -28,14 +28,6 @@
//#define RNN_DEBUG 1
#define DEFAULT_HIDDEN_ACTIVATION 0.1
#ifndef NOT_IMPLEMENTED
#define NOT_IMPLEMENTED \
{ \
fprintf(stderr, "Inside File: %s Line: %d Function: %s -> Feature Not Implemented.\n", __FILE__, __LINE__, __FUNCTION__); \
LogicError("Not Implemented"); \
}
#endif
#pragma warning (disable: 4267)
// version number to control how to read and write
@ -78,35 +70,35 @@ namespace Microsoft { namespace MSR { namespace CNTK {
virtual void UpdateFunctionMBSize() = 0; // recalculate our column dimension from MBLayout
virtual void BeginForwardProp() = 0;
virtual void ForwardProp(const FrameRange &) = 0; // forward prop for one minibatch
virtual void EndForwardProp() = 0; // called after last iteration step of ForwardProp()
virtual void BeginForwardProp() = 0; // called beforefirst iteration step of ForwardProp()
virtual void ForwardProp(const FrameRange &) = 0; // forward prop for one minibatch
virtual void EndForwardProp() = 0; // called after last iteration step of ForwardProp()
virtual void BeginBackprop() = 0; // called before first iteration step of ComputeGradient()
virtual void BackpropTo(const size_t inputIndex, const FrameRange &) = 0;
virtual void EndBackprop() = 0; // called after last iteration step of ComputeGradient()
virtual void BeginBackprop() = 0; // called before first iteration step of ComputeGradient()
virtual void BackpropTo(const size_t inputIndex, const FrameRange &) = 0; // backprop gradient into one of the inputs
virtual void EndBackprop() = 0; // called after last iteration step of ComputeGradient()
// --- these are meant to be overridden by ControlFlowNodes
virtual void Backprop(const FrameRange & frameRange, bool childrenInThisLoop, bool childrenInOuterLoop) = 0;
virtual void Backprop(const FrameRange & fr, bool childrenInThisLoop, bool childrenInOuterLoop) = 0;
// --- optional overrides that add functionality
// Any override must call Base version as well.
// Default implementations are in ComputationNodeBase or ComputationNode<ElemType>.
virtual void RequestMatricesBeforeEval(MatrixPool& matrixPool) = 0; //request matrices needed to do node function value evaluation
virtual void ReleaseMatricesAfterEval(MatrixPool& matrixPool) = 0; //release temp matrices that are only used by forward computation. Don't release matrices that need to be used in the gradient computation
virtual void AllocateGradientMatricesForChildren(MatrixPool& matrixPool) = 0;
virtual void RequestMatricesBeforeGradientComp(MatrixPool& matrixPool) = 0; //request matrices that are needed for gradient computation
virtual void ReleaseMatricesAfterGradientComp(MatrixPool& matrixPool) = 0; //release gradient and temp matrices that no longer needed after all the children's gradients are computed.
virtual void Validate(bool isFinalValidationPass) = 0; // main base validation function
virtual void InferImageDimsFromInputs() = 0;
virtual void Save(File& fstream) const = 0;
virtual void Load(File& /*fstream*/, size_t /*modelVersion*/) = 0;
virtual void CopyTo(ComputationNodeBasePtr node, const std::wstring& newName, const CopyNodeFlags flags) const = 0;
virtual void RequestMatricesBeforeForwardProp(MatrixPool& matrixPool) = 0; // request matrices needed to do node function value evaluation
virtual void ReleaseMatricesAfterForwardProp(MatrixPool& matrixPool) = 0; // release temp matrices that are only used by forward computation. Don't release matrices that need to be used in the gradient computation
virtual void AllocateGradientMatricesForInputs(MatrixPool& matrixPool) = 0;
virtual void RequestMatricesBeforeBackprop(MatrixPool& matrixPool) = 0; // request matrices that are needed for gradient computation
virtual void ReleaseMatricesAfterBackprop(MatrixPool& matrixPool) = 0; // release gradient and temp matrices that no longer needed after all the children's gradients are computed.
// --- optional overrides that describe a feature or property of the node
virtual bool RequiresPreCompute() const = 0; // return true if the node's value should be computed before the normal training. e.g., mean and invStd of input features.
@ -300,22 +292,22 @@ namespace Microsoft { namespace MSR { namespace CNTK {
size_t GetNumCols() const { return m_numCols; }
pair<size_t, size_t> GetDims() { return make_pair(GetNumRows(), GetNumCols()); }
// TODO: add an overload SetDims(TensorShape, cols)
virtual // for now virtual as this still updates m_output
void SetDims(size_t rows, size_t cols)
{
m_numRows = rows;
m_numCols = cols;
// actual memory allocation happens elsewhere
// NOTE: current ComputationNode<> overrides this in order to still do actual memory allocation like before
}
void SetDims(ComputationNodeBasePtr node) { SetDims(node->GetNumRows(), node->GetNumCols()); }
virtual void NotifyFunctionValuesMBSizeModified() { } // someone outside changed our m_output--update our internal state, e.g. m_numRows, m_numCols
void VerifyDims(size_t rows, size_t cols)
{
if (rows != GetNumRows() || cols != GetNumCols())
{
LogicError("VerifyDims: %ls %ls operation expected size %d x %d, but it is %d x %d",
NodeName().c_str(), OperationName().c_str(),
(int)rows, (int)cols, (int)GetNumRows(), (int)GetNumCols());
}
}
virtual void VerifyDims(ComputationNodeBasePtr node) { VerifyDims(node->GetNumRows(), node->GetNumCols()); }
virtual void VerifyDimsMatch() const = 0; // verify that m_output dimensions match ours
@ -425,8 +417,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
protected:
public: // the following should be protected, but nodes inquire about their children, requiring public access
// This is used at 284 places inside nodes, most of the time as
// ...Slice(frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences()), m_pMBLayout)
size_t GetNumParallelSequences() const
{
#if 1
@ -540,7 +531,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
virtual void InvalidateMissingValuesColumns(const FrameRange &) = 0;
virtual void InvalidateMissingGradientColumns(const FrameRange &) = 0;
virtual void ClearGradientOfInputs() = 0;
virtual void ZeroGradientsOfInputs() = 0;
virtual void /*IComputationNode::*/BeginForwardProp() override // called before first iteration step of ForwardProp()
{
@ -885,34 +876,34 @@ namespace Microsoft { namespace MSR { namespace CNTK {
public:
//request matrices needed to do node function value evaluation
virtual void RequestMatricesBeforeEval(MatrixPool& matrixPool)
virtual void RequestMatricesBeforeForwardProp(MatrixPool& matrixPool)
{
RequestMatrixFromPool(m_output, matrixPool);
}
//release temp matrices that are only used by forward computation
//don't release matrices that need to be used in the gradient computation
virtual void ReleaseMatricesAfterEval(MatrixPool& /*matrixPool*/)
virtual void ReleaseMatricesAfterForwardProp(MatrixPool& /*matrixPool*/)
{
}
virtual void AllocateGradientMatricesForChildren(MatrixPool& matrixPool) override
virtual void AllocateGradientMatricesForInputs(MatrixPool& matrixPool) override
{
for (int i = 0; i < m_inputs.size(); i++)
{
if (m_inputs[i]->NeedGradient())
m_inputs[i]->RequestMatricesBeforeGradientComp(matrixPool);
m_inputs[i]->RequestMatricesBeforeBackprop(matrixPool);
}
}
//request matrices that are needed for gradient computation
virtual void RequestMatricesBeforeGradientComp(MatrixPool& matrixPool)
virtual void RequestMatricesBeforeBackprop(MatrixPool& matrixPool)
{
RequestMatrixFromPool(m_gradientValues, matrixPool);
}
//release gradient and temp matrices that no longer needed after all the children's gradients are computed.
virtual void ReleaseMatricesAfterGradientComp(MatrixPool& matrixPool)
virtual void ReleaseMatricesAfterBackprop(MatrixPool& matrixPool)
{
if (!IsLeaf() && !RequiresPreCompute())
{
@ -985,33 +976,33 @@ namespace Microsoft { namespace MSR { namespace CNTK {
void ValidateInferInputDims(size_t i, size_t rows, size_t cols) override final;
public:
static bool MaskMissingColumnsToZero(Matrix<ElemType>& matrixToBeMasked, const MBLayoutPtr & pMBLayout, const FrameRange & frameRange)
static bool MaskMissingColumnsToZero(Matrix<ElemType>& matrixToBeMasked, const MBLayoutPtr & pMBLayout, const FrameRange & fr)
{
//fprintf(stderr, "masking column range %d\n", (int)frameRange.timeIdxInSeq);
return MaskMissingColumnsTo(matrixToBeMasked, pMBLayout, frameRange, (ElemType)0);
//fprintf(stderr, "masking column range %d\n", (int)fr.timeIdxInSeq);
return MaskMissingColumnsTo(matrixToBeMasked, pMBLayout, fr, (ElemType)0);
}
void /*ComputationNodeBase::*/MaskMissingValuesColumnsToZero(const FrameRange & frameRange) override final
void /*ComputationNodeBase::*/MaskMissingValuesColumnsToZero(const FrameRange & fr) override final
{
//fprintf(stderr, "%ls %ls m_output ", NodeName().c_str(), OperationName().c_str());
MaskMissingColumnsToZero(*m_output, m_pMBLayout, frameRange);
MaskMissingColumnsToZero(*m_output, m_pMBLayout, fr);
}
void /*ComputationNodeBase::*/MaskMissingGradientColumnsToZero(const FrameRange & frameRange) override final
void /*ComputationNodeBase::*/MaskMissingGradientColumnsToZero(const FrameRange & fr) override final
{
//fprintf(stderr, "%ls %ls m_gradientValues ", NodeName().c_str(), OperationName().c_str());
MaskMissingColumnsToZero(*m_gradientValues, m_pMBLayout, frameRange);
MaskMissingColumnsToZero(*m_gradientValues, m_pMBLayout, fr);
}
// for debugging, set the gaps to NaN instead (to track whether it bubbles up somewhere)
void InvalidateMissingValuesColumns(const FrameRange & frameRange) override final
void InvalidateMissingValuesColumns(const FrameRange & fr) override final
{
//fprintf(stderr, "invalidating %ls %ls m_output column range %d\n", NodeName().c_str(), OperationName().c_str(), (int)frameRange.timeIdxInSeq);
MaskMissingColumnsTo(*m_output, m_pMBLayout, frameRange, Matrix<ElemType>::MakeNan(__LINE__));
//fprintf(stderr, "invalidating %ls %ls m_output column range %d\n", NodeName().c_str(), OperationName().c_str(), (int)fr.timeIdxInSeq);
MaskMissingColumnsTo(*m_output, m_pMBLayout, fr, Matrix<ElemType>::MakeNan(__LINE__));
}
void InvalidateMissingGradientColumns(const FrameRange & frameRange) override final
void InvalidateMissingGradientColumns(const FrameRange & fr) override final
{
//fprintf(stderr, "invalidating %ls %ls m_gradientValues column range %d\n", NodeName().c_str(), OperationName().c_str(), (int)frameRange.timeIdxInSeq);
MaskMissingColumnsTo(*m_gradientValues, m_pMBLayout, frameRange, Matrix<ElemType>::MakeNan(__LINE__));
//fprintf(stderr, "invalidating %ls %ls m_gradientValues column range %d\n", NodeName().c_str(), OperationName().c_str(), (int)fr.timeIdxInSeq);
MaskMissingColumnsTo(*m_gradientValues, m_pMBLayout, fr, Matrix<ElemType>::MakeNan(__LINE__));
}
// for debugging purposes
@ -1077,48 +1068,46 @@ namespace Microsoft { namespace MSR { namespace CNTK {
const Matrix<ElemType>& Output() const { return *m_output; }
Matrix<ElemType>& Output() { return *m_output; }
//shared_ptr<Matrix<ElemType>>& OutputPtr() { return m_output; }
const Matrix<ElemType>& GradientValues() const { return *m_gradientValues; }
Matrix<ElemType>& GradientValues() { return *m_gradientValues; }
shared_ptr<Matrix<ElemType>>& GradientValuesPtr() { return m_gradientValues; }
// function to access any input and output, value and gradient, whole batch or single frame
// Note: This returns a reference into 'data' in the form of a column slice, i.e. a small matrix object that just points into 'data'.
Matrix<ElemType> DataFor(Matrix<ElemType> & data, const FrameRange & frameRange/*select frame or entire batch*/)
Matrix<ElemType> DataFor(Matrix<ElemType> & data, const FrameRange & fr/*select frame or entire batch*/)
{
try
{
return DataWithMBLayoutFor(data, frameRange, m_pMBLayout);
return DataWithMBLayoutFor(data, fr, m_pMBLayout);
}
catch (const logic_error & e) // catch the error and rethrow it with the node name attached
{
LogicError("%s, for %ls %ls operation.", e.what(), NodeName().c_str(), OperationName().c_str());
}
}
Matrix<ElemType> ValueSliceToDense(const FrameRange & frameRange/*select frame or entire batch*/, bool keepValuesOnSwitch)
Matrix<ElemType> ValueSliceToDense(const FrameRange & fr/*select frame or entire batch*/, bool keepValuesOnSwitch)
{
Output().SwitchToMatrixType(MatrixType::DENSE, MatrixFormat::matrixFormatDense, keepValuesOnSwitch);
return OutputFor(frameRange);
return OutputFor(fr);
}
Matrix<ElemType> OutputFor(const FrameRange & frameRange/*select frame or entire batch*/)
Matrix<ElemType> OutputFor(const FrameRange & fr/*select frame or entire batch*/)
{
return DataFor(Output(), frameRange);
return DataFor(Output(), fr);
}
Matrix<ElemType> GradientFor(const FrameRange & frameRange/*select frame or entire batch*/)
Matrix<ElemType> GradientFor(const FrameRange & fr/*select frame or entire batch*/)
{
return DataFor(GradientValues(), frameRange);
return DataFor(GradientValues(), fr);
}
// use the following two versions if you assume the inputs may contain gaps that must be set to zero because you want to reduce over frames with a BLAS operation
Matrix<ElemType> MaskedValueSlice(const FrameRange & frameRange/*select frame or entire batch*/)
Matrix<ElemType> MaskedValueSlice(const FrameRange & fr/*select frame or entire batch*/)
{
MaskMissingValuesColumnsToZero(frameRange);
return OutputFor(frameRange);
MaskMissingValuesColumnsToZero(fr);
return OutputFor(fr);
}
Matrix<ElemType> MaskedGradientSlice(const FrameRange & frameRange/*select frame or entire batch*/)
Matrix<ElemType> MaskedGradientSlice(const FrameRange & fr/*select frame or entire batch*/)
{
MaskMissingGradientColumnsToZero(frameRange);
return GradientFor(frameRange);
MaskMissingGradientColumnsToZero(fr);
return GradientFor(fr);
}
void UpdateFunctionValuesSize()
@ -1185,9 +1174,9 @@ namespace Microsoft { namespace MSR { namespace CNTK {
// this is the entry point from Network; while it will call virtual BackpropTo() into the actual node implementation
// TODO: move to -Base (or -Network?)
void Backprop(const FrameRange & frameRange, bool childrenInThisLoop, bool childrenInOuterLoop) override
void Backprop(const FrameRange & fr, bool childrenInThisLoop, bool childrenInOuterLoop) override
{
if (frameRange.IsAllFrames() && IsPartOfLoop() && childrenInThisLoop)
if (fr.IsAllFrames() && IsPartOfLoop() && childrenInThisLoop)
LogicError("%ls %ls operation: Backprop called with whole-batch FrameRange on node that participates in a loop", NodeName().c_str(), OperationName().c_str());
for (size_t i = 0; i < m_inputs.size(); i++)
@ -1212,14 +1201,14 @@ namespace Microsoft { namespace MSR { namespace CNTK {
// If we propagate from a loop to a node that is outside the loop, we are not efficient.
// This case is handled by SEQTraversalFlowControlNode::Backprop().
// The check below is to verify that.
if (IsPartOfLoop() && !child->IsPartOfLoop() && !frameRange.IsAllFrames())
if (IsPartOfLoop() && !child->IsPartOfLoop() && !fr.IsAllFrames())
{
LogicError("Backprop: Inefficiency: %ls %ls operation in loop propagates gradient to non-loop %ls %ls\n",
NodeName().c_str(), OperationName().c_str(), child->NodeName().c_str(), child->OperationName().c_str());
}
//fprintf(stderr, "BackpropTo %d %d %ls %ls\n", (int)frameRange.timeIdxInSeq, (int)i, NodeName().c_str(), OperationName().c_str());
BackpropTo(i, frameRange); // this computes partial wrt to the child and sums the gradient value in the child
//fprintf(stderr, "BackpropTo %d %d %ls %ls\n", (int)fr.timeIdxInSeq, (int)i, NodeName().c_str(), OperationName().c_str());
BackpropTo(i, fr); // this computes partial wrt to the child and sums the gradient value in the child
}
#ifdef DISPLAY_DEBUG
else fprintf (stderr, " [%lu]: %s(%s) (no gradient needed so don't compute for)\n", i, child->OperationName().c_str(), child->NodeName().c_str());
@ -1227,7 +1216,8 @@ namespace Microsoft { namespace MSR { namespace CNTK {
}
}
void /*ComputationNodeBase::*/ClearGradientOfInputs() override // TODO: bad naming--this just clears the lazy flags, whereas LazyZeroGradient() actually clears the values
// TODO: why of the inputs, and not the node itself?
void /*ComputationNodeBase::*/ZeroGradientsOfInputs() override // clears the lazy-init flags (LazyZeroGradient() actually clears the values lazily)
{
for (size_t i = 0; i < m_inputs.size(); i++)
Input(i)->m_gradientInitialized = false;
@ -1245,10 +1235,10 @@ namespace Microsoft { namespace MSR { namespace CNTK {
// TODO: we should move this pattern to class Matrix. We should not be concerned here with the storage format of the gradient.
GradientValues().Resize(Output().GetNumRows(), Output().GetNumCols());
if (GradientValues().GetMatrixType() == DENSE)
//if (GradientValues().GetMatrixType() == DENSE)
GradientValues().SetValue(0);
else
GradientValues().Reset();
//else // no longer needed, SetValue() does this right
// GradientValues().Reset();
m_gradientInitialized = true;
}
@ -1272,8 +1262,22 @@ namespace Microsoft { namespace MSR { namespace CNTK {
return *m;
}
void CreateGradientMatrixIfNull()
{
CreateMatrixIfNull(m_gradientValues);
}
protected:
// this function is used to create matrices for those needed before matrix pool is available
// e.g., for model parameters and input nodes you will need to resize the functions based on NDL
// and before matrix pool is available
void CreateMatrixIfNull(shared_ptr<Matrix<ElemType>>& matrixPtr)
{
if (!matrixPtr)
matrixPtr = make_shared<Matrix<ElemType>>(m_deviceId);
}
void RequestMatrixFromPool(shared_ptr<Matrix<ElemType>>& matrixPtr, MatrixPool& matrixPool)
{
if (matrixPtr == nullptr)
@ -1288,17 +1292,6 @@ namespace Microsoft { namespace MSR { namespace CNTK {
matrixPool.Release<ElemType>(matrixPtr);
}
//this function is used to create matrices for those needed before matrix pool is available
//e.g., for model parameters and input nodes you will need to resize the functions based on NDL
//and before matrix pool is available
void CreateMatrixIfNull(shared_ptr<Matrix<ElemType>>& matrixPtr)
{
if (matrixPtr == nullptr)
{
matrixPtr = make_shared<Matrix<ElemType>>(m_deviceId);
}
}
//to be called by derived classed if that class needs to print node values
void PrintNodeValuesToFile(const bool printValues, File& fstream) const
{
@ -1381,16 +1374,16 @@ namespace Microsoft { namespace MSR { namespace CNTK {
{ }
// these two implement the ComputationNode<> interface
void ForwardProp(const FrameRange & frameRange) override final
void ForwardProp(const FrameRange & fr) override final
{
if (frameRange.IsAllFrames())
if (fr.IsAllFrames())
ForwardPropNonLooping();
else
LogicError("%s node should never be in a loop.", typeid(*this).name());
}
void BackpropTo(const size_t inputIndex, const FrameRange & frameRange) override final
void BackpropTo(const size_t inputIndex, const FrameRange & fr) override final
{
if (frameRange.IsAllFrames())
if (fr.IsAllFrames())
BackpropToNonLooping(inputIndex);
else
LogicError("%s node should never be in a loop.", typeid(*this).name());
@ -1421,7 +1414,6 @@ namespace Microsoft { namespace MSR { namespace CNTK {
virtual void Load(File& /*fstream*/, size_t /*modelVersion*/) override { NOT_IMPLEMENTED; }
virtual void CopyTo(ComputationNodeBasePtr node, const std::wstring& newName, const CopyNodeFlags flags) const override { NOT_IMPLEMENTED; }
virtual ComputationNodeBasePtr Duplicate(const std::wstring& newName, const CopyNodeFlags flags) override { NOT_IMPLEMENTED; }
//virtual void SetDims(size_t rows, size_t cols) override { NOT_IMPLEMENTED; }
virtual double Get00Element() const override { NOT_IMPLEMENTED; }
virtual void UpdateFunctionMBSize() override { NOT_IMPLEMENTED; }
virtual void VerifyDimsMatch() const override { NOT_IMPLEMENTED; }
@ -1429,7 +1421,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
virtual void PrintSelf(bool) const override { NOT_IMPLEMENTED; }
virtual void ValidateInferInputDims(size_t,size_t,size_t) override { NOT_IMPLEMENTED; }
virtual void SetInput(const size_t,const Microsoft::MSR::CNTK::ComputationNodeBase::ComputationNodeBasePtr &) override { NOT_IMPLEMENTED; }
virtual void ClearGradientOfInputs(void) override { NOT_IMPLEMENTED; }
virtual void ZeroGradientsOfInputs(void) override { NOT_IMPLEMENTED; }
virtual void MaskMissingValuesColumnsToZero(const Microsoft::MSR::CNTK::FrameRange &) override { NOT_IMPLEMENTED; }
virtual void MaskMissingGradientColumnsToZero(const Microsoft::MSR::CNTK::FrameRange &) override { NOT_IMPLEMENTED; }
virtual void InvalidateMissingValuesColumns(const Microsoft::MSR::CNTK::FrameRange &) override { NOT_IMPLEMENTED; }
@ -1489,14 +1481,14 @@ protected: \
using Base::SetDims; /*using Base::NotifyFunctionValuesMBSizeModified;*/ using Base::GetNumRows; using Base::GetNumCols; using Base::UpdateFunctionValuesSize; using Base::LoadFunctionValues; \
using Base::m_pMBLayout; using Base::GetNumTimeSteps; using Base::GetNumParallelSequences; \
using Base::MaskMissingColumnsToZero; using Base::MaskMissingValuesColumnsToZero; using Base::MaskMissingGradientColumnsToZero; using Base::InvalidateMissingValuesColumns; using Base::InvalidateMissingGradientColumns; \
using Base::DataFor; using Base::OutputFor; using Base::GradientValues; using Base::GradientValuesPtr; using Base::GradientFor; using Base::MaskedValueSlice; using Base::MaskedGradientSlice; \
using Base::DataFor; using Base::OutputFor; using Base::GradientValues; using Base::GradientFor; using Base::MaskedValueSlice; using Base::MaskedGradientSlice; \
using Base::ForwardProp; using Base::BackpropTo; \
using Base::m_inputs; using Base::m_deviceId; using Base::m_output; using Base::m_gradientValues; \
using Base::m_inputImageLayout; using Base::m_sampleLayout; \
using Base::m_parameterUpdateRequired; using Base::m_nodeName; \
using Base::CreateMatrixIfNull; using Base::RequestMatrixFromPool; using Base::ReleaseMatrixToPool; \
using Base::CreateUniqId; \
using Base::GetNumInputs; using Base::ClearGradientOfInputs; using Base::VerifyDims; \
using Base::GetNumInputs; using Base::ZeroGradientsOfInputs; using Base::VerifyDims; \
using Base::ConstOnes; \
using Base::GetImageLayout; using Base::InferImageDimsFromInput; using Base::InferImageDimsFromInputs; using Base::InferMBLayoutFromInputsForStandardCase; \
using Base::CopyTo; using Base::CreateUniqNodeName; using Base::DetachInputs; using Base::GetInputsFromConfig; \
@ -1507,12 +1499,12 @@ protected: \
using Base::Load; \
using Base::PrintNodeValuesToFile; using Base::PrintSelfBeforeValidation; \
using Base::Save; using Base::UpdateFunctionMBSize; \
using Base::RequestMatricesBeforeEval; using Base::ReleaseMatricesAfterEval; \
using Base::RequestMatricesBeforeGradientComp; using Base::ReleaseMatricesAfterGradientComp; \
using Base::RequestMatricesBeforeForwardProp; using Base::ReleaseMatricesAfterForwardProp; \
using Base::RequestMatricesBeforeBackprop; using Base::ReleaseMatricesAfterBackprop; \
using Base::Validate; using Base::ValidateUnaryMap; using Base::ValidateBinaryZip; using Base::ValidateUnaryReduce; using Base::ValidateBinaryReduce; using Base::ValidateInferBinaryInputDims; using Base::ValidateInferInputDims; \
public: \
using Base::RequiresPreCompute; \
using Base::AttachInputs; using Base::NodeName; \
using Base::AttachInputs; using Base::CreateGradientMatrixIfNull; using Base::NodeName; \
using Base::Output;
#define ComputationNodeBoilerplate \

Просмотреть файл

@ -102,16 +102,16 @@ namespace Microsoft { namespace MSR { namespace CNTK {
}
}
virtual void /*ComputationNode::*/BackpropTo(const size_t inputIndex, const FrameRange & frameRange) override
virtual void /*ComputationNode::*/BackpropTo(const size_t inputIndex, const FrameRange & fr) override
{
Matrix<ElemType> sliceOutputGrad = GradientFor(frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> sliceInput1Value = Input(1)->OutputFor(frameRange/*TODO: delete this:*/.Check_t(Input(1)->GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> sliceOutputGrad = GradientFor(fr);
Matrix<ElemType> sliceInput1Value = Input(1)->OutputFor(fr);
if (inputIndex == 0) //derivative with regard to the weight matrix
BackpropToOverWeight(sliceOutputGrad, Input(0)->GradientValues(), Input(0)->Output(), sliceInput1Value, *m_tempMatrix, !frameRange.IsAllFrames());
BackpropToOverWeight(sliceOutputGrad, Input(0)->GradientValues(), Input(0)->Output(), sliceInput1Value, *m_tempMatrix, !fr.IsAllFrames());
else if (inputIndex == 1) // derivative with regard to the input feature
{
Matrix<ElemType> sliceInput1Grad = Input(1)->GradientFor(frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> sliceInput1Grad = Input(1)->GradientFor(fr);
BackpropToOverInputFeature(sliceOutputGrad, sliceInput1Grad, Input(0)->Output(), sliceInput1Value, *m_tempMatrix);
}
}
@ -207,10 +207,10 @@ namespace Microsoft { namespace MSR { namespace CNTK {
}
public:
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & frameRange) override
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & fr) override
{
Matrix<ElemType> sliceInput1Value = Input(1)->OutputFor(frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> sliceOutputValue = OutputFor(frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> sliceInput1Value = Input(1)->OutputFor(fr);
Matrix<ElemType> sliceOutputValue = OutputFor(fr);
ForwardPropS(sliceOutputValue, Input(0)->Output(), sliceInput1Value, *m_tempMatrix);
}
@ -374,16 +374,16 @@ namespace Microsoft { namespace MSR { namespace CNTK {
}
//request matrices needed to do node function value evaluation
virtual void RequestMatricesBeforeEval(MatrixPool& matrixPool)
virtual void RequestMatricesBeforeForwardProp(MatrixPool& matrixPool)
{
Base::RequestMatricesBeforeEval(matrixPool);
Base::RequestMatricesBeforeForwardProp(matrixPool);
RequestMatrixFromPool(m_tempMatrix, matrixPool);
}
//release gradient and temp matrices that no longer needed after all the children's gradients are computed.
virtual void ReleaseMatricesAfterGradientComp(MatrixPool& matrixPool)
virtual void ReleaseMatricesAfterBackprop(MatrixPool& matrixPool)
{
Base::ReleaseMatricesAfterGradientComp(matrixPool);
Base::ReleaseMatricesAfterBackprop(matrixPool);
ReleaseMatrixToPool(m_tempMatrix, matrixPool);
}
@ -458,13 +458,13 @@ namespace Microsoft { namespace MSR { namespace CNTK {
}
}
virtual void /*ComputationNode::*/BackpropTo(const size_t /*inputIndex*/, const FrameRange & frameRange) override
virtual void /*ComputationNode::*/BackpropTo(const size_t /*inputIndex*/, const FrameRange & fr) override
{
Matrix<ElemType> sliceInput0Grad = Input(0)->GradientFor(frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> sliceOutputGrad = GradientFor(frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> sliceInput0Grad = Input(0)->GradientFor(fr);
Matrix<ElemType> sliceOutputGrad = GradientFor(fr);
Matrix<ElemType> sliceInput0Value = Input(0)->OutputFor(frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> sliceOutputValue = OutputFor(frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> sliceInput0Value = Input(0)->OutputFor(fr);
Matrix<ElemType> sliceOutputValue = OutputFor(fr);
BackpropToV(sliceOutputGrad, sliceInput0Grad, sliceInput0Value, sliceOutputValue);
}
@ -472,10 +472,10 @@ namespace Microsoft { namespace MSR { namespace CNTK {
// this function must be overriden by Max or AveragePoolingNode
virtual void BackpropToV(const Matrix<ElemType> &gradientValues, Matrix<ElemType> &inputGradientValues, const Matrix<ElemType> &input0, const Matrix<ElemType> &functionValues) = 0;
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & frameRange) override
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & fr) override
{
Matrix<ElemType> sliceInput0Value = Input(0)->OutputFor(frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> sliceOutputValue = OutputFor(frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> sliceInput0Value = Input(0)->OutputFor(fr);
Matrix<ElemType> sliceOutputValue = OutputFor(fr);
ForwardPropV(sliceOutputValue, sliceInput0Value);
}

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -38,11 +38,11 @@ namespace Microsoft { namespace MSR { namespace CNTK {
virtual void /*ComputationNodeNonLooping::*/ForwardPropNonLooping() override
{
FrameRange frameRange(Input(0)->GetMBLayout());
Input(0)->OutputFor(frameRange).VectorMax(*m_maxIndexes0, *m_maxValues, true);
Input(1)->OutputFor(frameRange).VectorMax(*m_maxIndexes1, *m_maxValues, true, m_topK);
MaskMissingColumnsToZero(*m_maxIndexes0, Input(0)->GetMBLayout(), frameRange);
MaskMissingColumnsToZero(*m_maxIndexes1, Input(1)->GetMBLayout(), frameRange);
FrameRange fr(Input(0)->GetMBLayout());
Input(0)->OutputFor(fr).VectorMax(*m_maxIndexes0, *m_maxValues, true);
Input(1)->OutputFor(fr).VectorMax(*m_maxIndexes1, *m_maxValues, true, m_topK);
MaskMissingColumnsToZero(*m_maxIndexes0, Input(0)->GetMBLayout(), fr);
MaskMissingColumnsToZero(*m_maxIndexes1, Input(1)->GetMBLayout(), fr);
Output().AssignNumOfDiff(*m_maxIndexes0, *m_maxIndexes1, m_topK > 1);
#if NANCHECK
Output().HasNan("ErrorPrediction");
@ -91,9 +91,9 @@ namespace Microsoft { namespace MSR { namespace CNTK {
}
}
//request matrices needed to do node function value evaluation
virtual void RequestMatricesBeforeEval(MatrixPool& matrixPool)
virtual void RequestMatricesBeforeForwardProp(MatrixPool& matrixPool)
{
Base::RequestMatricesBeforeEval(matrixPool);
Base::RequestMatricesBeforeForwardProp(matrixPool);
RequestMatrixFromPool(m_maxIndexes0, matrixPool);
RequestMatrixFromPool(m_maxIndexes1, matrixPool);
RequestMatrixFromPool(m_maxValues, matrixPool);
@ -101,9 +101,9 @@ namespace Microsoft { namespace MSR { namespace CNTK {
//release temp matrices that are only used by forward computation
//don't release matrices that need to be used in the gradient computation
virtual void ReleaseMatricesAfterEval(MatrixPool& matrixPool)
virtual void ReleaseMatricesAfterForwardProp(MatrixPool& matrixPool)
{
Base::ReleaseMatricesAfterEval(matrixPool);
Base::ReleaseMatricesAfterForwardProp(matrixPool);
ReleaseMatrixToPool(m_maxIndexes0, matrixPool);
ReleaseMatrixToPool(m_maxIndexes1, matrixPool);
ReleaseMatrixToPool(m_maxValues, matrixPool);

Просмотреть файл

@ -181,12 +181,13 @@ namespace Microsoft { namespace MSR { namespace CNTK {
}
};
#if 0
// -----------------------------------------------------------------------
// SparseLearnableParameter (/*no input*/)
// -----------------------------------------------------------------------
//WARNING: Don't use SparseLearnableParameter yet since the current version assumes the parameter is dense instead of sparse
//WARNING: After the right implementation is put here we need to turn it on in NetworkDescriptionLangauge.cpp
// WARNING: Don't use SparseLearnableParameter yet since the current version assumes the parameter is dense instead of sparse
// WARNING: After the right implementation is put here we need to turn it on in NetworkDescriptionLangauge.cpp
template<class ElemType>
class SparseLearnableParameter : public LearnableParameter<ElemType>
{
@ -218,6 +219,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
template class SparseLearnableParameter<float>;
template class SparseLearnableParameter<double>;
#endif
// -----------------------------------------------------------------------
// InputValueBase (/*no input*/)
@ -568,82 +570,4 @@ namespace Microsoft { namespace MSR { namespace CNTK {
template class LookupTableNode<float>;
template class LookupTableNode<double>;
// -----------------------------------------------------------------------
// PairNetworkNode (input)
// -----------------------------------------------------------------------
/**
pair this node to a node in another network
this node provide an interface from this network. The next layer network then can use this interface to know which node to connect to.
*/
template<class ElemType>
class PairNetworkNode : public ComputationNode<ElemType>, public NumInputs<1>
{
typedef ComputationNode<ElemType> Base; UsingComputationNodeMembersBoilerplate;
static const std::wstring TypeName() { return L"PairNetwork"; }
void Init(size_t row_size, size_t col_size)
{
CreateMatrixIfNull(m_output);
SetDims(row_size, col_size);
UpdateFunctionValuesSize();
}
public:
DeclareConstructorFromConfigWithNumInputs(PairNetworkNode);
PairNetworkNode(DEVICEID_TYPE deviceId, const wstring & name, size_t row_size = 1, size_t col_size = 1) :
Base(deviceId, name)
{
Init(row_size, col_size);
CreateMatrixIfNull(m_gradientValues);
m_gradientValues->Resize(row_size, col_size);
m_gradientValues->SetValue(0.0f);
}
virtual void Load(File& fstream, size_t modelVersion) override
{
Init(1, 1); // TODO: this looks wrong; should the dimension not come from the loaded model data?
Base::Load(fstream, modelVersion);
}
/// to-do: need to change to the new way of resetting state
void BackpropToMap(const size_t inputIndex)
{
if (inputIndex > 0)
InvalidArgument("PairNetwork operation only takes one input.");
Matrix<ElemType>::ScaleAndAdd(1.0, GradientValues(), Input(inputIndex)->GradientValues());
}
virtual void /*ComputationNode::*/BackpropTo(const size_t inputIndex, const FrameRange & frameRange) override
{
if (frameRange.IsAllFrames()) { BackpropToMap(inputIndex); return; } // TODO: remove these one by one
assert(m_output->GetNumRows() == GradientValues().GetNumRows()); // original used m_output->GetNumRows() for loop dimension
assert(m_pMBLayout);
Matrix<ElemType> mTmp = Input(inputIndex)->GradientFor(frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType>::ScaleAndAdd(1.0, GradientFor(frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout)), mTmp);
}
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & frameRange) override
{
Matrix<ElemType> mTmp = OutputFor(frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
mTmp.SetValue(Input(0)->OutputFor(frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout)));
}
virtual void /*ComputationNodeBase::*/Validate(bool isFinalValidationPass) override
{
Base::Validate(isFinalValidationPass);
size_t rows0 = Input(0)->GetNumRows(), cols0 = Input(0)->GetNumCols();
if (rows0 > 0 && cols0 > 0) // TODO: is this check needed?
SetDims(Input(0));
InferMBLayoutFromInputsForStandardCase();
InferImageDimsFromInputs();
}
};
template class PairNetworkNode<float>;
template class PairNetworkNode<double>;
}}}

Просмотреть файл

@ -40,12 +40,12 @@ namespace Microsoft { namespace MSR { namespace CNTK {
Base(deviceId, name)
{ }
virtual void /*ComputationNode::*/BackpropTo(const size_t inputIndex, const FrameRange & frameRange) override
virtual void /*ComputationNode::*/BackpropTo(const size_t inputIndex, const FrameRange & fr) override
{
Matrix<ElemType> gradientValues = GradientFor(frameRange);
Matrix<ElemType> functionValues = OutputFor(frameRange);
Matrix<ElemType> inputGradientValues = Input(inputIndex)->GradientFor(frameRange.AllowBroadcast());
Matrix<ElemType> inputFunctionValues = Input(inputIndex)->OutputFor(frameRange.AllowBroadcast());
Matrix<ElemType> gradientValues = GradientFor(fr);
Matrix<ElemType> functionValues = OutputFor(fr);
Matrix<ElemType> inputGradientValues = Input(inputIndex)->GradientFor(fr.AllowBroadcast());
Matrix<ElemType> inputFunctionValues = Input(inputIndex)->OutputFor(fr.AllowBroadcast());
#if DUMPOUTPUT
functionValues.Print("PlusNode");
@ -58,20 +58,20 @@ namespace Microsoft { namespace MSR { namespace CNTK {
inputGradientValues.Print("child Gradient-in/out");
#endif
if (colsc == colsp && rowsc == rowsp) // matching dimensions --this may also trigger for column vector added to a frame, if frameRange denotes a single frame
if (colsc == colsp && rowsc == rowsp) // matching dimensions --this may also trigger for column vector added to a frame, if fr denotes a single frame
{
// BUGBUG: if we reduce from a frame of a MB into a one-column vector, then we must also mask gaps
inputGradientValues += gradientValues;
}
else if (colsc == 1 && rowsc == 1) // child is a scalar
{
MaskMissingGradientColumnsToZero(frameRange); // reducing over frames, so we must zero out the gaps
MaskMissingGradientColumnsToZero(fr); // reducing over frames, so we must zero out the gaps
inputGradientValues += gradientValues.SumOfElements();
}
else if (colsc == 1 && colsp != 1) // child is a broadcasting column vector
{
size_t colspExpand = rowsp*colsp/rowsc;
MaskMissingGradientColumnsToZero(frameRange); // reducing over frames, so we must zero out the gaps
MaskMissingGradientColumnsToZero(fr); // reducing over frames, so we must zero out the gaps
Matrix<ElemType>::MultiplyAndAdd(gradientValues.Reshaped(rowsc, colspExpand), false, ConstOnes(colspExpand, 1, functionValues.GetDeviceId()), false, inputGradientValues);
}
else if (rowsc == 1 && rowsp != 1) // child is a broadcasting row vector
@ -100,11 +100,11 @@ namespace Microsoft { namespace MSR { namespace CNTK {
#endif
}
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & frameRange) override
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & fr) override
{
Matrix<ElemType> functionValues = ValueSliceToDense(frameRange, false); // Switch to dense as a work-around because ColumnSlice doesn't support all the sparse formats
Matrix<ElemType> inputFunctionValues0 = Input(0)->OutputFor(frameRange.AllowBroadcast());
Matrix<ElemType> inputFunctionValues1 = Input(1)->OutputFor(frameRange.AllowBroadcast());
Matrix<ElemType> functionValues = ValueSliceToDense(fr, false); // Switch to dense as a work-around because ColumnSlice doesn't support all the sparse formats
Matrix<ElemType> inputFunctionValues0 = Input(0)->OutputFor(fr.AllowBroadcast());
Matrix<ElemType> inputFunctionValues1 = Input(1)->OutputFor(fr.AllowBroadcast());
// Note: If one input is a column vector (no MBLayout) and the other a sequence of frames (MBLayout), then the above will be a slice for the other only.
size_t rows0 = inputFunctionValues0.GetNumRows(), cols0 = inputFunctionValues0.GetNumCols();
@ -190,13 +190,13 @@ namespace Microsoft { namespace MSR { namespace CNTK {
Base(deviceId, name)
{ }
virtual void /*ComputationNode::*/BackpropTo(const size_t inputIndex, const FrameRange & frameRange) override
virtual void /*ComputationNode::*/BackpropTo(const size_t inputIndex, const FrameRange & fr) override
{
Matrix<ElemType> gradientValues = GradientFor(frameRange);
Matrix<ElemType> functionValues = OutputFor(frameRange);
Matrix<ElemType> gradientValues = GradientFor(fr);
Matrix<ElemType> functionValues = OutputFor(fr);
Matrix<ElemType> childGradientValues = Input(inputIndex)->GradientFor(frameRange.AllowBroadcast());
Matrix<ElemType> childFunctionValues = Input(inputIndex)->OutputFor(frameRange.AllowBroadcast());
Matrix<ElemType> childGradientValues = Input(inputIndex)->GradientFor(fr.AllowBroadcast());
Matrix<ElemType> childFunctionValues = Input(inputIndex)->OutputFor(fr.AllowBroadcast());
size_t rowsc = childFunctionValues.GetNumRows(), colsc = childFunctionValues.GetNumCols();
size_t rowsp = functionValues.GetNumRows(), colsp = functionValues.GetNumCols();
@ -212,7 +212,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
}
else if (colsc == 1 && rowsc == 1) // child is a scalar (1 x 1)
{
MaskMissingGradientColumnsToZero(frameRange); // reducing over frames, so we must zero out the gaps
MaskMissingGradientColumnsToZero(fr); // reducing over frames, so we must zero out the gaps
if (sign > 0)
childGradientValues += gradientValues.SumOfElements();
else
@ -221,7 +221,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
else if (colsc == 1 && colsp != 1) // child is broadcasting column vector
{
size_t colspExpand = rowsp * colsp / rowsc;
MaskMissingGradientColumnsToZero(frameRange); // reducing over frames, so we must zero out the gaps
MaskMissingGradientColumnsToZero(fr); // reducing over frames, so we must zero out the gaps
Matrix<ElemType>::MultiplyAndWeightedAdd(sign, gradientValues.Reshaped(rowsc, colspExpand), false, ConstOnes(colspExpand, 1, Output().GetDeviceId()), false, 1, childGradientValues);
}
else if (rowsc == 1 && rowsp != 1) // child is a broadcasting row vector
@ -232,11 +232,11 @@ namespace Microsoft { namespace MSR { namespace CNTK {
LogicError("%ls %ls operation's Validate() function let invalid dimensions slip by.", NodeName().c_str(), OperationName().c_str());
}
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & frameRange) override
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & fr) override
{
Matrix<ElemType> functionValues = OutputFor(frameRange);
Matrix<ElemType> inputFunctionValues0 = Input(0)->OutputFor(frameRange.AllowBroadcast());
Matrix<ElemType> inputFunctionValues1 = Input(1)->OutputFor(frameRange.AllowBroadcast());
Matrix<ElemType> functionValues = OutputFor(fr);
Matrix<ElemType> inputFunctionValues0 = Input(0)->OutputFor(fr.AllowBroadcast());
Matrix<ElemType> inputFunctionValues1 = Input(1)->OutputFor(fr.AllowBroadcast());
size_t rows0 = inputFunctionValues0.GetNumRows(), cols0 = inputFunctionValues0.GetNumCols();
size_t rows1 = inputFunctionValues1.GetNumRows(), cols1 = inputFunctionValues1.GetNumCols();
@ -302,25 +302,23 @@ namespace Microsoft { namespace MSR { namespace CNTK {
Base(deviceId, name)
{ }
virtual void /*ComputationNode::*/BackpropTo(const size_t inputIndex, const FrameRange & frameRange) override
virtual void /*ComputationNode::*/BackpropTo(const size_t inputIndex, const FrameRange & fr) override
{
if (inputIndex == 0) // left derivative
{
// this is a reduction over frames, so we must mask gaps to zero
Input(0)->GradientValues() += Matrix<ElemType>::InnerProductOfMatrices(MaskedGradientSlice(frameRange), Input(1)->MaskedValueSlice(frameRange)); // element-wise product summed up over all
Input(0)->GradientValues() += Matrix<ElemType>::InnerProductOfMatrices(MaskedGradientSlice(fr), Input(1)->MaskedValueSlice(fr)); // element-wise product summed up over all
}
else if (inputIndex == 1) // right derivative
{
Matrix<ElemType> sliceInput1Grad = Input(1)->GradientFor(frameRange);
//Matrix<ElemType>::ScaleAndAdd(Input(0)->Output().Get00Element(), GradientFor(frameRange), sliceInput1Grad);
Matrix<ElemType>::Multiply1x1AndWeightedAdd(+1.0f, Input(0)->Output()/*1x1*/, GradientFor(frameRange), 1.0f, sliceInput1Grad);
Matrix<ElemType> sliceInput1Grad = Input(1)->GradientFor(fr);
Matrix<ElemType>::Multiply1x1AndWeightedAdd(+1.0f, Input(0)->Output()/*1x1*/, GradientFor(fr), 1.0f, sliceInput1Grad);
}
}
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & frameRange) override
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & fr) override
{
//OutputFor(frameRange).AssignProductOf(Input(0)->Output().Get00Element(), Input(1)->OutputFor(frameRange));
OutputFor(frameRange).Assign1x1ProductOf(Input(0)->Output()/*1x1*/, Input(1)->OutputFor(frameRange));
OutputFor(fr).Assign1x1ProductOf(Input(0)->Output()/*1x1*/, Input(1)->OutputFor(fr));
}
virtual void /*ComputationNodeBase::*/Validate(bool isFinalValidationPass) override
@ -361,14 +359,14 @@ namespace Microsoft { namespace MSR { namespace CNTK {
Base(deviceId, name)
{ }
virtual void /*ComputationNode::*/BackpropTo(const size_t /*inputIndex*/, const FrameRange & frameRange) override
virtual void /*ComputationNode::*/BackpropTo(const size_t /*inputIndex*/, const FrameRange & fr) override
{
Input(0)->GradientFor(frameRange) -= GradientFor(frameRange);
Input(0)->GradientFor(fr) -= GradientFor(fr);
}
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & frameRange) override
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & fr) override
{
OutputFor(frameRange).AssignDifferenceOf(0, Input(0)->OutputFor(frameRange));
OutputFor(fr).AssignDifferenceOf(0, Input(0)->OutputFor(fr));
}
virtual void /*ComputationNodeBase::*/Validate(bool isFinalValidationPass) override
@ -398,13 +396,13 @@ namespace Microsoft { namespace MSR { namespace CNTK {
{
}
virtual void /*ComputationNode::*/BackpropTo(const size_t inputIndex, const FrameRange & frameRange) override
virtual void /*ComputationNode::*/BackpropTo(const size_t inputIndex, const FrameRange & fr) override
{
if (inputIndex == 0) // left derivative
{
// this potentially computes inner products over time, so we use the Masked- variants
Matrix<ElemType> sliceOutputGrad = MaskedGradientSlice(frameRange);
Matrix<ElemType> sliceInput1Value = Input(1)->MaskedValueSlice(frameRange);
Matrix<ElemType> sliceOutputGrad = MaskedGradientSlice(fr);
Matrix<ElemType> sliceInput1Value = Input(1)->MaskedValueSlice(fr);
// currently we only support one combination when the input is sparse.
if (sliceInput1Value.GetMatrixType() == SPARSE && Input(0)->GradientValues().GetMatrixType() == DENSE && sliceOutputGrad.GetMatrixType() == DENSE)
@ -414,21 +412,21 @@ namespace Microsoft { namespace MSR { namespace CNTK {
}
else // right derivative
{
Matrix<ElemType> sliceInput1Grad = Input(1)->GradientFor(frameRange);
Matrix<ElemType> sliceOutputGrad = GradientFor(frameRange);
Matrix<ElemType> sliceInput1Grad = Input(1)->GradientFor(fr);
Matrix<ElemType> sliceOutputGrad = GradientFor(fr);
Matrix<ElemType>::MultiplyAndAdd(Input(0)->Output(), true, sliceOutputGrad, false, sliceInput1Grad);
}
}
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & frameRange) override
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & fr) override
{
size_t rows0 = Input(0)->GetNumRows(), cols1 = Input(1)->GetNumCols();
VerifyDims(rows0, cols1);
// right operand and output can have MB layout, while left operand cannot
Matrix<ElemType> sliceInput1Value = Input(1)->OutputFor(frameRange);
Matrix<ElemType> sliceOutputValue = OutputFor(frameRange);
Matrix<ElemType> sliceInput1Value = Input(1)->OutputFor(fr);
Matrix<ElemType> sliceOutputValue = OutputFor(fr);
#if DUMPOUTPUT
Input(0)->Output().Print("TimesNode - Input0");
#endif
@ -481,18 +479,18 @@ namespace Microsoft { namespace MSR { namespace CNTK {
m_sampleLayout = ImageLayoutWHC(1, Input(0)->GetNumRows(), 1);
}
virtual void AllocateGradientMatricesForChildren(MatrixPool& matrixPool) override
virtual void AllocateGradientMatricesForInputs(MatrixPool& matrixPool) override
{
//this is a special handling case. We need to allocate sparse matrix directly instead of from pool.
// this is a special handling case. We need to allocate sparse matrix directly instead of from pool.
if (m_inputs[0]->NeedGradient() && Input(1)->Output().GetMatrixType() == SPARSE)
{
CreateMatrixIfNull(Input(0)->GradientValuesPtr());
Input(0)->CreateGradientMatrixIfNull();
Input(0)->GradientValues().SwitchToMatrixType(SPARSE, MatrixFormat::matrixFormatSparseBlockCol, false);
}
//we need to call base allocation at end since we will need to allocate special ones first
//so that the default allocator will not allocate it again.
Base::AllocateGradientMatricesForChildren(matrixPool);
// we need to call base allocation at end since we will need to allocate special ones first
// so that the default allocator will not allocate it again.
Base::AllocateGradientMatricesForInputs(matrixPool);
}
};
@ -516,20 +514,20 @@ namespace Microsoft { namespace MSR { namespace CNTK {
Base(deviceId, name)
{ }
virtual void /*ComputationNode::*/BackpropTo(const size_t inputIndex, const FrameRange & frameRange) override
virtual void /*ComputationNode::*/BackpropTo(const size_t inputIndex, const FrameRange & fr) override
{
if (inputIndex == 0) //left derivative
{
// this potentially computes inner products over time, so we use the Masked- variants
Matrix<ElemType> sliceOutputGrad = MaskedGradientSlice(frameRange);
Matrix<ElemType> sliceInput1Value = Input(1)->MaskedValueSlice(frameRange);
Matrix<ElemType> sliceOutputGrad = MaskedGradientSlice(fr);
Matrix<ElemType> sliceInput1Value = Input(1)->MaskedValueSlice(fr);
BackpropToLeft(sliceInput1Value, Input(0)->GradientValues(), sliceOutputGrad);
}
else //right derivative
{
Matrix<ElemType> sliceInput1Grad = Input(1)->GradientFor(frameRange);
Matrix<ElemType> sliceOutputGrad = GradientFor(frameRange);
Matrix<ElemType> sliceInput1Grad = Input(1)->GradientFor(fr);
Matrix<ElemType> sliceOutputGrad = GradientFor(fr);
BackpropToRight(Input(0)->Output(), sliceInput1Grad, sliceOutputGrad);
}
@ -568,10 +566,10 @@ namespace Microsoft { namespace MSR { namespace CNTK {
#endif
}
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & frameRange) override
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & fr) override
{
Matrix<ElemType> sliceInput1Value = Input(1)->OutputFor(frameRange);
Matrix<ElemType> sliceOutputValue = OutputFor(frameRange);
Matrix<ElemType> sliceInput1Value = Input(1)->OutputFor(fr);
Matrix<ElemType> sliceOutputValue = OutputFor(fr);
sliceOutputValue.AssignProductOf(Input(0)->Output(), true, sliceInput1Value, false);
}
@ -629,11 +627,11 @@ namespace Microsoft { namespace MSR { namespace CNTK {
Base(deviceId, name)
{ }
virtual void /*ComputationNode::*/BackpropTo(const size_t inputIndex, const FrameRange & frameRange) override
virtual void /*ComputationNode::*/BackpropTo(const size_t inputIndex, const FrameRange & fr) override
{
Matrix<ElemType> sliceInput0Grad = Input(inputIndex)->GradientFor(frameRange);
Matrix<ElemType> sliceOutputGrad = GradientFor(frameRange);
Matrix<ElemType> sliceInput1Value = Input(1-inputIndex)->OutputFor(frameRange);
Matrix<ElemType> sliceInput0Grad = Input(inputIndex)->GradientFor(fr);
Matrix<ElemType> sliceOutputGrad = GradientFor(fr);
Matrix<ElemType> sliceInput1Value = Input(1-inputIndex)->OutputFor(fr);
// depending on inputIndex, all the input variables change meaning
// inputIndex == 0 (left) - inputGradientValues[0], inputFunctionValues[1]
@ -641,11 +639,11 @@ namespace Microsoft { namespace MSR { namespace CNTK {
sliceInput0Grad.AddElementProductOf(sliceOutputGrad, sliceInput1Value);
}
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & frameRange) override
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & fr) override
{
Matrix<ElemType> sliceInput0Value = Input(0)->OutputFor(frameRange);
Matrix<ElemType> sliceInput1Value = Input(1)->OutputFor(frameRange);
Matrix<ElemType> sliceOutputValue = OutputFor(frameRange);
Matrix<ElemType> sliceInput0Value = Input(0)->OutputFor(fr);
Matrix<ElemType> sliceInput1Value = Input(1)->OutputFor(fr);
Matrix<ElemType> sliceOutputValue = OutputFor(fr);
//ForwardPropS(sliceOutputValue, sliceInput0Value, sliceInput1Value);
sliceOutputValue.AssignElementProductOf(sliceInput0Value, sliceInput1Value);
@ -698,13 +696,13 @@ namespace Microsoft { namespace MSR { namespace CNTK {
}
}
virtual void /*ComputationNode::*/BackpropTo(const size_t inputIndex, const FrameRange & frameRange) override
virtual void /*ComputationNode::*/BackpropTo(const size_t inputIndex, const FrameRange & fr) override
{
if (frameRange.IsAllFrames()) { BackpropToMap(inputIndex); return; } // TODO: remove these one by one
Matrix<ElemType> sliceInput0Grad = Input(inputIndex)->GradientFor(frameRange);
Matrix<ElemType> sliceOutputGrad = GradientFor(frameRange);
if (fr.IsAllFrames()) { BackpropToMap(inputIndex); return; } // TODO: remove these one by one
Matrix<ElemType> sliceInput0Grad = Input(inputIndex)->GradientFor(fr);
Matrix<ElemType> sliceOutputGrad = GradientFor(fr);
Matrix<ElemType> sliceInput1Value = Input(1 - inputIndex)->OutputFor(frameRange);
Matrix<ElemType> sliceInput1Value = Input(1 - inputIndex)->OutputFor(fr);
if (inputIndex == 0)
{
@ -749,12 +747,12 @@ namespace Microsoft { namespace MSR { namespace CNTK {
ForwardPropS(Output(), Input(0)->Output(), Input(1)->Output());
}
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & frameRange) override
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & fr) override
{
//if (frameRange.IsAllFrames()) { ForwardPropMap(); return; }
Matrix<ElemType> sliceInput0Value = Input(0)->OutputFor(frameRange);
Matrix<ElemType> sliceInput1Value = Input(1)->OutputFor(frameRange);
Matrix<ElemType> sliceOutputValue = OutputFor(frameRange);
//if (fr.IsAllFrames()) { ForwardPropMap(); return; }
Matrix<ElemType> sliceInput0Value = Input(0)->OutputFor(fr);
Matrix<ElemType> sliceInput1Value = Input(1)->OutputFor(fr);
Matrix<ElemType> sliceOutputValue = OutputFor(fr);
ForwardPropS(sliceOutputValue, sliceInput0Value, sliceInput1Value);
}
@ -790,16 +788,16 @@ namespace Microsoft { namespace MSR { namespace CNTK {
}
//request matrices that are needed for gradient computation
virtual void RequestMatricesBeforeGradientComp(MatrixPool& matrixPool)
virtual void RequestMatricesBeforeBackprop(MatrixPool& matrixPool)
{
Base::RequestMatricesBeforeGradientComp(matrixPool);
Base::RequestMatricesBeforeBackprop(matrixPool);
RequestMatrixFromPool(m_tempMatrix, matrixPool);
}
//release gradient and temp matrices that no longer needed after all the children's gradients are computed.
virtual void ReleaseMatricesAfterGradientComp(MatrixPool& matrixPool)
virtual void ReleaseMatricesAfterBackprop(MatrixPool& matrixPool)
{
Base::ReleaseMatricesAfterGradientComp(matrixPool);
Base::ReleaseMatricesAfterBackprop(matrixPool);
ReleaseMatrixToPool(m_tempMatrix, matrixPool);
}
@ -840,20 +838,20 @@ namespace Microsoft { namespace MSR { namespace CNTK {
}
}
virtual void /*ComputationNode::*/BackpropTo(const size_t inputIndex, const FrameRange & frameRange) override
virtual void /*ComputationNode::*/BackpropTo(const size_t inputIndex, const FrameRange & fr) override
{
if (frameRange.IsAllFrames()) { BackpropToMap(inputIndex); return; } // TODO: remove these one by one
Matrix<ElemType> sliceOutputGrad = GradientFor(frameRange);
if (fr.IsAllFrames()) { BackpropToMap(inputIndex); return; } // TODO: remove these one by one
Matrix<ElemType> sliceOutputGrad = GradientFor(fr);
if (inputIndex == 0)
{
Matrix<ElemType> sliceInput0Grad = Input(0)->GradientFor(frameRange);
Matrix<ElemType> sliceInput0Grad = Input(0)->GradientFor(fr);
BackpropToLeftS(Input(1)->Output(), sliceInput0Grad, sliceOutputGrad, *m_tempMatrix);
}
else
{
Matrix<ElemType> sliceInput0Value = Input(0)->OutputFor(frameRange);
Matrix<ElemType> sliceInput0Value = Input(0)->OutputFor(fr);
BackpropToRightS(sliceInput0Value, Input(1)->GradientValues(), sliceOutputGrad, *m_tempMatrix);
}
}
@ -891,11 +889,11 @@ namespace Microsoft { namespace MSR { namespace CNTK {
ForwardPropS(Output(), Input(0)->Output(), Input(1)->Output());
}
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & frameRange) override
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & fr) override
{
//if (frameRange.IsAllFrames()) { ForwardPropMap(); return; }
Matrix<ElemType> sliceInput0Value = Input(0)->OutputFor(frameRange);
Matrix<ElemType> sliceOutputValue = OutputFor(frameRange);
//if (fr.IsAllFrames()) { ForwardPropMap(); return; }
Matrix<ElemType> sliceInput0Value = Input(0)->OutputFor(fr);
Matrix<ElemType> sliceOutputValue = OutputFor(fr);
ForwardPropS(sliceOutputValue, sliceInput0Value, Input(1)->Output());
}
@ -939,16 +937,16 @@ namespace Microsoft { namespace MSR { namespace CNTK {
}
//request matrices that are needed for gradient computation
virtual void RequestMatricesBeforeGradientComp(MatrixPool& matrixPool)
virtual void RequestMatricesBeforeBackprop(MatrixPool& matrixPool)
{
Base::RequestMatricesBeforeGradientComp(matrixPool);
Base::RequestMatricesBeforeBackprop(matrixPool);
RequestMatrixFromPool(m_tempMatrix, matrixPool);
}
//release gradient and temp matrices that no longer needed after all the children's gradients are computed.
virtual void ReleaseMatricesAfterGradientComp(MatrixPool& matrixPool)
virtual void ReleaseMatricesAfterBackprop(MatrixPool& matrixPool)
{
Base::ReleaseMatricesAfterGradientComp(matrixPool);
Base::ReleaseMatricesAfterBackprop(matrixPool);
ReleaseMatrixToPool(m_tempMatrix, matrixPool);
}
@ -974,19 +972,19 @@ namespace Microsoft { namespace MSR { namespace CNTK {
Base(deviceId, name)
{ }
virtual void /*ComputationNode::*/BackpropTo(const size_t inputIndex, const FrameRange & frameRange) override
virtual void /*ComputationNode::*/BackpropTo(const size_t inputIndex, const FrameRange & fr) override
{
if (inputIndex == 0) // left derivative
{
Matrix<ElemType> sliceOutputGrad = MaskedGradientSlice(frameRange); // use Masked- version since this is reducing over frames
Matrix<ElemType> sliceInput1Value = Input(1)->MaskedValueSlice(frameRange);
Matrix<ElemType> sliceOutputGrad = MaskedGradientSlice(fr); // use Masked- version since this is reducing over frames
Matrix<ElemType> sliceInput1Value = Input(1)->MaskedValueSlice(fr);
m_innerproduct->AssignInnerProductOf(sliceOutputGrad, sliceInput1Value, false);
Input(0)->GradientValues() += *m_innerproduct;
}
else // right derivative
{
Matrix<ElemType> sliceOutputGrad = GradientFor(frameRange);
Matrix<ElemType> sliceInput1Grad = Input(1)->GradientFor(frameRange);
Matrix<ElemType> sliceOutputGrad = GradientFor(fr);
Matrix<ElemType> sliceInput1Grad = Input(1)->GradientFor(fr);
m_rightGradient->SetValue(sliceOutputGrad);
m_rightGradient->ColumnElementMultiplyWith(Input(0)->Output());
sliceInput1Grad += *m_rightGradient;
@ -1006,10 +1004,10 @@ namespace Microsoft { namespace MSR { namespace CNTK {
// inputGradientValues += temp;
//}
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & frameRange) override
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & fr) override
{
Matrix<ElemType> sliceInput1Value = Input(1)->OutputFor(frameRange);
Matrix<ElemType> sliceOutputValue = OutputFor(frameRange);
Matrix<ElemType> sliceInput1Value = Input(1)->OutputFor(fr);
Matrix<ElemType> sliceOutputValue = OutputFor(fr);
sliceOutputValue.SetValue(sliceInput1Value);
sliceOutputValue.ColumnElementMultiplyWith(Input(0)->Output());
@ -1056,17 +1054,17 @@ namespace Microsoft { namespace MSR { namespace CNTK {
}
}
//request matrices that are needed for gradient computation
virtual void RequestMatricesBeforeGradientComp(MatrixPool& matrixPool)
virtual void RequestMatricesBeforeBackprop(MatrixPool& matrixPool)
{
Base::RequestMatricesBeforeGradientComp(matrixPool);
Base::RequestMatricesBeforeBackprop(matrixPool);
RequestMatrixFromPool(m_innerproduct, matrixPool);
RequestMatrixFromPool(m_rightGradient, matrixPool);
}
//release gradient and temp matrices that no longer needed after all the children's gradients are computed.
virtual void ReleaseMatricesAfterGradientComp(MatrixPool& matrixPool)
virtual void ReleaseMatricesAfterBackprop(MatrixPool& matrixPool)
{
Base::ReleaseMatricesAfterGradientComp(matrixPool);
Base::ReleaseMatricesAfterBackprop(matrixPool);
ReleaseMatrixToPool(m_innerproduct, matrixPool);
ReleaseMatrixToPool(m_rightGradient, matrixPool);
}
@ -1094,15 +1092,14 @@ private:
Base(deviceId, name)
{ }
virtual void /*ComputationNode::*/BackpropTo(const size_t /*inputIndex*/, const FrameRange & frameRange) override
virtual void /*ComputationNode::*/BackpropTo(const size_t /*inputIndex*/, const FrameRange & fr) override
{
// BUGBUG: In the future we may want to allow this to operate on a scalar that is one step of an outer time loop.
Input(0)->GradientFor(frameRange) += GradientValues(); // here the assumption is that gradientValues are 1x1 matrix
Input(0)->GradientFor(fr) += GradientValues(); // here the assumption is that gradientValues are 1x1 matrix
}
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & frameRange) override
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & fr) override
{
Output().AssignSumOfElements(Input(0)->MaskedValueSlice(frameRange)); // since we are reducing over frames, we must first mask gaps in input to zero
Output().AssignSumOfElements(Input(0)->MaskedValueSlice(fr)); // since we are reducing over frames, we must first mask gaps in input to zero
}
virtual void /*ComputationNodeBase::*/Validate(bool isFinalValidationPass) override
@ -1141,18 +1138,18 @@ private:
Base(deviceId, name)
{ }
virtual void /*ComputationNode::*/BackpropTo(const size_t /*inputIndex*/, const FrameRange & frameRange) override
virtual void /*ComputationNode::*/BackpropTo(const size_t /*inputIndex*/, const FrameRange & fr) override
{
Matrix<ElemType> sliceInputGrad = Input(0)->GradientFor(frameRange);
Matrix<ElemType> sliceOutputGrad = GradientFor(frameRange);
Matrix<ElemType> sliceInputGrad = Input(0)->GradientFor(fr);
Matrix<ElemType> sliceOutputGrad = GradientFor(fr);
sliceInputGrad += sliceOutputGrad; // here the assumption is that gradientValues is a row vector
}
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & frameRange) override
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & fr) override
{
Matrix<ElemType> sliceInputValue = Input(0)->OutputFor(frameRange);
Matrix<ElemType> sliceOutputValue = OutputFor(frameRange);
Matrix<ElemType> sliceInputValue = Input(0)->OutputFor(fr);
Matrix<ElemType> sliceOutputValue = OutputFor(fr);
//ForwardPropS(sliceOutputValue, sliceInputValue);
Matrix<ElemType>::VectorSum(sliceInputValue, sliceOutputValue, true);
@ -1374,7 +1371,7 @@ private:
Base(deviceId, name)
{ }
virtual void /*ComputationNode::*/BackpropTo(const size_t inputIndex, const FrameRange & frameRange) override
virtual void /*ComputationNode::*/BackpropTo(const size_t inputIndex, const FrameRange & fr) override
{
// functionValues, invNorm0, invNorm1 - output from the EvaluateNode() method
// temp, rightTerm, leftTerm - temporary matrices
@ -1383,25 +1380,25 @@ private:
else //right derivative
m_temp->AssignElementProductOf(*m_invNorm1, *m_invNorm1);
m_temp->ElementMultiplyWith(OutputFor(frameRange));
m_rightTerm->SetValue(Input(inputIndex)->OutputFor(frameRange));
m_temp->ElementMultiplyWith(OutputFor(fr));
m_rightTerm->SetValue(Input(inputIndex)->OutputFor(fr));
m_rightTerm->RowElementMultiplyWith(*m_temp);
m_temp->AssignElementProductOf(*m_invNorm0, *m_invNorm1);
m_leftTerm->SetValue(Input(1 - inputIndex)->OutputFor(frameRange));
m_leftTerm->SetValue(Input(1 - inputIndex)->OutputFor(fr));
m_leftTerm->RowElementMultiplyWith(*m_temp);
*m_leftTerm -= *m_rightTerm;
m_leftTerm->RowElementMultiplyWith(GradientFor(frameRange));
Input(inputIndex)->GradientFor(frameRange) += *m_leftTerm;
m_leftTerm->RowElementMultiplyWith(GradientFor(fr));
Input(inputIndex)->GradientFor(fr) += *m_leftTerm;
}
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & frameRange) override
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & fr) override
{
Matrix<ElemType> sliceInput0Value = Input(0)->OutputFor(frameRange);
Matrix<ElemType> sliceInput1Value = Input(1)->OutputFor(frameRange);
Matrix<ElemType> sliceOutputValue = OutputFor(frameRange);
Matrix<ElemType> sliceInput0Value = Input(0)->OutputFor(fr);
Matrix<ElemType> sliceInput1Value = Input(1)->OutputFor(fr);
Matrix<ElemType> sliceOutputValue = OutputFor(fr);
m_invNorm0->AssignVectorNorm2Of(sliceInput0Value, true);
m_invNorm0->AssignElementInverseOf(*m_invNorm0);
@ -1450,26 +1447,26 @@ private:
}
}
//request matrices needed to do node function value evaluation
virtual void RequestMatricesBeforeEval(MatrixPool& matrixPool)
virtual void RequestMatricesBeforeForwardProp(MatrixPool& matrixPool)
{
Base::RequestMatricesBeforeEval(matrixPool);
Base::RequestMatricesBeforeForwardProp(matrixPool);
RequestMatrixFromPool(m_invNorm0, matrixPool);
RequestMatrixFromPool(m_invNorm1, matrixPool);
}
//request matrices that are needed for gradient computation
virtual void RequestMatricesBeforeGradientComp(MatrixPool& matrixPool)
virtual void RequestMatricesBeforeBackprop(MatrixPool& matrixPool)
{
Base::RequestMatricesBeforeGradientComp(matrixPool);
Base::RequestMatricesBeforeBackprop(matrixPool);
RequestMatrixFromPool(m_leftTerm, matrixPool);
RequestMatrixFromPool(m_rightTerm, matrixPool);
RequestMatrixFromPool(m_temp, matrixPool);
}
//release gradient and temp matrices that no longer needed after all the children's gradients are computed.
virtual void ReleaseMatricesAfterGradientComp(MatrixPool& matrixPool)
virtual void ReleaseMatricesAfterBackprop(MatrixPool& matrixPool)
{
Base::ReleaseMatricesAfterGradientComp(matrixPool);
Base::ReleaseMatricesAfterBackprop(matrixPool);
ReleaseMatrixToPool(m_invNorm0, matrixPool);
ReleaseMatrixToPool(m_invNorm1, matrixPool);
ReleaseMatrixToPool(m_leftTerm, matrixPool);
@ -1504,29 +1501,29 @@ private:
Base(deviceId, name)
{ }
virtual void /*ComputationNode::*/BackpropTo(const size_t inputIndex, const FrameRange & frameRange) override
virtual void /*ComputationNode::*/BackpropTo(const size_t inputIndex, const FrameRange & fr) override
{
Matrix<ElemType> sliceOutputGrad = GradientFor(frameRange);
Matrix<ElemType> sliceOutputGrad = GradientFor(fr);
if (inputIndex == 0) //left derivative
{
Matrix<ElemType> sliceInput0Grad = Input(0)->GradientFor(frameRange);
Matrix<ElemType> sliceInput1Value = Input(1)->OutputFor(frameRange);
Matrix<ElemType> sliceInput0Grad = Input(0)->GradientFor(fr);
Matrix<ElemType> sliceInput1Value = Input(1)->OutputFor(fr);
sliceInput0Grad.AddColumnReshapeProductOf(sliceOutputGrad, sliceInput1Value, false);
}
else //right derivative
{
Matrix<ElemType> sliceInput0Value = Input(0)->OutputFor(frameRange);
Matrix<ElemType> sliceInput1Grad = Input(1)->GradientFor(frameRange);
Matrix<ElemType> sliceInput0Value = Input(0)->OutputFor(fr);
Matrix<ElemType> sliceInput1Grad = Input(1)->GradientFor(fr);
sliceInput1Grad.AddColumnReshapeProductOf(sliceOutputGrad, sliceInput0Value, true);
}
}
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & frameRange) override
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & fr) override
{
OutputFor(frameRange).AssignKhatriRaoProductOf(Input(0)->OutputFor(frameRange), Input(1)->OutputFor(frameRange));
OutputFor(fr).AssignKhatriRaoProductOf(Input(0)->OutputFor(fr), Input(1)->OutputFor(fr));
}
virtual void /*ComputationNodeBase::*/Validate(bool isFinalValidationPass) override
@ -1588,14 +1585,14 @@ private:
BackpropToS(inputIndex, *m_invNorm0, *m_invNorm1, Output(), *m_temp, *m_rightTerm, *m_leftTerm, *m_invNormSquare, Input(0)->Output(), Input(1)->Output(), Input(2)->Output(), Input(3)->Output(), Input(inputIndex)->GradientValues(), GradientValues());
}
virtual void /*ComputationNode::*/BackpropTo(const size_t inputIndex, const FrameRange & frameRange) override
virtual void /*ComputationNode::*/BackpropTo(const size_t inputIndex, const FrameRange & fr) override
{
if (frameRange.IsAllFrames()) { BackpropToMap(inputIndex); return; } // TODO: remove these one by one
Matrix<ElemType> sliceInput0Value = Input(0)->OutputFor(frameRange);
Matrix<ElemType> sliceInput1Value = Input(1)->OutputFor(frameRange);
Matrix<ElemType> sliceOutputValue = OutputFor(frameRange);
Matrix<ElemType> sliceInputGrad = Input(inputIndex)->GradientFor(frameRange);
Matrix<ElemType> sliceThisGrad = GradientFor(frameRange);
if (fr.IsAllFrames()) { BackpropToMap(inputIndex); return; } // TODO: remove these one by one
Matrix<ElemType> sliceInput0Value = Input(0)->OutputFor(fr);
Matrix<ElemType> sliceInput1Value = Input(1)->OutputFor(fr);
Matrix<ElemType> sliceOutputValue = OutputFor(fr);
Matrix<ElemType> sliceInputGrad = Input(inputIndex)->GradientFor(fr);
Matrix<ElemType> sliceThisGrad = GradientFor(fr);
BackpropToS(inputIndex, *m_invNorm0, *m_invNorm1, sliceOutputValue, *m_temp, *m_rightTerm, *m_leftTerm, *m_invNormSquare, sliceInput0Value, sliceInput1Value, Input(2)->Output(), Input(3)->Output(), sliceInputGrad, sliceThisGrad);
}
@ -1706,12 +1703,12 @@ private:
ForwardPropS(*m_invNorm0, *m_invNorm1, Output(), Input(0)->Output(), Input(1)->Output(), Input(2)->Output(), Input(3)->Output(), *m_leftTerm, *m_rightTerm);
}
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & frameRange) override
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & fr) override
{
//if (frameRange.IsAllFrames()) { ForwardPropMap(); return; }
Matrix<ElemType> sliceInput0Value = Input(0)->OutputFor(frameRange);
Matrix<ElemType> sliceInput1Value = Input(1)->OutputFor(frameRange);
Matrix<ElemType> sliceOutputValue = OutputFor(frameRange);
//if (fr.IsAllFrames()) { ForwardPropMap(); return; }
Matrix<ElemType> sliceInput0Value = Input(0)->OutputFor(fr);
Matrix<ElemType> sliceInput1Value = Input(1)->OutputFor(fr);
Matrix<ElemType> sliceOutputValue = OutputFor(fr);
ForwardPropS(*m_invNorm0, *m_invNorm1, sliceOutputValue, sliceInput0Value, sliceInput1Value, Input(2)->Output(), Input(3)->Output(), *m_leftTerm, *m_rightTerm);
}
@ -1797,9 +1794,9 @@ private:
}
}
//request matrices needed to do node function value evaluation
virtual void RequestMatricesBeforeEval(MatrixPool& matrixPool)
virtual void RequestMatricesBeforeForwardProp(MatrixPool& matrixPool)
{
Base::RequestMatricesBeforeEval(matrixPool);
Base::RequestMatricesBeforeForwardProp(matrixPool);
RequestMatrixFromPool(m_invNorm0, matrixPool);
RequestMatrixFromPool(m_invNorm1, matrixPool);
RequestMatrixFromPool(m_leftTerm, matrixPool);
@ -1807,17 +1804,17 @@ private:
}
//request matrices that are needed for gradient computation
virtual void RequestMatricesBeforeGradientComp(MatrixPool& matrixPool)
virtual void RequestMatricesBeforeBackprop(MatrixPool& matrixPool)
{
Base::RequestMatricesBeforeGradientComp(matrixPool);
Base::RequestMatricesBeforeBackprop(matrixPool);
RequestMatrixFromPool(m_invNormSquare, matrixPool);
RequestMatrixFromPool(m_temp, matrixPool);
}
//release gradient and temp matrices that no longer needed after all the children's gradients are computed.
virtual void ReleaseMatricesAfterGradientComp(MatrixPool& matrixPool)
virtual void ReleaseMatricesAfterBackprop(MatrixPool& matrixPool)
{
Base::ReleaseMatricesAfterGradientComp(matrixPool);
Base::ReleaseMatricesAfterBackprop(matrixPool);
ReleaseMatrixToPool(m_invNorm0, matrixPool);
ReleaseMatrixToPool(m_invNorm1, matrixPool);
ReleaseMatrixToPool(m_leftTerm, matrixPool);

Просмотреть файл

@ -1,324 +0,0 @@
#if 0 // This is no longer needed. Keeping it around for reference, but should simply be deleted after a few weeks.
// NetworkBuilderFromConfig.cpp -- interface to node and network creation from glue languages through config record parameters --fseide
#define _CRT_SECURE_NO_WARNINGS // "secure" CRT not available on all platforms --add this at the top of all CPP files that give "function or variable may be unsafe" warnings
#include "Basics.h"
#include "ScriptableObjects.h"
#include "ComputationNode.h"
#include "InputAndParamNodes.h"
#include "RecurrentNodes.h"
#include "NonlinearityNodes.h"
#include "LinearAlgebraNodes.h"
#include "ConvolutionalNodes.h"
#include "ReshapingNodes.h"
#include "ComputationNetwork.h"
#include "ComputationNetworkBuilder.h"
#include <memory>
#include <deque>
#include <set>
#include <string>
#ifndef let
#define let const auto
#endif
namespace Microsoft { namespace MSR { namespace ScriptableObjects {
using namespace Microsoft::MSR;
// The following class(es) implement the MakeRuntimeObject() function for different types. Sorry for the strange template dance.
// -------------------------------------------------------------------
// basic function template, for classes that can instantiate themselves from IConfigRecordPtr TODO: do we even have any?
// -------------------------------------------------------------------
template<typename ElemType, class C>
struct DualPrecisionHelpers
{
static shared_ptr<Object> MakeRuntimeObject(const IConfigRecordPtr config) { return make_shared<C>(config); }
};
// -------------------------------------------------------------------
// ComputationNode -- covers all standard nodes
// -------------------------------------------------------------------
template<class ElemType>
struct DualPrecisionHelpers<ElemType, ComputationNode<ElemType>>
{
// create ComputationNode
// This is the equivalent of the old SynchronousNodeEvaluator::Evaluate(), and we duplicate code from there.
static shared_ptr<Object> MakeRuntimeObject(const IConfigRecordPtr configp)
{
let & config = *configp;
wstring operationName = config[L"operation"];
wstring nodeName = L"<placeholder>"; // name will be overwritten by caller upon return (TODO: fix this here? pass expression name in?)
DEVICEID_TYPE deviceId = (DEVICEID_TYPE)(int)config[L"deviceId"];
static unsigned long m_randomSeedOffset = 0; // TODO: this is held in the ComputationNetwork, but we don't have one yet
// TODO" ^^ actually it seems only used by initialization of LearnableParameters--check that again; in that case, we can have a local
// note on optional parameters
// Instead of defining optional parameters here in code, they are defined as optional args to the creating macro.
ComputationNodeBasePtr node;
#define OpIs(op) (operationName == msra::strfun::utf16(OperationNameOf(op)))
// first group: nodes without inputs
if (OpIs(InputValue))
{
let isSparse = config[L"isSparse"];
let isImage = config[L"isImage"];
if (!isImage)
node = New<InputValue<ElemType>>(deviceId, nodeName, (size_t)config[L"rows"], (size_t)config[L"cols"], isSparse);
else
node = New<InputValue<ElemType>>(deviceId, nodeName, ImageLayoutWHC(config[L"imageWidth"], config[L"imageHeight"], config[L"imageChannels"]), (size_t)config[L"numImages"], isSparse);
}
else if (OpIs(LearnableParameter) || OpIs(SparseLearnableParameter))
{
// parameters[rows, [cols=1]] plus other optional parameters (needGradient=[true|false], init=[uniform|gaussian|fixedvalue], initValueScale=[1|float], value=[0|float])
// TODO: do we need a default value mechanism? How to make sure it does not pop upwards? Current functions do not allow overloads.
// TODO: test this with random init for QuickE2E on CPU against SimpleNetworkBuilder
let isSparse = (operationName.find(L"Sparse") != wstring::npos);
if (!isSparse)
node = New<LearnableParameter<ElemType>>(deviceId, nodeName, (size_t)config[L"rows"], (size_t)config[L"cols"]);
else
node = New<SparseLearnableParameter<ElemType>>(deviceId, nodeName, (size_t)config[L"rows"], (size_t)config[L"cols"], 0/*size*/); // TODO: what is size?
// TODO: "needGradient" should be renamed to better match m_parameterUpdateRequired
node->SetParameterUpdateRequired(config[L"needGradient"]);
static int randomSeed = 1;
wstring initString = config[L"init"];
if (initString == L"fixedValue")
dynamic_pointer_cast<LearnableParameter<ElemType>>(node)->Output().SetValue((ElemType)config[L"value"]);
else if (initString == L"uniform" || initString == L"gaussian")
{
// TODO: add these options also to old NDL
int forcedRandomSeed = config[L"randomSeed"]; // forcing a specific random seed is useful for testing to get repeatable initialization independent of evaluation order
dynamic_pointer_cast<LearnableParameter<ElemType>>(node)->InitRandom((initString == L"uniform"), forcedRandomSeed < 0 ? (randomSeed++ + m_randomSeedOffset) : (unsigned long)forcedRandomSeed, config[L"initValueScale"], config[L"initOnCPUOnly"]);
}
else if (initString == L"fromFile")
{
wstring initFromFilePath = config[L"initFromFilePath"];
if (initFromFilePath.empty())
RuntimeError("initFromFilePath must be set when using \"fromFile\" initialization method");
dynamic_pointer_cast<LearnableParameter<ElemType>>(node)->InitFromFile(initFromFilePath);
}
else
RuntimeError("init must be one of the values of [uniform|gaussian|fixedValue|fromFile]");
}
// Constant is implemented as a LearnableParameter with initializion as fixedValue with needGradient false, on script level
// nodes with delayed inputs, where we cannot yet resolve inputs due to circular references
else if (OpIs(PastValueNode) || OpIs(FutureValueNode)) // TODO: untested
{
// rows, cols, input, [timeStep=1, defaultHiddenActivation=0.1]
// Note: changed names of optional args compared to current NDL
// TODO: we really should NOT have to specify the dimensions; network builder can figure it out. Keep it for now, fix when it is time.
// We instantiate not the node directly, but a wrapped version that can cast to LateAttachingNode, which holds a lambda to complete the attachment process at the appropriate time.
function<void(ComputationNode<ElemType>*)> completeAttachInputs = [configp](ComputationNode<ElemType>* node) // This is the lambda to complete the process. Note that config captured as a shared_ptr.
{
node->AttachInputs(GetInputs(*configp)); // this is executed by network builder while iterating the nodes
};
// legacy: bad spelling. Warn users who may have converted.
if (config.Find(L"defaultHiddenActivity"))
config[L"defaultHiddenActivity"].Fail(L"Past/FutureValueNode: Optional NDL parameter 'defaultHiddenActivity' should be spelled 'defaultHiddenActivation'. Please update your script.");
if (OpIs(PastValueNode))
node = New<LateAttachingNode<PastValueNode<ElemType>>>(deviceId, nodeName, completeAttachInputs, (ElemType)config[L"defaultHiddenActivation"], (size_t)config[L"rows"], (size_t)config[L"cols"], (size_t)config[L"timeStep"]);
else
node = New<LateAttachingNode<FutureValueNode<ElemType>>>(deviceId, nodeName, completeAttachInputs, (ElemType)config[L"defaultHiddenActivation"], (size_t)config[L"rows"], (size_t)config[L"cols"], (size_t)config[L"timeStep"]);
}
else // nodes with inputs
{
let inputs = GetInputs(config);
// second group: nodes with special initializers
if (OpIs(RowSliceNode))
{
// startIndex, numRows, inputs /*one*/, needGradient=false
node = New<RowSliceNode<ElemType>>(deviceId, nodeName, (size_t)config[L"startIndex"], (size_t)config[L"numRows"]);
//node->SetParameterUpdateRequired(config[L"needGradient"]);
// TODO: Why is this ^^ flag here? This node has no parameters.
}
else if (OpIs(RowRepeatNode)) // TODO: untested
{
// inputs /*one*/, numRepeats, needGradient=false
node = New<RowRepeatNode<ElemType>>(deviceId, nodeName, (size_t)config[L"numRepeats"]);
//node->SetParameterUpdateRequired(config[L"needGradient"]);
}
else if (OpIs(DiagonalNode)) // TODO: seems this is no longer a special case (needGradient makes no sense here)
{
// inputs /*one*/, numRepeats, needGradient=false
node = New<DiagonalNode<ElemType>>(deviceId, nodeName);
//node->SetParameterUpdateRequired(config[L"needGradient"]);
}
else if (OpIs(ReshapeNode))
{
// inputs /*one*/, numRows, imageWidth = 0, imageHeight = 0, imageChannels = 0
node = New<ReshapeNode<ElemType>>(deviceId, nodeName, (size_t)config[L"numRows"], ImageLayoutWHC(config[L"imageWidth"], config[L"imageHeight"], config[L"imageChannels"]));
}
else if (OpIs(ConvolutionNode)) // TODO: untested
{
// weightNodeName, inputValueNodeName, kernelWidth, kernelHeight, outputChannels, horizontalSubsample, verticalSubsample, zeroPadding = false, maxTempMemSizeInSamples = 0
node = New<ConvolutionNode<ElemType>>(deviceId, nodeName, (size_t)config[L"kernelWidth"], (size_t)config[L"kernelHeight"], (size_t)config[L"outputChannels"],
(size_t)config[L"horizontalSubsample"], (size_t)config[L"verticalSubsample"],
(bool)config[L"zeroPadding"], (size_t)config[L"maxTempMemSizeInSamples"]);
}
else if (OpIs(MaxPoolingNode)) // TODO: untested
{
// input, windowWidth, windowHeight, horizontalSubsample, verticalSubsample
node = New<MaxPoolingNode<ElemType>>(deviceId, nodeName, (size_t)config[L"windowWidth"], (size_t)config[L"windowHeight"], (size_t)config[L"horizontalSubsample"], (size_t)config[L"verticalSubsample"]);
}
else if (OpIs(AveragePoolingNode)) // TODO: untested
{
// input, windowWidth, windowHeight, horizontalSubsample, verticalSubsample
node = New<AveragePoolingNode<ElemType>>(deviceId, nodeName, (size_t)config[L"windowWidth"], (size_t)config[L"windowHeight"], (size_t)config[L"horizontalSubsample"], (size_t)config[L"verticalSubsample"]);
}
// last group: standard nodes that only take 'inputs'
else
{
node = ComputationNetworkBuilder<ElemType>::NewStandardNode(operationName, deviceId, nodeName);
if (!node)
config[L"operation"].Fail(L"Unknown operation " + operationName);
}
node->AttachInputs(inputs); // TODO: where to check the number of inputs? Should be a template parameter to ComputationNode!
}
// add a tag
let nodeWithTag = dynamic_pointer_cast<WithTag>(node);
if (nodeWithTag)
nodeWithTag->SetTag(config[L"tag"]);
// and done
return node;
}
private:
// helper for the factory function for ComputationNodes
// Reads the 'inputs' config entry, which may hold either a single node or an
// array of nodes, and returns the input(s) as a flat vector. Pulling each array
// element through At() forces it to be resolved.
static vector<ComputationNodeBasePtr> GetInputs(const IConfigRecord & config)
{
    vector<ComputationNodeBasePtr> result;
    let inputsValue = config[L"inputs"];
    if (!inputsValue.Is<ComputationNodeBase>())     // a whole vector of nodes
    {
        ConfigArrayPtr nodeArray = (ConfigArrayPtr&)inputsValue;
        let indexRange = nodeArray->GetIndexRange();
        for (int index = indexRange.first; index <= indexRange.second; index++) // pull them; this resolves all of them
            result.push_back(nodeArray->At(index, [](const wstring &){ LogicError("GetInputs: out of bounds index while iterating??"); }));
    }
    else                                            // just a single node
        result.push_back(inputsValue);
    return result;
}
};
#if 0
// creates the lambda for creating an object that can exist as 'float' or 'double'
// Pass both types as the two template args.
// Returns a ConfigurableRuntimeType whose 'construct' lambda reads the config
// parameter 'precision' and forwards to the matching DualPrecisionHelpers
// specialization; any other precision value is a RuntimeError.
template<class Cfloat, class Cdouble>
static ConfigurableRuntimeType MakeRuntimeTypeConstructorDualPrecision()
{
    ConfigurableRuntimeType rtInfo;
    rtInfo.construct = [](const IConfigRecordPtr config) // lambda to construct--this lambda can construct both the <float> and the <double> variant based on config parameter 'precision'
    {
        wstring precision = (*config)[L"precision"]; // dispatch on ElemType
        if (precision == L"float")
            return DualPrecisionHelpers<float, Cfloat>::MakeRuntimeObject(config);
        else if (precision == L"double")
            return DualPrecisionHelpers<double, Cdouble>::MakeRuntimeObject(config);
        else
            RuntimeError("invalid value '%ls' for 'precision', must be 'float' or 'double'", precision.c_str());
    };
    rtInfo.isConfigRecord = is_base_of<IConfigRecord, Cfloat>::value;
    static_assert(is_base_of<IConfigRecord, Cfloat>::value == is_base_of<IConfigRecord, Cdouble>::value, ""); // we assume that both float and double have the same behavior
    return rtInfo;
}
// and the regular one without ElemType dependency
// Same as above, but for types C that do not depend on the element type:
// 'construct' forwards directly to MakeRuntimeObject<C>, with no precision dispatch.
template<class C>
static ConfigurableRuntimeType MakeRuntimeTypeConstructor()
{
    ConfigurableRuntimeType rtInfo;
    rtInfo.construct = [](const IConfigRecordPtr config) // lambda to construct an object of type C from a config record
    {
        return MakeRuntimeObject<C>(config);
    };
    rtInfo.isConfigRecord = is_base_of<IConfigRecord, C>::value;
    return rtInfo;
}
#define DefineRuntimeType(T) { L ## #T, MakeRuntimeTypeConstructor<T>() }
#define DefineRuntimeTypeDualPrecision(T) { L ## #T, MakeRuntimeTypeConstructorDualPrecision<T<float>,T<double>>() }
// get information about configurable runtime types
// This returns a ConfigurableRuntimeType structure which primarily contains a lambda to construct a runtime object from a ConfigRecord ('new' expression).
// Returns nullptr if 'typeId' does not name a known runtime type.
const ConfigurableRuntimeType * FindExternalRuntimeTypeInfo(const wstring & typeId)
{
    // lookup table for "new" expression
    // This table lists all C++ types that can be instantiated from "new" expressions, and gives a constructor lambda and type flags.
    // The table is built once on first call (function-local static).
    static map<wstring, ConfigurableRuntimeType> configurableRuntimeTypes =
    {
        // ComputationNodes
        DefineRuntimeTypeDualPrecision(ComputationNode),
        DefineRuntimeType(ComputationNetwork),
#if 0
        DefineRuntimeType(RecurrentComputationNode),
        // In this experimental state, we only have Node and Network.
        // Once BrainScript becomes the driver of everything, we will add other objects like Readers, Optimizers, and Actions here.
#endif
    };
    // first check our own
    let newIter = configurableRuntimeTypes.find(typeId);
    if (newIter != configurableRuntimeTypes.end())
        return &newIter->second;
    return nullptr; // not found
}
#endif
// temporary code for BrainScript update (using register)
#if 0
// ScriptableObject factory specializations: route construction of the float and
// double ComputationNode variants to the matching DualPrecisionHelpers implementation.
template<> shared_ptr<Object> MakeRuntimeObject<ComputationNode<float>>(const IConfigRecordPtr configp)
{
    return DualPrecisionHelpers<float, ComputationNode<float>>::MakeRuntimeObject(configp);
}
template<> shared_ptr<Object> MakeRuntimeObject<ComputationNode<double>>(const IConfigRecordPtr configp)
{
    return DualPrecisionHelpers<double, ComputationNode<double>>::MakeRuntimeObject(configp);
}
// register ComputationNode with the ScriptableObject system under the script-visible name L"ComputationNode"
ScriptableObjects::ConfigurableRuntimeTypeRegister::AddFloatDouble<ComputationNode<float>, ComputationNode<double>> adderx(L"ComputationNode");
#else
// ScriptableObject factory entry point for ComputationNode 'new' expressions;
// forwards to NewComputationNodeFromConfig.
template<> shared_ptr<Object> MakeRuntimeObject<ComputationNodeBase>(const IConfigRecordPtr configp)
{
    return NewComputationNodeFromConfig(configp);
}
// register ComputationNode with the ScriptableObject system under the script-visible name L"ComputationNode"
ScriptableObjects::ConfigurableRuntimeTypeRegister::Add<ComputationNodeBase> registerComputationNode(L"ComputationNode");
#endif
}}}
// temporarily moved this function here, to force this compilation unit to emit something
namespace Microsoft { namespace MSR { namespace CNTK {
using namespace Microsoft::MSR;
// writes a one-line textual description of this node to 'fstream':
// "\n<nodeName>=<operationName>(<input0>,<input1>,...)" -- the input list is
// omitted for leaf nodes, and a missing input is printed as "NULL".
// Note: 'printValues' is accepted for interface compatibility but not used here.
template<class ElemType>
/*virtual*/ void ComputationNode<ElemType>::DumpNodeInfo(const bool /*printValues*/, File& fstream) const
{
    fstream << L"\n" + NodeName() + L"=" + OperationName();
    if (IsLeaf())
        return;             // leaf nodes have no input list to print
    fstream << wstring(L"(");
    for (size_t childIndex = 0; childIndex < GetNumInputs(); childIndex++)
    {
        if (childIndex != 0)
            fstream << wstring(L",");
        const auto & child = Input(childIndex);
        fstream << (child ? child->NodeName() : L"NULL");
    }
    fstream << wstring(L")");
}
}}}
#endif

Просмотреть файл

@ -43,20 +43,20 @@ namespace Microsoft { namespace MSR { namespace CNTK {
{ }
// TODO: with FrameRange, this code has now been reduced so much that there is no need to have these overrides here; they can just be implemented in the derived classes directly.
virtual void /*ComputationNode::*/BackpropTo(const size_t inputIndex, const FrameRange & frameRange) override
virtual void /*ComputationNode::*/BackpropTo(const size_t inputIndex, const FrameRange & fr) override
{
assert(inputIndex == 0); inputIndex;
auto gradient = Input(0)->GradientFor(frameRange);
BackpropToV(*m_gradient, Input(0)->OutputFor(frameRange), gradient, GradientFor(frameRange));
auto gradient = Input(0)->GradientFor(fr);
BackpropToV(*m_gradient, Input(0)->OutputFor(fr), gradient, GradientFor(fr));
}
// derived class implement the actual non-linear operation
virtual void BackpropToV(Matrix<ElemType>& gradient, const Matrix<ElemType>& inputFunctionValues, Matrix<ElemType>& inputGradientValues, const Matrix<ElemType>& gradientValues) = 0;
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & frameRange) override
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & fr) override
{
auto values = OutputFor(frameRange);
ForwardPropV(values, Input(0)->OutputFor(frameRange));
auto values = OutputFor(fr);
ForwardPropV(values, Input(0)->OutputFor(fr));
}
// derived class implement the actual non-linear operation
@ -78,16 +78,16 @@ namespace Microsoft { namespace MSR { namespace CNTK {
}
// request matrices that are needed for gradient computation
virtual void RequestMatricesBeforeGradientComp(MatrixPool& matrixPool)
virtual void RequestMatricesBeforeBackprop(MatrixPool& matrixPool)
{
Base::RequestMatricesBeforeGradientComp(matrixPool);
Base::RequestMatricesBeforeBackprop(matrixPool);
RequestMatrixFromPool(m_gradient, matrixPool);
}
// release gradient and temp matrices that no longer needed after all the children's gradients are computed.
virtual void ReleaseMatricesAfterGradientComp(MatrixPool& matrixPool)
virtual void ReleaseMatricesAfterBackprop(MatrixPool& matrixPool)
{
Base::ReleaseMatricesAfterGradientComp(matrixPool);
Base::ReleaseMatricesAfterBackprop(matrixPool);
ReleaseMatrixToPool(m_gradient, matrixPool);
}
protected:
@ -160,15 +160,15 @@ namespace Microsoft { namespace MSR { namespace CNTK {
BackpropToS(*m_gradient, Input(0)->GradientValues(), GradientValues(), Output());
}
virtual void /*ComputationNode::*/BackpropTo(const size_t inputIndex, const FrameRange & frameRange) override
virtual void /*ComputationNode::*/BackpropTo(const size_t inputIndex, const FrameRange & fr) override
{
if (frameRange.IsAllFrames()) { BackpropToMap(inputIndex); return; } // TODO: remove these one by one
if (fr.IsAllFrames()) { BackpropToMap(inputIndex); return; } // TODO: remove these one by one
assert(inputIndex == 0); inputIndex;
Matrix<ElemType> sliceInputGrad = Input(0)->GradientFor(frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> sliceOutputGrad = GradientFor(frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> sliceInputGrad = Input(0)->GradientFor(fr);
Matrix<ElemType> sliceOutputGrad = GradientFor(fr);
Matrix<ElemType> sliceOutputValue = OutputFor(frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> sliceOutputValue = OutputFor(fr);
BackpropToS(*m_gradient, sliceInputGrad, sliceOutputGrad, sliceOutputValue);
}
@ -216,15 +216,15 @@ namespace Microsoft { namespace MSR { namespace CNTK {
BackpropToS(*m_gradient, Input(0)->GradientValues(), GradientValues(), Output());
}
virtual void /*ComputationNode::*/BackpropTo(const size_t inputIndex, const FrameRange & frameRange) override
virtual void /*ComputationNode::*/BackpropTo(const size_t inputIndex, const FrameRange & fr) override
{
if (frameRange.IsAllFrames()) { BackpropToMap(inputIndex); return; } // TODO: remove these one by one
if (fr.IsAllFrames()) { BackpropToMap(inputIndex); return; } // TODO: remove these one by one
assert(inputIndex == 0); inputIndex;
Matrix<ElemType> sliceInputGrad = Input(0)->GradientFor(frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> sliceOutputGrad = GradientFor(frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> sliceInputGrad = Input(0)->GradientFor(fr);
Matrix<ElemType> sliceOutputGrad = GradientFor(fr);
Matrix<ElemType> sliceOutputValue = OutputFor(frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> sliceOutputValue = OutputFor(fr);
BackpropToS(*m_gradient, sliceInputGrad, sliceOutputGrad, sliceOutputValue);
}
@ -274,15 +274,15 @@ namespace Microsoft { namespace MSR { namespace CNTK {
BackpropToS(*m_gradient, Input(0)->GradientValues(), Input(0)->Output(), GradientValues());
}
virtual void /*ComputationNode::*/BackpropTo(const size_t inputIndex, const FrameRange & frameRange) override
virtual void /*ComputationNode::*/BackpropTo(const size_t inputIndex, const FrameRange & fr) override
{
if (frameRange.IsAllFrames()) { BackpropToMap(inputIndex); return; } // TODO: remove these one by one
if (fr.IsAllFrames()) { BackpropToMap(inputIndex); return; } // TODO: remove these one by one
assert(inputIndex == 0); inputIndex;
Matrix<ElemType> sliceInputGrad = Input(0)->GradientFor(frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> sliceOutputGrad = GradientFor(frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> sliceInputGrad = Input(0)->GradientFor(fr);
Matrix<ElemType> sliceOutputGrad = GradientFor(fr);
Matrix<ElemType> sliceInputValue = Input(0)->OutputFor(frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> sliceInputValue = Input(0)->OutputFor(fr);
BackpropToS(*m_gradient, sliceInputGrad, sliceInputValue, sliceOutputGrad);
}
@ -324,13 +324,13 @@ namespace Microsoft { namespace MSR { namespace CNTK {
NonlinearityNodeBase<ElemType>(deviceId, name)
{ }
virtual void /*ComputationNode::*/BackpropTo(const size_t inputIndex, const FrameRange & frameRange) override
virtual void /*ComputationNode::*/BackpropTo(const size_t inputIndex, const FrameRange & fr) override
{
assert(inputIndex == 0); inputIndex;
Matrix<ElemType> sliceInputGrad = Input(0)->GradientFor(frameRange);
Matrix<ElemType> sliceOutputGrad = GradientFor(frameRange);
Matrix<ElemType> sliceInputValue = Input(0)->OutputFor(frameRange);
Matrix<ElemType> sliceInputGrad = Input(0)->GradientFor(fr);
Matrix<ElemType> sliceOutputGrad = GradientFor(fr);
Matrix<ElemType> sliceInputValue = Input(0)->OutputFor(fr);
m_gradient->AssignExpOf(sliceInputValue); // Exp(x) is its own partial
sliceInputGrad.AddElementProductOf(sliceOutputGrad, *m_gradient);
@ -371,15 +371,15 @@ namespace Microsoft { namespace MSR { namespace CNTK {
BackpropToS(*m_gradient, Input(0)->GradientValues(), Input(0)->Output(), GradientValues());
}
virtual void /*ComputationNode::*/BackpropTo(const size_t inputIndex, const FrameRange & frameRange) override
virtual void /*ComputationNode::*/BackpropTo(const size_t inputIndex, const FrameRange & fr) override
{
if (frameRange.IsAllFrames()) { BackpropToMap(inputIndex); return; } // TODO: remove these one by one
if (fr.IsAllFrames()) { BackpropToMap(inputIndex); return; } // TODO: remove these one by one
assert(inputIndex == 0); inputIndex;
Matrix<ElemType> sliceInputGrad = Input(0)->GradientFor(frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> sliceOutputGrad = GradientFor(frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> sliceInputGrad = Input(0)->GradientFor(fr);
Matrix<ElemType> sliceOutputGrad = GradientFor(fr);
Matrix<ElemType> sliceInputValue = Input(0)->OutputFor(frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> sliceInputValue = Input(0)->OutputFor(fr);
BackpropToS(*m_gradient, sliceInputGrad, sliceInputValue, sliceOutputGrad);
}
@ -429,15 +429,15 @@ namespace Microsoft { namespace MSR { namespace CNTK {
BackpropToS(*m_gradient, *m_diff, Input(0)->GradientValues(), GradientValues(), Output());
}
virtual void /*ComputationNode::*/BackpropTo(const size_t inputIndex, const FrameRange & frameRange) override
virtual void /*ComputationNode::*/BackpropTo(const size_t inputIndex, const FrameRange & fr) override
{
if (frameRange.IsAllFrames()) { BackpropToMap(inputIndex); return; } // TODO: remove these one by one
if (fr.IsAllFrames()) { BackpropToMap(inputIndex); return; } // TODO: remove these one by one
assert(inputIndex == 0); inputIndex;
Matrix<ElemType> sliceInputGrad = Input(0)->GradientFor(frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> sliceOutputGrad = GradientFor(frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> sliceInputGrad = Input(0)->GradientFor(fr);
Matrix<ElemType> sliceOutputGrad = GradientFor(fr);
Matrix<ElemType> sliceOutputValue = OutputFor(frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> sliceOutputValue = OutputFor(fr);
BackpropToS(*m_gradient, *m_diff, sliceInputGrad, sliceOutputGrad, sliceOutputValue);
}
@ -478,16 +478,16 @@ namespace Microsoft { namespace MSR { namespace CNTK {
}
}
//request matrices that are needed for gradient computation
virtual void RequestMatricesBeforeGradientComp(MatrixPool& matrixPool)
virtual void RequestMatricesBeforeBackprop(MatrixPool& matrixPool)
{
Base::RequestMatricesBeforeGradientComp(matrixPool);
Base::RequestMatricesBeforeBackprop(matrixPool);
RequestMatrixFromPool(m_diff, matrixPool);
}
//release gradient and temp matrices that no longer needed after all the children's gradients are computed.
virtual void ReleaseMatricesAfterGradientComp(MatrixPool& matrixPool)
virtual void ReleaseMatricesAfterBackprop(MatrixPool& matrixPool)
{
Base::ReleaseMatricesAfterGradientComp(matrixPool);
Base::ReleaseMatricesAfterBackprop(matrixPool);
ReleaseMatrixToPool(m_diff, matrixPool);
}
private:
@ -519,15 +519,15 @@ namespace Microsoft { namespace MSR { namespace CNTK {
BackpropToS(*m_gradient, *m_softmax, Input(0)->GradientValues(), GradientValues(), Output());
}
virtual void /*ComputationNode::*/BackpropTo(const size_t inputIndex, const FrameRange & frameRange) override
virtual void /*ComputationNode::*/BackpropTo(const size_t inputIndex, const FrameRange & fr) override
{
if (frameRange.IsAllFrames()) { BackpropToMap(inputIndex); return; } // TODO: remove these one by one
if (fr.IsAllFrames()) { BackpropToMap(inputIndex); return; } // TODO: remove these one by one
assert(inputIndex == 0); inputIndex;
Matrix<ElemType> sliceInputGrad = Input(0)->GradientFor(frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> sliceOutputGrad = GradientFor(frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> sliceInputGrad = Input(0)->GradientFor(fr);
Matrix<ElemType> sliceOutputGrad = GradientFor(fr);
Matrix<ElemType> sliceOutputValue = OutputFor(frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> sliceOutputValue = OutputFor(fr);
BackpropToS(*m_gradient, *m_softmax, sliceInputGrad, sliceOutputGrad, sliceOutputValue);
}
@ -567,16 +567,16 @@ namespace Microsoft { namespace MSR { namespace CNTK {
}
}
//request matrices that are needed for gradient computation
virtual void RequestMatricesBeforeGradientComp(MatrixPool& matrixPool)
virtual void RequestMatricesBeforeBackprop(MatrixPool& matrixPool)
{
Base::RequestMatricesBeforeGradientComp(matrixPool);
Base::RequestMatricesBeforeBackprop(matrixPool);
RequestMatrixFromPool(m_softmax, matrixPool);
}
//release gradient and temp matrices that no longer needed after all the children's gradients are computed.
virtual void ReleaseMatricesAfterGradientComp(MatrixPool& matrixPool)
virtual void ReleaseMatricesAfterBackprop(MatrixPool& matrixPool)
{
Base::ReleaseMatricesAfterGradientComp(matrixPool);
Base::ReleaseMatricesAfterBackprop(matrixPool);
ReleaseMatrixToPool(m_softmax, matrixPool);
}
private:
@ -623,14 +623,14 @@ namespace Microsoft { namespace MSR { namespace CNTK {
}
}
virtual void /*ComputationNode::*/BackpropTo(const size_t inputIndex, const FrameRange & frameRange) override
virtual void /*ComputationNode::*/BackpropTo(const size_t inputIndex, const FrameRange & fr) override
{
if (frameRange.IsAllFrames()) { BackpropToMap(inputIndex); return; } // TODO: remove these one by one
if (fr.IsAllFrames()) { BackpropToMap(inputIndex); return; } // TODO: remove these one by one
//get the right slice
const size_t colsPrior = Input(0)->GetNumCols();
Matrix<ElemType> sliceGradientValue = DataFor(*m_gradientValues, frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> slicePosterior = DataFor(*m_posterior, frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> sliceGradientValue = DataFor(*m_gradientValues, fr);
Matrix<ElemType> slicePosterior = DataFor(*m_posterior, fr);
switch (inputIndex)
{
@ -640,40 +640,40 @@ namespace Microsoft { namespace MSR { namespace CNTK {
BackpropToUnnormedPrior(Input(0)->GradientValues(), sliceGradientValue, *m_prior, slicePosterior, *m_temp);
else
{
Matrix<ElemType> sliceUnnormedPriorGradient = Input(0)->GradientFor(frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> slicePrior = DataFor(*m_prior, frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> sliceUnnormedPriorGradient = Input(0)->GradientFor(fr);
Matrix<ElemType> slicePrior = DataFor(*m_prior, fr);
BackpropToUnnormedPrior(sliceUnnormedPriorGradient, sliceGradientValue, slicePrior, slicePosterior, *m_temp);
}
}
break;
case 1:
{
Matrix<ElemType> sliceNormedDeviationVectors = DataFor(*m_normedDeviationVectors, frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> sliceNormedDeviationVectors = DataFor(*m_normedDeviationVectors, fr);
if (colsPrior == 1)
BackpropToMean(Input(1)->GradientValues(), sliceGradientValue, sliceNormedDeviationVectors, slicePosterior, *m_temp);
else
{
Matrix<ElemType> sliceMeanGradient = Input(1)->GradientFor(frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> sliceMeanGradient = Input(1)->GradientFor(fr);
BackpropToMean(sliceMeanGradient, sliceGradientValue, sliceNormedDeviationVectors, slicePosterior, *m_temp);
}
}
break;
case 2:
{
Matrix<ElemType> sliceNormedDeviation = DataFor(*m_normedDeviation, frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> sliceNormedDeviation = DataFor(*m_normedDeviation, fr);
if (colsPrior == 1)
BackpropToLogStddev(Input(2)->GradientValues(), sliceGradientValue, sliceNormedDeviation, slicePosterior, *m_temp);
else
{
Matrix<ElemType> sliceLotStddevGradient = Input(2)->GradientFor(frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> sliceLotStddevGradient = Input(2)->GradientFor(fr);
BackpropToLogStddev(sliceLotStddevGradient, sliceGradientValue, sliceNormedDeviation, slicePosterior, *m_temp);
}
}
break;
case 3:
{
Matrix<ElemType> sliceNormedDeviationVectors = DataFor(*m_normedDeviationVectors, frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> sliceFeatureGradient = Input(3)->GradientFor(frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> sliceNormedDeviationVectors = DataFor(*m_normedDeviationVectors, fr);
Matrix<ElemType> sliceFeatureGradient = Input(3)->GradientFor(fr);
BackpropToFeature(sliceFeatureGradient, sliceGradientValue, sliceNormedDeviationVectors, slicePosterior, *m_temp);
}
break;
@ -784,18 +784,18 @@ namespace Microsoft { namespace MSR { namespace CNTK {
}
//input0=unnormedPrior, input1=mean, input2=logstddev, input3=feature
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & frameRange) override
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & fr) override
{
//if (frameRange.IsAllFrames()) { ForwardPropMap(); return; }
//if (fr.IsAllFrames()) { ForwardPropMap(); return; }
size_t colsPrior = Input(0)->GetNumCols();
size_t numSamples = Input(3)->GetNumCols();
//get the right slice
Matrix<ElemType> sliceOutputValue = OutputFor(frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> sliceFeature = Input(3)->OutputFor(frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> sliceNormedDeviation = DataFor(*m_normedDeviation, frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> sliceNormedDeviationVectors = DataFor(*m_normedDeviationVectors, frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> slicePosterior = DataFor(*m_posterior, frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> sliceOutputValue = OutputFor(fr);
Matrix<ElemType> sliceFeature = Input(3)->OutputFor(fr);
Matrix<ElemType> sliceNormedDeviation = DataFor(*m_normedDeviation, fr);
Matrix<ElemType> sliceNormedDeviationVectors = DataFor(*m_normedDeviationVectors, fr);
Matrix<ElemType> slicePosterior = DataFor(*m_posterior, fr);
if (colsPrior == 1)
{
@ -804,12 +804,12 @@ namespace Microsoft { namespace MSR { namespace CNTK {
}
else if (colsPrior == numSamples)
{
Matrix<ElemType> sliceUnnormedPrior = Input(0)->OutputFor(frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> sliceMean = Input(1)->OutputFor(frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> sliceLogstddev = Input(2)->OutputFor(frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> sliceUnnormedPrior = Input(0)->OutputFor(fr);
Matrix<ElemType> sliceMean = Input(1)->OutputFor(fr);
Matrix<ElemType> sliceLogstddev = Input(2)->OutputFor(fr);
Matrix<ElemType> slicePrior = DataFor(*m_prior, frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> sliceStddev = DataFor(*m_stddev, frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> slicePrior = DataFor(*m_prior, fr);
Matrix<ElemType> sliceStddev = DataFor(*m_stddev, fr);
ForwardPropS(sliceOutputValue, sliceUnnormedPrior, sliceMean, sliceLogstddev, sliceFeature,
slicePrior, sliceStddev, sliceNormedDeviationVectors, sliceNormedDeviation, slicePosterior, *m_temp);
@ -951,9 +951,9 @@ namespace Microsoft { namespace MSR { namespace CNTK {
}
//request matrices needed to do node function value evaluation
virtual void RequestMatricesBeforeEval(MatrixPool& matrixPool)
virtual void RequestMatricesBeforeForwardProp(MatrixPool& matrixPool)
{
Base::RequestMatricesBeforeEval(matrixPool);
Base::RequestMatricesBeforeForwardProp(matrixPool);
RequestMatrixFromPool(m_prior, matrixPool);
RequestMatrixFromPool(m_normedDeviation, matrixPool);
RequestMatrixFromPool(m_normedDeviationVectors, matrixPool);
@ -963,9 +963,9 @@ namespace Microsoft { namespace MSR { namespace CNTK {
}
//release gradient and temp matrices that no longer needed after all the children's gradients are computed.
virtual void ReleaseMatricesAfterGradientComp(MatrixPool& matrixPool)
virtual void ReleaseMatricesAfterBackprop(MatrixPool& matrixPool)
{
Base::ReleaseMatricesAfterGradientComp(matrixPool);
Base::ReleaseMatricesAfterBackprop(matrixPool);
ReleaseMatrixToPool(m_prior, matrixPool);
ReleaseMatrixToPool(m_normedDeviation, matrixPool);
ReleaseMatrixToPool(m_normedDeviationVectors, matrixPool);
@ -1011,16 +1011,16 @@ namespace Microsoft { namespace MSR { namespace CNTK {
BackpropToS(m_dropoutRate, Input(0)->GradientValues(), *m_maskOfDropout, GradientValues());
}
virtual void /*ComputationNode::*/BackpropTo(const size_t inputIndex, const FrameRange & frameRange) override
virtual void /*ComputationNode::*/BackpropTo(const size_t inputIndex, const FrameRange & fr) override
{
if (frameRange.IsAllFrames()) { BackpropToMap(inputIndex); return; } // TODO: remove these one by one
Matrix<ElemType> sliceInput0Grad = Input(0)->GradientFor(frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
Matrix<ElemType> sliceOutputGrad = GradientFor(frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
if (fr.IsAllFrames()) { BackpropToMap(inputIndex); return; } // TODO: remove these one by one
Matrix<ElemType> sliceInput0Grad = Input(0)->GradientFor(fr);
Matrix<ElemType> sliceOutputGrad = GradientFor(fr);
Matrix<ElemType> sliceMask = Matrix<ElemType>();
if (m_dropoutRate > 0)
{
sliceMask = DataFor(*m_maskOfDropout, frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
sliceMask = DataFor(*m_maskOfDropout, fr);
}
BackpropToS(m_dropoutRate, sliceInput0Grad, sliceMask, sliceOutputGrad);
@ -1042,10 +1042,10 @@ namespace Microsoft { namespace MSR { namespace CNTK {
{
ForwardPropS(m_dropoutRate, m_randomSeed, Output(), *m_maskOfDropout, Input(0)->Output());
}
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & frameRange) override
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & fr) override
{
//if (frameRange.IsAllFrames()) { ForwardPropMap(); return; }
Matrix<ElemType> sliceInput0Value = Input(0)->OutputFor(frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
//if (fr.IsAllFrames()) { ForwardPropMap(); return; }
Matrix<ElemType> sliceInput0Value = Input(0)->OutputFor(fr);
Matrix<ElemType> sliceOutputValue = Matrix <ElemType>();
Matrix<ElemType> sliceMask = Matrix<ElemType>();
@ -1053,10 +1053,10 @@ namespace Microsoft { namespace MSR { namespace CNTK {
{
SetDims(Input(0));
m_maskOfDropout->Resize(Input(0)->GetNumRows(), Input(0)->GetNumCols());
sliceMask = DataFor(*m_maskOfDropout, frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
sliceMask = DataFor(*m_maskOfDropout, fr);
}
sliceOutputValue = OutputFor(frameRange/*TODO: delete this:*/.Check_t(GetNumParallelSequences(), m_pMBLayout));
sliceOutputValue = OutputFor(fr);
ForwardPropS(m_dropoutRate, m_randomSeed, sliceOutputValue, sliceMask, sliceInput0Value);
}
@ -1128,16 +1128,16 @@ namespace Microsoft { namespace MSR { namespace CNTK {
}
}
//request matrices needed to do node function value evaluation
virtual void RequestMatricesBeforeEval(MatrixPool& matrixPool)
virtual void RequestMatricesBeforeForwardProp(MatrixPool& matrixPool)
{
Base::RequestMatricesBeforeEval(matrixPool);
Base::RequestMatricesBeforeForwardProp(matrixPool);
RequestMatrixFromPool(m_maskOfDropout, matrixPool);
}
//release gradient and temp matrices that no longer needed after all the children's gradients are computed.
virtual void ReleaseMatricesAfterGradientComp(MatrixPool& matrixPool)
virtual void ReleaseMatricesAfterBackprop(MatrixPool& matrixPool)
{
Base::ReleaseMatricesAfterGradientComp(matrixPool);
Base::ReleaseMatricesAfterBackprop(matrixPool);
ReleaseMatrixToPool(m_maskOfDropout, matrixPool);
}
private:
@ -1173,7 +1173,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
LogicError("Hardmax is not differentiable and is used for evaluation only.");
}
virtual void /*ComputationNode::*/BackpropTo(const size_t /*inputIndex*/, const FrameRange & /*frameRange*/) override
virtual void /*ComputationNode::*/BackpropTo(const size_t /*inputIndex*/, const FrameRange & /*fr*/) override
{
LogicError("Hardmax is not differentiable and is used for evaluation only.");
}

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -43,7 +43,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
// stack K consecutive frames into a single frame that is K times taller
// FrameRange and MBLayout refer to the 'to' (reduced) timeline.
// BUGBUG: THIS IS UNTESTED!!
static void Stack(const FrameRange & frameRange, const shared_ptr<MBLayout> & pMBLayout, /*const*/ Matrix<ElemType> & from, Matrix<ElemType> & to, size_t K, bool addTo)
static void Stack(const FrameRange & fr, const shared_ptr<MBLayout> & pMBLayout, /*const*/ Matrix<ElemType> & from, Matrix<ElemType> & to, size_t K, bool addTo)
{
// example
// input: T=2, D=2, K=3, S=2 (abcdef and uvwxyz)
@ -83,11 +83,11 @@ namespace Microsoft { namespace MSR { namespace CNTK {
// D = featDim
// M = 1, thrown in for generality of underlying Matrix function
// We operate on the 'to' layout, frameRange refers to result, not the input.
// We operate on the 'to' layout, fr refers to result, not the input.
// The input layout is different, but reshaping the input to output dimensions will allow us to pull out the right values anyway.
auto from0 = from.Reshaped(to.GetNumRows(), to.GetNumCols()); // we operate on 'to' layout
auto fromSlice0 = DataWithMBLayoutFor(from0, frameRange, pMBLayout);
auto toSlice0 = DataWithMBLayoutFor(to, frameRange, pMBLayout);
auto fromSlice0 = DataWithMBLayoutFor(from0, fr, pMBLayout);
auto toSlice0 = DataWithMBLayoutFor(to, fr, pMBLayout);
// now we got views on the right ranges of values, but with weird dimensions
// reshape them into a unified view with D being the row dimension, and (S,M,K,T) the column dimension
@ -106,11 +106,11 @@ namespace Microsoft { namespace MSR { namespace CNTK {
// split frames of D*K elements into K consecutive frames of dimension D.
// FrameRange and MBLayout refer to the 'from' (reduced) timeline.
// This function is the inverse of Stack(). See comments there and exchange from and to.
static void Unstack(const FrameRange & frameRange, const shared_ptr<MBLayout> & pMBLayout, /*const*/ Matrix<ElemType> & from, Matrix<ElemType> & to, size_t K, bool addTo)
static void Unstack(const FrameRange & fr, const shared_ptr<MBLayout> & pMBLayout, /*const*/ Matrix<ElemType> & from, Matrix<ElemType> & to, size_t K, bool addTo)
{
auto fromSlice0 = DataWithMBLayoutFor(from, frameRange, pMBLayout);
auto fromSlice0 = DataWithMBLayoutFor(from, fr, pMBLayout);
auto to0 = to.Reshaped(from.GetNumRows(), from.GetNumCols());
auto toSlice0 = DataWithMBLayoutFor(to0, frameRange, pMBLayout);
auto toSlice0 = DataWithMBLayoutFor(to0, fr, pMBLayout);
size_t D = to.GetNumRows();
size_t SMKT = to.GetNumCols();
@ -307,8 +307,8 @@ namespace Microsoft { namespace MSR { namespace CNTK {
// notes:
// - input and output have different time base and different layouts (unless the canonical case of factor() == 1)
// - frameRange refers to *functionValues*, not the inputs
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & frameRange) override
// - fr refers to *functionValues*, not the inputs
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & fr) override
{
size_t rows = Input(0)->GetNumRows(), cols = Input(0)->GetNumCols();
size_t newCols = cols * rows / m_numTargetRows;
@ -326,16 +326,16 @@ namespace Microsoft { namespace MSR { namespace CNTK {
{
// TODO: It does not make sense to run ReshapeNode frame-by-frame inside a loop, because it changes the time base.
// However, in the future, we should be able to run inside an outer loop.
if (!frameRange.IsAllFrames())
if (!fr.IsAllFrames())
InvalidArgument("%ls %ls operation cannot be run from inside a loop since it changes the time base.", NodeName().c_str(), OperationName().c_str());
if (weStack())
Base::Stack(frameRange, m_pMBLayout, Input(0)->Output(), Output(), factor(), false/*addTo*/);
Base::Stack(fr, m_pMBLayout, Input(0)->Output(), Output(), factor(), false/*addTo*/);
else
Base::Unstack(frameRange.WithLayout(Input(0)->GetMBLayout()), Input(0)->GetMBLayout(), Input(0)->Output(), Output(), factor(), false/*addTo*/);
Base::Unstack(fr.WithLayout(Input(0)->GetMBLayout()), Input(0)->GetMBLayout(), Input(0)->Output(), Output(), factor(), false/*addTo*/);
}
}
virtual void /*ComputationNode::*/BackpropTo(const size_t /*inputIndex*/, const FrameRange & frameRange) override
virtual void /*ComputationNode::*/BackpropTo(const size_t /*inputIndex*/, const FrameRange & fr) override
{
size_t rows = Input(0)->GetNumRows(), cols = Input(0)->GetNumCols();
size_t newCols = cols * rows / m_numTargetRows;
@ -349,9 +349,9 @@ namespace Microsoft { namespace MSR { namespace CNTK {
else
{
if (weStack())
Base::Unstack(frameRange, m_pMBLayout, GradientValues(), Input(0)->GradientValues(), factor(), true/*addTo*/);
Base::Unstack(fr, m_pMBLayout, GradientValues(), Input(0)->GradientValues(), factor(), true/*addTo*/);
else
Base::Stack(frameRange.WithLayout(Input(0)->GetMBLayout()), Input(0)->GetMBLayout(), GradientValues(), Input(0)->GradientValues(), factor(), true/*addTo*/);
Base::Stack(fr.WithLayout(Input(0)->GetMBLayout()), Input(0)->GetMBLayout(), GradientValues(), Input(0)->GradientValues(), factor(), true/*addTo*/);
}
}
@ -437,13 +437,13 @@ namespace Microsoft { namespace MSR { namespace CNTK {
Base(deviceId, name)
{ }
virtual void /*ComputationNode::*/BackpropTo(const size_t /*inputIndex*/, const FrameRange & frameRange) override
virtual void /*ComputationNode::*/BackpropTo(const size_t /*inputIndex*/, const FrameRange & fr) override
{
Input(0)->GradientFor(frameRange.WithLayout(Input(0)->GetMBLayout())) += GradientFor(frameRange);
Input(0)->GradientFor(fr.WithLayout(Input(0)->GetMBLayout())) += GradientFor(fr);
// TODO: Once we do in-place, the above must include a copy-to-self check (pay special attention to adding vs. copying).
}
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & frameRange) override
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & fr) override
{
// enforce compatibility of 'dataInput' with 'layoutInput'
// TODO: how to deal with boundary flags?
@ -454,7 +454,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
Input(1)->NodeName().c_str(), Input(1)->OperationName().c_str());
// copy the data from 'dataInput'
OutputFor(frameRange).SetValue(Input(0)->OutputFor(frameRange.WithLayout(Input(0)->GetMBLayout()))); // just propagate through
OutputFor(fr).SetValue(Input(0)->OutputFor(fr.WithLayout(Input(0)->GetMBLayout()))); // just propagate through
// TODO: Once we do in-place, the above must include a copy-to-self check (either here or inside the matrix lib).
}
@ -519,14 +519,14 @@ namespace Microsoft { namespace MSR { namespace CNTK {
fstream >> m_startIndex >> m_sliceHeight;
}
virtual void /*ComputationNode::*/BackpropTo(const size_t /*inputIndex*/, const FrameRange & frameRange) override
virtual void /*ComputationNode::*/BackpropTo(const size_t /*inputIndex*/, const FrameRange & fr) override
{
Input(0)->GradientFor(frameRange).AddToRowSliceValuesOf(GradientFor(frameRange), m_startIndex, m_sliceHeight);
Input(0)->GradientFor(fr).AddToRowSliceValuesOf(GradientFor(fr), m_startIndex, m_sliceHeight);
}
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & frameRange) override
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & fr) override
{
OutputFor(frameRange).AssignRowSliceValuesOf(Input(0)->OutputFor(frameRange), m_startIndex, m_sliceHeight);
OutputFor(fr).AssignRowSliceValuesOf(Input(0)->OutputFor(fr), m_startIndex, m_sliceHeight);
}
virtual void /*ComputationNodeBase::*/Validate(bool isFinalValidationPass) override
@ -584,15 +584,15 @@ namespace Microsoft { namespace MSR { namespace CNTK {
}
}
virtual void /*ComputationNode::*/BackpropTo(const size_t inputIndex, const FrameRange & frameRange) override
virtual void /*ComputationNode::*/BackpropTo(const size_t inputIndex, const FrameRange & fr) override
{
Input(inputIndex)->GradientFor(frameRange).AddWithRowSliceValuesOf(GradientFor(frameRange), m_startRowIndices[inputIndex], Input(inputIndex)->GetNumRows());
Input(inputIndex)->GradientFor(fr).AddWithRowSliceValuesOf(GradientFor(fr), m_startRowIndices[inputIndex], Input(inputIndex)->GetNumRows());
}
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & frameRange) override
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & fr) override
{
for (size_t inputIndex = 0; inputIndex < GetNumInputs(); inputIndex++)
OutputFor(frameRange).AssignToRowSliceValuesOf(Input(inputIndex)->OutputFor(frameRange), m_startRowIndices[inputIndex], Input(inputIndex)->GetNumRows());
OutputFor(fr).AssignToRowSliceValuesOf(Input(inputIndex)->OutputFor(fr), m_startRowIndices[inputIndex], Input(inputIndex)->GetNumRows());
}
virtual void /*ComputationNodeBase::*/Validate(bool isFinalValidationPass) override
@ -727,14 +727,14 @@ namespace Microsoft { namespace MSR { namespace CNTK {
InferImageDimsFromInputs();
}
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & frameRange) override
virtual void /*ComputationNode::*/ForwardProp(const FrameRange & fr) override
{
OutputFor(frameRange).AssignRepeatOf(Input(0)->OutputFor(frameRange), m_numRepeat, 1);
OutputFor(fr).AssignRepeatOf(Input(0)->OutputFor(fr), m_numRepeat, 1);
}
virtual void /*ComputationNode::*/BackpropTo(const size_t /*inputIndex*/, const FrameRange & frameRange) override
virtual void /*ComputationNode::*/BackpropTo(const size_t /*inputIndex*/, const FrameRange & fr) override
{
Input(0)->GradientFor(frameRange).AddToRowRepeatValuesOf(GradientFor(frameRange), m_numRepeat);
Input(0)->GradientFor(fr).AddToRowRepeatValuesOf(GradientFor(fr), m_numRepeat);
}
private:

Просмотреть файл

@ -36,16 +36,9 @@ namespace Microsoft { namespace MSR { namespace CNTK {
virtual void BackpropToNonLooping(size_t inputIndex) override
{
FrameRange frameRange(Input(0)->GetMBLayout());
#if 1
auto gradient = Input(inputIndex)->GradientFor(frameRange);
FrameRange fr(Input(0)->GetMBLayout());
auto gradient = Input(inputIndex)->GradientFor(fr);
Matrix<ElemType>::Multiply1x1AndWeightedAdd(inputIndex == 0 ? 1.0f : -1.0f, GradientValues()/*1x1*/, *m_leftMinusRight, 1.0f, gradient);
#else
if (inputIndex == 0)
Input(0)->GradientFor(frameRange).AddWithScaleOf(GradientValues().Get00Element(), *m_leftMinusRight);
else
Input(1)->GradientFor(frameRange).AddWithScaleOf(-GradientValues().Get00Element(), *m_leftMinusRight);
#endif
}
virtual void UpdateFunctionMBSize() override
@ -55,9 +48,9 @@ namespace Microsoft { namespace MSR { namespace CNTK {
virtual void /*ComputationNodeNonLooping::*/ForwardPropNonLooping() override
{
FrameRange frameRange(Input(0)->GetMBLayout());
m_leftMinusRight->AssignDifferenceOf(Input(0)->OutputFor(frameRange), Input(1)->OutputFor(frameRange));
MaskMissingColumnsToZero(*m_leftMinusRight, Input(0)->GetMBLayout(), frameRange); // we are fine since it will only be called with full minibatch.
FrameRange fr(Input(0)->GetMBLayout());
m_leftMinusRight->AssignDifferenceOf(Input(0)->OutputFor(fr), Input(1)->OutputFor(fr));
MaskMissingColumnsToZero(*m_leftMinusRight, Input(0)->GetMBLayout(), fr); // we are fine since it will only be called with full minibatch.
ElemType v = m_leftMinusRight->FrobeniusNorm();
VerifyDims(1,1);
Output().SetValue(v*v / 2);
@ -89,16 +82,16 @@ namespace Microsoft { namespace MSR { namespace CNTK {
}
//request matrices needed to do node function value evaluation
virtual void RequestMatricesBeforeEval(MatrixPool& matrixPool)
virtual void RequestMatricesBeforeForwardProp(MatrixPool& matrixPool)
{
Base::RequestMatricesBeforeEval(matrixPool);
Base::RequestMatricesBeforeForwardProp(matrixPool);
RequestMatrixFromPool(m_leftMinusRight, matrixPool);
}
//release gradient and temp matrices that no longer needed after all the children's gradients are computed.
virtual void ReleaseMatricesAfterGradientComp(MatrixPool& matrixPool)
virtual void ReleaseMatricesAfterBackprop(MatrixPool& matrixPool)
{
Base::ReleaseMatricesAfterGradientComp(matrixPool);
Base::ReleaseMatricesAfterBackprop(matrixPool);
ReleaseMatrixToPool(m_leftMinusRight, matrixPool);
}
@ -127,21 +120,20 @@ namespace Microsoft { namespace MSR { namespace CNTK {
virtual void BackpropToNonLooping(size_t inputIndex) override
{
FrameRange frameRange(Input(0)->GetMBLayout());
FrameRange fr(Input(0)->GetMBLayout());
// left input is scalar
if (inputIndex == 0) //left derivative
{
#if DUMPOUTPUT
*m_logSoftmaxOfRight.Print("CrossEntropyWithSoftmax Partial-logSoftmaxOfRight");
GradientValues().Print("CrossEntropyWithSoftmax Partial-gradientValues");
Input(0)->GradientFor(frameRange).Print("CrossEntropyWithSoftmaxNode Partial-Left-in");
Input(0)->GradientFor(fr).Print("CrossEntropyWithSoftmaxNode Partial-Left-in");
#endif
auto gradient = Input(0)->GradientFor(frameRange);
//Matrix<ElemType>::ScaleAndAdd(-GradientValues().Get00Element(), *m_logSoftmaxOfRight, gradient);
auto gradient = Input(0)->GradientFor(fr);
Matrix<ElemType>::Multiply1x1AndWeightedAdd(-1.0f, GradientValues()/*1x1*/, *m_logSoftmaxOfRight, 1.0f, gradient);
#if DUMPOUTPUT
Input(0)->GradientFor(frameRange).Print("CrossEntropyWithSoftmaxNode Partial-Left-out");
Input(0)->GradientFor(fr).Print("CrossEntropyWithSoftmaxNode Partial-Left-out");
#endif
}
@ -150,18 +142,18 @@ namespace Microsoft { namespace MSR { namespace CNTK {
{
#if DUMPOUTPUT
*m_softmaxOfRight.Print("CrossEntropyWithSoftmax Partial-softmaxOfRight");
Input(0)->OutputFor(frameRange).Print("CrossEntropyWithSoftmax Partial-inputFunctionValues");
Input(0)->OutputFor(fr).Print("CrossEntropyWithSoftmax Partial-inputFunctionValues");
GradientValues().Print("CrossEntropyWithSoftmax Partial-gradientValues");
Input(1)->GradientFor(frameRange).Print("CrossEntropyWithSoftmaxNode Partial-Right-in");
Input(1)->GradientFor(fr).Print("CrossEntropyWithSoftmaxNode Partial-Right-in");
#endif
auto gradient = Input(1)->GradientFor(frameRange);
Matrix<ElemType>::AddScaledDifference(GradientValues(), *m_softmaxOfRight, Input(0)->OutputFor(frameRange), gradient);
auto gradient = Input(1)->GradientFor(fr);
Matrix<ElemType>::AddScaledDifference(GradientValues(), *m_softmaxOfRight, Input(0)->OutputFor(fr), gradient);
#if DUMPOUTPUT
Input(1)->GradientFor(frameRange).Print("CrossEntropyWithSoftmaxNode Partial-Right");
Input(1)->GradientFor(fr).Print("CrossEntropyWithSoftmaxNode Partial-Right");
#endif
#ifdef _DEBUG
Input(1)->InvalidateMissingGradientColumns(frameRange); // TODO: This should not be necessary.
Input(1)->InvalidateMissingGradientColumns(fr); // TODO: This should not be necessary.
#endif
}
}
@ -174,16 +166,16 @@ namespace Microsoft { namespace MSR { namespace CNTK {
virtual void /*ComputationNodeNonLooping::*/ForwardPropNonLooping() override //-sum(left_i * log(softmax_i(right)))
{
FrameRange frameRange(Input(0)->GetMBLayout());
FrameRange fr(Input(0)->GetMBLayout());
// first compute the softmax (column-wise)
// Note that we need both log and non-log for gradient computation.
m_logSoftmaxOfRight->AssignLogSoftmaxOf(Input(1)->OutputFor(frameRange), true);
m_logSoftmaxOfRight->AssignLogSoftmaxOf(Input(1)->OutputFor(fr), true);
m_softmaxOfRight->SetValue(*m_logSoftmaxOfRight);
m_softmaxOfRight->InplaceExp();
// flatten all gaps to zero, such that gaps will contribute zero to the sum
MaskMissingColumnsToZero(*m_logSoftmaxOfRight, Input(1)->GetMBLayout(), frameRange);
MaskMissingColumnsToZero(*m_logSoftmaxOfRight, Input(1)->GetMBLayout(), fr);
// reduce over all frames
Output().AssignInnerProductOfMatrices(Input(0)->MaskedValueSlice(frameRange), *m_logSoftmaxOfRight);
Output().AssignInnerProductOfMatrices(Input(0)->MaskedValueSlice(fr), *m_logSoftmaxOfRight);
Output() *= -1;
#if NANCHECK
Output().HasNan("CrossEntropyWithSoftmax");
@ -217,9 +209,9 @@ namespace Microsoft { namespace MSR { namespace CNTK {
}
//request matrices needed to do node function value evaluation
virtual void RequestMatricesBeforeEval(MatrixPool& matrixPool)
virtual void RequestMatricesBeforeForwardProp(MatrixPool& matrixPool)
{
Base::RequestMatricesBeforeEval(matrixPool);
Base::RequestMatricesBeforeForwardProp(matrixPool);
RequestMatrixFromPool(m_logSoftmaxOfRight, matrixPool);
RequestMatrixFromPool(m_softmaxOfRight, matrixPool);
}
@ -252,22 +244,21 @@ namespace Microsoft { namespace MSR { namespace CNTK {
virtual void BackpropToNonLooping(size_t inputIndex) override
{
FrameRange frameRange(Input(0)->GetMBLayout());
FrameRange fr(Input(0)->GetMBLayout());
//left Node must be a scalar
if (inputIndex == 0) //left derivative
{
BackpropToLeft(*m_logOfRight, Input(0)->GradientFor(frameRange), GradientValues());
BackpropToLeft(*m_logOfRight, Input(0)->GradientFor(fr), GradientValues());
}
else
{
BackpropToRight(*m_leftDivRight, Input(0)->OutputFor(frameRange), Input(1)->OutputFor(frameRange), Input(1)->GradientFor(frameRange), GradientValues());
BackpropToRight(*m_leftDivRight, Input(0)->OutputFor(fr), Input(1)->OutputFor(fr), Input(1)->GradientFor(fr), GradientValues());
}
}
/*TODO: merge with call site*/void BackpropToLeft(const Matrix<ElemType>& logOfRight, Matrix<ElemType> inputGradientValues,
const Matrix<ElemType>& gradientValues)
{
//Matrix<ElemType>::ScaleAndAdd(-gradientValues.Get00Element(), logOfRight, inputGradientValues);
Matrix<ElemType>::Multiply1x1AndWeightedAdd(-1.0f, gradientValues/*1x1*/, logOfRight, 1.0f, inputGradientValues);
}
@ -275,10 +266,9 @@ namespace Microsoft { namespace MSR { namespace CNTK {
const Matrix<ElemType> inputFunctionValues0, const Matrix<ElemType> inputFunctionValues1,
Matrix<ElemType> inputGradientValues, const Matrix<ElemType>& gradientValues)
{
FrameRange frameRange(Input(0)->GetMBLayout());
FrameRange fr(Input(0)->GetMBLayout());
leftDivRight.AssignElementDivisionOf(inputFunctionValues0, inputFunctionValues1);
MaskMissingColumnsToZero(leftDivRight, Input(0)->GetMBLayout(), frameRange);
//Matrix<ElemType>::ScaleAndAdd(-gradientValues.Get00Element(), leftDivRight, inputGradientValues);
MaskMissingColumnsToZero(leftDivRight, Input(0)->GetMBLayout(), fr);
Matrix<ElemType>::Multiply1x1AndWeightedAdd(-1.0f, gradientValues/*1x1*/, leftDivRight, 1.0f, inputGradientValues);
}
@ -291,11 +281,11 @@ namespace Microsoft { namespace MSR { namespace CNTK {
//-sum(left_i * log(right_i))
virtual void /*ComputationNodeNonLooping::*/ForwardPropNonLooping() override
{
FrameRange frameRange(Input(0)->GetMBLayout());
m_logOfRight->SetValue(Input(1)->OutputFor(frameRange));
FrameRange fr(Input(0)->GetMBLayout());
m_logOfRight->SetValue(Input(1)->OutputFor(fr));
m_logOfRight->InplaceLog();
MaskMissingColumnsToZero(*m_logOfRight, Input(1)->GetMBLayout(), frameRange);
Output().AssignInnerProductOfMatrices(Input(0)->MaskedValueSlice(frameRange), *m_logOfRight);
MaskMissingColumnsToZero(*m_logOfRight, Input(1)->GetMBLayout(), fr);
Output().AssignInnerProductOfMatrices(Input(0)->MaskedValueSlice(fr), *m_logOfRight);
Output() *= -1;
#if NANCHECK
functionValues.HasNan("CrossEntropy");
@ -328,23 +318,23 @@ namespace Microsoft { namespace MSR { namespace CNTK {
}
//request matrices needed to do node function value evaluation
virtual void RequestMatricesBeforeEval(MatrixPool& matrixPool)
virtual void RequestMatricesBeforeForwardProp(MatrixPool& matrixPool)
{
Base::RequestMatricesBeforeEval(matrixPool);
Base::RequestMatricesBeforeForwardProp(matrixPool);
RequestMatrixFromPool(m_logOfRight, matrixPool);
}
//request matrices that are needed for gradient computation
virtual void RequestMatricesBeforeGradientComp(MatrixPool& matrixPool)
virtual void RequestMatricesBeforeBackprop(MatrixPool& matrixPool)
{
Base::RequestMatricesBeforeGradientComp(matrixPool);
Base::RequestMatricesBeforeBackprop(matrixPool);
RequestMatrixFromPool(m_leftDivRight, matrixPool);
}
//release gradient and temp matrices that no longer needed after all the children's gradients are computed.
virtual void ReleaseMatricesAfterGradientComp(MatrixPool& matrixPool)
virtual void ReleaseMatricesAfterBackprop(MatrixPool& matrixPool)
{
Base::ReleaseMatricesAfterGradientComp(matrixPool);
Base::ReleaseMatricesAfterBackprop(matrixPool);
ReleaseMatrixToPool(m_logOfRight, matrixPool);
ReleaseMatrixToPool(m_leftDivRight, matrixPool);
}
@ -377,16 +367,15 @@ namespace Microsoft { namespace MSR { namespace CNTK {
virtual void BackpropToNonLooping(size_t inputIndex) override // scale by number of cols (or samples)
{
FrameRange frameRange(Input(0)->GetMBLayout());
FrameRange fr(Input(0)->GetMBLayout());
assert(inputIndex == 0); inputIndex;
BackpropToS(*m_gradientOfL1Norm, Input(0)->GradientFor(frameRange), GradientValues(), Input(0)->OutputFor(frameRange));
BackpropToS(*m_gradientOfL1Norm, Input(0)->GradientFor(fr), GradientValues(), Input(0)->OutputFor(fr));
}
/*TODO: merge with call site*/void BackpropToS(Matrix<ElemType>& gradientOfL1Norm,
Matrix<ElemType> inputGradientValues, const Matrix<ElemType>& gradientValues, const Matrix<ElemType>& inputFunctionValues)
{
gradientOfL1Norm.AssignSignOf(inputFunctionValues);
//inputGradientValues.AddWithScaleOf(gradientValues.Get00Element(), gradientOfL1Norm);
Matrix<ElemType>::Multiply1x1AndWeightedAdd(+1.0f, gradientValues/*1x1*/, gradientOfL1Norm, 1.0f, inputGradientValues);
}
@ -397,9 +386,9 @@ namespace Microsoft { namespace MSR { namespace CNTK {
virtual void /*ComputationNodeNonLooping::*/ForwardPropNonLooping() override
{
FrameRange frameRange(Input(0)->GetMBLayout());
FrameRange fr(Input(0)->GetMBLayout());
VerifyDims(1, 1);
Output().SetValue(Input(0)->MaskedValueSlice(frameRange).MatrixNorm1());
Output().SetValue(Input(0)->MaskedValueSlice(fr).MatrixNorm1());
#if NANCHECK
Output().HasNan("MatrixL1Reg");
#endif
@ -428,16 +417,16 @@ namespace Microsoft { namespace MSR { namespace CNTK {
}
//request matrices that are needed for gradient computation
virtual void RequestMatricesBeforeGradientComp(MatrixPool& matrixPool)
virtual void RequestMatricesBeforeBackprop(MatrixPool& matrixPool)
{
Base::RequestMatricesBeforeGradientComp(matrixPool);
Base::RequestMatricesBeforeBackprop(matrixPool);
RequestMatrixFromPool(m_gradientOfL1Norm, matrixPool);
}
//release gradient and temp matrices that no longer needed after all the children's gradients are computed.
virtual void ReleaseMatricesAfterGradientComp(MatrixPool& matrixPool)
virtual void ReleaseMatricesAfterBackprop(MatrixPool& matrixPool)
{
Base::ReleaseMatricesAfterGradientComp(matrixPool);
Base::ReleaseMatricesAfterBackprop(matrixPool);
ReleaseMatrixToPool(m_gradientOfL1Norm, matrixPool);
}
@ -466,9 +455,9 @@ namespace Microsoft { namespace MSR { namespace CNTK {
virtual void BackpropToNonLooping(size_t inputIndex) override // scale by number of cols (or samples)
{
FrameRange frameRange(Input(0)->GetMBLayout());
FrameRange fr(Input(0)->GetMBLayout());
assert(inputIndex == 0); inputIndex;
BackpropToS(Input(0)->GradientFor(frameRange), GradientValues(), Input(0)->OutputFor(frameRange), Output());
BackpropToS(Input(0)->GradientFor(fr), GradientValues(), Input(0)->OutputFor(fr), Output());
}
/*TODO: merge with call site*/void BackpropToS(Matrix<ElemType> inputGradientValues, const Matrix<ElemType>& gradientValues, const Matrix<ElemType>& inputFunctionValues, const Matrix<ElemType>& functionValues)
@ -479,9 +468,9 @@ namespace Microsoft { namespace MSR { namespace CNTK {
virtual void /*ComputationNodeNonLooping::*/ForwardPropNonLooping() override
{
FrameRange frameRange(Input(0)->GetMBLayout());
FrameRange fr(Input(0)->GetMBLayout());
VerifyDims(1,1);
Output().SetValue(Input(0)->MaskedValueSlice(frameRange).FrobeniusNorm());
Output().SetValue(Input(0)->MaskedValueSlice(fr).FrobeniusNorm());
#if NANCHECK
Output().HasNan("MatrixL2Reg");
#endif
@ -560,7 +549,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
*/
virtual void BackpropToNonLooping(size_t inputIndex) override
{
FrameRange frameRange(Input(0)->GetMBLayout());
FrameRange fr(Input(0)->GetMBLayout());
m_needRecomputeGradientToSoftmaxInput = false;
//gradient computation@yinggongzhao
//inputIndex should be 2 this time
@ -569,7 +558,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
if (inputIndex == 0)
InvalidArgument("ComputeInput partial should not be called for label");
// samples+probs hidden embedding
Input(inputIndex)->GradientFor(frameRange).AssignNCEDerivative(m_ncePrediction, Input(0)->OutputFor(frameRange), Input(1)->OutputFor(frameRange), Input(2)->Output(), inputIndex);
Input(inputIndex)->GradientFor(fr).AssignNCEDerivative(m_ncePrediction, Input(0)->OutputFor(fr), Input(1)->OutputFor(fr), Input(2)->Output(), inputIndex);
}
#if 0 // TODO: delete this. Seems copy-paste leftover?
@ -597,10 +586,10 @@ namespace Microsoft { namespace MSR { namespace CNTK {
virtual void /*ComputationNodeNonLooping::*/ForwardPropNonLooping() override //-sum(left_i * log(softmax_i(right)))
{
FrameRange frameRange(Input(0)->GetMBLayout());
FrameRange fr(Input(0)->GetMBLayout());
if (Input(0)->HasMBLayout() && Input(0)->GetMBLayout()->HasGaps())
LogicError("%ls %ls operation does not handle multiple parallel sequences with gaps correctly. Contact fseide@microsoft.com if you have a need and a test case.", NodeName().c_str(), OperationName().c_str());
//Input(0)->MaskMissingValuesColumnsToZero(frameRange);
//Input(0)->MaskMissingValuesColumnsToZero(fr);
int positive = 0, negative = 0;
if (Input(0)->GetNumRows() == 1)
{
@ -619,7 +608,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
m_logSoftmax.AssignProductOf(Input(1)->Output(), true, Input(2)->Output(), false);
m_logSoftmax += Input(3)->Output();
m_logSoftmax.InplaceLogSoftmax(false);
MaskMissingColumnsToZero(m_logSoftmax, Input(1)->GetMBLayout(), frameRange); // TODO: is this the right way to neutralize gaps?
MaskMissingColumnsToZero(m_logSoftmax, Input(1)->GetMBLayout(), fr); // TODO: is this the right way to neutralize gaps?
Output().AssignSoftmaxSum(Input(0)->Output(), m_logSoftmax);
}
else if (m_evalMode == NCEEvalMode::Unnormalized || (Input(0)->GetNumRows() == 1 && negative > 0))
@ -738,9 +727,9 @@ namespace Microsoft { namespace MSR { namespace CNTK {
{
if (Input(0)->GetMBLayout()->Is(s, t, MinibatchPackingFlags::NoInput)) // skip gaps
continue;
FrameRange frameRange = FrameRange(Input(0)->GetMBLayout(), t).Sequence(s);
FrameRange fr = FrameRange(Input(0)->GetMBLayout(), t).Sequence(s);
Matrix<ElemType> lbl_t = Input(0)->OutputFor(frameRange);
Matrix<ElemType> lbl_t = Input(0)->OutputFor(fr);
size_t c_t = (size_t)lbl_t(1, 0);
size_t lft_bnd = (size_t)lbl_t(2, 0); // index of first word belonging to current word token's class
size_t rgt_bnd = (size_t)lbl_t(3, 0); // and end of that range
@ -748,15 +737,15 @@ namespace Microsoft { namespace MSR { namespace CNTK {
// compute prb - 1 and prb
Matrix<ElemType> weightForClass = Input(2)->Output().ColumnSlice(lft_bnd, nbr_wrd);
Matrix<ElemType> obs = Input(1)->OutputFor(frameRange); // hidden activation vector for current word token
Matrix<ElemType> obs = Input(1)->OutputFor(fr); // hidden activation vector for current word token
Matrix<ElemType> grd_to_soft_max_input = m_grdToSoftMaxInput.ColumnSlice(sz, nbr_wrd);
Matrix<ElemType> grd_to_cls_prob = DataWithMBLayoutFor(m_clsLogSoftmax, frameRange, Input(3)->GetMBLayout());
Matrix<ElemType> grd_to_cls_prob = DataWithMBLayoutFor(m_clsLogSoftmax, fr, Input(3)->GetMBLayout());
switch (inputIndex)
{
case 1:
// gradient to input
grd_t = Input(1)->GradientFor(frameRange);
grd_t = Input(1)->GradientFor(fr);
Matrix<ElemType>::MultiplyAndAdd(weightForClass, false, grd_to_soft_max_input, true, grd_t);
break;
case 2:
@ -765,8 +754,8 @@ namespace Microsoft { namespace MSR { namespace CNTK {
Matrix<ElemType>::MultiplyAndAdd(obs, false, grd_to_soft_max_input, false, grd_to_wgt_t);
break;
case 3:
grd_t = Input(3)->GradientFor(frameRange);
grd_t.SetValue(DataWithMBLayoutFor(m_clsSoftmax, frameRange, Input(3)->GetMBLayout()));
grd_t = Input(3)->GradientFor(fr);
grd_t.SetValue(DataWithMBLayoutFor(m_clsSoftmax, fr, Input(3)->GetMBLayout()));
ComputeCEPartialToSoftmaxInputs(grd_t, GradientValues(), c_t);
break;
}
@ -795,9 +784,9 @@ namespace Microsoft { namespace MSR { namespace CNTK {
{
if (Input(0)->GetMBLayout()->Is(s, t, MinibatchPackingFlags::NoInput)) // skip gaps
continue;
FrameRange frameRange = FrameRange(Input(0)->GetMBLayout(), t).Sequence(s);
FrameRange fr = FrameRange(Input(0)->GetMBLayout(), t).Sequence(s);
Matrix<ElemType> lbl_t = Input(0)->OutputFor(frameRange);
Matrix<ElemType> lbl_t = Input(0)->OutputFor(fr);
size_t y_t = (size_t)lbl_t(0, 0); // word index
size_t lft_bnd = (size_t)lbl_t(2, 0); // index of first word belonging to current word token's class
size_t rgt_bnd = (size_t)lbl_t(3, 0); // and end of that range
@ -850,9 +839,9 @@ namespace Microsoft { namespace MSR { namespace CNTK {
{
if (Input(0)->GetMBLayout()->Is(s, t, MinibatchPackingFlags::NoInput)) // skip gaps
continue;
FrameRange frameRange = FrameRange(Input(0)->GetMBLayout(), t).Sequence(s);
FrameRange fr = FrameRange(Input(0)->GetMBLayout(), t).Sequence(s);
const Matrix<ElemType> & lbl_t = Input(0)->OutputFor(frameRange);
const Matrix<ElemType> & lbl_t = Input(0)->OutputFor(fr);
size_t lft_bnd = (size_t)lbl_t(2, 0);
size_t rgt_bnd = (size_t)lbl_t(3, 0);
size_t nbr_wrd = (rgt_bnd - lft_bnd); // number of words in the class
@ -874,9 +863,9 @@ namespace Microsoft { namespace MSR { namespace CNTK {
{
if (Input(0)->GetMBLayout()->Is(s, t, MinibatchPackingFlags::NoInput)) // skip gaps
continue;
FrameRange frameRange = FrameRange(Input(0)->GetMBLayout(), t).Sequence(s);
FrameRange fr = FrameRange(Input(0)->GetMBLayout(), t).Sequence(s);
const Matrix<ElemType> & lbl_t = Input(0)->OutputFor(frameRange);
const Matrix<ElemType> & lbl_t = Input(0)->OutputFor(fr);
size_t y_t = (size_t)lbl_t(0, 0); // current word token index
size_t c_t = (size_t)lbl_t(1, 0); // current word token's class index
size_t lft_bnd = (size_t)lbl_t(2, 0); // index of first word belonging to current word token's class
@ -892,7 +881,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
Matrix<ElemType> softMax_t = m_softMax.ColumnSlice(sz, nbr_wrd);
Matrix<ElemType> logSoftMax_t = m_logSoftmax.ColumnSlice(sz, nbr_wrd);
Matrix<ElemType> obs = Input(1)->OutputFor(frameRange); // hidden activation vector for current word token
Matrix<ElemType> obs = Input(1)->OutputFor(fr); // hidden activation vector for current word token
// multiply hidden activation with weight matrix (the slice of the weight matrix for the range of class members)
// TODO: can we use 'true' here instead? Above transposition hack won't work with row slices. 'obs' not used elsewhere
@ -1018,7 +1007,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
/// compute posterior probability of label y at position t
virtual void /*ComputationNodeNonLooping::*/ForwardPropNonLooping() override
{
FrameRange frameRange(Input(0)->GetMBLayout());
FrameRange fr(Input(0)->GetMBLayout());
size_t nrow = Input(0)->GetNumRows();
size_t ncol = Input(0)->GetNumCols();
@ -1034,7 +1023,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
LogicError("CRFNode: >1 parallel sequences are curently not implemented correctly.");
for (size_t i = 0; i < nS; i++) // process parallel sequences one by one --BUGBUG: We should loop over individual sequences.
{
FrameRange sequenceRange = frameRange.Sequence(i); // FrameRange to select one sequence
FrameRange sequenceRange = fr.Sequence(i); // FrameRange to select one sequence
// BUGBUG: This ^^ is neither supported nor correct, since this code does not handle gaps or start/end flags
ForwardPropS(
DataWithMBLayoutFor(mPostProb, sequenceRange, Input(0)->GetMBLayout()),
@ -1052,28 +1041,28 @@ namespace Microsoft { namespace MSR { namespace CNTK {
virtual void BackpropToNonLooping(size_t inputIndex) override //scaled by 2*number of colmns (samples) in the Matrix<ElemType>
{
FrameRange frameRange(Input(0)->GetMBLayout());
FrameRange fr(Input(0)->GetMBLayout());
// inputIndex 0 should not get us here, it should be prevented by the needGradient flag of input[0]
if (inputIndex != 1 && inputIndex != 2)
InvalidArgument("CRFNode only takes with respect to input and weight.");
if (inputIndex == 1)
{
auto gradient = Input(1)->GradientFor(frameRange);
Matrix<ElemType>::AddScaledDifference(GradientValues(), mPostProb, Input(0)->OutputFor(frameRange), gradient);
auto gradient = Input(1)->GradientFor(fr);
Matrix<ElemType>::AddScaledDifference(GradientValues(), mPostProb, Input(0)->OutputFor(fr), gradient);
}
else if (inputIndex == 2)
{
assert(Input(inputIndex)->GradientFor(frameRange).GetNumElements() > 0);
assert(Input(inputIndex)->GradientFor(fr).GetNumElements() > 0);
size_t nS = Input(0)->GetNumParallelSequences();
for (size_t i = 0; i < nS; i++) // process all sequences one by one
{
FrameRange sequenceRange = frameRange.Sequence(i); // FrameRange to select one sequence
auto gradient = Input(2)->GradientFor(frameRange);
FrameRange sequenceRange = fr.Sequence(i); // FrameRange to select one sequence
auto gradient = Input(2)->GradientFor(fr);
TransGrdCompute(Input(0)->OutputFor(sequenceRange),
DataWithMBLayoutFor(mAlpha, sequenceRange, Input(0)->GetMBLayout()),
DataWithMBLayoutFor(mBeta, sequenceRange, Input(0)->GetMBLayout()),
Input(2)->OutputFor(frameRange),
Input(2)->OutputFor(fr),
gradient,
mStartLbl, 1);
}
@ -1320,7 +1309,6 @@ namespace Microsoft { namespace MSR { namespace CNTK {
inputGradientValues.Print("SequenceWithSoftmaxNode Partial-Left-in");
#endif
//Matrix<ElemType>::ScaleAndAdd(-gradientValues.Get00Element(), logSoftmaxOfRight, inputGradientValues);
Matrix<ElemType>::Multiply1x1AndWeightedAdd(-1.0f, gradientValues/*1x1*/, logSoftmaxOfRight, 1.0f, inputGradientValues);
#if DUMPOUTPUT
inputGradientValues.Print("SequenceWithSoftmaxNode Partial-Left-out");
@ -1428,9 +1416,9 @@ namespace Microsoft { namespace MSR { namespace CNTK {
}
//request matrices needed to do node function value evaluation
virtual void RequestMatricesBeforeEval(MatrixPool& matrixPool)
virtual void RequestMatricesBeforeForwardProp(MatrixPool& matrixPool)
{
Base::RequestMatricesBeforeEval(matrixPool);
Base::RequestMatricesBeforeForwardProp(matrixPool);
RequestMatrixFromPool(m_logSoftmaxOfRight, matrixPool);
RequestMatrixFromPool(m_softmaxOfRight, matrixPool);
RequestMatrixFromPool(m_gammaFromLattice, matrixPool);
@ -1519,23 +1507,22 @@ namespace Microsoft { namespace MSR { namespace CNTK {
virtual void BackpropToNonLooping(size_t inputIndex) override
{
FrameRange frameRange(Input(0)->GetMBLayout());
FrameRange fr(Input(0)->GetMBLayout());
if (inputIndex != 1)
InvalidArgument("%ls %ls operation cannot compute the gradient for its first inpute.", NodeName().c_str(), OperationName().c_str());
//BackpropToRight(m_temp, Input(0)->Output(), Input(2)->Output(), Input(inputIndex)->GradientValues(), GradientValues(), m_classZeroLabels, m_result);
// Create vector with 1 for class 1, and -1 for class 0
m_temp->AssignDifferenceOf(Input(0)->OutputFor(frameRange), *m_classZeroLabels); // TODO: need a slice for m_classZeroLabels?
m_temp->AssignDifferenceOf(Input(0)->OutputFor(fr), *m_classZeroLabels); // TODO: need a slice for m_classZeroLabels?
// Multiply the vector by the Input(2)->Output()
if (m_inputs.size() == 3) // without weight
m_temp->AssignElementProductOf(*m_temp, Input(2)->OutputFor(frameRange)); // TODO: is Input(2) minibatch data? Confirm
m_temp->AssignElementProductOf(*m_temp, Input(2)->OutputFor(fr)); // TODO: is Input(2) minibatch data? Confirm
// divide class by p (class 1) or (1-p) (class 0)
m_temp->AssignElementDivisionOf(*m_temp, *m_result); // TODO: this is in-place--does this function allow that?
//Matrix<ElemType>::ScaleAndAdd(-GradientValues().Get00Element(), *m_temp, Input(inputIndex)->GradientValues());
auto gradient = Input(inputIndex)->GradientFor(frameRange);
auto gradient = Input(inputIndex)->GradientFor(fr);
Matrix<ElemType>::Multiply1x1AndWeightedAdd(-1.0f, GradientValues()/*1x1*/, *m_temp, 1.0f, gradient);
}
@ -1549,10 +1536,10 @@ namespace Microsoft { namespace MSR { namespace CNTK {
//-sum(left * log(right) + (1-left)*log(1-right)) (optionally * weight)
virtual void /*ComputationNodeNonLooping::*/ForwardPropNonLooping() override
{
FrameRange frameRange(Input(0)->GetMBLayout());
FrameRange fr(Input(0)->GetMBLayout());
const Matrix<ElemType>& classOneLabels = Input(0)->OutputFor(frameRange);
const Matrix<ElemType>& classOneProbabilities = Input(1)->OutputFor(frameRange);
const Matrix<ElemType>& classOneLabels = Input(0)->OutputFor(fr);
const Matrix<ElemType>& classOneProbabilities = Input(1)->OutputFor(fr);
Matrix<ElemType>& classZeroLabels = *m_classZeroLabels;
Matrix<ElemType> ones = ConstOnes(classOneLabels.GetNumRows(), classOneLabels.GetNumCols(), classOneLabels.GetDeviceId());
@ -1582,7 +1569,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
if (m_inputs.size() == 2)
Output().AssignSumOfElements(*m_temp);
else
Output().AssignInnerProductOf(Input(2)->OutputFor(frameRange), *m_temp, false);
Output().AssignInnerProductOf(Input(2)->OutputFor(fr), *m_temp, false);
Output() *= (-1);
}
@ -1614,18 +1601,18 @@ namespace Microsoft { namespace MSR { namespace CNTK {
}
//request matrices needed to do node function value evaluation
virtual void RequestMatricesBeforeEval(MatrixPool& matrixPool)
virtual void RequestMatricesBeforeForwardProp(MatrixPool& matrixPool)
{
Base::RequestMatricesBeforeEval(matrixPool);
Base::RequestMatricesBeforeForwardProp(matrixPool);
RequestMatrixFromPool(m_classZeroLabels, matrixPool);
RequestMatrixFromPool(m_result, matrixPool);
RequestMatrixFromPool(m_temp, matrixPool);
}
//release gradient and temp matrices that no longer needed after all the children's gradients are computed.
virtual void ReleaseMatricesAfterGradientComp(MatrixPool& matrixPool)
virtual void ReleaseMatricesAfterBackprop(MatrixPool& matrixPool)
{
Base::ReleaseMatricesAfterGradientComp(matrixPool);
Base::ReleaseMatricesAfterBackprop(matrixPool);
ReleaseMatrixToPool(m_classZeroLabels, matrixPool);
ReleaseMatrixToPool(m_result, matrixPool);
ReleaseMatrixToPool(m_temp, matrixPool);

Просмотреть файл

@ -119,6 +119,7 @@
<ItemGroup>
<ClInclude Include="..\..\Common\Include\basetypes.h" />
<ClInclude Include="..\..\Common\Include\Basics.h" />
<ClInclude Include="..\..\Common\Include\Config.h" />
<ClInclude Include="..\..\Common\Include\Eval.h" />
<ClInclude Include="..\..\Common\Include\File.h" />
<ClInclude Include="..\..\Common\Include\fileutil.h" />
@ -131,9 +132,7 @@
<ClInclude Include="CNTKEval.h" />
</ItemGroup>
<ItemGroup>
<ClCompile Include="..\..\Common\ConfigFile.cpp">
<PrecompiledHeader>NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="..\..\Common\Config.cpp" />
<ClCompile Include="..\..\Common\Eval.cpp" />
<ClCompile Include="..\..\Common\File.cpp">
<PrecompiledHeader>NotUsing</PrecompiledHeader>

Просмотреть файл

@ -2,9 +2,6 @@
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup>
<ClCompile Include="CNTKEval.cpp" />
<ClCompile Include="..\..\Common\ConfigFile.cpp">
<Filter>Common</Filter>
</ClCompile>
<ClCompile Include="..\..\Common\Eval.cpp">
<Filter>Common</Filter>
</ClCompile>
@ -26,6 +23,9 @@
<ClCompile Include="stdafx.cpp">
<Filter>Misc</Filter>
</ClCompile>
<ClCompile Include="..\..\Common\Config.cpp">
<Filter>Common</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="EvalReader.h" />
@ -58,6 +58,9 @@
<ClInclude Include="targetver.h">
<Filter>Misc</Filter>
</ClInclude>
<ClInclude Include="..\..\Common\Include\Config.h">
<Filter>Common\Include</Filter>
</ClInclude>
</ItemGroup>
<ItemGroup>
<Filter Include="Common">

Просмотреть файл

@ -4,7 +4,7 @@
#include "stdafx.h"
#include "Eval.h"
#include "DataReader.h"
#include "commandArgUtil.h"
#include "Config.h"
using namespace Microsoft::MSR::CNTK;
// process the command

Просмотреть файл

@ -93,7 +93,7 @@
<ClInclude Include="targetver.h" />
</ItemGroup>
<ItemGroup>
<ClCompile Include="..\..\..\Common\ConfigFile.cpp">
<ClCompile Include="..\..\..\Common\Config.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
</ClCompile>

Просмотреть файл

@ -155,7 +155,7 @@
<ClInclude Include="..\..\Common\Include\basetypes.h" />
<ClInclude Include="..\..\Common\Include\Basics.h" />
<ClInclude Include="..\..\Common\Include\BestGpu.h" />
<ClInclude Include="..\..\Common\Include\commandArgUtil.h" />
<ClInclude Include="..\..\Common\Include\Config.h" />
<ClInclude Include="..\..\Common\Include\DataReader.h" />
<ClInclude Include="..\..\Common\Include\DataTensor.h" />
<ClInclude Include="..\..\Common\Include\DataWriter.h" />
@ -198,7 +198,10 @@
<ClInclude Include="..\CNTKComputationNetworkLib\TrainingCriterionNodes.h" />
</ItemGroup>
<ItemGroup>
<ClCompile Include="..\..\Common\ConfigFile.cpp" />
<ClCompile Include="..\..\Common\Config.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="..\..\Common\DataReader.cpp" />
<ClCompile Include="..\..\Common\DataWriter.cpp" />
<ClCompile Include="..\..\Common\File.cpp">

Просмотреть файл

@ -1,9 +1,6 @@
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup>
<ClCompile Include="..\..\Common\ConfigFile.cpp">
<Filter>Common</Filter>
</ClCompile>
<ClCompile Include="..\..\Common\DataReader.cpp">
<Filter>Common</Filter>
</ClCompile>
@ -28,14 +25,14 @@
<ClCompile Include="SGD.cpp">
<Filter>SGD</Filter>
</ClCompile>
<ClCompile Include="..\..\Common\Config.cpp">
<Filter>Common</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="..\..\Common\Include\basetypes.h">
<Filter>Common\Include</Filter>
</ClInclude>
<ClInclude Include="..\..\Common\Include\commandArgUtil.h">
<Filter>Common\Include</Filter>
</ClInclude>
<ClInclude Include="..\..\Common\Include\fileutil.h">
<Filter>Common\Include</Filter>
</ClInclude>
@ -165,6 +162,9 @@
<ClInclude Include="..\..\Common\Include\DataTensor.h">
<Filter>Common\Include</Filter>
</ClInclude>
<ClInclude Include="..\..\Common\Include\Config.h">
<Filter>Common\Include</Filter>
</ClInclude>
</ItemGroup>
<ItemGroup>
<Filter Include="Common">

Просмотреть файл

@ -18,7 +18,7 @@
#include <string>
#include <stdexcept>
#include "fileutil.h"
#include "commandArgUtil.h"
#include "Config.h"
#include <chrono>
#include <random>
#include "TimerUtility.h"
@ -1209,13 +1209,13 @@ namespace Microsoft { namespace MSR { namespace CNTK {
for (size_t i = 0; i < pairNodes.size(); i++)
{
for (auto ptr = pairNodes[i]->begin(); ptr != pairNodes[i]->end(); ptr++)
networks[i]->ClearGradientOfAllNodes(*ptr);
networks[i]->ZeroGradients(*ptr);
}
for (size_t i = 0; i < criterionNodes.size(); i++)
{
for (auto ptr = criterionNodes[i]->begin(); ptr != criterionNodes[i]->end(); ptr++)
networks[i]->ClearGradientOfAllNodes(*ptr);
networks[i]->ZeroGradients(*ptr);
}
for (auto ptr = criterionNodes[inetworks - 1]->begin(); ptr != criterionNodes[inetworks - 1]->end(); ptr++)
@ -1223,12 +1223,18 @@ namespace Microsoft { namespace MSR { namespace CNTK {
if (ptr == criterionNodes[inetworks - 1]->begin())
{
networks[inetworks - 1]->ForwardProp(*ptr);
networks[inetworks - 1]->Backprop<ElemType>(*ptr);
networks[inetworks - 1]->Backprop(*ptr);
}
else
{
networks[inetworks - 1]->ForwardProp(*ptr);
networks[inetworks - 1]->Backprop<ElemType>(*ptr, false, nullptr, false);
#if 1 // disable this, so that we can remove the options from Backprop() (trivial to bring back if ever needed)
NOT_IMPLEMENTED;
#else
// This is the old signature of Backprop()
// void Backprop(const ComputationNodeBasePtr rootNode, bool /*bResetToOne*/, bool /*bClearGradient*/)
networks[inetworks - 1]->Backprop(*ptr, false, false);
#endif
}
}
@ -1241,7 +1247,11 @@ namespace Microsoft { namespace MSR { namespace CNTK {
for (auto ptr = criterionNodes[i]->begin(); ptr != criterionNodes[i]->end(); ptr++)
{
networks[i]->ForwardProp(*ptr);
networks[i]->Backprop<ElemType>(*ptr, true, nullptr, false);
#if 1
NOT_IMPLEMENTED;
#else
networks[i]->Backprop(*ptr, true, false);
#endif
}
}
else if (pairNodes[i]->size() > 0)
@ -1250,7 +1260,11 @@ namespace Microsoft { namespace MSR { namespace CNTK {
for (auto ptr = pairNodes[i]->begin(); ptr != pairNodes[i]->end(); ptr++)
{
networks[i]->ForwardProp(*ptr);
networks[i]->Backprop<ElemType>(*ptr, false, nullptr, false);
#if 1
NOT_IMPLEMENTED;
#else
networks[i]->Backprop(*ptr, false, false);
#endif
}
}
}

Просмотреть файл

@ -337,31 +337,6 @@ namespace Microsoft { namespace MSR { namespace CNTK {
template SGD<float >::SGD(const ScriptableObjects::IConfigRecord &);
template SGD<double>::SGD(const ScriptableObjects::IConfigRecord &);
template<class ElemType>
void SGD<ElemType>::ForwardBackward(ComputationNetwork& net,
const std::vector<ComputationNodeBasePtr>& evalNodes,
shared_ptr<ComputationNodeBase> criterionNode,
bool dobackpropogate)
{
// evaluate eval nodes
// The bulk of this evaluation is reused in ComputeGradient() below.
net.ForwardProp(evalNodes);
// compute the gradient
// This is where the magic happens, baby!!
// ==============================
// forward prop
// ==============================
net.ForwardProp(criterionNode);
// ==============================
// backprop
// ==============================
// only compute gradient when learning rate is large enough
if (dobackpropogate)
net.Backprop<ElemType>(criterionNode);
}
template<class ElemType>
void SGD<ElemType>::Adapt(wstring origModelFileName, wstring refNodeName,
IDataReader<ElemType>* trainSetDataReader,
@ -1769,27 +1744,28 @@ namespace Microsoft { namespace MSR { namespace CNTK {
refNet->StartEvaluateMinibatchLoop(refNode);
}
DataReaderHelpers::SubminibatchDispatcher<ElemType> smbDisplatcher;
size_t samplesInRAM = m_maxSamplesInRAM;
// convert it to SubminibatchRequested
size_t numSubminibatchRequested = 0;
if (samplesInRAM < SIZE_MAX) // if samplesInRAM = 0 , we will not use subminibatch dispatcher
// prepare for sub-minibatching
// Sub-minibatching is used if a single minibatch is too large to fit into GPU RAM.
DataReaderHelpers::SubminibatchDispatcher<ElemType> smbDispatcher;
size_t numSubminibatchesNeeded = 0;
if (m_maxSamplesInRAM < SIZE_MAX) // user-specified maximum number of samples that fit into GPU RAM; or 0 if not enabled
{
size_t nParallelSequences = trainSetDataReader->GetNumParallelSequences();
size_t estimatedMBSize = tunedMBSize * nParallelSequences;
numSubminibatchRequested = (size_t)std::ceil( (float)estimatedMBSize / samplesInRAM);
// into how many pieces would we need to break the minibatch?
// TODO: The following calculation relies on the ill-devised definition of "minibatch" of the current truncated BPTT implementation. Adapt this once fixed.
size_t numParallelSequences = trainSetDataReader->GetNumParallelSequences();
size_t estimatedMBSize = tunedMBSize * numParallelSequences;
numSubminibatchesNeeded = (size_t)std::ceil((float)estimatedMBSize / m_maxSamplesInRAM);
}
if (numSubminibatchRequested > 1) // only use subminibatch dispatcher if more than 1 subminibatch is required
{
smbDisplatcher.Init(net, learnableNodes, criterionNodes, evaluationNodes);
}
size_t actualNumSubminibatch=0;
// this is non-trivial, we need a manager object to handle this
if (numSubminibatchesNeeded > 1)
smbDispatcher.Init(net, learnableNodes, criterionNodes, evaluationNodes);
// Attemps to compute the error signal for the whole utterance, which will
// The following is a special feature only supported by the Kaldi2Reader for more efficient sequence training.
// This attemps to compute the error signal for the whole utterance, which will
// be fed to the neural network as features. Currently it is a workaround
// for the two-forward-pass sequence and ctc training, which allows
// processing more utterances at the same time. Only used in Kaldi2Reader.
// TODO: move the two-forward-pass support out of the reader.
// processing more utterances at the same time.
// TODO: move the two-forward-pass support out of the reader, make a first-class citizen.
AttemptUtteranceDerivativeFeatures(net, trainSetDataReader, featureNodes, inputMatrices);
fprintf(stderr, "\nStarting minibatch loop");
@ -1806,9 +1782,9 @@ namespace Microsoft { namespace MSR { namespace CNTK {
{
fprintf(stderr, ", distributed reading is ENABLED");
}
if (numSubminibatchRequested > 1)
if (numSubminibatchesNeeded > 1)
{
fprintf(stderr, ", with maximum %d samples in RAM", (int)samplesInRAM);
fprintf(stderr, ", with maximum %d samples in RAM", (int)m_maxSamplesInRAM);
}
fprintf(stderr, ".\n");
@ -1830,15 +1806,6 @@ namespace Microsoft { namespace MSR { namespace CNTK {
nSamplesSinceLastModelSync += actualMBSize;
if (numSubminibatchRequested > 1)
{
actualNumSubminibatch = smbDisplatcher.GetMinibatchIntoCache(*trainSetDataReader, *net, *inputMatrices, numSubminibatchRequested);
}
else
{
actualNumSubminibatch = 1;
}
// node data was changed
// TODO: move this to that function as well--just tired to pass everything as arguments
// TODO: We should do this right after the GetMinibatch() call, since that's where these changed.
@ -1853,7 +1820,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
if (m_doGradientCheck && GradientCheck(net, criterionNodes, learnableNodes, 0) == false)
LogicError("cannot pass gradient checker");
#endif
// TODO: currently only support one node regularization
// TODO: currently we only support one node for regularization
if (m_needAdaptRegularization && m_adaptationRegType == AdaptationRegType::KL && refNode)
{
#if 0 // TODO: where does refNet get its features from?
@ -1874,31 +1841,50 @@ namespace Microsoft { namespace MSR { namespace CNTK {
dynamic_pointer_cast<ComputationNode<ElemType>>(labelNodes[0])->Output());
}
//compute eval node first since when gradient is computed the forward function values
//may be changed and need to be recomputed when gradient and function value share the same matrix
if (actualNumSubminibatch > 1)
{
for (size_t ismb = 0; ismb < actualNumSubminibatch; ismb++)
{
smbDisplatcher.GetSubMinibatchToNet(ismb);
ComputationNetwork::UpdateEvalTimeStamps(featureNodes);
ComputationNetwork::UpdateEvalTimeStamps(labelNodes);
ForwardBackward(*net, evaluationNodes, criterionNodes[0], learnRatePerSample > 0.01 * m_minLearnRate);
smbDisplatcher.DoneWithCurrentSubMinibatch(ismb);
}
smbDisplatcher.DoneWithCurrentMinibatch();
}
else
{
ForwardBackward(*net, evaluationNodes, criterionNodes[0], learnRatePerSample > 0.01 * m_minLearnRate);
}
// do forward and back propagation
// We optionally break the minibatch into sub-minibatches.
// This, when enabled, is used when a full minibatch does not fit into GPU RAM.
size_t actualNumSubminibatches = numSubminibatchesNeeded <= 1 ? 1 : smbDispatcher.GetMinibatchIntoCache(*trainSetDataReader, *net, *inputMatrices, numSubminibatchesNeeded);
for (size_t ismb = 0; ismb < actualNumSubminibatches; ismb++)
{
if (actualNumSubminibatches > 1)
{
smbDispatcher.GetSubMinibatchToNet(ismb); // get sub-minibatch from full-size one
ComputationNetwork::UpdateEvalTimeStamps(featureNodes);
ComputationNetwork::UpdateEvalTimeStamps(labelNodes);
}
// ===========================================================
// forward prop for evaluate eval nodes
// ===========================================================
// compute eval node first since when gradient is computed the forward function values
// may be changed and need to be recomputed when gradient and function value share the same matrix
net->ForwardProp(evaluationNodes); // the bulk of this evaluation is reused in ComputeGradient() below
// ===========================================================
// forward prop for training criterion
// ===========================================================
net->ForwardProp(criterionNodes[0]);
// ===========================================================
// backprop
// ===========================================================
if (learnRatePerSample > 0.01 * m_minLearnRate) // only compute gradient when learning rate is large enough
net->Backprop(criterionNodes[0]);
// house-keeping for sub-minibatching
if (actualNumSubminibatches > 1)
smbDispatcher.DoneWithCurrentSubMinibatch(ismb); // page state out
} // end sub-minibatch loop
if (actualNumSubminibatches > 1)
smbDispatcher.DoneWithCurrentMinibatch();
} // if (actualMBSize > 0)
// Some labels may be missing (e.g. forced alignment failed, or being gaps due to packing parallel sequences).
//for now since we share the same label masking flag we call this on the network.
//Later, when we apply different labels on different nodes
//we need to add code to call this function multiple times, one for each criteria node
// for progress and statistics, we should only count frames that are not gaps
size_t numSamplesWithLabel = net->GetNumSamplesWithLabel(actualMBSize);
@ -2659,7 +2645,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
node->UpdateEvalTimeStamp();
net->ForwardProp(criterionNodes[npos]);
net->Backprop<ElemType>(criterionNodes[npos]);
net->Backprop(criterionNodes[npos]);
if (node->GradientValues().GetMatrixType() == MatrixType::SPARSE)
{

Просмотреть файл

@ -17,7 +17,7 @@
#include <string>
#include <stdexcept>
#include "fileutil.h"
#include "commandArgUtil.h"
#include "Config.h"
#include <chrono>
#include <random>
#include "Profiler.h"
@ -497,7 +497,6 @@ protected:
private:
int SGDTrace(FILE *__restrict __stream, const char *__restrict __format, ...);
void ForwardBackward(ComputationNetwork& net,const std::vector<ComputationNodeBasePtr>& evalNodes,shared_ptr<ComputationNodeBase> criterionNode,bool dobackpropogate);
};
}}}

Просмотреть файл

@ -201,7 +201,7 @@ $(BUILDINFO): Scripts/genrate_build_info
# Define all sources that need to be built
COMMON_SRC =\
Common/ConfigFile.cpp \
Common/Config.cpp \
Common/DataReader.cpp \
Common/DataWriter.cpp \
Common/DebugUtil.cpp \
@ -432,7 +432,6 @@ CNTK_SRC =\
MachineLearning/CNTKComputationNetworkLib/ComputationNetworkEditing.cpp \
MachineLearning/CNTKComputationNetworkLib/ComputationNetworkBuilder.cpp \
MachineLearning/CNTKComputationNetworkLib/ComputationNetworkScripting.cpp \
MachineLearning/CNTKComputationNetworkLib/NetworkBuilderFromConfig.cpp \
MachineLearning/CNTKSGDLib/Profiler.cpp \
MachineLearning/CNTKSGDLib/SGD.cpp \
MachineLearning/CNTKActionsLib/TrainActions.cpp \

Просмотреть файл

@ -5,6 +5,7 @@
//
#pragma once
#include "Basics.h"
#include <string>
#include <stdint.h>
@ -48,12 +49,6 @@ static inline DEVICEID_TYPE EnforceOneGPUOnly(DEVICEID_TYPE requestedDeviceId)
#define MINLOGEXP -9.2103
#define LSMALL -0.5E10
#define NOT_IMPLEMENTED \
{ \
fprintf(stderr, "Inside File: %s Line: %d Function: %s -> Feature Not Implemented.\n", __FILE__, __LINE__, __FUNCTION__); \
LogicError("Not Implemented"); \
}
#define GPUSPARSE_INDEX_TYPE int //cuSparse only supports int array indexes
#define CPUSPARSE_INDEX_TYPE int //to be consistent with cuSparse but limited the possible size of the matrix.

233
README
Просмотреть файл

@ -1,108 +1,183 @@
######################
1. User Manual
######################
##############################################################################
# #
# CNTK #
# #
##############################################################################
The detailed introduction of the computational network and its implementation as well as the user manual of the computational network toolkit (CNTK) can be found at
-------------------------------
1. Documentation
-------------------------------
Amit Agarwal, Eldar Akchurin, Chris Basoglu, Guoguo Chen, Scott Cyphers, Jasha Droppo, Adam Eversole, Brian Guenter, Mark Hillebrand, Xuedong Huang, Zhiheng Huang, Vladimir Ivanov, Alexey Kamenev, Philipp Kranen, Oleksii Kuchaiev, Wolfgang Manousek, Avner May, Bhaskar Mitra, Olivier Nano, Gaizka Navarro, Alexey Orlov, Marko Padmilac, Hari Parthasarathi, Baolin Peng, Alexey Reznichenko, Frank Seide, Michael L. Seltzer, Malcolm Slaney, Andreas Stolcke, Huaming Wang, Kaisheng Yao, Dong Yu, Yu Zhang, Geoffrey Zweig (in alphabetical order), "An Introduction to Computational Networks and the Computational Network Toolkit", Microsoft Technical Report MSR-TR-2014-112, 2014.
The detailed introduction of the Computational Network and its
implementation as well as the user manual of the Computational Network
Toolkit (CNTK) can be found at
You can also check Examples and Demos under the ExampleSetups folder.
"An Introduction to Computational Networks and the Computational
Network Toolkit"
Note: For builds before May 18, 2015 the executable is called cn which is now changed to cntk.
by Amit Agarwal, Eldar Akchurin, Chris Basoglu, Guoguo Chen, Scott
Cyphers, Jasha Droppo, Adam Eversole, Brian Guenter, Mark
Hillebrand, Xuedong Huang, Zhiheng Huang, Vladimir Ivanov, Alexey
Kamenev, Philipp Kranen, Oleksii Kuchaiev, Wolfgang Manousek,
Avner May, Bhaskar Mitra, Olivier Nano, Gaizka Navarro, Alexey
Orlov, Hari Parthasarathi, Baolin Peng, Marko Radmilac, Alexey
Reznichenko, Frank Seide, Michael L. Seltzer, Malcolm Slaney,
Andreas Stolcke, Huaming Wang, Kaisheng Yao, Dong Yu, Yu Zhang, and
Geoffrey Zweig (in alphabetical order)
There are also four files in the documentation directory of the source that contain additional details.
Microsoft Technical Report MSR-TR-2014-112, 2014.
######################
2. Clone Source Code (Windows)
######################
Available through Codeplex and inside the repository.
For Examples and Demos see the ExampleSetups/ folder.
There are also four files in the Documentation/ directory of the source
that contain additional details.
-------------------------------
2. Cloning the Source Code (Windows)
-------------------------------
The CNTK project uses Git as the source version control system.
If you have Visual Studio 2013 installed Git is already available. You can follow the "Clone a remote Git repository from a third-party service" section in Set up Git on your dev machine (configure, create, clone, add) and connect to https://git01.codeplex.com/cntk to clone the source code. We found that installing Git Extension for VS is still helpful esp. for new users.
If you have Visual Studio 2013 installed, Git is already
available. You can follow the "Clone a remote Git repository from a
third-party service" section under Set up Git on your dev machine
(configure, create, clone, add) and connect to
https://git01.codeplex.com/cntk to clone the source code. We found
that installing Git Extension for VS is still helpful esp. for new
users.
Otherwise you can install Git for your OS from the Using Git with CodePlex page and clone the CNTK source code with the command "git clone https://git01.codeplex.com/cntk".
Otherwise you can install Git for your OS from the Using Git with
CodePlex page and clone the CNTK source code with the command
######################
3. Clone Source Code (Linux/mac)
######################
git clone https://git01.codeplex.com/cntk
For the Linux user: please replace "git01" with "git" (otherwise you will get an RPC error):
git clone https://git.codeplex.com/cntk
-------------------------------
3. Cloning Source Code (Linux/Mac)
-------------------------------
For more detail, you can follow this thread: http://codeplex.codeplex.com/workitem/26133
Linux users should clone from this URL: https://git.codeplex.com/cntk
######################
4. Windows Visual Studio Setup (CNTK only runs on 64-bit OS)
######################
git clone https://git.codeplex.com/cntk
# 4.1 Install Visual Studio 2013. After installation make sure to install Update 5 or higher: Go to menu Tools -> Extensions and Updates -> Updates -> Product Updates -> Visual Studio 2013 Update 5 (or higher if applicable)
For more detail, you can follow this thread:
http://codeplex.codeplex.com/workitem/26133
# 4.2 Install CUDA 7.0 from https://developer.nvidia.com/cuda-toolkit-70.
# 4.3 Install NVIDIA CUB from https://github.com/NVlabs/cub/archive/1.4.1.zip Unzip the archive.
Set environment variable CUB_PATH to CUB folder, e.g.:
CUB_PATH=c:\src\cub-1.4.1
# 4.4 Install ACML 5.3.1 or above (specifically the ifort64 variant, e.g., acml5.3.1-ifort64.exe) from http://developer.amd.com/tools/cpu-development/amd-core-math-library-acml/acml-downloads-resources/. Before launching Visual Studio set the system environment variable e.g.:
ACML_PATH=C:\AMD\acml5.3.1\ifort64_mp
or the folder you installed acml. (The easiest way to do this on Windows 8 is to press the windows key, and then in the metro interface start typing: edit environment variables.)
If you are running on an Intel processor with FMA3 support, we also advise to set ACML_FMA=0 in your environment to work around issue in the ACML library.
-------------------------------
4. Windows Visual Studio Setup (64-bit OS only)
-------------------------------
# 4.5 Alternatively if you have an MKL license, you can install Intel MKL library instead of ACML from https://software.intel.com/en-us/intel-math-kernel-library-evaluation-options and define USE_MKL in the CNTKMath project. MKL is faster and more reliable on Intel chips if you have the license.
Install Visual Studio 2013. After installation make sure to
install Update 5 or higher: Go to menu Tools -> Extensions and
Updates -> Updates -> Product Updates -> Visual Studio 2013 Update 5
(or higher if applicable)
# 4.6 Install the latest Microsoft MS-MPI SDK and runtime from https://msdn.microsoft.com/en-us/library/bb524831(v=vs.85).aspx
Install CUDA 7.0 from
# 4.7 If you want to use ImageReader, install OpenCV v3.0.0: Download and install OpenCV v3.0.0 for Windows from http://opencv.org/downloads.html
Set OPENCV_PATH environment variable to OpenCV build folder (e.g. C:\src\opencv\build)
https://developer.nvidia.com/cuda-toolkit-70
# 4.8 Load the CNTKSolution and build the cntk project.
and NVidia CUB from
######################
https://github.com/NVlabs/cub/archive/1.4.1.zip
by unzipping the archive and setting environment variable CUB_PATH to the location, e.g.:
CUB_PATH=c:\src\cub-1.4.1
The easiest way to set a global environment variable is to press the windows
key, and then in the search interface start typing: edit environment
variables. Then close and reopen CMD shells and Visual Studio.
Install ACML 5.3.1 or above (specifically the ifort64_mp variant, e.g.,
acml5.3.1-ifort64.exe) from
http://developer.amd.com/tools/cpu-development/amd-core-math-library-acml/acml-downloads-resources/
Before launching Visual Studio, set environment variable ACML_PATH, to
the folder you installed the library to, e.g.
ACML_PATH=C:\AMD\acml5.3.1\ifort64_mp
If you are running on an Intel processor with FMA3 support, we also
advise to set ACML_FMA=0 in your environment to work around an issue in
the ACML library.
Alternatively if you have an MKL license, you can install Intel MKL
library instead of ACML from
https://software.intel.com/en-us/intel-math-kernel-library-evaluation-options
and define USE_MKL in the CNTKMath project. MKL is faster and more
reliable on Intel chips if you have the license.
Install the latest Microsoft MS-MPI SDK and runtime from
https://msdn.microsoft.com/en-us/library/bb524831(v=vs.85).aspx
If you want to use ImageReader, install OpenCV v3.0.0. Download and
install OpenCV v3.0.0 for Windows from
http://opencv.org/downloads.html
Set environment variable OPENCV_PATH to the OpenCV build folder, e.g.
C:\src\opencv\build
Open the CNTKSolution and build the CNTK project.
Note: If you make modifications to the code, please first disable the
insertion of TAB characters. If you use Visual Studio as your editor,
goto Tools|Options|Text Editor|C/C++|Tabs and make sure it is set to
Smart Indenting Tab, Indent Size set to 4, and "Insert Spaces" option
selected. You can also load the CppCntk.vssettings file (in the CNTK
home directory) which contains settings for C++ editor. To
import/export the settings, use Tools -> Import and Export
Settings... Visual Studio menu option.
Please do *not* auto-format existing code (Edit -> Advanced -> Format
Document/Ctrl+E,D).
-------------------------------
5. Linux GCC Setup
######################
-------------------------------
# 5.1 install needed libraries as indicated in section 4 on your Linux box.
Install needed libraries as indicated in the Windows section above on
your Linux box. You need GCC 4.8.4 or above.
# 5.2 create a directory to build in and make a Config.make in the directory
that provides
ACML_PATH= path to ACML library installation
only needed if MATHLIB=acml
MKL_PATH= path to MKL library installation
only needed if MATHLIB=mkl
GDK_PATH= path to cuda gdk installation, so $(GDK_PATH)/include/nvidia/gdk/nvml.h exists
defaults to /usr
BUILDTYPE= One of release or debug
defaults to release
MATHLIB= One of acml or mkl
defaults to acml
CUDA_PATH= Path to CUDA
If not specified, GPU will not be enabled
CUB_PATH= path to NVIDIA CUB installation, so $(CUB_PATH)/cub/cub.cuh exists
defaults to /usr/local/cub-1.4.1
KALDI_PATH= Path to Kaldi
If not specified, Kaldi plugins will not be built
OPENCV_PATH= path to OpenCV 3.0.0 installation, so $(OPENCV_PATH) exists
defaults to /usr/local/opencv-3.0.0
Create a directory to build in and make a Config.make in the
directory that provides:
* ACML_PATH= path to ACML library installation (only if MATHLIB=acml)
* MKL_PATH= to MKL library installation (only if MATHLIB=mkl)
* GDK_PATH= path to cuda gdk installation, such that
$(GDK_PATH)/include/nvidia/gdk/nvml.h exists (defaults to /usr)
* BUILDTYPE= release (default) or debug
* MATHLIB= acml (default) or mkl
* CUDA_PATH= path to CUDA (if not specified, GPU will not be
enabled)
* CUB_PATH= path to NVidia CUB installation, such that the
file $(CUB_PATH)/cub/cub.cuh exists (defaults to /usr/local/cub-1.4.1)
* KALDI_PATH= Path to Kaldi (if not specified, Kaldi plugins will
not be built)
* OPENCV_PATH= path to OpenCV 3.0.0 installation, such that the
directory $(OPENCV_PATH) exists (defaults to /usr/local/opencv-3.0.0)
# 5.3 Build the clean version with command
make -j all
Build the clean version with command
make -j all
######################
6. Coding Standard
######################
No TABs. Each TAB should be replaced with 4 spaces in the source code. If you use Visual Studio as your editor, goto Tools|Options|Text Editor|C/C++|Tabs and make sure it is set to Smart Indenting Tab and Indent Size set to 4, and "Insert Spaces" option selected.
Follow the same naming conventions as shown in the ComputationNetwork.h file.
Open/close braces should be on lines by themselves aligned with previous code indent level, e.g.,
if (true)
{
Function();
}
If you are using Visual Studio 2013 as your main development environment, you can load the CppCntk.vssettings file (in the CNTK home directory) which contains settings for C++ editor with defaults like using spaces for tabs, curly brace positioning and other preferences that meet CNTK style guidelines. Note that this file will not change any other settings, like your windows layout etc but it's still a good idea to backup your current settings just in case.
To import/export the settings, use Tools -> Import and Export Settings... Visual Studio menu option.
Once the settings are loaded, you can use Edit -> Advanced -> Format Document (or Ctrl+E,D shortcut) or, which is recommended, Edit -> Advanced -> Format Selection (or Ctrl+E,F shortcut) to format only selected fragment.
Note: If you make modifications to the code, please first disable the
insertion of TAB characters in your editor.

Просмотреть файл

@ -5,7 +5,7 @@
//
#include "stdafx.h"
#include "Matrix.h"
#include "commandArgUtil.h"
#include "Config.h"
#include "DataReader.h"
#include "boost/filesystem.hpp"

Просмотреть файл

@ -117,7 +117,7 @@
<ClInclude Include="targetver.h" />
</ItemGroup>
<ItemGroup>
<ClCompile Include="..\..\..\Common\ConfigFile.cpp" />
<ClCompile Include="..\..\..\Common\Config.cpp" />
<ClCompile Include="..\..\..\Common\DataReader.cpp" />
<ClCompile Include="..\..\..\Common\Eval.cpp" />
<ClCompile Include="..\..\..\Common\File.cpp" />

Просмотреть файл

@ -2,9 +2,6 @@
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup>
<ClCompile Include="stdafx.cpp" />
<ClCompile Include="..\..\..\Common\ConfigFile.cpp">
<Filter>Common</Filter>
</ClCompile>
<ClCompile Include="..\..\..\Common\DataReader.cpp">
<Filter>Common</Filter>
</ClCompile>
@ -24,6 +21,9 @@
<Filter>Common</Filter>
</ClCompile>
<ClCompile Include="DataReaderTests.cpp" />
<ClCompile Include="..\..\..\Common\Config.cpp">
<Filter>Common</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="stdafx.h" />