CNTKv2Library: Linux port
Parent: c508e9f4ca
Commit: 26c2006541
Makefile: 122 lines changed
@@ -60,7 +60,7 @@ endif
 CXX = mpic++

 SOURCEDIR:= Source
-INCLUDEPATH:= $(addprefix $(SOURCEDIR)/, Common/Include Math CNTK ActionsLib ComputationNetworkLib SGDLib SequenceTrainingLib CNTK/BrainScript Readers/ReaderLib)
+INCLUDEPATH:= $(addprefix $(SOURCEDIR)/, Common/Include CNTKv2LibraryDll CNTKv2LibraryDll/API Math CNTK ActionsLib ComputationNetworkLib SGDLib SequenceTrainingLib CNTK/BrainScript Readers/ReaderLib)
 # COMMON_FLAGS include settings that are passed both to NVCC and C++ compilers.
 COMMON_FLAGS:= -D_POSIX_SOURCE -D_XOPEN_SOURCE=600 -D__USE_XOPEN2K -std=c++11
 CPPFLAGS:=
@@ -247,7 +247,7 @@ READER_SRC =\
 	$(SOURCEDIR)/Readers/ReaderLib/TruncatedBpttPacker.cpp \
 	$(SOURCEDIR)/Readers/ReaderLib/PackerBase.cpp \
 	$(SOURCEDIR)/Readers/ReaderLib/FramePacker.cpp \
-	$(SOURCEDIR)/Readers/ReaderLib/ChunkCache.cpp \
+	$(SOURCEDIR)/Readers/ReaderLib/ChunkCache.cpp \

 COMMON_SRC =\
 	$(SOURCEDIR)/Common/Config.cpp \
@@ -309,7 +309,96 @@ $(CNTKMATH_LIB): $(MATH_OBJ)
 	@mkdir -p $(dir $@)
 	$(CXX) $(LDFLAGS) -shared $(patsubst %,-L%, $(LIBPATH) $(NVMLPATH)) $(patsubst %,$(RPATH)%, $(ORIGINDIR) $(LIBPATH)) -o $@ $^ $(LIBS) -fopenmp

+# CNTKLibrary
+########################################
+
+CNTK_COMMON_SRC =\
+	$(SOURCEDIR)/Common/BestGpu.cpp \
+	$(SOURCEDIR)/Common/MPIWrapper.cpp \
+
+COMPUTATION_NETWORK_LIB_SRC =\
+	$(SOURCEDIR)/ComputationNetworkLib/ComputationNode.cpp \
+	$(SOURCEDIR)/ComputationNetworkLib/ComputationNodeScripting.cpp \
+	$(SOURCEDIR)/ComputationNetworkLib/InputAndParamNodes.cpp \
+	$(SOURCEDIR)/ComputationNetworkLib/ReshapingNodes.cpp \
+	$(SOURCEDIR)/ComputationNetworkLib/SpecialPurposeNodes.cpp \
+	$(SOURCEDIR)/ComputationNetworkLib/ComputationNetwork.cpp \
+	$(SOURCEDIR)/ComputationNetworkLib/ComputationNetworkEvaluation.cpp \
+	$(SOURCEDIR)/ComputationNetworkLib/ComputationNetworkAnalysis.cpp \
+	$(SOURCEDIR)/ComputationNetworkLib/ComputationNetworkEditing.cpp \
+	$(SOURCEDIR)/ComputationNetworkLib/ComputationNetworkBuilder.cpp \
+	$(SOURCEDIR)/ComputationNetworkLib/ComputationNetworkScripting.cpp \
+
+SEQUENCE_TRAINING_LIB_SRC =\
+	$(SOURCEDIR)/SequenceTrainingLib/latticeforwardbackward.cpp \
+	$(SOURCEDIR)/SequenceTrainingLib/parallelforwardbackward.cpp \
+
+ifdef CUDA_PATH
+SEQUENCE_TRAINING_LIB_SRC +=\
+	$(SOURCEDIR)/Math/cudalatticeops.cu \
+	$(SOURCEDIR)/Math/cudalattice.cpp \
+	$(SOURCEDIR)/Math/cudalib.cpp \
+
+else
+SEQUENCE_TRAINING_LIB_SRC +=\
+	$(SOURCEDIR)/SequenceTrainingLib/latticeNoGPU.cpp \
+
+endif
+
+CNTKLIBRARY_SRC =\
+	$(SOURCEDIR)/CNTKv2LibraryDll/Common.cpp \
+	$(SOURCEDIR)/CNTKv2LibraryDll/Function.cpp \
+	$(SOURCEDIR)/CNTKv2LibraryDll/NDArrayView.cpp \
+	$(SOURCEDIR)/CNTKv2LibraryDll/NDMask.cpp \
+	$(SOURCEDIR)/CNTKv2LibraryDll/Utils.cpp \
+	$(SOURCEDIR)/CNTKv2LibraryDll/Value.cpp \
+	$(SOURCEDIR)/CNTKv2LibraryDll/Variable.cpp \
+
+CNTKLIBRARY_SRC+=$(CNTK_COMMON_SRC)
+CNTKLIBRARY_SRC+=$(COMPUTATION_NETWORK_LIB_SRC)
+CNTKLIBRARY_SRC+=$(SEQUENCE_TRAINING_LIB_SRC)
+
+CNTKLIBRARY_VERSION=2.0
+CNTKLIBRARY:=cntklibrary-$(CNTKLIBRARY_VERSION)
+
+CNTKLIBRARY_OBJ := $(patsubst %.cu, $(OBJDIR)/%.o, $(patsubst %.cpp, $(OBJDIR)/%.o, $(CNTKLIBRARY_SRC)))
+
+CNTKLIBRARY_LIB:=$(LIBDIR)/lib$(CNTKLIBRARY).so
+ALL+=$(CNTKLIBRARY_LIB)
+SRC+=$(CNTKLIBRARY_SRC)
+
+RPATH=-Wl,-rpath,
+
+$(CNTKLIBRARY_LIB): $(CNTKLIBRARY_OBJ) | $(CNTKMATH_LIB)
+	@echo $(SEPARATOR)
+	@mkdir -p $(dir $@)
+	@echo building output for $(ARCH) with build type $(BUILDTYPE)
+	$(CXX) $(LDFLAGS) -shared $(patsubst %,-L%, $(LIBDIR) $(LIBPATH) $(NVMLPATH)) $(patsubst %,$(RPATH)%, $(ORIGINDIR) $(LIBPATH)) -o $@ $^ $(LIBS) -l$(CNTKMATH)
+
+# CNTKLibrary tests
+########################################
+
+CNTKLIBRARY_TESTS_SRC =\
+	$(SOURCEDIR)/CNTKv2LibraryDll/LibraryTests/FeedForwardTests.cpp \
+	$(SOURCEDIR)/CNTKv2LibraryDll/LibraryTests/Main.cpp \
+	$(SOURCEDIR)/CNTKv2LibraryDll/LibraryTests/NDArrayViewTests.cpp \
+	$(SOURCEDIR)/CNTKv2LibraryDll/LibraryTests/RecurrentFunctionTests.cpp \
+	$(SOURCEDIR)/CNTKv2LibraryDll/LibraryTests/TensorTests.cpp \
+
+CNTKLIBRARY_TESTS:=$(BINDIR)/cntklibrarytests
+CNTKLIBRARY_TESTS_OBJ := $(patsubst %.cu, $(OBJDIR)/%.o, $(patsubst %.cpp, $(OBJDIR)/%.o, $(CNTKLIBRARY_TESTS_SRC)))
+
+ALL+=$(CNTKLIBRARY_TESTS)
+SRC+=$(CNTKLIBRARY_TESTS_SRC)
+
+RPATH=-Wl,-rpath,
+
+$(CNTKLIBRARY_TESTS): $(CNTKLIBRARY_TESTS_OBJ) | $(CNTKLIBRARY_LIB)
+	@echo $(SEPARATOR)
+	@mkdir -p $(dir $@)
+	@echo building output for $(ARCH) with build type $(BUILDTYPE)
+	$(CXX) $(LDFLAGS) $(patsubst %,-L%, $(LIBDIR) $(LIBPATH) $(NVMLPATH)) $(patsubst %,$(RPATH)%, $(ORIGINLIBDIR) $(LIBPATH)) -o $@ $^ $(LIBS) -l$(CNTKLIBRARY) -l$(CNTKMATH)
+
 # BinaryReader plugin
 ########################################

@@ -594,17 +683,6 @@ CNTK_SRC =\
 	$(SOURCEDIR)/CNTK/CNTK.cpp \
 	$(SOURCEDIR)/CNTK/ModelEditLanguage.cpp \
 	$(SOURCEDIR)/CNTK/tests.cpp \
-	$(SOURCEDIR)/ComputationNetworkLib/ComputationNode.cpp \
-	$(SOURCEDIR)/ComputationNetworkLib/ComputationNodeScripting.cpp \
-	$(SOURCEDIR)/ComputationNetworkLib/InputAndParamNodes.cpp \
-	$(SOURCEDIR)/ComputationNetworkLib/ReshapingNodes.cpp \
-	$(SOURCEDIR)/ComputationNetworkLib/SpecialPurposeNodes.cpp \
-	$(SOURCEDIR)/ComputationNetworkLib/ComputationNetwork.cpp \
-	$(SOURCEDIR)/ComputationNetworkLib/ComputationNetworkEvaluation.cpp \
-	$(SOURCEDIR)/ComputationNetworkLib/ComputationNetworkAnalysis.cpp \
-	$(SOURCEDIR)/ComputationNetworkLib/ComputationNetworkEditing.cpp \
-	$(SOURCEDIR)/ComputationNetworkLib/ComputationNetworkBuilder.cpp \
-	$(SOURCEDIR)/ComputationNetworkLib/ComputationNetworkScripting.cpp \
 	$(SOURCEDIR)/SGDLib/Profiler.cpp \
 	$(SOURCEDIR)/SGDLib/SGD.cpp \
 	$(SOURCEDIR)/ActionsLib/TrainActions.cpp \
@@ -615,26 +693,14 @@ CNTK_SRC =\
 	$(SOURCEDIR)/ActionsLib/NetworkDescriptionLanguage.cpp \
 	$(SOURCEDIR)/ActionsLib/SimpleNetworkBuilder.cpp \
 	$(SOURCEDIR)/ActionsLib/NDLNetworkBuilder.cpp \
-	$(SOURCEDIR)/SequenceTrainingLib/latticeforwardbackward.cpp \
-	$(SOURCEDIR)/SequenceTrainingLib/parallelforwardbackward.cpp \
 	$(SOURCEDIR)/CNTK/BrainScript/BrainScriptEvaluator.cpp \
 	$(SOURCEDIR)/CNTK/BrainScript/BrainScriptParser.cpp \
 	$(SOURCEDIR)/CNTK/BrainScript/BrainScriptTest.cpp \
-	$(SOURCEDIR)/Common/BestGpu.cpp \
-	$(SOURCEDIR)/Common/MPIWrapper.cpp \

-ifdef CUDA_PATH
-CNTK_SRC +=\
-	$(SOURCEDIR)/Math/cudalatticeops.cu \
-	$(SOURCEDIR)/Math/cudalattice.cpp \
-	$(SOURCEDIR)/Math/cudalib.cpp \
-
-else
-CNTK_SRC +=\
-	$(SOURCEDIR)/SequenceTrainingLib/latticeNoGPU.cpp \
-
-endif
+CNTK_SRC+=$(CNTK_COMMON_SRC)
+CNTK_SRC+=$(COMPUTATION_NETWORK_LIB_SRC)
+CNTK_SRC+=$(SEQUENCE_TRAINING_LIB_SRC)

 CNTK_OBJ := $(patsubst %.cu, $(OBJDIR)/%.o, $(patsubst %.cpp, $(OBJDIR)/%.o, $(CNTK_SRC)))

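Note: the new targets produce lib/libcntklibrary-2.0.so plus a bin/cntklibrarytests driver that links against it and the math library. As an illustration only (this program and its build line are hypothetical, not part of the commit), a client of the new shared library would use the API declared in the header changes below, e.g.:

    // smoke_test.cpp -- hypothetical client of the new CNTKv2 shared library.
    // Illustrative build line:
    //   g++ -std=c++11 smoke_test.cpp -L<libdir> -lcntklibrary-2.0 -Wl,-rpath,'$ORIGIN'
    #include "CNTKLibrary.h"
    #include <cstdio>

    int main()
    {
        // Scalar-filled CPU view, via the NDArrayView(value, shape, device) ctor in this diff.
        CNTK::NDArrayView view(1.5f, CNTK::NDShape({ 2, 3 }), CNTK::DeviceDescriptor::CPUDevice());
        const float* buf = view.DataBuffer<float>();
        std::printf("view[0] = %f, axes = %d\n", buf[0], (int)view.Shape().NumAxes());
        return 0;
    }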
@@ -50,7 +50,7 @@ namespace CNTK
 /// Get the 'DataType' corresponding to the ElementType template type argument.
 ///
 template <typename ElementType>
-inline DataType GetDataType()
+inline DataType AsDataType()
 {
     if (std::is_same<ElementType, float>())
         return DataType::Float;
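Note: the free helper GetDataType<ElementType>() becomes AsDataType<ElementType>() throughout this commit, most likely to keep the name clear of the new GetDataType() member accessors introduced further down (inside class scope a member named GetDataType would hide the free function template under gcc's lookup rules). The helper itself is the usual compile-time type-to-enum mapping; a minimal self-contained sketch of the technique (surrounding code is illustrative, not the real header):

    #include <type_traits>

    enum class DataType { Float, Double };

    // Map a C++ element type to its runtime DataType tag at compile time.
    template <typename ElementType>
    inline DataType AsDataType()
    {
        static_assert(std::is_same<ElementType, float>::value ||
                      std::is_same<ElementType, double>::value,
                      "Unsupported ElementType");
        return std::is_same<ElementType, float>::value ? DataType::Float : DataType::Double;
    }

    int main()
    {
        return (AsDataType<float>() == DataType::Float &&
                AsDataType<double>() == DataType::Double) ? 0 : 1;
    }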
@@ -213,9 +213,9 @@ namespace CNTK
 ///
 /// Creates and returns a new NDShape instance with the same dimensions as 'this' shape's specified axis range.
 ///
-NDShape SubShape(size_t startAxisId = 0, size_t endAxisIdExclusive = UINT_MAX) const
+NDShape SubShape(size_t startAxisId = 0, size_t endAxisIdExclusive = SIZE_MAX) const
 {
-    endAxisIdExclusive = (endAxisIdExclusive == UINT_MAX) ? NumAxes() : endAxisIdExclusive;
+    endAxisIdExclusive = (endAxisIdExclusive == SIZE_MAX) ? NumAxes() : endAxisIdExclusive;
     if ((endAxisIdExclusive < startAxisId) || (endAxisIdExclusive > NumAxes()))
         InvalidArgument("NDShape::SubShape : The specified endAxisId cannot exceed the number of axes of 'this' NDShape and must be >= than the specified startAxisId");

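Note: UINT_MAX is the wrong sentinel for a size_t parameter on LP64 Linux, where unsigned int is 32 bits but size_t is 64 bits; SIZE_MAX matches the parameter's type on every platform. A standalone illustration of the sentinel idiom (plain dims vector, not the real NDShape API):

    #include <cstddef>
    #include <cstdint>   // SIZE_MAX
    #include <vector>

    // Return dims[startAxis, endAxisExclusive); SIZE_MAX means "through the last axis".
    std::vector<size_t> SubShape(const std::vector<size_t>& dims,
                                 size_t startAxis = 0,
                                 size_t endAxisExclusive = SIZE_MAX)
    {
        if (endAxisExclusive == SIZE_MAX)   // sentinel: default to the full range
            endAxisExclusive = dims.size();
        return std::vector<size_t>(dims.begin() + startAxis, dims.begin() + endAxisExclusive);
    }

    int main()
    {
        std::vector<size_t> dims = { 2, 3, 4 };
        return SubShape(dims, 1).size() == 2 ? 0 : 1;   // yields { 3, 4 }
    }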
@@ -332,7 +332,7 @@ namespace CNTK
 ///
 template <typename ElementType>
 NDArrayView(const NDShape& viewShape, ElementType* dataBuffer, size_t numBufferElements, const DeviceDescriptor& device, bool readOnly = false)
-    : NDArrayView(GetDataType<ElementType>(), viewShape, dataBuffer, numBufferElements * sizeof(ElementType), device, readOnly)
+    : NDArrayView(AsDataType<ElementType>(), viewShape, dataBuffer, numBufferElements * sizeof(ElementType), device, readOnly)
 {}

 ///
@@ -351,7 +351,7 @@ namespace CNTK
 ///
 template <typename ElementType>
 explicit NDArrayView(const ElementType& value, const NDShape& viewShape = { 1 }, const DeviceDescriptor& device = DeviceDescriptor::DefaultDevice(), bool readOnly = false)
-    : NDArrayView(GetDataType<ElementType>(), viewShape, device)
+    : NDArrayView(AsDataType<ElementType>(), viewShape, device)
 {
     SetValue(value);
     m_isReadOnly = readOnly;
@@ -386,7 +386,7 @@ namespace CNTK
 ///
 /// Returns the data type of 'this' view's contents.
 ///
-DataType DataType() const
+DataType GetDataType() const
 {
     return m_dataType;
 }
@@ -394,7 +394,7 @@ namespace CNTK
 ///
 /// Returns the storage format of 'this' view.
 ///
-StorageFormat StorageFormat() const
+StorageFormat GetStorageFormat() const
 {
     return m_storageFormat;
 }
@@ -412,7 +412,7 @@ namespace CNTK
 ///
 bool IsSparse() const
 {
-    return (StorageFormat() != StorageFormat::Dense);
+    return (GetStorageFormat() != StorageFormat::Dense);
 }

 ///
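Note: a member function that reuses the name of the type it returns, such as DataType DataType() const, compiles under MSVC but not gcc: once declared, the name refers to the member function inside the class, and its later use as a type triggers gcc's "changes meaning of 'DataType'" error. Renaming the accessors to GetDataType()/GetStorageFormat() sidesteps this. Minimal reproduction, illustrative:

    enum class DataType { Float, Double };

    struct View
    {
        // gcc rejects the next line (roughly: "declaration of 'DataType View::DataType() const'
        // changes meaning of 'DataType'"), while MSVC accepts it:
        // DataType DataType() const { return m_dataType; }
        DataType GetDataType() const { return m_dataType; }   // portable spelling
        DataType m_dataType = DataType::Float;
    };

    int main()
    {
        View v;
        return v.GetDataType() == DataType::Float ? 0 : 1;
    }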
@@ -598,14 +598,14 @@ namespace CNTK
 /// The created Value object contains a copy of the specified 'sequences' data.
 ///
 template <typename ElementType>
-static ValuePtr Create(const NDShape& sampleShape, const std::vector<const std::vector<ElementType>>& sequences, const DeviceDescriptor& device, bool readOnly = false);
+static ValuePtr Create(const NDShape& sampleShape, const std::vector<std::vector<ElementType>>& sequences, const DeviceDescriptor& device, bool readOnly = false);

 ///
 /// Create a new Value object containing a collection of variable length sequences of one hot vectors
 /// The created Value object contains a copy of the specified 'sequences' data.
 ///
 template <typename ElementType>
-static ValuePtr Create(size_t vocabularySize, const std::vector<const std::vector<size_t>>& oneHotSequences, const DeviceDescriptor& device, bool readOnly = false);
+static ValuePtr Create(size_t vocabularySize, const std::vector<std::vector<size_t>>& oneHotSequences, const DeviceDescriptor& device, bool readOnly = false);

 ///
 /// Destruct 'this' Value object.
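Note: std::vector<const std::vector<ElementType>> is not valid standard C++ — a std::vector's element type must be a cv-unqualified object type (a requirement of std::allocator), and libstdc++ rejects it even though MSVC's implementation let it slide. Taking const std::vector<std::vector<ElementType>>& gives the same read-only guarantee at the call site. Sketch:

    #include <vector>

    // Ill-formed with libstdc++ (allocator requires a non-const value_type):
    //   std::vector<const std::vector<float>> sequences;
    // Portable: make the whole container const at the use site instead.
    static float FirstSample(const std::vector<std::vector<float>>& sequences)
    {
        return sequences.front().front();   // callee still cannot mutate the data
    }

    int main()
    {
        std::vector<std::vector<float>> sequences = { { 1.0f, 2.0f }, { 3.0f } };
        return FirstSample(sequences) == 1.0f ? 0 : 1;
    }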
@@ -960,18 +960,14 @@ namespace CNTK
 ///
 /// Returns the DataType of the data that 'this' Variable symbolically represents
 ///
-DataType DataType() const
+DataType GetDataType() const
 {
     return m_dataFields->m_dataType;
 }

 #ifdef _WIN32
 // BUGBUG: Add a default constructor to workaround a bug in VS2013 std::vector implementation that
 // incorrectly requires its elements to be default constructible
 Variable()
 {
 }
 #endif

 ///
 /// Returns a boolean value indicating if gradient computation is enabled for this variable.
@@ -1057,7 +1053,7 @@ namespace CNTK
 /// Construct a parameter whose initial contents are a copy of the specified 'value'
 ///
 explicit Parameter(const NDArrayViewPtr& value, const std::wstring& name = L"")
-    : Variable(value->Shape(), VariableKind::Parameter, value->DataType(), value->DeepClone(), true, {}, name)
+    : Variable(value->Shape(), VariableKind::Parameter, value->GetDataType(), value->DeepClone(), true, {}, name)
 {
 }

@@ -1068,7 +1064,7 @@ namespace CNTK
 ///
 template<typename ElemType>
 Parameter(const NDShape& shape, ElemType initValue, const DeviceDescriptor& device = DeviceDescriptor::DefaultDevice(), const std::wstring& name = L"")
-    : Variable(shape, VariableKind::Parameter, GetDataType<ElemType>(), new NDArrayView(initValue, shape, device), true, {}, name)
+    : Variable(shape, VariableKind::Parameter, AsDataType<ElemType>(), new NDArrayView(initValue, shape, device), true, {}, name)
 {
 }

@@ -1106,7 +1102,7 @@ namespace CNTK
 /// Contruct a Constant whose initial contents are a copy of the specified value
 ///
 Constant(const NDArrayViewPtr& value, const std::wstring& name = L"")
-    : Variable(value->Shape(), VariableKind::Constant, value->DataType(), value->DeepClone(true), false, {}, name)
+    : Variable(value->Shape(), VariableKind::Constant, value->GetDataType(), value->DeepClone(true), false, {}, name)
 {
 }

@@ -1117,7 +1113,7 @@ namespace CNTK
 ///
 template<typename ElemType>
 Constant(const NDShape& shape, ElemType initValue, const DeviceDescriptor& device = DeviceDescriptor::DefaultDevice(), const std::wstring& name = L"")
-    : Variable(shape, VariableKind::Constant, GetDataType<ElemType>(), new NDArrayView(initValue, shape, device), false, {}, name)
+    : Variable(shape, VariableKind::Constant, AsDataType<ElemType>(), new NDArrayView(initValue, shape, device), false, {}, name)
 {
 }

@@ -1178,6 +1174,14 @@ namespace CNTK
 }

 namespace std {
+    template <> struct hash<CNTK::Axis>
+    {
+        size_t operator()(const CNTK::Axis& x) const
+        {
+            return std::hash<std::wstring>()(x.Name());
+        }
+    };
+
     template <> struct hash<CNTK::Variable>
     {
         size_t operator()(const CNTK::Variable& x) const
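Note: the new std::hash<CNTK::Axis> specialization (hashing an axis by its name) is what lets Axis serve as a key in the standard unordered containers, mirroring the existing std::hash<CNTK::Variable> below it. The usage pattern, with a stand-in Axis type for illustration:

    #include <string>
    #include <unordered_set>

    namespace CNTK
    {
        // Stand-in for the real Axis class; only the name accessor matters here.
        class Axis
        {
        public:
            explicit Axis(const std::wstring& name) : m_name(name) {}
            const std::wstring& Name() const { return m_name; }
            bool operator==(const Axis& other) const { return m_name == other.m_name; }
        private:
            std::wstring m_name;
        };
    }

    namespace std
    {
        template <> struct hash<CNTK::Axis>
        {
            size_t operator()(const CNTK::Axis& x) const { return std::hash<std::wstring>()(x.Name()); }
        };
    }

    int main()
    {
        std::unordered_set<CNTK::Axis> axes;   // requires the hash specialization above
        axes.insert(CNTK::Axis(L"defaultDynamicAxis"));
        return axes.count(CNTK::Axis(L"defaultDynamicAxis")) == 1 ? 0 : 1;
    }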
@@ -1349,7 +1353,7 @@ namespace CNTK
 ///
 FunctionPtr RootFunction() const
 {
-    return (m_rootFunction == nullptr) ? const_cast<Function*>(this) : m_rootFunction;
+    return (m_rootFunction == nullptr) ? const_cast<Function*>(this) : m_rootFunction.GetPtr();
 }

 ///
@@ -27,6 +27,8 @@
 #include <type_traits>
 #include <unordered_set>
 #include <unordered_map>
+#include <stdlib.h>
+#include <string.h>

 #pragma warning(disable: 4702 4127)

@@ -47,6 +49,83 @@ namespace Microsoft { namespace MSR { namespace CNTK {
     class ComputationNode;
 }}}

+// TODO: The following should be reconciled with the equivalent code in the CNTK implementation
+
+#ifndef _MSC_VER
+#define _countof(_Array) (sizeof(_Array) / sizeof(_Array[0]))
+#endif
+
+namespace CNTK
+{
+
+#define UNUSED(x) (void)(x) // for variables that are, e.g., only used in _DEBUG builds
+
+#ifdef _MSC_VER
+#define __declspec_noreturn __declspec(noreturn)
+#else
+#define __declspec_noreturn __attribute__((noreturn))
+#endif
+
+#pragma warning(push)
+#pragma warning(disable : 4996)
+#ifndef _MSC_VER // TODO: what is the correct trigger for gcc?
+template <class E>
+__declspec_noreturn void ThrowFormatted(const char* format, ...) __attribute__((format(printf, 1, 2)));
+#endif
+
+template <class E>
+__declspec_noreturn inline void ThrowFormatted(const char* format, ...)
+{
+    va_list args;
+    va_start(args, format);
+
+    char buffer[1024] = { 0 }; // Note: pre-VS2015 vsnprintf() is not standards-compliant and may not add a terminator
+    int written = vsnprintf(buffer, _countof(buffer) - 1, format, args); // -1 because pre-VS2015 vsnprintf() does not always write a 0-terminator
+    // TODO: In case of EILSEQ error, choose between just outputting the raw format itself vs. continuing the half-completed buffer
+    //if (written < 0) // an invalid wide-string conversion may lead to EILSEQ
+    //    strncpy(buffer, format, _countof(buffer)
+    UNUSED(written); // pre-VS2015 vsnprintf() returns -1 in case of overflow, instead of the #characters written
+    if (strlen(buffer)/*written*/ >= (int)_countof(buffer) - 2)
+        sprintf(buffer + _countof(buffer) - 4, "...");
+
+    // TODO: Should use ExceptionWithCallStack; temporarily using std::exception to avoid duplicating headers
+    //throw ExceptionWithCallStack<E>(buffer, ExceptionWithCallStack<E>::GetCallStack(/*skipLevels=*/2, /*makeFunctionNamesStandOut=*/true));
+    throw E(buffer);
+}
+#pragma warning(pop)
+
+// RuntimeError - throw a std::runtime_error with a formatted error string
+#ifndef _MSC_VER // gcc __attribute__((format(printf())) does not percolate through variadic templates; so must go the macro route
+#define RuntimeError ThrowFormatted<std::runtime_error>
+#define LogicError ThrowFormatted<std::logic_error>
+#define InvalidArgument ThrowFormatted<std::invalid_argument>
+#else
+template <class... _Types>
+__declspec_noreturn inline void RuntimeError(const char* format, _Types&&... _Args)
+{
+    ThrowFormatted<std::runtime_error>(format, std::forward<_Types>(_Args)...);
+}
+template <class... _Types>
+__declspec_noreturn inline void LogicError(const char* format, _Types&&... _Args)
+{
+    ThrowFormatted<std::logic_error>(format, std::forward<_Types>(_Args)...);
+}
+template <class... _Types>
+__declspec_noreturn inline void InvalidArgument(const char* format, _Types&&... _Args)
+{
+    ThrowFormatted<std::invalid_argument>(format, std::forward<_Types>(_Args)...);
+}
+#endif
+
+#ifndef NOT_IMPLEMENTED
+#define NOT_IMPLEMENTED \
+    { \
+        fprintf(stderr, "Inside File: %s Line: %d Function: %s -> Feature Not Implemented.\n", __FILE__, __LINE__, __FUNCTION__); \
+        LogicError("Inside File: %s Line: %d Function: %s -> Feature Not Implemented.\n", __FILE__, __LINE__, __FUNCTION__); \
+    }
+#endif
+}

 namespace CNTK
 {
     // Forward declarations
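Note: this block (moved up from the bottom of the header; its old copy is deleted further down) carries the two portability tricks of the error machinery: __declspec_noreturn maps to __declspec(noreturn) on MSVC and __attribute__((noreturn)) on gcc, and because gcc's printf-format checking does not propagate through variadic templates, RuntimeError/LogicError/InvalidArgument are plain macros on gcc but variadic-template wrappers on MSVC. The relocation also fixes behavior: the code now throws E(buffer) instead of the non-standard std::exception(buffer). A reduced, self-contained version of the same pattern:

    #include <cstdarg>
    #include <cstdio>
    #include <stdexcept>

    #ifdef _MSC_VER
    #define MY_NORETURN __declspec(noreturn)
    #define MY_PRINTF_CHECK(fmtIdx, argIdx)
    #else
    #define MY_NORETURN __attribute__((noreturn))
    #define MY_PRINTF_CHECK(fmtIdx, argIdx) __attribute__((format(printf, fmtIdx, argIdx)))
    #endif

    template <class E>
    MY_NORETURN void ThrowFormatted(const char* format, ...) MY_PRINTF_CHECK(1, 2);

    template <class E>
    MY_NORETURN void ThrowFormatted(const char* format, ...)
    {
        va_list args;
        va_start(args, format);
        char buffer[256];
        vsnprintf(buffer, sizeof(buffer), format, args); // C++11 vsnprintf always 0-terminates
        va_end(args);
        throw E(buffer); // E must be constructible from const char*, e.g. std::runtime_error
    }

    #define RuntimeError ThrowFormatted<std::runtime_error>

    int main()
    {
        try { RuntimeError("bad value: %d", 42); }
        catch (const std::runtime_error&) { return 0; }
        return 1;
    }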
@@ -234,7 +313,7 @@ namespace CNTK
 template <typename ValueType>
 friend CNTK_API bool operator==(const _SimpleVector<ValueType>& first, const _SimpleVector<ValueType>& second);

-friend class Function;
+friend class CNTK::Function;

 public:
     _SimpleVector();
@@ -309,7 +388,7 @@ namespace CNTK
 template <typename KeyType>
 class CNTK_API _SimpleSet final
 {
-    friend class CompositeFunction;
+    friend class CNTK::CompositeFunction;

     template <typename T>
     friend CNTK_API bool operator==(const _SimpleSet<T>& first, const _SimpleSet<T>& second);
@@ -363,8 +442,8 @@ namespace CNTK
 template <typename KeyType, typename ValueType>
 class CNTK_API _SimpleMap final
 {
-    friend class CompositeFunction;
-    friend class Function;
+    friend class CNTK::CompositeFunction;
+    friend class CNTK::Function;

 public:
     _SimpleMap();
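Note: the qualification matters because an unqualified friend declaration that names a class not yet visible injects a forward declaration into the nearest enclosing namespace, so friendship can silently bind to a brand-new, empty class rather than to CNTK::Function; gcc is stricter about this lookup than MSVC. Writing friend class CNTK::Function; pins the name to the intended class. Illustration of the trap (the nested namespace here is an assumption for demonstration, not the real header layout):

    namespace CNTK
    {
        namespace _Internal { template <typename T> class _SimpleVector; }

        class Function
        {
        public:
            template <typename T>
            static T First(const _Internal::_SimpleVector<T>& v);
        };

        namespace _Internal
        {
            template <typename T>
            class _SimpleVector
            {
                // An unqualified 'friend class Function;' here would forward-declare
                // CNTK::_Internal::Function instead of befriending CNTK::Function.
                friend class CNTK::Function;
            public:
                explicit _SimpleVector(T value) : m_value(value) {}
            private:
                T m_value;
            };
        }

        template <typename T>
        T Function::First(const _Internal::_SimpleVector<T>& v) { return v.m_value; }
    }

    int main()
    {
        CNTK::_Internal::_SimpleVector<int> v(7);
        return CNTK::Function::First(v) == 7 ? 0 : 1;
    }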
@@ -435,75 +514,3 @@ namespace std {
 }
 };
 }
-
-// TODO: The following should be reconciled with the equivalent code in the CNTK implementation
-namespace CNTK
-{
-
-#define UNUSED(x) (void)(x) // for variables that are, e.g., only used in _DEBUG builds
-
-#ifdef _MSC_VER
-#define __declspec_noreturn __declspec(noreturn)
-#else
-#define __declspec_noreturn __attribute__((noreturn))
-#endif
-
-#pragma warning(push)
-#pragma warning(disable : 4996)
-#ifndef _MSC_VER // TODO: what is the correct trigger for gcc?
-template <class E>
-__declspec_noreturn void ThrowFormatted(const char* format, ...) __attribute__((format(printf, 1, 2)));
-#endif
-
-template <class E>
-__declspec_noreturn inline void ThrowFormatted(const char* format, ...)
-{
-    va_list args;
-    va_start(args, format);
-
-    char buffer[1024] = { 0 }; // Note: pre-VS2015 vsnprintf() is not standards-compliant and may not add a terminator
-    int written = vsnprintf(buffer, _countof(buffer) - 1, format, args); // -1 because pre-VS2015 vsnprintf() does not always write a 0-terminator
-    // TODO: In case of EILSEQ error, choose between just outputting the raw format itself vs. continuing the half-completed buffer
-    //if (written < 0) // an invalid wide-string conversion may lead to EILSEQ
-    //    strncpy(buffer, format, _countof(buffer)
-    UNUSED(written); // pre-VS2015 vsnprintf() returns -1 in case of overflow, instead of the #characters written
-    if (strlen(buffer)/*written*/ >= (int)_countof(buffer) - 2)
-        sprintf(buffer + _countof(buffer) - 4, "...");
-
-    // TODO: Should use ExceptionWithCallStack; temporarily using std::exception to avoid duplicating headers
-    //throw ExceptionWithCallStack<E>(buffer, ExceptionWithCallStack<E>::GetCallStack(/*skipLevels=*/2, /*makeFunctionNamesStandOut=*/true));
-    throw std::exception(buffer);
-}
-#pragma warning(pop)
-
-// RuntimeError - throw a std::runtime_error with a formatted error string
-#ifndef _MSC_VER // gcc __attribute__((format(printf())) does not percolate through variadic templates; so must go the macro route
-#define RuntimeError ThrowFormatted<std::runtime_error>
-#define LogicError ThrowFormatted<std::logic_error>
-#define InvalidArgument ThrowFormatted<std::invalid_argument>
-#else
-template <class... _Types>
-__declspec_noreturn inline void RuntimeError(const char* format, _Types&&... _Args)
-{
-    ThrowFormatted<std::runtime_error>(format, std::forward<_Types>(_Args)...);
-}
-template <class... _Types>
-__declspec_noreturn inline void LogicError(const char* format, _Types&&... _Args)
-{
-    ThrowFormatted<std::logic_error>(format, std::forward<_Types>(_Args)...);
-}
-template <class... _Types>
-__declspec_noreturn inline void InvalidArgument(const char* format, _Types&&... _Args)
-{
-    ThrowFormatted<std::invalid_argument>(format, std::forward<_Types>(_Args)...);
-}
-#endif
-
-#ifndef NOT_IMPLEMENTED
-#define NOT_IMPLEMENTED \
-    { \
-        fprintf(stderr, "Inside File: %s Line: %d Function: %s -> Feature Not Implemented.\n", __FILE__, __LINE__, __FUNCTION__); \
-        LogicError("Inside File: %s Line: %d Function: %s -> Feature Not Implemented.\n", __FILE__, __LINE__, __FUNCTION__); \
-    }
-#endif
-}
@@ -75,7 +75,7 @@
     <Link>
       <SubSystem>Console</SubSystem>
       <GenerateDebugInformation>true</GenerateDebugInformation>
-      <AdditionalDependencies>ComputationNetworkLib.lib; Math.lib; Common.lib; ActionsLib.lib; kernel32.lib; user32.lib; shell32.lib; SequenceTrainingLib.lib; %(AdditionalDependencies)</AdditionalDependencies>
+      <AdditionalDependencies>ComputationNetworkLib.lib; Math.lib; Common.lib; SequenceTrainingLib.lib; kernel32.lib; user32.lib; shell32.lib; %(AdditionalDependencies)</AdditionalDependencies>
       <DelayLoadDLLs>Math.dll; nvml.dll; $(CudaRuntimeDll)</DelayLoadDLLs>
     </Link>
   </ItemDefinitionGroup>
@@ -99,7 +99,7 @@
       <GenerateDebugInformation>true</GenerateDebugInformation>
      <EnableCOMDATFolding>true</EnableCOMDATFolding>
      <OptimizeReferences>true</OptimizeReferences>
-      <AdditionalDependencies>ComputationNetworkLib.lib; Math.lib; Common.lib; ActionsLib.lib; kernel32.lib; user32.lib; shell32.lib; SequenceTrainingLib.lib; %(AdditionalDependencies)</AdditionalDependencies>
+      <AdditionalDependencies>ComputationNetworkLib.lib; Math.lib; Common.lib; kernel32.lib; user32.lib; shell32.lib; SequenceTrainingLib.lib; %(AdditionalDependencies)</AdditionalDependencies>
       <Profile>true</Profile>
       <DelayLoadDLLs>Math.dll; nvml.dll; $(CudaRuntimeDll)</DelayLoadDLLs>
     </Link>
@@ -84,7 +84,7 @@ namespace CNTK
         else
         {
             assert(variable.Kind() == VariableKind::Output);
-            computationNodePtr = GetOutputVariableNode(variable, network, builder, variableToNodeMap, isVariableRootMap)->As<ComputationNode<ElementType>>()->shared_from_this();
+            computationNodePtr = GetOutputVariableNode(variable, network, builder, variableToNodeMap, isVariableRootMap)->template As<ComputationNode<ElementType>>()->shared_from_this();
         }

         variableToNodeMap[variable] = computationNodePtr;
@@ -106,13 +106,13 @@ namespace CNTK
         // Create the nodes corresponding to the inputs
         auto functionInputs = primitiveFunction->Inputs();
         auto input0BaseNodePtr = GetNode(functionInputs[0], network, builder, variableToNodeMap, isVariableRootMap);
-        std::shared_ptr<ComputationNode<ElementType>> input0Node = (input0BaseNodePtr != nullptr) ? input0BaseNodePtr->As<ComputationNode<ElementType>>()->shared_from_this() : nullptr;
+        std::shared_ptr<ComputationNode<ElementType>> input0Node = (input0BaseNodePtr != nullptr) ? input0BaseNodePtr->template As<ComputationNode<ElementType>>()->shared_from_this() : nullptr;

         std::shared_ptr<ComputationNode<ElementType>> input1Node;
         if (functionInputs.size() > 1)
         {
             auto input1BaseNodePtr = GetNode(functionInputs[1], network, builder, variableToNodeMap, isVariableRootMap);
-            input1Node = (input1BaseNodePtr != nullptr) ? input1BaseNodePtr->As<ComputationNode<ElementType>>()->shared_from_this() : nullptr;
+            input1Node = (input1BaseNodePtr != nullptr) ? input1BaseNodePtr->template As<ComputationNode<ElementType>>()->shared_from_this() : nullptr;
         }

         PrimitiveOpType op = primitiveFunction->OpType();
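Note: the ->As<...> to ->template As<...> edits are the classic dependent-name fix: inside a function template, the pointer's type depends on ElementType, so without the template keyword the < after As parses as less-than. MSVC's lookup at the time accepted the original; gcc requires the disambiguator. Reduced example:

    template <typename T>
    struct Wrapper
    {
        template <typename U>
        U* As() { return nullptr; }   // toy stand-in for ComputationNodeBase::As<U>()
    };

    template <typename ElementType>
    ElementType* Use(Wrapper<ElementType>* p)
    {
        // gcc rejects 'p->As<ElementType>()' here: the type of *p is dependent,
        // so '<' would be parsed as a comparison. MSVC used to accept it anyway.
        return p->template As<ElementType>();
    }

    int main()
    {
        Wrapper<float> w;
        return Use(&w) == nullptr ? 0 : 1;
    }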
@@ -303,17 +303,17 @@ namespace CNTK
     template <typename ElementType>
     /*static*/ std::pair<std::shared_ptr<const Matrix<ElementType>>, MBLayoutPtr> CompositeFunction::GetCNTKImplMatrixAndMBLayoutFromValueObject(Variable var, const ValuePtr& value)
     {
-        if (var.DataType() != value->Data()->DataType())
-            LogicError("The Variable's DataType %s does not match the corresponding Value's DataType %s", DataTypeName(var.DataType()), DataTypeName(value->Data()->DataType()));
+        if (var.GetDataType() != value->Data()->GetDataType())
+            LogicError("The Variable's DataType %s does not match the corresponding Value's DataType %s", DataTypeName(var.GetDataType()), DataTypeName(value->Data()->GetDataType()));

-        if (GetDataType<ElementType>() != value->Data()->DataType())
-            LogicError("The specified ElementType %s does not match the DataType %s", typeid(ElementType).name(), DataTypeName(value->Data()->DataType()));
+        if (AsDataType<ElementType>() != value->Data()->GetDataType())
+            LogicError("The specified ElementType %s does not match the DataType %s", typeid(ElementType).name(), DataTypeName(value->Data()->GetDataType()));

         // TODO: Is supplying dense data for an Input variable tagged as sparse, a fatal error?
         if (var.IsSparseInput() && !value->Data()->IsSparse())
             InvalidArgument("Dense input data supplied for a sparse input Variable");

-        if (var.IsSparseInput() && (value->Data()->StorageFormat() != StorageFormat::SparseCSC))
+        if (var.IsSparseInput() && (value->Data()->GetStorageFormat() != StorageFormat::SparseCSC))
             InvalidArgument("Sparse Input data must be in SparseCSC format");

         if (value->Data()->Shape().NumAxes() == var.Shape().NumAxes())
@@ -397,7 +397,7 @@ namespace CNTK
             layout->GetNumCols(),
             AsCNTKImplDeviceId(value->Data()->Device()),
             value->Data()->IsSparse() ? MatrixType::SPARSE : MatrixType::DENSE,
-            AsCNTKMatrixFormat(value->Data()->StorageFormat()));
+            AsCNTKMatrixFormat(value->Data()->GetStorageFormat()));

         std::vector<size_t> sequencesShorterThanLongestSequence;
         for (size_t i = 0; i < numSequences; ++i)
@@ -427,8 +427,8 @@ namespace CNTK
         if (var.DynamicAxes().size() > 1)
             LogicError("More than one dynamic axis for a variable is currently unsupported");

-        if (GetDataType<ElementType>() != var.DataType())
-            LogicError("The specified ElementType %s does not match the DataType %s", typeid(ElementType).name(), DataTypeName(var.DataType()));
+        if (AsDataType<ElementType>() != var.GetDataType())
+            LogicError("The specified ElementType %s does not match the DataType %s", typeid(ElementType).name(), DataTypeName(var.GetDataType()));

         if ((layout != nullptr) && (matrix.GetNumRows() != var.Shape().TotalSize()))
             LogicError("Unexpected matrix layout: The number of rows in the matrix does not match the sample size of the Variable");
@@ -442,7 +442,7 @@ namespace CNTK
         {
             // Just create a view over the existing matrix itself
             auto tensorView = new TensorView<ElementType>(std::make_shared<Matrix<ElementType>>(matrix.AsReference()), AsTensorShape(valueDataShape));
-            auto data = NDArrayViewPtr(new NDArrayView(GetDataType<ElementType>(), AsDeviceDescriptor(matrix.GetDeviceId()), AsStorageFormat(matrix.GetFormat()), valueDataShape, true, tensorView), [](_ReferenceCounter* ptr) { delete ptr; });
+            auto data = NDArrayViewPtr(new NDArrayView(AsDataType<ElementType>(), AsDeviceDescriptor(matrix.GetDeviceId()), AsStorageFormat(matrix.GetFormat()), valueDataShape, true, tensorView), [](_ReferenceCounter* ptr) { delete ptr; });
             return ValuePtr(new Value(data), [](_ReferenceCounter* ptr) { delete ptr; });
         }

@@ -502,7 +502,7 @@ namespace CNTK
         }

         auto tensorView = new TensorView<ElementType>(shuffledMatrixData, AsTensorShape(valueDataShape));
-        auto data = NDArrayViewPtr(new NDArrayView(GetDataType<ElementType>(), AsDeviceDescriptor(matrix.GetDeviceId()), StorageFormat::Dense, valueDataShape, true, tensorView), [](_ReferenceCounter* ptr) { delete ptr; });
+        auto data = NDArrayViewPtr(new NDArrayView(AsDataType<ElementType>(), AsDeviceDescriptor(matrix.GetDeviceId()), StorageFormat::Dense, valueDataShape, true, tensorView), [](_ReferenceCounter* ptr) { delete ptr; });
         return ValuePtr(new Value(data, mask), [](_ReferenceCounter* ptr) { delete ptr; });
     }

@@ -522,7 +522,7 @@ namespace CNTK
             ValuePtr argumentValue = arguments[*iter];

             MBLayoutPtr layout;
-            switch (argumentValue->Data()->DataType())
+            switch (argumentValue->Data()->GetDataType())
             {
             case DataType::Float:
             {
@@ -547,7 +547,7 @@ namespace CNTK
                 break;
             }
             default:
-                LogicError("Unsupported DataType %s", DataTypeName(argumentValue->Data()->DataType()));
+                LogicError("Unsupported DataType %s", DataTypeName(argumentValue->Data()->GetDataType()));
                 break;
             }

@@ -573,7 +573,7 @@ namespace CNTK
             ValuePtr gradientValue = iter->second;

             MBLayoutPtr layout;
-            switch (gradientValue->Data()->DataType())
+            switch (gradientValue->Data()->GetDataType())
             {
             case DataType::Float:
             {
@@ -594,7 +594,7 @@ namespace CNTK
                 break;
             }
             default:
-                LogicError("Unsupported DataType %s", DataTypeName(gradientValue->Data()->DataType()));
+                LogicError("Unsupported DataType %s", DataTypeName(gradientValue->Data()->GetDataType()));
                 break;
             }
         }
@@ -635,14 +635,14 @@ namespace CNTK
                 InvalidArgument("The shape %s of the specified Value object for output does not match the actual output shape %s", AsString(outputValuePtr->Data()->Shape()).c_str(), AsString(outputShape).c_str());
             }

-            switch (iter->first.DataType())
+            switch (iter->first.GetDataType())
             {
             case DataType::Float:
             {
                 auto nodeValue = GetValueObjectFromCNTKImplMatrixAndMBLayout<float>(iter->first, computationNodePtr->As<ComputationNode<float>>()->Value(), computationNodePtr->GetMBLayout());
                 if (outputValuePtr == nullptr)
                 {
-                    auto data = NDArrayViewPtr(new NDArrayView(iter->first.DataType(), outputShape, AsDeviceDescriptor(computationNodePtr->ValuePtr()->GetDeviceId())), [](_ReferenceCounter* ptr) { delete ptr; });
+                    auto data = NDArrayViewPtr(new NDArrayView(iter->first.GetDataType(), outputShape, AsDeviceDescriptor(computationNodePtr->ValuePtr()->GetDeviceId())), [](_ReferenceCounter* ptr) { delete ptr; });
                     auto mask = (nodeValue->Mask() != nullptr) ? NDMaskPtr(new NDMask(nodeValue->Mask()->Shape(), nodeValue->Mask()->Device()), [](_ReferenceCounter* ptr) { delete ptr; }) : nullptr;
                     outputValuePtr = ValuePtr(new Value(data, mask), [](_ReferenceCounter* ptr) { delete ptr; });
                 }
@@ -654,7 +654,7 @@ namespace CNTK
                 auto nodeValue = GetValueObjectFromCNTKImplMatrixAndMBLayout<double>(iter->first, computationNodePtr->As<ComputationNode<double>>()->Value(), computationNodePtr->GetMBLayout());
                 if (outputValuePtr == nullptr)
                 {
-                    auto data = NDArrayViewPtr(new NDArrayView(iter->first.DataType(), outputShape, AsDeviceDescriptor(computationNodePtr->ValuePtr()->GetDeviceId())), [](_ReferenceCounter* ptr) { delete ptr; });
+                    auto data = NDArrayViewPtr(new NDArrayView(iter->first.GetDataType(), outputShape, AsDeviceDescriptor(computationNodePtr->ValuePtr()->GetDeviceId())), [](_ReferenceCounter* ptr) { delete ptr; });
                     auto mask = (nodeValue->Mask() != nullptr) ? NDMaskPtr(new NDMask(nodeValue->Mask()->Shape(), nodeValue->Mask()->Device()), [](_ReferenceCounter* ptr) { delete ptr; }) : nullptr;
                     outputValuePtr = ValuePtr(new Value(data, mask), [](_ReferenceCounter* ptr) { delete ptr; });
                 }
@@ -662,7 +662,7 @@ namespace CNTK
                 break;
             }
             default:
-                LogicError("Unsupported DataType %s", DataTypeName(iter->first.DataType()));
+                LogicError("Unsupported DataType %s", DataTypeName(iter->first.GetDataType()));
                 break;
             }

@@ -698,14 +698,14 @@ namespace CNTK
             if (!computationNodePtr->NeedsGradient())
                 LogicError("Backpropagated gradient value cannot be read from a ComputationNode that has NeedsGradient set to false");

-            switch (iter->first.DataType())
+            switch (iter->first.GetDataType())
             {
             case DataType::Float:
             {
                 auto nodeValue = GetValueObjectFromCNTKImplMatrixAndMBLayout<float>(iter->first, computationNodePtr->As<ComputationNode<float>>()->Gradient(), computationNodePtr->GetMBLayout());
                 if (gradientValuePtr == nullptr)
                 {
-                    auto data = NDArrayViewPtr(new NDArrayView(iter->first.DataType(), gradientShape, AsDeviceDescriptor(computationNodePtr->ValuePtr()->GetDeviceId())), [](_ReferenceCounter* ptr) { delete ptr; });
+                    auto data = NDArrayViewPtr(new NDArrayView(iter->first.GetDataType(), gradientShape, AsDeviceDescriptor(computationNodePtr->ValuePtr()->GetDeviceId())), [](_ReferenceCounter* ptr) { delete ptr; });
                     auto mask = NDMaskPtr((nodeValue->Mask() != nullptr) ? new NDMask(nodeValue->Mask()->Shape(), nodeValue->Mask()->Device()) : nullptr, [](_ReferenceCounter* ptr) { delete ptr; });
                     gradientValuePtr = ValuePtr(new Value(data, mask), [](_ReferenceCounter* ptr) { delete ptr; });
                 }
@@ -717,7 +717,7 @@ namespace CNTK
                 auto nodeValue = GetValueObjectFromCNTKImplMatrixAndMBLayout<double>(iter->first, computationNodePtr->As<ComputationNode<double>>()->Gradient(), computationNodePtr->GetMBLayout());
                 if (gradientValuePtr == nullptr)
                 {
-                    auto data = NDArrayViewPtr(new NDArrayView(iter->first.DataType(), gradientShape, AsDeviceDescriptor(computationNodePtr->ValuePtr()->GetDeviceId())), [](_ReferenceCounter* ptr) { delete ptr; });
+                    auto data = NDArrayViewPtr(new NDArrayView(iter->first.GetDataType(), gradientShape, AsDeviceDescriptor(computationNodePtr->ValuePtr()->GetDeviceId())), [](_ReferenceCounter* ptr) { delete ptr; });
                     auto mask = NDMaskPtr((nodeValue->Mask() != nullptr) ? new NDMask(nodeValue->Mask()->Shape(), nodeValue->Mask()->Device()) : nullptr, [](_ReferenceCounter* ptr) { delete ptr; });
                     gradientValuePtr = ValuePtr(new Value(data, mask), [](_ReferenceCounter* ptr) { delete ptr; });

@@ -726,7 +726,7 @@ namespace CNTK
                 break;
             }
             default:
-                LogicError("Unsupported DataType %s", DataTypeName(iter->first.DataType()));
+                LogicError("Unsupported DataType %s", DataTypeName(iter->first.GetDataType()));
                 break;
             }

@@ -741,7 +741,7 @@ namespace CNTK
     {
         // TODO: How about zero argument functions?
         // TODO: We need a better way to determine the ElementType for the network
-        auto dataType = arguments.m_map->begin()->second->Data()->DataType();
+        auto dataType = arguments.m_map->begin()->second->Data()->GetDataType();
         if (dataType == DataType::Float)
             GetComputationNetwork<float>(computeDevice, outputsToRetainBackwardStateFor);
         else
@@ -150,7 +150,7 @@ namespace CNTK
     static NDShape ReductionOpOutputShape(PrimitiveOpType op, const NDShape& operandShape, const std::vector<size_t>& reductionAxes)
     {
         if (reductionAxes.size() > operandShape.NumAxes())
-            RuntimeError("The number of reduction axes %d exceeds the number of axes in the operand shape %s of the reduction operation %s", reductionAxes.size(), AsString(operandShape).c_str(), PrimitiveOpTypeName(op));
+            RuntimeError("The number of reduction axes %d exceeds the number of axes in the operand shape %s of the reduction operation %s", (int)reductionAxes.size(), AsString(operandShape).c_str(), PrimitiveOpTypeName(op));

         size_t numOutputAxes = operandShape.NumAxes() - reductionAxes.size();
         std::vector<size_t> outputDims(numOutputAxes);
@@ -171,7 +171,7 @@ namespace CNTK
     std::vector<Variable> outputs;

     // TODO: We are just using the input[0]'s DataType as output node's DataType. This is not always correct
-    DataType outputDataType = inputs[0].DataType();
+    DataType outputDataType = inputs[0].GetDataType();

     // We currently require that the inputs' dynamic axes if any match
     std::vector<Axis> outputDynamicAxes = inputs[0].DynamicAxes();
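Note: the (int) cast fixes a format-string mismatch that gcc's -Wformat now flags via the printf attribute on ThrowFormatted: %d expects int, but size() returns size_t (64-bit on Linux x64), and passing the wrong type through varargs is undefined behavior. Casting matches %d; the alternative is %zu. Sketch:

    #include <cstdio>
    #include <vector>

    int main()
    {
        std::vector<int> axes = { 0, 1, 2 };
        // printf("%d\n", axes.size());       // UB: size_t passed where int is expected
        printf("%d\n", (int)axes.size());     // option 1: cast to match %d (what the commit does)
        printf("%zu\n", axes.size());         // option 2: C99/C++11 %zu for size_t
        return 0;
    }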
@@ -15,6 +15,6 @@ inline void FloatingPointVectorCompare(const std::vector<ElementType>& first, co
         ElementType rightVal = second[i];
         ElementType allowedTolerance = (std::max<ElementType>)((ElementType)absoluteTolerance, ((ElementType)relativeTolerance) * leftVal);
         if (std::abs(leftVal - rightVal) > allowedTolerance)
-            throw std::exception(message);
+            throw std::runtime_error(message);
     }
 }
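Note: all the throw std::exception("message") sites in the tests become std::runtime_error for the same reason: constructing std::exception from a string is an MSVC extension; the standard class only has a default constructor, so the code does not compile with libstdc++. Sketch:

    #include <cstdio>
    #include <stdexcept>

    int main()
    {
        try
        {
            // throw std::exception("boom");   // MSVC-only extension; no such ctor in the standard
            throw std::runtime_error("boom");  // portable: what() returns the message
        }
        catch (const std::exception& e)
        {
            std::printf("%s\n", e.what());
        }
        return 0;
    }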
@@ -50,13 +50,13 @@ void TestFeedForwardNetworkCreation(const DeviceDescriptor& device)

     // Now test the structure
     if (ffNet->Parameters().size() != ((numHiddenLayers * 2) + 1))
-        throw std::exception("TestFeedForwardNetworkCreation: Function does not have expected Parameter count");
+        throw std::runtime_error("TestFeedForwardNetworkCreation: Function does not have expected Parameter count");

     if (ffNet->Arguments().size() != 2)
-        throw std::exception("TestFeedForwardNetworkCreation: Function does not have expected Argument count");
+        throw std::runtime_error("TestFeedForwardNetworkCreation: Function does not have expected Argument count");

     if (ffNet->Outputs().size() != 3)
-        throw std::exception("TestFeedForwardNetworkCreation: Function does not have expected Output count");
+        throw std::runtime_error("TestFeedForwardNetworkCreation: Function does not have expected Output count");

     // Run Forward and backward a few times
     size_t iterationCount = 4;
@@ -109,7 +109,7 @@ void TestTimesAndPlus(size_t inputDim,
     Parameter timesParam(new NDArrayView((ElementType)0.5, { outputDim, inputDim }, device));
     Parameter plusParam(new NDArrayView((ElementType)1.2, { outputDim }, device));

-    Variable inputVar({ inputDim }, GetDataType<ElementType>(), L"input");
+    Variable inputVar({ inputDim }, AsDataType<ElementType>(), L"input");
     auto timesAndPlusFunc = Plus(plusParam, Times(timesParam, inputVar));

     srand(seed);
@@ -131,7 +131,7 @@ void TestTimesAndPlus(size_t inputDim,
         if (outputAllocationDevice.Type() == DeviceType::CPU)
             outputValue = new Value(new NDArrayView(outputShape, outputData.data(), outputData.size(), outputAllocationDevice, false));
         else
-            outputValue = new Value(new NDArrayView(GetDataType<ElementType>(), outputShape, outputAllocationDevice));
+            outputValue = new Value(new NDArrayView(AsDataType<ElementType>(), outputShape, outputAllocationDevice));
     }

     std::unordered_map<Variable, ValuePtr> outputs = { { timesAndPlusFunc->Output(), outputValue } };
@@ -148,7 +148,7 @@ void TestTimesAndPlus(size_t inputDim,
     else
     {
         NDArrayViewPtr cpuArrayView = new NDArrayView(outputShape, rootGradientsData.data(), rootGradientsData.size(), DeviceDescriptor::CPUDevice(), true);
-        NDArrayViewPtr gpuArrayView = new NDArrayView(GetDataType<ElementType>(), outputShape, device);
+        NDArrayViewPtr gpuArrayView = new NDArrayView(AsDataType<ElementType>(), outputShape, device);
        gpuArrayView->CopyFrom(*cpuArrayView);
        rootGradientValue = new Value(gpuArrayView);
     }
@@ -166,8 +166,8 @@ void TestTimesAndPlus(size_t inputDim,
         }
         else
         {
-            plusParameterGradientValue = new Value(new NDArrayView(GetDataType<ElementType>(), plusParam.Shape(), outputAllocationDevice));
-            timesParameterGradientValue = new Value(new NDArrayView(GetDataType<ElementType>(), timesParam.Shape(), outputAllocationDevice));
+            plusParameterGradientValue = new Value(new NDArrayView(AsDataType<ElementType>(), plusParam.Shape(), outputAllocationDevice));
+            timesParameterGradientValue = new Value(new NDArrayView(AsDataType<ElementType>(), timesParam.Shape(), outputAllocationDevice));
         }
     }

@@ -203,12 +203,12 @@ void TestTimesAndPlus(size_t inputDim,
     // Verify backward prop results
     if (device.Type() != DeviceType::CPU)
     {
-        NDArrayViewPtr cpuArrayView = new NDArrayView(GetDataType<ElementType>(), plusParam.Shape(), DeviceDescriptor::CPUDevice());
+        NDArrayViewPtr cpuArrayView = new NDArrayView(AsDataType<ElementType>(), plusParam.Shape(), DeviceDescriptor::CPUDevice());
         cpuArrayView->CopyFrom(*plusParameterGradientValue->Data());
         const ElementType* cpuArrayViewBuffer = cpuArrayView->DataBuffer<ElementType>();
         memcpy(plusParameterGradientData.data(), cpuArrayViewBuffer, plusParam.Shape().TotalSize() * sizeof(ElementType));

-        cpuArrayView = new NDArrayView(GetDataType<ElementType>(), timesParam.Shape(), DeviceDescriptor::CPUDevice());
+        cpuArrayView = new NDArrayView(AsDataType<ElementType>(), timesParam.Shape(), DeviceDescriptor::CPUDevice());
         cpuArrayView->CopyFrom(*timesParameterGradientValue->Data());
         cpuArrayViewBuffer = cpuArrayView->DataBuffer<ElementType>();
         memcpy(timesParameterGradientData.data(), cpuArrayViewBuffer, timesParam.Shape().TotalSize() * sizeof(ElementType));
@@ -216,7 +216,7 @@ void TestTimesAndPlus(size_t inputDim,

     for (size_t i = 0; i < outputDim; ++i)
         if (plusParameterGradientData[i] != numSamples)
-            throw std::exception("TestTimesAndPlus: Backprop prop results do not match expected results for Plus params gradients");
+            throw std::runtime_error("TestTimesAndPlus: Backprop prop results do not match expected results for Plus params gradients");

     std::vector<ElementType> expectedTimesParamsGradientValues(timesParam.Shape().TotalSize());
     for (size_t i = 0; i < inputDim; ++i)
@@ -18,7 +18,7 @@ void TestNDArrayView(size_t numAxes, const DeviceDescriptor& device)
     std::array<ElementType, 1> arrayData = { 3 };
     auto arrayDataView = new NDArrayView({}, arrayData);
     if (arrayDataView->DataBuffer<ElementType>() != arrayData.data())
-        throw std::exception("The DataBuffer of the NDArrayView does not match the original buffer it was created over");
+        throw std::runtime_error("The DataBuffer of the NDArrayView does not match the original buffer it was created over");

     std::vector<ElementType> data(viewShape.TotalSize());
     ElementType scale = 19.0;
@@ -28,19 +28,19 @@ void TestNDArrayView(size_t numAxes, const DeviceDescriptor& device)

     auto cpuDataView = new NDArrayView(viewShape, data);
     if (cpuDataView->DataBuffer<ElementType>() != data.data())
-        throw std::exception("The DataBuffer of the NDArrayView does not match the original buffer it was created over");
+        throw std::runtime_error("The DataBuffer of the NDArrayView does not match the original buffer it was created over");

     NDArrayViewPtr dataView;
     if ((device.Type() == DeviceType::CPU))
         dataView = cpuDataView;
     else
     {
-        dataView = new NDArrayView(GetDataType<ElementType>(), viewShape, device);
+        dataView = new NDArrayView(AsDataType<ElementType>(), viewShape, device);
         dataView->CopyFrom(*cpuDataView);
     }

     if (dataView->Device() != device)
-        throw std::exception("Device of NDArrayView does not match 'device' it was created on");
+        throw std::runtime_error("Device of NDArrayView does not match 'device' it was created on");

     // Test clone
     auto clonedView = dataView->DeepClone(false);
@@ -50,13 +50,13 @@ void TestNDArrayView(size_t numAxes, const DeviceDescriptor& device)
     if ((device.Type() == DeviceType::CPU))
     {
         if (dataView->DataBuffer<ElementType>() != data.data())
-            throw std::exception("The DataBuffer of the NDArrayView does not match the original buffer it was created over");
+            throw std::runtime_error("The DataBuffer of the NDArrayView does not match the original buffer it was created over");

         first = clonedView->WritableDataBuffer<ElementType>();
     }
     else
     {
-        temp1CpuDataView = new NDArrayView(GetDataType<ElementType>(), viewShape, DeviceDescriptor::CPUDevice());
+        temp1CpuDataView = new NDArrayView(AsDataType<ElementType>(), viewShape, DeviceDescriptor::CPUDevice());
         temp1CpuDataView->CopyFrom(*clonedView);

         first = temp1CpuDataView->WritableDataBuffer<ElementType>();
@@ -65,7 +65,7 @@ void TestNDArrayView(size_t numAxes, const DeviceDescriptor& device)
     for (size_t i = 0; i < viewShape.TotalSize(); ++i)
     {
         if (first[i] != second[i])
-            throw std::exception("The contents of the clone do not match expected");
+            throw std::runtime_error("The contents of the clone do not match expected");
     }

     first[0] += 1;
@@ -79,28 +79,28 @@ void TestNDArrayView(size_t numAxes, const DeviceDescriptor& device)
     }
     else
     {
-        temp1CpuDataView = new NDArrayView(GetDataType<ElementType>(), viewShape, DeviceDescriptor::CPUDevice());
+        temp1CpuDataView = new NDArrayView(AsDataType<ElementType>(), viewShape, DeviceDescriptor::CPUDevice());
         temp1CpuDataView->CopyFrom(*clonedView);
         first = temp1CpuDataView->WritableDataBuffer<ElementType>();

-        temp2CpuDataView = new NDArrayView(GetDataType<ElementType>(), viewShape, DeviceDescriptor::CPUDevice());
+        temp2CpuDataView = new NDArrayView(AsDataType<ElementType>(), viewShape, DeviceDescriptor::CPUDevice());
         temp2CpuDataView->CopyFrom(*dataView);
         second = temp2CpuDataView->DataBuffer<ElementType>();
     }

     if (first[0] != (second[0] + 1))
-        throw std::exception("The clonedView's contents do not match expected");
+        throw std::runtime_error("The clonedView's contents do not match expected");

     // Test alias
     auto aliasView = clonedView->Alias(true);
     const ElementType* aliasViewBuffer = aliasView->DataBuffer<ElementType>();
     const ElementType* clonedDataBuffer = clonedView->DataBuffer<ElementType>();
     if (aliasViewBuffer != clonedDataBuffer)
-        throw std::exception("The buffers underlying the alias view and the view it is an alias of are different!");
+        throw std::runtime_error("The buffers underlying the alias view and the view it is an alias of are different!");

     clonedView->CopyFrom(*dataView);
     if (aliasViewBuffer != clonedDataBuffer)
-        throw std::exception("The buffers underlying the alias view and the view it is an alias of are different!");
+        throw std::runtime_error("The buffers underlying the alias view and the view it is an alias of are different!");

     // Test readonliness
     auto verifyException = [](const std::function<void()>& functionToTest) {
@@ -115,7 +115,7 @@ void TestNDArrayView(size_t numAxes, const DeviceDescriptor& device)
         }

         if (!error)
-            throw std::exception("Was incorrectly able to get a writable buffer pointer from a readonly view");
+            throw std::runtime_error("Was incorrectly able to get a writable buffer pointer from a readonly view");
     };

     // Should not be able to get the WritableDataBuffer for a read-only view
@@ -184,14 +184,14 @@ void TestSparseCSCArrayView(size_t numAxes, const DeviceDescriptor& device)
     NDArrayView denseCPUTensor(viewShape, copiedDenseData.data(), copiedDenseData.size(), DeviceDescriptor::CPUDevice());
     denseCPUTensor.CopyFrom(sparseCSCArrayView);
     if (copiedDenseData != referenceDenseData)
-        throw std::exception("The contents of the dense vector that the sparse NDArrayView is copied into do not match the expected values");
+        throw std::runtime_error("The contents of the dense vector that the sparse NDArrayView is copied into do not match the expected values");

-    NDArrayView emptySparseCSCArrayView(GetDataType<ElementType>(), StorageFormat::SparseCSC, viewShape, device);
+    NDArrayView emptySparseCSCArrayView(AsDataType<ElementType>(), StorageFormat::SparseCSC, viewShape, device);
     emptySparseCSCArrayView.CopyFrom(denseCPUTensor);
     NDArrayView newDenseCPUTensor(viewShape, copiedDenseData.data(), copiedDenseData.size(), DeviceDescriptor::CPUDevice());
     newDenseCPUTensor.CopyFrom(emptySparseCSCArrayView);
     if (copiedDenseData != referenceDenseData)
-        throw std::exception("The contents of the dense vector that the sparse NDArrayView is copied into do not match the expected values");
+        throw std::runtime_error("The contents of the dense vector that the sparse NDArrayView is copied into do not match the expected values");
 }

 void NDArrayViewTests()
@ -145,10 +145,10 @@ void TestRecurrentNetworkCreation(const DeviceDescriptor& device)
|
|||
const size_t hiddenDim = 512;
|
||||
const size_t numOutputClasses = 9304;
|
||||
|
||||
Variable features({ inputDim }, GetDataType<ElementType>(), L"Features");
|
||||
Variable features({ inputDim }, AsDataType<ElementType>(), L"Features");
|
||||
auto classifierOutputFunction = LSTMNet<ElementType>(features, cellDim, hiddenDim, numOutputClasses, numLSTMLayers, device);
|
||||
|
||||
Variable labelsVar = Variable({ numOutputClasses }, GetDataType<ElementType>(), L"Labels");
|
||||
Variable labelsVar = Variable({ numOutputClasses }, AsDataType<ElementType>(), L"Labels");
|
||||
auto trainingLossFunction = CrossEntropyWithSoftmax(classifierOutputFunction, labelsVar, L"lossFunction");
|
||||
auto predictionFunction = PredictionError(classifierOutputFunction, labelsVar, L"predictionError");
|
||||
|
||||
|
@ -156,13 +156,13 @@ void TestRecurrentNetworkCreation(const DeviceDescriptor& device)
|
|||
|
||||
// Now test the structure
|
||||
if (LSTMClassifier->Arguments().size() != 2)
|
||||
throw std::exception("TestFeedForwardNetworkCreation: Function does not have expected Argument count");
|
||||
throw std::runtime_error("TestFeedForwardNetworkCreation: Function does not have expected Argument count");
|
||||
|
||||
if (LSTMClassifier->Outputs().size() != 3)
|
||||
throw std::exception("TestFeedForwardNetworkCreation: Function does not have expected Output count");
|
||||
throw std::runtime_error("TestFeedForwardNetworkCreation: Function does not have expected Output count");
|
||||
|
||||
if (LSTMClassifier->Parameters().size() != ((numLSTMLayers * 28) + 3))
|
||||
throw std::exception("TestFeedForwardNetworkCreation: Function does not have expected Parameter count");
|
||||
throw std::runtime_error("TestFeedForwardNetworkCreation: Function does not have expected Parameter count");
|
||||
|
||||
// Run Forward and backward a few times
|
||||
size_t iterationCount = 3;
|
||||
|
@ -181,7 +181,7 @@ void TestRecurrentNetworkCreation(const DeviceDescriptor& device)
|
|||
maxActualSequenceLength = sequenceLengths[i];
|
||||
}
|
||||
|
||||
std::vector<const std::vector<ElementType>> inputSequences;
|
||||
std::vector<std::vector<ElementType>> inputSequences;
|
||||
for (size_t i = 0; i < numSequences; ++i)
|
||||
{
|
||||
std::vector<ElementType> currentSequence(inputDim * sequenceLengths[i]);
|
||||
|
@ -193,7 +193,7 @@ void TestRecurrentNetworkCreation(const DeviceDescriptor& device)
|
|||
|
||||
ValuePtr inputValue = Value::Create({ inputDim }, inputSequences, device, true);
|
||||
|
||||
std::vector<const std::vector<ElementType>> labelsData;
|
||||
std::vector<std::vector<ElementType>> labelsData;
|
||||
for (size_t i = 0; i < numSequences; ++i)
|
||||
{
|
||||
std::vector<ElementType> currentSequence(numOutputClasses * sequenceLengths[i]);
|
||||
|
@ -235,12 +235,12 @@ void TestSimpleRecurrence(size_t inputDim,
|
|||
unsigned int seed = 1)
|
||||
{
|
||||
if (useOneHotSparseInputs && !useSparseInputs)
|
||||
throw std::exception("useOneHotSparseInputs option can only be true when useSparseInputs is true");
|
||||
throw std::runtime_error("useOneHotSparseInputs option can only be true when useSparseInputs is true");
|
||||
|
||||
Parameter timesParam(new NDArrayView((ElementType)0.5, { outputDim, inputDim }, device));
|
||||
Parameter plusParam(new NDArrayView((ElementType)0.1, { outputDim }, device));
|
||||
|
||||
Variable inputVar({ inputDim }, useSparseInputs, GetDataType<ElementType>(), true, L"input");
|
||||
Variable inputVar({ inputDim }, useSparseInputs, AsDataType<ElementType>(), true, L"input");
|
||||
|
||||
auto placeholder = Placeholder({ outputDim });
|
||||
auto plusOutput = Plus(plusParam, Plus(placeholder, Times(timesParam, inputVar)));
|
||||
|
@ -274,7 +274,7 @@ void TestSimpleRecurrence(size_t inputDim,
|
|||
std::vector<ElementType> inputData(inputDim * totalNumInputSamples, useSparseInputs ? 0 : std::numeric_limits<ElementType>::quiet_NaN());
|
||||
if (useOneHotSparseInputs)
|
||||
{
|
||||
std::vector<const std::vector<size_t>> oneHotSequences;
|
||||
std::vector<std::vector<size_t>> oneHotSequences;
|
||||
for (size_t i = 0; i < numSequences; ++i)
|
||||
{
|
||||
std::vector<size_t> currentSequence(sequenceLengths[i]);
|
||||
|
@@ -314,7 +314,7 @@ void TestSimpleRecurrence(size_t inputDim,
     NDArrayViewPtr inputValueData = new NDArrayView(inputShape, inputData.data(), inputData.size(), DeviceDescriptor::CPUDevice(), true);
     if (useSparseInputs)
     {
-        NDArrayViewPtr sparseInputValueData = new NDArrayView(GetDataType<ElementType>(), StorageFormat::SparseCSC, inputShape, DeviceDescriptor::CPUDevice());
+        NDArrayViewPtr sparseInputValueData = new NDArrayView(AsDataType<ElementType>(), StorageFormat::SparseCSC, inputShape, DeviceDescriptor::CPUDevice());
         sparseInputValueData->CopyFrom(*inputValueData);
         inputValueData = sparseInputValueData->Alias(true);
     }
@@ -423,7 +423,7 @@ void TestSimpleRecurrence(size_t inputDim,
 
     for (size_t k = 0; k < plusParam.Shape().TotalSize(); ++k)
         if (plusParameterGradientData[k] != expectedPlusParameterGradientValue)
-            throw std::exception("TestSimpleRecurrence: Backprop prop results do not match expected results for Plus params gradients");
+            throw std::runtime_error("TestSimpleRecurrence: Backprop prop results do not match expected results for Plus params gradients");
 
     std::vector<ElementType> expectedTimesParamsGradientValues(timesParam.Shape().TotalSize(), 0);
     for (size_t i = 0; i < numSequences; ++i)
@@ -20,8 +20,8 @@ void TestTensorPlus(size_t numAxesLeftOperand, size_t numAxesRightOperand, const
     for (size_t i = std::min(numAxesLeftOperand, numAxesRightOperand); i < numAxesRightOperand; ++i)
         rightInputShape[i] = (rand() % maxDimSize) + 1;
 
-    Variable leftInputVar(leftInputShape, GetDataType<ElementType>(), L"leftInput");
-    Variable rightInputVar(rightInputShape, GetDataType<ElementType>(), L"rightInput");
+    Variable leftInputVar(leftInputShape, AsDataType<ElementType>(), L"leftInput");
+    Variable rightInputVar(rightInputShape, AsDataType<ElementType>(), L"rightInput");
 
     auto plusFunc = Plus(leftInputVar, rightInputVar);
@@ -88,7 +88,7 @@ namespace CNTK
 
     template <typename ElementType>
     NDArrayView::NDArrayView(const NDShape& viewShape, const SparseIndexType* colStarts, const SparseIndexType* rowIndices, const ElementType* nonZeroValues, size_t numNonZeroValues, const DeviceDescriptor& device, bool readOnly/* = false*/)
-        : NDArrayView(GetDataType<ElementType>(), device, StorageFormat::SparseCSC, viewShape, false, AllocateTensorView<ElementType>(viewShape, StorageFormat::SparseCSC, device))
+        : NDArrayView(AsDataType<ElementType>(), device, StorageFormat::SparseCSC, viewShape, false, AllocateTensorView<ElementType>(viewShape, StorageFormat::SparseCSC, device))
     {
         if ((colStarts == nullptr) || (rowIndices == nullptr) || (nonZeroValues == nullptr) || (numNonZeroValues == 0) || (numNonZeroValues > viewShape.TotalSize()))
             InvalidArgument("Invalid sparse CSC format initial data specified for NDArrayView construction");
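For reference, the colStarts/rowIndices/nonZeroValues triple consumed by this constructor is the standard compressed sparse column (CSC) layout. A toy example of the triple for a concrete 2x3 matrix (values invented for illustration; the real constructor uses SparseIndexType rather than int):

#include <cstddef>

// Matrix (2 rows x 3 columns):
//   [ 1 0 2 ]
//   [ 0 3 0 ]
int main()
{
    // nonZeroValues: stored entries in column-major order
    float nonZeroValues[] = { 1.0f, 3.0f, 2.0f };
    // rowIndices[k]: the row of nonZeroValues[k]
    int rowIndices[] = { 0, 1, 0 };
    // colStarts[j] .. colStarts[j+1]-1: which entries belong to column j
    int colStarts[] = { 0, 1, 2, 3 };

    std::size_t numNonZeroValues = 3;
    // Sketch of how an NDArrayView of shape { 2, 3 } would consume this
    // (device assumed available):
    //   NDArrayView view({ 2, 3 }, colStarts, rowIndices, nonZeroValues,
    //                    numNonZeroValues, device, /*readOnly =*/ true);
    return static_cast<int>(numNonZeroValues) - 3; // returns 0
}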
@@ -194,7 +194,7 @@ namespace CNTK
     template <typename ElementType>
     const TensorView<ElementType>* NDArrayView::GetTensorView() const
     {
-        if (GetDataType<ElementType>() != m_dataType)
+        if (AsDataType<ElementType>() != m_dataType)
             LogicError("NDArrayView::GetWritableTensorView: The specified ElementType %s does not match the DataType %s", typeid(ElementType).name(), DataTypeName(m_dataType));
 
         return (const TensorView<ElementType>*)(m_tensorView);
@@ -211,7 +211,7 @@ namespace CNTK
 
     NDArrayViewPtr NDArrayView::DeepClone(bool readOnly/* = false*/) const
     {
-        NDArrayViewPtr newView(new NDArrayView(this->DataType(), this->StorageFormat(), this->Shape(), this->Device()), [](_ReferenceCounter* ptr) { delete ptr; });
+        NDArrayViewPtr newView(new NDArrayView(this->GetDataType(), this->GetStorageFormat(), this->Shape(), this->Device()), [](_ReferenceCounter* ptr) { delete ptr; });
         switch (m_dataType)
         {
         case DataType::Float:
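The accessors DataType() and StorageFormat() collide with the enum types DataType and StorageFormat: inside the class, a member function hides the namespace-scope type of the same name, so expressions such as DataType::Float in the switch above become ill-formed under GCC's stricter lookup even though MSVC accepted them. Renaming the accessors to GetDataType()/GetStorageFormat() keeps the type names visible. A hypothetical reduction with stand-in names, not the real CNTK code:

enum class DataType { Float, Double };

class View
{
public:
    // DataType DataType() const;  // would hide ::DataType within the class
    DataType GetDataType() const { return m_dataType; } // portable rename

    bool IsFloat() const
    {
        return GetDataType() == DataType::Float; // DataType still names the enum
    }

private:
    DataType m_dataType = DataType::Float;
};

int main() { return View().IsFloat() ? 0 : 1; }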
@@ -285,7 +285,7 @@ namespace CNTK
         break;
     }
 
-    auto aliasView = new NDArrayView(DataType(), Device(), StorageFormat(), Shape(), IsReadOnly() || readOnly, tensorView);;
+    auto aliasView = new NDArrayView(GetDataType(), Device(), GetStorageFormat(), Shape(), IsReadOnly() || readOnly, tensorView);;
     return NDArrayViewPtr(aliasView, [](_ReferenceCounter* ptr) { delete ptr; });
 }
@@ -303,7 +303,7 @@ namespace CNTK
     template <typename ElementType>
     const ElementType* NDArrayView::DataBuffer() const
     {
-        if (GetDataType<ElementType>() != m_dataType)
+        if (AsDataType<ElementType>() != m_dataType)
             LogicError("The specified ElementType %s does not match the DataType %s", typeid(ElementType).name(), DataTypeName(m_dataType));
 
         if (IsSparse())
@@ -322,7 +322,7 @@ namespace CNTK
     auto randomUniformMatrix = std::make_shared<Matrix<ElementType>>(Matrix<ElementType>::RandomUniform(matrixDims.first, matrixDims.second, AsCNTKImplDeviceId(device), (ElementType)rangeStart, (ElementType)rangeEnd, seed));
     auto tensorView = new TensorView<ElementType>(randomUniformMatrix, AsTensorShape(shape));
 
-    auto view = new NDArrayView(GetDataType<ElementType>(), device, StorageFormat::Dense, shape, false, tensorView);
+    auto view = new NDArrayView(AsDataType<ElementType>(), device, StorageFormat::Dense, shape, false, tensorView);
     return NDArrayViewPtr(view, [](_ReferenceCounter* ptr) { delete ptr; });
 }
@@ -336,8 +336,11 @@ namespace CNTK
     template CNTK_API float* NDArrayView::WritableDataBuffer<float>();
     template CNTK_API double* NDArrayView::WritableDataBuffer<double>();
 
-    template CNTK_API NDArrayView::NDArrayView(const NDShape& viewShape, const SparseIndexType* colStarts, const SparseIndexType* rowIndices, const float* nonZeroValues, size_t numNonZeroValues, const DeviceDescriptor& device, bool readOnly/* = false*/);
-    template CNTK_API NDArrayView::NDArrayView(const NDShape& viewShape, const SparseIndexType* colStarts, const SparseIndexType* rowIndices, const double* nonZeroValues, size_t numNonZeroValues, const DeviceDescriptor& device, bool readOnly/* = false*/);
+    template std::shared_ptr<const Matrix<float>> NDArrayView::GetMatrix(size_t rowColSplitPoint/* = AutoSelectRowColSplitPoint*/) const;
+    template std::shared_ptr<const Matrix<double>> NDArrayView::GetMatrix(size_t rowColSplitPoint/* = AutoSelectRowColSplitPoint*/) const;
+
+    template std::shared_ptr<Matrix<float>> NDArrayView::GetWritableMatrix(size_t rowColSplitPoint/* = AutoSelectRowColSplitPoint*/);
+    template std::shared_ptr<Matrix<double>> NDArrayView::GetWritableMatrix(size_t rowColSplitPoint/* = AutoSelectRowColSplitPoint*/);
+
+    template CNTK_API NDArrayView::NDArrayView(const NDShape& viewShape, const SparseIndexType* colStarts, const SparseIndexType* rowIndices, const float* nonZeroValues, size_t numNonZeroValues, const DeviceDescriptor& device, bool readOnly/* = false*/);
+    template CNTK_API NDArrayView::NDArrayView(const NDShape& viewShape, const SparseIndexType* colStarts, const SparseIndexType* rowIndices, const double* nonZeroValues, size_t numNonZeroValues, const DeviceDescriptor& device, bool readOnly/* = false*/);
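These directives (and the _SimpleVector/_SimpleSet/_SimpleMap and GPUMatrix<char> hunks below) are explicit template instantiations: the template definitions live in the .cpp files, so the shared library only emits symbols for the template arguments instantiated there, and clients that link against libcntklibrary would otherwise get undefined references. A minimal sketch of the pattern with toy types (assumed names, not the real CNTK classes):

// header (what clients of the shared library see):
template <typename ElementType>
struct Buffer
{
    ElementType* Data(); // defined in the .cpp, not in the header
};

// .cpp (definition plus explicit instantiations):
template <typename ElementType>
ElementType* Buffer<ElementType>::Data() { return nullptr; }

template struct Buffer<float>;  // emits Buffer<float>::Data into this object file
template struct Buffer<double>; // emits Buffer<double>::Data into this object file

int main()
{
    Buffer<float> b;
    return b.Data() == nullptr ? 0 : 1;
}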
@@ -122,8 +122,11 @@ namespace CNTK
    // Explicit template instantiations
    template class _SimpleVector<Variable>;
    template class _SimpleVector<size_t>;
+   template class _SimpleVector<Axis>;
+   template class _SimpleVector<FunctionPtr>;
+
+   template bool operator==(const _SimpleVector<size_t>& first, const _SimpleVector<size_t>& second);
 
 #pragma endregion _SimpleVector
 
 #pragma region _SimpleSet
@@ -215,9 +218,11 @@ namespace CNTK
    // Explicit template instantiations
    template class _SimpleSet<FunctionPtr>;
    template class _SimpleSet<Variable>;
+   template class _SimpleSet<Placeholder>;
    template class _SimpleSet<const Function*>;
 
    template bool operator==(const _SimpleSet<Variable>& first, const _SimpleSet<Variable>& second);
+   template bool operator==(const _SimpleSet<Placeholder>& first, const _SimpleSet<Placeholder>& second);
 
 #pragma endregion _SimpleSet
@@ -314,6 +319,7 @@ namespace CNTK
    }
 
    // Explicit template instantiations
    template class _SimpleMap<Variable, ValuePtr>;
    template class _SimpleMap<Variable, const ValuePtr>;
+   template class _SimpleMap<Placeholder, Variable>;
@@ -75,7 +75,7 @@ namespace CNTK
        : m_valueType(GetValueType<T>())
    {
        static_assert(std::is_same<T, NDShape>::value ||
-                     std::is_same<T, _Internal::_SimpleVector<DictionaryValue>::value,
+                     std::is_same<T, _Internal::_SimpleVector<DictionaryValue>>::value,
                      "Unsupported ValueType");
 
        AllocateDataPtr(value);
@@ -28,7 +28,7 @@ namespace CNTK
    }
 
    template <typename T>
-   static NDMaskPtr CreateMask(size_t sampleSize, const std::vector<const std::vector<T>>& sequences, const DeviceDescriptor& device)
+   static NDMaskPtr CreateMask(size_t sampleSize, const std::vector<std::vector<T>>& sequences, const DeviceDescriptor& device)
    {
        size_t numSequences = sequences.size();
        std::vector<size_t> sequenceLengths(numSequences);
@@ -58,7 +58,7 @@ namespace CNTK
    }
 
    template <typename ElementType>
-   /*static*/ ValuePtr Value::Create(size_t vocabularySize, const std::vector<const std::vector<size_t>>& oneHotSequences, const DeviceDescriptor& device, bool readOnly/* = false*/)
+   /*static*/ ValuePtr Value::Create(size_t vocabularySize, const std::vector<std::vector<size_t>>& oneHotSequences, const DeviceDescriptor& device, bool readOnly/* = false*/)
    {
        NDMaskPtr deviceValueMask = CreateMask(1, oneHotSequences, device);
        size_t maxSequenceLength = (deviceValueMask == nullptr) ? oneHotSequences[0].size() : deviceValueMask->Shape()[0];
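This overload takes each sequence as a vector of vocabulary indices and expands each index into a sparse one-hot sample of width vocabularySize. A toy illustration of the input it expects (identifiers and the commented usage are assumptions for illustration):

#include <cstddef>
#include <vector>

int main()
{
    std::size_t vocabularySize = 5;
    // Two sequences of word ids; lengths may differ per sequence.
    std::vector<std::vector<std::size_t>> oneHotSequences = { { 0, 3, 1 }, { 2 } };

    // Conceptually, id 3 with vocabularySize 5 denotes the one-hot sample
    // [0, 0, 0, 1, 0]. Sketch of the call (device assumed available):
    //   auto value = Value::Create(vocabularySize, oneHotSequences,
    //                              DeviceDescriptor::CPUDevice(), true);
    return oneHotSequences[0][1] == 3 ? 0 : 1;
}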
@@ -91,7 +91,7 @@ namespace CNTK
    }
 
    template <typename ElementType>
-   /*static*/ ValuePtr Value::Create(const NDShape& sampleShape, const std::vector<const std::vector<ElementType>>& sequences, const DeviceDescriptor& device, bool readOnly/* = false*/)
+   /*static*/ ValuePtr Value::Create(const NDShape& sampleShape, const std::vector<std::vector<ElementType>>& sequences, const DeviceDescriptor& device, bool readOnly/* = false*/)
    {
        size_t sampleSize = sampleShape.TotalSize();
        NDMaskPtr deviceValueMask = CreateMask(sampleSize, sequences, device);
@@ -99,7 +99,7 @@ namespace CNTK
 
        size_t numSequences = sequences.size();
        NDShape valueDataShape = sampleShape.AppendShape({ maxSequenceLength, numSequences });
-       NDArrayViewPtr valueData(new NDArrayView(GetDataType<ElementType>(), valueDataShape, DeviceDescriptor::CPUDevice()), [](_ReferenceCounter* ptr) { delete ptr; });
+       NDArrayViewPtr valueData(new NDArrayView(AsDataType<ElementType>(), valueDataShape, DeviceDescriptor::CPUDevice()), [](_ReferenceCounter* ptr) { delete ptr; });
        ElementType* dataBuffer = valueData->WritableDataBuffer<ElementType>();
        for (size_t i = 0; i < numSequences; ++i)
            std::copy(sequences[i].data(), sequences[i].data() + sequences[i].size(), dataBuffer + (maxSequenceLength * i * sampleSize));
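The std::copy above packs sequence i at offset maxSequenceLength * i * sampleSize, so shorter sequences leave trailing slots unused, and the NDMask built by CreateMask records which packed slots hold real samples. A toy sketch of that layout and mask (plain C++, not the CNTK NDMask API):

#include <cstdio>
#include <vector>

int main()
{
    std::vector<std::vector<float>> sequences = { { 1, 2, 3 }, { 4 } };
    std::size_t maxLen = 3; // longest sequence length

    // Pack into a rectangular [maxLen x numSequences] layout and record
    // which entries are valid (1) versus padding (0).
    std::vector<char> mask(maxLen * sequences.size(), 0);
    for (std::size_t i = 0; i < sequences.size(); ++i)
        for (std::size_t t = 0; t < sequences[i].size(); ++t)
            mask[i * maxLen + t] = 1;

    for (std::size_t i = 0; i < sequences.size(); ++i)
    {
        for (std::size_t t = 0; t < maxLen; ++t)
            std::printf("%d ", mask[i * maxLen + t]);
        std::printf("\n"); // prints "1 1 1" then "1 0 0"
    }
    return 0;
}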
@@ -114,7 +114,7 @@ namespace CNTK
        }
        else
        {
-           deviceValueData = NDArrayViewPtr(new NDArrayView(GetDataType<ElementType>(), valueDataShape, device), [](_ReferenceCounter* ptr) { delete ptr; });
+           deviceValueData = NDArrayViewPtr(new NDArrayView(AsDataType<ElementType>(), valueDataShape, device), [](_ReferenceCounter* ptr) { delete ptr; });
            deviceValueData->CopyFrom(*valueData);
            if (readOnly)
                deviceValueData = deviceValueData->Alias(true);
@@ -171,8 +171,8 @@ namespace CNTK
    }
 
    // Explicit template instantiations
-   template /*static*/ CNTK_API ValuePtr Value::Create<float>(const NDShape& sampleShape, const std::vector<const std::vector<float>>& sequences, const DeviceDescriptor& device, bool readOnly/* = false*/);
-   template /*static*/ CNTK_API ValuePtr Value::Create<double>(const NDShape& sampleShape, const std::vector<const std::vector<double>>& sequences, const DeviceDescriptor& device, bool readOnly/* = false*/);
-   template /*static*/ CNTK_API ValuePtr Value::Create<float>(size_t vocabSize, const std::vector<const std::vector<size_t>>& oneHotSequences, const DeviceDescriptor& device, bool readOnly/* = false*/);
-   template /*static*/ CNTK_API ValuePtr Value::Create<double>(size_t vocabSize, const std::vector<const std::vector<size_t>>& oneHotSequences, const DeviceDescriptor& device, bool readOnly/* = false*/);
+   template /*static*/ CNTK_API ValuePtr Value::Create<float>(const NDShape& sampleShape, const std::vector<std::vector<float>>& sequences, const DeviceDescriptor& device, bool readOnly/* = false*/);
+   template /*static*/ CNTK_API ValuePtr Value::Create<double>(const NDShape& sampleShape, const std::vector<std::vector<double>>& sequences, const DeviceDescriptor& device, bool readOnly/* = false*/);
+   template /*static*/ CNTK_API ValuePtr Value::Create<float>(size_t vocabSize, const std::vector<std::vector<size_t>>& oneHotSequences, const DeviceDescriptor& device, bool readOnly/* = false*/);
+   template /*static*/ CNTK_API ValuePtr Value::Create<double>(size_t vocabSize, const std::vector<std::vector<size_t>>& oneHotSequences, const DeviceDescriptor& device, bool readOnly/* = false*/);
 }
@@ -4412,6 +4412,7 @@ template void GPUMatrix<char>::SetValue(GPUMatrix<char> const&);
 template void GPUMatrix<char>::CopySection(size_t numRows, size_t numCols, char* dst, size_t colStride) const;
+template void GPUMatrix<char>::Reshape(const size_t, const size_t);
 template GPUMatrix<char>& GPUMatrix<char>::operator*=(char);
 template DEVICEID_TYPE GPUMatrix<char>::PrepareDevice(DEVICEID_TYPE deviceId) const;
 
 template GPUMatrix<int>::GPUMatrix(const size_t, const size_t, int, int*, const size_t);
 template GPUMatrix<int>::~GPUMatrix();