diff --git a/Examples/Image/MNIST/Config/02_Convolution.cntk b/Examples/Image/MNIST/Config/02_Convolution.cntk
index 94a4bbba2..1124f6fc8 100644
--- a/Examples/Image/MNIST/Config/02_Convolution.cntk
+++ b/Examples/Image/MNIST/Config/02_Convolution.cntk
@@ -12,7 +12,7 @@ ModelDir = "$OutputDir$/Models"
 
 deviceId = 0
 imageLayout = "cudnn"
 # override the above as follows when running on CPU:
-deviceId = -1
+# deviceId = -1
 
 command = train:test
diff --git a/Examples/Image/MNIST/Config/02_Convolution.ndl b/Examples/Image/MNIST/Config/02_Convolution.ndl
index 8088dd9b0..14cb12e42 100644
--- a/Examples/Image/MNIST/Config/02_Convolution.ndl
+++ b/Examples/Image/MNIST/Config/02_Convolution.ndl
@@ -31,6 +31,7 @@ DNN=[
     pool1H = 2
     pool1hStride = 2
     pool1vStride = 2
+    # MaxPooling is a standard NDL node.
     pool1 = MaxPooling(conv1, pool1W, pool1H, pool1hStride, pool1vStride, imageLayout=$imageLayout$)
 
     # conv2
@@ -41,16 +42,15 @@ DNN=[
     vStride2 = 1
     # weight[cMap2, kW2 * kH2 * cMap1]
     # ConvNDReLULayer is defined in Macros.ndl
-    #conv2 = ConvNDReLULayer(pool1, kW2, kH2, cMap1, 400, cMap2, hStride2, vStride2, 10, 1)
-    conv2 = Conv2DReLULayer(pool1, cMap2, 400, kW2, kH2, hStride2, vStride2, 10, 1)
-
+    conv2 = ConvNDReLULayer(pool1, kW2, kH2, cMap1, 400, cMap2, hStride2, vStride2, 10, 1)
+
     # pool2
     pool2W = 2
     pool2H = 2
     pool2hStride = 2
     pool2vStride = 2
-    #pool2 = MaxNDPooling(conv2, pool2W, pool2H, pool2hStride, pool2vStride, imageLayout=$imageLayout$)
-    pool2 = MaxPooling(conv2, pool2W, pool2H, pool2hStride, pool2vStride, imageLayout=$imageLayout$)
+    # MaxNDPooling is defined in Macros.ndl
+    pool2 = MaxNDPooling(conv2, pool2W, pool2H, pool2hStride, pool2vStride, imageLayout=$imageLayout$)
 
     h1Dim = 128
     # DNNImageSigmoidLayer and DNNLayer are defined in Macros.ndl
diff --git a/Makefile b/Makefile
index 9494e5e8b..eb8d7d4c2 100644
--- a/Makefile
+++ b/Makefile
@@ -255,7 +255,7 @@ MATH_SRC =\
 	$(SOURCEDIR)/Math/TensorView.cpp \
 	$(SOURCEDIR)/Math/CUDAPageLockedMemAllocator.cpp \
 	$(SOURCEDIR)/Math/ConvolutionEngine.cpp \
-	$(SOURCEDIR)/Math/BatchNormalizationEngine.cpp \
+	$(SOURCEDIR)/Math/BatchNormalizationEngine.cpp \
 
 ifdef CUDA_PATH
 MATH_SRC +=\
@@ -264,9 +264,9 @@ MATH_SRC +=\
 	$(SOURCEDIR)/Math/GPUSparseMatrix.cu \
 	$(SOURCEDIR)/Math/GPUWatcher.cu \
 	$(SOURCEDIR)/Math/MatrixQuantizerGPU.cu \
-	$(SOURCEDIR)/Math/CuDnnCommon.cu \
+	$(SOURCEDIR)/Math/CuDnnCommon.cu \
 	$(SOURCEDIR)/Math/CuDnnConvolutionEngine.cu \
-	$(SOURCEDIR)/Math/CuDnnBatchNormalization.cu \
+	$(SOURCEDIR)/Math/CuDnnBatchNormalization.cu \
 	$(SOURCEDIR)/Math/GPUDataTransferer.cpp \
 
 else
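
For readers without Macros.ndl open: the NDL hunks above move conv2 and pool2 from the 2D-specific nodes (Conv2DReLULayer, MaxPooling) to the N-dimensional helpers ConvNDReLULayer and MaxNDPooling, which the in-file comments say are defined in Macros.ndl. The sketch below shows roughly how such helpers can be composed from standard NDL nodes (LearnableParameter, Convolution, Pooling, Plus, RectifiedLinear). It is a hedged reconstruction, not the repository's actual Macros.ndl source; the exact ND kernel/stride shape syntax and the imageLayout plumbing may differ.

# Illustrative sketch only (assumed NDL shape syntax); see Macros.ndl for the real definitions.
# inWCount is the per-output-map weight count, kW * kH * inMap; the 400 at the conv2
# call site is kW2 * kH2 * cMap1, per the weight-shape comment in the config.
ConvNDReLULayer(inp, kW, kH, inMap, inWCount, outMap, hStride, vStride, wScale, bValue) = [
    W = LearnableParameter(outMap, inWCount, init="uniform", initValueScale=wScale)  # weight[outMap, inWCount]
    b = LearnableParameter(outMap, 1, init="fixedValue", value=bValue)               # bias[outMap, 1]
    # ND convolution: the kernel spans the full input depth, so the depth stride equals inMap.
    c = Convolution(W, inp, {kW, kH, inMap}, mapCount=outMap, stride={hStride, vStride, inMap}, sharing={true, true, true}, autoPadding={true, true, false})
    z = Plus(c, b)
    y = RectifiedLinear(z)
]

# ND max pooling over kW x kH windows, applied per feature map (depth kernel = 1).
MaxNDPooling(inp, kW, kH, hStride, vStride) = [
    p = Pooling(inp, "max", {kW, kH, 1}, stride={hStride, vStride, 1}, autoPadding={true, true, false})
]

The real call site also passes imageLayout=$imageLayout$ to MaxNDPooling, which the actual macro would forward to the underlying node; that forwarding is omitted from this sketch for brevity.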