Merge branch 'master' into qiwye/asgd-dev

This commit is contained in:
unknown 2016-11-11 11:01:05 +08:00
Родитель 8fad703f27 cbcea1b534
Коммит a62037dbc7
18 изменённых файлов: 466 добавлений и 74 удалений

Просмотреть файл

@ -2320,8 +2320,8 @@ Global
{5D29C76D-648A-456F-920D-48230F2FB3C8}.Release_NoOpt|Any CPU.ActiveCfg = Release_CpuOnly|x64
{5D29C76D-648A-456F-920D-48230F2FB3C8}.Release_NoOpt|Mixed Platforms.ActiveCfg = Release_CpuOnly|x64
{5D29C76D-648A-456F-920D-48230F2FB3C8}.Release_NoOpt|Mixed Platforms.Build.0 = Release_CpuOnly|x64
{5D29C76D-648A-456F-920D-48230F2FB3C8}.Release_NoOpt|x64.ActiveCfg = Release_CpuOnly|x64
{5D29C76D-648A-456F-920D-48230F2FB3C8}.Release_NoOpt|x64.Build.0 = Release_CpuOnly|x64
{5D29C76D-648A-456F-920D-48230F2FB3C8}.Release_NoOpt|x64.ActiveCfg = Release_NoOpt|x64
{5D29C76D-648A-456F-920D-48230F2FB3C8}.Release_NoOpt|x64.Build.0 = Release_NoOpt|x64
{5D29C76D-648A-456F-920D-48230F2FB3C8}.Release|Any CPU.ActiveCfg = Release|x64
{5D29C76D-648A-456F-920D-48230F2FB3C8}.Release|Mixed Platforms.ActiveCfg = Release|x64
{5D29C76D-648A-456F-920D-48230F2FB3C8}.Release|Mixed Platforms.Build.0 = Release|x64

Просмотреть файл

@ -1176,7 +1176,7 @@ python: $(ALL_LIBS)
declare -A py_paths; \
py_paths[34]=$(PYTHON34_PATH); \
py_paths[35]=$(PYTHON35_PATH); \
export LD_LIBRARY_PATH=$$LD_LIBRARY_PATH:$$(echo $(LIBPATH) $(KALDI_LIBPATH) | tr " " :); \
export LD_LIBRARY_PATH=$$LD_LIBRARY_PATH:$$(echo $(GDK_NVML_LIB_PATH) $(LIBPATH) $(KALDI_LIBPATH) | tr " " :); \
ldd $(LIBDIR)/* | grep "not found" && false; \
export CNTK_EXTRA_LIBRARIES=$$(ldd $(LIBDIR)/* | grep "^\s.*=> " | cut -d ">" -f 2- --only-delimited | cut -d "(" -f 1 --only-delimited | sort -u | grep -Ff <(echo $(EXTRA_LIBS_BASENAMES) | xargs -n1)); \
test -x $(SWIG_PATH); \

Просмотреть файл

@ -5,11 +5,13 @@ dependencies:
- jupyter=1.0.0=py34_3
- matplotlib=1.5.3=np111py34_0
- numpy=1.11.2=py34_0
- pandas=0.19.1=np111py34_0
- pillow=3.4.2=py34_0
- pip=8.1.2=py34_0
- python=3.4.4=5
- pyyaml=3.12=py34_0
- scipy=0.18.1=np111py34_0
- seaborn=0.7.1=py34_0
- setuptools=27.2.0=py34_0
- six=1.10.0=py34_0
- wheel=0.29.0=py34_0
@ -18,3 +20,4 @@ dependencies:
- sphinx==1.4.8
- sphinx-rtd-theme==0.1.9
- twine==1.8.1
- gym==0.5.2

Просмотреть файл

@ -3,11 +3,13 @@ dependencies:
- jupyter=1.0.0=py34_3
- matplotlib=1.5.3=np111py34_0
- numpy=1.11.2=py34_0
- pandas=0.19.1=np111py34_0
- pillow=3.4.2=py34_0
- pip=8.1.2=py34_0
- python=3.4.4=5
- pyyaml=3.12=py34_0
- scipy=0.18.1=np111py34_0
- seaborn=0.7.1=py34_0
- setuptools=27.2.0=py34_1
- six=1.10.0=py34_0
- wheel=0.29.0=py34_0
@ -16,3 +18,4 @@ dependencies:
- sphinx==1.4.8
- sphinx-rtd-theme==0.1.9
- twine==1.8.1
- gym==0.5.2

Просмотреть файл

@ -824,7 +824,20 @@ namespace CNTK
auto autoPadding = AsVector<bool>(functionConfig[PrimitiveFunction::AttributeNameAutoPadding].Value<std::vector<DictionaryValue>>());
NDShape outputMapCount = { 1 };
std::vector<bool> sharing = { true };
outputShape = ConvolutionOpOutputShape(op, inputs[0].Shape(), poolingWindowsShape, outputMapCount, strides, sharing, autoPadding, lowerPad, upperPad, false, inferDimensions);
auto inputShape = inputs[0].Shape();
// In case of pooling if the kernel shape is unknown, then treat it as global pooling.
if (poolingWindowsShape == NDShape::Unknown)
{
if ((std::find(autoPadding.begin(), autoPadding.end(), true) != autoPadding.end()) ||
(lowerPad.TotalSize() > 0) || (upperPad.TotalSize() > 0))
RuntimeError("Padding isn't allowed for Unknown shape!");
poolingWindowsShape = inputShape.SubShape(0, inputShape.Rank()-1);
functionConfig[PrimitiveFunction::AttributeNamePoolingWindowShape] = poolingWindowsShape;
}
outputShape = ConvolutionOpOutputShape(op, inputShape, poolingWindowsShape, outputMapCount, strides, sharing, autoPadding, lowerPad, upperPad, false, inferDimensions);
break;
}
case PrimitiveOpType::SumAll:
@ -863,9 +876,9 @@ namespace CNTK
case PrimitiveOpType::Times:
{
assert(inputs.size() == 2);
auto outputRank = functionConfig[PrimitiveFunction::AttributeNameOutputRank].Value<size_t>();
auto inferInputRankToMap = functionConfig[PrimitiveFunction::AttributeNameInferInputRankToMap].Value<int>();
outputShape = TimesOpOutputShape(inputs[0], inputs[1], outputRank, inferInputRankToMap, inferDimensions);
auto outputRank = functionConfig[PrimitiveFunction::AttributeNameOutputRank].Value<size_t>();
auto inferInputRankToMap = functionConfig[PrimitiveFunction::AttributeNameInferInputRankToMap].Value<int>();
outputShape = TimesOpOutputShape(inputs[0], inputs[1], outputRank, inferInputRankToMap, inferDimensions);
break;
}
case PrimitiveOpType::TransposeTimes:

Просмотреть файл

@ -551,6 +551,15 @@ namespace CNTK
{
if (inferDimensions)
{
size_t inputRank = operandShape.Rank();
// An Unknown kernel shape is valid only for pooling; however, the shape should have been
// expanded before this call.
if (kernelShape == NDShape::Unknown)
{
RuntimeError("Kernel shape can't be Unknown!");
}
// infer reduction dimensions if not given
// If kernel has a lower rank than the input then the remaining dimensions are to be reduced over.
size_t filterRank = kernelShape.Rank();
@ -565,7 +574,6 @@ namespace CNTK
kernelShape = kernelShape.SubShape(0, filterRank);
}
size_t inputRank = operandShape.Rank();
NDShape fromShape;
if (op == PrimitiveOpType::Convolution)
fromShape = operandShape;

Просмотреть файл

@ -321,6 +321,15 @@ public:
ComputationNetworkOwnedNodeState::CopyTo(*node);
TimeStamp::CopyTo(*node);
}
if (flags & CopyNodeFlags::copyNodeAll)
{
for (auto tag : this->GetTags())
{
node->SetTag(tag);
}
}
node->ClearConfigMemberCache();
}

Просмотреть файл

@ -5,6 +5,10 @@
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release_NoOpt|x64">
<Configuration>Release_NoOpt</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|x64">
<Configuration>Release</Configuration>
<Platform>x64</Platform>

Просмотреть файл

@ -52,7 +52,6 @@
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)' == 'Release_NoOpt|x64'">
<OutputPath>bin\x64\Release_NoOpt\</OutputPath>
<DefineConstants>TRACE</DefineConstants>
<Optimize>true</Optimize>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
@ -60,7 +59,6 @@
<PlatformTarget>x64</PlatformTarget>
<ErrorReport>prompt</ErrorReport>
<CodeAnalysisRuleSet>MinimumRecommendedRules.ruleset</CodeAnalysisRuleSet>
<Prefer32Bit>true</Prefer32Bit>
</PropertyGroup>
<ItemGroup>
<Reference Include="System" />

Просмотреть файл

@ -21,10 +21,6 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
libswscale-dev \
libtiff-dev \
pkg-config \
python-dev \
python-numpy \
python-pip \
python-yaml \
wget \
zlib1g-dev \
# Protobuf
@ -32,12 +28,15 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
curl \
unzip \
# For Kaldi
python-dev \
automake \
libtool \
autoconf \
subversion \
# For Kaldi's dependencies
libapr1 libaprutil1 libltdl-dev libltdl7 libserf-1-1 libsigsegv2 libsvn1 m4 && \
libapr1 libaprutil1 libltdl-dev libltdl7 libserf-1-1 libsigsegv2 libsvn1 m4 \
# For SWIG
libpcre++-dev && \
rm -rf /var/lib/apt/lists/*
RUN OPENMPI_VERSION=1.10.3 && \
@ -102,19 +101,6 @@ RUN mkdir /usr/local/CNTKCustomMKL && \
wget --no-verbose -O - https://www.cntk.ai/mkl/CNTKCustomMKL-Linux-2.tgz | \
tar -xzf - -C /usr/local/CNTKCustomMKL
RUN pip install pytest sphinx-rtd-theme Pillow
ENV BLAS=/usr/local/openblas/lib/libopenblas.so
ENV LAPACK=/usr/local/openblas/lib/libopenblas.so
RUN SCIPY_VERSION=0.18.0 && \
wget -q -O - https://github.com/scipy/scipy/releases/download/v${SCIPY_VERSION}/scipy-${SCIPY_VERSION}.tar.gz | tar -zxf - && \
cd scipy-${SCIPY_VERSION} && \
python setup.py build && \
python setup.py install && \
cd .. && \
rm -rf scipy-${SCIPY_VERSION}
# Install Kaldi
ENV KALDI_VERSION=c024e8aa
ENV KALDI_PATH /usr/local/kaldi-$KALDI_VERSION
@ -137,17 +123,41 @@ RUN mv /bin/sh /bin/sh.orig && \
for dir in $KALDI_PATH/src/*bin; do make -C $dir clean; done && \
mv -f /bin/sh.orig /bin/sh
## PYTHON
# Swig
RUN cd /root && \
wget -q http://prdownloads.sourceforge.net/swig/swig-3.0.10.tar.gz -O - | tar xvfz - && \
cd swig-3.0.10 && \
./configure --without-java --without-perl5 && \
make -j $(nproc) && \
make install
# Anaconda
RUN wget -q https://repo.continuum.io/archive/Anaconda3-4.2.0-Linux-x86_64.sh && \
bash Anaconda3-4.2.0-Linux-x86_64.sh -b && \
rm Anaconda3-4.2.0-Linux-x86_64.sh
RUN wget -q https://raw.githubusercontent.com/Microsoft/CNTK/master/Scripts/linux/conda-linux-cntk-py34-environment.yml -O /tmp/conda-linux-cntk-py34-environment.yml && \
/root/anaconda3/bin/conda env create -p /root/anaconda3/envs/cntk-py34/ --file /tmp/conda-linux-cntk-py34-environment.yml
ENV PATH /root/anaconda3/envs/cntk-py34/bin:$PATH
WORKDIR /cntk
# Build CNTK
RUN git clone --depth=1 -b master https://github.com/Microsoft/CNTK.git . && \
CONFIGURE_OPTS="\
--with-kaldi=${KALDI_PATH} \
--with-py34-path=/root/anaconda3/envs/cntk-py34" && \
mkdir -p build/cpu/release && \
cd build/cpu/release && \
../../../configure --with-openblas=/usr/local/openblas --with-kaldi=${KALDI_PATH} && \
../../../configure $CONFIGURE_OPTS --with-openblas=/usr/local/openblas && \
make -j"$(nproc)" all && \
cd ../../.. && \
mkdir -p build-mkl/cpu/release && \
cd build-mkl/cpu/release && \
../../../configure --with-mkl=/usr/local/CNTKCustomMKL --with-kaldi=${KALDI_PATH} && \
../../../configure $CONFIGURE_OPTS --with-mkl=/usr/local/CNTKCustomMKL && \
make -j"$(nproc)" all
RUN cd Examples/Image/DataSets/CIFAR-10 && \
@ -158,4 +168,4 @@ RUN cd Examples/Image/DataSets/MNIST && \
python install_mnist.py && \
cd ../../../..
ENV PATH=/cntk/build/cpu/release/bin:$PATH
ENV PATH=/cntk/build/cpu/release/bin:$PATH PYTHONPATH=/cntk/bindings/python LD_LIBRARY_PATH=/cntk/bindings/python/cntk/libs:$LD_LIBRARY_PATH

Просмотреть файл

@ -0,0 +1,196 @@
FROM nvidia/cuda:7.5-cudnn5-devel
ARG ENABLE_1BIT_SGD
RUN test "$ENABLE_1BIT_SGD" = "true" || ( echo "\033[0;31m\
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\
You are attempting to build an image of CNTK with 1-bit Stochastic Gradient\n\
Descent (1bit-SGD) enabled. Before proceeding further please ensure you \n\
understand the license difference between CNTK and 1bit-SGD. See \n\
https://github.com/Microsoft/CNTK/wiki/Enabling-1bit-SGD#license-difference-between-cntk-and-1bit-sgd\n\
\n\
To build the image pass the following parameter to Docker:\n\
\n\
--build-arg ENABLE_1BIT_SGD=true\n\
\n\
Example:\n\
\n\
docker build -t cntk --build-arg ENABLE_1BIT_SGD=true ./CNTK-GPU-1bit-Image\n\
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\
\n\033[0m"; exit 1)
RUN apt-get update && apt-get install -y --no-install-recommends \
autotools-dev \
build-essential \
cmake \
git \
gfortran-multilib \
libavcodec-dev \
libavformat-dev \
libjasper-dev \
libjpeg-dev \
libpng-dev \
liblapacke-dev \
libswscale-dev \
libtiff-dev \
pkg-config \
wget \
zlib1g-dev \
# Protobuf
ca-certificates \
curl \
unzip \
# For Kaldi
python-dev \
automake \
libtool \
autoconf \
subversion \
# For Kaldi's dependencies
libapr1 libaprutil1 libltdl-dev libltdl7 libserf-1-1 libsigsegv2 libsvn1 m4 \
# For SWIG
libpcre++-dev && \
rm -rf /var/lib/apt/lists/*
RUN OPENMPI_VERSION=1.10.3 && \
wget -q -O - https://www.open-mpi.org/software/ompi/v1.10/downloads/openmpi-${OPENMPI_VERSION}.tar.gz | tar -xzf - && \
cd openmpi-${OPENMPI_VERSION} && \
./configure --prefix=/usr/local/mpi && \
make -j"$(nproc)" install && \
rm -rf /openmpi-${OPENMPI_VERSION}
ENV PATH /usr/local/mpi/bin:$PATH
ENV LD_LIBRARY_PATH /usr/local/mpi/lib:$LD_LIBRARY_PATH
RUN LIBZIP_VERSION=1.1.2 && \
wget -q -O - http://nih.at/libzip/libzip-${LIBZIP_VERSION}.tar.gz | tar -xzf - && \
cd libzip-${LIBZIP_VERSION} && \
./configure && \
make -j"$(nproc)" install && \
rm -rf /libzip-${LIBZIP_VERSION}
ENV LD_LIBRARY_PATH /usr/local/lib:$LD_LIBRARY_PATH
RUN wget -q -O - https://github.com/NVlabs/cub/archive/1.4.1.tar.gz | tar -C /usr/local -xzf -
RUN OPENCV_VERSION=3.1.0 && \
wget -q -O - https://github.com/Itseez/opencv/archive/${OPENCV_VERSION}.tar.gz | tar -xzf - && \
cd opencv-${OPENCV_VERSION} && \
cmake -DCMAKE_BUILD_TYPE=RELEASE -DCMAKE_INSTALL_PREFIX=/usr/local/opencv-${OPENCV_VERSION} . && \
make -j"$(nproc)" install && \
rm -rf /opencv-${OPENCV_VERSION}
RUN OPENBLAS_VERSION=0.2.18 && \
wget -q -O - https://github.com/xianyi/OpenBLAS/archive/v${OPENBLAS_VERSION}.tar.gz | tar -xzf - && \
cd OpenBLAS-${OPENBLAS_VERSION} && \
make -j"$(nproc)" USE_OPENMP=1 | tee make.log && \
grep -qF 'OpenBLAS build complete. (BLAS CBLAS LAPACK LAPACKE)' make.log && \
grep -qF 'Use OpenMP in the multithreading.' make.log && \
make PREFIX=/usr/local/openblas install && \
rm -rf /OpenBLAS-${OPENBLAS_VERSION}
ENV LD_LIBRARY_PATH /usr/local/openblas/lib:$LD_LIBRARY_PATH
# Install Boost
RUN BOOST_VERSION=1_60_0 && \
BOOST_DOTTED_VERSION=$(echo $BOOST_VERSION | tr _ .) && \
wget -q -O - https://sourceforge.net/projects/boost/files/boost/${BOOST_DOTTED_VERSION}/boost_${BOOST_VERSION}.tar.gz/download | tar -xzf - && \
cd boost_${BOOST_VERSION} && \
./bootstrap.sh --prefix=/usr/local/boost-${BOOST_DOTTED_VERSION} --with-libraries=filesystem,system,test && \
./b2 -d0 -j"$(nproc)" install && \
rm -rf /boost_${BOOST_VERSION}
# Install Protobuf
RUN PROTOBUF_VERSION=3.1.0 \
PROTOBUF_STRING=protobuf-$PROTOBUF_VERSION && \
wget -O - --no-verbose https://github.com/google/protobuf/archive/v${PROTOBUF_VERSION}.tar.gz | tar -xzf - && \
cd $PROTOBUF_STRING && \
./autogen.sh && \
./configure CFLAGS=-fPIC CXXFLAGS=-fPIC --disable-shared --prefix=/usr/local/$PROTOBUF_STRING && \
make -j $(nproc) install && \
cd .. && \
rm -rf $PROTOBUF_STRING
# Install CNTK custom MKL, version 2
RUN mkdir /usr/local/CNTKCustomMKL && \
wget --no-verbose -O - https://www.cntk.ai/mkl/CNTKCustomMKL-Linux-2.tgz | \
tar -xzf - -C /usr/local/CNTKCustomMKL
RUN mkdir -p /usr/local/cudnn/cuda/include && \
ln -s /usr/include/cudnn.h /usr/local/cudnn/cuda/include/cudnn.h && \
mkdir -p /usr/local/cudnn/cuda/lib64 && \
ln -s /etc/alternatives/libcudnn_so /usr/local/cudnn/cuda/lib64/libcudnn.so
# Install Kaldi
ENV KALDI_VERSION=c024e8aa
ENV KALDI_PATH /usr/local/kaldi-$KALDI_VERSION
RUN mv /bin/sh /bin/sh.orig && \
ln -s -f /bin/bash /bin/sh && \
mkdir $KALDI_PATH && \
wget --no-verbose -O - https://github.com/kaldi-asr/kaldi/archive/$KALDI_VERSION.tar.gz | tar -xzf - --strip-components=1 -C $KALDI_PATH && \
cd $KALDI_PATH && \
cd tools && \
perl -pi -e 's/^# (OPENFST_VERSION = 1.4.1)$/\1/' Makefile && \
./extras/check_dependencies.sh && \
make -j $(nproc) all && \
cd ../src && \
./configure --openblas-root=/usr/local/openblas --shared && \
make -j $(nproc) depend && \
make -j $(nproc) all && \
# Remove some unneeded stuff in $KALDI_PATH to reduce size
find $KALDI_PATH -name '*.o' -print0 | xargs -0 rm && \
for dir in $KALDI_PATH/src/*bin; do make -C $dir clean; done && \
mv -f /bin/sh.orig /bin/sh
## PYTHON
# Swig
RUN cd /root && \
wget -q http://prdownloads.sourceforge.net/swig/swig-3.0.10.tar.gz -O - | tar xvfz - && \
cd swig-3.0.10 && \
./configure --without-java --without-perl5 && \
make -j $(nproc) && \
make install
# Anaconda
RUN wget -q https://repo.continuum.io/archive/Anaconda3-4.2.0-Linux-x86_64.sh && \
bash Anaconda3-4.2.0-Linux-x86_64.sh -b && \
rm Anaconda3-4.2.0-Linux-x86_64.sh
RUN wget -q https://raw.githubusercontent.com/Microsoft/CNTK/master/Scripts/linux/conda-linux-cntk-py34-environment.yml -O /tmp/conda-linux-cntk-py34-environment.yml && \
/root/anaconda3/bin/conda env create -p /root/anaconda3/envs/cntk-py34/ --file /tmp/conda-linux-cntk-py34-environment.yml
ENV PATH /root/anaconda3/envs/cntk-py34/bin:$PATH
WORKDIR /cntk
# Build CNTK
RUN git clone --depth=1 --recursive -b master https://github.com/Microsoft/CNTK.git . && \
CONFIGURE_OPTS="\
--1bitsgd=yes \
--with-cuda=/usr/local/cuda \
--with-gdk-include=/usr/local/cuda/include \
--with-gdk-nvml-lib=/usr/local/cuda/lib64/stubs \
--with-kaldi=${KALDI_PATH} \
--with-py34-path=/root/anaconda3/envs/cntk-py34 \
--with-cudnn=/usr/local/cudnn" && \
mkdir -p build/gpu/release && \
cd build/gpu/release && \
../../../configure $CONFIGURE_OPTS --with-openblas=/usr/local/openblas && \
make -j"$(nproc)" all && \
cd ../../.. && \
mkdir -p build-mkl/gpu/release && \
cd build-mkl/gpu/release && \
../../../configure $CONFIGURE_OPTS --with-mkl=/usr/local/CNTKCustomMKL && \
make -j"$(nproc)" all
RUN cd Examples/Image/DataSets/CIFAR-10 && \
python install_cifar10.py && \
cd ../../../..
RUN cd Examples/Image/DataSets/MNIST && \
python install_mnist.py && \
cd ../../../..
ENV PATH=/cntk/build/gpu/release/bin:$PATH PYTHONPATH=/cntk/bindings/python LD_LIBRARY_PATH=/cntk/bindings/python/cntk/libs:$LD_LIBRARY_PATH

Просмотреть файл

@ -15,10 +15,6 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
libswscale-dev \
libtiff-dev \
pkg-config \
python-dev \
python-numpy \
python-pip \
python-yaml \
wget \
zlib1g-dev \
# Protobuf
@ -26,12 +22,15 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
curl \
unzip \
# For Kaldi
python-dev \
automake \
libtool \
autoconf \
subversion \
# For Kaldi's dependencies
libapr1 libaprutil1 libltdl-dev libltdl7 libserf-1-1 libsigsegv2 libsvn1 m4 && \
libapr1 libaprutil1 libltdl-dev libltdl7 libserf-1-1 libsigsegv2 libsvn1 m4 \
# For SWIG
libpcre++-dev && \
rm -rf /var/lib/apt/lists/*
RUN OPENMPI_VERSION=1.10.3 && \
@ -73,6 +72,15 @@ RUN OPENBLAS_VERSION=0.2.18 && \
ENV LD_LIBRARY_PATH /usr/local/openblas/lib:$LD_LIBRARY_PATH
# Install Boost
RUN BOOST_VERSION=1_60_0 && \
BOOST_DOTTED_VERSION=$(echo $BOOST_VERSION | tr _ .) && \
wget -q -O - https://sourceforge.net/projects/boost/files/boost/${BOOST_DOTTED_VERSION}/boost_${BOOST_VERSION}.tar.gz/download | tar -xzf - && \
cd boost_${BOOST_VERSION} && \
./bootstrap.sh --prefix=/usr/local/boost-${BOOST_DOTTED_VERSION} --with-libraries=filesystem,system,test && \
./b2 -d0 -j"$(nproc)" install && \
rm -rf /boost_${BOOST_VERSION}
# Install Protobuf
RUN PROTOBUF_VERSION=3.1.0 \
PROTOBUF_STRING=protobuf-$PROTOBUF_VERSION && \
@ -84,15 +92,6 @@ RUN PROTOBUF_VERSION=3.1.0 \
cd .. && \
rm -rf $PROTOBUF_STRING
# Install Boost
RUN BOOST_VERSION=1_60_0 && \
BOOST_DOTTED_VERSION=$(echo $BOOST_VERSION | tr _ .) && \
wget -q -O - https://sourceforge.net/projects/boost/files/boost/${BOOST_DOTTED_VERSION}/boost_${BOOST_VERSION}.tar.gz/download | tar -xzf - && \
cd boost_${BOOST_VERSION} && \
./bootstrap.sh --prefix=/usr/local/boost-${BOOST_DOTTED_VERSION} --with-libraries=filesystem,system,test && \
./b2 -d0 -j"$(nproc)" install && \
rm -rf /boost_${BOOST_VERSION}
# Install CNTK custom MKL, version 2
RUN mkdir /usr/local/CNTKCustomMKL && \
wget --no-verbose -O - https://www.cntk.ai/mkl/CNTKCustomMKL-Linux-2.tgz | \
@ -103,20 +102,6 @@ RUN mkdir -p /usr/local/cudnn/cuda/include && \
mkdir -p /usr/local/cudnn/cuda/lib64 && \
ln -s /etc/alternatives/libcudnn_so /usr/local/cudnn/cuda/lib64/libcudnn.so
RUN pip install pytest sphinx-rtd-theme Pillow
ENV BLAS=/usr/local/openblas/lib/libopenblas.so
ENV LAPACK=/usr/local/openblas/lib/libopenblas.so
RUN SCIPY_VERSION=0.18.0 && \
wget -q -O - https://github.com/scipy/scipy/releases/download/v${SCIPY_VERSION}/scipy-${SCIPY_VERSION}.tar.gz | tar -zxf - && \
cd scipy-${SCIPY_VERSION} && \
python setup.py build && \
python setup.py install && \
cd .. && \
rm -rf scipy-${SCIPY_VERSION}
# Install Kaldi
ENV KALDI_VERSION=c024e8aa
ENV KALDI_PATH /usr/local/kaldi-$KALDI_VERSION
@ -139,14 +124,36 @@ RUN mv /bin/sh /bin/sh.orig && \
for dir in $KALDI_PATH/src/*bin; do make -C $dir clean; done && \
mv -f /bin/sh.orig /bin/sh
## PYTHON
# Swig
RUN cd /root && \
wget -q http://prdownloads.sourceforge.net/swig/swig-3.0.10.tar.gz -O - | tar xvfz - && \
cd swig-3.0.10 && \
./configure --without-java --without-perl5 && \
make -j $(nproc) && \
make install
# Anaconda
RUN wget -q https://repo.continuum.io/archive/Anaconda3-4.2.0-Linux-x86_64.sh && \
bash Anaconda3-4.2.0-Linux-x86_64.sh -b && \
rm Anaconda3-4.2.0-Linux-x86_64.sh
RUN wget -q https://raw.githubusercontent.com/Microsoft/CNTK/master/Scripts/linux/conda-linux-cntk-py34-environment.yml -O /tmp/conda-linux-cntk-py34-environment.yml && \
/root/anaconda3/bin/conda env create -p /root/anaconda3/envs/cntk-py34/ --file /tmp/conda-linux-cntk-py34-environment.yml
ENV PATH /root/anaconda3/envs/cntk-py34/bin:$PATH
WORKDIR /cntk
# Build CNTK
RUN git clone --depth=1 -b master https://github.com/Microsoft/CNTK.git . && \
CONFIGURE_OPTS="\
--with-cuda=/usr/local/cuda \
--with-gdk-include=/usr/local/cuda/include \
--with-gdk-nvml-lib=/usr/local/cuda/lib64/stubs \
--with-kaldi=${KALDI_PATH} \
--with-py34-path=/root/anaconda3/envs/cntk-py34 \
--with-cudnn=/usr/local/cudnn" && \
mkdir -p build/gpu/release && \
cd build/gpu/release && \
@ -166,4 +173,4 @@ RUN cd Examples/Image/DataSets/MNIST && \
python install_mnist.py && \
cd ../../../..
ENV PATH=/cntk/build/gpu/release/bin:/usr/local/mpi/bin:$PATH
ENV PATH=/cntk/build/gpu/release/bin:$PATH PYTHONPATH=/cntk/bindings/python LD_LIBRARY_PATH=/cntk/bindings/python/cntk/libs:$LD_LIBRARY_PATH

8
Tools/docker/README.md Normal file
Просмотреть файл

@ -0,0 +1,8 @@
# CNTK Dockerfiles
The Dockerfiles provided here can be used to build CNTK on CPU-only machines, or on GPU machines with or without 1bit-SGD.
If you plan to use the 1bit-SGD version, please make sure you understand the
[license difference between CNTK and 1bit-SGD](https://github.com/Microsoft/CNTK/wiki/Enabling-1bit-SGD).
See also [this page](https://github.com/Microsoft/CNTK/wiki/CNTK-Docker-Containers)
that provides general instructions on getting things working with Docker.

Просмотреть файл

@ -1,4 +1,5 @@
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
@ -13,7 +14,6 @@ def dfs_walk(node, visitor):
node (graph node): the node to start the journey from
visitor (Python function or lambda): function that takes a node as
argument and returns ``True`` if that node should be returned.
Returns:
List of nodes, for which ``visitor`` was ``True``
'''
@ -48,13 +48,112 @@ def find_nodes_by_name(node, node_name):
'''
Finds nodes in the graph starting from `node` and doing a depth-first
search.
Args:
node (graph node): the node to start the journey from
node_name (`str`): name for which we are search nodes
Returns:
List of nodes having the specified name
'''
return dfs_walk(node, lambda x: x.name == node_name)
def output_function_graph(node, dot_file_path=None, png_file_path=None):
    '''
    Walks through every node of the graph starting at ``node``,
    creates a network graph, and returns it as a string. If ``dot_file_path``
    or ``png_file_path`` is specified, the corresponding file is also saved.

    Requirements for DOT output: pydot_ng
    Requirements for PNG output: pydot_ng and graphviz

    Args:
        node (graph node): the node to start the journey from
        dot_file_path (`str`, optional): DOT file path
        png_file_path (`str`, optional): PNG file path

    Returns:
        `str` containing all nodes and edges

    Raises:
        ImportError: if DOT/PNG output was requested but pydot_ng is missing
    '''
    dot = (dot_file_path is not None)
    png = (png_file_path is not None)

    if dot or png:
        try:
            import pydot_ng as pydot
        except ImportError:
            raise ImportError("PNG and DOT format requires pydot_ng package. Unable to import pydot_ng.")

        # initialize a dot object to store vertices and edges
        dot_object = pydot.Dot(graph_name="network_graph", rankdir='TB')
        dot_object.set_node_defaults(shape='rectangle', fixedsize='false',
                                     height=.85, width=.85, fontsize=12)
        dot_object.set_edge_defaults(fontsize=10)

    # string to store model
    model = ''

    # walk every node of the graph iteratively
    visitor = lambda x: True
    stack = [node]
    accum = []
    visited = set()

    while stack:
        node = stack.pop()
        if node in visited:
            continue
        # BUGFIX: previously the visited set was never populated, so nodes
        # reachable through multiple paths were expanded repeatedly.
        visited.add(node)

        try:
            # Function node
            node = node.root_function
            stack.extend(node.inputs)

            # add current node
            model += node.op_name + '('
            if dot or png:
                cur_node = pydot.Node(node.op_name + ' ' + node.uid, label=node.op_name, shape='circle',
                                      fixedsize='true', height=1, width=1)
                dot_object.add_node(cur_node)

            # add node's inputs
            for i in range(len(node.inputs)):
                child = node.inputs[i]
                model += child.uid
                if i != len(node.inputs) - 1:
                    model += ", "
                if dot or png:
                    child_node = pydot.Node(child.uid)
                    dot_object.add_node(child_node)
                    dot_object.add_edge(pydot.Edge(child_node, cur_node, label=str(child.shape)))

            # add node's output
            model += ") -> " + node.outputs[0].uid + '\n'
            if dot or png:
                out_node = pydot.Node(node.outputs[0].uid)
                dot_object.add_node(out_node)
                dot_object.add_edge(pydot.Edge(cur_node, out_node, label=str(node.outputs[0].shape)))
        except AttributeError:
            # OutputVariable node: follow it back to its owning Function
            try:
                if node.is_output:
                    stack.append(node.owner)
            except AttributeError:
                pass

        if visitor(node):
            accum.append(node)

    if png:
        dot_object.write_png(png_file_path, prog='dot')
    if dot:
        dot_object.write_raw(dot_file_path)

    # return lines in reversed order
    return "\n".join(model.split("\n")[::-1])

Просмотреть файл

@ -171,17 +171,17 @@ def Convolution(filter_shape, # e.g. (3,3)
apply_x = apply_x >> activation
return Block(apply_x, 'Convolution', Record(W=W, b=b))
# MaxPooling, AveragePooling -- create a max- or average-pooling layer
# TODO: do we need MaxPooling and AveragePooling?
# TODO: This is not really a layer as it does not hold learnable parameters. So:
# - keep it in layer format, since users may think about it this way?
# - turn it into a function (lower-case)? Then how would it work inside Sequential() (we'd need partial application)?
from cntk.cntk_py import PoolingType_Max, PoolingType_Average
# Create a Pooling layer with one of following types:
#
# MaxPooling and GlobalMaxPooling
# AveragePooling and GlobalAveragePooling
#
# Setting filter_shape to None means global pooling.
from cntk.cntk_py import PoolingType_Max, PoolingType_Average, NDShape
def Pooling(op, # PoolingType_Max or _Average
filter_shape, # e.g. (3,3)
strides=1,
pad=False):
#UntestedBranchError("Pooling")
x = Placeholder(name='pooling_arg')
apply_x = pooling (x, op, filter_shape, strides=_as_tuple(strides), auto_padding=_as_tuple(pad))
@ -193,16 +193,26 @@ def Pooling(op, # PoolingType_Max or _Average
raise ValueError('Pooling: op must be PoolingType_Max or PoolingType_average')
return Block(apply_x, op_name)
# MaxPooling
def MaxPooling(filter_shape,  # e.g. (3,3)
               strides=1,
               pad=False):
    '''
    Layer factory for a max-pooling layer over windows of ``filter_shape``.
    '''
    return Pooling(PoolingType_Max, filter_shape, strides=strides, pad=pad)
# AveragePooling
def AveragePooling(filter_shape,  # e.g. (3,3)
                   strides=1,
                   pad=False):
    '''
    Layer factory for an average-pooling layer over windows of ``filter_shape``.
    '''
    return Pooling(PoolingType_Average, filter_shape, strides=strides, pad=pad)
# GlobalMaxPooling
def GlobalMaxPooling():
    '''
    Layer factory for a global max-pooling layer (the unknown window shape
    is expanded to the full input extent downstream).
    '''
    return Pooling(PoolingType_Max, NDShape.unknown.dimensions(), pad=False)
# GlobalAveragePooling
def GlobalAveragePooling():
    '''
    Layer factory for a global average-pooling layer (the unknown window shape
    is expanded to the full input extent downstream).
    '''
    return Pooling(PoolingType_Average, NDShape.unknown.dimensions(), pad=False)
# Recurrence() -- run a block recurrently over a time sequence
def Recurrence(over, go_backwards=False, initial_state=initial_state_default_or_None):
# helper to compute previous value

Просмотреть файл

@ -41,6 +41,18 @@ def _graph_dict():
d['root'] = d['first']
return d
def _simple_dict():
    '''Builds a small two-op graph ((i1 + i2) @ p1) keyed by node name.'''
    i1 = input_variable(shape=(2, 3), name='i1')
    i2 = input_variable(shape=(2, 3), name='i2')
    p1 = parameter(shape=(3, 2), name='p1')
    op1 = plus(i1, i2, name='op1')
    op2 = times(op1, p1, name='op2')
    return {'i1': i1, 'i2': i2, 'p1': p1,
            'op1': op1, 'op2': op2, 'root': op2}
def test_find_nodes():
@ -57,3 +69,16 @@ def test_find_nodes():
none = find_nodes_by_name(d['root'], 'none')
assert none == []
def test_output_funtion_graph():
    '''The serialized graph must be non-empty and list Plus before Times.'''
    graph = _simple_dict()
    model = output_function_graph(graph['root'])
    plus_marker = "\nPlus"
    times_marker = "\nTimes"
    assert len(model) != 0
    assert plus_marker in model
    assert times_marker in model
    assert model.find(plus_marker) < model.find(times_marker)

Просмотреть файл

@ -1,4 +1,4 @@
# Copyright (c) Microsoft. All rights reserved.
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================

Просмотреть файл

@ -9,7 +9,7 @@ import math
import numpy as np
from cntk.blocks import default_options
from cntk.layers import Convolution, MaxPooling, AveragePooling, Dropout, BatchNormalization, Dense
from cntk.layers import Convolution, AveragePooling, GlobalAveragePooling, Dropout, BatchNormalization, Dense
from cntk.models import Sequential, LayerStack
from cntk.utils import *
from cntk.io import MinibatchSource, ImageDeserializer, StreamDef, StreamDefs
@ -136,8 +136,7 @@ def create_resnet_model(input, num_classes):
r3_1 = resnet_basic_inc(r2_2, 64)
r3_2 = resnet_basic_stack(r3_1, 64, 2)
# Global average pooling
pool = AveragePooling(filter_shape=(8,8), strides=(1,1))(r3_2)
pool = GlobalAveragePooling()(r3_2)
net = Dense(num_classes, init=he_normal(), activation=None)(pool)
return net