Mirror of https://github.com/microsoft/caffe.git
Merge caffe/windows@{2016-03-08} into master
Commit d7b4e744c9
@@ -47,6 +47,8 @@ if(UNIX OR APPLE)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC -Wall")
endif()

caffe_set_caffe_link()

if(USE_libstdcpp)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libstdc++")
message("-- Warning: forcing libstdc++ (controlled by USE_libstdcpp option in cmake)")
Makefile (15 changed lines)
@@ -248,6 +248,8 @@ ifeq ($(UNAME), Linux)
LINUX := 1
else ifeq ($(UNAME), Darwin)
OSX := 1
OSX_MAJOR_VERSION := $(shell sw_vers -productVersion | cut -f 1 -d .)
OSX_MINOR_VERSION := $(shell sw_vers -productVersion | cut -f 2 -d .)
endif

# Linux
@@ -277,15 +279,22 @@ ifeq ($(OSX), 1)
endif
# clang throws this warning for cuda headers
WARNINGS += -Wno-unneeded-internal-declaration
# 10.11 strips DYLD_* env vars so link CUDA (rpath is available on 10.5+)
OSX_10_OR_LATER := $(shell [ $(OSX_MAJOR_VERSION) -ge 10 ] && echo true)
OSX_10_5_OR_LATER := $(shell [ $(OSX_MINOR_VERSION) -ge 5 ] && echo true)
ifeq ($(OSX_10_OR_LATER),true)
ifeq ($(OSX_10_5_OR_LATER),true)
LDFLAGS += -Wl,-rpath,$(CUDA_LIB_DIR)
endif
endif
endif
# gtest needs to use its own tuple to not conflict with clang
COMMON_FLAGS += -DGTEST_USE_OWN_TR1_TUPLE=1
# boost::thread is called boost_thread-mt to mark multithreading on OS X
LIBRARIES += boost_thread-mt
# we need to explicitly ask for the rpath to be obeyed
DYNAMIC_FLAGS := -install_name @rpath/libcaffe.so
ORIGIN := @loader_path
VERSIONFLAGS += -Wl,-install_name,$(DYNAMIC_VERSIONED_NAME_SHORT) -Wl,-rpath,$(ORIGIN)/../../build/lib
VERSIONFLAGS += -Wl,-install_name,@rpath/$(DYNAMIC_VERSIONED_NAME_SHORT) -Wl,-rpath,$(ORIGIN)/../../build/lib
else
ORIGIN := \$$ORIGIN
endif
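The net effect of the new lines is that on OS X the versioned library now advertises an `@rpath`-based install name and CUDA is located through an rpath entry rather than the `DYLD_*` variables that 10.11 strips. A quick post-build check, sketched with illustrative paths (the exact versioned filename comes from `DYNAMIC_VERSIONED_NAME_SHORT` in the Makefile):
```
# Print the install name of the built library; it should start with @rpath/.
otool -D build/lib/libcaffe.so*
# List the rpath entries recorded in a linked tool (path is illustrative).
otool -l build/tools/caffe | grep -A 2 LC_RPATH
```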
@@ -552,7 +561,7 @@ $(ALL_BUILD_DIRS): | $(BUILD_DIR_LINK)

$(DYNAMIC_NAME): $(OBJS) | $(LIB_BUILD_DIR)
@ echo LD -o $@
$(Q)$(CXX) -shared -o $@ $(OBJS) $(VERSIONFLAGS) $(LINKFLAGS) $(LDFLAGS) $(DYNAMIC_FLAGS)
$(Q)$(CXX) -shared -o $@ $(OBJS) $(VERSIONFLAGS) $(LINKFLAGS) $(LDFLAGS)
@ cd $(BUILD_DIR)/lib; rm -f $(DYNAMIC_NAME_SHORT); ln -s $(DYNAMIC_VERSIONED_NAME_SHORT) $(DYNAMIC_NAME_SHORT)

$(STATIC_NAME): $(OBJS) | $(LIB_BUILD_DIR)
@@ -20,7 +20,7 @@ caffe_option(MKL_MULTI_THREADED "Use multi-threading" ON IF NOT MKL_USE_SINGL

# ---[ Root folders
set(INTEL_ROOT "/opt/intel" CACHE PATH "Folder contains intel libs")
find_path(MKL_ROOT include/mkl.h PATHS $ENV{MKL_ROOT} ${INTEL_ROOT}/mkl
find_path(MKL_ROOT include/mkl.h PATHS $ENV{MKLROOT} ${INTEL_ROOT}/mkl
DOC "Folder contains MKL")

# ---[ Find include dir
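The one-character change matters because Intel's own environment scripts export `MKLROOT`, not `MKL_ROOT`, so `find_path` can now pick up the location automatically. A configure sketch under that assumption (default Intel install path shown; adjust to your MKL version):
```
# Export MKLROOT and the MKL library paths for a 64-bit build.
source /opt/intel/mkl/bin/mklvars.sh intel64
# Point Caffe's CMake build at MKL; find_path above now honors $MKLROOT.
mkdir -p build && cd build
cmake -DBLAS=MKL ..
make -j"$(nproc)"
```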
@@ -23,7 +23,7 @@ endif()

# place where to generate protobuf sources
set(proto_gen_folder "${PROJECT_BINARY_DIR}/include/caffe/proto")
include_directories(SYSTEM "${PROJECT_BINARY_DIR}/include")
include_directories("${PROJECT_BINARY_DIR}/include")

set(PROTOBUF_GENERATE_CPP_APPEND_PATH TRUE)
@@ -1,16 +1,17 @@
################################################################################################
# Defines global Caffe_LINK flag, This flag is required to prevent linker from excluding
# some objects which are not addressed directly but are registered via static constructors
if(BUILD_SHARED_LIBS)
set(Caffe_LINK caffe)
else()
if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
set(Caffe_LINK -Wl,-force_load caffe)
elseif("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
set(Caffe_LINK -Wl,--whole-archive caffe -Wl,--no-whole-archive)
macro(caffe_set_caffe_link)
if(BUILD_SHARED_LIBS)
set(Caffe_LINK caffe)
else()
if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
set(Caffe_LINK -Wl,-force_load caffe)
elseif("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
set(Caffe_LINK -Wl,--whole-archive caffe -Wl,--no-whole-archive)
endif()
endif()
endif()

endmacro()
################################################################################################
# Convenient command to setup source group for IDEs that support this feature (VS, XCode)
# Usage:
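Wrapping the logic in `caffe_set_caffe_link()` lets the top-level build script (the first hunk above, which now calls the macro) decide when the flag is computed. The whole-archive / force_load handling only matters for static builds, where objects referenced solely through layer-registration static constructors would otherwise be dropped by the linker. A sketch of the two configurations the macro distinguishes, using standard CMake options:
```
# Shared build: Caffe_LINK is simply "caffe" and ordinary linking applies.
cmake -DBUILD_SHARED_LIBS=ON ..
# Static build: Caffe_LINK wraps the archive in --whole-archive (GNU) or -force_load (Clang)
# so that layers registered via static constructors survive into downstream binaries.
cmake -DBUILD_SHARED_LIBS=OFF ..
```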
@@ -346,10 +346,11 @@ function(caffe_parse_linker_libs Caffe_LINKER_LIBS_variable folders_var flags_va
elseif(lib MATCHES "^-l.*")
list(APPEND libflags ${lib})
elseif(IS_ABSOLUTE ${lib})
get_filename_component(name_we ${lib} NAME_WE)
get_filename_component(folder ${lib} PATH)
get_filename_component(filename ${lib} NAME)
string(REGEX REPLACE "\\.[^.]*$" "" filename_without_shortest_ext ${filename})

string(REGEX MATCH "^lib(.*)" __match ${name_we})
string(REGEX MATCH "^lib(.*)" __match ${filename_without_shortest_ext})
list(APPEND libflags -l${CMAKE_MATCH_1})
list(APPEND folders ${folder})
else()
@@ -6,19 +6,10 @@ cd $DIR

echo "Downloading..."

wget --no-check-certificate http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz
wget --no-check-certificate http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz
wget --no-check-certificate http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz
wget --no-check-certificate http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz

echo "Unzipping..."

gunzip train-images-idx3-ubyte.gz
gunzip train-labels-idx1-ubyte.gz
gunzip t10k-images-idx3-ubyte.gz
gunzip t10k-labels-idx1-ubyte.gz

# Creation is split out because leveldb sometimes causes segfault
# and needs to be re-created.

echo "Done."
for fname in train-images-idx3-ubyte train-labels-idx1-ubyte t10k-images-idx3-ubyte t10k-labels-idx1-ubyte
do
if [ ! -e $fname ]; then
wget --no-check-certificate http://yann.lecun.com/exdb/mnist/${fname}.gz
gunzip ${fname}.gz
fi
done
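The rewritten script is idempotent: each of the four files is downloaded and unpacked only if it is not already present, so an interrupted run can simply be repeated. Typical usage from the repository root; `create_mnist.sh` is the separate conversion step the comment above refers to (path assumed from the standard Caffe layout):
```
# Fetch any missing MNIST files (safe to re-run).
./data/mnist/get_mnist.sh
# Convert them into the databases consumed by the LeNet example.
./examples/mnist/create_mnist.sh
```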
@@ -0,0 +1,50 @@
# A makefile to build the docker images for caffe.
# Two caffe images will be built:
# caffe:cpu --> A CPU-only build of caffe.
# caffe:gpu --> A GPU-enabled build using the latest CUDA and CUDNN versions.

DOCKER ?= docker

all: docker_files standalone

.PHONY: standalone devel

standalone: cpu_standalone gpu_standalone


cpu_standalone: standalone/cpu/Dockerfile
$(DOCKER) build -t caffe:cpu standalone/cpu

gpu_standalone: standalone/gpu/Dockerfile
$(DOCKER) build -t caffe:gpu standalone/gpu

docker_files: standalone_files

standalone_files: standalone/cpu/Dockerfile standalone/gpu/Dockerfile

FROM_GPU = "nvidia/cuda:cudnn"
FROM_CPU = "ubuntu:14.04"
GPU_CMAKE_ARGS = -DUSE_CUDNN=1
CPU_CMAKE_ARGS = -DCPU_ONLY=1

# A make macro to select the CPU or GPU base image.
define from_image
$(if $(strip $(findstring gpu,$@)),$(FROM_GPU),$(FROM_CPU))
endef

# A make macro to select the CPU or GPU build args.
define build_args
$(if $(strip $(findstring gpu,$@)),$(GPU_CMAKE_ARGS),$(CPU_CMAKE_ARGS))
endef

# A make macro to construct the CPU or GPU Dockerfile from the template
define create_docker_file
@echo creating $@
@echo "FROM "$(from_image) > $@
@cat $^ | sed 's/$${CMAKE_ARGS}/$(build_args)/' >> $@
endef


standalone/%/Dockerfile: templates/Dockerfile.template
$(create_docker_file)
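Taken together, the targets first regenerate `standalone/cpu/Dockerfile` and `standalone/gpu/Dockerfile` from the shared template (the `from_image` and `build_args` macros pick the base image and CMake arguments from the target name), then build and tag the two images. A usage sketch, assuming it is run from the directory containing this Makefile:
```
# Regenerate both Dockerfiles from the template and build both images.
make all
# Or build only the CPU image; the gpu/cpu choice is driven by $(findstring gpu,$@).
make cpu_standalone
# The docker binary can be overridden thanks to DOCKER ?= docker, e.g. make cpu_standalone DOCKER=...
```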
@@ -0,0 +1,52 @@
# Caffe standalone Dockerfiles.

The `standalone` subfolder contains docker files for generating both CPU and GPU executable images for Caffe. The images can be built using make, or by running:

```
docker build -t caffe:cpu standalone/cpu
```
for example. (Here `gpu` can be substituted for `cpu`, but to keep the readme simple, only the `cpu` case will be discussed in detail).

Note that the GPU standalone requires a CUDA 7.5 capable driver to be installed on the system and [nvidia-docker] for running the Docker containers. Here it is generally sufficient to use `nvidia-docker` instead of `docker` in any of the commands mentioned.

# Running Caffe using the docker image

In order to test the Caffe image, run:
```
docker run -ti caffe:cpu caffe --version
```
which should show a message like:
```
libdc1394 error: Failed to initialize libdc1394
caffe version 1.0.0-rc3
```

One can also build and run the Caffe tests in the image using:
```
docker run -ti caffe:cpu bash -c "cd /opt/caffe/build; make runtest"
```

In order to get the most out of the caffe image, some more advanced `docker run` options could be used. For example, running:
```
docker run -ti --volume=$(pwd):/workspace caffe:cpu caffe train --solver=example_solver.prototxt
```
will train a network defined in the `example_solver.prototxt` file in the current directory (`$(pwd)` is mapped to the container volume `/workspace` using the `--volume=` Docker flag).

Note that docker runs all commands as root by default, and thus any output files (e.g. snapshots) generated will be owned by the root user. In order to ensure that the current user is used instead, the following command can be used:
```
docker run -ti --volume=$(pwd):/workspace -u $(id -u):$(id -g) caffe:cpu caffe train --solver=example_solver.prototxt
```
where the `-u` Docker command line option runs the commands in the container as the specified user, and the shell command `id` is used to determine the user and group ID of the current user. Note that the Caffe docker images have `/workspace` defined as the default working directory. This can be overridden using the `--workdir=` Docker command line option.

# Other use-cases

Although running the `caffe` command in the docker containers as described above serves many purposes, the container can also be used for more interactive use cases. For example, specifying `bash` as the command instead of `caffe` yields a shell that can be used for interactive tasks. (Since the caffe build requirements are included in the container, this can also be used to build and run local versions of caffe).

Another use case is to run python scripts that depend on `caffe`'s Python modules. Using the `python` command instead of `bash` or `caffe` will allow this, and an interactive interpreter can be started by running:
```
docker run -ti caffe:cpu python
```
(`ipython` is also available in the container).

Since the `caffe/python` folder is also added to the path, the utility executable scripts defined there can also be used as executables. This includes `draw_net.py`, `classify.py`, and `detect.py`.
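As a concrete example of that last point, one of the bundled utility scripts can be run as the container command; the prototxt and output filenames below are placeholders, not files shipped with the image:
```
# Draw a network definition from the mounted workspace to a PNG next to it.
docker run -ti --volume=$(pwd):/workspace caffe:cpu \
  python /opt/caffe/python/draw_net.py my_net.prototxt my_net.png
```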
@@ -0,0 +1,43 @@
FROM ubuntu:14.04
MAINTAINER caffe-maint@googlegroups.com

RUN apt-get update && apt-get install -y --no-install-recommends \
build-essential \
cmake \
git \
wget \
libatlas-base-dev \
libboost-all-dev \
libgflags-dev \
libgoogle-glog-dev \
libhdf5-serial-dev \
libleveldb-dev \
liblmdb-dev \
libopencv-dev \
libprotobuf-dev \
libsnappy-dev \
protobuf-compiler \
python-dev \
python-numpy \
python-pip \
python-scipy && \
rm -rf /var/lib/apt/lists/*

ENV CAFFE_ROOT=/opt/caffe
WORKDIR $CAFFE_ROOT

# FIXME: clone a specific git tag and use ARG instead of ENV once DockerHub supports this.
ENV CLONE_TAG=master

RUN git clone -b ${CLONE_TAG} --depth 1 https://github.com/BVLC/caffe.git . && \
for req in $(cat python/requirements.txt) pydot; do pip install $req; done && \
mkdir build && cd build && \
cmake -DCPU_ONLY=1 .. && \
make -j"$(nproc)"

ENV PYCAFFE_ROOT $CAFFE_ROOT/python
ENV PYTHONPATH $PYCAFFE_ROOT:$PYTHONPATH
ENV PATH $CAFFE_ROOT/build/tools:$PYCAFFE_ROOT:$PATH
RUN echo "$CAFFE_ROOT/build/lib" >> /etc/ld.so.conf.d/caffe.conf && ldconfig

WORKDIR /workspace
@@ -0,0 +1,43 @@
FROM nvidia/cuda:cudnn
MAINTAINER caffe-maint@googlegroups.com

RUN apt-get update && apt-get install -y --no-install-recommends \
build-essential \
cmake \
git \
wget \
libatlas-base-dev \
libboost-all-dev \
libgflags-dev \
libgoogle-glog-dev \
libhdf5-serial-dev \
libleveldb-dev \
liblmdb-dev \
libopencv-dev \
libprotobuf-dev \
libsnappy-dev \
protobuf-compiler \
python-dev \
python-numpy \
python-pip \
python-scipy && \
rm -rf /var/lib/apt/lists/*

ENV CAFFE_ROOT=/opt/caffe
WORKDIR $CAFFE_ROOT

# FIXME: clone a specific git tag and use ARG instead of ENV once DockerHub supports this.
ENV CLONE_TAG=master

RUN git clone -b ${CLONE_TAG} --depth 1 https://github.com/BVLC/caffe.git . && \
for req in $(cat python/requirements.txt) pydot; do pip install $req; done && \
mkdir build && cd build && \
cmake -DUSE_CUDNN=1 .. && \
make -j"$(nproc)"

ENV PYCAFFE_ROOT $CAFFE_ROOT/python
ENV PYTHONPATH $PYCAFFE_ROOT:$PYTHONPATH
ENV PATH $CAFFE_ROOT/build/tools:$PYCAFFE_ROOT:$PATH
RUN echo "$CAFFE_ROOT/build/lib" >> /etc/ld.so.conf.d/caffe.conf && ldconfig

WORKDIR /workspace
@@ -0,0 +1,42 @@
MAINTAINER caffe-maint@googlegroups.com

RUN apt-get update && apt-get install -y --no-install-recommends \
build-essential \
cmake \
git \
wget \
libatlas-base-dev \
libboost-all-dev \
libgflags-dev \
libgoogle-glog-dev \
libhdf5-serial-dev \
libleveldb-dev \
liblmdb-dev \
libopencv-dev \
libprotobuf-dev \
libsnappy-dev \
protobuf-compiler \
python-dev \
python-numpy \
python-pip \
python-scipy && \
rm -rf /var/lib/apt/lists/*

ENV CAFFE_ROOT=/opt/caffe
WORKDIR $CAFFE_ROOT

# FIXME: clone a specific git tag and use ARG instead of ENV once DockerHub supports this.
ENV CLONE_TAG=master

RUN git clone -b ${CLONE_TAG} --depth 1 https://github.com/BVLC/caffe.git . && \
for req in $(cat python/requirements.txt) pydot; do pip install $req; done && \
mkdir build && cd build && \
cmake ${CMAKE_ARGS} .. && \
make -j"$(nproc)"

ENV PYCAFFE_ROOT $CAFFE_ROOT/python
ENV PYTHONPATH $PYCAFFE_ROOT:$PYTHONPATH
ENV PATH $CAFFE_ROOT/build/tools:$PYCAFFE_ROOT:$PATH
RUN echo "$CAFFE_ROOT/build/lib" >> /etc/ld.so.conf.d/caffe.conf && ldconfig

WORKDIR /workspace
@@ -54,7 +54,8 @@ There are several implementations of this library. The choice is yours:
* [ATLAS](http://math-atlas.sourceforge.net/): free, open source, and so the default for Caffe.
* [Intel MKL](http://software.intel.com/en-us/intel-mkl): commercial and optimized for Intel CPUs, with a free trial and [student](http://software.intel.com/en-us/intel-education-offerings) licenses.
1. Install MKL.
2. Set `BLAS := mkl` in `Makefile.config`
2. Set up MKL environment (Details: [Linux](https://software.intel.com/en-us/node/528499), [OS X](https://software.intel.com/en-us/node/528659)). Example: *source /opt/intel/mkl/bin/mklvars.sh intel64*
3. Set `BLAS := mkl` in `Makefile.config`
* [OpenBLAS](http://www.openblas.net/): free and open source; this optimized and parallel BLAS could require more effort to install, although it might offer a speedup.
1. Install OpenBLAS
2. Set `BLAS := open` in `Makefile.config`
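As a shell sketch, the two new MKL steps amount to the following (Intel's default install path is assumed, and the `sed` line presumes an uncommented `BLAS :=` entry already exists in `Makefile.config`; editing the file by hand works just as well):
```
# Step 2: set up the MKL environment for a 64-bit build.
source /opt/intel/mkl/bin/mklvars.sh intel64
# Step 3: select MKL as the BLAS backend for the Makefile build.
sed -i 's/^BLAS := .*/BLAS := mkl/' Makefile.config
```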
@@ -17,7 +17,7 @@ updated model, 0\-\>2, and then 0\-\>1, 2\-\>3.

For best performance, P2P DMA access between devices is needed. Without P2P access, for example crossing PCIe root complex, data is copied through host and effective exchange bandwidth is greatly reduced.

Current implementation has a "soft" assumption that the devices being used are homogeneous. In practice, any devices of the same general class should work together, but performance and total size is limited by the smallest device being used. e.g. if you combine a TitanX and a GTX980, peformance will be limited by the 980. Mixing vastly different levels of boards, e.g. Kepler and Fermi, is not supported.
Current implementation has a "soft" assumption that the devices being used are homogeneous. In practice, any devices of the same general class should work together, but performance and total size is limited by the smallest device being used. e.g. if you combine a TitanX and a GTX980, performance will be limited by the 980. Mixing vastly different levels of boards, e.g. Kepler and Fermi, is not supported.

"nvidia-smi topo -m" will show you the connectivity matrix. You can do P2P through PCIe bridges, but not across socket level links at this time, e.g. across CPU sockets on a multi-socket motherboard.
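For example, on a multi-GPU machine one would first inspect which device pairs share a P2P-capable path and then pass a matching device list to the caffe tool (the solver path is a placeholder):
```
# Show the GPU connectivity matrix; entries such as PIX/PXB indicate a shared PCIe path,
# while SOC marks a socket-level link that P2P cannot cross.
nvidia-smi topo -m
# Train on two devices that sit under the same PCIe root complex.
caffe train --solver=models/mymodel/solver.prototxt --gpu=0,1
```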
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
Diff not shown because of its large size
File diff suppressed because one or more lines are too long
@@ -1,947 +0,0 @@
# Fine-tuning a Pretrained Network for Style Recognition

In this example, we'll explore a common approach that is particularly useful in real-world applications: take a pre-trained Caffe network and fine-tune the parameters on your custom data.

The upside of such an approach is that, since pre-trained networks are learned on a large set of images, the intermediate layers capture the "semantics" of the general visual appearance. Think of it as a very powerful feature that you can treat as a black box. On top of that, only a few layers will be needed to obtain very good performance on the data.

First, we will need to prepare the data. This involves the following parts:
(1) Get the ImageNet ilsvrc pretrained model with the provided shell scripts.
(2) Download a subset of the overall Flickr style dataset for this demo.
(3) Compile the downloaded Flickr dataset into a database that Caffe can then consume.

In [1]:
import os
os.chdir('..')
import sys
sys.path.insert(0, './python')

import caffe
import numpy as np
from pylab import *
%matplotlib inline

In [2]:
# This downloads the ilsvrc auxiliary data (mean file, etc),
# and a subset of 2000 images for the style recognition task.
!data/ilsvrc12/get_ilsvrc_aux.sh
!scripts/download_model_binary.py models/bvlc_reference_caffenet
!python examples/finetune_flickr_style/assemble_data.py \
    --workers=-1 --images=2000 --seed=1701 --label=5

Let's show the difference between the fine-tuning network and the original caffe model.

In [3]:
!diff models/bvlc_reference_caffenet/train_val.prototxt models/finetune_flickr_style/train_val.prototxt

1c1
< name: "CaffeNet"
---
> name: "FlickrStyleCaffeNet"
4c4
< type: "Data"
---
> type: "ImageData"
15,26c15,19
< # mean pixel / channel-wise mean instead of mean image
< # transform_param {
< # crop_size: 227
< # mean_value: 104
< # mean_value: 117
< # mean_value: 123
< # mirror: true
< # }
< data_param {
< source: "examples/imagenet/ilsvrc12_train_lmdb"
< batch_size: 256
< backend: LMDB
---
> image_data_param {
> source: "data/flickr_style/train.txt"
> batch_size: 50
> new_height: 256
> new_width: 256
31c24
< type: "Data"
---
> type: "ImageData"
42,51c35,36
< # mean pixel / channel-wise mean instead of mean image
< # transform_param {
< # crop_size: 227
< # mean_value: 104
< # mean_value: 117
< # mean_value: 123
< # mirror: true
< # }
< data_param {
< source: "examples/imagenet/ilsvrc12_val_lmdb"
---
> image_data_param {
> source: "data/flickr_style/test.txt"
53c38,39
< backend: LMDB
---
> new_height: 256
> new_width: 256
323a310
> # Note that lr_mult can be set to 0 to disable any fine-tuning of this, and any other, layer
360c347
< name: "fc8"
---
> name: "fc8_flickr"
363c350,351
< top: "fc8"
---
> top: "fc8_flickr"
> # lr_mult is set to higher than for other layers, because this layer is starting from random while the others are already trained
365c353
< lr_mult: 1
---
> lr_mult: 10
369c357
< lr_mult: 2
---
> lr_mult: 20
373c361
< num_output: 1000
---
> num_output: 20
384a373,379
> name: "loss"
> type: "SoftmaxWithLoss"
> bottom: "fc8_flickr"
> bottom: "label"
> top: "loss"
> }
> layer {
387c382
< bottom: "fc8"
---
> bottom: "fc8_flickr"
393,399d387
< }
< layer {
< name: "loss"
< type: "SoftmaxWithLoss"
< bottom: "fc8"
< bottom: "label"
< top: "loss"

For your record, if you want to train the network in pure C++ tools, here is the command:

build/tools/caffe train \
    -solver models/finetune_flickr_style/solver.prototxt \
    -weights models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel \
    -gpu 0

However, we will train using Python in this example.

In [4]:
niter = 200
# losses will also be stored in the log
train_loss = np.zeros(niter)
scratch_train_loss = np.zeros(niter)

caffe.set_device(0)
caffe.set_mode_gpu()
# We create a solver that fine-tunes from a previously trained network.
solver = caffe.SGDSolver('models/finetune_flickr_style/solver.prototxt')
solver.net.copy_from('models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel')
# For reference, we also create a solver that does no finetuning.
scratch_solver = caffe.SGDSolver('models/finetune_flickr_style/solver.prototxt')

# We run the solver for niter times, and record the training loss.
for it in range(niter):
    solver.step(1)  # SGD by Caffe
    scratch_solver.step(1)
    # store the train loss
    train_loss[it] = solver.net.blobs['loss'].data
    scratch_train_loss[it] = scratch_solver.net.blobs['loss'].data
    if it % 10 == 0:
        print 'iter %d, finetune_loss=%f, scratch_loss=%f' % (it, train_loss[it], scratch_train_loss[it])
print 'done'

iter 0, finetune_loss=3.360094, scratch_loss=3.136188
iter 10, finetune_loss=2.672608, scratch_loss=9.736364
iter 20, finetune_loss=2.071996, scratch_loss=2.250404
iter 30, finetune_loss=1.758295, scratch_loss=2.049553
iter 40, finetune_loss=1.533391, scratch_loss=1.941318
iter 50, finetune_loss=1.561658, scratch_loss=1.839706
iter 60, finetune_loss=1.461696, scratch_loss=1.880035
iter 70, finetune_loss=1.267941, scratch_loss=1.719161
iter 80, finetune_loss=1.192778, scratch_loss=1.627453
iter 90, finetune_loss=1.541176, scratch_loss=1.822061
iter 100, finetune_loss=1.029039, scratch_loss=1.654087
iter 110, finetune_loss=1.138547, scratch_loss=1.735837
iter 120, finetune_loss=0.917412, scratch_loss=1.851918
iter 130, finetune_loss=0.971519, scratch_loss=1.801927
iter 140, finetune_loss=0.868252, scratch_loss=1.745545
iter 150, finetune_loss=0.790020, scratch_loss=1.844925
iter 160, finetune_loss=1.092668, scratch_loss=1.695591
iter 170, finetune_loss=1.055344, scratch_loss=1.661715
iter 180, finetune_loss=0.969769, scratch_loss=1.823639
iter 190, finetune_loss=0.780566, scratch_loss=1.820862
done

Let's look at the training loss produced by the two training procedures respectively.

In [5]:
plot(np.vstack([train_loss, scratch_train_loss]).T)

Out[5]:
[<matplotlib.lines.Line2D at 0x7fbb36f0ad50>,
 <matplotlib.lines.Line2D at 0x7fbb36f0afd0>]
[matplotlib figure: embedded base64 PNG output omitted]

Notice how the fine-tuning procedure produces a smoother loss function change, and ends up at a better loss. A closer look at small values, clipping to avoid showing too large loss during training:

In [6]:
Out[6]:
[<matplotlib.lines.Line2D at 0x7fbb347a8310>,
 <matplotlib.lines.Line2D at 0x7fbb347a8590>]
[matplotlib figure: embedded base64 PNG output truncated]
|
||||
"ZlUeSLMJITm4wBcj2jQx6ySmrEVxAvtyTQ3CXIThmD0TI5t2Ktd/jWLSDv1GaUkvw6KCT0FHxsWx\n",
|
||||
"QVBzPXgqBT6bRUPwEeO4wq+xga505Jr24NNdZrBB54+Tzzu1pBeEtMb4XFswC+tG1s9+ht2DLgl5\n",
|
||||
"xsdg7+EYkmnro7Av/iAR0gN8E+hajfoHsPfknzF7BpKB81oCfy/2wzufnO6zlnSZlvR/1OYg3BrO\n",
|
||||
"/xpaO8tyOfZ7rdt9D43Jz0i+m3nchllp07H3O57337DP+vbQAMZzKvBfWPbOHbQILenq0GPahtWl\n",
|
||||
"mkHn7xrY7+uoEN0fQH6K9FVU2qRvAS7GxkxeGCzIPE7Cgo+YxXMCjQl8jOBPwXojG7CApmIAPTXe\n",
|
||||
"cif2XXoyjLmAWV7vJOkhtgQX+GI8itkls7DWOq7yMxeLZG1SlP24T8k7QQ7ZZQnXkS/wG7B1YNGS\n",
|
||||
"rtCSdqyUpSVdHm5WWDQivEqET5AMsEJ+BN+BlvRxYGJmsCwdwR+LZUTE/Z/Ukl5Z5Vy5K1FpSZ/R\n",
|
||||
"kn6Bbyy9kY2z9mIRZGwkR2MLuQzHBGd9znsyhGQR8YYJjdmrsZS65SKUWHtofH+qWn7hB3cl9iOu\n",
|
||||
"133+T6wR/rKWtOg8jSI8jqXHPlpvx8AHoGbq8Q3YBLI7gRviZxaE9g1k0jADP8Gqst6Z81greAgL\n",
|
||||
"Yl5O53Wf78Nsr+nAE1VE8Crg5cFymYVF0VeHHs4dwItD1tibxQrNxeDhBOzzf0mwTk4jsUCL8Aj2\n",
|
||||
"3ZyLNQy3Yb26bIZU5E7gdZg9FfkrNs7TMv8dXOCL8ijWIi/BPrROAh9oxKYZTmW9nWxZgIOxbut6\n",
|
||||
"kjVc+6fsoTRZi+Zw7Etaz4OvIB2xpa5pSJjQcQydo6pmaePRs3ZiPu88rOGIQj4NE/sNmMCn35Mh\n",
|
||||
"kLtqVWHUsnBeh2VmnMfy+XFMoFYEDzvargFg19B6P8CrsCJm3+zKdeawHLPECi1XGKLiqh5yiPJP\n",
|
||||
"xyyML2ceu1lLem3OMY9hPb3rso+1iIewAf9hVFqFYK+/DZtBnfXfIw9gmnYENiHtl6kIOdpsP8Je\n",
|
||||
"8zcx2xMs0PgqNrB8ATa+UO05OhEam79httJ2TOD7U1vgJ1Ep8A9iv3UX+H3Ao1g2yKJwe3YQ2qOp\n",
|
||||
"7AbfhE2LL0I2gs9aNDMxG6EjgsfSrfKqQGYHWUdi0UDaokmvJFWIICbfAf6FTATfRdp45GWKTXrZ\n",
|
||||
"FXz8+BqnURnBj4OOAdaBdFHgAbSkv9eSXgEMYdXcrdg4yPaaB/38qnu56RPw+a15tW7S596lJf2g\n",
|
||||
"5qfUdoW/0OIaMmEA8L+1pIXniGhJXx/swe7gQWyg9S3Zxin0MB7ABppzxTcI7fexaP2fMEspciWW\n",
|
||||
"0jgDK818AXBhGFAejn3PT8NKRmQnVhXhD1haJVgUvwtL0MjjvvB4h8CH1/dDkkmDLcEFvhiPYi1y\n",
|
||||
"h8Bjrf5S1Yo1YW8HjhIpVK2vnsDPApapsgNQEQZjX/6z0icRoT8memtIIviRWAMxjETgn6K5olmX\n",
|
||||
"ABdi3flOC6k3SRvLTx+ICXlsIEdjqaZR4LMRfBTWLgt8iqEsOW8T8JW6vueKUwZy7RehVdUyGyQ0\n",
|
||||
"HC3Lruil/BY4V0taLfK9D3gpNebAaEk/g/2O5mpJ70htfwyL0l+rJd2pJV2MBREfBu4Ig7SLsYDi\n",
|
||||
"941euJb0Mi1pXBz7duAd1XpboVexEHME0ts/piVtVS8ZsMEKpz7R91yEWR2zsXSnv6R3UmW7CO8H\n",
|
||||
"fiXCNaodk4fyyPPg03bETJLZeusxAZwFHCHCeFXWhMdGYlbPNmBwmNw0AhPEWSQCv4JkvdfCaEnX\n",
|
||||
"SVn+AzihhYM/bewZOBTlKqRjgZfR2Be+msDHxqtpDz6HoWyddKCW9F8K7Btnsh4GZCfsOC0gpJPW\n",
|
||||
"KrJ1HzYxr6Z9EmySJTnbs0kBv8IGYKNF9X1ANZn/0BSh9/HTOrudR41xn1bhAl+MdZivtgiLMg/C\n",
|
||||
"JrF8LrujKj8U4XZSKXJVqBrBi9CG9RjiIF20V2ZivvxpJJOR2oANqjwrwh4slS167UeSlDVeAUwT\n",
|
||||
"QRqo4RP5HPllEJrFZrWuOKXE9JtjSthorPbIYcAzquwSyRX41kbwxV9XWuCdfUMsIV7YH6/DrzDP\n",
|
||||
"/zYALen3WnTeumgo79HduEVTAFVUlRNV2aTKFiwqPh4bVM2jncRTrkYti2YmsDwlxOuxruNUzFec\n",
|
||||
"nzqujaR88g4sch+JWTJHUjnICplUySIEr7b24iiNYQL/g5tWq1VRhETgj4YO2ytdzbKlFk2wtmKB\n",
|
||||
"tyIMxz73w1vx/NUQ4TIRWloZcT+ipQKvNuv5SyTVUPc7XOCb41Hg9horQq0HRtepMFkrTXIWlcWU\n",
|
||||
"NmBivQ6rBTM/9dgoEoHfTiLwi7Bocxt0LI3YlE0jgoSJWa0i1qUZnto2Brvmg0kEvjsj+Gj1NCLw\n",
|
||||
"d9H9EfzbyB9If84TLJx/pvhCP0XO+ckWTtrqdbjAN8cjWBGhXFTZhRUnqiVGI6gU+HSjMJNkIk58\n",
|
||||
"7JiwbRE2oSrWEclG8EMwgV+IRajpRqgpgcemqf+47l7FacOsrrTAxwheSKyp7hT4IZn/9RiOfe4H\n",
|
||||
"ijS8ZGAhQq9iAHCGSOFsrOcUWtLPF00VdQoIvIj8QERWi0juEnoiMl9ENonIwvBXZMCqr/Nx8lcO\n",
|
||||
"SrMeas5qrYjgQ6OwAxPnvAj+WCyr5llsZuUR4bE2kog3G8FDawT+NJpYMq4GMa1zOHTU9IlTtDeR\n",
|
||||
"vJ703IDcQdYu9CyGZv7XI35eD9J9Ufwg7DvwaZJVkRynaYpE8D/EUpNqcYOqzgt/n23BdfVqVFmt\n",
|
||||
"WndN1mzaIwAinCbCyXSe6AQWsU4gP4I/nET0nyZZuGIMiSDuwCLcYVg0DJX53c0K/Imp52sFbeFa\n",
|
||||
"YgQ/ClifspHi69mS2mcwVoM8G8HfK1KxFF9RmrFoWiLwInxAJHfG8xDs83qQrpVkcByggMCr6l+h\n",
|
||||
"Itc7j2aWaNvfiamNWd6EpWVlPXiw9Lu3kR/B9ycR/dXQMRA3iWRB8e2YMGwPx++lMoJfSYMCL8JQ\n",
|
||||
"TNBaMvAXLKiRVAr8aJKUMVvtykgL/BBs0DVZ1s0Kjx3S5LU1KvDDSAS+qwOtp2NjKlmiwG+htdlC\n",
|
||||
"znOUVnjwCpwsIveIyNUi0q1ZBn2I3AgeE9iTsYlSWYH/HLYk2GwqBT4KXty2ikqBj5koO8L2Tars\n",
|
||||
"xrINumrRHIfZPSLSEtEZio1PbCAR7zEkr/EJCgo8SeXAWouJ1LqOeN4ixAb5UZLl85plRJXnjQK/\n",
|
||||
"lcrxCcdpilbkwd8NTFPV7SJyDlZNLXdNThG5OHV3gaouaMHz91aqpUpOw1bBuZCMwKuyQoSfAW9S\n",
|
||||
"JT2yH3tQMYJfRbIy0WQqI/hJJCmR11OZUtaMwJ+IlSiegDUeS2vvniDCcaqdClPFQeG0eI8mEfVL\n",
|
||||
"sAYA7PUMFOEAzKJZg5VYjZyO9VKaFfjtNC7wT2auoRmGV3lej+AdRGQ+lZlyTdNlgVfV1EChXiMi\n",
|
||||
"l4rIaNXOlfRU9eKuPl8fomOQVYS3Ar9Q5RlMYF+DVTXMevBgEy+yNSzWY/5zTA+rG8EDqPL2zHlW\n",
|
||||
"YBk4Eh4vMuHpJKww10mYyBcS+CDKt4gwVZX0qko1BV41mYauioqwFRO7GMGnF7uej+UwNyPw8Xzd\n",
|
||||
"KvBh0trWMDgeKRTBNzkpzenjhMB3QbwvIqWqO9ehyxaNiEwQsUL5InICIHni/hykHRgTxPTbwFwR\n",
|
||||
"RmBe+iNYUbLO06ltAPermc1PAQ+mRGI1MDGcOx3BR4HPazgIA8PPYEWrltTJ049++YlYGeR0o1KE\n",
|
||||
"GVgAkU0prBfBZ4n7VVg0IkwJ57qZggIfqnHGBZKH0pzArwLGhgasCD+gcj3ReK5qAr8j2GvPQkUt\n",
|
||||
"fMdpmCJpkr/ASmEeIiIrROTtInKRiFwUdnktcJ+ILMJWur+w+y63TxEHWcdhP9xDseh9RZgZe4fa\n",
|
||||
"Itt1UbU1H1ObotgOB/aG2bXQ2aLJYwW2fuUU6Milr8Y8LJpcgTUqjWTSxJLK2XGIRgU+G8EPCw3P\n",
|
||||
"6dhM4vXUKYOcYiYmuGACny7QVo/hJJH4Ooo3dvOw3lqaahH8YJKsJ/fhnS5TNwpR1ZolSlX121iE\n",
|
||||
"6lQSB1lnhfuHYiK5suoRNch01ddgkfE0EnsGkgg+d85C4Nck67AeFK6pGhcAvwlWySoaE/i4uHVR\n",
|
||||
"ga82OzHuNzjssxebDDQXKwu7kcrFmmsxHBgVSg/HCH5eA8fGhjTaNDU/y7B4yQRsAttoVdaHxim+\n",
|
||||
"nizRooGkYVubs5/jFMJnsnYf0YM/CBPeQwgRfFdPHLrwG7EVbp5OPbSdlAdf5djPqvJ7zOc/qNp+\n",
|
||||
"QYguICmalk7NLEIjAp/OosmStmi2Y1lBQ8O1PIW91qIe/HDoqLY5FIvEG53oBMV9+COxErTXkyzI\n",
|
||||
"PjRcQy0PHirfH8dpChf47iNm0czCfuAdFk2Lzr8aiz6zEfwgals0kWUkvYs8jsa+H7Fee0cEL9Kx\n",
|
||||
"+HEHIpyQ8fTnYAOyRT34aqVT0wK/gySynYg1bhspLvAxM2U0zXvwUFzgj8J6U78DXhW2xWJv9QQ+\n",
|
||||
"vk7HaRoX+O4jWjQHYROYDsIEtVUCvwqrT5MW+GzlyFrUjOBJ2TPhfhzYnQjcIZIsHiLSsYZmevxl\n",
|
||||
"dtjWVQ8+bdHEDJNh2FjDKhoT+PTzDQnHUmUZxA6CpTOEpPTyUzQm8FcBZ4WB2XRefxaP4J2W4gLf\n",
|
||||
"fWzEBv9mY930J7GBwVYK/DwqLZq4TFwrIvgzgaszzzcBW7oQbKJWTAP8DlY75SsitAVBPAhb2abV\n",
|
||||
"Fk06go8CPzJcy7w6q2nFiHgUyXKGRXLhhwLbU4PiDUXwqqzDspdG4RG804O4wHcTIdtiCzYYuAxb\n",
|
||||
"ULiVFs0qTBhbFsGLWI55KOB1FJWLbEcP/kxM4E4M28vAlap8EVvY+HOY+G3AJlnFuQBfEOF9ZAQ+\n",
|
||||
"FBobT/XlALMWzbZwjjbMQ09H8G+kdpGudATfiMBny0rUFfhgV8UIHpLCacPD7appkuF2RwMo0r01\n",
|
||||
"6J3qiPAGEd64r6+jWVzgu5eYwvcEJvDQWg8emo/gV2CWywAAEY4EHgoWzFHAY+mCaqH2/bPAy7Fa\n",
|
||||
"Oi8IlsMbSOqX/zNm05yN+e/papBHAx/FBD8dwc/BFjepWGQ5RV4EPxtYq8qe8FrbgqBOwUrtVssO\n",
|
||||
"6zGBxywkJfmcYunjEVjjXCRNMkbwN4e6O07Pczz23e2TuMB3L+1Y3vsuTOA3pXLWu0pc6i4vgs+d\n",
|
||||
"6JQmCOpT0CEccWHts7Ev9R05h63CMkB+hPn/Z2Cvb1k4ZzvwLUzwo8BHi2YGJmCnYgK/E0vTPZKk\n",
|
||||
"8csjz4OfE64lllnejQnmVEwUj61yrmHYjOBGLZpcga8zUSzaM3EMIwr8cKoLfCcPXoSBWA9lUp1r\n",
|
||||
"3KeIMFIk+RxFGN/AZLDeTByv6ZO4wHcv60nqxzxAZQngrhIFvtkIHoIPH4TqQsxLfylwAuafZ1kN\n",
|
||||
"XK/KJmxl+zLw35l9voo1AksJq1SF888ALsZEfWMQvi1YMbNOM3pT5EXwc6h83dGmmYKNG7y4yrmG\n",
|
||||
"Y+KcjuC30bjAxwa01vKHp2AzgCPpCH51fE4RThThv8I+eR587AHtE4EXYYgIRUqAzwQOTjV6P6L6\n",
|
||||
"59CXcIF3qtJOUlfmFiw6bhWrsJmVaeFpVOCjD38cNoHoEuwaX0B+BP8E8Odw+1asPs1v0zuEImlv\n",
|
||||
"CdvXY8I7FtiFzSK9mcS22IL1FopE8GkPviOCD2zEovLJ2MpTtQT+cZIf7fbwVy8XPrs4i5Jj04gk\n",
|
||||
"lhfWu7k+9XBckrEjgk+t3hUnauVl0cS68Psqgp8DfCTbWxHpVEhvKqYncQLXqPDX13GBd6qyBisv\n",
|
||||
"GxfubuWsxEeA7OpZjQyygkXwLw3n+aUqK7DIeA75s2HfiUVmYAJ/vyoPZ3dS5SpVHg4DzdswD/Nx\n",
|
||||
"VXapcmqqUdqCWT1FIvi0RTObzgI/OzzXH4HjRDoi5GkiHWUThmGNVFWLJmTiZAU/r3b/Sjqnmf4Y\n",
|
||||
"+GB47nlYYxaJ4xEjsIZPsVWs2kiEsFYEv68W4p6MvfcdvZUwHrA0I/oxbXZ46n9FmmemAewrjCJ/\n",
|
||||
"1nGfwAW+e/ks8M3uOLEqO1X5embzDpJiVUX4C8lCIrHcxB+AhcHbzj7ntjCwCfAz4PwCz9GOifjj\n",
|
||||
"OY/Fsri1BD7WZElPdBpIZ4vmSGBlGBi+FTgvPPYV4MPh9nBM4HMHWUM+/B/pnIkzHDqt4PVj4Asi\n",
|
||||
"9uMPYncsVs//VGBRZlH2tAe/OfW8I8kX+KYieBEmiXBJkX0LMjnn+edi15we+I1lqOPAcCeBx4KD\n",
|
||||
"l7fw2lqKCB8V4SOZzR7BO/mo0q5af8CzhawjVWa0Hqrcqsp5qnwoVdL3P4EvFDh2e170nkM7Fs1W\n",
|
||||
"E/h1YXC2GlswkdsdGpcotNkI/kiSVMvLgPeLMAkr9BUHetMWTV4E/zLsPXxPxoLIi+B/go2rxAyi\n",
|
||||
"KdgA7k6szs/1mf3THvwWMgKfmkwVbbYYwY/DMp6KWjRHYuWoW0UU+HQPIha+OyK1rW4Ej/WyevNg\n",
|
||||
"8fNI2Xuh0W5I4EUYWq9Ka0/iAr8focoWVc7t4jmWqPK7Vl0TJmzHULnwSGQLtf33uM94ksg2RsVp\n",
|
||||
"gd+EiU0s/nUFJjjfDM8bbY5o0UzEqnDuplLg34algP4v8KHU+eNyfR0EH/4i4M0ijMcasYXAd7EV\n",
|
||||
"u/IEPnrw2Qi+H5U2VHzdMYK/j+IWzURqD/42Sl4E/3zs/U/n53cIfKqgWofAhwZsBq1d27fVjMHS\n",
|
||||
"f6NAD8YK2zUSwV+Nfd97BS7wTnfTjkVG1SL4WvZM3Gckld40dLZoDiUIfPD+L8MspEuojOCj4MeG\n",
|
||||
"Yjs24DkROA0rrnYJ8N7UDz0vgo8DytcBL8Fsi4XAT7ESDbdkdq8VwYNZHtU8+PsoHvl2h8BnexBH\n",
|
||||
"YVVJ0xH8NOy9HYZZaOmyDITjD6T3C/xokkJ5sRfXiAc/kfylOvcJLvBOdxPtlzyBXwPcW+f4dK17\n",
|
||||
"SAQ+XeZ4IyYe6dmw3wU+jWX9xAg+ZrDsIiPwmJVzhSpbVXkUs1smpo6rNn/hGuAcLIJfpMoGVU5U\n",
|
||||
"7bBaImmBz0bwUJnZE193jODvB8aHKLgeE7EGq6Ec9ODd51kLk7AZzRPDfoOxSPx/CQIfjpuKLUie\n",
|
||||
"jtzTAh/LYvSYwIswQoRzGjhkDPAwySzt0dhn1UgEP5JeVGLCBd7pbmoJ/D8Dl9Y5ficmtmlvemt6\n",
|
||||
"li10rF/bUZ9dlfWqfJbK2bTRatlAZ4E/ksrSDA+RpC/WE/izsQHWhVX2AbORhmGikY7g27CJWtUi\n",
|
||||
"+HFYw7WZYpFhFNBGC5XdTP4EscnYussxgj8cE8FFwGGh0WnDZjk/Fa45Pnda6GZiEX5PRvDnAp9p\n",
|
||||
"YP+xWGG4WM9oFPbeu8A7ThVioa1OKaIhbfLZzodU7BMnRKVn6T6V2S0KfF49m21Av1AULc58XU+l\n",
|
||||
"pz8EOAyLQCNLSAQ+2ip517cyXM8YaqxXGwqVbcAyT2IEP5iklMVYzO+NC46nI/i1mCVVxKaJvY7C\n",
|
||||
"No0Is7AIe1Jmez9MkBemzvt8bIbuxvA6poW/lalrzovgZ2LWVU8K/HEUbOhCL2QM5qGnI/jCAh9S\n",
|
||||
"QAdRfI2BbscF3ulu2oEnii5PWIW0wN9HkgIZiXn/nVZYCg3EOkxgokivpzKCH4qJeXrA9yHg0PDD\n",
|
||||
"PxqrCFqNa4B7CrzGdZiVlPXgl2NZONtTpQ22Y172REzgV1Fc4PfQmA8fK4SOy2wfhzWeT6Se+/kk\n",
|
||||
"tjqGaqcAABLySURBVNoDmE0zFXvvY0rrcKyhyhP4estEtpJjybwPYd2CdhE+GVNcA0Ox9+1vWM9k\n",
|
||||
"MInAD8qzr3Iss2i3eQTvPGd4CMu37wpbCBaNKntVOw3MbsQEu9oEr3YqBT5r0UzCxCjdQCwhWYXr\n",
|
||||
"AGqXmfgeFMo9Xxf+b6VS4JdhVkiHbx+EfhtmE6zHIvgimTQTw/kaFfh2OotvXNA93XuYS77AryCZ\n",
|
||||
"1zAc69VkPfj7gf45E8laThDfY+gcwZ+Drc9wHvC+1PYxQHsYO1mCWXajSHqgg3Ke5gYRHhLhg+G+\n",
|
||||
"C7zz3EKVe1V5dxdPk47g81gJ3JZZtzbNOkxgom+fjeCPBZZkjo8e/EnALTXOjSpLVbmq7quw64iT\n",
|
||||
"xbaTiOHjhAg+s/9WYENI56xq0YgwV4SDw0St2GAUEvgQmZ6B1RTKRvCTMaFux9IfR2C2R6yxczNW\n",
|
||||
"onkOnS2arMDPDNe1GpggwgHhfN3FbMIAaSbSPgObw/DLcE2RsSQN8IOYZTcaCwaqFaSbAXyeZEZ5\n",
|
||||
"FHi3aBynAWoKvCorVTmzxvH1LJrxVPrvYLbJBOBFdE55bJZ2kkJlO0jy+9diUXD2NW4hGbuo5cH/\n",
|
||||
"G/CecL61mCgVFc/ZWHG4v4XjSdkRk4GngvW0Bot6HwzF5sAahaXAB7AIPm3RdAi8CP3D63ucIPDA\n",
|
||||
"m7BJdZ0Q4VSRQguq1OJYrJ5SbEgJaw8cB/w1XF/6/RxDkhAQG/e40tgO8gV+DHAlNlGtP8m6BB7B\n",
|
||||
"O04DREujWeoJPGQmXIXB38eA19M6gV9HZdrnJMxW2kD1CD4K/CpyLJpQTvgMLE0zrnK1meICfyY2\n",
|
||||
"KWsNSQT/DhG+E64vDmg/jUXrHRO4Qq/m77HofQmVFs0qYHCInidjM5afIRH440nyzbN8HGsAusJx\n",
|
||||
"WFZUeuWwk7D6SVvC65qc2j9P4KM91imCDx79gSRrG7ThFo3jNEWHB98k7VRaNDeSjAvkCnxq21Dg\n",
|
||||
"zi48d5p1JBH8dkxgNmEikifw6Qj+CeDwnMG+UzCxmhfOV1jgg/i+H6srtJZE4Odhs3RfSSLwq4Cz\n",
|
||||
"sIldHYRsmkNVuYlKiyZmCg3D3vvl4ZAo8PMwiyOPcVjJ6q5wLPa5bSYR+HSFzzyBjxbNQyQWTRT4\n",
|
||||
"7GSnMVijFQfxx2ACv5m+JPAi8gMRWS0iedUF4z7fEJFHROQeEZnX2kt0nLoefD0qInhVblDlN+Gx\n",
|
||||
"eN6sRQP2Q1+k2qXnzl5HtQh+APkRfBSdW7Hfa7YU8kuAX2DCciImoEUj+FdjDecfMIGPg6wzsHLP\n",
|
||||
"6UXdn8YqYN6cOUfs7cTrjQK/hUTwZ5DMg1gTXvfzgWEiuWmM4+m6wB+OZVxtIXkv0gK/ClvRLGpg\n",
|
||||
"OoJ/BGuUJlDdg0/vHyexjcSybvqUB/9DrKRsLiJyLjBHVQ/GKuld1qJrc5zICiprzzTKOqpPVtqG\n",
|
||||
"pcfl5bD/gSo+cZPch/m/0Fng47Y0HRF88MEvAT6V2eclWAXMhViGSKEIPvQEPg18JkSha4FxqcVZ\n",
|
||||
"PodF9jFjZhVwe2aCWZa0RZMW+MkkcxRWY72O1ZgFlhfFjwNGi1RE2IVJFQlrp9KiOQyboEWwizaT\n",
|
||||
"TIIbG/ZHlZ2Y7XQ4VSwaKgU+TqaLAt93InhV/SvJFzCP87DSqajqbUCbiPTmehNOH0OVL6jyrS6c\n",
|
||||
"Iv4Q88RpFXBRlfLIf1Xl8i48b/Z8d6tSCne3YxHiRkxEoLMNtRmLeCO/BGaIcDJYeQFs4tTtmMDP\n",
|
||||
"o7hF8wIsz/6qcG3bsdmoMeJersqb4nKMwLXUL32dtmi2kMzGjemWYMJ+Wrje5WQEPowpDMFstGaj\n",
|
||||
"+CFY9dEo4iNSM27Xp/ZLD7SmBRus99af6oOs6aybtEXTtwS+AFOoXEh6JUllOcfpDcQfYl7BsGdV\n",
|
||||
"+X4PXw+YwPfDIvjNmP2RjeAvxnrQQIcV8j1seUUIA6Rh+91hW1GBfzFwdSb9cw1WGE6Dt96BKjeq\n",
|
||||
"8ss656xm0aQHa1djdtRCzLaZmTlHFM5baV7g20hmN8drGImVuEjPnE778GkPHsyyUyrrBqWpZtE8\n",
|
||||
"RS+yaFq1KG524Cc3Z1hELk7dXaCqC1r0/I5Ti6oCvw/pWH1Llb0ibCQj8KoVFTMj15KI/otIBotj\n",
|
||||
"HZxV2KScegJ/JlYaOc1aLPskr25QEaJFE0s7pAU+HcHH691DiOBFGKXKBsyeWYP1Sj7a5HWkBT4O\n",
|
||||
"smYjdOgs8NkIfkP4bKoNsuZZNHfTxQheROYD87tyjkgrBP5JktVcwKL3vJogqOrFLXg+x2mUWhbN\n",
|
||||
"viK7vGK6Pk4t7gamiDABE+m44MgKTKCfxESyqsCHJQWPx2yQNF0SeFV2iaCY+GU9+Cjw0XJaiAni\n",
|
||||
"MSKMA5aLMJak9s4dwPEiDMizz+qQjeBHkGTEpKkl8A+m9o8lpQWYptpROyg6F+uw+QRttMCiCYHv\n",
|
||||
"gnhfREpVd65DKyyaK7BFlhGRE4GNqrq69iGO06Nsw6ab98oIPvzfQAGBD7Ngb8Dyzw8k1NMPVstc\n",
|
||||
"VZaSsmhE+IecGaNxScFsg7cGE/5mI3iwRnQy1SP49diYx9MkHvw5mAUykyDwqqzDBqSbieLzLJp6\n",
|
||||
"EfzYzOO3Aq8Nt6NFcyhJFlHa0klbNKuw2jX9m7jullMkTfIX2Cy3Q0RkhYi8XUQuEpGLAFT1auAx\n",
|
||||
"EVkKXI7NqHOcXkMqV7k3CnwUokICH7gO+BhwXdpDV+3wuePAogBfxPLZ05yJWT1Z1mK1Zboi8Fuo\n",
|
||||
"jOCnAnviQuth8fnvhn2jB/9yYC+WmhgjeLAc/Y+KdFrcHAAR/l2EN+Y8lLVoRpAv8E8Dk0MVyIEk\n",
|
||||
"cxRizaN7wt04yDoZmCrCMKpbNLEuUq9Yx7WuRaOqbyiwz/vq7eM4+5h17B8WDZjAf53MpKMUUdTG\n",
|
||||
"h/+vxOqvEFauuoDQ686wBssc6arAx/9bsAYjbywBLNodiaV6XoEJfCy3gCrLRPh3bKzg/Jzjz8WE\n",
|
||||
"9WeZ7XkRfDWLZhIm1utr1Bvajs1qjfME5tB5kDVm0WwiyR6qGVCEBni+aqflHVtGqwZZHae383Ys\n",
|
||||
"D723kBX432B54UV4AKvMmBeFQyIwh2CTds4KdVhmYLn9PyBnwhJJ5NxViyb+34Jl5WTr9wMWJYuw\n",
|
||||
"AhPeWzGBH0GSEQRhXV0RpgfvG7BBWez15bkQjVo0B1GZQZMlzjqO6d8H0zlNcmx4nhjBF/HhZwN/\n",
|
||||
"FGFgrWJ2XcFLFTjPCUIO+u59fR0pKgRelf9WrbkiVAfB5jgqLXiZx2O1ynlYHZ17sFmrVwGfV+Xf\n",
|
||||
"qghKKwR+C1bXfg+JwFeL4MF8+Kuwxi1aNB25/6psw3of/5A57kSskZos0rF2aqSoRbMKi8r/E/hK\n",
|
||||
"jWuMHvx4bK7A8zLnW4/1EPphK5BtpUqqpAjDwqAy2Nq2B9LaNXQrcIF3nH1DnNRUrYZ9V9mMZcQ8\n",
|
||||
"DPwfllr5J1W+V+OYNdhg9Joa+9RjK0kUvwVLL8yN4AP/io3dLcMi6bQHH7kMK4A2MLXtJOAmrN7M\n",
|
||||
"C6BjkhQUtGhCg78BuEk1mW+QQ/Tgx2MFzA7HIvSN4TzPYu/3ptBwxh5UBeH6rgG+HTYdGf5nyzS3\n",
|
||||
"DLdoHGcfoMoeEbZRe5Z4V4gCfyUWxc8FPlLnmEeB/2rB6ltpHx5qRPCqVqlThD1YBL+ajMCrskSE\n",
|
||||
"e4HXEcYSMIH/Ojbn5sQwEPtKbH3cohYN2HjE7XVeU4zgh2ONyqsIOfKpfdaRzAfqZNEEv/3SsM8p\n",
|
||||
"4f5R4eFx1FjusSt4BO84+46jVJPMjRazGfOoH1ZlhSpvDlP3q6LKRlXe1cXnTQt8jORrWTSR9Zge\n",
|
||||
"zSRn/V7gq8BHRJCQgngC5tvfimXhfA6zTiDfoom1aSoIM3R31rm2ONFpPCbws3PO1U7SG8uzaE7A\n",
|
||||
"Jqa9FAusp2ER/EqSejgtxwXecfYRqTov3cFmLFrslsiwBnFwldT/WhYN0JHKugzL4snr1fwBS2V8\n",
|
||||
"EVblcnXIlb8t3P8B5sfHhTfyIvhsFk1RYgQ/gaRCZVbg11Ep8FmL5u3Ad8Pcg1uwmaqzsFx/t2gc\n",
|
||||
"x2mIzdhqTD2dGtqQRZNhGTAxzyIKGTdfwQZDJxKWyVNljQgfwgZK34iJcFbgh2GNXZ5FU4S4MPt4\n",
|
||||
"zEJ6OOdc60gW/KiwaMLM4QtILJm/YYL/GMnM4wrC5LTRqh119JvCI3jH2T/ZjAlRT9NVga81wPsz\n",
|
||||
"LPPlDapJGWdVvh6ybVZg1keHwIdsnp1YBN7sgPYOTIQ1NJiP0DmtslYEfz62rm8s4XILcDrWG+hY\n",
|
||||
"aEWE14rwcxHuxno972zyejvwCN5x9k/2lcAvhI7aMRtJVlUqwjLy/Xego0571bUpqBT4tJjH1M1m\n",
|
||||
"c823Y1VzY/roYqygW5rVJIOsW4GhIhwJfAZ4IVZaInInlm55P/Z6Dw/bP4FlPH0TuLvemEkRXOAd\n",
|
||||
"Z/9kAfR83r8qfyUsahIE+fgGDr+RrrkKK7CB5d0ZcdwMXcoM2o6Jd+xdfJnOFXQvJdHTrViJhndi\n",
|
||||
"kf0LVZMlIVXZIcKd2OIjQmLRHAR8X7VLi9tU4ALvOPshqvzvvr6GRlFlEWHFpSZZga3FujGzfQvU\n",
|
||||
"zZSpRZyUtho6Gq4KgkUUiR78kcCn0uKe4mXhOk/AVtIagdlILS3U6B684zj7Cyuwgcw8gW82gwaS\n",
|
||||
"SWlFJ4BtxWyio7GJUZ1QZX0YTI4e/CxgWatLFrjAO46zvxAtmqzAb6b5DJo4ULuL4tH1VqyUwuOx\n",
|
||||
"imYN1mJ58LMoXouoMC7wjuPsL6zE8ujzIvimBT6wneIR/DZssPeOAvtuwZYw/P/t3VuIVVUcx/Hv\n",
|
||||
"r9IHMwgJxi4D+uDD+OQQDJFI8yT60oWiFAIfeoju0EMiSPrQgwVBD0EEGViEJUViEGRBRRAkkrdS\n",
|
||||
"KcEBLS8DRSQSKP17WOvk8Xgue2b2OXtm+/vAxj1775mz/LP8u2fv9V9rBMqvi3CCN7O6+J00dUHZ\n",
|
||||
"j2hgagm+UXvQM8E3rVUwhu/gzczay5OHneHaBP8ezPil80Wm9ogGes9x0zBJmjCt9ATvUTRmVien\n",
|
||||
"aEnwEXxfws/9hrw8YgEXSENUD/W6MJskvZAt/RGNE7yZ1ck1Cb4MEVOqKj0FPDyFQqVGVawTvJlZ\n",
|
||||
"Fx+RXrZWJo+6+WwK3zJJWmi89HmDnODNrDYi+LjqNkzDJH14/g5+yWpmVrWz9GlaZ0X0Za3Xaz9I\n",
|
||||
"iohonb/BzOy6lhdEXxDRfijnTHJnoTt4SWskHZf0q6SNbc6PS/pL0oG8bZ5OY8zMrjcR/NMpuc9U\n",
|
||||
"zwQv6UbgTdI0ncuB9ZJG2lz6bUSM5u2VkttpLSSNV92GunAsy+V4zh5F7uDHgBMRMRERl4APSYvb\n",
|
||||
"tvLjl8Ear7oBNTJedQNqZrzqBlhSJMHfSRrX2XA6H2sWwL2SDkn6XNJyzMysUkWGSRZ5C/sjMBwR\n",
|
||||
"FyWtBXZzZYVzMzOrQM9RNJLuAbZGxJr89Sbg34h4tcv3nATujog/mo4NZriOmVnNTHcUTZE7+P3A\n",
|
||||
"MklLSLO1PQasb75A0hBwPiJC0hjpP46r3gp7iKSZ2WD1TPARcVnSs8AXpLmWt0fEMUlP5vNvA48A\n",
|
||||
"T0m6TJp1bV0f22xmZgUMrNDJzMwGayBTFfQqlLLuJE1IOpyLyPblY4skfSnpF0l7Jd1adTtnK0nv\n",
|
||||
"Sjon6UjTsY7xk7Qp99XjklZX0+rZqUMst0o63VTouLbpnGPZhaRhSV9L+lnST5Kez8fL6Z8R0deN\n",
|
||||
"9FjnBLAEmEdaNX2k359bp400jeiilmOvAS/l/Y3AtqrbOVs3YBUwChzpFT9SMd/B3FeX5L57Q9V/\n",
|
||||
"h9mydYjlFuDFNtc6lr3juRhYkfcXkuacHymrfw7iDr5ooZR11/qS+n5gR97fATw42ObMHRHxHfBn\n",
|
||||
"y+FO8XsA2BkRlyJigvQPaGwQ7ZwLOsQS2hc6OpY9RMTZiDiY9y8Ax0h1RqX0z0Ek+CKFUtZdAF9J\n",
|
||||
"2i+psfDAUEQ0lhA7BwxV07Q5q1P87uDq+cTdX4t5Lhc6bm96nOBYTkEeqTgK/EBJ/XMQCd5vcWdu\n",
|
||||
"ZUSMAmuBZyStaj4Z6Xc3x3maCsTPse3uLWApsIK0JurrXa51LNuQtBD4BHghIv5uPjeT/jmIBP8b\n",
|
||||
"MNz09TAVr7gy10TEmfznJGnx4DHgnKTFAJJup/iK75Z0il9rf70rH7MOIuJ8ZMA7XHlk4FgWIGke\n",
|
||||
"Kbm/HxG78+FS+ucgEvz/hVKS5pMKpfYM4HNrQdICSbfk/ZuB1cARUgw35Ms2kKaHsOI6xW8PsE7S\n",
|
||||
"fElLgWXAvgraN2fkBNTwEKl/gmPZkyQB24GjEfFG06lS+mffl+yLDoVS/f7cGhkCPk39gJuADyJi\n",
|
||||
"r6T9wC5JTwATwKPVNXF2k7QTuA+4TdIp4GVgG23iFxFHJe0CjgKXgafznanRNpZbgHFJK0iPCk4C\n",
|
||||
"jSJIx7K3lcDjwGFJB/KxTZTUP13oZGZWU16T1cysppzgzcxqygnezKymnODNzGrKCd7MrKac4M3M\n",
|
||||
"asoJ3sysppzgzcxq6j+vUsbacqJa4gAAAABJRU5ErkJggg==\n"
|
||||
],
|
||||
"text/plain": [
|
||||
"<matplotlib.figure.Figure at 0x7fbb37f207d0>"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"plot(np.vstack([train_loss, scratch_train_loss]).clip(0, 4).T)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's take a look at the testing accuracy after running 200 iterations. Note that we are running a classification task of 5 classes, thus a chance accuracy is 20%. As we will reasonably expect, the finetuning result will be much better than the one from training from scratch. Let's see."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Accuracy for fine-tuning: 0.570000001788\n",
|
||||
"Accuracy for training from scratch: 0.224000000954\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"test_iters = 10\n",
|
||||
"accuracy = 0\n",
|
||||
"scratch_accuracy = 0\n",
|
||||
"for it in arange(test_iters):\n",
|
||||
" solver.test_nets[0].forward()\n",
|
||||
" accuracy += solver.test_nets[0].blobs['accuracy'].data\n",
|
||||
" scratch_solver.test_nets[0].forward()\n",
|
||||
" scratch_accuracy += scratch_solver.test_nets[0].blobs['accuracy'].data\n",
|
||||
"accuracy /= test_iters\n",
|
||||
"scratch_accuracy /= test_iters\n",
|
||||
"print 'Accuracy for fine-tuning:', accuracy\n",
|
||||
"print 'Accuracy for training from scratch:', scratch_accuracy"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Huzzah! So we did finetuning and it is awesome. Let's take a look at what kind of results we are able to get with a longer, more complete run of the style recognition dataset. Note: the below URL might be occassionally down because it is run on a research machine.\n",
|
||||
"\n",
|
||||
"http://demo.vislab.berkeleyvision.org/"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"description": "Fine-tune the ImageNet-trained CaffeNet on new data.",
|
||||
"example_name": "Fine-tuning for Style Recognition",
|
||||
"include_in_docs": true,
|
||||
"kernelspec": {
|
||||
"display_name": "Python 2",
|
||||
"language": "python",
|
||||
"name": "python2"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 2
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython2",
|
||||
"version": "2.7.9"
|
||||
},
|
||||
"priority": 4
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0
|
||||
}
|
File diff hidden because one or more lines are too long
|
@ -1,12 +1,11 @@
|
|||
name: "CIFAR10_full_deploy"
|
||||
# N.B. input image must be in CIFAR-10 format
|
||||
# as described at http://www.cs.toronto.edu/~kriz/cifar.html
|
||||
input: "data"
|
||||
input_shape {
|
||||
dim: 1
|
||||
dim: 3
|
||||
dim: 32
|
||||
dim: 32
|
||||
layer {
|
||||
name: "data"
|
||||
type: "Input"
|
||||
top: "data"
|
||||
input_param { shape: { dim: 1 dim: 3 dim: 32 dim: 32 } }
|
||||
}
|
||||
layer {
|
||||
name: "conv1"
|
||||
|
|
|
@ -1,10 +1,9 @@
|
|||
name: "CIFAR10_quick_test"
|
||||
input: "data"
|
||||
input_shape {
|
||||
dim: 1
|
||||
dim: 3
|
||||
dim: 32
|
||||
dim: 32
|
||||
layer {
|
||||
name: "data"
|
||||
type: "Input"
|
||||
top: "data"
|
||||
input_param { shape: { dim: 1 dim: 3 dim: 32 dim: 32 } }
|
||||
}
|
||||
layer {
|
||||
name: "conv1"
|
||||
|
|
|
@ -159,7 +159,7 @@ std::vector<float> Classifier::Predict(const cv::Mat& img) {
|
|||
|
||||
Preprocess(img, &input_channels);
|
||||
|
||||
net_->ForwardPrefilled();
|
||||
net_->Forward();
|
||||
|
||||
/* Copy the output layer to a std::vector */
|
||||
Blob<float>* output_layer = net_->output_blobs()[0];
|
||||
|
|
|
@ -14,9 +14,9 @@ Let's fine-tune the BVLC-distributed CaffeNet model on a different dataset, [Fli
|
|||
## Explanation
|
||||
|
||||
The Flickr-sourced images of the Style dataset are visually very similar to the ImageNet dataset, on which the `bvlc_reference_caffenet` was trained.
|
||||
Since that model works well for object category classification, we'd like to use it architecture for our style classifier.
|
||||
Since that model works well for object category classification, we'd like to use this architecture for our style classifier.
|
||||
We also only have 80,000 images to train on, so we'd like to start with the parameters learned on the 1,000,000 ImageNet images, and fine-tune as needed.
|
||||
If we give provide the `weights` argument to the `caffe train` command, the pretrained weights will be loaded into our model, matching layers by name.
|
||||
If we provide the `weights` argument to the `caffe train` command, the pretrained weights will be loaded into our model, matching layers by name.
|
||||
|
||||
Because we are predicting 20 classes instead of 1,000, we do need to change the last layer in the model.
|
||||
Therefore, we change the name of the last layer from `fc8` to `fc8_flickr` in our prototxt.
|
||||
|
|
|
@ -1,15 +0,0 @@
|
|||
train_net: "examples/hdf5_classification/nonlinear_auto_train.prototxt"
|
||||
test_net: "examples/hdf5_classification/nonlinear_auto_test.prototxt"
|
||||
test_iter: 250
|
||||
test_interval: 1000
|
||||
base_lr: 0.01
|
||||
lr_policy: "step"
|
||||
gamma: 0.1
|
||||
stepsize: 5000
|
||||
display: 1000
|
||||
max_iter: 10000
|
||||
momentum: 0.9
|
||||
weight_decay: 0.0005
|
||||
snapshot: 10000
|
||||
snapshot_prefix: "examples/hdf5_classification/data/train"
|
||||
solver_mode: CPU
|
|
@ -1,15 +0,0 @@
|
|||
train_net: "examples/hdf5_classification/logreg_auto_train.prototxt"
|
||||
test_net: "examples/hdf5_classification/logreg_auto_test.prototxt"
|
||||
test_iter: 250
|
||||
test_interval: 1000
|
||||
base_lr: 0.01
|
||||
lr_policy: "step"
|
||||
gamma: 0.1
|
||||
stepsize: 5000
|
||||
display: 1000
|
||||
max_iter: 10000
|
||||
momentum: 0.9
|
||||
weight_decay: 0.0005
|
||||
snapshot: 10000
|
||||
snapshot_prefix: "examples/hdf5_classification/data/train"
|
||||
solver_mode: CPU
|
|
@ -1,10 +1,9 @@
|
|||
name: "LeNet"
|
||||
input: "data"
|
||||
input_shape {
|
||||
dim: 64
|
||||
dim: 1
|
||||
dim: 28
|
||||
dim: 28
|
||||
layer {
|
||||
name: "data"
|
||||
type: "Input"
|
||||
top: "data"
|
||||
input_param { shape: { dim: 64 dim: 1 dim: 28 dim: 28 } }
|
||||
}
|
||||
layer {
|
||||
name: "conv1"
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
# The train/test net protocol buffer definition
|
||||
train_net: "examples/mnist/lenet_auto_train.prototxt"
|
||||
test_net: "examples/mnist/lenet_auto_test.prototxt"
|
||||
train_net: "mnist/lenet_auto_train.prototxt"
|
||||
test_net: "mnist/lenet_auto_test.prototxt"
|
||||
# test_iter specifies how many forward passes the test should carry out.
|
||||
# In the case of MNIST, we have test batch size 100 and 100 test iterations,
|
||||
# covering the full 10,000 testing images.
|
||||
|
@ -21,4 +21,4 @@ display: 100
|
|||
max_iter: 10000
|
||||
# snapshot intermediate results
|
||||
snapshot: 5000
|
||||
snapshot_prefix: "examples/mnist/lenet"
|
||||
snapshot_prefix: "mnist/lenet"
|
||||
|
|
|
@ -0,0 +1,119 @@
|
|||
#!/usr/bin/env sh
|
||||
set -e
|
||||
# The following example allows for the MNIST example (using LeNet) to be
|
||||
# trained using the caffe docker image instead of building from source.
|
||||
#
|
||||
# The GPU-enabled version of Caffe can be used, assuming that nvidia-docker
|
||||
# is installed, and the GPU-enabled Caffe image has been built.
|
||||
# Setting the GPU environment variable to 1 will enable the use of nvidia-docker.
|
||||
# e.g.
|
||||
# GPU=1 ./examples/mnist/train_lenet_docker.sh [ADDITIONAL_CAFFE_ARGS]
|
||||
#
|
||||
# Any arguments following the script are passed directly to caffe
|
||||
# when training the network.
|
||||
#
|
||||
# The steps that are performed by the script are as follows:
|
||||
# 1. The MNIST data set is downloaded
|
||||
# (see data/mnist/get_mnist.sh)
|
||||
# 2. An LMDB database is created from the downloaded data
|
||||
# (see examples/mnist/create_mnist.sh)
|
||||
# 3. A caffe network based on the LeNet solver is trained.
|
||||
# (see examples/mnist/lenet_solver.prototxt)
|
||||
#
|
||||
# For each of these, a step is executed to ensure that certain prerequisites
|
||||
# are available, after which a command that actually performs the work is
|
||||
# executed.
|
||||
#
|
||||
# In order to provide additional flexibility, the following shell (environment)
|
||||
# variables can be used to control the execution of each of the phases:
|
||||
#
|
||||
# DOWNLOAD_DATA: Enable (1) or disable (0) the downloading of the MNIST dataset
|
||||
# CREATE_LMDB: Enable (1) or disable (0) the creation of the LMDB database
|
||||
# TRAIN: Enable (1) or disable (0) the training of the LeNet network.
|
||||
#
|
||||
# As an example, assuming that the data set has been downloaded, and an LMDB
|
||||
# database created, the following command can be used to train the LeNet
|
||||
# network with GPU computing enabled.
|
||||
#
|
||||
# DOWNLOAD_DATA=0 CREATE_LMDB=0 GPU=1 ./examples/mnist/train_lenet_docker.sh
|
||||
#
|
||||
|
||||
|
||||
if [ x"$(uname -s)" != x"Linux" ]
|
||||
then
|
||||
echo ""
|
||||
echo "This script is designed to run on Linux."
|
||||
echo "There may be problems with the way Docker mounts host volumes on other"
|
||||
echo "systems which will cause the docker commands to fail."
|
||||
echo ""
|
||||
read -p "Press [ENTER] to continue..." key
|
||||
echo ""
|
||||
fi
|
||||
|
||||
|
||||
# Check if GPU mode has been enabled and set the docker executable accordingly
|
||||
if [ ${GPU:-0} -eq 1 ]
|
||||
then
|
||||
DOCKER_CMD=nvidia-docker
|
||||
IMAGE=caffe:gpu
|
||||
else
|
||||
DOCKER_CMD=docker
|
||||
IMAGE=caffe:cpu
|
||||
fi
|
||||
echo "Using $DOCKER_CMD to launch $IMAGE"
|
||||
|
||||
# On non-Linux systems, the Docker host is typically a virtual machine.
|
||||
# This means that the user and group IDs may be different.
|
||||
# On OS X, for example, the user and group are 1000 and 50, respectively.
|
||||
if [ x"$(uname -s)" != x"Linux" ]
|
||||
then
|
||||
CUID=1000
|
||||
CGID=50
|
||||
else
|
||||
CUID=$(id -u)
|
||||
CGID=$(id -g)
|
||||
fi
|
||||
|
||||
# Define some helper variables to make the running of the actual docker
|
||||
# commands less verbose.
|
||||
# Note:
|
||||
# -u $CUID:$CGID runs the docker image as the current user to ensure
|
||||
# that the file permissions are compatible with the
|
||||
# host system. The variables CUID and CGID have been
|
||||
# set above depending on the host operating system.
|
||||
# --volume $(pwd):/workspace mounts the current directory as the docker volume
|
||||
# /workspace
|
||||
# --workdir /workspace Ensures that the docker container starts in the right
|
||||
# working directory
|
||||
DOCKER_OPTIONS="--rm -ti -u $CUID:$CGID --volume=$(pwd):/workspace --workdir=/workspace"
|
||||
DOCKER_RUN="$DOCKER_CMD run $DOCKER_OPTIONS $IMAGE"
|
||||
|
||||
# Download the data
|
||||
if [ ${DOWNLOAD_DATA:-1} -eq 1 ]
|
||||
then
|
||||
$DOCKER_RUN bash -c "mkdir -p ./data/mnist;
|
||||
cp -ru \$CAFFE_ROOT/data/mnist/get_mnist.sh ./data/mnist/"
|
||||
$DOCKER_RUN ./data/mnist/get_mnist.sh
|
||||
fi
|
||||
|
||||
# Create the LMDB database
|
||||
if [ ${CREATE_LMDB:-1} -eq 1 ]
|
||||
then
|
||||
$DOCKER_RUN bash -c "mkdir -p ./examples/mnist;
|
||||
cp -ru \$CAFFE_ROOT/examples/mnist/create_mnist.sh ./examples/mnist/;
|
||||
sed -i s#BUILD=build#BUILD=\$CAFFE_ROOT/build## ./examples/mnist/create_mnist.sh"
|
||||
$DOCKER_RUN ./examples/mnist/create_mnist.sh
|
||||
fi
|
||||
|
||||
# Train the network
|
||||
if [ ${TRAIN:-1} -eq 1 ]
|
||||
then
|
||||
$DOCKER_RUN bash -c "cp \$CAFFE_ROOT/examples/mnist/lenet_solver.prototxt ./examples/mnist/;
|
||||
cp \$CAFFE_ROOT/examples/mnist/lenet_train_test.prototxt ./examples/mnist/"
|
||||
# Ensure that the solver_mode is compatible with the desired GPU mode.
|
||||
if [ ${GPU:-0} -eq 0 ]
|
||||
then
|
||||
$DOCKER_RUN sed -i 's#solver_mode: GPU#solver_mode: CPU##' ./examples/mnist/lenet_solver.prototxt
|
||||
fi
|
||||
$DOCKER_RUN caffe train --solver=examples/mnist/lenet_solver.prototxt $*
|
||||
fi
|
|
@ -5494,48 +5494,47 @@
|
|||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"1,2c1\r\n",
|
||||
"1,2c1,2\r\n",
|
||||
"< # Fully convolutional network version of CaffeNet.\r\n",
|
||||
"< name: \"CaffeNetConv\"\r\n",
|
||||
"---\r\n",
|
||||
"> name: \"CaffeNet\"\r\n",
|
||||
"4c3\r\n",
|
||||
"< input_dim: 1\r\n",
|
||||
"> input: \"data\"\r\n",
|
||||
"7,11c7\r\n",
|
||||
"< input_param {\r\n",
|
||||
"< # initial shape for a fully convolutional network:\r\n",
|
||||
"< # the shape can be set for each input by reshape.\r\n",
|
||||
"< shape: { dim: 1 dim: 3 dim: 451 dim: 451 }\r\n",
|
||||
"< }\r\n",
|
||||
"---\r\n",
|
||||
"> input_dim: 10\r\n",
|
||||
"6,7c5,6\r\n",
|
||||
"< input_dim: 451\r\n",
|
||||
"< input_dim: 451\r\n",
|
||||
"---\r\n",
|
||||
"> input_dim: 227\r\n",
|
||||
"> input_dim: 227\r\n",
|
||||
"152,153c151,152\r\n",
|
||||
"> input_param { shape: { dim: 10 dim: 3 dim: 227 dim: 227 } }\r\n",
|
||||
"157,158c153,154\r\n",
|
||||
"< name: \"fc6-conv\"\r\n",
|
||||
"< type: \"Convolution\"\r\n",
|
||||
"---\r\n",
|
||||
"> name: \"fc6\"\r\n",
|
||||
"> type: \"InnerProduct\"\r\n",
|
||||
"155,156c154,155\r\n",
|
||||
"160,161c156,157\r\n",
|
||||
"< top: \"fc6-conv\"\r\n",
|
||||
"< convolution_param {\r\n",
|
||||
"---\r\n",
|
||||
"> top: \"fc6\"\r\n",
|
||||
"> inner_product_param {\r\n",
|
||||
"158d156\r\n",
|
||||
"163d158\r\n",
|
||||
"< kernel_size: 6\r\n",
|
||||
"164,165c162,163\r\n",
|
||||
"169,170c164,165\r\n",
|
||||
"< bottom: \"fc6-conv\"\r\n",
|
||||
"< top: \"fc6-conv\"\r\n",
|
||||
"---\r\n",
|
||||
"> bottom: \"fc6\"\r\n",
|
||||
"> top: \"fc6\"\r\n",
|
||||
"170,171c168,169\r\n",
|
||||
"175,176c170,171\r\n",
|
||||
"< bottom: \"fc6-conv\"\r\n",
|
||||
"< top: \"fc6-conv\"\r\n",
|
||||
"---\r\n",
|
||||
"> bottom: \"fc6\"\r\n",
|
||||
"> top: \"fc6\"\r\n",
|
||||
"177,181c175,179\r\n",
|
||||
"182,186c177,181\r\n",
|
||||
"< name: \"fc7-conv\"\r\n",
|
||||
"< type: \"Convolution\"\r\n",
|
||||
"< bottom: \"fc6-conv\"\r\n",
|
||||
|
@ -5547,21 +5546,21 @@
|
|||
"> bottom: \"fc6\"\r\n",
|
||||
"> top: \"fc7\"\r\n",
|
||||
"> inner_product_param {\r\n",
|
||||
"183d180\r\n",
|
||||
"188d182\r\n",
|
||||
"< kernel_size: 1\r\n",
|
||||
"189,190c186,187\r\n",
|
||||
"194,195c188,189\r\n",
|
||||
"< bottom: \"fc7-conv\"\r\n",
|
||||
"< top: \"fc7-conv\"\r\n",
|
||||
"---\r\n",
|
||||
"> bottom: \"fc7\"\r\n",
|
||||
"> top: \"fc7\"\r\n",
|
||||
"195,196c192,193\r\n",
|
||||
"200,201c194,195\r\n",
|
||||
"< bottom: \"fc7-conv\"\r\n",
|
||||
"< top: \"fc7-conv\"\r\n",
|
||||
"---\r\n",
|
||||
"> bottom: \"fc7\"\r\n",
|
||||
"> top: \"fc7\"\r\n",
|
||||
"202,206c199,203\r\n",
|
||||
"207,211c201,205\r\n",
|
||||
"< name: \"fc8-conv\"\r\n",
|
||||
"< type: \"Convolution\"\r\n",
|
||||
"< bottom: \"fc7-conv\"\r\n",
|
||||
|
@ -5573,9 +5572,9 @@
|
|||
"> bottom: \"fc7\"\r\n",
|
||||
"> top: \"fc8\"\r\n",
|
||||
"> inner_product_param {\r\n",
|
||||
"208d204\r\n",
|
||||
"213d206\r\n",
|
||||
"< kernel_size: 1\r\n",
|
||||
"214c210\r\n",
|
||||
"219c212\r\n",
|
||||
"< bottom: \"fc8-conv\"\r\n",
|
||||
"---\r\n",
|
||||
"> bottom: \"fc8\"\r\n"
|
||||
|
|
|
@ -1,11 +1,14 @@
|
|||
# Fully convolutional network version of CaffeNet.
|
||||
name: "CaffeNetConv"
|
||||
input: "data"
|
||||
input_shape {
|
||||
dim: 1
|
||||
dim: 3
|
||||
dim: 451
|
||||
dim: 451
|
||||
layer {
|
||||
name: "data"
|
||||
type: "Input"
|
||||
top: "data"
|
||||
input_param {
|
||||
# initial shape for a fully convolutional network:
|
||||
# the shape can be set for each input by reshape.
|
||||
shape: { dim: 1 dim: 3 dim: 451 dim: 451 }
|
||||
}
|
||||
}
|
||||
layer {
|
||||
name: "conv1"
|
||||
|
|
|
@ -1,11 +1,10 @@
|
|||
# Simple single-layer network to showcase editing model parameters.
|
||||
name: "convolution"
|
||||
input: "data"
|
||||
input_shape {
|
||||
dim: 1
|
||||
dim: 1
|
||||
dim: 100
|
||||
dim: 100
|
||||
layer {
|
||||
name: "data"
|
||||
type: "Input"
|
||||
top: "data"
|
||||
input_param { shape: { dim: 1 dim: 1 dim: 100 dim: 100 } }
|
||||
}
|
||||
layer {
|
||||
name: "conv"
|
||||
|
|
File diff hidden because one or more lines are too long
|
@ -0,0 +1,216 @@
|
|||
# imports
|
||||
import json
|
||||
import time
|
||||
import pickle
|
||||
import scipy.misc
import scipy.sparse  # needed for csr_matrix in load_pascal_annotation
|
||||
import skimage.io
|
||||
import caffe
|
||||
|
||||
import numpy as np
|
||||
import os.path as osp
|
||||
|
||||
from xml.dom import minidom
|
||||
from random import shuffle
|
||||
from threading import Thread
|
||||
from PIL import Image
|
||||
|
||||
from tools import SimpleTransformer
|
||||
|
||||
|
||||
class PascalMultilabelDataLayerSync(caffe.Layer):
|
||||
|
||||
"""
|
||||
This is a simple synchronous datalayer for training a multilabel model on
|
||||
PASCAL.
|
||||
"""
|
||||
|
||||
def setup(self, bottom, top):
|
||||
|
||||
self.top_names = ['data', 'label']
|
||||
|
||||
# === Read input parameters ===
|
||||
|
||||
# params is a python dictionary with layer parameters.
|
||||
params = eval(self.param_str)
|
||||
|
||||
# Check the parameters for validity.
|
||||
check_params(params)
|
||||
|
||||
# store input as class variables
|
||||
self.batch_size = params['batch_size']
|
||||
|
||||
# Create a batch loader to load the images.
|
||||
self.batch_loader = BatchLoader(params, None)
|
||||
|
||||
# === reshape tops ===
|
||||
# since we use a fixed input image size, we can shape the data layer
|
||||
# once. Otherwise, we'd have to do it in the reshape call.
|
||||
top[0].reshape(
|
||||
self.batch_size, 3, params['im_shape'][0], params['im_shape'][1])
|
||||
# Note the 20 channels (because PASCAL has 20 classes.)
|
||||
top[1].reshape(self.batch_size, 20)
|
||||
|
||||
print_info("PascalMultilabelDataLayerSync", params)
|
||||
|
||||
def forward(self, bottom, top):
|
||||
"""
|
||||
Load data.
|
||||
"""
|
||||
for itt in range(self.batch_size):
|
||||
# Use the batch loader to load the next image.
|
||||
im, multilabel = self.batch_loader.load_next_image()
|
||||
|
||||
# Add directly to the caffe data layer
|
||||
top[0].data[itt, ...] = im
|
||||
top[1].data[itt, ...] = multilabel
|
||||
|
||||
def reshape(self, bottom, top):
|
||||
"""
|
||||
There is no need to reshape the data, since the input is of fixed size
|
||||
(rows and columns)
|
||||
"""
|
||||
pass
|
||||
|
||||
def backward(self, top, propagate_down, bottom):
|
||||
"""
|
||||
This layer does not back propagate.
|
||||
"""
|
||||
pass
|
||||
|
||||
|
||||
class BatchLoader(object):
|
||||
|
||||
"""
|
||||
This class abstracts away the loading of images.
|
||||
Images can either be loaded singly, or in a batch. The latter is used for
|
||||
the asynchronous data layer to preload batches while other processing is
|
||||
performed.
|
||||
"""
|
||||
|
||||
def __init__(self, params, result):
|
||||
self.result = result
|
||||
self.batch_size = params['batch_size']
|
||||
self.pascal_root = params['pascal_root']
|
||||
self.im_shape = params['im_shape']
|
||||
# get list of image indexes.
|
||||
list_file = params['split'] + '.txt'
|
||||
self.indexlist = [line.rstrip('\n') for line in open(
|
||||
osp.join(self.pascal_root, 'ImageSets/Main', list_file))]
|
||||
self._cur = 0 # current image
|
||||
# this class does some simple data-manipulations
|
||||
self.transformer = SimpleTransformer()
|
||||
|
||||
print "BatchLoader initialized with {} images".format(
|
||||
len(self.indexlist))
|
||||
|
||||
def load_next_image(self):
|
||||
"""
|
||||
Load the next image in a batch.
|
||||
"""
|
||||
# Did we finish an epoch?
|
||||
if self._cur == len(self.indexlist):
|
||||
self._cur = 0
|
||||
shuffle(self.indexlist)
|
||||
|
||||
# Load an image
|
||||
index = self.indexlist[self._cur] # Get the image index
|
||||
image_file_name = index + '.jpg'
|
||||
im = np.asarray(Image.open(
|
||||
osp.join(self.pascal_root, 'JPEGImages', image_file_name)))
|
||||
im = scipy.misc.imresize(im, self.im_shape) # resize
|
||||
|
||||
# do a simple horizontal flip as data augmentation
|
||||
flip = np.random.choice(2)*2-1
|
||||
im = im[:, ::flip, :]
|
||||
|
||||
# Load and prepare ground truth
|
||||
multilabel = np.zeros(20).astype(np.float32)
|
||||
anns = load_pascal_annotation(index, self.pascal_root)
|
||||
for label in anns['gt_classes']:
|
||||
# in the multilabel problem we don't care how MANY instances
|
||||
# there are of each class, only whether they are present.
|
||||
# The "-1" is because we are not interested in the background
|
||||
# class.
|
||||
multilabel[label - 1] = 1
|
||||
|
||||
self._cur += 1
|
||||
return self.transformer.preprocess(im), multilabel
|
||||
|
||||
|
||||
def load_pascal_annotation(index, pascal_root):
|
||||
"""
|
||||
This code is borrowed from Ross Girshick's FAST-RCNN code
|
||||
(https://github.com/rbgirshick/fast-rcnn).
|
||||
It parses the PASCAL .xml metadata files.
|
||||
See publication for further details: (http://arxiv.org/abs/1504.08083).
|
||||
|
||||
Thanks Ross!
|
||||
|
||||
"""
|
||||
classes = ('__background__', # always index 0
|
||||
'aeroplane', 'bicycle', 'bird', 'boat',
|
||||
'bottle', 'bus', 'car', 'cat', 'chair',
|
||||
'cow', 'diningtable', 'dog', 'horse',
|
||||
'motorbike', 'person', 'pottedplant',
|
||||
'sheep', 'sofa', 'train', 'tvmonitor')
|
||||
class_to_ind = dict(zip(classes, xrange(21)))
|
||||
|
||||
filename = osp.join(pascal_root, 'Annotations', index + '.xml')
|
||||
# print 'Loading: {}'.format(filename)
|
||||
|
||||
def get_data_from_tag(node, tag):
|
||||
return node.getElementsByTagName(tag)[0].childNodes[0].data
|
||||
|
||||
with open(filename) as f:
|
||||
data = minidom.parseString(f.read())
|
||||
|
||||
objs = data.getElementsByTagName('object')
|
||||
num_objs = len(objs)
|
||||
|
||||
boxes = np.zeros((num_objs, 4), dtype=np.uint16)
|
||||
gt_classes = np.zeros((num_objs), dtype=np.int32)
|
||||
overlaps = np.zeros((num_objs, 21), dtype=np.float32)
|
||||
|
||||
# Load object bounding boxes into a data frame.
|
||||
for ix, obj in enumerate(objs):
|
||||
# Make pixel indexes 0-based
|
||||
x1 = float(get_data_from_tag(obj, 'xmin')) - 1
|
||||
y1 = float(get_data_from_tag(obj, 'ymin')) - 1
|
||||
x2 = float(get_data_from_tag(obj, 'xmax')) - 1
|
||||
y2 = float(get_data_from_tag(obj, 'ymax')) - 1
|
||||
cls = class_to_ind[
|
||||
str(get_data_from_tag(obj, "name")).lower().strip()]
|
||||
boxes[ix, :] = [x1, y1, x2, y2]
|
||||
gt_classes[ix] = cls
|
||||
overlaps[ix, cls] = 1.0
|
||||
|
||||
overlaps = scipy.sparse.csr_matrix(overlaps)
|
||||
|
||||
return {'boxes': boxes,
|
||||
'gt_classes': gt_classes,
|
||||
'gt_overlaps': overlaps,
|
||||
'flipped': False,
|
||||
'index': index}
|
||||
|
||||
|
||||
def check_params(params):
|
||||
"""
|
||||
A utility function to check the parameters for the data layers.
|
||||
"""
|
||||
assert 'split' in params.keys(
|
||||
), 'Params must include split (train, val, or test).'
|
||||
|
||||
required = ['batch_size', 'pascal_root', 'im_shape']
|
||||
for r in required:
|
||||
assert r in params.keys(), 'Params must include {}'.format(r)
|
||||
|
||||
|
||||
def print_info(name, params):
|
||||
"""
|
||||
Output some info regarding the class.
|
||||
"""
|
||||
print "{} initialized for split: {}, with bs: {}, im_shape: {}.".format(
|
||||
name,
|
||||
params['split'],
|
||||
params['batch_size'],
|
||||
params['im_shape'])
|
|
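A usage sketch for the data layer defined above, assuming pycaffe is importable, this module is on the PYTHONPATH, and `pascal_root` points at a local PASCAL VOC directory (a placeholder here). The layer reads its parameters from `param_str` as a Python dict literal, matching the `eval(self.param_str)` call in `setup`.

import caffe
from caffe import layers as L

def multilabel_data(split, pascal_root, batch_size=128):
    # Two-top Python data layer; param_str carries the layer parameters.
    pylayer_params = dict(batch_size=batch_size, im_shape=[227, 227],
                          split=split, pascal_root=pascal_root)
    n = caffe.NetSpec()
    n.data, n.label = L.Python(module='pascal_multilabel_datalayers',
                               layer='PascalMultilabelDataLayerSync',
                               ntop=2, param_str=str(pylayer_params))
    return n.to_proto()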
@ -0,0 +1,121 @@
|
|||
import numpy as np
|
||||
|
||||
|
||||
class SimpleTransformer:
|
||||
|
||||
"""
|
||||
SimpleTransformer is a simple class for preprocessing and deprocessing
|
||||
images for caffe.
|
||||
"""
|
||||
|
||||
def __init__(self, mean=[128, 128, 128]):
|
||||
self.mean = np.array(mean, dtype=np.float32)
|
||||
self.scale = 1.0
|
||||
|
||||
def set_mean(self, mean):
|
||||
"""
|
||||
Set the mean to subtract for centering the data.
|
||||
"""
|
||||
self.mean = mean
|
||||
|
||||
def set_scale(self, scale):
|
||||
"""
|
||||
Set the data scaling.
|
||||
"""
|
||||
self.scale = scale
|
||||
|
||||
def preprocess(self, im):
|
||||
"""
|
||||
preprocess() emulates the pre-processing occurring in the vgg16 caffe
|
||||
prototxt.
|
||||
"""
|
||||
|
||||
im = np.float32(im)
|
||||
im = im[:, :, ::-1] # change to BGR
|
||||
im -= self.mean
|
||||
im *= self.scale
|
||||
im = im.transpose((2, 0, 1))
|
||||
|
||||
return im
|
||||
|
||||
def deprocess(self, im):
|
||||
"""
|
||||
inverse of preprocess()
|
||||
"""
|
||||
im = im.transpose(1, 2, 0)
|
||||
im /= self.scale
|
||||
im += self.mean
|
||||
im = im[:, :, ::-1] # change to RGB
|
||||
|
||||
return np.uint8(im)
|
||||
|
||||
|
||||
class CaffeSolver:
|
||||
|
||||
"""
|
||||
CaffeSolver is a class for creating a solver.prototxt file. It sets default
|
||||
values and can export a solver parameter file.
|
||||
Note that all parameters are stored as strings. Prototxt string values must
|
||||
include their own embedded quotes (a string within a string).
|
||||
"""
|
||||
|
||||
def __init__(self, testnet_prototxt_path="testnet.prototxt",
|
||||
trainnet_prototxt_path="trainnet.prototxt", debug=False):
|
||||
|
||||
self.sp = {}
|
||||
|
||||
# critical:
|
||||
self.sp['base_lr'] = '0.001'
|
||||
self.sp['momentum'] = '0.9'
|
||||
|
||||
# speed:
|
||||
self.sp['test_iter'] = '100'
|
||||
self.sp['test_interval'] = '250'
|
||||
|
||||
# looks:
|
||||
self.sp['display'] = '25'
|
||||
self.sp['snapshot'] = '2500'
|
||||
self.sp['snapshot_prefix'] = '"snapshot"' # string withing a string!
|
||||
|
||||
# learning rate policy
|
||||
self.sp['lr_policy'] = '"fixed"'
|
||||
|
||||
# important, but rare:
|
||||
self.sp['gamma'] = '0.1'
|
||||
self.sp['weight_decay'] = '0.0005'
|
||||
self.sp['train_net'] = '"' + trainnet_prototxt_path + '"'
|
||||
self.sp['test_net'] = '"' + testnet_prototxt_path + '"'
|
||||
|
||||
# pretty much never change these.
|
||||
self.sp['max_iter'] = '100000'
|
||||
self.sp['test_initialization'] = 'false'
|
||||
self.sp['average_loss'] = '25' # this has to do with the display.
|
||||
self.sp['iter_size'] = '1' # this is for accumulating gradients
|
||||
|
||||
if (debug):
|
||||
self.sp['max_iter'] = '12'
|
||||
self.sp['test_iter'] = '1'
|
||||
self.sp['test_interval'] = '4'
|
||||
self.sp['display'] = '1'
|
||||
|
||||
def add_from_file(self, filepath):
|
||||
"""
|
||||
Reads a caffe solver prototxt file and updates the CaffeSolver
|
||||
instance parameters.
|
||||
"""
|
||||
with open(filepath, 'r') as f:
|
||||
for line in f:
|
||||
if line[0] == '#':
|
||||
continue
|
||||
splitLine = line.split(':')
|
||||
self.sp[splitLine[0].strip()] = splitLine[1].strip()
|
||||
|
||||
def write(self, filepath):
|
||||
"""
|
||||
Export solver parameters to the file given by "filepath", sorted alphabetically.
|
||||
"""
|
||||
f = open(filepath, 'w')
|
||||
for key, value in sorted(self.sp.items()):
|
||||
if not(type(value) is str):
|
||||
raise TypeError('All solver parameters must be strings')
|
||||
f.write('%s: %s\n' % (key, value))
|
|
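A usage sketch for CaffeSolver as defined above, assuming trainnet.prototxt and testnet.prototxt already exist in the working directory; every value is stored as a string, and prototxt string fields carry their own embedded quotes.

from tools import CaffeSolver

solver = CaffeSolver(trainnet_prototxt_path='trainnet.prototxt',
                     testnet_prototxt_path='testnet.prototxt')
solver.sp['base_lr'] = '0.01'            # numeric values are still strings
solver.sp['snapshot_prefix'] = '"snap"'  # string fields keep embedded quotes
solver.write('solver.prototxt')          # parameters written sorted by key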
@ -1,10 +1,11 @@
|
|||
name: "mnist_siamese"
|
||||
input: "data"
|
||||
input_shape {
|
||||
dim: 10000
|
||||
dim: 1
|
||||
dim: 28
|
||||
dim: 28
|
||||
layer {
|
||||
name: "data"
|
||||
type: "Input"
|
||||
top: "data"
|
||||
input_param {
|
||||
shape: { dim: 10000 dim: 1 dim: 28 dim: 28 }
|
||||
}
|
||||
}
|
||||
layer {
|
||||
name: "conv1"
|
||||
|
|
|
@ -153,6 +153,11 @@ class Caffe {
|
|||
static void SetDevice(const int device_id);
|
||||
// Prints the current GPU status.
|
||||
static void DeviceQuery();
|
||||
// Check if specified device is available
|
||||
static bool CheckDevice(const int device_id);
|
||||
// Search from start_id to the highest possible device ordinal,
|
||||
// return the ordinal of the first available device.
|
||||
static int FindDevice(const int start_id = 0);
|
||||
// Parallel training info
|
||||
inline static int solver_count() { return Get().solver_count_; }
|
||||
inline static void set_solver_count(int val) { Get().solver_count_ = val; }
|
||||
|
|
|
@ -0,0 +1,67 @@
|
|||
#ifndef CAFFE_CROP_LAYER_HPP_
|
||||
#define CAFFE_CROP_LAYER_HPP_
|
||||
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
#include "caffe/blob.hpp"
|
||||
#include "caffe/layer.hpp"
|
||||
#include "caffe/proto/caffe.pb.h"
|
||||
|
||||
namespace caffe {
|
||||
|
||||
/**
|
||||
* @brief Takes a Blob and crops it to the shape specified by the second input
|
||||
* Blob, across all dimensions after the specified axis.
|
||||
*
|
||||
* TODO(dox): thorough documentation for Forward, Backward, and proto params.
|
||||
*/
|
||||
|
||||
template <typename Dtype>
|
||||
class CropLayer : public Layer<Dtype> {
|
||||
public:
|
||||
explicit CropLayer(const LayerParameter& param)
|
||||
: Layer<Dtype>(param) {}
|
||||
virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
|
||||
const vector<Blob<Dtype>*>& top);
|
||||
virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
|
||||
const vector<Blob<Dtype>*>& top);
|
||||
|
||||
virtual inline const char* type() const { return "Crop"; }
|
||||
virtual inline int ExactNumBottomBlobs() const { return 2; }
|
||||
virtual inline int ExactNumTopBlobs() const { return 1; }
|
||||
|
||||
protected:
|
||||
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
|
||||
const vector<Blob<Dtype>*>& top);
|
||||
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
|
||||
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
|
||||
virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
|
||||
const vector<Blob<Dtype>*>& top);
|
||||
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
|
||||
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
|
||||
|
||||
vector<int> offsets;
|
||||
|
||||
private:
|
||||
void crop_copy(const vector<Blob<Dtype>*>& bottom,
|
||||
const vector<Blob<Dtype>*>& top,
|
||||
const vector<int>& offsets,
|
||||
vector<int> indices,
|
||||
int cur_dim,
|
||||
const Dtype* src_data,
|
||||
Dtype* dest_data,
|
||||
bool is_forward);
|
||||
|
||||
void crop_copy_gpu(const vector<Blob<Dtype>*>& bottom,
|
||||
const vector<Blob<Dtype>*>& top,
|
||||
const vector<int>& offsets,
|
||||
vector<int> indices,
|
||||
int cur_dim,
|
||||
const Dtype* src_data,
|
||||
Dtype* dest_data,
|
||||
bool is_forward);
|
||||
};
|
||||
} // namespace caffe
|
||||
|
||||
#endif // CAFFE_CROP_LAYER_HPP_
|
|
@ -44,6 +44,7 @@ class InnerProductLayer : public Layer<Dtype> {
|
|||
int N_;
|
||||
bool bias_term_;
|
||||
Blob<Dtype> bias_multiplier_;
|
||||
bool transpose_; ///< if true, assume transposed weights
|
||||
};
|
||||
|
||||
} // namespace caffe
|
||||
|
|
|
@ -0,0 +1,44 @@
|
|||
#ifndef CAFFE_INPUT_LAYER_HPP_
|
||||
#define CAFFE_INPUT_LAYER_HPP_
|
||||
|
||||
#include <vector>
|
||||
|
||||
#include "caffe/blob.hpp"
|
||||
#include "caffe/layer.hpp"
|
||||
#include "caffe/proto/caffe.pb.h"
|
||||
|
||||
namespace caffe {
|
||||
|
||||
/**
|
||||
* @brief Provides data to the Net by assigning tops directly.
|
||||
*
|
||||
* This data layer is a container that merely holds the data assigned to it;
|
||||
* forward, backward, and reshape are all no-ops.
|
||||
*/
|
||||
template <typename Dtype>
|
||||
class InputLayer : public Layer<Dtype> {
|
||||
public:
|
||||
explicit InputLayer(const LayerParameter& param)
|
||||
: Layer<Dtype>(param) {}
|
||||
virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
|
||||
const vector<Blob<Dtype>*>& top);
|
||||
// Data layers should be shared by multiple solvers in parallel
|
||||
virtual inline bool ShareInParallel() const { return true; }
|
||||
// Data layers have no bottoms, so reshaping is trivial.
|
||||
virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
|
||||
const vector<Blob<Dtype>*>& top) {}
|
||||
|
||||
virtual inline const char* type() const { return "Input"; }
|
||||
virtual inline int ExactNumBottomBlobs() const { return 0; }
|
||||
virtual inline int MinTopBlobs() const { return 1; }
|
||||
|
||||
protected:
|
||||
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
|
||||
const vector<Blob<Dtype>*>& top) {}
|
||||
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
|
||||
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {}
|
||||
};
|
||||
|
||||
} // namespace caffe
|
||||
|
||||
#endif // CAFFE_INPUT_LAYER_HPP_
|
|
@ -32,11 +32,16 @@ class Net {
|
|||
void Init(const NetParameter& param);
|
||||
|
||||
/**
|
||||
* @brief Run Forward with the input Blob%s already fed separately.
|
||||
* @brief Run Forward and return the result.
|
||||
*
|
||||
* You can get the input blobs using input_blobs().
|
||||
*/
|
||||
const vector<Blob<Dtype>*>& ForwardPrefilled(Dtype* loss = NULL);
|
||||
const vector<Blob<Dtype>*>& Forward(Dtype* loss = NULL);
|
||||
/// @brief DEPRECATED; use Forward() instead.
|
||||
const vector<Blob<Dtype>*>& ForwardPrefilled(Dtype* loss = NULL) {
|
||||
LOG_EVERY_N(WARNING, 1000) << "DEPRECATED: ForwardPrefilled() "
|
||||
<< "will be removed in a future version. Use Forward().";
|
||||
return Forward(loss);
|
||||
}
|
||||
|
||||
/**
|
||||
* The From and To variants of Forward and Backward operate on the
|
||||
|
@ -49,14 +54,9 @@ class Net {
|
|||
Dtype ForwardFromTo(int start, int end);
|
||||
Dtype ForwardFrom(int start);
|
||||
Dtype ForwardTo(int end);
|
||||
/// @brief Run forward using a set of bottom blobs, and return the result.
|
||||
/// @brief DEPRECATED; set input blobs then use Forward() instead.
|
||||
const vector<Blob<Dtype>*>& Forward(const vector<Blob<Dtype>* > & bottom,
|
||||
Dtype* loss = NULL);
|
||||
/**
|
||||
* @brief Run forward using a serialized BlobProtoVector and return the
|
||||
* result as a serialized BlobProtoVector
|
||||
*/
|
||||
string Forward(const string& input_blob_protos, Dtype* loss = NULL);
|
||||
|
||||
/**
|
||||
* @brief Zeroes out the diffs of all net parameters.
|
||||
|
@ -82,9 +82,9 @@ class Net {
|
|||
*/
|
||||
void Reshape();
|
||||
|
||||
Dtype ForwardBackward(const vector<Blob<Dtype>* > & bottom) {
|
||||
Dtype ForwardBackward() {
|
||||
Dtype loss;
|
||||
Forward(bottom, &loss);
|
||||
Forward(&loss);
|
||||
Backward();
|
||||
return loss;
|
||||
}
|
||||
|
@ -229,7 +229,7 @@ class Net {
|
|||
|
||||
protected:
|
||||
// Helpers for Init.
|
||||
/// @brief Append a new input or top blob to the net.
|
||||
/// @brief Append a new top blob to the net.
|
||||
void AppendTop(const NetParameter& param, const int layer_id,
|
||||
const int top_id, set<string>* available_blobs,
|
||||
map<string, int>* blob_name_to_idx);
|
||||
|
@ -241,8 +241,6 @@ class Net {
|
|||
void AppendParam(const NetParameter& param, const int layer_id,
|
||||
const int param_id);
|
||||
|
||||
/// @brief Helper for displaying debug info in Forward about input Blobs.
|
||||
void InputDebugInfo(const int layer_id);
|
||||
/// @brief Helper for displaying debug info in Forward.
|
||||
void ForwardDebugInfo(const int layer_id);
|
||||
/// @brief Helper for displaying debug info in Backward.
|
||||
|
|
|
@ -93,7 +93,10 @@ class P2PSync : public GPUParams<Dtype>, public Solver<Dtype>::Callback,
|
|||
return solver_;
|
||||
}
|
||||
|
||||
void run(const vector<int>& gpus);
|
||||
void Run(const vector<int>& gpus);
|
||||
void Prepare(const vector<int>& gpus,
|
||||
vector<shared_ptr<P2PSync<Dtype> > >* syncs);
|
||||
inline const int initial_iter() const { return initial_iter_; }
|
||||
|
||||
protected:
|
||||
void on_start();
|
||||
|
|
|
@ -59,6 +59,12 @@ bool UpgradeV1LayerParameter(const V1LayerParameter& v1_layer_param,
|
|||
|
||||
const char* UpgradeV1LayerType(const V1LayerParameter_LayerType type);
|
||||
|
||||
// Return true iff the Net contains input fields.
|
||||
bool NetNeedsInputUpgrade(const NetParameter& net_param);
|
||||
|
||||
// Perform all necessary transformations to upgrade input fields into layers.
|
||||
void UpgradeNetInput(NetParameter* net_param);
|
||||
|
||||
// Return true iff the solver contains any old solver_type specified as enums
|
||||
bool SolverNeedsTypeUpgrade(const SolverParameter& solver_param);
|
||||
|
||||
|
|
|
@ -1,10 +1,9 @@
name: "AlexNet"
input: "data"
input_shape {
  dim: 10
  dim: 3
  dim: 227
  dim: 227
layer {
  name: "data"
  type: "Input"
  top: "data"
  input_param { shape: { dim: 10 dim: 3 dim: 227 dim: 227 } }
}
layer {
  name: "conv1"

@ -1,10 +1,9 @@
name: "GoogleNet"
input: "data"
input_shape {
  dim: 10
  dim: 3
  dim: 224
  dim: 224
layer {
  name: "data"
  type: "Input"
  top: "data"
  input_param { shape: { dim: 10 dim: 3 dim: 224 dim: 224 } }
}
layer {
  name: "conv1/7x7_s2"

@ -1,10 +1,9 @@
name: "CaffeNet"
input: "data"
input_shape {
  dim: 10
  dim: 3
  dim: 227
  dim: 227
layer {
  name: "data"
  type: "Input"
  top: "data"
  input_param { shape: { dim: 10 dim: 3 dim: 227 dim: 227 } }
}
layer {
  name: "conv1"

@ -1,10 +1,9 @@
name: "R-CNN-ilsvrc13"
input: "data"
input_shape {
  dim: 10
  dim: 3
  dim: 227
  dim: 227
layer {
  name: "data"
  type: "Input"
  top: "data"
  input_param { shape: { dim: 10 dim: 3 dim: 227 dim: 227 } }
}
layer {
  name: "conv1"

@ -1,10 +1,9 @@
name: "FlickrStyleCaffeNet"
input: "data"
input_shape {
  dim: 10
  dim: 3
  dim: 227
  dim: 227
layer {
  name: "data"
  type: "Input"
  top: "data"
  input_param { shape: { dim: 10 dim: 3 dim: 227 dim: 227 } }
}
layer {
  name: "conv1"

@ -255,6 +255,7 @@ BOOST_PYTHON_MODULE(_caffe) {
    .def("_set_input_arrays", &Net_SetInputArrays,
        bp::with_custodian_and_ward<1, 2, bp::with_custodian_and_ward<1, 3> >())
    .def("save", &Net_Save);
  bp::register_ptr_to_python<shared_ptr<Net<Dtype> > >();

  bp::class_<Blob<Dtype>, shared_ptr<Blob<Dtype> >, boost::noncopyable>(
    "Blob", bp::no_init)

@ -274,6 +275,7 @@ BOOST_PYTHON_MODULE(_caffe) {
            NdarrayCallPolicies()))
    .add_property("diff", bp::make_function(&Blob<Dtype>::mutable_cpu_diff,
          NdarrayCallPolicies()));
  bp::register_ptr_to_python<shared_ptr<Blob<Dtype> > >();

  bp::class_<Layer<Dtype>, shared_ptr<PythonLayer<Dtype> >,
    boost::noncopyable>("Layer", bp::init<const LayerParameter&>())

@ -297,6 +299,7 @@ BOOST_PYTHON_MODULE(_caffe) {
    .def("step", &Solver<Dtype>::Step)
    .def("restore", &Solver<Dtype>::Restore)
    .def("snapshot", &Solver<Dtype>::Snapshot);
  bp::register_ptr_to_python<shared_ptr<Solver<Dtype> > >();

  bp::class_<SGDSolver<Dtype>, bp::bases<Solver<Dtype> >,
    shared_ptr<SGDSolver<Dtype> >, boost::noncopyable>(

@ -0,0 +1,185 @@
"""
Determine spatial relationships between layers to relate their coordinates.
Coordinates are mapped from input-to-output (forward), but can
be mapped output-to-input (backward) by the inverse mapping too.
This helps crop and align feature maps among other uses.
"""

from __future__ import division
import numpy as np
from caffe import layers as L

PASS_THROUGH_LAYERS = ['AbsVal', 'BatchNorm', 'Bias', 'BNLL', 'Dropout',
                       'Eltwise', 'ELU', 'Log', 'LRN', 'Exp', 'MVN', 'Power',
                       'ReLU', 'PReLU', 'Scale', 'Sigmoid', 'Split', 'TanH',
                       'Threshold']


def conv_params(fn):
    """
    Extract the spatial parameters that determine the coordinate mapping:
    kernel size, stride, padding, and dilation.

    Implementation detail: Convolution, Deconvolution, and Im2col layers
    define these in the convolution_param message, while Pooling has its
    own fields in pooling_param. This method deals with these details to
    extract canonical parameters.
    """
    params = fn.params.get('convolution_param', fn.params)
    axis = params.get('axis', 1)
    ks = np.array(params['kernel_size'], ndmin=1)
    dilation = np.array(params.get('dilation', 1), ndmin=1)
    assert len({'pad_h', 'pad_w', 'kernel_h', 'kernel_w', 'stride_h',
                'stride_w'} & set(fn.params)) == 0, \
        'cropping does not support legacy _h/_w params'
    return (axis, np.array(params.get('stride', 1), ndmin=1),
            (ks - 1) * dilation + 1,
            np.array(params.get('pad', 0), ndmin=1))


def crop_params(fn):
    """
    Extract the crop layer parameters with defaults.
    """
    params = fn.params.get('crop_param', fn.params)
    axis = params.get('axis', 2)  # default to spatial crop for N, C, H, W
    offset = np.array(params.get('offset', 0), ndmin=1)
    return (axis, offset)


class UndefinedMapException(Exception):
    """
    Exception raised for layers that do not have a defined coordinate mapping.
    """
    pass


def coord_map(fn):
    """
    Define the coordinate mapping by its
    - axis
    - scale: output coord[i * scale] <- input_coord[i]
    - shift: output coord[i] <- output_coord[i + shift]
    s.t. the identity mapping, as for pointwise layers like ReLU, is defined by
    (None, 1, 0) since it is independent of axis and does not transform coords.
    """
    if fn.type_name in ['Convolution', 'Pooling', 'Im2col']:
        axis, stride, ks, pad = conv_params(fn)
        return axis, 1 / stride, (pad - (ks - 1) / 2) / stride
    elif fn.type_name == 'Deconvolution':
        axis, stride, ks, pad = conv_params(fn)
        return axis, stride, (ks - 1) / 2 - pad
    elif fn.type_name in PASS_THROUGH_LAYERS:
        return None, 1, 0
    elif fn.type_name == 'Crop':
        axis, offset = crop_params(fn)
        axis -= 1  # -1 for last non-coordinate dim.
        return axis, 1, - offset
    else:
        raise UndefinedMapException


class AxisMismatchException(Exception):
    """
    Exception raised for mappings with incompatible axes.
    """
    pass


def compose(base_map, next_map):
    """
    Compose a base coord map with scale a1, shift b1 with a further coord map
    with scale a2, shift b2. The scales multiply and the further shift, b2,
    is scaled by base coord scale a1.
    """
    ax1, a1, b1 = base_map
    ax2, a2, b2 = next_map
    if ax1 is None:
        ax = ax2
    elif ax2 is None or ax1 == ax2:
        ax = ax1
    else:
        raise AxisMismatchException
    return ax, a1 * a2, a1 * b2 + b1


def inverse(coord_map):
    """
    Invert a coord map by de-scaling and un-shifting;
    this gives the backward mapping for the gradient.
    """
    ax, a, b = coord_map
    return ax, 1 / a, -b / a


def coord_map_from_to(top_from, top_to):
    """
    Determine the coordinate mapping between a top (from) and a top (to).
    Walk the graph to find a common ancestor while composing the coord maps for
    from and to until they meet. As a last step the from map is inverted.
    """
    # We need to find a common ancestor of top_from and top_to.
    # We'll assume that all ancestors are equivalent here (otherwise the graph
    # is an inconsistent state (which we could improve this to check for)).
    # For now use a brute-force algorithm.

    def collect_bottoms(top):
        """
        Collect the bottoms to walk for the coordinate mapping.
        The general rule is that all the bottoms of a layer can be mapped, as
        most layers have the same coordinate mapping for each bottom.
        Crop layer is a notable exception. Only the first/cropped bottom is
        mappable; the second/dimensions bottom is excluded from the walk.
        """
        bottoms = top.fn.inputs
        if top.fn.type_name == 'Crop':
            bottoms = bottoms[:1]
        return bottoms

    # walk back from top_from, keeping the coord map as we go
    from_maps = {top_from: (None, 1, 0)}
    frontier = {top_from}
    while frontier:
        top = frontier.pop()
        try:
            bottoms = collect_bottoms(top)
            for bottom in bottoms:
                from_maps[bottom] = compose(from_maps[top], coord_map(top.fn))
                frontier.add(bottom)
        except UndefinedMapException:
            pass

    # now walk back from top_to until we hit a common blob
    to_maps = {top_to: (None, 1, 0)}
    frontier = {top_to}
    while frontier:
        top = frontier.pop()
        if top in from_maps:
            return compose(to_maps[top], inverse(from_maps[top]))
        try:
            bottoms = collect_bottoms(top)
            for bottom in bottoms:
                to_maps[bottom] = compose(to_maps[top], coord_map(top.fn))
                frontier.add(bottom)
        except UndefinedMapException:
            continue

    # if we got here, we did not find a blob in common
    raise RuntimeError('Could not compute map between tops; are they '
                       'connected by spatial layers?')


def crop(top_from, top_to):
    """
    Define a Crop layer to crop a top (from) to another top (to) by
    determining the coordinate mapping between the two and net spec'ing
    the axis and shift parameters of the crop.
    """
    ax, a, b = coord_map_from_to(top_from, top_to)
    assert (a == 1).all(), 'scale mismatch on crop (a = {})'.format(a)
    assert (b <= 0).all(), 'cannot crop negative offset (b = {})'.format(b)
    assert (np.round(b) == b).all(), 'cannot crop noninteger offset ' \
        '(b = {})'.format(b)
    return L.Crop(top_from, top_to,
                  crop_param=dict(axis=ax + 1,  # +1 for first cropping dim.
                                  offset=list(-np.round(b).astype(int))))

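An illustrative sketch of how these utilities compose, mirroring the conv-pool-deconv pattern used in the tests added below; the layer parameters are arbitrary example values, and the resulting map is the identity so the crop offsets are zero.

    import caffe
    from caffe import layers as L, params as P
    from caffe.coord_map import coord_map_from_to, crop

    n = caffe.NetSpec()
    n.data = L.Input(shape=dict(dim=[1, 1, 100, 100]))
    n.conv = L.Convolution(n.data, num_output=10, kernel_size=3, stride=1, pad=0)
    n.pool = L.Pooling(n.conv, pool=P.Pooling.MAX, kernel_size=2, stride=2, pad=0)
    n.deconv = L.Deconvolution(n.pool, num_output=10, kernel_size=4, stride=2, pad=0)
    ax, a, b = coord_map_from_to(n.deconv, n.data)  # (1, 1, 0) for this net
    n.crop = crop(n.deconv, n.data)                 # spec a Crop aligning deconv to data
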
@ -175,6 +175,12 @@ class NetSpec(object):
    def __getattr__(self, name):
        return self.tops[name]

    def __setitem__(self, key, value):
        self.__setattr__(key, value)

    def __getitem__(self, item):
        return self.__getattr__(item)

    def to_proto(self):
        names = {v: k for k, v in six.iteritems(self.tops)}
        autonames = Counter()

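A short hedged sketch of why item access on NetSpec is useful: top names that are not valid Python identifiers (for example names containing '/') can now be assigned and read back.

    import caffe
    from caffe import layers as L

    n = caffe.NetSpec()
    n.data = L.Input(shape=dict(dim=[10, 3, 224, 224]))
    n['conv1/7x7_s2'] = L.Convolution(n.data, num_output=64, kernel_size=7, stride=2)
    top = n['conv1/7x7_s2']  # same top, retrieved by item access
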
@ -14,6 +14,8 @@ from ._caffe import Net, SGDSolver, NesterovSolver, AdaGradSolver, \
    RMSPropSolver, AdaDeltaSolver, AdamSolver
import caffe.io

import six

# We directly update methods from Net here (rather than using composition or
# inheritance) so that nets created by caffe (e.g., by SGDSolver) will
# automatically have the improved interface.

@ -97,8 +99,8 @@ def _Net_forward(self, blobs=None, start=None, end=None, **kwargs):
            raise Exception('Input blob arguments do not match net inputs.')
        # Set input according to defined shapes and make arrays single and
        # C-contiguous as Caffe expects.
        for in_, blob in kwargs.iteritems():
            if blob.shape[0] != self.blobs[in_].num:
        for in_, blob in six.iteritems(kwargs):
            if blob.shape[0] != self.blobs[in_].shape[0]:
                raise Exception('Input is not batch sized')
            self.blobs[in_].data[...] = blob

@ -145,8 +147,8 @@ def _Net_backward(self, diffs=None, start=None, end=None, **kwargs):
            raise Exception('Top diff arguments do not match net outputs.')
        # Set top diffs according to defined shapes and make arrays single and
        # C-contiguous as Caffe expects.
        for top, diff in kwargs.iteritems():
            if diff.shape[0] != self.blobs[top].num:
        for top, diff in six.iteritems(kwargs):
            if diff.shape[0] != self.blobs[top].shape[0]:
                raise Exception('Diff is not batch sized')
            self.blobs[top].diff[...] = diff

@ -174,13 +176,13 @@ def _Net_forward_all(self, blobs=None, **kwargs):
    all_outs = {out: [] for out in set(self.outputs + (blobs or []))}
    for batch in self._batch(kwargs):
        outs = self.forward(blobs=blobs, **batch)
        for out, out_blob in outs.iteritems():
        for out, out_blob in six.iteritems(outs):
            all_outs[out].extend(out_blob.copy())
    # Package in ndarray.
    for out in all_outs:
        all_outs[out] = np.asarray(all_outs[out])
    # Discard padding.
    pad = len(all_outs.itervalues().next()) - len(kwargs.itervalues().next())
    pad = len(six.next(six.itervalues(all_outs))) - len(six.next(six.itervalues(kwargs)))
    if pad:
        for out in all_outs:
            all_outs[out] = all_outs[out][:-pad]

@ -215,16 +217,16 @@ def _Net_forward_backward_all(self, blobs=None, diffs=None, **kwargs):
    for fb, bb in izip_longest(forward_batches, backward_batches, fillvalue={}):
        batch_blobs = self.forward(blobs=blobs, **fb)
        batch_diffs = self.backward(diffs=diffs, **bb)
        for out, out_blobs in batch_blobs.iteritems():
        for out, out_blobs in six.iteritems(batch_blobs):
            all_outs[out].extend(out_blobs.copy())
        for diff, out_diffs in batch_diffs.iteritems():
        for diff, out_diffs in six.iteritems(batch_diffs):
            all_diffs[diff].extend(out_diffs.copy())
    # Package in ndarray.
    for out, diff in zip(all_outs, all_diffs):
        all_outs[out] = np.asarray(all_outs[out])
        all_diffs[diff] = np.asarray(all_diffs[diff])
    # Discard padding at the end and package in ndarray.
    pad = len(all_outs.itervalues().next()) - len(kwargs.itervalues().next())
    pad = len(six.next(six.itervalues(all_outs))) - len(six.next(six.itervalues(kwargs)))
    if pad:
        for out, diff in zip(all_outs, all_diffs):
            all_outs[out] = all_outs[out][:-pad]

@ -256,10 +258,10 @@ def _Net_batch(self, blobs):
    ------
    batch: {blob name: list of blobs} dict for a single batch.
    """
    num = len(blobs.itervalues().next())
    batch_size = self.blobs.itervalues().next().num
    num = len(six.next(six.itervalues(blobs)))
    batch_size = six.next(six.itervalues(self.blobs)).shape[0]
    remainder = num % batch_size
    num_batches = num / batch_size
    num_batches = num // batch_size

    # Yield full batches.
    for b in range(num_batches):

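A tiny sketch of the portability pattern this hunk standardizes on: six.iteritems() and six.itervalues() work on both Python 2 and Python 3 dicts, unlike the removed dict.iteritems()/itervalues() calls.

    import six

    blobs = {'data': 1, 'label': 2}
    for name, blob in six.iteritems(blobs):
        print(name, blob)
    first = six.next(six.itervalues(blobs))  # replaces blobs.itervalues().next()
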
@ -0,0 +1,192 @@
import unittest

import numpy as np
import random

import caffe
from caffe import layers as L
from caffe import params as P
from caffe.coord_map import coord_map_from_to, crop


def coord_net_spec(ks=3, stride=1, pad=0, pool=2, dstride=2, dpad=0):
    """
    Define net spec for simple conv-pool-deconv pattern common to all
    coordinate mapping tests.
    """
    n = caffe.NetSpec()
    n.data = L.Input(shape=dict(dim=[2, 1, 100, 100]))
    n.aux = L.Input(shape=dict(dim=[2, 1, 20, 20]))
    n.conv = L.Convolution(
        n.data, num_output=10, kernel_size=ks, stride=stride, pad=pad)
    n.pool = L.Pooling(
        n.conv, pool=P.Pooling.MAX, kernel_size=pool, stride=pool, pad=0)
    # for upsampling kernel size is 2x stride
    try:
        deconv_ks = [s*2 for s in dstride]
    except:
        deconv_ks = dstride*2
    n.deconv = L.Deconvolution(
        n.pool, num_output=10, kernel_size=deconv_ks, stride=dstride, pad=dpad)
    return n


class TestCoordMap(unittest.TestCase):
    def setUp(self):
        pass

    def test_conv_pool_deconv(self):
        """
        Map through conv, pool, and deconv.
        """
        n = coord_net_spec()
        # identity for 2x pool, 2x deconv
        ax, a, b = coord_map_from_to(n.deconv, n.data)
        self.assertEquals(ax, 1)
        self.assertEquals(a, 1)
        self.assertEquals(b, 0)
        # shift-by-one for 4x pool, 4x deconv
        n = coord_net_spec(pool=4, dstride=4)
        ax, a, b = coord_map_from_to(n.deconv, n.data)
        self.assertEquals(ax, 1)
        self.assertEquals(a, 1)
        self.assertEquals(b, -1)

    def test_pass(self):
        """
        A pass-through layer (ReLU) and conv (1x1, stride 1, pad 0)
        both do identity mapping.
        """
        n = coord_net_spec()
        ax, a, b = coord_map_from_to(n.deconv, n.data)
        n.relu = L.ReLU(n.deconv)
        n.conv1x1 = L.Convolution(
            n.relu, num_output=10, kernel_size=1, stride=1, pad=0)
        for top in [n.relu, n.conv1x1]:
            ax_pass, a_pass, b_pass = coord_map_from_to(top, n.data)
            self.assertEquals(ax, ax_pass)
            self.assertEquals(a, a_pass)
            self.assertEquals(b, b_pass)

    def test_padding(self):
        """
        Padding conv adds offset while padding deconv subtracts offset.
        """
        n = coord_net_spec()
        ax, a, b = coord_map_from_to(n.deconv, n.data)
        pad = random.randint(0, 10)
        # conv padding
        n = coord_net_spec(pad=pad)
        _, a_pad, b_pad = coord_map_from_to(n.deconv, n.data)
        self.assertEquals(a, a_pad)
        self.assertEquals(b - pad, b_pad)
        # deconv padding
        n = coord_net_spec(dpad=pad)
        _, a_pad, b_pad = coord_map_from_to(n.deconv, n.data)
        self.assertEquals(a, a_pad)
        self.assertEquals(b + pad, b_pad)
        # pad both to cancel out
        n = coord_net_spec(pad=pad, dpad=pad)
        _, a_pad, b_pad = coord_map_from_to(n.deconv, n.data)
        self.assertEquals(a, a_pad)
        self.assertEquals(b, b_pad)

    def test_multi_conv(self):
        """
        Multiple bottoms/tops of a layer are identically mapped.
        """
        n = coord_net_spec()
        # multi bottom/top
        n.conv_data, n.conv_aux = L.Convolution(
            n.data, n.aux, ntop=2, num_output=10, kernel_size=5, stride=2,
            pad=0)
        ax1, a1, b1 = coord_map_from_to(n.conv_data, n.data)
        ax2, a2, b2 = coord_map_from_to(n.conv_aux, n.aux)
        self.assertEquals(ax1, ax2)
        self.assertEquals(a1, a2)
        self.assertEquals(b1, b2)

    def test_rect(self):
        """
        Anisotropic mapping is equivalent to its isotropic parts.
        """
        n3x3 = coord_net_spec(ks=3, stride=1, pad=0)
        n5x5 = coord_net_spec(ks=5, stride=2, pad=10)
        n3x5 = coord_net_spec(ks=[3, 5], stride=[1, 2], pad=[0, 10])
        ax_3x3, a_3x3, b_3x3 = coord_map_from_to(n3x3.deconv, n3x3.data)
        ax_5x5, a_5x5, b_5x5 = coord_map_from_to(n5x5.deconv, n5x5.data)
        ax_3x5, a_3x5, b_3x5 = coord_map_from_to(n3x5.deconv, n3x5.data)
        self.assertTrue(ax_3x3 == ax_5x5 == ax_3x5)
        self.assertEquals(a_3x3, a_3x5[0])
        self.assertEquals(b_3x3, b_3x5[0])
        self.assertEquals(a_5x5, a_3x5[1])
        self.assertEquals(b_5x5, b_3x5[1])

    def test_nd_conv(self):
        """
        ND conv maps the same way in more dimensions.
        """
        n = caffe.NetSpec()
        # define data with 3 spatial dimensions, otherwise the same net
        n.data = L.Input(shape=dict(dim=[2, 3, 100, 100, 100]))
        n.conv = L.Convolution(
            n.data, num_output=10, kernel_size=[3, 3, 3], stride=[1, 1, 1],
            pad=[0, 1, 2])
        n.pool = L.Pooling(
            n.conv, pool=P.Pooling.MAX, kernel_size=2, stride=2, pad=0)
        n.deconv = L.Deconvolution(
            n.pool, num_output=10, kernel_size=4, stride=2, pad=0)
        ax, a, b = coord_map_from_to(n.deconv, n.data)
        self.assertEquals(ax, 1)
        self.assertTrue(len(a) == len(b))
        self.assertTrue(np.all(a == 1))
        self.assertEquals(b[0] - 1, b[1])
        self.assertEquals(b[1] - 1, b[2])

    def test_crop_of_crop(self):
        """
        Map coordinates through Crop layer:
        crop an already-cropped output to the input and check change in offset.
        """
        n = coord_net_spec()
        offset = random.randint(0, 10)
        ax, a, b = coord_map_from_to(n.deconv, n.data)
        n.crop = L.Crop(n.deconv, n.data, axis=2, offset=offset)
        ax_crop, a_crop, b_crop = coord_map_from_to(n.crop, n.data)
        self.assertEquals(ax, ax_crop)
        self.assertEquals(a, a_crop)
        self.assertEquals(b + offset, b_crop)

    def test_crop_helper(self):
        """
        Define Crop layer by crop().
        """
        n = coord_net_spec()
        crop(n.deconv, n.data)

    def test_catch_unconnected(self):
        """
        Catch mapping spatially unconnected tops.
        """
        n = coord_net_spec()
        n.ip = L.InnerProduct(n.deconv, num_output=10)
        with self.assertRaises(RuntimeError):
            coord_map_from_to(n.ip, n.data)

    def test_catch_scale_mismatch(self):
        """
        Catch incompatible scales, such as when the top to be cropped
        is mapped to a differently strided reference top.
        """
        n = coord_net_spec(pool=3, dstride=2)  # pool 3x but deconv 2x
        with self.assertRaises(AssertionError):
            crop(n.deconv, n.data)

    def test_catch_negative_crop(self):
        """
        Catch impossible offsets, such as when the top to be cropped
        is mapped to a larger reference top.
        """
        n = coord_net_spec(dpad=10)  # make output smaller than input
        with self.assertRaises(AssertionError):
            crop(n.deconv, n.data)

@ -1564,7 +1564,7 @@ def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
  caffe_alt_function_list = (
      ('memset', ['caffe_set', 'caffe_memset']),
      ('cudaMemset', ['caffe_gpu_set', 'caffe_gpu_memset']),
      ('memcpy', ['caffe_copy', 'caffe_memcpy']),
      ('memcpy', ['caffe_copy']),
      ('cudaMemcpy', ['caffe_copy', 'caffe_gpu_memcpy']),
      )

@ -93,7 +93,7 @@ if [ "$PYTHON_VERSION" -eq "3" ] && [ ! -e "$CONDA_DIR/bin/protoc" ]; then
fi

if [ "$PYTHON_VERSION" -eq "3" ]; then
    pip install --pre protobuf
    pip install --pre protobuf==3.0.0b2
else
    pip install protobuf
fi

@ -79,6 +79,15 @@ void Caffe::DeviceQuery() {
  NO_GPU;
}

bool Caffe::CheckDevice(const int device_id) {
  NO_GPU;
  return false;
}

int Caffe::FindDevice(const int start_id) {
  NO_GPU;
  return -1;
}

class Caffe::RNG::Generator {
 public:

@ -201,6 +210,39 @@ void Caffe::DeviceQuery() {
  return;
}

bool Caffe::CheckDevice(const int device_id) {
  // This function checks the availability of GPU #device_id.
  // It attempts to create a context on the device by calling cudaFree(0).
  // cudaSetDevice() alone is not sufficient to check the availability.
  // It lazily records device_id, however, does not initialize a
  // context. So it does not know if the host thread has the permission to use
  // the device or not.
  //
  // In a shared environment where the devices are set to EXCLUSIVE_PROCESS
  // or EXCLUSIVE_THREAD mode, cudaSetDevice() returns cudaSuccess
  // even if the device is exclusively occupied by another process or thread.
  // Cuda operations that initialize the context are needed to check
  // the permission. cudaFree(0) is one of those with no side effect,
  // except the context initialization.
  bool r = ((cudaSuccess == cudaSetDevice(device_id)) &&
            (cudaSuccess == cudaFree(0)));
  // reset any error that may have occurred.
  cudaGetLastError();
  return r;
}

int Caffe::FindDevice(const int start_id) {
  // This function finds the first available device by checking devices with
  // ordinal from start_id to the highest available value. In the
  // EXCLUSIVE_PROCESS or EXCLUSIVE_THREAD mode, if it succeeds, it also
  // claims the device due to the initialization of the context.
  int count = 0;
  CUDA_CHECK(cudaGetDeviceCount(&count));
  for (int i = start_id; i < count; i++) {
    if (CheckDevice(i)) return i;
  }
  return -1;
}

class Caffe::RNG::Generator {
 public:

@ -0,0 +1,150 @@
#include <algorithm>
#include <functional>
#include <map>
#include <set>
#include <vector>


#include "caffe/layer.hpp"
#include "caffe/layers/crop_layer.hpp"
#include "caffe/net.hpp"


namespace caffe {

template <typename Dtype>
void CropLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  // All logic that depends only on the number of dimensions is here,
  // the rest is in Reshape because it depends on Blob size.
  // bottom[0] supplies the data
  // bottom[1] supplies the size
  const CropParameter& param = this->layer_param_.crop_param();
  CHECK_EQ(bottom.size(), 2) << "Wrong number of bottom blobs.";
  int input_dim = bottom[0]->num_axes();
  const int start_axis = bottom[0]->CanonicalAxisIndex(param.axis());
  CHECK_LT(start_axis, input_dim) << "crop axis bigger than input dim";
  if (param.offset_size() > 1) {
    // the number of crop values specified must be equal to the number
    // of dimensions following axis
    CHECK_EQ(start_axis + param.offset_size(), input_dim)
      << "number of offset values specified must be equal to the number of "
      << "dimensions following axis.";
  }
}

template <typename Dtype>
void CropLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const CropParameter& param = this->layer_param_.crop_param();
  int input_dim = bottom[0]->num_axes();
  const int start_axis = bottom[0]->CanonicalAxisIndex(param.axis());

  // initialize all offsets to 0
  offsets = vector<int>(input_dim, 0);
  // initialize new shape to bottom[0]
  vector<int> new_shape(bottom[0]->shape());

  // apply crops
  for (int i = 0; i < input_dim; ++i) {
    int crop_offset = 0;
    int new_size = bottom[0]->shape(i);
    if (i >= start_axis) {
      new_size = bottom[1]->shape(i);

      if (param.offset_size() == 1) {
        // if only one crop value is supplied, crop all dimensions after axis
        // by this crop value
        crop_offset = param.offset(0);
      } else if (param.offset_size() > 1) {
        // crop values specified must be equal to the number of dimensions
        // following axis
        crop_offset = param.offset(i - start_axis);
      }
    }
    // Check that the image we are cropping minus the margin is bigger
    // than the destination image.
    CHECK_GE(bottom[0]->shape(i) - crop_offset,
             bottom[1]->shape(i))
        << "invalid crop parameters in dimension: " << i;
    // Now set new size and offsets
    new_shape[i] = new_size;
    offsets[i] = crop_offset;
  }
  top[0]->Reshape(new_shape);
}

// recursive copy function
template <typename Dtype>
void CropLayer<Dtype>::crop_copy(const vector<Blob<Dtype>*>& bottom,
             const vector<Blob<Dtype>*>& top,
             const vector<int>& offsets,
             vector<int> indices,
             int cur_dim,
             const Dtype* src_data,
             Dtype* dest_data,
             bool is_forward) {
  if (cur_dim + 1 < top[0]->num_axes()) {
    // We are not yet at the final dimension, call copy recursively
    for (int i = 0; i < top[0]->shape(cur_dim); ++i) {
      indices[cur_dim] = i;
      crop_copy(bottom, top, offsets, indices, cur_dim+1,
                src_data, dest_data, is_forward);
    }
  } else {
    // We are at the last dimension, which is stored contiguously in memory
    for (int i = 0; i < top[0]->shape(cur_dim); ++i) {
      // prepare index vector reduced(red) and with offsets(off)
      std::vector<int> ind_red(cur_dim, 0);
      std::vector<int> ind_off(cur_dim+1, 0);
      for (int j = 0; j < cur_dim; ++j) {
        ind_red[j] = indices[j];
        ind_off[j] = indices[j] + offsets[j];
      }
      ind_off[cur_dim] = offsets[cur_dim];
      // do the copy
      if (is_forward) {
        caffe_copy(top[0]->shape(cur_dim),
            src_data + bottom[0]->offset(ind_off),
            dest_data + top[0]->offset(ind_red));
      } else {
        // in the backwards pass the src_data is top_diff
        // and the dest_data is bottom_diff
        caffe_copy(top[0]->shape(cur_dim),
            src_data + top[0]->offset(ind_red),
            dest_data + bottom[0]->offset(ind_off));
      }
    }
  }
}

template <typename Dtype>
void CropLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  std::vector<int> indices(top[0]->num_axes(), 0);
  const Dtype* bottom_data = bottom[0]->cpu_data();
  Dtype* top_data = top[0]->mutable_cpu_data();
  crop_copy(bottom, top, offsets, indices, 0, bottom_data, top_data, true);
}

template <typename Dtype>
void CropLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  const Dtype* top_diff = top[0]->cpu_diff();
  Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();

  if (propagate_down[0]) {
    caffe_set(bottom[0]->count(), static_cast<Dtype>(0), bottom_diff);
    std::vector<int> indices(top[0]->num_axes(), 0);
    crop_copy(bottom, top, offsets, indices, 0, top_diff, bottom_diff, false);
  }
}

#ifdef CPU_ONLY
STUB_GPU(CropLayer);
#endif

INSTANTIATE_CLASS(CropLayer);
REGISTER_LAYER_CLASS(Crop);

}  // namespace caffe
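A NumPy sketch (an illustration, not part of the layer code) of what the crop copy computes in the common N,C,H,W case with axis 2 and per-axis offsets: every axis from the crop axis onward is cut down to the reference blob's size, shifted by its offset.

    import numpy as np

    bottom0 = np.random.rand(1, 3, 12, 12)   # data to crop
    bottom1 = np.zeros((1, 3, 8, 8))          # reference blob supplying the size
    oh, ow = 2, 3                             # offsets along H and W
    top = bottom0[:, :, oh:oh + bottom1.shape[2], ow:ow + bottom1.shape[3]]
    assert top.shape == bottom1.shape
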
@ -0,0 +1,124 @@
#include <vector>

#include "caffe/layers/crop_layer.hpp"

namespace caffe {

// Copy (one line per thread) from one array to another, with arbitrary
// strides in the last two dimensions.
template <typename Dtype>
__global__ void copy_kernel(const int n, const int height, const int width,
    const int src_outer_stride, const int src_inner_stride,
    const int dest_outer_stride, const int dest_inner_stride,
    const Dtype* src, Dtype* dest) {
  CUDA_KERNEL_LOOP(index, n) {
    int src_start = index / height * src_outer_stride
                  + index % height * src_inner_stride;
    int dest_start = index / height * dest_outer_stride
                   + index % height * dest_inner_stride;
    for (int i = 0; i < width; ++i) {
      dest[dest_start + i] = src[src_start + i];
    }
  }
}

// recursive copy function, this function is similar to crop_copy but loops
// over all but the last two dimensions. It is implemented this way to allow
// for ND cropping while still relying on a CUDA kernel for the innermost
// two dimensions for performance reasons.
// An alternative way to implement ND cropping relying more on the kernel
// would require passing offsets to the kernel, which is a bit problematic
// because it is of variable length. Since in the standard (N,C,W,H) case
// N,C are usually not cropped a speedup could be achieved by not looping
// the application of the copy_kernel around these dimensions.
template <typename Dtype>
void CropLayer<Dtype>::crop_copy_gpu(const vector<Blob<Dtype>*>& bottom,
             const vector<Blob<Dtype>*>& top,
             const vector<int>& offsets,
             vector<int> indices,
             int cur_dim,
             const Dtype* src_data,
             Dtype* dest_data,
             bool is_forward) {
  if (cur_dim + 2 < top[0]->num_axes()) {
    // We are not yet at the final dimension, call copy recursively
    for (int i = 0; i < top[0]->shape(cur_dim); ++i) {
      indices[cur_dim] = i;
      crop_copy_gpu(bottom, top, offsets, indices, cur_dim+1,
                src_data, dest_data, is_forward);
    }
  } else {
    // We are at the last two dimensions, which are stored contiguously in memory
    // With (N,C,H,W)
    //      (0,1,2,3) cur_dim -> H
    //                cur_dim+1 -> W
    const int lines = top[0]->shape(cur_dim);
    const int height = top[0]->shape(cur_dim);
    const int width = top[0]->shape(cur_dim+1);
    std::vector<int> ind_off(cur_dim+2, 0);
    for (int j = 0; j < cur_dim; ++j) {
      ind_off[j] = indices[j] + offsets[j];
    }
    ind_off[cur_dim] = offsets[cur_dim];
    ind_off[cur_dim+1] = offsets[cur_dim+1];
    // Compute copy strides
    const int src_outer_stride =
        bottom[0]->shape(cur_dim)*bottom[0]->shape(cur_dim+1);
    const int src_inner_stride = bottom[0]->shape(cur_dim+1);
    const int dest_outer_stride =
        top[0]->shape(cur_dim)*top[0]->shape(cur_dim+1);
    const int dest_inner_stride = top[0]->shape(cur_dim+1);

    if (is_forward) {
      const Dtype* bottom_data = bottom[0]->gpu_data() +
          bottom[0]->offset(ind_off);
      Dtype* top_data = top[0]->mutable_gpu_data() +
          top[0]->offset(indices);
      // NOLINT_NEXT_LINE(whitespace/operators)
      copy_kernel<<<CAFFE_GET_BLOCKS(lines), CAFFE_CUDA_NUM_THREADS>>>(
          lines, height, width,
          src_outer_stride, src_inner_stride,
          dest_outer_stride, dest_inner_stride,
          bottom_data, top_data);

    } else {
      const Dtype* top_diff = top[0]->gpu_diff() +
          top[0]->offset(indices);
      Dtype* bottom_diff = bottom[0]->mutable_gpu_diff() +
          bottom[0]->offset(ind_off);
      // NOLINT_NEXT_LINE(whitespace/operators)
      copy_kernel<<<CAFFE_GET_BLOCKS(lines), CAFFE_CUDA_NUM_THREADS>>>(
          lines, height, width,
          dest_outer_stride, dest_inner_stride,
          src_outer_stride, src_inner_stride,
          top_diff, bottom_diff);
    }
  }
}

template <typename Dtype>
void CropLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  std::vector<int> indices(top[0]->num_axes(), 0);
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  crop_copy_gpu(bottom, top, offsets, indices, 0, bottom_data, top_data, true);
}

template <typename Dtype>
void CropLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  const Dtype* top_diff = top[0]->gpu_diff();
  Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();

  if (propagate_down[0]) {
    caffe_gpu_set(bottom[0]->count(), static_cast<Dtype>(0), bottom_diff);
    std::vector<int> indices(top[0]->num_axes(), 0);
    crop_copy_gpu(bottom, top, offsets, indices, 0, top_diff, bottom_diff,
                  false);
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(CropLayer);

}  // namespace caffe

@ -23,8 +23,8 @@ void DropoutLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  NeuronLayer<Dtype>::Reshape(bottom, top);
  // Set up the cache for random number generation
  rand_vec_.Reshape(bottom[0]->num(), bottom[0]->channels(),
      bottom[0]->height(), bottom[0]->width());
  // ReshapeLike does not work because rand_vec_ is of Dtype uint
  rand_vec_.Reshape(bottom[0]->shape());
}

template <typename Dtype>

@ -11,6 +11,7 @@ void InnerProductLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const int num_output = this->layer_param_.inner_product_param().num_output();
  bias_term_ = this->layer_param_.inner_product_param().bias_term();
  transpose_ = this->layer_param_.inner_product_param().transpose();
  N_ = num_output;
  const int axis = bottom[0]->CanonicalAxisIndex(
      this->layer_param_.inner_product_param().axis());

@ -27,10 +28,15 @@ void InnerProductLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
  } else {
    this->blobs_.resize(1);
  }
  // Intialize the weight
  // Initialize the weights
  vector<int> weight_shape(2);
  weight_shape[0] = N_;
  weight_shape[1] = K_;
  if (transpose_) {
    weight_shape[0] = K_;
    weight_shape[1] = N_;
  } else {
    weight_shape[0] = N_;
    weight_shape[1] = K_;
  }
  this->blobs_[0].reset(new Blob<Dtype>(weight_shape));
  // fill the weights
  shared_ptr<Filler<Dtype> > weight_filler(GetFiller<Dtype>(

@ -80,7 +86,8 @@ void InnerProductLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
  const Dtype* bottom_data = bottom[0]->cpu_data();
  Dtype* top_data = top[0]->mutable_cpu_data();
  const Dtype* weight = this->blobs_[0]->cpu_data();
  caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype)1.,
  caffe_cpu_gemm<Dtype>(CblasNoTrans, transpose_ ? CblasNoTrans : CblasTrans,
      M_, N_, K_, (Dtype)1.,
      bottom_data, weight, (Dtype)0., top_data);
  if (bias_term_) {
    caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, 1, (Dtype)1.,

@ -97,8 +104,17 @@ void InnerProductLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const Dtype* top_diff = top[0]->cpu_diff();
    const Dtype* bottom_data = bottom[0]->cpu_data();
    // Gradient with respect to weight
    caffe_cpu_gemm<Dtype>(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1.,
        top_diff, bottom_data, (Dtype)1., this->blobs_[0]->mutable_cpu_diff());
    if (transpose_) {
      caffe_cpu_gemm<Dtype>(CblasTrans, CblasNoTrans,
          K_, N_, M_,
          (Dtype)1., bottom_data, top_diff,
          (Dtype)1., this->blobs_[0]->mutable_cpu_diff());
    } else {
      caffe_cpu_gemm<Dtype>(CblasTrans, CblasNoTrans,
          N_, K_, M_,
          (Dtype)1., top_diff, bottom_data,
          (Dtype)1., this->blobs_[0]->mutable_cpu_diff());
    }
  }
  if (bias_term_ && this->param_propagate_down_[1]) {
    const Dtype* top_diff = top[0]->cpu_diff();

@ -110,9 +126,17 @@ void InnerProductLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
  if (propagate_down[0]) {
    const Dtype* top_diff = top[0]->cpu_diff();
    // Gradient with respect to bottom data
    caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype)1.,
        top_diff, this->blobs_[0]->cpu_data(), (Dtype)0.,
        bottom[0]->mutable_cpu_diff());
    if (transpose_) {
      caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasTrans,
          M_, K_, N_,
          (Dtype)1., top_diff, this->blobs_[0]->cpu_data(),
          (Dtype)0., bottom[0]->mutable_cpu_diff());
    } else {
      caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans,
          M_, K_, N_,
          (Dtype)1., top_diff, this->blobs_[0]->cpu_data(),
          (Dtype)0., bottom[0]->mutable_cpu_diff());
    }
  }
}

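A hedged net spec sketch of turning on the new transpose option (the transpose field itself is defined in InnerProductParameter further below); whether the keyword routes into inner_product_param follows the existing net spec conventions and is an assumption here.

    import caffe
    from caffe import layers as L

    n = caffe.NetSpec()
    n.data = L.Input(shape=dict(dim=[10, 3, 227, 227]))
    n.fc = L.InnerProduct(n.data, num_output=1000, transpose=True)  # store weights as K x N
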
@ -19,7 +19,9 @@ void InnerProductLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    caffe_gpu_axpy<Dtype>(N_, bias_multiplier_.cpu_data()[0],
                          this->blobs_[1]->gpu_data(), top_data);
  } else {
    caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype)1.,
    caffe_gpu_gemm<Dtype>(CblasNoTrans,
                          transpose_ ? CblasNoTrans : CblasTrans,
                          M_, N_, K_, (Dtype)1.,
                          bottom_data, weight, (Dtype)0., top_data);
    if (bias_term_)
      caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, 1, (Dtype)1.,

@ -36,8 +38,17 @@ void InnerProductLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const Dtype* top_diff = top[0]->gpu_diff();
    const Dtype* bottom_data = bottom[0]->gpu_data();
    // Gradient with respect to weight
    caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1.,
        top_diff, bottom_data, (Dtype)1., this->blobs_[0]->mutable_gpu_diff());
    if (transpose_) {
      caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans,
          K_, N_, M_,
          (Dtype)1., bottom_data, top_diff,
          (Dtype)1., this->blobs_[0]->mutable_gpu_diff());
    } else {
      caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans,
          N_, K_, M_,
          (Dtype)1., top_diff, bottom_data,
          (Dtype)1., this->blobs_[0]->mutable_gpu_diff());
    }
  }
  if (bias_term_ && this->param_propagate_down_[1]) {
    const Dtype* top_diff = top[0]->gpu_diff();

@ -49,9 +60,17 @@ void InnerProductLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
  if (propagate_down[0]) {
    const Dtype* top_diff = top[0]->gpu_diff();
    // Gradient with respect to bottom data
    caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype)1.,
        top_diff, this->blobs_[0]->gpu_data(), (Dtype)0.,
        bottom[0]->mutable_gpu_diff());
    if (transpose_) {
      caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans,
          M_, K_, N_,
          (Dtype)1., top_diff, this->blobs_[0]->gpu_data(),
          (Dtype)0., bottom[0]->mutable_gpu_diff());
    } else {
      caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans,
          M_, K_, N_,
          (Dtype)1., top_diff, this->blobs_[0]->gpu_data(),
          (Dtype)0., bottom[0]->mutable_gpu_diff());
    }
  }
}

@ -0,0 +1,27 @@
#include <vector>

#include "caffe/layers/input_layer.hpp"

namespace caffe {

template <typename Dtype>
void InputLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  const int num_top = top.size();
  const InputParameter& param = this->layer_param_.input_param();
  const int num_shape = param.shape_size();
  CHECK(num_shape == 0 || num_shape == 1 || num_shape == num_top)
      << "Must specify 'shape' once, once per top blob, or not at all: "
      << num_top << " tops vs. " << num_shape << " shapes.";
  if (num_shape > 0) {
    for (int i = 0; i < num_top; ++i) {
      const int shape_index = (param.shape_size() == 1) ? 0 : i;
      top[i]->Reshape(param.shape(shape_index));
    }
  }
}

INSTANTIATE_CLASS(InputLayer);
REGISTER_LAYER_CLASS(Input);

}  // namespace caffe

@ -56,22 +56,7 @@ void Net<Dtype>::Init(const NetParameter& in_param) {
  name_ = param.name();
  map<string, int> blob_name_to_idx;
  set<string> available_blobs;
  CHECK(param.input_dim_size() == 0 || param.input_shape_size() == 0)
      << "Must specify either input_shape OR deprecated input_dim, not both.";
  if (param.input_dim_size() > 0) {
    // Deprecated 4D dimensions.
    CHECK_EQ(param.input_size() * 4, param.input_dim_size())
        << "Incorrect input blob dimension specifications.";
  } else {
    CHECK_EQ(param.input_size(), param.input_shape_size())
        << "Exactly one input_shape must be specified per input.";
  }
  memory_used_ = 0;
  // set the input blobs
  for (int input_id = 0; input_id < param.input_size(); ++input_id) {
    const int layer_id = -1;  // inputs have fake layer ID -1
    AppendTop(param, layer_id, input_id, &available_blobs, &blob_name_to_idx);
  }
  // For each layer, set up its input and output
  bottom_vecs_.resize(param.layer_size());
  top_vecs_.resize(param.layer_size());

@ -118,6 +103,12 @@ void Net<Dtype>::Init(const NetParameter& in_param) {
    int num_top = layer_param.top_size();
    for (int top_id = 0; top_id < num_top; ++top_id) {
      AppendTop(param, layer_id, top_id, &available_blobs, &blob_name_to_idx);
      // Collect Input layer tops as Net inputs.
      if (layer_param.type() == "Input") {
        const int blob_id = blobs_.size() - 1;
        net_input_blob_indices_.push_back(blob_id);
        net_input_blobs_.push_back(blobs_[blob_id].get());
      }
    }
    // If the layer specifies that AutoTopBlobs() -> true and the LayerParameter
    // specified fewer than the required number (as specified by

@ -379,19 +370,17 @@ bool Net<Dtype>::StateMeetsRule(const NetState& state,
  return true;
}

// Helper for Net::Init: add a new input or top blob to the net. (Inputs have
// layer_id == -1, tops have layer_id >= 0.)
// Helper for Net::Init: add a new top blob to the net.
template <typename Dtype>
void Net<Dtype>::AppendTop(const NetParameter& param, const int layer_id,
                           const int top_id, set<string>* available_blobs,
                           map<string, int>* blob_name_to_idx) {
  shared_ptr<LayerParameter> layer_param((layer_id >= 0) ?
    (new LayerParameter(param.layer(layer_id))) : NULL);
  const string& blob_name = layer_param ?
      (layer_param->top_size() > top_id ?
          layer_param->top(top_id) : "(automatic)") : param.input(top_id);
  shared_ptr<LayerParameter> layer_param(
      new LayerParameter(param.layer(layer_id)));
  const string& blob_name = (layer_param->top_size() > top_id) ?
      layer_param->top(top_id) : "(automatic)";
  // Check if we are doing in-place computation
  if (blob_name_to_idx && layer_param && layer_param->bottom_size() > top_id &&
  if (blob_name_to_idx && layer_param->bottom_size() > top_id &&
      blob_name == layer_param->bottom(top_id)) {
    // In-place computation
    LOG_IF(INFO, Caffe::root_solver())

@ -407,11 +396,7 @@ void Net<Dtype>::AppendTop(const NetParameter& param, const int layer_id,
  } else {
    // Normal output.
    if (Caffe::root_solver()) {
      if (layer_param) {
        LOG(INFO) << layer_param->name() << " -> " << blob_name;
      } else {
        LOG(INFO) << "Input " << top_id << " -> " << blob_name;
      }
      LOG(INFO) << layer_param->name() << " -> " << blob_name;
    }
    shared_ptr<Blob<Dtype> > blob_pointer(new Blob<Dtype>());
    const int blob_id = blobs_.size();

@ -419,22 +404,8 @@ void Net<Dtype>::AppendTop(const NetParameter& param, const int layer_id,
    blob_names_.push_back(blob_name);
    blob_need_backward_.push_back(false);
    if (blob_name_to_idx) { (*blob_name_to_idx)[blob_name] = blob_id; }
    if (layer_id == -1) {
      // Set the (explicitly specified) dimensions of the input blob.
      if (param.input_dim_size() > 0) {
        blob_pointer->Reshape(param.input_dim(top_id * 4),
                              param.input_dim(top_id * 4 + 1),
                              param.input_dim(top_id * 4 + 2),
                              param.input_dim(top_id * 4 + 3));
      } else {
        blob_pointer->Reshape(param.input_shape(top_id));
      }
      net_input_blob_indices_.push_back(blob_id);
      net_input_blobs_.push_back(blob_pointer.get());
    } else {
      top_id_vecs_[layer_id].push_back(blob_id);
      top_vecs_[layer_id].push_back(blob_pointer.get());
    }
    top_id_vecs_[layer_id].push_back(blob_id);
    top_vecs_[layer_id].push_back(blob_pointer.get());
  }
  if (available_blobs) { available_blobs->insert(blob_name); }
}

@ -566,11 +537,6 @@ Dtype Net<Dtype>::ForwardFromTo(int start, int end) {
  CHECK_GE(start, 0);
  CHECK_LT(end, layers_.size());
  Dtype loss = 0;
  if (debug_info_) {
    for (int i = 0; i < net_input_blobs_.size(); ++i) {
      InputDebugInfo(i);
    }
  }
  for (int i = start; i <= end; ++i) {
    // LOG(ERROR) << "Forwarding " << layer_names_[i];
    Dtype layer_loss = layers_[i]->Forward(bottom_vecs_[i], top_vecs_[i]);

@ -591,7 +557,7 @@ Dtype Net<Dtype>::ForwardTo(int end) {
}

template <typename Dtype>
const vector<Blob<Dtype>*>& Net<Dtype>::ForwardPrefilled(Dtype* loss) {
const vector<Blob<Dtype>*>& Net<Dtype>::Forward(Dtype* loss) {
  if (loss != NULL) {
    *loss = ForwardFromTo(0, layers_.size() - 1);
  } else {

@ -603,32 +569,13 @@ const vector<Blob<Dtype>*>& Net<Dtype>::ForwardPrefilled(Dtype* loss) {
template <typename Dtype>
const vector<Blob<Dtype>*>& Net<Dtype>::Forward(
    const vector<Blob<Dtype>*> & bottom, Dtype* loss) {
  // Copy bottom to internal bottom
  LOG_EVERY_N(WARNING, 1000) << "DEPRECATED: Forward(bottom, loss) "
      << "will be removed in a future version. Use Forward(loss).";
  // Copy bottom to net bottoms
  for (int i = 0; i < bottom.size(); ++i) {
    net_input_blobs_[i]->CopyFrom(*bottom[i]);
  }
  return ForwardPrefilled(loss);
}

template <typename Dtype>
string Net<Dtype>::Forward(const string& input_blob_protos, Dtype* loss) {
  BlobProtoVector blob_proto_vec;
  if (net_input_blobs_.size()) {
    blob_proto_vec.ParseFromString(input_blob_protos);
    CHECK_EQ(blob_proto_vec.blobs_size(), net_input_blobs_.size())
        << "Incorrect input size.";
    for (int i = 0; i < blob_proto_vec.blobs_size(); ++i) {
      net_input_blobs_[i]->FromProto(blob_proto_vec.blobs(i));
    }
  }
  ForwardPrefilled(loss);
  blob_proto_vec.Clear();
  for (int i = 0; i < net_output_blobs_.size(); ++i) {
    net_output_blobs_[i]->ToProto(blob_proto_vec.add_blobs());
  }
  string output;
  blob_proto_vec.SerializeToString(&output);
  return output;
  return Forward(loss);
}

template <typename Dtype>

@ -644,16 +591,6 @@ void Net<Dtype>::BackwardFromTo(int start, int end) {
  }
}

template <typename Dtype>
void Net<Dtype>::InputDebugInfo(const int input_id) {
  const Blob<Dtype>& blob = *net_input_blobs_[input_id];
  const string& blob_name = blob_names_[net_input_blob_indices_[input_id]];
  const Dtype data_abs_val_mean = blob.asum_data() / blob.count();
  LOG_IF(INFO, Caffe::root_solver())
      << "    [Forward] "
      << "Input " << blob_name << " data: " << data_abs_val_mean;
}

template <typename Dtype>
void Net<Dtype>::ForwardDebugInfo(const int layer_id) {
  for (int top_id = 0; top_id < top_vecs_[layer_id].size(); ++top_id) {

@ -912,9 +849,6 @@ void Net<Dtype>::ToProto(NetParameter* param, bool write_diff) const {
  param->Clear();
  param->set_name(name_);
  // Add bottom and top
  for (int i = 0; i < net_input_blob_indices_.size(); ++i) {
    param->add_input(blob_names_[net_input_blob_indices_[i]]);
  }
  DLOG(INFO) << "Serializing " << layers_.size() << " layers";
  for (int i = 0; i < layers_.size(); ++i) {
    LayerParameter* layer_param = param->add_layer();

@ -380,7 +380,8 @@ void P2PSync<Dtype>::on_gradients_ready() {
}

template<typename Dtype>
void P2PSync<Dtype>::run(const vector<int>& gpus) {
void P2PSync<Dtype>::Prepare(const vector<int>& gpus,
            vector<shared_ptr<P2PSync<Dtype> > >* syncs) {
  // Pair devices for map-reduce synchronization
  vector<DevicePair> pairs;
  DevicePair::compute(gpus, &pairs);

@ -391,15 +392,14 @@ void P2PSync<Dtype>::run(const vector<int>& gpus) {
  LOG(INFO)<< "GPUs pairs " << s.str();

  SolverParameter param(solver_->param());
  vector<shared_ptr<P2PSync<Dtype> > > syncs(gpus.size());

  // Build the GPU tree by finding the parent for each solver
  for (int attempts = 0; attempts < pairs.size(); ++attempts) {
    for (int i = 1; i < pairs.size(); ++i) {
      if (!syncs[i].get()) {
      if (!syncs->at(i).get()) {
        P2PSync<Dtype>* parent = NULL;
        for (int j = 0; j < syncs.size(); ++j) {
          P2PSync<Dtype>* sync = j == 0 ? this : syncs[j].get();
        for (int j = 0; j < syncs->size(); ++j) {
          P2PSync<Dtype>* sync = j == 0 ? this : syncs->at(j).get();
          if (sync) {
            const SolverParameter& p = sync->solver()->param();
            if (p.device_id() == pairs[i].parent()) {

@ -409,12 +409,18 @@ void P2PSync<Dtype>::run(const vector<int>& gpus) {
        }
        if (parent) {
          param.set_device_id(pairs[i].device());
          syncs[i].reset(new P2PSync<Dtype>(solver_, parent, param));
          parent->children_.push_back((P2PSync<Dtype>*) syncs[i].get());
          syncs->at(i).reset(new P2PSync<Dtype>(solver_, parent, param));
          parent->children_.push_back((P2PSync<Dtype>*) syncs->at(i).get());
        }
      }
    }
  }
}

template<typename Dtype>
void P2PSync<Dtype>::Run(const vector<int>& gpus) {
  vector<shared_ptr<P2PSync<Dtype> > > syncs(gpus.size());
  Prepare(gpus, &syncs);

  LOG(INFO)<< "Starting Optimization";

@ -63,12 +63,12 @@ message FillerParameter {

message NetParameter {
  optional string name = 1; // consider giving the network a name
  // The input blobs to the network.
  // DEPRECATED. See InputParameter. The input blobs to the network.
  repeated string input = 3;
  // The shape of the input blobs.
  // DEPRECATED. See InputParameter. The shape of the input blobs.
  repeated BlobShape input_shape = 8;

  // 4D input dimensions -- deprecated. Use "shape" instead.
  // 4D input dimensions -- deprecated. Use "input_shape" instead.
  // If specified, for each input blob there should be four
  // values specifying the num, channels, height and width of the input blob.
  // Thus, there should be a total of (4 * #input) numbers.

@ -306,7 +306,7 @@ message ParamSpec {
// NOTE
// Update the next available ID when you add a new LayerParameter field.
//
// LayerParameter next available layer-specific ID: 143 (last added: scale_param)
// LayerParameter next available layer-specific ID: 145 (last added: crop_param)
message LayerParameter {
  optional string name = 1; // the layer name
  optional string type = 2; // the layer type

@ -360,6 +360,7 @@ message LayerParameter {
  optional ConcatParameter concat_param = 104;
  optional ContrastiveLossParameter contrastive_loss_param = 105;
  optional ConvolutionParameter convolution_param = 106;
  optional CropParameter crop_param = 144;
  optional DataParameter data_param = 107;
  optional DropoutParameter dropout_param = 108;
  optional DummyDataParameter dummy_data_param = 109;

@ -374,6 +375,7 @@ message LayerParameter {
  optional ImageDataParameter image_data_param = 115;
  optional InfogainLossParameter infogain_loss_param = 116;
  optional InnerProductParameter inner_product_param = 117;
  optional InputParameter input_param = 143;
  optional LogParameter log_param = 134;
  optional LRNParameter lrn_param = 118;
  optional MemoryDataParameter memory_data_param = 119;

@ -433,7 +435,7 @@ message LossParameter {
    // Outputs that receive the ignore label will NOT be ignored in computing
    // the normalization factor.
    FULL = 0;
    // Divide by the total number of output locations that do not take the
    // Divide by the total number of output locations that do not take the
    // ignore_label. If ignore_label is not set, this behaves like FULL.
    VALID = 1;
    // Divide by the batch size.

@ -599,6 +601,24 @@ message ConvolutionParameter {
  optional bool force_nd_im2col = 17 [default = false];
}

message CropParameter {
  // To crop, elements of the first bottom are selected to fit the dimensions
  // of the second, reference bottom. The crop is configured by
  // - the crop `axis` to pick the dimensions for cropping
  // - the crop `offset` to set the shift for all/each dimension
  // to align the cropped bottom with the reference bottom.
  // All dimensions up to but excluding `axis` are preserved, while
  // the dimensions including and trailing `axis` are cropped.
  // If only one `offset` is set, then all dimensions are offset by this amount.
  // Otherwise, the number of offsets must equal the number of cropped axes to
  // shift the crop in each dimension accordingly.
  // Note: standard dimensions are N,C,H,W so the default is a spatial crop,
  // and `axis` may be negative to index from the end (e.g., -1 for the last
  // axis).
  optional int32 axis = 1 [default = 2];
  repeated uint32 offset = 2;
}
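A short usage sketch of the Crop parameters described above, written with the Python net spec (the same call pattern as the new coord_map tests): crop `big` to the shape of `ref` along the spatial axes, shifted by 4 in each cropped dimension.

    import caffe
    from caffe import layers as L

    n = caffe.NetSpec()
    n.big = L.Input(shape=dict(dim=[1, 3, 64, 64]))
    n.ref = L.Input(shape=dict(dim=[1, 3, 56, 56]))
    n.crop = L.Crop(n.big, n.ref, axis=2, offset=4)
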
|
||||
message DataParameter {
|
||||
enum DB {
|
||||
LEVELDB = 0;
|
||||
|
@ -673,7 +693,7 @@ message EltwiseParameter {
// Message that stores parameters used by ELULayer
message ELUParameter {
  // Described in:
  // Clevert, D.-A., Unterthiner, T., & Hochreiter, S. (2015). Fast and Accurate
  // Deep Network Learning by Exponential Linear Units (ELUs). arXiv
  optional float alpha = 1 [default = 1];
}
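
For reference, the nonlinearity that alpha parameterizes, taken from the cited paper (the formula is not spelled out in the proto itself):

// ELU(x) = x                      for x > 0
// ELU(x) = alpha * (exp(x) - 1)   for x <= 0
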
@ -788,6 +808,19 @@ message InnerProductParameter {
  // all preceding axes are retained in the output.
  // May be negative to index from the end (e.g., -1 for the last axis).
  optional int32 axis = 5 [default = 1];
  // Specify whether to transpose the weight matrix or not.
  // If transpose == true, any operations will be performed on the transpose
  // of the weight matrix. The weight matrix itself is not stored transposed;
  // rather, the transpose flag of the underlying operations is toggled
  // accordingly.
  optional bool transpose = 6 [default = false];
}
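
A minimal sketch of the flag in a net definition (the layer itself is hypothetical; the weight-shape consequence matches the InnerProduct tests later in this commit, where a 10-output layer over 60 flattened inputs stores a 10x60 weight blob without transpose and a 60x10 blob with it):

layer {
  name: "fc1"
  type: "InnerProduct"
  bottom: "data"
  top: "fc1"
  inner_product_param {
    num_output: 10
    transpose: true
  }
}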

message InputParameter {
  // This layer produces N >= 1 top blob(s) to be assigned manually.
  // Define N shapes to set a shape for each top.
  // Define 1 shape to set the same shape for every top.
  // Define no shape to defer to reshaping manually.
  repeated BlobShape shape = 1;
}
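
A minimal sketch of the corresponding Input layer in a net definition; it mirrors the 'data' layer of the ReshapableNetwork test net further down in this commit:

layer {
  name: "data"
  type: "Input"
  top: "data"
  input_param {
    shape: { dim: 1 dim: 3 dim: 100 dim: 100 }
  }
}
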
// Message that stores parameters used by LogLayer
|
||||
|
|
|
@ -192,7 +192,6 @@ void Solver<Dtype>::InitTestNets() {
|
|||
|
||||
template <typename Dtype>
|
||||
void Solver<Dtype>::Step(int iters) {
|
||||
vector<Blob<Dtype>*> bottom_vec;
|
||||
const int start_iter = iter_;
|
||||
const int stop_iter = iter_ + iters;
|
||||
int average_loss = this->param_.average_loss();
|
||||
|
@ -220,7 +219,7 @@ void Solver<Dtype>::Step(int iters) {
|
|||
// accumulate the loss and gradient
|
||||
Dtype loss = 0;
|
||||
for (int i = 0; i < param_.iter_size(); ++i) {
|
||||
loss += net_->ForwardBackward(bottom_vec);
|
||||
loss += net_->ForwardBackward();
|
||||
}
|
||||
loss /= param_.iter_size();
|
||||
// average the loss across iterations for smoothed reporting
|
||||
|
@ -311,7 +310,7 @@ void Solver<Dtype>::Solve(const char* resume_file) {
|
|||
if (param_.display() && iter_ % param_.display() == 0) {
|
||||
int average_loss = this->param_.average_loss();
|
||||
Dtype loss;
|
||||
net_->ForwardPrefilled(&loss);
|
||||
net_->Forward(&loss);
|
||||
|
||||
UpdateSmoothedLoss(loss, start_iter, average_loss);
|
||||
|
||||
|
@ -341,7 +340,6 @@ void Solver<Dtype>::Test(const int test_net_id) {
|
|||
ShareTrainedLayersWith(net_.get());
|
||||
vector<Dtype> test_score;
|
||||
vector<int> test_score_output_id;
|
||||
vector<Blob<Dtype>*> bottom_vec;
|
||||
const shared_ptr<Net<Dtype> >& test_net = test_nets_[test_net_id];
|
||||
Dtype loss = 0;
|
||||
for (int i = 0; i < param_.test_iter(test_net_id); ++i) {
|
||||
|
@ -362,7 +360,7 @@ void Solver<Dtype>::Test(const int test_net_id) {
|
|||
|
||||
Dtype iter_loss;
|
||||
const vector<Blob<Dtype>*>& result =
|
||||
test_net->Forward(bottom_vec, &iter_loss);
|
||||
test_net->Forward(&iter_loss);
|
||||
if (param_.test_compute_loss()) {
|
||||
loss += iter_loss;
|
||||
}
|
||||
|
|
|
@ -0,0 +1,265 @@
|
|||
#include <vector>
|
||||
|
||||
#include "gtest/gtest.h"
|
||||
|
||||
#include "caffe/blob.hpp"
|
||||
#include "caffe/common.hpp"
|
||||
#include "caffe/filler.hpp"
|
||||
#include "caffe/layers/crop_layer.hpp"
|
||||
|
||||
#include "caffe/test/test_caffe_main.hpp"
|
||||
#include "caffe/test/test_gradient_check_util.hpp"
|
||||
|
||||
namespace caffe {
|
||||
|
||||
template <typename TypeParam>
|
||||
class CropLayerTest : public MultiDeviceTest<TypeParam> {
|
||||
typedef typename TypeParam::Dtype Dtype;
|
||||
|
||||
protected:
|
||||
CropLayerTest()
|
||||
: blob_bottom_0_(new Blob<Dtype>(2, 4, 5, 4)),
|
||||
blob_bottom_1_(new Blob<Dtype>(2, 3, 4, 2)),
|
||||
blob_top_(new Blob<Dtype>()) {}
|
||||
virtual void SetUp() {
|
||||
// fill the values
|
||||
FillerParameter filler_param;
|
||||
GaussianFiller<Dtype> filler(filler_param);
|
||||
filler.Fill(this->blob_bottom_0_);
|
||||
filler.Fill(this->blob_bottom_1_);
|
||||
|
||||
blob_bottom_vec_.push_back(blob_bottom_0_);
|
||||
blob_bottom_vec_.push_back(blob_bottom_1_);
|
||||
blob_top_vec_.push_back(blob_top_);
|
||||
}
|
||||
|
||||
virtual ~CropLayerTest() {
|
||||
delete blob_bottom_0_; delete blob_bottom_1_;
|
||||
delete blob_top_;
|
||||
}
|
||||
|
||||
Blob<Dtype>* const blob_bottom_0_;
|
||||
Blob<Dtype>* const blob_bottom_1_;
|
||||
Blob<Dtype>* const blob_top_;
|
||||
vector<Blob<Dtype>*> blob_bottom_vec_;
|
||||
vector<Blob<Dtype>*> blob_top_vec_;
|
||||
};
|
||||
|
||||
|
||||
TYPED_TEST_CASE(CropLayerTest, TestDtypesAndDevices);
|
||||
|
||||
TYPED_TEST(CropLayerTest, TestSetupShapeAll) {
|
||||
typedef typename TypeParam::Dtype Dtype;
|
||||
LayerParameter layer_param;
|
||||
// Crop all dimensions
|
||||
layer_param.mutable_crop_param()->set_axis(0);
|
||||
CropLayer<Dtype> layer(layer_param);
|
||||
layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
|
||||
for (int i = 0; i < this->blob_top_->num_axes(); ++i) {
|
||||
EXPECT_EQ(this->blob_bottom_1_->shape(i), this->blob_top_->shape(i));
|
||||
}
|
||||
}
|
||||
|
||||
TYPED_TEST(CropLayerTest, TestSetupShapeDefault) {
|
||||
typedef typename TypeParam::Dtype Dtype;
|
||||
LayerParameter layer_param;
|
||||
// Crop last two dimensions, axis is 2 by default
|
||||
CropLayer<Dtype> layer(layer_param);
|
||||
layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
|
||||
for (int i = 0; i < this->blob_top_->num_axes(); ++i) {
|
||||
if (i < 2) {
|
||||
EXPECT_EQ(this->blob_bottom_0_->shape(i), this->blob_top_->shape(i));
|
||||
} else {
|
||||
EXPECT_EQ(this->blob_bottom_1_->shape(i), this->blob_top_->shape(i));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
TYPED_TEST(CropLayerTest, TestSetupShapeNegativeIndexing) {
|
||||
typedef typename TypeParam::Dtype Dtype;
|
||||
LayerParameter layer_param;
|
||||
// Crop last dimension by negative indexing
|
||||
layer_param.mutable_crop_param()->set_axis(-1);
|
||||
CropLayer<Dtype> layer(layer_param);
|
||||
layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
|
||||
for (int i = 0; i < this->blob_top_->num_axes(); ++i) {
|
||||
if (i < 3) {
|
||||
EXPECT_EQ(this->blob_bottom_0_->shape(i), this->blob_top_->shape(i));
|
||||
} else {
|
||||
EXPECT_EQ(this->blob_bottom_1_->shape(i), this->blob_top_->shape(i));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
TYPED_TEST(CropLayerTest, TestCropAll) {
|
||||
typedef typename TypeParam::Dtype Dtype;
|
||||
LayerParameter layer_param;
|
||||
layer_param.mutable_crop_param()->set_axis(0);
|
||||
CropLayer<Dtype> layer(layer_param);
|
||||
layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
|
||||
layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
|
||||
for (int n = 0; n < this->blob_bottom_0_->num(); ++n) {
|
||||
for (int c = 0; c < this->blob_bottom_0_->channels(); ++c) {
|
||||
for (int h = 0; h < this->blob_bottom_0_->height(); ++h) {
|
||||
for (int w = 0; w < this->blob_bottom_0_->width(); ++w) {
|
||||
if ( n < this->blob_top_->shape(0) &&
|
||||
c < this->blob_top_->shape(1) &&
|
||||
h < this->blob_top_->shape(2) &&
|
||||
w < this->blob_top_->shape(3) ) {
|
||||
EXPECT_EQ(this->blob_top_->data_at(n, c, h, w),
|
||||
this->blob_bottom_0_->data_at(n, c, h, w));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
TYPED_TEST(CropLayerTest, TestCropAllOffset) {
|
||||
typedef typename TypeParam::Dtype Dtype;
|
||||
LayerParameter layer_param;
|
||||
layer_param.mutable_crop_param()->set_axis(0);
|
||||
layer_param.mutable_crop_param()->add_offset(0);
|
||||
layer_param.mutable_crop_param()->add_offset(1);
|
||||
layer_param.mutable_crop_param()->add_offset(1);
|
||||
layer_param.mutable_crop_param()->add_offset(2);
|
||||
CropLayer<Dtype> layer(layer_param);
|
||||
layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
|
||||
layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
|
||||
for (int n = 0; n < this->blob_bottom_0_->num(); ++n) {
|
||||
for (int c = 0; c < this->blob_bottom_0_->channels(); ++c) {
|
||||
for (int h = 0; h < this->blob_bottom_0_->height(); ++h) {
|
||||
for (int w = 0; w < this->blob_bottom_0_->width(); ++w) {
|
||||
if ( n < this->blob_top_->shape(0) &&
|
||||
c < this->blob_top_->shape(1) &&
|
||||
h < this->blob_top_->shape(2) &&
|
||||
w < this->blob_top_->shape(3) ) {
|
||||
EXPECT_EQ(this->blob_top_->data_at(n, c, h, w),
|
||||
this->blob_bottom_0_->data_at(n, c+1, h+1, w+2));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
TYPED_TEST(CropLayerTest, TestCropHW) {
|
||||
typedef typename TypeParam::Dtype Dtype;
|
||||
LayerParameter layer_param;
|
||||
layer_param.mutable_crop_param()->set_axis(2);
|
||||
layer_param.mutable_crop_param()->add_offset(1);
|
||||
layer_param.mutable_crop_param()->add_offset(2);
|
||||
CropLayer<Dtype> layer(layer_param);
|
||||
layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
|
||||
layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
|
||||
for (int n = 0; n < this->blob_bottom_0_->num(); ++n) {
|
||||
for (int c = 0; c < this->blob_bottom_0_->channels(); ++c) {
|
||||
for (int h = 0; h < this->blob_bottom_0_->height(); ++h) {
|
||||
for (int w = 0; w < this->blob_bottom_0_->width(); ++w) {
|
||||
if (n < this->blob_top_->shape(0) &&
|
||||
c < this->blob_top_->shape(1) &&
|
||||
h < this->blob_top_->shape(2) &&
|
||||
w < this->blob_top_->shape(3)) {
|
||||
EXPECT_EQ(this->blob_top_->data_at(n, c, h, w),
|
||||
this->blob_bottom_0_->data_at(n, c, h+1, w+2));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
TYPED_TEST(CropLayerTest, TestCrop5D) {
|
||||
typedef typename TypeParam::Dtype Dtype;
|
||||
// Add dimension to each bottom for >4D check
|
||||
vector<int> bottom_0_shape = this->blob_bottom_0_->shape();
|
||||
vector<int> bottom_1_shape = this->blob_bottom_1_->shape();
|
||||
bottom_0_shape.push_back(2);
|
||||
bottom_1_shape.push_back(1);
|
||||
this->blob_bottom_0_->Reshape(bottom_0_shape);
|
||||
this->blob_bottom_1_->Reshape(bottom_1_shape);
|
||||
FillerParameter filler_param;
|
||||
GaussianFiller<Dtype> filler(filler_param);
|
||||
filler.Fill(this->blob_bottom_0_);
|
||||
filler.Fill(this->blob_bottom_1_);
|
||||
// Make layer
|
||||
LayerParameter layer_param;
|
||||
layer_param.mutable_crop_param()->set_axis(2);
|
||||
layer_param.mutable_crop_param()->add_offset(1);
|
||||
layer_param.mutable_crop_param()->add_offset(2);
|
||||
layer_param.mutable_crop_param()->add_offset(0);
|
||||
CropLayer<Dtype> layer(layer_param);
|
||||
layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
|
||||
layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
|
||||
vector<int> bottom_idx = vector<int>(5, 0);
|
||||
vector<int> top_idx = vector<int>(5, 0);
|
||||
for (int n = 0; n < this->blob_bottom_0_->shape(0); ++n) {
|
||||
for (int c = 0; c < this->blob_bottom_0_->shape(1); ++c) {
|
||||
for (int z = 0; z < this->blob_bottom_0_->shape(2); ++z) {
|
||||
for (int h = 0; h < this->blob_bottom_0_->shape(3); ++h) {
|
||||
for (int w = 0; w < this->blob_bottom_0_->shape(4); ++w) {
|
||||
if (n < this->blob_top_->shape(0) &&
|
||||
c < this->blob_top_->shape(1) &&
|
||||
z < this->blob_top_->shape(2) &&
|
||||
h < this->blob_top_->shape(3) &&
|
||||
w < this->blob_top_->shape(4)) {
|
||||
bottom_idx[0] = top_idx[0] = n;
|
||||
bottom_idx[1] = top_idx[1] = c;
|
||||
bottom_idx[2] = z;
|
||||
bottom_idx[3] = h;
|
||||
bottom_idx[4] = top_idx[4] = w;
|
||||
top_idx[2] = z+1;
|
||||
top_idx[3] = h+2;
|
||||
EXPECT_EQ(this->blob_top_->data_at(bottom_idx),
|
||||
this->blob_bottom_0_->data_at(top_idx));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
TYPED_TEST(CropLayerTest, TestCropAllGradient) {
|
||||
typedef typename TypeParam::Dtype Dtype;
|
||||
LayerParameter layer_param;
|
||||
layer_param.mutable_crop_param()->set_axis(0);
|
||||
CropLayer<Dtype> layer(layer_param);
|
||||
GradientChecker<Dtype> checker(1e-2, 1e-3);
|
||||
checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
|
||||
this->blob_top_vec_);
|
||||
}
|
||||
|
||||
TYPED_TEST(CropLayerTest, TestCropHWGradient) {
|
||||
typedef typename TypeParam::Dtype Dtype;
|
||||
LayerParameter layer_param;
|
||||
layer_param.mutable_crop_param()->set_axis(2);
|
||||
layer_param.mutable_crop_param()->add_offset(1);
|
||||
layer_param.mutable_crop_param()->add_offset(2);
|
||||
CropLayer<Dtype> layer(layer_param);
|
||||
GradientChecker<Dtype> checker(1e-2, 1e-3);
|
||||
checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
|
||||
this->blob_top_vec_);
|
||||
}
|
||||
|
||||
TYPED_TEST(CropLayerTest, TestCrop5DGradient) {
|
||||
typedef typename TypeParam::Dtype Dtype;
|
||||
LayerParameter layer_param;
|
||||
layer_param.mutable_crop_param()->set_axis(2);
|
||||
layer_param.mutable_crop_param()->add_offset(1);
|
||||
layer_param.mutable_crop_param()->add_offset(2);
|
||||
layer_param.mutable_crop_param()->add_offset(0);
|
||||
CropLayer<Dtype> layer(layer_param);
|
||||
// Add dimension to each bottom for >4D check
|
||||
vector<int> bottom_0_shape = this->blob_bottom_0_->shape();
|
||||
vector<int> bottom_1_shape = this->blob_bottom_1_->shape();
|
||||
bottom_0_shape.push_back(2);
|
||||
bottom_1_shape.push_back(1);
|
||||
this->blob_bottom_0_->Reshape(bottom_0_shape);
|
||||
this->blob_bottom_1_->Reshape(bottom_1_shape);
|
||||
GradientChecker<Dtype> checker(1e-2, 1e-3);
|
||||
checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
|
||||
this->blob_top_vec_);
|
||||
}
|
||||
|
||||
} // namespace caffe
|
|
@ -3,7 +3,6 @@
|
|||
#include <vector>
|
||||
|
||||
#include "gtest/gtest.h"
|
||||
#include "leveldb/db.h"
|
||||
|
||||
#include "caffe/blob.hpp"
|
||||
#include "caffe/common.hpp"
|
||||
|
|
|
@ -188,9 +188,8 @@ class GradientBasedSolverTest : public MultiDeviceTest<TypeParam> {
|
|||
this->InitSolverFromProtoString(proto.str());
|
||||
if (from_snapshot != NULL) {
|
||||
this->solver_->Restore(from_snapshot);
|
||||
vector<Blob<Dtype>*> empty_bottom_vec;
|
||||
for (int i = 0; i < this->solver_->iter(); ++i) {
|
||||
this->solver_->net()->Forward(empty_bottom_vec);
|
||||
this->solver_->net()->Forward();
|
||||
}
|
||||
}
|
||||
if (devices == 1) {
|
||||
|
@ -208,7 +207,7 @@ class GradientBasedSolverTest : public MultiDeviceTest<TypeParam> {
|
|||
Caffe::set_solver_count(gpus.size());
|
||||
this->sync_.reset(new P2PSync<Dtype>(
|
||||
this->solver_, NULL, this->solver_->param()));
|
||||
this->sync_->run(gpus);
|
||||
this->sync_->Run(gpus);
|
||||
Caffe::set_solver_count(1);
|
||||
}
|
||||
if (snapshot) {
|
||||
|
@ -234,8 +233,7 @@ class GradientBasedSolverTest : public MultiDeviceTest<TypeParam> {
|
|||
// Run a forward pass, and manually compute the update values from the
|
||||
// result.
|
||||
Net<Dtype>& net = *this->solver_->net();
|
||||
vector<Blob<Dtype>*> empty_bottom_vec;
|
||||
net.Forward(empty_bottom_vec);
|
||||
net.Forward();
|
||||
ASSERT_TRUE(net.has_blob("data"));
|
||||
const Blob<Dtype>& data = *net.blob_by_name("data");
|
||||
ASSERT_TRUE(net.has_blob("targets"));
|
||||
|
|
|
@ -60,6 +60,50 @@ TYPED_TEST(InnerProductLayerTest, TestSetUp) {
|
|||
EXPECT_EQ(this->blob_top_->channels(), 10);
|
||||
}
|
||||
|
||||
/** @brief TestSetUp while toggling the transpose flag */
TYPED_TEST(InnerProductLayerTest, TestSetUpTranposeFalse) {
|
||||
typedef typename TypeParam::Dtype Dtype;
|
||||
this->blob_bottom_vec_.push_back(this->blob_bottom_);
|
||||
LayerParameter layer_param;
|
||||
InnerProductParameter* inner_product_param =
|
||||
layer_param.mutable_inner_product_param();
|
||||
inner_product_param->set_num_output(10);
|
||||
inner_product_param->set_transpose(false);
|
||||
shared_ptr<InnerProductLayer<Dtype> > layer(
|
||||
new InnerProductLayer<Dtype>(layer_param));
|
||||
layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
|
||||
EXPECT_EQ(2, this->blob_top_->num());
|
||||
EXPECT_EQ(1, this->blob_top_->height());
|
||||
EXPECT_EQ(1, this->blob_top_->width());
|
||||
EXPECT_EQ(10, this->blob_top_->channels());
|
||||
EXPECT_EQ(2, layer->blobs()[0]->num_axes());
|
||||
EXPECT_EQ(10, layer->blobs()[0]->shape(0));
|
||||
EXPECT_EQ(60, layer->blobs()[0]->shape(1));
|
||||
}
|
||||
|
||||
/** @brief TestSetUp while toggling the transpose flag */
TYPED_TEST(InnerProductLayerTest, TestSetUpTranposeTrue) {
|
||||
typedef typename TypeParam::Dtype Dtype;
|
||||
this->blob_bottom_vec_.push_back(this->blob_bottom_);
|
||||
LayerParameter layer_param;
|
||||
InnerProductParameter* inner_product_param =
|
||||
layer_param.mutable_inner_product_param();
|
||||
inner_product_param->set_num_output(10);
|
||||
inner_product_param->set_transpose(true);
|
||||
shared_ptr<InnerProductLayer<Dtype> > layer(
|
||||
new InnerProductLayer<Dtype>(layer_param));
|
||||
layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
|
||||
EXPECT_EQ(2, this->blob_top_->num());
|
||||
EXPECT_EQ(1, this->blob_top_->height());
|
||||
EXPECT_EQ(1, this->blob_top_->width());
|
||||
EXPECT_EQ(10, this->blob_top_->channels());
|
||||
EXPECT_EQ(2, layer->blobs()[0]->num_axes());
|
||||
EXPECT_EQ(60, layer->blobs()[0]->shape(0));
|
||||
EXPECT_EQ(10, layer->blobs()[0]->shape(1));
|
||||
}
|
||||
|
||||
TYPED_TEST(InnerProductLayerTest, TestForward) {
|
||||
typedef typename TypeParam::Dtype Dtype;
|
||||
this->blob_bottom_vec_.push_back(this->blob_bottom_);
|
||||
|
@ -91,6 +135,79 @@ TYPED_TEST(InnerProductLayerTest, TestForward) {
|
|||
}
|
||||
}
|
||||
|
||||
/**
 * @brief Initialize an IP layer without transpose and with random weights,
 * run Forward, and save the result. Then initialize another IP layer with
 * transpose, manually copy and transpose the weights from the first layer,
 * run Forward on the same input, and check that the result is the same.
 */
|
||||
TYPED_TEST(InnerProductLayerTest, TestForwardTranspose) {
|
||||
typedef typename TypeParam::Dtype Dtype;
|
||||
this->blob_bottom_vec_.push_back(this->blob_bottom_);
|
||||
bool IS_VALID_CUDA = false;
|
||||
#ifndef CPU_ONLY
|
||||
IS_VALID_CUDA = CAFFE_TEST_CUDA_PROP.major >= 2;
|
||||
#endif
|
||||
if (Caffe::mode() == Caffe::CPU ||
|
||||
sizeof(Dtype) == 4 || IS_VALID_CUDA) {
|
||||
LayerParameter layer_param;
|
||||
InnerProductParameter* inner_product_param =
|
||||
layer_param.mutable_inner_product_param();
|
||||
inner_product_param->set_num_output(10);
|
||||
inner_product_param->mutable_weight_filler()->set_type("uniform");
|
||||
inner_product_param->mutable_bias_filler()->set_type("uniform");
|
||||
inner_product_param->mutable_bias_filler()->set_min(1);
|
||||
inner_product_param->mutable_bias_filler()->set_max(2);
|
||||
inner_product_param->set_transpose(false);
|
||||
shared_ptr<InnerProductLayer<Dtype> > layer(
|
||||
new InnerProductLayer<Dtype>(layer_param));
|
||||
layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
|
||||
layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_);
|
||||
const int count = this->blob_top_->count();
|
||||
Blob<Dtype>* const top = new Blob<Dtype>();
|
||||
top->ReshapeLike(*this->blob_top_);
|
||||
caffe_copy(count, this->blob_top_->cpu_data(), top->mutable_cpu_data());
|
||||
this->blob_top_vec_.clear();
|
||||
this->blob_top_vec_.push_back(new Blob<Dtype>());
|
||||
inner_product_param->set_transpose(true);
|
||||
shared_ptr<InnerProductLayer<Dtype> > ip_t(
|
||||
new InnerProductLayer<Dtype>(layer_param));
|
||||
ip_t->SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
|
||||
const int count_w = layer->blobs()[0]->count();
|
||||
EXPECT_EQ(count_w, ip_t->blobs()[0]->count());
|
||||
// manually copy and transpose the weights from 1st IP layer into 2nd
|
||||
const Dtype* w = layer->blobs()[0]->cpu_data();
|
||||
Dtype* w_t = ip_t->blobs()[0]->mutable_cpu_data();
|
||||
const int width = layer->blobs()[0]->shape(1);
|
||||
const int width_t = ip_t->blobs()[0]->shape(1);
|
||||
for (int i = 0; i < count_w; ++i) {
|
||||
int r = i / width;
|
||||
int c = i % width;
|
||||
w_t[c*width_t+r] = w[r*width+c]; // copy while transposing
|
||||
}
|
||||
// copy bias from 1st IP layer to 2nd IP layer
|
||||
ASSERT_EQ(layer->blobs()[1]->count(), ip_t->blobs()[1]->count());
|
||||
caffe_copy(layer->blobs()[1]->count(), layer->blobs()[1]->cpu_data(),
|
||||
ip_t->blobs()[1]->mutable_cpu_data());
|
||||
ip_t->Forward(this->blob_bottom_vec_, this->blob_top_vec_);
|
||||
EXPECT_EQ(count, this->blob_top_->count())
|
||||
<< "Invalid count for top blob for IP with transpose.";
|
||||
Blob<Dtype>* const top_t = new Blob<Dtype>();
|
||||
top_t->ReshapeLike(*this->blob_top_vec_[0]);
|
||||
caffe_copy(count,
|
||||
this->blob_top_vec_[0]->cpu_data(),
|
||||
top_t->mutable_cpu_data());
|
||||
const Dtype* data = top->cpu_data();
|
||||
const Dtype* data_t = top_t->cpu_data();
|
||||
for (int i = 0; i < count; ++i) {
|
||||
EXPECT_FLOAT_EQ(data[i], data_t[i]);
|
||||
}
|
||||
} else {
|
||||
LOG(ERROR) << "Skipping test due to old architecture.";
|
||||
}
|
||||
}
|
||||
|
||||
TYPED_TEST(InnerProductLayerTest, TestForwardNoBatch) {
|
||||
typedef typename TypeParam::Dtype Dtype;
|
||||
this->blob_bottom_vec_.push_back(this->blob_bottom_nobatch_);
|
||||
|
@ -148,4 +265,127 @@ TYPED_TEST(InnerProductLayerTest, TestGradient) {
|
|||
}
|
||||
}
|
||||
|
||||
TYPED_TEST(InnerProductLayerTest, TestGradientTranspose) {
|
||||
typedef typename TypeParam::Dtype Dtype;
|
||||
this->blob_bottom_vec_.push_back(this->blob_bottom_);
|
||||
bool IS_VALID_CUDA = false;
|
||||
#ifndef CPU_ONLY
|
||||
IS_VALID_CUDA = CAFFE_TEST_CUDA_PROP.major >= 2;
|
||||
#endif
|
||||
if (Caffe::mode() == Caffe::CPU ||
|
||||
sizeof(Dtype) == 4 || IS_VALID_CUDA) {
|
||||
LayerParameter layer_param;
|
||||
InnerProductParameter* inner_product_param =
|
||||
layer_param.mutable_inner_product_param();
|
||||
inner_product_param->set_num_output(11);
|
||||
inner_product_param->mutable_weight_filler()->set_type("gaussian");
|
||||
inner_product_param->mutable_bias_filler()->set_type("gaussian");
|
||||
inner_product_param->mutable_bias_filler()->set_min(1);
|
||||
inner_product_param->mutable_bias_filler()->set_max(2);
|
||||
inner_product_param->set_transpose(true);
|
||||
InnerProductLayer<Dtype> layer(layer_param);
|
||||
GradientChecker<Dtype> checker(1e-2, 1e-3);
|
||||
checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
|
||||
this->blob_top_vec_);
|
||||
} else {
|
||||
LOG(ERROR) << "Skipping test due to old architecture.";
|
||||
}
|
||||
}
|
||||
|
||||
TYPED_TEST(InnerProductLayerTest, TestBackwardTranspose) {
|
||||
typedef typename TypeParam::Dtype Dtype;
|
||||
this->blob_bottom_vec_.push_back(this->blob_bottom_);
|
||||
bool IS_VALID_CUDA = false;
|
||||
#ifndef CPU_ONLY
|
||||
IS_VALID_CUDA = CAFFE_TEST_CUDA_PROP.major >= 2;
|
||||
#endif
|
||||
if (Caffe::mode() == Caffe::CPU ||
|
||||
sizeof(Dtype) == 4 || IS_VALID_CUDA) {
|
||||
LayerParameter layer_param;
|
||||
InnerProductParameter* inner_product_param =
|
||||
layer_param.mutable_inner_product_param();
|
||||
inner_product_param->set_num_output(10);
|
||||
inner_product_param->mutable_weight_filler()->set_type("uniform");
|
||||
inner_product_param->mutable_bias_filler()->set_type("uniform");
|
||||
inner_product_param->mutable_bias_filler()->set_min(1);
|
||||
inner_product_param->mutable_bias_filler()->set_max(2);
|
||||
inner_product_param->set_transpose(false);
|
||||
shared_ptr<InnerProductLayer<Dtype> > layer(
|
||||
new InnerProductLayer<Dtype>(layer_param));
|
||||
layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
|
||||
layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_);
|
||||
// copy top blob
|
||||
Blob<Dtype>* const top = new Blob<Dtype>();
|
||||
top->CopyFrom(*this->blob_top_, false, true);
|
||||
// fake top diff
|
||||
Blob<Dtype>* const diff = new Blob<Dtype>();
|
||||
diff->ReshapeLike(*this->blob_top_);
|
||||
{
|
||||
FillerParameter filler_param;
|
||||
UniformFiller<Dtype> filler(filler_param);
|
||||
filler.Fill(diff);
|
||||
}
|
||||
caffe_copy(this->blob_top_vec_[0]->count(),
|
||||
diff->cpu_data(),
|
||||
this->blob_top_vec_[0]->mutable_cpu_diff());
|
||||
vector<bool> propagate_down(1, true);
|
||||
layer->Backward(this->blob_top_vec_,
|
||||
propagate_down,
|
||||
this->blob_bottom_vec_);
|
||||
// copy first ip's weights and their diffs
|
||||
Blob<Dtype>* const w = new Blob<Dtype>();
|
||||
w->CopyFrom(*layer->blobs()[0], false, true);
|
||||
w->CopyFrom(*layer->blobs()[0], true, true);
|
||||
// copy bottom diffs
|
||||
Blob<Dtype>* const bottom_diff = new Blob<Dtype>();
|
||||
bottom_diff->CopyFrom(*this->blob_bottom_vec_[0], true, true);
|
||||
// repeat original top with transposed ip
|
||||
this->blob_top_vec_.clear();
|
||||
this->blob_top_vec_.push_back(new Blob<Dtype>());
|
||||
inner_product_param->set_transpose(true);
|
||||
shared_ptr<InnerProductLayer<Dtype> > ip_t(
|
||||
new InnerProductLayer<Dtype>(layer_param));
|
||||
ip_t->SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
|
||||
// manually copy and transpose the weights from 1st IP layer into 2nd
|
||||
{
|
||||
const Dtype* w_src = w->cpu_data();
|
||||
Dtype* w_t = ip_t->blobs()[0]->mutable_cpu_data();
|
||||
const int width = layer->blobs()[0]->shape(1);
|
||||
const int width_t = ip_t->blobs()[0]->shape(1);
|
||||
for (int i = 0; i < layer->blobs()[0]->count(); ++i) {
|
||||
int r = i / width;
|
||||
int c = i % width;
|
||||
w_t[c*width_t+r] = w_src[r*width+c]; // copy while transposing
|
||||
}
|
||||
// copy bias from 1st IP layer to 2nd IP layer
|
||||
ASSERT_EQ(layer->blobs()[1]->count(), ip_t->blobs()[1]->count());
|
||||
caffe_copy(layer->blobs()[1]->count(), layer->blobs()[1]->cpu_data(),
|
||||
ip_t->blobs()[1]->mutable_cpu_data());
|
||||
}
|
||||
ip_t->Forward(this->blob_bottom_vec_, this->blob_top_vec_);
|
||||
caffe_copy(this->blob_top_vec_[0]->count(),
|
||||
diff->cpu_data(),
|
||||
this->blob_top_vec_[0]->mutable_cpu_diff());
|
||||
ip_t->Backward(this->blob_top_vec_, propagate_down, this->blob_bottom_vec_);
|
||||
const Dtype* data = w->cpu_diff();
|
||||
const Dtype* data_t = ip_t->blobs()[0]->cpu_diff();
|
||||
const int WIDTH = layer->blobs()[0]->shape(1);
|
||||
const int WIDTH_T = ip_t->blobs()[0]->shape(1);
|
||||
for (int i = 0; i < layer->blobs()[0]->count(); ++i) {
|
||||
int r = i / WIDTH;
|
||||
int c = i % WIDTH;
|
||||
EXPECT_NE(Dtype(0.), data[r*WIDTH+c]);
|
||||
EXPECT_FLOAT_EQ(data[r*WIDTH+c], data_t[c*WIDTH_T+r]);
|
||||
}
|
||||
data = bottom_diff->cpu_diff();
|
||||
data_t = this->blob_bottom_vec_[0]->cpu_diff();
|
||||
for (int i = 0; i < this->blob_bottom_vec_[0]->count(); ++i) {
|
||||
EXPECT_NE(Dtype(0.), data[i]);
|
||||
EXPECT_FLOAT_EQ(data[i], data_t[i]);
|
||||
}
|
||||
} else {
|
||||
LOG(ERROR) << "Skipping test due to old architecture.";
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace caffe
|
||||
|
|
|
@ -555,11 +555,14 @@ class NetTest : public MultiDeviceTest<TypeParam> {
|
|||
virtual void InitReshapableNet() {
|
||||
const string& proto =
|
||||
"name: 'ReshapableNetwork' "
|
||||
"input: 'data' "
|
||||
"input_dim: 1 "
|
||||
"input_dim: 3 "
|
||||
"input_dim: 100 "
|
||||
"input_dim: 100 "
|
||||
"layer { "
|
||||
" name: 'data' "
|
||||
" type: 'Input' "
|
||||
" top: 'data' "
|
||||
" input_param { "
|
||||
" shape: { dim: 1 dim: 3 dim: 100 dim: 100 } "
|
||||
" } "
|
||||
"} "
|
||||
"layer { "
|
||||
" name: 'conv1' "
|
||||
" type: 'Convolution' "
|
||||
|
@ -821,7 +824,7 @@ TYPED_TEST(NetTest, TestLossWeight) {
|
|||
Caffe::set_random_seed(this->seed_);
|
||||
const bool kForceBackward = true;
|
||||
this->InitUnsharedWeightsNet(NULL, NULL, kForceBackward);
|
||||
const Dtype loss = this->net_->ForwardBackward(bottom);
|
||||
const Dtype loss = this->net_->ForwardBackward();
|
||||
const bool kCopyDiff = true;
|
||||
vector<shared_ptr<Blob<Dtype> > > blob_grads;
|
||||
this->CopyNetBlobs(kCopyDiff, &blob_grads);
|
||||
|
@ -836,7 +839,7 @@ TYPED_TEST(NetTest, TestLossWeight) {
|
|||
for (int i = 0; i < kNumLossWeights; ++i) {
|
||||
Caffe::set_random_seed(this->seed_);
|
||||
this->InitUnsharedWeightsNet(&kLossWeights[i], NULL, kForceBackward);
|
||||
const Dtype weighted_loss = this->net_->ForwardBackward(bottom);
|
||||
const Dtype weighted_loss = this->net_->ForwardBackward();
|
||||
const Dtype error_margin = kErrorMargin * fabs(kLossWeights[i]);
|
||||
EXPECT_NEAR(loss * kLossWeights[i], weighted_loss, error_margin)
|
||||
<< "loss weight = " << kLossWeights[i];
|
||||
|
@ -865,14 +868,13 @@ TYPED_TEST(NetTest, TestLossWeight) {
|
|||
|
||||
TYPED_TEST(NetTest, TestLossWeightMidNet) {
|
||||
typedef typename TypeParam::Dtype Dtype;
|
||||
vector<Blob<Dtype>*> bottom;
|
||||
Caffe::set_random_seed(this->seed_);
|
||||
const bool kForceBackward = true;
|
||||
Dtype loss_weight = 0;
|
||||
Dtype midnet_loss_weight = 1;
|
||||
this->InitUnsharedWeightsNet(&loss_weight, &midnet_loss_weight,
|
||||
kForceBackward);
|
||||
const Dtype loss = this->net_->ForwardBackward(bottom);
|
||||
const Dtype loss = this->net_->ForwardBackward();
|
||||
const bool kCopyDiff = true;
|
||||
const bool kReshape = true;
|
||||
Blob<Dtype> data_grad;
|
||||
|
@ -887,7 +889,7 @@ TYPED_TEST(NetTest, TestLossWeightMidNet) {
|
|||
Caffe::set_random_seed(this->seed_);
|
||||
this->InitUnsharedWeightsNet(&loss_weight, &kLossWeights[i],
|
||||
kForceBackward);
|
||||
const Dtype weighted_loss = this->net_->ForwardBackward(bottom);
|
||||
const Dtype weighted_loss = this->net_->ForwardBackward();
|
||||
const Dtype error_margin = kErrorMargin * fabs(kLossWeights[i]);
|
||||
EXPECT_NEAR(loss * kLossWeights[i], weighted_loss, error_margin)
|
||||
<< "loss weight = " << kLossWeights[i];
|
||||
|
@ -903,7 +905,6 @@ TYPED_TEST(NetTest, TestLossWeightMidNet) {
|
|||
|
||||
TYPED_TEST(NetTest, TestComboLossWeight) {
|
||||
typedef typename TypeParam::Dtype Dtype;
|
||||
vector<Blob<Dtype>*> bottom;
|
||||
Dtype loss_weight;
|
||||
Dtype midnet_loss_weight;
|
||||
const bool kForceBackward = true;
|
||||
|
@ -916,7 +917,7 @@ TYPED_TEST(NetTest, TestComboLossWeight) {
|
|||
Caffe::set_random_seed(this->seed_);
|
||||
this->InitUnsharedWeightsNet(&loss_weight, &midnet_loss_weight,
|
||||
kForceBackward);
|
||||
const Dtype loss = this->net_->ForwardBackward(bottom);
|
||||
const Dtype loss = this->net_->ForwardBackward();
|
||||
const bool kCopyDiff = true;
|
||||
vector<shared_ptr<Blob<Dtype> > > blob_grads;
|
||||
this->CopyNetBlobs(kCopyDiff, &blob_grads);
|
||||
|
@ -928,7 +929,7 @@ TYPED_TEST(NetTest, TestComboLossWeight) {
|
|||
Caffe::set_random_seed(this->seed_);
|
||||
this->InitUnsharedWeightsNet(&loss_weight, &midnet_loss_weight,
|
||||
kForceBackward);
|
||||
const Dtype loss_main_2 = this->net_->ForwardBackward(bottom);
|
||||
const Dtype loss_main_2 = this->net_->ForwardBackward();
|
||||
vector<shared_ptr<Blob<Dtype> > > blob_grads_loss_2;
|
||||
this->CopyNetBlobs(kCopyDiff, &blob_grads_loss_2);
|
||||
vector<shared_ptr<Blob<Dtype> > > param_grads_loss_2;
|
||||
|
@ -939,7 +940,7 @@ TYPED_TEST(NetTest, TestComboLossWeight) {
|
|||
Caffe::set_random_seed(this->seed_);
|
||||
this->InitUnsharedWeightsNet(&loss_weight, &midnet_loss_weight,
|
||||
kForceBackward);
|
||||
const Dtype loss_main_3 = this->net_->ForwardBackward(bottom);
|
||||
const Dtype loss_main_3 = this->net_->ForwardBackward();
|
||||
const vector<shared_ptr<Blob<Dtype> > >& blob_grads_loss_3 =
|
||||
this->net_->blobs();
|
||||
ASSERT_EQ(blob_grads.size(), blob_grads_loss_3.size());
|
||||
|
@ -974,7 +975,7 @@ TYPED_TEST(NetTest, TestComboLossWeight) {
|
|||
Caffe::set_random_seed(this->seed_);
|
||||
this->InitUnsharedWeightsNet(&loss_weight, &midnet_loss_weight,
|
||||
kForceBackward);
|
||||
const Dtype loss_midnet_2 = this->net_->ForwardBackward(bottom);
|
||||
const Dtype loss_midnet_2 = this->net_->ForwardBackward();
|
||||
this->CopyNetBlobs(kCopyDiff, &blob_grads_loss_2);
|
||||
this->CopyNetParams(kCopyDiff, ¶m_grads_loss_2);
|
||||
|
||||
|
@ -983,7 +984,7 @@ TYPED_TEST(NetTest, TestComboLossWeight) {
|
|||
Caffe::set_random_seed(this->seed_);
|
||||
this->InitUnsharedWeightsNet(&loss_weight, &midnet_loss_weight,
|
||||
kForceBackward);
|
||||
const Dtype loss_midnet_3 = this->net_->ForwardBackward(bottom);
|
||||
const Dtype loss_midnet_3 = this->net_->ForwardBackward();
|
||||
const vector<shared_ptr<Blob<Dtype> > >& blob_grads_midnet_loss_3 =
|
||||
this->net_->blobs();
|
||||
ASSERT_EQ(blob_grads.size(), blob_grads_midnet_loss_3.size());
|
||||
|
@ -1032,40 +1033,35 @@ TYPED_TEST(NetTest, TestComboLossWeight) {
|
|||
}
|
||||
|
||||
TYPED_TEST(NetTest, TestBackwardWithAccuracyLayer) {
|
||||
typedef typename TypeParam::Dtype Dtype;
|
||||
const bool kForceBackward = false;
|
||||
const bool kAccuracyLayer = true;
|
||||
this->InitTinyNet(kForceBackward, kAccuracyLayer);
|
||||
EXPECT_TRUE(this->net_->has_blob("accuracy"));
|
||||
vector<Blob<Dtype>*> bottom;
|
||||
// Test that we can do Backward even though we have an 'Accuracy' layer.
|
||||
this->net_->ForwardBackward(bottom);
|
||||
this->net_->ForwardBackward();
|
||||
}
|
||||
|
||||
TYPED_TEST(NetTest, TestUnsharedWeightsDataNet) {
|
||||
typedef typename TypeParam::Dtype Dtype;
|
||||
this->InitUnsharedWeightsNet();
|
||||
vector<Blob<Dtype>*> bottom;
|
||||
Dtype loss;
|
||||
this->net_->Forward(bottom, &loss);
|
||||
this->net_->Forward(&loss);
|
||||
EXPECT_GT(loss, 0);
|
||||
}
|
||||
|
||||
TYPED_TEST(NetTest, TestSharedWeightsDataNet) {
|
||||
typedef typename TypeParam::Dtype Dtype;
|
||||
this->InitSharedWeightsNet();
|
||||
vector<Blob<Dtype>*> bottom;
|
||||
Dtype loss;
|
||||
this->net_->Forward(bottom, &loss);
|
||||
this->net_->Forward(&loss);
|
||||
EXPECT_FLOAT_EQ(loss, 0);
|
||||
}
|
||||
|
||||
TYPED_TEST(NetTest, TestUnsharedWeightsDiffNet) {
|
||||
typedef typename TypeParam::Dtype Dtype;
|
||||
this->InitUnsharedWeightsNet();
|
||||
vector<Blob<Dtype>*> bottom;
|
||||
Net<Dtype>* net = this->net_.get();
|
||||
net->Forward(bottom);
|
||||
net->Forward();
|
||||
net->Backward();
|
||||
Layer<Dtype>* ip1_layer = net->layer_by_name("innerproduct1").get();
|
||||
Layer<Dtype>* ip2_layer = net->layer_by_name("innerproduct2").get();
|
||||
|
@ -1081,10 +1077,9 @@ TYPED_TEST(NetTest, TestUnsharedWeightsDiffNet) {
|
|||
TYPED_TEST(NetTest, TestSharedWeightsDiffNet) {
|
||||
typedef typename TypeParam::Dtype Dtype;
|
||||
this->InitSharedWeightsNet();
|
||||
vector<Blob<Dtype>*> bottom;
|
||||
Net<Dtype>* net = this->net_.get();
|
||||
Dtype loss;
|
||||
net->Forward(bottom, &loss);
|
||||
net->Forward(&loss);
|
||||
net->Backward();
|
||||
EXPECT_FLOAT_EQ(loss, 0);
|
||||
Layer<Dtype>* ip1_layer = net->layer_by_name("innerproduct1").get();
|
||||
|
@ -1102,7 +1097,6 @@ TYPED_TEST(NetTest, TestSharedWeightsUpdate) {
|
|||
typedef typename TypeParam::Dtype Dtype;
|
||||
Caffe::set_random_seed(this->seed_);
|
||||
this->InitDiffDataSharedWeightsNet();
|
||||
vector<Blob<Dtype>*> bottom;
|
||||
EXPECT_EQ(this->net_->layer_names()[1], "innerproduct1");
|
||||
EXPECT_EQ(this->net_->layer_names()[2], "innerproduct2");
|
||||
Blob<Dtype>* ip1_weights = this->net_->layers()[1]->blobs()[0].get();
|
||||
|
@ -1111,7 +1105,7 @@ TYPED_TEST(NetTest, TestSharedWeightsUpdate) {
|
|||
// locations.
|
||||
EXPECT_EQ(ip1_weights->cpu_data(), ip2_weights->cpu_data());
|
||||
EXPECT_EQ(ip1_weights->cpu_diff(), ip2_weights->cpu_diff());
|
||||
this->net_->Forward(bottom);
|
||||
this->net_->Forward();
|
||||
this->net_->Backward();
|
||||
// Compute the expected update as the data minus the two diffs.
|
||||
Blob<Dtype> shared_params;
|
||||
|
@ -1146,7 +1140,7 @@ TYPED_TEST(NetTest, TestSharedWeightsUpdate) {
|
|||
// locations in memory.
|
||||
EXPECT_NE(ip1_weights->cpu_data(), ip2_weights->cpu_data());
|
||||
EXPECT_NE(ip1_weights->cpu_diff(), ip2_weights->cpu_diff());
|
||||
this->net_->Forward(bottom);
|
||||
this->net_->Forward();
|
||||
this->net_->Backward();
|
||||
// Compute the expected update.
|
||||
Blob<Dtype> unshared_params1;
|
||||
|
@ -1186,7 +1180,6 @@ TYPED_TEST(NetTest, TestSharedWeightsResume) {
|
|||
// Create a net with weight sharing; Update it once.
|
||||
Caffe::set_random_seed(this->seed_);
|
||||
this->InitDiffDataSharedWeightsNet();
|
||||
vector<Blob<Dtype>*> bottom;
|
||||
EXPECT_EQ(this->net_->layer_names()[1], "innerproduct1");
|
||||
EXPECT_EQ(this->net_->layer_names()[2], "innerproduct2");
|
||||
Blob<Dtype>* ip1_weights = this->net_->layers()[1]->blobs()[0].get();
|
||||
|
@ -1195,7 +1188,7 @@ TYPED_TEST(NetTest, TestSharedWeightsResume) {
|
|||
// locations.
|
||||
EXPECT_EQ(ip1_weights->cpu_data(), ip2_weights->cpu_data());
|
||||
EXPECT_EQ(ip1_weights->cpu_diff(), ip2_weights->cpu_diff());
|
||||
this->net_->ForwardBackward(bottom);
|
||||
this->net_->ForwardBackward();
|
||||
this->net_->Update();
|
||||
Blob<Dtype> shared_params;
|
||||
const bool kReshape = true;
|
||||
|
@ -1228,7 +1221,6 @@ TYPED_TEST(NetTest, TestSharedWeightsResume) {
|
|||
|
||||
TYPED_TEST(NetTest, TestParamPropagateDown) {
|
||||
typedef typename TypeParam::Dtype Dtype;
|
||||
vector<Blob<Dtype>*> bottom;
|
||||
const bool kBiasTerm = true, kForceBackward = false;
|
||||
const Dtype* kLossWeight1 = NULL;
|
||||
const Dtype* kLossWeight2 = NULL;
|
||||
|
@ -1238,7 +1230,7 @@ TYPED_TEST(NetTest, TestParamPropagateDown) {
|
|||
Dtype blobs_lr_w1 = 1, blobs_lr_w2 = 1, blobs_lr_b1 = 2, blobs_lr_b2 = 2;
|
||||
this->InitUnsharedWeightsNet(kLossWeight1, kLossWeight2, kForceBackward,
|
||||
kBiasTerm, blobs_lr_w1, blobs_lr_w2, blobs_lr_b1, blobs_lr_b2);
|
||||
this->net_->Forward(bottom);
|
||||
this->net_->Forward();
|
||||
this->net_->Backward();
|
||||
const vector<shared_ptr<Blob<Dtype> > >& params = this->net_->params();
|
||||
const int num_params = params.size();
|
||||
|
@ -1258,7 +1250,7 @@ TYPED_TEST(NetTest, TestParamPropagateDown) {
|
|||
blobs_lr_w1 *= 2, blobs_lr_w2 *= 2, blobs_lr_b1 *= 2, blobs_lr_b2 *= 2;
|
||||
this->InitUnsharedWeightsNet(kLossWeight1, kLossWeight2, kForceBackward,
|
||||
kBiasTerm, blobs_lr_w1, blobs_lr_w2, blobs_lr_b1, blobs_lr_b2);
|
||||
this->net_->Forward(bottom);
|
||||
this->net_->Forward();
|
||||
this->net_->Backward();
|
||||
const vector<shared_ptr<Blob<Dtype> > >& params2 = this->net_->params();
|
||||
ASSERT_EQ(num_params, params2.size());
|
||||
|
@ -1274,7 +1266,7 @@ TYPED_TEST(NetTest, TestParamPropagateDown) {
|
|||
blobs_lr_w1 = 1, blobs_lr_w2 = 0, blobs_lr_b1 = 0, blobs_lr_b2 = 1;
|
||||
this->InitUnsharedWeightsNet(kLossWeight1, kLossWeight2, kForceBackward,
|
||||
kBiasTerm, blobs_lr_w1, blobs_lr_w2, blobs_lr_b1, blobs_lr_b2);
|
||||
this->net_->Forward(bottom);
|
||||
this->net_->Forward();
|
||||
this->net_->Backward();
|
||||
const vector<shared_ptr<Blob<Dtype> > >& params3 = this->net_->params();
|
||||
ASSERT_EQ(num_params, params3.size());
|
||||
|
@ -1293,7 +1285,7 @@ TYPED_TEST(NetTest, TestParamPropagateDown) {
|
|||
blobs_lr_w1 = 0, blobs_lr_w2 = 1, blobs_lr_b1 = 1, blobs_lr_b2 = 0;
|
||||
this->InitUnsharedWeightsNet(kLossWeight1, kLossWeight2, kForceBackward,
|
||||
kBiasTerm, blobs_lr_w1, blobs_lr_w2, blobs_lr_b1, blobs_lr_b2);
|
||||
this->net_->Forward(bottom);
|
||||
this->net_->Forward();
|
||||
this->net_->Backward();
|
||||
const vector<shared_ptr<Blob<Dtype> > >& params4 = this->net_->params();
|
||||
ASSERT_EQ(num_params, params4.size());
|
||||
|
@ -1315,7 +1307,7 @@ TYPED_TEST(NetTest, TestFromTo) {
|
|||
// Run Forward and Backward, recording the data diff and loss.
|
||||
Blob<Dtype> data;
|
||||
data.ReshapeLike(*this->net_->blob_by_name("data"));
|
||||
this->net_->ForwardPrefilled();
|
||||
this->net_->Forward();
|
||||
this->net_->Backward();
|
||||
data.CopyFrom(*this->net_->blob_by_name("data"), true, true);
|
||||
const Dtype *loss_ptr = this->net_->output_blobs()[0]->cpu_data();
|
||||
|
@ -2277,12 +2269,12 @@ TYPED_TEST(NetTest, TestReshape) {
|
|||
filler.Fill(&blob2);
|
||||
|
||||
this->InitReshapableNet();
|
||||
Blob<Dtype>* input_blob = this->net_->input_blobs()[0];
|
||||
shared_ptr<Blob<Dtype> > input_blob = this->net_->blob_by_name("data");
|
||||
Blob<Dtype>* output_blob = this->net_->output_blobs()[0];
|
||||
input_blob->Reshape(blob1.num(), blob1.channels(), blob1.height(),
|
||||
blob1.width());
|
||||
caffe_copy(blob1.count(), blob1.cpu_data(), input_blob->mutable_cpu_data());
|
||||
this->net_->ForwardPrefilled();
|
||||
this->net_->Forward();
|
||||
// call backward just to make sure it runs
|
||||
this->net_->Backward();
|
||||
Blob<Dtype> output1(output_blob->num(), output_blob->channels(),
|
||||
|
@ -2293,7 +2285,7 @@ TYPED_TEST(NetTest, TestReshape) {
|
|||
input_blob->Reshape(blob2.num(), blob2.channels(), blob2.height(),
|
||||
blob2.width());
|
||||
caffe_copy(blob2.count(), blob2.cpu_data(), input_blob->mutable_cpu_data());
|
||||
this->net_->ForwardPrefilled();
|
||||
this->net_->Forward();
|
||||
this->net_->Backward();
|
||||
Blob<Dtype> output2(output_blob->num(), output_blob->channels(),
|
||||
output_blob->height(), output_blob->width());
|
||||
|
@ -2303,7 +2295,7 @@ TYPED_TEST(NetTest, TestReshape) {
|
|||
input_blob->Reshape(blob1.num(), blob1.channels(), blob1.height(),
|
||||
blob1.width());
|
||||
caffe_copy(blob1.count(), blob1.cpu_data(), input_blob->mutable_cpu_data());
|
||||
this->net_->ForwardPrefilled();
|
||||
this->net_->Forward();
|
||||
this->net_->Backward();
|
||||
for (int i = 0; i < output1.count(); ++i) {
|
||||
EXPECT_FLOAT_EQ(*(output1.cpu_data() + i), *(output_blob->cpu_data() + i));
|
||||
|
@ -2312,7 +2304,7 @@ TYPED_TEST(NetTest, TestReshape) {
|
|||
input_blob->Reshape(blob2.num(), blob2.channels(), blob2.height(),
|
||||
blob2.width());
|
||||
caffe_copy(blob2.count(), blob2.cpu_data(), input_blob->mutable_cpu_data());
|
||||
this->net_->ForwardPrefilled();
|
||||
this->net_->Forward();
|
||||
this->net_->Backward();
|
||||
for (int i = 0; i < output2.count(); ++i) {
|
||||
EXPECT_FLOAT_EQ(*(output2.cpu_data() + i), *(output_blob->cpu_data() + i));
|
||||
|
|
|
@ -886,67 +886,6 @@ TEST_F(SplitLayerInsertionTest, TestInsertionTwoTop) {
|
|||
this->RunInsertionTest(input_proto, expected_output_proto);
|
||||
}
|
||||
|
||||
TEST_F(SplitLayerInsertionTest, TestInputInsertion) {
|
||||
const string& input_proto =
|
||||
"name: 'TestNetwork' "
|
||||
"input: 'data' "
|
||||
"input_dim: 10 "
|
||||
"input_dim: 3 "
|
||||
"input_dim: 227 "
|
||||
"input_dim: 227 "
|
||||
"layer { "
|
||||
" name: 'innerprod1' "
|
||||
" type: 'InnerProduct' "
|
||||
" bottom: 'data' "
|
||||
" top: 'innerprod1' "
|
||||
"} "
|
||||
"layer { "
|
||||
" name: 'innerprod2' "
|
||||
" type: 'InnerProduct' "
|
||||
" bottom: 'data' "
|
||||
" top: 'innerprod2' "
|
||||
"} "
|
||||
"layer { "
|
||||
" name: 'loss' "
|
||||
" type: 'EuclideanLoss' "
|
||||
" bottom: 'innerprod1' "
|
||||
" bottom: 'innerprod2' "
|
||||
"} ";
|
||||
const string& expected_output_proto =
|
||||
"name: 'TestNetwork' "
|
||||
"input: 'data' "
|
||||
"input_dim: 10 "
|
||||
"input_dim: 3 "
|
||||
"input_dim: 227 "
|
||||
"input_dim: 227 "
|
||||
"layer { "
|
||||
" name: 'data_input_0_split' "
|
||||
" type: 'Split' "
|
||||
" bottom: 'data' "
|
||||
" top: 'data_input_0_split_0' "
|
||||
" top: 'data_input_0_split_1' "
|
||||
"} "
|
||||
"layer { "
|
||||
" name: 'innerprod1' "
|
||||
" type: 'InnerProduct' "
|
||||
" bottom: 'data_input_0_split_0' "
|
||||
" top: 'innerprod1' "
|
||||
"} "
|
||||
"layer { "
|
||||
" name: 'innerprod2' "
|
||||
" type: 'InnerProduct' "
|
||||
" bottom: 'data_input_0_split_1' "
|
||||
" top: 'innerprod2' "
|
||||
"} "
|
||||
"layer { "
|
||||
" name: 'loss' "
|
||||
" type: 'EuclideanLoss' "
|
||||
" bottom: 'innerprod1' "
|
||||
" bottom: 'innerprod2' "
|
||||
"} ";
|
||||
this->RunInsertionTest(input_proto, expected_output_proto);
|
||||
}
|
||||
|
||||
TEST_F(SplitLayerInsertionTest, TestWithInPlace) {
|
||||
const string& input_proto =
|
||||
"name: 'TestNetwork' "
|
||||
|
|
|
@ -19,12 +19,6 @@ void InsertSplits(const NetParameter& param, NetParameter* param_split) {
|
|||
map<pair<int, int>, float> top_idx_to_loss_weight;
|
||||
map<pair<int, int>, int> top_idx_to_bottom_split_idx;
|
||||
map<int, string> layer_idx_to_layer_name;
|
||||
layer_idx_to_layer_name[-1] = "input";
|
||||
// Determine the number of times each blob is used as an input (bottom) blob.
|
||||
for (int i = 0; i < param.input_size(); ++i) {
|
||||
const string& blob_name = param.input(i);
|
||||
blob_name_to_last_top_idx[blob_name] = make_pair(-1, i);
|
||||
}
|
||||
for (int i = 0; i < param.layer_size(); ++i) {
|
||||
const LayerParameter& layer_param = param.layer(i);
|
||||
layer_idx_to_layer_name[i] = layer_param.name();
|
||||
|
@ -45,7 +39,7 @@ void InsertSplits(const NetParameter& param, NetParameter* param_split) {
|
|||
blob_name_to_last_top_idx[blob_name] = make_pair(i, j);
|
||||
}
|
||||
// A use of a top blob as a loss should be handled similarly to the use of
|
||||
// a top blob as an input (bottom) blob to another layer.
|
||||
// a top blob as a bottom blob to another layer.
|
||||
const int last_loss =
|
||||
std::min(layer_param.loss_weight_size(), layer_param.top_size());
|
||||
for (int j = 0; j < last_loss; ++j) {
|
||||
|
@ -57,19 +51,6 @@ void InsertSplits(const NetParameter& param, NetParameter* param_split) {
|
|||
}
|
||||
}
|
||||
}
|
||||
// Create split layer for any input blobs used by other layer as bottom
|
||||
// blobs more than once.
|
||||
for (int i = 0; i < param.input_size(); ++i) {
|
||||
const int split_count = top_idx_to_bottom_count[make_pair(-1, i)];
|
||||
if (split_count > 1) {
|
||||
const string& layer_name = layer_idx_to_layer_name[-1];
|
||||
const string& blob_name = param.input(i);
|
||||
LayerParameter* split_layer_param = param_split->add_layer();
|
||||
const float kZeroLossWeight = 0;
|
||||
ConfigureSplitLayer(layer_name, blob_name, i, split_count,
|
||||
kZeroLossWeight, split_layer_param);
|
||||
}
|
||||
}
|
||||
for (int i = 0; i < param.layer_size(); ++i) {
|
||||
LayerParameter* layer_param = param_split->add_layer();
|
||||
layer_param->CopyFrom(param.layer(i));
|
||||
|
|
|
@ -13,7 +13,8 @@
|
|||
namespace caffe {
|
||||
|
||||
bool NetNeedsUpgrade(const NetParameter& net_param) {
|
||||
return NetNeedsV0ToV1Upgrade(net_param) || NetNeedsV1ToV2Upgrade(net_param);
|
||||
return NetNeedsV0ToV1Upgrade(net_param) || NetNeedsV1ToV2Upgrade(net_param)
|
||||
|| NetNeedsDataUpgrade(net_param) || NetNeedsInputUpgrade(net_param);
|
||||
}
|
||||
|
||||
bool UpgradeNetAsNeeded(const string& param_file, NetParameter* param) {
|
||||
|
@ -60,6 +61,16 @@ bool UpgradeNetAsNeeded(const string& param_file, NetParameter* param) {
|
|||
<< "V1LayerParameter";
|
||||
}
|
||||
}
|
||||
// NetParameter uses old style input fields; try to upgrade it.
|
||||
if (NetNeedsInputUpgrade(*param)) {
|
||||
LOG(INFO) << "Attempting to upgrade input file specified using deprecated "
|
||||
<< "input fields: " << param_file;
|
||||
UpgradeNetInput(param);
|
||||
LOG(INFO) << "Successfully upgraded file specified using deprecated "
|
||||
<< "input fields.";
|
||||
LOG(WARNING) << "Note that future Caffe releases will only support "
|
||||
<< "input layers and not input fields.";
|
||||
}
|
||||
return success;
|
||||
}
|
||||
|
||||
|
@ -645,12 +656,14 @@ void UpgradeNetDataTransformation(NetParameter* net_param) {
|
|||
}
|
||||
|
||||
bool UpgradeV1Net(const NetParameter& v1_net_param, NetParameter* net_param) {
|
||||
bool is_fully_compatible = true;
|
||||
if (v1_net_param.layer_size() > 0) {
|
||||
LOG(ERROR) << "Input NetParameter to be upgraded already specifies 'layer' "
|
||||
<< "fields; these will be ignored for the upgrade.";
|
||||
is_fully_compatible = false;
|
||||
LOG(FATAL) << "Refusing to upgrade inconsistent NetParameter input; "
|
||||
<< "the definition includes both 'layer' and 'layers' fields. "
|
||||
<< "The current format defines 'layer' fields with string type like "
|
||||
<< "layer { type: 'Layer' ... } and not layers { type: LAYER ... }. "
|
||||
<< "Manually switch the definition to 'layer' format to continue.";
|
||||
}
|
||||
bool is_fully_compatible = true;
|
||||
net_param->CopyFrom(v1_net_param);
|
||||
net_param->clear_layers();
|
||||
net_param->clear_layer();
|
||||
|
@ -937,6 +950,47 @@ const char* UpgradeV1LayerType(const V1LayerParameter_LayerType type) {
|
|||
}
|
||||
}
|
||||
|
||||
bool NetNeedsInputUpgrade(const NetParameter& net_param) {
|
||||
return net_param.input_size() > 0;
|
||||
}

void UpgradeNetInput(NetParameter* net_param) {
  // Collect inputs and convert to Input layer definitions.
  // If the NetParameter holds an input alone, without shape/dim, then
  // it's a legacy caffemodel and simply stripping the input field is enough.
  bool has_shape = net_param->input_shape_size() > 0;
  bool has_dim = net_param->input_dim_size() > 0;
  if (has_shape || has_dim) {
    LayerParameter* layer_param = net_param->add_layer();
    layer_param->set_name("input");
    layer_param->set_type("Input");
    InputParameter* input_param = layer_param->mutable_input_param();
    // Convert input fields into a layer.
    for (int i = 0; i < net_param->input_size(); ++i) {
      layer_param->add_top(net_param->input(i));
      if (has_shape) {
        input_param->add_shape()->CopyFrom(net_param->input_shape(i));
      } else {
        // Turn legacy input dimensions into shape.
        BlobShape* shape = input_param->add_shape();
        int first_dim = i*4;
        int last_dim = first_dim + 4;
        for (int j = first_dim; j < last_dim; j++) {
          shape->add_dim(net_param->input_dim(j));
        }
      }
    }
    // Swap input layer to beginning of net to satisfy layer dependencies.
    for (int i = net_param->layer_size() - 1; i > 0; --i) {
      net_param->mutable_layer(i-1)->Swap(net_param->mutable_layer(i));
    }
  }
  // Clear inputs.
  net_param->clear_input();
  net_param->clear_input_shape();
  net_param->clear_input_dim();
}
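
A minimal sketch of what this upgrade does to a legacy net definition (the dimensions are hypothetical; the names and field layout follow the code above):

# before: deprecated input fields
input: "data"
input_dim: 10
input_dim: 3
input_dim: 227
input_dim: 227

# after: an equivalent Input layer, swapped to the front of the layer list
layer {
  name: "input"
  type: "Input"
  top: "data"
  input_param {
    shape: { dim: 10 dim: 3 dim: 227 dim: 227 }
  }
}
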
// Return true iff the solver contains any old solver_type specified as enums
|
||||
bool SolverNeedsTypeUpgrade(const SolverParameter& solver_param) {
|
||||
if (solver_param.has_solver_type()) {
|
||||
|
|
|
@ -215,7 +215,7 @@ int train() {
|
|||
|
||||
if (gpus.size() > 1) {
|
||||
caffe::P2PSync<float> sync(solver, NULL, solver->param());
|
||||
sync.run(gpus);
|
||||
sync.Run(gpus);
|
||||
} else {
|
||||
LOG(INFO) << "Starting Optimization";
|
||||
solver->Solve();
|
||||
|
@ -252,14 +252,13 @@ int test() {
|
|||
caffe_net.CopyTrainedLayersFrom(FLAGS_weights);
|
||||
LOG(INFO) << "Running for " << FLAGS_iterations << " iterations.";
|
||||
|
||||
vector<Blob<float>* > bottom_vec;
|
||||
vector<int> test_score_output_id;
|
||||
vector<float> test_score;
|
||||
float loss = 0;
|
||||
for (int i = 0; i < FLAGS_iterations; ++i) {
|
||||
float iter_loss;
|
||||
const vector<Blob<float>*>& result =
|
||||
caffe_net.Forward(bottom_vec, &iter_loss);
|
||||
caffe_net.Forward(&iter_loss);
|
||||
loss += iter_loss;
|
||||
int idx = 0;
|
||||
for (int j = 0; j < result.size(); ++j) {
|
||||
|
@ -323,7 +322,7 @@ int time() {
|
|||
// Note that for the speed benchmark, we will assume that the network does
|
||||
// not take any input blobs.
|
||||
float initial_loss;
|
||||
caffe_net.Forward(vector<Blob<float>*>(), &initial_loss);
|
||||
caffe_net.Forward(&initial_loss);
|
||||
LOG(INFO) << "Initial loss: " << initial_loss;
|
||||
LOG(INFO) << "Performing Backward";
|
||||
caffe_net.Backward();
|
||||
|
|
|
@ -133,10 +133,9 @@ int feature_extraction_pipeline(int argc, char** argv) {
|
|||
LOG(ERROR)<< "Extacting Features";
|
||||
|
||||
Datum datum;
|
||||
std::vector<Blob<float>*> input_vec;
|
||||
std::vector<int> image_indices(num_features, 0);
|
||||
for (int batch_index = 0; batch_index < num_mini_batches; ++batch_index) {
|
||||
feature_extraction_net->Forward(input_vec);
|
||||
feature_extraction_net->Forward();
|
||||
for (int i = 0; i < num_features; ++i) {
|
||||
const boost::shared_ptr<Blob<Dtype> > feature_blob =
|
||||
feature_extraction_net->blob_by_name(blob_names[i]);
|
||||
|
|
|
@ -16,6 +16,7 @@ using std::ofstream;
|
|||
using namespace caffe; // NOLINT(build/namespaces)
|
||||
|
||||
int main(int argc, char** argv) {
|
||||
FLAGS_alsologtostderr = 1; // Print output to stderr (while still logging)
|
||||
::google::InitGoogleLogging(argv[0]);
|
||||
if (argc != 3) {
|
||||
LOG(ERROR) << "Usage: "
|
||||
|
@ -39,11 +40,11 @@ int main(int argc, char** argv) {
|
|||
<< "see details above.";
|
||||
}
|
||||
} else {
|
||||
LOG(ERROR) << "File already in V1 proto format: " << argv[1];
|
||||
LOG(ERROR) << "File already in latest proto format: " << input_filename;
|
||||
}
|
||||
|
||||
WriteProtoToBinaryFile(net_param, argv[2]);
|
||||
|
||||
LOG(ERROR) << "Wrote upgraded NetParameter binary proto to " << argv[2];
|
||||
LOG(INFO) << "Wrote upgraded NetParameter binary proto to " << argv[2];
|
||||
return !success;
|
||||
}
|
||||
|
|
|
@ -16,6 +16,7 @@ using std::ofstream;
|
|||
using namespace caffe; // NOLINT(build/namespaces)
|
||||
|
||||
int main(int argc, char** argv) {
|
||||
FLAGS_alsologtostderr = 1; // Print output to stderr (while still logging)
|
||||
::google::InitGoogleLogging(argv[0]);
|
||||
if (argc != 3) {
|
||||
LOG(ERROR) << "Usage: "
|
||||
|
@ -31,7 +32,6 @@ int main(int argc, char** argv) {
|
|||
return 2;
|
||||
}
|
||||
bool need_upgrade = NetNeedsUpgrade(net_param);
|
||||
bool need_data_upgrade = NetNeedsDataUpgrade(net_param);
|
||||
bool success = true;
|
||||
if (need_upgrade) {
|
||||
success = UpgradeNetAsNeeded(input_filename, &net_param);
|
||||
|
@ -43,13 +43,9 @@ int main(int argc, char** argv) {
|
|||
LOG(ERROR) << "File already in latest proto format: " << input_filename;
|
||||
}
|
||||
|
||||
if (need_data_upgrade) {
|
||||
UpgradeNetDataTransformation(&net_param);
|
||||
}
|
||||
|
||||
// Save new format prototxt.
|
||||
WriteProtoToTextFile(net_param, argv[2]);
|
||||
|
||||
LOG(ERROR) << "Wrote upgraded NetParameter text proto to " << argv[2];
|
||||
LOG(INFO) << "Wrote upgraded NetParameter text proto to " << argv[2];
|
||||
return !success;
|
||||
}
|
||||
|
|
|
@ -16,6 +16,7 @@ using std::ofstream;
|
|||
using namespace caffe; // NOLINT(build/namespaces)
|
||||
|
||||
int main(int argc, char** argv) {
|
||||
FLAGS_alsologtostderr = 1; // Print output to stderr (while still logging)
|
||||
::google::InitGoogleLogging(argv[0]);
|
||||
if (argc != 3) {
|
||||
LOG(ERROR) << "Usage: upgrade_solver_proto_text "
|
||||
|
@ -45,6 +46,6 @@ int main(int argc, char** argv) {
|
|||
// Save new format prototxt.
|
||||
WriteProtoToTextFile(solver_param, argv[2]);
|
||||
|
||||
LOG(ERROR) << "Wrote upgraded SolverParameter text proto to " << argv[2];
|
||||
LOG(INFO) << "Wrote upgraded SolverParameter text proto to " << argv[2];
|
||||
return !success;
|
||||
}
|
||||
|
|
|
@@ -109,6 +109,7 @@
    <ClCompile Include="..\..\src\caffe\layers\concat_layer.cpp" />
    <ClCompile Include="..\..\src\caffe\layers\contrastive_loss_layer.cpp" />
    <ClCompile Include="..\..\src\caffe\layers\conv_layer.cpp" />
    <ClCompile Include="..\..\src\caffe\layers\crop_layer.cpp" />
    <ClCompile Include="..\..\src\caffe\layers\cudnn_conv_layer.cpp" />
    <ClCompile Include="..\..\src\caffe\layers\cudnn_lcn_layer.cpp" />
    <ClCompile Include="..\..\src\caffe\layers\cudnn_lrn_layer.cpp" />
@@ -135,6 +136,7 @@
    <ClCompile Include="..\..\src\caffe\layers\image_data_layer.cpp" />
    <ClCompile Include="..\..\src\caffe\layers\infogain_loss_layer.cpp" />
    <ClCompile Include="..\..\src\caffe\layers\inner_product_layer.cpp" />
    <ClCompile Include="..\..\src\caffe\layers\input_layer.cpp" />
    <ClCompile Include="..\..\src\caffe\layers\log_layer.cpp" />
    <ClCompile Include="..\..\src\caffe\layers\loss_layer.cpp" />
    <ClCompile Include="..\..\src\caffe\layers\lrn_layer.cpp" />
@@ -208,6 +210,7 @@
    <ClInclude Include="..\..\include\caffe\layers\concat_layer.hpp" />
    <ClInclude Include="..\..\include\caffe\layers\contrastive_loss_layer.hpp" />
    <ClInclude Include="..\..\include\caffe\layers\conv_layer.hpp" />
    <ClInclude Include="..\..\include\caffe\layers\crop_layer.hpp" />
    <ClInclude Include="..\..\include\caffe\layers\cudnn_conv_layer.hpp" />
    <ClInclude Include="..\..\include\caffe\layers\cudnn_lcn_layer.hpp" />
    <ClInclude Include="..\..\include\caffe\layers\cudnn_lrn_layer.hpp" />
@@ -234,6 +237,7 @@
    <ClInclude Include="..\..\include\caffe\layers\image_data_layer.hpp" />
    <ClInclude Include="..\..\include\caffe\layers\infogain_loss_layer.hpp" />
    <ClInclude Include="..\..\include\caffe\layers\inner_product_layer.hpp" />
    <ClInclude Include="..\..\include\caffe\layers\input_layer.hpp" />
    <ClInclude Include="..\..\include\caffe\layers\log_layer.hpp" />
    <ClInclude Include="..\..\include\caffe\layers\loss_layer.hpp" />
    <ClInclude Include="..\..\include\caffe\layers\lrn_layer.hpp" />
@@ -296,6 +300,7 @@
    <CudaCompile Include="..\..\src\caffe\layers\concat_layer.cu" />
    <CudaCompile Include="..\..\src\caffe\layers\contrastive_loss_layer.cu" />
    <CudaCompile Include="..\..\src\caffe\layers\conv_layer.cu" />
    <CudaCompile Include="..\..\src\caffe\layers\crop_layer.cu" />
    <CudaCompile Include="..\..\src\caffe\layers\cudnn_conv_layer.cu" />
    <CudaCompile Include="..\..\src\caffe\layers\cudnn_lcn_layer.cu" />
    <CudaCompile Include="..\..\src\caffe\layers\cudnn_lrn_layer.cu" />
@@ -381,4 +386,4 @@
    <Error Condition="!Exists('..\..\..\NugetPackages\LevelDB-vc120.1.2.0.0\build\native\LevelDB-vc120.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\..\..\NugetPackages\LevelDB-vc120.1.2.0.0\build\native\LevelDB-vc120.targets'))" />
    <Error Condition="!Exists('..\..\..\NugetPackages\lmdb-v120.0.9.14\build\native\lmdb-v120.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\..\..\NugetPackages\lmdb-v120.0.9.14\build\native\lmdb-v120.targets'))" />
  </Target>
</Project>
</Project>

@@ -165,6 +165,9 @@
    <ClCompile Include="..\..\src\caffe\layers\conv_layer.cpp">
      <Filter>src\layers</Filter>
    </ClCompile>
    <ClCompile Include="..\..\src\caffe\layers\crop_layer.cpp">
      <Filter>src\layers</Filter>
    </ClCompile>
    <ClCompile Include="..\..\src\caffe\layers\cudnn_conv_layer.cpp">
      <Filter>src\layers</Filter>
    </ClCompile>
@@ -240,6 +243,9 @@
    <ClCompile Include="..\..\src\caffe\layers\inner_product_layer.cpp">
      <Filter>src\layers</Filter>
    </ClCompile>
    <ClCompile Include="..\..\src\caffe\layers\input_layer.cpp">
      <Filter>src\layers</Filter>
    </ClCompile>
    <ClCompile Include="..\..\src\caffe\layers\log_layer.cpp">
      <Filter>src\layers</Filter>
    </ClCompile>
@@ -413,6 +419,9 @@
    <ClInclude Include="..\..\include\caffe\layers\conv_layer.hpp">
      <Filter>include\layers</Filter>
    </ClInclude>
    <ClInclude Include="..\..\include\caffe\layers\crop_layer.hpp">
      <Filter>include\layers</Filter>
    </ClInclude>
    <ClInclude Include="..\..\include\caffe\layers\cudnn_conv_layer.hpp">
      <Filter>include\layers</Filter>
    </ClInclude>
@@ -488,6 +497,9 @@
    <ClInclude Include="..\..\include\caffe\layers\inner_product_layer.hpp">
      <Filter>include\layers</Filter>
    </ClInclude>
    <ClInclude Include="..\..\include\caffe\layers\input_layer.hpp">
      <Filter>include\layers</Filter>
    </ClInclude>
    <ClInclude Include="..\..\include\caffe\layers\log_layer.hpp">
      <Filter>include\layers</Filter>
    </ClInclude>
@@ -727,6 +739,9 @@
    <CudaCompile Include="..\..\src\caffe\layers\conv_layer.cu">
      <Filter>cu\layers</Filter>
    </CudaCompile>
    <CudaCompile Include="..\..\src\caffe\layers\crop_layer.cu">
      <Filter>cu\layers</Filter>
    </CudaCompile>
    <CudaCompile Include="..\..\src\caffe\layers\batch_norm_layer.cu">
      <Filter>cu\layers</Filter>
    </CudaCompile>
@@ -794,4 +809,4 @@
  <ItemGroup>
    <None Include="packages.config" />
  </ItemGroup>
</Project>
</Project>

@@ -85,6 +85,7 @@
    <ClCompile Include="..\..\src\caffe\test\test_concat_layer.cpp" />
    <ClCompile Include="..\..\src\caffe\test\test_contrastive_loss_layer.cpp" />
    <ClCompile Include="..\..\src\caffe\test\test_convolution_layer.cpp" />
    <ClCompile Include="..\..\src\caffe\test\test_crop_layer.cpp" />
    <ClCompile Include="..\..\src\caffe\test\test_data_layer.cpp" />
    <ClCompile Include="..\..\src\caffe\test\test_data_transformer.cpp" />
    <ClCompile Include="..\..\src\caffe\test\test_db.cpp" />

@@ -47,6 +47,9 @@
    <ClCompile Include="..\..\src\caffe\test\test_convolution_layer.cpp">
      <Filter>src</Filter>
    </ClCompile>
    <ClCompile Include="..\..\src\caffe\test\test_crop_layer.cpp">
      <Filter>src</Filter>
    </ClCompile>
    <ClCompile Include="..\..\src\caffe\test\test_data_layer.cpp">
      <Filter>src</Filter>
    </ClCompile>