[BUILD][DOCS] Migrate VTA CI, test, build, docs
Parent: bc41013028
Commit: e531d02288
@@ -8,11 +8,6 @@ include(cmake/util/FindVulkan.cmake)
 include(cmake/util/FindLLVM.cmake)
 include(cmake/util/FindROCM.cmake)
 
-if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/build/private/local_config.cmake)
-  include(${CMAKE_CURRENT_SOURCE_DIR}/build/private/local_config.cmake)
-endif()
-
 if(EXISTS ${CMAKE_CURRENT_BINARY_DIR}/config.cmake)
   include(${CMAKE_CURRENT_BINARY_DIR}/config.cmake)
 else()
@@ -40,6 +35,8 @@ tvm_option(USE_RTTI "Build with RTTI" ON)
 tvm_option(USE_MSVC_MT "Build with MT" OFF)
 tvm_option(INSTALL_DEV "Install compiler infrastructure" OFF)
 
+tvm_option(USE_VTA_CFG "Use a specific json file for VTA runtime" "")
+
 # Contrib library options
 tvm_option(USE_BLAS "The blas library to be linked" none)
 tvm_option(USE_MKL_PATH "MKL root path when use MKL blas" none)
@@ -52,8 +49,9 @@ tvm_option(USE_NNPACK "Build with nnpack support" OFF)
 tvm_option(USE_RANDOM "Build with random support" OFF)
 
 # include directories
-include_directories(BEFORE "nnvm/include")
 include_directories("include")
+include_directories("nnvm/include")
 include_directories("dmlc-core/include")
 include_directories("HalideIR/src")
 include_directories("dlpack/include")
+include_directories("topi/include")
@@ -148,20 +146,8 @@ if(USE_GRAPH_RUNTIME)
   endif(USE_GRAPH_RUNTIME_DEBUG)
 endif(USE_GRAPH_RUNTIME)
 
-if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/dmlc-core/CMakeLists.txt)
-  include_directories(${CMAKE_CURRENT_SOURCE_DIR}/dmlc-core/include)
-  if (INSTALL_DEV)
-    install(
-      DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/dmlc-core/include/." DESTINATION "include"
-      FILES_MATCHING
-      PATTERN "*.h"
-    )
-  endif()
-elseif(DMLC_CORE_PATH)
-  include_directories(${DMLC_CORE_PATH}/include)
-endif()
-
 # Module rules
+include(cmake/modules/VTA.cmake)
 include(cmake/modules/CUDA.cmake)
 include(cmake/modules/OpenCL.cmake)
 include(cmake/modules/OpenGL.cmake)
@@ -174,7 +160,6 @@ include(cmake/modules/contrib/Random.cmake)
 include(cmake/modules/contrib/Sort.cmake)
 include(cmake/modules/contrib/NNPack.cmake)
 
 # Target rrules
 add_library(tvm SHARED ${COMPILER_SRCS} ${RUNTIME_SRCS})
 add_library(tvm_topi SHARED ${TOPI_SRCS})
 add_library(tvm_runtime SHARED ${RUNTIME_SRCS})
@@ -207,7 +192,6 @@ endif()
 # Custom targets
 add_custom_target(runtime DEPENDS tvm_runtime)
 
-
 # Installation rulse
 install(TARGETS tvm_runtime DESTINATION lib${LIB_SUFFIX})
 if(WIN32)
@@ -8,7 +8,7 @@ tvm_runtime = "build/libtvm_runtime.so, build/config.cmake"
 tvm_lib = "build/libtvm.so, " + tvm_runtime
 // LLVM upstream lib
 tvm_multilib = "build/libtvm.so, " +
-  "build/libtvm_topi.so, build/libnnvm_compiler.so, " + tvm_runtime
+  "build/libvta.so, build/libtvm_topi.so, build/libnnvm_compiler.so, " + tvm_runtime
 
 // command to start a docker container
 docker_run = 'docker/build.sh'
@@ -134,6 +134,7 @@ stage('Build') {
       pack_lib('cpu', tvm_lib)
       timeout(time: max_time, unit: 'MINUTES') {
         sh "${docker_run} ci_cpu ./tests/scripts/task_cpp_unittest.sh"
+        sh "${docker_run} ci_cpu ./tests/scripts/task_python_vta.sh"
       }
     }
   }
@@ -179,6 +180,7 @@ stage('Unit Test') {
       timeout(time: max_time, unit: 'MINUTES') {
         sh "${docker_run} ci_i386 ./tests/scripts/task_python_unittest.sh"
         sh "${docker_run} ci_i386 ./tests/scripts/task_python_integration.sh"
+        sh "${docker_run} ci_i386 ./tests/scripts/task_python_vta.sh"
       }
     }
   }
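Both new CI hooks call the same entry point, so the stages above can be reproduced outside Jenkins by running the script directly. A minimal sketch, assuming a built tree and the `nose` runner that `tests/scripts/task_python_vta.sh` (added later in this diff) relies on:

```bash
# Reproduce the new ci_cpu/ci_i386 test step locally,
# without the docker/build.sh wrapper the Jenkins stages use.
cd tvm   # hypothetical checkout root
./tests/scripts/task_cpp_unittest.sh
./tests/scripts/task_python_vta.sh
```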
Makefile (8 changed lines)
@@ -1,7 +1,7 @@
 ROOTDIR = $(CURDIR)
 
 .PHONY: clean all test doc pylint cpplint lint\
-	 cython cython2 cython3 web runtime
+	 cython cython2 cython3 web runtime vta
 
 ifndef DMLC_CORE_PATH
 DMLC_CORE_PATH = $(ROOTDIR)/dmlc-core
@@ -20,9 +20,11 @@ all:
	@mkdir -p build && cd build && cmake .. && $(MAKE)
 
 runtime:
	@mkdir -p build && cd build && cmake .. && $(MAKE) runtime
 
+vta:
+	@mkdir -p build && cd build && cmake .. && $(MAKE) vta
+
 cpptest:
	@mkdir -p build && cd build && cmake .. && $(MAKE) cpptest
@@ -48,6 +50,7 @@ build/libtvm_web_runtime.js: build/libtvm_web_runtime.bc
 
 # Lint scripts
 cpplint:
+	python3 dmlc-core/scripts/lint.py vta cpp vta/include vta/src
	python3 dmlc-core/scripts/lint.py topi cpp topi/include;
	python3 dmlc-core/scripts/lint.py nnvm cpp nnvm/include nnvm/src;
	python3 dmlc-core/scripts/lint.py tvm cpp include src verilog\
@@ -57,6 +60,7 @@ pylint:
	python3 -m pylint python/tvm --rcfile=$(ROOTDIR)/tests/lint/pylintrc
	python3 -m pylint topi/python/topi --rcfile=$(ROOTDIR)/tests/lint/pylintrc
	python3 -m pylint nnvm/python/nnvm --rcfile=$(ROOTDIR)/tests/lint/pylintrc
+	python3 -m pylint vta/python/vta --rcfile=$(ROOTDIR)/tests/lint/pylintrc
 
 jnilint:
	python3 dmlc-core/scripts/lint.py tvm4j-jni cpp jvm/native/src
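Taken together, these Makefile edits wire VTA into the everyday developer loop. A short sketch of the commands they enable, run from the repository root:

```bash
make vta       # configure with cmake into build/ and build only the vta target
make cpplint   # now also runs the dmlc-core linter over vta/include and vta/src
make pylint    # now also checks vta/python/vta against tests/lint/pylintrc
```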
@@ -0,0 +1,5 @@
+#!/bin/bash
+PROJROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )/../../" && pwd )"
+
+export PYTHONPATH=${PYTHONPATH}:${PROJROOT}/python:${PROJROOT}/vta/python
+python -m vta.exec.rpc_server
@@ -0,0 +1,51 @@
+# CMake Build rules for VTA
+find_program(PYTHON python)
+
+if(MSVC)
+  message(STATUS "VTA build is skipped in Windows..")
+elseif(PYTHON)
+  set(VTA_CONFIG ${PYTHON} ${CMAKE_CURRENT_SOURCE_DIR}/vta/config/vta_config.py)
+
+  if(EXISTS ${CMAKE_CURRENT_BINARY_DIR}/vta_config.json)
+    message(STATUS "Use VTA config " ${CMAKE_CURRENT_BINARY_DIR}/vta_config.json)
+    set(VTA_CONFIG ${PYTHON} ${CMAKE_CURRENT_SOURCE_DIR}/vta/config/vta_config.py
+      --use-cfg=${CMAKE_CURRENT_BINARY_DIR}/vta_config.json)
+  endif()
+
+  execute_process(COMMAND ${VTA_CONFIG} --target OUTPUT_VARIABLE __vta_target)
+  string(STRIP ${__vta_target} VTA_TARGET)
+
+  message(STATUS "Build VTA runtime with target: " ${VTA_TARGET})
+
+  execute_process(COMMAND ${VTA_CONFIG} --defs OUTPUT_VARIABLE __vta_defs)
+
+  string(REGEX MATCHALL "(^| )-D[A-Za-z0-9_=.]*" VTA_DEFINITIONS "${__vta_defs}")
+
+  file(GLOB VTA_RUNTIME_SRCS vta/src/*.cc)
+  file(GLOB __vta_target_srcs vta/src/${VTA_TARGET}/*.cc)
+  list(APPEND VTA_RUNTIME_SRCS ${__vta_target_srcs})
+
+  add_library(vta SHARED ${VTA_RUNTIME_SRCS})
+
+  target_include_directories(vta PUBLIC vta/include)
+
+  foreach(__def ${VTA_DEFINITIONS})
+    string(SUBSTRING ${__def} 3 -1 __strip_def)
+    target_compile_definitions(vta PUBLIC ${__strip_def})
+  endforeach()
+
+  if(APPLE)
+    set_target_properties(vta PROPERTIES LINK_FLAGS "-undefined dynamic_lookup")
+  endif(APPLE)
+
+  # PYNQ rules
+  if(${VTA_TARGET} STREQUAL "pynq")
+    find_library(__sds_lib NAMES sds_lib PATHS /usr/lib)
+    find_library(__dma_lib NAMES dma PATHS
+      "/opt/python3.6/lib/python3.6/site-packages/pynq/drivers/"
+      "/opt/python3.6/lib/python3.6/site-packages/pynq/lib/")
+    target_link_libraries(vta ${__sds_lib} ${__dma_lib})
+  endif()
+else()
+  message(STATUS "Cannot found python in env, VTA build is skipped..")
+endif()
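The two `execute_process` calls above are the whole interface between CMake and the VTA configuration, and they can be replayed from a shell. A sketch, assuming the layout this commit introduces (`vta/config/vta_config.py` under the TVM source root); the printed values depend on the active `vta_config.json`:

```bash
cd tvm   # hypothetical checkout root

# What VTA.cmake stores in VTA_TARGET (e.g. "sim" or "pynq")
python vta/config/vta_config.py --target

# What VTA.cmake scans for -D flags and attaches to the vta library
python vta/config/vta_config.py --defs
```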
@@ -753,7 +753,7 @@ WARN_LOGFILE =
 # spaces.
 # Note: If this tag is empty the current directory is searched.
 
-INPUT = include/tvm topi/include/topi nnvm/include/nnvm
+INPUT = include/tvm topi/include/topi nnvm/include/nnvm vta/include/vta
 
 # This tag can be used to specify the character encoding of the source files
 # that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
@@ -46,6 +46,8 @@ help:
 clean:
	rm -rf $(BUILDDIR)/*
	rm -rf gen_modules
+	rm -rf tutorials
+	rm -rf vta/tutorials
 
 html:
	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
@@ -1,6 +1,28 @@
-The documentation of tvm is generated with recommonmark and sphinx.
+TVM Documentations
+==================
+This folder contains the source of TVM documents
 
 - A hosted version of doc is at http://docs.tvm.ai
-- pip install sphinx>=1.5.5 sphinx-gallery sphinx_rtd_theme matplotlib Image recommonmark
+- pip install sphinx>=1.5.5 sphinx-gallery sphinx_rtd_theme matplotlib Image recommonmark Pillow
 - Build tvm first in the root folder.
 - To build locally, you need to enable USE_CUDA, USE_OPENCL, LLVM_CONFIG in config.mk and then type "make html" in this folder.
+
+Only Execute Specified Tutorials
+--------------------------------
+The document build process will execute all the tutorials in the sphinx gallery.
+This will cause failure in some cases when certain machines do not have necessary
+environment. You can set ```TVM_TUTORIAL_EXEC_PATTERN``` to only execute
+the path that matches the regular expression pattern.
+
+For example, to only build tutorials under /vta/tutorials, run
+
+```bash
+TVM_TUTORIAL_EXEC_PATTERN=/vta/tutorials make html
+```
+
+To only build one specific file, do
+
+```bash
+# The slash \ is used to get . in regular expression
+TVM_TUTORIAL_EXEC_PATTERN=file_name\.py make html
+```
@@ -6,10 +6,6 @@ tvm.hybrid
 
    tvm.hybrid.parse
    tvm.hybrid.script
-   tvm.hybrid.popcount
-   tvm.hybrid.sigmoid
 
 .. autofunction:: tvm.hybrid.parse
 .. autofunction:: tvm.hybrid.script
-.. autofunction:: tvm.hybrid.popcount
-.. autofunction:: tvm.hybrid.sigmoid
@@ -20,5 +20,6 @@ Python API
    contrib
    dev
    topi
+   vta/index
    nnvm/index
    hybrid
@@ -1,5 +1,5 @@
-Python API
-==========
+VTA API
+=======
 
 This document contains the python API to VTA compiler toolchain.
docs/conf.py (10 changed lines)
@@ -26,6 +26,7 @@ curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
 sys.path.insert(0, os.path.join(curr_path, '../python/'))
 sys.path.insert(0, os.path.join(curr_path, '../topi/python'))
 sys.path.insert(0, os.path.join(curr_path, '../nnvm/python'))
+sys.path.insert(0, os.path.join(curr_path, '../vta/python'))
 
 # -- General configuration ------------------------------------------------
@@ -184,16 +185,17 @@ intersphinx_mapping = {
 
 from sphinx_gallery.sorting import ExplicitOrder
 
-examples_dirs = ['../tutorials/']
-gallery_dirs = ['tutorials']
+examples_dirs = ["../tutorials/", "../vta/tutorials/"]
+gallery_dirs = ["tutorials", "vta/tutorials"]
 
 subsection_order = ExplicitOrder(
     ['../tutorials/language',
      '../tutorials/optimize',
+     '../tutorials/vta',
      '../tutorials/topi',
      '../tutorials/deployment',
      '../tutorials/nnvm'])
 
 
 def generate_doxygen_xml(app):
     """Run the doxygen make commands if we're on the ReadTheDocs server"""
     run_doxygen('..')
@@ -220,7 +222,7 @@ sphinx_gallery_conf = {
     'examples_dirs': examples_dirs,
     'gallery_dirs': gallery_dirs,
     'subsection_order': subsection_order,
+    'filename_pattern': os.environ.get("TVM_TUTORIAL_EXEC_PATTERN", ".py"),
     'find_mayavi_figures': False,
-    'filename_pattern': '.py',
     'expected_failing_examples': []
 }
@@ -8,6 +8,7 @@ Get Started
 
    install/index
    tutorials/index
+   vta/index
    deploy/index
    contribute/index
    faq
@@ -1,6 +1,5 @@
 Installation
 ============
 
 To install TVM, please read :ref:`install-from-source`.
 If you are interested in deploying to mobile/embedded devices,
 you do not need to install the entire tvm stack on your device,
@@ -0,0 +1 @@
+tutorials
@@ -0,0 +1,2 @@
+VTA Hardware Design Overview
+============================
@@ -0,0 +1,23 @@
+VTA: Deep Learning Accelerator Stack
+====================================
+Specialized accelerators are key enablers of future deep learning workloads. TVM stack targets specialized accelerators.
+VTA(versatile tensor accelerator) is a generic, modular open-source deep learning accelerator.
+This page contains links to all the resources related to VTA:
+
+.. toctree::
+   :maxdepth: 1
+
+   install
+   tutorials/index
+   hardware
+
+
+Features
+--------
+VTA have the following key features:
+
+- Generic, modular open-source hardware
+- Streamlined workflow to deploy to FPGAs.
+- Simulator support to protoype compilation passes on regular workstations.
+- Driver and JIT runtime for both simulated and FPGA hardware backend.
+- End to end TVM stack integration
@@ -1,5 +1,5 @@
-Installation Guides
-===================
+VTA Installation Guide
+======================
 
 We present three installation guides, each extending on the previous one:
 1. VTA simulation-only installation
@@ -8,94 +8,12 @@ We present three installation guides, each extending on the previous one:
 
 ## VTA Simulation-Only Installation
 
-This first guide details the installation of the VTA package to run hardware simulation tests locally on your development machine (in case you don't own the Pynq FPGA development board).
-This guide includes:
-1. Software dependences installation
-2. Simulation library compilation
-3. Python package installation
-4. Test examples to ensure that the VTA package was correctly installed
-
-To get started, clone vta repo from [github](https://github.com/uwsaml/vta). It is important to clone the submodules along with ```--recursive``` option.
-```bash
-git clone --recursive https://github.com/uwsaml/vta
-```
-
-### VTA Dependences
-
-The VTA package depends on several other packages that need to be manually installed beforehand.
-
-We list the dependences below:
-* LLVM 4.0 or newer
-* TVM
-* MxNet (to run the end-to-end examples)
-* Additional python packages
-
-#### LLVM Installation
-
-We provide the set of commands to install LLVM 6.0 (stable branch) on Ubuntu Xenial. Note that the [LLVM installation process](apt.llvm.org) can be adapted to different LLVM branches, and operating systems.
+Please follow the guide on install TVM from source.
+VTA simulator is library is built by default along with TVM.
+You only have to add vta to your python path.
 
-```bash
-wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key|sudo apt-key add -
-sudo apt-add-repository "deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-6.0 main"
-sudo apt-get update
-apt-get install clang-6.0 lldb-6.0 lld-6.0
-```
-
-To ensure that LLVM 6.0 was properly installed, check that the following command gives the path to your `llvm-config` binary (you may have to append the version number to the executable name):
-
-```bash
-which llvm-config-6.0
-```
-
-#### TVM Installation
-
-TVM is included as a top-level submodule to VTA, and can be found under `<vta root>/tvm`.
-
-Follow the [installation instructions](https://docs.tvm.ai/install/index.html).
-
-In the 'config.mk' file, make sure that:
-* `LLVM_CONFIG` points to the `llvm-config` executable which path was derived in the LLVM installation instructions above (e.g. `LLVM_CONFIG = /usr/bin/llvm-config-6.0`)
-* `USE_RPC` should be set to 1
-
-For the *Python Package Installation*, we recommend updating your `~/.bashrc` file to extend your `PYTHONPATH` with the TVM Python libraries.
-```bash
-export PYTHONPATH=<tvm root>/python:<tvm root>/topi/python:<tvm root>/nnvm/python:${PYTHONPATH}
-```
-
-#### MxNet Installation
-
-Follow the [MxNet Installation Instructions](https://mxnet.incubator.apache.org)
-
-#### Python Dependences
-
-You'll need the following packages to be installed for the example to run properly. You can use `pip` to install those packages:
-* `decorator`
-* `enum34`
-* `Pillow`
-* `wget`
-
-### VTA Shared Library Compilation
-
-Before building the VTA shared library, the VTA configuration can be modified by changing `config.json` file.
-This file provides an architectural specification of the VTA accelerator that can be understood by both the TVM compiler stack and the VTA hardware stack.
-It also specifies the TVM compiler target. When `TARGET` is set to `sim`, it tells the TVM compiler to execute the TVM workloads on the VTA simulator.
-
-To build the simulator library, copy the simulation configuration file `make/sim_sample.json` to the project root.
-Next, you can build the VTA simulation dynamic library with `make`.
-
-```bash
-cd <vta root>
-cp make/sim_sample.json config.json
-make -j4
-```
-
-### VTA Python Package Installation
-
-The Python package can installed by extending your `PYTHONPATH` environment variable to point to the VTA python library path.
-You can run the following line in a terminal, or add it to your `~/.bashrc` file if you plan on using VTA regularly.
-
 ```bash
-export PYTHONPATH=<vta root>/python:${PYTHONPATH}
+export PYTHONPATH=/path/to/vta/python:${PYTHONPATH}
 ```
 
 ### Testing your VTA Simulation Setup
@@ -105,22 +23,25 @@ Finally to ensure that you've properly installed the VTA package, we can run sim
 Let's first run the 2D convolution test bench that will only run the ResNet-18 convolution layers.
 
 ```bash
-python tests/python/integration/test_benchmark_topi_conv2d.py
+python vta/tests/python/integration/test_benchmark_topi_conv2d.py
 ```
 
-> Note: You'll notice that for every convolution layer, the throughput gets reported in GOPS. These numbers are actually the computational throughput that the simulator achieves, by evaluating the convolution in software.
+> Note: You'll notice that for every convolution layer, the throughput gets reported in GOPS. These numbers are actually the computational throughput that the simulator achieves, by evaluating the convolution in software. You can also try out other tutorials.
 
-Next we can also run the ResNet-18 end to end example in the VTA simulator.
-This test will download the following files in your root:
-* `cat.jpg` the test image to classify
-* `synset.txt` the ImageNet categories
-* `quantize_graph.json` the 8-bit ResNet-18 inference NNVM graph
-* `quantize_params.plk` the 8-bit ResNet-18 model parameters
+### Advanced Configuration
 
+VTA is a generic configurable hardware. The configuration is specified by a `vta_config.json` under root of the TVM folder.
+This file provides an architectural specification of the VTA accelerator that can be understood by both the TVM compiler stack and the VTA hardware stack.
+It also specifies the TVM compiler target. When `TARGET` is set to `sim`, it tells the TVM compiler to execute the TVM workloads on the VTA simulator.
+You can modify the content to reconfigure VTA to a different mode. To do so,
 
 ```bash
-python examples/resnet18/pynq/imagenet_predict.py
+cd <tvm root>
+cp vta/config/vta_config.json vta_config.json
+edit vta_config.json
+make vta
 ```
-> Note: This will run ResNet inference by offloading the compute-heavy convolution layers to the VTA simulator, and report the top-1 category, and the inference time cost in seconds.
 
 ## VTA Pynq-Based Testing Setup
@@ -157,7 +78,7 @@ Because the direct board-to-computer connection prevents the board from directly
 mkdir <mountpoint>
 sshfs xilinx@192.168.2.99:/home/xilinx <mountpoint>
 cd <mountpoint>
-git clone --recursive https://github.com/uwsaml/vta
+git clone --recursive https://github.com/dmlc/tvm
 # When finished, you can leave the moutpoint and unmount the directory
 cd ~
 sudo umount <mountpoint>
@@ -169,20 +90,24 @@ The build process should take roughly 5 minutes.
 ```bash
 ssh xilinx@192.168.2.99
 # Build TVM runtime library (takes 5 mins)
-cd /home/xilinx/vta/tvm
+cd /home/xilinx/tvm
 mkdir build
 cp cmake/config.cmake build/.
+# copy pynq specific configuration
+cp vta/config/pynq_sample.json build/vta_config.json
 cd build
 cmake ..
-make runtime -j2
+make runtime vta -j2
 # Build VTA RPC server (takes 1 min)
-cd /home/xilinx/vta
+cd ..
 sudo ./apps/pynq_rpc/start_rpc_server.sh # pw is 'xilinx'
 ```
 
+Note that one key difference between the simulator build is that we changed the VTA configuration
+to be `vta/config/pynq_sample.json`, which specifies PYNQ as target.
+
 You should see the following being displayed when starting the RPC server. In order to run the next examples, you'll need to leave the RPC server running in an `ssh` session.
 ```
 INFO:root:Load additional library /home/xilinx/vta/lib/libvta.so
 INFO:root:RPCServer: bind to 0.0.0.0:9091
 ```
@@ -199,12 +124,12 @@ export VTA_PYNQ_RPC_HOST=192.168.2.99
 export VTA_PYNQ_RPC_PORT=9091
 ```
 
-In addition, you'll need to edit the `config.json` file to indicate that we are targeting the Pynq platform, by setting the `TARGET` field to the `"pynq"` value. Alternatively, you can copy the default `make/config.json` into the VTA root.
+In addition, you'll need to edit the `vta_config.json` file to indicate that we are targeting the Pynq platform, by setting the `TARGET` field to the `"pynq"` value. Alternatively, you can copy the default `make/config.json` into the VTA root.
 > Note: in contrast to our simulation setup, there are no libraries to compile on the host side since the host offloads all of the computation to the Pynq board.
 
 ```bash
-cd <vta root>
-cp make/config.json .
+cd <tvm root>
+cp vta/config/pynq_sample.json .
 ```
 
 This time again, we will run the 2D convolution testbench. But beforehand, we'll need to program the Pynq's own FPGA with a VTA bitstream, and build the VTA runtime on the Pynq via RPC. The following `test_program_rpc.py` script will perform two operations:
|
|||
```
|
||||
|
||||
The performance metrics measured on the Pynq board will be reported for each convolutional layer.
|
||||
You can also try out other tutorials.
|
||||
|
||||
Finally, we run the ResNet-18 end-to-end example on the Pynq.
|
||||
|
||||
```bash
|
||||
python examples/resnet18/pynq/imagenet_predict.py
|
||||
```
|
||||
This will run ResNet inference by offloading the compute-heavy convolution layers to the Pynq's FPGA-based VTA accelerator. The time cost is also measured in seconds here.
|
||||
|
||||
## VTA Hardware Toolchain Installation
|
||||
|
||||
|
@@ -296,7 +216,7 @@ export PATH=${XILINX_SDK}/bin:${PATH}
 
 ### Custom VTA Bitstream Compilation
 
-High-level parameters are listed under `<vta root>/make/config.json` and can be customized by the user. For this custom VTA Bitstream Compilation exercise, we'll change the frequency of our design, so it can be clocked a little faster.
+High-level parameters are listed under `tvm/vta/config/vta_config.json` and can be customized by the user. For this custom VTA Bitstream Compilation exercise, we'll change the frequency of our design, so it can be clocked a little faster.
 * Set the `HW_FREQ` field to `142`. The Pynq board supports 100, 142, 167 and 200MHz clocks. Note that the higher the frequency, the harder it will be to close timing. Increasing the frequency can lead to timing violation and thus faulty hardware.
 * Set the `HW_CLK_TARGET` to `6`. This parameters refers to the target clock period in ns passed to HLS - a lower clock period leads to more aggressive pipelining to achieve timing closure at higher frequencies. Technically a 142MHz clock would require a 7ns target, but we intentionally lower the clock target to 6ns to more aggressively pipeline our design.
|
@ -325,20 +245,13 @@ This process is lenghty, and can take around up to an hour to complete depending
|
|||
|
||||
Once the compilation completes, the generated bitstream can be found under `<vta root>/build/hardware/xilinx/vivado/<configuration>/export/vta.bit`.
|
||||
|
||||
### End-to-end ResNet-18 Example with the Custom Bitstream
|
||||
### Use the Custom Bitstream
|
||||
|
||||
Let's run the ResNet-18 example with our newly generated bitstream.
|
||||
|
||||
In `<vta root>/examples/resnet18/pynq/imagenet_predict.py`, change the line:
|
||||
```python
|
||||
vta.program_fpga(remote, bitstream=None)
|
||||
```
|
||||
to
|
||||
We can change the FPGA bitstream by simply change the bistream path to the configuring API.
|
||||
|
||||
```python
|
||||
vta.program_fpga(remote, bitstream="<vta root>/build/hardware/xilinx/vivado/<configuration>/export/vta.bit")
|
||||
```
|
||||
|
||||
Instead of downloading the bitstream from the bitstream repository, the programmer will instead use the custom bitstream you just generated, which is a VTA design clocked at a higher frequency.
|
||||
|
||||
Do you observe a noticable performance increase on the ImageNet inference workload?
|
|
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+export PYTHONPATH=python:nnvm/python:vta/python:topi/python
+
+echo "Running unittest..."
+python -m nose -v vta/tests/python/unittest || exit -1
+python3 -m nose -v vta/tests/python/unittest || exit -1
+
+echo "Running integration test..."
+python -m nose -v vta/tests/python/integration || exit -1
+python3 -m nose -v vta/tests/python/integration || exit -1
@@ -1,38 +0,0 @@
-Contributing to VTA
-===================
-VTA is part of TVM software/hardware stack.
-We adopts Apache style committer model.
-The package is developed and used by the community.
-
-We actively seek committers that come from community contributors who:
-- Made substantial contributions to the project.
-  - All forms of contributions are valued (see detail in next section).
-- Willing to spend time on maintaining and lead the project.
-
-Contributions
--------------
-We value all forms of contributions, here is a non-comprehensive
-list of contributions that are welcomed
-
-- Documentation and usage examples
-- Hardware implementations of the design.
-- Community participation, answering questions and issues.
-- Code readability and developer guide
-  - We welcome contributions that add code comments
-    to improve readability
-  - We also welcome contributions to docs to explain the
-    design choices of the internal.
-- Test cases to make the codebase more robust
-- Tutorials, blog posts, talks that promote the project.
-
-
-How to Contribute
------------------
-See [Contributor guide](docs/how_to/contribute.md) on how to contribute.
-
-Committers
-----------
-Committers are people who have made substantial contribution to the project and granted write access to the project.
-
-- [Thierry Moreau](http://homes.cs.washington.edu/~moreau/), University of Washington
-- [Tianqi Chen](https://github.com/tqchen), University of Washington
@@ -1,128 +0,0 @@
-#!groovy
-// -*- mode: groovy -*-
-// Jenkins pipeline
-// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/
-
-// nnvm libraries
-vta_lib = "lib/libvta.so, lib/libvta.so.json, config.json"
-vta_lib += ", tvm/build/libtvm.so, tvm/build/libtvm_topi.so, tvm/build/libnnvm_compiler.so"
-
-
-// command to start a docker container
-docker_run = 'tests/ci_build/ci_build.sh'
-// timeout in minutes
-max_time = 60
-
-// initialize source codes
-def init_git() {
-  checkout scm
-  retry(5) {
-    timeout(time: 2, unit: 'MINUTES') {
-      sh 'git submodule update --init --recursive'
-    }
-  }
-}
-
-def init_git_win() {
-  checkout scm
-  retry(5) {
-    timeout(time: 2, unit: 'MINUTES') {
-      bat 'git submodule update --init --recursive'
-    }
-  }
-}
-
-stage("Sanity Check") {
-  timeout(time: max_time, unit: 'MINUTES') {
-    node('linux') {
-      ws('workspace/vta/sanity') {
-        init_git()
-        sh "${docker_run} lint ./tests/scripts/task_lint.sh"
-      }
-    }
-  }
-}
-
-// Run make. First try to do an incremental make from a previous workspace in hope to
-// accelerate the compilation. If something wrong, clean the workspace and then
-// build from scratch.
-def make(docker_type, make_flag) {
-  timeout(time: max_time, unit: 'MINUTES') {
-    sh "${docker_run} ${docker_type} cp make/sim_sample.json config.json"
-    try {
-      sh "${docker_run} ${docker_type} ./tests/scripts/task_build.sh ${make_flag}"
-    } catch (exc) {
-      echo 'Incremental compilation failed. Fall back to build from scratch'
-      sh "${docker_run} ${docker_type} ./tests/scripts/task_clean.sh"
-      sh "${docker_run} ${docker_type} ./tests/scripts/task_build.sh ${make_flag}"
-    }
-  }
-}
-
-// pack libraries for later use
-def pack_lib(name, libs) {
-  sh """
-     echo "Packing ${libs} into ${name}"
-     echo ${libs} | sed -e 's/,/ /g' | xargs md5sum
-     """
-  stash includes: libs, name: name
-}
-
-
-// unpack libraries saved before
-def unpack_lib(name, libs) {
-  unstash name
-  sh """
-     echo "Unpacked ${libs} from ${name}"
-     echo ${libs} | sed -e 's/,/ /g' | xargs md5sum
-     """
-}
-
-stage('Build') {
-  timeout(time: max_time, unit: 'MINUTES') {
-    node('linux') {
-      ws('workspace/vta/build') {
-        init_git()
-        make('cpu', '-j2')
-        pack_lib('cpu', vta_lib)
-      }
-    }
-  }
-}
-
-stage('Tests') {
-  parallel 'python': {
-    node('linux') {
-      ws('workspace/vta/it-python') {
-        init_git()
-        unpack_lib('cpu', vta_lib)
-        timeout(time: max_time, unit: 'MINUTES') {
-          sh "${docker_run} cpu ./tests/scripts/task_python_test.sh"
-        }
-      }
-    }
-  },
-  'docs': {
-    node('linux') {
-      ws('workspace/vta/docs-python') {
-        init_git()
-        unpack_lib('cpu', vta_lib)
-        timeout(time: max_time, unit: 'MINUTES') {
-          sh "${docker_run} cpu ./tests/scripts/task_python_docs.sh"
-        }
-        pack_lib('mydocs', 'docs.tgz')
-      }
-    }
-  }
-}
-
-stage('Deploy') {
-  node('docker' && 'doc') {
-    ws('workspace/vta/deploy-docs') {
-      if (env.BRANCH_NAME == "master") {
-        unpack_lib('mydocs', 'docs.tgz')
-        sh "tar xf docs.tgz -C /var/vta-docs"
-      }
-    }
-  }
-}
vta/LICENSE (201 lines removed)
The deleted file was the unmodified Apache License, Version 2.0 boilerplate text; it is not reproduced here.
vta/Makefile (71 lines removed)
@@ -1,71 +0,0 @@
-ROOTDIR = $(CURDIR)
-
-export LDFLAGS = -pthread -lm
-export CFLAGS = -std=c++11 -Wall -O2 -Iinclude -fPIC
-
-VTA_CONFIG = python make/vta_config.py
-CFLAGS += `${VTA_CONFIG} --cflags`
-LDFLAGS += `${VTA_CONFIG} --ldflags`
-VTA_TARGET := $(shell ${VTA_CONFIG} --target)
-
-UNAME_S := $(shell uname -s)
-
-ifeq ($(UNAME_S), Darwin)
-	SHARED_LIBRARY_SUFFIX := dylib
-	WHOLE_ARCH= -all_load
-	NO_WHOLE_ARCH= -noall_load
-	LDFLAGS += -undefined dynamic_lookup
-else
-	SHARED_LIBRARY_SUFFIX := so
-	WHOLE_ARCH= --whole-archive
-	NO_WHOLE_ARCH= --no-whole-archive
-endif
-
-
-VTA_LIB_SRC = $(wildcard src/*.cc)
-
-ifeq (${VTA_TARGET}, pynq)
-	VTA_LIB_SRC += $(wildcard src/pynq/*.cc)
-endif
-
-ifeq (${VTA_TARGET}, sim)
-	VTA_LIB_SRC += $(wildcard src/sim/*.cc)
-endif
-
-VTA_LIB_OBJ = $(patsubst src/%.cc, build/%.o, $(VTA_LIB_SRC))
-
-all: lib/libvta.so lib/libvta.so.json
-
-build/%.o: src/%.cc
-	@mkdir -p $(@D)
-	$(CXX) $(CFLAGS) -MM -MT build/$*.o $< >build/$*.d
-	$(CXX) -c $(CFLAGS) -c $< -o $@
-
-lib/libvta.so.json: lib/libvta.so
-	@mkdir -p $(@D)
-	${VTA_CONFIG} --cfg-json > $@
-
-lib/libvta.so: $(VTA_LIB_OBJ)
-	@mkdir -p $(@D)
-	$(CXX) $(CFLAGS) -shared -o $@ $(filter %.o, $^) $(LDFLAGS)
-
-
-lint: pylint cpplint
-
-cpplint:
-	python3 tvm/dmlc-core/scripts/lint.py vta cpp include src
-
-pylint:
-	python3 -m pylint python/vta --rcfile=$(ROOTDIR)/tests/lint/pylintrc
-
-doc:
-	doxygen docs/Doxyfile
-
-clean:
-	$(RM) -rf build lib bin *~ */*~ */*/*~ */*/*/*~ */*.o */*/*.o */*/*/*.o
-
-
--include build/*.d
--include build/*/*.d
--include build/*/*/*.d
--include build/*/*/*/*.d
vta/NEWS.md (13 lines removed)
@@ -1,13 +0,0 @@
-VTA Change Log
-==============
-
-This file records the changes in VTA stack in reverse chronological order.
-
-
-## Initial version
-
-- Vivado based hardware.
-- Driver for PYNQ board.
-- Runtime library.
-- TVM compiler stack.
-- Resnet-18 example.
@@ -1,8 +1,5 @@
 VTA: Open, Modular, Deep Learning Accelerator Stack
 ===================================================
-[![Build Status](http://mode-gpu.cs.washington.edu:8080/buildStatus/icon?job=uwsaml/vta/master)](http://mode-gpu.cs.washington.edu:8080/job/uwsaml/job/vta/job/master/)
-[![GitHub license](http://dmlc.github.io/img/apache2.svg)](./LICENSE)
-
 VTA(versatile tensor accelerator) is an open-source deep learning accelerator stack.
 It is not just an open-source hardware, but is an end to end solution that includes
 the entire software stack on top of VTA open-source hardware.
@@ -18,16 +15,4 @@ The key features include:
 - Customized and extendible TVM compiler backend.
 - Flexible RPC support to ease the deployment, and program FPGAs with Python
 
-VTA is part of our effort on [TVM Stack](http://www.tvmlang.org/).
-
-VTA Installation
-----------------
-To get started with VTA, please follow the [Installation Guide](docs/how_to/install.md)
-
-ResNet-18 Inference Example
----------------------------
-To offload ResNet-18 inference, follow the [ResNet-18 Guide](examples/resnet18/pynq/README.md)
-
-License
--------
-© Contributors, 2018. Licensed under an [Apache-2.0](https://github.com/tmoreau89/vta/blob/master/LICENSE) license.
+VTA is part of our effort on TVM Stack.
@@ -1,4 +0,0 @@
-#!/bin/bash
-export PYTHONPATH=${PYTHONPATH}:/home/xilinx/vta/tvm/python:/home/xilinx/vta/python
-export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/opt/python3.6/lib/python3.6/site-packages/pynq/drivers/
-python -m vta.exec.rpc_server
@@ -0,0 +1,8 @@
+# VTA Configuration
+
+Each VTA runtime/hardware configuration is specified by vta_config.json file.
+You can copy the vta_config.json to tvm project root and modify the configuration
+before you type make.
+
+The config is going to affect the behavior of python package as well as
+the hardware runtime build.
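The copy-then-rebuild flow this README describes matches the install guide earlier in this diff; a minimal sketch, with `TVM_ROOT` standing in for your checkout location:

```bash
TVM_ROOT=$HOME/tvm   # hypothetical checkout location
cd "$TVM_ROOT"
# Take the default config as a starting point...
cp vta/config/vta_config.json vta_config.json
# ...edit vta_config.json (e.g. change TARGET), then rebuild the runtime:
make vta
```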
@@ -7,8 +7,8 @@ import argparse
 def get_pkg_config(cfg):
     """Get the pkg config object."""
     curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
-    proj_root = os.path.abspath(os.path.join(curr_path, "../"))
-    pkg_config_py = os.path.join(proj_root, "python/vta/pkg_config.py")
+    proj_root = os.path.abspath(os.path.join(curr_path, "../../"))
+    pkg_config_py = os.path.join(proj_root, "vta/python/vta/pkg_config.py")
     libpkg = {"__file__": pkg_config_py}
     exec(compile(open(pkg_config_py, "rb").read(), pkg_config_py, "exec"), libpkg, libpkg)
     PkgConfig = libpkg["PkgConfig"]
@@ -18,14 +18,22 @@ def get_pkg_config(cfg):
 def main():
     """Main funciton"""
     parser = argparse.ArgumentParser()
+    parser.add_argument("--use-cfg", type=str, default="",
+                        help="path to the config json")
     parser.add_argument("--cflags", action="store_true",
                         help="print the cflags")
+    parser.add_argument("--defs", action="store_true",
+                        help="print the macro defs")
+    parser.add_argument("--sources", action="store_true",
+                        help="print the source file paths")
     parser.add_argument("--update", action="store_true",
                         help="Print out the json option.")
     parser.add_argument("--ldflags", action="store_true",
                         help="print the cflags")
     parser.add_argument("--cfg-json", action="store_true",
                         help="print all the config json")
+    parser.add_argument("--save-cfg-json", type=str, default="",
+                        help="save config json to file")
     parser.add_argument("--target", action="store_true",
                         help="print the target")
     parser.add_argument("--cfg-str", action="store_true",
@@ -66,11 +74,14 @@ def main():
 
     curr_path = os.path.dirname(
         os.path.abspath(os.path.expanduser(__file__)))
-    proj_root = os.path.abspath(os.path.join(curr_path, "../"))
+    proj_root = os.path.abspath(os.path.join(curr_path, "../../"))
     path_list = [
-        os.path.join(proj_root, "config.json"),
-        os.path.join(proj_root, "make/config.json")
+        os.path.join(proj_root, "vta_config.json"),
+        os.path.join(proj_root, "build", "vta_config.json"),
+        os.path.join(proj_root, "vta/config/vta_config.json")
     ]
+    if args.use_cfg:
+        path_list = [args.use_cfg]
     ok_path_list = [p for p in path_list if os.path.exists(p)]
     if not ok_path_list:
         raise RuntimeError("Cannot find config in %s" % str(path_list))
@@ -82,6 +93,12 @@ def main():
     if args.target:
         print(pkg.target)
 
+    if args.defs:
+        print(" ".join(pkg.macro_defs))
+
+    if args.sources:
+        print(" ".join(pkg.lib_source))
+
     if args.cflags:
         cflags_str = " ".join(pkg.cflags)
         if cfg["TARGET"] == "pynq":
@@ -94,6 +111,10 @@ def main():
     if args.cfg_json:
         print(pkg.cfg_json)
 
+    if args.save_cfg_json:
+        with open(args.save_cfg_json, "w") as fo:
+            fo.write(pkg.cfg_json)
+
     if args.cfg_str:
         # Needs to match the BITSTREAM string in python/vta/environment.py
         cfg_str = "{}x{}x{}_{}bx{}b_{}_{}_{}_{}_{}MHz_{}ns_v{}".format(
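With the flags added here, vta_config.py becomes a small query tool that both CMake and humans can call. A hedged sketch of the new options (paths as introduced by this commit; outputs depend on the active config):

```bash
cd tvm   # hypothetical checkout root

# List the runtime sources the build should compile
python vta/config/vta_config.py --sources

# Freeze the resolved configuration into the build directory, one of
# the locations the new path_list searches (build/vta_config.json)
python vta/config/vta_config.py --save-cfg-json build/vta_config.json

# Point at an explicit config file instead of the search path
python vta/config/vta_config.py --use-cfg=build/vta_config.json --target
```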
@@ -1,5 +0,0 @@
-doxygen
-modules
-tutorials
-_build
-gen_modules
vta/docs/Doxyfile (2427 changed lines)
The diff for this file is not shown because of its large size.
@@ -1,193 +0,0 @@
-# Makefile for Sphinx documentation
-#
-
-# You can set these variables from the command line.
-SPHINXOPTS =
-SPHINXBUILD = python3 -m sphinx
-PAPER =
-BUILDDIR = _build
-
-# User-friendly check for sphinx-build
-#ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
-#$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively# you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
-#endif
-
-# Internal variables.
-PAPEROPT_a4 = -D latex_paper_size=a4
-PAPEROPT_letter = -D latex_paper_size=letter
-ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
-# the i18n builder cannot share the environment and doctrees with the others
-I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
-
-.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext
-
-help:
-	@echo "Please use \`make <target>' where <target> is one of"
-	@echo "  html       to make standalone HTML files"
-	@echo "  dirhtml    to make HTML files named index.html in directories"
-	@echo "  singlehtml to make a single large HTML file"
-	@echo "  pickle     to make pickle files"
-	@echo "  json       to make JSON files"
-	@echo "  htmlhelp   to make HTML files and a HTML help project"
-	@echo "  qthelp     to make HTML files and a qthelp project"
-	@echo "  applehelp  to make an Apple Help Book"
-	@echo "  devhelp    to make HTML files and a Devhelp project"
-	@echo "  epub       to make an epub"
-	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
-	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
-	@echo "  latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
-	@echo "  text       to make text files"
-	@echo "  man        to make manual pages"
-	@echo "  texinfo    to make Texinfo files"
-	@echo "  info       to make Texinfo files and run them through makeinfo"
-	@echo "  gettext    to make PO message catalogs"
-	@echo "  changes    to make an overview of all changed/added/deprecated items"
-	@echo "  xml        to make Docutils-native XML files"
-	@echo "  pseudoxml  to make pseudoxml-XML files for display purposes"
-	@echo "  linkcheck  to check all external links for integrity"
-	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"
-	@echo "  coverage   to run coverage check of the documentation (if enabled)"
-
-clean:
-	rm -rf $(BUILDDIR)/*
-	rm -rf gen_modules
-
-html:
-	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
-	@echo
-	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
-
-dirhtml:
-	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
-	@echo
-	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
-
-singlehtml:
-	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
-	@echo
-	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
-
-pickle:
-	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
-	@echo
-	@echo "Build finished; now you can process the pickle files."
-
-json:
-	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
-	@echo
-	@echo "Build finished; now you can process the JSON files."
-
-htmlhelp:
-	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
-	@echo
-	@echo "Build finished; now you can run HTML Help Workshop with the" \
-	      ".hhp project file in $(BUILDDIR)/htmlhelp."
-
-qthelp:
-	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
-	@echo
-	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
-	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
-	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/rabit.qhcp"
-	@echo "To view the help file:"
-	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/rabit.qhc"
-
-applehelp:
-	$(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp
-	@echo
-	@echo "Build finished. The help book is in $(BUILDDIR)/applehelp."
-	@echo "N.B. You won't be able to view it unless you put it in" \
-	      "~/Library/Documentation/Help or install it in your application" \
-	      "bundle."
-
-devhelp:
-	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
-	@echo
-	@echo "Build finished."
-	@echo "To view the help file:"
-	@echo "# mkdir -p $$HOME/.local/share/devhelp/rabit"
-	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/rabit"
-	@echo "# devhelp"
-
-epub:
-	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
-	@echo
-	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
-
-latex:
-	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
-	@echo
-	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
-	@echo "Run \`make' in that directory to run these through (pdf)latex" \
-	      "(use \`make latexpdf' here to do that automatically)."
-
-latexpdf:
-	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
-	@echo "Running LaTeX files through pdflatex..."
-	$(MAKE) -C $(BUILDDIR)/latex all-pdf
-	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
-
-latexpdfja:
-	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
-	@echo "Running LaTeX files through platex and dvipdfmx..."
-	$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
-	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
-
-text:
-	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
-	@echo
-	@echo "Build finished. The text files are in $(BUILDDIR)/text."
-
-man:
-	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
-	@echo
-	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
-
-texinfo:
-	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
-	@echo
-	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
-	@echo "Run \`make' in that directory to run these through makeinfo" \
-	      "(use \`make info' here to do that automatically)."
-
-info:
-	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
-	@echo "Running Texinfo files through makeinfo..."
-	make -C $(BUILDDIR)/texinfo info
-	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
-
-gettext:
-	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
-	@echo
-	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
-
-changes:
-	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
-	@echo
-	@echo "The overview file is in $(BUILDDIR)/changes."
-
-linkcheck:
-	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
-	@echo
-	@echo "Link check complete; look for any errors in the above output " \
-	      "or in $(BUILDDIR)/linkcheck/output.txt."
-
-doctest:
-	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
-	@echo "Testing of doctests in the sources finished, look at the " \
-	      "results in $(BUILDDIR)/doctest/output.txt."
-
-coverage:
-	$(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage
-	@echo "Testing of coverage in the sources finished, look at the " \
-	      "results in $(BUILDDIR)/coverage/python.txt."
-
-xml:
-	$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
-	@echo
-	@echo "Build finished. The XML files are in $(BUILDDIR)/xml."
|
||||
|
||||
pseudoxml:
|
||||
$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
|
||||
@echo
|
||||
@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
|
|
@ -1,5 +0,0 @@
The documentation of VTA is generated with recommonmark and sphinx.

- pip install sphinx>=1.5.5 sphinx-gallery sphinx_rtd_theme matplotlib Image recommonmark
- Type "make html" to generate the docs
- To build only the doxygen docs, type "make doc" at the project root
@ -1,11 +0,0 @@
.rst-content .hidden-section {
    display: none;
}

.rst-toc .hidden-section {
    display: none;
}

nav .hidden-section {
    display: inherit;
}
@ -1,6 +0,0 @@
Links to API References
=======================

This page contains links to API references that are built with different doc build systems.

* `C++ doxygen API <doxygen/index.html>`_
vta/docs/conf.py
@ -1,213 +0,0 @@
# -*- coding: utf-8 -*-
#
# documentation build configuration file, created by
# sphinx-quickstart on Thu Jul 23 19:40:08 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os, subprocess
import shlex
import recommonmark
import sphinx_gallery
from tvm.contrib import rpc, graph_runtime
from recommonmark.parser import CommonMarkParser
from recommonmark.transform import AutoStructify

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.insert(0, os.path.join(curr_path, '../python/'))

# -- General configuration ------------------------------------------------

# General information about the project.
project = u'vta'
author = u'%s developers' % project
copyright = u'2018, %s' % author
github_doc_root = 'https://github.com/uwsaml/vta/tree/master/docs/'

# add markdown parser
CommonMarkParser.github_doc_root = github_doc_root
source_parsers = {
    '.md': CommonMarkParser
}
os.environ['VTA_BUILD_DOC'] = '1'
# Version information.
import vta
version = vta.__version__
release = vta.__version__

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.intersphinx',
    'sphinx.ext.napoleon',
    'sphinx.ext.mathjax',
    'sphinx_gallery.gen_gallery',
]


breathe_projects = {'vta' : 'doxygen/xml/'}
breathe_default_project = 'vta'

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = ['.rst', '.md']

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# generate autosummary even if no references
autosummary_generate = True

# The master toctree document.
master_doc = 'index'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False

# -- Options for HTML output ----------------------------------------------

# The theme is set by the make target
html_theme = os.environ.get('VTA_THEME', 'rtd')

on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# only import rtd theme and set it if want to build docs locally
if not on_rtd and html_theme == 'rtd':
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Output file base name for HTML help builder.
htmlhelp_basename = project + 'doc'

# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, '%s.tex' % project, project,
     author, 'manual'),
]

# hook for doxygen
def run_doxygen(folder):
    """Run the doxygen make command in the designated folder."""
    try:
        retcode = subprocess.call("cd %s; make doc" % folder, shell=True)
        retcode = subprocess.call("rm -rf _build/html/doxygen", shell=True)
        retcode = subprocess.call("mkdir -p _build/html", shell=True)
        retcode = subprocess.call("cp -rf doxygen/html _build/html/doxygen", shell=True)
        if retcode < 0:
            sys.stderr.write("doxygen terminated by signal %s" % (-retcode))
    except OSError as e:
        sys.stderr.write("doxygen execution failed: %s" % e)

intersphinx_mapping = {
    'python': ('https://docs.python.org/{.major}'.format(sys.version_info), None),
    'numpy': ('http://docs.scipy.org/doc/numpy/', None),
    'scipy': ('http://docs.scipy.org/doc/scipy/reference', None),
    'matplotlib': ('http://matplotlib.org/', None),
    'tvm': ('https://docs.tvm.ai/', None),
}

from sphinx_gallery.sorting import ExplicitOrder

examples_dirs = ['../tutorials/']
gallery_dirs = ['tutorials']
subsection_order = ExplicitOrder([])

def generate_doxygen_xml(app):
    """Run the doxygen make commands if we're on the ReadTheDocs server"""
    run_doxygen('..')

def setup(app):
    # Add hook for building doxygen xml when needed
    # no c++ API for now
    app.connect("builder-inited", generate_doxygen_xml)
    app.add_stylesheet('css/tvm_theme.css')
    app.add_config_value('recommonmark_config', {
        'url_resolver': lambda url: github_doc_root + url,
        'auto_doc_ref': True
    }, True)
    app.add_transform(AutoStructify)


sphinx_gallery_conf = {
    'backreferences_dir': 'gen_modules/backreferences',
    'doc_module': ('vta', 'numpy'),
    'reference_url': {
        'vta': None,
        'tvm': 'https://docs.tvm.ai',
        'matplotlib': 'http://matplotlib.org',
        'numpy': 'http://docs.scipy.org/doc/numpy-1.9.1'},
    'examples_dirs': examples_dirs,
    'gallery_dirs': gallery_dirs,
    'subsection_order': subsection_order,
    'find_mayavi_figures': False,
    'filename_pattern': '.py',
    'expected_failing_examples': []
}
@ -1,11 +0,0 @@
VTA Design and Developer Guide
==============================

Building a hardware stack for deep learning involves
many systems-level design decisions.
In this part of the documentation, we share the rationale for the specific choices made when designing VTA.

.. toctree::
   :maxdepth: 2

   runtime
@ -1,3 +0,0 @@
# VTA Runtime System

TODO Document the hardware runtime system.
@ -1,102 +0,0 @@
# Contribute to VTA

VTA has been developed by community members.
Everyone is more than welcome to contribute.
It is a way to make the project better and more accessible to more users.
VTA is part of the TVM software/hardware stack;
you can improve the compiler performance by contributing to [TVM](https://github.com/dmlc/tvm).


- Please add your name to [CONTRIBUTORS.md](https://github.com/dmlc/vta/blob/master/CONTRIBUTORS.md)
- Please update [NEWS.md](https://github.com/dmlc/vta/blob/master/NEWS.md) to add a note on your changes to the API or newly added documents.

## Guidelines
* [Submit Pull Request](#submit-pull-request)
* [Git Workflow Howtos](#git-workflow-howtos)
  - [How to resolve conflict with master](#how-to-resolve-conflict-with-master)
  - [How to combine multiple commits into one](#how-to-combine-multiple-commits-into-one)
  - [What is the consequence of force push](#what-is-the-consequence-of-force-push)
* [Document](#document)
* [Testcases](#testcases)
* [Core Library](#core-library)
* [Python Package](#python-package)

## Submit Pull Request
* Before submitting, please rebase your code on the most recent version of master. You can do it by
```bash
git remote add upstream [url to vta repo]
git fetch upstream
git rebase upstream/master
```
* If you have multiple small commits,
  it might be good to merge them together (use git rebase then squash) into more meaningful groups.
* Send the pull request!
  - Fix the problems reported by automatic checks
  - If you are contributing a new module or new function, add a test.

## Git Workflow Howtos
### How to resolve conflict with master
- First rebase to the most recent master
```bash
# The first two steps can be skipped after you do it once.
git remote add upstream [url to vta repo]
git fetch upstream
git rebase upstream/master
```
- Git may show some conflicts it cannot merge, say ```conflicted.py```.
  - Manually modify the file to resolve the conflict.
  - After you have resolved the conflict, mark it as resolved by
```bash
git add conflicted.py
```
- Then you can continue the rebase by
```bash
git rebase --continue
```
- Finally push to your fork; you may need to force push here.
```bash
git push --force
```

### How to combine multiple commits into one
Sometimes we want to combine multiple commits, especially when later commits are only fixes to previous ones,
to create a PR with a set of meaningful commits. You can do it by the following steps.
- Before doing so, configure the default editor of git if you haven't done so before.
```bash
git config core.editor the-editor-you-like
```
- Assume we want to combine the last 3 commits; type the following commands
```bash
git rebase -i HEAD~3
```
- It will pop up a text editor. Set the first commit as ```pick```, and change later ones to ```squash```.
- After you save the file, it will pop up another text editor to ask you to modify the combined commit message.
- Push the changes to your fork; you need to force push.
```bash
git push --force
```

### Reset to the most recent master
You can always use git reset to reset your version to the most recent master.
Note that all your ***local changes will get lost***.
So only do it when you do not have local changes or when your pull request has just been merged.
```bash
git reset --hard [hash tag of master]
git push --force
```

### What is the consequence of force push
The previous two tips require a force push because we altered the history of the commits.
It is fine to force push to your own fork, as long as the changed commits are only yours.

## Testcases
- All the testcases are in the tests folder

## Core Library
- Follow Google's C++ style for C++.
- We use doxygen to document all the interface code.
- You can reproduce the linter checks by typing ```make lint```

## Python Package
- Always add docstring to the new functions in numpydoc format.
- You can reproduce the linter checks by typing ```make lint```
@ -1,19 +0,0 @@
VTA Documentation
=================

Welcome to VTA documentation.


Contents
--------

.. toctree::
   :maxdepth: 1

   self
   how_to/install
   tutorials/index
   how_to/contribute
   api/python/index
   dev/index
   api_links
@ -1,5 +0,0 @@
quantize_graph.json
quantize_params.pkl
synset.txt
*.jpg
vta.bit
@ -1,26 +0,0 @@
# Resnet-18 Example on Pynq-based VTA Design

Follow the first two parts of the [Installation Guide](../../../docs/how_to/install.md) to make sure that the VTA python libraries are installed, and that the RPC server is running on the Pynq FPGA dev board.

We recommend leaving `config.json` at its default parameterization (of course you can change the target between "sim" and "pynq").

Simply run the example program. We rely on pickle to store parameters, which currently only works with Python 2.
```bash
python2 imagenet_predict.py
```

The script will first download the following files into the `_data/` directory:
* `cat.jpg` which provides a test sample for the ImageNet classifier
* `quantize_graph.json` which describes the NNVM graph of the 8-bit ResNet-18
* `quantize_params.pkl` which contains the network parameters
* `synset.txt` which contains the ImageNet categories

Next, it runs ImageNet classification using the ResNet-18 architecture on a VTA design that performs 8-bit integer inference, classifying the cat image `cat.jpg`.

The script reports the runtime measured on the Pynq board (in seconds) and the top-1 result category:
```
('x', (1, 3, 224, 224))
Build complete...
('TVM prediction top-1:', 281, 'tabby, tabby cat')
t-cost=0.41906
```
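For reference, the final top-1 line is produced by mapping the network's argmax index to a category name from the downloaded `synset.txt`. The following is a minimal sketch of that step only, not the example script itself; the random `scores` array is a stand-in for the real network output, and the one-label-per-line format of `synset.txt` is assumed:

```python
import numpy as np

# Hypothetical final step: map the argmax of the output to an ImageNet label.
with open("_data/synset.txt") as f:
    synset = [line.strip() for line in f]

scores = np.random.rand(1000)      # stand-in for the network's output vector
top1 = int(np.argmax(scores))
print("TVM prediction top-1:", top1, synset[top1])
```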
@ -25,7 +25,7 @@ NO_DSP = false
NO_ALU = false

# Process VTA JSON config
VTA_CONFIG = python $(CURDIR)/../../make/vta_config.py
VTA_CONFIG = python $(CURDIR)/../../config/vta_config.py
CFLAGS := $(shell ${VTA_CONFIG} --cflags)
VTA_TARGET := $(shell ${VTA_CONFIG} --target)
@ -1,8 +0,0 @@
# VTA Configuration

Each VTA runtime/hardware configuration is specified by a config.json file.
You can copy config.json to the project root and modify the configuration
before you type make.

The config affects the behavior of the Python package as well as
the hardware runtime build.
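Since the config is plain JSON, it can be inspected from Python before a build. The sketch below is a minimal illustration only: depending on the checkout, the file is named `config.json` or `vta_config.json` (this commit performs that rename), the `VTA_ROOT` environment variable is a hypothetical convenience, and the `TARGET` field is inferred from the `vta_config.py --target` usage in the Makefile above.

```python
import json
import os

# Minimal sketch: load the VTA config the same way the build scripts consume it.
proj_root = os.environ.get("VTA_ROOT", ".")
cfg_path = os.path.join(proj_root, "vta_config.json")

with open(cfg_path) as f:
    cfg = json.load(f)

# The target ("sim", "pynq", ...) decides which runtime backend gets built.
print("VTA target:", cfg.get("TARGET", "<unset>"))
```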
@ -297,11 +297,12 @@ def _init_env():
    """Initialize the default global env"""
    curr_path = os.path.dirname(
        os.path.abspath(os.path.expanduser(__file__)))
    proj_root = os.path.abspath(os.path.join(curr_path, "../../"))
    proj_root = os.path.abspath(os.path.join(curr_path, "../../../"))
    path_list = [
        os.path.join(curr_path, "config.json"),
        os.path.join(proj_root, "config.json"),
        os.path.join(proj_root, "make/config.json")
        os.path.join(curr_path, "vta_config.json"),
        os.path.join(proj_root, "build", "vta_config.json"),
        os.path.join(proj_root, "vta_config.json"),
        os.path.join(proj_root, "vta/config/vta_config.json")
    ]
    path_list = [p for p in path_list if os.path.exists(p)]
    if not path_list:
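The lookup above is a first-existing-file search: candidate paths are ordered from most to least specific, non-existent ones are filtered out, and the first survivor wins. A standalone sketch of the same pattern (the candidate names below are illustrative, not the exact list used by the package):

```python
import os

def find_config(candidates):
    """Return the first existing path from an ordered candidate list."""
    existing = [p for p in candidates if os.path.exists(p)]
    if not existing:
        raise RuntimeError("no config file found among: %s" % candidates)
    return existing[0]

# Illustrative candidate order, mirroring the env initialization above.
cfg = find_config([
    "vta_config.json",                             # current directory
    os.path.join("build", "vta_config.json"),      # build tree
    os.path.join("vta", "config", "vta_config.json"),
])
```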
@ -11,23 +11,25 @@ import ctypes
import json
import tvm
from tvm._ffi.base import c_str
from tvm.contrib import rpc, cc
from tvm import rpc
from tvm.contrib import cc

from ..environment import get_env
from ..pkg_config import PkgConfig
from ..libinfo import find_libvta


@tvm.register_func("tvm.contrib.rpc.server.start", override=True)
@tvm.register_func("tvm.rpc.server.start", override=True)
def server_start():
    """VTA RPC server extension."""
    # pylint: disable=unused-variable
    curr_path = os.path.dirname(
        os.path.abspath(os.path.expanduser(__file__)))
    proj_root = os.path.abspath(os.path.join(curr_path, "../../.."))
    dll_path = os.path.abspath(os.path.join(proj_root, "lib/libvta.so"))
    cfg_path = os.path.abspath(os.path.join(proj_root, "lib/libvta.so.json"))
    proj_root = os.path.abspath(os.path.join(curr_path, "../../../../"))
    dll_path = find_libvta()[0]
    cfg_path = os.path.abspath(os.path.join(proj_root, "build/vta_config.json"))
    runtime_dll = []
    _load_module = tvm.get_global_func("tvm.contrib.rpc.server.load_module")
    _load_module = tvm.get_global_func("tvm.rpc.server.load_module")

    def load_vta_dll():
        """Try to load vta dll"""
@ -36,7 +38,7 @@ def server_start():
        logging.info("Loading VTA library: %s", dll_path)
        return runtime_dll[0]

    @tvm.register_func("tvm.contrib.rpc.server.load_module", override=True)
    @tvm.register_func("tvm.rpc.server.load_module", override=True)
    def load_module(file_name):
        load_vta_dll()
        return _load_module(file_name)
@ -48,11 +50,11 @@ def server_start():
    @tvm.register_func("tvm.contrib.vta.init", override=True)
    def program_fpga(file_name):
        path = tvm.get_global_func("tvm.contrib.rpc.server.workpath")(file_name)
        path = tvm.get_global_func("tvm.rpc.server.workpath")(file_name)
        load_vta_dll().VTAProgram(c_str(path))
        logging.info("Program FPGA with %s", file_name)

    @tvm.register_func("tvm.contrib.rpc.server.shutdown", override=True)
    @tvm.register_func("tvm.rpc.server.shutdown", override=True)
    def server_shutdown():
        if runtime_dll:
            runtime_dll[0].VTARuntimeShutdown()
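On the client side, the renamed namespace means the VTA-extended server is reached through `tvm.rpc` rather than `tvm.contrib.rpc`. A minimal sketch, assuming a server is already listening on the board and that the host/port values are placeholders for your own setup:

```python
from tvm import rpc

# Hypothetical board address; replace with your Pynq's IP and RPC port.
host, port = "192.168.2.99", 9091

# Connect to the VTA-extended RPC server started on the device.
remote = rpc.connect(host, port)

# The global function registered by the server extension above
# ("tvm.contrib.vta.init") can then be fetched to program the FPGA.
finit = remote.get_function("tvm.contrib.vta.init")
```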
@ -0,0 +1,25 @@
"""Library information."""
from __future__ import absolute_import
import sys
import os

def _get_lib_name():
    if sys.platform.startswith('win32'):
        return "vta.dll"
    if sys.platform.startswith('darwin'):
        return "libvta.dylib"
    return "libvta.so"


def find_libvta(optional=False):
    """Find VTA library"""
    curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
    lib_search = [curr_path]
    lib_search += [os.path.join(curr_path, "..", "..", "..", "build",)]
    lib_search += [os.path.join(curr_path, "..", "..", "..", "build", "Release")]
    lib_name = _get_lib_name()
    lib_path = [os.path.join(x, lib_name) for x in lib_search]
    lib_found = [x for x in lib_path if os.path.exists(x)]
    if not lib_found and not optional:
        raise RuntimeError("Cannot find libvta: candidates are: %s" % str(lib_path))
    return lib_found
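A quick sketch of how `find_libvta` is meant to be consumed (the simulator utilities further below use the same pattern): locate the shared library, then load it globally so the TVM runtime can resolve the VTA symbols. The `vta.libinfo` module path follows the `from ..libinfo import find_libvta` import above and assumes the `vta` package is on your path:

```python
import ctypes

from vta.libinfo import find_libvta

# optional=True returns an empty list instead of raising when nothing is found.
libs = find_libvta(optional=True)
if libs:
    # RTLD_GLOBAL exposes the VTA symbols to subsequently loaded TVM modules.
    runtime = ctypes.CDLL(libs[0], ctypes.RTLD_GLOBAL)
```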
@ -41,14 +41,14 @@ class PkgConfig(object):
        # include path
        self.include_path = [
            "-I%s/include" % proj_root,
            "-I%s/tvm/include" % proj_root,
            "-I%s/tvm/dlpack/include" % proj_root,
            "-I%s/tvm/dmlc-core/include" % proj_root
            "-I%s/vta/include" % proj_root,
            "-I%s/dlpack/include" % proj_root,
            "-I%s/dmlc-core/include" % proj_root
        ]
        # List of source files that can be used to build standalone library.
        self.lib_source = []
        self.lib_source += glob.glob("%s/src/*.cc" % proj_root)
        self.lib_source += glob.glob("%s/src/%s/*.cc" % (proj_root, cfg["TARGET"]))
        self.lib_source += glob.glob("%s/vta/src/*.cc" % proj_root)
        self.lib_source += glob.glob("%s/vta/src/%s/*.cc" % (proj_root, cfg["TARGET"]))
        # macro keys
        self.macro_defs = []
        self.cfg_dict = {}
@ -1,23 +1,16 @@
"""Utilities to start simulator."""
import os
import ctypes
import json
import tvm
from ..libinfo import find_libvta

def _load_lib():
    """Load local library, assuming they are simulator."""
    # pylint: disable=unused-variable
    curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
    dll_path = [
        os.path.abspath(os.path.join(curr_path, "../../../lib/libvta.so")),
    ]
    runtime_dll = []
    if not all(os.path.exists(f) for f in dll_path):
    lib_path = find_libvta(optional=True)
    if not lib_path:
        return []
    try:
        for fname in dll_path:
            runtime_dll.append(ctypes.CDLL(fname, ctypes.RTLD_GLOBAL))
        return runtime_dll
        return [ctypes.CDLL(lib_path[0], ctypes.RTLD_GLOBAL)]
    except OSError:
        return []
@ -2,7 +2,7 @@
from __future__ import absolute_import as _abs

import os
from tvm.contrib import rpc
from tvm import rpc
from ..environment import get_env
from . import simulator
@ -8,6 +8,7 @@ import tvm
import topi

from nnvm.top import registry as reg, OpPattern
from nnvm.top import nn as _nn
from ..environment import get_env
@ -238,9 +239,9 @@ def is_packed_layout(layout):
    """Check if layout is packed layout"""
    if layout == "NCHW":
        return False
    assert "n" in layout
    assert "c" in layout
    return True
    if "n" in layout and "c" in layout:
        return True
    return False

@reg.register_alter_op_layout("conv2d", level=15)
def alter_conv2d_layout(*_):
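The relaxed check turns the hard asserts into a plain boolean predicate: `NCHW` is rejected, and any layout string carrying both a packed batch (`n`) and packed channel (`c`) factor is accepted. A self-contained mirror of the new behavior for illustration:

```python
def is_packed_layout(layout):
    """Mirror of the predicate above: packed layouts carry lowercase n/c factors."""
    if layout == "NCHW":
        return False
    return "n" in layout and "c" in layout

assert not is_packed_layout("NCHW")      # plain layout: not packed
assert is_packed_layout("NCHW1n16c")     # packed batch and channel factors
assert not is_packed_layout("NHWC")      # no lowercase packing factors
```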
@ -255,27 +256,18 @@ def compute_conv2d(attrs, inputs, out):
    strides = attrs.get_int_tuple("strides")
    dilation = attrs.get_int_tuple("dilation")
    groups = attrs.get_int("groups")
    channels = attrs.get_int("channels")
    layout = attrs["layout"]
    out_dtype = attrs['out_dtype']
    assert dilation == (1, 1), "not support dilate now"
    assert attrs.get_bool("use_bias") is False
    if is_packed_layout(layout):
        assert groups == 1
        return packed_conv2d(inputs[0], inputs[1],
                             padding, strides, out_dtype=out_dtype)
    if groups == 1:
        out = topi.nn.conv2d(inputs[0], inputs[1], strides, padding, out_dtype=out_dtype)
    elif groups == get_const_int(inputs[0].shape[1]) and groups == channels:
        out = topi.nn.depthwise_conv2d_nchw(
            inputs[0], inputs[1], strides, padding, out_dtype=out_dtype)
    else:
        raise ValueError("not support arbitrary group number for now")
    return out
    return _nn.compute_conv2d(attrs, inputs, out)


@reg.register_schedule("conv2d", level=15)
def schedule_quantized_conv2d(attrs, outs, target):
def schedule_conv2d(attrs, outs, target):
    """ 2D convolution schedule.
    """
    layout = attrs["layout"]
@ -288,8 +280,7 @@ def schedule_quantized_conv2d(attrs, outs, target):
        return tvm.create_schedule([x.op for x in outs])
    else:
        raise RuntimeError("not support target %s" % target)
    with tvm.target.create(target):
        return topi.generic.schedule_conv2d_nchw(outs)
    return _nn.schedule_conv2d(attrs, outs, target)


def _get_workload(data, pad_data, kernel, output):
@ -8,7 +8,7 @@
#include <dmlc/thread_local.h>
#include <vta/runtime.h>

#include "../tvm/src/runtime/workspace_pool.h"
#include "../../src/runtime/workspace_pool.h"


namespace tvm {
@ -155,8 +155,8 @@ class UopKernel {
    le.dst_factor = dst_factor;
    le.src_factor = src_factor;
    le.wgt_factor = wgt_factor;
    assert(seq_.size() == 0);
    assert(loop_.size() < 2);
    CHECK_EQ(seq_.size(), 0U);
    CHECK_LT(loop_.size(), 2U);
    loop_.push_back(le);
    ++loop_ptr_;
  }
@ -196,13 +196,13 @@ class UopKernel {
    if (mode_ == 0xFFFFFFFF) {
      mode_ = mode;
    } else {
      assert(mode_ == mode);
      CHECK(mode_ == mode);
    }
    // Set reset_out field if unset
    if (reset_out_ == 0xFFFFFFFF) {
      reset_out_ = reset_out;
    } else {
      assert(reset_out_ == reset_out);
      CHECK(reset_out_ == reset_out);
    }
    // Check kernel op and imm/imm_val in ALU mode
    if (mode == 1) {
@ -211,9 +211,9 @@ class UopKernel {
      use_imm_ = use_imm;
      imm_val_ = imm_val;
    } else {
      assert(opcode_ == opcode);
      assert(use_imm_ == use_imm);
      assert(imm_val_ == imm_val);
      CHECK(opcode_ == opcode);
      CHECK(use_imm_ == use_imm);
      CHECK(imm_val_ == imm_val);
    }
  }
}
@ -244,7 +244,7 @@ class UopKernel {
  void VerifyDep(uint32_t dst_index) {
    size_t step = std::min(static_cast<size_t>(2U), seq_.size());
    for (size_t i = seq_.size() - step; i < seq_.size(); ++i) {
      assert(seq_[i].dst_idx != dst_index);
      CHECK(seq_[i].dst_idx != dst_index);
    }
  }
  // The uop buffer
@ -293,7 +293,7 @@ class BaseQueue {
    elem_bytes_ = elem_bytes;
    dram_buffer_ = static_cast<char*>(VTAMemAlloc(
        max_bytes, coherent || always_cache_));
    assert(dram_buffer_ != nullptr);
    CHECK(dram_buffer_ != nullptr);
    dram_phy_addr_ = VTAMemGetPhyAddr(dram_buffer_);
  }
  /*!
@ -363,9 +363,9 @@ class UopQueue : public BaseQueue {
    size_t num_op = kernel->size();
    if (dram_end_ + num_op > kMaxElems) {
      fautosync();
      assert(dram_end_ <= kMaxElems);
      CHECK(dram_end_ <= kMaxElems);
    }
    assert(num_op <= kMaxNumUop);
    CHECK(num_op <= kMaxNumUop);
    uint32_t uop_begin = 0;
    if (sram_end_ + num_op > kMaxNumUop) {
      // Need to evict
@ -390,7 +390,7 @@ class UopQueue : public BaseQueue {
    kernel->sram_begin_ = uop_begin;
    kernel->sram_end_ = sram_end_;
    CHECK(kernel->cached());
    assert(uop_begin != sram_end_);
    CHECK(uop_begin != sram_end_);
    cache_.insert(cache_.begin() + cache_ptr_, kernel);
    cache_.erase(cache_.begin() + evict_begin, cache_.begin() + cache_ptr_);
    cache_ptr_ = evict_begin + 1;
@ -398,7 +398,7 @@ class UopQueue : public BaseQueue {
  // Flush as weight load
  void FlushUopLoad(VTAMemInsn* insn) {
    if (sram_begin_ != sram_end_) {
      assert((dram_end_ - dram_begin_) == (sram_end_ - sram_begin_));
      CHECK((dram_end_ - dram_begin_) == (sram_end_ - sram_begin_));
      insn->memory_type = VTA_MEM_ID_UOP;
      insn->sram_base = sram_begin_;
      insn->dram_base = dram_phy_addr_ / kElemBytes + dram_begin_;
@ -433,12 +433,12 @@ class UopKernelMap {
  UopKernel** Get(void* signature,
                  int nbytes) {
    uint32_t key = 0;
    assert(nbytes == 0 || nbytes == sizeof(int));
    CHECK(nbytes == 0 || nbytes == sizeof(int));
    if (nbytes == sizeof(int)) {
      memcpy(&key, signature, sizeof(int));
      key = key + 1;
    }
    assert(key < 100);
    CHECK_LT(key, 100);
    if (kmap_.size() <= key) {
      kmap_.resize(key + 1, nullptr);
    }
@ -490,8 +490,8 @@ class InsnQueue : public BaseQueue {
      pending_pop_next_[to] = 1;
    }
    // Impossible condition
    assert(from != kLoadStage || to != kStoreStage);
    assert(to != kLoadStage || to != kComputeStage);
    CHECK(from != kLoadStage || to != kStoreStage);
    CHECK(to != kLoadStage || to != kComputeStage);
  }
  // Insert dependency push of load
  void DepPush(int from, int to) {
@ -636,15 +636,15 @@ class InsnQueue : public BaseQueue {
    // Count status in queues
    if (c.mem.opcode == VTA_OPCODE_LOAD || c.mem.opcode == VTA_OPCODE_STORE) {
      if (c.mem.opcode == VTA_OPCODE_STORE) {
        assert(c.mem.pop_next_dep == false);
        assert(c.mem.push_next_dep == false);
        CHECK(c.mem.pop_next_dep == false);
        CHECK(c.mem.push_next_dep == false);
        if (c.mem.pop_prev_dep) g2s_queue--;
        if (c.mem.push_prev_dep) s2g_queue++;
      } else if (c.mem.opcode == VTA_OPCODE_LOAD &&
                 (c.mem.memory_type == VTA_MEM_ID_INP ||
                  c.mem.memory_type == VTA_MEM_ID_WGT) ) {
        assert(c.mem.pop_prev_dep == false);
        assert(c.mem.push_prev_dep == false);
        CHECK(c.mem.pop_prev_dep == false);
        CHECK(c.mem.push_prev_dep == false);
        if (c.mem.pop_next_dep) g2l_queue--;
        if (c.mem.push_next_dep) l2g_queue++;
      } else {
@ -742,15 +742,15 @@ class InsnQueue : public BaseQueue {
    // Count status in queues
    if (c.mem.opcode == VTA_OPCODE_LOAD || c.mem.opcode == VTA_OPCODE_STORE) {
      if (c.mem.opcode == VTA_OPCODE_STORE) {
        assert(c.mem.pop_next_dep == false);
        assert(c.mem.push_next_dep == false);
        CHECK(c.mem.pop_next_dep == false);
        CHECK(c.mem.push_next_dep == false);
        if (c.mem.pop_prev_dep) g2s_queue--;
        if (c.mem.push_prev_dep) s2g_queue++;
      } else if (c.mem.opcode == VTA_OPCODE_LOAD &&
                 (c.mem.memory_type == VTA_MEM_ID_INP ||
                  c.mem.memory_type == VTA_MEM_ID_WGT) ) {
        assert(c.mem.pop_prev_dep == false);
        assert(c.mem.push_prev_dep == false);
        CHECK(c.mem.pop_prev_dep == false);
        CHECK(c.mem.push_prev_dep == false);
        if (c.mem.pop_next_dep) g2l_queue--;
        if (c.mem.push_next_dep) l2g_queue++;
      } else {
@ -776,7 +776,7 @@ class InsnQueue : public BaseQueue {
  void CommitPendingPop(int stage) {
    // Handle the LD<->compute queue
    // NOTE: pop executes on target(stage)
    assert(stage > 0 && stage < 4);
    CHECK(stage > 0 && stage < 4);
    if (pending_pop_prev_[stage] ||
        pending_pop_next_[stage]) {
      PushNoop(stage, false, false,
@ -806,7 +806,7 @@ class InsnQueue : public BaseQueue {
  VTAGenericInsn* NextInsn() {
    VTAGenericInsn* insn = data() + dram_end_;
    ++dram_end_;
    assert(dram_end_ < kMaxElems);
    CHECK(dram_end_ < kMaxElems);
    return insn;
  }
  // Create a new instruction for a given stage
@ -840,10 +840,10 @@ class InsnQueue : public BaseQueue {
    if (insn->opcode == VTA_OPCODE_STORE) {
      // FIXME: Right now memory_type is a 2-bit field which means that
      // VTA_MEM_ID_OUT will appear as 0. For now we'll refrain from
      // checking the memory_type to avoid an assertion error...
      // checking the memory_type to avoid a CHECK error...
      return kStoreStage;
    }
    assert(false);
    LOG(FATAL) << "not reached";
    return kNoneStage;
  }
  // Push no-op
@ -888,7 +888,7 @@ class CommandQueue {
    uop_queue_.InitSpace();
    insn_queue_.InitSpace();
    device_ = VTADeviceAlloc();
    assert(device_ != nullptr);
    CHECK(device_ != nullptr);
    printf("Initialize VTACommandHandle...\n");
  }
@ -906,8 +906,7 @@ class CommandQueue {
      case VTA_MEM_ID_OUT: return VTA_INP_ELEM_BYTES;
      default: break;
    }
    printf("Memory id not recognized: %d\n", memory_id);
    assert(false);
    LOG(FATAL) << "Memory id not recognized:" << memory_id;
    return 0;
  }
@ -999,7 +998,7 @@ class CommandQueue {
    // NOTE: FINISH cannot contain pop
    VTAGemInsn* insn = insn_queue_.CreateGemInsn();
    insn->opcode = VTA_OPCODE_FINISH;
    assert(!insn_queue_.PendingPop());
    CHECK(!insn_queue_.PendingPop());
    // Check if there are no instruction to execute at all
    if (insn_queue_.count() == 0) return;
    // Synchronization for the queues
@ -1010,17 +1009,17 @@ class CommandQueue {
      insn_queue_.DumpInsn();
    }
    // Make sure that the last instruction is a finish instruction
    assert(reinterpret_cast<VTAMemInsn*>(
    CHECK(reinterpret_cast<VTAMemInsn*>(
        insn_queue_.data())[insn_queue_.count()-1].opcode == VTA_OPCODE_FINISH);

    // Make sure that we don't exceed contiguous physical memory limits
    assert(insn_queue_.count() * sizeof(VTAGenericInsn) < VTA_MAX_XFER);
    CHECK(insn_queue_.count() * sizeof(VTAGenericInsn) < VTA_MAX_XFER);
    int timeout = VTADeviceRun(
        device_,
        insn_queue_.dram_phy_addr(),
        insn_queue_.count(),
        wait_cycles);
    assert(timeout == 0);
    CHECK_EQ(timeout, 0);
    // Reset buffers
    uop_queue_.Reset();
    insn_queue_.Reset();
@ -1028,7 +1027,7 @@ class CommandQueue {
  // Get record kernel
  UopKernel* record_kernel() const {
    assert(record_kernel_ != nullptr);
    CHECK(record_kernel_ != nullptr);
    return record_kernel_;
  }
@ -1048,7 +1047,7 @@ class CommandQueue {
    UopKernel** kptr = uptr[0]->Get(signature, nbytes);
    if (kptr[0] == nullptr) {
      record_kernel_ = new UopKernel(static_cast<char*>(signature), nbytes);
      assert((*finit)(signature) == 0);
      CHECK_EQ((*finit)(signature), 0);
      kptr[0] = static_cast<UopKernel*>(record_kernel_);
      if (debug_flag_ & VTA_DEBUG_DUMP_UOP) {
        record_kernel_->Dump();
@ -1070,7 +1069,7 @@ class CommandQueue {
    UopKernel** kptr = uptr[0]->Get(signature, nbytes);
    if (kptr[0] == nullptr) {
      record_kernel_ = new UopKernel(static_cast<char*>(signature), nbytes);
      assert((*finit)(signature) == 0);
      CHECK_EQ((*finit)(signature), 0);
      kptr[0] = static_cast<UopKernel*>(record_kernel_);
      if (debug_flag_ & VTA_DEBUG_DUMP_UOP) {
        record_kernel_->Dump();
@ -1,26 +0,0 @@
# For CPU
FROM ubuntu:16.04

RUN apt-get update --fix-missing

COPY install/ubuntu_install_core.sh /install/ubuntu_install_core.sh
RUN bash /install/ubuntu_install_core.sh

COPY install/ubuntu_install_python.sh /install/ubuntu_install_python.sh
RUN bash /install/ubuntu_install_python.sh

COPY install/ubuntu_install_python_package.sh /install/ubuntu_install_python_package.sh
RUN bash /install/ubuntu_install_python_package.sh

COPY install/ubuntu_install_llvm.sh /install/ubuntu_install_llvm.sh
RUN bash /install/ubuntu_install_llvm.sh

COPY install/ubuntu_install_sphinx.sh /install/ubuntu_install_sphinx.sh
RUN bash /install/ubuntu_install_sphinx.sh

# Enable doxygen for c++ doc build
RUN apt-get update && apt-get install -y doxygen graphviz

# Fix recommonmark to latest version
RUN git clone https://github.com/rtfd/recommonmark
RUN cd recommonmark; python3 setup.py install
@ -1,8 +0,0 @@
# For lint test
FROM ubuntu:16.04

RUN apt-get update && apt-get install -y sudo wget
COPY install/ubuntu_install_python.sh /install/ubuntu_install_python.sh
RUN bash /install/ubuntu_install_python.sh
RUN apt-get install -y doxygen graphviz
RUN pip3 install cpplint pylint
@ -1,35 +0,0 @@
# CI Build Scripts

This directory contains the files and setup instructions to run all tests.

## Run locally

To run locally, we need to first install
[docker](https://docs.docker.com/engine/installation/).

Then we can run the tasks defined in the [Jenkinsfile](../../Jenkinsfile) by
using [`ci_build.sh`](./ci_build.sh). For example

- lint the Python code

```bash
./ci_build.sh lint make pylint
```

- build the code with CUDA support

```bash
./ci_build.sh gpu tests/scripts/task_build.sh
```

- run the Python unit tests

```bash
./ci_build.sh gpu tests/scripts/task_python_test.sh
```

- build the documentation; the results will be available at `docs/_build/html`

```bash
tests/ci_build/ci_build.sh gpu tests/scripts/task_python_docs.sh
```
@ -1,126 +0,0 @@
#!/usr/bin/env bash
#
# Execute command within a docker container
#
# Usage: ci_build.sh <CONTAINER_TYPE> [--dockerfile <DOCKERFILE_PATH>] [-it]
#        <COMMAND>
#
# CONTAINER_TYPE: Type of the docker container used to run the build: e.g.,
#                 (cpu | gpu)
#
# DOCKERFILE_PATH: (Optional) Path to the Dockerfile used for docker build. If
#                  this optional value is not supplied (via the --dockerfile
#                  flag), Dockerfile.CONTAINER_TYPE will be used by default.
#
# COMMAND: Command to be executed in the docker container
#
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

# Get the command line arguments.
CONTAINER_TYPE=$( echo "$1" | tr '[:upper:]' '[:lower:]' )
shift 1

# Dockerfile to be used in docker build
DOCKERFILE_PATH="${SCRIPT_DIR}/Dockerfile.${CONTAINER_TYPE}"
DOCKER_CONTEXT_PATH="${SCRIPT_DIR}"

if [[ "$1" == "--dockerfile" ]]; then
    DOCKERFILE_PATH="$2"
    DOCKER_CONTEXT_PATH=$(dirname "${DOCKERFILE_PATH}")
    echo "Using custom Dockerfile path: ${DOCKERFILE_PATH}"
    echo "Using custom docker build context path: ${DOCKER_CONTEXT_PATH}"
    shift 2
fi

if [[ "$1" == "-it" ]]; then
    CI_DOCKER_EXTRA_PARAMS+=('-it')
    shift 1
fi

if [[ ! -f "${DOCKERFILE_PATH}" ]]; then
    echo "Invalid Dockerfile path: \"${DOCKERFILE_PATH}\""
    exit 1
fi

COMMAND=("$@")

# Validate command line arguments.
if [ "$#" -lt 1 ] || [ ! -e "${SCRIPT_DIR}/Dockerfile.${CONTAINER_TYPE}" ]; then
    supported_container_types=$( ls -1 ${SCRIPT_DIR}/Dockerfile.* | \
        sed -n 's/.*Dockerfile\.\([^\/]*\)/\1/p' | tr '\n' ' ' )
    echo "Usage: $(basename $0) CONTAINER_TYPE COMMAND"
    echo "       CONTAINER_TYPE can be one of [${supported_container_types}]"
    echo "       COMMAND is a command (with arguments) to run inside"
    echo "               the container."
    exit 1
fi

# Use nvidia-docker if the container is GPU.
if [[ "${CONTAINER_TYPE}" == *"gpu"* ]]; then
    DOCKER_BINARY="nvidia-docker"
else
    DOCKER_BINARY="docker"
fi

# Helper function to traverse directories up until given file is found.
function upsearch () {
    test / == "$PWD" && return || \
        test -e "$1" && echo "$PWD" && return || \
        cd .. && upsearch "$1"
}

# Set up WORKSPACE and BUILD_TAG. Jenkins will set them for you or we pick
# reasonable defaults if you run it outside of Jenkins.
WORKSPACE="${WORKSPACE:-${SCRIPT_DIR}/../../}"
BUILD_TAG="${BUILD_TAG:-nnvm-ci}"

# Determine the docker image name
DOCKER_IMG_NAME="${BUILD_TAG}.${CONTAINER_TYPE}"

# Under Jenkins matrix build, the build tag may contain characters such as
# commas (,) and equal signs (=), which are not valid inside docker image names.
DOCKER_IMG_NAME=$(echo "${DOCKER_IMG_NAME}" | sed -e 's/=/_/g' -e 's/,/-/g')

# Convert to all lower-case, as per requirement of Docker image names
DOCKER_IMG_NAME=$(echo "${DOCKER_IMG_NAME}" | tr '[:upper:]' '[:lower:]')

# Print arguments.
echo "WORKSPACE: ${WORKSPACE}"
echo "CI_DOCKER_EXTRA_PARAMS: ${CI_DOCKER_EXTRA_PARAMS[@]}"
echo "COMMAND: ${COMMAND[@]}"
echo "CONTAINER_TYPE: ${CONTAINER_TYPE}"
echo "BUILD_TAG: ${BUILD_TAG}"
echo "DOCKER CONTAINER NAME: ${DOCKER_IMG_NAME}"
echo ""


# Build the docker container.
echo "Building container (${DOCKER_IMG_NAME})..."
docker build -t ${DOCKER_IMG_NAME} \
    -f "${DOCKERFILE_PATH}" "${DOCKER_CONTEXT_PATH}"

# Check docker build status
if [[ $? != "0" ]]; then
    echo "ERROR: docker build failed."
    exit 1
fi

# Run the command inside the container.
echo "Running '${COMMAND[@]}' inside ${DOCKER_IMG_NAME}..."

# By default we cleanup - remove the container once it finish running (--rm)
# and share the PID namespace (--pid=host) so the process inside does not have
# pid 1 and SIGKILL is propagated to the process inside (jenkins can kill it).
echo ${DOCKER_BINARY}
${DOCKER_BINARY} run --rm --pid=host \
    -v ${WORKSPACE}:/workspace \
    -w /workspace \
    -e "CI_BUILD_HOME=/workspace" \
    -e "CI_BUILD_USER=$(id -u -n)" \
    -e "CI_BUILD_UID=$(id -u)" \
    -e "CI_BUILD_GROUP=$(id -g -n)" \
    -e "CI_BUILD_GID=$(id -g)" \
    ${CI_DOCKER_EXTRA_PARAMS[@]} \
    ${DOCKER_IMG_NAME} \
    bash tests/ci_build/with_the_same_user \
    ${COMMAND[@]}
@ -1,6 +0,0 @@
# install libraries for building c++ core on ubuntu
apt-get update && apt-get install -y --no-install-recommends --force-yes \
    git make libgtest-dev cmake wget unzip libtinfo-dev libz-dev \
    libcurl4-openssl-dev libopenblas-dev g++ sudo

cd /usr/src/gtest && cmake CMakeLists.txt && make && cp *.a /usr/lib
@ -1,22 +0,0 @@
echo deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-4.0 main \
    >> /etc/apt/sources.list.d/llvm.list
echo deb-src http://apt.llvm.org/xenial/ llvm-toolchain-xenial-4.0 main \
    >> /etc/apt/sources.list.d/llvm.list

echo deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-5.0 main \
    >> /etc/apt/sources.list.d/llvm.list
echo deb-src http://apt.llvm.org/xenial/ llvm-toolchain-xenial-5.0 main \
    >> /etc/apt/sources.list.d/llvm.list

echo deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-6.0 main \
    >> /etc/apt/sources.list.d/llvm.list
echo deb-src http://apt.llvm.org/xenial/ llvm-toolchain-xenial-6.0 main \
    >> /etc/apt/sources.list.d/llvm.list

echo deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial main \
    >> /etc/apt/sources.list.d/llvm.list
echo deb-src http://apt.llvm.org/xenial/ llvm-toolchain-xenial main \
    >> /etc/apt/sources.list.d/llvm.list

wget -O - http://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add -
apt-get update && apt-get install -y --force-yes llvm-4.0 llvm-5.0 llvm-6.0 clang-6.0
@ -1,12 +0,0 @@
# install python and pip, don't modify this, modify install_python_package.sh
apt-get update && apt-get install -y python-dev

# python 3.6
apt-get update && yes | apt-get install software-properties-common
add-apt-repository ppa:jonathonf/python-3.6 && \
    apt-get update && apt-get install -y python-pip python-dev python3.6 python3.6-dev

rm -f /usr/bin/python3 && ln -s /usr/bin/python3.6 /usr/bin/python3

# Install pip
cd /tmp && wget https://bootstrap.pypa.io/get-pip.py && python2 get-pip.py && python3.6 get-pip.py
@ -1,3 +0,0 @@
# install libraries for python package on ubuntu
pip2 install nose pylint numpy nose-timer cython decorator scipy tornado
pip3 install nose pylint numpy nose-timer cython decorator scipy tornado typed_ast
@ -1 +0,0 @@
pip3 install sphinx sphinx-gallery sphinx_rtd_theme matplotlib Image "commonmark>=0.7.3" "docutils>=0.11"
@ -1,34 +0,0 @@
#!/usr/bin/env bash

# This script is a wrapper creating the same user inside container as the one
# running the ci_build.sh outside the container. It also sets the home directory
# for the user inside container to match the same absolute path as the workspace
# outside of container. Do not run this manually. It does not make sense. It is
# intended to be called by ci_build.sh only.

set -e

COMMAND=("$@")

if ! touch /this_is_writable_file_system; then
  echo "You can't write to your filesystem!"
  echo "If you are in Docker you should check you do not have too many images" \
       "with too many files in them. Docker has some issue with it."
  exit 1
else
  rm /this_is_writable_file_system
fi

getent group "${CI_BUILD_GID}" || addgroup --gid "${CI_BUILD_GID}" "${CI_BUILD_GROUP}"
getent passwd "${CI_BUILD_UID}" || adduser --gid "${CI_BUILD_GID}" --uid "${CI_BUILD_UID}" \
    --gecos "${CI_BUILD_USER} (generated by with_the_same_user script)" \
    --disabled-password --home "${CI_BUILD_HOME}" --quiet "${CI_BUILD_USER}"
usermod -a -G sudo "${CI_BUILD_USER}"
echo "${CI_BUILD_USER} ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/90-nopasswd-sudo

HOME=${CI_BUILD_HOME} \
  sudo -u "#${CI_BUILD_UID}" --preserve-env \
  PATH=${PATH} \
  LD_LIBRARY_PATH=${LD_LIBRARY_PATH} \
  HOME=${CI_BUILD_HOME} \
  ${COMMAND[@]}
@ -1,406 +0,0 @@
|
|||
[MASTER]
|
||||
|
||||
# Specify a configuration file.
|
||||
#rcfile=
|
||||
|
||||
# Python code to execute, usually for sys.path manipulation such as
|
||||
# pygtk.require().
|
||||
#init-hook=
|
||||
|
||||
# Add files or directories to the blacklist. They should be base names, not
|
||||
# paths.
|
||||
ignore=CVS, _cy2, _cy3
|
||||
|
||||
# Add files or directories matching the regex patterns to the blacklist. The
|
||||
# regex matches against base names, not paths.
|
||||
ignore-patterns=
|
||||
|
||||
# Pickle collected data for later comparisons.
|
||||
persistent=yes
|
||||
|
||||
# List of plugins (as comma separated values of python modules names) to load,
|
||||
# usually to register additional checkers.
|
||||
load-plugins=
|
||||
|
||||
# Use multiple processes to speed up Pylint.
|
||||
jobs=8
|
||||
|
||||
# Allow loading of arbitrary C extensions. Extensions are imported into the
|
||||
# active Python interpreter and may run arbitrary code.
|
||||
unsafe-load-any-extension=no
|
||||
|
||||
# A comma-separated list of package or module names from where C extensions may
|
||||
# be loaded. Extensions are loading into the active Python interpreter and may
|
||||
# run arbitrary code
|
||||
extension-pkg-whitelist=numpy,opencv
|
||||
|
||||
# Allow optimization of some AST trees. This will activate a peephole AST
|
||||
# optimizer, which will apply various small optimizations. For instance, it can
|
||||
# be used to obtain the result of joining multiple strings with the addition
|
||||
# operator. Joining a lot of strings can lead to a maximum recursion error in
|
||||
# Pylint and this flag can prevent that. It has one side effect, the resulting
|
||||
# AST will be different than the one from reality. This option is deprecated
|
||||
# and it will be removed in Pylint 2.0.
|
||||
optimize-ast=no
|
||||
|
||||
|
||||
[MESSAGES CONTROL]
|
||||
|
||||
# Only show warnings with the listed confidence levels. Leave empty to show
|
||||
# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
|
||||
confidence=
|
||||
|
||||
# Enable the message, report, category or checker with the given id(s). You can
|
||||
# either give multiple identifier separated by comma (,) or put this option
|
||||
# multiple time (only on the command line, not in the configuration file where
|
||||
# it should appear only once). See also the "--disable" option for examples.
|
||||
enable=indexing-exception,old-raise-syntax
|
||||
|
||||
# Disable the message, report, category or checker with the given id(s). You
|
||||
# can either give multiple identifiers separated by comma (,) or put this
|
||||
# option multiple times (only on the command line, not in the configuration
|
||||
# file where it should appear only once).You can also use "--disable=all" to
|
||||
# disable everything first and then reenable specific checks. For example, if
|
||||
# you want to run only the similarities checker, you can use "--disable=all
|
||||
# --enable=similarities". If you want to run only the classes checker, but have
|
||||
# no Warning level messages displayed, use"--disable=all --enable=classes
|
||||
# --disable=W"
|
||||
disable=design,similarities,no-self-use,attribute-defined-outside-init,locally-disabled,star-args,pointless-except,bad-option-value,global-statement,fixme,suppressed-message,useless-suppression,locally-enabled,no-member,no-name-in-module,import-error,unsubscriptable-object,unbalanced-tuple-unpacking,undefined-variable,protected-access
|
||||
|
||||
[REPORTS]
|
||||
|
||||
# Set the output format. Available formats are text, parseable, colorized, msvs
|
||||
# (visual studio) and html. You can also give a reporter class, eg
|
||||
# mypackage.mymodule.MyReporterClass.
|
||||
output-format=text
|
||||
|
||||
# Put messages in a separate file for each module / package specified on the
|
||||
# command line instead of printing them on stdout. Reports (if any) will be
|
||||
# written in a file name "pylint_global.[txt|html]". This option is deprecated
|
||||
# and it will be removed in Pylint 2.0.
|
||||
files-output=no
|
||||
|
||||
# Tells whether to display a full report or only the messages
|
||||
reports=no
|
||||
|
||||
# Python expression which should return a note less than 10 (10 is the highest
|
||||
# note). You have access to the variables errors warning, statement which
|
||||
# respectively contain the number of errors / warnings messages and the total
|
||||
# number of statements analyzed. This is used by the global evaluation report
|
||||
# (RP0004).
|
||||
evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)

# Template used to display messages. This is a python new-style format string
# used to format the message information. See doc for all details
#msg-template=


[FORMAT]

# Maximum number of characters on a single line.
max-line-length=100

# Regexp for a line that is allowed to be longer than the limit.
ignore-long-lines=^\s*(# )?<?https?://\S+>?$

# Allow the body of an if to be on the same line as the test if there is no
# else.
single-line-if-stmt=no

# List of optional constructs for which whitespace checking is disabled. `dict-
# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}.
# `trailing-comma` allows a space between comma and closing bracket: (a, ).
# `empty-line` allows space-only lines.
no-space-check=trailing-comma,dict-separator

# Maximum number of lines in a module
max-module-lines=1000

# String used as indentation unit. This is usually "    " (4 spaces) or "\t" (1
# tab).
indent-string='    '

# Number of spaces of indent required inside a hanging or continued line.
indent-after-paren=4

# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
expected-line-ending-format=


[SPELLING]

# Spelling dictionary name. Available dictionaries: none. To make it work,
# install the python-enchant package.
spelling-dict=

# List of comma separated words that should not be checked.
spelling-ignore-words=

# A path to a file that contains the private dictionary; one word per line.
spelling-private-dict-file=

# Tells whether to store unknown words to the private dictionary indicated in
# the --spelling-private-dict-file option instead of raising a message.
spelling-store-unknown-words=no


[MISCELLANEOUS]

# List of note tags to take into consideration, separated by a comma.
notes=FIXME,XXX


[TYPECHECK]

# Tells whether missing members accessed in a mixin class should be ignored. A
# mixin class is detected if its name ends with "mixin" (case insensitive).
ignore-mixin-members=yes

# List of module names for which member attributes should not be checked
# (useful for modules/projects where namespaces are manipulated during runtime
# and thus existing member attributes cannot be deduced by static analysis).
# It supports qualified module names, as well as Unix pattern matching.
ignored-modules=

# List of class names for which member attributes should not be checked (useful
# for classes with dynamically set attributes). This supports the use of
# qualified names.
ignored-classes=optparse.Values,thread._local,_thread._local

# List of members which are set dynamically and missed by pylint's inference
# system, and so shouldn't trigger E1101 when accessed. Python regular
# expressions are accepted.
generated-members=

# List of decorators that produce context managers, such as
# contextlib.contextmanager. Add to this list to register other decorators that
# produce valid context managers.
contextmanager-decorators=contextlib.contextmanager


[LOGGING]

# Logging modules to check that the string format arguments are in logging
# function parameter format
logging-modules=logging


[SIMILARITIES]

# Minimum number of lines for a similarity.
min-similarity-lines=4

# Ignore comments when computing similarities.
ignore-comments=yes

# Ignore docstrings when computing similarities.
ignore-docstrings=yes

# Ignore imports when computing similarities.
ignore-imports=no


[VARIABLES]

# Tells whether we should check for unused imports in __init__ files.
init-import=no

# A regular expression matching the name of dummy variables (i.e. variables
# expected to be unused).
dummy-variables-rgx=(_+[a-zA-Z0-9]*?$)|dummy

# List of additional names supposed to be defined in builtins. Remember that
# you should avoid defining new builtins when possible.
additional-builtins=

# List of strings which can identify a callback function by name. A callback
# name must start or end with one of those strings.
callbacks=cb_,_cb

# List of qualified module names which can have objects that can redefine
# builtins.
redefining-builtins-modules=six.moves,future.builtins


[BASIC]

# Good variable names which should always be accepted, separated by a comma
good-names=i,j,_,a,b,op,x,y,wd,lr,kv,k,v,s,p,h,c,m,n,X,t,g,f

# Bad variable names which should always be refused, separated by a comma
bad-names=

# Colon-delimited sets of names that determine each other's naming style when
# the name regexes allow several styles.
name-group=

# Include a hint for the correct naming format with invalid-name
include-naming-hint=no

# List of decorators that produce properties, such as abc.abstractproperty. Add
# to this list to register other decorators that produce valid properties.
property-classes=abc.abstractproperty

# Regular expression matching correct module names
module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$

# Naming hint for module names
module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$

# Regular expression matching correct constant names
const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$

# Naming hint for constant names
const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$

# Regular expression matching correct inline iteration names
inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$

# Naming hint for inline iteration names
inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$

# Regular expression matching correct method names
method-rgx=[a-z_][a-z0-9_]{2,30}$

# Naming hint for method names
method-name-hint=[a-z_][a-z0-9_]{2,30}$

# Regular expression matching correct class attribute names
class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$

# Naming hint for class attribute names
class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$

# Regular expression matching correct argument names
argument-rgx=[a-z_][a-z0-9_]{2,30}$

# Naming hint for argument names
argument-name-hint=[a-z_][a-z0-9_]{2,30}$

# Regular expression matching correct attribute names
attr-rgx=[a-z_][a-z0-9_]{2,30}$

# Naming hint for attribute names
attr-name-hint=[a-z_][a-z0-9_]{2,30}$

# Regular expression matching correct variable names
variable-rgx=[a-z_][a-z0-9_]{2,30}$

# Naming hint for variable names
variable-name-hint=[a-z_][a-z0-9_]{2,30}$

# Regular expression matching correct function names
function-rgx=[a-z_][a-z0-9_]{2,30}$

# Naming hint for function names
function-name-hint=[a-z_][a-z0-9_]{2,30}$

# Regular expression matching correct class names
class-rgx=[A-Z_][a-zA-Z0-9]+$

# Naming hint for class names
class-name-hint=[A-Z_][a-zA-Z0-9]+$

# Regular expression which should only match function or class names that do
# not require a docstring.
no-docstring-rgx=^_

# Minimum line length for functions/classes that require docstrings, shorter
# ones are exempt.
docstring-min-length=10


[ELIF]

# Maximum number of nested blocks for function / method body
max-nested-blocks=5


[CLASSES]

# List of method names used to declare (i.e. assign) instance attributes.
defining-attr-methods=__init__,__new__,setUp

# List of valid names for the first argument in a class method.
valid-classmethod-first-arg=cls

# List of valid names for the first argument in a metaclass class method.
valid-metaclass-classmethod-first-arg=mcs

# List of member names, which should be excluded from the protected access
# warning.
exclude-protected=_asdict,_fields,_replace,_source,_make


[IMPORTS]

# Deprecated modules which should not be used, separated by a comma
deprecated-modules=optparse

# Create a graph of every (i.e. internal and external) dependency in the
# given file (report RP0402 must not be disabled)
import-graph=

# Create a graph of external dependencies in the given file (report RP0402 must
# not be disabled)
ext-import-graph=

# Create a graph of internal dependencies in the given file (report RP0402 must
# not be disabled)
int-import-graph=

# Force import order to recognize a module as part of the standard
# compatibility libraries.
known-standard-library=

# Force import order to recognize a module as part of a third party library.
known-third-party=enchant

# Analyse import fallback blocks. This can be used to support both Python 2 and
# 3 compatible code, which means that the block might have code that exists
# only in one or the other interpreter, leading to false positives when
# analysed.
analyse-fallback-blocks=no


[DESIGN]

# Maximum number of arguments for function / method
max-args=5

# Argument names that match this expression will be ignored. Defaults to names
# with a leading underscore.
ignored-argument-names=_.*

# Maximum number of locals for function / method body
max-locals=15

# Maximum number of return / yield statements for function / method body
max-returns=6

# Maximum number of branches for function / method body
max-branches=12

# Maximum number of statements in function / method body
max-statements=50

# Maximum number of parents for a class (see R0901).
max-parents=7

# Maximum number of attributes for a class (see R0902).
max-attributes=7

# Minimum number of public methods for a class (see R0903).
min-public-methods=0

# Maximum number of public methods for a class (see R0904).
max-public-methods=20

# Maximum number of boolean expressions in an if statement
max-bool-expr=5


[EXCEPTIONS]

# Exceptions that will emit a warning when being caught. Defaults to
# "Exception"
overgeneral-exceptions=Exception
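
# This configuration is normally driven by the repository's `make pylint`
# target; it can also be applied directly with pylint's --rcfile flag, e.g.
# (the rcfile path and target package below are illustrative):
#   python -m pylint --rcfile=tests/lint/pylintrc python/vta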

@ -262,5 +262,4 @@ def test_vta_conv2d():

if __name__ == "__main__":
    test_cpu_conv2d()
    exit(0)
    test_vta_conv2d()

@ -5,7 +5,8 @@ import vta
import numpy as np
import topi
from collections import namedtuple
from tvm.contrib import rpc, util
from tvm import rpc
from tvm.contrib import util
import pandas as pd

host = os.environ.get("VTA_PYNQ_RPC_HOST", "pynq")

@ -1,6 +1,6 @@
import os
import tvm
from tvm.contrib import rpc
from tvm import rpc
from vta import get_bitstream_path, download_bitstream, program_fpga, reconfig_runtime

host = os.environ.get("VTA_PYNQ_RPC_HOST", "pynq")
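
The recurring change in these hunks moves the RPC client from tvm.contrib.rpc
to the top-level tvm.rpc namespace. A minimal sketch of a connection under the
new imports, reusing the VTA_PYNQ_RPC_HOST convention shown above (the port
variable and its 9091 default are assumptions, not taken from this diff):

    import os
    from tvm import rpc

    host = os.environ.get("VTA_PYNQ_RPC_HOST", "pynq")
    port = int(os.environ.get("VTA_PYNQ_RPC_PORT", "9091"))
    remote = rpc.connect(host, port)  # opens an RPC session to the board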

@ -2,7 +2,7 @@
import tvm
import numpy as np
import topi
from tvm.contrib import rpc, util
from tvm.contrib import util

import vta
import vta.testing

@ -1,15 +0,0 @@
#!/bin/bash
echo "Build TVM..."
cd tvm
cp cmake/config.cmake .
echo set\(USE_LLVM llvm-config-5.0\) >> config.cmake
echo set\(USE_RPC ON\) >> config.cmake
echo set\(USE_BLAS openblas\) >> config.cmake
echo set\(USE_GRAPH_RUNTIME ON\) >> config.cmake
make "$@"
make cython
make cython3
cd ..

echo "Build VTA..."
make "$@"

@ -1,8 +0,0 @@
#!/bin/bash
echo "Cleanup data..."

cd tvm
make clean

cd ..
make clean

@ -1,15 +0,0 @@
#!/bin/bash
echo "Check codestyle of c++ code..."
make cpplint || exit -1
echo "Check codestyle of python code..."
make pylint || exit -1
echo "Check documentations of c++ code..."
make doc 2>log.txt
(cat log.txt| grep -v ENABLE_PREPROCESSING |grep -v "unsupported tag") > logclean.txt
echo "---------Error Log----------"
cat logclean.txt
echo "----------------------------"
(cat logclean.txt|grep warning) && exit -1
(cat logclean.txt|grep error) && exit -1
rm logclean.txt
rm log.txt

@ -1,17 +0,0 @@
#!/bin/bash
cd tvm
make cython
make cython3
cd ../

mkdir -p docs/_build/html
# C++ doc
make doc

rm -rf python/vta/*.pyc python/vta/*/*.pyc

cd docs
PYTHONPATH=../python:../tvm/python:../tvm/topi/python:../tvm/nnvm/python make html || exit -1
cd _build/html
tar czf docs.tgz *
mv docs.tgz ../../../

@ -1,11 +0,0 @@
#!/bin/bash

export PYTHONPATH=python:tvm/nnvm/python:tvm/python:tvm/topi/python

echo "Running unittest..."
python -m nose -v tests/python/unittest || exit -1
python3 -m nose -v tests/python/unittest || exit -1

echo "Running integration test..."
python -m nose -v tests/python/integration || exit -1
python3 -m nose -v tests/python/integration || exit -1

@ -1,3 +1,2 @@
Tutorials
=========
This page contains the python tutorials about how to use TVM to program VTA.
VTA Tutorials
=============

@ -5,7 +5,7 @@

This tutorial provides an overview on how to use TVM to map a 2D convolution
workload efficiently on the VTA design.
We recommend covering the :ref:`mat-mult-opt` tutorial first.
We recommend covering the :ref:`vta-mat-mult-opt` tutorial first.

2D convolution is dominant in most computer vision deep neural networks.
In this tutorial, we will demonstrate TVM schedule optimizations to map

@ -26,7 +26,8 @@ import tvm
import vta
import numpy as np

from tvm.contrib import rpc, util
from tvm import rpc
from tvm.contrib import util
from vta.testing import simulator

# Load VTA parameters from the config.json file

@ -423,5 +424,3 @@ print("Successful 2D convolution test!")
# use of hardware specific optimizations, such as latency hiding with
# virtual threading.
#

@ -5,7 +5,7 @@ Simple Matrix Multiply
======================
**Author**: `Thierry Moreau <https://homes.cs.washington.edu/~moreau/>`_

In this tutorial, we will build on top of the :ref:`get-started` tutorial
In this tutorial, we will build on top of the :ref:`vta-get-started` tutorial
and introduce additional concepts required to implement matrix multiplication
on VTA with the TVM workflow.
"""

@ -22,7 +22,8 @@ import os
import tvm
import vta
import numpy as np
from tvm.contrib import rpc, util
from tvm import rpc
from tvm.contrib import util
from vta.testing import simulator

# Load VTA parameters from the config.json file

@ -107,7 +108,7 @@ elif env.TARGET == "sim":
# Tiling by a (2, 2) tile shape ensures that data within each tile is
# contiguous.
# The resulting tiled tensor has a shape of (2, 4, 2, 2).
#
#
# .. image:: https://raw.githubusercontent.com/uwsaml/web-data/master/vta/tutorial/data_tiling.png
#    :align: center
#    :width: 480px
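
To make the tiling comment above concrete, here is a small numpy sketch of the
same (2, 2) tiling (the array name and sizes are illustrative, not taken from
the tutorial):

    import numpy as np

    A = np.arange(32).reshape(4, 8)  # a (4, 8) matrix
    # split each axis into (outer, tile) pairs, then group both tile axes last
    A_tiled = np.ascontiguousarray(A.reshape(2, 2, 4, 2).transpose(0, 2, 1, 3))
    print(A_tiled.shape)  # (2, 4, 2, 2); each (2, 2) tile is now contiguous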

@ -451,4 +452,3 @@ print("Successful matrix multiply test!")
# - Compiling the function to the VTA target.
# - Running the compiled module and verifying it against a numpy implementation.
#

@ -1,5 +1,5 @@
"""
.. _mat-mult-opt:
.. _vta-mat-mult-opt:

Matrix Multiply Blocking
========================

@ -7,7 +7,7 @@ Matrix Multiply Blocking

This tutorial provides an overview on how to use TVM to map matrix
multiplication efficiently on the VTA design.
We recommend covering the :ref:`basic-mat-mult` tutorial first.
We recommend covering the :ref:`vta-basic-mat-mult` tutorial first.

In this tutorial, we will demonstrate TVM schedule optimizations to break large
neural network operators down onto smaller blocks to achieve computation within

@ -25,7 +25,8 @@ import os
import tvm
import vta
import numpy as np
from tvm.contrib import rpc, util
from tvm import rpc
from tvm.contrib import util
from vta.testing import simulator

# Load VTA parameters from the config.json file

@ -183,7 +184,7 @@ print(tvm.lower(s, [data, weight, res], simple_mode=True))
#    :width: 480px
#
# .. note::
#
#
#   The code after loop splitting and reordering is equivalent to the following
#   pseudo-code. We ignore the batch axis since we are only performing single-batch
#   inference in this example:

@ -359,5 +360,3 @@ print("Successful blocked matrix multiply test!")
# This allows us to map arbitrarily large computation onto limited
# hardware accelerator resources.
#

@ -29,7 +29,8 @@ import requests
import time

from nnvm.compiler import graph_attr
from tvm.contrib import graph_runtime, rpc, util
from tvm import rpc
from tvm.contrib import graph_runtime, util
from tvm.contrib.download import download
from vta.testing import simulator

@ -1,5 +1,5 @@
"""
.. _get-started:
.. _vta-get-started:

Get Started with VTA
====================

@ -49,7 +49,8 @@ env = vta.get_env()
# the board with a VTA bitstream.

# We'll need the TVM RPC module and the VTA simulator module
from tvm.contrib import rpc, util
from tvm import rpc
from tvm.contrib import util
from vta.testing import simulator

# We read the Pynq RPC host IP address and port number from the OS environment

@ -384,4 +385,3 @@ print("Successful vector add test!")
# to learn more about the supported operations, schedule primitives
# and other features supported by TVM to program VTA.
#