[TEST] CI infrastructure (#30)
Parent: edac6a8dac
Commit: 395f4c36ad
@@ -0,0 +1,128 @@
#!groovy
// -*- mode: groovy -*-
// Jenkins pipeline
// See documentation at https://jenkins.io/doc/book/pipeline/jenkinsfile/

// nnvm libraries
vta_lib = "lib/libvta.so, lib/libvta.so.json"
vta_lib += ", nnvm/tvm/lib/libtvm.so, nnvm/tvm/lib/libtopi.so, nnvm/lib/libnnvm_compiler.so"

// command to start a docker container
docker_run = 'tests/ci_build/ci_build.sh'
// timeout in minutes
max_time = 60

// initialize source code
def init_git() {
  checkout scm
  retry(5) {
    timeout(time: 2, unit: 'MINUTES') {
      sh 'git submodule update --init --recursive'
    }
  }
}

def init_git_win() {
  checkout scm
  retry(5) {
    timeout(time: 2, unit: 'MINUTES') {
      bat 'git submodule update --init --recursive'
    }
  }
}

stage("Sanity Check") {
  timeout(time: max_time, unit: 'MINUTES') {
    node('linux') {
      ws('workspace/vta/sanity') {
        init_git()
        sh "${docker_run} lint ./tests/scripts/task_lint.sh"
      }
    }
  }
}

// Run make. First try an incremental build from a previous workspace in the hope of
// speeding up compilation. If anything goes wrong, clean the workspace and
// build from scratch.
def make(docker_type, make_flag) {
  timeout(time: max_time, unit: 'MINUTES') {
    sh "${docker_run} ${docker_type} cp make/sim_sample.json config.json"
    try {
      sh "${docker_run} ${docker_type} ./tests/scripts/task_build.sh ${make_flag}"
    } catch (exc) {
      echo 'Incremental compilation failed. Fall back to build from scratch'
      sh "${docker_run} ${docker_type} ./tests/scripts/task_clean.sh"
      sh "${docker_run} ${docker_type} ./tests/scripts/task_build.sh ${make_flag}"
    }
  }
}

// pack libraries for later use
def pack_lib(name, libs) {
  sh """
     echo "Packing ${libs} into ${name}"
     echo ${libs} | sed -e 's/,/ /g' | xargs md5sum
     """
  stash includes: libs, name: name
}

// unpack libraries saved before
def unpack_lib(name, libs) {
  unstash name
  sh """
     echo "Unpacked ${libs} from ${name}"
     echo ${libs} | sed -e 's/,/ /g' | xargs md5sum
     """
}

stage('Build') {
  timeout(time: max_time, unit: 'MINUTES') {
    node('linux') {
      ws('workspace/vta/build') {
        init_git()
        make('cpu', '-j2')
        pack_lib('cpu', vta_lib)
      }
    }
  }
}

stage('Tests') {
  parallel 'python': {
    node('linux') {
      ws('workspace/vta/it-python') {
        init_git()
        unpack_lib('cpu', vta_lib)
        timeout(time: max_time, unit: 'MINUTES') {
          sh "${docker_run} cpu ./tests/scripts/task_python_test.sh"
        }
      }
    }
  },
  'docs': {
    node('linux') {
      ws('workspace/vta/docs-python') {
        init_git()
        unpack_lib('cpu', vta_lib)
        timeout(time: max_time, unit: 'MINUTES') {
          sh "${docker_run} cpu ./tests/scripts/task_python_docs.sh"
        }
        pack_lib('mydocs', 'docs.tgz')
      }
    }
  }
}

stage('Deploy') {
  node('docker' && 'doc') {
    ws('workspace/vta/deploy-docs') {
      if (env.BRANCH_NAME == "master") {
        unpack_lib('mydocs', 'docs.tgz')
        sh "tar xf docs.tgz -C /var/vta-docs"
      }
    }
  }
}
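For reference, the Build stage above can be replicated by hand with the same helper script the pipeline calls. This is only a sketch, assuming Docker is installed and the commands are run from the repository root:

```bash
# Mirror of the Jenkins Build stage: copy the simulation config, then build.
tests/ci_build/ci_build.sh cpu cp make/sim_sample.json config.json
tests/ci_build/ci_build.sh cpu ./tests/scripts/task_build.sh -j2
```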
@@ -16,6 +16,7 @@ import os, subprocess
import shlex
import recommonmark
import sphinx_gallery
from tvm.contrib import rpc, graph_runtime
from recommonmark.parser import CommonMarkParser
from recommonmark.transform import AutoStructify
@@ -1,6 +1,6 @@
/*!
 * Copyright (c) 2018 by Contributors
 * \file vta_driver.h
 * \file driver.h
 * \brief Driver interface that is used by runtime.
 *
 * Driver's implementation is device specific.
@@ -46,7 +46,7 @@ void VTADeviceFree(VTADeviceHandle handle);

/*!
 * \brief Launch the instructions block until done.
 * \param The device handle.
 * \param device The device handle.
 * \param insn_phy_addr The physical address of instruction stream.
 * \param insn_count Instruction count.
 * \param wait_cycles The maximum number of cycles to wait.
@@ -1,6 +1,6 @@
/*!
 * Copyright (c) 2018 by Contributors
 * \file vta_defines.h
 * \file hw_spec.h
 * \brief Preprocessor definitions for VTA HLS design and runtime.
 */
@@ -25,7 +25,6 @@ extern "C" {

/*!
 * \brief Allocate data buffer.
 * \param cmd The VTA command handle.
 * \param size Buffer size.
 * \return A pointer to the allocated buffer.
 */
@@ -33,14 +32,12 @@ void* VTABufferAlloc(size_t size);

/*!
 * \brief Free data buffer.
 * \param cmd The VTA command handle.
 * \param buffer The data buffer to be freed.
 */
void VTABufferFree(void* buffer);

/*!
 * \brief Copy data buffer from one location to another.
 * \param cmd The VTA command handle.
 * \param from The source buffer base address.
 * \param from_offset The offset of the source buffer.
 * \param to The target buffer base address.
@@ -145,6 +142,7 @@ void VTALoadBuffer2D(VTACommandHandle cmd,
 * \param src_sram_index Source SRAM index.
 * \param src_memory_type Source memory type.
 * \param dst_dram_addr Destination DRAM address.
 * \param dst_elem_offset The destination DRAM offset in number of unit elements.
 * \param x_size The lowest dimension (x axis) size in number of unit elements.
 * \param y_size The number of rows.
 * \param x_stride The x axis stride.
@@ -0,0 +1,14 @@
{
  "TARGET" : "sim",
  "LOG_INP_WIDTH" : 3,
  "LOG_WGT_WIDTH" : 3,
  "LOG_ACC_WIDTH" : 5,
  "LOG_OUT_WIDTH" : 3,
  "LOG_BATCH" : 0,
  "LOG_BLOCK_IN" : 4,
  "LOG_BLOCK_OUT" : 4,
  "LOG_UOP_BUFF_SIZE" : 15,
  "LOG_INP_BUFF_SIZE" : 15,
  "LOG_WGT_BUFF_SIZE" : 15,
  "LOG_ACC_BUFF_SIZE" : 17
}
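The `LOG_*` fields appear to be base-2 logarithms of the underlying hardware parameters (for example, `LOG_BLOCK_IN: 4` would mean a 16-element input block and `LOG_UOP_BUFF_SIZE: 15` a 32 KB micro-op buffer); that reading is an assumption, not stated in the diff. A quick way to expand them once the file has been copied to `config.json`, as the Jenkinsfile's `make()` step does:

```bash
# Print 2^x for every LOG_* entry in config.json (assumes the log2 convention above).
python -c "import json; cfg = json.load(open('config.json')); \
print({k: 2 ** v for k, v in cfg.items() if k.startswith('LOG_')})"
```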
@@ -1,6 +1,8 @@
"""TVM-based VTA Compiler Toolchain"""
from __future__ import absolute_import as _abs

__version__ = "0.1.0"

from .environment import get_env, Environment
from .rpc_client import reconfig_runtime, program_fpga
@@ -0,0 +1,26 @@
# For CPU
FROM ubuntu:16.04

RUN apt-get update --fix-missing

COPY install/ubuntu_install_core.sh /install/ubuntu_install_core.sh
RUN bash /install/ubuntu_install_core.sh

COPY install/ubuntu_install_llvm.sh /install/ubuntu_install_llvm.sh
RUN bash /install/ubuntu_install_llvm.sh

COPY install/ubuntu_install_python.sh /install/ubuntu_install_python.sh
RUN bash /install/ubuntu_install_python.sh

COPY install/ubuntu_install_python_package.sh /install/ubuntu_install_python_package.sh
RUN bash /install/ubuntu_install_python_package.sh

COPY install/ubuntu_install_sphinx.sh /install/ubuntu_install_sphinx.sh
RUN bash /install/ubuntu_install_sphinx.sh

# Fix recommonmark to latest version
RUN git clone https://github.com/rtfd/recommonmark
RUN cd recommonmark; python setup.py install

# Enable doxygen for c++ doc build
RUN apt-get update && apt-get install -y doxygen graphviz
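This image is normally built indirectly by `ci_build.sh`, which derives the Dockerfile name from the container type. Building it by hand would look roughly like the following; the `Dockerfile.cpu` file name and the `vta-ci-cpu` tag are assumptions for illustration:

```bash
# Manual counterpart of the docker build step performed by ci_build.sh.
docker build -t vta-ci-cpu -f tests/ci_build/Dockerfile.cpu tests/ci_build
```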
@@ -0,0 +1,6 @@
# For lint test
FROM ubuntu:16.04

RUN apt-get update && apt-get install -y python-pip sudo
RUN apt-get install -y doxygen graphviz
RUN pip install cpplint pylint
@@ -0,0 +1,35 @@
# CI Build Scripts

This directory contains the files and setup instructions to run all tests.

## Run locally

To run locally, first install
[Docker](https://docs.docker.com/engine/installation/).

Then the tasks defined in the [Jenkinsfile](../../Jenkinsfile) can be run with
[`ci_build.sh`](./ci_build.sh). For example:

- Lint the Python code:

```bash
./ci_build.sh lint make pylint
```

- Build with CUDA support:

```bash
./ci_build.sh gpu tests/scripts/task_build.sh
```

- Run the Python unit tests:

```bash
./ci_build.sh gpu tests/scripts/task_python_test.sh
```

- Build the documentation (the results will be available at `docs/_build/html`):

```bash
tests/ci_build/ci_build.sh gpu tests/scripts/task_python_docs.sh
```
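- Run the full lint task, mirroring the Jenkinsfile's "Sanity Check" stage (a sketch; it assumes the `lint` container type, presumably defined by `Dockerfile.lint` in this directory):

```bash
./ci_build.sh lint ./tests/scripts/task_lint.sh
```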
@@ -0,0 +1,126 @@
#!/usr/bin/env bash
#
# Execute a command within a docker container.
#
# Usage: ci_build.sh <CONTAINER_TYPE> [--dockerfile <DOCKERFILE_PATH>] [-it]
#        <COMMAND>
#
# CONTAINER_TYPE: Type of the docker container used to run the build, e.g.
#                 (cpu | gpu)
#
# DOCKERFILE_PATH: (Optional) Path to the Dockerfile used for docker build. If
#                  this optional value is not supplied (via the --dockerfile
#                  flag), Dockerfile.CONTAINER_TYPE is used by default.
#
# COMMAND: Command to be executed in the docker container.
#
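# Example invocations, for illustration only, taken from the Jenkinsfile and
# the README in this directory (run from the repository root):
#   tests/ci_build/ci_build.sh lint ./tests/scripts/task_lint.sh
#   tests/ci_build/ci_build.sh cpu ./tests/scripts/task_python_test.sh
#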
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

# Get the command line arguments.
CONTAINER_TYPE=$( echo "$1" | tr '[:upper:]' '[:lower:]' )
shift 1

# Dockerfile to be used in docker build
DOCKERFILE_PATH="${SCRIPT_DIR}/Dockerfile.${CONTAINER_TYPE}"
DOCKER_CONTEXT_PATH="${SCRIPT_DIR}"

if [[ "$1" == "--dockerfile" ]]; then
    DOCKERFILE_PATH="$2"
    DOCKER_CONTEXT_PATH=$(dirname "${DOCKERFILE_PATH}")
    echo "Using custom Dockerfile path: ${DOCKERFILE_PATH}"
    echo "Using custom docker build context path: ${DOCKER_CONTEXT_PATH}"
    shift 2
fi

if [[ "$1" == "-it" ]]; then
    CI_DOCKER_EXTRA_PARAMS+=('-it')
    shift 1
fi

if [[ ! -f "${DOCKERFILE_PATH}" ]]; then
    echo "Invalid Dockerfile path: \"${DOCKERFILE_PATH}\""
    exit 1
fi

COMMAND=("$@")

# Validate command line arguments.
if [ "$#" -lt 1 ] || [ ! -e "${SCRIPT_DIR}/Dockerfile.${CONTAINER_TYPE}" ]; then
    supported_container_types=$( ls -1 ${SCRIPT_DIR}/Dockerfile.* | \
        sed -n 's/.*Dockerfile\.\([^\/]*\)/\1/p' | tr '\n' ' ' )
    echo "Usage: $(basename $0) CONTAINER_TYPE COMMAND"
    echo "       CONTAINER_TYPE can be one of [${supported_container_types}]"
    echo "       COMMAND is a command (with arguments) to run inside"
    echo "               the container."
    exit 1
fi

# Use nvidia-docker if the container is GPU.
if [[ "${CONTAINER_TYPE}" == *"gpu"* ]]; then
    DOCKER_BINARY="nvidia-docker"
else
    DOCKER_BINARY="docker"
fi

# Helper function to traverse directories up until the given file is found.
function upsearch () {
    test / == "$PWD" && return || \
        test -e "$1" && echo "$PWD" && return || \
        cd .. && upsearch "$1"
}

# Set up WORKSPACE and BUILD_TAG. Jenkins will set them for you, or we pick
# reasonable defaults if you run it outside of Jenkins.
WORKSPACE="${WORKSPACE:-${SCRIPT_DIR}/../../}"
BUILD_TAG="${BUILD_TAG:-nnvm-ci}"

# Determine the docker image name
DOCKER_IMG_NAME="${BUILD_TAG}.${CONTAINER_TYPE}"

# Under Jenkins matrix build, the build tag may contain characters such as
# commas (,) and equal signs (=), which are not valid inside docker image names.
DOCKER_IMG_NAME=$(echo "${DOCKER_IMG_NAME}" | sed -e 's/=/_/g' -e 's/,/-/g')

# Convert to all lower-case, as required for Docker image names.
DOCKER_IMG_NAME=$(echo "${DOCKER_IMG_NAME}" | tr '[:upper:]' '[:lower:]')

# Print arguments.
echo "WORKSPACE: ${WORKSPACE}"
echo "CI_DOCKER_EXTRA_PARAMS: ${CI_DOCKER_EXTRA_PARAMS[@]}"
echo "COMMAND: ${COMMAND[@]}"
echo "CONTAINER_TYPE: ${CONTAINER_TYPE}"
echo "BUILD_TAG: ${BUILD_TAG}"
echo "DOCKER CONTAINER NAME: ${DOCKER_IMG_NAME}"
echo ""

# Build the docker container.
echo "Building container (${DOCKER_IMG_NAME})..."
docker build -t ${DOCKER_IMG_NAME} \
    -f "${DOCKERFILE_PATH}" "${DOCKER_CONTEXT_PATH}"

# Check docker build status
if [[ $? != "0" ]]; then
    echo "ERROR: docker build failed."
    exit 1
fi

# Run the command inside the container.
echo "Running '${COMMAND[@]}' inside ${DOCKER_IMG_NAME}..."

# By default we clean up - remove the container once it finishes running (--rm)
# and share the PID namespace (--pid=host) so the process inside does not have
# pid 1 and SIGKILL is propagated to the process inside (Jenkins can kill it).
echo ${DOCKER_BINARY}
${DOCKER_BINARY} run --rm --pid=host \
    -v ${WORKSPACE}:/workspace \
    -w /workspace \
    -e "CI_BUILD_HOME=/workspace" \
    -e "CI_BUILD_USER=$(id -u -n)" \
    -e "CI_BUILD_UID=$(id -u)" \
    -e "CI_BUILD_GROUP=$(id -g -n)" \
    -e "CI_BUILD_GID=$(id -g)" \
    ${CI_DOCKER_EXTRA_PARAMS[@]} \
    ${DOCKER_IMG_NAME} \
    bash tests/ci_build/with_the_same_user \
    ${COMMAND[@]}
@@ -0,0 +1,6 @@
# install libraries for building c++ core on ubuntu
apt-get update && apt-get install -y --no-install-recommends --force-yes \
    git make libgtest-dev cmake wget unzip libtinfo-dev libz-dev \
    libcurl4-openssl-dev libopenblas-dev g++ sudo

cd /usr/src/gtest && cmake CMakeLists.txt && make && cp *.a /usr/lib
@@ -0,0 +1,22 @@
echo deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-4.0 main \
    >> /etc/apt/sources.list.d/llvm.list
echo deb-src http://apt.llvm.org/xenial/ llvm-toolchain-xenial-4.0 main \
    >> /etc/apt/sources.list.d/llvm.list

echo deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-5.0 main \
    >> /etc/apt/sources.list.d/llvm.list
echo deb-src http://apt.llvm.org/xenial/ llvm-toolchain-xenial-5.0 main \
    >> /etc/apt/sources.list.d/llvm.list

echo deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-6.0 main \
    >> /etc/apt/sources.list.d/llvm.list
echo deb-src http://apt.llvm.org/xenial/ llvm-toolchain-xenial-6.0 main \
    >> /etc/apt/sources.list.d/llvm.list

echo deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial main \
    >> /etc/apt/sources.list.d/llvm.list
echo deb-src http://apt.llvm.org/xenial/ llvm-toolchain-xenial main \
    >> /etc/apt/sources.list.d/llvm.list

wget -O - http://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add -
apt-get update && apt-get install -y --force-yes llvm-4.0 llvm-5.0 llvm-6.0
@@ -0,0 +1,5 @@
# install python and pip; don't modify this, modify install_python_package.sh
apt-get update && apt-get install -y python-pip python-dev python3-dev

# the version of pip shipped with ubuntu may be too old, so install a recent version here
cd /tmp && wget https://bootstrap.pypa.io/get-pip.py && python3 get-pip.py && python2 get-pip.py
@@ -0,0 +1,3 @@
# install libraries for python package on ubuntu
pip2 install nose pylint numpy nose-timer cython decorator scipy tornado
pip3 install nose pylint numpy nose-timer cython decorator scipy tornado
@@ -0,0 +1 @@
pip install sphinx sphinx-gallery sphinx_rtd_theme matplotlib Image "commonmark>=0.7.3" "docutils>=0.11"
@@ -0,0 +1,34 @@
#!/usr/bin/env bash

# This script is a wrapper that creates the same user inside the container as
# the one running ci_build.sh outside the container. It also sets the user's
# home directory inside the container to the same absolute path as the
# workspace outside of the container. Do not run this manually; it is intended
# to be called by ci_build.sh only.

set -e

COMMAND=("$@")

if ! touch /this_is_writable_file_system; then
    echo "You can't write to your filesystem!"
    echo "If you are in Docker you should check you do not have too many images" \
        "with too many files in them. Docker has some issue with it."
    exit 1
else
    rm /this_is_writable_file_system
fi

getent group "${CI_BUILD_GID}" || addgroup --gid "${CI_BUILD_GID}" "${CI_BUILD_GROUP}"
getent passwd "${CI_BUILD_UID}" || adduser --gid "${CI_BUILD_GID}" --uid "${CI_BUILD_UID}" \
    --gecos "${CI_BUILD_USER} (generated by with_the_same_user script)" \
    --disabled-password --home "${CI_BUILD_HOME}" --quiet "${CI_BUILD_USER}"
usermod -a -G sudo "${CI_BUILD_USER}"
echo "${CI_BUILD_USER} ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/90-nopasswd-sudo

HOME=${CI_BUILD_HOME} \
sudo -u "#${CI_BUILD_UID}" --preserve-env \
    PATH=${PATH} \
    LD_LIBRARY_PATH=${LD_LIBRARY_PATH} \
    HOME=${CI_BUILD_HOME} \
    ${COMMAND[@]}
@@ -0,0 +1,21 @@
#!/bin/bash
echo "Build TVM..."
cd nnvm/tvm
cp make/config.mk .
echo USE_CUDA=0 >> config.mk
echo LLVM_CONFIG=llvm-config-4.0 >> config.mk
echo USE_RPC=1 >> config.mk
echo USE_BLAS=openblas >> config.mk
echo USE_GRAPH_RUNTIME=1 >> config.mk
make "$@"
make cython
make cython3
cd ..

echo "Build NNVM..."
make "$@"

cd ..

echo "Build VTA..."
make "$@"
@@ -0,0 +1,10 @@
#!/bin/bash
echo "Cleanup data..."
cd nnvm
make clean

cd tvm
make clean

cd ../..
make clean
@@ -0,0 +1,15 @@
#!/bin/bash
echo "Check codestyle of c++ code..."
make cpplint || exit -1
echo "Check codestyle of python code..."
make pylint || exit -1
echo "Check documentation of c++ code..."
make doc 2>log.txt
(cat log.txt | grep -v ENABLE_PREPROCESSING | grep -v "unsupported tag") > logclean.txt
echo "---------Error Log----------"
cat logclean.txt
echo "----------------------------"
(cat logclean.txt | grep warning) && exit -1
(cat logclean.txt | grep error) && exit -1
rm logclean.txt
rm log.txt
@@ -0,0 +1,12 @@
#!/bin/bash
mkdir -p docs/_build/html
# C++ doc
make doc

rm -rf python/vta/*.pyc python/vta/*/*.pyc

cd docs
PYTHONPATH=../python:../nnvm/tvm/python:../nnvm/tvm/topi/python:../nnvm/python make html || exit -1
cd _build/html
tar czf docs.tgz *
mv docs.tgz ../../../
@@ -0,0 +1,11 @@
#!/bin/bash

export PYTHONPATH=python:nnvm/python:nnvm/tvm/python:nnvm/tvm/topi/python

echo "Running unittest..."
python -m nose -v tests/python/unittest || exit -1
python3 -m nose -v tests/python/unittest || exit -1

echo "Running integration test..."
python -m nose -v tests/python/integration || exit -1
python3 -m nose -v tests/python/integration || exit -1
@@ -14,9 +14,9 @@ extensions for compiler to generate code that runs on VTA.
"""
from __future__ import absolute_import, print_function

import tvm
import vta


######################################################################
# Program the FPGA with VTA Bitstream
# ----------------------------------