Mirror of https://github.com/mozilla/DeepSpeech.git
TaskCluster enabling of libdeepspeech_model.so
Parent: db09ab7d19
Commit: 42f605790a
@@ -17,4 +17,4 @@ python -u DeepSpeech.py \
   --n_hidden 494 --epoch 75 --random_seed 4567 --default_stddev 0.046875 \
   --max_to_keep 1 --checkpoint_dir '/tmp/ckpt' --checkpoint_secs 0 \
   --learning_rate 0.001 --dropout_rate 0.05 --export_dir "/tmp/train" \
-  --decoder_library_path "/tmp/ds/libctc_decoder_with_kenlm.so"
+  --use_seq_length False --decoder_library_path "/tmp/ds/libctc_decoder_with_kenlm.so"
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+set -xe
+
+source $(dirname "$0")/../tc-tests-utils.sh
+
+source ${DS_ROOT_TASK}/DeepSpeech/tf/tc-vars.sh
+
+BAZEL_TARGETS="
+//native_client:deepspeech
+//native_client:deepspeech_utils
+${BAZEL_AOT_TARGETS}"
+
+BAZEL_ENV_FLAGS="TF_NEED_CUDA=0"
+SYSTEM_TARGET=host
+
+EXTRA_LOCAL_CFLAGS="${EXTRA_AOT_CFLAGS}"
+EXTRA_LOCAL_LDFLAGS="${EXTRA_AOT_LDFLAGS}"
+EXTRA_LOCAL_LIBS="${EXTRA_AOT_LIBS}"
+
+do_get_model_parameters "${DEEPSPEECH_TEST_MODEL}" AOT_MODEL_PARAMS
+BAZEL_BUILD_FLAGS="${BAZEL_OPT_FLAGS} ${BAZEL_AOT_BUILD_FLAGS} ${AOT_MODEL_PARAMS}"
+
+do_bazel_build
+
+do_deepspeech_binary_build
+
+do_deepspeech_python_build
+
+do_deepspeech_nodejs_build
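The new build script above appears to be what the linux-amd64-cpu-aot_test-opt task runs: it reuses the regular host build helpers but always compiles and links the AOT test model. A minimal sketch of driving it by hand, assuming the environment that tc-tests-utils.sh and tc-vars.sh normally get from the TaskCluster payload; the variable values and the script path (taken from the task definition later in this diff) are illustrative, not part of this commit's text:

    # Hypothetical local invocation; under TaskCluster these variables come from the task payload.
    export DS_ROOT_TASK="${HOME}/ds-task"      # task home containing DeepSpeech/ds and DeepSpeech/tf
    export DEEPSPEECH_TEST_MODEL="https://queue.taskcluster.net/v1/task/<training-task-id>/artifacts/public/output_graph.pb"
    bash taskcluster/aot-test-model-build.sh   # script name as referenced by the test task definition below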
@@ -0,0 +1,15 @@
+build:
+  template_file: darwin-opt-base.tyml
+  routes:
+    - "index.project.deepspeech.deepspeech.native_client.${event.head.branch}.osx_aot"
+    - "index.project.deepspeech.deepspeech.native_client.${event.head.branch}.${event.head.sha}.osx_aot"
+    - "index.project.deepspeech.deepspeech.native_client.osx_aot.${event.head.sha}"
+  tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.osx/artifacts/public/home.tar.xz"
+  summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.osx/artifacts/public/summarize_graph"
+  scripts:
+    build: "taskcluster/host-build.sh --aot"
+    package: "taskcluster/package.sh"
+  maxRunTime: 14400
+  metadata:
+    name: "DeepSpeech OSX AMD64 CPU AOT"
+    description: "Building DeepSpeech for OSX AMD64, AOT model, CPU only, optimized version"
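The three routes register the finished task in the TaskCluster index under branch, branch+sha, and sha-only namespaces, so consumers can fetch the newest AOT-enabled native client without knowing the task ID. A hedged sketch of resolving the branch-level route, assuming the master branch and the native_client.tar.xz artifact that taskcluster/package.sh publishes under public/ (the artifact path is an assumption, not stated in this task definition):

    # Resolve the branch-level index route to the packaged artifact (artifact path assumed).
    curl -sSL -o native_client.tar.xz \
      "https://index.taskcluster.net/v1/task/project.deepspeech.deepspeech.native_client.master.osx_aot/artifacts/public/native_client.tar.xz"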
@@ -13,10 +13,20 @@ BAZEL_TARGETS="
 ${BAZEL_CTC_TARGETS}
 "
 
-BAZEL_BUILD_FLAGS="${BAZEL_OPT_FLAGS}"
 BAZEL_ENV_FLAGS="TF_NEED_CUDA=0"
 SYSTEM_TARGET=host
 
+BAZEL_BUILD_FLAGS="${BAZEL_OPT_FLAGS}"
+if [ $1 = "--aot" ]; then
+  EXTRA_LOCAL_CFLAGS="${EXTRA_AOT_CFLAGS}"
+  EXTRA_LOCAL_LDFLAGS="${EXTRA_AOT_LDFLAGS}"
+  EXTRA_LOCAL_LIBS="${EXTRA_AOT_LIBS}"
+
+  do_get_model_parameters "${DEEPSPEECH_PROD_MODEL}" AOT_MODEL_PARAMS
+
+  BAZEL_TARGETS="${BAZEL_AOT_TARGETS} ${BAZEL_TARGETS}"
+  BAZEL_BUILD_FLAGS="${BAZEL_BUILD_FLAGS} ${BAZEL_AOT_BUILD_FLAGS} ${AOT_MODEL_PARAMS}"
+fi;
+
 do_bazel_build
 
 
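host-build.sh now takes an optional first argument: with --aot it pulls the production model parameters and adds the AOT targets, flags and libraries on top of the regular optimized build; otherwise the AOT block is skipped and the build is the same as before. (With no argument, the unquoted $1 makes the [ test print an error to stderr, but the condition still evaluates as false, so plain builds keep working.) Roughly, the two invocations used by the task definitions in this commit are:

    # Regular CPU build (existing tasks)
    taskcluster/host-build.sh
    # AOT-enabled CPU build, as wired up in the new *_aot task definitions
    taskcluster/host-build.sh --aot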
@@ -0,0 +1,18 @@
+build:
+  template_file: linux-opt-base.tyml
+  routes:
+    - "index.project.deepspeech.deepspeech.native_client.${event.head.branch}.cpu_aot"
+    - "index.project.deepspeech.deepspeech.native_client.${event.head.branch}.${event.head.sha}.cpu_aot"
+    - "index.project.deepspeech.deepspeech.native_client.cpu_aot.${event.head.sha}"
+  tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.cpu/artifacts/public/home.tar.xz"
+  summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.cpu/artifacts/public/summarize_graph"
+  system_setup:
+    >
+      ${nodejs.packages.prep_6} && apt-get -qq update && apt-get -qq -y install nodejs python-yaml &&
+      apt-get -qq -y install ${python.packages.apt} && ${swig.packages.install_script}
+  scripts:
+    build: "taskcluster/host-build.sh --aot"
+    package: "taskcluster/package.sh"
+  metadata:
+    name: "DeepSpeech Linux AMD64 CPU AOT"
+    description: "Building DeepSpeech for Linux/AMD64, AOT model, CPU only, optimized version"
@@ -0,0 +1,16 @@
+build:
+  template_file: linux-opt-base.tyml
+  dependencies:
+    - "test-training-linux-amd64-opt"
+  tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.cpu/artifacts/public/home.tar.xz"
+  summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.cpu/artifacts/public/summarize_graph"
+  system_setup:
+    >
+      ${nodejs.packages.prep_6} && apt-get -qq update && apt-get -qq -y install nodejs python-yaml &&
+      apt-get -qq -y install ${python.packages.apt} && ${swig.packages.install_script}
+  scripts:
+    build: "taskcluster/aot-test-model-build.sh"
+    package: "taskcluster/package.sh"
+  metadata:
+    name: "DeepSpeech Linux AMD64 CPU AOT (Test)"
+    description: "Building DeepSpeech for Linux/AMD64, AOT Test model, CPU only, optimized version"
@@ -0,0 +1,25 @@
+build:
+  template_file: linux-opt-base.tyml
+  routes:
+    - "index.project.deepspeech.deepspeech.native_client.${event.head.branch}.arm_aot"
+    - "index.project.deepspeech.deepspeech.native_client.${event.head.branch}.${event.head.sha}.arm_aot"
+    - "index.project.deepspeech.deepspeech.native_client.arm_aot.${event.head.sha}"
+  tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.arm/artifacts/public/home.tar.xz"
+  summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.cpu/artifacts/public/summarize_graph"
+  ## multistrap 2.2.0-ubuntu1 is broken in 14.04: https://bugs.launchpad.net/ubuntu/+source/multistrap/+bug/1313787
+  system_setup:
+    >
+      apt-get -qq -y install gdebi git pixz &&
+      wget http://mirrors.kernel.org/ubuntu/pool/universe/m/multistrap/multistrap_2.2.0ubuntu2_all.deb -O /tmp/multistrap_2.2.0ubuntu2_all.deb &&
+      echo "y" | gdebi /tmp/multistrap_2.2.0ubuntu2_all.deb &&
+      ${nodejs.packages.prep_6} && apt-get -qq update && apt-get -qq -y install nodejs python-yaml &&
+      apt-get -qq -y install ${python.packages.apt} && ${swig.packages.install_script}
+  system_config:
+    >
+      multistrap -d /tmp/multistrap-raspbian-jessie/ -f ${system.homedir.linux}/DeepSpeech/ds/native_client/multistrap.conf
+  scripts:
+    build: "taskcluster/rpi3-build.sh --aot"
+    package: "taskcluster/package.sh"
+  metadata:
+    name: "DeepSpeech Linux RPi3/ARMv6 CPU AOT"
+    description: "Building DeepSpeech for Linux RPi3 ARMv6, AOT model, CPU only, optimized version"
@@ -13,10 +13,20 @@ BAZEL_TARGETS="
 ${BAZEL_CTC_TARGETS}
 "
 
-BAZEL_BUILD_FLAGS="${BAZEL_ARM_FLAGS}"
 BAZEL_ENV_FLAGS="TF_NEED_CUDA=0"
 SYSTEM_TARGET=rpi3
 
+BAZEL_BUILD_FLAGS="${BAZEL_ARM_FLAGS}"
+if [ $1 = "--aot" ]; then
+  EXTRA_LOCAL_CFLAGS="${EXTRA_AOT_CFLAGS}"
+  EXTRA_LOCAL_LDFLAGS="${EXTRA_AOT_LDFLAGS}"
+  EXTRA_LOCAL_LIBS="${EXTRA_AOT_LIBS}"
+
+  do_get_model_parameters "${DEEPSPEECH_PROD_MODEL}" AOT_MODEL_PARAMS
+
+  BAZEL_TARGETS="${BAZEL_AOT_TARGETS} ${BAZEL_TARGETS}"
+  BAZEL_BUILD_FLAGS="${BAZEL_BUILD_FLAGS} ${BAZEL_AOT_BUILD_FLAGS} ${AOT_MODEL_PARAMS}"
+fi;
+
 do_bazel_build
 
 
@@ -0,0 +1,13 @@
+build:
+  template_file: test-linux-opt-base.tyml
+  dependencies:
+    - "linux-amd64-cpu-aot_test-opt"
+    - "test-training-linux-amd64-opt"
+  system_setup:
+    >
+      apt-get -qq -y install ${python.packages.apt} zip
+  args:
+    tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/tc-benchmark-tests.sh --aot"
+  metadata:
+    name: "DeepSpeech Linux AMD64 CPU benchmarking AOT (Test)"
+    description: "Benchmarking DeepSpeech for Linux/AMD64, AOT model (test), CPU only, optimized version"
@@ -0,0 +1,10 @@
+build:
+  template_file: test-linux-opt-base.tyml
+  dependencies:
+    - "linux-amd64-cpu-aot_test-opt"
+    - "test-training-linux-amd64-opt"
+  args:
+    tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/tc-cpp-ds-tests.sh --aot"
+  metadata:
+    name: "DeepSpeech Linux AMD64 CPU C++ tests AOT (Test)"
+    description: "Testing DeepSpeech C++ for Linux/AMD64, AOT Model (test), CPU only, optimized version"
@@ -24,11 +24,13 @@ then:
     env:
       $let:
         training: { $eval: as_slugid("test-training-linux-amd64-opt") }
+        linux_amd64_aot_test: { $eval: as_slugid("linux-amd64-cpu-aot_test-opt") }
        linux_amd64_build: { $eval: as_slugid("linux-amd64-cpu-opt") }
         linux_amd64_ctc: { $eval: as_slugid("linux-amd64-ctc-opt") }
       in:
         TENSORFLOW_WHEEL: https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.cpu/artifacts/public/tensorflow_warpctc-1.3.0rc0-cp27-cp27mu-linux_x86_64.whl
         DEEPSPEECH_ARTIFACTS_ROOT: https://queue.taskcluster.net/v1/task/${linux_amd64_build}/artifacts/public
+        DEEPSPEECH_AOT_ARTIFACTS_ROOT: https://queue.taskcluster.net/v1/task/${linux_amd64_aot_test}/artifacts/public
         DEEPSPEECH_LIBCTC: https://queue.taskcluster.net/v1/task/${linux_amd64_ctc}/artifacts/public/decoder.tar.xz
         DEEPSPEECH_TEST_MODEL: https://queue.taskcluster.net/v1/task/${training}/artifacts/public/output_graph.pb
 
 
@@ -0,0 +1,13 @@
+build:
+  template_file: test-linux-opt-base.tyml
+  dependencies:
+    - "linux-amd64-cpu-aot_test-opt"
+    - "test-training-linux-amd64-opt"
+  system_setup:
+    >
+      ${nodejs.packages.prep_4} && apt-get -qq update && apt-get -qq -y install ${nodejs.packages.apt}
+  args:
+    tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/tc-node-tests.sh 4.x --aot"
+  metadata:
+    name: "DeepSpeech Linux AMD64 CPU NodeJS 4.x tests AOT (test)"
+    description: "Testing DeepSpeech for Linux/AMD64 on NodeJS v4.x, AOT Model (test), CPU only, optimized version"
@@ -0,0 +1,13 @@
+build:
+  template_file: test-linux-opt-base.tyml
+  dependencies:
+    - "linux-amd64-cpu-aot_test-opt"
+    - "test-training-linux-amd64-opt"
+  system_setup:
+    >
+      ${nodejs.packages.prep_5} && apt-get -qq update && apt-get -qq -y install ${nodejs.packages.apt}
+  args:
+    tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/tc-node-tests.sh 5.x --aot"
+  metadata:
+    name: "DeepSpeech Linux AMD64 CPU NodeJS 5.x tests AOT (test)"
+    description: "Testing DeepSpeech for Linux/AMD64 on NodeJS v5.x, AOT Model (test), CPU only, optimized version"
@@ -0,0 +1,13 @@
+build:
+  template_file: test-linux-opt-base.tyml
+  dependencies:
+    - "linux-amd64-cpu-aot_test-opt"
+    - "test-training-linux-amd64-opt"
+  system_setup:
+    >
+      ${nodejs.packages.prep_6} && apt-get -qq update && apt-get -qq -y install ${nodejs.packages.apt}
+  args:
+    tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/tc-node-tests.sh 6.x --aot"
+  metadata:
+    name: "DeepSpeech Linux AMD64 CPU NodeJS 6.x tests AOT (test)"
+    description: "Testing DeepSpeech for Linux/AMD64 on NodeJS v6.x, AOT Model (test), CPU only, optimized version"
@@ -0,0 +1,13 @@
+build:
+  template_file: test-linux-opt-base.tyml
+  dependencies:
+    - "linux-amd64-cpu-aot_test-opt"
+    - "test-training-linux-amd64-opt"
+  system_setup:
+    >
+      apt-get -qq -y install ${python.packages.apt}
+  args:
+    tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/tc-python-tests.sh 2.7.13 --aot"
+  metadata:
+    name: "DeepSpeech Linux AMD64 CPU Python v2.7 tests AOT (test)"
+    description: "Testing DeepSpeech for Linux/AMD64 on Python v2.7, AOT Model (test), CPU only, optimized version"
@@ -0,0 +1,13 @@
+build:
+  template_file: test-linux-opt-base.tyml
+  dependencies:
+    - "linux-amd64-cpu-aot_test-opt"
+    - "test-training-linux-amd64-opt"
+  system_setup:
+    >
+      apt-get -qq -y install ${python.packages.apt}
+  args:
+    tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/tc-python-tests.sh 3.4.6 --aot"
+  metadata:
+    name: "DeepSpeech Linux AMD64 CPU Python v3.4 tests AOT (test)"
+    description: "Testing DeepSpeech for Linux/AMD64 on Python v3.4, AOT Model (test), CPU only, optimized version"
@@ -0,0 +1,13 @@
+build:
+  template_file: test-linux-opt-base.tyml
+  dependencies:
+    - "linux-amd64-cpu-aot_test-opt"
+    - "test-training-linux-amd64-opt"
+  system_setup:
+    >
+      apt-get -qq -y install ${python.packages.apt}
+  args:
+    tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/tc-python-tests.sh 3.5.3 --aot"
+  metadata:
+    name: "DeepSpeech Linux AMD64 CPU Python v3.5 tests AOT (test)"
+    description: "Testing DeepSpeech for Linux/AMD64 on Python v3.5, AOT Model (test), CPU only, optimized version"
@@ -0,0 +1,13 @@
+build:
+  template_file: test-linux-opt-base.tyml
+  dependencies:
+    - "linux-amd64-cpu-aot_test-opt"
+    - "test-training-linux-amd64-opt"
+  system_setup:
+    >
+      apt-get -qq -y install ${python.packages.apt}
+  args:
+    tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/tc-python-tests.sh 3.6.2 --aot"
+  metadata:
+    name: "DeepSpeech Linux AMD64 CPU Python v3.6 tests AOT (test)"
+    description: "Testing DeepSpeech for Linux/AMD64 on Python v3.6, AOT Model (test), CPU only, optimized version"
@@ -8,6 +8,7 @@ exec_benchmark()
 {
   model_file="$1"
   run_postfix=$2
+  aot_model=$3
 
   mkdir -p /tmp/bench-ds/ || true
   mkdir -p /tmp/bench-ds-nolm/ || true
@@ -17,9 +18,14 @@ exec_benchmark()
   png=${TASKCLUSTER_ARTIFACTS}/benchmark-${run_postfix}.png
   svg=${TASKCLUSTER_ARTIFACTS}/benchmark-${run_postfix}.svg
 
+  AOT_MODEL_ARGS=""
+  if [ ! -z "${aot_model}" ]; then
+    AOT_MODEL_ARGS="--so-model ${aot_model}"
+  fi;
+
   python ${DS_ROOT_TASK}/DeepSpeech/ds/bin/benchmark_nc.py \
     --dir /tmp/bench-ds/ \
-    --models ${model_file} \
+    --models ${model_file} ${AOT_MODEL_ARGS} \
     --wav /tmp/LDC93S1.wav \
     --alphabet /tmp/alphabet.txt \
     --lm_binary /tmp/lm.binary \
@@ -28,7 +34,7 @@ exec_benchmark()
 
   python ${DS_ROOT_TASK}/DeepSpeech/ds/bin/benchmark_nc.py \
     --dir /tmp/bench-ds-nolm/ \
-    --models ${model_file} \
+    --models ${model_file} ${AOT_MODEL_ARGS} \
     --wav /tmp/LDC93S1.wav \
     --alphabet /tmp/alphabet.txt \
     --csv ${csv_nolm}
@@ -86,7 +92,11 @@ done;
 mv /tmp/${model_name} /tmp/test.frozen.e75.lstm494.ldc93s1.pb
 
 # We don't need download_material here, benchmark code should take care of it.
-export TASKCLUSTER_SCHEME=${DEEPSPEECH_ARTIFACTS_ROOT}/native_client.tar.xz
+if [ "$1" = "--aot" ]; then
+  export TASKCLUSTER_SCHEME=${DEEPSPEECH_AOT_ARTIFACTS_ROOT}/native_client.tar.xz
+else
+  export TASKCLUSTER_SCHEME=${DEEPSPEECH_ARTIFACTS_ROOT}/native_client.tar.xz
+fi;
 
 install_pyenv "${PYENV_ROOT}"
 install_pyenv_virtualenv "$(pyenv root)/plugins/pyenv-virtualenv"
@@ -98,9 +108,15 @@ source ${PYENV_ROOT}/versions/${pyver}/envs/${PYENV_NAME}/bin/activate
 
 pip install -r ${DS_ROOT_TASK}/DeepSpeech/ds/requirements.txt
 
-exec_benchmark "/tmp/test.frozen.e75.lstm494.ldc93s1.pb" "single-model"
-exec_benchmark "/tmp/test.frozen.e75.lstm100-900.ldc93s1.zip" "zipfile-model"
-exec_benchmark "${model_list}" "multi-model"
+exec_benchmark "/tmp/test.frozen.e75.lstm494.ldc93s1.pb" "single-model_noAOT"
+exec_benchmark "/tmp/test.frozen.e75.lstm100-900.ldc93s1.zip" "zipfile-model_noAOT"
+exec_benchmark "${model_list}" "multi-model_noAOT"
+
+if [ "$1" = "--aot" ]; then
+  exec_benchmark "/tmp/test.frozen.e75.lstm494.ldc93s1.pb" "single-model_AOT" "test.aot.e5.lstm494.ldc93s1.so"
+  exec_benchmark "/tmp/test.frozen.e75.lstm100-900.ldc93s1.zip" "zipfile-model_AOT" "test.aot.e5.lstm494.ldc93s1.so"
+  exec_benchmark "${model_list}" "multi-model_AOT" "test.aot.e5.lstm494.ldc93s1.so"
+fi;
 
 deactivate
 pyenv uninstall --force ${PYENV_NAME}
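With the third argument set, exec_benchmark forwards the AOT shared object to the benchmark driver via --so-model. Roughly, the no-LM call for the single-model AOT run above expands to the following (all paths and flags taken from the hunk; shown expanded only for clarity):

    # Expansion of the no-LM benchmark call for the single-model AOT run.
    python ${DS_ROOT_TASK}/DeepSpeech/ds/bin/benchmark_nc.py \
      --dir /tmp/bench-ds-nolm/ \
      --models /tmp/test.frozen.e75.lstm494.ldc93s1.pb --so-model test.aot.e5.lstm494.ldc93s1.so \
      --wav /tmp/LDC93S1.wav \
      --alphabet /tmp/alphabet.txt \
      --csv ${csv_nolm}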
@@ -4,8 +4,19 @@ set -xe
 source $(dirname "$0")/tc-tests-utils.sh
 
-download_material "/tmp/ds"
+aot_model=$1
 
-phrase=$(LD_LIBRARY_PATH=/tmp/ds/:$LD_LIBRARY_PATH /tmp/ds/deepspeech /tmp/${model_name} /tmp/LDC93S1.wav /tmp/alphabet.txt /tmp/lm.binary /tmp/trie)
+download_material "/tmp/ds" "${aot_model}"
 
-assert_correct_ldc93s1 "${phrase}"
+phrase_pbmodel_nolm=$(LD_LIBRARY_PATH=/tmp/ds/:$LD_LIBRARY_PATH /tmp/ds/deepspeech /tmp/${model_name} /tmp/LDC93S1.wav /tmp/alphabet.txt)
+assert_correct_ldc93s1 "${phrase_pbmodel_nolm}"
 
+phrase_pbmodel_withlm=$(LD_LIBRARY_PATH=/tmp/ds/:$LD_LIBRARY_PATH /tmp/ds/deepspeech /tmp/${model_name} /tmp/LDC93S1.wav /tmp/alphabet.txt /tmp/lm.binary /tmp/trie)
+assert_correct_ldc93s1 "${phrase_pbmodel_withlm}"
 
+if [ "${aot_model}" = "--aot" ]; then
+  phrase_somodel_nolm=$(LD_LIBRARY_PATH=/tmp/ds/:$LD_LIBRARY_PATH /tmp/ds/deepspeech "" /tmp/LDC93S1.wav /tmp/alphabet.txt)
+  phrase_somodel_withlm=$(LD_LIBRARY_PATH=/tmp/ds/:$LD_LIBRARY_PATH /tmp/ds/deepspeech "" /tmp/LDC93S1.wav /tmp/alphabet.txt /tmp/lm.binary /tmp/trie)
+
+  assert_correct_ldc93s1_somodel "${phrase_somodel_nolm}" "${phrase_somodel_withlm}"
+fi;
 
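In the AOT case the acoustic model is compiled into libdeepspeech_model.so, so the C++ client is exercised with an empty model path and only the audio, alphabet and (optionally) LM/trie arguments remain. The convention, as used in the hunk above, is:

    # AOT run: empty model argument, the model comes from libdeepspeech_model.so on LD_LIBRARY_PATH.
    LD_LIBRARY_PATH=/tmp/ds/:$LD_LIBRARY_PATH /tmp/ds/deepspeech "" /tmp/LDC93S1.wav /tmp/alphabet.txt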
@@ -5,6 +5,7 @@ set -xe
 source $(dirname "$0")/tc-tests-utils.sh
 
 nodever=$1
+aot_model=$2
 
 if [ -z "${nodever}" ]; then
   echo "No node version given, aborting."
@@ -18,9 +19,24 @@ phrase=""
 pushd ${HOME}/DeepSpeech/ds/native_client/
   node --version
   npm --version
-  npm install ${DEEPSPEECH_ARTIFACTS_ROOT}/deepspeech-0.0.1.tgz
-  npm install
-  phrase=$(node client.js /tmp/${model_name} /tmp/LDC93S1.wav /tmp/alphabet.txt /tmp/lm.binary /tmp/trie)
-popd
+  if [ "${aot_model}" = "--aot" ]; then
+    npm install ${DEEPSPEECH_AOT_ARTIFACTS_ROOT}/deepspeech-0.0.1.tgz
+  else
+    npm install ${DEEPSPEECH_ARTIFACTS_ROOT}/deepspeech-0.0.1.tgz
+  fi
 
-assert_correct_ldc93s1 "${phrase}"
+  npm install
 
+  phrase_pbmodel_nolm=$(node client.js /tmp/${model_name} /tmp/LDC93S1.wav /tmp/alphabet.txt)
+  assert_correct_ldc93s1 "${phrase_pbmodel_nolm}"
 
+  phrase_pbmodel_withlm=$(node client.js /tmp/${model_name} /tmp/LDC93S1.wav /tmp/alphabet.txt /tmp/lm.binary /tmp/trie)
+  assert_correct_ldc93s1 "${phrase_pbmodel_withlm}"
 
+  if [ "${aot_model}" = "--aot" ]; then
+    phrase_somodel_nolm=$(node client.js "" /tmp/LDC93S1.wav /tmp/alphabet.txt)
+    phrase_somodel_withlm=$(node client.js "" /tmp/LDC93S1.wav /tmp/alphabet.txt /tmp/lm.binary /tmp/trie)
+
+    assert_correct_ldc93s1_somodel "${phrase_somodel_nolm}" "${phrase_somodel_withlm}"
+  fi
+popd
@@ -5,6 +5,7 @@ set -xe
 source $(dirname "$0")/tc-tests-utils.sh
 
 pyver=$1
+aot_model=$2
 
 if [ -z "${pyver}" ]; then
   echo "No python version given, aborting."
@@ -40,11 +41,26 @@ source ${PYENV_ROOT}/versions/${pyver}/envs/${PYENV_NAME}/bin/activate
 platform=$(python -c 'import sys; import platform; sys.stdout.write("%s_%s" % (platform.system().lower(), platform.machine()));')
 deepspeech_pkg="deepspeech-0.0.1-cp${pyver_pkg}-cp${pyver_pkg}${py_unicode_type}-${platform}.whl"
-pip install --upgrade scipy==0.19.1 ${DEEPSPEECH_ARTIFACTS_ROOT}/${deepspeech_pkg}
 
-phrase=$(python ${HOME}/DeepSpeech/ds/native_client/client.py /tmp/${model_name} /tmp/LDC93S1.wav /tmp/alphabet.txt /tmp/lm.binary /tmp/trie)
+if [ "${aot_model}" = "--aot" ]; then
+  deepspeech_pkg_url=${DEEPSPEECH_AOT_ARTIFACTS_ROOT}/${deepspeech_pkg}
+else
+  deepspeech_pkg_url=${DEEPSPEECH_ARTIFACTS_ROOT}/${deepspeech_pkg}
+fi
+pip install --upgrade scipy==0.19.1 ${deepspeech_pkg_url}
 
-assert_correct_ldc93s1 "${phrase}"
+phrase_pbmodel_nolm=$(python ${HOME}/DeepSpeech/ds/native_client/client.py /tmp/${model_name} /tmp/LDC93S1.wav /tmp/alphabet.txt)
+assert_correct_ldc93s1 "${phrase_pbmodel_nolm}"
 
+phrase_pbmodel_withlm=$(python ${HOME}/DeepSpeech/ds/native_client/client.py /tmp/${model_name} /tmp/LDC93S1.wav /tmp/alphabet.txt /tmp/lm.binary /tmp/trie)
+assert_correct_ldc93s1 "${phrase_pbmodel_withlm}"
 
+if [ "${aot_model}" = "--aot" ]; then
+  phrase_somodel_nolm=$(python ${HOME}/DeepSpeech/ds/native_client/client.py "" /tmp/LDC93S1.wav /tmp/alphabet.txt)
+  phrase_somodel_withlm=$(python ${HOME}/DeepSpeech/ds/native_client/client.py "" /tmp/LDC93S1.wav /tmp/alphabet.txt /tmp/lm.binary /tmp/trie)
+
+  assert_correct_ldc93s1_somodel "${phrase_somodel_nolm}" "${phrase_somodel_withlm}"
+fi
 
 deactivate
 pyenv uninstall --force ${PYENV_NAME}
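The wheel name is assembled from the Python version and unicode flavour; for the 2.7.13 job with --aot it should resolve to something like the sketch below. The concrete values are inferred from the cp27-cp27mu tag used by TENSORFLOW_WHEEL earlier in this diff and are shown only as an illustration (the script computes them itself):

    # Illustrative expansion for the Python 2.7 job with --aot; actual values are computed by the script.
    pyver_pkg=27
    py_unicode_type=mu
    platform=linux_x86_64
    deepspeech_pkg="deepspeech-0.0.1-cp27-cp27mu-linux_x86_64.whl"
    pip install --upgrade scipy==0.19.1 "${DEEPSPEECH_AOT_ARTIFACTS_ROOT}/${deepspeech_pkg}"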
@@ -18,6 +18,18 @@ export DS_DSDIR=${DS_ROOT_TASK}/DeepSpeech/ds
 export BAZEL_CTC_TARGETS="//native_client:ctc_decoder_with_kenlm"
 
+export EXTRA_AOT_CFLAGS=""
+export EXTRA_AOT_LDFLAGS="-L${DS_TFDIR}/bazel-bin/tensorflow/compiler/xla -L${DS_TFDIR}/bazel-bin/tensorflow/compiler/aot -L${DS_TFDIR}/bazel-bin/tensorflow/compiler/xla/service/cpu"
+export EXTRA_AOT_LIBS="-ldeepspeech_model -lruntime -lruntime_matmul -lexecutable_run_options"
+
+export BAZEL_AOT_BUILD_FLAGS="--define=DS_NATIVE_MODEL=1 --define=DS_MODEL_TIMESTEPS=64"
+export BAZEL_AOT_TARGETS="
+//native_client:deepspeech_model
+//tensorflow/compiler/aot:runtime
+//tensorflow/compiler/xla/service/cpu:runtime_matmul
+//tensorflow/compiler/xla:executable_run_options
+"
 
 model_name=$(basename "${DEEPSPEECH_TEST_MODEL}")
 
 SUPPORTED_PYTHON_VERSIONS=${SUPPORTED_PYTHON_VERSIONS:-2.7.13 3.4.6 3.5.3 3.6.2}
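These exports are picked up as EXTRA_LOCAL_* by the build scripts, so the AOT search paths and libraries end up on the native client link line. A hedged illustration of where the pieces land; the real link is driven by the native_client build, not by a manual command, and the object file name and -ldeepspeech are assumptions added for the sketch:

    # Illustration only: how the -L paths and -l libraries above would combine on a link line.
    g++ -o deepspeech client.o \
      -L${DS_TFDIR}/bazel-bin/tensorflow/compiler/xla \
      -L${DS_TFDIR}/bazel-bin/tensorflow/compiler/aot \
      -L${DS_TFDIR}/bazel-bin/tensorflow/compiler/xla/service/cpu \
      -ldeepspeech -ldeepspeech_model -lruntime -lruntime_matmul -lexecutable_run_options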
@@ -57,6 +69,37 @@ assert_correct_ldc93s1()
   assert_correct_inference "$1" "she had your dark suit in greasy wash water all year"
 }
 
+assert_correct_ldc93s1_somodel()
+{
+  somodel_nolm=$1
+  somodel_withlm=$2
+
+  # We want to be able to return non zero value from the function, while not
+  # failing the whole execution
+  set +e
+
+  assert_correct_ldc93s1 "${somodel_nolm}"
+  so_nolm=$?
+
+  assert_correct_ldc93s1 "${somodel_withlm}"
+  so_lm=$?
+
+  set -e
+
+  # We accept that with no LM there may be errors, but we do not accept that
+  # for LM. For now.
+  if [ ${so_lm} -eq 1 ] && [ ${so_nolm} -eq 1 -o ${so_nolm} -eq 0 ];
+  then
+    exit 1
+  elif [ ${so_lm} -eq 0 ] && [ ${so_nolm} -eq 1 -o ${so_nolm} -eq 0 ];
+  then
+    exit 0
+  else
+    echo "Unexpected status"
+    exit 2
+  fi
+}
+
 generic_download_tarxz()
 {
   target_dir=$1
@@ -79,6 +122,11 @@ download_native_client_files()
   generic_download_tarxz "$1" "${DEEPSPEECH_ARTIFACTS_ROOT}/native_client.tar.xz"
 }
 
+download_aot_model_files()
+{
+  generic_download_tarxz "$1" "${DEEPSPEECH_AOT_ARTIFACTS_ROOT}/native_client.tar.xz"
+}
+
 download_ctc_kenlm()
 {
   generic_download_tarxz "$1" "${DEEPSPEECH_LIBCTC}"
@@ -95,7 +143,15 @@ download_data()
 download_material()
 {
-  download_native_client_files "$1"
+  target_dir=$1
+  maybe_aot=$2
+
+  if [ "${maybe_aot}" = "--aot" ]; then
+    download_aot_model_files "${target_dir}"
+  else
+    download_native_client_files "${target_dir}"
+  fi
 
   download_data
 
   ls -hal /tmp/${model_name} /tmp/LDC93S1.wav /tmp/alphabet.txt
@@ -250,14 +306,31 @@ package_native_client()
     echo "Please specify artifact name."
   fi;
 
-  tar -cf - \
-    -C ${tensorflow_dir}/bazel-bin/tensorflow/ libtensorflow_cc.so \
-    -C ${tensorflow_dir}/bazel-bin/native_client/ generate_trie \
-    -C ${tensorflow_dir}/bazel-bin/native_client/ libctc_decoder_with_kenlm.so \
-    -C ${tensorflow_dir}/bazel-bin/native_client/ libdeepspeech.so \
-    -C ${tensorflow_dir}/bazel-bin/native_client/ libdeepspeech_utils.so \
-    -C ${deepspeech_dir}/ LICENSE \
-    -C ${deepspeech_dir}/native_client/ deepspeech \
-    -C ${deepspeech_dir}/native_client/kenlm/ README.mozilla \
-    | pixz -9 > "${artifacts_dir}/${artifact_name}"
+  if [ -f "${tensorflow_dir}/bazel-bin/native_client/libdeepspeech_model.so" ]; then
+    tar -cf - \
+      -C ${tensorflow_dir}/bazel-bin/tensorflow/ libtensorflow_cc.so \
+      -C ${tensorflow_dir}/bazel-bin/tensorflow/compiler/aot/ libruntime.so \
+      -C ${tensorflow_dir}/bazel-bin/tensorflow/compiler/xla/service/cpu/ libruntime_matmul.so \
+      -C ${tensorflow_dir}/bazel-bin/tensorflow/compiler/xla/ libexecutable_run_options.so \
+      -C ${tensorflow_dir}/bazel-bin/native_client/ generate_trie \
+      -C ${tensorflow_dir}/bazel-bin/native_client/ libctc_decoder_with_kenlm.so \
+      -C ${tensorflow_dir}/bazel-bin/native_client/ libdeepspeech.so \
+      -C ${tensorflow_dir}/bazel-bin/native_client/ libdeepspeech_model.so \
+      -C ${tensorflow_dir}/bazel-bin/native_client/ libdeepspeech_utils.so \
+      -C ${deepspeech_dir}/ LICENSE \
+      -C ${deepspeech_dir}/native_client/ deepspeech \
+      -C ${deepspeech_dir}/native_client/kenlm/ README.mozilla \
+      | pixz -9 > "${artifacts_dir}/${artifact_name}"
+  else
+    tar -cf - \
+      -C ${tensorflow_dir}/bazel-bin/tensorflow/ libtensorflow_cc.so \
+      -C ${tensorflow_dir}/bazel-bin/native_client/ generate_trie \
+      -C ${tensorflow_dir}/bazel-bin/native_client/ libctc_decoder_with_kenlm.so \
+      -C ${tensorflow_dir}/bazel-bin/native_client/ libdeepspeech.so \
+      -C ${tensorflow_dir}/bazel-bin/native_client/ libdeepspeech_utils.so \
+      -C ${deepspeech_dir}/ LICENSE \
+      -C ${deepspeech_dir}/native_client/ deepspeech \
+      -C ${deepspeech_dir}/native_client/kenlm/ README.mozilla \
+      | pixz -9 > "${artifacts_dir}/${artifact_name}"
+  fi;
 }
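When libdeepspeech_model.so is present the tarball additionally carries the XLA/AOT runtime libraries; either way the artifact is a pixz-compressed tar named by the caller (native_client.tar.xz in the task definitions above). A quick sketch of unpacking it on the consumer side, assuming pixz is installed; libdeepspeech_model.so only exists in AOT-enabled artifacts, hence the tolerant listing:

    # Unpack a downloaded native_client.tar.xz; pixz is the compressor used at packaging time.
    pixz -d < native_client.tar.xz | tar -xf -
    ls libdeepspeech.so libdeepspeech_utils.so libdeepspeech_model.so 2>/dev/null || true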