Mirror of https://github.com/mozilla/kaldi.git
deleted old pnorm scripts with wrong names
git-svn-id: https://svn.code.sf.net/p/kaldi/code/sandbox/dan2@3270 5e6a8d80-dfce-4ca6-a32a-6e07a63d50c8
Parent: d4f0008574
Commit: ddaecebfde
@@ -1,39 +0,0 @@
#!/bin/bash

# This is neural net training on top of adapted 40-dimensional features.
# This version of the script uses GPUs. We distinguish it by putting "_gpu"
# at the end of the directory name.
#
# Since we're using one quarter the number of jobs (num-jobs-nnet) as the
# run_4c.sh script, we halve the learning rate (generally speaking, splitting
# the difference like this is probably a good idea).

parallel_opts="-l gpu=1,hostname=g*"  # This is suitable for the CLSP network; you'll likely have to change it.

. cmd.sh

dir=exp/nnet4c_gpu_pnorm_lr016004_dim1200600

( steps/nnet2/train_pnorm.sh --num-epochs 20 \
    --num-jobs-nnet 4 --num-threads 1 --parallel-opts "$parallel_opts" \
    --num-epochs-extra 10 --add-layers-period 1 \
    --num-hidden-layers 2 \
    --mix-up 4000 \
    --initial-learning-rate 0.016 --final-learning-rate 0.004 \
    --cmd "$decode_cmd" \
    --pnorm-input-dim 1200 \
    --pnorm-output-dim 600 \
    --stage -5 \
    data/train data/lang exp/tri3b_ali $dir

  steps/nnet2/decode.sh --config conf/decode.config --cmd "$decode_cmd" --nj 20 \
    --transform-dir exp/tri3b/decode \
    exp/tri3b/graph data/test $dir/decode

  steps/nnet2/decode.sh --config conf/decode.config --cmd "$decode_cmd" --nj 20 \
    --transform-dir exp/tri3b/decode_ug \
    exp/tri3b/graph_ug data/test $dir/decode_ug
)
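The halving mentioned in the comments above can be made concrete: going from 16 jobs (run_4c.sh) down to 4, fully proportional scaling would quarter the learning rate, while ignoring the job count would leave it unchanged; halving is the geometric mean of the two factors, which is one way to read "splitting the difference". A minimal sketch of that arithmetic, assuming run_4c.sh used rates 0.032/0.008 (twice the values here, as the comment implies; the actual run_4c.sh settings are not shown in this diff):

  # Learning-rate scaling sketch; the reference values are inferred from
  # the comments above, not read from run_4c.sh itself.
  ref_jobs=16; num_jobs_nnet=4
  for ref_lr in 0.032 0.008; do
    awk -v lr=$ref_lr -v j=$num_jobs_nnet -v r=$ref_jobs \
      'BEGIN { printf "%g -> %g\n", lr, lr * sqrt(j / r) }'
  done
  # prints: 0.032 -> 0.016
  #         0.008 -> 0.004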
@@ -1,44 +0,0 @@
#!/bin/bash

# This runs on the 100 hour subset, using steps/nnet2/train_pnorm.sh.
# Example of usage:
# local/nnet2/run_5a_gpu.sh --temp-dir /export/m1-01/dpovey/kaldi-dan2/egs/swbd/s5b

dir=nnet5a_gpu_2n02002_nl4_3000600
temp_dir=
train_stage=-10

. cmd.sh
. utils/parse_options.sh

parallel_opts="-l gpu=1,hostname=g*"  # This is suitable for the CLSP network; you'll likely have to change it.

(
  if [ ! -f exp/$dir/final.mdl ]; then
    if [ ! -z "$temp_dir" ] && [ ! -e exp/$dir/egs ]; then
      mkdir -p exp/$dir
      mkdir -p $temp_dir/$dir/egs
      ln -s $temp_dir/$dir/egs exp/$dir/
    fi

    steps/nnet2/train_pnorm.sh --stage $train_stage \
      --num-jobs-nnet 8 --num-threads 1 --max-change 40.0 \
      --minibatch-size 512 --parallel-opts "$parallel_opts" \
      --mix-up 8000 \
      --initial-learning-rate 0.02 --final-learning-rate 0.002 \
      --num-hidden-layers 4 \
      --pnorm-input-dim 3000 \
      --pnorm-output-dim 600 \
      --cmd "$decode_cmd" \
      data/train_100k_nodup data/lang exp/tri4a exp/$dir || exit 1;
  fi

  for lm_suffix in tg fsh_tgpr; do
    steps/nnet2/decode.sh --cmd "$decode_cmd" --nj 30 \
      --config conf/decode.config --transform-dir exp/tri4a/decode_eval2000_sw1_${lm_suffix} \
      exp/tri4a/graph_sw1_${lm_suffix} data/eval2000 exp/$dir/decode_eval2000_sw1_${lm_suffix} &
  done
)
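The temp_dir block in the script above exists because the training-example (egs) archives are bulky: symlinking exp/$dir/egs onto a scratch filesystem before training starts keeps them off the main disk, while models and logs stay under exp/. A sketch of the effect with a made-up scratch path (the --temp-dir value is illustrative only):

  temp_dir=/export/scratch/kaldi   # hypothetical scratch location
  dir=nnet5a_gpu_2n02002_nl4_3000600
  mkdir -p exp/$dir $temp_dir/$dir/egs
  ln -s $temp_dir/$dir/egs exp/$dir/
  ls -l exp/$dir/egs   # -> /export/scratch/kaldi/nnet5a_gpu_2n02002_nl4_3000600/egs
  # The [ ! -e exp/$dir/egs ] guard in the script skips this step when
  # egs already exists.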
@@ -1,50 +0,0 @@
#!/bin/bash

# This is pnorm neural net training on top of adapted 40-dimensional features.

train_stage=-100
temp_dir=  # e.g. --temp-dir /export/m1-02/dpovey/kaldi-dan2/egs/wsj/s5/
parallel_opts="-l gpu=1,hostname=g*"  # This is suitable for the CLSP network; you'll likely have to change it.
dir=exp/nnet5c_gpu_si284_2n02002_nl4_50001000

# Note: since we multiplied the num-jobs by 1/4, we halved the
# learning rate, relative to run_5c.sh.
. ./cmd.sh
. utils/parse_options.sh

(
  if [ ! -z "$temp_dir" ] && [ ! -e $dir/egs ]; then
    mkdir -p $dir
    mkdir -p $temp_dir/$dir/egs
    ln -s $temp_dir/$dir/egs $dir/
  fi

  steps/nnet2/train_pnorm.sh \
    --num-jobs-nnet 4 --num-threads 1 --parallel-opts "$parallel_opts" \
    --mix-up 8000 \
    --initial-learning-rate 0.02 --final-learning-rate 0.002 \
    --num-hidden-layers 4 \
    --pnorm-input-dim 5000 --pnorm-output-dim 1000 \
    --cmd "$decode_cmd" \
    --p 2 \
    data/train_si284 data/lang exp/tri4b_ali_si284 $dir || exit 1

  steps/decode_nnet_cpu.sh --cmd "$decode_cmd" --nj 10 \
    --transform-dir exp/tri4b/decode_tgpr_dev93 \
    exp/tri4b/graph_tgpr data/test_dev93 $dir/decode_tgpr_dev93

  steps/decode_nnet_cpu.sh --cmd "$decode_cmd" --nj 8 \
    --transform-dir exp/tri4b/decode_tgpr_eval92 \
    exp/tri4b/graph_tgpr data/test_eval92 $dir/decode_tgpr_eval92

  steps/decode_nnet_cpu.sh --cmd "$decode_cmd" --nj 10 \
    --transform-dir exp/tri4b/decode_bd_tgpr_dev93 \
    exp/tri4b/graph_bd_tgpr data/test_dev93 $dir/decode_bd_tgpr_dev93

  steps/decode_nnet_cpu.sh --cmd "$decode_cmd" --nj 8 \
    --transform-dir exp/tri4b/decode_bd_tgpr_eval92 \
    exp/tri4b/graph_bd_tgpr data/test_eval92 $dir/decode_bd_tgpr_eval92
)
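As a reference for the dimensions used throughout these scripts: Kaldi's nnet2 p-norm layer groups its input into --pnorm-output-dim groups of size input-dim/output-dim (5000/1000 = 5 here) and outputs, per group, (sum_i |x_i|^p)^(1/p), with p set by --p (2 in this script). A toy awk sketch of that reduction on made-up activations:

  # Two groups of 5 toy activations -> two p-norm outputs (p = 2).
  echo "3 4 0 0 0  1 2 2 0 4" | awk -v p=2 -v group=5 '{
    for (i = 1; i <= NF; i += group) {
      s = 0
      for (j = i; j < i + group; j++) s += ($j < 0 ? -$j : $j) ^ p
      printf "%g ", s ^ (1 / p)
    }
    print ""
  }'
  # prints: 5 5   (sqrt(3^2 + 4^2) = 5, sqrt(1 + 4 + 4 + 0 + 16) = 5)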