#!/bin/bash

# This script is based on run_tdnn_7h.sh in the swbd chain recipe.

set -e

# configs for 'chain'
affix=
stage=12
train_stage=-10
get_egs_stage=-10
dir=exp/chain/tdnn_7h  # note: _sp (for the speed-perturbed data) gets appended to this below.
decode_iter=

# training options
num_epochs=4
initial_effective_lrate=0.001
final_effective_lrate=0.0001
leftmost_questions_truncate=-1
max_param_change=2.0
final_layer_normalize_target=0.5
num_jobs_initial=2
num_jobs_final=12
minibatch_size=128
frames_per_eg=150
remove_egs=true
common_egs_dir=
xent_regularize=0.1

# End configuration section.
echo "$0 $@"  # Print the command line for logging

. ./cmd.sh
. ./path.sh
. ./utils/parse_options.sh
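
# utils/parse_options.sh lets any of the config variables above be overridden
# from the command line, e.g. (illustrative values):
#   ./run_tdnn_7h.sh --stage 9 --affix 1a --num-epochs 6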

if ! cuda-compiled; then
  cat <<EOF && exit 1
This script is intended to be used with GPUs, but you have not compiled Kaldi with CUDA.
If you want to use GPUs (and have them), go to src/, and configure and make on a machine
where "nvcc" is installed.
EOF
fi

# The iVector-extraction and feature-dumping parts are the same as the standard
# nnet3 setup, and you can skip them by setting "--stage 8" if you have already
# run those things.

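# ${affix:+_$affix} expands to "_$affix" when $affix is non-empty and to
# nothing otherwise, so e.g. --affix 1a would give exp/chain/tdnn_7h_1a_sp.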
dir=${dir}${affix:+_$affix}_sp
train_set=train_sp
ali_dir=exp/tri5a_sp_ali
treedir=exp/chain/tri6_7d_tree_sp
lang=data/lang_chain


# if we are using the speed-perturbed data we need to generate
# alignments for it.
local/nnet3/run_ivector_common.sh --stage $stage \
  --ivector-extractor exp/nnet2_online/extractor || exit 1;

if [ $stage -le 9 ]; then
  # Get the alignments as lattices (gives the LF-MMI training more freedom).
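  # Using lattices rather than a single best-path alignment lets the numerator
  # of the LF-MMI objective consider alternative alignments and pronunciations.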
  # use the same num-jobs as the alignments
  nj=$(cat $ali_dir/num_jobs) || exit 1;
  steps/align_fmllr_lats.sh --nj $nj --cmd "$train_cmd" data/$train_set \
    data/lang exp/tri5a exp/tri5a_sp_lats
  rm exp/tri5a_sp_lats/fsts.*.gz # save space
fi

if [ $stage -le 10 ]; then
  # Create a version of the lang/ directory that has one state per phone in the
  # topo file.  [note, it really has two states.. the first one is only repeated
  # once, the second one has zero or more repeats.]
  rm -rf $lang
  cp -r data/lang $lang
  silphonelist=$(cat $lang/phones/silence.csl) || exit 1;
  nonsilphonelist=$(cat $lang/phones/nonsilence.csl) || exit 1;
  # Use our special topology... note that later on we may have to tune this
  # topology.
  steps/nnet3/chain/gen_topo.py $nonsilphonelist $silphonelist >$lang/topo
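  # The resulting topology can traverse a phone in as little as one frame,
  # which is what makes the 3x frame subsampling used below workable.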
fi

if [ $stage -le 11 ]; then
  # Build a tree using our new topology.  This is the critically different
  # step compared with other recipes.
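  # The --frame-subsampling-factor 3 below matches the reduced (one third)
  # output frame rate of the chain model.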
  steps/nnet3/chain/build_tree.sh --frame-subsampling-factor 3 \
    --leftmost-questions-truncate $leftmost_questions_truncate \
    --context-opts "--context-width=2 --central-position=1" \
    --cmd "$train_cmd" 5000 data/$train_set $lang $ali_dir $treedir
fi

if [ $stage -le 12 ]; then
  echo "$0: creating neural net configs using the xconfig parser";

  num_targets=$(tree-info $treedir/tree | grep num-pdfs | awk '{print $2}')
  learning_rate_factor=$(python -c "print(0.5/$xent_regularize)")

  mkdir -p $dir/configs
  cat <<EOF > $dir/configs/network.xconfig
  input dim=100 name=ivector
  input dim=43 name=input

  # please note that it is important to have input layer with the name=input
  # as the layer immediately preceding the fixed-affine-layer to enable
  # the use of short notation for the descriptor
  fixed-affine-layer name=lda input=Append(-1,0,1,ReplaceIndex(ivector, t, 0)) affine-transform-file=$dir/configs/lda.mat
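  # ReplaceIndex(ivector, t, 0) requests the iVector at t=0, so the same
  # iVector is appended to the spliced features at every frame of the chunk.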
  # the first splicing is moved before the lda layer, so no splicing here
  relu-renorm-layer name=tdnn1 dim=625
  relu-renorm-layer name=tdnn2 input=Append(-1,0,1) dim=625
  relu-renorm-layer name=tdnn3 input=Append(-1,0,1) dim=625
  relu-renorm-layer name=tdnn4 input=Append(-3,0,3) dim=625
  relu-renorm-layer name=tdnn5 input=Append(-3,0,3) dim=625
  relu-renorm-layer name=tdnn6 input=Append(-3,0,3) dim=625

  ## adding the layers for chain branch
  relu-renorm-layer name=prefinal-chain input=tdnn6 dim=625 target-rms=0.5
  output-layer name=output include-log-softmax=false dim=$num_targets max-change=1.5

  # adding the layers for xent branch
  # This block prints the configs for a separate output that will be
  # trained with a cross-entropy objective in the 'chain' models... this
  # has the effect of regularizing the hidden parts of the model.  We use
  # 0.5 / xent_regularize as the learning-rate factor; this is suitable as
  # it means the xent final-layer learns at a rate independent of the
  # regularization constant, and the 0.5 was tuned so as to make the
  # relative progress similar in the xent and regular final layers.
  relu-renorm-layer name=prefinal-xent input=tdnn6 dim=625 target-rms=0.5
  output-layer name=output-xent dim=$num_targets learning-rate-factor=$learning_rate_factor max-change=1.5

EOF
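  # xconfig_to_configs.py expands the xconfig above into the low-level nnet3
  # config files (init.config, ref.config, final.config) under $dir/configs.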
  steps/nnet3/xconfig_to_configs.py --xconfig-file $dir/configs/network.xconfig --config-dir $dir/configs/
fi

if [ $stage -le 13 ]; then
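  # On the CLSP grid at JHU, spread the egs over several disks via
  # create_split_dir.pl; on other machines this block is a no-op.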
  if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $dir/egs/storage ]; then
    utils/create_split_dir.pl \
      /export/b0{5,6,7,8}/$USER/kaldi-data/egs/hkust-$(date +'%m_%d_%H_%M')/s5c/$dir/egs/storage $dir/egs/storage
  fi

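  # A few of the options below, briefly: --chain.xent-regularize is the weight
  # on the auxiliary cross-entropy output; --chain.leaky-hmm-coefficient adds a
  # small "leak" probability between HMM states as a form of smoothing; and
  # --egs.chunk-width is the number of output frames per training example.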
  steps/nnet3/chain/train.py --stage $train_stage \
    --cmd "$decode_cmd" \
    --feat.online-ivector-dir exp/nnet3/ivectors_${train_set} \
    --feat.cmvn-opts "--norm-means=false --norm-vars=false" \
    --chain.xent-regularize $xent_regularize \
    --chain.leaky-hmm-coefficient 0.1 \
    --chain.l2-regularize 0.00005 \
    --chain.apply-deriv-weights false \
    --chain.lm-opts="--num-extra-lm-states=2000" \
    --egs.dir "$common_egs_dir" \
    --egs.stage $get_egs_stage \
    --egs.opts "--frames-overlap-per-eg 0" \
    --egs.chunk-width $frames_per_eg \
    --trainer.num-chunk-per-minibatch $minibatch_size \
    --trainer.frames-per-iter 1500000 \
    --trainer.num-epochs $num_epochs \
    --trainer.optimization.num-jobs-initial $num_jobs_initial \
    --trainer.optimization.num-jobs-final $num_jobs_final \
    --trainer.optimization.initial-effective-lrate $initial_effective_lrate \
    --trainer.optimization.final-effective-lrate $final_effective_lrate \
    --trainer.max-param-change $max_param_change \
    --cleanup.remove-egs $remove_egs \
    --feat-dir data/${train_set}_hires \
    --tree-dir $treedir \
    --lat-dir exp/tri5a_sp_lats \
    --dir $dir || exit 1;
fi

if [ $stage -le 14 ]; then
  # Note: it might appear that this $lang directory is mismatched, and it is,
  # as far as the 'topo' is concerned, but this script doesn't read the 'topo'
  # from the lang directory.
  utils/mkgraph.sh --self-loop-scale 1.0 data/lang_test $dir $dir/graph
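  # --self-loop-scale 1.0 is needed for chain models; it matches the acoustic
  # scale of 1.0 used at decode time.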
fi

graph_dir=$dir/graph
if [ $stage -le 15 ]; then
  iter_opts=
  if [ ! -z "$decode_iter" ]; then
    iter_opts=" --iter $decode_iter "
  fi
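  # Chain models are decoded with an acoustic scale of 1.0; --post-decode-acwt
  # 10.0 scales the acoustic scores in the lattice back to the usual range so
  # that the standard LM weights work at scoring time.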
  steps/nnet3/decode.sh --acwt 1.0 --post-decode-acwt 10.0 \
    --nj 10 --cmd "$decode_cmd" $iter_opts \
    --online-ivector-dir exp/nnet3/ivectors_dev \
    $graph_dir data/dev_hires $dir/decode || exit 1;
fi

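# This prepares a self-contained directory (${dir}_online) bundling the model,
# the iVector extractor and the feature config (including pitch), so decoding
# can be run directly from audio.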
if [ $stage -le 16 ]; then
  steps/online/nnet3/prepare_online_decoding.sh --mfcc-config conf/mfcc_hires.conf \
    --add-pitch true \
    data/lang exp/nnet2_online/extractor "$dir" ${dir}_online || exit 1;
fi

if [ $stage -le 17 ]; then
  # do the actual online decoding with iVectors, carrying info forward from
  # previous utterances of the same speaker.
  steps/online/nnet3/decode.sh --config conf/decode.config \
    --cmd "$decode_cmd" --nj 10 --acwt 1.0 --post-decode-acwt 10.0 \
    "$graph_dir" data/dev_hires \
    ${dir}_online/decode || exit 1;
fi

if [ $stage -le 18 ]; then
  # this version of the decoding treats each utterance separately
  # without carrying forward speaker information.
  steps/online/nnet3/decode.sh --config conf/decode.config \
    --cmd "$decode_cmd" --nj 10 --per-utt true --acwt 1.0 --post-decode-acwt 10.0 \
    "$graph_dir" data/dev_hires \
    ${dir}_online/decode_per_utt || exit 1;
fi