Commit 7fd54997 authored by Abdelwahab HEBA's avatar Abdelwahab HEBA
Browse files

French ASR system

parent 37514a2a
# cmd.sh: selects the parallelization wrapper used by all recipe scripts.
# NOTE(review): the wrapper names (queue.pl / run.pl / slurm.pl) were stripped
# from this scraped copy; restored below from the standard Kaldi convention.
# you can change cmd.sh depending on what type of queue you are using.
# If you have no queueing system and want to run on a local machine, you
# can change all instances 'queue.pl' to 'run.pl' (but be careful and run
# commands one by one: most recipes will exhaust the memory on your
# machine). queue.pl works with GridEngine (qsub). slurm.pl works
# with slurm. Different queues are configured differently, with different
# queue names and different ways of specifying things like memory;
# to account for these differences you can create and edit the file
# conf/queue.conf to match your queue's configuration. Search for
# conf/queue.conf in http://kaldi-asr.org/doc/queue.html for more information,
# or search for the string 'default_config' in utils/queue.pl or utils/slurm.pl.
#export train_cmd="queue.pl --mem 2G"
#export decode_cmd="queue.pl --mem 4G"
#export mkgraph_cmd="queue.pl --mem 8G"
# Local machine
export train_cmd="run.pl --mem 2G"
export decode_cmd="run.pl --mem 4G"
export mkgraph_cmd="run.pl --mem 8G"
# mfcc.conf: configuration for standard (13-dim) MFCC feature extraction,
# passed to compute-mfcc-feats.
# empty config, just use the defaults.
--use-energy=false # only non-default option.
# config for high-resolution MFCC features, intended for neural network training
# Note: we keep all cepstra, so it has the same info as filterbank features,
# but MFCC is more easily compressible (because less correlated) which is why
# we prefer this method.
--use-energy=false # use average of log energy, not energy.
--num-mel-bins=40 # similar to Google's setup.
--num-ceps=40 # there is no dimensionality reduction.
--low-freq=20 # low cutoff frequency for mel bins... this is high-bandwidth data, so
# there might be some information at the low end.
--high-freq=-400 # high cutoff frequency; negative means relative to Nyquist of 8000 (=7600)
# configuration file for apply-cmvn-online, used in the script ../local/
## online_pitch.conf: options for pitch extraction / post-processing.
## This config is given by conf/ to the program compute-and-process-kaldi-pitch-feats,
## and is copied by steps/online/nnet2/ and similar scripts, to be given
## to programs like online2-wav-nnet2-latgen-faster.
## The program compute-and-process-kaldi-pitch-feats will use it to compute pitch features that
## are the same as those which will be generated in online decoding; this enables us to train
## in a way that's compatible with online decoding.
## most of these options relate to the post-processing rather than the pitch
## extraction itself.
--add-raw-log-pitch=true ## this is intended for input to neural nets, so our
## approach is "throw everything in and see what
## sticks".
--normalization-right-context=50 # We're removing some of the right-context
# for the normalization. Would normally be 75.
# Note: our changes to the (left,right) context
# from the defaults of (75,75) to (75,50) will
# almost certainly worsen results, but will
# reduce latency.
--frames-per-chunk=10 ## relates to offline simulation of online decoding; 1
## would be equivalent to getting in samples one by
## one.
--simulate-first-pass-online=true ## this makes the online-pitch-extraction code
## output the 'first-pass' features, which
## are less accurate than the final ones, and
## which are the only features the neural-net
## decoding would ever see (since we can't
## afford to do lattice rescoring in the
## neural-net code)
# queue.conf: maps abstract resource options (mem=, num_threads=, gpu=, ...)
# to GridEngine qsub flags; read by the queue wrapper script.
# Default configuration
command qsub -v PATH -cwd -S /bin/bash -j y -l arch=*64*
option mem=* -l mem_free=$0,ram_free=$0
option mem=0 # Do not add anything to qsub_opts
option num_threads=* -pe smp $0
option num_threads=1 # Do not add anything to qsub_opts
option max_jobs_run=* -tc $0
default gpu=0
option gpu=0 -q all.q
option gpu=* -l gpu=$0 -q g.q
default allow_k20=true
option allow_k20=true
# exclude hosts g01*, g02*, b06* when K20 GPUs are not allowed
option allow_k20=false -l 'hostname=!g01*&!g02*&!b06*'
# Installation / smoke-test notes for the num2words package (used for French
# text normalization of numerals).
# NOTE(review): the two "python" lines below look truncated by the scrape --
# presumably "python setup.py install" / "python setup.py test"; the last two
# lines are a Python-interpreter snippet, not shell.  TODO confirm against the
# original repository.
pip install num2words
python install
python test
from num2words import num2words
num2words(42, lang='fr')
# path.sh: sets up KALDI_ROOT, PATH and locale for the recipe.
# NOTE(review): the filename "common_path.sh" was stripped from the scraped
# copy of the existence check and the source line; restored below from the
# standard Kaldi layout ($KALDI_ROOT/tools/config/common_path.sh).
export KALDI_ROOT=`pwd`/../../..
export PATH=$PWD/tools/festival/nsw/bin:$PWD/utils/:$KALDI_ROOT/tools/openfst/bin:$PWD:$PATH
[ ! -f $KALDI_ROOT/tools/config/common_path.sh ] && echo >&2 "The standard file $KALDI_ROOT/tools/config/common_path.sh is not present -> Exit!" && exit 1
. $KALDI_ROOT/tools/config/common_path.sh
export LC_ALL=C
# we use this both in the (optional) LM training and the G2P-related scripts
### Below are the paths used by the optional parts of the recipe
# We only need the Festival stuff below for the optional text normalization(for LM-training) step
# SRILM is needed for LM model building
# Sequitur G2P executable
# NOTE(review): the definitions of $sequitur and $PYTHON appear to have been
# lost in the scrape -- the line below reads them but nothing sets them here.
sequitur_path="$(dirname $sequitur)/lib/$PYTHON/site-packages"
# Directory under which the LM training corpus should be extracted
# data dir
# NOTE(review): this chunk is from a web-scraped copy of a Kaldi-style run
# script; the script filenames after "steps/", "utils/" and "local/" were
# stripped (they were hyperlinks), and loop/if terminators seem to be missing.
# The two ". ./" lines below presumably sourced cmd.sh and path.sh -- TODO
# restore all names from the original repository before running.
. ./
. ./
# you might not want to do this for interactive shells.
set -e
# format the data as Kaldi data directories
for part in train dev test ; do
# use underscore-separated names in data directories.
local/ $data/LibriSpeech/$part data/$part
# NOTE(review): the loop's "done" appears to be missing here -- scrape damage.
## Optional text corpus normalization and LM training
## These scripts are here primarily as a documentation of the process that has been
## used to build the LM. Most users of this recipe will NOT need/want to run
## this step. The pre-built language models and the pronunciation lexicon, as
## well as some intermediate data(e.g. the normalized text used for LM training),
## are available for download at
#local/lm/ $LM_CORPUS_ROOT \
# data/local/lm/norm/tmp data/local/lm/norm/norm_texts data/local/lm
## Optional G2P training scripts.
## As the LM training scripts above, this script is intended primarily to
## document our G2P model creation process
#local/g2p/ data/local/dict/cmudict data/local/lm
# when "--stage 3" option is used below we skip the G2P steps, and use the
# lexicon we have already downloaded from
local/ --stage 3 --nj 30 --cmd "$train_cmd" \
data/local/lm data/local/lm data/local/dict_nosp
utils/ data/local/dict_nosp \
"<UNK>" data/local/lang_tmp_nosp data/lang_nosp
local/ --src-dir data/lang_nosp data/local/lm
# Create ConstArpaLm format language model for full 3-gram and 4-gram LMs
utils/ data/local/lm/ \
data/lang_nosp data/lang_nosp_test_tglarge
utils/ data/local/lm/ \
data/lang_nosp data/lang_nosp_test_fglarge
# NOTE(review): scraped chunk -- script filenames after "steps/" and "utils/"
# were stripped, and loop/if terminators are missing; the structure matches
# the Kaldi librispeech s5 MFCC-extraction / mono / tri1 stages (e.g.
# steps/make_mfcc.sh, steps/train_mono.sh) -- TODO restore before running.
# spread the mfccs over various machines, as this data-set is quite large.
if [[ $(hostname -f) == * ]]; then
mfcc=$(basename mfccdir) # in case was absolute pathname (unlikely), get basename.
utils/ /export/b{02,11,12,13}/$USER/kaldi-data/egs/librispeech/s5/$mfcc/storage \
for part in dev_clean test_clean dev_other test_other train_clean_100; do
steps/ --cmd "$train_cmd" --nj 40 data/$part exp/make_mfcc/$part $mfccdir
steps/ data/$part exp/make_mfcc/$part $mfccdir
# Make some small data subsets for early system-build stages. Note, there are 29k
# utterances in the train_clean_100 directory which has 100 hours of data.
# For the monophone stages we select the shortest utterances, which should make it
# easier to align the data from a flat start.
utils/ --shortest data/train_clean_100 2000 data/train_2kshort
utils/ data/train_clean_100 5000 data/train_5k
utils/ data/train_clean_100 10000 data/train_10k
# train a monophone system
steps/ --boost-silence 1.25 --nj 20 --cmd "$train_cmd" \
data/train_2kshort data/lang_nosp exp/mono
# decode using the monophone model
utils/ --mono data/lang_nosp_test_tgsmall \
exp/mono exp/mono/graph_nosp_tgsmall
for test in test_clean test_other dev_clean dev_other; do
steps/ --nj 20 --cmd "$decode_cmd" exp/mono/graph_nosp_tgsmall \
data/$test exp/mono/decode_nosp_tgsmall_$test
steps/ --boost-silence 1.25 --nj 10 --cmd "$train_cmd" \
data/train_5k data/lang_nosp exp/mono exp/mono_ali_5k
# train a first delta + delta-delta triphone system on a subset of 5000 utterances
steps/ --boost-silence 1.25 --cmd "$train_cmd" \
2000 10000 data/train_5k data/lang_nosp exp/mono_ali_5k exp/tri1
# decode using the tri1 model
utils/ data/lang_nosp_test_tgsmall \
exp/tri1 exp/tri1/graph_nosp_tgsmall
for test in test_clean test_other dev_clean dev_other; do
steps/ --nj 20 --cmd "$decode_cmd" exp/tri1/graph_nosp_tgsmall \
data/$test exp/tri1/decode_nosp_tgsmall_$test
steps/ --cmd "$decode_cmd" data/lang_nosp_test_{tgsmall,tgmed} \
data/$test exp/tri1/decode_nosp_{tgsmall,tgmed}_$test
steps/ \
--cmd "$decode_cmd" data/lang_nosp_test_{tgsmall,tglarge} \
data/$test exp/tri1/decode_nosp_{tgsmall,tglarge}_$test
steps/ \
--nj 10 --cmd "$train_cmd" \
data/train_10k data/lang_nosp exp/tri1 exp/tri1_ali_10k
# NOTE(review): scraped chunk -- script filenames after "steps/" and "utils/"
# were stripped and loop terminators are missing; structure matches the Kaldi
# librispeech s5 tri2b (LDA+MLLT) / tri3b (SAT) / tri4b stages -- TODO restore
# names (e.g. steps/train_lda_mllt.sh, steps/train_sat.sh) before running.
# train an LDA+MLLT system.
steps/ --cmd "$train_cmd" \
--splice-opts "--left-context=3 --right-context=3" 2500 15000 \
data/train_10k data/lang_nosp exp/tri1_ali_10k exp/tri2b
# decode using the LDA+MLLT model
utils/ data/lang_nosp_test_tgsmall \
exp/tri2b exp/tri2b/graph_nosp_tgsmall
for test in test_clean test_other dev_clean dev_other; do
steps/ --nj 20 --cmd "$decode_cmd" exp/tri2b/graph_nosp_tgsmall \
data/$test exp/tri2b/decode_nosp_tgsmall_$test
steps/ --cmd "$decode_cmd" data/lang_nosp_test_{tgsmall,tgmed} \
data/$test exp/tri2b/decode_nosp_{tgsmall,tgmed}_$test
steps/ \
--cmd "$decode_cmd" data/lang_nosp_test_{tgsmall,tglarge} \
data/$test exp/tri2b/decode_nosp_{tgsmall,tglarge}_$test
# Align a 10k utts subset using the tri2b model
steps/ --nj 10 --cmd "$train_cmd" --use-graphs true \
data/train_10k data/lang_nosp exp/tri2b exp/tri2b_ali_10k
# Train tri3b, which is LDA+MLLT+SAT on 10k utts
steps/ --cmd "$train_cmd" 2500 15000 \
data/train_10k data/lang_nosp exp/tri2b_ali_10k exp/tri3b
# decode using the tri3b model
utils/ data/lang_nosp_test_tgsmall \
exp/tri3b exp/tri3b/graph_nosp_tgsmall
for test in test_clean test_other dev_clean dev_other; do
steps/ --nj 20 --cmd "$decode_cmd" \
exp/tri3b/graph_nosp_tgsmall data/$test \
steps/ --cmd "$decode_cmd" data/lang_nosp_test_{tgsmall,tgmed} \
data/$test exp/tri3b/decode_nosp_{tgsmall,tgmed}_$test
steps/ \
--cmd "$decode_cmd" data/lang_nosp_test_{tgsmall,tglarge} \
data/$test exp/tri3b/decode_nosp_{tgsmall,tglarge}_$test
# align the entire train_clean_100 subset using the tri3b model
steps/ --nj 20 --cmd "$train_cmd" \
data/train_clean_100 data/lang_nosp \
exp/tri3b exp/tri3b_ali_clean_100
# train another LDA+MLLT+SAT system on the entire 100 hour subset
steps/ --cmd "$train_cmd" 4200 40000 \
data/train_clean_100 data/lang_nosp \
exp/tri3b_ali_clean_100 exp/tri4b
# decode using the tri4b model
utils/ data/lang_nosp_test_tgsmall \
exp/tri4b exp/tri4b/graph_nosp_tgsmall
for test in test_clean test_other dev_clean dev_other; do
steps/ --nj 20 --cmd "$decode_cmd" \
exp/tri4b/graph_nosp_tgsmall data/$test \
steps/ --cmd "$decode_cmd" data/lang_nosp_test_{tgsmall,tgmed} \
data/$test exp/tri4b/decode_nosp_{tgsmall,tgmed}_$test
steps/ \
--cmd "$decode_cmd" data/lang_nosp_test_{tgsmall,tglarge} \
data/$test exp/tri4b/decode_nosp_{tgsmall,tglarge}_$test
steps/ \
--cmd "$decode_cmd" data/lang_nosp_test_{tgsmall,fglarge} \
data/$test exp/tri4b/decode_nosp_{tgsmall,fglarge}_$test
# NOTE(review): scraped chunk -- script filenames were stripped and loop
# terminators are missing; structure matches the Kaldi librispeech s5
# silprob-lexicon / tri5b stages (e.g. steps/get_prons.sh,
# utils/dict_dir_add_pronprobs.sh) -- TODO restore before running.
# Now we compute the pronunciation and silence probabilities from training data,
# and re-create the lang directory.
steps/ --cmd "$train_cmd" \
data/train_clean_100 data/lang_nosp exp/tri4b
utils/ --max-normalize true \
data/local/dict_nosp \
exp/tri4b/pron_counts_nowb.txt exp/tri4b/sil_counts_nowb.txt \
exp/tri4b/pron_bigram_counts_nowb.txt data/local/dict
utils/ data/local/dict \
"<UNK>" data/local/lang_tmp data/lang
local/ --src-dir data/lang data/local/lm
utils/ \
data/local/lm/ data/lang data/lang_test_tglarge
utils/ \
data/local/lm/ data/lang data/lang_test_fglarge
# decode using the tri4b model with pronunciation and silence probabilities
utils/ \
data/lang_test_tgsmall exp/tri4b exp/tri4b/graph_tgsmall
for test in test_clean test_other dev_clean dev_other; do
steps/ --nj 20 --cmd "$decode_cmd" \
exp/tri4b/graph_tgsmall data/$test \
steps/ --cmd "$decode_cmd" data/lang_test_{tgsmall,tgmed} \
data/$test exp/tri4b/decode_{tgsmall,tgmed}_$test
steps/ \
--cmd "$decode_cmd" data/lang_test_{tgsmall,tglarge} \
data/$test exp/tri4b/decode_{tgsmall,tglarge}_$test
steps/ \
--cmd "$decode_cmd" data/lang_test_{tgsmall,fglarge} \
data/$test exp/tri4b/decode_{tgsmall,fglarge}_$test
# align train_clean_100 using the tri4b model
steps/ --nj 30 --cmd "$train_cmd" \
data/train_clean_100 data/lang exp/tri4b exp/tri4b_ali_clean_100
# if you want at this point you can train and test NN model(s) on the 100 hour
# subset
local/ $data $data_url train-clean-360
# now add the "clean-360" subset to the mix ...
local/ \
$data/LibriSpeech/train-clean-360 data/train_clean_360
steps/ --cmd "$train_cmd" --nj 40 data/train_clean_360 \
exp/make_mfcc/train_clean_360 $mfccdir
steps/ \
data/train_clean_360 exp/make_mfcc/train_clean_360 $mfccdir
# ... and then combine the two sets into a 460 hour one
utils/ \
data/train_clean_460 data/train_clean_100 data/train_clean_360
# align the new, combined set, using the tri4b model
steps/ --nj 40 --cmd "$train_cmd" \
data/train_clean_460 data/lang exp/tri4b exp/tri4b_ali_clean_460
# create a larger SAT model, trained on the 460 hours of data.
steps/ --cmd "$train_cmd" 5000 100000 \
data/train_clean_460 data/lang exp/tri4b_ali_clean_460 exp/tri5b
# decode using the tri5b model
utils/ data/lang_test_tgsmall \
exp/tri5b exp/tri5b/graph_tgsmall
for test in test_clean test_other dev_clean dev_other; do
steps/ --nj 20 --cmd "$decode_cmd" \
exp/tri5b/graph_tgsmall data/$test \
steps/ --cmd "$decode_cmd" data/lang_test_{tgsmall,tgmed} \
data/$test exp/tri5b/decode_{tgsmall,tgmed}_$test
steps/ \
--cmd "$decode_cmd" data/lang_test_{tgsmall,tglarge} \
data/$test exp/tri5b/decode_{tgsmall,tglarge}_$test
steps/ \
--cmd "$decode_cmd" data/lang_test_{tgsmall,fglarge} \
data/$test exp/tri5b/decode_{tgsmall,fglarge}_$test
# NOTE(review): scraped chunk -- script filenames were stripped and loop
# terminators are missing; structure matches the Kaldi librispeech s5 960h /
# tri6b / nnet3-chain stages -- TODO restore names (e.g. steps/train_quick.sh,
# local/chain/run_tdnn.sh) before running.
# train a NN model on the 460 hour set
local/ $data $data_url train-other-500
# prepare the 500 hour subset.
local/ \
$data/LibriSpeech/train-other-500 data/train_other_500
steps/ --cmd "$train_cmd" --nj 40 data/train_other_500 \
exp/make_mfcc/train_other_500 $mfccdir
steps/ \
data/train_other_500 exp/make_mfcc/train_other_500 $mfccdir
# combine all the data
utils/ \
data/train_960 data/train_clean_460 data/train_other_500
steps/ --nj 40 --cmd "$train_cmd" \
data/train_960 data/lang exp/tri5b exp/tri5b_ali_960
# train a SAT model on the 960 hour mixed data. Use the script
# as it is faster.
steps/ --cmd "$train_cmd" \
7000 150000 data/train_960 data/lang exp/tri5b_ali_960 exp/tri6b
# decode using the tri6b model
utils/ data/lang_test_tgsmall \
exp/tri6b exp/tri6b/graph_tgsmall
for test in test_clean test_other dev_clean dev_other; do
steps/ --nj 20 --cmd "$decode_cmd" \
exp/tri6b/graph_tgsmall data/$test exp/tri6b/decode_tgsmall_$test
steps/ --cmd "$decode_cmd" data/lang_test_{tgsmall,tgmed} \
data/$test exp/tri6b/decode_{tgsmall,tgmed}_$test
steps/ \
--cmd "$decode_cmd" data/lang_test_{tgsmall,tglarge} \
data/$test exp/tri6b/decode_{tgsmall,tglarge}_$test
steps/ \
--cmd "$decode_cmd" data/lang_test_{tgsmall,fglarge} \
data/$test exp/tri6b/decode_{tgsmall,fglarge}_$test
# this does some data-cleaning. The cleaned data should be useful when we add
# the neural net and chain systems.
# steps/cleanup/ --remove-stress true --nj 200 --cmd "$train_cmd" data/train_clean_100 \
# data/lang exp/tri6b data/local/dict/lexicon.txt exp/debug_lexicon_100h
# #Perform rescoring of tri6b be means of faster-rnnlm
# #Attention: with default settings requires 4 GB of memory per rescoring job, so commenting this out by default
# wait && local/ \
# --rnnlm-ver "faster-rnnlm" \
# --rnnlm-options "-hidden 150 -direct 1000 -direct-order 5" \
# --rnnlm-tag "h150-me5-1000" $data data/local/lm
# #Perform rescoring of tri6b be means of faster-rnnlm using Noise contrastive estimation
# #Note, that could be extremely slow without CUDA
# #We use smaller direct layer size so that it could be stored in GPU memory (~2Gb)
# #Suprisingly, bottleneck here is validation rather then learning
# #Therefore you can use smaller validation dataset to speed up training
# wait && local/ \
# --rnnlm-ver "faster-rnnlm" \
# --rnnlm-options "-hidden 150 -direct 400 -direct-order 3 --nce 20" \
# --rnnlm-tag "h150-me3-400-nce20" $data data/local/lm
# train nnet3 tdnn models on the entire data with data-cleaning (xent and chain)
local/chain/ # set "--stage 11" if you have already run local/nnet3/
# The nnet3 TDNN recipe:
# local/nnet3/ # set "--stage 11" if you have already run local/chain/
# # train models on cleaned-up data
# # we've found that this isn't helpful-- see the comments in local/
# local/
# # The following is the current online-nnet2 recipe, with "multi-splice".
# local/online/
# # The following is the discriminative-training continuation of the above.
# local/online/
# ## The following is an older version of the online-nnet2 recipe, without "multi-splice". It's faster
# ## to train but slightly worse.
# # local/online/
# Wait for decodings in the background
\ No newline at end of file
\ No newline at end of file
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment