#!/bin/bash
#####################################
# SpeechLM Base model               #
#####################################
[ $# -lt 2 ] && echo "Usage: $0 <model_path> <data_dir> [gen-set=dev_clean,dev_other,test_clean,test_other]" && exit 1
[ "${PWD##*/}" != "SpeechLM" ] && echo "Error: wrong directory! Switch to SpeechLM/ and run this script again." && exit 1
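# Example invocation (illustrative paths):
#   bash $0 exp/base/checkpoint_best.pt data/librispeech/manifests dev_clean,dev_other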

model_path=$1
DATA_DIR=$2
gen_set=$3
[ -z "$gen_set" ] && gen_set="dev_clean,dev_other,test_clean,test_other"
src_dir=${model_path%/*}
cpt=${model_path##*/}
cpt=${cpt%.*}
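# e.g. model_path=exp/base/checkpoint_best.pt -> src_dir=exp/base, cpt=checkpoint_best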

CODE_ROOT=${PWD}

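# Decode each subset in parallel: the comma-separated gen_set is split on commas and
# one background job is launched per subset; 'wait' below blocks until all finish.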
for subset in ${gen_set//,/ }; do
    results_path=$src_dir/decode_${cpt}_ctc/${subset}
    mkdir -p "${results_path}"

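    # Viterbi (LM-free best-path) CTC decoding via the hydra config 'infer_viterbi';
    # decoding outputs are written to ${results_path}.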
    python $CODE_ROOT/speechlm/infer.py \
    --config-dir $CODE_ROOT/speechlm/config/decode \
    --config-name infer_viterbi \
    common.user_dir=$CODE_ROOT/speechlm \
    \
    dataset.gen_subset=${subset} \
    task.data=$DATA_DIR task.label_dir=$DATA_DIR task.normalize=false \
    common_eval.results_path=${results_path} common_eval.path=${model_path} \
    \
    common_eval.quiet=true \
    &
done
wait

### important to know
# When loading the fine-tuned model for decoding, fairseq also loads the pre-trained
# checkpoint referenced by w2v_path in order to build the model instance.
# To avoid an error when no pre-trained model exists at w2v_path, we set
# common_eval.model_overrides to redirect w2v_path to speechlmp_base_cfg.pt, a
# checkpoint that contains only the model config (no parameters).
# So, if you trained a model with a different config (e.g. a different number of
# encoder layers), adjust common_eval.model_overrides to point at your own config:
    # common_eval.model_overrides=\"{\'w2v_path\':\'$CODE_ROOT/speechlm/config/pretrain/speechlmp_base_cfg.pt\'}\" \
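    # For example (hypothetical path), after pre-training with your own config you
    # could save a config-only checkpoint and point the override at it instead:
    # common_eval.model_overrides=\"{\'w2v_path\':\'/path/to/your_own_cfg.pt\'}\" \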