#!/usr/bin/env bash
# Use a clean PYTHONPATH and activate the "whisper" conda environment,
# then print the Python version for the log.
export PYTHONPATH=""
source /esat/spchtemp/scratch/jponcele/anaconda3/bin/activate whisper
python --version
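# Commented-out options kept for reference: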
#--dataset_config_name="nl" \
#--language="dutch" \
python run_speech_recognition_seq2seq_streaming.py \
--model_name_or_path="openai/whisper-small" \
--dataset_name="kul-speech-lab/CGN" \
--train_split_name="train+validation" \
--eval_split_name="test" \
--language="dutch" \
--task="transcribe" \
--model_index_name="Whisper Small CGN" \
--max_steps="15000" \
--output_dir="/esat/audioslave/jponcele/whisper/finetuning_event/CGN/small" \
--per_device_train_batch_size="16" \
--per_device_eval_batch_size="8" \
--gradient_accumulation_steps="4" \
--logging_steps="100" \
--learning_rate="1e-5" \
--warmup_steps="500" \
--evaluation_strategy="steps" \
--eval_steps="1000" \
--save_strategy="steps" \
--save_steps="1000" \
--generation_max_length="225" \
--length_column_name="input_length" \
--max_duration_in_seconds="30" \
--text_column_name="transcription" \
--freeze_feature_encoder="False" \
--report_to="tensorboard" \
--metric_for_best_model="wer" \
--greater_is_better="False" \
--load_best_model_at_end \
--gradient_checkpointing \
--fp16 \
--do_train \
--do_eval \
--predict_with_generate \
--do_normalize_eval \
--streaming \
--use_auth_token
# --overwrite_output_dir
# --push_to_hub
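# --overwrite_output_dir lets the run reuse a non-empty output_dir;
# --push_to_hub uploads the fine-tuned model to the Hugging Face Hub.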