Updated run_small.sh to match the end of run1 training
Browse files
src/run_small.sh
CHANGED
@@ -7,10 +7,10 @@ python src/run_speech_recognition_seq2seq_streaming.py \
|
|
7 |
--eval_split_name="validation" \
|
8 |
--model_index_name="Whisper Small Belarusian" \
|
9 |
\
|
10 |
- --max_steps="
|
11 |
--output_dir="./" \
|
12 |
--per_device_train_batch_size="64" \
|
13 |
- --per_device_eval_batch_size="
|
14 |
--logging_steps="50" \
|
15 |
--logging_first_step \
|
16 |
--learning_rate="1e-4" \
|
@@ -34,7 +34,6 @@ python src/run_speech_recognition_seq2seq_streaming.py \
|
|
34 |
\
|
35 |
--do_train \
|
36 |
--do_eval \
|
37 |
- --resume_from_checkpoint="checkpoint-12000" \
|
38 |
--ignore_data_skip \
|
39 |
--predict_with_generate \
|
40 |
--do_normalize_eval \
|
|
|
7 |
--eval_split_name="validation" \
|
8 |
--model_index_name="Whisper Small Belarusian" \
|
9 |
\
|
10 |
+ --max_steps="12000" \
|
11 |
--output_dir="./" \
|
12 |
--per_device_train_batch_size="64" \
|
13 |
+ --per_device_eval_batch_size="64" \
|
14 |
--logging_steps="50" \
|
15 |
--logging_first_step \
|
16 |
--learning_rate="1e-4" \
|
|
|
34 |
\
|
35 |
--do_train \
|
36 |
--do_eval \
|
|
|
37 |
--ignore_data_skip \
|
38 |
--predict_with_generate \
|
39 |
--do_normalize_eval \
|
src/run_speech_recognition_seq2seq_streaming.py
CHANGED
@@ -720,6 +720,7 @@ def main():
|
|
720 |
checkpoint = training_args.resume_from_checkpoint
|
721 |
elif last_checkpoint is not None:
|
722 |
checkpoint = last_checkpoint
|
|
|
723 |
train_result = trainer.train(resume_from_checkpoint=checkpoint)
|
724 |
trainer.save_model() # Saves the feature extractor too for easy upload
|
725 |
|
|
|
720 |
checkpoint = training_args.resume_from_checkpoint
|
721 |
elif last_checkpoint is not None:
|
722 |
checkpoint = last_checkpoint
|
723 |
+ logger.info(f'will launch training and pass resume_from_checkpoint={checkpoint}')
|
724 |
train_result = trainer.train(resume_from_checkpoint=checkpoint)
|
725 |
trainer.save_model() # Saves the feature extractor too for easy upload
|
726 |
|