command:
  - python3
  - ${program}
  - --use_auth_token
  - --group_by_length
  - --overwrite_output_dir
  - --fp16
  - --do_lower_case
  - --do_eval
  - --do_train
  - --fuse_loss_wer
  - ${args}
method: grid
metric:
  goal: minimize
  name: train/train_loss
parameters:
  config_path:
    value: conf/conformer_transducer_bpe_xlarge.yaml
  dataset_config_name:
    value: clean
  dataset_name:
    value: librispeech_asr
  max_steps:
    value: 50
  model_name_or_path:
    value: stt_en_conformer_transducer_xlarge
  output_dir:
    value: ./sweep_output_dir
  gradient_accumulation_steps:
    values:
      - 1
      - 2
  per_device_train_batch_size:
    values:
      - 8
      - 16
  fused_batch_size:
    values:
      - 4
      - 8
      - 16
  per_device_eval_batch_size:
    value: 4
  preprocessing_num_workers:
    value: 1
  train_split_name:
    value: train.100[:500]
  eval_split_name:
    value: validation[:100]
  tokenizer_path:
    value: tokenizer
  vocab_size:
    value: 1024
  wandb_project:
    value: rnnt-debug
  logging_steps:
    value: 5
program: run_speech_recognition_rnnt.py
project: rnnt-debug
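# Minimal launch sketch for this grid sweep (assumptions: this config is saved
# as sweep.yaml alongside run_speech_recognition_rnnt.py, and the `wandb` CLI
# is installed and authenticated; the file name and <entity> are placeholders):
#   wandb sweep sweep.yaml
#   wandb agent <entity>/rnnt-debug/<sweep_id>
# Each agent run executes the `command` above, with one grid combination of the
# swept parameters (gradient_accumulation_steps, per_device_train_batch_size,
# fused_batch_size) appended as CLI flags via ${args}.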