bf16: true
cutoff_len: 16384
dataset: mlfoundations-dev/r1_annotated_math
dataset_dir: ONLINE
ddp_timeout: 180000000
deepspeed: dcft/train/zero3.json
do_train: true
eval_strategy: 'no'
finetuning_type: full
formatting: sharegpt
global_batch_size: 96
gradient_accumulation_steps: 3
hub_model_id: mlfoundations-dev/llama3-1_8b_r1_annotated_math
include_hp: dcft/train/hp_settings/reasoning.yaml
learning_rate: 1.0e-05
logging_steps: 1
lr_scheduler_type: cosine
max_samples: 1000000
messages: conversations
model_name_or_path: Qwen/Qwen2.5-7B-Instruct
num_train_epochs: 3.0
output_dir: /tmp/dcft_checkpoints/llama3-1_8b_r1_annotated_math
overwrite_cache: true
per_device_train_batch_size: 1
plot_loss: true
preprocessing_num_workers: 16
push_to_db: true
push_to_hub: true
report_to: wandb
run_name: llama3-1_8b_r1_annotated_math
save_strategy: epoch
stage: sft
template: qwen25
warmup_ratio: 0.1
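These keys match LLaMA-Factory's SFT configuration schema (`stage: sft`, `finetuning_type: full`, `template: qwen25`), so a run of this kind would typically be launched with `llamafactory-cli train <config>.yaml` under DeepSpeed ZeRO-3. As a minimal sketch, assuming the config above is saved as `reasoning_sft.yaml` (a hypothetical filename), the snippet below loads it and checks that `global_batch_size` is consistent with the per-device batch size and gradient accumulation, which together imply a data-parallel world size of 96 / (1 × 3) = 32 GPUs:

```python
import yaml

# Load the training config (hypothetical filename; adjust to your path).
with open("reasoning_sft.yaml") as f:
    cfg = yaml.safe_load(f)

per_device = cfg["per_device_train_batch_size"]  # 1
grad_accum = cfg["gradient_accumulation_steps"]  # 3
global_bs = cfg["global_batch_size"]             # 96

# global batch = per-device batch * grad-accum steps * data-parallel ranks,
# so this config implies 96 / (1 * 3) = 32 GPUs.
implied_world_size = global_bs // (per_device * grad_accum)
assert per_device * grad_accum * implied_world_size == global_bs
print(f"Implied data-parallel world size: {implied_world_size} GPUs")
```

Note that `model_name_or_path` points at Qwen/Qwen2.5-7B-Instruct even though `run_name` and `hub_model_id` reference llama3-1_8b; the config is reproduced as-is.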