bf16: true
cutoff_len: 1024
dataset: mathinstruct
dataset_dir: data
ddp_timeout: 180000000
do_train: true
finetuning_type: lora
flash_attn: auto
gradient_accumulation_steps: 1
learning_rate: 0.0002
logging_steps: 5
lora_alpha: 16
lora_dropout: 0
lora_rank: 8
lora_target: all
lr_scheduler_type: cosine
max_grad_norm: 1.0
max_samples: 10000
model_name_or_path: meta-llama/Llama-3.2-1B
num_train_epochs: 3.0
optim: adamw_torch
output_dir: saves/Llama-3.2-1B/lora/llama3.2-1b
packing: false
per_device_train_batch_size: 16
plot_loss: true
preprocessing_num_workers: 16
report_to: none
rope_scaling: linear
save_steps: 100
stage: sft
template: default
trust_remote_code: true
warmup_steps: 0
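
# Note: with gradient_accumulation_steps: 1, the effective batch size is
# per_device_train_batch_size (16) x number of devices.
#
# A minimal launch sketch, assuming this file is saved as
# llama3.2_1b_lora_sft.yaml (hypothetical filename) and LLaMA-Factory's
# standard CLI entry point is installed:
#
#   llamafactory-cli train llama3.2_1b_lora_sft.yaml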