# s1K_32b / configs.yaml
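# Usage sketch (assumption: these are LLaMA-Factory SFT arguments, which the
# field names below match; the launch command itself is not recorded here):
#   llamafactory-cli train configs.yaml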
adam_beta1: 0.9
adam_beta2: 0.95
assistant_tag: gpt
bf16: true
content_tag: value
cutoff_len: 32768
dataset: mlfoundations-dev/s1K_reformat
dataset_dir: ONLINE
ddp_timeout: 180000000
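# zero3_offload.json: presumably DeepSpeed ZeRO-3 with CPU offload, judging by
# the file name; the /opt/ml/* paths suggest a SageMaker-style training job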
deepspeed: /opt/ml/code/zero3_offload.json
do_train: true
enable_liger_kernel: false
finetuning_type: full
formatting: sharegpt
global_batch_size: 16
gradient_accumulation_steps: 1
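# global_batch_size = per_device_train_batch_size * gradient_accumulation_steps
# * world size, so 16 = 1 * 1 * 16 implies 16 data-parallel ranks (an
# inference; the world size itself is not recorded in this file)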
hub_model_id: mlfoundations-dev/s1K_32b
learning_rate: 1e-05
logging_steps: 1
lr_scheduler_type: cosine
max_samples: 1000000
messages: conversations
model_name_or_path: Qwen/Qwen2.5-32B-Instruct
num_train_epochs: 5.0
output_dir: /opt/ml/model
overwrite_cache: true
per_device_train_batch_size: 1
plot_loss: true
preprocessing_num_workers: 16
push_to_db: true
push_to_hub: true
report_to: wandb
role_tag: from
run_name: s1K_32b
save_strategy: epoch
stage: sft
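# qwen25: presumably the chat template matching Qwen2.5-Instruct, consistent
# with model_name_or_path above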
template: qwen25
user_tag: human
warmup_ratio: 0.05
weight_decay: 0.0001
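# Data shape sketch implied by the sharegpt formatting and tag fields above
# (an inference from this config, not an excerpt from the dataset): each record
# in mlfoundations-dev/s1K_reformat would look like
#   {"conversations": [{"from": "human", "value": "..."},
#                      {"from": "gpt", "value": "..."}]}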