# II-Tulu-3B-DPO / training_config.yaml
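# Data: DPO preference pairs from the Tulu 3 8B preference mixture.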
train_dataset_path: allenai/llama-3.1-tulu-3-8b-preference-mixture
train_split: train
eval_dataset_path: allenai/llama-3.1-tulu-3-8b-preference-mixture
eval_split: test
chat_template: tokenizer_default
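# Policy model: the II-Tulu-3B SFT checkpoint, loaded with FlashAttention-2.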
model_name_or_path: phunguyen01/II-Tulu-3B-SFT
model_revision: main
trust_remote_code: false
attn_implementation: flash_attention_2
use_peft: false
lora_r: 16
lora_alpha: 32
lora_dropout: 0.05
lora_target_modules: []
lora_modules_to_save: []
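# Quantization is disabled; the bitsandbytes settings below are likewise unused.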
load_in_8bit: false
load_in_4bit: false
bnb_4bit_quant_type: nf4
use_bnb_nested_quant: false
bnb_4bit_quant_storage: uint8
output_dir: checkpoints/ffc21d27-f1c2-41da-9dbb-658ce6048ce1
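# beta is the DPO KL-penalty coefficient: larger values keep the policy closer
# to the SFT reference model.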
beta: 0.1
seed: 42
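# Evaluation is disabled, so eval_split and evals_per_epoch have no effect.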
do_eval: false
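# Effective batch size = per_device_train_batch_size * gradient_accumulation_steps
# * number of GPUs (the world size is not recorded in this file).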
learning_rate: 5.0e-07
gradient_accumulation_steps: 1
per_device_train_batch_size: 2
per_device_eval_batch_size: 2
num_train_epochs: 1
hub_model_id: phunguyen01/II-Tulu-3B-DPO
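# TRL-style length limits (assuming a TRL DPOTrainer backend): prompts are
# truncated to max_prompt_length tokens, full prompt+completion sequences to
# max_length tokens.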
max_length: 2048
max_prompt_length: 1024
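# Assuming the trainer's default of zero warmup steps, the LR decays linearly
# from 5e-7 to zero over the single training epoch.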
lr_scheduler_type: linear
gradient_checkpointing: false
evals_per_epoch: 1
bf16: true
logging_steps: 10
save_strategy: epoch
push_to_hub: true
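# RMSprop is the optimizer used in the original DPO paper (Rafailov et al., 2023).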
optim: rmsprop
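# Weights & Biases run tracking; checkpoints are saved and pushed to the Hub
# once per epoch (save_strategy: epoch, push_to_hub: true).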
wandb_project: llm-training-platform
wandb_run_name: II-Tulu-3B-DPO
wandb_token: null  # do not commit live API tokens; pass one via the WANDB_API_KEY environment variable