### model
model_name_or_path: "meta-llama/Llama-Guard-3-1B"

### method
stage: sft
do_train: true
do_eval: true
finetuning_type: lora
lora_target: all
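# assuming the LLaMA-Factory schema these keys follow, the `all` shorthand
# attaches LoRA adapters to every linear layer of the model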

### dataset
dataset: train_sample
eval_dataset: test_sample
dataset_dir: ./data
template: llama3
cutoff_len: 4096
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 64
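# NOTE: train_sample and test_sample are assumed to be registered in
# ./data/dataset_info.json, which is how LLaMA-Factory resolves dataset names;
# examples longer than cutoff_len (4096 tokens) are truncated during preprocessing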

### output
output_dir: ./saves/llama3-1b/lora/sft
logging_steps: 1
save_steps: 10
plot_loss: true
overwrite_output_dir: true
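# logs the loss at every step and writes a checkpoint every 10 steps;
# plot_loss additionally saves a loss-curve image to output_dir after training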

### train
per_device_train_batch_size: 4
gradient_accumulation_steps: 8
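# effective batch size = 4 (per device) x 8 (accumulation) = 32 sequences per
# device per optimizer update; multiply by GPU count for the global batch size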
learning_rate: 1.0e-4
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
bf16: true
ddp_timeout: 180000000

### eval
per_device_eval_batch_size: 16
eval_strategy: steps
eval_steps: 1
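# eval_steps: 1 evaluates on test_sample after every optimizer step, which is
# fine for a small demo run but costly for real training
#
# launch example (assuming LLaMA-Factory is installed and this file is saved
# as llama_guard_lora_sft.yaml):
#   llamafactory-cli train llama_guard_lora_sft.yaml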