cutoff_len: 1024
dataset: identity
dataset_dir: data
do_train: true
finetuning_type: lora
flash_attn: auto
fp16: true
gradient_accumulation_steps: 8
learning_rate: 5.0e-05
logging_steps: 5
lora_alpha: 16
lora_dropout: 0
lora_rank: 8
lora_target: q_proj,v_proj
lr_scheduler_type: cosine
max_grad_norm: 1.0
max_samples: 100000
model_name_or_path: huggyllama/llama-7b
num_train_epochs: 3.0
optim: adamw_torch
output_dir: saves/LLaMA-7B/lora/custom1
packing: false
per_device_train_batch_size: 2
report_to: none
save_steps: 100
stage: sft
template: default
warmup_steps: 0
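
These keys follow LLaMA-Factory's training-argument schema (stage: sft, finetuning_type: lora, template, dataset_dir, and so on), so the block above reads as a supervised LoRA fine-tune of huggyllama/llama-7b. Assuming that toolkit, a minimal way to run it is to save the block to a YAML file (the filename below is hypothetical) and pass it to the project's CLI:

# hypothetical filename; llamafactory-cli train is LLaMA-Factory's training entry point
llamafactory-cli train llama7b_lora_sft.yaml

Note that with per_device_train_batch_size: 2 and gradient_accumulation_steps: 8, each optimizer step consumes an effective batch of 16 samples per device.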