Waxwing-Storytelling-70B-LoRA / training_parameters.json
{
  "lora_name": "Waxwing",
  "always_override": false,
  "save_steps": 0.0,
  "micro_batch_size": 3,
  "batch_size": 0,
  "epochs": 1.0,
  "learning_rate": "3e-4",
  "lr_scheduler_type": "linear",
  "lora_rank": 16,
  "lora_alpha": 8,
  "lora_dropout": 0.05,
  "cutoff_len": 1280,
  "dataset": "dataset_11.27.23",
  "eval_dataset": "None",
  "format": "t2d_oobabooga_training_format",
  "eval_steps": 100.0,
  "raw_text_file": "None",
  "higher_rank_limit": false,
  "warmup_steps": 100.0,
  "optimizer": "adamw_torch_fused",
  "hard_cut_string": "\\n\\n\\n",
  "train_only_after": "",
  "stop_at_loss": 0,
  "add_eos_token": false,
  "min_chars": 0.0,
  "report_to": "None",
  "precize_slicing_overlap": true,
  "add_eos_token_type": "Every Block",
  "save_steps_under_loss": 1.8,
  "add_bos_token": false,
  "training_projection": "all",
  "sliding_window": false,
  "warmup_ratio": 0,
  "grad_accumulation": 4,
  "neft_noise_alpha": 3
}
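
For reference, below is a minimal sketch of how the key hyperparameters above would map onto a Hugging Face peft LoraConfig, assuming a Llama-style base model. The target_modules list is the usual naming for "training_projection": "all" and is an assumption, not taken from this repo; this is illustrative, not the exact code path oobabooga's trainer runs.

import json

from peft import LoraConfig

# Path is illustrative; adjust to wherever this file lives locally.
with open("training_parameters.json") as f:
    params = json.load(f)

# Effective batch size: micro_batch_size * grad_accumulation = 3 * 4 = 12.
effective_batch = params["micro_batch_size"] * params["grad_accumulation"]

lora_config = LoraConfig(
    r=params["lora_rank"],                # 16
    lora_alpha=params["lora_alpha"],      # 8, so the LoRA scaling alpha/r is 0.5
    lora_dropout=params["lora_dropout"],  # 0.05
    bias="none",
    task_type="CAUSAL_LM",
    # Assumed module names for "all" projections on a Llama-family model.
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj",
                    "gate_proj", "up_proj", "down_proj"],
)
print(f"effective batch size: {effective_batch}")

Note that "neft_noise_alpha": 3 corresponds to NEFTune embedding noise, which is applied at training time rather than through LoraConfig.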