hagelk committed (verified) · Commit 6ba81ea · 1 parent: f4f2fb2

Upload folder using huggingface_hub

Files changed (1):
1. training_params.json +50 -0
training_params.json ADDED
@@ -0,0 +1,50 @@
+ {
+     "model": "meta-llama/Llama-3.2-1B-Instruct",
+     "project_name": "llama3-2-1b-Solutions",
+     "data_path": "psd401/PSD401SolutionsData",
+     "train_split": "train",
+     "valid_split": null,
+     "add_eos_token": true,
+     "block_size": -1,
+     "model_max_length": 2048,
+     "padding": "right",
+     "trainer": "sft",
+     "use_flash_attention_2": false,
+     "log": "tensorboard",
+     "disable_gradient_checkpointing": false,
+     "logging_steps": -1,
+     "eval_strategy": "epoch",
+     "save_total_limit": 1,
+     "auto_find_batch_size": false,
+     "mixed_precision": "bf16",
+     "lr": 0.00001,
+     "epochs": 3,
+     "batch_size": 1,
+     "warmup_ratio": 0.1,
+     "gradient_accumulation": 8,
+     "optimizer": "paged_adamw_8bit",
+     "scheduler": "cosine",
+     "weight_decay": 0.0,
+     "max_grad_norm": 1.0,
+     "seed": 42,
+     "chat_template": "tokenizer",
+     "quantization": "int8",
+     "target_modules": "all-linear",
+     "merge_adapter": true,
+     "peft": true,
+     "lora_r": 16,
+     "lora_alpha": 32,
+     "lora_dropout": 0.05,
+     "model_ref": null,
+     "dpo_beta": 0.1,
+     "max_prompt_length": 128,
+     "max_completion_length": null,
+     "prompt_text_column": null,
+     "text_column": "messages",
+     "rejected_text_column": null,
+     "push_to_hub": false,
+     "username": null,
+     "token": null,
+     "unsloth": false,
+     "distributed_backend": null
+ }
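This file is the hyperparameter dump that Hugging Face AutoTrain writes alongside a LoRA SFT run on Llama-3.2-1B-Instruct. As an illustration only, here is a minimal sketch of a roughly equivalent run driven directly through transformers, peft, and trl rather than through AutoTrain's own code path; argument names such as `max_seq_length` have shifted across trl versions, so treat the exact spellings as assumptions:

```python
# Rough equivalent of training_params.json, mapped onto transformers + peft
# + trl directly. This is NOT AutoTrain's implementation, just a sketch of
# what the config fields above correspond to in the underlying libraries.
import torch
from datasets import load_dataset
from peft import LoraConfig
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from trl import SFTConfig, SFTTrainer

model_id = "meta-llama/Llama-3.2-1B-Instruct"      # "model"

# "quantization": "int8" -> load the base model in 8-bit for adapter training
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=BitsAndBytesConfig(load_in_8bit=True),
    torch_dtype=torch.bfloat16,                    # "mixed_precision": "bf16"
)

# "peft": true with "lora_r": 16, "lora_alpha": 32, "lora_dropout": 0.05,
# "target_modules": "all-linear"
peft_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    target_modules="all-linear",
    task_type="CAUSAL_LM",
)

# "data_path" / "train_split"; "text_column": "messages" is the conversational
# format that SFTTrainer renders through the tokenizer's own chat template
# ("chat_template": "tokenizer").
train_dataset = load_dataset("psd401/PSD401SolutionsData", split="train")

args = SFTConfig(
    output_dir="llama3-2-1b-Solutions",            # "project_name"
    num_train_epochs=3,                            # "epochs"
    per_device_train_batch_size=1,                 # "batch_size"
    gradient_accumulation_steps=8,                 # "gradient_accumulation"
    learning_rate=1e-5,                            # "lr"
    warmup_ratio=0.1,                              # "warmup_ratio"
    lr_scheduler_type="cosine",                    # "scheduler"
    optim="paged_adamw_8bit",                      # "optimizer"
    weight_decay=0.0,                              # "weight_decay"
    max_grad_norm=1.0,                             # "max_grad_norm"
    seed=42,                                       # "seed"
    bf16=True,
    gradient_checkpointing=True,                   # "disable_gradient_checkpointing": false
    save_total_limit=1,                            # "save_total_limit"
    report_to="tensorboard",                       # "log"
    max_seq_length=2048,                           # "model_max_length"; `max_length` in newer trl
)

trainer = SFTTrainer(
    model=model,
    args=args,
    train_dataset=train_dataset,
    peft_config=peft_config,
)
trainer.train()
```

Because `merge_adapter` is true, AutoTrain folds the trained LoRA weights back into the base model after training, so the resulting checkpoint loads as an ordinary causal LM with no peft dependency at inference time. The `model_ref`, `dpo_beta`, `max_prompt_length`, and `rejected_text_column` fields appear to be unused defaults here; they matter only when `trainer` is a preference-tuning variant such as DPO rather than `"sft"`.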