Commit 026018f (verified) · Parent(s): a4975ae
Committed by WangXFng

Model save

README.md CHANGED
@@ -34,15 +34,14 @@ More information needed
 
 The following hyperparameters were used during training:
 - learning_rate: 0.0001
-- train_batch_size: 8
+- train_batch_size: 16
 - eval_batch_size: 8
 - seed: 42
 - gradient_accumulation_steps: 16
-- total_train_batch_size: 128
+- total_train_batch_size: 256
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: linear
-- lr_scheduler_warmup_steps: 2
-- num_epochs: 2
+- num_epochs: 4
 
 ### Training results
 
@@ -50,7 +49,7 @@ The following hyperparameters were used during training:
 
 ### Framework versions
 
-- PEFT 0.13.2
+- PEFT 0.13.0
 - Transformers 4.45.2
-- Pytorch 2.4.1
-- Tokenizers 0.20.1
+- Pytorch 2.4.0
+- Tokenizers 0.20.0
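The two batch-size edits above are consistent with each other: with the `transformers` `Trainer`, the total train batch size is the per-device batch size multiplied by the gradient accumulation steps and the number of processes. A quick sketch of that arithmetic; the single-process `world_size = 1` is an assumption, the other numbers come straight from the README diff:

```python
# Effective batch size as reported in the README:
# per_device_batch * grad_accum_steps * world_size.
train_batch_size = 16             # new value in this commit (was 8)
gradient_accumulation_steps = 16  # unchanged
world_size = 1                    # assumption: one training process

total_train_batch_size = train_batch_size * gradient_accumulation_steps * world_size
print(total_train_batch_size)     # 256 (old config: 8 * 16 = 128)
```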
adapter_config.json CHANGED
@@ -1,37 +1,34 @@
 {
   "alpha_pattern": {},
-  "auto_mapping": {
-    "base_model_class": "LlamaForCausalLM",
-    "parent_library": "transformers.models.llama.modeling_llama"
-  },
+  "auto_mapping": null,
   "base_model_name_or_path": "meta-llama/Llama-3.2-3B-Instruct",
   "bias": "none",
   "fan_in_fan_out": false,
   "inference_mode": true,
-  "init_lora_weights": "gaussian",
+  "init_lora_weights": true,
   "layer_replication": null,
   "layers_pattern": null,
   "layers_to_transform": null,
   "loftq_config": {},
   "lora_alpha": 32,
-  "lora_dropout": 0.1,
+  "lora_dropout": 0.05,
   "megatron_config": null,
   "megatron_core": "megatron.core",
   "modules_to_save": null,
   "peft_type": "LORA",
-  "r": 8,
+  "r": 16,
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "down_proj",
-    "up_proj",
+    "v_proj",
+    "gate_proj",
     "o_proj",
-    "q_proj",
     "k_proj",
-    "gate_proj",
-    "v_proj"
+    "down_proj",
+    "up_proj",
+    "q_proj"
   ],
-  "task_type": null,
-  "use_dora": true,
+  "task_type": "CAUSAL_LM",
+  "use_dora": false,
   "use_rslora": false
 }
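The updated file describes a plain LoRA adapter: rank doubled to 16, dropout lowered to 0.05, DoRA turned off, and `task_type` now set. A minimal sketch of rebuilding this configuration and attaching the saved adapter with PEFT; the base model id comes from the config itself, while `"./adapter"` is a hypothetical local path, not this repo's actual id:

```python
from peft import LoraConfig, PeftModel
from transformers import AutoModelForCausalLM

# LoRA hyperparameters as they appear in the updated adapter_config.json.
config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    target_modules=["v_proj", "gate_proj", "o_proj", "k_proj",
                    "down_proj", "up_proj", "q_proj"],
    task_type="CAUSAL_LM",
)

# Attach the trained adapter to the base model; "./adapter" is a
# placeholder path for wherever the adapter files were downloaded.
base = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-3B-Instruct")
model = PeftModel.from_pretrained(base, "./adapter")
```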
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a4296b9cf510cd2dd41ae8bf4eb415a592e7aba73769a261849b22435972f91f
-size 1639093264
+oid sha256:0edcd8d9a75fca009e240360ca8fd3dcb45f6f3d4f67a449b5e050db25eec1dd
+size 1684597880
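As usual on the Hub, what is versioned here is a Git LFS pointer rather than the tensors themselves: three `key value` lines giving the spec version, the SHA-256 of the payload, and its size in bytes. A throwaway sketch of reading one, using the new pointer above:

```python
# A Git LFS pointer file is a few "key value" lines; the payload itself
# is stored out-of-band and fetched by its oid.
def parse_lfs_pointer(text: str) -> dict:
    return dict(line.split(" ", 1) for line in text.strip().splitlines())

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:0edcd8d9a75fca009e240360ca8fd3dcb45f6f3d4f67a449b5e050db25eec1dd
size 1684597880"""

meta = parse_lfs_pointer(pointer)
print(int(meta["size"]) / 2**30)  # adapter payload, roughly 1.57 GiB
```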
trainer_state.json CHANGED
@@ -1,7 +1,7 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 2.0,
+  "epoch": 4.0,
   "eval_steps": 500,
   "global_step": 2060,
   "is_hyper_param_search": false,
@@ -9,75 +9,75 @@
   "is_world_process_zero": true,
   "log_history": [
     {
-      "epoch": 0.24271844660194175,
-      "grad_norm": 2.2880468368530273,
-      "learning_rate": 8.794946550048592e-05,
-      "loss": 0.7378,
+      "epoch": 0.4854368932038835,
+      "grad_norm": 0.5094059705734253,
+      "learning_rate": 8.786407766990292e-05,
+      "loss": 0.7642,
       "step": 250
     },
     {
-      "epoch": 0.4854368932038835,
-      "grad_norm": 1.7398513555526733,
-      "learning_rate": 7.580174927113704e-05,
-      "loss": 0.448,
+      "epoch": 0.970873786407767,
+      "grad_norm": 0.3778647482395172,
+      "learning_rate": 7.572815533980583e-05,
+      "loss": 0.3845,
       "step": 500
     },
     {
-      "epoch": 0.7281553398058253,
-      "grad_norm": 1.7528009414672852,
-      "learning_rate": 6.365403304178815e-05,
-      "loss": 0.4192,
+      "epoch": 1.4563106796116505,
+      "grad_norm": 0.4107059836387634,
+      "learning_rate": 6.359223300970875e-05,
+      "loss": 0.3589,
       "step": 750
     },
     {
-      "epoch": 0.970873786407767,
-      "grad_norm": 1.7135326862335205,
-      "learning_rate": 5.150631681243926e-05,
-      "loss": 0.3959,
+      "epoch": 1.941747572815534,
+      "grad_norm": 0.4316255450248718,
+      "learning_rate": 5.145631067961165e-05,
+      "loss": 0.3445,
       "step": 1000
     },
     {
-      "epoch": 1.2135922330097086,
-      "grad_norm": 2.295848846435547,
-      "learning_rate": 3.9358600583090386e-05,
-      "loss": 0.3666,
+      "epoch": 2.4271844660194173,
+      "grad_norm": 0.4872223436832428,
+      "learning_rate": 3.9320388349514564e-05,
+      "loss": 0.3255,
       "step": 1250
     },
     {
-      "epoch": 1.4563106796116505,
-      "grad_norm": 2.189216136932373,
-      "learning_rate": 2.72108843537415e-05,
-      "loss": 0.3465,
+      "epoch": 2.912621359223301,
+      "grad_norm": 0.6546180248260498,
+      "learning_rate": 2.7184466019417475e-05,
+      "loss": 0.3019,
       "step": 1500
     },
     {
-      "epoch": 1.6990291262135924,
-      "grad_norm": 2.383833885192871,
-      "learning_rate": 1.5063168124392615e-05,
-      "loss": 0.3249,
+      "epoch": 3.3980582524271843,
+      "grad_norm": 0.6317738890647888,
+      "learning_rate": 1.5048543689320387e-05,
+      "loss": 0.2698,
       "step": 1750
     },
     {
-      "epoch": 1.941747572815534,
-      "grad_norm": 2.1898789405822754,
-      "learning_rate": 2.915451895043732e-06,
-      "loss": 0.3096,
+      "epoch": 3.883495145631068,
+      "grad_norm": 0.6270581483840942,
+      "learning_rate": 2.912621359223301e-06,
+      "loss": 0.2537,
       "step": 2000
     },
     {
-      "epoch": 2.0,
+      "epoch": 4.0,
       "step": 2060,
-      "total_flos": 6.440370064857293e+17,
-      "train_loss": 0.4152493615752285,
-      "train_runtime": 78972.7393,
-      "train_samples_per_second": 3.339,
-      "train_steps_per_second": 0.026
+      "total_flos": 1.4514285460762153e+18,
+      "train_loss": 0.3716998405826902,
+      "train_runtime": 16511.4152,
+      "train_samples_per_second": 31.938,
+      "train_steps_per_second": 0.125
     }
   ],
   "logging_steps": 250,
   "max_steps": 2060,
   "num_input_tokens_seen": 0,
-  "num_train_epochs": 2,
+  "num_train_epochs": 4,
   "save_steps": 500,
   "stateful_callbacks": {
     "TrainerControl": {
@@ -91,8 +91,8 @@
       "attributes": {}
     }
   },
-  "total_flos": 6.440370064857293e+17,
-  "train_batch_size": 8,
+  "total_flos": 1.4514285460762153e+18,
+  "train_batch_size": 16,
   "trial_name": null,
   "trial_params": null
 }
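The new log is self-consistent with the README changes: 2060 optimizer steps over 4 epochs is 515 steps per epoch, so the first logging point at step 250 lands at epoch 250/515 ≈ 0.485, exactly as recorded, and throughput times runtime reproduces the total sample count. A quick arithmetic check, using only numbers from the diffs above:

```python
# Sanity checks on the updated trainer_state.json.
global_step = 2060
num_train_epochs = 4
total_train_batch_size = 256  # from the updated README

steps_per_epoch = global_step / num_train_epochs     # 515.0
print(250 / steps_per_epoch)                         # 0.48543..., the epoch logged at step 250
print(global_step * total_train_batch_size)          # 527360 samples processed in total
print(16511.4152 * 31.938)                           # ~527342, train_runtime * train_samples_per_second
```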
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0653084ef74cad66a842993ee891fafc998059dc681e884d2eb0539b717dfcce
-size 5176
+oid sha256:91794dd336216b2a89ff8f8b3a21988f053c3c394a0ec8cbcf0fc4f0977592bb
+size 5240