ZhangShenao committed (verified)
Commit 4aca503 · 1 Parent(s): f2fb5a7

Model save

README.md ADDED
@@ -0,0 +1,61 @@
+ ---
+ library_name: transformers
+ license: llama3.2
+ base_model: meta-llama/Llama-3.2-1B-Instruct
+ tags:
+ - trl
+ - dpo
+ - generated_from_trainer
+ model-index:
+ - name: SELM-Llama-3.2-1B-Instruct-rebuttal-iter-1
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # SELM-Llama-3.2-1B-Instruct-rebuttal-iter-1
+
+ This model is a fine-tuned version of [meta-llama/Llama-3.2-1B-Instruct](https://huggingface.co/meta-llama/Llama-3.2-1B-Instruct) on an unspecified dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-07
+ - train_batch_size: 2
+ - eval_batch_size: 2
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 8
+ - gradient_accumulation_steps: 8
+ - total_train_batch_size: 128
+ - total_eval_batch_size: 16
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 1
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - Transformers 4.45.0
+ - Pytorch 2.5.1+cu124
+ - Datasets 2.14.6
+ - Tokenizers 0.20.3
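
The hyperparameters above describe a TRL DPO run. The sketch below shows how such a configuration might be reproduced; it is illustrative only: the dataset id and output directory are placeholders, the DPO beta and precision settings are not recorded in this card, and the exact `DPOTrainer` keyword names vary across trl releases.

```python
# Illustrative sketch only -- mirrors the hyperparameters listed in the card above.
# Assumptions: trl ~0.11 (tokenizer= kwarg) and a preference dataset with
# prompt/chosen/rejected columns; "your-org/preference-pairs" is a placeholder.
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import DPOConfig, DPOTrainer

base = "meta-llama/Llama-3.2-1B-Instruct"
model = AutoModelForCausalLM.from_pretrained(base)
tokenizer = AutoTokenizer.from_pretrained(base)
train_dataset = load_dataset("your-org/preference-pairs", split="train")  # placeholder

args = DPOConfig(
    output_dir="SELM-Llama-3.2-1B-Instruct-rebuttal-iter-1",
    learning_rate=5e-7,
    per_device_train_batch_size=2,   # 2 x 8 GPUs x 8 accumulation steps = 128 effective
    per_device_eval_batch_size=2,
    gradient_accumulation_steps=8,
    num_train_epochs=1,
    lr_scheduler_type="cosine",
    warmup_ratio=0.1,
    seed=42,
    logging_steps=10,
    save_steps=100,
)

trainer = DPOTrainer(
    model=model,            # the reference model defaults to a frozen copy of the policy
    args=args,
    train_dataset=train_dataset,
    tokenizer=tokenizer,    # newer trl releases use processing_class= instead
)
trainer.train()
```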
all_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "epoch": 0.9984301412872841,
+   "total_flos": 0.0,
+   "train_loss": 10.661155712679497,
+   "train_runtime": 6456.3708,
+   "train_samples": 20378,
+   "train_samples_per_second": 3.156,
+   "train_steps_per_second": 0.025
+ }
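
As a quick sanity check, the throughput figures above are mutually consistent with the 128-sample effective batch size from the card (a rough back-of-the-envelope calculation, not part of the original results):

```python
# Back-of-the-envelope check of the reported throughput.
train_samples, train_runtime, total_batch_size = 20378, 6456.3708, 128
steps = train_samples // total_batch_size       # 159 optimizer steps (matches global_step)
print(round(train_samples / train_runtime, 3))  # 3.156 samples/s
print(round(steps / train_runtime, 3))          # 0.025 steps/s
```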
generation_config.json ADDED
@@ -0,0 +1,12 @@
+ {
+   "bos_token_id": 128000,
+   "do_sample": true,
+   "eos_token_id": [
+     128001,
+     128008,
+     128009
+   ],
+   "temperature": 0.6,
+   "top_p": 0.9,
+   "transformers_version": "4.45.0"
+ }
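
These generation defaults (sampling enabled, temperature 0.6, top-p 0.9, and the three Llama 3 end-of-turn token ids) are picked up automatically by `generate`. A minimal usage sketch follows; the repo id is a guess based on the model name, as the exact Hub path is not stated in this commit.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Placeholder repo id; substitute the actual Hub path of this checkpoint.
model_id = "ZhangShenao/SELM-Llama-3.2-1B-Instruct-rebuttal-iter-1"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.bfloat16, device_map="auto"
)

messages = [{"role": "user", "content": "Summarize direct preference optimization in one sentence."}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

# generation_config.json above supplies do_sample=True, temperature=0.6, top_p=0.9
# and the eos token ids, so they do not need to be passed explicitly.
output = model.generate(input_ids, max_new_tokens=128)
print(tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True))
```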
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "epoch": 0.9984301412872841,
+   "total_flos": 0.0,
+   "train_loss": 10.661155712679497,
+   "train_runtime": 6456.3708,
+   "train_samples": 20378,
+   "train_samples_per_second": 3.156,
+   "train_steps_per_second": 0.025
+ }
trainer_state.json ADDED
@@ -0,0 +1,314 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.9984301412872841,
+   "eval_steps": 500,
+   "global_step": 159,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.006279434850863423,
+       "grad_norm": 2389741299.6785245,
+       "learning_rate": 3.125e-08,
+       "logits/chosen": 0.9376043081283569,
+       "logits/rejected": 1.0501841306686401,
+       "logps/chosen": -447.9299011230469,
+       "logps/pi_response": -306.161865234375,
+       "logps/ref_response": -306.4182434082031,
+       "logps/rejected": -457.8988952636719,
+       "loss": 0.6928,
+       "rewards/accuracies": 0.5,
+       "rewards/chosen": 0.0020125629380345345,
+       "rewards/margins": 0.0018250466091558337,
+       "rewards/rejected": 0.00018751618335954845,
+       "step": 1
+     },
+     {
+       "epoch": 0.06279434850863422,
+       "grad_norm": 0.0,
+       "learning_rate": 3.1249999999999997e-07,
+       "logits/chosen": 0.12129918485879898,
+       "logits/rejected": 0.1437881588935852,
+       "logps/chosen": -3537.467041015625,
+       "logps/pi_response": -4150.1064453125,
+       "logps/ref_response": -316.7183837890625,
+       "logps/rejected": -3190.5419921875,
+       "loss": 10.1423,
+       "rewards/accuracies": 0.4930555522441864,
+       "rewards/chosen": -31.1226806640625,
+       "rewards/margins": -3.1455698013305664,
+       "rewards/rejected": -27.977109909057617,
+       "step": 10
+     },
+     {
+       "epoch": 0.12558869701726844,
+       "grad_norm": 0.0,
+       "learning_rate": 4.990353313429303e-07,
+       "logits/chosen": 0.0,
+       "logits/rejected": 0.0,
+       "logps/chosen": -3586.97607421875,
+       "logps/pi_response": -4675.82373046875,
+       "logps/ref_response": -314.9863586425781,
+       "logps/rejected": -3205.0859375,
+       "loss": 10.1147,
+       "rewards/accuracies": 0.44999998807907104,
+       "rewards/chosen": -31.944461822509766,
+       "rewards/margins": -3.5670688152313232,
+       "rewards/rejected": -28.377389907836914,
+       "step": 20
+     },
+     {
+       "epoch": 0.18838304552590268,
+       "grad_norm": 0.0,
+       "learning_rate": 4.882681251368548e-07,
+       "logits/chosen": 0.0,
+       "logits/rejected": 0.0,
+       "logps/chosen": -3549.117919921875,
+       "logps/pi_response": -4436.2509765625,
+       "logps/ref_response": -306.4608459472656,
+       "logps/rejected": -3030.349853515625,
+       "loss": 10.9098,
+       "rewards/accuracies": 0.4375,
+       "rewards/chosen": -31.402786254882812,
+       "rewards/margins": -4.936440467834473,
+       "rewards/rejected": -26.46634292602539,
+       "step": 30
+     },
+     {
+       "epoch": 0.25117739403453687,
+       "grad_norm": 0.0,
+       "learning_rate": 4.6604720940421207e-07,
+       "logits/chosen": 0.0,
+       "logits/rejected": 0.0,
+       "logps/chosen": -3818.757080078125,
+       "logps/pi_response": -4928.62890625,
+       "logps/ref_response": -323.02288818359375,
+       "logps/rejected": -3543.67822265625,
+       "loss": 11.076,
+       "rewards/accuracies": 0.48124998807907104,
+       "rewards/chosen": -33.8750114440918,
+       "rewards/margins": -2.380995273590088,
+       "rewards/rejected": -31.4940128326416,
+       "step": 40
+     },
+     {
+       "epoch": 0.3139717425431711,
+       "grad_norm": 0.0,
+       "learning_rate": 4.3344075855595097e-07,
+       "logits/chosen": 0.0,
+       "logits/rejected": 0.0,
+       "logps/chosen": -4011.28271484375,
+       "logps/pi_response": -5043.599609375,
+       "logps/ref_response": -320.0797424316406,
+       "logps/rejected": -3335.053466796875,
+       "loss": 10.8383,
+       "rewards/accuracies": 0.42500001192092896,
+       "rewards/chosen": -35.91133499145508,
+       "rewards/margins": -6.445613861083984,
+       "rewards/rejected": -29.46571922302246,
+       "step": 50
+     },
+     {
+       "epoch": 0.37676609105180536,
+       "grad_norm": 0.0,
+       "learning_rate": 3.920161866827889e-07,
+       "logits/chosen": 0.0,
+       "logits/rejected": 0.0,
+       "logps/chosen": -3751.494140625,
+       "logps/pi_response": -4971.9267578125,
+       "logps/ref_response": -311.6258544921875,
+       "logps/rejected": -3540.6640625,
+       "loss": 10.2301,
+       "rewards/accuracies": 0.4375,
+       "rewards/chosen": -33.251548767089844,
+       "rewards/margins": -1.7325384616851807,
+       "rewards/rejected": -31.519006729125977,
+       "step": 60
+     },
+     {
+       "epoch": 0.43956043956043955,
+       "grad_norm": 0.0,
+       "learning_rate": 3.4376480090239047e-07,
+       "logits/chosen": 0.0,
+       "logits/rejected": 0.0,
+       "logps/chosen": -3980.702392578125,
+       "logps/pi_response": -5090.5732421875,
+       "logps/ref_response": -329.1057434082031,
+       "logps/rejected": -3480.60595703125,
+       "loss": 10.855,
+       "rewards/accuracies": 0.4437499940395355,
+       "rewards/chosen": -35.667266845703125,
+       "rewards/margins": -4.873533725738525,
+       "rewards/rejected": -30.793731689453125,
+       "step": 70
+     },
+     {
+       "epoch": 0.5023547880690737,
+       "grad_norm": 0.0,
+       "learning_rate": 2.910060778827554e-07,
+       "logits/chosen": 0.0,
+       "logits/rejected": 0.0,
+       "logps/chosen": -3804.789794921875,
+       "logps/pi_response": -4688.3935546875,
+       "logps/ref_response": -309.07379150390625,
+       "logps/rejected": -3321.96923828125,
+       "loss": 10.0252,
+       "rewards/accuracies": 0.4124999940395355,
+       "rewards/chosen": -34.202884674072266,
+       "rewards/margins": -4.780238628387451,
+       "rewards/rejected": -29.42264175415039,
+       "step": 80
+     },
+     {
+       "epoch": 0.565149136577708,
+       "grad_norm": 0.0,
+       "learning_rate": 2.3627616503391812e-07,
+       "logits/chosen": 0.0,
+       "logits/rejected": 0.0,
+       "logps/chosen": -3998.41845703125,
+       "logps/pi_response": -5007.4326171875,
+       "logps/ref_response": -320.7072448730469,
+       "logps/rejected": -3723.192626953125,
+       "loss": 10.9442,
+       "rewards/accuracies": 0.4749999940395355,
+       "rewards/chosen": -35.741825103759766,
+       "rewards/margins": -2.8130385875701904,
+       "rewards/rejected": -32.92878723144531,
+       "step": 90
+     },
+     {
+       "epoch": 0.6279434850863422,
+       "grad_norm": 0.0,
+       "learning_rate": 1.8220596619089573e-07,
+       "logits/chosen": 0.0,
+       "logits/rejected": 0.0,
+       "logps/chosen": -3574.7734375,
+       "logps/pi_response": -4309.73779296875,
+       "logps/ref_response": -297.6278076171875,
+       "logps/rejected": -2943.09228515625,
+       "loss": 11.0889,
+       "rewards/accuracies": 0.4124999940395355,
+       "rewards/chosen": -31.873769760131836,
+       "rewards/margins": -6.0053815841674805,
+       "rewards/rejected": -25.868383407592773,
+       "step": 100
+     },
+     {
+       "epoch": 0.6907378335949764,
+       "grad_norm": 0.0,
+       "learning_rate": 1.3139467229135998e-07,
+       "logits/chosen": 0.0,
+       "logits/rejected": 0.0,
+       "logps/chosen": -3946.592529296875,
+       "logps/pi_response": -4776.75439453125,
+       "logps/ref_response": -326.58782958984375,
+       "logps/rejected": -3317.11669921875,
+       "loss": 10.8242,
+       "rewards/accuracies": 0.41874998807907104,
+       "rewards/chosen": -35.36956024169922,
+       "rewards/margins": -6.115808486938477,
+       "rewards/rejected": -29.25374984741211,
+       "step": 110
+     },
+     {
+       "epoch": 0.7535321821036107,
+       "grad_norm": 0.0,
+       "learning_rate": 8.628481651367875e-08,
+       "logits/chosen": 0.0,
+       "logits/rejected": 0.0,
+       "logps/chosen": -3573.376953125,
+       "logps/pi_response": -4478.4462890625,
+       "logps/ref_response": -297.75628662109375,
+       "logps/rejected": -3231.69677734375,
+       "loss": 10.9182,
+       "rewards/accuracies": 0.4625000059604645,
+       "rewards/chosen": -31.708187103271484,
+       "rewards/margins": -3.1025025844573975,
+       "rewards/rejected": -28.60568618774414,
+       "step": 120
+     },
+     {
+       "epoch": 0.8163265306122449,
+       "grad_norm": 0.0,
+       "learning_rate": 4.904486005914027e-08,
+       "logits/chosen": 0.0,
+       "logits/rejected": 0.0,
+       "logps/chosen": -3555.366455078125,
+       "logps/pi_response": -4567.4677734375,
+       "logps/ref_response": -308.615966796875,
+       "logps/rejected": -3327.48193359375,
+       "loss": 10.3728,
+       "rewards/accuracies": 0.48124998807907104,
+       "rewards/chosen": -31.474365234375,
+       "rewards/margins": -2.0411860942840576,
+       "rewards/rejected": -29.433177947998047,
+       "step": 130
+     },
+     {
+       "epoch": 0.8791208791208791,
+       "grad_norm": 0.0,
+       "learning_rate": 2.1464952759020856e-08,
+       "logits/chosen": 0.0,
+       "logits/rejected": 0.0,
+       "logps/chosen": -3977.247314453125,
+       "logps/pi_response": -4856.95458984375,
+       "logps/ref_response": -321.81634521484375,
+       "logps/rejected": -3193.8388671875,
+       "loss": 10.413,
+       "rewards/accuracies": 0.4000000059604645,
+       "rewards/chosen": -35.818058013916016,
+       "rewards/margins": -7.793890953063965,
+       "rewards/rejected": -28.024166107177734,
+       "step": 140
+     },
+     {
+       "epoch": 0.9419152276295133,
+       "grad_norm": 0.0,
+       "learning_rate": 4.8708793644441086e-09,
+       "logits/chosen": 0.0,
+       "logits/rejected": 0.0,
+       "logps/chosen": -3445.246826171875,
+       "logps/pi_response": -4451.90869140625,
+       "logps/ref_response": -298.1885986328125,
+       "logps/rejected": -3225.22802734375,
+       "loss": 10.872,
+       "rewards/accuracies": 0.5,
+       "rewards/chosen": -30.63448715209961,
+       "rewards/margins": -1.953590750694275,
+       "rewards/rejected": -28.680896759033203,
+       "step": 150
+     },
+     {
+       "epoch": 0.9984301412872841,
+       "step": 159,
+       "total_flos": 0.0,
+       "train_loss": 10.661155712679497,
+       "train_runtime": 6456.3708,
+       "train_samples_per_second": 3.156,
+       "train_steps_per_second": 0.025
+     }
+   ],
+   "logging_steps": 10,
+   "max_steps": 159,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 1,
+   "save_steps": 100,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": true
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 0.0,
+   "train_batch_size": 2,
+   "trial_name": null,
+   "trial_params": null
+ }
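
For interpreting the log above: TRL's DPO trainer reports rewards/chosen and rewards/rejected as β-scaled log-probability ratios between the policy and the reference model, rewards/margins as their difference, rewards/accuracies as the fraction of pairs with a positive margin, and loss as the negative log-sigmoid of the margin. A minimal sketch of those relationships follows; β is not recorded in this state file, so TRL's default of 0.1 is used purely for illustration, and the full SELM objective may add further terms not shown here.

```python
import torch
import torch.nn.functional as F

def dpo_terms(policy_chosen_logps, policy_rejected_logps,
              ref_chosen_logps, ref_rejected_logps, beta=0.1):
    """Reproduce the logged DPO quantities from sequence log-probabilities.

    beta=0.1 is TRL's default and only an illustrative assumption here.
    """
    rewards_chosen = beta * (policy_chosen_logps - ref_chosen_logps)        # rewards/chosen
    rewards_rejected = beta * (policy_rejected_logps - ref_rejected_logps)  # rewards/rejected
    margins = rewards_chosen - rewards_rejected                             # rewards/margins
    loss = -F.logsigmoid(margins).mean()                                    # loss (plain DPO)
    accuracy = (margins > 0).float().mean()                                 # rewards/accuracies
    return loss, rewards_chosen.mean(), rewards_rejected.mean(), accuracy
```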