tyzhu committed (verified)
Commit 9c5081d · 1 Parent(s): 1f3e30d

End of training

Files changed (6):
  1. README.md +14 -2
  2. all_results.json +16 -0
  3. eval_results.json +10 -0
  4. tokenizer.json +1 -6
  5. train_results.json +9 -0
  6. trainer_state.json +560 -0
README.md CHANGED
@@ -3,11 +3,23 @@ license: other
 base_model: Qwen/Qwen1.5-4B
 tags:
 - generated_from_trainer
+datasets:
+- tyzhu/lmind_hotpot_train8000_eval7405_v1_qa
 metrics:
 - accuracy
 model-index:
 - name: lmind_hotpot_train8000_eval7405_v1_qa_Qwen_Qwen1.5-4B_3e-5_lora2
-  results: []
+  results:
+  - task:
+      name: Causal Language Modeling
+      type: text-generation
+    dataset:
+      name: tyzhu/lmind_hotpot_train8000_eval7405_v1_qa
+      type: tyzhu/lmind_hotpot_train8000_eval7405_v1_qa
+    metrics:
+    - name: Accuracy
+      type: accuracy
+      value: 0.48644444444444446
 library_name: peft
 ---
 
@@ -16,7 +28,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # lmind_hotpot_train8000_eval7405_v1_qa_Qwen_Qwen1.5-4B_3e-5_lora2
 
-This model is a fine-tuned version of [Qwen/Qwen1.5-4B](https://huggingface.co/Qwen/Qwen1.5-4B) on an unknown dataset.
+This model is a fine-tuned version of [Qwen/Qwen1.5-4B](https://huggingface.co/Qwen/Qwen1.5-4B) on the tyzhu/lmind_hotpot_train8000_eval7405_v1_qa dataset.
 It achieves the following results on the evaluation set:
 - Loss: 3.6297
 - Accuracy: 0.4864
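
The README describes a PEFT (LoRA) adapter trained on top of Qwen/Qwen1.5-4B. A minimal sketch of loading the adapter for inference is shown below; the adapter repo id is assumed from the model-index name above, and this is not the author's training or evaluation script.

```python
# Minimal sketch: attach the LoRA adapter from this repo to the Qwen1.5-4B base.
# The adapter repo id below is assumed from the model name in the README.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "Qwen/Qwen1.5-4B"
adapter_id = "tyzhu/lmind_hotpot_train8000_eval7405_v1_qa_Qwen_Qwen1.5-4B_3e-5_lora2"  # assumed

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id, device_map="auto")
model = PeftModel.from_pretrained(base, adapter_id)  # loads the LoRA weights on top of the base

prompt = "Question: Who wrote Hamlet?\nAnswer:"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```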
all_results.json ADDED
@@ -0,0 +1,16 @@
+{
+    "epoch": 20.0,
+    "eval_accuracy": 0.48644444444444446,
+    "eval_loss": 3.6297109127044678,
+    "eval_runtime": 6.1847,
+    "eval_samples": 500,
+    "eval_samples_per_second": 80.844,
+    "eval_steps_per_second": 10.186,
+    "perplexity": 37.7019158967248,
+    "total_flos": 1.293538587312128e+17,
+    "train_loss": 1.3807264678955078,
+    "train_runtime": 10548.406,
+    "train_samples": 8000,
+    "train_samples_per_second": 15.168,
+    "train_steps_per_second": 0.474
+}
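
The reported perplexity is simply the exponential of the evaluation cross-entropy loss; a quick check against the values in all_results.json:

```python
import math

eval_loss = 3.6297109127044678
perplexity = math.exp(eval_loss)
print(perplexity)  # ~37.70, matching the reported 37.7019158967248
```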
eval_results.json ADDED
@@ -0,0 +1,10 @@
+{
+    "epoch": 20.0,
+    "eval_accuracy": 0.48644444444444446,
+    "eval_loss": 3.6297109127044678,
+    "eval_runtime": 6.1847,
+    "eval_samples": 500,
+    "eval_samples_per_second": 80.844,
+    "eval_steps_per_second": 10.186,
+    "perplexity": 37.7019158967248
+}
tokenizer.json CHANGED
@@ -1,11 +1,6 @@
 {
   "version": "1.0",
-  "truncation": {
-    "direction": "Right",
-    "max_length": 1024,
-    "strategy": "LongestFirst",
-    "stride": 0
-  },
+  "truncation": null,
   "padding": null,
   "added_tokens": [
     {
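
The change above corresponds to the fast tokenizer being saved with truncation disabled: a truncation config dict in tokenizer.json becomes `"truncation": null`. A small sketch with the `tokenizers` library showing how the two serialized states arise (it assumes the base repo ships a tokenizer.json; this is illustrative, not the author's pipeline):

```python
# Sketch: toggle the "truncation" block that appears in tokenizer.json.
from tokenizers import Tokenizer

tok = Tokenizer.from_pretrained("Qwen/Qwen1.5-4B")  # assumes tokenizer.json is available on the Hub

# Old state in the diff: truncation enabled at 1024 tokens.
tok.enable_truncation(max_length=1024, stride=0, strategy="longest_first", direction="right")
print(tok.truncation)  # dict with max_length, stride, strategy, direction

# New state in the diff: truncation disabled, serialized as "truncation": null.
tok.no_truncation()
print(tok.truncation)  # None

tok.save("tokenizer.json")
```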
train_results.json ADDED
@@ -0,0 +1,9 @@
+{
+    "epoch": 20.0,
+    "total_flos": 1.293538587312128e+17,
+    "train_loss": 1.3807264678955078,
+    "train_runtime": 10548.406,
+    "train_samples": 8000,
+    "train_samples_per_second": 15.168,
+    "train_steps_per_second": 0.474
+}
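
The throughput numbers are internally consistent: 8000 samples over 20 epochs in 10548.406 s gives the reported samples/second, and the 5000 optimizer steps recorded in trainer_state.json give the reported steps/second, which implies roughly 32 samples per optimizer step (across devices and/or gradient accumulation, given the per-device batch size of 1). A quick arithmetic check:

```python
train_samples = 8000
epochs = 20.0
train_runtime = 10548.406   # seconds
global_steps = 5000         # from trainer_state.json

print(train_samples * epochs / train_runtime)  # ~15.168 samples/s (reported: 15.168)
print(global_steps / train_runtime)            # ~0.474 steps/s    (reported: 0.474)
print(train_samples * epochs / global_steps)   # 32.0 samples per optimizer step
```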
trainer_state.json ADDED
@@ -0,0 +1,560 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 20.0,
+  "eval_steps": 500,
+  "global_step": 5000,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.4,
+      "grad_norm": 0.6342277526855469,
+      "learning_rate": 3e-05,
+      "loss": 2.4625,
+      "step": 100
+    },
+    {
+      "epoch": 0.8,
+      "grad_norm": 0.7092506885528564,
+      "learning_rate": 3e-05,
+      "loss": 2.2869,
+      "step": 200
+    },
+    {
+      "epoch": 1.0,
+      "eval_accuracy": 0.5139365079365079,
+      "eval_loss": 2.348330020904541,
+      "eval_runtime": 6.0141,
+      "eval_samples_per_second": 83.137,
+      "eval_steps_per_second": 10.475,
+      "step": 250
+    },
+    {
+      "epoch": 1.2,
+      "grad_norm": 0.862402081489563,
+      "learning_rate": 3e-05,
+      "loss": 2.2582,
+      "step": 300
+    },
+    {
+      "epoch": 1.6,
+      "grad_norm": 0.9191051721572876,
+      "learning_rate": 3e-05,
+      "loss": 2.2329,
+      "step": 400
+    },
+    {
+      "epoch": 2.0,
+      "grad_norm": 0.9321324825286865,
+      "learning_rate": 3e-05,
+      "loss": 2.208,
+      "step": 500
+    },
+    {
+      "epoch": 2.0,
+      "eval_accuracy": 0.515968253968254,
+      "eval_loss": 2.330822706222534,
+      "eval_runtime": 5.8242,
+      "eval_samples_per_second": 85.849,
+      "eval_steps_per_second": 10.817,
+      "step": 500
+    },
+    {
+      "epoch": 2.4,
+      "grad_norm": 1.0817488431930542,
+      "learning_rate": 3e-05,
+      "loss": 2.1367,
+      "step": 600
+    },
+    {
+      "epoch": 2.8,
+      "grad_norm": 1.1674364805221558,
+      "learning_rate": 3e-05,
+      "loss": 2.1226,
+      "step": 700
+    },
+    {
+      "epoch": 3.0,
+      "eval_accuracy": 0.5158730158730159,
+      "eval_loss": 2.3344078063964844,
+      "eval_runtime": 5.9956,
+      "eval_samples_per_second": 83.395,
+      "eval_steps_per_second": 10.508,
+      "step": 750
+    },
+    {
+      "epoch": 3.2,
+      "grad_norm": 1.5059082508087158,
+      "learning_rate": 3e-05,
+      "loss": 2.079,
+      "step": 800
+    },
+    {
+      "epoch": 3.6,
+      "grad_norm": 1.6278578042984009,
+      "learning_rate": 3e-05,
+      "loss": 2.0194,
+      "step": 900
+    },
+    {
+      "epoch": 4.0,
+      "grad_norm": 1.8584562540054321,
+      "learning_rate": 3e-05,
+      "loss": 2.0165,
+      "step": 1000
+    },
+    {
+      "epoch": 4.0,
+      "eval_accuracy": 0.5151428571428571,
+      "eval_loss": 2.3550074100494385,
+      "eval_runtime": 5.8265,
+      "eval_samples_per_second": 85.815,
+      "eval_steps_per_second": 10.813,
+      "step": 1000
+    },
+    {
+      "epoch": 4.4,
+      "grad_norm": 2.0351390838623047,
+      "learning_rate": 3e-05,
+      "loss": 1.8879,
+      "step": 1100
+    },
+    {
+      "epoch": 4.8,
+      "grad_norm": 2.1740379333496094,
+      "learning_rate": 3e-05,
+      "loss": 1.8949,
+      "step": 1200
+    },
+    {
+      "epoch": 5.0,
+      "eval_accuracy": 0.5125079365079365,
+      "eval_loss": 2.4099338054656982,
+      "eval_runtime": 6.0007,
+      "eval_samples_per_second": 83.323,
+      "eval_steps_per_second": 10.499,
+      "step": 1250
+    },
+    {
+      "epoch": 5.2,
+      "grad_norm": 2.3697099685668945,
+      "learning_rate": 3e-05,
+      "loss": 1.8434,
+      "step": 1300
+    },
+    {
+      "epoch": 5.6,
+      "grad_norm": 2.605630397796631,
+      "learning_rate": 3e-05,
+      "loss": 1.7754,
+      "step": 1400
+    },
+    {
+      "epoch": 6.0,
+      "grad_norm": 2.482067584991455,
+      "learning_rate": 3e-05,
+      "loss": 1.7858,
+      "step": 1500
+    },
+    {
+      "epoch": 6.0,
+      "eval_accuracy": 0.5107619047619048,
+      "eval_loss": 2.459786891937256,
+      "eval_runtime": 5.6735,
+      "eval_samples_per_second": 88.129,
+      "eval_steps_per_second": 11.104,
+      "step": 1500
+    },
+    {
+      "epoch": 6.4,
+      "grad_norm": 3.0776612758636475,
+      "learning_rate": 3e-05,
+      "loss": 1.6592,
+      "step": 1600
+    },
+    {
+      "epoch": 6.8,
+      "grad_norm": 2.9348673820495605,
+      "learning_rate": 3e-05,
+      "loss": 1.6743,
+      "step": 1700
+    },
+    {
+      "epoch": 7.0,
+      "eval_accuracy": 0.507968253968254,
+      "eval_loss": 2.5374317169189453,
+      "eval_runtime": 5.8306,
+      "eval_samples_per_second": 85.754,
+      "eval_steps_per_second": 10.805,
+      "step": 1750
+    },
+    {
+      "epoch": 7.2,
+      "grad_norm": 4.043740272521973,
+      "learning_rate": 3e-05,
+      "loss": 1.6158,
+      "step": 1800
+    },
+    {
+      "epoch": 7.6,
+      "grad_norm": 3.6799135208129883,
+      "learning_rate": 3e-05,
+      "loss": 1.552,
+      "step": 1900
+    },
+    {
+      "epoch": 8.0,
+      "grad_norm": 3.3421478271484375,
+      "learning_rate": 3e-05,
+      "loss": 1.578,
+      "step": 2000
+    },
+    {
+      "epoch": 8.0,
+      "eval_accuracy": 0.5064444444444445,
+      "eval_loss": 2.61122465133667,
+      "eval_runtime": 6.1244,
+      "eval_samples_per_second": 81.64,
+      "eval_steps_per_second": 10.287,
+      "step": 2000
+    },
+    {
+      "epoch": 8.4,
+      "grad_norm": 4.560361385345459,
+      "learning_rate": 3e-05,
+      "loss": 1.4378,
+      "step": 2100
+    },
+    {
+      "epoch": 8.8,
+      "grad_norm": 3.800124406814575,
+      "learning_rate": 3e-05,
+      "loss": 1.4609,
+      "step": 2200
+    },
+    {
+      "epoch": 9.0,
+      "eval_accuracy": 0.5040634920634921,
+      "eval_loss": 2.675280809402466,
+      "eval_runtime": 5.8443,
+      "eval_samples_per_second": 85.554,
+      "eval_steps_per_second": 10.78,
+      "step": 2250
+    },
+    {
+      "epoch": 9.2,
+      "grad_norm": 4.507548809051514,
+      "learning_rate": 3e-05,
+      "loss": 1.4057,
+      "step": 2300
+    },
+    {
+      "epoch": 9.6,
+      "grad_norm": 4.94022798538208,
+      "learning_rate": 3e-05,
+      "loss": 1.3474,
+      "step": 2400
+    },
+    {
+      "epoch": 10.0,
+      "grad_norm": 4.4978437423706055,
+      "learning_rate": 3e-05,
+      "loss": 1.3821,
+      "step": 2500
+    },
+    {
+      "epoch": 10.0,
+      "eval_accuracy": 0.5018412698412699,
+      "eval_loss": 2.7747349739074707,
+      "eval_runtime": 5.9804,
+      "eval_samples_per_second": 83.607,
+      "eval_steps_per_second": 10.534,
+      "step": 2500
+    },
+    {
+      "epoch": 10.4,
+      "grad_norm": 5.287800312042236,
+      "learning_rate": 3e-05,
+      "loss": 1.2189,
+      "step": 2600
+    },
+    {
+      "epoch": 10.8,
+      "grad_norm": 5.6393866539001465,
+      "learning_rate": 3e-05,
+      "loss": 1.2732,
+      "step": 2700
+    },
+    {
+      "epoch": 11.0,
+      "eval_accuracy": 0.49793650793650795,
+      "eval_loss": 2.8578593730926514,
+      "eval_runtime": 5.9848,
+      "eval_samples_per_second": 83.545,
+      "eval_steps_per_second": 10.527,
+      "step": 2750
+    },
+    {
+      "epoch": 11.2,
+      "grad_norm": 5.639581680297852,
+      "learning_rate": 3e-05,
+      "loss": 1.2011,
+      "step": 2800
+    },
+    {
+      "epoch": 11.6,
+      "grad_norm": 4.993607521057129,
+      "learning_rate": 3e-05,
+      "loss": 1.1516,
+      "step": 2900
+    },
+    {
+      "epoch": 12.0,
+      "grad_norm": 5.3670654296875,
+      "learning_rate": 3e-05,
+      "loss": 1.2047,
+      "step": 3000
+    },
+    {
+      "epoch": 12.0,
+      "eval_accuracy": 0.4972063492063492,
+      "eval_loss": 2.920531749725342,
+      "eval_runtime": 5.8415,
+      "eval_samples_per_second": 85.595,
+      "eval_steps_per_second": 10.785,
+      "step": 3000
+    },
+    {
+      "epoch": 12.4,
+      "grad_norm": 5.994533538818359,
+      "learning_rate": 3e-05,
+      "loss": 1.0399,
+      "step": 3100
+    },
+    {
+      "epoch": 12.8,
+      "grad_norm": 5.729809761047363,
+      "learning_rate": 3e-05,
+      "loss": 1.0965,
+      "step": 3200
+    },
+    {
+      "epoch": 13.0,
+      "eval_accuracy": 0.49406349206349204,
+      "eval_loss": 3.063263177871704,
+      "eval_runtime": 5.9839,
+      "eval_samples_per_second": 83.558,
+      "eval_steps_per_second": 10.528,
+      "step": 3250
+    },
+    {
+      "epoch": 13.2,
+      "grad_norm": 5.3366193771362305,
+      "learning_rate": 3e-05,
+      "loss": 1.038,
+      "step": 3300
+    },
+    {
+      "epoch": 13.6,
+      "grad_norm": 6.156430244445801,
+      "learning_rate": 3e-05,
+      "loss": 0.9832,
+      "step": 3400
+    },
+    {
+      "epoch": 14.0,
+      "grad_norm": 6.15902042388916,
+      "learning_rate": 3e-05,
+      "loss": 1.0197,
+      "step": 3500
+    },
+    {
+      "epoch": 14.0,
+      "eval_accuracy": 0.49326984126984125,
+      "eval_loss": 3.102771520614624,
+      "eval_runtime": 6.1308,
+      "eval_samples_per_second": 81.555,
+      "eval_steps_per_second": 10.276,
+      "step": 3500
+    },
+    {
+      "epoch": 14.4,
+      "grad_norm": 6.487879276275635,
+      "learning_rate": 3e-05,
+      "loss": 0.8909,
+      "step": 3600
+    },
+    {
+      "epoch": 14.8,
+      "grad_norm": 5.8875651359558105,
+      "learning_rate": 3e-05,
+      "loss": 0.9397,
+      "step": 3700
+    },
+    {
+      "epoch": 15.0,
+      "eval_accuracy": 0.4913015873015873,
+      "eval_loss": 3.2329375743865967,
+      "eval_runtime": 5.8504,
+      "eval_samples_per_second": 85.465,
+      "eval_steps_per_second": 10.769,
+      "step": 3750
+    },
+    {
+      "epoch": 15.2,
+      "grad_norm": 7.488805770874023,
+      "learning_rate": 3e-05,
+      "loss": 0.8821,
+      "step": 3800
+    },
+    {
+      "epoch": 15.6,
+      "grad_norm": 5.308674335479736,
+      "learning_rate": 3e-05,
+      "loss": 0.8564,
+      "step": 3900
+    },
+    {
+      "epoch": 16.0,
+      "grad_norm": 7.314394950866699,
+      "learning_rate": 3e-05,
+      "loss": 0.8754,
+      "step": 4000
+    },
+    {
+      "epoch": 16.0,
+      "eval_accuracy": 0.48898412698412697,
+      "eval_loss": 3.3337290287017822,
+      "eval_runtime": 5.665,
+      "eval_samples_per_second": 88.262,
+      "eval_steps_per_second": 11.121,
+      "step": 4000
+    },
+    {
+      "epoch": 16.4,
+      "grad_norm": 7.315802574157715,
+      "learning_rate": 3e-05,
+      "loss": 0.7682,
+      "step": 4100
+    },
+    {
+      "epoch": 16.8,
+      "grad_norm": 6.771595001220703,
+      "learning_rate": 3e-05,
+      "loss": 0.8084,
+      "step": 4200
+    },
+    {
+      "epoch": 17.0,
+      "eval_accuracy": 0.48853968253968255,
+      "eval_loss": 3.438441514968872,
+      "eval_runtime": 6.1412,
+      "eval_samples_per_second": 81.417,
+      "eval_steps_per_second": 10.259,
+      "step": 4250
+    },
+    {
+      "epoch": 17.2,
+      "grad_norm": 6.476133823394775,
+      "learning_rate": 3e-05,
+      "loss": 0.7666,
+      "step": 4300
+    },
+    {
+      "epoch": 17.6,
+      "grad_norm": 6.825470924377441,
+      "learning_rate": 3e-05,
+      "loss": 0.7467,
+      "step": 4400
+    },
+    {
+      "epoch": 18.0,
+      "grad_norm": 6.7562079429626465,
+      "learning_rate": 3e-05,
+      "loss": 0.7655,
+      "step": 4500
+    },
+    {
+      "epoch": 18.0,
+      "eval_accuracy": 0.4874603174603175,
+      "eval_loss": 3.4687836170196533,
+      "eval_runtime": 5.8271,
+      "eval_samples_per_second": 85.807,
+      "eval_steps_per_second": 10.812,
+      "step": 4500
+    },
+    {
+      "epoch": 18.4,
+      "grad_norm": 5.820846080780029,
+      "learning_rate": 3e-05,
+      "loss": 0.671,
+      "step": 4600
+    },
+    {
+      "epoch": 18.8,
+      "grad_norm": 6.176006317138672,
+      "learning_rate": 3e-05,
+      "loss": 0.7125,
+      "step": 4700
+    },
+    {
+      "epoch": 19.0,
+      "eval_accuracy": 0.4875555555555556,
+      "eval_loss": 3.564953327178955,
+      "eval_runtime": 5.6747,
+      "eval_samples_per_second": 88.111,
+      "eval_steps_per_second": 11.102,
+      "step": 4750
+    },
+    {
+      "epoch": 19.2,
+      "grad_norm": 6.995168685913086,
+      "learning_rate": 3e-05,
+      "loss": 0.6735,
+      "step": 4800
+    },
+    {
+      "epoch": 19.6,
+      "grad_norm": 5.369238376617432,
+      "learning_rate": 3e-05,
+      "loss": 0.6472,
+      "step": 4900
+    },
+    {
+      "epoch": 20.0,
+      "grad_norm": 5.25742244720459,
+      "learning_rate": 3e-05,
+      "loss": 0.6824,
+      "step": 5000
+    },
+    {
+      "epoch": 20.0,
+      "eval_accuracy": 0.48644444444444446,
+      "eval_loss": 3.6297109127044678,
+      "eval_runtime": 5.6743,
+      "eval_samples_per_second": 88.116,
+      "eval_steps_per_second": 11.103,
+      "step": 5000
+    },
+    {
+      "epoch": 20.0,
+      "step": 5000,
+      "total_flos": 1.293538587312128e+17,
+      "train_loss": 1.3807264678955078,
+      "train_runtime": 10548.406,
+      "train_samples_per_second": 15.168,
+      "train_steps_per_second": 0.474
+    }
+  ],
+  "logging_steps": 100,
+  "max_steps": 5000,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 20,
+  "save_steps": 500,
+  "total_flos": 1.293538587312128e+17,
+  "train_batch_size": 1,
+  "trial_name": null,
+  "trial_params": null
+}
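
trainer_state.json records the training loss every 100 steps and the eval metrics once per epoch. In these logs the training loss falls from about 2.46 to 0.68, while the eval loss bottoms out around 2.33 at epoch 2 and climbs to 3.63 by epoch 20, the usual overfitting pattern for long fine-tuning at a fixed learning rate. A small sketch for pulling those curves out of the file (the local path is assumed):

```python
import json

# Path is assumed; point it at the trainer_state.json added in this commit.
with open("trainer_state.json") as f:
    state = json.load(f)

# Training-loss entries carry a "loss" key; eval entries carry "eval_loss"/"eval_accuracy".
train_log = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_log = [(e["epoch"], e["eval_loss"], e["eval_accuracy"])
            for e in state["log_history"] if "eval_loss" in e]

print(train_log[0], train_log[-1])  # (100, 2.4625) ... (5000, 0.6824)
print(eval_log[0], eval_log[-1])    # epoch 1.0: loss 2.348 ... epoch 20.0: loss 3.630
```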