Tural committed on
Commit b24be94 · 1 Parent(s): 894ef4a

End of training

Files changed (5)
  1. README.md +22 -2
  2. all_results.json +14 -0
  3. eval_results.json +9 -0
  4. train_results.json +8 -0
  5. trainer_state.json +670 -0
README.md CHANGED
@@ -1,12 +1,29 @@
  ---
+ language:
+ - en
  base_model: Tural/language-modeling-from-scratch-ml
  tags:
  - generated_from_trainer
  datasets:
  - glue
+ metrics:
+ - matthews_correlation
  model-index:
  - name: ml-out-glue-cola
-   results: []
+   results:
+   - task:
+       name: Text Classification
+       type: text-classification
+     dataset:
+       name: GLUE COLA
+       type: glue
+       config: cola
+       split: validation
+       args: cola
+     metrics:
+     - name: Matthews Correlation
+       type: matthews_correlation
+       value: 0.2052273727942896
  ---
  
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -14,7 +31,10 @@ should probably proofread and complete it, then remove this comment. -->
  
  # ml-out-glue-cola
  
- This model is a fine-tuned version of [Tural/language-modeling-from-scratch-ml](https://huggingface.co/Tural/language-modeling-from-scratch-ml) on the glue dataset.
+ This model is a fine-tuned version of [Tural/language-modeling-from-scratch-ml](https://huggingface.co/Tural/language-modeling-from-scratch-ml) on the GLUE COLA dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 1.9269
+ - Matthews Correlation: 0.2052
  
  ## Model description
  
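The updated model card describes a sequence-classification checkpoint for CoLA acceptability judgments, evaluated with Matthews correlation. A minimal usage sketch follows; the repo id `Tural/ml-out-glue-cola` is an assumption inferred from the committer and model name, and the label names depend on the saved config (GLUE CoLA uses 0 = unacceptable, 1 = acceptable).

```python
# Minimal sketch: run the fine-tuned checkpoint on a CoLA-style sentence.
# The repo id "Tural/ml-out-glue-cola" is assumed from the committer and
# model name above, not confirmed by this commit.
from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline

model_id = "Tural/ml-out-glue-cola"  # hypothetical repo id
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id)

classifier = pipeline("text-classification", model=model, tokenizer=tokenizer)
print(classifier("The book was written by John."))
# e.g. [{'label': 'LABEL_1', 'score': ...}]; label mapping follows the config.
```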
all_results.json ADDED
@@ -0,0 +1,14 @@
+ {
+     "epoch": 20.0,
+     "eval_loss": 1.9269137382507324,
+     "eval_matthews_correlation": 0.2052273727942896,
+     "eval_runtime": 6.4158,
+     "eval_samples": 1043,
+     "eval_samples_per_second": 162.567,
+     "eval_steps_per_second": 1.403,
+     "train_loss": 0.2142893238688138,
+     "train_runtime": 3029.2991,
+     "train_samples": 8551,
+     "train_samples_per_second": 56.455,
+     "train_steps_per_second": 0.885
+ }
eval_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+     "epoch": 20.0,
+     "eval_loss": 1.9269137382507324,
+     "eval_matthews_correlation": 0.2052273727942896,
+     "eval_runtime": 6.4158,
+     "eval_samples": 1043,
+     "eval_samples_per_second": 162.567,
+     "eval_steps_per_second": 1.403
+ }
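The `eval_matthews_correlation` field is the standard GLUE metric for CoLA. As a reference point, it can be reproduced from label/prediction arrays with scikit-learn's `matthews_corrcoef` (the HF example scripts compute the same value through the GLUE metric); the sketch below uses toy arrays, not this model's predictions. The reported 0.2052 indicates a weak positive correlation between predictions and gold labels on the validation split.

```python
# Sketch: computing Matthews correlation from gold labels and predictions,
# the same quantity reported as eval_matthews_correlation above.
from sklearn.metrics import matthews_corrcoef

y_true = [1, 1, 0, 1, 0, 0, 1, 0]  # toy gold acceptability labels
y_pred = [1, 0, 0, 1, 0, 1, 1, 0]  # toy model predictions
print(matthews_corrcoef(y_true, y_pred))  # value in [-1, 1]
```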
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 20.0,
+     "train_loss": 0.2142893238688138,
+     "train_runtime": 3029.2991,
+     "train_samples": 8551,
+     "train_samples_per_second": 56.455,
+     "train_steps_per_second": 0.885
+ }
trainer_state.json ADDED
@@ -0,0 +1,670 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 20.0,
+   "eval_steps": 2.0,
+   "global_step": 2680,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.19,
+       "learning_rate": 1.9813432835820897e-05,
+       "loss": 0.6233,
+       "step": 25
+     },
+     {
+       "epoch": 0.37,
+       "learning_rate": 1.9626865671641793e-05,
+       "loss": 0.6221,
+       "step": 50
+     },
+     {
+       "epoch": 0.56,
+       "learning_rate": 1.9440298507462686e-05,
+       "loss": 0.6235,
+       "step": 75
+     },
+     {
+       "epoch": 0.75,
+       "learning_rate": 1.9253731343283585e-05,
+       "loss": 0.5969,
+       "step": 100
+     },
+     {
+       "epoch": 0.93,
+       "learning_rate": 1.9067164179104477e-05,
+       "loss": 0.6,
+       "step": 125
+     },
+     {
+       "epoch": 1.12,
+       "learning_rate": 1.8880597014925376e-05,
+       "loss": 0.615,
+       "step": 150
+     },
+     {
+       "epoch": 1.31,
+       "learning_rate": 1.869402985074627e-05,
+       "loss": 0.5995,
+       "step": 175
+     },
+     {
+       "epoch": 1.49,
+       "learning_rate": 1.8507462686567165e-05,
+       "loss": 0.5833,
+       "step": 200
+     },
+     {
+       "epoch": 1.68,
+       "learning_rate": 1.832089552238806e-05,
+       "loss": 0.6055,
+       "step": 225
+     },
+     {
+       "epoch": 1.87,
+       "learning_rate": 1.8134328358208956e-05,
+       "loss": 0.5892,
+       "step": 250
+     },
+     {
+       "epoch": 2.05,
+       "learning_rate": 1.7947761194029852e-05,
+       "loss": 0.5942,
+       "step": 275
+     },
+     {
+       "epoch": 2.24,
+       "learning_rate": 1.7761194029850748e-05,
+       "loss": 0.5543,
+       "step": 300
+     },
+     {
+       "epoch": 2.43,
+       "learning_rate": 1.7574626865671644e-05,
+       "loss": 0.5465,
+       "step": 325
+     },
+     {
+       "epoch": 2.61,
+       "learning_rate": 1.738805970149254e-05,
+       "loss": 0.5416,
+       "step": 350
+     },
+     {
+       "epoch": 2.8,
+       "learning_rate": 1.7201492537313432e-05,
+       "loss": 0.5653,
+       "step": 375
+     },
+     {
+       "epoch": 2.99,
+       "learning_rate": 1.701492537313433e-05,
+       "loss": 0.5215,
+       "step": 400
+     },
+     {
+       "epoch": 3.17,
+       "learning_rate": 1.6828358208955223e-05,
+       "loss": 0.4565,
+       "step": 425
+     },
+     {
+       "epoch": 3.36,
+       "learning_rate": 1.6641791044776122e-05,
+       "loss": 0.4517,
+       "step": 450
+     },
+     {
+       "epoch": 3.54,
+       "learning_rate": 1.6455223880597015e-05,
+       "loss": 0.4791,
+       "step": 475
+     },
+     {
+       "epoch": 3.73,
+       "learning_rate": 1.626865671641791e-05,
+       "loss": 0.4519,
+       "step": 500
+     },
+     {
+       "epoch": 3.92,
+       "learning_rate": 1.6082089552238806e-05,
+       "loss": 0.4671,
+       "step": 525
+     },
+     {
+       "epoch": 4.1,
+       "learning_rate": 1.5895522388059702e-05,
+       "loss": 0.438,
+       "step": 550
+     },
+     {
+       "epoch": 4.29,
+       "learning_rate": 1.5708955223880598e-05,
+       "loss": 0.3501,
+       "step": 575
+     },
+     {
+       "epoch": 4.48,
+       "learning_rate": 1.5522388059701494e-05,
+       "loss": 0.3944,
+       "step": 600
+     },
+     {
+       "epoch": 4.66,
+       "learning_rate": 1.533582089552239e-05,
+       "loss": 0.3881,
+       "step": 625
+     },
+     {
+       "epoch": 4.85,
+       "learning_rate": 1.5149253731343285e-05,
+       "loss": 0.3794,
+       "step": 650
+     },
+     {
+       "epoch": 5.04,
+       "learning_rate": 1.496268656716418e-05,
+       "loss": 0.3824,
+       "step": 675
+     },
+     {
+       "epoch": 5.22,
+       "learning_rate": 1.4776119402985077e-05,
+       "loss": 0.3098,
+       "step": 700
+     },
+     {
+       "epoch": 5.41,
+       "learning_rate": 1.4589552238805971e-05,
+       "loss": 0.2898,
+       "step": 725
+     },
+     {
+       "epoch": 5.6,
+       "learning_rate": 1.4402985074626867e-05,
+       "loss": 0.3009,
+       "step": 750
+     },
+     {
+       "epoch": 5.78,
+       "learning_rate": 1.4216417910447761e-05,
+       "loss": 0.3145,
+       "step": 775
+     },
+     {
+       "epoch": 5.97,
+       "learning_rate": 1.4029850746268658e-05,
+       "loss": 0.323,
+       "step": 800
+     },
+     {
+       "epoch": 6.16,
+       "learning_rate": 1.3843283582089553e-05,
+       "loss": 0.2488,
+       "step": 825
+     },
+     {
+       "epoch": 6.34,
+       "learning_rate": 1.365671641791045e-05,
+       "loss": 0.2145,
+       "step": 850
+     },
+     {
+       "epoch": 6.53,
+       "learning_rate": 1.3470149253731344e-05,
+       "loss": 0.2635,
+       "step": 875
+     },
+     {
+       "epoch": 6.72,
+       "learning_rate": 1.328358208955224e-05,
+       "loss": 0.2422,
+       "step": 900
+     },
+     {
+       "epoch": 6.9,
+       "learning_rate": 1.3097014925373134e-05,
+       "loss": 0.2691,
+       "step": 925
+     },
+     {
+       "epoch": 7.09,
+       "learning_rate": 1.2910447761194032e-05,
+       "loss": 0.2335,
+       "step": 950
+     },
+     {
+       "epoch": 7.28,
+       "learning_rate": 1.2723880597014926e-05,
+       "loss": 0.2011,
+       "step": 975
+     },
+     {
+       "epoch": 7.46,
+       "learning_rate": 1.2537313432835823e-05,
+       "loss": 0.1889,
+       "step": 1000
+     },
+     {
+       "epoch": 7.65,
+       "learning_rate": 1.2350746268656717e-05,
+       "loss": 0.2176,
+       "step": 1025
+     },
+     {
+       "epoch": 7.84,
+       "learning_rate": 1.2164179104477613e-05,
+       "loss": 0.2217,
+       "step": 1050
+     },
+     {
+       "epoch": 8.02,
+       "learning_rate": 1.1977611940298509e-05,
+       "loss": 0.2057,
+       "step": 1075
+     },
+     {
+       "epoch": 8.21,
+       "learning_rate": 1.1791044776119405e-05,
+       "loss": 0.1614,
+       "step": 1100
+     },
+     {
+       "epoch": 8.4,
+       "learning_rate": 1.1604477611940299e-05,
+       "loss": 0.1548,
+       "step": 1125
+     },
+     {
+       "epoch": 8.58,
+       "learning_rate": 1.1417910447761196e-05,
+       "loss": 0.1576,
+       "step": 1150
+     },
+     {
+       "epoch": 8.77,
+       "learning_rate": 1.123134328358209e-05,
+       "loss": 0.1666,
+       "step": 1175
+     },
+     {
+       "epoch": 8.96,
+       "learning_rate": 1.1044776119402986e-05,
+       "loss": 0.1633,
+       "step": 1200
+     },
+     {
+       "epoch": 9.14,
+       "learning_rate": 1.0858208955223882e-05,
+       "loss": 0.122,
+       "step": 1225
+     },
+     {
+       "epoch": 9.33,
+       "learning_rate": 1.0671641791044778e-05,
+       "loss": 0.1254,
+       "step": 1250
+     },
+     {
+       "epoch": 9.51,
+       "learning_rate": 1.0485074626865672e-05,
+       "loss": 0.1193,
+       "step": 1275
+     },
+     {
+       "epoch": 9.7,
+       "learning_rate": 1.029850746268657e-05,
+       "loss": 0.1602,
+       "step": 1300
+     },
+     {
+       "epoch": 9.89,
+       "learning_rate": 1.0111940298507463e-05,
+       "loss": 0.161,
+       "step": 1325
+     },
+     {
+       "epoch": 10.07,
+       "learning_rate": 9.925373134328359e-06,
+       "loss": 0.1128,
+       "step": 1350
+     },
+     {
+       "epoch": 10.26,
+       "learning_rate": 9.738805970149255e-06,
+       "loss": 0.0877,
+       "step": 1375
+     },
+     {
+       "epoch": 10.45,
+       "learning_rate": 9.552238805970149e-06,
+       "loss": 0.1165,
+       "step": 1400
+     },
+     {
+       "epoch": 10.63,
+       "learning_rate": 9.365671641791045e-06,
+       "loss": 0.1232,
+       "step": 1425
+     },
+     {
+       "epoch": 10.82,
+       "learning_rate": 9.17910447761194e-06,
+       "loss": 0.1285,
+       "step": 1450
+     },
+     {
+       "epoch": 11.01,
+       "learning_rate": 8.992537313432836e-06,
+       "loss": 0.1214,
+       "step": 1475
+     },
+     {
+       "epoch": 11.19,
+       "learning_rate": 8.805970149253732e-06,
+       "loss": 0.0804,
+       "step": 1500
+     },
+     {
+       "epoch": 11.38,
+       "learning_rate": 8.619402985074628e-06,
+       "loss": 0.105,
+       "step": 1525
+     },
+     {
+       "epoch": 11.57,
+       "learning_rate": 8.432835820895524e-06,
+       "loss": 0.0885,
+       "step": 1550
+     },
+     {
+       "epoch": 11.75,
+       "learning_rate": 8.246268656716418e-06,
+       "loss": 0.1015,
+       "step": 1575
+     },
+     {
+       "epoch": 11.94,
+       "learning_rate": 8.059701492537314e-06,
+       "loss": 0.101,
+       "step": 1600
+     },
+     {
+       "epoch": 12.13,
+       "learning_rate": 7.87313432835821e-06,
+       "loss": 0.1017,
+       "step": 1625
+     },
+     {
+       "epoch": 12.31,
+       "learning_rate": 7.686567164179105e-06,
+       "loss": 0.08,
+       "step": 1650
+     },
+     {
+       "epoch": 12.5,
+       "learning_rate": 7.500000000000001e-06,
+       "loss": 0.1013,
+       "step": 1675
+     },
+     {
+       "epoch": 12.69,
+       "learning_rate": 7.313432835820896e-06,
+       "loss": 0.088,
+       "step": 1700
+     },
+     {
+       "epoch": 12.87,
+       "learning_rate": 7.126865671641792e-06,
+       "loss": 0.0816,
+       "step": 1725
+     },
+     {
+       "epoch": 13.06,
+       "learning_rate": 6.9402985074626876e-06,
+       "loss": 0.0657,
+       "step": 1750
+     },
+     {
+       "epoch": 13.25,
+       "learning_rate": 6.7537313432835825e-06,
+       "loss": 0.0765,
+       "step": 1775
+     },
+     {
+       "epoch": 13.43,
+       "learning_rate": 6.567164179104478e-06,
+       "loss": 0.0655,
+       "step": 1800
+     },
+     {
+       "epoch": 13.62,
+       "learning_rate": 6.380597014925374e-06,
+       "loss": 0.0854,
+       "step": 1825
+     },
+     {
+       "epoch": 13.81,
+       "learning_rate": 6.194029850746269e-06,
+       "loss": 0.0792,
+       "step": 1850
+     },
+     {
+       "epoch": 13.99,
+       "learning_rate": 6.007462686567165e-06,
+       "loss": 0.0778,
+       "step": 1875
+     },
+     {
+       "epoch": 14.18,
+       "learning_rate": 5.820895522388061e-06,
+       "loss": 0.049,
+       "step": 1900
+     },
+     {
+       "epoch": 14.37,
+       "learning_rate": 5.6343283582089556e-06,
+       "loss": 0.0625,
+       "step": 1925
+     },
+     {
+       "epoch": 14.55,
+       "learning_rate": 5.447761194029851e-06,
+       "loss": 0.0655,
+       "step": 1950
+     },
+     {
+       "epoch": 14.74,
+       "learning_rate": 5.261194029850747e-06,
+       "loss": 0.0636,
+       "step": 1975
+     },
+     {
+       "epoch": 14.93,
+       "learning_rate": 5.074626865671642e-06,
+       "loss": 0.0548,
+       "step": 2000
+     },
+     {
+       "epoch": 15.11,
+       "learning_rate": 4.888059701492538e-06,
+       "loss": 0.0509,
+       "step": 2025
+     },
+     {
+       "epoch": 15.3,
+       "learning_rate": 4.701492537313434e-06,
+       "loss": 0.0424,
+       "step": 2050
+     },
+     {
+       "epoch": 15.49,
+       "learning_rate": 4.514925373134329e-06,
+       "loss": 0.0566,
+       "step": 2075
+     },
+     {
+       "epoch": 15.67,
+       "learning_rate": 4.3283582089552236e-06,
+       "loss": 0.0547,
+       "step": 2100
+     },
+     {
+       "epoch": 15.86,
+       "learning_rate": 4.141791044776119e-06,
+       "loss": 0.0612,
+       "step": 2125
+     },
+     {
+       "epoch": 16.04,
+       "learning_rate": 3.955223880597015e-06,
+       "loss": 0.0502,
+       "step": 2150
+     },
+     {
+       "epoch": 16.23,
+       "learning_rate": 3.7686567164179105e-06,
+       "loss": 0.0392,
+       "step": 2175
+     },
+     {
+       "epoch": 16.42,
+       "learning_rate": 3.582089552238806e-06,
+       "loss": 0.0346,
+       "step": 2200
+     },
+     {
+       "epoch": 16.6,
+       "learning_rate": 3.3955223880597017e-06,
+       "loss": 0.043,
+       "step": 2225
+     },
+     {
+       "epoch": 16.79,
+       "learning_rate": 3.208955223880597e-06,
+       "loss": 0.0498,
+       "step": 2250
+     },
+     {
+       "epoch": 16.98,
+       "learning_rate": 3.022388059701493e-06,
+       "loss": 0.0511,
+       "step": 2275
+     },
+     {
+       "epoch": 17.16,
+       "learning_rate": 2.835820895522388e-06,
+       "loss": 0.0358,
+       "step": 2300
+     },
+     {
+       "epoch": 17.35,
+       "learning_rate": 2.6492537313432836e-06,
+       "loss": 0.0345,
+       "step": 2325
+     },
+     {
+       "epoch": 17.54,
+       "learning_rate": 2.4626865671641794e-06,
+       "loss": 0.0267,
+       "step": 2350
+     },
+     {
+       "epoch": 17.72,
+       "learning_rate": 2.2761194029850747e-06,
+       "loss": 0.0237,
+       "step": 2375
+     },
+     {
+       "epoch": 17.91,
+       "learning_rate": 2.08955223880597e-06,
+       "loss": 0.0383,
+       "step": 2400
+     },
+     {
+       "epoch": 18.1,
+       "learning_rate": 1.9029850746268657e-06,
+       "loss": 0.0396,
+       "step": 2425
+     },
+     {
+       "epoch": 18.28,
+       "learning_rate": 1.7164179104477613e-06,
+       "loss": 0.0361,
+       "step": 2450
+     },
+     {
+       "epoch": 18.47,
+       "learning_rate": 1.5298507462686568e-06,
+       "loss": 0.0279,
+       "step": 2475
+     },
+     {
+       "epoch": 18.66,
+       "learning_rate": 1.3432835820895524e-06,
+       "loss": 0.0385,
+       "step": 2500
+     },
+     {
+       "epoch": 18.84,
+       "learning_rate": 1.1567164179104478e-06,
+       "loss": 0.0313,
+       "step": 2525
+     },
+     {
+       "epoch": 19.03,
+       "learning_rate": 9.701492537313434e-07,
+       "loss": 0.0357,
+       "step": 2550
+     },
+     {
+       "epoch": 19.22,
+       "learning_rate": 7.835820895522389e-07,
+       "loss": 0.0366,
+       "step": 2575
+     },
+     {
+       "epoch": 19.4,
+       "learning_rate": 5.970149253731343e-07,
+       "loss": 0.0236,
+       "step": 2600
+     },
+     {
+       "epoch": 19.59,
+       "learning_rate": 4.104477611940299e-07,
+       "loss": 0.0205,
+       "step": 2625
+     },
+     {
+       "epoch": 19.78,
+       "learning_rate": 2.2388059701492537e-07,
+       "loss": 0.035,
+       "step": 2650
+     },
+     {
+       "epoch": 19.96,
+       "learning_rate": 3.7313432835820895e-08,
+       "loss": 0.0303,
+       "step": 2675
+     },
+     {
+       "epoch": 20.0,
+       "step": 2680,
+       "total_flos": 1.12493131719168e+16,
+       "train_loss": 0.2142893238688138,
+       "train_runtime": 3029.2991,
+       "train_samples_per_second": 56.455,
+       "train_steps_per_second": 0.885
+     }
+   ],
+   "logging_steps": 25,
+   "max_steps": 2680,
+   "num_train_epochs": 20,
+   "save_steps": 500,
+   "total_flos": 1.12493131719168e+16,
+   "trial_name": null,
+   "trial_params": null
+ }
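The `log_history` list above records the training loss and learning rate every 25 steps (`logging_steps`), which is enough to recover the loss curve for the run. A small sketch for extracting and plotting it from the saved file follows; the local path and the availability of matplotlib are assumptions.

```python
# Sketch: read the saved trainer_state.json and plot the training-loss
# curve recorded in log_history. Assumes the file is in the current
# directory and matplotlib is installed.
import json

import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the periodic logging entries (the final summary entry has
# "train_loss" instead of "loss" and is skipped by this filter).
points = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
steps, losses = zip(*points)

plt.plot(steps, losses)
plt.xlabel("training step")
plt.ylabel("training loss")
plt.title("ml-out-glue-cola training loss")
plt.savefig("training_loss.png")
```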