fpadovani committed (verified)
Commit: d8aa4d7
Parent: ed3fef5

Training in progress, step 48000, checkpoint

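The checkpoint files below follow the standard layout the Hugging Face `Trainer` writes every `save_steps` steps. As a rough orientation only (not the author's actual training script), the sketch mirrors the schedule settings visible in `trainer_state.json` (`max_steps=100000`, `save_steps=4000`, `eval_steps=2000`, `logging_steps=4000`, batch size 16); the output directory is a placeholder, and the model/datasets are left to the user.

```python
from transformers import TrainingArguments

# Settings mirrored from checkpoint-48000/trainer_state.json; everything else is hypothetical.
args = TrainingArguments(
    output_dir="childes_42",          # placeholder output directory
    max_steps=100_000,
    per_device_train_batch_size=16,
    eval_strategy="steps",            # named evaluation_strategy in older transformers releases
    eval_steps=2_000,
    logging_steps=4_000,
    save_steps=4_000,
)

# With a causal LM and tokenized datasets in hand, resuming re-loads model.safetensors,
# optimizer.pt, scheduler.pt, and rng_state.pth from the checkpoint directory:
# trainer = Trainer(model=model, args=args, train_dataset=..., eval_dataset=...)
# trainer.train(resume_from_checkpoint="checkpoint-48000")
```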
checkpoint-48000/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5111253d5654c0f771d81f8a097260b7c4fd54f2ff072000b908e95295de9f37
+oid sha256:5c9e7e2f08b5fb7cc87f71db9d514e9b3a736cb5d9f324c06b70c60f60d30538
 size 51007160
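Each of these entries is a Git LFS pointer file: the repository stores only the object's sha256 oid and byte size, while the actual tensor data lives in LFS storage. If you download the raw object, you can check it against the pointer with a few lines of Python (the local filename below is an assumption):

```python
import hashlib
import os

path = "model.safetensors"  # assumed local download of the LFS object

sha256 = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha256.update(chunk)

# Compare against the pointer's "oid sha256:..." and "size ..." lines.
print("oid sha256:", sha256.hexdigest())
print("size", os.path.getsize(path))
```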
checkpoint-48000/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9e21c8e38145139df8883bc5cff31108b50dcc6ffccf3912783457b6f1774592
+oid sha256:1784c6abefe8b93cba0c8c77ed55a792ddb54f4584e26be6c03d0b4daf427773
 size 102078202
checkpoint-48000/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:028ff7bf42e19236465943cd515e39f4ec2da74f90d7d6f6e7e168398ef7528a
+oid sha256:0fcb71d6e61cdba80f94274e99f11ca6054ddccf9c7e7c9c242763df0f07601f
 size 14244
checkpoint-48000/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cbe06da11b5cdd588d1873d9c23d587ee65f6dc8da7f1c18f378e311101e73bb
+oid sha256:78d7d5eaf2c026b79868b3befc395dd0b9e037728decf46d65f818df3cb8da1e
 size 1000
checkpoint-48000/tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
checkpoint-48000/trainer_state.json CHANGED
@@ -1,7 +1,7 @@
 {
-  "best_metric": 3.937915563583374,
-  "best_model_checkpoint": "/home/p318482/babyLM_controlled/models_trained/fr_clm/childes_42/checkpoint-24000",
-  "epoch": 75.47169811320755,
+  "best_metric": 4.579585075378418,
+  "best_model_checkpoint": "/home/p318482/babyLM_controlled/models_trained/de_clm/childes_42/checkpoint-32000",
+  "epoch": 50.314465408805034,
   "eval_steps": 2000,
   "global_step": 48000,
   "is_hyper_param_search": false,
@@ -9,286 +9,286 @@
   "is_world_process_zero": true,
   "log_history": [
     {
-      "epoch": 3.1446540880503147,
-      "eval_loss": 6.666613578796387,
-      "eval_runtime": 0.6599,
-      "eval_samples_per_second": 1380.413,
-      "eval_steps_per_second": 86.37,
+      "epoch": 2.0964360587002098,
+      "eval_loss": 7.0908098220825195,
+      "eval_runtime": 0.9834,
+      "eval_samples_per_second": 1398.23,
+      "eval_steps_per_second": 87.453,
       "step": 2000
     },
     {
-      "epoch": 6.289308176100629,
-      "grad_norm": 1.431768536567688,
+      "epoch": 4.1928721174004195,
+      "grad_norm": 1.4699604511260986,
       "learning_rate": 1e-05,
-      "loss": 6.5315,
+      "loss": 6.9765,
       "step": 4000
     },
     {
-      "epoch": 6.289308176100629,
-      "eval_loss": 5.101934432983398,
-      "eval_runtime": 0.6406,
-      "eval_samples_per_second": 1422.145,
-      "eval_steps_per_second": 88.982,
+      "epoch": 4.1928721174004195,
+      "eval_loss": 5.874638557434082,
+      "eval_runtime": 0.973,
+      "eval_samples_per_second": 1413.214,
+      "eval_steps_per_second": 88.39,
       "step": 4000
     },
     {
-      "epoch": 9.433962264150944,
-      "eval_loss": 4.677049160003662,
-      "eval_runtime": 0.6439,
-      "eval_samples_per_second": 1414.724,
-      "eval_steps_per_second": 88.517,
+      "epoch": 6.289308176100629,
+      "eval_loss": 5.544075965881348,
+      "eval_runtime": 0.9898,
+      "eval_samples_per_second": 1389.228,
+      "eval_steps_per_second": 86.89,
       "step": 6000
     },
     {
-      "epoch": 12.578616352201259,
-      "grad_norm": 2.6387760639190674,
-      "learning_rate": 2e-05,
-      "loss": 4.3407,
+      "epoch": 8.385744234800839,
+      "grad_norm": 2.451873779296875,
+      "learning_rate": 1.9997500000000003e-05,
+      "loss": 5.2182,
       "step": 8000
     },
     {
-      "epoch": 12.578616352201259,
-      "eval_loss": 4.4404096603393555,
-      "eval_runtime": 0.6445,
-      "eval_samples_per_second": 1413.55,
-      "eval_steps_per_second": 88.444,
+      "epoch": 8.385744234800839,
+      "eval_loss": 5.278811454772949,
+      "eval_runtime": 0.9809,
+      "eval_samples_per_second": 1401.823,
+      "eval_steps_per_second": 87.678,
       "step": 8000
     },
     {
-      "epoch": 15.723270440251572,
-      "eval_loss": 4.286661624908447,
-      "eval_runtime": 0.6446,
-      "eval_samples_per_second": 1413.35,
-      "eval_steps_per_second": 88.431,
+      "epoch": 10.482180293501049,
+      "eval_loss": 5.099233150482178,
+      "eval_runtime": 0.9694,
+      "eval_samples_per_second": 1418.396,
+      "eval_steps_per_second": 88.714,
       "step": 10000
     },
     {
-      "epoch": 18.867924528301888,
-      "grad_norm": 2.381523609161377,
+      "epoch": 12.578616352201259,
+      "grad_norm": 2.490121841430664,
       "learning_rate": 2.9995e-05,
-      "loss": 3.8725,
+      "loss": 4.7379,
       "step": 12000
     },
     {
-      "epoch": 18.867924528301888,
-      "eval_loss": 4.171908855438232,
-      "eval_runtime": 0.6441,
-      "eval_samples_per_second": 1414.321,
-      "eval_steps_per_second": 88.492,
+      "epoch": 12.578616352201259,
+      "eval_loss": 4.970981597900391,
+      "eval_runtime": 0.9729,
+      "eval_samples_per_second": 1413.251,
+      "eval_steps_per_second": 88.392,
       "step": 12000
     },
     {
-      "epoch": 22.0125786163522,
-      "eval_loss": 4.096552848815918,
-      "eval_runtime": 0.6506,
-      "eval_samples_per_second": 1400.196,
-      "eval_steps_per_second": 87.608,
+      "epoch": 14.675052410901468,
+      "eval_loss": 4.875853061676025,
+      "eval_runtime": 0.9902,
+      "eval_samples_per_second": 1388.55,
+      "eval_steps_per_second": 86.848,
       "step": 14000
     },
     {
-      "epoch": 25.157232704402517,
-      "grad_norm": 2.4479799270629883,
-      "learning_rate": 3.999e-05,
-      "loss": 3.5953,
+      "epoch": 16.771488469601678,
+      "grad_norm": 2.485079765319824,
+      "learning_rate": 3.99925e-05,
+      "loss": 4.4249,
       "step": 16000
     },
     {
-      "epoch": 25.157232704402517,
-      "eval_loss": 4.033241271972656,
-      "eval_runtime": 0.6465,
-      "eval_samples_per_second": 1409.06,
-      "eval_steps_per_second": 88.163,
+      "epoch": 16.771488469601678,
+      "eval_loss": 4.8005266189575195,
+      "eval_runtime": 0.9709,
+      "eval_samples_per_second": 1416.144,
+      "eval_steps_per_second": 88.573,
       "step": 16000
     },
     {
-      "epoch": 28.30188679245283,
-      "eval_loss": 3.9956283569335938,
-      "eval_runtime": 0.6443,
-      "eval_samples_per_second": 1413.837,
-      "eval_steps_per_second": 88.462,
+      "epoch": 18.867924528301888,
+      "eval_loss": 4.743557453155518,
+      "eval_runtime": 0.9764,
+      "eval_samples_per_second": 1408.167,
+      "eval_steps_per_second": 88.074,
       "step": 18000
     },
     {
-      "epoch": 31.446540880503143,
-      "grad_norm": 2.266655921936035,
-      "learning_rate": 4.9985e-05,
-      "loss": 3.3822,
+      "epoch": 20.964360587002098,
+      "grad_norm": 2.311079978942871,
+      "learning_rate": 4.99875e-05,
+      "loss": 4.1842,
       "step": 20000
     },
     {
-      "epoch": 31.446540880503143,
-      "eval_loss": 3.970871686935425,
-      "eval_runtime": 0.6424,
-      "eval_samples_per_second": 1418.161,
-      "eval_steps_per_second": 88.732,
+      "epoch": 20.964360587002098,
+      "eval_loss": 4.692162990570068,
+      "eval_runtime": 0.9728,
+      "eval_samples_per_second": 1413.396,
+      "eval_steps_per_second": 88.401,
       "step": 20000
     },
     {
-      "epoch": 34.59119496855346,
-      "eval_loss": 3.951535224914551,
-      "eval_runtime": 0.6747,
-      "eval_samples_per_second": 1350.174,
-      "eval_steps_per_second": 84.479,
+      "epoch": 23.060796645702307,
+      "eval_loss": 4.648115634918213,
+      "eval_runtime": 0.9711,
+      "eval_samples_per_second": 1415.941,
+      "eval_steps_per_second": 88.561,
       "step": 22000
     },
     {
-      "epoch": 37.735849056603776,
-      "grad_norm": 2.3022546768188477,
-      "learning_rate": 5.9980000000000005e-05,
-      "loss": 3.2038,
+      "epoch": 25.157232704402517,
+      "grad_norm": 2.3314199447631836,
+      "learning_rate": 5.998250000000001e-05,
+      "loss": 3.9843,
       "step": 24000
     },
     {
-      "epoch": 37.735849056603776,
-      "eval_loss": 3.937915563583374,
-      "eval_runtime": 0.6427,
-      "eval_samples_per_second": 1417.462,
-      "eval_steps_per_second": 88.689,
+      "epoch": 25.157232704402517,
+      "eval_loss": 4.615537166595459,
+      "eval_runtime": 0.9745,
+      "eval_samples_per_second": 1410.994,
+      "eval_steps_per_second": 88.251,
       "step": 24000
     },
     {
-      "epoch": 40.880503144654085,
-      "eval_loss": 3.935504913330078,
-      "eval_runtime": 0.6463,
-      "eval_samples_per_second": 1409.641,
-      "eval_steps_per_second": 88.199,
+      "epoch": 27.253668763102727,
+      "eval_loss": 4.59817361831665,
+      "eval_runtime": 0.9725,
+      "eval_samples_per_second": 1413.828,
+      "eval_steps_per_second": 88.429,
       "step": 26000
     },
     {
-      "epoch": 44.0251572327044,
-      "grad_norm": 2.377114772796631,
-      "learning_rate": 6.997500000000001e-05,
-      "loss": 3.0443,
+      "epoch": 29.350104821802937,
+      "grad_norm": 2.2022252082824707,
+      "learning_rate": 6.99775e-05,
+      "loss": 3.8181,
       "step": 28000
     },
     {
-      "epoch": 44.0251572327044,
-      "eval_loss": 3.951632261276245,
-      "eval_runtime": 0.6452,
-      "eval_samples_per_second": 1412.017,
-      "eval_steps_per_second": 88.348,
+      "epoch": 29.350104821802937,
+      "eval_loss": 4.584521293640137,
+      "eval_runtime": 0.9704,
+      "eval_samples_per_second": 1416.917,
+      "eval_steps_per_second": 88.622,
       "step": 28000
     },
     {
-      "epoch": 47.16981132075472,
-      "eval_loss": 3.9560022354125977,
-      "eval_runtime": 0.6457,
-      "eval_samples_per_second": 1410.953,
-      "eval_steps_per_second": 88.281,
+      "epoch": 31.446540880503143,
+      "eval_loss": 4.581137657165527,
+      "eval_runtime": 0.9781,
+      "eval_samples_per_second": 1405.736,
+      "eval_steps_per_second": 87.922,
       "step": 30000
     },
     {
-      "epoch": 50.314465408805034,
-      "grad_norm": 2.4269351959228516,
-      "learning_rate": 7.997e-05,
-      "loss": 2.8937,
+      "epoch": 33.542976939203356,
+      "grad_norm": 2.109912872314453,
+      "learning_rate": 7.997250000000001e-05,
+      "loss": 3.6751,
       "step": 32000
     },
     {
-      "epoch": 50.314465408805034,
-      "eval_loss": 3.9868452548980713,
-      "eval_runtime": 0.642,
-      "eval_samples_per_second": 1418.921,
-      "eval_steps_per_second": 88.78,
+      "epoch": 33.542976939203356,
+      "eval_loss": 4.579585075378418,
+      "eval_runtime": 0.9725,
+      "eval_samples_per_second": 1413.852,
+      "eval_steps_per_second": 88.43,
       "step": 32000
     },
     {
-      "epoch": 53.459119496855344,
-      "eval_loss": 4.025730133056641,
-      "eval_runtime": 0.6451,
-      "eval_samples_per_second": 1412.122,
-      "eval_steps_per_second": 88.355,
+      "epoch": 35.63941299790356,
+      "eval_loss": 4.582756042480469,
+      "eval_runtime": 0.9724,
+      "eval_samples_per_second": 1413.996,
+      "eval_steps_per_second": 88.439,
       "step": 34000
     },
     {
-      "epoch": 56.60377358490566,
-      "grad_norm": 2.4868056774139404,
-      "learning_rate": 8.9965e-05,
-      "loss": 2.7503,
+      "epoch": 37.735849056603776,
+      "grad_norm": 2.1550486087799072,
+      "learning_rate": 8.996750000000001e-05,
+      "loss": 3.5484,
       "step": 36000
     },
     {
-      "epoch": 56.60377358490566,
-      "eval_loss": 4.050300598144531,
-      "eval_runtime": 0.6446,
-      "eval_samples_per_second": 1413.342,
-      "eval_steps_per_second": 88.431,
+      "epoch": 37.735849056603776,
+      "eval_loss": 4.586859226226807,
+      "eval_runtime": 0.979,
+      "eval_samples_per_second": 1404.481,
+      "eval_steps_per_second": 87.844,
       "step": 36000
     },
     {
-      "epoch": 59.74842767295598,
-      "eval_loss": 4.09123420715332,
-      "eval_runtime": 0.6444,
-      "eval_samples_per_second": 1413.818,
-      "eval_steps_per_second": 88.461,
+      "epoch": 39.83228511530398,
+      "eval_loss": 4.597586154937744,
+      "eval_runtime": 0.9841,
+      "eval_samples_per_second": 1397.167,
+      "eval_steps_per_second": 87.386,
       "step": 38000
     },
     {
-      "epoch": 62.893081761006286,
-      "grad_norm": 2.6151175498962402,
-      "learning_rate": 9.996000000000001e-05,
-      "loss": 2.6083,
+      "epoch": 41.928721174004195,
+      "grad_norm": 2.2574758529663086,
+      "learning_rate": 9.99625e-05,
+      "loss": 3.4328,
       "step": 40000
     },
     {
-      "epoch": 62.893081761006286,
-      "eval_loss": 4.143652439117432,
-      "eval_runtime": 0.6467,
-      "eval_samples_per_second": 1408.69,
-      "eval_steps_per_second": 88.14,
+      "epoch": 41.928721174004195,
+      "eval_loss": 4.6090407371521,
+      "eval_runtime": 0.9945,
+      "eval_samples_per_second": 1382.571,
+      "eval_steps_per_second": 86.474,
       "step": 40000
     },
     {
-      "epoch": 66.0377358490566,
-      "eval_loss": 4.220719814300537,
-      "eval_runtime": 0.6466,
-      "eval_samples_per_second": 1408.922,
-      "eval_steps_per_second": 88.154,
+      "epoch": 44.0251572327044,
+      "eval_loss": 4.629756450653076,
+      "eval_runtime": 0.9745,
+      "eval_samples_per_second": 1410.989,
+      "eval_steps_per_second": 88.251,
       "step": 42000
     },
     {
-      "epoch": 69.18238993710692,
-      "grad_norm": 2.823240280151367,
-      "learning_rate": 9.336333333333334e-05,
-      "loss": 2.4507,
+      "epoch": 46.121593291404615,
+      "grad_norm": 2.2658331394195557,
+      "learning_rate": 9.336166666666667e-05,
+      "loss": 3.31,
       "step": 44000
     },
     {
-      "epoch": 69.18238993710692,
-      "eval_loss": 4.291810989379883,
-      "eval_runtime": 0.6433,
-      "eval_samples_per_second": 1416.201,
-      "eval_steps_per_second": 88.61,
+      "epoch": 46.121593291404615,
+      "eval_loss": 4.659794330596924,
+      "eval_runtime": 0.9812,
+      "eval_samples_per_second": 1401.309,
+      "eval_steps_per_second": 87.645,
       "step": 44000
     },
     {
-      "epoch": 72.32704402515724,
-      "eval_loss": 4.3598246574401855,
-      "eval_runtime": 0.6463,
-      "eval_samples_per_second": 1409.638,
-      "eval_steps_per_second": 88.199,
+      "epoch": 48.21802935010482,
+      "eval_loss": 4.698253631591797,
+      "eval_runtime": 0.981,
+      "eval_samples_per_second": 1401.638,
+      "eval_steps_per_second": 87.666,
       "step": 46000
     },
     {
-      "epoch": 75.47169811320755,
-      "grad_norm": 3.11128830909729,
-      "learning_rate": 8.67e-05,
-      "loss": 2.2924,
+      "epoch": 50.314465408805034,
+      "grad_norm": 2.4032843112945557,
+      "learning_rate": 8.669833333333334e-05,
+      "loss": 3.1908,
       "step": 48000
     },
     {
-      "epoch": 75.47169811320755,
-      "eval_loss": 4.444736003875732,
-      "eval_runtime": 0.6454,
-      "eval_samples_per_second": 1411.606,
-      "eval_steps_per_second": 88.322,
+      "epoch": 50.314465408805034,
+      "eval_loss": 4.72628927230835,
+      "eval_runtime": 0.9912,
+      "eval_samples_per_second": 1387.274,
+      "eval_steps_per_second": 86.768,
       "step": 48000
     }
   ],
   "logging_steps": 4000,
   "max_steps": 100000,
   "num_input_tokens_seen": 0,
-  "num_train_epochs": 158,
+  "num_train_epochs": 105,
   "save_steps": 4000,
   "stateful_callbacks": {
     "TrainerControl": {
@@ -302,7 +302,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.2409967377661952e+16,
+  "total_flos": 1.2411179406999552e+16,
   "train_batch_size": 16,
   "trial_name": null,
   "trial_params": null
checkpoint-48000/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e6d9a18dff8e8501ed8e1286b71e34a5ee0478c42b015fd65b8981e86e58a8c2
+oid sha256:7e7557b0dc8ef07c9db255e1167d98819525b043deb4934f580e21d716914111
 size 5368