dada22231 committed
Commit ddb703a · verified · 1 Parent(s): c751136

Training in progress, step 50, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a25e3891d3f1d8c2858caeca0ab3008de6bea93d9edf07f6b98256e732d19767
+oid sha256:3156371f18912efe9870f8d597cf8bf2f37de2339a0d2f009df115d47ceedbc1
 size 400084608
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b6b77cf92f935a71da0afbb805140b29205c9310e39675261d2e1b0a3d82bf06
+oid sha256:530d376e951a9d7026ba0125c54fcd3d684dabfc39e53e82b04a9a031d4d83ef
 size 800394282
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:19a5387608b3f44ba27260c03bfd18565d894873a92ba1d7da2864d43f572e36
+oid sha256:5c15e094d65a2a04ddf2ebcfc5ab4b15c91e3bbc100ae81a10baa00a1c5a4f71
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f51860b90e5bb277396d1dd182bc1c33f3e2932e797311f860e2584a2205a1f1
+oid sha256:28db7b94298edfbbdbc29b237ba07b0fdc1211300bc39385315854d1fbbaee49
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0a937245e3b5cbcd687e1cb8578cfd316893887810f68a852d4b78571836d818
+oid sha256:587da782b6c5f0e8b2fa4d8896a929f45313deb0b7da2c24c2393a205fef0714
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6c4c9ec7bc64b05fbe0e5ed9b7055a306b7e172cf8370cbe5995c76966c12d44
+oid sha256:0c43dbd13f8f0564355267a63b86c95dffdc4512d80a63ba57b940fa2854a463
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0f5c305e4a92be904895c02f0f0a1da666e6e7555f6043a8f089990c87f4ce88
+oid sha256:df19ed1a9610a5422497073697cbf4575f80de47fbb46ef0cdd2779386b031fa
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.2550545930862427,
-  "best_model_checkpoint": "miner_id_24/checkpoint-25",
-  "epoch": 0.15168752370117558,
+  "best_metric": 0.22515492141246796,
+  "best_model_checkpoint": "miner_id_24/checkpoint-50",
+  "epoch": 0.30337504740235116,
   "eval_steps": 25,
-  "global_step": 25,
+  "global_step": 50,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -198,6 +198,189 @@
       "eval_samples_per_second": 12.275,
       "eval_steps_per_second": 3.192,
       "step": 25
+    },
+    {
+      "epoch": 0.1577550246492226,
+      "grad_norm": 0.9358745217323303,
+      "learning_rate": 8.681980515339464e-05,
+      "loss": 0.2008,
+      "step": 26
+    },
+    {
+      "epoch": 0.16382252559726962,
+      "grad_norm": 0.5705291032791138,
+      "learning_rate": 8.571489144483944e-05,
+      "loss": 0.3452,
+      "step": 27
+    },
+    {
+      "epoch": 0.16989002654531665,
+      "grad_norm": 0.5917467474937439,
+      "learning_rate": 8.457416554680877e-05,
+      "loss": 0.2588,
+      "step": 28
+    },
+    {
+      "epoch": 0.17595752749336366,
+      "grad_norm": 0.6592450141906738,
+      "learning_rate": 8.339895749467238e-05,
+      "loss": 0.2964,
+      "step": 29
+    },
+    {
+      "epoch": 0.1820250284414107,
+      "grad_norm": 0.5917816758155823,
+      "learning_rate": 8.219063752844926e-05,
+      "loss": 0.3058,
+      "step": 30
+    },
+    {
+      "epoch": 0.18809252938945772,
+      "grad_norm": 0.5254703164100647,
+      "learning_rate": 8.095061449516903e-05,
+      "loss": 0.3066,
+      "step": 31
+    },
+    {
+      "epoch": 0.19416003033750473,
+      "grad_norm": 0.4927206337451935,
+      "learning_rate": 7.968033420621935e-05,
+      "loss": 0.3338,
+      "step": 32
+    },
+    {
+      "epoch": 0.20022753128555176,
+      "grad_norm": 0.617469310760498,
+      "learning_rate": 7.838127775159452e-05,
+      "loss": 0.3476,
+      "step": 33
+    },
+    {
+      "epoch": 0.2062950322335988,
+      "grad_norm": 0.5512767434120178,
+      "learning_rate": 7.705495977301078e-05,
+      "loss": 0.2595,
+      "step": 34
+    },
+    {
+      "epoch": 0.2123625331816458,
+      "grad_norm": 0.6990464925765991,
+      "learning_rate": 7.570292669790186e-05,
+      "loss": 0.288,
+      "step": 35
+    },
+    {
+      "epoch": 0.21843003412969283,
+      "grad_norm": 1.4301836490631104,
+      "learning_rate": 7.43267549363537e-05,
+      "loss": 0.4128,
+      "step": 36
+    },
+    {
+      "epoch": 0.22449753507773987,
+      "grad_norm": 1.702950119972229,
+      "learning_rate": 7.292804904308087e-05,
+      "loss": 0.4041,
+      "step": 37
+    },
+    {
+      "epoch": 0.23056503602578687,
+      "grad_norm": 1.1743119955062866,
+      "learning_rate": 7.150843984658754e-05,
+      "loss": 0.2015,
+      "step": 38
+    },
+    {
+      "epoch": 0.2366325369738339,
+      "grad_norm": 0.9276676774024963,
+      "learning_rate": 7.006958254769438e-05,
+      "loss": 0.3364,
+      "step": 39
+    },
+    {
+      "epoch": 0.2427000379218809,
+      "grad_norm": 0.53074711561203,
+      "learning_rate": 6.861315478964841e-05,
+      "loss": 0.2547,
+      "step": 40
+    },
+    {
+      "epoch": 0.24876753886992795,
+      "grad_norm": 0.4709409773349762,
+      "learning_rate": 6.714085470206609e-05,
+      "loss": 0.2583,
+      "step": 41
+    },
+    {
+      "epoch": 0.25483503981797495,
+      "grad_norm": 0.5634525418281555,
+      "learning_rate": 6.56543989209901e-05,
+      "loss": 0.3323,
+      "step": 42
+    },
+    {
+      "epoch": 0.260902540766022,
+      "grad_norm": 0.6031685471534729,
+      "learning_rate": 6.415552058736854e-05,
+      "loss": 0.2583,
+      "step": 43
+    },
+    {
+      "epoch": 0.266970041714069,
+      "grad_norm": 0.6985509395599365,
+      "learning_rate": 6.264596732629e-05,
+      "loss": 0.3017,
+      "step": 44
+    },
+    {
+      "epoch": 0.27303754266211605,
+      "grad_norm": 0.5389325022697449,
+      "learning_rate": 6.112749920933111e-05,
+      "loss": 0.2776,
+      "step": 45
+    },
+    {
+      "epoch": 0.2791050436101631,
+      "grad_norm": 0.4434327185153961,
+      "learning_rate": 5.960188670239154e-05,
+      "loss": 0.2907,
+      "step": 46
+    },
+    {
+      "epoch": 0.28517254455821006,
+      "grad_norm": 0.47085919976234436,
+      "learning_rate": 5.80709086014102e-05,
+      "loss": 0.2155,
+      "step": 47
+    },
+    {
+      "epoch": 0.2912400455062571,
+      "grad_norm": 0.9153717756271362,
+      "learning_rate": 5.653634995836856e-05,
+      "loss": 0.3143,
+      "step": 48
+    },
+    {
+      "epoch": 0.29730754645430413,
+      "grad_norm": 2.431823253631592,
+      "learning_rate": 5.500000000000001e-05,
+      "loss": 0.5086,
+      "step": 49
+    },
+    {
+      "epoch": 0.30337504740235116,
+      "grad_norm": 0.8440378904342651,
+      "learning_rate": 5.346365004163145e-05,
+      "loss": 0.3082,
+      "step": 50
+    },
+    {
+      "epoch": 0.30337504740235116,
+      "eval_loss": 0.22515492141246796,
+      "eval_runtime": 4.0292,
+      "eval_samples_per_second": 12.409,
+      "eval_steps_per_second": 3.226,
+      "step": 50
     }
   ],
   "logging_steps": 1,
@@ -226,7 +409,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 3.087239672233984e+17,
+  "total_flos": 6.174479344467968e+17,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null