lesso03 committed · verified
Commit 3bd4512 · 1 Parent(s): 00362ce

Training in progress, step 50, checkpoint
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c413f16b24e45d61e3bcf1a3ecff3f953f20feca425869d437bdefe63669abfd
+ oid sha256:da12c466f84c24dff56fbf8dc79868fbc3b3b449ce897493c09f29392c708add
  size 2269195160
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ca9a19859007218a1036d8cf822444130139b61a0b519f83573298eb60569e50
+ oid sha256:926b3a840ac04e94a9c2f845e240d3f199a8eebeb2b05d975ed1ce65114c13fe
  size 335922386
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c79f47608aab86948875a4010ce113bb83e408fe46d231faddcd7a34a40a0fa8
+ oid sha256:ec7aa7bd6e7162b5cef71bb965bb9ca48a011a5eed5848e6b7c207b50a0c6a7f
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:448fc91b5f8bf077617e9062c085061ded5310a55de0a91b7a3fb1b64abeb36d
+ oid sha256:4dd76e4cfbed647f5393d42b2fd1c0eead588dbf173cae704b7e26883a38f902
  size 1064
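
The four files above are Git LFS pointers: the "oid sha256:..." field is the SHA-256 digest of the actual checkpoint blob and "size" is its byte length, so only these small pointer files change in the commit. Below is a minimal, self-contained sketch (not part of this repo) showing how a downloaded file can be checked against its pointer, using optimizer.pt as the example:

# Verify a resolved LFS file against the oid/size recorded in its pointer.
import hashlib
from pathlib import Path

blob = Path("last-checkpoint/optimizer.pt")  # the downloaded file, not the pointer
expected_oid = "926b3a840ac04e94a9c2f845e240d3f199a8eebeb2b05d975ed1ce65114c13fe"
expected_size = 335922386

sha = hashlib.sha256()
size = 0
with blob.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha.update(chunk)
        size += len(chunk)

assert size == expected_size, "size does not match the LFS pointer"
assert sha.hexdigest() == expected_oid, "sha256 does not match the LFS pointer"
print("last-checkpoint/optimizer.pt matches its Git LFS pointer")
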
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 1.1904761904761905,
+ "epoch": 2.380952380952381,
  "eval_steps": 6,
- "global_step": 25,
+ "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -222,6 +222,213 @@
  "learning_rate": 8.150439217908556e-05,
  "loss": 0.0003,
  "step": 25
+ },
+ {
+ "epoch": 1.2380952380952381,
+ "grad_norm": 0.02535114623606205,
+ "learning_rate": 7.91489739557236e-05,
+ "loss": 0.0003,
+ "step": 26
+ },
+ {
+ "epoch": 1.2857142857142856,
+ "grad_norm": 0.08832518011331558,
+ "learning_rate": 7.669116889823955e-05,
+ "loss": 0.0009,
+ "step": 27
+ },
+ {
+ "epoch": 1.3333333333333333,
+ "grad_norm": 0.02784010022878647,
+ "learning_rate": 7.413961013653726e-05,
+ "loss": 0.0004,
+ "step": 28
+ },
+ {
+ "epoch": 1.380952380952381,
+ "grad_norm": 0.024539506062865257,
+ "learning_rate": 7.150326011382604e-05,
+ "loss": 0.0003,
+ "step": 29
+ },
+ {
+ "epoch": 1.4285714285714286,
+ "grad_norm": 0.04319535195827484,
+ "learning_rate": 6.879137910571191e-05,
+ "loss": 0.0004,
+ "step": 30
+ },
+ {
+ "epoch": 1.4285714285714286,
+ "eval_loss": 0.0004903983790427446,
+ "eval_runtime": 2.2497,
+ "eval_samples_per_second": 8.001,
+ "eval_steps_per_second": 1.334,
+ "step": 30
+ },
+ {
+ "epoch": 1.4761904761904763,
+ "grad_norm": 0.014017721638083458,
+ "learning_rate": 6.601349269314188e-05,
+ "loss": 0.0003,
+ "step": 31
+ },
+ {
+ "epoch": 1.5238095238095237,
+ "grad_norm": 0.0841885432600975,
+ "learning_rate": 6.317935830345338e-05,
+ "loss": 0.0006,
+ "step": 32
+ },
+ {
+ "epoch": 1.5714285714285714,
+ "grad_norm": 0.020319445058703423,
+ "learning_rate": 6.029893093705492e-05,
+ "loss": 0.0003,
+ "step": 33
+ },
+ {
+ "epoch": 1.619047619047619,
+ "grad_norm": 1.9485701322555542,
+ "learning_rate": 5.738232820012407e-05,
+ "loss": 0.0164,
+ "step": 34
+ },
+ {
+ "epoch": 1.6666666666666665,
+ "grad_norm": 0.02285480685532093,
+ "learning_rate": 5.4439794766146746e-05,
+ "loss": 0.0003,
+ "step": 35
+ },
+ {
+ "epoch": 1.7142857142857144,
+ "grad_norm": 0.16724388301372528,
+ "learning_rate": 5.148166639112799e-05,
+ "loss": 0.001,
+ "step": 36
+ },
+ {
+ "epoch": 1.7142857142857144,
+ "eval_loss": 0.0006431459914892912,
+ "eval_runtime": 2.2493,
+ "eval_samples_per_second": 8.003,
+ "eval_steps_per_second": 1.334,
+ "step": 36
+ },
+ {
+ "epoch": 1.7619047619047619,
+ "grad_norm": 0.047226764261722565,
+ "learning_rate": 4.851833360887201e-05,
+ "loss": 0.0004,
+ "step": 37
+ },
+ {
+ "epoch": 1.8095238095238095,
+ "grad_norm": 0.1621876060962677,
+ "learning_rate": 4.5560205233853266e-05,
+ "loss": 0.0012,
+ "step": 38
+ },
+ {
+ "epoch": 1.8571428571428572,
+ "grad_norm": 0.0308663472533226,
+ "learning_rate": 4.2617671799875944e-05,
+ "loss": 0.0003,
+ "step": 39
+ },
+ {
+ "epoch": 1.9047619047619047,
+ "grad_norm": 0.011845704168081284,
+ "learning_rate": 3.970106906294509e-05,
+ "loss": 0.0002,
+ "step": 40
+ },
+ {
+ "epoch": 1.9523809523809523,
+ "grad_norm": 0.003460651496425271,
+ "learning_rate": 3.682064169654663e-05,
+ "loss": 0.0001,
+ "step": 41
+ },
+ {
+ "epoch": 2.0,
+ "grad_norm": 0.012540038675069809,
+ "learning_rate": 3.3986507306858125e-05,
+ "loss": 0.0002,
+ "step": 42
+ },
+ {
+ "epoch": 2.0,
+ "eval_loss": 0.0001648629695409909,
+ "eval_runtime": 2.2501,
+ "eval_samples_per_second": 7.999,
+ "eval_steps_per_second": 1.333,
+ "step": 42
+ },
+ {
+ "epoch": 2.0476190476190474,
+ "grad_norm": 0.0032971783075481653,
+ "learning_rate": 3.12086208942881e-05,
+ "loss": 0.0001,
+ "step": 43
+ },
+ {
+ "epoch": 2.0952380952380953,
+ "grad_norm": 0.0028387894853949547,
+ "learning_rate": 2.8496739886173995e-05,
+ "loss": 0.0001,
+ "step": 44
+ },
+ {
+ "epoch": 2.142857142857143,
+ "grad_norm": 0.004313192795962095,
+ "learning_rate": 2.5860389863462765e-05,
+ "loss": 0.0001,
+ "step": 45
+ },
+ {
+ "epoch": 2.1904761904761907,
+ "grad_norm": 0.002875348087400198,
+ "learning_rate": 2.3308831101760486e-05,
+ "loss": 0.0001,
+ "step": 46
+ },
+ {
+ "epoch": 2.238095238095238,
+ "grad_norm": 0.002736931899562478,
+ "learning_rate": 2.0851026044276406e-05,
+ "loss": 0.0001,
+ "step": 47
+ },
+ {
+ "epoch": 2.2857142857142856,
+ "grad_norm": 0.002644361462444067,
+ "learning_rate": 1.849560782091445e-05,
+ "loss": 0.0001,
+ "step": 48
+ },
+ {
+ "epoch": 2.2857142857142856,
+ "eval_loss": 0.00020012857567053288,
+ "eval_runtime": 2.2588,
+ "eval_samples_per_second": 7.969,
+ "eval_steps_per_second": 1.328,
+ "step": 48
+ },
+ {
+ "epoch": 2.3333333333333335,
+ "grad_norm": 0.002743582706898451,
+ "learning_rate": 1.6250849924089484e-05,
+ "loss": 0.0001,
+ "step": 49
+ },
+ {
+ "epoch": 2.380952380952381,
+ "grad_norm": 0.002709955209866166,
+ "learning_rate": 1.4124637147783432e-05,
+ "loss": 0.0001,
+ "step": 50
  }
  ],
  "logging_steps": 1,
@@ -241,7 +448,7 @@
  "attributes": {}
  }
  },
- "total_flos": 2.0958327135535104e+16,
+ "total_flos": 4.228759811417702e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null