ardaspear committed (verified)
Commit: 9dd420b
Parent(s): f9b9574

Training in progress, step 600, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1ac448701a045ff439e79b5ff67b3c1c9e27e9a40781e5b7a14904ca15551334
+oid sha256:203e08f046df4d334ef6c8caea21c172e5ea1e2c60c119aa2792cf4ad1d6d1c9
 size 4873875032
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c3776ada79a70f53d0fb04f3a4270da720aa3ae008d4a93ebaab0fd32c3e3199
+oid sha256:6f1175fc470bdfc8f0d336fde4fd9d2096c6bf3f0f2ad6202f7f87dbeb67b065
 size 1408414360
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ca0f47184221cac8d75c4921ac2810fa8f2a1d7ee96fb9be1067cc10fdda9dd0
+oid sha256:443c83fb7b844b8ff3708b529392bea53306a8d3cfda3e62525bc544ce49d42e
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:de723127fb29334c1d5f5d4d0ba1d4e78fc916dfc4e8f6b4bb5846a069a13ba9
+oid sha256:9d89303aa828aaa9e241260e2ca606db088309d8b5a7ba3d79cb10ce1887994d
 size 1064
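
The four binary files above are stored as Git LFS pointers, so the diff only touches each pointer's `oid sha256` and `size` fields. After pulling the actual blobs, a pointer's hash can be checked against the downloaded file; a minimal sketch in Python (the local path is illustrative, and any of the new oids above can be substituted):

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream a file in chunks and return its hex SHA-256 (the value in the LFS pointer's oid field)."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# New oid for last-checkpoint/scheduler.pt, taken from the pointer diff above.
expected = "9d89303aa828aaa9e241260e2ca606db088309d8b5a7ba3d79cb10ce1887994d"
print(sha256_of("last-checkpoint/scheduler.pt") == expected)
```
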
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 6.284088611602783,
-  "best_model_checkpoint": "miner_id_24/checkpoint-450",
-  "epoch": 0.20957038071952497,
+  "best_metric": 6.1272969245910645,
+  "best_model_checkpoint": "miner_id_24/checkpoint-600",
+  "epoch": 0.2794271742927,
   "eval_steps": 50,
-  "global_step": 450,
+  "global_step": 600,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -402,6 +402,135 @@
       "eval_samples_per_second": 9.547,
       "eval_steps_per_second": 2.389,
       "step": 450
+    },
+    {
+      "epoch": 0.21422750029106996,
+      "grad_norm": 27.015338897705078,
+      "learning_rate": 2.6522584913693294e-05,
+      "loss": 6.4693,
+      "step": 460
+    },
+    {
+      "epoch": 0.21888461986261498,
+      "grad_norm": 6.367403984069824,
+      "learning_rate": 2.301660165700936e-05,
+      "loss": 6.2205,
+      "step": 470
+    },
+    {
+      "epoch": 0.22354173943415998,
+      "grad_norm": 16.961223602294922,
+      "learning_rate": 1.9728836206903656e-05,
+      "loss": 6.2914,
+      "step": 480
+    },
+    {
+      "epoch": 0.22819885900570497,
+      "grad_norm": 7.398273468017578,
+      "learning_rate": 1.6668608091748495e-05,
+      "loss": 6.1223,
+      "step": 490
+    },
+    {
+      "epoch": 0.23285597857724996,
+      "grad_norm": 18.52619171142578,
+      "learning_rate": 1.3844591860619383e-05,
+      "loss": 6.1299,
+      "step": 500
+    },
+    {
+      "epoch": 0.23285597857724996,
+      "eval_loss": 6.169624328613281,
+      "eval_runtime": 380.4572,
+      "eval_samples_per_second": 9.507,
+      "eval_steps_per_second": 2.379,
+      "step": 500
+    },
+    {
+      "epoch": 0.23751309814879498,
+      "grad_norm": 13.293009757995605,
+      "learning_rate": 1.1264792494342857e-05,
+      "loss": 6.3948,
+      "step": 510
+    },
+    {
+      "epoch": 0.24217021772033998,
+      "grad_norm": 10.967979431152344,
+      "learning_rate": 8.936522714508678e-06,
+      "loss": 6.1936,
+      "step": 520
+    },
+    {
+      "epoch": 0.24682733729188497,
+      "grad_norm": 5.417884826660156,
+      "learning_rate": 6.866382254766157e-06,
+      "loss": 6.2124,
+      "step": 530
+    },
+    {
+      "epoch": 0.25148445686342996,
+      "grad_norm": 6.8799662590026855,
+      "learning_rate": 5.060239153161872e-06,
+      "loss": 6.0413,
+      "step": 540
+    },
+    {
+      "epoch": 0.256141576434975,
+      "grad_norm": 20.289323806762695,
+      "learning_rate": 3.5232131185484076e-06,
+      "loss": 6.0214,
+      "step": 550
+    },
+    {
+      "epoch": 0.256141576434975,
+      "eval_loss": 6.137284755706787,
+      "eval_runtime": 379.99,
+      "eval_samples_per_second": 9.519,
+      "eval_steps_per_second": 2.382,
+      "step": 550
+    },
+    {
+      "epoch": 0.26079869600651995,
+      "grad_norm": 9.264719009399414,
+      "learning_rate": 2.259661018213333e-06,
+      "loss": 6.4324,
+      "step": 560
+    },
+    {
+      "epoch": 0.26545581557806497,
+      "grad_norm": 4.1392717361450195,
+      "learning_rate": 1.2731645278655445e-06,
+      "loss": 6.2256,
+      "step": 570
+    },
+    {
+      "epoch": 0.27011293514961,
+      "grad_norm": 9.640027046203613,
+      "learning_rate": 5.665199789862907e-07,
+      "loss": 6.0853,
+      "step": 580
+    },
+    {
+      "epoch": 0.27477005472115495,
+      "grad_norm": 5.963624477386475,
+      "learning_rate": 1.4173043232380557e-07,
+      "loss": 6.1615,
+      "step": 590
+    },
+    {
+      "epoch": 0.2794271742927,
+      "grad_norm": 13.913993835449219,
+      "learning_rate": 0.0,
+      "loss": 6.0562,
+      "step": 600
+    },
+    {
+      "epoch": 0.2794271742927,
+      "eval_loss": 6.1272969245910645,
+      "eval_runtime": 380.3389,
+      "eval_samples_per_second": 9.51,
+      "eval_steps_per_second": 2.379,
+      "step": 600
     }
   ],
   "logging_steps": 10,
@@ -425,12 +554,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 8.167799352459264e+17,
+  "total_flos": 1.0911889810784256e+18,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null