rootxhacker committed (verified)
Commit 00dea70 · 1 parent: af29819

Training in progress, step 27000, checkpoint

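The files committed below are the standard resumable-checkpoint artifacts written by the Hugging Face transformers Trainer: the adapter weights (adapter_model.safetensors, the PEFT adapter file) plus optimizer, LR-scheduler, gradient-scaler and RNG state, and the trainer_state.json bookkeeping. The sketch that follows is not the author's training script; it only illustrates, with placeholder model/data objects, how a checkpoint like this one is typically resumed. The numeric arguments are copied from the trainer_state.json in this commit, except save_steps, which is an inference from the 26500 -> 27000 step jump between commits.

```python
# Minimal sketch, not the author's script: resuming a run from a checkpoint
# directory like the one in this commit. `model`, `train_ds` and `eval_ds`
# are placeholders the caller must supply.
from transformers import Trainer, TrainingArguments

def resume_training(model, train_ds, eval_ds):
    args = TrainingArguments(
        output_dir="./ar-diffusion-checkpoints",  # matches the best_model_checkpoint path in trainer_state.json
        logging_steps=50,       # from trainer_state.json
        eval_steps=250,         # from trainer_state.json
        eval_strategy="steps",  # "evaluation_strategy" on older transformers releases
        save_steps=500,         # assumption, inferred from the 26500 -> 27000 jump
    )
    trainer = Trainer(
        model=model,
        args=args,
        train_dataset=train_ds,
        eval_dataset=eval_ds,
    )
    # resume_from_checkpoint=True restores optimizer.pt, scheduler.pt, scaler.pt,
    # rng_state.pth and trainer_state.json from the latest checkpoint in
    # output_dir and continues training from global_step 27000.
    trainer.train(resume_from_checkpoint=True)
```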
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f2e93e89fb68bb8962ff13343b1f03461f74663e88695cc877535d81fccd21cd
+oid sha256:1ecb2f2e99a3f55ce283ec3c73dc04b2a38b8d596c5eb8a739f6b0463f816d0e
 size 36730224
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:06161b85e01debd263697b27f956188143b84ef8f31f2d7a79af45d05330fb3b
+oid sha256:4aa7532d5b46ff1991d1c3ddb490b4cebca7a510adb0c584bc416f8e750a93e4
 size 73588346
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5b56501523df118c1a33e60d970ee258e92691efddadd68cb368e352ca4fb0c1
+oid sha256:a7cb9cecde71619f5455094db9dbc10b01ced14b7fff166fcfd1f46df6e480f1
 size 14244
last-checkpoint/scaler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:76592baa9e3b0e3d15e021e247d3cfa4915cd052c2c669b30b628ff835c5a245
+oid sha256:38e8022c7f3002679e6fa3f97f4fe89fb5c796e6e8d168c42ccf78f18e8bd32d
 size 988
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3d9b1af634bbca91339a4e0183f53f86b46f1f5a7d978b27638787d68fcb88bd
+oid sha256:84d9c773d9d655234bd445b24c5fab29b1818f6d34a0b08096443007d2a42d37
 size 1064
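Each of the pointer files above follows the Git LFS pointer format (a version line, an `oid sha256:` line, and a `size` line); only the object hashes change between the two checkpoints, while the byte sizes stay the same. As a hedged illustration (not part of this repository), a small check that a locally downloaded blob matches its pointer could look like this; the paths at the bottom are placeholders:

```python
import hashlib
import os

def read_lfs_pointer(pointer_path):
    """Parse a Git LFS pointer file into its sha256 oid and byte size."""
    fields = {}
    with open(pointer_path, "r", encoding="utf-8") as fh:
        for line in fh:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    oid = fields["oid"].removeprefix("sha256:")
    return oid, int(fields["size"])

def verify_lfs_object(pointer_path, blob_path):
    """Return True if the downloaded blob matches the pointer's sha256 and size."""
    expected_oid, expected_size = read_lfs_pointer(pointer_path)
    if os.path.getsize(blob_path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(blob_path, "rb") as fh:
        for chunk in iter(lambda: fh.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# Example usage (placeholder paths):
# verify_lfs_object("adapter_model.safetensors.pointer", "adapter_model.safetensors")
```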
last-checkpoint/trainer_state.json CHANGED
@@ -2,9 +2,9 @@
   "best_global_step": 24500,
   "best_metric": 1.4431298971176147,
   "best_model_checkpoint": "./ar-diffusion-checkpoints/checkpoint-24500",
-  "epoch": 2.0383047457887855,
+  "epoch": 2.076763325898008,
   "eval_steps": 250,
-  "global_step": 26500,
+  "global_step": 27000,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -4566,6 +4566,92 @@
       "eval_samples_per_second": 54.796,
       "eval_steps_per_second": 13.699,
       "step": 26500
+    },
+    {
+      "epoch": 2.042150603799708,
+      "grad_norm": 1.0049172639846802,
+      "learning_rate": 3.932208374692107e-06,
+      "loss": 1.3012,
+      "step": 26550
+    },
+    {
+      "epoch": 2.04599646181063,
+      "grad_norm": 1.193533182144165,
+      "learning_rate": 3.6748648946729894e-06,
+      "loss": 1.4038,
+      "step": 26600
+    },
+    {
+      "epoch": 2.0498423198215523,
+      "grad_norm": 1.6459178924560547,
+      "learning_rate": 3.417521414653873e-06,
+      "loss": 1.4089,
+      "step": 26650
+    },
+    {
+      "epoch": 2.0536881778324743,
+      "grad_norm": 0.546062171459198,
+      "learning_rate": 3.160177934634756e-06,
+      "loss": 1.3675,
+      "step": 26700
+    },
+    {
+      "epoch": 2.0575340358433967,
+      "grad_norm": 1.7894645929336548,
+      "learning_rate": 2.9028344546156386e-06,
+      "loss": 1.4585,
+      "step": 26750
+    },
+    {
+      "epoch": 2.0575340358433967,
+      "eval_loss": 1.460014820098877,
+      "eval_runtime": 18.2356,
+      "eval_samples_per_second": 54.838,
+      "eval_steps_per_second": 13.709,
+      "step": 26750
+    },
+    {
+      "epoch": 2.061379893854319,
+      "grad_norm": 1.1368170976638794,
+      "learning_rate": 2.645490974596522e-06,
+      "loss": 1.4038,
+      "step": 26800
+    },
+    {
+      "epoch": 2.065225751865241,
+      "grad_norm": 1.698556900024414,
+      "learning_rate": 2.388147494577405e-06,
+      "loss": 1.4592,
+      "step": 26850
+    },
+    {
+      "epoch": 2.0690716098761635,
+      "grad_norm": 1.3114346265792847,
+      "learning_rate": 2.130804014558288e-06,
+      "loss": 1.4566,
+      "step": 26900
+    },
+    {
+      "epoch": 2.0729174678870854,
+      "grad_norm": 1.7974728345870972,
+      "learning_rate": 1.8734605345391713e-06,
+      "loss": 1.5074,
+      "step": 26950
+    },
+    {
+      "epoch": 2.076763325898008,
+      "grad_norm": 1.4648147821426392,
+      "learning_rate": 1.6161170545200544e-06,
+      "loss": 1.4478,
+      "step": 27000
+    },
+    {
+      "epoch": 2.076763325898008,
+      "eval_loss": 1.4667593240737915,
+      "eval_runtime": 18.1467,
+      "eval_samples_per_second": 55.107,
+      "eval_steps_per_second": 13.777,
+      "step": 27000
     }
   ],
   "logging_steps": 50,