rootxhacker committed on
Commit b6770fa · verified · 1 Parent(s): 3123988

Training in progress, step 39000, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:4f4e59779ae5268c858ada9fe05772a37f9613020d8b9f7340783b2a16c0e383
+ oid sha256:cf3bda4176df59060b4c3b2e5426a0849f3db73d160bd88e36205096ff67d81c
  size 36730224
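The files in this checkpoint are stored as Git LFS pointers: the text above only records the blob's sha256 and size, while the tensors themselves live in LFS storage. A minimal sketch (not part of the commit) for checking that a locally pulled file matches its pointer, using the values from the new adapter pointer above:

```python
# Sketch: verify a downloaded checkpoint file against its Git LFS pointer.
import hashlib
from pathlib import Path

def verify_lfs_pointer(file_path: str, expected_oid: str, expected_size: int) -> bool:
    """Return True if the file's byte size and sha256 match the LFS pointer."""
    path = Path(file_path)
    if path.stat().st_size != expected_size:
        return False
    digest = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# oid and size copied from the new pointer in this commit.
print(verify_lfs_pointer(
    "last-checkpoint/adapter_model.safetensors",
    "cf3bda4176df59060b4c3b2e5426a0849f3db73d160bd88e36205096ff67d81c",
    36730224,
))
```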
last-checkpoint/ar_diffusion_info.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:376da5b0e7144c1b91117f5526417311ed45b536f61dd6ac0ef8b19051f1dc33
+ oid sha256:53bc0f8817dd10f924c15f7cdd42fe5d2191e0ae00ed029ca8895e92da7bac07
  size 1736
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9de512a7acdea4434e0675f44f1896eab7a4c3fe6cc967e9cd2360783b35758f
+ oid sha256:558eed0dfcd806ee844603a215b913f148e482ed28be46203246d8562d48e6e7
  size 73588346
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c8c00195221e523ee69e83979891848166b78d5b2c3da6d54545d0f1c8a050c4
+ oid sha256:80343da31c600f0e1bbaa709a5f7f529e2d6a5cd1c7788323715a3bfd97d3201
  size 14244
last-checkpoint/scaler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:274528fb9458ad54ab5973b2752fc7f6c9483a2a721e4d0ba19183ca440bc21c
+ oid sha256:c156120bdd3702e22db9f52ea4b7b1f109fd9c58a54b6df0124843a2ca789b7e
  size 988
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9bcb038f6702a5ed9cf6a7787eb82f2807063a89d7bbe28d73008f47570ae171
+ oid sha256:c92e907578f7dfc926449c7e4863a766eefc5d8dcfbba692ef77fb44a937de6c
  size 1064
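Alongside the adapter weights, the checkpoint updates optimizer.pt, scheduler.pt, scaler.pt and rng_state.pth, which is what the Hugging Face Trainer needs to continue training exactly where it left off. A sketch only, assuming the original training script's Trainer setup and a local copy of this checkpoint folder:

```python
# Sketch: resume training from a checkpoint directory like this one.
# The Trainer/model/dataset configuration is assumed to be whatever the
# original script used; the directory name is taken from this repo's layout.
from transformers import Trainer

def resume(trainer: Trainer) -> None:
    # Restores weights plus optimizer.pt, scheduler.pt, scaler.pt and
    # rng_state.pth, then continues from the saved global_step.
    trainer.train(resume_from_checkpoint="last-checkpoint")
```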
last-checkpoint/trainer_state.json CHANGED
@@ -2,9 +2,9 @@
   "best_global_step": 31000,
   "best_metric": 0.7226839661598206,
   "best_model_checkpoint": "./ar-diffusion-checkpoints-fixed/checkpoint-31000",
-  "epoch": 2.961310668410122,
+  "epoch": 2.999769248519345,
   "eval_steps": 250,
-  "global_step": 38500,
+  "global_step": 39000,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -6630,6 +6630,92 @@
   "eval_samples_per_second": 55.821,
   "eval_steps_per_second": 13.955,
   "step": 38500
+  },
+  {
+  "epoch": 2.9651565264210444,
+  "grad_norm": 0.7657055854797363,
+  "learning_rate": 2.4257850037659406e-06,
+  "loss": 0.7114,
+  "step": 38550
+  },
+  {
+  "epoch": 2.969002384431967,
+  "grad_norm": 0.7305043339729309,
+  "learning_rate": 2.1660649819494585e-06,
+  "loss": 0.7135,
+  "step": 38600
+  },
+  {
+  "epoch": 2.972848242442889,
+  "grad_norm": 0.7981142401695251,
+  "learning_rate": 1.9063449601329769e-06,
+  "loss": 0.7328,
+  "step": 38650
+  },
+  {
+  "epoch": 2.976694100453811,
+  "grad_norm": 0.7305875420570374,
+  "learning_rate": 1.6466249383164948e-06,
+  "loss": 0.7103,
+  "step": 38700
+  },
+  {
+  "epoch": 2.9805399584647336,
+  "grad_norm": 1.197097659111023,
+  "learning_rate": 1.386904916500013e-06,
+  "loss": 0.7148,
+  "step": 38750
+  },
+  {
+  "epoch": 2.9805399584647336,
+  "eval_loss": 0.7678167819976807,
+  "eval_runtime": 17.8827,
+  "eval_samples_per_second": 55.92,
+  "eval_steps_per_second": 13.98,
+  "step": 38750
+  },
+  {
+  "epoch": 2.9843858164756556,
+  "grad_norm": 0.8533993363380432,
+  "learning_rate": 1.127184894683531e-06,
+  "loss": 0.753,
+  "step": 38800
+  },
+  {
+  "epoch": 2.988231674486578,
+  "grad_norm": 0.7372131943702698,
+  "learning_rate": 8.674648728670494e-07,
+  "loss": 0.7082,
+  "step": 38850
+  },
+  {
+  "epoch": 2.9920775324975004,
+  "grad_norm": 1.499084234237671,
+  "learning_rate": 6.077448510505675e-07,
+  "loss": 0.6937,
+  "step": 38900
+  },
+  {
+  "epoch": 2.9959233905084224,
+  "grad_norm": 0.5895427465438843,
+  "learning_rate": 3.4802482923408566e-07,
+  "loss": 0.701,
+  "step": 38950
+  },
+  {
+  "epoch": 2.999769248519345,
+  "grad_norm": 0.9201724529266357,
+  "learning_rate": 8.830480741760382e-08,
+  "loss": 0.7665,
+  "step": 39000
+  },
+  {
+  "epoch": 2.999769248519345,
+  "eval_loss": 0.767805814743042,
+  "eval_runtime": 17.9509,
+  "eval_samples_per_second": 55.708,
+  "eval_steps_per_second": 13.927,
+  "step": 39000
   }
  ],
  "logging_steps": 50,