infogep committed
Commit 51c77d6 (verified) · 1 Parent(s): 5d14d91

Training in progress, step 20, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:01e464a3bbe52b949168e708676cb441625e8c1fa2117d34141ea15e3634a445
+oid sha256:dea4b7976ba6cc61571f2332740282f3b16fd9afa7e849a976c102e1728e14a4
 size 56660888
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:262295db875ec1922efa03595f66493f78ae0aa6f558f4f8ec19a2808d5516bb
+oid sha256:1dfa39e0335ccdf1182e3407ba30c3a4b25237199632d6f494ad3084d51b9690
 size 113488058
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d317c8c136dbfaa7d00b3b636a57f9087e0cc72926988c2be1ed6398941542ca
+oid sha256:e795150f6ce7fa12ee4e6e03a761abfdca9e4c5647c2b5c8b15eb7fe34e24b61
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c188a6a4749e6ca627bb6d536eb7443f499d5b1b88d98a78f9c713443e010d9c
+oid sha256:015707cb16790250630febca682498cb5d3456d5a13443b953687f19dc7d59ed
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 4.860773086547852,
-  "best_model_checkpoint": "miner_id_24/checkpoint-10",
-  "epoch": 0.008481764206955046,
+  "best_metric": 4.792747497558594,
+  "best_model_checkpoint": "miner_id_24/checkpoint-20",
+  "epoch": 0.016963528413910092,
   "eval_steps": 5,
-  "global_step": 10,
+  "global_step": 20,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -52,6 +52,43 @@
       "eval_samples_per_second": 23.258,
       "eval_steps_per_second": 5.832,
       "step": 10
+    },
+    {
+      "epoch": 0.010178117048346057,
+      "grad_norm": 0.13161055743694305,
+      "learning_rate": 0.000163742398974869,
+      "loss": 4.8411,
+      "step": 12
+    },
+    {
+      "epoch": 0.01272264631043257,
+      "grad_norm": 0.14220638573169708,
+      "learning_rate": 0.00013090169943749476,
+      "loss": 4.8331,
+      "step": 15
+    },
+    {
+      "epoch": 0.01272264631043257,
+      "eval_loss": 4.81943416595459,
+      "eval_runtime": 42.6958,
+      "eval_samples_per_second": 23.258,
+      "eval_steps_per_second": 5.832,
+      "step": 15
+    },
+    {
+      "epoch": 0.015267175572519083,
+      "grad_norm": 0.12289228290319443,
+      "learning_rate": 9.372094804706867e-05,
+      "loss": 4.8002,
+      "step": 18
+    },
+    {
+      "epoch": 0.016963528413910092,
+      "eval_loss": 4.792747497558594,
+      "eval_runtime": 42.6653,
+      "eval_samples_per_second": 23.274,
+      "eval_steps_per_second": 5.836,
+      "step": 20
     }
   ],
   "logging_steps": 3,
@@ -80,7 +117,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 2610106655047680.0,
+  "total_flos": 5220213310095360.0,
   "train_batch_size": 4,
   "trial_name": null,
   "trial_params": null