dsakerkwq committed
Commit d1c4bf6 · verified · 1 Parent(s): 6508958

Training in progress, step 34, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e3dcf08a07630b5d8fc12ef87471f9719a4330767db2f392e7e778819cc19793
+ oid sha256:4b336e8073b3a2fd13e0b0c7c2350a546f4ead5d10a9b415d60b63dd48fca689
  size 400084608
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f634f46c5652f6927cadf4b686224a762c5aab8d5eeea6c0483118e382980539
+ oid sha256:f21c75ebd30d8c38e76f59f8cf907a2137ff4f272476fbe3fb4fae86bad3447d
  size 800394282
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:116799cfcafee8097de641f682eb6f4469057abad98c74c9f9734a95406a096b
+ oid sha256:6d22c5e4d98a66829548cb600325d63aaa8739d17a7aecf0100846f2afe72e70
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:23d848dabb6bc38ff43473c21d7be2b08bb8548094e836702054933aaacbbfba
+ oid sha256:767a2fd667ddadd1421c73fc4302b452a37b1de4baa59f021c867ede8f954df9
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7539ac9f02350f27a3e6b73b4f7464ed1d850a06448ed4b703d06302d3eb098b
+ oid sha256:49a812e938b03f1fdbad64f9936ab0790ba8fcbc329d29a159207d72cdc68f26
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:cb75b64ab4ee313717cb111207c37bf02c69745d8fb056d01bab9109e6ae055a
+ oid sha256:b79cfb5239c137907672a844adb26ffb5c8d84a40da24ce2be6abd4a077036d8
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c220d2fbe9a2870f9ac93749b3d4b3852ce94b5983d948fa8de3d35e453f98ce
+ oid sha256:7312f49c1e1b01f68cdaee175129535fda49cd22ed1f251a2745d2c8c42996be
  size 1064
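
All of the checkpoint binaries above are tracked with Git LFS, so this commit only rewrites their pointer files (the version / oid sha256 / size triplets); the weights themselves are fetched separately, e.g. with `git lfs pull`. Below is a minimal sketch of how one could verify a locally pulled file against its updated pointer, using only the Python standard library; the local path and the verification flow are assumptions for illustration, not part of this commit.

import hashlib
import os

# Expected values copied from the new adapter_model.safetensors pointer above.
EXPECTED_OID = "4b336e8073b3a2fd13e0b0c7c2350a546f4ead5d10a9b415d60b63dd48fca689"
EXPECTED_SIZE = 400084608
PATH = "last-checkpoint/adapter_model.safetensors"  # assumed local path after `git lfs pull`

def sha256_of(path, chunk_size=1 << 20):
    # Stream the file in 1 MiB chunks so large checkpoints need not fit in memory.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

assert os.path.getsize(PATH) == EXPECTED_SIZE, "size does not match the LFS pointer"
assert sha256_of(PATH) == EXPECTED_OID, "sha256 does not match the LFS pointer oid"
print("adapter_model.safetensors matches its Git LFS pointer")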
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": 0.8160011172294617,
  "best_model_checkpoint": "miner_id_24/checkpoint-25",
- "epoch": 2.2598870056497176,
+ "epoch": 3.073446327683616,
  "eval_steps": 25,
- "global_step": 25,
+ "global_step": 34,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -198,6 +198,69 @@
  "eval_samples_per_second": 21.497,
  "eval_steps_per_second": 5.589,
  "step": 25
+ },
+ {
+ "epoch": 2.3502824858757063,
+ "grad_norm": 2.9992833137512207,
+ "learning_rate": 2.3180194846605367e-05,
+ "loss": 0.7953,
+ "step": 26
+ },
+ {
+ "epoch": 2.440677966101695,
+ "grad_norm": 3.01076602935791,
+ "learning_rate": 2.0214529598676836e-05,
+ "loss": 0.7956,
+ "step": 27
+ },
+ {
+ "epoch": 2.5310734463276834,
+ "grad_norm": 3.3934991359710693,
+ "learning_rate": 1.758386744638546e-05,
+ "loss": 0.8145,
+ "step": 28
+ },
+ {
+ "epoch": 2.621468926553672,
+ "grad_norm": 2.1873085498809814,
+ "learning_rate": 1.531354310432403e-05,
+ "loss": 0.7734,
+ "step": 29
+ },
+ {
+ "epoch": 2.711864406779661,
+ "grad_norm": 1.753711462020874,
+ "learning_rate": 1.3425421036992098e-05,
+ "loss": 0.7645,
+ "step": 30
+ },
+ {
+ "epoch": 2.8022598870056497,
+ "grad_norm": 3.903904914855957,
+ "learning_rate": 1.1937684892050604e-05,
+ "loss": 0.8525,
+ "step": 31
+ },
+ {
+ "epoch": 2.8926553672316384,
+ "grad_norm": 1.5703907012939453,
+ "learning_rate": 1.0864662381854632e-05,
+ "loss": 0.7837,
+ "step": 32
+ },
+ {
+ "epoch": 2.983050847457627,
+ "grad_norm": 1.638397216796875,
+ "learning_rate": 1.0216687299751144e-05,
+ "loss": 0.8452,
+ "step": 33
+ },
+ {
+ "epoch": 3.073446327683616,
+ "grad_norm": 1.8110815286636353,
+ "learning_rate": 1e-05,
+ "loss": 1.5157,
+ "step": 34
  }
  ],
  "logging_steps": 1,
@@ -221,12 +284,12 @@
  "should_evaluate": false,
  "should_log": false,
  "should_save": true,
- "should_training_stop": false
+ "should_training_stop": true
  },
  "attributes": {}
  }
  },
- "total_flos": 3.087239672233984e+17,
+ "total_flos": 4.198645954238218e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null