kooff11 committed on
Commit 77231d3
1 Parent(s): b12aed0

Training in progress, step 39, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7c57fe09243896e76e6f76c61f17bc3984147f179fca4fb74f9ffa39488fe0c0
+oid sha256:775d50206878581dfe06ec2396904f1d59c23c680396c2cd1f3525cccb567480
 size 45118424
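
The adapter weights themselves live in Git LFS, so the commit only rewrites the three-line pointer (spec version, sha256 oid, byte size); the same pattern repeats for the optimizer, RNG and scheduler files below. As a rough illustration (not part of this repo), a pointer like the one above can be parsed and checked against a locally fetched blob like this:

import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_path: str) -> dict:
    """Parse a Git LFS pointer file into its key/value fields."""
    fields = {}
    for line in Path(pointer_path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

def verify_blob(pointer_path: str, blob_path: str) -> bool:
    """Check a downloaded blob against the oid/size recorded in the pointer."""
    fields = parse_lfs_pointer(pointer_path)
    expected_oid = fields["oid"].split(":", 1)[1]   # strip the "sha256:" prefix
    expected_size = int(fields["size"])
    data = Path(blob_path).read_bytes()
    return len(data) == expected_size and hashlib.sha256(data).hexdigest() == expected_oid

# Hypothetical usage: first path is the pointer as stored in git,
# second is the resolved LFS blob on disk.
# verify_blob("last-checkpoint/adapter_model.safetensors",
#             "downloads/adapter_model.safetensors")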
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fcfdbc7bca82335aae340b66c7faa043a100ef8deb0953fa142d1c1c8131445d
+oid sha256:0c2aa9216e4e1a07f85db34440ac81d831b797ceb306ad4cdc207f086df04e93
 size 23159290
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cfadc05d2d983979ebf39752461dcf62eb230eb8d820bd09cf23d7888d9df570
+oid sha256:2f3771cff6451a6af8ba56cd60be1bd6705dffa64cc52c8fdd9f27e3f1d0b72f
 size 14512
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:826ab5cbdeebfceff791289dbda0e94c2a0531ba16021b3dca9d34da57cc673a
+oid sha256:5ae10977704005c7d5bcd9df9eff952d9a1d3f45b6f2413e502030767ab97045
 size 14512
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5e4f8bcc2f798ba5b1e1d3cd63c141ee8b120ed84e3abbab7c93fa5dd43e4059
+oid sha256:7e78d5e09340c0681df725fbe1595e157ca67a75816fc394229ff49b11a9add2
 size 1064
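
Besides the adapter weights, the checkpoint carries the optimizer, LR-scheduler and per-process RNG states, which is what lets a run resume deterministically from step 39. A quick way to peek at those states once the LFS blobs have been pulled (paths are the files in this commit; the printed keys reflect typical Trainer output and are not verified against these exact files):

import torch

# Files must be real blobs, not unpulled LFS pointers. On recent PyTorch
# versions torch.load may need weights_only=False for the RNG state files.
optimizer_state = torch.load("last-checkpoint/optimizer.pt", map_location="cpu")
scheduler_state = torch.load("last-checkpoint/scheduler.pt", map_location="cpu")
rng_state = torch.load("last-checkpoint/rng_state_0.pth", map_location="cpu")

print(list(optimizer_state.keys()))   # usually ["state", "param_groups"]
print(scheduler_state)                # small LR-scheduler dict, hence the 1064-byte file
print(type(rng_state))                # per-rank RNG snapshot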
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.7862407862407862,
+  "epoch": 1.0221130221130221,
   "eval_steps": 10,
-  "global_step": 30,
+  "global_step": 39,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -249,6 +249,69 @@
       "eval_samples_per_second": 37.045,
       "eval_steps_per_second": 9.369,
       "step": 30
+    },
+    {
+      "epoch": 0.8124488124488124,
+      "grad_norm": 0.4604407548904419,
+      "learning_rate": 1.1098212284078036e-05,
+      "loss": 1.6154,
+      "step": 31
+    },
+    {
+      "epoch": 0.8386568386568387,
+      "grad_norm": 0.4262702763080597,
+      "learning_rate": 8.574517537807897e-06,
+      "loss": 1.6072,
+      "step": 32
+    },
+    {
+      "epoch": 0.8648648648648649,
+      "grad_norm": 0.452141135931015,
+      "learning_rate": 6.349294341940593e-06,
+      "loss": 1.6484,
+      "step": 33
+    },
+    {
+      "epoch": 0.8910728910728911,
+      "grad_norm": 0.45040103793144226,
+      "learning_rate": 4.43857548059321e-06,
+      "loss": 1.6131,
+      "step": 34
+    },
+    {
+      "epoch": 0.9172809172809173,
+      "grad_norm": 0.43721693754196167,
+      "learning_rate": 2.85612772694579e-06,
+      "loss": 1.5875,
+      "step": 35
+    },
+    {
+      "epoch": 0.9434889434889435,
+      "grad_norm": 0.4300181269645691,
+      "learning_rate": 1.6133526533250565e-06,
+      "loss": 1.6122,
+      "step": 36
+    },
+    {
+      "epoch": 0.9696969696969697,
+      "grad_norm": 0.4537125825881958,
+      "learning_rate": 7.192044826145771e-07,
+      "loss": 1.6159,
+      "step": 37
+    },
+    {
+      "epoch": 0.9959049959049959,
+      "grad_norm": 0.44113290309906006,
+      "learning_rate": 1.8012557287367392e-07,
+      "loss": 1.6142,
+      "step": 38
+    },
+    {
+      "epoch": 1.0221130221130221,
+      "grad_norm": 1.147554636001587,
+      "learning_rate": 0.0,
+      "loss": 2.9159,
+      "step": 39
     }
   ],
   "logging_steps": 1,
@@ -263,12 +326,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 9.425617760446054e+16,
+  "total_flos": 1.2251367690416947e+17,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null