aseratus1 committed a78916b (verified)
1 parent: f6972b7

Training in progress, step 200, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ea86a1ab6262ea640f9a6d27090edfc246f6a07418b281494f642460d00ec911
+oid sha256:982fcee59ec684574e54f8db6d982cdae0847ab546bb74de7c18599d04455922
 size 912336848
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5baf2d64b3bfddde66c4fa2491255f53466e62972b130ea8a5398d69578308f3
+oid sha256:17820da8e21068c65acfc2c91ef6e07e39666c3454e42ee4de2ec1100e52b14e
 size 463916180
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:dc9d3500674a5c3e1216d67d490fdf1ccc5b42aab24568cc2d025e555b1470e4
+oid sha256:0a54508f9777a908cec330dbc41708af3eb66e01380aaf01f3bf181ea0b00d5f
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f4c9c807f0681c8b7e53ada9b6ec3dba530d303de7da0d0a0562a3d8d0bbba08
+oid sha256:d2d754412c61116546142914503e7369d0cc35d3c380a07e5218f595d76b6d96
 size 1064
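
The four pointer diffs above only swap the sha256 object id of each checkpoint file; the recorded sizes are unchanged, and the tensors themselves live in LFS storage rather than in the repo. A minimal sketch for checking a downloaded file against its pointer (the paths are illustrative, and it assumes the pointer text has been saved next to the fetched binary):

import hashlib
import os

def matches_lfs_pointer(pointer_path, file_path):
    """Return True if file_path matches the oid/size recorded in a Git LFS pointer."""
    fields = {}
    with open(pointer_path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value

    # Cheap size check first; only the digest changes in the diffs above.
    if os.path.getsize(file_path) != int(fields["size"]):
        return False

    # Stream the file so a ~900 MB adapter does not need to fit in memory.
    digest = hashlib.sha256()
    with open(file_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return fields["oid"] == "sha256:" + digest.hexdigest()

# Hypothetical usage against this commit's layout:
# matches_lfs_pointer("adapter_model.pointer", "last-checkpoint/adapter_model.safetensors")
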
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.8167033791542053,
-  "best_model_checkpoint": "miner_id_24/checkpoint-150",
-  "epoch": 0.05697464628240433,
+  "best_metric": 0.7954394221305847,
+  "best_model_checkpoint": "miner_id_24/checkpoint-200",
+  "epoch": 0.07596619504320577,
   "eval_steps": 50,
-  "global_step": 150,
+  "global_step": 200,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -144,6 +144,49 @@
       "eval_samples_per_second": 9.447,
       "eval_steps_per_second": 2.363,
       "step": 150
+    },
+    {
+      "epoch": 0.06077295603456462,
+      "grad_norm": 8.532111167907715,
+      "learning_rate": 1.1697777844051105e-05,
+      "loss": 3.3664,
+      "step": 160
+    },
+    {
+      "epoch": 0.06457126578672491,
+      "grad_norm": 15.966036796569824,
+      "learning_rate": 6.698729810778065e-06,
+      "loss": 3.0575,
+      "step": 170
+    },
+    {
+      "epoch": 0.0683695755388852,
+      "grad_norm": 16.28304100036621,
+      "learning_rate": 3.0153689607045845e-06,
+      "loss": 3.0103,
+      "step": 180
+    },
+    {
+      "epoch": 0.07216788529104548,
+      "grad_norm": 19.59560775756836,
+      "learning_rate": 7.596123493895991e-07,
+      "loss": 3.1984,
+      "step": 190
+    },
+    {
+      "epoch": 0.07596619504320577,
+      "grad_norm": 23.846603393554688,
+      "learning_rate": 0.0,
+      "loss": 3.4317,
+      "step": 200
+    },
+    {
+      "epoch": 0.07596619504320577,
+      "eval_loss": 0.7954394221305847,
+      "eval_runtime": 469.4851,
+      "eval_samples_per_second": 9.444,
+      "eval_steps_per_second": 2.362,
+      "step": 200
     }
   ],
   "logging_steps": 10,
@@ -167,12 +210,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 3.48136509800448e+17,
+  "total_flos": 4.64182013067264e+17,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null