dzanbek committed (verified)
Commit 7854eaf
1 Parent(s): cdc075d

Training in progress, step 30, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:44dc3fbbe13a71f5b9b2dc1dfdb312c59d3da6c2aadea975e4e891d129fe19ac
+oid sha256:89eb5ed31f068bc725e624783d234176de8048f66689a875c94baaf8827824d1
 size 25192688
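
The adapter weights above are stored through Git LFS, so the commit only rewrites the pointer: a new sha256 oid with an unchanged 25 MB size. A minimal sketch of how such a PEFT/LoRA adapter checkpoint is typically loaded, assuming the directory also contains an adapter_config.json; BASE_MODEL_ID is a placeholder, since this commit does not name the base model:

from peft import PeftModel
from transformers import AutoModelForCausalLM

# Placeholder: the base model is not named anywhere in this commit.
BASE_MODEL_ID = "your-base-model-id"

# Load the frozen base weights, then attach the LoRA adapter from the checkpoint.
base = AutoModelForCausalLM.from_pretrained(BASE_MODEL_ID)
model = PeftModel.from_pretrained(base, "last-checkpoint")
model.eval()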
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:47a781e6a58c1422569909ae38392e9af98cd2f9f39226137c88c8236db19da2
+oid sha256:e1f96d78be725272a2591e5c297aa5c9d02ffe12ab6f1c4717c4aea99d8b900e
 size 50493050
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:08bf2b2538bcf5289702f665cdf547aa18a0c25852676d00d65e9fa80ae6c966
+oid sha256:317bf1310bd01646e56bfd3b55b274ccfeeb7ef1eee3cbee4a51268b2b021418
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a62e6ea25099651400ff4a3142a50e40bef5b52ba883be53b2fcb9d1a5b0a98c
+oid sha256:2def2cd24154d8cecbaa07c36ae27e5ebb9b7273a78abfea27aa67c480e4ae2b
 size 1064
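
All four files above are Git LFS pointers, so each diff only swaps the sha256 oid while the recorded size stays the same. A minimal standard-library sketch for checking that a downloaded blob matches the oid recorded in its pointer, using the scheduler.pt oid from this commit:

import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    # Stream the file so large checkpoints do not need to fit in memory.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Expected oid copied from the new pointer for last-checkpoint/scheduler.pt.
expected = "2def2cd24154d8cecbaa07c36ae27e5ebb9b7273a78abfea27aa67c480e4ae2b"
actual = sha256_of("last-checkpoint/scheduler.pt")
print("match" if actual == expected else "mismatch")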
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 2.2585527896881104,
-  "best_model_checkpoint": "miner_id_24/checkpoint-20",
-  "epoch": 0.0721370604147881,
+  "best_metric": 2.131821632385254,
+  "best_model_checkpoint": "miner_id_24/checkpoint-30",
+  "epoch": 0.10820559062218214,
   "eval_steps": 5,
-  "global_step": 20,
+  "global_step": 30,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -89,6 +89,50 @@
       "eval_samples_per_second": 12.446,
       "eval_steps_per_second": 6.276,
       "step": 20
+    },
+    {
+      "epoch": 0.0757439134355275,
+      "grad_norm": 111.02490997314453,
+      "learning_rate": 8.435655349597689e-05,
+      "loss": 9.2688,
+      "step": 21
+    },
+    {
+      "epoch": 0.08656447249774572,
+      "grad_norm": 195.60818481445312,
+      "learning_rate": 4.12214747707527e-05,
+      "loss": 8.6093,
+      "step": 24
+    },
+    {
+      "epoch": 0.09017132551848513,
+      "eval_loss": 2.1531364917755127,
+      "eval_runtime": 9.4091,
+      "eval_samples_per_second": 12.435,
+      "eval_steps_per_second": 6.271,
+      "step": 25
+    },
+    {
+      "epoch": 0.09738503155996393,
+      "grad_norm": 3.565056800842285,
+      "learning_rate": 1.0899347581163221e-05,
+      "loss": 8.3193,
+      "step": 27
+    },
+    {
+      "epoch": 0.10820559062218214,
+      "grad_norm": 22.954635620117188,
+      "learning_rate": 0.0,
+      "loss": 8.2484,
+      "step": 30
+    },
+    {
+      "epoch": 0.10820559062218214,
+      "eval_loss": 2.131821632385254,
+      "eval_runtime": 9.4155,
+      "eval_samples_per_second": 12.426,
+      "eval_steps_per_second": 6.266,
+      "step": 30
     }
   ],
   "logging_steps": 3,
@@ -112,12 +156,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 489185718829056.0,
+  "total_flos": 750843196342272.0,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null