besimray committed on
Commit 49f3d42 · verified · 1 Parent(s): 4f91f05

Training in progress, step 20, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:90dc1d9881c892828e9db1710ba7ca5fae5f23e33727fe523f3b8feeb1c59251
+ oid sha256:a84deee13dd315e8fcbcb28d22e374b4e919e8d119644daac24c63ee4b326580
  size 90207248
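
The file above appears to be the PEFT/LoRA adapter weights for this checkpoint, stored as a Git LFS pointer (only the sha256 oid and byte size live in the repo). A minimal sketch of loading such an adapter on top of its base model, assuming the checkpoint was produced with the peft library and that the base is a causal LM; BASE_MODEL_ID is a placeholder, since the base model is not named in this commit:

# Minimal sketch, assuming a peft-trained causal-LM adapter.
# BASE_MODEL_ID is hypothetical -- the base model is not identified in this commit,
# and loading also relies on adapter_config.json being present in last-checkpoint/.
from transformers import AutoModelForCausalLM
from peft import PeftModel

BASE_MODEL_ID = "your-base-model"  # placeholder
base = AutoModelForCausalLM.from_pretrained(BASE_MODEL_ID)
model = PeftModel.from_pretrained(base, "last-checkpoint")  # reads adapter_model.safetensors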
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5e8bca7e2e4ea0ce3d7c8ca91f49b34ce31a8f4654fe65e50b8ecee5497b2c18
+ oid sha256:0ab33aabb23561c4a90afb5ee894e1faaf2a53a68f4feafcc5727886b373b7cf
  size 46057082
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3d06bafe6b7413a64b1b2b7501fafe281fdadc6b8d8a7d2b8d42c504a96de904
+ oid sha256:9d8414ba48354825ad20b0a3d80cc23ff1d239366d3e0da53cb9bbe2c2455ccc
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:bb578e75c11a81e85dda67a691f96ba4793a02960f1409fd3e1511aac873491a
+ oid sha256:321b462a2538632d6d720f0cf198c8f471dee11f51db9b50cc50d1fa7f132bbe
  size 1064
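
Each of the four checkpoint files above is a Git LFS pointer: the repo tracks only the spec version, the sha256 oid of the blob, and its size. A minimal sketch for checking a downloaded checkpoint file against the oid recorded in its pointer; the expected digest below is the new scheduler.pt oid from the diff above:

# Minimal sketch: verify a downloaded file against its Git LFS sha256 oid.
import hashlib

def sha256_of(path, chunk_size=1 << 20):
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# oid taken from the scheduler.pt pointer shown above
expected = "321b462a2538632d6d720f0cf198c8f471dee11f51db9b50cc50d1fa7f132bbe"
assert sha256_of("last-checkpoint/scheduler.pt") == expected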
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
- "best_metric": 2.1853621006011963,
- "best_model_checkpoint": "miner_id_24/checkpoint-10",
- "epoch": 0.0028238616307800918,
+ "best_metric": 1.7350926399230957,
+ "best_model_checkpoint": "miner_id_24/checkpoint-20",
+ "epoch": 0.0056477232615601836,
  "eval_steps": 10,
- "global_step": 10,
+ "global_step": 20,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -93,6 +93,84 @@
  "eval_samples_per_second": 5.568,
  "eval_steps_per_second": 5.568,
  "step": 10
+ },
+ {
+ "epoch": 0.003106247793858101,
+ "grad_norm": 0.9601467847824097,
+ "learning_rate": 0.0001999979446958366,
+ "loss": 1.9811,
+ "step": 11
+ },
+ {
+ "epoch": 0.00338863395693611,
+ "grad_norm": 1.353819489479065,
+ "learning_rate": 0.00019999177886783194,
+ "loss": 2.373,
+ "step": 12
+ },
+ {
+ "epoch": 0.0036710201200141194,
+ "grad_norm": 0.897077202796936,
+ "learning_rate": 0.00019998150276943902,
+ "loss": 1.696,
+ "step": 13
+ },
+ {
+ "epoch": 0.003953406283092128,
+ "grad_norm": 1.358173131942749,
+ "learning_rate": 0.000199967116823068,
+ "loss": 2.3396,
+ "step": 14
+ },
+ {
+ "epoch": 0.004235792446170138,
+ "grad_norm": 1.0444282293319702,
+ "learning_rate": 0.0001999486216200688,
+ "loss": 1.482,
+ "step": 15
+ },
+ {
+ "epoch": 0.004518178609248147,
+ "grad_norm": 3.5194482803344727,
+ "learning_rate": 0.00019992601792070679,
+ "loss": 1.6677,
+ "step": 16
+ },
+ {
+ "epoch": 0.004800564772326156,
+ "grad_norm": 3.523196220397949,
+ "learning_rate": 0.00019989930665413147,
+ "loss": 2.393,
+ "step": 17
+ },
+ {
+ "epoch": 0.005082950935404165,
+ "grad_norm": 0.9800947904586792,
+ "learning_rate": 0.00019986848891833845,
+ "loss": 1.6947,
+ "step": 18
+ },
+ {
+ "epoch": 0.005365337098482174,
+ "grad_norm": 1.540337324142456,
+ "learning_rate": 0.0001998335659801241,
+ "loss": 0.563,
+ "step": 19
+ },
+ {
+ "epoch": 0.0056477232615601836,
+ "grad_norm": 1.577869176864624,
+ "learning_rate": 0.00019979453927503364,
+ "loss": 2.0525,
+ "step": 20
+ },
+ {
+ "epoch": 0.0056477232615601836,
+ "eval_loss": 1.7350926399230957,
+ "eval_runtime": 133.9613,
+ "eval_samples_per_second": 5.569,
+ "eval_steps_per_second": 5.569,
+ "step": 20
  }
  ],
  "logging_steps": 1,
@@ -121,7 +199,7 @@
  "attributes": {}
  }
  },
- "total_flos": 978803585187840.0,
+ "total_flos": 1957607170375680.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null