iamnguyen committed
Commit 3ff766a · verified · 1 Parent(s): c12e544

Training in progress, step 176, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d5872be2126b15802eada500482cac157e1cd15f99b724a9f187dba5f99bafc9
+oid sha256:72ce63bf80e3fe75c543395c98fbe8e01a2544bd8150423346530c9da91f0ce1
 size 479769104
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f2bc8c5c01889b8832eca2082109ae4436d43907021d6cf8390bfb7f695a25a9
+oid sha256:b509609cd98971e06b2049ce774579afe57b9760f48d07ecf58ba2c8524a8b17
 size 240728084
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:300c8159d4f2286b17795d32870d10fc9b17bf00e3d175e60b6c67ddbb2ad949
+oid sha256:4add145129184e11af087cef41cfbbb05756226b865c01b02ddb6159cd74dc10
 size 1064
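
The three binary files above are tracked with Git LFS, so the diff only rewrites their pointer files: `version` and `size` are unchanged and only the `oid sha256:` line moves to the new blob. Below is a minimal sketch of checking a downloaded blob against such a pointer; the local paths are hypothetical.

```python
import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_text: str) -> dict:
    """Parse the key/value lines of a Git LFS pointer file."""
    fields = {}
    for line in pointer_text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

def verify_blob(pointer_path: str, blob_path: str) -> bool:
    """Check that a downloaded blob matches the oid and size recorded in its pointer."""
    fields = parse_lfs_pointer(Path(pointer_path).read_text())
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])

    blob = Path(blob_path)
    actual_oid = hashlib.sha256(blob.read_bytes()).hexdigest()
    return actual_oid == expected_oid and blob.stat().st_size == expected_size

# Hypothetical paths: a pointer file kept aside and the resolved binary.
# print(verify_blob("adapter_model.safetensors.pointer", "adapter_model.safetensors"))
```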
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.010344207546826733,
+  "epoch": 0.011378628301509406,
   "eval_steps": 500,
-  "global_step": 160,
+  "global_step": 176,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1127,6 +1127,118 @@
       "learning_rate": 9.999997369030074e-06,
       "loss": 1.3629,
       "step": 160
+    },
+    {
+      "epoch": 0.0104088588439944,
+      "grad_norm": 4.200235366821289,
+      "learning_rate": 9.999996211403454e-06,
+      "loss": 1.4429,
+      "step": 161
+    },
+    {
+      "epoch": 0.010473510141162067,
+      "grad_norm": 4.418542385101318,
+      "learning_rate": 9.999994843299381e-06,
+      "loss": 1.3971,
+      "step": 162
+    },
+    {
+      "epoch": 0.010538161438329734,
+      "grad_norm": 4.672099590301514,
+      "learning_rate": 9.999993264717911e-06,
+      "loss": 1.3352,
+      "step": 163
+    },
+    {
+      "epoch": 0.0106028127354974,
+      "grad_norm": 4.304332256317139,
+      "learning_rate": 9.999991475659115e-06,
+      "loss": 1.3116,
+      "step": 164
+    },
+    {
+      "epoch": 0.010667464032665068,
+      "grad_norm": 4.34376335144043,
+      "learning_rate": 9.999989476123067e-06,
+      "loss": 1.3822,
+      "step": 165
+    },
+    {
+      "epoch": 0.010732115329832734,
+      "grad_norm": 4.60739278793335,
+      "learning_rate": 9.999987266109848e-06,
+      "loss": 1.3655,
+      "step": 166
+    },
+    {
+      "epoch": 0.010796766627000402,
+      "grad_norm": 6.895850658416748,
+      "learning_rate": 9.999984845619553e-06,
+      "loss": 1.3699,
+      "step": 167
+    },
+    {
+      "epoch": 0.010861417924168068,
+      "grad_norm": 3.9377450942993164,
+      "learning_rate": 9.999982214652286e-06,
+      "loss": 1.4356,
+      "step": 168
+    },
+    {
+      "epoch": 0.010926069221335736,
+      "grad_norm": 4.687614440917969,
+      "learning_rate": 9.999979373208155e-06,
+      "loss": 1.3916,
+      "step": 169
+    },
+    {
+      "epoch": 0.010990720518503404,
+      "grad_norm": 4.686786651611328,
+      "learning_rate": 9.99997632128728e-06,
+      "loss": 1.3448,
+      "step": 170
+    },
+    {
+      "epoch": 0.01105537181567107,
+      "grad_norm": 4.827507495880127,
+      "learning_rate": 9.999973058889791e-06,
+      "loss": 1.3514,
+      "step": 171
+    },
+    {
+      "epoch": 0.011120023112838738,
+      "grad_norm": 4.3625617027282715,
+      "learning_rate": 9.999969586015824e-06,
+      "loss": 1.4232,
+      "step": 172
+    },
+    {
+      "epoch": 0.011184674410006404,
+      "grad_norm": 4.434966087341309,
+      "learning_rate": 9.999965902665524e-06,
+      "loss": 1.4217,
+      "step": 173
+    },
+    {
+      "epoch": 0.011249325707174072,
+      "grad_norm": 5.053067207336426,
+      "learning_rate": 9.99996200883905e-06,
+      "loss": 1.4709,
+      "step": 174
+    },
+    {
+      "epoch": 0.011313977004341738,
+      "grad_norm": 4.309473514556885,
+      "learning_rate": 9.999957904536562e-06,
+      "loss": 1.3668,
+      "step": 175
+    },
+    {
+      "epoch": 0.011378628301509406,
+      "grad_norm": 4.460648536682129,
+      "learning_rate": 9.999953589758235e-06,
+      "loss": 1.4512,
+      "step": 176
     }
   ],
   "logging_steps": 1,
@@ -1146,7 +1258,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.0243901698014413e+17,
+  "total_flos": 1.1276252815822848e+17,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null