joelniklaus committed
Commit 71ea8b7
1 Parent(s): 53bf0fe

Training in progress, step 400000

last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:01eeb7203c73ae25830371a530337eca01f1003b7a50caee9183d76ee91ba7de
+oid sha256:df30fe6bd68fca40ee22ea8ec37ae156b39effe17da0e8c6bd90f869f1186d37
 size 1475917081
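The binary checkpoint files in this commit are stored as Git LFS pointers, so each diff only swaps the `oid sha256:` digest while the `size` stays the same. Below is a minimal sketch (not part of this repository) of how a locally pulled blob could be checked against the new pointer; the helper name `verify_lfs_pointer` and the file path are illustrative assumptions.

```python
# Minimal sketch: verify a git-lfs v1 pointer (oid + size) against a local blob.
# The path below assumes the LFS objects have been pulled; it is illustrative only.
import hashlib
from pathlib import Path


def verify_lfs_pointer(pointer_text: str, blob_path: str) -> bool:
    """Return True if the local file matches the pointer's size and sha256 oid."""
    fields = dict(line.split(" ", 1) for line in pointer_text.strip().splitlines())
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])

    blob = Path(blob_path)
    if blob.stat().st_size != expected_size:
        return False

    digest = hashlib.sha256()
    with blob.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid


if __name__ == "__main__":
    pointer = (
        "version https://git-lfs.github.com/spec/v1\n"
        "oid sha256:df30fe6bd68fca40ee22ea8ec37ae156b39effe17da0e8c6bd90f869f1186d37\n"
        "size 1475917081\n"
    )
    print(verify_lfs_pointer(pointer, "last-checkpoint/optimizer.pt"))
```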
last-checkpoint/pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4a4b61df7a33ac16523e6c4ab0a364e641dc3a424040f9c827d0938f4fbd92de
+oid sha256:212e05f8e854456a203541ab3ff307559133e87b152fda8eac6ef2b267ce568c
 size 737971755
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:329fbeb25a56616a76e374f3b84422a557e38ebfe8539cf5633ebfa81a135c8d
+oid sha256:4e2fa1a5b18a74204480f2aa3043d9976bc826b4d2feee067fc7c05a5c355b7a
 size 13611
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:329fbeb25a56616a76e374f3b84422a557e38ebfe8539cf5633ebfa81a135c8d
+oid sha256:4e2fa1a5b18a74204480f2aa3043d9976bc826b4d2feee067fc7c05a5c355b7a
 size 13611
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:329fbeb25a56616a76e374f3b84422a557e38ebfe8539cf5633ebfa81a135c8d
+oid sha256:4e2fa1a5b18a74204480f2aa3043d9976bc826b4d2feee067fc7c05a5c355b7a
 size 13611
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:329fbeb25a56616a76e374f3b84422a557e38ebfe8539cf5633ebfa81a135c8d
+oid sha256:4e2fa1a5b18a74204480f2aa3043d9976bc826b4d2feee067fc7c05a5c355b7a
 size 13611
last-checkpoint/rng_state_4.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:329fbeb25a56616a76e374f3b84422a557e38ebfe8539cf5633ebfa81a135c8d
+oid sha256:4e2fa1a5b18a74204480f2aa3043d9976bc826b4d2feee067fc7c05a5c355b7a
 size 13611
last-checkpoint/rng_state_5.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:329fbeb25a56616a76e374f3b84422a557e38ebfe8539cf5633ebfa81a135c8d
+oid sha256:4e2fa1a5b18a74204480f2aa3043d9976bc826b4d2feee067fc7c05a5c355b7a
 size 13611
last-checkpoint/rng_state_6.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:329fbeb25a56616a76e374f3b84422a557e38ebfe8539cf5633ebfa81a135c8d
+oid sha256:4e2fa1a5b18a74204480f2aa3043d9976bc826b4d2feee067fc7c05a5c355b7a
 size 13611
last-checkpoint/rng_state_7.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:329fbeb25a56616a76e374f3b84422a557e38ebfe8539cf5633ebfa81a135c8d
+oid sha256:4e2fa1a5b18a74204480f2aa3043d9976bc826b4d2feee067fc7c05a5c355b7a
 size 13611
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f8f220426de5a076dbb6f66f54955d3a3fc0acbab10b1bd60cf9472b552bfdca
+oid sha256:3e4ecef8b58c710458716a0153f8519567dd2a15c4728bc445f0af4d3fb15782
 size 623
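Together, `optimizer.pt`, `pytorch_model.bin`, `rng_state_0.pth` through `rng_state_7.pth`, `scheduler.pt`, and `trainer_state.json` form the `last-checkpoint/` directory that this commit advances to step 400000. The sketch below shows one way to inspect the refreshed weights locally; it assumes the LFS blobs have been pulled and that `pytorch_model.bin` holds an ordinary PyTorch state dict, which is typical for checkpoints of this layout but not stated in the commit itself.

```python
# Minimal sketch: inspect the updated checkpoint weights after pulling the LFS
# blobs. Assumes pytorch_model.bin is a plain state_dict of tensors; the path
# is illustrative only.
import torch

state_dict = torch.load("last-checkpoint/pytorch_model.bin", map_location="cpu")

num_params = sum(t.numel() for t in state_dict.values())
print(f"tensors: {len(state_dict)}, parameters: {num_params:,}")

# Peek at a few entries to sanity-check names, shapes, and dtypes.
for name, tensor in list(state_dict.items())[:5]:
    print(f"{name:60s} {tuple(tensor.shape)} {tensor.dtype}")
```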
last-checkpoint/trainer_state.json CHANGED
@@ -1,8 +1,8 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.35,
-  "global_step": 350000,
+  "epoch": 0.4,
+  "global_step": 400000,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -2162,11 +2162,319 @@
       "eval_samples_per_second": 43.775,
       "eval_steps_per_second": 0.35,
       "step": 350000
+    },
+    {
+      "epoch": 0.35,
+      "learning_rate": 7.720883567456298e-05,
+      "loss": 0.8513,
+      "step": 351000
+    },
+    {
+      "epoch": 0.35,
+      "learning_rate": 7.70699658915369e-05,
+      "loss": 0.8621,
+      "step": 352000
+    },
+    {
+      "epoch": 0.35,
+      "learning_rate": 7.693080007570084e-05,
+      "loss": 0.883,
+      "step": 353000
+    },
+    {
+      "epoch": 0.35,
+      "learning_rate": 7.679133974894983e-05,
+      "loss": 0.8509,
+      "step": 354000
+    },
+    {
+      "epoch": 0.35,
+      "learning_rate": 7.66515864363997e-05,
+      "loss": 0.8383,
+      "step": 355000
+    },
+    {
+      "epoch": 0.36,
+      "learning_rate": 7.651154166637025e-05,
+      "loss": 0.861,
+      "step": 356000
+    },
+    {
+      "epoch": 0.36,
+      "learning_rate": 7.637120697036866e-05,
+      "loss": 0.8572,
+      "step": 357000
+    },
+    {
+      "epoch": 0.36,
+      "learning_rate": 7.623058388307269e-05,
+      "loss": 0.8382,
+      "step": 358000
+    },
+    {
+      "epoch": 0.36,
+      "learning_rate": 7.608967394231387e-05,
+      "loss": 0.8728,
+      "step": 359000
+    },
+    {
+      "epoch": 0.36,
+      "learning_rate": 7.594847868906076e-05,
+      "loss": 0.8893,
+      "step": 360000
+    },
+    {
+      "epoch": 0.36,
+      "learning_rate": 7.580699966740201e-05,
+      "loss": 0.8877,
+      "step": 361000
+    },
+    {
+      "epoch": 0.36,
+      "learning_rate": 7.566523842452958e-05,
+      "loss": 0.8866,
+      "step": 362000
+    },
+    {
+      "epoch": 0.36,
+      "learning_rate": 7.552319651072164e-05,
+      "loss": 0.8975,
+      "step": 363000
+    },
+    {
+      "epoch": 0.36,
+      "learning_rate": 7.538087547932585e-05,
+      "loss": 0.8892,
+      "step": 364000
+    },
+    {
+      "epoch": 0.36,
+      "learning_rate": 7.52382768867422e-05,
+      "loss": 0.8823,
+      "step": 365000
+    },
+    {
+      "epoch": 0.37,
+      "learning_rate": 7.509540229240601e-05,
+      "loss": 0.8824,
+      "step": 366000
+    },
+    {
+      "epoch": 0.37,
+      "learning_rate": 7.495225325877103e-05,
+      "loss": 0.8842,
+      "step": 367000
+    },
+    {
+      "epoch": 0.37,
+      "learning_rate": 7.480883135129211e-05,
+      "loss": 0.8951,
+      "step": 368000
+    },
+    {
+      "epoch": 0.37,
+      "learning_rate": 7.466513813840825e-05,
+      "loss": 0.9146,
+      "step": 369000
+    },
+    {
+      "epoch": 0.37,
+      "learning_rate": 7.452117519152542e-05,
+      "loss": 0.9109,
+      "step": 370000
+    },
+    {
+      "epoch": 0.37,
+      "learning_rate": 7.437694408499933e-05,
+      "loss": 0.9195,
+      "step": 371000
+    },
+    {
+      "epoch": 0.37,
+      "learning_rate": 7.423244639611826e-05,
+      "loss": 0.9092,
+      "step": 372000
+    },
+    {
+      "epoch": 0.37,
+      "learning_rate": 7.408768370508576e-05,
+      "loss": 0.9264,
+      "step": 373000
+    },
+    {
+      "epoch": 0.37,
+      "learning_rate": 7.394265759500348e-05,
+      "loss": 0.909,
+      "step": 374000
+    },
+    {
+      "epoch": 0.38,
+      "learning_rate": 7.379736965185368e-05,
+      "loss": 0.9017,
+      "step": 375000
+    },
+    {
+      "epoch": 0.38,
+      "learning_rate": 7.365182146448205e-05,
+      "loss": 0.8978,
+      "step": 376000
+    },
+    {
+      "epoch": 0.38,
+      "learning_rate": 7.350601462458024e-05,
+      "loss": 0.8909,
+      "step": 377000
+    },
+    {
+      "epoch": 0.38,
+      "learning_rate": 7.335995072666848e-05,
+      "loss": 0.8812,
+      "step": 378000
+    },
+    {
+      "epoch": 0.38,
+      "learning_rate": 7.32136313680782e-05,
+      "loss": 0.8897,
+      "step": 379000
+    },
+    {
+      "epoch": 0.38,
+      "learning_rate": 7.30670581489344e-05,
+      "loss": 0.8959,
+      "step": 380000
+    },
+    {
+      "epoch": 0.38,
+      "learning_rate": 7.292023267213835e-05,
+      "loss": 0.8773,
+      "step": 381000
+    },
+    {
+      "epoch": 0.38,
+      "learning_rate": 7.277315654334997e-05,
+      "loss": 0.8846,
+      "step": 382000
+    },
+    {
+      "epoch": 0.38,
+      "learning_rate": 7.262583137097018e-05,
+      "loss": 0.8715,
+      "step": 383000
+    },
+    {
+      "epoch": 0.38,
+      "learning_rate": 7.247825876612353e-05,
+      "loss": 0.8659,
+      "step": 384000
+    },
+    {
+      "epoch": 0.39,
+      "learning_rate": 7.233044034264034e-05,
+      "loss": 0.8671,
+      "step": 385000
+    },
+    {
+      "epoch": 0.39,
+      "learning_rate": 7.218237771703921e-05,
+      "loss": 0.835,
+      "step": 386000
+    },
+    {
+      "epoch": 0.39,
+      "learning_rate": 7.203407250850928e-05,
+      "loss": 0.8407,
+      "step": 387000
+    },
+    {
+      "epoch": 0.39,
+      "learning_rate": 7.188552633889259e-05,
+      "loss": 0.8578,
+      "step": 388000
+    },
+    {
+      "epoch": 0.39,
+      "learning_rate": 7.173674083266624e-05,
+      "loss": 0.8489,
+      "step": 389000
+    },
+    {
+      "epoch": 0.39,
+      "learning_rate": 7.158771761692464e-05,
+      "loss": 0.839,
+      "step": 390000
+    },
+    {
+      "epoch": 0.39,
+      "learning_rate": 7.143845832136188e-05,
+      "loss": 0.8651,
+      "step": 391000
+    },
+    {
+      "epoch": 0.39,
+      "learning_rate": 7.128896457825364e-05,
+      "loss": 0.8678,
+      "step": 392000
+    },
+    {
+      "epoch": 0.39,
+      "learning_rate": 7.113923802243957e-05,
+      "loss": 0.8823,
+      "step": 393000
+    },
+    {
+      "epoch": 0.39,
+      "learning_rate": 7.09892802913053e-05,
+      "loss": 0.8914,
+      "step": 394000
+    },
+    {
+      "epoch": 0.4,
+      "learning_rate": 7.083909302476453e-05,
+      "loss": 0.885,
+      "step": 395000
+    },
+    {
+      "epoch": 0.4,
+      "learning_rate": 7.068867786524116e-05,
+      "loss": 0.8884,
+      "step": 396000
+    },
+    {
+      "epoch": 0.4,
+      "learning_rate": 7.053803645765128e-05,
+      "loss": 0.8554,
+      "step": 397000
+    },
+    {
+      "epoch": 0.4,
+      "learning_rate": 7.038717044938519e-05,
+      "loss": 0.8701,
+      "step": 398000
+    },
+    {
+      "epoch": 0.4,
+      "learning_rate": 7.023608149028937e-05,
+      "loss": 0.8664,
+      "step": 399000
+    },
+    {
+      "epoch": 0.4,
+      "learning_rate": 7.008477123264848e-05,
+      "loss": 0.8406,
+      "step": 400000
+    },
+    {
+      "epoch": 0.4,
+      "eval_loss": 0.6259231567382812,
+      "eval_runtime": 96.8276,
+      "eval_samples_per_second": 51.638,
+      "eval_steps_per_second": 0.413,
+      "step": 400000
     }
   ],
   "max_steps": 1000000,
   "num_train_epochs": 9223372036854775807,
-  "total_flos": 5.9024955408384e+18,
+  "total_flos": 6.7457091895296e+18,
   "trial_name": null,
   "trial_params": null
 }
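The appended `log_history` entries cover steps 351000 through 400000: the training loss stays in the 0.84-0.93 range, the learning rate decays from about 7.72e-05 to 7.01e-05 over that span, and the step-400000 evaluation records an `eval_loss` of 0.6259. A minimal sketch for summarizing the file locally is below; it assumes `trainer_state.json` has been downloaded to the path shown and uses only keys that appear in this diff.

```python
# Minimal sketch: summarize the trainer_state.json shown in the diff above.
# Assumes the file has been downloaded; only keys visible in the diff are used.
import json

with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print(f"global_step: {state['global_step']} / {state['max_steps']}")

train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

# Focus on the entries this commit added (after step 350000).
recent = [e for e in train_logs if e["step"] > 350000]
if recent:
    mean_loss = sum(e["loss"] for e in recent) / len(recent)
    print(
        f"steps 351k-400k: mean train loss {mean_loss:.4f}, "
        f"lr {recent[0]['learning_rate']:.3e} -> {recent[-1]['learning_rate']:.3e}"
    )

if eval_logs:
    last_eval = eval_logs[-1]
    print(f"last eval @ step {last_eval['step']}: eval_loss {last_eval['eval_loss']:.4f}")
```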
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4a4b61df7a33ac16523e6c4ab0a364e641dc3a424040f9c827d0938f4fbd92de
+oid sha256:212e05f8e854456a203541ab3ff307559133e87b152fda8eac6ef2b267ce568c
 size 737971755
runs/Jan25_00-37-02_t1v-n-9f780742-w-0/events.out.tfevents.1674607228.t1v-n-9f780742-w-0.3357200.0 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bcc882a80b6103706d77991d69242068edc8820d6c9f49efb947dfc73b394f67
-size 12038
+oid sha256:a4c0bb664dfae777337994328c5a785438c146f717652b2c916c004acc03b050
+size 20314