{
  "best_metric": 0.2233106642961502,
  "best_model_checkpoint": "/exports/eddie/scratch/s1970716/models/question_gen/t5_3b_epoch_3_qa/checkpoint-342",
  "epoch": 2.9983561643835617,
  "global_step": 1026,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03,
      "learning_rate": 9.902534113060428e-05,
      "loss": 0.2782,
      "step": 10
    },
    {
      "epoch": 0.06,
      "learning_rate": 9.805068226120859e-05,
      "loss": 0.221,
      "step": 20
    },
    {
      "epoch": 0.09,
      "learning_rate": 9.707602339181286e-05,
      "loss": 0.2068,
      "step": 30
    },
    {
      "epoch": 0.12,
      "learning_rate": 9.610136452241715e-05,
      "loss": 0.2119,
      "step": 40
    },
    {
      "epoch": 0.15,
      "learning_rate": 9.512670565302145e-05,
      "loss": 0.2116,
      "step": 50
    },
    {
      "epoch": 0.18,
      "learning_rate": 9.415204678362574e-05,
      "loss": 0.2104,
      "step": 60
    },
    {
      "epoch": 0.2,
      "learning_rate": 9.317738791423001e-05,
      "loss": 0.2137,
      "step": 70
    },
    {
      "epoch": 0.23,
      "learning_rate": 9.220272904483432e-05,
      "loss": 0.2157,
      "step": 80
    },
    {
      "epoch": 0.26,
      "learning_rate": 9.12280701754386e-05,
      "loss": 0.2052,
      "step": 90
    },
    {
      "epoch": 0.29,
      "learning_rate": 9.025341130604289e-05,
      "loss": 0.2062,
      "step": 100
    },
    {
      "epoch": 0.32,
      "learning_rate": 8.927875243664718e-05,
      "loss": 0.2091,
      "step": 110
    },
    {
      "epoch": 0.35,
      "learning_rate": 8.830409356725147e-05,
      "loss": 0.2161,
      "step": 120
    },
    {
      "epoch": 0.38,
      "learning_rate": 8.732943469785574e-05,
      "loss": 0.2051,
      "step": 130
    },
    {
      "epoch": 0.41,
      "learning_rate": 8.635477582846005e-05,
      "loss": 0.2112,
      "step": 140
    },
    {
      "epoch": 0.44,
      "learning_rate": 8.538011695906433e-05,
      "loss": 0.1951,
      "step": 150
    },
    {
      "epoch": 0.47,
      "learning_rate": 8.440545808966862e-05,
      "loss": 0.2062,
      "step": 160
    },
    {
      "epoch": 0.5,
      "learning_rate": 8.343079922027291e-05,
      "loss": 0.2046,
      "step": 170
    },
    {
      "epoch": 0.53,
      "learning_rate": 8.24561403508772e-05,
      "loss": 0.1999,
      "step": 180
    },
    {
      "epoch": 0.56,
      "learning_rate": 8.148148148148148e-05,
      "loss": 0.2047,
      "step": 190
    },
    {
      "epoch": 0.58,
      "learning_rate": 8.050682261208578e-05,
      "loss": 0.2042,
      "step": 200
    },
    {
      "epoch": 0.61,
      "learning_rate": 7.953216374269006e-05,
      "loss": 0.1995,
      "step": 210
    },
    {
      "epoch": 0.64,
      "learning_rate": 7.855750487329435e-05,
      "loss": 0.1926,
      "step": 220
    },
    {
      "epoch": 0.67,
      "learning_rate": 7.758284600389864e-05,
      "loss": 0.204,
      "step": 230
    },
    {
      "epoch": 0.7,
      "learning_rate": 7.660818713450293e-05,
      "loss": 0.1925,
      "step": 240
    },
    {
      "epoch": 0.73,
      "learning_rate": 7.563352826510721e-05,
      "loss": 0.2017,
      "step": 250
    },
    {
      "epoch": 0.76,
      "learning_rate": 7.465886939571151e-05,
      "loss": 0.1984,
      "step": 260
    },
    {
      "epoch": 0.79,
      "learning_rate": 7.368421052631579e-05,
      "loss": 0.1941,
      "step": 270
    },
    {
      "epoch": 0.82,
      "learning_rate": 7.270955165692008e-05,
      "loss": 0.1932,
      "step": 280
    },
    {
      "epoch": 0.85,
      "learning_rate": 7.173489278752437e-05,
      "loss": 0.1979,
      "step": 290
    },
    {
      "epoch": 0.88,
      "learning_rate": 7.076023391812866e-05,
      "loss": 0.1994,
      "step": 300
    },
    {
      "epoch": 0.91,
      "learning_rate": 6.978557504873294e-05,
      "loss": 0.1935,
      "step": 310
    },
    {
      "epoch": 0.94,
      "learning_rate": 6.881091617933724e-05,
      "loss": 0.1951,
      "step": 320
    },
    {
      "epoch": 0.96,
      "learning_rate": 6.783625730994152e-05,
      "loss": 0.1964,
      "step": 330
    },
    {
      "epoch": 0.99,
      "learning_rate": 6.686159844054581e-05,
      "loss": 0.2003,
      "step": 340
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.2233106642961502,
      "eval_runtime": 133.4707,
      "eval_samples_per_second": 79.193,
      "eval_steps_per_second": 9.905,
      "step": 342
    },
    {
      "epoch": 1.02,
      "learning_rate": 6.58869395711501e-05,
      "loss": 0.1463,
      "step": 350
    },
    {
      "epoch": 1.05,
      "learning_rate": 6.49122807017544e-05,
      "loss": 0.1331,
      "step": 360
    },
    {
      "epoch": 1.08,
      "learning_rate": 6.393762183235867e-05,
      "loss": 0.1401,
      "step": 370
    },
    {
      "epoch": 1.11,
      "learning_rate": 6.296296296296296e-05,
      "loss": 0.1359,
      "step": 380
    },
    {
      "epoch": 1.14,
      "learning_rate": 6.198830409356725e-05,
      "loss": 0.1372,
      "step": 390
    },
    {
      "epoch": 1.17,
      "learning_rate": 6.101364522417154e-05,
      "loss": 0.1361,
      "step": 400
    },
    {
      "epoch": 1.2,
      "learning_rate": 6.003898635477583e-05,
      "loss": 0.1408,
      "step": 410
    },
    {
      "epoch": 1.23,
      "learning_rate": 5.9064327485380125e-05,
      "loss": 0.1382,
      "step": 420
    },
    {
      "epoch": 1.26,
      "learning_rate": 5.80896686159844e-05,
      "loss": 0.1316,
      "step": 430
    },
    {
      "epoch": 1.29,
      "learning_rate": 5.71150097465887e-05,
      "loss": 0.1416,
      "step": 440
    },
    {
      "epoch": 1.32,
      "learning_rate": 5.6140350877192984e-05,
      "loss": 0.1371,
      "step": 450
    },
    {
      "epoch": 1.34,
      "learning_rate": 5.5165692007797275e-05,
      "loss": 0.1366,
      "step": 460
    },
    {
      "epoch": 1.37,
      "learning_rate": 5.419103313840156e-05,
      "loss": 0.1359,
      "step": 470
    },
    {
      "epoch": 1.4,
      "learning_rate": 5.3216374269005856e-05,
      "loss": 0.1395,
      "step": 480
    },
    {
      "epoch": 1.43,
      "learning_rate": 5.2241715399610133e-05,
      "loss": 0.1353,
      "step": 490
    },
    {
      "epoch": 1.46,
      "learning_rate": 5.126705653021443e-05,
      "loss": 0.1369,
      "step": 500
    },
    {
      "epoch": 1.49,
      "learning_rate": 5.0292397660818715e-05,
      "loss": 0.1296,
      "step": 510
    },
    {
      "epoch": 1.52,
      "learning_rate": 4.9317738791423e-05,
      "loss": 0.1323,
      "step": 520
    },
    {
      "epoch": 1.55,
      "learning_rate": 4.834307992202729e-05,
      "loss": 0.1363,
      "step": 530
    },
    {
      "epoch": 1.58,
      "learning_rate": 4.736842105263158e-05,
      "loss": 0.1492,
      "step": 540
    },
    {
      "epoch": 1.61,
      "learning_rate": 4.6393762183235865e-05,
      "loss": 0.1355,
      "step": 550
    },
    {
      "epoch": 1.64,
      "learning_rate": 4.5419103313840156e-05,
      "loss": 0.134,
      "step": 560
    },
    {
      "epoch": 1.67,
      "learning_rate": 4.4444444444444447e-05,
      "loss": 0.1385,
      "step": 570
    },
    {
      "epoch": 1.69,
      "learning_rate": 4.346978557504873e-05,
      "loss": 0.1301,
      "step": 580
    },
    {
      "epoch": 1.72,
      "learning_rate": 4.249512670565302e-05,
      "loss": 0.1442,
      "step": 590
    },
    {
      "epoch": 1.75,
      "learning_rate": 4.152046783625731e-05,
      "loss": 0.135,
      "step": 600
    },
    {
      "epoch": 1.78,
      "learning_rate": 4.0545808966861596e-05,
      "loss": 0.1278,
      "step": 610
    },
    {
      "epoch": 1.81,
      "learning_rate": 3.957115009746589e-05,
      "loss": 0.1389,
      "step": 620
    },
    {
      "epoch": 1.84,
      "learning_rate": 3.859649122807018e-05,
      "loss": 0.1386,
      "step": 630
    },
    {
      "epoch": 1.87,
      "learning_rate": 3.762183235867446e-05,
      "loss": 0.1313,
      "step": 640
    },
    {
      "epoch": 1.9,
      "learning_rate": 3.664717348927875e-05,
      "loss": 0.1325,
      "step": 650
    },
    {
      "epoch": 1.93,
      "learning_rate": 3.5672514619883044e-05,
      "loss": 0.1315,
      "step": 660
    },
    {
      "epoch": 1.96,
      "learning_rate": 3.469785575048733e-05,
      "loss": 0.1344,
      "step": 670
    },
    {
      "epoch": 1.99,
      "learning_rate": 3.372319688109162e-05,
      "loss": 0.1331,
      "step": 680
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.2396751344203949,
      "eval_runtime": 133.3515,
      "eval_samples_per_second": 79.264,
      "eval_steps_per_second": 9.914,
      "step": 684
    },
    {
      "epoch": 2.02,
      "learning_rate": 3.274853801169591e-05,
      "loss": 0.1059,
      "step": 690
    },
    {
      "epoch": 2.05,
      "learning_rate": 3.1773879142300193e-05,
      "loss": 0.1006,
      "step": 700
    },
    {
      "epoch": 2.07,
      "learning_rate": 3.0799220272904484e-05,
      "loss": 0.0941,
      "step": 710
    },
    {
      "epoch": 2.1,
      "learning_rate": 2.9824561403508772e-05,
      "loss": 0.1033,
      "step": 720
    },
    {
      "epoch": 2.13,
      "learning_rate": 2.884990253411306e-05,
      "loss": 0.0978,
      "step": 730
    },
    {
      "epoch": 2.16,
      "learning_rate": 2.787524366471735e-05,
      "loss": 0.0983,
      "step": 740
    },
    {
      "epoch": 2.19,
      "learning_rate": 2.6900584795321637e-05,
      "loss": 0.0987,
      "step": 750
    },
    {
      "epoch": 2.22,
      "learning_rate": 2.5925925925925925e-05,
      "loss": 0.0985,
      "step": 760
    },
    {
      "epoch": 2.25,
      "learning_rate": 2.4951267056530216e-05,
      "loss": 0.0974,
      "step": 770
    },
    {
      "epoch": 2.28,
      "learning_rate": 2.3976608187134503e-05,
      "loss": 0.1001,
      "step": 780
    },
    {
      "epoch": 2.31,
      "learning_rate": 2.300194931773879e-05,
      "loss": 0.0961,
      "step": 790
    },
    {
      "epoch": 2.34,
      "learning_rate": 2.2027290448343078e-05,
      "loss": 0.0958,
      "step": 800
    },
    {
      "epoch": 2.37,
      "learning_rate": 2.105263157894737e-05,
      "loss": 0.0975,
      "step": 810
    },
    {
      "epoch": 2.4,
      "learning_rate": 2.0077972709551656e-05,
      "loss": 0.0955,
      "step": 820
    },
    {
      "epoch": 2.43,
      "learning_rate": 1.9103313840155944e-05,
      "loss": 0.0927,
      "step": 830
    },
    {
      "epoch": 2.45,
      "learning_rate": 1.8128654970760235e-05,
      "loss": 0.0958,
      "step": 840
    },
    {
      "epoch": 2.48,
      "learning_rate": 1.7153996101364522e-05,
      "loss": 0.0979,
      "step": 850
    },
    {
      "epoch": 2.51,
      "learning_rate": 1.617933723196881e-05,
      "loss": 0.1,
      "step": 860
    },
    {
      "epoch": 2.54,
      "learning_rate": 1.5204678362573099e-05,
      "loss": 0.0965,
      "step": 870
    },
    {
      "epoch": 2.57,
      "learning_rate": 1.4230019493177388e-05,
      "loss": 0.098,
      "step": 880
    },
    {
      "epoch": 2.6,
      "learning_rate": 1.3255360623781677e-05,
      "loss": 0.0981,
      "step": 890
    },
    {
      "epoch": 2.63,
      "learning_rate": 1.2280701754385964e-05,
      "loss": 0.1024,
      "step": 900
    },
    {
      "epoch": 2.66,
      "learning_rate": 1.1306042884990253e-05,
      "loss": 0.0962,
      "step": 910
    },
    {
      "epoch": 2.69,
      "learning_rate": 1.0331384015594543e-05,
      "loss": 0.0993,
      "step": 920
    },
    {
      "epoch": 2.72,
      "learning_rate": 9.35672514619883e-06,
      "loss": 0.0932,
      "step": 930
    },
    {
      "epoch": 2.75,
      "learning_rate": 8.38206627680312e-06,
      "loss": 0.0929,
      "step": 940
    },
    {
      "epoch": 2.78,
      "learning_rate": 7.4074074074074075e-06,
      "loss": 0.0881,
      "step": 950
    },
    {
      "epoch": 2.81,
      "learning_rate": 6.432748538011696e-06,
      "loss": 0.0923,
      "step": 960
    },
    {
      "epoch": 2.83,
      "learning_rate": 5.458089668615984e-06,
      "loss": 0.0935,
      "step": 970
    },
    {
      "epoch": 2.86,
      "learning_rate": 4.483430799220273e-06,
      "loss": 0.1015,
      "step": 980
    },
    {
      "epoch": 2.89,
      "learning_rate": 3.5087719298245615e-06,
      "loss": 0.0939,
      "step": 990
    },
    {
      "epoch": 2.92,
      "learning_rate": 2.5341130604288498e-06,
      "loss": 0.093,
      "step": 1000
    },
    {
      "epoch": 2.95,
      "learning_rate": 1.5594541910331385e-06,
      "loss": 0.0975,
      "step": 1010
    },
    {
      "epoch": 2.98,
      "learning_rate": 5.847953216374269e-07,
      "loss": 0.0971,
      "step": 1020
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.26710498332977295,
      "eval_runtime": 133.9576,
      "eval_samples_per_second": 78.906,
      "eval_steps_per_second": 9.869,
      "step": 1026
    },
    {
      "epoch": 3.0,
      "step": 1026,
      "total_flos": 1.4063349020358083e+18,
      "train_loss": 0.1461177020974559,
      "train_runtime": 16425.358,
      "train_samples_per_second": 15.999,
      "train_steps_per_second": 0.062
    }
  ],
  "max_steps": 1026,
  "num_train_epochs": 3,
  "total_flos": 1.4063349020358083e+18,
  "trial_name": null,
  "trial_params": null
}