{
"best_metric": 0.3950704777630208,
"best_model_checkpoint": "/home2/s5432073/language-tech-project/results/ltp-roberta-large-defaultltp-roberta-large-default-9/checkpoint-1200",
"epoch": 9.467455621301776,
"global_step": 1600,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 1.18,
"learning_rate": 9.28348909657321e-06,
"loss": 0.4412,
"step": 200
},
{
"epoch": 1.18,
"eval_f1": 0.15828796157072217,
"eval_f1_all": [
0.2097902097902098,
0.2789115646258503,
0.0,
0.0,
0.547798066595059,
0.0,
0.0,
0.0,
0.7165913492575855,
0.636150234741784,
0.0,
0.013071895424836602,
0.0,
0.0,
0.0,
0.0,
0.6257166257166257,
0.13235294117647056,
0.0,
0.005376344086021506
],
"eval_loss": 0.35295796394348145,
"eval_runtime": 2.6168,
"eval_samples_per_second": 724.543,
"eval_steps_per_second": 22.929,
"step": 200
},
{
"epoch": 2.37,
"learning_rate": 8.037383177570094e-06,
"loss": 0.3276,
"step": 400
},
{
"epoch": 2.37,
"eval_f1": 0.27070745440431776,
"eval_f1_all": [
0.3987730061349693,
0.37480314960629924,
0.0,
0.0,
0.6236125126135217,
0.05917159763313609,
0.3404255319148936,
0.0,
0.7367021276595745,
0.5967130214917826,
0.11518324607329843,
0.4112149532710281,
0.0,
0.0,
0.15251798561151078,
0.0,
0.6607431340872374,
0.624390243902439,
0.017621145374449337,
0.30227743271221535
],
"eval_loss": 0.3264588415622711,
"eval_runtime": 2.8315,
"eval_samples_per_second": 669.617,
"eval_steps_per_second": 21.19,
"step": 400
},
{
"epoch": 3.55,
"learning_rate": 6.791277258566978e-06,
"loss": 0.2941,
"step": 600
},
{
"epoch": 3.55,
"eval_f1": 0.3284303777422183,
"eval_f1_all": [
0.5230769230769231,
0.5660847880299252,
0.014388489208633093,
0.09090909090909091,
0.6258776328986961,
0.0588235294117647,
0.2923976608187135,
0.0,
0.7289595758780648,
0.6356589147286822,
0.28971962616822433,
0.40816326530612246,
0.0,
0.015625,
0.47457627118644063,
0.06382978723404255,
0.6955153422501967,
0.635897435897436,
0.09795918367346938,
0.3511450381679389
],
"eval_loss": 0.31264743208885193,
"eval_runtime": 2.8103,
"eval_samples_per_second": 674.672,
"eval_steps_per_second": 21.35,
"step": 600
},
{
"epoch": 4.73,
"learning_rate": 5.545171339563863e-06,
"loss": 0.2732,
"step": 800
},
{
"epoch": 4.73,
"eval_f1": 0.3619965683236828,
"eval_f1_all": [
0.5528089887640449,
0.5821596244131456,
0.0945945945945946,
0.18965517241379312,
0.6472868217054263,
0.13829787234042554,
0.3692307692307692,
0.0,
0.7455386649041639,
0.638095238095238,
0.37606837606837606,
0.4579025110782866,
0.0,
0.015503875968992248,
0.5087179487179486,
0.07719298245614035,
0.6716917922948074,
0.6666666666666667,
0.08298755186721991,
0.425531914893617
],
"eval_loss": 0.3080659508705139,
"eval_runtime": 2.7772,
"eval_samples_per_second": 682.698,
"eval_steps_per_second": 21.604,
"step": 800
},
{
"epoch": 5.92,
"learning_rate": 4.299065420560748e-06,
"loss": 0.2559,
"step": 1000
},
{
"epoch": 5.92,
"eval_f1": 0.37526293741944416,
"eval_f1_all": [
0.5243619489559165,
0.5671641791044777,
0.06896551724137931,
0.1724137931034483,
0.6521327014218009,
0.11363636363636363,
0.3711340206185567,
0.0,
0.7530312699425653,
0.6465116279069768,
0.42857142857142855,
0.5034387895460799,
0.0,
0.015384615384615385,
0.5711743772241993,
0.17629179331306988,
0.6535162950257289,
0.6972477064220184,
0.11290322580645162,
0.4773790951638066
],
"eval_loss": 0.30820420384407043,
"eval_runtime": 2.6936,
"eval_samples_per_second": 703.901,
"eval_steps_per_second": 22.275,
"step": 1000
},
{
"epoch": 7.1,
"learning_rate": 3.0529595015576325e-06,
"loss": 0.2414,
"step": 1200
},
{
"epoch": 7.1,
"eval_f1": 0.3950704777630208,
"eval_f1_all": [
0.5097087378640777,
0.5710872162485066,
0.13924050632911392,
0.25757575757575757,
0.6406406406406406,
0.17346938775510204,
0.3553299492385787,
0.0,
0.7513020833333334,
0.6586586586586587,
0.47727272727272735,
0.5625,
0.0,
0.015267175572519085,
0.5554520037278659,
0.24444444444444446,
0.6672012830793906,
0.7079646017699114,
0.14925373134328354,
0.4650406504065041
],
"eval_loss": 0.309607595205307,
"eval_runtime": 2.8834,
"eval_samples_per_second": 657.552,
"eval_steps_per_second": 20.809,
"step": 1200
},
{
"epoch": 8.28,
"learning_rate": 1.8068535825545173e-06,
"loss": 0.2305,
"step": 1400
},
{
"epoch": 8.28,
"eval_f1": 0.38770117434745416,
"eval_f1_all": [
0.5200945626477541,
0.5766590389016018,
0.11842105263157895,
0.22399999999999998,
0.6587771203155818,
0.1658031088082902,
0.380952380952381,
0.0,
0.7496774193548387,
0.6578657865786579,
0.4302788844621514,
0.5163043478260869,
0.0,
0.015267175572519085,
0.5784832451499118,
0.1567398119122257,
0.7061435973353072,
0.690909090909091,
0.1450381679389313,
0.4626086956521739
],
"eval_loss": 0.3088609278202057,
"eval_runtime": 2.6739,
"eval_samples_per_second": 709.087,
"eval_steps_per_second": 22.439,
"step": 1400
},
{
"epoch": 9.47,
"learning_rate": 5.607476635514019e-07,
"loss": 0.2244,
"step": 1600
},
{
"epoch": 9.47,
"eval_f1": 0.39269551799562785,
"eval_f1_all": [
0.5283018867924528,
0.5794392523364486,
0.11612903225806452,
0.22047244094488186,
0.6502369668246445,
0.19095477386934676,
0.3864734299516908,
0.0,
0.7457404980340759,
0.6617179215270413,
0.43307086614173235,
0.5231607629427792,
0.0,
0.015151515151515152,
0.5685740236148956,
0.2138728323699422,
0.6947852760736196,
0.7031963470319635,
0.14869888475836432,
0.47393364928909953
],
"eval_loss": 0.3094000220298767,
"eval_runtime": 3.0101,
"eval_samples_per_second": 629.877,
"eval_steps_per_second": 19.933,
"step": 1600
}
],
"max_steps": 1690,
"num_train_epochs": 10,
"total_flos": 8787505912201752.0,
"trial_name": null,
"trial_params": null
}