{
"best_metric": 1.3951435089111328,
"best_model_checkpoint": "./results/checkpoint-18616",
"epoch": 6.0,
"eval_steps": 500,
"global_step": 27924,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.10743446497636441,
"grad_norm": 4.838443756103516,
"learning_rate": 4.9462827675118175e-05,
"loss": 3.2819,
"step": 500
},
{
"epoch": 0.21486892995272883,
"grad_norm": 3.5501515865325928,
"learning_rate": 4.892565535023636e-05,
"loss": 2.3948,
"step": 1000
},
{
"epoch": 0.32230339492909327,
"grad_norm": 3.361682653427124,
"learning_rate": 4.8388483025354535e-05,
"loss": 2.1166,
"step": 1500
},
{
"epoch": 0.42973785990545765,
"grad_norm": 3.207756996154785,
"learning_rate": 4.7851310700472715e-05,
"loss": 1.983,
"step": 2000
},
{
"epoch": 0.5371723248818221,
"grad_norm": 3.38034725189209,
"learning_rate": 4.7314138375590894e-05,
"loss": 1.8785,
"step": 2500
},
{
"epoch": 0.6446067898581865,
"grad_norm": 3.3562798500061035,
"learning_rate": 4.677696605070907e-05,
"loss": 1.7957,
"step": 3000
},
{
"epoch": 0.752041254834551,
"grad_norm": 2.897372007369995,
"learning_rate": 4.623979372582725e-05,
"loss": 1.7387,
"step": 3500
},
{
"epoch": 0.8594757198109153,
"grad_norm": 2.9306259155273438,
"learning_rate": 4.570262140094543e-05,
"loss": 1.7103,
"step": 4000
},
{
"epoch": 0.9669101847872797,
"grad_norm": 2.7418951988220215,
"learning_rate": 4.5165449076063606e-05,
"loss": 1.6637,
"step": 4500
},
{
"epoch": 1.0,
"eval_loss": 1.5266185998916626,
"eval_rouge2_fmeasure": 0.4101,
"eval_rouge2_precision": 0.4101,
"eval_rouge2_recall": 0.4101,
"eval_runtime": 72.9922,
"eval_samples_per_second": 65.582,
"eval_steps_per_second": 1.028,
"step": 4654
},
{
"epoch": 1.0743446497636442,
"grad_norm": 3.1563010215759277,
"learning_rate": 4.462827675118178e-05,
"loss": 1.4927,
"step": 5000
},
{
"epoch": 1.1817791147400085,
"grad_norm": 2.845944404602051,
"learning_rate": 4.409110442629996e-05,
"loss": 1.4188,
"step": 5500
},
{
"epoch": 1.289213579716373,
"grad_norm": 2.7506182193756104,
"learning_rate": 4.355393210141814e-05,
"loss": 1.418,
"step": 6000
},
{
"epoch": 1.3966480446927374,
"grad_norm": 2.623769760131836,
"learning_rate": 4.301675977653631e-05,
"loss": 1.406,
"step": 6500
},
{
"epoch": 1.504082509669102,
"grad_norm": 2.671093225479126,
"learning_rate": 4.247958745165449e-05,
"loss": 1.4021,
"step": 7000
},
{
"epoch": 1.6115169746454663,
"grad_norm": 2.96425724029541,
"learning_rate": 4.194241512677267e-05,
"loss": 1.3883,
"step": 7500
},
{
"epoch": 1.7189514396218306,
"grad_norm": 2.6702911853790283,
"learning_rate": 4.140524280189085e-05,
"loss": 1.3883,
"step": 8000
},
{
"epoch": 1.8263859045981952,
"grad_norm": 2.6645655632019043,
"learning_rate": 4.0868070477009024e-05,
"loss": 1.3761,
"step": 8500
},
{
"epoch": 1.9338203695745595,
"grad_norm": 3.023545265197754,
"learning_rate": 4.0330898152127204e-05,
"loss": 1.376,
"step": 9000
},
{
"epoch": 2.0,
"eval_loss": 1.410889744758606,
"eval_rouge2_fmeasure": 0.4238,
"eval_rouge2_precision": 0.4238,
"eval_rouge2_recall": 0.4238,
"eval_runtime": 72.9371,
"eval_samples_per_second": 65.632,
"eval_steps_per_second": 1.028,
"step": 9308
},
{
"epoch": 2.041254834550924,
"grad_norm": 2.6128861904144287,
"learning_rate": 3.979372582724538e-05,
"loss": 1.2778,
"step": 9500
},
{
"epoch": 2.1486892995272884,
"grad_norm": 2.602187395095825,
"learning_rate": 3.9256553502363556e-05,
"loss": 1.1422,
"step": 10000
},
{
"epoch": 2.256123764503653,
"grad_norm": 2.725907802581787,
"learning_rate": 3.8719381177481736e-05,
"loss": 1.1449,
"step": 10500
},
{
"epoch": 2.363558229480017,
"grad_norm": 2.6348464488983154,
"learning_rate": 3.8182208852599916e-05,
"loss": 1.1529,
"step": 11000
},
{
"epoch": 2.4709926944563816,
"grad_norm": 2.8584792613983154,
"learning_rate": 3.764503652771809e-05,
"loss": 1.1552,
"step": 11500
},
{
"epoch": 2.578427159432746,
"grad_norm": 2.59346342086792,
"learning_rate": 3.7107864202836275e-05,
"loss": 1.1666,
"step": 12000
},
{
"epoch": 2.6858616244091102,
"grad_norm": 2.655982255935669,
"learning_rate": 3.657069187795445e-05,
"loss": 1.1658,
"step": 12500
},
{
"epoch": 2.793296089385475,
"grad_norm": 2.5818564891815186,
"learning_rate": 3.603351955307263e-05,
"loss": 1.1674,
"step": 13000
},
{
"epoch": 2.9007305543618394,
"grad_norm": 2.8041298389434814,
"learning_rate": 3.549634722819081e-05,
"loss": 1.1684,
"step": 13500
},
{
"epoch": 3.0,
"eval_loss": 1.3994433879852295,
"eval_rouge2_fmeasure": 0.4311,
"eval_rouge2_precision": 0.4311,
"eval_rouge2_recall": 0.4311,
"eval_runtime": 72.9642,
"eval_samples_per_second": 65.607,
"eval_steps_per_second": 1.028,
"step": 13962
},
{
"epoch": 3.008165019338204,
"grad_norm": 2.646003246307373,
"learning_rate": 3.495917490330898e-05,
"loss": 1.1485,
"step": 14000
},
{
"epoch": 3.115599484314568,
"grad_norm": 2.8257687091827393,
"learning_rate": 3.442200257842716e-05,
"loss": 0.9456,
"step": 14500
},
{
"epoch": 3.2230339492909326,
"grad_norm": 2.9319422245025635,
"learning_rate": 3.388483025354534e-05,
"loss": 0.961,
"step": 15000
},
{
"epoch": 3.330468414267297,
"grad_norm": 2.7773501873016357,
"learning_rate": 3.334765792866352e-05,
"loss": 0.9748,
"step": 15500
},
{
"epoch": 3.4379028792436612,
"grad_norm": 2.673140048980713,
"learning_rate": 3.281048560378169e-05,
"loss": 0.9822,
"step": 16000
},
{
"epoch": 3.5453373442200258,
"grad_norm": 2.7191991806030273,
"learning_rate": 3.227331327889987e-05,
"loss": 0.9808,
"step": 16500
},
{
"epoch": 3.6527718091963903,
"grad_norm": 2.717005491256714,
"learning_rate": 3.173614095401805e-05,
"loss": 0.9832,
"step": 17000
},
{
"epoch": 3.760206274172755,
"grad_norm": 2.696438789367676,
"learning_rate": 3.1198968629136225e-05,
"loss": 0.9954,
"step": 17500
},
{
"epoch": 3.867640739149119,
"grad_norm": 2.5392231941223145,
"learning_rate": 3.066179630425441e-05,
"loss": 0.9978,
"step": 18000
},
{
"epoch": 3.9750752041254835,
"grad_norm": 3.3882222175598145,
"learning_rate": 3.0124623979372585e-05,
"loss": 0.9957,
"step": 18500
},
{
"epoch": 4.0,
"eval_loss": 1.3951435089111328,
"eval_rouge2_fmeasure": 0.4323,
"eval_rouge2_precision": 0.4323,
"eval_rouge2_recall": 0.4323,
"eval_runtime": 72.9673,
"eval_samples_per_second": 65.605,
"eval_steps_per_second": 1.028,
"step": 18616
},
{
"epoch": 4.082509669101848,
"grad_norm": 2.655376672744751,
"learning_rate": 2.958745165449076e-05,
"loss": 0.8449,
"step": 19000
},
{
"epoch": 4.189944134078212,
"grad_norm": 2.5333900451660156,
"learning_rate": 2.9050279329608944e-05,
"loss": 0.8125,
"step": 19500
},
{
"epoch": 4.297378599054577,
"grad_norm": 2.4350857734680176,
"learning_rate": 2.8513107004727117e-05,
"loss": 0.8184,
"step": 20000
},
{
"epoch": 4.404813064030941,
"grad_norm": 2.595292329788208,
"learning_rate": 2.7975934679845293e-05,
"loss": 0.8317,
"step": 20500
},
{
"epoch": 4.512247529007306,
"grad_norm": 2.4971327781677246,
"learning_rate": 2.743876235496347e-05,
"loss": 0.833,
"step": 21000
},
{
"epoch": 4.61968199398367,
"grad_norm": 2.7136857509613037,
"learning_rate": 2.6901590030081653e-05,
"loss": 0.8361,
"step": 21500
},
{
"epoch": 4.727116458960034,
"grad_norm": 2.8456897735595703,
"learning_rate": 2.636441770519983e-05,
"loss": 0.8458,
"step": 22000
},
{
"epoch": 4.834550923936399,
"grad_norm": 2.601877212524414,
"learning_rate": 2.5827245380318005e-05,
"loss": 0.846,
"step": 22500
},
{
"epoch": 4.941985388912763,
"grad_norm": 2.6655149459838867,
"learning_rate": 2.5290073055436185e-05,
"loss": 0.848,
"step": 23000
},
{
"epoch": 5.0,
"eval_loss": 1.42265784740448,
"eval_rouge2_fmeasure": 0.4307,
"eval_rouge2_precision": 0.4307,
"eval_rouge2_recall": 0.4307,
"eval_runtime": 73.0163,
"eval_samples_per_second": 65.561,
"eval_steps_per_second": 1.027,
"step": 23270
},
{
"epoch": 5.049419853889128,
"grad_norm": 2.525017023086548,
"learning_rate": 2.475290073055436e-05,
"loss": 0.7735,
"step": 23500
},
{
"epoch": 5.156854318865492,
"grad_norm": 2.2720000743865967,
"learning_rate": 2.421572840567254e-05,
"loss": 0.6797,
"step": 24000
},
{
"epoch": 5.264288783841857,
"grad_norm": 2.633282423019409,
"learning_rate": 2.3678556080790718e-05,
"loss": 0.6955,
"step": 24500
},
{
"epoch": 5.3717232488182205,
"grad_norm": 2.6699864864349365,
"learning_rate": 2.3141383755908897e-05,
"loss": 0.7026,
"step": 25000
},
{
"epoch": 5.479157713794585,
"grad_norm": 2.765111207962036,
"learning_rate": 2.2604211431027074e-05,
"loss": 0.7045,
"step": 25500
},
{
"epoch": 5.58659217877095,
"grad_norm": 2.8248813152313232,
"learning_rate": 2.206703910614525e-05,
"loss": 0.7148,
"step": 26000
},
{
"epoch": 5.694026643747314,
"grad_norm": 2.7719056606292725,
"learning_rate": 2.152986678126343e-05,
"loss": 0.7218,
"step": 26500
},
{
"epoch": 5.801461108723679,
"grad_norm": 2.8628671169281006,
"learning_rate": 2.099269445638161e-05,
"loss": 0.717,
"step": 27000
},
{
"epoch": 5.908895573700043,
"grad_norm": 2.479224443435669,
"learning_rate": 2.0455522131499786e-05,
"loss": 0.7247,
"step": 27500
},
{
"epoch": 6.0,
"eval_loss": 1.479453682899475,
"eval_rouge2_fmeasure": 0.43,
"eval_rouge2_precision": 0.43,
"eval_rouge2_recall": 0.43,
"eval_runtime": 73.0025,
"eval_samples_per_second": 65.573,
"eval_steps_per_second": 1.027,
"step": 27924
}
],
"logging_steps": 500,
"max_steps": 46540,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 3,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 2
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.9760389250501837e+17,
"train_batch_size": 64,
"trial_name": null,
"trial_params": null
}