Nllb_finetuned_education_en_kin / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"global_step": 46602,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"learning_rate": 4.946354233723875e-05,
"loss": 1.0067,
"step": 500
},
{
"epoch": 0.04,
"learning_rate": 4.892708467447749e-05,
"loss": 0.9999,
"step": 1000
},
{
"epoch": 0.06,
"learning_rate": 4.839062701171624e-05,
"loss": 0.9592,
"step": 1500
},
{
"epoch": 0.09,
"learning_rate": 4.785416934895498e-05,
"loss": 0.9386,
"step": 2000
},
{
"epoch": 0.11,
"learning_rate": 4.731771168619373e-05,
"loss": 0.9855,
"step": 2500
},
{
"epoch": 0.13,
"learning_rate": 4.6781254023432475e-05,
"loss": 0.9377,
"step": 3000
},
{
"epoch": 0.15,
"learning_rate": 4.6244796360671214e-05,
"loss": 0.9157,
"step": 3500
},
{
"epoch": 0.17,
"learning_rate": 4.5708338697909966e-05,
"loss": 0.9202,
"step": 4000
},
{
"epoch": 0.19,
"learning_rate": 4.517188103514871e-05,
"loss": 0.9141,
"step": 4500
},
{
"epoch": 0.21,
"learning_rate": 4.463542337238745e-05,
"loss": 0.9193,
"step": 5000
},
{
"epoch": 0.24,
"learning_rate": 4.40989657096262e-05,
"loss": 0.9009,
"step": 5500
},
{
"epoch": 0.26,
"learning_rate": 4.356250804686494e-05,
"loss": 0.9039,
"step": 6000
},
{
"epoch": 0.28,
"learning_rate": 4.3026050384103686e-05,
"loss": 0.9168,
"step": 6500
},
{
"epoch": 0.3,
"learning_rate": 4.248959272134244e-05,
"loss": 0.8819,
"step": 7000
},
{
"epoch": 0.32,
"learning_rate": 4.195313505858118e-05,
"loss": 0.9244,
"step": 7500
},
{
"epoch": 0.34,
"learning_rate": 4.141667739581992e-05,
"loss": 0.8773,
"step": 8000
},
{
"epoch": 0.36,
"learning_rate": 4.088021973305867e-05,
"loss": 0.8875,
"step": 8500
},
{
"epoch": 0.39,
"learning_rate": 4.034376207029741e-05,
"loss": 0.8741,
"step": 9000
},
{
"epoch": 0.41,
"learning_rate": 3.980730440753616e-05,
"loss": 0.8557,
"step": 9500
},
{
"epoch": 0.43,
"learning_rate": 3.9270846744774904e-05,
"loss": 0.8757,
"step": 10000
},
{
"epoch": 0.45,
"learning_rate": 3.873438908201365e-05,
"loss": 0.8607,
"step": 10500
},
{
"epoch": 0.47,
"learning_rate": 3.8197931419252395e-05,
"loss": 0.8673,
"step": 11000
},
{
"epoch": 0.49,
"learning_rate": 3.766147375649114e-05,
"loss": 0.8819,
"step": 11500
},
{
"epoch": 0.51,
"learning_rate": 3.7125016093729886e-05,
"loss": 0.8619,
"step": 12000
},
{
"epoch": 0.54,
"learning_rate": 3.658855843096863e-05,
"loss": 0.8415,
"step": 12500
},
{
"epoch": 0.56,
"learning_rate": 3.605210076820738e-05,
"loss": 0.839,
"step": 13000
},
{
"epoch": 0.58,
"learning_rate": 3.551564310544612e-05,
"loss": 0.8342,
"step": 13500
},
{
"epoch": 0.6,
"learning_rate": 3.497918544268486e-05,
"loss": 0.8431,
"step": 14000
},
{
"epoch": 0.62,
"learning_rate": 3.444272777992361e-05,
"loss": 0.8524,
"step": 14500
},
{
"epoch": 0.64,
"learning_rate": 3.390627011716235e-05,
"loss": 0.8397,
"step": 15000
},
{
"epoch": 0.67,
"learning_rate": 3.33698124544011e-05,
"loss": 0.8083,
"step": 15500
},
{
"epoch": 0.69,
"learning_rate": 3.283335479163985e-05,
"loss": 0.8509,
"step": 16000
},
{
"epoch": 0.71,
"learning_rate": 3.229689712887859e-05,
"loss": 0.8382,
"step": 16500
},
{
"epoch": 0.73,
"learning_rate": 3.176043946611733e-05,
"loss": 0.847,
"step": 17000
},
{
"epoch": 0.75,
"learning_rate": 3.122398180335608e-05,
"loss": 0.8309,
"step": 17500
},
{
"epoch": 0.77,
"learning_rate": 3.0687524140594824e-05,
"loss": 0.8202,
"step": 18000
},
{
"epoch": 0.79,
"learning_rate": 3.0151066477833573e-05,
"loss": 0.8044,
"step": 18500
},
{
"epoch": 0.82,
"learning_rate": 2.9614608815072315e-05,
"loss": 0.8184,
"step": 19000
},
{
"epoch": 0.84,
"learning_rate": 2.907815115231106e-05,
"loss": 0.8094,
"step": 19500
},
{
"epoch": 0.86,
"learning_rate": 2.8541693489549803e-05,
"loss": 0.8048,
"step": 20000
},
{
"epoch": 0.88,
"learning_rate": 2.800523582678855e-05,
"loss": 0.836,
"step": 20500
},
{
"epoch": 0.9,
"learning_rate": 2.7468778164027297e-05,
"loss": 0.7955,
"step": 21000
},
{
"epoch": 0.92,
"learning_rate": 2.693232050126604e-05,
"loss": 0.8221,
"step": 21500
},
{
"epoch": 0.94,
"learning_rate": 2.6395862838504788e-05,
"loss": 0.8056,
"step": 22000
},
{
"epoch": 0.97,
"learning_rate": 2.5859405175743533e-05,
"loss": 0.8292,
"step": 22500
},
{
"epoch": 0.99,
"learning_rate": 2.5322947512982275e-05,
"loss": 0.789,
"step": 23000
},
{
"epoch": 1.0,
"eval_bleu": 43.6468,
"eval_chrf++": 67.5204,
"eval_gen_len": 29.2541,
"eval_loss": 0.6743187308311462,
"eval_runtime": 1508.9571,
"eval_samples_per_second": 3.255,
"eval_spbleu": 56.2871,
"eval_steps_per_second": 0.651,
"eval_ter": 46.5481,
"step": 23301
},
{
"epoch": 1.01,
"learning_rate": 2.4786489850221024e-05,
"loss": 0.7236,
"step": 23500
},
{
"epoch": 1.03,
"learning_rate": 2.4250032187459766e-05,
"loss": 0.5744,
"step": 24000
},
{
"epoch": 1.05,
"learning_rate": 2.371357452469851e-05,
"loss": 0.5704,
"step": 24500
},
{
"epoch": 1.07,
"learning_rate": 2.3177116861937257e-05,
"loss": 0.5804,
"step": 25000
},
{
"epoch": 1.09,
"learning_rate": 2.2640659199176002e-05,
"loss": 0.5753,
"step": 25500
},
{
"epoch": 1.12,
"learning_rate": 2.2104201536414748e-05,
"loss": 0.6005,
"step": 26000
},
{
"epoch": 1.14,
"learning_rate": 2.1567743873653493e-05,
"loss": 0.5813,
"step": 26500
},
{
"epoch": 1.16,
"learning_rate": 2.103128621089224e-05,
"loss": 0.5689,
"step": 27000
},
{
"epoch": 1.18,
"learning_rate": 2.049482854813098e-05,
"loss": 0.5756,
"step": 27500
},
{
"epoch": 1.2,
"learning_rate": 1.995837088536973e-05,
"loss": 0.5856,
"step": 28000
},
{
"epoch": 1.22,
"learning_rate": 1.9421913222608475e-05,
"loss": 0.5632,
"step": 28500
},
{
"epoch": 1.24,
"learning_rate": 1.8885455559847217e-05,
"loss": 0.579,
"step": 29000
},
{
"epoch": 1.27,
"learning_rate": 1.8348997897085962e-05,
"loss": 0.5605,
"step": 29500
},
{
"epoch": 1.29,
"learning_rate": 1.7812540234324708e-05,
"loss": 0.5785,
"step": 30000
},
{
"epoch": 1.31,
"learning_rate": 1.7276082571563453e-05,
"loss": 0.5727,
"step": 30500
},
{
"epoch": 1.33,
"learning_rate": 1.67396249088022e-05,
"loss": 0.5666,
"step": 31000
},
{
"epoch": 1.35,
"learning_rate": 1.6203167246040944e-05,
"loss": 0.5807,
"step": 31500
},
{
"epoch": 1.37,
"learning_rate": 1.5666709583279686e-05,
"loss": 0.566,
"step": 32000
},
{
"epoch": 1.39,
"learning_rate": 1.5130251920518435e-05,
"loss": 0.5912,
"step": 32500
},
{
"epoch": 1.42,
"learning_rate": 1.4593794257757179e-05,
"loss": 0.5665,
"step": 33000
},
{
"epoch": 1.44,
"learning_rate": 1.4057336594995924e-05,
"loss": 0.5563,
"step": 33500
},
{
"epoch": 1.46,
"learning_rate": 1.3520878932234668e-05,
"loss": 0.5848,
"step": 34000
},
{
"epoch": 1.48,
"learning_rate": 1.2984421269473413e-05,
"loss": 0.5491,
"step": 34500
},
{
"epoch": 1.5,
"learning_rate": 1.2447963606712159e-05,
"loss": 0.5713,
"step": 35000
},
{
"epoch": 1.52,
"learning_rate": 1.1911505943950904e-05,
"loss": 0.5862,
"step": 35500
},
{
"epoch": 1.54,
"learning_rate": 1.1375048281189648e-05,
"loss": 0.5592,
"step": 36000
},
{
"epoch": 1.57,
"learning_rate": 1.0838590618428395e-05,
"loss": 0.5658,
"step": 36500
},
{
"epoch": 1.59,
"learning_rate": 1.0302132955667139e-05,
"loss": 0.5794,
"step": 37000
},
{
"epoch": 1.61,
"learning_rate": 9.765675292905884e-06,
"loss": 0.5462,
"step": 37500
},
{
"epoch": 1.63,
"learning_rate": 9.22921763014463e-06,
"loss": 0.5624,
"step": 38000
},
{
"epoch": 1.65,
"learning_rate": 8.692759967383375e-06,
"loss": 0.5504,
"step": 38500
},
{
"epoch": 1.67,
"learning_rate": 8.15630230462212e-06,
"loss": 0.5679,
"step": 39000
},
{
"epoch": 1.7,
"learning_rate": 7.619844641860864e-06,
"loss": 0.5642,
"step": 39500
},
{
"epoch": 1.72,
"learning_rate": 7.0833869790996095e-06,
"loss": 0.5778,
"step": 40000
},
{
"epoch": 1.74,
"learning_rate": 6.546929316338354e-06,
"loss": 0.54,
"step": 40500
},
{
"epoch": 1.76,
"learning_rate": 6.0104716535771e-06,
"loss": 0.5535,
"step": 41000
},
{
"epoch": 1.78,
"learning_rate": 5.474013990815846e-06,
"loss": 0.5497,
"step": 41500
},
{
"epoch": 1.8,
"learning_rate": 4.9375563280545895e-06,
"loss": 0.5556,
"step": 42000
},
{
"epoch": 1.82,
"learning_rate": 4.401098665293335e-06,
"loss": 0.5476,
"step": 42500
},
{
"epoch": 1.85,
"learning_rate": 3.86464100253208e-06,
"loss": 0.5392,
"step": 43000
},
{
"epoch": 1.87,
"learning_rate": 3.3281833397708254e-06,
"loss": 0.5515,
"step": 43500
},
{
"epoch": 1.89,
"learning_rate": 2.7917256770095704e-06,
"loss": 0.5403,
"step": 44000
},
{
"epoch": 1.91,
"learning_rate": 2.255268014248316e-06,
"loss": 0.5544,
"step": 44500
},
{
"epoch": 1.93,
"learning_rate": 1.7188103514870608e-06,
"loss": 0.5628,
"step": 45000
},
{
"epoch": 1.95,
"learning_rate": 1.1823526887258058e-06,
"loss": 0.575,
"step": 45500
},
{
"epoch": 1.97,
"learning_rate": 6.458950259645509e-07,
"loss": 0.546,
"step": 46000
},
{
"epoch": 2.0,
"learning_rate": 1.0943736320329599e-07,
"loss": 0.5278,
"step": 46500
},
{
"epoch": 2.0,
"eval_bleu": 45.9774,
"eval_chrf++": 69.0406,
"eval_gen_len": 29.0554,
"eval_loss": 0.6565608382225037,
"eval_runtime": 1493.1741,
"eval_samples_per_second": 3.29,
"eval_spbleu": 58.5933,
"eval_steps_per_second": 0.658,
"eval_ter": 43.8556,
"step": 46602
}
],
"max_steps": 46602,
"num_train_epochs": 2,
"total_flos": 1.983240667660288e+17,
"trial_name": null,
"trial_params": null
}
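
The log_history array above interleaves periodic training entries (which carry loss and learning_rate) with one evaluation entry per epoch (which carries eval_loss, eval_bleu, eval_chrf++, eval_spbleu, eval_ter, and related fields). Below is a minimal sketch of one way to consume this file, assuming it is saved locally as trainer_state.json and that matplotlib is installed; the file name, output path, and plotting choices are illustrative and not part of the checkpoint itself.

import json

import matplotlib.pyplot as plt  # assumption: matplotlib is available

with open("trainer_state.json", encoding="utf-8") as f:
    state = json.load(f)

# Training entries log "loss"; the per-epoch evaluation entries log "eval_loss" instead.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

# Print the per-epoch translation metrics recorded in this file.
for e in eval_logs:
    print(f'epoch {e["epoch"]:.0f}: BLEU {e["eval_bleu"]}, '
          f'chrF++ {e["eval_chrf++"]}, spBLEU {e["eval_spbleu"]}, TER {e["eval_ter"]}')

# Plot training loss against global step, with the per-epoch eval loss overlaid.
plt.plot([e["step"] for e in train_logs], [e["loss"] for e in train_logs], label="train loss")
plt.scatter([e["step"] for e in eval_logs], [e["eval_loss"] for e in eval_logs],
            color="red", label="eval loss")
plt.xlabel("global step")
plt.ylabel("loss")
plt.legend()
plt.savefig("loss_curve.png")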