{
"best_metric": 0.9302786588668823,
"best_model_checkpoint": "./koja_mbartLarge_55p_run2/checkpoint-32000",
"epoch": 2.8986382439083305,
"eval_steps": 8000,
"global_step": 48000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03,
"learning_rate": 4.9849024699559153e-05,
"loss": 1.6604,
"step": 500
},
{
"epoch": 0.06,
"learning_rate": 4.9698049399118304e-05,
"loss": 1.456,
"step": 1000
},
{
"epoch": 0.09,
"learning_rate": 4.954707409867746e-05,
"loss": 1.3471,
"step": 1500
},
{
"epoch": 0.12,
"learning_rate": 4.939609879823661e-05,
"loss": 1.3,
"step": 2000
},
{
"epoch": 0.15,
"learning_rate": 4.9245123497795764e-05,
"loss": 1.26,
"step": 2500
},
{
"epoch": 0.18,
"learning_rate": 4.9094148197354915e-05,
"loss": 1.2247,
"step": 3000
},
{
"epoch": 0.21,
"learning_rate": 4.8943172896914066e-05,
"loss": 1.1962,
"step": 3500
},
{
"epoch": 0.24,
"learning_rate": 4.8792197596473224e-05,
"loss": 1.1847,
"step": 4000
},
{
"epoch": 0.27,
"learning_rate": 4.8641222296032375e-05,
"loss": 1.1476,
"step": 4500
},
{
"epoch": 0.3,
"learning_rate": 4.8490246995591526e-05,
"loss": 1.1306,
"step": 5000
},
{
"epoch": 0.33,
"learning_rate": 4.833927169515067e-05,
"loss": 1.117,
"step": 5500
},
{
"epoch": 0.36,
"learning_rate": 4.818829639470983e-05,
"loss": 1.1115,
"step": 6000
},
{
"epoch": 0.39,
"learning_rate": 4.803732109426898e-05,
"loss": 1.1071,
"step": 6500
},
{
"epoch": 0.42,
"learning_rate": 4.788634579382813e-05,
"loss": 1.0832,
"step": 7000
},
{
"epoch": 0.45,
"learning_rate": 4.773537049338728e-05,
"loss": 1.0758,
"step": 7500
},
{
"epoch": 0.48,
"learning_rate": 4.758439519294643e-05,
"loss": 1.0633,
"step": 8000
},
{
"epoch": 0.48,
"eval_bleu": 52.4575,
"eval_gen_len": 17.4003,
"eval_loss": 1.0418639183044434,
"eval_runtime": 2289.0698,
"eval_samples_per_second": 14.468,
"eval_steps_per_second": 0.904,
"step": 8000
},
{
"epoch": 0.51,
"learning_rate": 4.743341989250559e-05,
"loss": 1.0568,
"step": 8500
},
{
"epoch": 0.54,
"learning_rate": 4.728244459206474e-05,
"loss": 1.0452,
"step": 9000
},
{
"epoch": 0.57,
"learning_rate": 4.713146929162389e-05,
"loss": 1.0344,
"step": 9500
},
{
"epoch": 0.6,
"learning_rate": 4.698049399118304e-05,
"loss": 1.0088,
"step": 10000
},
{
"epoch": 0.63,
"learning_rate": 4.6829518690742194e-05,
"loss": 1.0294,
"step": 10500
},
{
"epoch": 0.66,
"learning_rate": 4.667854339030135e-05,
"loss": 1.0237,
"step": 11000
},
{
"epoch": 0.69,
"learning_rate": 4.65275680898605e-05,
"loss": 1.0254,
"step": 11500
},
{
"epoch": 0.72,
"learning_rate": 4.6376592789419654e-05,
"loss": 1.0025,
"step": 12000
},
{
"epoch": 0.75,
"learning_rate": 4.6225617488978805e-05,
"loss": 0.9988,
"step": 12500
},
{
"epoch": 0.79,
"learning_rate": 4.6074642188537956e-05,
"loss": 0.9938,
"step": 13000
},
{
"epoch": 0.82,
"learning_rate": 4.5923666888097114e-05,
"loss": 0.9833,
"step": 13500
},
{
"epoch": 0.85,
"learning_rate": 4.5772691587656265e-05,
"loss": 0.9816,
"step": 14000
},
{
"epoch": 0.88,
"learning_rate": 4.5621716287215416e-05,
"loss": 0.9908,
"step": 14500
},
{
"epoch": 0.91,
"learning_rate": 4.547074098677457e-05,
"loss": 0.98,
"step": 15000
},
{
"epoch": 0.94,
"learning_rate": 4.531976568633372e-05,
"loss": 0.974,
"step": 15500
},
{
"epoch": 0.97,
"learning_rate": 4.516879038589287e-05,
"loss": 0.9731,
"step": 16000
},
{
"epoch": 0.97,
"eval_bleu": 55.7136,
"eval_gen_len": 16.9686,
"eval_loss": 0.9550060033798218,
"eval_runtime": 2134.3827,
"eval_samples_per_second": 15.516,
"eval_steps_per_second": 0.97,
"step": 16000
},
{
"epoch": 1.0,
"learning_rate": 4.501781508545202e-05,
"loss": 0.9559,
"step": 16500
},
{
"epoch": 1.03,
"learning_rate": 4.486683978501117e-05,
"loss": 0.8763,
"step": 17000
},
{
"epoch": 1.06,
"learning_rate": 4.471586448457032e-05,
"loss": 0.856,
"step": 17500
},
{
"epoch": 1.09,
"learning_rate": 4.456488918412948e-05,
"loss": 0.8314,
"step": 18000
},
{
"epoch": 1.12,
"learning_rate": 4.441391388368863e-05,
"loss": 0.8235,
"step": 18500
},
{
"epoch": 1.15,
"learning_rate": 4.426293858324778e-05,
"loss": 0.8204,
"step": 19000
},
{
"epoch": 1.18,
"learning_rate": 4.411196328280693e-05,
"loss": 0.808,
"step": 19500
},
{
"epoch": 1.21,
"learning_rate": 4.3960987982366084e-05,
"loss": 0.801,
"step": 20000
},
{
"epoch": 1.24,
"learning_rate": 4.381001268192524e-05,
"loss": 0.8007,
"step": 20500
},
{
"epoch": 1.27,
"learning_rate": 4.365903738148439e-05,
"loss": 0.7924,
"step": 21000
},
{
"epoch": 1.3,
"learning_rate": 4.3508062081043544e-05,
"loss": 0.779,
"step": 21500
},
{
"epoch": 1.33,
"learning_rate": 4.3357086780602695e-05,
"loss": 0.7711,
"step": 22000
},
{
"epoch": 1.36,
"learning_rate": 4.3206111480161846e-05,
"loss": 0.7773,
"step": 22500
},
{
"epoch": 1.39,
"learning_rate": 4.3055136179721003e-05,
"loss": 0.7724,
"step": 23000
},
{
"epoch": 1.42,
"learning_rate": 4.2904160879280154e-05,
"loss": 0.767,
"step": 23500
},
{
"epoch": 1.45,
"learning_rate": 4.2753185578839306e-05,
"loss": 0.7608,
"step": 24000
},
{
"epoch": 1.45,
"eval_bleu": 56.8788,
"eval_gen_len": 16.7537,
"eval_loss": 0.9372403025627136,
"eval_runtime": 2084.1101,
"eval_samples_per_second": 15.891,
"eval_steps_per_second": 0.993,
"step": 24000
},
{
"epoch": 1.48,
"learning_rate": 4.2602210278398457e-05,
"loss": 0.7485,
"step": 24500
},
{
"epoch": 1.51,
"learning_rate": 4.245123497795761e-05,
"loss": 0.7546,
"step": 25000
},
{
"epoch": 1.54,
"learning_rate": 4.2300259677516765e-05,
"loss": 0.7452,
"step": 25500
},
{
"epoch": 1.57,
"learning_rate": 4.2149284377075916e-05,
"loss": 0.7419,
"step": 26000
},
{
"epoch": 1.6,
"learning_rate": 4.199830907663506e-05,
"loss": 0.7289,
"step": 26500
},
{
"epoch": 1.63,
"learning_rate": 4.184733377619421e-05,
"loss": 0.7396,
"step": 27000
},
{
"epoch": 1.66,
"learning_rate": 4.169635847575337e-05,
"loss": 0.7474,
"step": 27500
},
{
"epoch": 1.69,
"learning_rate": 4.154538317531252e-05,
"loss": 0.74,
"step": 28000
},
{
"epoch": 1.72,
"learning_rate": 4.139440787487167e-05,
"loss": 0.7372,
"step": 28500
},
{
"epoch": 1.75,
"learning_rate": 4.124343257443082e-05,
"loss": 0.7259,
"step": 29000
},
{
"epoch": 1.78,
"learning_rate": 4.1092457273989973e-05,
"loss": 0.7247,
"step": 29500
},
{
"epoch": 1.81,
"learning_rate": 4.094148197354913e-05,
"loss": 0.719,
"step": 30000
},
{
"epoch": 1.84,
"learning_rate": 4.079050667310828e-05,
"loss": 0.7197,
"step": 30500
},
{
"epoch": 1.87,
"learning_rate": 4.063953137266743e-05,
"loss": 0.7261,
"step": 31000
},
{
"epoch": 1.9,
"learning_rate": 4.0488556072226584e-05,
"loss": 0.7196,
"step": 31500
},
{
"epoch": 1.93,
"learning_rate": 4.0337580771785735e-05,
"loss": 0.7213,
"step": 32000
},
{
"epoch": 1.93,
"eval_bleu": 57.4421,
"eval_gen_len": 16.6742,
"eval_loss": 0.9302786588668823,
"eval_runtime": 2082.6332,
"eval_samples_per_second": 15.902,
"eval_steps_per_second": 0.994,
"step": 32000
},
{
"epoch": 1.96,
"learning_rate": 4.018660547134489e-05,
"loss": 0.7199,
"step": 32500
},
{
"epoch": 1.99,
"learning_rate": 4.0035630170904044e-05,
"loss": 0.7118,
"step": 33000
},
{
"epoch": 2.02,
"learning_rate": 3.9884654870463195e-05,
"loss": 0.6634,
"step": 33500
},
{
"epoch": 2.05,
"learning_rate": 3.9733679570022346e-05,
"loss": 0.6322,
"step": 34000
},
{
"epoch": 2.08,
"learning_rate": 3.95827042695815e-05,
"loss": 0.6185,
"step": 34500
},
{
"epoch": 2.11,
"learning_rate": 3.9431728969140655e-05,
"loss": 0.613,
"step": 35000
},
{
"epoch": 2.14,
"learning_rate": 3.9280753668699806e-05,
"loss": 0.6078,
"step": 35500
},
{
"epoch": 2.17,
"learning_rate": 3.912977836825896e-05,
"loss": 0.5996,
"step": 36000
},
{
"epoch": 2.2,
"learning_rate": 3.897880306781811e-05,
"loss": 0.5912,
"step": 36500
},
{
"epoch": 2.23,
"learning_rate": 3.882782776737726e-05,
"loss": 0.5958,
"step": 37000
},
{
"epoch": 2.26,
"learning_rate": 3.867685246693641e-05,
"loss": 0.5929,
"step": 37500
},
{
"epoch": 2.29,
"learning_rate": 3.852587716649556e-05,
"loss": 0.5749,
"step": 38000
},
{
"epoch": 2.32,
"learning_rate": 3.837490186605471e-05,
"loss": 0.5789,
"step": 38500
},
{
"epoch": 2.36,
"learning_rate": 3.822392656561386e-05,
"loss": 0.5741,
"step": 39000
},
{
"epoch": 2.39,
"learning_rate": 3.807295126517302e-05,
"loss": 0.5816,
"step": 39500
},
{
"epoch": 2.42,
"learning_rate": 3.792197596473217e-05,
"loss": 0.5702,
"step": 40000
},
{
"epoch": 2.42,
"eval_bleu": 56.774,
"eval_gen_len": 16.4703,
"eval_loss": 0.9621614813804626,
"eval_runtime": 2069.3431,
"eval_samples_per_second": 16.004,
"eval_steps_per_second": 1.0,
"step": 40000
},
{
"epoch": 2.45,
"learning_rate": 3.777100066429132e-05,
"loss": 0.5631,
"step": 40500
},
{
"epoch": 2.48,
"learning_rate": 3.7620025363850474e-05,
"loss": 0.5546,
"step": 41000
},
{
"epoch": 2.51,
"learning_rate": 3.7469050063409625e-05,
"loss": 0.5607,
"step": 41500
},
{
"epoch": 2.54,
"learning_rate": 3.731807476296878e-05,
"loss": 0.5473,
"step": 42000
},
{
"epoch": 2.57,
"learning_rate": 3.7167099462527934e-05,
"loss": 0.5545,
"step": 42500
},
{
"epoch": 2.6,
"learning_rate": 3.7016124162087085e-05,
"loss": 0.5394,
"step": 43000
},
{
"epoch": 2.63,
"learning_rate": 3.6865148861646236e-05,
"loss": 0.547,
"step": 43500
},
{
"epoch": 2.66,
"learning_rate": 3.671417356120539e-05,
"loss": 0.553,
"step": 44000
},
{
"epoch": 2.69,
"learning_rate": 3.6563198260764545e-05,
"loss": 0.5456,
"step": 44500
},
{
"epoch": 2.72,
"learning_rate": 3.6412222960323696e-05,
"loss": 0.5513,
"step": 45000
},
{
"epoch": 2.75,
"learning_rate": 3.626124765988285e-05,
"loss": 0.5384,
"step": 45500
},
{
"epoch": 2.78,
"learning_rate": 3.6110272359442e-05,
"loss": 0.5392,
"step": 46000
},
{
"epoch": 2.81,
"learning_rate": 3.595929705900115e-05,
"loss": 0.5314,
"step": 46500
},
{
"epoch": 2.84,
"learning_rate": 3.5808321758560307e-05,
"loss": 0.5372,
"step": 47000
},
{
"epoch": 2.87,
"learning_rate": 3.565734645811946e-05,
"loss": 0.5304,
"step": 47500
},
{
"epoch": 2.9,
"learning_rate": 3.55063711576786e-05,
"loss": 0.5416,
"step": 48000
},
{
"epoch": 2.9,
"eval_bleu": 57.4192,
"eval_gen_len": 16.6763,
"eval_loss": 0.9696939587593079,
"eval_runtime": 2067.4369,
"eval_samples_per_second": 16.019,
"eval_steps_per_second": 1.001,
"step": 48000
}
],
"logging_steps": 500,
"max_steps": 165590,
"num_train_epochs": 10,
"save_steps": 8000,
"total_flos": 1.6644596837352735e+18,
"trial_name": null,
"trial_params": null
}