{
  "best_metric": 1.3331981897354126,
  "best_model_checkpoint": "/content/drive/My Drive/Colab Notebooks/aai520-project/checkpoints/distilbert-finetuned-uncased/checkpoint-1600",
  "epoch": 4.0,
  "eval_steps": 100,
  "global_step": 2040,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.39,
      "learning_rate": 1.607843137254902e-05,
      "loss": 3.6437,
      "step": 100
    },
    {
      "epoch": 0.39,
      "eval_loss": 2.1780340671539307,
      "eval_runtime": 8.4412,
      "eval_samples_per_second": 1417.927,
      "eval_steps_per_second": 11.136,
      "step": 100
    },
    {
      "epoch": 0.78,
      "learning_rate": 1.215686274509804e-05,
      "loss": 2.1596,
      "step": 200
    },
    {
      "epoch": 0.78,
      "eval_loss": 1.6557185649871826,
      "eval_runtime": 8.3581,
      "eval_samples_per_second": 1432.022,
      "eval_steps_per_second": 11.247,
      "step": 200
    },
    {
      "epoch": 1.18,
      "learning_rate": 8.23529411764706e-06,
      "loss": 1.8138,
      "step": 300
    },
    {
      "epoch": 1.18,
      "eval_loss": 1.5682721138000488,
      "eval_runtime": 8.4191,
      "eval_samples_per_second": 1421.643,
      "eval_steps_per_second": 11.165,
      "step": 300
    },
    {
      "epoch": 1.57,
      "learning_rate": 4.313725490196079e-06,
      "loss": 1.6987,
      "step": 400
    },
    {
      "epoch": 1.57,
      "eval_loss": 1.5075817108154297,
      "eval_runtime": 8.3595,
      "eval_samples_per_second": 1431.779,
      "eval_steps_per_second": 11.245,
      "step": 400
    },
    {
      "epoch": 1.96,
      "learning_rate": 3.921568627450981e-07,
      "loss": 1.6586,
      "step": 500
    },
    {
      "epoch": 1.96,
      "eval_loss": 1.535025715827942,
      "eval_runtime": 8.4289,
      "eval_samples_per_second": 1419.991,
      "eval_steps_per_second": 11.152,
      "step": 500
    },
    {
      "epoch": 1.18,
      "learning_rate": 8.23529411764706e-06,
      "loss": 1.5957,
      "step": 600
    },
    {
      "epoch": 1.18,
      "eval_loss": 1.4431041479110718,
      "eval_runtime": 8.5594,
      "eval_samples_per_second": 1398.342,
      "eval_steps_per_second": 21.964,
      "step": 600
    },
    {
      "epoch": 1.37,
      "learning_rate": 6.274509803921569e-06,
      "loss": 1.5825,
      "step": 700
    },
    {
      "epoch": 1.37,
      "eval_loss": 1.495514154434204,
      "eval_runtime": 8.5285,
      "eval_samples_per_second": 1403.419,
      "eval_steps_per_second": 22.044,
      "step": 700
    },
    {
      "epoch": 1.57,
      "learning_rate": 4.313725490196079e-06,
      "loss": 1.5523,
      "step": 800
    },
    {
      "epoch": 1.57,
      "eval_loss": 1.4444433450698853,
      "eval_runtime": 8.5112,
      "eval_samples_per_second": 1406.272,
      "eval_steps_per_second": 22.089,
      "step": 800
    },
    {
      "epoch": 1.76,
      "learning_rate": 2.3529411764705885e-06,
      "loss": 1.5346,
      "step": 900
    },
    {
      "epoch": 1.76,
      "eval_loss": 1.393009066581726,
      "eval_runtime": 8.5265,
      "eval_samples_per_second": 1403.737,
      "eval_steps_per_second": 22.049,
      "step": 900
    },
    {
      "epoch": 1.96,
      "learning_rate": 3.921568627450981e-07,
      "loss": 1.5098,
      "step": 1000
    },
    {
      "epoch": 1.96,
      "eval_loss": 1.4284701347351074,
      "eval_runtime": 8.5994,
      "eval_samples_per_second": 1391.84,
      "eval_steps_per_second": 21.862,
      "step": 1000
    },
    {
      "epoch": 2.16,
      "learning_rate": 9.215686274509804e-06,
      "loss": 1.4632,
      "step": 1100
    },
    {
      "epoch": 2.16,
      "eval_loss": 1.3630493879318237,
      "eval_runtime": 8.4807,
      "eval_samples_per_second": 1411.328,
      "eval_steps_per_second": 22.168,
      "step": 1100
    },
    {
      "epoch": 2.35,
      "learning_rate": 8.23529411764706e-06,
      "loss": 1.4468,
      "step": 1200
    },
    {
      "epoch": 2.35,
      "eval_loss": 1.370953917503357,
      "eval_runtime": 8.5147,
      "eval_samples_per_second": 1405.685,
      "eval_steps_per_second": 22.079,
      "step": 1200
    },
    {
      "epoch": 2.55,
      "learning_rate": 7.2549019607843145e-06,
      "loss": 1.4343,
      "step": 1300
    },
    {
      "epoch": 2.55,
      "eval_loss": 1.3422259092330933,
      "eval_runtime": 8.4859,
      "eval_samples_per_second": 1410.461,
      "eval_steps_per_second": 22.154,
      "step": 1300
    },
    {
      "epoch": 2.75,
      "learning_rate": 6.274509803921569e-06,
      "loss": 1.4225,
      "step": 1400
    },
    {
      "epoch": 2.75,
      "eval_loss": 1.397080659866333,
      "eval_runtime": 8.4725,
      "eval_samples_per_second": 1412.689,
      "eval_steps_per_second": 22.189,
      "step": 1400
    },
    {
      "epoch": 2.94,
      "learning_rate": 5.294117647058824e-06,
      "loss": 1.408,
      "step": 1500
    },
    {
      "epoch": 2.94,
      "eval_loss": 1.435463547706604,
      "eval_runtime": 8.4775,
      "eval_samples_per_second": 1411.85,
      "eval_steps_per_second": 22.176,
      "step": 1500
    },
    {
      "epoch": 3.14,
      "learning_rate": 4.313725490196079e-06,
      "loss": 1.3609,
      "step": 1600
    },
    {
      "epoch": 3.14,
      "eval_loss": 1.3331981897354126,
      "eval_runtime": 8.4786,
      "eval_samples_per_second": 1411.679,
      "eval_steps_per_second": 22.174,
      "step": 1600
    },
    {
      "epoch": 3.33,
      "learning_rate": 3.3333333333333333e-06,
      "loss": 1.3398,
      "step": 1700
    },
    {
      "epoch": 3.33,
      "eval_loss": 1.3791619539260864,
      "eval_runtime": 8.4678,
      "eval_samples_per_second": 1413.466,
      "eval_steps_per_second": 22.202,
      "step": 1700
    },
    {
      "epoch": 3.53,
      "learning_rate": 2.3529411764705885e-06,
      "loss": 1.3224,
      "step": 1800
    },
    {
      "epoch": 3.53,
      "eval_loss": 1.41716730594635,
      "eval_runtime": 8.4259,
      "eval_samples_per_second": 1420.506,
      "eval_steps_per_second": 22.312,
      "step": 1800
    },
    {
      "epoch": 3.73,
      "learning_rate": 1.3725490196078434e-06,
      "loss": 1.3152,
      "step": 1900
    },
    {
      "epoch": 3.73,
      "eval_loss": 1.3955893516540527,
      "eval_runtime": 8.444,
      "eval_samples_per_second": 1417.453,
      "eval_steps_per_second": 22.264,
      "step": 1900
    },
    {
      "epoch": 3.92,
      "learning_rate": 3.921568627450981e-07,
      "loss": 1.3141,
      "step": 2000
    },
    {
      "epoch": 3.92,
      "eval_loss": 1.3748189210891724,
      "eval_runtime": 8.4509,
      "eval_samples_per_second": 1416.303,
      "eval_steps_per_second": 22.246,
      "step": 2000
    },
    {
      "epoch": 4.0,
      "step": 2040,
      "total_flos": 8.491863563129856e+16,
      "train_loss": 0.025466402839211855,
      "train_runtime": 27.6855,
      "train_samples_per_second": 18855.037,
      "train_steps_per_second": 73.685
    },
    {
      "epoch": 4.0,
      "eval_loss": 1.3331981897354126,
      "eval_runtime": 8.4623,
      "eval_samples_per_second": 1414.395,
      "eval_steps_per_second": 22.216,
      "step": 2040
    }
  ],
  "logging_steps": 100,
  "max_steps": 2040,
  "num_train_epochs": 4,
  "save_steps": 100,
  "total_flos": 8.491863563129856e+16,
  "trial_name": null,
  "trial_params": null
}