{
  "best_metric": 76.43196644229354,
  "best_model_checkpoint": "/root/turkic_qa/en_kaz_models/en_kaz_mdeberta_base_squad_model/checkpoint-2764",
  "epoch": 10.0,
  "eval_steps": 500,
  "global_step": 6910,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "step": 691,
      "train_exact_match": 63.03696303696304,
      "train_f1": 78.13915659047835,
      "train_runtime": 17.4035,
      "train_samples_per_second": 74.468,
      "train_steps_per_second": 2.701
    },
    {
      "epoch": 1.0,
      "grad_norm": 40.22502899169922,
      "learning_rate": 5e-06,
      "loss": 1.2642,
      "step": 691
    },
    {
      "epoch": 1.0,
      "eval_exact_match": 58.5625,
      "eval_f1": 74.11433052314848,
      "eval_runtime": 54.8873,
      "eval_samples_per_second": 74.626,
      "eval_steps_per_second": 2.678,
      "step": 691
    },
    {
      "epoch": 2.0,
      "step": 1382,
      "train_exact_match": 67.33266733266733,
      "train_f1": 80.90558473928773,
      "train_runtime": 17.2817,
      "train_samples_per_second": 75.745,
      "train_steps_per_second": 2.72
    },
    {
      "epoch": 2.0,
      "grad_norm": 28.721803665161133,
      "learning_rate": 1e-05,
      "loss": 1.0309,
      "step": 1382
    },
    {
      "epoch": 2.0,
      "eval_exact_match": 60.84375,
      "eval_f1": 75.39680273889411,
      "eval_runtime": 54.2212,
      "eval_samples_per_second": 75.542,
      "eval_steps_per_second": 2.711,
      "step": 1382
    },
    {
      "epoch": 3.0,
      "step": 2073,
      "train_exact_match": 72.22777222777223,
      "train_f1": 86.54756181813792,
      "train_runtime": 16.9462,
      "train_samples_per_second": 74.943,
      "train_steps_per_second": 2.714
    },
    {
      "epoch": 3.0,
      "grad_norm": 25.19014549255371,
      "learning_rate": 8.750000000000001e-06,
      "loss": 0.8566,
      "step": 2073
    },
    {
      "epoch": 3.0,
      "eval_exact_match": 62.1875,
      "eval_f1": 76.29400510573826,
      "eval_runtime": 54.2582,
      "eval_samples_per_second": 75.491,
      "eval_steps_per_second": 2.709,
      "step": 2073
    },
    {
      "epoch": 4.0,
      "step": 2764,
      "train_exact_match": 75.52447552447552,
      "train_f1": 88.34455431353304,
      "train_runtime": 17.9414,
      "train_samples_per_second": 75.078,
      "train_steps_per_second": 2.731
    },
    {
      "epoch": 4.0,
      "grad_norm": 31.350345611572266,
      "learning_rate": 7.500000000000001e-06,
      "loss": 0.7007,
      "step": 2764
    },
    {
      "epoch": 4.0,
      "eval_exact_match": 62.21875,
      "eval_f1": 76.43196644229354,
      "eval_runtime": 54.0444,
      "eval_samples_per_second": 75.79,
      "eval_steps_per_second": 2.72,
      "step": 2764
    },
    {
      "epoch": 5.0,
      "step": 3455,
      "train_exact_match": 80.01998001998003,
      "train_f1": 91.67585411210774,
      "train_runtime": 17.0774,
      "train_samples_per_second": 75.246,
      "train_steps_per_second": 2.694
    },
    {
      "epoch": 5.0,
      "grad_norm": 41.42879867553711,
      "learning_rate": 6.25e-06,
      "loss": 0.5836,
      "step": 3455
    },
    {
      "epoch": 5.0,
      "eval_exact_match": 62.0,
      "eval_f1": 75.89595740381746,
      "eval_runtime": 54.8769,
      "eval_samples_per_second": 74.64,
      "eval_steps_per_second": 2.679,
      "step": 3455
    },
    {
      "epoch": 6.0,
      "step": 4146,
      "train_exact_match": 81.71828171828172,
      "train_f1": 92.46355571003183,
      "train_runtime": 16.6375,
      "train_samples_per_second": 75.252,
      "train_steps_per_second": 2.705
    },
    {
      "epoch": 6.0,
      "grad_norm": 20.05802345275879,
      "learning_rate": 5e-06,
      "loss": 0.494,
      "step": 4146
    },
    {
      "epoch": 6.0,
      "eval_exact_match": 62.46875,
      "eval_f1": 75.95799025269356,
      "eval_runtime": 54.1924,
      "eval_samples_per_second": 75.583,
      "eval_steps_per_second": 2.713,
      "step": 4146
    },
    {
      "epoch": 7.0,
      "step": 4837,
      "train_exact_match": 82.11788211788212,
      "train_f1": 93.12695306139514,
      "train_runtime": 17.1308,
      "train_samples_per_second": 74.953,
      "train_steps_per_second": 2.685
    },
    {
      "epoch": 7.0,
      "grad_norm": 36.17577362060547,
      "learning_rate": 3.7500000000000005e-06,
      "loss": 0.4281,
      "step": 4837
    },
    {
      "epoch": 7.0,
      "eval_exact_match": 62.09375,
      "eval_f1": 75.87457028727934,
      "eval_runtime": 54.4468,
      "eval_samples_per_second": 75.229,
      "eval_steps_per_second": 2.7,
      "step": 4837
    },
    {
      "epoch": 8.0,
      "step": 5528,
      "train_exact_match": 86.01398601398601,
      "train_f1": 94.42525886876679,
      "train_runtime": 17.0114,
      "train_samples_per_second": 75.479,
      "train_steps_per_second": 2.704
    },
    {
      "epoch": 8.0,
      "grad_norm": 12.529890060424805,
      "learning_rate": 2.5e-06,
      "loss": 0.3806,
      "step": 5528
    },
    {
      "epoch": 8.0,
      "eval_exact_match": 62.0625,
      "eval_f1": 75.78487868224668,
      "eval_runtime": 54.4766,
      "eval_samples_per_second": 75.188,
      "eval_steps_per_second": 2.698,
      "step": 5528
    },
    {
      "epoch": 9.0,
      "step": 6219,
      "train_exact_match": 86.11388611388611,
      "train_f1": 94.8958657040008,
      "train_runtime": 17.878,
      "train_samples_per_second": 72.1,
      "train_steps_per_second": 2.629
    },
    {
      "epoch": 9.0,
      "grad_norm": 37.82954788208008,
      "learning_rate": 1.25e-06,
      "loss": 0.3457,
      "step": 6219
    },
    {
      "epoch": 9.0,
      "eval_exact_match": 62.03125,
      "eval_f1": 75.65181251557605,
      "eval_runtime": 54.6102,
      "eval_samples_per_second": 75.004,
      "eval_steps_per_second": 2.692,
      "step": 6219
    },
    {
      "epoch": 10.0,
      "step": 6910,
      "train_exact_match": 87.81218781218782,
      "train_f1": 95.32192987518101,
      "train_runtime": 16.6546,
      "train_samples_per_second": 74.814,
      "train_steps_per_second": 2.702
    },
    {
      "epoch": 10.0,
      "grad_norm": 8.968611717224121,
      "learning_rate": 0.0,
      "loss": 0.3243,
      "step": 6910
    },
    {
      "epoch": 10.0,
      "eval_exact_match": 61.9375,
      "eval_f1": 75.6746197198531,
      "eval_runtime": 54.8671,
      "eval_samples_per_second": 74.653,
      "eval_steps_per_second": 2.679,
      "step": 6910
    },
    {
      "epoch": 10.0,
      "step": 6910,
      "total_flos": 3.788022162461184e+16,
      "train_loss": 0.6408887451878504,
      "train_runtime": 5720.422,
      "train_samples_per_second": 33.789,
      "train_steps_per_second": 1.208
    }
  ],
  "logging_steps": 500,
  "max_steps": 6910,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "total_flos": 3.788022162461184e+16,
  "train_batch_size": 28,
  "trial_name": null,
  "trial_params": null
}