{
  "best_metric": 0.9408151870463428,
  "best_model_checkpoint": "/tmp/classification_phobertbase/checkpoint-5820",
  "epoch": 40.0,
  "eval_steps": 500,
  "global_step": 15520,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "eval_f1": 0.9251814628699051,
      "eval_loss": 0.21701952815055847,
      "eval_runtime": 9.8556,
      "eval_samples_per_second": 363.447,
      "eval_steps_per_second": 5.682,
      "step": 388
    },
    {
      "epoch": 1.2886597938144329,
      "grad_norm": 4.1991682052612305,
      "learning_rate": 2.9033505154639176e-05,
      "loss": 0.3098,
      "step": 500
    },
    {
      "epoch": 2.0,
      "eval_f1": 0.9369067560022334,
      "eval_loss": 0.2119385302066803,
      "eval_runtime": 9.8764,
      "eval_samples_per_second": 362.683,
      "eval_steps_per_second": 5.67,
      "step": 776
    },
    {
      "epoch": 2.5773195876288657,
      "grad_norm": 6.048534870147705,
      "learning_rate": 2.806701030927835e-05,
      "loss": 0.1763,
      "step": 1000
    },
    {
      "epoch": 3.0,
      "eval_f1": 0.932998324958124,
      "eval_loss": 0.20233677327632904,
      "eval_runtime": 9.8515,
      "eval_samples_per_second": 363.598,
      "eval_steps_per_second": 5.684,
      "step": 1164
    },
    {
      "epoch": 3.865979381443299,
      "grad_norm": 3.9996299743652344,
      "learning_rate": 2.7100515463917526e-05,
      "loss": 0.1286,
      "step": 1500
    },
    {
      "epoch": 4.0,
      "eval_f1": 0.9343941931881632,
      "eval_loss": 0.26061517000198364,
      "eval_runtime": 9.8544,
      "eval_samples_per_second": 363.492,
      "eval_steps_per_second": 5.683,
      "step": 1552
    },
    {
      "epoch": 5.0,
      "eval_f1": 0.9391401451702959,
      "eval_loss": 0.23778466880321503,
      "eval_runtime": 9.8578,
      "eval_samples_per_second": 363.368,
      "eval_steps_per_second": 5.681,
      "step": 1940
    },
    {
      "epoch": 5.154639175257732,
      "grad_norm": 3.624183416366577,
      "learning_rate": 2.61340206185567e-05,
      "loss": 0.0943,
      "step": 2000
    },
    {
      "epoch": 6.0,
      "eval_f1": 0.9335566722501396,
      "eval_loss": 0.2650683522224426,
      "eval_runtime": 9.8594,
      "eval_samples_per_second": 363.308,
      "eval_steps_per_second": 5.68,
      "step": 2328
    },
    {
      "epoch": 6.443298969072165,
      "grad_norm": 7.170408248901367,
      "learning_rate": 2.5167525773195877e-05,
      "loss": 0.0706,
      "step": 2500
    },
    {
      "epoch": 7.0,
      "eval_f1": 0.9366275823562256,
      "eval_loss": 0.2985905408859253,
      "eval_runtime": 9.8626,
      "eval_samples_per_second": 363.191,
      "eval_steps_per_second": 5.678,
      "step": 2716
    },
    {
      "epoch": 7.731958762886598,
      "grad_norm": 2.5986905097961426,
      "learning_rate": 2.4201030927835052e-05,
      "loss": 0.0626,
      "step": 3000
    },
    {
      "epoch": 8.0,
      "eval_f1": 0.9304857621440537,
      "eval_loss": 0.31677183508872986,
      "eval_runtime": 9.879,
      "eval_samples_per_second": 362.589,
      "eval_steps_per_second": 5.669,
      "step": 3104
    },
    {
      "epoch": 9.0,
      "eval_f1": 0.9357900614182021,
      "eval_loss": 0.30201148986816406,
      "eval_runtime": 9.8829,
      "eval_samples_per_second": 362.444,
      "eval_steps_per_second": 5.666,
      "step": 3492
    },
    {
      "epoch": 9.02061855670103,
      "grad_norm": 0.1557622104883194,
      "learning_rate": 2.3234536082474227e-05,
      "loss": 0.0515,
      "step": 3500
    },
    {
      "epoch": 10.0,
      "eval_f1": 0.9366275823562256,
      "eval_loss": 0.30621805787086487,
      "eval_runtime": 9.8777,
      "eval_samples_per_second": 362.634,
      "eval_steps_per_second": 5.669,
      "step": 3880
    },
    {
      "epoch": 10.309278350515465,
      "grad_norm": 0.21442590653896332,
      "learning_rate": 2.2268041237113402e-05,
      "loss": 0.0397,
      "step": 4000
    },
    {
      "epoch": 11.0,
      "eval_f1": 0.9343941931881632,
      "eval_loss": 0.34871262311935425,
      "eval_runtime": 9.8759,
      "eval_samples_per_second": 362.7,
      "eval_steps_per_second": 5.67,
      "step": 4268
    },
    {
      "epoch": 11.597938144329897,
      "grad_norm": 13.442221641540527,
      "learning_rate": 2.1301546391752577e-05,
      "loss": 0.0337,
      "step": 4500
    },
    {
      "epoch": 12.0,
      "eval_f1": 0.9290898939140145,
      "eval_loss": 0.4043188989162445,
      "eval_runtime": 9.879,
      "eval_samples_per_second": 362.586,
      "eval_steps_per_second": 5.669,
      "step": 4656
    },
    {
      "epoch": 12.88659793814433,
      "grad_norm": 0.3749821186065674,
      "learning_rate": 2.0335051546391752e-05,
      "loss": 0.031,
      "step": 5000
    },
    {
      "epoch": 13.0,
      "eval_f1": 0.9366275823562256,
      "eval_loss": 0.37788987159729004,
      "eval_runtime": 9.8649,
      "eval_samples_per_second": 363.104,
      "eval_steps_per_second": 5.677,
      "step": 5044
    },
    {
      "epoch": 14.0,
      "eval_f1": 0.9293690675600224,
      "eval_loss": 0.39344093203544617,
      "eval_runtime": 9.8929,
      "eval_samples_per_second": 362.077,
      "eval_steps_per_second": 5.661,
      "step": 5432
    },
    {
      "epoch": 14.175257731958762,
      "grad_norm": 0.08398671448230743,
      "learning_rate": 1.936855670103093e-05,
      "loss": 0.0266,
      "step": 5500
    },
    {
      "epoch": 15.0,
      "eval_f1": 0.9408151870463428,
      "eval_loss": 0.36774319410324097,
      "eval_runtime": 9.8647,
      "eval_samples_per_second": 363.113,
      "eval_steps_per_second": 5.677,
      "step": 5820
    },
    {
      "epoch": 15.463917525773196,
      "grad_norm": 0.2722782790660858,
      "learning_rate": 1.8402061855670106e-05,
      "loss": 0.0246,
      "step": 6000
    },
    {
      "epoch": 16.0,
      "eval_f1": 0.9355108877721943,
      "eval_loss": 0.3874046206474304,
      "eval_runtime": 9.8701,
      "eval_samples_per_second": 362.913,
      "eval_steps_per_second": 5.674,
      "step": 6208
    },
    {
      "epoch": 16.75257731958763,
      "grad_norm": 8.601401329040527,
      "learning_rate": 1.743556701030928e-05,
      "loss": 0.0222,
      "step": 6500
    },
    {
      "epoch": 17.0,
      "eval_f1": 0.9343941931881632,
      "eval_loss": 0.42573627829551697,
      "eval_runtime": 9.8744,
      "eval_samples_per_second": 362.755,
      "eval_steps_per_second": 5.671,
      "step": 6596
    },
    {
      "epoch": 18.0,
      "eval_f1": 0.9369067560022334,
      "eval_loss": 0.4371758997440338,
      "eval_runtime": 9.8898,
      "eval_samples_per_second": 362.193,
      "eval_steps_per_second": 5.662,
      "step": 6984
    },
    {
      "epoch": 18.04123711340206,
      "grad_norm": 0.05734695494174957,
      "learning_rate": 1.6469072164948456e-05,
      "loss": 0.022,
      "step": 7000
    },
    {
      "epoch": 19.0,
      "eval_f1": 0.9363484087102177,
      "eval_loss": 0.44078370928764343,
      "eval_runtime": 9.8828,
      "eval_samples_per_second": 362.448,
      "eval_steps_per_second": 5.666,
      "step": 7372
    },
    {
      "epoch": 19.329896907216494,
      "grad_norm": 0.19072239100933075,
      "learning_rate": 1.550257731958763e-05,
      "loss": 0.0176,
      "step": 7500
    },
    {
      "epoch": 20.0,
      "eval_f1": 0.9357900614182021,
      "eval_loss": 0.4601401090621948,
      "eval_runtime": 9.8712,
      "eval_samples_per_second": 362.874,
      "eval_steps_per_second": 5.673,
      "step": 7760
    },
    {
      "epoch": 20.61855670103093,
      "grad_norm": 0.14792193472385406,
      "learning_rate": 1.4536082474226805e-05,
      "loss": 0.0142,
      "step": 8000
    },
    {
      "epoch": 21.0,
      "eval_f1": 0.9360692350642099,
      "eval_loss": 0.45033514499664307,
      "eval_runtime": 9.8715,
      "eval_samples_per_second": 362.861,
      "eval_steps_per_second": 5.673,
      "step": 8148
    },
    {
      "epoch": 21.90721649484536,
      "grad_norm": 0.03246806561946869,
      "learning_rate": 1.356958762886598e-05,
      "loss": 0.0134,
      "step": 8500
    },
    {
      "epoch": 22.0,
      "eval_f1": 0.9366275823562256,
      "eval_loss": 0.4835437834262848,
      "eval_runtime": 9.8869,
      "eval_samples_per_second": 362.298,
      "eval_steps_per_second": 5.664,
      "step": 8536
    },
    {
      "epoch": 23.0,
      "eval_f1": 0.9391401451702959,
      "eval_loss": 0.45941534638404846,
      "eval_runtime": 9.8785,
      "eval_samples_per_second": 362.605,
      "eval_steps_per_second": 5.669,
      "step": 8924
    },
    {
      "epoch": 23.195876288659793,
      "grad_norm": 0.08111796528100967,
      "learning_rate": 1.2603092783505155e-05,
      "loss": 0.0126,
      "step": 9000
    },
    {
      "epoch": 24.0,
      "eval_f1": 0.9366275823562256,
      "eval_loss": 0.48089537024497986,
      "eval_runtime": 9.8641,
      "eval_samples_per_second": 363.135,
      "eval_steps_per_second": 5.677,
      "step": 9312
    },
    {
      "epoch": 24.484536082474225,
      "grad_norm": 0.06357069313526154,
      "learning_rate": 1.163659793814433e-05,
      "loss": 0.0109,
      "step": 9500
    },
    {
      "epoch": 25.0,
      "eval_f1": 0.9369067560022334,
      "eval_loss": 0.48593300580978394,
      "eval_runtime": 9.8681,
      "eval_samples_per_second": 362.988,
      "eval_steps_per_second": 5.675,
      "step": 9700
    },
    {
      "epoch": 25.77319587628866,
      "grad_norm": 0.006769911386072636,
      "learning_rate": 1.0670103092783506e-05,
      "loss": 0.012,
      "step": 10000
    },
    {
      "epoch": 26.0,
      "eval_f1": 0.9385817978782803,
      "eval_loss": 0.48239514231681824,
      "eval_runtime": 9.8778,
      "eval_samples_per_second": 362.631,
      "eval_steps_per_second": 5.669,
      "step": 10088
    },
    {
      "epoch": 27.0,
      "eval_f1": 0.9360692350642099,
      "eval_loss": 0.5067496299743652,
      "eval_runtime": 9.8844,
      "eval_samples_per_second": 362.39,
      "eval_steps_per_second": 5.666,
      "step": 10476
    },
    {
      "epoch": 27.061855670103093,
      "grad_norm": 0.037592578679323196,
      "learning_rate": 9.703608247422681e-06,
      "loss": 0.0101,
      "step": 10500
    },
    {
      "epoch": 28.0,
      "eval_f1": 0.937465103294249,
      "eval_loss": 0.48702046275138855,
      "eval_runtime": 9.8977,
      "eval_samples_per_second": 361.901,
      "eval_steps_per_second": 5.658,
      "step": 10864
    },
    {
      "epoch": 28.350515463917525,
      "grad_norm": 0.12630492448806763,
      "learning_rate": 8.737113402061856e-06,
      "loss": 0.0102,
      "step": 11000
    },
    {
      "epoch": 29.0,
      "eval_f1": 0.9355108877721943,
      "eval_loss": 0.5301576256752014,
      "eval_runtime": 9.8877,
      "eval_samples_per_second": 362.268,
      "eval_steps_per_second": 5.664,
      "step": 11252
    },
    {
      "epoch": 29.63917525773196,
      "grad_norm": 0.2997748851776123,
      "learning_rate": 7.770618556701031e-06,
      "loss": 0.0088,
      "step": 11500
    },
    {
      "epoch": 30.0,
      "eval_f1": 0.9366275823562256,
      "eval_loss": 0.49529990553855896,
      "eval_runtime": 9.8876,
      "eval_samples_per_second": 362.27,
      "eval_steps_per_second": 5.664,
      "step": 11640
    },
    {
      "epoch": 30.927835051546392,
      "grad_norm": 0.33649611473083496,
      "learning_rate": 6.8041237113402065e-06,
      "loss": 0.0093,
      "step": 12000
    },
    {
      "epoch": 31.0,
      "eval_f1": 0.9360692350642099,
      "eval_loss": 0.4913821220397949,
      "eval_runtime": 9.8622,
      "eval_samples_per_second": 363.206,
      "eval_steps_per_second": 5.678,
      "step": 12028
    },
    {
      "epoch": 32.0,
      "eval_f1": 0.9388609715242882,
      "eval_loss": 0.5014358162879944,
      "eval_runtime": 9.8775,
      "eval_samples_per_second": 362.641,
      "eval_steps_per_second": 5.669,
      "step": 12416
    },
    {
      "epoch": 32.21649484536083,
      "grad_norm": 0.011193674989044666,
      "learning_rate": 5.837628865979382e-06,
      "loss": 0.0084,
      "step": 12500
    },
    {
      "epoch": 33.0,
      "eval_f1": 0.9383026242322725,
      "eval_loss": 0.5026176571846008,
      "eval_runtime": 9.8893,
      "eval_samples_per_second": 362.21,
      "eval_steps_per_second": 5.663,
      "step": 12804
    },
    {
      "epoch": 33.50515463917526,
      "grad_norm": 0.26669424772262573,
      "learning_rate": 4.871134020618557e-06,
      "loss": 0.0078,
      "step": 13000
    },
    {
      "epoch": 34.0,
      "eval_f1": 0.9380234505862647,
      "eval_loss": 0.5043131709098816,
      "eval_runtime": 9.8862,
      "eval_samples_per_second": 362.323,
      "eval_steps_per_second": 5.664,
      "step": 13192
    },
    {
      "epoch": 34.79381443298969,
      "grad_norm": 0.9884344339370728,
      "learning_rate": 3.904639175257732e-06,
      "loss": 0.0075,
      "step": 13500
    },
    {
      "epoch": 35.0,
      "eval_f1": 0.9377442769402569,
      "eval_loss": 0.5035226941108704,
      "eval_runtime": 9.8894,
      "eval_samples_per_second": 362.204,
      "eval_steps_per_second": 5.663,
      "step": 13580
    },
    {
      "epoch": 36.0,
      "eval_f1": 0.9377442769402569,
      "eval_loss": 0.5007095336914062,
      "eval_runtime": 9.9011,
      "eval_samples_per_second": 361.777,
      "eval_steps_per_second": 5.656,
      "step": 13968
    },
    {
      "epoch": 36.08247422680412,
      "grad_norm": 0.02359125204384327,
      "learning_rate": 2.938144329896907e-06,
      "loss": 0.0077,
      "step": 14000
    },
    {
      "epoch": 37.0,
      "eval_f1": 0.9377442769402569,
      "eval_loss": 0.5102269053459167,
      "eval_runtime": 9.8918,
      "eval_samples_per_second": 362.118,
      "eval_steps_per_second": 5.661,
      "step": 14356
    },
    {
      "epoch": 37.371134020618555,
      "grad_norm": 0.00852921698242426,
      "learning_rate": 1.9716494845360827e-06,
      "loss": 0.0076,
      "step": 14500
    },
    {
      "epoch": 38.0,
      "eval_f1": 0.9391401451702959,
      "eval_loss": 0.5068992376327515,
      "eval_runtime": 9.8982,
      "eval_samples_per_second": 361.883,
      "eval_steps_per_second": 5.658,
      "step": 14744
    },
    {
      "epoch": 38.65979381443299,
      "grad_norm": 0.01492872554808855,
      "learning_rate": 1.0051546391752577e-06,
      "loss": 0.0059,
      "step": 15000
    },
    {
      "epoch": 39.0,
      "eval_f1": 0.9385817978782803,
      "eval_loss": 0.5104745030403137,
      "eval_runtime": 9.9017,
      "eval_samples_per_second": 361.758,
      "eval_steps_per_second": 5.656,
      "step": 15132
    },
    {
      "epoch": 39.94845360824742,
      "grad_norm": 0.013034285046160221,
      "learning_rate": 3.865979381443299e-08,
      "loss": 0.0071,
      "step": 15500
    },
    {
      "epoch": 40.0,
      "eval_f1": 0.9383026242322725,
      "eval_loss": 0.5110998749732971,
      "eval_runtime": 9.8692,
      "eval_samples_per_second": 362.949,
      "eval_steps_per_second": 5.674,
      "step": 15520
    },
    {
      "epoch": 40.0,
      "step": 15520,
      "total_flos": 6.517582470540288e+16,
      "train_loss": 0.040758436324900574,
      "train_runtime": 8835.4698,
      "train_samples_per_second": 112.143,
      "train_steps_per_second": 1.757
    }
  ],
  "logging_steps": 500,
  "max_steps": 15520,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 40,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 6.517582470540288e+16,
  "train_batch_size": 64,
  "trial_name": null,
  "trial_params": null
}