{
  "best_metric": 0.0380592942237854,
  "best_model_checkpoint": "doc-topic-model_eval-02_train-01/checkpoint-14000",
  "epoch": 9.368836291913215,
  "eval_steps": 1000,
  "global_step": 19000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.2465483234714004,
      "grad_norm": 0.32817578315734863,
      "learning_rate": 1.9950690335305722e-05,
      "loss": 0.1659,
      "step": 500
    },
    {
      "epoch": 0.4930966469428008,
      "grad_norm": 0.34106990694999695,
      "learning_rate": 1.9901380670611442e-05,
      "loss": 0.0941,
      "step": 1000
    },
    {
      "epoch": 0.4930966469428008,
      "eval_accuracy": 0.9813654208653996,
      "eval_f1": 0.0,
      "eval_loss": 0.09066470712423325,
      "eval_precision": 0.0,
      "eval_recall": 0.0,
      "eval_runtime": 12.1347,
      "eval_samples_per_second": 668.329,
      "eval_steps_per_second": 2.637,
      "step": 1000
    },
    {
      "epoch": 0.7396449704142012,
      "grad_norm": 0.4004117548465729,
      "learning_rate": 1.9852071005917162e-05,
      "loss": 0.088,
      "step": 1500
    },
    {
      "epoch": 0.9861932938856016,
      "grad_norm": 0.37588435411453247,
      "learning_rate": 1.980276134122288e-05,
      "loss": 0.0787,
      "step": 2000
    },
    {
      "epoch": 0.9861932938856016,
      "eval_accuracy": 0.9813654208653996,
      "eval_f1": 0.0,
      "eval_loss": 0.07067307829856873,
      "eval_precision": 0.0,
      "eval_recall": 0.0,
      "eval_runtime": 12.2547,
      "eval_samples_per_second": 661.789,
      "eval_steps_per_second": 2.611,
      "step": 2000
    },
    {
      "epoch": 1.232741617357002,
      "grad_norm": 0.43946701288223267,
      "learning_rate": 1.9753451676528602e-05,
      "loss": 0.0696,
      "step": 2500
    },
    {
      "epoch": 1.4792899408284024,
      "grad_norm": 0.4252438545227051,
      "learning_rate": 1.9704142011834322e-05,
      "loss": 0.0628,
      "step": 3000
    },
    {
      "epoch": 1.4792899408284024,
      "eval_accuracy": 0.9822313873889196,
      "eval_f1": 0.1224889759921607,
      "eval_loss": 0.057456616312265396,
      "eval_precision": 0.7682177348551361,
      "eval_recall": 0.066550045634317,
      "eval_runtime": 14.2849,
      "eval_samples_per_second": 567.732,
      "eval_steps_per_second": 2.24,
      "step": 3000
    },
    {
      "epoch": 1.725838264299803,
      "grad_norm": 0.3535268306732178,
      "learning_rate": 1.965483234714004e-05,
      "loss": 0.0584,
      "step": 3500
    },
    {
      "epoch": 1.972386587771203,
      "grad_norm": 0.3976692855358124,
      "learning_rate": 1.9605522682445763e-05,
      "loss": 0.0537,
      "step": 4000
    },
    {
      "epoch": 1.972386587771203,
      "eval_accuracy": 0.9842042603852205,
      "eval_f1": 0.3201366436893796,
      "eval_loss": 0.050263650715351105,
      "eval_precision": 0.8086286594761171,
      "eval_recall": 0.19957407970794036,
      "eval_runtime": 14.3334,
      "eval_samples_per_second": 565.811,
      "eval_steps_per_second": 2.233,
      "step": 4000
    },
    {
      "epoch": 2.2189349112426036,
      "grad_norm": 0.24940212070941925,
      "learning_rate": 1.9556213017751483e-05,
      "loss": 0.0499,
      "step": 4500
    },
    {
      "epoch": 2.465483234714004,
      "grad_norm": 0.40094295144081116,
      "learning_rate": 1.95069033530572e-05,
      "loss": 0.0478,
      "step": 5000
    },
    {
      "epoch": 2.465483234714004,
      "eval_accuracy": 0.9851467607749763,
      "eval_f1": 0.4262564327165225,
      "eval_loss": 0.04696211963891983,
      "eval_precision": 0.7606486908948809,
      "eval_recall": 0.2960906601764527,
      "eval_runtime": 14.3295,
      "eval_samples_per_second": 565.965,
      "eval_steps_per_second": 2.233,
      "step": 5000
    },
    {
      "epoch": 2.712031558185404,
      "grad_norm": 0.42049625515937805,
      "learning_rate": 1.9457593688362923e-05,
      "loss": 0.0463,
      "step": 5500
    },
    {
      "epoch": 2.9585798816568047,
      "grad_norm": 0.41673073172569275,
      "learning_rate": 1.940828402366864e-05,
      "loss": 0.0453,
      "step": 6000
    },
    {
      "epoch": 2.9585798816568047,
      "eval_accuracy": 0.9857760392306929,
      "eval_f1": 0.4983003399320136,
      "eval_loss": 0.044437214732170105,
      "eval_precision": 0.7269544924154026,
      "eval_recall": 0.37906905993306966,
      "eval_runtime": 14.5887,
      "eval_samples_per_second": 555.911,
      "eval_steps_per_second": 2.193,
      "step": 6000
    },
    {
      "epoch": 3.2051282051282053,
      "grad_norm": 0.3240315914154053,
      "learning_rate": 1.935897435897436e-05,
      "loss": 0.0424,
      "step": 6500
    },
    {
      "epoch": 3.4516765285996054,
      "grad_norm": 0.39667147397994995,
      "learning_rate": 1.930966469428008e-05,
      "loss": 0.0389,
      "step": 7000
    },
    {
      "epoch": 3.4516765285996054,
      "eval_accuracy": 0.9864237425060589,
      "eval_f1": 0.5409058231488139,
      "eval_loss": 0.04192586615681648,
      "eval_precision": 0.7312427108980174,
      "eval_recall": 0.4291907514450867,
      "eval_runtime": 14.4636,
      "eval_samples_per_second": 560.719,
      "eval_steps_per_second": 2.212,
      "step": 7000
    },
    {
      "epoch": 3.698224852071006,
      "grad_norm": 0.39966270327568054,
      "learning_rate": 1.92603550295858e-05,
      "loss": 0.0403,
      "step": 7500
    },
    {
      "epoch": 3.9447731755424065,
      "grad_norm": 0.42601555585861206,
      "learning_rate": 1.921104536489152e-05,
      "loss": 0.0393,
      "step": 8000
    },
    {
      "epoch": 3.9447731755424065,
      "eval_accuracy": 0.9863302011139929,
      "eval_f1": 0.5480106846618867,
      "eval_loss": 0.04111693799495697,
      "eval_precision": 0.7138322549139299,
      "eval_recall": 0.4447064192272589,
      "eval_runtime": 14.5558,
      "eval_samples_per_second": 557.167,
      "eval_steps_per_second": 2.198,
      "step": 8000
    },
    {
      "epoch": 4.191321499013807,
      "grad_norm": 0.41485270857810974,
      "learning_rate": 1.916173570019724e-05,
      "loss": 0.0361,
      "step": 8500
    },
    {
      "epoch": 4.437869822485207,
      "grad_norm": 0.4282112717628479,
      "learning_rate": 1.911242603550296e-05,
      "loss": 0.0349,
      "step": 9000
    },
    {
      "epoch": 4.437869822485207,
      "eval_accuracy": 0.9868149156001531,
      "eval_f1": 0.57472,
      "eval_loss": 0.039900876581668854,
      "eval_precision": 0.7202933425002864,
      "eval_recall": 0.4780955278369334,
      "eval_runtime": 14.4367,
      "eval_samples_per_second": 561.763,
      "eval_steps_per_second": 2.217,
      "step": 9000
    },
    {
      "epoch": 4.684418145956608,
      "grad_norm": 0.5737874507904053,
      "learning_rate": 1.906311637080868e-05,
      "loss": 0.0358,
      "step": 9500
    },
    {
      "epoch": 4.930966469428008,
      "grad_norm": 0.42196542024612427,
      "learning_rate": 1.90138067061144e-05,
      "loss": 0.0344,
      "step": 10000
    },
    {
      "epoch": 4.930966469428008,
      "eval_accuracy": 0.9870388480235838,
      "eval_f1": 0.575815204786864,
      "eval_loss": 0.03913598880171776,
      "eval_precision": 0.7379621923671382,
      "eval_recall": 0.47208700943109216,
      "eval_runtime": 14.3745,
      "eval_samples_per_second": 564.193,
      "eval_steps_per_second": 2.226,
      "step": 10000
    },
    {
      "epoch": 5.177514792899408,
      "grad_norm": 0.2073846012353897,
      "learning_rate": 1.896459566074951e-05,
      "loss": 0.0315,
      "step": 10500
    },
    {
      "epoch": 5.424063116370808,
      "grad_norm": 0.3816153109073639,
      "learning_rate": 1.891528599605523e-05,
      "loss": 0.0302,
      "step": 11000
    },
    {
      "epoch": 5.424063116370808,
      "eval_accuracy": 0.9871295548280113,
      "eval_f1": 0.5903739455997113,
      "eval_loss": 0.03845709562301636,
      "eval_precision": 0.7254184680190666,
      "eval_recall": 0.49771828414968056,
      "eval_runtime": 14.5373,
      "eval_samples_per_second": 557.876,
      "eval_steps_per_second": 2.201,
      "step": 11000
    },
    {
      "epoch": 5.670611439842209,
      "grad_norm": 0.4880998730659485,
      "learning_rate": 1.886597633136095e-05,
      "loss": 0.0321,
      "step": 11500
    },
    {
      "epoch": 5.9171597633136095,
      "grad_norm": 0.31077542901039124,
      "learning_rate": 1.881666666666667e-05,
      "loss": 0.0305,
      "step": 12000
    },
    {
      "epoch": 5.9171597633136095,
      "eval_accuracy": 0.9871040435392662,
      "eval_f1": 0.5966218912089374,
      "eval_loss": 0.038683824241161346,
      "eval_precision": 0.7151663301094696,
      "eval_recall": 0.5117888652266505,
      "eval_runtime": 14.4549,
      "eval_samples_per_second": 561.056,
      "eval_steps_per_second": 2.214,
      "step": 12000
    },
    {
      "epoch": 6.16370808678501,
      "grad_norm": 0.4404219686985016,
      "learning_rate": 1.876735700197239e-05,
      "loss": 0.0281,
      "step": 12500
    },
    {
      "epoch": 6.410256410256411,
      "grad_norm": 0.39860355854034424,
      "learning_rate": 1.8718145956607497e-05,
      "loss": 0.027,
      "step": 13000
    },
    {
      "epoch": 6.410256410256411,
      "eval_accuracy": 0.9874441940558697,
      "eval_f1": 0.605653238370799,
      "eval_loss": 0.0383986160159111,
      "eval_precision": 0.7301706557904905,
      "eval_recall": 0.5174170976574384,
      "eval_runtime": 14.2815,
      "eval_samples_per_second": 567.867,
      "eval_steps_per_second": 2.241,
      "step": 13000
    },
    {
      "epoch": 6.65680473372781,
      "grad_norm": 0.3162294924259186,
      "learning_rate": 1.8668836291913217e-05,
      "loss": 0.0267,
      "step": 13500
    },
    {
      "epoch": 6.903353057199211,
      "grad_norm": 0.3698256313800812,
      "learning_rate": 1.8619526627218937e-05,
      "loss": 0.0282,
      "step": 14000
    },
    {
      "epoch": 6.903353057199211,
      "eval_accuracy": 0.9875349008602974,
      "eval_f1": 0.6079436544376589,
      "eval_loss": 0.0380592942237854,
      "eval_precision": 0.7344103392568659,
      "eval_recall": 0.5186340127776088,
      "eval_runtime": 14.3349,
      "eval_samples_per_second": 565.751,
      "eval_steps_per_second": 2.232,
      "step": 14000
    },
    {
      "epoch": 7.149901380670611,
      "grad_norm": 0.42440325021743774,
      "learning_rate": 1.8570216962524657e-05,
      "loss": 0.0252,
      "step": 14500
    },
    {
      "epoch": 7.396449704142012,
      "grad_norm": 0.5403586626052856,
      "learning_rate": 1.8521005917159765e-05,
      "loss": 0.0235,
      "step": 15000
    },
    {
      "epoch": 7.396449704142012,
      "eval_accuracy": 0.9874016752412943,
      "eval_f1": 0.6180880773361976,
      "eval_loss": 0.038525186479091644,
      "eval_precision": 0.7102794509726473,
      "eval_recall": 0.5470794037115911,
      "eval_runtime": 14.711,
      "eval_samples_per_second": 551.288,
      "eval_steps_per_second": 2.175,
      "step": 15000
    },
    {
      "epoch": 7.642998027613412,
      "grad_norm": 0.3601667582988739,
      "learning_rate": 1.8471696252465485e-05,
      "loss": 0.0243,
      "step": 15500
    },
    {
      "epoch": 7.889546351084813,
      "grad_norm": 0.4192611575126648,
      "learning_rate": 1.8422386587771205e-05,
      "loss": 0.0255,
      "step": 16000
    },
    {
      "epoch": 7.889546351084813,
      "eval_accuracy": 0.9876312768400017,
      "eval_f1": 0.6257237208903376,
      "eval_loss": 0.038173139095306396,
      "eval_precision": 0.7173763398564263,
      "eval_recall": 0.5548372376026772,
      "eval_runtime": 14.4228,
      "eval_samples_per_second": 562.305,
      "eval_steps_per_second": 2.219,
      "step": 16000
    },
    {
      "epoch": 8.136094674556213,
      "grad_norm": 0.13463503122329712,
      "learning_rate": 1.8373076923076926e-05,
      "loss": 0.0224,
      "step": 16500
    },
    {
      "epoch": 8.382642998027613,
      "grad_norm": 0.25073572993278503,
      "learning_rate": 1.8323865877712033e-05,
      "loss": 0.0214,
      "step": 17000
    },
    {
      "epoch": 8.382642998027613,
      "eval_accuracy": 0.9877319047011636,
      "eval_f1": 0.6352911435072048,
      "eval_loss": 0.0381532646715641,
      "eval_precision": 0.7121670130360854,
      "eval_recall": 0.5733951931852753,
      "eval_runtime": 14.3056,
      "eval_samples_per_second": 566.91,
      "eval_steps_per_second": 2.237,
      "step": 17000
    },
    {
      "epoch": 8.629191321499015,
      "grad_norm": 0.39663228392601013,
      "learning_rate": 1.8274556213017754e-05,
      "loss": 0.0215,
      "step": 17500
    },
    {
      "epoch": 8.875739644970414,
      "grad_norm": 0.7587671279907227,
      "learning_rate": 1.822524654832347e-05,
      "loss": 0.0222,
      "step": 18000
    },
    {
      "epoch": 8.875739644970414,
      "eval_accuracy": 0.9876114347265331,
      "eval_f1": 0.628153316033522,
      "eval_loss": 0.038794856518507004,
      "eval_precision": 0.7127135823921228,
      "eval_recall": 0.5615302707636143,
      "eval_runtime": 14.4829,
      "eval_samples_per_second": 559.969,
      "eval_steps_per_second": 2.209,
      "step": 18000
    },
    {
      "epoch": 9.122287968441814,
      "grad_norm": 0.14514634013175964,
      "learning_rate": 1.8175936883629194e-05,
      "loss": 0.0198,
      "step": 18500
    },
    {
      "epoch": 9.368836291913215,
      "grad_norm": 0.44190242886543274,
      "learning_rate": 1.8126725838264302e-05,
      "loss": 0.0192,
      "step": 19000
    },
    {
      "epoch": 9.368836291913215,
      "eval_accuracy": 0.9874668707569766,
      "eval_f1": 0.632078219263574,
      "eval_loss": 0.039640188217163086,
      "eval_precision": 0.6977128685588316,
      "eval_recall": 0.5777304533008822,
      "eval_runtime": 14.7066,
      "eval_samples_per_second": 551.452,
      "eval_steps_per_second": 2.176,
      "step": 19000
    }
  ],
  "logging_steps": 500,
  "max_steps": 202800,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 100,
  "save_steps": 1000,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 429748313255712.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}