{
  "best_metric": 0.037347666919231415,
  "best_model_checkpoint": "doc-topic-model_eval-01_train-03/checkpoint-11000",
  "epoch": 7.889546351084813,
  "eval_steps": 1000,
  "global_step": 16000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.2465483234714004,
      "grad_norm": 0.3881610631942749,
      "learning_rate": 1.9950690335305722e-05,
      "loss": 0.1669,
      "step": 500
    },
    {
      "epoch": 0.4930966469428008,
      "grad_norm": 0.3564796447753906,
      "learning_rate": 1.9901380670611442e-05,
      "loss": 0.0935,
      "step": 1000
    },
    {
      "epoch": 0.4930966469428008,
      "eval_accuracy": 0.981383845685049,
      "eval_f1": 0.0,
      "eval_loss": 0.08976549655199051,
      "eval_precision": 0.0,
      "eval_recall": 0.0,
      "eval_runtime": 12.0327,
      "eval_samples_per_second": 673.996,
      "eval_steps_per_second": 2.659,
      "step": 1000
    },
    {
      "epoch": 0.7396449704142012,
      "grad_norm": 0.4097365140914917,
      "learning_rate": 1.9852071005917162e-05,
      "loss": 0.0869,
      "step": 1500
    },
    {
      "epoch": 0.9861932938856016,
      "grad_norm": 0.3858756422996521,
      "learning_rate": 1.980276134122288e-05,
      "loss": 0.0764,
      "step": 2000
    },
    {
      "epoch": 0.9861932938856016,
      "eval_accuracy": 0.981383845685049,
      "eval_f1": 0.0,
      "eval_loss": 0.07017117738723755,
      "eval_precision": 0.0,
      "eval_recall": 0.0,
      "eval_runtime": 12.2763,
      "eval_samples_per_second": 660.624,
      "eval_steps_per_second": 2.607,
      "step": 2000
    },
    {
      "epoch": 1.232741617357002,
      "grad_norm": 0.3765203356742859,
      "learning_rate": 1.9753451676528602e-05,
      "loss": 0.068,
      "step": 2500
    },
    {
      "epoch": 1.4792899408284024,
      "grad_norm": 0.3380969762802124,
      "learning_rate": 1.9704142011834322e-05,
      "loss": 0.0621,
      "step": 3000
    },
    {
      "epoch": 1.4792899408284024,
      "eval_accuracy": 0.9819748572076478,
      "eval_f1": 0.06950541410594088,
      "eval_loss": 0.05700037255883217,
      "eval_precision": 0.8911819887429644,
      "eval_recall": 0.03616292348686715,
      "eval_runtime": 14.2939,
      "eval_samples_per_second": 567.375,
      "eval_steps_per_second": 2.239,
      "step": 3000
    },
    {
      "epoch": 1.725838264299803,
      "grad_norm": 0.3490855395793915,
      "learning_rate": 1.965483234714004e-05,
      "loss": 0.056,
      "step": 3500
    },
    {
      "epoch": 1.972386587771203,
      "grad_norm": 0.4068734347820282,
      "learning_rate": 1.9605522682445763e-05,
      "loss": 0.0542,
      "step": 4000
    },
    {
      "epoch": 1.972386587771203,
      "eval_accuracy": 0.9839533993792253,
      "eval_f1": 0.2863985881759738,
      "eval_loss": 0.049796320497989655,
      "eval_precision": 0.8319296960820213,
      "eval_recall": 0.17297297297297298,
      "eval_runtime": 14.4691,
      "eval_samples_per_second": 560.505,
      "eval_steps_per_second": 2.212,
      "step": 4000
    },
    {
      "epoch": 2.2189349112426036,
      "grad_norm": 0.39391544461250305,
      "learning_rate": 1.9556213017751483e-05,
      "loss": 0.0488,
      "step": 4500
    },
    {
      "epoch": 2.465483234714004,
      "grad_norm": 0.2698710858821869,
      "learning_rate": 1.95069033530572e-05,
      "loss": 0.0468,
      "step": 5000
    },
    {
      "epoch": 2.465483234714004,
      "eval_accuracy": 0.9851807758266367,
      "eval_f1": 0.4191111111111111,
      "eval_loss": 0.04676121473312378,
      "eval_precision": 0.7753340184994861,
      "eval_recall": 0.2871716787209745,
      "eval_runtime": 14.3877,
      "eval_samples_per_second": 563.675,
      "eval_steps_per_second": 2.224,
      "step": 5000
    },
    {
      "epoch": 2.712031558185404,
      "grad_norm": 0.39123860001564026,
      "learning_rate": 1.9457593688362923e-05,
      "loss": 0.0456,
      "step": 5500
    },
    {
      "epoch": 2.9585798816568047,
      "grad_norm": 0.4525609016418457,
      "learning_rate": 1.940828402366864e-05,
      "loss": 0.0441,
      "step": 6000
    },
    {
      "epoch": 2.9585798816568047,
      "eval_accuracy": 0.9861062686905623,
      "eval_f1": 0.4897725498360485,
      "eval_loss": 0.04347195476293564,
      "eval_precision": 0.774103323461665,
      "eval_recall": 0.35820327369623145,
      "eval_runtime": 14.2071,
      "eval_samples_per_second": 570.842,
      "eval_steps_per_second": 2.252,
      "step": 6000
    },
    {
      "epoch": 3.2051282051282053,
      "grad_norm": 0.47004055976867676,
      "learning_rate": 1.935897435897436e-05,
      "loss": 0.0409,
      "step": 6500
    },
    {
      "epoch": 3.4516765285996054,
      "grad_norm": 0.3331545293331146,
      "learning_rate": 1.930966469428008e-05,
      "loss": 0.0395,
      "step": 7000
    },
    {
      "epoch": 3.4516765285996054,
      "eval_accuracy": 0.9860283175305072,
      "eval_f1": 0.5278735632183909,
      "eval_loss": 0.041799116879701614,
      "eval_precision": 0.7115558424790187,
      "eval_recall": 0.4195660449181576,
      "eval_runtime": 14.3982,
      "eval_samples_per_second": 563.265,
      "eval_steps_per_second": 2.223,
      "step": 7000
    },
    {
      "epoch": 3.698224852071006,
      "grad_norm": 0.46907347440719604,
      "learning_rate": 1.92603550295858e-05,
      "loss": 0.0382,
      "step": 7500
    },
    {
      "epoch": 3.9447731755424065,
      "grad_norm": 0.3860171437263489,
      "learning_rate": 1.921104536489152e-05,
      "loss": 0.0384,
      "step": 8000
    },
    {
      "epoch": 3.9447731755424065,
      "eval_accuracy": 0.9865853140014457,
      "eval_f1": 0.5588440922861804,
      "eval_loss": 0.040071167051792145,
      "eval_precision": 0.7205528846153846,
      "eval_recall": 0.45641416063951273,
      "eval_runtime": 14.3408,
      "eval_samples_per_second": 565.521,
      "eval_steps_per_second": 2.231,
      "step": 8000
    },
    {
      "epoch": 4.191321499013807,
      "grad_norm": 0.39796024560928345,
      "learning_rate": 1.916173570019724e-05,
      "loss": 0.0348,
      "step": 8500
    },
    {
      "epoch": 4.437869822485207,
      "grad_norm": 0.3643665313720703,
      "learning_rate": 1.911242603550296e-05,
      "loss": 0.0343,
      "step": 9000
    },
    {
      "epoch": 4.437869822485207,
      "eval_accuracy": 0.9868985359354848,
      "eval_f1": 0.5774364600475407,
      "eval_loss": 0.03921710327267647,
      "eval_precision": 0.7225717881249285,
      "eval_recall": 0.48085268366958506,
      "eval_runtime": 14.341,
      "eval_samples_per_second": 565.513,
      "eval_steps_per_second": 2.231,
      "step": 9000
    },
    {
      "epoch": 4.684418145956608,
      "grad_norm": 0.2764923572540283,
      "learning_rate": 1.906311637080868e-05,
      "loss": 0.0345,
      "step": 9500
    },
    {
      "epoch": 4.930966469428008,
      "grad_norm": 0.432156503200531,
      "learning_rate": 1.90138067061144e-05,
      "loss": 0.0337,
      "step": 10000
    },
    {
      "epoch": 4.930966469428008,
      "eval_accuracy": 0.9873393143132503,
      "eval_f1": 0.5918954726118142,
      "eval_loss": 0.037751466035842896,
      "eval_precision": 0.7400045693397304,
      "eval_recall": 0.49318614389036924,
      "eval_runtime": 14.1834,
      "eval_samples_per_second": 571.794,
      "eval_steps_per_second": 2.256,
      "step": 10000
    },
    {
      "epoch": 5.177514792899408,
      "grad_norm": 0.27451589703559875,
      "learning_rate": 1.896459566074951e-05,
      "loss": 0.0309,
      "step": 10500
    },
    {
      "epoch": 5.424063116370808,
      "grad_norm": 0.49061715602874756,
      "learning_rate": 1.891528599605523e-05,
      "loss": 0.0305,
      "step": 11000
    },
    {
      "epoch": 5.424063116370808,
      "eval_accuracy": 0.9875731677934153,
      "eval_f1": 0.5988654039710861,
      "eval_loss": 0.037347666919231415,
      "eval_precision": 0.7503152585119798,
      "eval_recall": 0.49828701941377995,
      "eval_runtime": 14.3069,
      "eval_samples_per_second": 566.858,
      "eval_steps_per_second": 2.237,
      "step": 11000
    },
    {
      "epoch": 5.670611439842209,
      "grad_norm": 0.42573484778404236,
      "learning_rate": 1.886597633136095e-05,
      "loss": 0.0299,
      "step": 11500
    },
    {
      "epoch": 5.9171597633136095,
      "grad_norm": 0.4632101058959961,
      "learning_rate": 1.881666666666667e-05,
      "loss": 0.0295,
      "step": 12000
    },
    {
      "epoch": 5.9171597633136095,
      "eval_accuracy": 0.9875462392108508,
      "eval_f1": 0.6107641196013289,
      "eval_loss": 0.03775852918624878,
      "eval_precision": 0.7302966101694915,
      "eval_recall": 0.524857251617815,
      "eval_runtime": 14.2377,
      "eval_samples_per_second": 569.612,
      "eval_steps_per_second": 2.248,
      "step": 12000
    },
    {
      "epoch": 6.16370808678501,
      "grad_norm": 0.5974611043930054,
      "learning_rate": 1.876735700197239e-05,
      "loss": 0.0271,
      "step": 12500
    },
    {
      "epoch": 6.410256410256411,
      "grad_norm": 0.42065271735191345,
      "learning_rate": 1.8718145956607497e-05,
      "loss": 0.0271,
      "step": 13000
    },
    {
      "epoch": 6.410256410256411,
      "eval_accuracy": 0.9877163144691526,
      "eval_f1": 0.607952232324603,
      "eval_loss": 0.03745845705270767,
      "eval_precision": 0.7489968791796701,
      "eval_recall": 0.5116102017510468,
      "eval_runtime": 14.2135,
      "eval_samples_per_second": 570.583,
      "eval_steps_per_second": 2.251,
      "step": 13000
    },
    {
      "epoch": 6.65680473372781,
      "grad_norm": 0.7562135457992554,
      "learning_rate": 1.8668836291913217e-05,
      "loss": 0.0265,
      "step": 13500
    },
    {
      "epoch": 6.903353057199211,
      "grad_norm": 0.5237112641334534,
      "learning_rate": 1.8619526627218937e-05,
      "loss": 0.0257,
      "step": 14000
    },
    {
      "epoch": 6.903353057199211,
      "eval_accuracy": 0.987587340731607,
      "eval_f1": 0.6144567705581969,
      "eval_loss": 0.03767068684101105,
      "eval_precision": 0.7284208328984448,
      "eval_recall": 0.5313285116102018,
      "eval_runtime": 14.5272,
      "eval_samples_per_second": 558.263,
      "eval_steps_per_second": 2.203,
      "step": 14000
    },
    {
      "epoch": 7.149901380670611,
      "grad_norm": 0.38859695196151733,
      "learning_rate": 1.8570216962524657e-05,
      "loss": 0.0239,
      "step": 14500
    },
    {
      "epoch": 7.396449704142012,
      "grad_norm": 0.276382178068161,
      "learning_rate": 1.8520907297830377e-05,
      "loss": 0.0234,
      "step": 15000
    },
    {
      "epoch": 7.396449704142012,
      "eval_accuracy": 0.9875816715563304,
      "eval_f1": 0.6242710120068611,
      "eval_loss": 0.037651773542165756,
      "eval_precision": 0.7146784486990673,
      "eval_recall": 0.5541682527598021,
      "eval_runtime": 14.1634,
      "eval_samples_per_second": 572.603,
      "eval_steps_per_second": 2.259,
      "step": 15000
    },
    {
      "epoch": 7.642998027613412,
      "grad_norm": 0.23438788950443268,
      "learning_rate": 1.8471696252465485e-05,
      "loss": 0.0234,
      "step": 15500
    },
    {
      "epoch": 7.889546351084813,
      "grad_norm": 0.3130985498428345,
      "learning_rate": 1.8422386587771205e-05,
      "loss": 0.0241,
      "step": 16000
    },
    {
      "epoch": 7.889546351084813,
      "eval_accuracy": 0.9878750513769009,
      "eval_f1": 0.6261416772276363,
      "eval_loss": 0.03778611123561859,
      "eval_precision": 0.7349199835863767,
      "eval_recall": 0.5454130186524553,
      "eval_runtime": 14.2528,
      "eval_samples_per_second": 569.01,
      "eval_steps_per_second": 2.245,
      "step": 16000
    }
  ],
  "logging_steps": 500,
  "max_steps": 202800,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 100,
  "save_steps": 1000,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 360703978994412.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}