{
  "best_metric": 0.03750571236014366,
  "best_model_checkpoint": "doc-topic-model_eval-02_train-04/checkpoint-13000",
  "epoch": 8.871365204534253,
  "eval_steps": 1000,
  "global_step": 18000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.2464268112370626,
      "grad_norm": 0.36145079135894775,
      "learning_rate": 1.9950714637752587e-05,
      "loss": 0.1655,
      "step": 500
    },
    {
      "epoch": 0.4928536224741252,
      "grad_norm": 0.3764462471008301,
      "learning_rate": 1.990142927550518e-05,
      "loss": 0.0934,
      "step": 1000
    },
    {
      "epoch": 0.4928536224741252,
      "eval_accuracy": 0.9813654208653996,
      "eval_f1": 0.0,
      "eval_loss": 0.09037782996892929,
      "eval_precision": 0.0,
      "eval_recall": 0.0,
      "eval_runtime": 12.0127,
      "eval_samples_per_second": 675.12,
      "eval_steps_per_second": 2.664,
      "step": 1000
    },
    {
      "epoch": 0.7392804337111878,
      "grad_norm": 0.3353608548641205,
      "learning_rate": 1.9852143913257764e-05,
      "loss": 0.0875,
      "step": 1500
    },
    {
      "epoch": 0.9857072449482503,
      "grad_norm": 0.3118976652622223,
      "learning_rate": 1.980285855101035e-05,
      "loss": 0.0778,
      "step": 2000
    },
    {
      "epoch": 0.9857072449482503,
      "eval_accuracy": 0.9813654208653996,
      "eval_f1": 0.0,
      "eval_loss": 0.07024028897285461,
      "eval_precision": 0.0,
      "eval_recall": 0.0,
      "eval_runtime": 12.1524,
      "eval_samples_per_second": 667.356,
      "eval_steps_per_second": 2.633,
      "step": 2000
    },
    {
      "epoch": 1.232134056185313,
      "grad_norm": 0.36931222677230835,
      "learning_rate": 1.975357318876294e-05,
      "loss": 0.0682,
      "step": 2500
    },
    {
      "epoch": 1.4785608674223756,
      "grad_norm": 0.3769352436065674,
      "learning_rate": 1.9704287826515527e-05,
      "loss": 0.0618,
      "step": 3000
    },
    {
      "epoch": 1.4785608674223756,
      "eval_accuracy": 0.9827912184474964,
      "eval_f1": 0.1751358695652174,
      "eval_loss": 0.05670051649212837,
      "eval_precision": 0.8199745547073791,
      "eval_recall": 0.09803772436872528,
      "eval_runtime": 14.2669,
      "eval_samples_per_second": 568.448,
      "eval_steps_per_second": 2.243,
      "step": 3000
    },
    {
      "epoch": 1.724987678659438,
      "grad_norm": 0.3530617356300354,
      "learning_rate": 1.9655002464268113e-05,
      "loss": 0.0565,
      "step": 3500
    },
    {
      "epoch": 1.9714144898965007,
      "grad_norm": 0.3648615777492523,
      "learning_rate": 1.96057171020207e-05,
      "loss": 0.0535,
      "step": 4000
    },
    {
      "epoch": 1.9714144898965007,
      "eval_accuracy": 0.9842510310812534,
      "eval_f1": 0.33580394500896593,
      "eval_loss": 0.049183741211891174,
      "eval_precision": 0.7841987716359575,
      "eval_recall": 0.21364466078491026,
      "eval_runtime": 14.4394,
      "eval_samples_per_second": 561.657,
      "eval_steps_per_second": 2.216,
      "step": 4000
    },
    {
      "epoch": 2.2178413011335634,
      "grad_norm": 0.4629921317100525,
      "learning_rate": 1.955643173977329e-05,
      "loss": 0.0479,
      "step": 4500
    },
    {
      "epoch": 2.464268112370626,
      "grad_norm": 0.38679325580596924,
      "learning_rate": 1.9507146377525875e-05,
      "loss": 0.0473,
      "step": 5000
    },
    {
      "epoch": 2.464268112370626,
      "eval_accuracy": 0.9854925804668566,
      "eval_f1": 0.46681946036045424,
      "eval_loss": 0.045466337352991104,
      "eval_precision": 0.7406611570247934,
      "eval_recall": 0.3408122908427137,
      "eval_runtime": 14.1723,
      "eval_samples_per_second": 572.243,
      "eval_steps_per_second": 2.258,
      "step": 5000
    },
    {
      "epoch": 2.7106949236076883,
      "grad_norm": 0.35379377007484436,
      "learning_rate": 1.9457861015278464e-05,
      "loss": 0.0442,
      "step": 5500
    },
    {
      "epoch": 2.957121734844751,
      "grad_norm": 0.3872750997543335,
      "learning_rate": 1.9408575653031053e-05,
      "loss": 0.0436,
      "step": 6000
    },
    {
      "epoch": 2.957121734844751,
      "eval_accuracy": 0.9860694190512636,
      "eval_f1": 0.5016478223394008,
      "eval_loss": 0.042579714208841324,
      "eval_precision": 0.7523954372623575,
      "eval_recall": 0.3762549437176757,
      "eval_runtime": 14.177,
      "eval_samples_per_second": 572.053,
      "eval_steps_per_second": 2.257,
      "step": 6000
    },
    {
      "epoch": 3.2035485460818136,
      "grad_norm": 0.4312683939933777,
      "learning_rate": 1.9359290290783638e-05,
      "loss": 0.0399,
      "step": 6500
    },
    {
      "epoch": 3.449975357318876,
      "grad_norm": 0.46780943870544434,
      "learning_rate": 1.9310004928536227e-05,
      "loss": 0.0389,
      "step": 7000
    },
    {
      "epoch": 3.449975357318876,
      "eval_accuracy": 0.9865073628413906,
      "eval_f1": 0.5336533751347114,
      "eval_loss": 0.04068835824728012,
      "eval_precision": 0.7496559317368566,
      "eval_recall": 0.4142835412229997,
      "eval_runtime": 14.2131,
      "eval_samples_per_second": 570.6,
      "eval_steps_per_second": 2.251,
      "step": 7000
    },
    {
      "epoch": 3.696402168555939,
      "grad_norm": 0.3296278417110443,
      "learning_rate": 1.9260719566288815e-05,
      "loss": 0.0379,
      "step": 7500
    },
    {
      "epoch": 3.9428289797930014,
      "grad_norm": 0.22207264602184296,
      "learning_rate": 1.92114342040414e-05,
      "loss": 0.0376,
      "step": 8000
    },
    {
      "epoch": 3.9428289797930014,
      "eval_accuracy": 0.986619329053106,
      "eval_f1": 0.563381584423993,
      "eval_loss": 0.03988400474190712,
      "eval_precision": 0.7187020648967551,
      "eval_recall": 0.463264374809857,
      "eval_runtime": 14.2691,
      "eval_samples_per_second": 568.36,
      "eval_steps_per_second": 2.243,
      "step": 8000
    },
    {
      "epoch": 4.189255791030064,
      "grad_norm": 0.22761479020118713,
      "learning_rate": 1.916214884179399e-05,
      "loss": 0.034,
      "step": 8500
    },
    {
      "epoch": 4.435682602267127,
      "grad_norm": 0.4061233699321747,
      "learning_rate": 1.9112863479546578e-05,
      "loss": 0.0339,
      "step": 9000
    },
    {
      "epoch": 4.435682602267127,
      "eval_accuracy": 0.9869708179202631,
      "eval_f1": 0.5653222374580359,
      "eval_loss": 0.038919780403375626,
      "eval_precision": 0.7471566054243219,
      "eval_recall": 0.4546699117736538,
      "eval_runtime": 14.2717,
      "eval_samples_per_second": 568.258,
      "eval_steps_per_second": 2.242,
      "step": 9000
    },
    {
      "epoch": 4.68210941350419,
      "grad_norm": 0.31547975540161133,
      "learning_rate": 1.9063578117299164e-05,
      "loss": 0.0328,
      "step": 9500
    },
    {
      "epoch": 4.928536224741252,
      "grad_norm": 0.307452529668808,
      "learning_rate": 1.9014292755051752e-05,
      "loss": 0.0337,
      "step": 10000
    },
    {
      "epoch": 4.928536224741252,
      "eval_accuracy": 0.9873194721997818,
      "eval_f1": 0.581505215398288,
      "eval_loss": 0.038509998470544815,
      "eval_precision": 0.7551937796136557,
      "eval_recall": 0.472771524186188,
      "eval_runtime": 14.3086,
      "eval_samples_per_second": 566.79,
      "eval_steps_per_second": 2.236,
      "step": 10000
    },
    {
      "epoch": 5.1749630359783145,
      "grad_norm": 0.3740977942943573,
      "learning_rate": 1.8965007392804338e-05,
      "loss": 0.0303,
      "step": 10500
    },
    {
      "epoch": 5.421389847215377,
      "grad_norm": 0.37522149085998535,
      "learning_rate": 1.891582060128142e-05,
      "loss": 0.0295,
      "step": 11000
    },
    {
      "epoch": 5.421389847215377,
      "eval_accuracy": 0.9872060886942472,
      "eval_f1": 0.6024223739264479,
      "eval_loss": 0.03767974302172661,
      "eval_precision": 0.7156011300617349,
      "eval_recall": 0.5201551566778218,
      "eval_runtime": 14.3451,
      "eval_samples_per_second": 565.349,
      "eval_steps_per_second": 2.231,
      "step": 11000
    },
    {
      "epoch": 5.667816658452439,
      "grad_norm": 0.3089052736759186,
      "learning_rate": 1.886653523903401e-05,
      "loss": 0.029,
      "step": 11500
    },
    {
      "epoch": 5.914243469689502,
      "grad_norm": 0.3211393356323242,
      "learning_rate": 1.8817249876786596e-05,
      "loss": 0.0305,
      "step": 12000
    },
    {
      "epoch": 5.914243469689502,
      "eval_accuracy": 0.9873534872514421,
      "eval_f1": 0.5992364697956434,
      "eval_loss": 0.03828778117895126,
      "eval_precision": 0.7317099923220357,
      "eval_recall": 0.5073775479160328,
      "eval_runtime": 14.3412,
      "eval_samples_per_second": 565.503,
      "eval_steps_per_second": 2.231,
      "step": 12000
    },
    {
      "epoch": 6.160670280926565,
      "grad_norm": 0.16942404210567474,
      "learning_rate": 1.8767964514539182e-05,
      "loss": 0.0275,
      "step": 12500
    },
    {
      "epoch": 6.407097092163627,
      "grad_norm": 0.3766990303993225,
      "learning_rate": 1.8718777723016266e-05,
      "loss": 0.0254,
      "step": 13000
    },
    {
      "epoch": 6.407097092163627,
      "eval_accuracy": 0.9875646640305001,
      "eval_f1": 0.6140921885995778,
      "eval_loss": 0.03750571236014366,
      "eval_precision": 0.7280976220275344,
      "eval_recall": 0.5309552783693338,
      "eval_runtime": 14.3059,
      "eval_samples_per_second": 566.897,
      "eval_steps_per_second": 2.237,
      "step": 13000
    },
    {
      "epoch": 6.65352390340069,
      "grad_norm": 0.5648388266563416,
      "learning_rate": 1.8669492360768852e-05,
      "loss": 0.0264,
      "step": 13500
    },
    {
      "epoch": 6.899950714637752,
      "grad_norm": 0.4413442015647888,
      "learning_rate": 1.862020699852144e-05,
      "loss": 0.0273,
      "step": 14000
    },
    {
      "epoch": 6.899950714637752,
      "eval_accuracy": 0.9876582054225661,
      "eval_f1": 0.6163200563976031,
      "eval_loss": 0.03790096566081047,
      "eval_precision": 0.7325094260578131,
      "eval_recall": 0.5319440219044722,
      "eval_runtime": 14.1405,
      "eval_samples_per_second": 573.531,
      "eval_steps_per_second": 2.263,
      "step": 14000
    },
    {
      "epoch": 7.146377525874815,
      "grad_norm": 0.36695852875709534,
      "learning_rate": 1.857092163627403e-05,
      "loss": 0.0234,
      "step": 14500
    },
    {
      "epoch": 7.392804337111878,
      "grad_norm": 0.36567145586013794,
      "learning_rate": 1.852173484475111e-05,
      "loss": 0.0228,
      "step": 15000
    },
    {
      "epoch": 7.392804337111878,
      "eval_accuracy": 0.9877120625876951,
      "eval_f1": 0.6165074309978769,
      "eval_loss": 0.03786341845989227,
      "eval_precision": 0.7366807610993658,
      "eval_recall": 0.530042592029206,
      "eval_runtime": 14.1212,
      "eval_samples_per_second": 574.314,
      "eval_steps_per_second": 2.266,
      "step": 15000
    },
    {
      "epoch": 7.639231148348941,
      "grad_norm": 0.5829640626907349,
      "learning_rate": 1.84724494825037e-05,
      "loss": 0.0236,
      "step": 15500
    },
    {
      "epoch": 7.885657959586003,
      "grad_norm": 0.470248281955719,
      "learning_rate": 1.8423164120256285e-05,
      "loss": 0.0235,
      "step": 16000
    },
    {
      "epoch": 7.885657959586003,
      "eval_accuracy": 0.9873563218390805,
      "eval_f1": 0.629849383842994,
      "eval_loss": 0.03791099041700363,
      "eval_precision": 0.6929608326485894,
      "eval_recall": 0.5772741101308184,
      "eval_runtime": 14.1983,
      "eval_samples_per_second": 571.194,
      "eval_steps_per_second": 2.254,
      "step": 16000
    },
    {
      "epoch": 8.132084770823065,
      "grad_norm": 0.48524007201194763,
      "learning_rate": 1.8373878758008873e-05,
      "loss": 0.0217,
      "step": 16500
    },
    {
      "epoch": 8.378511582060128,
      "grad_norm": 0.3671877384185791,
      "learning_rate": 1.8324593395761462e-05,
      "loss": 0.0208,
      "step": 17000
    },
    {
      "epoch": 8.378511582060128,
      "eval_accuracy": 0.9877219836444293,
      "eval_f1": 0.6341484015372271,
      "eval_loss": 0.037882279604673386,
      "eval_precision": 0.7129427404804862,
      "eval_recall": 0.5710374201399452,
      "eval_runtime": 14.1368,
      "eval_samples_per_second": 573.681,
      "eval_steps_per_second": 2.264,
      "step": 17000
    },
    {
      "epoch": 8.62493839329719,
      "grad_norm": 0.2823368012905121,
      "learning_rate": 1.8275308033514047e-05,
      "loss": 0.0202,
      "step": 17500
    },
    {
      "epoch": 8.871365204534253,
      "grad_norm": 0.374859094619751,
      "learning_rate": 1.8226121241991132e-05,
      "loss": 0.0204,
      "step": 18000
    },
    {
      "epoch": 8.871365204534253,
      "eval_accuracy": 0.987852374675794,
      "eval_f1": 0.6380948359582823,
      "eval_loss": 0.03811024874448776,
      "eval_precision": 0.7172282866635026,
      "eval_recall": 0.5746881655004563,
      "eval_runtime": 14.2361,
      "eval_samples_per_second": 569.679,
      "eval_steps_per_second": 2.248,
      "step": 18000
    }
  ],
  "logging_steps": 500,
  "max_steps": 202900,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 100,
  "save_steps": 1000,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 408291319078542.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}