{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 555,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.018026137899954935,
      "grad_norm": 16.059541702270508,
      "learning_rate": 4.9548736462093865e-05,
      "loss": 0.6728,
      "step": 10
    },
    {
      "epoch": 0.03605227579990987,
      "grad_norm": 22.368194580078125,
      "learning_rate": 4.909747292418773e-05,
      "loss": 0.5752,
      "step": 20
    },
    {
      "epoch": 0.054078413699864804,
      "grad_norm": 21.088069915771484,
      "learning_rate": 4.864620938628159e-05,
      "loss": 0.5414,
      "step": 30
    },
    {
      "epoch": 0.07210455159981974,
      "grad_norm": 17.759164810180664,
      "learning_rate": 4.819494584837546e-05,
      "loss": 0.5483,
      "step": 40
    },
    {
      "epoch": 0.09013068949977468,
      "grad_norm": 35.45125961303711,
      "learning_rate": 4.7743682310469314e-05,
      "loss": 0.5284,
      "step": 50
    },
    {
      "epoch": 0.10815682739972961,
      "grad_norm": 34.69957733154297,
      "learning_rate": 4.7292418772563177e-05,
      "loss": 0.4792,
      "step": 60
    },
    {
      "epoch": 0.12618296529968454,
      "grad_norm": 31.315162658691406,
      "learning_rate": 4.684115523465704e-05,
      "loss": 0.505,
      "step": 70
    },
    {
      "epoch": 0.14420910319963948,
      "grad_norm": 28.01712417602539,
      "learning_rate": 4.63898916967509e-05,
      "loss": 0.4575,
      "step": 80
    },
    {
      "epoch": 0.16223524109959442,
      "grad_norm": 22.80365562438965,
      "learning_rate": 4.5938628158844764e-05,
      "loss": 0.4678,
      "step": 90
    },
    {
      "epoch": 0.18026137899954936,
      "grad_norm": 38.55846405029297,
      "learning_rate": 4.548736462093863e-05,
      "loss": 0.4724,
      "step": 100
    },
    {
      "epoch": 0.19828751689950427,
      "grad_norm": 29.91495704650879,
      "learning_rate": 4.5036101083032495e-05,
      "loss": 0.4891,
      "step": 110
    },
    {
      "epoch": 0.21631365479945922,
      "grad_norm": 26.530668258666992,
      "learning_rate": 4.458483754512636e-05,
      "loss": 0.4578,
      "step": 120
    },
    {
      "epoch": 0.23433979269941416,
      "grad_norm": 53.58538055419922,
      "learning_rate": 4.413357400722022e-05,
      "loss": 0.4735,
      "step": 130
    },
    {
      "epoch": 0.25236593059936907,
      "grad_norm": 22.026208877563477,
      "learning_rate": 4.368231046931408e-05,
      "loss": 0.4743,
      "step": 140
    },
    {
      "epoch": 0.270392068499324,
      "grad_norm": 28.057222366333008,
      "learning_rate": 4.3231046931407945e-05,
      "loss": 0.4559,
      "step": 150
    },
    {
      "epoch": 0.28841820639927895,
      "grad_norm": 22.690412521362305,
      "learning_rate": 4.277978339350181e-05,
      "loss": 0.4638,
      "step": 160
    },
    {
      "epoch": 0.3064443442992339,
      "grad_norm": 21.292953491210938,
      "learning_rate": 4.232851985559567e-05,
      "loss": 0.4877,
      "step": 170
    },
    {
      "epoch": 0.32447048219918884,
      "grad_norm": 36.56389236450195,
      "learning_rate": 4.187725631768953e-05,
      "loss": 0.4927,
      "step": 180
    },
    {
      "epoch": 0.3424966200991438,
      "grad_norm": 17.6762752532959,
      "learning_rate": 4.1425992779783394e-05,
      "loss": 0.491,
      "step": 190
    },
    {
      "epoch": 0.3605227579990987,
      "grad_norm": 19.962385177612305,
      "learning_rate": 4.0974729241877256e-05,
      "loss": 0.4428,
      "step": 200
    },
    {
      "epoch": 0.3785488958990536,
      "grad_norm": 23.71368408203125,
      "learning_rate": 4.052346570397112e-05,
      "loss": 0.4318,
      "step": 210
    },
    {
      "epoch": 0.39657503379900855,
      "grad_norm": 24.030736923217773,
      "learning_rate": 4.007220216606498e-05,
      "loss": 0.4532,
      "step": 220
    },
    {
      "epoch": 0.4146011716989635,
      "grad_norm": 23.85675811767578,
      "learning_rate": 3.962093862815885e-05,
      "loss": 0.4427,
      "step": 230
    },
    {
      "epoch": 0.43262730959891843,
      "grad_norm": 32.091190338134766,
      "learning_rate": 3.916967509025271e-05,
      "loss": 0.4432,
      "step": 240
    },
    {
      "epoch": 0.45065344749887337,
      "grad_norm": 32.51976776123047,
      "learning_rate": 3.8718411552346575e-05,
      "loss": 0.4289,
      "step": 250
    },
    {
      "epoch": 0.4686795853988283,
      "grad_norm": 17.93523406982422,
      "learning_rate": 3.826714801444044e-05,
      "loss": 0.4485,
      "step": 260
    },
    {
      "epoch": 0.48670572329878325,
      "grad_norm": 34.29135513305664,
      "learning_rate": 3.78158844765343e-05,
      "loss": 0.4373,
      "step": 270
    },
    {
      "epoch": 0.5047318611987381,
      "grad_norm": 21.879440307617188,
      "learning_rate": 3.7364620938628155e-05,
      "loss": 0.4367,
      "step": 280
    },
    {
      "epoch": 0.5227579990986931,
      "grad_norm": 16.587345123291016,
      "learning_rate": 3.6913357400722025e-05,
      "loss": 0.4476,
      "step": 290
    },
    {
      "epoch": 0.540784136998648,
      "grad_norm": 21.923376083374023,
      "learning_rate": 3.646209386281589e-05,
      "loss": 0.4186,
      "step": 300
    },
    {
      "epoch": 0.558810274898603,
      "grad_norm": 25.472400665283203,
      "learning_rate": 3.601083032490975e-05,
      "loss": 0.4317,
      "step": 310
    },
    {
      "epoch": 0.5768364127985579,
      "grad_norm": 36.07789611816406,
      "learning_rate": 3.555956678700361e-05,
      "loss": 0.4281,
      "step": 320
    },
    {
      "epoch": 0.5948625506985128,
      "grad_norm": 32.51295852661133,
      "learning_rate": 3.5108303249097474e-05,
      "loss": 0.4391,
      "step": 330
    },
    {
      "epoch": 0.6128886885984678,
      "grad_norm": 19.069826126098633,
      "learning_rate": 3.4657039711191336e-05,
      "loss": 0.4249,
      "step": 340
    },
    {
      "epoch": 0.6309148264984227,
      "grad_norm": 22.865673065185547,
      "learning_rate": 3.42057761732852e-05,
      "loss": 0.4394,
      "step": 350
    },
    {
      "epoch": 0.6489409643983777,
      "grad_norm": 35.98612594604492,
      "learning_rate": 3.375451263537907e-05,
      "loss": 0.407,
      "step": 360
    },
    {
      "epoch": 0.6669671022983326,
      "grad_norm": 19.89366340637207,
      "learning_rate": 3.330324909747293e-05,
      "loss": 0.434,
      "step": 370
    },
    {
      "epoch": 0.6849932401982876,
      "grad_norm": 20.73752212524414,
      "learning_rate": 3.2851985559566786e-05,
      "loss": 0.3815,
      "step": 380
    },
    {
      "epoch": 0.7030193780982424,
      "grad_norm": 20.23769187927246,
      "learning_rate": 3.240072202166065e-05,
      "loss": 0.4159,
      "step": 390
    },
    {
      "epoch": 0.7210455159981974,
      "grad_norm": 40.59239196777344,
      "learning_rate": 3.194945848375451e-05,
      "loss": 0.4582,
      "step": 400
    },
    {
      "epoch": 0.7390716538981523,
      "grad_norm": 22.728296279907227,
      "learning_rate": 3.149819494584837e-05,
      "loss": 0.4387,
      "step": 410
    },
    {
      "epoch": 0.7570977917981072,
      "grad_norm": 20.43068504333496,
      "learning_rate": 3.104693140794224e-05,
      "loss": 0.4302,
      "step": 420
    },
    {
      "epoch": 0.7751239296980622,
      "grad_norm": 30.140209197998047,
      "learning_rate": 3.0595667870036104e-05,
      "loss": 0.3858,
      "step": 430
    },
    {
      "epoch": 0.7931500675980171,
      "grad_norm": 15.338220596313477,
      "learning_rate": 3.0144404332129967e-05,
      "loss": 0.4634,
      "step": 440
    },
    {
      "epoch": 0.8111762054979721,
      "grad_norm": 24.517107009887695,
      "learning_rate": 2.969314079422383e-05,
      "loss": 0.4079,
      "step": 450
    },
    {
      "epoch": 0.829202343397927,
      "grad_norm": 25.604442596435547,
      "learning_rate": 2.924187725631769e-05,
      "loss": 0.3903,
      "step": 460
    },
    {
      "epoch": 0.847228481297882,
      "grad_norm": 26.43514060974121,
      "learning_rate": 2.879061371841155e-05,
      "loss": 0.4037,
      "step": 470
    },
    {
      "epoch": 0.8652546191978369,
      "grad_norm": 18.30228042602539,
      "learning_rate": 2.8339350180505413e-05,
      "loss": 0.4374,
      "step": 480
    },
    {
      "epoch": 0.8832807570977917,
      "grad_norm": 16.212318420410156,
      "learning_rate": 2.7888086642599282e-05,
      "loss": 0.4219,
      "step": 490
    },
    {
      "epoch": 0.9013068949977467,
      "grad_norm": 23.251705169677734,
      "learning_rate": 2.7436823104693144e-05,
      "loss": 0.3784,
      "step": 500
    },
    {
      "epoch": 0.9193330328977016,
      "grad_norm": 37.26618194580078,
      "learning_rate": 2.6985559566787007e-05,
      "loss": 0.4144,
      "step": 510
    },
    {
      "epoch": 0.9373591707976566,
      "grad_norm": 22.583784103393555,
      "learning_rate": 2.6534296028880866e-05,
      "loss": 0.4123,
      "step": 520
    },
    {
      "epoch": 0.9553853086976115,
      "grad_norm": 18.812397003173828,
      "learning_rate": 2.6083032490974728e-05,
      "loss": 0.422,
      "step": 530
    },
    {
      "epoch": 0.9734114465975665,
      "grad_norm": 20.292518615722656,
      "learning_rate": 2.563176895306859e-05,
      "loss": 0.4346,
      "step": 540
    },
    {
      "epoch": 0.9914375844975214,
      "grad_norm": 25.314495086669922,
      "learning_rate": 2.518050541516246e-05,
      "loss": 0.393,
      "step": 550
    },
    {
      "epoch": 1.0,
      "eval_f1": 0.8425531914893617,
      "eval_fn": 307,
      "eval_fp": 433,
      "eval_loss": 0.36401602625846863,
      "eval_precision": 0.8205553253211769,
      "eval_recall": 0.8657630083078268,
      "eval_runtime": 13.0048,
      "eval_samples_per_second": 359.329,
      "eval_steps_per_second": 44.983,
      "eval_tn": 1953,
      "eval_tp": 1980,
      "step": 555
    }
  ],
  "logging_steps": 10,
  "max_steps": 1108,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 4670747454750720.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}