{
  "best_metric": 1.1020288467407227,
  "best_model_checkpoint": "miner_id_24/checkpoint-75",
  "epoch": 0.38628681796233705,
  "eval_steps": 25,
  "global_step": 75,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.005150490906164494,
      "grad_norm": 1.6126657724380493,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 1.941,
      "step": 1
    },
    {
      "epoch": 0.005150490906164494,
      "eval_loss": 1.8850927352905273,
      "eval_runtime": 4.0033,
      "eval_samples_per_second": 12.49,
      "eval_steps_per_second": 3.247,
      "step": 1
    },
    {
      "epoch": 0.010300981812328988,
      "grad_norm": 1.6743135452270508,
      "learning_rate": 6.666666666666667e-05,
      "loss": 1.7523,
      "step": 2
    },
    {
      "epoch": 0.01545147271849348,
      "grad_norm": 1.6910467147827148,
      "learning_rate": 0.0001,
      "loss": 1.7829,
      "step": 3
    },
    {
      "epoch": 0.020601963624657976,
      "grad_norm": 1.367571234703064,
      "learning_rate": 9.99571699711836e-05,
      "loss": 1.5894,
      "step": 4
    },
    {
      "epoch": 0.02575245453082247,
      "grad_norm": 1.495391607284546,
      "learning_rate": 9.982876141412856e-05,
      "loss": 1.5111,
      "step": 5
    },
    {
      "epoch": 0.03090294543698696,
      "grad_norm": 1.160764455795288,
      "learning_rate": 9.961501876182148e-05,
      "loss": 1.4233,
      "step": 6
    },
    {
      "epoch": 0.03605343634315146,
      "grad_norm": 0.8878348469734192,
      "learning_rate": 9.931634888554937e-05,
      "loss": 1.348,
      "step": 7
    },
    {
      "epoch": 0.04120392724931595,
      "grad_norm": 0.9640621542930603,
      "learning_rate": 9.893332032039701e-05,
      "loss": 1.3584,
      "step": 8
    },
    {
      "epoch": 0.04635441815548044,
      "grad_norm": 0.9921740889549255,
      "learning_rate": 9.846666218300807e-05,
      "loss": 1.2667,
      "step": 9
    },
    {
      "epoch": 0.05150490906164494,
      "grad_norm": 0.9959519505500793,
      "learning_rate": 9.791726278367022e-05,
      "loss": 1.1275,
      "step": 10
    },
    {
      "epoch": 0.05665539996780943,
      "grad_norm": 1.0067778825759888,
      "learning_rate": 9.728616793536588e-05,
      "loss": 1.0443,
      "step": 11
    },
    {
      "epoch": 0.06180589087397392,
      "grad_norm": 1.338114857673645,
      "learning_rate": 9.657457896300791e-05,
      "loss": 1.0808,
      "step": 12
    },
    {
      "epoch": 0.06695638178013842,
      "grad_norm": 1.3758338689804077,
      "learning_rate": 9.578385041664925e-05,
      "loss": 1.619,
      "step": 13
    },
    {
      "epoch": 0.07210687268630292,
      "grad_norm": 0.9469649195671082,
      "learning_rate": 9.491548749301997e-05,
      "loss": 1.4238,
      "step": 14
    },
    {
      "epoch": 0.0772573635924674,
      "grad_norm": 0.6833482384681702,
      "learning_rate": 9.397114317029975e-05,
      "loss": 1.3451,
      "step": 15
    },
    {
      "epoch": 0.0824078544986319,
      "grad_norm": 0.8655621409416199,
      "learning_rate": 9.295261506157986e-05,
      "loss": 1.3856,
      "step": 16
    },
    {
      "epoch": 0.0875583454047964,
      "grad_norm": 0.7689515948295593,
      "learning_rate": 9.186184199300464e-05,
      "loss": 1.2611,
      "step": 17
    },
    {
      "epoch": 0.09270883631096088,
      "grad_norm": 0.7660016417503357,
      "learning_rate": 9.070090031310558e-05,
      "loss": 1.236,
      "step": 18
    },
    {
      "epoch": 0.09785932721712538,
      "grad_norm": 0.7246228456497192,
      "learning_rate": 8.947199994035401e-05,
      "loss": 1.1257,
      "step": 19
    },
    {
      "epoch": 0.10300981812328988,
      "grad_norm": 0.791655421257019,
      "learning_rate": 8.817748015645558e-05,
      "loss": 1.1965,
      "step": 20
    },
    {
      "epoch": 0.10816030902945437,
      "grad_norm": 0.8405879735946655,
      "learning_rate": 8.681980515339464e-05,
      "loss": 1.2137,
      "step": 21
    },
    {
      "epoch": 0.11331079993561886,
      "grad_norm": 0.7571701407432556,
      "learning_rate": 8.540155934270471e-05,
      "loss": 1.0821,
      "step": 22
    },
    {
      "epoch": 0.11846129084178336,
      "grad_norm": 0.9514710903167725,
      "learning_rate": 8.392544243589427e-05,
      "loss": 1.0548,
      "step": 23
    },
    {
      "epoch": 0.12361178174794785,
      "grad_norm": 1.1480365991592407,
      "learning_rate": 8.239426430539243e-05,
      "loss": 1.0676,
      "step": 24
    },
    {
      "epoch": 0.12876227265411236,
      "grad_norm": 1.1530849933624268,
      "learning_rate": 8.081093963579707e-05,
      "loss": 0.959,
      "step": 25
    },
    {
      "epoch": 0.12876227265411236,
      "eval_loss": 1.2043055295944214,
      "eval_runtime": 4.0623,
      "eval_samples_per_second": 12.308,
      "eval_steps_per_second": 3.2,
      "step": 25
    },
    {
      "epoch": 0.13391276356027684,
      "grad_norm": 0.6668666005134583,
      "learning_rate": 7.917848237560709e-05,
      "loss": 1.4982,
      "step": 26
    },
    {
      "epoch": 0.13906325446644133,
      "grad_norm": 0.6831625699996948,
      "learning_rate": 7.75e-05,
      "loss": 1.3068,
      "step": 27
    },
    {
      "epoch": 0.14421374537260584,
      "grad_norm": 0.654782772064209,
      "learning_rate": 7.577868759557654e-05,
      "loss": 1.3077,
      "step": 28
    },
    {
      "epoch": 0.14936423627877032,
      "grad_norm": 0.6191349625587463,
      "learning_rate": 7.401782177833148e-05,
      "loss": 1.3137,
      "step": 29
    },
    {
      "epoch": 0.1545147271849348,
      "grad_norm": 0.6687305569648743,
      "learning_rate": 7.222075445642904e-05,
      "loss": 1.1626,
      "step": 30
    },
    {
      "epoch": 0.15966521809109932,
      "grad_norm": 0.7296125888824463,
      "learning_rate": 7.03909064496551e-05,
      "loss": 1.1292,
      "step": 31
    },
    {
      "epoch": 0.1648157089972638,
      "grad_norm": 0.7525869607925415,
      "learning_rate": 6.853176097769229e-05,
      "loss": 1.0622,
      "step": 32
    },
    {
      "epoch": 0.1699661999034283,
      "grad_norm": 0.7842842936515808,
      "learning_rate": 6.664685702961344e-05,
      "loss": 1.0998,
      "step": 33
    },
    {
      "epoch": 0.1751166908095928,
      "grad_norm": 0.7670237421989441,
      "learning_rate": 6.473978262721463e-05,
      "loss": 1.0795,
      "step": 34
    },
    {
      "epoch": 0.18026718171575729,
      "grad_norm": 0.8848092555999756,
      "learning_rate": 6.281416799501188e-05,
      "loss": 1.0421,
      "step": 35
    },
    {
      "epoch": 0.18541767262192177,
      "grad_norm": 0.9541935324668884,
      "learning_rate": 6.087367864990233e-05,
      "loss": 0.9614,
      "step": 36
    },
    {
      "epoch": 0.19056816352808628,
      "grad_norm": 0.9986409544944763,
      "learning_rate": 5.8922008423644624e-05,
      "loss": 0.9052,
      "step": 37
    },
    {
      "epoch": 0.19571865443425077,
      "grad_norm": 0.7817866206169128,
      "learning_rate": 5.696287243144013e-05,
      "loss": 1.4343,
      "step": 38
    },
    {
      "epoch": 0.20086914534041525,
      "grad_norm": 0.632110059261322,
      "learning_rate": 5.500000000000001e-05,
      "loss": 1.3297,
      "step": 39
    },
    {
      "epoch": 0.20601963624657976,
      "grad_norm": 0.5873474478721619,
      "learning_rate": 5.303712756855988e-05,
      "loss": 1.225,
      "step": 40
    },
    {
      "epoch": 0.21117012715274425,
      "grad_norm": 0.5888330936431885,
      "learning_rate": 5.107799157635538e-05,
      "loss": 1.2015,
      "step": 41
    },
    {
      "epoch": 0.21632061805890873,
      "grad_norm": 0.6649183034896851,
      "learning_rate": 4.912632135009769e-05,
      "loss": 1.1975,
      "step": 42
    },
    {
      "epoch": 0.22147110896507324,
      "grad_norm": 0.7081603407859802,
      "learning_rate": 4.718583200498814e-05,
      "loss": 1.1672,
      "step": 43
    },
    {
      "epoch": 0.22662159987123773,
      "grad_norm": 0.7628908753395081,
      "learning_rate": 4.526021737278538e-05,
      "loss": 1.1333,
      "step": 44
    },
    {
      "epoch": 0.2317720907774022,
      "grad_norm": 0.699059009552002,
      "learning_rate": 4.3353142970386564e-05,
      "loss": 1.1043,
      "step": 45
    },
    {
      "epoch": 0.23692258168356672,
      "grad_norm": 0.7549357414245605,
      "learning_rate": 4.146823902230772e-05,
      "loss": 1.0802,
      "step": 46
    },
    {
      "epoch": 0.2420730725897312,
      "grad_norm": 0.7739465236663818,
      "learning_rate": 3.960909355034491e-05,
      "loss": 1.0429,
      "step": 47
    },
    {
      "epoch": 0.2472235634958957,
      "grad_norm": 0.8181856274604797,
      "learning_rate": 3.777924554357096e-05,
      "loss": 0.8891,
      "step": 48
    },
    {
      "epoch": 0.2523740544020602,
      "grad_norm": 0.9725480079650879,
      "learning_rate": 3.598217822166854e-05,
      "loss": 0.9871,
      "step": 49
    },
    {
      "epoch": 0.2575245453082247,
      "grad_norm": 1.2461752891540527,
      "learning_rate": 3.422131240442349e-05,
      "loss": 0.8804,
      "step": 50
    },
    {
      "epoch": 0.2575245453082247,
      "eval_loss": 1.1305104494094849,
      "eval_runtime": 4.0432,
      "eval_samples_per_second": 12.366,
      "eval_steps_per_second": 3.215,
      "step": 50
    },
    {
      "epoch": 0.2626750362143892,
      "grad_norm": 0.6626567244529724,
      "learning_rate": 3.250000000000001e-05,
      "loss": 1.4105,
      "step": 51
    },
    {
      "epoch": 0.2678255271205537,
      "grad_norm": 0.6616899371147156,
      "learning_rate": 3.082151762439293e-05,
      "loss": 1.2103,
      "step": 52
    },
    {
      "epoch": 0.27297601802671817,
      "grad_norm": 0.6504210233688354,
      "learning_rate": 2.9189060364202943e-05,
      "loss": 1.1649,
      "step": 53
    },
    {
      "epoch": 0.27812650893288265,
      "grad_norm": 0.6488938331604004,
      "learning_rate": 2.760573569460757e-05,
      "loss": 1.2075,
      "step": 54
    },
    {
      "epoch": 0.28327699983904714,
      "grad_norm": 0.6831638813018799,
      "learning_rate": 2.6074557564105727e-05,
      "loss": 1.238,
      "step": 55
    },
    {
      "epoch": 0.2884274907452117,
      "grad_norm": 0.7102789878845215,
      "learning_rate": 2.459844065729529e-05,
      "loss": 1.1096,
      "step": 56
    },
    {
      "epoch": 0.29357798165137616,
      "grad_norm": 0.697617769241333,
      "learning_rate": 2.3180194846605367e-05,
      "loss": 1.0315,
      "step": 57
    },
    {
      "epoch": 0.29872847255754065,
      "grad_norm": 0.789955735206604,
      "learning_rate": 2.1822519843544424e-05,
      "loss": 1.0317,
      "step": 58
    },
    {
      "epoch": 0.30387896346370513,
      "grad_norm": 0.8065967559814453,
      "learning_rate": 2.0528000059645997e-05,
      "loss": 1.0469,
      "step": 59
    },
    {
      "epoch": 0.3090294543698696,
      "grad_norm": 0.7964063286781311,
      "learning_rate": 1.9299099686894423e-05,
      "loss": 0.9775,
      "step": 60
    },
    {
      "epoch": 0.3141799452760341,
      "grad_norm": 0.8694634437561035,
      "learning_rate": 1.8138158006995364e-05,
      "loss": 0.8733,
      "step": 61
    },
    {
      "epoch": 0.31933043618219864,
      "grad_norm": 0.9869593381881714,
      "learning_rate": 1.7047384938420154e-05,
      "loss": 0.9035,
      "step": 62
    },
    {
      "epoch": 0.3244809270883631,
      "grad_norm": 0.5871992707252502,
      "learning_rate": 1.602885682970026e-05,
      "loss": 1.3583,
      "step": 63
    },
    {
      "epoch": 0.3296314179945276,
      "grad_norm": 0.5578927397727966,
      "learning_rate": 1.5084512506980026e-05,
      "loss": 1.3325,
      "step": 64
    },
    {
      "epoch": 0.3347819089006921,
      "grad_norm": 0.587887704372406,
      "learning_rate": 1.4216149583350754e-05,
      "loss": 1.3098,
      "step": 65
    },
    {
      "epoch": 0.3399323998068566,
      "grad_norm": 0.6099755764007568,
      "learning_rate": 1.3425421036992098e-05,
      "loss": 1.2052,
      "step": 66
    },
    {
      "epoch": 0.34508289071302106,
      "grad_norm": 0.6458743810653687,
      "learning_rate": 1.2713832064634126e-05,
      "loss": 1.2037,
      "step": 67
    },
    {
      "epoch": 0.3502333816191856,
      "grad_norm": 0.6708971261978149,
      "learning_rate": 1.2082737216329794e-05,
      "loss": 1.1093,
      "step": 68
    },
    {
      "epoch": 0.3553838725253501,
      "grad_norm": 0.6868105530738831,
      "learning_rate": 1.1533337816991932e-05,
      "loss": 1.1062,
      "step": 69
    },
    {
      "epoch": 0.36053436343151457,
      "grad_norm": 0.7059977650642395,
      "learning_rate": 1.1066679679603e-05,
      "loss": 1.1001,
      "step": 70
    },
    {
      "epoch": 0.36568485433767906,
      "grad_norm": 0.7179769277572632,
      "learning_rate": 1.0683651114450641e-05,
      "loss": 0.9938,
      "step": 71
    },
    {
      "epoch": 0.37083534524384354,
      "grad_norm": 0.7872999310493469,
      "learning_rate": 1.0384981238178534e-05,
      "loss": 1.0442,
      "step": 72
    },
    {
      "epoch": 0.375985836150008,
      "grad_norm": 0.914609968662262,
      "learning_rate": 1.017123858587145e-05,
      "loss": 0.8831,
      "step": 73
    },
    {
      "epoch": 0.38113632705617256,
      "grad_norm": 0.9258915185928345,
      "learning_rate": 1.00428300288164e-05,
      "loss": 0.8793,
      "step": 74
    },
    {
      "epoch": 0.38628681796233705,
      "grad_norm": 1.1383943557739258,
      "learning_rate": 1e-05,
      "loss": 0.841,
      "step": 75
    },
    {
      "epoch": 0.38628681796233705,
      "eval_loss": 1.1020288467407227,
      "eval_runtime": 4.0849,
      "eval_samples_per_second": 12.24,
      "eval_steps_per_second": 3.182,
      "step": 75
    }
  ],
  "logging_steps": 1,
  "max_steps": 75,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 9.261719016701952e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}