{
  "best_metric": 2.5323638916015625,
  "best_model_checkpoint": "miner_id_24/checkpoint-75",
  "epoch": 1.2397330595482545,
  "eval_steps": 25,
  "global_step": 75,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01642710472279261,
      "grad_norm": 0.83213871717453,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 4.1897,
      "step": 1
    },
    {
      "epoch": 0.01642710472279261,
      "eval_loss": 4.481392860412598,
      "eval_runtime": 2.0431,
      "eval_samples_per_second": 24.472,
      "eval_steps_per_second": 6.363,
      "step": 1
    },
    {
      "epoch": 0.03285420944558522,
      "grad_norm": 0.8668023943901062,
      "learning_rate": 6.666666666666667e-05,
      "loss": 4.2593,
      "step": 2
    },
    {
      "epoch": 0.049281314168377825,
      "grad_norm": 0.893962025642395,
      "learning_rate": 0.0001,
      "loss": 4.1721,
      "step": 3
    },
    {
      "epoch": 0.06570841889117043,
      "grad_norm": 0.9961223602294922,
      "learning_rate": 9.99571699711836e-05,
      "loss": 4.157,
      "step": 4
    },
    {
      "epoch": 0.08213552361396304,
      "grad_norm": 1.0611786842346191,
      "learning_rate": 9.982876141412856e-05,
      "loss": 4.1331,
      "step": 5
    },
    {
      "epoch": 0.09856262833675565,
      "grad_norm": 1.086363673210144,
      "learning_rate": 9.961501876182148e-05,
      "loss": 4.0068,
      "step": 6
    },
    {
      "epoch": 0.11498973305954825,
      "grad_norm": 1.2030413150787354,
      "learning_rate": 9.931634888554937e-05,
      "loss": 3.7748,
      "step": 7
    },
    {
      "epoch": 0.13141683778234087,
      "grad_norm": 1.2147985696792603,
      "learning_rate": 9.893332032039701e-05,
      "loss": 3.7078,
      "step": 8
    },
    {
      "epoch": 0.14784394250513347,
      "grad_norm": 1.1192842721939087,
      "learning_rate": 9.846666218300807e-05,
      "loss": 3.4211,
      "step": 9
    },
    {
      "epoch": 0.16427104722792607,
      "grad_norm": 1.0096282958984375,
      "learning_rate": 9.791726278367022e-05,
      "loss": 3.4068,
      "step": 10
    },
    {
      "epoch": 0.1806981519507187,
      "grad_norm": 1.0285571813583374,
      "learning_rate": 9.728616793536588e-05,
      "loss": 3.2925,
      "step": 11
    },
    {
      "epoch": 0.1971252566735113,
      "grad_norm": 1.28445303440094,
      "learning_rate": 9.657457896300791e-05,
      "loss": 3.2952,
      "step": 12
    },
    {
      "epoch": 0.2135523613963039,
      "grad_norm": 0.6762813925743103,
      "learning_rate": 9.578385041664925e-05,
      "loss": 3.0429,
      "step": 13
    },
    {
      "epoch": 0.2299794661190965,
      "grad_norm": 0.7080486416816711,
      "learning_rate": 9.491548749301997e-05,
      "loss": 3.2334,
      "step": 14
    },
    {
      "epoch": 0.2464065708418891,
      "grad_norm": 0.7197335362434387,
      "learning_rate": 9.397114317029975e-05,
      "loss": 3.1313,
      "step": 15
    },
    {
      "epoch": 0.26283367556468173,
      "grad_norm": 0.742653489112854,
      "learning_rate": 9.295261506157986e-05,
      "loss": 3.0879,
      "step": 16
    },
    {
      "epoch": 0.2792607802874743,
      "grad_norm": 0.6773972511291504,
      "learning_rate": 9.186184199300464e-05,
      "loss": 2.9061,
      "step": 17
    },
    {
      "epoch": 0.29568788501026694,
      "grad_norm": 0.6461683511734009,
      "learning_rate": 9.070090031310558e-05,
      "loss": 2.8895,
      "step": 18
    },
    {
      "epoch": 0.31211498973305957,
      "grad_norm": 0.5850861072540283,
      "learning_rate": 8.947199994035401e-05,
      "loss": 2.9273,
      "step": 19
    },
    {
      "epoch": 0.32854209445585214,
      "grad_norm": 0.6079311966896057,
      "learning_rate": 8.817748015645558e-05,
      "loss": 2.8932,
      "step": 20
    },
    {
      "epoch": 0.34496919917864477,
      "grad_norm": 0.6674569845199585,
      "learning_rate": 8.681980515339464e-05,
      "loss": 2.65,
      "step": 21
    },
    {
      "epoch": 0.3613963039014374,
      "grad_norm": 0.7020171284675598,
      "learning_rate": 8.540155934270471e-05,
      "loss": 2.79,
      "step": 22
    },
    {
      "epoch": 0.37782340862423,
      "grad_norm": 0.6968078017234802,
      "learning_rate": 8.392544243589427e-05,
      "loss": 2.883,
      "step": 23
    },
    {
      "epoch": 0.3942505133470226,
      "grad_norm": 0.9217126965522766,
      "learning_rate": 8.239426430539243e-05,
      "loss": 2.7329,
      "step": 24
    },
    {
      "epoch": 0.4106776180698152,
      "grad_norm": 1.5251928567886353,
      "learning_rate": 8.081093963579707e-05,
      "loss": 2.4426,
      "step": 25
    },
    {
      "epoch": 0.4106776180698152,
      "eval_loss": 2.718066692352295,
      "eval_runtime": 2.0417,
      "eval_samples_per_second": 24.49,
      "eval_steps_per_second": 6.367,
      "step": 25
    },
    {
      "epoch": 0.4271047227926078,
      "grad_norm": 0.8887903690338135,
      "learning_rate": 7.917848237560709e-05,
      "loss": 3.0674,
      "step": 26
    },
    {
      "epoch": 0.44353182751540043,
      "grad_norm": 0.8925255537033081,
      "learning_rate": 7.75e-05,
      "loss": 2.9501,
      "step": 27
    },
    {
      "epoch": 0.459958932238193,
      "grad_norm": 0.8643532395362854,
      "learning_rate": 7.577868759557654e-05,
      "loss": 2.8978,
      "step": 28
    },
    {
      "epoch": 0.47638603696098564,
      "grad_norm": 0.781319797039032,
      "learning_rate": 7.401782177833148e-05,
      "loss": 3.0056,
      "step": 29
    },
    {
      "epoch": 0.4928131416837782,
      "grad_norm": 0.6614418625831604,
      "learning_rate": 7.222075445642904e-05,
      "loss": 2.794,
      "step": 30
    },
    {
      "epoch": 0.5092402464065708,
      "grad_norm": 0.5951665043830872,
      "learning_rate": 7.03909064496551e-05,
      "loss": 2.8334,
      "step": 31
    },
    {
      "epoch": 0.5256673511293635,
      "grad_norm": 0.5194978713989258,
      "learning_rate": 6.853176097769229e-05,
      "loss": 2.7077,
      "step": 32
    },
    {
      "epoch": 0.5420944558521561,
      "grad_norm": 0.5276866555213928,
      "learning_rate": 6.664685702961344e-05,
      "loss": 2.6171,
      "step": 33
    },
    {
      "epoch": 0.5585215605749486,
      "grad_norm": 0.5625050663948059,
      "learning_rate": 6.473978262721463e-05,
      "loss": 2.549,
      "step": 34
    },
    {
      "epoch": 0.5749486652977412,
      "grad_norm": 0.6556904911994934,
      "learning_rate": 6.281416799501188e-05,
      "loss": 2.7686,
      "step": 35
    },
    {
      "epoch": 0.5913757700205339,
      "grad_norm": 0.8730102181434631,
      "learning_rate": 6.087367864990233e-05,
      "loss": 2.8905,
      "step": 36
    },
    {
      "epoch": 0.6078028747433265,
      "grad_norm": 1.161773681640625,
      "learning_rate": 5.8922008423644624e-05,
      "loss": 2.7177,
      "step": 37
    },
    {
      "epoch": 0.6242299794661191,
      "grad_norm": 0.5960615277290344,
      "learning_rate": 5.696287243144013e-05,
      "loss": 2.7356,
      "step": 38
    },
    {
      "epoch": 0.6406570841889117,
      "grad_norm": 0.5807135105133057,
      "learning_rate": 5.500000000000001e-05,
      "loss": 2.7539,
      "step": 39
    },
    {
      "epoch": 0.6570841889117043,
      "grad_norm": 0.5186903476715088,
      "learning_rate": 5.303712756855988e-05,
      "loss": 2.8117,
      "step": 40
    },
    {
      "epoch": 0.6735112936344969,
      "grad_norm": 0.5063162446022034,
      "learning_rate": 5.107799157635538e-05,
      "loss": 2.9084,
      "step": 41
    },
    {
      "epoch": 0.6899383983572895,
      "grad_norm": 0.5014790892601013,
      "learning_rate": 4.912632135009769e-05,
      "loss": 2.7374,
      "step": 42
    },
    {
      "epoch": 0.7063655030800822,
      "grad_norm": 0.43926894664764404,
      "learning_rate": 4.718583200498814e-05,
      "loss": 2.6996,
      "step": 43
    },
    {
      "epoch": 0.7227926078028748,
      "grad_norm": 0.49418267607688904,
      "learning_rate": 4.526021737278538e-05,
      "loss": 2.6503,
      "step": 44
    },
    {
      "epoch": 0.7392197125256673,
      "grad_norm": 0.5326753258705139,
      "learning_rate": 4.3353142970386564e-05,
      "loss": 2.7032,
      "step": 45
    },
    {
      "epoch": 0.75564681724846,
      "grad_norm": 0.6158781051635742,
      "learning_rate": 4.146823902230772e-05,
      "loss": 2.7546,
      "step": 46
    },
    {
      "epoch": 0.7720739219712526,
      "grad_norm": 0.687337338924408,
      "learning_rate": 3.960909355034491e-05,
      "loss": 2.7108,
      "step": 47
    },
    {
      "epoch": 0.7885010266940452,
      "grad_norm": 0.8074669241905212,
      "learning_rate": 3.777924554357096e-05,
      "loss": 2.735,
      "step": 48
    },
    {
      "epoch": 0.8049281314168378,
      "grad_norm": 1.0225483179092407,
      "learning_rate": 3.598217822166854e-05,
      "loss": 2.5535,
      "step": 49
    },
    {
      "epoch": 0.8213552361396304,
      "grad_norm": 1.648162841796875,
      "learning_rate": 3.422131240442349e-05,
      "loss": 2.565,
      "step": 50
    },
    {
      "epoch": 0.8213552361396304,
      "eval_loss": 2.5712802410125732,
      "eval_runtime": 2.0469,
      "eval_samples_per_second": 24.427,
      "eval_steps_per_second": 6.351,
      "step": 50
    },
    {
      "epoch": 0.837782340862423,
      "grad_norm": 0.6522903442382812,
      "learning_rate": 3.250000000000001e-05,
      "loss": 2.9041,
      "step": 51
    },
    {
      "epoch": 0.8542094455852156,
      "grad_norm": 0.635415256023407,
      "learning_rate": 3.082151762439293e-05,
      "loss": 2.7645,
      "step": 52
    },
    {
      "epoch": 0.8706365503080082,
      "grad_norm": 0.6358153820037842,
      "learning_rate": 2.9189060364202943e-05,
      "loss": 2.756,
      "step": 53
    },
    {
      "epoch": 0.8870636550308009,
      "grad_norm": 0.5567989945411682,
      "learning_rate": 2.760573569460757e-05,
      "loss": 2.6265,
      "step": 54
    },
    {
      "epoch": 0.9034907597535934,
      "grad_norm": 0.5632911324501038,
      "learning_rate": 2.6074557564105727e-05,
      "loss": 2.7177,
      "step": 55
    },
    {
      "epoch": 0.919917864476386,
      "grad_norm": 0.6154216527938843,
      "learning_rate": 2.459844065729529e-05,
      "loss": 2.65,
      "step": 56
    },
    {
      "epoch": 0.9363449691991786,
      "grad_norm": 0.6227327585220337,
      "learning_rate": 2.3180194846605367e-05,
      "loss": 2.6083,
      "step": 57
    },
    {
      "epoch": 0.9527720739219713,
      "grad_norm": 0.5644506812095642,
      "learning_rate": 2.1822519843544424e-05,
      "loss": 2.6311,
      "step": 58
    },
    {
      "epoch": 0.9691991786447639,
      "grad_norm": 0.7271634936332703,
      "learning_rate": 2.0528000059645997e-05,
      "loss": 2.7145,
      "step": 59
    },
    {
      "epoch": 0.9856262833675564,
      "grad_norm": 0.9412290453910828,
      "learning_rate": 1.9299099686894423e-05,
      "loss": 2.5722,
      "step": 60
    },
    {
      "epoch": 1.0097535934291582,
      "grad_norm": 1.2142462730407715,
      "learning_rate": 1.8138158006995364e-05,
      "loss": 4.4605,
      "step": 61
    },
    {
      "epoch": 1.0261806981519508,
      "grad_norm": 0.6724117994308472,
      "learning_rate": 1.7047384938420154e-05,
      "loss": 2.7223,
      "step": 62
    },
    {
      "epoch": 1.0426078028747434,
      "grad_norm": 0.6196894645690918,
      "learning_rate": 1.602885682970026e-05,
      "loss": 2.5821,
      "step": 63
    },
    {
      "epoch": 1.0590349075975358,
      "grad_norm": 0.6005904078483582,
      "learning_rate": 1.5084512506980026e-05,
      "loss": 2.6824,
      "step": 64
    },
    {
      "epoch": 1.0754620123203285,
      "grad_norm": 0.5330093502998352,
      "learning_rate": 1.4216149583350754e-05,
      "loss": 2.5775,
      "step": 65
    },
    {
      "epoch": 1.091889117043121,
      "grad_norm": 0.5218847393989563,
      "learning_rate": 1.3425421036992098e-05,
      "loss": 2.642,
      "step": 66
    },
    {
      "epoch": 1.1083162217659137,
      "grad_norm": 0.5194135904312134,
      "learning_rate": 1.2713832064634126e-05,
      "loss": 2.5922,
      "step": 67
    },
    {
      "epoch": 1.1247433264887063,
      "grad_norm": 0.5434170365333557,
      "learning_rate": 1.2082737216329794e-05,
      "loss": 2.4498,
      "step": 68
    },
    {
      "epoch": 1.141170431211499,
      "grad_norm": 0.5660905838012695,
      "learning_rate": 1.1533337816991932e-05,
      "loss": 2.6018,
      "step": 69
    },
    {
      "epoch": 1.1575975359342916,
      "grad_norm": 0.6597437262535095,
      "learning_rate": 1.1066679679603e-05,
      "loss": 2.5986,
      "step": 70
    },
    {
      "epoch": 1.1740246406570842,
      "grad_norm": 0.8075286746025085,
      "learning_rate": 1.0683651114450641e-05,
      "loss": 2.6485,
      "step": 71
    },
    {
      "epoch": 1.1904517453798769,
      "grad_norm": 1.09507155418396,
      "learning_rate": 1.0384981238178534e-05,
      "loss": 2.577,
      "step": 72
    },
    {
      "epoch": 1.2068788501026695,
      "grad_norm": 0.9469268918037415,
      "learning_rate": 1.017123858587145e-05,
      "loss": 1.7967,
      "step": 73
    },
    {
      "epoch": 1.2233059548254621,
      "grad_norm": 0.9090391993522644,
      "learning_rate": 1.00428300288164e-05,
      "loss": 3.4325,
      "step": 74
    },
    {
      "epoch": 1.2397330595482545,
      "grad_norm": 0.629118025302887,
      "learning_rate": 1e-05,
      "loss": 2.5815,
      "step": 75
    },
    {
      "epoch": 1.2397330595482545,
      "eval_loss": 2.5323638916015625,
      "eval_runtime": 2.0652,
      "eval_samples_per_second": 24.21,
      "eval_steps_per_second": 6.295,
      "step": 75
    }
  ],
  "logging_steps": 1,
  "max_steps": 75,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.582762825875456e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}