{
  "best_metric": 0.019864432513713837,
  "best_model_checkpoint": "/home/paperspace/Data/models/akash_unifo_757/llm3br256/checkpoint-400",
  "epoch": 4.010025062656641,
  "eval_steps": 5,
  "global_step": 400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {"epoch": 0.010025062656641603, "grad_norm": 0.28569191694259644, "learning_rate": 2.0000000000000003e-06, "loss": 0.0958, "step": 1},
    {"epoch": 0.020050125313283207, "grad_norm": 0.36814188957214355, "learning_rate": 4.000000000000001e-06, "loss": 0.0953, "step": 2},
    {"epoch": 0.03007518796992481, "grad_norm": 0.2642641067504883, "learning_rate": 6e-06, "loss": 0.081, "step": 3},
    {"epoch": 0.040100250626566414, "grad_norm": 0.2690538763999939, "learning_rate": 8.000000000000001e-06, "loss": 0.0846, "step": 4},
    {"epoch": 0.05012531328320802, "grad_norm": 0.2873159646987915, "learning_rate": 1e-05, "loss": 0.0892, "step": 5},
    {"epoch": 0.05012531328320802, "eval_loss": 0.09708356112241745, "eval_runtime": 10.7924, "eval_samples_per_second": 4.633, "eval_steps_per_second": 1.205, "step": 5},
    {"epoch": 0.06015037593984962, "grad_norm": 0.1856367141008377, "learning_rate": 1.2e-05, "loss": 0.0689, "step": 6},
    {"epoch": 0.07017543859649122, "grad_norm": 0.1307721883058548, "learning_rate": 1.4000000000000001e-05, "loss": 0.0501, "step": 7},
    {"epoch": 0.08020050125313283, "grad_norm": 0.15366266667842865, "learning_rate": 1.6000000000000003e-05, "loss": 0.0515, "step": 8},
    {"epoch": 0.09022556390977443, "grad_norm": 0.11183061450719833, "learning_rate": 1.8e-05, "loss": 0.0556, "step": 9},
    {"epoch": 0.10025062656641603, "grad_norm": 0.09610645473003387, "learning_rate": 2e-05, "loss": 0.045, "step": 10},
    {"epoch": 0.10025062656641603, "eval_loss": 0.04338453337550163, "eval_runtime": 7.8458, "eval_samples_per_second": 6.373, "eval_steps_per_second": 1.657, "step": 10},
    {"epoch": 0.11027568922305764, "grad_norm": 0.09562630206346512, "learning_rate": 2.2000000000000003e-05, "loss": 0.0468, "step": 11},
    {"epoch": 0.12030075187969924, "grad_norm": 0.08241941034793854, "learning_rate": 2.4e-05, "loss": 0.0379, "step": 12},
    {"epoch": 0.13032581453634084, "grad_norm": 0.08410688489675522, "learning_rate": 2.6000000000000002e-05, "loss": 0.0364, "step": 13},
    {"epoch": 0.14035087719298245, "grad_norm": 0.07379286736249924, "learning_rate": 2.8000000000000003e-05, "loss": 0.0274, "step": 14},
    {"epoch": 0.15037593984962405, "grad_norm": 0.0845954418182373, "learning_rate": 3e-05, "loss": 0.0496, "step": 15},
    {"epoch": 0.15037593984962405, "eval_loss": 0.03607996553182602, "eval_runtime": 7.8661, "eval_samples_per_second": 6.356, "eval_steps_per_second": 1.653, "step": 15},
    {"epoch": 0.16040100250626566, "grad_norm": 0.08074897527694702, "learning_rate": 3.2000000000000005e-05, "loss": 0.0312, "step": 16},
    {"epoch": 0.17042606516290726, "grad_norm": 0.060424063354730606, "learning_rate": 3.4000000000000007e-05, "loss": 0.03, "step": 17},
    {"epoch": 0.18045112781954886, "grad_norm": 0.06651686877012253, "learning_rate": 3.6e-05, "loss": 0.0296, "step": 18},
    {"epoch": 0.19047619047619047, "grad_norm": 0.058752626180648804, "learning_rate": 3.8e-05, "loss": 0.0272, "step": 19},
    {"epoch": 0.20050125313283207, "grad_norm": 0.05913863703608513, "learning_rate": 4e-05, "loss": 0.0279, "step": 20},
    {"epoch": 0.20050125313283207, "eval_loss": 0.03392041102051735, "eval_runtime": 7.8762, "eval_samples_per_second": 6.348, "eval_steps_per_second": 1.651, "step": 20},
    {"epoch": 0.21052631578947367, "grad_norm": 0.05515163764357567, "learning_rate": 4.2e-05, "loss": 0.028, "step": 21},
    {"epoch": 0.22055137844611528, "grad_norm": 0.06397461146116257, "learning_rate": 4.4000000000000006e-05, "loss": 0.0283, "step": 22},
    {"epoch": 0.23057644110275688, "grad_norm": 0.055562298744916916, "learning_rate": 4.600000000000001e-05, "loss": 0.0281, "step": 23},
    {"epoch": 0.24060150375939848, "grad_norm": 0.07124784588813782, "learning_rate": 4.8e-05, "loss": 0.0277, "step": 24},
    {"epoch": 0.2506265664160401, "grad_norm": 0.04555438458919525, "learning_rate": 5e-05, "loss": 0.027, "step": 25},
    {"epoch": 0.2506265664160401, "eval_loss": 0.033857107162475586, "eval_runtime": 7.8415, "eval_samples_per_second": 6.376, "eval_steps_per_second": 1.658, "step": 25},
    {"epoch": 0.2606516290726817, "grad_norm": 0.07086709886789322, "learning_rate": 5.2000000000000004e-05, "loss": 0.0253, "step": 26},
    {"epoch": 0.2706766917293233, "grad_norm": 0.056244127452373505, "learning_rate": 5.4000000000000005e-05, "loss": 0.0282, "step": 27},
    {"epoch": 0.2807017543859649, "grad_norm": 0.04824177920818329, "learning_rate": 5.6000000000000006e-05, "loss": 0.024, "step": 28},
    {"epoch": 0.2907268170426065, "grad_norm": 0.061910007148981094, "learning_rate": 5.8e-05, "loss": 0.0301, "step": 29},
    {"epoch": 0.3007518796992481, "grad_norm": 0.05887613445520401, "learning_rate": 6e-05, "loss": 0.0265, "step": 30},
    {"epoch": 0.3007518796992481, "eval_loss": 0.030988477170467377, "eval_runtime": 7.846, "eval_samples_per_second": 6.373, "eval_steps_per_second": 1.657, "step": 30},
    {"epoch": 0.3107769423558897, "grad_norm": 0.06682919710874557, "learning_rate": 6.2e-05, "loss": 0.0283, "step": 31},
    {"epoch": 0.3208020050125313, "grad_norm": 0.04526979848742485, "learning_rate": 6.400000000000001e-05, "loss": 0.0227, "step": 32},
    {"epoch": 0.3308270676691729, "grad_norm": 0.037424199283123016, "learning_rate": 6.6e-05, "loss": 0.0224, "step": 33},
    {"epoch": 0.3408521303258145, "grad_norm": 0.04883978143334389, "learning_rate": 6.800000000000001e-05, "loss": 0.0251, "step": 34},
    {"epoch": 0.3508771929824561, "grad_norm": 0.04064160957932472, "learning_rate": 7e-05, "loss": 0.0255, "step": 35},
    {"epoch": 0.3508771929824561, "eval_loss": 0.029722385108470917, "eval_runtime": 7.8878, "eval_samples_per_second": 6.339, "eval_steps_per_second": 1.648, "step": 35},
    {"epoch": 0.3609022556390977, "grad_norm": 0.04703061655163765, "learning_rate": 7.2e-05, "loss": 0.0251, "step": 36},
    {"epoch": 0.37092731829573933, "grad_norm": 0.03412939980626106, "learning_rate": 7.4e-05, "loss": 0.0221, "step": 37},
    {"epoch": 0.38095238095238093, "grad_norm": 0.038730327039957047, "learning_rate": 7.6e-05, "loss": 0.021, "step": 38},
    {"epoch": 0.39097744360902253, "grad_norm": 0.0406007319688797, "learning_rate": 7.800000000000001e-05, "loss": 0.0196, "step": 39},
    {"epoch": 0.40100250626566414, "grad_norm": 0.040442854166030884, "learning_rate": 8e-05, "loss": 0.0239, "step": 40},
    {"epoch": 0.40100250626566414, "eval_loss": 0.027504581958055496, "eval_runtime": 7.8471, "eval_samples_per_second": 6.372, "eval_steps_per_second": 1.657, "step": 40},
    {"epoch": 0.41102756892230574, "grad_norm": 0.040039267390966415, "learning_rate": 8.2e-05, "loss": 0.0192, "step": 41},
    {"epoch": 0.42105263157894735, "grad_norm": 0.050538670271635056, "learning_rate": 8.4e-05, "loss": 0.025, "step": 42},
    {"epoch": 0.43107769423558895, "grad_norm": 0.039062079042196274, "learning_rate": 8.6e-05, "loss": 0.0247, "step": 43},
    {"epoch": 0.44110275689223055, "grad_norm": 0.061014942824840546, "learning_rate": 8.800000000000001e-05, "loss": 0.0173, "step": 44},
    {"epoch": 0.45112781954887216, "grad_norm": 0.039824020117521286, "learning_rate": 9e-05, "loss": 0.019, "step": 45},
    {"epoch": 0.45112781954887216, "eval_loss": 0.02625363878905773, "eval_runtime": 7.8454, "eval_samples_per_second": 6.373, "eval_steps_per_second": 1.657, "step": 45},
    {"epoch": 0.46115288220551376, "grad_norm": 0.056794699281454086, "learning_rate": 9.200000000000001e-05, "loss": 0.0202, "step": 46},
    {"epoch": 0.47117794486215536, "grad_norm": 0.032318562269210815, "learning_rate": 9.4e-05, "loss": 0.0191, "step": 47},
    {"epoch": 0.48120300751879697, "grad_norm": 0.05849836394190788, "learning_rate": 9.6e-05, "loss": 0.0247, "step": 48},
    {"epoch": 0.49122807017543857, "grad_norm": 0.03626992926001549, "learning_rate": 9.8e-05, "loss": 0.0211, "step": 49},
    {"epoch": 0.5012531328320802, "grad_norm": 0.04883263632655144, "learning_rate": 0.0001, "loss": 0.0177, "step": 50},
    {"epoch": 0.5012531328320802, "eval_loss": 0.025471089407801628, "eval_runtime": 7.8509, "eval_samples_per_second": 6.369, "eval_steps_per_second": 1.656, "step": 50},
    {"epoch": 0.5112781954887218, "grad_norm": 0.028212551027536392, "learning_rate": 9.999875400032707e-05, "loss": 0.0169, "step": 51},
    {"epoch": 0.5213032581453634, "grad_norm": 0.03416428342461586, "learning_rate": 9.999501606340891e-05, "loss": 0.0189, "step": 52},
    {"epoch": 0.531328320802005, "grad_norm": 0.04827010631561279, "learning_rate": 9.998878637554424e-05, "loss": 0.022, "step": 53},
    {"epoch": 0.5413533834586466, "grad_norm": 0.044534213840961456, "learning_rate": 9.998006524722059e-05, "loss": 0.0229, "step": 54},
    {"epoch": 0.5513784461152882, "grad_norm": 0.03421458229422569, "learning_rate": 9.996885311309891e-05, "loss": 0.0178, "step": 55},
    {"epoch": 0.5513784461152882, "eval_loss": 0.02499430626630783, "eval_runtime": 7.8452, "eval_samples_per_second": 6.373, "eval_steps_per_second": 1.657, "step": 55},
    {"epoch": 0.5614035087719298, "grad_norm": 0.03985392674803734, "learning_rate": 9.995515053199182e-05, "loss": 0.0213, "step": 56},
    {"epoch": 0.5714285714285714, "grad_norm": 0.037616852670907974, "learning_rate": 9.993895818683579e-05, "loss": 0.0163, "step": 57},
    {"epoch": 0.581453634085213, "grad_norm": 0.03808311000466347, "learning_rate": 9.992027688465707e-05, "loss": 0.0212, "step": 58},
    {"epoch": 0.5914786967418546, "grad_norm": 0.03867008537054062, "learning_rate": 9.989910755653154e-05, "loss": 0.0205, "step": 59},
    {"epoch": 0.6015037593984962, "grad_norm": 0.03990580886602402, "learning_rate": 9.987545125753819e-05, "loss": 0.0179, "step": 60},
    {"epoch": 0.6015037593984962, "eval_loss": 0.02382907271385193, "eval_runtime": 7.8913, "eval_samples_per_second": 6.336, "eval_steps_per_second": 1.647, "step": 60},
    {"epoch": 0.6115288220551378, "grad_norm": 0.0392407588660717, "learning_rate": 9.98493091667067e-05, "loss": 0.0239, "step": 61},
    {"epoch": 0.6215538847117794, "grad_norm": 0.030434446409344673, "learning_rate": 9.982068258695853e-05, "loss": 0.0164, "step": 62},
    {"epoch": 0.631578947368421, "grad_norm": 0.042929526418447495, "learning_rate": 9.978957294504203e-05, "loss": 0.0223, "step": 63},
    {"epoch": 0.6416040100250626, "grad_norm": 0.028332175686955452, "learning_rate": 9.975598179146133e-05, "loss": 0.0185, "step": 64},
    {"epoch": 0.6516290726817042, "grad_norm": 0.037013668566942215, "learning_rate": 9.97199108003991e-05, "loss": 0.0199, "step": 65},
    {"epoch": 0.6516290726817042, "eval_loss": 0.023866703733801842, "eval_runtime": 7.8611, "eval_samples_per_second": 6.36, "eval_steps_per_second": 1.654, "step": 65},
    {"epoch": 0.6616541353383458, "grad_norm": 0.04389967769384384, "learning_rate": 9.968136176963307e-05, "loss": 0.0215, "step": 66},
    {"epoch": 0.6716791979949874, "grad_norm": 0.03696281462907791, "learning_rate": 9.964033662044643e-05, "loss": 0.02, "step": 67},
    {"epoch": 0.681704260651629, "grad_norm": 0.03329788148403168, "learning_rate": 9.959683739753207e-05, "loss": 0.0194, "step": 68},
    {"epoch": 0.6917293233082706, "grad_norm": 0.0394810289144516, "learning_rate": 9.955086626889068e-05, "loss": 0.0188, "step": 69},
    {"epoch": 0.7017543859649122, "grad_norm": 0.03089299239218235, "learning_rate": 9.950242552572271e-05, "loss": 0.0165, "step": 70},
    {"epoch": 0.7017543859649122, "eval_loss": 0.023690788075327873, "eval_runtime": 7.9034, "eval_samples_per_second": 6.326, "eval_steps_per_second": 1.645, "step": 70},
    {"epoch": 0.7117794486215538, "grad_norm": 0.037516698241233826, "learning_rate": 9.945151758231421e-05, "loss": 0.0181, "step": 71},
    {"epoch": 0.7218045112781954, "grad_norm": 0.03217910975217819, "learning_rate": 9.939814497591636e-05, "loss": 0.0168, "step": 72},
    {"epoch": 0.731829573934837, "grad_norm": 0.041075628250837326, "learning_rate": 9.934231036661919e-05, "loss": 0.02, "step": 73},
    {"epoch": 0.7418546365914787, "grad_norm": 0.036599840968847275, "learning_rate": 9.928401653721891e-05, "loss": 0.0209, "step": 74},
    {"epoch": 0.7518796992481203, "grad_norm": 0.03515414148569107, "learning_rate": 9.922326639307917e-05, "loss": 0.0192, "step": 75},
    {"epoch": 0.7518796992481203, "eval_loss": 0.02293418161571026, "eval_runtime": 7.8475, "eval_samples_per_second": 6.371, "eval_steps_per_second": 1.657, "step": 75},
    {"epoch": 0.7619047619047619, "grad_norm": 0.030638370662927628, "learning_rate": 9.91600629619864e-05, "loss": 0.0219, "step": 76},
    {"epoch": 0.7719298245614035, "grad_norm": 0.029526930302381516, "learning_rate": 9.909440939399876e-05, "loss": 0.0188, "step": 77},
    {"epoch": 0.7819548872180451, "grad_norm": 0.043333012610673904, "learning_rate": 9.902630896128923e-05, "loss": 0.0218, "step": 78},
    {"epoch": 0.7919799498746867, "grad_norm": 0.04038293659687042, "learning_rate": 9.895576505798248e-05, "loss": 0.0207, "step": 79},
    {"epoch": 0.8020050125313283, "grad_norm": 0.02755335345864296, "learning_rate": 9.888278119998573e-05, "loss": 0.0158, "step": 80},
    {"epoch": 0.8020050125313283, "eval_loss": 0.02307852730154991, "eval_runtime": 7.844, "eval_samples_per_second": 6.374, "eval_steps_per_second": 1.657, "step": 80},
    {"epoch": 0.8120300751879699, "grad_norm": 0.036036524921655655, "learning_rate": 9.88073610248135e-05, "loss": 0.0158, "step": 81},
    {"epoch": 0.8220551378446115, "grad_norm": 0.03679794445633888, "learning_rate": 9.872950829140633e-05, "loss": 0.0276, "step": 82},
    {"epoch": 0.8320802005012531, "grad_norm": 0.03850351274013519, "learning_rate": 9.864922687994347e-05, "loss": 0.0159, "step": 83},
    {"epoch": 0.8421052631578947, "grad_norm": 0.03365923836827278, "learning_rate": 9.856652079164937e-05, "loss": 0.0204, "step": 84},
    {"epoch": 0.8521303258145363, "grad_norm": 0.04202227666974068, "learning_rate": 9.848139414859441e-05, "loss": 0.0202, "step": 85},
    {"epoch": 0.8521303258145363, "eval_loss": 0.023334117606282234, "eval_runtime": 7.8493, "eval_samples_per_second": 6.37, "eval_steps_per_second": 1.656, "step": 85},
    {"epoch": 0.8621553884711779, "grad_norm": 0.027862610295414925, "learning_rate": 9.839385119348937e-05, "loss": 0.0162, "step": 86},
    {"epoch": 0.8721804511278195, "grad_norm": 0.05110170319676399, "learning_rate": 9.830389628947398e-05, "loss": 0.0224, "step": 87},
    {"epoch": 0.8822055137844611, "grad_norm": 0.039190806448459625, "learning_rate": 9.82115339198995e-05, "loss": 0.0207, "step": 88},
    {"epoch": 0.8922305764411027, "grad_norm": 0.03106129728257656, "learning_rate": 9.811676868810517e-05, "loss": 0.0217, "step": 89},
    {"epoch": 0.9022556390977443, "grad_norm": 0.04018354043364525, "learning_rate": 9.801960531718896e-05, "loss": 0.0203, "step": 90},
    {"epoch": 0.9022556390977443, "eval_loss": 0.023184653371572495, "eval_runtime": 7.8695, "eval_samples_per_second": 6.354, "eval_steps_per_second": 1.652, "step": 90},
    {"epoch": 0.9122807017543859, "grad_norm": 0.031660296022892, "learning_rate": 9.792004864977198e-05, "loss": 0.023, "step": 91},
    {"epoch": 0.9223057644110275, "grad_norm": 0.030742425471544266, "learning_rate": 9.781810364775722e-05, "loss": 0.0166, "step": 92},
    {"epoch": 0.9323308270676691, "grad_norm": 0.03069412149488926, "learning_rate": 9.771377539208228e-05, "loss": 0.018, "step": 93},
    {"epoch": 0.9423558897243107, "grad_norm": 0.02346673421561718, "learning_rate": 9.760706908246603e-05, "loss": 0.0161, "step": 94},
    {"epoch": 0.9523809523809523, "grad_norm": 0.0320681668817997, "learning_rate": 9.749799003714954e-05, "loss": 0.0231, "step": 95},
    {"epoch": 0.9523809523809523, "eval_loss": 0.02278592251241207, "eval_runtime": 7.8596, "eval_samples_per_second": 6.362, "eval_steps_per_second": 1.654, "step": 95},
    {"epoch": 0.9624060150375939, "grad_norm": 0.03784003481268883, "learning_rate": 9.738654369263103e-05, "loss": 0.0225, "step": 96},
    {"epoch": 0.9724310776942355, "grad_norm": 0.03227786719799042, "learning_rate": 9.727273560339483e-05, "loss": 0.0145, "step": 97},
    {"epoch": 0.9824561403508771, "grad_norm": 0.030028732493519783, "learning_rate": 9.715657144163463e-05, "loss": 0.0156, "step": 98},
    {"epoch": 0.9924812030075187, "grad_norm": 0.03690657019615173, "learning_rate": 9.703805699697072e-05, "loss": 0.0229, "step": 99},
    {"epoch": 1.0025062656641603, "grad_norm": 0.03713948652148247, "learning_rate": 9.691719817616147e-05, "loss": 0.0175, "step": 100},
    {"epoch": 1.0025062656641603, "eval_loss": 0.022534934803843498, "eval_runtime": 7.8577, "eval_samples_per_second": 6.363, "eval_steps_per_second": 1.654, "step": 100},
    {"epoch": 1.012531328320802, "grad_norm": 0.03210939094424248, "learning_rate": 9.679400100280896e-05, "loss": 0.0178, "step": 101},
    {"epoch": 1.0225563909774436, "grad_norm": 0.021419484168291092, "learning_rate": 9.666847161705867e-05, "loss": 0.0147, "step": 102},
    {"epoch": 1.0325814536340852, "grad_norm": 0.03585834801197052, "learning_rate": 9.654061627529354e-05, "loss": 0.02, "step": 103},
    {"epoch": 1.0426065162907268, "grad_norm": 0.02969486080110073, "learning_rate": 9.641044134982215e-05, "loss": 0.0129, "step": 104},
    {"epoch": 1.0526315789473684, "grad_norm": 0.023357393220067024, "learning_rate": 9.627795332856107e-05, "loss": 0.0137, "step": 105},
    {"epoch": 1.0526315789473684, "eval_loss": 0.022470790892839432, "eval_runtime": 7.8421, "eval_samples_per_second": 6.376, "eval_steps_per_second": 1.658, "step": 105},
    {"epoch": 1.06265664160401, "grad_norm": 0.030119987204670906, "learning_rate": 9.614315881471154e-05, "loss": 0.0155, "step": 106},
    {"epoch": 1.0726817042606516, "grad_norm": 0.023424195125699043, "learning_rate": 9.600606452643037e-05, "loss": 0.0131, "step": 107},
    {"epoch": 1.0827067669172932, "grad_norm": 0.028333192691206932, "learning_rate": 9.586667729649513e-05, "loss": 0.0169, "step": 108},
    {"epoch": 1.0927318295739348, "grad_norm": 0.024621840566396713, "learning_rate": 9.572500407196348e-05, "loss": 0.0135, "step": 109},
    {"epoch": 1.1027568922305764, "grad_norm": 0.0384870246052742, "learning_rate": 9.55810519138271e-05, "loss": 0.0286, "step": 110},
    {"epoch": 1.1027568922305764, "eval_loss": 0.02285490557551384, "eval_runtime": 7.8692, "eval_samples_per_second": 6.354, "eval_steps_per_second": 1.652, "step": 110},
    {"epoch": 1.112781954887218, "grad_norm": 0.029268387705087662, "learning_rate": 9.543482799665969e-05, "loss": 0.0152, "step": 111},
    {"epoch": 1.1228070175438596, "grad_norm": 0.02799990586936474, "learning_rate": 9.528633960825933e-05, "loss": 0.0142, "step": 112},
    {"epoch": 1.1328320802005012, "grad_norm": 0.019951699301600456, "learning_rate": 9.513559414928538e-05, "loss": 0.0118, "step": 113},
    {"epoch": 1.1428571428571428, "grad_norm": 0.02334384061396122, "learning_rate": 9.498259913288953e-05, "loss": 0.0128, "step": 114},
    {"epoch": 1.1528822055137844, "grad_norm": 0.028435515239834785, "learning_rate": 9.482736218434143e-05, "loss": 0.0169, "step": 115},
    {"epoch": 1.1528822055137844, "eval_loss": 0.022512206807732582, "eval_runtime": 7.8509, "eval_samples_per_second": 6.369, "eval_steps_per_second": 1.656, "step": 115},
    {"epoch": 1.162907268170426, "grad_norm": 0.03522834554314613, "learning_rate": 9.466989104064853e-05, "loss": 0.0183, "step": 116},
    {"epoch": 1.1729323308270676, "grad_norm": 0.025662166997790337, "learning_rate": 9.451019355017056e-05, "loss": 0.0114, "step": 117},
    {"epoch": 1.1829573934837092, "grad_norm": 0.027821151539683342, "learning_rate": 9.43482776722284e-05, "loss": 0.0111, "step": 118},
    {"epoch": 1.1929824561403508, "grad_norm": 0.039541855454444885, "learning_rate": 9.418415147670725e-05, "loss": 0.0153, "step": 119},
    {"epoch": 1.2030075187969924, "grad_norm": 0.02246340923011303, "learning_rate": 9.401782314365457e-05, "loss": 0.0141, "step": 120},
    {"epoch": 1.2030075187969924, "eval_loss": 0.022168081253767014, "eval_runtime": 7.868, "eval_samples_per_second": 6.355, "eval_steps_per_second": 1.652, "step": 120},
    {"epoch": 1.213032581453634, "grad_norm": 0.027894971892237663, "learning_rate": 9.38493009628723e-05, "loss": 0.0166, "step": 121},
    {"epoch": 1.2230576441102756, "grad_norm": 0.023307248950004578, "learning_rate": 9.36785933335037e-05, "loss": 0.0141, "step": 122},
    {"epoch": 1.2330827067669172, "grad_norm": 0.03410186246037483, "learning_rate": 9.350570876361482e-05, "loss": 0.0145, "step": 123},
    {"epoch": 1.2431077694235588, "grad_norm": 0.029240259900689125, "learning_rate": 9.333065586977035e-05, "loss": 0.0102, "step": 124},
    {"epoch": 1.2531328320802004, "grad_norm": 0.026229720562696457, "learning_rate": 9.315344337660421e-05, "loss": 0.0149, "step": 125},
    {"epoch": 1.2531328320802004, "eval_loss": 0.02199222519993782, "eval_runtime": 7.8428, "eval_samples_per_second": 6.375, "eval_steps_per_second": 1.658, "step": 125},
    {"epoch": 1.263157894736842, "grad_norm": 0.02851460874080658, "learning_rate": 9.297408011638477e-05, "loss": 0.017, "step": 126},
    {"epoch": 1.2731829573934836, "grad_norm": 0.02489631064236164, "learning_rate": 9.279257502857455e-05, "loss": 0.0106, "step": 127},
    {"epoch": 1.2832080200501252, "grad_norm": 0.031498756259679794, "learning_rate": 9.260893715938477e-05, "loss": 0.0188, "step": 128},
    {"epoch": 1.2932330827067668, "grad_norm": 0.026509685441851616, "learning_rate": 9.24231756613244e-05, "loss": 0.0166, "step": 129},
    {"epoch": 1.3032581453634084, "grad_norm": 0.02835785411298275, "learning_rate": 9.22352997927441e-05, "loss": 0.0123, "step": 130},
    {"epoch": 1.3032581453634084, "eval_loss": 0.022584008052945137, "eval_runtime": 7.8608, "eval_samples_per_second": 6.361, "eval_steps_per_second": 1.654, "step": 130},
    {"epoch": 1.31328320802005, "grad_norm": 0.03289595618844032, "learning_rate": 9.20453189173747e-05, "loss": 0.0173, "step": 131},
    {"epoch": 1.3233082706766917, "grad_norm": 0.037241410464048386, "learning_rate": 9.185324250386054e-05, "loss": 0.0205, "step": 132},
    {"epoch": 1.3333333333333333, "grad_norm": 0.02691718004643917, "learning_rate": 9.165908012528755e-05, "loss": 0.0144, "step": 133},
    {"epoch": 1.3433583959899749, "grad_norm": 0.028582507744431496, "learning_rate": 9.146284145870614e-05, "loss": 0.0182, "step": 134},
    {"epoch": 1.3533834586466165, "grad_norm": 0.02578776516020298, "learning_rate": 9.126453628464888e-05, "loss": 0.0137, "step": 135},
    {"epoch": 1.3533834586466165, "eval_loss": 0.022607652470469475, "eval_runtime": 7.8746, "eval_samples_per_second": 6.35, "eval_steps_per_second": 1.651, "step": 135},
    {"epoch": 1.363408521303258, "grad_norm": 0.021922461688518524, "learning_rate": 9.106417448664306e-05, "loss": 0.0124, "step": 136},
    {"epoch": 1.3734335839598997, "grad_norm": 0.027213698253035545, "learning_rate": 9.086176605071805e-05, "loss": 0.0145, "step": 137},
    {"epoch": 1.3834586466165413, "grad_norm": 0.03600913658738136, "learning_rate": 9.06573210649077e-05, "loss": 0.0123, "step": 138},
    {"epoch": 1.3934837092731829, "grad_norm": 0.02878659963607788, "learning_rate": 9.045084971874738e-05, "loss": 0.0172, "step": 139},
    {"epoch": 1.4035087719298245, "grad_norm": 0.022535493597388268, "learning_rate": 9.024236230276629e-05, "loss": 0.0118, "step": 140},
    {"epoch": 1.4035087719298245, "eval_loss": 0.02263771742582321, "eval_runtime": 7.8445, "eval_samples_per_second": 6.374, "eval_steps_per_second": 1.657, "step": 140},
    {"epoch": 1.413533834586466, "grad_norm": 0.025746768340468407, "learning_rate": 9.003186920797452e-05, "loss": 0.0165, "step": 141},
    {"epoch": 1.4235588972431077, "grad_norm": 0.017796190455555916, "learning_rate": 8.981938092534517e-05, "loss": 0.0078, "step": 142},
    {"epoch": 1.4335839598997493, "grad_norm": 0.03525475040078163, "learning_rate": 8.960490804529144e-05, "loss": 0.0205, "step": 143},
    {"epoch": 1.443609022556391, "grad_norm": 0.019581295549869537, "learning_rate": 8.938846125713891e-05, "loss": 0.0091, "step": 144},
    {"epoch": 1.4536340852130325, "grad_norm": 0.03014942817389965, "learning_rate": 8.917005134859263e-05, "loss": 0.015, "step": 145},
    {"epoch": 1.4536340852130325, "eval_loss": 0.021907737478613853, "eval_runtime": 7.8749, "eval_samples_per_second": 6.349, "eval_steps_per_second": 1.651, "step": 145},
    {"epoch": 1.463659147869674, "grad_norm": 0.028763974085450172, "learning_rate": 8.894968920519959e-05, "loss": 0.0156, "step": 146},
    {"epoch": 1.4736842105263157, "grad_norm": 0.022081634029746056, "learning_rate": 8.872738580980615e-05, "loss": 0.013, "step": 147},
    {"epoch": 1.4837092731829573, "grad_norm": 0.031854048371315, "learning_rate": 8.850315224201063e-05, "loss": 0.0159, "step": 148},
    {"epoch": 1.493734335839599, "grad_norm": 0.03208814561367035, "learning_rate": 8.827699967761108e-05, "loss": 0.0178, "step": 149},
    {"epoch": 1.5037593984962405, "grad_norm": 0.02058330550789833, "learning_rate": 8.80489393880484e-05, "loss": 0.0059, "step": 150},
    {"epoch": 1.5037593984962405, "eval_loss": 0.02317122556269169, "eval_runtime": 7.8482, "eval_samples_per_second": 6.371, "eval_steps_per_second": 1.656, "step": 150},
    {"epoch": 1.5137844611528823, "grad_norm": 0.036856647580862045, "learning_rate": 8.78189827398444e-05, "loss": 0.0165, "step": 151},
    {"epoch": 1.5238095238095237, "grad_norm": 0.02781173400580883, "learning_rate": 8.758714119403543e-05, "loss": 0.0135, "step": 152},
    {"epoch": 1.5338345864661656, "grad_norm": 0.020493071526288986, "learning_rate": 8.73534263056011e-05, "loss": 0.0118, "step": 153},
    {"epoch": 1.543859649122807, "grad_norm": 0.031307898461818695, "learning_rate": 8.711784972288839e-05, "loss": 0.0129, "step": 154},
    {"epoch": 1.5538847117794488, "grad_norm": 0.0317239835858345, "learning_rate": 8.688042318703111e-05, "loss": 0.0155, "step": 155},
    {"epoch": 1.5538847117794488, "eval_loss": 0.022369742393493652, "eval_runtime": 7.9066, "eval_samples_per_second": 6.324, "eval_steps_per_second": 1.644, "step": 155},
    {"epoch": 1.5639097744360901, "grad_norm": 0.0316733792424202, "learning_rate": 8.66411585313647e-05, "loss": 0.0146, "step": 156},
    {"epoch": 1.573934837092732, "grad_norm": 0.027159877121448517, "learning_rate": 8.640006768083647e-05, "loss": 0.0124, "step": 157},
    {"epoch": 1.5839598997493733, "grad_norm": 0.03147249296307564, "learning_rate": 8.615716265141123e-05, "loss": 0.017, "step": 158},
    {"epoch": 1.5939849624060152, "grad_norm": 0.018718725070357323, "learning_rate": 8.59124555494725e-05, "loss": 0.0096, "step": 159},
    {"epoch": 1.6040100250626566, "grad_norm": 0.025683386251330376, "learning_rate": 8.566595857121902e-05, "loss": 0.0168, "step": 160},
    {"epoch": 1.6040100250626566, "eval_loss": 0.022817488759756088, "eval_runtime": 7.8759, "eval_samples_per_second": 6.349, "eval_steps_per_second": 1.651, "step": 160},
    {"epoch": 1.6140350877192984, "grad_norm": 0.02304229326546192, "learning_rate": 8.541768400205697e-05, "loss": 0.0137, "step": 161},
    {"epoch": 1.6240601503759398, "grad_norm": 0.029396969825029373, "learning_rate": 8.516764421598762e-05, "loss": 0.0177, "step": 162},
    {"epoch": 1.6340852130325816, "grad_norm": 0.024926166981458664, "learning_rate": 8.491585167499066e-05, "loss": 0.0151, "step": 163},
    {"epoch": 1.644110275689223, "grad_norm": 0.030115393921732903, "learning_rate": 8.4662318928403e-05, "loss": 0.0161, "step": 164},
    {"epoch": 1.6541353383458648, "grad_norm": 0.024759652093052864, "learning_rate": 8.440705861229344e-05, "loss": 0.0115, "step": 165},
    {"epoch": 1.6541353383458648, "eval_loss": 0.022450853139162064, "eval_runtime": 7.8428, "eval_samples_per_second": 6.375, "eval_steps_per_second": 1.658, "step": 165},
    {"epoch": 1.6641604010025062, "grad_norm": 0.02288990467786789, "learning_rate": 8.415008344883279e-05, "loss": 0.0138, "step": 166},
    {"epoch": 1.674185463659148, "grad_norm": 0.031593967229127884, "learning_rate": 8.389140624565985e-05, "loss": 0.0118, "step": 167},
    {"epoch": 1.6842105263157894, "grad_norm": 0.023670073598623276, "learning_rate": 8.363103989524302e-05, "loss": 0.0138, "step": 168},
    {"epoch": 1.6942355889724312, "grad_norm": 0.02136528678238392, "learning_rate": 8.33689973742378e-05, "loss": 0.0107, "step": 169},
    {"epoch": 1.7042606516290726, "grad_norm": 0.025891460478305817, "learning_rate": 8.310529174284004e-05, "loss": 0.0156, "step": 170},
    {"epoch": 1.7042606516290726, "eval_loss": 0.022050362080335617, "eval_runtime": 7.8487, "eval_samples_per_second": 6.37, "eval_steps_per_second": 1.656, "step": 170},
    {"epoch": 1.7142857142857144, "grad_norm": 0.02511603571474552, "learning_rate": 8.28399361441349e-05, "loss": 0.0122, "step": 171},
    {"epoch": 1.7243107769423558, "grad_norm": 0.027994632720947266, "learning_rate": 8.257294380344198e-05, "loss": 0.013, "step": 172},
    {"epoch": 1.7343358395989976, "grad_norm": 0.03440297394990921, "learning_rate": 8.230432802765607e-05, "loss": 0.0174, "step": 173},
    {"epoch": 1.744360902255639, "grad_norm": 0.03783298283815384, "learning_rate": 8.20341022045839e-05, "loss": 0.0107, "step": 174},
    {"epoch": 1.7543859649122808, "grad_norm": 0.050754014402627945, "learning_rate": 8.176227980227694e-05, "loss": 0.0174, "step": 175},
    {"epoch": 1.7543859649122808, "eval_loss": 0.021764207631349564, "eval_runtime": 7.8631, "eval_samples_per_second": 6.359, "eval_steps_per_second": 1.653, "step": 175},
    {"epoch": 1.7644110275689222, "grad_norm": 0.02387530915439129, "learning_rate": 8.148887436836018e-05, "loss": 0.0125, "step": 176},
    {"epoch": 1.774436090225564, "grad_norm": 0.0533299446105957, "learning_rate": 8.121389952935687e-05, "loss": 0.015, "step": 177},
    {"epoch": 1.7844611528822054, "grad_norm": 0.026894379407167435, "learning_rate": 8.093736899000939e-05, "loss": 0.0135, "step": 178},
    {"epoch": 1.7944862155388472, "grad_norm": 0.032237350940704346, "learning_rate": 8.065929653259621e-05, "loss": 0.0183, "step": 179},
    {"epoch": 1.8045112781954886, "grad_norm": 0.026856914162635803, "learning_rate": 8.037969601624495e-05, "loss": 0.0147, "step": 180},
    {"epoch": 1.8045112781954886, "eval_loss": 0.021373717114329338, "eval_runtime": 7.8519, "eval_samples_per_second": 6.368, "eval_steps_per_second": 1.656, "step": 180},
    {"epoch": 1.8145363408521304, "grad_norm": 0.026055332273244858, "learning_rate": 8.009858137624171e-05, "loss": 0.0141, "step": 181},
    {"epoch": 1.8245614035087718, "grad_norm": 0.02705809473991394, "learning_rate": 7.981596662333645e-05, "loss": 0.0107, "step": 182},
    {"epoch": 1.8345864661654137, "grad_norm": 0.025101376697421074, "learning_rate": 7.953186584304476e-05, "loss": 0.0109, "step": 183},
    {"epoch": 1.844611528822055, "grad_norm": 0.027133477851748466, "learning_rate": 7.924629319494583e-05, "loss": 0.0113, "step": 184},
    {"epoch": 1.8546365914786969, "grad_norm": 0.028486840426921844, "learning_rate": 7.895926291197667e-05, "loss": 0.0113, "step": 185},
    {"epoch": 1.8546365914786969, "eval_loss": 0.02112492173910141, "eval_runtime": 7.8502, "eval_samples_per_second": 6.369, "eval_steps_per_second": 1.656, "step": 185},
    {"epoch": 1.8646616541353382, "grad_norm": 0.03550581634044647, "learning_rate": 7.867078929972287e-05, "loss": 0.0131, "step": 186},
    {"epoch": 1.87468671679198, "grad_norm": 0.027450090274214745, "learning_rate": 7.838088673570547e-05, "loss": 0.0114, "step": 187},
    {"epoch": 1.8847117794486214, "grad_norm": 0.028570324182510376, "learning_rate": 7.808956966866446e-05, "loss": 0.0128, "step": 188},
    {"epoch": 1.8947368421052633, "grad_norm": 0.03284519538283348, "learning_rate": 7.779685261783866e-05, "loss": 0.0156, "step": 189},
    {"epoch": 1.9047619047619047, "grad_norm": 0.02620542049407959, "learning_rate": 7.750275017224207e-05, "loss": 0.0128, "step": 190},
    {"epoch": 1.9047619047619047, "eval_loss": 0.021003421396017075, "eval_runtime": 7.8602, "eval_samples_per_second": 6.361, "eval_steps_per_second": 1.654, "step": 190},
    {"epoch": 1.9147869674185465, "grad_norm": 0.018250567838549614, "learning_rate": 7.72072769899367e-05, "loss": 0.0101, "step": 191},
    {"epoch": 1.9248120300751879, "grad_norm": 0.02913222834467888, "learning_rate": 7.691044779730211e-05, "loss": 0.0139, "step": 192},
    {"epoch": 1.9348370927318297, "grad_norm": 0.02514669857919216, "learning_rate": 7.661227738830137e-05, "loss": 0.0133, "step": 193},
    {"epoch": 1.944862155388471, "grad_norm": 0.025002580136060715, "learning_rate": 7.631278062374376e-05, "loss": 0.0146, "step": 194},
    {"epoch": 1.954887218045113, "grad_norm": 0.03078017756342888, "learning_rate": 7.60119724305441e-05, "loss": 0.0158, "step": 195},
    {"epoch": 1.954887218045113, "eval_loss": 0.020739883184432983, "eval_runtime": 7.8876, "eval_samples_per_second": 6.339, "eval_steps_per_second": 1.648, "step": 195},
    {"epoch": 1.9649122807017543, "grad_norm": 0.019327951595187187, "learning_rate": 7.570986780097884e-05, "loss": 0.0102, "step": 196},
    {"epoch": 1.974937343358396, "grad_norm": 0.023565849289298058, "learning_rate": 7.540648179193874e-05, "loss": 0.0155, "step": 197},
    {"epoch": 1.9849624060150375, "grad_norm": 0.025518329814076424, "learning_rate": 7.51018295241785e-05, "loss": 0.0095, "step": 198},
    {"epoch": 1.9949874686716793, "grad_norm": 0.029831720516085625, "learning_rate": 7.479592618156321e-05, "loss": 0.0146, "step": 199},
    {"epoch": 2.0050125313283207, "grad_norm": 0.03035411797463894, "learning_rate": 7.448878701031142e-05, "loss": 0.0139, "step": 200},
    {"epoch": 2.0050125313283207, "eval_loss": 0.02077317051589489, "eval_runtime": 7.8495, "eval_samples_per_second": 6.37, "eval_steps_per_second": 1.656, "step": 200},
    {"epoch": 2.0150375939849625, "grad_norm": 0.02455909363925457, "learning_rate": 7.418042731823544e-05, "loss": 0.011, "step": 201},
    {"epoch": 2.025062656641604, "grad_norm": 0.021201658993959427, "learning_rate": 7.387086247397826e-05, "loss": 0.0085, "step": 202},
    {"epoch": 2.0350877192982457, "grad_norm": 0.024311352521181107, "learning_rate": 7.356010790624767e-05, "loss": 0.0136, "step": 203},
    {"epoch": 2.045112781954887, "grad_norm": 0.024387311190366745, "learning_rate": 7.324817910304728e-05, "loss": 0.0136, "step": 204},
    {"epoch": 2.055137844611529, "grad_norm": 0.021270353347063065, "learning_rate": 7.293509161090452e-05, "loss": 0.0095, "step": 205},
    {"epoch": 2.055137844611529, "eval_loss": 0.02157193422317505, "eval_runtime": 7.8654, "eval_samples_per_second": 6.357, "eval_steps_per_second": 1.653, "step": 205},
    {"epoch": 2.0651629072681703, "grad_norm": 0.03074588254094124, "learning_rate": 7.262086103409597e-05, "loss": 0.0105, "step": 206},
    {"epoch": 2.075187969924812, "grad_norm": 0.02580447867512703, "learning_rate": 7.230550303386942e-05, "loss": 0.01, "step": 207},
    {"epoch": 2.0852130325814535, "grad_norm": 0.028543729335069656, "learning_rate": 7.198903332766347e-05, "loss": 0.0133, "step": 208},
    {"epoch": 2.0952380952380953, "grad_norm": 0.030399933457374573, "learning_rate": 7.167146768832418e-05, "loss": 0.011, "step": 209},
    {"epoch": 2.1052631578947367, "grad_norm": 0.03573281690478325, "learning_rate": 7.13528219433188e-05, "loss": 0.0117, "step": 210},
    {"epoch": 2.1052631578947367, "eval_loss": 0.021564830094575882, "eval_runtime": 7.8471, "eval_samples_per_second": 6.372, "eval_steps_per_second": 1.657, "step": 210},
    {"epoch": 2.1152882205513786, "grad_norm": 0.031177852302789688, "learning_rate": 7.103311197394716e-05, "loss": 0.0095, "step": 211},
    {"epoch": 2.12531328320802, "grad_norm": 0.030712947249412537, "learning_rate": 7.071235371454987e-05, "loss": 0.0113, "step": 212},
    {"epoch": 2.1353383458646618, "grad_norm": 0.028729625046253204, "learning_rate": 7.039056315171445e-05, "loss": 0.0103, "step": 213},
    {"epoch": 2.145363408521303, "grad_norm": 0.03274873271584511, "learning_rate": 7.006775632347831e-05, "loss": 0.0103, "step": 214},
    {"epoch": 2.155388471177945, "grad_norm": 0.030903812497854233, "learning_rate": 6.974394931852956e-05, "loss": 0.0117, "step": 215},
    {"epoch": 2.155388471177945, "eval_loss": 0.020912937819957733, "eval_runtime": 7.8659, "eval_samples_per_second": 6.357, "eval_steps_per_second": 1.653, "step": 215},
    {"epoch": 2.1654135338345863, "grad_norm": 0.02950100228190422, "learning_rate": 6.941915827540509e-05, "loss": 0.0086, "step": 216},
    {"epoch": 2.175438596491228, "grad_norm": 0.0353955514729023, "learning_rate": 6.909339938168623e-05, "loss": 0.012, "step": 217},
    {"epoch": 2.1854636591478696, "grad_norm": 0.020289815962314606, "learning_rate": 6.876668887319198e-05, "loss": 0.0079, "step": 218},
    {"epoch": 2.1954887218045114, "grad_norm": 0.03004349209368229, "learning_rate": 6.843904303316983e-05, "loss": 0.0109, "step": 219},
    {"epoch": 2.2055137844611528, "grad_norm": 0.025885947048664093, "learning_rate": 6.811047819148413e-05, "loss": 0.0098, "step": 220},
    {"epoch": 2.2055137844611528, "eval_loss": 0.02112417295575142, "eval_runtime": 7.8566, "eval_samples_per_second": 6.364, "eval_steps_per_second": 1.655, "step": 220},
    {"epoch": 2.2155388471177946, "grad_norm": 0.02360019087791443, "learning_rate": 6.77810107238023e-05, "loss": 0.0104, "step": 221},
    {"epoch": 2.225563909774436, "grad_norm": 0.0300395917147398, "learning_rate": 6.745065705077864e-05, "loss": 0.008, "step": 222},
    {"epoch": 2.235588972431078, "grad_norm": 0.03650784492492676, "learning_rate": 6.711943363723588e-05, "loss": 0.0146, "step": 223},
    {"epoch": 2.245614035087719, "grad_norm": 0.0672539472579956, "learning_rate": 6.678735699134459e-05, "loss": 0.0153, "step": 224},
    {"epoch": 2.255639097744361, "grad_norm": 0.03322110325098038, "learning_rate": 6.64544436638005e-05, "loss": 0.0116, "step": 225},
    {"epoch": 2.255639097744361, "eval_loss": 0.020799538120627403, "eval_runtime": 7.858, "eval_samples_per_second": 6.363, "eval_steps_per_second": 1.654, "step": 225},
    {"epoch": 2.2656641604010024, "grad_norm": 0.031463854014873505, "learning_rate": 6.612071024699948e-05, "loss": 0.0075, "step": 226},
    {"epoch": 2.275689223057644, "grad_norm": 0.031025653705000877, "learning_rate": 6.578617337421064e-05, "loss": 0.0124, "step": 227},
    {"epoch": 2.2857142857142856, "grad_norm": 0.03126023709774017, "learning_rate": 6.545084971874738e-05, "loss": 0.0119, "step": 228},
    {"epoch": 2.2957393483709274, "grad_norm": 0.021827111020684242, "learning_rate": 6.511475599313627e-05, "loss": 0.0117, "step": 229},
    {"epoch": 2.305764411027569, "grad_norm": 0.019596287980675697, "learning_rate": 6.477790894828421e-05, "loss": 0.0091, "step": 230},
    {"epoch": 2.305764411027569, "eval_loss": 0.021051470190286636, "eval_runtime": 7.8473, "eval_samples_per_second": 6.372, "eval_steps_per_second": 1.657, "step": 230},
    {"epoch": 2.3157894736842106, "grad_norm": 0.0325470045208931, "learning_rate": 6.44403253726435e-05, "loss": 0.0144, "step": 231},
    {"epoch": 2.325814536340852, "grad_norm": 0.023597577586770058, "learning_rate": 6.410202209137515e-05, "loss": 0.0114, "step": 232},
    {"epoch": 2.335839598997494, "grad_norm": 0.023727184161543846, "learning_rate": 6.376301596551025e-05, "loss": 0.008, "step": 233},
    {"epoch": 2.345864661654135, "grad_norm": 0.024764055386185646, "learning_rate": 6.342332389110971e-05, "loss": 0.0081, "step": 234},
    {"epoch": 2.355889724310777, "grad_norm": 0.033406369388103485, "learning_rate": 6.308296279842205e-05, "loss": 0.0144, "step": 235},
    {"epoch": 2.355889724310777, "eval_loss": 0.020991137251257896, "eval_runtime": 7.8508, "eval_samples_per_second": 6.369, "eval_steps_per_second": 1.656, "step": 235},
    {"epoch": 2.3659147869674184, "grad_norm": 0.023914620280265808, "learning_rate": 6.274194965103965e-05, "loss": 0.0105, "step": 236},
    {"epoch": 2.3759398496240602, "grad_norm": 0.033582258969545364, "learning_rate": 6.240030144505339e-05, "loss": 0.011, "step": 237},
    {"epoch": 2.3859649122807016, "grad_norm": 0.02053799293935299, "learning_rate": 6.205803520820531e-05, "loss": 0.0088, "step": 238},
    {"epoch": 2.3959899749373434, "grad_norm": 0.022256596013903618, "learning_rate": 6.171516799904022e-05, "loss": 0.0116, "step": 239},
    {"epoch": 2.406015037593985, "grad_norm": 0.02922326698899269, "learning_rate": 6.137171690605533e-05, "loss": 0.0128, "step": 240},
    {"epoch": 2.406015037593985, "eval_loss": 0.021076854318380356, "eval_runtime": 7.8443, "eval_samples_per_second": 6.374, "eval_steps_per_second": 1.657, "step": 240},
    {"epoch": 2.4160401002506267, "grad_norm": 0.02965260110795498, "learning_rate": 6.1027699046848595e-05, "loss": 0.009, "step": 241},
    {"epoch": 2.426065162907268, "grad_norm": 0.02778303064405918, "learning_rate": 6.068313156726567e-05, "loss": 0.0117, "step": 242},
    {"epoch": 2.43609022556391, "grad_norm": 0.017490755766630173, "learning_rate": 6.033803164054519e-05, "loss": 0.0093, "step": 243},
    {"epoch": 2.4461152882205512, "grad_norm": 0.02218572050333023, "learning_rate": 5.999241646646301e-05, "loss": 0.0115, "step": 244},
    {"epoch": 2.456140350877193, "grad_norm": 0.024667710065841675, "learning_rate": 5.9646303270474845e-05, "loss": 0.0097, "step": 245},
    {"epoch": 2.456140350877193, "eval_loss": 0.020879259333014488, "eval_runtime": 7.8715, "eval_samples_per_second": 6.352, "eval_steps_per_second": 1.652, "step": 245},
    {"epoch": 2.4661654135338344, "grad_norm": 0.032372280955314636, "learning_rate": 5.92997093028579e-05, "loss": 0.0121, "step": 246},
    {"epoch": 2.4761904761904763, "grad_norm": 0.027126269415020943, "learning_rate": 5.895265183785097e-05, "loss": 0.0109, "step": 247},
    {"epoch": 2.4862155388471177, "grad_norm": 0.017588460817933083, "learning_rate": 5.8605148172793546e-05, "loss": 0.0062, "step": 248},
    {"epoch": 2.4962406015037595, "grad_norm": 0.037390079349279404, "learning_rate": 5.8257215627263794e-05, "loss": 0.0131, "step": 249},
    {"epoch": 2.506265664160401, "grad_norm": 0.03616498038172722, "learning_rate": 5.79088715422152e-05, "loss": 0.0137, "step": 250},
    {"epoch": 2.506265664160401, "eval_loss": 0.02056640014052391, "eval_runtime": 7.8501, "eval_samples_per_second": 6.369, "eval_steps_per_second": 1.656, "step": 250},
    {"epoch": 2.5162907268170427, "grad_norm": 0.014069591648876667, "learning_rate": 5.7560133279112415e-05, "loss": 0.0059, "step": 251},
    {"epoch": 2.526315789473684, "grad_norm": 0.017873914912343025, "learning_rate": 5.7211018219065916e-05, "loss": 0.0069, "step": 252},
    {"epoch": 2.536340852130326, "grad_norm": 0.025818752124905586, "learning_rate": 5.686154376196572e-05, "loss": 0.0115, "step": 253},
    {"epoch": 2.5463659147869673, "grad_norm": 0.03378435969352722, "learning_rate": 5.651172732561419e-05, "loss": 0.0146, "step": 254},
    {"epoch": 2.556390977443609, "grad_norm": 0.041657060384750366, "learning_rate": 5.616158634485793e-05, "loss": 0.0163, "step": 255},
    {"epoch": 2.556390977443609, "eval_loss": 0.020469658076763153, "eval_runtime": 7.8576, "eval_samples_per_second": 6.363, "eval_steps_per_second": 1.654, "step": 255},
    {"epoch": 2.5664160401002505, "grad_norm": 0.01962040551006794, "learning_rate": 5.581113827071889e-05, "loss": 0.0073, "step": 256},
    {"epoch": 2.5764411027568923, "grad_norm": 0.023728540167212486, "learning_rate": 5.546040056952443e-05, "loss": 0.012, "step": 257},
    {"epoch": 2.5864661654135337, "grad_norm": 0.027505788952112198, "learning_rate": 5.510939072203704e-05, "loss": 0.0081, "step": 258},
    {"epoch": 2.5964912280701755, "grad_norm": 0.028974315151572227, "learning_rate": 5.4758126222582916e-05, "loss": 0.0102, "step": 259},
    {"epoch": 2.606516290726817, "grad_norm": 0.02385525219142437, "learning_rate": 5.4406624578180096e-05, "loss": 0.0104, "step": 260},
    {"epoch": 2.606516290726817, "eval_loss": 0.020320262759923935, "eval_runtime": 7.8519, "eval_samples_per_second": 6.368, "eval_steps_per_second": 1.656, "step": 260},
    {"epoch": 2.6165413533834587, "grad_norm": 0.03372911363840103, "learning_rate": 5.405490330766595e-05, "loss": 0.0132, "step": 261},
    {"epoch": 2.6265664160401, "grad_norm": 0.03256263956427574, "learning_rate": 5.3702979940824004e-05, "loss": 0.0111, "step": 262},
    {"epoch": 2.636591478696742, "grad_norm": 0.03408624231815338, "learning_rate": 5.335087201751026e-05, "loss": 0.0085, "step": 263},
    {"epoch": 2.6466165413533833, "grad_norm": 0.030111782252788544, "learning_rate": 5.2998597086779e-05, "loss": 0.0138, "step": 264},
    {"epoch": 2.656641604010025, "grad_norm": 0.03473125770688057, "learning_rate": 5.2646172706008156e-05, "loss": 0.0124, "step": 265},
    {"epoch": 2.656641604010025, "eval_loss": 0.020433049649000168, "eval_runtime": 7.8438, "eval_samples_per_second": 6.374, "eval_steps_per_second": 1.657, "step": 265},
    {"epoch": 2.6666666666666665, "grad_norm": 0.02227642573416233, "learning_rate": 5.2293616440024286e-05, "loss": 0.0072, "step": 266},
    {"epoch": 2.6766917293233083, "grad_norm": 0.02503472939133644, "learning_rate": 5.194094586022703e-05, "loss": 0.0093, "step": 267},
    {"epoch": 2.6867167919799497, "grad_norm": 0.022726213559508324, "learning_rate": 5.158817854371349e-05, "loss": 0.0081, "step": 268},
    {"epoch": 2.6967418546365916, "grad_norm": 0.02940908446907997, "learning_rate": 5.123533207240212e-05, "loss": 0.0136, "step": 269},
    {"epoch": 2.706766917293233, "grad_norm": 0.028102362528443336, "learning_rate": 5.088242403215644e-05, "loss": 0.0131, "step": 270},
    {"epoch": 2.706766917293233, "eval_loss": 0.020840449258685112, "eval_runtime": 7.8435, "eval_samples_per_second": 6.375, "eval_steps_per_second": 1.657, "step": 270},
    {"epoch": 2.7167919799498748, "grad_norm": 0.021073520183563232, "learning_rate": 5.052947201190852e-05, "loss": 0.0093, "step": 271},
    {"epoch": 2.726817042606516, "grad_norm": 0.024549121037125587, "learning_rate": 5.017649360278246e-05, "loss": 0.0094, "step": 272},
    {"epoch": 2.736842105263158, "grad_norm": 0.023642798885703087, "learning_rate": 4.982350639721754e-05, "loss": 0.0105, "step": 273},
    {"epoch": 2.7468671679197993, "grad_norm": 0.030647899955511093, "learning_rate": 4.947052798809148e-05, "loss": 0.0141, "step": 274},
    {"epoch": 2.756892230576441, "grad_norm": 0.022092748433351517, "learning_rate": 4.911757596784357e-05, "loss": 0.0089, "step": 275},
    {"epoch": 2.756892230576441, "eval_loss": 0.020474795252084732, "eval_runtime": 7.8473, "eval_samples_per_second": 6.372, "eval_steps_per_second": 1.657, "step": 275},
    {"epoch": 2.7669172932330826, "grad_norm": 0.02058694139122963, "learning_rate": 4.8764667927597884e-05, "loss": 0.0082, "step": 276},
    {"epoch": 2.7769423558897244, "grad_norm": 0.026842277497053146, "learning_rate": 4.841182145628652e-05, "loss": 0.0092, "step": 277},
    {"epoch": 2.7869674185463658, "grad_norm": 0.03636394441127777, "learning_rate": 4.8059054139772996e-05, "loss": 0.0121, "step": 278},
    {"epoch": 2.7969924812030076, "grad_norm": 0.02889190800487995, "learning_rate": 4.770638355997574e-05, "loss": 0.0106, "step": 279},
    {"epoch": 2.807017543859649, "grad_norm": 0.027845360338687897, "learning_rate": 4.735382729399184e-05, "loss": 0.0093, "step": 280},
    {"epoch": 2.807017543859649, "eval_loss": 0.020694730803370476, "eval_runtime": 7.846, "eval_samples_per_second": 6.373, "eval_steps_per_second": 1.657, "step": 280},
    {"epoch": 2.817042606516291, "grad_norm": 0.04447019472718239, "learning_rate": 4.7001402913221016e-05, "loss": 0.014, "step": 281},
    {"epoch": 2.827067669172932, "grad_norm": 0.02357538230717182, "learning_rate": 4.6649127982489746e-05, "loss": 0.0052, "step": 282},
    {"epoch": 2.837092731829574, "grad_norm": 0.0315440371632576, "learning_rate": 4.629702005917601e-05, "loss": 0.0161, "step": 283},
    {"epoch": 2.8471177944862154, "grad_norm": 0.016042588278651237, "learning_rate": 4.594509669233406e-05, "loss": 0.0075, "step": 284},
    {"epoch": 2.857142857142857, "grad_norm": 0.02733796462416649, "learning_rate": 4.559337542181993e-05, "loss": 0.0139, "step": 285},
    {"epoch": 2.857142857142857, "eval_loss": 0.02120617963373661, "eval_runtime": 7.8783, "eval_samples_per_second": 6.347, "eval_steps_per_second": 1.65, "step": 285},
    {"epoch": 2.8671679197994986, "grad_norm": 0.023688923567533493, "learning_rate": 4.5241873777417096e-05, "loss": 0.009, "step": 286},
    {"epoch": 2.8771929824561404, "grad_norm": 0.026211708784103394, "learning_rate": 4.489060927796297e-05, "loss": 0.01, "step": 287},
    {"epoch": 2.887218045112782, "grad_norm": 0.020815694704651833, "learning_rate": 4.4539599430475575e-05, "loss": 0.01, "step": 288},
    {"epoch": 2.8972431077694236, "grad_norm": 0.02503124438226223, "learning_rate": 4.418886172928113e-05, "loss": 0.0082, "step": 289},
    {"epoch": 2.907268170426065, "grad_norm": 0.03107696771621704, "learning_rate": 4.383841365514208e-05, "loss": 0.0121, "step": 290},
    {"epoch": 2.907268170426065, "eval_loss": 0.02047637850046158, "eval_runtime": 7.8528, "eval_samples_per_second": 6.367, "eval_steps_per_second": 1.655, "step": 290},
    {"epoch": 2.917293233082707, "grad_norm": 0.029050437733530998, "learning_rate": 4.3488272674385825e-05, "loss": 0.0089, "step": 291},
    {"epoch": 2.927318295739348, "grad_norm": 0.020520159974694252, "learning_rate": 4.313845623803431e-05, "loss": 0.0082, "step": 292},
    {"epoch": 2.93734335839599, "grad_norm": 0.02317202091217041, "learning_rate": 4.278898178093409e-05, "loss": 0.0108, "step": 293},
    {"epoch": 2.9473684210526314, "grad_norm": 0.02789182774722576, "learning_rate": 4.243986672088758e-05, "loss": 0.0131, "step": 294},
    {"epoch": 2.9573934837092732, "grad_norm": 0.02339678630232811, "learning_rate": 4.209112845778481e-05, "loss": 0.0101, "step": 295},
    {"epoch": 2.9573934837092732, "eval_loss": 0.02035236358642578, "eval_runtime": 7.8472, "eval_samples_per_second": 6.372, "eval_steps_per_second": 1.657, "step": 295},
    {"epoch": 2.9674185463659146, "grad_norm": 0.034076038748025894, "learning_rate": 4.174278437273621e-05, "loss": 0.0062, "step": 296},
    {"epoch": 2.9774436090225564, "grad_norm": 0.026215389370918274, "learning_rate": 4.139485182720646e-05, "loss": 0.0113, "step": 297},
    {
|
"epoch": 2.987468671679198, |
|
"grad_norm": 0.021045586094260216, |
|
"learning_rate": 4.104734816214905e-05, |
|
"loss": 0.0082, |
|
"step": 298 |
|
}, |
|
{ |
|
"epoch": 2.9974937343358397, |
|
"grad_norm": 0.031620148569345474, |
|
"learning_rate": 4.0700290697142126e-05, |
|
"loss": 0.0135, |
|
"step": 299 |
|
}, |
|
{ |
|
"epoch": 3.007518796992481, |
|
"grad_norm": 0.016665760427713394, |
|
"learning_rate": 4.035369672952516e-05, |
|
"loss": 0.0087, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 3.007518796992481, |
|
"eval_loss": 0.019945178180933, |
|
"eval_runtime": 7.8901, |
|
"eval_samples_per_second": 6.337, |
|
"eval_steps_per_second": 1.648, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 3.017543859649123, |
|
"grad_norm": 0.01904158480465412, |
|
"learning_rate": 4.000758353353701e-05, |
|
"loss": 0.0082, |
|
"step": 301 |
|
}, |
|
{ |
|
"epoch": 3.0275689223057642, |
|
"grad_norm": 0.019835559651255608, |
|
"learning_rate": 3.966196835945482e-05, |
|
"loss": 0.0066, |
|
"step": 302 |
|
}, |
|
{ |
|
"epoch": 3.037593984962406, |
|
"grad_norm": 0.02055111713707447, |
|
"learning_rate": 3.9316868432734335e-05, |
|
"loss": 0.0083, |
|
"step": 303 |
|
}, |
|
{ |
|
"epoch": 3.0476190476190474, |
|
"grad_norm": 0.02135693095624447, |
|
"learning_rate": 3.897230095315141e-05, |
|
"loss": 0.0083, |
|
"step": 304 |
|
}, |
|
{ |
|
"epoch": 3.0576441102756893, |
|
"grad_norm": 0.01703447662293911, |
|
"learning_rate": 3.8628283093944686e-05, |
|
"loss": 0.0079, |
|
"step": 305 |
|
}, |
|
{ |
|
"epoch": 3.0576441102756893, |
|
"eval_loss": 0.020382743328809738, |
|
"eval_runtime": 7.8801, |
|
"eval_samples_per_second": 6.345, |
|
"eval_steps_per_second": 1.65, |
|
"step": 305 |
|
}, |
|
{ |
|
"epoch": 3.0676691729323307, |
|
"grad_norm": 0.0192066989839077, |
|
"learning_rate": 3.828483200095978e-05, |
|
"loss": 0.0086, |
|
"step": 306 |
|
}, |
|
{ |
|
"epoch": 3.0776942355889725, |
|
"grad_norm": 0.022239364683628082, |
|
"learning_rate": 3.79419647917947e-05, |
|
"loss": 0.0086, |
|
"step": 307 |
|
}, |
|
{ |
|
"epoch": 3.087719298245614, |
|
"grad_norm": 0.025497829541563988, |
|
"learning_rate": 3.7599698554946625e-05, |
|
"loss": 0.0095, |
|
"step": 308 |
|
}, |
|
{ |
|
"epoch": 3.0977443609022557, |
|
"grad_norm": 0.024126721546053886, |
|
"learning_rate": 3.725805034896035e-05, |
|
"loss": 0.0076, |
|
"step": 309 |
|
}, |
|
{ |
|
"epoch": 3.107769423558897, |
|
"grad_norm": 0.02745652385056019, |
|
"learning_rate": 3.691703720157798e-05, |
|
"loss": 0.01, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 3.107769423558897, |
|
"eval_loss": 0.020815785974264145, |
|
"eval_runtime": 7.85, |
|
"eval_samples_per_second": 6.369, |
|
"eval_steps_per_second": 1.656, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 3.117794486215539, |
|
"grad_norm": 0.032075103372335434, |
|
"learning_rate": 3.657667610889032e-05, |
|
"loss": 0.0077, |
|
"step": 311 |
|
}, |
|
{ |
|
"epoch": 3.1278195488721803, |
|
"grad_norm": 0.03226759657263756, |
|
"learning_rate": 3.623698403448976e-05, |
|
"loss": 0.0093, |
|
"step": 312 |
|
}, |
|
{ |
|
"epoch": 3.137844611528822, |
|
"grad_norm": 0.02231232076883316, |
|
"learning_rate": 3.589797790862485e-05, |
|
"loss": 0.0067, |
|
"step": 313 |
|
}, |
|
{ |
|
"epoch": 3.1478696741854635, |
|
"grad_norm": 0.029100285843014717, |
|
"learning_rate": 3.555967462735651e-05, |
|
"loss": 0.0072, |
|
"step": 314 |
|
}, |
|
{ |
|
"epoch": 3.1578947368421053, |
|
"grad_norm": 0.030341139063239098, |
|
"learning_rate": 3.52220910517158e-05, |
|
"loss": 0.0089, |
|
"step": 315 |
|
}, |
|
{ |
|
"epoch": 3.1578947368421053, |
|
"eval_loss": 0.021213330328464508, |
|
"eval_runtime": 7.8447, |
|
"eval_samples_per_second": 6.374, |
|
"eval_steps_per_second": 1.657, |
|
"step": 315 |
|
}, |
|
{ |
|
"epoch": 3.1679197994987467, |
|
"grad_norm": 0.024791590869426727, |
|
"learning_rate": 3.488524400686375e-05, |
|
"loss": 0.0088, |
|
"step": 316 |
|
}, |
|
{ |
|
"epoch": 3.1779448621553885, |
|
"grad_norm": 0.029614470899105072, |
|
"learning_rate": 3.4549150281252636e-05, |
|
"loss": 0.01, |
|
"step": 317 |
|
}, |
|
{ |
|
"epoch": 3.18796992481203, |
|
"grad_norm": 0.028596239164471626, |
|
"learning_rate": 3.421382662578937e-05, |
|
"loss": 0.0092, |
|
"step": 318 |
|
}, |
|
{ |
|
"epoch": 3.1979949874686717, |
|
"grad_norm": 0.038685377687215805, |
|
"learning_rate": 3.387928975300053e-05, |
|
"loss": 0.0117, |
|
"step": 319 |
|
}, |
|
{ |
|
"epoch": 3.208020050125313, |
|
"grad_norm": 0.03576364368200302, |
|
"learning_rate": 3.35455563361995e-05, |
|
"loss": 0.0079, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 3.208020050125313, |
|
"eval_loss": 0.020832277834415436, |
|
"eval_runtime": 7.8486, |
|
"eval_samples_per_second": 6.371, |
|
"eval_steps_per_second": 1.656, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 3.218045112781955, |
|
"grad_norm": 0.026977384462952614, |
|
"learning_rate": 3.321264300865541e-05, |
|
"loss": 0.0073, |
|
"step": 321 |
|
}, |
|
{ |
|
"epoch": 3.2280701754385963, |
|
"grad_norm": 0.023794258013367653, |
|
"learning_rate": 3.288056636276413e-05, |
|
"loss": 0.0067, |
|
"step": 322 |
|
}, |
|
{ |
|
"epoch": 3.238095238095238, |
|
"grad_norm": 0.03268418088555336, |
|
"learning_rate": 3.2549342949221365e-05, |
|
"loss": 0.0106, |
|
"step": 323 |
|
}, |
|
{ |
|
"epoch": 3.2481203007518795, |
|
"grad_norm": 0.022963467985391617, |
|
"learning_rate": 3.22189892761977e-05, |
|
"loss": 0.0082, |
|
"step": 324 |
|
}, |
|
{ |
|
"epoch": 3.2581453634085213, |
|
"grad_norm": 0.023204447701573372, |
|
"learning_rate": 3.188952180851589e-05, |
|
"loss": 0.006, |
|
"step": 325 |
|
}, |
|
{ |
|
"epoch": 3.2581453634085213, |
|
"eval_loss": 0.020569898188114166, |
|
"eval_runtime": 7.8459, |
|
"eval_samples_per_second": 6.373, |
|
"eval_steps_per_second": 1.657, |
|
"step": 325 |
|
}, |
|
{ |
|
"epoch": 3.2681704260651627, |
|
"grad_norm": 0.028982898220419884, |
|
"learning_rate": 3.156095696683018e-05, |
|
"loss": 0.0085, |
|
"step": 326 |
|
}, |
|
{ |
|
"epoch": 3.2781954887218046, |
|
"grad_norm": 0.019690126180648804, |
|
"learning_rate": 3.123331112680801e-05, |
|
"loss": 0.0065, |
|
"step": 327 |
|
}, |
|
{ |
|
"epoch": 3.288220551378446, |
|
"grad_norm": 0.02583892270922661, |
|
"learning_rate": 3.090660061831378e-05, |
|
"loss": 0.0083, |
|
"step": 328 |
|
}, |
|
{ |
|
"epoch": 3.2982456140350878, |
|
"grad_norm": 0.025384318083524704, |
|
"learning_rate": 3.058084172459491e-05, |
|
"loss": 0.0068, |
|
"step": 329 |
|
}, |
|
{ |
|
"epoch": 3.308270676691729, |
|
"grad_norm": 0.030685538426041603, |
|
"learning_rate": 3.0256050681470444e-05, |
|
"loss": 0.0094, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 3.308270676691729, |
|
"eval_loss": 0.020690646022558212, |
|
"eval_runtime": 7.8578, |
|
"eval_samples_per_second": 6.363, |
|
"eval_steps_per_second": 1.654, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 3.318295739348371, |
|
"grad_norm": 0.03488805517554283, |
|
"learning_rate": 2.9932243676521693e-05, |
|
"loss": 0.0079, |
|
"step": 331 |
|
}, |
|
{ |
|
"epoch": 3.3283208020050123, |
|
"grad_norm": 0.02308155782520771, |
|
"learning_rate": 2.9609436848285567e-05, |
|
"loss": 0.0072, |
|
"step": 332 |
|
}, |
|
{ |
|
"epoch": 3.338345864661654, |
|
"grad_norm": 0.0337197482585907, |
|
"learning_rate": 2.9287646285450133e-05, |
|
"loss": 0.0076, |
|
"step": 333 |
|
}, |
|
{ |
|
"epoch": 3.3483709273182956, |
|
"grad_norm": 0.02416362054646015, |
|
"learning_rate": 2.8966888026052873e-05, |
|
"loss": 0.0067, |
|
"step": 334 |
|
}, |
|
{ |
|
"epoch": 3.3583959899749374, |
|
"grad_norm": 0.026764515787363052, |
|
"learning_rate": 2.8647178056681194e-05, |
|
"loss": 0.0091, |
|
"step": 335 |
|
}, |
|
{ |
|
"epoch": 3.3583959899749374, |
|
"eval_loss": 0.02051025629043579, |
|
"eval_runtime": 7.8462, |
|
"eval_samples_per_second": 6.373, |
|
"eval_steps_per_second": 1.657, |
|
"step": 335 |
|
}, |
|
{ |
|
"epoch": 3.3684210526315788, |
|
"grad_norm": 0.030355772003531456, |
|
"learning_rate": 2.8328532311675838e-05, |
|
"loss": 0.0079, |
|
"step": 336 |
|
}, |
|
{ |
|
"epoch": 3.3784461152882206, |
|
"grad_norm": 0.02602885104715824, |
|
"learning_rate": 2.8010966672336536e-05, |
|
"loss": 0.0083, |
|
"step": 337 |
|
}, |
|
{ |
|
"epoch": 3.388471177944862, |
|
"grad_norm": 0.026122696697711945, |
|
"learning_rate": 2.7694496966130607e-05, |
|
"loss": 0.0078, |
|
"step": 338 |
|
}, |
|
{ |
|
"epoch": 3.398496240601504, |
|
"grad_norm": 0.028853295370936394, |
|
"learning_rate": 2.7379138965904037e-05, |
|
"loss": 0.0084, |
|
"step": 339 |
|
}, |
|
{ |
|
"epoch": 3.408521303258145, |
|
"grad_norm": 0.020907724276185036, |
|
"learning_rate": 2.7064908389095468e-05, |
|
"loss": 0.0077, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 3.408521303258145, |
|
"eval_loss": 0.020498408004641533, |
|
"eval_runtime": 7.8504, |
|
"eval_samples_per_second": 6.369, |
|
"eval_steps_per_second": 1.656, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 3.418546365914787, |
|
"grad_norm": 0.019861876964569092, |
|
"learning_rate": 2.675182089695274e-05, |
|
"loss": 0.0062, |
|
"step": 341 |
|
}, |
|
{ |
|
"epoch": 3.4285714285714284, |
|
"grad_norm": 0.028957007452845573, |
|
"learning_rate": 2.643989209375235e-05, |
|
"loss": 0.0078, |
|
"step": 342 |
|
}, |
|
{ |
|
"epoch": 3.43859649122807, |
|
"grad_norm": 0.02627895213663578, |
|
"learning_rate": 2.6129137526021773e-05, |
|
"loss": 0.0073, |
|
"step": 343 |
|
}, |
|
{ |
|
"epoch": 3.4486215538847116, |
|
"grad_norm": 0.028672248125076294, |
|
"learning_rate": 2.581957268176459e-05, |
|
"loss": 0.0082, |
|
"step": 344 |
|
}, |
|
{ |
|
"epoch": 3.4586466165413534, |
|
"grad_norm": 0.021196383982896805, |
|
"learning_rate": 2.5511212989688586e-05, |
|
"loss": 0.0074, |
|
"step": 345 |
|
}, |
|
{ |
|
"epoch": 3.4586466165413534, |
|
"eval_loss": 0.02022087387740612, |
|
"eval_runtime": 7.8477, |
|
"eval_samples_per_second": 6.371, |
|
"eval_steps_per_second": 1.657, |
|
"step": 345 |
|
}, |
|
{ |
|
"epoch": 3.468671679197995, |
|
"grad_norm": 0.024043511599302292, |
|
"learning_rate": 2.520407381843679e-05, |
|
"loss": 0.0077, |
|
"step": 346 |
|
}, |
|
{ |
|
"epoch": 3.4786967418546366, |
|
"grad_norm": 0.035246286541223526, |
|
"learning_rate": 2.4898170475821493e-05, |
|
"loss": 0.0088, |
|
"step": 347 |
|
}, |
|
{ |
|
"epoch": 3.488721804511278, |
|
"grad_norm": 0.02104768343269825, |
|
"learning_rate": 2.4593518208061274e-05, |
|
"loss": 0.0068, |
|
"step": 348 |
|
}, |
|
{ |
|
"epoch": 3.49874686716792, |
|
"grad_norm": 0.025738101452589035, |
|
"learning_rate": 2.4290132199021176e-05, |
|
"loss": 0.0068, |
|
"step": 349 |
|
}, |
|
{ |
|
"epoch": 3.5087719298245617, |
|
"grad_norm": 0.02881302498281002, |
|
"learning_rate": 2.3988027569455895e-05, |
|
"loss": 0.007, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 3.5087719298245617, |
|
"eval_loss": 0.020256277173757553, |
|
"eval_runtime": 7.8561, |
|
"eval_samples_per_second": 6.364, |
|
"eval_steps_per_second": 1.655, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 3.518796992481203, |
|
"grad_norm": 0.034245338290929794, |
|
"learning_rate": 2.3687219376256255e-05, |
|
"loss": 0.0103, |
|
"step": 351 |
|
}, |
|
{ |
|
"epoch": 3.5288220551378444, |
|
"grad_norm": 0.021362487226724625, |
|
"learning_rate": 2.3387722611698653e-05, |
|
"loss": 0.0063, |
|
"step": 352 |
|
}, |
|
{ |
|
"epoch": 3.5388471177944862, |
|
"grad_norm": 0.023916201665997505, |
|
"learning_rate": 2.30895522026979e-05, |
|
"loss": 0.0073, |
|
"step": 353 |
|
}, |
|
{ |
|
"epoch": 3.548872180451128, |
|
"grad_norm": 0.022715197876095772, |
|
"learning_rate": 2.2792723010063316e-05, |
|
"loss": 0.0076, |
|
"step": 354 |
|
}, |
|
{ |
|
"epoch": 3.5588972431077694, |
|
"grad_norm": 0.03498687595129013, |
|
"learning_rate": 2.2497249827757933e-05, |
|
"loss": 0.0087, |
|
"step": 355 |
|
}, |
|
{ |
|
"epoch": 3.5588972431077694, |
|
"eval_loss": 0.020144272595643997, |
|
"eval_runtime": 7.8441, |
|
"eval_samples_per_second": 6.374, |
|
"eval_steps_per_second": 1.657, |
|
"step": 355 |
|
}, |
|
{ |
|
"epoch": 3.568922305764411, |
|
"grad_norm": 0.019907698035240173, |
|
"learning_rate": 2.2203147382161336e-05, |
|
"loss": 0.0063, |
|
"step": 356 |
|
}, |
|
{ |
|
"epoch": 3.5789473684210527, |
|
"grad_norm": 0.02432534098625183, |
|
"learning_rate": 2.1910430331335553e-05, |
|
"loss": 0.0075, |
|
"step": 357 |
|
}, |
|
{ |
|
"epoch": 3.5889724310776945, |
|
"grad_norm": 0.03905978426337242, |
|
"learning_rate": 2.1619113264294554e-05, |
|
"loss": 0.0078, |
|
"step": 358 |
|
}, |
|
{ |
|
"epoch": 3.598997493734336, |
|
"grad_norm": 0.024142051115632057, |
|
"learning_rate": 2.132921070027714e-05, |
|
"loss": 0.0075, |
|
"step": 359 |
|
}, |
|
{ |
|
"epoch": 3.6090225563909772, |
|
"grad_norm": 0.023508962243795395, |
|
"learning_rate": 2.1040737088023323e-05, |
|
"loss": 0.0067, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 3.6090225563909772, |
|
"eval_loss": 0.02008051984012127, |
|
"eval_runtime": 7.8442, |
|
"eval_samples_per_second": 6.374, |
|
"eval_steps_per_second": 1.657, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 3.619047619047619, |
|
"grad_norm": 0.04632345959544182, |
|
"learning_rate": 2.0753706805054185e-05, |
|
"loss": 0.0109, |
|
"step": 361 |
|
}, |
|
{ |
|
"epoch": 3.629072681704261, |
|
"grad_norm": 0.03106398694217205, |
|
"learning_rate": 2.0468134156955254e-05, |
|
"loss": 0.0069, |
|
"step": 362 |
|
}, |
|
{ |
|
"epoch": 3.6390977443609023, |
|
"grad_norm": 0.032094817608594894, |
|
"learning_rate": 2.0184033376663573e-05, |
|
"loss": 0.0081, |
|
"step": 363 |
|
}, |
|
{ |
|
"epoch": 3.6491228070175437, |
|
"grad_norm": 0.025334365665912628, |
|
"learning_rate": 1.99014186237583e-05, |
|
"loss": 0.0079, |
|
"step": 364 |
|
}, |
|
{ |
|
"epoch": 3.6591478696741855, |
|
"grad_norm": 0.023328186944127083, |
|
"learning_rate": 1.962030398375506e-05, |
|
"loss": 0.007, |
|
"step": 365 |
|
}, |
|
{ |
|
"epoch": 3.6591478696741855, |
|
"eval_loss": 0.020097460597753525, |
|
"eval_runtime": 7.8736, |
|
"eval_samples_per_second": 6.35, |
|
"eval_steps_per_second": 1.651, |
|
"step": 365 |
|
}, |
|
{ |
|
"epoch": 3.6691729323308273, |
|
"grad_norm": 0.02302337810397148, |
|
"learning_rate": 1.9340703467403793e-05, |
|
"loss": 0.0056, |
|
"step": 366 |
|
}, |
|
{ |
|
"epoch": 3.6791979949874687, |
|
"grad_norm": 0.024331629276275635, |
|
"learning_rate": 1.9062631009990612e-05, |
|
"loss": 0.0088, |
|
"step": 367 |
|
}, |
|
{ |
|
"epoch": 3.68922305764411, |
|
"grad_norm": 0.02797406166791916, |
|
"learning_rate": 1.8786100470643143e-05, |
|
"loss": 0.0067, |
|
"step": 368 |
|
}, |
|
{ |
|
"epoch": 3.699248120300752, |
|
"grad_norm": 0.028790263459086418, |
|
"learning_rate": 1.8511125631639826e-05, |
|
"loss": 0.0082, |
|
"step": 369 |
|
}, |
|
{ |
|
"epoch": 3.7092731829573937, |
|
"grad_norm": 0.02699551172554493, |
|
"learning_rate": 1.8237720197723075e-05, |
|
"loss": 0.006, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 3.7092731829573937, |
|
"eval_loss": 0.019944829866290092, |
|
"eval_runtime": 7.875, |
|
"eval_samples_per_second": 6.349, |
|
"eval_steps_per_second": 1.651, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 3.719298245614035, |
|
"grad_norm": 0.04655482620000839, |
|
"learning_rate": 1.7965897795416127e-05, |
|
"loss": 0.0116, |
|
"step": 371 |
|
}, |
|
{ |
|
"epoch": 3.7293233082706765, |
|
"grad_norm": 0.023725813254714012, |
|
"learning_rate": 1.769567197234395e-05, |
|
"loss": 0.0043, |
|
"step": 372 |
|
}, |
|
{ |
|
"epoch": 3.7393483709273183, |
|
"grad_norm": 0.03052056021988392, |
|
"learning_rate": 1.742705619655802e-05, |
|
"loss": 0.0077, |
|
"step": 373 |
|
}, |
|
{ |
|
"epoch": 3.74937343358396, |
|
"grad_norm": 0.022183619439601898, |
|
"learning_rate": 1.7160063855865106e-05, |
|
"loss": 0.0065, |
|
"step": 374 |
|
}, |
|
{ |
|
"epoch": 3.7593984962406015, |
|
"grad_norm": 0.025241268798708916, |
|
"learning_rate": 1.689470825715998e-05, |
|
"loss": 0.0073, |
|
"step": 375 |
|
}, |
|
{ |
|
"epoch": 3.7593984962406015, |
|
"eval_loss": 0.019921263679862022, |
|
"eval_runtime": 7.8625, |
|
"eval_samples_per_second": 6.359, |
|
"eval_steps_per_second": 1.653, |
|
"step": 375 |
|
}, |
|
{ |
|
"epoch": 3.769423558897243, |
|
"grad_norm": 0.027392975986003876, |
|
"learning_rate": 1.6631002625762204e-05, |
|
"loss": 0.0088, |
|
"step": 376 |
|
}, |
|
{ |
|
"epoch": 3.7794486215538847, |
|
"grad_norm": 0.029222341254353523, |
|
"learning_rate": 1.6368960104757e-05, |
|
"loss": 0.0078, |
|
"step": 377 |
|
}, |
|
{ |
|
"epoch": 3.7894736842105265, |
|
"grad_norm": 0.02577180787920952, |
|
"learning_rate": 1.6108593754340158e-05, |
|
"loss": 0.0047, |
|
"step": 378 |
|
}, |
|
{ |
|
"epoch": 3.799498746867168, |
|
"grad_norm": 0.037013255059719086, |
|
"learning_rate": 1.5849916551167198e-05, |
|
"loss": 0.0106, |
|
"step": 379 |
|
}, |
|
{ |
|
"epoch": 3.8095238095238093, |
|
"grad_norm": 0.020461726933717728, |
|
"learning_rate": 1.559294138770656e-05, |
|
"loss": 0.0071, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 3.8095238095238093, |
|
"eval_loss": 0.019906265661120415, |
|
"eval_runtime": 7.8481, |
|
"eval_samples_per_second": 6.371, |
|
"eval_steps_per_second": 1.656, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 3.819548872180451, |
|
"grad_norm": 0.021040087565779686, |
|
"learning_rate": 1.533768107159701e-05, |
|
"loss": 0.0064, |
|
"step": 381 |
|
}, |
|
{ |
|
"epoch": 3.829573934837093, |
|
"grad_norm": 0.019402172416448593, |
|
"learning_rate": 1.5084148325009367e-05, |
|
"loss": 0.0068, |
|
"step": 382 |
|
}, |
|
{ |
|
"epoch": 3.8395989974937343, |
|
"grad_norm": 0.02361433021724224, |
|
"learning_rate": 1.4832355784012386e-05, |
|
"loss": 0.0074, |
|
"step": 383 |
|
}, |
|
{ |
|
"epoch": 3.8496240601503757, |
|
"grad_norm": 0.02696828916668892, |
|
"learning_rate": 1.4582315997943046e-05, |
|
"loss": 0.0084, |
|
"step": 384 |
|
}, |
|
{ |
|
"epoch": 3.8596491228070176, |
|
"grad_norm": 0.040230002254247665, |
|
"learning_rate": 1.4334041428781003e-05, |
|
"loss": 0.01, |
|
"step": 385 |
|
}, |
|
{ |
|
"epoch": 3.8596491228070176, |
|
"eval_loss": 0.0194709375500679, |
|
"eval_runtime": 7.8884, |
|
"eval_samples_per_second": 6.338, |
|
"eval_steps_per_second": 1.648, |
|
"step": 385 |
|
}, |
|
{ |
|
"epoch": 3.8696741854636594, |
|
"grad_norm": 0.0518641360104084, |
|
"learning_rate": 1.4087544450527513e-05, |
|
"loss": 0.0096, |
|
"step": 386 |
|
}, |
|
{ |
|
"epoch": 3.8796992481203008, |
|
"grad_norm": 0.025948142632842064, |
|
"learning_rate": 1.3842837348588783e-05, |
|
"loss": 0.0073, |
|
"step": 387 |
|
}, |
|
{ |
|
"epoch": 3.889724310776942, |
|
"grad_norm": 0.025720641016960144, |
|
"learning_rate": 1.3599932319163538e-05, |
|
"loss": 0.0061, |
|
"step": 388 |
|
}, |
|
{ |
|
"epoch": 3.899749373433584, |
|
"grad_norm": 0.052690256386995316, |
|
"learning_rate": 1.3358841468635302e-05, |
|
"loss": 0.0103, |
|
"step": 389 |
|
}, |
|
{ |
|
"epoch": 3.909774436090226, |
|
"grad_norm": 0.02759932167828083, |
|
"learning_rate": 1.3119576812968892e-05, |
|
"loss": 0.0081, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 3.909774436090226, |
|
"eval_loss": 0.019465075805783272, |
|
"eval_runtime": 7.8746, |
|
"eval_samples_per_second": 6.35, |
|
"eval_steps_per_second": 1.651, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 3.919799498746867, |
|
"grad_norm": 0.028103185817599297, |
|
"learning_rate": 1.2882150277111621e-05, |
|
"loss": 0.0079, |
|
"step": 391 |
|
}, |
|
{ |
|
"epoch": 3.9298245614035086, |
|
"grad_norm": 0.024139197543263435, |
|
"learning_rate": 1.2646573694398906e-05, |
|
"loss": 0.0046, |
|
"step": 392 |
|
}, |
|
{ |
|
"epoch": 3.9398496240601504, |
|
"grad_norm": 0.024406850337982178, |
|
"learning_rate": 1.2412858805964573e-05, |
|
"loss": 0.0084, |
|
"step": 393 |
|
}, |
|
{ |
|
"epoch": 3.949874686716792, |
|
"grad_norm": 0.02168712578713894, |
|
"learning_rate": 1.2181017260155608e-05, |
|
"loss": 0.0073, |
|
"step": 394 |
|
}, |
|
{ |
|
"epoch": 3.9598997493734336, |
|
"grad_norm": 0.020631562918424606, |
|
"learning_rate": 1.1951060611951615e-05, |
|
"loss": 0.0077, |
|
"step": 395 |
|
}, |
|
{ |
|
"epoch": 3.9598997493734336, |
|
"eval_loss": 0.019751163199543953, |
|
"eval_runtime": 7.853, |
|
"eval_samples_per_second": 6.367, |
|
"eval_steps_per_second": 1.655, |
|
"step": 395 |
|
}, |
|
{ |
|
"epoch": 3.969924812030075, |
|
"grad_norm": 0.026922499760985374, |
|
"learning_rate": 1.1723000322388927e-05, |
|
"loss": 0.0084, |
|
"step": 396 |
|
}, |
|
{ |
|
"epoch": 3.979949874686717, |
|
"grad_norm": 0.03158457204699516, |
|
"learning_rate": 1.1496847757989381e-05, |
|
"loss": 0.0105, |
|
"step": 397 |
|
}, |
|
{ |
|
"epoch": 3.9899749373433586, |
|
"grad_norm": 0.022796666249632835, |
|
"learning_rate": 1.1272614190193853e-05, |
|
"loss": 0.0072, |
|
"step": 398 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"grad_norm": 0.04201427102088928, |
|
"learning_rate": 1.1050310794800405e-05, |
|
"loss": 0.0116, |
|
"step": 399 |
|
}, |
|
{ |
|
"epoch": 4.010025062656641, |
|
"grad_norm": 0.02010524645447731, |
|
"learning_rate": 1.0829948651407374e-05, |
|
"loss": 0.007, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 4.010025062656641, |
|
"eval_loss": 0.019864432513713837, |
|
"eval_runtime": 7.853, |
|
"eval_samples_per_second": 6.367, |
|
"eval_steps_per_second": 1.655, |
|
"step": 400 |
|
} |
|
], |
|
"logging_steps": 1, |
|
"max_steps": 495, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 5, |
|
"save_steps": 50, |
|
"stateful_callbacks": { |
|
"TrainerControl": { |
|
"args": { |
|
"should_epoch_stop": false, |
|
"should_evaluate": false, |
|
"should_log": false, |
|
"should_save": true, |
|
"should_training_stop": false |
|
}, |
|
"attributes": {} |
|
} |
|
}, |
|
"total_flos": 7.790179630436844e+17, |
|
"train_batch_size": 4, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|
|