|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.3003003003003003,
  "eval_steps": 9,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.003003003003003003,
      "grad_norm": 1.031739354133606,
      "learning_rate": 1e-05,
      "loss": 1.9291,
      "step": 1
    },
    {
      "epoch": 0.003003003003003003,
      "eval_loss": 1.886456847190857,
      "eval_runtime": 63.4329,
      "eval_samples_per_second": 4.43,
      "eval_steps_per_second": 0.568,
      "step": 1
    },
    {
      "epoch": 0.006006006006006006,
      "grad_norm": 1.023872971534729,
      "learning_rate": 2e-05,
      "loss": 1.9021,
      "step": 2
    },
    {
      "epoch": 0.009009009009009009,
      "grad_norm": 1.0211207866668701,
      "learning_rate": 3e-05,
      "loss": 1.8857,
      "step": 3
    },
    {
      "epoch": 0.012012012012012012,
      "grad_norm": 0.890316367149353,
      "learning_rate": 4e-05,
      "loss": 1.8535,
      "step": 4
    },
    {
      "epoch": 0.015015015015015015,
      "grad_norm": 0.8935955762863159,
      "learning_rate": 5e-05,
      "loss": 1.8881,
      "step": 5
    },
    {
      "epoch": 0.018018018018018018,
      "grad_norm": 0.7172915935516357,
      "learning_rate": 6e-05,
      "loss": 1.7194,
      "step": 6
    },
    {
      "epoch": 0.021021021021021023,
      "grad_norm": 0.6998835206031799,
      "learning_rate": 7e-05,
      "loss": 1.6528,
      "step": 7
    },
    {
      "epoch": 0.024024024024024024,
      "grad_norm": 1.0133540630340576,
      "learning_rate": 8e-05,
      "loss": 1.6748,
      "step": 8
    },
    {
      "epoch": 0.02702702702702703,
      "grad_norm": 1.1556763648986816,
      "learning_rate": 9e-05,
      "loss": 1.7526,
      "step": 9
    },
    {
      "epoch": 0.02702702702702703,
      "eval_loss": 1.6583313941955566,
      "eval_runtime": 62.8385,
      "eval_samples_per_second": 4.472,
      "eval_steps_per_second": 0.573,
      "step": 9
    },
    {
      "epoch": 0.03003003003003003,
      "grad_norm": 0.973503589630127,
      "learning_rate": 0.0001,
      "loss": 1.6329,
      "step": 10
    },
    {
      "epoch": 0.03303303303303303,
      "grad_norm": 0.817086398601532,
      "learning_rate": 9.99695413509548e-05,
      "loss": 1.5464,
      "step": 11
    },
    {
      "epoch": 0.036036036036036036,
      "grad_norm": 0.7001307606697083,
      "learning_rate": 9.987820251299122e-05,
      "loss": 1.49,
      "step": 12
    },
    {
      "epoch": 0.03903903903903904,
      "grad_norm": 0.7110887169837952,
      "learning_rate": 9.972609476841367e-05,
      "loss": 1.5194,
      "step": 13
    },
    {
      "epoch": 0.042042042042042045,
      "grad_norm": 0.7024288177490234,
      "learning_rate": 9.951340343707852e-05,
      "loss": 1.5195,
      "step": 14
    },
    {
      "epoch": 0.04504504504504504,
      "grad_norm": 0.7325843572616577,
      "learning_rate": 9.924038765061042e-05,
      "loss": 1.5147,
      "step": 15
    },
    {
      "epoch": 0.04804804804804805,
      "grad_norm": 0.6620925664901733,
      "learning_rate": 9.890738003669029e-05,
      "loss": 1.4085,
      "step": 16
    },
    {
      "epoch": 0.05105105105105105,
      "grad_norm": 0.51695716381073,
      "learning_rate": 9.851478631379982e-05,
      "loss": 1.4237,
      "step": 17
    },
    {
      "epoch": 0.05405405405405406,
      "grad_norm": 0.5765568017959595,
      "learning_rate": 9.806308479691595e-05,
      "loss": 1.502,
      "step": 18
    },
    {
      "epoch": 0.05405405405405406,
      "eval_loss": 1.4250683784484863,
      "eval_runtime": 62.8113,
      "eval_samples_per_second": 4.474,
      "eval_steps_per_second": 0.573,
      "step": 18
    },
    {
      "epoch": 0.057057057057057055,
      "grad_norm": 0.6175252199172974,
      "learning_rate": 9.755282581475769e-05,
      "loss": 1.4511,
      "step": 19
    },
    {
      "epoch": 0.06006006006006006,
      "grad_norm": 0.6237648725509644,
      "learning_rate": 9.698463103929542e-05,
      "loss": 1.351,
      "step": 20
    },
    {
      "epoch": 0.06306306306306306,
      "grad_norm": 0.6306739449501038,
      "learning_rate": 9.635919272833938e-05,
      "loss": 1.3199,
      "step": 21
    },
    {
      "epoch": 0.06606606606606606,
      "grad_norm": 0.49844619631767273,
      "learning_rate": 9.567727288213005e-05,
      "loss": 1.3723,
      "step": 22
    },
    {
      "epoch": 0.06906906906906907,
      "grad_norm": 0.5354206562042236,
      "learning_rate": 9.493970231495835e-05,
      "loss": 1.3546,
      "step": 23
    },
    {
      "epoch": 0.07207207207207207,
      "grad_norm": 0.5377964377403259,
      "learning_rate": 9.414737964294636e-05,
      "loss": 1.3483,
      "step": 24
    },
    {
      "epoch": 0.07507507507507508,
      "grad_norm": 0.5178672075271606,
      "learning_rate": 9.330127018922194e-05,
      "loss": 1.2779,
      "step": 25
    },
    {
      "epoch": 0.07807807807807808,
      "grad_norm": 0.5024471282958984,
      "learning_rate": 9.24024048078213e-05,
      "loss": 1.2697,
      "step": 26
    },
    {
      "epoch": 0.08108108108108109,
      "grad_norm": 0.5718808770179749,
      "learning_rate": 9.145187862775209e-05,
      "loss": 1.2743,
      "step": 27
    },
    {
      "epoch": 0.08108108108108109,
      "eval_loss": 1.3096789121627808,
      "eval_runtime": 62.829,
      "eval_samples_per_second": 4.472,
      "eval_steps_per_second": 0.573,
      "step": 27
    },
    {
      "epoch": 0.08408408408408409,
      "grad_norm": 0.5483112931251526,
      "learning_rate": 9.045084971874738e-05,
      "loss": 1.2454,
      "step": 28
    },
    {
      "epoch": 0.08708708708708708,
      "grad_norm": 0.5792304873466492,
      "learning_rate": 8.940053768033609e-05,
      "loss": 1.2741,
      "step": 29
    },
    {
      "epoch": 0.09009009009009009,
      "grad_norm": 0.5762671828269958,
      "learning_rate": 8.83022221559489e-05,
      "loss": 1.2769,
      "step": 30
    },
    {
      "epoch": 0.09309309309309309,
      "grad_norm": 0.5792064070701599,
      "learning_rate": 8.715724127386972e-05,
      "loss": 1.2854,
      "step": 31
    },
    {
      "epoch": 0.0960960960960961,
      "grad_norm": 0.5290413498878479,
      "learning_rate": 8.596699001693255e-05,
      "loss": 1.242,
      "step": 32
    },
    {
      "epoch": 0.0990990990990991,
      "grad_norm": 0.6523710489273071,
      "learning_rate": 8.473291852294987e-05,
      "loss": 1.3281,
      "step": 33
    },
    {
      "epoch": 0.1021021021021021,
      "grad_norm": 0.5404489636421204,
      "learning_rate": 8.345653031794292e-05,
      "loss": 1.1963,
      "step": 34
    },
    {
      "epoch": 0.10510510510510511,
      "grad_norm": 0.5534884929656982,
      "learning_rate": 8.213938048432697e-05,
      "loss": 1.2266,
      "step": 35
    },
    {
      "epoch": 0.10810810810810811,
      "grad_norm": 0.6106987595558167,
      "learning_rate": 8.07830737662829e-05,
      "loss": 1.2606,
      "step": 36
    },
    {
      "epoch": 0.10810810810810811,
      "eval_loss": 1.221689224243164,
      "eval_runtime": 62.7867,
      "eval_samples_per_second": 4.475,
      "eval_steps_per_second": 0.573,
      "step": 36
    },
    {
      "epoch": 0.1111111111111111,
      "grad_norm": 0.44786205887794495,
      "learning_rate": 7.938926261462366e-05,
      "loss": 1.2073,
      "step": 37
    },
    {
      "epoch": 0.11411411411411411,
      "grad_norm": 0.5416382551193237,
      "learning_rate": 7.795964517353735e-05,
      "loss": 1.1682,
      "step": 38
    },
    {
      "epoch": 0.11711711711711711,
      "grad_norm": 0.572591245174408,
      "learning_rate": 7.649596321166024e-05,
      "loss": 1.2197,
      "step": 39
    },
    {
      "epoch": 0.12012012012012012,
      "grad_norm": 0.5145111680030823,
      "learning_rate": 7.500000000000001e-05,
      "loss": 1.0935,
      "step": 40
    },
    {
      "epoch": 0.12312312312312312,
      "grad_norm": 0.6535457372665405,
      "learning_rate": 7.347357813929454e-05,
      "loss": 1.0762,
      "step": 41
    },
    {
      "epoch": 0.12612612612612611,
      "grad_norm": 0.5444203615188599,
      "learning_rate": 7.191855733945387e-05,
      "loss": 1.0495,
      "step": 42
    },
    {
      "epoch": 0.12912912912912913,
      "grad_norm": 0.6166582703590393,
      "learning_rate": 7.033683215379002e-05,
      "loss": 1.0792,
      "step": 43
    },
    {
      "epoch": 0.13213213213213212,
      "grad_norm": 0.531032145023346,
      "learning_rate": 6.873032967079561e-05,
      "loss": 0.9994,
      "step": 44
    },
    {
      "epoch": 0.13513513513513514,
      "grad_norm": 0.6408705115318298,
      "learning_rate": 6.710100716628344e-05,
      "loss": 1.2118,
      "step": 45
    },
    {
      "epoch": 0.13513513513513514,
      "eval_loss": 1.1635631322860718,
      "eval_runtime": 62.7574,
      "eval_samples_per_second": 4.478,
      "eval_steps_per_second": 0.574,
      "step": 45
    },
    {
      "epoch": 0.13813813813813813,
      "grad_norm": 0.5408794283866882,
      "learning_rate": 6.545084971874738e-05,
      "loss": 1.1765,
      "step": 46
    },
    {
      "epoch": 0.14114114114114115,
      "grad_norm": 0.6319204568862915,
      "learning_rate": 6.378186779084995e-05,
      "loss": 1.1065,
      "step": 47
    },
    {
      "epoch": 0.14414414414414414,
      "grad_norm": 0.5713602900505066,
      "learning_rate": 6.209609477998338e-05,
      "loss": 1.1523,
      "step": 48
    },
    {
      "epoch": 0.14714714714714713,
      "grad_norm": 0.5445019602775574,
      "learning_rate": 6.0395584540887963e-05,
      "loss": 1.1104,
      "step": 49
    },
    {
      "epoch": 0.15015015015015015,
      "grad_norm": 0.7082126140594482,
      "learning_rate": 5.868240888334653e-05,
      "loss": 1.2276,
      "step": 50
    },
    {
      "epoch": 0.15315315315315314,
      "grad_norm": 0.6700219511985779,
      "learning_rate": 5.695865504800327e-05,
      "loss": 1.1085,
      "step": 51
    },
    {
      "epoch": 0.15615615615615616,
      "grad_norm": 0.70674067735672,
      "learning_rate": 5.522642316338268e-05,
      "loss": 1.0032,
      "step": 52
    },
    {
      "epoch": 0.15915915915915915,
      "grad_norm": 0.6413066387176514,
      "learning_rate": 5.348782368720626e-05,
      "loss": 1.0976,
      "step": 53
    },
    {
      "epoch": 0.16216216216216217,
      "grad_norm": 0.5945645570755005,
      "learning_rate": 5.174497483512506e-05,
      "loss": 1.0874,
      "step": 54
    },
    {
      "epoch": 0.16216216216216217,
      "eval_loss": 1.1248000860214233,
      "eval_runtime": 62.8374,
      "eval_samples_per_second": 4.472,
      "eval_steps_per_second": 0.573,
      "step": 54
    },
    {
      "epoch": 0.16516516516516516,
      "grad_norm": 0.5928006172180176,
      "learning_rate": 5e-05,
      "loss": 1.1226,
      "step": 55
    },
    {
      "epoch": 0.16816816816816818,
      "grad_norm": 0.5897536277770996,
      "learning_rate": 4.825502516487497e-05,
      "loss": 1.061,
      "step": 56
    },
    {
      "epoch": 0.17117117117117117,
      "grad_norm": 0.6264550089836121,
      "learning_rate": 4.6512176312793736e-05,
      "loss": 1.0425,
      "step": 57
    },
    {
      "epoch": 0.17417417417417416,
      "grad_norm": 0.5464336276054382,
      "learning_rate": 4.477357683661734e-05,
      "loss": 1.0925,
      "step": 58
    },
    {
      "epoch": 0.17717717717717718,
      "grad_norm": 0.5804792642593384,
      "learning_rate": 4.3041344951996746e-05,
      "loss": 1.1799,
      "step": 59
    },
    {
      "epoch": 0.18018018018018017,
      "grad_norm": 0.6067994832992554,
      "learning_rate": 4.131759111665349e-05,
      "loss": 1.0761,
      "step": 60
    },
    {
      "epoch": 0.1831831831831832,
      "grad_norm": 0.5299519896507263,
      "learning_rate": 3.960441545911204e-05,
      "loss": 1.0996,
      "step": 61
    },
    {
      "epoch": 0.18618618618618618,
      "grad_norm": 0.7131068706512451,
      "learning_rate": 3.790390522001662e-05,
      "loss": 1.0971,
      "step": 62
    },
    {
      "epoch": 0.1891891891891892,
      "grad_norm": 0.5988982915878296,
      "learning_rate": 3.6218132209150045e-05,
      "loss": 1.0842,
      "step": 63
    },
    {
      "epoch": 0.1891891891891892,
      "eval_loss": 1.098732352256775,
      "eval_runtime": 62.8005,
      "eval_samples_per_second": 4.474,
      "eval_steps_per_second": 0.573,
      "step": 63
    },
    {
      "epoch": 0.1921921921921922,
      "grad_norm": 0.7022519111633301,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 1.046,
      "step": 64
    },
    {
      "epoch": 0.19519519519519518,
      "grad_norm": 0.5209968686103821,
      "learning_rate": 3.289899283371657e-05,
      "loss": 1.013,
      "step": 65
    },
    {
      "epoch": 0.1981981981981982,
      "grad_norm": 0.6534311771392822,
      "learning_rate": 3.12696703292044e-05,
      "loss": 1.0596,
      "step": 66
    },
    {
      "epoch": 0.2012012012012012,
      "grad_norm": 0.6855350136756897,
      "learning_rate": 2.9663167846209998e-05,
      "loss": 1.1311,
      "step": 67
    },
    {
      "epoch": 0.2042042042042042,
      "grad_norm": 0.5221145749092102,
      "learning_rate": 2.8081442660546125e-05,
      "loss": 1.0387,
      "step": 68
    },
    {
      "epoch": 0.2072072072072072,
      "grad_norm": 0.48642462491989136,
      "learning_rate": 2.6526421860705473e-05,
      "loss": 1.0218,
      "step": 69
    },
    {
      "epoch": 0.21021021021021022,
      "grad_norm": 0.6832244396209717,
      "learning_rate": 2.500000000000001e-05,
      "loss": 0.9763,
      "step": 70
    },
    {
      "epoch": 0.2132132132132132,
      "grad_norm": 0.5838537812232971,
      "learning_rate": 2.350403678833976e-05,
      "loss": 1.0369,
      "step": 71
    },
    {
      "epoch": 0.21621621621621623,
      "grad_norm": 0.579451322555542,
      "learning_rate": 2.2040354826462668e-05,
      "loss": 0.9797,
      "step": 72
    },
    {
      "epoch": 0.21621621621621623,
      "eval_loss": 1.0829377174377441,
      "eval_runtime": 63.4887,
      "eval_samples_per_second": 4.426,
      "eval_steps_per_second": 0.567,
      "step": 72
    },
    {
      "epoch": 0.21921921921921922,
      "grad_norm": 0.688620924949646,
      "learning_rate": 2.061073738537635e-05,
      "loss": 1.0522,
      "step": 73
    },
    {
      "epoch": 0.2222222222222222,
      "grad_norm": 0.5413029193878174,
      "learning_rate": 1.9216926233717085e-05,
      "loss": 1.0064,
      "step": 74
    },
    {
      "epoch": 0.22522522522522523,
      "grad_norm": 0.5388117432594299,
      "learning_rate": 1.7860619515673033e-05,
      "loss": 1.0725,
      "step": 75
    },
    {
      "epoch": 0.22822822822822822,
      "grad_norm": 0.5668699145317078,
      "learning_rate": 1.6543469682057106e-05,
      "loss": 1.0711,
      "step": 76
    },
    {
      "epoch": 0.23123123123123124,
      "grad_norm": 0.5445249676704407,
      "learning_rate": 1.526708147705013e-05,
      "loss": 1.0106,
      "step": 77
    },
    {
      "epoch": 0.23423423423423423,
      "grad_norm": 0.6580765843391418,
      "learning_rate": 1.4033009983067452e-05,
      "loss": 1.08,
      "step": 78
    },
    {
      "epoch": 0.23723723723723725,
      "grad_norm": 0.5185084939002991,
      "learning_rate": 1.2842758726130283e-05,
      "loss": 1.0145,
      "step": 79
    },
    {
      "epoch": 0.24024024024024024,
      "grad_norm": 0.6230930089950562,
      "learning_rate": 1.1697777844051105e-05,
      "loss": 1.0017,
      "step": 80
    },
    {
      "epoch": 0.24324324324324326,
      "grad_norm": 0.5382445454597473,
      "learning_rate": 1.0599462319663905e-05,
      "loss": 1.0962,
      "step": 81
    },
    {
      "epoch": 0.24324324324324326,
      "eval_loss": 1.0725353956222534,
      "eval_runtime": 62.8805,
      "eval_samples_per_second": 4.469,
      "eval_steps_per_second": 0.573,
      "step": 81
    },
    {
      "epoch": 0.24624624624624625,
      "grad_norm": 0.5744390487670898,
      "learning_rate": 9.549150281252633e-06,
      "loss": 1.0982,
      "step": 82
    },
    {
      "epoch": 0.24924924924924924,
      "grad_norm": 0.5682293176651001,
      "learning_rate": 8.548121372247918e-06,
      "loss": 1.0377,
      "step": 83
    },
    {
      "epoch": 0.25225225225225223,
      "grad_norm": 0.5965850949287415,
      "learning_rate": 7.597595192178702e-06,
      "loss": 1.0009,
      "step": 84
    },
    {
      "epoch": 0.2552552552552553,
      "grad_norm": 0.47231999039649963,
      "learning_rate": 6.698729810778065e-06,
      "loss": 1.0434,
      "step": 85
    },
    {
      "epoch": 0.25825825825825827,
      "grad_norm": 0.5315771698951721,
      "learning_rate": 5.852620357053651e-06,
      "loss": 0.9827,
      "step": 86
    },
    {
      "epoch": 0.26126126126126126,
      "grad_norm": 0.5202770829200745,
      "learning_rate": 5.060297685041659e-06,
      "loss": 1.0421,
      "step": 87
    },
    {
      "epoch": 0.26426426426426425,
      "grad_norm": 0.551384449005127,
      "learning_rate": 4.322727117869951e-06,
      "loss": 1.1419,
      "step": 88
    },
    {
      "epoch": 0.2672672672672673,
      "grad_norm": 0.48965591192245483,
      "learning_rate": 3.6408072716606346e-06,
      "loss": 1.0447,
      "step": 89
    },
    {
      "epoch": 0.2702702702702703,
      "grad_norm": 0.5531151294708252,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 1.0893,
      "step": 90
    },
    {
      "epoch": 0.2702702702702703,
      "eval_loss": 1.0678226947784424,
      "eval_runtime": 62.7776,
      "eval_samples_per_second": 4.476,
      "eval_steps_per_second": 0.573,
      "step": 90
    },
    {
      "epoch": 0.2732732732732733,
      "grad_norm": 0.4997876286506653,
      "learning_rate": 2.4471741852423237e-06,
      "loss": 1.0278,
      "step": 91
    },
    {
      "epoch": 0.27627627627627627,
      "grad_norm": 0.5054656267166138,
      "learning_rate": 1.9369152030840556e-06,
      "loss": 1.0288,
      "step": 92
    },
    {
      "epoch": 0.27927927927927926,
      "grad_norm": 0.5292591452598572,
      "learning_rate": 1.4852136862001764e-06,
      "loss": 0.9494,
      "step": 93
    },
    {
      "epoch": 0.2822822822822823,
      "grad_norm": 0.47419047355651855,
      "learning_rate": 1.0926199633097157e-06,
      "loss": 0.9839,
      "step": 94
    },
    {
      "epoch": 0.2852852852852853,
      "grad_norm": 0.49080803990364075,
      "learning_rate": 7.596123493895991e-07,
      "loss": 1.0356,
      "step": 95
    },
    {
      "epoch": 0.2882882882882883,
      "grad_norm": 0.5078819990158081,
      "learning_rate": 4.865965629214819e-07,
      "loss": 1.1231,
      "step": 96
    },
    {
      "epoch": 0.2912912912912913,
      "grad_norm": 0.4944559931755066,
      "learning_rate": 2.7390523158633554e-07,
      "loss": 1.0759,
      "step": 97
    },
    {
      "epoch": 0.29429429429429427,
      "grad_norm": 0.4932954013347626,
      "learning_rate": 1.2179748700879012e-07,
      "loss": 1.0656,
      "step": 98
    },
    {
      "epoch": 0.2972972972972973,
      "grad_norm": 0.4978138208389282,
      "learning_rate": 3.04586490452119e-08,
      "loss": 1.0526,
      "step": 99
    },
    {
      "epoch": 0.2972972972972973,
      "eval_loss": 1.066880702972412,
      "eval_runtime": 62.8204,
      "eval_samples_per_second": 4.473,
      "eval_steps_per_second": 0.573,
      "step": 99
    },
    {
      "epoch": 0.3003003003003003,
      "grad_norm": 0.47974127531051636,
      "learning_rate": 0.0,
      "loss": 1.0622,
      "step": 100
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.483774567120896e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
|
|