{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 6.461538461538462,
  "eval_steps": 500,
  "global_step": 63,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.10256410256410256,
      "grad_norm": 45.74676831817011,
      "learning_rate": 2.8571428571428573e-06,
      "loss": 1.7736,
      "step": 1
    },
    {
      "epoch": 0.20512820512820512,
      "grad_norm": 43.8997476738742,
      "learning_rate": 5.7142857142857145e-06,
      "loss": 1.7436,
      "step": 2
    },
    {
      "epoch": 0.3076923076923077,
      "grad_norm": 33.1407433311641,
      "learning_rate": 8.571428571428571e-06,
      "loss": 1.5701,
      "step": 3
    },
    {
      "epoch": 0.41025641025641024,
      "grad_norm": 15.347997497580668,
      "learning_rate": 1.1428571428571429e-05,
      "loss": 1.3552,
      "step": 4
    },
    {
      "epoch": 0.5128205128205128,
      "grad_norm": 42.292432586275865,
      "learning_rate": 1.4285714285714287e-05,
      "loss": 1.2627,
      "step": 5
    },
    {
      "epoch": 0.6153846153846154,
      "grad_norm": 36.376951471898245,
      "learning_rate": 1.7142857142857142e-05,
      "loss": 1.2198,
      "step": 6
    },
    {
      "epoch": 0.717948717948718,
      "grad_norm": 7.974991090529373,
      "learning_rate": 2e-05,
      "loss": 1.1377,
      "step": 7
    },
    {
      "epoch": 0.8205128205128205,
      "grad_norm": 16.501115024701512,
      "learning_rate": 1.998426815017817e-05,
      "loss": 1.1813,
      "step": 8
    },
    {
      "epoch": 0.9230769230769231,
      "grad_norm": 7.188262087017176,
      "learning_rate": 1.9937122098932428e-05,
      "loss": 1.1565,
      "step": 9
    },
    {
      "epoch": 1.0256410256410255,
      "grad_norm": 4.140303367679906,
      "learning_rate": 1.985871018518236e-05,
      "loss": 1.03,
      "step": 10
    },
    {
      "epoch": 1.1282051282051282,
      "grad_norm": 2.728109728903398,
      "learning_rate": 1.9749279121818235e-05,
      "loss": 0.9516,
      "step": 11
    },
    {
      "epoch": 1.2307692307692308,
      "grad_norm": 5.943582967422805,
      "learning_rate": 1.9609173219450998e-05,
      "loss": 0.9511,
      "step": 12
    },
    {
      "epoch": 1.3333333333333333,
      "grad_norm": 2.215718821444342,
      "learning_rate": 1.9438833303083677e-05,
      "loss": 0.927,
      "step": 13
    },
    {
      "epoch": 1.435897435897436,
      "grad_norm": 3.7476553781843758,
      "learning_rate": 1.9238795325112867e-05,
      "loss": 0.9357,
      "step": 14
    },
    {
      "epoch": 1.5384615384615383,
      "grad_norm": 2.3784620064293667,
      "learning_rate": 1.900968867902419e-05,
      "loss": 0.9271,
      "step": 15
    },
    {
      "epoch": 1.641025641025641,
      "grad_norm": 1.921424026693961,
      "learning_rate": 1.8752234219087538e-05,
      "loss": 0.9106,
      "step": 16
    },
    {
      "epoch": 1.7435897435897436,
      "grad_norm": 1.74875556144991,
      "learning_rate": 1.8467241992282842e-05,
      "loss": 0.8924,
      "step": 17
    },
    {
      "epoch": 1.8461538461538463,
      "grad_norm": 1.5465709392794136,
      "learning_rate": 1.8155608689592604e-05,
      "loss": 0.8985,
      "step": 18
    },
    {
      "epoch": 1.9487179487179487,
      "grad_norm": 1.6088170768182135,
      "learning_rate": 1.78183148246803e-05,
      "loss": 0.8731,
      "step": 19
    },
    {
      "epoch": 2.051282051282051,
      "grad_norm": 1.550794028416961,
      "learning_rate": 1.7456421648831658e-05,
      "loss": 0.8108,
      "step": 20
    },
    {
      "epoch": 2.1538461538461537,
      "grad_norm": 1.1891604452468383,
      "learning_rate": 1.7071067811865477e-05,
      "loss": 0.7693,
      "step": 21
    },
    {
      "epoch": 2.2564102564102564,
      "grad_norm": 1.7874206012739782,
      "learning_rate": 1.6663465779520042e-05,
      "loss": 0.7545,
      "step": 22
    },
    {
      "epoch": 2.358974358974359,
      "grad_norm": 1.546137200179085,
      "learning_rate": 1.6234898018587336e-05,
      "loss": 0.7505,
      "step": 23
    },
    {
      "epoch": 2.4615384615384617,
      "grad_norm": 1.5723321757671156,
      "learning_rate": 1.578671296179806e-05,
      "loss": 0.7592,
      "step": 24
    },
    {
      "epoch": 2.564102564102564,
      "grad_norm": 1.147889286365701,
      "learning_rate": 1.5320320765153367e-05,
      "loss": 0.737,
      "step": 25
    },
    {
      "epoch": 2.6666666666666665,
      "grad_norm": 1.2373533333897078,
      "learning_rate": 1.4837188871052399e-05,
      "loss": 0.7165,
      "step": 26
    },
    {
      "epoch": 2.769230769230769,
      "grad_norm": 1.1523919084408827,
      "learning_rate": 1.4338837391175582e-05,
      "loss": 0.6998,
      "step": 27
    },
    {
      "epoch": 2.871794871794872,
      "grad_norm": 1.1820135421697542,
      "learning_rate": 1.3826834323650899e-05,
      "loss": 0.7086,
      "step": 28
    },
    {
      "epoch": 2.9743589743589745,
      "grad_norm": 1.163468789617511,
      "learning_rate": 1.3302790619551673e-05,
      "loss": 0.7175,
      "step": 29
    },
    {
      "epoch": 3.076923076923077,
      "grad_norm": 1.7767072503264743,
      "learning_rate": 1.2768355114248493e-05,
      "loss": 0.593,
      "step": 30
    },
    {
      "epoch": 3.1794871794871793,
      "grad_norm": 1.81233019892104,
      "learning_rate": 1.2225209339563144e-05,
      "loss": 0.5519,
      "step": 31
    },
    {
      "epoch": 3.282051282051282,
      "grad_norm": 2.1736744456228694,
      "learning_rate": 1.1675062233047365e-05,
      "loss": 0.5437,
      "step": 32
    },
    {
      "epoch": 3.3846153846153846,
      "grad_norm": 2.523447078328654,
      "learning_rate": 1.1119644761033079e-05,
      "loss": 0.5334,
      "step": 33
    },
    {
      "epoch": 3.4871794871794872,
      "grad_norm": 1.736445380863662,
      "learning_rate": 1.0560704472371919e-05,
      "loss": 0.5068,
      "step": 34
    },
    {
      "epoch": 3.58974358974359,
      "grad_norm": 1.4506284420650815,
      "learning_rate": 1e-05,
      "loss": 0.5092,
      "step": 35
    },
    {
      "epoch": 3.6923076923076925,
      "grad_norm": 1.4702124114670763,
      "learning_rate": 9.439295527628083e-06,
      "loss": 0.4965,
      "step": 36
    },
    {
      "epoch": 3.7948717948717947,
      "grad_norm": 1.460168348735874,
      "learning_rate": 8.880355238966923e-06,
      "loss": 0.4902,
      "step": 37
    },
    {
      "epoch": 3.8974358974358974,
      "grad_norm": 1.2827679925297792,
      "learning_rate": 8.324937766952638e-06,
      "loss": 0.4923,
      "step": 38
    },
    {
      "epoch": 4.0,
      "grad_norm": 1.3138838652676037,
      "learning_rate": 7.774790660436857e-06,
      "loss": 0.4731,
      "step": 39
    },
    {
      "epoch": 4.102564102564102,
      "grad_norm": 2.82702405107918,
      "learning_rate": 7.2316448857515076e-06,
      "loss": 0.377,
      "step": 40
    },
    {
      "epoch": 4.205128205128205,
      "grad_norm": 2.2541347262792897,
      "learning_rate": 6.697209380448333e-06,
      "loss": 0.3562,
      "step": 41
    },
    {
      "epoch": 4.3076923076923075,
      "grad_norm": 2.5922750621707293,
      "learning_rate": 6.173165676349103e-06,
      "loss": 0.3482,
      "step": 42
    },
    {
      "epoch": 4.410256410256411,
      "grad_norm": 3.552401632591804,
      "learning_rate": 5.66116260882442e-06,
      "loss": 0.3332,
      "step": 43
    },
    {
      "epoch": 4.512820512820513,
      "grad_norm": 2.5996491260559615,
      "learning_rate": 5.1628111289476025e-06,
      "loss": 0.3292,
      "step": 44
    },
    {
      "epoch": 4.615384615384615,
      "grad_norm": 2.0225922587821805,
      "learning_rate": 4.679679234846636e-06,
      "loss": 0.3134,
      "step": 45
    },
    {
      "epoch": 4.717948717948718,
      "grad_norm": 1.794698781237276,
      "learning_rate": 4.213287038201943e-06,
      "loss": 0.3144,
      "step": 46
    },
    {
      "epoch": 4.82051282051282,
      "grad_norm": 1.78570038657741,
      "learning_rate": 3.7651019814126656e-06,
      "loss": 0.3054,
      "step": 47
    },
    {
      "epoch": 4.923076923076923,
      "grad_norm": 1.855808340157979,
      "learning_rate": 3.3365342204799613e-06,
      "loss": 0.3,
      "step": 48
    },
    {
      "epoch": 5.0256410256410255,
      "grad_norm": 1.9551127034348588,
      "learning_rate": 2.9289321881345257e-06,
      "loss": 0.2895,
      "step": 49
    },
    {
      "epoch": 5.128205128205128,
      "grad_norm": 1.8579864077346009,
      "learning_rate": 2.5435783511683444e-06,
      "loss": 0.2339,
      "step": 50
    },
    {
      "epoch": 5.230769230769231,
      "grad_norm": 1.624477309859244,
      "learning_rate": 2.1816851753197023e-06,
      "loss": 0.2185,
      "step": 51
    },
    {
      "epoch": 5.333333333333333,
      "grad_norm": 1.7017286954162238,
      "learning_rate": 1.8443913104073984e-06,
      "loss": 0.2273,
      "step": 52
    },
    {
      "epoch": 5.435897435897436,
      "grad_norm": 1.612088436018407,
      "learning_rate": 1.5327580077171589e-06,
      "loss": 0.2157,
      "step": 53
    },
    {
      "epoch": 5.538461538461538,
      "grad_norm": 1.6667785081621305,
      "learning_rate": 1.2477657809124632e-06,
      "loss": 0.2163,
      "step": 54
    },
    {
      "epoch": 5.641025641025641,
      "grad_norm": 1.5501897496769197,
      "learning_rate": 9.903113209758098e-07,
      "loss": 0.2134,
      "step": 55
    },
    {
      "epoch": 5.743589743589744,
      "grad_norm": 1.6228058819139755,
      "learning_rate": 7.612046748871327e-07,
      "loss": 0.2247,
      "step": 56
    },
    {
      "epoch": 5.846153846153846,
      "grad_norm": 1.4413136220349998,
      "learning_rate": 5.611666969163243e-07,
      "loss": 0.2157,
      "step": 57
    },
    {
      "epoch": 5.948717948717949,
      "grad_norm": 1.3872967872162942,
      "learning_rate": 3.908267805490051e-07,
      "loss": 0.2111,
      "step": 58
    },
    {
      "epoch": 6.051282051282051,
      "grad_norm": 1.5157677115577846,
      "learning_rate": 2.507208781817638e-07,
      "loss": 0.1947,
      "step": 59
    },
    {
      "epoch": 6.153846153846154,
      "grad_norm": 1.5908969907868264,
      "learning_rate": 1.4128981481764115e-07,
      "loss": 0.1848,
      "step": 60
    },
    {
      "epoch": 6.256410256410256,
      "grad_norm": 1.5844350731835435,
      "learning_rate": 6.287790106757396e-08,
      "loss": 0.1888,
      "step": 61
    },
    {
      "epoch": 6.358974358974359,
      "grad_norm": 1.6757546348969776,
      "learning_rate": 1.5731849821833955e-08,
      "loss": 0.1826,
      "step": 62
    },
    {
      "epoch": 6.461538461538462,
      "grad_norm": 1.5614630318772702,
      "learning_rate": 0.0,
      "loss": 0.1757,
      "step": 63
    },
    {
      "epoch": 6.461538461538462,
      "step": 63,
      "total_flos": 13164745850880.0,
      "train_loss": 0.6425543285551525,
      "train_runtime": 766.0702,
      "train_samples_per_second": 5.647,
      "train_steps_per_second": 0.082
    }
  ],
  "logging_steps": 1,
  "max_steps": 63,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 7,
  "save_steps": 999999,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 13164745850880.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}