{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.11926058437686345,
"eval_steps": 9,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0011926058437686344,
"grad_norm": 0.028310367837548256,
"learning_rate": 1e-05,
"loss": 10.3757,
"step": 1
},
{
"epoch": 0.0011926058437686344,
"eval_loss": 10.374403953552246,
"eval_runtime": 1.2981,
"eval_samples_per_second": 543.892,
"eval_steps_per_second": 68.564,
"step": 1
},
{
"epoch": 0.002385211687537269,
"grad_norm": 0.03164586424827576,
"learning_rate": 2e-05,
"loss": 10.3722,
"step": 2
},
{
"epoch": 0.0035778175313059034,
"grad_norm": 0.029512399807572365,
"learning_rate": 3e-05,
"loss": 10.3745,
"step": 3
},
{
"epoch": 0.004770423375074538,
"grad_norm": 0.026691459119319916,
"learning_rate": 4e-05,
"loss": 10.3714,
"step": 4
},
{
"epoch": 0.005963029218843173,
"grad_norm": 0.02487982250750065,
"learning_rate": 5e-05,
"loss": 10.3785,
"step": 5
},
{
"epoch": 0.007155635062611807,
"grad_norm": 0.03070474974811077,
"learning_rate": 6e-05,
"loss": 10.3763,
"step": 6
},
{
"epoch": 0.008348240906380441,
"grad_norm": 0.026400011032819748,
"learning_rate": 7e-05,
"loss": 10.3702,
"step": 7
},
{
"epoch": 0.009540846750149075,
"grad_norm": 0.029420413076877594,
"learning_rate": 8e-05,
"loss": 10.3734,
"step": 8
},
{
"epoch": 0.01073345259391771,
"grad_norm": 0.02753315307199955,
"learning_rate": 9e-05,
"loss": 10.3707,
"step": 9
},
{
"epoch": 0.01073345259391771,
"eval_loss": 10.374176025390625,
"eval_runtime": 1.3566,
"eval_samples_per_second": 520.423,
"eval_steps_per_second": 65.606,
"step": 9
},
{
"epoch": 0.011926058437686345,
"grad_norm": 0.029479164630174637,
"learning_rate": 0.0001,
"loss": 10.379,
"step": 10
},
{
"epoch": 0.01311866428145498,
"grad_norm": 0.03541594371199608,
"learning_rate": 9.99695413509548e-05,
"loss": 10.3755,
"step": 11
},
{
"epoch": 0.014311270125223614,
"grad_norm": 0.02918224409222603,
"learning_rate": 9.987820251299122e-05,
"loss": 10.3716,
"step": 12
},
{
"epoch": 0.015503875968992248,
"grad_norm": 0.024971051141619682,
"learning_rate": 9.972609476841367e-05,
"loss": 10.3763,
"step": 13
},
{
"epoch": 0.016696481812760882,
"grad_norm": 0.03236817568540573,
"learning_rate": 9.951340343707852e-05,
"loss": 10.3733,
"step": 14
},
{
"epoch": 0.017889087656529516,
"grad_norm": 0.03112615831196308,
"learning_rate": 9.924038765061042e-05,
"loss": 10.3704,
"step": 15
},
{
"epoch": 0.01908169350029815,
"grad_norm": 0.031148822978138924,
"learning_rate": 9.890738003669029e-05,
"loss": 10.3724,
"step": 16
},
{
"epoch": 0.020274299344066785,
"grad_norm": 0.023717327043414116,
"learning_rate": 9.851478631379982e-05,
"loss": 10.3755,
"step": 17
},
{
"epoch": 0.02146690518783542,
"grad_norm": 0.030834253877401352,
"learning_rate": 9.806308479691595e-05,
"loss": 10.373,
"step": 18
},
{
"epoch": 0.02146690518783542,
"eval_loss": 10.373565673828125,
"eval_runtime": 1.3256,
"eval_samples_per_second": 532.577,
"eval_steps_per_second": 67.138,
"step": 18
},
{
"epoch": 0.022659511031604056,
"grad_norm": 0.02731081284582615,
"learning_rate": 9.755282581475769e-05,
"loss": 10.3715,
"step": 19
},
{
"epoch": 0.02385211687537269,
"grad_norm": 0.03342270106077194,
"learning_rate": 9.698463103929542e-05,
"loss": 10.3723,
"step": 20
},
{
"epoch": 0.025044722719141325,
"grad_norm": 0.026991521939635277,
"learning_rate": 9.635919272833938e-05,
"loss": 10.371,
"step": 21
},
{
"epoch": 0.02623732856290996,
"grad_norm": 0.026482809334993362,
"learning_rate": 9.567727288213005e-05,
"loss": 10.3701,
"step": 22
},
{
"epoch": 0.027429934406678593,
"grad_norm": 0.03881950303912163,
"learning_rate": 9.493970231495835e-05,
"loss": 10.3726,
"step": 23
},
{
"epoch": 0.028622540250447227,
"grad_norm": 0.04189687594771385,
"learning_rate": 9.414737964294636e-05,
"loss": 10.3686,
"step": 24
},
{
"epoch": 0.02981514609421586,
"grad_norm": 0.02757728472352028,
"learning_rate": 9.330127018922194e-05,
"loss": 10.3762,
"step": 25
},
{
"epoch": 0.031007751937984496,
"grad_norm": 0.030814575031399727,
"learning_rate": 9.24024048078213e-05,
"loss": 10.3724,
"step": 26
},
{
"epoch": 0.03220035778175313,
"grad_norm": 0.041639234870672226,
"learning_rate": 9.145187862775209e-05,
"loss": 10.3687,
"step": 27
},
{
"epoch": 0.03220035778175313,
"eval_loss": 10.37288761138916,
"eval_runtime": 1.4457,
"eval_samples_per_second": 488.342,
"eval_steps_per_second": 61.562,
"step": 27
},
{
"epoch": 0.033392963625521764,
"grad_norm": 0.033235449343919754,
"learning_rate": 9.045084971874738e-05,
"loss": 10.3687,
"step": 28
},
{
"epoch": 0.0345855694692904,
"grad_norm": 0.03943062946200371,
"learning_rate": 8.940053768033609e-05,
"loss": 10.3701,
"step": 29
},
{
"epoch": 0.03577817531305903,
"grad_norm": 0.03091137297451496,
"learning_rate": 8.83022221559489e-05,
"loss": 10.3748,
"step": 30
},
{
"epoch": 0.03697078115682767,
"grad_norm": 0.032856788486242294,
"learning_rate": 8.715724127386972e-05,
"loss": 10.3732,
"step": 31
},
{
"epoch": 0.0381633870005963,
"grad_norm": 0.033723410218954086,
"learning_rate": 8.596699001693255e-05,
"loss": 10.372,
"step": 32
},
{
"epoch": 0.03935599284436494,
"grad_norm": 0.03143460303544998,
"learning_rate": 8.473291852294987e-05,
"loss": 10.3778,
"step": 33
},
{
"epoch": 0.04054859868813357,
"grad_norm": 0.04015008732676506,
"learning_rate": 8.345653031794292e-05,
"loss": 10.3748,
"step": 34
},
{
"epoch": 0.04174120453190221,
"grad_norm": 0.03837386518716812,
"learning_rate": 8.213938048432697e-05,
"loss": 10.376,
"step": 35
},
{
"epoch": 0.04293381037567084,
"grad_norm": 0.03937862068414688,
"learning_rate": 8.07830737662829e-05,
"loss": 10.3753,
"step": 36
},
{
"epoch": 0.04293381037567084,
"eval_loss": 10.372147560119629,
"eval_runtime": 1.3806,
"eval_samples_per_second": 511.379,
"eval_steps_per_second": 64.466,
"step": 36
},
{
"epoch": 0.044126416219439475,
"grad_norm": 0.0406220406293869,
"learning_rate": 7.938926261462366e-05,
"loss": 10.3738,
"step": 37
},
{
"epoch": 0.04531902206320811,
"grad_norm": 0.04260128363966942,
"learning_rate": 7.795964517353735e-05,
"loss": 10.375,
"step": 38
},
{
"epoch": 0.046511627906976744,
"grad_norm": 0.03788283094763756,
"learning_rate": 7.649596321166024e-05,
"loss": 10.3741,
"step": 39
},
{
"epoch": 0.04770423375074538,
"grad_norm": 0.04250438138842583,
"learning_rate": 7.500000000000001e-05,
"loss": 10.3744,
"step": 40
},
{
"epoch": 0.04889683959451401,
"grad_norm": 0.04480942711234093,
"learning_rate": 7.347357813929454e-05,
"loss": 10.3732,
"step": 41
},
{
"epoch": 0.05008944543828265,
"grad_norm": 0.038544051349163055,
"learning_rate": 7.191855733945387e-05,
"loss": 10.3734,
"step": 42
},
{
"epoch": 0.05128205128205128,
"grad_norm": 0.04134129360318184,
"learning_rate": 7.033683215379002e-05,
"loss": 10.3719,
"step": 43
},
{
"epoch": 0.05247465712581992,
"grad_norm": 0.037152618169784546,
"learning_rate": 6.873032967079561e-05,
"loss": 10.3742,
"step": 44
},
{
"epoch": 0.05366726296958855,
"grad_norm": 0.04474302753806114,
"learning_rate": 6.710100716628344e-05,
"loss": 10.3699,
"step": 45
},
{
"epoch": 0.05366726296958855,
"eval_loss": 10.371367454528809,
"eval_runtime": 1.3423,
"eval_samples_per_second": 525.961,
"eval_steps_per_second": 66.304,
"step": 45
},
{
"epoch": 0.054859868813357186,
"grad_norm": 0.04609611630439758,
"learning_rate": 6.545084971874738e-05,
"loss": 10.378,
"step": 46
},
{
"epoch": 0.05605247465712582,
"grad_norm": 0.05456465855240822,
"learning_rate": 6.378186779084995e-05,
"loss": 10.3707,
"step": 47
},
{
"epoch": 0.057245080500894455,
"grad_norm": 0.0481850728392601,
"learning_rate": 6.209609477998338e-05,
"loss": 10.3727,
"step": 48
},
{
"epoch": 0.05843768634466309,
"grad_norm": 0.04843265563249588,
"learning_rate": 6.0395584540887963e-05,
"loss": 10.3749,
"step": 49
},
{
"epoch": 0.05963029218843172,
"grad_norm": 0.04366787523031235,
"learning_rate": 5.868240888334653e-05,
"loss": 10.3662,
"step": 50
},
{
"epoch": 0.06082289803220036,
"grad_norm": 0.04697554558515549,
"learning_rate": 5.695865504800327e-05,
"loss": 10.3749,
"step": 51
},
{
"epoch": 0.06201550387596899,
"grad_norm": 0.04818224534392357,
"learning_rate": 5.522642316338268e-05,
"loss": 10.3719,
"step": 52
},
{
"epoch": 0.06320810971973763,
"grad_norm": 0.05981869250535965,
"learning_rate": 5.348782368720626e-05,
"loss": 10.3733,
"step": 53
},
{
"epoch": 0.06440071556350627,
"grad_norm": 0.03551531583070755,
"learning_rate": 5.174497483512506e-05,
"loss": 10.3679,
"step": 54
},
{
"epoch": 0.06440071556350627,
"eval_loss": 10.370601654052734,
"eval_runtime": 1.407,
"eval_samples_per_second": 501.781,
"eval_steps_per_second": 63.256,
"step": 54
},
{
"epoch": 0.06559332140727489,
"grad_norm": 0.054153647273778915,
"learning_rate": 5e-05,
"loss": 10.3686,
"step": 55
},
{
"epoch": 0.06678592725104353,
"grad_norm": 0.056153807789087296,
"learning_rate": 4.825502516487497e-05,
"loss": 10.3693,
"step": 56
},
{
"epoch": 0.06797853309481217,
"grad_norm": 0.059691403061151505,
"learning_rate": 4.6512176312793736e-05,
"loss": 10.3712,
"step": 57
},
{
"epoch": 0.0691711389385808,
"grad_norm": 0.05296963453292847,
"learning_rate": 4.477357683661734e-05,
"loss": 10.3684,
"step": 58
},
{
"epoch": 0.07036374478234943,
"grad_norm": 0.05294171720743179,
"learning_rate": 4.3041344951996746e-05,
"loss": 10.3702,
"step": 59
},
{
"epoch": 0.07155635062611806,
"grad_norm": 0.058304980397224426,
"learning_rate": 4.131759111665349e-05,
"loss": 10.3684,
"step": 60
},
{
"epoch": 0.0727489564698867,
"grad_norm": 0.05720147863030434,
"learning_rate": 3.960441545911204e-05,
"loss": 10.3688,
"step": 61
},
{
"epoch": 0.07394156231365534,
"grad_norm": 0.05712497606873512,
"learning_rate": 3.790390522001662e-05,
"loss": 10.3677,
"step": 62
},
{
"epoch": 0.07513416815742398,
"grad_norm": 0.05657053366303444,
"learning_rate": 3.6218132209150045e-05,
"loss": 10.369,
"step": 63
},
{
"epoch": 0.07513416815742398,
"eval_loss": 10.3699312210083,
"eval_runtime": 1.3966,
"eval_samples_per_second": 505.497,
"eval_steps_per_second": 63.724,
"step": 63
},
{
"epoch": 0.0763267740011926,
"grad_norm": 0.06129881739616394,
"learning_rate": 3.4549150281252636e-05,
"loss": 10.3663,
"step": 64
},
{
"epoch": 0.07751937984496124,
"grad_norm": 0.05678679794073105,
"learning_rate": 3.289899283371657e-05,
"loss": 10.3734,
"step": 65
},
{
"epoch": 0.07871198568872988,
"grad_norm": 0.054665569216012955,
"learning_rate": 3.12696703292044e-05,
"loss": 10.3676,
"step": 66
},
{
"epoch": 0.07990459153249851,
"grad_norm": 0.053208716213703156,
"learning_rate": 2.9663167846209998e-05,
"loss": 10.3679,
"step": 67
},
{
"epoch": 0.08109719737626714,
"grad_norm": 0.06714367121458054,
"learning_rate": 2.8081442660546125e-05,
"loss": 10.3629,
"step": 68
},
{
"epoch": 0.08228980322003578,
"grad_norm": 0.05434398725628853,
"learning_rate": 2.6526421860705473e-05,
"loss": 10.3735,
"step": 69
},
{
"epoch": 0.08348240906380441,
"grad_norm": 0.06771057844161987,
"learning_rate": 2.500000000000001e-05,
"loss": 10.3695,
"step": 70
},
{
"epoch": 0.08467501490757305,
"grad_norm": 0.053727179765701294,
"learning_rate": 2.350403678833976e-05,
"loss": 10.3692,
"step": 71
},
{
"epoch": 0.08586762075134168,
"grad_norm": 0.0703674703836441,
"learning_rate": 2.2040354826462668e-05,
"loss": 10.3643,
"step": 72
},
{
"epoch": 0.08586762075134168,
"eval_loss": 10.369429588317871,
"eval_runtime": 1.3413,
"eval_samples_per_second": 526.339,
"eval_steps_per_second": 66.352,
"step": 72
},
{
"epoch": 0.08706022659511031,
"grad_norm": 0.05981586128473282,
"learning_rate": 2.061073738537635e-05,
"loss": 10.3704,
"step": 73
},
{
"epoch": 0.08825283243887895,
"grad_norm": 0.054827235639095306,
"learning_rate": 1.9216926233717085e-05,
"loss": 10.3701,
"step": 74
},
{
"epoch": 0.08944543828264759,
"grad_norm": 0.06241114065051079,
"learning_rate": 1.7860619515673033e-05,
"loss": 10.3686,
"step": 75
},
{
"epoch": 0.09063804412641623,
"grad_norm": 0.0732855573296547,
"learning_rate": 1.6543469682057106e-05,
"loss": 10.3666,
"step": 76
},
{
"epoch": 0.09183064997018485,
"grad_norm": 0.05297171697020531,
"learning_rate": 1.526708147705013e-05,
"loss": 10.3696,
"step": 77
},
{
"epoch": 0.09302325581395349,
"grad_norm": 0.05004735663533211,
"learning_rate": 1.4033009983067452e-05,
"loss": 10.3691,
"step": 78
},
{
"epoch": 0.09421586165772212,
"grad_norm": 0.056061867624521255,
"learning_rate": 1.2842758726130283e-05,
"loss": 10.3688,
"step": 79
},
{
"epoch": 0.09540846750149076,
"grad_norm": 0.06218897923827171,
"learning_rate": 1.1697777844051105e-05,
"loss": 10.3682,
"step": 80
},
{
"epoch": 0.09660107334525939,
"grad_norm": 0.05653758719563484,
"learning_rate": 1.0599462319663905e-05,
"loss": 10.3744,
"step": 81
},
{
"epoch": 0.09660107334525939,
"eval_loss": 10.369139671325684,
"eval_runtime": 1.4083,
"eval_samples_per_second": 501.311,
"eval_steps_per_second": 63.196,
"step": 81
},
{
"epoch": 0.09779367918902802,
"grad_norm": 0.0647573173046112,
"learning_rate": 9.549150281252633e-06,
"loss": 10.371,
"step": 82
},
{
"epoch": 0.09898628503279666,
"grad_norm": 0.0794467180967331,
"learning_rate": 8.548121372247918e-06,
"loss": 10.3652,
"step": 83
},
{
"epoch": 0.1001788908765653,
"grad_norm": 0.06957988440990448,
"learning_rate": 7.597595192178702e-06,
"loss": 10.3704,
"step": 84
},
{
"epoch": 0.10137149672033392,
"grad_norm": 0.050267431885004044,
"learning_rate": 6.698729810778065e-06,
"loss": 10.3687,
"step": 85
},
{
"epoch": 0.10256410256410256,
"grad_norm": 0.05273786932229996,
"learning_rate": 5.852620357053651e-06,
"loss": 10.3704,
"step": 86
},
{
"epoch": 0.1037567084078712,
"grad_norm": 0.050435155630111694,
"learning_rate": 5.060297685041659e-06,
"loss": 10.3724,
"step": 87
},
{
"epoch": 0.10494931425163984,
"grad_norm": 0.05046040192246437,
"learning_rate": 4.322727117869951e-06,
"loss": 10.3641,
"step": 88
},
{
"epoch": 0.10614192009540847,
"grad_norm": 0.08423416316509247,
"learning_rate": 3.6408072716606346e-06,
"loss": 10.3732,
"step": 89
},
{
"epoch": 0.1073345259391771,
"grad_norm": 0.0676737129688263,
"learning_rate": 3.0153689607045845e-06,
"loss": 10.3709,
"step": 90
},
{
"epoch": 0.1073345259391771,
"eval_loss": 10.369009971618652,
"eval_runtime": 1.4423,
"eval_samples_per_second": 489.493,
"eval_steps_per_second": 61.707,
"step": 90
},
{
"epoch": 0.10852713178294573,
"grad_norm": 0.061363909393548965,
"learning_rate": 2.4471741852423237e-06,
"loss": 10.371,
"step": 91
},
{
"epoch": 0.10971973762671437,
"grad_norm": 0.05670735239982605,
"learning_rate": 1.9369152030840556e-06,
"loss": 10.3729,
"step": 92
},
{
"epoch": 0.11091234347048301,
"grad_norm": 0.04956693947315216,
"learning_rate": 1.4852136862001764e-06,
"loss": 10.3691,
"step": 93
},
{
"epoch": 0.11210494931425163,
"grad_norm": 0.06154552474617958,
"learning_rate": 1.0926199633097157e-06,
"loss": 10.369,
"step": 94
},
{
"epoch": 0.11329755515802027,
"grad_norm": 0.07234995067119598,
"learning_rate": 7.596123493895991e-07,
"loss": 10.3676,
"step": 95
},
{
"epoch": 0.11449016100178891,
"grad_norm": 0.07232902944087982,
"learning_rate": 4.865965629214819e-07,
"loss": 10.3691,
"step": 96
},
{
"epoch": 0.11568276684555755,
"grad_norm": 0.05388721078634262,
"learning_rate": 2.7390523158633554e-07,
"loss": 10.3661,
"step": 97
},
{
"epoch": 0.11687537268932618,
"grad_norm": 0.04916412755846977,
"learning_rate": 1.2179748700879012e-07,
"loss": 10.3708,
"step": 98
},
{
"epoch": 0.11806797853309481,
"grad_norm": 0.06878488510847092,
"learning_rate": 3.04586490452119e-08,
"loss": 10.3664,
"step": 99
},
{
"epoch": 0.11806797853309481,
"eval_loss": 10.368987083435059,
"eval_runtime": 1.3332,
"eval_samples_per_second": 529.551,
"eval_steps_per_second": 66.756,
"step": 99
},
{
"epoch": 0.11926058437686345,
"grad_norm": 0.05977727845311165,
"learning_rate": 0.0,
"loss": 10.3683,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5230244659200.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
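
The JSON above is the `trainer_state.json` that the Hugging Face Transformers `Trainer` writes alongside a checkpoint (here at `global_step` 100). A minimal sketch of how its `log_history` could be inspected offline, assuming the file has been downloaded locally as `trainer_state.json` (the path is an assumption for illustration, not part of the checkpoint itself):

    import json

    # Load the checkpoint's trainer state (local path is assumed).
    with open("trainer_state.json") as f:
        state = json.load(f)

    # log_history mixes per-step training entries ("loss") with
    # periodic evaluation entries ("eval_loss"); split them apart.
    train_log = [e for e in state["log_history"] if "loss" in e]
    eval_log = [e for e in state["log_history"] if "eval_loss" in e]

    print(f"global_step={state['global_step']}, epoch={state['epoch']:.4f}")
    print("final train loss:", train_log[-1]["loss"])      # logged every step (logging_steps = 1)
    print("final eval loss:", eval_log[-1]["eval_loss"])   # logged every 9 steps (eval_steps = 9)

This only reads the state shown above; it does not reconstruct the model or optimizer, which live in the other files of the checkpoint directory.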