{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.12399256044637322,
"eval_steps": 9,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0012399256044637321,
"grad_norm": 5.2169108390808105,
"learning_rate": 1e-05,
"loss": 7.3895,
"step": 1
},
{
"epoch": 0.0012399256044637321,
"eval_loss": 8.801884651184082,
"eval_runtime": 75.11,
"eval_samples_per_second": 9.04,
"eval_steps_per_second": 1.132,
"step": 1
},
{
"epoch": 0.0024798512089274642,
"grad_norm": 6.102718830108643,
"learning_rate": 2e-05,
"loss": 8.3013,
"step": 2
},
{
"epoch": 0.0037197768133911966,
"grad_norm": 7.641967296600342,
"learning_rate": 3e-05,
"loss": 9.8099,
"step": 3
},
{
"epoch": 0.0049597024178549285,
"grad_norm": 7.007597923278809,
"learning_rate": 4e-05,
"loss": 8.8113,
"step": 4
},
{
"epoch": 0.006199628022318661,
"grad_norm": 7.305963039398193,
"learning_rate": 5e-05,
"loss": 8.227,
"step": 5
},
{
"epoch": 0.007439553626782393,
"grad_norm": 6.986813068389893,
"learning_rate": 6e-05,
"loss": 7.3762,
"step": 6
},
{
"epoch": 0.008679479231246125,
"grad_norm": 9.701449394226074,
"learning_rate": 7e-05,
"loss": 8.279,
"step": 7
},
{
"epoch": 0.009919404835709857,
"grad_norm": 11.91723346710205,
"learning_rate": 8e-05,
"loss": 8.0782,
"step": 8
},
{
"epoch": 0.011159330440173589,
"grad_norm": 14.6170072555542,
"learning_rate": 9e-05,
"loss": 7.8138,
"step": 9
},
{
"epoch": 0.011159330440173589,
"eval_loss": 5.656815052032471,
"eval_runtime": 74.6017,
"eval_samples_per_second": 9.102,
"eval_steps_per_second": 1.139,
"step": 9
},
{
"epoch": 0.012399256044637322,
"grad_norm": 14.114734649658203,
"learning_rate": 0.0001,
"loss": 4.837,
"step": 10
},
{
"epoch": 0.013639181649101054,
"grad_norm": 17.290054321289062,
"learning_rate": 9.99695413509548e-05,
"loss": 4.4749,
"step": 11
},
{
"epoch": 0.014879107253564786,
"grad_norm": 10.144243240356445,
"learning_rate": 9.987820251299122e-05,
"loss": 2.5553,
"step": 12
},
{
"epoch": 0.016119032858028518,
"grad_norm": 14.201713562011719,
"learning_rate": 9.972609476841367e-05,
"loss": 2.7354,
"step": 13
},
{
"epoch": 0.01735895846249225,
"grad_norm": 17.404315948486328,
"learning_rate": 9.951340343707852e-05,
"loss": 2.6308,
"step": 14
},
{
"epoch": 0.018598884066955982,
"grad_norm": 16.002302169799805,
"learning_rate": 9.924038765061042e-05,
"loss": 1.9386,
"step": 15
},
{
"epoch": 0.019838809671419714,
"grad_norm": 9.651491165161133,
"learning_rate": 9.890738003669029e-05,
"loss": 0.823,
"step": 16
},
{
"epoch": 0.021078735275883446,
"grad_norm": 10.251285552978516,
"learning_rate": 9.851478631379982e-05,
"loss": 0.9242,
"step": 17
},
{
"epoch": 0.022318660880347178,
"grad_norm": 13.128811836242676,
"learning_rate": 9.806308479691595e-05,
"loss": 1.1386,
"step": 18
},
{
"epoch": 0.022318660880347178,
"eval_loss": 0.9062702059745789,
"eval_runtime": 74.5537,
"eval_samples_per_second": 9.108,
"eval_steps_per_second": 1.14,
"step": 18
},
{
"epoch": 0.023558586484810913,
"grad_norm": 9.78515338897705,
"learning_rate": 9.755282581475769e-05,
"loss": 1.0803,
"step": 19
},
{
"epoch": 0.024798512089274645,
"grad_norm": 9.0294828414917,
"learning_rate": 9.698463103929542e-05,
"loss": 0.7872,
"step": 20
},
{
"epoch": 0.026038437693738377,
"grad_norm": 11.253486633300781,
"learning_rate": 9.635919272833938e-05,
"loss": 0.5286,
"step": 21
},
{
"epoch": 0.02727836329820211,
"grad_norm": 4.686726093292236,
"learning_rate": 9.567727288213005e-05,
"loss": 0.259,
"step": 22
},
{
"epoch": 0.02851828890266584,
"grad_norm": 8.056584358215332,
"learning_rate": 9.493970231495835e-05,
"loss": 0.7311,
"step": 23
},
{
"epoch": 0.029758214507129573,
"grad_norm": 6.690127372741699,
"learning_rate": 9.414737964294636e-05,
"loss": 0.7759,
"step": 24
},
{
"epoch": 0.030998140111593304,
"grad_norm": 4.335594177246094,
"learning_rate": 9.330127018922194e-05,
"loss": 0.2789,
"step": 25
},
{
"epoch": 0.032238065716057036,
"grad_norm": 6.1196136474609375,
"learning_rate": 9.24024048078213e-05,
"loss": 0.5025,
"step": 26
},
{
"epoch": 0.03347799132052077,
"grad_norm": 3.9458327293395996,
"learning_rate": 9.145187862775209e-05,
"loss": 0.3508,
"step": 27
},
{
"epoch": 0.03347799132052077,
"eval_loss": 0.526975154876709,
"eval_runtime": 74.6112,
"eval_samples_per_second": 9.101,
"eval_steps_per_second": 1.139,
"step": 27
},
{
"epoch": 0.0347179169249845,
"grad_norm": 6.157963275909424,
"learning_rate": 9.045084971874738e-05,
"loss": 1.073,
"step": 28
},
{
"epoch": 0.03595784252944823,
"grad_norm": 5.92735481262207,
"learning_rate": 8.940053768033609e-05,
"loss": 0.7696,
"step": 29
},
{
"epoch": 0.037197768133911964,
"grad_norm": 5.284574508666992,
"learning_rate": 8.83022221559489e-05,
"loss": 0.7974,
"step": 30
},
{
"epoch": 0.038437693738375696,
"grad_norm": 6.857171535491943,
"learning_rate": 8.715724127386972e-05,
"loss": 0.8253,
"step": 31
},
{
"epoch": 0.03967761934283943,
"grad_norm": 4.358470439910889,
"learning_rate": 8.596699001693255e-05,
"loss": 0.3629,
"step": 32
},
{
"epoch": 0.04091754494730316,
"grad_norm": 2.598677158355713,
"learning_rate": 8.473291852294987e-05,
"loss": 0.1976,
"step": 33
},
{
"epoch": 0.04215747055176689,
"grad_norm": 5.9318623542785645,
"learning_rate": 8.345653031794292e-05,
"loss": 0.9802,
"step": 34
},
{
"epoch": 0.04339739615623062,
"grad_norm": 3.030052900314331,
"learning_rate": 8.213938048432697e-05,
"loss": 0.2208,
"step": 35
},
{
"epoch": 0.044637321760694355,
"grad_norm": 3.3238260746002197,
"learning_rate": 8.07830737662829e-05,
"loss": 0.419,
"step": 36
},
{
"epoch": 0.044637321760694355,
"eval_loss": 0.45689505338668823,
"eval_runtime": 74.5368,
"eval_samples_per_second": 9.11,
"eval_steps_per_second": 1.14,
"step": 36
},
{
"epoch": 0.04587724736515809,
"grad_norm": 3.60581111907959,
"learning_rate": 7.938926261462366e-05,
"loss": 0.4493,
"step": 37
},
{
"epoch": 0.047117172969621826,
"grad_norm": 4.116817474365234,
"learning_rate": 7.795964517353735e-05,
"loss": 0.4905,
"step": 38
},
{
"epoch": 0.04835709857408556,
"grad_norm": 3.7342023849487305,
"learning_rate": 7.649596321166024e-05,
"loss": 0.3049,
"step": 39
},
{
"epoch": 0.04959702417854929,
"grad_norm": 4.842495918273926,
"learning_rate": 7.500000000000001e-05,
"loss": 0.6202,
"step": 40
},
{
"epoch": 0.05083694978301302,
"grad_norm": 3.281841516494751,
"learning_rate": 7.347357813929454e-05,
"loss": 0.2174,
"step": 41
},
{
"epoch": 0.052076875387476754,
"grad_norm": 3.592432975769043,
"learning_rate": 7.191855733945387e-05,
"loss": 0.3781,
"step": 42
},
{
"epoch": 0.053316800991940486,
"grad_norm": 2.5120394229888916,
"learning_rate": 7.033683215379002e-05,
"loss": 0.254,
"step": 43
},
{
"epoch": 0.05455672659640422,
"grad_norm": 1.8886101245880127,
"learning_rate": 6.873032967079561e-05,
"loss": 0.1548,
"step": 44
},
{
"epoch": 0.05579665220086795,
"grad_norm": 5.326384544372559,
"learning_rate": 6.710100716628344e-05,
"loss": 0.5272,
"step": 45
},
{
"epoch": 0.05579665220086795,
"eval_loss": 0.41755691170692444,
"eval_runtime": 74.5587,
"eval_samples_per_second": 9.107,
"eval_steps_per_second": 1.14,
"step": 45
},
{
"epoch": 0.05703657780533168,
"grad_norm": 6.461116790771484,
"learning_rate": 6.545084971874738e-05,
"loss": 0.587,
"step": 46
},
{
"epoch": 0.05827650340979541,
"grad_norm": 4.284915924072266,
"learning_rate": 6.378186779084995e-05,
"loss": 0.2202,
"step": 47
},
{
"epoch": 0.059516429014259145,
"grad_norm": 3.3877992630004883,
"learning_rate": 6.209609477998338e-05,
"loss": 0.5544,
"step": 48
},
{
"epoch": 0.06075635461872288,
"grad_norm": 5.712876319885254,
"learning_rate": 6.0395584540887963e-05,
"loss": 1.1408,
"step": 49
},
{
"epoch": 0.06199628022318661,
"grad_norm": 2.4888880252838135,
"learning_rate": 5.868240888334653e-05,
"loss": 0.3312,
"step": 50
},
{
"epoch": 0.06323620582765034,
"grad_norm": 2.2069196701049805,
"learning_rate": 5.695865504800327e-05,
"loss": 0.1405,
"step": 51
},
{
"epoch": 0.06447613143211407,
"grad_norm": 3.5193231105804443,
"learning_rate": 5.522642316338268e-05,
"loss": 0.2545,
"step": 52
},
{
"epoch": 0.0657160570365778,
"grad_norm": 3.9061081409454346,
"learning_rate": 5.348782368720626e-05,
"loss": 0.5383,
"step": 53
},
{
"epoch": 0.06695598264104154,
"grad_norm": 3.045947551727295,
"learning_rate": 5.174497483512506e-05,
"loss": 0.2484,
"step": 54
},
{
"epoch": 0.06695598264104154,
"eval_loss": 0.3930973410606384,
"eval_runtime": 74.6298,
"eval_samples_per_second": 9.098,
"eval_steps_per_second": 1.139,
"step": 54
},
{
"epoch": 0.06819590824550527,
"grad_norm": 3.8757569789886475,
"learning_rate": 5e-05,
"loss": 0.6221,
"step": 55
},
{
"epoch": 0.069435833849969,
"grad_norm": 2.1376354694366455,
"learning_rate": 4.825502516487497e-05,
"loss": 0.1202,
"step": 56
},
{
"epoch": 0.07067575945443273,
"grad_norm": 2.5388710498809814,
"learning_rate": 4.6512176312793736e-05,
"loss": 0.3296,
"step": 57
},
{
"epoch": 0.07191568505889646,
"grad_norm": 2.990429639816284,
"learning_rate": 4.477357683661734e-05,
"loss": 0.1526,
"step": 58
},
{
"epoch": 0.0731556106633602,
"grad_norm": 4.948442459106445,
"learning_rate": 4.3041344951996746e-05,
"loss": 1.0436,
"step": 59
},
{
"epoch": 0.07439553626782393,
"grad_norm": 7.169388771057129,
"learning_rate": 4.131759111665349e-05,
"loss": 0.8199,
"step": 60
},
{
"epoch": 0.07563546187228766,
"grad_norm": 3.115478277206421,
"learning_rate": 3.960441545911204e-05,
"loss": 0.2636,
"step": 61
},
{
"epoch": 0.07687538747675139,
"grad_norm": 3.5272696018218994,
"learning_rate": 3.790390522001662e-05,
"loss": 0.5775,
"step": 62
},
{
"epoch": 0.07811531308121512,
"grad_norm": 3.178927183151245,
"learning_rate": 3.6218132209150045e-05,
"loss": 0.3514,
"step": 63
},
{
"epoch": 0.07811531308121512,
"eval_loss": 0.37893226742744446,
"eval_runtime": 74.5475,
"eval_samples_per_second": 9.108,
"eval_steps_per_second": 1.14,
"step": 63
},
{
"epoch": 0.07935523868567886,
"grad_norm": 1.964901328086853,
"learning_rate": 3.4549150281252636e-05,
"loss": 0.1657,
"step": 64
},
{
"epoch": 0.08059516429014259,
"grad_norm": 4.481605052947998,
"learning_rate": 3.289899283371657e-05,
"loss": 0.8851,
"step": 65
},
{
"epoch": 0.08183508989460632,
"grad_norm": 1.225839376449585,
"learning_rate": 3.12696703292044e-05,
"loss": 0.0641,
"step": 66
},
{
"epoch": 0.08307501549907005,
"grad_norm": 3.0964016914367676,
"learning_rate": 2.9663167846209998e-05,
"loss": 0.3271,
"step": 67
},
{
"epoch": 0.08431494110353378,
"grad_norm": 3.8873424530029297,
"learning_rate": 2.8081442660546125e-05,
"loss": 0.2556,
"step": 68
},
{
"epoch": 0.08555486670799752,
"grad_norm": 2.3811612129211426,
"learning_rate": 2.6526421860705473e-05,
"loss": 0.3136,
"step": 69
},
{
"epoch": 0.08679479231246125,
"grad_norm": 4.680008411407471,
"learning_rate": 2.500000000000001e-05,
"loss": 0.6543,
"step": 70
},
{
"epoch": 0.08803471791692498,
"grad_norm": 3.486687183380127,
"learning_rate": 2.350403678833976e-05,
"loss": 0.5671,
"step": 71
},
{
"epoch": 0.08927464352138871,
"grad_norm": 2.453212261199951,
"learning_rate": 2.2040354826462668e-05,
"loss": 0.1672,
"step": 72
},
{
"epoch": 0.08927464352138871,
"eval_loss": 0.3752855658531189,
"eval_runtime": 74.5418,
"eval_samples_per_second": 9.109,
"eval_steps_per_second": 1.14,
"step": 72
},
{
"epoch": 0.09051456912585244,
"grad_norm": 4.2769951820373535,
"learning_rate": 2.061073738537635e-05,
"loss": 0.285,
"step": 73
},
{
"epoch": 0.09175449473031617,
"grad_norm": 3.424060106277466,
"learning_rate": 1.9216926233717085e-05,
"loss": 0.4328,
"step": 74
},
{
"epoch": 0.0929944203347799,
"grad_norm": 3.5395214557647705,
"learning_rate": 1.7860619515673033e-05,
"loss": 0.4982,
"step": 75
},
{
"epoch": 0.09423434593924365,
"grad_norm": 2.7212040424346924,
"learning_rate": 1.6543469682057106e-05,
"loss": 0.2379,
"step": 76
},
{
"epoch": 0.09547427154370738,
"grad_norm": 3.9562039375305176,
"learning_rate": 1.526708147705013e-05,
"loss": 0.5157,
"step": 77
},
{
"epoch": 0.09671419714817112,
"grad_norm": 4.127841472625732,
"learning_rate": 1.4033009983067452e-05,
"loss": 0.7147,
"step": 78
},
{
"epoch": 0.09795412275263485,
"grad_norm": 2.836074113845825,
"learning_rate": 1.2842758726130283e-05,
"loss": 0.285,
"step": 79
},
{
"epoch": 0.09919404835709858,
"grad_norm": 4.456368446350098,
"learning_rate": 1.1697777844051105e-05,
"loss": 0.2177,
"step": 80
},
{
"epoch": 0.10043397396156231,
"grad_norm": 2.3970136642456055,
"learning_rate": 1.0599462319663905e-05,
"loss": 0.1053,
"step": 81
},
{
"epoch": 0.10043397396156231,
"eval_loss": 0.3661847710609436,
"eval_runtime": 74.5661,
"eval_samples_per_second": 9.106,
"eval_steps_per_second": 1.14,
"step": 81
},
{
"epoch": 0.10167389956602604,
"grad_norm": 2.0069854259490967,
"learning_rate": 9.549150281252633e-06,
"loss": 0.2474,
"step": 82
},
{
"epoch": 0.10291382517048978,
"grad_norm": 3.5164527893066406,
"learning_rate": 8.548121372247918e-06,
"loss": 0.6209,
"step": 83
},
{
"epoch": 0.10415375077495351,
"grad_norm": 3.549330234527588,
"learning_rate": 7.597595192178702e-06,
"loss": 0.3775,
"step": 84
},
{
"epoch": 0.10539367637941724,
"grad_norm": 3.223816394805908,
"learning_rate": 6.698729810778065e-06,
"loss": 0.2977,
"step": 85
},
{
"epoch": 0.10663360198388097,
"grad_norm": 3.326846122741699,
"learning_rate": 5.852620357053651e-06,
"loss": 0.3848,
"step": 86
},
{
"epoch": 0.1078735275883447,
"grad_norm": 2.9460747241973877,
"learning_rate": 5.060297685041659e-06,
"loss": 0.224,
"step": 87
},
{
"epoch": 0.10911345319280844,
"grad_norm": 3.0875232219696045,
"learning_rate": 4.322727117869951e-06,
"loss": 0.2752,
"step": 88
},
{
"epoch": 0.11035337879727217,
"grad_norm": 4.651269435882568,
"learning_rate": 3.6408072716606346e-06,
"loss": 0.2754,
"step": 89
},
{
"epoch": 0.1115933044017359,
"grad_norm": 3.0588595867156982,
"learning_rate": 3.0153689607045845e-06,
"loss": 0.4322,
"step": 90
},
{
"epoch": 0.1115933044017359,
"eval_loss": 0.362551748752594,
"eval_runtime": 74.5244,
"eval_samples_per_second": 9.111,
"eval_steps_per_second": 1.141,
"step": 90
},
{
"epoch": 0.11283323000619963,
"grad_norm": 2.9374754428863525,
"learning_rate": 2.4471741852423237e-06,
"loss": 0.3,
"step": 91
},
{
"epoch": 0.11407315561066336,
"grad_norm": 2.171053171157837,
"learning_rate": 1.9369152030840556e-06,
"loss": 0.1882,
"step": 92
},
{
"epoch": 0.1153130812151271,
"grad_norm": 2.6169354915618896,
"learning_rate": 1.4852136862001764e-06,
"loss": 0.3603,
"step": 93
},
{
"epoch": 0.11655300681959083,
"grad_norm": 2.496931791305542,
"learning_rate": 1.0926199633097157e-06,
"loss": 0.3489,
"step": 94
},
{
"epoch": 0.11779293242405456,
"grad_norm": 1.8905479907989502,
"learning_rate": 7.596123493895991e-07,
"loss": 0.1084,
"step": 95
},
{
"epoch": 0.11903285802851829,
"grad_norm": 3.700955390930176,
"learning_rate": 4.865965629214819e-07,
"loss": 0.458,
"step": 96
},
{
"epoch": 0.12027278363298202,
"grad_norm": 5.287838935852051,
"learning_rate": 2.7390523158633554e-07,
"loss": 0.2966,
"step": 97
},
{
"epoch": 0.12151270923744575,
"grad_norm": 2.670868158340454,
"learning_rate": 1.2179748700879012e-07,
"loss": 0.4348,
"step": 98
},
{
"epoch": 0.12275263484190949,
"grad_norm": 2.351815938949585,
"learning_rate": 3.04586490452119e-08,
"loss": 0.3598,
"step": 99
},
{
"epoch": 0.12275263484190949,
"eval_loss": 0.36131006479263306,
"eval_runtime": 74.5619,
"eval_samples_per_second": 9.107,
"eval_steps_per_second": 1.14,
"step": 99
},
{
"epoch": 0.12399256044637322,
"grad_norm": 2.446971893310547,
"learning_rate": 0.0,
"loss": 0.2263,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 7.41887283560448e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
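
A minimal sketch for inspecting this checkpoint's metrics, assuming the JSON above is saved as trainer_state.json inside the checkpoint directory (the layout the Hugging Face Trainer uses); the "checkpoint-100/" path is an assumption, adjust it to wherever the file lives. It separates the training records (which carry "loss") from the evaluation records (which carry "eval_loss") in log_history.

# sketch: inspect the trainer_state.json shown above
# (path is an assumed example, not taken from the file itself)
import json

with open("checkpoint-100/trainer_state.json") as f:
    state = json.load(f)

# log_history mixes training entries ("loss") and evaluation entries
# ("eval_loss"); split them by which key each record contains.
train_log = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_log = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print(f"epoch reached: {state['epoch']:.4f} of {state['num_train_epochs']}")
print(f"final training loss: {train_log[-1][1]} at step {train_log[-1][0]}")
print(f"final eval loss:     {eval_log[-1][1]} at step {eval_log[-1][0]}")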