{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.049431537320810674,
"eval_steps": 9,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0004943153732081067,
"grad_norm": 6.547516345977783,
"learning_rate": 1e-05,
"loss": 5.7764,
"step": 1
},
{
"epoch": 0.0004943153732081067,
"eval_loss": 5.737674236297607,
"eval_runtime": 16.0182,
"eval_samples_per_second": 106.379,
"eval_steps_per_second": 13.297,
"step": 1
},
{
"epoch": 0.0009886307464162135,
"grad_norm": 7.008223533630371,
"learning_rate": 2e-05,
"loss": 6.3647,
"step": 2
},
{
"epoch": 0.0014829461196243204,
"grad_norm": 5.026308536529541,
"learning_rate": 3e-05,
"loss": 5.694,
"step": 3
},
{
"epoch": 0.001977261492832427,
"grad_norm": 5.698261737823486,
"learning_rate": 4e-05,
"loss": 5.7188,
"step": 4
},
{
"epoch": 0.002471576866040534,
"grad_norm": 5.912988662719727,
"learning_rate": 5e-05,
"loss": 5.928,
"step": 5
},
{
"epoch": 0.002965892239248641,
"grad_norm": 5.69352912902832,
"learning_rate": 6e-05,
"loss": 6.028,
"step": 6
},
{
"epoch": 0.0034602076124567475,
"grad_norm": 5.584896087646484,
"learning_rate": 7e-05,
"loss": 5.8712,
"step": 7
},
{
"epoch": 0.003954522985664854,
"grad_norm": 6.3733696937561035,
"learning_rate": 8e-05,
"loss": 5.419,
"step": 8
},
{
"epoch": 0.004448838358872961,
"grad_norm": 7.493472099304199,
"learning_rate": 9e-05,
"loss": 5.4343,
"step": 9
},
{
"epoch": 0.004448838358872961,
"eval_loss": 5.5214762687683105,
"eval_runtime": 16.0221,
"eval_samples_per_second": 106.353,
"eval_steps_per_second": 13.294,
"step": 9
},
{
"epoch": 0.004943153732081068,
"grad_norm": 6.703868389129639,
"learning_rate": 0.0001,
"loss": 6.0423,
"step": 10
},
{
"epoch": 0.005437469105289174,
"grad_norm": 4.725719928741455,
"learning_rate": 9.99695413509548e-05,
"loss": 5.1156,
"step": 11
},
{
"epoch": 0.005931784478497282,
"grad_norm": 5.6615753173828125,
"learning_rate": 9.987820251299122e-05,
"loss": 5.3645,
"step": 12
},
{
"epoch": 0.006426099851705388,
"grad_norm": 5.23847770690918,
"learning_rate": 9.972609476841367e-05,
"loss": 5.3262,
"step": 13
},
{
"epoch": 0.006920415224913495,
"grad_norm": 5.267978668212891,
"learning_rate": 9.951340343707852e-05,
"loss": 5.5226,
"step": 14
},
{
"epoch": 0.007414730598121601,
"grad_norm": 4.9637451171875,
"learning_rate": 9.924038765061042e-05,
"loss": 5.5525,
"step": 15
},
{
"epoch": 0.007909045971329708,
"grad_norm": 5.008317470550537,
"learning_rate": 9.890738003669029e-05,
"loss": 4.968,
"step": 16
},
{
"epoch": 0.008403361344537815,
"grad_norm": 4.9471940994262695,
"learning_rate": 9.851478631379982e-05,
"loss": 5.3732,
"step": 17
},
{
"epoch": 0.008897676717745922,
"grad_norm": 4.622035980224609,
"learning_rate": 9.806308479691595e-05,
"loss": 4.9966,
"step": 18
},
{
"epoch": 0.008897676717745922,
"eval_loss": 5.08128547668457,
"eval_runtime": 16.0198,
"eval_samples_per_second": 106.368,
"eval_steps_per_second": 13.296,
"step": 18
},
{
"epoch": 0.009391992090954029,
"grad_norm": 5.7005438804626465,
"learning_rate": 9.755282581475769e-05,
"loss": 5.3554,
"step": 19
},
{
"epoch": 0.009886307464162136,
"grad_norm": 4.7721662521362305,
"learning_rate": 9.698463103929542e-05,
"loss": 4.8581,
"step": 20
},
{
"epoch": 0.010380622837370242,
"grad_norm": 4.946169376373291,
"learning_rate": 9.635919272833938e-05,
"loss": 5.1969,
"step": 21
},
{
"epoch": 0.010874938210578349,
"grad_norm": 4.1228837966918945,
"learning_rate": 9.567727288213005e-05,
"loss": 4.9139,
"step": 22
},
{
"epoch": 0.011369253583786456,
"grad_norm": 4.201045513153076,
"learning_rate": 9.493970231495835e-05,
"loss": 5.1366,
"step": 23
},
{
"epoch": 0.011863568956994563,
"grad_norm": 3.6851260662078857,
"learning_rate": 9.414737964294636e-05,
"loss": 4.8877,
"step": 24
},
{
"epoch": 0.012357884330202669,
"grad_norm": 4.2649006843566895,
"learning_rate": 9.330127018922194e-05,
"loss": 5.2366,
"step": 25
},
{
"epoch": 0.012852199703410776,
"grad_norm": 4.966331481933594,
"learning_rate": 9.24024048078213e-05,
"loss": 5.4225,
"step": 26
},
{
"epoch": 0.013346515076618883,
"grad_norm": 4.310700416564941,
"learning_rate": 9.145187862775209e-05,
"loss": 4.83,
"step": 27
},
{
"epoch": 0.013346515076618883,
"eval_loss": 4.848480224609375,
"eval_runtime": 16.1237,
"eval_samples_per_second": 105.683,
"eval_steps_per_second": 13.21,
"step": 27
},
{
"epoch": 0.01384083044982699,
"grad_norm": 4.0290350914001465,
"learning_rate": 9.045084971874738e-05,
"loss": 4.9112,
"step": 28
},
{
"epoch": 0.014335145823035097,
"grad_norm": 3.842944622039795,
"learning_rate": 8.940053768033609e-05,
"loss": 5.2481,
"step": 29
},
{
"epoch": 0.014829461196243203,
"grad_norm": 3.50050687789917,
"learning_rate": 8.83022221559489e-05,
"loss": 5.4302,
"step": 30
},
{
"epoch": 0.01532377656945131,
"grad_norm": 4.106557369232178,
"learning_rate": 8.715724127386972e-05,
"loss": 5.1726,
"step": 31
},
{
"epoch": 0.015818091942659415,
"grad_norm": 3.827751398086548,
"learning_rate": 8.596699001693255e-05,
"loss": 5.0649,
"step": 32
},
{
"epoch": 0.016312407315867524,
"grad_norm": 3.7269105911254883,
"learning_rate": 8.473291852294987e-05,
"loss": 4.2374,
"step": 33
},
{
"epoch": 0.01680672268907563,
"grad_norm": 4.410573959350586,
"learning_rate": 8.345653031794292e-05,
"loss": 4.7856,
"step": 34
},
{
"epoch": 0.01730103806228374,
"grad_norm": 3.2043802738189697,
"learning_rate": 8.213938048432697e-05,
"loss": 4.8706,
"step": 35
},
{
"epoch": 0.017795353435491844,
"grad_norm": 3.296766996383667,
"learning_rate": 8.07830737662829e-05,
"loss": 4.6144,
"step": 36
},
{
"epoch": 0.017795353435491844,
"eval_loss": 4.6987786293029785,
"eval_runtime": 16.0343,
"eval_samples_per_second": 106.272,
"eval_steps_per_second": 13.284,
"step": 36
},
{
"epoch": 0.01828966880869995,
"grad_norm": 3.310955047607422,
"learning_rate": 7.938926261462366e-05,
"loss": 4.5446,
"step": 37
},
{
"epoch": 0.018783984181908058,
"grad_norm": 3.780261516571045,
"learning_rate": 7.795964517353735e-05,
"loss": 4.623,
"step": 38
},
{
"epoch": 0.019278299555116164,
"grad_norm": 3.485846519470215,
"learning_rate": 7.649596321166024e-05,
"loss": 4.2782,
"step": 39
},
{
"epoch": 0.019772614928324272,
"grad_norm": 3.0565738677978516,
"learning_rate": 7.500000000000001e-05,
"loss": 4.4511,
"step": 40
},
{
"epoch": 0.020266930301532378,
"grad_norm": 3.8564116954803467,
"learning_rate": 7.347357813929454e-05,
"loss": 4.6911,
"step": 41
},
{
"epoch": 0.020761245674740483,
"grad_norm": 3.3952383995056152,
"learning_rate": 7.191855733945387e-05,
"loss": 4.5288,
"step": 42
},
{
"epoch": 0.021255561047948592,
"grad_norm": 3.3177618980407715,
"learning_rate": 7.033683215379002e-05,
"loss": 5.0228,
"step": 43
},
{
"epoch": 0.021749876421156698,
"grad_norm": 3.6685125827789307,
"learning_rate": 6.873032967079561e-05,
"loss": 4.0529,
"step": 44
},
{
"epoch": 0.022244191794364803,
"grad_norm": 3.952240467071533,
"learning_rate": 6.710100716628344e-05,
"loss": 4.7966,
"step": 45
},
{
"epoch": 0.022244191794364803,
"eval_loss": 4.604233264923096,
"eval_runtime": 16.0286,
"eval_samples_per_second": 106.31,
"eval_steps_per_second": 13.289,
"step": 45
},
{
"epoch": 0.022738507167572912,
"grad_norm": 3.639888286590576,
"learning_rate": 6.545084971874738e-05,
"loss": 4.9396,
"step": 46
},
{
"epoch": 0.023232822540781017,
"grad_norm": 3.4729416370391846,
"learning_rate": 6.378186779084995e-05,
"loss": 4.2892,
"step": 47
},
{
"epoch": 0.023727137913989126,
"grad_norm": 3.783601760864258,
"learning_rate": 6.209609477998338e-05,
"loss": 4.4242,
"step": 48
},
{
"epoch": 0.02422145328719723,
"grad_norm": 3.583552360534668,
"learning_rate": 6.0395584540887963e-05,
"loss": 4.63,
"step": 49
},
{
"epoch": 0.024715768660405337,
"grad_norm": 3.3208210468292236,
"learning_rate": 5.868240888334653e-05,
"loss": 3.8583,
"step": 50
},
{
"epoch": 0.025210084033613446,
"grad_norm": 3.7597432136535645,
"learning_rate": 5.695865504800327e-05,
"loss": 4.9806,
"step": 51
},
{
"epoch": 0.02570439940682155,
"grad_norm": 3.736236333847046,
"learning_rate": 5.522642316338268e-05,
"loss": 4.5005,
"step": 52
},
{
"epoch": 0.02619871478002966,
"grad_norm": 3.141097068786621,
"learning_rate": 5.348782368720626e-05,
"loss": 3.89,
"step": 53
},
{
"epoch": 0.026693030153237766,
"grad_norm": 4.1620612144470215,
"learning_rate": 5.174497483512506e-05,
"loss": 4.5286,
"step": 54
},
{
"epoch": 0.026693030153237766,
"eval_loss": 4.54187536239624,
"eval_runtime": 16.0486,
"eval_samples_per_second": 106.177,
"eval_steps_per_second": 13.272,
"step": 54
},
{
"epoch": 0.02718734552644587,
"grad_norm": 3.6479244232177734,
"learning_rate": 5e-05,
"loss": 4.2116,
"step": 55
},
{
"epoch": 0.02768166089965398,
"grad_norm": 3.2480669021606445,
"learning_rate": 4.825502516487497e-05,
"loss": 4.7313,
"step": 56
},
{
"epoch": 0.028175976272862086,
"grad_norm": 3.7081687450408936,
"learning_rate": 4.6512176312793736e-05,
"loss": 4.6713,
"step": 57
},
{
"epoch": 0.028670291646070194,
"grad_norm": 3.3734421730041504,
"learning_rate": 4.477357683661734e-05,
"loss": 4.124,
"step": 58
},
{
"epoch": 0.0291646070192783,
"grad_norm": 3.3389265537261963,
"learning_rate": 4.3041344951996746e-05,
"loss": 4.3516,
"step": 59
},
{
"epoch": 0.029658922392486405,
"grad_norm": 3.852362871170044,
"learning_rate": 4.131759111665349e-05,
"loss": 4.4847,
"step": 60
},
{
"epoch": 0.030153237765694514,
"grad_norm": 3.656104803085327,
"learning_rate": 3.960441545911204e-05,
"loss": 4.5655,
"step": 61
},
{
"epoch": 0.03064755313890262,
"grad_norm": 3.5071182250976562,
"learning_rate": 3.790390522001662e-05,
"loss": 4.7392,
"step": 62
},
{
"epoch": 0.031141868512110725,
"grad_norm": 3.654244899749756,
"learning_rate": 3.6218132209150045e-05,
"loss": 4.2389,
"step": 63
},
{
"epoch": 0.031141868512110725,
"eval_loss": 4.511841773986816,
"eval_runtime": 16.0359,
"eval_samples_per_second": 106.262,
"eval_steps_per_second": 13.283,
"step": 63
},
{
"epoch": 0.03163618388531883,
"grad_norm": 3.5840067863464355,
"learning_rate": 3.4549150281252636e-05,
"loss": 4.2239,
"step": 64
},
{
"epoch": 0.03213049925852694,
"grad_norm": 3.3929975032806396,
"learning_rate": 3.289899283371657e-05,
"loss": 4.3055,
"step": 65
},
{
"epoch": 0.03262481463173505,
"grad_norm": 3.057061195373535,
"learning_rate": 3.12696703292044e-05,
"loss": 4.8372,
"step": 66
},
{
"epoch": 0.033119130004943154,
"grad_norm": 3.8172447681427,
"learning_rate": 2.9663167846209998e-05,
"loss": 3.8134,
"step": 67
},
{
"epoch": 0.03361344537815126,
"grad_norm": 3.3502960205078125,
"learning_rate": 2.8081442660546125e-05,
"loss": 4.2059,
"step": 68
},
{
"epoch": 0.034107760751359364,
"grad_norm": 3.4546589851379395,
"learning_rate": 2.6526421860705473e-05,
"loss": 4.945,
"step": 69
},
{
"epoch": 0.03460207612456748,
"grad_norm": 3.2949929237365723,
"learning_rate": 2.500000000000001e-05,
"loss": 4.4671,
"step": 70
},
{
"epoch": 0.03509639149777558,
"grad_norm": 3.4756672382354736,
"learning_rate": 2.350403678833976e-05,
"loss": 4.6566,
"step": 71
},
{
"epoch": 0.03559070687098369,
"grad_norm": 3.5453972816467285,
"learning_rate": 2.2040354826462668e-05,
"loss": 4.5702,
"step": 72
},
{
"epoch": 0.03559070687098369,
"eval_loss": 4.497529029846191,
"eval_runtime": 16.0325,
"eval_samples_per_second": 106.284,
"eval_steps_per_second": 13.286,
"step": 72
},
{
"epoch": 0.03608502224419179,
"grad_norm": 3.2409377098083496,
"learning_rate": 2.061073738537635e-05,
"loss": 4.9055,
"step": 73
},
{
"epoch": 0.0365793376173999,
"grad_norm": 3.345048666000366,
"learning_rate": 1.9216926233717085e-05,
"loss": 4.4359,
"step": 74
},
{
"epoch": 0.03707365299060801,
"grad_norm": 3.1976587772369385,
"learning_rate": 1.7860619515673033e-05,
"loss": 4.5264,
"step": 75
},
{
"epoch": 0.037567968363816116,
"grad_norm": 3.8845295906066895,
"learning_rate": 1.6543469682057106e-05,
"loss": 4.8841,
"step": 76
},
{
"epoch": 0.03806228373702422,
"grad_norm": 3.018535852432251,
"learning_rate": 1.526708147705013e-05,
"loss": 4.3427,
"step": 77
},
{
"epoch": 0.03855659911023233,
"grad_norm": 3.39355731010437,
"learning_rate": 1.4033009983067452e-05,
"loss": 4.6911,
"step": 78
},
{
"epoch": 0.03905091448344043,
"grad_norm": 3.304109573364258,
"learning_rate": 1.2842758726130283e-05,
"loss": 4.4204,
"step": 79
},
{
"epoch": 0.039545229856648545,
"grad_norm": 3.374882936477661,
"learning_rate": 1.1697777844051105e-05,
"loss": 4.485,
"step": 80
},
{
"epoch": 0.04003954522985665,
"grad_norm": 3.7261548042297363,
"learning_rate": 1.0599462319663905e-05,
"loss": 4.2292,
"step": 81
},
{
"epoch": 0.04003954522985665,
"eval_loss": 4.489640235900879,
"eval_runtime": 16.0787,
"eval_samples_per_second": 105.979,
"eval_steps_per_second": 13.247,
"step": 81
},
{
"epoch": 0.040533860603064756,
"grad_norm": 3.5689377784729004,
"learning_rate": 9.549150281252633e-06,
"loss": 4.4952,
"step": 82
},
{
"epoch": 0.04102817597627286,
"grad_norm": 3.5652458667755127,
"learning_rate": 8.548121372247918e-06,
"loss": 4.2532,
"step": 83
},
{
"epoch": 0.04152249134948097,
"grad_norm": 3.5636212825775146,
"learning_rate": 7.597595192178702e-06,
"loss": 4.8038,
"step": 84
},
{
"epoch": 0.04201680672268908,
"grad_norm": 2.6824896335601807,
"learning_rate": 6.698729810778065e-06,
"loss": 4.2188,
"step": 85
},
{
"epoch": 0.042511122095897184,
"grad_norm": 3.0280864238739014,
"learning_rate": 5.852620357053651e-06,
"loss": 4.6739,
"step": 86
},
{
"epoch": 0.04300543746910529,
"grad_norm": 3.2650485038757324,
"learning_rate": 5.060297685041659e-06,
"loss": 4.0116,
"step": 87
},
{
"epoch": 0.043499752842313395,
"grad_norm": 3.5868074893951416,
"learning_rate": 4.322727117869951e-06,
"loss": 4.4603,
"step": 88
},
{
"epoch": 0.0439940682155215,
"grad_norm": 2.4912710189819336,
"learning_rate": 3.6408072716606346e-06,
"loss": 4.9682,
"step": 89
},
{
"epoch": 0.044488383588729606,
"grad_norm": 2.7958810329437256,
"learning_rate": 3.0153689607045845e-06,
"loss": 4.45,
"step": 90
},
{
"epoch": 0.044488383588729606,
"eval_loss": 4.486810207366943,
"eval_runtime": 16.024,
"eval_samples_per_second": 106.34,
"eval_steps_per_second": 13.293,
"step": 90
},
{
"epoch": 0.04498269896193772,
"grad_norm": 4.003046989440918,
"learning_rate": 2.4471741852423237e-06,
"loss": 4.7876,
"step": 91
},
{
"epoch": 0.045477014335145824,
"grad_norm": 3.0942835807800293,
"learning_rate": 1.9369152030840556e-06,
"loss": 5.1126,
"step": 92
},
{
"epoch": 0.04597132970835393,
"grad_norm": 2.8093137741088867,
"learning_rate": 1.4852136862001764e-06,
"loss": 4.389,
"step": 93
},
{
"epoch": 0.046465645081562035,
"grad_norm": 3.216890811920166,
"learning_rate": 1.0926199633097157e-06,
"loss": 5.2788,
"step": 94
},
{
"epoch": 0.04695996045477014,
"grad_norm": 3.361987590789795,
"learning_rate": 7.596123493895991e-07,
"loss": 4.564,
"step": 95
},
{
"epoch": 0.04745427582797825,
"grad_norm": 2.9318277835845947,
"learning_rate": 4.865965629214819e-07,
"loss": 4.2455,
"step": 96
},
{
"epoch": 0.04794859120118636,
"grad_norm": 3.228325605392456,
"learning_rate": 2.7390523158633554e-07,
"loss": 4.3779,
"step": 97
},
{
"epoch": 0.04844290657439446,
"grad_norm": 3.264146327972412,
"learning_rate": 1.2179748700879012e-07,
"loss": 4.5083,
"step": 98
},
{
"epoch": 0.04893722194760257,
"grad_norm": 3.3395464420318604,
"learning_rate": 3.04586490452119e-08,
"loss": 4.5052,
"step": 99
},
{
"epoch": 0.04893722194760257,
"eval_loss": 4.485798358917236,
"eval_runtime": 16.0193,
"eval_samples_per_second": 106.372,
"eval_steps_per_second": 13.296,
"step": 99
},
{
"epoch": 0.049431537320810674,
"grad_norm": 3.2715954780578613,
"learning_rate": 0.0,
"loss": 4.3831,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 27149520076800.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}