{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.00970667637334304,
  "eval_steps": 100,
  "global_step": 400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 2.42666909333576e-05,
      "eval_loss": 2.878453493118286,
      "eval_runtime": 1372.4063,
      "eval_samples_per_second": 12.643,
      "eval_steps_per_second": 6.322,
      "step": 1
    },
    {
      "epoch": 0.000121333454666788,
      "grad_norm": 11.818228721618652,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 9.5301,
      "step": 5
    },
    {
      "epoch": 0.000242666909333576,
      "grad_norm": 10.987421035766602,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 9.7386,
      "step": 10
    },
    {
      "epoch": 0.000364000364000364,
      "grad_norm": 12.346467971801758,
      "learning_rate": 5e-05,
      "loss": 9.5456,
      "step": 15
    },
    {
      "epoch": 0.000485333818667152,
      "grad_norm": 11.592193603515625,
      "learning_rate": 6.666666666666667e-05,
      "loss": 8.9309,
      "step": 20
    },
    {
      "epoch": 0.00060666727333394,
      "grad_norm": 14.079370498657227,
      "learning_rate": 8.333333333333334e-05,
      "loss": 8.7923,
      "step": 25
    },
    {
      "epoch": 0.000728000728000728,
      "grad_norm": 13.561756134033203,
      "learning_rate": 0.0001,
      "loss": 8.3366,
      "step": 30
    },
    {
      "epoch": 0.000849334182667516,
      "grad_norm": 22.25603485107422,
      "learning_rate": 9.995494831023409e-05,
      "loss": 8.7607,
      "step": 35
    },
    {
      "epoch": 0.000970667637334304,
      "grad_norm": 18.023517608642578,
      "learning_rate": 9.981987442712633e-05,
      "loss": 9.2432,
      "step": 40
    },
    {
      "epoch": 0.001092001092001092,
      "grad_norm": 25.709226608276367,
      "learning_rate": 9.959502176294383e-05,
      "loss": 9.2506,
      "step": 45
    },
    {
      "epoch": 0.00121333454666788,
      "grad_norm": 33.24303436279297,
      "learning_rate": 9.928079551738543e-05,
      "loss": 10.1706,
      "step": 50
    },
    {
      "epoch": 0.001334668001334668,
      "grad_norm": 9.444133758544922,
      "learning_rate": 9.887776194738432e-05,
      "loss": 8.8397,
      "step": 55
    },
    {
      "epoch": 0.001456001456001456,
      "grad_norm": 8.350540161132812,
      "learning_rate": 9.838664734667495e-05,
      "loss": 8.4448,
      "step": 60
    },
    {
      "epoch": 0.001577334910668244,
      "grad_norm": 7.977123260498047,
      "learning_rate": 9.780833673696254e-05,
      "loss": 8.3745,
      "step": 65
    },
    {
      "epoch": 0.001698668365335032,
      "grad_norm": 8.620443344116211,
      "learning_rate": 9.714387227305422e-05,
      "loss": 8.2024,
      "step": 70
    },
    {
      "epoch": 0.00182000182000182,
      "grad_norm": 10.907526969909668,
      "learning_rate": 9.639445136482548e-05,
      "loss": 8.5011,
      "step": 75
    },
    {
      "epoch": 0.001941335274668608,
      "grad_norm": 13.035191535949707,
      "learning_rate": 9.55614245194068e-05,
      "loss": 8.7188,
      "step": 80
    },
    {
      "epoch": 0.002062668729335396,
      "grad_norm": 16.095685958862305,
      "learning_rate": 9.464629290747842e-05,
      "loss": 8.6738,
      "step": 85
    },
    {
      "epoch": 0.002184002184002184,
      "grad_norm": 24.661006927490234,
      "learning_rate": 9.365070565805941e-05,
      "loss": 8.3647,
      "step": 90
    },
    {
      "epoch": 0.002305335638668972,
      "grad_norm": 30.535736083984375,
      "learning_rate": 9.257645688666556e-05,
      "loss": 10.0638,
      "step": 95
    },
    {
      "epoch": 0.00242666909333576,
      "grad_norm": 46.029945373535156,
      "learning_rate": 9.142548246219212e-05,
      "loss": 9.7736,
      "step": 100
    },
    {
      "epoch": 0.00242666909333576,
      "eval_loss": 2.331610918045044,
      "eval_runtime": 1377.2905,
      "eval_samples_per_second": 12.598,
      "eval_steps_per_second": 6.299,
      "step": 100
    },
    {
      "epoch": 0.002548002548002548,
      "grad_norm": 12.967903137207031,
      "learning_rate": 9.019985651834703e-05,
      "loss": 8.8067,
      "step": 105
    },
    {
      "epoch": 0.002669336002669336,
      "grad_norm": 8.18023681640625,
      "learning_rate": 8.890178771592199e-05,
      "loss": 8.9571,
      "step": 110
    },
    {
      "epoch": 0.002790669457336124,
      "grad_norm": 8.041296005249023,
      "learning_rate": 8.753361526263621e-05,
      "loss": 8.3393,
      "step": 115
    },
    {
      "epoch": 0.002912002912002912,
      "grad_norm": 9.720775604248047,
      "learning_rate": 8.609780469772623e-05,
      "loss": 8.3803,
      "step": 120
    },
    {
      "epoch": 0.0030333363666697,
      "grad_norm": 11.025723457336426,
      "learning_rate": 8.459694344887732e-05,
      "loss": 8.1113,
      "step": 125
    },
    {
      "epoch": 0.003154669821336488,
      "grad_norm": 14.601977348327637,
      "learning_rate": 8.303373616950408e-05,
      "loss": 8.8132,
      "step": 130
    },
    {
      "epoch": 0.003276003276003276,
      "grad_norm": 14.005407333374023,
      "learning_rate": 8.141099986478212e-05,
      "loss": 8.5404,
      "step": 135
    },
    {
      "epoch": 0.003397336730670064,
      "grad_norm": 19.68910026550293,
      "learning_rate": 7.973165881521434e-05,
      "loss": 8.8285,
      "step": 140
    },
    {
      "epoch": 0.003518670185336852,
      "grad_norm": 26.25465965270996,
      "learning_rate": 7.799873930687978e-05,
      "loss": 9.6295,
      "step": 145
    },
    {
      "epoch": 0.00364000364000364,
      "grad_norm": 31.541311264038086,
      "learning_rate": 7.621536417786159e-05,
      "loss": 9.3287,
      "step": 150
    },
    {
      "epoch": 0.003761337094670428,
      "grad_norm": 8.852987289428711,
      "learning_rate": 7.438474719068173e-05,
      "loss": 8.5892,
      "step": 155
    },
    {
      "epoch": 0.003882670549337216,
      "grad_norm": 6.915546417236328,
      "learning_rate": 7.251018724088367e-05,
      "loss": 8.5808,
      "step": 160
    },
    {
      "epoch": 0.004004004004004004,
      "grad_norm": 8.234546661376953,
      "learning_rate": 7.059506241219965e-05,
      "loss": 8.3382,
      "step": 165
    },
    {
      "epoch": 0.004125337458670792,
      "grad_norm": 9.939790725708008,
      "learning_rate": 6.864282388901544e-05,
      "loss": 8.1358,
      "step": 170
    },
    {
      "epoch": 0.00424667091333758,
      "grad_norm": 9.333702087402344,
      "learning_rate": 6.665698973710288e-05,
      "loss": 8.4612,
      "step": 175
    },
    {
      "epoch": 0.004368004368004368,
      "grad_norm": 11.787590980529785,
      "learning_rate": 6.464113856382752e-05,
      "loss": 7.9593,
      "step": 180
    },
    {
      "epoch": 0.004489337822671156,
      "grad_norm": 12.82723331451416,
      "learning_rate": 6.259890306925627e-05,
      "loss": 8.0649,
      "step": 185
    },
    {
      "epoch": 0.004610671277337944,
      "grad_norm": 15.473737716674805,
      "learning_rate": 6.0533963499786314e-05,
      "loss": 8.3759,
      "step": 190
    },
    {
      "epoch": 0.004732004732004732,
      "grad_norm": 30.10947608947754,
      "learning_rate": 5.8450041016092464e-05,
      "loss": 8.452,
      "step": 195
    },
    {
      "epoch": 0.00485333818667152,
      "grad_norm": 34.635498046875,
      "learning_rate": 5.6350890987343944e-05,
      "loss": 9.8912,
      "step": 200
    },
    {
      "epoch": 0.00485333818667152,
      "eval_loss": 2.1580023765563965,
      "eval_runtime": 1378.5777,
      "eval_samples_per_second": 12.586,
      "eval_steps_per_second": 6.293,
      "step": 200
    },
    {
      "epoch": 0.004974671641338308,
      "grad_norm": 7.406271934509277,
      "learning_rate": 5.4240296223775465e-05,
      "loss": 8.5834,
      "step": 205
    },
    {
      "epoch": 0.005096005096005096,
      "grad_norm": 7.651792049407959,
      "learning_rate": 5.212206015980742e-05,
      "loss": 8.4876,
      "step": 210
    },
    {
      "epoch": 0.005217338550671884,
      "grad_norm": 7.223945617675781,
      "learning_rate": 5e-05,
      "loss": 8.0378,
      "step": 215
    },
    {
      "epoch": 0.005338672005338672,
      "grad_norm": 9.113810539245605,
      "learning_rate": 4.78779398401926e-05,
      "loss": 8.6686,
      "step": 220
    },
    {
      "epoch": 0.00546000546000546,
      "grad_norm": 9.819528579711914,
      "learning_rate": 4.575970377622456e-05,
      "loss": 8.2328,
      "step": 225
    },
    {
      "epoch": 0.005581338914672248,
      "grad_norm": 12.666204452514648,
      "learning_rate": 4.364910901265606e-05,
      "loss": 8.1959,
      "step": 230
    },
    {
      "epoch": 0.005702672369339036,
      "grad_norm": 14.743393898010254,
      "learning_rate": 4.1549958983907555e-05,
      "loss": 8.423,
      "step": 235
    },
    {
      "epoch": 0.005824005824005824,
      "grad_norm": 16.668725967407227,
      "learning_rate": 3.94660365002137e-05,
      "loss": 8.2184,
      "step": 240
    },
    {
      "epoch": 0.005945339278672612,
      "grad_norm": 22.31512451171875,
      "learning_rate": 3.740109693074375e-05,
      "loss": 8.5404,
      "step": 245
    },
    {
      "epoch": 0.0060666727333394,
      "grad_norm": 29.229515075683594,
      "learning_rate": 3.5358861436172485e-05,
      "loss": 8.5538,
      "step": 250
    },
    {
      "epoch": 0.006188006188006188,
      "grad_norm": 5.700004577636719,
      "learning_rate": 3.334301026289712e-05,
      "loss": 8.3141,
      "step": 255
    },
    {
      "epoch": 0.006309339642672976,
      "grad_norm": 6.940698623657227,
      "learning_rate": 3.135717611098458e-05,
      "loss": 8.3771,
      "step": 260
    },
    {
      "epoch": 0.006430673097339764,
      "grad_norm": 7.7714996337890625,
      "learning_rate": 2.9404937587800375e-05,
      "loss": 8.2132,
      "step": 265
    },
    {
      "epoch": 0.006552006552006552,
      "grad_norm": 7.959089279174805,
      "learning_rate": 2.748981275911633e-05,
      "loss": 8.0231,
      "step": 270
    },
    {
      "epoch": 0.00667334000667334,
      "grad_norm": 9.615630149841309,
      "learning_rate": 2.5615252809318284e-05,
      "loss": 8.3783,
      "step": 275
    },
    {
      "epoch": 0.006794673461340128,
      "grad_norm": 10.886713027954102,
      "learning_rate": 2.3784635822138424e-05,
      "loss": 8.1763,
      "step": 280
    },
    {
      "epoch": 0.006916006916006916,
      "grad_norm": 13.622564315795898,
      "learning_rate": 2.2001260693120233e-05,
      "loss": 8.4149,
      "step": 285
    },
    {
      "epoch": 0.007037340370673704,
      "grad_norm": 12.632689476013184,
      "learning_rate": 2.026834118478567e-05,
      "loss": 8.6351,
      "step": 290
    },
    {
      "epoch": 0.007158673825340492,
      "grad_norm": 23.478530883789062,
      "learning_rate": 1.858900013521788e-05,
      "loss": 9.0105,
      "step": 295
    },
    {
      "epoch": 0.00728000728000728,
      "grad_norm": 38.015594482421875,
      "learning_rate": 1.6966263830495936e-05,
      "loss": 10.2111,
      "step": 300
    },
    {
      "epoch": 0.00728000728000728,
      "eval_loss": 2.095071792602539,
      "eval_runtime": 1377.368,
      "eval_samples_per_second": 12.597,
      "eval_steps_per_second": 6.299,
      "step": 300
    },
    {
      "epoch": 0.007401340734674068,
      "grad_norm": 5.979774475097656,
      "learning_rate": 1.5403056551122697e-05,
      "loss": 8.241,
      "step": 305
    },
    {
      "epoch": 0.007522674189340856,
      "grad_norm": 7.0730977058410645,
      "learning_rate": 1.3902195302273779e-05,
      "loss": 8.447,
      "step": 310
    },
    {
      "epoch": 0.007644007644007644,
      "grad_norm": 8.113957405090332,
      "learning_rate": 1.246638473736378e-05,
      "loss": 7.9333,
      "step": 315
    },
    {
      "epoch": 0.007765341098674432,
      "grad_norm": 8.48040771484375,
      "learning_rate": 1.1098212284078036e-05,
      "loss": 7.9684,
      "step": 320
    },
    {
      "epoch": 0.00788667455334122,
      "grad_norm": 10.31874942779541,
      "learning_rate": 9.800143481652979e-06,
      "loss": 8.1914,
      "step": 325
    },
    {
      "epoch": 0.008008008008008008,
      "grad_norm": 11.321582794189453,
      "learning_rate": 8.574517537807897e-06,
      "loss": 7.8632,
      "step": 330
    },
    {
      "epoch": 0.008129341462674796,
      "grad_norm": 13.255796432495117,
      "learning_rate": 7.423543113334436e-06,
      "loss": 8.512,
      "step": 335
    },
    {
      "epoch": 0.008250674917341584,
      "grad_norm": 15.251378059387207,
      "learning_rate": 6.349294341940593e-06,
      "loss": 7.9517,
      "step": 340
    },
    {
      "epoch": 0.008372008372008372,
      "grad_norm": 27.332515716552734,
      "learning_rate": 5.353707092521582e-06,
      "loss": 8.3471,
      "step": 345
    },
    {
      "epoch": 0.00849334182667516,
      "grad_norm": 31.89665985107422,
      "learning_rate": 4.43857548059321e-06,
      "loss": 10.6653,
      "step": 350
    },
    {
      "epoch": 0.008614675281341948,
      "grad_norm": 5.819257736206055,
      "learning_rate": 3.605548635174533e-06,
      "loss": 8.2614,
      "step": 355
    },
    {
      "epoch": 0.008736008736008737,
      "grad_norm": 6.525805950164795,
      "learning_rate": 2.85612772694579e-06,
      "loss": 8.3166,
      "step": 360
    },
    {
      "epoch": 0.008857342190675525,
      "grad_norm": 7.613458156585693,
      "learning_rate": 2.191663263037458e-06,
      "loss": 8.2945,
      "step": 365
    },
    {
      "epoch": 0.008978675645342313,
      "grad_norm": 8.144882202148438,
      "learning_rate": 1.6133526533250565e-06,
      "loss": 8.2034,
      "step": 370
    },
    {
      "epoch": 0.009100009100009101,
      "grad_norm": 11.035107612609863,
      "learning_rate": 1.1222380526156928e-06,
      "loss": 8.6847,
      "step": 375
    },
    {
      "epoch": 0.009221342554675887,
      "grad_norm": 10.601295471191406,
      "learning_rate": 7.192044826145771e-07,
      "loss": 8.3069,
      "step": 380
    },
    {
      "epoch": 0.009342676009342675,
      "grad_norm": 13.104430198669434,
      "learning_rate": 4.049782370561583e-07,
      "loss": 7.9392,
      "step": 385
    },
    {
      "epoch": 0.009464009464009463,
      "grad_norm": 15.135002136230469,
      "learning_rate": 1.8012557287367392e-07,
      "loss": 8.3515,
      "step": 390
    },
    {
      "epoch": 0.009585342918676252,
      "grad_norm": 21.206899642944336,
      "learning_rate": 4.5051689765929214e-08,
      "loss": 8.7798,
      "step": 395
    },
    {
      "epoch": 0.00970667637334304,
      "grad_norm": 25.19953727722168,
      "learning_rate": 0.0,
      "loss": 9.0401,
      "step": 400
    },
    {
      "epoch": 0.00970667637334304,
      "eval_loss": 2.0877583026885986,
      "eval_runtime": 1377.6722,
      "eval_samples_per_second": 12.594,
      "eval_steps_per_second": 6.298,
      "step": 400
    }
  ],
  "logging_steps": 5,
  "max_steps": 400,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.403012163651502e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}