{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 10.0,
  "eval_steps": 500,
  "global_step": 730,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0136986301369863,
      "grad_norm": 5.0625,
      "learning_rate": 2.7397260273972604e-06,
      "loss": 3.0538,
      "step": 1
    },
    {
      "epoch": 0.0684931506849315,
      "grad_norm": 2.875,
      "learning_rate": 1.3698630136986302e-05,
      "loss": 3.0796,
      "step": 5
    },
    {
      "epoch": 0.136986301369863,
      "grad_norm": 2.515625,
      "learning_rate": 2.7397260273972603e-05,
      "loss": 3.0061,
      "step": 10
    },
    {
      "epoch": 0.2054794520547945,
      "grad_norm": 6.15625,
      "learning_rate": 4.1095890410958905e-05,
      "loss": 2.8959,
      "step": 15
    },
    {
      "epoch": 0.273972602739726,
      "grad_norm": 1.921875,
      "learning_rate": 5.479452054794521e-05,
      "loss": 2.636,
      "step": 20
    },
    {
      "epoch": 0.3424657534246575,
      "grad_norm": 1.5546875,
      "learning_rate": 6.84931506849315e-05,
      "loss": 2.4025,
      "step": 25
    },
    {
      "epoch": 0.410958904109589,
      "grad_norm": 1.3671875,
      "learning_rate": 8.219178082191781e-05,
      "loss": 2.1878,
      "step": 30
    },
    {
      "epoch": 0.4794520547945205,
      "grad_norm": 1.59375,
      "learning_rate": 9.58904109589041e-05,
      "loss": 1.9943,
      "step": 35
    },
    {
      "epoch": 0.547945205479452,
      "grad_norm": 1.796875,
      "learning_rate": 0.00010958904109589041,
      "loss": 1.8382,
      "step": 40
    },
    {
      "epoch": 0.6164383561643836,
      "grad_norm": 1.015625,
      "learning_rate": 0.0001232876712328767,
      "loss": 1.7025,
      "step": 45
    },
    {
      "epoch": 0.684931506849315,
      "grad_norm": 1.0859375,
      "learning_rate": 0.000136986301369863,
      "loss": 1.5949,
      "step": 50
    },
    {
      "epoch": 0.7534246575342466,
      "grad_norm": 0.33984375,
      "learning_rate": 0.00015068493150684933,
      "loss": 1.5023,
      "step": 55
    },
    {
      "epoch": 0.821917808219178,
      "grad_norm": 0.328125,
      "learning_rate": 0.00016438356164383562,
      "loss": 1.4428,
      "step": 60
    },
    {
      "epoch": 0.8904109589041096,
      "grad_norm": 0.65234375,
      "learning_rate": 0.00017808219178082192,
      "loss": 1.3924,
      "step": 65
    },
    {
      "epoch": 0.958904109589041,
      "grad_norm": 0.40234375,
      "learning_rate": 0.0001917808219178082,
      "loss": 1.3438,
      "step": 70
    },
    {
      "epoch": 1.0,
      "eval_loss": 2.541907787322998,
      "eval_runtime": 0.5592,
      "eval_samples_per_second": 17.884,
      "eval_steps_per_second": 1.788,
      "step": 73
    },
    {
      "epoch": 1.0273972602739727,
      "grad_norm": 0.4296875,
      "learning_rate": 0.00019999542705801296,
      "loss": 1.3109,
      "step": 75
    },
    {
      "epoch": 1.095890410958904,
      "grad_norm": 0.478515625,
      "learning_rate": 0.00019994398626371643,
      "loss": 1.2812,
      "step": 80
    },
    {
      "epoch": 1.1643835616438356,
      "grad_norm": 0.515625,
      "learning_rate": 0.0001998354179989585,
      "loss": 1.2712,
      "step": 85
    },
    {
      "epoch": 1.2328767123287672,
      "grad_norm": 0.50390625,
      "learning_rate": 0.00019966978432080316,
      "loss": 1.2483,
      "step": 90
    },
    {
      "epoch": 1.3013698630136985,
      "grad_norm": 0.37109375,
      "learning_rate": 0.00019944717990461207,
      "loss": 1.2383,
      "step": 95
    },
    {
      "epoch": 1.36986301369863,
      "grad_norm": 0.65234375,
      "learning_rate": 0.000199167731989929,
      "loss": 1.2227,
      "step": 100
    },
    {
      "epoch": 1.4383561643835616,
      "grad_norm": 0.404296875,
      "learning_rate": 0.00019883160030775016,
      "loss": 1.2099,
      "step": 105
    },
    {
      "epoch": 1.5068493150684932,
      "grad_norm": 0.318359375,
      "learning_rate": 0.00019843897698922284,
      "loss": 1.2186,
      "step": 110
    },
    {
      "epoch": 1.5753424657534247,
      "grad_norm": 0.443359375,
      "learning_rate": 0.0001979900864558242,
      "loss": 1.1989,
      "step": 115
    },
    {
      "epoch": 1.643835616438356,
      "grad_norm": 0.32421875,
      "learning_rate": 0.00019748518529108316,
      "loss": 1.2146,
      "step": 120
    },
    {
      "epoch": 1.7123287671232876,
      "grad_norm": 0.435546875,
      "learning_rate": 0.00019692456209391846,
      "loss": 1.2015,
      "step": 125
    },
    {
      "epoch": 1.7808219178082192,
      "grad_norm": 0.671875,
      "learning_rate": 0.00019630853731367713,
      "loss": 1.1884,
      "step": 130
    },
    {
      "epoch": 1.8493150684931505,
      "grad_norm": 0.259765625,
      "learning_rate": 0.0001956374630669672,
      "loss": 1.1902,
      "step": 135
    },
    {
      "epoch": 1.9178082191780823,
      "grad_norm": 0.5234375,
      "learning_rate": 0.00019491172293638968,
      "loss": 1.1781,
      "step": 140
    },
    {
      "epoch": 1.9863013698630136,
      "grad_norm": 0.396484375,
      "learning_rate": 0.00019413173175128473,
      "loss": 1.1767,
      "step": 145
    },
    {
      "epoch": 2.0,
      "eval_loss": 2.497932195663452,
      "eval_runtime": 0.5527,
      "eval_samples_per_second": 18.093,
      "eval_steps_per_second": 1.809,
      "step": 146
    },
    {
      "epoch": 2.0547945205479454,
      "grad_norm": 0.494140625,
      "learning_rate": 0.00019329793535061723,
      "loss": 1.1406,
      "step": 150
    },
    {
      "epoch": 2.1232876712328768,
      "grad_norm": 0.65234375,
      "learning_rate": 0.00019241081032813772,
      "loss": 1.1353,
      "step": 155
    },
    {
      "epoch": 2.191780821917808,
      "grad_norm": 0.296875,
      "learning_rate": 0.0001914708637599636,
      "loss": 1.1387,
      "step": 160
    },
    {
      "epoch": 2.26027397260274,
      "grad_norm": 0.328125,
      "learning_rate": 0.00019047863291473717,
      "loss": 1.1186,
      "step": 165
    },
    {
      "epoch": 2.328767123287671,
      "grad_norm": 0.349609375,
      "learning_rate": 0.0001894346849465257,
      "loss": 1.1269,
      "step": 170
    },
    {
      "epoch": 2.3972602739726026,
      "grad_norm": 0.435546875,
      "learning_rate": 0.00018833961657063885,
      "loss": 1.1377,
      "step": 175
    },
    {
      "epoch": 2.4657534246575343,
      "grad_norm": 0.400390625,
      "learning_rate": 0.00018719405372254948,
      "loss": 1.1241,
      "step": 180
    },
    {
      "epoch": 2.5342465753424657,
      "grad_norm": 0.470703125,
      "learning_rate": 0.00018599865120011192,
      "loss": 1.1301,
      "step": 185
    },
    {
      "epoch": 2.602739726027397,
      "grad_norm": 0.40625,
      "learning_rate": 0.00018475409228928312,
      "loss": 1.1252,
      "step": 190
    },
    {
      "epoch": 2.671232876712329,
      "grad_norm": 0.53515625,
      "learning_rate": 0.00018346108837355972,
      "loss": 1.1193,
      "step": 195
    },
    {
      "epoch": 2.73972602739726,
      "grad_norm": 0.494140625,
      "learning_rate": 0.00018212037852735486,
      "loss": 1.1163,
      "step": 200
    },
    {
      "epoch": 2.808219178082192,
      "grad_norm": 0.478515625,
      "learning_rate": 0.00018073272909354727,
      "loss": 1.1165,
      "step": 205
    },
    {
      "epoch": 2.8767123287671232,
      "grad_norm": 0.314453125,
      "learning_rate": 0.00017929893324544332,
      "loss": 1.1205,
      "step": 210
    },
    {
      "epoch": 2.9452054794520546,
      "grad_norm": 0.703125,
      "learning_rate": 0.00017781981053340337,
      "loss": 1.1163,
      "step": 215
    },
    {
      "epoch": 3.0,
      "eval_loss": 2.496716022491455,
      "eval_runtime": 0.5457,
      "eval_samples_per_second": 18.324,
      "eval_steps_per_second": 1.832,
      "step": 219
    },
    {
      "epoch": 3.0136986301369864,
      "grad_norm": 0.310546875,
      "learning_rate": 0.00017629620641639103,
      "loss": 1.1017,
      "step": 220
    },
    {
      "epoch": 3.0821917808219177,
      "grad_norm": 0.357421875,
      "learning_rate": 0.00017472899177871297,
      "loss": 1.0756,
      "step": 225
    },
    {
      "epoch": 3.1506849315068495,
      "grad_norm": 0.36328125,
      "learning_rate": 0.00017311906243222614,
      "loss": 1.0712,
      "step": 230
    },
    {
      "epoch": 3.219178082191781,
      "grad_norm": 0.341796875,
      "learning_rate": 0.00017146733860429612,
      "loss": 1.0791,
      "step": 235
    },
    {
      "epoch": 3.287671232876712,
      "grad_norm": 0.396484375,
      "learning_rate": 0.00016977476441179992,
      "loss": 1.078,
      "step": 240
    },
    {
      "epoch": 3.356164383561644,
      "grad_norm": 0.51171875,
      "learning_rate": 0.0001680423073214737,
      "loss": 1.0753,
      "step": 245
    },
    {
      "epoch": 3.4246575342465753,
      "grad_norm": 0.359375,
      "learning_rate": 0.00016627095759691362,
      "loss": 1.0861,
      "step": 250
    },
    {
      "epoch": 3.493150684931507,
      "grad_norm": 0.7578125,
      "learning_rate": 0.00016446172773254629,
      "loss": 1.0716,
      "step": 255
    },
    {
      "epoch": 3.5616438356164384,
      "grad_norm": 0.453125,
      "learning_rate": 0.0001626156518748922,
      "loss": 1.0669,
      "step": 260
    },
    {
      "epoch": 3.6301369863013697,
      "grad_norm": 0.71875,
      "learning_rate": 0.0001607337852314527,
      "loss": 1.0759,
      "step": 265
    },
    {
      "epoch": 3.6986301369863015,
      "grad_norm": 0.412109375,
      "learning_rate": 0.00015881720346755905,
      "loss": 1.0831,
      "step": 270
    },
    {
      "epoch": 3.767123287671233,
      "grad_norm": 0.34765625,
      "learning_rate": 0.00015686700209152738,
      "loss": 1.08,
      "step": 275
    },
    {
      "epoch": 3.8356164383561646,
      "grad_norm": 0.58203125,
      "learning_rate": 0.00015488429582847192,
      "loss": 1.0743,
      "step": 280
    },
    {
      "epoch": 3.904109589041096,
      "grad_norm": 0.37109375,
      "learning_rate": 0.0001528702179831338,
      "loss": 1.0544,
      "step": 285
    },
    {
      "epoch": 3.9726027397260273,
      "grad_norm": 0.39453125,
      "learning_rate": 0.00015082591979208976,
      "loss": 1.0605,
      "step": 290
    },
    {
      "epoch": 4.0,
      "eval_loss": 2.5120725631713867,
      "eval_runtime": 0.5541,
      "eval_samples_per_second": 18.046,
      "eval_steps_per_second": 1.805,
      "step": 292
    },
    {
      "epoch": 4.041095890410959,
      "grad_norm": 0.41796875,
      "learning_rate": 0.00014875256976571135,
      "loss": 1.0555,
      "step": 295
    },
    {
      "epoch": 4.109589041095891,
      "grad_norm": 0.3671875,
      "learning_rate": 0.00014665135302025035,
      "loss": 1.0315,
      "step": 300
    },
    {
      "epoch": 4.178082191780822,
      "grad_norm": 0.3984375,
      "learning_rate": 0.00014452347060043237,
      "loss": 1.0247,
      "step": 305
    },
    {
      "epoch": 4.2465753424657535,
      "grad_norm": 0.45703125,
      "learning_rate": 0.0001423701387929459,
      "loss": 1.0381,
      "step": 310
    },
    {
      "epoch": 4.315068493150685,
      "grad_norm": 0.357421875,
      "learning_rate": 0.00014019258843121893,
      "loss": 1.0427,
      "step": 315
    },
    {
      "epoch": 4.383561643835616,
      "grad_norm": 0.36328125,
      "learning_rate": 0.00013799206419188103,
      "loss": 1.038,
      "step": 320
    },
    {
      "epoch": 4.4520547945205475,
      "grad_norm": 0.388671875,
      "learning_rate": 0.0001357698238833126,
      "loss": 1.0269,
      "step": 325
    },
    {
      "epoch": 4.52054794520548,
      "grad_norm": 0.404296875,
      "learning_rate": 0.00013352713772668765,
      "loss": 1.0337,
      "step": 330
    },
    {
      "epoch": 4.589041095890411,
      "grad_norm": 0.421875,
      "learning_rate": 0.00013126528762992247,
      "loss": 1.031,
      "step": 335
    },
    {
      "epoch": 4.657534246575342,
      "grad_norm": 0.53125,
      "learning_rate": 0.00012898556645494325,
      "loss": 1.0291,
      "step": 340
    },
    {
      "epoch": 4.726027397260274,
      "grad_norm": 0.404296875,
      "learning_rate": 0.0001266892772786929,
      "loss": 1.0362,
      "step": 345
    },
    {
      "epoch": 4.794520547945205,
      "grad_norm": 0.34765625,
      "learning_rate": 0.00012437773264829897,
      "loss": 1.0442,
      "step": 350
    },
    {
      "epoch": 4.863013698630137,
      "grad_norm": 0.328125,
      "learning_rate": 0.00012205225383082843,
      "loss": 1.0357,
      "step": 355
    },
    {
      "epoch": 4.931506849315069,
      "grad_norm": 0.5703125,
      "learning_rate": 0.00011971417005805818,
      "loss": 1.0336,
      "step": 360
    },
    {
      "epoch": 5.0,
      "grad_norm": 0.359375,
      "learning_rate": 0.00011736481776669306,
      "loss": 1.0362,
      "step": 365
    },
    {
      "epoch": 5.0,
      "eval_loss": 2.540134906768799,
      "eval_runtime": 0.5557,
      "eval_samples_per_second": 17.997,
      "eval_steps_per_second": 1.8,
      "step": 365
    },
    {
      "epoch": 5.068493150684931,
      "grad_norm": 0.408203125,
      "learning_rate": 0.00011500553983446527,
      "loss": 0.9955,
      "step": 370
    },
    {
      "epoch": 5.136986301369863,
      "grad_norm": 0.375,
      "learning_rate": 0.00011263768481255264,
      "loss": 1.0022,
      "step": 375
    },
    {
      "epoch": 5.205479452054795,
      "grad_norm": 0.35546875,
      "learning_rate": 0.00011026260615475333,
      "loss": 1.0047,
      "step": 380
    },
    {
      "epoch": 5.273972602739726,
      "grad_norm": 0.388671875,
      "learning_rate": 0.00010788166144385888,
      "loss": 0.9985,
      "step": 385
    },
    {
      "epoch": 5.342465753424658,
      "grad_norm": 0.455078125,
      "learning_rate": 0.0001054962116156667,
      "loss": 1.002,
      "step": 390
    },
    {
      "epoch": 5.410958904109589,
      "grad_norm": 0.57421875,
      "learning_rate": 0.0001031076201810762,
      "loss": 1.0136,
      "step": 395
    },
    {
      "epoch": 5.47945205479452,
      "grad_norm": 0.39453125,
      "learning_rate": 0.00010071725244671282,
      "loss": 0.9965,
      "step": 400
    },
    {
      "epoch": 5.5479452054794525,
      "grad_norm": 0.431640625,
      "learning_rate": 9.83264747345259e-05,
      "loss": 1.0025,
      "step": 405
    },
    {
      "epoch": 5.616438356164384,
      "grad_norm": 0.474609375,
      "learning_rate": 9.593665360080599e-05,
      "loss": 1.0099,
      "step": 410
    },
    {
      "epoch": 5.684931506849315,
      "grad_norm": 0.42578125,
      "learning_rate": 9.354915505506839e-05,
      "loss": 1.0071,
      "step": 415
    },
    {
      "epoch": 5.7534246575342465,
      "grad_norm": 0.44921875,
      "learning_rate": 9.116534377924883e-05,
      "loss": 1.0035,
      "step": 420
    },
    {
      "epoch": 5.821917808219178,
      "grad_norm": 0.359375,
      "learning_rate": 8.878658234765858e-05,
      "loss": 0.9981,
      "step": 425
    },
    {
      "epoch": 5.890410958904109,
      "grad_norm": 0.384765625,
      "learning_rate": 8.641423044814374e-05,
      "loss": 1.0106,
      "step": 430
    },
    {
      "epoch": 5.958904109589041,
      "grad_norm": 0.396484375,
      "learning_rate": 8.404964410489485e-05,
      "loss": 1.0052,
      "step": 435
    },
    {
      "epoch": 6.0,
      "eval_loss": 2.5710809230804443,
      "eval_runtime": 0.5523,
      "eval_samples_per_second": 18.105,
      "eval_steps_per_second": 1.81,
      "step": 438
    },
    {
      "epoch": 6.027397260273973,
      "grad_norm": 0.373046875,
      "learning_rate": 8.169417490335007e-05,
      "loss": 0.9954,
      "step": 440
    },
    {
      "epoch": 6.095890410958904,
      "grad_norm": 0.37109375,
      "learning_rate": 7.934916921763628e-05,
      "loss": 0.9689,
      "step": 445
    },
    {
      "epoch": 6.164383561643835,
      "grad_norm": 0.37109375,
      "learning_rate": 7.701596744098818e-05,
      "loss": 0.9747,
      "step": 450
    },
    {
      "epoch": 6.232876712328767,
      "grad_norm": 0.36328125,
      "learning_rate": 7.469590321958662e-05,
      "loss": 0.9715,
      "step": 455
    },
    {
      "epoch": 6.301369863013699,
      "grad_norm": 0.5078125,
      "learning_rate": 7.239030269025311e-05,
      "loss": 0.9809,
      "step": 460
    },
    {
      "epoch": 6.36986301369863,
      "grad_norm": 0.484375,
      "learning_rate": 7.010048372243698e-05,
      "loss": 0.9735,
      "step": 465
    },
    {
      "epoch": 6.438356164383562,
      "grad_norm": 0.39453125,
      "learning_rate": 6.782775516492771e-05,
      "loss": 0.9895,
      "step": 470
    },
    {
      "epoch": 6.506849315068493,
      "grad_norm": 0.44140625,
      "learning_rate": 6.5573416097724e-05,
      "loss": 0.9805,
      "step": 475
    },
    {
      "epoch": 6.575342465753424,
      "grad_norm": 0.41015625,
      "learning_rate": 6.333875508948593e-05,
      "loss": 0.9865,
      "step": 480
    },
    {
      "epoch": 6.6438356164383565,
      "grad_norm": 0.52734375,
      "learning_rate": 6.112504946099604e-05,
      "loss": 0.9844,
      "step": 485
    },
    {
      "epoch": 6.712328767123288,
      "grad_norm": 0.373046875,
      "learning_rate": 5.8933564555049105e-05,
      "loss": 0.9775,
      "step": 490
    },
    {
      "epoch": 6.780821917808219,
      "grad_norm": 0.52734375,
      "learning_rate": 5.6765553013188766e-05,
      "loss": 0.9852,
      "step": 495
    },
    {
      "epoch": 6.8493150684931505,
      "grad_norm": 0.376953125,
      "learning_rate": 5.462225405970401e-05,
      "loss": 0.9741,
      "step": 500
    },
    {
      "epoch": 6.917808219178082,
      "grad_norm": 0.416015625,
      "learning_rate": 5.2504892793295e-05,
      "loss": 0.9851,
      "step": 505
    },
    {
      "epoch": 6.986301369863014,
      "grad_norm": 0.3828125,
      "learning_rate": 5.041467948681269e-05,
      "loss": 0.9813,
      "step": 510
    },
    {
      "epoch": 7.0,
      "eval_loss": 2.591195583343506,
      "eval_runtime": 0.5557,
      "eval_samples_per_second": 17.996,
      "eval_steps_per_second": 1.8,
      "step": 511
    },
    {
      "epoch": 7.054794520547945,
      "grad_norm": 0.3984375,
      "learning_rate": 4.835280889547351e-05,
      "loss": 0.9656,
      "step": 515
    },
    {
      "epoch": 7.123287671232877,
      "grad_norm": 0.37109375,
      "learning_rate": 4.6320459573942856e-05,
      "loss": 0.9546,
      "step": 520
    },
    {
      "epoch": 7.191780821917808,
      "grad_norm": 0.40625,
      "learning_rate": 4.431879320267972e-05,
      "loss": 0.9675,
      "step": 525
    },
    {
      "epoch": 7.260273972602739,
      "grad_norm": 0.41015625,
      "learning_rate": 4.2348953923925916e-05,
      "loss": 0.9552,
      "step": 530
    },
    {
      "epoch": 7.328767123287671,
      "grad_norm": 0.365234375,
      "learning_rate": 4.041206768772022e-05,
      "loss": 0.9671,
      "step": 535
    },
    {
      "epoch": 7.397260273972603,
      "grad_norm": 0.392578125,
      "learning_rate": 3.850924160831115e-05,
      "loss": 0.9625,
      "step": 540
    },
    {
      "epoch": 7.465753424657534,
      "grad_norm": 0.38671875,
      "learning_rate": 3.6641563331336125e-05,
      "loss": 0.9607,
      "step": 545
    },
    {
      "epoch": 7.534246575342466,
      "grad_norm": 0.37109375,
      "learning_rate": 3.4810100412128747e-05,
      "loss": 0.9566,
      "step": 550
    },
    {
      "epoch": 7.602739726027397,
      "grad_norm": 0.376953125,
      "learning_rate": 3.3015899705509734e-05,
      "loss": 0.9709,
      "step": 555
    },
    {
      "epoch": 7.671232876712329,
      "grad_norm": 0.404296875,
      "learning_rate": 3.125998676740987e-05,
      "loss": 0.9659,
      "step": 560
    },
    {
      "epoch": 7.739726027397261,
      "grad_norm": 0.37890625,
      "learning_rate": 2.9543365268667867e-05,
      "loss": 0.9582,
      "step": 565
    },
    {
      "epoch": 7.808219178082192,
      "grad_norm": 0.365234375,
      "learning_rate": 2.7867016421336776e-05,
      "loss": 0.9593,
      "step": 570
    },
    {
      "epoch": 7.876712328767123,
      "grad_norm": 0.375,
      "learning_rate": 2.6231898417828603e-05,
      "loss": 0.9619,
      "step": 575
    },
    {
      "epoch": 7.945205479452055,
      "grad_norm": 0.369140625,
      "learning_rate": 2.4638945883216235e-05,
      "loss": 0.9593,
      "step": 580
    },
    {
      "epoch": 8.0,
      "eval_loss": 2.613189220428467,
      "eval_runtime": 0.5557,
      "eval_samples_per_second": 17.996,
      "eval_steps_per_second": 1.8,
      "step": 584
    },
    {
      "epoch": 8.013698630136986,
      "grad_norm": 0.357421875,
      "learning_rate": 2.3089069341006565e-05,
      "loss": 0.954,
      "step": 585
    },
    {
      "epoch": 8.082191780821917,
      "grad_norm": 0.359375,
      "learning_rate": 2.1583154692689976e-05,
      "loss": 0.9542,
      "step": 590
    },
    {
      "epoch": 8.150684931506849,
      "grad_norm": 0.396484375,
      "learning_rate": 2.0122062711363532e-05,
      "loss": 0.9519,
      "step": 595
    },
    {
      "epoch": 8.219178082191782,
      "grad_norm": 0.361328125,
      "learning_rate": 1.8706628549717452e-05,
      "loss": 0.9478,
      "step": 600
    },
    {
      "epoch": 8.287671232876713,
      "grad_norm": 0.37109375,
      "learning_rate": 1.7337661262666294e-05,
      "loss": 0.9475,
      "step": 605
    },
    {
      "epoch": 8.356164383561644,
      "grad_norm": 0.373046875,
      "learning_rate": 1.601594334489702e-05,
      "loss": 0.9484,
      "step": 610
    },
    {
      "epoch": 8.424657534246576,
      "grad_norm": 0.37109375,
      "learning_rate": 1.474223028359939e-05,
      "loss": 0.9574,
      "step": 615
    },
    {
      "epoch": 8.493150684931507,
      "grad_norm": 0.37109375,
      "learning_rate": 1.3517250126632986e-05,
      "loss": 0.9537,
      "step": 620
    },
    {
      "epoch": 8.561643835616438,
      "grad_norm": 0.36328125,
      "learning_rate": 1.2341703066379074e-05,
      "loss": 0.9515,
      "step": 625
    },
    {
      "epoch": 8.63013698630137,
      "grad_norm": 0.376953125,
      "learning_rate": 1.1216261039514087e-05,
      "loss": 0.9459,
      "step": 630
    },
    {
      "epoch": 8.698630136986301,
      "grad_norm": 0.37109375,
      "learning_rate": 1.0141567342934132e-05,
      "loss": 0.9538,
      "step": 635
    },
    {
      "epoch": 8.767123287671232,
      "grad_norm": 0.359375,
      "learning_rate": 9.118236266049707e-06,
      "loss": 0.9437,
      "step": 640
    },
    {
      "epoch": 8.835616438356164,
      "grad_norm": 0.37109375,
      "learning_rate": 8.146852739661105e-06,
      "loss": 0.948,
      "step": 645
    },
    {
      "epoch": 8.904109589041095,
      "grad_norm": 0.369140625,
      "learning_rate": 7.2279720016148244e-06,
      "loss": 0.9595,
      "step": 650
    },
    {
      "epoch": 8.972602739726028,
      "grad_norm": 0.38671875,
      "learning_rate": 6.36211927943271e-06,
      "loss": 0.953,
      "step": 655
    },
    {
      "epoch": 9.0,
      "eval_loss": 2.61802339553833,
      "eval_runtime": 0.5528,
      "eval_samples_per_second": 18.091,
      "eval_steps_per_second": 1.809,
      "step": 657
    },
    {
      "epoch": 9.04109589041096,
      "grad_norm": 0.359375,
      "learning_rate": 5.549789490094304e-06,
      "loss": 0.9475,
      "step": 660
    },
    {
      "epoch": 9.10958904109589,
      "grad_norm": 0.369140625,
      "learning_rate": 4.79144695714504e-06,
      "loss": 0.9546,
      "step": 665
    },
    {
      "epoch": 9.178082191780822,
      "grad_norm": 0.390625,
      "learning_rate": 4.087525145291204e-06,
      "loss": 0.9509,
      "step": 670
    },
    {
      "epoch": 9.246575342465754,
      "grad_norm": 0.40234375,
      "learning_rate": 3.4384264126337328e-06,
      "loss": 0.9491,
      "step": 675
    },
    {
      "epoch": 9.315068493150685,
      "grad_norm": 0.353515625,
      "learning_rate": 2.8445217806824077e-06,
      "loss": 0.95,
      "step": 680
    },
    {
      "epoch": 9.383561643835616,
      "grad_norm": 0.359375,
      "learning_rate": 2.30615072228183e-06,
      "loss": 0.945,
      "step": 685
    },
    {
      "epoch": 9.452054794520548,
      "grad_norm": 0.359375,
      "learning_rate": 1.8236209675705274e-06,
      "loss": 0.9579,
      "step": 690
    },
    {
      "epoch": 9.520547945205479,
      "grad_norm": 0.369140625,
      "learning_rate": 1.397208328083921e-06,
      "loss": 0.9463,
      "step": 695
    },
    {
      "epoch": 9.58904109589041,
      "grad_norm": 0.357421875,
      "learning_rate": 1.0271565391018922e-06,
      "loss": 0.9501,
      "step": 700
    },
    {
      "epoch": 9.657534246575342,
      "grad_norm": 0.365234375,
      "learning_rate": 7.136771203310245e-07,
      "loss": 0.9471,
      "step": 705
    },
    {
      "epoch": 9.726027397260275,
      "grad_norm": 0.357421875,
      "learning_rate": 4.569492550008603e-07,
      "loss": 0.9428,
      "step": 710
    },
    {
      "epoch": 9.794520547945206,
      "grad_norm": 0.3515625,
      "learning_rate": 2.5711968744382974e-07,
      "loss": 0.941,
      "step": 715
    },
    {
      "epoch": 9.863013698630137,
      "grad_norm": 0.3671875,
      "learning_rate": 1.143026392168789e-07,
      "loss": 0.9407,
      "step": 720
    },
    {
      "epoch": 9.931506849315069,
      "grad_norm": 0.3671875,
      "learning_rate": 2.8579743813006432e-08,
      "loss": 0.9568,
      "step": 725
    },
    {
      "epoch": 10.0,
      "grad_norm": 0.365234375,
      "learning_rate": 0.0,
      "loss": 0.9482,
      "step": 730
    },
    {
      "epoch": 10.0,
      "eval_loss": 2.6190853118896484,
      "eval_runtime": 0.5494,
      "eval_samples_per_second": 18.201,
      "eval_steps_per_second": 1.82,
      "step": 730
    },
    {
      "epoch": 10.0,
      "step": 730,
      "total_flos": 4.3089359154446336e+17,
      "train_loss": 1.133717959547696,
      "train_runtime": 3901.5754,
      "train_samples_per_second": 8.981,
      "train_steps_per_second": 0.187
    }
  ],
  "logging_steps": 5,
  "max_steps": 730,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 4.3089359154446336e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}