{
  "best_metric": 3.553039312362671,
  "best_model_checkpoint": "output/kendrick-lamar/checkpoint-819",
  "epoch": 3.0,
  "global_step": 819,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "learning_rate": 0.0001370864757190084,
      "loss": 4.2722,
      "step": 5
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.00013674627861254597,
      "loss": 4.3361,
      "step": 10
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00013618053464655754,
      "loss": 4.2033,
      "step": 15
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00013539111628975924,
      "loss": 4.0514,
      "step": 20
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.0001343806363162431,
      "loss": 4.1327,
      "step": 25
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00013315243915785902,
      "loss": 3.96,
      "step": 30
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.00013171058983499535,
      "loss": 3.9217,
      "step": 35
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.0001300598605023948,
      "loss": 3.7032,
      "step": 40
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00012820571465453544,
      "loss": 3.9717,
      "step": 45
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00012615428904285254,
      "loss": 4.0279,
      "step": 50
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.0001239123733646515,
      "loss": 3.8994,
      "step": 55
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.0001214873877909356,
      "loss": 4.0263,
      "step": 60
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.00011888735840752609,
      "loss": 3.9161,
      "step": 65
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.00011612089065075853,
      "loss": 3.8483,
      "step": 70
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00011319714082567585,
      "loss": 3.8609,
      "step": 75
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.00011012578580098631,
      "loss": 3.9798,
      "step": 80
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.00010691699098108779,
      "loss": 3.7969,
      "step": 85
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.00010358137666116333,
      "loss": 3.9596,
      "step": 90
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.00010012998287670373,
      "loss": 4.1164,
      "step": 95
    },
    {
      "epoch": 0.37,
      "learning_rate": 9.657423286379646e-05,
      "loss": 3.7924,
      "step": 100
    },
    {
      "epoch": 0.38,
      "learning_rate": 9.292589525111794e-05,
      "loss": 4.0304,
      "step": 105
    },
    {
      "epoch": 0.4,
      "learning_rate": 8.91970451087642e-05,
      "loss": 3.8561,
      "step": 110
    },
    {
      "epoch": 0.42,
      "learning_rate": 8.540002398283833e-05,
      "loss": 3.8825,
      "step": 115
    },
    {
      "epoch": 0.44,
      "learning_rate": 8.154739904807008e-05,
      "loss": 3.8513,
      "step": 120
    },
    {
      "epoch": 0.46,
      "learning_rate": 7.765192151366211e-05,
      "loss": 3.752,
      "step": 125
    },
    {
      "epoch": 0.48,
      "learning_rate": 7.372648442002871e-05,
      "loss": 3.6579,
      "step": 130
    },
    {
      "epoch": 0.49,
      "learning_rate": 6.978407996610966e-06,
      "loss": 3.595,
      "step": 135
    },
    {
      "epoch": 0.51,
      "learning_rate": 6.583775650849414e-05,
      "loss": 3.8015,
      "step": 140
    },
    {
      "epoch": 0.53,
      "learning_rate": 6.190057537467733e-05,
      "loss": 3.756,
      "step": 145
    },
    {
      "epoch": 0.55,
      "learning_rate": 5.79855676333867e-05,
      "loss": 3.8087,
      "step": 150
    },
    {
      "epoch": 0.57,
      "learning_rate": 5.410569096505683e-05,
      "loss": 3.6749,
      "step": 155
    },
    {
      "epoch": 0.59,
      "learning_rate": 5.0273786775201065e-05,
      "loss": 3.9157,
      "step": 160
    },
    {
      "epoch": 0.6,
      "learning_rate": 4.6502537692623556e-05,
      "loss": 3.6094,
      "step": 165
    },
    {
      "epoch": 0.62,
      "learning_rate": 4.2804425593141775e-05,
      "loss": 3.8656,
      "step": 170
    },
    {
      "epoch": 0.64,
      "learning_rate": 3.9191690287750474e-05,
      "loss": 3.8787,
      "step": 175
    },
    {
      "epoch": 0.66,
      "learning_rate": 3.567628901195867e-05,
      "loss": 3.7476,
      "step": 180
    },
    {
      "epoch": 0.68,
      "learning_rate": 3.226985685037943e-05,
      "loss": 3.6491,
      "step": 185
    },
    {
      "epoch": 0.7,
      "learning_rate": 2.898366822755775e-05,
      "loss": 3.7995,
      "step": 190
    },
    {
      "epoch": 0.71,
      "learning_rate": 2.5828599592490882e-05,
      "loss": 3.8123,
      "step": 195
    },
    {
      "epoch": 0.73,
      "learning_rate": 2.2815093420347238e-05,
      "loss": 3.9104,
      "step": 200
    },
    {
      "epoch": 0.75,
      "learning_rate": 1.9953123650527866e-05,
      "loss": 3.8532,
      "step": 205
    },
    {
      "epoch": 0.77,
      "learning_rate": 1.725216267546246e-05,
      "loss": 3.7647,
      "step": 210
    },
    {
      "epoch": 0.79,
      "learning_rate": 1.472114998939829e-05,
      "loss": 3.6447,
      "step": 215
    },
    {
      "epoch": 0.81,
      "learning_rate": 1.2368462600946557e-05,
      "loss": 3.8278,
      "step": 220
    },
    {
      "epoch": 0.82,
      "learning_rate": 1.0201887307313513e-05,
      "loss": 3.7164,
      "step": 225
    },
    {
      "epoch": 0.84,
      "learning_rate": 8.228594921980851e-06,
      "loss": 3.7653,
      "step": 230
    },
    {
      "epoch": 0.86,
      "learning_rate": 6.455116541136077e-06,
      "loss": 3.6369,
      "step": 235
    },
    {
      "epoch": 0.88,
      "learning_rate": 4.88732192740426e-06,
      "loss": 3.8294,
      "step": 240
    },
    {
      "epoch": 0.9,
      "learning_rate": 3.53040008242582e-06,
      "loss": 3.9352,
      "step": 245
    },
    {
      "epoch": 0.92,
      "learning_rate": 2.3888420725801435e-06,
      "loss": 3.7973,
      "step": 250
    },
    {
      "epoch": 0.93,
      "learning_rate": 1.4664261646974657e-06,
      "loss": 3.892,
      "step": 255
    },
    {
      "epoch": 0.95,
      "learning_rate": 7.662053209561833e-07,
      "loss": 3.7137,
      "step": 260
    },
    {
      "epoch": 0.97,
      "learning_rate": 2.9049709435396624e-07,
      "loss": 3.7595,
      "step": 265
    },
    {
      "epoch": 0.99,
      "learning_rate": 4.087595819657002e-08,
      "loss": 3.7287,
      "step": 270
    },
    {
      "epoch": 1.0,
      "eval_loss": 3.6416754722595215,
      "eval_runtime": 10.1198,
      "eval_samples_per_second": 39.625,
      "eval_steps_per_second": 5.04,
      "step": 273
    },
    {
      "epoch": 1.01,
      "learning_rate": 1.816809499134402e-08,
      "loss": 3.6986,
      "step": 275
    },
    {
      "epoch": 1.03,
      "learning_rate": 2.2244866199319123e-07,
      "loss": 3.6869,
      "step": 280
    },
    {
      "epoch": 1.04,
      "learning_rate": 6.530415424531654e-07,
      "loss": 3.6129,
      "step": 285
    },
    {
      "epoch": 1.06,
      "learning_rate": 1.3085215833929946e-06,
      "loss": 3.6142,
      "step": 290
    },
    {
      "epoch": 1.08,
      "learning_rate": 2.1867193124992013e-06,
      "loss": 3.6029,
      "step": 295
    },
    {
      "epoch": 1.1,
      "learning_rate": 3.2847281185250116e-06,
      "loss": 3.5627,
      "step": 300
    },
    {
      "epoch": 1.12,
      "learning_rate": 4.598913871434634e-06,
      "loss": 3.4652,
      "step": 305
    },
    {
      "epoch": 1.14,
      "learning_rate": 6.124926950450006e-06,
      "loss": 3.632,
      "step": 310
    },
    {
      "epoch": 1.15,
      "learning_rate": 7.857716640189785e-06,
      "loss": 3.6625,
      "step": 315
    },
    {
      "epoch": 1.17,
      "learning_rate": 9.791547847253513e-06,
      "loss": 3.6645,
      "step": 320
    },
    {
      "epoch": 1.19,
      "learning_rate": 1.1920020081922749e-05,
      "loss": 3.6207,
      "step": 325
    },
    {
      "epoch": 1.21,
      "learning_rate": 1.4236088642155179e-05,
      "loss": 3.738,
      "step": 330
    },
    {
      "epoch": 1.23,
      "learning_rate": 1.6732087929757627e-05,
      "loss": 3.4442,
      "step": 335
    },
    {
      "epoch": 1.25,
      "learning_rate": 1.9399756821567315e-05,
      "loss": 3.7553,
      "step": 340
    },
    {
      "epoch": 1.26,
      "learning_rate": 2.223026601166984e-05,
      "loss": 3.5033,
      "step": 345
    },
    {
      "epoch": 1.28,
      "learning_rate": 2.5214247234157134e-05,
      "loss": 3.6562,
      "step": 350
    },
    {
      "epoch": 1.3,
      "learning_rate": 2.8341824269706243e-05,
      "loss": 3.7108,
      "step": 355
    },
    {
      "epoch": 1.32,
      "learning_rate": 3.1602645633354905e-05,
      "loss": 3.6002,
      "step": 360
    },
    {
      "epoch": 1.34,
      "learning_rate": 3.4985918835285396e-05,
      "loss": 3.6699,
      "step": 365
    },
    {
      "epoch": 1.36,
      "learning_rate": 3.848044610122224e-05,
      "loss": 3.6041,
      "step": 370
    },
    {
      "epoch": 1.37,
      "learning_rate": 4.207466143421867e-05,
      "loss": 3.5919,
      "step": 375
    },
    {
      "epoch": 1.39,
      "learning_rate": 4.5756668895166686e-05,
      "loss": 3.7467,
      "step": 380
    },
    {
      "epoch": 1.41,
      "learning_rate": 4.9514281975331363e-05,
      "loss": 3.5324,
      "step": 385
    },
    {
      "epoch": 1.43,
      "learning_rate": 5.333506393059682e-05,
      "loss": 3.6562,
      "step": 390
    },
    {
      "epoch": 1.45,
      "learning_rate": 5.720636894392822e-05,
      "loss": 3.6013,
      "step": 395
    },
    {
      "epoch": 1.47,
      "learning_rate": 6.11153839798114e-05,
      "loss": 3.6502,
      "step": 400
    },
    {
      "epoch": 1.48,
      "learning_rate": 6.504917119214327e-05,
      "loss": 3.469,
      "step": 405
    },
    {
      "epoch": 1.5,
      "learning_rate": 6.899471074521375e-05,
      "loss": 3.5713,
      "step": 410
    },
    {
      "epoch": 1.52,
      "learning_rate": 7.293894390605244e-05,
      "loss": 3.5885,
      "step": 415
    },
    {
      "epoch": 1.54,
      "learning_rate": 7.686881626551516e-05,
      "loss": 3.6544,
      "step": 420
    },
    {
      "epoch": 1.56,
      "learning_rate": 8.077132094505965e-05,
      "loss": 3.5478,
      "step": 425
    },
    {
      "epoch": 1.58,
      "learning_rate": 8.463354164620745e-05,
      "loss": 3.5412,
      "step": 430
    },
    {
      "epoch": 1.59,
      "learning_rate": 8.844269540020853e-05,
      "loss": 3.4616,
      "step": 435
    },
    {
      "epoch": 1.61,
      "learning_rate": 9.218617487641824e-05,
      "loss": 3.6015,
      "step": 440
    },
    {
      "epoch": 1.63,
      "learning_rate": 9.585159010935713e-05,
      "loss": 3.5197,
      "step": 445
    },
    {
      "epoch": 1.65,
      "learning_rate": 9.942680950634723e-05,
      "loss": 3.4169,
      "step": 450
    },
    {
      "epoch": 1.67,
      "learning_rate": 0.00010290000000000001,
      "loss": 3.6481,
      "step": 455
    },
    {
      "epoch": 1.68,
      "learning_rate": 0.00010625966621266112,
      "loss": 3.5313,
      "step": 460
    },
    {
      "epoch": 1.7,
      "learning_rate": 0.00010949468850318882,
      "loss": 3.5705,
      "step": 465
    },
    {
      "epoch": 1.72,
      "learning_rate": 0.00011259435977013931,
      "loss": 3.6504,
      "step": 470
    },
    {
      "epoch": 1.74,
      "learning_rate": 0.00011554842088955072,
      "loss": 3.5506,
      "step": 475
    },
    {
      "epoch": 1.76,
      "learning_rate": 0.00011834709467003562,
      "loss": 3.6447,
      "step": 480
    },
    {
      "epoch": 1.78,
      "learning_rate": 0.00012098111821279934,
      "loss": 3.6195,
      "step": 485
    },
    {
      "epoch": 1.79,
      "learning_rate": 0.0001234417735694802,
      "loss": 3.6632,
      "step": 490
    },
    {
      "epoch": 1.81,
      "learning_rate": 0.00012572091659634235,
      "loss": 3.4833,
      "step": 495
    },
    {
      "epoch": 1.83,
      "learning_rate": 0.0001278110039093217,
      "loss": 3.5942,
      "step": 500
    },
    {
      "epoch": 1.85,
      "learning_rate": 0.0001297051178507093,
      "loss": 3.599,
      "step": 505
    },
    {
      "epoch": 1.87,
      "learning_rate": 0.00013139698938484013,
      "loss": 3.6315,
      "step": 510
    },
    {
      "epoch": 1.89,
      "learning_rate": 0.00013288101884700735,
      "loss": 3.6382,
      "step": 515
    },
    {
      "epoch": 1.9,
      "learning_rate": 0.00013415229447692924,
      "loss": 3.6227,
      "step": 520
    },
    {
      "epoch": 1.92,
      "learning_rate": 0.00013520660867542716,
      "loss": 3.4538,
      "step": 525
    },
    {
      "epoch": 1.94,
      "learning_rate": 0.00013604047193050914,
      "loss": 3.4112,
      "step": 530
    },
    {
      "epoch": 1.96,
      "learning_rate": 0.00013665112436676765,
      "loss": 3.6708,
      "step": 535
    },
    {
      "epoch": 1.98,
      "learning_rate": 0.00013703654487986559,
      "loss": 3.5041,
      "step": 540
    },
    {
      "epoch": 2.0,
      "learning_rate": 0.00013719545782587793,
      "loss": 3.6424,
      "step": 545
    },
    {
      "epoch": 2.0,
      "eval_loss": 3.619663715362549,
      "eval_runtime": 10.1067,
      "eval_samples_per_second": 39.676,
      "eval_steps_per_second": 5.046,
      "step": 546
    },
    {
      "epoch": 2.01,
      "learning_rate": 0.0001371273372433488,
      "loss": 3.4712,
      "step": 550
    },
    {
      "epoch": 2.03,
      "learning_rate": 0.0001368324085940902,
      "loss": 3.4434,
      "step": 555
    },
    {
      "epoch": 2.05,
      "learning_rate": 0.00013631164801696085,
      "loss": 3.3224,
      "step": 560
    },
    {
      "epoch": 2.07,
      "learning_rate": 0.00013556677909709434,
      "loss": 3.3818,
      "step": 565
    },
    {
      "epoch": 2.09,
      "learning_rate": 0.000134600267161271,
      "loss": 3.3693,
      "step": 570
    },
    {
      "epoch": 2.11,
      "learning_rate": 0.00013341531111831246,
      "loss": 3.5516,
      "step": 575
    },
    {
      "epoch": 2.12,
      "learning_rate": 0.00013201583287150687,
      "loss": 3.3951,
      "step": 580
    },
    {
      "epoch": 2.14,
      "learning_rate": 0.00013040646433810595,
      "loss": 3.4224,
      "step": 585
    },
    {
      "epoch": 2.16,
      "learning_rate": 0.00012859253211885616,
      "loss": 3.4472,
      "step": 590
    },
    {
      "epoch": 2.18,
      "learning_rate": 0.00012658003986830435,
      "loss": 3.5578,
      "step": 595
    },
    {
      "epoch": 2.2,
      "learning_rate": 0.00012437564842422732,
      "loss": 3.3637,
      "step": 600
    },
    {
      "epoch": 2.22,
      "learning_rate": 0.00012198665376195207,
      "loss": 3.4449,
      "step": 605
    },
    {
      "epoch": 2.23,
      "learning_rate": 0.00011942096284653183,
      "loss": 3.4946,
      "step": 610
    },
    {
      "epoch": 2.25,
      "learning_rate": 0.00011668706746270184,
      "loss": 3.4818,
      "step": 615
    },
    {
      "epoch": 2.27,
      "learning_rate": 0.00011379401610923057,
      "loss": 3.4296,
      "step": 620
    },
    {
      "epoch": 2.29,
      "learning_rate": 0.00011075138405068863,
      "loss": 3.4033,
      "step": 625
    },
    {
      "epoch": 2.31,
      "learning_rate": 0.00010756924162575734,
      "loss": 3.3291,
      "step": 630
    },
    {
      "epoch": 2.33,
      "learning_rate": 0.00010425812091696729,
      "loss": 3.1571,
      "step": 635
    },
    {
      "epoch": 2.34,
      "learning_rate": 0.00010082898089218288,
      "loss": 3.5678,
      "step": 640
    },
    {
      "epoch": 2.36,
      "learning_rate": 9.72931711332038e-05,
      "loss": 3.3972,
      "step": 645
    },
    {
      "epoch": 2.38,
      "learning_rate": 9.36623942715347e-05,
      "loss": 3.3953,
      "step": 650
    },
    {
      "epoch": 2.4,
      "learning_rate": 8.99486672556498e-05,
      "loss": 3.446,
      "step": 655
    },
    {
      "epoch": 2.42,
      "learning_rate": 8.616428157794779e-05,
      "loss": 3.3233,
      "step": 660
    },
    {
      "epoch": 2.44,
      "learning_rate": 8.232176259303673e-05,
      "loss": 3.3524,
      "step": 665
    },
    {
      "epoch": 2.45,
      "learning_rate": 7.843382806199401e-05,
      "loss": 3.3982,
      "step": 670
    },
    {
      "epoch": 2.47,
      "learning_rate": 7.451334605981051e-05,
      "loss": 3.5036,
      "step": 675
    },
    {
      "epoch": 2.49,
      "learning_rate": 7.05732923853327e-05,
      "loss": 3.3534,
      "step": 680
    },
    {
      "epoch": 2.51,
      "learning_rate": 6.662670761466734e-05,
      "loss": 3.4981,
      "step": 685
    },
    {
      "epoch": 2.53,
      "learning_rate": 6.268665394018953e-05,
      "loss": 3.3561,
      "step": 690
    },
    {
      "epoch": 2.55,
      "learning_rate": 5.876617193800604e-05,
      "loss": 3.3629,
      "step": 695
    },
    {
      "epoch": 2.56,
      "learning_rate": 5.4878237406963316e-05,
      "loss": 3.478,
      "step": 700
    },
    {
      "epoch": 2.58,
      "learning_rate": 5.103571842205231e-05,
      "loss": 3.2104,
      "step": 705
    },
    {
      "epoch": 2.6,
      "learning_rate": 4.7251332744350254e-05,
      "loss": 3.5289,
      "step": 710
    },
    {
      "epoch": 2.62,
      "learning_rate": 4.3537605728465284e-05,
      "loss": 3.2297,
      "step": 715
    },
    {
      "epoch": 2.64,
      "learning_rate": 3.990682886679629e-05,
      "loss": 3.4077,
      "step": 720
    },
    {
      "epoch": 2.66,
      "learning_rate": 3.637101910781716e-05,
      "loss": 3.4158,
      "step": 725
    },
    {
      "epoch": 2.67,
      "learning_rate": 3.294187908303268e-05,
      "loss": 3.3786,
      "step": 730
    },
    {
      "epoch": 2.69,
      "learning_rate": 2.9630758374242683e-05,
      "loss": 3.4328,
      "step": 735
    },
    {
      "epoch": 2.71,
      "learning_rate": 2.6448615949311343e-05,
      "loss": 3.469,
      "step": 740
    },
    {
      "epoch": 2.73,
      "learning_rate": 2.340598389076947e-05,
      "loss": 3.3519,
      "step": 745
    },
    {
      "epoch": 2.75,
      "learning_rate": 2.051293253729814e-05,
      "loss": 3.2819,
      "step": 750
    },
    {
      "epoch": 2.77,
      "learning_rate": 1.7779037153468233e-05,
      "loss": 3.2787,
      "step": 755
    },
    {
      "epoch": 2.78,
      "learning_rate": 1.521334623804796e-05,
      "loss": 3.2639,
      "step": 760
    },
    {
      "epoch": 2.8,
      "learning_rate": 1.2824351575772677e-05,
      "loss": 3.3124,
      "step": 765
    },
    {
      "epoch": 2.82,
      "learning_rate": 1.0619960131695668e-05,
      "loss": 3.2562,
      "step": 770
    },
    {
      "epoch": 2.84,
      "learning_rate": 8.607467881143831e-06,
      "loss": 3.1959,
      "step": 775
    },
    {
      "epoch": 2.86,
      "learning_rate": 6.793535661894062e-06,
      "loss": 3.2322,
      "step": 780
    },
    {
      "epoch": 2.88,
      "learning_rate": 5.184167128493107e-06,
      "loss": 3.5785,
      "step": 785
    },
    {
      "epoch": 2.89,
      "learning_rate": 3.784688881687565e-06,
      "loss": 3.4374,
      "step": 790
    },
    {
      "epoch": 2.91,
      "learning_rate": 2.599732838729015e-06,
      "loss": 3.4859,
      "step": 795
    },
    {
      "epoch": 2.93,
      "learning_rate": 1.6332209029056513e-06,
      "loss": 3.396,
      "step": 800
    },
    {
      "epoch": 2.95,
      "learning_rate": 8.883519830391712e-07,
      "loss": 3.3425,
      "step": 805
    },
    {
      "epoch": 2.97,
      "learning_rate": 3.6759140590977833e-07,
      "loss": 3.241,
      "step": 810
    },
    {
      "epoch": 2.99,
      "learning_rate": 7.266275665120308e-08,
      "loss": 3.3081,
      "step": 815
    },
    {
      "epoch": 3.0,
      "eval_loss": 3.553039312362671,
      "eval_runtime": 10.0835,
      "eval_samples_per_second": 39.768,
      "eval_steps_per_second": 5.058,
      "step": 819
    }
  ],
  "max_steps": 819,
  "num_train_epochs": 3,
  "total_flos": 855992696832000.0,
  "trial_name": null,
  "trial_params": null
}
|
|