Commit d5dd255 by Fred: 180K steps (WandB run: https://wandb.ai/cmu-11785-s24-sg06/huggingface/runs/3060wpp0 at 80K steps). File size: 145 kB.
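The JSON below is the trainer state saved at this checkpoint: log_history interleaves training entries (keyed by step, with loss, grad_norm, and learning_rate) and evaluation entries (with eval_loss, eval_runtime, and throughput fields). As a minimal sketch, assuming the file is saved locally as trainer_state.json and that matplotlib is available (neither is stated here), the two series can be separated and plotted like this:

```python
# Minimal sketch: split the Trainer log_history into training-loss and
# eval-loss series by global step and plot both curves.
# Assumptions (not stated in this file): it is saved locally as
# "trainer_state.json", and matplotlib is installed.
import json

import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

train_steps, train_loss = [], []
eval_steps, eval_loss = [], []
for entry in state["log_history"]:
    if "loss" in entry:          # training log entry
        train_steps.append(entry["step"])
        train_loss.append(entry["loss"])
    elif "eval_loss" in entry:   # evaluation log entry
        eval_steps.append(entry["step"])
        eval_loss.append(entry["eval_loss"])

plt.plot(train_steps, train_loss, label="train loss")
plt.plot(eval_steps, eval_loss, label="eval loss")
plt.xlabel("global step")
plt.ylabel("loss")
plt.legend()
plt.show()
```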
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 1000,
"global_step": 80000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"grad_norm": 10.401179313659668,
"learning_rate": 9.900000000000002e-06,
"loss": 0.7996,
"step": 100
},
{
"epoch": 0.01,
"grad_norm": 1.4217194318771362,
"learning_rate": 1.9900000000000003e-05,
"loss": 0.8501,
"step": 200
},
{
"epoch": 0.01,
"grad_norm": 11.115049362182617,
"learning_rate": 2.9900000000000002e-05,
"loss": 0.8403,
"step": 300
},
{
"epoch": 0.01,
"grad_norm": 4.1231770515441895,
"learning_rate": 3.99e-05,
"loss": 0.8814,
"step": 400
},
{
"epoch": 0.01,
"grad_norm": 3.6332690715789795,
"learning_rate": 4.9800000000000004e-05,
"loss": 0.8949,
"step": 500
},
{
"epoch": 0.01,
"grad_norm": 5.6715312004089355,
"learning_rate": 4.9950753768844225e-05,
"loss": 0.841,
"step": 600
},
{
"epoch": 0.02,
"grad_norm": 4.035426139831543,
"learning_rate": 4.990050251256282e-05,
"loss": 0.9221,
"step": 700
},
{
"epoch": 0.02,
"grad_norm": 3.536616325378418,
"learning_rate": 4.985025125628141e-05,
"loss": 0.8898,
"step": 800
},
{
"epoch": 0.02,
"grad_norm": 4.088434219360352,
"learning_rate": 4.9800000000000004e-05,
"loss": 0.97,
"step": 900
},
{
"epoch": 0.03,
"grad_norm": 14.10112190246582,
"learning_rate": 4.974974874371859e-05,
"loss": 0.9325,
"step": 1000
},
{
"epoch": 0.03,
"eval_loss": 0.9976623058319092,
"eval_runtime": 93.1357,
"eval_samples_per_second": 10.737,
"eval_steps_per_second": 2.684,
"step": 1000
},
{
"epoch": 0.03,
"grad_norm": 13.908395767211914,
"learning_rate": 4.969949748743719e-05,
"loss": 0.9053,
"step": 1100
},
{
"epoch": 0.03,
"grad_norm": 7.617687702178955,
"learning_rate": 4.9649246231155784e-05,
"loss": 0.8859,
"step": 1200
},
{
"epoch": 0.03,
"grad_norm": 7.366157054901123,
"learning_rate": 4.959899497487438e-05,
"loss": 0.9627,
"step": 1300
},
{
"epoch": 0.04,
"grad_norm": 3.9606947898864746,
"learning_rate": 4.9548743718592964e-05,
"loss": 0.8525,
"step": 1400
},
{
"epoch": 0.04,
"grad_norm": 2.4160988330841064,
"learning_rate": 4.949849246231156e-05,
"loss": 0.9759,
"step": 1500
},
{
"epoch": 0.04,
"grad_norm": 4.402566909790039,
"learning_rate": 4.944824120603016e-05,
"loss": 0.9102,
"step": 1600
},
{
"epoch": 0.04,
"grad_norm": 3.5195939540863037,
"learning_rate": 4.9397989949748743e-05,
"loss": 0.9614,
"step": 1700
},
{
"epoch": 0.04,
"grad_norm": 9.411144256591797,
"learning_rate": 4.934773869346734e-05,
"loss": 1.0199,
"step": 1800
},
{
"epoch": 0.05,
"grad_norm": 2.964963912963867,
"learning_rate": 4.929748743718593e-05,
"loss": 0.9602,
"step": 1900
},
{
"epoch": 0.05,
"grad_norm": 14.135363578796387,
"learning_rate": 4.924723618090453e-05,
"loss": 0.9442,
"step": 2000
},
{
"epoch": 0.05,
"eval_loss": 1.0027893781661987,
"eval_runtime": 93.2106,
"eval_samples_per_second": 10.728,
"eval_steps_per_second": 2.682,
"step": 2000
},
{
"epoch": 0.05,
"grad_norm": 1.2493922710418701,
"learning_rate": 4.9196984924623116e-05,
"loss": 0.9231,
"step": 2100
},
{
"epoch": 0.06,
"grad_norm": 7.772951126098633,
"learning_rate": 4.914673366834171e-05,
"loss": 0.9684,
"step": 2200
},
{
"epoch": 0.06,
"grad_norm": 3.6931562423706055,
"learning_rate": 4.90964824120603e-05,
"loss": 0.9534,
"step": 2300
},
{
"epoch": 0.06,
"grad_norm": 2.797088384628296,
"learning_rate": 4.9046231155778896e-05,
"loss": 0.96,
"step": 2400
},
{
"epoch": 0.06,
"grad_norm": 5.056826591491699,
"learning_rate": 4.899597989949749e-05,
"loss": 0.9278,
"step": 2500
},
{
"epoch": 0.07,
"grad_norm": 2.841571092605591,
"learning_rate": 4.894572864321608e-05,
"loss": 0.9642,
"step": 2600
},
{
"epoch": 0.07,
"grad_norm": 5.591983795166016,
"learning_rate": 4.8895477386934676e-05,
"loss": 0.9236,
"step": 2700
},
{
"epoch": 0.07,
"grad_norm": 4.040654182434082,
"learning_rate": 4.884522613065327e-05,
"loss": 0.891,
"step": 2800
},
{
"epoch": 0.07,
"grad_norm": 4.5285820960998535,
"learning_rate": 4.879497487437186e-05,
"loss": 0.9275,
"step": 2900
},
{
"epoch": 0.07,
"grad_norm": 3.6710784435272217,
"learning_rate": 4.8744723618090456e-05,
"loss": 0.9237,
"step": 3000
},
{
"epoch": 0.07,
"eval_loss": 1.0115267038345337,
"eval_runtime": 93.1331,
"eval_samples_per_second": 10.737,
"eval_steps_per_second": 2.684,
"step": 3000
},
{
"epoch": 0.08,
"grad_norm": 2.3687329292297363,
"learning_rate": 4.869447236180905e-05,
"loss": 0.9455,
"step": 3100
},
{
"epoch": 0.08,
"grad_norm": 11.586736679077148,
"learning_rate": 4.864422110552764e-05,
"loss": 0.964,
"step": 3200
},
{
"epoch": 0.08,
"grad_norm": 5.422958850860596,
"learning_rate": 4.8593969849246235e-05,
"loss": 0.9705,
"step": 3300
},
{
"epoch": 0.09,
"grad_norm": 2.363290548324585,
"learning_rate": 4.854371859296483e-05,
"loss": 0.9341,
"step": 3400
},
{
"epoch": 0.09,
"grad_norm": 1.7168620824813843,
"learning_rate": 4.8493467336683415e-05,
"loss": 0.9739,
"step": 3500
},
{
"epoch": 0.09,
"grad_norm": 2.3149633407592773,
"learning_rate": 4.844321608040201e-05,
"loss": 0.9258,
"step": 3600
},
{
"epoch": 0.09,
"grad_norm": 2.771425485610962,
"learning_rate": 4.839296482412061e-05,
"loss": 0.9374,
"step": 3700
},
{
"epoch": 0.1,
"grad_norm": 2.6544713973999023,
"learning_rate": 4.83427135678392e-05,
"loss": 0.9557,
"step": 3800
},
{
"epoch": 0.1,
"grad_norm": 2.7681453227996826,
"learning_rate": 4.829246231155779e-05,
"loss": 0.9842,
"step": 3900
},
{
"epoch": 0.1,
"grad_norm": 2.4862678050994873,
"learning_rate": 4.824221105527638e-05,
"loss": 0.9888,
"step": 4000
},
{
"epoch": 0.1,
"eval_loss": 1.0101147890090942,
"eval_runtime": 93.0664,
"eval_samples_per_second": 10.745,
"eval_steps_per_second": 2.686,
"step": 4000
},
{
"epoch": 0.1,
"grad_norm": 4.182258129119873,
"learning_rate": 4.819195979899498e-05,
"loss": 0.977,
"step": 4100
},
{
"epoch": 0.1,
"grad_norm": 4.485065937042236,
"learning_rate": 4.814170854271357e-05,
"loss": 0.9598,
"step": 4200
},
{
"epoch": 0.11,
"grad_norm": 2.235346555709839,
"learning_rate": 4.809145728643216e-05,
"loss": 0.9888,
"step": 4300
},
{
"epoch": 0.11,
"grad_norm": 4.2939252853393555,
"learning_rate": 4.8041206030150754e-05,
"loss": 0.9336,
"step": 4400
},
{
"epoch": 0.11,
"grad_norm": 5.683385372161865,
"learning_rate": 4.7991457286432165e-05,
"loss": 0.9724,
"step": 4500
},
{
"epoch": 0.12,
"grad_norm": 6.78421688079834,
"learning_rate": 4.794120603015075e-05,
"loss": 1.0393,
"step": 4600
},
{
"epoch": 0.12,
"grad_norm": 2.5248563289642334,
"learning_rate": 4.789095477386935e-05,
"loss": 0.9658,
"step": 4700
},
{
"epoch": 0.12,
"grad_norm": 2.3800323009490967,
"learning_rate": 4.7840703517587945e-05,
"loss": 0.959,
"step": 4800
},
{
"epoch": 0.12,
"grad_norm": 2.4079301357269287,
"learning_rate": 4.779045226130653e-05,
"loss": 0.9631,
"step": 4900
},
{
"epoch": 0.12,
"grad_norm": 3.746002674102783,
"learning_rate": 4.7740201005025125e-05,
"loss": 0.9662,
"step": 5000
},
{
"epoch": 0.12,
"eval_loss": 1.036652684211731,
"eval_runtime": 93.1422,
"eval_samples_per_second": 10.736,
"eval_steps_per_second": 2.684,
"step": 5000
},
{
"epoch": 0.13,
"grad_norm": 2.498887777328491,
"learning_rate": 4.7689949748743725e-05,
"loss": 0.9513,
"step": 5100
},
{
"epoch": 0.13,
"grad_norm": 3.1052420139312744,
"learning_rate": 4.763969849246232e-05,
"loss": 0.8982,
"step": 5200
},
{
"epoch": 0.13,
"grad_norm": 3.442906618118286,
"learning_rate": 4.7589447236180904e-05,
"loss": 0.9924,
"step": 5300
},
{
"epoch": 0.14,
"grad_norm": 4.208202838897705,
"learning_rate": 4.75391959798995e-05,
"loss": 0.9818,
"step": 5400
},
{
"epoch": 0.14,
"grad_norm": 4.999510288238525,
"learning_rate": 4.74889447236181e-05,
"loss": 0.9401,
"step": 5500
},
{
"epoch": 0.14,
"grad_norm": 5.245172023773193,
"learning_rate": 4.7438693467336684e-05,
"loss": 0.8937,
"step": 5600
},
{
"epoch": 0.14,
"grad_norm": 3.1338207721710205,
"learning_rate": 4.738844221105528e-05,
"loss": 1.0115,
"step": 5700
},
{
"epoch": 0.14,
"grad_norm": 2.9757256507873535,
"learning_rate": 4.733819095477387e-05,
"loss": 0.9858,
"step": 5800
},
{
"epoch": 0.15,
"grad_norm": 2.5199265480041504,
"learning_rate": 4.7287939698492464e-05,
"loss": 0.8944,
"step": 5900
},
{
"epoch": 0.15,
"grad_norm": 8.123644828796387,
"learning_rate": 4.723768844221106e-05,
"loss": 0.9412,
"step": 6000
},
{
"epoch": 0.15,
"eval_loss": 1.0194703340530396,
"eval_runtime": 93.1579,
"eval_samples_per_second": 10.734,
"eval_steps_per_second": 2.684,
"step": 6000
},
{
"epoch": 0.15,
"grad_norm": 14.286507606506348,
"learning_rate": 4.718793969849247e-05,
"loss": 0.974,
"step": 6100
},
{
"epoch": 0.15,
"grad_norm": 3.9008524417877197,
"learning_rate": 4.713768844221106e-05,
"loss": 0.9219,
"step": 6200
},
{
"epoch": 0.16,
"grad_norm": 4.235304832458496,
"learning_rate": 4.708743718592965e-05,
"loss": 0.991,
"step": 6300
},
{
"epoch": 0.16,
"grad_norm": 4.351159572601318,
"learning_rate": 4.703718592964824e-05,
"loss": 0.9695,
"step": 6400
},
{
"epoch": 0.16,
"grad_norm": 3.415191173553467,
"learning_rate": 4.698693467336684e-05,
"loss": 0.935,
"step": 6500
},
{
"epoch": 0.17,
"grad_norm": 3.367522954940796,
"learning_rate": 4.693668341708543e-05,
"loss": 0.9689,
"step": 6600
},
{
"epoch": 0.17,
"grad_norm": 3.1978042125701904,
"learning_rate": 4.688643216080402e-05,
"loss": 0.9833,
"step": 6700
},
{
"epoch": 0.17,
"grad_norm": 2.780592203140259,
"learning_rate": 4.6836180904522614e-05,
"loss": 0.9399,
"step": 6800
},
{
"epoch": 0.17,
"grad_norm": 7.248663902282715,
"learning_rate": 4.678592964824121e-05,
"loss": 0.9534,
"step": 6900
},
{
"epoch": 0.17,
"grad_norm": 5.009815692901611,
"learning_rate": 4.67356783919598e-05,
"loss": 0.9137,
"step": 7000
},
{
"epoch": 0.17,
"eval_loss": 1.0109317302703857,
"eval_runtime": 93.119,
"eval_samples_per_second": 10.739,
"eval_steps_per_second": 2.685,
"step": 7000
},
{
"epoch": 0.18,
"grad_norm": 3.278932571411133,
"learning_rate": 4.6685427135678394e-05,
"loss": 0.8946,
"step": 7100
},
{
"epoch": 0.18,
"grad_norm": 2.084470272064209,
"learning_rate": 4.663517587939699e-05,
"loss": 0.9796,
"step": 7200
},
{
"epoch": 0.18,
"grad_norm": 4.415618896484375,
"learning_rate": 4.658492462311558e-05,
"loss": 0.9679,
"step": 7300
},
{
"epoch": 0.18,
"grad_norm": 2.9793307781219482,
"learning_rate": 4.6534673366834174e-05,
"loss": 0.9292,
"step": 7400
},
{
"epoch": 0.19,
"grad_norm": 3.963541030883789,
"learning_rate": 4.648442211055277e-05,
"loss": 0.9598,
"step": 7500
},
{
"epoch": 0.19,
"grad_norm": 2.6826908588409424,
"learning_rate": 4.643417085427136e-05,
"loss": 0.9775,
"step": 7600
},
{
"epoch": 0.19,
"grad_norm": 4.453563213348389,
"learning_rate": 4.6383919597989947e-05,
"loss": 0.9371,
"step": 7700
},
{
"epoch": 0.2,
"grad_norm": 5.058444976806641,
"learning_rate": 4.6333668341708547e-05,
"loss": 0.9613,
"step": 7800
},
{
"epoch": 0.2,
"grad_norm": 4.122437000274658,
"learning_rate": 4.628341708542714e-05,
"loss": 0.8921,
"step": 7900
},
{
"epoch": 0.2,
"grad_norm": 5.587231636047363,
"learning_rate": 4.623316582914573e-05,
"loss": 0.9731,
"step": 8000
},
{
"epoch": 0.2,
"eval_loss": 1.0011775493621826,
"eval_runtime": 93.123,
"eval_samples_per_second": 10.738,
"eval_steps_per_second": 2.685,
"step": 8000
},
{
"epoch": 0.2,
"grad_norm": 4.181956768035889,
"learning_rate": 4.618291457286432e-05,
"loss": 0.908,
"step": 8100
},
{
"epoch": 0.2,
"grad_norm": 9.223557472229004,
"learning_rate": 4.613266331658292e-05,
"loss": 0.9867,
"step": 8200
},
{
"epoch": 0.21,
"grad_norm": 3.491239547729492,
"learning_rate": 4.608241206030151e-05,
"loss": 0.8887,
"step": 8300
},
{
"epoch": 0.21,
"grad_norm": 5.799344062805176,
"learning_rate": 4.60321608040201e-05,
"loss": 0.9222,
"step": 8400
},
{
"epoch": 0.21,
"grad_norm": 3.5601634979248047,
"learning_rate": 4.598190954773869e-05,
"loss": 0.9504,
"step": 8500
},
{
"epoch": 0.21,
"grad_norm": 8.209439277648926,
"learning_rate": 4.593165829145729e-05,
"loss": 0.9304,
"step": 8600
},
{
"epoch": 0.22,
"grad_norm": 4.197356700897217,
"learning_rate": 4.5881407035175886e-05,
"loss": 0.9772,
"step": 8700
},
{
"epoch": 0.22,
"grad_norm": 6.555203914642334,
"learning_rate": 4.583115577889447e-05,
"loss": 0.9359,
"step": 8800
},
{
"epoch": 0.22,
"grad_norm": 7.102870464324951,
"learning_rate": 4.5780904522613065e-05,
"loss": 0.9311,
"step": 8900
},
{
"epoch": 0.23,
"grad_norm": 2.195941209793091,
"learning_rate": 4.5731155778894477e-05,
"loss": 0.933,
"step": 9000
},
{
"epoch": 0.23,
"eval_loss": 0.9800053834915161,
"eval_runtime": 93.1242,
"eval_samples_per_second": 10.738,
"eval_steps_per_second": 2.685,
"step": 9000
},
{
"epoch": 0.23,
"grad_norm": 3.8922510147094727,
"learning_rate": 4.568090452261306e-05,
"loss": 0.9445,
"step": 9100
},
{
"epoch": 0.23,
"grad_norm": 4.203242778778076,
"learning_rate": 4.5630653266331656e-05,
"loss": 0.9522,
"step": 9200
},
{
"epoch": 0.23,
"grad_norm": 7.515634059906006,
"learning_rate": 4.5580402010050256e-05,
"loss": 0.8782,
"step": 9300
},
{
"epoch": 0.23,
"grad_norm": 2.365694999694824,
"learning_rate": 4.553015075376885e-05,
"loss": 0.9633,
"step": 9400
},
{
"epoch": 0.24,
"grad_norm": 4.682378768920898,
"learning_rate": 4.5479899497487436e-05,
"loss": 0.9561,
"step": 9500
},
{
"epoch": 0.24,
"grad_norm": 3.9565162658691406,
"learning_rate": 4.542964824120603e-05,
"loss": 0.9423,
"step": 9600
},
{
"epoch": 0.24,
"grad_norm": 7.875938892364502,
"learning_rate": 4.537939698492463e-05,
"loss": 0.9658,
"step": 9700
},
{
"epoch": 0.24,
"grad_norm": 3.533388376235962,
"learning_rate": 4.5329145728643216e-05,
"loss": 0.9705,
"step": 9800
},
{
"epoch": 0.25,
"grad_norm": 2.1741228103637695,
"learning_rate": 4.527889447236181e-05,
"loss": 0.9221,
"step": 9900
},
{
"epoch": 0.25,
"grad_norm": 6.080275535583496,
"learning_rate": 4.52286432160804e-05,
"loss": 0.9519,
"step": 10000
},
{
"epoch": 0.25,
"eval_loss": 0.9981206059455872,
"eval_runtime": 93.1465,
"eval_samples_per_second": 10.736,
"eval_steps_per_second": 2.684,
"step": 10000
},
{
"epoch": 0.25,
"grad_norm": 9.127328872680664,
"learning_rate": 4.5178391959799e-05,
"loss": 0.9187,
"step": 10100
},
{
"epoch": 0.26,
"grad_norm": 4.8638386726379395,
"learning_rate": 4.512814070351759e-05,
"loss": 0.9728,
"step": 10200
},
{
"epoch": 0.26,
"grad_norm": 3.1481430530548096,
"learning_rate": 4.507788944723618e-05,
"loss": 0.9145,
"step": 10300
},
{
"epoch": 0.26,
"grad_norm": 6.514681339263916,
"learning_rate": 4.5027638190954775e-05,
"loss": 0.9938,
"step": 10400
},
{
"epoch": 0.26,
"grad_norm": 3.8488306999206543,
"learning_rate": 4.497738693467337e-05,
"loss": 0.9035,
"step": 10500
},
{
"epoch": 0.27,
"grad_norm": 2.711252450942993,
"learning_rate": 4.492713567839196e-05,
"loss": 0.9622,
"step": 10600
},
{
"epoch": 0.27,
"grad_norm": 6.755681991577148,
"learning_rate": 4.4876884422110555e-05,
"loss": 0.9208,
"step": 10700
},
{
"epoch": 0.27,
"grad_norm": 3.9941232204437256,
"learning_rate": 4.482663316582915e-05,
"loss": 0.9512,
"step": 10800
},
{
"epoch": 0.27,
"grad_norm": 5.439061641693115,
"learning_rate": 4.477638190954774e-05,
"loss": 0.9231,
"step": 10900
},
{
"epoch": 0.28,
"grad_norm": 8.338215827941895,
"learning_rate": 4.4726130653266335e-05,
"loss": 0.9294,
"step": 11000
},
{
"epoch": 0.28,
"eval_loss": 0.9993205666542053,
"eval_runtime": 93.1743,
"eval_samples_per_second": 10.733,
"eval_steps_per_second": 2.683,
"step": 11000
},
{
"epoch": 0.28,
"grad_norm": 3.963146924972534,
"learning_rate": 4.467587939698493e-05,
"loss": 0.9412,
"step": 11100
},
{
"epoch": 0.28,
"grad_norm": 7.156172752380371,
"learning_rate": 4.462613065326633e-05,
"loss": 0.8706,
"step": 11200
},
{
"epoch": 0.28,
"grad_norm": 6.4324049949646,
"learning_rate": 4.4575879396984925e-05,
"loss": 0.8719,
"step": 11300
},
{
"epoch": 0.28,
"grad_norm": 4.914628982543945,
"learning_rate": 4.452562814070352e-05,
"loss": 0.9558,
"step": 11400
},
{
"epoch": 0.29,
"grad_norm": 3.894350290298462,
"learning_rate": 4.447537688442211e-05,
"loss": 0.8914,
"step": 11500
},
{
"epoch": 0.29,
"grad_norm": 2.2890913486480713,
"learning_rate": 4.4425125628140705e-05,
"loss": 0.8934,
"step": 11600
},
{
"epoch": 0.29,
"grad_norm": 5.0195631980896,
"learning_rate": 4.43748743718593e-05,
"loss": 0.9633,
"step": 11700
},
{
"epoch": 0.29,
"grad_norm": 3.1142818927764893,
"learning_rate": 4.432462311557789e-05,
"loss": 0.9841,
"step": 11800
},
{
"epoch": 0.3,
"grad_norm": 6.496797561645508,
"learning_rate": 4.4274371859296485e-05,
"loss": 0.9646,
"step": 11900
},
{
"epoch": 0.3,
"grad_norm": 2.038761854171753,
"learning_rate": 4.422412060301508e-05,
"loss": 0.9719,
"step": 12000
},
{
"epoch": 0.3,
"eval_loss": 0.9653480648994446,
"eval_runtime": 93.1876,
"eval_samples_per_second": 10.731,
"eval_steps_per_second": 2.683,
"step": 12000
},
{
"epoch": 0.3,
"grad_norm": 2.2382993698120117,
"learning_rate": 4.417386934673367e-05,
"loss": 0.8572,
"step": 12100
},
{
"epoch": 0.3,
"grad_norm": 3.0564868450164795,
"learning_rate": 4.4123618090452265e-05,
"loss": 0.8978,
"step": 12200
},
{
"epoch": 0.31,
"grad_norm": 3.215782880783081,
"learning_rate": 4.407336683417085e-05,
"loss": 0.9181,
"step": 12300
},
{
"epoch": 0.31,
"grad_norm": 5.327606201171875,
"learning_rate": 4.402311557788945e-05,
"loss": 0.9393,
"step": 12400
},
{
"epoch": 0.31,
"grad_norm": 5.560494422912598,
"learning_rate": 4.3972864321608044e-05,
"loss": 0.935,
"step": 12500
},
{
"epoch": 0.32,
"grad_norm": 7.394079208374023,
"learning_rate": 4.392261306532664e-05,
"loss": 0.9092,
"step": 12600
},
{
"epoch": 0.32,
"grad_norm": 4.927297592163086,
"learning_rate": 4.3872361809045224e-05,
"loss": 0.9546,
"step": 12700
},
{
"epoch": 0.32,
"grad_norm": 3.5621089935302734,
"learning_rate": 4.3822110552763824e-05,
"loss": 0.9083,
"step": 12800
},
{
"epoch": 0.32,
"grad_norm": 4.207425117492676,
"learning_rate": 4.377185929648242e-05,
"loss": 0.9032,
"step": 12900
},
{
"epoch": 0.33,
"grad_norm": 4.688506603240967,
"learning_rate": 4.3721608040201004e-05,
"loss": 0.9435,
"step": 13000
},
{
"epoch": 0.33,
"eval_loss": 0.9503098726272583,
"eval_runtime": 93.1942,
"eval_samples_per_second": 10.73,
"eval_steps_per_second": 2.683,
"step": 13000
},
{
"epoch": 0.33,
"grad_norm": 6.992246627807617,
"learning_rate": 4.36713567839196e-05,
"loss": 0.9035,
"step": 13100
},
{
"epoch": 0.33,
"grad_norm": 2.7571117877960205,
"learning_rate": 4.36211055276382e-05,
"loss": 0.9447,
"step": 13200
},
{
"epoch": 0.33,
"grad_norm": 8.692018508911133,
"learning_rate": 4.3570854271356784e-05,
"loss": 0.9178,
"step": 13300
},
{
"epoch": 0.34,
"grad_norm": 6.485171318054199,
"learning_rate": 4.352060301507538e-05,
"loss": 0.9569,
"step": 13400
},
{
"epoch": 0.34,
"grad_norm": 6.329054832458496,
"learning_rate": 4.347035175879397e-05,
"loss": 0.9536,
"step": 13500
},
{
"epoch": 0.34,
"grad_norm": 3.318054437637329,
"learning_rate": 4.342010050251257e-05,
"loss": 0.9361,
"step": 13600
},
{
"epoch": 0.34,
"grad_norm": 2.6547791957855225,
"learning_rate": 4.3369849246231156e-05,
"loss": 0.9388,
"step": 13700
},
{
"epoch": 0.34,
"grad_norm": 3.93754243850708,
"learning_rate": 4.331959798994975e-05,
"loss": 0.9193,
"step": 13800
},
{
"epoch": 0.35,
"grad_norm": 3.3440163135528564,
"learning_rate": 4.326934673366834e-05,
"loss": 0.9142,
"step": 13900
},
{
"epoch": 0.35,
"grad_norm": 3.7675180435180664,
"learning_rate": 4.3219095477386936e-05,
"loss": 0.8883,
"step": 14000
},
{
"epoch": 0.35,
"eval_loss": 0.9775914549827576,
"eval_runtime": 93.1814,
"eval_samples_per_second": 10.732,
"eval_steps_per_second": 2.683,
"step": 14000
},
{
"epoch": 0.35,
"grad_norm": 3.446436643600464,
"learning_rate": 4.316884422110553e-05,
"loss": 0.8074,
"step": 14100
},
{
"epoch": 0.35,
"grad_norm": 7.831523895263672,
"learning_rate": 4.311859296482412e-05,
"loss": 0.9417,
"step": 14200
},
{
"epoch": 0.36,
"grad_norm": 7.415013790130615,
"learning_rate": 4.3068341708542716e-05,
"loss": 0.9201,
"step": 14300
},
{
"epoch": 0.36,
"grad_norm": 4.4936933517456055,
"learning_rate": 4.301809045226131e-05,
"loss": 0.9759,
"step": 14400
},
{
"epoch": 0.36,
"grad_norm": 2.7617504596710205,
"learning_rate": 4.29678391959799e-05,
"loss": 0.9495,
"step": 14500
},
{
"epoch": 0.36,
"grad_norm": 3.939483404159546,
"learning_rate": 4.2917587939698496e-05,
"loss": 0.9234,
"step": 14600
},
{
"epoch": 0.37,
"grad_norm": 2.7411184310913086,
"learning_rate": 4.286733668341709e-05,
"loss": 0.8905,
"step": 14700
},
{
"epoch": 0.37,
"grad_norm": 3.768512010574341,
"learning_rate": 4.2817085427135675e-05,
"loss": 0.9213,
"step": 14800
},
{
"epoch": 0.37,
"grad_norm": 4.408779621124268,
"learning_rate": 4.2766834170854275e-05,
"loss": 0.9483,
"step": 14900
},
{
"epoch": 0.38,
"grad_norm": 3.7037153244018555,
"learning_rate": 4.271658291457287e-05,
"loss": 0.9335,
"step": 15000
},
{
"epoch": 0.38,
"eval_loss": 1.018809199333191,
"eval_runtime": 93.2091,
"eval_samples_per_second": 10.729,
"eval_steps_per_second": 2.682,
"step": 15000
},
{
"epoch": 0.38,
"grad_norm": 3.3245444297790527,
"learning_rate": 4.266633165829146e-05,
"loss": 0.9258,
"step": 15100
},
{
"epoch": 0.38,
"grad_norm": 5.797372341156006,
"learning_rate": 4.2616582914572866e-05,
"loss": 0.9374,
"step": 15200
},
{
"epoch": 0.38,
"grad_norm": 3.6503055095672607,
"learning_rate": 4.256683417085428e-05,
"loss": 0.9754,
"step": 15300
},
{
"epoch": 0.39,
"grad_norm": 3.030193328857422,
"learning_rate": 4.2516582914572864e-05,
"loss": 0.94,
"step": 15400
},
{
"epoch": 0.39,
"grad_norm": 18.207677841186523,
"learning_rate": 4.246633165829146e-05,
"loss": 1.0033,
"step": 15500
},
{
"epoch": 0.39,
"grad_norm": 5.090402126312256,
"learning_rate": 4.241608040201005e-05,
"loss": 0.9071,
"step": 15600
},
{
"epoch": 0.39,
"grad_norm": 9.98360538482666,
"learning_rate": 4.236582914572865e-05,
"loss": 0.8533,
"step": 15700
},
{
"epoch": 0.4,
"grad_norm": 2.4563426971435547,
"learning_rate": 4.231557788944724e-05,
"loss": 0.9266,
"step": 15800
},
{
"epoch": 0.4,
"grad_norm": 10.532952308654785,
"learning_rate": 4.226532663316583e-05,
"loss": 0.943,
"step": 15900
},
{
"epoch": 0.4,
"grad_norm": 5.991729259490967,
"learning_rate": 4.221507537688442e-05,
"loss": 0.8991,
"step": 16000
},
{
"epoch": 0.4,
"eval_loss": 0.9398714303970337,
"eval_runtime": 93.111,
"eval_samples_per_second": 10.74,
"eval_steps_per_second": 2.685,
"step": 16000
},
{
"epoch": 0.4,
"grad_norm": 13.598296165466309,
"learning_rate": 4.2164824120603016e-05,
"loss": 0.8603,
"step": 16100
},
{
"epoch": 0.41,
"grad_norm": 2.855147123336792,
"learning_rate": 4.211457286432161e-05,
"loss": 0.9097,
"step": 16200
},
{
"epoch": 0.41,
"grad_norm": 3.4038333892822266,
"learning_rate": 4.20643216080402e-05,
"loss": 0.933,
"step": 16300
},
{
"epoch": 0.41,
"grad_norm": 6.272001266479492,
"learning_rate": 4.2014070351758796e-05,
"loss": 0.9801,
"step": 16400
},
{
"epoch": 0.41,
"grad_norm": 4.57353401184082,
"learning_rate": 4.196381909547739e-05,
"loss": 0.9635,
"step": 16500
},
{
"epoch": 0.41,
"grad_norm": 3.6893272399902344,
"learning_rate": 4.191356783919598e-05,
"loss": 0.9651,
"step": 16600
},
{
"epoch": 0.42,
"grad_norm": 6.611605167388916,
"learning_rate": 4.1863316582914576e-05,
"loss": 0.907,
"step": 16700
},
{
"epoch": 0.42,
"grad_norm": 4.06003475189209,
"learning_rate": 4.181306532663317e-05,
"loss": 0.872,
"step": 16800
},
{
"epoch": 0.42,
"grad_norm": 3.3336362838745117,
"learning_rate": 4.176281407035176e-05,
"loss": 0.9688,
"step": 16900
},
{
"epoch": 0.42,
"grad_norm": 3.704200267791748,
"learning_rate": 4.1712562814070356e-05,
"loss": 0.8985,
"step": 17000
},
{
"epoch": 0.42,
"eval_loss": 0.9714847207069397,
"eval_runtime": 93.2176,
"eval_samples_per_second": 10.728,
"eval_steps_per_second": 2.682,
"step": 17000
},
{
"epoch": 0.43,
"grad_norm": 3.743953227996826,
"learning_rate": 4.166231155778895e-05,
"loss": 0.8949,
"step": 17100
},
{
"epoch": 0.43,
"grad_norm": 2.003265619277954,
"learning_rate": 4.1612060301507535e-05,
"loss": 0.8789,
"step": 17200
},
{
"epoch": 0.43,
"grad_norm": 3.5944814682006836,
"learning_rate": 4.156180904522613e-05,
"loss": 0.9354,
"step": 17300
},
{
"epoch": 0.43,
"grad_norm": 2.2168691158294678,
"learning_rate": 4.151155778894473e-05,
"loss": 0.9476,
"step": 17400
},
{
"epoch": 0.44,
"grad_norm": 3.7228217124938965,
"learning_rate": 4.146130653266332e-05,
"loss": 0.9489,
"step": 17500
},
{
"epoch": 0.44,
"grad_norm": 2.9889585971832275,
"learning_rate": 4.141105527638191e-05,
"loss": 0.9163,
"step": 17600
},
{
"epoch": 0.44,
"grad_norm": 3.9288489818573,
"learning_rate": 4.13608040201005e-05,
"loss": 0.9425,
"step": 17700
},
{
"epoch": 0.45,
"grad_norm": 3.2130300998687744,
"learning_rate": 4.13105527638191e-05,
"loss": 0.9328,
"step": 17800
},
{
"epoch": 0.45,
"grad_norm": 1.677257776260376,
"learning_rate": 4.126030150753769e-05,
"loss": 0.9548,
"step": 17900
},
{
"epoch": 0.45,
"grad_norm": 7.897613048553467,
"learning_rate": 4.121005025125628e-05,
"loss": 0.9199,
"step": 18000
},
{
"epoch": 0.45,
"eval_loss": 0.9627432227134705,
"eval_runtime": 93.1711,
"eval_samples_per_second": 10.733,
"eval_steps_per_second": 2.683,
"step": 18000
},
{
"epoch": 0.45,
"grad_norm": 3.7042174339294434,
"learning_rate": 4.1159798994974875e-05,
"loss": 0.9209,
"step": 18100
},
{
"epoch": 0.46,
"grad_norm": 2.224153757095337,
"learning_rate": 4.1110050251256286e-05,
"loss": 0.8761,
"step": 18200
},
{
"epoch": 0.46,
"grad_norm": 2.5457117557525635,
"learning_rate": 4.105979899497487e-05,
"loss": 0.9373,
"step": 18300
},
{
"epoch": 0.46,
"grad_norm": 3.5420138835906982,
"learning_rate": 4.100954773869347e-05,
"loss": 0.9571,
"step": 18400
},
{
"epoch": 0.46,
"grad_norm": 2.4865946769714355,
"learning_rate": 4.0959296482412065e-05,
"loss": 0.995,
"step": 18500
},
{
"epoch": 0.47,
"grad_norm": 11.961295127868652,
"learning_rate": 4.090904522613065e-05,
"loss": 0.9302,
"step": 18600
},
{
"epoch": 0.47,
"grad_norm": 4.357271671295166,
"learning_rate": 4.0858793969849245e-05,
"loss": 0.9839,
"step": 18700
},
{
"epoch": 0.47,
"grad_norm": 6.356178283691406,
"learning_rate": 4.0808542713567845e-05,
"loss": 0.9731,
"step": 18800
},
{
"epoch": 0.47,
"grad_norm": 6.007472991943359,
"learning_rate": 4.075829145728644e-05,
"loss": 0.9325,
"step": 18900
},
{
"epoch": 0.47,
"grad_norm": 2.252680540084839,
"learning_rate": 4.0708040201005025e-05,
"loss": 0.9013,
"step": 19000
},
{
"epoch": 0.47,
"eval_loss": 0.9862160682678223,
"eval_runtime": 93.127,
"eval_samples_per_second": 10.738,
"eval_steps_per_second": 2.685,
"step": 19000
},
{
"epoch": 0.48,
"grad_norm": 6.764468669891357,
"learning_rate": 4.065778894472362e-05,
"loss": 0.9728,
"step": 19100
},
{
"epoch": 0.48,
"grad_norm": 4.251089096069336,
"learning_rate": 4.060753768844222e-05,
"loss": 0.9485,
"step": 19200
},
{
"epoch": 0.48,
"grad_norm": 2.6718976497650146,
"learning_rate": 4.0557286432160804e-05,
"loss": 0.9733,
"step": 19300
},
{
"epoch": 0.48,
"grad_norm": 4.1308746337890625,
"learning_rate": 4.05070351758794e-05,
"loss": 0.9029,
"step": 19400
},
{
"epoch": 0.49,
"grad_norm": 2.442051887512207,
"learning_rate": 4.045678391959799e-05,
"loss": 0.9028,
"step": 19500
},
{
"epoch": 0.49,
"grad_norm": 9.007789611816406,
"learning_rate": 4.0406532663316584e-05,
"loss": 0.9743,
"step": 19600
},
{
"epoch": 0.49,
"grad_norm": 6.044341564178467,
"learning_rate": 4.035628140703518e-05,
"loss": 0.9178,
"step": 19700
},
{
"epoch": 0.49,
"grad_norm": 5.594725608825684,
"learning_rate": 4.030603015075377e-05,
"loss": 0.9361,
"step": 19800
},
{
"epoch": 0.5,
"grad_norm": 5.294704914093018,
"learning_rate": 4.0255778894472364e-05,
"loss": 0.8562,
"step": 19900
},
{
"epoch": 0.5,
"grad_norm": 4.336592674255371,
"learning_rate": 4.020552763819096e-05,
"loss": 0.9627,
"step": 20000
},
{
"epoch": 0.5,
"eval_loss": 1.0003739595413208,
"eval_runtime": 93.1458,
"eval_samples_per_second": 10.736,
"eval_steps_per_second": 2.684,
"step": 20000
},
{
"epoch": 0.5,
"grad_norm": 9.015524864196777,
"learning_rate": 4.015527638190955e-05,
"loss": 0.9766,
"step": 20100
},
{
"epoch": 0.51,
"grad_norm": 3.2617218494415283,
"learning_rate": 4.0105527638190955e-05,
"loss": 0.9732,
"step": 20200
},
{
"epoch": 0.51,
"grad_norm": 3.908485174179077,
"learning_rate": 4.005527638190955e-05,
"loss": 0.9201,
"step": 20300
},
{
"epoch": 0.51,
"grad_norm": 1.562523603439331,
"learning_rate": 4.000502512562814e-05,
"loss": 0.9429,
"step": 20400
},
{
"epoch": 0.51,
"grad_norm": 4.0549774169921875,
"learning_rate": 3.9954773869346734e-05,
"loss": 0.9145,
"step": 20500
},
{
"epoch": 0.52,
"grad_norm": 7.296591758728027,
"learning_rate": 3.990452261306533e-05,
"loss": 0.9687,
"step": 20600
},
{
"epoch": 0.52,
"grad_norm": 3.0619189739227295,
"learning_rate": 3.985427135678392e-05,
"loss": 0.9581,
"step": 20700
},
{
"epoch": 0.52,
"grad_norm": 2.315051794052124,
"learning_rate": 3.9804020100502514e-05,
"loss": 0.9386,
"step": 20800
},
{
"epoch": 0.52,
"grad_norm": 3.446699857711792,
"learning_rate": 3.975376884422111e-05,
"loss": 0.9349,
"step": 20900
},
{
"epoch": 0.53,
"grad_norm": 1.18391752243042,
"learning_rate": 3.97035175879397e-05,
"loss": 0.8872,
"step": 21000
},
{
"epoch": 0.53,
"eval_loss": 0.9717727303504944,
"eval_runtime": 93.1746,
"eval_samples_per_second": 10.733,
"eval_steps_per_second": 2.683,
"step": 21000
},
{
"epoch": 0.53,
"grad_norm": 5.245177745819092,
"learning_rate": 3.9653266331658294e-05,
"loss": 0.9061,
"step": 21100
},
{
"epoch": 0.53,
"grad_norm": 2.7087039947509766,
"learning_rate": 3.960301507537689e-05,
"loss": 0.8958,
"step": 21200
},
{
"epoch": 0.53,
"grad_norm": 2.1201984882354736,
"learning_rate": 3.955276381909548e-05,
"loss": 0.939,
"step": 21300
},
{
"epoch": 0.54,
"grad_norm": 2.5783448219299316,
"learning_rate": 3.950251256281407e-05,
"loss": 0.9643,
"step": 21400
},
{
"epoch": 0.54,
"grad_norm": 2.56453800201416,
"learning_rate": 3.945226130653267e-05,
"loss": 0.9249,
"step": 21500
},
{
"epoch": 0.54,
"grad_norm": 5.370649814605713,
"learning_rate": 3.940201005025126e-05,
"loss": 0.8936,
"step": 21600
},
{
"epoch": 0.54,
"grad_norm": 3.526343822479248,
"learning_rate": 3.935175879396985e-05,
"loss": 0.9637,
"step": 21700
},
{
"epoch": 0.55,
"grad_norm": 4.2002363204956055,
"learning_rate": 3.930150753768844e-05,
"loss": 0.981,
"step": 21800
},
{
"epoch": 0.55,
"grad_norm": 5.0254082679748535,
"learning_rate": 3.925125628140704e-05,
"loss": 0.8711,
"step": 21900
},
{
"epoch": 0.55,
"grad_norm": 3.774163007736206,
"learning_rate": 3.920100502512563e-05,
"loss": 0.989,
"step": 22000
},
{
"epoch": 0.55,
"eval_loss": 0.9514731168746948,
"eval_runtime": 93.1755,
"eval_samples_per_second": 10.732,
"eval_steps_per_second": 2.683,
"step": 22000
},
{
"epoch": 0.55,
"grad_norm": 5.07633638381958,
"learning_rate": 3.915075376884422e-05,
"loss": 0.896,
"step": 22100
},
{
"epoch": 0.56,
"grad_norm": 4.740135192871094,
"learning_rate": 3.910050251256281e-05,
"loss": 0.915,
"step": 22200
},
{
"epoch": 0.56,
"grad_norm": 4.904139041900635,
"learning_rate": 3.905025125628141e-05,
"loss": 0.8865,
"step": 22300
},
{
"epoch": 0.56,
"grad_norm": 2.3555753231048584,
"learning_rate": 3.9000000000000006e-05,
"loss": 0.9291,
"step": 22400
},
{
"epoch": 0.56,
"grad_norm": 12.583273887634277,
"learning_rate": 3.894974874371859e-05,
"loss": 0.8636,
"step": 22500
},
{
"epoch": 0.56,
"grad_norm": 9.234467506408691,
"learning_rate": 3.8899497487437186e-05,
"loss": 0.8784,
"step": 22600
},
{
"epoch": 0.57,
"grad_norm": 4.7821807861328125,
"learning_rate": 3.884924623115578e-05,
"loss": 0.8985,
"step": 22700
},
{
"epoch": 0.57,
"grad_norm": 14.900897026062012,
"learning_rate": 3.879899497487437e-05,
"loss": 0.9336,
"step": 22800
},
{
"epoch": 0.57,
"grad_norm": 2.347285509109497,
"learning_rate": 3.8748743718592966e-05,
"loss": 0.9099,
"step": 22900
},
{
"epoch": 0.57,
"grad_norm": 4.16949987411499,
"learning_rate": 3.869849246231156e-05,
"loss": 0.8553,
"step": 23000
},
{
"epoch": 0.57,
"eval_loss": 1.0030484199523926,
"eval_runtime": 93.1557,
"eval_samples_per_second": 10.735,
"eval_steps_per_second": 2.684,
"step": 23000
},
{
"epoch": 0.58,
"grad_norm": 3.9661343097686768,
"learning_rate": 3.864824120603015e-05,
"loss": 0.9204,
"step": 23100
},
{
"epoch": 0.58,
"grad_norm": 5.575523853302002,
"learning_rate": 3.8597989949748745e-05,
"loss": 0.9099,
"step": 23200
},
{
"epoch": 0.58,
"grad_norm": 5.437506198883057,
"learning_rate": 3.854773869346734e-05,
"loss": 0.8914,
"step": 23300
},
{
"epoch": 0.58,
"grad_norm": 3.891209125518799,
"learning_rate": 3.849748743718593e-05,
"loss": 0.8746,
"step": 23400
},
{
"epoch": 0.59,
"grad_norm": 2.7176661491394043,
"learning_rate": 3.8447236180904525e-05,
"loss": 0.8414,
"step": 23500
},
{
"epoch": 0.59,
"grad_norm": 10.221911430358887,
"learning_rate": 3.839698492462312e-05,
"loss": 0.8716,
"step": 23600
},
{
"epoch": 0.59,
"grad_norm": 2.3445537090301514,
"learning_rate": 3.834673366834171e-05,
"loss": 0.9224,
"step": 23700
},
{
"epoch": 0.59,
"grad_norm": 8.813838005065918,
"learning_rate": 3.8296482412060305e-05,
"loss": 0.8915,
"step": 23800
},
{
"epoch": 0.6,
"grad_norm": 2.9498250484466553,
"learning_rate": 3.824623115577889e-05,
"loss": 0.9461,
"step": 23900
},
{
"epoch": 0.6,
"grad_norm": 4.244695663452148,
"learning_rate": 3.819597989949749e-05,
"loss": 0.927,
"step": 24000
},
{
"epoch": 0.6,
"eval_loss": 0.9381696581840515,
"eval_runtime": 93.1355,
"eval_samples_per_second": 10.737,
"eval_steps_per_second": 2.684,
"step": 24000
},
{
"epoch": 0.6,
"grad_norm": 4.106844425201416,
"learning_rate": 3.8145728643216084e-05,
"loss": 0.885,
"step": 24100
},
{
"epoch": 0.6,
"grad_norm": 2.4798853397369385,
"learning_rate": 3.809547738693468e-05,
"loss": 0.9688,
"step": 24200
},
{
"epoch": 0.61,
"grad_norm": 3.655352830886841,
"learning_rate": 3.8045226130653264e-05,
"loss": 0.9232,
"step": 24300
},
{
"epoch": 0.61,
"grad_norm": 5.7722883224487305,
"learning_rate": 3.7994974874371864e-05,
"loss": 0.9077,
"step": 24400
},
{
"epoch": 0.61,
"grad_norm": 1.5788486003875732,
"learning_rate": 3.794472361809046e-05,
"loss": 0.9266,
"step": 24500
},
{
"epoch": 0.61,
"grad_norm": 3.382827043533325,
"learning_rate": 3.789497487437186e-05,
"loss": 0.8958,
"step": 24600
},
{
"epoch": 0.62,
"grad_norm": 5.517687797546387,
"learning_rate": 3.7844723618090455e-05,
"loss": 0.8796,
"step": 24700
},
{
"epoch": 0.62,
"grad_norm": 5.224692344665527,
"learning_rate": 3.779447236180905e-05,
"loss": 0.8697,
"step": 24800
},
{
"epoch": 0.62,
"grad_norm": 3.2972185611724854,
"learning_rate": 3.774422110552764e-05,
"loss": 0.8586,
"step": 24900
},
{
"epoch": 0.62,
"grad_norm": 3.59963321685791,
"learning_rate": 3.7694472361809046e-05,
"loss": 0.8565,
"step": 25000
},
{
"epoch": 0.62,
"eval_loss": 1.0313160419464111,
"eval_runtime": 93.1391,
"eval_samples_per_second": 10.737,
"eval_steps_per_second": 2.684,
"step": 25000
},
{
"epoch": 0.63,
"grad_norm": 4.486480712890625,
"learning_rate": 3.764422110552764e-05,
"loss": 0.9122,
"step": 25100
},
{
"epoch": 0.63,
"grad_norm": 3.887725830078125,
"learning_rate": 3.759396984924623e-05,
"loss": 0.8762,
"step": 25200
},
{
"epoch": 0.63,
"grad_norm": 3.239841938018799,
"learning_rate": 3.7543718592964825e-05,
"loss": 0.9358,
"step": 25300
},
{
"epoch": 0.64,
"grad_norm": 7.870302200317383,
"learning_rate": 3.749346733668342e-05,
"loss": 0.8771,
"step": 25400
},
{
"epoch": 0.64,
"grad_norm": 5.073429584503174,
"learning_rate": 3.744321608040201e-05,
"loss": 0.8716,
"step": 25500
},
{
"epoch": 0.64,
"grad_norm": 8.586678504943848,
"learning_rate": 3.7392964824120605e-05,
"loss": 0.9783,
"step": 25600
},
{
"epoch": 0.64,
"grad_norm": 1.4139407873153687,
"learning_rate": 3.73427135678392e-05,
"loss": 0.9423,
"step": 25700
},
{
"epoch": 0.65,
"grad_norm": 4.109837532043457,
"learning_rate": 3.729246231155779e-05,
"loss": 0.8676,
"step": 25800
},
{
"epoch": 0.65,
"grad_norm": 3.691585063934326,
"learning_rate": 3.7242211055276385e-05,
"loss": 0.9379,
"step": 25900
},
{
"epoch": 0.65,
"grad_norm": 1.2495490312576294,
"learning_rate": 3.719195979899497e-05,
"loss": 0.8964,
"step": 26000
},
{
"epoch": 0.65,
"eval_loss": 0.9489035606384277,
"eval_runtime": 93.136,
"eval_samples_per_second": 10.737,
"eval_steps_per_second": 2.684,
"step": 26000
},
{
"epoch": 0.65,
"grad_norm": 4.183409690856934,
"learning_rate": 3.714170854271357e-05,
"loss": 0.8376,
"step": 26100
},
{
"epoch": 0.66,
"grad_norm": 6.531569957733154,
"learning_rate": 3.7091457286432165e-05,
"loss": 0.9188,
"step": 26200
},
{
"epoch": 0.66,
"grad_norm": 2.014603853225708,
"learning_rate": 3.704120603015076e-05,
"loss": 0.9251,
"step": 26300
},
{
"epoch": 0.66,
"grad_norm": 3.1563830375671387,
"learning_rate": 3.6990954773869344e-05,
"loss": 0.8992,
"step": 26400
},
{
"epoch": 0.66,
"grad_norm": 6.678219318389893,
"learning_rate": 3.6940703517587944e-05,
"loss": 0.9677,
"step": 26500
},
{
"epoch": 0.67,
"grad_norm": 4.432798862457275,
"learning_rate": 3.689045226130654e-05,
"loss": 0.9171,
"step": 26600
},
{
"epoch": 0.67,
"grad_norm": 4.275862693786621,
"learning_rate": 3.6840201005025124e-05,
"loss": 0.9154,
"step": 26700
},
{
"epoch": 0.67,
"grad_norm": 3.015531301498413,
"learning_rate": 3.678994974874372e-05,
"loss": 0.8756,
"step": 26800
},
{
"epoch": 0.67,
"grad_norm": 5.022598743438721,
"learning_rate": 3.673969849246232e-05,
"loss": 0.9211,
"step": 26900
},
{
"epoch": 0.68,
"grad_norm": 4.387092113494873,
"learning_rate": 3.6689447236180904e-05,
"loss": 0.8892,
"step": 27000
},
{
"epoch": 0.68,
"eval_loss": 0.9665262699127197,
"eval_runtime": 93.1584,
"eval_samples_per_second": 10.734,
"eval_steps_per_second": 2.684,
"step": 27000
},
{
"epoch": 0.68,
"grad_norm": 1.8250643014907837,
"learning_rate": 3.66391959798995e-05,
"loss": 0.8914,
"step": 27100
},
{
"epoch": 0.68,
"grad_norm": 3.8293349742889404,
"learning_rate": 3.658894472361809e-05,
"loss": 0.9212,
"step": 27200
},
{
"epoch": 0.68,
"grad_norm": 5.134536266326904,
"learning_rate": 3.653869346733669e-05,
"loss": 0.9025,
"step": 27300
},
{
"epoch": 0.69,
"grad_norm": 4.095407485961914,
"learning_rate": 3.648844221105528e-05,
"loss": 0.8727,
"step": 27400
},
{
"epoch": 0.69,
"grad_norm": 6.101755142211914,
"learning_rate": 3.643819095477387e-05,
"loss": 0.8914,
"step": 27500
},
{
"epoch": 0.69,
"grad_norm": 11.917282104492188,
"learning_rate": 3.638793969849246e-05,
"loss": 0.8897,
"step": 27600
},
{
"epoch": 0.69,
"grad_norm": 4.288369178771973,
"learning_rate": 3.6337688442211056e-05,
"loss": 0.9408,
"step": 27700
},
{
"epoch": 0.69,
"grad_norm": 3.0062568187713623,
"learning_rate": 3.628743718592965e-05,
"loss": 0.9504,
"step": 27800
},
{
"epoch": 0.7,
"grad_norm": 3.4575741291046143,
"learning_rate": 3.623718592964824e-05,
"loss": 0.8899,
"step": 27900
},
{
"epoch": 0.7,
"grad_norm": 3.5996832847595215,
"learning_rate": 3.6186934673366836e-05,
"loss": 0.914,
"step": 28000
},
{
"epoch": 0.7,
"eval_loss": 0.939644455909729,
"eval_runtime": 93.1025,
"eval_samples_per_second": 10.741,
"eval_steps_per_second": 2.685,
"step": 28000
},
{
"epoch": 0.7,
"grad_norm": 5.83299446105957,
"learning_rate": 3.613668341708543e-05,
"loss": 0.9357,
"step": 28100
},
{
"epoch": 0.7,
"grad_norm": 2.9027252197265625,
"learning_rate": 3.608643216080402e-05,
"loss": 0.9397,
"step": 28200
},
{
"epoch": 0.71,
"grad_norm": 2.4559011459350586,
"learning_rate": 3.603668341708543e-05,
"loss": 0.8895,
"step": 28300
},
{
"epoch": 0.71,
"grad_norm": 5.305372714996338,
"learning_rate": 3.598643216080402e-05,
"loss": 0.8654,
"step": 28400
},
{
"epoch": 0.71,
"grad_norm": 12.196166038513184,
"learning_rate": 3.5936180904522614e-05,
"loss": 0.988,
"step": 28500
},
{
"epoch": 0.71,
"grad_norm": 2.6445765495300293,
"learning_rate": 3.588592964824121e-05,
"loss": 0.9612,
"step": 28600
},
{
"epoch": 0.72,
"grad_norm": 4.8418192863464355,
"learning_rate": 3.58356783919598e-05,
"loss": 0.9476,
"step": 28700
},
{
"epoch": 0.72,
"grad_norm": 3.024900436401367,
"learning_rate": 3.578542713567839e-05,
"loss": 0.8839,
"step": 28800
},
{
"epoch": 0.72,
"grad_norm": 6.7939066886901855,
"learning_rate": 3.5735175879396986e-05,
"loss": 0.854,
"step": 28900
},
{
"epoch": 0.72,
"grad_norm": 8.745389938354492,
"learning_rate": 3.568492462311558e-05,
"loss": 0.8931,
"step": 29000
},
{
"epoch": 0.72,
"eval_loss": 0.9555127024650574,
"eval_runtime": 93.1763,
"eval_samples_per_second": 10.732,
"eval_steps_per_second": 2.683,
"step": 29000
},
{
"epoch": 0.73,
"grad_norm": 3.085552930831909,
"learning_rate": 3.563467336683417e-05,
"loss": 0.9206,
"step": 29100
},
{
"epoch": 0.73,
"grad_norm": 2.5964431762695312,
"learning_rate": 3.5584422110552766e-05,
"loss": 0.8048,
"step": 29200
},
{
"epoch": 0.73,
"grad_norm": 2.532219886779785,
"learning_rate": 3.553417085427136e-05,
"loss": 0.9781,
"step": 29300
},
{
"epoch": 0.73,
"grad_norm": 7.187425136566162,
"learning_rate": 3.548391959798995e-05,
"loss": 0.8587,
"step": 29400
},
{
"epoch": 0.74,
"grad_norm": 3.9171953201293945,
"learning_rate": 3.543366834170854e-05,
"loss": 0.9206,
"step": 29500
},
{
"epoch": 0.74,
"grad_norm": 8.586997032165527,
"learning_rate": 3.538341708542714e-05,
"loss": 0.9166,
"step": 29600
},
{
"epoch": 0.74,
"grad_norm": 7.860572338104248,
"learning_rate": 3.533316582914573e-05,
"loss": 0.9075,
"step": 29700
},
{
"epoch": 0.74,
"grad_norm": 3.7530722618103027,
"learning_rate": 3.5282914572864326e-05,
"loss": 0.943,
"step": 29800
},
{
"epoch": 0.75,
"grad_norm": 7.102590084075928,
"learning_rate": 3.523266331658291e-05,
"loss": 0.9266,
"step": 29900
},
{
"epoch": 0.75,
"grad_norm": 2.649622917175293,
"learning_rate": 3.518241206030151e-05,
"loss": 0.902,
"step": 30000
},
{
"epoch": 0.75,
"eval_loss": 0.9418465495109558,
"eval_runtime": 93.1693,
"eval_samples_per_second": 10.733,
"eval_steps_per_second": 2.683,
"step": 30000
},
{
"epoch": 0.75,
"grad_norm": 4.440699100494385,
"learning_rate": 3.5132160804020105e-05,
"loss": 0.8826,
"step": 30100
},
{
"epoch": 0.76,
"grad_norm": 4.0459303855896,
"learning_rate": 3.508190954773869e-05,
"loss": 0.8657,
"step": 30200
},
{
"epoch": 0.76,
"grad_norm": 8.85013198852539,
"learning_rate": 3.5031658291457285e-05,
"loss": 0.9363,
"step": 30300
},
{
"epoch": 0.76,
"grad_norm": 3.614614725112915,
"learning_rate": 3.4981407035175885e-05,
"loss": 0.8842,
"step": 30400
},
{
"epoch": 0.76,
"grad_norm": 6.1297287940979,
"learning_rate": 3.493115577889448e-05,
"loss": 0.8995,
"step": 30500
},
{
"epoch": 0.77,
"grad_norm": 2.9940993785858154,
"learning_rate": 3.4880904522613065e-05,
"loss": 0.8225,
"step": 30600
},
{
"epoch": 0.77,
"grad_norm": 5.4286675453186035,
"learning_rate": 3.483065326633166e-05,
"loss": 0.888,
"step": 30700
},
{
"epoch": 0.77,
"grad_norm": 5.572968482971191,
"learning_rate": 3.478040201005025e-05,
"loss": 0.9427,
"step": 30800
},
{
"epoch": 0.77,
"grad_norm": 2.559330701828003,
"learning_rate": 3.4730150753768845e-05,
"loss": 0.94,
"step": 30900
},
{
"epoch": 0.78,
"grad_norm": 3.6347320079803467,
"learning_rate": 3.468040201005025e-05,
"loss": 0.9599,
"step": 31000
},
{
"epoch": 0.78,
"eval_loss": 0.9510870575904846,
"eval_runtime": 93.2143,
"eval_samples_per_second": 10.728,
"eval_steps_per_second": 2.682,
"step": 31000
},
{
"epoch": 0.78,
"grad_norm": 2.794281005859375,
"learning_rate": 3.463015075376885e-05,
"loss": 0.8893,
"step": 31100
},
{
"epoch": 0.78,
"grad_norm": 5.214545726776123,
"learning_rate": 3.457989949748744e-05,
"loss": 0.8749,
"step": 31200
},
{
"epoch": 0.78,
"grad_norm": 2.6847405433654785,
"learning_rate": 3.452964824120603e-05,
"loss": 0.8761,
"step": 31300
},
{
"epoch": 0.79,
"grad_norm": 4.318289756774902,
"learning_rate": 3.447939698492462e-05,
"loss": 0.8749,
"step": 31400
},
{
"epoch": 0.79,
"grad_norm": 6.353730201721191,
"learning_rate": 3.442914572864322e-05,
"loss": 0.8365,
"step": 31500
},
{
"epoch": 0.79,
"grad_norm": 7.188552379608154,
"learning_rate": 3.437889447236181e-05,
"loss": 0.875,
"step": 31600
},
{
"epoch": 0.79,
"grad_norm": 4.78775691986084,
"learning_rate": 3.43286432160804e-05,
"loss": 0.8961,
"step": 31700
},
{
"epoch": 0.8,
"grad_norm": 4.9755120277404785,
"learning_rate": 3.4278391959798995e-05,
"loss": 0.8823,
"step": 31800
},
{
"epoch": 0.8,
"grad_norm": 5.056182861328125,
"learning_rate": 3.4228140703517595e-05,
"loss": 0.9061,
"step": 31900
},
{
"epoch": 0.8,
"grad_norm": 4.965173721313477,
"learning_rate": 3.417788944723618e-05,
"loss": 0.925,
"step": 32000
},
{
"epoch": 0.8,
"eval_loss": 0.9364937543869019,
"eval_runtime": 93.1499,
"eval_samples_per_second": 10.735,
"eval_steps_per_second": 2.684,
"step": 32000
},
{
"epoch": 0.8,
"grad_norm": 4.997767925262451,
"learning_rate": 3.4127638190954775e-05,
"loss": 0.8554,
"step": 32100
},
{
"epoch": 0.81,
"grad_norm": 6.935024261474609,
"learning_rate": 3.407738693467337e-05,
"loss": 0.8517,
"step": 32200
},
{
"epoch": 0.81,
"grad_norm": 3.6432433128356934,
"learning_rate": 3.402713567839196e-05,
"loss": 0.8663,
"step": 32300
},
{
"epoch": 0.81,
"grad_norm": 5.752647876739502,
"learning_rate": 3.3976884422110554e-05,
"loss": 0.9028,
"step": 32400
},
{
"epoch": 0.81,
"grad_norm": 2.6224024295806885,
"learning_rate": 3.392663316582915e-05,
"loss": 0.9201,
"step": 32500
},
{
"epoch": 0.81,
"grad_norm": 3.743650436401367,
"learning_rate": 3.387638190954774e-05,
"loss": 0.9333,
"step": 32600
},
{
"epoch": 0.82,
"grad_norm": 3.6796152591705322,
"learning_rate": 3.3826130653266334e-05,
"loss": 0.8645,
"step": 32700
},
{
"epoch": 0.82,
"grad_norm": 3.3265674114227295,
"learning_rate": 3.377587939698493e-05,
"loss": 0.935,
"step": 32800
},
{
"epoch": 0.82,
"grad_norm": 5.371291637420654,
"learning_rate": 3.372562814070352e-05,
"loss": 0.9088,
"step": 32900
},
{
"epoch": 0.82,
"grad_norm": 3.846700668334961,
"learning_rate": 3.3675376884422114e-05,
"loss": 0.7866,
"step": 33000
},
{
"epoch": 0.82,
"eval_loss": 0.965214729309082,
"eval_runtime": 93.2035,
"eval_samples_per_second": 10.729,
"eval_steps_per_second": 2.682,
"step": 33000
},
{
"epoch": 0.83,
"grad_norm": 5.847678184509277,
"learning_rate": 3.362512562814071e-05,
"loss": 0.8773,
"step": 33100
},
{
"epoch": 0.83,
"grad_norm": 4.96260929107666,
"learning_rate": 3.357537688442211e-05,
"loss": 0.8919,
"step": 33200
},
{
"epoch": 0.83,
"grad_norm": 2.979278087615967,
"learning_rate": 3.3525125628140705e-05,
"loss": 0.913,
"step": 33300
},
{
"epoch": 0.83,
"grad_norm": 3.861112356185913,
"learning_rate": 3.34748743718593e-05,
"loss": 0.8088,
"step": 33400
},
{
"epoch": 0.84,
"grad_norm": 3.394242763519287,
"learning_rate": 3.342462311557789e-05,
"loss": 0.9261,
"step": 33500
},
{
"epoch": 0.84,
"grad_norm": 2.7038347721099854,
"learning_rate": 3.3374371859296484e-05,
"loss": 0.9047,
"step": 33600
},
{
"epoch": 0.84,
"grad_norm": 4.880646228790283,
"learning_rate": 3.332412060301508e-05,
"loss": 0.8903,
"step": 33700
},
{
"epoch": 0.84,
"grad_norm": 2.0390119552612305,
"learning_rate": 3.327386934673367e-05,
"loss": 0.8745,
"step": 33800
},
{
"epoch": 0.85,
"grad_norm": 2.2257132530212402,
"learning_rate": 3.3223618090452264e-05,
"loss": 0.9408,
"step": 33900
},
{
"epoch": 0.85,
"grad_norm": 4.378706455230713,
"learning_rate": 3.317336683417086e-05,
"loss": 0.8874,
"step": 34000
},
{
"epoch": 0.85,
"eval_loss": 0.9394493699073792,
"eval_runtime": 93.1353,
"eval_samples_per_second": 10.737,
"eval_steps_per_second": 2.684,
"step": 34000
},
{
"epoch": 0.85,
"grad_norm": 5.825510025024414,
"learning_rate": 3.3123115577889444e-05,
"loss": 0.8532,
"step": 34100
},
{
"epoch": 0.85,
"grad_norm": 4.304105281829834,
"learning_rate": 3.3072864321608044e-05,
"loss": 0.8466,
"step": 34200
},
{
"epoch": 0.86,
"grad_norm": 5.195026874542236,
"learning_rate": 3.302261306532664e-05,
"loss": 0.8506,
"step": 34300
},
{
"epoch": 0.86,
"grad_norm": 7.148824214935303,
"learning_rate": 3.297236180904523e-05,
"loss": 0.8473,
"step": 34400
},
{
"epoch": 0.86,
"grad_norm": 4.036725997924805,
"learning_rate": 3.292211055276382e-05,
"loss": 0.8491,
"step": 34500
},
{
"epoch": 0.86,
"grad_norm": 5.674220561981201,
"learning_rate": 3.287185929648242e-05,
"loss": 0.9206,
"step": 34600
},
{
"epoch": 0.87,
"grad_norm": 1.7672762870788574,
"learning_rate": 3.282160804020101e-05,
"loss": 0.8972,
"step": 34700
},
{
"epoch": 0.87,
"grad_norm": 5.410703182220459,
"learning_rate": 3.2771356783919596e-05,
"loss": 0.8691,
"step": 34800
},
{
"epoch": 0.87,
"grad_norm": 0.4104035198688507,
"learning_rate": 3.272110552763819e-05,
"loss": 0.8544,
"step": 34900
},
{
"epoch": 0.88,
"grad_norm": 7.403091907501221,
"learning_rate": 3.267085427135679e-05,
"loss": 0.8776,
"step": 35000
},
{
"epoch": 0.88,
"eval_loss": 0.956791877746582,
"eval_runtime": 93.1885,
"eval_samples_per_second": 10.731,
"eval_steps_per_second": 2.683,
"step": 35000
},
{
"epoch": 0.88,
"grad_norm": 3.543870449066162,
"learning_rate": 3.2620603015075376e-05,
"loss": 0.8759,
"step": 35100
},
{
"epoch": 0.88,
"grad_norm": 7.124320030212402,
"learning_rate": 3.257035175879397e-05,
"loss": 0.8996,
"step": 35200
},
{
"epoch": 0.88,
"grad_norm": 2.345332384109497,
"learning_rate": 3.252010050251256e-05,
"loss": 0.8434,
"step": 35300
},
{
"epoch": 0.89,
"grad_norm": 4.802892208099365,
"learning_rate": 3.2470351758793974e-05,
"loss": 0.8326,
"step": 35400
},
{
"epoch": 0.89,
"grad_norm": 3.3908345699310303,
"learning_rate": 3.242010050251256e-05,
"loss": 0.9087,
"step": 35500
},
{
"epoch": 0.89,
"grad_norm": 8.180322647094727,
"learning_rate": 3.236984924623116e-05,
"loss": 0.8353,
"step": 35600
},
{
"epoch": 0.89,
"grad_norm": 5.232153415679932,
"learning_rate": 3.2319597989949753e-05,
"loss": 0.8961,
"step": 35700
},
{
"epoch": 0.9,
"grad_norm": 5.016308307647705,
"learning_rate": 3.226934673366834e-05,
"loss": 0.8372,
"step": 35800
},
{
"epoch": 0.9,
"grad_norm": 10.003373146057129,
"learning_rate": 3.221909547738693e-05,
"loss": 0.904,
"step": 35900
},
{
"epoch": 0.9,
"grad_norm": 8.255809783935547,
"learning_rate": 3.216884422110553e-05,
"loss": 0.8432,
"step": 36000
},
{
"epoch": 0.9,
"eval_loss": 0.9308714866638184,
"eval_runtime": 93.1505,
"eval_samples_per_second": 10.735,
"eval_steps_per_second": 2.684,
"step": 36000
},
{
"epoch": 0.9,
"grad_norm": 4.078076362609863,
"learning_rate": 3.2118592964824126e-05,
"loss": 0.8924,
"step": 36100
},
{
"epoch": 0.91,
"grad_norm": 3.7647814750671387,
"learning_rate": 3.206834170854271e-05,
"loss": 0.835,
"step": 36200
},
{
"epoch": 0.91,
"grad_norm": 5.127739429473877,
"learning_rate": 3.2018090452261306e-05,
"loss": 0.8393,
"step": 36300
},
{
"epoch": 0.91,
"grad_norm": 4.961634159088135,
"learning_rate": 3.19678391959799e-05,
"loss": 0.8592,
"step": 36400
},
{
"epoch": 0.91,
"grad_norm": 8.022249221801758,
"learning_rate": 3.191758793969849e-05,
"loss": 0.8723,
"step": 36500
},
{
"epoch": 0.92,
"grad_norm": 4.613710403442383,
"learning_rate": 3.1867336683417086e-05,
"loss": 0.9061,
"step": 36600
},
{
"epoch": 0.92,
"grad_norm": 5.985745429992676,
"learning_rate": 3.181708542713568e-05,
"loss": 0.9198,
"step": 36700
},
{
"epoch": 0.92,
"grad_norm": 4.569676876068115,
"learning_rate": 3.176683417085427e-05,
"loss": 0.8561,
"step": 36800
},
{
"epoch": 0.92,
"grad_norm": 5.985031604766846,
"learning_rate": 3.1716582914572866e-05,
"loss": 0.8382,
"step": 36900
},
{
"epoch": 0.93,
"grad_norm": 3.2333121299743652,
"learning_rate": 3.166633165829146e-05,
"loss": 0.911,
"step": 37000
},
{
"epoch": 0.93,
"eval_loss": 0.8933776617050171,
"eval_runtime": 93.2112,
"eval_samples_per_second": 10.728,
"eval_steps_per_second": 2.682,
"step": 37000
},
{
"epoch": 0.93,
"grad_norm": 2.7566323280334473,
"learning_rate": 3.161608040201005e-05,
"loss": 0.8732,
"step": 37100
},
{
"epoch": 0.93,
"grad_norm": 3.0768489837646484,
"learning_rate": 3.1565829145728645e-05,
"loss": 0.9278,
"step": 37200
},
{
"epoch": 0.93,
"grad_norm": 2.595099925994873,
"learning_rate": 3.151557788944724e-05,
"loss": 0.9435,
"step": 37300
},
{
"epoch": 0.94,
"grad_norm": 3.799212694168091,
"learning_rate": 3.146532663316583e-05,
"loss": 0.834,
"step": 37400
},
{
"epoch": 0.94,
"grad_norm": 2.8810317516326904,
"learning_rate": 3.1415075376884425e-05,
"loss": 0.8501,
"step": 37500
},
{
"epoch": 0.94,
"grad_norm": 3.931706428527832,
"learning_rate": 3.136482412060301e-05,
"loss": 0.8778,
"step": 37600
},
{
"epoch": 0.94,
"grad_norm": 2.4938392639160156,
"learning_rate": 3.131457286432161e-05,
"loss": 0.887,
"step": 37700
},
{
"epoch": 0.94,
"grad_norm": 5.07243013381958,
"learning_rate": 3.1264824120603016e-05,
"loss": 0.8819,
"step": 37800
},
{
"epoch": 0.95,
"grad_norm": 3.65317440032959,
"learning_rate": 3.121457286432161e-05,
"loss": 0.9086,
"step": 37900
},
{
"epoch": 0.95,
"grad_norm": 4.633053779602051,
"learning_rate": 3.11643216080402e-05,
"loss": 0.8617,
"step": 38000
},
{
"epoch": 0.95,
"eval_loss": 0.9430031180381775,
"eval_runtime": 93.1171,
"eval_samples_per_second": 10.739,
"eval_steps_per_second": 2.685,
"step": 38000
},
{
"epoch": 0.95,
"grad_norm": 5.8754377365112305,
"learning_rate": 3.1114070351758796e-05,
"loss": 0.8751,
"step": 38100
},
{
"epoch": 0.95,
"grad_norm": 1.0654345750808716,
"learning_rate": 3.106381909547739e-05,
"loss": 0.9182,
"step": 38200
},
{
"epoch": 0.96,
"grad_norm": 4.2580766677856445,
"learning_rate": 3.101356783919598e-05,
"loss": 0.9449,
"step": 38300
},
{
"epoch": 0.96,
"grad_norm": 2.0152392387390137,
"learning_rate": 3.0963316582914575e-05,
"loss": 0.8728,
"step": 38400
},
{
"epoch": 0.96,
"grad_norm": 3.4203391075134277,
"learning_rate": 3.091306532663317e-05,
"loss": 0.8576,
"step": 38500
},
{
"epoch": 0.96,
"grad_norm": 7.677492141723633,
"learning_rate": 3.086281407035176e-05,
"loss": 0.8328,
"step": 38600
},
{
"epoch": 0.97,
"grad_norm": 6.361073017120361,
"learning_rate": 3.0812562814070355e-05,
"loss": 0.8738,
"step": 38700
},
{
"epoch": 0.97,
"grad_norm": 5.15574312210083,
"learning_rate": 3.076231155778895e-05,
"loss": 0.8552,
"step": 38800
},
{
"epoch": 0.97,
"grad_norm": 5.873382568359375,
"learning_rate": 3.071206030150754e-05,
"loss": 0.8545,
"step": 38900
},
{
"epoch": 0.97,
"grad_norm": 7.3346757888793945,
"learning_rate": 3.066180904522613e-05,
"loss": 0.8673,
"step": 39000
},
{
"epoch": 0.97,
"eval_loss": 0.9260377883911133,
"eval_runtime": 93.1125,
"eval_samples_per_second": 10.74,
"eval_steps_per_second": 2.685,
"step": 39000
},
{
"epoch": 0.98,
"grad_norm": 2.6923649311065674,
"learning_rate": 3.061155778894472e-05,
"loss": 0.8591,
"step": 39100
},
{
"epoch": 0.98,
"grad_norm": 2.852013349533081,
"learning_rate": 3.056130653266332e-05,
"loss": 0.8451,
"step": 39200
},
{
"epoch": 0.98,
"grad_norm": 4.325497627258301,
"learning_rate": 3.051105527638191e-05,
"loss": 0.8447,
"step": 39300
},
{
"epoch": 0.98,
"grad_norm": 2.5726442337036133,
"learning_rate": 3.0460804020100504e-05,
"loss": 0.8634,
"step": 39400
},
{
"epoch": 0.99,
"grad_norm": 4.642368793487549,
"learning_rate": 3.0410552763819094e-05,
"loss": 0.8341,
"step": 39500
},
{
"epoch": 0.99,
"grad_norm": 9.209900856018066,
"learning_rate": 3.036030150753769e-05,
"loss": 0.8999,
"step": 39600
},
{
"epoch": 0.99,
"grad_norm": 5.0136399269104,
"learning_rate": 3.0310050251256284e-05,
"loss": 0.845,
"step": 39700
},
{
"epoch": 0.99,
"grad_norm": 1.9574824571609497,
"learning_rate": 3.0259798994974874e-05,
"loss": 0.8618,
"step": 39800
},
{
"epoch": 1.0,
"grad_norm": 1.94467031955719,
"learning_rate": 3.0209547738693467e-05,
"loss": 0.9011,
"step": 39900
},
{
"epoch": 1.0,
"grad_norm": 4.029626369476318,
"learning_rate": 3.0159296482412064e-05,
"loss": 0.8125,
"step": 40000
},
{
"epoch": 1.0,
"eval_loss": 0.9220036864280701,
"eval_runtime": 93.1186,
"eval_samples_per_second": 10.739,
"eval_steps_per_second": 2.685,
"step": 40000
},
{
"epoch": 1.0,
"grad_norm": 7.923732280731201,
"learning_rate": 3.0109045226130657e-05,
"loss": 0.8802,
"step": 40100
},
{
"epoch": 1.0,
"grad_norm": 3.5818631649017334,
"learning_rate": 3.0058793969849247e-05,
"loss": 0.8745,
"step": 40200
},
{
"epoch": 1.01,
"grad_norm": 4.282202243804932,
"learning_rate": 3.000854271356784e-05,
"loss": 0.8291,
"step": 40300
},
{
"epoch": 1.01,
"grad_norm": 5.023198127746582,
"learning_rate": 2.9958291457286437e-05,
"loss": 0.8332,
"step": 40400
},
{
"epoch": 1.01,
"grad_norm": 2.645123243331909,
"learning_rate": 2.9908040201005027e-05,
"loss": 0.825,
"step": 40500
},
{
"epoch": 1.01,
"grad_norm": 5.194004535675049,
"learning_rate": 2.985778894472362e-05,
"loss": 0.8645,
"step": 40600
},
{
"epoch": 1.02,
"grad_norm": 2.7656760215759277,
"learning_rate": 2.980753768844221e-05,
"loss": 0.8304,
"step": 40700
},
{
"epoch": 1.02,
"grad_norm": 4.818857192993164,
"learning_rate": 2.975728643216081e-05,
"loss": 0.8389,
"step": 40800
},
{
"epoch": 1.02,
"grad_norm": 7.637053966522217,
"learning_rate": 2.97070351758794e-05,
"loss": 0.8352,
"step": 40900
},
{
"epoch": 1.02,
"grad_norm": 7.042177200317383,
"learning_rate": 2.9656783919597993e-05,
"loss": 0.8479,
"step": 41000
},
{
"epoch": 1.02,
"eval_loss": 0.9259112477302551,
"eval_runtime": 93.1176,
"eval_samples_per_second": 10.739,
"eval_steps_per_second": 2.685,
"step": 41000
},
{
"epoch": 1.03,
"grad_norm": 3.9861555099487305,
"learning_rate": 2.9606532663316583e-05,
"loss": 0.8386,
"step": 41100
},
{
"epoch": 1.03,
"grad_norm": 4.692278861999512,
"learning_rate": 2.9556281407035176e-05,
"loss": 0.8527,
"step": 41200
},
{
"epoch": 1.03,
"grad_norm": 4.120222091674805,
"learning_rate": 2.9506030150753772e-05,
"loss": 0.8236,
"step": 41300
},
{
"epoch": 1.03,
"grad_norm": 5.731037139892578,
"learning_rate": 2.9455778894472362e-05,
"loss": 0.8105,
"step": 41400
},
{
"epoch": 1.04,
"grad_norm": 4.9300947189331055,
"learning_rate": 2.9405527638190956e-05,
"loss": 0.8306,
"step": 41500
},
{
"epoch": 1.04,
"grad_norm": 2.210872173309326,
"learning_rate": 2.9355276381909545e-05,
"loss": 0.8606,
"step": 41600
},
{
"epoch": 1.04,
"grad_norm": 7.957526683807373,
"learning_rate": 2.9305025125628145e-05,
"loss": 0.812,
"step": 41700
},
{
"epoch": 1.04,
"grad_norm": 3.215770959854126,
"learning_rate": 2.9255276381909546e-05,
"loss": 0.8445,
"step": 41800
},
{
"epoch": 1.05,
"grad_norm": 5.36251163482666,
"learning_rate": 2.9205025125628143e-05,
"loss": 0.776,
"step": 41900
},
{
"epoch": 1.05,
"grad_norm": 3.0531303882598877,
"learning_rate": 2.9154773869346736e-05,
"loss": 0.8524,
"step": 42000
},
{
"epoch": 1.05,
"eval_loss": 0.9591408967971802,
"eval_runtime": 93.1092,
"eval_samples_per_second": 10.74,
"eval_steps_per_second": 2.685,
"step": 42000
},
{
"epoch": 1.05,
"grad_norm": 5.086330413818359,
"learning_rate": 2.9104522613065326e-05,
"loss": 0.8047,
"step": 42100
},
{
"epoch": 1.05,
"grad_norm": 3.808764934539795,
"learning_rate": 2.905427135678392e-05,
"loss": 0.8172,
"step": 42200
},
{
"epoch": 1.06,
"grad_norm": 3.6660187244415283,
"learning_rate": 2.9004020100502516e-05,
"loss": 0.8388,
"step": 42300
},
{
"epoch": 1.06,
"grad_norm": 2.2802734375,
"learning_rate": 2.8954271356783917e-05,
"loss": 0.8623,
"step": 42400
},
{
"epoch": 1.06,
"grad_norm": 5.802855491638184,
"learning_rate": 2.8904020100502517e-05,
"loss": 0.8148,
"step": 42500
},
{
"epoch": 1.06,
"grad_norm": 3.55525541305542,
"learning_rate": 2.8853768844221107e-05,
"loss": 0.7829,
"step": 42600
},
{
"epoch": 1.07,
"grad_norm": 6.823301315307617,
"learning_rate": 2.88035175879397e-05,
"loss": 0.8241,
"step": 42700
},
{
"epoch": 1.07,
"grad_norm": 6.987720489501953,
"learning_rate": 2.875326633165829e-05,
"loss": 0.822,
"step": 42800
},
{
"epoch": 1.07,
"grad_norm": 7.806210517883301,
"learning_rate": 2.8703015075376886e-05,
"loss": 0.8335,
"step": 42900
},
{
"epoch": 1.07,
"grad_norm": 4.272769927978516,
"learning_rate": 2.865276381909548e-05,
"loss": 0.8329,
"step": 43000
},
{
"epoch": 1.07,
"eval_loss": 0.9682011604309082,
"eval_runtime": 93.0968,
"eval_samples_per_second": 10.742,
"eval_steps_per_second": 2.685,
"step": 43000
},
{
"epoch": 1.08,
"grad_norm": 4.718047618865967,
"learning_rate": 2.860251256281407e-05,
"loss": 0.8526,
"step": 43100
},
{
"epoch": 1.08,
"grad_norm": 4.184821128845215,
"learning_rate": 2.8552261306532663e-05,
"loss": 0.797,
"step": 43200
},
{
"epoch": 1.08,
"grad_norm": 4.138742923736572,
"learning_rate": 2.850201005025126e-05,
"loss": 0.9019,
"step": 43300
},
{
"epoch": 1.08,
"grad_norm": 4.0768938064575195,
"learning_rate": 2.8451758793969853e-05,
"loss": 0.8015,
"step": 43400
},
{
"epoch": 1.09,
"grad_norm": 5.367864608764648,
"learning_rate": 2.8401507537688443e-05,
"loss": 0.8598,
"step": 43500
},
{
"epoch": 1.09,
"grad_norm": 3.9556725025177,
"learning_rate": 2.8351256281407036e-05,
"loss": 0.8377,
"step": 43600
},
{
"epoch": 1.09,
"grad_norm": 3.8033978939056396,
"learning_rate": 2.8301005025125632e-05,
"loss": 0.8177,
"step": 43700
},
{
"epoch": 1.09,
"grad_norm": 5.000590801239014,
"learning_rate": 2.8250753768844222e-05,
"loss": 0.8052,
"step": 43800
},
{
"epoch": 1.1,
"grad_norm": 4.309756755828857,
"learning_rate": 2.8200502512562816e-05,
"loss": 0.8253,
"step": 43900
},
{
"epoch": 1.1,
"grad_norm": 5.882336616516113,
"learning_rate": 2.8150251256281405e-05,
"loss": 0.9011,
"step": 44000
},
{
"epoch": 1.1,
"eval_loss": 0.9591376185417175,
"eval_runtime": 93.0826,
"eval_samples_per_second": 10.743,
"eval_steps_per_second": 2.686,
"step": 44000
},
{
"epoch": 1.1,
"grad_norm": 3.259488821029663,
"learning_rate": 2.8100000000000005e-05,
"loss": 0.8566,
"step": 44100
},
{
"epoch": 1.1,
"grad_norm": 5.610021591186523,
"learning_rate": 2.8049748743718595e-05,
"loss": 0.7963,
"step": 44200
},
{
"epoch": 1.11,
"grad_norm": 1.6782532930374146,
"learning_rate": 2.799949748743719e-05,
"loss": 0.8566,
"step": 44300
},
{
"epoch": 1.11,
"grad_norm": 7.192352294921875,
"learning_rate": 2.794924623115578e-05,
"loss": 0.8228,
"step": 44400
},
{
"epoch": 1.11,
"grad_norm": 3.9157118797302246,
"learning_rate": 2.789899497487437e-05,
"loss": 0.8564,
"step": 44500
},
{
"epoch": 1.11,
"grad_norm": 9.507820129394531,
"learning_rate": 2.7848743718592968e-05,
"loss": 0.8324,
"step": 44600
},
{
"epoch": 1.12,
"grad_norm": 4.202729225158691,
"learning_rate": 2.7798492462311558e-05,
"loss": 0.862,
"step": 44700
},
{
"epoch": 1.12,
"grad_norm": NaN,
"learning_rate": 2.774874371859297e-05,
"loss": 0.7983,
"step": 44800
},
{
"epoch": 1.12,
"grad_norm": 6.222537517547607,
"learning_rate": 2.769849246231156e-05,
"loss": 0.8106,
"step": 44900
},
{
"epoch": 1.12,
"grad_norm": 6.28545618057251,
"learning_rate": 2.7648241206030152e-05,
"loss": 0.8323,
"step": 45000
},
{
"epoch": 1.12,
"eval_loss": 0.934741735458374,
"eval_runtime": 93.086,
"eval_samples_per_second": 10.743,
"eval_steps_per_second": 2.686,
"step": 45000
},
{
"epoch": 1.13,
"grad_norm": 3.990586757659912,
"learning_rate": 2.7597989949748742e-05,
"loss": 0.9218,
"step": 45100
},
{
"epoch": 1.13,
"grad_norm": 2.8158135414123535,
"learning_rate": 2.754773869346734e-05,
"loss": 0.8602,
"step": 45200
},
{
"epoch": 1.13,
"grad_norm": 6.327960014343262,
"learning_rate": 2.7497487437185932e-05,
"loss": 0.8272,
"step": 45300
},
{
"epoch": 1.14,
"grad_norm": 3.685887336730957,
"learning_rate": 2.7447236180904522e-05,
"loss": 0.8303,
"step": 45400
},
{
"epoch": 1.14,
"grad_norm": 2.856107234954834,
"learning_rate": 2.7396984924623115e-05,
"loss": 0.795,
"step": 45500
},
{
"epoch": 1.14,
"grad_norm": 2.6869564056396484,
"learning_rate": 2.7346733668341712e-05,
"loss": 0.8204,
"step": 45600
},
{
"epoch": 1.14,
"grad_norm": 3.407679557800293,
"learning_rate": 2.7296482412060305e-05,
"loss": 0.8228,
"step": 45700
},
{
"epoch": 1.15,
"grad_norm": 3.7909011840820312,
"learning_rate": 2.7246231155778895e-05,
"loss": 0.8445,
"step": 45800
},
{
"epoch": 1.15,
"grad_norm": 3.4164321422576904,
"learning_rate": 2.7195979899497488e-05,
"loss": 0.78,
"step": 45900
},
{
"epoch": 1.15,
"grad_norm": 6.0484232902526855,
"learning_rate": 2.7145728643216085e-05,
"loss": 0.8355,
"step": 46000
},
{
"epoch": 1.15,
"eval_loss": 0.92562335729599,
"eval_runtime": 93.1146,
"eval_samples_per_second": 10.739,
"eval_steps_per_second": 2.685,
"step": 46000
},
{
"epoch": 1.15,
"grad_norm": 3.4825520515441895,
"learning_rate": 2.7095477386934675e-05,
"loss": 0.8323,
"step": 46100
},
{
"epoch": 1.16,
"grad_norm": 3.930040121078491,
"learning_rate": 2.7045226130653268e-05,
"loss": 0.8139,
"step": 46200
},
{
"epoch": 1.16,
"grad_norm": 3.592379331588745,
"learning_rate": 2.6994974874371858e-05,
"loss": 0.8474,
"step": 46300
},
{
"epoch": 1.16,
"grad_norm": 5.713965892791748,
"learning_rate": 2.6944723618090458e-05,
"loss": 0.8282,
"step": 46400
},
{
"epoch": 1.16,
"grad_norm": 5.45329475402832,
"learning_rate": 2.6894472361809048e-05,
"loss": 0.8045,
"step": 46500
},
{
"epoch": 1.17,
"grad_norm": 4.350860118865967,
"learning_rate": 2.684422110552764e-05,
"loss": 0.8042,
"step": 46600
},
{
"epoch": 1.17,
"grad_norm": 3.444300413131714,
"learning_rate": 2.679396984924623e-05,
"loss": 0.7957,
"step": 46700
},
{
"epoch": 1.17,
"grad_norm": 4.588834285736084,
"learning_rate": 2.6743718592964824e-05,
"loss": 0.8334,
"step": 46800
},
{
"epoch": 1.17,
"grad_norm": 4.566316604614258,
"learning_rate": 2.669346733668342e-05,
"loss": 0.8518,
"step": 46900
},
{
"epoch": 1.18,
"grad_norm": 6.886305332183838,
"learning_rate": 2.664321608040201e-05,
"loss": 0.824,
"step": 47000
},
{
"epoch": 1.18,
"eval_loss": 0.922966718673706,
"eval_runtime": 93.1638,
"eval_samples_per_second": 10.734,
"eval_steps_per_second": 2.683,
"step": 47000
},
{
"epoch": 1.18,
"grad_norm": 6.220484256744385,
"learning_rate": 2.6592964824120604e-05,
"loss": 0.8137,
"step": 47100
},
{
"epoch": 1.18,
"grad_norm": 3.9158473014831543,
"learning_rate": 2.6542713567839193e-05,
"loss": 0.7684,
"step": 47200
},
{
"epoch": 1.18,
"grad_norm": 9.41419506072998,
"learning_rate": 2.6492462311557793e-05,
"loss": 0.8447,
"step": 47300
},
{
"epoch": 1.19,
"grad_norm": 4.423808574676514,
"learning_rate": 2.6442211055276383e-05,
"loss": 0.7795,
"step": 47400
},
{
"epoch": 1.19,
"grad_norm": 5.699799537658691,
"learning_rate": 2.6391959798994977e-05,
"loss": 0.8444,
"step": 47500
},
{
"epoch": 1.19,
"grad_norm": 8.215849876403809,
"learning_rate": 2.6341708542713566e-05,
"loss": 0.8164,
"step": 47600
},
{
"epoch": 1.19,
"grad_norm": 6.393054485321045,
"learning_rate": 2.6291457286432163e-05,
"loss": 0.8563,
"step": 47700
},
{
"epoch": 1.2,
"grad_norm": 5.494711875915527,
"learning_rate": 2.6241206030150756e-05,
"loss": 0.7894,
"step": 47800
},
{
"epoch": 1.2,
"grad_norm": 7.110975742340088,
"learning_rate": 2.6190954773869346e-05,
"loss": 0.8169,
"step": 47900
},
{
"epoch": 1.2,
"grad_norm": 6.756740093231201,
"learning_rate": 2.614070351758794e-05,
"loss": 0.8063,
"step": 48000
},
{
"epoch": 1.2,
"eval_loss": 0.959385871887207,
"eval_runtime": 93.1749,
"eval_samples_per_second": 10.733,
"eval_steps_per_second": 2.683,
"step": 48000
},
{
"epoch": 1.2,
"grad_norm": 4.6598615646362305,
"learning_rate": 2.6090452261306536e-05,
"loss": 0.7914,
"step": 48100
},
{
"epoch": 1.21,
"grad_norm": 3.9379007816314697,
"learning_rate": 2.604020100502513e-05,
"loss": 0.8423,
"step": 48200
},
{
"epoch": 1.21,
"grad_norm": 10.248889923095703,
"learning_rate": 2.598994974874372e-05,
"loss": 0.81,
"step": 48300
},
{
"epoch": 1.21,
"grad_norm": 4.938050270080566,
"learning_rate": 2.5939698492462312e-05,
"loss": 0.8624,
"step": 48400
},
{
"epoch": 1.21,
"grad_norm": 7.004148960113525,
"learning_rate": 2.588944723618091e-05,
"loss": 0.74,
"step": 48500
},
{
"epoch": 1.22,
"grad_norm": 1.0052399635314941,
"learning_rate": 2.58391959798995e-05,
"loss": 0.7998,
"step": 48600
},
{
"epoch": 1.22,
"grad_norm": 5.938643455505371,
"learning_rate": 2.5788944723618092e-05,
"loss": 0.7688,
"step": 48700
},
{
"epoch": 1.22,
"grad_norm": 4.684143543243408,
"learning_rate": 2.5738693467336682e-05,
"loss": 0.83,
"step": 48800
},
{
"epoch": 1.22,
"grad_norm": 4.721595764160156,
"learning_rate": 2.5688442211055282e-05,
"loss": 0.8519,
"step": 48900
},
{
"epoch": 1.23,
"grad_norm": 9.128904342651367,
"learning_rate": 2.5638190954773872e-05,
"loss": 0.832,
"step": 49000
},
{
"epoch": 1.23,
"eval_loss": 0.9091083407402039,
"eval_runtime": 93.0598,
"eval_samples_per_second": 10.746,
"eval_steps_per_second": 2.686,
"step": 49000
},
{
"epoch": 1.23,
"grad_norm": 4.281224727630615,
"learning_rate": 2.558844221105528e-05,
"loss": 0.8454,
"step": 49100
},
{
"epoch": 1.23,
"grad_norm": 4.15258264541626,
"learning_rate": 2.5538190954773873e-05,
"loss": 0.8534,
"step": 49200
},
{
"epoch": 1.23,
"grad_norm": 2.726292610168457,
"learning_rate": 2.5487939698492463e-05,
"loss": 0.8331,
"step": 49300
},
{
"epoch": 1.23,
"grad_norm": 2.922222852706909,
"learning_rate": 2.543819095477387e-05,
"loss": 0.7906,
"step": 49400
},
{
"epoch": 1.24,
"grad_norm": 6.58457088470459,
"learning_rate": 2.5387939698492464e-05,
"loss": 0.7655,
"step": 49500
},
{
"epoch": 1.24,
"grad_norm": 4.855134963989258,
"learning_rate": 2.5337688442211053e-05,
"loss": 0.8446,
"step": 49600
},
{
"epoch": 1.24,
"grad_norm": 5.20013427734375,
"learning_rate": 2.5287437185929653e-05,
"loss": 0.8014,
"step": 49700
},
{
"epoch": 1.25,
"grad_norm": 3.2615082263946533,
"learning_rate": 2.5237185929648243e-05,
"loss": 0.8108,
"step": 49800
},
{
"epoch": 1.25,
"grad_norm": 4.964284896850586,
"learning_rate": 2.5186934673366837e-05,
"loss": 0.785,
"step": 49900
},
{
"epoch": 1.25,
"grad_norm": 4.1435933113098145,
"learning_rate": 2.5136683417085426e-05,
"loss": 0.7955,
"step": 50000
},
{
"epoch": 1.25,
"eval_loss": 0.9666250944137573,
"eval_runtime": 93.1566,
"eval_samples_per_second": 10.735,
"eval_steps_per_second": 2.684,
"step": 50000
},
{
"epoch": 1.25,
"grad_norm": 6.780755043029785,
"learning_rate": 2.508643216080402e-05,
"loss": 0.8035,
"step": 50100
},
{
"epoch": 1.25,
"grad_norm": 3.228900909423828,
"learning_rate": 2.5036180904522616e-05,
"loss": 0.8456,
"step": 50200
},
{
"epoch": 1.26,
"grad_norm": 2.7805113792419434,
"learning_rate": 2.4985929648241206e-05,
"loss": 0.8323,
"step": 50300
},
{
"epoch": 1.26,
"grad_norm": 6.089780330657959,
"learning_rate": 2.49356783919598e-05,
"loss": 0.8601,
"step": 50400
},
{
"epoch": 1.26,
"grad_norm": 2.9426724910736084,
"learning_rate": 2.4885427135678393e-05,
"loss": 0.8239,
"step": 50500
},
{
"epoch": 1.27,
"grad_norm": 6.784173011779785,
"learning_rate": 2.4835175879396986e-05,
"loss": 0.8408,
"step": 50600
},
{
"epoch": 1.27,
"grad_norm": 4.918420791625977,
"learning_rate": 2.478492462311558e-05,
"loss": 0.8547,
"step": 50700
},
{
"epoch": 1.27,
"grad_norm": 5.326765537261963,
"learning_rate": 2.4734673366834172e-05,
"loss": 0.8169,
"step": 50800
},
{
"epoch": 1.27,
"grad_norm": 7.219261646270752,
"learning_rate": 2.4684422110552766e-05,
"loss": 0.7847,
"step": 50900
},
{
"epoch": 1.27,
"grad_norm": 8.008410453796387,
"learning_rate": 2.463417085427136e-05,
"loss": 0.8481,
"step": 51000
},
{
"epoch": 1.27,
"eval_loss": 0.907688319683075,
"eval_runtime": 93.1288,
"eval_samples_per_second": 10.738,
"eval_steps_per_second": 2.684,
"step": 51000
},
{
"epoch": 1.28,
"grad_norm": 6.92462682723999,
"learning_rate": 2.4583919597989952e-05,
"loss": 0.8221,
"step": 51100
},
{
"epoch": 1.28,
"grad_norm": 16.007116317749023,
"learning_rate": 2.4533668341708542e-05,
"loss": 0.8229,
"step": 51200
},
{
"epoch": 1.28,
"grad_norm": 7.9966206550598145,
"learning_rate": 2.448341708542714e-05,
"loss": 0.8015,
"step": 51300
},
{
"epoch": 1.28,
"grad_norm": 2.7600510120391846,
"learning_rate": 2.443316582914573e-05,
"loss": 0.7778,
"step": 51400
},
{
"epoch": 1.29,
"grad_norm": 6.640542984008789,
"learning_rate": 2.4382914572864325e-05,
"loss": 0.7604,
"step": 51500
},
{
"epoch": 1.29,
"grad_norm": 2.193493127822876,
"learning_rate": 2.4332663316582915e-05,
"loss": 0.8104,
"step": 51600
},
{
"epoch": 1.29,
"grad_norm": 2.484828472137451,
"learning_rate": 2.428241206030151e-05,
"loss": 0.8317,
"step": 51700
},
{
"epoch": 1.29,
"grad_norm": 7.014501571655273,
"learning_rate": 2.42321608040201e-05,
"loss": 0.8444,
"step": 51800
},
{
"epoch": 1.3,
"grad_norm": 7.0019330978393555,
"learning_rate": 2.4181909547738695e-05,
"loss": 0.8316,
"step": 51900
},
{
"epoch": 1.3,
"grad_norm": 4.065704822540283,
"learning_rate": 2.4131658291457288e-05,
"loss": 0.7758,
"step": 52000
},
{
"epoch": 1.3,
"eval_loss": 0.9396142363548279,
"eval_runtime": 93.149,
"eval_samples_per_second": 10.735,
"eval_steps_per_second": 2.684,
"step": 52000
},
{
"epoch": 1.3,
"grad_norm": 13.064213752746582,
"learning_rate": 2.408140703517588e-05,
"loss": 0.7676,
"step": 52100
},
{
"epoch": 1.3,
"grad_norm": 2.7535459995269775,
"learning_rate": 2.4031155778894474e-05,
"loss": 0.8109,
"step": 52200
},
{
"epoch": 1.31,
"grad_norm": 5.052075386047363,
"learning_rate": 2.3980904522613064e-05,
"loss": 0.8604,
"step": 52300
},
{
"epoch": 1.31,
"grad_norm": 7.920835971832275,
"learning_rate": 2.393065326633166e-05,
"loss": 0.8534,
"step": 52400
},
{
"epoch": 1.31,
"grad_norm": 6.852170944213867,
"learning_rate": 2.388040201005025e-05,
"loss": 0.7455,
"step": 52500
},
{
"epoch": 1.31,
"grad_norm": 3.4216814041137695,
"learning_rate": 2.3830150753768847e-05,
"loss": 0.8026,
"step": 52600
},
{
"epoch": 1.32,
"grad_norm": 7.002025127410889,
"learning_rate": 2.3779899497487437e-05,
"loss": 0.8337,
"step": 52700
},
{
"epoch": 1.32,
"grad_norm": 2.7925586700439453,
"learning_rate": 2.372964824120603e-05,
"loss": 0.7956,
"step": 52800
},
{
"epoch": 1.32,
"grad_norm": 5.271830081939697,
"learning_rate": 2.3679396984924624e-05,
"loss": 0.8455,
"step": 52900
},
{
"epoch": 1.32,
"grad_norm": 4.730651378631592,
"learning_rate": 2.3629145728643217e-05,
"loss": 0.8202,
"step": 53000
},
{
"epoch": 1.32,
"eval_loss": 0.9165379405021667,
"eval_runtime": 93.1667,
"eval_samples_per_second": 10.733,
"eval_steps_per_second": 2.683,
"step": 53000
},
{
"epoch": 1.33,
"grad_norm": 5.654317378997803,
"learning_rate": 2.3579396984924625e-05,
"loss": 0.8762,
"step": 53100
},
{
"epoch": 1.33,
"grad_norm": 8.263214111328125,
"learning_rate": 2.3529145728643218e-05,
"loss": 0.753,
"step": 53200
},
{
"epoch": 1.33,
"grad_norm": 8.440163612365723,
"learning_rate": 2.3478894472361808e-05,
"loss": 0.7939,
"step": 53300
},
{
"epoch": 1.33,
"grad_norm": 4.141328811645508,
"learning_rate": 2.3428643216080404e-05,
"loss": 0.8289,
"step": 53400
},
{
"epoch": 1.34,
"grad_norm": 3.525546073913574,
"learning_rate": 2.3378391959798994e-05,
"loss": 0.8257,
"step": 53500
},
{
"epoch": 1.34,
"grad_norm": 2.550956964492798,
"learning_rate": 2.332814070351759e-05,
"loss": 0.7743,
"step": 53600
},
{
"epoch": 1.34,
"grad_norm": 3.5744988918304443,
"learning_rate": 2.327788944723618e-05,
"loss": 0.8341,
"step": 53700
},
{
"epoch": 1.34,
"grad_norm": 3.221747398376465,
"learning_rate": 2.3227638190954777e-05,
"loss": 0.8191,
"step": 53800
},
{
"epoch": 1.35,
"grad_norm": 5.098736763000488,
"learning_rate": 2.3177386934673367e-05,
"loss": 0.7909,
"step": 53900
},
{
"epoch": 1.35,
"grad_norm": 3.8236818313598633,
"learning_rate": 2.312713567839196e-05,
"loss": 0.7992,
"step": 54000
},
{
"epoch": 1.35,
"eval_loss": 0.9016970992088318,
"eval_runtime": 93.1656,
"eval_samples_per_second": 10.734,
"eval_steps_per_second": 2.683,
"step": 54000
},
{
"epoch": 1.35,
"grad_norm": 5.223961353302002,
"learning_rate": 2.3076884422110554e-05,
"loss": 0.7877,
"step": 54100
},
{
"epoch": 1.35,
"grad_norm": 2.1510214805603027,
"learning_rate": 2.3026633165829147e-05,
"loss": 0.8184,
"step": 54200
},
{
"epoch": 1.36,
"grad_norm": 4.342808723449707,
"learning_rate": 2.297638190954774e-05,
"loss": 0.7532,
"step": 54300
},
{
"epoch": 1.36,
"grad_norm": 3.9258158206939697,
"learning_rate": 2.2926130653266333e-05,
"loss": 0.8282,
"step": 54400
},
{
"epoch": 1.36,
"grad_norm": 8.203388214111328,
"learning_rate": 2.2875879396984927e-05,
"loss": 0.7674,
"step": 54500
},
{
"epoch": 1.36,
"grad_norm": 7.974172592163086,
"learning_rate": 2.282562814070352e-05,
"loss": 0.8005,
"step": 54600
},
{
"epoch": 1.37,
"grad_norm": 1.6733659505844116,
"learning_rate": 2.2775376884422113e-05,
"loss": 0.8415,
"step": 54700
},
{
"epoch": 1.37,
"grad_norm": 4.691416263580322,
"learning_rate": 2.2725125628140703e-05,
"loss": 0.7929,
"step": 54800
},
{
"epoch": 1.37,
"grad_norm": 8.121203422546387,
"learning_rate": 2.2674874371859296e-05,
"loss": 0.8706,
"step": 54900
},
{
"epoch": 1.38,
"grad_norm": 5.2276105880737305,
"learning_rate": 2.262462311557789e-05,
"loss": 0.8191,
"step": 55000
},
{
"epoch": 1.38,
"eval_loss": 0.8968465328216553,
"eval_runtime": 93.1186,
"eval_samples_per_second": 10.739,
"eval_steps_per_second": 2.685,
"step": 55000
},
{
"epoch": 1.38,
"grad_norm": 10.938821792602539,
"learning_rate": 2.2574371859296483e-05,
"loss": 0.8166,
"step": 55100
},
{
"epoch": 1.38,
"grad_norm": 4.444934368133545,
"learning_rate": 2.2524120603015076e-05,
"loss": 0.8266,
"step": 55200
},
{
"epoch": 1.38,
"grad_norm": 3.187230110168457,
"learning_rate": 2.247386934673367e-05,
"loss": 0.7921,
"step": 55300
},
{
"epoch": 1.39,
"grad_norm": 6.6117048263549805,
"learning_rate": 2.2423618090452262e-05,
"loss": 0.7604,
"step": 55400
},
{
"epoch": 1.39,
"grad_norm": 1.8173909187316895,
"learning_rate": 2.237386934673367e-05,
"loss": 0.8627,
"step": 55500
},
{
"epoch": 1.39,
"grad_norm": 4.650048732757568,
"learning_rate": 2.232361809045226e-05,
"loss": 0.7781,
"step": 55600
},
{
"epoch": 1.39,
"grad_norm": 2.5037543773651123,
"learning_rate": 2.2273366834170857e-05,
"loss": 0.8093,
"step": 55700
},
{
"epoch": 1.4,
"grad_norm": 1.6862285137176514,
"learning_rate": 2.2223115577889446e-05,
"loss": 0.8567,
"step": 55800
},
{
"epoch": 1.4,
"grad_norm": 7.282008647918701,
"learning_rate": 2.2172864321608043e-05,
"loss": 0.838,
"step": 55900
},
{
"epoch": 1.4,
"grad_norm": 5.399494647979736,
"learning_rate": 2.2122613065326633e-05,
"loss": 0.8188,
"step": 56000
},
{
"epoch": 1.4,
"eval_loss": 0.9193799495697021,
"eval_runtime": 93.1427,
"eval_samples_per_second": 10.736,
"eval_steps_per_second": 2.684,
"step": 56000
},
{
"epoch": 1.4,
"grad_norm": 8.599305152893066,
"learning_rate": 2.2072361809045226e-05,
"loss": 0.8178,
"step": 56100
},
{
"epoch": 1.41,
"grad_norm": 7.157848834991455,
"learning_rate": 2.202211055276382e-05,
"loss": 0.8151,
"step": 56200
},
{
"epoch": 1.41,
"grad_norm": 6.2992448806762695,
"learning_rate": 2.1971859296482413e-05,
"loss": 0.8709,
"step": 56300
},
{
"epoch": 1.41,
"grad_norm": 4.634735584259033,
"learning_rate": 2.1921608040201006e-05,
"loss": 0.8394,
"step": 56400
},
{
"epoch": 1.41,
"grad_norm": 3.598958969116211,
"learning_rate": 2.18713567839196e-05,
"loss": 0.8497,
"step": 56500
},
{
"epoch": 1.42,
"grad_norm": 5.21018648147583,
"learning_rate": 2.1821105527638192e-05,
"loss": 0.8176,
"step": 56600
},
{
"epoch": 1.42,
"grad_norm": 4.91611909866333,
"learning_rate": 2.1770854271356786e-05,
"loss": 0.7918,
"step": 56700
},
{
"epoch": 1.42,
"grad_norm": 3.1374623775482178,
"learning_rate": 2.172060301507538e-05,
"loss": 0.816,
"step": 56800
},
{
"epoch": 1.42,
"grad_norm": 14.474822998046875,
"learning_rate": 2.1670351758793972e-05,
"loss": 0.8669,
"step": 56900
},
{
"epoch": 1.43,
"grad_norm": 3.6005401611328125,
"learning_rate": 2.1620100502512562e-05,
"loss": 0.8186,
"step": 57000
},
{
"epoch": 1.43,
"eval_loss": 0.9354815483093262,
"eval_runtime": 93.1537,
"eval_samples_per_second": 10.735,
"eval_steps_per_second": 2.684,
"step": 57000
},
{
"epoch": 1.43,
"grad_norm": 0.3770127296447754,
"learning_rate": 2.156984924623116e-05,
"loss": 0.7991,
"step": 57100
},
{
"epoch": 1.43,
"grad_norm": 6.312694072723389,
"learning_rate": 2.151959798994975e-05,
"loss": 0.8021,
"step": 57200
},
{
"epoch": 1.43,
"grad_norm": 1.813674807548523,
"learning_rate": 2.1469346733668345e-05,
"loss": 0.7525,
"step": 57300
},
{
"epoch": 1.44,
"grad_norm": 4.475043773651123,
"learning_rate": 2.1419095477386935e-05,
"loss": 0.8184,
"step": 57400
},
{
"epoch": 1.44,
"grad_norm": 8.855767250061035,
"learning_rate": 2.1368844221105528e-05,
"loss": 0.7819,
"step": 57500
},
{
"epoch": 1.44,
"grad_norm": 3.376981735229492,
"learning_rate": 2.131859296482412e-05,
"loss": 0.7631,
"step": 57600
},
{
"epoch": 1.44,
"grad_norm": 2.3325247764587402,
"learning_rate": 2.1268341708542715e-05,
"loss": 0.8452,
"step": 57700
},
{
"epoch": 1.45,
"grad_norm": 6.577011585235596,
"learning_rate": 2.1218592964824122e-05,
"loss": 0.8137,
"step": 57800
},
{
"epoch": 1.45,
"grad_norm": 4.225331783294678,
"learning_rate": 2.1168341708542712e-05,
"loss": 0.8107,
"step": 57900
},
{
"epoch": 1.45,
"grad_norm": 3.6176490783691406,
"learning_rate": 2.111809045226131e-05,
"loss": 0.8553,
"step": 58000
},
{
"epoch": 1.45,
"eval_loss": 0.8989996314048767,
"eval_runtime": 93.1531,
"eval_samples_per_second": 10.735,
"eval_steps_per_second": 2.684,
"step": 58000
},
{
"epoch": 1.45,
"grad_norm": 9.829930305480957,
"learning_rate": 2.10678391959799e-05,
"loss": 0.7607,
"step": 58100
},
{
"epoch": 1.46,
"grad_norm": 6.14058780670166,
"learning_rate": 2.1017587939698495e-05,
"loss": 0.8465,
"step": 58200
},
{
"epoch": 1.46,
"grad_norm": 2.7666096687316895,
"learning_rate": 2.0967336683417085e-05,
"loss": 0.7562,
"step": 58300
},
{
"epoch": 1.46,
"grad_norm": 7.080272197723389,
"learning_rate": 2.091708542713568e-05,
"loss": 0.7751,
"step": 58400
},
{
"epoch": 1.46,
"grad_norm": 5.646043300628662,
"learning_rate": 2.086683417085427e-05,
"loss": 0.8789,
"step": 58500
},
{
"epoch": 1.47,
"grad_norm": 5.366353511810303,
"learning_rate": 2.0816582914572865e-05,
"loss": 0.8433,
"step": 58600
},
{
"epoch": 1.47,
"grad_norm": 9.850458145141602,
"learning_rate": 2.0766331658291458e-05,
"loss": 0.7937,
"step": 58700
},
{
"epoch": 1.47,
"grad_norm": 6.383124351501465,
"learning_rate": 2.071608040201005e-05,
"loss": 0.791,
"step": 58800
},
{
"epoch": 1.47,
"grad_norm": 4.849941730499268,
"learning_rate": 2.0665829145728645e-05,
"loss": 0.7229,
"step": 58900
},
{
"epoch": 1.48,
"grad_norm": 4.7146477699279785,
"learning_rate": 2.0615577889447238e-05,
"loss": 0.8076,
"step": 59000
},
{
"epoch": 1.48,
"eval_loss": 0.8675306439399719,
"eval_runtime": 93.1423,
"eval_samples_per_second": 10.736,
"eval_steps_per_second": 2.684,
"step": 59000
},
{
"epoch": 1.48,
"grad_norm": 3.0342600345611572,
"learning_rate": 2.056532663316583e-05,
"loss": 0.8242,
"step": 59100
},
{
"epoch": 1.48,
"grad_norm": 10.506383895874023,
"learning_rate": 2.0515075376884424e-05,
"loss": 0.7938,
"step": 59200
},
{
"epoch": 1.48,
"grad_norm": 12.584467887878418,
"learning_rate": 2.0464824120603014e-05,
"loss": 0.7225,
"step": 59300
},
{
"epoch": 1.48,
"grad_norm": 5.804998397827148,
"learning_rate": 2.041457286432161e-05,
"loss": 0.7717,
"step": 59400
},
{
"epoch": 1.49,
"grad_norm": 2.937588691711426,
"learning_rate": 2.03643216080402e-05,
"loss": 0.7516,
"step": 59500
},
{
"epoch": 1.49,
"grad_norm": 5.086533546447754,
"learning_rate": 2.0314070351758797e-05,
"loss": 0.7484,
"step": 59600
},
{
"epoch": 1.49,
"grad_norm": 5.561635971069336,
"learning_rate": 2.0263819095477387e-05,
"loss": 0.7737,
"step": 59700
},
{
"epoch": 1.5,
"grad_norm": 4.539155006408691,
"learning_rate": 2.0213567839195984e-05,
"loss": 0.7645,
"step": 59800
},
{
"epoch": 1.5,
"grad_norm": 7.240508079528809,
"learning_rate": 2.0163316582914574e-05,
"loss": 0.8262,
"step": 59900
},
{
"epoch": 1.5,
"grad_norm": 4.487994194030762,
"learning_rate": 2.0113065326633167e-05,
"loss": 0.851,
"step": 60000
},
{
"epoch": 1.5,
"eval_loss": 0.9379053711891174,
"eval_runtime": 93.1644,
"eval_samples_per_second": 10.734,
"eval_steps_per_second": 2.683,
"step": 60000
},
{
"epoch": 1.5,
"grad_norm": 3.100945234298706,
"learning_rate": 2.006281407035176e-05,
"loss": 0.7998,
"step": 60100
},
{
"epoch": 1.5,
"grad_norm": 5.316318035125732,
"learning_rate": 2.001256281407035e-05,
"loss": 0.8322,
"step": 60200
},
{
"epoch": 1.51,
"grad_norm": 1.705694556236267,
"learning_rate": 1.9962311557788947e-05,
"loss": 0.8099,
"step": 60300
},
{
"epoch": 1.51,
"grad_norm": 7.9559783935546875,
"learning_rate": 1.9912060301507536e-05,
"loss": 0.8489,
"step": 60400
},
{
"epoch": 1.51,
"grad_norm": 9.180657386779785,
"learning_rate": 1.9861809045226133e-05,
"loss": 0.8327,
"step": 60500
},
{
"epoch": 1.52,
"grad_norm": 4.283862113952637,
"learning_rate": 1.9811557788944723e-05,
"loss": 0.7234,
"step": 60600
},
{
"epoch": 1.52,
"grad_norm": 5.539619445800781,
"learning_rate": 1.976130653266332e-05,
"loss": 0.8569,
"step": 60700
},
{
"epoch": 1.52,
"grad_norm": 9.324729919433594,
"learning_rate": 1.971105527638191e-05,
"loss": 0.7977,
"step": 60800
},
{
"epoch": 1.52,
"grad_norm": 4.896857738494873,
"learning_rate": 1.9660804020100503e-05,
"loss": 0.7394,
"step": 60900
},
{
"epoch": 1.52,
"grad_norm": 4.1820831298828125,
"learning_rate": 1.9610552763819096e-05,
"loss": 0.7694,
"step": 61000
},
{
"epoch": 1.52,
"eval_loss": 0.8731037378311157,
"eval_runtime": 93.1073,
"eval_samples_per_second": 10.74,
"eval_steps_per_second": 2.685,
"step": 61000
},
{
"epoch": 1.53,
"grad_norm": 4.465863227844238,
"learning_rate": 1.956030150753769e-05,
"loss": 0.7947,
"step": 61100
},
{
"epoch": 1.53,
"grad_norm": 5.7435526847839355,
"learning_rate": 1.9510050251256282e-05,
"loss": 0.7457,
"step": 61200
},
{
"epoch": 1.53,
"grad_norm": 7.473779201507568,
"learning_rate": 1.9459798994974876e-05,
"loss": 0.8125,
"step": 61300
},
{
"epoch": 1.54,
"grad_norm": 5.428642749786377,
"learning_rate": 1.940954773869347e-05,
"loss": 0.7669,
"step": 61400
},
{
"epoch": 1.54,
"grad_norm": 4.349414348602295,
"learning_rate": 1.9359296482412062e-05,
"loss": 0.816,
"step": 61500
},
{
"epoch": 1.54,
"grad_norm": 7.700584888458252,
"learning_rate": 1.9309045226130655e-05,
"loss": 0.7852,
"step": 61600
},
{
"epoch": 1.54,
"grad_norm": 5.3439621925354,
"learning_rate": 1.925879396984925e-05,
"loss": 0.8217,
"step": 61700
},
{
"epoch": 1.54,
"grad_norm": 5.036532878875732,
"learning_rate": 1.920854271356784e-05,
"loss": 0.7393,
"step": 61800
},
{
"epoch": 1.55,
"grad_norm": 17.64934730529785,
"learning_rate": 1.915879396984925e-05,
"loss": 0.8063,
"step": 61900
},
{
"epoch": 1.55,
"grad_norm": 5.335484027862549,
"learning_rate": 1.910854271356784e-05,
"loss": 0.8138,
"step": 62000
},
{
"epoch": 1.55,
"eval_loss": 0.8697061538696289,
"eval_runtime": 93.1231,
"eval_samples_per_second": 10.738,
"eval_steps_per_second": 2.685,
"step": 62000
},
{
"epoch": 1.55,
"grad_norm": 7.09649133682251,
"learning_rate": 1.9058291457286433e-05,
"loss": 0.7725,
"step": 62100
},
{
"epoch": 1.56,
"grad_norm": 8.522541046142578,
"learning_rate": 1.9008040201005026e-05,
"loss": 0.7777,
"step": 62200
},
{
"epoch": 1.56,
"grad_norm": 7.422926425933838,
"learning_rate": 1.895778894472362e-05,
"loss": 0.8084,
"step": 62300
},
{
"epoch": 1.56,
"grad_norm": 4.379819393157959,
"learning_rate": 1.8907537688442212e-05,
"loss": 0.7781,
"step": 62400
},
{
"epoch": 1.56,
"grad_norm": 3.58109450340271,
"learning_rate": 1.8857286432160806e-05,
"loss": 0.7861,
"step": 62500
},
{
"epoch": 1.56,
"grad_norm": 7.983757019042969,
"learning_rate": 1.88070351758794e-05,
"loss": 0.7955,
"step": 62600
},
{
"epoch": 1.57,
"grad_norm": 4.671863555908203,
"learning_rate": 1.8756783919597992e-05,
"loss": 0.8038,
"step": 62700
},
{
"epoch": 1.57,
"grad_norm": 6.778916358947754,
"learning_rate": 1.8706532663316585e-05,
"loss": 0.7582,
"step": 62800
},
{
"epoch": 1.57,
"grad_norm": 3.6116878986358643,
"learning_rate": 1.8656281407035175e-05,
"loss": 0.762,
"step": 62900
},
{
"epoch": 1.57,
"grad_norm": 7.216502666473389,
"learning_rate": 1.8606532663316583e-05,
"loss": 0.7795,
"step": 63000
},
{
"epoch": 1.57,
"eval_loss": 0.9283447861671448,
"eval_runtime": 93.0979,
"eval_samples_per_second": 10.741,
"eval_steps_per_second": 2.685,
"step": 63000
},
{
"epoch": 1.58,
"grad_norm": 7.127340316772461,
"learning_rate": 1.855628140703518e-05,
"loss": 0.83,
"step": 63100
},
{
"epoch": 1.58,
"grad_norm": 2.5859460830688477,
"learning_rate": 1.850603015075377e-05,
"loss": 0.782,
"step": 63200
},
{
"epoch": 1.58,
"grad_norm": 9.9983491897583,
"learning_rate": 1.8455778894472363e-05,
"loss": 0.7793,
"step": 63300
},
{
"epoch": 1.58,
"grad_norm": 3.28371000289917,
"learning_rate": 1.8405527638190956e-05,
"loss": 0.8048,
"step": 63400
},
{
"epoch": 1.59,
"grad_norm": 9.143861770629883,
"learning_rate": 1.835527638190955e-05,
"loss": 0.8056,
"step": 63500
},
{
"epoch": 1.59,
"grad_norm": 0.5545308589935303,
"learning_rate": 1.8305025125628142e-05,
"loss": 0.7864,
"step": 63600
},
{
"epoch": 1.59,
"grad_norm": 5.879006385803223,
"learning_rate": 1.8254773869346732e-05,
"loss": 0.814,
"step": 63700
},
{
"epoch": 1.59,
"grad_norm": 8.02795124053955,
"learning_rate": 1.820452261306533e-05,
"loss": 0.8679,
"step": 63800
},
{
"epoch": 1.6,
"grad_norm": 8.666707038879395,
"learning_rate": 1.815427135678392e-05,
"loss": 0.8548,
"step": 63900
},
{
"epoch": 1.6,
"grad_norm": 2.4890201091766357,
"learning_rate": 1.8104020100502515e-05,
"loss": 0.861,
"step": 64000
},
{
"epoch": 1.6,
"eval_loss": 0.9001312255859375,
"eval_runtime": 93.123,
"eval_samples_per_second": 10.738,
"eval_steps_per_second": 2.685,
"step": 64000
},
{
"epoch": 1.6,
"grad_norm": 5.596157073974609,
"learning_rate": 1.8053768844221105e-05,
"loss": 0.8193,
"step": 64100
},
{
"epoch": 1.6,
"grad_norm": 6.049336910247803,
"learning_rate": 1.80035175879397e-05,
"loss": 0.783,
"step": 64200
},
{
"epoch": 1.61,
"grad_norm": 3.6119985580444336,
"learning_rate": 1.795326633165829e-05,
"loss": 0.7983,
"step": 64300
},
{
"epoch": 1.61,
"grad_norm": 6.655552387237549,
"learning_rate": 1.7903015075376885e-05,
"loss": 0.7947,
"step": 64400
},
{
"epoch": 1.61,
"grad_norm": 3.9695820808410645,
"learning_rate": 1.7852763819095478e-05,
"loss": 0.8059,
"step": 64500
},
{
"epoch": 1.61,
"grad_norm": 5.60053014755249,
"learning_rate": 1.780251256281407e-05,
"loss": 0.7523,
"step": 64600
},
{
"epoch": 1.62,
"grad_norm": 3.6050097942352295,
"learning_rate": 1.7752261306532665e-05,
"loss": 0.8396,
"step": 64700
},
{
"epoch": 1.62,
"grad_norm": 9.066208839416504,
"learning_rate": 1.7702010050251258e-05,
"loss": 0.8064,
"step": 64800
},
{
"epoch": 1.62,
"grad_norm": 10.357030868530273,
"learning_rate": 1.765175879396985e-05,
"loss": 0.7886,
"step": 64900
},
{
"epoch": 1.62,
"grad_norm": 5.488982677459717,
"learning_rate": 1.7601507537688444e-05,
"loss": 0.8075,
"step": 65000
},
{
"epoch": 1.62,
"eval_loss": 0.9325853586196899,
"eval_runtime": 93.1385,
"eval_samples_per_second": 10.737,
"eval_steps_per_second": 2.684,
"step": 65000
},
{
"epoch": 1.63,
"grad_norm": 4.819763660430908,
"learning_rate": 1.7551256281407034e-05,
"loss": 0.7612,
"step": 65100
},
{
"epoch": 1.63,
"grad_norm": 7.374830722808838,
"learning_rate": 1.750100502512563e-05,
"loss": 0.7867,
"step": 65200
},
{
"epoch": 1.63,
"grad_norm": 10.001041412353516,
"learning_rate": 1.745075376884422e-05,
"loss": 0.8175,
"step": 65300
},
{
"epoch": 1.64,
"grad_norm": 6.270244598388672,
"learning_rate": 1.7400502512562817e-05,
"loss": 0.7396,
"step": 65400
},
{
"epoch": 1.64,
"grad_norm": 4.1896772384643555,
"learning_rate": 1.7350251256281407e-05,
"loss": 0.7738,
"step": 65500
},
{
"epoch": 1.64,
"grad_norm": 6.399722099304199,
"learning_rate": 1.73e-05,
"loss": 0.806,
"step": 65600
},
{
"epoch": 1.64,
"grad_norm": 3.8990511894226074,
"learning_rate": 1.7249748743718594e-05,
"loss": 0.7438,
"step": 65700
},
{
"epoch": 1.65,
"grad_norm": 5.120051383972168,
"learning_rate": 1.7199497487437187e-05,
"loss": 0.7261,
"step": 65800
},
{
"epoch": 1.65,
"grad_norm": 4.206692695617676,
"learning_rate": 1.714924623115578e-05,
"loss": 0.8095,
"step": 65900
},
{
"epoch": 1.65,
"grad_norm": 12.36181640625,
"learning_rate": 1.709899497487437e-05,
"loss": 0.8073,
"step": 66000
},
{
"epoch": 1.65,
"eval_loss": 0.8938525915145874,
"eval_runtime": 93.1198,
"eval_samples_per_second": 10.739,
"eval_steps_per_second": 2.685,
"step": 66000
},
{
"epoch": 1.65,
"grad_norm": 7.308992862701416,
"learning_rate": 1.7048743718592967e-05,
"loss": 0.7847,
"step": 66100
},
{
"epoch": 1.66,
"grad_norm": 6.200756072998047,
"learning_rate": 1.6998492462311556e-05,
"loss": 0.7794,
"step": 66200
},
{
"epoch": 1.66,
"grad_norm": 4.346643447875977,
"learning_rate": 1.6948241206030153e-05,
"loss": 0.8072,
"step": 66300
},
{
"epoch": 1.66,
"grad_norm": 5.740476131439209,
"learning_rate": 1.6897989949748743e-05,
"loss": 0.8331,
"step": 66400
},
{
"epoch": 1.66,
"grad_norm": 4.887804985046387,
"learning_rate": 1.684773869346734e-05,
"loss": 0.7855,
"step": 66500
},
{
"epoch": 1.67,
"grad_norm": 4.159181118011475,
"learning_rate": 1.679748743718593e-05,
"loss": 0.7892,
"step": 66600
},
{
"epoch": 1.67,
"grad_norm": 3.0571138858795166,
"learning_rate": 1.6747236180904523e-05,
"loss": 0.7232,
"step": 66700
},
{
"epoch": 1.67,
"grad_norm": 4.245538711547852,
"learning_rate": 1.6696984924623116e-05,
"loss": 0.7878,
"step": 66800
},
{
"epoch": 1.67,
"grad_norm": 12.613323211669922,
"learning_rate": 1.664673366834171e-05,
"loss": 0.8625,
"step": 66900
},
{
"epoch": 1.68,
"grad_norm": 11.823962211608887,
"learning_rate": 1.6596482412060302e-05,
"loss": 0.7439,
"step": 67000
},
{
"epoch": 1.68,
"eval_loss": 0.8676530122756958,
"eval_runtime": 93.1279,
"eval_samples_per_second": 10.738,
"eval_steps_per_second": 2.684,
"step": 67000
},
{
"epoch": 1.68,
"grad_norm": 11.366893768310547,
"learning_rate": 1.654673366834171e-05,
"loss": 0.7777,
"step": 67100
},
{
"epoch": 1.68,
"grad_norm": 2.996554136276245,
"learning_rate": 1.6496482412060303e-05,
"loss": 0.752,
"step": 67200
},
{
"epoch": 1.68,
"grad_norm": 4.225461959838867,
"learning_rate": 1.6446231155778897e-05,
"loss": 0.7629,
"step": 67300
},
{
"epoch": 1.69,
"grad_norm": 2.6366543769836426,
"learning_rate": 1.6395979899497486e-05,
"loss": 0.788,
"step": 67400
},
{
"epoch": 1.69,
"grad_norm": 6.298273086547852,
"learning_rate": 1.6345728643216083e-05,
"loss": 0.7872,
"step": 67500
},
{
"epoch": 1.69,
"grad_norm": 6.06473970413208,
"learning_rate": 1.6295477386934673e-05,
"loss": 0.8203,
"step": 67600
},
{
"epoch": 1.69,
"grad_norm": 7.898092269897461,
"learning_rate": 1.624572864321608e-05,
"loss": 0.7611,
"step": 67700
},
{
"epoch": 1.69,
"grad_norm": 0.26392027735710144,
"learning_rate": 1.6195477386934674e-05,
"loss": 0.7797,
"step": 67800
},
{
"epoch": 1.7,
"grad_norm": 8.04962158203125,
"learning_rate": 1.6145226130653267e-05,
"loss": 0.7999,
"step": 67900
},
{
"epoch": 1.7,
"grad_norm": 3.1057941913604736,
"learning_rate": 1.609497487437186e-05,
"loss": 0.8137,
"step": 68000
},
{
"epoch": 1.7,
"eval_loss": 0.9231812953948975,
"eval_runtime": 93.1285,
"eval_samples_per_second": 10.738,
"eval_steps_per_second": 2.684,
"step": 68000
},
{
"epoch": 1.7,
"grad_norm": 7.14741325378418,
"learning_rate": 1.6044723618090454e-05,
"loss": 0.8056,
"step": 68100
},
{
"epoch": 1.71,
"grad_norm": 3.0000669956207275,
"learning_rate": 1.5994472361809047e-05,
"loss": 0.819,
"step": 68200
},
{
"epoch": 1.71,
"grad_norm": 8.119636535644531,
"learning_rate": 1.594422110552764e-05,
"loss": 0.7692,
"step": 68300
},
{
"epoch": 1.71,
"grad_norm": 12.256668090820312,
"learning_rate": 1.5893969849246233e-05,
"loss": 0.7655,
"step": 68400
},
{
"epoch": 1.71,
"grad_norm": 6.233114242553711,
"learning_rate": 1.5843718592964823e-05,
"loss": 0.7729,
"step": 68500
},
{
"epoch": 1.71,
"grad_norm": 8.290607452392578,
"learning_rate": 1.5793467336683416e-05,
"loss": 0.6898,
"step": 68600
},
{
"epoch": 1.72,
"grad_norm": 9.289992332458496,
"learning_rate": 1.574321608040201e-05,
"loss": 0.7387,
"step": 68700
},
{
"epoch": 1.72,
"grad_norm": 3.4356155395507812,
"learning_rate": 1.5692964824120603e-05,
"loss": 0.7965,
"step": 68800
},
{
"epoch": 1.72,
"grad_norm": 3.125286340713501,
"learning_rate": 1.5642713567839196e-05,
"loss": 0.7263,
"step": 68900
},
{
"epoch": 1.73,
"grad_norm": 2.8927297592163086,
"learning_rate": 1.559246231155779e-05,
"loss": 0.701,
"step": 69000
},
{
"epoch": 1.73,
"eval_loss": 0.9217960238456726,
"eval_runtime": 93.14,
"eval_samples_per_second": 10.737,
"eval_steps_per_second": 2.684,
"step": 69000
},
{
"epoch": 1.73,
"grad_norm": 9.302748680114746,
"learning_rate": 1.5542211055276383e-05,
"loss": 0.7481,
"step": 69100
},
{
"epoch": 1.73,
"grad_norm": 6.11757230758667,
"learning_rate": 1.5491959798994976e-05,
"loss": 0.7029,
"step": 69200
},
{
"epoch": 1.73,
"grad_norm": 7.994334697723389,
"learning_rate": 1.544170854271357e-05,
"loss": 0.7731,
"step": 69300
},
{
"epoch": 1.73,
"grad_norm": 8.997603416442871,
"learning_rate": 1.5391457286432162e-05,
"loss": 0.8639,
"step": 69400
},
{
"epoch": 1.74,
"grad_norm": 4.444380283355713,
"learning_rate": 1.5341206030150752e-05,
"loss": 0.7567,
"step": 69500
},
{
"epoch": 1.74,
"grad_norm": 5.060299873352051,
"learning_rate": 1.529095477386935e-05,
"loss": 0.7468,
"step": 69600
},
{
"epoch": 1.74,
"grad_norm": 5.900922775268555,
"learning_rate": 1.524070351758794e-05,
"loss": 0.8089,
"step": 69700
},
{
"epoch": 1.75,
"grad_norm": 9.167262077331543,
"learning_rate": 1.5190452261306534e-05,
"loss": 0.7549,
"step": 69800
},
{
"epoch": 1.75,
"grad_norm": 3.5557000637054443,
"learning_rate": 1.5140201005025125e-05,
"loss": 0.8366,
"step": 69900
},
{
"epoch": 1.75,
"grad_norm": 1.1888383626937866,
"learning_rate": 1.508994974874372e-05,
"loss": 0.7529,
"step": 70000
},
{
"epoch": 1.75,
"eval_loss": 0.8521784543991089,
"eval_runtime": 93.1203,
"eval_samples_per_second": 10.739,
"eval_steps_per_second": 2.685,
"step": 70000
},
{
"epoch": 1.75,
"grad_norm": 4.121077537536621,
"learning_rate": 1.5039698492462312e-05,
"loss": 0.7658,
"step": 70100
},
{
"epoch": 1.75,
"grad_norm": 5.181189060211182,
"learning_rate": 1.498994974874372e-05,
"loss": 0.7347,
"step": 70200
},
{
"epoch": 1.76,
"grad_norm": 4.053492546081543,
"learning_rate": 1.4939698492462313e-05,
"loss": 0.8159,
"step": 70300
},
{
"epoch": 1.76,
"grad_norm": 6.347891807556152,
"learning_rate": 1.4889447236180906e-05,
"loss": 0.835,
"step": 70400
},
{
"epoch": 1.76,
"grad_norm": 6.455724239349365,
"learning_rate": 1.4839195979899497e-05,
"loss": 0.7674,
"step": 70500
},
{
"epoch": 1.77,
"grad_norm": 2.7582502365112305,
"learning_rate": 1.4788944723618092e-05,
"loss": 0.7656,
"step": 70600
},
{
"epoch": 1.77,
"grad_norm": 2.472013473510742,
"learning_rate": 1.4738693467336684e-05,
"loss": 0.775,
"step": 70700
},
{
"epoch": 1.77,
"grad_norm": 6.966689586639404,
"learning_rate": 1.4688442211055279e-05,
"loss": 0.7697,
"step": 70800
},
{
"epoch": 1.77,
"grad_norm": 3.572748899459839,
"learning_rate": 1.463819095477387e-05,
"loss": 0.7406,
"step": 70900
},
{
"epoch": 1.77,
"grad_norm": 13.143310546875,
"learning_rate": 1.4587939698492464e-05,
"loss": 0.8036,
"step": 71000
},
{
"epoch": 1.77,
"eval_loss": 0.8541510105133057,
"eval_runtime": 93.1043,
"eval_samples_per_second": 10.741,
"eval_steps_per_second": 2.685,
"step": 71000
},
{
"epoch": 1.78,
"grad_norm": 3.642167806625366,
"learning_rate": 1.4537688442211055e-05,
"loss": 0.7912,
"step": 71100
},
{
"epoch": 1.78,
"grad_norm": 10.763513565063477,
"learning_rate": 1.4487437185929648e-05,
"loss": 0.8091,
"step": 71200
},
{
"epoch": 1.78,
"grad_norm": 4.094142913818359,
"learning_rate": 1.4437185929648242e-05,
"loss": 0.7331,
"step": 71300
},
{
"epoch": 1.79,
"grad_norm": 6.153075218200684,
"learning_rate": 1.4386934673366833e-05,
"loss": 0.8162,
"step": 71400
},
{
"epoch": 1.79,
"grad_norm": 4.609516620635986,
"learning_rate": 1.4336683417085428e-05,
"loss": 0.8265,
"step": 71500
},
{
"epoch": 1.79,
"grad_norm": 6.956715106964111,
"learning_rate": 1.428643216080402e-05,
"loss": 0.8303,
"step": 71600
},
{
"epoch": 1.79,
"grad_norm": 8.181746482849121,
"learning_rate": 1.4236180904522615e-05,
"loss": 0.777,
"step": 71700
},
{
"epoch": 1.79,
"grad_norm": 8.35756778717041,
"learning_rate": 1.4185929648241206e-05,
"loss": 0.7939,
"step": 71800
},
{
"epoch": 1.8,
"grad_norm": 6.512216567993164,
"learning_rate": 1.41356783919598e-05,
"loss": 0.7853,
"step": 71900
},
{
"epoch": 1.8,
"grad_norm": 6.464352130889893,
"learning_rate": 1.4085427135678391e-05,
"loss": 0.8135,
"step": 72000
},
{
"epoch": 1.8,
"eval_loss": 0.8742861151695251,
"eval_runtime": 93.0945,
"eval_samples_per_second": 10.742,
"eval_steps_per_second": 2.685,
"step": 72000
},
{
"epoch": 1.8,
"grad_norm": 5.150899410247803,
"learning_rate": 1.4035175879396986e-05,
"loss": 0.7664,
"step": 72100
},
{
"epoch": 1.81,
"grad_norm": 2.8593320846557617,
"learning_rate": 1.3984924623115577e-05,
"loss": 0.8266,
"step": 72200
},
{
"epoch": 1.81,
"grad_norm": 6.50922155380249,
"learning_rate": 1.3934673366834172e-05,
"loss": 0.761,
"step": 72300
},
{
"epoch": 1.81,
"grad_norm": 4.50417947769165,
"learning_rate": 1.3884422110552764e-05,
"loss": 0.7875,
"step": 72400
},
{
"epoch": 1.81,
"grad_norm": 5.570100784301758,
"learning_rate": 1.3834170854271359e-05,
"loss": 0.8033,
"step": 72500
},
{
"epoch": 1.81,
"grad_norm": 10.100653648376465,
"learning_rate": 1.378391959798995e-05,
"loss": 0.7554,
"step": 72600
},
{
"epoch": 1.82,
"grad_norm": 10.893858909606934,
"learning_rate": 1.3733668341708544e-05,
"loss": 0.7077,
"step": 72700
},
{
"epoch": 1.82,
"grad_norm": 3.154226541519165,
"learning_rate": 1.368391959798995e-05,
"loss": 0.7678,
"step": 72800
},
{
"epoch": 1.82,
"grad_norm": 7.701584339141846,
"learning_rate": 1.3633668341708545e-05,
"loss": 0.7958,
"step": 72900
},
{
"epoch": 1.82,
"grad_norm": 9.466403007507324,
"learning_rate": 1.3583417085427136e-05,
"loss": 0.8119,
"step": 73000
},
{
"epoch": 1.82,
"eval_loss": 0.878372073173523,
"eval_runtime": 93.1706,
"eval_samples_per_second": 10.733,
"eval_steps_per_second": 2.683,
"step": 73000
},
{
"epoch": 1.83,
"grad_norm": 8.789670944213867,
"learning_rate": 1.3533165829145731e-05,
"loss": 0.7594,
"step": 73100
},
{
"epoch": 1.83,
"grad_norm": 9.359320640563965,
"learning_rate": 1.3482914572864323e-05,
"loss": 0.7382,
"step": 73200
},
{
"epoch": 1.83,
"grad_norm": 3.5085604190826416,
"learning_rate": 1.3432663316582916e-05,
"loss": 0.7811,
"step": 73300
},
{
"epoch": 1.83,
"grad_norm": 3.8232526779174805,
"learning_rate": 1.3382412060301507e-05,
"loss": 0.742,
"step": 73400
},
{
"epoch": 1.84,
"grad_norm": 5.470067024230957,
"learning_rate": 1.3332160804020102e-05,
"loss": 0.8204,
"step": 73500
},
{
"epoch": 1.84,
"grad_norm": 2.5600407123565674,
"learning_rate": 1.3281909547738694e-05,
"loss": 0.7655,
"step": 73600
},
{
"epoch": 1.84,
"grad_norm": 2.3198015689849854,
"learning_rate": 1.3231658291457289e-05,
"loss": 0.7856,
"step": 73700
},
{
"epoch": 1.84,
"grad_norm": 8.053333282470703,
"learning_rate": 1.318140703517588e-05,
"loss": 0.8319,
"step": 73800
},
{
"epoch": 1.85,
"grad_norm": 6.589441776275635,
"learning_rate": 1.3131155778894472e-05,
"loss": 0.7642,
"step": 73900
},
{
"epoch": 1.85,
"grad_norm": 3.052572250366211,
"learning_rate": 1.3080904522613067e-05,
"loss": 0.776,
"step": 74000
},
{
"epoch": 1.85,
"eval_loss": 0.8486243486404419,
"eval_runtime": 93.14,
"eval_samples_per_second": 10.737,
"eval_steps_per_second": 2.684,
"step": 74000
},
{
"epoch": 1.85,
"grad_norm": 6.734087944030762,
"learning_rate": 1.3030653266331658e-05,
"loss": 0.8113,
"step": 74100
},
{
"epoch": 1.85,
"grad_norm": 3.0014145374298096,
"learning_rate": 1.2980402010050252e-05,
"loss": 0.8076,
"step": 74200
},
{
"epoch": 1.86,
"grad_norm": 9.161528587341309,
"learning_rate": 1.2930150753768843e-05,
"loss": 0.7918,
"step": 74300
},
{
"epoch": 1.86,
"grad_norm": 6.697804927825928,
"learning_rate": 1.2879899497487438e-05,
"loss": 0.8148,
"step": 74400
},
{
"epoch": 1.86,
"grad_norm": 8.576240539550781,
"learning_rate": 1.282964824120603e-05,
"loss": 0.7362,
"step": 74500
},
{
"epoch": 1.86,
"grad_norm": 6.68988037109375,
"learning_rate": 1.2779396984924625e-05,
"loss": 0.7741,
"step": 74600
},
{
"epoch": 1.87,
"grad_norm": 4.636489391326904,
"learning_rate": 1.2729145728643216e-05,
"loss": 0.7666,
"step": 74700
},
{
"epoch": 1.87,
"grad_norm": 4.227512836456299,
"learning_rate": 1.2678894472361811e-05,
"loss": 0.8033,
"step": 74800
},
{
"epoch": 1.87,
"grad_norm": 10.484670639038086,
"learning_rate": 1.2628643216080403e-05,
"loss": 0.8015,
"step": 74900
},
{
"epoch": 1.88,
"grad_norm": 3.7899045944213867,
"learning_rate": 1.2578391959798996e-05,
"loss": 0.7305,
"step": 75000
},
{
"epoch": 1.88,
"eval_loss": 0.8654676675796509,
"eval_runtime": 93.1732,
"eval_samples_per_second": 10.733,
"eval_steps_per_second": 2.683,
"step": 75000
},
{
"epoch": 1.88,
"grad_norm": 5.665567398071289,
"learning_rate": 1.2528643216080402e-05,
"loss": 0.7619,
"step": 75100
},
{
"epoch": 1.88,
"grad_norm": 7.015081882476807,
"learning_rate": 1.2478391959798997e-05,
"loss": 0.7761,
"step": 75200
},
{
"epoch": 1.88,
"grad_norm": 6.353122711181641,
"learning_rate": 1.2428140703517588e-05,
"loss": 0.748,
"step": 75300
},
{
"epoch": 1.89,
"grad_norm": 2.5389204025268555,
"learning_rate": 1.2377889447236182e-05,
"loss": 0.7846,
"step": 75400
},
{
"epoch": 1.89,
"grad_norm": 2.3196558952331543,
"learning_rate": 1.2327638190954773e-05,
"loss": 0.7793,
"step": 75500
},
{
"epoch": 1.89,
"grad_norm": 6.59114933013916,
"learning_rate": 1.2277386934673366e-05,
"loss": 0.7883,
"step": 75600
},
{
"epoch": 1.89,
"grad_norm": 11.592848777770996,
"learning_rate": 1.222713567839196e-05,
"loss": 0.762,
"step": 75700
},
{
"epoch": 1.9,
"grad_norm": 2.90639328956604,
"learning_rate": 1.2176884422110553e-05,
"loss": 0.7188,
"step": 75800
},
{
"epoch": 1.9,
"grad_norm": 3.033656597137451,
"learning_rate": 1.2126633165829146e-05,
"loss": 0.6816,
"step": 75900
},
{
"epoch": 1.9,
"grad_norm": 5.675739765167236,
"learning_rate": 1.207638190954774e-05,
"loss": 0.721,
"step": 76000
},
{
"epoch": 1.9,
"eval_loss": 0.8610661625862122,
"eval_runtime": 93.1189,
"eval_samples_per_second": 10.739,
"eval_steps_per_second": 2.685,
"step": 76000
},
{
"epoch": 1.9,
"grad_norm": 2.424712657928467,
"learning_rate": 1.2026130653266333e-05,
"loss": 0.7505,
"step": 76100
},
{
"epoch": 1.91,
"grad_norm": 5.7225470542907715,
"learning_rate": 1.1975879396984926e-05,
"loss": 0.7764,
"step": 76200
},
{
"epoch": 1.91,
"grad_norm": 2.682415723800659,
"learning_rate": 1.1925628140703517e-05,
"loss": 0.7766,
"step": 76300
},
{
"epoch": 1.91,
"grad_norm": 3.9195406436920166,
"learning_rate": 1.187537688442211e-05,
"loss": 0.7373,
"step": 76400
},
{
"epoch": 1.91,
"grad_norm": 3.46449613571167,
"learning_rate": 1.1825125628140704e-05,
"loss": 0.8011,
"step": 76500
},
{
"epoch": 1.92,
"grad_norm": 6.3689069747924805,
"learning_rate": 1.1774874371859297e-05,
"loss": 0.7828,
"step": 76600
},
{
"epoch": 1.92,
"grad_norm": 1.710821509361267,
"learning_rate": 1.172462311557789e-05,
"loss": 0.7638,
"step": 76700
},
{
"epoch": 1.92,
"grad_norm": 10.961737632751465,
"learning_rate": 1.1674371859296484e-05,
"loss": 0.7036,
"step": 76800
},
{
"epoch": 1.92,
"grad_norm": 5.5576395988464355,
"learning_rate": 1.1624120603015077e-05,
"loss": 0.7446,
"step": 76900
},
{
"epoch": 1.93,
"grad_norm": 2.242861747741699,
"learning_rate": 1.1573869346733668e-05,
"loss": 0.7571,
"step": 77000
},
{
"epoch": 1.93,
"eval_loss": 0.8827255368232727,
"eval_runtime": 93.1481,
"eval_samples_per_second": 10.736,
"eval_steps_per_second": 2.684,
"step": 77000
},
{
"epoch": 1.93,
"grad_norm": 4.320422172546387,
"learning_rate": 1.1523618090452262e-05,
"loss": 0.7392,
"step": 77100
},
{
"epoch": 1.93,
"grad_norm": 5.0130615234375,
"learning_rate": 1.147386934673367e-05,
"loss": 0.7659,
"step": 77200
},
{
"epoch": 1.93,
"grad_norm": 6.650476932525635,
"learning_rate": 1.1423618090452263e-05,
"loss": 0.6964,
"step": 77300
},
{
"epoch": 1.94,
"grad_norm": 4.825006008148193,
"learning_rate": 1.1373366834170856e-05,
"loss": 0.7344,
"step": 77400
},
{
"epoch": 1.94,
"grad_norm": 6.247884750366211,
"learning_rate": 1.1323115577889447e-05,
"loss": 0.7133,
"step": 77500
},
{
"epoch": 1.94,
"grad_norm": 4.41520881652832,
"learning_rate": 1.127286432160804e-05,
"loss": 0.7219,
"step": 77600
},
{
"epoch": 1.94,
"grad_norm": 7.191974639892578,
"learning_rate": 1.1222613065326634e-05,
"loss": 0.7417,
"step": 77700
},
{
"epoch": 1.94,
"grad_norm": 3.9499876499176025,
"learning_rate": 1.1172361809045227e-05,
"loss": 0.7488,
"step": 77800
},
{
"epoch": 1.95,
"grad_norm": 3.5450992584228516,
"learning_rate": 1.112211055276382e-05,
"loss": 0.7433,
"step": 77900
},
{
"epoch": 1.95,
"grad_norm": 3.713775157928467,
"learning_rate": 1.1071859296482412e-05,
"loss": 0.6978,
"step": 78000
},
{
"epoch": 1.95,
"eval_loss": 0.8865166306495667,
"eval_runtime": 93.1372,
"eval_samples_per_second": 10.737,
"eval_steps_per_second": 2.684,
"step": 78000
},
{
"epoch": 1.95,
"grad_norm": 4.509785175323486,
"learning_rate": 1.1021608040201005e-05,
"loss": 0.8032,
"step": 78100
},
{
"epoch": 1.96,
"grad_norm": 4.823591709136963,
"learning_rate": 1.0971356783919598e-05,
"loss": 0.7822,
"step": 78200
},
{
"epoch": 1.96,
"grad_norm": 3.9249942302703857,
"learning_rate": 1.0921105527638192e-05,
"loss": 0.7506,
"step": 78300
},
{
"epoch": 1.96,
"grad_norm": 7.268338203430176,
"learning_rate": 1.0870854271356783e-05,
"loss": 0.7698,
"step": 78400
},
{
"epoch": 1.96,
"grad_norm": 7.747581958770752,
"learning_rate": 1.0820603015075376e-05,
"loss": 0.7093,
"step": 78500
},
{
"epoch": 1.96,
"grad_norm": 9.720921516418457,
"learning_rate": 1.077035175879397e-05,
"loss": 0.7934,
"step": 78600
},
{
"epoch": 1.97,
"grad_norm": 7.734616756439209,
"learning_rate": 1.0720100502512563e-05,
"loss": 0.7819,
"step": 78700
},
{
"epoch": 1.97,
"grad_norm": 7.007671356201172,
"learning_rate": 1.0669849246231156e-05,
"loss": 0.7296,
"step": 78800
},
{
"epoch": 1.97,
"grad_norm": 3.1866681575775146,
"learning_rate": 1.061959798994975e-05,
"loss": 0.8135,
"step": 78900
},
{
"epoch": 1.98,
"grad_norm": 3.9308559894561768,
"learning_rate": 1.0569346733668343e-05,
"loss": 0.7819,
"step": 79000
},
{
"epoch": 1.98,
"eval_loss": 0.8832026124000549,
"eval_runtime": 93.0901,
"eval_samples_per_second": 10.742,
"eval_steps_per_second": 2.686,
"step": 79000
},
{
"epoch": 1.98,
"grad_norm": 5.734823226928711,
"learning_rate": 1.0519095477386936e-05,
"loss": 0.7862,
"step": 79100
},
{
"epoch": 1.98,
"grad_norm": 3.2910168170928955,
"learning_rate": 1.0468844221105527e-05,
"loss": 0.7575,
"step": 79200
},
{
"epoch": 1.98,
"grad_norm": 4.012611389160156,
"learning_rate": 1.041859296482412e-05,
"loss": 0.7966,
"step": 79300
},
{
"epoch": 1.98,
"grad_norm": 6.5967535972595215,
"learning_rate": 1.0368341708542714e-05,
"loss": 0.7862,
"step": 79400
},
{
"epoch": 1.99,
"grad_norm": 6.494651794433594,
"learning_rate": 1.0318592964824122e-05,
"loss": 0.7849,
"step": 79500
},
{
"epoch": 1.99,
"grad_norm": 8.57440185546875,
"learning_rate": 1.0268341708542715e-05,
"loss": 0.7216,
"step": 79600
},
{
"epoch": 1.99,
"grad_norm": 6.793177604675293,
"learning_rate": 1.0218090452261306e-05,
"loss": 0.7803,
"step": 79700
},
{
"epoch": 2.0,
"grad_norm": 8.437973022460938,
"learning_rate": 1.01678391959799e-05,
"loss": 0.7464,
"step": 79800
},
{
"epoch": 2.0,
"grad_norm": 5.798175811767578,
"learning_rate": 1.0117587939698493e-05,
"loss": 0.7847,
"step": 79900
},
{
"epoch": 2.0,
"grad_norm": 4.946900844573975,
"learning_rate": 1.0067336683417086e-05,
"loss": 0.7455,
"step": 80000
},
{
"epoch": 2.0,
"eval_loss": 0.8540694117546082,
"eval_runtime": 93.087,
"eval_samples_per_second": 10.743,
"eval_steps_per_second": 2.686,
"step": 80000
}
],
"logging_steps": 100,
"max_steps": 100000,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 1000,
"total_flos": 1.28817007951872e+18,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}