{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 1578,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.019011406844106463,
"grad_norm": 9.1989966206838,
"learning_rate": 6.329113924050634e-07,
"loss": 0.8874,
"step": 10
},
{
"epoch": 0.03802281368821293,
"grad_norm": 4.161122432720083,
"learning_rate": 1.2658227848101267e-06,
"loss": 0.791,
"step": 20
},
{
"epoch": 0.057034220532319393,
"grad_norm": 1.6740419759238627,
"learning_rate": 1.8987341772151901e-06,
"loss": 0.7354,
"step": 30
},
{
"epoch": 0.07604562737642585,
"grad_norm": 1.472773729260504,
"learning_rate": 2.5316455696202535e-06,
"loss": 0.6972,
"step": 40
},
{
"epoch": 0.09505703422053231,
"grad_norm": 1.6632418888769052,
"learning_rate": 3.164556962025317e-06,
"loss": 0.698,
"step": 50
},
{
"epoch": 0.11406844106463879,
"grad_norm": 1.2299395567832287,
"learning_rate": 3.7974683544303802e-06,
"loss": 0.6778,
"step": 60
},
{
"epoch": 0.13307984790874525,
"grad_norm": 1.5380428593757542,
"learning_rate": 4.430379746835443e-06,
"loss": 0.6751,
"step": 70
},
{
"epoch": 0.1520912547528517,
"grad_norm": 1.3704564329134865,
"learning_rate": 4.999995058613287e-06,
"loss": 0.6713,
"step": 80
},
{
"epoch": 0.17110266159695817,
"grad_norm": 1.9490540655863884,
"learning_rate": 4.999402118469357e-06,
"loss": 0.6671,
"step": 90
},
{
"epoch": 0.19011406844106463,
"grad_norm": 1.5630755002882903,
"learning_rate": 4.997821199394829e-06,
"loss": 0.6676,
"step": 100
},
{
"epoch": 0.20912547528517111,
"grad_norm": 2.133710896815452,
"learning_rate": 4.995252995758543e-06,
"loss": 0.6695,
"step": 110
},
{
"epoch": 0.22813688212927757,
"grad_norm": 1.5943526267612576,
"learning_rate": 4.99169863556294e-06,
"loss": 0.6575,
"step": 120
},
{
"epoch": 0.24714828897338403,
"grad_norm": 1.832479949115814,
"learning_rate": 4.987159679948624e-06,
"loss": 0.6612,
"step": 130
},
{
"epoch": 0.2661596958174905,
"grad_norm": 1.4945020437443977,
"learning_rate": 4.981638122508684e-06,
"loss": 0.658,
"step": 140
},
{
"epoch": 0.28517110266159695,
"grad_norm": 1.619599628436461,
"learning_rate": 4.975136388413065e-06,
"loss": 0.6656,
"step": 150
},
{
"epoch": 0.3041825095057034,
"grad_norm": 1.5360935621329557,
"learning_rate": 4.967657333343394e-06,
"loss": 0.6511,
"step": 160
},
{
"epoch": 0.3231939163498099,
"grad_norm": 1.4510475916499845,
"learning_rate": 4.959204242238707e-06,
"loss": 0.6573,
"step": 170
},
{
"epoch": 0.34220532319391633,
"grad_norm": 1.532931462135121,
"learning_rate": 4.949780827852648e-06,
"loss": 0.6507,
"step": 180
},
{
"epoch": 0.3612167300380228,
"grad_norm": 1.1719507987669484,
"learning_rate": 4.939391229122757e-06,
"loss": 0.6523,
"step": 190
},
{
"epoch": 0.38022813688212925,
"grad_norm": 1.2585592582289158,
"learning_rate": 4.928040009352568e-06,
"loss": 0.6524,
"step": 200
},
{
"epoch": 0.39923954372623577,
"grad_norm": 1.7296360428483535,
"learning_rate": 4.91573215420733e-06,
"loss": 0.6614,
"step": 210
},
{
"epoch": 0.41825095057034223,
"grad_norm": 1.5691004380249787,
"learning_rate": 4.902473069524204e-06,
"loss": 0.6484,
"step": 220
},
{
"epoch": 0.4372623574144487,
"grad_norm": 1.247775759644616,
"learning_rate": 4.888268578937923e-06,
"loss": 0.6508,
"step": 230
},
{
"epoch": 0.45627376425855515,
"grad_norm": 1.3594034920568745,
"learning_rate": 4.873124921322945e-06,
"loss": 0.6469,
"step": 240
},
{
"epoch": 0.4752851711026616,
"grad_norm": 1.2309510240137682,
"learning_rate": 4.8570487480532196e-06,
"loss": 0.6401,
"step": 250
},
{
"epoch": 0.49429657794676807,
"grad_norm": 1.3299272061400234,
"learning_rate": 4.840047120080787e-06,
"loss": 0.6416,
"step": 260
},
{
"epoch": 0.5133079847908745,
"grad_norm": 1.3129636158199345,
"learning_rate": 4.822127504834472e-06,
"loss": 0.6438,
"step": 270
},
{
"epoch": 0.532319391634981,
"grad_norm": 1.4703016714267874,
"learning_rate": 4.8032977729400585e-06,
"loss": 0.6506,
"step": 280
},
{
"epoch": 0.5513307984790875,
"grad_norm": 1.3282370638504304,
"learning_rate": 4.783566194763359e-06,
"loss": 0.6521,
"step": 290
},
{
"epoch": 0.5703422053231939,
"grad_norm": 1.187364149838104,
"learning_rate": 4.762941436777721e-06,
"loss": 0.6371,
"step": 300
},
{
"epoch": 0.5893536121673004,
"grad_norm": 1.2164074784365948,
"learning_rate": 4.7414325577575484e-06,
"loss": 0.6407,
"step": 310
},
{
"epoch": 0.6083650190114068,
"grad_norm": 1.4208300435315424,
"learning_rate": 4.719049004799525e-06,
"loss": 0.6328,
"step": 320
},
{
"epoch": 0.6273764258555133,
"grad_norm": 1.324349326714873,
"learning_rate": 4.695800609173274e-06,
"loss": 0.6526,
"step": 330
},
{
"epoch": 0.6463878326996197,
"grad_norm": 1.1238559883921406,
"learning_rate": 4.671697582003279e-06,
"loss": 0.6371,
"step": 340
},
{
"epoch": 0.6653992395437263,
"grad_norm": 1.3772583883883136,
"learning_rate": 4.646750509783975e-06,
"loss": 0.6413,
"step": 350
},
{
"epoch": 0.6844106463878327,
"grad_norm": 1.3311507053384555,
"learning_rate": 4.620970349729961e-06,
"loss": 0.638,
"step": 360
},
{
"epoch": 0.7034220532319392,
"grad_norm": 1.1084009519269555,
"learning_rate": 4.594368424963392e-06,
"loss": 0.6402,
"step": 370
},
{
"epoch": 0.7224334600760456,
"grad_norm": 1.272517045012054,
"learning_rate": 4.56695641954065e-06,
"loss": 0.6332,
"step": 380
},
{
"epoch": 0.7414448669201521,
"grad_norm": 1.068763291195587,
"learning_rate": 4.538746373320499e-06,
"loss": 0.6451,
"step": 390
},
{
"epoch": 0.7604562737642585,
"grad_norm": 1.17609263311229,
"learning_rate": 4.5097506766759465e-06,
"loss": 0.6398,
"step": 400
},
{
"epoch": 0.779467680608365,
"grad_norm": 1.2993920513145316,
"learning_rate": 4.479982065052171e-06,
"loss": 0.6333,
"step": 410
},
{
"epoch": 0.7984790874524715,
"grad_norm": 1.4521328868048515,
"learning_rate": 4.4494536133728754e-06,
"loss": 0.6389,
"step": 420
},
{
"epoch": 0.8174904942965779,
"grad_norm": 1.134966447023594,
"learning_rate": 4.418178730297542e-06,
"loss": 0.6369,
"step": 430
},
{
"epoch": 0.8365019011406845,
"grad_norm": 1.1802112020601854,
"learning_rate": 4.3861711523321e-06,
"loss": 0.6355,
"step": 440
},
{
"epoch": 0.8555133079847909,
"grad_norm": 1.2438925294191414,
"learning_rate": 4.353444937795595e-06,
"loss": 0.6337,
"step": 450
},
{
"epoch": 0.8745247148288974,
"grad_norm": 1.169622958547474,
"learning_rate": 4.320014460645523e-06,
"loss": 0.6357,
"step": 460
},
{
"epoch": 0.8935361216730038,
"grad_norm": 1.0360592829770976,
"learning_rate": 4.2858944041645196e-06,
"loss": 0.6308,
"step": 470
},
{
"epoch": 0.9125475285171103,
"grad_norm": 1.133882806727274,
"learning_rate": 4.251099754511189e-06,
"loss": 0.633,
"step": 480
},
{
"epoch": 0.9315589353612167,
"grad_norm": 1.0230579665487711,
"learning_rate": 4.2156457941379095e-06,
"loss": 0.637,
"step": 490
},
{
"epoch": 0.9505703422053232,
"grad_norm": 1.0897950528236497,
"learning_rate": 4.179548095078498e-06,
"loss": 0.6315,
"step": 500
},
{
"epoch": 0.9695817490494296,
"grad_norm": 1.4631268078629502,
"learning_rate": 4.142822512108683e-06,
"loss": 0.632,
"step": 510
},
{
"epoch": 0.9885931558935361,
"grad_norm": 1.2859460507714569,
"learning_rate": 4.105485175782396e-06,
"loss": 0.6226,
"step": 520
},
{
"epoch": 1.0,
"eval_loss": 0.6353716254234314,
"eval_runtime": 49.2084,
"eval_samples_per_second": 287.816,
"eval_steps_per_second": 1.138,
"step": 526
},
{
"epoch": 1.0076045627376427,
"grad_norm": 2.6186402518104304,
"learning_rate": 4.067552485346939e-06,
"loss": 0.5901,
"step": 530
},
{
"epoch": 1.026615969581749,
"grad_norm": 1.4429614147480494,
"learning_rate": 4.029041101540122e-06,
"loss": 0.546,
"step": 540
},
{
"epoch": 1.0456273764258555,
"grad_norm": 1.1901165511323117,
"learning_rate": 3.989967939272569e-06,
"loss": 0.5434,
"step": 550
},
{
"epoch": 1.064638783269962,
"grad_norm": 1.133670318062506,
"learning_rate": 3.950350160198373e-06,
"loss": 0.5365,
"step": 560
},
{
"epoch": 1.0836501901140685,
"grad_norm": 1.1569131629508171,
"learning_rate": 3.91020516517738e-06,
"loss": 0.5394,
"step": 570
},
{
"epoch": 1.102661596958175,
"grad_norm": 1.1578867626444747,
"learning_rate": 3.869550586632413e-06,
"loss": 0.5454,
"step": 580
},
{
"epoch": 1.1216730038022813,
"grad_norm": 1.1710585399638043,
"learning_rate": 3.828404280804782e-06,
"loss": 0.5437,
"step": 590
},
{
"epoch": 1.1406844106463878,
"grad_norm": 1.2968456937546469,
"learning_rate": 3.7867843199114996e-06,
"loss": 0.545,
"step": 600
},
{
"epoch": 1.1596958174904943,
"grad_norm": 1.173788116358311,
"learning_rate": 3.7447089842076238e-06,
"loss": 0.5401,
"step": 610
},
{
"epoch": 1.1787072243346008,
"grad_norm": 1.1736747473104372,
"learning_rate": 3.7021967539572343e-06,
"loss": 0.5451,
"step": 620
},
{
"epoch": 1.1977186311787071,
"grad_norm": 1.1583258876929678,
"learning_rate": 3.6592663013165636e-06,
"loss": 0.5413,
"step": 630
},
{
"epoch": 1.2167300380228137,
"grad_norm": 1.1187865805782111,
"learning_rate": 3.6159364821328325e-06,
"loss": 0.5531,
"step": 640
},
{
"epoch": 1.2357414448669202,
"grad_norm": 1.2500694936887193,
"learning_rate": 3.572226327662423e-06,
"loss": 0.5481,
"step": 650
},
{
"epoch": 1.2547528517110267,
"grad_norm": 1.2649269376886392,
"learning_rate": 3.528155036211995e-06,
"loss": 0.5402,
"step": 660
},
{
"epoch": 1.2737642585551332,
"grad_norm": 1.2215327457350609,
"learning_rate": 3.4837419647062344e-06,
"loss": 0.5452,
"step": 670
},
{
"epoch": 1.2927756653992395,
"grad_norm": 1.2145466936928535,
"learning_rate": 3.4390066201859355e-06,
"loss": 0.5376,
"step": 680
},
{
"epoch": 1.311787072243346,
"grad_norm": 1.16823342480946,
"learning_rate": 3.393968651240153e-06,
"loss": 0.5442,
"step": 690
},
{
"epoch": 1.3307984790874525,
"grad_norm": 1.1720513921800486,
"learning_rate": 3.3486478393761707e-06,
"loss": 0.5449,
"step": 700
},
{
"epoch": 1.3498098859315588,
"grad_norm": 1.32127207540034,
"learning_rate": 3.303064090331106e-06,
"loss": 0.537,
"step": 710
},
{
"epoch": 1.3688212927756653,
"grad_norm": 1.170488361503433,
"learning_rate": 3.2572374253289385e-06,
"loss": 0.5525,
"step": 720
},
{
"epoch": 1.3878326996197718,
"grad_norm": 1.2540643595664949,
"learning_rate": 3.2111879722868204e-06,
"loss": 0.5513,
"step": 730
},
{
"epoch": 1.4068441064638784,
"grad_norm": 1.1125376049437519,
"learning_rate": 3.164935956974522e-06,
"loss": 0.5428,
"step": 740
},
{
"epoch": 1.4258555133079849,
"grad_norm": 1.2222128167327286,
"learning_rate": 3.1185016941309033e-06,
"loss": 0.5443,
"step": 750
},
{
"epoch": 1.4448669201520912,
"grad_norm": 1.1189014403528423,
"learning_rate": 3.071905578541304e-06,
"loss": 0.55,
"step": 760
},
{
"epoch": 1.4638783269961977,
"grad_norm": 1.106630875988617,
"learning_rate": 3.0251680760797737e-06,
"loss": 0.5512,
"step": 770
},
{
"epoch": 1.4828897338403042,
"grad_norm": 1.238247599173364,
"learning_rate": 2.978309714720092e-06,
"loss": 0.5457,
"step": 780
},
{
"epoch": 1.5019011406844105,
"grad_norm": 1.1877938363765261,
"learning_rate": 2.931351075519494e-06,
"loss": 0.5472,
"step": 790
},
{
"epoch": 1.5209125475285172,
"grad_norm": 1.0765509288814237,
"learning_rate": 2.8843127835790922e-06,
"loss": 0.5467,
"step": 800
},
{
"epoch": 1.5399239543726235,
"grad_norm": 1.1769551690370097,
"learning_rate": 2.837215498984956e-06,
"loss": 0.5423,
"step": 810
},
{
"epoch": 1.55893536121673,
"grad_norm": 1.0525687613765247,
"learning_rate": 2.7900799077338137e-06,
"loss": 0.5377,
"step": 820
},
{
"epoch": 1.5779467680608366,
"grad_norm": 1.1872738975990165,
"learning_rate": 2.742926712647389e-06,
"loss": 0.5422,
"step": 830
},
{
"epoch": 1.5969581749049429,
"grad_norm": 1.2556831499142438,
"learning_rate": 2.6957766242793286e-06,
"loss": 0.5548,
"step": 840
},
{
"epoch": 1.6159695817490496,
"grad_norm": 1.0530666465416862,
"learning_rate": 2.648650351818758e-06,
"loss": 0.5396,
"step": 850
},
{
"epoch": 1.6349809885931559,
"grad_norm": 1.0626505034866265,
"learning_rate": 2.6015685939944113e-06,
"loss": 0.5443,
"step": 860
},
{
"epoch": 1.6539923954372624,
"grad_norm": 1.02476310680603,
"learning_rate": 2.554552029983375e-06,
"loss": 0.5404,
"step": 870
},
{
"epoch": 1.673003802281369,
"grad_norm": 1.0885196976387683,
"learning_rate": 2.5076213103284107e-06,
"loss": 0.5443,
"step": 880
},
{
"epoch": 1.6920152091254752,
"grad_norm": 1.1420366693644837,
"learning_rate": 2.4607970478678515e-06,
"loss": 0.5464,
"step": 890
},
{
"epoch": 1.7110266159695817,
"grad_norm": 1.1255789898459938,
"learning_rate": 2.4140998086820665e-06,
"loss": 0.5447,
"step": 900
},
{
"epoch": 1.7300380228136882,
"grad_norm": 1.3189103855357065,
"learning_rate": 2.367550103060459e-06,
"loss": 0.5475,
"step": 910
},
{
"epoch": 1.7490494296577945,
"grad_norm": 1.0465751177563847,
"learning_rate": 2.3211683764929664e-06,
"loss": 0.5484,
"step": 920
},
{
"epoch": 1.7680608365019013,
"grad_norm": 1.1704397396223116,
"learning_rate": 2.2749750006900263e-06,
"loss": 0.5431,
"step": 930
},
{
"epoch": 1.7870722433460076,
"grad_norm": 1.053213291008937,
"learning_rate": 2.2289902646349423e-06,
"loss": 0.5425,
"step": 940
},
{
"epoch": 1.806083650190114,
"grad_norm": 1.2515940528618232,
"learning_rate": 2.1832343656725918e-06,
"loss": 0.5459,
"step": 950
},
{
"epoch": 1.8250950570342206,
"grad_norm": 1.099127233957744,
"learning_rate": 2.1377274006383737e-06,
"loss": 0.5394,
"step": 960
},
{
"epoch": 1.8441064638783269,
"grad_norm": 1.12063559389163,
"learning_rate": 2.092489357031315e-06,
"loss": 0.5447,
"step": 970
},
{
"epoch": 1.8631178707224336,
"grad_norm": 1.102123448980619,
"learning_rate": 2.0475401042351843e-06,
"loss": 0.5305,
"step": 980
},
{
"epoch": 1.88212927756654,
"grad_norm": 1.1254624333919672,
"learning_rate": 2.0028993847914966e-06,
"loss": 0.5395,
"step": 990
},
{
"epoch": 1.9011406844106464,
"grad_norm": 1.0910835517042778,
"learning_rate": 1.9585868057282256e-06,
"loss": 0.5406,
"step": 1000
},
{
"epoch": 1.920152091254753,
"grad_norm": 1.3820944438954628,
"learning_rate": 1.9146218299480303e-06,
"loss": 0.5476,
"step": 1010
},
{
"epoch": 1.9391634980988592,
"grad_norm": 1.2180866671022355,
"learning_rate": 1.8710237676797923e-06,
"loss": 0.5429,
"step": 1020
},
{
"epoch": 1.9581749049429658,
"grad_norm": 1.1380236136327964,
"learning_rate": 1.827811767997207e-06,
"loss": 0.5419,
"step": 1030
},
{
"epoch": 1.9771863117870723,
"grad_norm": 1.0602009694403736,
"learning_rate": 1.7850048104081555e-06,
"loss": 0.5369,
"step": 1040
},
{
"epoch": 1.9961977186311786,
"grad_norm": 1.0393675864746768,
"learning_rate": 1.742621696518555e-06,
"loss": 0.5371,
"step": 1050
},
{
"epoch": 2.0,
"eval_loss": 0.6316379308700562,
"eval_runtime": 54.2509,
"eval_samples_per_second": 261.065,
"eval_steps_per_second": 1.032,
"step": 1052
},
{
"epoch": 2.0152091254752853,
"grad_norm": 1.7644212761083442,
"learning_rate": 1.7006810417743453e-06,
"loss": 0.4716,
"step": 1060
},
{
"epoch": 2.0342205323193916,
"grad_norm": 1.3479439652442078,
"learning_rate": 1.6592012672852462e-06,
"loss": 0.4592,
"step": 1070
},
{
"epoch": 2.053231939163498,
"grad_norm": 1.1555574696681938,
"learning_rate": 1.618200591733858e-06,
"loss": 0.4568,
"step": 1080
},
{
"epoch": 2.0722433460076046,
"grad_norm": 1.1655620913425222,
"learning_rate": 1.5776970233736877e-06,
"loss": 0.4649,
"step": 1090
},
{
"epoch": 2.091254752851711,
"grad_norm": 1.246797649914118,
"learning_rate": 1.5377083521195849e-06,
"loss": 0.4584,
"step": 1100
},
{
"epoch": 2.1102661596958177,
"grad_norm": 1.20981005283122,
"learning_rate": 1.4982521417340881e-06,
"loss": 0.457,
"step": 1110
},
{
"epoch": 2.129277566539924,
"grad_norm": 1.25314570539992,
"learning_rate": 1.459345722113095e-06,
"loss": 0.4613,
"step": 1120
},
{
"epoch": 2.1482889733840302,
"grad_norm": 1.2661139969932325,
"learning_rate": 1.4210061816742487e-06,
"loss": 0.4557,
"step": 1130
},
{
"epoch": 2.167300380228137,
"grad_norm": 1.22533837512988,
"learning_rate": 1.3832503598513939e-06,
"loss": 0.4638,
"step": 1140
},
{
"epoch": 2.1863117870722433,
"grad_norm": 1.153296199976961,
"learning_rate": 1.3460948396983833e-06,
"loss": 0.4539,
"step": 1150
},
{
"epoch": 2.20532319391635,
"grad_norm": 1.2554665358203887,
"learning_rate": 1.3095559406054964e-06,
"loss": 0.4574,
"step": 1160
},
{
"epoch": 2.2243346007604563,
"grad_norm": 1.216327809593985,
"learning_rate": 1.2736497111316607e-06,
"loss": 0.4595,
"step": 1170
},
{
"epoch": 2.2433460076045626,
"grad_norm": 1.1472080286252149,
"learning_rate": 1.238391921955631e-06,
"loss": 0.4565,
"step": 1180
},
{
"epoch": 2.2623574144486693,
"grad_norm": 1.280760900457142,
"learning_rate": 1.2037980589492116e-06,
"loss": 0.4623,
"step": 1190
},
{
"epoch": 2.2813688212927756,
"grad_norm": 1.1694311311925278,
"learning_rate": 1.1698833163755793e-06,
"loss": 0.4531,
"step": 1200
},
{
"epoch": 2.3003802281368824,
"grad_norm": 1.1799865250234591,
"learning_rate": 1.1366625902156814e-06,
"loss": 0.4555,
"step": 1210
},
{
"epoch": 2.3193916349809887,
"grad_norm": 1.1721763967059378,
"learning_rate": 1.1041504716256456e-06,
"loss": 0.4601,
"step": 1220
},
{
"epoch": 2.338403041825095,
"grad_norm": 1.1882316774789523,
"learning_rate": 1.0723612405280762e-06,
"loss": 0.4669,
"step": 1230
},
{
"epoch": 2.3574144486692017,
"grad_norm": 1.1388831508699453,
"learning_rate": 1.0413088593400464e-06,
"loss": 0.4632,
"step": 1240
},
{
"epoch": 2.376425855513308,
"grad_norm": 1.143656084527738,
"learning_rate": 1.0110069668405487e-06,
"loss": 0.4564,
"step": 1250
},
{
"epoch": 2.3954372623574143,
"grad_norm": 1.210491576650488,
"learning_rate": 9.8146887218009e-07,
"loss": 0.4639,
"step": 1260
},
{
"epoch": 2.414448669201521,
"grad_norm": 1.1273169371405412,
"learning_rate": 9.527075490350695e-07,
"loss": 0.4615,
"step": 1270
},
{
"epoch": 2.4334600760456273,
"grad_norm": 1.215505001410675,
"learning_rate": 9.247356299094966e-07,
"loss": 0.4528,
"step": 1280
},
{
"epoch": 2.4524714828897336,
"grad_norm": 1.1389739744110048,
"learning_rate": 8.975654005865671e-07,
"loss": 0.4535,
"step": 1290
},
{
"epoch": 2.4714828897338403,
"grad_norm": 1.159288850094368,
"learning_rate": 8.712087947325155e-07,
"loss": 0.4514,
"step": 1300
},
{
"epoch": 2.4904942965779466,
"grad_norm": 1.2097686536353103,
"learning_rate": 8.456773886551317e-07,
"loss": 0.456,
"step": 1310
},
{
"epoch": 2.5095057034220534,
"grad_norm": 1.125891025891593,
"learning_rate": 8.20982396219231e-07,
"loss": 0.4584,
"step": 1320
},
{
"epoch": 2.5285171102661597,
"grad_norm": 1.165727380934779,
"learning_rate": 7.971346639213192e-07,
"loss": 0.4576,
"step": 1330
},
{
"epoch": 2.5475285171102664,
"grad_norm": 1.2196983806300021,
"learning_rate": 7.74144666125611e-07,
"loss": 0.4585,
"step": 1340
},
{
"epoch": 2.5665399239543727,
"grad_norm": 1.1416376267908626,
"learning_rate": 7.520225004634946e-07,
"loss": 0.4617,
"step": 1350
},
{
"epoch": 2.585551330798479,
"grad_norm": 1.1374660560345522,
"learning_rate": 7.307778833984666e-07,
"loss": 0.4525,
"step": 1360
},
{
"epoch": 2.6045627376425857,
"grad_norm": 1.1600478528127594,
"learning_rate": 7.104201459584814e-07,
"loss": 0.4554,
"step": 1370
},
{
"epoch": 2.623574144486692,
"grad_norm": 1.1613018240717523,
"learning_rate": 6.909582296375923e-07,
"loss": 0.4571,
"step": 1380
},
{
"epoch": 2.6425855513307983,
"grad_norm": 1.1582568171040142,
"learning_rate": 6.724006824686814e-07,
"loss": 0.458,
"step": 1390
},
{
"epoch": 2.661596958174905,
"grad_norm": 1.2297405479958352,
"learning_rate": 6.547556552690069e-07,
"loss": 0.4576,
"step": 1400
},
{
"epoch": 2.6806083650190113,
"grad_norm": 1.1545365546618969,
"learning_rate": 6.380308980602119e-07,
"loss": 0.4559,
"step": 1410
},
{
"epoch": 2.6996197718631176,
"grad_norm": 1.1501358792075553,
"learning_rate": 6.222337566643733e-07,
"loss": 0.458,
"step": 1420
},
{
"epoch": 2.7186311787072244,
"grad_norm": 1.1596268149561126,
"learning_rate": 6.073711694775805e-07,
"loss": 0.4523,
"step": 1430
},
{
"epoch": 2.7376425855513307,
"grad_norm": 1.176536310467956,
"learning_rate": 5.934496644224642e-07,
"loss": 0.4592,
"step": 1440
},
{
"epoch": 2.7566539923954374,
"grad_norm": 1.1601201945564672,
"learning_rate": 5.804753560810128e-07,
"loss": 0.4587,
"step": 1450
},
{
"epoch": 2.7756653992395437,
"grad_norm": 1.1503887080909905,
"learning_rate": 5.684539430089329e-07,
"loss": 0.4558,
"step": 1460
},
{
"epoch": 2.7946768060836504,
"grad_norm": 1.148286520523666,
"learning_rate": 5.573907052327413e-07,
"loss": 0.4585,
"step": 1470
},
{
"epoch": 2.8136882129277567,
"grad_norm": 1.1477000696419715,
"learning_rate": 5.472905019306775e-07,
"loss": 0.4555,
"step": 1480
},
{
"epoch": 2.832699619771863,
"grad_norm": 1.1636191591442357,
"learning_rate": 5.38157769298465e-07,
"loss": 0.4529,
"step": 1490
},
{
"epoch": 2.8517110266159698,
"grad_norm": 1.226786969124158,
"learning_rate": 5.2999651860085e-07,
"loss": 0.4628,
"step": 1500
},
{
"epoch": 2.870722433460076,
"grad_norm": 1.2111320995578168,
"learning_rate": 5.228103344097825e-07,
"loss": 0.4642,
"step": 1510
},
{
"epoch": 2.8897338403041823,
"grad_norm": 1.134156671769776,
"learning_rate": 5.166023730300036e-07,
"loss": 0.4541,
"step": 1520
},
{
"epoch": 2.908745247148289,
"grad_norm": 1.1404768838000066,
"learning_rate": 5.113753611127401e-07,
"loss": 0.4607,
"step": 1530
},
{
"epoch": 2.9277566539923954,
"grad_norm": 1.1667213786402724,
"learning_rate": 5.071315944581075e-07,
"loss": 0.4638,
"step": 1540
},
{
"epoch": 2.9467680608365017,
"grad_norm": 1.151294180691113,
"learning_rate": 5.038729370067536e-07,
"loss": 0.455,
"step": 1550
},
{
"epoch": 2.9657794676806084,
"grad_norm": 1.2244552822051,
"learning_rate": 5.016008200211805e-07,
"loss": 0.4556,
"step": 1560
},
{
"epoch": 2.9847908745247147,
"grad_norm": 1.1748003065525463,
"learning_rate": 5.003162414571072e-07,
"loss": 0.455,
"step": 1570
},
{
"epoch": 3.0,
"eval_loss": 0.6586939692497253,
"eval_runtime": 55.4914,
"eval_samples_per_second": 255.229,
"eval_steps_per_second": 1.009,
"step": 1578
},
{
"epoch": 3.0,
"step": 1578,
"total_flos": 2643208773304320.0,
"train_loss": 0.5529848988367426,
"train_runtime": 9902.4641,
"train_samples_per_second": 81.519,
"train_steps_per_second": 0.159
}
],
"logging_steps": 10,
"max_steps": 1578,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2643208773304320.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}