{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.9930394431554523,
"eval_steps": 50,
"global_step": 645,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02320185614849188,
"grad_norm": 27.06661296487487,
"learning_rate": 5e-07,
"loss": 1.6534,
"step": 5
},
{
"epoch": 0.04640371229698376,
"grad_norm": 14.570294011958959,
"learning_rate": 1e-06,
"loss": 1.5286,
"step": 10
},
{
"epoch": 0.06960556844547564,
"grad_norm": 7.608713229824519,
"learning_rate": 9.998470286265414e-07,
"loss": 1.2854,
"step": 15
},
{
"epoch": 0.09280742459396751,
"grad_norm": 6.212274190189952,
"learning_rate": 9.993882081071305e-07,
"loss": 1.2312,
"step": 20
},
{
"epoch": 0.11600928074245939,
"grad_norm": 5.767015997797787,
"learning_rate": 9.986238191873872e-07,
"loss": 1.1896,
"step": 25
},
{
"epoch": 0.13921113689095127,
"grad_norm": 5.073635902534103,
"learning_rate": 9.975543295858033e-07,
"loss": 1.1918,
"step": 30
},
{
"epoch": 0.16241299303944315,
"grad_norm": 4.788195681953361,
"learning_rate": 9.961803937075514e-07,
"loss": 1.1547,
"step": 35
},
{
"epoch": 0.18561484918793503,
"grad_norm": 4.642283394950232,
"learning_rate": 9.945028522440653e-07,
"loss": 1.1326,
"step": 40
},
{
"epoch": 0.2088167053364269,
"grad_norm": 4.915483171626106,
"learning_rate": 9.925227316586314e-07,
"loss": 1.1409,
"step": 45
},
{
"epoch": 0.23201856148491878,
"grad_norm": 4.785752094079727,
"learning_rate": 9.902412435583125e-07,
"loss": 1.1348,
"step": 50
},
{
"epoch": 0.23201856148491878,
"eval_loss": 1.1267675161361694,
"eval_runtime": 105.5696,
"eval_samples_per_second": 58.056,
"eval_steps_per_second": 0.909,
"step": 50
},
{
"epoch": 0.2552204176334107,
"grad_norm": 4.914635335083288,
"learning_rate": 9.876597839525813e-07,
"loss": 1.1167,
"step": 55
},
{
"epoch": 0.27842227378190254,
"grad_norm": 4.9477576848370095,
"learning_rate": 9.847799323991233e-07,
"loss": 1.1059,
"step": 60
},
{
"epoch": 0.30162412993039445,
"grad_norm": 4.9360133375019295,
"learning_rate": 9.816034510373285e-07,
"loss": 1.1173,
"step": 65
},
{
"epoch": 0.3248259860788863,
"grad_norm": 4.7405884580433675,
"learning_rate": 9.781322835100637e-07,
"loss": 1.1091,
"step": 70
},
{
"epoch": 0.3480278422273782,
"grad_norm": 4.835669121473599,
"learning_rate": 9.743685537743856e-07,
"loss": 1.0967,
"step": 75
},
{
"epoch": 0.37122969837587005,
"grad_norm": 4.6796771647856055,
"learning_rate": 9.70314564801922e-07,
"loss": 1.0758,
"step": 80
},
{
"epoch": 0.39443155452436196,
"grad_norm": 4.984167856345103,
"learning_rate": 9.659727971697173e-07,
"loss": 1.0862,
"step": 85
},
{
"epoch": 0.4176334106728538,
"grad_norm": 4.771046487158264,
"learning_rate": 9.613459075424033e-07,
"loss": 1.0795,
"step": 90
},
{
"epoch": 0.4408352668213457,
"grad_norm": 4.605260375673115,
"learning_rate": 9.564367270466245e-07,
"loss": 1.0677,
"step": 95
},
{
"epoch": 0.46403712296983757,
"grad_norm": 4.698279384122072,
"learning_rate": 9.51248259538713e-07,
"loss": 1.08,
"step": 100
},
{
"epoch": 0.46403712296983757,
"eval_loss": 1.0813250541687012,
"eval_runtime": 104.9985,
"eval_samples_per_second": 58.372,
"eval_steps_per_second": 0.914,
"step": 100
},
{
"epoch": 0.4872389791183295,
"grad_norm": 5.05046913930874,
"learning_rate": 9.457836797666721e-07,
"loss": 1.0944,
"step": 105
},
{
"epoch": 0.5104408352668214,
"grad_norm": 5.042019310011314,
"learning_rate": 9.400463314275941e-07,
"loss": 1.0946,
"step": 110
},
{
"epoch": 0.5336426914153132,
"grad_norm": 4.6774297543026435,
"learning_rate": 9.340397251217008e-07,
"loss": 1.067,
"step": 115
},
{
"epoch": 0.5568445475638051,
"grad_norm": 5.124932810617846,
"learning_rate": 9.27767536204258e-07,
"loss": 1.0664,
"step": 120
},
{
"epoch": 0.580046403712297,
"grad_norm": 5.129237449751678,
"learning_rate": 9.212336025366787e-07,
"loss": 1.0635,
"step": 125
},
{
"epoch": 0.6032482598607889,
"grad_norm": 5.116692236159179,
"learning_rate": 9.144419221381918e-07,
"loss": 1.0667,
"step": 130
},
{
"epoch": 0.6264501160092807,
"grad_norm": 4.99964002013004,
"learning_rate": 9.073966507395121e-07,
"loss": 1.0637,
"step": 135
},
{
"epoch": 0.6496519721577726,
"grad_norm": 5.177637510626285,
"learning_rate": 9.001020992400085e-07,
"loss": 1.0634,
"step": 140
},
{
"epoch": 0.6728538283062645,
"grad_norm": 5.4138117417423075,
"learning_rate": 8.925627310699274e-07,
"loss": 1.0637,
"step": 145
},
{
"epoch": 0.6960556844547564,
"grad_norm": 5.246483799648231,
"learning_rate": 8.84783159459285e-07,
"loss": 1.0619,
"step": 150
},
{
"epoch": 0.6960556844547564,
"eval_loss": 1.055559754371643,
"eval_runtime": 104.7917,
"eval_samples_per_second": 58.487,
"eval_steps_per_second": 0.916,
"step": 150
},
{
"epoch": 0.7192575406032483,
"grad_norm": 5.122602649783386,
"learning_rate": 8.767681446150976e-07,
"loss": 1.0575,
"step": 155
},
{
"epoch": 0.7424593967517401,
"grad_norm": 5.29929198962553,
"learning_rate": 8.68522590808682e-07,
"loss": 1.0505,
"step": 160
},
{
"epoch": 0.765661252900232,
"grad_norm": 4.828480264754899,
"learning_rate": 8.600515433748001e-07,
"loss": 1.0518,
"step": 165
},
{
"epoch": 0.7888631090487239,
"grad_norm": 5.567752966669097,
"learning_rate": 8.51360185624495e-07,
"loss": 1.0378,
"step": 170
},
{
"epoch": 0.8120649651972158,
"grad_norm": 5.249257083574839,
"learning_rate": 8.424538356734956e-07,
"loss": 1.0358,
"step": 175
},
{
"epoch": 0.8352668213457076,
"grad_norm": 5.3602912051067575,
"learning_rate": 8.333379431881397e-07,
"loss": 1.0199,
"step": 180
},
{
"epoch": 0.8584686774941995,
"grad_norm": 5.127092342189952,
"learning_rate": 8.240180860508026e-07,
"loss": 1.0404,
"step": 185
},
{
"epoch": 0.8816705336426914,
"grad_norm": 4.841085798771151,
"learning_rate": 8.144999669468713e-07,
"loss": 1.0537,
"step": 190
},
{
"epoch": 0.9048723897911833,
"grad_norm": 4.891876601750079,
"learning_rate": 8.047894098753539e-07,
"loss": 1.0461,
"step": 195
},
{
"epoch": 0.9280742459396751,
"grad_norm": 5.235026926317955,
"learning_rate": 7.948923565852597e-07,
"loss": 1.0468,
"step": 200
},
{
"epoch": 0.9280742459396751,
"eval_loss": 1.0335279703140259,
"eval_runtime": 104.9086,
"eval_samples_per_second": 58.422,
"eval_steps_per_second": 0.915,
"step": 200
},
{
"epoch": 0.951276102088167,
"grad_norm": 5.158788907122558,
"learning_rate": 7.848148629399285e-07,
"loss": 1.0159,
"step": 205
},
{
"epoch": 0.974477958236659,
"grad_norm": 5.333986831951347,
"learning_rate": 7.745630952115363e-07,
"loss": 1.0224,
"step": 210
},
{
"epoch": 0.9976798143851509,
"grad_norm": 4.977110057341932,
"learning_rate": 7.641433263080418e-07,
"loss": 1.0242,
"step": 215
},
{
"epoch": 1.0208816705336428,
"grad_norm": 5.799017323239554,
"learning_rate": 7.535619319348865e-07,
"loss": 0.9359,
"step": 220
},
{
"epoch": 1.0440835266821347,
"grad_norm": 5.8085173406660955,
"learning_rate": 7.428253866937918e-07,
"loss": 0.9014,
"step": 225
},
{
"epoch": 1.0672853828306264,
"grad_norm": 5.888836993626545,
"learning_rate": 7.319402601210447e-07,
"loss": 0.8932,
"step": 230
},
{
"epoch": 1.0904872389791183,
"grad_norm": 5.701985019154487,
"learning_rate": 7.209132126676933e-07,
"loss": 0.8953,
"step": 235
},
{
"epoch": 1.1136890951276102,
"grad_norm": 5.4287275730825,
"learning_rate": 7.097509916241145e-07,
"loss": 0.9011,
"step": 240
},
{
"epoch": 1.136890951276102,
"grad_norm": 5.486828404933326,
"learning_rate": 6.984604269914436e-07,
"loss": 0.879,
"step": 245
},
{
"epoch": 1.160092807424594,
"grad_norm": 5.841423335192377,
"learning_rate": 6.870484273023967e-07,
"loss": 0.8999,
"step": 250
},
{
"epoch": 1.160092807424594,
"eval_loss": 1.0276210308074951,
"eval_runtime": 104.8459,
"eval_samples_per_second": 58.457,
"eval_steps_per_second": 0.916,
"step": 250
},
{
"epoch": 1.1832946635730859,
"grad_norm": 6.104179231791857,
"learning_rate": 6.755219753940388e-07,
"loss": 0.8799,
"step": 255
},
{
"epoch": 1.2064965197215778,
"grad_norm": 5.683759928140861,
"learning_rate": 6.638881241350883e-07,
"loss": 0.8848,
"step": 260
},
{
"epoch": 1.2296983758700697,
"grad_norm": 6.2781173429736565,
"learning_rate": 6.52153992110368e-07,
"loss": 0.8753,
"step": 265
},
{
"epoch": 1.2529002320185616,
"grad_norm": 5.1836939832127555,
"learning_rate": 6.403267592650466e-07,
"loss": 0.8898,
"step": 270
},
{
"epoch": 1.2761020881670533,
"grad_norm": 5.855887931011074,
"learning_rate": 6.28413662511334e-07,
"loss": 0.8782,
"step": 275
},
{
"epoch": 1.2993039443155452,
"grad_norm": 5.740445426512396,
"learning_rate": 6.164219913003207e-07,
"loss": 0.8931,
"step": 280
},
{
"epoch": 1.322505800464037,
"grad_norm": 5.566584948356301,
"learning_rate": 6.043590831616676e-07,
"loss": 0.8696,
"step": 285
},
{
"epoch": 1.345707656612529,
"grad_norm": 6.107582813588156,
"learning_rate": 5.92232319213878e-07,
"loss": 0.8822,
"step": 290
},
{
"epoch": 1.368909512761021,
"grad_norm": 5.795907587506582,
"learning_rate": 5.800491196478988e-07,
"loss": 0.8995,
"step": 295
},
{
"epoch": 1.3921113689095128,
"grad_norm": 5.6643118349028425,
"learning_rate": 5.678169391868127e-07,
"loss": 0.8818,
"step": 300
},
{
"epoch": 1.3921113689095128,
"eval_loss": 1.016647219657898,
"eval_runtime": 104.7971,
"eval_samples_per_second": 58.484,
"eval_steps_per_second": 0.916,
"step": 300
},
{
"epoch": 1.4153132250580047,
"grad_norm": 5.390536356888803,
"learning_rate": 5.555432625244023e-07,
"loss": 0.866,
"step": 305
},
{
"epoch": 1.4385150812064964,
"grad_norm": 5.526626348796313,
"learning_rate": 5.432355997453728e-07,
"loss": 0.8807,
"step": 310
},
{
"epoch": 1.4617169373549883,
"grad_norm": 5.944664241466763,
"learning_rate": 5.309014817300421e-07,
"loss": 0.8874,
"step": 315
},
{
"epoch": 1.4849187935034802,
"grad_norm": 5.3905922077833335,
"learning_rate": 5.185484555463026e-07,
"loss": 0.8798,
"step": 320
},
{
"epoch": 1.5081206496519721,
"grad_norm": 6.160504369378942,
"learning_rate": 5.061840798316814e-07,
"loss": 0.8941,
"step": 325
},
{
"epoch": 1.531322505800464,
"grad_norm": 5.8405554397510695,
"learning_rate": 4.938159201683186e-07,
"loss": 0.8825,
"step": 330
},
{
"epoch": 1.554524361948956,
"grad_norm": 5.674379527635317,
"learning_rate": 4.814515444536974e-07,
"loss": 0.8648,
"step": 335
},
{
"epoch": 1.5777262180974478,
"grad_norm": 5.647819914588787,
"learning_rate": 4.69098518269958e-07,
"loss": 0.8767,
"step": 340
},
{
"epoch": 1.6009280742459397,
"grad_norm": 5.823124283337199,
"learning_rate": 4.5676440025462726e-07,
"loss": 0.8819,
"step": 345
},
{
"epoch": 1.6241299303944317,
"grad_norm": 5.627075817812563,
"learning_rate": 4.444567374755977e-07,
"loss": 0.8729,
"step": 350
},
{
"epoch": 1.6241299303944317,
"eval_loss": 1.0092262029647827,
"eval_runtime": 104.5371,
"eval_samples_per_second": 58.63,
"eval_steps_per_second": 0.918,
"step": 350
},
{
"epoch": 1.6473317865429236,
"grad_norm": 5.606729222247986,
"learning_rate": 4.3218306081318713e-07,
"loss": 0.8701,
"step": 355
},
{
"epoch": 1.6705336426914155,
"grad_norm": 5.850125242783924,
"learning_rate": 4.199508803521012e-07,
"loss": 0.8668,
"step": 360
},
{
"epoch": 1.6937354988399071,
"grad_norm": 5.9911899428687265,
"learning_rate": 4.0776768078612207e-07,
"loss": 0.8746,
"step": 365
},
{
"epoch": 1.716937354988399,
"grad_norm": 5.521154120946142,
"learning_rate": 3.9564091683833244e-07,
"loss": 0.8796,
"step": 370
},
{
"epoch": 1.740139211136891,
"grad_norm": 5.8275802651476525,
"learning_rate": 3.835780086996793e-07,
"loss": 0.8694,
"step": 375
},
{
"epoch": 1.7633410672853829,
"grad_norm": 5.543421042435622,
"learning_rate": 3.7158633748866607e-07,
"loss": 0.8807,
"step": 380
},
{
"epoch": 1.7865429234338746,
"grad_norm": 5.701862114223059,
"learning_rate": 3.596732407349536e-07,
"loss": 0.8759,
"step": 385
},
{
"epoch": 1.8097447795823665,
"grad_norm": 6.043389558191902,
"learning_rate": 3.4784600788963193e-07,
"loss": 0.8809,
"step": 390
},
{
"epoch": 1.8329466357308584,
"grad_norm": 5.976724503752257,
"learning_rate": 3.3611187586491157e-07,
"loss": 0.8709,
"step": 395
},
{
"epoch": 1.8561484918793503,
"grad_norm": 5.509505876146179,
"learning_rate": 3.244780246059612e-07,
"loss": 0.8653,
"step": 400
},
{
"epoch": 1.8561484918793503,
"eval_loss": 1.0026302337646484,
"eval_runtime": 104.9261,
"eval_samples_per_second": 58.413,
"eval_steps_per_second": 0.915,
"step": 400
},
{
"epoch": 1.8793503480278422,
"grad_norm": 5.4679176985684075,
"learning_rate": 3.129515726976034e-07,
"loss": 0.8623,
"step": 405
},
{
"epoch": 1.902552204176334,
"grad_norm": 5.2503978399283495,
"learning_rate": 3.015395730085565e-07,
"loss": 0.8675,
"step": 410
},
{
"epoch": 1.925754060324826,
"grad_norm": 5.67660650040009,
"learning_rate": 2.902490083758856e-07,
"loss": 0.8593,
"step": 415
},
{
"epoch": 1.948955916473318,
"grad_norm": 5.817460747815204,
"learning_rate": 2.790867873323067e-07,
"loss": 0.8802,
"step": 420
},
{
"epoch": 1.9721577726218098,
"grad_norm": 5.711721871686218,
"learning_rate": 2.680597398789554e-07,
"loss": 0.866,
"step": 425
},
{
"epoch": 1.9953596287703017,
"grad_norm": 5.71044846682499,
"learning_rate": 2.5717461330620815e-07,
"loss": 0.8874,
"step": 430
},
{
"epoch": 2.0185614849187936,
"grad_norm": 6.131631291570077,
"learning_rate": 2.464380680651134e-07,
"loss": 0.8141,
"step": 435
},
{
"epoch": 2.0417633410672855,
"grad_norm": 6.411927697119563,
"learning_rate": 2.358566736919581e-07,
"loss": 0.7797,
"step": 440
},
{
"epoch": 2.0649651972157774,
"grad_norm": 6.319794142930513,
"learning_rate": 2.2543690478846388e-07,
"loss": 0.7814,
"step": 445
},
{
"epoch": 2.0881670533642693,
"grad_norm": 6.218375120452989,
"learning_rate": 2.1518513706007152e-07,
"loss": 0.7781,
"step": 450
},
{
"epoch": 2.0881670533642693,
"eval_loss": 1.0152158737182617,
"eval_runtime": 104.5587,
"eval_samples_per_second": 58.618,
"eval_steps_per_second": 0.918,
"step": 450
},
{
"epoch": 2.111368909512761,
"grad_norm": 6.087242686632992,
"learning_rate": 2.051076434147403e-07,
"loss": 0.7741,
"step": 455
},
{
"epoch": 2.1345707656612527,
"grad_norm": 5.953785056646644,
"learning_rate": 1.9521059012464607e-07,
"loss": 0.7806,
"step": 460
},
{
"epoch": 2.1577726218097446,
"grad_norm": 5.8793630013973095,
"learning_rate": 1.855000330531289e-07,
"loss": 0.7808,
"step": 465
},
{
"epoch": 2.1809744779582365,
"grad_norm": 5.810194372043576,
"learning_rate": 1.7598191394919737e-07,
"loss": 0.7805,
"step": 470
},
{
"epoch": 2.2041763341067284,
"grad_norm": 5.9259754232937665,
"learning_rate": 1.666620568118603e-07,
"loss": 0.7951,
"step": 475
},
{
"epoch": 2.2273781902552203,
"grad_norm": 5.746190339722776,
"learning_rate": 1.5754616432650443e-07,
"loss": 0.776,
"step": 480
},
{
"epoch": 2.2505800464037122,
"grad_norm": 5.8343296636667175,
"learning_rate": 1.4863981437550498e-07,
"loss": 0.7721,
"step": 485
},
{
"epoch": 2.273781902552204,
"grad_norm": 6.355429399217548,
"learning_rate": 1.3994845662519983e-07,
"loss": 0.7755,
"step": 490
},
{
"epoch": 2.296983758700696,
"grad_norm": 6.250949867364635,
"learning_rate": 1.3147740919131812e-07,
"loss": 0.786,
"step": 495
},
{
"epoch": 2.320185614849188,
"grad_norm": 5.784318706374051,
"learning_rate": 1.2323185538490228e-07,
"loss": 0.7742,
"step": 500
},
{
"epoch": 2.320185614849188,
"eval_loss": 1.0131419897079468,
"eval_runtime": 104.864,
"eval_samples_per_second": 58.447,
"eval_steps_per_second": 0.915,
"step": 500
},
{
"epoch": 2.34338747099768,
"grad_norm": 5.9222096032764435,
"learning_rate": 1.1521684054071523e-07,
"loss": 0.7683,
"step": 505
},
{
"epoch": 2.3665893271461718,
"grad_norm": 6.101554765204232,
"learning_rate": 1.0743726893007254e-07,
"loss": 0.7757,
"step": 510
},
{
"epoch": 2.3897911832946637,
"grad_norm": 6.042489298416156,
"learning_rate": 9.989790075999144e-08,
"loss": 0.7705,
"step": 515
},
{
"epoch": 2.4129930394431556,
"grad_norm": 5.908756215523099,
"learning_rate": 9.260334926048785e-08,
"loss": 0.7732,
"step": 520
},
{
"epoch": 2.4361948955916475,
"grad_norm": 6.244145835259339,
"learning_rate": 8.555807786180813e-08,
"loss": 0.789,
"step": 525
},
{
"epoch": 2.4593967517401394,
"grad_norm": 6.142728167500032,
"learning_rate": 7.876639746332131e-08,
"loss": 0.7868,
"step": 530
},
{
"epoch": 2.4825986078886313,
"grad_norm": 6.195576909414474,
"learning_rate": 7.223246379574205e-08,
"loss": 0.7794,
"step": 535
},
{
"epoch": 2.505800464037123,
"grad_norm": 6.114972696221882,
"learning_rate": 6.596027487829913e-08,
"loss": 0.7786,
"step": 540
},
{
"epoch": 2.529002320185615,
"grad_norm": 5.970384712179319,
"learning_rate": 5.995366857240591e-08,
"loss": 0.7677,
"step": 545
},
{
"epoch": 2.5522041763341066,
"grad_norm": 5.792971865825656,
"learning_rate": 5.421632023332778e-08,
"loss": 0.7689,
"step": 550
},
{
"epoch": 2.5522041763341066,
"eval_loss": 1.0137938261032104,
"eval_runtime": 104.8642,
"eval_samples_per_second": 58.447,
"eval_steps_per_second": 0.915,
"step": 550
},
{
"epoch": 2.5754060324825985,
"grad_norm": 5.910460030086388,
"learning_rate": 4.8751740461286826e-08,
"loss": 0.775,
"step": 555
},
{
"epoch": 2.5986078886310904,
"grad_norm": 6.1429755184015535,
"learning_rate": 4.356327295337542e-08,
"loss": 0.7699,
"step": 560
},
{
"epoch": 2.6218097447795823,
"grad_norm": 6.192833297686972,
"learning_rate": 3.865409245759671e-08,
"loss": 0.7733,
"step": 565
},
{
"epoch": 2.645011600928074,
"grad_norm": 6.234290188401347,
"learning_rate": 3.402720283028277e-08,
"loss": 0.7685,
"step": 570
},
{
"epoch": 2.668213457076566,
"grad_norm": 6.340392302497708,
"learning_rate": 2.968543519807809e-08,
"loss": 0.7884,
"step": 575
},
{
"epoch": 2.691415313225058,
"grad_norm": 6.2140159508315955,
"learning_rate": 2.5631446225614527e-08,
"loss": 0.7837,
"step": 580
},
{
"epoch": 2.71461716937355,
"grad_norm": 6.1288577970328575,
"learning_rate": 2.1867716489936294e-08,
"loss": 0.7813,
"step": 585
},
{
"epoch": 2.737819025522042,
"grad_norm": 6.397654652667995,
"learning_rate": 1.8396548962671454e-08,
"loss": 0.7775,
"step": 590
},
{
"epoch": 2.7610208816705337,
"grad_norm": 6.034802257671803,
"learning_rate": 1.5220067600876684e-08,
"loss": 0.7674,
"step": 595
},
{
"epoch": 2.7842227378190256,
"grad_norm": 6.568866077043727,
"learning_rate": 1.2340216047418694e-08,
"loss": 0.7824,
"step": 600
},
{
"epoch": 2.7842227378190256,
"eval_loss": 1.0129340887069702,
"eval_runtime": 104.6371,
"eval_samples_per_second": 58.574,
"eval_steps_per_second": 0.917,
"step": 600
},
{
"epoch": 2.8074245939675175,
"grad_norm": 5.954722052193834,
"learning_rate": 9.758756441687332e-09,
"loss": 0.7668,
"step": 605
},
{
"epoch": 2.8306264501160094,
"grad_norm": 5.969817376300994,
"learning_rate": 7.477268341368359e-09,
"loss": 0.7727,
"step": 610
},
{
"epoch": 2.853828306264501,
"grad_norm": 6.216712316873081,
"learning_rate": 5.497147755934628e-09,
"loss": 0.7749,
"step": 615
},
{
"epoch": 2.877030162412993,
"grad_norm": 5.796077454156364,
"learning_rate": 3.819606292448541e-09,
"loss": 0.7734,
"step": 620
},
{
"epoch": 2.9002320185614847,
"grad_norm": 6.10165963135605,
"learning_rate": 2.4456704141967433e-09,
"loss": 0.7798,
"step": 625
},
{
"epoch": 2.9234338747099766,
"grad_norm": 5.84098005395959,
"learning_rate": 1.3761808126126483e-09,
"loss": 0.7684,
"step": 630
},
{
"epoch": 2.9466357308584685,
"grad_norm": 6.284934992265928,
"learning_rate": 6.117918928693622e-10,
"loss": 0.7664,
"step": 635
},
{
"epoch": 2.9698375870069604,
"grad_norm": 6.05781836596347,
"learning_rate": 1.529713734584326e-10,
"loss": 0.7692,
"step": 640
},
{
"epoch": 2.9930394431554523,
"grad_norm": 6.23938718298016,
"learning_rate": 0.0,
"loss": 0.7789,
"step": 645
},
{
"epoch": 2.9930394431554523,
"step": 645,
"total_flos": 3802978389590016.0,
"train_loss": 0.923209237682727,
"train_runtime": 9905.6925,
"train_samples_per_second": 16.704,
"train_steps_per_second": 0.065
}
],
"logging_steps": 5,
"max_steps": 645,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3802978389590016.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}