{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.9973614775725594,
"eval_steps": 500,
"global_step": 710,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.04221635883905013,
"grad_norm": 0.50788813829422,
"learning_rate": 2.5e-05,
"loss": 4.0916,
"step": 10
},
{
"epoch": 0.08443271767810026,
"grad_norm": 0.31596097350120544,
"learning_rate": 2.9996874776728528e-05,
"loss": 3.8989,
"step": 20
},
{
"epoch": 0.1266490765171504,
"grad_norm": 0.28351184725761414,
"learning_rate": 2.998418103705505e-05,
"loss": 3.6225,
"step": 30
},
{
"epoch": 0.16886543535620052,
"grad_norm": 0.2747008502483368,
"learning_rate": 2.996173263030885e-05,
"loss": 3.3931,
"step": 40
},
{
"epoch": 0.21108179419525067,
"grad_norm": 0.27435237169265747,
"learning_rate": 2.9929545796017835e-05,
"loss": 3.1614,
"step": 50
},
{
"epoch": 0.2532981530343008,
"grad_norm": 0.31030479073524475,
"learning_rate": 2.9887643818640412e-05,
"loss": 2.882,
"step": 60
},
{
"epoch": 0.2955145118733509,
"grad_norm": 0.3085922598838806,
"learning_rate": 2.983605701072119e-05,
"loss": 2.6283,
"step": 70
},
{
"epoch": 0.33773087071240104,
"grad_norm": 0.230041041970253,
"learning_rate": 2.9774822690962358e-05,
"loss": 2.3723,
"step": 80
},
{
"epoch": 0.37994722955145116,
"grad_norm": 0.19185876846313477,
"learning_rate": 2.9703985157226802e-05,
"loss": 2.1931,
"step": 90
},
{
"epoch": 0.42216358839050133,
"grad_norm": 0.17534567415714264,
"learning_rate": 2.9623595654492328e-05,
"loss": 2.1193,
"step": 100
},
{
"epoch": 0.46437994722955145,
"grad_norm": 0.15833114087581635,
"learning_rate": 2.953371233778022e-05,
"loss": 2.0044,
"step": 110
},
{
"epoch": 0.5065963060686016,
"grad_norm": 0.152941033244133,
"learning_rate": 2.943440023008502e-05,
"loss": 2.0208,
"step": 120
},
{
"epoch": 0.5488126649076517,
"grad_norm": 0.14308719336986542,
"learning_rate": 2.932573117533585e-05,
"loss": 2.0013,
"step": 130
},
{
"epoch": 0.5910290237467019,
"grad_norm": 0.12415236979722977,
"learning_rate": 2.9207783786423436e-05,
"loss": 1.9719,
"step": 140
},
{
"epoch": 0.633245382585752,
"grad_norm": 0.14564257860183716,
"learning_rate": 2.9080643388330266e-05,
"loss": 1.9659,
"step": 150
},
{
"epoch": 0.6754617414248021,
"grad_norm": 0.14092124998569489,
"learning_rate": 2.8944401956405192e-05,
"loss": 1.9645,
"step": 160
},
{
"epoch": 0.7176781002638523,
"grad_norm": 0.14931683242321014,
"learning_rate": 2.8799158049827027e-05,
"loss": 1.902,
"step": 170
},
{
"epoch": 0.7598944591029023,
"grad_norm": 0.13305355608463287,
"learning_rate": 2.8645016740305286e-05,
"loss": 2.0203,
"step": 180
},
{
"epoch": 0.8021108179419525,
"grad_norm": 0.13253405690193176,
"learning_rate": 2.8482089536069683e-05,
"loss": 1.92,
"step": 190
},
{
"epoch": 0.8443271767810027,
"grad_norm": 0.15814724564552307,
"learning_rate": 2.8310494301203323e-05,
"loss": 1.9354,
"step": 200
},
{
"epoch": 0.8865435356200527,
"grad_norm": 0.12858152389526367,
"learning_rate": 2.8130355170378002e-05,
"loss": 1.8576,
"step": 210
},
{
"epoch": 0.9287598944591029,
"grad_norm": 0.15734779834747314,
"learning_rate": 2.7941802459053222e-05,
"loss": 1.8994,
"step": 220
},
{
"epoch": 0.9709762532981531,
"grad_norm": 0.13302397727966309,
"learning_rate": 2.7744972569203985e-05,
"loss": 1.9127,
"step": 230
},
{
"epoch": 1.0131926121372032,
"grad_norm": 0.14125587046146393,
"learning_rate": 2.754000789064544e-05,
"loss": 1.856,
"step": 240
},
{
"epoch": 1.0554089709762533,
"grad_norm": 0.1589905321598053,
"learning_rate": 2.7327056698025907e-05,
"loss": 1.8638,
"step": 250
},
{
"epoch": 1.0976253298153034,
"grad_norm": 0.15254752337932587,
"learning_rate": 2.710627304356264e-05,
"loss": 1.8925,
"step": 260
},
{
"epoch": 1.1398416886543536,
"grad_norm": 0.1510193943977356,
"learning_rate": 2.6877816645598093e-05,
"loss": 1.8423,
"step": 270
},
{
"epoch": 1.1820580474934037,
"grad_norm": 0.15128286182880402,
"learning_rate": 2.664185277305712e-05,
"loss": 1.8433,
"step": 280
},
{
"epoch": 1.2242744063324538,
"grad_norm": 0.16148072481155396,
"learning_rate": 2.639855212588892e-05,
"loss": 1.8482,
"step": 290
},
{
"epoch": 1.266490765171504,
"grad_norm": 0.1490674763917923,
"learning_rate": 2.6148090711579976e-05,
"loss": 1.8667,
"step": 300
},
{
"epoch": 1.3087071240105541,
"grad_norm": 0.141664519906044,
"learning_rate": 2.5890649717827517e-05,
"loss": 1.8034,
"step": 310
},
{
"epoch": 1.3509234828496042,
"grad_norm": 0.13706474006175995,
"learning_rate": 2.5626415381465506e-05,
"loss": 1.8521,
"step": 320
},
{
"epoch": 1.3931398416886545,
"grad_norm": 0.1380421221256256,
"learning_rate": 2.535557885373801e-05,
"loss": 1.8737,
"step": 330
},
{
"epoch": 1.4353562005277045,
"grad_norm": 0.16114376485347748,
"learning_rate": 2.5078336062017396e-05,
"loss": 1.8536,
"step": 340
},
{
"epoch": 1.4775725593667546,
"grad_norm": 0.14424873888492584,
"learning_rate": 2.4794887568067413e-05,
"loss": 1.8225,
"step": 350
},
{
"epoch": 1.5197889182058049,
"grad_norm": 0.15957856178283691,
"learning_rate": 2.4505438422953686e-05,
"loss": 1.8637,
"step": 360
},
{
"epoch": 1.562005277044855,
"grad_norm": 0.16451793909072876,
"learning_rate": 2.421019801870658e-05,
"loss": 1.8643,
"step": 370
},
{
"epoch": 1.604221635883905,
"grad_norm": 0.1663391888141632,
"learning_rate": 2.390937993684371e-05,
"loss": 1.8301,
"step": 380
},
{
"epoch": 1.6464379947229553,
"grad_norm": 0.15829075872898102,
"learning_rate": 2.3603201793861776e-05,
"loss": 1.8388,
"step": 390
},
{
"epoch": 1.6886543535620053,
"grad_norm": 0.14315567910671234,
"learning_rate": 2.329188508380936e-05,
"loss": 1.8571,
"step": 400
},
{
"epoch": 1.7308707124010554,
"grad_norm": 0.16106358170509338,
"learning_rate": 2.2975655018054685e-05,
"loss": 1.8783,
"step": 410
},
{
"epoch": 1.7730870712401057,
"grad_norm": 0.14807620644569397,
"learning_rate": 2.2654740362364196e-05,
"loss": 1.8006,
"step": 420
},
{
"epoch": 1.8153034300791555,
"grad_norm": 0.1798703372478485,
"learning_rate": 2.232937327140983e-05,
"loss": 1.7707,
"step": 430
},
{
"epoch": 1.8575197889182058,
"grad_norm": 0.15553659200668335,
"learning_rate": 2.1999789120824702e-05,
"loss": 1.8307,
"step": 440
},
{
"epoch": 1.899736147757256,
"grad_norm": 0.14261312782764435,
"learning_rate": 2.166622633692871e-05,
"loss": 1.7768,
"step": 450
},
{
"epoch": 1.941952506596306,
"grad_norm": 0.18098565936088562,
"learning_rate": 2.13289262242472e-05,
"loss": 1.7687,
"step": 460
},
{
"epoch": 1.9841688654353562,
"grad_norm": 0.15806493163108826,
"learning_rate": 2.0988132790947478e-05,
"loss": 1.8417,
"step": 470
},
{
"epoch": 2.0263852242744065,
"grad_norm": 0.14425334334373474,
"learning_rate": 2.0644092572319572e-05,
"loss": 1.8255,
"step": 480
},
{
"epoch": 2.0686015831134563,
"grad_norm": 0.1517636924982071,
"learning_rate": 2.0297054452428663e-05,
"loss": 1.7771,
"step": 490
},
{
"epoch": 2.1108179419525066,
"grad_norm": 0.16989009082317352,
"learning_rate": 1.9947269484068524e-05,
"loss": 1.787,
"step": 500
},
{
"epoch": 2.153034300791557,
"grad_norm": 0.15170590579509735,
"learning_rate": 1.9594990707146005e-05,
"loss": 1.7909,
"step": 510
},
{
"epoch": 2.1952506596306067,
"grad_norm": 0.15983903408050537,
"learning_rate": 1.9240472965627965e-05,
"loss": 1.7876,
"step": 520
},
{
"epoch": 2.237467018469657,
"grad_norm": 0.15358828008174896,
"learning_rate": 1.8883972723183257e-05,
"loss": 1.7535,
"step": 530
},
{
"epoch": 2.2796833773087073,
"grad_norm": 0.17618593573570251,
"learning_rate": 1.8525747877652812e-05,
"loss": 1.8016,
"step": 540
},
{
"epoch": 2.321899736147757,
"grad_norm": 0.14523907005786896,
"learning_rate": 1.8166057574482378e-05,
"loss": 1.8273,
"step": 550
},
{
"epoch": 2.3641160949868074,
"grad_norm": 0.14915776252746582,
"learning_rate": 1.7805162019252628e-05,
"loss": 1.7179,
"step": 560
},
{
"epoch": 2.4063324538258577,
"grad_norm": 0.15583857893943787,
"learning_rate": 1.7443322289442403e-05,
"loss": 1.8101,
"step": 570
},
{
"epoch": 2.4485488126649075,
"grad_norm": 0.14223437011241913,
"learning_rate": 1.7080800145561163e-05,
"loss": 1.7888,
"step": 580
},
{
"epoch": 2.490765171503958,
"grad_norm": 0.1892772763967514,
"learning_rate": 1.6717857841787367e-05,
"loss": 1.7598,
"step": 590
},
{
"epoch": 2.532981530343008,
"grad_norm": 0.17377297580242157,
"learning_rate": 1.6354757936249698e-05,
"loss": 1.7866,
"step": 600
},
{
"epoch": 2.575197889182058,
"grad_norm": 0.15643130242824554,
"learning_rate": 1.5991763101088416e-05,
"loss": 1.8072,
"step": 610
},
{
"epoch": 2.6174142480211082,
"grad_norm": 0.16179224848747253,
"learning_rate": 1.5629135932434233e-05,
"loss": 1.8096,
"step": 620
},
{
"epoch": 2.6596306068601585,
"grad_norm": 0.1530320644378662,
"learning_rate": 1.5267138760442164e-05,
"loss": 1.8024,
"step": 630
},
{
"epoch": 2.7018469656992083,
"grad_norm": 0.1630353033542633,
"learning_rate": 1.4906033459517801e-05,
"loss": 1.8308,
"step": 640
},
{
"epoch": 2.7440633245382586,
"grad_norm": 0.17242635786533356,
"learning_rate": 1.45460812588733e-05,
"loss": 1.7974,
"step": 650
},
{
"epoch": 2.786279683377309,
"grad_norm": 0.18635079264640808,
"learning_rate": 1.4187542553550054e-05,
"loss": 1.7643,
"step": 660
},
{
"epoch": 2.8284960422163588,
"grad_norm": 0.18247109651565552,
"learning_rate": 1.3830676716044876e-05,
"loss": 1.8032,
"step": 670
},
{
"epoch": 2.870712401055409,
"grad_norm": 0.1679922342300415,
"learning_rate": 1.34757419086759e-05,
"loss": 1.7435,
"step": 680
},
{
"epoch": 2.9129287598944593,
"grad_norm": 0.16725490987300873,
"learning_rate": 1.3122994896823878e-05,
"loss": 1.748,
"step": 690
},
{
"epoch": 2.955145118733509,
"grad_norm": 0.17414063215255737,
"learning_rate": 1.277269086318417e-05,
"loss": 1.8094,
"step": 700
},
{
"epoch": 2.9973614775725594,
"grad_norm": 0.17552894353866577,
"learning_rate": 1.2425083223163535e-05,
"loss": 1.7267,
"step": 710
}
],
"logging_steps": 10,
"max_steps": 1180,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 3.20870025603072e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}