{
"best_metric": 0.3851391077041626,
"best_model_checkpoint": "./beans_outputs/checkpoint-130",
"epoch": 10.0,
"eval_steps": 500,
"global_step": 1300,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.07692307692307693,
"grad_norm": 1.2493035793304443,
"learning_rate": 0.004961538461538462,
"loss": 0.8568,
"step": 10
},
{
"epoch": 0.15384615384615385,
"grad_norm": 1.4747363328933716,
"learning_rate": 0.004923076923076923,
"loss": 0.6606,
"step": 20
},
{
"epoch": 0.23076923076923078,
"grad_norm": 0.8188544511795044,
"learning_rate": 0.004884615384615385,
"loss": 0.3787,
"step": 30
},
{
"epoch": 0.3076923076923077,
"grad_norm": 1.718248724937439,
"learning_rate": 0.004846153846153846,
"loss": 0.3154,
"step": 40
},
{
"epoch": 0.38461538461538464,
"grad_norm": 2.2010860443115234,
"learning_rate": 0.004807692307692308,
"loss": 0.4932,
"step": 50
},
{
"epoch": 0.46153846153846156,
"grad_norm": 2.927755832672119,
"learning_rate": 0.0047692307692307695,
"loss": 0.561,
"step": 60
},
{
"epoch": 0.5384615384615384,
"grad_norm": 1.2203412055969238,
"learning_rate": 0.004730769230769231,
"loss": 0.3309,
"step": 70
},
{
"epoch": 0.6153846153846154,
"grad_norm": 0.08795254677534103,
"learning_rate": 0.004692307692307693,
"loss": 0.3261,
"step": 80
},
{
"epoch": 0.6923076923076923,
"grad_norm": 0.4609662890434265,
"learning_rate": 0.004653846153846154,
"loss": 0.195,
"step": 90
},
{
"epoch": 0.7692307692307693,
"grad_norm": 0.9046596884727478,
"learning_rate": 0.004615384615384616,
"loss": 0.3352,
"step": 100
},
{
"epoch": 0.8461538461538461,
"grad_norm": 2.5637238025665283,
"learning_rate": 0.0045769230769230765,
"loss": 0.5961,
"step": 110
},
{
"epoch": 0.9230769230769231,
"grad_norm": 0.32293158769607544,
"learning_rate": 0.004538461538461539,
"loss": 0.236,
"step": 120
},
{
"epoch": 1.0,
"grad_norm": 7.484982967376709,
"learning_rate": 0.0045000000000000005,
"loss": 0.6323,
"step": 130
},
{
"epoch": 1.0,
"eval_accuracy": 0.8721804511278195,
"eval_loss": 0.3851391077041626,
"eval_runtime": 0.8932,
"eval_samples_per_second": 148.91,
"eval_steps_per_second": 19.034,
"step": 130
},
{
"epoch": 1.0769230769230769,
"grad_norm": 0.12631376087665558,
"learning_rate": 0.004461538461538462,
"loss": 0.747,
"step": 140
},
{
"epoch": 1.1538461538461537,
"grad_norm": 1.5336790084838867,
"learning_rate": 0.004423076923076923,
"loss": 0.3062,
"step": 150
},
{
"epoch": 1.2307692307692308,
"grad_norm": 0.7460920214653015,
"learning_rate": 0.004384615384615384,
"loss": 0.2088,
"step": 160
},
{
"epoch": 1.3076923076923077,
"grad_norm": 0.6954104900360107,
"learning_rate": 0.004346153846153846,
"loss": 0.2942,
"step": 170
},
{
"epoch": 1.3846153846153846,
"grad_norm": 2.2199032306671143,
"learning_rate": 0.004307692307692308,
"loss": 0.315,
"step": 180
},
{
"epoch": 1.4615384615384617,
"grad_norm": 2.654019832611084,
"learning_rate": 0.004269230769230769,
"loss": 0.4203,
"step": 190
},
{
"epoch": 1.5384615384615383,
"grad_norm": 1.460809350013733,
"learning_rate": 0.004230769230769231,
"loss": 0.4812,
"step": 200
},
{
"epoch": 1.6153846153846154,
"grad_norm": 0.9371921420097351,
"learning_rate": 0.004192307692307692,
"loss": 0.3436,
"step": 210
},
{
"epoch": 1.6923076923076923,
"grad_norm": 0.573295533657074,
"learning_rate": 0.004153846153846154,
"loss": 0.7394,
"step": 220
},
{
"epoch": 1.7692307692307692,
"grad_norm": 3.2005550861358643,
"learning_rate": 0.004115384615384615,
"loss": 0.5694,
"step": 230
},
{
"epoch": 1.8461538461538463,
"grad_norm": 1.0854452848434448,
"learning_rate": 0.004076923076923077,
"loss": 0.6043,
"step": 240
},
{
"epoch": 1.9230769230769231,
"grad_norm": 1.3743146657943726,
"learning_rate": 0.0040384615384615385,
"loss": 0.4406,
"step": 250
},
{
"epoch": 2.0,
"grad_norm": 0.11435157805681229,
"learning_rate": 0.004,
"loss": 0.4102,
"step": 260
},
{
"epoch": 2.0,
"eval_accuracy": 0.6240601503759399,
"eval_loss": 1.1071540117263794,
"eval_runtime": 0.9165,
"eval_samples_per_second": 145.121,
"eval_steps_per_second": 18.549,
"step": 260
},
{
"epoch": 2.076923076923077,
"grad_norm": 0.9636590480804443,
"learning_rate": 0.003961538461538462,
"loss": 0.4669,
"step": 270
},
{
"epoch": 2.1538461538461537,
"grad_norm": 1.4855515956878662,
"learning_rate": 0.003923076923076923,
"loss": 0.4618,
"step": 280
},
{
"epoch": 2.230769230769231,
"grad_norm": 1.6462602615356445,
"learning_rate": 0.003884615384615385,
"loss": 0.5266,
"step": 290
},
{
"epoch": 2.3076923076923075,
"grad_norm": 2.7035958766937256,
"learning_rate": 0.0038461538461538464,
"loss": 0.6045,
"step": 300
},
{
"epoch": 2.3846153846153846,
"grad_norm": 1.7253210544586182,
"learning_rate": 0.0038076923076923075,
"loss": 0.4711,
"step": 310
},
{
"epoch": 2.4615384615384617,
"grad_norm": 0.6617446541786194,
"learning_rate": 0.003769230769230769,
"loss": 0.5785,
"step": 320
},
{
"epoch": 2.5384615384615383,
"grad_norm": 0.6448426842689514,
"learning_rate": 0.003730769230769231,
"loss": 0.7907,
"step": 330
},
{
"epoch": 2.6153846153846154,
"grad_norm": 3.4137613773345947,
"learning_rate": 0.0036923076923076927,
"loss": 0.9229,
"step": 340
},
{
"epoch": 2.6923076923076925,
"grad_norm": 5.124917507171631,
"learning_rate": 0.003653846153846154,
"loss": 1.0287,
"step": 350
},
{
"epoch": 2.769230769230769,
"grad_norm": 1.2190972566604614,
"learning_rate": 0.0036153846153846154,
"loss": 0.7791,
"step": 360
},
{
"epoch": 2.8461538461538463,
"grad_norm": 0.6898893713951111,
"learning_rate": 0.003576923076923077,
"loss": 1.4277,
"step": 370
},
{
"epoch": 2.9230769230769234,
"grad_norm": 2.563831090927124,
"learning_rate": 0.003538461538461539,
"loss": 1.3955,
"step": 380
},
{
"epoch": 3.0,
"grad_norm": 2.519380569458008,
"learning_rate": 0.0034999999999999996,
"loss": 0.8964,
"step": 390
},
{
"epoch": 3.0,
"eval_accuracy": 0.6616541353383458,
"eval_loss": 0.8284351825714111,
"eval_runtime": 0.9347,
"eval_samples_per_second": 142.291,
"eval_steps_per_second": 18.188,
"step": 390
},
{
"epoch": 3.076923076923077,
"grad_norm": 4.3924150466918945,
"learning_rate": 0.0034615384615384616,
"loss": 1.2627,
"step": 400
},
{
"epoch": 3.1538461538461537,
"grad_norm": 1.773996114730835,
"learning_rate": 0.003423076923076923,
"loss": 1.3465,
"step": 410
},
{
"epoch": 3.230769230769231,
"grad_norm": 3.1790945529937744,
"learning_rate": 0.003384615384615385,
"loss": 1.2822,
"step": 420
},
{
"epoch": 3.3076923076923075,
"grad_norm": 5.48622465133667,
"learning_rate": 0.003346153846153846,
"loss": 1.0807,
"step": 430
},
{
"epoch": 3.3846153846153846,
"grad_norm": 3.6509623527526855,
"learning_rate": 0.0033076923076923075,
"loss": 1.1806,
"step": 440
},
{
"epoch": 3.4615384615384617,
"grad_norm": 2.3183326721191406,
"learning_rate": 0.0032692307692307695,
"loss": 1.2542,
"step": 450
},
{
"epoch": 3.5384615384615383,
"grad_norm": 3.2686471939086914,
"learning_rate": 0.003230769230769231,
"loss": 1.2217,
"step": 460
},
{
"epoch": 3.6153846153846154,
"grad_norm": 0.33250316977500916,
"learning_rate": 0.003192307692307692,
"loss": 1.2107,
"step": 470
},
{
"epoch": 3.6923076923076925,
"grad_norm": 2.4644064903259277,
"learning_rate": 0.0031538461538461538,
"loss": 1.2287,
"step": 480
},
{
"epoch": 3.769230769230769,
"grad_norm": 0.9661989808082581,
"learning_rate": 0.0031153846153846153,
"loss": 1.1392,
"step": 490
},
{
"epoch": 3.8461538461538463,
"grad_norm": 1.0765918493270874,
"learning_rate": 0.0030769230769230774,
"loss": 1.1576,
"step": 500
},
{
"epoch": 3.9230769230769234,
"grad_norm": 0.5606147646903992,
"learning_rate": 0.0030384615384615385,
"loss": 1.1148,
"step": 510
},
{
"epoch": 4.0,
"grad_norm": 3.787473678588867,
"learning_rate": 0.003,
"loss": 1.155,
"step": 520
},
{
"epoch": 4.0,
"eval_accuracy": 0.3383458646616541,
"eval_loss": 1.1070177555084229,
"eval_runtime": 0.9062,
"eval_samples_per_second": 146.767,
"eval_steps_per_second": 18.76,
"step": 520
},
{
"epoch": 4.076923076923077,
"grad_norm": 1.8376567363739014,
"learning_rate": 0.0029615384615384616,
"loss": 1.16,
"step": 530
},
{
"epoch": 4.153846153846154,
"grad_norm": 0.1573391705751419,
"learning_rate": 0.002923076923076923,
"loss": 1.1133,
"step": 540
},
{
"epoch": 4.230769230769231,
"grad_norm": 1.7338229417800903,
"learning_rate": 0.0028846153846153843,
"loss": 1.1604,
"step": 550
},
{
"epoch": 4.3076923076923075,
"grad_norm": 1.532076358795166,
"learning_rate": 0.002846153846153846,
"loss": 1.171,
"step": 560
},
{
"epoch": 4.384615384615385,
"grad_norm": 1.2591376304626465,
"learning_rate": 0.002807692307692308,
"loss": 1.116,
"step": 570
},
{
"epoch": 4.461538461538462,
"grad_norm": 1.7697715759277344,
"learning_rate": 0.0027692307692307695,
"loss": 1.1409,
"step": 580
},
{
"epoch": 4.538461538461538,
"grad_norm": 1.0740739107131958,
"learning_rate": 0.0027307692307692306,
"loss": 1.1458,
"step": 590
},
{
"epoch": 4.615384615384615,
"grad_norm": 0.46824851632118225,
"learning_rate": 0.002692307692307692,
"loss": 1.1056,
"step": 600
},
{
"epoch": 4.6923076923076925,
"grad_norm": 1.8973933458328247,
"learning_rate": 0.0026538461538461538,
"loss": 1.1024,
"step": 610
},
{
"epoch": 4.769230769230769,
"grad_norm": 0.06765652447938919,
"learning_rate": 0.0026153846153846158,
"loss": 1.1514,
"step": 620
},
{
"epoch": 4.846153846153846,
"grad_norm": 0.8284403085708618,
"learning_rate": 0.002576923076923077,
"loss": 1.1799,
"step": 630
},
{
"epoch": 4.923076923076923,
"grad_norm": 1.611846923828125,
"learning_rate": 0.0025384615384615385,
"loss": 1.169,
"step": 640
},
{
"epoch": 5.0,
"grad_norm": 3.048553705215454,
"learning_rate": 0.0025,
"loss": 1.1069,
"step": 650
},
{
"epoch": 5.0,
"eval_accuracy": 0.3308270676691729,
"eval_loss": 1.1081749200820923,
"eval_runtime": 0.9355,
"eval_samples_per_second": 142.176,
"eval_steps_per_second": 18.173,
"step": 650
},
{
"epoch": 5.076923076923077,
"grad_norm": 0.6719330549240112,
"learning_rate": 0.0024615384615384616,
"loss": 1.1054,
"step": 660
},
{
"epoch": 5.153846153846154,
"grad_norm": 0.8374503254890442,
"learning_rate": 0.002423076923076923,
"loss": 1.1134,
"step": 670
},
{
"epoch": 5.230769230769231,
"grad_norm": 0.5713789463043213,
"learning_rate": 0.0023846153846153848,
"loss": 1.1534,
"step": 680
},
{
"epoch": 5.3076923076923075,
"grad_norm": 1.0185028314590454,
"learning_rate": 0.0023461538461538463,
"loss": 1.1368,
"step": 690
},
{
"epoch": 5.384615384615385,
"grad_norm": 1.021331787109375,
"learning_rate": 0.002307692307692308,
"loss": 1.094,
"step": 700
},
{
"epoch": 5.461538461538462,
"grad_norm": 0.9150602221488953,
"learning_rate": 0.0022692307692307695,
"loss": 1.1187,
"step": 710
},
{
"epoch": 5.538461538461538,
"grad_norm": 0.741632878780365,
"learning_rate": 0.002230769230769231,
"loss": 1.1129,
"step": 720
},
{
"epoch": 5.615384615384615,
"grad_norm": 1.5859191417694092,
"learning_rate": 0.002192307692307692,
"loss": 1.135,
"step": 730
},
{
"epoch": 5.6923076923076925,
"grad_norm": 2.2667315006256104,
"learning_rate": 0.002153846153846154,
"loss": 1.1474,
"step": 740
},
{
"epoch": 5.769230769230769,
"grad_norm": 1.3794482946395874,
"learning_rate": 0.0021153846153846153,
"loss": 1.1095,
"step": 750
},
{
"epoch": 5.846153846153846,
"grad_norm": 1.2423471212387085,
"learning_rate": 0.002076923076923077,
"loss": 1.1057,
"step": 760
},
{
"epoch": 5.923076923076923,
"grad_norm": 4.28483247756958,
"learning_rate": 0.0020384615384615385,
"loss": 1.1093,
"step": 770
},
{
"epoch": 6.0,
"grad_norm": 4.998449802398682,
"learning_rate": 0.002,
"loss": 1.1748,
"step": 780
},
{
"epoch": 6.0,
"eval_accuracy": 0.3308270676691729,
"eval_loss": 1.1041723489761353,
"eval_runtime": 0.9442,
"eval_samples_per_second": 140.864,
"eval_steps_per_second": 18.005,
"step": 780
},
{
"epoch": 6.076923076923077,
"grad_norm": 2.81730318069458,
"learning_rate": 0.0019615384615384616,
"loss": 1.1381,
"step": 790
},
{
"epoch": 6.153846153846154,
"grad_norm": 2.5270557403564453,
"learning_rate": 0.0019230769230769232,
"loss": 1.1104,
"step": 800
},
{
"epoch": 6.230769230769231,
"grad_norm": 0.8537469506263733,
"learning_rate": 0.0018846153846153845,
"loss": 1.1327,
"step": 810
},
{
"epoch": 6.3076923076923075,
"grad_norm": 0.8818097710609436,
"learning_rate": 0.0018461538461538463,
"loss": 1.1437,
"step": 820
},
{
"epoch": 6.384615384615385,
"grad_norm": 2.496903896331787,
"learning_rate": 0.0018076923076923077,
"loss": 1.1251,
"step": 830
},
{
"epoch": 6.461538461538462,
"grad_norm": 3.2999167442321777,
"learning_rate": 0.0017692307692307695,
"loss": 1.1373,
"step": 840
},
{
"epoch": 6.538461538461538,
"grad_norm": 2.9537723064422607,
"learning_rate": 0.0017307692307692308,
"loss": 1.176,
"step": 850
},
{
"epoch": 6.615384615384615,
"grad_norm": 1.5148764848709106,
"learning_rate": 0.0016923076923076924,
"loss": 1.1146,
"step": 860
},
{
"epoch": 6.6923076923076925,
"grad_norm": 1.3614485263824463,
"learning_rate": 0.0016538461538461537,
"loss": 1.0895,
"step": 870
},
{
"epoch": 6.769230769230769,
"grad_norm": 0.6686407327651978,
"learning_rate": 0.0016153846153846155,
"loss": 1.1245,
"step": 880
},
{
"epoch": 6.846153846153846,
"grad_norm": 1.6276365518569946,
"learning_rate": 0.0015769230769230769,
"loss": 1.1412,
"step": 890
},
{
"epoch": 6.923076923076923,
"grad_norm": 1.4945542812347412,
"learning_rate": 0.0015384615384615387,
"loss": 1.1247,
"step": 900
},
{
"epoch": 7.0,
"grad_norm": 2.74285888671875,
"learning_rate": 0.0015,
"loss": 1.1514,
"step": 910
},
{
"epoch": 7.0,
"eval_accuracy": 0.3308270676691729,
"eval_loss": 1.1007708311080933,
"eval_runtime": 0.9264,
"eval_samples_per_second": 143.562,
"eval_steps_per_second": 18.35,
"step": 910
},
{
"epoch": 7.076923076923077,
"grad_norm": 2.248867988586426,
"learning_rate": 0.0014615384615384616,
"loss": 1.0913,
"step": 920
},
{
"epoch": 7.153846153846154,
"grad_norm": 2.5912275314331055,
"learning_rate": 0.001423076923076923,
"loss": 1.1193,
"step": 930
},
{
"epoch": 7.230769230769231,
"grad_norm": 3.573967695236206,
"learning_rate": 0.0013846153846153847,
"loss": 1.1352,
"step": 940
},
{
"epoch": 7.3076923076923075,
"grad_norm": 2.077554225921631,
"learning_rate": 0.001346153846153846,
"loss": 1.0947,
"step": 950
},
{
"epoch": 7.384615384615385,
"grad_norm": 1.6364086866378784,
"learning_rate": 0.0013076923076923079,
"loss": 1.1296,
"step": 960
},
{
"epoch": 7.461538461538462,
"grad_norm": 1.1806069612503052,
"learning_rate": 0.0012692307692307692,
"loss": 1.1033,
"step": 970
},
{
"epoch": 7.538461538461538,
"grad_norm": 2.4788713455200195,
"learning_rate": 0.0012307692307692308,
"loss": 1.1148,
"step": 980
},
{
"epoch": 7.615384615384615,
"grad_norm": 0.8245519399642944,
"learning_rate": 0.0011923076923076924,
"loss": 1.1121,
"step": 990
},
{
"epoch": 7.6923076923076925,
"grad_norm": 1.568903923034668,
"learning_rate": 0.001153846153846154,
"loss": 1.1163,
"step": 1000
},
{
"epoch": 7.769230769230769,
"grad_norm": 2.2075676918029785,
"learning_rate": 0.0011153846153846155,
"loss": 1.0956,
"step": 1010
},
{
"epoch": 7.846153846153846,
"grad_norm": 1.0866578817367554,
"learning_rate": 0.001076923076923077,
"loss": 1.1065,
"step": 1020
},
{
"epoch": 7.923076923076923,
"grad_norm": 2.4340384006500244,
"learning_rate": 0.0010384615384615384,
"loss": 1.1118,
"step": 1030
},
{
"epoch": 8.0,
"grad_norm": 2.1427950859069824,
"learning_rate": 0.001,
"loss": 1.0956,
"step": 1040
},
{
"epoch": 8.0,
"eval_accuracy": 0.3383458646616541,
"eval_loss": 1.1004995107650757,
"eval_runtime": 0.9133,
"eval_samples_per_second": 145.621,
"eval_steps_per_second": 18.613,
"step": 1040
},
{
"epoch": 8.076923076923077,
"grad_norm": 0.689738392829895,
"learning_rate": 0.0009615384615384616,
"loss": 1.1119,
"step": 1050
},
{
"epoch": 8.153846153846153,
"grad_norm": 0.7051054835319519,
"learning_rate": 0.0009230769230769232,
"loss": 1.1076,
"step": 1060
},
{
"epoch": 8.23076923076923,
"grad_norm": 0.8233575820922852,
"learning_rate": 0.0008846153846153847,
"loss": 1.1,
"step": 1070
},
{
"epoch": 8.307692307692308,
"grad_norm": 0.4401038885116577,
"learning_rate": 0.0008461538461538462,
"loss": 1.1231,
"step": 1080
},
{
"epoch": 8.384615384615385,
"grad_norm": 2.0618515014648438,
"learning_rate": 0.0008076923076923078,
"loss": 1.0877,
"step": 1090
},
{
"epoch": 8.461538461538462,
"grad_norm": 1.629764199256897,
"learning_rate": 0.0007692307692307693,
"loss": 1.1165,
"step": 1100
},
{
"epoch": 8.538461538461538,
"grad_norm": 0.9111972451210022,
"learning_rate": 0.0007307692307692308,
"loss": 1.1033,
"step": 1110
},
{
"epoch": 8.615384615384615,
"grad_norm": 2.03430438041687,
"learning_rate": 0.0006923076923076924,
"loss": 1.0906,
"step": 1120
},
{
"epoch": 8.692307692307692,
"grad_norm": 3.2121763229370117,
"learning_rate": 0.0006538461538461539,
"loss": 1.126,
"step": 1130
},
{
"epoch": 8.76923076923077,
"grad_norm": 0.34755992889404297,
"learning_rate": 0.0006153846153846154,
"loss": 1.1051,
"step": 1140
},
{
"epoch": 8.846153846153847,
"grad_norm": 0.7230011224746704,
"learning_rate": 0.000576923076923077,
"loss": 1.0721,
"step": 1150
},
{
"epoch": 8.923076923076923,
"grad_norm": 2.8672590255737305,
"learning_rate": 0.0005384615384615385,
"loss": 1.0711,
"step": 1160
},
{
"epoch": 9.0,
"grad_norm": 2.920103073120117,
"learning_rate": 0.0005,
"loss": 1.1448,
"step": 1170
},
{
"epoch": 9.0,
"eval_accuracy": 0.3308270676691729,
"eval_loss": 1.111855149269104,
"eval_runtime": 0.9175,
"eval_samples_per_second": 144.96,
"eval_steps_per_second": 18.529,
"step": 1170
},
{
"epoch": 9.076923076923077,
"grad_norm": 0.26905354857444763,
"learning_rate": 0.0004615384615384616,
"loss": 1.1121,
"step": 1180
},
{
"epoch": 9.153846153846153,
"grad_norm": 0.22876305878162384,
"learning_rate": 0.0004230769230769231,
"loss": 1.0967,
"step": 1190
},
{
"epoch": 9.23076923076923,
"grad_norm": 0.24429693818092346,
"learning_rate": 0.00038461538461538467,
"loss": 1.1023,
"step": 1200
},
{
"epoch": 9.307692307692308,
"grad_norm": 0.6317471861839294,
"learning_rate": 0.0003461538461538462,
"loss": 1.1062,
"step": 1210
},
{
"epoch": 9.384615384615385,
"grad_norm": 0.41996484994888306,
"learning_rate": 0.0003076923076923077,
"loss": 1.1162,
"step": 1220
},
{
"epoch": 9.461538461538462,
"grad_norm": 1.6321758031845093,
"learning_rate": 0.0002692307692307693,
"loss": 1.0976,
"step": 1230
},
{
"epoch": 9.538461538461538,
"grad_norm": 0.966174840927124,
"learning_rate": 0.0002307692307692308,
"loss": 1.0979,
"step": 1240
},
{
"epoch": 9.615384615384615,
"grad_norm": 0.23766568303108215,
"learning_rate": 0.00019230769230769233,
"loss": 1.1012,
"step": 1250
},
{
"epoch": 9.692307692307692,
"grad_norm": 0.930920422077179,
"learning_rate": 0.00015384615384615385,
"loss": 1.0991,
"step": 1260
},
{
"epoch": 9.76923076923077,
"grad_norm": 0.34004735946655273,
"learning_rate": 0.0001153846153846154,
"loss": 1.1009,
"step": 1270
},
{
"epoch": 9.846153846153847,
"grad_norm": 0.663216233253479,
"learning_rate": 7.692307692307693e-05,
"loss": 1.0997,
"step": 1280
},
{
"epoch": 9.923076923076923,
"grad_norm": 0.9082524180412292,
"learning_rate": 3.846153846153846e-05,
"loss": 1.0998,
"step": 1290
},
{
"epoch": 10.0,
"grad_norm": 1.356846570968628,
"learning_rate": 0.0,
"loss": 1.0991,
"step": 1300
},
{
"epoch": 10.0,
"eval_accuracy": 0.3308270676691729,
"eval_loss": 1.0983037948608398,
"eval_runtime": 0.9323,
"eval_samples_per_second": 142.651,
"eval_steps_per_second": 18.234,
"step": 1300
},
{
"epoch": 10.0,
"step": 1300,
"total_flos": 8.400578669044531e+17,
"train_loss": 0.9618942783429072,
"train_runtime": 164.295,
"train_samples_per_second": 62.936,
"train_steps_per_second": 7.913
}
],
"logging_steps": 10,
"max_steps": 1300,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 8.400578669044531e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}