{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 20,
"global_step": 776,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0,
"eval_accuracy": 0.9369414101290964,
"eval_f1": 0.18064516129032257,
"eval_loss": 0.32557418942451477,
"eval_precision": 0.14285714285714285,
"eval_recall": 0.24561403508771928,
"eval_runtime": 85.4393,
"eval_samples_per_second": 5.325,
"eval_steps_per_second": 0.176,
"step": 0
},
{
"epoch": 0.001288659793814433,
"grad_norm": 4.328640937805176,
"learning_rate": 2.564102564102564e-07,
"loss": 0.5948,
"step": 1
},
{
"epoch": 0.002577319587628866,
"grad_norm": 4.026719570159912,
"learning_rate": 5.128205128205128e-07,
"loss": 0.5193,
"step": 2
},
{
"epoch": 0.003865979381443299,
"grad_norm": 4.2378315925598145,
"learning_rate": 7.692307692307694e-07,
"loss": 0.5631,
"step": 3
},
{
"epoch": 0.005154639175257732,
"grad_norm": 3.568166971206665,
"learning_rate": 1.0256410256410257e-06,
"loss": 0.5153,
"step": 4
},
{
"epoch": 0.006443298969072165,
"grad_norm": 3.5194778442382812,
"learning_rate": 1.282051282051282e-06,
"loss": 0.4554,
"step": 5
},
{
"epoch": 0.007731958762886598,
"grad_norm": 3.977821111679077,
"learning_rate": 1.5384615384615387e-06,
"loss": 0.5351,
"step": 6
},
{
"epoch": 0.00902061855670103,
"grad_norm": 3.5472445487976074,
"learning_rate": 1.794871794871795e-06,
"loss": 0.4795,
"step": 7
},
{
"epoch": 0.010309278350515464,
"grad_norm": 4.021523475646973,
"learning_rate": 2.0512820512820513e-06,
"loss": 0.5567,
"step": 8
},
{
"epoch": 0.011597938144329897,
"grad_norm": 3.9711642265319824,
"learning_rate": 2.307692307692308e-06,
"loss": 0.5156,
"step": 9
},
{
"epoch": 0.01288659793814433,
"grad_norm": 3.964317560195923,
"learning_rate": 2.564102564102564e-06,
"loss": 0.5192,
"step": 10
},
{
"epoch": 0.014175257731958763,
"grad_norm": 4.49519157409668,
"learning_rate": 2.8205128205128207e-06,
"loss": 0.5763,
"step": 11
},
{
"epoch": 0.015463917525773196,
"grad_norm": 3.9775915145874023,
"learning_rate": 3.0769230769230774e-06,
"loss": 0.5087,
"step": 12
},
{
"epoch": 0.01675257731958763,
"grad_norm": 3.533947706222534,
"learning_rate": 3.3333333333333333e-06,
"loss": 0.5278,
"step": 13
},
{
"epoch": 0.01804123711340206,
"grad_norm": 3.2834880352020264,
"learning_rate": 3.58974358974359e-06,
"loss": 0.497,
"step": 14
},
{
"epoch": 0.019329896907216496,
"grad_norm": 3.62939190864563,
"learning_rate": 3.846153846153847e-06,
"loss": 0.4718,
"step": 15
},
{
"epoch": 0.020618556701030927,
"grad_norm": 3.499007225036621,
"learning_rate": 4.102564102564103e-06,
"loss": 0.4612,
"step": 16
},
{
"epoch": 0.02190721649484536,
"grad_norm": 3.6551826000213623,
"learning_rate": 4.358974358974359e-06,
"loss": 0.5116,
"step": 17
},
{
"epoch": 0.023195876288659795,
"grad_norm": 3.7035470008850098,
"learning_rate": 4.615384615384616e-06,
"loss": 0.5265,
"step": 18
},
{
"epoch": 0.024484536082474227,
"grad_norm": 3.528616189956665,
"learning_rate": 4.871794871794872e-06,
"loss": 0.4926,
"step": 19
},
{
"epoch": 0.02577319587628866,
"grad_norm": 3.614694833755493,
"learning_rate": 5.128205128205128e-06,
"loss": 0.4498,
"step": 20
},
{
"epoch": 0.02577319587628866,
"eval_accuracy": 0.9473684210526315,
"eval_f1": 0.15873015873015872,
"eval_loss": 0.2867887020111084,
"eval_precision": 0.14492753623188406,
"eval_recall": 0.17543859649122806,
"eval_runtime": 85.3774,
"eval_samples_per_second": 5.329,
"eval_steps_per_second": 0.176,
"step": 20
},
{
"epoch": 0.027061855670103094,
"grad_norm": 3.7909672260284424,
"learning_rate": 5.384615384615385e-06,
"loss": 0.4553,
"step": 21
},
{
"epoch": 0.028350515463917526,
"grad_norm": 3.3818626403808594,
"learning_rate": 5.641025641025641e-06,
"loss": 0.4058,
"step": 22
},
{
"epoch": 0.029639175257731958,
"grad_norm": 3.4036498069763184,
"learning_rate": 5.897435897435898e-06,
"loss": 0.3923,
"step": 23
},
{
"epoch": 0.030927835051546393,
"grad_norm": 4.077082633972168,
"learning_rate": 6.153846153846155e-06,
"loss": 0.433,
"step": 24
},
{
"epoch": 0.03221649484536082,
"grad_norm": 3.6889731884002686,
"learning_rate": 6.410256410256412e-06,
"loss": 0.4107,
"step": 25
},
{
"epoch": 0.03350515463917526,
"grad_norm": 3.24767804145813,
"learning_rate": 6.666666666666667e-06,
"loss": 0.3916,
"step": 26
},
{
"epoch": 0.03479381443298969,
"grad_norm": 3.6298370361328125,
"learning_rate": 6.923076923076923e-06,
"loss": 0.3775,
"step": 27
},
{
"epoch": 0.03608247422680412,
"grad_norm": 3.0387685298919678,
"learning_rate": 7.17948717948718e-06,
"loss": 0.3455,
"step": 28
},
{
"epoch": 0.037371134020618556,
"grad_norm": 2.6114144325256348,
"learning_rate": 7.435897435897437e-06,
"loss": 0.3187,
"step": 29
},
{
"epoch": 0.03865979381443299,
"grad_norm": 2.6260972023010254,
"learning_rate": 7.692307692307694e-06,
"loss": 0.3039,
"step": 30
},
{
"epoch": 0.03994845360824742,
"grad_norm": 3.2159814834594727,
"learning_rate": 7.948717948717949e-06,
"loss": 0.3116,
"step": 31
},
{
"epoch": 0.041237113402061855,
"grad_norm": 2.923689603805542,
"learning_rate": 8.205128205128205e-06,
"loss": 0.3317,
"step": 32
},
{
"epoch": 0.04252577319587629,
"grad_norm": 3.0011069774627686,
"learning_rate": 8.461538461538462e-06,
"loss": 0.3035,
"step": 33
},
{
"epoch": 0.04381443298969072,
"grad_norm": 2.754927396774292,
"learning_rate": 8.717948717948719e-06,
"loss": 0.2897,
"step": 34
},
{
"epoch": 0.045103092783505154,
"grad_norm": 2.29058837890625,
"learning_rate": 8.974358974358976e-06,
"loss": 0.2669,
"step": 35
},
{
"epoch": 0.04639175257731959,
"grad_norm": 2.5178396701812744,
"learning_rate": 9.230769230769232e-06,
"loss": 0.2534,
"step": 36
},
{
"epoch": 0.04768041237113402,
"grad_norm": 2.3435192108154297,
"learning_rate": 9.487179487179487e-06,
"loss": 0.2393,
"step": 37
},
{
"epoch": 0.04896907216494845,
"grad_norm": 2.382751226425171,
"learning_rate": 9.743589743589744e-06,
"loss": 0.2307,
"step": 38
},
{
"epoch": 0.05025773195876289,
"grad_norm": 1.9250915050506592,
"learning_rate": 1e-05,
"loss": 0.1963,
"step": 39
},
{
"epoch": 0.05154639175257732,
"grad_norm": 1.9028986692428589,
"learning_rate": 1.0256410256410256e-05,
"loss": 0.242,
"step": 40
},
{
"epoch": 0.05154639175257732,
"eval_accuracy": 0.9672293942403177,
"eval_f1": 0.08333333333333333,
"eval_loss": 0.14344234764575958,
"eval_precision": 0.2,
"eval_recall": 0.05263157894736842,
"eval_runtime": 85.3093,
"eval_samples_per_second": 5.334,
"eval_steps_per_second": 0.176,
"step": 40
},
{
"epoch": 0.05283505154639175,
"grad_norm": 1.5781856775283813,
"learning_rate": 1.0512820512820514e-05,
"loss": 0.197,
"step": 41
},
{
"epoch": 0.05412371134020619,
"grad_norm": 1.4305051565170288,
"learning_rate": 1.076923076923077e-05,
"loss": 0.1876,
"step": 42
},
{
"epoch": 0.055412371134020616,
"grad_norm": 1.1940586566925049,
"learning_rate": 1.1025641025641028e-05,
"loss": 0.2308,
"step": 43
},
{
"epoch": 0.05670103092783505,
"grad_norm": 1.2878607511520386,
"learning_rate": 1.1282051282051283e-05,
"loss": 0.1427,
"step": 44
},
{
"epoch": 0.05798969072164949,
"grad_norm": 0.896811842918396,
"learning_rate": 1.1538461538461538e-05,
"loss": 0.1809,
"step": 45
},
{
"epoch": 0.059278350515463915,
"grad_norm": 0.8891208171844482,
"learning_rate": 1.1794871794871796e-05,
"loss": 0.155,
"step": 46
},
{
"epoch": 0.06056701030927835,
"grad_norm": 1.0271227359771729,
"learning_rate": 1.2051282051282051e-05,
"loss": 0.1985,
"step": 47
},
{
"epoch": 0.061855670103092786,
"grad_norm": 0.7700079679489136,
"learning_rate": 1.230769230769231e-05,
"loss": 0.1262,
"step": 48
},
{
"epoch": 0.06314432989690721,
"grad_norm": 1.125436544418335,
"learning_rate": 1.2564102564102565e-05,
"loss": 0.1685,
"step": 49
},
{
"epoch": 0.06443298969072164,
"grad_norm": 1.251115083694458,
"learning_rate": 1.2820512820512823e-05,
"loss": 0.1999,
"step": 50
},
{
"epoch": 0.06572164948453608,
"grad_norm": 1.178985595703125,
"learning_rate": 1.3076923076923078e-05,
"loss": 0.1657,
"step": 51
},
{
"epoch": 0.06701030927835051,
"grad_norm": 1.3865740299224854,
"learning_rate": 1.3333333333333333e-05,
"loss": 0.201,
"step": 52
},
{
"epoch": 0.06829896907216494,
"grad_norm": 0.8845710158348083,
"learning_rate": 1.3589743589743592e-05,
"loss": 0.1398,
"step": 53
},
{
"epoch": 0.06958762886597938,
"grad_norm": 1.4564330577850342,
"learning_rate": 1.3846153846153847e-05,
"loss": 0.1913,
"step": 54
},
{
"epoch": 0.07087628865979381,
"grad_norm": 0.7712787985801697,
"learning_rate": 1.4102564102564105e-05,
"loss": 0.1112,
"step": 55
},
{
"epoch": 0.07216494845360824,
"grad_norm": 0.8379471898078918,
"learning_rate": 1.435897435897436e-05,
"loss": 0.1242,
"step": 56
},
{
"epoch": 0.07345360824742268,
"grad_norm": 1.1431857347488403,
"learning_rate": 1.4615384615384615e-05,
"loss": 0.16,
"step": 57
},
{
"epoch": 0.07474226804123711,
"grad_norm": 0.9613205790519714,
"learning_rate": 1.4871794871794874e-05,
"loss": 0.1257,
"step": 58
},
{
"epoch": 0.07603092783505154,
"grad_norm": 0.7836907505989075,
"learning_rate": 1.5128205128205129e-05,
"loss": 0.1252,
"step": 59
},
{
"epoch": 0.07731958762886598,
"grad_norm": 0.9727709889411926,
"learning_rate": 1.5384615384615387e-05,
"loss": 0.1628,
"step": 60
},
{
"epoch": 0.07731958762886598,
"eval_accuracy": 0.9692154915590864,
"eval_f1": 0.20512820512820512,
"eval_loss": 0.10804814100265503,
"eval_precision": 0.38095238095238093,
"eval_recall": 0.14035087719298245,
"eval_runtime": 86.5949,
"eval_samples_per_second": 5.254,
"eval_steps_per_second": 0.173,
"step": 60
},
{
"epoch": 0.07860824742268041,
"grad_norm": 0.7048820853233337,
"learning_rate": 1.5641025641025644e-05,
"loss": 0.1337,
"step": 61
},
{
"epoch": 0.07989690721649484,
"grad_norm": 0.6462810635566711,
"learning_rate": 1.5897435897435897e-05,
"loss": 0.076,
"step": 62
},
{
"epoch": 0.08118556701030928,
"grad_norm": 0.7791882753372192,
"learning_rate": 1.6153846153846154e-05,
"loss": 0.0935,
"step": 63
},
{
"epoch": 0.08247422680412371,
"grad_norm": 0.5717423558235168,
"learning_rate": 1.641025641025641e-05,
"loss": 0.0892,
"step": 64
},
{
"epoch": 0.08376288659793814,
"grad_norm": 0.6709016561508179,
"learning_rate": 1.6666666666666667e-05,
"loss": 0.0581,
"step": 65
},
{
"epoch": 0.08505154639175258,
"grad_norm": 0.6802282333374023,
"learning_rate": 1.6923076923076924e-05,
"loss": 0.1023,
"step": 66
},
{
"epoch": 0.08634020618556701,
"grad_norm": 0.7112599611282349,
"learning_rate": 1.717948717948718e-05,
"loss": 0.1213,
"step": 67
},
{
"epoch": 0.08762886597938144,
"grad_norm": 1.2926205396652222,
"learning_rate": 1.7435897435897438e-05,
"loss": 0.1627,
"step": 68
},
{
"epoch": 0.08891752577319588,
"grad_norm": 1.408495545387268,
"learning_rate": 1.7692307692307694e-05,
"loss": 0.1781,
"step": 69
},
{
"epoch": 0.09020618556701031,
"grad_norm": 1.0148080587387085,
"learning_rate": 1.794871794871795e-05,
"loss": 0.0919,
"step": 70
},
{
"epoch": 0.09149484536082474,
"grad_norm": 1.0437681674957275,
"learning_rate": 1.8205128205128208e-05,
"loss": 0.1265,
"step": 71
},
{
"epoch": 0.09278350515463918,
"grad_norm": 0.9646249413490295,
"learning_rate": 1.8461538461538465e-05,
"loss": 0.104,
"step": 72
},
{
"epoch": 0.09407216494845361,
"grad_norm": 0.8352120518684387,
"learning_rate": 1.8717948717948718e-05,
"loss": 0.0845,
"step": 73
},
{
"epoch": 0.09536082474226804,
"grad_norm": 0.9750470519065857,
"learning_rate": 1.8974358974358975e-05,
"loss": 0.1469,
"step": 74
},
{
"epoch": 0.09664948453608248,
"grad_norm": 0.8849421739578247,
"learning_rate": 1.923076923076923e-05,
"loss": 0.0641,
"step": 75
},
{
"epoch": 0.0979381443298969,
"grad_norm": 1.2695003747940063,
"learning_rate": 1.9487179487179488e-05,
"loss": 0.1325,
"step": 76
},
{
"epoch": 0.09922680412371133,
"grad_norm": 0.9113069772720337,
"learning_rate": 1.9743589743589745e-05,
"loss": 0.0791,
"step": 77
},
{
"epoch": 0.10051546391752578,
"grad_norm": 0.863918662071228,
"learning_rate": 2e-05,
"loss": 0.0728,
"step": 78
},
{
"epoch": 0.1018041237113402,
"grad_norm": 1.0128920078277588,
"learning_rate": 1.999989871195906e-05,
"loss": 0.0443,
"step": 79
},
{
"epoch": 0.10309278350515463,
"grad_norm": 1.5655252933502197,
"learning_rate": 1.9999594849888083e-05,
"loss": 0.1241,
"step": 80
},
{
"epoch": 0.10309278350515463,
"eval_accuracy": 0.9707050645481629,
"eval_f1": 0.3917525773195876,
"eval_loss": 0.08737693727016449,
"eval_precision": 0.475,
"eval_recall": 0.3333333333333333,
"eval_runtime": 86.8429,
"eval_samples_per_second": 5.239,
"eval_steps_per_second": 0.173,
"step": 80
},
{
"epoch": 0.10438144329896908,
"grad_norm": 1.1679091453552246,
"learning_rate": 1.9999088419942598e-05,
"loss": 0.081,
"step": 81
},
{
"epoch": 0.1056701030927835,
"grad_norm": 1.3982985019683838,
"learning_rate": 1.999837943238166e-05,
"loss": 0.071,
"step": 82
},
{
"epoch": 0.10695876288659793,
"grad_norm": 2.1905858516693115,
"learning_rate": 1.999746790156766e-05,
"loss": 0.1153,
"step": 83
},
{
"epoch": 0.10824742268041238,
"grad_norm": 2.231328010559082,
"learning_rate": 1.9996353845966033e-05,
"loss": 0.1391,
"step": 84
},
{
"epoch": 0.1095360824742268,
"grad_norm": 1.6173464059829712,
"learning_rate": 1.999503728814488e-05,
"loss": 0.0958,
"step": 85
},
{
"epoch": 0.11082474226804123,
"grad_norm": 1.9609785079956055,
"learning_rate": 1.9993518254774517e-05,
"loss": 0.0864,
"step": 86
},
{
"epoch": 0.11211340206185567,
"grad_norm": 1.735422134399414,
"learning_rate": 1.999179677662692e-05,
"loss": 0.0895,
"step": 87
},
{
"epoch": 0.1134020618556701,
"grad_norm": 1.645450234413147,
"learning_rate": 1.998987288857513e-05,
"loss": 0.1078,
"step": 88
},
{
"epoch": 0.11469072164948453,
"grad_norm": 1.0082734823226929,
"learning_rate": 1.9987746629592506e-05,
"loss": 0.0504,
"step": 89
},
{
"epoch": 0.11597938144329897,
"grad_norm": 2.4662506580352783,
"learning_rate": 1.9985418042751975e-05,
"loss": 0.0982,
"step": 90
},
{
"epoch": 0.1172680412371134,
"grad_norm": 1.3186198472976685,
"learning_rate": 1.9982887175225136e-05,
"loss": 0.04,
"step": 91
},
{
"epoch": 0.11855670103092783,
"grad_norm": 1.4960401058197021,
"learning_rate": 1.998015407828131e-05,
"loss": 0.0572,
"step": 92
},
{
"epoch": 0.11984536082474227,
"grad_norm": 1.6579524278640747,
"learning_rate": 1.9977218807286507e-05,
"loss": 0.0662,
"step": 93
},
{
"epoch": 0.1211340206185567,
"grad_norm": 2.7462518215179443,
"learning_rate": 1.9974081421702296e-05,
"loss": 0.0739,
"step": 94
},
{
"epoch": 0.12242268041237113,
"grad_norm": 1.3179261684417725,
"learning_rate": 1.99707419850846e-05,
"loss": 0.0528,
"step": 95
},
{
"epoch": 0.12371134020618557,
"grad_norm": 1.08860182762146,
"learning_rate": 1.9967200565082426e-05,
"loss": 0.0417,
"step": 96
},
{
"epoch": 0.125,
"grad_norm": 2.638080358505249,
"learning_rate": 1.9963457233436468e-05,
"loss": 0.0964,
"step": 97
},
{
"epoch": 0.12628865979381443,
"grad_norm": 1.3592987060546875,
"learning_rate": 1.9959512065977673e-05,
"loss": 0.0491,
"step": 98
},
{
"epoch": 0.12757731958762886,
"grad_norm": 2.5333075523376465,
"learning_rate": 1.9955365142625694e-05,
"loss": 0.0506,
"step": 99
},
{
"epoch": 0.12886597938144329,
"grad_norm": 2.624704360961914,
"learning_rate": 1.9951016547387286e-05,
"loss": 0.0676,
"step": 100
},
{
"epoch": 0.12886597938144329,
"eval_accuracy": 0.9692154915590864,
"eval_f1": 0.5694444444444444,
"eval_loss": 0.0689799040555954,
"eval_precision": 0.47126436781609193,
"eval_recall": 0.7192982456140351,
"eval_runtime": 85.1099,
"eval_samples_per_second": 5.346,
"eval_steps_per_second": 0.176,
"step": 100
},
{
"epoch": 0.13015463917525774,
"grad_norm": 2.9534921646118164,
"learning_rate": 1.994646636835458e-05,
"loss": 0.0741,
"step": 101
},
{
"epoch": 0.13144329896907217,
"grad_norm": 2.0482945442199707,
"learning_rate": 1.9941714697703333e-05,
"loss": 0.0596,
"step": 102
},
{
"epoch": 0.1327319587628866,
"grad_norm": 0.8915924429893494,
"learning_rate": 1.9936761631691007e-05,
"loss": 0.0271,
"step": 103
},
{
"epoch": 0.13402061855670103,
"grad_norm": 3.5569581985473633,
"learning_rate": 1.993160727065489e-05,
"loss": 0.097,
"step": 104
},
{
"epoch": 0.13530927835051546,
"grad_norm": 1.0290688276290894,
"learning_rate": 1.992625171901e-05,
"loss": 0.0309,
"step": 105
},
{
"epoch": 0.13659793814432988,
"grad_norm": 3.104780673980713,
"learning_rate": 1.9920695085247012e-05,
"loss": 0.0466,
"step": 106
},
{
"epoch": 0.13788659793814434,
"grad_norm": 1.300478458404541,
"learning_rate": 1.991493748193002e-05,
"loss": 0.035,
"step": 107
},
{
"epoch": 0.13917525773195877,
"grad_norm": 1.9571739435195923,
"learning_rate": 1.9908979025694312e-05,
"loss": 0.0432,
"step": 108
},
{
"epoch": 0.1404639175257732,
"grad_norm": 0.9955072402954102,
"learning_rate": 1.9902819837243954e-05,
"loss": 0.0182,
"step": 109
},
{
"epoch": 0.14175257731958762,
"grad_norm": 1.2352385520935059,
"learning_rate": 1.989646004134937e-05,
"loss": 0.0338,
"step": 110
},
{
"epoch": 0.14304123711340205,
"grad_norm": 2.855053663253784,
"learning_rate": 1.9889899766844817e-05,
"loss": 0.0701,
"step": 111
},
{
"epoch": 0.14432989690721648,
"grad_norm": 2.372802495956421,
"learning_rate": 1.9883139146625763e-05,
"loss": 0.0386,
"step": 112
},
{
"epoch": 0.14561855670103094,
"grad_norm": 1.9221031665802002,
"learning_rate": 1.9876178317646203e-05,
"loss": 0.0277,
"step": 113
},
{
"epoch": 0.14690721649484537,
"grad_norm": 0.9431936144828796,
"learning_rate": 1.9869017420915888e-05,
"loss": 0.0188,
"step": 114
},
{
"epoch": 0.1481958762886598,
"grad_norm": 1.950210690498352,
"learning_rate": 1.9861656601497452e-05,
"loss": 0.0302,
"step": 115
},
{
"epoch": 0.14948453608247422,
"grad_norm": 3.239633560180664,
"learning_rate": 1.9854096008503495e-05,
"loss": 0.0416,
"step": 116
},
{
"epoch": 0.15077319587628865,
"grad_norm": 3.1708860397338867,
"learning_rate": 1.9846335795093547e-05,
"loss": 0.0688,
"step": 117
},
{
"epoch": 0.15206185567010308,
"grad_norm": 0.6930286288261414,
"learning_rate": 1.9838376118470965e-05,
"loss": 0.0141,
"step": 118
},
{
"epoch": 0.15335051546391754,
"grad_norm": 2.929121971130371,
"learning_rate": 1.9830217139879768e-05,
"loss": 0.034,
"step": 119
},
{
"epoch": 0.15463917525773196,
"grad_norm": 1.3847970962524414,
"learning_rate": 1.9821859024601345e-05,
"loss": 0.03,
"step": 120
},
{
"epoch": 0.15463917525773196,
"eval_accuracy": 0.9821251241310824,
"eval_f1": 0.7391304347826086,
"eval_loss": 0.04716553911566734,
"eval_precision": 0.6296296296296297,
"eval_recall": 0.8947368421052632,
"eval_runtime": 83.825,
"eval_samples_per_second": 5.428,
"eval_steps_per_second": 0.179,
"step": 120
},
{
"epoch": 0.1559278350515464,
"grad_norm": 2.072525978088379,
"learning_rate": 1.981330194195112e-05,
"loss": 0.016,
"step": 121
},
{
"epoch": 0.15721649484536082,
"grad_norm": 3.0791800022125244,
"learning_rate": 1.9804546065275116e-05,
"loss": 0.0618,
"step": 122
},
{
"epoch": 0.15850515463917525,
"grad_norm": 2.1992335319519043,
"learning_rate": 1.9795591571946454e-05,
"loss": 0.0276,
"step": 123
},
{
"epoch": 0.15979381443298968,
"grad_norm": 2.476609706878662,
"learning_rate": 1.978643864336176e-05,
"loss": 0.0207,
"step": 124
},
{
"epoch": 0.16108247422680413,
"grad_norm": 2.674210786819458,
"learning_rate": 1.9777087464937464e-05,
"loss": 0.0378,
"step": 125
},
{
"epoch": 0.16237113402061856,
"grad_norm": 2.6775150299072266,
"learning_rate": 1.9767538226106078e-05,
"loss": 0.0312,
"step": 126
},
{
"epoch": 0.163659793814433,
"grad_norm": 2.105435848236084,
"learning_rate": 1.9757791120312344e-05,
"loss": 0.0239,
"step": 127
},
{
"epoch": 0.16494845360824742,
"grad_norm": 1.7885074615478516,
"learning_rate": 1.9747846345009306e-05,
"loss": 0.0402,
"step": 128
},
{
"epoch": 0.16623711340206185,
"grad_norm": 4.384532451629639,
"learning_rate": 1.9737704101654335e-05,
"loss": 0.0674,
"step": 129
},
{
"epoch": 0.16752577319587628,
"grad_norm": 0.733161211013794,
"learning_rate": 1.9727364595705012e-05,
"loss": 0.0109,
"step": 130
},
{
"epoch": 0.16881443298969073,
"grad_norm": 2.310255765914917,
"learning_rate": 1.9716828036615006e-05,
"loss": 0.0245,
"step": 131
},
{
"epoch": 0.17010309278350516,
"grad_norm": 2.1358768939971924,
"learning_rate": 1.9706094637829797e-05,
"loss": 0.0506,
"step": 132
},
{
"epoch": 0.1713917525773196,
"grad_norm": 1.873978853225708,
"learning_rate": 1.9695164616782378e-05,
"loss": 0.0239,
"step": 133
},
{
"epoch": 0.17268041237113402,
"grad_norm": 3.210780620574951,
"learning_rate": 1.9684038194888827e-05,
"loss": 0.0453,
"step": 134
},
{
"epoch": 0.17396907216494845,
"grad_norm": 2.6000077724456787,
"learning_rate": 1.9672715597543845e-05,
"loss": 0.0222,
"step": 135
},
{
"epoch": 0.17525773195876287,
"grad_norm": 0.8902448415756226,
"learning_rate": 1.9661197054116165e-05,
"loss": 0.0114,
"step": 136
},
{
"epoch": 0.17654639175257733,
"grad_norm": 2.048377513885498,
"learning_rate": 1.964948279794393e-05,
"loss": 0.0299,
"step": 137
},
{
"epoch": 0.17783505154639176,
"grad_norm": 0.35185545682907104,
"learning_rate": 1.963757306632996e-05,
"loss": 0.0062,
"step": 138
},
{
"epoch": 0.1791237113402062,
"grad_norm": 0.8665434122085571,
"learning_rate": 1.962546810053692e-05,
"loss": 0.0122,
"step": 139
},
{
"epoch": 0.18041237113402062,
"grad_norm": 0.7568170428276062,
"learning_rate": 1.9613168145782468e-05,
"loss": 0.0109,
"step": 140
},
{
"epoch": 0.18041237113402062,
"eval_accuracy": 0.9910625620655412,
"eval_f1": 0.8448275862068966,
"eval_loss": 0.03413279354572296,
"eval_precision": 0.8305084745762712,
"eval_recall": 0.8596491228070176,
"eval_runtime": 83.9067,
"eval_samples_per_second": 5.423,
"eval_steps_per_second": 0.179,
"step": 140
},
{
"epoch": 0.18170103092783504,
"grad_norm": 2.2702317237854004,
"learning_rate": 1.960067345123427e-05,
"loss": 0.0247,
"step": 141
},
{
"epoch": 0.18298969072164947,
"grad_norm": 3.507333755493164,
"learning_rate": 1.958798427000495e-05,
"loss": 0.0297,
"step": 142
},
{
"epoch": 0.18427835051546393,
"grad_norm": 0.5789155960083008,
"learning_rate": 1.9575100859146974e-05,
"loss": 0.013,
"step": 143
},
{
"epoch": 0.18556701030927836,
"grad_norm": 1.9476535320281982,
"learning_rate": 1.956202347964743e-05,
"loss": 0.0208,
"step": 144
},
{
"epoch": 0.18685567010309279,
"grad_norm": 0.855241060256958,
"learning_rate": 1.954875239642274e-05,
"loss": 0.0071,
"step": 145
},
{
"epoch": 0.18814432989690721,
"grad_norm": 2.169466495513916,
"learning_rate": 1.9535287878313315e-05,
"loss": 0.0191,
"step": 146
},
{
"epoch": 0.18943298969072164,
"grad_norm": 1.1874339580535889,
"learning_rate": 1.952163019807809e-05,
"loss": 0.0086,
"step": 147
},
{
"epoch": 0.19072164948453607,
"grad_norm": 3.9380855560302734,
"learning_rate": 1.9507779632388997e-05,
"loss": 0.0264,
"step": 148
},
{
"epoch": 0.19201030927835053,
"grad_norm": 2.052539587020874,
"learning_rate": 1.9493736461825366e-05,
"loss": 0.0126,
"step": 149
},
{
"epoch": 0.19329896907216496,
"grad_norm": 2.4338552951812744,
"learning_rate": 1.947950097086825e-05,
"loss": 0.0426,
"step": 150
},
{
"epoch": 0.19458762886597938,
"grad_norm": 1.8210889101028442,
"learning_rate": 1.946507344789464e-05,
"loss": 0.0088,
"step": 151
},
{
"epoch": 0.1958762886597938,
"grad_norm": 0.9345032572746277,
"learning_rate": 1.945045418517165e-05,
"loss": 0.01,
"step": 152
},
{
"epoch": 0.19716494845360824,
"grad_norm": 2.274660587310791,
"learning_rate": 1.9435643478850573e-05,
"loss": 0.0208,
"step": 153
},
{
"epoch": 0.19845360824742267,
"grad_norm": 1.3613721132278442,
"learning_rate": 1.9420641628960897e-05,
"loss": 0.0136,
"step": 154
},
{
"epoch": 0.19974226804123713,
"grad_norm": 0.8850100040435791,
"learning_rate": 1.9405448939404215e-05,
"loss": 0.009,
"step": 155
},
{
"epoch": 0.20103092783505155,
"grad_norm": 0.5833643078804016,
"learning_rate": 1.9390065717948084e-05,
"loss": 0.0046,
"step": 156
},
{
"epoch": 0.20231958762886598,
"grad_norm": 0.42478522658348083,
"learning_rate": 1.9374492276219776e-05,
"loss": 0.0052,
"step": 157
},
{
"epoch": 0.2036082474226804,
"grad_norm": 1.2607591152191162,
"learning_rate": 1.9358728929699966e-05,
"loss": 0.0101,
"step": 158
},
{
"epoch": 0.20489690721649484,
"grad_norm": 1.5455127954483032,
"learning_rate": 1.9342775997716357e-05,
"loss": 0.0051,
"step": 159
},
{
"epoch": 0.20618556701030927,
"grad_norm": 5.292853832244873,
"learning_rate": 1.9326633803437197e-05,
"loss": 0.043,
"step": 160
},
{
"epoch": 0.20618556701030927,
"eval_accuracy": 0.9915590863952334,
"eval_f1": 0.8547008547008547,
"eval_loss": 0.033666037023067474,
"eval_precision": 0.8333333333333334,
"eval_recall": 0.8771929824561403,
"eval_runtime": 83.7677,
"eval_samples_per_second": 5.432,
"eval_steps_per_second": 0.179,
"step": 160
},
{
"epoch": 0.20747422680412372,
"grad_norm": 5.327892303466797,
"learning_rate": 1.9310302673864724e-05,
"loss": 0.057,
"step": 161
},
{
"epoch": 0.20876288659793815,
"grad_norm": 2.6782376766204834,
"learning_rate": 1.929378293982857e-05,
"loss": 0.0288,
"step": 162
},
{
"epoch": 0.21005154639175258,
"grad_norm": 1.8482961654663086,
"learning_rate": 1.9277074935979034e-05,
"loss": 0.0087,
"step": 163
},
{
"epoch": 0.211340206185567,
"grad_norm": 0.3108800947666168,
"learning_rate": 1.926017900078031e-05,
"loss": 0.002,
"step": 164
},
{
"epoch": 0.21262886597938144,
"grad_norm": 6.560524940490723,
"learning_rate": 1.924309547650363e-05,
"loss": 0.0385,
"step": 165
},
{
"epoch": 0.21391752577319587,
"grad_norm": 1.7873457670211792,
"learning_rate": 1.922582470922034e-05,
"loss": 0.006,
"step": 166
},
{
"epoch": 0.21520618556701032,
"grad_norm": 4.115209102630615,
"learning_rate": 1.9208367048794878e-05,
"loss": 0.0095,
"step": 167
},
{
"epoch": 0.21649484536082475,
"grad_norm": 3.2223434448242188,
"learning_rate": 1.9190722848877683e-05,
"loss": 0.0151,
"step": 168
},
{
"epoch": 0.21778350515463918,
"grad_norm": 4.802370071411133,
"learning_rate": 1.9172892466898047e-05,
"loss": 0.0576,
"step": 169
},
{
"epoch": 0.2190721649484536,
"grad_norm": 2.843043327331543,
"learning_rate": 1.9154876264056863e-05,
"loss": 0.0116,
"step": 170
},
{
"epoch": 0.22036082474226804,
"grad_norm": 1.8300056457519531,
"learning_rate": 1.9136674605319304e-05,
"loss": 0.0048,
"step": 171
},
{
"epoch": 0.22164948453608246,
"grad_norm": 0.7112641930580139,
"learning_rate": 1.911828785940745e-05,
"loss": 0.0029,
"step": 172
},
{
"epoch": 0.22293814432989692,
"grad_norm": 3.5936992168426514,
"learning_rate": 1.9099716398792788e-05,
"loss": 0.0335,
"step": 173
},
{
"epoch": 0.22422680412371135,
"grad_norm": 2.8544235229492188,
"learning_rate": 1.908096059968869e-05,
"loss": 0.0207,
"step": 174
},
{
"epoch": 0.22551546391752578,
"grad_norm": 3.7631168365478516,
"learning_rate": 1.906202084204279e-05,
"loss": 0.0212,
"step": 175
},
{
"epoch": 0.2268041237113402,
"grad_norm": 1.2712973356246948,
"learning_rate": 1.904289750952928e-05,
"loss": 0.0084,
"step": 176
},
{
"epoch": 0.22809278350515463,
"grad_norm": 2.580491542816162,
"learning_rate": 1.9023590989541126e-05,
"loss": 0.0151,
"step": 177
},
{
"epoch": 0.22938144329896906,
"grad_norm": 6.0741777420043945,
"learning_rate": 1.900410167318226e-05,
"loss": 0.0616,
"step": 178
},
{
"epoch": 0.23067010309278352,
"grad_norm": 1.9606350660324097,
"learning_rate": 1.8984429955259607e-05,
"loss": 0.0305,
"step": 179
},
{
"epoch": 0.23195876288659795,
"grad_norm": 4.825283527374268,
"learning_rate": 1.8964576234275123e-05,
"loss": 0.0233,
"step": 180
},
{
"epoch": 0.23195876288659795,
"eval_accuracy": 0.9925521350546177,
"eval_f1": 0.8760330578512396,
"eval_loss": 0.027217118069529533,
"eval_precision": 0.828125,
"eval_recall": 0.9298245614035088,
"eval_runtime": 84.1193,
"eval_samples_per_second": 5.409,
"eval_steps_per_second": 0.178,
"step": 180
},
{
"epoch": 0.23324742268041238,
"grad_norm": 3.7470309734344482,
"learning_rate": 1.894454091241771e-05,
"loss": 0.0375,
"step": 181
},
{
"epoch": 0.2345360824742268,
"grad_norm": 5.566728115081787,
"learning_rate": 1.8924324395555066e-05,
"loss": 0.0397,
"step": 182
},
{
"epoch": 0.23582474226804123,
"grad_norm": 4.115679740905762,
"learning_rate": 1.8903927093225474e-05,
"loss": 0.0318,
"step": 183
},
{
"epoch": 0.23711340206185566,
"grad_norm": 2.0655646324157715,
"learning_rate": 1.8883349418629487e-05,
"loss": 0.0502,
"step": 184
},
{
"epoch": 0.23840206185567012,
"grad_norm": 3.514209270477295,
"learning_rate": 1.8862591788621572e-05,
"loss": 0.034,
"step": 185
},
{
"epoch": 0.23969072164948454,
"grad_norm": 2.274663209915161,
"learning_rate": 1.8841654623701673e-05,
"loss": 0.0105,
"step": 186
},
{
"epoch": 0.24097938144329897,
"grad_norm": 1.3190113306045532,
"learning_rate": 1.8820538348006666e-05,
"loss": 0.0099,
"step": 187
},
{
"epoch": 0.2422680412371134,
"grad_norm": 1.9200594425201416,
"learning_rate": 1.8799243389301796e-05,
"loss": 0.0087,
"step": 188
},
{
"epoch": 0.24355670103092783,
"grad_norm": 3.5742523670196533,
"learning_rate": 1.877777017897199e-05,
"loss": 0.0383,
"step": 189
},
{
"epoch": 0.24484536082474226,
"grad_norm": 2.926935911178589,
"learning_rate": 1.8756119152013134e-05,
"loss": 0.0198,
"step": 190
},
{
"epoch": 0.24613402061855671,
"grad_norm": 4.095611095428467,
"learning_rate": 1.873429074702324e-05,
"loss": 0.0151,
"step": 191
},
{
"epoch": 0.24742268041237114,
"grad_norm": 1.0907986164093018,
"learning_rate": 1.8712285406193585e-05,
"loss": 0.0059,
"step": 192
},
{
"epoch": 0.24871134020618557,
"grad_norm": 1.646490454673767,
"learning_rate": 1.8690103575299754e-05,
"loss": 0.0262,
"step": 193
},
{
"epoch": 0.25,
"grad_norm": 0.9283900856971741,
"learning_rate": 1.866774570369257e-05,
"loss": 0.0071,
"step": 194
},
{
"epoch": 0.25128865979381443,
"grad_norm": 1.8307346105575562,
"learning_rate": 1.8645212244289047e-05,
"loss": 0.0246,
"step": 195
},
{
"epoch": 0.25257731958762886,
"grad_norm": 1.3150577545166016,
"learning_rate": 1.8622503653563173e-05,
"loss": 0.0198,
"step": 196
},
{
"epoch": 0.2538659793814433,
"grad_norm": 3.4825661182403564,
"learning_rate": 1.8599620391536682e-05,
"loss": 0.0136,
"step": 197
},
{
"epoch": 0.2551546391752577,
"grad_norm": 5.4773077964782715,
"learning_rate": 1.8576562921769727e-05,
"loss": 0.0223,
"step": 198
},
{
"epoch": 0.25644329896907214,
"grad_norm": 3.3178765773773193,
"learning_rate": 1.8553331711351502e-05,
"loss": 0.0392,
"step": 199
},
{
"epoch": 0.25773195876288657,
"grad_norm": 4.358588218688965,
"learning_rate": 1.8529927230890757e-05,
"loss": 0.029,
"step": 200
},
{
"epoch": 0.25773195876288657,
"eval_accuracy": 0.9920556107249255,
"eval_f1": 0.8666666666666667,
"eval_loss": 0.02330821380019188,
"eval_precision": 0.8253968253968254,
"eval_recall": 0.9122807017543859,
"eval_runtime": 84.2136,
"eval_samples_per_second": 5.403,
"eval_steps_per_second": 0.178,
"step": 200
},
{
"epoch": 0.25902061855670105,
"grad_norm": 2.103586435317993,
"learning_rate": 1.85063499545063e-05,
"loss": 0.013,
"step": 201
},
{
"epoch": 0.2603092783505155,
"grad_norm": 1.8666274547576904,
"learning_rate": 1.8482600359817344e-05,
"loss": 0.0245,
"step": 202
},
{
"epoch": 0.2615979381443299,
"grad_norm": 2.8087830543518066,
"learning_rate": 1.8458678927933884e-05,
"loss": 0.0187,
"step": 203
},
{
"epoch": 0.26288659793814434,
"grad_norm": 1.8675556182861328,
"learning_rate": 1.843458614344691e-05,
"loss": 0.0156,
"step": 204
},
{
"epoch": 0.26417525773195877,
"grad_norm": 0.7611345648765564,
"learning_rate": 1.8410322494418606e-05,
"loss": 0.0046,
"step": 205
},
{
"epoch": 0.2654639175257732,
"grad_norm": 1.599369764328003,
"learning_rate": 1.8385888472372474e-05,
"loss": 0.0328,
"step": 206
},
{
"epoch": 0.2667525773195876,
"grad_norm": 0.5751793384552002,
"learning_rate": 1.8361284572283356e-05,
"loss": 0.0049,
"step": 207
},
{
"epoch": 0.26804123711340205,
"grad_norm": 1.689562439918518,
"learning_rate": 1.833651129256742e-05,
"loss": 0.0157,
"step": 208
},
{
"epoch": 0.2693298969072165,
"grad_norm": 0.5357356071472168,
"learning_rate": 1.831156913507206e-05,
"loss": 0.0047,
"step": 209
},
{
"epoch": 0.2706185567010309,
"grad_norm": 1.4983047246932983,
"learning_rate": 1.828645860506573e-05,
"loss": 0.0487,
"step": 210
},
{
"epoch": 0.27190721649484534,
"grad_norm": 0.2579003870487213,
"learning_rate": 1.826118021122771e-05,
"loss": 0.0029,
"step": 211
},
{
"epoch": 0.27319587628865977,
"grad_norm": 2.843892812728882,
"learning_rate": 1.8235734465637794e-05,
"loss": 0.0181,
"step": 212
},
{
"epoch": 0.27448453608247425,
"grad_norm": 1.1521669626235962,
"learning_rate": 1.821012188376593e-05,
"loss": 0.0221,
"step": 213
},
{
"epoch": 0.2757731958762887,
"grad_norm": 1.4023137092590332,
"learning_rate": 1.8184342984461766e-05,
"loss": 0.0097,
"step": 214
},
{
"epoch": 0.2770618556701031,
"grad_norm": 1.1344298124313354,
"learning_rate": 1.8158398289944145e-05,
"loss": 0.005,
"step": 215
},
{
"epoch": 0.27835051546391754,
"grad_norm": 1.5524466037750244,
"learning_rate": 1.8132288325790518e-05,
"loss": 0.0105,
"step": 216
},
{
"epoch": 0.27963917525773196,
"grad_norm": 0.8397157192230225,
"learning_rate": 1.8106013620926312e-05,
"loss": 0.0257,
"step": 217
},
{
"epoch": 0.2809278350515464,
"grad_norm": 2.6008617877960205,
"learning_rate": 1.8079574707614202e-05,
"loss": 0.013,
"step": 218
},
{
"epoch": 0.2822164948453608,
"grad_norm": 0.8384814262390137,
"learning_rate": 1.8052972121443337e-05,
"loss": 0.0076,
"step": 219
},
{
"epoch": 0.28350515463917525,
"grad_norm": 1.8078651428222656,
"learning_rate": 1.802620640131848e-05,
"loss": 0.0138,
"step": 220
},
{
"epoch": 0.28350515463917525,
"eval_accuracy": 0.9930486593843099,
"eval_f1": 0.8833333333333333,
"eval_loss": 0.02096499688923359,
"eval_precision": 0.8412698412698413,
"eval_recall": 0.9298245614035088,
"eval_runtime": 85.3923,
"eval_samples_per_second": 5.328,
"eval_steps_per_second": 0.176,
"step": 220
},
{
"epoch": 0.2847938144329897,
"grad_norm": 2.1315155029296875,
"learning_rate": 1.799927808944911e-05,
"loss": 0.0182,
"step": 221
},
{
"epoch": 0.2860824742268041,
"grad_norm": 1.9272891283035278,
"learning_rate": 1.797218773133841e-05,
"loss": 0.0152,
"step": 222
},
{
"epoch": 0.28737113402061853,
"grad_norm": 4.905808448791504,
"learning_rate": 1.7944935875772244e-05,
"loss": 0.0215,
"step": 223
},
{
"epoch": 0.28865979381443296,
"grad_norm": 4.647861480712891,
"learning_rate": 1.7917523074808024e-05,
"loss": 0.0258,
"step": 224
},
{
"epoch": 0.28994845360824745,
"grad_norm": 1.2799395322799683,
"learning_rate": 1.7889949883763532e-05,
"loss": 0.0232,
"step": 225
},
{
"epoch": 0.2912371134020619,
"grad_norm": 2.6159801483154297,
"learning_rate": 1.786221686120567e-05,
"loss": 0.0332,
"step": 226
},
{
"epoch": 0.2925257731958763,
"grad_norm": 0.3623534142971039,
"learning_rate": 1.7834324568939137e-05,
"loss": 0.0031,
"step": 227
},
{
"epoch": 0.29381443298969073,
"grad_norm": 2.5764312744140625,
"learning_rate": 1.7806273571995066e-05,
"loss": 0.0209,
"step": 228
},
{
"epoch": 0.29510309278350516,
"grad_norm": 2.027851104736328,
"learning_rate": 1.7778064438619562e-05,
"loss": 0.0128,
"step": 229
},
{
"epoch": 0.2963917525773196,
"grad_norm": 0.7468307614326477,
"learning_rate": 1.7749697740262197e-05,
"loss": 0.0046,
"step": 230
},
{
"epoch": 0.297680412371134,
"grad_norm": 1.3534049987792969,
"learning_rate": 1.772117405156443e-05,
"loss": 0.0137,
"step": 231
},
{
"epoch": 0.29896907216494845,
"grad_norm": 0.830199658870697,
"learning_rate": 1.769249395034797e-05,
"loss": 0.0044,
"step": 232
},
{
"epoch": 0.3002577319587629,
"grad_norm": 1.318949580192566,
"learning_rate": 1.7663658017603073e-05,
"loss": 0.0156,
"step": 233
},
{
"epoch": 0.3015463917525773,
"grad_norm": 1.9248756170272827,
"learning_rate": 1.7634666837476765e-05,
"loss": 0.0379,
"step": 234
},
{
"epoch": 0.30283505154639173,
"grad_norm": 1.7694895267486572,
"learning_rate": 1.7605520997261014e-05,
"loss": 0.0142,
"step": 235
},
{
"epoch": 0.30412371134020616,
"grad_norm": 1.5250486135482788,
"learning_rate": 1.757622108738083e-05,
"loss": 0.0092,
"step": 236
},
{
"epoch": 0.30541237113402064,
"grad_norm": 1.273772954940796,
"learning_rate": 1.754676770138231e-05,
"loss": 0.0216,
"step": 237
},
{
"epoch": 0.30670103092783507,
"grad_norm": 5.388645172119141,
"learning_rate": 1.7517161435920606e-05,
"loss": 0.0589,
"step": 238
},
{
"epoch": 0.3079896907216495,
"grad_norm": 2.30202317237854,
"learning_rate": 1.7487402890747843e-05,
"loss": 0.016,
"step": 239
},
{
"epoch": 0.30927835051546393,
"grad_norm": 3.9984192848205566,
"learning_rate": 1.7457492668700967e-05,
"loss": 0.0141,
"step": 240
},
{
"epoch": 0.30927835051546393,
"eval_accuracy": 0.9955312810327706,
"eval_f1": 0.9203539823008849,
"eval_loss": 0.01754908636212349,
"eval_precision": 0.9285714285714286,
"eval_recall": 0.9122807017543859,
"eval_runtime": 86.0275,
"eval_samples_per_second": 5.289,
"eval_steps_per_second": 0.174,
"step": 240
},
{
"epoch": 0.31056701030927836,
"grad_norm": 1.2816661596298218,
"learning_rate": 1.7427431375689544e-05,
"loss": 0.0147,
"step": 241
},
{
"epoch": 0.3118556701030928,
"grad_norm": 1.6840155124664307,
"learning_rate": 1.7397219620683465e-05,
"loss": 0.0047,
"step": 242
},
{
"epoch": 0.3131443298969072,
"grad_norm": 1.563914179801941,
"learning_rate": 1.7366858015700626e-05,
"loss": 0.017,
"step": 243
},
{
"epoch": 0.31443298969072164,
"grad_norm": 1.6181697845458984,
"learning_rate": 1.7336347175794523e-05,
"loss": 0.0137,
"step": 244
},
{
"epoch": 0.31572164948453607,
"grad_norm": 0.8612284064292908,
"learning_rate": 1.73056877190418e-05,
"loss": 0.0052,
"step": 245
},
{
"epoch": 0.3170103092783505,
"grad_norm": 1.467340111732483,
"learning_rate": 1.7274880266529716e-05,
"loss": 0.0085,
"step": 246
},
{
"epoch": 0.31829896907216493,
"grad_norm": 1.4295095205307007,
"learning_rate": 1.7243925442343578e-05,
"loss": 0.0333,
"step": 247
},
{
"epoch": 0.31958762886597936,
"grad_norm": 0.7592663764953613,
"learning_rate": 1.721282387355408e-05,
"loss": 0.009,
"step": 248
},
{
"epoch": 0.32087628865979384,
"grad_norm": 1.1355818510055542,
"learning_rate": 1.718157619020462e-05,
"loss": 0.0067,
"step": 249
},
{
"epoch": 0.32216494845360827,
"grad_norm": 1.8645901679992676,
"learning_rate": 1.715018302529852e-05,
"loss": 0.0126,
"step": 250
},
{
"epoch": 0.3234536082474227,
"grad_norm": 1.5803511142730713,
"learning_rate": 1.711864501478622e-05,
"loss": 0.0157,
"step": 251
},
{
"epoch": 0.3247422680412371,
"grad_norm": 2.9905714988708496,
"learning_rate": 1.7086962797552376e-05,
"loss": 0.0167,
"step": 252
},
{
"epoch": 0.32603092783505155,
"grad_norm": 0.2156965732574463,
"learning_rate": 1.7055137015402935e-05,
"loss": 0.0038,
"step": 253
},
{
"epoch": 0.327319587628866,
"grad_norm": 1.7128149271011353,
"learning_rate": 1.7023168313052118e-05,
"loss": 0.0221,
"step": 254
},
{
"epoch": 0.3286082474226804,
"grad_norm": 0.7819356322288513,
"learning_rate": 1.6991057338109376e-05,
"loss": 0.0043,
"step": 255
},
{
"epoch": 0.32989690721649484,
"grad_norm": 2.1492764949798584,
"learning_rate": 1.6958804741066254e-05,
"loss": 0.0289,
"step": 256
},
{
"epoch": 0.33118556701030927,
"grad_norm": 1.3265386819839478,
"learning_rate": 1.6926411175283227e-05,
"loss": 0.0091,
"step": 257
},
{
"epoch": 0.3324742268041237,
"grad_norm": 1.2150596380233765,
"learning_rate": 1.689387729697646e-05,
"loss": 0.0065,
"step": 258
},
{
"epoch": 0.3337628865979381,
"grad_norm": 1.4492149353027344,
"learning_rate": 1.686120376520451e-05,
"loss": 0.009,
"step": 259
},
{
"epoch": 0.33505154639175255,
"grad_norm": 0.4440682530403137,
"learning_rate": 1.6828391241854983e-05,
"loss": 0.0037,
"step": 260
},
{
"epoch": 0.33505154639175255,
"eval_accuracy": 0.9940417080436942,
"eval_f1": 0.896551724137931,
"eval_loss": 0.01701418310403824,
"eval_precision": 0.8813559322033898,
"eval_recall": 0.9122807017543859,
"eval_runtime": 86.0037,
"eval_samples_per_second": 5.29,
"eval_steps_per_second": 0.174,
"step": 260
},
{
"epoch": 0.33634020618556704,
"grad_norm": 0.5090395212173462,
"learning_rate": 1.6795440391631122e-05,
"loss": 0.0047,
"step": 261
},
{
"epoch": 0.33762886597938147,
"grad_norm": 2.755124807357788,
"learning_rate": 1.6762351882038342e-05,
"loss": 0.0169,
"step": 262
},
{
"epoch": 0.3389175257731959,
"grad_norm": 2.456214189529419,
"learning_rate": 1.6729126383370696e-05,
"loss": 0.031,
"step": 263
},
{
"epoch": 0.3402061855670103,
"grad_norm": 0.8938114047050476,
"learning_rate": 1.669576456869733e-05,
"loss": 0.0051,
"step": 264
},
{
"epoch": 0.34149484536082475,
"grad_norm": 0.6057696342468262,
"learning_rate": 1.666226711384881e-05,
"loss": 0.0029,
"step": 265
},
{
"epoch": 0.3427835051546392,
"grad_norm": 0.5478299856185913,
"learning_rate": 1.6628634697403447e-05,
"loss": 0.0026,
"step": 266
},
{
"epoch": 0.3440721649484536,
"grad_norm": 2.5459206104278564,
"learning_rate": 1.6594868000673562e-05,
"loss": 0.0217,
"step": 267
},
{
"epoch": 0.34536082474226804,
"grad_norm": 0.39067503809928894,
"learning_rate": 1.6560967707691663e-05,
"loss": 0.0034,
"step": 268
},
{
"epoch": 0.34664948453608246,
"grad_norm": 0.7849224209785461,
"learning_rate": 1.6526934505196605e-05,
"loss": 0.0059,
"step": 269
},
{
"epoch": 0.3479381443298969,
"grad_norm": 3.5039610862731934,
"learning_rate": 1.649276908261967e-05,
"loss": 0.0557,
"step": 270
},
{
"epoch": 0.3492268041237113,
"grad_norm": 1.609676480293274,
"learning_rate": 1.64584721320706e-05,
"loss": 0.0124,
"step": 271
},
{
"epoch": 0.35051546391752575,
"grad_norm": 3.219574213027954,
"learning_rate": 1.642404434832358e-05,
"loss": 0.0438,
"step": 272
},
{
"epoch": 0.35180412371134023,
"grad_norm": 2.468843936920166,
"learning_rate": 1.6389486428803173e-05,
"loss": 0.0084,
"step": 273
},
{
"epoch": 0.35309278350515466,
"grad_norm": 2.0141680240631104,
"learning_rate": 1.635479907357016e-05,
"loss": 0.0419,
"step": 274
},
{
"epoch": 0.3543814432989691,
"grad_norm": 1.8954237699508667,
"learning_rate": 1.63199829853074e-05,
"loss": 0.0293,
"step": 275
},
{
"epoch": 0.3556701030927835,
"grad_norm": 5.950355529785156,
"learning_rate": 1.6285038869305565e-05,
"loss": 0.0224,
"step": 276
},
{
"epoch": 0.35695876288659795,
"grad_norm": 0.3175673186779022,
"learning_rate": 1.624996743344887e-05,
"loss": 0.002,
"step": 277
},
{
"epoch": 0.3582474226804124,
"grad_norm": 1.8034769296646118,
"learning_rate": 1.621476938820071e-05,
"loss": 0.0107,
"step": 278
},
{
"epoch": 0.3595360824742268,
"grad_norm": 4.965821743011475,
"learning_rate": 1.6179445446589308e-05,
"loss": 0.019,
"step": 279
},
{
"epoch": 0.36082474226804123,
"grad_norm": 2.015825033187866,
"learning_rate": 1.6143996324193227e-05,
"loss": 0.0076,
"step": 280
},
{
"epoch": 0.36082474226804123,
"eval_accuracy": 0.9955312810327706,
"eval_f1": 0.9230769230769231,
"eval_loss": 0.018561244010925293,
"eval_precision": 0.9,
"eval_recall": 0.9473684210526315,
"eval_runtime": 85.7232,
"eval_samples_per_second": 5.308,
"eval_steps_per_second": 0.175,
"step": 280
},
{
"epoch": 0.36211340206185566,
"grad_norm": 0.7538387179374695,
"learning_rate": 1.6108422739126896e-05,
"loss": 0.0053,
"step": 281
},
{
"epoch": 0.3634020618556701,
"grad_norm": 6.5049309730529785,
"learning_rate": 1.6072725412026066e-05,
"loss": 0.0248,
"step": 282
},
{
"epoch": 0.3646907216494845,
"grad_norm": 2.8648669719696045,
"learning_rate": 1.6036905066033207e-05,
"loss": 0.0055,
"step": 283
},
{
"epoch": 0.36597938144329895,
"grad_norm": 0.21768106520175934,
"learning_rate": 1.6000962426782844e-05,
"loss": 0.0011,
"step": 284
},
{
"epoch": 0.36726804123711343,
"grad_norm": 1.1262462139129639,
"learning_rate": 1.596489822238689e-05,
"loss": 0.0103,
"step": 285
},
{
"epoch": 0.36855670103092786,
"grad_norm": 0.8232690095901489,
"learning_rate": 1.592871318341986e-05,
"loss": 0.0036,
"step": 286
},
{
"epoch": 0.3698453608247423,
"grad_norm": 3.285132646560669,
"learning_rate": 1.5892408042904098e-05,
"loss": 0.0073,
"step": 287
},
{
"epoch": 0.3711340206185567,
"grad_norm": 0.6526831388473511,
"learning_rate": 1.585598353629492e-05,
"loss": 0.0044,
"step": 288
},
{
"epoch": 0.37242268041237114,
"grad_norm": 0.3050073981285095,
"learning_rate": 1.58194404014657e-05,
"loss": 0.0016,
"step": 289
},
{
"epoch": 0.37371134020618557,
"grad_norm": 4.195672988891602,
"learning_rate": 1.5782779378692957e-05,
"loss": 0.0229,
"step": 290
},
{
"epoch": 0.375,
"grad_norm": 2.8954057693481445,
"learning_rate": 1.5746001210641316e-05,
"loss": 0.0169,
"step": 291
},
{
"epoch": 0.37628865979381443,
"grad_norm": 0.4121025800704956,
"learning_rate": 1.57091066423485e-05,
"loss": 0.0027,
"step": 292
},
{
"epoch": 0.37757731958762886,
"grad_norm": 1.3155614137649536,
"learning_rate": 1.5672096421210217e-05,
"loss": 0.0139,
"step": 293
},
{
"epoch": 0.3788659793814433,
"grad_norm": 0.1106419637799263,
"learning_rate": 1.5634971296965027e-05,
"loss": 0.0008,
"step": 294
},
{
"epoch": 0.3801546391752577,
"grad_norm": 0.11678878217935562,
"learning_rate": 1.5597732021679153e-05,
"loss": 0.0008,
"step": 295
},
{
"epoch": 0.38144329896907214,
"grad_norm": 2.1817727088928223,
"learning_rate": 1.5560379349731234e-05,
"loss": 0.0171,
"step": 296
},
{
"epoch": 0.38273195876288657,
"grad_norm": 2.412383556365967,
"learning_rate": 1.552291403779707e-05,
"loss": 0.0203,
"step": 297
},
{
"epoch": 0.38402061855670105,
"grad_norm": 2.77812123298645,
"learning_rate": 1.5485336844834274e-05,
"loss": 0.0134,
"step": 298
},
{
"epoch": 0.3853092783505155,
"grad_norm": 0.05827389657497406,
"learning_rate": 1.544764853206689e-05,
"loss": 0.0005,
"step": 299
},
{
"epoch": 0.3865979381443299,
"grad_norm": 1.8391661643981934,
"learning_rate": 1.5409849862969994e-05,
"loss": 0.0133,
"step": 300
},
{
"epoch": 0.3865979381443299,
"eval_accuracy": 0.9975173783515392,
"eval_f1": 0.9557522123893806,
"eval_loss": 0.015246791765093803,
"eval_precision": 0.9642857142857143,
"eval_recall": 0.9473684210526315,
"eval_runtime": 85.9032,
"eval_samples_per_second": 5.297,
"eval_steps_per_second": 0.175,
"step": 300
},
{
"epoch": 0.38788659793814434,
"grad_norm": 0.8091245293617249,
"learning_rate": 1.5371941603254215e-05,
"loss": 0.0033,
"step": 301
},
{
"epoch": 0.38917525773195877,
"grad_norm": 1.6510957479476929,
"learning_rate": 1.5333924520850227e-05,
"loss": 0.0044,
"step": 302
},
{
"epoch": 0.3904639175257732,
"grad_norm": 0.02873249165713787,
"learning_rate": 1.529579938589319e-05,
"loss": 0.0002,
"step": 303
},
{
"epoch": 0.3917525773195876,
"grad_norm": 2.239290952682495,
"learning_rate": 1.5257566970707147e-05,
"loss": 0.0062,
"step": 304
},
{
"epoch": 0.39304123711340205,
"grad_norm": 0.5877900123596191,
"learning_rate": 1.5219228049789388e-05,
"loss": 0.0037,
"step": 305
},
{
"epoch": 0.3943298969072165,
"grad_norm": 0.5729249119758606,
"learning_rate": 1.5180783399794749e-05,
"loss": 0.002,
"step": 306
},
{
"epoch": 0.3956185567010309,
"grad_norm": 1.6936867237091064,
"learning_rate": 1.514223379951989e-05,
"loss": 0.0372,
"step": 307
},
{
"epoch": 0.39690721649484534,
"grad_norm": 2.4295144081115723,
"learning_rate": 1.5103580029887504e-05,
"loss": 0.0101,
"step": 308
},
{
"epoch": 0.39819587628865977,
"grad_norm": 0.6043867468833923,
"learning_rate": 1.5064822873930516e-05,
"loss": 0.0022,
"step": 309
},
{
"epoch": 0.39948453608247425,
"grad_norm": 0.03160233795642853,
"learning_rate": 1.5025963116776203e-05,
"loss": 0.0003,
"step": 310
},
{
"epoch": 0.4007731958762887,
"grad_norm": 1.978843331336975,
"learning_rate": 1.49870015456303e-05,
"loss": 0.0099,
"step": 311
},
{
"epoch": 0.4020618556701031,
"grad_norm": 3.697706460952759,
"learning_rate": 1.4947938949761054e-05,
"loss": 0.0112,
"step": 312
},
{
"epoch": 0.40335051546391754,
"grad_norm": 0.2765125036239624,
"learning_rate": 1.490877612048322e-05,
"loss": 0.0009,
"step": 313
},
{
"epoch": 0.40463917525773196,
"grad_norm": 0.5769197940826416,
"learning_rate": 1.4869513851142051e-05,
"loss": 0.0023,
"step": 314
},
{
"epoch": 0.4059278350515464,
"grad_norm": 1.017076849937439,
"learning_rate": 1.483015293709722e-05,
"loss": 0.0042,
"step": 315
},
{
"epoch": 0.4072164948453608,
"grad_norm": 1.9436402320861816,
"learning_rate": 1.4790694175706698e-05,
"loss": 0.0144,
"step": 316
},
{
"epoch": 0.40850515463917525,
"grad_norm": 0.6186187267303467,
"learning_rate": 1.4751138366310612e-05,
"loss": 0.0014,
"step": 317
},
{
"epoch": 0.4097938144329897,
"grad_norm": 0.2874881327152252,
"learning_rate": 1.4711486310215053e-05,
"loss": 0.0025,
"step": 318
},
{
"epoch": 0.4110824742268041,
"grad_norm": 0.35522520542144775,
"learning_rate": 1.4671738810675838e-05,
"loss": 0.0022,
"step": 319
},
{
"epoch": 0.41237113402061853,
"grad_norm": 2.4676198959350586,
"learning_rate": 1.4631896672882235e-05,
"loss": 0.0084,
"step": 320
},
{
"epoch": 0.41237113402061853,
"eval_accuracy": 0.997020854021847,
"eval_f1": 0.9473684210526315,
"eval_loss": 0.016356978565454483,
"eval_precision": 0.9473684210526315,
"eval_recall": 0.9473684210526315,
"eval_runtime": 83.4177,
"eval_samples_per_second": 5.454,
"eval_steps_per_second": 0.18,
"step": 320
},
{
"epoch": 0.41365979381443296,
"grad_norm": 2.8166093826293945,
"learning_rate": 1.4591960703940662e-05,
"loss": 0.0193,
"step": 321
},
{
"epoch": 0.41494845360824745,
"grad_norm": 1.8290126323699951,
"learning_rate": 1.4551931712858334e-05,
"loss": 0.0099,
"step": 322
},
{
"epoch": 0.4162371134020619,
"grad_norm": 3.9490370750427246,
"learning_rate": 1.4511810510526869e-05,
"loss": 0.0514,
"step": 323
},
{
"epoch": 0.4175257731958763,
"grad_norm": 6.743803024291992,
"learning_rate": 1.4471597909705858e-05,
"loss": 0.0257,
"step": 324
},
{
"epoch": 0.41881443298969073,
"grad_norm": 0.8326684832572937,
"learning_rate": 1.4431294725006415e-05,
"loss": 0.0015,
"step": 325
},
{
"epoch": 0.42010309278350516,
"grad_norm": 5.771852970123291,
"learning_rate": 1.4390901772874668e-05,
"loss": 0.0157,
"step": 326
},
{
"epoch": 0.4213917525773196,
"grad_norm": 0.4042920470237732,
"learning_rate": 1.435041987157521e-05,
"loss": 0.0035,
"step": 327
},
{
"epoch": 0.422680412371134,
"grad_norm": 4.8174872398376465,
"learning_rate": 1.4309849841174538e-05,
"loss": 0.0307,
"step": 328
},
{
"epoch": 0.42396907216494845,
"grad_norm": 1.9805009365081787,
"learning_rate": 1.4269192503524435e-05,
"loss": 0.0117,
"step": 329
},
{
"epoch": 0.4252577319587629,
"grad_norm": 0.4960249960422516,
"learning_rate": 1.422844868224531e-05,
"loss": 0.0033,
"step": 330
},
{
"epoch": 0.4265463917525773,
"grad_norm": 0.6382171511650085,
"learning_rate": 1.4187619202709538e-05,
"loss": 0.0012,
"step": 331
},
{
"epoch": 0.42783505154639173,
"grad_norm": 1.938246250152588,
"learning_rate": 1.4146704892024714e-05,
"loss": 0.009,
"step": 332
},
{
"epoch": 0.42912371134020616,
"grad_norm": 1.8336970806121826,
"learning_rate": 1.4105706579016916e-05,
"loss": 0.0239,
"step": 333
},
{
"epoch": 0.43041237113402064,
"grad_norm": 2.8070526123046875,
"learning_rate": 1.40646250942139e-05,
"loss": 0.062,
"step": 334
},
{
"epoch": 0.43170103092783507,
"grad_norm": 3.8646774291992188,
"learning_rate": 1.4023461269828297e-05,
"loss": 0.041,
"step": 335
},
{
"epoch": 0.4329896907216495,
"grad_norm": 0.3110719323158264,
"learning_rate": 1.3982215939740726e-05,
"loss": 0.0007,
"step": 336
},
{
"epoch": 0.43427835051546393,
"grad_norm": 1.3539198637008667,
"learning_rate": 1.3940889939482925e-05,
"loss": 0.0145,
"step": 337
},
{
"epoch": 0.43556701030927836,
"grad_norm": 1.8467122316360474,
"learning_rate": 1.3899484106220816e-05,
"loss": 0.0065,
"step": 338
},
{
"epoch": 0.4368556701030928,
"grad_norm": 0.34248843789100647,
"learning_rate": 1.3857999278737546e-05,
"loss": 0.0025,
"step": 339
},
{
"epoch": 0.4381443298969072,
"grad_norm": 1.0721467733383179,
"learning_rate": 1.3816436297416496e-05,
"loss": 0.0039,
"step": 340
},
{
"epoch": 0.4381443298969072,
"eval_accuracy": 0.9975173783515392,
"eval_f1": 0.9557522123893806,
"eval_loss": 0.01406013686209917,
"eval_precision": 0.9642857142857143,
"eval_recall": 0.9473684210526315,
"eval_runtime": 83.6914,
"eval_samples_per_second": 5.437,
"eval_steps_per_second": 0.179,
"step": 340
},
{
"epoch": 0.43943298969072164,
"grad_norm": 0.35073205828666687,
"learning_rate": 1.3774796004224258e-05,
"loss": 0.0015,
"step": 341
},
{
"epoch": 0.44072164948453607,
"grad_norm": 3.6525022983551025,
"learning_rate": 1.3733079242693572e-05,
"loss": 0.0248,
"step": 342
},
{
"epoch": 0.4420103092783505,
"grad_norm": 3.9139022827148438,
"learning_rate": 1.3691286857906254e-05,
"loss": 0.0244,
"step": 343
},
{
"epoch": 0.44329896907216493,
"grad_norm": 1.2014960050582886,
"learning_rate": 1.3649419696476057e-05,
"loss": 0.0037,
"step": 344
},
{
"epoch": 0.44458762886597936,
"grad_norm": 2.0724196434020996,
"learning_rate": 1.3607478606531533e-05,
"loss": 0.0158,
"step": 345
},
{
"epoch": 0.44587628865979384,
"grad_norm": 0.4371326267719269,
"learning_rate": 1.356546443769885e-05,
"loss": 0.005,
"step": 346
},
{
"epoch": 0.44716494845360827,
"grad_norm": 3.2014546394348145,
"learning_rate": 1.3523378041084576e-05,
"loss": 0.0159,
"step": 347
},
{
"epoch": 0.4484536082474227,
"grad_norm": 0.9927562475204468,
"learning_rate": 1.3481220269258449e-05,
"loss": 0.0271,
"step": 348
},
{
"epoch": 0.4497422680412371,
"grad_norm": 0.40939047932624817,
"learning_rate": 1.3438991976236087e-05,
"loss": 0.0029,
"step": 349
},
{
"epoch": 0.45103092783505155,
"grad_norm": 0.9826658964157104,
"learning_rate": 1.3396694017461708e-05,
"loss": 0.006,
"step": 350
},
{
"epoch": 0.452319587628866,
"grad_norm": 0.14362020790576935,
"learning_rate": 1.3354327249790786e-05,
"loss": 0.0012,
"step": 351
},
{
"epoch": 0.4536082474226804,
"grad_norm": 2.1811251640319824,
"learning_rate": 1.3311892531472705e-05,
"loss": 0.0121,
"step": 352
},
{
"epoch": 0.45489690721649484,
"grad_norm": 2.0864737033843994,
"learning_rate": 1.3269390722133358e-05,
"loss": 0.0069,
"step": 353
},
{
"epoch": 0.45618556701030927,
"grad_norm": 0.13449899852275848,
"learning_rate": 1.3226822682757745e-05,
"loss": 0.0012,
"step": 354
},
{
"epoch": 0.4574742268041237,
"grad_norm": 2.524960994720459,
"learning_rate": 1.3184189275672532e-05,
"loss": 0.0223,
"step": 355
},
{
"epoch": 0.4587628865979381,
"grad_norm": 0.3010599911212921,
"learning_rate": 1.3141491364528576e-05,
"loss": 0.0021,
"step": 356
},
{
"epoch": 0.46005154639175255,
"grad_norm": 1.1148960590362549,
"learning_rate": 1.3098729814283426e-05,
"loss": 0.0071,
"step": 357
},
{
"epoch": 0.46134020618556704,
"grad_norm": 1.5602556467056274,
"learning_rate": 1.3055905491183822e-05,
"loss": 0.0084,
"step": 358
},
{
"epoch": 0.46262886597938147,
"grad_norm": 2.6677777767181396,
"learning_rate": 1.3013019262748112e-05,
"loss": 0.04,
"step": 359
},
{
"epoch": 0.4639175257731959,
"grad_norm": 3.551100969314575,
"learning_rate": 1.2970071997748712e-05,
"loss": 0.0102,
"step": 360
},
{
"epoch": 0.4639175257731959,
"eval_accuracy": 0.997020854021847,
"eval_f1": 0.9473684210526315,
"eval_loss": 0.013832507655024529,
"eval_precision": 0.9473684210526315,
"eval_recall": 0.9473684210526315,
"eval_runtime": 83.8868,
"eval_samples_per_second": 5.424,
"eval_steps_per_second": 0.179,
"step": 360
},
{
"epoch": 0.4652061855670103,
"grad_norm": 1.6473758220672607,
"learning_rate": 1.2927064566194493e-05,
"loss": 0.0212,
"step": 361
},
{
"epoch": 0.46649484536082475,
"grad_norm": 0.44642215967178345,
"learning_rate": 1.288399783931315e-05,
"loss": 0.0024,
"step": 362
},
{
"epoch": 0.4677835051546392,
"grad_norm": 0.9296754002571106,
"learning_rate": 1.2840872689533562e-05,
"loss": 0.0126,
"step": 363
},
{
"epoch": 0.4690721649484536,
"grad_norm": 1.4537991285324097,
"learning_rate": 1.2797689990468113e-05,
"loss": 0.0314,
"step": 364
},
{
"epoch": 0.47036082474226804,
"grad_norm": 0.8310962915420532,
"learning_rate": 1.2754450616895006e-05,
"loss": 0.0035,
"step": 365
},
{
"epoch": 0.47164948453608246,
"grad_norm": 0.735938310623169,
"learning_rate": 1.2711155444740529e-05,
"loss": 0.004,
"step": 366
},
{
"epoch": 0.4729381443298969,
"grad_norm": 1.334964632987976,
"learning_rate": 1.2667805351061314e-05,
"loss": 0.0068,
"step": 367
},
{
"epoch": 0.4742268041237113,
"grad_norm": 3.200857400894165,
"learning_rate": 1.2624401214026574e-05,
"loss": 0.0205,
"step": 368
},
{
"epoch": 0.47551546391752575,
"grad_norm": 2.228219747543335,
"learning_rate": 1.2580943912900309e-05,
"loss": 0.0218,
"step": 369
},
{
"epoch": 0.47680412371134023,
"grad_norm": 3.1715714931488037,
"learning_rate": 1.2537434328023501e-05,
"loss": 0.035,
"step": 370
},
{
"epoch": 0.47809278350515466,
"grad_norm": 0.6224120259284973,
"learning_rate": 1.2493873340796271e-05,
"loss": 0.0034,
"step": 371
},
{
"epoch": 0.4793814432989691,
"grad_norm": 1.1881654262542725,
"learning_rate": 1.2450261833660033e-05,
"loss": 0.0149,
"step": 372
},
{
"epoch": 0.4806701030927835,
"grad_norm": 0.48161911964416504,
"learning_rate": 1.2406600690079608e-05,
"loss": 0.0042,
"step": 373
},
{
"epoch": 0.48195876288659795,
"grad_norm": 3.560431957244873,
"learning_rate": 1.2362890794525342e-05,
"loss": 0.0366,
"step": 374
},
{
"epoch": 0.4832474226804124,
"grad_norm": 0.9885364770889282,
"learning_rate": 1.2319133032455164e-05,
"loss": 0.0056,
"step": 375
},
{
"epoch": 0.4845360824742268,
"grad_norm": 0.7451475262641907,
"learning_rate": 1.2275328290296677e-05,
"loss": 0.0035,
"step": 376
},
{
"epoch": 0.48582474226804123,
"grad_norm": 0.24116653203964233,
"learning_rate": 1.2231477455429185e-05,
"loss": 0.0024,
"step": 377
},
{
"epoch": 0.48711340206185566,
"grad_norm": 0.6330999732017517,
"learning_rate": 1.2187581416165721e-05,
"loss": 0.0048,
"step": 378
},
{
"epoch": 0.4884020618556701,
"grad_norm": 2.2537362575531006,
"learning_rate": 1.2143641061735048e-05,
"loss": 0.024,
"step": 379
},
{
"epoch": 0.4896907216494845,
"grad_norm": 0.3978399634361267,
"learning_rate": 1.2099657282263651e-05,
"loss": 0.0019,
"step": 380
},
{
"epoch": 0.4896907216494845,
"eval_accuracy": 0.9965243296921549,
"eval_f1": 0.9391304347826087,
"eval_loss": 0.01518324762582779,
"eval_precision": 0.9310344827586207,
"eval_recall": 0.9473684210526315,
"eval_runtime": 83.3887,
"eval_samples_per_second": 5.456,
"eval_steps_per_second": 0.18,
"step": 380
},
{
"epoch": 0.49097938144329895,
"grad_norm": 0.1455003321170807,
"learning_rate": 1.2055630968757696e-05,
"loss": 0.001,
"step": 381
},
{
"epoch": 0.49226804123711343,
"grad_norm": 1.6482967138290405,
"learning_rate": 1.2011563013084996e-05,
"loss": 0.0082,
"step": 382
},
{
"epoch": 0.49355670103092786,
"grad_norm": 1.3718615770339966,
"learning_rate": 1.1967454307956933e-05,
"loss": 0.0175,
"step": 383
},
{
"epoch": 0.4948453608247423,
"grad_norm": 1.4759337902069092,
"learning_rate": 1.1923305746910372e-05,
"loss": 0.0047,
"step": 384
},
{
"epoch": 0.4961340206185567,
"grad_norm": 1.7819279432296753,
"learning_rate": 1.1879118224289563e-05,
"loss": 0.0076,
"step": 385
},
{
"epoch": 0.49742268041237114,
"grad_norm": 0.7792447209358215,
"learning_rate": 1.1834892635228024e-05,
"loss": 0.0141,
"step": 386
},
{
"epoch": 0.49871134020618557,
"grad_norm": 2.1459238529205322,
"learning_rate": 1.1790629875630412e-05,
"loss": 0.0123,
"step": 387
},
{
"epoch": 0.5,
"grad_norm": 0.5646303296089172,
"learning_rate": 1.1746330842154371e-05,
"loss": 0.004,
"step": 388
},
{
"epoch": 0.5012886597938144,
"grad_norm": 1.4319298267364502,
"learning_rate": 1.1701996432192363e-05,
"loss": 0.0105,
"step": 389
},
{
"epoch": 0.5025773195876289,
"grad_norm": 0.7775757908821106,
"learning_rate": 1.1657627543853491e-05,
"loss": 0.0054,
"step": 390
},
{
"epoch": 0.5038659793814433,
"grad_norm": 1.0142550468444824,
"learning_rate": 1.1613225075945316e-05,
"loss": 0.0053,
"step": 391
},
{
"epoch": 0.5051546391752577,
"grad_norm": 0.12586040794849396,
"learning_rate": 1.156878992795563e-05,
"loss": 0.0016,
"step": 392
},
{
"epoch": 0.5064432989690721,
"grad_norm": 2.155184268951416,
"learning_rate": 1.1524323000034256e-05,
"loss": 0.0121,
"step": 393
},
{
"epoch": 0.5077319587628866,
"grad_norm": 0.7880523800849915,
"learning_rate": 1.1479825192974791e-05,
"loss": 0.008,
"step": 394
},
{
"epoch": 0.509020618556701,
"grad_norm": 1.9294145107269287,
"learning_rate": 1.1435297408196382e-05,
"loss": 0.0131,
"step": 395
},
{
"epoch": 0.5103092783505154,
"grad_norm": 2.2471513748168945,
"learning_rate": 1.1390740547725443e-05,
"loss": 0.0113,
"step": 396
},
{
"epoch": 0.5115979381443299,
"grad_norm": 2.0676307678222656,
"learning_rate": 1.13461555141774e-05,
"loss": 0.0304,
"step": 397
},
{
"epoch": 0.5128865979381443,
"grad_norm": 1.3152693510055542,
"learning_rate": 1.1301543210738383e-05,
"loss": 0.007,
"step": 398
},
{
"epoch": 0.5141752577319587,
"grad_norm": 2.229369878768921,
"learning_rate": 1.1256904541146966e-05,
"loss": 0.0184,
"step": 399
},
{
"epoch": 0.5154639175257731,
"grad_norm": 0.16386577486991882,
"learning_rate": 1.1212240409675825e-05,
"loss": 0.0007,
"step": 400
},
{
"epoch": 0.5154639175257731,
"eval_accuracy": 0.997020854021847,
"eval_f1": 0.9464285714285714,
"eval_loss": 0.014470579102635384,
"eval_precision": 0.9636363636363636,
"eval_recall": 0.9298245614035088,
"eval_runtime": 84.2877,
"eval_samples_per_second": 5.398,
"eval_steps_per_second": 0.178,
"step": 400
},
{
"epoch": 0.5167525773195877,
"grad_norm": 1.0079818964004517,
"learning_rate": 1.1167551721113435e-05,
"loss": 0.0049,
"step": 401
},
{
"epoch": 0.5180412371134021,
"grad_norm": 0.5026824474334717,
"learning_rate": 1.1122839380745738e-05,
"loss": 0.0034,
"step": 402
},
{
"epoch": 0.5193298969072165,
"grad_norm": 0.251522421836853,
"learning_rate": 1.1078104294337806e-05,
"loss": 0.0016,
"step": 403
},
{
"epoch": 0.520618556701031,
"grad_norm": 0.2365703135728836,
"learning_rate": 1.1033347368115494e-05,
"loss": 0.0008,
"step": 404
},
{
"epoch": 0.5219072164948454,
"grad_norm": 0.3773232400417328,
"learning_rate": 1.0988569508747075e-05,
"loss": 0.0018,
"step": 405
},
{
"epoch": 0.5231958762886598,
"grad_norm": 1.486518383026123,
"learning_rate": 1.0943771623324884e-05,
"loss": 0.0276,
"step": 406
},
{
"epoch": 0.5244845360824743,
"grad_norm": 0.07446420192718506,
"learning_rate": 1.0898954619346924e-05,
"loss": 0.0008,
"step": 407
},
{
"epoch": 0.5257731958762887,
"grad_norm": 0.12031542509794235,
"learning_rate": 1.085411940469851e-05,
"loss": 0.0012,
"step": 408
},
{
"epoch": 0.5270618556701031,
"grad_norm": 0.2832430601119995,
"learning_rate": 1.0809266887633849e-05,
"loss": 0.0009,
"step": 409
},
{
"epoch": 0.5283505154639175,
"grad_norm": 0.21104812622070312,
"learning_rate": 1.0764397976757658e-05,
"loss": 0.0028,
"step": 410
},
{
"epoch": 0.529639175257732,
"grad_norm": 2.558060884475708,
"learning_rate": 1.0719513581006751e-05,
"loss": 0.018,
"step": 411
},
{
"epoch": 0.5309278350515464,
"grad_norm": 0.1067991852760315,
"learning_rate": 1.0674614609631634e-05,
"loss": 0.0004,
"step": 412
},
{
"epoch": 0.5322164948453608,
"grad_norm": 1.812943935394287,
"learning_rate": 1.062970197217808e-05,
"loss": 0.0152,
"step": 413
},
{
"epoch": 0.5335051546391752,
"grad_norm": 1.7201879024505615,
"learning_rate": 1.0584776578468698e-05,
"loss": 0.0071,
"step": 414
},
{
"epoch": 0.5347938144329897,
"grad_norm": 0.17433598637580872,
"learning_rate": 1.0539839338584509e-05,
"loss": 0.0017,
"step": 415
},
{
"epoch": 0.5360824742268041,
"grad_norm": 1.8511940240859985,
"learning_rate": 1.0494891162846515e-05,
"loss": 0.0036,
"step": 416
},
{
"epoch": 0.5373711340206185,
"grad_norm": 0.22043751180171967,
"learning_rate": 1.0449932961797249e-05,
"loss": 0.0014,
"step": 417
},
{
"epoch": 0.538659793814433,
"grad_norm": 4.1709370613098145,
"learning_rate": 1.040496564618233e-05,
"loss": 0.0426,
"step": 418
},
{
"epoch": 0.5399484536082474,
"grad_norm": 2.242624282836914,
"learning_rate": 1.0359990126932022e-05,
"loss": 0.0141,
"step": 419
},
{
"epoch": 0.5412371134020618,
"grad_norm": 0.7744148969650269,
"learning_rate": 1.0315007315142772e-05,
"loss": 0.0028,
"step": 420
},
{
"epoch": 0.5412371134020618,
"eval_accuracy": 0.9965243296921549,
"eval_f1": 0.9391304347826087,
"eval_loss": 0.01408898364752531,
"eval_precision": 0.9310344827586207,
"eval_recall": 0.9473684210526315,
"eval_runtime": 83.4124,
"eval_samples_per_second": 5.455,
"eval_steps_per_second": 0.18,
"step": 420
},
{
"epoch": 0.5425257731958762,
"grad_norm": 2.3163211345672607,
"learning_rate": 1.0270018122058753e-05,
"loss": 0.0163,
"step": 421
},
{
"epoch": 0.5438144329896907,
"grad_norm": 0.7905238270759583,
"learning_rate": 1.0225023459053416e-05,
"loss": 0.0029,
"step": 422
},
{
"epoch": 0.5451030927835051,
"grad_norm": 1.9856590032577515,
"learning_rate": 1.018002423761101e-05,
"loss": 0.0499,
"step": 423
},
{
"epoch": 0.5463917525773195,
"grad_norm": 0.17007720470428467,
"learning_rate": 1.0135021369308138e-05,
"loss": 0.0009,
"step": 424
},
{
"epoch": 0.5476804123711341,
"grad_norm": 0.5280592441558838,
"learning_rate": 1.0090015765795265e-05,
"loss": 0.0031,
"step": 425
},
{
"epoch": 0.5489690721649485,
"grad_norm": 1.507529616355896,
"learning_rate": 1.004500833877828e-05,
"loss": 0.0333,
"step": 426
},
{
"epoch": 0.5502577319587629,
"grad_norm": 3.355781316757202,
"learning_rate": 1e-05,
"loss": 0.0136,
"step": 427
},
{
"epoch": 0.5515463917525774,
"grad_norm": 2.624476909637451,
"learning_rate": 9.954991661221724e-06,
"loss": 0.0115,
"step": 428
},
{
"epoch": 0.5528350515463918,
"grad_norm": 1.1731654405593872,
"learning_rate": 9.909984234204738e-06,
"loss": 0.0029,
"step": 429
},
{
"epoch": 0.5541237113402062,
"grad_norm": 0.35924506187438965,
"learning_rate": 9.864978630691865e-06,
"loss": 0.0023,
"step": 430
},
{
"epoch": 0.5554123711340206,
"grad_norm": 0.4166584610939026,
"learning_rate": 9.819975762388993e-06,
"loss": 0.0023,
"step": 431
},
{
"epoch": 0.5567010309278351,
"grad_norm": 1.3430147171020508,
"learning_rate": 9.774976540946589e-06,
"loss": 0.022,
"step": 432
},
{
"epoch": 0.5579896907216495,
"grad_norm": 1.9612890481948853,
"learning_rate": 9.729981877941249e-06,
"loss": 0.0268,
"step": 433
},
{
"epoch": 0.5592783505154639,
"grad_norm": 1.5633459091186523,
"learning_rate": 9.684992684857232e-06,
"loss": 0.0058,
"step": 434
},
{
"epoch": 0.5605670103092784,
"grad_norm": 1.8824856281280518,
"learning_rate": 9.640009873067981e-06,
"loss": 0.0161,
"step": 435
},
{
"epoch": 0.5618556701030928,
"grad_norm": 0.3390294015407562,
"learning_rate": 9.595034353817673e-06,
"loss": 0.0011,
"step": 436
},
{
"epoch": 0.5631443298969072,
"grad_norm": 1.143394947052002,
"learning_rate": 9.550067038202756e-06,
"loss": 0.0032,
"step": 437
},
{
"epoch": 0.5644329896907216,
"grad_norm": 1.6857935190200806,
"learning_rate": 9.505108837153489e-06,
"loss": 0.0161,
"step": 438
},
{
"epoch": 0.5657216494845361,
"grad_norm": 0.44908684492111206,
"learning_rate": 9.460160661415496e-06,
"loss": 0.0027,
"step": 439
},
{
"epoch": 0.5670103092783505,
"grad_norm": 0.46384885907173157,
"learning_rate": 9.415223421531308e-06,
"loss": 0.0035,
"step": 440
},
{
"epoch": 0.5670103092783505,
"eval_accuracy": 0.9960278053624627,
"eval_f1": 0.9272727272727272,
"eval_loss": 0.014668312855064869,
"eval_precision": 0.9622641509433962,
"eval_recall": 0.8947368421052632,
"eval_runtime": 85.42,
"eval_samples_per_second": 5.327,
"eval_steps_per_second": 0.176,
"step": 440
},
{
"epoch": 0.5682989690721649,
"grad_norm": 0.303989052772522,
"learning_rate": 9.370298027821924e-06,
"loss": 0.0011,
"step": 441
},
{
"epoch": 0.5695876288659794,
"grad_norm": 1.179250717163086,
"learning_rate": 9.325385390368367e-06,
"loss": 0.0108,
"step": 442
},
{
"epoch": 0.5708762886597938,
"grad_norm": 0.0955902561545372,
"learning_rate": 9.280486418993254e-06,
"loss": 0.0008,
"step": 443
},
{
"epoch": 0.5721649484536082,
"grad_norm": 4.412278175354004,
"learning_rate": 9.23560202324235e-06,
"loss": 0.0331,
"step": 444
},
{
"epoch": 0.5734536082474226,
"grad_norm": 0.4738692045211792,
"learning_rate": 9.190733112366158e-06,
"loss": 0.0024,
"step": 445
},
{
"epoch": 0.5747422680412371,
"grad_norm": 1.4232240915298462,
"learning_rate": 9.145880595301495e-06,
"loss": 0.0095,
"step": 446
},
{
"epoch": 0.5760309278350515,
"grad_norm": 1.06927490234375,
"learning_rate": 9.101045380653076e-06,
"loss": 0.0038,
"step": 447
},
{
"epoch": 0.5773195876288659,
"grad_norm": 1.7943761348724365,
"learning_rate": 9.056228376675118e-06,
"loss": 0.0181,
"step": 448
},
{
"epoch": 0.5786082474226805,
"grad_norm": 0.47978296875953674,
"learning_rate": 9.011430491252924e-06,
"loss": 0.0019,
"step": 449
},
{
"epoch": 0.5798969072164949,
"grad_norm": 2.006948947906494,
"learning_rate": 8.966652631884506e-06,
"loss": 0.0152,
"step": 450
},
{
"epoch": 0.5811855670103093,
"grad_norm": 0.14350372552871704,
"learning_rate": 8.921895705662194e-06,
"loss": 0.0009,
"step": 451
},
{
"epoch": 0.5824742268041238,
"grad_norm": 1.1150081157684326,
"learning_rate": 8.877160619254264e-06,
"loss": 0.0036,
"step": 452
},
{
"epoch": 0.5837628865979382,
"grad_norm": 0.7745972275733948,
"learning_rate": 8.832448278886567e-06,
"loss": 0.0061,
"step": 453
},
{
"epoch": 0.5850515463917526,
"grad_norm": 0.15023173391819,
"learning_rate": 8.787759590324177e-06,
"loss": 0.0008,
"step": 454
},
{
"epoch": 0.586340206185567,
"grad_norm": 0.15638376772403717,
"learning_rate": 8.743095458853034e-06,
"loss": 0.0015,
"step": 455
},
{
"epoch": 0.5876288659793815,
"grad_norm": 0.8829141855239868,
"learning_rate": 8.698456789261617e-06,
"loss": 0.0047,
"step": 456
},
{
"epoch": 0.5889175257731959,
"grad_norm": 0.13397619128227234,
"learning_rate": 8.653844485822603e-06,
"loss": 0.0008,
"step": 457
},
{
"epoch": 0.5902061855670103,
"grad_norm": 0.54257732629776,
"learning_rate": 8.609259452274559e-06,
"loss": 0.0015,
"step": 458
},
{
"epoch": 0.5914948453608248,
"grad_norm": 3.111884832382202,
"learning_rate": 8.56470259180362e-06,
"loss": 0.0354,
"step": 459
},
{
"epoch": 0.5927835051546392,
"grad_norm": 0.41738268733024597,
"learning_rate": 8.52017480702521e-06,
"loss": 0.0016,
"step": 460
},
{
"epoch": 0.5927835051546392,
"eval_accuracy": 0.9965243296921549,
"eval_f1": 0.9357798165137615,
"eval_loss": 0.01587463542819023,
"eval_precision": 0.9807692307692307,
"eval_recall": 0.8947368421052632,
"eval_runtime": 84.0121,
"eval_samples_per_second": 5.416,
"eval_steps_per_second": 0.179,
"step": 460
},
{
"epoch": 0.5940721649484536,
"grad_norm": 1.6076654195785522,
"learning_rate": 8.475676999965747e-06,
"loss": 0.0089,
"step": 461
},
{
"epoch": 0.595360824742268,
"grad_norm": 0.16697372496128082,
"learning_rate": 8.431210072044371e-06,
"loss": 0.0007,
"step": 462
},
{
"epoch": 0.5966494845360825,
"grad_norm": 1.6268433332443237,
"learning_rate": 8.386774924054686e-06,
"loss": 0.0048,
"step": 463
},
{
"epoch": 0.5979381443298969,
"grad_norm": 1.5314583778381348,
"learning_rate": 8.342372456146512e-06,
"loss": 0.0074,
"step": 464
},
{
"epoch": 0.5992268041237113,
"grad_norm": 2.308037519454956,
"learning_rate": 8.29800356780764e-06,
"loss": 0.0105,
"step": 465
},
{
"epoch": 0.6005154639175257,
"grad_norm": 0.43206554651260376,
"learning_rate": 8.253669157845632e-06,
"loss": 0.0025,
"step": 466
},
{
"epoch": 0.6018041237113402,
"grad_norm": 2.582341194152832,
"learning_rate": 8.20937012436959e-06,
"loss": 0.0118,
"step": 467
},
{
"epoch": 0.6030927835051546,
"grad_norm": 0.5174412727355957,
"learning_rate": 8.165107364771979e-06,
"loss": 0.0015,
"step": 468
},
{
"epoch": 0.604381443298969,
"grad_norm": 1.751904845237732,
"learning_rate": 8.12088177571044e-06,
"loss": 0.0129,
"step": 469
},
{
"epoch": 0.6056701030927835,
"grad_norm": 0.601997435092926,
"learning_rate": 8.076694253089632e-06,
"loss": 0.0025,
"step": 470
},
{
"epoch": 0.6069587628865979,
"grad_norm": 0.21518899500370026,
"learning_rate": 8.032545692043068e-06,
"loss": 0.0007,
"step": 471
},
{
"epoch": 0.6082474226804123,
"grad_norm": 0.23358668386936188,
"learning_rate": 7.988436986915005e-06,
"loss": 0.0016,
"step": 472
},
{
"epoch": 0.6095360824742269,
"grad_norm": 1.7518439292907715,
"learning_rate": 7.944369031242307e-06,
"loss": 0.0363,
"step": 473
},
{
"epoch": 0.6108247422680413,
"grad_norm": 0.2232217937707901,
"learning_rate": 7.900342717736354e-06,
"loss": 0.0022,
"step": 474
},
{
"epoch": 0.6121134020618557,
"grad_norm": 0.07207631319761276,
"learning_rate": 7.856358938264953e-06,
"loss": 0.0004,
"step": 475
},
{
"epoch": 0.6134020618556701,
"grad_norm": 3.338632822036743,
"learning_rate": 7.812418583834282e-06,
"loss": 0.0105,
"step": 476
},
{
"epoch": 0.6146907216494846,
"grad_norm": 0.6013636589050293,
"learning_rate": 7.768522544570818e-06,
"loss": 0.0021,
"step": 477
},
{
"epoch": 0.615979381443299,
"grad_norm": 1.2868963479995728,
"learning_rate": 7.724671709703328e-06,
"loss": 0.0131,
"step": 478
},
{
"epoch": 0.6172680412371134,
"grad_norm": 1.118053913116455,
"learning_rate": 7.680866967544841e-06,
"loss": 0.0023,
"step": 479
},
{
"epoch": 0.6185567010309279,
"grad_norm": 3.8270020484924316,
"learning_rate": 7.637109205474665e-06,
"loss": 0.0262,
"step": 480
},
{
"epoch": 0.6185567010309279,
"eval_accuracy": 0.997020854021847,
"eval_f1": 0.9473684210526315,
"eval_loss": 0.01405768096446991,
"eval_precision": 0.9473684210526315,
"eval_recall": 0.9473684210526315,
"eval_runtime": 83.4682,
"eval_samples_per_second": 5.451,
"eval_steps_per_second": 0.18,
"step": 480
},
{
"epoch": 0.6198453608247423,
"grad_norm": 4.576198577880859,
"learning_rate": 7.5933993099203936e-06,
"loss": 0.0198,
"step": 481
},
{
"epoch": 0.6211340206185567,
"grad_norm": 1.4137742519378662,
"learning_rate": 7.5497381663399716e-06,
"loss": 0.012,
"step": 482
},
{
"epoch": 0.6224226804123711,
"grad_norm": 0.7309709787368774,
"learning_rate": 7.506126659203733e-06,
"loss": 0.0052,
"step": 483
},
{
"epoch": 0.6237113402061856,
"grad_norm": 1.414675235748291,
"learning_rate": 7.462565671976504e-06,
"loss": 0.005,
"step": 484
},
{
"epoch": 0.625,
"grad_norm": 0.5044420957565308,
"learning_rate": 7.419056087099695e-06,
"loss": 0.0034,
"step": 485
},
{
"epoch": 0.6262886597938144,
"grad_norm": 0.4570430815219879,
"learning_rate": 7.375598785973429e-06,
"loss": 0.0022,
"step": 486
},
{
"epoch": 0.6275773195876289,
"grad_norm": 1.932029128074646,
"learning_rate": 7.332194648938689e-06,
"loss": 0.0102,
"step": 487
},
{
"epoch": 0.6288659793814433,
"grad_norm": 1.0421007871627808,
"learning_rate": 7.288844555259471e-06,
"loss": 0.0039,
"step": 488
},
{
"epoch": 0.6301546391752577,
"grad_norm": 0.5654887557029724,
"learning_rate": 7.245549383104993e-06,
"loss": 0.0022,
"step": 489
},
{
"epoch": 0.6314432989690721,
"grad_norm": 0.5495522022247314,
"learning_rate": 7.202310009531886e-06,
"loss": 0.0036,
"step": 490
},
{
"epoch": 0.6327319587628866,
"grad_norm": 2.5807251930236816,
"learning_rate": 7.159127310466441e-06,
"loss": 0.012,
"step": 491
},
{
"epoch": 0.634020618556701,
"grad_norm": 0.829201877117157,
"learning_rate": 7.116002160686851e-06,
"loss": 0.006,
"step": 492
},
{
"epoch": 0.6353092783505154,
"grad_norm": 0.9482012391090393,
"learning_rate": 7.072935433805508e-06,
"loss": 0.0089,
"step": 493
},
{
"epoch": 0.6365979381443299,
"grad_norm": 1.9564292430877686,
"learning_rate": 7.0299280022512875e-06,
"loss": 0.0109,
"step": 494
},
{
"epoch": 0.6378865979381443,
"grad_norm": 0.7540721297264099,
"learning_rate": 6.986980737251889e-06,
"loss": 0.0022,
"step": 495
},
{
"epoch": 0.6391752577319587,
"grad_norm": 0.34916985034942627,
"learning_rate": 6.944094508816182e-06,
"loss": 0.004,
"step": 496
},
{
"epoch": 0.6404639175257731,
"grad_norm": 0.06619101762771606,
"learning_rate": 6.9012701857165755e-06,
"loss": 0.0002,
"step": 497
},
{
"epoch": 0.6417525773195877,
"grad_norm": 1.7265164852142334,
"learning_rate": 6.858508635471428e-06,
"loss": 0.0038,
"step": 498
},
{
"epoch": 0.6430412371134021,
"grad_norm": 0.7744361758232117,
"learning_rate": 6.815810724327469e-06,
"loss": 0.0032,
"step": 499
},
{
"epoch": 0.6443298969072165,
"grad_norm": 3.491551637649536,
"learning_rate": 6.773177317242257e-06,
"loss": 0.0294,
"step": 500
},
{
"epoch": 0.6443298969072165,
"eval_accuracy": 0.997020854021847,
"eval_f1": 0.9454545454545454,
"eval_loss": 0.016488004475831985,
"eval_precision": 0.9811320754716981,
"eval_recall": 0.9122807017543859,
"eval_runtime": 83.472,
"eval_samples_per_second": 5.451,
"eval_steps_per_second": 0.18,
"step": 500
},
{
"epoch": 0.645618556701031,
"grad_norm": 0.3875870704650879,
"learning_rate": 6.730609277866644e-06,
"loss": 0.0013,
"step": 501
},
{
"epoch": 0.6469072164948454,
"grad_norm": 0.7030169367790222,
"learning_rate": 6.688107468527297e-06,
"loss": 0.0044,
"step": 502
},
{
"epoch": 0.6481958762886598,
"grad_norm": 0.05920355021953583,
"learning_rate": 6.645672750209216e-06,
"loss": 0.0007,
"step": 503
},
{
"epoch": 0.6494845360824743,
"grad_norm": 2.848557472229004,
"learning_rate": 6.603305982538295e-06,
"loss": 0.014,
"step": 504
},
{
"epoch": 0.6507731958762887,
"grad_norm": 0.3917801082134247,
"learning_rate": 6.561008023763915e-06,
"loss": 0.0009,
"step": 505
},
{
"epoch": 0.6520618556701031,
"grad_norm": 1.1397738456726074,
"learning_rate": 6.518779730741555e-06,
"loss": 0.0072,
"step": 506
},
{
"epoch": 0.6533505154639175,
"grad_norm": 0.10615069419145584,
"learning_rate": 6.476621958915426e-06,
"loss": 0.0007,
"step": 507
},
{
"epoch": 0.654639175257732,
"grad_norm": 0.2596324384212494,
"learning_rate": 6.434535562301153e-06,
"loss": 0.0006,
"step": 508
},
{
"epoch": 0.6559278350515464,
"grad_norm": 1.1918329000473022,
"learning_rate": 6.392521393468471e-06,
"loss": 0.0025,
"step": 509
},
{
"epoch": 0.6572164948453608,
"grad_norm": 2.2419281005859375,
"learning_rate": 6.350580303523947e-06,
"loss": 0.006,
"step": 510
},
{
"epoch": 0.6585051546391752,
"grad_norm": 1.821906328201294,
"learning_rate": 6.308713142093749e-06,
"loss": 0.025,
"step": 511
},
{
"epoch": 0.6597938144329897,
"grad_norm": 2.59908390045166,
"learning_rate": 6.266920757306429e-06,
"loss": 0.0225,
"step": 512
},
{
"epoch": 0.6610824742268041,
"grad_norm": 2.4867560863494873,
"learning_rate": 6.225203995775746e-06,
"loss": 0.0097,
"step": 513
},
{
"epoch": 0.6623711340206185,
"grad_norm": 0.33032119274139404,
"learning_rate": 6.183563702583506e-06,
"loss": 0.0012,
"step": 514
},
{
"epoch": 0.663659793814433,
"grad_norm": 0.6683783531188965,
"learning_rate": 6.1420007212624584e-06,
"loss": 0.0014,
"step": 515
},
{
"epoch": 0.6649484536082474,
"grad_norm": 0.08879516273736954,
"learning_rate": 6.100515893779188e-06,
"loss": 0.0006,
"step": 516
},
{
"epoch": 0.6662371134020618,
"grad_norm": 1.5069953203201294,
"learning_rate": 6.05911006051708e-06,
"loss": 0.0062,
"step": 517
},
{
"epoch": 0.6675257731958762,
"grad_norm": 1.1832886934280396,
"learning_rate": 6.01778406025928e-06,
"loss": 0.0051,
"step": 518
},
{
"epoch": 0.6688144329896907,
"grad_norm": 1.7542977333068848,
"learning_rate": 5.976538730171708e-06,
"loss": 0.0072,
"step": 519
},
{
"epoch": 0.6701030927835051,
"grad_norm": 2.53532338142395,
"learning_rate": 5.935374905786102e-06,
"loss": 0.0054,
"step": 520
},
{
"epoch": 0.6701030927835051,
"eval_accuracy": 0.997020854021847,
"eval_f1": 0.9473684210526315,
"eval_loss": 0.014478031545877457,
"eval_precision": 0.9473684210526315,
"eval_recall": 0.9473684210526315,
"eval_runtime": 85.5952,
"eval_samples_per_second": 5.316,
"eval_steps_per_second": 0.175,
"step": 520
},
{
"epoch": 0.6713917525773195,
"grad_norm": 3.9269418716430664,
"learning_rate": 5.89429342098309e-06,
"loss": 0.0174,
"step": 521
},
{
"epoch": 0.6726804123711341,
"grad_norm": 1.222317099571228,
"learning_rate": 5.8532951079752895e-06,
"loss": 0.0046,
"step": 522
},
{
"epoch": 0.6739690721649485,
"grad_norm": 0.17730024456977844,
"learning_rate": 5.812380797290465e-06,
"loss": 0.0008,
"step": 523
},
{
"epoch": 0.6752577319587629,
"grad_norm": 0.8336971998214722,
"learning_rate": 5.771551317754691e-06,
"loss": 0.0014,
"step": 524
},
{
"epoch": 0.6765463917525774,
"grad_norm": 0.8036553859710693,
"learning_rate": 5.730807496475568e-06,
"loss": 0.0033,
"step": 525
},
{
"epoch": 0.6778350515463918,
"grad_norm": 0.5665665864944458,
"learning_rate": 5.690150158825462e-06,
"loss": 0.0016,
"step": 526
},
{
"epoch": 0.6791237113402062,
"grad_norm": 0.08514845371246338,
"learning_rate": 5.649580128424792e-06,
"loss": 0.0013,
"step": 527
},
{
"epoch": 0.6804123711340206,
"grad_norm": 0.22260144352912903,
"learning_rate": 5.609098227125334e-06,
"loss": 0.001,
"step": 528
},
{
"epoch": 0.6817010309278351,
"grad_norm": 0.6056246161460876,
"learning_rate": 5.568705274993584e-06,
"loss": 0.0013,
"step": 529
},
{
"epoch": 0.6829896907216495,
"grad_norm": 0.2608482539653778,
"learning_rate": 5.528402090294142e-06,
"loss": 0.0013,
"step": 530
},
{
"epoch": 0.6842783505154639,
"grad_norm": 2.119140386581421,
"learning_rate": 5.488189489473133e-06,
"loss": 0.0264,
"step": 531
},
{
"epoch": 0.6855670103092784,
"grad_norm": 0.605993390083313,
"learning_rate": 5.448068287141663e-06,
"loss": 0.0025,
"step": 532
},
{
"epoch": 0.6868556701030928,
"grad_norm": 0.04999390244483948,
"learning_rate": 5.4080392960593355e-06,
"loss": 0.0003,
"step": 533
},
{
"epoch": 0.6881443298969072,
"grad_norm": 1.6986360549926758,
"learning_rate": 5.368103327117768e-06,
"loss": 0.0199,
"step": 534
},
{
"epoch": 0.6894329896907216,
"grad_norm": 1.8997451066970825,
"learning_rate": 5.328261189324166e-06,
"loss": 0.0236,
"step": 535
},
{
"epoch": 0.6907216494845361,
"grad_norm": 0.4543597102165222,
"learning_rate": 5.288513689784951e-06,
"loss": 0.0013,
"step": 536
},
{
"epoch": 0.6920103092783505,
"grad_norm": 0.3688147962093353,
"learning_rate": 5.2488616336893915e-06,
"loss": 0.001,
"step": 537
},
{
"epoch": 0.6932989690721649,
"grad_norm": 1.8557827472686768,
"learning_rate": 5.209305824293307e-06,
"loss": 0.0086,
"step": 538
},
{
"epoch": 0.6945876288659794,
"grad_norm": 2.0368287563323975,
"learning_rate": 5.1698470629027845e-06,
"loss": 0.0127,
"step": 539
},
{
"epoch": 0.6958762886597938,
"grad_norm": 1.7883585691452026,
"learning_rate": 5.130486148857952e-06,
"loss": 0.0293,
"step": 540
},
{
"epoch": 0.6958762886597938,
"eval_accuracy": 0.997020854021847,
"eval_f1": 0.9473684210526315,
"eval_loss": 0.014807779341936111,
"eval_precision": 0.9473684210526315,
"eval_recall": 0.9473684210526315,
"eval_runtime": 84.982,
"eval_samples_per_second": 5.354,
"eval_steps_per_second": 0.177,
"step": 540
},
{
"epoch": 0.6971649484536082,
"grad_norm": 1.6709312200546265,
"learning_rate": 5.0912238795167845e-06,
"loss": 0.022,
"step": 541
},
{
"epoch": 0.6984536082474226,
"grad_norm": 0.772537350654602,
"learning_rate": 5.05206105023895e-06,
"loss": 0.0029,
"step": 542
},
{
"epoch": 0.6997422680412371,
"grad_norm": 4.051438331604004,
"learning_rate": 5.012998454369701e-06,
"loss": 0.038,
"step": 543
},
{
"epoch": 0.7010309278350515,
"grad_norm": 1.2733999490737915,
"learning_rate": 4.974036883223798e-06,
"loss": 0.0065,
"step": 544
},
{
"epoch": 0.7023195876288659,
"grad_norm": 0.21695715188980103,
"learning_rate": 4.935177126069485e-06,
"loss": 0.0006,
"step": 545
},
{
"epoch": 0.7036082474226805,
"grad_norm": 0.9881150722503662,
"learning_rate": 4.896419970112499e-06,
"loss": 0.0061,
"step": 546
},
{
"epoch": 0.7048969072164949,
"grad_norm": 0.4101882576942444,
"learning_rate": 4.857766200480115e-06,
"loss": 0.0012,
"step": 547
},
{
"epoch": 0.7061855670103093,
"grad_norm": 0.4901997745037079,
"learning_rate": 4.819216600205254e-06,
"loss": 0.0022,
"step": 548
},
{
"epoch": 0.7074742268041238,
"grad_norm": 1.6338658332824707,
"learning_rate": 4.780771950210616e-06,
"loss": 0.0074,
"step": 549
},
{
"epoch": 0.7087628865979382,
"grad_norm": 0.9421409964561462,
"learning_rate": 4.742433029292856e-06,
"loss": 0.0023,
"step": 550
},
{
"epoch": 0.7100515463917526,
"grad_norm": 0.20757536590099335,
"learning_rate": 4.704200614106813e-06,
"loss": 0.0012,
"step": 551
},
{
"epoch": 0.711340206185567,
"grad_norm": 2.018266201019287,
"learning_rate": 4.6660754791497755e-06,
"loss": 0.0096,
"step": 552
},
{
"epoch": 0.7126288659793815,
"grad_norm": 2.6476552486419678,
"learning_rate": 4.628058396745787e-06,
"loss": 0.0053,
"step": 553
},
{
"epoch": 0.7139175257731959,
"grad_norm": 1.7703890800476074,
"learning_rate": 4.590150137030009e-06,
"loss": 0.0071,
"step": 554
},
{
"epoch": 0.7152061855670103,
"grad_norm": 1.2769412994384766,
"learning_rate": 4.552351467933115e-06,
"loss": 0.0036,
"step": 555
},
{
"epoch": 0.7164948453608248,
"grad_norm": 1.8354310989379883,
"learning_rate": 4.514663155165731e-06,
"loss": 0.008,
"step": 556
},
{
"epoch": 0.7177835051546392,
"grad_norm": 0.896404504776001,
"learning_rate": 4.477085962202931e-06,
"loss": 0.0028,
"step": 557
},
{
"epoch": 0.7190721649484536,
"grad_norm": 0.33429154753685,
"learning_rate": 4.439620650268771e-06,
"loss": 0.0013,
"step": 558
},
{
"epoch": 0.720360824742268,
"grad_norm": 1.1864862442016602,
"learning_rate": 4.402267978320854e-06,
"loss": 0.0035,
"step": 559
},
{
"epoch": 0.7216494845360825,
"grad_norm": 2.4220573902130127,
"learning_rate": 4.365028703034976e-06,
"loss": 0.0133,
"step": 560
},
{
"epoch": 0.7216494845360825,
"eval_accuracy": 0.997020854021847,
"eval_f1": 0.9473684210526315,
"eval_loss": 0.013733865693211555,
"eval_precision": 0.9473684210526315,
"eval_recall": 0.9473684210526315,
"eval_runtime": 85.2875,
"eval_samples_per_second": 5.335,
"eval_steps_per_second": 0.176,
"step": 560
},
{
"epoch": 0.7229381443298969,
"grad_norm": 2.1442863941192627,
"learning_rate": 4.327903578789785e-06,
"loss": 0.0307,
"step": 561
},
{
"epoch": 0.7242268041237113,
"grad_norm": 1.1676955223083496,
"learning_rate": 4.290893357651502e-06,
"loss": 0.002,
"step": 562
},
{
"epoch": 0.7255154639175257,
"grad_norm": 1.461906909942627,
"learning_rate": 4.253998789358683e-06,
"loss": 0.0105,
"step": 563
},
{
"epoch": 0.7268041237113402,
"grad_norm": 2.029210090637207,
"learning_rate": 4.217220621307043e-06,
"loss": 0.0066,
"step": 564
},
{
"epoch": 0.7280927835051546,
"grad_norm": 0.26991185545921326,
"learning_rate": 4.180559598534297e-06,
"loss": 0.0009,
"step": 565
},
{
"epoch": 0.729381443298969,
"grad_norm": 2.1972944736480713,
"learning_rate": 4.144016463705081e-06,
"loss": 0.0074,
"step": 566
},
{
"epoch": 0.7306701030927835,
"grad_norm": 1.7855631113052368,
"learning_rate": 4.107591957095903e-06,
"loss": 0.0234,
"step": 567
},
{
"epoch": 0.7319587628865979,
"grad_norm": 0.13372205197811127,
"learning_rate": 4.071286816580142e-06,
"loss": 0.0011,
"step": 568
},
{
"epoch": 0.7332474226804123,
"grad_norm": 0.3758986294269562,
"learning_rate": 4.035101777613113e-06,
"loss": 0.0009,
"step": 569
},
{
"epoch": 0.7345360824742269,
"grad_norm": 4.052021026611328,
"learning_rate": 3.999037573217157e-06,
"loss": 0.031,
"step": 570
},
{
"epoch": 0.7358247422680413,
"grad_norm": 3.024075508117676,
"learning_rate": 3.963094933966797e-06,
"loss": 0.0191,
"step": 571
},
{
"epoch": 0.7371134020618557,
"grad_norm": 0.10660507529973984,
"learning_rate": 3.927274587973935e-06,
"loss": 0.0004,
"step": 572
},
{
"epoch": 0.7384020618556701,
"grad_norm": 0.7237541079521179,
"learning_rate": 3.8915772608731066e-06,
"loss": 0.0015,
"step": 573
},
{
"epoch": 0.7396907216494846,
"grad_norm": 3.8813493251800537,
"learning_rate": 3.856003675806777e-06,
"loss": 0.0142,
"step": 574
},
{
"epoch": 0.740979381443299,
"grad_norm": 0.08904914557933807,
"learning_rate": 3.820554553410693e-06,
"loss": 0.0006,
"step": 575
},
{
"epoch": 0.7422680412371134,
"grad_norm": 2.8645918369293213,
"learning_rate": 3.78523061179929e-06,
"loss": 0.0151,
"step": 576
},
{
"epoch": 0.7435567010309279,
"grad_norm": 0.8430268168449402,
"learning_rate": 3.7500325665511337e-06,
"loss": 0.0031,
"step": 577
},
{
"epoch": 0.7448453608247423,
"grad_norm": 0.0855301171541214,
"learning_rate": 3.7149611306944356e-06,
"loss": 0.0007,
"step": 578
},
{
"epoch": 0.7461340206185567,
"grad_norm": 1.7717701196670532,
"learning_rate": 3.680017014692604e-06,
"loss": 0.0075,
"step": 579
},
{
"epoch": 0.7474226804123711,
"grad_norm": 1.216423749923706,
"learning_rate": 3.645200926429844e-06,
"loss": 0.0028,
"step": 580
},
{
"epoch": 0.7474226804123711,
"eval_accuracy": 0.9980139026812314,
"eval_f1": 0.9642857142857143,
"eval_loss": 0.014089370146393776,
"eval_precision": 0.9818181818181818,
"eval_recall": 0.9473684210526315,
"eval_runtime": 85.7572,
"eval_samples_per_second": 5.306,
"eval_steps_per_second": 0.175,
"step": 580
},
{
"epoch": 0.7487113402061856,
"grad_norm": 0.18626463413238525,
"learning_rate": 3.610513571196832e-06,
"loss": 0.0008,
"step": 581
},
{
"epoch": 0.75,
"grad_norm": 0.16629698872566223,
"learning_rate": 3.5759556516764205e-06,
"loss": 0.001,
"step": 582
},
{
"epoch": 0.7512886597938144,
"grad_norm": 0.0471065454185009,
"learning_rate": 3.541527867929403e-06,
"loss": 0.0003,
"step": 583
},
{
"epoch": 0.7525773195876289,
"grad_norm": 0.925058901309967,
"learning_rate": 3.507230917380332e-06,
"loss": 0.0022,
"step": 584
},
{
"epoch": 0.7538659793814433,
"grad_norm": 5.241347312927246,
"learning_rate": 3.4730654948033957e-06,
"loss": 0.0038,
"step": 585
},
{
"epoch": 0.7551546391752577,
"grad_norm": 5.135495662689209,
"learning_rate": 3.4390322923083385e-06,
"loss": 0.0154,
"step": 586
},
{
"epoch": 0.7564432989690721,
"grad_norm": 0.30281150341033936,
"learning_rate": 3.4051319993264397e-06,
"loss": 0.0009,
"step": 587
},
{
"epoch": 0.7577319587628866,
"grad_norm": 0.08247953653335571,
"learning_rate": 3.3713653025965544e-06,
"loss": 0.0006,
"step": 588
},
{
"epoch": 0.759020618556701,
"grad_norm": 0.1323813498020172,
"learning_rate": 3.3377328861511927e-06,
"loss": 0.0005,
"step": 589
},
{
"epoch": 0.7603092783505154,
"grad_norm": 1.5231373310089111,
"learning_rate": 3.3042354313026702e-06,
"loss": 0.0051,
"step": 590
},
{
"epoch": 0.7615979381443299,
"grad_norm": 0.08996398001909256,
"learning_rate": 3.2708736166293064e-06,
"loss": 0.0004,
"step": 591
},
{
"epoch": 0.7628865979381443,
"grad_norm": 0.5507305264472961,
"learning_rate": 3.237648117961665e-06,
"loss": 0.001,
"step": 592
},
{
"epoch": 0.7641752577319587,
"grad_norm": 3.912440061569214,
"learning_rate": 3.2045596083688814e-06,
"loss": 0.0169,
"step": 593
},
{
"epoch": 0.7654639175257731,
"grad_norm": 1.7454997301101685,
"learning_rate": 3.1716087581450193e-06,
"loss": 0.0051,
"step": 594
},
{
"epoch": 0.7667525773195877,
"grad_norm": 2.3474819660186768,
"learning_rate": 3.1387962347954936e-06,
"loss": 0.0101,
"step": 595
},
{
"epoch": 0.7680412371134021,
"grad_norm": 0.4886447787284851,
"learning_rate": 3.1061227030235442e-06,
"loss": 0.0009,
"step": 596
},
{
"epoch": 0.7693298969072165,
"grad_norm": 2.3838088512420654,
"learning_rate": 3.073588824716777e-06,
"loss": 0.0057,
"step": 597
},
{
"epoch": 0.770618556701031,
"grad_norm": 0.4210747480392456,
"learning_rate": 3.041195258933749e-06,
"loss": 0.0026,
"step": 598
},
{
"epoch": 0.7719072164948454,
"grad_norm": 1.517642855644226,
"learning_rate": 3.008942661890627e-06,
"loss": 0.0033,
"step": 599
},
{
"epoch": 0.7731958762886598,
"grad_norm": 0.5007296800613403,
"learning_rate": 2.976831686947884e-06,
"loss": 0.0012,
"step": 600
},
{
"epoch": 0.7731958762886598,
"eval_accuracy": 0.9980139026812314,
"eval_f1": 0.9642857142857143,
"eval_loss": 0.01421260554343462,
"eval_precision": 0.9818181818181818,
"eval_recall": 0.9473684210526315,
"eval_runtime": 85.1923,
"eval_samples_per_second": 5.341,
"eval_steps_per_second": 0.176,
"step": 600
},
{
"epoch": 0.7744845360824743,
"grad_norm": 0.11000218242406845,
"learning_rate": 2.944862984597068e-06,
"loss": 0.0006,
"step": 601
},
{
"epoch": 0.7757731958762887,
"grad_norm": 0.06000187247991562,
"learning_rate": 2.913037202447625e-06,
"loss": 0.0002,
"step": 602
},
{
"epoch": 0.7770618556701031,
"grad_norm": 1.5387769937515259,
"learning_rate": 2.8813549852137824e-06,
"loss": 0.0069,
"step": 603
},
{
"epoch": 0.7783505154639175,
"grad_norm": 0.052634306252002716,
"learning_rate": 2.8498169747014824e-06,
"loss": 0.0004,
"step": 604
},
{
"epoch": 0.779639175257732,
"grad_norm": 2.0702285766601562,
"learning_rate": 2.818423809795384e-06,
"loss": 0.0075,
"step": 605
},
{
"epoch": 0.7809278350515464,
"grad_norm": 2.145193099975586,
"learning_rate": 2.787176126445923e-06,
"loss": 0.0143,
"step": 606
},
{
"epoch": 0.7822164948453608,
"grad_norm": 1.94924795627594,
"learning_rate": 2.756074557656424e-06,
"loss": 0.0032,
"step": 607
},
{
"epoch": 0.7835051546391752,
"grad_norm": 0.6096054911613464,
"learning_rate": 2.725119733470284e-06,
"loss": 0.0019,
"step": 608
},
{
"epoch": 0.7847938144329897,
"grad_norm": 0.4482673406600952,
"learning_rate": 2.6943122809582e-06,
"loss": 0.0017,
"step": 609
},
{
"epoch": 0.7860824742268041,
"grad_norm": 0.4562239944934845,
"learning_rate": 2.663652824205476e-06,
"loss": 0.0018,
"step": 610
},
{
"epoch": 0.7873711340206185,
"grad_norm": 0.07097622007131577,
"learning_rate": 2.6331419842993746e-06,
"loss": 0.0003,
"step": 611
},
{
"epoch": 0.788659793814433,
"grad_norm": 0.23742669820785522,
"learning_rate": 2.6027803793165353e-06,
"loss": 0.0012,
"step": 612
},
{
"epoch": 0.7899484536082474,
"grad_norm": 0.7744345664978027,
"learning_rate": 2.5725686243104586e-06,
"loss": 0.0009,
"step": 613
},
{
"epoch": 0.7912371134020618,
"grad_norm": 2.8336048126220703,
"learning_rate": 2.5425073312990334e-06,
"loss": 0.0189,
"step": 614
},
{
"epoch": 0.7925257731958762,
"grad_norm": 3.236351728439331,
"learning_rate": 2.5125971092521607e-06,
"loss": 0.0504,
"step": 615
},
{
"epoch": 0.7938144329896907,
"grad_norm": 0.09571472555398941,
"learning_rate": 2.4828385640793974e-06,
"loss": 0.0004,
"step": 616
},
{
"epoch": 0.7951030927835051,
"grad_norm": 3.24649977684021,
"learning_rate": 2.4532322986176926e-06,
"loss": 0.0066,
"step": 617
},
{
"epoch": 0.7963917525773195,
"grad_norm": 0.6379624605178833,
"learning_rate": 2.4237789126191715e-06,
"loss": 0.0025,
"step": 618
},
{
"epoch": 0.7976804123711341,
"grad_norm": 0.4450211226940155,
"learning_rate": 2.3944790027389888e-06,
"loss": 0.0024,
"step": 619
},
{
"epoch": 0.7989690721649485,
"grad_norm": 0.42626920342445374,
"learning_rate": 2.3653331625232367e-06,
"loss": 0.0018,
"step": 620
},
{
"epoch": 0.7989690721649485,
"eval_accuracy": 0.9975173783515392,
"eval_f1": 0.9557522123893806,
"eval_loss": 0.013614228926599026,
"eval_precision": 0.9642857142857143,
"eval_recall": 0.9473684210526315,
"eval_runtime": 85.5414,
"eval_samples_per_second": 5.319,
"eval_steps_per_second": 0.175,
"step": 620
},
{
"epoch": 0.8002577319587629,
"grad_norm": 1.3859606981277466,
"learning_rate": 2.336341982396928e-06,
"loss": 0.0018,
"step": 621
},
{
"epoch": 0.8015463917525774,
"grad_norm": 0.29047346115112305,
"learning_rate": 2.307506049652031e-06,
"loss": 0.0033,
"step": 622
},
{
"epoch": 0.8028350515463918,
"grad_norm": 4.054072856903076,
"learning_rate": 2.2788259484355712e-06,
"loss": 0.0358,
"step": 623
},
{
"epoch": 0.8041237113402062,
"grad_norm": 1.3279507160186768,
"learning_rate": 2.250302259737803e-06,
"loss": 0.0047,
"step": 624
},
{
"epoch": 0.8054123711340206,
"grad_norm": 1.3657283782958984,
"learning_rate": 2.2219355613804406e-06,
"loss": 0.0027,
"step": 625
},
{
"epoch": 0.8067010309278351,
"grad_norm": 0.02000313624739647,
"learning_rate": 2.1937264280049365e-06,
"loss": 0.0001,
"step": 626
},
{
"epoch": 0.8079896907216495,
"grad_norm": 0.022800248116254807,
"learning_rate": 2.1656754310608663e-06,
"loss": 0.0001,
"step": 627
},
{
"epoch": 0.8092783505154639,
"grad_norm": 2.1615025997161865,
"learning_rate": 2.137783138794335e-06,
"loss": 0.005,
"step": 628
},
{
"epoch": 0.8105670103092784,
"grad_norm": 2.5483734607696533,
"learning_rate": 2.1100501162364707e-06,
"loss": 0.0407,
"step": 629
},
{
"epoch": 0.8118556701030928,
"grad_norm": 0.29894164204597473,
"learning_rate": 2.0824769251919775e-06,
"loss": 0.0005,
"step": 630
},
{
"epoch": 0.8131443298969072,
"grad_norm": 1.0408016443252563,
"learning_rate": 2.055064124227758e-06,
"loss": 0.0018,
"step": 631
},
{
"epoch": 0.8144329896907216,
"grad_norm": 0.12250500172376633,
"learning_rate": 2.027812268661592e-06,
"loss": 0.0005,
"step": 632
},
{
"epoch": 0.8157216494845361,
"grad_norm": 0.30518946051597595,
"learning_rate": 2.0007219105508935e-06,
"loss": 0.0015,
"step": 633
},
{
"epoch": 0.8170103092783505,
"grad_norm": 0.46933111548423767,
"learning_rate": 1.9737935986815205e-06,
"loss": 0.0014,
"step": 634
},
{
"epoch": 0.8182989690721649,
"grad_norm": 0.45417195558547974,
"learning_rate": 1.9470278785566653e-06,
"loss": 0.0016,
"step": 635
},
{
"epoch": 0.8195876288659794,
"grad_norm": 2.510430335998535,
"learning_rate": 1.9204252923858003e-06,
"loss": 0.0114,
"step": 636
},
{
"epoch": 0.8208762886597938,
"grad_norm": 0.9059619903564453,
"learning_rate": 1.8939863790736923e-06,
"loss": 0.0041,
"step": 637
},
{
"epoch": 0.8221649484536082,
"grad_norm": 0.9617973566055298,
"learning_rate": 1.8677116742094858e-06,
"loss": 0.0038,
"step": 638
},
{
"epoch": 0.8234536082474226,
"grad_norm": 2.0744788646698,
"learning_rate": 1.8416017100558592e-06,
"loss": 0.0046,
"step": 639
},
{
"epoch": 0.8247422680412371,
"grad_norm": 1.710120439529419,
"learning_rate": 1.8156570155382357e-06,
"loss": 0.0164,
"step": 640
},
{
"epoch": 0.8247422680412371,
"eval_accuracy": 0.9975173783515392,
"eval_f1": 0.9557522123893806,
"eval_loss": 0.014028658159077168,
"eval_precision": 0.9642857142857143,
"eval_recall": 0.9473684210526315,
"eval_runtime": 85.1596,
"eval_samples_per_second": 5.343,
"eval_steps_per_second": 0.176,
"step": 640
},
{
"epoch": 0.8260309278350515,
"grad_norm": 0.4058464765548706,
"learning_rate": 1.7898781162340683e-06,
"loss": 0.0015,
"step": 641
},
{
"epoch": 0.8273195876288659,
"grad_norm": 2.3487915992736816,
"learning_rate": 1.764265534362205e-06,
"loss": 0.0046,
"step": 642
},
{
"epoch": 0.8286082474226805,
"grad_norm": 1.4977848529815674,
"learning_rate": 1.7388197887722914e-06,
"loss": 0.0025,
"step": 643
},
{
"epoch": 0.8298969072164949,
"grad_norm": 0.7546347379684448,
"learning_rate": 1.7135413949342706e-06,
"loss": 0.0034,
"step": 644
},
{
"epoch": 0.8311855670103093,
"grad_norm": 1.2620099782943726,
"learning_rate": 1.6884308649279413e-06,
"loss": 0.0026,
"step": 645
},
{
"epoch": 0.8324742268041238,
"grad_norm": 0.3899240791797638,
"learning_rate": 1.6634887074325844e-06,
"loss": 0.0042,
"step": 646
},
{
"epoch": 0.8337628865979382,
"grad_norm": 3.6986889839172363,
"learning_rate": 1.6387154277166484e-06,
"loss": 0.014,
"step": 647
},
{
"epoch": 0.8350515463917526,
"grad_norm": 4.560563087463379,
"learning_rate": 1.6141115276275298e-06,
"loss": 0.0071,
"step": 648
},
{
"epoch": 0.836340206185567,
"grad_norm": 2.40631365776062,
"learning_rate": 1.5896775055813973e-06,
"loss": 0.0155,
"step": 649
},
{
"epoch": 0.8376288659793815,
"grad_norm": 0.34469377994537354,
"learning_rate": 1.565413856553095e-06,
"loss": 0.0008,
"step": 650
},
{
"epoch": 0.8389175257731959,
"grad_norm": 2.645254611968994,
"learning_rate": 1.5413210720661187e-06,
"loss": 0.0087,
"step": 651
},
{
"epoch": 0.8402061855670103,
"grad_norm": 3.504944324493408,
"learning_rate": 1.5173996401826563e-06,
"loss": 0.0076,
"step": 652
},
{
"epoch": 0.8414948453608248,
"grad_norm": 0.9376299977302551,
"learning_rate": 1.493650045493703e-06,
"loss": 0.0011,
"step": 653
},
{
"epoch": 0.8427835051546392,
"grad_norm": 0.07310164719820023,
"learning_rate": 1.470072769109242e-06,
"loss": 0.0002,
"step": 654
},
{
"epoch": 0.8440721649484536,
"grad_norm": 0.9804725050926208,
"learning_rate": 1.4466682886485007e-06,
"loss": 0.0248,
"step": 655
},
{
"epoch": 0.845360824742268,
"grad_norm": 1.2410582304000854,
"learning_rate": 1.4234370782302742e-06,
"loss": 0.0027,
"step": 656
},
{
"epoch": 0.8466494845360825,
"grad_norm": 1.5435892343521118,
"learning_rate": 1.4003796084633204e-06,
"loss": 0.0021,
"step": 657
},
{
"epoch": 0.8479381443298969,
"grad_norm": 1.6477559804916382,
"learning_rate": 1.3774963464368295e-06,
"loss": 0.0219,
"step": 658
},
{
"epoch": 0.8492268041237113,
"grad_norm": 1.8819410800933838,
"learning_rate": 1.3547877557109546e-06,
"loss": 0.0033,
"step": 659
},
{
"epoch": 0.8505154639175257,
"grad_norm": 4.333096981048584,
"learning_rate": 1.3322542963074314e-06,
"loss": 0.0359,
"step": 660
},
{
"epoch": 0.8505154639175257,
"eval_accuracy": 0.9980139026812314,
"eval_f1": 0.9642857142857143,
"eval_loss": 0.014260557480156422,
"eval_precision": 0.9818181818181818,
"eval_recall": 0.9473684210526315,
"eval_runtime": 85.7933,
"eval_samples_per_second": 5.303,
"eval_steps_per_second": 0.175,
"step": 660
},
{
"epoch": 0.8518041237113402,
"grad_norm": 1.0809779167175293,
"learning_rate": 1.3098964247002499e-06,
"loss": 0.0085,
"step": 661
},
{
"epoch": 0.8530927835051546,
"grad_norm": 1.4017481803894043,
"learning_rate": 1.287714593806415e-06,
"loss": 0.0429,
"step": 662
},
{
"epoch": 0.854381443298969,
"grad_norm": 1.9630863666534424,
"learning_rate": 1.2657092529767644e-06,
"loss": 0.017,
"step": 663
},
{
"epoch": 0.8556701030927835,
"grad_norm": 1.8519372940063477,
"learning_rate": 1.2438808479868715e-06,
"loss": 0.0054,
"step": 664
},
{
"epoch": 0.8569587628865979,
"grad_norm": 0.13302436470985413,
"learning_rate": 1.2222298210280103e-06,
"loss": 0.0003,
"step": 665
},
{
"epoch": 0.8582474226804123,
"grad_norm": 0.8774328231811523,
"learning_rate": 1.200756610698205e-06,
"loss": 0.0016,
"step": 666
},
{
"epoch": 0.8595360824742269,
"grad_norm": 0.1723754107952118,
"learning_rate": 1.1794616519933344e-06,
"loss": 0.0008,
"step": 667
},
{
"epoch": 0.8608247422680413,
"grad_norm": 4.612834930419922,
"learning_rate": 1.1583453762983289e-06,
"loss": 0.0165,
"step": 668
},
{
"epoch": 0.8621134020618557,
"grad_norm": 4.430521011352539,
"learning_rate": 1.137408211378429e-06,
"loss": 0.0274,
"step": 669
},
{
"epoch": 0.8634020618556701,
"grad_norm": 2.550323963165283,
"learning_rate": 1.1166505813705187e-06,
"loss": 0.0115,
"step": 670
},
{
"epoch": 0.8646907216494846,
"grad_norm": 3.418363094329834,
"learning_rate": 1.09607290677453e-06,
"loss": 0.0195,
"step": 671
},
{
"epoch": 0.865979381443299,
"grad_norm": 1.712893009185791,
"learning_rate": 1.0756756044449358e-06,
"loss": 0.0095,
"step": 672
},
{
"epoch": 0.8672680412371134,
"grad_norm": 0.8803926110267639,
"learning_rate": 1.0554590875822924e-06,
"loss": 0.0013,
"step": 673
},
{
"epoch": 0.8685567010309279,
"grad_norm": 0.15702317655086517,
"learning_rate": 1.035423765724879e-06,
"loss": 0.0006,
"step": 674
},
{
"epoch": 0.8698453608247423,
"grad_norm": 0.7960672974586487,
"learning_rate": 1.015570044740396e-06,
"loss": 0.004,
"step": 675
},
{
"epoch": 0.8711340206185567,
"grad_norm": 0.0636625736951828,
"learning_rate": 9.958983268177425e-07,
"loss": 0.0003,
"step": 676
},
{
"epoch": 0.8724226804123711,
"grad_norm": 0.5046800971031189,
"learning_rate": 9.764090104588741e-07,
"loss": 0.0011,
"step": 677
},
{
"epoch": 0.8737113402061856,
"grad_norm": 0.18947793543338776,
"learning_rate": 9.571024904707238e-07,
"loss": 0.0014,
"step": 678
},
{
"epoch": 0.875,
"grad_norm": 1.3208938837051392,
"learning_rate": 9.379791579572118e-07,
"loss": 0.0047,
"step": 679
},
{
"epoch": 0.8762886597938144,
"grad_norm": 1.1269551515579224,
"learning_rate": 9.190394003113123e-07,
"loss": 0.0038,
"step": 680
},
{
"epoch": 0.8762886597938144,
"eval_accuracy": 0.9980139026812314,
"eval_f1": 0.9642857142857143,
"eval_loss": 0.013650565408170223,
"eval_precision": 0.9818181818181818,
"eval_recall": 0.9473684210526315,
"eval_runtime": 85.0138,
"eval_samples_per_second": 5.352,
"eval_steps_per_second": 0.176,
"step": 680
},
{
"epoch": 0.8775773195876289,
"grad_norm": 1.984449863433838,
"learning_rate": 9.002836012072169e-07,
"loss": 0.0023,
"step": 681
},
{
"epoch": 0.8788659793814433,
"grad_norm": 1.0982954502105713,
"learning_rate": 8.817121405925543e-07,
"loss": 0.006,
"step": 682
},
{
"epoch": 0.8801546391752577,
"grad_norm": 1.77447509765625,
"learning_rate": 8.633253946806974e-07,
"loss": 0.0145,
"step": 683
},
{
"epoch": 0.8814432989690721,
"grad_norm": 0.07710134983062744,
"learning_rate": 8.451237359431397e-07,
"loss": 0.0004,
"step": 684
},
{
"epoch": 0.8827319587628866,
"grad_norm": 0.9491273760795593,
"learning_rate": 8.271075331019541e-07,
"loss": 0.0092,
"step": 685
},
{
"epoch": 0.884020618556701,
"grad_norm": 0.14051373302936554,
"learning_rate": 8.092771511223185e-07,
"loss": 0.0011,
"step": 686
},
{
"epoch": 0.8853092783505154,
"grad_norm": 0.11947651207447052,
"learning_rate": 7.916329512051235e-07,
"loss": 0.0006,
"step": 687
},
{
"epoch": 0.8865979381443299,
"grad_norm": 2.8755271434783936,
"learning_rate": 7.741752907796584e-07,
"loss": 0.0148,
"step": 688
},
{
"epoch": 0.8878865979381443,
"grad_norm": 0.33898699283599854,
"learning_rate": 7.56904523496369e-07,
"loss": 0.001,
"step": 689
},
{
"epoch": 0.8891752577319587,
"grad_norm": 1.2820080518722534,
"learning_rate": 7.398209992196914e-07,
"loss": 0.0021,
"step": 690
},
{
"epoch": 0.8904639175257731,
"grad_norm": 0.06955321133136749,
"learning_rate": 7.229250640209661e-07,
"loss": 0.0003,
"step": 691
},
{
"epoch": 0.8917525773195877,
"grad_norm": 0.3767196238040924,
"learning_rate": 7.062170601714302e-07,
"loss": 0.0014,
"step": 692
},
{
"epoch": 0.8930412371134021,
"grad_norm": 5.599634170532227,
"learning_rate": 6.896973261352779e-07,
"loss": 0.0293,
"step": 693
},
{
"epoch": 0.8943298969072165,
"grad_norm": 0.2771053910255432,
"learning_rate": 6.73366196562808e-07,
"loss": 0.0013,
"step": 694
},
{
"epoch": 0.895618556701031,
"grad_norm": 2.668797492980957,
"learning_rate": 6.572240022836451e-07,
"loss": 0.0139,
"step": 695
},
{
"epoch": 0.8969072164948454,
"grad_norm": 1.3741244077682495,
"learning_rate": 6.412710703000368e-07,
"loss": 0.004,
"step": 696
},
{
"epoch": 0.8981958762886598,
"grad_norm": 4.912818908691406,
"learning_rate": 6.255077237802288e-07,
"loss": 0.0207,
"step": 697
},
{
"epoch": 0.8994845360824743,
"grad_norm": 2.466047525405884,
"learning_rate": 6.099342820519183e-07,
"loss": 0.0217,
"step": 698
},
{
"epoch": 0.9007731958762887,
"grad_norm": 0.5519626140594482,
"learning_rate": 5.945510605957861e-07,
"loss": 0.0022,
"step": 699
},
{
"epoch": 0.9020618556701031,
"grad_norm": 2.488891124725342,
"learning_rate": 5.79358371039106e-07,
"loss": 0.011,
"step": 700
},
{
"epoch": 0.9020618556701031,
"eval_accuracy": 0.9975173783515392,
"eval_f1": 0.9557522123893806,
"eval_loss": 0.013403017073869705,
"eval_precision": 0.9642857142857143,
"eval_recall": 0.9473684210526315,
"eval_runtime": 85.6723,
"eval_samples_per_second": 5.311,
"eval_steps_per_second": 0.175,
"step": 700
},
{
"epoch": 0.9033505154639175,
"grad_norm": 3.9940011501312256,
"learning_rate": 5.643565211494285e-07,
"loss": 0.0374,
"step": 701
},
{
"epoch": 0.904639175257732,
"grad_norm": 0.029992813244462013,
"learning_rate": 5.495458148283505e-07,
"loss": 0.0003,
"step": 702
},
{
"epoch": 0.9059278350515464,
"grad_norm": 0.03322442248463631,
"learning_rate": 5.349265521053603e-07,
"loss": 0.0002,
"step": 703
},
{
"epoch": 0.9072164948453608,
"grad_norm": 1.478757381439209,
"learning_rate": 5.204990291317535e-07,
"loss": 0.0026,
"step": 704
},
{
"epoch": 0.9085051546391752,
"grad_norm": 3.334146499633789,
"learning_rate": 5.062635381746362e-07,
"loss": 0.0251,
"step": 705
},
{
"epoch": 0.9097938144329897,
"grad_norm": 0.7413277626037598,
"learning_rate": 4.92220367611006e-07,
"loss": 0.0037,
"step": 706
},
{
"epoch": 0.9110824742268041,
"grad_norm": 0.4254130423069,
"learning_rate": 4.783698019219118e-07,
"loss": 0.0014,
"step": 707
},
{
"epoch": 0.9123711340206185,
"grad_norm": 2.775209426879883,
"learning_rate": 4.647121216866857e-07,
"loss": 0.026,
"step": 708
},
{
"epoch": 0.913659793814433,
"grad_norm": 1.4021016359329224,
"learning_rate": 4.512476035772628e-07,
"loss": 0.0094,
"step": 709
},
{
"epoch": 0.9149484536082474,
"grad_norm": 0.19824525713920593,
"learning_rate": 4.3797652035257544e-07,
"loss": 0.0016,
"step": 710
},
{
"epoch": 0.9162371134020618,
"grad_norm": 1.9552204608917236,
"learning_rate": 4.248991408530279e-07,
"loss": 0.0136,
"step": 711
},
{
"epoch": 0.9175257731958762,
"grad_norm": 0.1556915044784546,
"learning_rate": 4.1201572999505e-07,
"loss": 0.0004,
"step": 712
},
{
"epoch": 0.9188144329896907,
"grad_norm": 4.923846244812012,
"learning_rate": 3.9932654876573164e-07,
"loss": 0.0145,
"step": 713
},
{
"epoch": 0.9201030927835051,
"grad_norm": 0.12035495787858963,
"learning_rate": 3.8683185421753313e-07,
"loss": 0.0005,
"step": 714
},
{
"epoch": 0.9213917525773195,
"grad_norm": 1.2554630041122437,
"learning_rate": 3.74531899463082e-07,
"loss": 0.0086,
"step": 715
},
{
"epoch": 0.9226804123711341,
"grad_norm": 0.042812976986169815,
"learning_rate": 3.6242693367004365e-07,
"loss": 0.0002,
"step": 716
},
{
"epoch": 0.9239690721649485,
"grad_norm": 0.1420414000749588,
"learning_rate": 3.5051720205606877e-07,
"loss": 0.0009,
"step": 717
},
{
"epoch": 0.9252577319587629,
"grad_norm": 0.12338154017925262,
"learning_rate": 3.38802945883836e-07,
"loss": 0.001,
"step": 718
},
{
"epoch": 0.9265463917525774,
"grad_norm": 1.5074167251586914,
"learning_rate": 3.2728440245615724e-07,
"loss": 0.004,
"step": 719
},
{
"epoch": 0.9278350515463918,
"grad_norm": 4.496931076049805,
"learning_rate": 3.1596180511117235e-07,
"loss": 0.0144,
"step": 720
},
{
"epoch": 0.9278350515463918,
"eval_accuracy": 0.9975173783515392,
"eval_f1": 0.9557522123893806,
"eval_loss": 0.013383620418608189,
"eval_precision": 0.9642857142857143,
"eval_recall": 0.9473684210526315,
"eval_runtime": 85.8622,
"eval_samples_per_second": 5.299,
"eval_steps_per_second": 0.175,
"step": 720
},
{
"epoch": 0.9291237113402062,
"grad_norm": 3.5359578132629395,
"learning_rate": 3.048353832176221e-07,
"loss": 0.0237,
"step": 721
},
{
"epoch": 0.9304123711340206,
"grad_norm": 0.27863824367523193,
"learning_rate": 2.939053621702015e-07,
"loss": 0.0012,
"step": 722
},
{
"epoch": 0.9317010309278351,
"grad_norm": 2.338470935821533,
"learning_rate": 2.83171963384995e-07,
"loss": 0.0099,
"step": 723
},
{
"epoch": 0.9329896907216495,
"grad_norm": 4.0552592277526855,
"learning_rate": 2.7263540429498747e-07,
"loss": 0.0294,
"step": 724
},
{
"epoch": 0.9342783505154639,
"grad_norm": 1.4125486612319946,
"learning_rate": 2.6229589834566807e-07,
"loss": 0.0324,
"step": 725
},
{
"epoch": 0.9355670103092784,
"grad_norm": 3.3217031955718994,
"learning_rate": 2.5215365499069446e-07,
"loss": 0.0062,
"step": 726
},
{
"epoch": 0.9368556701030928,
"grad_norm": 0.8493993878364563,
"learning_rate": 2.4220887968765873e-07,
"loss": 0.002,
"step": 727
},
{
"epoch": 0.9381443298969072,
"grad_norm": 3.687810182571411,
"learning_rate": 2.3246177389392388e-07,
"loss": 0.037,
"step": 728
},
{
"epoch": 0.9394329896907216,
"grad_norm": 0.4572630822658539,
"learning_rate": 2.229125350625394e-07,
"loss": 0.0012,
"step": 729
},
{
"epoch": 0.9407216494845361,
"grad_norm": 0.3448236882686615,
"learning_rate": 2.1356135663824328e-07,
"loss": 0.0017,
"step": 730
},
{
"epoch": 0.9420103092783505,
"grad_norm": 1.1979801654815674,
"learning_rate": 2.0440842805354522e-07,
"loss": 0.0174,
"step": 731
},
{
"epoch": 0.9432989690721649,
"grad_norm": 0.3525691330432892,
"learning_rate": 1.9545393472488738e-07,
"loss": 0.0019,
"step": 732
},
{
"epoch": 0.9445876288659794,
"grad_norm": 1.4202477931976318,
"learning_rate": 1.866980580488842e-07,
"loss": 0.0269,
"step": 733
},
{
"epoch": 0.9458762886597938,
"grad_norm": 1.2961419820785522,
"learning_rate": 1.7814097539865626e-07,
"loss": 0.0023,
"step": 734
},
{
"epoch": 0.9471649484536082,
"grad_norm": 0.17165932059288025,
"learning_rate": 1.6978286012023225e-07,
"loss": 0.0009,
"step": 735
},
{
"epoch": 0.9484536082474226,
"grad_norm": 0.12149068713188171,
"learning_rate": 1.6162388152903498e-07,
"loss": 0.0005,
"step": 736
},
{
"epoch": 0.9497422680412371,
"grad_norm": 0.8597332835197449,
"learning_rate": 1.5366420490645738e-07,
"loss": 0.0028,
"step": 737
},
{
"epoch": 0.9510309278350515,
"grad_norm": 0.13404878973960876,
"learning_rate": 1.4590399149650769e-07,
"loss": 0.0004,
"step": 738
},
{
"epoch": 0.9523195876288659,
"grad_norm": 1.938827633857727,
"learning_rate": 1.3834339850254952e-07,
"loss": 0.0083,
"step": 739
},
{
"epoch": 0.9536082474226805,
"grad_norm": 1.4186664819717407,
"learning_rate": 1.309825790841146e-07,
"loss": 0.0284,
"step": 740
},
{
"epoch": 0.9536082474226805,
"eval_accuracy": 0.9980139026812314,
"eval_f1": 0.9642857142857143,
"eval_loss": 0.013383138924837112,
"eval_precision": 0.9818181818181818,
"eval_recall": 0.9473684210526315,
"eval_runtime": 85.8161,
"eval_samples_per_second": 5.302,
"eval_steps_per_second": 0.175,
"step": 740
},
{
"epoch": 0.9548969072164949,
"grad_norm": 1.9752976894378662,
"learning_rate": 1.2382168235379742e-07,
"loss": 0.0028,
"step": 741
},
{
"epoch": 0.9561855670103093,
"grad_norm": 4.70041036605835,
"learning_rate": 1.1686085337423991e-07,
"loss": 0.0102,
"step": 742
},
{
"epoch": 0.9574742268041238,
"grad_norm": 3.6995646953582764,
"learning_rate": 1.1010023315518592e-07,
"loss": 0.0218,
"step": 743
},
{
"epoch": 0.9587628865979382,
"grad_norm": 2.3631069660186768,
"learning_rate": 1.0353995865063138e-07,
"loss": 0.0306,
"step": 744
},
{
"epoch": 0.9600515463917526,
"grad_norm": 0.07328186929225922,
"learning_rate": 9.718016275604759e-08,
"loss": 0.0004,
"step": 745
},
{
"epoch": 0.961340206185567,
"grad_norm": 0.09281091392040253,
"learning_rate": 9.10209743056889e-08,
"loss": 0.0006,
"step": 746
},
{
"epoch": 0.9626288659793815,
"grad_norm": 2.129312753677368,
"learning_rate": 8.506251806997934e-08,
"loss": 0.0418,
"step": 747
},
{
"epoch": 0.9639175257731959,
"grad_norm": 0.45759478211402893,
"learning_rate": 7.930491475299229e-08,
"loss": 0.001,
"step": 748
},
{
"epoch": 0.9652061855670103,
"grad_norm": 0.9310470819473267,
"learning_rate": 7.37482809900003e-08,
"loss": 0.0023,
"step": 749
},
{
"epoch": 0.9664948453608248,
"grad_norm": 0.8306396007537842,
"learning_rate": 6.839272934511143e-08,
"loss": 0.0038,
"step": 750
},
{
"epoch": 0.9677835051546392,
"grad_norm": 0.3015538156032562,
"learning_rate": 6.323836830899321e-08,
"loss": 0.001,
"step": 751
},
{
"epoch": 0.9690721649484536,
"grad_norm": 2.0582327842712402,
"learning_rate": 5.828530229667228e-08,
"loss": 0.0127,
"step": 752
},
{
"epoch": 0.970360824742268,
"grad_norm": 0.13917666673660278,
"learning_rate": 5.353363164541825e-08,
"loss": 0.001,
"step": 753
},
{
"epoch": 0.9716494845360825,
"grad_norm": 1.766170620918274,
"learning_rate": 4.898345261271531e-08,
"loss": 0.0033,
"step": 754
},
{
"epoch": 0.9729381443298969,
"grad_norm": 2.295456647872925,
"learning_rate": 4.463485737430606e-08,
"loss": 0.0045,
"step": 755
},
{
"epoch": 0.9742268041237113,
"grad_norm": 3.3458142280578613,
"learning_rate": 4.0487934022328533e-08,
"loss": 0.0081,
"step": 756
},
{
"epoch": 0.9755154639175257,
"grad_norm": 3.1655807495117188,
"learning_rate": 3.654276656353206e-08,
"loss": 0.0101,
"step": 757
},
{
"epoch": 0.9768041237113402,
"grad_norm": 0.16411763429641724,
"learning_rate": 3.27994349175742e-08,
"loss": 0.0005,
"step": 758
},
{
"epoch": 0.9780927835051546,
"grad_norm": 3.4274473190307617,
"learning_rate": 2.9258014915399813e-08,
"loss": 0.0037,
"step": 759
},
{
"epoch": 0.979381443298969,
"grad_norm": 1.7726308107376099,
"learning_rate": 2.591857829770672e-08,
"loss": 0.0066,
"step": 760
},
{
"epoch": 0.979381443298969,
"eval_accuracy": 0.9975173783515392,
"eval_f1": 0.9557522123893806,
"eval_loss": 0.013445839285850525,
"eval_precision": 0.9642857142857143,
"eval_recall": 0.9473684210526315,
"eval_runtime": 86.1309,
"eval_samples_per_second": 5.283,
"eval_steps_per_second": 0.174,
"step": 760
},
{
"epoch": 0.9806701030927835,
"grad_norm": 3.986452579498291,
"learning_rate": 2.278119271349466e-08,
"loss": 0.0265,
"step": 761
},
{
"epoch": 0.9819587628865979,
"grad_norm": 1.644910454750061,
"learning_rate": 1.984592171869082e-08,
"loss": 0.0039,
"step": 762
},
{
"epoch": 0.9832474226804123,
"grad_norm": 0.0761791542172432,
"learning_rate": 1.711282477486642e-08,
"loss": 0.0003,
"step": 763
},
{
"epoch": 0.9845360824742269,
"grad_norm": 0.8887882828712463,
"learning_rate": 1.4581957248026579e-08,
"loss": 0.0047,
"step": 764
},
{
"epoch": 0.9858247422680413,
"grad_norm": 1.5930033922195435,
"learning_rate": 1.2253370407495636e-08,
"loss": 0.0062,
"step": 765
},
{
"epoch": 0.9871134020618557,
"grad_norm": 0.7668823599815369,
"learning_rate": 1.0127111424872437e-08,
"loss": 0.0027,
"step": 766
},
{
"epoch": 0.9884020618556701,
"grad_norm": 3.0065221786499023,
"learning_rate": 8.203223373078883e-09,
"loss": 0.0152,
"step": 767
},
{
"epoch": 0.9896907216494846,
"grad_norm": 0.5820819735527039,
"learning_rate": 6.481745225485059e-09,
"loss": 0.0016,
"step": 768
},
{
"epoch": 0.990979381443299,
"grad_norm": 0.7048105597496033,
"learning_rate": 4.962711855120983e-09,
"loss": 0.0027,
"step": 769
},
{
"epoch": 0.9922680412371134,
"grad_norm": 2.509854555130005,
"learning_rate": 3.6461540339682855e-09,
"loss": 0.0188,
"step": 770
},
{
"epoch": 0.9935567010309279,
"grad_norm": 0.2154129147529602,
"learning_rate": 2.532098432341812e-09,
"loss": 0.0008,
"step": 771
},
{
"epoch": 0.9948453608247423,
"grad_norm": 2.393842935562134,
"learning_rate": 1.6205676183411733e-09,
"loss": 0.0079,
"step": 772
},
{
"epoch": 0.9961340206185567,
"grad_norm": 2.176377058029175,
"learning_rate": 9.115800574022171e-10,
"loss": 0.013,
"step": 773
},
{
"epoch": 0.9974226804123711,
"grad_norm": 2.1431355476379395,
"learning_rate": 4.0515011191621933e-10,
"loss": 0.0134,
"step": 774
},
{
"epoch": 0.9987113402061856,
"grad_norm": 1.2513763904571533,
"learning_rate": 1.0128804094233779e-10,
"loss": 0.0034,
"step": 775
},
{
"epoch": 1.0,
"grad_norm": 2.7385754585266113,
"learning_rate": 0.0,
"loss": 0.0062,
"step": 776
}
],
"logging_steps": 1,
"max_steps": 776,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.5857289592242176e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}