{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.25773195876288657,
"eval_steps": 20,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0,
"eval_accuracy": 0.9369414101290964,
"eval_f1": 0.18064516129032257,
"eval_loss": 0.32557418942451477,
"eval_precision": 0.14285714285714285,
"eval_recall": 0.24561403508771928,
"eval_runtime": 85.4393,
"eval_samples_per_second": 5.325,
"eval_steps_per_second": 0.176,
"step": 0
},
{
"epoch": 0.001288659793814433,
"grad_norm": 4.328640937805176,
"learning_rate": 2.564102564102564e-07,
"loss": 0.5948,
"step": 1
},
{
"epoch": 0.002577319587628866,
"grad_norm": 4.026719570159912,
"learning_rate": 5.128205128205128e-07,
"loss": 0.5193,
"step": 2
},
{
"epoch": 0.003865979381443299,
"grad_norm": 4.2378315925598145,
"learning_rate": 7.692307692307694e-07,
"loss": 0.5631,
"step": 3
},
{
"epoch": 0.005154639175257732,
"grad_norm": 3.568166971206665,
"learning_rate": 1.0256410256410257e-06,
"loss": 0.5153,
"step": 4
},
{
"epoch": 0.006443298969072165,
"grad_norm": 3.5194778442382812,
"learning_rate": 1.282051282051282e-06,
"loss": 0.4554,
"step": 5
},
{
"epoch": 0.007731958762886598,
"grad_norm": 3.977821111679077,
"learning_rate": 1.5384615384615387e-06,
"loss": 0.5351,
"step": 6
},
{
"epoch": 0.00902061855670103,
"grad_norm": 3.5472445487976074,
"learning_rate": 1.794871794871795e-06,
"loss": 0.4795,
"step": 7
},
{
"epoch": 0.010309278350515464,
"grad_norm": 4.021523475646973,
"learning_rate": 2.0512820512820513e-06,
"loss": 0.5567,
"step": 8
},
{
"epoch": 0.011597938144329897,
"grad_norm": 3.9711642265319824,
"learning_rate": 2.307692307692308e-06,
"loss": 0.5156,
"step": 9
},
{
"epoch": 0.01288659793814433,
"grad_norm": 3.964317560195923,
"learning_rate": 2.564102564102564e-06,
"loss": 0.5192,
"step": 10
},
{
"epoch": 0.014175257731958763,
"grad_norm": 4.49519157409668,
"learning_rate": 2.8205128205128207e-06,
"loss": 0.5763,
"step": 11
},
{
"epoch": 0.015463917525773196,
"grad_norm": 3.9775915145874023,
"learning_rate": 3.0769230769230774e-06,
"loss": 0.5087,
"step": 12
},
{
"epoch": 0.01675257731958763,
"grad_norm": 3.533947706222534,
"learning_rate": 3.3333333333333333e-06,
"loss": 0.5278,
"step": 13
},
{
"epoch": 0.01804123711340206,
"grad_norm": 3.2834880352020264,
"learning_rate": 3.58974358974359e-06,
"loss": 0.497,
"step": 14
},
{
"epoch": 0.019329896907216496,
"grad_norm": 3.62939190864563,
"learning_rate": 3.846153846153847e-06,
"loss": 0.4718,
"step": 15
},
{
"epoch": 0.020618556701030927,
"grad_norm": 3.499007225036621,
"learning_rate": 4.102564102564103e-06,
"loss": 0.4612,
"step": 16
},
{
"epoch": 0.02190721649484536,
"grad_norm": 3.6551826000213623,
"learning_rate": 4.358974358974359e-06,
"loss": 0.5116,
"step": 17
},
{
"epoch": 0.023195876288659795,
"grad_norm": 3.7035470008850098,
"learning_rate": 4.615384615384616e-06,
"loss": 0.5265,
"step": 18
},
{
"epoch": 0.024484536082474227,
"grad_norm": 3.528616189956665,
"learning_rate": 4.871794871794872e-06,
"loss": 0.4926,
"step": 19
},
{
"epoch": 0.02577319587628866,
"grad_norm": 3.614694833755493,
"learning_rate": 5.128205128205128e-06,
"loss": 0.4498,
"step": 20
},
{
"epoch": 0.02577319587628866,
"eval_accuracy": 0.9473684210526315,
"eval_f1": 0.15873015873015872,
"eval_loss": 0.2867887020111084,
"eval_precision": 0.14492753623188406,
"eval_recall": 0.17543859649122806,
"eval_runtime": 85.3774,
"eval_samples_per_second": 5.329,
"eval_steps_per_second": 0.176,
"step": 20
},
{
"epoch": 0.027061855670103094,
"grad_norm": 3.7909672260284424,
"learning_rate": 5.384615384615385e-06,
"loss": 0.4553,
"step": 21
},
{
"epoch": 0.028350515463917526,
"grad_norm": 3.3818626403808594,
"learning_rate": 5.641025641025641e-06,
"loss": 0.4058,
"step": 22
},
{
"epoch": 0.029639175257731958,
"grad_norm": 3.4036498069763184,
"learning_rate": 5.897435897435898e-06,
"loss": 0.3923,
"step": 23
},
{
"epoch": 0.030927835051546393,
"grad_norm": 4.077082633972168,
"learning_rate": 6.153846153846155e-06,
"loss": 0.433,
"step": 24
},
{
"epoch": 0.03221649484536082,
"grad_norm": 3.6889731884002686,
"learning_rate": 6.410256410256412e-06,
"loss": 0.4107,
"step": 25
},
{
"epoch": 0.03350515463917526,
"grad_norm": 3.24767804145813,
"learning_rate": 6.666666666666667e-06,
"loss": 0.3916,
"step": 26
},
{
"epoch": 0.03479381443298969,
"grad_norm": 3.6298370361328125,
"learning_rate": 6.923076923076923e-06,
"loss": 0.3775,
"step": 27
},
{
"epoch": 0.03608247422680412,
"grad_norm": 3.0387685298919678,
"learning_rate": 7.17948717948718e-06,
"loss": 0.3455,
"step": 28
},
{
"epoch": 0.037371134020618556,
"grad_norm": 2.6114144325256348,
"learning_rate": 7.435897435897437e-06,
"loss": 0.3187,
"step": 29
},
{
"epoch": 0.03865979381443299,
"grad_norm": 2.6260972023010254,
"learning_rate": 7.692307692307694e-06,
"loss": 0.3039,
"step": 30
},
{
"epoch": 0.03994845360824742,
"grad_norm": 3.2159814834594727,
"learning_rate": 7.948717948717949e-06,
"loss": 0.3116,
"step": 31
},
{
"epoch": 0.041237113402061855,
"grad_norm": 2.923689603805542,
"learning_rate": 8.205128205128205e-06,
"loss": 0.3317,
"step": 32
},
{
"epoch": 0.04252577319587629,
"grad_norm": 3.0011069774627686,
"learning_rate": 8.461538461538462e-06,
"loss": 0.3035,
"step": 33
},
{
"epoch": 0.04381443298969072,
"grad_norm": 2.754927396774292,
"learning_rate": 8.717948717948719e-06,
"loss": 0.2897,
"step": 34
},
{
"epoch": 0.045103092783505154,
"grad_norm": 2.29058837890625,
"learning_rate": 8.974358974358976e-06,
"loss": 0.2669,
"step": 35
},
{
"epoch": 0.04639175257731959,
"grad_norm": 2.5178396701812744,
"learning_rate": 9.230769230769232e-06,
"loss": 0.2534,
"step": 36
},
{
"epoch": 0.04768041237113402,
"grad_norm": 2.3435192108154297,
"learning_rate": 9.487179487179487e-06,
"loss": 0.2393,
"step": 37
},
{
"epoch": 0.04896907216494845,
"grad_norm": 2.382751226425171,
"learning_rate": 9.743589743589744e-06,
"loss": 0.2307,
"step": 38
},
{
"epoch": 0.05025773195876289,
"grad_norm": 1.9250915050506592,
"learning_rate": 1e-05,
"loss": 0.1963,
"step": 39
},
{
"epoch": 0.05154639175257732,
"grad_norm": 1.9028986692428589,
"learning_rate": 1.0256410256410256e-05,
"loss": 0.242,
"step": 40
},
{
"epoch": 0.05154639175257732,
"eval_accuracy": 0.9672293942403177,
"eval_f1": 0.08333333333333333,
"eval_loss": 0.14344234764575958,
"eval_precision": 0.2,
"eval_recall": 0.05263157894736842,
"eval_runtime": 85.3093,
"eval_samples_per_second": 5.334,
"eval_steps_per_second": 0.176,
"step": 40
},
{
"epoch": 0.05283505154639175,
"grad_norm": 1.5781856775283813,
"learning_rate": 1.0512820512820514e-05,
"loss": 0.197,
"step": 41
},
{
"epoch": 0.05412371134020619,
"grad_norm": 1.4305051565170288,
"learning_rate": 1.076923076923077e-05,
"loss": 0.1876,
"step": 42
},
{
"epoch": 0.055412371134020616,
"grad_norm": 1.1940586566925049,
"learning_rate": 1.1025641025641028e-05,
"loss": 0.2308,
"step": 43
},
{
"epoch": 0.05670103092783505,
"grad_norm": 1.2878607511520386,
"learning_rate": 1.1282051282051283e-05,
"loss": 0.1427,
"step": 44
},
{
"epoch": 0.05798969072164949,
"grad_norm": 0.896811842918396,
"learning_rate": 1.1538461538461538e-05,
"loss": 0.1809,
"step": 45
},
{
"epoch": 0.059278350515463915,
"grad_norm": 0.8891208171844482,
"learning_rate": 1.1794871794871796e-05,
"loss": 0.155,
"step": 46
},
{
"epoch": 0.06056701030927835,
"grad_norm": 1.0271227359771729,
"learning_rate": 1.2051282051282051e-05,
"loss": 0.1985,
"step": 47
},
{
"epoch": 0.061855670103092786,
"grad_norm": 0.7700079679489136,
"learning_rate": 1.230769230769231e-05,
"loss": 0.1262,
"step": 48
},
{
"epoch": 0.06314432989690721,
"grad_norm": 1.125436544418335,
"learning_rate": 1.2564102564102565e-05,
"loss": 0.1685,
"step": 49
},
{
"epoch": 0.06443298969072164,
"grad_norm": 1.251115083694458,
"learning_rate": 1.2820512820512823e-05,
"loss": 0.1999,
"step": 50
},
{
"epoch": 0.06572164948453608,
"grad_norm": 1.178985595703125,
"learning_rate": 1.3076923076923078e-05,
"loss": 0.1657,
"step": 51
},
{
"epoch": 0.06701030927835051,
"grad_norm": 1.3865740299224854,
"learning_rate": 1.3333333333333333e-05,
"loss": 0.201,
"step": 52
},
{
"epoch": 0.06829896907216494,
"grad_norm": 0.8845710158348083,
"learning_rate": 1.3589743589743592e-05,
"loss": 0.1398,
"step": 53
},
{
"epoch": 0.06958762886597938,
"grad_norm": 1.4564330577850342,
"learning_rate": 1.3846153846153847e-05,
"loss": 0.1913,
"step": 54
},
{
"epoch": 0.07087628865979381,
"grad_norm": 0.7712787985801697,
"learning_rate": 1.4102564102564105e-05,
"loss": 0.1112,
"step": 55
},
{
"epoch": 0.07216494845360824,
"grad_norm": 0.8379471898078918,
"learning_rate": 1.435897435897436e-05,
"loss": 0.1242,
"step": 56
},
{
"epoch": 0.07345360824742268,
"grad_norm": 1.1431857347488403,
"learning_rate": 1.4615384615384615e-05,
"loss": 0.16,
"step": 57
},
{
"epoch": 0.07474226804123711,
"grad_norm": 0.9613205790519714,
"learning_rate": 1.4871794871794874e-05,
"loss": 0.1257,
"step": 58
},
{
"epoch": 0.07603092783505154,
"grad_norm": 0.7836907505989075,
"learning_rate": 1.5128205128205129e-05,
"loss": 0.1252,
"step": 59
},
{
"epoch": 0.07731958762886598,
"grad_norm": 0.9727709889411926,
"learning_rate": 1.5384615384615387e-05,
"loss": 0.1628,
"step": 60
},
{
"epoch": 0.07731958762886598,
"eval_accuracy": 0.9692154915590864,
"eval_f1": 0.20512820512820512,
"eval_loss": 0.10804814100265503,
"eval_precision": 0.38095238095238093,
"eval_recall": 0.14035087719298245,
"eval_runtime": 86.5949,
"eval_samples_per_second": 5.254,
"eval_steps_per_second": 0.173,
"step": 60
},
{
"epoch": 0.07860824742268041,
"grad_norm": 0.7048820853233337,
"learning_rate": 1.5641025641025644e-05,
"loss": 0.1337,
"step": 61
},
{
"epoch": 0.07989690721649484,
"grad_norm": 0.6462810635566711,
"learning_rate": 1.5897435897435897e-05,
"loss": 0.076,
"step": 62
},
{
"epoch": 0.08118556701030928,
"grad_norm": 0.7791882753372192,
"learning_rate": 1.6153846153846154e-05,
"loss": 0.0935,
"step": 63
},
{
"epoch": 0.08247422680412371,
"grad_norm": 0.5717423558235168,
"learning_rate": 1.641025641025641e-05,
"loss": 0.0892,
"step": 64
},
{
"epoch": 0.08376288659793814,
"grad_norm": 0.6709016561508179,
"learning_rate": 1.6666666666666667e-05,
"loss": 0.0581,
"step": 65
},
{
"epoch": 0.08505154639175258,
"grad_norm": 0.6802282333374023,
"learning_rate": 1.6923076923076924e-05,
"loss": 0.1023,
"step": 66
},
{
"epoch": 0.08634020618556701,
"grad_norm": 0.7112599611282349,
"learning_rate": 1.717948717948718e-05,
"loss": 0.1213,
"step": 67
},
{
"epoch": 0.08762886597938144,
"grad_norm": 1.2926205396652222,
"learning_rate": 1.7435897435897438e-05,
"loss": 0.1627,
"step": 68
},
{
"epoch": 0.08891752577319588,
"grad_norm": 1.408495545387268,
"learning_rate": 1.7692307692307694e-05,
"loss": 0.1781,
"step": 69
},
{
"epoch": 0.09020618556701031,
"grad_norm": 1.0148080587387085,
"learning_rate": 1.794871794871795e-05,
"loss": 0.0919,
"step": 70
},
{
"epoch": 0.09149484536082474,
"grad_norm": 1.0437681674957275,
"learning_rate": 1.8205128205128208e-05,
"loss": 0.1265,
"step": 71
},
{
"epoch": 0.09278350515463918,
"grad_norm": 0.9646249413490295,
"learning_rate": 1.8461538461538465e-05,
"loss": 0.104,
"step": 72
},
{
"epoch": 0.09407216494845361,
"grad_norm": 0.8352120518684387,
"learning_rate": 1.8717948717948718e-05,
"loss": 0.0845,
"step": 73
},
{
"epoch": 0.09536082474226804,
"grad_norm": 0.9750470519065857,
"learning_rate": 1.8974358974358975e-05,
"loss": 0.1469,
"step": 74
},
{
"epoch": 0.09664948453608248,
"grad_norm": 0.8849421739578247,
"learning_rate": 1.923076923076923e-05,
"loss": 0.0641,
"step": 75
},
{
"epoch": 0.0979381443298969,
"grad_norm": 1.2695003747940063,
"learning_rate": 1.9487179487179488e-05,
"loss": 0.1325,
"step": 76
},
{
"epoch": 0.09922680412371133,
"grad_norm": 0.9113069772720337,
"learning_rate": 1.9743589743589745e-05,
"loss": 0.0791,
"step": 77
},
{
"epoch": 0.10051546391752578,
"grad_norm": 0.863918662071228,
"learning_rate": 2e-05,
"loss": 0.0728,
"step": 78
},
{
"epoch": 0.1018041237113402,
"grad_norm": 1.0128920078277588,
"learning_rate": 1.999989871195906e-05,
"loss": 0.0443,
"step": 79
},
{
"epoch": 0.10309278350515463,
"grad_norm": 1.5655252933502197,
"learning_rate": 1.9999594849888083e-05,
"loss": 0.1241,
"step": 80
},
{
"epoch": 0.10309278350515463,
"eval_accuracy": 0.9707050645481629,
"eval_f1": 0.3917525773195876,
"eval_loss": 0.08737693727016449,
"eval_precision": 0.475,
"eval_recall": 0.3333333333333333,
"eval_runtime": 86.8429,
"eval_samples_per_second": 5.239,
"eval_steps_per_second": 0.173,
"step": 80
},
{
"epoch": 0.10438144329896908,
"grad_norm": 1.1679091453552246,
"learning_rate": 1.9999088419942598e-05,
"loss": 0.081,
"step": 81
},
{
"epoch": 0.1056701030927835,
"grad_norm": 1.3982985019683838,
"learning_rate": 1.999837943238166e-05,
"loss": 0.071,
"step": 82
},
{
"epoch": 0.10695876288659793,
"grad_norm": 2.1905858516693115,
"learning_rate": 1.999746790156766e-05,
"loss": 0.1153,
"step": 83
},
{
"epoch": 0.10824742268041238,
"grad_norm": 2.231328010559082,
"learning_rate": 1.9996353845966033e-05,
"loss": 0.1391,
"step": 84
},
{
"epoch": 0.1095360824742268,
"grad_norm": 1.6173464059829712,
"learning_rate": 1.999503728814488e-05,
"loss": 0.0958,
"step": 85
},
{
"epoch": 0.11082474226804123,
"grad_norm": 1.9609785079956055,
"learning_rate": 1.9993518254774517e-05,
"loss": 0.0864,
"step": 86
},
{
"epoch": 0.11211340206185567,
"grad_norm": 1.735422134399414,
"learning_rate": 1.999179677662692e-05,
"loss": 0.0895,
"step": 87
},
{
"epoch": 0.1134020618556701,
"grad_norm": 1.645450234413147,
"learning_rate": 1.998987288857513e-05,
"loss": 0.1078,
"step": 88
},
{
"epoch": 0.11469072164948453,
"grad_norm": 1.0082734823226929,
"learning_rate": 1.9987746629592506e-05,
"loss": 0.0504,
"step": 89
},
{
"epoch": 0.11597938144329897,
"grad_norm": 2.4662506580352783,
"learning_rate": 1.9985418042751975e-05,
"loss": 0.0982,
"step": 90
},
{
"epoch": 0.1172680412371134,
"grad_norm": 1.3186198472976685,
"learning_rate": 1.9982887175225136e-05,
"loss": 0.04,
"step": 91
},
{
"epoch": 0.11855670103092783,
"grad_norm": 1.4960401058197021,
"learning_rate": 1.998015407828131e-05,
"loss": 0.0572,
"step": 92
},
{
"epoch": 0.11984536082474227,
"grad_norm": 1.6579524278640747,
"learning_rate": 1.9977218807286507e-05,
"loss": 0.0662,
"step": 93
},
{
"epoch": 0.1211340206185567,
"grad_norm": 2.7462518215179443,
"learning_rate": 1.9974081421702296e-05,
"loss": 0.0739,
"step": 94
},
{
"epoch": 0.12242268041237113,
"grad_norm": 1.3179261684417725,
"learning_rate": 1.99707419850846e-05,
"loss": 0.0528,
"step": 95
},
{
"epoch": 0.12371134020618557,
"grad_norm": 1.08860182762146,
"learning_rate": 1.9967200565082426e-05,
"loss": 0.0417,
"step": 96
},
{
"epoch": 0.125,
"grad_norm": 2.638080358505249,
"learning_rate": 1.9963457233436468e-05,
"loss": 0.0964,
"step": 97
},
{
"epoch": 0.12628865979381443,
"grad_norm": 1.3592987060546875,
"learning_rate": 1.9959512065977673e-05,
"loss": 0.0491,
"step": 98
},
{
"epoch": 0.12757731958762886,
"grad_norm": 2.5333075523376465,
"learning_rate": 1.9955365142625694e-05,
"loss": 0.0506,
"step": 99
},
{
"epoch": 0.12886597938144329,
"grad_norm": 2.624704360961914,
"learning_rate": 1.9951016547387286e-05,
"loss": 0.0676,
"step": 100
},
{
"epoch": 0.12886597938144329,
"eval_accuracy": 0.9692154915590864,
"eval_f1": 0.5694444444444444,
"eval_loss": 0.0689799040555954,
"eval_precision": 0.47126436781609193,
"eval_recall": 0.7192982456140351,
"eval_runtime": 85.1099,
"eval_samples_per_second": 5.346,
"eval_steps_per_second": 0.176,
"step": 100
},
{
"epoch": 0.13015463917525774,
"grad_norm": 2.9534921646118164,
"learning_rate": 1.994646636835458e-05,
"loss": 0.0741,
"step": 101
},
{
"epoch": 0.13144329896907217,
"grad_norm": 2.0482945442199707,
"learning_rate": 1.9941714697703333e-05,
"loss": 0.0596,
"step": 102
},
{
"epoch": 0.1327319587628866,
"grad_norm": 0.8915924429893494,
"learning_rate": 1.9936761631691007e-05,
"loss": 0.0271,
"step": 103
},
{
"epoch": 0.13402061855670103,
"grad_norm": 3.5569581985473633,
"learning_rate": 1.993160727065489e-05,
"loss": 0.097,
"step": 104
},
{
"epoch": 0.13530927835051546,
"grad_norm": 1.0290688276290894,
"learning_rate": 1.992625171901e-05,
"loss": 0.0309,
"step": 105
},
{
"epoch": 0.13659793814432988,
"grad_norm": 3.104780673980713,
"learning_rate": 1.9920695085247012e-05,
"loss": 0.0466,
"step": 106
},
{
"epoch": 0.13788659793814434,
"grad_norm": 1.300478458404541,
"learning_rate": 1.991493748193002e-05,
"loss": 0.035,
"step": 107
},
{
"epoch": 0.13917525773195877,
"grad_norm": 1.9571739435195923,
"learning_rate": 1.9908979025694312e-05,
"loss": 0.0432,
"step": 108
},
{
"epoch": 0.1404639175257732,
"grad_norm": 0.9955072402954102,
"learning_rate": 1.9902819837243954e-05,
"loss": 0.0182,
"step": 109
},
{
"epoch": 0.14175257731958762,
"grad_norm": 1.2352385520935059,
"learning_rate": 1.989646004134937e-05,
"loss": 0.0338,
"step": 110
},
{
"epoch": 0.14304123711340205,
"grad_norm": 2.855053663253784,
"learning_rate": 1.9889899766844817e-05,
"loss": 0.0701,
"step": 111
},
{
"epoch": 0.14432989690721648,
"grad_norm": 2.372802495956421,
"learning_rate": 1.9883139146625763e-05,
"loss": 0.0386,
"step": 112
},
{
"epoch": 0.14561855670103094,
"grad_norm": 1.9221031665802002,
"learning_rate": 1.9876178317646203e-05,
"loss": 0.0277,
"step": 113
},
{
"epoch": 0.14690721649484537,
"grad_norm": 0.9431936144828796,
"learning_rate": 1.9869017420915888e-05,
"loss": 0.0188,
"step": 114
},
{
"epoch": 0.1481958762886598,
"grad_norm": 1.950210690498352,
"learning_rate": 1.9861656601497452e-05,
"loss": 0.0302,
"step": 115
},
{
"epoch": 0.14948453608247422,
"grad_norm": 3.239633560180664,
"learning_rate": 1.9854096008503495e-05,
"loss": 0.0416,
"step": 116
},
{
"epoch": 0.15077319587628865,
"grad_norm": 3.1708860397338867,
"learning_rate": 1.9846335795093547e-05,
"loss": 0.0688,
"step": 117
},
{
"epoch": 0.15206185567010308,
"grad_norm": 0.6930286288261414,
"learning_rate": 1.9838376118470965e-05,
"loss": 0.0141,
"step": 118
},
{
"epoch": 0.15335051546391754,
"grad_norm": 2.929121971130371,
"learning_rate": 1.9830217139879768e-05,
"loss": 0.034,
"step": 119
},
{
"epoch": 0.15463917525773196,
"grad_norm": 1.3847970962524414,
"learning_rate": 1.9821859024601345e-05,
"loss": 0.03,
"step": 120
},
{
"epoch": 0.15463917525773196,
"eval_accuracy": 0.9821251241310824,
"eval_f1": 0.7391304347826086,
"eval_loss": 0.04716553911566734,
"eval_precision": 0.6296296296296297,
"eval_recall": 0.8947368421052632,
"eval_runtime": 83.825,
"eval_samples_per_second": 5.428,
"eval_steps_per_second": 0.179,
"step": 120
},
{
"epoch": 0.1559278350515464,
"grad_norm": 2.072525978088379,
"learning_rate": 1.981330194195112e-05,
"loss": 0.016,
"step": 121
},
{
"epoch": 0.15721649484536082,
"grad_norm": 3.0791800022125244,
"learning_rate": 1.9804546065275116e-05,
"loss": 0.0618,
"step": 122
},
{
"epoch": 0.15850515463917525,
"grad_norm": 2.1992335319519043,
"learning_rate": 1.9795591571946454e-05,
"loss": 0.0276,
"step": 123
},
{
"epoch": 0.15979381443298968,
"grad_norm": 2.476609706878662,
"learning_rate": 1.978643864336176e-05,
"loss": 0.0207,
"step": 124
},
{
"epoch": 0.16108247422680413,
"grad_norm": 2.674210786819458,
"learning_rate": 1.9777087464937464e-05,
"loss": 0.0378,
"step": 125
},
{
"epoch": 0.16237113402061856,
"grad_norm": 2.6775150299072266,
"learning_rate": 1.9767538226106078e-05,
"loss": 0.0312,
"step": 126
},
{
"epoch": 0.163659793814433,
"grad_norm": 2.105435848236084,
"learning_rate": 1.9757791120312344e-05,
"loss": 0.0239,
"step": 127
},
{
"epoch": 0.16494845360824742,
"grad_norm": 1.7885074615478516,
"learning_rate": 1.9747846345009306e-05,
"loss": 0.0402,
"step": 128
},
{
"epoch": 0.16623711340206185,
"grad_norm": 4.384532451629639,
"learning_rate": 1.9737704101654335e-05,
"loss": 0.0674,
"step": 129
},
{
"epoch": 0.16752577319587628,
"grad_norm": 0.733161211013794,
"learning_rate": 1.9727364595705012e-05,
"loss": 0.0109,
"step": 130
},
{
"epoch": 0.16881443298969073,
"grad_norm": 2.310255765914917,
"learning_rate": 1.9716828036615006e-05,
"loss": 0.0245,
"step": 131
},
{
"epoch": 0.17010309278350516,
"grad_norm": 2.1358768939971924,
"learning_rate": 1.9706094637829797e-05,
"loss": 0.0506,
"step": 132
},
{
"epoch": 0.1713917525773196,
"grad_norm": 1.873978853225708,
"learning_rate": 1.9695164616782378e-05,
"loss": 0.0239,
"step": 133
},
{
"epoch": 0.17268041237113402,
"grad_norm": 3.210780620574951,
"learning_rate": 1.9684038194888827e-05,
"loss": 0.0453,
"step": 134
},
{
"epoch": 0.17396907216494845,
"grad_norm": 2.6000077724456787,
"learning_rate": 1.9672715597543845e-05,
"loss": 0.0222,
"step": 135
},
{
"epoch": 0.17525773195876287,
"grad_norm": 0.8902448415756226,
"learning_rate": 1.9661197054116165e-05,
"loss": 0.0114,
"step": 136
},
{
"epoch": 0.17654639175257733,
"grad_norm": 2.048377513885498,
"learning_rate": 1.964948279794393e-05,
"loss": 0.0299,
"step": 137
},
{
"epoch": 0.17783505154639176,
"grad_norm": 0.35185545682907104,
"learning_rate": 1.963757306632996e-05,
"loss": 0.0062,
"step": 138
},
{
"epoch": 0.1791237113402062,
"grad_norm": 0.8665434122085571,
"learning_rate": 1.962546810053692e-05,
"loss": 0.0122,
"step": 139
},
{
"epoch": 0.18041237113402062,
"grad_norm": 0.7568170428276062,
"learning_rate": 1.9613168145782468e-05,
"loss": 0.0109,
"step": 140
},
{
"epoch": 0.18041237113402062,
"eval_accuracy": 0.9910625620655412,
"eval_f1": 0.8448275862068966,
"eval_loss": 0.03413279354572296,
"eval_precision": 0.8305084745762712,
"eval_recall": 0.8596491228070176,
"eval_runtime": 83.9067,
"eval_samples_per_second": 5.423,
"eval_steps_per_second": 0.179,
"step": 140
},
{
"epoch": 0.18170103092783504,
"grad_norm": 2.2702317237854004,
"learning_rate": 1.960067345123427e-05,
"loss": 0.0247,
"step": 141
},
{
"epoch": 0.18298969072164947,
"grad_norm": 3.507333755493164,
"learning_rate": 1.958798427000495e-05,
"loss": 0.0297,
"step": 142
},
{
"epoch": 0.18427835051546393,
"grad_norm": 0.5789155960083008,
"learning_rate": 1.9575100859146974e-05,
"loss": 0.013,
"step": 143
},
{
"epoch": 0.18556701030927836,
"grad_norm": 1.9476535320281982,
"learning_rate": 1.956202347964743e-05,
"loss": 0.0208,
"step": 144
},
{
"epoch": 0.18685567010309279,
"grad_norm": 0.855241060256958,
"learning_rate": 1.954875239642274e-05,
"loss": 0.0071,
"step": 145
},
{
"epoch": 0.18814432989690721,
"grad_norm": 2.169466495513916,
"learning_rate": 1.9535287878313315e-05,
"loss": 0.0191,
"step": 146
},
{
"epoch": 0.18943298969072164,
"grad_norm": 1.1874339580535889,
"learning_rate": 1.952163019807809e-05,
"loss": 0.0086,
"step": 147
},
{
"epoch": 0.19072164948453607,
"grad_norm": 3.9380855560302734,
"learning_rate": 1.9507779632388997e-05,
"loss": 0.0264,
"step": 148
},
{
"epoch": 0.19201030927835053,
"grad_norm": 2.052539587020874,
"learning_rate": 1.9493736461825366e-05,
"loss": 0.0126,
"step": 149
},
{
"epoch": 0.19329896907216496,
"grad_norm": 2.4338552951812744,
"learning_rate": 1.947950097086825e-05,
"loss": 0.0426,
"step": 150
},
{
"epoch": 0.19458762886597938,
"grad_norm": 1.8210889101028442,
"learning_rate": 1.946507344789464e-05,
"loss": 0.0088,
"step": 151
},
{
"epoch": 0.1958762886597938,
"grad_norm": 0.9345032572746277,
"learning_rate": 1.945045418517165e-05,
"loss": 0.01,
"step": 152
},
{
"epoch": 0.19716494845360824,
"grad_norm": 2.274660587310791,
"learning_rate": 1.9435643478850573e-05,
"loss": 0.0208,
"step": 153
},
{
"epoch": 0.19845360824742267,
"grad_norm": 1.3613721132278442,
"learning_rate": 1.9420641628960897e-05,
"loss": 0.0136,
"step": 154
},
{
"epoch": 0.19974226804123713,
"grad_norm": 0.8850100040435791,
"learning_rate": 1.9405448939404215e-05,
"loss": 0.009,
"step": 155
},
{
"epoch": 0.20103092783505155,
"grad_norm": 0.5833643078804016,
"learning_rate": 1.9390065717948084e-05,
"loss": 0.0046,
"step": 156
},
{
"epoch": 0.20231958762886598,
"grad_norm": 0.42478522658348083,
"learning_rate": 1.9374492276219776e-05,
"loss": 0.0052,
"step": 157
},
{
"epoch": 0.2036082474226804,
"grad_norm": 1.2607591152191162,
"learning_rate": 1.9358728929699966e-05,
"loss": 0.0101,
"step": 158
},
{
"epoch": 0.20489690721649484,
"grad_norm": 1.5455127954483032,
"learning_rate": 1.9342775997716357e-05,
"loss": 0.0051,
"step": 159
},
{
"epoch": 0.20618556701030927,
"grad_norm": 5.292853832244873,
"learning_rate": 1.9326633803437197e-05,
"loss": 0.043,
"step": 160
},
{
"epoch": 0.20618556701030927,
"eval_accuracy": 0.9915590863952334,
"eval_f1": 0.8547008547008547,
"eval_loss": 0.033666037023067474,
"eval_precision": 0.8333333333333334,
"eval_recall": 0.8771929824561403,
"eval_runtime": 83.7677,
"eval_samples_per_second": 5.432,
"eval_steps_per_second": 0.179,
"step": 160
},
{
"epoch": 0.20747422680412372,
"grad_norm": 5.327892303466797,
"learning_rate": 1.9310302673864724e-05,
"loss": 0.057,
"step": 161
},
{
"epoch": 0.20876288659793815,
"grad_norm": 2.6782376766204834,
"learning_rate": 1.929378293982857e-05,
"loss": 0.0288,
"step": 162
},
{
"epoch": 0.21005154639175258,
"grad_norm": 1.8482961654663086,
"learning_rate": 1.9277074935979034e-05,
"loss": 0.0087,
"step": 163
},
{
"epoch": 0.211340206185567,
"grad_norm": 0.3108800947666168,
"learning_rate": 1.926017900078031e-05,
"loss": 0.002,
"step": 164
},
{
"epoch": 0.21262886597938144,
"grad_norm": 6.560524940490723,
"learning_rate": 1.924309547650363e-05,
"loss": 0.0385,
"step": 165
},
{
"epoch": 0.21391752577319587,
"grad_norm": 1.7873457670211792,
"learning_rate": 1.922582470922034e-05,
"loss": 0.006,
"step": 166
},
{
"epoch": 0.21520618556701032,
"grad_norm": 4.115209102630615,
"learning_rate": 1.9208367048794878e-05,
"loss": 0.0095,
"step": 167
},
{
"epoch": 0.21649484536082475,
"grad_norm": 3.2223434448242188,
"learning_rate": 1.9190722848877683e-05,
"loss": 0.0151,
"step": 168
},
{
"epoch": 0.21778350515463918,
"grad_norm": 4.802370071411133,
"learning_rate": 1.9172892466898047e-05,
"loss": 0.0576,
"step": 169
},
{
"epoch": 0.2190721649484536,
"grad_norm": 2.843043327331543,
"learning_rate": 1.9154876264056863e-05,
"loss": 0.0116,
"step": 170
},
{
"epoch": 0.22036082474226804,
"grad_norm": 1.8300056457519531,
"learning_rate": 1.9136674605319304e-05,
"loss": 0.0048,
"step": 171
},
{
"epoch": 0.22164948453608246,
"grad_norm": 0.7112641930580139,
"learning_rate": 1.911828785940745e-05,
"loss": 0.0029,
"step": 172
},
{
"epoch": 0.22293814432989692,
"grad_norm": 3.5936992168426514,
"learning_rate": 1.9099716398792788e-05,
"loss": 0.0335,
"step": 173
},
{
"epoch": 0.22422680412371135,
"grad_norm": 2.8544235229492188,
"learning_rate": 1.908096059968869e-05,
"loss": 0.0207,
"step": 174
},
{
"epoch": 0.22551546391752578,
"grad_norm": 3.7631168365478516,
"learning_rate": 1.906202084204279e-05,
"loss": 0.0212,
"step": 175
},
{
"epoch": 0.2268041237113402,
"grad_norm": 1.2712973356246948,
"learning_rate": 1.904289750952928e-05,
"loss": 0.0084,
"step": 176
},
{
"epoch": 0.22809278350515463,
"grad_norm": 2.580491542816162,
"learning_rate": 1.9023590989541126e-05,
"loss": 0.0151,
"step": 177
},
{
"epoch": 0.22938144329896906,
"grad_norm": 6.0741777420043945,
"learning_rate": 1.900410167318226e-05,
"loss": 0.0616,
"step": 178
},
{
"epoch": 0.23067010309278352,
"grad_norm": 1.9606350660324097,
"learning_rate": 1.8984429955259607e-05,
"loss": 0.0305,
"step": 179
},
{
"epoch": 0.23195876288659795,
"grad_norm": 4.825283527374268,
"learning_rate": 1.8964576234275123e-05,
"loss": 0.0233,
"step": 180
},
{
"epoch": 0.23195876288659795,
"eval_accuracy": 0.9925521350546177,
"eval_f1": 0.8760330578512396,
"eval_loss": 0.027217118069529533,
"eval_precision": 0.828125,
"eval_recall": 0.9298245614035088,
"eval_runtime": 84.1193,
"eval_samples_per_second": 5.409,
"eval_steps_per_second": 0.178,
"step": 180
},
{
"epoch": 0.23324742268041238,
"grad_norm": 3.7470309734344482,
"learning_rate": 1.894454091241771e-05,
"loss": 0.0375,
"step": 181
},
{
"epoch": 0.2345360824742268,
"grad_norm": 5.566728115081787,
"learning_rate": 1.8924324395555066e-05,
"loss": 0.0397,
"step": 182
},
{
"epoch": 0.23582474226804123,
"grad_norm": 4.115679740905762,
"learning_rate": 1.8903927093225474e-05,
"loss": 0.0318,
"step": 183
},
{
"epoch": 0.23711340206185566,
"grad_norm": 2.0655646324157715,
"learning_rate": 1.8883349418629487e-05,
"loss": 0.0502,
"step": 184
},
{
"epoch": 0.23840206185567012,
"grad_norm": 3.514209270477295,
"learning_rate": 1.8862591788621572e-05,
"loss": 0.034,
"step": 185
},
{
"epoch": 0.23969072164948454,
"grad_norm": 2.274663209915161,
"learning_rate": 1.8841654623701673e-05,
"loss": 0.0105,
"step": 186
},
{
"epoch": 0.24097938144329897,
"grad_norm": 1.3190113306045532,
"learning_rate": 1.8820538348006666e-05,
"loss": 0.0099,
"step": 187
},
{
"epoch": 0.2422680412371134,
"grad_norm": 1.9200594425201416,
"learning_rate": 1.8799243389301796e-05,
"loss": 0.0087,
"step": 188
},
{
"epoch": 0.24355670103092783,
"grad_norm": 3.5742523670196533,
"learning_rate": 1.877777017897199e-05,
"loss": 0.0383,
"step": 189
},
{
"epoch": 0.24484536082474226,
"grad_norm": 2.926935911178589,
"learning_rate": 1.8756119152013134e-05,
"loss": 0.0198,
"step": 190
},
{
"epoch": 0.24613402061855671,
"grad_norm": 4.095611095428467,
"learning_rate": 1.873429074702324e-05,
"loss": 0.0151,
"step": 191
},
{
"epoch": 0.24742268041237114,
"grad_norm": 1.0907986164093018,
"learning_rate": 1.8712285406193585e-05,
"loss": 0.0059,
"step": 192
},
{
"epoch": 0.24871134020618557,
"grad_norm": 1.646490454673767,
"learning_rate": 1.8690103575299754e-05,
"loss": 0.0262,
"step": 193
},
{
"epoch": 0.25,
"grad_norm": 0.9283900856971741,
"learning_rate": 1.866774570369257e-05,
"loss": 0.0071,
"step": 194
},
{
"epoch": 0.25128865979381443,
"grad_norm": 1.8307346105575562,
"learning_rate": 1.8645212244289047e-05,
"loss": 0.0246,
"step": 195
},
{
"epoch": 0.25257731958762886,
"grad_norm": 1.3150577545166016,
"learning_rate": 1.8622503653563173e-05,
"loss": 0.0198,
"step": 196
},
{
"epoch": 0.2538659793814433,
"grad_norm": 3.4825661182403564,
"learning_rate": 1.8599620391536682e-05,
"loss": 0.0136,
"step": 197
},
{
"epoch": 0.2551546391752577,
"grad_norm": 5.4773077964782715,
"learning_rate": 1.8576562921769727e-05,
"loss": 0.0223,
"step": 198
},
{
"epoch": 0.25644329896907214,
"grad_norm": 3.3178765773773193,
"learning_rate": 1.8553331711351502e-05,
"loss": 0.0392,
"step": 199
},
{
"epoch": 0.25773195876288657,
"grad_norm": 4.358588218688965,
"learning_rate": 1.8529927230890757e-05,
"loss": 0.029,
"step": 200
},
{
"epoch": 0.25773195876288657,
"eval_accuracy": 0.9920556107249255,
"eval_f1": 0.8666666666666667,
"eval_loss": 0.02330821380019188,
"eval_precision": 0.8253968253968254,
"eval_recall": 0.9122807017543859,
"eval_runtime": 84.2136,
"eval_samples_per_second": 5.403,
"eval_steps_per_second": 0.178,
"step": 200
}
],
"logging_steps": 1,
"max_steps": 776,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 6.685140289008435e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}