{
"best_metric": 0.4137542277339346,
"best_model_checkpoint": "resnet-50-finetuned-galaxy10-decals/checkpoint-1122",
"epoch": 9.939879759519037,
"eval_steps": 500,
"global_step": 1240,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.08016032064128256,
"grad_norm": 0.5629293918609619,
"learning_rate": 1.9838709677419358e-05,
"loss": 2.3002,
"step": 10
},
{
"epoch": 0.16032064128256512,
"grad_norm": 0.6315276026725769,
"learning_rate": 1.967741935483871e-05,
"loss": 2.2945,
"step": 20
},
{
"epoch": 0.24048096192384769,
"grad_norm": 0.6637193560600281,
"learning_rate": 1.9516129032258068e-05,
"loss": 2.2901,
"step": 30
},
{
"epoch": 0.32064128256513025,
"grad_norm": 0.6203126907348633,
"learning_rate": 1.935483870967742e-05,
"loss": 2.2838,
"step": 40
},
{
"epoch": 0.40080160320641284,
"grad_norm": 0.7069018483161926,
"learning_rate": 1.9193548387096777e-05,
"loss": 2.2765,
"step": 50
},
{
"epoch": 0.48096192384769537,
"grad_norm": 0.6753049492835999,
"learning_rate": 1.903225806451613e-05,
"loss": 2.2719,
"step": 60
},
{
"epoch": 0.561122244488978,
"grad_norm": 0.7277663946151733,
"learning_rate": 1.8870967741935487e-05,
"loss": 2.2691,
"step": 70
},
{
"epoch": 0.6412825651302605,
"grad_norm": 0.6369321346282959,
"learning_rate": 1.870967741935484e-05,
"loss": 2.2662,
"step": 80
},
{
"epoch": 0.7214428857715431,
"grad_norm": 0.9763292074203491,
"learning_rate": 1.8548387096774193e-05,
"loss": 2.2536,
"step": 90
},
{
"epoch": 0.8016032064128257,
"grad_norm": 0.7406184077262878,
"learning_rate": 1.838709677419355e-05,
"loss": 2.2497,
"step": 100
},
{
"epoch": 0.8817635270541082,
"grad_norm": 0.7439438700675964,
"learning_rate": 1.8225806451612903e-05,
"loss": 2.2429,
"step": 110
},
{
"epoch": 0.9619238476953907,
"grad_norm": 0.772075891494751,
"learning_rate": 1.806451612903226e-05,
"loss": 2.2417,
"step": 120
},
{
"epoch": 0.9939879759519038,
"eval_accuracy": 0.23731679819616686,
"eval_f1": 0.10768730389550502,
"eval_loss": 2.2265095710754395,
"eval_precision": 0.06968947596402092,
"eval_recall": 0.23731679819616686,
"eval_runtime": 10.4766,
"eval_samples_per_second": 169.33,
"eval_steps_per_second": 5.345,
"step": 124
},
{
"epoch": 1.0420841683366733,
"grad_norm": 0.690293550491333,
"learning_rate": 1.7903225806451612e-05,
"loss": 2.2289,
"step": 130
},
{
"epoch": 1.122244488977956,
"grad_norm": 0.9022379517555237,
"learning_rate": 1.774193548387097e-05,
"loss": 2.2309,
"step": 140
},
{
"epoch": 1.2024048096192386,
"grad_norm": 0.8783809542655945,
"learning_rate": 1.7580645161290325e-05,
"loss": 2.2224,
"step": 150
},
{
"epoch": 1.282565130260521,
"grad_norm": 0.7563625574111938,
"learning_rate": 1.741935483870968e-05,
"loss": 2.2067,
"step": 160
},
{
"epoch": 1.3627254509018036,
"grad_norm": 0.7573732137680054,
"learning_rate": 1.7258064516129035e-05,
"loss": 2.2092,
"step": 170
},
{
"epoch": 1.4428857715430863,
"grad_norm": 0.6887681484222412,
"learning_rate": 1.7096774193548388e-05,
"loss": 2.1874,
"step": 180
},
{
"epoch": 1.5230460921843687,
"grad_norm": 0.8253772854804993,
"learning_rate": 1.6935483870967744e-05,
"loss": 2.1956,
"step": 190
},
{
"epoch": 1.6032064128256514,
"grad_norm": 0.930928647518158,
"learning_rate": 1.6774193548387098e-05,
"loss": 2.188,
"step": 200
},
{
"epoch": 1.6833667334669338,
"grad_norm": 0.7987092733383179,
"learning_rate": 1.6612903225806454e-05,
"loss": 2.1865,
"step": 210
},
{
"epoch": 1.7635270541082164,
"grad_norm": 0.9516962170600891,
"learning_rate": 1.6451612903225807e-05,
"loss": 2.1816,
"step": 220
},
{
"epoch": 1.843687374749499,
"grad_norm": 0.7769425511360168,
"learning_rate": 1.6290322580645164e-05,
"loss": 2.1487,
"step": 230
},
{
"epoch": 1.9238476953907817,
"grad_norm": 0.9441273212432861,
"learning_rate": 1.6129032258064517e-05,
"loss": 2.1616,
"step": 240
},
{
"epoch": 1.9959919839679359,
"eval_accuracy": 0.19503945885005636,
"eval_f1": 0.08885289259144245,
"eval_loss": 2.1268093585968018,
"eval_precision": 0.11324255277888252,
"eval_recall": 0.19503945885005636,
"eval_runtime": 21.372,
"eval_samples_per_second": 83.006,
"eval_steps_per_second": 2.62,
"step": 249
},
{
"epoch": 2.004008016032064,
"grad_norm": 1.0876325368881226,
"learning_rate": 1.596774193548387e-05,
"loss": 2.1485,
"step": 250
},
{
"epoch": 2.0841683366733466,
"grad_norm": 1.0379817485809326,
"learning_rate": 1.5806451612903226e-05,
"loss": 2.1385,
"step": 260
},
{
"epoch": 2.164328657314629,
"grad_norm": 1.0472718477249146,
"learning_rate": 1.5645161290322583e-05,
"loss": 2.1171,
"step": 270
},
{
"epoch": 2.244488977955912,
"grad_norm": 1.1123789548873901,
"learning_rate": 1.5483870967741936e-05,
"loss": 2.1185,
"step": 280
},
{
"epoch": 2.3246492985971945,
"grad_norm": 1.0969492197036743,
"learning_rate": 1.5322580645161292e-05,
"loss": 2.1059,
"step": 290
},
{
"epoch": 2.404809619238477,
"grad_norm": 1.0305417776107788,
"learning_rate": 1.5161290322580646e-05,
"loss": 2.094,
"step": 300
},
{
"epoch": 2.4849699398797593,
"grad_norm": 1.1403542757034302,
"learning_rate": 1.5000000000000002e-05,
"loss": 2.0992,
"step": 310
},
{
"epoch": 2.565130260521042,
"grad_norm": 1.1770292520523071,
"learning_rate": 1.4838709677419357e-05,
"loss": 2.0758,
"step": 320
},
{
"epoch": 2.6452905811623246,
"grad_norm": 0.9762546420097351,
"learning_rate": 1.4677419354838712e-05,
"loss": 2.0753,
"step": 330
},
{
"epoch": 2.7254509018036073,
"grad_norm": 1.0303020477294922,
"learning_rate": 1.4516129032258066e-05,
"loss": 2.0721,
"step": 340
},
{
"epoch": 2.80561122244489,
"grad_norm": 1.1007182598114014,
"learning_rate": 1.4354838709677421e-05,
"loss": 2.0442,
"step": 350
},
{
"epoch": 2.8857715430861726,
"grad_norm": 1.0562711954116821,
"learning_rate": 1.4193548387096776e-05,
"loss": 2.0467,
"step": 360
},
{
"epoch": 2.9659318637274548,
"grad_norm": 1.0534286499023438,
"learning_rate": 1.4032258064516131e-05,
"loss": 2.0459,
"step": 370
},
{
"epoch": 2.997995991983968,
"eval_accuracy": 0.24013528748590757,
"eval_f1": 0.12897223772636482,
"eval_loss": 1.990055799484253,
"eval_precision": 0.10291028503169428,
"eval_recall": 0.24013528748590757,
"eval_runtime": 9.0597,
"eval_samples_per_second": 195.812,
"eval_steps_per_second": 6.181,
"step": 374
},
{
"epoch": 3.0460921843687374,
"grad_norm": 1.340589165687561,
"learning_rate": 1.3870967741935486e-05,
"loss": 2.0286,
"step": 380
},
{
"epoch": 3.12625250501002,
"grad_norm": 1.260714054107666,
"learning_rate": 1.3709677419354839e-05,
"loss": 2.0037,
"step": 390
},
{
"epoch": 3.2064128256513027,
"grad_norm": 1.1234012842178345,
"learning_rate": 1.3548387096774194e-05,
"loss": 1.9998,
"step": 400
},
{
"epoch": 3.2865731462925853,
"grad_norm": 1.1667792797088623,
"learning_rate": 1.3387096774193548e-05,
"loss": 1.9845,
"step": 410
},
{
"epoch": 3.3667334669338675,
"grad_norm": 1.4682308435440063,
"learning_rate": 1.3225806451612903e-05,
"loss": 1.9663,
"step": 420
},
{
"epoch": 3.44689378757515,
"grad_norm": 1.1297812461853027,
"learning_rate": 1.3064516129032258e-05,
"loss": 1.9711,
"step": 430
},
{
"epoch": 3.527054108216433,
"grad_norm": 1.2213447093963623,
"learning_rate": 1.2903225806451613e-05,
"loss": 1.9557,
"step": 440
},
{
"epoch": 3.6072144288577155,
"grad_norm": 1.1665083169937134,
"learning_rate": 1.274193548387097e-05,
"loss": 1.962,
"step": 450
},
{
"epoch": 3.687374749498998,
"grad_norm": 1.3926866054534912,
"learning_rate": 1.2580645161290324e-05,
"loss": 1.9647,
"step": 460
},
{
"epoch": 3.7675350701402808,
"grad_norm": 1.1469335556030273,
"learning_rate": 1.2419354838709679e-05,
"loss": 1.9533,
"step": 470
},
{
"epoch": 3.847695390781563,
"grad_norm": 1.259191870689392,
"learning_rate": 1.2258064516129034e-05,
"loss": 1.9532,
"step": 480
},
{
"epoch": 3.9278557114228456,
"grad_norm": 1.181053876876831,
"learning_rate": 1.2096774193548388e-05,
"loss": 1.9203,
"step": 490
},
{
"epoch": 4.0,
"eval_accuracy": 0.33032694475760993,
"eval_f1": 0.20520589850554893,
"eval_loss": 1.8571178913116455,
"eval_precision": 0.3115828631860488,
"eval_recall": 0.33032694475760993,
"eval_runtime": 9.5098,
"eval_samples_per_second": 186.545,
"eval_steps_per_second": 5.889,
"step": 499
},
{
"epoch": 4.008016032064128,
"grad_norm": 1.2856674194335938,
"learning_rate": 1.1935483870967743e-05,
"loss": 1.938,
"step": 500
},
{
"epoch": 4.0881763527054105,
"grad_norm": 1.263211727142334,
"learning_rate": 1.1774193548387098e-05,
"loss": 1.9092,
"step": 510
},
{
"epoch": 4.168336673346693,
"grad_norm": 1.5177675485610962,
"learning_rate": 1.1612903225806453e-05,
"loss": 1.9162,
"step": 520
},
{
"epoch": 4.248496993987976,
"grad_norm": 1.1196962594985962,
"learning_rate": 1.1451612903225808e-05,
"loss": 1.9038,
"step": 530
},
{
"epoch": 4.328657314629258,
"grad_norm": 1.2426973581314087,
"learning_rate": 1.1290322580645164e-05,
"loss": 1.8824,
"step": 540
},
{
"epoch": 4.408817635270541,
"grad_norm": 1.224511981010437,
"learning_rate": 1.1129032258064516e-05,
"loss": 1.8817,
"step": 550
},
{
"epoch": 4.488977955911824,
"grad_norm": 1.370235562324524,
"learning_rate": 1.096774193548387e-05,
"loss": 1.8781,
"step": 560
},
{
"epoch": 4.569138276553106,
"grad_norm": 1.3393809795379639,
"learning_rate": 1.0806451612903225e-05,
"loss": 1.8472,
"step": 570
},
{
"epoch": 4.649298597194389,
"grad_norm": 1.3370624780654907,
"learning_rate": 1.0645161290322582e-05,
"loss": 1.8535,
"step": 580
},
{
"epoch": 4.729458917835672,
"grad_norm": 1.1478331089019775,
"learning_rate": 1.0483870967741936e-05,
"loss": 1.8508,
"step": 590
},
{
"epoch": 4.809619238476954,
"grad_norm": 1.4004801511764526,
"learning_rate": 1.0322580645161291e-05,
"loss": 1.8861,
"step": 600
},
{
"epoch": 4.889779559118237,
"grad_norm": 1.399299144744873,
"learning_rate": 1.0161290322580646e-05,
"loss": 1.8537,
"step": 610
},
{
"epoch": 4.969939879759519,
"grad_norm": 1.3188832998275757,
"learning_rate": 1e-05,
"loss": 1.8347,
"step": 620
},
{
"epoch": 4.993987975951904,
"eval_accuracy": 0.3613303269447576,
"eval_f1": 0.24569343378869493,
"eval_loss": 1.7692168951034546,
"eval_precision": 0.2693815240199387,
"eval_recall": 0.3613303269447576,
"eval_runtime": 11.1509,
"eval_samples_per_second": 159.09,
"eval_steps_per_second": 5.022,
"step": 623
},
{
"epoch": 5.050100200400801,
"grad_norm": 1.2819945812225342,
"learning_rate": 9.838709677419356e-06,
"loss": 1.8244,
"step": 630
},
{
"epoch": 5.130260521042084,
"grad_norm": 1.291672706604004,
"learning_rate": 9.67741935483871e-06,
"loss": 1.8077,
"step": 640
},
{
"epoch": 5.210420841683367,
"grad_norm": 1.4084501266479492,
"learning_rate": 9.516129032258065e-06,
"loss": 1.8432,
"step": 650
},
{
"epoch": 5.290581162324649,
"grad_norm": 1.527843713760376,
"learning_rate": 9.35483870967742e-06,
"loss": 1.815,
"step": 660
},
{
"epoch": 5.370741482965932,
"grad_norm": 1.317143440246582,
"learning_rate": 9.193548387096775e-06,
"loss": 1.8104,
"step": 670
},
{
"epoch": 5.4509018036072145,
"grad_norm": 1.344497561454773,
"learning_rate": 9.03225806451613e-06,
"loss": 1.8064,
"step": 680
},
{
"epoch": 5.531062124248497,
"grad_norm": 1.4130953550338745,
"learning_rate": 8.870967741935484e-06,
"loss": 1.7957,
"step": 690
},
{
"epoch": 5.61122244488978,
"grad_norm": 1.5459003448486328,
"learning_rate": 8.70967741935484e-06,
"loss": 1.8051,
"step": 700
},
{
"epoch": 5.6913827655310625,
"grad_norm": 1.7455060482025146,
"learning_rate": 8.548387096774194e-06,
"loss": 1.8146,
"step": 710
},
{
"epoch": 5.771543086172345,
"grad_norm": 1.362961769104004,
"learning_rate": 8.387096774193549e-06,
"loss": 1.7706,
"step": 720
},
{
"epoch": 5.851703406813627,
"grad_norm": 1.503525972366333,
"learning_rate": 8.225806451612904e-06,
"loss": 1.7877,
"step": 730
},
{
"epoch": 5.9318637274549095,
"grad_norm": 1.4291051626205444,
"learning_rate": 8.064516129032258e-06,
"loss": 1.7628,
"step": 740
},
{
"epoch": 5.995991983967936,
"eval_accuracy": 0.3850056369785795,
"eval_f1": 0.27578049224836065,
"eval_loss": 1.692612886428833,
"eval_precision": 0.417174096805733,
"eval_recall": 0.3850056369785795,
"eval_runtime": 15.8797,
"eval_samples_per_second": 111.715,
"eval_steps_per_second": 3.527,
"step": 748
},
{
"epoch": 6.012024048096192,
"grad_norm": 1.2377204895019531,
"learning_rate": 7.903225806451613e-06,
"loss": 1.7717,
"step": 750
},
{
"epoch": 6.092184368737475,
"grad_norm": 1.7056549787521362,
"learning_rate": 7.741935483870968e-06,
"loss": 1.7749,
"step": 760
},
{
"epoch": 6.1723446893787575,
"grad_norm": 1.51571524143219,
"learning_rate": 7.580645161290323e-06,
"loss": 1.7649,
"step": 770
},
{
"epoch": 6.25250501002004,
"grad_norm": 1.6524779796600342,
"learning_rate": 7.4193548387096784e-06,
"loss": 1.7556,
"step": 780
},
{
"epoch": 6.332665330661323,
"grad_norm": 1.4005558490753174,
"learning_rate": 7.258064516129033e-06,
"loss": 1.7437,
"step": 790
},
{
"epoch": 6.412825651302605,
"grad_norm": 1.567458987236023,
"learning_rate": 7.096774193548388e-06,
"loss": 1.7587,
"step": 800
},
{
"epoch": 6.492985971943888,
"grad_norm": 1.843725323677063,
"learning_rate": 6.935483870967743e-06,
"loss": 1.7568,
"step": 810
},
{
"epoch": 6.573146292585171,
"grad_norm": 1.5228469371795654,
"learning_rate": 6.774193548387097e-06,
"loss": 1.7507,
"step": 820
},
{
"epoch": 6.653306613226453,
"grad_norm": 1.6213654279708862,
"learning_rate": 6.612903225806452e-06,
"loss": 1.7625,
"step": 830
},
{
"epoch": 6.733466933867735,
"grad_norm": 1.5784552097320557,
"learning_rate": 6.451612903225806e-06,
"loss": 1.7321,
"step": 840
},
{
"epoch": 6.813627254509018,
"grad_norm": 1.5368263721466064,
"learning_rate": 6.290322580645162e-06,
"loss": 1.7395,
"step": 850
},
{
"epoch": 6.8937875751503,
"grad_norm": 2.3605217933654785,
"learning_rate": 6.129032258064517e-06,
"loss": 1.7604,
"step": 860
},
{
"epoch": 6.973947895791583,
"grad_norm": 1.6296197175979614,
"learning_rate": 5.967741935483872e-06,
"loss": 1.723,
"step": 870
},
{
"epoch": 6.997995991983968,
"eval_accuracy": 0.39853438556933485,
"eval_f1": 0.292163872939456,
"eval_loss": 1.6341866254806519,
"eval_precision": 0.4427833264577748,
"eval_recall": 0.39853438556933485,
"eval_runtime": 12.0064,
"eval_samples_per_second": 147.755,
"eval_steps_per_second": 4.664,
"step": 873
},
{
"epoch": 7.054108216432866,
"grad_norm": 1.5017938613891602,
"learning_rate": 5.806451612903226e-06,
"loss": 1.733,
"step": 880
},
{
"epoch": 7.134268537074148,
"grad_norm": 1.9987595081329346,
"learning_rate": 5.645161290322582e-06,
"loss": 1.7058,
"step": 890
},
{
"epoch": 7.214428857715431,
"grad_norm": 1.6970964670181274,
"learning_rate": 5.483870967741935e-06,
"loss": 1.7472,
"step": 900
},
{
"epoch": 7.294589178356714,
"grad_norm": 1.8042755126953125,
"learning_rate": 5.322580645161291e-06,
"loss": 1.7119,
"step": 910
},
{
"epoch": 7.374749498997996,
"grad_norm": 1.4862197637557983,
"learning_rate": 5.161290322580646e-06,
"loss": 1.7166,
"step": 920
},
{
"epoch": 7.454909819639279,
"grad_norm": 1.9236266613006592,
"learning_rate": 5e-06,
"loss": 1.7088,
"step": 930
},
{
"epoch": 7.5350701402805615,
"grad_norm": 1.7879084348678589,
"learning_rate": 4.838709677419355e-06,
"loss": 1.7041,
"step": 940
},
{
"epoch": 7.615230460921843,
"grad_norm": 1.80990731716156,
"learning_rate": 4.67741935483871e-06,
"loss": 1.7119,
"step": 950
},
{
"epoch": 7.695390781563126,
"grad_norm": 1.5909008979797363,
"learning_rate": 4.516129032258065e-06,
"loss": 1.6927,
"step": 960
},
{
"epoch": 7.775551102204409,
"grad_norm": 1.6026943922042847,
"learning_rate": 4.35483870967742e-06,
"loss": 1.7155,
"step": 970
},
{
"epoch": 7.855711422845691,
"grad_norm": 1.5704305171966553,
"learning_rate": 4.193548387096774e-06,
"loss": 1.7243,
"step": 980
},
{
"epoch": 7.935871743486974,
"grad_norm": 1.7837029695510864,
"learning_rate": 4.032258064516129e-06,
"loss": 1.71,
"step": 990
},
{
"epoch": 8.0,
"eval_accuracy": 0.41037204058624577,
"eval_f1": 0.31215257466998775,
"eval_loss": 1.6071045398712158,
"eval_precision": 0.4369322578217754,
"eval_recall": 0.41037204058624577,
"eval_runtime": 17.7431,
"eval_samples_per_second": 99.983,
"eval_steps_per_second": 3.156,
"step": 998
},
{
"epoch": 8.016032064128256,
"grad_norm": 1.6631461381912231,
"learning_rate": 3.870967741935484e-06,
"loss": 1.6681,
"step": 1000
},
{
"epoch": 8.09619238476954,
"grad_norm": 1.4949766397476196,
"learning_rate": 3.7096774193548392e-06,
"loss": 1.6996,
"step": 1010
},
{
"epoch": 8.176352705410821,
"grad_norm": 1.4516801834106445,
"learning_rate": 3.548387096774194e-06,
"loss": 1.6857,
"step": 1020
},
{
"epoch": 8.256513026052104,
"grad_norm": 1.3758476972579956,
"learning_rate": 3.3870967741935484e-06,
"loss": 1.7096,
"step": 1030
},
{
"epoch": 8.336673346693386,
"grad_norm": 1.5905085802078247,
"learning_rate": 3.225806451612903e-06,
"loss": 1.6934,
"step": 1040
},
{
"epoch": 8.41683366733467,
"grad_norm": 1.5107113122940063,
"learning_rate": 3.0645161290322584e-06,
"loss": 1.7023,
"step": 1050
},
{
"epoch": 8.496993987975952,
"grad_norm": 1.2995082139968872,
"learning_rate": 2.903225806451613e-06,
"loss": 1.7065,
"step": 1060
},
{
"epoch": 8.577154308617235,
"grad_norm": 1.4054443836212158,
"learning_rate": 2.7419354838709676e-06,
"loss": 1.6596,
"step": 1070
},
{
"epoch": 8.657314629258517,
"grad_norm": 1.3404021263122559,
"learning_rate": 2.580645161290323e-06,
"loss": 1.7036,
"step": 1080
},
{
"epoch": 8.7374749498998,
"grad_norm": 1.8194636106491089,
"learning_rate": 2.4193548387096776e-06,
"loss": 1.6631,
"step": 1090
},
{
"epoch": 8.817635270541082,
"grad_norm": 1.8533542156219482,
"learning_rate": 2.2580645161290324e-06,
"loss": 1.6714,
"step": 1100
},
{
"epoch": 8.897795591182366,
"grad_norm": 1.6438981294631958,
"learning_rate": 2.096774193548387e-06,
"loss": 1.6751,
"step": 1110
},
{
"epoch": 8.977955911823647,
"grad_norm": 1.6241378784179688,
"learning_rate": 1.935483870967742e-06,
"loss": 1.6948,
"step": 1120
},
{
"epoch": 8.993987975951903,
"eval_accuracy": 0.4137542277339346,
"eval_f1": 0.3134270193161171,
"eval_loss": 1.5789121389389038,
"eval_precision": 0.44930477829595117,
"eval_recall": 0.4137542277339346,
"eval_runtime": 8.6347,
"eval_samples_per_second": 205.451,
"eval_steps_per_second": 6.485,
"step": 1122
},
{
"epoch": 9.05811623246493,
"grad_norm": 1.6152193546295166,
"learning_rate": 1.774193548387097e-06,
"loss": 1.6929,
"step": 1130
},
{
"epoch": 9.138276553106213,
"grad_norm": 1.3725346326828003,
"learning_rate": 1.6129032258064516e-06,
"loss": 1.7029,
"step": 1140
},
{
"epoch": 9.218436873747494,
"grad_norm": 1.3589799404144287,
"learning_rate": 1.4516129032258066e-06,
"loss": 1.6856,
"step": 1150
},
{
"epoch": 9.298597194388778,
"grad_norm": 1.686957836151123,
"learning_rate": 1.2903225806451614e-06,
"loss": 1.6604,
"step": 1160
},
{
"epoch": 9.37875751503006,
"grad_norm": 1.3393840789794922,
"learning_rate": 1.1290322580645162e-06,
"loss": 1.6799,
"step": 1170
},
{
"epoch": 9.458917835671343,
"grad_norm": 1.6881558895111084,
"learning_rate": 9.67741935483871e-07,
"loss": 1.6635,
"step": 1180
},
{
"epoch": 9.539078156312625,
"grad_norm": 1.545275092124939,
"learning_rate": 8.064516129032258e-07,
"loss": 1.6886,
"step": 1190
},
{
"epoch": 9.619238476953909,
"grad_norm": 1.2983485460281372,
"learning_rate": 6.451612903225807e-07,
"loss": 1.6443,
"step": 1200
},
{
"epoch": 9.69939879759519,
"grad_norm": 1.5959370136260986,
"learning_rate": 4.838709677419355e-07,
"loss": 1.67,
"step": 1210
},
{
"epoch": 9.779559118236474,
"grad_norm": 1.5400179624557495,
"learning_rate": 3.2258064516129035e-07,
"loss": 1.6754,
"step": 1220
},
{
"epoch": 9.859719438877756,
"grad_norm": 1.5355889797210693,
"learning_rate": 1.6129032258064518e-07,
"loss": 1.6881,
"step": 1230
},
{
"epoch": 9.939879759519037,
"grad_norm": 1.6926000118255615,
"learning_rate": 0.0,
"loss": 1.656,
"step": 1240
},
{
"epoch": 9.939879759519037,
"eval_accuracy": 0.40529875986471253,
"eval_f1": 0.3033691615952433,
"eval_loss": 1.5805104970932007,
"eval_precision": 0.41756755609133844,
"eval_recall": 0.40529875986471253,
"eval_runtime": 16.4801,
"eval_samples_per_second": 107.645,
"eval_steps_per_second": 3.398,
"step": 1240
},
{
"epoch": 9.939879759519037,
"step": 1240,
"total_flos": 3.3716855275489935e+18,
"train_loss": 1.9026473429895217,
"train_runtime": 1845.6703,
"train_samples_per_second": 86.483,
"train_steps_per_second": 0.672
}
],
"logging_steps": 10,
"max_steps": 1240,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.3716855275489935e+18,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}