llava-v1.6-mistral-7b-med-lora / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 293,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 1.3333333333333334e-06,
"loss": 1.0441,
"step": 1
},
{
"epoch": 0.01,
"learning_rate": 2.666666666666667e-06,
"loss": 1.0424,
"step": 2
},
{
"epoch": 0.01,
"learning_rate": 4.000000000000001e-06,
"loss": 1.0489,
"step": 3
},
{
"epoch": 0.01,
"learning_rate": 5.333333333333334e-06,
"loss": 1.0717,
"step": 4
},
{
"epoch": 0.02,
"learning_rate": 6.666666666666667e-06,
"loss": 1.0396,
"step": 5
},
{
"epoch": 0.02,
"learning_rate": 8.000000000000001e-06,
"loss": 1.0776,
"step": 6
},
{
"epoch": 0.02,
"learning_rate": 9.333333333333334e-06,
"loss": 1.0178,
"step": 7
},
{
"epoch": 0.03,
"learning_rate": 1.0666666666666667e-05,
"loss": 1.0144,
"step": 8
},
{
"epoch": 0.03,
"learning_rate": 1.2e-05,
"loss": 0.9903,
"step": 9
},
{
"epoch": 0.03,
"learning_rate": 1.3333333333333333e-05,
"loss": 0.9258,
"step": 10
},
{
"epoch": 0.04,
"learning_rate": 1.4666666666666666e-05,
"loss": 0.9325,
"step": 11
},
{
"epoch": 0.04,
"learning_rate": 1.6000000000000003e-05,
"loss": 0.9532,
"step": 12
},
{
"epoch": 0.04,
"learning_rate": 1.7333333333333336e-05,
"loss": 0.9473,
"step": 13
},
{
"epoch": 0.05,
"learning_rate": 1.866666666666667e-05,
"loss": 0.9213,
"step": 14
},
{
"epoch": 0.05,
"learning_rate": 1.866666666666667e-05,
"loss": 0.9497,
"step": 15
},
{
"epoch": 0.05,
"learning_rate": 2e-05,
"loss": 0.9721,
"step": 16
},
{
"epoch": 0.06,
"learning_rate": 1.9999361478484043e-05,
"loss": 0.9294,
"step": 17
},
{
"epoch": 0.06,
"learning_rate": 1.999744599547812e-05,
"loss": 0.9948,
"step": 18
},
{
"epoch": 0.06,
"learning_rate": 1.999425379559765e-05,
"loss": 0.9043,
"step": 19
},
{
"epoch": 0.07,
"learning_rate": 1.9989785286500294e-05,
"loss": 0.9031,
"step": 20
},
{
"epoch": 0.07,
"learning_rate": 1.99840410388339e-05,
"loss": 0.8928,
"step": 21
},
{
"epoch": 0.08,
"learning_rate": 1.99770217861636e-05,
"loss": 0.9324,
"step": 22
},
{
"epoch": 0.08,
"learning_rate": 1.9968728424878178e-05,
"loss": 0.9068,
"step": 23
},
{
"epoch": 0.08,
"learning_rate": 1.9959162014075553e-05,
"loss": 0.9057,
"step": 24
},
{
"epoch": 0.09,
"learning_rate": 1.994832377542755e-05,
"loss": 0.8217,
"step": 25
},
{
"epoch": 0.09,
"learning_rate": 1.9936215093023884e-05,
"loss": 0.8921,
"step": 26
},
{
"epoch": 0.09,
"learning_rate": 1.9922837513195406e-05,
"loss": 0.9066,
"step": 27
},
{
"epoch": 0.1,
"learning_rate": 1.990819274431662e-05,
"loss": 0.8279,
"step": 28
},
{
"epoch": 0.1,
"learning_rate": 1.989228265658754e-05,
"loss": 0.8751,
"step": 29
},
{
"epoch": 0.1,
"learning_rate": 1.9875109281794828e-05,
"loss": 0.8937,
"step": 30
},
{
"epoch": 0.11,
"learning_rate": 1.9856674813052345e-05,
"loss": 0.8608,
"step": 31
},
{
"epoch": 0.11,
"learning_rate": 1.9836981604521077e-05,
"loss": 0.8593,
"step": 32
},
{
"epoch": 0.11,
"learning_rate": 1.98160321711085e-05,
"loss": 0.9067,
"step": 33
},
{
"epoch": 0.12,
"learning_rate": 1.9793829188147406e-05,
"loss": 0.8843,
"step": 34
},
{
"epoch": 0.12,
"learning_rate": 1.9770375491054264e-05,
"loss": 0.815,
"step": 35
},
{
"epoch": 0.12,
"learning_rate": 1.974567407496712e-05,
"loss": 0.8244,
"step": 36
},
{
"epoch": 0.13,
"learning_rate": 1.9719728094363103e-05,
"loss": 0.8525,
"step": 37
},
{
"epoch": 0.13,
"learning_rate": 1.9692540862655587e-05,
"loss": 0.7885,
"step": 38
},
{
"epoch": 0.13,
"learning_rate": 1.966411585177105e-05,
"loss": 0.8094,
"step": 39
},
{
"epoch": 0.14,
"learning_rate": 1.9634456691705705e-05,
"loss": 0.8224,
"step": 40
},
{
"epoch": 0.14,
"learning_rate": 1.9603567170061918e-05,
"loss": 0.8554,
"step": 41
},
{
"epoch": 0.14,
"learning_rate": 1.9571451231564523e-05,
"loss": 0.8125,
"step": 42
},
{
"epoch": 0.15,
"learning_rate": 1.9571451231564523e-05,
"loss": 0.7916,
"step": 43
},
{
"epoch": 0.15,
"learning_rate": 1.9538112977557077e-05,
"loss": 0.8607,
"step": 44
},
{
"epoch": 0.15,
"learning_rate": 1.9503556665478066e-05,
"loss": 0.8381,
"step": 45
},
{
"epoch": 0.16,
"learning_rate": 1.9467786708317257e-05,
"loss": 0.8083,
"step": 46
},
{
"epoch": 0.16,
"learning_rate": 1.9430807674052092e-05,
"loss": 0.8169,
"step": 47
},
{
"epoch": 0.16,
"learning_rate": 1.939262428506438e-05,
"loss": 0.7989,
"step": 48
},
{
"epoch": 0.17,
"learning_rate": 1.9353241417537216e-05,
"loss": 0.8313,
"step": 49
},
{
"epoch": 0.17,
"learning_rate": 1.9312664100832236e-05,
"loss": 0.8218,
"step": 50
},
{
"epoch": 0.17,
"learning_rate": 1.9270897516847406e-05,
"loss": 0.8098,
"step": 51
},
{
"epoch": 0.18,
"learning_rate": 1.9227946999355226e-05,
"loss": 0.806,
"step": 52
},
{
"epoch": 0.18,
"learning_rate": 1.9183818033321612e-05,
"loss": 0.8,
"step": 53
},
{
"epoch": 0.18,
"learning_rate": 1.9138516254205416e-05,
"loss": 0.8299,
"step": 54
},
{
"epoch": 0.19,
"learning_rate": 1.9092047447238775e-05,
"loss": 0.841,
"step": 55
},
{
"epoch": 0.19,
"learning_rate": 1.9044417546688295e-05,
"loss": 0.8153,
"step": 56
},
{
"epoch": 0.19,
"learning_rate": 1.899563263509725e-05,
"loss": 0.8235,
"step": 57
},
{
"epoch": 0.2,
"learning_rate": 1.894569894250877e-05,
"loss": 0.8133,
"step": 58
},
{
"epoch": 0.2,
"learning_rate": 1.8894622845670282e-05,
"loss": 0.827,
"step": 59
},
{
"epoch": 0.2,
"learning_rate": 1.8842410867219137e-05,
"loss": 0.8848,
"step": 60
},
{
"epoch": 0.21,
"learning_rate": 1.878906967484966e-05,
"loss": 0.8284,
"step": 61
},
{
"epoch": 0.21,
"learning_rate": 1.8734606080461657e-05,
"loss": 0.8398,
"step": 62
},
{
"epoch": 0.22,
"learning_rate": 1.86790270392905e-05,
"loss": 0.8236,
"step": 63
},
{
"epoch": 0.22,
"learning_rate": 1.8622339649018907e-05,
"loss": 0.8271,
"step": 64
},
{
"epoch": 0.22,
"learning_rate": 1.856455114887056e-05,
"loss": 0.8735,
"step": 65
},
{
"epoch": 0.23,
"learning_rate": 1.8505668918685603e-05,
"loss": 0.7842,
"step": 66
},
{
"epoch": 0.23,
"learning_rate": 1.8445700477978207e-05,
"loss": 0.7925,
"step": 67
},
{
"epoch": 0.23,
"learning_rate": 1.8384653484976305e-05,
"loss": 0.7709,
"step": 68
},
{
"epoch": 0.24,
"learning_rate": 1.8322535735643604e-05,
"loss": 0.7795,
"step": 69
},
{
"epoch": 0.24,
"learning_rate": 1.8259355162684e-05,
"loss": 0.8165,
"step": 70
},
{
"epoch": 0.24,
"learning_rate": 1.8195119834528535e-05,
"loss": 0.7843,
"step": 71
},
{
"epoch": 0.25,
"learning_rate": 1.8129837954305033e-05,
"loss": 0.7773,
"step": 72
},
{
"epoch": 0.25,
"learning_rate": 1.8063517858790517e-05,
"loss": 0.813,
"step": 73
},
{
"epoch": 0.25,
"learning_rate": 1.799616801734657e-05,
"loss": 0.7894,
"step": 74
},
{
"epoch": 0.26,
"learning_rate": 1.792779703083777e-05,
"loss": 0.795,
"step": 75
},
{
"epoch": 0.26,
"learning_rate": 1.7858413630533305e-05,
"loss": 0.8139,
"step": 76
},
{
"epoch": 0.26,
"learning_rate": 1.778802667699196e-05,
"loss": 0.814,
"step": 77
},
{
"epoch": 0.27,
"learning_rate": 1.77166451589306e-05,
"loss": 0.7781,
"step": 78
},
{
"epoch": 0.27,
"learning_rate": 1.764427819207624e-05,
"loss": 0.7791,
"step": 79
},
{
"epoch": 0.27,
"learning_rate": 1.757093501800196e-05,
"loss": 0.7927,
"step": 80
},
{
"epoch": 0.28,
"learning_rate": 1.7496625002946702e-05,
"loss": 0.764,
"step": 81
},
{
"epoch": 0.28,
"learning_rate": 1.7421357636619153e-05,
"loss": 0.8185,
"step": 82
},
{
"epoch": 0.28,
"learning_rate": 1.734514253098589e-05,
"loss": 0.7295,
"step": 83
},
{
"epoch": 0.29,
"learning_rate": 1.726798941904386e-05,
"loss": 0.7698,
"step": 84
},
{
"epoch": 0.29,
"learning_rate": 1.7189908153577473e-05,
"loss": 0.7951,
"step": 85
},
{
"epoch": 0.29,
"learning_rate": 1.7110908705900322e-05,
"loss": 0.894,
"step": 86
},
{
"epoch": 0.3,
"learning_rate": 1.7031001164581828e-05,
"loss": 0.8151,
"step": 87
},
{
"epoch": 0.3,
"learning_rate": 1.6950195734158874e-05,
"loss": 0.7179,
"step": 88
},
{
"epoch": 0.3,
"learning_rate": 1.6868502733832647e-05,
"loss": 0.7924,
"step": 89
},
{
"epoch": 0.31,
"learning_rate": 1.6785932596150827e-05,
"loss": 0.7925,
"step": 90
},
{
"epoch": 0.31,
"learning_rate": 1.670249586567531e-05,
"loss": 0.7784,
"step": 91
},
{
"epoch": 0.31,
"learning_rate": 1.6618203197635624e-05,
"loss": 0.7663,
"step": 92
},
{
"epoch": 0.32,
"learning_rate": 1.6533065356568206e-05,
"loss": 0.874,
"step": 93
},
{
"epoch": 0.32,
"learning_rate": 1.6447093214941727e-05,
"loss": 0.8626,
"step": 94
},
{
"epoch": 0.32,
"learning_rate": 1.636029775176862e-05,
"loss": 0.7891,
"step": 95
},
{
"epoch": 0.33,
"learning_rate": 1.627269005120304e-05,
"loss": 0.8171,
"step": 96
},
{
"epoch": 0.33,
"learning_rate": 1.618428130112533e-05,
"loss": 0.7662,
"step": 97
},
{
"epoch": 0.33,
"learning_rate": 1.6095082791713322e-05,
"loss": 0.8031,
"step": 98
},
{
"epoch": 0.34,
"learning_rate": 1.6005105914000508e-05,
"loss": 0.881,
"step": 99
},
{
"epoch": 0.34,
"learning_rate": 1.5914362158421352e-05,
"loss": 0.7381,
"step": 100
},
{
"epoch": 0.34,
"learning_rate": 1.5822863113343934e-05,
"loss": 0.749,
"step": 101
},
{
"epoch": 0.35,
"learning_rate": 1.5730620463590052e-05,
"loss": 0.774,
"step": 102
},
{
"epoch": 0.35,
"learning_rate": 1.5637645988943008e-05,
"loss": 0.8499,
"step": 103
},
{
"epoch": 0.35,
"learning_rate": 1.554395156264331e-05,
"loss": 0.7523,
"step": 104
},
{
"epoch": 0.36,
"learning_rate": 1.544954914987238e-05,
"loss": 0.8426,
"step": 105
},
{
"epoch": 0.36,
"learning_rate": 1.5354450806224553e-05,
"loss": 0.8087,
"step": 106
},
{
"epoch": 0.37,
"learning_rate": 1.5258668676167548e-05,
"loss": 0.7308,
"step": 107
},
{
"epoch": 0.37,
"learning_rate": 1.516221499149154e-05,
"loss": 0.8656,
"step": 108
},
{
"epoch": 0.37,
"learning_rate": 1.5065102069747117e-05,
"loss": 0.8439,
"step": 109
},
{
"epoch": 0.38,
"learning_rate": 1.4967342312672283e-05,
"loss": 0.8121,
"step": 110
},
{
"epoch": 0.38,
"learning_rate": 1.48689482046087e-05,
"loss": 0.7898,
"step": 111
},
{
"epoch": 0.38,
"learning_rate": 1.4769932310907372e-05,
"loss": 0.8082,
"step": 112
},
{
"epoch": 0.39,
"learning_rate": 1.467030727632401e-05,
"loss": 0.7991,
"step": 113
},
{
"epoch": 0.39,
"learning_rate": 1.4570085823404232e-05,
"loss": 0.9332,
"step": 114
},
{
"epoch": 0.39,
"learning_rate": 1.4469280750858854e-05,
"loss": 0.7493,
"step": 115
},
{
"epoch": 0.4,
"learning_rate": 1.4367904931929422e-05,
"loss": 0.818,
"step": 116
},
{
"epoch": 0.4,
"learning_rate": 1.4265971312744252e-05,
"loss": 0.817,
"step": 117
},
{
"epoch": 0.4,
"learning_rate": 1.4163492910665153e-05,
"loss": 0.8618,
"step": 118
},
{
"epoch": 0.41,
"learning_rate": 1.4060482812625055e-05,
"loss": 0.8142,
"step": 119
},
{
"epoch": 0.41,
"learning_rate": 1.395695417345675e-05,
"loss": 0.7781,
"step": 120
},
{
"epoch": 0.41,
"learning_rate": 1.3852920214212966e-05,
"loss": 0.7698,
"step": 121
},
{
"epoch": 0.42,
"learning_rate": 1.3748394220477972e-05,
"loss": 0.7905,
"step": 122
},
{
"epoch": 0.42,
"learning_rate": 1.3643389540670963e-05,
"loss": 0.7785,
"step": 123
},
{
"epoch": 0.42,
"learning_rate": 1.3537919584341413e-05,
"loss": 0.775,
"step": 124
},
{
"epoch": 0.43,
"learning_rate": 1.3431997820456592e-05,
"loss": 0.8126,
"step": 125
},
{
"epoch": 0.43,
"learning_rate": 1.3325637775681561e-05,
"loss": 0.7391,
"step": 126
},
{
"epoch": 0.43,
"learning_rate": 1.3218853032651719e-05,
"loss": 0.7936,
"step": 127
},
{
"epoch": 0.44,
"learning_rate": 1.3111657228238263e-05,
"loss": 0.8208,
"step": 128
},
{
"epoch": 0.44,
"learning_rate": 1.3004064051806712e-05,
"loss": 0.845,
"step": 129
},
{
"epoch": 0.44,
"learning_rate": 1.2896087243468673e-05,
"loss": 0.8119,
"step": 130
},
{
"epoch": 0.45,
"learning_rate": 1.2787740592327232e-05,
"loss": 0.8512,
"step": 131
},
{
"epoch": 0.45,
"learning_rate": 1.267903793471597e-05,
"loss": 0.8217,
"step": 132
},
{
"epoch": 0.45,
"learning_rate": 1.2569993152432028e-05,
"loss": 0.7735,
"step": 133
},
{
"epoch": 0.46,
"learning_rate": 1.2460620170963353e-05,
"loss": 0.845,
"step": 134
},
{
"epoch": 0.46,
"learning_rate": 1.2350932957710322e-05,
"loss": 0.7313,
"step": 135
},
{
"epoch": 0.46,
"learning_rate": 1.2240945520202079e-05,
"loss": 0.7404,
"step": 136
},
{
"epoch": 0.47,
"learning_rate": 1.2130671904307692e-05,
"loss": 0.7769,
"step": 137
},
{
"epoch": 0.47,
"learning_rate": 1.202012619244243e-05,
"loss": 0.7828,
"step": 138
},
{
"epoch": 0.47,
"learning_rate": 1.1909322501769407e-05,
"loss": 0.7855,
"step": 139
},
{
"epoch": 0.48,
"learning_rate": 1.1798274982396728e-05,
"loss": 0.7496,
"step": 140
},
{
"epoch": 0.48,
"learning_rate": 1.1686997815570473e-05,
"loss": 0.7661,
"step": 141
},
{
"epoch": 0.48,
"learning_rate": 1.15755052118637e-05,
"loss": 0.8014,
"step": 142
},
{
"epoch": 0.49,
"learning_rate": 1.1463811409361667e-05,
"loss": 0.7606,
"step": 143
},
{
"epoch": 0.49,
"learning_rate": 1.13519306718436e-05,
"loss": 0.7683,
"step": 144
},
{
"epoch": 0.49,
"learning_rate": 1.1239877286961123e-05,
"loss": 0.7931,
"step": 145
},
{
"epoch": 0.5,
"learning_rate": 1.112766556441367e-05,
"loss": 0.783,
"step": 146
},
{
"epoch": 0.5,
"learning_rate": 1.1015309834121083e-05,
"loss": 0.7784,
"step": 147
},
{
"epoch": 0.51,
"learning_rate": 1.0902824444393602e-05,
"loss": 0.8107,
"step": 148
},
{
"epoch": 0.51,
"learning_rate": 1.079022376009955e-05,
"loss": 0.7373,
"step": 149
},
{
"epoch": 0.51,
"learning_rate": 1.067752216083085e-05,
"loss": 0.777,
"step": 150
},
{
"epoch": 0.52,
"learning_rate": 1.05647340390667e-05,
"loss": 0.8342,
"step": 151
},
{
"epoch": 0.52,
"learning_rate": 1.0451873798335605e-05,
"loss": 0.7869,
"step": 152
},
{
"epoch": 0.52,
"learning_rate": 1.0338955851375962e-05,
"loss": 0.7407,
"step": 153
},
{
"epoch": 0.53,
"learning_rate": 1.0225994618295507e-05,
"loss": 0.744,
"step": 154
},
{
"epoch": 0.53,
"learning_rate": 1.01130045247298e-05,
"loss": 0.795,
"step": 155
},
{
"epoch": 0.53,
"learning_rate": 1e-05,
"loss": 0.8239,
"step": 156
},
{
"epoch": 0.54,
"learning_rate": 9.886995475270205e-06,
"loss": 0.9011,
"step": 157
},
{
"epoch": 0.54,
"learning_rate": 9.774005381704498e-06,
"loss": 0.7928,
"step": 158
},
{
"epoch": 0.54,
"learning_rate": 9.661044148624038e-06,
"loss": 0.8258,
"step": 159
},
{
"epoch": 0.55,
"learning_rate": 9.548126201664398e-06,
"loss": 0.7536,
"step": 160
},
{
"epoch": 0.55,
"learning_rate": 9.435265960933304e-06,
"loss": 0.739,
"step": 161
},
{
"epoch": 0.55,
"learning_rate": 9.322477839169156e-06,
"loss": 0.7535,
"step": 162
},
{
"epoch": 0.56,
"learning_rate": 9.209776239900453e-06,
"loss": 0.8141,
"step": 163
},
{
"epoch": 0.56,
"learning_rate": 9.097175555606396e-06,
"loss": 0.7229,
"step": 164
},
{
"epoch": 0.56,
"learning_rate": 8.98469016587892e-06,
"loss": 0.8472,
"step": 165
},
{
"epoch": 0.57,
"learning_rate": 8.872334435586333e-06,
"loss": 0.7363,
"step": 166
},
{
"epoch": 0.57,
"learning_rate": 8.76012271303888e-06,
"loss": 0.7183,
"step": 167
},
{
"epoch": 0.57,
"learning_rate": 8.648069328156403e-06,
"loss": 0.741,
"step": 168
},
{
"epoch": 0.58,
"learning_rate": 8.536188590638334e-06,
"loss": 0.7531,
"step": 169
},
{
"epoch": 0.58,
"learning_rate": 8.424494788136303e-06,
"loss": 0.8059,
"step": 170
},
{
"epoch": 0.58,
"learning_rate": 8.313002184429529e-06,
"loss": 0.8084,
"step": 171
},
{
"epoch": 0.59,
"learning_rate": 8.201725017603277e-06,
"loss": 0.8038,
"step": 172
},
{
"epoch": 0.59,
"learning_rate": 8.090677498230598e-06,
"loss": 0.7679,
"step": 173
},
{
"epoch": 0.59,
"learning_rate": 7.97987380755757e-06,
"loss": 0.7534,
"step": 174
},
{
"epoch": 0.6,
"learning_rate": 7.869328095692313e-06,
"loss": 0.6547,
"step": 175
},
{
"epoch": 0.6,
"learning_rate": 7.759054479797924e-06,
"loss": 0.752,
"step": 176
},
{
"epoch": 0.6,
"learning_rate": 7.649067042289681e-06,
"loss": 0.7703,
"step": 177
},
{
"epoch": 0.61,
"learning_rate": 7.539379829036652e-06,
"loss": 0.7418,
"step": 178
},
{
"epoch": 0.61,
"learning_rate": 7.430006847567972e-06,
"loss": 0.7699,
"step": 179
},
{
"epoch": 0.61,
"learning_rate": 7.320962065284032e-06,
"loss": 0.7668,
"step": 180
},
{
"epoch": 0.62,
"learning_rate": 7.2122594076727705e-06,
"loss": 0.6996,
"step": 181
},
{
"epoch": 0.62,
"learning_rate": 7.1039127565313285e-06,
"loss": 0.8008,
"step": 182
},
{
"epoch": 0.62,
"learning_rate": 6.995935948193294e-06,
"loss": 0.8206,
"step": 183
},
{
"epoch": 0.63,
"learning_rate": 6.888342771761737e-06,
"loss": 0.789,
"step": 184
},
{
"epoch": 0.63,
"learning_rate": 6.781146967348283e-06,
"loss": 0.7528,
"step": 185
},
{
"epoch": 0.63,
"learning_rate": 6.6743622243184405e-06,
"loss": 0.7225,
"step": 186
},
{
"epoch": 0.64,
"learning_rate": 6.568002179543409e-06,
"loss": 0.8076,
"step": 187
},
{
"epoch": 0.64,
"learning_rate": 6.462080415658591e-06,
"loss": 0.77,
"step": 188
},
{
"epoch": 0.65,
"learning_rate": 6.356610459329038e-06,
"loss": 0.7526,
"step": 189
},
{
"epoch": 0.65,
"learning_rate": 6.251605779522032e-06,
"loss": 0.8311,
"step": 190
},
{
"epoch": 0.65,
"learning_rate": 6.147079785787038e-06,
"loss": 0.8706,
"step": 191
},
{
"epoch": 0.66,
"learning_rate": 6.043045826543254e-06,
"loss": 0.7817,
"step": 192
},
{
"epoch": 0.66,
"learning_rate": 5.93951718737495e-06,
"loss": 0.8314,
"step": 193
},
{
"epoch": 0.66,
"learning_rate": 5.836507089334849e-06,
"loss": 0.7774,
"step": 194
},
{
"epoch": 0.67,
"learning_rate": 5.7340286872557515e-06,
"loss": 0.7791,
"step": 195
},
{
"epoch": 0.67,
"learning_rate": 5.6320950680705826e-06,
"loss": 0.7239,
"step": 196
},
{
"epoch": 0.67,
"learning_rate": 5.530719249141148e-06,
"loss": 0.8481,
"step": 197
},
{
"epoch": 0.68,
"learning_rate": 5.429914176595772e-06,
"loss": 0.7794,
"step": 198
},
{
"epoch": 0.68,
"learning_rate": 5.329692723675994e-06,
"loss": 0.7363,
"step": 199
},
{
"epoch": 0.68,
"learning_rate": 5.230067689092629e-06,
"loss": 0.8312,
"step": 200
},
{
"epoch": 0.69,
"learning_rate": 5.131051795391302e-06,
"loss": 0.7527,
"step": 201
},
{
"epoch": 0.69,
"learning_rate": 5.03265768732772e-06,
"loss": 0.7228,
"step": 202
},
{
"epoch": 0.69,
"learning_rate": 4.934897930252887e-06,
"loss": 0.8204,
"step": 203
},
{
"epoch": 0.7,
"learning_rate": 4.837785008508462e-06,
"loss": 0.7834,
"step": 204
},
{
"epoch": 0.7,
"learning_rate": 4.7413313238324556e-06,
"loss": 0.7664,
"step": 205
},
{
"epoch": 0.7,
"learning_rate": 4.645549193775452e-06,
"loss": 0.7509,
"step": 206
},
{
"epoch": 0.71,
"learning_rate": 4.550450850127626e-06,
"loss": 0.7836,
"step": 207
},
{
"epoch": 0.71,
"learning_rate": 4.4560484373566945e-06,
"loss": 0.75,
"step": 208
},
{
"epoch": 0.71,
"learning_rate": 4.3623540110569935e-06,
"loss": 0.7374,
"step": 209
},
{
"epoch": 0.72,
"learning_rate": 4.26937953640995e-06,
"loss": 0.7208,
"step": 210
},
{
"epoch": 0.72,
"learning_rate": 4.177136886656067e-06,
"loss": 0.7699,
"step": 211
},
{
"epoch": 0.72,
"learning_rate": 4.085637841578652e-06,
"loss": 0.7434,
"step": 212
},
{
"epoch": 0.73,
"learning_rate": 3.9948940859994964e-06,
"loss": 0.7305,
"step": 213
},
{
"epoch": 0.73,
"learning_rate": 3.9049172082866786e-06,
"loss": 0.7795,
"step": 214
},
{
"epoch": 0.73,
"learning_rate": 3.815718698874672e-06,
"loss": 0.7743,
"step": 215
},
{
"epoch": 0.74,
"learning_rate": 3.727309948796963e-06,
"loss": 0.706,
"step": 216
},
{
"epoch": 0.74,
"learning_rate": 3.6397022482313804e-06,
"loss": 0.7155,
"step": 217
},
{
"epoch": 0.74,
"learning_rate": 3.552906785058278e-06,
"loss": 0.7618,
"step": 218
},
{
"epoch": 0.75,
"learning_rate": 3.466934643431795e-06,
"loss": 0.776,
"step": 219
},
{
"epoch": 0.75,
"learning_rate": 3.3817968023643766e-06,
"loss": 0.7556,
"step": 220
},
{
"epoch": 0.75,
"learning_rate": 3.2975041343246937e-06,
"loss": 0.7645,
"step": 221
},
{
"epoch": 0.76,
"learning_rate": 3.214067403849179e-06,
"loss": 0.7757,
"step": 222
},
{
"epoch": 0.76,
"learning_rate": 3.1314972661673572e-06,
"loss": 0.778,
"step": 223
},
{
"epoch": 0.76,
"learning_rate": 3.0498042658411276e-06,
"loss": 0.7783,
"step": 224
},
{
"epoch": 0.77,
"learning_rate": 2.9689988354181742e-06,
"loss": 0.7602,
"step": 225
},
{
"epoch": 0.77,
"learning_rate": 2.8890912940996784e-06,
"loss": 0.7106,
"step": 226
},
{
"epoch": 0.77,
"learning_rate": 2.8100918464225304e-06,
"loss": 0.6951,
"step": 227
},
{
"epoch": 0.78,
"learning_rate": 2.7320105809561415e-06,
"loss": 0.7384,
"step": 228
},
{
"epoch": 0.78,
"learning_rate": 2.654857469014113e-06,
"loss": 0.7446,
"step": 229
},
{
"epoch": 0.78,
"learning_rate": 2.5786423633808487e-06,
"loss": 0.7311,
"step": 230
},
{
"epoch": 0.79,
"learning_rate": 2.5033749970533015e-06,
"loss": 0.734,
"step": 231
},
{
"epoch": 0.79,
"learning_rate": 2.4290649819980404e-06,
"loss": 0.7467,
"step": 232
},
{
"epoch": 0.8,
"learning_rate": 2.3557218079237608e-06,
"loss": 0.7501,
"step": 233
},
{
"epoch": 0.8,
"learning_rate": 2.283354841069403e-06,
"loss": 0.746,
"step": 234
},
{
"epoch": 0.8,
"learning_rate": 2.211973323008041e-06,
"loss": 0.7515,
"step": 235
},
{
"epoch": 0.81,
"learning_rate": 2.1415863694666973e-06,
"loss": 0.7672,
"step": 236
},
{
"epoch": 0.81,
"learning_rate": 2.072202969162234e-06,
"loss": 0.7621,
"step": 237
},
{
"epoch": 0.81,
"learning_rate": 2.0038319826534312e-06,
"loss": 0.7432,
"step": 238
},
{
"epoch": 0.82,
"learning_rate": 1.936482141209486e-06,
"loss": 0.8442,
"step": 239
},
{
"epoch": 0.82,
"learning_rate": 1.870162045694971e-06,
"loss": 0.7738,
"step": 240
},
{
"epoch": 0.82,
"learning_rate": 1.8048801654714687e-06,
"loss": 0.7054,
"step": 241
},
{
"epoch": 0.83,
"learning_rate": 1.7406448373160024e-06,
"loss": 0.6819,
"step": 242
},
{
"epoch": 0.83,
"learning_rate": 1.6774642643563955e-06,
"loss": 0.7322,
"step": 243
},
{
"epoch": 0.83,
"learning_rate": 1.615346515023698e-06,
"loss": 0.7504,
"step": 244
},
{
"epoch": 0.84,
"learning_rate": 1.5542995220217961e-06,
"loss": 0.7721,
"step": 245
},
{
"epoch": 0.84,
"learning_rate": 1.4943310813144006e-06,
"loss": 0.6799,
"step": 246
},
{
"epoch": 0.84,
"learning_rate": 1.4354488511294418e-06,
"loss": 0.7524,
"step": 247
},
{
"epoch": 0.85,
"learning_rate": 1.3776603509810938e-06,
"loss": 0.7943,
"step": 248
},
{
"epoch": 0.85,
"learning_rate": 1.3209729607095022e-06,
"loss": 0.772,
"step": 249
},
{
"epoch": 0.85,
"learning_rate": 1.2653939195383448e-06,
"loss": 0.7735,
"step": 250
},
{
"epoch": 0.86,
"learning_rate": 1.2109303251503434e-06,
"loss": 0.8155,
"step": 251
},
{
"epoch": 0.86,
"learning_rate": 1.1575891327808664e-06,
"loss": 0.7834,
"step": 252
},
{
"epoch": 0.86,
"learning_rate": 1.1053771543297198e-06,
"loss": 0.7563,
"step": 253
},
{
"epoch": 0.87,
"learning_rate": 1.0543010574912305e-06,
"loss": 0.7114,
"step": 254
},
{
"epoch": 0.87,
"learning_rate": 1.0043673649027519e-06,
"loss": 0.8082,
"step": 255
},
{
"epoch": 0.87,
"learning_rate": 9.555824533117064e-07,
"loss": 0.7351,
"step": 256
},
{
"epoch": 0.88,
"learning_rate": 9.079525527612321e-07,
"loss": 0.6883,
"step": 257
},
{
"epoch": 0.88,
"learning_rate": 8.614837457945868e-07,
"loss": 0.7643,
"step": 258
},
{
"epoch": 0.88,
"learning_rate": 8.161819666783888e-07,
"loss": 0.8028,
"step": 259
},
{
"epoch": 0.89,
"learning_rate": 7.720530006447735e-07,
"loss": 0.7222,
"step": 260
},
{
"epoch": 0.89,
"learning_rate": 7.291024831525961e-07,
"loss": 0.7265,
"step": 261
},
{
"epoch": 0.89,
"learning_rate": 6.87335899167767e-07,
"loss": 0.7656,
"step": 262
},
{
"epoch": 0.9,
"learning_rate": 6.467585824627886e-07,
"loss": 0.7666,
"step": 263
},
{
"epoch": 0.9,
"learning_rate": 6.073757149356185e-07,
"loss": 0.7724,
"step": 264
},
{
"epoch": 0.9,
"learning_rate": 5.691923259479093e-07,
"loss": 0.7669,
"step": 265
},
{
"epoch": 0.91,
"learning_rate": 5.322132916827483e-07,
"loss": 0.6936,
"step": 266
},
{
"epoch": 0.91,
"learning_rate": 4.964433345219354e-07,
"loss": 0.7554,
"step": 267
},
{
"epoch": 0.91,
"learning_rate": 4.6188702244292614e-07,
"loss": 0.7711,
"step": 268
},
{
"epoch": 0.92,
"learning_rate": 4.285487684354772e-07,
"loss": 0.7357,
"step": 269
},
{
"epoch": 0.92,
"learning_rate": 3.96432829938086e-07,
"loss": 0.7777,
"step": 270
},
{
"epoch": 0.92,
"learning_rate": 3.6554330829429716e-07,
"loss": 0.805,
"step": 271
},
{
"epoch": 0.93,
"learning_rate": 3.3588414822895097e-07,
"loss": 0.7552,
"step": 272
},
{
"epoch": 0.93,
"learning_rate": 3.0745913734441357e-07,
"loss": 0.7379,
"step": 273
},
{
"epoch": 0.94,
"learning_rate": 2.8027190563689745e-07,
"loss": 0.7398,
"step": 274
},
{
"epoch": 0.94,
"learning_rate": 2.5432592503288e-07,
"loss": 0.7244,
"step": 275
},
{
"epoch": 0.94,
"learning_rate": 2.2962450894573606e-07,
"loss": 0.7314,
"step": 276
},
{
"epoch": 0.95,
"learning_rate": 2.0617081185259512e-07,
"loss": 0.7479,
"step": 277
},
{
"epoch": 0.95,
"learning_rate": 1.8396782889150144e-07,
"loss": 0.7381,
"step": 278
},
{
"epoch": 0.95,
"learning_rate": 1.630183954789233e-07,
"loss": 0.7126,
"step": 279
},
{
"epoch": 0.96,
"learning_rate": 1.4332518694765708e-07,
"loss": 0.7229,
"step": 280
},
{
"epoch": 0.96,
"learning_rate": 1.2489071820517394e-07,
"loss": 0.6955,
"step": 281
},
{
"epoch": 0.96,
"learning_rate": 1.0771734341246121e-07,
"loss": 0.7215,
"step": 282
},
{
"epoch": 0.97,
"learning_rate": 9.180725568338045e-08,
"loss": 0.7687,
"step": 283
},
{
"epoch": 0.97,
"learning_rate": 7.716248680459726e-08,
"loss": 0.7748,
"step": 284
},
{
"epoch": 0.97,
"learning_rate": 6.378490697611761e-08,
"loss": 0.761,
"step": 285
},
{
"epoch": 0.98,
"learning_rate": 5.1676224572452246e-08,
"loss": 0.7746,
"step": 286
},
{
"epoch": 0.98,
"learning_rate": 4.083798592444899e-08,
"loss": 0.7059,
"step": 287
},
{
"epoch": 0.98,
"learning_rate": 3.127157512182288e-08,
"loss": 0.7545,
"step": 288
},
{
"epoch": 0.99,
"learning_rate": 2.2978213836400974e-08,
"loss": 0.7726,
"step": 289
},
{
"epoch": 0.99,
"learning_rate": 1.5958961166104847e-08,
"loss": 0.6931,
"step": 290
},
{
"epoch": 0.99,
"learning_rate": 1.0214713499706596e-08,
"loss": 0.7208,
"step": 291
},
{
"epoch": 1.0,
"learning_rate": 5.7462044023515186e-09,
"loss": 0.7379,
"step": 292
},
{
"epoch": 1.0,
"learning_rate": 2.5540045218819256e-09,
"loss": 0.7908,
"step": 293
},
{
"epoch": 1.0,
"step": 293,
"total_flos": 133458268946432.0,
"train_loss": 0.798602851381074,
"train_runtime": 7530.3249,
"train_samples_per_second": 1.866,
"train_steps_per_second": 0.039
}
],
"logging_steps": 1.0,
"max_steps": 293,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"total_flos": 133458268946432.0,
"train_batch_size": 12,
"trial_name": null,
"trial_params": null
}
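
The log_history above records one entry per optimizer step (learning rate and loss for that step), plus a final summary entry with train_loss, train_runtime, and throughput. Below is a minimal sketch of how one might load this state and plot the loss curve; it assumes the JSON is saved locally as trainer_state.json and that matplotlib is installed, neither of which is implied by the repository itself.

    # Minimal sketch: load the trainer state shown above and plot loss vs. step.
    # Assumptions (not part of the repo): the file is saved locally as
    # "trainer_state.json" and matplotlib is available.
    import json

    import matplotlib.pyplot as plt

    with open("trainer_state.json") as f:
        state = json.load(f)

    # Entries with a "loss" key are per-step logs; the final summary entry
    # uses "train_loss" instead and is therefore skipped by this filter.
    steps = [e["step"] for e in state["log_history"] if "loss" in e]
    losses = [e["loss"] for e in state["log_history"] if "loss" in e]

    plt.plot(steps, losses)
    plt.xlabel("global step")
    plt.ylabel("training loss")
    plt.title(f"epoch {state['epoch']}, {state['global_step']} steps")
    plt.show()

For this run the curve should start near 1.04 and settle around 0.75 by step 293, consistent with the reported train_loss of roughly 0.799.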