{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 121.21212121212122,
"eval_steps": 500,
"global_step": 250,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 4.848484848484849,
"grad_norm": 0.5543263554573059,
"learning_rate": 0.0001992114701314478,
"loss": 2.7902,
"step": 10
},
{
"epoch": 9.696969696969697,
"grad_norm": 0.6367977857589722,
"learning_rate": 0.0001968583161128631,
"loss": 2.3062,
"step": 20
},
{
"epoch": 14.545454545454545,
"grad_norm": 0.479856014251709,
"learning_rate": 0.00019297764858882514,
"loss": 1.7416,
"step": 30
},
{
"epoch": 19.393939393939394,
"grad_norm": 0.29826533794403076,
"learning_rate": 0.00018763066800438636,
"loss": 1.4342,
"step": 40
},
{
"epoch": 24.242424242424242,
"grad_norm": 0.27084779739379883,
"learning_rate": 0.00018090169943749476,
"loss": 1.3399,
"step": 50
},
{
"epoch": 29.09090909090909,
"grad_norm": 0.2398836314678192,
"learning_rate": 0.00017289686274214118,
"loss": 1.2648,
"step": 60
},
{
"epoch": 33.93939393939394,
"grad_norm": 0.22510196268558502,
"learning_rate": 0.000163742398974869,
"loss": 1.2083,
"step": 70
},
{
"epoch": 38.78787878787879,
"grad_norm": 0.2365349531173706,
"learning_rate": 0.00015358267949789966,
"loss": 1.1641,
"step": 80
},
{
"epoch": 43.63636363636363,
"grad_norm": 0.270672082901001,
"learning_rate": 0.00014257792915650728,
"loss": 1.1283,
"step": 90
},
{
"epoch": 48.484848484848484,
"grad_norm": 0.2681578993797302,
"learning_rate": 0.00013090169943749476,
"loss": 1.0801,
"step": 100
},
{
"epoch": 53.333333333333336,
"grad_norm": 0.31493374705314636,
"learning_rate": 0.00011873813145857249,
"loss": 1.0366,
"step": 110
},
{
"epoch": 58.18181818181818,
"grad_norm": 0.3526628315448761,
"learning_rate": 0.00010627905195293135,
"loss": 1.0097,
"step": 120
},
{
"epoch": 63.03030303030303,
"grad_norm": 0.31481245160102844,
"learning_rate": 9.372094804706867e-05,
"loss": 0.9806,
"step": 130
},
{
"epoch": 67.87878787878788,
"grad_norm": 0.3324221074581146,
"learning_rate": 8.126186854142752e-05,
"loss": 0.9474,
"step": 140
},
{
"epoch": 72.72727272727273,
"grad_norm": 0.32580453157424927,
"learning_rate": 6.909830056250527e-05,
"loss": 0.9228,
"step": 150
},
{
"epoch": 77.57575757575758,
"grad_norm": 0.3470986485481262,
"learning_rate": 5.7422070843492734e-05,
"loss": 0.9007,
"step": 160
},
{
"epoch": 82.42424242424242,
"grad_norm": 0.4177663326263428,
"learning_rate": 4.6417320502100316e-05,
"loss": 0.8839,
"step": 170
},
{
"epoch": 87.27272727272727,
"grad_norm": 0.4714958965778351,
"learning_rate": 3.6257601025131026e-05,
"loss": 0.8706,
"step": 180
},
{
"epoch": 92.12121212121212,
"grad_norm": 0.4196126163005829,
"learning_rate": 2.7103137257858868e-05,
"loss": 0.8541,
"step": 190
},
{
"epoch": 96.96969696969697,
"grad_norm": 0.35907700657844543,
"learning_rate": 1.9098300562505266e-05,
"loss": 0.8503,
"step": 200
},
{
"epoch": 101.81818181818181,
"grad_norm": 0.40164825320243835,
"learning_rate": 1.2369331995613665e-05,
"loss": 0.8449,
"step": 210
},
{
"epoch": 106.66666666666667,
"grad_norm": 0.37763774394989014,
"learning_rate": 7.022351411174866e-06,
"loss": 0.8384,
"step": 220
},
{
"epoch": 111.51515151515152,
"grad_norm": 0.3757176101207733,
"learning_rate": 3.1416838871368924e-06,
"loss": 0.8405,
"step": 230
},
{
"epoch": 116.36363636363636,
"grad_norm": 0.4176121950149536,
"learning_rate": 7.885298685522235e-07,
"loss": 0.8299,
"step": 240
},
{
"epoch": 121.21212121212122,
"grad_norm": 0.47404736280441284,
"learning_rate": 0.0,
"loss": 0.8342,
"step": 250
}
],
"logging_steps": 10,
"max_steps": 250,
"num_input_tokens_seen": 0,
"num_train_epochs": 125,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 6950976392371200.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}