{
  "best_metric": 0.26503002643585205,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.14217167229429536,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0028434334458859074,
      "grad_norm": 22.415369033813477,
      "learning_rate": 5e-05,
      "loss": 5.3378,
      "step": 1
    },
    {
      "epoch": 0.0028434334458859074,
      "eval_loss": 5.232264995574951,
      "eval_runtime": 3.9483,
      "eval_samples_per_second": 12.664,
      "eval_steps_per_second": 3.293,
      "step": 1
    },
    {
      "epoch": 0.005686866891771815,
      "grad_norm": 23.209169387817383,
      "learning_rate": 0.0001,
      "loss": 5.2597,
      "step": 2
    },
    {
      "epoch": 0.008530300337657722,
      "grad_norm": 15.02617073059082,
      "learning_rate": 9.990365154573717e-05,
      "loss": 3.7897,
      "step": 3
    },
    {
      "epoch": 0.01137373378354363,
      "grad_norm": 14.549369812011719,
      "learning_rate": 9.961501876182148e-05,
      "loss": 1.8205,
      "step": 4
    },
    {
      "epoch": 0.014217167229429535,
      "grad_norm": 7.76815938949585,
      "learning_rate": 9.913533761814537e-05,
      "loss": 0.6944,
      "step": 5
    },
    {
      "epoch": 0.017060600675315445,
      "grad_norm": 4.215451717376709,
      "learning_rate": 9.846666218300807e-05,
      "loss": 0.47,
      "step": 6
    },
    {
      "epoch": 0.019904034121201352,
      "grad_norm": 2.1003599166870117,
      "learning_rate": 9.761185582727977e-05,
      "loss": 0.3876,
      "step": 7
    },
    {
      "epoch": 0.02274746756708726,
      "grad_norm": 1.887962818145752,
      "learning_rate": 9.657457896300791e-05,
      "loss": 0.3898,
      "step": 8
    },
    {
      "epoch": 0.025590901012973163,
      "grad_norm": 1.115380883216858,
      "learning_rate": 9.535927336897098e-05,
      "loss": 0.3581,
      "step": 9
    },
    {
      "epoch": 0.02843433445885907,
      "grad_norm": 1.0263733863830566,
      "learning_rate": 9.397114317029975e-05,
      "loss": 0.3522,
      "step": 10
    },
    {
      "epoch": 0.03127776790474498,
      "grad_norm": 3.604118585586548,
      "learning_rate": 9.241613255361455e-05,
      "loss": 0.4661,
      "step": 11
    },
    {
      "epoch": 0.03412120135063089,
      "grad_norm": 1.8551830053329468,
      "learning_rate": 9.070090031310558e-05,
      "loss": 0.3639,
      "step": 12
    },
    {
      "epoch": 0.03696463479651679,
      "grad_norm": 1.1953414678573608,
      "learning_rate": 8.883279133655399e-05,
      "loss": 0.3007,
      "step": 13
    },
    {
      "epoch": 0.039808068242402704,
      "grad_norm": 1.147956371307373,
      "learning_rate": 8.681980515339464e-05,
      "loss": 0.3732,
      "step": 14
    },
    {
      "epoch": 0.04265150168828861,
      "grad_norm": 0.7729864716529846,
      "learning_rate": 8.467056167950311e-05,
      "loss": 0.3532,
      "step": 15
    },
    {
      "epoch": 0.04549493513417452,
      "grad_norm": 2.4076788425445557,
      "learning_rate": 8.239426430539243e-05,
      "loss": 0.3822,
      "step": 16
    },
    {
      "epoch": 0.04833836858006042,
      "grad_norm": 2.3338794708251953,
      "learning_rate": 8.000066048588211e-05,
      "loss": 0.3718,
      "step": 17
    },
    {
      "epoch": 0.05118180202594633,
      "grad_norm": 1.7410494089126587,
      "learning_rate": 7.75e-05,
      "loss": 0.361,
      "step": 18
    },
    {
      "epoch": 0.05402523547183224,
      "grad_norm": 0.8371558785438538,
      "learning_rate": 7.490299105985507e-05,
      "loss": 0.3156,
      "step": 19
    },
    {
      "epoch": 0.05686866891771814,
      "grad_norm": 2.5813608169555664,
      "learning_rate": 7.222075445642904e-05,
      "loss": 0.3617,
      "step": 20
    },
    {
      "epoch": 0.05971210236360405,
      "grad_norm": 1.6700718402862549,
      "learning_rate": 6.946477593864228e-05,
      "loss": 0.3385,
      "step": 21
    },
    {
      "epoch": 0.06255553580948996,
      "grad_norm": 1.207481026649475,
      "learning_rate": 6.664685702961344e-05,
      "loss": 0.3765,
      "step": 22
    },
    {
      "epoch": 0.06539896925537586,
      "grad_norm": 1.1957097053527832,
      "learning_rate": 6.377906449072578e-05,
      "loss": 0.3412,
      "step": 23
    },
    {
      "epoch": 0.06824240270126178,
      "grad_norm": 1.484207272529602,
      "learning_rate": 6.087367864990233e-05,
      "loss": 0.3145,
      "step": 24
    },
    {
      "epoch": 0.07108583614714768,
      "grad_norm": 1.7423596382141113,
      "learning_rate": 5.794314081535644e-05,
      "loss": 0.2979,
      "step": 25
    },
    {
      "epoch": 0.07108583614714768,
      "eval_loss": 0.31171107292175293,
      "eval_runtime": 4.0078,
      "eval_samples_per_second": 12.476,
      "eval_steps_per_second": 3.244,
      "step": 25
    },
    {
      "epoch": 0.07392926959303359,
      "grad_norm": 1.1360609531402588,
      "learning_rate": 5.500000000000001e-05,
      "loss": 0.3498,
      "step": 26
    },
    {
      "epoch": 0.07677270303891949,
      "grad_norm": 1.0913562774658203,
      "learning_rate": 5.205685918464356e-05,
      "loss": 0.3328,
      "step": 27
    },
    {
      "epoch": 0.07961613648480541,
      "grad_norm": 2.2489614486694336,
      "learning_rate": 4.912632135009769e-05,
      "loss": 0.3531,
      "step": 28
    },
    {
      "epoch": 0.08245956993069131,
      "grad_norm": 1.245743751525879,
      "learning_rate": 4.6220935509274235e-05,
      "loss": 0.3458,
      "step": 29
    },
    {
      "epoch": 0.08530300337657722,
      "grad_norm": 0.9143818616867065,
      "learning_rate": 4.3353142970386564e-05,
      "loss": 0.3279,
      "step": 30
    },
    {
      "epoch": 0.08814643682246312,
      "grad_norm": 0.9616953134536743,
      "learning_rate": 4.053522406135775e-05,
      "loss": 0.3483,
      "step": 31
    },
    {
      "epoch": 0.09098987026834904,
      "grad_norm": 1.3551769256591797,
      "learning_rate": 3.777924554357096e-05,
      "loss": 0.3119,
      "step": 32
    },
    {
      "epoch": 0.09383330371423494,
      "grad_norm": 0.7638424038887024,
      "learning_rate": 3.509700894014496e-05,
      "loss": 0.2966,
      "step": 33
    },
    {
      "epoch": 0.09667673716012085,
      "grad_norm": 0.7949358820915222,
      "learning_rate": 3.250000000000001e-05,
      "loss": 0.3511,
      "step": 34
    },
    {
      "epoch": 0.09952017060600675,
      "grad_norm": 0.8913821578025818,
      "learning_rate": 2.9999339514117912e-05,
      "loss": 0.3015,
      "step": 35
    },
    {
      "epoch": 0.10236360405189265,
      "grad_norm": 1.1559334993362427,
      "learning_rate": 2.760573569460757e-05,
      "loss": 0.3538,
      "step": 36
    },
    {
      "epoch": 0.10520703749777857,
      "grad_norm": 1.223947286605835,
      "learning_rate": 2.53294383204969e-05,
      "loss": 0.3099,
      "step": 37
    },
    {
      "epoch": 0.10805047094366448,
      "grad_norm": 0.9780659675598145,
      "learning_rate": 2.3180194846605367e-05,
      "loss": 0.306,
      "step": 38
    },
    {
      "epoch": 0.11089390438955038,
      "grad_norm": 1.0553867816925049,
      "learning_rate": 2.1167208663446025e-05,
      "loss": 0.318,
      "step": 39
    },
    {
      "epoch": 0.11373733783543628,
      "grad_norm": 1.149210810661316,
      "learning_rate": 1.9299099686894423e-05,
      "loss": 0.2961,
      "step": 40
    },
    {
      "epoch": 0.1165807712813222,
      "grad_norm": 1.1460851430892944,
      "learning_rate": 1.758386744638546e-05,
      "loss": 0.2907,
      "step": 41
    },
    {
      "epoch": 0.1194242047272081,
      "grad_norm": 1.234253168106079,
      "learning_rate": 1.602885682970026e-05,
      "loss": 0.3047,
      "step": 42
    },
    {
      "epoch": 0.12226763817309401,
      "grad_norm": 1.5525425672531128,
      "learning_rate": 1.464072663102903e-05,
      "loss": 0.3576,
      "step": 43
    },
    {
      "epoch": 0.1251110716189799,
      "grad_norm": 1.1359039545059204,
      "learning_rate": 1.3425421036992098e-05,
      "loss": 0.3114,
      "step": 44
    },
    {
      "epoch": 0.12795450506486583,
      "grad_norm": 1.2153254747390747,
      "learning_rate": 1.2388144172720251e-05,
      "loss": 0.3103,
      "step": 45
    },
    {
      "epoch": 0.13079793851075172,
      "grad_norm": 1.3485732078552246,
      "learning_rate": 1.1533337816991932e-05,
      "loss": 0.3211,
      "step": 46
    },
    {
      "epoch": 0.13364137195663764,
      "grad_norm": 0.954004168510437,
      "learning_rate": 1.0864662381854632e-05,
      "loss": 0.2795,
      "step": 47
    },
    {
      "epoch": 0.13648480540252356,
      "grad_norm": 0.9658924341201782,
      "learning_rate": 1.0384981238178534e-05,
      "loss": 0.3035,
      "step": 48
    },
    {
      "epoch": 0.13932823884840945,
      "grad_norm": 0.9231831431388855,
      "learning_rate": 1.0096348454262845e-05,
      "loss": 0.2799,
      "step": 49
    },
    {
      "epoch": 0.14217167229429536,
      "grad_norm": 0.8936429619789124,
      "learning_rate": 1e-05,
      "loss": 0.2711,
      "step": 50
    },
    {
      "epoch": 0.14217167229429536,
      "eval_loss": 0.26503002643585205,
      "eval_runtime": 4.0026,
      "eval_samples_per_second": 12.492,
      "eval_steps_per_second": 3.248,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 6.174479344467968e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}