{
  "best_metric": 0.6818761825561523,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.1345668629100084,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.002691337258200168,
      "grad_norm": 0.2545667290687561,
      "learning_rate": 5e-05,
      "loss": 0.8201,
      "step": 1
    },
    {
      "epoch": 0.002691337258200168,
      "eval_loss": 0.967555820941925,
      "eval_runtime": 27.8167,
      "eval_samples_per_second": 90.018,
      "eval_steps_per_second": 11.252,
      "step": 1
    },
    {
      "epoch": 0.005382674516400336,
      "grad_norm": 0.34543758630752563,
      "learning_rate": 0.0001,
      "loss": 0.9103,
      "step": 2
    },
    {
      "epoch": 0.008074011774600504,
      "grad_norm": 0.33696243166923523,
      "learning_rate": 9.989294616193017e-05,
      "loss": 0.933,
      "step": 3
    },
    {
      "epoch": 0.010765349032800672,
      "grad_norm": 0.3235072195529938,
      "learning_rate": 9.957224306869053e-05,
      "loss": 0.9148,
      "step": 4
    },
    {
      "epoch": 0.01345668629100084,
      "grad_norm": 0.29262658953666687,
      "learning_rate": 9.903926402016153e-05,
      "loss": 0.8203,
      "step": 5
    },
    {
      "epoch": 0.01614802354920101,
      "grad_norm": 0.2570266127586365,
      "learning_rate": 9.829629131445342e-05,
      "loss": 0.8314,
      "step": 6
    },
    {
      "epoch": 0.018839360807401177,
      "grad_norm": 0.22506366670131683,
      "learning_rate": 9.73465064747553e-05,
      "loss": 0.7602,
      "step": 7
    },
    {
      "epoch": 0.021530698065601345,
      "grad_norm": 0.22845478355884552,
      "learning_rate": 9.619397662556435e-05,
      "loss": 0.7756,
      "step": 8
    },
    {
      "epoch": 0.024222035323801513,
      "grad_norm": 0.31451737880706787,
      "learning_rate": 9.484363707663442e-05,
      "loss": 0.8014,
      "step": 9
    },
    {
      "epoch": 0.02691337258200168,
      "grad_norm": 0.3066512644290924,
      "learning_rate": 9.330127018922194e-05,
      "loss": 0.8079,
      "step": 10
    },
    {
      "epoch": 0.02960470984020185,
      "grad_norm": 0.27897998690605164,
      "learning_rate": 9.157348061512727e-05,
      "loss": 0.7658,
      "step": 11
    },
    {
      "epoch": 0.03229604709840202,
      "grad_norm": 0.23739658296108246,
      "learning_rate": 8.966766701456177e-05,
      "loss": 0.7395,
      "step": 12
    },
    {
      "epoch": 0.03498738435660219,
      "grad_norm": 0.1737861931324005,
      "learning_rate": 8.759199037394887e-05,
      "loss": 0.645,
      "step": 13
    },
    {
      "epoch": 0.03767872161480235,
      "grad_norm": 0.17162172496318817,
      "learning_rate": 8.535533905932738e-05,
      "loss": 0.7153,
      "step": 14
    },
    {
      "epoch": 0.040370058873002525,
      "grad_norm": 0.15028245747089386,
      "learning_rate": 8.296729075500344e-05,
      "loss": 0.7591,
      "step": 15
    },
    {
      "epoch": 0.04306139613120269,
      "grad_norm": 0.1524060070514679,
      "learning_rate": 8.043807145043604e-05,
      "loss": 0.741,
      "step": 16
    },
    {
      "epoch": 0.04575273338940286,
      "grad_norm": 0.1524454951286316,
      "learning_rate": 7.777851165098012e-05,
      "loss": 0.7214,
      "step": 17
    },
    {
      "epoch": 0.048444070647603026,
      "grad_norm": 0.16076746582984924,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.7221,
      "step": 18
    },
    {
      "epoch": 0.0511354079058032,
      "grad_norm": 0.16362418234348297,
      "learning_rate": 7.211443451095007e-05,
      "loss": 0.7566,
      "step": 19
    },
    {
      "epoch": 0.05382674516400336,
      "grad_norm": 0.16996099054813385,
      "learning_rate": 6.91341716182545e-05,
      "loss": 0.7022,
      "step": 20
    },
    {
      "epoch": 0.05651808242220353,
      "grad_norm": 0.16316403448581696,
      "learning_rate": 6.607197326515808e-05,
      "loss": 0.7155,
      "step": 21
    },
    {
      "epoch": 0.0592094196804037,
      "grad_norm": 0.16969940066337585,
      "learning_rate": 6.294095225512603e-05,
      "loss": 0.7168,
      "step": 22
    },
    {
      "epoch": 0.06190075693860387,
      "grad_norm": 0.17208510637283325,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 0.739,
      "step": 23
    },
    {
      "epoch": 0.06459209419680403,
      "grad_norm": 0.17505396902561188,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 0.6977,
      "step": 24
    },
    {
      "epoch": 0.0672834314550042,
      "grad_norm": 0.18648380041122437,
      "learning_rate": 5.327015646150716e-05,
      "loss": 0.6757,
      "step": 25
    },
    {
      "epoch": 0.0672834314550042,
      "eval_loss": 0.693597674369812,
      "eval_runtime": 27.2048,
      "eval_samples_per_second": 92.042,
      "eval_steps_per_second": 11.505,
      "step": 25
    },
    {
      "epoch": 0.06997476871320438,
      "grad_norm": 0.13479048013687134,
      "learning_rate": 5e-05,
      "loss": 0.6979,
      "step": 26
    },
    {
      "epoch": 0.07266610597140453,
      "grad_norm": 0.14756473898887634,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 0.7009,
      "step": 27
    },
    {
      "epoch": 0.0753574432296047,
      "grad_norm": 0.14167127013206482,
      "learning_rate": 4.347369038899744e-05,
      "loss": 0.6821,
      "step": 28
    },
    {
      "epoch": 0.07804878048780488,
      "grad_norm": 0.14789500832557678,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 0.7102,
      "step": 29
    },
    {
      "epoch": 0.08074011774600505,
      "grad_norm": 0.14351996779441833,
      "learning_rate": 3.705904774487396e-05,
      "loss": 0.7426,
      "step": 30
    },
    {
      "epoch": 0.08343145500420522,
      "grad_norm": 0.136067196726799,
      "learning_rate": 3.392802673484193e-05,
      "loss": 0.6718,
      "step": 31
    },
    {
      "epoch": 0.08612279226240538,
      "grad_norm": 0.14951838552951813,
      "learning_rate": 3.086582838174551e-05,
      "loss": 0.6889,
      "step": 32
    },
    {
      "epoch": 0.08881412952060555,
      "grad_norm": 0.14795440435409546,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 0.7103,
      "step": 33
    },
    {
      "epoch": 0.09150546677880572,
      "grad_norm": 0.15034087002277374,
      "learning_rate": 2.500000000000001e-05,
      "loss": 0.6955,
      "step": 34
    },
    {
      "epoch": 0.0941968040370059,
      "grad_norm": 0.14145706593990326,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 0.6991,
      "step": 35
    },
    {
      "epoch": 0.09688814129520605,
      "grad_norm": 0.1578945815563202,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 0.7034,
      "step": 36
    },
    {
      "epoch": 0.09957947855340622,
      "grad_norm": 0.16180913150310516,
      "learning_rate": 1.703270924499656e-05,
      "loss": 0.6644,
      "step": 37
    },
    {
      "epoch": 0.1022708158116064,
      "grad_norm": 0.12289156764745712,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 0.6528,
      "step": 38
    },
    {
      "epoch": 0.10496215306980657,
      "grad_norm": 0.11805767565965652,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 0.68,
      "step": 39
    },
    {
      "epoch": 0.10765349032800672,
      "grad_norm": 0.12661293148994446,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 0.74,
      "step": 40
    },
    {
      "epoch": 0.1103448275862069,
      "grad_norm": 0.12309448421001434,
      "learning_rate": 8.426519384872733e-06,
      "loss": 0.7105,
      "step": 41
    },
    {
      "epoch": 0.11303616484440707,
      "grad_norm": 0.1316811442375183,
      "learning_rate": 6.698729810778065e-06,
      "loss": 0.7034,
      "step": 42
    },
    {
      "epoch": 0.11572750210260724,
      "grad_norm": 0.12469199299812317,
      "learning_rate": 5.156362923365588e-06,
      "loss": 0.7072,
      "step": 43
    },
    {
      "epoch": 0.1184188393608074,
      "grad_norm": 0.13566303253173828,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 0.708,
      "step": 44
    },
    {
      "epoch": 0.12111017661900757,
      "grad_norm": 0.13655786216259003,
      "learning_rate": 2.653493525244721e-06,
      "loss": 0.6734,
      "step": 45
    },
    {
      "epoch": 0.12380151387720774,
      "grad_norm": 0.13654503226280212,
      "learning_rate": 1.70370868554659e-06,
      "loss": 0.6583,
      "step": 46
    },
    {
      "epoch": 0.1264928511354079,
      "grad_norm": 0.14090988039970398,
      "learning_rate": 9.607359798384785e-07,
      "loss": 0.7098,
      "step": 47
    },
    {
      "epoch": 0.12918418839360807,
      "grad_norm": 0.1505599319934845,
      "learning_rate": 4.277569313094809e-07,
      "loss": 0.7067,
      "step": 48
    },
    {
      "epoch": 0.13187552565180824,
      "grad_norm": 0.15436117351055145,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 0.6933,
      "step": 49
    },
    {
      "epoch": 0.1345668629100084,
      "grad_norm": 0.19895680248737335,
      "learning_rate": 0.0,
      "loss": 0.6498,
      "step": 50
    },
    {
      "epoch": 0.1345668629100084,
      "eval_loss": 0.6818761825561523,
      "eval_runtime": 28.2352,
      "eval_samples_per_second": 88.684,
      "eval_steps_per_second": 11.085,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.0452536871039795e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}