{
  "best_metric": 1.9384169578552246,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.7117437722419929,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.014234875444839857,
      "grad_norm": 4.127208709716797,
      "learning_rate": 5e-05,
      "loss": 4.7109,
      "step": 1
    },
    {
      "epoch": 0.014234875444839857,
      "eval_loss": 5.259206295013428,
      "eval_runtime": 22.3441,
      "eval_samples_per_second": 21.169,
      "eval_steps_per_second": 2.685,
      "step": 1
    },
    {
      "epoch": 0.028469750889679714,
      "grad_norm": 5.206362724304199,
      "learning_rate": 0.0001,
      "loss": 4.7682,
      "step": 2
    },
    {
      "epoch": 0.042704626334519574,
      "grad_norm": 8.270171165466309,
      "learning_rate": 9.989294616193017e-05,
      "loss": 4.599,
      "step": 3
    },
    {
      "epoch": 0.05693950177935943,
      "grad_norm": 3.9124391078948975,
      "learning_rate": 9.957224306869053e-05,
      "loss": 4.4156,
      "step": 4
    },
    {
      "epoch": 0.0711743772241993,
      "grad_norm": 3.400902032852173,
      "learning_rate": 9.903926402016153e-05,
      "loss": 3.7391,
      "step": 5
    },
    {
      "epoch": 0.08540925266903915,
      "grad_norm": 4.260780334472656,
      "learning_rate": 9.829629131445342e-05,
      "loss": 3.0167,
      "step": 6
    },
    {
      "epoch": 0.099644128113879,
      "grad_norm": 2.086765766143799,
      "learning_rate": 9.73465064747553e-05,
      "loss": 2.6469,
      "step": 7
    },
    {
      "epoch": 0.11387900355871886,
      "grad_norm": 1.7398333549499512,
      "learning_rate": 9.619397662556435e-05,
      "loss": 2.3918,
      "step": 8
    },
    {
      "epoch": 0.12811387900355872,
      "grad_norm": 2.3132033348083496,
      "learning_rate": 9.484363707663442e-05,
      "loss": 2.3496,
      "step": 9
    },
    {
      "epoch": 0.1423487544483986,
      "grad_norm": 1.6994189023971558,
      "learning_rate": 9.330127018922194e-05,
      "loss": 2.2287,
      "step": 10
    },
    {
      "epoch": 0.15658362989323843,
      "grad_norm": 1.3915258646011353,
      "learning_rate": 9.157348061512727e-05,
      "loss": 2.1647,
      "step": 11
    },
    {
      "epoch": 0.1708185053380783,
      "grad_norm": 1.4232553243637085,
      "learning_rate": 8.966766701456177e-05,
      "loss": 2.28,
      "step": 12
    },
    {
      "epoch": 0.18505338078291814,
      "grad_norm": 1.0158861875534058,
      "learning_rate": 8.759199037394887e-05,
      "loss": 2.0606,
      "step": 13
    },
    {
      "epoch": 0.199288256227758,
      "grad_norm": 1.0213874578475952,
      "learning_rate": 8.535533905932738e-05,
      "loss": 1.8842,
      "step": 14
    },
    {
      "epoch": 0.21352313167259787,
      "grad_norm": 0.9500750303268433,
      "learning_rate": 8.296729075500344e-05,
      "loss": 1.7835,
      "step": 15
    },
    {
      "epoch": 0.2277580071174377,
      "grad_norm": 0.8292347192764282,
      "learning_rate": 8.043807145043604e-05,
      "loss": 1.7693,
      "step": 16
    },
    {
      "epoch": 0.24199288256227758,
      "grad_norm": 0.8554771542549133,
      "learning_rate": 7.777851165098012e-05,
      "loss": 1.8696,
      "step": 17
    },
    {
      "epoch": 0.25622775800711745,
      "grad_norm": 0.840994656085968,
      "learning_rate": 7.500000000000001e-05,
      "loss": 1.9225,
      "step": 18
    },
    {
      "epoch": 0.2704626334519573,
      "grad_norm": 1.0327759981155396,
      "learning_rate": 7.211443451095007e-05,
      "loss": 2.0754,
      "step": 19
    },
    {
      "epoch": 0.2846975088967972,
      "grad_norm": 0.8893172144889832,
      "learning_rate": 6.91341716182545e-05,
      "loss": 2.1191,
      "step": 20
    },
    {
      "epoch": 0.298932384341637,
      "grad_norm": 0.7258279919624329,
      "learning_rate": 6.607197326515808e-05,
      "loss": 2.0286,
      "step": 21
    },
    {
      "epoch": 0.31316725978647686,
      "grad_norm": 0.9243592619895935,
      "learning_rate": 6.294095225512603e-05,
      "loss": 2.0714,
      "step": 22
    },
    {
      "epoch": 0.3274021352313167,
      "grad_norm": 0.9329895377159119,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 2.0226,
      "step": 23
    },
    {
      "epoch": 0.3416370106761566,
      "grad_norm": 0.9377391338348389,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 1.9961,
      "step": 24
    },
    {
      "epoch": 0.35587188612099646,
      "grad_norm": 0.9633881449699402,
      "learning_rate": 5.327015646150716e-05,
      "loss": 2.3204,
      "step": 25
    },
    {
      "epoch": 0.35587188612099646,
      "eval_loss": 1.9744657278060913,
      "eval_runtime": 22.4898,
      "eval_samples_per_second": 21.032,
      "eval_steps_per_second": 2.668,
      "step": 25
    },
    {
      "epoch": 0.3701067615658363,
      "grad_norm": 0.9666065573692322,
      "learning_rate": 5e-05,
      "loss": 1.8401,
      "step": 26
    },
    {
      "epoch": 0.38434163701067614,
      "grad_norm": 0.9301469326019287,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 1.7366,
      "step": 27
    },
    {
      "epoch": 0.398576512455516,
      "grad_norm": 0.7510563135147095,
      "learning_rate": 4.347369038899744e-05,
      "loss": 1.8588,
      "step": 28
    },
    {
      "epoch": 0.4128113879003559,
      "grad_norm": 0.8679802417755127,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 1.859,
      "step": 29
    },
    {
      "epoch": 0.42704626334519574,
      "grad_norm": 0.7821605801582336,
      "learning_rate": 3.705904774487396e-05,
      "loss": 1.8511,
      "step": 30
    },
    {
      "epoch": 0.4412811387900356,
      "grad_norm": 0.8082809448242188,
      "learning_rate": 3.392802673484193e-05,
      "loss": 1.8725,
      "step": 31
    },
    {
      "epoch": 0.4555160142348754,
      "grad_norm": 0.869269609451294,
      "learning_rate": 3.086582838174551e-05,
      "loss": 1.9881,
      "step": 32
    },
    {
      "epoch": 0.4697508896797153,
      "grad_norm": 0.8141599893569946,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 1.9531,
      "step": 33
    },
    {
      "epoch": 0.48398576512455516,
      "grad_norm": 0.7377688884735107,
      "learning_rate": 2.500000000000001e-05,
      "loss": 1.9266,
      "step": 34
    },
    {
      "epoch": 0.498220640569395,
      "grad_norm": 0.8427651524543762,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 2.0421,
      "step": 35
    },
    {
      "epoch": 0.5124555160142349,
      "grad_norm": 0.7727118134498596,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 2.1763,
      "step": 36
    },
    {
      "epoch": 0.5266903914590747,
      "grad_norm": 1.126348853111267,
      "learning_rate": 1.703270924499656e-05,
      "loss": 2.2711,
      "step": 37
    },
    {
      "epoch": 0.5409252669039146,
      "grad_norm": 0.7392522096633911,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 2.0126,
      "step": 38
    },
    {
      "epoch": 0.5551601423487544,
      "grad_norm": 0.7053424119949341,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 1.8244,
      "step": 39
    },
    {
      "epoch": 0.5693950177935944,
      "grad_norm": 0.6880508065223694,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 1.7971,
      "step": 40
    },
    {
      "epoch": 0.5836298932384342,
      "grad_norm": 0.6148623824119568,
      "learning_rate": 8.426519384872733e-06,
      "loss": 1.8143,
      "step": 41
    },
    {
      "epoch": 0.597864768683274,
      "grad_norm": 0.6629939079284668,
      "learning_rate": 6.698729810778065e-06,
      "loss": 1.9547,
      "step": 42
    },
    {
      "epoch": 0.6120996441281139,
      "grad_norm": 0.6230815052986145,
      "learning_rate": 5.156362923365588e-06,
      "loss": 1.7457,
      "step": 43
    },
    {
      "epoch": 0.6263345195729537,
      "grad_norm": 0.6868031024932861,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 1.7492,
      "step": 44
    },
    {
      "epoch": 0.6405693950177936,
      "grad_norm": 0.6768158078193665,
      "learning_rate": 2.653493525244721e-06,
      "loss": 1.8826,
      "step": 45
    },
    {
      "epoch": 0.6548042704626335,
      "grad_norm": 0.7183138728141785,
      "learning_rate": 1.70370868554659e-06,
      "loss": 1.9686,
      "step": 46
    },
    {
      "epoch": 0.6690391459074733,
      "grad_norm": 0.7213199734687805,
      "learning_rate": 9.607359798384785e-07,
      "loss": 2.0478,
      "step": 47
    },
    {
      "epoch": 0.6832740213523132,
      "grad_norm": 0.7075734734535217,
      "learning_rate": 4.277569313094809e-07,
      "loss": 1.9768,
      "step": 48
    },
    {
      "epoch": 0.697508896797153,
      "grad_norm": 0.779909074306488,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 2.1713,
      "step": 49
    },
    {
      "epoch": 0.7117437722419929,
      "grad_norm": 0.9000588655471802,
      "learning_rate": 0.0,
      "loss": 2.2541,
      "step": 50
    },
    {
      "epoch": 0.7117437722419929,
      "eval_loss": 1.9384169578552246,
      "eval_runtime": 22.4329,
      "eval_samples_per_second": 21.085,
      "eval_steps_per_second": 2.675,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.0228361536208896e+18,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}