{
  "best_metric": 10.363272666931152,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.2936857562408223,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.005873715124816446,
      "grad_norm": 0.15584339201450348,
      "learning_rate": 5e-05,
      "loss": 10.3832,
      "step": 1
    },
    {
      "epoch": 0.005873715124816446,
      "eval_loss": 10.380758285522461,
      "eval_runtime": 3.6353,
      "eval_samples_per_second": 315.52,
      "eval_steps_per_second": 39.612,
      "step": 1
    },
    {
      "epoch": 0.011747430249632892,
      "grad_norm": 0.14473983645439148,
      "learning_rate": 0.0001,
      "loss": 10.3827,
      "step": 2
    },
    {
      "epoch": 0.01762114537444934,
      "grad_norm": 0.1327889859676361,
      "learning_rate": 9.989294616193017e-05,
      "loss": 10.3817,
      "step": 3
    },
    {
      "epoch": 0.023494860499265784,
      "grad_norm": 0.12545689940452576,
      "learning_rate": 9.957224306869053e-05,
      "loss": 10.3798,
      "step": 4
    },
    {
      "epoch": 0.02936857562408223,
      "grad_norm": 0.12021041661500931,
      "learning_rate": 9.903926402016153e-05,
      "loss": 10.3802,
      "step": 5
    },
    {
      "epoch": 0.03524229074889868,
      "grad_norm": 0.11450187116861343,
      "learning_rate": 9.829629131445342e-05,
      "loss": 10.3801,
      "step": 6
    },
    {
      "epoch": 0.041116005873715125,
      "grad_norm": 0.11224997043609619,
      "learning_rate": 9.73465064747553e-05,
      "loss": 10.3793,
      "step": 7
    },
    {
      "epoch": 0.04698972099853157,
      "grad_norm": 0.11222485452890396,
      "learning_rate": 9.619397662556435e-05,
      "loss": 10.3792,
      "step": 8
    },
    {
      "epoch": 0.05286343612334802,
      "grad_norm": 0.11607800424098969,
      "learning_rate": 9.484363707663442e-05,
      "loss": 10.3793,
      "step": 9
    },
    {
      "epoch": 0.05873715124816446,
      "grad_norm": 0.11430998146533966,
      "learning_rate": 9.330127018922194e-05,
      "loss": 10.3762,
      "step": 10
    },
    {
      "epoch": 0.06461086637298091,
      "grad_norm": 0.11606329679489136,
      "learning_rate": 9.157348061512727e-05,
      "loss": 10.3729,
      "step": 11
    },
    {
      "epoch": 0.07048458149779736,
      "grad_norm": 0.12196508795022964,
      "learning_rate": 8.966766701456177e-05,
      "loss": 10.3719,
      "step": 12
    },
    {
      "epoch": 0.0763582966226138,
      "grad_norm": 0.1646091341972351,
      "learning_rate": 8.759199037394887e-05,
      "loss": 10.3762,
      "step": 13
    },
    {
      "epoch": 0.08223201174743025,
      "grad_norm": 0.16281186044216156,
      "learning_rate": 8.535533905932738e-05,
      "loss": 10.3761,
      "step": 14
    },
    {
      "epoch": 0.0881057268722467,
      "grad_norm": 0.15427953004837036,
      "learning_rate": 8.296729075500344e-05,
      "loss": 10.3758,
      "step": 15
    },
    {
      "epoch": 0.09397944199706314,
      "grad_norm": 0.1527239978313446,
      "learning_rate": 8.043807145043604e-05,
      "loss": 10.3745,
      "step": 16
    },
    {
      "epoch": 0.09985315712187959,
      "grad_norm": 0.148397296667099,
      "learning_rate": 7.777851165098012e-05,
      "loss": 10.3733,
      "step": 17
    },
    {
      "epoch": 0.10572687224669604,
      "grad_norm": 0.148326113820076,
      "learning_rate": 7.500000000000001e-05,
      "loss": 10.3729,
      "step": 18
    },
    {
      "epoch": 0.11160058737151249,
      "grad_norm": 0.14326217770576477,
      "learning_rate": 7.211443451095007e-05,
      "loss": 10.3734,
      "step": 19
    },
    {
      "epoch": 0.11747430249632893,
      "grad_norm": 0.14348213374614716,
      "learning_rate": 6.91341716182545e-05,
      "loss": 10.3744,
      "step": 20
    },
    {
      "epoch": 0.12334801762114538,
      "grad_norm": 0.15196223556995392,
      "learning_rate": 6.607197326515808e-05,
      "loss": 10.3727,
      "step": 21
    },
    {
      "epoch": 0.12922173274596183,
      "grad_norm": 0.15216338634490967,
      "learning_rate": 6.294095225512603e-05,
      "loss": 10.3709,
      "step": 22
    },
    {
      "epoch": 0.13509544787077826,
      "grad_norm": 0.15067099034786224,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 10.3704,
      "step": 23
    },
    {
      "epoch": 0.14096916299559473,
      "grad_norm": 0.15914303064346313,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 10.3667,
      "step": 24
    },
    {
      "epoch": 0.14684287812041116,
      "grad_norm": 0.1697520911693573,
      "learning_rate": 5.327015646150716e-05,
      "loss": 10.3637,
      "step": 25
    },
    {
      "epoch": 0.14684287812041116,
      "eval_loss": 10.368462562561035,
      "eval_runtime": 3.6356,
      "eval_samples_per_second": 315.488,
      "eval_steps_per_second": 39.608,
      "step": 25
    },
    {
      "epoch": 0.1527165932452276,
      "grad_norm": 0.21917545795440674,
      "learning_rate": 5e-05,
      "loss": 10.3686,
      "step": 26
    },
    {
      "epoch": 0.15859030837004406,
      "grad_norm": 0.20469699800014496,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 10.3687,
      "step": 27
    },
    {
      "epoch": 0.1644640234948605,
      "grad_norm": 0.20218636095523834,
      "learning_rate": 4.347369038899744e-05,
      "loss": 10.3679,
      "step": 28
    },
    {
      "epoch": 0.17033773861967694,
      "grad_norm": 0.19212639331817627,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 10.3662,
      "step": 29
    },
    {
      "epoch": 0.1762114537444934,
      "grad_norm": 0.18560166656970978,
      "learning_rate": 3.705904774487396e-05,
      "loss": 10.3675,
      "step": 30
    },
    {
      "epoch": 0.18208516886930984,
      "grad_norm": 0.18543921411037445,
      "learning_rate": 3.392802673484193e-05,
      "loss": 10.368,
      "step": 31
    },
    {
      "epoch": 0.18795888399412627,
      "grad_norm": 0.18739567697048187,
      "learning_rate": 3.086582838174551e-05,
      "loss": 10.3678,
      "step": 32
    },
    {
      "epoch": 0.19383259911894274,
      "grad_norm": 0.1920151710510254,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 10.3679,
      "step": 33
    },
    {
      "epoch": 0.19970631424375918,
      "grad_norm": 0.18959976732730865,
      "learning_rate": 2.500000000000001e-05,
      "loss": 10.3669,
      "step": 34
    },
    {
      "epoch": 0.2055800293685756,
      "grad_norm": 0.1854933500289917,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 10.3668,
      "step": 35
    },
    {
      "epoch": 0.21145374449339208,
      "grad_norm": 0.18347898125648499,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 10.3623,
      "step": 36
    },
    {
      "epoch": 0.2173274596182085,
      "grad_norm": 0.1941113919019699,
      "learning_rate": 1.703270924499656e-05,
      "loss": 10.3596,
      "step": 37
    },
    {
      "epoch": 0.22320117474302498,
      "grad_norm": 0.24184665083885193,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 10.3645,
      "step": 38
    },
    {
      "epoch": 0.2290748898678414,
      "grad_norm": 0.24406494200229645,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 10.3634,
      "step": 39
    },
    {
      "epoch": 0.23494860499265785,
      "grad_norm": 0.23579344153404236,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 10.3638,
      "step": 40
    },
    {
      "epoch": 0.24082232011747431,
      "grad_norm": 0.2283434420824051,
      "learning_rate": 8.426519384872733e-06,
      "loss": 10.3634,
      "step": 41
    },
    {
      "epoch": 0.24669603524229075,
      "grad_norm": 0.21662116050720215,
      "learning_rate": 6.698729810778065e-06,
      "loss": 10.3624,
      "step": 42
    },
    {
      "epoch": 0.2525697503671072,
      "grad_norm": 0.21385407447814941,
      "learning_rate": 5.156362923365588e-06,
      "loss": 10.3656,
      "step": 43
    },
    {
      "epoch": 0.25844346549192365,
      "grad_norm": 0.20657625794410706,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 10.3659,
      "step": 44
    },
    {
      "epoch": 0.2643171806167401,
      "grad_norm": 0.20029610395431519,
      "learning_rate": 2.653493525244721e-06,
      "loss": 10.3655,
      "step": 45
    },
    {
      "epoch": 0.2701908957415565,
      "grad_norm": 0.2030303031206131,
      "learning_rate": 1.70370868554659e-06,
      "loss": 10.365,
      "step": 46
    },
    {
      "epoch": 0.27606461086637296,
      "grad_norm": 0.20757298171520233,
      "learning_rate": 9.607359798384785e-07,
      "loss": 10.3641,
      "step": 47
    },
    {
      "epoch": 0.28193832599118945,
      "grad_norm": 0.20119069516658783,
      "learning_rate": 4.277569313094809e-07,
      "loss": 10.3643,
      "step": 48
    },
    {
      "epoch": 0.2878120411160059,
      "grad_norm": 0.1982789784669876,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 10.3607,
      "step": 49
    },
    {
      "epoch": 0.2936857562408223,
      "grad_norm": 0.20990411937236786,
      "learning_rate": 0.0,
      "loss": 10.3577,
      "step": 50
    },
    {
      "epoch": 0.2936857562408223,
      "eval_loss": 10.363272666931152,
      "eval_runtime": 3.6828,
      "eval_samples_per_second": 311.451,
      "eval_steps_per_second": 39.101,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 43062090006528.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}