{
  "best_metric": 0.5065415501594543,
  "best_model_checkpoint": "miner_id_24/checkpoint-25",
  "epoch": 3.0505836575875485,
  "eval_steps": 25,
  "global_step": 49,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0622568093385214,
      "grad_norm": 3.598991870880127,
      "learning_rate": 5e-05,
      "loss": 11.1207,
      "step": 1
    },
    {
      "epoch": 0.0622568093385214,
      "eval_loss": 0.791270911693573,
      "eval_runtime": 4.6163,
      "eval_samples_per_second": 23.396,
      "eval_steps_per_second": 3.033,
      "step": 1
    },
    {
      "epoch": 0.1245136186770428,
      "grad_norm": 4.802671909332275,
      "learning_rate": 0.0001,
      "loss": 11.9479,
      "step": 2
    },
    {
      "epoch": 0.1867704280155642,
      "grad_norm": 5.3119587898254395,
      "learning_rate": 9.988834393115767e-05,
      "loss": 12.4291,
      "step": 3
    },
    {
      "epoch": 0.2490272373540856,
      "grad_norm": 4.467419147491455,
      "learning_rate": 9.9553874407739e-05,
      "loss": 12.4584,
      "step": 4
    },
    {
      "epoch": 0.311284046692607,
      "grad_norm": 2.2450504302978516,
      "learning_rate": 9.899808525182935e-05,
      "loss": 10.1383,
      "step": 5
    },
    {
      "epoch": 0.3735408560311284,
      "grad_norm": 2.6530046463012695,
      "learning_rate": 9.822345875271883e-05,
      "loss": 10.1286,
      "step": 6
    },
    {
      "epoch": 0.4357976653696498,
      "grad_norm": 2.063546895980835,
      "learning_rate": 9.723345458039594e-05,
      "loss": 9.4453,
      "step": 7
    },
    {
      "epoch": 0.4980544747081712,
      "grad_norm": 1.9874131679534912,
      "learning_rate": 9.603249433382144e-05,
      "loss": 9.4453,
      "step": 8
    },
    {
      "epoch": 0.5603112840466926,
      "grad_norm": 1.2906339168548584,
      "learning_rate": 9.462594179299406e-05,
      "loss": 8.8573,
      "step": 9
    },
    {
      "epoch": 0.622568093385214,
      "grad_norm": 1.4211024045944214,
      "learning_rate": 9.302007896300698e-05,
      "loss": 9.0304,
      "step": 10
    },
    {
      "epoch": 0.6848249027237354,
      "grad_norm": 1.2602137327194214,
      "learning_rate": 9.122207801708802e-05,
      "loss": 8.5633,
      "step": 11
    },
    {
      "epoch": 0.7470817120622568,
      "grad_norm": 1.8209120035171509,
      "learning_rate": 8.923996926393305e-05,
      "loss": 8.1361,
      "step": 12
    },
    {
      "epoch": 0.8093385214007782,
      "grad_norm": 1.0615341663360596,
      "learning_rate": 8.708260528239788e-05,
      "loss": 8.2272,
      "step": 13
    },
    {
      "epoch": 0.8715953307392996,
      "grad_norm": 1.0992909669876099,
      "learning_rate": 8.475962138373213e-05,
      "loss": 8.4856,
      "step": 14
    },
    {
      "epoch": 0.933852140077821,
      "grad_norm": 1.3216913938522339,
      "learning_rate": 8.228139257794012e-05,
      "loss": 8.2967,
      "step": 15
    },
    {
      "epoch": 0.9961089494163424,
      "grad_norm": 1.0753597021102905,
      "learning_rate": 7.965898723646776e-05,
      "loss": 7.9484,
      "step": 16
    },
    {
      "epoch": 1.0583657587548638,
      "grad_norm": 1.0627950429916382,
      "learning_rate": 7.690411765816864e-05,
      "loss": 8.0572,
      "step": 17
    },
    {
      "epoch": 1.1206225680933852,
      "grad_norm": 1.584797739982605,
      "learning_rate": 7.402908775933419e-05,
      "loss": 8.3771,
      "step": 18
    },
    {
      "epoch": 1.1828793774319066,
      "grad_norm": 0.9177744388580322,
      "learning_rate": 7.104673812141675e-05,
      "loss": 7.598,
      "step": 19
    },
    {
      "epoch": 1.245136186770428,
      "grad_norm": 1.159196138381958,
      "learning_rate": 6.797038864187564e-05,
      "loss": 7.6854,
      "step": 20
    },
    {
      "epoch": 1.3073929961089494,
      "grad_norm": 2.9085676670074463,
      "learning_rate": 6.481377904428171e-05,
      "loss": 7.9392,
      "step": 21
    },
    {
      "epoch": 1.3696498054474708,
      "grad_norm": 0.8160005211830139,
      "learning_rate": 6.159100751337642e-05,
      "loss": 8.0554,
      "step": 22
    },
    {
      "epoch": 1.4319066147859922,
      "grad_norm": 0.8344722390174866,
      "learning_rate": 5.831646772915651e-05,
      "loss": 8.5086,
      "step": 23
    },
    {
      "epoch": 1.4941634241245136,
      "grad_norm": 0.9519956111907959,
      "learning_rate": 5.5004784581204927e-05,
      "loss": 7.8829,
      "step": 24
    },
    {
      "epoch": 1.556420233463035,
      "grad_norm": 0.8052926659584045,
      "learning_rate": 5.167074885038373e-05,
      "loss": 7.9963,
      "step": 25
    },
    {
      "epoch": 1.556420233463035,
      "eval_loss": 0.5065415501594543,
      "eval_runtime": 4.6296,
      "eval_samples_per_second": 23.328,
      "eval_steps_per_second": 3.024,
      "step": 25
    },
    {
      "epoch": 1.6186770428015564,
      "grad_norm": 0.7603065371513367,
      "learning_rate": 4.832925114961629e-05,
      "loss": 7.9956,
      "step": 26
    },
    {
      "epoch": 1.6809338521400778,
      "grad_norm": 0.7943618893623352,
      "learning_rate": 4.4995215418795085e-05,
      "loss": 7.7549,
      "step": 27
    },
    {
      "epoch": 1.7431906614785992,
      "grad_norm": 0.967389702796936,
      "learning_rate": 4.1683532270843504e-05,
      "loss": 7.6843,
      "step": 28
    },
    {
      "epoch": 1.8054474708171206,
      "grad_norm": 0.7394790053367615,
      "learning_rate": 3.840899248662358e-05,
      "loss": 8.0537,
      "step": 29
    },
    {
      "epoch": 1.867704280155642,
      "grad_norm": 0.7868384718894958,
      "learning_rate": 3.5186220955718306e-05,
      "loss": 7.7551,
      "step": 30
    },
    {
      "epoch": 1.9299610894941635,
      "grad_norm": 1.0688693523406982,
      "learning_rate": 3.202961135812437e-05,
      "loss": 8.1714,
      "step": 31
    },
    {
      "epoch": 1.9922178988326849,
      "grad_norm": 0.8963145017623901,
      "learning_rate": 2.895326187858326e-05,
      "loss": 7.9336,
      "step": 32
    },
    {
      "epoch": 2.054474708171206,
      "grad_norm": 0.7154244184494019,
      "learning_rate": 2.5970912240665813e-05,
      "loss": 7.4189,
      "step": 33
    },
    {
      "epoch": 2.1167315175097277,
      "grad_norm": 1.0934628248214722,
      "learning_rate": 2.3095882341831372e-05,
      "loss": 7.9469,
      "step": 34
    },
    {
      "epoch": 2.178988326848249,
      "grad_norm": 0.7415445446968079,
      "learning_rate": 2.0341012763532243e-05,
      "loss": 7.5521,
      "step": 35
    },
    {
      "epoch": 2.2412451361867705,
      "grad_norm": 0.7995844483375549,
      "learning_rate": 1.771860742205988e-05,
      "loss": 7.5147,
      "step": 36
    },
    {
      "epoch": 2.3035019455252916,
      "grad_norm": 1.1787042617797852,
      "learning_rate": 1.5240378616267886e-05,
      "loss": 7.7859,
      "step": 37
    },
    {
      "epoch": 2.3657587548638133,
      "grad_norm": 0.7349408864974976,
      "learning_rate": 1.2917394717602121e-05,
      "loss": 7.8634,
      "step": 38
    },
    {
      "epoch": 2.4280155642023344,
      "grad_norm": 0.8099451065063477,
      "learning_rate": 1.0760030736066951e-05,
      "loss": 8.0651,
      "step": 39
    },
    {
      "epoch": 2.490272373540856,
      "grad_norm": 0.8851613402366638,
      "learning_rate": 8.777921982911996e-06,
      "loss": 7.5255,
      "step": 40
    },
    {
      "epoch": 2.5525291828793772,
      "grad_norm": 0.8391734957695007,
      "learning_rate": 6.979921036993042e-06,
      "loss": 8.0917,
      "step": 41
    },
    {
      "epoch": 2.614785992217899,
      "grad_norm": 1.488593339920044,
      "learning_rate": 5.374058207005944e-06,
      "loss": 7.9092,
      "step": 42
    },
    {
      "epoch": 2.6770428015564205,
      "grad_norm": 1.0373982191085815,
      "learning_rate": 3.967505666178556e-06,
      "loss": 8.0546,
      "step": 43
    },
    {
      "epoch": 2.7392996108949417,
      "grad_norm": 0.8613387942314148,
      "learning_rate": 2.7665454196040664e-06,
      "loss": 7.4587,
      "step": 44
    },
    {
      "epoch": 2.801556420233463,
      "grad_norm": 0.8068078756332397,
      "learning_rate": 1.7765412472811771e-06,
      "loss": 7.8842,
      "step": 45
    },
    {
      "epoch": 2.8638132295719845,
      "grad_norm": 0.713803231716156,
      "learning_rate": 1.0019147481706625e-06,
      "loss": 7.6852,
      "step": 46
    },
    {
      "epoch": 2.926070038910506,
      "grad_norm": 0.7315906286239624,
      "learning_rate": 4.461255922609986e-07,
      "loss": 7.5289,
      "step": 47
    },
    {
      "epoch": 2.9883268482490273,
      "grad_norm": 0.807794451713562,
      "learning_rate": 1.1165606884234181e-07,
      "loss": 7.7788,
      "step": 48
    },
    {
      "epoch": 3.0505836575875485,
      "grad_norm": 0.7264207005500793,
      "learning_rate": 0.0,
      "loss": 7.7754,
      "step": 49
    }
  ],
  "logging_steps": 1,
  "max_steps": 49,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.0781614054771261e+18,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}