{
  "best_metric": 4.028105735778809,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.1467620620069712,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.002935241240139424,
      "grad_norm": 1.0239430665969849,
      "learning_rate": 5e-05,
      "loss": 3.8981,
      "step": 1
    },
    {
      "epoch": 0.002935241240139424,
      "eval_loss": 5.815672874450684,
      "eval_runtime": 14.0337,
      "eval_samples_per_second": 163.606,
      "eval_steps_per_second": 20.451,
      "step": 1
    },
    {
      "epoch": 0.005870482480278848,
      "grad_norm": 1.9530309438705444,
      "learning_rate": 0.0001,
      "loss": 4.5273,
      "step": 2
    },
    {
      "epoch": 0.008805723720418272,
      "grad_norm": 1.8931856155395508,
      "learning_rate": 9.989294616193017e-05,
      "loss": 4.7057,
      "step": 3
    },
    {
      "epoch": 0.011740964960557695,
      "grad_norm": 2.567333221435547,
      "learning_rate": 9.957224306869053e-05,
      "loss": 4.7502,
      "step": 4
    },
    {
      "epoch": 0.01467620620069712,
      "grad_norm": 1.4395949840545654,
      "learning_rate": 9.903926402016153e-05,
      "loss": 4.8912,
      "step": 5
    },
    {
      "epoch": 0.017611447440836543,
      "grad_norm": 1.5201845169067383,
      "learning_rate": 9.829629131445342e-05,
      "loss": 5.056,
      "step": 6
    },
    {
      "epoch": 0.020546688680975967,
      "grad_norm": 1.8026448488235474,
      "learning_rate": 9.73465064747553e-05,
      "loss": 5.1448,
      "step": 7
    },
    {
      "epoch": 0.02348192992111539,
      "grad_norm": 1.9539623260498047,
      "learning_rate": 9.619397662556435e-05,
      "loss": 5.1671,
      "step": 8
    },
    {
      "epoch": 0.026417171161254815,
      "grad_norm": 2.1242480278015137,
      "learning_rate": 9.484363707663442e-05,
      "loss": 5.4479,
      "step": 9
    },
    {
      "epoch": 0.02935241240139424,
      "grad_norm": 2.840369701385498,
      "learning_rate": 9.330127018922194e-05,
      "loss": 5.4989,
      "step": 10
    },
    {
      "epoch": 0.032287653641533666,
      "grad_norm": 3.428213357925415,
      "learning_rate": 9.157348061512727e-05,
      "loss": 5.7564,
      "step": 11
    },
    {
      "epoch": 0.035222894881673086,
      "grad_norm": 2.512089967727661,
      "learning_rate": 8.966766701456177e-05,
      "loss": 5.7054,
      "step": 12
    },
    {
      "epoch": 0.038158136121812514,
      "grad_norm": 0.8332175016403198,
      "learning_rate": 8.759199037394887e-05,
      "loss": 3.8813,
      "step": 13
    },
    {
      "epoch": 0.041093377361951934,
      "grad_norm": 1.1140429973602295,
      "learning_rate": 8.535533905932738e-05,
      "loss": 3.9291,
      "step": 14
    },
    {
      "epoch": 0.04402861860209136,
      "grad_norm": 1.1462697982788086,
      "learning_rate": 8.296729075500344e-05,
      "loss": 3.9426,
      "step": 15
    },
    {
      "epoch": 0.04696385984223078,
      "grad_norm": 1.049325704574585,
      "learning_rate": 8.043807145043604e-05,
      "loss": 3.8919,
      "step": 16
    },
    {
      "epoch": 0.04989910108237021,
      "grad_norm": 1.6786147356033325,
      "learning_rate": 7.777851165098012e-05,
      "loss": 4.1401,
      "step": 17
    },
    {
      "epoch": 0.05283434232250963,
      "grad_norm": 1.5152158737182617,
      "learning_rate": 7.500000000000001e-05,
      "loss": 4.3537,
      "step": 18
    },
    {
      "epoch": 0.05576958356264906,
      "grad_norm": 1.5700799226760864,
      "learning_rate": 7.211443451095007e-05,
      "loss": 4.1969,
      "step": 19
    },
    {
      "epoch": 0.05870482480278848,
      "grad_norm": 1.7648606300354004,
      "learning_rate": 6.91341716182545e-05,
      "loss": 4.2697,
      "step": 20
    },
    {
      "epoch": 0.061640066042927905,
      "grad_norm": 2.7130236625671387,
      "learning_rate": 6.607197326515808e-05,
      "loss": 4.3922,
      "step": 21
    },
    {
      "epoch": 0.06457530728306733,
      "grad_norm": 2.8758461475372314,
      "learning_rate": 6.294095225512603e-05,
      "loss": 4.5291,
      "step": 22
    },
    {
      "epoch": 0.06751054852320675,
      "grad_norm": 2.656874418258667,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 4.454,
      "step": 23
    },
    {
      "epoch": 0.07044578976334617,
      "grad_norm": 2.822298288345337,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 4.6382,
      "step": 24
    },
    {
      "epoch": 0.0733810310034856,
      "grad_norm": 2.601090669631958,
      "learning_rate": 5.327015646150716e-05,
      "loss": 4.7237,
      "step": 25
    },
    {
      "epoch": 0.0733810310034856,
      "eval_loss": 4.178318977355957,
      "eval_runtime": 13.9396,
      "eval_samples_per_second": 164.711,
      "eval_steps_per_second": 20.589,
      "step": 25
    },
    {
      "epoch": 0.07631627224362503,
      "grad_norm": 1.2970190048217773,
      "learning_rate": 5e-05,
      "loss": 3.3026,
      "step": 26
    },
    {
      "epoch": 0.07925151348376444,
      "grad_norm": 2.200300931930542,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 3.5441,
      "step": 27
    },
    {
      "epoch": 0.08218675472390387,
      "grad_norm": 1.8640614748001099,
      "learning_rate": 4.347369038899744e-05,
      "loss": 3.6378,
      "step": 28
    },
    {
      "epoch": 0.0851219959640433,
      "grad_norm": 1.2264716625213623,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 3.555,
      "step": 29
    },
    {
      "epoch": 0.08805723720418272,
      "grad_norm": 1.5774284601211548,
      "learning_rate": 3.705904774487396e-05,
      "loss": 3.8577,
      "step": 30
    },
    {
      "epoch": 0.09099247844432214,
      "grad_norm": 1.717018485069275,
      "learning_rate": 3.392802673484193e-05,
      "loss": 3.7962,
      "step": 31
    },
    {
      "epoch": 0.09392771968446156,
      "grad_norm": 1.5714224576950073,
      "learning_rate": 3.086582838174551e-05,
      "loss": 3.9705,
      "step": 32
    },
    {
      "epoch": 0.09686296092460099,
      "grad_norm": 1.3049858808517456,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 3.9805,
      "step": 33
    },
    {
      "epoch": 0.09979820216474042,
      "grad_norm": 1.8364142179489136,
      "learning_rate": 2.500000000000001e-05,
      "loss": 4.1243,
      "step": 34
    },
    {
      "epoch": 0.10273344340487983,
      "grad_norm": 1.8525341749191284,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 4.049,
      "step": 35
    },
    {
      "epoch": 0.10566868464501926,
      "grad_norm": 2.7360332012176514,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 4.3234,
      "step": 36
    },
    {
      "epoch": 0.10860392588515869,
      "grad_norm": 1.7006713151931763,
      "learning_rate": 1.703270924499656e-05,
      "loss": 4.5007,
      "step": 37
    },
    {
      "epoch": 0.11153916712529811,
      "grad_norm": 3.3105547428131104,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 3.5437,
      "step": 38
    },
    {
      "epoch": 0.11447440836543754,
      "grad_norm": 2.7863497734069824,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 3.422,
      "step": 39
    },
    {
      "epoch": 0.11740964960557695,
      "grad_norm": 3.747509002685547,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 3.4732,
      "step": 40
    },
    {
      "epoch": 0.12034489084571638,
      "grad_norm": 3.606635332107544,
      "learning_rate": 8.426519384872733e-06,
      "loss": 3.5158,
      "step": 41
    },
    {
      "epoch": 0.12328013208585581,
      "grad_norm": 2.7214155197143555,
      "learning_rate": 6.698729810778065e-06,
      "loss": 3.6458,
      "step": 42
    },
    {
      "epoch": 0.12621537332599522,
      "grad_norm": 1.4316763877868652,
      "learning_rate": 5.156362923365588e-06,
      "loss": 3.667,
      "step": 43
    },
    {
      "epoch": 0.12915061456613466,
      "grad_norm": 1.5033129453659058,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 3.9072,
      "step": 44
    },
    {
      "epoch": 0.13208585580627408,
      "grad_norm": 3.340280294418335,
      "learning_rate": 2.653493525244721e-06,
      "loss": 3.91,
      "step": 45
    },
    {
      "epoch": 0.1350210970464135,
      "grad_norm": 1.2967872619628906,
      "learning_rate": 1.70370868554659e-06,
      "loss": 3.838,
      "step": 46
    },
    {
      "epoch": 0.13795633828655293,
      "grad_norm": 1.9000120162963867,
      "learning_rate": 9.607359798384785e-07,
      "loss": 4.0708,
      "step": 47
    },
    {
      "epoch": 0.14089157952669235,
      "grad_norm": 1.6708569526672363,
      "learning_rate": 4.277569313094809e-07,
      "loss": 4.1453,
      "step": 48
    },
    {
      "epoch": 0.1438268207668318,
      "grad_norm": 1.74222993850708,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 4.2787,
      "step": 49
    },
    {
      "epoch": 0.1467620620069712,
      "grad_norm": 2.433371067047119,
      "learning_rate": 0.0,
      "loss": 4.2654,
      "step": 50
    },
    {
      "epoch": 0.1467620620069712,
      "eval_loss": 4.028105735778809,
      "eval_runtime": 13.8349,
      "eval_samples_per_second": 165.957,
      "eval_steps_per_second": 20.745,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.13737243557888e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}