{
  "best_metric": 0.704846203327179,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 3.0285714285714285,
  "eval_steps": 10,
  "global_step": 53,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05714285714285714,
      "grad_norm": 0.3966389298439026,
      "learning_rate": 1e-05,
      "loss": 0.9692,
      "step": 1
    },
    {
      "epoch": 0.05714285714285714,
      "eval_loss": 1.0108790397644043,
      "eval_runtime": 2.0492,
      "eval_samples_per_second": 7.32,
      "eval_steps_per_second": 1.952,
      "step": 1
    },
    {
      "epoch": 0.11428571428571428,
      "grad_norm": 0.3902052640914917,
      "learning_rate": 2e-05,
      "loss": 0.9505,
      "step": 2
    },
    {
      "epoch": 0.17142857142857143,
      "grad_norm": 0.3335510790348053,
      "learning_rate": 3e-05,
      "loss": 1.0194,
      "step": 3
    },
    {
      "epoch": 0.22857142857142856,
      "grad_norm": 0.36140090227127075,
      "learning_rate": 4e-05,
      "loss": 0.9429,
      "step": 4
    },
    {
      "epoch": 0.2857142857142857,
      "grad_norm": 0.375689297914505,
      "learning_rate": 5e-05,
      "loss": 1.0009,
      "step": 5
    },
    {
      "epoch": 0.34285714285714286,
      "grad_norm": 0.3948005139827728,
      "learning_rate": 6e-05,
      "loss": 0.8756,
      "step": 6
    },
    {
      "epoch": 0.4,
      "grad_norm": 0.4350300133228302,
      "learning_rate": 7e-05,
      "loss": 1.0427,
      "step": 7
    },
    {
      "epoch": 0.45714285714285713,
      "grad_norm": 0.36623847484588623,
      "learning_rate": 8e-05,
      "loss": 0.9048,
      "step": 8
    },
    {
      "epoch": 0.5142857142857142,
      "grad_norm": 0.3320309817790985,
      "learning_rate": 9e-05,
      "loss": 0.8974,
      "step": 9
    },
    {
      "epoch": 0.5714285714285714,
      "grad_norm": 0.32206276059150696,
      "learning_rate": 0.0001,
      "loss": 0.8542,
      "step": 10
    },
    {
      "epoch": 0.5714285714285714,
      "eval_loss": 0.9411152005195618,
      "eval_runtime": 2.0837,
      "eval_samples_per_second": 7.199,
      "eval_steps_per_second": 1.92,
      "step": 10
    },
    {
      "epoch": 0.6285714285714286,
      "grad_norm": 0.33402150869369507,
      "learning_rate": 9.986661418317759e-05,
      "loss": 0.8844,
      "step": 11
    },
    {
      "epoch": 0.6857142857142857,
      "grad_norm": 0.31261247396469116,
      "learning_rate": 9.946716840375551e-05,
      "loss": 0.8441,
      "step": 12
    },
    {
      "epoch": 0.7428571428571429,
      "grad_norm": 0.33552902936935425,
      "learning_rate": 9.880379387779637e-05,
      "loss": 0.8498,
      "step": 13
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.33501139283180237,
      "learning_rate": 9.78800299954203e-05,
      "loss": 0.8905,
      "step": 14
    },
    {
      "epoch": 0.8571428571428571,
      "grad_norm": 0.33641278743743896,
      "learning_rate": 9.67008054366274e-05,
      "loss": 0.8753,
      "step": 15
    },
    {
      "epoch": 0.9142857142857143,
      "grad_norm": 0.3439123332500458,
      "learning_rate": 9.527241187465734e-05,
      "loss": 0.8292,
      "step": 16
    },
    {
      "epoch": 0.9714285714285714,
      "grad_norm": 0.3387420177459717,
      "learning_rate": 9.360247040719039e-05,
      "loss": 0.8047,
      "step": 17
    },
    {
      "epoch": 1.0285714285714285,
      "grad_norm": 0.35765647888183594,
      "learning_rate": 9.16998908944939e-05,
      "loss": 0.8683,
      "step": 18
    },
    {
      "epoch": 1.0857142857142856,
      "grad_norm": 0.33680498600006104,
      "learning_rate": 8.957482442146272e-05,
      "loss": 0.7337,
      "step": 19
    },
    {
      "epoch": 1.1428571428571428,
      "grad_norm": 0.32664304971694946,
      "learning_rate": 8.72386091371891e-05,
      "loss": 0.7241,
      "step": 20
    },
    {
      "epoch": 1.1428571428571428,
      "eval_loss": 0.784389317035675,
      "eval_runtime": 2.086,
      "eval_samples_per_second": 7.191,
      "eval_steps_per_second": 1.918,
      "step": 20
    },
    {
      "epoch": 1.2,
      "grad_norm": 0.35325202345848083,
      "learning_rate": 8.47037097610317e-05,
      "loss": 0.7718,
      "step": 21
    },
    {
      "epoch": 1.2571428571428571,
      "grad_norm": 0.30332982540130615,
      "learning_rate": 8.198365107794457e-05,
      "loss": 0.6796,
      "step": 22
    },
    {
      "epoch": 1.3142857142857143,
      "grad_norm": 0.365041047334671,
      "learning_rate": 7.909294577789766e-05,
      "loss": 0.7059,
      "step": 23
    },
    {
      "epoch": 1.3714285714285714,
      "grad_norm": 0.2987039387226105,
      "learning_rate": 7.604701702439651e-05,
      "loss": 0.6808,
      "step": 24
    },
    {
      "epoch": 1.4285714285714286,
      "grad_norm": 0.3556249141693115,
      "learning_rate": 7.286211616523193e-05,
      "loss": 0.7061,
      "step": 25
    },
    {
      "epoch": 1.4857142857142858,
      "grad_norm": 0.37903037667274475,
      "learning_rate": 6.95552360245078e-05,
      "loss": 0.736,
      "step": 26
    },
    {
      "epoch": 1.5428571428571427,
      "grad_norm": 0.36219486594200134,
      "learning_rate": 6.614402023857232e-05,
      "loss": 0.7291,
      "step": 27
    },
    {
      "epoch": 1.6,
      "grad_norm": 0.31098002195358276,
      "learning_rate": 6.264666911958404e-05,
      "loss": 0.6836,
      "step": 28
    },
    {
      "epoch": 1.657142857142857,
      "grad_norm": 0.3429687023162842,
      "learning_rate": 5.908184254897182e-05,
      "loss": 0.7052,
      "step": 29
    },
    {
      "epoch": 1.7142857142857144,
      "grad_norm": 0.3376871645450592,
      "learning_rate": 5.546856041889373e-05,
      "loss": 0.678,
      "step": 30
    },
    {
      "epoch": 1.7142857142857144,
      "eval_loss": 0.7271688580513,
      "eval_runtime": 2.0876,
      "eval_samples_per_second": 7.185,
      "eval_steps_per_second": 1.916,
      "step": 30
    },
    {
      "epoch": 1.7714285714285714,
      "grad_norm": 0.29218417406082153,
      "learning_rate": 5.182610115288295e-05,
      "loss": 0.6656,
      "step": 31
    },
    {
      "epoch": 1.8285714285714287,
      "grad_norm": 0.30864810943603516,
      "learning_rate": 4.817389884711705e-05,
      "loss": 0.7106,
      "step": 32
    },
    {
      "epoch": 1.8857142857142857,
      "grad_norm": 0.3442583382129669,
      "learning_rate": 4.4531439581106295e-05,
      "loss": 0.7097,
      "step": 33
    },
    {
      "epoch": 1.9428571428571428,
      "grad_norm": 0.32284116744995117,
      "learning_rate": 4.0918157451028185e-05,
      "loss": 0.7053,
      "step": 34
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.2941528856754303,
      "learning_rate": 3.735333088041596e-05,
      "loss": 0.5891,
      "step": 35
    },
    {
      "epoch": 2.057142857142857,
      "grad_norm": 0.3290832042694092,
      "learning_rate": 3.38559797614277e-05,
      "loss": 0.7012,
      "step": 36
    },
    {
      "epoch": 2.1142857142857143,
      "grad_norm": 0.29661279916763306,
      "learning_rate": 3.0444763975492208e-05,
      "loss": 0.6034,
      "step": 37
    },
    {
      "epoch": 2.1714285714285713,
      "grad_norm": 0.30858200788497925,
      "learning_rate": 2.7137883834768073e-05,
      "loss": 0.6878,
      "step": 38
    },
    {
      "epoch": 2.2285714285714286,
      "grad_norm": 0.2789033353328705,
      "learning_rate": 2.3952982975603496e-05,
      "loss": 0.7019,
      "step": 39
    },
    {
      "epoch": 2.2857142857142856,
      "grad_norm": 0.2946871519088745,
      "learning_rate": 2.090705422210237e-05,
      "loss": 0.6679,
      "step": 40
    },
    {
      "epoch": 2.2857142857142856,
      "eval_loss": 0.7075809240341187,
      "eval_runtime": 2.087,
      "eval_samples_per_second": 7.187,
      "eval_steps_per_second": 1.917,
      "step": 40
    },
    {
      "epoch": 2.342857142857143,
      "grad_norm": 0.30310121178627014,
      "learning_rate": 1.801634892205545e-05,
      "loss": 0.633,
      "step": 41
    },
    {
      "epoch": 2.4,
      "grad_norm": 0.3069086968898773,
      "learning_rate": 1.5296290238968303e-05,
      "loss": 0.7029,
      "step": 42
    },
    {
      "epoch": 2.4571428571428573,
      "grad_norm": 0.2964320182800293,
      "learning_rate": 1.2761390862810907e-05,
      "loss": 0.7385,
      "step": 43
    },
    {
      "epoch": 2.5142857142857142,
      "grad_norm": 0.28013864159584045,
      "learning_rate": 1.0425175578537299e-05,
      "loss": 0.6347,
      "step": 44
    },
    {
      "epoch": 2.571428571428571,
      "grad_norm": 0.29106035828590393,
      "learning_rate": 8.30010910550611e-06,
      "loss": 0.6346,
      "step": 45
    },
    {
      "epoch": 2.6285714285714286,
      "grad_norm": 0.28448185324668884,
      "learning_rate": 6.397529592809614e-06,
      "loss": 0.6148,
      "step": 46
    },
    {
      "epoch": 2.685714285714286,
      "grad_norm": 0.2868902087211609,
      "learning_rate": 4.727588125342669e-06,
      "loss": 0.5431,
      "step": 47
    },
    {
      "epoch": 2.742857142857143,
      "grad_norm": 0.28776589035987854,
      "learning_rate": 3.299194563372604e-06,
      "loss": 0.6296,
      "step": 48
    },
    {
      "epoch": 2.8,
      "grad_norm": 0.2633899748325348,
      "learning_rate": 2.1199700045797077e-06,
      "loss": 0.6345,
      "step": 49
    },
    {
      "epoch": 2.857142857142857,
      "grad_norm": 0.2832770049571991,
      "learning_rate": 1.196206122203647e-06,
      "loss": 0.6468,
      "step": 50
    },
    {
      "epoch": 2.857142857142857,
      "eval_loss": 0.704846203327179,
      "eval_runtime": 2.0892,
      "eval_samples_per_second": 7.18,
      "eval_steps_per_second": 1.915,
      "step": 50
    },
    {
      "epoch": 2.914285714285714,
      "grad_norm": 0.2923741936683655,
      "learning_rate": 5.328315962444874e-07,
      "loss": 0.6826,
      "step": 51
    },
    {
      "epoch": 2.9714285714285715,
      "grad_norm": 0.29426372051239014,
      "learning_rate": 1.333858168224178e-07,
      "loss": 0.6043,
      "step": 52
    },
    {
      "epoch": 3.0285714285714285,
      "grad_norm": 0.29648035764694214,
      "learning_rate": 0.0,
      "loss": 0.6566,
      "step": 53
    }
  ],
  "logging_steps": 1,
  "max_steps": 53,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 10,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 3,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.0398259709149184e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}