{
  "best_metric": 0.6919782757759094,
  "best_model_checkpoint": "miner_id_24/checkpoint-25",
  "epoch": 3.1902439024390246,
  "eval_steps": 25,
  "global_step": 39,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07804878048780488,
      "grad_norm": 0.2701288163661957,
      "learning_rate": 5e-05,
      "loss": 0.7934,
      "step": 1
    },
    {
      "epoch": 0.07804878048780488,
      "eval_loss": 1.0395761728286743,
      "eval_runtime": 1.603,
      "eval_samples_per_second": 31.191,
      "eval_steps_per_second": 8.11,
      "step": 1
    },
    {
      "epoch": 0.15609756097560976,
      "grad_norm": 0.4705568253993988,
      "learning_rate": 0.0001,
      "loss": 0.652,
      "step": 2
    },
    {
      "epoch": 0.23414634146341465,
      "grad_norm": 0.5142024159431458,
      "learning_rate": 9.983788698441369e-05,
      "loss": 0.7973,
      "step": 3
    },
    {
      "epoch": 0.3121951219512195,
      "grad_norm": 0.2899770736694336,
      "learning_rate": 9.935271596564688e-05,
      "loss": 0.9691,
      "step": 4
    },
    {
      "epoch": 0.3902439024390244,
      "grad_norm": 0.3027752637863159,
      "learning_rate": 9.854798261200746e-05,
      "loss": 0.5577,
      "step": 5
    },
    {
      "epoch": 0.4682926829268293,
      "grad_norm": 0.397220253944397,
      "learning_rate": 9.74294850457488e-05,
      "loss": 0.555,
      "step": 6
    },
    {
      "epoch": 0.5463414634146342,
      "grad_norm": 0.579659640789032,
      "learning_rate": 9.600528206746612e-05,
      "loss": 0.6453,
      "step": 7
    },
    {
      "epoch": 0.624390243902439,
      "grad_norm": 0.8211554884910583,
      "learning_rate": 9.428563509225347e-05,
      "loss": 0.576,
      "step": 8
    },
    {
      "epoch": 0.7024390243902439,
      "grad_norm": 0.3230455815792084,
      "learning_rate": 9.22829342159729e-05,
      "loss": 0.4713,
      "step": 9
    },
    {
      "epoch": 0.7804878048780488,
      "grad_norm": 0.2374473512172699,
      "learning_rate": 9.001160894432978e-05,
      "loss": 0.7677,
      "step": 10
    },
    {
      "epoch": 0.8585365853658536,
      "grad_norm": 0.24319641292095184,
      "learning_rate": 8.74880242279536e-05,
      "loss": 0.4588,
      "step": 11
    },
    {
      "epoch": 0.9365853658536586,
      "grad_norm": 0.3264821469783783,
      "learning_rate": 8.473036255255366e-05,
      "loss": 0.4458,
      "step": 12
    },
    {
      "epoch": 1.0634146341463415,
      "grad_norm": 0.44675925374031067,
      "learning_rate": 8.175849293369291e-05,
      "loss": 1.2329,
      "step": 13
    },
    {
      "epoch": 1.1414634146341462,
      "grad_norm": 0.22685877978801727,
      "learning_rate": 7.859382776007543e-05,
      "loss": 0.4817,
      "step": 14
    },
    {
      "epoch": 1.2195121951219512,
      "grad_norm": 0.2206551730632782,
      "learning_rate": 7.525916851679529e-05,
      "loss": 0.4677,
      "step": 15
    },
    {
      "epoch": 1.2975609756097561,
      "grad_norm": 0.19651265442371368,
      "learning_rate": 7.177854150011389e-05,
      "loss": 0.5962,
      "step": 16
    },
    {
      "epoch": 1.3756097560975609,
      "grad_norm": 0.22256837785243988,
      "learning_rate": 6.817702470744477e-05,
      "loss": 0.3712,
      "step": 17
    },
    {
      "epoch": 1.4536585365853658,
      "grad_norm": 0.21558333933353424,
      "learning_rate": 6.448056714980767e-05,
      "loss": 0.4386,
      "step": 18
    },
    {
      "epoch": 1.5317073170731708,
      "grad_norm": 0.2228691130876541,
      "learning_rate": 6.071580188860955e-05,
      "loss": 0.735,
      "step": 19
    },
    {
      "epoch": 1.6097560975609757,
      "grad_norm": 0.25346508622169495,
      "learning_rate": 5.690985414382668e-05,
      "loss": 0.4369,
      "step": 20
    },
    {
      "epoch": 1.6878048780487804,
      "grad_norm": 0.19522270560264587,
      "learning_rate": 5.3090145856173346e-05,
      "loss": 0.4907,
      "step": 21
    },
    {
      "epoch": 1.7658536585365854,
      "grad_norm": 0.1740201860666275,
      "learning_rate": 4.9284198111390456e-05,
      "loss": 0.6792,
      "step": 22
    },
    {
      "epoch": 1.84390243902439,
      "grad_norm": 0.1370556354522705,
      "learning_rate": 4.551943285019234e-05,
      "loss": 0.3666,
      "step": 23
    },
    {
      "epoch": 1.921951219512195,
      "grad_norm": 0.23627439141273499,
      "learning_rate": 4.182297529255525e-05,
      "loss": 0.4073,
      "step": 24
    },
    {
      "epoch": 2.048780487804878,
      "grad_norm": 0.41981157660484314,
      "learning_rate": 3.822145849988612e-05,
      "loss": 1.2191,
      "step": 25
    },
    {
      "epoch": 2.048780487804878,
      "eval_loss": 0.6919782757759094,
      "eval_runtime": 1.7192,
      "eval_samples_per_second": 29.083,
      "eval_steps_per_second": 7.562,
      "step": 25
    },
    {
      "epoch": 2.126829268292683,
      "grad_norm": 0.15212808549404144,
      "learning_rate": 3.474083148320469e-05,
      "loss": 0.3673,
      "step": 26
    },
    {
      "epoch": 2.204878048780488,
      "grad_norm": 0.22316193580627441,
      "learning_rate": 3.1406172239924584e-05,
      "loss": 0.4834,
      "step": 27
    },
    {
      "epoch": 2.2829268292682925,
      "grad_norm": 0.1796901971101761,
      "learning_rate": 2.8241507066307104e-05,
      "loss": 0.636,
      "step": 28
    },
    {
      "epoch": 2.3609756097560974,
      "grad_norm": 0.14367538690567017,
      "learning_rate": 2.5269637447446348e-05,
      "loss": 0.3709,
      "step": 29
    },
    {
      "epoch": 2.4390243902439024,
      "grad_norm": 0.16765695810317993,
      "learning_rate": 2.2511975772046403e-05,
      "loss": 0.4021,
      "step": 30
    },
    {
      "epoch": 2.5170731707317073,
      "grad_norm": 0.1606360524892807,
      "learning_rate": 1.9988391055670233e-05,
      "loss": 0.6083,
      "step": 31
    },
    {
      "epoch": 2.5951219512195123,
      "grad_norm": 0.12848107516765594,
      "learning_rate": 1.771706578402711e-05,
      "loss": 0.3334,
      "step": 32
    },
    {
      "epoch": 2.6731707317073172,
      "grad_norm": 0.17367133498191833,
      "learning_rate": 1.5714364907746536e-05,
      "loss": 0.4383,
      "step": 33
    },
    {
      "epoch": 2.7512195121951217,
      "grad_norm": 0.15716971457004547,
      "learning_rate": 1.3994717932533891e-05,
      "loss": 0.6039,
      "step": 34
    },
    {
      "epoch": 2.8292682926829267,
      "grad_norm": 0.1574130654335022,
      "learning_rate": 1.257051495425121e-05,
      "loss": 0.3731,
      "step": 35
    },
    {
      "epoch": 2.9073170731707316,
      "grad_norm": 0.18796025216579437,
      "learning_rate": 1.1452017387992552e-05,
      "loss": 0.4538,
      "step": 36
    },
    {
      "epoch": 3.0341463414634147,
      "grad_norm": 0.3865165114402771,
      "learning_rate": 1.064728403435312e-05,
      "loss": 1.1283,
      "step": 37
    },
    {
      "epoch": 3.1121951219512196,
      "grad_norm": 0.12527284026145935,
      "learning_rate": 1.0162113015586309e-05,
      "loss": 0.3601,
      "step": 38
    },
    {
      "epoch": 3.1902439024390246,
      "grad_norm": 0.17297080159187317,
      "learning_rate": 1e-05,
      "loss": 0.4422,
      "step": 39
    }
  ],
  "logging_steps": 1,
  "max_steps": 39,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.0058973260546048e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}