{
  "best_metric": null,
"best_model_checkpoint": "miner_id_24/checkpoint-25",
"epoch": 1.7527114967462039,
"eval_steps": 25,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03470715835140998,
"grad_norm": 1.0270408391952515,
"learning_rate": 5e-05,
"loss": 5.7043,
"step": 1
},
{
"epoch": 0.03470715835140998,
      "eval_loss": null,
"eval_runtime": 3.6098,
"eval_samples_per_second": 13.851,
"eval_steps_per_second": 3.601,
"step": 1
},
{
"epoch": 0.06941431670281996,
"grad_norm": 0.9911738634109497,
"learning_rate": 0.0001,
"loss": 5.8026,
"step": 2
},
{
"epoch": 0.10412147505422993,
"grad_norm": 1.0123313665390015,
"learning_rate": 9.990365154573717e-05,
"loss": 5.9004,
"step": 3
},
{
"epoch": 0.13882863340563992,
"grad_norm": 1.1171038150787354,
"learning_rate": 9.961501876182148e-05,
"loss": 6.0405,
"step": 4
},
{
"epoch": 0.1735357917570499,
"grad_norm": 1.0495178699493408,
"learning_rate": 9.913533761814537e-05,
"loss": 5.9618,
"step": 5
},
{
"epoch": 0.20824295010845986,
"grad_norm": 1.032975196838379,
"learning_rate": 9.846666218300807e-05,
"loss": 5.9063,
"step": 6
},
{
"epoch": 0.24295010845986983,
"grad_norm": 1.3030965328216553,
"learning_rate": 9.761185582727977e-05,
"loss": 5.8093,
"step": 7
},
{
"epoch": 0.27765726681127983,
"grad_norm": 1.113315224647522,
"learning_rate": 9.657457896300791e-05,
"loss": 4.9621,
"step": 8
},
{
"epoch": 0.3123644251626898,
"grad_norm": 1.320401668548584,
"learning_rate": 9.535927336897098e-05,
"loss": 4.8794,
"step": 9
},
{
"epoch": 0.3470715835140998,
"grad_norm": 1.2780348062515259,
"learning_rate": 9.397114317029975e-05,
"loss": 5.1614,
"step": 10
},
{
"epoch": 0.38177874186550975,
"grad_norm": 1.2609745264053345,
"learning_rate": 9.241613255361455e-05,
"loss": 5.2007,
"step": 11
},
{
"epoch": 0.4164859002169197,
"grad_norm": 1.1756610870361328,
"learning_rate": 9.070090031310558e-05,
"loss": 5.2247,
"step": 12
},
{
"epoch": 0.4511930585683297,
"grad_norm": 1.2289035320281982,
"learning_rate": 8.883279133655399e-05,
"loss": 5.2601,
"step": 13
},
{
"epoch": 0.48590021691973967,
"grad_norm": 1.4233766794204712,
"learning_rate": 8.681980515339464e-05,
"loss": 4.9673,
"step": 14
},
{
"epoch": 0.5206073752711496,
"grad_norm": 1.0535727739334106,
"learning_rate": 8.467056167950311e-05,
"loss": 4.5299,
"step": 15
},
{
"epoch": 0.5553145336225597,
"grad_norm": 1.213702917098999,
"learning_rate": 8.239426430539243e-05,
"loss": 4.7016,
"step": 16
},
{
"epoch": 0.5900216919739696,
"grad_norm": 1.3188772201538086,
"learning_rate": 8.000066048588211e-05,
"loss": 4.7211,
"step": 17
},
{
"epoch": 0.6247288503253796,
"grad_norm": 1.353604793548584,
"learning_rate": 7.75e-05,
"loss": 4.7997,
"step": 18
},
{
"epoch": 0.6594360086767896,
"grad_norm": 1.3442471027374268,
"learning_rate": 7.490299105985507e-05,
"loss": 4.7069,
"step": 19
},
{
"epoch": 0.6941431670281996,
"grad_norm": 1.2742968797683716,
"learning_rate": 7.222075445642904e-05,
"loss": 4.7996,
"step": 20
},
{
"epoch": 0.7288503253796096,
"grad_norm": 1.404556155204773,
"learning_rate": 6.946477593864228e-05,
"loss": 4.7581,
"step": 21
},
{
"epoch": 0.7635574837310195,
"grad_norm": 0.9362565875053406,
"learning_rate": 6.664685702961344e-05,
"loss": 4.3051,
"step": 22
},
{
"epoch": 0.7982646420824295,
"grad_norm": 1.0469181537628174,
"learning_rate": 6.377906449072578e-05,
"loss": 4.2941,
"step": 23
},
{
"epoch": 0.8329718004338394,
"grad_norm": 1.0510656833648682,
"learning_rate": 6.087367864990233e-05,
"loss": 4.3864,
"step": 24
},
{
"epoch": 0.8676789587852495,
"grad_norm": 1.2001053094863892,
"learning_rate": 5.794314081535644e-05,
"loss": 4.4031,
"step": 25
},
{
"epoch": 0.8676789587852495,
      "eval_loss": null,
"eval_runtime": 3.2517,
"eval_samples_per_second": 15.376,
"eval_steps_per_second": 3.998,
"step": 25
},
{
"epoch": 0.9023861171366594,
"grad_norm": 1.270917534828186,
"learning_rate": 5.500000000000001e-05,
"loss": 4.3487,
"step": 26
},
{
"epoch": 0.9370932754880694,
"grad_norm": 1.400947093963623,
"learning_rate": 5.205685918464356e-05,
"loss": 4.1771,
"step": 27
},
{
"epoch": 0.9718004338394793,
"grad_norm": 1.8828790187835693,
"learning_rate": 4.912632135009769e-05,
"loss": 4.5593,
"step": 28
},
{
"epoch": 1.0238611713665944,
"grad_norm": 1.7455302476882935,
"learning_rate": 4.6220935509274235e-05,
"loss": 7.0855,
"step": 29
},
{
"epoch": 1.0585683297180044,
"grad_norm": 1.0067161321640015,
"learning_rate": 4.3353142970386564e-05,
"loss": 4.1927,
"step": 30
},
{
"epoch": 1.0932754880694142,
"grad_norm": 0.9785596132278442,
"learning_rate": 4.053522406135775e-05,
"loss": 3.8184,
"step": 31
},
{
"epoch": 1.1279826464208242,
"grad_norm": 1.14651358127594,
"learning_rate": 3.777924554357096e-05,
"loss": 4.2095,
"step": 32
},
{
"epoch": 1.1626898047722343,
"grad_norm": 1.0360416173934937,
"learning_rate": 3.509700894014496e-05,
"loss": 4.0984,
"step": 33
},
{
"epoch": 1.1973969631236443,
"grad_norm": 1.2359875440597534,
"learning_rate": 3.250000000000001e-05,
"loss": 4.1898,
"step": 34
},
{
"epoch": 1.2321041214750543,
"grad_norm": 1.3868677616119385,
"learning_rate": 2.9999339514117912e-05,
"loss": 3.7502,
"step": 35
},
{
"epoch": 1.2668112798264641,
"grad_norm": 1.1134669780731201,
"learning_rate": 2.760573569460757e-05,
"loss": 3.8309,
"step": 36
},
{
"epoch": 1.3015184381778742,
"grad_norm": 0.9911520481109619,
"learning_rate": 2.53294383204969e-05,
"loss": 3.8265,
"step": 37
},
{
"epoch": 1.3362255965292842,
"grad_norm": 0.9708497524261475,
"learning_rate": 2.3180194846605367e-05,
"loss": 3.5501,
"step": 38
},
{
"epoch": 1.3709327548806942,
"grad_norm": 1.183776617050171,
"learning_rate": 2.1167208663446025e-05,
"loss": 4.2784,
"step": 39
},
{
"epoch": 1.405639913232104,
"grad_norm": 1.1506365537643433,
"learning_rate": 1.9299099686894423e-05,
"loss": 3.6222,
"step": 40
},
{
"epoch": 1.440347071583514,
"grad_norm": 1.267316222190857,
"learning_rate": 1.758386744638546e-05,
"loss": 3.9714,
"step": 41
},
{
"epoch": 1.475054229934924,
"grad_norm": 1.6185253858566284,
"learning_rate": 1.602885682970026e-05,
"loss": 4.3338,
"step": 42
},
{
"epoch": 1.509761388286334,
"grad_norm": 1.0542787313461304,
"learning_rate": 1.464072663102903e-05,
"loss": 3.8664,
"step": 43
},
{
"epoch": 1.5444685466377441,
"grad_norm": 0.9902780055999756,
"learning_rate": 1.3425421036992098e-05,
"loss": 3.4753,
"step": 44
},
{
"epoch": 1.579175704989154,
"grad_norm": 1.081815481185913,
"learning_rate": 1.2388144172720251e-05,
"loss": 4.0674,
"step": 45
},
{
"epoch": 1.613882863340564,
"grad_norm": 1.1183944940567017,
"learning_rate": 1.1533337816991932e-05,
"loss": 3.8616,
"step": 46
},
{
"epoch": 1.648590021691974,
"grad_norm": 1.1336209774017334,
"learning_rate": 1.0864662381854632e-05,
"loss": 3.8991,
"step": 47
},
{
"epoch": 1.6832971800433838,
"grad_norm": 1.3395631313323975,
"learning_rate": 1.0384981238178534e-05,
"loss": 4.0566,
"step": 48
},
{
"epoch": 1.718004338394794,
"grad_norm": 1.4792617559432983,
"learning_rate": 1.0096348454262845e-05,
"loss": 3.8368,
"step": 49
},
{
"epoch": 1.7527114967462039,
"grad_norm": 1.0658278465270996,
"learning_rate": 1e-05,
"loss": 3.6137,
"step": 50
},
{
"epoch": 1.7527114967462039,
      "eval_loss": null,
"eval_runtime": 3.2427,
"eval_samples_per_second": 15.419,
"eval_steps_per_second": 4.009,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 50,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 1
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.624057810649088e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}