{
"best_metric": 0.4297037422657013,
"best_model_checkpoint": "miner_id_24/checkpoint-50",
"epoch": 1.3686440677966103,
"eval_steps": 25,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02711864406779661,
"grad_norm": 6.654329776763916,
"learning_rate": 5e-05,
"loss": 5.9176,
"step": 1
},
{
"epoch": 0.02711864406779661,
"eval_loss": 7.1914238929748535,
"eval_runtime": 3.0005,
"eval_samples_per_second": 16.664,
"eval_steps_per_second": 4.333,
"step": 1
},
{
"epoch": 0.05423728813559322,
"grad_norm": 7.07365608215332,
"learning_rate": 0.0001,
"loss": 6.263,
"step": 2
},
{
"epoch": 0.08135593220338982,
"grad_norm": 6.792131423950195,
"learning_rate": 9.990365154573717e-05,
"loss": 6.2585,
"step": 3
},
{
"epoch": 0.10847457627118644,
"grad_norm": 6.233402729034424,
"learning_rate": 9.961501876182148e-05,
"loss": 4.6468,
"step": 4
},
{
"epoch": 0.13559322033898305,
"grad_norm": 7.542125701904297,
"learning_rate": 9.913533761814537e-05,
"loss": 3.8493,
"step": 5
},
{
"epoch": 0.16271186440677965,
"grad_norm": 7.424484729766846,
"learning_rate": 9.846666218300807e-05,
"loss": 2.7199,
"step": 6
},
{
"epoch": 0.18983050847457628,
"grad_norm": 8.212982177734375,
"learning_rate": 9.761185582727977e-05,
"loss": 2.0568,
"step": 7
},
{
"epoch": 0.21694915254237288,
"grad_norm": 7.07874059677124,
"learning_rate": 9.657457896300791e-05,
"loss": 1.4176,
"step": 8
},
{
"epoch": 0.2440677966101695,
"grad_norm": 6.48041296005249,
"learning_rate": 9.535927336897098e-05,
"loss": 1.0568,
"step": 9
},
{
"epoch": 0.2711864406779661,
"grad_norm": 7.512641429901123,
"learning_rate": 9.397114317029975e-05,
"loss": 0.7767,
"step": 10
},
{
"epoch": 0.2983050847457627,
"grad_norm": 3.8094427585601807,
"learning_rate": 9.241613255361455e-05,
"loss": 0.6959,
"step": 11
},
{
"epoch": 0.3254237288135593,
"grad_norm": 2.816176652908325,
"learning_rate": 9.070090031310558e-05,
"loss": 0.5913,
"step": 12
},
{
"epoch": 0.3525423728813559,
"grad_norm": 3.8954684734344482,
"learning_rate": 8.883279133655399e-05,
"loss": 0.5801,
"step": 13
},
{
"epoch": 0.37966101694915255,
"grad_norm": 2.8891689777374268,
"learning_rate": 8.681980515339464e-05,
"loss": 0.574,
"step": 14
},
{
"epoch": 0.4067796610169492,
"grad_norm": 3.304783344268799,
"learning_rate": 8.467056167950311e-05,
"loss": 0.5433,
"step": 15
},
{
"epoch": 0.43389830508474575,
"grad_norm": 2.807281732559204,
"learning_rate": 8.239426430539243e-05,
"loss": 0.5548,
"step": 16
},
{
"epoch": 0.4610169491525424,
"grad_norm": 8.233451843261719,
"learning_rate": 8.000066048588211e-05,
"loss": 0.6403,
"step": 17
},
{
"epoch": 0.488135593220339,
"grad_norm": 8.032986640930176,
"learning_rate": 7.75e-05,
"loss": 0.6811,
"step": 18
},
{
"epoch": 0.5152542372881356,
"grad_norm": 2.5166850090026855,
"learning_rate": 7.490299105985507e-05,
"loss": 0.4471,
"step": 19
},
{
"epoch": 0.5423728813559322,
"grad_norm": 2.66556453704834,
"learning_rate": 7.222075445642904e-05,
"loss": 0.5133,
"step": 20
},
{
"epoch": 0.5694915254237288,
"grad_norm": 3.332446813583374,
"learning_rate": 6.946477593864228e-05,
"loss": 0.5163,
"step": 21
},
{
"epoch": 0.5966101694915255,
"grad_norm": 2.7612719535827637,
"learning_rate": 6.664685702961344e-05,
"loss": 0.5193,
"step": 22
},
{
"epoch": 0.6237288135593221,
"grad_norm": 2.3616223335266113,
"learning_rate": 6.377906449072578e-05,
"loss": 0.5007,
"step": 23
},
{
"epoch": 0.6508474576271186,
"grad_norm": 2.6576013565063477,
"learning_rate": 6.087367864990233e-05,
"loss": 0.5167,
"step": 24
},
{
"epoch": 0.6779661016949152,
"grad_norm": 1.7816935777664185,
"learning_rate": 5.794314081535644e-05,
"loss": 0.5106,
"step": 25
},
{
"epoch": 0.6779661016949152,
"eval_loss": 0.5217246413230896,
"eval_runtime": 3.0304,
"eval_samples_per_second": 16.5,
"eval_steps_per_second": 4.29,
"step": 25
},
{
"epoch": 0.7050847457627119,
"grad_norm": 2.0804121494293213,
"learning_rate": 5.500000000000001e-05,
"loss": 0.4793,
"step": 26
},
{
"epoch": 0.7322033898305085,
"grad_norm": 2.41282057762146,
"learning_rate": 5.205685918464356e-05,
"loss": 0.4887,
"step": 27
},
{
"epoch": 0.7593220338983051,
"grad_norm": 1.6098804473876953,
"learning_rate": 4.912632135009769e-05,
"loss": 0.4879,
"step": 28
},
{
"epoch": 0.7864406779661017,
"grad_norm": 1.7615506649017334,
"learning_rate": 4.6220935509274235e-05,
"loss": 0.4757,
"step": 29
},
{
"epoch": 0.8135593220338984,
"grad_norm": 1.765375018119812,
"learning_rate": 4.3353142970386564e-05,
"loss": 0.4545,
"step": 30
},
{
"epoch": 0.8406779661016949,
"grad_norm": 1.167455792427063,
"learning_rate": 4.053522406135775e-05,
"loss": 0.4685,
"step": 31
},
{
"epoch": 0.8677966101694915,
"grad_norm": 1.3772835731506348,
"learning_rate": 3.777924554357096e-05,
"loss": 0.458,
"step": 32
},
{
"epoch": 0.8949152542372881,
"grad_norm": 1.6152493953704834,
"learning_rate": 3.509700894014496e-05,
"loss": 0.4649,
"step": 33
},
{
"epoch": 0.9220338983050848,
"grad_norm": 1.2044497728347778,
"learning_rate": 3.250000000000001e-05,
"loss": 0.4376,
"step": 34
},
{
"epoch": 0.9491525423728814,
"grad_norm": 1.3679577112197876,
"learning_rate": 2.9999339514117912e-05,
"loss": 0.4165,
"step": 35
},
{
"epoch": 0.976271186440678,
"grad_norm": 1.4477646350860596,
"learning_rate": 2.760573569460757e-05,
"loss": 0.4905,
"step": 36
},
{
"epoch": 1.0161016949152541,
"grad_norm": 3.4008398056030273,
"learning_rate": 2.53294383204969e-05,
"loss": 0.6072,
"step": 37
},
{
"epoch": 1.0432203389830508,
"grad_norm": 1.8189399242401123,
"learning_rate": 2.3180194846605367e-05,
"loss": 0.3882,
"step": 38
},
{
"epoch": 1.0703389830508474,
"grad_norm": 0.9250749349594116,
"learning_rate": 2.1167208663446025e-05,
"loss": 0.3339,
"step": 39
},
{
"epoch": 1.097457627118644,
"grad_norm": 1.2397493124008179,
"learning_rate": 1.9299099686894423e-05,
"loss": 0.4049,
"step": 40
},
{
"epoch": 1.1245762711864407,
"grad_norm": 1.5129092931747437,
"learning_rate": 1.758386744638546e-05,
"loss": 0.3999,
"step": 41
},
{
"epoch": 1.1516949152542373,
"grad_norm": 1.52627432346344,
"learning_rate": 1.602885682970026e-05,
"loss": 0.3978,
"step": 42
},
{
"epoch": 1.178813559322034,
"grad_norm": 1.3015371561050415,
"learning_rate": 1.464072663102903e-05,
"loss": 0.378,
"step": 43
},
{
"epoch": 1.2059322033898305,
"grad_norm": 1.3487688302993774,
"learning_rate": 1.3425421036992098e-05,
"loss": 0.4213,
"step": 44
},
{
"epoch": 1.2330508474576272,
"grad_norm": 1.26216721534729,
"learning_rate": 1.2388144172720251e-05,
"loss": 0.3905,
"step": 45
},
{
"epoch": 1.2601694915254238,
"grad_norm": 1.045103669166565,
"learning_rate": 1.1533337816991932e-05,
"loss": 0.3698,
"step": 46
},
{
"epoch": 1.2872881355932204,
"grad_norm": 1.2951055765151978,
"learning_rate": 1.0864662381854632e-05,
"loss": 0.3458,
"step": 47
},
{
"epoch": 1.314406779661017,
"grad_norm": 1.3998348712921143,
"learning_rate": 1.0384981238178534e-05,
"loss": 0.3566,
"step": 48
},
{
"epoch": 1.3415254237288137,
"grad_norm": 1.8256560564041138,
"learning_rate": 1.0096348454262845e-05,
"loss": 0.3871,
"step": 49
},
{
"epoch": 1.3686440677966103,
"grad_norm": 1.4336544275283813,
"learning_rate": 1e-05,
"loss": 0.3851,
"step": 50
},
{
"epoch": 1.3686440677966103,
"eval_loss": 0.4297037422657013,
"eval_runtime": 3.0435,
"eval_samples_per_second": 16.429,
"eval_steps_per_second": 4.271,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 50,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.968083617316864e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}