{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 44.44444444444444,
"eval_steps": 500,
"global_step": 150,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 1.4814814814814814,
"grad_norm": null,
"learning_rate": 4.995066821070679e-05,
"loss": 3.9182,
"num_input_tokens_seen": 11376,
"step": 5
},
{
"epoch": 2.962962962962963,
"grad_norm": 4.32528018951416,
"learning_rate": 4.964990092676263e-05,
"loss": 2.9902,
"num_input_tokens_seen": 23120,
"step": 10
},
{
"epoch": 4.444444444444445,
"grad_norm": 3.1273458003997803,
"learning_rate": 4.907906416994146e-05,
"loss": 2.0558,
"num_input_tokens_seen": 34960,
"step": 15
},
{
"epoch": 5.925925925925926,
"grad_norm": 3.0323095321655273,
"learning_rate": 4.8244412147206284e-05,
"loss": 1.5538,
"num_input_tokens_seen": 46368,
"step": 20
},
{
"epoch": 7.407407407407407,
"grad_norm": 1.668092131614685,
"learning_rate": 4.715508948078037e-05,
"loss": 1.2808,
"num_input_tokens_seen": 58112,
"step": 25
},
{
"epoch": 8.88888888888889,
"grad_norm": 1.1815176010131836,
"learning_rate": 4.5823031017752485e-05,
"loss": 1.0824,
"num_input_tokens_seen": 69600,
"step": 30
},
{
"epoch": 10.37037037037037,
"grad_norm": 1.2929940223693848,
"learning_rate": 4.426283106939474e-05,
"loss": 0.9639,
"num_input_tokens_seen": 81392,
"step": 35
},
{
"epoch": 11.851851851851851,
"grad_norm": 0.8027080297470093,
"learning_rate": 4.249158351283414e-05,
"loss": 0.8585,
"num_input_tokens_seen": 92784,
"step": 40
},
{
"epoch": 13.333333333333334,
"grad_norm": 0.9231483936309814,
"learning_rate": 4.052869450695776e-05,
"loss": 0.7936,
"num_input_tokens_seen": 104656,
"step": 45
},
{
"epoch": 14.814814814814815,
"grad_norm": 0.8608518838882446,
"learning_rate": 3.8395669874474915e-05,
"loss": 0.6884,
"num_input_tokens_seen": 116352,
"step": 50
},
{
"epoch": 16.296296296296298,
"grad_norm": 1.0098791122436523,
"learning_rate": 3.611587947962319e-05,
"loss": 0.6035,
"num_input_tokens_seen": 127968,
"step": 55
},
{
"epoch": 17.77777777777778,
"grad_norm": 0.9739498496055603,
"learning_rate": 3.3714301183045385e-05,
"loss": 0.5181,
"num_input_tokens_seen": 139184,
"step": 60
},
{
"epoch": 19.25925925925926,
"grad_norm": 1.0032066106796265,
"learning_rate": 3.121724717912138e-05,
"loss": 0.4617,
"num_input_tokens_seen": 150848,
"step": 65
},
{
"epoch": 20.74074074074074,
"grad_norm": 0.943274974822998,
"learning_rate": 2.8652075714060295e-05,
"loss": 0.3967,
"num_input_tokens_seen": 162544,
"step": 70
},
{
"epoch": 22.22222222222222,
"grad_norm": 1.0475025177001953,
"learning_rate": 2.604689134322999e-05,
"loss": 0.3591,
"num_input_tokens_seen": 174128,
"step": 75
},
{
"epoch": 23.703703703703702,
"grad_norm": 0.9079996943473816,
"learning_rate": 2.3430237011767167e-05,
"loss": 0.2662,
"num_input_tokens_seen": 185968,
"step": 80
},
{
"epoch": 25.185185185185187,
"grad_norm": 1.1758606433868408,
"learning_rate": 2.0830781332097446e-05,
"loss": 0.2432,
"num_input_tokens_seen": 197680,
"step": 85
},
{
"epoch": 26.666666666666668,
"grad_norm": 1.1381499767303467,
"learning_rate": 1.827700448461836e-05,
"loss": 0.2029,
"num_input_tokens_seen": 209504,
"step": 90
},
{
"epoch": 28.14814814814815,
"grad_norm": 0.9022475481033325,
"learning_rate": 1.5796886182883053e-05,
"loss": 0.1476,
"num_input_tokens_seen": 220880,
"step": 95
},
{
"epoch": 29.62962962962963,
"grad_norm": 1.102629542350769,
"learning_rate": 1.3417599122003464e-05,
"loss": 0.1575,
"num_input_tokens_seen": 232544,
"step": 100
},
{
"epoch": 31.11111111111111,
"grad_norm": 0.8161982297897339,
"learning_rate": 1.11652112689164e-05,
"loss": 0.1061,
"num_input_tokens_seen": 244144,
"step": 105
},
{
"epoch": 32.592592592592595,
"grad_norm": 0.6333702802658081,
"learning_rate": 9.064400256282757e-06,
"loss": 0.1196,
"num_input_tokens_seen": 255952,
"step": 110
},
{
"epoch": 34.074074074074076,
"grad_norm": 0.6209620833396912,
"learning_rate": 7.138183009179922e-06,
"loss": 0.0896,
"num_input_tokens_seen": 267328,
"step": 115
},
{
"epoch": 35.55555555555556,
"grad_norm": 0.5725650787353516,
"learning_rate": 5.4076635668540075e-06,
"loss": 0.1,
"num_input_tokens_seen": 279264,
"step": 120
},
{
"epoch": 37.03703703703704,
"grad_norm": 0.5420427918434143,
"learning_rate": 3.891801862449629e-06,
"loss": 0.0777,
"num_input_tokens_seen": 290736,
"step": 125
},
{
"epoch": 38.51851851851852,
"grad_norm": 0.608285129070282,
"learning_rate": 2.6072059940146775e-06,
"loss": 0.0705,
"num_input_tokens_seen": 302128,
"step": 130
},
{
"epoch": 40.0,
"grad_norm": 0.6441460251808167,
"learning_rate": 1.5679502627027136e-06,
"loss": 0.0845,
"num_input_tokens_seen": 314000,
"step": 135
},
{
"epoch": 41.48148148148148,
"grad_norm": 0.6497734189033508,
"learning_rate": 7.854209717842231e-07,
"loss": 0.0748,
"num_input_tokens_seen": 325600,
"step": 140
},
{
"epoch": 42.96296296296296,
"grad_norm": 0.6788354516029358,
"learning_rate": 2.681916759252917e-07,
"loss": 0.0746,
"num_input_tokens_seen": 337232,
"step": 145
},
{
"epoch": 44.44444444444444,
"grad_norm": 0.6091296076774597,
"learning_rate": 2.192924752854042e-08,
"loss": 0.0824,
"num_input_tokens_seen": 349136,
"step": 150
},
{
"epoch": 44.44444444444444,
"num_input_tokens_seen": 349136,
"step": 150,
"total_flos": 5843993170542592.0,
"train_loss": 0.6807288352648417,
"train_runtime": 762.8508,
"train_samples_per_second": 7.079,
"train_steps_per_second": 0.197
}
],
"logging_steps": 5,
"max_steps": 150,
"num_input_tokens_seen": 349136,
"num_train_epochs": 50,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5843993170542592.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}