{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.954653937947494,
  "eval_steps": 500,
  "global_step": 2000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0477326968973747,
      "grad_norm": 4.068389892578125,
      "learning_rate": 4.761904761904762e-05,
      "loss": 2.3107,
      "step": 100
    },
    {
      "epoch": 0.0954653937947494,
      "grad_norm": 2.991318464279175,
      "learning_rate": 9.523809523809524e-05,
      "loss": 1.8576,
      "step": 200
    },
    {
      "epoch": 0.1431980906921241,
      "grad_norm": 2.333883285522461,
      "learning_rate": 9.943858030057405e-05,
      "loss": 1.7539,
      "step": 300
    },
    {
      "epoch": 0.1909307875894988,
      "grad_norm": 2.2262773513793945,
      "learning_rate": 9.751405104519151e-05,
      "loss": 1.692,
      "step": 400
    },
    {
      "epoch": 0.2386634844868735,
      "grad_norm": 2.6444602012634277,
      "learning_rate": 9.42728012826605e-05,
      "loss": 1.6101,
      "step": 500
    },
    {
      "epoch": 0.2863961813842482,
      "grad_norm": 2.4133706092834473,
      "learning_rate": 8.980465328528219e-05,
      "loss": 1.6038,
      "step": 600
    },
    {
      "epoch": 0.3341288782816229,
      "grad_norm": 2.31902813911438,
      "learning_rate": 8.423342941649866e-05,
      "loss": 1.5127,
      "step": 700
    },
    {
      "epoch": 0.3818615751789976,
      "grad_norm": 2.5150299072265625,
      "learning_rate": 7.771352073656629e-05,
      "loss": 1.4941,
      "step": 800
    },
    {
      "epoch": 0.4295942720763723,
      "grad_norm": 2.1890738010406494,
      "learning_rate": 7.042560848134592e-05,
      "loss": 1.4806,
      "step": 900
    },
    {
      "epoch": 0.477326968973747,
      "grad_norm": 2.1867263317108154,
      "learning_rate": 6.257165698159149e-05,
      "loss": 1.4077,
      "step": 1000
    },
    {
      "epoch": 0.5250596658711217,
      "grad_norm": 2.231353521347046,
      "learning_rate": 5.4369316780129554e-05,
      "loss": 1.4155,
      "step": 1100
    },
    {
      "epoch": 0.5727923627684964,
      "grad_norm": 2.0291645526885986,
      "learning_rate": 4.604589304905754e-05,
      "loss": 1.3634,
      "step": 1200
    },
    {
      "epoch": 0.6205250596658711,
      "grad_norm": 2.1311423778533936,
      "learning_rate": 3.783204645559504e-05,
      "loss": 1.3241,
      "step": 1300
    },
    {
      "epoch": 0.6682577565632458,
      "grad_norm": 1.8706629276275635,
      "learning_rate": 2.995540103966864e-05,
      "loss": 1.2945,
      "step": 1400
    },
    {
      "epoch": 0.7159904534606205,
      "grad_norm": 1.8337299823760986,
      "learning_rate": 2.263423624322326e-05,
      "loss": 1.264,
      "step": 1500
    },
    {
      "epoch": 0.7637231503579952,
      "grad_norm": 1.8664114475250244,
      "learning_rate": 1.6071437899220688e-05,
      "loss": 1.2934,
      "step": 1600
    },
    {
      "epoch": 0.8114558472553699,
      "grad_norm": 2.2168829441070557,
      "learning_rate": 1.0448875811934417e-05,
      "loss": 1.2624,
      "step": 1700
    },
    {
      "epoch": 0.8591885441527446,
      "grad_norm": 2.4389848709106445,
      "learning_rate": 5.922363738351888e-06,
      "loss": 1.2715,
      "step": 1800
    },
    {
      "epoch": 0.9069212410501193,
      "grad_norm": 2.066229820251465,
      "learning_rate": 2.6173414408598827e-06,
      "loss": 1.2322,
      "step": 1900
    },
    {
      "epoch": 0.954653937947494,
      "grad_norm": 1.689720869064331,
      "learning_rate": 6.253984711796612e-07,
      "loss": 1.2093,
      "step": 2000
    }
  ],
  "logging_steps": 100,
  "max_steps": 2095,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 9.602614826159309e+17,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}