llama2_SFT / trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 80.0,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 4.0,
      "learning_rate": 4.9692208514878444e-05,
      "loss": 2.4048,
      "step": 10
    },
    {
      "epoch": 8.0,
      "learning_rate": 4.877641290737884e-05,
      "loss": 1.6571,
      "step": 20
    },
    {
      "epoch": 12.0,
      "learning_rate": 4.72751631047092e-05,
      "loss": 1.088,
      "step": 30
    },
    {
      "epoch": 16.0,
      "learning_rate": 4.522542485937369e-05,
      "loss": 0.7735,
      "step": 40
    },
    {
      "epoch": 20.0,
      "learning_rate": 4.267766952966369e-05,
      "loss": 0.6017,
      "step": 50
    },
    {
      "epoch": 24.0,
      "learning_rate": 3.969463130731183e-05,
      "loss": 0.4956,
      "step": 60
    },
    {
      "epoch": 28.0,
      "learning_rate": 3.634976249348867e-05,
      "loss": 0.4105,
      "step": 70
    },
    {
      "epoch": 32.0,
      "learning_rate": 3.272542485937369e-05,
      "loss": 0.3463,
      "step": 80
    },
    {
      "epoch": 36.0,
      "learning_rate": 2.8910861626005776e-05,
      "loss": 0.2862,
      "step": 90
    },
    {
      "epoch": 40.0,
      "learning_rate": 2.5e-05,
      "loss": 0.2345,
      "step": 100
    },
    {
      "epoch": 44.0,
      "learning_rate": 2.1089138373994223e-05,
      "loss": 0.1905,
      "step": 110
    },
    {
      "epoch": 48.0,
      "learning_rate": 1.7274575140626318e-05,
      "loss": 0.1554,
      "step": 120
    },
    {
      "epoch": 52.0,
      "learning_rate": 1.3650237506511331e-05,
      "loss": 0.1269,
      "step": 130
    },
    {
      "epoch": 56.0,
      "learning_rate": 1.0305368692688174e-05,
      "loss": 0.1071,
      "step": 140
    },
    {
      "epoch": 60.0,
      "learning_rate": 7.3223304703363135e-06,
      "loss": 0.0921,
      "step": 150
    },
    {
      "epoch": 64.0,
      "learning_rate": 4.7745751406263165e-06,
      "loss": 0.0832,
      "step": 160
    },
    {
      "epoch": 68.0,
      "learning_rate": 2.7248368952908053e-06,
      "loss": 0.0777,
      "step": 170
    },
    {
      "epoch": 72.0,
      "learning_rate": 1.2235870926211619e-06,
      "loss": 0.0739,
      "step": 180
    },
    {
      "epoch": 76.0,
      "learning_rate": 3.077914851215585e-07,
      "loss": 0.0714,
      "step": 190
    },
    {
      "epoch": 80.0,
      "learning_rate": 0.0,
      "loss": 0.072,
      "step": 200
    },
    {
      "epoch": 80.0,
      "step": 200,
      "total_flos": 3.758484756417741e+16,
      "train_loss": 0.4674161618947983,
      "train_runtime": 1650.9942,
      "train_samples_per_second": 4.846,
      "train_steps_per_second": 0.121
    }
  ],
  "max_steps": 200,
  "num_train_epochs": 100,
  "total_flos": 3.758484756417741e+16,
  "trial_name": null,
  "trial_params": null
}
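
This is the standard `TrainerState` dump written by the Hugging Face `transformers` Trainer: `log_history` records the per-interval training loss and learning rate (here the schedule appears to be a cosine decay from roughly 5e-05 down to 0 over the 200 steps), and the final entry summarizes the run. A minimal sketch for inspecting the file, assuming it has been downloaded locally as `trainer_state.json` (the filename and path are assumptions for illustration):

```python
import json

# Load the trainer state and print the logged curves.
with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "loss" in entry:
        # Per-interval training log: step, epoch, loss, learning rate.
        print(f"step {entry['step']:>4}  epoch {entry['epoch']:>5.1f}  "
              f"loss {entry['loss']:.4f}  lr {entry['learning_rate']:.3e}")
    else:
        # Final summary entry (no per-step loss/learning_rate keys).
        print(f"final: train_loss {entry['train_loss']:.4f}  "
              f"runtime {entry['train_runtime']:.1f}s  "
              f"samples/s {entry['train_samples_per_second']}")
```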