zephyr-7b-teacher / trainer_state.json
Model save
579766a verified
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9976470588235294,
"eval_steps": 500,
"global_step": 212,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 1.999890203099519e-05,
"loss": 1.2086,
"step": 1
},
{
"epoch": 0.02,
"learning_rate": 1.99725628283861e-05,
"loss": 1.0943,
"step": 5
},
{
"epoch": 0.05,
"learning_rate": 1.9890401873221642e-05,
"loss": 0.916,
"step": 10
},
{
"epoch": 0.07,
"learning_rate": 1.975396798735198e-05,
"loss": 0.8638,
"step": 15
},
{
"epoch": 0.09,
"learning_rate": 1.9564009842765225e-05,
"loss": 0.8407,
"step": 20
},
{
"epoch": 0.12,
"learning_rate": 1.932156982230388e-05,
"loss": 0.8379,
"step": 25
},
{
"epoch": 0.14,
"learning_rate": 1.9027978299657436e-05,
"loss": 0.806,
"step": 30
},
{
"epoch": 0.16,
"learning_rate": 1.8684846339024145e-05,
"loss": 0.7773,
"step": 35
},
{
"epoch": 0.19,
"learning_rate": 1.829405685450202e-05,
"loss": 0.7836,
"step": 40
},
{
"epoch": 0.21,
"learning_rate": 1.7857754277721417e-05,
"loss": 0.7955,
"step": 45
},
{
"epoch": 0.24,
"learning_rate": 1.7378332790417275e-05,
"loss": 0.7679,
"step": 50
},
{
"epoch": 0.26,
"learning_rate": 1.6858423186514108e-05,
"loss": 0.7665,
"step": 55
},
{
"epoch": 0.28,
"learning_rate": 1.6300878435817115e-05,
"loss": 0.7698,
"step": 60
},
{
"epoch": 0.31,
"learning_rate": 1.5708758028527754e-05,
"loss": 0.7811,
"step": 65
},
{
"epoch": 0.33,
"learning_rate": 1.5085311186492206e-05,
"loss": 0.7672,
"step": 70
},
{
"epoch": 0.35,
"learning_rate": 1.4433959033309888e-05,
"loss": 0.7651,
"step": 75
},
{
"epoch": 0.38,
"learning_rate": 1.3758275821142382e-05,
"loss": 0.7348,
"step": 80
},
{
"epoch": 0.4,
"learning_rate": 1.306196931723947e-05,
"loss": 0.7506,
"step": 85
},
{
"epoch": 0.42,
"learning_rate": 1.234886045780984e-05,
"loss": 0.7291,
"step": 90
},
{
"epoch": 0.45,
"learning_rate": 1.16228623808846e-05,
"loss": 0.7273,
"step": 95
},
{
"epoch": 0.47,
"learning_rate": 1.0887958953229349e-05,
"loss": 0.7319,
"step": 100
},
{
"epoch": 0.49,
"learning_rate": 1.0148182909136928e-05,
"loss": 0.723,
"step": 105
},
{
"epoch": 0.52,
"learning_rate": 9.407593721062858e-06,
"loss": 0.7265,
"step": 110
},
{
"epoch": 0.54,
"learning_rate": 8.670255323536858e-06,
"loss": 0.7163,
"step": 115
},
{
"epoch": 0.56,
"learning_rate": 7.940213812589018e-06,
"loss": 0.7213,
"step": 120
},
{
"epoch": 0.59,
"learning_rate": 7.2214752430635625e-06,
"loss": 0.717,
"step": 125
},
{
"epoch": 0.61,
"learning_rate": 6.517983645656014e-06,
"loss": 0.7258,
"step": 130
},
{
"epoch": 0.64,
"learning_rate": 5.8335993843037695e-06,
"loss": 0.7135,
"step": 135
},
{
"epoch": 0.66,
"learning_rate": 5.172077972692553e-06,
"loss": 0.7251,
"step": 140
},
{
"epoch": 0.68,
"learning_rate": 4.5370494661216835e-06,
"loss": 0.7171,
"step": 145
},
{
"epoch": 0.71,
"learning_rate": 3.931998541814069e-06,
"loss": 0.7152,
"step": 150
},
{
"epoch": 0.73,
"learning_rate": 3.360245376978779e-06,
"loss": 0.7163,
"step": 155
},
{
"epoch": 0.75,
"learning_rate": 2.8249274295566863e-06,
"loss": 0.7232,
"step": 160
},
{
"epoch": 0.78,
"learning_rate": 2.328982221626087e-06,
"loss": 0.6938,
"step": 165
},
{
"epoch": 0.8,
"learning_rate": 1.875131219943187e-06,
"loss": 0.6961,
"step": 170
},
{
"epoch": 0.82,
"learning_rate": 1.4658649020720539e-06,
"loss": 0.7121,
"step": 175
},
{
"epoch": 0.85,
"learning_rate": 1.1034290900525279e-06,
"loss": 0.7038,
"step": 180
},
{
"epoch": 0.87,
"learning_rate": 7.898126265992912e-07,
"loss": 0.7029,
"step": 185
},
{
"epoch": 0.89,
"learning_rate": 5.267364614580861e-07,
"loss": 0.695,
"step": 190
},
{
"epoch": 0.92,
"learning_rate": 3.1564420780701435e-07,
"loss": 0.7147,
"step": 195
},
{
"epoch": 0.94,
"learning_rate": 1.5769422052403172e-07,
"loss": 0.7114,
"step": 200
},
{
"epoch": 0.96,
"learning_rate": 5.375323979063929e-08,
"loss": 0.6828,
"step": 205
},
{
"epoch": 0.99,
"learning_rate": 4.39163491205652e-09,
"loss": 0.7076,
"step": 210
},
{
"epoch": 1.0,
"eval_loss": 0.7019486427307129,
"eval_runtime": 73.9949,
"eval_samples_per_second": 9.568,
"eval_steps_per_second": 0.162,
"step": 212
},
{
"epoch": 1.0,
"step": 212,
"total_flos": 88567594352640.0,
"train_loss": 0.7541754785573708,
"train_runtime": 4987.605,
"train_samples_per_second": 2.726,
"train_steps_per_second": 0.043
}
],
"logging_steps": 5,
"max_steps": 212,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"total_flos": 88567594352640.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
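
The file above looks like standard Hugging Face Trainer state. As a minimal sketch (assuming Python with matplotlib installed, and a hypothetical local copy of the file named trainer_state.json), one could read log_history and plot the training loss against the global step:

import json

import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the entries that carry a per-step training loss; the final two
# entries hold eval and run-summary metrics instead of "loss".
train_logs = [e for e in state["log_history"] if "loss" in e and "step" in e]

steps = [e["step"] for e in train_logs]
losses = [e["loss"] for e in train_logs]

plt.plot(steps, losses, marker="o")
plt.xlabel("global step")
plt.ylabel("training loss")
plt.title("zephyr-7b-teacher training loss (1 epoch, 212 steps)")
plt.tight_layout()
plt.savefig("loss_curve.png")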