{
"best_metric": 0.5117946747628033,
"best_model_checkpoint": "distilbert-base-uncased-finetuned-cola/run-9/checkpoint-2138",
"epoch": 2.0,
"eval_steps": 500,
"global_step": 2138,
"is_hyper_param_search": true,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.47,
"learning_rate": 1.942054911488753e-05,
"loss": 0.5525,
"step": 500
},
{
"epoch": 0.94,
"learning_rate": 1.5833451014612775e-05,
"loss": 0.4922,
"step": 1000
},
{
"epoch": 1.0,
"eval_loss": 0.4696793258190155,
"eval_matthews_correlation": 0.4691032179514943,
"eval_runtime": 0.7564,
"eval_samples_per_second": 1378.966,
"eval_steps_per_second": 87.26,
"step": 1069
},
{
"epoch": 1.4,
"learning_rate": 1.2246352914338016e-05,
"loss": 0.364,
"step": 1500
},
{
"epoch": 1.87,
"learning_rate": 8.659254814063262e-06,
"loss": 0.3351,
"step": 2000
},
{
"epoch": 2.0,
"eval_loss": 0.6338055729866028,
"eval_matthews_correlation": 0.5117946747628033,
"eval_runtime": 0.84,
"eval_samples_per_second": 1241.606,
"eval_steps_per_second": 78.568,
"step": 2138
}
],
"logging_steps": 500,
"max_steps": 3207,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"total_flos": 134171622786756.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": {
"learning_rate": 2.3007647215162288e-05,
"num_train_epochs": 3,
"per_device_train_batch_size": 8,
"seed": 14
}
}