Commit 90a67e0 (verified): Training in progress, step 30, checkpoint
{
"best_metric": 1.2432726621627808,
"best_model_checkpoint": "miner_id_24/checkpoint-30",
"epoch": 0.0050729232720355105,
"eval_steps": 5,
"global_step": 30,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0001690974424011837,
"eval_loss": 1.4077675342559814,
"eval_runtime": 176.3171,
"eval_samples_per_second": 14.122,
"eval_steps_per_second": 7.061,
"step": 1
},
{
"epoch": 0.000507292327203551,
"grad_norm": 0.5090880393981934,
"learning_rate": 0.00012,
"loss": 1.3482,
"step": 3
},
{
"epoch": 0.0008454872120059184,
"eval_loss": 1.357488989830017,
"eval_runtime": 175.9636,
"eval_samples_per_second": 14.151,
"eval_steps_per_second": 7.075,
"step": 5
},
{
"epoch": 0.001014584654407102,
"grad_norm": 0.43426570296287537,
"learning_rate": 0.0001992114701314478,
"loss": 1.3285,
"step": 6
},
{
"epoch": 0.0015218769816106531,
"grad_norm": 0.4685356616973877,
"learning_rate": 0.00018763066800438636,
"loss": 1.3682,
"step": 9
},
{
"epoch": 0.0016909744240118367,
"eval_loss": 1.2989118099212646,
"eval_runtime": 175.9695,
"eval_samples_per_second": 14.15,
"eval_steps_per_second": 7.075,
"step": 10
},
{
"epoch": 0.002029169308814204,
"grad_norm": 0.4550609588623047,
"learning_rate": 0.000163742398974869,
"loss": 1.3542,
"step": 12
},
{
"epoch": 0.0025364616360177552,
"grad_norm": 0.4199211299419403,
"learning_rate": 0.00013090169943749476,
"loss": 1.3569,
"step": 15
},
{
"epoch": 0.0025364616360177552,
"eval_loss": 1.263357162475586,
"eval_runtime": 175.9689,
"eval_samples_per_second": 14.15,
"eval_steps_per_second": 7.075,
"step": 15
},
{
"epoch": 0.0030437539632213063,
"grad_norm": 0.40665045380592346,
"learning_rate": 9.372094804706867e-05,
"loss": 1.1759,
"step": 18
},
{
"epoch": 0.0033819488480236735,
"eval_loss": 1.249632477760315,
"eval_runtime": 175.9054,
"eval_samples_per_second": 14.155,
"eval_steps_per_second": 7.078,
"step": 20
},
{
"epoch": 0.0035510462904248573,
"grad_norm": 0.4052756428718567,
"learning_rate": 5.7422070843492734e-05,
"loss": 1.2445,
"step": 21
},
{
"epoch": 0.004058338617628408,
"grad_norm": 0.46778208017349243,
"learning_rate": 2.7103137257858868e-05,
"loss": 1.2339,
"step": 24
},
{
"epoch": 0.004227436060029592,
"eval_loss": 1.2444090843200684,
"eval_runtime": 175.9887,
"eval_samples_per_second": 14.149,
"eval_steps_per_second": 7.074,
"step": 25
},
{
"epoch": 0.004565630944831959,
"grad_norm": 0.466905802488327,
"learning_rate": 7.022351411174866e-06,
"loss": 1.1967,
"step": 27
},
{
"epoch": 0.0050729232720355105,
"grad_norm": 0.46483084559440613,
"learning_rate": 0.0,
"loss": 1.2324,
"step": 30
},
{
"epoch": 0.0050729232720355105,
"eval_loss": 1.2432726621627808,
"eval_runtime": 175.9673,
"eval_samples_per_second": 14.15,
"eval_steps_per_second": 7.075,
"step": 30
}
],
"logging_steps": 3,
"max_steps": 30,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 10,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.3261235193643008e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
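
The state above is consistent with a standard transformers Trainer run with early stopping. As a minimal, hedged sketch (not the script that produced this checkpoint), the arguments below mirror the values recorded in this file; the peak learning rate, warmup length, and scheduler type are inferred from the logged learning_rate values (linear warmup to 2e-4 over 5 steps, then cosine decay to 0 at step 30), and anything not recorded in the state file is a placeholder.

from transformers import EarlyStoppingCallback, TrainingArguments

# Sketch of TrainingArguments consistent with this trainer_state.json.
# Values marked "inferred" are deduced from the logged learning rates, not recorded directly.
args = TrainingArguments(
    output_dir="miner_id_24",        # checkpoints saved under miner_id_24/checkpoint-*
    max_steps=30,                    # "max_steps": 30
    per_device_train_batch_size=2,   # "train_batch_size": 2
    logging_steps=3,                 # "logging_steps": 3
    eval_steps=5,                    # "eval_steps": 5
    save_steps=10,                   # "save_steps": 10
    eval_strategy="steps",           # use evaluation_strategy= on transformers < 4.41
    save_strategy="steps",
    learning_rate=2e-4,              # inferred: warmup peaks near 2e-4
    warmup_steps=5,                  # inferred: step 3 logs lr = 1.2e-4 = 2e-4 * 3/5
    lr_scheduler_type="cosine",      # inferred: lr follows a cosine decay to 0.0 at step 30
    load_best_model_at_end=True,     # required for EarlyStoppingCallback
    metric_for_best_model="eval_loss",
    greater_is_better=False,
)

# Early stopping matching the "EarlyStoppingCallback" entry under stateful_callbacks.
early_stop = EarlyStoppingCallback(
    early_stopping_patience=1,
    early_stopping_threshold=0.0,
)

# Pass args and early_stop to a Trainer together with the (unrecorded) model and datasets, e.g.
# Trainer(model=model, args=args, train_dataset=..., eval_dataset=..., callbacks=[early_stop]).train()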