{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.05,
  "global_step": 50000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 4.7524,
      "step": 1000
    },
    {
      "epoch": 0.0,
      "learning_rate": 4.000000000000001e-06,
      "loss": 3.5548,
      "step": 2000
    },
    {
      "epoch": 0.0,
      "learning_rate": 6e-06,
      "loss": 2.9636,
      "step": 3000
    },
    {
      "epoch": 0.0,
      "learning_rate": 8.000000000000001e-06,
      "loss": 2.7618,
      "step": 4000
    },
    {
      "epoch": 0.01,
      "learning_rate": 1e-05,
      "loss": 2.6257,
      "step": 5000
    },
    {
      "epoch": 0.01,
      "learning_rate": 1.2e-05,
      "loss": 2.5517,
      "step": 6000
    },
    {
      "epoch": 0.01,
      "learning_rate": 1.4000000000000001e-05,
      "loss": 2.4628,
      "step": 7000
    },
    {
      "epoch": 0.01,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 2.4338,
      "step": 8000
    },
    {
      "epoch": 0.01,
      "learning_rate": 1.8e-05,
      "loss": 2.3362,
      "step": 9000
    },
    {
      "epoch": 0.01,
      "learning_rate": 2e-05,
      "loss": 2.2746,
      "step": 10000
    },
    {
      "epoch": 0.01,
      "learning_rate": 2.2000000000000003e-05,
      "loss": 2.1795,
      "step": 11000
    },
    {
      "epoch": 0.01,
      "learning_rate": 2.4e-05,
      "loss": 2.2309,
      "step": 12000
    },
    {
      "epoch": 0.01,
      "learning_rate": 2.6000000000000002e-05,
      "loss": 1.8605,
      "step": 13000
    },
    {
      "epoch": 0.01,
      "learning_rate": 2.8000000000000003e-05,
      "loss": 1.7232,
      "step": 14000
    },
    {
      "epoch": 0.01,
      "learning_rate": 3e-05,
      "loss": 1.6872,
      "step": 15000
    },
    {
      "epoch": 0.02,
      "learning_rate": 3.2000000000000005e-05,
      "loss": 2.1581,
      "step": 16000
    },
    {
      "epoch": 0.02,
      "learning_rate": 3.4000000000000007e-05,
      "loss": 2.0653,
      "step": 17000
    },
    {
      "epoch": 0.02,
      "learning_rate": 3.6e-05,
      "loss": 2.049,
      "step": 18000
    },
    {
      "epoch": 0.02,
      "learning_rate": 3.8e-05,
      "loss": 2.0773,
      "step": 19000
    },
    {
      "epoch": 0.02,
      "learning_rate": 4e-05,
      "loss": 1.9397,
      "step": 20000
    },
    {
      "epoch": 0.02,
      "learning_rate": 4.2e-05,
      "loss": 1.8899,
      "step": 21000
    },
    {
      "epoch": 0.02,
      "learning_rate": 4.4000000000000006e-05,
      "loss": 1.8474,
      "step": 22000
    },
    {
      "epoch": 0.02,
      "learning_rate": 4.600000000000001e-05,
      "loss": 1.8034,
      "step": 23000
    },
    {
      "epoch": 0.02,
      "learning_rate": 4.8e-05,
      "loss": 1.7666,
      "step": 24000
    },
    {
      "epoch": 0.03,
      "learning_rate": 5e-05,
      "loss": 1.7899,
      "step": 25000
    },
    {
      "epoch": 0.03,
      "learning_rate": 5.2000000000000004e-05,
      "loss": 1.4265,
      "step": 26000
    },
    {
      "epoch": 0.03,
      "learning_rate": 5.4000000000000005e-05,
      "loss": 1.3535,
      "step": 27000
    },
    {
      "epoch": 0.03,
      "learning_rate": 5.6000000000000006e-05,
      "loss": 1.1703,
      "step": 28000
    },
    {
      "epoch": 0.03,
      "learning_rate": 5.8e-05,
      "loss": 1.129,
      "step": 29000
    },
    {
      "epoch": 0.03,
      "learning_rate": 6e-05,
      "loss": 1.2811,
      "step": 30000
    },
    {
      "epoch": 0.03,
      "learning_rate": 6.2e-05,
      "loss": 1.2189,
      "step": 31000
    },
    {
      "epoch": 0.03,
      "learning_rate": 6.400000000000001e-05,
      "loss": 1.4371,
      "step": 32000
    },
    {
      "epoch": 0.03,
      "learning_rate": 6.6e-05,
      "loss": 1.3551,
      "step": 33000
    },
    {
      "epoch": 0.03,
      "learning_rate": 6.800000000000001e-05,
      "loss": 1.3616,
      "step": 34000
    },
    {
      "epoch": 0.04,
      "learning_rate": 7e-05,
      "loss": 1.2483,
      "step": 35000
    },
    {
      "epoch": 0.04,
      "learning_rate": 7.2e-05,
      "loss": 1.2174,
      "step": 36000
    },
    {
      "epoch": 0.04,
      "learning_rate": 7.4e-05,
      "loss": 1.1861,
      "step": 37000
    },
    {
      "epoch": 0.04,
      "learning_rate": 7.6e-05,
      "loss": 1.1465,
      "step": 38000
    },
    {
      "epoch": 0.04,
      "learning_rate": 7.800000000000001e-05,
      "loss": 1.1957,
      "step": 39000
    },
    {
      "epoch": 0.04,
      "learning_rate": 8e-05,
      "loss": 1.136,
      "step": 40000
    },
    {
      "epoch": 0.04,
      "learning_rate": 8.2e-05,
      "loss": 1.1122,
      "step": 41000
    },
    {
      "epoch": 0.04,
      "learning_rate": 8.4e-05,
      "loss": 1.0905,
      "step": 42000
    },
    {
      "epoch": 0.04,
      "learning_rate": 8.6e-05,
      "loss": 1.0996,
      "step": 43000
    },
    {
      "epoch": 0.04,
      "learning_rate": 8.800000000000001e-05,
      "loss": 1.0632,
      "step": 44000
    },
    {
      "epoch": 0.04,
      "learning_rate": 9e-05,
      "loss": 1.1387,
      "step": 45000
    },
    {
      "epoch": 0.05,
      "learning_rate": 9.200000000000001e-05,
      "loss": 1.2049,
      "step": 46000
    },
    {
      "epoch": 0.05,
      "learning_rate": 9.4e-05,
      "loss": 1.1783,
      "step": 47000
    },
    {
      "epoch": 0.05,
      "learning_rate": 9.6e-05,
      "loss": 1.1043,
      "step": 48000
    },
    {
      "epoch": 0.05,
      "learning_rate": 9.8e-05,
      "loss": 1.0633,
      "step": 49000
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.0001,
      "loss": 1.1331,
      "step": 50000
    },
    {
      "epoch": 0.05,
      "eval_loss": 0.753776490688324,
      "eval_runtime": 33.8225,
      "eval_samples_per_second": 147.83,
      "eval_steps_per_second": 2.336,
      "step": 50000
    }
  ],
  "max_steps": 1000000,
  "num_train_epochs": 9223372036854775807,
  "total_flos": 2.9824949157888e+18,
  "trial_name": null,
  "trial_params": null
}