{
"best_metric": 1.191159963607788,
"best_model_checkpoint": "./output/training_results/C021_random_sample_Meta-Llama-3-8B_pretrain_20240726_033210/checkpoint-33984",
"epoch": 4.0,
"eval_steps": 3776,
"global_step": 37752,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00010595465140919687,
"grad_norm": 0.0,
"learning_rate": 0.0,
"loss": 1.6347,
"step": 1
},
{
"epoch": 0.20004238186056367,
"grad_norm": 2.0841434899950975,
"learning_rate": 1.9947033898305087e-06,
"loss": 1.411,
"step": 1888
},
{
"epoch": 0.40008476372112733,
"grad_norm": 1.7712195190814046,
"learning_rate": 2.23788694222373e-06,
"loss": 1.3112,
"step": 3776
},
{
"epoch": 0.40008476372112733,
"eval_loss": 1.2742130756378174,
"eval_runtime": 330.8268,
"eval_samples_per_second": 202.868,
"eval_steps_per_second": 1.587,
"step": 3776
},
{
"epoch": 0.600127145581691,
"grad_norm": 1.8384043597752484,
"learning_rate": 1.2167168103937e-06,
"loss": 1.2493,
"step": 5664
},
{
"epoch": 0.8001695274422547,
"grad_norm": 1.7725919716418708,
"learning_rate": 6.493264000872854e-07,
"loss": 1.2264,
"step": 7552
},
{
"epoch": 0.8001695274422547,
"eval_loss": 1.214762568473816,
"eval_runtime": 327.7212,
"eval_samples_per_second": 204.79,
"eval_steps_per_second": 1.602,
"step": 7552
},
{
"epoch": 1.0002119093028183,
"grad_norm": 1.8972637627842048,
"learning_rate": 3.4492111928598874e-07,
"loss": 1.211,
"step": 9440
},
{
"epoch": 1.200254291163382,
"grad_norm": 1.8787635251660875,
"learning_rate": 1.8831885638765917e-07,
"loss": 1.1558,
"step": 11328
},
{
"epoch": 1.200254291163382,
"eval_loss": 1.2016733884811401,
"eval_runtime": 327.5932,
"eval_samples_per_second": 204.87,
"eval_steps_per_second": 1.603,
"step": 11328
},
{
"epoch": 1.4002966730239457,
"grad_norm": 2.0582903963596952,
"learning_rate": 1.1128443649930022e-07,
"loss": 1.1568,
"step": 13216
},
{
"epoch": 1.6003390548845093,
"grad_norm": 1.8665084344236207,
"learning_rate": 7.542962298572276e-08,
"loss": 1.1544,
"step": 15104
},
{
"epoch": 1.6003390548845093,
"eval_loss": 1.1974143981933594,
"eval_runtime": 327.7112,
"eval_samples_per_second": 204.796,
"eval_steps_per_second": 1.602,
"step": 15104
},
{
"epoch": 1.8003814367450732,
"grad_norm": 1.7528553673770804,
"learning_rate": 5.978512044954537e-08,
"loss": 1.1574,
"step": 16992
},
{
"epoch": 2.0004238186056367,
"grad_norm": 1.8861106343782263,
"learning_rate": 5.343416604131081e-08,
"loss": 1.1562,
"step": 18880
},
{
"epoch": 2.0004238186056367,
"eval_loss": 1.19540274143219,
"eval_runtime": 328.044,
"eval_samples_per_second": 204.588,
"eval_steps_per_second": 1.6,
"step": 18880
},
{
"epoch": 2.2004662004662006,
"grad_norm": 1.9003730335704914,
"learning_rate": 5.1080120038479373e-08,
"loss": 1.1431,
"step": 20768
},
{
"epoch": 2.400508582326764,
"grad_norm": 1.941099899013768,
"learning_rate": 5.029649877564949e-08,
"loss": 1.1416,
"step": 22656
},
{
"epoch": 2.400508582326764,
"eval_loss": 1.1947613954544067,
"eval_runtime": 327.5948,
"eval_samples_per_second": 204.869,
"eval_steps_per_second": 1.603,
"step": 22656
},
{
"epoch": 2.600550964187328,
"grad_norm": 1.9472750123018565,
"learning_rate": 5.0068500018386117e-08,
"loss": 1.1461,
"step": 24544
},
{
"epoch": 2.8005933460478913,
"grad_norm": 1.893076803756726,
"learning_rate": 5.0012641588155837e-08,
"loss": 1.1449,
"step": 26432
},
{
"epoch": 2.8005933460478913,
"eval_loss": 1.1934314966201782,
"eval_runtime": 328.7941,
"eval_samples_per_second": 204.122,
"eval_steps_per_second": 1.597,
"step": 26432
},
{
"epoch": 3.0006357279084552,
"grad_norm": 2.082824554026489,
"learning_rate": 5.0001709336153935e-08,
"loss": 1.1464,
"step": 28320
},
{
"epoch": 3.2006781097690187,
"grad_norm": 1.9988580640926128,
"learning_rate": 5.000014828801133e-08,
"loss": 1.1401,
"step": 30208
},
{
"epoch": 3.2006781097690187,
"eval_loss": 1.1925288438796997,
"eval_runtime": 327.8373,
"eval_samples_per_second": 204.717,
"eval_steps_per_second": 1.601,
"step": 30208
},
{
"epoch": 3.4007204916295826,
"grad_norm": 1.8907403603158888,
"learning_rate": 5.0000006391422415e-08,
"loss": 1.1397,
"step": 32096
},
{
"epoch": 3.6007628734901465,
"grad_norm": 1.9407599933229671,
"learning_rate": 5.0000000076061e-08,
"loss": 1.1382,
"step": 33984
},
{
"epoch": 3.6007628734901465,
"eval_loss": 1.191159963607788,
"eval_runtime": 327.6723,
"eval_samples_per_second": 204.82,
"eval_steps_per_second": 1.602,
"step": 33984
},
{
"epoch": 3.80080525535071,
"grad_norm": 1.9516245761095283,
"learning_rate": 5.000000000004096e-08,
"loss": 1.1404,
"step": 35872
},
{
"epoch": 4.0,
"step": 37752,
"total_flos": 3947644028190720.0,
"train_loss": 1.1804671342891009,
"train_runtime": 53865.2221,
"train_samples_per_second": 44.854,
"train_steps_per_second": 0.701
}
],
"logging_steps": 1888,
"max_steps": 37752,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 3776,
"total_flos": 3947644028190720.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}