zlm_b128_le4_s4000/checkpoint-1000/trainer_state.json
{
"best_metric": 0.40312233567237854,
"best_model_checkpoint": "mikhail-panzo/zlm_b128_le4_s4000/checkpoint-1000",
"epoch": 1.675392670157068,
"eval_steps": 500,
"global_step": 1000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.08376963350785341,
"grad_norm": 2.9717624187469482,
"learning_rate": 2.4500000000000003e-06,
"loss": 1.0424,
"step": 50
},
{
"epoch": 0.16753926701570682,
"grad_norm": 2.9720630645751953,
"learning_rate": 4.950000000000001e-06,
"loss": 0.8474,
"step": 100
},
{
"epoch": 0.2513089005235602,
"grad_norm": 2.445929765701294,
"learning_rate": 7.45e-06,
"loss": 0.7336,
"step": 150
},
{
"epoch": 0.33507853403141363,
"grad_norm": 5.502955913543701,
"learning_rate": 9.950000000000001e-06,
"loss": 0.6492,
"step": 200
},
{
"epoch": 0.418848167539267,
"grad_norm": 2.3356130123138428,
"learning_rate": 1.2450000000000001e-05,
"loss": 0.6133,
"step": 250
},
{
"epoch": 0.5026178010471204,
"grad_norm": 1.937270164489746,
"learning_rate": 1.4950000000000001e-05,
"loss": 0.5889,
"step": 300
},
{
"epoch": 0.5863874345549738,
"grad_norm": 2.392244338989258,
"learning_rate": 1.745e-05,
"loss": 0.5694,
"step": 350
},
{
"epoch": 0.6701570680628273,
"grad_norm": 7.3209919929504395,
"learning_rate": 1.995e-05,
"loss": 0.5477,
"step": 400
},
{
"epoch": 0.7539267015706806,
"grad_norm": 3.415917158126831,
"learning_rate": 2.245e-05,
"loss": 0.5329,
"step": 450
},
{
"epoch": 0.837696335078534,
"grad_norm": 3.0256705284118652,
"learning_rate": 2.495e-05,
"loss": 0.5173,
"step": 500
},
{
"epoch": 0.837696335078534,
"eval_loss": 0.4566049873828888,
"eval_runtime": 261.3511,
"eval_samples_per_second": 32.481,
"eval_steps_per_second": 4.063,
"step": 500
},
{
"epoch": 0.9214659685863874,
"grad_norm": 1.9436837434768677,
"learning_rate": 2.7450000000000003e-05,
"loss": 0.5079,
"step": 550
},
{
"epoch": 1.0052356020942408,
"grad_norm": 1.819956660270691,
"learning_rate": 2.995e-05,
"loss": 0.4969,
"step": 600
},
{
"epoch": 1.0890052356020943,
"grad_norm": 5.457251071929932,
"learning_rate": 3.245e-05,
"loss": 0.4977,
"step": 650
},
{
"epoch": 1.1727748691099475,
"grad_norm": 3.183980703353882,
"learning_rate": 3.495e-05,
"loss": 0.4923,
"step": 700
},
{
"epoch": 1.256544502617801,
"grad_norm": 7.1660051345825195,
"learning_rate": 3.745e-05,
"loss": 0.4802,
"step": 750
},
{
"epoch": 1.3403141361256545,
"grad_norm": 5.499026775360107,
"learning_rate": 3.995e-05,
"loss": 0.4754,
"step": 800
},
{
"epoch": 1.4240837696335078,
"grad_norm": 2.8053908348083496,
"learning_rate": 4.245e-05,
"loss": 0.4669,
"step": 850
},
{
"epoch": 1.5078534031413613,
"grad_norm": 3.017005443572998,
"learning_rate": 4.495e-05,
"loss": 0.4604,
"step": 900
},
{
"epoch": 1.5916230366492146,
"grad_norm": 2.7971177101135254,
"learning_rate": 4.745e-05,
"loss": 0.4565,
"step": 950
},
{
"epoch": 1.675392670157068,
"grad_norm": 3.1588356494903564,
"learning_rate": 4.995e-05,
"loss": 0.455,
"step": 1000
},
{
"epoch": 1.675392670157068,
"eval_loss": 0.40312233567237854,
"eval_runtime": 256.4334,
"eval_samples_per_second": 33.104,
"eval_steps_per_second": 4.141,
"step": 1000
}
],
"logging_steps": 50,
"max_steps": 4000,
"num_input_tokens_seen": 0,
"num_train_epochs": 7,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.791435708710208e+16,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}
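
The state above is the standard trainer_state.json that the Hugging Face Transformers Trainer writes into each checkpoint directory: it records the best metric and checkpoint, the logging cadence (every 50 steps), and a log_history list mixing training entries (loss, grad_norm, learning_rate) with evaluation entries (eval_loss and throughput) emitted every eval_steps=500 steps. A minimal sketch for inspecting it is below; it assumes the file has been downloaded locally as "trainer_state.json" (the path is an assumption, adjust to the actual checkpoint directory).

import json

# Load the checkpoint state written by the Trainer.
with open("trainer_state.json") as f:
    state = json.load(f)

print("best checkpoint:", state["best_model_checkpoint"])
print("best metric (eval_loss):", state["best_metric"])
print("progress: step", state["global_step"], "of", state["max_steps"])

# log_history interleaves training and evaluation records;
# training entries carry "loss", evaluation entries carry "eval_loss".
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

for e in eval_log:
    print(f"step {e['step']}: eval_loss={e['eval_loss']:.4f}")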