{ "best_metric": 0.3777858018875122, "best_model_checkpoint": "mikhail-panzo/zlm_b128_le4_s4000/checkpoint-1500", "epoch": 2.513089005235602, "eval_steps": 500, "global_step": 1500, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.08376963350785341, "grad_norm": 2.9717624187469482, "learning_rate": 2.4500000000000003e-06, "loss": 1.0424, "step": 50 }, { "epoch": 0.16753926701570682, "grad_norm": 2.9720630645751953, "learning_rate": 4.950000000000001e-06, "loss": 0.8474, "step": 100 }, { "epoch": 0.2513089005235602, "grad_norm": 2.445929765701294, "learning_rate": 7.45e-06, "loss": 0.7336, "step": 150 }, { "epoch": 0.33507853403141363, "grad_norm": 5.502955913543701, "learning_rate": 9.950000000000001e-06, "loss": 0.6492, "step": 200 }, { "epoch": 0.418848167539267, "grad_norm": 2.3356130123138428, "learning_rate": 1.2450000000000001e-05, "loss": 0.6133, "step": 250 }, { "epoch": 0.5026178010471204, "grad_norm": 1.937270164489746, "learning_rate": 1.4950000000000001e-05, "loss": 0.5889, "step": 300 }, { "epoch": 0.5863874345549738, "grad_norm": 2.392244338989258, "learning_rate": 1.745e-05, "loss": 0.5694, "step": 350 }, { "epoch": 0.6701570680628273, "grad_norm": 7.3209919929504395, "learning_rate": 1.995e-05, "loss": 0.5477, "step": 400 }, { "epoch": 0.7539267015706806, "grad_norm": 3.415917158126831, "learning_rate": 2.245e-05, "loss": 0.5329, "step": 450 }, { "epoch": 0.837696335078534, "grad_norm": 3.0256705284118652, "learning_rate": 2.495e-05, "loss": 0.5173, "step": 500 }, { "epoch": 0.837696335078534, "eval_loss": 0.4566049873828888, "eval_runtime": 261.3511, "eval_samples_per_second": 32.481, "eval_steps_per_second": 4.063, "step": 500 }, { "epoch": 0.9214659685863874, "grad_norm": 1.9436837434768677, "learning_rate": 2.7450000000000003e-05, "loss": 0.5079, "step": 550 }, { "epoch": 1.0052356020942408, "grad_norm": 1.819956660270691, "learning_rate": 2.995e-05, "loss": 0.4969, "step": 600 }, { "epoch": 1.0890052356020943, "grad_norm": 5.457251071929932, "learning_rate": 3.245e-05, "loss": 0.4977, "step": 650 }, { "epoch": 1.1727748691099475, "grad_norm": 3.183980703353882, "learning_rate": 3.495e-05, "loss": 0.4923, "step": 700 }, { "epoch": 1.256544502617801, "grad_norm": 7.1660051345825195, "learning_rate": 3.745e-05, "loss": 0.4802, "step": 750 }, { "epoch": 1.3403141361256545, "grad_norm": 5.499026775360107, "learning_rate": 3.995e-05, "loss": 0.4754, "step": 800 }, { "epoch": 1.4240837696335078, "grad_norm": 2.8053908348083496, "learning_rate": 4.245e-05, "loss": 0.4669, "step": 850 }, { "epoch": 1.5078534031413613, "grad_norm": 3.017005443572998, "learning_rate": 4.495e-05, "loss": 0.4604, "step": 900 }, { "epoch": 1.5916230366492146, "grad_norm": 2.7971177101135254, "learning_rate": 4.745e-05, "loss": 0.4565, "step": 950 }, { "epoch": 1.675392670157068, "grad_norm": 3.1588356494903564, "learning_rate": 4.995e-05, "loss": 0.455, "step": 1000 }, { "epoch": 1.675392670157068, "eval_loss": 0.40312233567237854, "eval_runtime": 256.4334, "eval_samples_per_second": 33.104, "eval_steps_per_second": 4.141, "step": 1000 }, { "epoch": 1.7591623036649215, "grad_norm": 2.2053232192993164, "learning_rate": 5.245e-05, "loss": 0.4543, "step": 1050 }, { "epoch": 1.8429319371727748, "grad_norm": 2.0562164783477783, "learning_rate": 5.495e-05, "loss": 0.4456, "step": 1100 }, { "epoch": 1.9267015706806283, "grad_norm": 2.730119466781616, "learning_rate": 5.745e-05, "loss": 0.4355, "step": 1150 }, { "epoch": 
2.0104712041884816, "grad_norm": 1.7484283447265625, "learning_rate": 5.995000000000001e-05, "loss": 0.4299, "step": 1200 }, { "epoch": 2.094240837696335, "grad_norm": 1.1786061525344849, "learning_rate": 6.245000000000001e-05, "loss": 0.4305, "step": 1250 }, { "epoch": 2.1780104712041886, "grad_norm": 1.98978590965271, "learning_rate": 6.494999999999999e-05, "loss": 0.4295, "step": 1300 }, { "epoch": 2.261780104712042, "grad_norm": 2.818659782409668, "learning_rate": 6.745e-05, "loss": 0.4235, "step": 1350 }, { "epoch": 2.345549738219895, "grad_norm": 2.3864262104034424, "learning_rate": 6.995e-05, "loss": 0.4271, "step": 1400 }, { "epoch": 2.4293193717277486, "grad_norm": 1.3647903203964233, "learning_rate": 7.245000000000001e-05, "loss": 0.4208, "step": 1450 }, { "epoch": 2.513089005235602, "grad_norm": 2.2144172191619873, "learning_rate": 7.495e-05, "loss": 0.4175, "step": 1500 }, { "epoch": 2.513089005235602, "eval_loss": 0.3777858018875122, "eval_runtime": 260.5025, "eval_samples_per_second": 32.587, "eval_steps_per_second": 4.077, "step": 1500 } ], "logging_steps": 50, "max_steps": 4000, "num_input_tokens_seen": 0, "num_train_epochs": 7, "save_steps": 500, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 2.686672014814656e+16, "train_batch_size": 16, "trial_name": null, "trial_params": null }