lora-voice-passive-llama-3-8b / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.788732394366197,
"eval_steps": 54,
"global_step": 594,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.25,
"grad_norm": 2.04457688331604,
"learning_rate": 1.267605633802817e-05,
"loss": 0.8663,
"step": 54
},
{
"epoch": 0.25,
"eval_loss": 0.8013755679130554,
"eval_runtime": 56.6879,
"eval_samples_per_second": 3.969,
"eval_steps_per_second": 1.006,
"step": 54
},
{
"epoch": 0.51,
"grad_norm": 2.8853890895843506,
"learning_rate": 2.535211267605634e-05,
"loss": 0.6963,
"step": 108
},
{
"epoch": 0.51,
"eval_loss": 0.5671647191047668,
"eval_runtime": 56.7063,
"eval_samples_per_second": 3.968,
"eval_steps_per_second": 1.005,
"step": 108
},
{
"epoch": 0.76,
"grad_norm": 2.1792943477630615,
"learning_rate": 3.802816901408451e-05,
"loss": 0.5304,
"step": 162
},
{
"epoch": 0.76,
"eval_loss": 0.4987545311450958,
"eval_runtime": 56.6968,
"eval_samples_per_second": 3.968,
"eval_steps_per_second": 1.005,
"step": 162
},
{
"epoch": 1.01,
"grad_norm": 1.5920032262802124,
"learning_rate": 4.992175273865415e-05,
"loss": 0.5226,
"step": 216
},
{
"epoch": 1.01,
"eval_loss": 0.47676804661750793,
"eval_runtime": 56.7097,
"eval_samples_per_second": 3.968,
"eval_steps_per_second": 1.005,
"step": 216
},
{
"epoch": 1.27,
"grad_norm": 2.3091418743133545,
"learning_rate": 4.85133020344288e-05,
"loss": 0.4447,
"step": 270
},
{
"epoch": 1.27,
"eval_loss": 0.4610518515110016,
"eval_runtime": 56.7044,
"eval_samples_per_second": 3.968,
"eval_steps_per_second": 1.005,
"step": 270
},
{
"epoch": 1.52,
"grad_norm": 2.401709794998169,
"learning_rate": 4.710485133020345e-05,
"loss": 0.4398,
"step": 324
},
{
"epoch": 1.52,
"eval_loss": 0.451718807220459,
"eval_runtime": 56.7057,
"eval_samples_per_second": 3.968,
"eval_steps_per_second": 1.005,
"step": 324
},
{
"epoch": 1.77,
"grad_norm": 2.706294059753418,
"learning_rate": 4.569640062597809e-05,
"loss": 0.4576,
"step": 378
},
{
"epoch": 1.77,
"eval_loss": 0.4466288983821869,
"eval_runtime": 56.7181,
"eval_samples_per_second": 3.967,
"eval_steps_per_second": 1.005,
"step": 378
},
{
"epoch": 2.03,
"grad_norm": 1.9919854402542114,
"learning_rate": 4.428794992175274e-05,
"loss": 0.4311,
"step": 432
},
{
"epoch": 2.03,
"eval_loss": 0.4444536566734314,
"eval_runtime": 56.726,
"eval_samples_per_second": 3.966,
"eval_steps_per_second": 1.005,
"step": 432
},
{
"epoch": 2.28,
"grad_norm": 2.293198823928833,
"learning_rate": 4.287949921752739e-05,
"loss": 0.3878,
"step": 486
},
{
"epoch": 2.28,
"eval_loss": 0.4513635039329529,
"eval_runtime": 56.7175,
"eval_samples_per_second": 3.967,
"eval_steps_per_second": 1.005,
"step": 486
},
{
"epoch": 2.54,
"grad_norm": 2.474388360977173,
"learning_rate": 4.1471048513302035e-05,
"loss": 0.4075,
"step": 540
},
{
"epoch": 2.54,
"eval_loss": 0.4425508379936218,
"eval_runtime": 56.7136,
"eval_samples_per_second": 3.967,
"eval_steps_per_second": 1.005,
"step": 540
},
{
"epoch": 2.79,
"grad_norm": 1.9115936756134033,
"learning_rate": 4.0062597809076686e-05,
"loss": 0.3908,
"step": 594
},
{
"epoch": 2.79,
"eval_loss": 0.4394964575767517,
"eval_runtime": 56.7192,
"eval_samples_per_second": 3.967,
"eval_steps_per_second": 1.005,
"step": 594
}
],
"logging_steps": 54,
"max_steps": 2130,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 54,
"total_flos": 4.41642351905833e+16,
"train_batch_size": 6,
"trial_name": null,
"trial_params": null
}
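
The log above records a train/eval pair every 54 steps (logging_steps, eval_steps, and save_steps are all 54), so each evaluated step also corresponds to a saved checkpoint. Below is a minimal sketch, not part of the repository, of how the file could be inspected after downloading it locally as trainer_state.json; the filename and variable names are assumptions for illustration.

# Minimal sketch: summarize the eval-loss trajectory in trainer_state.json.
# Assumes the file has been downloaded to the current directory.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# log_history interleaves training records (with "loss") and
# evaluation records (with "eval_loss"); separate the two.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

# Print the evaluation-loss trajectory and the step with the lowest eval loss.
for e in eval_logs:
    print(f"step {e['step']:>4}  epoch {e['epoch']:.2f}  eval_loss {e['eval_loss']:.4f}")

best = min(eval_logs, key=lambda e: e["eval_loss"])
print(f"lowest eval_loss so far: {best['eval_loss']:.4f} at step {best['step']}")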