{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.04108885464817668,
  "eval_steps": 5,
  "global_step": 20,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.002054442732408834,
      "grad_norm": 174.16879272460938,
      "learning_rate": 1e-05,
      "loss": 93.7032,
      "step": 1
    },
    {
      "epoch": 0.002054442732408834,
      "eval_loss": 2.998701333999634,
      "eval_runtime": 291.3939,
      "eval_samples_per_second": 11.256,
      "eval_steps_per_second": 2.814,
      "step": 1
    },
    {
      "epoch": 0.004108885464817668,
      "grad_norm": 185.02249145507812,
      "learning_rate": 2e-05,
      "loss": 96.5026,
      "step": 2
    },
    {
      "epoch": 0.0061633281972265025,
      "grad_norm": 166.1304931640625,
      "learning_rate": 3e-05,
      "loss": 94.5685,
      "step": 3
    },
    {
      "epoch": 0.008217770929635337,
      "grad_norm": 163.54811096191406,
      "learning_rate": 4e-05,
      "loss": 91.1858,
      "step": 4
    },
    {
      "epoch": 0.01027221366204417,
      "grad_norm": 137.41305541992188,
      "learning_rate": 5e-05,
      "loss": 90.2512,
      "step": 5
    },
    {
      "epoch": 0.01027221366204417,
      "eval_loss": 2.595139265060425,
      "eval_runtime": 291.9524,
      "eval_samples_per_second": 11.235,
      "eval_steps_per_second": 2.809,
      "step": 5
    },
    {
      "epoch": 0.012326656394453005,
      "grad_norm": 123.82645416259766,
      "learning_rate": 6e-05,
      "loss": 85.2479,
      "step": 6
    },
    {
      "epoch": 0.014381099126861838,
      "grad_norm": 148.07144165039062,
      "learning_rate": 7e-05,
      "loss": 79.2969,
      "step": 7
    },
    {
      "epoch": 0.016435541859270673,
      "grad_norm": 127.11111450195312,
      "learning_rate": 8e-05,
      "loss": 73.8599,
      "step": 8
    },
    {
      "epoch": 0.01848998459167951,
      "grad_norm": 146.44976806640625,
      "learning_rate": 9e-05,
      "loss": 74.0257,
      "step": 9
    },
    {
      "epoch": 0.02054442732408834,
      "grad_norm": 166.2652130126953,
      "learning_rate": 0.0001,
      "loss": 73.5464,
      "step": 10
    },
    {
      "epoch": 0.02054442732408834,
      "eval_loss": 2.1906113624572754,
      "eval_runtime": 291.9218,
      "eval_samples_per_second": 11.236,
      "eval_steps_per_second": 2.809,
      "step": 10
    },
    {
      "epoch": 0.022598870056497175,
      "grad_norm": 182.4556884765625,
      "learning_rate": 9.755282581475769e-05,
      "loss": 68.4839,
      "step": 11
    },
    {
      "epoch": 0.02465331278890601,
      "grad_norm": 198.03158569335938,
      "learning_rate": 9.045084971874738e-05,
      "loss": 67.4511,
      "step": 12
    },
    {
      "epoch": 0.02670775552131484,
      "grad_norm": 212.62405395507812,
      "learning_rate": 7.938926261462366e-05,
      "loss": 64.195,
      "step": 13
    },
    {
      "epoch": 0.028762198253723677,
      "grad_norm": 215.69439697265625,
      "learning_rate": 6.545084971874738e-05,
      "loss": 62.9509,
      "step": 14
    },
    {
      "epoch": 0.030816640986132512,
      "grad_norm": 143.1745147705078,
      "learning_rate": 5e-05,
      "loss": 62.3859,
      "step": 15
    },
    {
      "epoch": 0.030816640986132512,
      "eval_loss": 1.8680182695388794,
      "eval_runtime": 291.9506,
      "eval_samples_per_second": 11.235,
      "eval_steps_per_second": 2.809,
      "step": 15
    },
    {
      "epoch": 0.03287108371854135,
      "grad_norm": 89.2127456665039,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 62.5249,
      "step": 16
    },
    {
      "epoch": 0.03492552645095018,
      "grad_norm": 77.51390075683594,
      "learning_rate": 2.061073738537635e-05,
      "loss": 59.9075,
      "step": 17
    },
    {
      "epoch": 0.03697996918335902,
      "grad_norm": 63.38047790527344,
      "learning_rate": 9.549150281252633e-06,
      "loss": 58.074,
      "step": 18
    },
    {
      "epoch": 0.03903441191576785,
      "grad_norm": 67.86468505859375,
      "learning_rate": 2.4471741852423237e-06,
      "loss": 56.1145,
      "step": 19
    },
    {
      "epoch": 0.04108885464817668,
      "grad_norm": 66.08454895019531,
      "learning_rate": 0.0,
      "loss": 58.0635,
      "step": 20
    },
    {
      "epoch": 0.04108885464817668,
      "eval_loss": 1.7998082637786865,
      "eval_runtime": 291.8631,
      "eval_samples_per_second": 11.238,
      "eval_steps_per_second": 2.81,
      "step": 20
    }
  ],
  "logging_steps": 1,
  "max_steps": 20,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 5,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4.500033945521357e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
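
The state above matches the trainer_state.json format that the Hugging Face transformers Trainer saves inside each checkpoint directory. As a minimal sketch of how one might inspect it offline, assuming the file is saved at checkpoint-20/trainer_state.json (the path is an assumption), the snippet below loads the JSON with the standard library and separates the training records in log_history (which carry loss, grad_norm, learning_rate) from the evaluation records (which carry eval_loss).

import json

# Hypothetical path; point this at wherever the checkpoint's trainer_state.json lives.
with open("checkpoint-20/trainer_state.json") as f:
    state = json.load(f)

# Training entries have a "loss" key; evaluation entries have "eval_loss" instead.
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

for e in train_log:
    print(f"step {e['step']:>2}  loss {e['loss']:8.4f}  lr {e['learning_rate']:.2e}")
for e in eval_log:
    print(f"step {e['step']:>2}  eval_loss {e['eval_loss']:.4f}")

Printed this way, the log shows the learning rate warming up from 1e-05 to 0.0001 over the first 10 steps and then decaying to 0.0 by step 20, while eval_loss falls from about 3.00 at step 1 to 1.80 at step 20.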