{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.15328019619865113,
  "eval_steps": 500,
  "global_step": 250,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0061312078479460455,
      "grad_norm": 1.8461681604385376,
      "learning_rate": 0.00019936113105200085,
      "loss": 2.9781,
      "step": 10
    },
    {
      "epoch": 0.012262415695892091,
      "grad_norm": 3.338013172149658,
      "learning_rate": 0.0001971631732914674,
      "loss": 1.7629,
      "step": 20
    },
    {
      "epoch": 0.018393623543838136,
      "grad_norm": 2.6224777698516846,
      "learning_rate": 0.00019343289424566122,
      "loss": 0.8836,
      "step": 30
    },
    {
      "epoch": 0.024524831391784182,
      "grad_norm": 2.1204261779785156,
      "learning_rate": 0.00018822912264349534,
      "loss": 0.5916,
      "step": 40
    },
    {
      "epoch": 0.030656039239730228,
      "grad_norm": 1.0972894430160522,
      "learning_rate": 0.00018163392507171842,
      "loss": 0.2375,
      "step": 50
    },
    {
      "epoch": 0.03678724708767627,
      "grad_norm": 7.48022985458374,
      "learning_rate": 0.0001737513117358174,
      "loss": 0.3354,
      "step": 60
    },
    {
      "epoch": 0.04291845493562232,
      "grad_norm": 0.04426204413175583,
      "learning_rate": 0.00016470559615694446,
      "loss": 0.2289,
      "step": 70
    },
    {
      "epoch": 0.049049662783568364,
      "grad_norm": 2.7863688468933105,
      "learning_rate": 0.00015463943467342693,
      "loss": 0.1755,
      "step": 80
    },
    {
      "epoch": 0.05518087063151441,
      "grad_norm": 0.12476109713315964,
      "learning_rate": 0.0001437115766650933,
      "loss": 0.1166,
      "step": 90
    },
    {
      "epoch": 0.061312078479460456,
      "grad_norm": 2.828798770904541,
      "learning_rate": 0.00013209436098072095,
      "loss": 0.2017,
      "step": 100
    },
    {
      "epoch": 0.0674432863274065,
      "grad_norm": 0.5962816476821899,
      "learning_rate": 0.00011997099805144069,
      "loss": 0.158,
      "step": 110
    },
    {
      "epoch": 0.07357449417535254,
      "grad_norm": 1.1787047386169434,
      "learning_rate": 0.00010753268055279329,
      "loss": 0.1305,
      "step": 120
    },
    {
      "epoch": 0.07970570202329859,
      "grad_norm": 0.029666101559996605,
      "learning_rate": 9.497556818202306e-05,
      "loss": 0.2012,
      "step": 130
    },
    {
      "epoch": 0.08583690987124463,
      "grad_norm": 0.8552451133728027,
      "learning_rate": 8.249769410247239e-05,
      "loss": 0.0833,
      "step": 140
    },
    {
      "epoch": 0.09196811771919068,
      "grad_norm": 6.994303226470947,
      "learning_rate": 7.029584184229653e-05,
      "loss": 0.1842,
      "step": 150
    },
    {
      "epoch": 0.09809932556713673,
      "grad_norm": 0.14720402657985687,
      "learning_rate": 5.856244190067159e-05,
      "loss": 0.0773,
      "step": 160
    },
    {
      "epoch": 0.10423053341508277,
      "grad_norm": 0.9132323265075684,
      "learning_rate": 4.748253700387042e-05,
      "loss": 0.0718,
      "step": 170
    },
    {
      "epoch": 0.11036174126302882,
      "grad_norm": 0.5882895588874817,
      "learning_rate": 3.7230863870929964e-05,
      "loss": 0.0874,
      "step": 180
    },
    {
      "epoch": 0.11649294911097487,
      "grad_norm": 7.029378414154053,
      "learning_rate": 2.7969097511209308e-05,
      "loss": 0.1575,
      "step": 190
    },
    {
      "epoch": 0.12262415695892091,
      "grad_norm": 0.9121600985527039,
      "learning_rate": 1.9843301512912327e-05,
      "loss": 0.0978,
      "step": 200
    },
    {
      "epoch": 0.12875536480686695,
      "grad_norm": 1.3362879753112793,
      "learning_rate": 1.2981624533047432e-05,
      "loss": 0.15,
      "step": 210
    },
    {
      "epoch": 0.134886572654813,
      "grad_norm": 2.2885565757751465,
      "learning_rate": 7.492279316554207e-06,
      "loss": 0.1171,
      "step": 220
    },
    {
      "epoch": 0.14101778050275904,
      "grad_norm": 0.739460825920105,
      "learning_rate": 3.461836116672612e-06,
      "loss": 0.1375,
      "step": 230
    },
    {
      "epoch": 0.14714898835070508,
      "grad_norm": 0.732049286365509,
      "learning_rate": 9.538574303348813e-07,
      "loss": 0.0661,
      "step": 240
    },
    {
      "epoch": 0.15328019619865113,
      "grad_norm": 0.795531153678894,
      "learning_rate": 7.895579618388827e-09,
      "loss": 0.176,
      "step": 250
    }
  ],
  "logging_steps": 10,
  "max_steps": 250,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 20000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 138282270720000.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}