{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.10064412238325282,
  "eval_steps": 500,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.002012882447665056,
      "grad_norm": 22.921390533447266,
      "learning_rate": 6.5e-06,
      "loss": 1.3427,
      "step": 1
    },
    {
      "epoch": 0.004025764895330112,
      "grad_norm": 14.772722244262695,
      "learning_rate": 1.3e-05,
      "loss": 1.3647,
      "step": 2
    },
    {
      "epoch": 0.006038647342995169,
      "grad_norm": 11.458742141723633,
      "learning_rate": 1.9499999999999996e-05,
      "loss": 1.2841,
      "step": 3
    },
    {
      "epoch": 0.008051529790660225,
      "grad_norm": 4.747677326202393,
      "learning_rate": 2.6e-05,
      "loss": 1.1644,
      "step": 4
    },
    {
      "epoch": 0.010064412238325281,
      "grad_norm": 3.687121629714966,
      "learning_rate": 3.25e-05,
      "loss": 1.1084,
      "step": 5
    },
    {
      "epoch": 0.012077294685990338,
      "grad_norm": 3.178232431411743,
      "learning_rate": 3.899999999999999e-05,
      "loss": 1.0753,
      "step": 6
    },
    {
      "epoch": 0.014090177133655395,
      "grad_norm": 2.476033926010132,
      "learning_rate": 4.5499999999999995e-05,
      "loss": 1.075,
      "step": 7
    },
    {
      "epoch": 0.01610305958132045,
      "grad_norm": 1.9162139892578125,
      "learning_rate": 5.2e-05,
      "loss": 1.0195,
      "step": 8
    },
    {
      "epoch": 0.018115942028985508,
      "grad_norm": 1.797377109527588,
      "learning_rate": 5.85e-05,
      "loss": 0.9884,
      "step": 9
    },
    {
      "epoch": 0.020128824476650563,
      "grad_norm": 1.816100835800171,
      "learning_rate": 6.5e-05,
      "loss": 0.9945,
      "step": 10
    },
    {
      "epoch": 0.02214170692431562,
      "grad_norm": 1.843664526939392,
      "learning_rate": 6.499932098548219e-05,
      "loss": 0.8802,
      "step": 11
    },
    {
      "epoch": 0.024154589371980676,
      "grad_norm": 1.7202250957489014,
      "learning_rate": 6.49972839703017e-05,
      "loss": 0.9833,
      "step": 12
    },
    {
      "epoch": 0.026167471819645734,
      "grad_norm": 1.4981484413146973,
      "learning_rate": 6.499388903957628e-05,
      "loss": 0.9481,
      "step": 13
    },
    {
      "epoch": 0.02818035426731079,
      "grad_norm": 1.4716213941574097,
      "learning_rate": 6.498913633516483e-05,
      "loss": 0.8809,
      "step": 14
    },
    {
      "epoch": 0.030193236714975844,
      "grad_norm": 1.3817026615142822,
      "learning_rate": 6.498302605566152e-05,
      "loss": 0.9264,
      "step": 15
    },
    {
      "epoch": 0.0322061191626409,
      "grad_norm": 1.509097695350647,
      "learning_rate": 6.497555845638748e-05,
      "loss": 0.8829,
      "step": 16
    },
    {
      "epoch": 0.03421900161030596,
      "grad_norm": 1.5498337745666504,
      "learning_rate": 6.496673384938014e-05,
      "loss": 0.9227,
      "step": 17
    },
    {
      "epoch": 0.036231884057971016,
      "grad_norm": 1.527828574180603,
      "learning_rate": 6.49565526033802e-05,
      "loss": 0.9488,
      "step": 18
    },
    {
      "epoch": 0.038244766505636074,
      "grad_norm": 1.4578592777252197,
      "learning_rate": 6.494501514381621e-05,
      "loss": 0.8845,
      "step": 19
    },
    {
      "epoch": 0.040257648953301126,
      "grad_norm": 1.4198046922683716,
      "learning_rate": 6.493212195278678e-05,
      "loss": 0.8809,
      "step": 20
    },
    {
      "epoch": 0.042270531400966184,
      "grad_norm": 1.4455400705337524,
      "learning_rate": 6.491787356904047e-05,
      "loss": 0.8686,
      "step": 21
    },
    {
      "epoch": 0.04428341384863124,
      "grad_norm": 1.5711616277694702,
      "learning_rate": 6.490227058795323e-05,
      "loss": 0.96,
      "step": 22
    },
    {
      "epoch": 0.046296296296296294,
      "grad_norm": 1.4603729248046875,
      "learning_rate": 6.488531366150359e-05,
      "loss": 0.9026,
      "step": 23
    },
    {
      "epoch": 0.04830917874396135,
      "grad_norm": 1.4184608459472656,
      "learning_rate": 6.48670034982453e-05,
      "loss": 0.9426,
      "step": 24
    },
    {
      "epoch": 0.05032206119162641,
      "grad_norm": 1.4080291986465454,
      "learning_rate": 6.484734086327788e-05,
      "loss": 0.8857,
      "step": 25
    },
    {
      "epoch": 0.05233494363929147,
      "grad_norm": 1.4739091396331787,
      "learning_rate": 6.482632657821454e-05,
      "loss": 0.8897,
      "step": 26
    },
    {
      "epoch": 0.05434782608695652,
      "grad_norm": 1.320746898651123,
      "learning_rate": 6.480396152114787e-05,
      "loss": 0.8512,
      "step": 27
    },
    {
      "epoch": 0.05636070853462158,
      "grad_norm": 1.4142905473709106,
      "learning_rate": 6.478024662661315e-05,
      "loss": 0.9459,
      "step": 28
    },
    {
      "epoch": 0.05837359098228664,
      "grad_norm": 1.3830432891845703,
      "learning_rate": 6.47551828855493e-05,
      "loss": 0.8605,
      "step": 29
    },
    {
      "epoch": 0.06038647342995169,
      "grad_norm": 1.3653528690338135,
      "learning_rate": 6.472877134525753e-05,
      "loss": 0.8446,
      "step": 30
    },
    {
      "epoch": 0.06239935587761675,
      "grad_norm": 1.3041807413101196,
      "learning_rate": 6.470101310935746e-05,
      "loss": 0.8407,
      "step": 31
    },
    {
      "epoch": 0.0644122383252818,
      "grad_norm": 1.3803820610046387,
      "learning_rate": 6.467190933774112e-05,
      "loss": 0.8505,
      "step": 32
    },
    {
      "epoch": 0.06642512077294686,
      "grad_norm": 1.3407087326049805,
      "learning_rate": 6.464146124652441e-05,
      "loss": 0.794,
      "step": 33
    },
    {
      "epoch": 0.06843800322061191,
      "grad_norm": 1.4283268451690674,
      "learning_rate": 6.460967010799629e-05,
      "loss": 0.9139,
      "step": 34
    },
    {
      "epoch": 0.07045088566827697,
      "grad_norm": 1.3512784242630005,
      "learning_rate": 6.457653725056568e-05,
      "loss": 0.807,
      "step": 35
    },
    {
      "epoch": 0.07246376811594203,
      "grad_norm": 1.3088512420654297,
      "learning_rate": 6.454206405870587e-05,
      "loss": 0.8968,
      "step": 36
    },
    {
      "epoch": 0.07447665056360708,
      "grad_norm": 1.2908034324645996,
      "learning_rate": 6.450625197289675e-05,
      "loss": 0.8447,
      "step": 37
    },
    {
      "epoch": 0.07648953301127215,
      "grad_norm": 1.3444992303848267,
      "learning_rate": 6.446910248956453e-05,
      "loss": 0.8891,
      "step": 38
    },
    {
      "epoch": 0.0785024154589372,
      "grad_norm": 1.2567275762557983,
      "learning_rate": 6.443061716101926e-05,
      "loss": 0.8134,
      "step": 39
    },
    {
      "epoch": 0.08051529790660225,
      "grad_norm": 1.3401930332183838,
      "learning_rate": 6.439079759539e-05,
      "loss": 0.8751,
      "step": 40
    },
    {
      "epoch": 0.08252818035426732,
      "grad_norm": 1.3348793983459473,
      "learning_rate": 6.434964545655754e-05,
      "loss": 0.7942,
      "step": 41
    },
    {
      "epoch": 0.08454106280193237,
      "grad_norm": 1.311357021331787,
      "learning_rate": 6.430716246408493e-05,
      "loss": 0.8461,
      "step": 42
    },
    {
      "epoch": 0.08655394524959742,
      "grad_norm": 1.3319345712661743,
      "learning_rate": 6.426335039314566e-05,
      "loss": 0.8821,
      "step": 43
    },
    {
      "epoch": 0.08856682769726248,
      "grad_norm": 1.304795742034912,
      "learning_rate": 6.421821107444936e-05,
      "loss": 0.8655,
      "step": 44
    },
    {
      "epoch": 0.09057971014492754,
      "grad_norm": 1.422869086265564,
      "learning_rate": 6.417174639416547e-05,
      "loss": 0.9261,
      "step": 45
    },
    {
      "epoch": 0.09259259259259259,
      "grad_norm": 1.2931973934173584,
      "learning_rate": 6.412395829384427e-05,
      "loss": 0.8157,
      "step": 46
    },
    {
      "epoch": 0.09460547504025765,
      "grad_norm": 1.2984646558761597,
      "learning_rate": 6.407484877033586e-05,
      "loss": 0.7919,
      "step": 47
    },
    {
      "epoch": 0.0966183574879227,
      "grad_norm": 1.2596486806869507,
      "learning_rate": 6.402441987570665e-05,
      "loss": 0.7682,
      "step": 48
    },
    {
      "epoch": 0.09863123993558776,
      "grad_norm": 1.3544796705245972,
      "learning_rate": 6.397267371715368e-05,
      "loss": 0.9084,
      "step": 49
    },
    {
      "epoch": 0.10064412238325282,
      "grad_norm": 1.2623202800750732,
      "learning_rate": 6.391961245691652e-05,
      "loss": 0.8482,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 496,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 5.877172627557581e+16,
  "train_batch_size": 6,
  "trial_name": null,
  "trial_params": null
}