{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0221130221130221,
  "eval_steps": 10,
  "global_step": 39,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02620802620802621,
      "grad_norm": 0.688605546951294,
      "learning_rate": 5e-05,
      "loss": 1.9615,
      "step": 1
    },
    {
      "epoch": 0.02620802620802621,
      "eval_loss": 1.9379749298095703,
      "eval_runtime": 6.8873,
      "eval_samples_per_second": 37.315,
      "eval_steps_per_second": 9.438,
      "step": 1
    },
    {
      "epoch": 0.05241605241605242,
      "grad_norm": 0.7276808023452759,
      "learning_rate": 0.0001,
      "loss": 1.9459,
      "step": 2
    },
    {
      "epoch": 0.07862407862407862,
      "grad_norm": 0.7033953666687012,
      "learning_rate": 9.981987442712633e-05,
      "loss": 1.9077,
      "step": 3
    },
    {
      "epoch": 0.10483210483210484,
      "grad_norm": 0.719717264175415,
      "learning_rate": 9.928079551738543e-05,
      "loss": 1.9109,
      "step": 4
    },
    {
      "epoch": 0.13104013104013104,
      "grad_norm": 0.6515451669692993,
      "learning_rate": 9.838664734667495e-05,
      "loss": 1.8254,
      "step": 5
    },
    {
      "epoch": 0.15724815724815724,
      "grad_norm": 0.6293128728866577,
      "learning_rate": 9.714387227305422e-05,
      "loss": 1.7722,
      "step": 6
    },
    {
      "epoch": 0.18345618345618345,
      "grad_norm": 0.5616139769554138,
      "learning_rate": 9.55614245194068e-05,
      "loss": 1.7974,
      "step": 7
    },
    {
      "epoch": 0.20966420966420968,
      "grad_norm": 0.6011109948158264,
      "learning_rate": 9.365070565805941e-05,
      "loss": 1.7045,
      "step": 8
    },
    {
      "epoch": 0.23587223587223588,
      "grad_norm": 0.6134763956069946,
      "learning_rate": 9.142548246219212e-05,
      "loss": 1.7578,
      "step": 9
    },
    {
      "epoch": 0.2620802620802621,
      "grad_norm": 0.6061597466468811,
      "learning_rate": 8.890178771592199e-05,
      "loss": 1.7537,
      "step": 10
    },
    {
      "epoch": 0.2620802620802621,
      "eval_loss": 1.690554141998291,
      "eval_runtime": 6.9618,
      "eval_samples_per_second": 36.916,
      "eval_steps_per_second": 9.337,
      "step": 10
    },
    {
      "epoch": 0.2882882882882883,
      "grad_norm": 0.5569241642951965,
      "learning_rate": 8.609780469772623e-05,
      "loss": 1.6436,
      "step": 11
    },
    {
      "epoch": 0.3144963144963145,
      "grad_norm": 0.5158886313438416,
      "learning_rate": 8.303373616950408e-05,
      "loss": 1.6523,
      "step": 12
    },
    {
      "epoch": 0.3407043407043407,
      "grad_norm": 0.4968787729740143,
      "learning_rate": 7.973165881521434e-05,
      "loss": 1.6673,
      "step": 13
    },
    {
      "epoch": 0.3669123669123669,
      "grad_norm": 0.49128782749176025,
      "learning_rate": 7.621536417786159e-05,
      "loss": 1.6519,
      "step": 14
    },
    {
      "epoch": 0.3931203931203931,
      "grad_norm": 0.493300199508667,
      "learning_rate": 7.251018724088367e-05,
      "loss": 1.624,
      "step": 15
    },
    {
      "epoch": 0.41932841932841936,
      "grad_norm": 0.5251982808113098,
      "learning_rate": 6.864282388901544e-05,
      "loss": 1.6108,
      "step": 16
    },
    {
      "epoch": 0.44553644553644556,
      "grad_norm": 0.4884173274040222,
      "learning_rate": 6.464113856382752e-05,
      "loss": 1.6367,
      "step": 17
    },
    {
      "epoch": 0.47174447174447176,
      "grad_norm": 0.5205239653587341,
      "learning_rate": 6.0533963499786314e-05,
      "loss": 1.6079,
      "step": 18
    },
    {
      "epoch": 0.49795249795249796,
      "grad_norm": 0.5160295963287354,
      "learning_rate": 5.6350890987343944e-05,
      "loss": 1.6298,
      "step": 19
    },
    {
      "epoch": 0.5241605241605242,
      "grad_norm": 0.46167778968811035,
      "learning_rate": 5.212206015980742e-05,
      "loss": 1.7101,
      "step": 20
    },
    {
      "epoch": 0.5241605241605242,
      "eval_loss": 1.6339422464370728,
      "eval_runtime": 6.9719,
      "eval_samples_per_second": 36.862,
      "eval_steps_per_second": 9.323,
      "step": 20
    },
    {
      "epoch": 0.5503685503685504,
      "grad_norm": 0.4809012711048126,
      "learning_rate": 4.78779398401926e-05,
      "loss": 1.6745,
      "step": 21
    },
    {
      "epoch": 0.5765765765765766,
      "grad_norm": 0.44002801179885864,
      "learning_rate": 4.364910901265606e-05,
      "loss": 1.5675,
      "step": 22
    },
    {
      "epoch": 0.6027846027846028,
      "grad_norm": 0.4592677652835846,
      "learning_rate": 3.94660365002137e-05,
      "loss": 1.7446,
      "step": 23
    },
    {
      "epoch": 0.628992628992629,
      "grad_norm": 0.4685349762439728,
      "learning_rate": 3.5358861436172485e-05,
      "loss": 1.6143,
      "step": 24
    },
    {
      "epoch": 0.6552006552006552,
      "grad_norm": 0.46893665194511414,
      "learning_rate": 3.135717611098458e-05,
      "loss": 1.6969,
      "step": 25
    },
    {
      "epoch": 0.6814086814086814,
      "grad_norm": 0.4547032415866852,
      "learning_rate": 2.748981275911633e-05,
      "loss": 1.6562,
      "step": 26
    },
    {
      "epoch": 0.7076167076167076,
      "grad_norm": 0.46321848034858704,
      "learning_rate": 2.3784635822138424e-05,
      "loss": 1.6516,
      "step": 27
    },
    {
      "epoch": 0.7338247338247338,
      "grad_norm": 0.453426718711853,
      "learning_rate": 2.026834118478567e-05,
      "loss": 1.5961,
      "step": 28
    },
    {
      "epoch": 0.76003276003276,
      "grad_norm": 0.4333644211292267,
      "learning_rate": 1.6966263830495936e-05,
      "loss": 1.6134,
      "step": 29
    },
    {
      "epoch": 0.7862407862407862,
      "grad_norm": 0.4507792592048645,
      "learning_rate": 1.3902195302273779e-05,
      "loss": 1.5558,
      "step": 30
    },
    {
      "epoch": 0.7862407862407862,
      "eval_loss": 1.6152641773223877,
      "eval_runtime": 6.9374,
      "eval_samples_per_second": 37.045,
      "eval_steps_per_second": 9.369,
      "step": 30
    },
    {
      "epoch": 0.8124488124488124,
      "grad_norm": 0.4604407548904419,
      "learning_rate": 1.1098212284078036e-05,
      "loss": 1.6154,
      "step": 31
    },
    {
      "epoch": 0.8386568386568387,
      "grad_norm": 0.4262702763080597,
      "learning_rate": 8.574517537807897e-06,
      "loss": 1.6072,
      "step": 32
    },
    {
      "epoch": 0.8648648648648649,
      "grad_norm": 0.452141135931015,
      "learning_rate": 6.349294341940593e-06,
      "loss": 1.6484,
      "step": 33
    },
    {
      "epoch": 0.8910728910728911,
      "grad_norm": 0.45040103793144226,
      "learning_rate": 4.43857548059321e-06,
      "loss": 1.6131,
      "step": 34
    },
    {
      "epoch": 0.9172809172809173,
      "grad_norm": 0.43721693754196167,
      "learning_rate": 2.85612772694579e-06,
      "loss": 1.5875,
      "step": 35
    },
    {
      "epoch": 0.9434889434889435,
      "grad_norm": 0.4300181269645691,
      "learning_rate": 1.6133526533250565e-06,
      "loss": 1.6122,
      "step": 36
    },
    {
      "epoch": 0.9696969696969697,
      "grad_norm": 0.4537125825881958,
      "learning_rate": 7.192044826145771e-07,
      "loss": 1.6159,
      "step": 37
    },
    {
      "epoch": 0.9959049959049959,
      "grad_norm": 0.44113290309906006,
      "learning_rate": 1.8012557287367392e-07,
      "loss": 1.6142,
      "step": 38
    },
    {
      "epoch": 1.0221130221130221,
      "grad_norm": 1.147554636001587,
      "learning_rate": 0.0,
      "loss": 2.9159,
      "step": 39
    }
  ],
  "logging_steps": 1,
  "max_steps": 39,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 10,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.2251367690416947e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}