{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.12953367875647667,
  "eval_steps": 9,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0025906735751295338,
      "grad_norm": 3.7401230335235596,
      "learning_rate": 1e-05,
      "loss": 3.0666,
      "step": 1
    },
    {
      "epoch": 0.0025906735751295338,
      "eval_loss": 4.18491792678833,
      "eval_runtime": 35.7075,
      "eval_samples_per_second": 9.102,
      "eval_steps_per_second": 1.148,
      "step": 1
    },
    {
      "epoch": 0.0051813471502590676,
      "grad_norm": 4.653674125671387,
      "learning_rate": 2e-05,
      "loss": 4.1643,
      "step": 2
    },
    {
      "epoch": 0.007772020725388601,
      "grad_norm": 5.061185836791992,
      "learning_rate": 3e-05,
      "loss": 3.7233,
      "step": 3
    },
    {
      "epoch": 0.010362694300518135,
      "grad_norm": 4.979389667510986,
      "learning_rate": 4e-05,
      "loss": 4.6052,
      "step": 4
    },
    {
      "epoch": 0.012953367875647668,
      "grad_norm": 4.482627868652344,
      "learning_rate": 5e-05,
      "loss": 3.4867,
      "step": 5
    },
    {
      "epoch": 0.015544041450777202,
      "grad_norm": 6.006535053253174,
      "learning_rate": 6e-05,
      "loss": 3.7069,
      "step": 6
    },
    {
      "epoch": 0.018134715025906734,
      "grad_norm": 4.89888334274292,
      "learning_rate": 7e-05,
      "loss": 3.9644,
      "step": 7
    },
    {
      "epoch": 0.02072538860103627,
      "grad_norm": 5.465033531188965,
      "learning_rate": 8e-05,
      "loss": 3.3032,
      "step": 8
    },
    {
      "epoch": 0.023316062176165803,
      "grad_norm": 4.801088809967041,
      "learning_rate": 9e-05,
      "loss": 2.0563,
      "step": 9
    },
    {
      "epoch": 0.023316062176165803,
      "eval_loss": 3.25215744972229,
      "eval_runtime": 35.2692,
      "eval_samples_per_second": 9.215,
      "eval_steps_per_second": 1.162,
      "step": 9
    },
    {
      "epoch": 0.025906735751295335,
      "grad_norm": 8.266736030578613,
      "learning_rate": 0.0001,
      "loss": 3.6629,
      "step": 10
    },
    {
      "epoch": 0.02849740932642487,
      "grad_norm": 4.273421287536621,
      "learning_rate": 9.99695413509548e-05,
      "loss": 1.7825,
      "step": 11
    },
    {
      "epoch": 0.031088082901554404,
      "grad_norm": 9.370466232299805,
      "learning_rate": 9.987820251299122e-05,
      "loss": 2.9157,
      "step": 12
    },
    {
      "epoch": 0.03367875647668394,
      "grad_norm": 5.781856536865234,
      "learning_rate": 9.972609476841367e-05,
      "loss": 2.4527,
      "step": 13
    },
    {
      "epoch": 0.03626943005181347,
      "grad_norm": 7.081896781921387,
      "learning_rate": 9.951340343707852e-05,
      "loss": 2.0791,
      "step": 14
    },
    {
      "epoch": 0.038860103626943004,
      "grad_norm": 9.198928833007812,
      "learning_rate": 9.924038765061042e-05,
      "loss": 2.8983,
      "step": 15
    },
    {
      "epoch": 0.04145077720207254,
      "grad_norm": 7.364632606506348,
      "learning_rate": 9.890738003669029e-05,
      "loss": 2.0986,
      "step": 16
    },
    {
      "epoch": 0.04404145077720207,
      "grad_norm": 5.105291366577148,
      "learning_rate": 9.851478631379982e-05,
      "loss": 1.4194,
      "step": 17
    },
    {
      "epoch": 0.046632124352331605,
      "grad_norm": 7.902851104736328,
      "learning_rate": 9.806308479691595e-05,
      "loss": 2.5238,
      "step": 18
    },
    {
      "epoch": 0.046632124352331605,
      "eval_loss": 2.1568806171417236,
      "eval_runtime": 35.3004,
      "eval_samples_per_second": 9.207,
      "eval_steps_per_second": 1.161,
      "step": 18
    },
    {
      "epoch": 0.04922279792746114,
      "grad_norm": 7.482724666595459,
      "learning_rate": 9.755282581475769e-05,
      "loss": 2.0681,
      "step": 19
    },
    {
      "epoch": 0.05181347150259067,
      "grad_norm": 6.279477119445801,
      "learning_rate": 9.698463103929542e-05,
      "loss": 2.0164,
      "step": 20
    },
    {
      "epoch": 0.054404145077720206,
      "grad_norm": 6.212917804718018,
      "learning_rate": 9.635919272833938e-05,
      "loss": 1.9994,
      "step": 21
    },
    {
      "epoch": 0.05699481865284974,
      "grad_norm": 5.9393815994262695,
      "learning_rate": 9.567727288213005e-05,
      "loss": 2.2696,
      "step": 22
    },
    {
      "epoch": 0.05958549222797927,
      "grad_norm": 6.2451171875,
      "learning_rate": 9.493970231495835e-05,
      "loss": 2.0264,
      "step": 23
    },
    {
      "epoch": 0.06217616580310881,
      "grad_norm": 4.755683898925781,
      "learning_rate": 9.414737964294636e-05,
      "loss": 1.7309,
      "step": 24
    },
    {
      "epoch": 0.06476683937823834,
      "grad_norm": 5.793985366821289,
      "learning_rate": 9.330127018922194e-05,
      "loss": 2.432,
      "step": 25
    },
    {
      "epoch": 0.06735751295336788,
      "grad_norm": 6.06044864654541,
      "learning_rate": 9.24024048078213e-05,
      "loss": 1.6466,
      "step": 26
    },
    {
      "epoch": 0.06994818652849741,
      "grad_norm": 5.624882698059082,
      "learning_rate": 9.145187862775209e-05,
      "loss": 1.5609,
      "step": 27
    },
    {
      "epoch": 0.06994818652849741,
      "eval_loss": 1.9685734510421753,
      "eval_runtime": 35.2965,
      "eval_samples_per_second": 9.208,
      "eval_steps_per_second": 1.162,
      "step": 27
    },
    {
      "epoch": 0.07253886010362694,
      "grad_norm": 5.412827968597412,
      "learning_rate": 9.045084971874738e-05,
      "loss": 1.9472,
      "step": 28
    },
    {
      "epoch": 0.07512953367875648,
      "grad_norm": 8.074312210083008,
      "learning_rate": 8.940053768033609e-05,
      "loss": 1.8224,
      "step": 29
    },
    {
      "epoch": 0.07772020725388601,
      "grad_norm": 10.367136001586914,
      "learning_rate": 8.83022221559489e-05,
      "loss": 1.7846,
      "step": 30
    },
    {
      "epoch": 0.08031088082901554,
      "grad_norm": 5.971827507019043,
      "learning_rate": 8.715724127386972e-05,
      "loss": 1.8626,
      "step": 31
    },
    {
      "epoch": 0.08290155440414508,
      "grad_norm": 6.0518975257873535,
      "learning_rate": 8.596699001693255e-05,
      "loss": 1.9026,
      "step": 32
    },
    {
      "epoch": 0.08549222797927461,
      "grad_norm": 7.365506172180176,
      "learning_rate": 8.473291852294987e-05,
      "loss": 1.6508,
      "step": 33
    },
    {
      "epoch": 0.08808290155440414,
      "grad_norm": 7.813849925994873,
      "learning_rate": 8.345653031794292e-05,
      "loss": 2.2382,
      "step": 34
    },
    {
      "epoch": 0.09067357512953368,
      "grad_norm": 6.097650527954102,
      "learning_rate": 8.213938048432697e-05,
      "loss": 1.8182,
      "step": 35
    },
    {
      "epoch": 0.09326424870466321,
      "grad_norm": 8.286858558654785,
      "learning_rate": 8.07830737662829e-05,
      "loss": 2.0077,
      "step": 36
    },
    {
      "epoch": 0.09326424870466321,
      "eval_loss": 1.8582597970962524,
      "eval_runtime": 35.2881,
      "eval_samples_per_second": 9.21,
      "eval_steps_per_second": 1.162,
      "step": 36
    },
    {
      "epoch": 0.09585492227979274,
      "grad_norm": 7.306047439575195,
      "learning_rate": 7.938926261462366e-05,
      "loss": 1.754,
      "step": 37
    },
    {
      "epoch": 0.09844559585492228,
      "grad_norm": 8.38849925994873,
      "learning_rate": 7.795964517353735e-05,
      "loss": 1.59,
      "step": 38
    },
    {
      "epoch": 0.10103626943005181,
      "grad_norm": 5.015171527862549,
      "learning_rate": 7.649596321166024e-05,
      "loss": 1.272,
      "step": 39
    },
    {
      "epoch": 0.10362694300518134,
      "grad_norm": 7.593957901000977,
      "learning_rate": 7.500000000000001e-05,
      "loss": 2.1336,
      "step": 40
    },
    {
      "epoch": 0.10621761658031088,
      "grad_norm": 9.562592506408691,
      "learning_rate": 7.347357813929454e-05,
      "loss": 2.4349,
      "step": 41
    },
    {
      "epoch": 0.10880829015544041,
      "grad_norm": 7.189981937408447,
      "learning_rate": 7.191855733945387e-05,
      "loss": 1.827,
      "step": 42
    },
    {
      "epoch": 0.11139896373056994,
      "grad_norm": 5.631102561950684,
      "learning_rate": 7.033683215379002e-05,
      "loss": 1.5876,
      "step": 43
    },
    {
      "epoch": 0.11398963730569948,
      "grad_norm": 5.808564186096191,
      "learning_rate": 6.873032967079561e-05,
      "loss": 1.7378,
      "step": 44
    },
    {
      "epoch": 0.11658031088082901,
      "grad_norm": 6.575247764587402,
      "learning_rate": 6.710100716628344e-05,
      "loss": 2.4891,
      "step": 45
    },
    {
      "epoch": 0.11658031088082901,
      "eval_loss": 1.8105183839797974,
      "eval_runtime": 35.2968,
      "eval_samples_per_second": 9.208,
      "eval_steps_per_second": 1.162,
      "step": 45
    },
    {
      "epoch": 0.11917098445595854,
      "grad_norm": 6.088664531707764,
      "learning_rate": 6.545084971874738e-05,
      "loss": 1.4166,
      "step": 46
    },
    {
      "epoch": 0.12176165803108809,
      "grad_norm": 5.614904403686523,
      "learning_rate": 6.378186779084995e-05,
      "loss": 1.6618,
      "step": 47
    },
    {
      "epoch": 0.12435233160621761,
      "grad_norm": 5.483867645263672,
      "learning_rate": 6.209609477998338e-05,
      "loss": 2.0089,
      "step": 48
    },
    {
      "epoch": 0.12694300518134716,
      "grad_norm": 5.806712627410889,
      "learning_rate": 6.0395584540887963e-05,
      "loss": 1.3856,
      "step": 49
    },
    {
      "epoch": 0.12953367875647667,
      "grad_norm": 6.254551410675049,
      "learning_rate": 5.868240888334653e-05,
      "loss": 2.0932,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3.70943641780224e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}