{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.7565011820330969,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.015130023640661938,
      "grad_norm": 0.007734136655926704,
      "learning_rate": 5e-05,
      "loss": 11.9302,
      "step": 1
    },
    {
      "epoch": 0.015130023640661938,
      "eval_loss": 11.932888984680176,
      "eval_runtime": 0.2037,
      "eval_samples_per_second": 245.425,
      "eval_steps_per_second": 63.811,
      "step": 1
    },
    {
      "epoch": 0.030260047281323876,
      "grad_norm": 0.007177683059126139,
      "learning_rate": 0.0001,
      "loss": 11.931,
      "step": 2
    },
    {
      "epoch": 0.04539007092198582,
      "grad_norm": 0.007821178995072842,
      "learning_rate": 9.990365154573717e-05,
      "loss": 11.9312,
      "step": 3
    },
    {
      "epoch": 0.06052009456264775,
      "grad_norm": 0.009464521892368793,
      "learning_rate": 9.961501876182148e-05,
      "loss": 11.9313,
      "step": 4
    },
    {
      "epoch": 0.07565011820330969,
      "grad_norm": 0.0079513443633914,
      "learning_rate": 9.913533761814537e-05,
      "loss": 11.9312,
      "step": 5
    },
    {
      "epoch": 0.09078014184397164,
      "grad_norm": 0.00863523781299591,
      "learning_rate": 9.846666218300807e-05,
      "loss": 11.931,
      "step": 6
    },
    {
      "epoch": 0.10591016548463357,
      "grad_norm": 0.008535042405128479,
      "learning_rate": 9.761185582727977e-05,
      "loss": 11.9313,
      "step": 7
    },
    {
      "epoch": 0.1210401891252955,
      "grad_norm": 0.007440511137247086,
      "learning_rate": 9.657457896300791e-05,
      "loss": 11.931,
      "step": 8
    },
    {
      "epoch": 0.13617021276595745,
      "grad_norm": 0.006813289597630501,
      "learning_rate": 9.535927336897098e-05,
      "loss": 11.932,
      "step": 9
    },
    {
      "epoch": 0.15130023640661938,
      "grad_norm": 0.008292514830827713,
      "learning_rate": 9.397114317029975e-05,
      "loss": 11.9311,
      "step": 10
    },
    {
      "epoch": 0.16643026004728131,
      "grad_norm": 0.00823824480175972,
      "learning_rate": 9.241613255361455e-05,
      "loss": 11.9319,
      "step": 11
    },
    {
      "epoch": 0.18156028368794327,
      "grad_norm": 0.009189211763441563,
      "learning_rate": 9.070090031310558e-05,
      "loss": 11.9317,
      "step": 12
    },
    {
      "epoch": 0.1966903073286052,
      "grad_norm": 0.007587582804262638,
      "learning_rate": 8.883279133655399e-05,
      "loss": 11.9307,
      "step": 13
    },
    {
      "epoch": 0.21182033096926714,
      "grad_norm": 0.010311111807823181,
      "learning_rate": 8.681980515339464e-05,
      "loss": 11.9301,
      "step": 14
    },
    {
      "epoch": 0.22695035460992907,
      "grad_norm": 0.009515615180134773,
      "learning_rate": 8.467056167950311e-05,
      "loss": 11.9314,
      "step": 15
    },
    {
      "epoch": 0.242080378250591,
      "grad_norm": 0.011019989848136902,
      "learning_rate": 8.239426430539243e-05,
      "loss": 11.931,
      "step": 16
    },
    {
      "epoch": 0.25721040189125294,
      "grad_norm": 0.008023861795663834,
      "learning_rate": 8.000066048588211e-05,
      "loss": 11.9312,
      "step": 17
    },
    {
      "epoch": 0.2723404255319149,
      "grad_norm": 0.010304928757250309,
      "learning_rate": 7.75e-05,
      "loss": 11.9316,
      "step": 18
    },
    {
      "epoch": 0.28747044917257686,
      "grad_norm": 0.01065912190824747,
      "learning_rate": 7.490299105985507e-05,
      "loss": 11.931,
      "step": 19
    },
    {
      "epoch": 0.30260047281323876,
      "grad_norm": 0.009081180207431316,
      "learning_rate": 7.222075445642904e-05,
      "loss": 11.9301,
      "step": 20
    },
    {
      "epoch": 0.3177304964539007,
      "grad_norm": 0.011267852038145065,
      "learning_rate": 6.946477593864228e-05,
      "loss": 11.9316,
      "step": 21
    },
    {
      "epoch": 0.33286052009456263,
      "grad_norm": 0.010181689634919167,
      "learning_rate": 6.664685702961344e-05,
      "loss": 11.9315,
      "step": 22
    },
    {
      "epoch": 0.3479905437352246,
      "grad_norm": 0.01211818028241396,
      "learning_rate": 6.377906449072578e-05,
      "loss": 11.9308,
      "step": 23
    },
    {
      "epoch": 0.36312056737588655,
      "grad_norm": 0.013043578714132309,
      "learning_rate": 6.087367864990233e-05,
      "loss": 11.9316,
      "step": 24
    },
    {
      "epoch": 0.37825059101654845,
      "grad_norm": 0.01362883672118187,
      "learning_rate": 5.794314081535644e-05,
      "loss": 11.932,
      "step": 25
    },
    {
      "epoch": 0.37825059101654845,
      "eval_loss": 11.932381629943848,
      "eval_runtime": 0.202,
      "eval_samples_per_second": 247.551,
      "eval_steps_per_second": 64.363,
      "step": 25
    },
    {
      "epoch": 0.3933806146572104,
      "grad_norm": 0.011090955697000027,
      "learning_rate": 5.500000000000001e-05,
      "loss": 11.9304,
      "step": 26
    },
    {
      "epoch": 0.4085106382978723,
      "grad_norm": 0.011664253659546375,
      "learning_rate": 5.205685918464356e-05,
      "loss": 11.9306,
      "step": 27
    },
    {
      "epoch": 0.4236406619385343,
      "grad_norm": 0.014071961864829063,
      "learning_rate": 4.912632135009769e-05,
      "loss": 11.9306,
      "step": 28
    },
    {
      "epoch": 0.43877068557919624,
      "grad_norm": 0.013513321988284588,
      "learning_rate": 4.6220935509274235e-05,
      "loss": 11.9313,
      "step": 29
    },
    {
      "epoch": 0.45390070921985815,
      "grad_norm": 0.01655971258878708,
      "learning_rate": 4.3353142970386564e-05,
      "loss": 11.9318,
      "step": 30
    },
    {
      "epoch": 0.4690307328605201,
      "grad_norm": 0.014017711393535137,
      "learning_rate": 4.053522406135775e-05,
      "loss": 11.9305,
      "step": 31
    },
    {
      "epoch": 0.484160756501182,
      "grad_norm": 0.011544297449290752,
      "learning_rate": 3.777924554357096e-05,
      "loss": 11.9314,
      "step": 32
    },
    {
      "epoch": 0.49929078014184397,
      "grad_norm": 0.012642547488212585,
      "learning_rate": 3.509700894014496e-05,
      "loss": 11.9314,
      "step": 33
    },
    {
      "epoch": 0.5144208037825059,
      "grad_norm": 0.01794467680156231,
      "learning_rate": 3.250000000000001e-05,
      "loss": 11.9302,
      "step": 34
    },
    {
      "epoch": 0.5295508274231678,
      "grad_norm": 0.013963800854980946,
      "learning_rate": 2.9999339514117912e-05,
      "loss": 11.9299,
      "step": 35
    },
    {
      "epoch": 0.5446808510638298,
      "grad_norm": 0.021733710542321205,
      "learning_rate": 2.760573569460757e-05,
      "loss": 11.9303,
      "step": 36
    },
    {
      "epoch": 0.5598108747044918,
      "grad_norm": 0.02163160964846611,
      "learning_rate": 2.53294383204969e-05,
      "loss": 11.9305,
      "step": 37
    },
    {
      "epoch": 0.5749408983451537,
      "grad_norm": 0.014926633797585964,
      "learning_rate": 2.3180194846605367e-05,
      "loss": 11.9306,
      "step": 38
    },
    {
      "epoch": 0.5900709219858156,
      "grad_norm": 0.015309125185012817,
      "learning_rate": 2.1167208663446025e-05,
      "loss": 11.9299,
      "step": 39
    },
    {
      "epoch": 0.6052009456264775,
      "grad_norm": 0.01804831251502037,
      "learning_rate": 1.9299099686894423e-05,
      "loss": 11.9294,
      "step": 40
    },
    {
      "epoch": 0.6203309692671395,
      "grad_norm": 0.017892878502607346,
      "learning_rate": 1.758386744638546e-05,
      "loss": 11.9306,
      "step": 41
    },
    {
      "epoch": 0.6354609929078014,
      "grad_norm": 0.016832858324050903,
      "learning_rate": 1.602885682970026e-05,
      "loss": 11.9301,
      "step": 42
    },
    {
      "epoch": 0.6505910165484634,
      "grad_norm": 0.020296163856983185,
      "learning_rate": 1.464072663102903e-05,
      "loss": 11.9303,
      "step": 43
    },
    {
      "epoch": 0.6657210401891253,
      "grad_norm": 0.018417399376630783,
      "learning_rate": 1.3425421036992098e-05,
      "loss": 11.9303,
      "step": 44
    },
    {
      "epoch": 0.6808510638297872,
      "grad_norm": 0.018799107521772385,
      "learning_rate": 1.2388144172720251e-05,
      "loss": 11.9308,
      "step": 45
    },
    {
      "epoch": 0.6959810874704492,
      "grad_norm": 0.01869620941579342,
      "learning_rate": 1.1533337816991932e-05,
      "loss": 11.9309,
      "step": 46
    },
    {
      "epoch": 0.7111111111111111,
      "grad_norm": 0.0199186522513628,
      "learning_rate": 1.0864662381854632e-05,
      "loss": 11.9309,
      "step": 47
    },
    {
      "epoch": 0.7262411347517731,
      "grad_norm": 0.01802201010286808,
      "learning_rate": 1.0384981238178534e-05,
      "loss": 11.9306,
      "step": 48
    },
    {
      "epoch": 0.741371158392435,
      "grad_norm": 0.015932127833366394,
      "learning_rate": 1.0096348454262845e-05,
      "loss": 11.9317,
      "step": 49
    },
    {
      "epoch": 0.7565011820330969,
      "grad_norm": 0.021696912124753,
      "learning_rate": 1e-05,
      "loss": 11.9307,
      "step": 50
    },
    {
      "epoch": 0.7565011820330969,
      "eval_loss": 11.932082176208496,
      "eval_runtime": 0.2066,
      "eval_samples_per_second": 242.043,
      "eval_steps_per_second": 62.931,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1064973041664.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}