{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 546,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.054945054945054944,
      "grad_norm": 0.27392202615737915,
      "learning_rate": 4.908424908424908e-05,
      "loss": 0.6974,
      "step": 10
    },
    {
      "epoch": 0.10989010989010989,
      "grad_norm": 1.1879206895828247,
      "learning_rate": 4.816849816849817e-05,
      "loss": 0.707,
      "step": 20
    },
    {
      "epoch": 0.16483516483516483,
      "grad_norm": 0.04194138944149017,
      "learning_rate": 4.7252747252747257e-05,
      "loss": 0.6944,
      "step": 30
    },
    {
      "epoch": 0.21978021978021978,
      "grad_norm": 0.4733542203903198,
      "learning_rate": 4.6336996336996343e-05,
      "loss": 0.6962,
      "step": 40
    },
    {
      "epoch": 0.27472527472527475,
      "grad_norm": 0.5218614339828491,
      "learning_rate": 4.5421245421245424e-05,
      "loss": 0.6971,
      "step": 50
    },
    {
      "epoch": 0.32967032967032966,
      "grad_norm": 0.6687294244766235,
      "learning_rate": 4.4505494505494504e-05,
      "loss": 0.6912,
      "step": 60
    },
    {
      "epoch": 0.38461538461538464,
      "grad_norm": 0.8817914128303528,
      "learning_rate": 4.358974358974359e-05,
      "loss": 0.7019,
      "step": 70
    },
    {
      "epoch": 0.43956043956043955,
      "grad_norm": 0.0032853269949555397,
      "learning_rate": 4.267399267399267e-05,
      "loss": 0.6987,
      "step": 80
    },
    {
      "epoch": 0.4945054945054945,
      "grad_norm": 0.41789019107818604,
      "learning_rate": 4.1758241758241765e-05,
      "loss": 0.7004,
      "step": 90
    },
    {
      "epoch": 0.5494505494505495,
      "grad_norm": 0.36019155383110046,
      "learning_rate": 4.0842490842490845e-05,
      "loss": 0.697,
      "step": 100
    },
    {
      "epoch": 0.6043956043956044,
      "grad_norm": 0.10219324380159378,
      "learning_rate": 3.992673992673993e-05,
      "loss": 0.6915,
      "step": 110
    },
    {
      "epoch": 0.6593406593406593,
      "grad_norm": 0.5682424902915955,
      "learning_rate": 3.901098901098901e-05,
      "loss": 0.6871,
      "step": 120
    },
    {
      "epoch": 0.7142857142857143,
      "grad_norm": 0.32208287715911865,
      "learning_rate": 3.809523809523809e-05,
      "loss": 0.6929,
      "step": 130
    },
    {
      "epoch": 0.7692307692307693,
      "grad_norm": 0.6399782299995422,
      "learning_rate": 3.717948717948718e-05,
      "loss": 0.6935,
      "step": 140
    },
    {
      "epoch": 0.8241758241758241,
      "grad_norm": 0.6218127012252808,
      "learning_rate": 3.6263736263736266e-05,
      "loss": 0.689,
      "step": 150
    },
    {
      "epoch": 0.8791208791208791,
      "grad_norm": 0.15324005484580994,
      "learning_rate": 3.534798534798535e-05,
      "loss": 0.6934,
      "step": 160
    },
    {
      "epoch": 0.9340659340659341,
      "grad_norm": 0.6022002100944519,
      "learning_rate": 3.443223443223443e-05,
      "loss": 0.6959,
      "step": 170
    },
    {
      "epoch": 0.989010989010989,
      "grad_norm": 0.5421551465988159,
      "learning_rate": 3.3516483516483513e-05,
      "loss": 0.6943,
      "step": 180
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.6939666867256165,
      "eval_runtime": 189.5452,
      "eval_samples_per_second": 3.83,
      "eval_steps_per_second": 0.243,
      "step": 182
    },
    {
      "epoch": 1.043956043956044,
      "grad_norm": 0.029725607484579086,
      "learning_rate": 3.26007326007326e-05,
      "loss": 0.6941,
      "step": 190
    },
    {
      "epoch": 1.098901098901099,
      "grad_norm": 0.2241191416978836,
      "learning_rate": 3.168498168498169e-05,
      "loss": 0.6947,
      "step": 200
    },
    {
      "epoch": 1.1538461538461537,
      "grad_norm": 0.3774152100086212,
      "learning_rate": 3.0769230769230774e-05,
      "loss": 0.6942,
      "step": 210
    },
    {
      "epoch": 1.2087912087912087,
      "grad_norm": 0.20546726882457733,
      "learning_rate": 2.9853479853479855e-05,
      "loss": 0.6969,
      "step": 220
    },
    {
      "epoch": 1.2637362637362637,
      "grad_norm": 0.13492144644260406,
      "learning_rate": 2.893772893772894e-05,
      "loss": 0.6902,
      "step": 230
    },
    {
      "epoch": 1.3186813186813187,
      "grad_norm": 0.8513393402099609,
      "learning_rate": 2.8021978021978025e-05,
      "loss": 0.6876,
      "step": 240
    },
    {
      "epoch": 1.3736263736263736,
      "grad_norm": 0.6317694783210754,
      "learning_rate": 2.7106227106227105e-05,
      "loss": 0.6997,
      "step": 250
    },
    {
      "epoch": 1.4285714285714286,
      "grad_norm": 0.5553500056266785,
      "learning_rate": 2.6190476190476192e-05,
      "loss": 0.6957,
      "step": 260
    },
    {
      "epoch": 1.4835164835164836,
      "grad_norm": 0.6473520994186401,
      "learning_rate": 2.5274725274725276e-05,
      "loss": 0.6845,
      "step": 270
    },
    {
      "epoch": 1.5384615384615383,
      "grad_norm": 0.4740413427352905,
      "learning_rate": 2.435897435897436e-05,
      "loss": 0.6901,
      "step": 280
    },
    {
      "epoch": 1.5934065934065935,
      "grad_norm": 0.30147257447242737,
      "learning_rate": 2.3443223443223443e-05,
      "loss": 0.6938,
      "step": 290
    },
    {
      "epoch": 1.6483516483516483,
      "grad_norm": 0.8619340658187866,
      "learning_rate": 2.252747252747253e-05,
      "loss": 0.702,
      "step": 300
    },
    {
      "epoch": 1.7032967032967035,
      "grad_norm": 0.7783694863319397,
      "learning_rate": 2.1611721611721613e-05,
      "loss": 0.6931,
      "step": 310
    },
    {
      "epoch": 1.7582417582417582,
      "grad_norm": 1.2516826391220093,
      "learning_rate": 2.0695970695970697e-05,
      "loss": 0.6936,
      "step": 320
    },
    {
      "epoch": 1.8131868131868132,
      "grad_norm": 0.9541371464729309,
      "learning_rate": 1.978021978021978e-05,
      "loss": 0.6923,
      "step": 330
    },
    {
      "epoch": 1.8681318681318682,
      "grad_norm": 0.26722267270088196,
      "learning_rate": 1.8864468864468864e-05,
      "loss": 0.6944,
      "step": 340
    },
    {
      "epoch": 1.9230769230769231,
      "grad_norm": 0.26405128836631775,
      "learning_rate": 1.794871794871795e-05,
      "loss": 0.6936,
      "step": 350
    },
    {
      "epoch": 1.978021978021978,
      "grad_norm": 0.6899142861366272,
      "learning_rate": 1.7032967032967035e-05,
      "loss": 0.691,
      "step": 360
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.6916947960853577,
      "eval_runtime": 240.4419,
      "eval_samples_per_second": 3.019,
      "eval_steps_per_second": 0.191,
      "step": 364
    },
    {
      "epoch": 2.032967032967033,
      "grad_norm": 0.15929658710956573,
      "learning_rate": 1.6117216117216118e-05,
      "loss": 0.6928,
      "step": 370
    },
    {
      "epoch": 2.087912087912088,
      "grad_norm": 0.15032555162906647,
      "learning_rate": 1.52014652014652e-05,
      "loss": 0.6908,
      "step": 380
    },
    {
      "epoch": 2.142857142857143,
      "grad_norm": 0.12846487760543823,
      "learning_rate": 1.4285714285714285e-05,
      "loss": 0.6912,
      "step": 390
    },
    {
      "epoch": 2.197802197802198,
      "grad_norm": 0.1073530912399292,
      "learning_rate": 1.336996336996337e-05,
      "loss": 0.6958,
      "step": 400
    },
    {
      "epoch": 2.2527472527472527,
      "grad_norm": 0.3209209144115448,
      "learning_rate": 1.2454212454212454e-05,
      "loss": 0.695,
      "step": 410
    },
    {
      "epoch": 2.3076923076923075,
      "grad_norm": 0.19632519781589508,
      "learning_rate": 1.153846153846154e-05,
      "loss": 0.6951,
      "step": 420
    },
    {
      "epoch": 2.3626373626373627,
      "grad_norm": 1.1780059337615967,
      "learning_rate": 1.0622710622710623e-05,
      "loss": 0.6906,
      "step": 430
    },
    {
      "epoch": 2.4175824175824174,
      "grad_norm": 0.2050175666809082,
      "learning_rate": 9.706959706959708e-06,
      "loss": 0.6941,
      "step": 440
    },
    {
      "epoch": 2.4725274725274726,
      "grad_norm": 0.6877321004867554,
      "learning_rate": 8.791208791208792e-06,
      "loss": 0.6908,
      "step": 450
    },
    {
      "epoch": 2.5274725274725274,
      "grad_norm": 0.41336432099342346,
      "learning_rate": 7.875457875457876e-06,
      "loss": 0.6903,
      "step": 460
    },
    {
      "epoch": 2.5824175824175826,
      "grad_norm": 0.339912474155426,
      "learning_rate": 6.95970695970696e-06,
      "loss": 0.6932,
      "step": 470
    },
    {
      "epoch": 2.6373626373626373,
      "grad_norm": 0.3437363803386688,
      "learning_rate": 6.043956043956044e-06,
      "loss": 0.692,
      "step": 480
    },
    {
      "epoch": 2.6923076923076925,
      "grad_norm": 0.10014953464269638,
      "learning_rate": 5.128205128205128e-06,
      "loss": 0.6907,
      "step": 490
    },
    {
      "epoch": 2.7472527472527473,
      "grad_norm": 0.10608287155628204,
      "learning_rate": 4.212454212454213e-06,
      "loss": 0.6899,
      "step": 500
    },
    {
      "epoch": 2.802197802197802,
      "grad_norm": 0.13735678791999817,
      "learning_rate": 3.2967032967032968e-06,
      "loss": 0.694,
      "step": 510
    },
    {
      "epoch": 2.857142857142857,
      "grad_norm": 0.5880533456802368,
      "learning_rate": 2.3809523809523808e-06,
      "loss": 0.7005,
      "step": 520
    },
    {
      "epoch": 2.912087912087912,
      "grad_norm": 0.3347012996673584,
      "learning_rate": 1.4652014652014652e-06,
      "loss": 0.692,
      "step": 530
    },
    {
      "epoch": 2.967032967032967,
      "grad_norm": 0.39770108461380005,
      "learning_rate": 5.494505494505495e-07,
      "loss": 0.6867,
      "step": 540
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.6915315389633179,
      "eval_runtime": 192.9872,
      "eval_samples_per_second": 3.762,
      "eval_steps_per_second": 0.238,
      "step": 546
    }
  ],
  "logging_steps": 10,
  "max_steps": 546,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 6.751102135223255e+17,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}