{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.00754233133461553,
"eval_steps": 9,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 7.54233133461553e-05,
"grad_norm": 3.156315565109253,
"learning_rate": 1e-05,
"loss": 6.3547,
"step": 1
},
{
"epoch": 7.54233133461553e-05,
"eval_loss": 6.566169261932373,
"eval_runtime": 1323.5586,
"eval_samples_per_second": 8.436,
"eval_steps_per_second": 1.055,
"step": 1
},
{
"epoch": 0.0001508466266923106,
"grad_norm": 3.608928918838501,
"learning_rate": 2e-05,
"loss": 6.6439,
"step": 2
},
{
"epoch": 0.00022626994003846588,
"grad_norm": 3.671382427215576,
"learning_rate": 3e-05,
"loss": 6.4279,
"step": 3
},
{
"epoch": 0.0003016932533846212,
"grad_norm": 3.6660938262939453,
"learning_rate": 4e-05,
"loss": 6.3964,
"step": 4
},
{
"epoch": 0.0003771165667307765,
"grad_norm": 4.2082672119140625,
"learning_rate": 5e-05,
"loss": 6.6657,
"step": 5
},
{
"epoch": 0.00045253988007693175,
"grad_norm": 3.8837738037109375,
"learning_rate": 6e-05,
"loss": 6.4626,
"step": 6
},
{
"epoch": 0.0005279631934230871,
"grad_norm": 4.669962406158447,
"learning_rate": 7e-05,
"loss": 6.3364,
"step": 7
},
{
"epoch": 0.0006033865067692424,
"grad_norm": 3.6005444526672363,
"learning_rate": 8e-05,
"loss": 5.4372,
"step": 8
},
{
"epoch": 0.0006788098201153976,
"grad_norm": 3.552445650100708,
"learning_rate": 9e-05,
"loss": 5.2645,
"step": 9
},
{
"epoch": 0.0006788098201153976,
"eval_loss": 4.850156307220459,
"eval_runtime": 1323.3509,
"eval_samples_per_second": 8.437,
"eval_steps_per_second": 1.055,
"step": 9
},
{
"epoch": 0.000754233133461553,
"grad_norm": 3.7075414657592773,
"learning_rate": 0.0001,
"loss": 4.7484,
"step": 10
},
{
"epoch": 0.0008296564468077082,
"grad_norm": 3.7549526691436768,
"learning_rate": 9.99695413509548e-05,
"loss": 4.4416,
"step": 11
},
{
"epoch": 0.0009050797601538635,
"grad_norm": 3.413628339767456,
"learning_rate": 9.987820251299122e-05,
"loss": 3.7846,
"step": 12
},
{
"epoch": 0.0009805030735000188,
"grad_norm": 2.9563262462615967,
"learning_rate": 9.972609476841367e-05,
"loss": 3.5005,
"step": 13
},
{
"epoch": 0.0010559263868461742,
"grad_norm": 2.9408886432647705,
"learning_rate": 9.951340343707852e-05,
"loss": 3.4305,
"step": 14
},
{
"epoch": 0.0011313497001923295,
"grad_norm": 2.1472573280334473,
"learning_rate": 9.924038765061042e-05,
"loss": 3.1604,
"step": 15
},
{
"epoch": 0.0012067730135384847,
"grad_norm": 2.671536922454834,
"learning_rate": 9.890738003669029e-05,
"loss": 3.0808,
"step": 16
},
{
"epoch": 0.00128219632688464,
"grad_norm": 2.8072383403778076,
"learning_rate": 9.851478631379982e-05,
"loss": 3.2296,
"step": 17
},
{
"epoch": 0.0013576196402307953,
"grad_norm": 2.2312707901000977,
"learning_rate": 9.806308479691595e-05,
"loss": 3.0198,
"step": 18
},
{
"epoch": 0.0013576196402307953,
"eval_loss": 2.764563798904419,
"eval_runtime": 1323.5053,
"eval_samples_per_second": 8.436,
"eval_steps_per_second": 1.055,
"step": 18
},
{
"epoch": 0.0014330429535769507,
"grad_norm": 3.1025147438049316,
"learning_rate": 9.755282581475769e-05,
"loss": 2.6769,
"step": 19
},
{
"epoch": 0.001508466266923106,
"grad_norm": 1.560180425643921,
"learning_rate": 9.698463103929542e-05,
"loss": 2.5627,
"step": 20
},
{
"epoch": 0.0015838895802692612,
"grad_norm": 2.205591917037964,
"learning_rate": 9.635919272833938e-05,
"loss": 2.6943,
"step": 21
},
{
"epoch": 0.0016593128936154165,
"grad_norm": 1.5366450548171997,
"learning_rate": 9.567727288213005e-05,
"loss": 2.5889,
"step": 22
},
{
"epoch": 0.0017347362069615718,
"grad_norm": 1.582607626914978,
"learning_rate": 9.493970231495835e-05,
"loss": 2.5521,
"step": 23
},
{
"epoch": 0.001810159520307727,
"grad_norm": 1.9383635520935059,
"learning_rate": 9.414737964294636e-05,
"loss": 2.4834,
"step": 24
},
{
"epoch": 0.0018855828336538825,
"grad_norm": 1.7182579040527344,
"learning_rate": 9.330127018922194e-05,
"loss": 2.574,
"step": 25
},
{
"epoch": 0.0019610061470000375,
"grad_norm": 1.902744174003601,
"learning_rate": 9.24024048078213e-05,
"loss": 2.4183,
"step": 26
},
{
"epoch": 0.002036429460346193,
"grad_norm": 1.877042293548584,
"learning_rate": 9.145187862775209e-05,
"loss": 2.4997,
"step": 27
},
{
"epoch": 0.002036429460346193,
"eval_loss": 2.507841110229492,
"eval_runtime": 1323.4725,
"eval_samples_per_second": 8.436,
"eval_steps_per_second": 1.055,
"step": 27
},
{
"epoch": 0.0021118527736923485,
"grad_norm": 1.5418832302093506,
"learning_rate": 9.045084971874738e-05,
"loss": 2.4695,
"step": 28
},
{
"epoch": 0.0021872760870385037,
"grad_norm": 1.6520047187805176,
"learning_rate": 8.940053768033609e-05,
"loss": 2.471,
"step": 29
},
{
"epoch": 0.002262699400384659,
"grad_norm": 1.7770787477493286,
"learning_rate": 8.83022221559489e-05,
"loss": 2.5716,
"step": 30
},
{
"epoch": 0.0023381227137308142,
"grad_norm": 2.1953811645507812,
"learning_rate": 8.715724127386972e-05,
"loss": 2.6485,
"step": 31
},
{
"epoch": 0.0024135460270769695,
"grad_norm": 2.0734782218933105,
"learning_rate": 8.596699001693255e-05,
"loss": 2.4759,
"step": 32
},
{
"epoch": 0.0024889693404231247,
"grad_norm": 1.8175076246261597,
"learning_rate": 8.473291852294987e-05,
"loss": 2.4829,
"step": 33
},
{
"epoch": 0.00256439265376928,
"grad_norm": 2.310616970062256,
"learning_rate": 8.345653031794292e-05,
"loss": 2.6573,
"step": 34
},
{
"epoch": 0.0026398159671154353,
"grad_norm": 0.9801550507545471,
"learning_rate": 8.213938048432697e-05,
"loss": 2.4736,
"step": 35
},
{
"epoch": 0.0027152392804615905,
"grad_norm": 6.086284637451172,
"learning_rate": 8.07830737662829e-05,
"loss": 2.4809,
"step": 36
},
{
"epoch": 0.0027152392804615905,
"eval_loss": 2.5135347843170166,
"eval_runtime": 1324.1065,
"eval_samples_per_second": 8.432,
"eval_steps_per_second": 1.054,
"step": 36
},
{
"epoch": 0.0027906625938077458,
"grad_norm": 0.9378789663314819,
"learning_rate": 7.938926261462366e-05,
"loss": 2.4888,
"step": 37
},
{
"epoch": 0.0028660859071539015,
"grad_norm": 1.425811529159546,
"learning_rate": 7.795964517353735e-05,
"loss": 2.5393,
"step": 38
},
{
"epoch": 0.0029415092205000567,
"grad_norm": 0.885962724685669,
"learning_rate": 7.649596321166024e-05,
"loss": 2.4674,
"step": 39
},
{
"epoch": 0.003016932533846212,
"grad_norm": 1.203602910041809,
"learning_rate": 7.500000000000001e-05,
"loss": 2.4967,
"step": 40
},
{
"epoch": 0.0030923558471923672,
"grad_norm": 0.8594803810119629,
"learning_rate": 7.347357813929454e-05,
"loss": 2.4137,
"step": 41
},
{
"epoch": 0.0031677791605385225,
"grad_norm": 1.4859495162963867,
"learning_rate": 7.191855733945387e-05,
"loss": 2.5474,
"step": 42
},
{
"epoch": 0.0032432024738846777,
"grad_norm": 0.9513015747070312,
"learning_rate": 7.033683215379002e-05,
"loss": 2.3964,
"step": 43
},
{
"epoch": 0.003318625787230833,
"grad_norm": 0.8883151412010193,
"learning_rate": 6.873032967079561e-05,
"loss": 2.5413,
"step": 44
},
{
"epoch": 0.0033940491005769883,
"grad_norm": 1.0262858867645264,
"learning_rate": 6.710100716628344e-05,
"loss": 2.4256,
"step": 45
},
{
"epoch": 0.0033940491005769883,
"eval_loss": 2.4895801544189453,
"eval_runtime": 1323.5763,
"eval_samples_per_second": 8.435,
"eval_steps_per_second": 1.055,
"step": 45
},
{
"epoch": 0.0034694724139231435,
"grad_norm": 1.1027235984802246,
"learning_rate": 6.545084971874738e-05,
"loss": 2.526,
"step": 46
},
{
"epoch": 0.0035448957272692988,
"grad_norm": 3.1927082538604736,
"learning_rate": 6.378186779084995e-05,
"loss": 2.6,
"step": 47
},
{
"epoch": 0.003620319040615454,
"grad_norm": 1.3928576707839966,
"learning_rate": 6.209609477998338e-05,
"loss": 2.6268,
"step": 48
},
{
"epoch": 0.0036957423539616097,
"grad_norm": 0.8399583101272583,
"learning_rate": 6.0395584540887963e-05,
"loss": 2.4112,
"step": 49
},
{
"epoch": 0.003771165667307765,
"grad_norm": 1.293001413345337,
"learning_rate": 5.868240888334653e-05,
"loss": 2.4761,
"step": 50
},
{
"epoch": 0.0038465889806539202,
"grad_norm": 1.18425714969635,
"learning_rate": 5.695865504800327e-05,
"loss": 2.4176,
"step": 51
},
{
"epoch": 0.003922012294000075,
"grad_norm": 0.9308878183364868,
"learning_rate": 5.522642316338268e-05,
"loss": 2.4435,
"step": 52
},
{
"epoch": 0.00399743560734623,
"grad_norm": 0.8556021451950073,
"learning_rate": 5.348782368720626e-05,
"loss": 2.5139,
"step": 53
},
{
"epoch": 0.004072858920692386,
"grad_norm": 0.7264602184295654,
"learning_rate": 5.174497483512506e-05,
"loss": 2.3871,
"step": 54
},
{
"epoch": 0.004072858920692386,
"eval_loss": 2.4673752784729004,
"eval_runtime": 1323.4747,
"eval_samples_per_second": 8.436,
"eval_steps_per_second": 1.055,
"step": 54
},
{
"epoch": 0.004148282234038542,
"grad_norm": 1.2506457567214966,
"learning_rate": 5e-05,
"loss": 2.3847,
"step": 55
},
{
"epoch": 0.004223705547384697,
"grad_norm": 0.8360309600830078,
"learning_rate": 4.825502516487497e-05,
"loss": 2.4915,
"step": 56
},
{
"epoch": 0.004299128860730852,
"grad_norm": 0.7913613319396973,
"learning_rate": 4.6512176312793736e-05,
"loss": 2.4851,
"step": 57
},
{
"epoch": 0.0043745521740770075,
"grad_norm": 0.8072302937507629,
"learning_rate": 4.477357683661734e-05,
"loss": 2.45,
"step": 58
},
{
"epoch": 0.004449975487423163,
"grad_norm": 0.6270408630371094,
"learning_rate": 4.3041344951996746e-05,
"loss": 2.4552,
"step": 59
},
{
"epoch": 0.004525398800769318,
"grad_norm": 1.4017157554626465,
"learning_rate": 4.131759111665349e-05,
"loss": 2.5578,
"step": 60
},
{
"epoch": 0.004600822114115473,
"grad_norm": 0.829573929309845,
"learning_rate": 3.960441545911204e-05,
"loss": 2.4176,
"step": 61
},
{
"epoch": 0.0046762454274616285,
"grad_norm": 0.8128294348716736,
"learning_rate": 3.790390522001662e-05,
"loss": 2.445,
"step": 62
},
{
"epoch": 0.004751668740807784,
"grad_norm": 0.8237362504005432,
"learning_rate": 3.6218132209150045e-05,
"loss": 2.4833,
"step": 63
},
{
"epoch": 0.004751668740807784,
"eval_loss": 2.470191240310669,
"eval_runtime": 1323.4084,
"eval_samples_per_second": 8.437,
"eval_steps_per_second": 1.055,
"step": 63
},
{
"epoch": 0.004827092054153939,
"grad_norm": 1.046515941619873,
"learning_rate": 3.4549150281252636e-05,
"loss": 2.6124,
"step": 64
},
{
"epoch": 0.004902515367500094,
"grad_norm": 0.7122370600700378,
"learning_rate": 3.289899283371657e-05,
"loss": 2.3582,
"step": 65
},
{
"epoch": 0.0049779386808462495,
"grad_norm": 1.1943559646606445,
"learning_rate": 3.12696703292044e-05,
"loss": 2.3669,
"step": 66
},
{
"epoch": 0.005053361994192405,
"grad_norm": 0.792339563369751,
"learning_rate": 2.9663167846209998e-05,
"loss": 2.4925,
"step": 67
},
{
"epoch": 0.00512878530753856,
"grad_norm": 0.9303301572799683,
"learning_rate": 2.8081442660546125e-05,
"loss": 2.357,
"step": 68
},
{
"epoch": 0.005204208620884715,
"grad_norm": 1.2022520303726196,
"learning_rate": 2.6526421860705473e-05,
"loss": 2.3788,
"step": 69
},
{
"epoch": 0.0052796319342308705,
"grad_norm": 0.982182502746582,
"learning_rate": 2.500000000000001e-05,
"loss": 2.5207,
"step": 70
},
{
"epoch": 0.005355055247577026,
"grad_norm": 0.7129021883010864,
"learning_rate": 2.350403678833976e-05,
"loss": 2.4658,
"step": 71
},
{
"epoch": 0.005430478560923181,
"grad_norm": 1.0058438777923584,
"learning_rate": 2.2040354826462668e-05,
"loss": 2.4702,
"step": 72
},
{
"epoch": 0.005430478560923181,
"eval_loss": 2.4624416828155518,
"eval_runtime": 1323.3721,
"eval_samples_per_second": 8.437,
"eval_steps_per_second": 1.055,
"step": 72
},
{
"epoch": 0.005505901874269336,
"grad_norm": 1.0868438482284546,
"learning_rate": 2.061073738537635e-05,
"loss": 2.4515,
"step": 73
},
{
"epoch": 0.0055813251876154915,
"grad_norm": 0.7750610709190369,
"learning_rate": 1.9216926233717085e-05,
"loss": 2.4766,
"step": 74
},
{
"epoch": 0.005656748500961648,
"grad_norm": 0.6583645939826965,
"learning_rate": 1.7860619515673033e-05,
"loss": 2.4686,
"step": 75
},
{
"epoch": 0.005732171814307803,
"grad_norm": 0.8557270169258118,
"learning_rate": 1.6543469682057106e-05,
"loss": 2.37,
"step": 76
},
{
"epoch": 0.005807595127653958,
"grad_norm": 3.060455322265625,
"learning_rate": 1.526708147705013e-05,
"loss": 2.7771,
"step": 77
},
{
"epoch": 0.0058830184410001134,
"grad_norm": 0.7005658149719238,
"learning_rate": 1.4033009983067452e-05,
"loss": 2.377,
"step": 78
},
{
"epoch": 0.005958441754346269,
"grad_norm": 1.1821985244750977,
"learning_rate": 1.2842758726130283e-05,
"loss": 2.4553,
"step": 79
},
{
"epoch": 0.006033865067692424,
"grad_norm": 0.8524439930915833,
"learning_rate": 1.1697777844051105e-05,
"loss": 2.4688,
"step": 80
},
{
"epoch": 0.006109288381038579,
"grad_norm": 1.0849894285202026,
"learning_rate": 1.0599462319663905e-05,
"loss": 2.5193,
"step": 81
},
{
"epoch": 0.006109288381038579,
"eval_loss": 2.4592955112457275,
"eval_runtime": 1323.4238,
"eval_samples_per_second": 8.436,
"eval_steps_per_second": 1.055,
"step": 81
},
{
"epoch": 0.0061847116943847345,
"grad_norm": 0.606669545173645,
"learning_rate": 9.549150281252633e-06,
"loss": 2.3663,
"step": 82
},
{
"epoch": 0.00626013500773089,
"grad_norm": 0.716972291469574,
"learning_rate": 8.548121372247918e-06,
"loss": 2.4092,
"step": 83
},
{
"epoch": 0.006335558321077045,
"grad_norm": 0.848617434501648,
"learning_rate": 7.597595192178702e-06,
"loss": 2.3845,
"step": 84
},
{
"epoch": 0.0064109816344232,
"grad_norm": 0.7678719758987427,
"learning_rate": 6.698729810778065e-06,
"loss": 2.4621,
"step": 85
},
{
"epoch": 0.0064864049477693555,
"grad_norm": 0.7551043033599854,
"learning_rate": 5.852620357053651e-06,
"loss": 2.5631,
"step": 86
},
{
"epoch": 0.006561828261115511,
"grad_norm": 0.7578583359718323,
"learning_rate": 5.060297685041659e-06,
"loss": 2.3585,
"step": 87
},
{
"epoch": 0.006637251574461666,
"grad_norm": 0.6959741711616516,
"learning_rate": 4.322727117869951e-06,
"loss": 2.4228,
"step": 88
},
{
"epoch": 0.006712674887807821,
"grad_norm": 0.9423866271972656,
"learning_rate": 3.6408072716606346e-06,
"loss": 2.523,
"step": 89
},
{
"epoch": 0.0067880982011539765,
"grad_norm": 0.7611425518989563,
"learning_rate": 3.0153689607045845e-06,
"loss": 2.4523,
"step": 90
},
{
"epoch": 0.0067880982011539765,
"eval_loss": 2.459738254547119,
"eval_runtime": 1323.4409,
"eval_samples_per_second": 8.436,
"eval_steps_per_second": 1.055,
"step": 90
},
{
"epoch": 0.006863521514500132,
"grad_norm": 0.8202887177467346,
"learning_rate": 2.4471741852423237e-06,
"loss": 2.299,
"step": 91
},
{
"epoch": 0.006938944827846287,
"grad_norm": 0.9283888339996338,
"learning_rate": 1.9369152030840556e-06,
"loss": 2.3971,
"step": 92
},
{
"epoch": 0.007014368141192442,
"grad_norm": 1.0150047540664673,
"learning_rate": 1.4852136862001764e-06,
"loss": 2.4436,
"step": 93
},
{
"epoch": 0.0070897914545385975,
"grad_norm": 0.8555284142494202,
"learning_rate": 1.0926199633097157e-06,
"loss": 2.4183,
"step": 94
},
{
"epoch": 0.007165214767884753,
"grad_norm": 0.7611926198005676,
"learning_rate": 7.596123493895991e-07,
"loss": 2.3982,
"step": 95
},
{
"epoch": 0.007240638081230908,
"grad_norm": 0.9337876439094543,
"learning_rate": 4.865965629214819e-07,
"loss": 2.4278,
"step": 96
},
{
"epoch": 0.007316061394577064,
"grad_norm": 0.8473365306854248,
"learning_rate": 2.7390523158633554e-07,
"loss": 2.5949,
"step": 97
},
{
"epoch": 0.007391484707923219,
"grad_norm": 0.8833069205284119,
"learning_rate": 1.2179748700879012e-07,
"loss": 2.4205,
"step": 98
},
{
"epoch": 0.007466908021269375,
"grad_norm": 0.707373321056366,
"learning_rate": 3.04586490452119e-08,
"loss": 2.3859,
"step": 99
},
{
"epoch": 0.007466908021269375,
"eval_loss": 2.459726572036743,
"eval_runtime": 1323.3201,
"eval_samples_per_second": 8.437,
"eval_steps_per_second": 1.055,
"step": 99
},
{
"epoch": 0.00754233133461553,
"grad_norm": 0.8906799554824829,
"learning_rate": 0.0,
"loss": 2.4652,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.506716988669952e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}