{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.9900497512437811,
"eval_steps": 35,
"global_step": 350,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.028429282160625444,
"grad_norm": 11.854507446289062,
"learning_rate": 1.4285714285714285e-05,
"loss": 0.9685,
"step": 5
},
{
"epoch": 0.05685856432125089,
"grad_norm": 2.9244329929351807,
"learning_rate": 2.857142857142857e-05,
"loss": 0.5325,
"step": 10
},
{
"epoch": 0.08528784648187633,
"grad_norm": 2.8243346214294434,
"learning_rate": 4.2857142857142856e-05,
"loss": 0.2362,
"step": 15
},
{
"epoch": 0.11371712864250177,
"grad_norm": 1.644755244255066,
"learning_rate": 5.714285714285714e-05,
"loss": 0.2163,
"step": 20
},
{
"epoch": 0.14214641080312723,
"grad_norm": 5.001497268676758,
"learning_rate": 7.142857142857143e-05,
"loss": 0.2204,
"step": 25
},
{
"epoch": 0.17057569296375266,
"grad_norm": 1.6563094854354858,
"learning_rate": 8.571428571428571e-05,
"loss": 0.2098,
"step": 30
},
{
"epoch": 0.19900497512437812,
"grad_norm": 1.911417841911316,
"learning_rate": 0.0001,
"loss": 0.1803,
"step": 35
},
{
"epoch": 0.19900497512437812,
"eval_loss": 0.16817478835582733,
"eval_runtime": 318.5306,
"eval_samples_per_second": 7.849,
"eval_steps_per_second": 7.849,
"step": 35
},
{
"epoch": 0.22743425728500355,
"grad_norm": 1.6745359897613525,
"learning_rate": 9.993784606094612e-05,
"loss": 0.1843,
"step": 40
},
{
"epoch": 0.255863539445629,
"grad_norm": 0.9290557503700256,
"learning_rate": 9.975153876827008e-05,
"loss": 0.1751,
"step": 45
},
{
"epoch": 0.28429282160625446,
"grad_norm": 1.3401949405670166,
"learning_rate": 9.944154131125642e-05,
"loss": 0.1722,
"step": 50
},
{
"epoch": 0.31272210376687987,
"grad_norm": 0.9433913826942444,
"learning_rate": 9.900862439242719e-05,
"loss": 0.165,
"step": 55
},
{
"epoch": 0.3411513859275053,
"grad_norm": 1.2080743312835693,
"learning_rate": 9.84538643114539e-05,
"loss": 0.1734,
"step": 60
},
{
"epoch": 0.3695806680881308,
"grad_norm": 0.9101002812385559,
"learning_rate": 9.777864028930705e-05,
"loss": 0.1476,
"step": 65
},
{
"epoch": 0.39800995024875624,
"grad_norm": 0.9633578658103943,
"learning_rate": 9.698463103929542e-05,
"loss": 0.1583,
"step": 70
},
{
"epoch": 0.39800995024875624,
"eval_loss": 0.15481097996234894,
"eval_runtime": 318.1907,
"eval_samples_per_second": 7.857,
"eval_steps_per_second": 7.857,
"step": 70
},
{
"epoch": 0.42643923240938164,
"grad_norm": 0.9153623580932617,
"learning_rate": 9.607381059352038e-05,
"loss": 0.1632,
"step": 75
},
{
"epoch": 0.4548685145700071,
"grad_norm": 0.9270277619361877,
"learning_rate": 9.504844339512095e-05,
"loss": 0.1503,
"step": 80
},
{
"epoch": 0.48329779673063256,
"grad_norm": 0.863593578338623,
"learning_rate": 9.391107866851143e-05,
"loss": 0.1527,
"step": 85
},
{
"epoch": 0.511727078891258,
"grad_norm": 0.9579532146453857,
"learning_rate": 9.266454408160779e-05,
"loss": 0.1523,
"step": 90
},
{
"epoch": 0.5401563610518835,
"grad_norm": 1.0070995092391968,
"learning_rate": 9.131193871579975e-05,
"loss": 0.1512,
"step": 95
},
{
"epoch": 0.5685856432125089,
"grad_norm": 1.0314483642578125,
"learning_rate": 8.985662536114613e-05,
"loss": 0.1485,
"step": 100
},
{
"epoch": 0.5970149253731343,
"grad_norm": 0.5653307437896729,
"learning_rate": 8.83022221559489e-05,
"loss": 0.1439,
"step": 105
},
{
"epoch": 0.5970149253731343,
"eval_loss": 0.14900080859661102,
"eval_runtime": 317.9816,
"eval_samples_per_second": 7.862,
"eval_steps_per_second": 7.862,
"step": 105
},
{
"epoch": 0.6254442075337597,
"grad_norm": 0.6183350682258606,
"learning_rate": 8.665259359149132e-05,
"loss": 0.1459,
"step": 110
},
{
"epoch": 0.6538734896943852,
"grad_norm": 0.667679488658905,
"learning_rate": 8.491184090430364e-05,
"loss": 0.1367,
"step": 115
},
{
"epoch": 0.6823027718550106,
"grad_norm": 0.8107192516326904,
"learning_rate": 8.308429187984297e-05,
"loss": 0.1468,
"step": 120
},
{
"epoch": 0.7107320540156361,
"grad_norm": 0.7757948637008667,
"learning_rate": 8.117449009293668e-05,
"loss": 0.1349,
"step": 125
},
{
"epoch": 0.7391613361762616,
"grad_norm": 0.8467870354652405,
"learning_rate": 7.91871836117395e-05,
"loss": 0.1411,
"step": 130
},
{
"epoch": 0.767590618336887,
"grad_norm": 1.0613672733306885,
"learning_rate": 7.712731319328798e-05,
"loss": 0.1663,
"step": 135
},
{
"epoch": 0.7960199004975125,
"grad_norm": 0.7184066772460938,
"learning_rate": 7.500000000000001e-05,
"loss": 0.1488,
"step": 140
},
{
"epoch": 0.7960199004975125,
"eval_loss": 0.13560061156749725,
"eval_runtime": 316.1201,
"eval_samples_per_second": 7.908,
"eval_steps_per_second": 7.908,
"step": 140
},
{
"epoch": 0.8244491826581379,
"grad_norm": 0.5415666699409485,
"learning_rate": 7.281053286765815e-05,
"loss": 0.1426,
"step": 145
},
{
"epoch": 0.8528784648187633,
"grad_norm": 0.7227004170417786,
"learning_rate": 7.056435515653059e-05,
"loss": 0.1542,
"step": 150
},
{
"epoch": 0.8813077469793887,
"grad_norm": 0.47049930691719055,
"learning_rate": 6.826705121831976e-05,
"loss": 0.1455,
"step": 155
},
{
"epoch": 0.9097370291400142,
"grad_norm": 0.4833870828151703,
"learning_rate": 6.592433251258423e-05,
"loss": 0.1376,
"step": 160
},
{
"epoch": 0.9381663113006397,
"grad_norm": 1.1025599241256714,
"learning_rate": 6.354202340715026e-05,
"loss": 0.1352,
"step": 165
},
{
"epoch": 0.9665955934612651,
"grad_norm": 0.6722602248191833,
"learning_rate": 6.112604669781572e-05,
"loss": 0.1402,
"step": 170
},
{
"epoch": 0.9950248756218906,
"grad_norm": 0.6222644448280334,
"learning_rate": 5.868240888334653e-05,
"loss": 0.1385,
"step": 175
},
{
"epoch": 0.9950248756218906,
"eval_loss": 0.136678546667099,
"eval_runtime": 318.243,
"eval_samples_per_second": 7.856,
"eval_steps_per_second": 7.856,
"step": 175
},
{
"epoch": 1.023454157782516,
"grad_norm": 0.49759426712989807,
"learning_rate": 5.621718523237427e-05,
"loss": 0.1232,
"step": 180
},
{
"epoch": 1.0518834399431414,
"grad_norm": 0.6171165108680725,
"learning_rate": 5.373650467932122e-05,
"loss": 0.1148,
"step": 185
},
{
"epoch": 1.080312722103767,
"grad_norm": 0.9642676711082458,
"learning_rate": 5.124653458690365e-05,
"loss": 0.1272,
"step": 190
},
{
"epoch": 1.1087420042643923,
"grad_norm": 0.5711795091629028,
"learning_rate": 4.875346541309637e-05,
"loss": 0.111,
"step": 195
},
{
"epoch": 1.1371712864250179,
"grad_norm": 0.5575156211853027,
"learning_rate": 4.626349532067879e-05,
"loss": 0.1284,
"step": 200
},
{
"epoch": 1.1656005685856432,
"grad_norm": 0.5733321905136108,
"learning_rate": 4.378281476762576e-05,
"loss": 0.1214,
"step": 205
},
{
"epoch": 1.1940298507462686,
"grad_norm": 0.47317320108413696,
"learning_rate": 4.131759111665349e-05,
"loss": 0.1092,
"step": 210
},
{
"epoch": 1.1940298507462686,
"eval_loss": 0.13155004382133484,
"eval_runtime": 318.2328,
"eval_samples_per_second": 7.856,
"eval_steps_per_second": 7.856,
"step": 210
},
{
"epoch": 1.2224591329068941,
"grad_norm": 0.7402136921882629,
"learning_rate": 3.887395330218429e-05,
"loss": 0.1128,
"step": 215
},
{
"epoch": 1.2508884150675195,
"grad_norm": 0.6445468068122864,
"learning_rate": 3.6457976592849754e-05,
"loss": 0.1064,
"step": 220
},
{
"epoch": 1.279317697228145,
"grad_norm": 1.001204013824463,
"learning_rate": 3.4075667487415785e-05,
"loss": 0.1146,
"step": 225
},
{
"epoch": 1.3077469793887704,
"grad_norm": 0.6902646422386169,
"learning_rate": 3.173294878168025e-05,
"loss": 0.1118,
"step": 230
},
{
"epoch": 1.336176261549396,
"grad_norm": 0.5311295390129089,
"learning_rate": 2.9435644843469436e-05,
"loss": 0.1183,
"step": 235
},
{
"epoch": 1.3646055437100213,
"grad_norm": 0.7023885250091553,
"learning_rate": 2.718946713234185e-05,
"loss": 0.1248,
"step": 240
},
{
"epoch": 1.3930348258706466,
"grad_norm": 0.6486346125602722,
"learning_rate": 2.500000000000001e-05,
"loss": 0.1282,
"step": 245
},
{
"epoch": 1.3930348258706466,
"eval_loss": 0.12762804329395294,
"eval_runtime": 316.104,
"eval_samples_per_second": 7.909,
"eval_steps_per_second": 7.909,
"step": 245
},
{
"epoch": 1.4214641080312722,
"grad_norm": 0.6740933060646057,
"learning_rate": 2.2872686806712035e-05,
"loss": 0.1147,
"step": 250
},
{
"epoch": 1.4498933901918978,
"grad_norm": 0.6045389175415039,
"learning_rate": 2.0812816388260518e-05,
"loss": 0.1116,
"step": 255
},
{
"epoch": 1.4783226723525231,
"grad_norm": 0.8824101686477661,
"learning_rate": 1.8825509907063327e-05,
"loss": 0.0991,
"step": 260
},
{
"epoch": 1.5067519545131485,
"grad_norm": 0.6621524691581726,
"learning_rate": 1.691570812015704e-05,
"loss": 0.1137,
"step": 265
},
{
"epoch": 1.535181236673774,
"grad_norm": 0.6356005668640137,
"learning_rate": 1.5088159095696363e-05,
"loss": 0.12,
"step": 270
},
{
"epoch": 1.5636105188343994,
"grad_norm": 0.7734110951423645,
"learning_rate": 1.3347406408508695e-05,
"loss": 0.1055,
"step": 275
},
{
"epoch": 1.5920398009950247,
"grad_norm": 0.7122759819030762,
"learning_rate": 1.1697777844051105e-05,
"loss": 0.1135,
"step": 280
},
{
"epoch": 1.5920398009950247,
"eval_loss": 0.1305658519268036,
"eval_runtime": 318.0092,
"eval_samples_per_second": 7.861,
"eval_steps_per_second": 7.861,
"step": 280
},
{
"epoch": 1.6204690831556503,
"grad_norm": 0.7382331490516663,
"learning_rate": 1.0143374638853891e-05,
"loss": 0.1153,
"step": 285
},
{
"epoch": 1.6488983653162759,
"grad_norm": 0.5564413070678711,
"learning_rate": 8.688061284200266e-06,
"loss": 0.1137,
"step": 290
},
{
"epoch": 1.6773276474769012,
"grad_norm": 0.6935514211654663,
"learning_rate": 7.33545591839222e-06,
"loss": 0.1002,
"step": 295
},
{
"epoch": 1.7057569296375266,
"grad_norm": 0.5724297165870667,
"learning_rate": 6.088921331488568e-06,
"loss": 0.1159,
"step": 300
},
{
"epoch": 1.7341862117981521,
"grad_norm": 0.6456014513969421,
"learning_rate": 4.951556604879048e-06,
"loss": 0.1151,
"step": 305
},
{
"epoch": 1.7626154939587777,
"grad_norm": 0.6402171850204468,
"learning_rate": 3.9261894064796135e-06,
"loss": 0.0968,
"step": 310
},
{
"epoch": 1.7910447761194028,
"grad_norm": 0.6542567014694214,
"learning_rate": 3.0153689607045845e-06,
"loss": 0.1071,
"step": 315
},
{
"epoch": 1.7910447761194028,
"eval_loss": 0.13068035244941711,
"eval_runtime": 317.9913,
"eval_samples_per_second": 7.862,
"eval_steps_per_second": 7.862,
"step": 315
},
{
"epoch": 1.8194740582800284,
"grad_norm": 0.5055389404296875,
"learning_rate": 2.221359710692961e-06,
"loss": 0.1052,
"step": 320
},
{
"epoch": 1.847903340440654,
"grad_norm": 0.7156793475151062,
"learning_rate": 1.5461356885461075e-06,
"loss": 0.1098,
"step": 325
},
{
"epoch": 1.8763326226012793,
"grad_norm": 0.7110255360603333,
"learning_rate": 9.913756075728087e-07,
"loss": 0.1062,
"step": 330
},
{
"epoch": 1.9047619047619047,
"grad_norm": 0.6753076910972595,
"learning_rate": 5.584586887435739e-07,
"loss": 0.12,
"step": 335
},
{
"epoch": 1.9331911869225302,
"grad_norm": 0.6932408213615417,
"learning_rate": 2.4846123172992954e-07,
"loss": 0.1109,
"step": 340
},
{
"epoch": 1.9616204690831558,
"grad_norm": 0.7656528949737549,
"learning_rate": 6.215393905388278e-08,
"loss": 0.1168,
"step": 345
},
{
"epoch": 1.9900497512437811,
"grad_norm": 0.5726015567779541,
"learning_rate": 0.0,
"loss": 0.1075,
"step": 350
},
{
"epoch": 1.9900497512437811,
"eval_loss": 0.1294924020767212,
"eval_runtime": 316.3526,
"eval_samples_per_second": 7.903,
"eval_steps_per_second": 7.903,
"step": 350
}
],
"logging_steps": 5,
"max_steps": 350,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 35,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.6683611739382088e+18,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}