{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 10.0,
  "eval_steps": 1,
  "global_step": 70,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.14285714285714285,
      "grad_norm": 30.89147186279297,
      "learning_rate": 2.5e-05,
      "loss": 1.1486,
      "step": 1
    },
    {
      "epoch": 0.14285714285714285,
      "eval_accuracy": 0.3712121212121212,
      "eval_loss": 1.3744148015975952,
      "eval_runtime": 1.5448,
      "eval_samples_per_second": 85.447,
      "eval_steps_per_second": 2.589,
      "step": 1
    },
    {
      "epoch": 0.2857142857142857,
      "grad_norm": 34.26921081542969,
      "learning_rate": 5e-05,
      "loss": 1.2255,
      "step": 2
    },
    {
      "epoch": 0.2857142857142857,
      "eval_accuracy": 0.3787878787878788,
      "eval_loss": 1.354632019996643,
      "eval_runtime": 1.4924,
      "eval_samples_per_second": 88.447,
      "eval_steps_per_second": 2.68,
      "step": 2
    },
    {
      "epoch": 0.42857142857142855,
      "grad_norm": 33.26761245727539,
      "learning_rate": 4.9264705882352944e-05,
      "loss": 1.2041,
      "step": 3
    },
    {
      "epoch": 0.42857142857142855,
      "eval_accuracy": 0.3787878787878788,
      "eval_loss": 1.2853270769119263,
      "eval_runtime": 1.4949,
      "eval_samples_per_second": 88.301,
      "eval_steps_per_second": 2.676,
      "step": 3
    },
    {
      "epoch": 0.5714285714285714,
      "grad_norm": 34.27346420288086,
      "learning_rate": 4.8529411764705885e-05,
      "loss": 1.1975,
      "step": 4
    },
    {
      "epoch": 0.5714285714285714,
      "eval_accuracy": 0.3787878787878788,
      "eval_loss": 1.2138265371322632,
      "eval_runtime": 1.4955,
      "eval_samples_per_second": 88.263,
      "eval_steps_per_second": 2.675,
      "step": 4
    },
    {
      "epoch": 0.7142857142857143,
      "grad_norm": 26.523130416870117,
      "learning_rate": 4.7794117647058826e-05,
      "loss": 1.012,
      "step": 5
    },
    {
      "epoch": 0.7142857142857143,
      "eval_accuracy": 0.4166666666666667,
      "eval_loss": 1.1368831396102905,
      "eval_runtime": 1.5429,
      "eval_samples_per_second": 85.554,
      "eval_steps_per_second": 2.593,
      "step": 5
    },
    {
      "epoch": 0.8571428571428571,
      "grad_norm": 26.6580753326416,
      "learning_rate": 4.705882352941177e-05,
      "loss": 0.9865,
      "step": 6
    },
    {
      "epoch": 0.8571428571428571,
      "eval_accuracy": 0.4166666666666667,
      "eval_loss": 1.0571379661560059,
      "eval_runtime": 1.5511,
      "eval_samples_per_second": 85.102,
      "eval_steps_per_second": 2.579,
      "step": 6
    },
    {
      "epoch": 1.0,
      "grad_norm": 21.781848907470703,
      "learning_rate": 4.632352941176471e-05,
      "loss": 0.8759,
      "step": 7
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.42424242424242425,
      "eval_loss": 0.9770345687866211,
      "eval_runtime": 1.4987,
      "eval_samples_per_second": 88.077,
      "eval_steps_per_second": 2.669,
      "step": 7
    },
    {
      "epoch": 1.1428571428571428,
      "grad_norm": 11.7108154296875,
      "learning_rate": 4.558823529411765e-05,
      "loss": 0.7938,
      "step": 8
    },
    {
      "epoch": 1.1428571428571428,
      "eval_accuracy": 0.42424242424242425,
      "eval_loss": 0.9049923419952393,
      "eval_runtime": 1.4998,
      "eval_samples_per_second": 88.013,
      "eval_steps_per_second": 2.667,
      "step": 8
    },
    {
      "epoch": 1.2857142857142856,
      "grad_norm": 14.725457191467285,
      "learning_rate": 4.485294117647059e-05,
      "loss": 0.7778,
      "step": 9
    },
    {
      "epoch": 1.2857142857142856,
      "eval_accuracy": 0.42424242424242425,
      "eval_loss": 0.8363458514213562,
      "eval_runtime": 1.4943,
      "eval_samples_per_second": 88.337,
      "eval_steps_per_second": 2.677,
      "step": 9
    },
    {
      "epoch": 1.4285714285714286,
      "grad_norm": 14.5535249710083,
      "learning_rate": 4.411764705882353e-05,
      "loss": 0.7377,
      "step": 10
    },
    {
      "epoch": 1.4285714285714286,
      "eval_accuracy": 0.42424242424242425,
      "eval_loss": 0.7730409502983093,
      "eval_runtime": 1.5479,
      "eval_samples_per_second": 85.277,
      "eval_steps_per_second": 2.584,
      "step": 10
    },
    {
      "epoch": 1.5714285714285714,
      "grad_norm": 16.812053680419922,
      "learning_rate": 4.3382352941176474e-05,
      "loss": 0.7563,
      "step": 11
    },
    {
      "epoch": 1.5714285714285714,
      "eval_accuracy": 0.42424242424242425,
      "eval_loss": 0.7208273410797119,
      "eval_runtime": 1.5435,
      "eval_samples_per_second": 85.519,
      "eval_steps_per_second": 2.591,
      "step": 11
    },
    {
      "epoch": 1.7142857142857144,
      "grad_norm": 10.605402946472168,
      "learning_rate": 4.2647058823529415e-05,
      "loss": 0.755,
      "step": 12
    },
    {
      "epoch": 1.7142857142857144,
      "eval_accuracy": 0.5681818181818182,
      "eval_loss": 0.6822857856750488,
      "eval_runtime": 1.5006,
      "eval_samples_per_second": 87.968,
      "eval_steps_per_second": 2.666,
      "step": 12
    },
    {
      "epoch": 1.8571428571428572,
      "grad_norm": 5.790449619293213,
      "learning_rate": 4.1911764705882356e-05,
      "loss": 0.7331,
      "step": 13
    },
    {
      "epoch": 1.8571428571428572,
      "eval_accuracy": 0.6287878787878788,
      "eval_loss": 0.6623136401176453,
      "eval_runtime": 1.4474,
      "eval_samples_per_second": 91.196,
      "eval_steps_per_second": 2.764,
      "step": 13
    },
    {
      "epoch": 2.0,
      "grad_norm": 7.441537857055664,
      "learning_rate": 4.11764705882353e-05,
      "loss": 0.774,
      "step": 14
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.6287878787878788,
      "eval_loss": 0.6604166626930237,
      "eval_runtime": 1.5501,
      "eval_samples_per_second": 85.155,
      "eval_steps_per_second": 2.58,
      "step": 14
    },
    {
      "epoch": 2.142857142857143,
      "grad_norm": 2.9518423080444336,
      "learning_rate": 4.044117647058824e-05,
      "loss": 0.6674,
      "step": 15
    },
    {
      "epoch": 2.142857142857143,
      "eval_accuracy": 0.5984848484848485,
      "eval_loss": 0.6644145846366882,
      "eval_runtime": 1.498,
      "eval_samples_per_second": 88.118,
      "eval_steps_per_second": 2.67,
      "step": 15
    },
    {
      "epoch": 2.2857142857142856,
      "grad_norm": 5.2332682609558105,
      "learning_rate": 3.970588235294117e-05,
      "loss": 0.7251,
      "step": 16
    },
    {
      "epoch": 2.2857142857142856,
      "eval_accuracy": 0.5909090909090909,
      "eval_loss": 0.6694009900093079,
      "eval_runtime": 1.5426,
      "eval_samples_per_second": 85.572,
      "eval_steps_per_second": 2.593,
      "step": 16
    },
    {
      "epoch": 2.4285714285714284,
      "grad_norm": 15.631101608276367,
      "learning_rate": 3.897058823529412e-05,
      "loss": 0.7931,
      "step": 17
    },
    {
      "epoch": 2.4285714285714284,
      "eval_accuracy": 0.5833333333333334,
      "eval_loss": 0.6746625900268555,
      "eval_runtime": 1.5454,
      "eval_samples_per_second": 85.415,
      "eval_steps_per_second": 2.588,
      "step": 17
    },
    {
      "epoch": 2.571428571428571,
      "grad_norm": 8.961908340454102,
      "learning_rate": 3.8235294117647055e-05,
      "loss": 0.7463,
      "step": 18
    },
    {
      "epoch": 2.571428571428571,
      "eval_accuracy": 0.5757575757575758,
      "eval_loss": 0.683046281337738,
      "eval_runtime": 1.5455,
      "eval_samples_per_second": 85.408,
      "eval_steps_per_second": 2.588,
      "step": 18
    },
    {
      "epoch": 2.7142857142857144,
      "grad_norm": 1.4260056018829346,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 0.6853,
      "step": 19
    },
    {
      "epoch": 2.7142857142857144,
      "eval_accuracy": 0.5833333333333334,
      "eval_loss": 0.6882531046867371,
      "eval_runtime": 1.4995,
      "eval_samples_per_second": 88.032,
      "eval_steps_per_second": 2.668,
      "step": 19
    },
    {
      "epoch": 2.857142857142857,
      "grad_norm": 4.879419326782227,
      "learning_rate": 3.6764705882352945e-05,
      "loss": 0.7112,
      "step": 20
    },
    {
      "epoch": 2.857142857142857,
      "eval_accuracy": 0.5833333333333334,
      "eval_loss": 0.6945282816886902,
      "eval_runtime": 1.5509,
      "eval_samples_per_second": 85.109,
      "eval_steps_per_second": 2.579,
      "step": 20
    },
    {
      "epoch": 3.0,
      "grad_norm": 2.966745615005493,
      "learning_rate": 3.6029411764705886e-05,
      "loss": 0.7261,
      "step": 21
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.5378787878787878,
      "eval_loss": 0.705810546875,
      "eval_runtime": 1.5516,
      "eval_samples_per_second": 85.075,
      "eval_steps_per_second": 2.578,
      "step": 21
    },
    {
      "epoch": 3.142857142857143,
      "grad_norm": 1.4628124237060547,
      "learning_rate": 3.529411764705883e-05,
      "loss": 0.7183,
      "step": 22
    },
    {
      "epoch": 3.142857142857143,
      "eval_accuracy": 0.5,
      "eval_loss": 0.7155613899230957,
      "eval_runtime": 1.5506,
      "eval_samples_per_second": 85.127,
      "eval_steps_per_second": 2.58,
      "step": 22
    },
    {
      "epoch": 3.2857142857142856,
      "grad_norm": 2.861602306365967,
      "learning_rate": 3.455882352941177e-05,
      "loss": 0.714,
      "step": 23
    },
    {
      "epoch": 3.2857142857142856,
      "eval_accuracy": 0.5303030303030303,
      "eval_loss": 0.7245516777038574,
      "eval_runtime": 1.5005,
      "eval_samples_per_second": 87.969,
      "eval_steps_per_second": 2.666,
      "step": 23
    },
    {
      "epoch": 3.4285714285714284,
      "grad_norm": 5.409416675567627,
      "learning_rate": 3.382352941176471e-05,
      "loss": 0.7123,
      "step": 24
    },
    {
      "epoch": 3.4285714285714284,
      "eval_accuracy": 0.48484848484848486,
      "eval_loss": 0.7311227321624756,
      "eval_runtime": 1.5492,
      "eval_samples_per_second": 85.203,
      "eval_steps_per_second": 2.582,
      "step": 24
    },
    {
      "epoch": 3.571428571428571,
      "grad_norm": 5.741272926330566,
      "learning_rate": 3.308823529411765e-05,
      "loss": 0.7952,
      "step": 25
    },
    {
      "epoch": 3.571428571428571,
      "eval_accuracy": 0.4696969696969697,
      "eval_loss": 0.7317812442779541,
      "eval_runtime": 1.4986,
      "eval_samples_per_second": 88.082,
      "eval_steps_per_second": 2.669,
      "step": 25
    },
    {
      "epoch": 3.7142857142857144,
      "grad_norm": 4.539316177368164,
      "learning_rate": 3.235294117647059e-05,
      "loss": 0.6719,
      "step": 26
    },
    {
      "epoch": 3.7142857142857144,
      "eval_accuracy": 0.44696969696969696,
      "eval_loss": 0.7320696711540222,
      "eval_runtime": 1.5498,
      "eval_samples_per_second": 85.173,
      "eval_steps_per_second": 2.581,
      "step": 26
    },
    {
      "epoch": 3.857142857142857,
      "grad_norm": 5.660553932189941,
      "learning_rate": 3.161764705882353e-05,
      "loss": 0.6752,
      "step": 27
    },
    {
      "epoch": 3.857142857142857,
      "eval_accuracy": 0.4318181818181818,
      "eval_loss": 0.7303592562675476,
      "eval_runtime": 1.4988,
      "eval_samples_per_second": 88.068,
      "eval_steps_per_second": 2.669,
      "step": 27
    },
    {
      "epoch": 4.0,
      "grad_norm": 5.854210376739502,
      "learning_rate": 3.0882352941176475e-05,
      "loss": 0.7359,
      "step": 28
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.4621212121212121,
      "eval_loss": 0.7259455323219299,
      "eval_runtime": 1.5005,
      "eval_samples_per_second": 87.973,
      "eval_steps_per_second": 2.666,
      "step": 28
    },
    {
      "epoch": 4.142857142857143,
      "grad_norm": 5.565073490142822,
      "learning_rate": 3.0147058823529413e-05,
      "loss": 0.7201,
      "step": 29
    },
    {
      "epoch": 4.142857142857143,
      "eval_accuracy": 0.4696969696969697,
      "eval_loss": 0.7197902798652649,
      "eval_runtime": 1.4968,
      "eval_samples_per_second": 88.189,
      "eval_steps_per_second": 2.672,
      "step": 29
    },
    {
      "epoch": 4.285714285714286,
      "grad_norm": 7.555918216705322,
      "learning_rate": 2.9411764705882354e-05,
      "loss": 0.7135,
      "step": 30
    },
    {
      "epoch": 4.285714285714286,
      "eval_accuracy": 0.5227272727272727,
      "eval_loss": 0.7117217183113098,
      "eval_runtime": 1.5427,
      "eval_samples_per_second": 85.563,
      "eval_steps_per_second": 2.593,
      "step": 30
    },
    {
      "epoch": 4.428571428571429,
      "grad_norm": 3.8518428802490234,
      "learning_rate": 2.8676470588235295e-05,
      "loss": 0.6927,
      "step": 31
    },
    {
      "epoch": 4.428571428571429,
      "eval_accuracy": 0.5303030303030303,
      "eval_loss": 0.7075935006141663,
      "eval_runtime": 1.4971,
      "eval_samples_per_second": 88.171,
      "eval_steps_per_second": 2.672,
      "step": 31
    },
    {
      "epoch": 4.571428571428571,
      "grad_norm": 2.8224422931671143,
      "learning_rate": 2.7941176470588236e-05,
      "loss": 0.7018,
      "step": 32
    },
    {
      "epoch": 4.571428571428571,
      "eval_accuracy": 0.5,
      "eval_loss": 0.700927734375,
      "eval_runtime": 1.5484,
      "eval_samples_per_second": 85.247,
      "eval_steps_per_second": 2.583,
      "step": 32
    },
    {
      "epoch": 4.714285714285714,
      "grad_norm": 2.890439033508301,
      "learning_rate": 2.7205882352941174e-05,
      "loss": 0.7398,
      "step": 33
    },
    {
      "epoch": 4.714285714285714,
      "eval_accuracy": 0.5454545454545454,
      "eval_loss": 0.695911705493927,
      "eval_runtime": 1.5466,
      "eval_samples_per_second": 85.349,
      "eval_steps_per_second": 2.586,
      "step": 33
    },
    {
      "epoch": 4.857142857142857,
      "grad_norm": 2.9305408000946045,
      "learning_rate": 2.647058823529412e-05,
      "loss": 0.7487,
      "step": 34
    },
    {
      "epoch": 4.857142857142857,
      "eval_accuracy": 0.5378787878787878,
      "eval_loss": 0.6937810778617859,
      "eval_runtime": 1.5486,
      "eval_samples_per_second": 85.237,
      "eval_steps_per_second": 2.583,
      "step": 34
    },
    {
      "epoch": 5.0,
      "grad_norm": 3.190406084060669,
      "learning_rate": 2.5735294117647057e-05,
      "loss": 0.7183,
      "step": 35
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.5378787878787878,
      "eval_loss": 0.6898541450500488,
      "eval_runtime": 1.5451,
      "eval_samples_per_second": 85.433,
      "eval_steps_per_second": 2.589,
      "step": 35
    },
    {
      "epoch": 5.142857142857143,
      "grad_norm": 4.026767730712891,
      "learning_rate": 2.5e-05,
      "loss": 0.7524,
      "step": 36
    },
    {
      "epoch": 5.142857142857143,
      "eval_accuracy": 0.5454545454545454,
      "eval_loss": 0.6862215995788574,
      "eval_runtime": 1.5452,
      "eval_samples_per_second": 85.425,
      "eval_steps_per_second": 2.589,
      "step": 36
    },
    {
      "epoch": 5.285714285714286,
      "grad_norm": 7.148804187774658,
      "learning_rate": 2.4264705882352942e-05,
      "loss": 0.731,
      "step": 37
    },
    {
      "epoch": 5.285714285714286,
      "eval_accuracy": 0.5606060606060606,
      "eval_loss": 0.6843928098678589,
      "eval_runtime": 1.5499,
      "eval_samples_per_second": 85.166,
      "eval_steps_per_second": 2.581,
      "step": 37
    },
    {
      "epoch": 5.428571428571429,
      "grad_norm": 6.0132737159729,
      "learning_rate": 2.3529411764705884e-05,
      "loss": 0.7534,
      "step": 38
    },
    {
      "epoch": 5.428571428571429,
      "eval_accuracy": 0.553030303030303,
      "eval_loss": 0.6832978129386902,
      "eval_runtime": 1.5495,
      "eval_samples_per_second": 85.19,
      "eval_steps_per_second": 2.582,
      "step": 38
    },
    {
      "epoch": 5.571428571428571,
      "grad_norm": 2.2362067699432373,
      "learning_rate": 2.2794117647058825e-05,
      "loss": 0.7339,
      "step": 39
    },
    {
      "epoch": 5.571428571428571,
      "eval_accuracy": 0.5454545454545454,
      "eval_loss": 0.6838245987892151,
      "eval_runtime": 1.5476,
      "eval_samples_per_second": 85.292,
      "eval_steps_per_second": 2.585,
      "step": 39
    },
    {
      "epoch": 5.714285714285714,
      "grad_norm": 3.735261917114258,
      "learning_rate": 2.2058823529411766e-05,
      "loss": 0.6537,
      "step": 40
    },
    {
      "epoch": 5.714285714285714,
      "eval_accuracy": 0.553030303030303,
      "eval_loss": 0.6850881576538086,
      "eval_runtime": 1.5467,
      "eval_samples_per_second": 85.344,
      "eval_steps_per_second": 2.586,
      "step": 40
    },
    {
      "epoch": 5.857142857142857,
      "grad_norm": 5.130250453948975,
      "learning_rate": 2.1323529411764707e-05,
      "loss": 0.7286,
      "step": 41
    },
    {
      "epoch": 5.857142857142857,
      "eval_accuracy": 0.5454545454545454,
      "eval_loss": 0.6860766410827637,
      "eval_runtime": 1.5496,
      "eval_samples_per_second": 85.186,
      "eval_steps_per_second": 2.581,
      "step": 41
    },
    {
      "epoch": 6.0,
      "grad_norm": 2.0442426204681396,
      "learning_rate": 2.058823529411765e-05,
      "loss": 0.7069,
      "step": 42
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.5378787878787878,
      "eval_loss": 0.6856178641319275,
      "eval_runtime": 1.55,
      "eval_samples_per_second": 85.163,
      "eval_steps_per_second": 2.581,
      "step": 42
    },
    {
      "epoch": 6.142857142857143,
      "grad_norm": 2.5700104236602783,
      "learning_rate": 1.9852941176470586e-05,
      "loss": 0.724,
      "step": 43
    },
    {
      "epoch": 6.142857142857143,
      "eval_accuracy": 0.5378787878787878,
      "eval_loss": 0.684641420841217,
      "eval_runtime": 1.548,
      "eval_samples_per_second": 85.27,
      "eval_steps_per_second": 2.584,
      "step": 43
    },
    {
      "epoch": 6.285714285714286,
      "grad_norm": 7.043388366699219,
      "learning_rate": 1.9117647058823528e-05,
      "loss": 0.7109,
      "step": 44
    },
    {
      "epoch": 6.285714285714286,
      "eval_accuracy": 0.5378787878787878,
      "eval_loss": 0.6849846243858337,
      "eval_runtime": 1.5478,
      "eval_samples_per_second": 85.281,
      "eval_steps_per_second": 2.584,
      "step": 44
    },
    {
      "epoch": 6.428571428571429,
      "grad_norm": 3.666529655456543,
      "learning_rate": 1.8382352941176472e-05,
      "loss": 0.6973,
      "step": 45
    },
    {
      "epoch": 6.428571428571429,
      "eval_accuracy": 0.5303030303030303,
      "eval_loss": 0.6853426694869995,
      "eval_runtime": 1.5469,
      "eval_samples_per_second": 85.333,
      "eval_steps_per_second": 2.586,
      "step": 45
    },
    {
      "epoch": 6.571428571428571,
      "grad_norm": 3.5915279388427734,
      "learning_rate": 1.7647058823529414e-05,
      "loss": 0.701,
      "step": 46
    },
    {
      "epoch": 6.571428571428571,
      "eval_accuracy": 0.5227272727272727,
      "eval_loss": 0.6859287023544312,
      "eval_runtime": 1.5453,
      "eval_samples_per_second": 85.419,
      "eval_steps_per_second": 2.588,
      "step": 46
    },
    {
      "epoch": 6.714285714285714,
      "grad_norm": 6.7808003425598145,
      "learning_rate": 1.6911764705882355e-05,
      "loss": 0.7755,
      "step": 47
    },
    {
      "epoch": 6.714285714285714,
      "eval_accuracy": 0.5378787878787878,
      "eval_loss": 0.6857155561447144,
      "eval_runtime": 1.5467,
      "eval_samples_per_second": 85.341,
      "eval_steps_per_second": 2.586,
      "step": 47
    },
    {
      "epoch": 6.857142857142857,
      "grad_norm": 5.13129997253418,
      "learning_rate": 1.6176470588235296e-05,
      "loss": 0.7192,
      "step": 48
    },
    {
      "epoch": 6.857142857142857,
      "eval_accuracy": 0.5303030303030303,
      "eval_loss": 0.6857806444168091,
      "eval_runtime": 1.5503,
      "eval_samples_per_second": 85.146,
      "eval_steps_per_second": 2.58,
      "step": 48
    },
    {
      "epoch": 7.0,
      "grad_norm": 2.5239486694335938,
      "learning_rate": 1.5441176470588237e-05,
      "loss": 0.7459,
      "step": 49
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.5303030303030303,
      "eval_loss": 0.68532794713974,
      "eval_runtime": 1.5455,
      "eval_samples_per_second": 85.407,
      "eval_steps_per_second": 2.588,
      "step": 49
    },
    {
      "epoch": 7.142857142857143,
      "grad_norm": 5.412455081939697,
      "learning_rate": 1.4705882352941177e-05,
      "loss": 0.7348,
      "step": 50
    },
    {
      "epoch": 7.142857142857143,
      "eval_accuracy": 0.5227272727272727,
      "eval_loss": 0.6866211295127869,
      "eval_runtime": 1.5453,
      "eval_samples_per_second": 85.421,
      "eval_steps_per_second": 2.589,
      "step": 50
    },
    {
      "epoch": 7.285714285714286,
      "grad_norm": 2.2723119258880615,
      "learning_rate": 1.3970588235294118e-05,
      "loss": 0.7152,
      "step": 51
    },
    {
      "epoch": 7.285714285714286,
      "eval_accuracy": 0.5681818181818182,
      "eval_loss": 0.687363862991333,
      "eval_runtime": 1.5415,
      "eval_samples_per_second": 85.631,
      "eval_steps_per_second": 2.595,
      "step": 51
    },
    {
      "epoch": 7.428571428571429,
      "grad_norm": 2.08304762840271,
      "learning_rate": 1.323529411764706e-05,
      "loss": 0.6919,
      "step": 52
    },
    {
      "epoch": 7.428571428571429,
      "eval_accuracy": 0.5681818181818182,
      "eval_loss": 0.6890033483505249,
      "eval_runtime": 1.5478,
      "eval_samples_per_second": 85.284,
      "eval_steps_per_second": 2.584,
      "step": 52
    },
    {
      "epoch": 7.571428571428571,
      "grad_norm": 8.980510711669922,
      "learning_rate": 1.25e-05,
      "loss": 0.7328,
      "step": 53
    },
    {
      "epoch": 7.571428571428571,
      "eval_accuracy": 0.5681818181818182,
      "eval_loss": 0.6898555755615234,
      "eval_runtime": 1.5482,
      "eval_samples_per_second": 85.263,
      "eval_steps_per_second": 2.584,
      "step": 53
    },
    {
      "epoch": 7.714285714285714,
      "grad_norm": 3.777273416519165,
      "learning_rate": 1.1764705882352942e-05,
      "loss": 0.6631,
      "step": 54
    },
    {
      "epoch": 7.714285714285714,
      "eval_accuracy": 0.5681818181818182,
      "eval_loss": 0.6898496150970459,
      "eval_runtime": 1.5483,
      "eval_samples_per_second": 85.252,
      "eval_steps_per_second": 2.583,
      "step": 54
    },
    {
      "epoch": 7.857142857142857,
      "grad_norm": 5.7015204429626465,
      "learning_rate": 1.1029411764705883e-05,
      "loss": 0.7114,
      "step": 55
    },
    {
      "epoch": 7.857142857142857,
      "eval_accuracy": 0.5606060606060606,
      "eval_loss": 0.6892370581626892,
      "eval_runtime": 1.5489,
      "eval_samples_per_second": 85.22,
      "eval_steps_per_second": 2.582,
      "step": 55
    },
    {
      "epoch": 8.0,
      "grad_norm": 2.6641385555267334,
      "learning_rate": 1.0294117647058824e-05,
      "loss": 0.7448,
      "step": 56
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.5606060606060606,
      "eval_loss": 0.6905569434165955,
      "eval_runtime": 1.5448,
      "eval_samples_per_second": 85.448,
      "eval_steps_per_second": 2.589,
      "step": 56
    },
    {
      "epoch": 8.142857142857142,
      "grad_norm": 2.0213890075683594,
      "learning_rate": 9.558823529411764e-06,
      "loss": 0.7065,
      "step": 57
    },
    {
      "epoch": 8.142857142857142,
      "eval_accuracy": 0.553030303030303,
      "eval_loss": 0.6913914680480957,
      "eval_runtime": 1.5496,
      "eval_samples_per_second": 85.183,
      "eval_steps_per_second": 2.581,
      "step": 57
    },
    {
      "epoch": 8.285714285714286,
      "grad_norm": 2.5657827854156494,
      "learning_rate": 8.823529411764707e-06,
      "loss": 0.697,
      "step": 58
    },
    {
      "epoch": 8.285714285714286,
      "eval_accuracy": 0.5606060606060606,
      "eval_loss": 0.691057026386261,
      "eval_runtime": 1.545,
      "eval_samples_per_second": 85.438,
      "eval_steps_per_second": 2.589,
      "step": 58
    },
    {
      "epoch": 8.428571428571429,
      "grad_norm": 2.7576677799224854,
      "learning_rate": 8.088235294117648e-06,
      "loss": 0.7197,
      "step": 59
    },
    {
      "epoch": 8.428571428571429,
      "eval_accuracy": 0.5378787878787878,
      "eval_loss": 0.6916370987892151,
      "eval_runtime": 1.5467,
      "eval_samples_per_second": 85.343,
      "eval_steps_per_second": 2.586,
      "step": 59
    },
    {
      "epoch": 8.571428571428571,
      "grad_norm": 4.963264465332031,
      "learning_rate": 7.3529411764705884e-06,
      "loss": 0.7097,
      "step": 60
    },
    {
      "epoch": 8.571428571428571,
      "eval_accuracy": 0.5606060606060606,
      "eval_loss": 0.6910924911499023,
      "eval_runtime": 1.5475,
      "eval_samples_per_second": 85.297,
      "eval_steps_per_second": 2.585,
      "step": 60
    },
    {
      "epoch": 8.714285714285714,
      "grad_norm": 10.05405044555664,
      "learning_rate": 6.61764705882353e-06,
      "loss": 0.687,
      "step": 61
    },
    {
      "epoch": 8.714285714285714,
      "eval_accuracy": 0.553030303030303,
      "eval_loss": 0.690488874912262,
      "eval_runtime": 1.5501,
      "eval_samples_per_second": 85.156,
      "eval_steps_per_second": 2.58,
      "step": 61
    },
    {
      "epoch": 8.857142857142858,
      "grad_norm": 5.774103164672852,
      "learning_rate": 5.882352941176471e-06,
      "loss": 0.7323,
      "step": 62
    },
    {
      "epoch": 8.857142857142858,
      "eval_accuracy": 0.5303030303030303,
      "eval_loss": 0.6909504532814026,
      "eval_runtime": 1.5471,
      "eval_samples_per_second": 85.323,
      "eval_steps_per_second": 2.586,
      "step": 62
    },
    {
      "epoch": 9.0,
      "grad_norm": 7.126034736633301,
      "learning_rate": 5.147058823529412e-06,
      "loss": 0.721,
      "step": 63
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.5378787878787878,
      "eval_loss": 0.6910215616226196,
      "eval_runtime": 1.5426,
      "eval_samples_per_second": 85.569,
      "eval_steps_per_second": 2.593,
      "step": 63
    },
    {
      "epoch": 9.142857142857142,
      "grad_norm": 4.496044635772705,
      "learning_rate": 4.411764705882353e-06,
      "loss": 0.7411,
      "step": 64
    },
    {
      "epoch": 9.142857142857142,
      "eval_accuracy": 0.5454545454545454,
      "eval_loss": 0.6925870180130005,
      "eval_runtime": 1.5463,
      "eval_samples_per_second": 85.367,
      "eval_steps_per_second": 2.587,
      "step": 64
    },
    {
      "epoch": 9.285714285714286,
      "grad_norm": 5.705209732055664,
      "learning_rate": 3.6764705882352942e-06,
      "loss": 0.7041,
      "step": 65
    },
    {
      "epoch": 9.285714285714286,
      "eval_accuracy": 0.553030303030303,
      "eval_loss": 0.6924833655357361,
      "eval_runtime": 1.5448,
      "eval_samples_per_second": 85.45,
      "eval_steps_per_second": 2.589,
      "step": 65
    },
    {
      "epoch": 9.428571428571429,
      "grad_norm": 3.551724910736084,
      "learning_rate": 2.9411764705882355e-06,
      "loss": 0.7559,
      "step": 66
    },
    {
      "epoch": 9.428571428571429,
      "eval_accuracy": 0.5454545454545454,
      "eval_loss": 0.6926906108856201,
      "eval_runtime": 1.5456,
      "eval_samples_per_second": 85.403,
      "eval_steps_per_second": 2.588,
      "step": 66
    },
    {
      "epoch": 9.571428571428571,
      "grad_norm": 1.5494858026504517,
      "learning_rate": 2.2058823529411767e-06,
      "loss": 0.6951,
      "step": 67
    },
    {
      "epoch": 9.571428571428571,
      "eval_accuracy": 0.5378787878787878,
      "eval_loss": 0.6926224231719971,
      "eval_runtime": 1.5442,
      "eval_samples_per_second": 85.483,
      "eval_steps_per_second": 2.59,
      "step": 67
    },
    {
      "epoch": 9.714285714285714,
      "grad_norm": 6.727674961090088,
      "learning_rate": 1.4705882352941177e-06,
      "loss": 0.7175,
      "step": 68
    },
    {
      "epoch": 9.714285714285714,
      "eval_accuracy": 0.5303030303030303,
      "eval_loss": 0.692868173122406,
      "eval_runtime": 1.5499,
      "eval_samples_per_second": 85.165,
      "eval_steps_per_second": 2.581,
      "step": 68
    },
    {
      "epoch": 9.857142857142858,
      "grad_norm": 2.2420670986175537,
      "learning_rate": 7.352941176470589e-07,
      "loss": 0.6852,
      "step": 69
    },
    {
      "epoch": 9.857142857142858,
      "eval_accuracy": 0.5454545454545454,
      "eval_loss": 0.6923591494560242,
      "eval_runtime": 1.5004,
      "eval_samples_per_second": 87.976,
      "eval_steps_per_second": 2.666,
      "step": 69
    },
    {
      "epoch": 10.0,
      "grad_norm": 4.505035400390625,
      "learning_rate": 0.0,
      "loss": 0.6894,
      "step": 70
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.553030303030303,
      "eval_loss": 0.6922733187675476,
      "eval_runtime": 1.4985,
      "eval_samples_per_second": 88.086,
      "eval_steps_per_second": 2.669,
      "step": 70
    },
    {
      "epoch": 10.0,
      "step": 70,
      "total_flos": 62972060631040.0,
      "train_loss": 0.7597047252314432,
      "train_runtime": 351.9683,
      "train_samples_per_second": 14.945,
      "train_steps_per_second": 0.199
    }
  ],
  "logging_steps": 1,
  "max_steps": 70,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "total_flos": 62972060631040.0,
  "train_batch_size": 10,
  "trial_name": null,
  "trial_params": null
}