|
{ |
|
"best_metric": 0.986134647974118, |
|
"best_model_checkpoint": "beit-base-patch16-224-pt22k-ft22k-finetuned-lora-medmnistv2/checkpoint-4864", |
|
"epoch": 9.990749306197966, |
|
"eval_steps": 500, |
|
"global_step": 5400, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.02, |
|
"grad_norm": 3.66642165184021, |
|
"learning_rate": 0.004990740740740741, |
|
"loss": 1.7617, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"grad_norm": 5.545452117919922, |
|
"learning_rate": 0.004982407407407408, |
|
"loss": 1.1989, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"grad_norm": 5.292081356048584, |
|
"learning_rate": 0.004973148148148148, |
|
"loss": 1.0735, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"grad_norm": 3.0206360816955566, |
|
"learning_rate": 0.004963888888888889, |
|
"loss": 1.1936, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"grad_norm": 3.947781562805176, |
|
"learning_rate": 0.004954629629629629, |
|
"loss": 1.0373, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"grad_norm": 1.839845895767212, |
|
"learning_rate": 0.004945370370370371, |
|
"loss": 0.9221, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"grad_norm": 2.843251943588257, |
|
"learning_rate": 0.004936111111111112, |
|
"loss": 0.8724, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"grad_norm": 3.0122690200805664, |
|
"learning_rate": 0.0049268518518518515, |
|
"loss": 0.9201, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"grad_norm": 3.403461456298828, |
|
"learning_rate": 0.004917592592592593, |
|
"loss": 0.9221, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"grad_norm": 2.4073173999786377, |
|
"learning_rate": 0.004908333333333333, |
|
"loss": 0.8766, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"grad_norm": 1.793009877204895, |
|
"learning_rate": 0.004899074074074074, |
|
"loss": 0.8742, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"grad_norm": 1.8483041524887085, |
|
"learning_rate": 0.004889814814814815, |
|
"loss": 0.8097, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"grad_norm": 1.997031807899475, |
|
"learning_rate": 0.0048805555555555555, |
|
"loss": 0.888, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"grad_norm": 3.958829641342163, |
|
"learning_rate": 0.004871296296296296, |
|
"loss": 0.8276, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"grad_norm": 2.8835649490356445, |
|
"learning_rate": 0.004862037037037037, |
|
"loss": 0.8469, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"grad_norm": 2.2343573570251465, |
|
"learning_rate": 0.004852777777777778, |
|
"loss": 0.8354, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"grad_norm": 1.8973902463912964, |
|
"learning_rate": 0.004843518518518519, |
|
"loss": 0.7974, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"grad_norm": 2.303403854370117, |
|
"learning_rate": 0.0048342592592592595, |
|
"loss": 0.7531, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"grad_norm": 2.833897352218628, |
|
"learning_rate": 0.004825, |
|
"loss": 0.7917, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"grad_norm": 2.833991527557373, |
|
"learning_rate": 0.004815740740740741, |
|
"loss": 0.9165, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"grad_norm": 2.109811305999756, |
|
"learning_rate": 0.004806481481481482, |
|
"loss": 0.7347, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"grad_norm": 4.833127498626709, |
|
"learning_rate": 0.004797222222222223, |
|
"loss": 0.8852, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"grad_norm": 6.393899440765381, |
|
"learning_rate": 0.004787962962962963, |
|
"loss": 0.7416, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"grad_norm": 2.243924856185913, |
|
"learning_rate": 0.004778703703703704, |
|
"loss": 0.7616, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"grad_norm": 2.55934739112854, |
|
"learning_rate": 0.004769444444444444, |
|
"loss": 0.6881, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"grad_norm": 2.707200765609741, |
|
"learning_rate": 0.004760185185185185, |
|
"loss": 0.7597, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"grad_norm": 3.238736867904663, |
|
"learning_rate": 0.004750925925925926, |
|
"loss": 0.6598, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"grad_norm": 1.6335750818252563, |
|
"learning_rate": 0.004741666666666667, |
|
"loss": 0.6988, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"grad_norm": 1.6415876150131226, |
|
"learning_rate": 0.004732407407407407, |
|
"loss": 0.7615, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"grad_norm": 2.09512996673584, |
|
"learning_rate": 0.004723148148148148, |
|
"loss": 0.7345, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"grad_norm": 2.1141433715820312, |
|
"learning_rate": 0.004713888888888889, |
|
"loss": 0.7503, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"grad_norm": 1.1782424449920654, |
|
"learning_rate": 0.00470462962962963, |
|
"loss": 0.6997, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"grad_norm": 2.7585694789886475, |
|
"learning_rate": 0.004695370370370371, |
|
"loss": 0.7703, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"grad_norm": 1.3906846046447754, |
|
"learning_rate": 0.004686111111111111, |
|
"loss": 0.6595, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"grad_norm": 1.9824657440185547, |
|
"learning_rate": 0.004676851851851852, |
|
"loss": 0.612, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"grad_norm": 3.896270990371704, |
|
"learning_rate": 0.004667592592592593, |
|
"loss": 0.8571, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"grad_norm": 1.860967993736267, |
|
"learning_rate": 0.004658333333333333, |
|
"loss": 0.774, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"grad_norm": 1.4278061389923096, |
|
"learning_rate": 0.004649074074074074, |
|
"loss": 0.7693, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"grad_norm": 4.371230125427246, |
|
"learning_rate": 0.004639814814814815, |
|
"loss": 0.6989, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"grad_norm": 1.6411677598953247, |
|
"learning_rate": 0.004630555555555555, |
|
"loss": 0.8566, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"grad_norm": 3.412655830383301, |
|
"learning_rate": 0.004621296296296296, |
|
"loss": 0.6902, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"grad_norm": 1.9510608911514282, |
|
"learning_rate": 0.004612037037037038, |
|
"loss": 0.8284, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"grad_norm": 1.8800582885742188, |
|
"learning_rate": 0.004602777777777778, |
|
"loss": 0.6096, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"grad_norm": 2.7876181602478027, |
|
"learning_rate": 0.0045935185185185185, |
|
"loss": 0.7293, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"grad_norm": 2.0592384338378906, |
|
"learning_rate": 0.004584259259259259, |
|
"loss": 0.756, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"grad_norm": 3.5372824668884277, |
|
"learning_rate": 0.004575, |
|
"loss": 0.6865, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"grad_norm": 1.7686892747879028, |
|
"learning_rate": 0.004565740740740741, |
|
"loss": 0.7477, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"grad_norm": 2.8619141578674316, |
|
"learning_rate": 0.004556481481481482, |
|
"loss": 0.6861, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"grad_norm": 2.4887492656707764, |
|
"learning_rate": 0.0045472222222222225, |
|
"loss": 0.7623, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"grad_norm": 1.9150561094284058, |
|
"learning_rate": 0.004537962962962963, |
|
"loss": 0.7462, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"grad_norm": 2.1251327991485596, |
|
"learning_rate": 0.004528703703703704, |
|
"loss": 0.673, |
|
"step": 510 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"grad_norm": 2.338467597961426, |
|
"learning_rate": 0.004519444444444444, |
|
"loss": 0.6898, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"grad_norm": 2.199749708175659, |
|
"learning_rate": 0.004510185185185186, |
|
"loss": 0.6776, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"grad_norm": 1.5489593744277954, |
|
"learning_rate": 0.0045009259259259264, |
|
"loss": 0.6786, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"eval_accuracy": 0.9339084886766292, |
|
"eval_f1": 0.938537564429046, |
|
"eval_loss": 0.17764028906822205, |
|
"eval_precision": 0.9507286953627705, |
|
"eval_recall": 0.934120062770795, |
|
"eval_runtime": 54.7942, |
|
"eval_samples_per_second": 118.462, |
|
"eval_steps_per_second": 7.41, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 1.02, |
|
"grad_norm": 2.148123264312744, |
|
"learning_rate": 0.004491666666666666, |
|
"loss": 0.592, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 1.04, |
|
"grad_norm": 3.1778488159179688, |
|
"learning_rate": 0.004482407407407407, |
|
"loss": 0.93, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 1.05, |
|
"grad_norm": 1.9821240901947021, |
|
"learning_rate": 0.004473148148148149, |
|
"loss": 0.8283, |
|
"step": 570 |
|
}, |
|
{ |
|
"epoch": 1.07, |
|
"grad_norm": 1.9005764722824097, |
|
"learning_rate": 0.004463888888888889, |
|
"loss": 0.7101, |
|
"step": 580 |
|
}, |
|
{ |
|
"epoch": 1.09, |
|
"grad_norm": 2.126354217529297, |
|
"learning_rate": 0.0044546296296296296, |
|
"loss": 0.6671, |
|
"step": 590 |
|
}, |
|
{ |
|
"epoch": 1.11, |
|
"grad_norm": 2.0805599689483643, |
|
"learning_rate": 0.00444537037037037, |
|
"loss": 0.6665, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 1.13, |
|
"grad_norm": 1.5277152061462402, |
|
"learning_rate": 0.004436111111111111, |
|
"loss": 0.7092, |
|
"step": 610 |
|
}, |
|
{ |
|
"epoch": 1.15, |
|
"grad_norm": 4.088949680328369, |
|
"learning_rate": 0.004426851851851852, |
|
"loss": 0.8679, |
|
"step": 620 |
|
}, |
|
{ |
|
"epoch": 1.17, |
|
"grad_norm": 4.5228376388549805, |
|
"learning_rate": 0.004417592592592593, |
|
"loss": 0.7648, |
|
"step": 630 |
|
}, |
|
{ |
|
"epoch": 1.18, |
|
"grad_norm": 4.287468910217285, |
|
"learning_rate": 0.0044083333333333335, |
|
"loss": 0.725, |
|
"step": 640 |
|
}, |
|
{ |
|
"epoch": 1.2, |
|
"grad_norm": 3.0292155742645264, |
|
"learning_rate": 0.004399074074074074, |
|
"loss": 0.7416, |
|
"step": 650 |
|
}, |
|
{ |
|
"epoch": 1.22, |
|
"grad_norm": 2.364569664001465, |
|
"learning_rate": 0.004389814814814815, |
|
"loss": 0.7576, |
|
"step": 660 |
|
}, |
|
{ |
|
"epoch": 1.24, |
|
"grad_norm": 1.8046070337295532, |
|
"learning_rate": 0.004380555555555555, |
|
"loss": 0.6917, |
|
"step": 670 |
|
}, |
|
{ |
|
"epoch": 1.26, |
|
"grad_norm": 3.8957250118255615, |
|
"learning_rate": 0.004371296296296297, |
|
"loss": 0.6671, |
|
"step": 680 |
|
}, |
|
{ |
|
"epoch": 1.28, |
|
"grad_norm": 2.4471426010131836, |
|
"learning_rate": 0.0043620370370370375, |
|
"loss": 0.6749, |
|
"step": 690 |
|
}, |
|
{ |
|
"epoch": 1.3, |
|
"grad_norm": 1.9041227102279663, |
|
"learning_rate": 0.0043527777777777775, |
|
"loss": 0.7423, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 1.31, |
|
"grad_norm": 3.001101016998291, |
|
"learning_rate": 0.004343518518518519, |
|
"loss": 0.7775, |
|
"step": 710 |
|
}, |
|
{ |
|
"epoch": 1.33, |
|
"grad_norm": 2.1323304176330566, |
|
"learning_rate": 0.004334259259259259, |
|
"loss": 0.6961, |
|
"step": 720 |
|
}, |
|
{ |
|
"epoch": 1.35, |
|
"grad_norm": 3.0839684009552, |
|
"learning_rate": 0.004325, |
|
"loss": 0.6791, |
|
"step": 730 |
|
}, |
|
{ |
|
"epoch": 1.37, |
|
"grad_norm": 1.924004316329956, |
|
"learning_rate": 0.004315740740740741, |
|
"loss": 0.663, |
|
"step": 740 |
|
}, |
|
{ |
|
"epoch": 1.39, |
|
"grad_norm": 1.854284644126892, |
|
"learning_rate": 0.0043064814814814814, |
|
"loss": 0.706, |
|
"step": 750 |
|
}, |
|
{ |
|
"epoch": 1.41, |
|
"grad_norm": 2.4816770553588867, |
|
"learning_rate": 0.004297222222222222, |
|
"loss": 0.6623, |
|
"step": 760 |
|
}, |
|
{ |
|
"epoch": 1.42, |
|
"grad_norm": 1.8469676971435547, |
|
"learning_rate": 0.004287962962962963, |
|
"loss": 0.6025, |
|
"step": 770 |
|
}, |
|
{ |
|
"epoch": 1.44, |
|
"grad_norm": 1.6703932285308838, |
|
"learning_rate": 0.004278703703703704, |
|
"loss": 0.7209, |
|
"step": 780 |
|
}, |
|
{ |
|
"epoch": 1.46, |
|
"grad_norm": 1.3802216053009033, |
|
"learning_rate": 0.004269444444444445, |
|
"loss": 0.6823, |
|
"step": 790 |
|
}, |
|
{ |
|
"epoch": 1.48, |
|
"grad_norm": 2.1210741996765137, |
|
"learning_rate": 0.004260185185185185, |
|
"loss": 0.6577, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 1.5, |
|
"grad_norm": 1.841590404510498, |
|
"learning_rate": 0.004250925925925926, |
|
"loss": 0.6049, |
|
"step": 810 |
|
}, |
|
{ |
|
"epoch": 1.52, |
|
"grad_norm": 2.4515421390533447, |
|
"learning_rate": 0.004241666666666667, |
|
"loss": 0.6555, |
|
"step": 820 |
|
}, |
|
{ |
|
"epoch": 1.54, |
|
"grad_norm": 2.086291551589966, |
|
"learning_rate": 0.004232407407407408, |
|
"loss": 0.7836, |
|
"step": 830 |
|
}, |
|
{ |
|
"epoch": 1.55, |
|
"grad_norm": 1.714977741241455, |
|
"learning_rate": 0.004223148148148149, |
|
"loss": 0.6868, |
|
"step": 840 |
|
}, |
|
{ |
|
"epoch": 1.57, |
|
"grad_norm": 1.9800652265548706, |
|
"learning_rate": 0.0042138888888888885, |
|
"loss": 0.7313, |
|
"step": 850 |
|
}, |
|
{ |
|
"epoch": 1.59, |
|
"grad_norm": 2.726518154144287, |
|
"learning_rate": 0.00420462962962963, |
|
"loss": 0.6946, |
|
"step": 860 |
|
}, |
|
{ |
|
"epoch": 1.61, |
|
"grad_norm": 2.3700504302978516, |
|
"learning_rate": 0.00419537037037037, |
|
"loss": 0.7765, |
|
"step": 870 |
|
}, |
|
{ |
|
"epoch": 1.63, |
|
"grad_norm": 3.294623851776123, |
|
"learning_rate": 0.004186111111111111, |
|
"loss": 0.7001, |
|
"step": 880 |
|
}, |
|
{ |
|
"epoch": 1.65, |
|
"grad_norm": 1.3391900062561035, |
|
"learning_rate": 0.004176851851851852, |
|
"loss": 0.7045, |
|
"step": 890 |
|
}, |
|
{ |
|
"epoch": 1.67, |
|
"grad_norm": 3.3717267513275146, |
|
"learning_rate": 0.0041675925925925925, |
|
"loss": 0.7396, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 1.68, |
|
"grad_norm": 2.7791171073913574, |
|
"learning_rate": 0.004158333333333333, |
|
"loss": 0.6946, |
|
"step": 910 |
|
}, |
|
{ |
|
"epoch": 1.7, |
|
"grad_norm": 2.9420201778411865, |
|
"learning_rate": 0.004149074074074074, |
|
"loss": 0.7215, |
|
"step": 920 |
|
}, |
|
{ |
|
"epoch": 1.72, |
|
"grad_norm": 2.337606191635132, |
|
"learning_rate": 0.004139814814814815, |
|
"loss": 0.6967, |
|
"step": 930 |
|
}, |
|
{ |
|
"epoch": 1.74, |
|
"grad_norm": 2.43172025680542, |
|
"learning_rate": 0.004130555555555556, |
|
"loss": 0.7989, |
|
"step": 940 |
|
}, |
|
{ |
|
"epoch": 1.76, |
|
"grad_norm": 1.683398723602295, |
|
"learning_rate": 0.0041212962962962965, |
|
"loss": 0.6185, |
|
"step": 950 |
|
}, |
|
{ |
|
"epoch": 1.78, |
|
"grad_norm": 2.1114211082458496, |
|
"learning_rate": 0.004112037037037037, |
|
"loss": 0.7013, |
|
"step": 960 |
|
}, |
|
{ |
|
"epoch": 1.79, |
|
"grad_norm": 2.566915512084961, |
|
"learning_rate": 0.004102777777777778, |
|
"loss": 0.6091, |
|
"step": 970 |
|
}, |
|
{ |
|
"epoch": 1.81, |
|
"grad_norm": 4.286575794219971, |
|
"learning_rate": 0.004093518518518519, |
|
"loss": 0.7474, |
|
"step": 980 |
|
}, |
|
{ |
|
"epoch": 1.83, |
|
"grad_norm": 1.9286409616470337, |
|
"learning_rate": 0.004084259259259259, |
|
"loss": 0.7089, |
|
"step": 990 |
|
}, |
|
{ |
|
"epoch": 1.85, |
|
"grad_norm": 2.5214929580688477, |
|
"learning_rate": 0.004075, |
|
"loss": 0.7412, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 1.87, |
|
"grad_norm": 2.4169070720672607, |
|
"learning_rate": 0.004065740740740741, |
|
"loss": 0.826, |
|
"step": 1010 |
|
}, |
|
{ |
|
"epoch": 1.89, |
|
"grad_norm": 2.7352206707000732, |
|
"learning_rate": 0.004056481481481481, |
|
"loss": 0.7868, |
|
"step": 1020 |
|
}, |
|
{ |
|
"epoch": 1.91, |
|
"grad_norm": 1.969298243522644, |
|
"learning_rate": 0.004047222222222222, |
|
"loss": 0.6795, |
|
"step": 1030 |
|
}, |
|
{ |
|
"epoch": 1.92, |
|
"grad_norm": 3.015155792236328, |
|
"learning_rate": 0.004037962962962964, |
|
"loss": 0.7852, |
|
"step": 1040 |
|
}, |
|
{ |
|
"epoch": 1.94, |
|
"grad_norm": 2.2428641319274902, |
|
"learning_rate": 0.004028703703703704, |
|
"loss": 0.7271, |
|
"step": 1050 |
|
}, |
|
{ |
|
"epoch": 1.96, |
|
"grad_norm": 2.1830852031707764, |
|
"learning_rate": 0.004019444444444444, |
|
"loss": 0.6171, |
|
"step": 1060 |
|
}, |
|
{ |
|
"epoch": 1.98, |
|
"grad_norm": 4.2572455406188965, |
|
"learning_rate": 0.004010185185185185, |
|
"loss": 0.715, |
|
"step": 1070 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"grad_norm": 1.7257713079452515, |
|
"learning_rate": 0.004000925925925926, |
|
"loss": 0.7397, |
|
"step": 1080 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"eval_accuracy": 0.9406871052226159, |
|
"eval_f1": 0.9414537311136683, |
|
"eval_loss": 0.17834338545799255, |
|
"eval_precision": 0.9539065276276052, |
|
"eval_recall": 0.9346301030411137, |
|
"eval_runtime": 55.3829, |
|
"eval_samples_per_second": 117.202, |
|
"eval_steps_per_second": 7.331, |
|
"step": 1081 |
|
}, |
|
{ |
|
"epoch": 2.02, |
|
"grad_norm": 1.4031952619552612, |
|
"learning_rate": 0.003991666666666667, |
|
"loss": 0.652, |
|
"step": 1090 |
|
}, |
|
{ |
|
"epoch": 2.04, |
|
"grad_norm": 2.210618257522583, |
|
"learning_rate": 0.003982407407407408, |
|
"loss": 0.6434, |
|
"step": 1100 |
|
}, |
|
{ |
|
"epoch": 2.05, |
|
"grad_norm": 2.780142068862915, |
|
"learning_rate": 0.0039731481481481475, |
|
"loss": 0.6638, |
|
"step": 1110 |
|
}, |
|
{ |
|
"epoch": 2.07, |
|
"grad_norm": 2.0362918376922607, |
|
"learning_rate": 0.003963888888888889, |
|
"loss": 0.6194, |
|
"step": 1120 |
|
}, |
|
{ |
|
"epoch": 2.09, |
|
"grad_norm": 2.7346765995025635, |
|
"learning_rate": 0.00395462962962963, |
|
"loss": 0.6721, |
|
"step": 1130 |
|
}, |
|
{ |
|
"epoch": 2.11, |
|
"grad_norm": 1.8441007137298584, |
|
"learning_rate": 0.00394537037037037, |
|
"loss": 0.7129, |
|
"step": 1140 |
|
}, |
|
{ |
|
"epoch": 2.13, |
|
"grad_norm": 2.510021448135376, |
|
"learning_rate": 0.003936111111111112, |
|
"loss": 0.669, |
|
"step": 1150 |
|
}, |
|
{ |
|
"epoch": 2.15, |
|
"grad_norm": 2.9301140308380127, |
|
"learning_rate": 0.003926851851851852, |
|
"loss": 0.6342, |
|
"step": 1160 |
|
}, |
|
{ |
|
"epoch": 2.16, |
|
"grad_norm": 1.851173996925354, |
|
"learning_rate": 0.003917592592592592, |
|
"loss": 0.6602, |
|
"step": 1170 |
|
}, |
|
{ |
|
"epoch": 2.18, |
|
"grad_norm": 2.490886926651001, |
|
"learning_rate": 0.003908333333333333, |
|
"loss": 0.6195, |
|
"step": 1180 |
|
}, |
|
{ |
|
"epoch": 2.2, |
|
"grad_norm": 1.5079678297042847, |
|
"learning_rate": 0.0038990740740740743, |
|
"loss": 0.8126, |
|
"step": 1190 |
|
}, |
|
{ |
|
"epoch": 2.22, |
|
"grad_norm": 1.9084036350250244, |
|
"learning_rate": 0.003889814814814815, |
|
"loss": 0.6769, |
|
"step": 1200 |
|
}, |
|
{ |
|
"epoch": 2.24, |
|
"grad_norm": 1.8284987211227417, |
|
"learning_rate": 0.0038814814814814814, |
|
"loss": 0.7317, |
|
"step": 1210 |
|
}, |
|
{ |
|
"epoch": 2.26, |
|
"grad_norm": 3.351156234741211, |
|
"learning_rate": 0.0038722222222222226, |
|
"loss": 0.7049, |
|
"step": 1220 |
|
}, |
|
{ |
|
"epoch": 2.28, |
|
"grad_norm": 2.314993143081665, |
|
"learning_rate": 0.003862962962962963, |
|
"loss": 0.6291, |
|
"step": 1230 |
|
}, |
|
{ |
|
"epoch": 2.29, |
|
"grad_norm": 2.27850341796875, |
|
"learning_rate": 0.003853703703703704, |
|
"loss": 0.6749, |
|
"step": 1240 |
|
}, |
|
{ |
|
"epoch": 2.31, |
|
"grad_norm": 2.4541099071502686, |
|
"learning_rate": 0.003844444444444444, |
|
"loss": 0.7365, |
|
"step": 1250 |
|
}, |
|
{ |
|
"epoch": 2.33, |
|
"grad_norm": 1.6301252841949463, |
|
"learning_rate": 0.0038351851851851854, |
|
"loss": 0.6236, |
|
"step": 1260 |
|
}, |
|
{ |
|
"epoch": 2.35, |
|
"grad_norm": 2.035531520843506, |
|
"learning_rate": 0.003825925925925926, |
|
"loss": 0.7176, |
|
"step": 1270 |
|
}, |
|
{ |
|
"epoch": 2.37, |
|
"grad_norm": 2.2524566650390625, |
|
"learning_rate": 0.0038166666666666666, |
|
"loss": 0.7119, |
|
"step": 1280 |
|
}, |
|
{ |
|
"epoch": 2.39, |
|
"grad_norm": 1.778144121170044, |
|
"learning_rate": 0.003807407407407408, |
|
"loss": 0.7021, |
|
"step": 1290 |
|
}, |
|
{ |
|
"epoch": 2.41, |
|
"grad_norm": 1.8808122873306274, |
|
"learning_rate": 0.003798148148148148, |
|
"loss": 0.6876, |
|
"step": 1300 |
|
}, |
|
{ |
|
"epoch": 2.42, |
|
"grad_norm": 2.720100164413452, |
|
"learning_rate": 0.003788888888888889, |
|
"loss": 0.7559, |
|
"step": 1310 |
|
}, |
|
{ |
|
"epoch": 2.44, |
|
"grad_norm": 1.9608194828033447, |
|
"learning_rate": 0.0037796296296296297, |
|
"loss": 0.6799, |
|
"step": 1320 |
|
}, |
|
{ |
|
"epoch": 2.46, |
|
"grad_norm": 1.8149750232696533, |
|
"learning_rate": 0.0037703703703703705, |
|
"loss": 0.6701, |
|
"step": 1330 |
|
}, |
|
{ |
|
"epoch": 2.48, |
|
"grad_norm": 1.8840572834014893, |
|
"learning_rate": 0.0037611111111111113, |
|
"loss": 0.7146, |
|
"step": 1340 |
|
}, |
|
{ |
|
"epoch": 2.5, |
|
"grad_norm": 1.7513312101364136, |
|
"learning_rate": 0.0037518518518518517, |
|
"loss": 0.6712, |
|
"step": 1350 |
|
}, |
|
{ |
|
"epoch": 2.52, |
|
"grad_norm": 1.748304009437561, |
|
"learning_rate": 0.0037425925925925925, |
|
"loss": 0.7049, |
|
"step": 1360 |
|
}, |
|
{ |
|
"epoch": 2.53, |
|
"grad_norm": 1.2645372152328491, |
|
"learning_rate": 0.0037333333333333337, |
|
"loss": 0.6088, |
|
"step": 1370 |
|
}, |
|
{ |
|
"epoch": 2.55, |
|
"grad_norm": 3.14270281791687, |
|
"learning_rate": 0.003724074074074074, |
|
"loss": 0.7163, |
|
"step": 1380 |
|
}, |
|
{ |
|
"epoch": 2.57, |
|
"grad_norm": 2.0332484245300293, |
|
"learning_rate": 0.003714814814814815, |
|
"loss": 0.7047, |
|
"step": 1390 |
|
}, |
|
{ |
|
"epoch": 2.59, |
|
"grad_norm": 1.6158325672149658, |
|
"learning_rate": 0.0037055555555555557, |
|
"loss": 0.6448, |
|
"step": 1400 |
|
}, |
|
{ |
|
"epoch": 2.61, |
|
"grad_norm": 1.9182847738265991, |
|
"learning_rate": 0.0036962962962962965, |
|
"loss": 0.711, |
|
"step": 1410 |
|
}, |
|
{ |
|
"epoch": 2.63, |
|
"grad_norm": 2.140184164047241, |
|
"learning_rate": 0.0036870370370370373, |
|
"loss": 0.6617, |
|
"step": 1420 |
|
}, |
|
{ |
|
"epoch": 2.65, |
|
"grad_norm": 1.746243953704834, |
|
"learning_rate": 0.0036777777777777776, |
|
"loss": 0.7103, |
|
"step": 1430 |
|
}, |
|
{ |
|
"epoch": 2.66, |
|
"grad_norm": 2.6956822872161865, |
|
"learning_rate": 0.003668518518518519, |
|
"loss": 0.7163, |
|
"step": 1440 |
|
}, |
|
{ |
|
"epoch": 2.68, |
|
"grad_norm": 2.7663381099700928, |
|
"learning_rate": 0.0036592592592592592, |
|
"loss": 0.6509, |
|
"step": 1450 |
|
}, |
|
{ |
|
"epoch": 2.7, |
|
"grad_norm": 2.410922050476074, |
|
"learning_rate": 0.00365, |
|
"loss": 0.6046, |
|
"step": 1460 |
|
}, |
|
{ |
|
"epoch": 2.72, |
|
"grad_norm": 1.8373881578445435, |
|
"learning_rate": 0.0036407407407407404, |
|
"loss": 0.701, |
|
"step": 1470 |
|
}, |
|
{ |
|
"epoch": 2.74, |
|
"grad_norm": 1.8466787338256836, |
|
"learning_rate": 0.0036314814814814816, |
|
"loss": 0.6391, |
|
"step": 1480 |
|
}, |
|
{ |
|
"epoch": 2.76, |
|
"grad_norm": 2.0422921180725098, |
|
"learning_rate": 0.0036222222222222224, |
|
"loss": 0.7001, |
|
"step": 1490 |
|
}, |
|
{ |
|
"epoch": 2.78, |
|
"grad_norm": 1.739327311515808, |
|
"learning_rate": 0.003612962962962963, |
|
"loss": 0.6194, |
|
"step": 1500 |
|
}, |
|
{ |
|
"epoch": 2.79, |
|
"grad_norm": 2.094775915145874, |
|
"learning_rate": 0.003603703703703704, |
|
"loss": 0.5824, |
|
"step": 1510 |
|
}, |
|
{ |
|
"epoch": 2.81, |
|
"grad_norm": 2.5036680698394775, |
|
"learning_rate": 0.003594444444444445, |
|
"loss": 0.6477, |
|
"step": 1520 |
|
}, |
|
{ |
|
"epoch": 2.83, |
|
"grad_norm": 1.7037166357040405, |
|
"learning_rate": 0.003585185185185185, |
|
"loss": 0.6822, |
|
"step": 1530 |
|
}, |
|
{ |
|
"epoch": 2.85, |
|
"grad_norm": 2.193714141845703, |
|
"learning_rate": 0.003575925925925926, |
|
"loss": 0.7301, |
|
"step": 1540 |
|
}, |
|
{ |
|
"epoch": 2.87, |
|
"grad_norm": 4.449968338012695, |
|
"learning_rate": 0.0035666666666666668, |
|
"loss": 0.7114, |
|
"step": 1550 |
|
}, |
|
{ |
|
"epoch": 2.89, |
|
"grad_norm": 1.640833854675293, |
|
"learning_rate": 0.0035574074074074076, |
|
"loss": 0.5631, |
|
"step": 1560 |
|
}, |
|
{ |
|
"epoch": 2.9, |
|
"grad_norm": 2.4068825244903564, |
|
"learning_rate": 0.003548148148148148, |
|
"loss": 0.5896, |
|
"step": 1570 |
|
}, |
|
{ |
|
"epoch": 2.92, |
|
"grad_norm": 2.7234010696411133, |
|
"learning_rate": 0.0035388888888888887, |
|
"loss": 0.7563, |
|
"step": 1580 |
|
}, |
|
{ |
|
"epoch": 2.94, |
|
"grad_norm": 1.8094590902328491, |
|
"learning_rate": 0.00352962962962963, |
|
"loss": 0.7116, |
|
"step": 1590 |
|
}, |
|
{ |
|
"epoch": 2.96, |
|
"grad_norm": 2.4650044441223145, |
|
"learning_rate": 0.0035203703703703703, |
|
"loss": 0.6583, |
|
"step": 1600 |
|
}, |
|
{ |
|
"epoch": 2.98, |
|
"grad_norm": 1.8076032400131226, |
|
"learning_rate": 0.003511111111111111, |
|
"loss": 0.5859, |
|
"step": 1610 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"grad_norm": 2.924779176712036, |
|
"learning_rate": 0.0035018518518518523, |
|
"loss": 0.7151, |
|
"step": 1620 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"eval_accuracy": 0.9551686951163149, |
|
"eval_f1": 0.957151302239187, |
|
"eval_loss": 0.12974682450294495, |
|
"eval_precision": 0.961140704317014, |
|
"eval_recall": 0.9555174635857528, |
|
"eval_runtime": 55.4288, |
|
"eval_samples_per_second": 117.105, |
|
"eval_steps_per_second": 7.325, |
|
"step": 1621 |
|
}, |
|
{ |
|
"epoch": 3.02, |
|
"grad_norm": 2.4308605194091797, |
|
"learning_rate": 0.0034925925925925927, |
|
"loss": 0.6032, |
|
"step": 1630 |
|
}, |
|
{ |
|
"epoch": 3.03, |
|
"grad_norm": 1.6040972471237183, |
|
"learning_rate": 0.0034833333333333335, |
|
"loss": 0.6163, |
|
"step": 1640 |
|
}, |
|
{ |
|
"epoch": 3.05, |
|
"grad_norm": 1.7093302011489868, |
|
"learning_rate": 0.003474074074074074, |
|
"loss": 0.6484, |
|
"step": 1650 |
|
}, |
|
{ |
|
"epoch": 3.07, |
|
"grad_norm": 2.0212676525115967, |
|
"learning_rate": 0.003464814814814815, |
|
"loss": 0.5747, |
|
"step": 1660 |
|
}, |
|
{ |
|
"epoch": 3.09, |
|
"grad_norm": 3.725684642791748, |
|
"learning_rate": 0.0034555555555555555, |
|
"loss": 0.6488, |
|
"step": 1670 |
|
}, |
|
{ |
|
"epoch": 3.11, |
|
"grad_norm": 2.5696334838867188, |
|
"learning_rate": 0.0034462962962962963, |
|
"loss": 0.7223, |
|
"step": 1680 |
|
}, |
|
{ |
|
"epoch": 3.13, |
|
"grad_norm": 1.7049779891967773, |
|
"learning_rate": 0.003437037037037037, |
|
"loss": 0.641, |
|
"step": 1690 |
|
}, |
|
{ |
|
"epoch": 3.15, |
|
"grad_norm": 2.035477638244629, |
|
"learning_rate": 0.003427777777777778, |
|
"loss": 0.6237, |
|
"step": 1700 |
|
}, |
|
{ |
|
"epoch": 3.16, |
|
"grad_norm": 1.8789253234863281, |
|
"learning_rate": 0.0034185185185185187, |
|
"loss": 0.6312, |
|
"step": 1710 |
|
}, |
|
{ |
|
"epoch": 3.18, |
|
"grad_norm": 2.2245302200317383, |
|
"learning_rate": 0.003409259259259259, |
|
"loss": 0.5985, |
|
"step": 1720 |
|
}, |
|
{ |
|
"epoch": 3.2, |
|
"grad_norm": 2.150331735610962, |
|
"learning_rate": 0.0034000000000000002, |
|
"loss": 0.612, |
|
"step": 1730 |
|
}, |
|
{ |
|
"epoch": 3.22, |
|
"grad_norm": 1.986059546470642, |
|
"learning_rate": 0.003390740740740741, |
|
"loss": 0.6001, |
|
"step": 1740 |
|
}, |
|
{ |
|
"epoch": 3.24, |
|
"grad_norm": 1.4887031316757202, |
|
"learning_rate": 0.0033814814814814814, |
|
"loss": 0.6574, |
|
"step": 1750 |
|
}, |
|
{ |
|
"epoch": 3.26, |
|
"grad_norm": 1.1965304613113403, |
|
"learning_rate": 0.003372222222222222, |
|
"loss": 0.6766, |
|
"step": 1760 |
|
}, |
|
{ |
|
"epoch": 3.27, |
|
"grad_norm": 1.564257025718689, |
|
"learning_rate": 0.0033629629629629634, |
|
"loss": 0.6955, |
|
"step": 1770 |
|
}, |
|
{ |
|
"epoch": 3.29, |
|
"grad_norm": 5.508277416229248, |
|
"learning_rate": 0.003353703703703704, |
|
"loss": 0.6097, |
|
"step": 1780 |
|
}, |
|
{ |
|
"epoch": 3.31, |
|
"grad_norm": 1.9245549440383911, |
|
"learning_rate": 0.0033444444444444446, |
|
"loss": 0.6515, |
|
"step": 1790 |
|
}, |
|
{ |
|
"epoch": 3.33, |
|
"grad_norm": 2.3552732467651367, |
|
"learning_rate": 0.003335185185185185, |
|
"loss": 0.6401, |
|
"step": 1800 |
|
}, |
|
{ |
|
"epoch": 3.35, |
|
"grad_norm": 1.622826099395752, |
|
"learning_rate": 0.003325925925925926, |
|
"loss": 0.7, |
|
"step": 1810 |
|
}, |
|
{ |
|
"epoch": 3.37, |
|
"grad_norm": 1.6205705404281616, |
|
"learning_rate": 0.0033166666666666665, |
|
"loss": 0.5968, |
|
"step": 1820 |
|
}, |
|
{ |
|
"epoch": 3.39, |
|
"grad_norm": 1.9269992113113403, |
|
"learning_rate": 0.0033074074074074073, |
|
"loss": 0.6, |
|
"step": 1830 |
|
}, |
|
{ |
|
"epoch": 3.4, |
|
"grad_norm": 2.3607101440429688, |
|
"learning_rate": 0.0032981481481481486, |
|
"loss": 0.5655, |
|
"step": 1840 |
|
}, |
|
{ |
|
"epoch": 3.42, |
|
"grad_norm": 1.4156101942062378, |
|
"learning_rate": 0.003288888888888889, |
|
"loss": 0.6071, |
|
"step": 1850 |
|
}, |
|
{ |
|
"epoch": 3.44, |
|
"grad_norm": 2.652838706970215, |
|
"learning_rate": 0.0032796296296296297, |
|
"loss": 0.5959, |
|
"step": 1860 |
|
}, |
|
{ |
|
"epoch": 3.46, |
|
"grad_norm": 1.7326959371566772, |
|
"learning_rate": 0.00327037037037037, |
|
"loss": 0.7051, |
|
"step": 1870 |
|
}, |
|
{ |
|
"epoch": 3.48, |
|
"grad_norm": 1.4709230661392212, |
|
"learning_rate": 0.0032611111111111113, |
|
"loss": 0.68, |
|
"step": 1880 |
|
}, |
|
{ |
|
"epoch": 3.5, |
|
"grad_norm": 2.1479110717773438, |
|
"learning_rate": 0.003251851851851852, |
|
"loss": 0.6491, |
|
"step": 1890 |
|
}, |
|
{ |
|
"epoch": 3.52, |
|
"grad_norm": 2.462580442428589, |
|
"learning_rate": 0.0032425925925925925, |
|
"loss": 0.6129, |
|
"step": 1900 |
|
}, |
|
{ |
|
"epoch": 3.53, |
|
"grad_norm": 2.8849940299987793, |
|
"learning_rate": 0.0032333333333333333, |
|
"loss": 0.6553, |
|
"step": 1910 |
|
}, |
|
{ |
|
"epoch": 3.55, |
|
"grad_norm": 1.6244118213653564, |
|
"learning_rate": 0.003224074074074074, |
|
"loss": 0.5845, |
|
"step": 1920 |
|
}, |
|
{ |
|
"epoch": 3.57, |
|
"grad_norm": 2.803816795349121, |
|
"learning_rate": 0.003214814814814815, |
|
"loss": 0.6695, |
|
"step": 1930 |
|
}, |
|
{ |
|
"epoch": 3.59, |
|
"grad_norm": 2.4138996601104736, |
|
"learning_rate": 0.0032055555555555552, |
|
"loss": 0.707, |
|
"step": 1940 |
|
}, |
|
{ |
|
"epoch": 3.61, |
|
"grad_norm": 1.1193430423736572, |
|
"learning_rate": 0.0031962962962962965, |
|
"loss": 0.6003, |
|
"step": 1950 |
|
}, |
|
{ |
|
"epoch": 3.63, |
|
"grad_norm": 1.8593242168426514, |
|
"learning_rate": 0.0031870370370370373, |
|
"loss": 0.5724, |
|
"step": 1960 |
|
}, |
|
{ |
|
"epoch": 3.64, |
|
"grad_norm": 2.084005117416382, |
|
"learning_rate": 0.0031777777777777776, |
|
"loss": 0.6246, |
|
"step": 1970 |
|
}, |
|
{ |
|
"epoch": 3.66, |
|
"grad_norm": 1.32781982421875, |
|
"learning_rate": 0.0031685185185185184, |
|
"loss": 0.6075, |
|
"step": 1980 |
|
}, |
|
{ |
|
"epoch": 3.68, |
|
"grad_norm": 2.452892303466797, |
|
"learning_rate": 0.0031592592592592597, |
|
"loss": 0.6104, |
|
"step": 1990 |
|
}, |
|
{ |
|
"epoch": 3.7, |
|
"grad_norm": 1.6961791515350342, |
|
"learning_rate": 0.00315, |
|
"loss": 0.5483, |
|
"step": 2000 |
|
}, |
|
{ |
|
"epoch": 3.72, |
|
"grad_norm": 2.94992733001709, |
|
"learning_rate": 0.003140740740740741, |
|
"loss": 0.5403, |
|
"step": 2010 |
|
}, |
|
{ |
|
"epoch": 3.74, |
|
"grad_norm": 2.3624625205993652, |
|
"learning_rate": 0.0031314814814814816, |
|
"loss": 0.6748, |
|
"step": 2020 |
|
}, |
|
{ |
|
"epoch": 3.76, |
|
"grad_norm": 2.0412802696228027, |
|
"learning_rate": 0.0031222222222222224, |
|
"loss": 0.4809, |
|
"step": 2030 |
|
}, |
|
{ |
|
"epoch": 3.77, |
|
"grad_norm": 1.5728198289871216, |
|
"learning_rate": 0.003112962962962963, |
|
"loss": 0.5755, |
|
"step": 2040 |
|
}, |
|
{ |
|
"epoch": 3.79, |
|
"grad_norm": 1.3974460363388062, |
|
"learning_rate": 0.0031037037037037036, |
|
"loss": 0.5436, |
|
"step": 2050 |
|
}, |
|
{ |
|
"epoch": 3.81, |
|
"grad_norm": 1.5604791641235352, |
|
"learning_rate": 0.003094444444444445, |
|
"loss": 0.6197, |
|
"step": 2060 |
|
}, |
|
{ |
|
"epoch": 3.83, |
|
"grad_norm": 2.040276288986206, |
|
"learning_rate": 0.003085185185185185, |
|
"loss": 0.6062, |
|
"step": 2070 |
|
}, |
|
{ |
|
"epoch": 3.85, |
|
"grad_norm": 3.173293113708496, |
|
"learning_rate": 0.003075925925925926, |
|
"loss": 0.717, |
|
"step": 2080 |
|
}, |
|
{ |
|
"epoch": 3.87, |
|
"grad_norm": 2.018458843231201, |
|
"learning_rate": 0.0030666666666666663, |
|
"loss": 0.6418, |
|
"step": 2090 |
|
}, |
|
{ |
|
"epoch": 3.89, |
|
"grad_norm": 1.7057709693908691, |
|
"learning_rate": 0.0030574074074074076, |
|
"loss": 0.5948, |
|
"step": 2100 |
|
}, |
|
{ |
|
"epoch": 3.9, |
|
"grad_norm": 2.133119821548462, |
|
"learning_rate": 0.0030481481481481484, |
|
"loss": 0.6099, |
|
"step": 2110 |
|
}, |
|
{ |
|
"epoch": 3.92, |
|
"grad_norm": 2.217524290084839, |
|
"learning_rate": 0.0030388888888888887, |
|
"loss": 0.6263, |
|
"step": 2120 |
|
}, |
|
{ |
|
"epoch": 3.94, |
|
"grad_norm": 2.3708674907684326, |
|
"learning_rate": 0.00302962962962963, |
|
"loss": 0.5851, |
|
"step": 2130 |
|
}, |
|
{ |
|
"epoch": 3.96, |
|
"grad_norm": 1.4289004802703857, |
|
"learning_rate": 0.0030203703703703707, |
|
"loss": 0.5971, |
|
"step": 2140 |
|
}, |
|
{ |
|
"epoch": 3.98, |
|
"grad_norm": 2.463360071182251, |
|
"learning_rate": 0.003011111111111111, |
|
"loss": 0.5889, |
|
"step": 2150 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"grad_norm": 1.5189619064331055, |
|
"learning_rate": 0.003001851851851852, |
|
"loss": 0.4964, |
|
"step": 2160 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"eval_accuracy": 0.97350177168387, |
|
"eval_f1": 0.9729772233764705, |
|
"eval_loss": 0.07412911206483841, |
|
"eval_precision": 0.9765336155795084, |
|
"eval_recall": 0.9701628526053132, |
|
"eval_runtime": 55.303, |
|
"eval_samples_per_second": 117.372, |
|
"eval_steps_per_second": 7.341, |
|
"step": 2162 |
|
}, |
|
{ |
|
"epoch": 4.01, |
|
"grad_norm": 2.313246965408325, |
|
"learning_rate": 0.0029925925925925927, |
|
"loss": 0.5979, |
|
"step": 2170 |
|
}, |
|
{ |
|
"epoch": 4.03, |
|
"grad_norm": 1.554916501045227, |
|
"learning_rate": 0.0029833333333333335, |
|
"loss": 0.621, |
|
"step": 2180 |
|
}, |
|
{ |
|
"epoch": 4.05, |
|
"grad_norm": 2.7611353397369385, |
|
"learning_rate": 0.002974074074074074, |
|
"loss": 0.6662, |
|
"step": 2190 |
|
}, |
|
{ |
|
"epoch": 4.07, |
|
"grad_norm": 1.206809639930725, |
|
"learning_rate": 0.0029648148148148147, |
|
"loss": 0.6061, |
|
"step": 2200 |
|
}, |
|
{ |
|
"epoch": 4.09, |
|
"grad_norm": 1.876120686531067, |
|
"learning_rate": 0.002955555555555556, |
|
"loss": 0.5656, |
|
"step": 2210 |
|
}, |
|
{ |
|
"epoch": 4.11, |
|
"grad_norm": 2.117579936981201, |
|
"learning_rate": 0.0029462962962962963, |
|
"loss": 0.611, |
|
"step": 2220 |
|
}, |
|
{ |
|
"epoch": 4.13, |
|
"grad_norm": 2.014042377471924, |
|
"learning_rate": 0.002937037037037037, |
|
"loss": 0.5893, |
|
"step": 2230 |
|
}, |
|
{ |
|
"epoch": 4.14, |
|
"grad_norm": 1.6792641878128052, |
|
"learning_rate": 0.0029277777777777783, |
|
"loss": 0.5691, |
|
"step": 2240 |
|
}, |
|
{ |
|
"epoch": 4.16, |
|
"grad_norm": 1.2664446830749512, |
|
"learning_rate": 0.0029185185185185186, |
|
"loss": 0.5729, |
|
"step": 2250 |
|
}, |
|
{ |
|
"epoch": 4.18, |
|
"grad_norm": 3.1098408699035645, |
|
"learning_rate": 0.0029092592592592594, |
|
"loss": 0.6115, |
|
"step": 2260 |
|
}, |
|
{ |
|
"epoch": 4.2, |
|
"grad_norm": 2.1186132431030273, |
|
"learning_rate": 0.0029, |
|
"loss": 0.596, |
|
"step": 2270 |
|
}, |
|
{ |
|
"epoch": 4.22, |
|
"grad_norm": 1.8666993379592896, |
|
"learning_rate": 0.002890740740740741, |
|
"loss": 0.6021, |
|
"step": 2280 |
|
}, |
|
{ |
|
"epoch": 4.24, |
|
"grad_norm": 1.655548334121704, |
|
"learning_rate": 0.0028814814814814814, |
|
"loss": 0.5928, |
|
"step": 2290 |
|
}, |
|
{ |
|
"epoch": 4.26, |
|
"grad_norm": 2.1081762313842773, |
|
"learning_rate": 0.002872222222222222, |
|
"loss": 0.5683, |
|
"step": 2300 |
|
}, |
|
{ |
|
"epoch": 4.27, |
|
"grad_norm": 1.4986019134521484, |
|
"learning_rate": 0.0028629629629629626, |
|
"loss": 0.562, |
|
"step": 2310 |
|
}, |
|
{ |
|
"epoch": 4.29, |
|
"grad_norm": 2.6111056804656982, |
|
"learning_rate": 0.002853703703703704, |
|
"loss": 0.5804, |
|
"step": 2320 |
|
}, |
|
{ |
|
"epoch": 4.31, |
|
"grad_norm": 1.8590542078018188, |
|
"learning_rate": 0.0028444444444444446, |
|
"loss": 0.5409, |
|
"step": 2330 |
|
}, |
|
{ |
|
"epoch": 4.33, |
|
"grad_norm": 1.897940754890442, |
|
"learning_rate": 0.002835185185185185, |
|
"loss": 0.6299, |
|
"step": 2340 |
|
}, |
|
{ |
|
"epoch": 4.35, |
|
"grad_norm": 1.9114952087402344, |
|
"learning_rate": 0.002825925925925926, |
|
"loss": 0.5333, |
|
"step": 2350 |
|
}, |
|
{ |
|
"epoch": 4.37, |
|
"grad_norm": 2.015868663787842, |
|
"learning_rate": 0.002816666666666667, |
|
"loss": 0.5611, |
|
"step": 2360 |
|
}, |
|
{ |
|
"epoch": 4.38, |
|
"grad_norm": 1.5808693170547485, |
|
"learning_rate": 0.0028074074074074073, |
|
"loss": 0.6212, |
|
"step": 2370 |
|
}, |
|
{ |
|
"epoch": 4.4, |
|
"grad_norm": 1.8288767337799072, |
|
"learning_rate": 0.002798148148148148, |
|
"loss": 0.604, |
|
"step": 2380 |
|
}, |
|
{ |
|
"epoch": 4.42, |
|
"grad_norm": 1.389092206954956, |
|
"learning_rate": 0.002788888888888889, |
|
"loss": 0.4657, |
|
"step": 2390 |
|
}, |
|
{ |
|
"epoch": 4.44, |
|
"grad_norm": 2.551008701324463, |
|
"learning_rate": 0.0027796296296296297, |
|
"loss": 0.6214, |
|
"step": 2400 |
|
}, |
|
{ |
|
"epoch": 4.46, |
|
"grad_norm": 2.26882266998291, |
|
"learning_rate": 0.0027703703703703705, |
|
"loss": 0.5511, |
|
"step": 2410 |
|
}, |
|
{ |
|
"epoch": 4.48, |
|
"grad_norm": 2.3850460052490234, |
|
"learning_rate": 0.002761111111111111, |
|
"loss": 0.5768, |
|
"step": 2420 |
|
}, |
|
{ |
|
"epoch": 4.5, |
|
"grad_norm": 1.9451379776000977, |
|
"learning_rate": 0.002751851851851852, |
|
"loss": 0.5732, |
|
"step": 2430 |
|
}, |
|
{ |
|
"epoch": 4.51, |
|
"grad_norm": 1.6541566848754883, |
|
"learning_rate": 0.0027425925925925925, |
|
"loss": 0.549, |
|
"step": 2440 |
|
}, |
|
{ |
|
"epoch": 4.53, |
|
"grad_norm": 1.3802433013916016, |
|
"learning_rate": 0.0027333333333333333, |
|
"loss": 0.5486, |
|
"step": 2450 |
|
}, |
|
{ |
|
"epoch": 4.55, |
|
"grad_norm": 1.941901683807373, |
|
"learning_rate": 0.0027240740740740745, |
|
"loss": 0.6119, |
|
"step": 2460 |
|
}, |
|
{ |
|
"epoch": 4.57, |
|
"grad_norm": 1.9203604459762573, |
|
"learning_rate": 0.002714814814814815, |
|
"loss": 0.632, |
|
"step": 2470 |
|
}, |
|
{ |
|
"epoch": 4.59, |
|
"grad_norm": 2.170319080352783, |
|
"learning_rate": 0.0027055555555555557, |
|
"loss": 0.5819, |
|
"step": 2480 |
|
}, |
|
{ |
|
"epoch": 4.61, |
|
"grad_norm": 1.8074395656585693, |
|
"learning_rate": 0.002696296296296296, |
|
"loss": 0.6101, |
|
"step": 2490 |
|
}, |
|
{ |
|
"epoch": 4.63, |
|
"grad_norm": 2.010608196258545, |
|
"learning_rate": 0.0026870370370370373, |
|
"loss": 0.492, |
|
"step": 2500 |
|
}, |
|
{ |
|
"epoch": 4.64, |
|
"grad_norm": 1.3907346725463867, |
|
"learning_rate": 0.002677777777777778, |
|
"loss": 0.5658, |
|
"step": 2510 |
|
}, |
|
{ |
|
"epoch": 4.66, |
|
"grad_norm": 1.8057767152786255, |
|
"learning_rate": 0.0026685185185185184, |
|
"loss": 0.509, |
|
"step": 2520 |
|
}, |
|
{ |
|
"epoch": 4.68, |
|
"grad_norm": 2.036869764328003, |
|
"learning_rate": 0.002659259259259259, |
|
"loss": 0.5711, |
|
"step": 2530 |
|
}, |
|
{ |
|
"epoch": 4.7, |
|
"grad_norm": 1.673647165298462, |
|
"learning_rate": 0.00265, |
|
"loss": 0.5692, |
|
"step": 2540 |
|
}, |
|
{ |
|
"epoch": 4.72, |
|
"grad_norm": 2.044961929321289, |
|
"learning_rate": 0.002640740740740741, |
|
"loss": 0.6153, |
|
"step": 2550 |
|
}, |
|
{ |
|
"epoch": 4.74, |
|
"grad_norm": 1.3360215425491333, |
|
"learning_rate": 0.002631481481481481, |
|
"loss": 0.5785, |
|
"step": 2560 |
|
}, |
|
{ |
|
"epoch": 4.75, |
|
"grad_norm": 2.4942688941955566, |
|
"learning_rate": 0.0026222222222222224, |
|
"loss": 0.5423, |
|
"step": 2570 |
|
}, |
|
{ |
|
"epoch": 4.77, |
|
"grad_norm": 1.556859016418457, |
|
"learning_rate": 0.002612962962962963, |
|
"loss": 0.5783, |
|
"step": 2580 |
|
}, |
|
{ |
|
"epoch": 4.79, |
|
"grad_norm": 3.010946750640869, |
|
"learning_rate": 0.0026037037037037036, |
|
"loss": 0.5786, |
|
"step": 2590 |
|
}, |
|
{ |
|
"epoch": 4.81, |
|
"grad_norm": 1.5896031856536865, |
|
"learning_rate": 0.0025944444444444444, |
|
"loss": 0.6075, |
|
"step": 2600 |
|
}, |
|
{ |
|
"epoch": 4.83, |
|
"grad_norm": 1.9418363571166992, |
|
"learning_rate": 0.0025851851851851856, |
|
"loss": 0.5482, |
|
"step": 2610 |
|
}, |
|
{ |
|
"epoch": 4.85, |
|
"grad_norm": 1.9787338972091675, |
|
"learning_rate": 0.002576851851851852, |
|
"loss": 0.6026, |
|
"step": 2620 |
|
}, |
|
{ |
|
"epoch": 4.87, |
|
"grad_norm": 2.7021665573120117, |
|
"learning_rate": 0.0025675925925925927, |
|
"loss": 0.4977, |
|
"step": 2630 |
|
}, |
|
{ |
|
"epoch": 4.88, |
|
"grad_norm": 2.2835893630981445, |
|
"learning_rate": 0.0025583333333333335, |
|
"loss": 0.5375, |
|
"step": 2640 |
|
}, |
|
{ |
|
"epoch": 4.9, |
|
"grad_norm": 1.9748303890228271, |
|
"learning_rate": 0.0025490740740740743, |
|
"loss": 0.5263, |
|
"step": 2650 |
|
}, |
|
{ |
|
"epoch": 4.92, |
|
"grad_norm": 1.6271171569824219, |
|
"learning_rate": 0.0025398148148148146, |
|
"loss": 0.5275, |
|
"step": 2660 |
|
}, |
|
{ |
|
"epoch": 4.94, |
|
"grad_norm": 2.3140103816986084, |
|
"learning_rate": 0.0025305555555555554, |
|
"loss": 0.5493, |
|
"step": 2670 |
|
}, |
|
{ |
|
"epoch": 4.96, |
|
"grad_norm": 2.2506508827209473, |
|
"learning_rate": 0.0025212962962962967, |
|
"loss": 0.5756, |
|
"step": 2680 |
|
}, |
|
{ |
|
"epoch": 4.98, |
|
"grad_norm": 2.5812437534332275, |
|
"learning_rate": 0.002512037037037037, |
|
"loss": 0.511, |
|
"step": 2690 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"grad_norm": 1.886813998222351, |
|
"learning_rate": 0.002502777777777778, |
|
"loss": 0.5509, |
|
"step": 2700 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"eval_accuracy": 0.9770451394238175, |
|
"eval_f1": 0.9783131597050811, |
|
"eval_loss": 0.06713523715734482, |
|
"eval_precision": 0.9775503610869588, |
|
"eval_recall": 0.9795962655538212, |
|
"eval_runtime": 55.3992, |
|
"eval_samples_per_second": 117.168, |
|
"eval_steps_per_second": 7.329, |
|
"step": 2702 |
|
}, |
|
{ |
|
"epoch": 5.01, |
|
"grad_norm": 2.990206241607666, |
|
"learning_rate": 0.0024935185185185186, |
|
"loss": 0.5212, |
|
"step": 2710 |
|
}, |
|
{ |
|
"epoch": 5.03, |
|
"grad_norm": 1.2570441961288452, |
|
"learning_rate": 0.0024842592592592594, |
|
"loss": 0.4703, |
|
"step": 2720 |
|
}, |
|
{ |
|
"epoch": 5.05, |
|
"grad_norm": 1.6648197174072266, |
|
"learning_rate": 0.002475, |
|
"loss": 0.6173, |
|
"step": 2730 |
|
}, |
|
{ |
|
"epoch": 5.07, |
|
"grad_norm": 1.9291369915008545, |
|
"learning_rate": 0.002465740740740741, |
|
"loss": 0.5929, |
|
"step": 2740 |
|
}, |
|
{ |
|
"epoch": 5.09, |
|
"grad_norm": 2.2894320487976074, |
|
"learning_rate": 0.0024564814814814814, |
|
"loss": 0.5282, |
|
"step": 2750 |
|
}, |
|
{ |
|
"epoch": 5.11, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.002448148148148148, |
|
"loss": 0.5085, |
|
"step": 2760 |
|
}, |
|
{ |
|
"epoch": 5.12, |
|
"grad_norm": 2.4065709114074707, |
|
"learning_rate": 0.002438888888888889, |
|
"loss": 0.5983, |
|
"step": 2770 |
|
}, |
|
{ |
|
"epoch": 5.14, |
|
"grad_norm": 1.4784891605377197, |
|
"learning_rate": 0.0024296296296296297, |
|
"loss": 0.5586, |
|
"step": 2780 |
|
}, |
|
{ |
|
"epoch": 5.16, |
|
"grad_norm": 3.6442089080810547, |
|
"learning_rate": 0.0024203703703703705, |
|
"loss": 0.5824, |
|
"step": 2790 |
|
}, |
|
{ |
|
"epoch": 5.18, |
|
"grad_norm": 1.4887707233428955, |
|
"learning_rate": 0.0024111111111111113, |
|
"loss": 0.6023, |
|
"step": 2800 |
|
}, |
|
{ |
|
"epoch": 5.2, |
|
"grad_norm": 1.4839690923690796, |
|
"learning_rate": 0.002401851851851852, |
|
"loss": 0.5486, |
|
"step": 2810 |
|
}, |
|
{ |
|
"epoch": 5.22, |
|
"grad_norm": 1.522079586982727, |
|
"learning_rate": 0.0023925925925925924, |
|
"loss": 0.5803, |
|
"step": 2820 |
|
}, |
|
{ |
|
"epoch": 5.24, |
|
"grad_norm": 1.6155500411987305, |
|
"learning_rate": 0.0023833333333333337, |
|
"loss": 0.5765, |
|
"step": 2830 |
|
}, |
|
{ |
|
"epoch": 5.25, |
|
"grad_norm": 1.4608234167099, |
|
"learning_rate": 0.002374074074074074, |
|
"loss": 0.5163, |
|
"step": 2840 |
|
}, |
|
{ |
|
"epoch": 5.27, |
|
"grad_norm": 1.8083865642547607, |
|
"learning_rate": 0.002364814814814815, |
|
"loss": 0.5533, |
|
"step": 2850 |
|
}, |
|
{ |
|
"epoch": 5.29, |
|
"grad_norm": 1.7674206495285034, |
|
"learning_rate": 0.0023555555555555556, |
|
"loss": 0.5558, |
|
"step": 2860 |
|
}, |
|
{ |
|
"epoch": 5.31, |
|
"grad_norm": 1.7677381038665771, |
|
"learning_rate": 0.0023462962962962964, |
|
"loss": 0.551, |
|
"step": 2870 |
|
}, |
|
{ |
|
"epoch": 5.33, |
|
"grad_norm": 2.006486415863037, |
|
"learning_rate": 0.0023370370370370368, |
|
"loss": 0.5311, |
|
"step": 2880 |
|
}, |
|
{ |
|
"epoch": 5.35, |
|
"grad_norm": 1.3190593719482422, |
|
"learning_rate": 0.002327777777777778, |
|
"loss": 0.5462, |
|
"step": 2890 |
|
}, |
|
{ |
|
"epoch": 5.37, |
|
"grad_norm": 2.1701364517211914, |
|
"learning_rate": 0.002318518518518519, |
|
"loss": 0.5056, |
|
"step": 2900 |
|
}, |
|
{ |
|
"epoch": 5.38, |
|
"grad_norm": 3.071043014526367, |
|
"learning_rate": 0.002309259259259259, |
|
"loss": 0.5611, |
|
"step": 2910 |
|
}, |
|
{ |
|
"epoch": 5.4, |
|
"grad_norm": 1.7673370838165283, |
|
"learning_rate": 0.0023, |
|
"loss": 0.5369, |
|
"step": 2920 |
|
}, |
|
{ |
|
"epoch": 5.42, |
|
"grad_norm": 1.6603857278823853, |
|
"learning_rate": 0.0022907407407407408, |
|
"loss": 0.6108, |
|
"step": 2930 |
|
}, |
|
{ |
|
"epoch": 5.44, |
|
"grad_norm": 3.754216194152832, |
|
"learning_rate": 0.0022814814814814816, |
|
"loss": 0.5355, |
|
"step": 2940 |
|
}, |
|
{ |
|
"epoch": 5.46, |
|
"grad_norm": 2.2037136554718018, |
|
"learning_rate": 0.0022722222222222224, |
|
"loss": 0.4998, |
|
"step": 2950 |
|
}, |
|
{ |
|
"epoch": 5.48, |
|
"grad_norm": 2.2701072692871094, |
|
"learning_rate": 0.002262962962962963, |
|
"loss": 0.5672, |
|
"step": 2960 |
|
}, |
|
{ |
|
"epoch": 5.49, |
|
"grad_norm": 2.246368408203125, |
|
"learning_rate": 0.0022537037037037035, |
|
"loss": 0.5309, |
|
"step": 2970 |
|
}, |
|
{ |
|
"epoch": 5.51, |
|
"grad_norm": 1.4481878280639648, |
|
"learning_rate": 0.0022444444444444443, |
|
"loss": 0.4786, |
|
"step": 2980 |
|
}, |
|
{ |
|
"epoch": 5.53, |
|
"grad_norm": 1.7569130659103394, |
|
"learning_rate": 0.002235185185185185, |
|
"loss": 0.579, |
|
"step": 2990 |
|
}, |
|
{ |
|
"epoch": 5.55, |
|
"grad_norm": 2.219710350036621, |
|
"learning_rate": 0.002225925925925926, |
|
"loss": 0.4768, |
|
"step": 3000 |
|
}, |
|
{ |
|
"epoch": 5.57, |
|
"grad_norm": 1.3611927032470703, |
|
"learning_rate": 0.0022166666666666667, |
|
"loss": 0.4504, |
|
"step": 3010 |
|
}, |
|
{ |
|
"epoch": 5.59, |
|
"grad_norm": 1.528106451034546, |
|
"learning_rate": 0.0022074074074074075, |
|
"loss": 0.491, |
|
"step": 3020 |
|
}, |
|
{ |
|
"epoch": 5.61, |
|
"grad_norm": 1.934448003768921, |
|
"learning_rate": 0.0021981481481481483, |
|
"loss": 0.5553, |
|
"step": 3030 |
|
}, |
|
{ |
|
"epoch": 5.62, |
|
"grad_norm": 1.9539951086044312, |
|
"learning_rate": 0.002188888888888889, |
|
"loss": 0.514, |
|
"step": 3040 |
|
}, |
|
{ |
|
"epoch": 5.64, |
|
"grad_norm": 3.347055196762085, |
|
"learning_rate": 0.00217962962962963, |
|
"loss": 0.565, |
|
"step": 3050 |
|
}, |
|
{ |
|
"epoch": 5.66, |
|
"grad_norm": 2.0984296798706055, |
|
"learning_rate": 0.0021703703703703702, |
|
"loss": 0.4917, |
|
"step": 3060 |
|
}, |
|
{ |
|
"epoch": 5.68, |
|
"grad_norm": 4.030628681182861, |
|
"learning_rate": 0.002161111111111111, |
|
"loss": 0.5485, |
|
"step": 3070 |
|
}, |
|
{ |
|
"epoch": 5.7, |
|
"grad_norm": 1.62152898311615, |
|
"learning_rate": 0.002151851851851852, |
|
"loss": 0.5468, |
|
"step": 3080 |
|
}, |
|
{ |
|
"epoch": 5.72, |
|
"grad_norm": 2.245067596435547, |
|
"learning_rate": 0.0021425925925925926, |
|
"loss": 0.4992, |
|
"step": 3090 |
|
}, |
|
{ |
|
"epoch": 5.74, |
|
"grad_norm": 2.4511606693267822, |
|
"learning_rate": 0.0021333333333333334, |
|
"loss": 0.5478, |
|
"step": 3100 |
|
}, |
|
{ |
|
"epoch": 5.75, |
|
"grad_norm": 3.2524254322052, |
|
"learning_rate": 0.0021240740740740742, |
|
"loss": 0.5006, |
|
"step": 3110 |
|
}, |
|
{ |
|
"epoch": 5.77, |
|
"grad_norm": 1.8704936504364014, |
|
"learning_rate": 0.002114814814814815, |
|
"loss": 0.5177, |
|
"step": 3120 |
|
}, |
|
{ |
|
"epoch": 5.79, |
|
"grad_norm": 1.6047933101654053, |
|
"learning_rate": 0.0021064814814814813, |
|
"loss": 0.5491, |
|
"step": 3130 |
|
}, |
|
{ |
|
"epoch": 5.81, |
|
"grad_norm": 2.038342237472534, |
|
"learning_rate": 0.002097222222222222, |
|
"loss": 0.5096, |
|
"step": 3140 |
|
}, |
|
{ |
|
"epoch": 5.83, |
|
"grad_norm": 1.1834994554519653, |
|
"learning_rate": 0.002087962962962963, |
|
"loss": 0.4611, |
|
"step": 3150 |
|
}, |
|
{ |
|
"epoch": 5.85, |
|
"grad_norm": 3.6137051582336426, |
|
"learning_rate": 0.0020787037037037037, |
|
"loss": 0.5056, |
|
"step": 3160 |
|
}, |
|
{ |
|
"epoch": 5.86, |
|
"grad_norm": 1.7401528358459473, |
|
"learning_rate": 0.0020694444444444445, |
|
"loss": 0.5285, |
|
"step": 3170 |
|
}, |
|
{ |
|
"epoch": 5.88, |
|
"grad_norm": 1.1825543642044067, |
|
"learning_rate": 0.0020601851851851853, |
|
"loss": 0.5166, |
|
"step": 3180 |
|
}, |
|
{ |
|
"epoch": 5.9, |
|
"grad_norm": 1.469771385192871, |
|
"learning_rate": 0.002050925925925926, |
|
"loss": 0.5246, |
|
"step": 3190 |
|
}, |
|
{ |
|
"epoch": 5.92, |
|
"grad_norm": 1.9187328815460205, |
|
"learning_rate": 0.0020416666666666665, |
|
"loss": 0.5687, |
|
"step": 3200 |
|
}, |
|
{ |
|
"epoch": 5.94, |
|
"grad_norm": 1.27570641040802, |
|
"learning_rate": 0.0020324074074074077, |
|
"loss": 0.5367, |
|
"step": 3210 |
|
}, |
|
{ |
|
"epoch": 5.96, |
|
"grad_norm": 2.436992883682251, |
|
"learning_rate": 0.002023148148148148, |
|
"loss": 0.5108, |
|
"step": 3220 |
|
}, |
|
{ |
|
"epoch": 5.98, |
|
"grad_norm": 2.1716506481170654, |
|
"learning_rate": 0.002013888888888889, |
|
"loss": 0.5081, |
|
"step": 3230 |
|
}, |
|
{ |
|
"epoch": 5.99, |
|
"grad_norm": 2.3047661781311035, |
|
"learning_rate": 0.0020046296296296296, |
|
"loss": 0.5746, |
|
"step": 3240 |
|
}, |
|
{ |
|
"epoch": 6.0, |
|
"eval_accuracy": 0.9753504852873209, |
|
"eval_f1": 0.9794812735446677, |
|
"eval_loss": 0.06419458985328674, |
|
"eval_precision": 0.9810236528071776, |
|
"eval_recall": 0.9788061689635984, |
|
"eval_runtime": 55.1158, |
|
"eval_samples_per_second": 117.77, |
|
"eval_steps_per_second": 7.366, |
|
"step": 3243 |
|
}, |
|
{ |
|
"epoch": 6.01, |
|
"grad_norm": 2.193244457244873, |
|
"learning_rate": 0.0019953703703703704, |
|
"loss": 0.4844, |
|
"step": 3250 |
|
}, |
|
{ |
|
"epoch": 6.03, |
|
"grad_norm": 1.7774107456207275, |
|
"learning_rate": 0.0019861111111111112, |
|
"loss": 0.5153, |
|
"step": 3260 |
|
}, |
|
{ |
|
"epoch": 6.05, |
|
"grad_norm": 1.8430052995681763, |
|
"learning_rate": 0.001976851851851852, |
|
"loss": 0.5332, |
|
"step": 3270 |
|
}, |
|
{ |
|
"epoch": 6.07, |
|
"grad_norm": 2.268313407897949, |
|
"learning_rate": 0.001967592592592593, |
|
"loss": 0.4762, |
|
"step": 3280 |
|
}, |
|
{ |
|
"epoch": 6.09, |
|
"grad_norm": 2.3177573680877686, |
|
"learning_rate": 0.001958333333333333, |
|
"loss": 0.5045, |
|
"step": 3290 |
|
}, |
|
{ |
|
"epoch": 6.11, |
|
"grad_norm": 2.091614007949829, |
|
"learning_rate": 0.0019490740740740742, |
|
"loss": 0.426, |
|
"step": 3300 |
|
}, |
|
{ |
|
"epoch": 6.12, |
|
"grad_norm": 2.227275848388672, |
|
"learning_rate": 0.0019398148148148148, |
|
"loss": 0.5102, |
|
"step": 3310 |
|
}, |
|
{ |
|
"epoch": 6.14, |
|
"grad_norm": 1.47825026512146, |
|
"learning_rate": 0.0019305555555555556, |
|
"loss": 0.4559, |
|
"step": 3320 |
|
}, |
|
{ |
|
"epoch": 6.16, |
|
"grad_norm": 2.3970351219177246, |
|
"learning_rate": 0.0019212962962962962, |
|
"loss": 0.5202, |
|
"step": 3330 |
|
}, |
|
{ |
|
"epoch": 6.18, |
|
"grad_norm": 1.6059942245483398, |
|
"learning_rate": 0.0019120370370370372, |
|
"loss": 0.4684, |
|
"step": 3340 |
|
}, |
|
{ |
|
"epoch": 6.2, |
|
"grad_norm": 1.4332858324050903, |
|
"learning_rate": 0.0019027777777777778, |
|
"loss": 0.5029, |
|
"step": 3350 |
|
}, |
|
{ |
|
"epoch": 6.22, |
|
"grad_norm": 2.158965587615967, |
|
"learning_rate": 0.0018935185185185186, |
|
"loss": 0.5185, |
|
"step": 3360 |
|
}, |
|
{ |
|
"epoch": 6.23, |
|
"grad_norm": 2.077367067337036, |
|
"learning_rate": 0.0018842592592592591, |
|
"loss": 0.472, |
|
"step": 3370 |
|
}, |
|
{ |
|
"epoch": 6.25, |
|
"grad_norm": 2.0703649520874023, |
|
"learning_rate": 0.001875, |
|
"loss": 0.4818, |
|
"step": 3380 |
|
}, |
|
{ |
|
"epoch": 6.27, |
|
"grad_norm": 2.000805139541626, |
|
"learning_rate": 0.001865740740740741, |
|
"loss": 0.4643, |
|
"step": 3390 |
|
}, |
|
{ |
|
"epoch": 6.29, |
|
"grad_norm": 1.556027889251709, |
|
"learning_rate": 0.0018564814814814815, |
|
"loss": 0.468, |
|
"step": 3400 |
|
}, |
|
{ |
|
"epoch": 6.31, |
|
"grad_norm": 1.2694203853607178, |
|
"learning_rate": 0.0018472222222222223, |
|
"loss": 0.4437, |
|
"step": 3410 |
|
}, |
|
{ |
|
"epoch": 6.33, |
|
"grad_norm": 2.0669643878936768, |
|
"learning_rate": 0.001837962962962963, |
|
"loss": 0.4565, |
|
"step": 3420 |
|
}, |
|
{ |
|
"epoch": 6.35, |
|
"grad_norm": 1.6692304611206055, |
|
"learning_rate": 0.001828703703703704, |
|
"loss": 0.4573, |
|
"step": 3430 |
|
}, |
|
{ |
|
"epoch": 6.36, |
|
"grad_norm": 1.6164546012878418, |
|
"learning_rate": 0.0018194444444444445, |
|
"loss": 0.4282, |
|
"step": 3440 |
|
}, |
|
{ |
|
"epoch": 6.38, |
|
"grad_norm": 1.9918891191482544, |
|
"learning_rate": 0.0018101851851851853, |
|
"loss": 0.4522, |
|
"step": 3450 |
|
}, |
|
{ |
|
"epoch": 6.4, |
|
"grad_norm": 1.4127854108810425, |
|
"learning_rate": 0.0018009259259259259, |
|
"loss": 0.5317, |
|
"step": 3460 |
|
}, |
|
{ |
|
"epoch": 6.42, |
|
"grad_norm": 1.8190535306930542, |
|
"learning_rate": 0.0017916666666666667, |
|
"loss": 0.5193, |
|
"step": 3470 |
|
}, |
|
{ |
|
"epoch": 6.44, |
|
"grad_norm": 1.8793113231658936, |
|
"learning_rate": 0.0017824074074074072, |
|
"loss": 0.5322, |
|
"step": 3480 |
|
}, |
|
{ |
|
"epoch": 6.46, |
|
"grad_norm": 1.1124266386032104, |
|
"learning_rate": 0.0017731481481481483, |
|
"loss": 0.4849, |
|
"step": 3490 |
|
}, |
|
{ |
|
"epoch": 6.48, |
|
"grad_norm": 1.5575320720672607, |
|
"learning_rate": 0.001763888888888889, |
|
"loss": 0.5254, |
|
"step": 3500 |
|
}, |
|
{ |
|
"epoch": 6.49, |
|
"grad_norm": 1.3481545448303223, |
|
"learning_rate": 0.0017546296296296296, |
|
"loss": 0.4869, |
|
"step": 3510 |
|
}, |
|
{ |
|
"epoch": 6.51, |
|
"grad_norm": 1.0486959218978882, |
|
"learning_rate": 0.0017453703703703704, |
|
"loss": 0.5048, |
|
"step": 3520 |
|
}, |
|
{ |
|
"epoch": 6.53, |
|
"grad_norm": 1.848264217376709, |
|
"learning_rate": 0.001736111111111111, |
|
"loss": 0.5064, |
|
"step": 3530 |
|
}, |
|
{ |
|
"epoch": 6.55, |
|
"grad_norm": 1.6750601530075073, |
|
"learning_rate": 0.001726851851851852, |
|
"loss": 0.4459, |
|
"step": 3540 |
|
}, |
|
{ |
|
"epoch": 6.57, |
|
"grad_norm": 1.5514721870422363, |
|
"learning_rate": 0.0017175925925925926, |
|
"loss": 0.4382, |
|
"step": 3550 |
|
}, |
|
{ |
|
"epoch": 6.59, |
|
"grad_norm": 1.3411380052566528, |
|
"learning_rate": 0.0017083333333333334, |
|
"loss": 0.4007, |
|
"step": 3560 |
|
}, |
|
{ |
|
"epoch": 6.6, |
|
"grad_norm": 1.8607478141784668, |
|
"learning_rate": 0.001699074074074074, |
|
"loss": 0.4776, |
|
"step": 3570 |
|
}, |
|
{ |
|
"epoch": 6.62, |
|
"grad_norm": 1.6788519620895386, |
|
"learning_rate": 0.0016898148148148148, |
|
"loss": 0.4499, |
|
"step": 3580 |
|
}, |
|
{ |
|
"epoch": 6.64, |
|
"grad_norm": 1.4055275917053223, |
|
"learning_rate": 0.0016805555555555558, |
|
"loss": 0.5017, |
|
"step": 3590 |
|
}, |
|
{ |
|
"epoch": 6.66, |
|
"grad_norm": 1.9145076274871826, |
|
"learning_rate": 0.0016712962962962964, |
|
"loss": 0.4196, |
|
"step": 3600 |
|
}, |
|
{ |
|
"epoch": 6.68, |
|
"grad_norm": 1.7690577507019043, |
|
"learning_rate": 0.0016620370370370372, |
|
"loss": 0.4792, |
|
"step": 3610 |
|
}, |
|
{ |
|
"epoch": 6.7, |
|
"grad_norm": 1.713212490081787, |
|
"learning_rate": 0.0016527777777777778, |
|
"loss": 0.4418, |
|
"step": 3620 |
|
}, |
|
{ |
|
"epoch": 6.72, |
|
"grad_norm": 2.4528722763061523, |
|
"learning_rate": 0.0016435185185185185, |
|
"loss": 0.4519, |
|
"step": 3630 |
|
}, |
|
{ |
|
"epoch": 6.73, |
|
"grad_norm": 1.8348699808120728, |
|
"learning_rate": 0.0016342592592592591, |
|
"loss": 0.3959, |
|
"step": 3640 |
|
}, |
|
{ |
|
"epoch": 6.75, |
|
"grad_norm": 1.38786780834198, |
|
"learning_rate": 0.0016250000000000001, |
|
"loss": 0.4433, |
|
"step": 3650 |
|
}, |
|
{ |
|
"epoch": 6.77, |
|
"grad_norm": 0.8413279056549072, |
|
"learning_rate": 0.0016157407407407407, |
|
"loss": 0.4869, |
|
"step": 3660 |
|
}, |
|
{ |
|
"epoch": 6.79, |
|
"grad_norm": 1.3703649044036865, |
|
"learning_rate": 0.0016064814814814815, |
|
"loss": 0.5021, |
|
"step": 3670 |
|
}, |
|
{ |
|
"epoch": 6.81, |
|
"grad_norm": 1.6931146383285522, |
|
"learning_rate": 0.001597222222222222, |
|
"loss": 0.416, |
|
"step": 3680 |
|
}, |
|
{ |
|
"epoch": 6.83, |
|
"grad_norm": 1.314029574394226, |
|
"learning_rate": 0.001587962962962963, |
|
"loss": 0.4489, |
|
"step": 3690 |
|
}, |
|
{ |
|
"epoch": 6.85, |
|
"grad_norm": 1.6631927490234375, |
|
"learning_rate": 0.001578703703703704, |
|
"loss": 0.4334, |
|
"step": 3700 |
|
}, |
|
{ |
|
"epoch": 6.86, |
|
"grad_norm": 1.821455478668213, |
|
"learning_rate": 0.0015694444444444445, |
|
"loss": 0.4307, |
|
"step": 3710 |
|
}, |
|
{ |
|
"epoch": 6.88, |
|
"grad_norm": 1.4613134860992432, |
|
"learning_rate": 0.0015601851851851853, |
|
"loss": 0.414, |
|
"step": 3720 |
|
}, |
|
{ |
|
"epoch": 6.9, |
|
"grad_norm": 1.9156243801116943, |
|
"learning_rate": 0.0015509259259259259, |
|
"loss": 0.5022, |
|
"step": 3730 |
|
}, |
|
{ |
|
"epoch": 6.92, |
|
"grad_norm": 1.9036259651184082, |
|
"learning_rate": 0.0015416666666666669, |
|
"loss": 0.4562, |
|
"step": 3740 |
|
}, |
|
{ |
|
"epoch": 6.94, |
|
"grad_norm": 1.6884894371032715, |
|
"learning_rate": 0.0015324074074074075, |
|
"loss": 0.4418, |
|
"step": 3750 |
|
}, |
|
{ |
|
"epoch": 6.96, |
|
"grad_norm": 1.4793485403060913, |
|
"learning_rate": 0.0015231481481481483, |
|
"loss": 0.4091, |
|
"step": 3760 |
|
}, |
|
{ |
|
"epoch": 6.98, |
|
"grad_norm": 1.9029099941253662, |
|
"learning_rate": 0.0015138888888888888, |
|
"loss": 0.4827, |
|
"step": 3770 |
|
}, |
|
{ |
|
"epoch": 6.99, |
|
"grad_norm": 1.2703421115875244, |
|
"learning_rate": 0.0015046296296296296, |
|
"loss": 0.4066, |
|
"step": 3780 |
|
}, |
|
{ |
|
"epoch": 7.0, |
|
"eval_accuracy": 0.9565552303189031, |
|
"eval_f1": 0.96141521763694, |
|
"eval_loss": 0.11959208548069, |
|
"eval_precision": 0.9693468985322017, |
|
"eval_recall": 0.9563045431610278, |
|
"eval_runtime": 55.0215, |
|
"eval_samples_per_second": 117.972, |
|
"eval_steps_per_second": 7.379, |
|
"step": 3783 |
|
}, |
|
{ |
|
"epoch": 7.01, |
|
"grad_norm": 1.4109677076339722, |
|
"learning_rate": 0.0014953703703703702, |
|
"loss": 0.434, |
|
"step": 3790 |
|
}, |
|
{ |
|
"epoch": 7.03, |
|
"grad_norm": 1.6477872133255005, |
|
"learning_rate": 0.0014861111111111112, |
|
"loss": 0.466, |
|
"step": 3800 |
|
}, |
|
{ |
|
"epoch": 7.05, |
|
"grad_norm": 0.9931581020355225, |
|
"learning_rate": 0.001476851851851852, |
|
"loss": 0.4282, |
|
"step": 3810 |
|
}, |
|
{ |
|
"epoch": 7.07, |
|
"grad_norm": 2.313886880874634, |
|
"learning_rate": 0.0014675925925925926, |
|
"loss": 0.4249, |
|
"step": 3820 |
|
}, |
|
{ |
|
"epoch": 7.09, |
|
"grad_norm": 1.5501474142074585, |
|
"learning_rate": 0.0014583333333333334, |
|
"loss": 0.4062, |
|
"step": 3830 |
|
}, |
|
{ |
|
"epoch": 7.1, |
|
"grad_norm": 2.267270088195801, |
|
"learning_rate": 0.001449074074074074, |
|
"loss": 0.4546, |
|
"step": 3840 |
|
}, |
|
{ |
|
"epoch": 7.12, |
|
"grad_norm": 2.5071542263031006, |
|
"learning_rate": 0.001439814814814815, |
|
"loss": 0.4828, |
|
"step": 3850 |
|
}, |
|
{ |
|
"epoch": 7.14, |
|
"grad_norm": 1.464120864868164, |
|
"learning_rate": 0.0014305555555555556, |
|
"loss": 0.416, |
|
"step": 3860 |
|
}, |
|
{ |
|
"epoch": 7.16, |
|
"grad_norm": 1.3284567594528198, |
|
"learning_rate": 0.0014212962962962964, |
|
"loss": 0.3704, |
|
"step": 3870 |
|
}, |
|
{ |
|
"epoch": 7.18, |
|
"grad_norm": 1.5376476049423218, |
|
"learning_rate": 0.001412037037037037, |
|
"loss": 0.454, |
|
"step": 3880 |
|
}, |
|
{ |
|
"epoch": 7.2, |
|
"grad_norm": 1.0760722160339355, |
|
"learning_rate": 0.0014027777777777777, |
|
"loss": 0.4538, |
|
"step": 3890 |
|
}, |
|
{ |
|
"epoch": 7.22, |
|
"grad_norm": 2.0362236499786377, |
|
"learning_rate": 0.0013935185185185188, |
|
"loss": 0.4178, |
|
"step": 3900 |
|
}, |
|
{ |
|
"epoch": 7.23, |
|
"grad_norm": 1.949922800064087, |
|
"learning_rate": 0.0013842592592592593, |
|
"loss": 0.3457, |
|
"step": 3910 |
|
}, |
|
{ |
|
"epoch": 7.25, |
|
"grad_norm": 1.173089623451233, |
|
"learning_rate": 0.0013750000000000001, |
|
"loss": 0.389, |
|
"step": 3920 |
|
}, |
|
{ |
|
"epoch": 7.27, |
|
"grad_norm": 1.4733856916427612, |
|
"learning_rate": 0.0013657407407407407, |
|
"loss": 0.456, |
|
"step": 3930 |
|
}, |
|
{ |
|
"epoch": 7.29, |
|
"grad_norm": 1.5104173421859741, |
|
"learning_rate": 0.0013564814814814815, |
|
"loss": 0.406, |
|
"step": 3940 |
|
}, |
|
{ |
|
"epoch": 7.31, |
|
"grad_norm": 2.4948642253875732, |
|
"learning_rate": 0.001347222222222222, |
|
"loss": 0.4195, |
|
"step": 3950 |
|
}, |
|
{ |
|
"epoch": 7.33, |
|
"grad_norm": 2.137315511703491, |
|
"learning_rate": 0.001337962962962963, |
|
"loss": 0.4857, |
|
"step": 3960 |
|
}, |
|
{ |
|
"epoch": 7.35, |
|
"grad_norm": 1.20357084274292, |
|
"learning_rate": 0.0013287037037037037, |
|
"loss": 0.4287, |
|
"step": 3970 |
|
}, |
|
{ |
|
"epoch": 7.36, |
|
"grad_norm": 1.5241217613220215, |
|
"learning_rate": 0.0013194444444444445, |
|
"loss": 0.4535, |
|
"step": 3980 |
|
}, |
|
{ |
|
"epoch": 7.38, |
|
"grad_norm": 1.4119207859039307, |
|
"learning_rate": 0.001310185185185185, |
|
"loss": 0.486, |
|
"step": 3990 |
|
}, |
|
{ |
|
"epoch": 7.4, |
|
"grad_norm": 1.3262003660202026, |
|
"learning_rate": 0.0013009259259259259, |
|
"loss": 0.4833, |
|
"step": 4000 |
|
}, |
|
{ |
|
"epoch": 7.42, |
|
"grad_norm": 1.0021650791168213, |
|
"learning_rate": 0.0012916666666666669, |
|
"loss": 0.3996, |
|
"step": 4010 |
|
}, |
|
{ |
|
"epoch": 7.44, |
|
"grad_norm": 1.8423185348510742, |
|
"learning_rate": 0.0012824074074074075, |
|
"loss": 0.4569, |
|
"step": 4020 |
|
}, |
|
{ |
|
"epoch": 7.46, |
|
"grad_norm": 2.010634183883667, |
|
"learning_rate": 0.0012731481481481483, |
|
"loss": 0.4673, |
|
"step": 4030 |
|
}, |
|
{ |
|
"epoch": 7.47, |
|
"grad_norm": 2.974384307861328, |
|
"learning_rate": 0.0012638888888888888, |
|
"loss": 0.3992, |
|
"step": 4040 |
|
}, |
|
{ |
|
"epoch": 7.49, |
|
"grad_norm": 1.022455096244812, |
|
"learning_rate": 0.0012546296296296296, |
|
"loss": 0.3598, |
|
"step": 4050 |
|
}, |
|
{ |
|
"epoch": 7.51, |
|
"grad_norm": 1.5852398872375488, |
|
"learning_rate": 0.0012453703703703704, |
|
"loss": 0.4115, |
|
"step": 4060 |
|
}, |
|
{ |
|
"epoch": 7.53, |
|
"grad_norm": 1.3596739768981934, |
|
"learning_rate": 0.0012361111111111112, |
|
"loss": 0.3534, |
|
"step": 4070 |
|
}, |
|
{ |
|
"epoch": 7.55, |
|
"grad_norm": 1.9260822534561157, |
|
"learning_rate": 0.001226851851851852, |
|
"loss": 0.4297, |
|
"step": 4080 |
|
}, |
|
{ |
|
"epoch": 7.57, |
|
"grad_norm": 1.5783950090408325, |
|
"learning_rate": 0.0012175925925925926, |
|
"loss": 0.368, |
|
"step": 4090 |
|
}, |
|
{ |
|
"epoch": 7.59, |
|
"grad_norm": 1.7927485704421997, |
|
"learning_rate": 0.0012083333333333334, |
|
"loss": 0.3701, |
|
"step": 4100 |
|
}, |
|
{ |
|
"epoch": 7.6, |
|
"grad_norm": 1.1680549383163452, |
|
"learning_rate": 0.0011990740740740742, |
|
"loss": 0.3883, |
|
"step": 4110 |
|
}, |
|
{ |
|
"epoch": 7.62, |
|
"grad_norm": 1.4916154146194458, |
|
"learning_rate": 0.0011898148148148148, |
|
"loss": 0.373, |
|
"step": 4120 |
|
}, |
|
{ |
|
"epoch": 7.64, |
|
"grad_norm": 1.3623180389404297, |
|
"learning_rate": 0.0011805555555555556, |
|
"loss": 0.3697, |
|
"step": 4130 |
|
}, |
|
{ |
|
"epoch": 7.66, |
|
"grad_norm": 1.2618119716644287, |
|
"learning_rate": 0.0011712962962962964, |
|
"loss": 0.4114, |
|
"step": 4140 |
|
}, |
|
{ |
|
"epoch": 7.68, |
|
"grad_norm": 1.7506533861160278, |
|
"learning_rate": 0.001162037037037037, |
|
"loss": 0.4003, |
|
"step": 4150 |
|
}, |
|
{ |
|
"epoch": 7.7, |
|
"grad_norm": 1.0284491777420044, |
|
"learning_rate": 0.001152777777777778, |
|
"loss": 0.3749, |
|
"step": 4160 |
|
}, |
|
{ |
|
"epoch": 7.72, |
|
"grad_norm": 1.344994306564331, |
|
"learning_rate": 0.0011435185185185185, |
|
"loss": 0.4025, |
|
"step": 4170 |
|
}, |
|
{ |
|
"epoch": 7.73, |
|
"grad_norm": 3.066920042037964, |
|
"learning_rate": 0.0011342592592592593, |
|
"loss": 0.4203, |
|
"step": 4180 |
|
}, |
|
{ |
|
"epoch": 7.75, |
|
"grad_norm": 1.4180749654769897, |
|
"learning_rate": 0.0011250000000000001, |
|
"loss": 0.4364, |
|
"step": 4190 |
|
}, |
|
{ |
|
"epoch": 7.77, |
|
"grad_norm": 1.5211139917373657, |
|
"learning_rate": 0.0011157407407407407, |
|
"loss": 0.4133, |
|
"step": 4200 |
|
}, |
|
{ |
|
"epoch": 7.79, |
|
"grad_norm": 1.9516364336013794, |
|
"learning_rate": 0.0011064814814814815, |
|
"loss": 0.4232, |
|
"step": 4210 |
|
}, |
|
{ |
|
"epoch": 7.81, |
|
"grad_norm": 2.458101511001587, |
|
"learning_rate": 0.0010972222222222223, |
|
"loss": 0.434, |
|
"step": 4220 |
|
}, |
|
{ |
|
"epoch": 7.83, |
|
"grad_norm": 1.2099213600158691, |
|
"learning_rate": 0.0010879629629629629, |
|
"loss": 0.3169, |
|
"step": 4230 |
|
}, |
|
{ |
|
"epoch": 7.84, |
|
"grad_norm": 1.0573714971542358, |
|
"learning_rate": 0.0010787037037037037, |
|
"loss": 0.3777, |
|
"step": 4240 |
|
}, |
|
{ |
|
"epoch": 7.86, |
|
"grad_norm": 1.4449975490570068, |
|
"learning_rate": 0.0010694444444444445, |
|
"loss": 0.4426, |
|
"step": 4250 |
|
}, |
|
{ |
|
"epoch": 7.88, |
|
"grad_norm": 1.335119366645813, |
|
"learning_rate": 0.001060185185185185, |
|
"loss": 0.4643, |
|
"step": 4260 |
|
}, |
|
{ |
|
"epoch": 7.9, |
|
"grad_norm": 2.1732048988342285, |
|
"learning_rate": 0.001050925925925926, |
|
"loss": 0.3903, |
|
"step": 4270 |
|
}, |
|
{ |
|
"epoch": 7.92, |
|
"grad_norm": 1.193305492401123, |
|
"learning_rate": 0.0010416666666666667, |
|
"loss": 0.3744, |
|
"step": 4280 |
|
}, |
|
{ |
|
"epoch": 7.94, |
|
"grad_norm": 0.9779842495918274, |
|
"learning_rate": 0.0010324074074074074, |
|
"loss": 0.4124, |
|
"step": 4290 |
|
}, |
|
{ |
|
"epoch": 7.96, |
|
"grad_norm": 2.7513339519500732, |
|
"learning_rate": 0.0010231481481481482, |
|
"loss": 0.3917, |
|
"step": 4300 |
|
}, |
|
{ |
|
"epoch": 7.97, |
|
"grad_norm": 1.756395936012268, |
|
"learning_rate": 0.0010138888888888888, |
|
"loss": 0.4301, |
|
"step": 4310 |
|
}, |
|
{ |
|
"epoch": 7.99, |
|
"grad_norm": 1.5804221630096436, |
|
"learning_rate": 0.0010046296296296296, |
|
"loss": 0.4046, |
|
"step": 4320 |
|
}, |
|
{ |
|
"epoch": 8.0, |
|
"eval_accuracy": 0.979818209828994, |
|
"eval_f1": 0.9833721625743178, |
|
"eval_loss": 0.04691172018647194, |
|
"eval_precision": 0.9853003976747072, |
|
"eval_recall": 0.9820557579494299, |
|
"eval_runtime": 54.8438, |
|
"eval_samples_per_second": 118.354, |
|
"eval_steps_per_second": 7.403, |
|
"step": 4324 |
|
}, |
|
{ |
|
"epoch": 8.01, |
|
"grad_norm": 1.8008298873901367, |
|
"learning_rate": 0.0009953703703703704, |
|
"loss": 0.4452, |
|
"step": 4330 |
|
}, |
|
{ |
|
"epoch": 8.03, |
|
"grad_norm": 3.602733850479126, |
|
"learning_rate": 0.000986111111111111, |
|
"loss": 0.3281, |
|
"step": 4340 |
|
}, |
|
{ |
|
"epoch": 8.05, |
|
"grad_norm": 1.5571168661117554, |
|
"learning_rate": 0.0009768518518518518, |
|
"loss": 0.432, |
|
"step": 4350 |
|
}, |
|
{ |
|
"epoch": 8.07, |
|
"grad_norm": 1.5973519086837769, |
|
"learning_rate": 0.0009675925925925926, |
|
"loss": 0.3682, |
|
"step": 4360 |
|
}, |
|
{ |
|
"epoch": 8.09, |
|
"grad_norm": 1.1785231828689575, |
|
"learning_rate": 0.0009583333333333334, |
|
"loss": 0.3233, |
|
"step": 4370 |
|
}, |
|
{ |
|
"epoch": 8.1, |
|
"grad_norm": 1.9037755727767944, |
|
"learning_rate": 0.0009490740740740742, |
|
"loss": 0.3733, |
|
"step": 4380 |
|
}, |
|
{ |
|
"epoch": 8.12, |
|
"grad_norm": 1.3710215091705322, |
|
"learning_rate": 0.0009398148148148149, |
|
"loss": 0.3562, |
|
"step": 4390 |
|
}, |
|
{ |
|
"epoch": 8.14, |
|
"grad_norm": 1.4512821435928345, |
|
"learning_rate": 0.0009305555555555556, |
|
"loss": 0.3391, |
|
"step": 4400 |
|
}, |
|
{ |
|
"epoch": 8.16, |
|
"grad_norm": 1.5068678855895996, |
|
"learning_rate": 0.0009212962962962964, |
|
"loss": 0.3703, |
|
"step": 4410 |
|
}, |
|
{ |
|
"epoch": 8.18, |
|
"grad_norm": 1.2810806035995483, |
|
"learning_rate": 0.000912037037037037, |
|
"loss": 0.3729, |
|
"step": 4420 |
|
}, |
|
{ |
|
"epoch": 8.2, |
|
"grad_norm": 1.9714285135269165, |
|
"learning_rate": 0.0009027777777777777, |
|
"loss": 0.3988, |
|
"step": 4430 |
|
}, |
|
{ |
|
"epoch": 8.21, |
|
"grad_norm": 1.3327577114105225, |
|
"learning_rate": 0.0008935185185185185, |
|
"loss": 0.4024, |
|
"step": 4440 |
|
}, |
|
{ |
|
"epoch": 8.23, |
|
"grad_norm": 1.0256530046463013, |
|
"learning_rate": 0.0008842592592592592, |
|
"loss": 0.3818, |
|
"step": 4450 |
|
}, |
|
{ |
|
"epoch": 8.25, |
|
"grad_norm": 2.233092784881592, |
|
"learning_rate": 0.0008749999999999999, |
|
"loss": 0.3668, |
|
"step": 4460 |
|
}, |
|
{ |
|
"epoch": 8.27, |
|
"grad_norm": 1.3951568603515625, |
|
"learning_rate": 0.0008657407407407407, |
|
"loss": 0.44, |
|
"step": 4470 |
|
}, |
|
{ |
|
"epoch": 8.29, |
|
"grad_norm": 1.827919840812683, |
|
"learning_rate": 0.0008564814814814815, |
|
"loss": 0.3801, |
|
"step": 4480 |
|
}, |
|
{ |
|
"epoch": 8.31, |
|
"grad_norm": 2.934967041015625, |
|
"learning_rate": 0.0008472222222222223, |
|
"loss": 0.422, |
|
"step": 4490 |
|
}, |
|
{ |
|
"epoch": 8.33, |
|
"grad_norm": 1.2205818891525269, |
|
"learning_rate": 0.000837962962962963, |
|
"loss": 0.3991, |
|
"step": 4500 |
|
}, |
|
{ |
|
"epoch": 8.34, |
|
"grad_norm": 1.424593448638916, |
|
"learning_rate": 0.0008287037037037038, |
|
"loss": 0.418, |
|
"step": 4510 |
|
}, |
|
{ |
|
"epoch": 8.36, |
|
"grad_norm": 1.0254913568496704, |
|
"learning_rate": 0.0008194444444444445, |
|
"loss": 0.3876, |
|
"step": 4520 |
|
}, |
|
{ |
|
"epoch": 8.38, |
|
"grad_norm": 0.9939424991607666, |
|
"learning_rate": 0.0008101851851851852, |
|
"loss": 0.4267, |
|
"step": 4530 |
|
}, |
|
{ |
|
"epoch": 8.4, |
|
"grad_norm": 1.7678611278533936, |
|
"learning_rate": 0.000800925925925926, |
|
"loss": 0.3916, |
|
"step": 4540 |
|
}, |
|
{ |
|
"epoch": 8.42, |
|
"grad_norm": 1.0343542098999023, |
|
"learning_rate": 0.0007916666666666666, |
|
"loss": 0.3939, |
|
"step": 4550 |
|
}, |
|
{ |
|
"epoch": 8.44, |
|
"grad_norm": 0.9549526572227478, |
|
"learning_rate": 0.0007824074074074073, |
|
"loss": 0.3785, |
|
"step": 4560 |
|
}, |
|
{ |
|
"epoch": 8.46, |
|
"grad_norm": 1.0010316371917725, |
|
"learning_rate": 0.0007731481481481481, |
|
"loss": 0.3508, |
|
"step": 4570 |
|
}, |
|
{ |
|
"epoch": 8.47, |
|
"grad_norm": 2.0805888175964355, |
|
"learning_rate": 0.0007638888888888889, |
|
"loss": 0.3614, |
|
"step": 4580 |
|
}, |
|
{ |
|
"epoch": 8.49, |
|
"grad_norm": 1.181626558303833, |
|
"learning_rate": 0.0007546296296296297, |
|
"loss": 0.359, |
|
"step": 4590 |
|
}, |
|
{ |
|
"epoch": 8.51, |
|
"grad_norm": 1.138376235961914, |
|
"learning_rate": 0.0007453703703703704, |
|
"loss": 0.3513, |
|
"step": 4600 |
|
}, |
|
{ |
|
"epoch": 8.53, |
|
"grad_norm": 1.0916022062301636, |
|
"learning_rate": 0.0007361111111111111, |
|
"loss": 0.3199, |
|
"step": 4610 |
|
}, |
|
{ |
|
"epoch": 8.55, |
|
"grad_norm": 2.3587114810943604, |
|
"learning_rate": 0.0007268518518518519, |
|
"loss": 0.3658, |
|
"step": 4620 |
|
}, |
|
{ |
|
"epoch": 8.57, |
|
"grad_norm": 1.2504198551177979, |
|
"learning_rate": 0.0007175925925925926, |
|
"loss": 0.3952, |
|
"step": 4630 |
|
}, |
|
{ |
|
"epoch": 8.58, |
|
"grad_norm": 1.3484505414962769, |
|
"learning_rate": 0.0007083333333333333, |
|
"loss": 0.3542, |
|
"step": 4640 |
|
}, |
|
{ |
|
"epoch": 8.6, |
|
"grad_norm": 2.4245710372924805, |
|
"learning_rate": 0.0006990740740740741, |
|
"loss": 0.3421, |
|
"step": 4650 |
|
}, |
|
{ |
|
"epoch": 8.62, |
|
"grad_norm": 1.2606480121612549, |
|
"learning_rate": 0.0006898148148148148, |
|
"loss": 0.3404, |
|
"step": 4660 |
|
}, |
|
{ |
|
"epoch": 8.64, |
|
"grad_norm": 1.913200855255127, |
|
"learning_rate": 0.0006805555555555556, |
|
"loss": 0.4015, |
|
"step": 4670 |
|
}, |
|
{ |
|
"epoch": 8.66, |
|
"grad_norm": 1.5260008573532104, |
|
"learning_rate": 0.0006712962962962964, |
|
"loss": 0.3425, |
|
"step": 4680 |
|
}, |
|
{ |
|
"epoch": 8.68, |
|
"grad_norm": 1.221198320388794, |
|
"learning_rate": 0.0006620370370370372, |
|
"loss": 0.4078, |
|
"step": 4690 |
|
}, |
|
{ |
|
"epoch": 8.7, |
|
"grad_norm": 1.630136489868164, |
|
"learning_rate": 0.0006527777777777778, |
|
"loss": 0.3508, |
|
"step": 4700 |
|
}, |
|
{ |
|
"epoch": 8.71, |
|
"grad_norm": 1.3053523302078247, |
|
"learning_rate": 0.0006435185185185185, |
|
"loss": 0.2958, |
|
"step": 4710 |
|
}, |
|
{ |
|
"epoch": 8.73, |
|
"grad_norm": 1.5121403932571411, |
|
"learning_rate": 0.0006342592592592593, |
|
"loss": 0.3552, |
|
"step": 4720 |
|
}, |
|
{ |
|
"epoch": 8.75, |
|
"grad_norm": 1.1451386213302612, |
|
"learning_rate": 0.000625, |
|
"loss": 0.3688, |
|
"step": 4730 |
|
}, |
|
{ |
|
"epoch": 8.77, |
|
"grad_norm": 2.4750773906707764, |
|
"learning_rate": 0.0006157407407407407, |
|
"loss": 0.3435, |
|
"step": 4740 |
|
}, |
|
{ |
|
"epoch": 8.79, |
|
"grad_norm": 0.6995977163314819, |
|
"learning_rate": 0.0006064814814814815, |
|
"loss": 0.3152, |
|
"step": 4750 |
|
}, |
|
{ |
|
"epoch": 8.81, |
|
"grad_norm": 1.6774075031280518, |
|
"learning_rate": 0.0005972222222222223, |
|
"loss": 0.2898, |
|
"step": 4760 |
|
}, |
|
{ |
|
"epoch": 8.83, |
|
"grad_norm": 1.1041797399520874, |
|
"learning_rate": 0.000587962962962963, |
|
"loss": 0.3086, |
|
"step": 4770 |
|
}, |
|
{ |
|
"epoch": 8.84, |
|
"grad_norm": 1.2594033479690552, |
|
"learning_rate": 0.0005787037037037037, |
|
"loss": 0.3981, |
|
"step": 4780 |
|
}, |
|
{ |
|
"epoch": 8.86, |
|
"grad_norm": 0.9759043455123901, |
|
"learning_rate": 0.0005694444444444445, |
|
"loss": 0.356, |
|
"step": 4790 |
|
}, |
|
{ |
|
"epoch": 8.88, |
|
"grad_norm": 1.4041826725006104, |
|
"learning_rate": 0.0005601851851851852, |
|
"loss": 0.3514, |
|
"step": 4800 |
|
}, |
|
{ |
|
"epoch": 8.9, |
|
"grad_norm": 0.9928153157234192, |
|
"learning_rate": 0.000550925925925926, |
|
"loss": 0.388, |
|
"step": 4810 |
|
}, |
|
{ |
|
"epoch": 8.92, |
|
"grad_norm": 1.0990333557128906, |
|
"learning_rate": 0.0005416666666666668, |
|
"loss": 0.3013, |
|
"step": 4820 |
|
}, |
|
{ |
|
"epoch": 8.94, |
|
"grad_norm": 1.2321481704711914, |
|
"learning_rate": 0.0005324074074074074, |
|
"loss": 0.4016, |
|
"step": 4830 |
|
}, |
|
{ |
|
"epoch": 8.95, |
|
"grad_norm": 1.1745514869689941, |
|
"learning_rate": 0.0005231481481481481, |
|
"loss": 0.3601, |
|
"step": 4840 |
|
}, |
|
{ |
|
"epoch": 8.97, |
|
"grad_norm": 1.633880615234375, |
|
"learning_rate": 0.0005138888888888889, |
|
"loss": 0.3291, |
|
"step": 4850 |
|
}, |
|
{ |
|
"epoch": 8.99, |
|
"grad_norm": 1.1216607093811035, |
|
"learning_rate": 0.0005046296296296297, |
|
"loss": 0.3314, |
|
"step": 4860 |
|
}, |
|
{ |
|
"epoch": 9.0, |
|
"eval_accuracy": 0.986134647974118, |
|
"eval_f1": 0.987440563210657, |
|
"eval_loss": 0.038755957037210464, |
|
"eval_precision": 0.9891828577753293, |
|
"eval_recall": 0.98596866524898, |
|
"eval_runtime": 55.0297, |
|
"eval_samples_per_second": 117.955, |
|
"eval_steps_per_second": 7.378, |
|
"step": 4864 |
|
}, |
|
{ |
|
"epoch": 9.01, |
|
"grad_norm": 1.2497656345367432, |
|
"learning_rate": 0.0004953703703703704, |
|
"loss": 0.3183, |
|
"step": 4870 |
|
}, |
|
{ |
|
"epoch": 9.03, |
|
"grad_norm": 1.5892603397369385, |
|
"learning_rate": 0.00048611111111111115, |
|
"loss": 0.3222, |
|
"step": 4880 |
|
}, |
|
{ |
|
"epoch": 9.05, |
|
"grad_norm": 1.4494744539260864, |
|
"learning_rate": 0.00047685185185185184, |
|
"loss": 0.3573, |
|
"step": 4890 |
|
}, |
|
{ |
|
"epoch": 9.07, |
|
"grad_norm": 1.7034820318222046, |
|
"learning_rate": 0.0004675925925925926, |
|
"loss": 0.3194, |
|
"step": 4900 |
|
}, |
|
{ |
|
"epoch": 9.08, |
|
"grad_norm": 0.8668962121009827, |
|
"learning_rate": 0.0004583333333333333, |
|
"loss": 0.3483, |
|
"step": 4910 |
|
}, |
|
{ |
|
"epoch": 9.1, |
|
"grad_norm": 1.081874132156372, |
|
"learning_rate": 0.0004490740740740741, |
|
"loss": 0.3626, |
|
"step": 4920 |
|
}, |
|
{ |
|
"epoch": 9.12, |
|
"grad_norm": 1.78743314743042, |
|
"learning_rate": 0.0004398148148148148, |
|
"loss": 0.3651, |
|
"step": 4930 |
|
}, |
|
{ |
|
"epoch": 9.14, |
|
"grad_norm": 1.1324867010116577, |
|
"learning_rate": 0.00043055555555555555, |
|
"loss": 0.2714, |
|
"step": 4940 |
|
}, |
|
{ |
|
"epoch": 9.16, |
|
"grad_norm": 1.9258543252944946, |
|
"learning_rate": 0.0004212962962962963, |
|
"loss": 0.3635, |
|
"step": 4950 |
|
}, |
|
{ |
|
"epoch": 9.18, |
|
"grad_norm": 1.2044012546539307, |
|
"learning_rate": 0.00041203703703703704, |
|
"loss": 0.3126, |
|
"step": 4960 |
|
}, |
|
{ |
|
"epoch": 9.2, |
|
"grad_norm": 1.3639435768127441, |
|
"learning_rate": 0.00040277777777777783, |
|
"loss": 0.3203, |
|
"step": 4970 |
|
}, |
|
{ |
|
"epoch": 9.21, |
|
"grad_norm": 1.7811472415924072, |
|
"learning_rate": 0.0003935185185185185, |
|
"loss": 0.3208, |
|
"step": 4980 |
|
}, |
|
{ |
|
"epoch": 9.23, |
|
"grad_norm": 1.4253028631210327, |
|
"learning_rate": 0.00038425925925925927, |
|
"loss": 0.3326, |
|
"step": 4990 |
|
}, |
|
{ |
|
"epoch": 9.25, |
|
"grad_norm": 1.4700595140457153, |
|
"learning_rate": 0.000375, |
|
"loss": 0.3015, |
|
"step": 5000 |
|
}, |
|
{ |
|
"epoch": 9.27, |
|
"grad_norm": 2.1158525943756104, |
|
"learning_rate": 0.0003657407407407407, |
|
"loss": 0.3472, |
|
"step": 5010 |
|
}, |
|
{ |
|
"epoch": 9.29, |
|
"grad_norm": 1.325528621673584, |
|
"learning_rate": 0.0003564814814814815, |
|
"loss": 0.3365, |
|
"step": 5020 |
|
}, |
|
{ |
|
"epoch": 9.31, |
|
"grad_norm": 1.173003077507019, |
|
"learning_rate": 0.00034722222222222224, |
|
"loss": 0.3213, |
|
"step": 5030 |
|
}, |
|
{ |
|
"epoch": 9.32, |
|
"grad_norm": 2.355008840560913, |
|
"learning_rate": 0.000337962962962963, |
|
"loss": 0.3386, |
|
"step": 5040 |
|
}, |
|
{ |
|
"epoch": 9.34, |
|
"grad_norm": 1.198581576347351, |
|
"learning_rate": 0.0003287037037037037, |
|
"loss": 0.2985, |
|
"step": 5050 |
|
}, |
|
{ |
|
"epoch": 9.36, |
|
"grad_norm": 1.7199565172195435, |
|
"learning_rate": 0.0003194444444444444, |
|
"loss": 0.3666, |
|
"step": 5060 |
|
}, |
|
{ |
|
"epoch": 9.38, |
|
"grad_norm": 1.359098196029663, |
|
"learning_rate": 0.0003101851851851852, |
|
"loss": 0.327, |
|
"step": 5070 |
|
}, |
|
{ |
|
"epoch": 9.4, |
|
"grad_norm": 1.2990363836288452, |
|
"learning_rate": 0.0003009259259259259, |
|
"loss": 0.3673, |
|
"step": 5080 |
|
}, |
|
{ |
|
"epoch": 9.42, |
|
"grad_norm": 1.5110586881637573, |
|
"learning_rate": 0.0002916666666666667, |
|
"loss": 0.3124, |
|
"step": 5090 |
|
}, |
|
{ |
|
"epoch": 9.44, |
|
"grad_norm": 1.6193897724151611, |
|
"learning_rate": 0.0002824074074074074, |
|
"loss": 0.3056, |
|
"step": 5100 |
|
}, |
|
{ |
|
"epoch": 9.45, |
|
"grad_norm": 1.2994916439056396, |
|
"learning_rate": 0.0002731481481481482, |
|
"loss": 0.3041, |
|
"step": 5110 |
|
}, |
|
{ |
|
"epoch": 9.47, |
|
"grad_norm": 1.1582506895065308, |
|
"learning_rate": 0.0002638888888888889, |
|
"loss": 0.325, |
|
"step": 5120 |
|
}, |
|
{ |
|
"epoch": 9.49, |
|
"grad_norm": 1.2252171039581299, |
|
"learning_rate": 0.0002546296296296296, |
|
"loss": 0.3541, |
|
"step": 5130 |
|
}, |
|
{ |
|
"epoch": 9.51, |
|
"grad_norm": 1.0199075937271118, |
|
"learning_rate": 0.0002453703703703704, |
|
"loss": 0.3763, |
|
"step": 5140 |
|
}, |
|
{ |
|
"epoch": 9.53, |
|
"grad_norm": 1.113032341003418, |
|
"learning_rate": 0.00023611111111111112, |
|
"loss": 0.3459, |
|
"step": 5150 |
|
}, |
|
{ |
|
"epoch": 9.55, |
|
"grad_norm": 1.1221752166748047, |
|
"learning_rate": 0.00022685185185185186, |
|
"loss": 0.284, |
|
"step": 5160 |
|
}, |
|
{ |
|
"epoch": 9.57, |
|
"grad_norm": 1.0831656455993652, |
|
"learning_rate": 0.0002175925925925926, |
|
"loss": 0.3117, |
|
"step": 5170 |
|
}, |
|
{ |
|
"epoch": 9.58, |
|
"grad_norm": 0.723981499671936, |
|
"learning_rate": 0.00020833333333333332, |
|
"loss": 0.3231, |
|
"step": 5180 |
|
}, |
|
{ |
|
"epoch": 9.6, |
|
"grad_norm": 1.0349414348602295, |
|
"learning_rate": 0.0001990740740740741, |
|
"loss": 0.3344, |
|
"step": 5190 |
|
}, |
|
{ |
|
"epoch": 9.62, |
|
"grad_norm": 0.9438874125480652, |
|
"learning_rate": 0.0001898148148148148, |
|
"loss": 0.3272, |
|
"step": 5200 |
|
}, |
|
{ |
|
"epoch": 9.64, |
|
"grad_norm": 1.0570435523986816, |
|
"learning_rate": 0.00018055555555555555, |
|
"loss": 0.3309, |
|
"step": 5210 |
|
}, |
|
{ |
|
"epoch": 9.66, |
|
"grad_norm": 0.9774361848831177, |
|
"learning_rate": 0.00017129629629629632, |
|
"loss": 0.3129, |
|
"step": 5220 |
|
}, |
|
{ |
|
"epoch": 9.68, |
|
"grad_norm": 1.1588292121887207, |
|
"learning_rate": 0.00016203703703703703, |
|
"loss": 0.3287, |
|
"step": 5230 |
|
}, |
|
{ |
|
"epoch": 9.69, |
|
"grad_norm": 1.1384470462799072, |
|
"learning_rate": 0.00015277777777777777, |
|
"loss": 0.2838, |
|
"step": 5240 |
|
}, |
|
{ |
|
"epoch": 9.71, |
|
"grad_norm": 1.3432296514511108, |
|
"learning_rate": 0.00014351851851851852, |
|
"loss": 0.3424, |
|
"step": 5250 |
|
}, |
|
{ |
|
"epoch": 9.73, |
|
"grad_norm": 0.6609634160995483, |
|
"learning_rate": 0.00013425925925925926, |
|
"loss": 0.2843, |
|
"step": 5260 |
|
}, |
|
{ |
|
"epoch": 9.75, |
|
"grad_norm": 1.2155038118362427, |
|
"learning_rate": 0.000125, |
|
"loss": 0.3159, |
|
"step": 5270 |
|
}, |
|
{ |
|
"epoch": 9.77, |
|
"grad_norm": 1.5623424053192139, |
|
"learning_rate": 0.00011574074074074073, |
|
"loss": 0.2891, |
|
"step": 5280 |
|
}, |
|
{ |
|
"epoch": 9.79, |
|
"grad_norm": 1.9647743701934814, |
|
"learning_rate": 0.00010648148148148149, |
|
"loss": 0.3394, |
|
"step": 5290 |
|
}, |
|
{ |
|
"epoch": 9.81, |
|
"grad_norm": 1.1939747333526611, |
|
"learning_rate": 9.722222222222223e-05, |
|
"loss": 0.3545, |
|
"step": 5300 |
|
}, |
|
{ |
|
"epoch": 9.82, |
|
"grad_norm": 1.5705734491348267, |
|
"learning_rate": 8.796296296296297e-05, |
|
"loss": 0.372, |
|
"step": 5310 |
|
}, |
|
{ |
|
"epoch": 9.84, |
|
"grad_norm": 1.9765841960906982, |
|
"learning_rate": 7.87037037037037e-05, |
|
"loss": 0.3555, |
|
"step": 5320 |
|
}, |
|
{ |
|
"epoch": 9.86, |
|
"grad_norm": 1.168407917022705, |
|
"learning_rate": 6.944444444444444e-05, |
|
"loss": 0.2545, |
|
"step": 5330 |
|
}, |
|
{ |
|
"epoch": 9.88, |
|
"grad_norm": 0.8777741193771362, |
|
"learning_rate": 6.018518518518519e-05, |
|
"loss": 0.3408, |
|
"step": 5340 |
|
}, |
|
{ |
|
"epoch": 9.9, |
|
"grad_norm": 1.2033889293670654, |
|
"learning_rate": 5.092592592592593e-05, |
|
"loss": 0.3518, |
|
"step": 5350 |
|
}, |
|
{ |
|
"epoch": 9.92, |
|
"grad_norm": 1.9429413080215454, |
|
"learning_rate": 4.1666666666666665e-05, |
|
"loss": 0.2779, |
|
"step": 5360 |
|
}, |
|
{ |
|
"epoch": 9.94, |
|
"grad_norm": 1.5541284084320068, |
|
"learning_rate": 3.240740740740741e-05, |
|
"loss": 0.3169, |
|
"step": 5370 |
|
}, |
|
{ |
|
"epoch": 9.95, |
|
"grad_norm": 0.8645866513252258, |
|
"learning_rate": 2.3148148148148147e-05, |
|
"loss": 0.2976, |
|
"step": 5380 |
|
}, |
|
{ |
|
"epoch": 9.97, |
|
"grad_norm": 1.3550152778625488, |
|
"learning_rate": 1.388888888888889e-05, |
|
"loss": 0.2891, |
|
"step": 5390 |
|
}, |
|
{ |
|
"epoch": 9.99, |
|
"grad_norm": 1.666207194328308, |
|
"learning_rate": 4.6296296296296296e-06, |
|
"loss": 0.2865, |
|
"step": 5400 |
|
}, |
|
{ |
|
"epoch": 9.99, |
|
"eval_accuracy": 0.9830534586350331, |
|
"eval_f1": 0.9868535728073075, |
|
"eval_loss": 0.0450444296002388, |
|
"eval_precision": 0.9880250018061109, |
|
"eval_recall": 0.986225426158784, |
|
"eval_runtime": 55.3663, |
|
"eval_samples_per_second": 117.237, |
|
"eval_steps_per_second": 7.333, |
|
"step": 5400 |
|
}, |
|
{ |
|
"epoch": 9.99, |
|
"step": 5400, |
|
"total_flos": 2.695096772600047e+19, |
|
"train_loss": 0.5508582998205114, |
|
"train_runtime": 6179.724, |
|
"train_samples_per_second": 55.959, |
|
"train_steps_per_second": 0.874 |
|
}, |
|
{ |
|
"epoch": 9.99, |
|
"eval_accuracy": 0.9328945888176398, |
|
"eval_f1": 0.9340102214756545, |
|
"eval_loss": 0.23724627494812012, |
|
"eval_precision": 0.9416197933012, |
|
"eval_recall": 0.9295532071921557, |
|
"eval_runtime": 150.877, |
|
"eval_samples_per_second": 117.831, |
|
"eval_steps_per_second": 7.37, |
|
"step": 5400 |
|
}, |
|
{ |
|
"epoch": 9.99, |
|
"eval_accuracy": 0.9328945888176398, |
|
"eval_f1": 0.9340102214756545, |
|
"eval_loss": 0.23724627494812012, |
|
"eval_precision": 0.9416197933012, |
|
"eval_recall": 0.9295532071921557, |
|
"eval_runtime": 150.8426, |
|
"eval_samples_per_second": 117.858, |
|
"eval_steps_per_second": 7.372, |
|
"step": 5400 |
|
} |
|
], |
|
"logging_steps": 10, |
|
"max_steps": 5400, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 10, |
|
"save_steps": 500, |
|
"total_flos": 2.695096772600047e+19, |
|
"train_batch_size": 16, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
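
# Usage sketch (not part of the trainer state itself): how a log like the
# one above can be summarized with only the Python standard library. The
# path "trainer_state.json" and the variable names are illustrative
# assumptions, not taken from the original file.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Per-step training entries carry a "loss" key; per-epoch evaluation
# entries carry "eval_*" keys instead, so the two are easy to separate.
step_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_accuracy" in e]

print(f"{state['num_train_epochs']} epochs, {state['max_steps']} steps, "
      f"batch size {state['train_batch_size']}, "
      f"final train loss {step_logs[-1]['loss']}")
for e in eval_logs:
    print(f"epoch {e['epoch']}: accuracy={e['eval_accuracy']:.4f} "
          f"f1={e['eval_f1']:.4f} eval_loss={e['eval_loss']:.4f}")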