{
  "best_metric": 0.8911917098445595,
  "best_model_checkpoint": "./vit-base-skin/checkpoint-1252",
  "epoch": 4.0,
  "global_step": 2504,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    { "epoch": 0.02, "learning_rate": 0.00019920127795527157, "loss": 1.3607, "step": 10 },
    { "epoch": 0.03, "learning_rate": 0.00019840255591054313, "loss": 1.0863, "step": 20 },
    { "epoch": 0.05, "learning_rate": 0.00019760383386581472, "loss": 0.8928, "step": 30 },
    { "epoch": 0.06, "learning_rate": 0.00019680511182108628, "loss": 1.0954, "step": 40 },
    { "epoch": 0.08, "learning_rate": 0.00019600638977635784, "loss": 0.7844, "step": 50 },
    { "epoch": 0.1, "learning_rate": 0.0001952076677316294, "loss": 0.8456, "step": 60 },
    { "epoch": 0.11, "learning_rate": 0.00019440894568690097, "loss": 0.8827, "step": 70 },
    { "epoch": 0.13, "learning_rate": 0.00019361022364217253, "loss": 0.8214, "step": 80 },
    { "epoch": 0.14, "learning_rate": 0.0001928115015974441, "loss": 0.9063, "step": 90 },
    { "epoch": 0.16, "learning_rate": 0.00019201277955271565, "loss": 0.8956, "step": 100 },
    { "epoch": 0.18, "learning_rate": 0.0001912140575079872, "loss": 0.9014, "step": 110 },
    { "epoch": 0.19, "learning_rate": 0.0001904153354632588, "loss": 0.83, "step": 120 },
    { "epoch": 0.21, "learning_rate": 0.00018961661341853036, "loss": 0.7877, "step": 130 },
    { "epoch": 0.22, "learning_rate": 0.00018881789137380192, "loss": 0.6501, "step": 140 },
    { "epoch": 0.24, "learning_rate": 0.00018801916932907348, "loss": 0.8166, "step": 150 },
    { "epoch": 0.26, "learning_rate": 0.00018722044728434505, "loss": 0.8288, "step": 160 },
    { "epoch": 0.27, "learning_rate": 0.00018642172523961663, "loss": 0.662, "step": 170 },
    { "epoch": 0.29, "learning_rate": 0.0001856230031948882, "loss": 0.7948, "step": 180 },
    { "epoch": 0.3, "learning_rate": 0.00018482428115015976, "loss": 0.6333, "step": 190 },
    { "epoch": 0.32, "learning_rate": 0.00018402555910543132, "loss": 0.7313, "step": 200 },
    { "epoch": 0.34, "learning_rate": 0.00018322683706070288, "loss": 0.7396, "step": 210 },
    { "epoch": 0.35, "learning_rate": 0.00018242811501597444, "loss": 0.683, "step": 220 },
    { "epoch": 0.37, "learning_rate": 0.000181629392971246, "loss": 0.6597, "step": 230 },
    { "epoch": 0.38, "learning_rate": 0.00018083067092651756, "loss": 0.6867, "step": 240 },
    { "epoch": 0.4, "learning_rate": 0.00018003194888178913, "loss": 0.5824, "step": 250 },
    { "epoch": 0.42, "learning_rate": 0.00017923322683706071, "loss": 0.7605, "step": 260 },
    { "epoch": 0.43, "learning_rate": 0.00017843450479233228, "loss": 0.7816, "step": 270 },
    { "epoch": 0.45, "learning_rate": 0.00017763578274760384, "loss": 0.7451, "step": 280 },
    { "epoch": 0.46, "learning_rate": 0.0001768370607028754, "loss": 0.7161, "step": 290 },
    { "epoch": 0.48, "learning_rate": 0.000176038338658147, "loss": 0.6315, "step": 300 },
    { "epoch": 0.5, "learning_rate": 0.00017523961661341855, "loss": 0.6246, "step": 310 },
    { "epoch": 0.51, "learning_rate": 0.0001744408945686901, "loss": 0.5847, "step": 320 },
    { "epoch": 0.53, "learning_rate": 0.00017364217252396167, "loss": 0.632, "step": 330 },
    { "epoch": 0.54, "learning_rate": 0.00017284345047923323, "loss": 0.7901, "step": 340 },
    { "epoch": 0.56, "learning_rate": 0.00017204472843450482, "loss": 0.6926, "step": 350 },
    { "epoch": 0.58, "learning_rate": 0.00017124600638977638, "loss": 0.6079, "step": 360 },
    { "epoch": 0.59, "learning_rate": 0.00017044728434504792, "loss": 0.6323, "step": 370 },
    { "epoch": 0.61, "learning_rate": 0.00016964856230031948, "loss": 0.6733, "step": 380 },
    { "epoch": 0.62, "learning_rate": 0.00016884984025559104, "loss": 0.6248, "step": 390 },
    { "epoch": 0.64, "learning_rate": 0.00016805111821086263, "loss": 0.7995, "step": 400 },
    { "epoch": 0.65, "learning_rate": 0.0001672523961661342, "loss": 0.7554, "step": 410 },
    { "epoch": 0.67, "learning_rate": 0.00016645367412140575, "loss": 0.6262, "step": 420 },
    { "epoch": 0.69, "learning_rate": 0.0001656549520766773, "loss": 0.71, "step": 430 },
    { "epoch": 0.7, "learning_rate": 0.0001648562300319489, "loss": 0.6281, "step": 440 },
    { "epoch": 0.72, "learning_rate": 0.00016405750798722046, "loss": 0.6005, "step": 450 },
    { "epoch": 0.73, "learning_rate": 0.00016325878594249202, "loss": 0.5122, "step": 460 },
    { "epoch": 0.75, "learning_rate": 0.00016246006389776359, "loss": 0.5836, "step": 470 },
    { "epoch": 0.77, "learning_rate": 0.00016166134185303515, "loss": 0.5401, "step": 480 },
    { "epoch": 0.78, "learning_rate": 0.00016086261980830674, "loss": 0.8159, "step": 490 },
    { "epoch": 0.8, "learning_rate": 0.0001600638977635783, "loss": 0.632, "step": 500 },
    { "epoch": 0.81, "learning_rate": 0.00015926517571884986, "loss": 0.6293, "step": 510 },
    { "epoch": 0.83, "learning_rate": 0.00015846645367412142, "loss": 0.5831, "step": 520 },
    { "epoch": 0.85, "learning_rate": 0.00015766773162939298, "loss": 0.4642, "step": 530 },
    { "epoch": 0.86, "learning_rate": 0.00015686900958466454, "loss": 0.6678, "step": 540 },
    { "epoch": 0.88, "learning_rate": 0.0001560702875399361, "loss": 0.6439, "step": 550 },
    { "epoch": 0.89, "learning_rate": 0.00015527156549520767, "loss": 0.6224, "step": 560 },
    { "epoch": 0.91, "learning_rate": 0.00015447284345047923, "loss": 0.5306, "step": 570 },
    { "epoch": 0.93, "learning_rate": 0.00015367412140575082, "loss": 0.4909, "step": 580 },
    { "epoch": 0.94, "learning_rate": 0.00015287539936102238, "loss": 0.4913, "step": 590 },
    { "epoch": 0.96, "learning_rate": 0.00015207667731629394, "loss": 0.5959, "step": 600 },
    { "epoch": 0.97, "learning_rate": 0.0001512779552715655, "loss": 0.7232, "step": 610 },
    { "epoch": 0.99, "learning_rate": 0.00015047923322683706, "loss": 0.4987, "step": 620 },
    { "epoch": 1.0, "eval_accuracy": 0.8031088082901554, "eval_loss": 0.5480978488922119, "eval_runtime": 1.2438, "eval_samples_per_second": 155.169, "eval_steps_per_second": 10.452, "step": 626 },
    { "epoch": 1.01, "learning_rate": 0.00014968051118210865, "loss": 0.5424, "step": 630 },
    { "epoch": 1.02, "learning_rate": 0.0001488817891373802, "loss": 0.3795, "step": 640 },
    { "epoch": 1.04, "learning_rate": 0.00014808306709265177, "loss": 0.5057, "step": 650 },
    { "epoch": 1.05, "learning_rate": 0.00014728434504792333, "loss": 0.5375, "step": 660 },
    { "epoch": 1.07, "learning_rate": 0.0001464856230031949, "loss": 0.5068, "step": 670 },
    { "epoch": 1.09, "learning_rate": 0.00014568690095846646, "loss": 0.5207, "step": 680 },
    { "epoch": 1.1, "learning_rate": 0.00014488817891373802, "loss": 0.4394, "step": 690 },
    { "epoch": 1.12, "learning_rate": 0.00014408945686900958, "loss": 0.3614, "step": 700 },
    { "epoch": 1.13, "learning_rate": 0.00014329073482428114, "loss": 0.5282, "step": 710 },
    { "epoch": 1.15, "learning_rate": 0.00014249201277955273, "loss": 0.4466, "step": 720 },
    { "epoch": 1.17, "learning_rate": 0.0001416932907348243, "loss": 0.4081, "step": 730 },
    { "epoch": 1.18, "learning_rate": 0.00014089456869009585, "loss": 0.4235, "step": 740 },
    { "epoch": 1.2, "learning_rate": 0.0001400958466453674, "loss": 0.3791, "step": 750 },
    { "epoch": 1.21, "learning_rate": 0.000139297124600639, "loss": 0.4024, "step": 760 },
    { "epoch": 1.23, "learning_rate": 0.00013849840255591056, "loss": 0.3729, "step": 770 },
    { "epoch": 1.25, "learning_rate": 0.00013769968051118212, "loss": 0.4715, "step": 780 },
    { "epoch": 1.26, "learning_rate": 0.00013690095846645369, "loss": 0.4664, "step": 790 },
    { "epoch": 1.28, "learning_rate": 0.00013610223642172525, "loss": 0.4945, "step": 800 },
    { "epoch": 1.29, "learning_rate": 0.0001353035143769968, "loss": 0.4936, "step": 810 },
    { "epoch": 1.31, "learning_rate": 0.00013450479233226837, "loss": 0.5814, "step": 820 },
    { "epoch": 1.33, "learning_rate": 0.00013370607028753993, "loss": 0.3679, "step": 830 },
    { "epoch": 1.34, "learning_rate": 0.0001329073482428115, "loss": 0.4897, "step": 840 },
    { "epoch": 1.36, "learning_rate": 0.00013210862619808305, "loss": 0.4264, "step": 850 },
    { "epoch": 1.37, "learning_rate": 0.00013130990415335464, "loss": 0.6188, "step": 860 },
    { "epoch": 1.39, "learning_rate": 0.0001305111821086262, "loss": 0.4897, "step": 870 },
    { "epoch": 1.41, "learning_rate": 0.00012971246006389777, "loss": 0.3392, "step": 880 },
    { "epoch": 1.42, "learning_rate": 0.00012891373801916933, "loss": 0.5327, "step": 890 },
    { "epoch": 1.44, "learning_rate": 0.00012811501597444092, "loss": 0.3651, "step": 900 },
    { "epoch": 1.45, "learning_rate": 0.00012731629392971248, "loss": 0.4061, "step": 910 },
    { "epoch": 1.47, "learning_rate": 0.00012651757188498404, "loss": 0.4701, "step": 920 },
    { "epoch": 1.49, "learning_rate": 0.0001257188498402556, "loss": 0.3794, "step": 930 },
    { "epoch": 1.5, "learning_rate": 0.00012492012779552716, "loss": 0.5627, "step": 940 },
    { "epoch": 1.52, "learning_rate": 0.00012412140575079872, "loss": 0.514, "step": 950 },
    { "epoch": 1.53, "learning_rate": 0.00012332268370607028, "loss": 0.424, "step": 960 },
    { "epoch": 1.55, "learning_rate": 0.00012252396166134185, "loss": 0.4829, "step": 970 },
    { "epoch": 1.57, "learning_rate": 0.00012172523961661342, "loss": 0.387, "step": 980 },
    { "epoch": 1.58, "learning_rate": 0.000120926517571885, "loss": 0.472, "step": 990 },
    { "epoch": 1.6, "learning_rate": 0.00012012779552715656, "loss": 0.3891, "step": 1000 },
    { "epoch": 1.61, "learning_rate": 0.00011932907348242812, "loss": 0.4143, "step": 1010 },
    { "epoch": 1.63, "learning_rate": 0.00011853035143769968, "loss": 0.4191, "step": 1020 },
    { "epoch": 1.65, "learning_rate": 0.00011773162939297124, "loss": 0.44, "step": 1030 },
    { "epoch": 1.66, "learning_rate": 0.00011693290734824283, "loss": 0.4942, "step": 1040 },
    { "epoch": 1.68, "learning_rate": 0.00011613418530351439, "loss": 0.4446, "step": 1050 },
    { "epoch": 1.69, "learning_rate": 0.00011533546325878595, "loss": 0.3974, "step": 1060 },
    { "epoch": 1.71, "learning_rate": 0.00011453674121405751, "loss": 0.4112, "step": 1070 },
    { "epoch": 1.73, "learning_rate": 0.00011373801916932908, "loss": 0.4987, "step": 1080 },
    { "epoch": 1.74, "learning_rate": 0.00011293929712460065, "loss": 0.3738, "step": 1090 },
    { "epoch": 1.76, "learning_rate": 0.00011214057507987221, "loss": 0.3729, "step": 1100 },
    { "epoch": 1.77, "learning_rate": 0.00011134185303514377, "loss": 0.582, "step": 1110 },
    { "epoch": 1.79, "learning_rate": 0.00011054313099041533, "loss": 0.3539, "step": 1120 },
    { "epoch": 1.81, "learning_rate": 0.00010974440894568691, "loss": 0.222, "step": 1130 },
    { "epoch": 1.82, "learning_rate": 0.00010894568690095847, "loss": 0.2987, "step": 1140 },
    { "epoch": 1.84, "learning_rate": 0.00010814696485623003, "loss": 0.5448, "step": 1150 },
    { "epoch": 1.85, "learning_rate": 0.0001073482428115016, "loss": 0.377, "step": 1160 },
    { "epoch": 1.87, "learning_rate": 0.00010654952076677316, "loss": 0.3518, "step": 1170 },
    { "epoch": 1.88, "learning_rate": 0.00010575079872204474, "loss": 0.5725, "step": 1180 },
    { "epoch": 1.9, "learning_rate": 0.0001049520766773163, "loss": 0.2406, "step": 1190 },
    { "epoch": 1.92, "learning_rate": 0.00010415335463258787, "loss": 0.3681, "step": 1200 },
    { "epoch": 1.93, "learning_rate": 0.00010335463258785943, "loss": 0.4454, "step": 1210 },
    { "epoch": 1.95, "learning_rate": 0.000102555910543131, "loss": 0.2933, "step": 1220 },
    { "epoch": 1.96, "learning_rate": 0.00010175718849840256, "loss": 0.3093, "step": 1230 },
    { "epoch": 1.98, "learning_rate": 0.00010095846645367413, "loss": 0.4952, "step": 1240 },
    { "epoch": 2.0, "learning_rate": 0.00010015974440894569, "loss": 0.3814, "step": 1250 },
    { "epoch": 2.0, "eval_accuracy": 0.8911917098445595, "eval_loss": 0.38037967681884766, "eval_runtime": 1.2431, "eval_samples_per_second": 155.258, "eval_steps_per_second": 10.458, "step": 1252 },
    { "epoch": 2.01, "learning_rate": 9.936102236421726e-05, "loss": 0.347, "step": 1260 },
    { "epoch": 2.03, "learning_rate": 9.856230031948882e-05, "loss": 0.1718, "step": 1270 },
    { "epoch": 2.04, "learning_rate": 9.77635782747604e-05, "loss": 0.2688, "step": 1280 },
    { "epoch": 2.06, "learning_rate": 9.696485623003195e-05, "loss": 0.2019, "step": 1290 },
    { "epoch": 2.08, "learning_rate": 9.616613418530351e-05, "loss": 0.2318, "step": 1300 },
    { "epoch": 2.09, "learning_rate": 9.536741214057508e-05, "loss": 0.2364, "step": 1310 },
    { "epoch": 2.11, "learning_rate": 9.456869009584664e-05, "loss": 0.2729, "step": 1320 },
    { "epoch": 2.12, "learning_rate": 9.376996805111822e-05, "loss": 0.2046, "step": 1330 },
    { "epoch": 2.14, "learning_rate": 9.297124600638978e-05, "loss": 0.2405, "step": 1340 },
    { "epoch": 2.16, "learning_rate": 9.217252396166136e-05, "loss": 0.1687, "step": 1350 },
    { "epoch": 2.17, "learning_rate": 9.137380191693292e-05, "loss": 0.2252, "step": 1360 },
    { "epoch": 2.19, "learning_rate": 9.057507987220448e-05, "loss": 0.1544, "step": 1370 },
    { "epoch": 2.2, "learning_rate": 8.977635782747604e-05, "loss": 0.2436, "step": 1380 },
    { "epoch": 2.22, "learning_rate": 8.89776357827476e-05, "loss": 0.2051, "step": 1390 },
    { "epoch": 2.24, "learning_rate": 8.817891373801918e-05, "loss": 0.163, "step": 1400 },
    { "epoch": 2.25, "learning_rate": 8.738019169329074e-05, "loss": 0.2489, "step": 1410 },
    { "epoch": 2.27, "learning_rate": 8.658146964856231e-05, "loss": 0.2738, "step": 1420 },
    { "epoch": 2.28, "learning_rate": 8.578274760383387e-05, "loss": 0.1508, "step": 1430 },
    { "epoch": 2.3, "learning_rate": 8.498402555910544e-05, "loss": 0.1544, "step": 1440 },
    { "epoch": 2.32, "learning_rate": 8.4185303514377e-05, "loss": 0.2727, "step": 1450 },
    { "epoch": 2.33, "learning_rate": 8.338658146964856e-05, "loss": 0.1686, "step": 1460 },
    { "epoch": 2.35, "learning_rate": 8.258785942492013e-05, "loss": 0.1416, "step": 1470 },
    { "epoch": 2.36, "learning_rate": 8.17891373801917e-05, "loss": 0.4586, "step": 1480 },
    { "epoch": 2.38, "learning_rate": 8.099041533546327e-05, "loss": 0.1554, "step": 1490 },
    { "epoch": 2.4, "learning_rate": 8.019169329073483e-05, "loss": 0.2235, "step": 1500 },
    { "epoch": 2.41, "learning_rate": 7.939297124600639e-05, "loss": 0.1812, "step": 1510 },
    { "epoch": 2.43, "learning_rate": 7.859424920127795e-05, "loss": 0.1648, "step": 1520 },
    { "epoch": 2.44, "learning_rate": 7.779552715654951e-05, "loss": 0.1314, "step": 1530 },
    { "epoch": 2.46, "learning_rate": 7.699680511182109e-05, "loss": 0.2276, "step": 1540 },
    { "epoch": 2.48, "learning_rate": 7.619808306709265e-05, "loss": 0.1624, "step": 1550 },
    { "epoch": 2.49, "learning_rate": 7.539936102236423e-05, "loss": 0.1603, "step": 1560 },
    { "epoch": 2.51, "learning_rate": 7.460063897763579e-05, "loss": 0.2533, "step": 1570 },
    { "epoch": 2.52, "learning_rate": 7.380191693290735e-05, "loss": 0.1245, "step": 1580 },
    { "epoch": 2.54, "learning_rate": 7.300319488817891e-05, "loss": 0.1848, "step": 1590 },
    { "epoch": 2.56, "learning_rate": 7.220447284345049e-05, "loss": 0.223, "step": 1600 },
    { "epoch": 2.57, "learning_rate": 7.140575079872205e-05, "loss": 0.1223, "step": 1610 },
    { "epoch": 2.59, "learning_rate": 7.060702875399361e-05, "loss": 0.309, "step": 1620 },
    { "epoch": 2.6, "learning_rate": 6.980830670926518e-05, "loss": 0.354, "step": 1630 },
    { "epoch": 2.62, "learning_rate": 6.900958466453674e-05, "loss": 0.2051, "step": 1640 },
    { "epoch": 2.64, "learning_rate": 6.821086261980832e-05, "loss": 0.2932, "step": 1650 },
    { "epoch": 2.65, "learning_rate": 6.741214057507987e-05, "loss": 0.1401, "step": 1660 },
    { "epoch": 2.67, "learning_rate": 6.661341853035144e-05, "loss": 0.3012, "step": 1670 },
    { "epoch": 2.68, "learning_rate": 6.5814696485623e-05, "loss": 0.1481, "step": 1680 },
    { "epoch": 2.7, "learning_rate": 6.501597444089457e-05, "loss": 0.3556, "step": 1690 },
    { "epoch": 2.72, "learning_rate": 6.421725239616614e-05, "loss": 0.1193, "step": 1700 },
    { "epoch": 2.73, "learning_rate": 6.34185303514377e-05, "loss": 0.2956, "step": 1710 },
    { "epoch": 2.75, "learning_rate": 6.261980830670928e-05, "loss": 0.164, "step": 1720 },
    { "epoch": 2.76, "learning_rate": 6.182108626198084e-05, "loss": 0.1218, "step": 1730 },
    { "epoch": 2.78, "learning_rate": 6.1022364217252406e-05, "loss": 0.094, "step": 1740 },
    { "epoch": 2.8, "learning_rate": 6.022364217252396e-05, "loss": 0.1787, "step": 1750 },
    { "epoch": 2.81, "learning_rate": 5.942492012779552e-05, "loss": 0.1833, "step": 1760 },
    { "epoch": 2.83, "learning_rate": 5.86261980830671e-05, "loss": 0.1389, "step": 1770 },
    { "epoch": 2.84, "learning_rate": 5.782747603833866e-05, "loss": 0.2109, "step": 1780 },
    { "epoch": 2.86, "learning_rate": 5.702875399361023e-05, "loss": 0.1668, "step": 1790 },
    { "epoch": 2.88, "learning_rate": 5.623003194888179e-05, "loss": 0.1651, "step": 1800 },
    { "epoch": 2.89, "learning_rate": 5.543130990415336e-05, "loss": 0.2081, "step": 1810 },
    { "epoch": 2.91, "learning_rate": 5.4632587859424925e-05, "loss": 0.2506, "step": 1820 },
    { "epoch": 2.92, "learning_rate": 5.383386581469649e-05, "loss": 0.1272, "step": 1830 },
    { "epoch": 2.94, "learning_rate": 5.3035143769968054e-05, "loss": 0.1332, "step": 1840 },
    { "epoch": 2.96, "learning_rate": 5.2236421725239616e-05, "loss": 0.0827, "step": 1850 },
    { "epoch": 2.97, "learning_rate": 5.1437699680511184e-05, "loss": 0.2688, "step": 1860 },
    { "epoch": 2.99, "learning_rate": 5.0638977635782745e-05, "loss": 0.3182, "step": 1870 },
    { "epoch": 3.0, "eval_accuracy": 0.8652849740932642, "eval_loss": 0.3915177285671234, "eval_runtime": 1.2215, "eval_samples_per_second": 158.006, "eval_steps_per_second": 10.643, "step": 1878 },
    { "epoch": 3.0, "learning_rate": 4.984025559105431e-05, "loss": 0.1101, "step": 1880 },
    { "epoch": 3.02, "learning_rate": 4.904153354632588e-05, "loss": 0.08, "step": 1890 },
    { "epoch": 3.04, "learning_rate": 4.824281150159744e-05, "loss": 0.0661, "step": 1900 },
    { "epoch": 3.05, "learning_rate": 4.744408945686901e-05, "loss": 0.088, "step": 1910 },
    { "epoch": 3.07, "learning_rate": 4.664536741214058e-05, "loss": 0.069, "step": 1920 },
    { "epoch": 3.08, "learning_rate": 4.584664536741215e-05, "loss": 0.0646, "step": 1930 },
    { "epoch": 3.1, "learning_rate": 4.504792332268371e-05, "loss": 0.0334, "step": 1940 },
    { "epoch": 3.12, "learning_rate": 4.424920127795527e-05, "loss": 0.0727, "step": 1950 },
    { "epoch": 3.13, "learning_rate": 4.345047923322684e-05, "loss": 0.0762, "step": 1960 },
    { "epoch": 3.15, "learning_rate": 4.265175718849841e-05, "loss": 0.0199, "step": 1970 },
    { "epoch": 3.16, "learning_rate": 4.185303514376997e-05, "loss": 0.0673, "step": 1980 },
    { "epoch": 3.18, "learning_rate": 4.1054313099041536e-05, "loss": 0.0639, "step": 1990 },
    { "epoch": 3.19, "learning_rate": 4.0255591054313104e-05, "loss": 0.068, "step": 2000 },
    { "epoch": 3.21, "learning_rate": 3.9456869009584666e-05, "loss": 0.034, "step": 2010 },
    { "epoch": 3.23, "learning_rate": 3.8658146964856234e-05, "loss": 0.0384, "step": 2020 },
    { "epoch": 3.24, "learning_rate": 3.7859424920127795e-05, "loss": 0.0797, "step": 2030 },
    { "epoch": 3.26, "learning_rate": 3.7060702875399364e-05, "loss": 0.0842, "step": 2040 },
    { "epoch": 3.27, "learning_rate": 3.6261980830670925e-05, "loss": 0.0483, "step": 2050 },
    { "epoch": 3.29, "learning_rate": 3.546325878594249e-05, "loss": 0.0688, "step": 2060 },
    { "epoch": 3.31, "learning_rate": 3.466453674121406e-05, "loss": 0.0651, "step": 2070 },
    { "epoch": 3.32, "learning_rate": 3.386581469648562e-05, "loss": 0.0215, "step": 2080 },
    { "epoch": 3.34, "learning_rate": 3.306709265175719e-05, "loss": 0.0203, "step": 2090 },
    { "epoch": 3.35, "learning_rate": 3.226837060702875e-05, "loss": 0.035, "step": 2100 },
    { "epoch": 3.37, "learning_rate": 3.146964856230032e-05, "loss": 0.1068, "step": 2110 },
    { "epoch": 3.39, "learning_rate": 3.067092651757188e-05, "loss": 0.0426, "step": 2120 },
    { "epoch": 3.4, "learning_rate": 2.987220447284345e-05, "loss": 0.0234, "step": 2130 },
    { "epoch": 3.42, "learning_rate": 2.907348242811502e-05, "loss": 0.0345, "step": 2140 },
    { "epoch": 3.43, "learning_rate": 2.8274760383386583e-05, "loss": 0.1023, "step": 2150 },
    { "epoch": 3.45, "learning_rate": 2.747603833865815e-05, "loss": 0.0391, "step": 2160 },
    { "epoch": 3.47, "learning_rate": 2.6677316293929716e-05, "loss": 0.0536, "step": 2170 },
    { "epoch": 3.48, "learning_rate": 2.5878594249201278e-05, "loss": 0.0318, "step": 2180 },
    { "epoch": 3.5, "learning_rate": 2.5079872204472842e-05, "loss": 0.0285, "step": 2190 },
    { "epoch": 3.51, "learning_rate": 2.428115015974441e-05, "loss": 0.062, "step": 2200 },
    { "epoch": 3.53, "learning_rate": 2.3482428115015975e-05, "loss": 0.0315, "step": 2210 },
    { "epoch": 3.55, "learning_rate": 2.268370607028754e-05, "loss": 0.0656, "step": 2220 },
    { "epoch": 3.56, "learning_rate": 2.188498402555911e-05, "loss": 0.0152, "step": 2230 },
    { "epoch": 3.58, "learning_rate": 2.108626198083067e-05, "loss": 0.1108, "step": 2240 },
    { "epoch": 3.59, "learning_rate": 2.0287539936102238e-05, "loss": 0.0951, "step": 2250 },
    { "epoch": 3.61, "learning_rate": 1.9488817891373803e-05, "loss": 0.1139, "step": 2260 },
    { "epoch": 3.63, "learning_rate": 1.869009584664537e-05, "loss": 0.027, "step": 2270 },
    { "epoch": 3.64, "learning_rate": 1.7891373801916932e-05, "loss": 0.0577, "step": 2280 },
    { "epoch": 3.66, "learning_rate": 1.7172523961661345e-05, "loss": 0.0411, "step": 2290 },
    { "epoch": 3.67, "learning_rate": 1.6373801916932906e-05, "loss": 0.0706, "step": 2300 },
    { "epoch": 3.69, "learning_rate": 1.5575079872204475e-05, "loss": 0.1357, "step": 2310 },
    { "epoch": 3.71, "learning_rate": 1.477635782747604e-05, "loss": 0.0638, "step": 2320 },
    { "epoch": 3.72, "learning_rate": 1.3977635782747606e-05, "loss": 0.0508, "step": 2330 },
    { "epoch": 3.74, "learning_rate": 1.3178913738019169e-05, "loss": 0.0224, "step": 2340 },
    { "epoch": 3.75, "learning_rate": 1.2380191693290735e-05, "loss": 0.0108, "step": 2350 },
    { "epoch": 3.77, "learning_rate": 1.1581469648562302e-05, "loss": 0.0652, "step": 2360 },
    { "epoch": 3.79, "learning_rate": 1.0782747603833867e-05, "loss": 0.0548, "step": 2370 },
    { "epoch": 3.8, "learning_rate": 9.984025559105432e-06, "loss": 0.0335, "step": 2380 },
    { "epoch": 3.82, "learning_rate": 9.185303514376996e-06, "loss": 0.0372, "step": 2390 },
    { "epoch": 3.83, "learning_rate": 8.386581469648563e-06, "loss": 0.0994, "step": 2400 },
    { "epoch": 3.85, "learning_rate": 7.5878594249201285e-06, "loss": 0.0135, "step": 2410 },
    { "epoch": 3.87, "learning_rate": 6.789137380191693e-06, "loss": 0.0583, "step": 2420 },
    { "epoch": 3.88, "learning_rate": 5.990415335463259e-06, "loss": 0.0961, "step": 2430 },
    { "epoch": 3.9, "learning_rate": 5.191693290734825e-06, "loss": 0.0292, "step": 2440 },
    { "epoch": 3.91, "learning_rate": 4.39297124600639e-06, "loss": 0.0288, "step": 2450 },
    { "epoch": 3.93, "learning_rate": 3.5942492012779555e-06, "loss": 0.0565, "step": 2460 },
    { "epoch": 3.95, "learning_rate": 2.7955271565495207e-06, "loss": 0.0607, "step": 2470 },
    { "epoch": 3.96, "learning_rate": 1.9968051118210863e-06, "loss": 0.0529, "step": 2480 },
    { "epoch": 3.98, "learning_rate": 1.1980830670926517e-06, "loss": 0.0449, "step": 2490 },
    { "epoch": 3.99, "learning_rate": 3.9936102236421723e-07, "loss": 0.067, "step": 2500 },
    { "epoch": 4.0, "eval_accuracy": 0.8704663212435233, "eval_loss": 0.4523600935935974, "eval_runtime": 1.2991, "eval_samples_per_second": 148.567, "eval_steps_per_second": 10.007, "step": 2504 },
    { "epoch": 4.0, "step": 2504, "total_flos": 3.104468219559813e+18, "train_loss": 0.3521563596071336, "train_runtime": 855.1502, "train_samples_per_second": 46.846, "train_steps_per_second": 2.928 }
  ],
  "max_steps": 2504,
  "num_train_epochs": 4,
  "total_flos": 3.104468219559813e+18,
  "trial_name": null,
  "trial_params": null
}