|
{ |
|
"best_metric": null, |
|
"best_model_checkpoint": null, |
|
"epoch": 1.4444444444444444, |
|
"eval_steps": 5, |
|
"global_step": 182, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.007936507936507936, |
|
"grad_norm": 3.5297670364379883, |
|
"learning_rate": 5.555555555555555e-07, |
|
"loss": 0.404, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.015873015873015872, |
|
"grad_norm": 3.6838796138763428, |
|
"learning_rate": 1.111111111111111e-06, |
|
"loss": 0.3185, |
|
"step": 2 |
|
}, |
|
{ |
|
"epoch": 0.023809523809523808, |
|
"grad_norm": 3.5556721687316895, |
|
"learning_rate": 1.6666666666666665e-06, |
|
"loss": 0.2821, |
|
"step": 3 |
|
}, |
|
{ |
|
"epoch": 0.031746031746031744, |
|
"grad_norm": 3.922109842300415, |
|
"learning_rate": 2.222222222222222e-06, |
|
"loss": 0.4036, |
|
"step": 4 |
|
}, |
|
{ |
|
"epoch": 0.03968253968253968, |
|
"grad_norm": 3.9366657733917236, |
|
"learning_rate": 2.7777777777777775e-06, |
|
"loss": 0.3442, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 0.03968253968253968, |
|
"eval_loss": 0.12529698014259338, |
|
"eval_runtime": 113.8002, |
|
"eval_samples_per_second": 26.819, |
|
"eval_steps_per_second": 0.211, |
|
"eval_sts-test_pearson_cosine": 0.886081184413048, |
|
"eval_sts-test_pearson_dot": 0.8767533438290611, |
|
"eval_sts-test_pearson_euclidean": 0.9080817963557108, |
|
"eval_sts-test_pearson_manhattan": 0.9087794191320873, |
|
"eval_sts-test_pearson_max": 0.9087794191320873, |
|
"eval_sts-test_spearman_cosine": 0.9077787555581409, |
|
"eval_sts-test_spearman_dot": 0.8792746633711961, |
|
"eval_sts-test_spearman_euclidean": 0.9039925750881216, |
|
"eval_sts-test_spearman_manhattan": 0.904489537845873, |
|
"eval_sts-test_spearman_max": 0.9077787555581409, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 0.047619047619047616, |
|
"grad_norm": 3.8135547637939453, |
|
"learning_rate": 3.333333333333333e-06, |
|
"loss": 0.4145, |
|
"step": 6 |
|
}, |
|
{ |
|
"epoch": 0.05555555555555555, |
|
"grad_norm": 4.132374286651611, |
|
"learning_rate": 3.888888888888889e-06, |
|
"loss": 0.4224, |
|
"step": 7 |
|
}, |
|
{ |
|
"epoch": 0.06349206349206349, |
|
"grad_norm": 3.9953386783599854, |
|
"learning_rate": 4.444444444444444e-06, |
|
"loss": 0.4048, |
|
"step": 8 |
|
}, |
|
{ |
|
"epoch": 0.07142857142857142, |
|
"grad_norm": 4.023675918579102, |
|
"learning_rate": 4.9999999999999996e-06, |
|
"loss": 0.3899, |
|
"step": 9 |
|
}, |
|
{ |
|
"epoch": 0.07936507936507936, |
|
"grad_norm": 3.854191780090332, |
|
"learning_rate": 5.555555555555555e-06, |
|
"loss": 0.4127, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.07936507936507936, |
|
"eval_loss": 0.12369368970394135, |
|
"eval_runtime": 113.6707, |
|
"eval_samples_per_second": 26.849, |
|
"eval_steps_per_second": 0.211, |
|
"eval_sts-test_pearson_cosine": 0.8860118050647048, |
|
"eval_sts-test_pearson_dot": 0.8760605933678182, |
|
"eval_sts-test_pearson_euclidean": 0.9086480781293332, |
|
"eval_sts-test_pearson_manhattan": 0.9092897840847158, |
|
"eval_sts-test_pearson_max": 0.9092897840847158, |
|
"eval_sts-test_spearman_cosine": 0.9078577415344969, |
|
"eval_sts-test_spearman_dot": 0.8791339654053815, |
|
"eval_sts-test_spearman_euclidean": 0.9047648028546915, |
|
"eval_sts-test_spearman_manhattan": 0.9052383607027356, |
|
"eval_sts-test_spearman_max": 0.9078577415344969, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.0873015873015873, |
|
"grad_norm": 3.8079540729522705, |
|
"learning_rate": 6.11111111111111e-06, |
|
"loss": 0.3496, |
|
"step": 11 |
|
}, |
|
{ |
|
"epoch": 0.09523809523809523, |
|
"grad_norm": 3.929018259048462, |
|
"learning_rate": 6.666666666666666e-06, |
|
"loss": 0.3731, |
|
"step": 12 |
|
}, |
|
{ |
|
"epoch": 0.10317460317460317, |
|
"grad_norm": 4.284013271331787, |
|
"learning_rate": 7.222222222222221e-06, |
|
"loss": 0.3929, |
|
"step": 13 |
|
}, |
|
{ |
|
"epoch": 0.1111111111111111, |
|
"grad_norm": 3.3490402698516846, |
|
"learning_rate": 7.777777777777777e-06, |
|
"loss": 0.2957, |
|
"step": 14 |
|
}, |
|
{ |
|
"epoch": 0.11904761904761904, |
|
"grad_norm": 3.553280830383301, |
|
"learning_rate": 8.333333333333332e-06, |
|
"loss": 0.3324, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 0.11904761904761904, |
|
"eval_loss": 0.12056715041399002, |
|
"eval_runtime": 113.718, |
|
"eval_samples_per_second": 26.838, |
|
"eval_steps_per_second": 0.211, |
|
"eval_sts-test_pearson_cosine": 0.8856265458568289, |
|
"eval_sts-test_pearson_dot": 0.8743050518330721, |
|
"eval_sts-test_pearson_euclidean": 0.9095228583162331, |
|
"eval_sts-test_pearson_manhattan": 0.9101600217218586, |
|
"eval_sts-test_pearson_max": 0.9101600217218586, |
|
"eval_sts-test_spearman_cosine": 0.908261263658463, |
|
"eval_sts-test_spearman_dot": 0.87867141636764, |
|
"eval_sts-test_spearman_euclidean": 0.9060734192402989, |
|
"eval_sts-test_spearman_manhattan": 0.9066336155303966, |
|
"eval_sts-test_spearman_max": 0.908261263658463, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 0.12698412698412698, |
|
"grad_norm": 3.6310322284698486, |
|
"learning_rate": 8.888888888888888e-06, |
|
"loss": 0.3341, |
|
"step": 16 |
|
}, |
|
{ |
|
"epoch": 0.1349206349206349, |
|
"grad_norm": 3.6535122394561768, |
|
"learning_rate": 9.444444444444443e-06, |
|
"loss": 0.3466, |
|
"step": 17 |
|
}, |
|
{ |
|
"epoch": 0.14285714285714285, |
|
"grad_norm": 3.6199331283569336, |
|
"learning_rate": 9.999999999999999e-06, |
|
"loss": 0.3558, |
|
"step": 18 |
|
}, |
|
{ |
|
"epoch": 0.15079365079365079, |
|
"grad_norm": 3.089895248413086, |
|
"learning_rate": 1.0555555555555554e-05, |
|
"loss": 0.2634, |
|
"step": 19 |
|
}, |
|
{ |
|
"epoch": 0.15873015873015872, |
|
"grad_norm": 3.320916175842285, |
|
"learning_rate": 1.111111111111111e-05, |
|
"loss": 0.3095, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.15873015873015872, |
|
"eval_loss": 0.11563990265130997, |
|
"eval_runtime": 113.5377, |
|
"eval_samples_per_second": 26.881, |
|
"eval_steps_per_second": 0.211, |
|
"eval_sts-test_pearson_cosine": 0.8848740042612456, |
|
"eval_sts-test_pearson_dot": 0.8724689429546052, |
|
"eval_sts-test_pearson_euclidean": 0.9104294765782397, |
|
"eval_sts-test_pearson_manhattan": 0.9111381492292419, |
|
"eval_sts-test_pearson_max": 0.9111381492292419, |
|
"eval_sts-test_spearman_cosine": 0.9087803335393421, |
|
"eval_sts-test_spearman_dot": 0.8777188410176626, |
|
"eval_sts-test_spearman_euclidean": 0.9069791847708608, |
|
"eval_sts-test_spearman_manhattan": 0.9078148698260838, |
|
"eval_sts-test_spearman_max": 0.9087803335393421, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.16666666666666666, |
|
"grad_norm": 3.0193159580230713, |
|
"learning_rate": 1.1666666666666665e-05, |
|
"loss": 0.2973, |
|
"step": 21 |
|
}, |
|
{ |
|
"epoch": 0.1746031746031746, |
|
"grad_norm": 3.3553476333618164, |
|
"learning_rate": 1.222222222222222e-05, |
|
"loss": 0.2884, |
|
"step": 22 |
|
}, |
|
{ |
|
"epoch": 0.18253968253968253, |
|
"grad_norm": 3.5176496505737305, |
|
"learning_rate": 1.2777777777777775e-05, |
|
"loss": 0.3697, |
|
"step": 23 |
|
}, |
|
{ |
|
"epoch": 0.19047619047619047, |
|
"grad_norm": 3.2073943614959717, |
|
"learning_rate": 1.3333333333333332e-05, |
|
"loss": 0.2683, |
|
"step": 24 |
|
}, |
|
{ |
|
"epoch": 0.1984126984126984, |
|
"grad_norm": 3.2101964950561523, |
|
"learning_rate": 1.3888888888888886e-05, |
|
"loss": 0.3026, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 0.1984126984126984, |
|
"eval_loss": 0.10958973318338394, |
|
"eval_runtime": 113.6214, |
|
"eval_samples_per_second": 26.861, |
|
"eval_steps_per_second": 0.211, |
|
"eval_sts-test_pearson_cosine": 0.8832622086480311, |
|
"eval_sts-test_pearson_dot": 0.8697582354953435, |
|
"eval_sts-test_pearson_euclidean": 0.9107566690862425, |
|
"eval_sts-test_pearson_manhattan": 0.9115546986654615, |
|
"eval_sts-test_pearson_max": 0.9115546986654615, |
|
"eval_sts-test_spearman_cosine": 0.9087605087305455, |
|
"eval_sts-test_spearman_dot": 0.8760767382321666, |
|
"eval_sts-test_spearman_euclidean": 0.9073999361304628, |
|
"eval_sts-test_spearman_manhattan": 0.9084107328715103, |
|
"eval_sts-test_spearman_max": 0.9087605087305455, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 0.20634920634920634, |
|
"grad_norm": 2.84037709236145, |
|
"learning_rate": 1.4444444444444442e-05, |
|
"loss": 0.2441, |
|
"step": 26 |
|
}, |
|
{ |
|
"epoch": 0.21428571428571427, |
|
"grad_norm": 3.3099992275238037, |
|
"learning_rate": 1.4999999999999999e-05, |
|
"loss": 0.3145, |
|
"step": 27 |
|
}, |
|
{ |
|
"epoch": 0.2222222222222222, |
|
"grad_norm": 3.061953067779541, |
|
"learning_rate": 1.5555555555555555e-05, |
|
"loss": 0.3119, |
|
"step": 28 |
|
}, |
|
{ |
|
"epoch": 0.23015873015873015, |
|
"grad_norm": 3.0163729190826416, |
|
"learning_rate": 1.6111111111111108e-05, |
|
"loss": 0.2766, |
|
"step": 29 |
|
}, |
|
{ |
|
"epoch": 0.23809523809523808, |
|
"grad_norm": 3.140418291091919, |
|
"learning_rate": 1.6666666666666664e-05, |
|
"loss": 0.3343, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.23809523809523808, |
|
"eval_loss": 0.10535401105880737, |
|
"eval_runtime": 113.5942, |
|
"eval_samples_per_second": 26.868, |
|
"eval_steps_per_second": 0.211, |
|
"eval_sts-test_pearson_cosine": 0.8819465403802665, |
|
"eval_sts-test_pearson_dot": 0.866997957398371, |
|
"eval_sts-test_pearson_euclidean": 0.9110501477101954, |
|
"eval_sts-test_pearson_manhattan": 0.9119047974126511, |
|
"eval_sts-test_pearson_max": 0.9119047974126511, |
|
"eval_sts-test_spearman_cosine": 0.9084358383291508, |
|
"eval_sts-test_spearman_dot": 0.8727757956894143, |
|
"eval_sts-test_spearman_euclidean": 0.9077817538926543, |
|
"eval_sts-test_spearman_manhattan": 0.9089103807049453, |
|
"eval_sts-test_spearman_max": 0.9089103807049453, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.24603174603174602, |
|
"grad_norm": 3.1329221725463867, |
|
"learning_rate": 1.722222222222222e-05, |
|
"loss": 0.344, |
|
"step": 31 |
|
}, |
|
{ |
|
"epoch": 0.25396825396825395, |
|
"grad_norm": 2.9861748218536377, |
|
"learning_rate": 1.7777777777777777e-05, |
|
"loss": 0.3005, |
|
"step": 32 |
|
}, |
|
{ |
|
"epoch": 0.2619047619047619, |
|
"grad_norm": 2.8316733837127686, |
|
"learning_rate": 1.8333333333333333e-05, |
|
"loss": 0.2526, |
|
"step": 33 |
|
}, |
|
{ |
|
"epoch": 0.2698412698412698, |
|
"grad_norm": 2.8335487842559814, |
|
"learning_rate": 1.8888888888888886e-05, |
|
"loss": 0.2422, |
|
"step": 34 |
|
}, |
|
{ |
|
"epoch": 0.2777777777777778, |
|
"grad_norm": 3.0785422325134277, |
|
"learning_rate": 1.9444444444444442e-05, |
|
"loss": 0.3447, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 0.2777777777777778, |
|
"eval_loss": 0.10223711282014847, |
|
"eval_runtime": 113.9847, |
|
"eval_samples_per_second": 26.776, |
|
"eval_steps_per_second": 0.211, |
|
"eval_sts-test_pearson_cosine": 0.8812001280334643, |
|
"eval_sts-test_pearson_dot": 0.8652746969985129, |
|
"eval_sts-test_pearson_euclidean": 0.9105701789873448, |
|
"eval_sts-test_pearson_manhattan": 0.9116177887236803, |
|
"eval_sts-test_pearson_max": 0.9116177887236803, |
|
"eval_sts-test_spearman_cosine": 0.9072243769320245, |
|
"eval_sts-test_spearman_dot": 0.8716048789351082, |
|
"eval_sts-test_spearman_euclidean": 0.9073166540330135, |
|
"eval_sts-test_spearman_manhattan": 0.9081332302996223, |
|
"eval_sts-test_spearman_max": 0.9081332302996223, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 0.2857142857142857, |
|
"grad_norm": 2.944396734237671, |
|
"learning_rate": 1.9999999999999998e-05, |
|
"loss": 0.2809, |
|
"step": 36 |
|
}, |
|
{ |
|
"epoch": 0.29365079365079366, |
|
"grad_norm": 2.8323400020599365, |
|
"learning_rate": 2.0555555555555555e-05, |
|
"loss": 0.2836, |
|
"step": 37 |
|
}, |
|
{ |
|
"epoch": 0.30158730158730157, |
|
"grad_norm": 2.8760273456573486, |
|
"learning_rate": 2.1111111111111107e-05, |
|
"loss": 0.2878, |
|
"step": 38 |
|
}, |
|
{ |
|
"epoch": 0.30952380952380953, |
|
"grad_norm": 2.744379758834839, |
|
"learning_rate": 2.1666666666666667e-05, |
|
"loss": 0.2738, |
|
"step": 39 |
|
}, |
|
{ |
|
"epoch": 0.31746031746031744, |
|
"grad_norm": 2.8519983291625977, |
|
"learning_rate": 2.222222222222222e-05, |
|
"loss": 0.2806, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.31746031746031744, |
|
"eval_loss": 0.10033170133829117, |
|
"eval_runtime": 113.5147, |
|
"eval_samples_per_second": 26.886, |
|
"eval_steps_per_second": 0.211, |
|
"eval_sts-test_pearson_cosine": 0.8802115569848467, |
|
"eval_sts-test_pearson_dot": 0.8634798448575132, |
|
"eval_sts-test_pearson_euclidean": 0.9094188438238102, |
|
"eval_sts-test_pearson_manhattan": 0.9105849471172345, |
|
"eval_sts-test_pearson_max": 0.9105849471172345, |
|
"eval_sts-test_spearman_cosine": 0.9064710789490229, |
|
"eval_sts-test_spearman_dot": 0.8693704037025742, |
|
"eval_sts-test_spearman_euclidean": 0.9064271779615981, |
|
"eval_sts-test_spearman_manhattan": 0.9073247092600637, |
|
"eval_sts-test_spearman_max": 0.9073247092600637, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.3253968253968254, |
|
"grad_norm": 2.9139747619628906, |
|
"learning_rate": 2.2777777777777776e-05, |
|
"loss": 0.2797, |
|
"step": 41 |
|
}, |
|
{ |
|
"epoch": 0.3333333333333333, |
|
"grad_norm": 2.9206557273864746, |
|
"learning_rate": 2.333333333333333e-05, |
|
"loss": 0.3217, |
|
"step": 42 |
|
}, |
|
{ |
|
"epoch": 0.3412698412698413, |
|
"grad_norm": 2.755398988723755, |
|
"learning_rate": 2.388888888888889e-05, |
|
"loss": 0.2544, |
|
"step": 43 |
|
}, |
|
{ |
|
"epoch": 0.3492063492063492, |
|
"grad_norm": 3.0441982746124268, |
|
"learning_rate": 2.444444444444444e-05, |
|
"loss": 0.3203, |
|
"step": 44 |
|
}, |
|
{ |
|
"epoch": 0.35714285714285715, |
|
"grad_norm": 2.978891611099243, |
|
"learning_rate": 2.4999999999999998e-05, |
|
"loss": 0.2987, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 0.35714285714285715, |
|
"eval_loss": 0.09902294725179672, |
|
"eval_runtime": 113.6912, |
|
"eval_samples_per_second": 26.845, |
|
"eval_steps_per_second": 0.211, |
|
"eval_sts-test_pearson_cosine": 0.8796209948380269, |
|
"eval_sts-test_pearson_dot": 0.8617122615494917, |
|
"eval_sts-test_pearson_euclidean": 0.9092272396432914, |
|
"eval_sts-test_pearson_manhattan": 0.9100341993020892, |
|
"eval_sts-test_pearson_max": 0.9100341993020892, |
|
"eval_sts-test_spearman_cosine": 0.9063911531961779, |
|
"eval_sts-test_spearman_dot": 0.867835166929281, |
|
"eval_sts-test_spearman_euclidean": 0.9066020658911155, |
|
"eval_sts-test_spearman_manhattan": 0.9072894005148261, |
|
"eval_sts-test_spearman_max": 0.9072894005148261, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 0.36507936507936506, |
|
"grad_norm": 2.9183595180511475, |
|
"learning_rate": 2.555555555555555e-05, |
|
"loss": 0.2765, |
|
"step": 46 |
|
}, |
|
{ |
|
"epoch": 0.373015873015873, |
|
"grad_norm": 2.960238456726074, |
|
"learning_rate": 2.611111111111111e-05, |
|
"loss": 0.2716, |
|
"step": 47 |
|
}, |
|
{ |
|
"epoch": 0.38095238095238093, |
|
"grad_norm": 3.23356294631958, |
|
"learning_rate": 2.6666666666666663e-05, |
|
"loss": 0.3726, |
|
"step": 48 |
|
}, |
|
{ |
|
"epoch": 0.3888888888888889, |
|
"grad_norm": 2.974705457687378, |
|
"learning_rate": 2.722222222222222e-05, |
|
"loss": 0.2963, |
|
"step": 49 |
|
}, |
|
{ |
|
"epoch": 0.3968253968253968, |
|
"grad_norm": 2.8041574954986572, |
|
"learning_rate": 2.7777777777777772e-05, |
|
"loss": 0.2784, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.3968253968253968, |
|
"eval_loss": 0.09521521627902985, |
|
"eval_runtime": 113.6139, |
|
"eval_samples_per_second": 26.863, |
|
"eval_steps_per_second": 0.211, |
|
"eval_sts-test_pearson_cosine": 0.8802451373465323, |
|
"eval_sts-test_pearson_dot": 0.8609764645232105, |
|
"eval_sts-test_pearson_euclidean": 0.9103012041260427, |
|
"eval_sts-test_pearson_manhattan": 0.9108880877390901, |
|
"eval_sts-test_pearson_max": 0.9108880877390901, |
|
"eval_sts-test_spearman_cosine": 0.9071928272927434, |
|
"eval_sts-test_spearman_dot": 0.867374407941995, |
|
"eval_sts-test_spearman_euclidean": 0.9083242734345022, |
|
"eval_sts-test_spearman_manhattan": 0.9086424996542565, |
|
"eval_sts-test_spearman_max": 0.9086424996542565, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.40476190476190477, |
|
"grad_norm": 2.6451456546783447, |
|
"learning_rate": 2.8333333333333332e-05, |
|
"loss": 0.2437, |
|
"step": 51 |
|
}, |
|
{ |
|
"epoch": 0.4126984126984127, |
|
"grad_norm": 2.7020044326782227, |
|
"learning_rate": 2.8888888888888885e-05, |
|
"loss": 0.2258, |
|
"step": 52 |
|
}, |
|
{ |
|
"epoch": 0.42063492063492064, |
|
"grad_norm": 2.7229156494140625, |
|
"learning_rate": 2.944444444444444e-05, |
|
"loss": 0.2821, |
|
"step": 53 |
|
}, |
|
{ |
|
"epoch": 0.42857142857142855, |
|
"grad_norm": 2.770799398422241, |
|
"learning_rate": 2.9999999999999997e-05, |
|
"loss": 0.249, |
|
"step": 54 |
|
}, |
|
{ |
|
"epoch": 0.4365079365079365, |
|
"grad_norm": 2.762690305709839, |
|
"learning_rate": 3.0555555555555554e-05, |
|
"loss": 0.2813, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 0.4365079365079365, |
|
"eval_loss": 0.09280610829591751, |
|
"eval_runtime": 113.4966, |
|
"eval_samples_per_second": 26.891, |
|
"eval_steps_per_second": 0.211, |
|
"eval_sts-test_pearson_cosine": 0.8804507408393794, |
|
"eval_sts-test_pearson_dot": 0.8631869703781383, |
|
"eval_sts-test_pearson_euclidean": 0.9108211341698824, |
|
"eval_sts-test_pearson_manhattan": 0.9114068237803576, |
|
"eval_sts-test_pearson_max": 0.9114068237803576, |
|
"eval_sts-test_spearman_cosine": 0.9079720810073518, |
|
"eval_sts-test_spearman_dot": 0.8709471248951776, |
|
"eval_sts-test_spearman_euclidean": 0.9085633794241165, |
|
"eval_sts-test_spearman_manhattan": 0.9093315348258998, |
|
"eval_sts-test_spearman_max": 0.9093315348258998, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 0.4444444444444444, |
|
"grad_norm": 2.9767086505889893, |
|
"learning_rate": 3.111111111111111e-05, |
|
"loss": 0.3003, |
|
"step": 56 |
|
}, |
|
{ |
|
"epoch": 0.4523809523809524, |
|
"grad_norm": 2.816253185272217, |
|
"learning_rate": 3.1666666666666666e-05, |
|
"loss": 0.2812, |
|
"step": 57 |
|
}, |
|
{ |
|
"epoch": 0.4603174603174603, |
|
"grad_norm": 2.5184807777404785, |
|
"learning_rate": 3.2222222222222216e-05, |
|
"loss": 0.2619, |
|
"step": 58 |
|
}, |
|
{ |
|
"epoch": 0.46825396825396826, |
|
"grad_norm": 2.7500715255737305, |
|
"learning_rate": 3.277777777777777e-05, |
|
"loss": 0.299, |
|
"step": 59 |
|
}, |
|
{ |
|
"epoch": 0.47619047619047616, |
|
"grad_norm": 2.5309386253356934, |
|
"learning_rate": 3.333333333333333e-05, |
|
"loss": 0.2706, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.47619047619047616, |
|
"eval_loss": 0.09274312108755112, |
|
"eval_runtime": 113.479, |
|
"eval_samples_per_second": 26.895, |
|
"eval_steps_per_second": 0.211, |
|
"eval_sts-test_pearson_cosine": 0.8814996628266308, |
|
"eval_sts-test_pearson_dot": 0.8647617194348185, |
|
"eval_sts-test_pearson_euclidean": 0.9116395612568413, |
|
"eval_sts-test_pearson_manhattan": 0.9121591417317261, |
|
"eval_sts-test_pearson_max": 0.9121591417317261, |
|
"eval_sts-test_spearman_cosine": 0.9087614932582961, |
|
"eval_sts-test_spearman_dot": 0.8732032149869635, |
|
"eval_sts-test_spearman_euclidean": 0.9101066714244602, |
|
"eval_sts-test_spearman_manhattan": 0.9099515188012163, |
|
"eval_sts-test_spearman_max": 0.9101066714244602, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.48412698412698413, |
|
"grad_norm": 2.7175261974334717, |
|
"learning_rate": 3.3888888888888884e-05, |
|
"loss": 0.297, |
|
"step": 61 |
|
}, |
|
{ |
|
"epoch": 0.49206349206349204, |
|
"grad_norm": 2.7492423057556152, |
|
"learning_rate": 3.444444444444444e-05, |
|
"loss": 0.2906, |
|
"step": 62 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"grad_norm": 2.815702438354492, |
|
"learning_rate": 3.5e-05, |
|
"loss": 0.2914, |
|
"step": 63 |
|
}, |
|
{ |
|
"epoch": 0.5079365079365079, |
|
"grad_norm": 2.9056921005249023, |
|
"learning_rate": 3.499798538091195e-05, |
|
"loss": 0.2669, |
|
"step": 64 |
|
}, |
|
{ |
|
"epoch": 0.5158730158730159, |
|
"grad_norm": 2.832461357116699, |
|
"learning_rate": 3.4991942080268184e-05, |
|
"loss": 0.2723, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 0.5158730158730159, |
|
"eval_loss": 0.09455278515815735, |
|
"eval_runtime": 113.5618, |
|
"eval_samples_per_second": 26.875, |
|
"eval_steps_per_second": 0.211, |
|
"eval_sts-test_pearson_cosine": 0.8827592572843797, |
|
"eval_sts-test_pearson_dot": 0.8655702748779494, |
|
"eval_sts-test_pearson_euclidean": 0.9124138196335778, |
|
"eval_sts-test_pearson_manhattan": 0.9124858955018784, |
|
"eval_sts-test_pearson_max": 0.9124858955018784, |
|
"eval_sts-test_spearman_cosine": 0.9092536676310787, |
|
"eval_sts-test_spearman_dot": 0.87468645079452, |
|
"eval_sts-test_spearman_euclidean": 0.910149408879089, |
|
"eval_sts-test_spearman_manhattan": 0.9104867886387189, |
|
"eval_sts-test_spearman_max": 0.9104867886387189, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 0.5238095238095238, |
|
"grad_norm": 2.834491729736328, |
|
"learning_rate": 3.4981871767775944e-05, |
|
"loss": 0.3194, |
|
"step": 66 |
|
}, |
|
{ |
|
"epoch": 0.5317460317460317, |
|
"grad_norm": 3.168403148651123, |
|
"learning_rate": 3.496777722576811e-05, |
|
"loss": 0.3585, |
|
"step": 67 |
|
}, |
|
{ |
|
"epoch": 0.5396825396825397, |
|
"grad_norm": 2.8590433597564697, |
|
"learning_rate": 3.494966234843439e-05, |
|
"loss": 0.2843, |
|
"step": 68 |
|
}, |
|
{ |
|
"epoch": 0.5476190476190477, |
|
"grad_norm": 2.4585649967193604, |
|
"learning_rate": 3.4927532140745435e-05, |
|
"loss": 0.1916, |
|
"step": 69 |
|
}, |
|
{ |
|
"epoch": 0.5555555555555556, |
|
"grad_norm": 3.0862460136413574, |
|
"learning_rate": 3.490139271707e-05, |
|
"loss": 0.351, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.5555555555555556, |
|
"eval_loss": 0.09706800431013107, |
|
"eval_runtime": 113.496, |
|
"eval_samples_per_second": 26.891, |
|
"eval_steps_per_second": 0.211, |
|
"eval_sts-test_pearson_cosine": 0.8816183440112817, |
|
"eval_sts-test_pearson_dot": 0.863407251078466, |
|
"eval_sts-test_pearson_euclidean": 0.9125994563651346, |
|
"eval_sts-test_pearson_manhattan": 0.9121928260729458, |
|
"eval_sts-test_pearson_max": 0.9125994563651346, |
|
"eval_sts-test_spearman_cosine": 0.9103631836274073, |
|
"eval_sts-test_spearman_dot": 0.8729154643762167, |
|
"eval_sts-test_spearman_euclidean": 0.9106339755374351, |
|
"eval_sts-test_spearman_manhattan": 0.9104940383430642, |
|
"eval_sts-test_spearman_max": 0.9106339755374351, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.5634920634920635, |
|
"grad_norm": 2.948397636413574, |
|
"learning_rate": 3.48712512994856e-05, |
|
"loss": 0.3105, |
|
"step": 71 |
|
}, |
|
{ |
|
"epoch": 0.5714285714285714, |
|
"grad_norm": 2.904085159301758, |
|
"learning_rate": 3.4837116215783116e-05, |
|
"loss": 0.2847, |
|
"step": 72 |
|
}, |
|
{ |
|
"epoch": 0.5793650793650794, |
|
"grad_norm": 2.6948978900909424, |
|
"learning_rate": 3.4798996897165926e-05, |
|
"loss": 0.2641, |
|
"step": 73 |
|
}, |
|
{ |
|
"epoch": 0.5873015873015873, |
|
"grad_norm": 3.068554162979126, |
|
"learning_rate": 3.475690387564411e-05, |
|
"loss": 0.3305, |
|
"step": 74 |
|
}, |
|
{ |
|
"epoch": 0.5952380952380952, |
|
"grad_norm": 2.6903178691864014, |
|
"learning_rate": 3.471084878112459e-05, |
|
"loss": 0.2461, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 0.5952380952380952, |
|
"eval_loss": 0.09646341949701309, |
|
"eval_runtime": 113.5342, |
|
"eval_samples_per_second": 26.882, |
|
"eval_steps_per_second": 0.211, |
|
"eval_sts-test_pearson_cosine": 0.879746728283104, |
|
"eval_sts-test_pearson_dot": 0.85998475002447, |
|
"eval_sts-test_pearson_euclidean": 0.9117602609729114, |
|
"eval_sts-test_pearson_manhattan": 0.9111396965114745, |
|
"eval_sts-test_pearson_max": 0.9117602609729114, |
|
"eval_sts-test_spearman_cosine": 0.9096228207862964, |
|
"eval_sts-test_spearman_dot": 0.8689540379665887, |
|
"eval_sts-test_spearman_euclidean": 0.9099527718365351, |
|
"eval_sts-test_spearman_manhattan": 0.9098263942743658, |
|
"eval_sts-test_spearman_max": 0.9099527718365351, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 0.6031746031746031, |
|
"grad_norm": 2.81105637550354, |
|
"learning_rate": 3.4660844338197886e-05, |
|
"loss": 0.259, |
|
"step": 76 |
|
}, |
|
{ |
|
"epoch": 0.6111111111111112, |
|
"grad_norm": 2.629365921020508, |
|
"learning_rate": 3.460690436262242e-05, |
|
"loss": 0.2506, |
|
"step": 77 |
|
}, |
|
{ |
|
"epoch": 0.6190476190476191, |
|
"grad_norm": 2.6665291786193848, |
|
"learning_rate": 3.454904375750738e-05, |
|
"loss": 0.2832, |
|
"step": 78 |
|
}, |
|
{ |
|
"epoch": 0.626984126984127, |
|
"grad_norm": 2.916246175765991, |
|
"learning_rate": 3.448727850919509e-05, |
|
"loss": 0.3322, |
|
"step": 79 |
|
}, |
|
{ |
|
"epoch": 0.6349206349206349, |
|
"grad_norm": 2.4879415035247803, |
|
"learning_rate": 3.442162568284416e-05, |
|
"loss": 0.2533, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.6349206349206349, |
|
"eval_loss": 0.10007175803184509, |
|
"eval_runtime": 113.3295, |
|
"eval_samples_per_second": 26.93, |
|
"eval_steps_per_second": 0.212, |
|
"eval_sts-test_pearson_cosine": 0.8791063595826033, |
|
"eval_sts-test_pearson_dot": 0.8594763353424633, |
|
"eval_sts-test_pearson_euclidean": 0.9109289279488433, |
|
"eval_sts-test_pearson_manhattan": 0.9101783025650423, |
|
"eval_sts-test_pearson_max": 0.9109289279488433, |
|
"eval_sts-test_spearman_cosine": 0.9088725211378084, |
|
"eval_sts-test_spearman_dot": 0.8680133664521414, |
|
"eval_sts-test_spearman_euclidean": 0.9091277823327847, |
|
"eval_sts-test_spearman_manhattan": 0.9091334209917199, |
|
"eval_sts-test_spearman_max": 0.9091334209917199, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.6428571428571429, |
|
"grad_norm": 2.6558098793029785, |
|
"learning_rate": 3.435210341771455e-05, |
|
"loss": 0.2349, |
|
"step": 81 |
|
}, |
|
{ |
|
"epoch": 0.6507936507936508, |
|
"grad_norm": 2.690624475479126, |
|
"learning_rate": 3.427873092215584e-05, |
|
"loss": 0.2748, |
|
"step": 82 |
|
}, |
|
{ |
|
"epoch": 0.6587301587301587, |
|
"grad_norm": 2.451726198196411, |
|
"learning_rate": 3.420152846830015e-05, |
|
"loss": 0.223, |
|
"step": 83 |
|
}, |
|
{ |
|
"epoch": 0.6666666666666666, |
|
"grad_norm": 2.6376216411590576, |
|
"learning_rate": 3.412051738646116e-05, |
|
"loss": 0.2416, |
|
"step": 84 |
|
}, |
|
{ |
|
"epoch": 0.6746031746031746, |
|
"grad_norm": 2.8111939430236816, |
|
"learning_rate": 3.403572005924071e-05, |
|
"loss": 0.2637, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 0.6746031746031746, |
|
"eval_loss": 0.10335631668567657, |
|
"eval_runtime": 113.4166, |
|
"eval_samples_per_second": 26.91, |
|
"eval_steps_per_second": 0.212, |
|
"eval_sts-test_pearson_cosine": 0.8779388329417936, |
|
"eval_sts-test_pearson_dot": 0.8608493769098732, |
|
"eval_sts-test_pearson_euclidean": 0.9095252832629803, |
|
"eval_sts-test_pearson_manhattan": 0.9090695197203245, |
|
"eval_sts-test_pearson_max": 0.9095252832629803, |
|
"eval_sts-test_spearman_cosine": 0.9082387985252446, |
|
"eval_sts-test_spearman_dot": 0.8707913010030126, |
|
"eval_sts-test_spearman_euclidean": 0.9083403391373417, |
|
"eval_sts-test_spearman_manhattan": 0.9084906586243554, |
|
"eval_sts-test_spearman_max": 0.9084906586243554, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 0.6825396825396826, |
|
"grad_norm": 2.859077215194702, |
|
"learning_rate": 3.394715991534474e-05, |
|
"loss": 0.2856, |
|
"step": 86 |
|
}, |
|
{ |
|
"epoch": 0.6904761904761905, |
|
"grad_norm": 2.433560371398926, |
|
"learning_rate": 3.385486142311011e-05, |
|
"loss": 0.2476, |
|
"step": 87 |
|
}, |
|
{ |
|
"epoch": 0.6984126984126984, |
|
"grad_norm": 2.6791834831237793, |
|
"learning_rate": 3.375885008374425e-05, |
|
"loss": 0.2427, |
|
"step": 88 |
|
}, |
|
{ |
|
"epoch": 0.7063492063492064, |
|
"grad_norm": 2.6574490070343018, |
|
"learning_rate": 3.365915242427944e-05, |
|
"loss": 0.2614, |
|
"step": 89 |
|
}, |
|
{ |
|
"epoch": 0.7142857142857143, |
|
"grad_norm": 2.5747766494750977, |
|
"learning_rate": 3.355579599024361e-05, |
|
"loss": 0.26, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.7142857142857143, |
|
"eval_loss": 0.10315236449241638, |
|
"eval_runtime": 113.4044, |
|
"eval_samples_per_second": 26.913, |
|
"eval_steps_per_second": 0.212, |
|
"eval_sts-test_pearson_cosine": 0.8793659111713259, |
|
"eval_sts-test_pearson_dot": 0.8641308245754843, |
|
"eval_sts-test_pearson_euclidean": 0.9095961426218309, |
|
"eval_sts-test_pearson_manhattan": 0.9093977382821561, |
|
"eval_sts-test_pearson_max": 0.9095961426218309, |
|
"eval_sts-test_spearman_cosine": 0.9087700407492219, |
|
"eval_sts-test_spearman_dot": 0.8756799287974091, |
|
"eval_sts-test_spearman_euclidean": 0.9084703415516837, |
|
"eval_sts-test_spearman_manhattan": 0.908642678659302, |
|
"eval_sts-test_spearman_max": 0.9087700407492219, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.7222222222222222, |
|
"grad_norm": 2.3570423126220703, |
|
"learning_rate": 3.3448809338049753e-05, |
|
"loss": 0.1862, |
|
"step": 91 |
|
}, |
|
{ |
|
"epoch": 0.7301587301587301, |
|
"grad_norm": 2.486401319503784, |
|
"learning_rate": 3.333822202710612e-05, |
|
"loss": 0.267, |
|
"step": 92 |
|
}, |
|
{ |
|
"epoch": 0.7380952380952381, |
|
"grad_norm": 2.436018705368042, |
|
"learning_rate": 3.322406461164916e-05, |
|
"loss": 0.2175, |
|
"step": 93 |
|
}, |
|
{ |
|
"epoch": 0.746031746031746, |
|
"grad_norm": 2.2685282230377197, |
|
"learning_rate": 3.310636863230172e-05, |
|
"loss": 0.2079, |
|
"step": 94 |
|
}, |
|
{ |
|
"epoch": 0.753968253968254, |
|
"grad_norm": 2.564317464828491, |
|
"learning_rate": 3.2985166607358637e-05, |
|
"loss": 0.2562, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 0.753968253968254, |
|
"eval_loss": 0.09990089386701584, |
|
"eval_runtime": 113.4641, |
|
"eval_samples_per_second": 26.898, |
|
"eval_steps_per_second": 0.212, |
|
"eval_sts-test_pearson_cosine": 0.8795677625682299, |
|
"eval_sts-test_pearson_dot": 0.8659349142313639, |
|
"eval_sts-test_pearson_euclidean": 0.9099982334792637, |
|
"eval_sts-test_pearson_manhattan": 0.9099098081017423, |
|
"eval_sts-test_pearson_max": 0.9099982334792637, |
|
"eval_sts-test_spearman_cosine": 0.9085842782631862, |
|
"eval_sts-test_spearman_dot": 0.8767303751560495, |
|
"eval_sts-test_spearman_euclidean": 0.9083879992307234, |
|
"eval_sts-test_spearman_manhattan": 0.9084153422514337, |
|
"eval_sts-test_spearman_max": 0.9085842782631862, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 0.7619047619047619, |
|
"grad_norm": 2.594452381134033, |
|
"learning_rate": 3.286049202380226e-05, |
|
"loss": 0.2516, |
|
"step": 96 |
|
}, |
|
{ |
|
"epoch": 0.7698412698412699, |
|
"grad_norm": 2.6356875896453857, |
|
"learning_rate": 3.273237932805032e-05, |
|
"loss": 0.2956, |
|
"step": 97 |
|
}, |
|
{ |
|
"epoch": 0.7777777777777778, |
|
"grad_norm": 2.6263818740844727, |
|
"learning_rate": 3.260086391643865e-05, |
|
"loss": 0.2733, |
|
"step": 98 |
|
}, |
|
{ |
|
"epoch": 0.7857142857142857, |
|
"grad_norm": 2.636934757232666, |
|
"learning_rate": 3.246598212544159e-05, |
|
"loss": 0.2919, |
|
"step": 99 |
|
}, |
|
{ |
|
"epoch": 0.7936507936507936, |
|
"grad_norm": 2.710754632949829, |
|
"learning_rate": 3.2327771221632486e-05, |
|
"loss": 0.2997, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.7936507936507936, |
|
"eval_loss": 0.10318750143051147, |
|
"eval_runtime": 113.3236, |
|
"eval_samples_per_second": 26.932, |
|
"eval_steps_per_second": 0.212, |
|
"eval_sts-test_pearson_cosine": 0.8759700287215791, |
|
"eval_sts-test_pearson_dot": 0.8604257798750153, |
|
"eval_sts-test_pearson_euclidean": 0.9085309767938886, |
|
"eval_sts-test_pearson_manhattan": 0.9086607224369581, |
|
"eval_sts-test_pearson_max": 0.9086607224369581, |
|
"eval_sts-test_spearman_cosine": 0.9069411909499396, |
|
"eval_sts-test_spearman_dot": 0.8719651713405704, |
|
"eval_sts-test_spearman_euclidean": 0.9080629260679763, |
|
"eval_sts-test_spearman_manhattan": 0.907642129957113, |
|
"eval_sts-test_spearman_max": 0.9080629260679763, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.8015873015873016, |
|
"grad_norm": 2.516517162322998, |
|
"learning_rate": 3.218626939138736e-05, |
|
"loss": 0.2276, |
|
"step": 101 |
|
}, |
|
{ |
|
"epoch": 0.8095238095238095, |
|
"grad_norm": 2.6900599002838135, |
|
"learning_rate": 3.204151573033428e-05, |
|
"loss": 0.2582, |
|
"step": 102 |
|
}, |
|
{ |
|
"epoch": 0.8174603174603174, |
|
"grad_norm": 2.5696845054626465, |
|
"learning_rate": 3.189355023255171e-05, |
|
"loss": 0.2559, |
|
"step": 103 |
|
}, |
|
{ |
|
"epoch": 0.8253968253968254, |
|
"grad_norm": 2.7647061347961426, |
|
"learning_rate": 3.174241377951843e-05, |
|
"loss": 0.2864, |
|
"step": 104 |
|
}, |
|
{ |
|
"epoch": 0.8333333333333334, |
|
"grad_norm": 2.7451863288879395, |
|
"learning_rate": 3.1588148128818425e-05, |
|
"loss": 0.2839, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 0.8333333333333334, |
|
"eval_loss": 0.10743121802806854, |
|
"eval_runtime": 113.407, |
|
"eval_samples_per_second": 26.912, |
|
"eval_steps_per_second": 0.212, |
|
"eval_sts-test_pearson_cosine": 0.8767875416189156, |
|
"eval_sts-test_pearson_dot": 0.8604239654952288, |
|
"eval_sts-test_pearson_euclidean": 0.9092313999029703, |
|
"eval_sts-test_pearson_manhattan": 0.9094243708597547, |
|
"eval_sts-test_pearson_max": 0.9094243708597547, |
|
"eval_sts-test_spearman_cosine": 0.9075548649973998, |
|
"eval_sts-test_spearman_dot": 0.8721893304088798, |
|
"eval_sts-test_spearman_euclidean": 0.9081462081654257, |
|
"eval_sts-test_spearman_manhattan": 0.9082743310267892, |
|
"eval_sts-test_spearman_max": 0.9082743310267892, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 0.8412698412698413, |
|
"grad_norm": 2.5484859943389893, |
|
"learning_rate": 3.1430795902603625e-05, |
|
"loss": 0.2549, |
|
"step": 106 |
|
}, |
|
{ |
|
"epoch": 0.8492063492063492, |
|
"grad_norm": 2.6235854625701904, |
|
"learning_rate": 3.127040057581783e-05, |
|
"loss": 0.2826, |
|
"step": 107 |
|
}, |
|
{ |
|
"epoch": 0.8571428571428571, |
|
"grad_norm": 2.5238397121429443, |
|
"learning_rate": 3.110700646418496e-05, |
|
"loss": 0.2334, |
|
"step": 108 |
|
}, |
|
{ |
|
"epoch": 0.8650793650793651, |
|
"grad_norm": 2.740260362625122, |
|
"learning_rate": 3.0940658711965065e-05, |
|
"loss": 0.2632, |
|
"step": 109 |
|
}, |
|
{ |
|
"epoch": 0.873015873015873, |
|
"grad_norm": 2.510667562484741, |
|
"learning_rate": 3.077140327948137e-05, |
|
"loss": 0.2255, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 0.873015873015873, |
|
"eval_loss": 0.10900076478719711, |
|
"eval_runtime": 113.5509, |
|
"eval_samples_per_second": 26.878, |
|
"eval_steps_per_second": 0.211, |
|
"eval_sts-test_pearson_cosine": 0.8751607943383615, |
|
"eval_sts-test_pearson_dot": 0.8589309347875178, |
|
"eval_sts-test_pearson_euclidean": 0.9068514756772725, |
|
"eval_sts-test_pearson_manhattan": 0.9076530218955405, |
|
"eval_sts-test_pearson_max": 0.9076530218955405, |
|
"eval_sts-test_spearman_cosine": 0.9056104674412049, |
|
"eval_sts-test_spearman_dot": 0.8704153456560634, |
|
"eval_sts-test_spearman_euclidean": 0.9057139771088031, |
|
"eval_sts-test_spearman_manhattan": 0.9064273122153821, |
|
"eval_sts-test_spearman_max": 0.9064273122153821, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 0.8809523809523809, |
|
"grad_norm": 2.552116632461548, |
|
"learning_rate": 3.059928693042189e-05, |
|
"loss": 0.2589, |
|
"step": 111 |
|
}, |
|
{ |
|
"epoch": 0.8888888888888888, |
|
"grad_norm": 2.5401554107666016, |
|
"learning_rate": 3.0424357218919025e-05, |
|
"loss": 0.2569, |
|
"step": 112 |
|
}, |
|
{ |
|
"epoch": 0.8968253968253969, |
|
"grad_norm": 2.695404291152954, |
|
"learning_rate": 3.0246662476410844e-05, |
|
"loss": 0.2797, |
|
"step": 113 |
|
}, |
|
{ |
|
"epoch": 0.9047619047619048, |
|
"grad_norm": 2.8901283740997314, |
|
"learning_rate": 3.0066251798287526e-05, |
|
"loss": 0.2742, |
|
"step": 114 |
|
}, |
|
{ |
|
"epoch": 0.9126984126984127, |
|
"grad_norm": 2.4771928787231445, |
|
"learning_rate": 2.9883175030326795e-05, |
|
"loss": 0.2295, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 0.9126984126984127, |
|
"eval_loss": 0.10703907907009125, |
|
"eval_runtime": 113.456, |
|
"eval_samples_per_second": 26.9, |
|
"eval_steps_per_second": 0.212, |
|
"eval_sts-test_pearson_cosine": 0.8721877249457892, |
|
"eval_sts-test_pearson_dot": 0.8560616623137738, |
|
"eval_sts-test_pearson_euclidean": 0.9030366016666834, |
|
"eval_sts-test_pearson_manhattan": 0.9045537484069119, |
|
"eval_sts-test_pearson_max": 0.9045537484069119, |
|
"eval_sts-test_spearman_cosine": 0.9013517136508925, |
|
"eval_sts-test_spearman_dot": 0.8659265703821992, |
|
"eval_sts-test_spearman_euclidean": 0.9015738141611778, |
|
"eval_sts-test_spearman_manhattan": 0.9030298859530699, |
|
"eval_sts-test_spearman_max": 0.9030298859530699, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 0.9206349206349206, |
|
"grad_norm": 2.4024698734283447, |
|
"learning_rate": 2.969748275492197e-05, |
|
"loss": 0.2047, |
|
"step": 116 |
|
}, |
|
{ |
|
"epoch": 0.9285714285714286, |
|
"grad_norm": 2.7750799655914307, |
|
"learning_rate": 2.9509226277106527e-05, |
|
"loss": 0.2577, |
|
"step": 117 |
|
}, |
|
{ |
|
"epoch": 0.9365079365079365, |
|
"grad_norm": 2.545588731765747, |
|
"learning_rate": 2.9318457610379043e-05, |
|
"loss": 0.2614, |
|
"step": 118 |
|
}, |
|
{ |
|
"epoch": 0.9444444444444444, |
|
"grad_norm": 2.685835123062134, |
|
"learning_rate": 2.9125229462332293e-05, |
|
"loss": 0.2722, |
|
"step": 119 |
|
}, |
|
{ |
|
"epoch": 0.9523809523809523, |
|
"grad_norm": 2.3174188137054443, |
|
"learning_rate": 2.892959522009068e-05, |
|
"loss": 0.1927, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 0.9523809523809523, |
|
"eval_loss": 0.10235559195280075, |
|
"eval_runtime": 113.4984, |
|
"eval_samples_per_second": 26.89, |
|
"eval_steps_per_second": 0.211, |
|
"eval_sts-test_pearson_cosine": 0.872488088039419, |
|
"eval_sts-test_pearson_dot": 0.8563761302721377, |
|
"eval_sts-test_pearson_euclidean": 0.9034767476820997, |
|
"eval_sts-test_pearson_manhattan": 0.9044620383979292, |
|
"eval_sts-test_pearson_max": 0.9044620383979292, |
|
"eval_sts-test_spearman_cosine": 0.9008235592639511, |
|
"eval_sts-test_spearman_dot": 0.864130657511301, |
|
"eval_sts-test_spearman_euclidean": 0.9016059455668568, |
|
"eval_sts-test_spearman_manhattan": 0.9027126890123276, |
|
"eval_sts-test_spearman_max": 0.9027126890123276, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 0.9603174603174603, |
|
"grad_norm": 2.8979287147521973, |
|
"learning_rate": 2.8731608935559857e-05, |
|
"loss": 0.2649, |
|
"step": 121 |
|
}, |
|
{ |
|
"epoch": 0.9682539682539683, |
|
"grad_norm": 2.485367774963379, |
|
"learning_rate": 2.8531325310492677e-05, |
|
"loss": 0.2386, |
|
"step": 122 |
|
}, |
|
{ |
|
"epoch": 0.9761904761904762, |
|
"grad_norm": 2.662865400314331, |
|
"learning_rate": 2.8328799681375657e-05, |
|
"loss": 0.2801, |
|
"step": 123 |
|
}, |
|
{ |
|
"epoch": 0.9841269841269841, |
|
"grad_norm": 2.693284511566162, |
|
"learning_rate": 2.812408800413997e-05, |
|
"loss": 0.2583, |
|
"step": 124 |
|
}, |
|
{ |
|
"epoch": 0.9920634920634921, |
|
"grad_norm": 3.0903170108795166, |
|
"learning_rate": 2.79172468387014e-05, |
|
"loss": 0.3076, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 0.9920634920634921, |
|
"eval_loss": 0.09488630294799805, |
|
"eval_runtime": 113.5341, |
|
"eval_samples_per_second": 26.882, |
|
"eval_steps_per_second": 0.211, |
|
"eval_sts-test_pearson_cosine": 0.8742114661417826, |
|
"eval_sts-test_pearson_dot": 0.8564803125306033, |
|
"eval_sts-test_pearson_euclidean": 0.905509251442753, |
|
"eval_sts-test_pearson_manhattan": 0.9061976864024648, |
|
"eval_sts-test_pearson_max": 0.9061976864024648, |
|
"eval_sts-test_spearman_cosine": 0.901563655624842, |
|
"eval_sts-test_spearman_dot": 0.8618259675496438, |
|
"eval_sts-test_spearman_euclidean": 0.90313473815851, |
|
"eval_sts-test_spearman_manhattan": 0.9041778900615435, |
|
"eval_sts-test_spearman_max": 0.9041778900615435, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"grad_norm": null, |
|
"learning_rate": 2.79172468387014e-05, |
|
"loss": 0.5477, |
|
"step": 126 |
|
}, |
|
{ |
|
"epoch": 1.007936507936508, |
|
"grad_norm": 0.982935905456543, |
|
"learning_rate": 2.7708333333333334e-05, |
|
"loss": 0.0031, |
|
"step": 127 |
|
}, |
|
{ |
|
"epoch": 1.0158730158730158, |
|
"grad_norm": 7.32542133619063e-08, |
|
"learning_rate": 2.7497405208877213e-05, |
|
"loss": 0.0, |
|
"step": 128 |
|
}, |
|
{ |
|
"epoch": 1.0238095238095237, |
|
"grad_norm": 0.0, |
|
"learning_rate": 2.7284520742794878e-05, |
|
"loss": 0.0, |
|
"step": 129 |
|
}, |
|
{ |
|
"epoch": 1.0317460317460319, |
|
"grad_norm": 0.0, |
|
"learning_rate": 2.706973875306696e-05, |
|
"loss": 0.0, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 1.0317460317460319, |
|
"eval_loss": 0.09545031189918518, |
|
"eval_runtime": 114.0298, |
|
"eval_samples_per_second": 26.765, |
|
"eval_steps_per_second": 0.21, |
|
"eval_sts-test_pearson_cosine": 0.874702030760496, |
|
"eval_sts-test_pearson_dot": 0.8555439931828537, |
|
"eval_sts-test_pearson_euclidean": 0.9065531106539271, |
|
"eval_sts-test_pearson_manhattan": 0.9071871020121037, |
|
"eval_sts-test_pearson_max": 0.9071871020121037, |
|
"eval_sts-test_spearman_cosine": 0.9021036690960521, |
|
"eval_sts-test_spearman_dot": 0.8598106392441436, |
|
"eval_sts-test_spearman_euclidean": 0.9043656663543417, |
|
"eval_sts-test_spearman_manhattan": 0.9048875555646884, |
|
"eval_sts-test_spearman_max": 0.9048875555646884, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 1.0396825396825398, |
|
"grad_norm": 0.0, |
|
"learning_rate": 2.6853118581942095e-05, |
|
"loss": 0.0, |
|
"step": 131 |
|
}, |
|
{ |
|
"epoch": 1.0476190476190477, |
|
"grad_norm": 0.0, |
|
"learning_rate": 2.66347200795412e-05, |
|
"loss": 0.0, |
|
"step": 132 |
|
}, |
|
{ |
|
"epoch": 1.0555555555555556, |
|
"grad_norm": 0.0, |
|
"learning_rate": 2.6414603587321415e-05, |
|
"loss": 0.0, |
|
"step": 133 |
|
}, |
|
{ |
|
"epoch": 1.0634920634920635, |
|
"grad_norm": 0.0, |
|
"learning_rate": 2.6192829921404365e-05, |
|
"loss": 0.0, |
|
"step": 134 |
|
}, |
|
{ |
|
"epoch": 1.0714285714285714, |
|
"grad_norm": 0.0, |
|
"learning_rate": 2.596946035577322e-05, |
|
"loss": 0.0, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 1.0714285714285714, |
|
"eval_loss": 0.09676523506641388, |
|
"eval_runtime": 113.8781, |
|
"eval_samples_per_second": 26.801, |
|
"eval_steps_per_second": 0.211, |
|
"eval_sts-test_pearson_cosine": 0.8747276197594331, |
|
"eval_sts-test_pearson_dot": 0.8544620877290974, |
|
"eval_sts-test_pearson_euclidean": 0.9070156259790649, |
|
"eval_sts-test_pearson_manhattan": 0.9076369642785601, |
|
"eval_sts-test_pearson_max": 0.9076369642785601, |
|
"eval_sts-test_spearman_cosine": 0.9022577924402841, |
|
"eval_sts-test_spearman_dot": 0.8575864120504681, |
|
"eval_sts-test_spearman_euclidean": 0.9046890144393489, |
|
"eval_sts-test_spearman_manhattan": 0.9049111394794415, |
|
"eval_sts-test_spearman_max": 0.9049111394794415, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 1.0793650793650793, |
|
"grad_norm": 0.0, |
|
"learning_rate": 2.5744556605343263e-05, |
|
"loss": 0.0, |
|
"step": 136 |
|
}, |
|
{ |
|
"epoch": 1.0873015873015872, |
|
"grad_norm": 0.0, |
|
"learning_rate": 2.5518180808910628e-05, |
|
"loss": 0.0, |
|
"step": 137 |
|
}, |
|
{ |
|
"epoch": 1.0952380952380953, |
|
"grad_norm": 0.0, |
|
"learning_rate": 2.5290395511983987e-05, |
|
"loss": 0.0, |
|
"step": 138 |
|
}, |
|
{ |
|
"epoch": 1.1031746031746033, |
|
"grad_norm": 0.0, |
|
"learning_rate": 2.5061263649503735e-05, |
|
"loss": 0.0, |
|
"step": 139 |
|
}, |
|
{ |
|
"epoch": 1.1111111111111112, |
|
"grad_norm": 0.0, |
|
"learning_rate": 2.4830848528453706e-05, |
|
"loss": 0.0, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 1.1111111111111112, |
|
"eval_loss": 0.09783273935317993, |
|
"eval_runtime": 113.4913, |
|
"eval_samples_per_second": 26.892, |
|
"eval_steps_per_second": 0.211, |
|
"eval_sts-test_pearson_cosine": 0.8746125563575086, |
|
"eval_sts-test_pearson_dot": 0.8536822328210044, |
|
"eval_sts-test_pearson_euclidean": 0.9071769017286274, |
|
"eval_sts-test_pearson_manhattan": 0.9077849350046876, |
|
"eval_sts-test_pearson_max": 0.9077849350046876, |
|
"eval_sts-test_spearman_cosine": 0.9023702076088993, |
|
"eval_sts-test_spearman_dot": 0.8566226041338817, |
|
"eval_sts-test_spearman_euclidean": 0.9049979121752795, |
|
"eval_sts-test_spearman_manhattan": 0.9049124372660218, |
|
"eval_sts-test_spearman_max": 0.9049979121752795, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 1.119047619047619, |
|
"grad_norm": 0.0, |
|
"learning_rate": 2.4599213810370067e-05, |
|
"loss": 0.0, |
|
"step": 141 |
|
}, |
|
{ |
|
"epoch": 1.126984126984127, |
|
"grad_norm": 0.0, |
|
"learning_rate": 2.4366423493752155e-05, |
|
"loss": 0.0, |
|
"step": 142 |
|
}, |
|
{ |
|
"epoch": 1.1349206349206349, |
|
"grad_norm": 0.0, |
|
"learning_rate": 2.4132541896380374e-05, |
|
"loss": 0.0, |
|
"step": 143 |
|
}, |
|
{ |
|
"epoch": 1.1428571428571428, |
|
"grad_norm": 0.0, |
|
"learning_rate": 2.3897633637545755e-05, |
|
"loss": 0.0, |
|
"step": 144 |
|
}, |
|
{ |
|
"epoch": 1.1507936507936507, |
|
"grad_norm": 0.0, |
|
"learning_rate": 2.366176362019625e-05, |
|
"loss": 0.0, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 1.1507936507936507, |
|
"eval_loss": 0.09863892197608948, |
|
"eval_runtime": 113.5886, |
|
"eval_samples_per_second": 26.869, |
|
"eval_steps_per_second": 0.211, |
|
"eval_sts-test_pearson_cosine": 0.8744759141501874, |
|
"eval_sts-test_pearson_dot": 0.8531679877756508, |
|
"eval_sts-test_pearson_euclidean": 0.9072042306444432, |
|
"eval_sts-test_pearson_manhattan": 0.907805846396001, |
|
"eval_sts-test_pearson_max": 0.907805846396001, |
|
"eval_sts-test_spearman_cosine": 0.9023667170105107, |
|
"eval_sts-test_spearman_dot": 0.8560352885793809, |
|
"eval_sts-test_spearman_euclidean": 0.9048520230631434, |
|
"eval_sts-test_spearman_manhattan": 0.9049029947498682, |
|
"eval_sts-test_spearman_max": 0.9049029947498682, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 1.1587301587301588, |
|
"grad_norm": 0.0, |
|
"learning_rate": 2.342499701300467e-05, |
|
"loss": 0.0, |
|
"step": 146 |
|
}, |
|
{ |
|
"epoch": 1.1666666666666667, |
|
"grad_norm": 0.0, |
|
"learning_rate": 2.318739923236319e-05, |
|
"loss": 0.0, |
|
"step": 147 |
|
}, |
|
{ |
|
"epoch": 1.1746031746031746, |
|
"grad_norm": 0.0, |
|
"learning_rate": 2.29490359243094e-05, |
|
"loss": 0.0, |
|
"step": 148 |
|
}, |
|
{ |
|
"epoch": 1.1825396825396826, |
|
"grad_norm": 0.0, |
|
"learning_rate": 2.270997294638895e-05, |
|
"loss": 0.0, |
|
"step": 149 |
|
}, |
|
{ |
|
"epoch": 1.1904761904761905, |
|
"grad_norm": 0.0, |
|
"learning_rate": 2.24702763494597e-05, |
|
"loss": 0.0, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 1.1904761904761905, |
|
"eval_loss": 0.09906759858131409, |
|
"eval_runtime": 113.5604, |
|
"eval_samples_per_second": 26.876, |
|
"eval_steps_per_second": 0.211, |
|
"eval_sts-test_pearson_cosine": 0.8744060293405248, |
|
"eval_sts-test_pearson_dot": 0.8528829505611442, |
|
"eval_sts-test_pearson_euclidean": 0.9072185184616055, |
|
"eval_sts-test_pearson_manhattan": 0.9078100463432079, |
|
"eval_sts-test_pearson_max": 0.9078100463432079, |
|
"eval_sts-test_spearman_cosine": 0.9022761852087159, |
|
"eval_sts-test_spearman_dot": 0.8555268694987131, |
|
"eval_sts-test_spearman_euclidean": 0.9047365648087538, |
|
"eval_sts-test_spearman_manhattan": 0.9048373894006686, |
|
"eval_sts-test_spearman_max": 0.9048373894006686, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 1.1984126984126984, |
|
"grad_norm": 0.0, |
|
"learning_rate": 2.2230012359442495e-05, |
|
"loss": 0.0, |
|
"step": 151 |
|
}, |
|
{ |
|
"epoch": 1.2063492063492063, |
|
"grad_norm": 0.0, |
|
"learning_rate": 2.1989247359023566e-05, |
|
"loss": 0.0, |
|
"step": 152 |
|
}, |
|
{ |
|
"epoch": 1.2142857142857142, |
|
"grad_norm": 0.0, |
|
"learning_rate": 2.174804786931362e-05, |
|
"loss": 0.0, |
|
"step": 153 |
|
}, |
|
{ |
|
"epoch": 1.2222222222222223, |
|
"grad_norm": 0.0, |
|
"learning_rate": 2.150648053146869e-05, |
|
"loss": 0.0, |
|
"step": 154 |
|
}, |
|
{ |
|
"epoch": 1.2301587301587302, |
|
"grad_norm": 0.0, |
|
"learning_rate": 2.126461208827777e-05, |
|
"loss": 0.0, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 1.2301587301587302, |
|
"eval_loss": 0.09936919808387756, |
|
"eval_runtime": 113.5013, |
|
"eval_samples_per_second": 26.89, |
|
"eval_steps_per_second": 0.211, |
|
"eval_sts-test_pearson_cosine": 0.8743508789394699, |
|
"eval_sts-test_pearson_dot": 0.8527007540947626, |
|
"eval_sts-test_pearson_euclidean": 0.9072202919761476, |
|
"eval_sts-test_pearson_manhattan": 0.9078091191041777, |
|
"eval_sts-test_pearson_max": 0.9078091191041777, |
|
"eval_sts-test_spearman_cosine": 0.9023025884529369, |
|
"eval_sts-test_spearman_dot": 0.8550739867334323, |
|
"eval_sts-test_spearman_euclidean": 0.9046202541246842, |
|
"eval_sts-test_spearman_manhattan": 0.9049393775253795, |
|
"eval_sts-test_spearman_max": 0.9049393775253795, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 1.2380952380952381, |
|
"grad_norm": 0.0, |
|
"learning_rate": 2.102250936572247e-05, |
|
"loss": 0.0, |
|
"step": 156 |
|
}, |
|
{ |
|
"epoch": 1.246031746031746, |
|
"grad_norm": 0.0, |
|
"learning_rate": 2.0780239254513565e-05, |
|
"loss": 0.0, |
|
"step": 157 |
|
}, |
|
{ |
|
"epoch": 1.253968253968254, |
|
"grad_norm": 0.0, |
|
"learning_rate": 2.0537868691609745e-05, |
|
"loss": 0.0, |
|
"step": 158 |
|
}, |
|
{ |
|
"epoch": 1.2619047619047619, |
|
"grad_norm": 0.0, |
|
"learning_rate": 2.0295464641723583e-05, |
|
"loss": 0.0, |
|
"step": 159 |
|
}, |
|
{ |
|
"epoch": 1.2698412698412698, |
|
"grad_norm": 0.0, |
|
"learning_rate": 2.005309407881977e-05, |
|
"loss": 0.0, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 1.2698412698412698, |
|
"eval_loss": 0.09954561293125153, |
|
"eval_runtime": 113.8237, |
|
"eval_samples_per_second": 26.813, |
|
"eval_steps_per_second": 0.211, |
|
"eval_sts-test_pearson_cosine": 0.8743232038160612, |
|
"eval_sts-test_pearson_dot": 0.8526065970333739, |
|
"eval_sts-test_pearson_euclidean": 0.9072152805749177, |
|
"eval_sts-test_pearson_manhattan": 0.9078067984919014, |
|
"eval_sts-test_pearson_max": 0.9078067984919014, |
|
"eval_sts-test_spearman_cosine": 0.9022778857566487, |
|
"eval_sts-test_spearman_dot": 0.8549482356889225, |
|
"eval_sts-test_spearman_euclidean": 0.9046245971527523, |
|
"eval_sts-test_spearman_manhattan": 0.9048685362785969, |
|
"eval_sts-test_spearman_max": 0.9048685362785969, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 1.2777777777777777, |
|
"grad_norm": 0.0, |
|
"learning_rate": 1.981082396761086e-05, |
|
"loss": 0.0, |
|
"step": 161 |
|
}, |
|
{ |
|
"epoch": 1.2857142857142858, |
|
"grad_norm": 0.0, |
|
"learning_rate": 1.956872124505556e-05, |
|
"loss": 0.0, |
|
"step": 162 |
|
}, |
|
{ |
|
"epoch": 1.2936507936507937, |
|
"grad_norm": 0.0, |
|
"learning_rate": 1.9326852801864646e-05, |
|
"loss": 0.0, |
|
"step": 163 |
|
}, |
|
{ |
|
"epoch": 1.3015873015873016, |
|
"grad_norm": 0.0, |
|
"learning_rate": 1.908528546401971e-05, |
|
"loss": 0.0, |
|
"step": 164 |
|
}, |
|
{ |
|
"epoch": 1.3095238095238095, |
|
"grad_norm": 0.0, |
|
"learning_rate": 1.8844085974309768e-05, |
|
"loss": 0.0, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 1.3095238095238095, |
|
"eval_loss": 0.09964071214199066, |
|
"eval_runtime": 113.5481, |
|
"eval_samples_per_second": 26.878, |
|
"eval_steps_per_second": 0.211, |
|
"eval_sts-test_pearson_cosine": 0.874299456945445, |
|
"eval_sts-test_pearson_dot": 0.8525374757432119, |
|
"eval_sts-test_pearson_euclidean": 0.9072080565726848, |
|
"eval_sts-test_pearson_manhattan": 0.907801820994878, |
|
"eval_sts-test_pearson_max": 0.907801820994878, |
|
"eval_sts-test_spearman_cosine": 0.9022729631178957, |
|
"eval_sts-test_spearman_dot": 0.8546675557774756, |
|
"eval_sts-test_spearman_euclidean": 0.9046749423218178, |
|
"eval_sts-test_spearman_manhattan": 0.9047542415570032, |
|
"eval_sts-test_spearman_max": 0.9047542415570032, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 1.3174603174603174, |
|
"grad_norm": 0.0, |
|
"learning_rate": 1.8603320973890842e-05, |
|
"loss": 0.0, |
|
"step": 166 |
|
}, |
|
{ |
|
"epoch": 1.3253968253968254, |
|
"grad_norm": 0.0, |
|
"learning_rate": 1.836305698387363e-05, |
|
"loss": 0.0, |
|
"step": 167 |
|
}, |
|
{ |
|
"epoch": 1.3333333333333333, |
|
"grad_norm": 0.0, |
|
"learning_rate": 1.812336038694438e-05, |
|
"loss": 0.0, |
|
"step": 168 |
|
}, |
|
{ |
|
"epoch": 1.3412698412698412, |
|
"grad_norm": 0.0, |
|
"learning_rate": 1.7884297409023932e-05, |
|
"loss": 0.0, |
|
"step": 169 |
|
}, |
|
{ |
|
"epoch": 1.3492063492063493, |
|
"grad_norm": 0.0, |
|
"learning_rate": 1.7645934100970145e-05, |
|
"loss": 0.0, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 1.3492063492063493, |
|
"eval_loss": 0.09969252347946167, |
|
"eval_runtime": 113.9068, |
|
"eval_samples_per_second": 26.794, |
|
"eval_steps_per_second": 0.211, |
|
"eval_sts-test_pearson_cosine": 0.8742791376017494, |
|
"eval_sts-test_pearson_dot": 0.8524863854057724, |
|
"eval_sts-test_pearson_euclidean": 0.9072037622211908, |
|
"eval_sts-test_pearson_manhattan": 0.9077955012591168, |
|
"eval_sts-test_pearson_max": 0.9077955012591168, |
|
"eval_sts-test_spearman_cosine": 0.9022626703277757, |
|
"eval_sts-test_spearman_dot": 0.8547057286034424, |
|
"eval_sts-test_spearman_euclidean": 0.9046214198131936, |
|
"eval_sts-test_spearman_manhattan": 0.904806332025263, |
|
"eval_sts-test_spearman_max": 0.904806332025263, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 1.3571428571428572, |
|
"grad_norm": 0.0, |
|
"learning_rate": 1.740833632032866e-05, |
|
"loss": 0.0, |
|
"step": 171 |
|
}, |
|
{ |
|
"epoch": 1.3650793650793651, |
|
"grad_norm": 0.0, |
|
"learning_rate": 1.717156971313708e-05, |
|
"loss": 0.0, |
|
"step": 172 |
|
}, |
|
{ |
|
"epoch": 1.373015873015873, |
|
"grad_norm": 0.0, |
|
"learning_rate": 1.6935699695787573e-05, |
|
"loss": 0.0, |
|
"step": 173 |
|
}, |
|
{ |
|
"epoch": 1.380952380952381, |
|
"grad_norm": 0.0, |
|
"learning_rate": 1.6700791436952954e-05, |
|
"loss": 0.0, |
|
"step": 174 |
|
}, |
|
{ |
|
"epoch": 1.3888888888888888, |
|
"grad_norm": 0.0, |
|
"learning_rate": 1.6466909839581176e-05, |
|
"loss": 0.0, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 1.3888888888888888, |
|
"eval_loss": 0.09972869604825974, |
|
"eval_runtime": 113.3507, |
|
"eval_samples_per_second": 26.925, |
|
"eval_steps_per_second": 0.212, |
|
"eval_sts-test_pearson_cosine": 0.8742756437676581, |
|
"eval_sts-test_pearson_dot": 0.852466650373626, |
|
"eval_sts-test_pearson_euclidean": 0.9072071674153375, |
|
"eval_sts-test_pearson_manhattan": 0.9077967797332845, |
|
"eval_sts-test_pearson_max": 0.9077967797332845, |
|
"eval_sts-test_spearman_cosine": 0.9022777962541261, |
|
"eval_sts-test_spearman_dot": 0.8546733734414563, |
|
"eval_sts-test_spearman_euclidean": 0.9045410465477347, |
|
"eval_sts-test_spearman_manhattan": 0.9047774674616654, |
|
"eval_sts-test_spearman_max": 0.9047774674616654, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 1.3968253968253967, |
|
"grad_norm": 0.0, |
|
"learning_rate": 1.6234119522963267e-05, |
|
"loss": 0.0, |
|
"step": 176 |
|
}, |
|
{ |
|
"epoch": 1.4047619047619047, |
|
"grad_norm": 0.0, |
|
"learning_rate": 1.6002484804879622e-05, |
|
"loss": 0.0, |
|
"step": 177 |
|
}, |
|
{ |
|
"epoch": 1.4126984126984128, |
|
"grad_norm": 0.0, |
|
"learning_rate": 1.5772069683829603e-05, |
|
"loss": 0.0, |
|
"step": 178 |
|
}, |
|
{ |
|
"epoch": 1.4206349206349207, |
|
"grad_norm": 0.0, |
|
"learning_rate": 1.5542937821349347e-05, |
|
"loss": 0.0, |
|
"step": 179 |
|
}, |
|
{ |
|
"epoch": 1.4285714285714286, |
|
"grad_norm": 0.0, |
|
"learning_rate": 1.5315152524422703e-05, |
|
"loss": 0.0, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 1.4285714285714286, |
|
"eval_loss": 0.09972812980413437, |
|
"eval_runtime": 113.3751, |
|
"eval_samples_per_second": 26.919, |
|
"eval_steps_per_second": 0.212, |
|
"eval_sts-test_pearson_cosine": 0.874267739156544, |
|
"eval_sts-test_pearson_dot": 0.8524562621509095, |
|
"eval_sts-test_pearson_euclidean": 0.9072067299928095, |
|
"eval_sts-test_pearson_manhattan": 0.907793793015005, |
|
"eval_sts-test_pearson_max": 0.907793793015005, |
|
"eval_sts-test_spearman_cosine": 0.902276095706193, |
|
"eval_sts-test_spearman_dot": 0.8546634834126889, |
|
"eval_sts-test_spearman_euclidean": 0.9045989546799751, |
|
"eval_sts-test_spearman_manhattan": 0.904807495558059, |
|
"eval_sts-test_spearman_max": 0.904807495558059, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 1.4365079365079365, |
|
"grad_norm": 0.0, |
|
"learning_rate": 1.508877672799007e-05, |
|
"loss": 0.0, |
|
"step": 181 |
|
}, |
|
{ |
|
"epoch": 1.4444444444444444, |
|
"grad_norm": 0.0, |
|
"learning_rate": 1.486387297756011e-05, |
|
"loss": 0.0, |
|
"step": 182 |
|
} |
|
], |
|
"logging_steps": 1, |
|
"max_steps": 252, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 2, |
|
"save_steps": 26, |
|
"stateful_callbacks": { |
|
"TrainerControl": { |
|
"args": { |
|
"should_epoch_stop": false, |
|
"should_evaluate": false, |
|
"should_log": false, |
|
"should_save": true, |
|
"should_training_stop": false |
|
}, |
|
"attributes": {} |
|
} |
|
}, |
|
"total_flos": 0.0, |
|
"train_batch_size": 960, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|
|