{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.4126984126984127,
  "eval_steps": 5,
  "global_step": 52,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.007936507936507936,
      "grad_norm": 3.5297670364379883,
      "learning_rate": 5.555555555555555e-07,
      "loss": 0.404,
      "step": 1
    },
    {
      "epoch": 0.015873015873015872,
      "grad_norm": 3.6838796138763428,
      "learning_rate": 1.111111111111111e-06,
      "loss": 0.3185,
      "step": 2
    },
    {
      "epoch": 0.023809523809523808,
      "grad_norm": 3.5556721687316895,
      "learning_rate": 1.6666666666666665e-06,
      "loss": 0.2821,
      "step": 3
    },
    {
      "epoch": 0.031746031746031744,
      "grad_norm": 3.922109842300415,
      "learning_rate": 2.222222222222222e-06,
      "loss": 0.4036,
      "step": 4
    },
    {
      "epoch": 0.03968253968253968,
      "grad_norm": 3.9366657733917236,
      "learning_rate": 2.7777777777777775e-06,
      "loss": 0.3442,
      "step": 5
    },
    {
      "epoch": 0.03968253968253968,
      "eval_loss": 0.12529698014259338,
      "eval_runtime": 113.8002,
      "eval_samples_per_second": 26.819,
      "eval_steps_per_second": 0.211,
      "eval_sts-test_pearson_cosine": 0.886081184413048,
      "eval_sts-test_pearson_dot": 0.8767533438290611,
      "eval_sts-test_pearson_euclidean": 0.9080817963557108,
      "eval_sts-test_pearson_manhattan": 0.9087794191320873,
      "eval_sts-test_pearson_max": 0.9087794191320873,
      "eval_sts-test_spearman_cosine": 0.9077787555581409,
      "eval_sts-test_spearman_dot": 0.8792746633711961,
      "eval_sts-test_spearman_euclidean": 0.9039925750881216,
      "eval_sts-test_spearman_manhattan": 0.904489537845873,
      "eval_sts-test_spearman_max": 0.9077787555581409,
      "step": 5
    },
    {
      "epoch": 0.047619047619047616,
      "grad_norm": 3.8135547637939453,
      "learning_rate": 3.333333333333333e-06,
      "loss": 0.4145,
      "step": 6
    },
    {
      "epoch": 0.05555555555555555,
      "grad_norm": 4.132374286651611,
      "learning_rate": 3.888888888888889e-06,
      "loss": 0.4224,
      "step": 7
    },
    {
      "epoch": 0.06349206349206349,
      "grad_norm": 3.9953386783599854,
      "learning_rate": 4.444444444444444e-06,
      "loss": 0.4048,
      "step": 8
    },
    {
      "epoch": 0.07142857142857142,
      "grad_norm": 4.023675918579102,
      "learning_rate": 4.9999999999999996e-06,
      "loss": 0.3899,
      "step": 9
    },
    {
      "epoch": 0.07936507936507936,
      "grad_norm": 3.854191780090332,
      "learning_rate": 5.555555555555555e-06,
      "loss": 0.4127,
      "step": 10
    },
    {
      "epoch": 0.07936507936507936,
      "eval_loss": 0.12369368970394135,
      "eval_runtime": 113.6707,
      "eval_samples_per_second": 26.849,
      "eval_steps_per_second": 0.211,
      "eval_sts-test_pearson_cosine": 0.8860118050647048,
      "eval_sts-test_pearson_dot": 0.8760605933678182,
      "eval_sts-test_pearson_euclidean": 0.9086480781293332,
      "eval_sts-test_pearson_manhattan": 0.9092897840847158,
      "eval_sts-test_pearson_max": 0.9092897840847158,
      "eval_sts-test_spearman_cosine": 0.9078577415344969,
      "eval_sts-test_spearman_dot": 0.8791339654053815,
      "eval_sts-test_spearman_euclidean": 0.9047648028546915,
      "eval_sts-test_spearman_manhattan": 0.9052383607027356,
      "eval_sts-test_spearman_max": 0.9078577415344969,
      "step": 10
    },
    {
      "epoch": 0.0873015873015873,
      "grad_norm": 3.8079540729522705,
      "learning_rate": 6.11111111111111e-06,
      "loss": 0.3496,
      "step": 11
    },
    {
      "epoch": 0.09523809523809523,
      "grad_norm": 3.929018259048462,
      "learning_rate": 6.666666666666666e-06,
      "loss": 0.3731,
      "step": 12
    },
    {
      "epoch": 0.10317460317460317,
      "grad_norm": 4.284013271331787,
      "learning_rate": 7.222222222222221e-06,
      "loss": 0.3929,
      "step": 13
    },
    {
      "epoch": 0.1111111111111111,
      "grad_norm": 3.3490402698516846,
      "learning_rate": 7.777777777777777e-06,
      "loss": 0.2957,
      "step": 14
    },
    {
      "epoch": 0.11904761904761904,
      "grad_norm": 3.553280830383301,
      "learning_rate": 8.333333333333332e-06,
      "loss": 0.3324,
      "step": 15
    },
    {
      "epoch": 0.11904761904761904,
      "eval_loss": 0.12056715041399002,
      "eval_runtime": 113.718,
      "eval_samples_per_second": 26.838,
      "eval_steps_per_second": 0.211,
      "eval_sts-test_pearson_cosine": 0.8856265458568289,
      "eval_sts-test_pearson_dot": 0.8743050518330721,
      "eval_sts-test_pearson_euclidean": 0.9095228583162331,
      "eval_sts-test_pearson_manhattan": 0.9101600217218586,
      "eval_sts-test_pearson_max": 0.9101600217218586,
      "eval_sts-test_spearman_cosine": 0.908261263658463,
      "eval_sts-test_spearman_dot": 0.87867141636764,
      "eval_sts-test_spearman_euclidean": 0.9060734192402989,
      "eval_sts-test_spearman_manhattan": 0.9066336155303966,
      "eval_sts-test_spearman_max": 0.908261263658463,
      "step": 15
    },
    {
      "epoch": 0.12698412698412698,
      "grad_norm": 3.6310322284698486,
      "learning_rate": 8.888888888888888e-06,
      "loss": 0.3341,
      "step": 16
    },
    {
      "epoch": 0.1349206349206349,
      "grad_norm": 3.6535122394561768,
      "learning_rate": 9.444444444444443e-06,
      "loss": 0.3466,
      "step": 17
    },
    {
      "epoch": 0.14285714285714285,
      "grad_norm": 3.6199331283569336,
      "learning_rate": 9.999999999999999e-06,
      "loss": 0.3558,
      "step": 18
    },
    {
      "epoch": 0.15079365079365079,
      "grad_norm": 3.089895248413086,
      "learning_rate": 1.0555555555555554e-05,
      "loss": 0.2634,
      "step": 19
    },
    {
      "epoch": 0.15873015873015872,
      "grad_norm": 3.320916175842285,
      "learning_rate": 1.111111111111111e-05,
      "loss": 0.3095,
      "step": 20
    },
    {
      "epoch": 0.15873015873015872,
      "eval_loss": 0.11563990265130997,
      "eval_runtime": 113.5377,
      "eval_samples_per_second": 26.881,
      "eval_steps_per_second": 0.211,
      "eval_sts-test_pearson_cosine": 0.8848740042612456,
      "eval_sts-test_pearson_dot": 0.8724689429546052,
      "eval_sts-test_pearson_euclidean": 0.9104294765782397,
      "eval_sts-test_pearson_manhattan": 0.9111381492292419,
      "eval_sts-test_pearson_max": 0.9111381492292419,
      "eval_sts-test_spearman_cosine": 0.9087803335393421,
      "eval_sts-test_spearman_dot": 0.8777188410176626,
      "eval_sts-test_spearman_euclidean": 0.9069791847708608,
      "eval_sts-test_spearman_manhattan": 0.9078148698260838,
      "eval_sts-test_spearman_max": 0.9087803335393421,
      "step": 20
    },
    {
      "epoch": 0.16666666666666666,
      "grad_norm": 3.0193159580230713,
      "learning_rate": 1.1666666666666665e-05,
      "loss": 0.2973,
      "step": 21
    },
    {
      "epoch": 0.1746031746031746,
      "grad_norm": 3.3553476333618164,
      "learning_rate": 1.222222222222222e-05,
      "loss": 0.2884,
      "step": 22
    },
    {
      "epoch": 0.18253968253968253,
      "grad_norm": 3.5176496505737305,
      "learning_rate": 1.2777777777777775e-05,
      "loss": 0.3697,
      "step": 23
    },
    {
      "epoch": 0.19047619047619047,
      "grad_norm": 3.2073943614959717,
      "learning_rate": 1.3333333333333332e-05,
      "loss": 0.2683,
      "step": 24
    },
    {
      "epoch": 0.1984126984126984,
      "grad_norm": 3.2101964950561523,
      "learning_rate": 1.3888888888888886e-05,
      "loss": 0.3026,
      "step": 25
    },
    {
      "epoch": 0.1984126984126984,
      "eval_loss": 0.10958973318338394,
      "eval_runtime": 113.6214,
      "eval_samples_per_second": 26.861,
      "eval_steps_per_second": 0.211,
      "eval_sts-test_pearson_cosine": 0.8832622086480311,
      "eval_sts-test_pearson_dot": 0.8697582354953435,
      "eval_sts-test_pearson_euclidean": 0.9107566690862425,
      "eval_sts-test_pearson_manhattan": 0.9115546986654615,
      "eval_sts-test_pearson_max": 0.9115546986654615,
      "eval_sts-test_spearman_cosine": 0.9087605087305455,
      "eval_sts-test_spearman_dot": 0.8760767382321666,
      "eval_sts-test_spearman_euclidean": 0.9073999361304628,
      "eval_sts-test_spearman_manhattan": 0.9084107328715103,
      "eval_sts-test_spearman_max": 0.9087605087305455,
      "step": 25
    },
    {
      "epoch": 0.20634920634920634,
      "grad_norm": 2.84037709236145,
      "learning_rate": 1.4444444444444442e-05,
      "loss": 0.2441,
      "step": 26
    },
    {
      "epoch": 0.21428571428571427,
      "grad_norm": 3.3099992275238037,
      "learning_rate": 1.4999999999999999e-05,
      "loss": 0.3145,
      "step": 27
    },
    {
      "epoch": 0.2222222222222222,
      "grad_norm": 3.061953067779541,
      "learning_rate": 1.5555555555555555e-05,
      "loss": 0.3119,
      "step": 28
    },
    {
      "epoch": 0.23015873015873015,
      "grad_norm": 3.0163729190826416,
      "learning_rate": 1.6111111111111108e-05,
      "loss": 0.2766,
      "step": 29
    },
    {
      "epoch": 0.23809523809523808,
      "grad_norm": 3.140418291091919,
      "learning_rate": 1.6666666666666664e-05,
      "loss": 0.3343,
      "step": 30
    },
    {
      "epoch": 0.23809523809523808,
      "eval_loss": 0.10535401105880737,
      "eval_runtime": 113.5942,
      "eval_samples_per_second": 26.868,
      "eval_steps_per_second": 0.211,
      "eval_sts-test_pearson_cosine": 0.8819465403802665,
      "eval_sts-test_pearson_dot": 0.866997957398371,
      "eval_sts-test_pearson_euclidean": 0.9110501477101954,
      "eval_sts-test_pearson_manhattan": 0.9119047974126511,
      "eval_sts-test_pearson_max": 0.9119047974126511,
      "eval_sts-test_spearman_cosine": 0.9084358383291508,
      "eval_sts-test_spearman_dot": 0.8727757956894143,
      "eval_sts-test_spearman_euclidean": 0.9077817538926543,
      "eval_sts-test_spearman_manhattan": 0.9089103807049453,
      "eval_sts-test_spearman_max": 0.9089103807049453,
      "step": 30
    },
    {
      "epoch": 0.24603174603174602,
      "grad_norm": 3.1329221725463867,
      "learning_rate": 1.722222222222222e-05,
      "loss": 0.344,
      "step": 31
    },
    {
      "epoch": 0.25396825396825395,
      "grad_norm": 2.9861748218536377,
      "learning_rate": 1.7777777777777777e-05,
      "loss": 0.3005,
      "step": 32
    },
    {
      "epoch": 0.2619047619047619,
      "grad_norm": 2.8316733837127686,
      "learning_rate": 1.8333333333333333e-05,
      "loss": 0.2526,
      "step": 33
    },
    {
      "epoch": 0.2698412698412698,
      "grad_norm": 2.8335487842559814,
      "learning_rate": 1.8888888888888886e-05,
      "loss": 0.2422,
      "step": 34
    },
    {
      "epoch": 0.2777777777777778,
      "grad_norm": 3.0785422325134277,
      "learning_rate": 1.9444444444444442e-05,
      "loss": 0.3447,
      "step": 35
    },
    {
      "epoch": 0.2777777777777778,
      "eval_loss": 0.10223711282014847,
      "eval_runtime": 113.9847,
      "eval_samples_per_second": 26.776,
      "eval_steps_per_second": 0.211,
      "eval_sts-test_pearson_cosine": 0.8812001280334643,
      "eval_sts-test_pearson_dot": 0.8652746969985129,
      "eval_sts-test_pearson_euclidean": 0.9105701789873448,
      "eval_sts-test_pearson_manhattan": 0.9116177887236803,
      "eval_sts-test_pearson_max": 0.9116177887236803,
      "eval_sts-test_spearman_cosine": 0.9072243769320245,
      "eval_sts-test_spearman_dot": 0.8716048789351082,
      "eval_sts-test_spearman_euclidean": 0.9073166540330135,
      "eval_sts-test_spearman_manhattan": 0.9081332302996223,
      "eval_sts-test_spearman_max": 0.9081332302996223,
      "step": 35
    },
    {
      "epoch": 0.2857142857142857,
      "grad_norm": 2.944396734237671,
      "learning_rate": 1.9999999999999998e-05,
      "loss": 0.2809,
      "step": 36
    },
    {
      "epoch": 0.29365079365079366,
      "grad_norm": 2.8323400020599365,
      "learning_rate": 2.0555555555555555e-05,
      "loss": 0.2836,
      "step": 37
    },
    {
      "epoch": 0.30158730158730157,
      "grad_norm": 2.8760273456573486,
      "learning_rate": 2.1111111111111107e-05,
      "loss": 0.2878,
      "step": 38
    },
    {
      "epoch": 0.30952380952380953,
      "grad_norm": 2.744379758834839,
      "learning_rate": 2.1666666666666667e-05,
      "loss": 0.2738,
      "step": 39
    },
    {
      "epoch": 0.31746031746031744,
      "grad_norm": 2.8519983291625977,
      "learning_rate": 2.222222222222222e-05,
      "loss": 0.2806,
      "step": 40
    },
    {
      "epoch": 0.31746031746031744,
      "eval_loss": 0.10033170133829117,
      "eval_runtime": 113.5147,
      "eval_samples_per_second": 26.886,
      "eval_steps_per_second": 0.211,
      "eval_sts-test_pearson_cosine": 0.8802115569848467,
      "eval_sts-test_pearson_dot": 0.8634798448575132,
      "eval_sts-test_pearson_euclidean": 0.9094188438238102,
      "eval_sts-test_pearson_manhattan": 0.9105849471172345,
      "eval_sts-test_pearson_max": 0.9105849471172345,
      "eval_sts-test_spearman_cosine": 0.9064710789490229,
      "eval_sts-test_spearman_dot": 0.8693704037025742,
      "eval_sts-test_spearman_euclidean": 0.9064271779615981,
      "eval_sts-test_spearman_manhattan": 0.9073247092600637,
      "eval_sts-test_spearman_max": 0.9073247092600637,
      "step": 40
    },
    {
      "epoch": 0.3253968253968254,
      "grad_norm": 2.9139747619628906,
      "learning_rate": 2.2777777777777776e-05,
      "loss": 0.2797,
      "step": 41
    },
    {
      "epoch": 0.3333333333333333,
      "grad_norm": 2.9206557273864746,
      "learning_rate": 2.333333333333333e-05,
      "loss": 0.3217,
      "step": 42
    },
    {
      "epoch": 0.3412698412698413,
      "grad_norm": 2.755398988723755,
      "learning_rate": 2.388888888888889e-05,
      "loss": 0.2544,
      "step": 43
    },
    {
      "epoch": 0.3492063492063492,
      "grad_norm": 3.0441982746124268,
      "learning_rate": 2.444444444444444e-05,
      "loss": 0.3203,
      "step": 44
    },
    {
      "epoch": 0.35714285714285715,
      "grad_norm": 2.978891611099243,
      "learning_rate": 2.4999999999999998e-05,
      "loss": 0.2987,
      "step": 45
    },
    {
      "epoch": 0.35714285714285715,
      "eval_loss": 0.09902294725179672,
      "eval_runtime": 113.6912,
      "eval_samples_per_second": 26.845,
      "eval_steps_per_second": 0.211,
      "eval_sts-test_pearson_cosine": 0.8796209948380269,
      "eval_sts-test_pearson_dot": 0.8617122615494917,
      "eval_sts-test_pearson_euclidean": 0.9092272396432914,
      "eval_sts-test_pearson_manhattan": 0.9100341993020892,
      "eval_sts-test_pearson_max": 0.9100341993020892,
      "eval_sts-test_spearman_cosine": 0.9063911531961779,
      "eval_sts-test_spearman_dot": 0.867835166929281,
      "eval_sts-test_spearman_euclidean": 0.9066020658911155,
      "eval_sts-test_spearman_manhattan": 0.9072894005148261,
      "eval_sts-test_spearman_max": 0.9072894005148261,
      "step": 45
    },
    {
      "epoch": 0.36507936507936506,
      "grad_norm": 2.9183595180511475,
      "learning_rate": 2.555555555555555e-05,
      "loss": 0.2765,
      "step": 46
    },
    {
      "epoch": 0.373015873015873,
      "grad_norm": 2.960238456726074,
      "learning_rate": 2.611111111111111e-05,
      "loss": 0.2716,
      "step": 47
    },
    {
      "epoch": 0.38095238095238093,
      "grad_norm": 3.23356294631958,
      "learning_rate": 2.6666666666666663e-05,
      "loss": 0.3726,
      "step": 48
    },
    {
      "epoch": 0.3888888888888889,
      "grad_norm": 2.974705457687378,
      "learning_rate": 2.722222222222222e-05,
      "loss": 0.2963,
      "step": 49
    },
    {
      "epoch": 0.3968253968253968,
      "grad_norm": 2.8041574954986572,
      "learning_rate": 2.7777777777777772e-05,
      "loss": 0.2784,
      "step": 50
    },
    {
      "epoch": 0.3968253968253968,
      "eval_loss": 0.09521521627902985,
      "eval_runtime": 113.6139,
      "eval_samples_per_second": 26.863,
      "eval_steps_per_second": 0.211,
      "eval_sts-test_pearson_cosine": 0.8802451373465323,
      "eval_sts-test_pearson_dot": 0.8609764645232105,
      "eval_sts-test_pearson_euclidean": 0.9103012041260427,
      "eval_sts-test_pearson_manhattan": 0.9108880877390901,
      "eval_sts-test_pearson_max": 0.9108880877390901,
      "eval_sts-test_spearman_cosine": 0.9071928272927434,
      "eval_sts-test_spearman_dot": 0.867374407941995,
      "eval_sts-test_spearman_euclidean": 0.9083242734345022,
      "eval_sts-test_spearman_manhattan": 0.9086424996542565,
      "eval_sts-test_spearman_max": 0.9086424996542565,
      "step": 50
    },
    {
      "epoch": 0.40476190476190477,
      "grad_norm": 2.6451456546783447,
      "learning_rate": 2.8333333333333332e-05,
      "loss": 0.2437,
      "step": 51
    },
    {
      "epoch": 0.4126984126984127,
      "grad_norm": 2.7020044326782227,
      "learning_rate": 2.8888888888888885e-05,
      "loss": 0.2258,
      "step": 52
    }
  ],
  "logging_steps": 1,
  "max_steps": 252,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 26,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 960,
  "trial_name": null,
  "trial_params": null
}