{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.24,
  "global_step": 420,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {"epoch": 0.01, "learning_rate": 1.1764705882352942e-05, "loss": 1.9122, "step": 1},
    {"epoch": 0.01, "learning_rate": 2.3529411764705884e-05, "loss": 2.0707, "step": 2},
    {"epoch": 0.02, "learning_rate": 3.529411764705883e-05, "loss": 1.9703, "step": 3},
    {"epoch": 0.02, "learning_rate": 4.705882352941177e-05, "loss": 2.015, "step": 4},
    {"epoch": 0.03, "learning_rate": 5.882352941176471e-05, "loss": 2.119, "step": 5},
    {"epoch": 0.03, "learning_rate": 7.058823529411765e-05, "loss": 2.0515, "step": 6},
    {"epoch": 0.04, "learning_rate": 8.23529411764706e-05, "loss": 1.7436, "step": 7},
    {"epoch": 0.04, "learning_rate": 9.411764705882353e-05, "loss": 1.9326, "step": 8},
    {"epoch": 0.05, "learning_rate": 0.00010588235294117647, "loss": 2.0819, "step": 9},
    {"epoch": 0.05, "learning_rate": 0.00011764705882352942, "loss": 1.9965, "step": 10},
    {"epoch": 0.06, "learning_rate": 0.00012941176470588237, "loss": 2.0543, "step": 11},
    {"epoch": 0.06, "learning_rate": 0.0001411764705882353, "loss": 2.0461, "step": 12},
    {"epoch": 0.07, "learning_rate": 0.00015294117647058822, "loss": 2.311, "step": 13},
    {"epoch": 0.07, "learning_rate": 0.0001647058823529412, "loss": 1.8447, "step": 14},
    {"epoch": 0.08, "learning_rate": 0.00017647058823529413, "loss": 2.6263, "step": 15},
    {"epoch": 0.09, "learning_rate": 0.00018823529411764707, "loss": 2.6179, "step": 16},
    {"epoch": 0.09, "learning_rate": 0.0002, "loss": 2.535, "step": 17},
    {"epoch": 0.1, "learning_rate": 0.00019999833248118322, "loss": 2.5924, "step": 18},
    {"epoch": 0.1, "learning_rate": 0.00019999332998034514, "loss": 2.3562, "step": 19},
    {"epoch": 0.11, "learning_rate": 0.00019998499266432108, "loss": 2.2352, "step": 20},
    {"epoch": 0.11, "learning_rate": 0.00019997332081116373, "loss": 2.152, "step": 21},
    {"epoch": 0.12, "learning_rate": 0.00019995831481013374, "loss": 2.1149, "step": 22},
    {"epoch": 0.12, "learning_rate": 0.00019993997516168689, "loss": 2.1608, "step": 23},
    {"epoch": 0.13, "learning_rate": 0.0001999183024774573, "loss": 2.1593, "step": 24},
    {"epoch": 0.13, "learning_rate": 0.00019989329748023725, "loss": 2.2774, "step": 25},
    {"epoch": 0.14, "learning_rate": 0.00019986496100395275, "loss": 2.0355, "step": 26},
    {"epoch": 0.14, "learning_rate": 0.00019983329399363598, "loss": 1.9455, "step": 27},
    {"epoch": 0.15, "learning_rate": 0.0001997982975053936, "loss": 2.5648, "step": 28},
    {"epoch": 0.15, "learning_rate": 0.0001997599727063717, "loss": 2.3619, "step": 29},
    {"epoch": 0.16, "learning_rate": 0.00019971832087471676, "loss": 2.0888, "step": 30},
    {"epoch": 0.17, "learning_rate": 0.000199673343399533, "loss": 1.9363, "step": 31},
    {"epoch": 0.17, "learning_rate": 0.00019962504178083618, "loss": 2.0259, "step": 32},
    {"epoch": 0.18, "learning_rate": 0.00019957341762950344, "loss": 1.8482, "step": 33},
    {"epoch": 0.18, "learning_rate": 0.0001995184726672197, "loss": 1.8542, "step": 34},
    {"epoch": 0.19, "learning_rate": 0.0001994602087264201, "loss": 1.9435, "step": 35},
    {"epoch": 0.19, "learning_rate": 0.00019939862775022893, "loss": 1.6559, "step": 36},
    {"epoch": 0.2, "learning_rate": 0.00019933373179239502, "loss": 1.9478, "step": 37},
    {"epoch": 0.2, "learning_rate": 0.0001992655230172229, "loss": 1.9417, "step": 38},
    {"epoch": 0.21, "learning_rate": 0.000199194003699501, "loss": 1.9067, "step": 39},
    {"epoch": 0.21, "learning_rate": 0.00019911917622442537, "loss": 1.6106, "step": 40},
    {"epoch": 0.22, "learning_rate": 0.0001990410430875205, "loss": 1.2742, "step": 41},
    {"epoch": 0.22, "learning_rate": 0.00019895960689455598, "loss": 1.3048, "step": 42},
    {"epoch": 0.23, "learning_rate": 0.0001988748703614594, "loss": 1.1847, "step": 43},
    {"epoch": 0.23, "learning_rate": 0.00019878683631422605, "loss": 1.1683, "step": 44},
    {"epoch": 0.24, "learning_rate": 0.00019869550768882455, "loss": 1.1531, "step": 45},
    {"epoch": 0.25, "learning_rate": 0.00019860088753109896, "loss": 1.1401, "step": 46},
    {"epoch": 0.25, "learning_rate": 0.0001985029789966671, "loss": 1.8959, "step": 47},
    {"epoch": 0.26, "learning_rate": 0.00019840178535081545, "loss": 1.897, "step": 48},
    {"epoch": 0.26, "learning_rate": 0.0001982973099683902, "loss": 1.791, "step": 49},
    {"epoch": 0.27, "learning_rate": 0.00019818955633368464, "loss": 1.7451, "step": 50},
    {"epoch": 0.27, "learning_rate": 0.00019807852804032305, "loss": 1.8556, "step": 51},
    {"epoch": 0.28, "learning_rate": 0.00019796422879114084, "loss": 1.7882, "step": 52},
    {"epoch": 0.28, "learning_rate": 0.0001978466623980609, "loss": 1.8834, "step": 53},
    {"epoch": 0.29, "learning_rate": 0.00019772583278196678, "loss": 1.7593, "step": 54},
    {"epoch": 0.29, "learning_rate": 0.00019760174397257156, "loss": 1.8291, "step": 55},
    {"epoch": 0.3, "learning_rate": 0.00019747440010828383, "loss": 1.808, "step": 56},
    {"epoch": 0.3, "learning_rate": 0.0001973438054360693, "loss": 1.6324, "step": 57},
    {"epoch": 0.31, "learning_rate": 0.00019720996431130946, "loss": 1.483, "step": 58},
    {"epoch": 0.31, "learning_rate": 0.00019707288119765623, "loss": 1.9648, "step": 59},
    {"epoch": 0.32, "learning_rate": 0.000196932560666883, "loss": 1.5742, "step": 60},
    {"epoch": 0.33, "learning_rate": 0.00019678900739873226, "loss": 1.7066, "step": 61},
    {"epoch": 0.33, "learning_rate": 0.00019664222618075958, "loss": 2.0719, "step": 62},
    {"epoch": 0.34, "learning_rate": 0.0001964922219081738, "loss": 1.9432, "step": 63},
    {"epoch": 0.34, "learning_rate": 0.00019633899958367384, "loss": 1.7979, "step": 64},
    {"epoch": 0.35, "learning_rate": 0.00019618256431728194, "loss": 1.6286, "step": 65},
    {"epoch": 0.35, "learning_rate": 0.000196022921326173, "loss": 1.6031, "step": 66},
    {"epoch": 0.36, "learning_rate": 0.00019586007593450097, "loss": 1.5169, "step": 67},
    {"epoch": 0.36, "learning_rate": 0.0001956940335732209, "loss": 1.5923, "step": 68},
    {"epoch": 0.37, "learning_rate": 0.000195524799779908, "loss": 1.6123, "step": 69},
    {"epoch": 0.37, "learning_rate": 0.000195352380198573, "loss": 1.7392, "step": 70},
    {"epoch": 0.38, "learning_rate": 0.00019517678057947384, "loss": 1.6406, "step": 71},
    {"epoch": 0.38, "learning_rate": 0.00019499800677892385, "loss": 1.5532, "step": 72},
    {"epoch": 0.39, "learning_rate": 0.0001948160647590966, "loss": 1.816, "step": 73},
    {"epoch": 0.39, "learning_rate": 0.0001946309605878269, "loss": 1.5759, "step": 74},
    {"epoch": 0.4, "learning_rate": 0.00019444270043840852, "loss": 1.6726, "step": 75},
    {"epoch": 0.41, "learning_rate": 0.00019425129058938832, "loss": 1.6134, "step": 76},
    {"epoch": 0.41, "learning_rate": 0.00019405673742435678, "loss": 1.6285, "step": 77},
    {"epoch": 0.42, "learning_rate": 0.00019385904743173516, "loss": 1.6023, "step": 78},
    {"epoch": 0.42, "learning_rate": 0.00019365822720455916, "loss": 1.5177, "step": 79},
    {"epoch": 0.43, "learning_rate": 0.00019345428344025883, "loss": 1.5413, "step": 80},
    {"epoch": 0.43, "learning_rate": 0.00019324722294043558, "loss": 1.3404, "step": 81},
    {"epoch": 0.44, "learning_rate": 0.00019303705261063497, "loss": 1.6409, "step": 82},
    {"epoch": 0.44, "learning_rate": 0.00019282377946011652, "loss": 1.6361, "step": 83},
    {"epoch": 0.45, "learning_rate": 0.00019260741060162016, "loss": 1.4902, "step": 84},
    {"epoch": 0.45, "learning_rate": 0.0001923879532511287, "loss": 1.3232, "step": 85},
    {"epoch": 0.46, "learning_rate": 0.00019216541472762735, "loss": 1.2438, "step": 86},
    {"epoch": 0.46, "learning_rate": 0.00019193980245285966, "loss": 1.0828, "step": 87},
    {"epoch": 0.47, "learning_rate": 0.00019171112395107985, "loss": 1.0696, "step": 88},
    {"epoch": 0.47, "learning_rate": 0.0001914793868488021, "loss": 1.0179, "step": 89},
    {"epoch": 0.48, "learning_rate": 0.0001912445988745459, "loss": 1.0012, "step": 90},
    {"epoch": 0.49, "learning_rate": 0.0001910067678585786, "loss": 0.9423, "step": 91},
    {"epoch": 0.49, "learning_rate": 0.00019076590173265406, "loss": 0.9173, "step": 92},
    {"epoch": 0.5, "learning_rate": 0.00019052200852974819, "loss": 1.8901, "step": 93},
    {"epoch": 0.5, "learning_rate": 0.0001902750963837912, "loss": 1.9665, "step": 94},
    {"epoch": 0.51, "learning_rate": 0.00019002517352939598, "loss": 1.8323, "step": 95},
    {"epoch": 0.51, "learning_rate": 0.0001897722483015838, "loss": 1.7828, "step": 96},
    {"epoch": 0.52, "learning_rate": 0.00018951632913550626, "loss": 1.5083, "step": 97},
    {"epoch": 0.52, "learning_rate": 0.00018925742456616374, "loss": 1.6924, "step": 98},
    {"epoch": 0.53, "learning_rate": 0.0001889955432281212, "loss": 1.7446, "step": 99},
    {"epoch": 0.53, "learning_rate": 0.0001887306938552197, "loss": 1.6143, "step": 100},
    {"epoch": 0.54, "learning_rate": 0.00018846288528028555, "loss": 1.5013, "step": 101},
    {"epoch": 0.54, "learning_rate": 0.0001881921264348355, "loss": 1.6335, "step": 102},
    {"epoch": 0.55, "learning_rate": 0.00018791842634877898, "loss": 1.5852, "step": 103},
    {"epoch": 0.55, "learning_rate": 0.00018764179415011682, "loss": 1.4358, "step": 104},
    {"epoch": 0.56, "learning_rate": 0.00018736223906463696, "loss": 1.7103, "step": 105},
    {"epoch": 0.57, "learning_rate": 0.0001870797704156067, "loss": 1.3361, "step": 106},
    {"epoch": 0.57, "learning_rate": 0.00018679439762346185, "loss": 1.7021, "step": 107},
    {"epoch": 0.58, "learning_rate": 0.00018650613020549232, "loss": 2.3076, "step": 108},
    {"epoch": 0.58, "learning_rate": 0.00018621497777552507, "loss": 1.6109, "step": 109},
    {"epoch": 0.59, "learning_rate": 0.00018592095004360318, "loss": 1.8032, "step": 110},
    {"epoch": 0.59, "learning_rate": 0.00018562405681566216, "loss": 1.6432, "step": 111},
    {"epoch": 0.6, "learning_rate": 0.0001853243079932029, "loss": 1.4159, "step": 112},
    {"epoch": 0.6, "learning_rate": 0.00018502171357296144, "loss": 1.4869, "step": 113},
    {"epoch": 0.61, "learning_rate": 0.00018471628364657555, "loss": 1.419, "step": 114},
    {"epoch": 0.61, "learning_rate": 0.00018440802840024822, "loss": 1.3616, "step": 115},
    {"epoch": 0.62, "learning_rate": 0.00018409695811440796, "loss": 1.4805, "step": 116},
    {"epoch": 0.62, "learning_rate": 0.00018378308316336584, "loss": 1.5585, "step": 117},
    {"epoch": 0.63, "learning_rate": 0.0001834664140149696, "loss": 1.4302, "step": 118},
    {"epoch": 0.63, "learning_rate": 0.00018314696123025454, "loss": 1.3596, "step": 119},
    {"epoch": 0.64, "learning_rate": 0.0001828247354630912, "loss": 1.5605, "step": 120},
    {"epoch": 0.65, "learning_rate": 0.00018249974745983023, "loss": 1.5355, "step": 121},
    {"epoch": 0.65, "learning_rate": 0.00018217200805894384, "loss": 1.5559, "step": 122},
    {"epoch": 0.66, "learning_rate": 0.00018184152819066435, "loss": 1.3455, "step": 123},
    {"epoch": 0.66, "learning_rate": 0.00018150831887661978, "loss": 1.4451, "step": 124},
    {"epoch": 0.67, "learning_rate": 0.00018117239122946615, "loss": 1.302, "step": 125},
    {"epoch": 0.67, "learning_rate": 0.00018083375645251684, "loss": 1.2725, "step": 126},
    {"epoch": 0.68, "learning_rate": 0.0001804924258393692, "loss": 1.2186, "step": 127},
    {"epoch": 0.68, "learning_rate": 0.00018014841077352762, "loss": 1.2388, "step": 128},
    {"epoch": 0.69, "learning_rate": 0.000179801722728024, "loss": 1.2655, "step": 129},
    {"epoch": 0.69, "learning_rate": 0.00017945237326503507, "loss": 1.1938, "step": 130},
    {"epoch": 0.7, "learning_rate": 0.00017910037403549693, "loss": 1.1203, "step": 131},
    {"epoch": 0.7, "learning_rate": 0.0001787457367787164, "loss": 1.1576, "step": 132},
    {"epoch": 0.71, "learning_rate": 0.00017838847332197938, "loss": 1.0116, "step": 133},
    {"epoch": 0.71, "learning_rate": 0.00017802859558015664, "loss": 0.8979, "step": 134},
    {"epoch": 0.72, "learning_rate": 0.00017766611555530636, "loss": 0.8594, "step": 135},
    {"epoch": 0.73, "learning_rate": 0.0001773010453362737, "loss": 0.819, "step": 136},
    {"epoch": 0.73, "learning_rate": 0.00017693339709828792, "loss": 0.8788, "step": 137},
    {"epoch": 0.74, "learning_rate": 0.00017656318310255604, "loss": 0.8013, "step": 138},
    {"epoch": 0.74, "learning_rate": 0.00017619041569585418, "loss": 1.6861, "step": 139},
    {"epoch": 0.75, "learning_rate": 0.0001758151073101157, "loss": 1.6799, "step": 140},
    {"epoch": 0.75, "learning_rate": 0.0001754372704620164, "loss": 1.7259, "step": 141},
    {"epoch": 0.76, "learning_rate": 0.00017505691775255745, "loss": 1.6804, "step": 142},
    {"epoch": 0.76, "learning_rate": 0.00017467406186664474, "loss": 1.6242, "step": 143},
    {"epoch": 0.77, "learning_rate": 0.0001742887155726663, "loss": 1.7912, "step": 144},
    {"epoch": 0.77, "learning_rate": 0.00017390089172206592, "loss": 1.5403, "step": 145},
    {"epoch": 0.78, "learning_rate": 0.00017351060324891502, "loss": 1.5285, "step": 146},
    {"epoch": 0.78, "learning_rate": 0.0001731178631694811, "loss": 1.5643, "step": 147},
    {"epoch": 0.79, "learning_rate": 0.00017272268458179353, "loss": 1.425, "step": 148},
    {"epoch": 0.79, "learning_rate": 0.00017232508066520702, "loss": 1.4601, "step": 149},
    {"epoch": 0.8, "learning_rate": 0.00017192506467996174, "loss": 1.3357, "step": 150},
    {"epoch": 0.81, "learning_rate": 0.00017152264996674136, "loss": 1.6524, "step": 151},
    {"epoch": 0.81, "learning_rate": 0.00017111784994622804, "loss": 1.181, "step": 152},
    {"epoch": 0.82, "learning_rate": 0.00017071067811865476, "loss": 1.7122, "step": 153},
    {"epoch": 0.82, "learning_rate": 0.00017030114806335526, "loss": 2.103, "step": 154},
    {"epoch": 0.83, "learning_rate": 0.00016988927343831095, "loss": 1.5937, "step": 155},
    {"epoch": 0.83, "learning_rate": 0.00016947506797969562, "loss": 1.5736, "step": 156},
    {"epoch": 0.84, "learning_rate": 0.00016905854550141716, "loss": 1.2724, "step": 157},
    {"epoch": 0.84, "learning_rate": 0.00016863971989465698, "loss": 1.224, "step": 158},
    {"epoch": 0.85, "learning_rate": 0.00016821860512740671, "loss": 1.1956, "step": 159},
    {"epoch": 0.85, "learning_rate": 0.00016779521524400232, "loss": 1.2721, "step": 160},
    {"epoch": 0.86, "learning_rate": 0.00016736956436465573, "loss": 1.3703, "step": 161},
    {"epoch": 0.86, "learning_rate": 0.00016694166668498398, "loss": 1.2, "step": 162},
    {"epoch": 0.87, "learning_rate": 0.00016651153647553567, "loss": 1.031, "step": 163},
    {"epoch": 0.87, "learning_rate": 0.00016607918808131525, "loss": 1.1153, "step": 164},
    {"epoch": 0.88, "learning_rate": 0.00016564463592130428, "loss": 1.4222, "step": 165},
    {"epoch": 0.89, "learning_rate": 0.00016520789448798087, "loss": 1.1901, "step": 166},
    {"epoch": 0.89, "learning_rate": 0.0001647689783468362, "loss": 1.278, "step": 167},
    {"epoch": 0.9, "learning_rate": 0.00016432790213588872, "loss": 1.3186, "step": 168},
    {"epoch": 0.9, "learning_rate": 0.00016388468056519612, "loss": 1.1945, "step": 169},
    {"epoch": 0.91, "learning_rate": 0.00016343932841636456, "loss": 1.1583, "step": 170},
    {"epoch": 0.91, "learning_rate": 0.00016299186054205577, "loss": 1.0005, "step": 171},
    {"epoch": 0.92, "learning_rate": 0.0001625422918654918, "loss": 0.9833, "step": 172},
    {"epoch": 0.92, "learning_rate": 0.00016209063737995715, "loss": 1.0031, "step": 173},
    {"epoch": 0.93, "learning_rate": 0.00016163691214829892, "loss": 0.9844, "step": 174},
    {"epoch": 0.93, "learning_rate": 0.00016118113130242432, "loss": 0.9744, "step": 175},
    {"epoch": 0.94, "learning_rate": 0.00016072331004279614, "loss": 1.025, "step": 176},
    {"epoch": 0.94, "learning_rate": 0.00016026346363792567, "loss": 0.869, "step": 177},
    {"epoch": 0.95, "learning_rate": 0.0001598016074238635, "loss": 0.8968, "step": 178},
    {"epoch": 0.95, "learning_rate": 0.00015933775680368822, "loss": 0.8441, "step": 179},
    {"epoch": 0.96, "learning_rate": 0.0001588719272469926, "loss": 0.767, "step": 180},
    {"epoch": 0.97, "learning_rate": 0.00015840413428936767, "loss": 0.5753, "step": 181},
    {"epoch": 0.97, "learning_rate": 0.0001579343935318846, "loss": 0.6553, "step": 182},
    {"epoch": 0.98, "learning_rate": 0.0001574627206405744, "loss": 0.6268, "step": 183},
    {"epoch": 0.98, "learning_rate": 0.00015698913134590552, "loss": 0.5677, "step": 184},
    {"epoch": 0.99, "learning_rate": 0.0001565136414422592, "loss": 1.4827, "step": 185},
    {"epoch": 0.99, "learning_rate": 0.00015603626678740263, "loss": 1.4317, "step": 186},
    {"epoch": 1.0, "learning_rate": 0.00015555702330196023, "loss": 0.9108, "step": 187},
    {"epoch": 1.0, "learning_rate": 0.00015507592696888258, "loss": 1.19, "step": 188},
    {"epoch": 1.01, "learning_rate": 0.00015459299383291345, "loss": 1.7435, "step": 189},
    {"epoch": 1.01, "learning_rate": 0.00015410824000005468, "loss": 1.5147, "step": 190},
    {"epoch": 1.02, "learning_rate": 0.000153621681637029, "loss": 1.7445, "step": 191},
    {"epoch": 1.02, "learning_rate": 0.0001531333349707409, "loss": 1.5158, "step": 192},
    {"epoch": 1.03, "learning_rate": 0.0001526432162877356, "loss": 1.6239, "step": 193},
    {"epoch": 1.03, "learning_rate": 0.0001521513419336555, "loss": 1.4444, "step": 194},
    {"epoch": 1.04, "learning_rate": 0.00015165772831269547, "loss": 1.5048, "step": 195},
    {"epoch": 1.05, "learning_rate": 0.00015116239188705556, "loss": 1.5457, "step": 196},
    {"epoch": 1.05, "learning_rate": 0.00015066534917639195, "loss": 1.4051, "step": 197},
    {"epoch": 1.06, "learning_rate": 0.00015016661675726608, "loss": 1.2748, "step": 198},
    {"epoch": 1.06, "learning_rate": 0.00014966621126259183, "loss": 1.3219, "step": 199},
    {"epoch": 1.07, "learning_rate": 0.0001491641493810808, "loss": 1.1541, "step": 200},
    {"epoch": 1.07, "learning_rate": 0.00014866044785668563, "loss": 1.4747, "step": 201},
    {"epoch": 1.08, "learning_rate": 0.00014815512348804178, "loss": 1.1666, "step": 202},
    {"epoch": 1.08, "learning_rate": 0.00014764819312790707, "loss": 1.2449, "step": 203},
    {"epoch": 1.09, "learning_rate": 0.0001471396736825998, "loss": 1.8251, "step": 204},
    {"epoch": 1.09, "learning_rate": 0.0001466295821114348, "loss": 1.2484, "step": 205},
    {"epoch": 1.1, "learning_rate": 0.00014611793542615803, "loss": 1.4197, "step": 206},
    {"epoch": 1.1, "learning_rate": 0.00014560475069037894, "loss": 1.2089, "step": 207},
    {"epoch": 1.11, "learning_rate": 0.0001450900450190016, "loss": 1.0404, "step": 208},
    {"epoch": 1.11, "learning_rate": 0.00014457383557765386, "loss": 1.0625, "step": 209},
    {"epoch": 1.12, "learning_rate": 0.00014405613958211482, "loss": 0.947, "step": 210},
    {"epoch": 1.13, "learning_rate": 0.00014353697429774084, "loss": 1.1545, "step": 211},
    {"epoch": 1.13, "learning_rate": 0.00014301635703888943, "loss": 1.0116, "step": 212},
    {"epoch": 1.14, "learning_rate": 0.0001424943051683422, "loss": 0.8152, "step": 213},
    {"epoch": 1.14, "learning_rate": 0.00014197083609672543, "loss": 0.8963, "step": 214},
    {"epoch": 1.15, "learning_rate": 0.0001414459672819297, "loss": 1.3162, "step": 215},
    {"epoch": 1.15, "learning_rate": 0.0001409197162285275, "loss": 0.8569, "step": 216},
    {"epoch": 1.16, "learning_rate": 0.00014039210048718949, "loss": 1.171, "step": 217},
    {"epoch": 1.16, "learning_rate": 0.00013986313765409925, "loss": 0.8849, "step": 218},
    {"epoch": 1.17, "learning_rate": 0.00013933284537036625, "loss": 0.9212, "step": 219},
    {"epoch": 1.17, "learning_rate": 0.00013880124132143782, "loss": 0.7478, "step": 220},
    {"epoch": 1.18, "learning_rate": 0.000138268343236509, "loss": 0.6915, "step": 221},
    {"epoch": 1.18, "learning_rate": 0.00013773416888793145, "loss": 0.6634, "step": 222},
    {"epoch": 1.19, "learning_rate": 0.00013719873609062077, "loss": 0.7808, "step": 223},
    {"epoch": 1.19, "learning_rate": 0.00013666206270146223, "loss": 0.7653, "step": 224},
    {"epoch": 1.2, "learning_rate": 0.00013612416661871533, "loss": 0.6742, "step": 225},
    {"epoch": 1.21, "learning_rate": 0.00013558506578141682, "loss": 0.7197, "step": 226},
    {"epoch": 1.21, "learning_rate": 0.0001350447781687826, "loss": 0.5688, "step": 227},
    {"epoch": 1.22, "learning_rate": 0.0001345033217996078, "loss": 0.5976, "step": 228},
    {"epoch": 1.22, "learning_rate": 0.00013396071473166613, "loss": 0.5635, "step": 229},
    {"epoch": 1.23, "learning_rate": 0.00013341697506110754, "loss": 0.4534, "step": 230},
    {"epoch": 1.23, "learning_rate": 0.00013287212092185464, "loss": 0.455, "step": 231},
    {"epoch": 1.24, "learning_rate": 0.000132326170484998, "loss": 0.4934, "step": 232},
    {"epoch": 1.24, "learning_rate": 0.00013177914195819016, "loss": 0.4047, "step": 233},
    {"epoch": 1.25, "learning_rate": 0.0001312310535850384, "loss": 0.9632, "step": 234},
    {"epoch": 1.25, "learning_rate": 0.00013068192364449618, "loss": 1.5302, "step": 235},
    {"epoch": 1.26, "learning_rate": 0.00013013177045025374, "loss": 1.5886, "step": 236},
    {"epoch": 1.26, "learning_rate": 0.00012958061235012706, "loss": 1.5665, "step": 237},
    {"epoch": 1.27, "learning_rate": 0.00012902846772544624, "loss": 1.3943, "step": 238},
    {"epoch": 1.27, "learning_rate": 0.0001284753549904423, "loss": 1.3516, "step": 239},
    {"epoch": 1.28, "learning_rate": 0.00012792129259163318, "loss": 1.5157, "step": 240},
    {"epoch": 1.29, "learning_rate": 0.0001273662990072083, "loss": 1.4044, "step": 241},
    {"epoch": 1.29, "learning_rate": 0.0001268103927464126, "loss": 1.4095, "step": 242},
    {"epoch": 1.3, "learning_rate": 0.00012625359234892907, "loss": 1.3318, "step": 243},
    {"epoch": 1.3, "learning_rate": 0.00012569591638426052, "loss": 1.1496, "step": 244},
    {"epoch": 1.31, "learning_rate": 0.0001251373834511103, "loss": 1.2188, "step": 245},
    {"epoch": 1.31, "learning_rate": 0.00012457801217676182, "loss": 1.3083, "step": 246},
    {"epoch": 1.32, "learning_rate": 0.00012401782121645766, "loss": 1.1114, "step": 247},
    {"epoch": 1.32, "learning_rate": 0.00012345682925277716, "loss": 1.1415, "step": 248},
    {"epoch": 1.33, "learning_rate": 0.0001228950549950134, "loss": 1.8205, "step": 249},
    {"epoch": 1.33, "learning_rate": 0.00012233251717854937, "loss": 1.2441, "step": 250},
    {"epoch": 1.34, "learning_rate": 0.00012176923456423284, "loss": 1.2625, "step": 251},
    {"epoch": 1.34, "learning_rate": 0.00012120522593775108, "loss": 1.1744, "step": 252},
    {"epoch": 1.35, "learning_rate": 0.00012064051010900397, "loss": 0.9422, "step": 253},
    {"epoch": 1.35, "learning_rate": 0.00012007510591147697, "loss": 0.9428, "step": 254},
    {"epoch": 1.36, "learning_rate": 0.00011950903220161285, "loss": 0.9575, "step": 255},
    {"epoch": 1.37, "learning_rate": 0.00011894230785818284, "loss": 0.8955, "step": 256},
    {"epoch": 1.37, "learning_rate": 0.00011837495178165706, "loss": 0.9415, "step": 257},
    {"epoch": 1.38, "learning_rate": 0.00011780698289357419, "loss": 0.7821, "step": 258},
    {"epoch": 1.38, "learning_rate": 0.00011723842013591044, "loss": 0.7817, "step": 259},
    {"epoch": 1.39, "learning_rate": 0.00011666928247044768, "loss": 0.7947, "step": 260},
    {"epoch": 1.39, "learning_rate": 0.00011609958887814129, "loss": 1.0891, "step": 261},
    {"epoch": 1.4, "learning_rate": 0.00011552935835848697, "loss": 0.7932, "step": 262},
    {"epoch": 1.4, "learning_rate": 0.00011495860992888712, "loss": 1.0385, "step": 263},
    {"epoch": 1.41, "learning_rate": 0.00011438736262401669, "loss": 0.7126, "step": 264},
    {"epoch": 1.41, "learning_rate": 0.00011381563549518823, "loss": 0.7194, "step": 265},
    {"epoch": 1.42, "learning_rate": 0.00011324344760971671, "loss": 0.6358, "step": 266},
    {"epoch": 1.42, "learning_rate": 0.00011267081805028339, "loss": 0.513, "step": 267},
    {"epoch": 1.43, "learning_rate": 0.00011209776591429962, "loss": 0.5445, "step": 268},
    {"epoch": 1.43, "learning_rate": 0.00011152431031326978, "loss": 0.6362, "step": 269},
    {"epoch": 1.44, "learning_rate": 0.00011095047037215396, "loss": 0.6582, "step": 270},
    {"epoch": 1.45, "learning_rate": 0.00011037626522873019, "loss": 0.5072, "step": 271},
    {"epoch": 1.45, "learning_rate": 0.0001098017140329561, "loss": 0.568, "step": 272},
    {"epoch": 1.46, "learning_rate": 0.00010922683594633021, "loss": 0.5671, "step": 273},
    {"epoch": 1.46, "learning_rate": 0.00010865165014125316, "loss": 0.5759, "step": 274},
    {"epoch": 1.47, "learning_rate": 0.00010807617580038796, "loss": 0.4984, "step": 275},
    {"epoch": 1.47, "learning_rate": 0.00010750043211602045, "loss": 0.435, "step": 276},
    {"epoch": 1.48, "learning_rate": 0.00010692443828941918, "loss": 0.4135, "step": 277},
    {"epoch": 1.48, "learning_rate": 0.00010634821353019504, "loss": 0.3939, "step": 278},
    {"epoch": 1.49, "learning_rate": 0.00010577177705566061, "loss": 0.3891, "step": 279},
    {"epoch": 1.49, "learning_rate": 0.00010519514809018927, "loss": 0.8678, "step": 280},
    {"epoch": 1.5, "learning_rate": 0.00010461834586457398, "loss": 1.5705, "step": 281},
    {"epoch": 1.5, "learning_rate": 0.00010404138961538603, "loss": 1.7109, "step": 282},
    {"epoch": 1.51, "learning_rate": 0.00010346429858433352, "loss": 1.271, "step": 283},
    {"epoch": 1.51, "learning_rate": 0.00010288709201761948, "loss": 1.2667, "step": 284},
    {"epoch": 1.52, "learning_rate": 0.00010230978916530012, "loss": 1.4117, "step": 285},
    {"epoch": 1.53, "learning_rate": 0.00010173240928064285, "loss": 1.4835, "step": 286},
    {"epoch": 1.53, "learning_rate": 0.00010115497161948409, "loss": 1.2376, "step": 287},
    {"epoch": 1.54, "learning_rate": 0.00010057749543958717, "loss": 1.3616, "step": 288},
    {"epoch": 1.54, "learning_rate": 0.0001, "loss": 1.1731, "step": 289},
    {"epoch": 1.55, "learning_rate": 9.942250456041286e-05, "loss": 1.2436, "step": 290},
    {"epoch": 1.55, "learning_rate": 9.884502838051595e-05, "loss": 1.1019, "step": 291},
    {"epoch": 1.56, "learning_rate": 9.826759071935718e-05, "loss": 1.2143, "step": 292},
    {"epoch": 1.56, "learning_rate": 9.76902108346999e-05, "loss": 0.8841, "step": 293},
    {"epoch": 1.57, "learning_rate": 9.711290798238057e-05, "loss": 1.075, "step": 294},
    {"epoch": 1.57, "learning_rate": 9.653570141566653e-05, "loss": 1.5949, "step": 295},
    {"epoch": 1.58, "learning_rate": 9.595861038461398e-05, "loss": 1.0204, "step": 296},
    {"epoch": 1.58, "learning_rate": 9.538165413542607e-05, "loss": 1.1104, "step": 297},
    {"epoch": 1.59, "learning_rate": 9.480485190981073e-05, "loss": 1.0312, "step": 298},
    {"epoch": 1.59, "learning_rate": 9.422822294433939e-05, "loss": 0.8481, "step": 299},
    {"epoch": 1.6, "learning_rate": 9.365178646980497e-05, "loss": 0.8279, "step": 300},
    {"epoch": 1.61, "learning_rate": 9.307556171058085e-05, "loss": 0.768, "step": 301},
    {"epoch": 1.61, "learning_rate": 9.249956788397957e-05, "loss": 0.794, "step": 302},
    {"epoch": 1.62, "learning_rate": 9.192382419961208e-05, "loss": 0.8737, "step": 303},
    {"epoch": 1.62, "learning_rate": 9.134834985874688e-05, "loss": 1.0419, "step": 304},
    {"epoch": 1.63, "learning_rate": 9.077316405366981e-05, "loss": 0.6576, "step": 305},
    {"epoch": 1.63, "learning_rate": 9.019828596704394e-05, "loss": 0.4765, "step": 306},
    {"epoch": 1.64, "learning_rate": 8.962373477126983e-05, "loss": 0.8899, "step": 307},
    {"epoch": 1.64, "learning_rate": 8.904952962784605e-05, "loss": 0.7921, "step": 308},
    {"epoch": 1.65, "learning_rate": 8.847568968673026e-05, "loss": 0.8791, "step": 309},
    {"epoch": 1.65, "learning_rate": 8.790223408570042e-05, "loss": 0.811, "step": 310},
    {"epoch": 1.66, "learning_rate": 8.732918194971664e-05, "loss": 0.6333, "step": 311},
    {"epoch": 1.66, "learning_rate": 8.675655239028333e-05, "loss": 0.5666, "step": 312},
    {"epoch": 1.67, "learning_rate": 8.61843645048118e-05, "loss": 0.3983, "step": 313},
    {"epoch": 1.67, "learning_rate": 8.561263737598338e-05, "loss": 0.3781, "step": 314},
    {"epoch": 1.68, "learning_rate": 8.504139007111289e-05, "loss": 0.5553, "step": 315},
    {"epoch": 1.69, "learning_rate": 8.447064164151304e-05, "loss": 0.5327, "step": 316},
    {"epoch": 1.69, "learning_rate": 8.39004111218587e-05, "loss": 0.4589, "step": 317},
    {"epoch": 1.7, "learning_rate": 8.333071752955233e-05, "loss": 0.4377, "step": 318},
    {"epoch": 1.7, "learning_rate": 8.27615798640896e-05, "loss": 0.4125, "step": 319},
    {"epoch": 1.71, "learning_rate": 8.219301710642583e-05, "loss": 0.4413, "step": 320},
    {"epoch": 1.71, "learning_rate": 8.162504821834295e-05, "loss": 0.4931, "step": 321},
    {"epoch": 1.72, "learning_rate": 8.10576921418172e-05, "loss": 0.4238, "step": 322},
    {"epoch": 1.72, "learning_rate": 8.049096779838719e-05, "loss": 0.3512, "step": 323},
    {"epoch": 1.73, "learning_rate": 7.992489408852306e-05, "loss": 0.3913, "step": 324},
    {"epoch": 1.73, "learning_rate": 7.935948989099605e-05, "loss": 0.3857, "step": 325},
    {"epoch": 1.74, "learning_rate": 7.879477406224894e-05, "loss": 0.8727, "step": 326},
    {"epoch": 1.74, "learning_rate": 7.823076543576717e-05, "loss": 1.2647, "step": 327},
    {"epoch": 1.75, "learning_rate": 7.766748282145068e-05, "loss": 1.4902, "step": 328},
    {"epoch": 1.75, "learning_rate": 7.710494500498662e-05, "loss": 1.4818, "step": 329},
    {"epoch": 1.76, "learning_rate": 7.654317074722287e-05, "loss": 1.54, "step": 330},
    {"epoch": 1.77, "learning_rate": 7.598217878354237e-05, "loss": 1.4253, "step": 331},
    {"epoch": 1.77, "learning_rate": 7.542198782323819e-05, "loss": 1.2916, "step": 332},
    {"epoch": 1.78, "learning_rate": 7.486261654888973e-05, "loss": 1.2685, "step": 333},
    {"epoch": 1.78, "learning_rate": 7.430408361573948e-05, "loss": 1.1558, "step": 334},
    {"epoch": 1.79, "learning_rate": 7.374640765107095e-05, "loss": 1.0571, "step": 335},
    {"epoch": 1.79, "learning_rate": 7.318960725358741e-05, "loss": 1.1138, "step": 336},
    {"epoch": 1.8, "learning_rate": 7.263370099279172e-05, "loss": 0.9725, "step": 337},
    {"epoch": 1.8, "learning_rate": 7.207870740836684e-05, "loss": 1.1059, "step": 338},
    {"epoch": 1.81, "learning_rate": 7.152464500955769e-05, "loss": 0.8638, "step": 339},
    {"epoch": 1.81, "learning_rate": 7.097153227455379e-05, "loss": 1.0556, "step": 340},
    {"epoch": 1.82, "learning_rate": 7.041938764987297e-05, "loss": 1.4042, "step": 341},
    {"epoch": 1.82, "learning_rate": 6.98682295497463e-05, "loss": 0.8709, "step": 342},
    {"epoch": 1.83, "learning_rate": 6.931807635550383e-05, "loss": 0.9123, "step": 343},
    {"epoch": 1.83, "learning_rate": 6.876894641496163e-05, "loss": 0.8378, "step": 344},
    {"epoch": 1.84, "learning_rate": 6.822085804180984e-05, "loss": 0.7112, "step": 345},
    {"epoch": 1.85, "learning_rate": 6.767382951500204e-05, "loss": 0.6657, "step": 346},
    {"epoch": 1.85, "learning_rate": 6.712787907814541e-05, "loss": 0.6509, "step": 347},
    {"epoch": 1.86, "learning_rate": 6.65830249388925e-05, "loss": 0.8331, "step": 348},
    {"epoch": 1.86, "learning_rate": 6.603928526833387e-05, "loss": 0.8813, "step": 349},
    {"epoch": 1.87, "learning_rate": 6.54966782003922e-05, "loss": 0.5913, "step": 350},
    {"epoch": 1.87, "learning_rate": 6.495522183121741e-05, "loss": 0.4076, "step": 351},
    {"epoch": 1.88, "learning_rate": 6.441493421858317e-05, "loss": 0.397, "step": 352},
    {"epoch": 1.88, "learning_rate": 6.387583338128471e-05, "loss": 0.8866, "step": 353},
    {"epoch": 1.89, "learning_rate": 6.333793729853781e-05, "loss": 0.7043, "step": 354},
    {"epoch": 1.89, "learning_rate": 6.280126390937925e-05, "loss": 0.7236, "step": 355},
    {"epoch": 1.9, "learning_rate": 6.226583111206856e-05, "loss": 0.674, "step": 356},
    {"epoch": 1.9, "learning_rate": 6.173165676349103e-05, "loss": 0.6255, "step": 357},
    {"epoch": 1.91, "learning_rate": 6.11987586785622e-05, "loss": 0.4535, "step": 358},
    {"epoch": 1.91, "learning_rate": 6.066715462963376e-05, "loss": 0.3801, "step": 359},
    {"epoch": 1.92, "learning_rate": 6.013686234590077e-05, "loss": 0.2789, "step": 360},
    {"epoch": 1.93, "learning_rate": 5.960789951281052e-05, "loss": 0.4639, "step": 361},
    {"epoch": 1.93, "learning_rate": 5.9080283771472524e-05, "loss": 0.4388, "step": 362},
    {"epoch": 1.94, "learning_rate": 5.855403271807033e-05, "loss": 0.3567, "step": 363},
    {"epoch": 1.94, "learning_rate": 5.802916390327459e-05, "loss": 0.3494, "step": 364},
    {"epoch": 1.95, "learning_rate": 5.750569483165784e-05, "loss": 0.3534, "step": 365},
    {"epoch": 1.95, "learning_rate": 5.698364296111056e-05, "loss": 0.457, "step": 366},
    {"epoch": 1.96, "learning_rate": 5.646302570225919e-05, "loss": 0.5304, "step": 367},
    {"epoch": 1.96, "learning_rate": 5.59438604178852e-05, "loss": 0.4163, "step": 368},
    {"epoch": 1.97, "learning_rate": 5.542616442234618e-05, "loss": 0.4019, "step": 369},
    {"epoch": 1.97, "learning_rate": 5.490995498099843e-05, "loss": 0.4139, "step": 370},
    {"epoch": 1.98, "learning_rate": 5.43952493096211e-05, "loss": 0.3702, "step": 371},
    {"epoch": 1.98, "learning_rate": 5.388206457384197e-05, "loss": 0.8498, "step": 372},
    {"epoch": 1.99, "learning_rate": 5.337041788856518e-05, "loss": 1.2813, "step": 373},
    {"epoch": 1.99, "learning_rate": 5.286032631740023e-05, "loss": 0.7817, "step": 374},
    {"epoch": 2.0, "learning_rate": 5.235180687209296e-05, "loss": 0.4367, "step": 375},
    {"epoch": 2.01, "learning_rate": 5.184487651195825e-05, "loss": 1.4105, "step": 376},
    {"epoch": 2.01, "learning_rate": 5.1339552143314384e-05, "loss": 1.151, "step": 377},
    {"epoch": 2.02, "learning_rate": 5.0835850618919245e-05, "loss": 1.4063, "step": 378},
    {"epoch": 2.02, "learning_rate": 5.033378873740819e-05, "loss": 1.1942, "step": 379},
    {"epoch": 2.03, "learning_rate": 4.9833383242733964e-05, "loss": 1.1175, "step": 380},
    {"epoch": 2.03, "learning_rate": 4.933465082360807e-05, "loss": 1.3146, "step": 381},
    {"epoch": 2.04, "learning_rate": 4.8837608112944454e-05, "loss": 1.1888, "step": 382},
    {"epoch": 2.04, "learning_rate": 4.834227168730451e-05, "loss": 1.2757, "step": 383},
    {"epoch": 2.05, "learning_rate": 4.7848658066344486e-05, "loss": 1.0492, "step": 384},
    {"epoch": 2.05, "learning_rate": 4.735678371226441e-05, "loss": 1.0377, "step": 385},
    {"epoch": 2.06, "learning_rate": 4.686666502925908e-05, "loss": 1.1143, "step": 386},
    {"epoch": 2.06, "learning_rate": 4.637831836297103e-05, "loss": 0.8922, "step": 387},
    {"epoch": 2.07, "learning_rate": 4.5891759999945347e-05, "loss": 0.9866, "step": 388},
    {"epoch": 2.07, "learning_rate": 4.540700616708658e-05, "loss": 0.7297, "step": 389},
    {"epoch": 2.08, "learning_rate": 4.4924073031117455e-05, "loss": 1.0632, "step": 390},
    {"epoch": 2.09, "learning_rate": 4.444297669803981e-05, "loss": 1.2544, "step": 391},
    {"epoch": 2.09, "learning_rate": 4.3963733212597366e-05, "loss": 0.7964, "step": 392},
    {"epoch": 2.1, "learning_rate": 4.3486358557740814e-05, "loss": 0.8269, "step": 393},
    {"epoch": 2.1, "learning_rate": 4.301086865409449e-05, "loss": 0.8226, "step": 394},
    {"epoch": 2.11, "learning_rate": 4.253727935942563e-05, "loss": 0.6238, "step": 395},
    {"epoch": 2.11, "learning_rate": 4.206560646811545e-05, "loss": 0.6065, "step": 396},
    {"epoch": 2.12, "learning_rate": 4.159586571063236e-05, "loss": 0.6122, "step": 397},
    {"epoch": 2.12, "learning_rate": 4.112807275300742e-05, "loss": 0.7502, "step": 398},
    {"epoch": 2.13, "learning_rate": 4.0662243196311815e-05, "loss": 0.6923, "step": 399},
    {"epoch": 2.13, "learning_rate": 4.0198392576136526e-05, "loss": 0.4024, "step": 400},
    {"epoch": 2.14, "learning_rate": 3.973653636207437e-05, "loss": 0.4082, "step": 401},
    {"epoch": 2.14, "learning_rate": 3.927668995720384e-05, "loss": 0.714, "step": 402},
    {"epoch": 2.15, "learning_rate": 3.881886869757565e-05, "loss": 0.5765, "step": 403},
    {"epoch": 2.15, "learning_rate": 3.8363087851701085e-05, "loss": 0.6759, "step": 404},
    {"epoch": 2.16, "learning_rate": 3.7909362620042865e-05, "loss": 0.5544, "step": 405},
    {"epoch": 2.17, "learning_rate": 3.745770813450824e-05, "loss": 0.5977, "step": 406},
    {"epoch": 2.17, "learning_rate": 3.7008139457944245e-05, "loss": 0.4911, "step": 407},
    {"epoch": 2.18, "learning_rate": 3.6560671583635467e-05, "loss": 0.3373, "step": 408},
    {"epoch": 2.18, "learning_rate": 3.6115319434803894e-05, "loss": 0.2999, "step": 409},
    {"epoch": 2.19, "learning_rate": 3.5672097864111285e-05, "loss": 0.3538, "step": 410},
    {"epoch": 2.19, "learning_rate": 3.523102165316381e-05, "loss": 0.3836, "step": 411},
    {"epoch": 2.2, "learning_rate": 3.479210551201915e-05, "loss": 0.3839, "step": 412},
    {"epoch": 2.2, "learning_rate": 3.435536407869575e-05, "loss": 0.3004, "step": 413},
    {"epoch": 2.21, "learning_rate": 3.3920811918684805e-05, "loss": 0.3204, "step": 414},
    {"epoch": 2.21, "learning_rate": 3.348846352446435e-05, "loss": 0.2981, "step": 415},
    {"epoch": 2.22, "learning_rate": 3.3058333315016065e-05, "loss": 0.355, "step": 416},
    {"epoch": 2.22, "learning_rate": 3.263043563534428e-05, "loss": 0.4275, "step": 417},
    {"epoch": 2.23, "learning_rate": 3.22047847559977e-05, "loss": 0.3209, "step": 418},
    {"epoch": 2.23, "learning_rate": 3.1781394872593295e-05, "loss": 0.3781, "step": 419},
    {"epoch": 2.24, "learning_rate": 3.136028010534303e-05, "loss": 0.3621, "step": 420}
  ],
  "max_steps": 561,
  "num_train_epochs": 3,
  "total_flos": 7.61388308692992e+16,
  "trial_name": null,
  "trial_params": null
}