|
{ |
|
"best_metric": null, |
|
"best_model_checkpoint": null, |
|
"epoch": 0.9713721618953604, |
|
"eval_steps": 127, |
|
"global_step": 492, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 2e-05, |
|
"loss": 2.8529, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"eval_loss": 3.1576027870178223, |
|
"eval_runtime": 37.0517, |
|
"eval_samples_per_second": 1.403, |
|
"eval_steps_per_second": 1.403, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 4e-05, |
|
"loss": 0.7839, |
|
"step": 2 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 6e-05, |
|
"loss": 0.5453, |
|
"step": 3 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 8e-05, |
|
"loss": 0.0492, |
|
"step": 4 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.0001, |
|
"loss": 0.766, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00012, |
|
"loss": 0.8575, |
|
"step": 6 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00014, |
|
"loss": 0.5194, |
|
"step": 7 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00016, |
|
"loss": 0.2592, |
|
"step": 8 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00018, |
|
"loss": 2.9974, |
|
"step": 9 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0002, |
|
"loss": 0.1871, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00019999799412001546, |
|
"loss": 0.213, |
|
"step": 11 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00019999197656053288, |
|
"loss": 0.7832, |
|
"step": 12 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.0001999819475629623, |
|
"loss": 2.6674, |
|
"step": 13 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00019996790752964305, |
|
"loss": 0.1479, |
|
"step": 14 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00019994985702382758, |
|
"loss": 0.1546, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00019992779676965885, |
|
"loss": 3.1039, |
|
"step": 16 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00019990172765214128, |
|
"loss": 2.2049, |
|
"step": 17 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00019987165071710527, |
|
"loss": 3.6854, |
|
"step": 18 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00019983756717116536, |
|
"loss": 0.1398, |
|
"step": 19 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.0001997994783816715, |
|
"loss": 0.3254, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00019975738587665456, |
|
"loss": 0.6154, |
|
"step": 21 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00019971129134476473, |
|
"loss": 0.8228, |
|
"step": 22 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00019966119663520412, |
|
"loss": 0.3039, |
|
"step": 23 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.0001996071037576521, |
|
"loss": 1.0781, |
|
"step": 24 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00019954901488218515, |
|
"loss": 0.6851, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00019948693233918952, |
|
"loss": 0.6144, |
|
"step": 26 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.0001994208586192678, |
|
"loss": 0.8485, |
|
"step": 27 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00019935079637313906, |
|
"loss": 0.6133, |
|
"step": 28 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00019927674841153237, |
|
"loss": 2.9564, |
|
"step": 29 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0001991987177050743, |
|
"loss": 1.506, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00019911670738416947, |
|
"loss": 0.168, |
|
"step": 31 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00019903072073887507, |
|
"loss": 0.7176, |
|
"step": 32 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.000198940761218769, |
|
"loss": 0.4638, |
|
"step": 33 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00019884683243281116, |
|
"loss": 0.7248, |
|
"step": 34 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00019874893814919906, |
|
"loss": 0.3349, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00019864708229521636, |
|
"loss": 0.3917, |
|
"step": 36 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.0001985412689570754, |
|
"loss": 1.2308, |
|
"step": 37 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00019843150237975344, |
|
"loss": 0.9945, |
|
"step": 38 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00019831778696682194, |
|
"loss": 1.3547, |
|
"step": 39 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00019820012728027044, |
|
"loss": 0.9227, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00019807852804032305, |
|
"loss": 1.0954, |
|
"step": 41 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00019795299412524945, |
|
"loss": 0.6762, |
|
"step": 42 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.000197823530571169, |
|
"loss": 1.3553, |
|
"step": 43 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0001976901425718487, |
|
"loss": 0.3896, |
|
"step": 44 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00019755283547849494, |
|
"loss": 0.3207, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0001974116147995387, |
|
"loss": 3.249, |
|
"step": 46 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00019726648620041468, |
|
"loss": 0.9758, |
|
"step": 47 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0001971174555033339, |
|
"loss": 0.5544, |
|
"step": 48 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00019696452868705024, |
|
"loss": 0.332, |
|
"step": 49 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00019680771188662044, |
|
"loss": 0.6672, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.0001966470113931582, |
|
"loss": 0.7634, |
|
"step": 51 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00019648243365358146, |
|
"loss": 0.5081, |
|
"step": 52 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00019631398527035422, |
|
"loss": 0.7995, |
|
"step": 53 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.00019614167300122126, |
|
"loss": 1.1439, |
|
"step": 54 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.0001959655037589372, |
|
"loss": 0.4855, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.00019578548461098914, |
|
"loss": 5.2893, |
|
"step": 56 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.00019560162277931325, |
|
"loss": 0.1765, |
|
"step": 57 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.00019541392564000488, |
|
"loss": 0.4805, |
|
"step": 58 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.00019522240072302274, |
|
"loss": 1.668, |
|
"step": 59 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.00019502705571188672, |
|
"loss": 0.1239, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.0001948278984433699, |
|
"loss": 3.1256, |
|
"step": 61 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.0001946249369071837, |
|
"loss": 0.2337, |
|
"step": 62 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.00019441817924565786, |
|
"loss": 0.1279, |
|
"step": 63 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 0.0001942076337534135, |
|
"loss": 3.1412, |
|
"step": 64 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 0.00019399330887703037, |
|
"loss": 0.4809, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 0.00019377521321470805, |
|
"loss": 0.5192, |
|
"step": 66 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 0.00019355335551592105, |
|
"loss": 0.5328, |
|
"step": 67 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 0.00019332774468106768, |
|
"loss": 0.1569, |
|
"step": 68 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 0.00019309838976111311, |
|
"loss": 3.1561, |
|
"step": 69 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 0.00019286529995722623, |
|
"loss": 4.205, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 0.00019262848462041045, |
|
"loss": 0.0668, |
|
"step": 71 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 0.0001923879532511287, |
|
"loss": 0.4921, |
|
"step": 72 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 0.0001921437154989221, |
|
"loss": 3.1048, |
|
"step": 73 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 0.00019189578116202307, |
|
"loss": 0.2369, |
|
"step": 74 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 0.00019164416018696207, |
|
"loss": 0.7976, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 0.00019138886266816866, |
|
"loss": 0.4052, |
|
"step": 76 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 0.00019112989884756653, |
|
"loss": 0.3292, |
|
"step": 77 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 0.0001908672791141625, |
|
"loss": 0.4974, |
|
"step": 78 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.00019060101400362998, |
|
"loss": 0.1235, |
|
"step": 79 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.00019033111419788597, |
|
"loss": 1.8426, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.000190057590524663, |
|
"loss": 1.1387, |
|
"step": 81 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.00018978045395707418, |
|
"loss": 0.1255, |
|
"step": 82 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.0001894997156131734, |
|
"loss": 0.3632, |
|
"step": 83 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.0001892153867555092, |
|
"loss": 0.5894, |
|
"step": 84 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.00018892747879067286, |
|
"loss": 0.9053, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.00018863600326884082, |
|
"loss": 0.6213, |
|
"step": 86 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.00018834097188331143, |
|
"loss": 0.4491, |
|
"step": 87 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.00018804239647003573, |
|
"loss": 0.3181, |
|
"step": 88 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.00018774028900714256, |
|
"loss": 0.6545, |
|
"step": 89 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.00018743466161445823, |
|
"loss": 0.4294, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.0001871255265530201, |
|
"loss": 2.9904, |
|
"step": 91 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.00018681289622458485, |
|
"loss": 0.6953, |
|
"step": 92 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.00018649678317113084, |
|
"loss": 3.2506, |
|
"step": 93 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 0.00018617720007435497, |
|
"loss": 0.7977, |
|
"step": 94 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 0.000185854159755164, |
|
"loss": 2.8863, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 0.00018552767517316022, |
|
"loss": 0.6639, |
|
"step": 96 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 0.00018519775942612128, |
|
"loss": 0.3403, |
|
"step": 97 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 0.00018486442574947511, |
|
"loss": 0.084, |
|
"step": 98 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 0.0001845276875157687, |
|
"loss": 0.8301, |
|
"step": 99 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 0.0001841875582341317, |
|
"loss": 0.3861, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 0.0001838440515497345, |
|
"loss": 0.9388, |
|
"step": 101 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 0.00018349718124324076, |
|
"loss": 0.4805, |
|
"step": 102 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 0.00018314696123025454, |
|
"loss": 0.9507, |
|
"step": 103 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.00018279340556076216, |
|
"loss": 0.9377, |
|
"step": 104 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.0001824365284185684, |
|
"loss": 3.8316, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.00018207634412072764, |
|
"loss": 2.9745, |
|
"step": 106 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.00018171286711696934, |
|
"loss": 1.9168, |
|
"step": 107 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.0001813461119891184, |
|
"loss": 0.9356, |
|
"step": 108 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.00018097609345051025, |
|
"loss": 5.7176, |
|
"step": 109 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.00018060282634540053, |
|
"loss": 1.141, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.00018022632564836948, |
|
"loss": 0.2181, |
|
"step": 111 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.0001798466064637214, |
|
"loss": 0.8319, |
|
"step": 112 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.00017946368402487845, |
|
"loss": 0.6727, |
|
"step": 113 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.00017907757369376985, |
|
"loss": 1.0776, |
|
"step": 114 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.00017868829096021527, |
|
"loss": 0.5182, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.00017829585144130356, |
|
"loss": 0.7924, |
|
"step": 116 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.0001779002708807662, |
|
"loss": 0.4007, |
|
"step": 117 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.0001775015651483459, |
|
"loss": 0.2954, |
|
"step": 118 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.00017709975023915949, |
|
"loss": 1.4825, |
|
"step": 119 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.0001766948422730567, |
|
"loss": 0.8667, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.0001762868574939732, |
|
"loss": 0.0204, |
|
"step": 121 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.0001758758122692791, |
|
"loss": 0.0491, |
|
"step": 122 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.00017546172308912213, |
|
"loss": 3.2544, |
|
"step": 123 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.00017504460656576627, |
|
"loss": 0.7571, |
|
"step": 124 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.0001746244794329252, |
|
"loss": 3.2652, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.0001742013585450911, |
|
"loss": 0.565, |
|
"step": 126 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.00017377526087685832, |
|
"loss": 0.4365, |
|
"step": 127 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"eval_loss": 3.1416423320770264, |
|
"eval_runtime": 38.7051, |
|
"eval_samples_per_second": 1.343, |
|
"eval_steps_per_second": 1.343, |
|
"step": 127 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.0001733462035222426, |
|
"loss": 3.5766, |
|
"step": 128 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.0001729142036939951, |
|
"loss": 0.2136, |
|
"step": 129 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.000172479278722912, |
|
"loss": 0.8068, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.0001720414460571392, |
|
"loss": 0.3899, |
|
"step": 131 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.0001716007232614723, |
|
"loss": 0.5877, |
|
"step": 132 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.000171157128016652, |
|
"loss": 0.1575, |
|
"step": 133 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.00017071067811865476, |
|
"loss": 3.0165, |
|
"step": 134 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.0001702613914779789, |
|
"loss": 3.3047, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.0001698092861189259, |
|
"loss": 0.4127, |
|
"step": 136 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.00016935438017887772, |
|
"loss": 0.7031, |
|
"step": 137 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.00016889669190756868, |
|
"loss": 2.9221, |
|
"step": 138 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.00016843623966635366, |
|
"loss": 0.1674, |
|
"step": 139 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.0001679730419274713, |
|
"loss": 0.4934, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.0001675071172733031, |
|
"loss": 0.6168, |
|
"step": 141 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.00016703848439562785, |
|
"loss": 0.3576, |
|
"step": 142 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.00016656716209487174, |
|
"loss": 0.5786, |
|
"step": 143 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.0001660931692793541, |
|
"loss": 1.2878, |
|
"step": 144 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.000165616524964529, |
|
"loss": 0.4301, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.00016513724827222227, |
|
"loss": 0.3353, |
|
"step": 146 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.00016465535842986434, |
|
"loss": 0.9216, |
|
"step": 147 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.000164170874769719, |
|
"loss": 0.4037, |
|
"step": 148 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.00016368381672810786, |
|
"loss": 3.7759, |
|
"step": 149 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.0001631942038446304, |
|
"loss": 0.3816, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.00016270205576138032, |
|
"loss": 0.167, |
|
"step": 151 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.00016220739222215738, |
|
"loss": 2.676, |
|
"step": 152 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.00016171023307167545, |
|
"loss": 0.0795, |
|
"step": 153 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.0001612105982547663, |
|
"loss": 0.1744, |
|
"step": 154 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.00016070850781557948, |
|
"loss": 0.4056, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.0001602039818967783, |
|
"loss": 0.7392, |
|
"step": 156 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.00015969704073873157, |
|
"loss": 5.6689, |
|
"step": 157 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.0001591877046787017, |
|
"loss": 0.7629, |
|
"step": 158 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.00015867599415002895, |
|
"loss": 0.069, |
|
"step": 159 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.00015816192968131138, |
|
"loss": 2.9564, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.0001576455318955816, |
|
"loss": 0.3949, |
|
"step": 161 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.00015712682150947923, |
|
"loss": 0.4876, |
|
"step": 162 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.00015660581933241993, |
|
"loss": 0.9095, |
|
"step": 163 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.00015608254626576048, |
|
"loss": 0.1195, |
|
"step": 164 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.00015555702330196023, |
|
"loss": 0.182, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.00015502927152373914, |
|
"loss": 0.3554, |
|
"step": 166 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.0001544993121032318, |
|
"loss": 0.5072, |
|
"step": 167 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.000153967166301138, |
|
"loss": 0.7807, |
|
"step": 168 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.00015343285546587013, |
|
"loss": 0.1042, |
|
"step": 169 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.00015289640103269625, |
|
"loss": 0.3438, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.00015235782452288068, |
|
"loss": 0.3308, |
|
"step": 171 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.0001518171475428202, |
|
"loss": 0.5366, |
|
"step": 172 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.00015127439178317745, |
|
"loss": 0.509, |
|
"step": 173 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.00015072957901801076, |
|
"loss": 0.4237, |
|
"step": 174 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.0001501827311039005, |
|
"loss": 0.2678, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.0001496338699790724, |
|
"loss": 0.5321, |
|
"step": 176 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.00014908301766251739, |
|
"loss": 0.1806, |
|
"step": 177 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.00014853019625310813, |
|
"loss": 0.469, |
|
"step": 178 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.00014797542792871265, |
|
"loss": 0.4509, |
|
"step": 179 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.0001474187349453045, |
|
"loss": 0.4466, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.00014686013963607, |
|
"loss": 0.9026, |
|
"step": 181 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.00014629966441051208, |
|
"loss": 0.6483, |
|
"step": 182 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.0001457373317535515, |
|
"loss": 0.1891, |
|
"step": 183 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.0001451731642246247, |
|
"loss": 2.834, |
|
"step": 184 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.00014460718445677876, |
|
"loss": 0.3445, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.00014403941515576344, |
|
"loss": 0.3603, |
|
"step": 186 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.00014346987909912023, |
|
"loss": 0.388, |
|
"step": 187 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.00014289859913526874, |
|
"loss": 0.4124, |
|
"step": 188 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.00014232559818258984, |
|
"loss": 1.4459, |
|
"step": 189 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.00014175089922850633, |
|
"loss": 0.1114, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.00014117452532856083, |
|
"loss": 3.2592, |
|
"step": 191 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0001405964996054907, |
|
"loss": 0.4387, |
|
"step": 192 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.00014001684524830057, |
|
"loss": 3.0681, |
|
"step": 193 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.00013943558551133186, |
|
"loss": 2.8873, |
|
"step": 194 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.00013885274371333, |
|
"loss": 0.0514, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.000138268343236509, |
|
"loss": 0.2656, |
|
"step": 196 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.00013768240752561314, |
|
"loss": 0.5523, |
|
"step": 197 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0001370949600869768, |
|
"loss": 0.3376, |
|
"step": 198 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.00013650602448758112, |
|
"loss": 0.397, |
|
"step": 199 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0001359156243541087, |
|
"loss": 0.306, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.00013532378337199582, |
|
"loss": 0.3834, |
|
"step": 201 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.00013473052528448201, |
|
"loss": 0.351, |
|
"step": 202 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.00013413587389165784, |
|
"loss": 0.3581, |
|
"step": 203 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.00013353985304950973, |
|
"loss": 0.3167, |
|
"step": 204 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.00013294248666896328, |
|
"loss": 1.2743, |
|
"step": 205 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.0001323437987149238, |
|
"loss": 1.1231, |
|
"step": 206 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.00013174381320531505, |
|
"loss": 0.2232, |
|
"step": 207 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.0001311425542101154, |
|
"loss": 0.2116, |
|
"step": 208 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.00013054004585039258, |
|
"loss": 3.3701, |
|
"step": 209 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.00012993631229733582, |
|
"loss": 0.1044, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.00012933137777128607, |
|
"loss": 0.8717, |
|
"step": 211 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.0001287252665407645, |
|
"loss": 0.1707, |
|
"step": 212 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.0001281180029214988, |
|
"loss": 0.3714, |
|
"step": 213 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.0001275096112754478, |
|
"loss": 0.5697, |
|
"step": 214 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.000126900116009824, |
|
"loss": 0.8001, |
|
"step": 215 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.0001262895415761145, |
|
"loss": 0.8647, |
|
"step": 216 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.00012567791246909994, |
|
"loss": 0.31, |
|
"step": 217 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.00012506525322587207, |
|
"loss": 2.5493, |
|
"step": 218 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.0001244515884248491, |
|
"loss": 3.1865, |
|
"step": 219 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.00012383694268478993, |
|
"loss": 2.7346, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.0001232213406638062, |
|
"loss": 2.9121, |
|
"step": 221 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.0001226048070583735, |
|
"loss": 3.3936, |
|
"step": 222 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.00012198736660234009, |
|
"loss": 0.7656, |
|
"step": 223 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.00012136904406593507, |
|
"loss": 0.2102, |
|
"step": 224 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.00012074986425477445, |
|
"loss": 0.0982, |
|
"step": 225 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.00012012985200886602, |
|
"loss": 0.5801, |
|
"step": 226 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.00011950903220161285, |
|
"loss": 0.1236, |
|
"step": 227 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.00011888742973881543, |
|
"loss": 1.401, |
|
"step": 228 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.00011826506955767258, |
|
"loss": 0.5149, |
|
"step": 229 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.00011764197662578086, |
|
"loss": 0.6567, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.00011701817594013312, |
|
"loss": 0.1937, |
|
"step": 231 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.00011639369252611552, |
|
"loss": 0.1336, |
|
"step": 232 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.00011576855143650371, |
|
"loss": 2.8677, |
|
"step": 233 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.00011514277775045768, |
|
"loss": 0.3171, |
|
"step": 234 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.00011451639657251563, |
|
"loss": 0.7903, |
|
"step": 235 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.00011388943303158693, |
|
"loss": 2.6568, |
|
"step": 236 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.00011326191227994391, |
|
"loss": 0.2808, |
|
"step": 237 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.00011263385949221295, |
|
"loss": 0.1951, |
|
"step": 238 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.0001120052998643643, |
|
"loss": 0.0974, |
|
"step": 239 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.00011137625861270151, |
|
"loss": 0.1154, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.00011074676097284973, |
|
"loss": 0.2559, |
|
"step": 241 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.00011011683219874323, |
|
"loss": 0.6843, |
|
"step": 242 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.00010948649756161246, |
|
"loss": 1.8828, |
|
"step": 243 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.00010885578234897003, |
|
"loss": 0.0511, |
|
"step": 244 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.00010822471186359639, |
|
"loss": 0.4992, |
|
"step": 245 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.00010759331142252462, |
|
"loss": 0.1157, |
|
"step": 246 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.00010696160635602487, |
|
"loss": 0.937, |
|
"step": 247 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.00010632962200658815, |
|
"loss": 0.052, |
|
"step": 248 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.00010569738372790956, |
|
"loss": 0.1229, |
|
"step": 249 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.00010506491688387127, |
|
"loss": 0.1599, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.000104432246847525, |
|
"loss": 0.7171, |
|
"step": 251 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.00010379939900007393, |
|
"loss": 0.0594, |
|
"step": 252 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.00010316639872985472, |
|
"loss": 2.7738, |
|
"step": 253 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.00010253327143131879, |
|
"loss": 2.953, |
|
"step": 254 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"eval_loss": 3.1145544052124023, |
|
"eval_runtime": 38.0064, |
|
"eval_samples_per_second": 1.368, |
|
"eval_steps_per_second": 1.368, |
|
"step": 254 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.00010190004250401368, |
|
"loss": 1.0087, |
|
"step": 255 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.00010126673735156402, |
|
"loss": 0.1579, |
|
"step": 256 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.00010063338138065234, |
|
"loss": 0.7859, |
|
"step": 257 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.0001, |
|
"loss": 3.0133, |
|
"step": 258 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 9.936661861934765e-05, |
|
"loss": 0.2415, |
|
"step": 259 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 9.8733262648436e-05, |
|
"loss": 0.1456, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 9.809995749598632e-05, |
|
"loss": 0.1532, |
|
"step": 261 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 9.746672856868123e-05, |
|
"loss": 0.407, |
|
"step": 262 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 9.683360127014529e-05, |
|
"loss": 0.3275, |
|
"step": 263 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 9.620060099992609e-05, |
|
"loss": 0.3738, |
|
"step": 264 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 9.556775315247501e-05, |
|
"loss": 0.283, |
|
"step": 265 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 9.493508311612874e-05, |
|
"loss": 0.6886, |
|
"step": 266 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 9.430261627209044e-05, |
|
"loss": 0.2551, |
|
"step": 267 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 9.367037799341187e-05, |
|
"loss": 0.8632, |
|
"step": 268 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 9.303839364397511e-05, |
|
"loss": 0.4109, |
|
"step": 269 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 9.24066885774754e-05, |
|
"loss": 0.122, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 9.177528813640362e-05, |
|
"loss": 0.1055, |
|
"step": 271 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 9.114421765102999e-05, |
|
"loss": 0.3906, |
|
"step": 272 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 9.051350243838756e-05, |
|
"loss": 0.0687, |
|
"step": 273 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 8.98831678012568e-05, |
|
"loss": 2.9347, |
|
"step": 274 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 8.925323902715031e-05, |
|
"loss": 0.1194, |
|
"step": 275 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 8.862374138729853e-05, |
|
"loss": 0.5719, |
|
"step": 276 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 8.799470013563573e-05, |
|
"loss": 2.7415, |
|
"step": 277 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 8.73661405077871e-05, |
|
"loss": 0.1243, |
|
"step": 278 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 8.67380877200561e-05, |
|
"loss": 2.9093, |
|
"step": 279 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 8.611056696841312e-05, |
|
"loss": 1.2619, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 8.54836034274844e-05, |
|
"loss": 0.5068, |
|
"step": 281 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 8.485722224954237e-05, |
|
"loss": 2.9148, |
|
"step": 282 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 8.423144856349631e-05, |
|
"loss": 1.1471, |
|
"step": 283 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 8.36063074738845e-05, |
|
"loss": 1.91, |
|
"step": 284 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 8.298182405986689e-05, |
|
"loss": 0.0886, |
|
"step": 285 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 8.235802337421919e-05, |
|
"loss": 0.3945, |
|
"step": 286 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 8.173493044232745e-05, |
|
"loss": 0.4341, |
|
"step": 287 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 8.11125702611846e-05, |
|
"loss": 1.4804, |
|
"step": 288 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 8.049096779838719e-05, |
|
"loss": 2.9338, |
|
"step": 289 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 7.987014799113397e-05, |
|
"loss": 0.4612, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 7.925013574522557e-05, |
|
"loss": 0.9304, |
|
"step": 291 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 7.863095593406491e-05, |
|
"loss": 2.9119, |
|
"step": 292 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 7.801263339765994e-05, |
|
"loss": 2.8254, |
|
"step": 293 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 7.739519294162652e-05, |
|
"loss": 0.0828, |
|
"step": 294 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 7.677865933619379e-05, |
|
"loss": 2.9986, |
|
"step": 295 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 7.616305731521008e-05, |
|
"loss": 3.1021, |
|
"step": 296 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 7.554841157515092e-05, |
|
"loss": 0.6816, |
|
"step": 297 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 7.493474677412794e-05, |
|
"loss": 0.1559, |
|
"step": 298 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 7.432208753090009e-05, |
|
"loss": 0.0844, |
|
"step": 299 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 7.371045842388552e-05, |
|
"loss": 2.7568, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 7.309988399017602e-05, |
|
"loss": 1.1267, |
|
"step": 301 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 7.24903887245522e-05, |
|
"loss": 0.2277, |
|
"step": 302 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 7.188199707850122e-05, |
|
"loss": 0.0967, |
|
"step": 303 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 7.127473345923554e-05, |
|
"loss": 3.2755, |
|
"step": 304 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 7.066862222871397e-05, |
|
"loss": 1.1044, |
|
"step": 305 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 7.006368770266421e-05, |
|
"loss": 0.0781, |
|
"step": 306 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 6.945995414960744e-05, |
|
"loss": 0.064, |
|
"step": 307 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 6.885744578988463e-05, |
|
"loss": 0.831, |
|
"step": 308 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 6.825618679468502e-05, |
|
"loss": 0.1206, |
|
"step": 309 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 6.765620128507619e-05, |
|
"loss": 0.6853, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 6.705751333103675e-05, |
|
"loss": 0.2435, |
|
"step": 311 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 6.64601469504903e-05, |
|
"loss": 0.753, |
|
"step": 312 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 6.586412610834221e-05, |
|
"loss": 0.0688, |
|
"step": 313 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 6.526947471551798e-05, |
|
"loss": 0.2185, |
|
"step": 314 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 6.46762166280042e-05, |
|
"loss": 0.0895, |
|
"step": 315 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 6.40843756458913e-05, |
|
"loss": 2.5166, |
|
"step": 316 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 6.349397551241894e-05, |
|
"loss": 0.7716, |
|
"step": 317 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 6.290503991302324e-05, |
|
"loss": 0.7076, |
|
"step": 318 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 6.231759247438689e-05, |
|
"loss": 0.3317, |
|
"step": 319 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 6.173165676349103e-05, |
|
"loss": 0.3898, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 6.114725628666998e-05, |
|
"loss": 1.9533, |
|
"step": 321 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 6.0564414488668165e-05, |
|
"loss": 0.646, |
|
"step": 322 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 5.998315475169942e-05, |
|
"loss": 2.9357, |
|
"step": 323 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 5.94035003945093e-05, |
|
"loss": 5.7477, |
|
"step": 324 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 5.88254746714392e-05, |
|
"loss": 1.0008, |
|
"step": 325 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 5.824910077149371e-05, |
|
"loss": 0.3179, |
|
"step": 326 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 5.767440181741019e-05, |
|
"loss": 0.9338, |
|
"step": 327 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 5.710140086473129e-05, |
|
"loss": 0.1536, |
|
"step": 328 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 5.653012090087977e-05, |
|
"loss": 0.61, |
|
"step": 329 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 5.596058484423656e-05, |
|
"loss": 1.2369, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 5.5392815543221254e-05, |
|
"loss": 0.1902, |
|
"step": 331 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 5.4826835775375285e-05, |
|
"loss": 0.2775, |
|
"step": 332 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 5.4262668246448475e-05, |
|
"loss": 0.2029, |
|
"step": 333 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 5.3700335589487925e-05, |
|
"loss": 0.0906, |
|
"step": 334 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 5.3139860363929996e-05, |
|
"loss": 0.1297, |
|
"step": 335 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 5.2581265054695494e-05, |
|
"loss": 0.3453, |
|
"step": 336 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 5.202457207128736e-05, |
|
"loss": 0.156, |
|
"step": 337 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 5.146980374689192e-05, |
|
"loss": 0.2555, |
|
"step": 338 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 5.0916982337482644e-05, |
|
"loss": 0.3003, |
|
"step": 339 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 5.0366130020927624e-05, |
|
"loss": 0.7925, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 4.981726889609952e-05, |
|
"loss": 1.2376, |
|
"step": 341 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 4.9270420981989294e-05, |
|
"loss": 2.9298, |
|
"step": 342 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 4.872560821682256e-05, |
|
"loss": 0.1811, |
|
"step": 343 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 4.818285245717984e-05, |
|
"loss": 0.1007, |
|
"step": 344 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 4.764217547711934e-05, |
|
"loss": 5.1643, |
|
"step": 345 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 4.710359896730379e-05, |
|
"loss": 0.6855, |
|
"step": 346 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 4.656714453412993e-05, |
|
"loss": 1.6421, |
|
"step": 347 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 4.6032833698862044e-05, |
|
"loss": 0.0813, |
|
"step": 348 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 4.5500687896768256e-05, |
|
"loss": 2.8098, |
|
"step": 349 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 4.497072847626087e-05, |
|
"loss": 2.9227, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 4.444297669803981e-05, |
|
"loss": 0.0877, |
|
"step": 351 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 4.3917453734239566e-05, |
|
"loss": 0.1996, |
|
"step": 352 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 4.339418066758008e-05, |
|
"loss": 0.8809, |
|
"step": 353 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 4.287317849052075e-05, |
|
"loss": 2.7009, |
|
"step": 354 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 4.235446810441841e-05, |
|
"loss": 0.067, |
|
"step": 355 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 4.1838070318688604e-05, |
|
"loss": 1.6842, |
|
"step": 356 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 4.132400584997106e-05, |
|
"loss": 0.7324, |
|
"step": 357 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 4.081229532129827e-05, |
|
"loss": 0.1683, |
|
"step": 358 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 4.030295926126845e-05, |
|
"loss": 0.7423, |
|
"step": 359 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 3.979601810322169e-05, |
|
"loss": 0.2521, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 3.929149218442052e-05, |
|
"loss": 0.3601, |
|
"step": 361 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 3.878940174523371e-05, |
|
"loss": 0.1575, |
|
"step": 362 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 3.828976692832458e-05, |
|
"loss": 0.572, |
|
"step": 363 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 3.779260777784263e-05, |
|
"loss": 2.8984, |
|
"step": 364 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 3.7297944238619706e-05, |
|
"loss": 0.0589, |
|
"step": 365 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 3.680579615536961e-05, |
|
"loss": 0.0536, |
|
"step": 366 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 3.631618327189218e-05, |
|
"loss": 0.0897, |
|
"step": 367 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 3.582912523028101e-05, |
|
"loss": 0.7315, |
|
"step": 368 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 3.534464157013574e-05, |
|
"loss": 0.1946, |
|
"step": 369 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 3.4862751727777797e-05, |
|
"loss": 1.2387, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 3.438347503547102e-05, |
|
"loss": 0.0964, |
|
"step": 371 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 3.390683072064594e-05, |
|
"loss": 1.1282, |
|
"step": 372 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 3.343283790512829e-05, |
|
"loss": 0.3648, |
|
"step": 373 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 3.296151560437214e-05, |
|
"loss": 0.1887, |
|
"step": 374 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 3.249288272669691e-05, |
|
"loss": 0.1734, |
|
"step": 375 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 3.202695807252871e-05, |
|
"loss": 2.8517, |
|
"step": 376 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 3.1563760333646395e-05, |
|
"loss": 0.1988, |
|
"step": 377 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 3.110330809243134e-05, |
|
"loss": 2.929, |
|
"step": 378 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 3.064561982112232e-05, |
|
"loss": 0.1981, |
|
"step": 379 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 3.0190713881074105e-05, |
|
"loss": 0.0902, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 2.9738608522021173e-05, |
|
"loss": 0.35, |
|
"step": 381 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"eval_loss": 3.096834897994995, |
|
"eval_runtime": 37.8467, |
|
"eval_samples_per_second": 1.374, |
|
"eval_steps_per_second": 1.374, |
|
"step": 381 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 2.9289321881345254e-05, |
|
"loss": 0.4055, |
|
"step": 382 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 2.8842871983347998e-05, |
|
"loss": 0.0914, |
|
"step": 383 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 2.8399276738527714e-05, |
|
"loss": 0.6584, |
|
"step": 384 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 2.795855394286081e-05, |
|
"loss": 0.1345, |
|
"step": 385 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 2.7520721277088024e-05, |
|
"loss": 0.1762, |
|
"step": 386 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 2.7085796306004906e-05, |
|
"loss": 0.0956, |
|
"step": 387 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 2.6653796477757432e-05, |
|
"loss": 0.0985, |
|
"step": 388 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 2.6224739123141684e-05, |
|
"loss": 0.768, |
|
"step": 389 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 2.5798641454908944e-05, |
|
"loss": 3.141, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 2.537552056707483e-05, |
|
"loss": 3.1468, |
|
"step": 391 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 2.4955393434233754e-05, |
|
"loss": 2.8864, |
|
"step": 392 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 2.45382769108779e-05, |
|
"loss": 0.774, |
|
"step": 393 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 2.4124187730720917e-05, |
|
"loss": 0.1578, |
|
"step": 394 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 2.3713142506026786e-05, |
|
"loss": 2.4016, |
|
"step": 395 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 2.3305157726943327e-05, |
|
"loss": 3.0549, |
|
"step": 396 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 2.290024976084052e-05, |
|
"loss": 0.3476, |
|
"step": 397 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 2.2498434851654126e-05, |
|
"loss": 1.6223, |
|
"step": 398 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 2.209972911923377e-05, |
|
"loss": 0.139, |
|
"step": 399 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 2.170414855869647e-05, |
|
"loss": 3.1515, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 2.1311709039784734e-05, |
|
"loss": 1.0012, |
|
"step": 401 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 2.092242630623016e-05, |
|
"loss": 0.4592, |
|
"step": 402 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 2.0536315975121544e-05, |
|
"loss": 0.4305, |
|
"step": 403 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 2.0153393536278653e-05, |
|
"loss": 0.9412, |
|
"step": 404 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 1.9773674351630545e-05, |
|
"loss": 0.1606, |
|
"step": 405 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 1.939717365459952e-05, |
|
"loss": 0.1445, |
|
"step": 406 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 1.9023906549489767e-05, |
|
"loss": 5.7603, |
|
"step": 407 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 1.8653888010881637e-05, |
|
"loss": 0.037, |
|
"step": 408 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 1.82871328830307e-05, |
|
"loss": 0.1551, |
|
"step": 409 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 1.7923655879272393e-05, |
|
"loss": 0.542, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 1.7563471581431624e-05, |
|
"loss": 0.1888, |
|
"step": 411 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 1.7206594439237865e-05, |
|
"loss": 1.0623, |
|
"step": 412 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 1.6853038769745467e-05, |
|
"loss": 0.0943, |
|
"step": 413 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 1.6502818756759276e-05, |
|
"loss": 0.1562, |
|
"step": 414 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 1.61559484502655e-05, |
|
"loss": 2.9172, |
|
"step": 415 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 1.5812441765868292e-05, |
|
"loss": 0.0865, |
|
"step": 416 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 1.547231248423132e-05, |
|
"loss": 2.3386, |
|
"step": 417 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 1.5135574250524897e-05, |
|
"loss": 2.3205, |
|
"step": 418 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 1.4802240573878733e-05, |
|
"loss": 0.9576, |
|
"step": 419 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 1.447232482683979e-05, |
|
"loss": 0.1757, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 1.4145840244835983e-05, |
|
"loss": 0.5404, |
|
"step": 421 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 1.3822799925645036e-05, |
|
"loss": 0.8805, |
|
"step": 422 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 1.3503216828869192e-05, |
|
"loss": 0.1249, |
|
"step": 423 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 1.3187103775415156e-05, |
|
"loss": 0.3596, |
|
"step": 424 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 1.2874473446979918e-05, |
|
"loss": 1.6876, |
|
"step": 425 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 1.2565338385541792e-05, |
|
"loss": 0.1585, |
|
"step": 426 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 1.2259710992857465e-05, |
|
"loss": 2.8895, |
|
"step": 427 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 1.195760352996429e-05, |
|
"loss": 2.9701, |
|
"step": 428 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 1.1659028116688575e-05, |
|
"loss": 0.3456, |
|
"step": 429 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 1.1363996731159188e-05, |
|
"loss": 0.3992, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 1.107252120932717e-05, |
|
"loss": 0.4428, |
|
"step": 431 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 1.0784613244490816e-05, |
|
"loss": 0.0964, |
|
"step": 432 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 1.0500284386826597e-05, |
|
"loss": 0.4612, |
|
"step": 433 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 1.0219546042925843e-05, |
|
"loss": 1.0988, |
|
"step": 434 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 9.942409475337012e-06, |
|
"loss": 0.2646, |
|
"step": 435 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 9.668885802114003e-06, |
|
"loss": 0.1104, |
|
"step": 436 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 9.398985996370058e-06, |
|
"loss": 2.9026, |
|
"step": 437 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 9.13272088583751e-06, |
|
"loss": 2.5969, |
|
"step": 438 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 8.870101152433497e-06, |
|
"loss": 0.204, |
|
"step": 439 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 8.611137331831331e-06, |
|
"loss": 2.8929, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 8.355839813037936e-06, |
|
"loss": 0.4752, |
|
"step": 441 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 8.10421883797694e-06, |
|
"loss": 0.0708, |
|
"step": 442 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 7.856284501077926e-06, |
|
"loss": 0.2817, |
|
"step": 443 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 7.612046748871327e-06, |
|
"loss": 0.3538, |
|
"step": 444 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 7.371515379589555e-06, |
|
"loss": 3.0355, |
|
"step": 445 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 7.13470004277379e-06, |
|
"loss": 1.7098, |
|
"step": 446 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 6.901610238886891e-06, |
|
"loss": 0.0538, |
|
"step": 447 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 6.672255318932341e-06, |
|
"loss": 0.1649, |
|
"step": 448 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 6.4466444840789674e-06, |
|
"loss": 0.1586, |
|
"step": 449 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 6.22478678529197e-06, |
|
"loss": 3.0296, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 6.006691122969643e-06, |
|
"loss": 2.9251, |
|
"step": 451 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 5.792366246586511e-06, |
|
"loss": 0.5227, |
|
"step": 452 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 5.581820754342137e-06, |
|
"loss": 0.1688, |
|
"step": 453 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 5.375063092816313e-06, |
|
"loss": 0.6017, |
|
"step": 454 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 5.172101556630149e-06, |
|
"loss": 0.0826, |
|
"step": 455 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 4.972944288113268e-06, |
|
"loss": 0.7591, |
|
"step": 456 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 4.777599276977263e-06, |
|
"loss": 0.1292, |
|
"step": 457 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 4.586074359995119e-06, |
|
"loss": 0.8982, |
|
"step": 458 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 4.398377220686745e-06, |
|
"loss": 2.735, |
|
"step": 459 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 4.214515389010865e-06, |
|
"loss": 0.242, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 4.034496241062824e-06, |
|
"loss": 0.097, |
|
"step": 461 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 3.858326998778761e-06, |
|
"loss": 0.1028, |
|
"step": 462 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 3.6860147296457816e-06, |
|
"loss": 0.1689, |
|
"step": 463 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"learning_rate": 3.5175663464185436e-06, |
|
"loss": 0.1873, |
|
"step": 464 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"learning_rate": 3.3529886068418447e-06, |
|
"loss": 3.074, |
|
"step": 465 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"learning_rate": 3.1922881133795825e-06, |
|
"loss": 0.2985, |
|
"step": 466 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"learning_rate": 3.035471312949778e-06, |
|
"loss": 0.0815, |
|
"step": 467 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"learning_rate": 2.8825444966661063e-06, |
|
"loss": 0.1297, |
|
"step": 468 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 2.7335137995853188e-06, |
|
"loss": 1.8928, |
|
"step": 469 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 2.5883852004613074e-06, |
|
"loss": 0.0399, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 2.4471645215050743e-06, |
|
"loss": 2.4668, |
|
"step": 471 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 2.3098574281513185e-06, |
|
"loss": 1.3717, |
|
"step": 472 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 2.1764694288310184e-06, |
|
"loss": 0.5464, |
|
"step": 473 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 2.0470058747505516e-06, |
|
"loss": 5.7381, |
|
"step": 474 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 1.921471959676957e-06, |
|
"loss": 0.0523, |
|
"step": 475 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 1.7998727197295784e-06, |
|
"loss": 2.0947, |
|
"step": 476 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 1.6822130331780484e-06, |
|
"loss": 0.3609, |
|
"step": 477 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 1.5684976202465784e-06, |
|
"loss": 0.3193, |
|
"step": 478 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"learning_rate": 1.4587310429245882e-06, |
|
"loss": 0.5045, |
|
"step": 479 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"learning_rate": 1.3529177047836627e-06, |
|
"loss": 0.0842, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"learning_rate": 1.2510618508009608e-06, |
|
"loss": 1.3106, |
|
"step": 481 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"learning_rate": 1.1531675671888619e-06, |
|
"loss": 2.8808, |
|
"step": 482 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"learning_rate": 1.0592387812310311e-06, |
|
"loss": 2.9172, |
|
"step": 483 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 9.692792611249224e-07, |
|
"loss": 0.6062, |
|
"step": 484 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 8.832926158305444e-07, |
|
"loss": 0.1825, |
|
"step": 485 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 8.012822949256982e-07, |
|
"loss": 0.2575, |
|
"step": 486 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 7.232515884676328e-07, |
|
"loss": 0.1746, |
|
"step": 487 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 6.492036268609725e-07, |
|
"loss": 0.264, |
|
"step": 488 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"learning_rate": 5.791413807322066e-07, |
|
"loss": 0.0955, |
|
"step": 489 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"learning_rate": 5.130676608104845e-07, |
|
"loss": 0.1896, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"learning_rate": 4.509851178148505e-07, |
|
"loss": 0.402, |
|
"step": 491 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"learning_rate": 3.9289624234790656e-07, |
|
"loss": 0.3264, |
|
"step": 492 |
|
} |
|
], |
|
"logging_steps": 1, |
|
"max_steps": 506, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 1, |
|
"save_steps": 500, |
|
"total_flos": 1.8835459562432102e+17, |
|
"train_batch_size": 1, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |