{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0061855670103093,
  "eval_steps": 61,
  "global_step": 122,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.008247422680412371,
      "grad_norm": 2.2737574577331543,
      "learning_rate": 2e-05,
      "loss": 1.0155,
      "step": 1
    },
    {
      "epoch": 0.008247422680412371,
      "eval_loss": 1.2302311658859253,
      "eval_runtime": 295.4772,
      "eval_samples_per_second": 2.031,
      "eval_steps_per_second": 1.015,
      "step": 1
    },
    {
      "epoch": 0.016494845360824743,
      "grad_norm": 2.203125476837158,
      "learning_rate": 4e-05,
      "loss": 1.1137,
      "step": 2
    },
    {
      "epoch": 0.024742268041237112,
      "grad_norm": 1.7233558893203735,
      "learning_rate": 6e-05,
      "loss": 0.9453,
      "step": 3
    },
    {
      "epoch": 0.032989690721649485,
      "grad_norm": 2.38301420211792,
      "learning_rate": 8e-05,
      "loss": 1.1091,
      "step": 4
    },
    {
      "epoch": 0.041237113402061855,
      "grad_norm": 1.6754157543182373,
      "learning_rate": 0.0001,
      "loss": 0.9209,
      "step": 5
    },
    {
      "epoch": 0.049484536082474224,
      "grad_norm": 1.6663925647735596,
      "learning_rate": 0.00012,
      "loss": 0.9145,
      "step": 6
    },
    {
      "epoch": 0.0577319587628866,
      "grad_norm": 1.6516679525375366,
      "learning_rate": 0.00014,
      "loss": 0.8456,
      "step": 7
    },
    {
      "epoch": 0.06597938144329897,
      "grad_norm": 1.4858132600784302,
      "learning_rate": 0.00016,
      "loss": 0.8257,
      "step": 8
    },
    {
      "epoch": 0.07422680412371134,
      "grad_norm": 1.2829698324203491,
      "learning_rate": 0.00018,
      "loss": 0.7396,
      "step": 9
    },
    {
      "epoch": 0.08247422680412371,
      "grad_norm": 0.8841626644134521,
      "learning_rate": 0.0002,
      "loss": 0.6599,
      "step": 10
    },
    {
      "epoch": 0.09072164948453608,
      "grad_norm": 0.6680086255073547,
      "learning_rate": 0.0001999960397967811,
      "loss": 0.6368,
      "step": 11
    },
    {
      "epoch": 0.09896907216494845,
      "grad_norm": 0.9510122537612915,
      "learning_rate": 0.00019998415950078858,
      "loss": 0.7061,
      "step": 12
    },
    {
      "epoch": 0.10721649484536082,
      "grad_norm": 0.8464949131011963,
      "learning_rate": 0.00019996436005299012,
      "loss": 0.6674,
      "step": 13
    },
    {
      "epoch": 0.1154639175257732,
      "grad_norm": 0.8255585432052612,
      "learning_rate": 0.00019993664302158255,
      "loss": 0.6971,
      "step": 14
    },
    {
      "epoch": 0.12371134020618557,
      "grad_norm": 0.7613081932067871,
      "learning_rate": 0.00019990101060186733,
      "loss": 0.6621,
      "step": 15
    },
    {
      "epoch": 0.13195876288659794,
      "grad_norm": 0.6413361430168152,
      "learning_rate": 0.00019985746561607698,
      "loss": 0.6336,
      "step": 16
    },
    {
      "epoch": 0.1402061855670103,
      "grad_norm": 0.7216547727584839,
      "learning_rate": 0.0001998060115131513,
      "loss": 0.7165,
      "step": 17
    },
    {
      "epoch": 0.14845360824742268,
      "grad_norm": 0.7801871299743652,
      "learning_rate": 0.00019974665236846442,
      "loss": 0.7449,
      "step": 18
    },
    {
      "epoch": 0.15670103092783505,
      "grad_norm": 0.6427637338638306,
      "learning_rate": 0.00019967939288350182,
      "loss": 0.6344,
      "step": 19
    },
    {
      "epoch": 0.16494845360824742,
      "grad_norm": 0.6753759980201721,
      "learning_rate": 0.00019960423838548814,
      "loss": 0.735,
      "step": 20
    },
    {
      "epoch": 0.1731958762886598,
      "grad_norm": 0.6213053464889526,
      "learning_rate": 0.00019952119482696503,
      "loss": 0.6675,
      "step": 21
    },
    {
      "epoch": 0.18144329896907216,
      "grad_norm": 0.6505969762802124,
      "learning_rate": 0.00019943026878531983,
      "loss": 0.6129,
      "step": 22
    },
    {
      "epoch": 0.18969072164948453,
      "grad_norm": 0.6324624419212341,
      "learning_rate": 0.0001993314674622646,
      "loss": 0.6289,
      "step": 23
    },
    {
      "epoch": 0.1979381443298969,
      "grad_norm": 0.6155864000320435,
      "learning_rate": 0.00019922479868326578,
      "loss": 0.6358,
      "step": 24
    },
    {
      "epoch": 0.20618556701030927,
      "grad_norm": 0.6395933628082275,
      "learning_rate": 0.0001991102708969241,
      "loss": 0.6225,
      "step": 25
    },
    {
      "epoch": 0.21443298969072164,
      "grad_norm": 0.5889492034912109,
      "learning_rate": 0.00019898789317430575,
      "loss": 0.6674,
      "step": 26
    },
    {
      "epoch": 0.22268041237113403,
      "grad_norm": 0.5262942910194397,
      "learning_rate": 0.00019885767520822376,
      "loss": 0.6077,
      "step": 27
    },
    {
      "epoch": 0.2309278350515464,
      "grad_norm": 0.5878570675849915,
      "learning_rate": 0.0001987196273124703,
      "loss": 0.6145,
      "step": 28
    },
    {
      "epoch": 0.23917525773195877,
      "grad_norm": 0.5845457315444946,
      "learning_rate": 0.00019857376042099983,
      "loss": 0.6435,
      "step": 29
    },
    {
      "epoch": 0.24742268041237114,
      "grad_norm": 0.6508314609527588,
      "learning_rate": 0.00019842008608706295,
      "loss": 0.6799,
      "step": 30
    },
    {
      "epoch": 0.2556701030927835,
      "grad_norm": 0.5770230293273926,
      "learning_rate": 0.00019825861648229152,
      "loss": 0.6092,
      "step": 31
    },
    {
      "epoch": 0.2639175257731959,
      "grad_norm": 0.5438909530639648,
      "learning_rate": 0.00019808936439573454,
      "loss": 0.5952,
      "step": 32
    },
    {
      "epoch": 0.2721649484536082,
      "grad_norm": 0.5276155471801758,
      "learning_rate": 0.00019791234323284513,
      "loss": 0.5899,
      "step": 33
    },
    {
      "epoch": 0.2804123711340206,
      "grad_norm": 0.6430350542068481,
      "learning_rate": 0.00019772756701441887,
      "loss": 0.6581,
      "step": 34
    },
    {
      "epoch": 0.28865979381443296,
      "grad_norm": 0.6087874174118042,
      "learning_rate": 0.0001975350503754833,
      "loss": 0.654,
      "step": 35
    },
    {
      "epoch": 0.29690721649484536,
      "grad_norm": 0.610534131526947,
      "learning_rate": 0.00019733480856413868,
      "loss": 0.6655,
      "step": 36
    },
    {
      "epoch": 0.30515463917525776,
      "grad_norm": 0.5277837514877319,
      "learning_rate": 0.0001971268574403503,
      "loss": 0.6228,
      "step": 37
    },
    {
      "epoch": 0.3134020618556701,
      "grad_norm": 0.6729940176010132,
      "learning_rate": 0.00019691121347469235,
      "loss": 0.8324,
      "step": 38
    },
    {
      "epoch": 0.3216494845360825,
      "grad_norm": 0.541552722454071,
      "learning_rate": 0.00019668789374704338,
      "loss": 0.54,
      "step": 39
    },
    {
      "epoch": 0.32989690721649484,
      "grad_norm": 0.5642793774604797,
      "learning_rate": 0.0001964569159452335,
      "loss": 0.641,
      "step": 40
    },
    {
      "epoch": 0.33814432989690724,
      "grad_norm": 0.5639921426773071,
      "learning_rate": 0.00019621829836364337,
      "loss": 0.6042,
      "step": 41
    },
    {
      "epoch": 0.3463917525773196,
      "grad_norm": 0.566059410572052,
      "learning_rate": 0.00019597205990175525,
      "loss": 0.6364,
      "step": 42
    },
    {
      "epoch": 0.354639175257732,
      "grad_norm": 0.5911106467247009,
      "learning_rate": 0.00019571822006265622,
      "loss": 0.6777,
      "step": 43
    },
    {
      "epoch": 0.3628865979381443,
      "grad_norm": 0.5940718054771423,
      "learning_rate": 0.00019545679895149315,
      "loss": 0.713,
      "step": 44
    },
    {
      "epoch": 0.3711340206185567,
      "grad_norm": 0.46311718225479126,
      "learning_rate": 0.0001951878172738806,
      "loss": 0.5298,
      "step": 45
    },
    {
      "epoch": 0.37938144329896906,
      "grad_norm": 0.5446880459785461,
      "learning_rate": 0.00019491129633426068,
      "loss": 0.546,
      "step": 46
    },
    {
      "epoch": 0.38762886597938145,
      "grad_norm": 0.6126728653907776,
      "learning_rate": 0.00019462725803421566,
      "loss": 0.5725,
      "step": 47
    },
    {
      "epoch": 0.3958762886597938,
      "grad_norm": 0.49817898869514465,
      "learning_rate": 0.0001943357248707334,
      "loss": 0.6224,
      "step": 48
    },
    {
      "epoch": 0.4041237113402062,
      "grad_norm": 0.4865269362926483,
      "learning_rate": 0.0001940367199344253,
      "loss": 0.544,
      "step": 49
    },
    {
      "epoch": 0.41237113402061853,
      "grad_norm": 0.48718348145484924,
      "learning_rate": 0.00019373026690769763,
      "loss": 0.6132,
      "step": 50
    },
    {
      "epoch": 0.42061855670103093,
      "grad_norm": 0.5080978274345398,
      "learning_rate": 0.0001934163900628756,
      "loss": 0.6278,
      "step": 51
    },
    {
      "epoch": 0.4288659793814433,
      "grad_norm": 0.4930504262447357,
      "learning_rate": 0.00019309511426028104,
      "loss": 0.602,
      "step": 52
    },
    {
      "epoch": 0.43711340206185567,
      "grad_norm": 0.4977351725101471,
      "learning_rate": 0.00019276646494626332,
      "loss": 0.5516,
      "step": 53
    },
    {
      "epoch": 0.44536082474226807,
      "grad_norm": 0.4758591055870056,
      "learning_rate": 0.00019243046815118386,
      "loss": 0.5136,
      "step": 54
    },
    {
      "epoch": 0.4536082474226804,
      "grad_norm": 0.4887770712375641,
      "learning_rate": 0.00019208715048735445,
      "loss": 0.6265,
      "step": 55
    },
    {
      "epoch": 0.4618556701030928,
      "grad_norm": 0.5064010620117188,
      "learning_rate": 0.00019173653914692946,
      "loss": 0.6524,
      "step": 56
    },
    {
      "epoch": 0.47010309278350515,
      "grad_norm": 0.5001596808433533,
      "learning_rate": 0.00019137866189975202,
      "loss": 0.622,
      "step": 57
    },
    {
      "epoch": 0.47835051546391755,
      "grad_norm": 0.4940868616104126,
      "learning_rate": 0.00019101354709115468,
      "loss": 0.6362,
      "step": 58
    },
    {
      "epoch": 0.4865979381443299,
      "grad_norm": 0.4839000701904297,
      "learning_rate": 0.00019064122363971427,
      "loss": 0.6223,
      "step": 59
    },
    {
      "epoch": 0.4948453608247423,
      "grad_norm": 0.4475175440311432,
      "learning_rate": 0.00019026172103496137,
      "loss": 0.5625,
      "step": 60
    },
    {
      "epoch": 0.5030927835051546,
      "grad_norm": 0.4716227352619171,
      "learning_rate": 0.0001898750693350447,
      "loss": 0.5161,
      "step": 61
    },
    {
      "epoch": 0.5030927835051546,
      "eval_loss": 0.5744044780731201,
      "eval_runtime": 296.9417,
      "eval_samples_per_second": 2.021,
      "eval_steps_per_second": 1.01,
      "step": 61
    },
    {
      "epoch": 0.511340206185567,
      "grad_norm": 0.47013187408447266,
      "learning_rate": 0.00018948129916435046,
      "loss": 0.592,
      "step": 62
    },
    {
      "epoch": 0.5195876288659794,
      "grad_norm": 0.44856756925582886,
      "learning_rate": 0.00018908044171107657,
      "loss": 0.5981,
      "step": 63
    },
    {
      "epoch": 0.5278350515463918,
      "grad_norm": 0.4443698525428772,
      "learning_rate": 0.00018867252872476257,
      "loss": 0.6068,
      "step": 64
    },
    {
      "epoch": 0.5360824742268041,
      "grad_norm": 0.423221617937088,
      "learning_rate": 0.00018825759251377483,
      "loss": 0.537,
      "step": 65
    },
    {
      "epoch": 0.5443298969072164,
      "grad_norm": 0.4647126793861389,
      "learning_rate": 0.00018783566594274783,
      "loss": 0.5896,
      "step": 66
    },
    {
      "epoch": 0.5525773195876289,
      "grad_norm": 0.4287152588367462,
      "learning_rate": 0.00018740678242998077,
      "loss": 0.5424,
      "step": 67
    },
    {
      "epoch": 0.5608247422680412,
      "grad_norm": 0.4562716484069824,
      "learning_rate": 0.00018697097594479103,
      "loss": 0.5571,
      "step": 68
    },
    {
      "epoch": 0.5690721649484536,
      "grad_norm": 0.37922483682632446,
      "learning_rate": 0.0001865282810048235,
      "loss": 0.5337,
      "step": 69
    },
    {
      "epoch": 0.5773195876288659,
      "grad_norm": 0.4227437973022461,
      "learning_rate": 0.0001860787326733168,
      "loss": 0.5989,
      "step": 70
    },
    {
      "epoch": 0.5855670103092784,
      "grad_norm": 0.39743122458457947,
      "learning_rate": 0.0001856223665563258,
      "loss": 0.537,
      "step": 71
    },
    {
      "epoch": 0.5938144329896907,
      "grad_norm": 0.41971203684806824,
      "learning_rate": 0.00018515921879990187,
      "loss": 0.5353,
      "step": 72
    },
    {
      "epoch": 0.6020618556701031,
      "grad_norm": 0.4010031819343567,
      "learning_rate": 0.00018468932608722973,
      "loss": 0.5954,
      "step": 73
    },
    {
      "epoch": 0.6103092783505155,
      "grad_norm": 0.469838947057724,
      "learning_rate": 0.000184212725635722,
      "loss": 0.6942,
      "step": 74
    },
    {
      "epoch": 0.6185567010309279,
      "grad_norm": 0.381673663854599,
      "learning_rate": 0.00018372945519407158,
      "loss": 0.557,
      "step": 75
    },
    {
      "epoch": 0.6268041237113402,
      "grad_norm": 0.3736635446548462,
      "learning_rate": 0.00018323955303926163,
      "loss": 0.5174,
      "step": 76
    },
    {
      "epoch": 0.6350515463917525,
      "grad_norm": 0.4390040338039398,
      "learning_rate": 0.00018274305797353395,
      "loss": 0.6249,
      "step": 77
    },
    {
      "epoch": 0.643298969072165,
      "grad_norm": 0.4298490285873413,
      "learning_rate": 0.00018224000932131568,
      "loss": 0.5682,
      "step": 78
    },
    {
      "epoch": 0.6515463917525773,
      "grad_norm": 0.3995118737220764,
      "learning_rate": 0.00018173044692610467,
      "loss": 0.5261,
      "step": 79
    },
    {
      "epoch": 0.6597938144329897,
      "grad_norm": 0.41546231508255005,
      "learning_rate": 0.00018121441114731367,
      "loss": 0.5296,
      "step": 80
    },
    {
      "epoch": 0.668041237113402,
      "grad_norm": 0.4129379093647003,
      "learning_rate": 0.0001806919428570737,
      "loss": 0.5576,
      "step": 81
    },
    {
      "epoch": 0.6762886597938145,
      "grad_norm": 0.4260002672672272,
      "learning_rate": 0.00018016308343699687,
      "loss": 0.6042,
      "step": 82
    },
    {
      "epoch": 0.6845360824742268,
      "grad_norm": 0.3927561640739441,
      "learning_rate": 0.00017962787477489878,
      "loss": 0.5349,
      "step": 83
    },
    {
      "epoch": 0.6927835051546392,
      "grad_norm": 0.39806240797042847,
      "learning_rate": 0.00017908635926148069,
      "loss": 0.5386,
      "step": 84
    },
    {
      "epoch": 0.7010309278350515,
      "grad_norm": 0.3762768805027008,
      "learning_rate": 0.00017853857978697223,
      "loss": 0.545,
      "step": 85
    },
    {
      "epoch": 0.709278350515464,
      "grad_norm": 0.4306787848472595,
      "learning_rate": 0.00017798457973773417,
      "loss": 0.6305,
      "step": 86
    },
    {
      "epoch": 0.7175257731958763,
      "grad_norm": 0.3811737596988678,
      "learning_rate": 0.00017742440299282203,
      "loss": 0.539,
      "step": 87
    },
    {
      "epoch": 0.7257731958762886,
      "grad_norm": 0.39163580536842346,
      "learning_rate": 0.00017685809392051083,
      "loss": 0.5286,
      "step": 88
    },
    {
      "epoch": 0.734020618556701,
      "grad_norm": 0.4237455725669861,
      "learning_rate": 0.00017628569737478076,
      "loss": 0.5801,
      "step": 89
    },
    {
      "epoch": 0.7422680412371134,
      "grad_norm": 0.41603776812553406,
      "learning_rate": 0.00017570725869176467,
      "loss": 0.6001,
      "step": 90
    },
    {
      "epoch": 0.7505154639175258,
      "grad_norm": 0.3811546862125397,
      "learning_rate": 0.00017512282368615728,
      "loss": 0.5942,
      "step": 91
    },
    {
      "epoch": 0.7587628865979381,
      "grad_norm": 0.3982357680797577,
      "learning_rate": 0.00017453243864758638,
      "loss": 0.5655,
      "step": 92
    },
    {
      "epoch": 0.7670103092783506,
      "grad_norm": 0.43069708347320557,
      "learning_rate": 0.00017393615033694656,
      "loss": 0.5251,
      "step": 93
    },
    {
      "epoch": 0.7752577319587629,
      "grad_norm": 0.3952259421348572,
      "learning_rate": 0.0001733340059826956,
      "loss": 0.5462,
      "step": 94
    },
    {
      "epoch": 0.7835051546391752,
      "grad_norm": 0.40667715668678284,
      "learning_rate": 0.00017272605327711365,
      "loss": 0.5303,
      "step": 95
    },
    {
      "epoch": 0.7917525773195876,
      "grad_norm": 0.3917592465877533,
      "learning_rate": 0.000172112340372526,
      "loss": 0.612,
      "step": 96
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.37455278635025024,
      "learning_rate": 0.00017149291587748898,
      "loss": 0.4831,
      "step": 97
    },
    {
      "epoch": 0.8082474226804124,
      "grad_norm": 0.3805007040500641,
      "learning_rate": 0.00017086782885294025,
      "loss": 0.5237,
      "step": 98
    },
    {
      "epoch": 0.8164948453608247,
      "grad_norm": 0.4057518541812897,
      "learning_rate": 0.0001702371288083127,
      "loss": 0.611,
      "step": 99
    },
    {
      "epoch": 0.8247422680412371,
      "grad_norm": 0.424902081489563,
      "learning_rate": 0.00016960086569761332,
      "loss": 0.5071,
      "step": 100
    },
    {
      "epoch": 0.8329896907216495,
      "grad_norm": 0.41015303134918213,
      "learning_rate": 0.0001689590899154664,
      "loss": 0.6073,
      "step": 101
    },
    {
      "epoch": 0.8412371134020619,
      "grad_norm": 0.3922909200191498,
      "learning_rate": 0.00016831185229312237,
      "loss": 0.5923,
      "step": 102
    },
    {
      "epoch": 0.8494845360824742,
      "grad_norm": 0.43730470538139343,
      "learning_rate": 0.0001676592040944315,
      "loss": 0.5937,
      "step": 103
    },
    {
      "epoch": 0.8577319587628865,
      "grad_norm": 0.3726069927215576,
      "learning_rate": 0.0001670011970117838,
      "loss": 0.4912,
      "step": 104
    },
    {
      "epoch": 0.865979381443299,
      "grad_norm": 0.404433012008667,
      "learning_rate": 0.00016633788316201454,
      "loss": 0.474,
      "step": 105
    },
    {
      "epoch": 0.8742268041237113,
      "grad_norm": 0.41312918066978455,
      "learning_rate": 0.0001656693150822766,
      "loss": 0.5545,
      "step": 106
    },
    {
      "epoch": 0.8824742268041237,
      "grad_norm": 0.42073437571525574,
      "learning_rate": 0.0001649955457258792,
      "loss": 0.5488,
      "step": 107
    },
    {
      "epoch": 0.8907216494845361,
      "grad_norm": 0.39594408869743347,
      "learning_rate": 0.00016431662845809388,
      "loss": 0.5798,
      "step": 108
    },
    {
      "epoch": 0.8989690721649485,
      "grad_norm": 0.41331595182418823,
      "learning_rate": 0.00016363261705192757,
      "loss": 0.5188,
      "step": 109
    },
    {
      "epoch": 0.9072164948453608,
      "grad_norm": 0.3879910111427307,
      "learning_rate": 0.00016294356568386369,
      "loss": 0.5676,
      "step": 110
    },
    {
      "epoch": 0.9154639175257732,
      "grad_norm": 0.41248220205307007,
      "learning_rate": 0.00016224952892957123,
      "loss": 0.541,
      "step": 111
    },
    {
      "epoch": 0.9237113402061856,
      "grad_norm": 0.3406890034675598,
      "learning_rate": 0.0001615505617595819,
      "loss": 0.4902,
      "step": 112
    },
    {
      "epoch": 0.931958762886598,
      "grad_norm": 0.3741547167301178,
      "learning_rate": 0.00016084671953493643,
      "loss": 0.5247,
      "step": 113
    },
    {
      "epoch": 0.9402061855670103,
      "grad_norm": 0.37796300649642944,
      "learning_rate": 0.00016013805800279976,
      "loss": 0.5091,
      "step": 114
    },
    {
      "epoch": 0.9484536082474226,
      "grad_norm": 0.3957986533641815,
      "learning_rate": 0.00015942463329204546,
      "loss": 0.5809,
      "step": 115
    },
    {
      "epoch": 0.9567010309278351,
      "grad_norm": 0.38373249769210815,
      "learning_rate": 0.00015870650190881022,
      "loss": 0.4693,
      "step": 116
    },
    {
      "epoch": 0.9649484536082474,
      "grad_norm": 0.4250248074531555,
      "learning_rate": 0.00015798372073201836,
      "loss": 0.5668,
      "step": 117
    },
    {
      "epoch": 0.9731958762886598,
      "grad_norm": 0.37807247042655945,
      "learning_rate": 0.00015725634700887678,
      "loss": 0.5159,
      "step": 118
    },
    {
      "epoch": 0.9814432989690721,
      "grad_norm": 0.3726092278957367,
      "learning_rate": 0.00015652443835034068,
      "loss": 0.5605,
      "step": 119
    },
    {
      "epoch": 0.9896907216494846,
      "grad_norm": 0.38076040148735046,
      "learning_rate": 0.0001557880527265505,
      "loss": 0.537,
      "step": 120
    },
    {
      "epoch": 0.9979381443298969,
      "grad_norm": 0.37176623940467834,
      "learning_rate": 0.00015504724846224064,
      "loss": 0.5603,
      "step": 121
    },
    {
      "epoch": 1.0061855670103093,
      "grad_norm": 0.36700350046157837,
      "learning_rate": 0.00015430208423211975,
      "loss": 0.5398,
      "step": 122
    },
    {
      "epoch": 1.0061855670103093,
      "eval_loss": 0.5378922820091248,
      "eval_runtime": 296.9545,
      "eval_samples_per_second": 2.021,
      "eval_steps_per_second": 1.01,
      "step": 122
    }
  ],
  "logging_steps": 1,
  "max_steps": 363,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.642172793225216e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}