{
  "best_metric": 1.2263596057891846,
  "best_model_checkpoint": "data/Llama-31-8B_task-1_180-samples_config-1/checkpoint-68",
  "epoch": 11.0,
  "eval_steps": 500,
  "global_step": 187,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.058823529411764705,
      "grad_norm": 1.806323766708374,
      "learning_rate": 1.1764705882352942e-06,
      "loss": 2.1281,
      "step": 1
    },
    {
      "epoch": 0.11764705882352941,
      "grad_norm": 1.5079092979431152,
      "learning_rate": 2.3529411764705885e-06,
      "loss": 1.9223,
      "step": 2
    },
    {
      "epoch": 0.23529411764705882,
      "grad_norm": 1.8070296049118042,
      "learning_rate": 4.705882352941177e-06,
      "loss": 2.0084,
      "step": 4
    },
    {
      "epoch": 0.35294117647058826,
      "grad_norm": 1.6644213199615479,
      "learning_rate": 7.058823529411765e-06,
      "loss": 2.1225,
      "step": 6
    },
    {
      "epoch": 0.47058823529411764,
      "grad_norm": 1.774314284324646,
      "learning_rate": 9.411764705882354e-06,
      "loss": 1.9249,
      "step": 8
    },
    {
      "epoch": 0.5882352941176471,
      "grad_norm": 1.6763962507247925,
      "learning_rate": 1.1764705882352942e-05,
      "loss": 2.06,
      "step": 10
    },
    {
      "epoch": 0.7058823529411765,
      "grad_norm": 1.8542298078536987,
      "learning_rate": 1.411764705882353e-05,
      "loss": 1.981,
      "step": 12
    },
    {
      "epoch": 0.8235294117647058,
      "grad_norm": 1.5285791158676147,
      "learning_rate": 1.647058823529412e-05,
      "loss": 1.9257,
      "step": 14
    },
    {
      "epoch": 0.9411764705882353,
      "grad_norm": 1.7371580600738525,
      "learning_rate": 1.8823529411764708e-05,
      "loss": 1.8393,
      "step": 16
    },
    {
      "epoch": 1.0,
      "eval_loss": 1.7238094806671143,
      "eval_runtime": 14.4902,
      "eval_samples_per_second": 2.484,
      "eval_steps_per_second": 2.484,
      "step": 17
    },
    {
      "epoch": 1.0588235294117647,
      "grad_norm": 1.277214765548706,
      "learning_rate": 2.1176470588235296e-05,
      "loss": 1.713,
      "step": 18
    },
    {
      "epoch": 1.1764705882352942,
      "grad_norm": 0.8376792073249817,
      "learning_rate": 2.3529411764705884e-05,
      "loss": 1.7011,
      "step": 20
    },
    {
      "epoch": 1.2941176470588236,
      "grad_norm": 0.6922251582145691,
      "learning_rate": 2.5882352941176475e-05,
      "loss": 1.5788,
      "step": 22
    },
    {
      "epoch": 1.4117647058823528,
      "grad_norm": 0.7128008008003235,
      "learning_rate": 2.823529411764706e-05,
      "loss": 1.6274,
      "step": 24
    },
    {
      "epoch": 1.5294117647058822,
      "grad_norm": 0.8218591809272766,
      "learning_rate": 3.058823529411765e-05,
      "loss": 1.5799,
      "step": 26
    },
    {
      "epoch": 1.6470588235294117,
      "grad_norm": 0.7004019021987915,
      "learning_rate": 3.294117647058824e-05,
      "loss": 1.4887,
      "step": 28
    },
    {
      "epoch": 1.7647058823529411,
      "grad_norm": 0.6600505113601685,
      "learning_rate": 3.529411764705883e-05,
      "loss": 1.4354,
      "step": 30
    },
    {
      "epoch": 1.8823529411764706,
      "grad_norm": 0.6615216135978699,
      "learning_rate": 3.7647058823529415e-05,
      "loss": 1.4252,
      "step": 32
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.7450706958770752,
      "learning_rate": 4e-05,
      "loss": 1.3906,
      "step": 34
    },
    {
      "epoch": 2.0,
      "eval_loss": 1.4334121942520142,
      "eval_runtime": 14.4955,
      "eval_samples_per_second": 2.484,
      "eval_steps_per_second": 2.484,
      "step": 34
    },
    {
      "epoch": 2.1176470588235294,
      "grad_norm": 0.6278265714645386,
      "learning_rate": 4.235294117647059e-05,
      "loss": 1.305,
      "step": 36
    },
    {
      "epoch": 2.235294117647059,
      "grad_norm": 0.7144865989685059,
      "learning_rate": 4.470588235294118e-05,
      "loss": 1.4263,
      "step": 38
    },
    {
      "epoch": 2.3529411764705883,
      "grad_norm": 0.7090567350387573,
      "learning_rate": 4.705882352941177e-05,
      "loss": 1.2102,
      "step": 40
    },
    {
      "epoch": 2.4705882352941178,
      "grad_norm": 0.7017014622688293,
      "learning_rate": 4.9411764705882355e-05,
      "loss": 1.3959,
      "step": 42
    },
    {
      "epoch": 2.588235294117647,
      "grad_norm": 0.7466076016426086,
      "learning_rate": 5.176470588235295e-05,
      "loss": 1.2635,
      "step": 44
    },
    {
      "epoch": 2.7058823529411766,
      "grad_norm": 0.750141978263855,
      "learning_rate": 5.411764705882353e-05,
      "loss": 1.2825,
      "step": 46
    },
    {
      "epoch": 2.8235294117647056,
      "grad_norm": 0.7849295139312744,
      "learning_rate": 5.647058823529412e-05,
      "loss": 1.2437,
      "step": 48
    },
    {
      "epoch": 2.9411764705882355,
      "grad_norm": 0.814879298210144,
      "learning_rate": 5.882352941176471e-05,
      "loss": 1.1829,
      "step": 50
    },
    {
      "epoch": 3.0,
      "eval_loss": 1.272010326385498,
      "eval_runtime": 14.4968,
      "eval_samples_per_second": 2.483,
      "eval_steps_per_second": 2.483,
      "step": 51
    },
    {
      "epoch": 3.0588235294117645,
      "grad_norm": 0.9217017889022827,
      "learning_rate": 6.11764705882353e-05,
      "loss": 1.1552,
      "step": 52
    },
    {
      "epoch": 3.176470588235294,
      "grad_norm": 0.8735676407814026,
      "learning_rate": 6.352941176470588e-05,
      "loss": 1.1407,
      "step": 54
    },
    {
      "epoch": 3.2941176470588234,
      "grad_norm": 0.8801168203353882,
      "learning_rate": 6.588235294117648e-05,
      "loss": 1.1258,
      "step": 56
    },
    {
      "epoch": 3.411764705882353,
      "grad_norm": 0.9490126967430115,
      "learning_rate": 6.823529411764707e-05,
      "loss": 1.0762,
      "step": 58
    },
    {
      "epoch": 3.5294117647058822,
      "grad_norm": 1.0785832405090332,
      "learning_rate": 7.058823529411765e-05,
      "loss": 1.1216,
      "step": 60
    },
    {
      "epoch": 3.6470588235294117,
      "grad_norm": 1.0801024436950684,
      "learning_rate": 7.294117647058823e-05,
      "loss": 1.0717,
      "step": 62
    },
    {
      "epoch": 3.764705882352941,
      "grad_norm": 0.9483073353767395,
      "learning_rate": 7.529411764705883e-05,
      "loss": 0.996,
      "step": 64
    },
    {
      "epoch": 3.8823529411764706,
      "grad_norm": 1.1348040103912354,
      "learning_rate": 7.764705882352942e-05,
      "loss": 1.0813,
      "step": 66
    },
    {
      "epoch": 4.0,
      "grad_norm": 1.2702502012252808,
      "learning_rate": 8e-05,
      "loss": 1.0037,
      "step": 68
    },
    {
      "epoch": 4.0,
      "eval_loss": 1.2263596057891846,
      "eval_runtime": 14.4887,
      "eval_samples_per_second": 2.485,
      "eval_steps_per_second": 2.485,
      "step": 68
    },
    {
      "epoch": 4.117647058823529,
      "grad_norm": 1.0951895713806152,
      "learning_rate": 8.23529411764706e-05,
      "loss": 0.9193,
      "step": 70
    },
    {
      "epoch": 4.235294117647059,
      "grad_norm": 1.1319009065628052,
      "learning_rate": 8.470588235294118e-05,
      "loss": 0.9073,
      "step": 72
    },
    {
      "epoch": 4.352941176470588,
      "grad_norm": 1.3507709503173828,
      "learning_rate": 8.705882352941177e-05,
      "loss": 0.9094,
      "step": 74
    },
    {
      "epoch": 4.470588235294118,
      "grad_norm": 1.5976216793060303,
      "learning_rate": 8.941176470588236e-05,
      "loss": 0.8,
      "step": 76
    },
    {
      "epoch": 4.588235294117647,
      "grad_norm": 1.5247750282287598,
      "learning_rate": 9.176470588235295e-05,
      "loss": 0.8411,
      "step": 78
    },
    {
      "epoch": 4.705882352941177,
      "grad_norm": 1.7212128639221191,
      "learning_rate": 9.411764705882353e-05,
      "loss": 0.7832,
      "step": 80
    },
    {
      "epoch": 4.823529411764706,
      "grad_norm": 1.636215090751648,
      "learning_rate": 9.647058823529412e-05,
      "loss": 0.7942,
      "step": 82
    },
    {
      "epoch": 4.9411764705882355,
      "grad_norm": 1.6557462215423584,
      "learning_rate": 9.882352941176471e-05,
      "loss": 0.875,
      "step": 84
    },
    {
      "epoch": 5.0,
      "eval_loss": 1.2729443311691284,
      "eval_runtime": 14.4917,
      "eval_samples_per_second": 2.484,
      "eval_steps_per_second": 2.484,
      "step": 85
    },
    {
      "epoch": 5.0588235294117645,
      "grad_norm": 1.4015008211135864,
      "learning_rate": 9.99995783847866e-05,
      "loss": 0.7261,
      "step": 86
    },
    {
      "epoch": 5.176470588235294,
      "grad_norm": 1.3426791429519653,
      "learning_rate": 9.999620550574153e-05,
      "loss": 0.595,
      "step": 88
    },
    {
      "epoch": 5.294117647058823,
      "grad_norm": 1.7509989738464355,
      "learning_rate": 9.998945997517956e-05,
      "loss": 0.5419,
      "step": 90
    },
    {
      "epoch": 5.411764705882353,
      "grad_norm": 3.1275417804718018,
      "learning_rate": 9.997934224814173e-05,
      "loss": 0.6423,
      "step": 92
    },
    {
      "epoch": 5.529411764705882,
      "grad_norm": 2.2710916996002197,
      "learning_rate": 9.996585300715116e-05,
      "loss": 0.5543,
      "step": 94
    },
    {
      "epoch": 5.647058823529412,
      "grad_norm": 1.9362425804138184,
      "learning_rate": 9.994899316216708e-05,
      "loss": 0.4271,
      "step": 96
    },
    {
      "epoch": 5.764705882352941,
      "grad_norm": 1.7837473154067993,
      "learning_rate": 9.992876385052345e-05,
      "loss": 0.5637,
      "step": 98
    },
    {
      "epoch": 5.882352941176471,
      "grad_norm": 1.80534827709198,
      "learning_rate": 9.990516643685222e-05,
      "loss": 0.6042,
      "step": 100
    },
    {
      "epoch": 6.0,
      "grad_norm": 1.8158038854599,
      "learning_rate": 9.987820251299122e-05,
      "loss": 0.5286,
      "step": 102
    },
    {
      "epoch": 6.0,
      "eval_loss": 1.4640731811523438,
      "eval_runtime": 14.4917,
      "eval_samples_per_second": 2.484,
      "eval_steps_per_second": 2.484,
      "step": 102
    },
    {
      "epoch": 6.117647058823529,
      "grad_norm": 1.8034647703170776,
      "learning_rate": 9.984787389787688e-05,
      "loss": 0.3217,
      "step": 104
    },
    {
      "epoch": 6.235294117647059,
      "grad_norm": 1.959482192993164,
      "learning_rate": 9.981418263742148e-05,
      "loss": 0.3218,
      "step": 106
    },
    {
      "epoch": 6.352941176470588,
      "grad_norm": 2.72263240814209,
      "learning_rate": 9.977713100437509e-05,
      "loss": 0.3157,
      "step": 108
    },
    {
      "epoch": 6.470588235294118,
      "grad_norm": 3.339064359664917,
      "learning_rate": 9.973672149817232e-05,
      "loss": 0.3228,
      "step": 110
    },
    {
      "epoch": 6.588235294117647,
      "grad_norm": 3.0396878719329834,
      "learning_rate": 9.96929568447637e-05,
      "loss": 0.2937,
      "step": 112
    },
    {
      "epoch": 6.705882352941177,
      "grad_norm": 3.0496692657470703,
      "learning_rate": 9.964583999643174e-05,
      "loss": 0.3314,
      "step": 114
    },
    {
      "epoch": 6.823529411764706,
      "grad_norm": 1.9532092809677124,
      "learning_rate": 9.95953741315919e-05,
      "loss": 0.2716,
      "step": 116
    },
    {
      "epoch": 6.9411764705882355,
      "grad_norm": 1.7998892068862915,
      "learning_rate": 9.954156265457801e-05,
      "loss": 0.2721,
      "step": 118
    },
    {
      "epoch": 7.0,
      "eval_loss": 1.7060846090316772,
      "eval_runtime": 14.4931,
      "eval_samples_per_second": 2.484,
      "eval_steps_per_second": 2.484,
      "step": 119
    },
    {
      "epoch": 7.0588235294117645,
      "grad_norm": 1.2864255905151367,
      "learning_rate": 9.948440919541278e-05,
      "loss": 0.2517,
      "step": 120
    },
    {
      "epoch": 7.176470588235294,
      "grad_norm": 1.3337100744247437,
      "learning_rate": 9.942391760956277e-05,
      "loss": 0.1339,
      "step": 122
    },
    {
      "epoch": 7.294117647058823,
      "grad_norm": 2.677187442779541,
      "learning_rate": 9.936009197767845e-05,
      "loss": 0.1415,
      "step": 124
    },
    {
      "epoch": 7.411764705882353,
      "grad_norm": 2.17299485206604,
      "learning_rate": 9.929293660531888e-05,
      "loss": 0.1373,
      "step": 126
    },
    {
      "epoch": 7.529411764705882,
      "grad_norm": 3.5403952598571777,
      "learning_rate": 9.922245602266118e-05,
      "loss": 0.1608,
      "step": 128
    },
    {
      "epoch": 7.647058823529412,
      "grad_norm": 2.2326877117156982,
      "learning_rate": 9.91486549841951e-05,
      "loss": 0.1371,
      "step": 130
    },
    {
      "epoch": 7.764705882352941,
      "grad_norm": 2.6239540576934814,
      "learning_rate": 9.90715384684021e-05,
      "loss": 0.1604,
      "step": 132
    },
    {
      "epoch": 7.882352941176471,
      "grad_norm": 2.034834623336792,
      "learning_rate": 9.899111167741966e-05,
      "loss": 0.1671,
      "step": 134
    },
    {
      "epoch": 8.0,
      "grad_norm": 2.1497554779052734,
      "learning_rate": 9.890738003669029e-05,
      "loss": 0.16,
      "step": 136
    },
    {
      "epoch": 8.0,
      "eval_loss": 2.0944266319274902,
      "eval_runtime": 14.498,
      "eval_samples_per_second": 2.483,
      "eval_steps_per_second": 2.483,
      "step": 136
    },
    {
      "epoch": 8.117647058823529,
      "grad_norm": 1.502654790878296,
      "learning_rate": 9.882034919459555e-05,
      "loss": 0.081,
      "step": 138
    },
    {
      "epoch": 8.235294117647058,
      "grad_norm": 1.151986837387085,
      "learning_rate": 9.873002502207503e-05,
      "loss": 0.0554,
      "step": 140
    },
    {
      "epoch": 8.352941176470589,
      "grad_norm": 3.554004669189453,
      "learning_rate": 9.863641361223024e-05,
      "loss": 0.0771,
      "step": 142
    },
    {
      "epoch": 8.470588235294118,
      "grad_norm": 2.4202420711517334,
      "learning_rate": 9.853952127991372e-05,
      "loss": 0.0956,
      "step": 144
    },
    {
      "epoch": 8.588235294117647,
      "grad_norm": 3.394904613494873,
      "learning_rate": 9.843935456130295e-05,
      "loss": 0.0781,
      "step": 146
    },
    {
      "epoch": 8.705882352941176,
      "grad_norm": 1.7927554845809937,
      "learning_rate": 9.833592021345937e-05,
      "loss": 0.0944,
      "step": 148
    },
    {
      "epoch": 8.823529411764707,
      "grad_norm": 1.5801514387130737,
      "learning_rate": 9.822922521387276e-05,
      "loss": 0.0829,
      "step": 150
    },
    {
      "epoch": 8.941176470588236,
      "grad_norm": 2.423281669616699,
      "learning_rate": 9.811927675999036e-05,
      "loss": 0.0814,
      "step": 152
    },
    {
      "epoch": 9.0,
      "eval_loss": 2.3201403617858887,
      "eval_runtime": 14.4986,
      "eval_samples_per_second": 2.483,
      "eval_steps_per_second": 2.483,
      "step": 153
    },
    {
      "epoch": 9.058823529411764,
      "grad_norm": 0.9261131882667542,
      "learning_rate": 9.800608226873142e-05,
      "loss": 0.0459,
      "step": 154
    },
    {
      "epoch": 9.176470588235293,
      "grad_norm": 0.9879596829414368,
      "learning_rate": 9.788964937598689e-05,
      "loss": 0.0279,
      "step": 156
    },
    {
      "epoch": 9.294117647058824,
      "grad_norm": 1.363547444343567,
      "learning_rate": 9.776998593610428e-05,
      "loss": 0.0334,
      "step": 158
    },
    {
      "epoch": 9.411764705882353,
      "grad_norm": 1.988921046257019,
      "learning_rate": 9.764710002135784e-05,
      "loss": 0.0548,
      "step": 160
    },
    {
      "epoch": 9.529411764705882,
      "grad_norm": 1.7726337909698486,
      "learning_rate": 9.752099992140399e-05,
      "loss": 0.047,
      "step": 162
    },
    {
      "epoch": 9.647058823529411,
      "grad_norm": 1.5547761917114258,
      "learning_rate": 9.739169414272217e-05,
      "loss": 0.0633,
      "step": 164
    },
    {
      "epoch": 9.764705882352942,
      "grad_norm": 2.4369709491729736,
      "learning_rate": 9.725919140804099e-05,
      "loss": 0.0684,
      "step": 166
    },
    {
      "epoch": 9.882352941176471,
      "grad_norm": 2.0575246810913086,
      "learning_rate": 9.71235006557497e-05,
      "loss": 0.0708,
      "step": 168
    },
    {
      "epoch": 10.0,
      "grad_norm": 1.5280945301055908,
      "learning_rate": 9.698463103929542e-05,
      "loss": 0.0608,
      "step": 170
    },
    {
      "epoch": 10.0,
      "eval_loss": 2.290346145629883,
      "eval_runtime": 14.4996,
      "eval_samples_per_second": 2.483,
      "eval_steps_per_second": 2.483,
      "step": 170
    },
    {
      "epoch": 10.117647058823529,
      "grad_norm": 1.086666464805603,
      "learning_rate": 9.684259192656553e-05,
      "loss": 0.0315,
      "step": 172
    },
    {
      "epoch": 10.235294117647058,
      "grad_norm": 1.520897626876831,
      "learning_rate": 9.669739289925577e-05,
      "loss": 0.0324,
      "step": 174
    },
    {
      "epoch": 10.352941176470589,
      "grad_norm": 2.3758738040924072,
      "learning_rate": 9.654904375222385e-05,
      "loss": 0.0401,
      "step": 176
    },
    {
      "epoch": 10.470588235294118,
      "grad_norm": 2.0643303394317627,
      "learning_rate": 9.639755449282875e-05,
      "loss": 0.0572,
      "step": 178
    },
    {
      "epoch": 10.588235294117647,
      "grad_norm": 1.8688856363296509,
      "learning_rate": 9.62429353402556e-05,
      "loss": 0.0354,
      "step": 180
    },
    {
      "epoch": 10.705882352941176,
      "grad_norm": 1.3134160041809082,
      "learning_rate": 9.608519672482636e-05,
      "loss": 0.0585,
      "step": 182
    },
    {
      "epoch": 10.823529411764707,
      "grad_norm": 1.692589521408081,
      "learning_rate": 9.592434928729616e-05,
      "loss": 0.0269,
      "step": 184
    },
    {
      "epoch": 10.941176470588236,
      "grad_norm": 1.541475534439087,
      "learning_rate": 9.576040387813552e-05,
      "loss": 0.033,
      "step": 186
    },
    {
      "epoch": 11.0,
      "eval_loss": 2.681903839111328,
      "eval_runtime": 14.4882,
      "eval_samples_per_second": 2.485,
      "eval_steps_per_second": 2.485,
      "step": 187
    },
    {
      "epoch": 11.0,
      "step": 187,
      "total_flos": 2.381284302336819e+16,
      "train_loss": 0.7195047194506714,
      "train_runtime": 1765.5808,
      "train_samples_per_second": 3.851,
      "train_steps_per_second": 0.481
    }
  ],
  "logging_steps": 2,
  "max_steps": 850,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 50,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 7,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.381284302336819e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}