{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.08140670791273201,
  "eval_steps": 500,
  "global_step": 1000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0040703353956366,
      "grad_norm": 0.8551507592201233,
      "learning_rate": 2.9959296646043635e-05,
      "loss": 1.8278,
      "step": 50
    },
    {
      "epoch": 0.0081406707912732,
      "grad_norm": 0.6171118021011353,
      "learning_rate": 2.991859329208727e-05,
      "loss": 0.0282,
      "step": 100
    },
    {
      "epoch": 0.012211006186909802,
      "grad_norm": 0.04681049659848213,
      "learning_rate": 2.98778899381309e-05,
      "loss": 0.0061,
      "step": 150
    },
    {
      "epoch": 0.0162813415825464,
      "grad_norm": 0.0864928737282753,
      "learning_rate": 2.9837186584174535e-05,
      "loss": 0.0061,
      "step": 200
    },
    {
      "epoch": 0.020351676978183002,
      "grad_norm": 0.09369981288909912,
      "learning_rate": 2.979648323021817e-05,
      "loss": 0.0048,
      "step": 250
    },
    {
      "epoch": 0.024422012373819604,
      "grad_norm": 0.05656412988901138,
      "learning_rate": 2.9755779876261804e-05,
      "loss": 0.0044,
      "step": 300
    },
    {
      "epoch": 0.028492347769456205,
      "grad_norm": 0.06424488872289658,
      "learning_rate": 2.971507652230544e-05,
      "loss": 0.0033,
      "step": 350
    },
    {
      "epoch": 0.0325626831650928,
      "grad_norm": 0.028317060321569443,
      "learning_rate": 2.967437316834907e-05,
      "loss": 0.0034,
      "step": 400
    },
    {
      "epoch": 0.036633018560729404,
      "grad_norm": 0.03844854235649109,
      "learning_rate": 2.9633669814392704e-05,
      "loss": 0.0036,
      "step": 450
    },
    {
      "epoch": 0.040703353956366005,
      "grad_norm": 0.013333266600966454,
      "learning_rate": 2.959296646043634e-05,
      "loss": 0.0026,
      "step": 500
    },
    {
      "epoch": 0.044773689352002606,
      "grad_norm": 0.027189180254936218,
      "learning_rate": 2.9552263106479973e-05,
      "loss": 0.0027,
      "step": 550
    },
    {
      "epoch": 0.04884402474763921,
      "grad_norm": 0.015432504937052727,
      "learning_rate": 2.951155975252361e-05,
      "loss": 0.0021,
      "step": 600
    },
    {
      "epoch": 0.05291436014327581,
      "grad_norm": 0.0909409150481224,
      "learning_rate": 2.9470856398567242e-05,
      "loss": 0.0028,
      "step": 650
    },
    {
      "epoch": 0.05698469553891241,
      "grad_norm": 0.03523814678192139,
      "learning_rate": 2.9430153044610877e-05,
      "loss": 0.0027,
      "step": 700
    },
    {
      "epoch": 0.061055030934549004,
      "grad_norm": 0.037610672414302826,
      "learning_rate": 2.938944969065451e-05,
      "loss": 0.0025,
      "step": 750
    },
    {
      "epoch": 0.0651253663301856,
      "grad_norm": 0.03404204174876213,
      "learning_rate": 2.9348746336698146e-05,
      "loss": 0.0023,
      "step": 800
    },
    {
      "epoch": 0.0691957017258222,
      "grad_norm": 0.0387328565120697,
      "learning_rate": 2.930804298274178e-05,
      "loss": 0.0023,
      "step": 850
    },
    {
      "epoch": 0.07326603712145881,
      "grad_norm": 0.017468873411417007,
      "learning_rate": 2.9267339628785415e-05,
      "loss": 0.0023,
      "step": 900
    },
    {
      "epoch": 0.07733637251709541,
      "grad_norm": 0.025613853707909584,
      "learning_rate": 2.9226636274829046e-05,
      "loss": 0.003,
      "step": 950
    },
    {
      "epoch": 0.08140670791273201,
      "grad_norm": 0.020182810723781586,
      "learning_rate": 2.918593292087268e-05,
      "loss": 0.002,
      "step": 1000
    }
  ],
  "logging_steps": 50,
  "max_steps": 36852,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 200,
  "total_flos": 0.0,
  "train_batch_size": 128,
  "trial_name": null,
  "trial_params": null
}