{
  "best_metric": 2.515887498855591,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.24875621890547264,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.004975124378109453,
      "grad_norm": 3.0430262088775635,
      "learning_rate": 0.0001,
      "loss": 3.2635,
      "step": 1
    },
    {
      "epoch": 0.004975124378109453,
      "eval_loss": 3.682396650314331,
      "eval_runtime": 6.5408,
      "eval_samples_per_second": 12.995,
      "eval_steps_per_second": 6.574,
      "step": 1
    },
    {
      "epoch": 0.009950248756218905,
      "grad_norm": 3.611407518386841,
      "learning_rate": 0.0002,
      "loss": 3.1135,
      "step": 2
    },
    {
      "epoch": 0.014925373134328358,
      "grad_norm": 3.713569402694702,
      "learning_rate": 0.00019978589232386035,
      "loss": 3.2019,
      "step": 3
    },
    {
      "epoch": 0.01990049751243781,
      "grad_norm": 3.7321176528930664,
      "learning_rate": 0.00019914448613738106,
      "loss": 2.8382,
      "step": 4
    },
    {
      "epoch": 0.024875621890547265,
      "grad_norm": 3.764651298522949,
      "learning_rate": 0.00019807852804032305,
      "loss": 2.8298,
      "step": 5
    },
    {
      "epoch": 0.029850746268656716,
      "grad_norm": 3.621135950088501,
      "learning_rate": 0.00019659258262890683,
      "loss": 3.1415,
      "step": 6
    },
    {
      "epoch": 0.03482587064676617,
      "grad_norm": 8.381474494934082,
      "learning_rate": 0.0001946930129495106,
      "loss": 2.8867,
      "step": 7
    },
    {
      "epoch": 0.03980099502487562,
      "grad_norm": 3.6087472438812256,
      "learning_rate": 0.0001923879532511287,
      "loss": 2.6187,
      "step": 8
    },
    {
      "epoch": 0.04477611940298507,
      "grad_norm": 4.97490119934082,
      "learning_rate": 0.00018968727415326884,
      "loss": 2.7751,
      "step": 9
    },
    {
      "epoch": 0.04975124378109453,
      "grad_norm": 3.8109967708587646,
      "learning_rate": 0.00018660254037844388,
      "loss": 2.9982,
      "step": 10
    },
    {
      "epoch": 0.05472636815920398,
      "grad_norm": 3.904412269592285,
      "learning_rate": 0.00018314696123025454,
      "loss": 2.9809,
      "step": 11
    },
    {
      "epoch": 0.05970149253731343,
      "grad_norm": 3.4200658798217773,
      "learning_rate": 0.00017933533402912354,
      "loss": 2.5012,
      "step": 12
    },
    {
      "epoch": 0.06467661691542288,
      "grad_norm": 4.79351806640625,
      "learning_rate": 0.00017518398074789775,
      "loss": 3.2791,
      "step": 13
    },
    {
      "epoch": 0.06965174129353234,
      "grad_norm": 4.197938919067383,
      "learning_rate": 0.00017071067811865476,
      "loss": 2.7394,
      "step": 14
    },
    {
      "epoch": 0.07462686567164178,
      "grad_norm": 3.96777081489563,
      "learning_rate": 0.00016593458151000688,
      "loss": 2.3632,
      "step": 15
    },
    {
      "epoch": 0.07960199004975124,
      "grad_norm": 3.868743658065796,
      "learning_rate": 0.00016087614290087208,
      "loss": 2.6906,
      "step": 16
    },
    {
      "epoch": 0.0845771144278607,
      "grad_norm": 4.248363018035889,
      "learning_rate": 0.00015555702330196023,
      "loss": 2.4091,
      "step": 17
    },
    {
      "epoch": 0.08955223880597014,
      "grad_norm": 4.386332035064697,
      "learning_rate": 0.00015000000000000001,
      "loss": 3.0164,
      "step": 18
    },
    {
      "epoch": 0.0945273631840796,
      "grad_norm": 4.483827114105225,
      "learning_rate": 0.00014422886902190014,
      "loss": 2.6212,
      "step": 19
    },
    {
      "epoch": 0.09950248756218906,
      "grad_norm": 3.482344627380371,
      "learning_rate": 0.000138268343236509,
      "loss": 2.5468,
      "step": 20
    },
    {
      "epoch": 0.1044776119402985,
      "grad_norm": 3.7087607383728027,
      "learning_rate": 0.00013214394653031616,
      "loss": 2.5328,
      "step": 21
    },
    {
      "epoch": 0.10945273631840796,
      "grad_norm": 4.2484049797058105,
      "learning_rate": 0.00012588190451025207,
      "loss": 2.0708,
      "step": 22
    },
    {
      "epoch": 0.11442786069651742,
      "grad_norm": 4.827538967132568,
      "learning_rate": 0.00011950903220161285,
      "loss": 2.785,
      "step": 23
    },
    {
      "epoch": 0.11940298507462686,
      "grad_norm": 3.7040979862213135,
      "learning_rate": 0.00011305261922200519,
      "loss": 2.342,
      "step": 24
    },
    {
      "epoch": 0.12437810945273632,
      "grad_norm": 4.959524631500244,
      "learning_rate": 0.00010654031292301432,
      "loss": 2.704,
      "step": 25
    },
    {
      "epoch": 0.12437810945273632,
      "eval_loss": 2.594191312789917,
      "eval_runtime": 6.1599,
      "eval_samples_per_second": 13.799,
      "eval_steps_per_second": 6.981,
      "step": 25
    },
    {
      "epoch": 0.12935323383084577,
      "grad_norm": 3.9420931339263916,
      "learning_rate": 0.0001,
      "loss": 2.5456,
      "step": 26
    },
    {
      "epoch": 0.13432835820895522,
      "grad_norm": 5.832747936248779,
      "learning_rate": 9.345968707698569e-05,
      "loss": 2.1194,
      "step": 27
    },
    {
      "epoch": 0.13930348258706468,
      "grad_norm": 5.003444194793701,
      "learning_rate": 8.694738077799488e-05,
      "loss": 2.3539,
      "step": 28
    },
    {
      "epoch": 0.14427860696517414,
      "grad_norm": 3.70873761177063,
      "learning_rate": 8.049096779838719e-05,
      "loss": 2.5001,
      "step": 29
    },
    {
      "epoch": 0.14925373134328357,
      "grad_norm": 3.857776403427124,
      "learning_rate": 7.411809548974792e-05,
      "loss": 2.3172,
      "step": 30
    },
    {
      "epoch": 0.15422885572139303,
      "grad_norm": 3.669508934020996,
      "learning_rate": 6.785605346968386e-05,
      "loss": 2.1357,
      "step": 31
    },
    {
      "epoch": 0.15920398009950248,
      "grad_norm": 7.074223518371582,
      "learning_rate": 6.173165676349103e-05,
      "loss": 3.0023,
      "step": 32
    },
    {
      "epoch": 0.16417910447761194,
      "grad_norm": 4.587769508361816,
      "learning_rate": 5.577113097809989e-05,
      "loss": 2.2561,
      "step": 33
    },
    {
      "epoch": 0.1691542288557214,
      "grad_norm": 4.273606777191162,
      "learning_rate": 5.000000000000002e-05,
      "loss": 2.448,
      "step": 34
    },
    {
      "epoch": 0.17412935323383086,
      "grad_norm": 4.296418190002441,
      "learning_rate": 4.444297669803981e-05,
      "loss": 2.3099,
      "step": 35
    },
    {
      "epoch": 0.1791044776119403,
      "grad_norm": 3.6311933994293213,
      "learning_rate": 3.9123857099127936e-05,
      "loss": 2.3837,
      "step": 36
    },
    {
      "epoch": 0.18407960199004975,
      "grad_norm": 4.1932902336120605,
      "learning_rate": 3.406541848999312e-05,
      "loss": 2.6655,
      "step": 37
    },
    {
      "epoch": 0.1890547263681592,
      "grad_norm": 4.752549171447754,
      "learning_rate": 2.9289321881345254e-05,
      "loss": 2.6488,
      "step": 38
    },
    {
      "epoch": 0.19402985074626866,
      "grad_norm": 4.613941669464111,
      "learning_rate": 2.4816019252102273e-05,
      "loss": 2.5856,
      "step": 39
    },
    {
      "epoch": 0.19900497512437812,
      "grad_norm": 4.24809455871582,
      "learning_rate": 2.0664665970876496e-05,
      "loss": 2.2621,
      "step": 40
    },
    {
      "epoch": 0.20398009950248755,
      "grad_norm": 4.745845794677734,
      "learning_rate": 1.6853038769745467e-05,
      "loss": 2.2354,
      "step": 41
    },
    {
      "epoch": 0.208955223880597,
      "grad_norm": 5.25393009185791,
      "learning_rate": 1.339745962155613e-05,
      "loss": 2.6063,
      "step": 42
    },
    {
      "epoch": 0.21393034825870647,
      "grad_norm": 4.762695789337158,
      "learning_rate": 1.0312725846731175e-05,
      "loss": 2.3609,
      "step": 43
    },
    {
      "epoch": 0.21890547263681592,
      "grad_norm": 4.703743934631348,
      "learning_rate": 7.612046748871327e-06,
      "loss": 2.2418,
      "step": 44
    },
    {
      "epoch": 0.22388059701492538,
      "grad_norm": 5.409986972808838,
      "learning_rate": 5.306987050489442e-06,
      "loss": 2.1651,
      "step": 45
    },
    {
      "epoch": 0.22885572139303484,
      "grad_norm": 4.685298442840576,
      "learning_rate": 3.40741737109318e-06,
      "loss": 2.1631,
      "step": 46
    },
    {
      "epoch": 0.23383084577114427,
      "grad_norm": 6.100794315338135,
      "learning_rate": 1.921471959676957e-06,
      "loss": 2.3216,
      "step": 47
    },
    {
      "epoch": 0.23880597014925373,
      "grad_norm": 4.347185134887695,
      "learning_rate": 8.555138626189618e-07,
      "loss": 2.1609,
      "step": 48
    },
    {
      "epoch": 0.24378109452736318,
      "grad_norm": 5.658843040466309,
      "learning_rate": 2.141076761396521e-07,
      "loss": 2.4468,
      "step": 49
    },
    {
      "epoch": 0.24875621890547264,
      "grad_norm": 5.9395833015441895,
      "learning_rate": 0.0,
      "loss": 2.4286,
      "step": 50
    },
    {
      "epoch": 0.24875621890547264,
      "eval_loss": 2.515887498855591,
      "eval_runtime": 6.187,
      "eval_samples_per_second": 13.739,
      "eval_steps_per_second": 6.95,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3604771504128000.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}