|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9970326409495549,
  "eval_steps": 500,
  "global_step": 168,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.005934718100890208,
      "grad_norm": 3.172546863555908,
      "learning_rate": 1.1764705882352942e-05,
      "loss": 1.7951,
      "step": 1
    },
    {
      "epoch": 0.02967359050445104,
      "grad_norm": 3.243614435195923,
      "learning_rate": 5.882352941176471e-05,
      "loss": 1.8001,
      "step": 5
    },
    {
      "epoch": 0.05934718100890208,
      "grad_norm": 2.237574577331543,
      "learning_rate": 0.00011764705882352942,
      "loss": 1.7245,
      "step": 10
    },
    {
      "epoch": 0.08902077151335312,
      "grad_norm": 2.2595160007476807,
      "learning_rate": 0.00017647058823529413,
      "loss": 1.6176,
      "step": 15
    },
    {
      "epoch": 0.11869436201780416,
      "grad_norm": 2.2810211181640625,
      "learning_rate": 0.00019980527694749952,
      "loss": 1.4882,
      "step": 20
    },
    {
      "epoch": 0.14836795252225518,
      "grad_norm": 2.004383087158203,
      "learning_rate": 0.00019861804788521493,
      "loss": 1.3673,
      "step": 25
    },
    {
      "epoch": 0.17804154302670624,
      "grad_norm": 1.0031766891479492,
      "learning_rate": 0.00019636458959356316,
      "loss": 1.3098,
      "step": 30
    },
    {
      "epoch": 0.20771513353115728,
      "grad_norm": 0.7017138004302979,
      "learning_rate": 0.00019306926579854821,
      "loss": 1.2812,
      "step": 35
    },
    {
      "epoch": 0.23738872403560832,
      "grad_norm": 0.7703685164451599,
      "learning_rate": 0.00018876770456851877,
      "loss": 1.265,
      "step": 40
    },
    {
      "epoch": 0.26706231454005935,
      "grad_norm": 0.7199541926383972,
      "learning_rate": 0.00018350641311400812,
      "loss": 1.2555,
      "step": 45
    },
    {
      "epoch": 0.29673590504451036,
      "grad_norm": 0.7666878700256348,
      "learning_rate": 0.0001773422749654988,
      "loss": 1.2433,
      "step": 50
    },
    {
      "epoch": 0.3264094955489614,
      "grad_norm": 1.0030404329299927,
      "learning_rate": 0.00017034193496547902,
      "loss": 1.2443,
      "step": 55
    },
    {
      "epoch": 0.3560830860534125,
      "grad_norm": 0.6837704181671143,
      "learning_rate": 0.00016258107872407375,
      "loss": 1.2282,
      "step": 60
    },
    {
      "epoch": 0.3857566765578635,
      "grad_norm": 0.6520164608955383,
      "learning_rate": 0.00015414361432856475,
      "loss": 1.2225,
      "step": 65
    },
    {
      "epoch": 0.41543026706231456,
      "grad_norm": 0.6578584909439087,
      "learning_rate": 0.00014512076515391375,
      "loss": 1.2227,
      "step": 70
    },
    {
      "epoch": 0.44510385756676557,
      "grad_norm": 0.7506173849105835,
      "learning_rate": 0.00013561008358255468,
      "loss": 1.2281,
      "step": 75
    },
    {
      "epoch": 0.47477744807121663,
      "grad_norm": 0.6809776425361633,
      "learning_rate": 0.0001257143962968246,
      "loss": 1.2078,
      "step": 80
    },
    {
      "epoch": 0.5044510385756676,
      "grad_norm": 0.6472010016441345,
      "learning_rate": 0.00011554069254722051,
      "loss": 1.2185,
      "step": 85
    },
    {
      "epoch": 0.5341246290801187,
      "grad_norm": 0.6473718881607056,
      "learning_rate": 0.00010519896741619803,
      "loss": 1.2216,
      "step": 90
    },
    {
      "epoch": 0.5637982195845698,
      "grad_norm": 0.6487621068954468,
      "learning_rate": 9.480103258380198e-05,
      "loss": 1.2095,
      "step": 95
    },
    {
      "epoch": 0.5934718100890207,
      "grad_norm": 0.7169174551963806,
      "learning_rate": 8.445930745277953e-05,
      "loss": 1.2196,
      "step": 100
    },
    {
      "epoch": 0.6231454005934718,
      "grad_norm": 0.6917266249656677,
      "learning_rate": 7.428560370317542e-05,
      "loss": 1.2122,
      "step": 105
    },
    {
      "epoch": 0.6528189910979229,
      "grad_norm": 0.6533765196800232,
      "learning_rate": 6.43899164174453e-05,
      "loss": 1.1987,
      "step": 110
    },
    {
      "epoch": 0.6824925816023739,
      "grad_norm": 0.740526556968689,
      "learning_rate": 5.487923484608629e-05,
      "loss": 1.2059,
      "step": 115
    },
    {
      "epoch": 0.712166172106825,
      "grad_norm": 0.7236852645874023,
      "learning_rate": 4.585638567143529e-05,
      "loss": 1.2049,
      "step": 120
    },
    {
      "epoch": 0.7418397626112759,
      "grad_norm": 0.6368817090988159,
      "learning_rate": 3.741892127592625e-05,
      "loss": 1.2139,
      "step": 125
    },
    {
      "epoch": 0.771513353115727,
      "grad_norm": 0.6370018124580383,
      "learning_rate": 2.9658065034520978e-05,
      "loss": 1.2009,
      "step": 130
    },
    {
      "epoch": 0.8011869436201781,
      "grad_norm": 0.691111147403717,
      "learning_rate": 2.265772503450122e-05,
      "loss": 1.1977,
      "step": 135
    },
    {
      "epoch": 0.8308605341246291,
      "grad_norm": 0.6910406947135925,
      "learning_rate": 1.649358688599191e-05,
      "loss": 1.2049,
      "step": 140
    },
    {
      "epoch": 0.8605341246290801,
      "grad_norm": 0.669152557849884,
      "learning_rate": 1.1232295431481222e-05,
      "loss": 1.2018,
      "step": 145
    },
    {
      "epoch": 0.8902077151335311,
      "grad_norm": 0.6983711123466492,
      "learning_rate": 6.930734201451816e-06,
      "loss": 1.2049,
      "step": 150
    },
    {
      "epoch": 0.9198813056379822,
      "grad_norm": 0.7779198288917542,
      "learning_rate": 3.6354104064368566e-06,
      "loss": 1.1999,
      "step": 155
    },
    {
      "epoch": 0.9495548961424333,
      "grad_norm": 0.6764826774597168,
      "learning_rate": 1.3819521147851123e-06,
      "loss": 1.1995,
      "step": 160
    },
    {
      "epoch": 0.9792284866468842,
      "grad_norm": 0.6475749611854553,
      "learning_rate": 1.947230525005006e-07,
      "loss": 1.1865,
      "step": 165
    },
    {
      "epoch": 0.9970326409495549,
      "eval_loss": 1.8814080953598022,
      "eval_runtime": 0.6632,
      "eval_samples_per_second": 21.108,
      "eval_steps_per_second": 1.508,
      "step": 168
    },
    {
      "epoch": 0.9970326409495549,
      "step": 168,
      "total_flos": 8.227900459906499e+17,
      "train_loss": 1.2773614100047521,
      "train_runtime": 661.1188,
      "train_samples_per_second": 57.022,
      "train_steps_per_second": 0.254
    }
  ],
  "logging_steps": 5,
  "max_steps": 168,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 8.227900459906499e+17,
  "train_batch_size": 14,
  "trial_name": null,
  "trial_params": null
}
|
|