mistral7b-lora-closedqa-11-v1 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9970326409495549,
"eval_steps": 500,
"global_step": 168,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.005934718100890208,
"grad_norm": 3.0403032302856445,
"learning_rate": 1.1764705882352942e-05,
"loss": 1.8006,
"step": 1
},
{
"epoch": 0.02967359050445104,
"grad_norm": 3.026293992996216,
"learning_rate": 5.882352941176471e-05,
"loss": 1.8024,
"step": 5
},
{
"epoch": 0.05934718100890208,
"grad_norm": 2.2480087280273438,
"learning_rate": 0.00011764705882352942,
"loss": 1.7257,
"step": 10
},
{
"epoch": 0.08902077151335312,
"grad_norm": 2.212745428085327,
"learning_rate": 0.00017647058823529413,
"loss": 1.6182,
"step": 15
},
{
"epoch": 0.11869436201780416,
"grad_norm": 2.061483144760132,
"learning_rate": 0.00019980527694749952,
"loss": 1.4913,
"step": 20
},
{
"epoch": 0.14836795252225518,
"grad_norm": 1.8516631126403809,
"learning_rate": 0.00019861804788521493,
"loss": 1.3717,
"step": 25
},
{
"epoch": 0.17804154302670624,
"grad_norm": 1.0224816799163818,
"learning_rate": 0.00019636458959356316,
"loss": 1.3122,
"step": 30
},
{
"epoch": 0.20771513353115728,
"grad_norm": 0.6997563242912292,
"learning_rate": 0.00019306926579854821,
"loss": 1.2841,
"step": 35
},
{
"epoch": 0.23738872403560832,
"grad_norm": 0.7542662024497986,
"learning_rate": 0.00018876770456851877,
"loss": 1.2682,
"step": 40
},
{
"epoch": 0.26706231454005935,
"grad_norm": 0.7191608548164368,
"learning_rate": 0.00018350641311400812,
"loss": 1.2583,
"step": 45
},
{
"epoch": 0.29673590504451036,
"grad_norm": 0.8058047890663147,
"learning_rate": 0.0001773422749654988,
"loss": 1.2459,
"step": 50
},
{
"epoch": 0.3264094955489614,
"grad_norm": 0.7011504769325256,
"learning_rate": 0.00017034193496547902,
"loss": 1.247,
"step": 55
},
{
"epoch": 0.3560830860534125,
"grad_norm": 0.6850998997688293,
"learning_rate": 0.00016258107872407375,
"loss": 1.2311,
"step": 60
},
{
"epoch": 0.3857566765578635,
"grad_norm": 0.6663928627967834,
"learning_rate": 0.00015414361432856475,
"loss": 1.2248,
"step": 65
},
{
"epoch": 0.41543026706231456,
"grad_norm": 0.6247283816337585,
"learning_rate": 0.00014512076515391375,
"loss": 1.2249,
"step": 70
},
{
"epoch": 0.44510385756676557,
"grad_norm": 0.7638198733329773,
"learning_rate": 0.00013561008358255468,
"loss": 1.2306,
"step": 75
},
{
"epoch": 0.47477744807121663,
"grad_norm": 0.7044103145599365,
"learning_rate": 0.0001257143962968246,
"loss": 1.2097,
"step": 80
},
{
"epoch": 0.5044510385756676,
"grad_norm": 0.6669860482215881,
"learning_rate": 0.00011554069254722051,
"loss": 1.2206,
"step": 85
},
{
"epoch": 0.5341246290801187,
"grad_norm": 0.6756629347801208,
"learning_rate": 0.00010519896741619803,
"loss": 1.2232,
"step": 90
},
{
"epoch": 0.5637982195845698,
"grad_norm": 0.6518044471740723,
"learning_rate": 9.480103258380198e-05,
"loss": 1.2115,
"step": 95
},
{
"epoch": 0.5934718100890207,
"grad_norm": 0.7202388644218445,
"learning_rate": 8.445930745277953e-05,
"loss": 1.2217,
"step": 100
},
{
"epoch": 0.6231454005934718,
"grad_norm": 0.7034718990325928,
"learning_rate": 7.428560370317542e-05,
"loss": 1.2141,
"step": 105
},
{
"epoch": 0.6528189910979229,
"grad_norm": 0.7892000675201416,
"learning_rate": 6.43899164174453e-05,
"loss": 1.2007,
"step": 110
},
{
"epoch": 0.6824925816023739,
"grad_norm": 0.7199309468269348,
"learning_rate": 5.487923484608629e-05,
"loss": 1.2074,
"step": 115
},
{
"epoch": 0.712166172106825,
"grad_norm": 0.7223320603370667,
"learning_rate": 4.585638567143529e-05,
"loss": 1.2068,
"step": 120
},
{
"epoch": 0.7418397626112759,
"grad_norm": 0.6834920644760132,
"learning_rate": 3.741892127592625e-05,
"loss": 1.2158,
"step": 125
},
{
"epoch": 0.771513353115727,
"grad_norm": 0.6318499445915222,
"learning_rate": 2.9658065034520978e-05,
"loss": 1.2025,
"step": 130
},
{
"epoch": 0.8011869436201781,
"grad_norm": 0.7108297348022461,
"learning_rate": 2.265772503450122e-05,
"loss": 1.1998,
"step": 135
},
{
"epoch": 0.8308605341246291,
"grad_norm": 0.6781418919563293,
"learning_rate": 1.649358688599191e-05,
"loss": 1.2071,
"step": 140
},
{
"epoch": 0.8605341246290801,
"grad_norm": 0.6723542213439941,
"learning_rate": 1.1232295431481222e-05,
"loss": 1.2034,
"step": 145
},
{
"epoch": 0.8902077151335311,
"grad_norm": 0.694801926612854,
"learning_rate": 6.930734201451816e-06,
"loss": 1.2069,
"step": 150
},
{
"epoch": 0.9198813056379822,
"grad_norm": 0.7542781233787537,
"learning_rate": 3.6354104064368566e-06,
"loss": 1.2018,
"step": 155
},
{
"epoch": 0.9495548961424333,
"grad_norm": 0.6740858554840088,
"learning_rate": 1.3819521147851123e-06,
"loss": 1.2015,
"step": 160
},
{
"epoch": 0.9792284866468842,
"grad_norm": 0.6539055109024048,
"learning_rate": 1.947230525005006e-07,
"loss": 1.1885,
"step": 165
},
{
"epoch": 0.9970326409495549,
"eval_loss": 1.881588339805603,
"eval_runtime": 1.5002,
"eval_samples_per_second": 9.332,
"eval_steps_per_second": 0.667,
"step": 168
},
{
"epoch": 0.9970326409495549,
"step": 168,
"total_flos": 8.227900459906499e+17,
"train_loss": 1.2795698436952772,
"train_runtime": 2845.3749,
"train_samples_per_second": 13.249,
"train_steps_per_second": 0.059
}
],
"logging_steps": 5,
"max_steps": 168,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 8.227900459906499e+17,
"train_batch_size": 14,
"trial_name": null,
"trial_params": null
}
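
The log_history above is plain JSON, so it can be inspected without the transformers library. The following is a minimal Python sketch (not part of the original repository) that reads the file and prints the logged loss and learning rate per step; the local file path is an assumption, not something recorded in this state file.

import json

# Minimal sketch: load a local copy of trainer_state.json.
# "trainer_state.json" is an assumed path; point it at the downloaded file.
with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    # Training records carry "loss"; the eval record ("eval_loss") and the
    # final summary record ("train_loss") do not, so they are skipped here.
    if "loss" in entry:
        print(f"step {entry['step']:>3}  "
              f"loss {entry['loss']:.4f}  "
              f"lr {entry['learning_rate']:.2e}")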