pipeline_test / checkpoint-400 / trainer_state.json
Training in progress, step 400, checkpoint (commit ed3be56, verified)
{
"best_metric": 0.4464975595474243,
"best_model_checkpoint": "./llama3/30-08-24-Weni-Pipeline_test_Experiment with SFT and Llama3 70b-2_max_steps-1362_batch_8_2024-08-30/checkpoint-400",
"epoch": 1.76017601760176,
"eval_steps": 100,
"global_step": 400,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.04400440044004401,
"grad_norm": 0.5568628907203674,
"learning_rate": 7.5e-05,
"loss": 2.0875,
"step": 10
},
{
"epoch": 0.08800880088008801,
"grad_norm": 0.2537558972835541,
"learning_rate": 0.00015,
"loss": 0.9378,
"step": 20
},
{
"epoch": 0.132013201320132,
"grad_norm": 0.24558919668197632,
"learning_rate": 0.000225,
"loss": 0.7,
"step": 30
},
{
"epoch": 0.17601760176017603,
"grad_norm": 0.13937097787857056,
"learning_rate": 0.0003,
"loss": 0.6298,
"step": 40
},
{
"epoch": 0.22002200220022003,
"grad_norm": 0.1871194988489151,
"learning_rate": 0.00029995764763563235,
"loss": 0.6321,
"step": 50
},
{
"epoch": 0.264026402640264,
"grad_norm": 0.14626263082027435,
"learning_rate": 0.00029983061445883305,
"loss": 0.6403,
"step": 60
},
{
"epoch": 0.30803080308030806,
"grad_norm": 0.12049665302038193,
"learning_rate": 0.0002996189722050073,
"loss": 0.5998,
"step": 70
},
{
"epoch": 0.35203520352035206,
"grad_norm": 0.13617923855781555,
"learning_rate": 0.0002993228403881531,
"loss": 0.5942,
"step": 80
},
{
"epoch": 0.39603960396039606,
"grad_norm": 0.1271793246269226,
"learning_rate": 0.00029894238623337174,
"loss": 0.5647,
"step": 90
},
{
"epoch": 0.44004400440044006,
"grad_norm": 0.18757876753807068,
"learning_rate": 0.00029847782458243663,
"loss": 0.5619,
"step": 100
},
{
"epoch": 0.44004400440044006,
"eval_accuracy": 1.0,
"eval_f1": 1.0,
"eval_f1_macro": 1.0,
"eval_f1_micro": 1.0,
"eval_loss": 0.5742923021316528,
"eval_precision": 1.0,
"eval_precision_macro": 1.0,
"eval_precision_micro": 1.0,
"eval_recall": 1.0,
"eval_recall_macro": 1.0,
"eval_recall_micro": 1.0,
"eval_runtime": 90.5857,
"eval_samples_per_second": 4.46,
"eval_steps_per_second": 1.115,
"step": 100
},
{
"epoch": 0.48404840484048406,
"grad_norm": 0.14132679998874664,
"learning_rate": 0.00029792941777247184,
"loss": 0.5584,
"step": 110
},
{
"epoch": 0.528052805280528,
"grad_norm": 0.15474887192249298,
"learning_rate": 0.0002972974754878111,
"loss": 0.5752,
"step": 120
},
{
"epoch": 0.5720572057205721,
"grad_norm": 0.13014496862888336,
"learning_rate": 0.0002965823545851199,
"loss": 0.5565,
"step": 130
},
{
"epoch": 0.6160616061606161,
"grad_norm": 0.12456662207841873,
"learning_rate": 0.00029578445889187865,
"loss": 0.5722,
"step": 140
},
{
"epoch": 0.6600660066006601,
"grad_norm": 0.12824317812919617,
"learning_rate": 0.00029490423897834234,
"loss": 0.523,
"step": 150
},
{
"epoch": 0.7040704070407041,
"grad_norm": 0.14279119670391083,
"learning_rate": 0.0002939421919031044,
"loss": 0.5523,
"step": 160
},
{
"epoch": 0.7480748074807481,
"grad_norm": 0.11781885474920273,
"learning_rate": 0.00029289886093240847,
"loss": 0.5291,
"step": 170
},
{
"epoch": 0.7920792079207921,
"grad_norm": 0.1608349233865738,
"learning_rate": 0.0002917748352333667,
"loss": 0.5417,
"step": 180
},
{
"epoch": 0.8360836083608361,
"grad_norm": 0.13777320086956024,
"learning_rate": 0.0002905707495412589,
"loss": 0.4967,
"step": 190
},
{
"epoch": 0.8800880088008801,
"grad_norm": 0.21577192842960358,
"learning_rate": 0.00028928728380109764,
"loss": 0.6545,
"step": 200
},
{
"epoch": 0.8800880088008801,
"eval_accuracy": 1.0,
"eval_f1": 1.0,
"eval_f1_macro": 1.0,
"eval_f1_micro": 1.0,
"eval_loss": 0.6772989630699158,
"eval_precision": 1.0,
"eval_precision_macro": 1.0,
"eval_precision_micro": 1.0,
"eval_recall": 1.0,
"eval_recall_macro": 1.0,
"eval_recall_micro": 1.0,
"eval_runtime": 90.2067,
"eval_samples_per_second": 4.479,
"eval_steps_per_second": 1.12,
"step": 200
},
{
"epoch": 0.9240924092409241,
"grad_norm": 3.4556286334991455,
"learning_rate": 0.00028792516278366547,
"loss": 2.5144,
"step": 210
},
{
"epoch": 0.9680968096809681,
"grad_norm": 0.13046959042549133,
"learning_rate": 0.00028648515567623764,
"loss": 0.6004,
"step": 220
},
{
"epoch": 1.012101210121012,
"grad_norm": 0.39528125524520874,
"learning_rate": 0.0002849680756482235,
"loss": 0.5174,
"step": 230
},
{
"epoch": 1.056105610561056,
"grad_norm": 0.23764920234680176,
"learning_rate": 0.00028337477939197135,
"loss": 0.4065,
"step": 240
},
{
"epoch": 1.1001100110011002,
"grad_norm": 0.14821326732635498,
"learning_rate": 0.0002817061666389958,
"loss": 0.4425,
"step": 250
},
{
"epoch": 1.1441144114411441,
"grad_norm": 0.15903742611408234,
"learning_rate": 0.0002799631796519007,
"loss": 0.4107,
"step": 260
},
{
"epoch": 1.188118811881188,
"grad_norm": 0.16189326345920563,
"learning_rate": 0.00027814680269228574,
"loss": 0.4215,
"step": 270
},
{
"epoch": 1.2321232123212322,
"grad_norm": 0.20622508227825165,
"learning_rate": 0.00027625806146493523,
"loss": 0.3968,
"step": 280
},
{
"epoch": 1.2761276127612762,
"grad_norm": 0.17622467875480652,
"learning_rate": 0.0002742980225386045,
"loss": 0.4419,
"step": 290
},
{
"epoch": 1.3201320132013201,
"grad_norm": 0.12472284585237503,
"learning_rate": 0.0002722677927437307,
"loss": 0.396,
"step": 300
},
{
"epoch": 1.3201320132013201,
"eval_accuracy": 1.0,
"eval_f1": 1.0,
"eval_f1_macro": 1.0,
"eval_f1_micro": 1.0,
"eval_loss": 0.5126128792762756,
"eval_precision": 1.0,
"eval_precision_macro": 1.0,
"eval_precision_micro": 1.0,
"eval_recall": 1.0,
"eval_recall_macro": 1.0,
"eval_recall_micro": 1.0,
"eval_runtime": 90.5135,
"eval_samples_per_second": 4.463,
"eval_steps_per_second": 1.116,
"step": 300
},
{
"epoch": 1.364136413641364,
"grad_norm": 0.17043285071849823,
"learning_rate": 0.0002701685185474076,
"loss": 0.4297,
"step": 310
},
{
"epoch": 1.408140814081408,
"grad_norm": 0.12771165370941162,
"learning_rate": 0.00026800138540597723,
"loss": 0.4174,
"step": 320
},
{
"epoch": 1.4521452145214522,
"grad_norm": 0.18149854242801666,
"learning_rate": 0.00026576761709560555,
"loss": 0.4313,
"step": 330
},
{
"epoch": 1.4961496149614961,
"grad_norm": 0.17737938463687897,
"learning_rate": 0.00026346847502121783,
"loss": 0.3693,
"step": 340
},
{
"epoch": 1.5401540154015403,
"grad_norm": 1.699652910232544,
"learning_rate": 0.0002611052575041856,
"loss": 0.4237,
"step": 350
},
{
"epoch": 1.5841584158415842,
"grad_norm": 0.18545816838741302,
"learning_rate": 0.00025867929904916704,
"loss": 0.3925,
"step": 360
},
{
"epoch": 1.6281628162816282,
"grad_norm": 0.16388830542564392,
"learning_rate": 0.0002561919695905145,
"loss": 0.3759,
"step": 370
},
{
"epoch": 1.6721672167216721,
"grad_norm": 0.1380474716424942,
"learning_rate": 0.0002536446737186751,
"loss": 0.3859,
"step": 380
},
{
"epoch": 1.716171617161716,
"grad_norm": 0.16071344912052155,
"learning_rate": 0.0002510388498870211,
"loss": 0.41,
"step": 390
},
{
"epoch": 1.76017601760176,
"grad_norm": 0.15108689665794373,
"learning_rate": 0.00024837596959955777,
"loss": 0.3645,
"step": 400
},
{
"epoch": 1.76017601760176,
"eval_accuracy": 1.0,
"eval_f1": 1.0,
"eval_f1_macro": 1.0,
"eval_f1_micro": 1.0,
"eval_loss": 0.4464975595474243,
"eval_precision": 1.0,
"eval_precision_macro": 1.0,
"eval_precision_micro": 1.0,
"eval_recall": 1.0,
"eval_recall_macro": 1.0,
"eval_recall_micro": 1.0,
"eval_runtime": 90.5484,
"eval_samples_per_second": 4.462,
"eval_steps_per_second": 1.115,
"step": 400
}
],
"logging_steps": 10,
"max_steps": 1362,
"num_input_tokens_seen": 0,
"num_train_epochs": 6,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 5.589901503407063e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
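
For reference, a minimal sketch of how the state above can be inspected offline. It assumes the file has been downloaded locally as `trainer_state.json` (the filename is an assumption; point it at your own copy). It separates the training-loss entries in `log_history` (keyed by `loss`) from the evaluation entries (keyed by `eval_loss`) and prints the eval curve recorded every 100 steps:

```python
import json

# Sketch only: the local path "trainer_state.json" is an assumption,
# adjust it to wherever you saved this checkpoint's state file.
with open("trainer_state.json") as f:
    state = json.load(f)

# log_history mixes two kinds of entries:
#   - training logs, which carry a "loss" key (every logging_steps = 10 steps)
#   - evaluation logs, which carry "eval_loss" and the eval_* metrics (every eval_steps = 100 steps)
train_logs = [entry for entry in state["log_history"] if "loss" in entry]
eval_logs = [entry for entry in state["log_history"] if "eval_loss" in entry]

print("best_metric (eval_loss):", state["best_metric"])
print("best_model_checkpoint:", state["best_model_checkpoint"])

# Print the recorded evaluation curve.
for entry in eval_logs:
    print(f"step {entry['step']:>4}  epoch {entry['epoch']:.2f}  eval_loss {entry['eval_loss']:.4f}")
```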