{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 500,
"global_step": 500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"grad_norm": 192.64206001263068,
"learning_rate": 2e-05,
"loss": 2.9869,
"step": 5
},
{
"epoch": 0.04,
"grad_norm": 48.1404574206668,
"learning_rate": 2e-05,
"loss": 1.2726,
"step": 10
},
{
"epoch": 0.06,
"grad_norm": 41.79888626205312,
"learning_rate": 2e-05,
"loss": 1.2518,
"step": 15
},
{
"epoch": 0.08,
"grad_norm": 18.107174654771164,
"learning_rate": 2e-05,
"loss": 0.694,
"step": 20
},
{
"epoch": 0.1,
"grad_norm": 30.2422290061728,
"learning_rate": 2e-05,
"loss": 0.4019,
"step": 25
},
{
"epoch": 0.12,
"grad_norm": 32.685282758571375,
"learning_rate": 2e-05,
"loss": 0.7553,
"step": 30
},
{
"epoch": 0.14,
"grad_norm": 18.979239258345306,
"learning_rate": 2e-05,
"loss": 0.5813,
"step": 35
},
{
"epoch": 0.16,
"grad_norm": 19.764858390255736,
"learning_rate": 2e-05,
"loss": 0.6364,
"step": 40
},
{
"epoch": 0.18,
"grad_norm": 22.461668082512883,
"learning_rate": 2e-05,
"loss": 0.7166,
"step": 45
},
{
"epoch": 0.2,
"grad_norm": 5.772167653543795,
"learning_rate": 2e-05,
"loss": 0.477,
"step": 50
},
{
"epoch": 0.22,
"grad_norm": 23.763305086803346,
"learning_rate": 2e-05,
"loss": 0.4109,
"step": 55
},
{
"epoch": 0.24,
"grad_norm": 19.437747488324412,
"learning_rate": 2e-05,
"loss": 0.7639,
"step": 60
},
{
"epoch": 0.26,
"grad_norm": 37.05102235979124,
"learning_rate": 2e-05,
"loss": 0.8183,
"step": 65
},
{
"epoch": 0.28,
"grad_norm": 23.014514492561837,
"learning_rate": 2e-05,
"loss": 0.6484,
"step": 70
},
{
"epoch": 0.3,
"grad_norm": 12.77252167418251,
"learning_rate": 2e-05,
"loss": 0.2884,
"step": 75
},
{
"epoch": 0.32,
"grad_norm": 11.73960064185402,
"learning_rate": 2e-05,
"loss": 0.5449,
"step": 80
},
{
"epoch": 0.34,
"grad_norm": 28.512782095251783,
"learning_rate": 2e-05,
"loss": 0.4911,
"step": 85
},
{
"epoch": 0.36,
"grad_norm": 12.937559735306024,
"learning_rate": 2e-05,
"loss": 0.4353,
"step": 90
},
{
"epoch": 0.38,
"grad_norm": 19.839752017761988,
"learning_rate": 2e-05,
"loss": 0.508,
"step": 95
},
{
"epoch": 0.4,
"grad_norm": 25.83637222146644,
"learning_rate": 2e-05,
"loss": 0.6268,
"step": 100
},
{
"epoch": 0.42,
"grad_norm": 7.866252327982729,
"learning_rate": 2e-05,
"loss": 0.2765,
"step": 105
},
{
"epoch": 0.44,
"grad_norm": 19.173168345597215,
"learning_rate": 2e-05,
"loss": 0.3429,
"step": 110
},
{
"epoch": 0.46,
"grad_norm": 7.356122490363529,
"learning_rate": 2e-05,
"loss": 0.673,
"step": 115
},
{
"epoch": 0.48,
"grad_norm": 25.452330672923093,
"learning_rate": 2e-05,
"loss": 0.2502,
"step": 120
},
{
"epoch": 0.5,
"grad_norm": 18.193546104023437,
"learning_rate": 2e-05,
"loss": 0.5538,
"step": 125
},
{
"epoch": 0.52,
"grad_norm": 10.606904391146188,
"learning_rate": 2e-05,
"loss": 0.4428,
"step": 130
},
{
"epoch": 0.54,
"grad_norm": 24.98698506555314,
"learning_rate": 2e-05,
"loss": 0.7371,
"step": 135
},
{
"epoch": 0.56,
"grad_norm": 12.932817524755643,
"learning_rate": 2e-05,
"loss": 0.3546,
"step": 140
},
{
"epoch": 0.58,
"grad_norm": 23.09632284588474,
"learning_rate": 2e-05,
"loss": 0.5592,
"step": 145
},
{
"epoch": 0.6,
"grad_norm": 12.27489087251143,
"learning_rate": 2e-05,
"loss": 0.5467,
"step": 150
},
{
"epoch": 0.62,
"grad_norm": 37.77701888603335,
"learning_rate": 2e-05,
"loss": 0.4446,
"step": 155
},
{
"epoch": 0.64,
"grad_norm": 44.43844469994968,
"learning_rate": 2e-05,
"loss": 0.5023,
"step": 160
},
{
"epoch": 0.66,
"grad_norm": 24.744024542612852,
"learning_rate": 2e-05,
"loss": 0.5357,
"step": 165
},
{
"epoch": 0.68,
"grad_norm": 23.375337882754707,
"learning_rate": 2e-05,
"loss": 0.4636,
"step": 170
},
{
"epoch": 0.7,
"grad_norm": 17.471065994038756,
"learning_rate": 2e-05,
"loss": 0.6389,
"step": 175
},
{
"epoch": 0.72,
"grad_norm": 17.504412137784108,
"learning_rate": 2e-05,
"loss": 0.4652,
"step": 180
},
{
"epoch": 0.74,
"grad_norm": 18.34290057543399,
"learning_rate": 2e-05,
"loss": 0.5353,
"step": 185
},
{
"epoch": 0.76,
"grad_norm": 10.469335873485205,
"learning_rate": 2e-05,
"loss": 0.4117,
"step": 190
},
{
"epoch": 0.78,
"grad_norm": 8.574566538117475,
"learning_rate": 2e-05,
"loss": 0.1914,
"step": 195
},
{
"epoch": 0.8,
"grad_norm": 34.729664060077845,
"learning_rate": 2e-05,
"loss": 0.9078,
"step": 200
},
{
"epoch": 0.82,
"grad_norm": 11.83902631776558,
"learning_rate": 2e-05,
"loss": 0.4132,
"step": 205
},
{
"epoch": 0.84,
"grad_norm": 19.70511531848034,
"learning_rate": 2e-05,
"loss": 0.5426,
"step": 210
},
{
"epoch": 0.86,
"grad_norm": 16.283872444359304,
"learning_rate": 2e-05,
"loss": 0.4625,
"step": 215
},
{
"epoch": 0.88,
"grad_norm": 11.310750776722527,
"learning_rate": 2e-05,
"loss": 0.6134,
"step": 220
},
{
"epoch": 0.9,
"grad_norm": 10.965542860625407,
"learning_rate": 2e-05,
"loss": 0.5924,
"step": 225
},
{
"epoch": 0.92,
"grad_norm": 13.517564564054656,
"learning_rate": 2e-05,
"loss": 0.6286,
"step": 230
},
{
"epoch": 0.94,
"grad_norm": 34.26880432074886,
"learning_rate": 2e-05,
"loss": 0.4085,
"step": 235
},
{
"epoch": 0.96,
"grad_norm": 25.030865721860277,
"learning_rate": 2e-05,
"loss": 0.3896,
"step": 240
},
{
"epoch": 0.98,
"grad_norm": 25.98960849665949,
"learning_rate": 2e-05,
"loss": 0.7899,
"step": 245
},
{
"epoch": 1.0,
"grad_norm": 8.864625940315946,
"learning_rate": 2e-05,
"loss": 0.4402,
"step": 250
},
{
"epoch": 1.02,
"grad_norm": 11.40545315116504,
"learning_rate": 2e-05,
"loss": 0.3358,
"step": 255
},
{
"epoch": 1.04,
"grad_norm": 13.463916057621617,
"learning_rate": 2e-05,
"loss": 0.3494,
"step": 260
},
{
"epoch": 1.06,
"grad_norm": 9.782398722159707,
"learning_rate": 2e-05,
"loss": 0.323,
"step": 265
},
{
"epoch": 1.08,
"grad_norm": 13.317644626850393,
"learning_rate": 2e-05,
"loss": 0.3282,
"step": 270
},
{
"epoch": 1.1,
"grad_norm": 5.139859271115599,
"learning_rate": 2e-05,
"loss": 0.2077,
"step": 275
},
{
"epoch": 1.12,
"grad_norm": 7.631702619157663,
"learning_rate": 2e-05,
"loss": 0.2774,
"step": 280
},
{
"epoch": 1.1400000000000001,
"grad_norm": 5.33820682349548,
"learning_rate": 2e-05,
"loss": 0.1876,
"step": 285
},
{
"epoch": 1.16,
"grad_norm": 17.796618873954458,
"learning_rate": 2e-05,
"loss": 0.2024,
"step": 290
},
{
"epoch": 1.18,
"grad_norm": 7.742739766893884,
"learning_rate": 2e-05,
"loss": 0.2382,
"step": 295
},
{
"epoch": 1.2,
"grad_norm": 12.272583734681321,
"learning_rate": 2e-05,
"loss": 0.1758,
"step": 300
},
{
"epoch": 1.22,
"grad_norm": 15.072772862067511,
"learning_rate": 2e-05,
"loss": 0.162,
"step": 305
},
{
"epoch": 1.24,
"grad_norm": 12.379175079681959,
"learning_rate": 2e-05,
"loss": 0.3106,
"step": 310
},
{
"epoch": 1.26,
"grad_norm": 33.48507360806824,
"learning_rate": 2e-05,
"loss": 0.1663,
"step": 315
},
{
"epoch": 1.28,
"grad_norm": 15.999333486664096,
"learning_rate": 2e-05,
"loss": 0.4353,
"step": 320
},
{
"epoch": 1.3,
"grad_norm": 14.39207981694989,
"learning_rate": 2e-05,
"loss": 0.3389,
"step": 325
},
{
"epoch": 1.32,
"grad_norm": 13.332999614759036,
"learning_rate": 2e-05,
"loss": 0.4599,
"step": 330
},
{
"epoch": 1.34,
"grad_norm": 23.928605991003675,
"learning_rate": 2e-05,
"loss": 0.2329,
"step": 335
},
{
"epoch": 1.3599999999999999,
"grad_norm": 13.34758642775842,
"learning_rate": 2e-05,
"loss": 0.2337,
"step": 340
},
{
"epoch": 1.38,
"grad_norm": 24.352266155741198,
"learning_rate": 2e-05,
"loss": 0.2545,
"step": 345
},
{
"epoch": 1.4,
"grad_norm": 10.093768669427867,
"learning_rate": 2e-05,
"loss": 0.1388,
"step": 350
},
{
"epoch": 1.42,
"grad_norm": 32.58466972795229,
"learning_rate": 2e-05,
"loss": 0.3381,
"step": 355
},
{
"epoch": 1.44,
"grad_norm": 16.995622901411814,
"learning_rate": 2e-05,
"loss": 0.2648,
"step": 360
},
{
"epoch": 1.46,
"grad_norm": 10.97816885532189,
"learning_rate": 2e-05,
"loss": 0.3115,
"step": 365
},
{
"epoch": 1.48,
"grad_norm": 11.548897196049536,
"learning_rate": 2e-05,
"loss": 0.2889,
"step": 370
},
{
"epoch": 1.5,
"grad_norm": 14.16697968734292,
"learning_rate": 2e-05,
"loss": 0.1862,
"step": 375
},
{
"epoch": 1.52,
"grad_norm": 10.212597579637505,
"learning_rate": 2e-05,
"loss": 0.2742,
"step": 380
},
{
"epoch": 1.54,
"grad_norm": 8.91459350111025,
"learning_rate": 2e-05,
"loss": 0.1768,
"step": 385
},
{
"epoch": 1.56,
"grad_norm": 4.5094557209751125,
"learning_rate": 2e-05,
"loss": 0.2998,
"step": 390
},
{
"epoch": 1.58,
"grad_norm": 17.12956574852988,
"learning_rate": 2e-05,
"loss": 0.4823,
"step": 395
},
{
"epoch": 1.6,
"grad_norm": 9.24016552408342,
"learning_rate": 2e-05,
"loss": 0.3929,
"step": 400
},
{
"epoch": 1.62,
"grad_norm": 1.7011726405852587,
"learning_rate": 2e-05,
"loss": 0.0779,
"step": 405
},
{
"epoch": 1.6400000000000001,
"grad_norm": 5.402131687184834,
"learning_rate": 2e-05,
"loss": 0.2897,
"step": 410
},
{
"epoch": 1.6600000000000001,
"grad_norm": 41.449093332441706,
"learning_rate": 2e-05,
"loss": 0.2473,
"step": 415
},
{
"epoch": 1.6800000000000002,
"grad_norm": 6.669376800386209,
"learning_rate": 2e-05,
"loss": 0.3655,
"step": 420
},
{
"epoch": 1.7,
"grad_norm": 8.23518921357868,
"learning_rate": 2e-05,
"loss": 0.3253,
"step": 425
},
{
"epoch": 1.72,
"grad_norm": 9.034537410146203,
"learning_rate": 2e-05,
"loss": 0.296,
"step": 430
},
{
"epoch": 1.74,
"grad_norm": 11.56008324710057,
"learning_rate": 2e-05,
"loss": 0.3163,
"step": 435
},
{
"epoch": 1.76,
"grad_norm": 5.894931150739263,
"learning_rate": 2e-05,
"loss": 0.3091,
"step": 440
},
{
"epoch": 1.78,
"grad_norm": 7.0458502125313345,
"learning_rate": 2e-05,
"loss": 0.2953,
"step": 445
},
{
"epoch": 1.8,
"grad_norm": 6.778888500945086,
"learning_rate": 2e-05,
"loss": 0.3389,
"step": 450
},
{
"epoch": 1.8199999999999998,
"grad_norm": 5.539170431035288,
"learning_rate": 2e-05,
"loss": 0.435,
"step": 455
},
{
"epoch": 1.8399999999999999,
"grad_norm": 3.343648740799574,
"learning_rate": 2e-05,
"loss": 0.2393,
"step": 460
},
{
"epoch": 1.8599999999999999,
"grad_norm": 12.183313738430767,
"learning_rate": 2e-05,
"loss": 0.2175,
"step": 465
},
{
"epoch": 1.88,
"grad_norm": 12.811874616374048,
"learning_rate": 2e-05,
"loss": 0.3642,
"step": 470
},
{
"epoch": 1.9,
"grad_norm": 9.28274328738779,
"learning_rate": 2e-05,
"loss": 0.2358,
"step": 475
},
{
"epoch": 1.92,
"grad_norm": 3.6965502254399536,
"learning_rate": 2e-05,
"loss": 0.1731,
"step": 480
},
{
"epoch": 1.94,
"grad_norm": 23.422711462644386,
"learning_rate": 2e-05,
"loss": 0.2284,
"step": 485
},
{
"epoch": 1.96,
"grad_norm": 6.007651568580575,
"learning_rate": 2e-05,
"loss": 0.2029,
"step": 490
},
{
"epoch": 1.98,
"grad_norm": 22.857478600516423,
"learning_rate": 2e-05,
"loss": 0.2268,
"step": 495
},
{
"epoch": 2.0,
"grad_norm": 7.607530663493723,
"learning_rate": 2e-05,
"loss": 0.396,
"step": 500
}
],
"logging_steps": 5,
"max_steps": 500,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 250,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 268959744000.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}