{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.575107296137339,
  "eval_steps": 500,
  "global_step": 3000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.08583690987124463,
      "grad_norm": 0.08846336603164673,
      "learning_rate": 0.0001998960663781063,
      "loss": 0.7092,
      "step": 100
    },
    {
      "epoch": 0.17167381974248927,
      "grad_norm": 0.10057297348976135,
      "learning_rate": 0.00019906589321760313,
      "loss": 0.5514,
      "step": 200
    },
    {
      "epoch": 0.2575107296137339,
      "grad_norm": 0.10845289379358292,
      "learning_rate": 0.00019741244594178857,
      "loss": 0.4897,
      "step": 300
    },
    {
      "epoch": 0.34334763948497854,
      "grad_norm": 0.08657114207744598,
      "learning_rate": 0.00019494946530743566,
      "loss": 0.4674,
      "step": 400
    },
    {
      "epoch": 0.4291845493562232,
      "grad_norm": 0.20552553236484528,
      "learning_rate": 0.00019169741959214142,
      "loss": 0.4537,
      "step": 500
    },
    {
      "epoch": 0.5150214592274678,
      "grad_norm": 0.1217830628156662,
      "learning_rate": 0.0001876833344953899,
      "loss": 0.4358,
      "step": 600
    },
    {
      "epoch": 0.6008583690987125,
      "grad_norm": 0.1439305692911148,
      "learning_rate": 0.0001829405685450202,
      "loss": 0.418,
      "step": 700
    },
    {
      "epoch": 0.6866952789699571,
      "grad_norm": 0.16524147987365723,
      "learning_rate": 0.00017750853587555535,
      "loss": 0.4065,
      "step": 800
    },
    {
      "epoch": 0.7725321888412017,
      "grad_norm": 0.14308129251003265,
      "learning_rate": 0.00017143237868220553,
      "loss": 0.4022,
      "step": 900
    },
    {
      "epoch": 0.8583690987124464,
      "grad_norm": 0.12405655533075333,
      "learning_rate": 0.00016476259207257407,
      "loss": 0.388,
      "step": 1000
    },
    {
      "epoch": 0.944206008583691,
      "grad_norm": 0.0915222093462944,
      "learning_rate": 0.0001575546044336872,
      "loss": 0.3849,
      "step": 1100
    },
    {
      "epoch": 1.0300429184549356,
      "grad_norm": 0.12163916975259781,
      "learning_rate": 0.00014986831680165167,
      "loss": 0.3715,
      "step": 1200
    },
    {
      "epoch": 1.1158798283261802,
      "grad_norm": 0.10504616051912308,
      "learning_rate": 0.00014176760506194906,
      "loss": 0.3579,
      "step": 1300
    },
    {
      "epoch": 1.201716738197425,
      "grad_norm": 0.11827877163887024,
      "learning_rate": 0.00013331978911726523,
      "loss": 0.3501,
      "step": 1400
    },
    {
      "epoch": 1.2875536480686696,
      "grad_norm": 0.12604044377803802,
      "learning_rate": 0.00012459507343426653,
      "loss": 0.3479,
      "step": 1500
    },
    {
      "epoch": 1.3733905579399142,
      "grad_norm": 0.13938841223716736,
      "learning_rate": 0.00011566596361858548,
      "loss": 0.3398,
      "step": 1600
    },
    {
      "epoch": 1.4592274678111588,
      "grad_norm": 0.15155862271785736,
      "learning_rate": 0.0001066066638664925,
      "loss": 0.3306,
      "step": 1700
    },
    {
      "epoch": 1.5450643776824036,
      "grad_norm": 0.11944034695625305,
      "learning_rate": 9.749246030065306e-05,
      "loss": 0.3325,
      "step": 1800
    },
    {
      "epoch": 1.6309012875536482,
      "grad_norm": 0.09199097752571106,
      "learning_rate": 8.839909531467737e-05,
      "loss": 0.3358,
      "step": 1900
    },
    {
      "epoch": 1.7167381974248928,
      "grad_norm": 0.1427503079175949,
      "learning_rate": 7.940213812589018e-05,
      "loss": 0.3292,
      "step": 2000
    },
    {
      "epoch": 1.8025751072961373,
      "grad_norm": 0.1335051953792572,
      "learning_rate": 7.057635676725945e-05,
      "loss": 0.3262,
      "step": 2100
    },
    {
      "epoch": 1.888412017167382,
      "grad_norm": 0.12322486191987991,
      "learning_rate": 6.199509673746246e-05,
      "loss": 0.3282,
      "step": 2200
    },
    {
      "epoch": 1.9742489270386265,
      "grad_norm": 0.11185155063867569,
      "learning_rate": 5.372967147273683e-05,
      "loss": 0.3269,
      "step": 2300
    },
    {
      "epoch": 2.060085836909871,
      "grad_norm": 0.08911816030740738,
      "learning_rate": 4.584876970591957e-05,
      "loss": 0.3121,
      "step": 2400
    },
    {
      "epoch": 2.1459227467811157,
      "grad_norm": 0.0984240174293518,
      "learning_rate": 3.841788463774003e-05,
      "loss": 0.3177,
      "step": 2500
    },
    {
      "epoch": 2.2317596566523603,
      "grad_norm": 0.09516295790672302,
      "learning_rate": 3.149876966416321e-05,
      "loss": 0.3144,
      "step": 2600
    },
    {
      "epoch": 2.317596566523605,
      "grad_norm": 0.12630854547023773,
      "learning_rate": 2.514892518288988e-05,
      "loss": 0.3187,
      "step": 2700
    },
    {
      "epoch": 2.40343347639485,
      "grad_norm": 0.12051806598901749,
      "learning_rate": 1.9421120743841902e-05,
      "loss": 0.308,
      "step": 2800
    },
    {
      "epoch": 2.4892703862660945,
      "grad_norm": 0.12218445539474487,
      "learning_rate": 1.436295651473667e-05,
      "loss": 0.3062,
      "step": 2900
    },
    {
      "epoch": 2.575107296137339,
      "grad_norm": 0.10438164323568344,
      "learning_rate": 1.0016467706135135e-05,
      "loss": 0.3147,
      "step": 3000
    }
  ],
  "logging_steps": 100,
  "max_steps": 3495,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3.9668426131636224e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}