{
"best_metric": 0.8724454641342163,
"best_model_checkpoint": "./results/checkpoint-2700",
"epoch": 7.0,
"eval_steps": 500,
"global_step": 18900,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.18518518518518517,
"grad_norm": 2.5104269981384277,
"learning_rate": 4.94212962962963e-05,
"loss": 0.9092,
"step": 500
},
{
"epoch": 0.37037037037037035,
"grad_norm": 1.030969262123108,
"learning_rate": 4.8842592592592595e-05,
"loss": 0.869,
"step": 1000
},
{
"epoch": 0.5555555555555556,
"grad_norm": 0.49540138244628906,
"learning_rate": 4.8263888888888895e-05,
"loss": 0.8595,
"step": 1500
},
{
"epoch": 0.7407407407407407,
"grad_norm": 0.6758684515953064,
"learning_rate": 4.768518518518519e-05,
"loss": 0.8446,
"step": 2000
},
{
"epoch": 0.9259259259259259,
"grad_norm": 1.0114092826843262,
"learning_rate": 4.710648148148149e-05,
"loss": 0.8403,
"step": 2500
},
{
"epoch": 1.0,
"eval_loss": 0.8724454641342163,
"eval_runtime": 30.4341,
"eval_samples_per_second": 39.43,
"eval_steps_per_second": 9.857,
"step": 2700
},
{
"epoch": 1.1111111111111112,
"grad_norm": 0.8358873128890991,
"learning_rate": 4.652777777777778e-05,
"loss": 0.8109,
"step": 3000
},
{
"epoch": 1.2962962962962963,
"grad_norm": 0.8434183597564697,
"learning_rate": 4.594907407407408e-05,
"loss": 0.7912,
"step": 3500
},
{
"epoch": 1.4814814814814814,
"grad_norm": 1.2122215032577515,
"learning_rate": 4.5370370370370374e-05,
"loss": 0.7891,
"step": 4000
},
{
"epoch": 1.6666666666666665,
"grad_norm": 0.7747897505760193,
"learning_rate": 4.4791666666666673e-05,
"loss": 0.8132,
"step": 4500
},
{
"epoch": 1.8518518518518519,
"grad_norm": 0.9554975032806396,
"learning_rate": 4.4212962962962966e-05,
"loss": 0.8047,
"step": 5000
},
{
"epoch": 2.0,
"eval_loss": 0.8918996453285217,
"eval_runtime": 30.4698,
"eval_samples_per_second": 39.383,
"eval_steps_per_second": 9.846,
"step": 5400
},
{
"epoch": 2.037037037037037,
"grad_norm": 0.6646651029586792,
"learning_rate": 4.3634259259259266e-05,
"loss": 0.8091,
"step": 5500
},
{
"epoch": 2.2222222222222223,
"grad_norm": 4.084255218505859,
"learning_rate": 4.305555555555556e-05,
"loss": 0.7432,
"step": 6000
},
{
"epoch": 2.4074074074074074,
"grad_norm": 2.1203970909118652,
"learning_rate": 4.247685185185186e-05,
"loss": 0.7355,
"step": 6500
},
{
"epoch": 2.5925925925925926,
"grad_norm": 4.367093086242676,
"learning_rate": 4.1898148148148145e-05,
"loss": 0.751,
"step": 7000
},
{
"epoch": 2.7777777777777777,
"grad_norm": 1.1563506126403809,
"learning_rate": 4.1319444444444445e-05,
"loss": 0.7632,
"step": 7500
},
{
"epoch": 2.962962962962963,
"grad_norm": 0.9595785140991211,
"learning_rate": 4.074074074074074e-05,
"loss": 0.7714,
"step": 8000
},
{
"epoch": 3.0,
"eval_loss": 0.9504669308662415,
"eval_runtime": 30.5135,
"eval_samples_per_second": 39.327,
"eval_steps_per_second": 9.832,
"step": 8100
},
{
"epoch": 3.148148148148148,
"grad_norm": 0.6189069747924805,
"learning_rate": 4.016203703703704e-05,
"loss": 0.7277,
"step": 8500
},
{
"epoch": 3.3333333333333335,
"grad_norm": 0.6079156994819641,
"learning_rate": 3.958333333333333e-05,
"loss": 0.7373,
"step": 9000
},
{
"epoch": 3.5185185185185186,
"grad_norm": 0.4996514320373535,
"learning_rate": 3.900462962962963e-05,
"loss": 0.7206,
"step": 9500
},
{
"epoch": 3.7037037037037037,
"grad_norm": 0.9442146420478821,
"learning_rate": 3.8425925925925924e-05,
"loss": 0.7215,
"step": 10000
},
{
"epoch": 3.888888888888889,
"grad_norm": 0.46321621537208557,
"learning_rate": 3.7847222222222224e-05,
"loss": 0.7238,
"step": 10500
},
{
"epoch": 4.0,
"eval_loss": 1.0164023637771606,
"eval_runtime": 30.4613,
"eval_samples_per_second": 39.394,
"eval_steps_per_second": 9.849,
"step": 10800
},
{
"epoch": 4.074074074074074,
"grad_norm": 0.808857798576355,
"learning_rate": 3.726851851851852e-05,
"loss": 0.7143,
"step": 11000
},
{
"epoch": 4.2592592592592595,
"grad_norm": 0.2266809195280075,
"learning_rate": 3.6689814814814816e-05,
"loss": 0.7017,
"step": 11500
},
{
"epoch": 4.444444444444445,
"grad_norm": 0.8129966259002686,
"learning_rate": 3.611111111111111e-05,
"loss": 0.7165,
"step": 12000
},
{
"epoch": 4.62962962962963,
"grad_norm": 0.7154943943023682,
"learning_rate": 3.553240740740741e-05,
"loss": 0.6892,
"step": 12500
},
{
"epoch": 4.814814814814815,
"grad_norm": 0.823897659778595,
"learning_rate": 3.49537037037037e-05,
"loss": 0.7026,
"step": 13000
},
{
"epoch": 5.0,
"grad_norm": 0.7548332810401917,
"learning_rate": 3.4375e-05,
"loss": 0.7019,
"step": 13500
},
{
"epoch": 5.0,
"eval_loss": 1.056677222251892,
"eval_runtime": 30.3702,
"eval_samples_per_second": 39.512,
"eval_steps_per_second": 9.878,
"step": 13500
},
{
"epoch": 5.185185185185185,
"grad_norm": 0.6250707507133484,
"learning_rate": 3.3796296296296295e-05,
"loss": 0.7107,
"step": 14000
},
{
"epoch": 5.37037037037037,
"grad_norm": 0.7014070749282837,
"learning_rate": 3.3217592592592595e-05,
"loss": 0.696,
"step": 14500
},
{
"epoch": 5.555555555555555,
"grad_norm": 0.8305183053016663,
"learning_rate": 3.263888888888889e-05,
"loss": 0.6858,
"step": 15000
},
{
"epoch": 5.7407407407407405,
"grad_norm": 0.5459818840026855,
"learning_rate": 3.206018518518519e-05,
"loss": 0.6828,
"step": 15500
},
{
"epoch": 5.925925925925926,
"grad_norm": 0.40176087617874146,
"learning_rate": 3.148148148148148e-05,
"loss": 0.6841,
"step": 16000
},
{
"epoch": 6.0,
"eval_loss": 1.0900229215621948,
"eval_runtime": 30.4123,
"eval_samples_per_second": 39.458,
"eval_steps_per_second": 9.864,
"step": 16200
},
{
"epoch": 6.111111111111111,
"grad_norm": 0.4161689281463623,
"learning_rate": 3.090277777777778e-05,
"loss": 0.6615,
"step": 16500
},
{
"epoch": 6.296296296296296,
"grad_norm": 0.7132428288459778,
"learning_rate": 3.0324074074074077e-05,
"loss": 0.683,
"step": 17000
},
{
"epoch": 6.481481481481482,
"grad_norm": 0.6823524236679077,
"learning_rate": 2.9745370370370373e-05,
"loss": 0.6692,
"step": 17500
},
{
"epoch": 6.666666666666667,
"grad_norm": 0.3051627278327942,
"learning_rate": 2.916666666666667e-05,
"loss": 0.6771,
"step": 18000
},
{
"epoch": 6.851851851851852,
"grad_norm": 0.5912793278694153,
"learning_rate": 2.8587962962962966e-05,
"loss": 0.6959,
"step": 18500
},
{
"epoch": 7.0,
"eval_loss": 1.12686288356781,
"eval_runtime": 30.4381,
"eval_samples_per_second": 39.424,
"eval_steps_per_second": 9.856,
"step": 18900
}
],
"logging_steps": 500,
"max_steps": 43200,
"num_input_tokens_seen": 0,
"num_train_epochs": 16,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.97536776192e+16,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
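
A minimal sketch (not part of the original file) of how this trainer state might be inspected offline, using only the standard-library json module; the file path is a hypothetical assumption, and the keys read (best_metric, best_model_checkpoint, log_history, eval_loss) are exactly those present in the JSON above.

# Sketch: summarise a Hugging Face Trainer state file.
# The path below is an assumption for illustration only.
import json

with open("results/checkpoint-18900/trainer_state.json") as f:
    state = json.load(f)

# The Trainer records the best eval metric and the checkpoint that produced it.
print("best eval_loss:", state["best_metric"])
print("best checkpoint:", state["best_model_checkpoint"])

# Per-epoch eval_loss, taken from the log_history entries that contain it.
for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(f"epoch {entry['epoch']:.0f}: eval_loss = {entry['eval_loss']:.4f}")

Run against the data above, this would show eval_loss rising from about 0.87 at epoch 1 to about 1.13 at epoch 7 while the training loss keeps falling, which is why best_model_checkpoint points at ./results/checkpoint-2700.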