{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.5594541910331383,
"eval_steps": 500,
"global_step": 250,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.031189083820662766,
"grad_norm": 0.0576171875,
"learning_rate": 0.0001,
"loss": 0.674,
"step": 5
},
{
"epoch": 0.06237816764132553,
"grad_norm": 0.052490234375,
"learning_rate": 0.0001,
"loss": 0.5118,
"step": 10
},
{
"epoch": 0.0935672514619883,
"grad_norm": 0.03857421875,
"learning_rate": 0.0001,
"loss": 0.4572,
"step": 15
},
{
"epoch": 0.12475633528265107,
"grad_norm": 0.039794921875,
"learning_rate": 0.0001,
"loss": 0.4645,
"step": 20
},
{
"epoch": 0.15594541910331383,
"grad_norm": 0.0537109375,
"learning_rate": 0.0001,
"loss": 0.4772,
"step": 25
},
{
"epoch": 0.1871345029239766,
"grad_norm": 0.059814453125,
"learning_rate": 0.0001,
"loss": 0.4466,
"step": 30
},
{
"epoch": 0.21832358674463936,
"grad_norm": 0.0478515625,
"learning_rate": 0.0001,
"loss": 0.4365,
"step": 35
},
{
"epoch": 0.24951267056530213,
"grad_norm": 0.138671875,
"learning_rate": 0.0001,
"loss": 0.4622,
"step": 40
},
{
"epoch": 0.2807017543859649,
"grad_norm": 0.046875,
"learning_rate": 0.0001,
"loss": 0.3409,
"step": 45
},
{
"epoch": 0.31189083820662766,
"grad_norm": 0.0289306640625,
"learning_rate": 0.0001,
"loss": 0.3158,
"step": 50
},
{
"epoch": 0.34307992202729043,
"grad_norm": 0.029541015625,
"learning_rate": 0.0001,
"loss": 0.3236,
"step": 55
},
{
"epoch": 0.3742690058479532,
"grad_norm": 0.0274658203125,
"learning_rate": 0.0001,
"loss": 0.3273,
"step": 60
},
{
"epoch": 0.40545808966861596,
"grad_norm": 0.031494140625,
"learning_rate": 0.0001,
"loss": 0.3142,
"step": 65
},
{
"epoch": 0.43664717348927873,
"grad_norm": 0.0269775390625,
"learning_rate": 0.0001,
"loss": 0.3276,
"step": 70
},
{
"epoch": 0.4678362573099415,
"grad_norm": 0.02978515625,
"learning_rate": 0.0001,
"loss": 0.334,
"step": 75
},
{
"epoch": 0.49902534113060426,
"grad_norm": 0.0703125,
"learning_rate": 0.0001,
"loss": 0.356,
"step": 80
},
{
"epoch": 0.530214424951267,
"grad_norm": 0.03271484375,
"learning_rate": 0.0001,
"loss": 0.3176,
"step": 85
},
{
"epoch": 0.5614035087719298,
"grad_norm": 0.0322265625,
"learning_rate": 0.0001,
"loss": 0.2829,
"step": 90
},
{
"epoch": 0.5925925925925926,
"grad_norm": 0.02587890625,
"learning_rate": 0.0001,
"loss": 0.2984,
"step": 95
},
{
"epoch": 0.6237816764132553,
"grad_norm": 0.0296630859375,
"learning_rate": 0.0001,
"loss": 0.3067,
"step": 100
},
{
"epoch": 0.6549707602339181,
"grad_norm": 0.0264892578125,
"learning_rate": 0.0001,
"loss": 0.2979,
"step": 105
},
{
"epoch": 0.6861598440545809,
"grad_norm": 0.02783203125,
"learning_rate": 0.0001,
"loss": 0.3012,
"step": 110
},
{
"epoch": 0.7173489278752436,
"grad_norm": 0.031494140625,
"learning_rate": 0.0001,
"loss": 0.3222,
"step": 115
},
{
"epoch": 0.7485380116959064,
"grad_norm": 0.06982421875,
"learning_rate": 0.0001,
"loss": 0.3439,
"step": 120
},
{
"epoch": 0.7797270955165692,
"grad_norm": 0.031982421875,
"learning_rate": 0.0001,
"loss": 0.2875,
"step": 125
},
{
"epoch": 0.8109161793372319,
"grad_norm": 0.029296875,
"learning_rate": 0.0001,
"loss": 0.2803,
"step": 130
},
{
"epoch": 0.8421052631578947,
"grad_norm": 0.029541015625,
"learning_rate": 0.0001,
"loss": 0.2999,
"step": 135
},
{
"epoch": 0.8732943469785575,
"grad_norm": 0.02734375,
"learning_rate": 0.0001,
"loss": 0.2986,
"step": 140
},
{
"epoch": 0.9044834307992202,
"grad_norm": 0.028076171875,
"learning_rate": 0.0001,
"loss": 0.302,
"step": 145
},
{
"epoch": 0.935672514619883,
"grad_norm": 0.0322265625,
"learning_rate": 0.0001,
"loss": 0.2936,
"step": 150
},
{
"epoch": 0.9668615984405458,
"grad_norm": 0.034423828125,
"learning_rate": 0.0001,
"loss": 0.3017,
"step": 155
},
{
"epoch": 0.9980506822612085,
"grad_norm": 0.060546875,
"learning_rate": 0.0001,
"loss": 0.3486,
"step": 160
},
{
"epoch": 1.0292397660818713,
"grad_norm": 0.033935546875,
"learning_rate": 0.0001,
"loss": 0.2867,
"step": 165
},
{
"epoch": 1.060428849902534,
"grad_norm": 0.031494140625,
"learning_rate": 0.0001,
"loss": 0.2628,
"step": 170
},
{
"epoch": 1.0916179337231968,
"grad_norm": 0.0289306640625,
"learning_rate": 0.0001,
"loss": 0.278,
"step": 175
},
{
"epoch": 1.1228070175438596,
"grad_norm": 0.031982421875,
"learning_rate": 0.0001,
"loss": 0.2915,
"step": 180
},
{
"epoch": 1.1539961013645224,
"grad_norm": 0.031005859375,
"learning_rate": 0.0001,
"loss": 0.2902,
"step": 185
},
{
"epoch": 1.1851851851851851,
"grad_norm": 0.03271484375,
"learning_rate": 0.0001,
"loss": 0.2785,
"step": 190
},
{
"epoch": 1.2163742690058479,
"grad_norm": 0.042236328125,
"learning_rate": 0.0001,
"loss": 0.295,
"step": 195
},
{
"epoch": 1.2475633528265107,
"grad_norm": 0.05615234375,
"learning_rate": 0.0001,
"loss": 0.3039,
"step": 200
},
{
"epoch": 1.2787524366471734,
"grad_norm": 0.03759765625,
"learning_rate": 0.0001,
"loss": 0.2725,
"step": 205
},
{
"epoch": 1.3099415204678362,
"grad_norm": 0.035400390625,
"learning_rate": 0.0001,
"loss": 0.2738,
"step": 210
},
{
"epoch": 1.341130604288499,
"grad_norm": 0.031494140625,
"learning_rate": 0.0001,
"loss": 0.2861,
"step": 215
},
{
"epoch": 1.3723196881091617,
"grad_norm": 0.036865234375,
"learning_rate": 0.0001,
"loss": 0.2859,
"step": 220
},
{
"epoch": 1.4035087719298245,
"grad_norm": 0.03515625,
"learning_rate": 0.0001,
"loss": 0.285,
"step": 225
},
{
"epoch": 1.4346978557504872,
"grad_norm": 0.03466796875,
"learning_rate": 0.0001,
"loss": 0.2713,
"step": 230
},
{
"epoch": 1.46588693957115,
"grad_norm": 0.042724609375,
"learning_rate": 0.0001,
"loss": 0.2982,
"step": 235
},
{
"epoch": 1.4970760233918128,
"grad_norm": 0.0673828125,
"learning_rate": 0.0001,
"loss": 0.3026,
"step": 240
},
{
"epoch": 1.5282651072124755,
"grad_norm": 0.04150390625,
"learning_rate": 0.0001,
"loss": 0.2916,
"step": 245
},
{
"epoch": 1.5594541910331383,
"grad_norm": 0.034912109375,
"learning_rate": 0.0001,
"loss": 0.2548,
"step": 250
},
{
"epoch": 1.5594541910331383,
"step": 250,
"total_flos": 4.824681746497536e+17,
"train_loss": 0.3312075719833374,
"train_runtime": 19317.2857,
"train_samples_per_second": 0.828,
"train_steps_per_second": 0.013
}
],
"logging_steps": 5,
"max_steps": 250,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 90,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4.824681746497536e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}