{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 4.0,
  "eval_steps": 500,
  "global_step": 516,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07751937984496124,
      "grad_norm": 2.245417356491089,
      "learning_rate": 5e-05,
      "loss": 2.1087,
      "step": 10
    },
    {
      "epoch": 0.15503875968992248,
      "grad_norm": 3.3328239917755127,
      "learning_rate": 0.0001,
      "loss": 1.7547,
      "step": 20
    },
    {
      "epoch": 0.23255813953488372,
      "grad_norm": 1.6743243932724,
      "learning_rate": 9.993684783030088e-05,
      "loss": 1.3868,
      "step": 30
    },
    {
      "epoch": 0.31007751937984496,
      "grad_norm": 1.585747480392456,
      "learning_rate": 9.974755084906502e-05,
      "loss": 1.1455,
      "step": 40
    },
    {
      "epoch": 0.3875968992248062,
      "grad_norm": 1.2781128883361816,
      "learning_rate": 9.94325872368957e-05,
      "loss": 1.1394,
      "step": 50
    },
    {
      "epoch": 0.46511627906976744,
      "grad_norm": 1.3732120990753174,
      "learning_rate": 9.899275261921234e-05,
      "loss": 1.0942,
      "step": 60
    },
    {
      "epoch": 0.5426356589147286,
      "grad_norm": 1.4817951917648315,
      "learning_rate": 9.842915805643155e-05,
      "loss": 0.9934,
      "step": 70
    },
    {
      "epoch": 0.6201550387596899,
      "grad_norm": 1.261801838874817,
      "learning_rate": 9.774322723733216e-05,
      "loss": 1.0093,
      "step": 80
    },
    {
      "epoch": 0.6976744186046512,
      "grad_norm": 1.801243782043457,
      "learning_rate": 9.693669288269372e-05,
      "loss": 1.0686,
      "step": 90
    },
    {
      "epoch": 0.7751937984496124,
      "grad_norm": 1.719342827796936,
      "learning_rate": 9.601159236829352e-05,
      "loss": 0.9973,
      "step": 100
    },
    {
      "epoch": 0.8527131782945736,
      "grad_norm": 1.5797449350357056,
      "learning_rate": 9.497026257831855e-05,
      "loss": 1.0002,
      "step": 110
    },
    {
      "epoch": 0.9302325581395349,
      "grad_norm": 1.790971279144287,
      "learning_rate": 9.381533400219318e-05,
      "loss": 0.9484,
      "step": 120
    },
    {
      "epoch": 1.0077519379844961,
      "grad_norm": 1.6127315759658813,
      "learning_rate": 9.254972408973461e-05,
      "loss": 0.979,
      "step": 130
    },
    {
      "epoch": 1.0852713178294573,
      "grad_norm": 1.4869861602783203,
      "learning_rate": 9.117662988142138e-05,
      "loss": 0.8899,
      "step": 140
    },
    {
      "epoch": 1.1627906976744187,
      "grad_norm": 2.101341962814331,
      "learning_rate": 8.969951993239177e-05,
      "loss": 0.9493,
      "step": 150
    },
    {
      "epoch": 1.2403100775193798,
      "grad_norm": 1.7181503772735596,
      "learning_rate": 8.81221255505724e-05,
      "loss": 0.8715,
      "step": 160
    },
    {
      "epoch": 1.3178294573643412,
      "grad_norm": 2.006377696990967,
      "learning_rate": 8.644843137107059e-05,
      "loss": 0.8846,
      "step": 170
    },
    {
      "epoch": 1.3953488372093024,
      "grad_norm": 2.0157504081726074,
      "learning_rate": 8.468266529064025e-05,
      "loss": 0.9292,
      "step": 180
    },
    {
      "epoch": 1.4728682170542635,
      "grad_norm": 1.704633355140686,
      "learning_rate": 8.282928778764783e-05,
      "loss": 0.8883,
      "step": 190
    },
    {
      "epoch": 1.550387596899225,
      "grad_norm": 1.7320111989974976,
      "learning_rate": 8.089298065451672e-05,
      "loss": 0.874,
      "step": 200
    },
    {
      "epoch": 1.627906976744186,
      "grad_norm": 2.369300127029419,
      "learning_rate": 7.887863517111338e-05,
      "loss": 0.8688,
      "step": 210
    },
    {
      "epoch": 1.7054263565891472,
      "grad_norm": 1.5535732507705688,
      "learning_rate": 7.679133974894983e-05,
      "loss": 0.8726,
      "step": 220
    },
    {
      "epoch": 1.7829457364341086,
      "grad_norm": 1.9749231338500977,
      "learning_rate": 7.463636707741458e-05,
      "loss": 0.8252,
      "step": 230
    },
    {
      "epoch": 1.8604651162790697,
      "grad_norm": 2.261054277420044,
      "learning_rate": 7.241916080450163e-05,
      "loss": 0.846,
      "step": 240
    },
    {
      "epoch": 1.937984496124031,
      "grad_norm": 2.019524097442627,
      "learning_rate": 7.014532178568314e-05,
      "loss": 0.8104,
      "step": 250
    },
    {
      "epoch": 2.0155038759689923,
      "grad_norm": 1.8253782987594604,
      "learning_rate": 6.782059393566253e-05,
      "loss": 0.8436,
      "step": 260
    },
    {
      "epoch": 2.0930232558139537,
      "grad_norm": 1.92261803150177,
      "learning_rate": 6.545084971874738e-05,
      "loss": 0.8007,
      "step": 270
    },
    {
      "epoch": 2.1705426356589146,
      "grad_norm": 1.959991455078125,
      "learning_rate": 6.304207531449486e-05,
      "loss": 0.8093,
      "step": 280
    },
    {
      "epoch": 2.248062015503876,
      "grad_norm": 2.0819852352142334,
      "learning_rate": 6.0600355496102745e-05,
      "loss": 0.7626,
      "step": 290
    },
    {
      "epoch": 2.3255813953488373,
      "grad_norm": 2.1029396057128906,
      "learning_rate": 5.813185825974419e-05,
      "loss": 0.7995,
      "step": 300
    },
    {
      "epoch": 2.4031007751937983,
      "grad_norm": 2.149883508682251,
      "learning_rate": 5.564281924367408e-05,
      "loss": 0.7867,
      "step": 310
    },
    {
      "epoch": 2.4806201550387597,
      "grad_norm": 2.2461025714874268,
      "learning_rate": 5.313952597646568e-05,
      "loss": 0.778,
      "step": 320
    },
    {
      "epoch": 2.558139534883721,
      "grad_norm": 2.1659789085388184,
      "learning_rate": 5.062830199416764e-05,
      "loss": 0.8143,
      "step": 330
    },
    {
      "epoch": 2.6356589147286824,
      "grad_norm": 2.3656270503997803,
      "learning_rate": 4.811549086650327e-05,
      "loss": 0.8017,
      "step": 340
    },
    {
      "epoch": 2.7131782945736433,
      "grad_norm": 2.3753740787506104,
      "learning_rate": 4.560744017246284e-05,
      "loss": 0.7666,
      "step": 350
    },
    {
      "epoch": 2.7906976744186047,
      "grad_norm": 2.221393585205078,
      "learning_rate": 4.31104854657681e-05,
      "loss": 0.7679,
      "step": 360
    },
    {
      "epoch": 2.8682170542635657,
      "grad_norm": 2.018345594406128,
      "learning_rate": 4.063093427071376e-05,
      "loss": 0.7507,
      "step": 370
    },
    {
      "epoch": 2.945736434108527,
      "grad_norm": 2.199131965637207,
      "learning_rate": 3.817505014881378e-05,
      "loss": 0.7413,
      "step": 380
    },
    {
      "epoch": 3.0232558139534884,
      "grad_norm": 2.139735698699951,
      "learning_rate": 3.5749036876501194e-05,
      "loss": 0.8139,
      "step": 390
    },
    {
      "epoch": 3.10077519379845,
      "grad_norm": 2.0483205318450928,
      "learning_rate": 3.335902277385067e-05,
      "loss": 0.7209,
      "step": 400
    },
    {
      "epoch": 3.1782945736434107,
      "grad_norm": 2.3175625801086426,
      "learning_rate": 3.101104522390995e-05,
      "loss": 0.7392,
      "step": 410
    },
    {
      "epoch": 3.255813953488372,
      "grad_norm": 2.267725944519043,
      "learning_rate": 2.8711035421746367e-05,
      "loss": 0.7487,
      "step": 420
    },
    {
      "epoch": 3.3333333333333335,
      "grad_norm": 2.491976261138916,
      "learning_rate": 2.6464803391733374e-05,
      "loss": 0.7472,
      "step": 430
    },
    {
      "epoch": 3.4108527131782944,
      "grad_norm": 2.3867716789245605,
      "learning_rate": 2.4278023310924673e-05,
      "loss": 0.7194,
      "step": 440
    },
    {
      "epoch": 3.488372093023256,
      "grad_norm": 2.1725575923919678,
      "learning_rate": 2.215621917559062e-05,
      "loss": 0.7435,
      "step": 450
    },
    {
      "epoch": 3.565891472868217,
      "grad_norm": 2.362307071685791,
      "learning_rate": 2.0104750847124075e-05,
      "loss": 0.7257,
      "step": 460
    },
    {
      "epoch": 3.6434108527131785,
      "grad_norm": 2.3659157752990723,
      "learning_rate": 1.8128800512565513e-05,
      "loss": 0.7498,
      "step": 470
    },
    {
      "epoch": 3.7209302325581395,
      "grad_norm": 2.225560188293457,
      "learning_rate": 1.6233359593948777e-05,
      "loss": 0.7308,
      "step": 480
    },
    {
      "epoch": 3.798449612403101,
      "grad_norm": 2.4101855754852295,
      "learning_rate": 1.4423216139535734e-05,
      "loss": 0.7524,
      "step": 490
    },
    {
      "epoch": 3.875968992248062,
      "grad_norm": 2.295118808746338,
      "learning_rate": 1.2702942728790895e-05,
      "loss": 0.7239,
      "step": 500
    },
    {
      "epoch": 3.953488372093023,
      "grad_norm": 2.7008495330810547,
      "learning_rate": 1.1076884921648834e-05,
      "loss": 0.7305,
      "step": 510
    }
  ],
  "logging_steps": 10,
  "max_steps": 645,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 5.387586293602714e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}