{ "best_metric": null, "best_model_checkpoint": null, "epoch": 1.5594541910331383, "eval_steps": 500, "global_step": 250, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.031189083820662766, "grad_norm": 0.138671875, "learning_rate": 0.0001, "loss": 3.833, "step": 5 }, { "epoch": 0.06237816764132553, "grad_norm": 0.05322265625, "learning_rate": 0.0001, "loss": 0.4376, "step": 10 }, { "epoch": 0.0935672514619883, "grad_norm": 0.06494140625, "learning_rate": 0.0001, "loss": 0.4347, "step": 15 }, { "epoch": 0.12475633528265107, "grad_norm": 0.059326171875, "learning_rate": 0.0001, "loss": 0.4465, "step": 20 }, { "epoch": 0.15594541910331383, "grad_norm": 0.060791015625, "learning_rate": 0.0001, "loss": 0.4576, "step": 25 }, { "epoch": 0.1871345029239766, "grad_norm": 0.0703125, "learning_rate": 0.0001, "loss": 0.4395, "step": 30 }, { "epoch": 0.21832358674463936, "grad_norm": 0.051513671875, "learning_rate": 0.0001, "loss": 0.4391, "step": 35 }, { "epoch": 0.24951267056530213, "grad_norm": 0.150390625, "learning_rate": 0.0001, "loss": 0.4827, "step": 40 }, { "epoch": 0.2807017543859649, "grad_norm": 0.047119140625, "learning_rate": 0.0001, "loss": 0.3364, "step": 45 }, { "epoch": 0.31189083820662766, "grad_norm": 0.032958984375, "learning_rate": 0.0001, "loss": 0.3183, "step": 50 }, { "epoch": 0.34307992202729043, "grad_norm": 0.033447265625, "learning_rate": 0.0001, "loss": 0.3259, "step": 55 }, { "epoch": 0.3742690058479532, "grad_norm": 0.032470703125, "learning_rate": 0.0001, "loss": 0.3314, "step": 60 }, { "epoch": 0.40545808966861596, "grad_norm": 0.033203125, "learning_rate": 0.0001, "loss": 0.3172, "step": 65 }, { "epoch": 0.43664717348927873, "grad_norm": 0.0308837890625, "learning_rate": 0.0001, "loss": 0.3306, "step": 70 }, { "epoch": 0.4678362573099415, "grad_norm": 0.035400390625, "learning_rate": 0.0001, "loss": 0.3365, "step": 75 }, { "epoch": 0.49902534113060426, "grad_norm": 0.083984375, "learning_rate": 0.0001, "loss": 0.3592, "step": 80 }, { "epoch": 0.530214424951267, "grad_norm": 0.043212890625, "learning_rate": 0.0001, "loss": 0.3191, "step": 85 }, { "epoch": 0.5614035087719298, "grad_norm": 0.0311279296875, "learning_rate": 0.0001, "loss": 0.2838, "step": 90 }, { "epoch": 0.5925925925925926, "grad_norm": 0.039794921875, "learning_rate": 0.0001, "loss": 0.3003, "step": 95 }, { "epoch": 0.6237816764132553, "grad_norm": 0.03076171875, "learning_rate": 0.0001, "loss": 0.3073, "step": 100 }, { "epoch": 0.6549707602339181, "grad_norm": 0.031982421875, "learning_rate": 0.0001, "loss": 0.3, "step": 105 }, { "epoch": 0.6861598440545809, "grad_norm": 0.033203125, "learning_rate": 0.0001, "loss": 0.3031, "step": 110 }, { "epoch": 0.7173489278752436, "grad_norm": 0.037841796875, "learning_rate": 0.0001, "loss": 0.3237, "step": 115 }, { "epoch": 0.7485380116959064, "grad_norm": 0.0771484375, "learning_rate": 0.0001, "loss": 0.3449, "step": 120 }, { "epoch": 0.7797270955165692, "grad_norm": 0.03662109375, "learning_rate": 0.0001, "loss": 0.2893, "step": 125 }, { "epoch": 0.8109161793372319, "grad_norm": 0.0301513671875, "learning_rate": 0.0001, "loss": 0.2818, "step": 130 }, { "epoch": 0.8421052631578947, "grad_norm": 0.03125, "learning_rate": 0.0001, "loss": 0.3003, "step": 135 }, { "epoch": 0.8732943469785575, "grad_norm": 0.037841796875, "learning_rate": 0.0001, "loss": 0.2999, "step": 140 }, { "epoch": 0.9044834307992202, "grad_norm": 0.0458984375, "learning_rate": 0.0001, "loss": 0.3044, "step": 145 }, { 
"epoch": 0.935672514619883, "grad_norm": 0.03466796875, "learning_rate": 0.0001, "loss": 0.2954, "step": 150 }, { "epoch": 0.9668615984405458, "grad_norm": 0.03759765625, "learning_rate": 0.0001, "loss": 0.304, "step": 155 }, { "epoch": 0.9980506822612085, "grad_norm": 0.07421875, "learning_rate": 0.0001, "loss": 0.351, "step": 160 }, { "epoch": 1.0292397660818713, "grad_norm": 0.03564453125, "learning_rate": 0.0001, "loss": 0.2895, "step": 165 }, { "epoch": 1.060428849902534, "grad_norm": 0.03271484375, "learning_rate": 0.0001, "loss": 0.2658, "step": 170 }, { "epoch": 1.0916179337231968, "grad_norm": 0.031494140625, "learning_rate": 0.0001, "loss": 0.2809, "step": 175 }, { "epoch": 1.1228070175438596, "grad_norm": 0.031982421875, "learning_rate": 0.0001, "loss": 0.2943, "step": 180 }, { "epoch": 1.1539961013645224, "grad_norm": 0.0322265625, "learning_rate": 0.0001, "loss": 0.2932, "step": 185 }, { "epoch": 1.1851851851851851, "grad_norm": 0.033935546875, "learning_rate": 0.0001, "loss": 0.2806, "step": 190 }, { "epoch": 1.2163742690058479, "grad_norm": 0.0419921875, "learning_rate": 0.0001, "loss": 0.298, "step": 195 }, { "epoch": 1.2475633528265107, "grad_norm": 0.062255859375, "learning_rate": 0.0001, "loss": 0.3069, "step": 200 }, { "epoch": 1.2787524366471734, "grad_norm": 0.0419921875, "learning_rate": 0.0001, "loss": 0.2752, "step": 205 }, { "epoch": 1.3099415204678362, "grad_norm": 0.03515625, "learning_rate": 0.0001, "loss": 0.276, "step": 210 }, { "epoch": 1.341130604288499, "grad_norm": 0.032958984375, "learning_rate": 0.0001, "loss": 0.2891, "step": 215 }, { "epoch": 1.3723196881091617, "grad_norm": 0.033447265625, "learning_rate": 0.0001, "loss": 0.2886, "step": 220 }, { "epoch": 1.4035087719298245, "grad_norm": 0.035888671875, "learning_rate": 0.0001, "loss": 0.289, "step": 225 }, { "epoch": 1.4346978557504872, "grad_norm": 0.03564453125, "learning_rate": 0.0001, "loss": 0.274, "step": 230 }, { "epoch": 1.46588693957115, "grad_norm": 0.04296875, "learning_rate": 0.0001, "loss": 0.3011, "step": 235 }, { "epoch": 1.4970760233918128, "grad_norm": 0.07080078125, "learning_rate": 0.0001, "loss": 0.306, "step": 240 }, { "epoch": 1.5282651072124755, "grad_norm": 0.0439453125, "learning_rate": 0.0001, "loss": 0.2949, "step": 245 }, { "epoch": 1.5594541910331383, "grad_norm": 0.032470703125, "learning_rate": 0.0001, "loss": 0.2576, "step": 250 }, { "epoch": 1.5594541910331383, "step": 250, "total_flos": 4.824681746497536e+17, "train_loss": 0.3939001045227051, "train_runtime": 16242.9143, "train_samples_per_second": 0.985, "train_steps_per_second": 0.015 } ], "logging_steps": 5, "max_steps": 250, "num_input_tokens_seen": 0, "num_train_epochs": 2, "save_steps": 90, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 4.824681746497536e+17, "train_batch_size": 4, "trial_name": null, "trial_params": null }