{ "best_metric": null, "best_model_checkpoint": null, "epoch": 9.968354430379748, "eval_steps": 500, "global_step": 1575, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.15822784810126583, "grad_norm": 2.5220770835876465, "learning_rate": 0.0002, "loss": 2.3375, "step": 25 }, { "epoch": 0.31645569620253167, "grad_norm": 1.2550034523010254, "learning_rate": 0.0002, "loss": 1.4176, "step": 50 }, { "epoch": 0.47468354430379744, "grad_norm": 1.1539708375930786, "learning_rate": 0.0002, "loss": 1.0198, "step": 75 }, { "epoch": 0.6329113924050633, "grad_norm": 1.0697557926177979, "learning_rate": 0.0002, "loss": 1.2321, "step": 100 }, { "epoch": 0.7911392405063291, "grad_norm": 1.0829753875732422, "learning_rate": 0.0002, "loss": 0.9474, "step": 125 }, { "epoch": 0.9493670886075949, "grad_norm": 1.0121601819992065, "learning_rate": 0.0002, "loss": 0.8395, "step": 150 }, { "epoch": 1.1075949367088607, "grad_norm": 1.1056463718414307, "learning_rate": 0.0002, "loss": 0.7928, "step": 175 }, { "epoch": 1.2658227848101267, "grad_norm": 1.5227317810058594, "learning_rate": 0.0002, "loss": 0.4643, "step": 200 }, { "epoch": 1.4240506329113924, "grad_norm": 0.5029327273368835, "learning_rate": 0.0002, "loss": 0.574, "step": 225 }, { "epoch": 1.5822784810126582, "grad_norm": 0.7477303147315979, "learning_rate": 0.0002, "loss": 0.5327, "step": 250 }, { "epoch": 1.740506329113924, "grad_norm": 1.1760517358779907, "learning_rate": 0.0002, "loss": 0.3683, "step": 275 }, { "epoch": 1.8987341772151898, "grad_norm": 0.7232487797737122, "learning_rate": 0.0002, "loss": 0.6297, "step": 300 }, { "epoch": 2.0569620253164556, "grad_norm": 0.8394080996513367, "learning_rate": 0.0002, "loss": 0.445, "step": 325 }, { "epoch": 2.2151898734177213, "grad_norm": 0.7312848567962646, "learning_rate": 0.0002, "loss": 0.3746, "step": 350 }, { "epoch": 2.3734177215189876, "grad_norm": 0.7731461524963379, "learning_rate": 0.0002, "loss": 0.4716, "step": 375 }, { "epoch": 2.5316455696202533, "grad_norm": 1.2274295091629028, "learning_rate": 0.0002, "loss": 0.3655, "step": 400 }, { "epoch": 2.689873417721519, "grad_norm": 0.6810910105705261, "learning_rate": 0.0002, "loss": 0.3526, "step": 425 }, { "epoch": 2.848101265822785, "grad_norm": 0.6517865657806396, "learning_rate": 0.0002, "loss": 0.4433, "step": 450 }, { "epoch": 3.0063291139240507, "grad_norm": 1.4362159967422485, "learning_rate": 0.0002, "loss": 0.3225, "step": 475 }, { "epoch": 3.1645569620253164, "grad_norm": 0.5174972414970398, "learning_rate": 0.0002, "loss": 0.4203, "step": 500 }, { "epoch": 3.3227848101265822, "grad_norm": 0.8872693181037903, "learning_rate": 0.0002, "loss": 0.3485, "step": 525 }, { "epoch": 3.481012658227848, "grad_norm": 0.7814120054244995, "learning_rate": 0.0002, "loss": 0.2683, "step": 550 }, { "epoch": 3.6392405063291138, "grad_norm": 0.5629352331161499, "learning_rate": 0.0002, "loss": 0.4093, "step": 575 }, { "epoch": 3.7974683544303796, "grad_norm": 0.8676897883415222, "learning_rate": 0.0002, "loss": 0.3342, "step": 600 }, { "epoch": 3.9556962025316453, "grad_norm": 0.3980434536933899, "learning_rate": 0.0002, "loss": 0.2799, "step": 625 }, { "epoch": 4.113924050632911, "grad_norm": 0.6955259442329407, "learning_rate": 0.0002, "loss": 0.3424, "step": 650 }, { "epoch": 4.272151898734177, "grad_norm": 0.9159439206123352, "learning_rate": 0.0002, "loss": 0.2344, "step": 675 }, { "epoch": 4.430379746835443, "grad_norm": 0.657145082950592, "learning_rate": 0.0002, 
"loss": 0.3157, "step": 700 }, { "epoch": 4.588607594936709, "grad_norm": 0.6370977163314819, "learning_rate": 0.0002, "loss": 0.3165, "step": 725 }, { "epoch": 4.746835443037975, "grad_norm": 1.4098331928253174, "learning_rate": 0.0002, "loss": 0.2306, "step": 750 }, { "epoch": 4.905063291139241, "grad_norm": 0.49123522639274597, "learning_rate": 0.0002, "loss": 0.3595, "step": 775 }, { "epoch": 5.063291139240507, "grad_norm": 0.6442649960517883, "learning_rate": 0.0002, "loss": 0.2739, "step": 800 }, { "epoch": 5.2215189873417724, "grad_norm": 0.3584900498390198, "learning_rate": 0.0002, "loss": 0.2148, "step": 825 }, { "epoch": 5.379746835443038, "grad_norm": 0.6529756784439087, "learning_rate": 0.0002, "loss": 0.3141, "step": 850 }, { "epoch": 5.537974683544304, "grad_norm": 0.7614108324050903, "learning_rate": 0.0002, "loss": 0.2484, "step": 875 }, { "epoch": 5.69620253164557, "grad_norm": 0.37845131754875183, "learning_rate": 0.0002, "loss": 0.2489, "step": 900 }, { "epoch": 5.8544303797468356, "grad_norm": 0.5563950538635254, "learning_rate": 0.0002, "loss": 0.3095, "step": 925 }, { "epoch": 6.012658227848101, "grad_norm": 0.9632635712623596, "learning_rate": 0.0002, "loss": 0.2197, "step": 950 }, { "epoch": 6.170886075949367, "grad_norm": 0.32439491152763367, "learning_rate": 0.0002, "loss": 0.2652, "step": 975 }, { "epoch": 6.329113924050633, "grad_norm": 0.5222536325454712, "learning_rate": 0.0002, "loss": 0.2422, "step": 1000 }, { "epoch": 6.487341772151899, "grad_norm": 0.5735016465187073, "learning_rate": 0.0002, "loss": 0.1888, "step": 1025 }, { "epoch": 6.6455696202531644, "grad_norm": 0.6607617735862732, "learning_rate": 0.0002, "loss": 0.2995, "step": 1050 }, { "epoch": 6.80379746835443, "grad_norm": 0.9632560610771179, "learning_rate": 0.0002, "loss": 0.2397, "step": 1075 }, { "epoch": 6.962025316455696, "grad_norm": 0.25032752752304077, "learning_rate": 0.0002, "loss": 0.2035, "step": 1100 }, { "epoch": 7.120253164556962, "grad_norm": 0.4147493243217468, "learning_rate": 0.0002, "loss": 0.2388, "step": 1125 }, { "epoch": 7.2784810126582276, "grad_norm": 0.6790456771850586, "learning_rate": 0.0002, "loss": 0.1916, "step": 1150 }, { "epoch": 7.436708860759493, "grad_norm": 0.29760053753852844, "learning_rate": 0.0002, "loss": 0.2266, "step": 1175 }, { "epoch": 7.594936708860759, "grad_norm": 0.6803601980209351, "learning_rate": 0.0002, "loss": 0.2337, "step": 1200 }, { "epoch": 7.753164556962025, "grad_norm": 0.8090017437934875, "learning_rate": 0.0002, "loss": 0.199, "step": 1225 }, { "epoch": 7.911392405063291, "grad_norm": 0.4255053699016571, "learning_rate": 0.0002, "loss": 0.2577, "step": 1250 }, { "epoch": 8.069620253164556, "grad_norm": 0.6785822510719299, "learning_rate": 0.0002, "loss": 0.2045, "step": 1275 }, { "epoch": 8.227848101265822, "grad_norm": 0.2853826880455017, "learning_rate": 0.0002, "loss": 0.1707, "step": 1300 }, { "epoch": 8.386075949367088, "grad_norm": 0.5013365745544434, "learning_rate": 0.0002, "loss": 0.2324, "step": 1325 }, { "epoch": 8.544303797468354, "grad_norm": 0.7551653385162354, "learning_rate": 0.0002, "loss": 0.2055, "step": 1350 }, { "epoch": 8.70253164556962, "grad_norm": 0.2432267665863037, "learning_rate": 0.0002, "loss": 0.2046, "step": 1375 }, { "epoch": 8.860759493670885, "grad_norm": 0.6924687027931213, "learning_rate": 0.0002, "loss": 0.2369, "step": 1400 }, { "epoch": 9.018987341772151, "grad_norm": 0.6701030135154724, "learning_rate": 0.0002, "loss": 0.1816, "step": 1425 }, { "epoch": 9.177215189873417, "grad_norm": 
0.3679279088973999, "learning_rate": 0.0002, "loss": 0.1884, "step": 1450 }, { "epoch": 9.335443037974684, "grad_norm": 0.3734745979309082, "learning_rate": 0.0002, "loss": 0.1934, "step": 1475 }, { "epoch": 9.49367088607595, "grad_norm": 0.3465384244918823, "learning_rate": 0.0002, "loss": 0.167, "step": 1500 }, { "epoch": 9.651898734177216, "grad_norm": 0.4366382360458374, "learning_rate": 0.0002, "loss": 0.244, "step": 1525 }, { "epoch": 9.810126582278482, "grad_norm": 0.44247567653656006, "learning_rate": 0.0002, "loss": 0.1965, "step": 1550 }, { "epoch": 9.968354430379748, "grad_norm": 0.2957931458950043, "learning_rate": 0.0002, "loss": 0.1794, "step": 1575 } ], "logging_steps": 25, "max_steps": 1580, "num_input_tokens_seen": 0, "num_train_epochs": 10, "save_steps": 25, "total_flos": 1.479668134699008e+16, "train_batch_size": 4, "trial_name": null, "trial_params": null }