{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9963898916967509,
  "eval_steps": 500,
  "global_step": 69,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01444043321299639,
      "grad_norm": 0.9324310421943665,
      "learning_rate": 4.9974091841168195e-05,
      "loss": 0.8402,
      "num_input_tokens_seen": 2097152,
      "step": 1
    },
    {
      "epoch": 0.02888086642599278,
      "grad_norm": 0.736538290977478,
      "learning_rate": 4.9896421063288286e-05,
      "loss": 0.7686,
      "num_input_tokens_seen": 4194304,
      "step": 2
    },
    {
      "epoch": 0.04332129963898917,
      "grad_norm": 0.6694996953010559,
      "learning_rate": 4.976714865090827e-05,
      "loss": 0.7563,
      "num_input_tokens_seen": 6291456,
      "step": 3
    },
    {
      "epoch": 0.05776173285198556,
      "grad_norm": 0.5700488090515137,
      "learning_rate": 4.958654254084355e-05,
      "loss": 0.7139,
      "num_input_tokens_seen": 8388608,
      "step": 4
    },
    {
      "epoch": 0.07220216606498195,
      "grad_norm": 0.47626349329948425,
      "learning_rate": 4.9354977066836986e-05,
      "loss": 0.6793,
      "num_input_tokens_seen": 10485760,
      "step": 5
    },
    {
      "epoch": 0.08664259927797834,
      "grad_norm": 0.4310459494590759,
      "learning_rate": 4.907293218369499e-05,
      "loss": 0.6631,
      "num_input_tokens_seen": 12582912,
      "step": 6
    },
    {
      "epoch": 0.10108303249097472,
      "grad_norm": 0.41724854707717896,
      "learning_rate": 4.874099247250798e-05,
      "loss": 0.64,
      "num_input_tokens_seen": 14680064,
      "step": 7
    },
    {
      "epoch": 0.11552346570397112,
      "grad_norm": 0.37938904762268066,
      "learning_rate": 4.835984592901678e-05,
      "loss": 0.6145,
      "num_input_tokens_seen": 16777216,
      "step": 8
    },
    {
      "epoch": 0.1299638989169675,
      "grad_norm": 0.30510303378105164,
      "learning_rate": 4.793028253763633e-05,
      "loss": 0.5997,
      "num_input_tokens_seen": 18874368,
      "step": 9
    },
    {
      "epoch": 0.1444043321299639,
      "grad_norm": 0.17869406938552856,
      "learning_rate": 4.74531926340924e-05,
      "loss": 0.5896,
      "num_input_tokens_seen": 20971520,
      "step": 10
    },
    {
      "epoch": 0.1588447653429603,
      "grad_norm": 0.1084759458899498,
      "learning_rate": 4.6929565060064864e-05,
      "loss": 0.6025,
      "num_input_tokens_seen": 23068672,
      "step": 11
    },
    {
      "epoch": 0.17328519855595667,
      "grad_norm": 0.09031977504491806,
      "learning_rate": 4.6360485113662216e-05,
      "loss": 0.5644,
      "num_input_tokens_seen": 25165824,
      "step": 12
    },
    {
      "epoch": 0.18772563176895307,
      "grad_norm": 0.08057376742362976,
      "learning_rate": 4.574713229997563e-05,
      "loss": 0.5558,
      "num_input_tokens_seen": 27262976,
      "step": 13
    },
    {
      "epoch": 0.20216606498194944,
      "grad_norm": 0.0670362040400505,
      "learning_rate": 4.509077788637446e-05,
      "loss": 0.5705,
      "num_input_tokens_seen": 29360128,
      "step": 14
    },
    {
      "epoch": 0.21660649819494585,
      "grad_norm": 0.06539376825094223,
      "learning_rate": 4.43927822676105e-05,
      "loss": 0.5694,
      "num_input_tokens_seen": 31457280,
      "step": 15
    },
    {
      "epoch": 0.23104693140794225,
      "grad_norm": 0.05930742993950844,
      "learning_rate": 4.365459214619214e-05,
      "loss": 0.559,
      "num_input_tokens_seen": 33554432,
      "step": 16
    },
    {
      "epoch": 0.24548736462093862,
      "grad_norm": 0.054464634507894516,
      "learning_rate": 4.2877737533872485e-05,
      "loss": 0.5628,
      "num_input_tokens_seen": 35651584,
      "step": 17
    },
    {
      "epoch": 0.259927797833935,
      "grad_norm": 0.053172189742326736,
      "learning_rate": 4.206382858046636e-05,
      "loss": 0.5553,
      "num_input_tokens_seen": 37748736,
      "step": 18
    },
    {
      "epoch": 0.2743682310469314,
      "grad_norm": 0.04865848645567894,
      "learning_rate": 4.12145522365689e-05,
      "loss": 0.5401,
      "num_input_tokens_seen": 39845888,
      "step": 19
    },
    {
      "epoch": 0.2888086642599278,
      "grad_norm": 0.04852156713604927,
      "learning_rate": 4.033166875709291e-05,
      "loss": 0.575,
      "num_input_tokens_seen": 41943040,
      "step": 20
    },
    {
      "epoch": 0.30324909747292417,
      "grad_norm": 0.046296387910842896,
      "learning_rate": 3.941700805287168e-05,
      "loss": 0.5398,
      "num_input_tokens_seen": 44040192,
      "step": 21
    },
    {
      "epoch": 0.3176895306859206,
      "grad_norm": 0.04640813171863556,
      "learning_rate": 3.8472465897889394e-05,
      "loss": 0.5389,
      "num_input_tokens_seen": 46137344,
      "step": 22
    },
    {
      "epoch": 0.33212996389891697,
      "grad_norm": 0.043006811290979385,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 0.5451,
      "num_input_tokens_seen": 48234496,
      "step": 23
    },
    {
      "epoch": 0.34657039711191334,
      "grad_norm": 0.04406141862273216,
      "learning_rate": 3.6501625943278805e-05,
      "loss": 0.5558,
      "num_input_tokens_seen": 50331648,
      "step": 24
    },
    {
      "epoch": 0.36101083032490977,
      "grad_norm": 0.03994145616889,
      "learning_rate": 3.547941301041661e-05,
      "loss": 0.5403,
      "num_input_tokens_seen": 52428800,
      "step": 25
    },
    {
      "epoch": 0.37545126353790614,
      "grad_norm": 0.039348017424345016,
      "learning_rate": 3.443547989381536e-05,
      "loss": 0.5405,
      "num_input_tokens_seen": 54525952,
      "step": 26
    },
    {
      "epoch": 0.3898916967509025,
      "grad_norm": 0.03861572593450546,
      "learning_rate": 3.3371990304274656e-05,
      "loss": 0.5577,
      "num_input_tokens_seen": 56623104,
      "step": 27
    },
    {
      "epoch": 0.4043321299638989,
      "grad_norm": 0.03878667205572128,
      "learning_rate": 3.2291148486370626e-05,
      "loss": 0.5345,
      "num_input_tokens_seen": 58720256,
      "step": 28
    },
    {
      "epoch": 0.4187725631768953,
      "grad_norm": 0.03664080426096916,
      "learning_rate": 3.11951946498225e-05,
      "loss": 0.5484,
      "num_input_tokens_seen": 60817408,
      "step": 29
    },
    {
      "epoch": 0.4332129963898917,
      "grad_norm": 0.036824408918619156,
      "learning_rate": 3.008640032631585e-05,
      "loss": 0.5485,
      "num_input_tokens_seen": 62914560,
      "step": 30
    },
    {
      "epoch": 0.44765342960288806,
      "grad_norm": 0.037150438874959946,
      "learning_rate": 2.8967063661406285e-05,
      "loss": 0.5299,
      "num_input_tokens_seen": 65011712,
      "step": 31
    },
    {
      "epoch": 0.4620938628158845,
      "grad_norm": 0.03484778478741646,
      "learning_rate": 2.7839504651261872e-05,
      "loss": 0.539,
      "num_input_tokens_seen": 67108864,
      "step": 32
    },
    {
      "epoch": 0.47653429602888087,
      "grad_norm": 0.035446375608444214,
      "learning_rate": 2.6706060334116777e-05,
      "loss": 0.5248,
      "num_input_tokens_seen": 69206016,
      "step": 33
    },
    {
      "epoch": 0.49097472924187724,
      "grad_norm": 0.0350475013256073,
      "learning_rate": 2.556907994640264e-05,
      "loss": 0.5342,
      "num_input_tokens_seen": 71303168,
      "step": 34
    },
    {
      "epoch": 0.5054151624548736,
      "grad_norm": 0.036621786653995514,
      "learning_rate": 2.4430920053597356e-05,
      "loss": 0.5431,
      "num_input_tokens_seen": 73400320,
      "step": 35
    },
    {
      "epoch": 0.51985559566787,
      "grad_norm": 0.0347721092402935,
      "learning_rate": 2.329393966588323e-05,
      "loss": 0.5471,
      "num_input_tokens_seen": 75497472,
      "step": 36
    },
    {
      "epoch": 0.5342960288808665,
      "grad_norm": 0.03457929939031601,
      "learning_rate": 2.2160495348738123e-05,
      "loss": 0.542,
      "num_input_tokens_seen": 77594624,
      "step": 37
    },
    {
      "epoch": 0.5487364620938628,
      "grad_norm": 0.035683248192071915,
      "learning_rate": 2.1032936338593718e-05,
      "loss": 0.542,
      "num_input_tokens_seen": 79691776,
      "step": 38
    },
    {
      "epoch": 0.5631768953068592,
      "grad_norm": 0.03531257063150406,
      "learning_rate": 1.991359967368416e-05,
      "loss": 0.542,
      "num_input_tokens_seen": 81788928,
      "step": 39
    },
    {
      "epoch": 0.5776173285198556,
      "grad_norm": 0.034907545894384384,
      "learning_rate": 1.8804805350177505e-05,
      "loss": 0.5425,
      "num_input_tokens_seen": 83886080,
      "step": 40
    },
    {
      "epoch": 0.592057761732852,
      "grad_norm": 0.03420661389827728,
      "learning_rate": 1.7708851513629377e-05,
      "loss": 0.5482,
      "num_input_tokens_seen": 85983232,
      "step": 41
    },
    {
      "epoch": 0.6064981949458483,
      "grad_norm": 0.03401198983192444,
      "learning_rate": 1.6628009695725346e-05,
      "loss": 0.5598,
      "num_input_tokens_seen": 88080384,
      "step": 42
    },
    {
      "epoch": 0.6209386281588448,
      "grad_norm": 0.03434673324227333,
      "learning_rate": 1.5564520106184644e-05,
      "loss": 0.5402,
      "num_input_tokens_seen": 90177536,
      "step": 43
    },
    {
      "epoch": 0.6353790613718412,
      "grad_norm": 0.033791348338127136,
      "learning_rate": 1.4520586989583406e-05,
      "loss": 0.5413,
      "num_input_tokens_seen": 92274688,
      "step": 44
    },
    {
      "epoch": 0.6498194945848376,
      "grad_norm": 0.03330031782388687,
      "learning_rate": 1.3498374056721197e-05,
      "loss": 0.556,
      "num_input_tokens_seen": 94371840,
      "step": 45
    },
    {
      "epoch": 0.6642599277978339,
      "grad_norm": 0.03298752009868622,
      "learning_rate": 1.2500000000000006e-05,
      "loss": 0.5341,
      "num_input_tokens_seen": 96468992,
      "step": 46
    },
    {
      "epoch": 0.6787003610108303,
      "grad_norm": 0.03363870084285736,
      "learning_rate": 1.1527534102110612e-05,
      "loss": 0.5436,
      "num_input_tokens_seen": 98566144,
      "step": 47
    },
    {
      "epoch": 0.6931407942238267,
      "grad_norm": 0.032934173941612244,
      "learning_rate": 1.0582991947128324e-05,
      "loss": 0.5393,
      "num_input_tokens_seen": 100663296,
      "step": 48
    },
    {
      "epoch": 0.7075812274368231,
      "grad_norm": 0.03438662365078926,
      "learning_rate": 9.668331242907089e-06,
      "loss": 0.5598,
      "num_input_tokens_seen": 102760448,
      "step": 49
    },
    {
      "epoch": 0.7220216606498195,
      "grad_norm": 0.03351249918341637,
      "learning_rate": 8.785447763431101e-06,
      "loss": 0.5329,
      "num_input_tokens_seen": 104857600,
      "step": 50
    },
    {
      "epoch": 0.7364620938628159,
      "grad_norm": 0.0330129936337471,
      "learning_rate": 7.936171419533653e-06,
      "loss": 0.5384,
      "num_input_tokens_seen": 106954752,
      "step": 51
    },
    {
      "epoch": 0.7509025270758123,
      "grad_norm": 0.03433903306722641,
      "learning_rate": 7.122262466127514e-06,
      "loss": 0.5447,
      "num_input_tokens_seen": 109051904,
      "step": 52
    },
    {
      "epoch": 0.7653429602888087,
      "grad_norm": 0.03314002603292465,
      "learning_rate": 6.3454078538078635e-06,
      "loss": 0.5291,
      "num_input_tokens_seen": 111149056,
      "step": 53
    },
    {
      "epoch": 0.779783393501805,
      "grad_norm": 0.032993488013744354,
      "learning_rate": 5.607217732389503e-06,
      "loss": 0.5258,
      "num_input_tokens_seen": 113246208,
      "step": 54
    },
    {
      "epoch": 0.7942238267148014,
      "grad_norm": 0.032629404217004776,
      "learning_rate": 4.9092221136255444e-06,
      "loss": 0.5508,
      "num_input_tokens_seen": 115343360,
      "step": 55
    },
    {
      "epoch": 0.8086642599277978,
      "grad_norm": 0.03205695375800133,
      "learning_rate": 4.252867700024374e-06,
      "loss": 0.5439,
      "num_input_tokens_seen": 117440512,
      "step": 56
    },
    {
      "epoch": 0.8231046931407943,
      "grad_norm": 0.03285016119480133,
      "learning_rate": 3.6395148863377858e-06,
      "loss": 0.5261,
      "num_input_tokens_seen": 119537664,
      "step": 57
    },
    {
      "epoch": 0.8375451263537906,
      "grad_norm": 0.035063523799180984,
      "learning_rate": 3.0704349399351435e-06,
      "loss": 0.5518,
      "num_input_tokens_seen": 121634816,
      "step": 58
    },
    {
      "epoch": 0.851985559566787,
      "grad_norm": 0.03116844967007637,
      "learning_rate": 2.5468073659076e-06,
      "loss": 0.5444,
      "num_input_tokens_seen": 123731968,
      "step": 59
    },
    {
      "epoch": 0.8664259927797834,
      "grad_norm": 0.03255166485905647,
      "learning_rate": 2.0697174623636794e-06,
      "loss": 0.5469,
      "num_input_tokens_seen": 125829120,
      "step": 60
    },
    {
      "epoch": 0.8808664259927798,
      "grad_norm": 0.03249025344848633,
      "learning_rate": 1.6401540709832242e-06,
      "loss": 0.5335,
      "num_input_tokens_seen": 127926272,
      "step": 61
    },
    {
      "epoch": 0.8953068592057761,
      "grad_norm": 0.03506583720445633,
      "learning_rate": 1.2590075274920205e-06,
      "loss": 0.5367,
      "num_input_tokens_seen": 130023424,
      "step": 62
    },
    {
      "epoch": 0.9097472924187726,
      "grad_norm": 0.03293128311634064,
      "learning_rate": 9.270678163050217e-07,
      "loss": 0.551,
      "num_input_tokens_seen": 132120576,
      "step": 63
    },
    {
      "epoch": 0.924187725631769,
      "grad_norm": 0.03347219526767731,
      "learning_rate": 6.450229331630253e-07,
      "loss": 0.5597,
      "num_input_tokens_seen": 134217728,
      "step": 64
    },
    {
      "epoch": 0.9386281588447654,
      "grad_norm": 0.03135489672422409,
      "learning_rate": 4.134574591564494e-07,
      "loss": 0.5369,
      "num_input_tokens_seen": 136314880,
      "step": 65
    },
    {
      "epoch": 0.9530685920577617,
      "grad_norm": 0.03153960779309273,
      "learning_rate": 2.3285134909173112e-07,
      "loss": 0.5451,
      "num_input_tokens_seen": 138412032,
      "step": 66
    },
    {
      "epoch": 0.9675090252707581,
      "grad_norm": 0.03191647306084633,
      "learning_rate": 1.0357893671171792e-07,
      "loss": 0.5237,
      "num_input_tokens_seen": 140509184,
      "step": 67
    },
    {
      "epoch": 0.9819494584837545,
      "grad_norm": 0.03295959159731865,
      "learning_rate": 2.590815883181108e-08,
      "loss": 0.5604,
      "num_input_tokens_seen": 142606336,
      "step": 68
    },
    {
      "epoch": 0.9963898916967509,
      "grad_norm": 0.03191553056240082,
      "learning_rate": 0.0,
      "loss": 0.534,
      "num_input_tokens_seen": 144703488,
      "step": 69
    },
    {
      "epoch": 0.9963898916967509,
      "num_input_tokens_seen": 144703488,
      "step": 69,
      "total_flos": 5.635565866281075e+18,
      "train_loss": 0.5665888682655666,
      "train_runtime": 10913.0245,
      "train_samples_per_second": 3.247,
      "train_steps_per_second": 0.006
    }
  ],
  "logging_steps": 1,
  "max_steps": 69,
  "num_input_tokens_seen": 144703488,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.635565866281075e+18,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}