{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 10.0,
  "eval_steps": 500,
  "global_step": 25070,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.1994415636218588,
      "grad_norm": 1.7483305931091309,
      "learning_rate": 4.8005584363781416e-05,
      "loss": 7.5177,
      "step": 500
    },
    {
      "epoch": 0.3988831272437176,
      "grad_norm": 1.5902838706970215,
      "learning_rate": 4.601116872756282e-05,
      "loss": 6.6658,
      "step": 1000
    },
    {
      "epoch": 0.5983246908655764,
      "grad_norm": 1.9109225273132324,
      "learning_rate": 4.4016753091344236e-05,
      "loss": 6.3093,
      "step": 1500
    },
    {
      "epoch": 0.7977662544874352,
      "grad_norm": 1.8110615015029907,
      "learning_rate": 4.202233745512565e-05,
      "loss": 6.0429,
      "step": 2000
    },
    {
      "epoch": 0.9972078181092939,
      "grad_norm": 2.1440045833587646,
      "learning_rate": 4.002792181890706e-05,
      "loss": 5.8564,
      "step": 2500
    },
    {
      "epoch": 1.1966493817311528,
      "grad_norm": 2.1070995330810547,
      "learning_rate": 3.803350618268847e-05,
      "loss": 5.6429,
      "step": 3000
    },
    {
      "epoch": 1.3960909453530115,
      "grad_norm": 2.0321288108825684,
      "learning_rate": 3.6039090546469884e-05,
      "loss": 5.5179,
      "step": 3500
    },
    {
      "epoch": 1.5955325089748702,
      "grad_norm": 2.1192569732666016,
      "learning_rate": 3.4044674910251304e-05,
      "loss": 5.4088,
      "step": 4000
    },
    {
      "epoch": 1.7949740725967291,
      "grad_norm": 2.2034354209899902,
      "learning_rate": 3.205025927403271e-05,
      "loss": 5.3574,
      "step": 4500
    },
    {
      "epoch": 1.994415636218588,
      "grad_norm": 2.3862476348876953,
      "learning_rate": 3.0055843637814124e-05,
      "loss": 5.2802,
      "step": 5000
    },
    {
      "epoch": 2.193857199840447,
      "grad_norm": 2.287271738052368,
      "learning_rate": 2.8061428001595534e-05,
      "loss": 5.1049,
      "step": 5500
    },
    {
      "epoch": 2.3932987634623055,
      "grad_norm": 2.282344341278076,
      "learning_rate": 2.6067012365376948e-05,
      "loss": 5.0663,
      "step": 6000
    },
    {
      "epoch": 2.5927403270841642,
      "grad_norm": 2.3689157962799072,
      "learning_rate": 2.4072596729158358e-05,
      "loss": 5.0354,
      "step": 6500
    },
    {
      "epoch": 2.792181890706023,
      "grad_norm": 2.4090659618377686,
      "learning_rate": 2.207818109293977e-05,
      "loss": 4.9782,
      "step": 7000
    },
    {
      "epoch": 2.9916234543278817,
      "grad_norm": 2.3092195987701416,
      "learning_rate": 2.008376545672118e-05,
      "loss": 4.9434,
      "step": 7500
    },
    {
      "epoch": 3.191065017949741,
      "grad_norm": 2.460700273513794,
      "learning_rate": 1.8089349820502595e-05,
      "loss": 4.8539,
      "step": 8000
    },
    {
      "epoch": 3.3905065815715996,
      "grad_norm": 2.6232993602752686,
      "learning_rate": 1.6098923015556443e-05,
      "loss": 4.8056,
      "step": 8500
    },
    {
      "epoch": 3.5899481451934583,
      "grad_norm": 2.5114476680755615,
      "learning_rate": 1.4104507379337855e-05,
      "loss": 4.7642,
      "step": 9000
    },
    {
      "epoch": 3.789389708815317,
      "grad_norm": 2.446881055831909,
      "learning_rate": 1.2110091743119267e-05,
      "loss": 4.7694,
      "step": 9500
    },
    {
      "epoch": 3.988831272437176,
      "grad_norm": 2.586047410964966,
      "learning_rate": 1.0115676106900679e-05,
      "loss": 4.733,
      "step": 10000
    },
    {
      "epoch": 4.188272836059035,
      "grad_norm": 2.6265206336975098,
      "learning_rate": 8.125249301954529e-06,
      "loss": 4.6612,
      "step": 10500
    },
    {
      "epoch": 4.387714399680894,
      "grad_norm": 2.6361939907073975,
      "learning_rate": 6.13083366573594e-06,
      "loss": 4.6552,
      "step": 11000
    },
    {
      "epoch": 4.587155963302752,
      "grad_norm": 2.643498182296753,
      "learning_rate": 4.136418029517352e-06,
      "loss": 4.6594,
      "step": 11500
    },
    {
      "epoch": 4.786597526924611,
      "grad_norm": 2.601949453353882,
      "learning_rate": 2.1420023932987634e-06,
      "loss": 4.6318,
      "step": 12000
    },
    {
      "epoch": 4.98603909054647,
      "grad_norm": 2.711338996887207,
      "learning_rate": 1.515755883526127e-07,
      "loss": 4.6389,
      "step": 12500
    },
    {
      "epoch": 5.0,
      "step": 12535,
      "total_flos": 1.310011259092992e+16,
      "train_loss": 5.274392118328446,
      "train_runtime": 3806.2907,
      "train_samples_per_second": 52.686,
      "train_steps_per_second": 3.293
    },
    {
      "epoch": 5.1854806541683285,
      "grad_norm": 2.7731709480285645,
      "learning_rate": 4.907259672915836e-05,
      "loss": 4.7681,
      "step": 13000
    },
    {
      "epoch": 5.384922217790187,
      "grad_norm": 2.5628647804260254,
      "learning_rate": 4.8075388911049066e-05,
      "loss": 4.7441,
      "step": 13500
    },
    {
      "epoch": 5.584363781412046,
      "grad_norm": 2.7598373889923096,
      "learning_rate": 4.707818109293977e-05,
      "loss": 4.6634,
      "step": 14000
    },
    {
      "epoch": 5.783805345033905,
      "grad_norm": 2.619565486907959,
      "learning_rate": 4.608097327483047e-05,
      "loss": 4.6238,
      "step": 14500
    },
    {
      "epoch": 5.983246908655763,
      "grad_norm": 2.6309878826141357,
      "learning_rate": 4.508376545672118e-05,
      "loss": 4.6015,
      "step": 15000
    },
    {
      "epoch": 6.182688472277623,
      "grad_norm": 2.6872658729553223,
      "learning_rate": 4.4086557638611886e-05,
      "loss": 4.4218,
      "step": 15500
    },
    {
      "epoch": 6.382130035899482,
      "grad_norm": 2.780477285385132,
      "learning_rate": 4.3089349820502596e-05,
      "loss": 4.405,
      "step": 16000
    },
    {
      "epoch": 6.58157159952134,
      "grad_norm": 2.700143575668335,
      "learning_rate": 4.2092142002393306e-05,
      "loss": 4.3785,
      "step": 16500
    },
    {
      "epoch": 6.781013163143199,
      "grad_norm": 2.7577366828918457,
      "learning_rate": 4.1094934184284e-05,
      "loss": 4.3257,
      "step": 17000
    },
    {
      "epoch": 6.980454726765058,
      "grad_norm": 2.6972854137420654,
      "learning_rate": 4.009772636617471e-05,
      "loss": 4.3124,
      "step": 17500
    },
    {
      "epoch": 7.179896290386917,
      "grad_norm": 2.8049590587615967,
      "learning_rate": 3.9100518548065417e-05,
      "loss": 4.1901,
      "step": 18000
    },
    {
      "epoch": 7.379337854008775,
      "grad_norm": 3.0153963565826416,
      "learning_rate": 3.810331072995613e-05,
      "loss": 4.1466,
      "step": 18500
    },
    {
      "epoch": 7.578779417630634,
      "grad_norm": 3.010798215866089,
      "learning_rate": 3.710610291184683e-05,
      "loss": 4.1345,
      "step": 19000
    },
    {
      "epoch": 7.778220981252493,
      "grad_norm": 2.857867956161499,
      "learning_rate": 3.610889509373754e-05,
      "loss": 4.1093,
      "step": 19500
    },
    {
      "epoch": 7.9776625448743514,
      "grad_norm": 2.9866838455200195,
      "learning_rate": 3.5111687275628244e-05,
      "loss": 4.091,
      "step": 20000
    },
    {
      "epoch": 8.177104108496211,
      "grad_norm": 3.2831859588623047,
      "learning_rate": 3.411447945751895e-05,
      "loss": 3.9812,
      "step": 20500
    },
    {
      "epoch": 8.37654567211807,
      "grad_norm": 3.1285603046417236,
      "learning_rate": 3.311926605504587e-05,
      "loss": 3.955,
      "step": 21000
    },
    {
      "epoch": 8.575987235739928,
      "grad_norm": 2.9974467754364014,
      "learning_rate": 3.212205823693658e-05,
      "loss": 3.9307,
      "step": 21500
    },
    {
      "epoch": 8.775428799361787,
      "grad_norm": 3.0734570026397705,
      "learning_rate": 3.1124850418827286e-05,
      "loss": 3.9186,
      "step": 22000
    },
    {
      "epoch": 8.974870362983646,
      "grad_norm": 3.024569034576416,
      "learning_rate": 3.0129637016354212e-05,
      "loss": 3.9455,
      "step": 22500
    },
    {
      "epoch": 9.174311926605505,
      "grad_norm": 3.05592679977417,
      "learning_rate": 2.9132429198244916e-05,
      "loss": 3.8104,
      "step": 23000
    },
    {
      "epoch": 9.373753490227363,
      "grad_norm": 3.2957406044006348,
      "learning_rate": 2.8135221380135622e-05,
      "loss": 3.8007,
      "step": 23500
    },
    {
      "epoch": 9.573195053849222,
      "grad_norm": 3.398186683654785,
      "learning_rate": 2.7138013562026326e-05,
      "loss": 3.8345,
      "step": 24000
    },
    {
      "epoch": 9.77263661747108,
      "grad_norm": 3.1825666427612305,
      "learning_rate": 2.6140805743917036e-05,
      "loss": 3.7761,
      "step": 24500
    },
    {
      "epoch": 9.97207818109294,
      "grad_norm": 2.9551422595977783,
      "learning_rate": 2.514359792580774e-05,
      "loss": 3.777,
      "step": 25000
    },
    {
      "epoch": 10.0,
      "step": 25070,
      "total_flos": 2.620022518185984e+16,
      "train_loss": 2.0909440845904905,
      "train_runtime": 3914.0913,
      "train_samples_per_second": 102.471,
      "train_steps_per_second": 6.405
    }
  ],
  "logging_steps": 500,
  "max_steps": 25070,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.620022518185984e+16,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}