{
  "best_metric": 2.2648308277130127,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.0167115790353241,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.000334231580706482,
      "grad_norm": 5.496165752410889,
      "learning_rate": 5e-05,
      "loss": 31.0236,
      "step": 1
    },
    {
      "epoch": 0.000334231580706482,
      "eval_loss": 2.416963577270508,
      "eval_runtime": 4.6942,
      "eval_samples_per_second": 10.651,
      "eval_steps_per_second": 2.769,
      "step": 1
    },
    {
      "epoch": 0.000668463161412964,
      "grad_norm": 8.403727531433105,
      "learning_rate": 0.0001,
      "loss": 32.6935,
      "step": 2
    },
    {
      "epoch": 0.001002694742119446,
      "grad_norm": 6.8086442947387695,
      "learning_rate": 9.990365154573717e-05,
      "loss": 33.4726,
      "step": 3
    },
    {
      "epoch": 0.001336926322825928,
      "grad_norm": 6.322213649749756,
      "learning_rate": 9.961501876182148e-05,
      "loss": 34.4679,
      "step": 4
    },
    {
      "epoch": 0.00167115790353241,
      "grad_norm": 9.177667617797852,
      "learning_rate": 9.913533761814537e-05,
      "loss": 33.4588,
      "step": 5
    },
    {
      "epoch": 0.002005389484238892,
      "grad_norm": 9.946578979492188,
      "learning_rate": 9.846666218300807e-05,
      "loss": 33.4397,
      "step": 6
    },
    {
      "epoch": 0.0023396210649453742,
      "grad_norm": 9.198806762695312,
      "learning_rate": 9.761185582727977e-05,
      "loss": 34.1534,
      "step": 7
    },
    {
      "epoch": 0.002673852645651856,
      "grad_norm": 19.019378662109375,
      "learning_rate": 9.657457896300791e-05,
      "loss": 34.8272,
      "step": 8
    },
    {
      "epoch": 0.003008084226358338,
      "grad_norm": 14.353171348571777,
      "learning_rate": 9.535927336897098e-05,
      "loss": 35.0351,
      "step": 9
    },
    {
      "epoch": 0.00334231580706482,
      "grad_norm": 15.570418357849121,
      "learning_rate": 9.397114317029975e-05,
      "loss": 38.1495,
      "step": 10
    },
    {
      "epoch": 0.003676547387771302,
      "grad_norm": 16.995012283325195,
      "learning_rate": 9.241613255361455e-05,
      "loss": 37.8086,
      "step": 11
    },
    {
      "epoch": 0.004010778968477784,
      "grad_norm": 20.282392501831055,
      "learning_rate": 9.070090031310558e-05,
      "loss": 40.2625,
      "step": 12
    },
    {
      "epoch": 0.004345010549184266,
      "grad_norm": 18.340831756591797,
      "learning_rate": 8.883279133655399e-05,
      "loss": 39.0408,
      "step": 13
    },
    {
      "epoch": 0.0046792421298907485,
      "grad_norm": 9.529115676879883,
      "learning_rate": 8.681980515339464e-05,
      "loss": 34.0686,
      "step": 14
    },
    {
      "epoch": 0.00501347371059723,
      "grad_norm": 10.36082649230957,
      "learning_rate": 8.467056167950311e-05,
      "loss": 34.1487,
      "step": 15
    },
    {
      "epoch": 0.005347705291303712,
      "grad_norm": 11.908557891845703,
      "learning_rate": 8.239426430539243e-05,
      "loss": 33.2204,
      "step": 16
    },
    {
      "epoch": 0.005681936872010194,
      "grad_norm": 8.402276039123535,
      "learning_rate": 8.000066048588211e-05,
      "loss": 33.5902,
      "step": 17
    },
    {
      "epoch": 0.006016168452716676,
      "grad_norm": 8.26902961730957,
      "learning_rate": 7.75e-05,
      "loss": 33.4582,
      "step": 18
    },
    {
      "epoch": 0.006350400033423158,
      "grad_norm": 9.017067909240723,
      "learning_rate": 7.490299105985507e-05,
      "loss": 34.3777,
      "step": 19
    },
    {
      "epoch": 0.00668463161412964,
      "grad_norm": 8.824953079223633,
      "learning_rate": 7.222075445642904e-05,
      "loss": 32.7606,
      "step": 20
    },
    {
      "epoch": 0.007018863194836122,
      "grad_norm": 9.761025428771973,
      "learning_rate": 6.946477593864228e-05,
      "loss": 35.2556,
      "step": 21
    },
    {
      "epoch": 0.007353094775542604,
      "grad_norm": 11.351914405822754,
      "learning_rate": 6.664685702961344e-05,
      "loss": 36.7428,
      "step": 22
    },
    {
      "epoch": 0.007687326356249086,
      "grad_norm": 17.06279182434082,
      "learning_rate": 6.377906449072578e-05,
      "loss": 36.5333,
      "step": 23
    },
    {
      "epoch": 0.008021557936955567,
      "grad_norm": 18.976041793823242,
      "learning_rate": 6.087367864990233e-05,
      "loss": 40.7233,
      "step": 24
    },
    {
      "epoch": 0.00835578951766205,
      "grad_norm": 22.932994842529297,
      "learning_rate": 5.794314081535644e-05,
      "loss": 44.8268,
      "step": 25
    },
    {
      "epoch": 0.00835578951766205,
      "eval_loss": 2.2737886905670166,
      "eval_runtime": 4.7443,
      "eval_samples_per_second": 10.539,
      "eval_steps_per_second": 2.74,
      "step": 25
    },
    {
      "epoch": 0.008690021098368531,
      "grad_norm": 4.368255615234375,
      "learning_rate": 5.500000000000001e-05,
      "loss": 32.8048,
      "step": 26
    },
    {
      "epoch": 0.009024252679075015,
      "grad_norm": 5.049758434295654,
      "learning_rate": 5.205685918464356e-05,
      "loss": 33.632,
      "step": 27
    },
    {
      "epoch": 0.009358484259781497,
      "grad_norm": 5.552004337310791,
      "learning_rate": 4.912632135009769e-05,
      "loss": 33.4633,
      "step": 28
    },
    {
      "epoch": 0.009692715840487979,
      "grad_norm": 6.246798038482666,
      "learning_rate": 4.6220935509274235e-05,
      "loss": 31.611,
      "step": 29
    },
    {
      "epoch": 0.01002694742119446,
      "grad_norm": 5.866751670837402,
      "learning_rate": 4.3353142970386564e-05,
      "loss": 32.8603,
      "step": 30
    },
    {
      "epoch": 0.010361179001900943,
      "grad_norm": 7.1694865226745605,
      "learning_rate": 4.053522406135775e-05,
      "loss": 33.6185,
      "step": 31
    },
    {
      "epoch": 0.010695410582607425,
      "grad_norm": 7.402153491973877,
      "learning_rate": 3.777924554357096e-05,
      "loss": 33.4369,
      "step": 32
    },
    {
      "epoch": 0.011029642163313907,
      "grad_norm": 7.939614772796631,
      "learning_rate": 3.509700894014496e-05,
      "loss": 34.4956,
      "step": 33
    },
    {
      "epoch": 0.011363873744020389,
      "grad_norm": 9.427316665649414,
      "learning_rate": 3.250000000000001e-05,
      "loss": 35.5888,
      "step": 34
    },
    {
      "epoch": 0.01169810532472687,
      "grad_norm": 11.526426315307617,
      "learning_rate": 2.9999339514117912e-05,
      "loss": 34.8295,
      "step": 35
    },
    {
      "epoch": 0.012032336905433353,
      "grad_norm": 15.830511093139648,
      "learning_rate": 2.760573569460757e-05,
      "loss": 38.0113,
      "step": 36
    },
    {
      "epoch": 0.012366568486139835,
      "grad_norm": 16.048410415649414,
      "learning_rate": 2.53294383204969e-05,
      "loss": 41.1285,
      "step": 37
    },
    {
      "epoch": 0.012700800066846317,
      "grad_norm": 17.221982955932617,
      "learning_rate": 2.3180194846605367e-05,
      "loss": 40.34,
      "step": 38
    },
    {
      "epoch": 0.013035031647552799,
      "grad_norm": 3.4482598304748535,
      "learning_rate": 2.1167208663446025e-05,
      "loss": 32.6562,
      "step": 39
    },
    {
      "epoch": 0.01336926322825928,
      "grad_norm": 4.213986873626709,
      "learning_rate": 1.9299099686894423e-05,
      "loss": 33.9774,
      "step": 40
    },
    {
      "epoch": 0.013703494808965763,
      "grad_norm": 4.579893589019775,
      "learning_rate": 1.758386744638546e-05,
      "loss": 32.5801,
      "step": 41
    },
    {
      "epoch": 0.014037726389672244,
      "grad_norm": 5.420438289642334,
      "learning_rate": 1.602885682970026e-05,
      "loss": 31.9658,
      "step": 42
    },
    {
      "epoch": 0.014371957970378726,
      "grad_norm": 6.065475940704346,
      "learning_rate": 1.464072663102903e-05,
      "loss": 34.2332,
      "step": 43
    },
    {
      "epoch": 0.014706189551085208,
      "grad_norm": 6.8349080085754395,
      "learning_rate": 1.3425421036992098e-05,
      "loss": 32.6066,
      "step": 44
    },
    {
      "epoch": 0.01504042113179169,
      "grad_norm": 7.83049201965332,
      "learning_rate": 1.2388144172720251e-05,
      "loss": 33.1971,
      "step": 45
    },
    {
      "epoch": 0.015374652712498172,
      "grad_norm": 9.868491172790527,
      "learning_rate": 1.1533337816991932e-05,
      "loss": 33.5423,
      "step": 46
    },
    {
      "epoch": 0.015708884293204654,
      "grad_norm": 10.248172760009766,
      "learning_rate": 1.0864662381854632e-05,
      "loss": 35.64,
      "step": 47
    },
    {
      "epoch": 0.016043115873911135,
      "grad_norm": 12.959470748901367,
      "learning_rate": 1.0384981238178534e-05,
      "loss": 36.4588,
      "step": 48
    },
    {
      "epoch": 0.01637734745461762,
      "grad_norm": 14.926746368408203,
      "learning_rate": 1.0096348454262845e-05,
      "loss": 38.2075,
      "step": 49
    },
    {
      "epoch": 0.0167115790353241,
      "grad_norm": 21.086666107177734,
      "learning_rate": 1e-05,
      "loss": 45.6951,
      "step": 50
    },
    {
      "epoch": 0.0167115790353241,
      "eval_loss": 2.2648308277130127,
      "eval_runtime": 4.7822,
      "eval_samples_per_second": 10.455,
      "eval_steps_per_second": 2.718,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.658021338284032e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}