{
  "best_metric": 3.432676315307617,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.009146306607063235,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0001829261321412647,
      "grad_norm": 1.9110344648361206,
      "learning_rate": 5e-05,
      "loss": 3.6323,
      "step": 1
    },
    {
      "epoch": 0.0001829261321412647,
      "eval_loss": 6.4708404541015625,
      "eval_runtime": 2.6694,
      "eval_samples_per_second": 18.731,
      "eval_steps_per_second": 4.87,
      "step": 1
    },
    {
      "epoch": 0.0003658522642825294,
      "grad_norm": 1.928886890411377,
      "learning_rate": 0.0001,
      "loss": 4.427,
      "step": 2
    },
    {
      "epoch": 0.0005487783964237942,
      "grad_norm": 1.885818362236023,
      "learning_rate": 9.990365154573717e-05,
      "loss": 4.7624,
      "step": 3
    },
    {
      "epoch": 0.0007317045285650588,
      "grad_norm": 2.122727155685425,
      "learning_rate": 9.961501876182148e-05,
      "loss": 4.9093,
      "step": 4
    },
    {
      "epoch": 0.0009146306607063236,
      "grad_norm": 2.3126866817474365,
      "learning_rate": 9.913533761814537e-05,
      "loss": 4.9697,
      "step": 5
    },
    {
      "epoch": 0.0010975567928475883,
      "grad_norm": 2.9748995304107666,
      "learning_rate": 9.846666218300807e-05,
      "loss": 5.0934,
      "step": 6
    },
    {
      "epoch": 0.001280482924988853,
      "grad_norm": 3.3190455436706543,
      "learning_rate": 9.761185582727977e-05,
      "loss": 5.0183,
      "step": 7
    },
    {
      "epoch": 0.0014634090571301176,
      "grad_norm": 3.633790969848633,
      "learning_rate": 9.657457896300791e-05,
      "loss": 4.873,
      "step": 8
    },
    {
      "epoch": 0.0016463351892713823,
      "grad_norm": 3.4954895973205566,
      "learning_rate": 9.535927336897098e-05,
      "loss": 4.6961,
      "step": 9
    },
    {
      "epoch": 0.0018292613214126471,
      "grad_norm": 3.1350529193878174,
      "learning_rate": 9.397114317029975e-05,
      "loss": 4.7941,
      "step": 10
    },
    {
      "epoch": 0.002012187453553912,
      "grad_norm": 3.9131839275360107,
      "learning_rate": 9.241613255361455e-05,
      "loss": 4.3245,
      "step": 11
    },
    {
      "epoch": 0.0021951135856951767,
      "grad_norm": 4.561995029449463,
      "learning_rate": 9.070090031310558e-05,
      "loss": 4.2026,
      "step": 12
    },
    {
      "epoch": 0.002378039717836441,
      "grad_norm": 5.815301895141602,
      "learning_rate": 8.883279133655399e-05,
      "loss": 3.5911,
      "step": 13
    },
    {
      "epoch": 0.002560965849977706,
      "grad_norm": 5.602147579193115,
      "learning_rate": 8.681980515339464e-05,
      "loss": 3.6752,
      "step": 14
    },
    {
      "epoch": 0.0027438919821189704,
      "grad_norm": 3.8843131065368652,
      "learning_rate": 8.467056167950311e-05,
      "loss": 3.6851,
      "step": 15
    },
    {
      "epoch": 0.0029268181142602353,
      "grad_norm": 3.2248361110687256,
      "learning_rate": 8.239426430539243e-05,
      "loss": 3.7318,
      "step": 16
    },
    {
      "epoch": 0.0031097442464015,
      "grad_norm": 2.665513277053833,
      "learning_rate": 8.000066048588211e-05,
      "loss": 3.7105,
      "step": 17
    },
    {
      "epoch": 0.0032926703785427646,
      "grad_norm": 2.0910823345184326,
      "learning_rate": 7.75e-05,
      "loss": 3.5708,
      "step": 18
    },
    {
      "epoch": 0.0034755965106840294,
      "grad_norm": 1.4845154285430908,
      "learning_rate": 7.490299105985507e-05,
      "loss": 3.6131,
      "step": 19
    },
    {
      "epoch": 0.0036585226428252943,
      "grad_norm": 1.4835506677627563,
      "learning_rate": 7.222075445642904e-05,
      "loss": 3.584,
      "step": 20
    },
    {
      "epoch": 0.0038414487749665587,
      "grad_norm": 1.5968637466430664,
      "learning_rate": 6.946477593864228e-05,
      "loss": 3.7142,
      "step": 21
    },
    {
      "epoch": 0.004024374907107824,
      "grad_norm": 1.9748867750167847,
      "learning_rate": 6.664685702961344e-05,
      "loss": 3.7735,
      "step": 22
    },
    {
      "epoch": 0.0042073010392490885,
      "grad_norm": 2.7007381916046143,
      "learning_rate": 6.377906449072578e-05,
      "loss": 3.7163,
      "step": 23
    },
    {
      "epoch": 0.004390227171390353,
      "grad_norm": 2.6866607666015625,
      "learning_rate": 6.087367864990233e-05,
      "loss": 3.5792,
      "step": 24
    },
    {
      "epoch": 0.004573153303531617,
      "grad_norm": 4.762912273406982,
      "learning_rate": 5.794314081535644e-05,
      "loss": 4.3612,
      "step": 25
    },
    {
      "epoch": 0.004573153303531617,
      "eval_loss": 3.5562896728515625,
      "eval_runtime": 2.0393,
      "eval_samples_per_second": 24.518,
      "eval_steps_per_second": 6.375,
      "step": 25
    },
    {
      "epoch": 0.004756079435672882,
      "grad_norm": 1.803903341293335,
      "learning_rate": 5.500000000000001e-05,
      "loss": 3.0202,
      "step": 26
    },
    {
      "epoch": 0.004939005567814147,
      "grad_norm": 1.4035038948059082,
      "learning_rate": 5.205685918464356e-05,
      "loss": 3.1542,
      "step": 27
    },
    {
      "epoch": 0.005121931699955412,
      "grad_norm": 1.3655030727386475,
      "learning_rate": 4.912632135009769e-05,
      "loss": 3.2782,
      "step": 28
    },
    {
      "epoch": 0.005304857832096677,
      "grad_norm": 1.1467270851135254,
      "learning_rate": 4.6220935509274235e-05,
      "loss": 3.396,
      "step": 29
    },
    {
      "epoch": 0.005487783964237941,
      "grad_norm": 1.1027382612228394,
      "learning_rate": 4.3353142970386564e-05,
      "loss": 3.4175,
      "step": 30
    },
    {
      "epoch": 0.005670710096379206,
      "grad_norm": 1.5248521566390991,
      "learning_rate": 4.053522406135775e-05,
      "loss": 3.4381,
      "step": 31
    },
    {
      "epoch": 0.0058536362285204705,
      "grad_norm": 1.9056307077407837,
      "learning_rate": 3.777924554357096e-05,
      "loss": 3.4771,
      "step": 32
    },
    {
      "epoch": 0.006036562360661735,
      "grad_norm": 1.7207236289978027,
      "learning_rate": 3.509700894014496e-05,
      "loss": 3.5574,
      "step": 33
    },
    {
      "epoch": 0.006219488492803,
      "grad_norm": 1.2745652198791504,
      "learning_rate": 3.250000000000001e-05,
      "loss": 3.6236,
      "step": 34
    },
    {
      "epoch": 0.006402414624944265,
      "grad_norm": 1.5298084020614624,
      "learning_rate": 2.9999339514117912e-05,
      "loss": 3.5052,
      "step": 35
    },
    {
      "epoch": 0.006585340757085529,
      "grad_norm": 1.6400824785232544,
      "learning_rate": 2.760573569460757e-05,
      "loss": 3.5871,
      "step": 36
    },
    {
      "epoch": 0.006768266889226794,
      "grad_norm": 2.165523052215576,
      "learning_rate": 2.53294383204969e-05,
      "loss": 3.7025,
      "step": 37
    },
    {
      "epoch": 0.006951193021368059,
      "grad_norm": 1.51387619972229,
      "learning_rate": 2.3180194846605367e-05,
      "loss": 3.0254,
      "step": 38
    },
    {
      "epoch": 0.007134119153509324,
      "grad_norm": 1.3941786289215088,
      "learning_rate": 2.1167208663446025e-05,
      "loss": 3.1322,
      "step": 39
    },
    {
      "epoch": 0.007317045285650589,
      "grad_norm": 1.2476261854171753,
      "learning_rate": 1.9299099686894423e-05,
      "loss": 3.1812,
      "step": 40
    },
    {
      "epoch": 0.007499971417791853,
      "grad_norm": 1.377185344696045,
      "learning_rate": 1.758386744638546e-05,
      "loss": 3.4018,
      "step": 41
    },
    {
      "epoch": 0.0076828975499331174,
      "grad_norm": 1.416702389717102,
      "learning_rate": 1.602885682970026e-05,
      "loss": 3.2872,
      "step": 42
    },
    {
      "epoch": 0.007865823682074382,
      "grad_norm": 1.2533056735992432,
      "learning_rate": 1.464072663102903e-05,
      "loss": 3.4308,
      "step": 43
    },
    {
      "epoch": 0.008048749814215647,
      "grad_norm": 1.3103725910186768,
      "learning_rate": 1.3425421036992098e-05,
      "loss": 3.4037,
      "step": 44
    },
    {
      "epoch": 0.008231675946356912,
      "grad_norm": 1.1992508172988892,
      "learning_rate": 1.2388144172720251e-05,
      "loss": 3.5168,
      "step": 45
    },
    {
      "epoch": 0.008414602078498177,
      "grad_norm": 1.243343710899353,
      "learning_rate": 1.1533337816991932e-05,
      "loss": 3.4219,
      "step": 46
    },
    {
      "epoch": 0.008597528210639442,
      "grad_norm": 1.248721718788147,
      "learning_rate": 1.0864662381854632e-05,
      "loss": 3.3442,
      "step": 47
    },
    {
      "epoch": 0.008780454342780707,
      "grad_norm": 1.4735393524169922,
      "learning_rate": 1.0384981238178534e-05,
      "loss": 3.6574,
      "step": 48
    },
    {
      "epoch": 0.00896338047492197,
      "grad_norm": 1.8419065475463867,
      "learning_rate": 1.0096348454262845e-05,
      "loss": 3.504,
      "step": 49
    },
    {
      "epoch": 0.009146306607063235,
      "grad_norm": 3.8599138259887695,
      "learning_rate": 1e-05,
      "loss": 3.868,
      "step": 50
    },
    {
      "epoch": 0.009146306607063235,
      "eval_loss": 3.432676315307617,
      "eval_runtime": 2.0429,
      "eval_samples_per_second": 24.475,
      "eval_steps_per_second": 6.363,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.968083617316864e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}