{
"best_metric": 0.034579914063215256,
"best_model_checkpoint": "miner_id_24/checkpoint-50",
"epoch": 0.2656924609764198,
"eval_steps": 25,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.005313849219528396,
"grad_norm": 23.628463745117188,
"learning_rate": 5e-05,
"loss": 10.8982,
"step": 1
},
{
"epoch": 0.005313849219528396,
"eval_loss": 11.041999816894531,
"eval_runtime": 2.6503,
"eval_samples_per_second": 18.866,
"eval_steps_per_second": 4.905,
"step": 1
},
{
"epoch": 0.010627698439056792,
"grad_norm": 23.705106735229492,
"learning_rate": 0.0001,
"loss": 11.0253,
"step": 2
},
{
"epoch": 0.015941547658585187,
"grad_norm": 27.114561080932617,
"learning_rate": 9.990365154573717e-05,
"loss": 9.6993,
"step": 3
},
{
"epoch": 0.021255396878113585,
"grad_norm": 25.034391403198242,
"learning_rate": 9.961501876182148e-05,
"loss": 2.9229,
"step": 4
},
{
"epoch": 0.02656924609764198,
"grad_norm": 6.130597114562988,
"learning_rate": 9.913533761814537e-05,
"loss": 0.9386,
"step": 5
},
{
"epoch": 0.031883095317170373,
"grad_norm": 12.182331085205078,
"learning_rate": 9.846666218300807e-05,
"loss": 1.0357,
"step": 6
},
{
"epoch": 0.03719694453669877,
"grad_norm": 1.75734543800354,
"learning_rate": 9.761185582727977e-05,
"loss": 0.7076,
"step": 7
},
{
"epoch": 0.04251079375622717,
"grad_norm": 2.458411455154419,
"learning_rate": 9.657457896300791e-05,
"loss": 0.7182,
"step": 8
},
{
"epoch": 0.04782464297575556,
"grad_norm": 2.2449662685394287,
"learning_rate": 9.535927336897098e-05,
"loss": 0.7122,
"step": 9
},
{
"epoch": 0.05313849219528396,
"grad_norm": 1.5674667358398438,
"learning_rate": 9.397114317029975e-05,
"loss": 0.6785,
"step": 10
},
{
"epoch": 0.058452341414812356,
"grad_norm": 1.9958386421203613,
"learning_rate": 9.241613255361455e-05,
"loss": 0.6132,
"step": 11
},
{
"epoch": 0.06376619063434075,
"grad_norm": 3.479598045349121,
"learning_rate": 9.070090031310558e-05,
"loss": 0.4993,
"step": 12
},
{
"epoch": 0.06908003985386915,
"grad_norm": 6.28720235824585,
"learning_rate": 8.883279133655399e-05,
"loss": 0.4418,
"step": 13
},
{
"epoch": 0.07439388907339754,
"grad_norm": 3.4374570846557617,
"learning_rate": 8.681980515339464e-05,
"loss": 0.3487,
"step": 14
},
{
"epoch": 0.07970773829292593,
"grad_norm": 5.028607368469238,
"learning_rate": 8.467056167950311e-05,
"loss": 0.3446,
"step": 15
},
{
"epoch": 0.08502158751245434,
"grad_norm": 2.4742584228515625,
"learning_rate": 8.239426430539243e-05,
"loss": 0.3521,
"step": 16
},
{
"epoch": 0.09033543673198273,
"grad_norm": 4.0169172286987305,
"learning_rate": 8.000066048588211e-05,
"loss": 0.3533,
"step": 17
},
{
"epoch": 0.09564928595151112,
"grad_norm": 2.451641082763672,
"learning_rate": 7.75e-05,
"loss": 0.3394,
"step": 18
},
{
"epoch": 0.10096313517103953,
"grad_norm": 2.82594633102417,
"learning_rate": 7.490299105985507e-05,
"loss": 0.2622,
"step": 19
},
{
"epoch": 0.10627698439056792,
"grad_norm": 3.50852108001709,
"learning_rate": 7.222075445642904e-05,
"loss": 0.282,
"step": 20
},
{
"epoch": 0.11159083361009631,
"grad_norm": 3.7088255882263184,
"learning_rate": 6.946477593864228e-05,
"loss": 0.2771,
"step": 21
},
{
"epoch": 0.11690468282962471,
"grad_norm": 2.132201910018921,
"learning_rate": 6.664685702961344e-05,
"loss": 0.2332,
"step": 22
},
{
"epoch": 0.1222185320491531,
"grad_norm": 2.604558229446411,
"learning_rate": 6.377906449072578e-05,
"loss": 0.2375,
"step": 23
},
{
"epoch": 0.1275323812686815,
"grad_norm": 2.225511312484741,
"learning_rate": 6.087367864990233e-05,
"loss": 0.2396,
"step": 24
},
{
"epoch": 0.1328462304882099,
"grad_norm": 2.3635268211364746,
"learning_rate": 5.794314081535644e-05,
"loss": 0.1778,
"step": 25
},
{
"epoch": 0.1328462304882099,
"eval_loss": 0.09752233326435089,
"eval_runtime": 2.0324,
"eval_samples_per_second": 24.602,
"eval_steps_per_second": 6.397,
"step": 25
},
{
"epoch": 0.1381600797077383,
"grad_norm": 1.9589002132415771,
"learning_rate": 5.500000000000001e-05,
"loss": 0.1831,
"step": 26
},
{
"epoch": 0.14347392892726668,
"grad_norm": 2.174701690673828,
"learning_rate": 5.205685918464356e-05,
"loss": 0.1831,
"step": 27
},
{
"epoch": 0.14878777814679509,
"grad_norm": 2.3990869522094727,
"learning_rate": 4.912632135009769e-05,
"loss": 0.1238,
"step": 28
},
{
"epoch": 0.1541016273663235,
"grad_norm": 2.058366298675537,
"learning_rate": 4.6220935509274235e-05,
"loss": 0.1291,
"step": 29
},
{
"epoch": 0.15941547658585187,
"grad_norm": 2.2778303623199463,
"learning_rate": 4.3353142970386564e-05,
"loss": 0.1427,
"step": 30
},
{
"epoch": 0.16472932580538027,
"grad_norm": 2.884948492050171,
"learning_rate": 4.053522406135775e-05,
"loss": 0.0802,
"step": 31
},
{
"epoch": 0.17004317502490868,
"grad_norm": 2.686048984527588,
"learning_rate": 3.777924554357096e-05,
"loss": 0.155,
"step": 32
},
{
"epoch": 0.17535702424443705,
"grad_norm": 2.415022611618042,
"learning_rate": 3.509700894014496e-05,
"loss": 0.2073,
"step": 33
},
{
"epoch": 0.18067087346396546,
"grad_norm": 2.207425117492676,
"learning_rate": 3.250000000000001e-05,
"loss": 0.1384,
"step": 34
},
{
"epoch": 0.18598472268349386,
"grad_norm": 1.7799278497695923,
"learning_rate": 2.9999339514117912e-05,
"loss": 0.1412,
"step": 35
},
{
"epoch": 0.19129857190302224,
"grad_norm": 1.3226287364959717,
"learning_rate": 2.760573569460757e-05,
"loss": 0.0728,
"step": 36
},
{
"epoch": 0.19661242112255065,
"grad_norm": 1.6843650341033936,
"learning_rate": 2.53294383204969e-05,
"loss": 0.1343,
"step": 37
},
{
"epoch": 0.20192627034207905,
"grad_norm": 1.350917935371399,
"learning_rate": 2.3180194846605367e-05,
"loss": 0.1155,
"step": 38
},
{
"epoch": 0.20724011956160743,
"grad_norm": 1.1504665613174438,
"learning_rate": 2.1167208663446025e-05,
"loss": 0.0929,
"step": 39
},
{
"epoch": 0.21255396878113583,
"grad_norm": 1.5761011838912964,
"learning_rate": 1.9299099686894423e-05,
"loss": 0.1395,
"step": 40
},
{
"epoch": 0.21786781800066424,
"grad_norm": 1.5024954080581665,
"learning_rate": 1.758386744638546e-05,
"loss": 0.107,
"step": 41
},
{
"epoch": 0.22318166722019261,
"grad_norm": 1.106066346168518,
"learning_rate": 1.602885682970026e-05,
"loss": 0.1,
"step": 42
},
{
"epoch": 0.22849551643972102,
"grad_norm": 1.5602272748947144,
"learning_rate": 1.464072663102903e-05,
"loss": 0.0827,
"step": 43
},
{
"epoch": 0.23380936565924942,
"grad_norm": 1.4273624420166016,
"learning_rate": 1.3425421036992098e-05,
"loss": 0.0767,
"step": 44
},
{
"epoch": 0.2391232148787778,
"grad_norm": 1.07826566696167,
"learning_rate": 1.2388144172720251e-05,
"loss": 0.0417,
"step": 45
},
{
"epoch": 0.2444370640983062,
"grad_norm": 1.6851541996002197,
"learning_rate": 1.1533337816991932e-05,
"loss": 0.0851,
"step": 46
},
{
"epoch": 0.2497509133178346,
"grad_norm": 1.4490783214569092,
"learning_rate": 1.0864662381854632e-05,
"loss": 0.0528,
"step": 47
},
{
"epoch": 0.255064762537363,
"grad_norm": 1.4430365562438965,
"learning_rate": 1.0384981238178534e-05,
"loss": 0.05,
"step": 48
},
{
"epoch": 0.2603786117568914,
"grad_norm": 2.2319040298461914,
"learning_rate": 1.0096348454262845e-05,
"loss": 0.0895,
"step": 49
},
{
"epoch": 0.2656924609764198,
"grad_norm": 1.0726263523101807,
"learning_rate": 1e-05,
"loss": 0.024,
"step": 50
},
{
"epoch": 0.2656924609764198,
"eval_loss": 0.034579914063215256,
"eval_runtime": 2.0459,
"eval_samples_per_second": 24.439,
"eval_steps_per_second": 6.354,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 50,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.968083617316864e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
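
A minimal sketch (not part of the checkpoint itself), assuming this state is saved under the Trainer's standard filename trainer_state.json: it loads the JSON above with plain Python and prints the train-loss, learning-rate, and eval-loss history recorded in log_history.

import json

# Load the trainer state written by the Hugging Face Trainer at checkpoint time.
# The path below is an assumption; adjust it to wherever this file lives.
with open("trainer_state.json") as f:
    state = json.load(f)

# Training steps carry a "loss" key; evaluation entries carry "eval_loss".
train_entries = [e for e in state["log_history"] if "loss" in e]
eval_entries = [e for e in state["log_history"] if "eval_loss" in e]

print(f"best eval loss: {state['best_metric']} at {state['best_model_checkpoint']}")
for e in train_entries:
    print(f"step {e['step']:>3}  lr {e['learning_rate']:.2e}  loss {e['loss']:.4f}")
for e in eval_entries:
    print(f"step {e['step']:>3}  eval_loss {e['eval_loss']:.4f}")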