poca-SoccerTwos/run_logs/timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.221144199371338,
"min": 3.188920736312866,
"max": 3.2957088947296143,
"count": 50
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 86893.5859375,
"min": 42178.09765625,
"max": 105462.515625,
"count": 50
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 928.1666666666666,
"min": 485.1111111111111,
"max": 999.0,
"count": 50
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 22276.0,
"min": 14168.0,
"max": 28044.0,
"count": 50
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1189.182282031378,
"min": 1188.4081430847966,
"max": 1202.4685887719338,
"count": 38
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 2378.364564062756,
"min": 2376.8162861695932,
"max": 14389.387209866527,
"count": 38
},
"SoccerTwos.Step.mean": {
"value": 499812.0,
"min": 9038.0,
"max": 499812.0,
"count": 50
},
"SoccerTwos.Step.sum": {
"value": 499812.0,
"min": 9038.0,
"max": 499812.0,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.01624487154185772,
"min": -0.029827257618308067,
"max": -0.001312743523158133,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.17869359254837036,
"min": -0.5384518504142761,
"max": -0.022316640242934227,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.016229845583438873,
"min": -0.03340421989560127,
"max": -0.002580253640189767,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.1785283088684082,
"min": -0.6346802115440369,
"max": -0.03354329615831375,
"count": 50
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 50
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.07716363668441772,
"min": -0.625,
"max": 0.23827693095574012,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 0.848800003528595,
"min": -10.0,
"max": 3.0976001024246216,
"count": 50
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.07716363668441772,
"min": -0.625,
"max": 0.23827693095574012,
"count": 50
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 0.848800003528595,
"min": -10.0,
"max": 3.0976001024246216,
"count": 50
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.014975171471208644,
"min": 0.011634940153938563,
"max": 0.022144934178019562,
"count": 23
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.014975171471208644,
"min": 0.011634940153938563,
"max": 0.022144934178019562,
"count": 23
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.000778963102978499,
"min": 4.6427225546115856e-05,
"max": 0.006489968532696367,
"count": 23
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.000778963102978499,
"min": 4.6427225546115856e-05,
"max": 0.006489968532696367,
"count": 23
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.0007812570198439062,
"min": 4.9472472892375666e-05,
"max": 0.006601646480460962,
"count": 23
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.0007812570198439062,
"min": 4.9472472892375666e-05,
"max": 0.006601646480460962,
"count": 23
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 23
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 23
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 23
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 23
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 23
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 23
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1703691093",
"python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\Sam\\anaconda3\\envs\\hf-rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos2 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1703692120"
},
"total": 1027.0313427,
"count": 1,
"self": 0.47245939999993425,
"children": {
"run_training.setup": {
"total": 0.10348060000069381,
"count": 1,
"self": 0.10348060000069381
},
"TrainerController.start_learning": {
"total": 1026.4554026999995,
"count": 1,
"self": 0.3790510998478567,
"children": {
"TrainerController._reset_env": {
"total": 3.148382799998217,
"count": 3,
"self": 3.148382799998217
},
"TrainerController.advance": {
"total": 1022.8390034001532,
"count": 32953,
"self": 0.3991325004353712,
"children": {
"env_step": {
"total": 361.635755599631,
"count": 32953,
"self": 272.21735489981074,
"children": {
"SubprocessEnvManager._take_step": {
"total": 89.17183589986598,
"count": 32953,
"self": 2.8537339002523368,
"children": {
"TorchPolicy.evaluate": {
"total": 86.31810199961365,
"count": 65432,
"self": 86.31810199961365
}
}
},
"workers": {
"total": 0.24656479995428526,
"count": 32953,
"self": 0.0,
"children": {
"worker_root": {
"total": 1022.7023911000233,
"count": 32953,
"is_parallel": true,
"self": 808.1815209998422,
"children": {
"steps_from_proto": {
"total": 0.003891899998052395,
"count": 6,
"is_parallel": true,
"self": 0.0007979999954841333,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0030939000025682617,
"count": 24,
"is_parallel": true,
"self": 0.0030939000025682617
}
}
},
"UnityEnvironment.step": {
"total": 214.51697820018308,
"count": 32953,
"is_parallel": true,
"self": 12.72579990014674,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 11.795844799828046,
"count": 32953,
"is_parallel": true,
"self": 11.795844799828046
},
"communicator.exchange": {
"total": 151.39225170007558,
"count": 32953,
"is_parallel": true,
"self": 151.39225170007558
},
"steps_from_proto": {
"total": 38.60308180013271,
"count": 65906,
"is_parallel": true,
"self": 7.4401286005831935,
"children": {
"_process_rank_one_or_two_observation": {
"total": 31.16295319954952,
"count": 263624,
"is_parallel": true,
"self": 31.16295319954952
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 660.8041153000868,
"count": 32953,
"self": 3.57287010010441,
"children": {
"process_trajectory": {
"total": 82.72817269997904,
"count": 32953,
"self": 82.6200198999777,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10815280000133498,
"count": 1,
"self": 0.10815280000133498
}
}
},
"_update_policy": {
"total": 574.5030725000033,
"count": 23,
"self": 56.93176390002736,
"children": {
"TorchPOCAOptimizer.update": {
"total": 517.571308599976,
"count": 690,
"self": 517.571308599976
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3999997463542968e-06,
"count": 1,
"self": 1.3999997463542968e-06
},
"TrainerController._save_models": {
"total": 0.08896400000048743,
"count": 1,
"self": 0.006688400000712136,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0822755999997753,
"count": 1,
"self": 0.0822755999997753
}
}
}
}
}
}
}