Second Push
5efb73d
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.4300297498703003,
"min": 1.2939000129699707,
"max": 1.5566500425338745,
"count": 1868
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 29836.140625,
"min": 22882.525390625,
"max": 34798.74609375,
"count": 1868
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 46.556603773584904,
"min": 32.557046979865774,
"max": 80.59677419354838,
"count": 1868
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19740.0,
"min": 14076.0,
"max": 20932.0,
"count": 1868
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1505.6478202822311,
"min": 1444.130187443437,
"max": 1529.8664604141977,
"count": 1868
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 319197.337899833,
"min": 176473.29914229224,
"max": 440870.4304579644,
"count": 1868
},
"SoccerTwos.Step.mean": {
"value": 49999985.0,
"min": 31329827.0,
"max": 49999985.0,
"count": 1868
},
"SoccerTwos.Step.sum": {
"value": 49999985.0,
"min": 31329827.0,
"max": 49999985.0,
"count": 1868
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.008808558806777,
"min": -0.12382835894823074,
"max": 0.07764289528131485,
"count": 1868
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -1.8586058616638184,
"min": -27.87138557434082,
"max": 18.059650421142578,
"count": 1868
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.007905821315944195,
"min": -0.12627680599689484,
"max": 0.07929305732250214,
"count": 1868
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -1.6681283712387085,
"min": -28.078716278076172,
"max": 18.072132110595703,
"count": 1868
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1868
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1868
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.05238862738225132,
"min": -0.39782429911265865,
"max": 0.3570304734366281,
"count": 1868
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 11.05400037765503,
"min": -85.13440001010895,
"max": 74.9763994216919,
"count": 1868
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.05238862738225132,
"min": -0.39782429911265865,
"max": 0.3570304734366281,
"count": 1868
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 11.05400037765503,
"min": -85.13440001010895,
"max": 74.9763994216919,
"count": 1868
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1868
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1868
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.016061648454827566,
"min": 0.010508424140668163,
"max": 0.02448746747104451,
"count": 907
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.016061648454827566,
"min": 0.010508424140668163,
"max": 0.02448746747104451,
"count": 907
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.1309850513935089,
"min": 0.09209257910648981,
"max": 0.14468998163938523,
"count": 907
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.1309850513935089,
"min": 0.09209257910648981,
"max": 0.14468998163938523,
"count": 907
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.13237753336628277,
"min": 0.09314731905857722,
"max": 0.14641447762648266,
"count": 907
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.13237753336628277,
"min": 0.09314731905857722,
"max": 0.14641447762648266,
"count": 907
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 907
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 907
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 907
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 907
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 907
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 907
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1702956406",
"python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\emanu\\miniconda3\\envs\\mlagents7\\Scripts\\mlagents-learn config/poca/SoccerTwos.yaml --run-id=SoccerTwos_basic --resume",
"mlagents_version": "1.0.0",
"mlagents_envs_version": "1.0.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.13.1+cu117",
"numpy_version": "1.23.1",
"end_time_seconds": "1703067697"
},
"total": 111290.63815800002,
"count": 1,
"self": 0.03311730004497804,
"children": {
"run_training.setup": {
"total": 0.20670419998350553,
"count": 1,
"self": 0.20670419998350553
},
"TrainerController.start_learning": {
"total": 111290.39833649999,
"count": 1,
"self": 38.79569911569706,
"children": {
"TrainerController._reset_env": {
"total": 44.43875170004321,
"count": 95,
"self": 44.43875170004321
},
"TrainerController.advance": {
"total": 111206.97457958423,
"count": 1319563,
"self": 35.55578088664333,
"children": {
"env_step": {
"total": 34387.63442577858,
"count": 1319563,
"self": 21669.21721950115,
"children": {
"SubprocessEnvManager._take_step": {
"total": 12694.31907998005,
"count": 1319563,
"self": 180.90979260933818,
"children": {
"TorchPolicy.evaluate": {
"total": 12513.409287370712,
"count": 2342978,
"self": 12513.409287370712
}
}
},
"workers": {
"total": 24.098126297380077,
"count": 1319563,
"self": 0.0,
"children": {
"worker_root": {
"total": 111197.43400599252,
"count": 1319563,
"is_parallel": true,
"self": 93654.25793216252,
"children": {
"steps_from_proto": {
"total": 0.20589109970023856,
"count": 190,
"is_parallel": true,
"self": 0.039865299535449594,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.16602580016478896,
"count": 760,
"is_parallel": true,
"self": 0.16602580016478896
}
}
},
"UnityEnvironment.step": {
"total": 17542.970182730292,
"count": 1319563,
"is_parallel": true,
"self": 906.9769658251607,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 775.9321822029888,
"count": 1319563,
"is_parallel": true,
"self": 775.9321822029888
},
"communicator.exchange": {
"total": 12988.819363005052,
"count": 1319563,
"is_parallel": true,
"self": 12988.819363005052
},
"steps_from_proto": {
"total": 2871.241671697091,
"count": 2639126,
"is_parallel": true,
"self": 553.706305623753,
"children": {
"_process_rank_one_or_two_observation": {
"total": 2317.535366073338,
"count": 10556504,
"is_parallel": true,
"self": 2317.535366073338
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 76783.784372919,
"count": 1319563,
"self": 291.84621213772334,
"children": {
"process_trajectory": {
"total": 23569.340994681785,
"count": 1319563,
"self": 23563.75128538179,
"children": {
"RLTrainer._checkpoint": {
"total": 5.589709299994865,
"count": 38,
"self": 5.589709299994865
}
}
},
"_update_policy": {
"total": 52922.5971660995,
"count": 907,
"self": 2743.3453059984895,
"children": {
"TorchPOCAOptimizer.update": {
"total": 50179.25186010101,
"count": 27210,
"self": 50179.25186010101
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.700020559132099e-06,
"count": 1,
"self": 2.700020559132099e-06
},
"TrainerController._save_models": {
"total": 0.18930339999496937,
"count": 1,
"self": 0.05483280000044033,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13447059999452904,
"count": 1,
"self": 0.13447059999452904
}
}
}
}
}
}
}
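
The JSON above is what appears to be the `run_logs/timers.json` that ML-Agents writes for this run: the `"gauges"` section records each training statistic (entropy, episode length, self-play ELO, losses, hyperparameters) as `value`/`min`/`max`/`count`, and the nested timer tree after `"metadata"` breaks the total wall-clock time into `total`/`count`/`self`/`children` blocks. Below is a minimal sketch for loading and summarizing the file; the path is an assumption based on the default ML-Agents output layout for the `SoccerTwos_basic` run id, so adjust it to wherever the file lives in this repository.

```python
import json

# Assumed path: ML-Agents writes timers.json under results/<run-id>/run_logs/
# by default; change this to the actual location of the file in the repo.
with open("results/SoccerTwos_basic/run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge stores the latest value plus min/max/count over the run.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, count={gauge['count']})")

# The timer tree nests "children" blocks; walking it shows where wall-clock
# time was spent (e.g. env_step vs. trainer_advance).
def print_timers(node, name="root", depth=0):
    print(f"{'  ' * depth}{name}: {node.get('total', 0.0):.1f}s "
          f"over {node.get('count', 1)} call(s)")
    for child_name, child in node.get("children", {}).items():
        print_timers(child, child_name, depth + 1)

print_timers(timers)
```

Reading the tree this way makes the cost breakdown in the data above easier to see, for example that `TorchPOCAOptimizer.update` accounts for roughly 50,179 s of the ~111,290 s total recorded for this run.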