{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.6436563730239868,
"min": 1.5766630172729492,
"max": 3.2957727909088135,
"count": 836
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 33925.06640625,
"min": 27373.46484375,
"max": 108274.7890625,
"count": 836
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 56.241379310344826,
"min": 46.235849056603776,
"max": 999.0,
"count": 836
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19572.0,
"min": 16624.0,
"max": 24692.0,
"count": 836
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1622.901046867177,
"min": 1183.1620344266842,
"max": 1660.0724597833855,
"count": 834
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 282384.7821548888,
"min": 2367.788579599301,
"max": 323841.91322314646,
"count": 834
},
"SoccerTwos.Step.mean": {
"value": 8359982.0,
"min": 9364.0,
"max": 8359982.0,
"count": 836
},
"SoccerTwos.Step.sum": {
"value": 8359982.0,
"min": 9364.0,
"max": 8359982.0,
"count": 836
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.04412328451871872,
"min": -0.13746853172779083,
"max": 0.2039482593536377,
"count": 836
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -7.6774516105651855,
"min": -22.957244873046875,
"max": 28.05706787109375,
"count": 836
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.03761598840355873,
"min": -0.13524287939071655,
"max": 0.20287510752677917,
"count": 836
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -6.545182228088379,
"min": -22.585559844970703,
"max": 28.808265686035156,
"count": 836
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 836
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 836
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.07114252824892943,
"min": -0.5833333333333334,
"max": 0.4089764715994106,
"count": 836
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 12.37879991531372,
"min": -55.82079941034317,
"max": 54.98859965801239,
"count": 836
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.07114252824892943,
"min": -0.5833333333333334,
"max": 0.4089764715994106,
"count": 836
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 12.37879991531372,
"min": -55.82079941034317,
"max": 54.98859965801239,
"count": 836
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 836
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 836
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.015712379955220967,
"min": 0.01139993552060332,
"max": 0.025473337310055893,
"count": 403
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.015712379955220967,
"min": 0.01139993552060332,
"max": 0.025473337310055893,
"count": 403
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.08840486680467924,
"min": 0.0008984813781959625,
"max": 0.11362483402093251,
"count": 403
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.08840486680467924,
"min": 0.0008984813781959625,
"max": 0.11362483402093251,
"count": 403
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.09042802080512047,
"min": 0.0009063885013650482,
"max": 0.11517100632190705,
"count": 403
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.09042802080512047,
"min": 0.0009063885013650482,
"max": 0.11517100632190705,
"count": 403
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 403
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 403
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 403
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 403
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 403
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 403
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678057211",
"python_version": "3.9.16 (main, Mar 1 2023, 18:30:21) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\pa-legg\\.conda\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.exe --run-id=SoccerTwos-v2 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.13.0+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1678087164"
},
"total": 29952.8506473,
"count": 1,
"self": 0.1575236999997287,
"children": {
"run_training.setup": {
"total": 0.1856146999999999,
"count": 1,
"self": 0.1856146999999999
},
"TrainerController.start_learning": {
"total": 29952.507508900002,
"count": 1,
"self": 17.938412500770937,
"children": {
"TrainerController._reset_env": {
"total": 5.637848499999233,
"count": 42,
"self": 5.637848499999233
},
"TrainerController.advance": {
"total": 29928.73732699923,
"count": 569807,
"self": 17.51389010064304,
"children": {
"env_step": {
"total": 12173.06392199877,
"count": 569807,
"self": 9406.545452299511,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2755.669963199507,
"count": 569807,
"self": 96.58012619973715,
"children": {
"TorchPolicy.evaluate": {
"total": 2659.08983699977,
"count": 1052256,
"self": 2659.08983699977
}
}
},
"workers": {
"total": 10.848506499750716,
"count": 569807,
"self": 0.0,
"children": {
"worker_root": {
"total": 29900.837372699607,
"count": 569807,
"is_parallel": true,
"self": 22427.73571179988,
"children": {
"steps_from_proto": {
"total": 0.11223689999466213,
"count": 84,
"is_parallel": true,
"self": 0.021027199979626765,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.09120970001503537,
"count": 336,
"is_parallel": true,
"self": 0.09120970001503537
}
}
},
"UnityEnvironment.step": {
"total": 7472.989423999732,
"count": 569807,
"is_parallel": true,
"self": 443.63700669922764,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 314.95180979998565,
"count": 569807,
"is_parallel": true,
"self": 314.95180979998565
},
"communicator.exchange": {
"total": 5256.4938220991025,
"count": 569807,
"is_parallel": true,
"self": 5256.4938220991025
},
"steps_from_proto": {
"total": 1457.9067854014165,
"count": 1139614,
"is_parallel": true,
"self": 277.6322359989697,
"children": {
"_process_rank_one_or_two_observation": {
"total": 1180.2745494024468,
"count": 4558456,
"is_parallel": true,
"self": 1180.2745494024468
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 17738.159514899817,
"count": 569807,
"self": 120.44777359840009,
"children": {
"process_trajectory": {
"total": 3076.8388475014153,
"count": 569807,
"self": 3073.8254532014157,
"children": {
"RLTrainer._checkpoint": {
"total": 3.013394299999618,
"count": 16,
"self": 3.013394299999618
}
}
},
"_update_policy": {
"total": 14540.872893800002,
"count": 404,
"self": 1420.580308000075,
"children": {
"TorchPOCAOptimizer.update": {
"total": 13120.292585799927,
"count": 12110,
"self": 13120.292585799927
}
}
}
}
}
}
},
"trainer_threads": {
"total": 3.4000004234258085e-06,
"count": 1,
"self": 3.4000004234258085e-06
},
"TrainerController._save_models": {
"total": 0.19391750000067987,
"count": 1,
"self": 0.006020199998602038,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18789730000207783,
"count": 1,
"self": 0.18789730000207783
}
}
}
}
}
}
}