{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.104137897491455,
"min": 2.104137897491455,
"max": 3.295746088027954,
"count": 1000
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 39591.45703125,
"min": 28661.078125,
"max": 105463.796875,
"count": 1000
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 53.0989010989011,
"min": 39.95867768595041,
"max": 999.0,
"count": 1000
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19328.0,
"min": 16684.0,
"max": 23208.0,
"count": 1000
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1687.9152761926134,
"min": 1190.6857026828052,
"max": 1698.9798879194325,
"count": 997
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 307200.58026705566,
"min": 2401.677687384432,
"max": 401313.1546064704,
"count": 997
},
"SoccerTwos.Step.mean": {
"value": 9999966.0,
"min": 9620.0,
"max": 9999966.0,
"count": 1000
},
"SoccerTwos.Step.sum": {
"value": 9999966.0,
"min": 9620.0,
"max": 9999966.0,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.04598451405763626,
"min": -0.12628822028636932,
"max": 0.18047218024730682,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -8.369181632995605,
"min": -22.226726531982422,
"max": 28.187450408935547,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.04881046712398529,
"min": -0.12806162238121033,
"max": 0.178554505109787,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -8.883504867553711,
"min": -22.53884506225586,
"max": 28.47722625732422,
"count": 1000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.1185000018104092,
"min": -0.7692307692307693,
"max": 0.5266690904443915,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 21.567000329494476,
"min": -56.49059993028641,
"max": 60.974000334739685,
"count": 1000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.1185000018104092,
"min": -0.7692307692307693,
"max": 0.5266690904443915,
"count": 1000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 21.567000329494476,
"min": -56.49059993028641,
"max": 60.974000334739685,
"count": 1000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1000
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.012351672106888145,
"min": 0.006377983589967092,
"max": 0.01650880670834643,
"count": 242
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.012351672106888145,
"min": 0.006377983589967092,
"max": 0.01650880670834643,
"count": 242
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.12223331481218339,
"min": 0.0028801917796954513,
"max": 0.13559552580118178,
"count": 242
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.12223331481218339,
"min": 0.0028801917796954513,
"max": 0.13559552580118178,
"count": 242
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.12333853219946225,
"min": 0.002953560114838183,
"max": 0.137561371922493,
"count": 242
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.12333853219946225,
"min": 0.002953560114838183,
"max": 0.137561371922493,
"count": 242
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.00025000000000000006,
"min": 0.00025000000000000006,
"max": 0.00025000000000000006,
"count": 242
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.00025000000000000006,
"min": 0.00025000000000000006,
"max": 0.00025000000000000006,
"count": 242
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.15000000000000002,
"min": 0.15000000000000002,
"max": 0.15000000000000002,
"count": 242
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.15000000000000002,
"min": 0.15000000000000002,
"max": 0.15000000000000002,
"count": 242
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 242
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 242
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675468435",
"python_version": "3.8.16 (default, Jan 17 2023, 22:25:28) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\ramon\\anaconda3\\envs\\rl_football_gpu\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwosGPU --no-graphics --force --torch-device=cuda",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.13.1+cu117",
"numpy_version": "1.21.2",
"end_time_seconds": "1675573353"
},
"total": 104917.1378512,
"count": 1,
"self": 2.371245499991346,
"children": {
"run_training.setup": {
"total": 0.7410259000000003,
"count": 1,
"self": 0.7410259000000003
},
"TrainerController.start_learning": {
"total": 104914.0255798,
"count": 1,
"self": 74.59980279373121,
"children": {
"TrainerController._reset_env": {
"total": 47.93823570001701,
"count": 29,
"self": 47.93823570001701
},
"TrainerController.advance": {
"total": 104790.52446330624,
"count": 681647,
"self": 78.4749788112822,
"children": {
"env_step": {
"total": 77107.23927279649,
"count": 681647,
"self": 49394.6467882015,
"children": {
"SubprocessEnvManager._take_step": {
"total": 27664.324471095126,
"count": 681647,
"self": 501.3633953925819,
"children": {
"TorchPolicy.evaluate": {
"total": 27162.961075702544,
"count": 1255672,
"self": 27162.961075702544
}
}
},
"workers": {
"total": 48.26801349986546,
"count": 681647,
"self": 0.0,
"children": {
"worker_root": {
"total": 104787.02264969976,
"count": 681647,
"is_parallel": true,
"self": 65118.05426029919,
"children": {
"steps_from_proto": {
"total": 0.28205810001661646,
"count": 58,
"is_parallel": true,
"self": 0.05843879995229884,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.22361930006431763,
"count": 232,
"is_parallel": true,
"self": 0.22361930006431763
}
}
},
"UnityEnvironment.step": {
"total": 39668.68633130055,
"count": 681647,
"is_parallel": true,
"self": 2240.013187102624,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 1663.4952165971774,
"count": 681647,
"is_parallel": true,
"self": 1663.4952165971774
},
"communicator.exchange": {
"total": 28830.686681596937,
"count": 681647,
"is_parallel": true,
"self": 28830.686681596937
},
"steps_from_proto": {
"total": 6934.491246003817,
"count": 1363294,
"is_parallel": true,
"self": 1478.5051235932751,
"children": {
"_process_rank_one_or_two_observation": {
"total": 5455.986122410542,
"count": 5453176,
"is_parallel": true,
"self": 5455.986122410542
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 27604.81021169846,
"count": 681647,
"self": 375.8029027978482,
"children": {
"process_trajectory": {
"total": 12891.255822200601,
"count": 681647,
"self": 12871.471023100607,
"children": {
"RLTrainer._checkpoint": {
"total": 19.784799099993506,
"count": 20,
"self": 19.784799099993506
}
}
},
"_update_policy": {
"total": 14337.75148670001,
"count": 242,
"self": 9861.626107899774,
"children": {
"TorchPOCAOptimizer.update": {
"total": 4476.125378800237,
"count": 7260,
"self": 4476.125378800237
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.0000006770715117e-05,
"count": 1,
"self": 2.0000006770715117e-05
},
"TrainerController._save_models": {
"total": 0.9630580000084592,
"count": 1,
"self": 0.03351940000720788,
"children": {
"RLTrainer._checkpoint": {
"total": 0.9295386000012513,
"count": 1,
"self": 0.9295386000012513
}
}
}
}
}
}
}