{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0248702764511108,
"min": 1.0233978033065796,
"max": 2.6493778228759766,
"count": 18
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9977.1123046875,
"min": 9977.1123046875,
"max": 25267.1171875,
"count": 18
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 29992.0,
"max": 199984.0,
"count": 18
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 29992.0,
"max": 199984.0,
"count": 18
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.989912986755371,
"min": 2.4948792457580566,
"max": 13.001709938049316,
"count": 18
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2662.93212890625,
"min": 434.1090087890625,
"max": 2662.93212890625,
"count": 18
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 18
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 18
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06182343586103752,
"min": 0.06052512708782017,
"max": 0.07680597960072691,
"count": 18
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.3091171793051876,
"min": 0.26256851684051,
"max": 0.36497568320962326,
"count": 18
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2130128609783509,
"min": 0.18682205450593256,
"max": 0.2755992733234284,
"count": 18
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 1.0650643048917545,
"min": 0.7472882180237302,
"max": 1.3488157207474991,
"count": 18
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 7.620097459999994e-06,
"min": 7.620097459999994e-06,
"max": 0.00026007001331,
"count": 18
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.810048729999997e-05,
"min": 3.810048729999997e-05,
"max": 0.0011601001132999999,
"count": 18
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10254,
"min": 0.10254,
"max": 0.18669000000000002,
"count": 18
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5127,
"min": 0.42996,
"max": 0.8866999999999999,
"count": 18
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001367459999999999,
"min": 0.0001367459999999999,
"max": 0.004335831,
"count": 18
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0006837299999999995,
"min": 0.0006837299999999995,
"max": 0.019346330000000002,
"count": 18
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.236363636363638,
"min": 8.023809523809524,
"max": 25.836363636363636,
"count": 18
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1388.0,
"min": 337.0,
"max": 1421.0,
"count": 18
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.236363636363638,
"min": 8.023809523809524,
"max": 25.836363636363636,
"count": 18
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1388.0,
"min": 337.0,
"max": 1421.0,
"count": 18
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 18
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 18
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1722593986",
"python_version": "3.10.12 (main, Jul 29 2024, 16:56:48) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --resume",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1722594388"
},
"total": 402.14017314800003,
"count": 1,
"self": 0.47645351199980723,
"children": {
"run_training.setup": {
"total": 0.05271483400019861,
"count": 1,
"self": 0.05271483400019861
},
"TrainerController.start_learning": {
"total": 401.611004802,
"count": 1,
"self": 0.5080185079914372,
"children": {
"TrainerController._reset_env": {
"total": 1.9157861460000731,
"count": 1,
"self": 1.9157861460000731
},
"TrainerController.advance": {
"total": 399.0280675060085,
"count": 16298,
"self": 0.231761505992381,
"children": {
"env_step": {
"total": 398.7963060000161,
"count": 16298,
"self": 257.3976601529878,
"children": {
"SubprocessEnvManager._take_step": {
"total": 141.15487900101402,
"count": 16298,
"self": 1.3216700969931026,
"children": {
"TorchPolicy.evaluate": {
"total": 139.83320890402092,
"count": 16298,
"self": 139.83320890402092
}
}
},
"workers": {
"total": 0.24376684601429588,
"count": 16298,
"self": 0.0,
"children": {
"worker_root": {
"total": 400.4621210340165,
"count": 16298,
"is_parallel": true,
"self": 202.58089878202077,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021687190001102863,
"count": 1,
"is_parallel": true,
"self": 0.0006749759998001537,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014937430003101326,
"count": 10,
"is_parallel": true,
"self": 0.0014937430003101326
}
}
},
"UnityEnvironment.step": {
"total": 0.07031695199998467,
"count": 1,
"is_parallel": true,
"self": 0.0006866130001981219,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003499219999412162,
"count": 1,
"is_parallel": true,
"self": 0.0003499219999412162
},
"communicator.exchange": {
"total": 0.06749808599988683,
"count": 1,
"is_parallel": true,
"self": 0.06749808599988683
},
"steps_from_proto": {
"total": 0.0017823309999585035,
"count": 1,
"is_parallel": true,
"self": 0.00034167799958595424,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014406530003725493,
"count": 10,
"is_parallel": true,
"self": 0.0014406530003725493
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 197.88122225199572,
"count": 16297,
"is_parallel": true,
"self": 8.868798117969845,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 4.791149939013167,
"count": 16297,
"is_parallel": true,
"self": 4.791149939013167
},
"communicator.exchange": {
"total": 153.98308474799887,
"count": 16297,
"is_parallel": true,
"self": 153.98308474799887
},
"steps_from_proto": {
"total": 30.238189447013838,
"count": 16297,
"is_parallel": true,
"self": 5.63628823898739,
"children": {
"_process_rank_one_or_two_observation": {
"total": 24.601901208026447,
"count": 162970,
"is_parallel": true,
"self": 24.601901208026447
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00015016800011835585,
"count": 1,
"self": 0.00015016800011835585,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 394.0747794568442,
"count": 604609,
"is_parallel": true,
"self": 12.610627955976042,
"children": {
"process_trajectory": {
"total": 218.03464418686826,
"count": 604609,
"is_parallel": true,
"self": 217.08632586886802,
"children": {
"RLTrainer._checkpoint": {
"total": 0.948318318000247,
"count": 4,
"is_parallel": true,
"self": 0.948318318000247
}
}
},
"_update_policy": {
"total": 163.42950731399992,
"count": 81,
"is_parallel": true,
"self": 50.42302629799565,
"children": {
"TorchPPOOptimizer.update": {
"total": 113.00648101600427,
"count": 4128,
"is_parallel": true,
"self": 113.00648101600427
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.15898247399991305,
"count": 1,
"self": 0.0015238260000387527,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1574586479998743,
"count": 1,
"self": 0.1574586479998743
}
}
}
}
}
}
}