ppo-Huggy / run_logs / timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.406590461730957,
"min": 1.406590461730957,
"max": 1.4271314144134521,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71353.5234375,
"min": 68739.03125,
"max": 78317.1484375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 83.79491525423728,
"min": 79.41639871382637,
"max": 397.0079365079365,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49439.0,
"min": 48730.0,
"max": 50284.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999916.0,
"min": 49414.0,
"max": 1999916.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999916.0,
"min": 49414.0,
"max": 1999916.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.417358875274658,
"min": 0.07471233606338501,
"max": 2.4850099086761475,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1426.24169921875,
"min": 9.339041709899902,
"max": 1534.745361328125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.6692319943743237,
"min": 1.7617017402648927,
"max": 3.9974654778893552,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2164.846876680851,
"min": 220.21271753311157,
"max": 2428.093115925789,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.6692319943743237,
"min": 1.7617017402648927,
"max": 3.9974654778893552,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2164.846876680851,
"min": 220.21271753311157,
"max": 2428.093115925789,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.0162354612761798,
"min": 0.013104002612211237,
"max": 0.020484475717178915,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.0487063838285394,
"min": 0.026208005224422473,
"max": 0.05604054856376024,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.0594173765844769,
"min": 0.02345195558543007,
"max": 0.06654860706379016,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1782521297534307,
"min": 0.04690391117086014,
"max": 0.18137964966396491,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.4069988643666796e-06,
"min": 3.4069988643666796e-06,
"max": 0.00029528692657102506,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0220996593100039e-05,
"min": 1.0220996593100039e-05,
"max": 0.0008434218188594,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10113563333333335,
"min": 0.10113563333333335,
"max": 0.198428975,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30340690000000003,
"min": 0.20740005,
"max": 0.5811406000000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.666810333333355e-05,
"min": 6.666810333333355e-05,
"max": 0.004921605852499999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020000431000000068,
"min": 0.00020000431000000068,
"max": 0.01405891594,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1671041931",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1671044194"
},
"total": 2262.8138873999997,
"count": 1,
"self": 0.44277089499973954,
"children": {
"run_training.setup": {
"total": 0.10527543800003514,
"count": 1,
"self": 0.10527543800003514
},
"TrainerController.start_learning": {
"total": 2262.265841067,
"count": 1,
"self": 3.8565478029458973,
"children": {
"TrainerController._reset_env": {
"total": 9.988992580999934,
"count": 1,
"self": 9.988992580999934
},
"TrainerController.advance": {
"total": 2248.3043743990543,
"count": 232720,
"self": 4.23252722525649,
"children": {
"env_step": {
"total": 1768.114408793905,
"count": 232720,
"self": 1484.518075243741,
"children": {
"SubprocessEnvManager._take_step": {
"total": 280.96532352699114,
"count": 232720,
"self": 14.33723976602596,
"children": {
"TorchPolicy.evaluate": {
"total": 266.6280837609652,
"count": 223006,
"self": 66.00220557196906,
"children": {
"TorchPolicy.sample_actions": {
"total": 200.62587818899613,
"count": 223006,
"self": 200.62587818899613
}
}
}
}
},
"workers": {
"total": 2.6310100231731894,
"count": 232720,
"self": 0.0,
"children": {
"worker_root": {
"total": 2254.321778588019,
"count": 232720,
"is_parallel": true,
"self": 1034.1939830740735,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017549770000186982,
"count": 1,
"is_parallel": true,
"self": 0.000319597000043359,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014353799999753392,
"count": 2,
"is_parallel": true,
"self": 0.0014353799999753392
}
}
},
"UnityEnvironment.step": {
"total": 0.026381904000004397,
"count": 1,
"is_parallel": true,
"self": 0.00029525499996907456,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001817119999714123,
"count": 1,
"is_parallel": true,
"self": 0.0001817119999714123
},
"communicator.exchange": {
"total": 0.025218604999963645,
"count": 1,
"is_parallel": true,
"self": 0.025218604999963645
},
"steps_from_proto": {
"total": 0.0006863320001002648,
"count": 1,
"is_parallel": true,
"self": 0.00022870600014357478,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00045762599995669007,
"count": 2,
"is_parallel": true,
"self": 0.00045762599995669007
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1220.1277955139453,
"count": 232719,
"is_parallel": true,
"self": 35.11991141495241,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 75.56463078103945,
"count": 232719,
"is_parallel": true,
"self": 75.56463078103945
},
"communicator.exchange": {
"total": 1016.0898638619673,
"count": 232719,
"is_parallel": true,
"self": 1016.0898638619673
},
"steps_from_proto": {
"total": 93.35338945598619,
"count": 232719,
"is_parallel": true,
"self": 38.178251192118296,
"children": {
"_process_rank_one_or_two_observation": {
"total": 55.17513826386789,
"count": 465438,
"is_parallel": true,
"self": 55.17513826386789
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 475.9574383798928,
"count": 232720,
"self": 6.401791535962616,
"children": {
"process_trajectory": {
"total": 149.7276454639316,
"count": 232720,
"self": 149.2623151849316,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4653302789999998,
"count": 4,
"self": 0.4653302789999998
}
}
},
"_update_policy": {
"total": 319.82800137999857,
"count": 97,
"self": 265.26530748799166,
"children": {
"TorchPPOOptimizer.update": {
"total": 54.562693892006905,
"count": 2910,
"self": 54.562693892006905
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.530000741302501e-07,
"count": 1,
"self": 8.530000741302501e-07
},
"TrainerController._save_models": {
"total": 0.1159254310000506,
"count": 1,
"self": 0.0020987949997106625,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11382663600033993,
"count": 1,
"self": 0.11382663600033993
}
}
}
}
}
}
}
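
Note: the snippet below is not part of the original timers.json; it is a minimal sketch of how the file could be inspected with the Python standard library, assuming it sits at the run_logs/timers.json path shown in the header above. The keys it reads ("gauges", "value", "min", "max", "count", "total", "metadata") all appear in the data.

# Minimal sketch: load this timers.json and summarise the recorded gauges.
import json

with open("run_logs/timers.json") as f:  # path taken from the file header above
    timers = json.load(f)

# "gauges" holds the per-metric summaries recorded by mlagents-learn during training.
for name, stats in timers["gauges"].items():
    print(f'{name}: value={stats["value"]:.4f} min={stats["min"]:.4f} '
          f'max={stats["max"]:.4f} count={stats["count"]}')

# Top-level timer: wall-clock seconds spent under the "root" timer node.
print("total seconds:", timers["total"])
print("mlagents version:", timers["metadata"]["mlagents_version"])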