{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4034416675567627,
"min": 1.4034416675567627,
"max": 1.427432894706726,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69671.0546875,
"min": 69013.328125,
"max": 77315.7265625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 103.1891891891892,
"min": 101.5860655737705,
"max": 394.3543307086614,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49634.0,
"min": 48948.0,
"max": 50271.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999420.0,
"min": 49610.0,
"max": 1999420.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999420.0,
"min": 49610.0,
"max": 1999420.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.307337522506714,
"min": 0.13111253082752228,
"max": 2.406428575515747,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1109.829345703125,
"min": 16.520179748535156,
"max": 1174.337158203125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.5371587062080287,
"min": 1.915757889785464,
"max": 3.7602187402546408,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1701.3733376860619,
"min": 241.38549411296844,
"max": 1816.9940581917763,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.5371587062080287,
"min": 1.915757889785464,
"max": 3.7602187402546408,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1701.3733376860619,
"min": 241.38549411296844,
"max": 1816.9940581917763,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.015968762603699817,
"min": 0.014197437744345127,
"max": 0.020599364762407885,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.031937525207399634,
"min": 0.028394875488690255,
"max": 0.06179809428722365,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05142368705322345,
"min": 0.022092575828234356,
"max": 0.06368105846146743,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1028473741064469,
"min": 0.04466596376150846,
"max": 0.17535685760279496,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.932423689225011e-06,
"min": 3.932423689225011e-06,
"max": 0.00029533545155484997,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 7.864847378450021e-06,
"min": 7.864847378450021e-06,
"max": 0.0008439454686848501,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10131077499999999,
"min": 0.10131077499999999,
"max": 0.19844515000000001,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20262154999999998,
"min": 0.20262154999999998,
"max": 0.58131515,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.540767250000018e-05,
"min": 7.540767250000018e-05,
"max": 0.004922412984999999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00015081534500000035,
"min": 0.00015081534500000035,
"max": 0.014067625985000002,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670919647",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670921914"
},
"total": 2266.591040191,
"count": 1,
"self": 0.4388859699997738,
"children": {
"run_training.setup": {
"total": 0.10731974399999444,
"count": 1,
"self": 0.10731974399999444
},
"TrainerController.start_learning": {
"total": 2266.044834477,
"count": 1,
"self": 3.8966626800720405,
"children": {
"TrainerController._reset_env": {
"total": 9.832440628000029,
"count": 1,
"self": 9.832440628000029
},
"TrainerController.advance": {
"total": 2252.2020416969285,
"count": 231109,
"self": 4.095782864879311,
"children": {
"env_step": {
"total": 1791.019727491064,
"count": 231109,
"self": 1498.88352832305,
"children": {
"SubprocessEnvManager._take_step": {
"total": 289.3312459210538,
"count": 231109,
"self": 15.111221970097915,
"children": {
"TorchPolicy.evaluate": {
"total": 274.22002395095586,
"count": 222924,
"self": 68.00772196509075,
"children": {
"TorchPolicy.sample_actions": {
"total": 206.2123019858651,
"count": 222924,
"self": 206.2123019858651
}
}
}
}
},
"workers": {
"total": 2.804953246960281,
"count": 231109,
"self": 0.0,
"children": {
"worker_root": {
"total": 2257.8717320621713,
"count": 231109,
"is_parallel": true,
"self": 1027.6635223442238,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018784040000241475,
"count": 1,
"is_parallel": true,
"self": 0.0003530729999852156,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001525331000038932,
"count": 2,
"is_parallel": true,
"self": 0.001525331000038932
}
}
},
"UnityEnvironment.step": {
"total": 0.028042574999972203,
"count": 1,
"is_parallel": true,
"self": 0.0003302550000512383,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00020670800000743839,
"count": 1,
"is_parallel": true,
"self": 0.00020670800000743839
},
"communicator.exchange": {
"total": 0.026605500999949072,
"count": 1,
"is_parallel": true,
"self": 0.026605500999949072
},
"steps_from_proto": {
"total": 0.0009001109999644541,
"count": 1,
"is_parallel": true,
"self": 0.00028922899997496643,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006108819999894877,
"count": 2,
"is_parallel": true,
"self": 0.0006108819999894877
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1230.2082097179475,
"count": 231108,
"is_parallel": true,
"self": 35.07381504893192,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 83.69479525099626,
"count": 231108,
"is_parallel": true,
"self": 83.69479525099626
},
"communicator.exchange": {
"total": 1014.7367970620742,
"count": 231108,
"is_parallel": true,
"self": 1014.7367970620742
},
"steps_from_proto": {
"total": 96.70280235594515,
"count": 231108,
"is_parallel": true,
"self": 41.908182039728445,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.79462031621671,
"count": 462216,
"is_parallel": true,
"self": 54.79462031621671
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 457.0865313409851,
"count": 231109,
"self": 6.19362798898203,
"children": {
"process_trajectory": {
"total": 143.87409786000268,
"count": 231109,
"self": 143.37962545900217,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4944724010005075,
"count": 4,
"self": 0.4944724010005075
}
}
},
"_update_policy": {
"total": 307.0188054920004,
"count": 96,
"self": 254.21434314199814,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.80446235000227,
"count": 2880,
"self": 52.80446235000227
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.479996722599026e-07,
"count": 1,
"self": 8.479996722599026e-07
},
"TrainerController._save_models": {
"total": 0.11368862400013313,
"count": 1,
"self": 0.002020709000134957,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11166791499999817,
"count": 1,
"self": 0.11166791499999817
}
}
}
}
}
}
}