{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.401718258857727,
"min": 1.401718258857727,
"max": 1.4265803098678589,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69528.03125,
"min": 68323.9765625,
"max": 77123.4921875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 94.76673040152964,
"min": 78.2107765451664,
"max": 409.0409836065574,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49563.0,
"min": 48925.0,
"max": 50038.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999376.0,
"min": 49358.0,
"max": 1999376.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999376.0,
"min": 49358.0,
"max": 1999376.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4496448040008545,
"min": 0.10427506268024445,
"max": 2.484670639038086,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1281.1641845703125,
"min": 12.61728286743164,
"max": 1530.64990234375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.716703139801098,
"min": 1.8602946474532451,
"max": 4.046146650110657,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1943.8357421159744,
"min": 225.09565234184265,
"max": 2415.549550116062,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.716703139801098,
"min": 1.8602946474532451,
"max": 4.046146650110657,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1943.8357421159744,
"min": 225.09565234184265,
"max": 2415.549550116062,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01836042815573617,
"min": 0.013421747290703934,
"max": 0.02074068708752748,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05508128446720851,
"min": 0.02684349458140787,
"max": 0.05747165445621552,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05224798284471035,
"min": 0.022220716035614412,
"max": 0.05966830224626594,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.15674394853413104,
"min": 0.044441432071228824,
"max": 0.17900490673879782,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.5106988298000095e-06,
"min": 3.5106988298000095e-06,
"max": 0.00029532217655927495,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0532096489400029e-05,
"min": 1.0532096489400029e-05,
"max": 0.0008438452687182499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10117019999999999,
"min": 0.10117019999999999,
"max": 0.19844072499999998,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30351059999999996,
"min": 0.20749389999999995,
"max": 0.58128175,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.839298000000013e-05,
"min": 6.839298000000013e-05,
"max": 0.0049221921775,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0002051789400000004,
"min": 0.0002051789400000004,
"max": 0.014065959325000002,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673106923",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673109110"
},
"total": 2186.791938034,
"count": 1,
"self": 0.3894292459999633,
"children": {
"run_training.setup": {
"total": 0.1900041339999916,
"count": 1,
"self": 0.1900041339999916
},
"TrainerController.start_learning": {
"total": 2186.212504654,
"count": 1,
"self": 3.7827642320385166,
"children": {
"TrainerController._reset_env": {
"total": 7.904374058999991,
"count": 1,
"self": 7.904374058999991
},
"TrainerController.advance": {
"total": 2174.406583964961,
"count": 232795,
"self": 3.89231146099155,
"children": {
"env_step": {
"total": 1707.925286263974,
"count": 232795,
"self": 1434.3659427809414,
"children": {
"SubprocessEnvManager._take_step": {
"total": 271.04098715201474,
"count": 232795,
"self": 13.840129734890922,
"children": {
"TorchPolicy.evaluate": {
"total": 257.2008574171238,
"count": 222928,
"self": 64.30694273709133,
"children": {
"TorchPolicy.sample_actions": {
"total": 192.89391468003248,
"count": 222928,
"self": 192.89391468003248
}
}
}
}
},
"workers": {
"total": 2.5183563310177988,
"count": 232795,
"self": 0.0,
"children": {
"worker_root": {
"total": 2178.207436787939,
"count": 232795,
"is_parallel": true,
"self": 997.8631339518645,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018228320000162057,
"count": 1,
"is_parallel": true,
"self": 0.00029471799999214454,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015281140000240612,
"count": 2,
"is_parallel": true,
"self": 0.0015281140000240612
}
}
},
"UnityEnvironment.step": {
"total": 0.02711505099998135,
"count": 1,
"is_parallel": true,
"self": 0.0003011060000233101,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001740839999797572,
"count": 1,
"is_parallel": true,
"self": 0.0001740839999797572
},
"communicator.exchange": {
"total": 0.025890855999989526,
"count": 1,
"is_parallel": true,
"self": 0.025890855999989526
},
"steps_from_proto": {
"total": 0.000749004999988756,
"count": 1,
"is_parallel": true,
"self": 0.0002359059999719193,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005130990000168367,
"count": 2,
"is_parallel": true,
"self": 0.0005130990000168367
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1180.3443028360746,
"count": 232794,
"is_parallel": true,
"self": 34.329755883100916,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 74.38598525596419,
"count": 232794,
"is_parallel": true,
"self": 74.38598525596419
},
"communicator.exchange": {
"total": 979.6464237839525,
"count": 232794,
"is_parallel": true,
"self": 979.6464237839525
},
"steps_from_proto": {
"total": 91.98213791305687,
"count": 232794,
"is_parallel": true,
"self": 37.94060480701759,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.04153310603928,
"count": 465588,
"is_parallel": true,
"self": 54.04153310603928
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 462.5889862399958,
"count": 232795,
"self": 5.859698646053573,
"children": {
"process_trajectory": {
"total": 147.9417675169412,
"count": 232795,
"self": 146.7603066799409,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1814608370003157,
"count": 10,
"self": 1.1814608370003157
}
}
},
"_update_policy": {
"total": 308.78752007700103,
"count": 97,
"self": 255.642441708997,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.14507836800402,
"count": 2910,
"self": 53.14507836800402
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.110000635497272e-07,
"count": 1,
"self": 8.110000635497272e-07
},
"TrainerController._save_models": {
"total": 0.11878158700028507,
"count": 1,
"self": 0.002045346000159043,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11673624100012603,
"count": 1,
"self": 0.11673624100012603
}
}
}
}
}
}
}