{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4057353734970093,
"min": 1.4057353734970093,
"max": 1.4298509359359741,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69994.375,
"min": 67764.0,
"max": 78830.78125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 96.42105263157895,
"min": 86.15183246073299,
"max": 391.9453125,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49464.0,
"min": 48949.0,
"max": 50169.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999980.0,
"min": 49601.0,
"max": 1999980.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999980.0,
"min": 49601.0,
"max": 1999980.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3798208236694336,
"min": 0.17059510946273804,
"max": 2.4268596172332764,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1220.8480224609375,
"min": 21.665578842163086,
"max": 1375.174560546875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.687070602445807,
"min": 1.7603671414645639,
"max": 3.8601367199775014,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1891.467219054699,
"min": 223.5666269659996,
"max": 2166.153334081173,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.687070602445807,
"min": 1.7603671414645639,
"max": 3.8601367199775014,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1891.467219054699,
"min": 223.5666269659996,
"max": 2166.153334081173,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017722960558926894,
"min": 0.013144492062080342,
"max": 0.01970972528158907,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05316888167678068,
"min": 0.026288984124160683,
"max": 0.05912917584476721,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05016221354405085,
"min": 0.021540686519195634,
"max": 0.06116366961763966,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.15048664063215256,
"min": 0.04308137303839127,
"max": 0.18349100885291897,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.3012988996000034e-06,
"min": 3.3012988996000034e-06,
"max": 0.0002953209765596749,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.90389669880001e-06,
"min": 9.90389669880001e-06,
"max": 0.0008440905186364999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10110040000000002,
"min": 0.10110040000000002,
"max": 0.198440325,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30330120000000005,
"min": 0.2073293,
"max": 0.5813635000000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.490996000000008e-05,
"min": 6.490996000000008e-05,
"max": 0.0049221722175,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00019472988000000024,
"min": 0.00019472988000000024,
"max": 0.014070038650000002,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673354780",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673359758"
},
"total": 4977.296750438,
"count": 1,
"self": 0.6135134000005564,
"children": {
"run_training.setup": {
"total": 0.1610328550000304,
"count": 1,
"self": 0.1610328550000304
},
"TrainerController.start_learning": {
"total": 4976.522204182999,
"count": 1,
"self": 8.932847856117405,
"children": {
"TrainerController._reset_env": {
"total": 6.034937672999945,
"count": 1,
"self": 6.034937672999945
},
"TrainerController.advance": {
"total": 4961.361065979882,
"count": 231618,
"self": 8.458412705196679,
"children": {
"env_step": {
"total": 3269.77078050283,
"count": 231618,
"self": 2784.395340785581,
"children": {
"SubprocessEnvManager._take_step": {
"total": 478.88726788706583,
"count": 231618,
"self": 32.61589710880946,
"children": {
"TorchPolicy.evaluate": {
"total": 446.27137077825637,
"count": 222858,
"self": 67.39098456416514,
"children": {
"TorchPolicy.sample_actions": {
"total": 378.88038621409123,
"count": 222858,
"self": 378.88038621409123
}
}
}
}
},
"workers": {
"total": 6.4881718301830915,
"count": 231618,
"self": 0.0,
"children": {
"worker_root": {
"total": 4957.8727447270385,
"count": 231618,
"is_parallel": true,
"self": 2696.131931449195,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0026382290000128705,
"count": 1,
"is_parallel": true,
"self": 0.0004593920000388607,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00217883699997401,
"count": 2,
"is_parallel": true,
"self": 0.00217883699997401
}
}
},
"UnityEnvironment.step": {
"total": 0.0390804040000603,
"count": 1,
"is_parallel": true,
"self": 0.00037474199996268,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002552170000171827,
"count": 1,
"is_parallel": true,
"self": 0.0002552170000171827
},
"communicator.exchange": {
"total": 0.037461355000004914,
"count": 1,
"is_parallel": true,
"self": 0.037461355000004914
},
"steps_from_proto": {
"total": 0.000989090000075521,
"count": 1,
"is_parallel": true,
"self": 0.00031493200003751554,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006741580000380054,
"count": 2,
"is_parallel": true,
"self": 0.0006741580000380054
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2261.7408132778437,
"count": 231617,
"is_parallel": true,
"self": 65.62979042882262,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 96.53612441980181,
"count": 231617,
"is_parallel": true,
"self": 96.53612441980181
},
"communicator.exchange": {
"total": 1935.8028796440885,
"count": 231617,
"is_parallel": true,
"self": 1935.8028796440885
},
"steps_from_proto": {
"total": 163.77201878513074,
"count": 231617,
"is_parallel": true,
"self": 61.29172527704418,
"children": {
"_process_rank_one_or_two_observation": {
"total": 102.48029350808656,
"count": 463234,
"is_parallel": true,
"self": 102.48029350808656
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1683.1318727718558,
"count": 231618,
"self": 15.458389020965342,
"children": {
"process_trajectory": {
"total": 275.18927695288653,
"count": 231618,
"self": 273.70834820188793,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4809287509986007,
"count": 10,
"self": 1.4809287509986007
}
}
},
"_update_policy": {
"total": 1392.484206798004,
"count": 97,
"self": 355.9358637280079,
"children": {
"TorchPPOOptimizer.update": {
"total": 1036.548343069996,
"count": 2910,
"self": 1036.548343069996
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.100000190490391e-06,
"count": 1,
"self": 1.100000190490391e-06
},
"TrainerController._save_models": {
"total": 0.19335157399927994,
"count": 1,
"self": 0.005382831999668269,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18796874199961167,
"count": 1,
"self": 0.18796874199961167
}
}
}
}
}
}
}