ppo-PyramidsRND/run_logs/timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.9638763070106506,
"min": 0.8708292841911316,
"max": 1.4101594686508179,
"count": 14
},
"Pyramids.Policy.Entropy.sum": {
"value": 29348.10546875,
"min": 26016.169921875,
"max": 42778.59765625,
"count": 14
},
"Pyramids.Step.mean": {
"value": 419972.0,
"min": 29941.0,
"max": 419972.0,
"count": 14
},
"Pyramids.Step.sum": {
"value": 419972.0,
"min": 29941.0,
"max": 419972.0,
"count": 14
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.04902276024222374,
"min": -0.11009175330400467,
"max": 0.049739401787519455,
"count": 14
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -11.912530899047852,
"min": -26.53211212158203,
"max": 11.788238525390625,
"count": 14
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.025813356041908264,
"min": 0.022236265242099762,
"max": 0.4061439037322998,
"count": 14
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 6.272645473480225,
"min": 5.358940124511719,
"max": 96.256103515625,
"count": 14
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0711750337643039,
"min": 0.06656932569237,
"max": 0.07395162867092234,
"count": 14
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9252754389359507,
"min": 0.5916130293673787,
"max": 1.0026062028326372,
"count": 14
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.004515216187845682,
"min": 0.0002494773856550006,
"max": 0.009266096443126592,
"count": 14
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.05869781044199387,
"min": 0.002744251242205007,
"max": 0.07412877154501274,
"count": 14
},
"Pyramids.Policy.LearningRate.mean": {
"value": 0.00025947569812348974,
"min": 0.00025947569812348974,
"max": 0.0002982804130731958,
"count": 14
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0033731840756053666,
"min": 0.0023862433045855665,
"max": 0.0038007929330690668,
"count": 14
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1864918948717949,
"min": 0.1864918948717949,
"max": 0.1994268041666667,
"count": 14
},
"Pyramids.Policy.Epsilon.sum": {
"value": 2.4243946333333337,
"min": 1.5954144333333335,
"max": 2.666930933333334,
"count": 14
},
"Pyramids.Policy.Beta.mean": {
"value": 0.008650540297692307,
"min": 0.008650540297692307,
"max": 0.00994273773625,
"count": 14
},
"Pyramids.Policy.Beta.sum": {
"value": 0.11245702386999999,
"min": 0.07954190189,
"max": 0.12670640024,
"count": 14
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.027655037119984627,
"min": 0.027655037119984627,
"max": 0.47840026021003723,
"count": 14
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.3595154881477356,
"min": 0.3595154881477356,
"max": 3.827202081680298,
"count": 14
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 879.4571428571429,
"min": 879.4571428571429,
"max": 999.0,
"count": 14
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30781.0,
"min": 16484.0,
"max": 32769.0,
"count": 14
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.30860005021095277,
"min": -0.9999484390981735,
"max": -0.30860005021095277,
"count": 14
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -10.801001757383347,
"min": -30.99840161204338,
"max": -10.801001757383347,
"count": 14
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.30860005021095277,
"min": -0.9999484390981735,
"max": -0.30860005021095277,
"count": 14
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -10.801001757383347,
"min": -30.99840161204338,
"max": -10.801001757383347,
"count": 14
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.25806081465312414,
"min": 0.25806081465312414,
"max": 9.867490260040059,
"count": 14
},
"Pyramids.Policy.RndReward.sum": {
"value": 9.032128512859344,
"min": 7.100933128967881,
"max": 167.747334420681,
"count": 14
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 14
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 14
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678884273",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1678885069"
},
"total": 796.2509718289998,
"count": 1,
"self": 0.26232837299949097,
"children": {
"run_training.setup": {
"total": 0.10471597600007954,
"count": 1,
"self": 0.10471597600007954
},
"TrainerController.start_learning": {
"total": 795.8839274800002,
"count": 1,
"self": 0.5009431500016035,
"children": {
"TrainerController._reset_env": {
"total": 7.175234115999956,
"count": 1,
"self": 7.175234115999956
},
"TrainerController.advance": {
"total": 788.0386093179982,
"count": 26521,
"self": 0.5466147469742282,
"children": {
"env_step": {
"total": 531.0496984850558,
"count": 26521,
"self": 487.67960199503113,
"children": {
"SubprocessEnvManager._take_step": {
"total": 43.05377965902676,
"count": 26521,
"self": 1.8780770330115502,
"children": {
"TorchPolicy.evaluate": {
"total": 41.17570262601521,
"count": 26417,
"self": 41.17570262601521
}
}
},
"workers": {
"total": 0.3163168309979483,
"count": 26520,
"self": 0.0,
"children": {
"worker_root": {
"total": 794.1986699670183,
"count": 26520,
"is_parallel": true,
"self": 352.1496931660281,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017417429999113665,
"count": 1,
"is_parallel": true,
"self": 0.0005573809999077639,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011843620000036026,
"count": 8,
"is_parallel": true,
"self": 0.0011843620000036026
}
}
},
"UnityEnvironment.step": {
"total": 0.04543398000009802,
"count": 1,
"is_parallel": true,
"self": 0.000518151000051148,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004427469998518063,
"count": 1,
"is_parallel": true,
"self": 0.0004427469998518063
},
"communicator.exchange": {
"total": 0.042907402000082584,
"count": 1,
"is_parallel": true,
"self": 0.042907402000082584
},
"steps_from_proto": {
"total": 0.0015656800001124793,
"count": 1,
"is_parallel": true,
"self": 0.00035908099994230724,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001206599000170172,
"count": 8,
"is_parallel": true,
"self": 0.001206599000170172
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 442.0489768009902,
"count": 26519,
"is_parallel": true,
"self": 12.680803778954896,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 9.208501058000593,
"count": 26519,
"is_parallel": true,
"self": 9.208501058000593
},
"communicator.exchange": {
"total": 382.96968831102436,
"count": 26519,
"is_parallel": true,
"self": 382.96968831102436
},
"steps_from_proto": {
"total": 37.18998365301036,
"count": 26519,
"is_parallel": true,
"self": 7.704698988944301,
"children": {
"_process_rank_one_or_two_observation": {
"total": 29.485284664066057,
"count": 212152,
"is_parallel": true,
"self": 29.485284664066057
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 256.4422960859681,
"count": 26520,
"self": 0.7597167379826715,
"children": {
"process_trajectory": {
"total": 45.78197076998413,
"count": 26520,
"self": 45.78197076998413
},
"_update_policy": {
"total": 209.9006085780013,
"count": 171,
"self": 132.90398994699967,
"children": {
"TorchPPOOptimizer.update": {
"total": 76.99661863100164,
"count": 9675,
"self": 76.99661863100164
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.669000084802974e-06,
"count": 1,
"self": 1.669000084802974e-06
},
"TrainerController._save_models": {
"total": 0.1691392270004144,
"count": 1,
"self": 0.002099379000810586,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1670398479996038,
"count": 1,
"self": 0.1670398479996038
}
}
}
}
}
}
}
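A minimal sketch of how one might inspect this report programmatically. `timers.json` is written by `mlagents-learn` into the run's `run_logs/` directory and holds two things: a `gauges` map of training metrics (each with `value`, `min`, `max`, `count`) and a recursive timer tree (`total`/`count`/`self`/`children`) of where wall-clock time went. The relative path below is an assumption based on this repo's layout; adjust it to your run directory.

```python
import json

# Assumed path; point this at your own run_logs/timers.json.
with open("run_logs/timers.json") as f:
    report = json.load(f)

# Summarize each gauge tracked during training.
for name, gauge in report["gauges"].items():
    print(f"{name}: value={gauge['value']:.4g} "
          f"(min={gauge['min']:.4g}, max={gauge['max']:.4g}, n={gauge['count']})")

# Recursively walk the timer tree and print cumulative wall-clock time per node.
def walk(node, name="root", depth=0):
    print(f"{'  ' * depth}{name}: {node.get('total', 0.0):.2f}s "
          f"over {node.get('count', 0)} call(s)")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

walk(report)
```

On this report, the walk would show that of the ~796 s total, roughly 531 s sit under `env_step` (dominated by `communicator.exchange`, the Unity round-trips) versus ~256 s under `trainer_advance`, which is the usual shape for an ML-Agents run that is environment-bound rather than optimizer-bound.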