{ "name": "root", "gauges": { "Pyramids.Policy.Entropy.mean": { "value": 0.3768202066421509, "min": 0.3602882921695709, "max": 1.4753899574279785, "count": 33 }, "Pyramids.Policy.Entropy.sum": { "value": 11292.5478515625, "min": 10814.4130859375, "max": 44757.4296875, "count": 33 }, "Pyramids.Step.mean": { "value": 989967.0, "min": 29952.0, "max": 989967.0, "count": 33 }, "Pyramids.Step.sum": { "value": 989967.0, "min": 29952.0, "max": 989967.0, "count": 33 }, "Pyramids.Policy.ExtrinsicValueEstimate.mean": { "value": 0.559971809387207, "min": -0.12040328979492188, "max": 0.6346123218536377, "count": 33 }, "Pyramids.Policy.ExtrinsicValueEstimate.sum": { "value": 157.91204833984375, "min": -29.017192840576172, "max": 183.4029541015625, "count": 33 }, "Pyramids.Policy.RndValueEstimate.mean": { "value": 0.008066359907388687, "min": -0.013307561166584492, "max": 0.22606909275054932, "count": 33 }, "Pyramids.Policy.RndValueEstimate.sum": { "value": 2.2747135162353516, "min": -3.5797338485717773, "max": 54.48265075683594, "count": 33 }, "Pyramids.Losses.PolicyLoss.mean": { "value": 0.07277559271964039, "min": 0.06513140253287188, "max": 0.07366678260215041, "count": 33 }, "Pyramids.Losses.PolicyLoss.sum": { "value": 1.0188582980749654, "min": 0.513647898213052, "max": 1.1050017390322562, "count": 33 }, "Pyramids.Losses.ValueLoss.mean": { "value": 0.01718260951947384, "min": 0.0009757625032288733, "max": 0.01718260951947384, "count": 33 }, "Pyramids.Losses.ValueLoss.sum": { "value": 0.24055653327263377, "min": 0.013660675045204226, "max": 0.24055653327263377, "count": 33 }, "Pyramids.Policy.LearningRate.mean": { "value": 7.556626052585716e-06, "min": 7.556626052585716e-06, "max": 0.00029515063018788575, "count": 33 }, "Pyramids.Policy.LearningRate.sum": { "value": 0.00010579276473620003, "min": 0.00010579276473620003, "max": 0.0037574890475037, "count": 33 }, "Pyramids.Policy.Epsilon.mean": { "value": 0.10251884285714286, "min": 0.10251884285714286, "max": 0.19838354285714285, "count": 33 }, "Pyramids.Policy.Epsilon.sum": { "value": 1.4352638, "min": 1.3886848, "max": 2.6524962999999997, "count": 33 }, "Pyramids.Policy.Beta.mean": { "value": 0.00026163240142857146, "min": 0.00026163240142857146, "max": 0.00983851593142857, "count": 33 }, "Pyramids.Policy.Beta.sum": { "value": 0.0036628536200000003, "min": 0.0036628536200000003, "max": 0.12526438036999998, "count": 33 }, "Pyramids.Losses.RNDLoss.mean": { "value": 0.007854354567825794, "min": 0.007758575491607189, "max": 0.33130520582199097, "count": 33 }, "Pyramids.Losses.RNDLoss.sum": { "value": 0.10996095836162567, "min": 0.1086200550198555, "max": 2.319136381149292, "count": 33 }, "Pyramids.Environment.EpisodeLength.mean": { "value": 314.0210526315789, "min": 304.5904761904762, "max": 999.0, "count": 33 }, "Pyramids.Environment.EpisodeLength.sum": { "value": 29832.0, "min": 15984.0, "max": 34377.0, "count": 33 }, "Pyramids.Environment.CumulativeReward.mean": { "value": 1.6228020914291081, "min": -1.0000000521540642, "max": 1.657306653828848, "count": 33 }, "Pyramids.Environment.CumulativeReward.sum": { "value": 154.16619868576527, "min": -27.817001566290855, "max": 174.01719865202904, "count": 33 }, "Pyramids.Policy.ExtrinsicReward.mean": { "value": 1.6228020914291081, "min": -1.0000000521540642, "max": 1.657306653828848, "count": 33 }, "Pyramids.Policy.ExtrinsicReward.sum": { "value": 154.16619868576527, "min": -27.817001566290855, "max": 174.01719865202904, "count": 33 }, "Pyramids.Policy.RndReward.mean": { "value": 0.02556955931404924, "min": 
0.024813593196380507, "max": 6.577359039336443, "count": 33 }, "Pyramids.Policy.RndReward.sum": { "value": 2.429108134834678, "min": 2.3135363211040385, "max": 105.23774462938309, "count": 33 }, "Pyramids.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 33 }, "Pyramids.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 33 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1724848704", "python_version": "3.10.12 (main, Jul 29 2024, 16:56:48) [GCC 11.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics", "mlagents_version": "1.1.0.dev0", "mlagents_envs_version": "1.1.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "2.4.0+cu121", "numpy_version": "1.23.5", "end_time_seconds": "1724852705" }, "total": 4001.1839438129996, "count": 1, "self": 0.7523128669990911, "children": { "run_training.setup": { "total": 0.09746441899994807, "count": 1, "self": 0.09746441899994807 }, "TrainerController.start_learning": { "total": 4000.3341665270004, "count": 1, "self": 3.005080662071123, "children": { "TrainerController._reset_env": { "total": 3.462283059000015, "count": 1, "self": 3.462283059000015 }, "TrainerController.advance": { "total": 3993.7693979199303, "count": 64153, "self": 3.03215360291415, "children": { "env_step": { "total": 2766.0681710219633, "count": 64153, "self": 2570.4991680099733, "children": { "SubprocessEnvManager._take_step": { "total": 193.80120430996612, "count": 64153, "self": 8.754843848900236, "children": { "TorchPolicy.evaluate": { "total": 185.0463604610659, "count": 62560, "self": 185.0463604610659 } } }, "workers": { "total": 1.767798702023697, "count": 64153, "self": 0.0, "children": { "worker_root": { "total": 3991.2023763959487, "count": 64153, "is_parallel": true, "self": 1647.939381191, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0038328650000494235, "count": 1, "is_parallel": true, "self": 0.0012922319999688625, "children": { "_process_rank_one_or_two_observation": { "total": 0.002540633000080561, "count": 8, "is_parallel": true, "self": 0.002540633000080561 } } }, "UnityEnvironment.step": { "total": 0.07255502400005298, "count": 1, "is_parallel": true, "self": 0.0009380920000694459, "children": { "UnityEnvironment._generate_step_input": { "total": 0.0005989969999973255, "count": 1, "is_parallel": true, "self": 0.0005989969999973255 }, "communicator.exchange": { "total": 0.06859343899998294, "count": 1, "is_parallel": true, "self": 0.06859343899998294 }, "steps_from_proto": { "total": 0.0024244960000032734, "count": 1, "is_parallel": true, "self": 0.0005218469999590525, "children": { "_process_rank_one_or_two_observation": { "total": 0.0019026490000442209, "count": 8, "is_parallel": true, "self": 0.0019026490000442209 } } } } } } }, "UnityEnvironment.step": { "total": 2343.2629952049488, "count": 64152, "is_parallel": true, "self": 59.117644186915186, "children": { "UnityEnvironment._generate_step_input": { "total": 34.70970180601762, "count": 64152, "is_parallel": true, "self": 34.70970180601762 }, "communicator.exchange": { "total": 2103.1303248019535, "count": 64152, "is_parallel": true, "self": 2103.1303248019535 }, "steps_from_proto": { "total": 146.3053244100622, "count": 64152, "is_parallel": true, "self": 32.95847579378426, "children": { 
"_process_rank_one_or_two_observation": { "total": 113.34684861627795, "count": 513216, "is_parallel": true, "self": 113.34684861627795 } } } } } } } } } } }, "trainer_advance": { "total": 1224.6690732950528, "count": 64153, "self": 6.202726828018513, "children": { "process_trajectory": { "total": 198.50741224803153, "count": 64153, "self": 198.0819124720316, "children": { "RLTrainer._checkpoint": { "total": 0.42549977599992417, "count": 2, "self": 0.42549977599992417 } } }, "_update_policy": { "total": 1019.9589342190029, "count": 459, "self": 411.72534896602554, "children": { "TorchPPOOptimizer.update": { "total": 608.2335852529774, "count": 22776, "self": 608.2335852529774 } } } } } } }, "trainer_threads": { "total": 2.119999408023432e-06, "count": 1, "self": 2.119999408023432e-06 }, "TrainerController._save_models": { "total": 0.09740276599950448, "count": 1, "self": 0.002778328998829238, "children": { "RLTrainer._checkpoint": { "total": 0.09462443700067524, "count": 1, "self": 0.09462443700067524 } } } } } } }