{ "name": "root", "gauges": { "Pyramids.Policy.Entropy.mean": { "value": 0.3387645184993744, "min": 0.31888067722320557, "max": 1.507812261581421, "count": 38 }, "Pyramids.Policy.Entropy.sum": { "value": 10249.6591796875, "min": 9489.888671875, "max": 45740.9921875, "count": 38 }, "Pyramids.Step.mean": { "value": 1139974.0, "min": 29952.0, "max": 1139974.0, "count": 38 }, "Pyramids.Step.sum": { "value": 1139974.0, "min": 29952.0, "max": 1139974.0, "count": 38 }, "Pyramids.Policy.ExtrinsicValueEstimate.mean": { "value": 0.7842142581939697, "min": -0.15182361006736755, "max": 0.7842142581939697, "count": 38 }, "Pyramids.Policy.ExtrinsicValueEstimate.sum": { "value": 239.9695587158203, "min": -35.98219680786133, "max": 239.9695587158203, "count": 38 }, "Pyramids.Policy.RndValueEstimate.mean": { "value": 0.017610697075724602, "min": 0.006322672590613365, "max": 0.35876649618148804, "count": 38 }, "Pyramids.Policy.RndValueEstimate.sum": { "value": 5.388873100280762, "min": 1.7577029466629028, "max": 85.02765655517578, "count": 38 }, "Pyramids.Losses.PolicyLoss.mean": { "value": 0.0728328871654124, "min": 0.06378227886411228, "max": 0.07296234771187993, "count": 38 }, "Pyramids.Losses.PolicyLoss.sum": { "value": 1.0196604203157738, "min": 0.4845466595472589, "max": 1.0545992280268703, "count": 38 }, "Pyramids.Losses.ValueLoss.mean": { "value": 0.014479745659261098, "min": 0.001521426011475578, "max": 0.018084776733303443, "count": 38 }, "Pyramids.Losses.ValueLoss.sum": { "value": 0.20271643922965538, "min": 0.016492177840752204, "max": 0.2531868742662482, "count": 38 }, "Pyramids.Policy.LearningRate.mean": { "value": 0.0001874845303623119, "min": 0.0001874845303623119, "max": 0.00029838354339596195, "count": 38 }, "Pyramids.Policy.LearningRate.sum": { "value": 0.0026247834250723665, "min": 0.0020886848037717336, "max": 0.003969655476781533, "count": 38 }, "Pyramids.Policy.Epsilon.mean": { "value": 0.16249483095238096, "min": 0.16249483095238096, "max": 0.19946118095238097, "count": 38 }, "Pyramids.Policy.Epsilon.sum": { "value": 2.2749276333333333, "min": 1.3962282666666668, "max": 2.7526151666666667, "count": 38 }, "Pyramids.Policy.Beta.mean": { "value": 0.006253233612142856, "min": 0.006253233612142856, "max": 0.009946171977142856, "count": 38 }, "Pyramids.Policy.Beta.sum": { "value": 0.08754527056999999, "min": 0.06962320384, "max": 0.13232952482, "count": 38 }, "Pyramids.Losses.RNDLoss.mean": { "value": 0.012935087084770203, "min": 0.012935087084770203, "max": 0.4273882508277893, "count": 38 }, "Pyramids.Losses.RNDLoss.sum": { "value": 0.18109121918678284, "min": 0.18109121918678284, "max": 2.99171781539917, "count": 38 }, "Pyramids.Environment.EpisodeLength.mean": { "value": 244.23622047244095, "min": 244.23622047244095, "max": 999.0, "count": 38 }, "Pyramids.Environment.EpisodeLength.sum": { "value": 31018.0, "min": 15984.0, "max": 32717.0, "count": 38 }, "Pyramids.Environment.CumulativeReward.mean": { "value": 1.7557637696191082, "min": -1.0000000521540642, "max": 1.7557637696191082, "count": 38 }, "Pyramids.Environment.CumulativeReward.sum": { "value": 222.98199874162674, "min": -30.676401637494564, "max": 222.98199874162674, "count": 38 }, "Pyramids.Policy.ExtrinsicReward.mean": { "value": 1.7557637696191082, "min": -1.0000000521540642, "max": 1.7557637696191082, "count": 38 }, "Pyramids.Policy.ExtrinsicReward.sum": { "value": 222.98199874162674, "min": -30.676401637494564, "max": 222.98199874162674, "count": 38 }, "Pyramids.Policy.RndReward.mean": { "value": 0.03278945655356653, "min": 
0.03278945655356653, "max": 8.399812946096063, "count": 38 }, "Pyramids.Policy.RndReward.sum": { "value": 4.164260982302949, "min": 4.040977261138323, "max": 134.397007137537, "count": 38 }, "Pyramids.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 38 }, "Pyramids.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 38 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1735218647", "python_version": "3.10.12 (main, Nov 6 2024, 20:22:13) [GCC 11.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./ml-agents/config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics", "mlagents_version": "1.2.0.dev0", "mlagents_envs_version": "1.2.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "2.5.1+cu121", "numpy_version": "1.23.5", "end_time_seconds": "1735222788" }, "total": 4140.671235691, "count": 1, "self": 0.7486039460009124, "children": { "run_training.setup": { "total": 0.07072070699996402, "count": 1, "self": 0.07072070699996402 }, "TrainerController.start_learning": { "total": 4139.851911037999, "count": 1, "self": 2.970015475118089, "children": { "TrainerController._reset_env": { "total": 3.749637024000549, "count": 1, "self": 3.749637024000549 }, "TrainerController.advance": { "total": 4132.993147969881, "count": 75309, "self": 3.0848376317617294, "children": { "env_step": { "total": 2841.732845197186, "count": 75309, "self": 2643.9246244103715, "children": { "SubprocessEnvManager._take_step": { "total": 196.09917332780333, "count": 75309, "self": 8.710568079820405, "children": { "TorchPolicy.evaluate": { "total": 187.38860524798292, "count": 73083, "self": 187.38860524798292 } } }, "workers": { "total": 1.7090474590113445, "count": 75309, "self": 0.0, "children": { "worker_root": { "total": 4129.183153381927, "count": 75309, "is_parallel": true, "self": 1704.7684151600306, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.004257460000189894, "count": 1, "is_parallel": true, "self": 0.0014964299998609931, "children": { "_process_rank_one_or_two_observation": { "total": 0.0027610300003289012, "count": 8, "is_parallel": true, "self": 0.0027610300003289012 } } }, "UnityEnvironment.step": { "total": 0.10920169700057158, "count": 1, "is_parallel": true, "self": 0.0007959900012792787, "children": { "UnityEnvironment._generate_step_input": { "total": 0.0005358440002964926, "count": 1, "is_parallel": true, "self": 0.0005358440002964926 }, "communicator.exchange": { "total": 0.10577807699974073, "count": 1, "is_parallel": true, "self": 0.10577807699974073 }, "steps_from_proto": { "total": 0.0020917859992550802, "count": 1, "is_parallel": true, "self": 0.00046363399997062515, "children": { "_process_rank_one_or_two_observation": { "total": 0.001628151999284455, "count": 8, "is_parallel": true, "self": 0.001628151999284455 } } } } } } }, "UnityEnvironment.step": { "total": 2424.414738221896, "count": 75308, "is_parallel": true, "self": 61.67821465241832, "children": { "UnityEnvironment._generate_step_input": { "total": 37.65305203113894, "count": 75308, "is_parallel": true, "self": 37.65305203113894 }, "communicator.exchange": { "total": 2166.948624449299, "count": 75308, "is_parallel": true, "self": 2166.948624449299 }, "steps_from_proto": { "total": 158.1348470890398, "count": 75308, "is_parallel": true, "self": 34.218007979646245, "children": { 
"_process_rank_one_or_two_observation": { "total": 123.91683910939355, "count": 602464, "is_parallel": true, "self": 123.91683910939355 } } } } } } } } } } }, "trainer_advance": { "total": 1288.1754651409328, "count": 75309, "self": 5.757001725854025, "children": { "process_trajectory": { "total": 200.41994352107758, "count": 75309, "self": 200.21983868007828, "children": { "RLTrainer._checkpoint": { "total": 0.20010484099930181, "count": 2, "self": 0.20010484099930181 } } }, "_update_policy": { "total": 1081.9985198940012, "count": 533, "self": 443.4313299279147, "children": { "TorchPPOOptimizer.update": { "total": 638.5671899660865, "count": 26635, "self": 638.5671899660865 } } } } } } }, "trainer_threads": { "total": 1.8049995560431853e-06, "count": 1, "self": 1.8049995560431853e-06 }, "TrainerController._save_models": { "total": 0.1391087640004116, "count": 1, "self": 0.0028435690019250615, "children": { "RLTrainer._checkpoint": { "total": 0.13626519499848655, "count": 1, "self": 0.13626519499848655 } } } } } } }