{ "name": "root", "gauges": { "Pyramids.Policy.Entropy.mean": { "value": 0.14880646765232086, "min": 0.13085918128490448, "max": 1.4282140731811523, "count": 100 }, "Pyramids.Policy.Entropy.sum": { "value": 4428.48046875, "min": 3944.619140625, "max": 43326.30078125, "count": 100 }, "Pyramids.Step.mean": { "value": 2999902.0, "min": 29990.0, "max": 2999902.0, "count": 100 }, "Pyramids.Step.sum": { "value": 2999902.0, "min": 29990.0, "max": 2999902.0, "count": 100 }, "Pyramids.Policy.ExtrinsicValueEstimate.mean": { "value": 0.8498652577400208, "min": -0.09720087796449661, "max": 0.8504406213760376, "count": 100 }, "Pyramids.Policy.ExtrinsicValueEstimate.sum": { "value": 258.3590393066406, "min": -23.328210830688477, "max": 259.0555419921875, "count": 100 }, "Pyramids.Policy.RndValueEstimate.mean": { "value": 0.0056196898221969604, "min": -0.029418986290693283, "max": 0.3736802041530609, "count": 100 }, "Pyramids.Policy.RndValueEstimate.sum": { "value": 1.708385705947876, "min": -7.766612529754639, "max": 88.9358901977539, "count": 100 }, "Pyramids.Losses.PolicyLoss.mean": { "value": 0.06854750087357071, "min": 0.06283316485494432, "max": 0.07330249887152174, "count": 100 }, "Pyramids.Losses.PolicyLoss.sum": { "value": 0.9596650122299899, "min": 0.5736377990072667, "max": 1.078065687829318, "count": 100 }, "Pyramids.Losses.ValueLoss.mean": { "value": 0.013498380142385993, "min": 0.00018856516171736928, "max": 0.018135343533102553, "count": 100 }, "Pyramids.Losses.ValueLoss.sum": { "value": 0.1889773219934039, "min": 0.0024513471023258006, "max": 0.25389480946343573, "count": 100 }, "Pyramids.Policy.LearningRate.mean": { "value": 1.5178780655023814e-06, "min": 1.5178780655023814e-06, "max": 0.0002982541255819584, "count": 100 }, "Pyramids.Policy.LearningRate.sum": { "value": 2.125029291703334e-05, "min": 2.125029291703334e-05, "max": 0.0040113777628741, "count": 100 }, "Pyramids.Policy.Epsilon.mean": { "value": 0.1005059261904762, "min": 0.1005059261904762, "max": 0.19941804166666668, "count": 100 }, "Pyramids.Policy.Epsilon.sum": { "value": 1.4070829666666669, "min": 1.4070829666666669, "max": 2.8275009000000004, "count": 100 }, "Pyramids.Policy.Beta.mean": { "value": 6.054202642857143e-05, "min": 6.054202642857143e-05, "max": 0.0099418623625, "count": 100 }, "Pyramids.Policy.Beta.sum": { "value": 0.0008475883700000001, "min": 0.0008475883700000001, "max": 0.13371887741000002, "count": 100 }, "Pyramids.Losses.RNDLoss.mean": { "value": 0.0058842808939516544, "min": 0.005419283639639616, "max": 0.4554038643836975, "count": 100 }, "Pyramids.Losses.RNDLoss.sum": { "value": 0.08237992972135544, "min": 0.07586997002363205, "max": 3.64323091506958, "count": 100 }, "Pyramids.Environment.EpisodeLength.mean": { "value": 217.2391304347826, "min": 204.15540540540542, "max": 999.0, "count": 100 }, "Pyramids.Environment.EpisodeLength.sum": { "value": 29979.0, "min": 17556.0, "max": 33284.0, "count": 100 }, "Pyramids.Environment.CumulativeReward.mean": { "value": 1.7682652039372402, "min": -0.9999484388578322, "max": 1.7958445885294192, "count": 100 }, "Pyramids.Environment.CumulativeReward.sum": { "value": 244.02059814333916, "min": -31.99480165541172, "max": 265.78499910235405, "count": 100 }, "Pyramids.Policy.ExtrinsicReward.mean": { "value": 1.7682652039372402, "min": -0.9999484388578322, "max": 1.7958445885294192, "count": 100 }, "Pyramids.Policy.ExtrinsicReward.sum": { "value": 244.02059814333916, "min": -31.99480165541172, "max": 265.78499910235405, "count": 100 }, "Pyramids.Policy.RndReward.mean": 
{ "value": 0.013573649943440536, "min": 0.012836840184172615, "max": 9.239888391560978, "count": 100 }, "Pyramids.Policy.RndReward.sum": { "value": 1.8731636921947938, "min": 1.66878922394244, "max": 166.3179910480976, "count": 100 }, "Pyramids.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 100 }, "Pyramids.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 100 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1712067862", "python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics", "mlagents_version": "1.1.0.dev0", "mlagents_envs_version": "1.1.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "2.2.1+cu121", "numpy_version": "1.23.5", "end_time_seconds": "1712075701" }, "total": 7839.3951579760005, "count": 1, "self": 0.5292454870004804, "children": { "run_training.setup": { "total": 0.05581000699976357, "count": 1, "self": 0.05581000699976357 }, "TrainerController.start_learning": { "total": 7838.810102482001, "count": 1, "self": 5.239425150288298, "children": { "TrainerController._reset_env": { "total": 2.692407330000151, "count": 1, "self": 2.692407330000151 }, "TrainerController.advance": { "total": 7830.789007934712, "count": 194777, "self": 5.447606398954122, "children": { "env_step": { "total": 5822.306601738667, "count": 194777, "self": 5366.708253669529, "children": { "SubprocessEnvManager._take_step": { "total": 452.34886138409183, "count": 194777, "self": 16.037159126177357, "children": { "TorchPolicy.evaluate": { "total": 436.3117022579145, "count": 187535, "self": 436.3117022579145 } } }, "workers": { "total": 3.249486685046577, "count": 194777, "self": 0.0, "children": { "worker_root": { "total": 7820.891294055741, "count": 194777, "is_parallel": true, "self": 2877.4523589436167, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0021717209997405007, "count": 1, "is_parallel": true, "self": 0.0006478839995907038, "children": { "_process_rank_one_or_two_observation": { "total": 0.001523837000149797, "count": 8, "is_parallel": true, "self": 0.001523837000149797 } } }, "UnityEnvironment.step": { "total": 0.05549811399987448, "count": 1, "is_parallel": true, "self": 0.000666289000037068, "children": { "UnityEnvironment._generate_step_input": { "total": 0.00041843400003926945, "count": 1, "is_parallel": true, "self": 0.00041843400003926945 }, "communicator.exchange": { "total": 0.052744757999789726, "count": 1, "is_parallel": true, "self": 0.052744757999789726 }, "steps_from_proto": { "total": 0.0016686330000084126, "count": 1, "is_parallel": true, "self": 0.0003386729995327187, "children": { "_process_rank_one_or_two_observation": { "total": 0.001329960000475694, "count": 8, "is_parallel": true, "self": 0.001329960000475694 } } } } } } }, "UnityEnvironment.step": { "total": 4943.438935112124, "count": 194776, "is_parallel": true, "self": 119.3947417385607, "children": { "UnityEnvironment._generate_step_input": { "total": 76.32733853764648, "count": 194776, "is_parallel": true, "self": 76.32733853764648 }, "communicator.exchange": { "total": 4419.271306128947, "count": 194776, "is_parallel": true, "self": 4419.271306128947 }, "steps_from_proto": { "total": 328.44554870696993, "count": 194776, "is_parallel": true, 
"self": 69.44973935938242, "children": { "_process_rank_one_or_two_observation": { "total": 258.9958093475875, "count": 1558208, "is_parallel": true, "self": 258.9958093475875 } } } } } } } } } } }, "trainer_advance": { "total": 2003.0347997970903, "count": 194777, "self": 10.56110009298709, "children": { "process_trajectory": { "total": 413.92685118912505, "count": 194777, "self": 413.350276952126, "children": { "RLTrainer._checkpoint": { "total": 0.5765742369990221, "count": 6, "self": 0.5765742369990221 } } }, "_update_policy": { "total": 1578.5468485149781, "count": 1403, "self": 923.6471382898044, "children": { "TorchPPOOptimizer.update": { "total": 654.8997102251737, "count": 68310, "self": 654.8997102251737 } } } } } } }, "trainer_threads": { "total": 9.830000635702163e-07, "count": 1, "self": 9.830000635702163e-07 }, "TrainerController._save_models": { "total": 0.0892610840000998, "count": 1, "self": 0.0018049940008495469, "children": { "RLTrainer._checkpoint": { "total": 0.08745608999925025, "count": 1, "self": 0.08745608999925025 } } } } } } }