{ "name": "root", "gauges": { "Pyramids.Policy.Entropy.mean": { "value": 0.23785677552223206, "min": 0.23676200211048126, "max": 1.4960755109786987, "count": 50 }, "Pyramids.Policy.Entropy.sum": { "value": 7131.8974609375, "min": 7053.904296875, "max": 45384.9453125, "count": 50 }, "Pyramids.Step.mean": { "value": 1499962.0, "min": 29952.0, "max": 1499962.0, "count": 50 }, "Pyramids.Step.sum": { "value": 1499962.0, "min": 29952.0, "max": 1499962.0, "count": 50 }, "Pyramids.Policy.ExtrinsicValueEstimate.mean": { "value": 0.6610430479049683, "min": -0.11662512272596359, "max": 0.7313958406448364, "count": 50 }, "Pyramids.Policy.ExtrinsicValueEstimate.sum": { "value": 191.04144287109375, "min": -28.10665512084961, "max": 210.64199829101562, "count": 50 }, "Pyramids.Policy.RndValueEstimate.mean": { "value": 0.021805142983794212, "min": -0.014034363441169262, "max": 0.4626796543598175, "count": 50 }, "Pyramids.Policy.RndValueEstimate.sum": { "value": 6.3016862869262695, "min": -3.957690477371216, "max": 109.65507507324219, "count": 50 }, "Pyramids.Losses.PolicyLoss.mean": { "value": 0.06933455764720704, "min": 0.06431559114059869, "max": 0.07327357271805382, "count": 50 }, "Pyramids.Losses.PolicyLoss.sum": { "value": 0.9706838070608986, "min": 0.4724266868597834, "max": 1.0666831057751551, "count": 50 }, "Pyramids.Losses.ValueLoss.mean": { "value": 0.016937498950504788, "min": 0.0001285119702290637, "max": 0.018870547313348477, "count": 50 }, "Pyramids.Losses.ValueLoss.sum": { "value": 0.23712498530706705, "min": 0.0016706556129778278, "max": 0.2784331410075538, "count": 50 }, "Pyramids.Policy.LearningRate.mean": { "value": 3.0779846883238147e-06, "min": 3.0779846883238147e-06, "max": 0.00029676708679192377, "count": 50 }, "Pyramids.Policy.LearningRate.sum": { "value": 4.309178563653341e-05, "min": 4.309178563653341e-05, "max": 0.003737879754040133, "count": 50 }, "Pyramids.Policy.Epsilon.mean": { "value": 0.1010259619047619, "min": 0.1010259619047619, "max": 0.19892236190476195, "count": 50 }, "Pyramids.Policy.Epsilon.sum": { "value": 1.4143634666666667, "min": 1.3794090666666667, "max": 2.645959866666667, "count": 50 }, "Pyramids.Policy.Beta.mean": { "value": 0.00011249359428571448, "min": 0.00011249359428571448, "max": 0.009892343954285714, "count": 50 }, "Pyramids.Policy.Beta.sum": { "value": 0.0015749103200000027, "min": 0.0015749103200000027, "max": 0.12461139068, "count": 50 }, "Pyramids.Losses.RNDLoss.mean": { "value": 0.010959007777273655, "min": 0.010959007777273655, "max": 0.6136869788169861, "count": 50 }, "Pyramids.Losses.RNDLoss.sum": { "value": 0.15342611074447632, "min": 0.15342611074447632, "max": 4.295808792114258, "count": 50 }, "Pyramids.Environment.EpisodeLength.mean": { "value": 280.4074074074074, "min": 265.20754716981133, "max": 999.0, "count": 50 }, "Pyramids.Environment.EpisodeLength.sum": { "value": 30284.0, "min": 15984.0, "max": 33849.0, "count": 50 }, "Pyramids.Environment.CumulativeReward.mean": { "value": 1.682540725502703, "min": -1.0000000521540642, "max": 1.716188773373577, "count": 50 }, "Pyramids.Environment.CumulativeReward.sum": { "value": 181.71439835429192, "min": -32.000001668930054, "max": 190.86299843341112, "count": 50 }, "Pyramids.Policy.ExtrinsicReward.mean": { "value": 1.682540725502703, "min": -1.0000000521540642, "max": 1.716188773373577, "count": 50 }, "Pyramids.Policy.ExtrinsicReward.sum": { "value": 181.71439835429192, "min": -32.000001668930054, "max": 190.86299843341112, "count": 50 }, "Pyramids.Policy.RndReward.mean": { "value": 
0.03190544266396534, "min": 0.03190544266396534, "max": 12.844169417396188, "count": 50 }, "Pyramids.Policy.RndReward.sum": { "value": 3.445787807708257, "min": 3.3767116628514486, "max": 205.506710678339, "count": 50 }, "Pyramids.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 50 }, "Pyramids.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 50 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1705507885", "python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training1 --no-graphics", "mlagents_version": "1.1.0.dev0", "mlagents_envs_version": "1.1.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "2.1.2+cu121", "numpy_version": "1.23.5", "end_time_seconds": "1705511228" }, "total": 3343.187770981999, "count": 1, "self": 0.5262558539989186, "children": { "run_training.setup": { "total": 0.04506059200048185, "count": 1, "self": 0.04506059200048185 }, "TrainerController.start_learning": { "total": 3342.6164545359998, "count": 1, "self": 1.9462820958169686, "children": { "TrainerController._reset_env": { "total": 2.085633787000006, "count": 1, "self": 2.085633787000006 }, "TrainerController.advance": { "total": 3338.5010553151833, "count": 96248, "self": 2.1172421322071386, "children": { "env_step": { "total": 2398.844919742057, "count": 96248, "self": 2209.6243952920167, "children": { "SubprocessEnvManager._take_step": { "total": 188.04296216709827, "count": 96248, "self": 6.752343247265344, "children": { "TorchPolicy.evaluate": { "total": 181.29061891983292, "count": 93805, "self": 181.29061891983292 } } }, "workers": { "total": 1.1775622829418353, "count": 96248, "self": 0.0, "children": { "worker_root": { "total": 3335.769217529085, "count": 96248, "is_parallel": true, "self": 1297.5415993541392, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.001666008000029251, "count": 1, "is_parallel": true, "self": 0.0004997749974791077, "children": { "_process_rank_one_or_two_observation": { "total": 0.0011662330025501433, "count": 8, "is_parallel": true, "self": 0.0011662330025501433 } } }, "UnityEnvironment.step": { "total": 0.052721981999638956, "count": 1, "is_parallel": true, "self": 0.0005631569993056473, "children": { "UnityEnvironment._generate_step_input": { "total": 0.0004868559999522404, "count": 1, "is_parallel": true, "self": 0.0004868559999522404 }, "communicator.exchange": { "total": 0.050070522999703826, "count": 1, "is_parallel": true, "self": 0.050070522999703826 }, "steps_from_proto": { "total": 0.0016014460006772424, "count": 1, "is_parallel": true, "self": 0.00032791200010251487, "children": { "_process_rank_one_or_two_observation": { "total": 0.0012735340005747275, "count": 8, "is_parallel": true, "self": 0.0012735340005747275 } } } } } } }, "UnityEnvironment.step": { "total": 2038.2276181749457, "count": 96247, "is_parallel": true, "self": 51.929656844674355, "children": { "UnityEnvironment._generate_step_input": { "total": 35.15788366003562, "count": 96247, "is_parallel": true, "self": 35.15788366003562 }, "communicator.exchange": { "total": 1806.49592327327, "count": 96247, "is_parallel": true, "self": 1806.49592327327 }, "steps_from_proto": { "total": 144.64415439696586, "count": 96247, "is_parallel": true, "self": 
28.466968297258063, "children": { "_process_rank_one_or_two_observation": { "total": 116.1771860997078, "count": 769976, "is_parallel": true, "self": 116.1771860997078 } } } } } } } } } } }, "trainer_advance": { "total": 937.5388934409193, "count": 96248, "self": 3.8488638757125955, "children": { "process_trajectory": { "total": 186.35211550520307, "count": 96248, "self": 186.03544833420256, "children": { "RLTrainer._checkpoint": { "total": 0.31666717100051756, "count": 3, "self": 0.31666717100051756 } } }, "_update_policy": { "total": 747.3379140600036, "count": 682, "self": 441.85220415202, "children": { "TorchPPOOptimizer.update": { "total": 305.4857099079836, "count": 34209, "self": 305.4857099079836 } } } } } } }, "trainer_threads": { "total": 8.539991540601477e-07, "count": 1, "self": 8.539991540601477e-07 }, "TrainerController._save_models": { "total": 0.08348248400034208, "count": 1, "self": 0.0014419449998968048, "children": { "RLTrainer._checkpoint": { "total": 0.08204053900044528, "count": 1, "self": 0.08204053900044528 } } } } } } }