{ "name": "root", "gauges": { "Pyramids.Policy.Entropy.mean": { "value": 0.24086864292621613, "min": 0.24046899378299713, "max": 1.475688099861145, "count": 50 }, "Pyramids.Policy.Entropy.sum": { "value": 7210.6435546875, "min": 7210.6435546875, "max": 44766.47265625, "count": 50 }, "Pyramids.Step.mean": { "value": 1499994.0, "min": 29952.0, "max": 1499994.0, "count": 50 }, "Pyramids.Step.sum": { "value": 1499994.0, "min": 29952.0, "max": 1499994.0, "count": 50 }, "Pyramids.Policy.ExtrinsicValueEstimate.mean": { "value": 0.6576626896858215, "min": -0.09953401982784271, "max": 0.748979926109314, "count": 50 }, "Pyramids.Policy.ExtrinsicValueEstimate.sum": { "value": 186.7761993408203, "min": -23.987699508666992, "max": 219.45111083984375, "count": 50 }, "Pyramids.Policy.RndValueEstimate.mean": { "value": -0.00022034514404367656, "min": -0.02161170169711113, "max": 0.20931462943553925, "count": 50 }, "Pyramids.Policy.RndValueEstimate.sum": { "value": -0.06257802248001099, "min": -5.801077842712402, "max": 49.607566833496094, "count": 50 }, "Pyramids.Losses.PolicyLoss.mean": { "value": 0.06693622169301047, "min": 0.06455727419763782, "max": 0.07432421988665333, "count": 50 }, "Pyramids.Losses.PolicyLoss.sum": { "value": 0.9371071037021466, "min": 0.5094311897286304, "max": 1.1148632982998, "count": 50 }, "Pyramids.Losses.ValueLoss.mean": { "value": 0.01406592396995707, "min": 0.0006072967257850309, "max": 0.017174474984252206, "count": 50 }, "Pyramids.Losses.ValueLoss.sum": { "value": 0.196922935579399, "min": 0.008502154160990432, "max": 0.25761712476378307, "count": 50 }, "Pyramids.Policy.LearningRate.mean": { "value": 3.0564704097809512e-06, "min": 3.0564704097809512e-06, "max": 0.00029676708679192377, "count": 50 }, "Pyramids.Policy.LearningRate.sum": { "value": 4.279058573693332e-05, "min": 4.279058573693332e-05, "max": 0.0037392945535685324, "count": 50 }, "Pyramids.Policy.Epsilon.mean": { "value": 0.10101879047619049, "min": 0.10101879047619049, "max": 0.19892236190476195, "count": 50 }, "Pyramids.Policy.Epsilon.sum": { "value": 1.414263066666667, "min": 1.3924565333333336, "max": 2.646431466666667, "count": 50 }, "Pyramids.Policy.Beta.mean": { "value": 0.00011177716857142851, "min": 0.00011177716857142851, "max": 0.009892343954285714, "count": 50 }, "Pyramids.Policy.Beta.sum": { "value": 0.0015648803599999993, "min": 0.0015648803599999993, "max": 0.12465850351999999, "count": 50 }, "Pyramids.Losses.RNDLoss.mean": { "value": 0.0075902934186160564, "min": 0.007568437606096268, "max": 0.31211981177330017, "count": 50 }, "Pyramids.Losses.RNDLoss.sum": { "value": 0.10626410692930222, "min": 0.10595812648534775, "max": 2.1848387718200684, "count": 50 }, "Pyramids.Environment.EpisodeLength.mean": { "value": 285.26732673267327, "min": 244.76923076923077, "max": 999.0, "count": 50 }, "Pyramids.Environment.EpisodeLength.sum": { "value": 28812.0, "min": 15984.0, "max": 34146.0, "count": 50 }, "Pyramids.Environment.CumulativeReward.mean": { "value": 1.6766197800931364, "min": -1.0000000521540642, "max": 1.7546379173110271, "count": 50 }, "Pyramids.Environment.CumulativeReward.sum": { "value": 169.33859778940678, "min": -28.96400159597397, "max": 206.5191983729601, "count": 50 }, "Pyramids.Policy.ExtrinsicReward.mean": { "value": 1.6766197800931364, "min": -1.0000000521540642, "max": 1.7546379173110271, "count": 50 }, "Pyramids.Policy.ExtrinsicReward.sum": { "value": 169.33859778940678, "min": -28.96400159597397, "max": 206.5191983729601, "count": 50 }, "Pyramids.Policy.RndReward.mean": { "value": 
0.022270725121289966, "min": 0.01981618881698774, "max": 6.223832651972771, "count": 50 }, "Pyramids.Policy.RndReward.sum": { "value": 2.2493432372502866, "min": 2.2493432372502866, "max": 99.58132243156433, "count": 50 }, "Pyramids.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 50 }, "Pyramids.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 50 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1731716584", "python_version": "3.10.12 (main, Sep 11 2024, 15:47:36) [GCC 11.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./ml-agents/config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --resume", "mlagents_version": "1.2.0.dev0", "mlagents_envs_version": "1.2.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "2.5.1+cu121", "numpy_version": "1.23.5", "end_time_seconds": "1731719208" }, "total": 2623.7821835010004, "count": 1, "self": 0.3724162960002104, "children": { "run_training.setup": { "total": 0.05994252899995445, "count": 1, "self": 0.05994252899995445 }, "TrainerController.start_learning": { "total": 2623.349824676, "count": 1, "self": 2.0939361179684965, "children": { "TrainerController._reset_env": { "total": 2.157245813999907, "count": 1, "self": 2.157245813999907 }, "TrainerController.advance": { "total": 2619.019132943032, "count": 96607, "self": 1.989180416983345, "children": { "env_step": { "total": 1652.866439520008, "count": 96607, "self": 1437.1488113478774, "children": { "SubprocessEnvManager._take_step": { "total": 214.53055842902245, "count": 96607, "self": 6.633136438040765, "children": { "TorchPolicy.evaluate": { "total": 207.89742199098168, "count": 93808, "self": 207.89742199098168 } } }, "workers": { "total": 1.187069743108168, "count": 96607, "self": 0.0, "children": { "worker_root": { "total": 2620.0977232469286, "count": 96607, "is_parallel": true, "self": 1322.9525804668212, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0022280390001014894, "count": 1, "is_parallel": true, "self": 0.0006998720004958159, "children": { "_process_rank_one_or_two_observation": { "total": 0.0015281669996056735, "count": 8, "is_parallel": true, "self": 0.0015281669996056735 } } }, "UnityEnvironment.step": { "total": 0.039696251000123084, "count": 1, "is_parallel": true, "self": 0.0004782560001785896, "children": { "UnityEnvironment._generate_step_input": { "total": 0.0003014549999988958, "count": 1, "is_parallel": true, "self": 0.0003014549999988958 }, "communicator.exchange": { "total": 0.03774575900001764, "count": 1, "is_parallel": true, "self": 0.03774575900001764 }, "steps_from_proto": { "total": 0.0011707809999279561, "count": 1, "is_parallel": true, "self": 0.0002621999994971702, "children": { "_process_rank_one_or_two_observation": { "total": 0.0009085810004307859, "count": 8, "is_parallel": true, "self": 0.0009085810004307859 } } } } } } }, "UnityEnvironment.step": { "total": 1297.1451427801073, "count": 96606, "is_parallel": true, "self": 32.51072544004592, "children": { "UnityEnvironment._generate_step_input": { "total": 21.762249334008402, "count": 96606, "is_parallel": true, "self": 21.762249334008402 }, "communicator.exchange": { "total": 1146.1673066949718, "count": 96606, "is_parallel": true, "self": 1146.1673066949718 }, "steps_from_proto": { "total": 96.70486131108123, "count": 96606, "is_parallel": 
true, "self": 20.981771098198806, "children": { "_process_rank_one_or_two_observation": { "total": 75.72309021288243, "count": 772848, "is_parallel": true, "self": 75.72309021288243 } } } } } } } } } } }, "trainer_advance": { "total": 964.1635130060406, "count": 96607, "self": 4.11297704008075, "children": { "process_trajectory": { "total": 184.33501653496228, "count": 96607, "self": 184.04931302196178, "children": { "RLTrainer._checkpoint": { "total": 0.2857035130004988, "count": 3, "self": 0.2857035130004988 } } }, "_update_policy": { "total": 775.7155194309976, "count": 692, "self": 425.1706987940238, "children": { "TorchPPOOptimizer.update": { "total": 350.5448206369738, "count": 34122, "self": 350.5448206369738 } } } } } } }, "trainer_threads": { "total": 1.0739995559561066e-06, "count": 1, "self": 1.0739995559561066e-06 }, "TrainerController._save_models": { "total": 0.07950872699984757, "count": 1, "self": 0.0017604429995117243, "children": { "RLTrainer._checkpoint": { "total": 0.07774828400033584, "count": 1, "self": 0.07774828400033584 } } } } } } }