{ "name": "root", "gauges": { "Pyramids.Policy.Entropy.mean": { "value": 0.1884627491235733, "min": 0.17770697176456451, "max": 1.3928875923156738, "count": 100 }, "Pyramids.Policy.Entropy.sum": { "value": 5671.97509765625, "min": 5331.208984375, "max": 42254.63671875, "count": 100 }, "Pyramids.Step.mean": { "value": 2999881.0, "min": 29952.0, "max": 2999881.0, "count": 100 }, "Pyramids.Step.sum": { "value": 2999881.0, "min": 29952.0, "max": 2999881.0, "count": 100 }, "Pyramids.Policy.ExtrinsicValueEstimate.mean": { "value": 0.8106731176376343, "min": -0.14450861513614655, "max": 0.8633511662483215, "count": 100 }, "Pyramids.Policy.ExtrinsicValueEstimate.sum": { "value": 244.82327270507812, "min": -34.24854278564453, "max": 266.7755126953125, "count": 100 }, "Pyramids.Policy.RndValueEstimate.mean": { "value": -0.009020315483212471, "min": -0.017275137826800346, "max": 0.26931488513946533, "count": 100 }, "Pyramids.Policy.RndValueEstimate.sum": { "value": -2.724135160446167, "min": -5.096165657043457, "max": 64.63557434082031, "count": 100 }, "Pyramids.Losses.PolicyLoss.mean": { "value": 0.0676077923621051, "min": 0.063975483951526, "max": 0.0752293280066912, "count": 100 }, "Pyramids.Losses.PolicyLoss.sum": { "value": 1.0141168854315765, "min": 0.48122659149404406, "max": 1.1060026509352896, "count": 100 }, "Pyramids.Losses.ValueLoss.mean": { "value": 0.01731761215099444, "min": 0.00029513084964479273, "max": 0.01731761215099444, "count": 100 }, "Pyramids.Losses.ValueLoss.sum": { "value": 0.2597641822649166, "min": 0.004131831895027098, "max": 0.2597641822649166, "count": 100 }, "Pyramids.Policy.LearningRate.mean": { "value": 1.500059500013329e-06, "min": 1.500059500013329e-06, "max": 0.00029838354339596195, "count": 100 }, "Pyramids.Policy.LearningRate.sum": { "value": 2.2500892500199937e-05, "min": 2.2500892500199937e-05, "max": 0.0040110954629681995, "count": 100 }, "Pyramids.Policy.Epsilon.mean": { "value": 0.10049998666666668, "min": 0.10049998666666668, "max": 0.19946118095238097, "count": 100 }, "Pyramids.Policy.Epsilon.sum": { "value": 1.5074998000000002, "min": 1.3897045333333333, "max": 2.737532733333334, "count": 100 }, "Pyramids.Policy.Beta.mean": { "value": 5.994866799999986e-05, "min": 5.994866799999986e-05, "max": 0.009946171977142856, "count": 100 }, "Pyramids.Policy.Beta.sum": { "value": 0.000899230019999998, "min": 0.000899230019999998, "max": 0.13370947682, "count": 100 }, "Pyramids.Losses.RNDLoss.mean": { "value": 0.005434734746813774, "min": 0.005048881750553846, "max": 0.40165701508522034, "count": 100 }, "Pyramids.Losses.RNDLoss.sum": { "value": 0.08152101933956146, "min": 0.07068434357643127, "max": 2.811599016189575, "count": 100 }, "Pyramids.Environment.EpisodeLength.mean": { "value": 214.28776978417267, "min": 207.80851063829786, "max": 999.0, "count": 100 }, "Pyramids.Environment.EpisodeLength.sum": { "value": 29786.0, "min": 15984.0, "max": 32647.0, "count": 100 }, "Pyramids.Environment.CumulativeReward.mean": { "value": 1.7855999840157373, "min": -1.0000000521540642, "max": 1.7913146783838738, "count": 100 }, "Pyramids.Environment.CumulativeReward.sum": { "value": 249.98399776220322, "min": -32.000001668930054, "max": 256.15799900889397, "count": 100 }, "Pyramids.Policy.ExtrinsicReward.mean": { "value": 1.7855999840157373, "min": -1.0000000521540642, "max": 1.7913146783838738, "count": 100 }, "Pyramids.Policy.ExtrinsicReward.sum": { "value": 249.98399776220322, "min": -32.000001668930054, "max": 256.15799900889397, "count": 100 }, 
"Pyramids.Policy.RndReward.mean": { "value": 0.012107008989758989, "min": 0.012055162960820098, "max": 7.702382093295455, "count": 100 }, "Pyramids.Policy.RndReward.sum": { "value": 1.6949812585662585, "min": 1.5430608589849726, "max": 123.23811349272728, "count": 100 }, "Pyramids.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 100 }, "Pyramids.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 100 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1673512916", "python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics", "mlagents_version": "0.29.0.dev0", "mlagents_envs_version": "0.29.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.8.1+cu102", "numpy_version": "1.21.6", "end_time_seconds": "1673519882" }, "total": 6966.551755917, "count": 1, "self": 0.5396239170004264, "children": { "run_training.setup": { "total": 0.1109603020000236, "count": 1, "self": 0.1109603020000236 }, "TrainerController.start_learning": { "total": 6965.901171697999, "count": 1, "self": 4.236955528069302, "children": { "TrainerController._reset_env": { "total": 7.813935940999954, "count": 1, "self": 7.813935940999954 }, "TrainerController.advance": { "total": 6953.75599585593, "count": 194497, "self": 4.298586040918963, "children": { "env_step": { "total": 4886.382956933993, "count": 194497, "self": 4544.675004032815, "children": { "SubprocessEnvManager._take_step": { "total": 339.0181059770715, "count": 194497, "self": 13.876322819029554, "children": { "TorchPolicy.evaluate": { "total": 325.14178315804196, "count": 187567, "self": 109.3493617129879, "children": { "TorchPolicy.sample_actions": { "total": 215.79242144505406, "count": 187567, "self": 215.79242144505406 } } } } }, "workers": { "total": 2.6898469241068597, "count": 194497, "self": 0.0, "children": { "worker_root": { "total": 6950.651533212934, "count": 194497, "is_parallel": true, "self": 2737.507902091822, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.005713289999960125, "count": 1, "is_parallel": true, "self": 0.003221857999903932, "children": { "_process_rank_one_or_two_observation": { "total": 0.002491432000056193, "count": 8, "is_parallel": true, "self": 0.002491432000056193 } } }, "UnityEnvironment.step": { "total": 0.05006534199998214, "count": 1, "is_parallel": true, "self": 0.0005556859999842345, "children": { "UnityEnvironment._generate_step_input": { "total": 0.000543036999999913, "count": 1, "is_parallel": true, "self": 0.000543036999999913 }, "communicator.exchange": { "total": 0.04715146099999856, "count": 1, "is_parallel": true, "self": 0.04715146099999856 }, "steps_from_proto": { "total": 0.0018151579999994283, "count": 1, "is_parallel": true, "self": 0.0004823130000772835, "children": { "_process_rank_one_or_two_observation": { "total": 0.0013328449999221448, "count": 8, "is_parallel": true, "self": 0.0013328449999221448 } } } } } } }, "UnityEnvironment.step": { "total": 4213.143631121112, "count": 194496, "is_parallel": true, "self": 87.98529919111843, "children": { "UnityEnvironment._generate_step_input": { "total": 75.01656599887667, "count": 194496, "is_parallel": true, "self": 75.01656599887667 }, "communicator.exchange": { "total": 3715.9250132820443, 
"count": 194496, "is_parallel": true, "self": 3715.9250132820443 }, "steps_from_proto": { "total": 334.2167526490722, "count": 194496, "is_parallel": true, "self": 75.69789434721054, "children": { "_process_rank_one_or_two_observation": { "total": 258.51885830186166, "count": 1555968, "is_parallel": true, "self": 258.51885830186166 } } } } } } } } } } }, "trainer_advance": { "total": 2063.0744528810187, "count": 194497, "self": 8.188517516895445, "children": { "process_trajectory": { "total": 473.272474907127, "count": 194497, "self": 472.7094128201264, "children": { "RLTrainer._checkpoint": { "total": 0.5630620870006169, "count": 6, "self": 0.5630620870006169 } } }, "_update_policy": { "total": 1581.613460456996, "count": 1396, "self": 616.1446497390439, "children": { "TorchPPOOptimizer.update": { "total": 965.4688107179522, "count": 68451, "self": 965.4688107179522 } } } } } } }, "trainer_threads": { "total": 9.490004231338389e-07, "count": 1, "self": 9.490004231338389e-07 }, "TrainerController._save_models": { "total": 0.09428342399951362, "count": 1, "self": 0.0013950799993835972, "children": { "RLTrainer._checkpoint": { "total": 0.09288834400013002, "count": 1, "self": 0.09288834400013002 } } } } } } }