{ "name": "root", "gauges": { "Huggy.Policy.Entropy.mean": { "value": 1.4000904560089111, "min": 1.4000904560089111, "max": 1.4262045621871948, "count": 40 }, "Huggy.Policy.Entropy.sum": { "value": 69455.6875, "min": 69125.265625, "max": 76768.8046875, "count": 40 }, "Huggy.Environment.EpisodeLength.mean": { "value": 94.56022944550669, "min": 87.23456790123457, "max": 417.07438016528926, "count": 40 }, "Huggy.Environment.EpisodeLength.sum": { "value": 49455.0, "min": 48857.0, "max": 50466.0, "count": 40 }, "Huggy.Step.mean": { "value": 1999978.0, "min": 49837.0, "max": 1999978.0, "count": 40 }, "Huggy.Step.sum": { "value": 1999978.0, "min": 49837.0, "max": 1999978.0, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.mean": { "value": 2.345527172088623, "min": -0.03218771517276764, "max": 2.422205686569214, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.sum": { "value": 1226.710693359375, "min": -3.8625259399414062, "max": 1354.99609375, "count": 40 }, "Huggy.Environment.CumulativeReward.mean": { "value": 3.6246360086802087, "min": 1.8969663135707377, "max": 3.902965500890969, "count": 40 }, "Huggy.Environment.CumulativeReward.sum": { "value": 1895.6846325397491, "min": 227.63595762848854, "max": 2110.8410179018974, "count": 40 }, "Huggy.Policy.ExtrinsicReward.mean": { "value": 3.6246360086802087, "min": 1.8969663135707377, "max": 3.902965500890969, "count": 40 }, "Huggy.Policy.ExtrinsicReward.sum": { "value": 1895.6846325397491, "min": 227.63595762848854, "max": 2110.8410179018974, "count": 40 }, "Huggy.Losses.PolicyLoss.mean": { "value": 0.019094924846124035, "min": 0.014793468259611272, "max": 0.021043069238900595, "count": 40 }, "Huggy.Losses.PolicyLoss.sum": { "value": 0.0572847745383721, "min": 0.030080323915656966, "max": 0.06108315486829573, "count": 40 }, "Huggy.Losses.ValueLoss.mean": { "value": 0.056143100352750885, "min": 0.02385393682246407, "max": 0.057199866324663166, "count": 40 }, "Huggy.Losses.ValueLoss.sum": { "value": 0.16842930105825266, "min": 0.04770787364492814, "max": 0.1715995989739895, "count": 40 }, "Huggy.Policy.LearningRate.mean": { "value": 3.4368988543999947e-06, "min": 3.4368988543999947e-06, "max": 0.000295365526544825, "count": 40 }, "Huggy.Policy.LearningRate.sum": { "value": 1.0310696563199984e-05, "min": 1.0310696563199984e-05, "max": 0.0008441616186127998, "count": 40 }, "Huggy.Policy.Epsilon.mean": { "value": 0.10114560000000002, "min": 0.10114560000000002, "max": 0.1984551750000001, "count": 40 }, "Huggy.Policy.Epsilon.sum": { "value": 0.30343680000000006, "min": 0.20741790000000004, "max": 0.5813871999999999, "count": 40 }, "Huggy.Policy.Beta.mean": { "value": 6.716543999999993e-05, "min": 6.716543999999993e-05, "max": 0.004922913232500001, "count": 40 }, "Huggy.Policy.Beta.sum": { "value": 0.00020149631999999982, "min": 0.00020149631999999982, "max": 0.014071221279999999, "count": 40 }, "Huggy.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 }, "Huggy.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1736484262", "python_version": "3.10.12 (main, Nov 6 2024, 20:22:13) [GCC 11.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics --force", "mlagents_version": "1.2.0.dev0", "mlagents_envs_version": "1.2.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "2.5.1+cu121", "numpy_version": "1.23.5", 
"end_time_seconds": "1736486980" }, "total": 2717.722267852, "count": 1, "self": 0.47612956300054066, "children": { "run_training.setup": { "total": 0.05839655599993421, "count": 1, "self": 0.05839655599993421 }, "TrainerController.start_learning": { "total": 2717.1877417329997, "count": 1, "self": 4.795812509038569, "children": { "TrainerController._reset_env": { "total": 6.185528055000077, "count": 1, "self": 6.185528055000077 }, "TrainerController.advance": { "total": 2706.088086675961, "count": 231797, "self": 4.958594588019423, "children": { "env_step": { "total": 2130.0361314359407, "count": 231797, "self": 1672.0302187628408, "children": { "SubprocessEnvManager._take_step": { "total": 454.76671239801135, "count": 231797, "self": 16.9005091460549, "children": { "TorchPolicy.evaluate": { "total": 437.86620325195645, "count": 222932, "self": 437.86620325195645 } } }, "workers": { "total": 3.239200275088592, "count": 231797, "self": 0.0, "children": { "worker_root": { "total": 2709.4212019229863, "count": 231797, "is_parallel": true, "self": 1351.4304628068817, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.001001772999984496, "count": 1, "is_parallel": true, "self": 0.0002751770000486431, "children": { "_process_rank_one_or_two_observation": { "total": 0.0007265959999358529, "count": 2, "is_parallel": true, "self": 0.0007265959999358529 } } }, "UnityEnvironment.step": { "total": 0.032140549000018837, "count": 1, "is_parallel": true, "self": 0.0004299500000115586, "children": { "UnityEnvironment._generate_step_input": { "total": 0.00020125000003190507, "count": 1, "is_parallel": true, "self": 0.00020125000003190507 }, "communicator.exchange": { "total": 0.0306696610000472, "count": 1, "is_parallel": true, "self": 0.0306696610000472 }, "steps_from_proto": { "total": 0.0008396879999281737, "count": 1, "is_parallel": true, "self": 0.00023395999983222282, "children": { "_process_rank_one_or_two_observation": { "total": 0.0006057280000959508, "count": 2, "is_parallel": true, "self": 0.0006057280000959508 } } } } } } }, "UnityEnvironment.step": { "total": 1357.9907391161046, "count": 231796, "is_parallel": true, "self": 40.338667696957145, "children": { "UnityEnvironment._generate_step_input": { "total": 91.55204529901721, "count": 231796, "is_parallel": true, "self": 91.55204529901721 }, "communicator.exchange": { "total": 1128.3461767361046, "count": 231796, "is_parallel": true, "self": 1128.3461767361046 }, "steps_from_proto": { "total": 97.75384938402556, "count": 231796, "is_parallel": true, "self": 37.017973129049324, "children": { "_process_rank_one_or_two_observation": { "total": 60.73587625497623, "count": 463592, "is_parallel": true, "self": 60.73587625497623 } } } } } } } } } } }, "trainer_advance": { "total": 571.0933606520014, "count": 231797, "self": 7.131255536021058, "children": { "process_trajectory": { "total": 181.9990974699823, "count": 231797, "self": 180.52116134898176, "children": { "RLTrainer._checkpoint": { "total": 1.477936121000539, "count": 10, "self": 1.477936121000539 } } }, "_update_policy": { "total": 381.963007645998, "count": 97, "self": 311.8478173660011, "children": { "TorchPPOOptimizer.update": { "total": 70.11519027999691, "count": 2910, "self": 70.11519027999691 } } } } } } }, "trainer_threads": { "total": 1.303999852098059e-06, "count": 1, "self": 1.303999852098059e-06 }, "TrainerController._save_models": { "total": 0.11831318899976395, "count": 1, "self": 
0.0022360259995366505, "children": { "RLTrainer._checkpoint": { "total": 0.1160771630002273, "count": 1, "self": 0.1160771630002273 } } } } } } }