{ "name": "root", "gauges": { "Agent.Policy.Entropy.mean": { "value": 1.3765250444412231, "min": 1.3763339519500732, "max": 1.4215806722640991, "count": 200 }, "Agent.Policy.Entropy.sum": { "value": 8259.150390625, "min": 8246.390625, "max": 8543.4287109375, "count": 200 }, "Agent.DroneBasedReforestation.TreeDropCount.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.TreeDropCount.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.RechargeEnergyCount.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.RechargeEnergyCount.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.SaveLocationCount.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.SaveLocationCount.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.OutofEnergyCount.mean": { "value": 0.6, "min": 0.6, "max": 0.7333333333333333, "count": 200 }, "Agent.DroneBasedReforestation.OutofEnergyCount.sum": { "value": 9.0, "min": 9.0, "max": 11.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeTreeDropReward.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeTreeDropReward.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistanceReward.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistanceReward.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.HighestPointonTerrainFound.mean": { "value": 43.42364870707194, "min": 19.278708457946777, "max": 55.716199239095054, "count": 200 }, "Agent.DroneBasedReforestation.HighestPointonTerrainFound.sum": { "value": 651.3547306060791, "min": 289.18062686920166, "max": 835.7429885864258, "count": 200 }, "Agent.Environment.LessonNumber.difficulty.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.Environment.LessonNumber.difficulty.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.Environment.LessonNumber.task.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.Environment.LessonNumber.task.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.Environment.EpisodeLength.mean": { "value": 399.0, "min": 399.0, "max": 399.0, "count": 200 }, "Agent.Environment.EpisodeLength.sum": { "value": 5985.0, "min": 5985.0, "max": 5985.0, "count": 200 }, "Agent.Step.mean": { "value": 1199600.0, "min": 5600.0, "max": 1199600.0, "count": 200 }, "Agent.Step.sum": { "value": 1199600.0, "min": 5600.0, "max": 1199600.0, "count": 200 }, 
"Agent.Policy.CuriosityValueEstimate.mean": { "value": 0.44779056310653687, "min": 0.03938337415456772, "max": 0.9510329365730286, "count": 200 }, "Agent.Policy.CuriosityValueEstimate.sum": { "value": 6.716858386993408, "min": 0.5513672232627869, "max": 14.265494346618652, "count": 200 }, "Agent.Policy.ExtrinsicValueEstimate.mean": { "value": 3.7807421684265137, "min": 0.07217133045196533, "max": 8.408443450927734, "count": 200 }, "Agent.Policy.ExtrinsicValueEstimate.sum": { "value": 56.71113204956055, "min": 1.0103986263275146, "max": 126.12665557861328, "count": 200 }, "Agent.Environment.CumulativeReward.mean": { "value": 43.07920112609863, "min": 17.005450224876405, "max": 54.28641471862793, "count": 200 }, "Agent.Environment.CumulativeReward.sum": { "value": 646.1880168914795, "min": 255.08175337314606, "max": 814.296220779419, "count": 200 }, "Agent.Policy.CuriosityReward.mean": { "value": 1.4921668608983358, "min": 0.0, "max": 15.121726163228352, "count": 200 }, "Agent.Policy.CuriosityReward.sum": { "value": 22.382502913475037, "min": 0.0, "max": 226.8258924484253, "count": 200 }, "Agent.Policy.ExtrinsicReward.mean": { "value": 38.77128257751465, "min": 15.304905752340952, "max": 48.85777244567871, "count": 200 }, "Agent.Policy.ExtrinsicReward.sum": { "value": 581.5692386627197, "min": 229.5735862851143, "max": 732.8665866851807, "count": 200 }, "Agent.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 200 }, "Agent.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 200 }, "Agent.Losses.PolicyLoss.mean": { "value": 0.02899026535063361, "min": 0.014100697512427965, "max": 0.03209038888841557, "count": 142 }, "Agent.Losses.PolicyLoss.sum": { "value": 0.02899026535063361, "min": 0.014100697512427965, "max": 0.03209038888841557, "count": 142 }, "Agent.Losses.ValueLoss.mean": { "value": 4.960888395706813, "min": 2.446509296695391, "max": 9.58059581120809, "count": 142 }, "Agent.Losses.ValueLoss.sum": { "value": 4.960888395706813, "min": 2.446509296695391, "max": 9.58059581120809, "count": 142 }, "Agent.Policy.LearningRate.mean": { "value": 1.8000994000000013e-06, "min": 1.8000994000000013e-06, "max": 0.0002979000007, "count": 142 }, "Agent.Policy.LearningRate.sum": { "value": 1.8000994000000013e-06, "min": 1.8000994000000013e-06, "max": 0.0002979000007, "count": 142 }, "Agent.Policy.Epsilon.mean": { "value": 0.10060000000000001, "min": 0.10060000000000001, "max": 0.1993, "count": 142 }, "Agent.Policy.Epsilon.sum": { "value": 0.10060000000000001, "min": 0.10060000000000001, "max": 0.1993, "count": 142 }, "Agent.Policy.Beta.mean": { "value": 3.994000000000003e-05, "min": 3.994000000000003e-05, "max": 0.00496507, "count": 142 }, "Agent.Policy.Beta.sum": { "value": 3.994000000000003e-05, "min": 3.994000000000003e-05, "max": 0.00496507, "count": 142 }, "Agent.Losses.CuriosityForwardLoss.mean": { "value": 0.040255979324380554, "min": 0.040255979324380554, "max": 0.581280435125033, "count": 142 }, "Agent.Losses.CuriosityForwardLoss.sum": { "value": 0.040255979324380554, "min": 0.040255979324380554, "max": 0.581280435125033, "count": 142 }, "Agent.Losses.CuriosityInverseLoss.mean": { "value": 1.6220810463031132, "min": 1.5821740577618282, "max": 3.2931352953116098, "count": 142 }, "Agent.Losses.CuriosityInverseLoss.sum": { "value": 1.6220810463031132, "min": 1.5821740577618282, "max": 3.2931352953116098, "count": 142 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1717260901", "python_version": "3.9.18 (main, Sep 11 2023, 14:09:26) [MSC 
v.1916 64 bit (AMD64)]", "command_line_arguments": "C:\\Users\\pdsie\\anaconda3\\envs\\mlagents20\\Scripts\\mlagents-learn c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/configs/mlagents/tmp/train/DroneBasedReforestation_difficulty_4_task_5_run_id_1_train.yaml --run-id=DroneBasedReforestation/train/DroneBasedReforestation_difficulty_4_task_5_run_id_1_train --base-port 5007", "mlagents_version": "0.30.0", "mlagents_envs_version": "0.30.0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.7.1+cu110", "numpy_version": "1.21.0", "end_time_seconds": "1717264528" }, "total": 3626.9174700999997, "count": 1, "self": 0.39518239999961224, "children": { "run_training.setup": { "total": 0.05494280000000007, "count": 1, "self": 0.05494280000000007 }, "TrainerController.start_learning": { "total": 3626.4673449, "count": 1, "self": 7.255818699960855, "children": { "TrainerController._reset_env": { "total": 2.1334432, "count": 1, "self": 2.1334432 }, "TrainerController.advance": { "total": 3616.915204500039, "count": 400401, "self": 6.315011900002446, "children": { "env_step": { "total": 3610.6001926000367, "count": 400401, "self": 1707.2071315000455, "children": { "SubprocessEnvManager._take_step": { "total": 1899.2082348000101, "count": 400401, "self": 12.331207200031258, "children": { "TorchPolicy.evaluate": { "total": 1886.8770275999789, "count": 400401, "self": 1886.8770275999789 } } }, "workers": { "total": 4.184826299980969, "count": 400401, "self": 0.0, "children": { "worker_root": { "total": 3618.12669989991, "count": 400401, "is_parallel": true, "self": 2133.126631599947, "children": { "steps_from_proto": { "total": 0.006969199999999898, "count": 1, "is_parallel": true, "self": 0.00010760000000020753, "children": { "_process_maybe_compressed_observation": { "total": 0.006816499999999781, "count": 2, "is_parallel": true, "self": 3.959999999980646e-05, "children": { "_observation_to_np_array": { "total": 0.006776899999999975, "count": 3, "is_parallel": true, "self": 3.109999999995061e-05, "children": { "process_pixels": { "total": 0.006745800000000024, "count": 3, "is_parallel": true, "self": 0.0002339000000000091, "children": { "image_decompress": { "total": 0.006511900000000015, "count": 3, "is_parallel": true, "self": 0.006511900000000015 } } } } } } }, "_process_rank_one_or_two_observation": { "total": 4.50999999999091e-05, "count": 2, "is_parallel": true, "self": 4.50999999999091e-05 } } }, "UnityEnvironment.step": { "total": 1484.993099099963, "count": 400401, "is_parallel": true, "self": 17.516494699970053, "children": { "UnityEnvironment._generate_step_input": { "total": 17.739050500065872, "count": 400401, "is_parallel": true, "self": 17.739050500065872 }, "communicator.exchange": { "total": 1299.6969130999507, "count": 400401, "is_parallel": true, "self": 1299.6969130999507 }, "steps_from_proto": { "total": 150.04064079997642, "count": 400401, "is_parallel": true, "self": 29.45829679973089, "children": { "_process_maybe_compressed_observation": { "total": 108.15429180021114, "count": 800802, "is_parallel": true, "self": 8.351980300191684, "children": { "_observation_to_np_array": { "total": 99.80231150001946, "count": 1204206, "is_parallel": true, "self": 8.077967600061513, "children": { "process_pixels": { "total": 91.72434389995794, "count": 1204206, "is_parallel": true, "self": 42.387988599862965, "children": { "image_decompress": { "total": 49.33635530009498, "count": 1204206, "is_parallel": true, "self": 49.33635530009498 } } } } } } }, 
"_process_rank_one_or_two_observation": { "total": 12.428052200034392, "count": 800802, "is_parallel": true, "self": 12.428052200034392 } } } } } } } } } } } } }, "trainer_threads": { "total": 2.7999999929306796e-05, "count": 1, "self": 2.7999999929306796e-05, "children": { "thread_root": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "trainer_advance": { "total": 3622.516082000059, "count": 179223, "is_parallel": true, "self": 5.278267200028495, "children": { "process_trajectory": { "total": 2908.314023300031, "count": 179223, "is_parallel": true, "self": 2907.9130927000306, "children": { "RLTrainer._checkpoint": { "total": 0.40093060000026526, "count": 2, "is_parallel": true, "self": 0.40093060000026526 } } }, "_update_policy": { "total": 708.9237915000001, "count": 142, "is_parallel": true, "self": 477.4729361000097, "children": { "TorchPPOOptimizer.update": { "total": 231.45085539999042, "count": 3408, "is_parallel": true, "self": 231.45085539999042 } } } } } } } } }, "TrainerController._save_models": { "total": 0.16285050000033152, "count": 1, "self": 0.005969700000150624, "children": { "RLTrainer._checkpoint": { "total": 0.1568808000001809, "count": 1, "self": 0.1568808000001809 } } } } } } }