{ "name": "root", "gauges": { "Agent.Policy.Entropy.mean": { "value": 1.4504915475845337, "min": 1.4189385175704956, "max": 1.4583607912063599, "count": 200 }, "Agent.Policy.Entropy.sum": { "value": 8716.00390625, "min": 7019.41796875, "max": 10183.787109375, "count": 200 }, "Agent.DroneBasedReforestation.TreeDropCount.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.TreeDropCount.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.RechargeEnergyCount.mean": { "value": 12.523809523809524, "min": 0.0, "max": 498.73333333333335, "count": 200 }, "Agent.DroneBasedReforestation.RechargeEnergyCount.sum": { "value": 263.0, "min": 0.0, "max": 7481.0, "count": 200 }, "Agent.DroneBasedReforestation.SaveLocationCount.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.SaveLocationCount.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.OutofEnergyCount.mean": { "value": 0.6666666666666666, "min": 0.4, "max": 0.8, "count": 200 }, "Agent.DroneBasedReforestation.OutofEnergyCount.sum": { "value": 14.0, "min": 6.0, "max": 14.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeTreeDropReward.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeTreeDropReward.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistanceReward.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistanceReward.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.FurthestDistanceExplored.mean": { "value": 130.73263047990343, "min": 84.74893919626872, "max": 187.240434773763, "count": 200 }, "Agent.DroneBasedReforestation.FurthestDistanceExplored.sum": { "value": 2745.3852400779724, "min": 1525.480905532837, "max": 3128.7300386428833, "count": 200 }, "Agent.Environment.LessonNumber.difficulty.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.Environment.LessonNumber.difficulty.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.Environment.LessonNumber.task.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.Environment.LessonNumber.task.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.Environment.EpisodeLength.mean": { "value": 332.6666666666667, "min": 275.3333333333333, "max": 399.0, "count": 200 }, "Agent.Environment.EpisodeLength.sum": { "value": 5988.0, "min": 4788.0, "max": 6990.0, "count": 200 }, "Agent.Step.mean": { "value": 1199832.0, "min": 5600.0, "max": 1199832.0, "count": 200 }, "Agent.Step.sum": { "value": 
1199832.0, "min": 5600.0, "max": 1199832.0, "count": 200 }, "Agent.Policy.CuriosityValueEstimate.mean": { "value": 0.17566753923892975, "min": 0.031902581453323364, "max": 1.0969910621643066, "count": 200 }, "Agent.Policy.CuriosityValueEstimate.sum": { "value": 3.162015676498413, "min": 0.4466361403465271, "max": 18.22492027282715, "count": 200 }, "Agent.Policy.ExtrinsicValueEstimate.mean": { "value": 1.7508596181869507, "min": -0.19481128454208374, "max": 2.1962625980377197, "count": 200 }, "Agent.Policy.ExtrinsicValueEstimate.sum": { "value": 31.515472412109375, "min": -3.5066030025482178, "max": 33.99528121948242, "count": 200 }, "Agent.Environment.CumulativeReward.mean": { "value": 4.581625471667697, "min": -1.0654666741689047, "max": 20.056720465421677, "count": 200 }, "Agent.Environment.CumulativeReward.sum": { "value": 82.46925849001855, "min": -15.98200011253357, "max": 320.9075274467468, "count": 200 }, "Agent.Policy.CuriosityReward.mean": { "value": 0.5303110282661186, "min": 0.0, "max": 18.220220788319907, "count": 200 }, "Agent.Policy.CuriosityReward.sum": { "value": 9.545598508790135, "min": 0.0, "max": 273.3033118247986, "count": 200 }, "Agent.Policy.ExtrinsicReward.mean": { "value": 4.123461473474486, "min": -0.9589201927185058, "max": 18.051046889275312, "count": 200 }, "Agent.Policy.ExtrinsicReward.sum": { "value": 74.22230652254075, "min": -14.383802890777588, "max": 288.816750228405, "count": 200 }, "Agent.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 200 }, "Agent.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 200 }, "Agent.Losses.PolicyLoss.mean": { "value": 0.025146381948919345, "min": 0.015833884438810248, "max": 0.03159644683667769, "count": 140 }, "Agent.Losses.PolicyLoss.sum": { "value": 0.025146381948919345, "min": 0.015833884438810248, "max": 0.03159644683667769, "count": 140 }, "Agent.Losses.ValueLoss.mean": { "value": 7.141066680351893, "min": 0.0002961420832434669, "max": 18.593239585558575, "count": 140 }, "Agent.Losses.ValueLoss.sum": { "value": 7.141066680351893, "min": 0.0002961420832434669, "max": 18.593239585558575, "count": 140 }, "Agent.Policy.LearningRate.mean": { "value": 4.20999860000087e-08, "min": 4.20999860000087e-08, "max": 0.0002979000007, "count": 140 }, "Agent.Policy.LearningRate.sum": { "value": 4.20999860000087e-08, "min": 4.20999860000087e-08, "max": 0.0002979000007, "count": 140 }, "Agent.Policy.Epsilon.mean": { "value": 0.100014, "min": 0.100014, "max": 0.1993, "count": 140 }, "Agent.Policy.Epsilon.sum": { "value": 0.100014, "min": 0.100014, "max": 0.1993, "count": 140 }, "Agent.Policy.Beta.mean": { "value": 1.0698600000000144e-05, "min": 1.0698600000000144e-05, "max": 0.00496507, "count": 140 }, "Agent.Policy.Beta.sum": { "value": 1.0698600000000144e-05, "min": 1.0698600000000144e-05, "max": 0.00496507, "count": 140 }, "Agent.Losses.CuriosityForwardLoss.mean": { "value": 0.015817191878644127, "min": 0.014498400889957944, "max": 0.5835290277997652, "count": 140 }, "Agent.Losses.CuriosityForwardLoss.sum": { "value": 0.015817191878644127, "min": 0.014498400889957944, "max": 0.5835290277997652, "count": 140 }, "Agent.Losses.CuriosityInverseLoss.mean": { "value": 2.0491804033517838, "min": 2.0173485726118088, "max": 3.310828596353531, "count": 140 }, "Agent.Losses.CuriosityInverseLoss.sum": { "value": 2.0491804033517838, "min": 2.0173485726118088, "max": 3.310828596353531, "count": 140 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1717235414", "python_version": "3.9.18 
(main, Sep 11 2023, 14:09:26) [MSC v.1916 64 bit (AMD64)]", "command_line_arguments": "C:\\Users\\pdsie\\anaconda3\\envs\\mlagents20\\Scripts\\mlagents-learn c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/configs/mlagents/tmp/train/DroneBasedReforestation_difficulty_3_task_6_run_id_1_train.yaml --run-id=DroneBasedReforestation/train/DroneBasedReforestation_difficulty_3_task_6_run_id_1_train --base-port 5007", "mlagents_version": "0.30.0", "mlagents_envs_version": "0.30.0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.7.1+cu110", "numpy_version": "1.21.0", "end_time_seconds": "1717239439" }, "total": 4024.6476045, "count": 1, "self": 0.4105314000003091, "children": { "run_training.setup": { "total": 0.05245299999999997, "count": 1, "self": 0.05245299999999997 }, "TrainerController.start_learning": { "total": 4024.1846201, "count": 1, "self": 6.823291999885896, "children": { "TrainerController._reset_env": { "total": 2.0034330000000002, "count": 1, "self": 2.0034330000000002 }, "TrainerController.advance": { "total": 4015.132420500114, "count": 401212, "self": 6.283942800091609, "children": { "env_step": { "total": 4008.8484777000226, "count": 401212, "self": 1688.6688614000836, "children": { "SubprocessEnvManager._take_step": { "total": 2315.8741404999705, "count": 401212, "self": 12.286400099961611, "children": { "TorchPolicy.evaluate": { "total": 2303.587740400009, "count": 400346, "self": 2303.587740400009 } } }, "workers": { "total": 4.305475799968528, "count": 401212, "self": 0.0, "children": { "worker_root": { "total": 4014.887415299974, "count": 401212, "is_parallel": true, "self": 2570.5212920998615, "children": { "steps_from_proto": { "total": 0.0062581999999999915, "count": 1, "is_parallel": true, "self": 0.0001041000000001624, "children": { "_process_maybe_compressed_observation": { "total": 0.006107399999999874, "count": 2, "is_parallel": true, "self": 2.9700000000021376e-05, "children": { "_observation_to_np_array": { "total": 0.006077699999999853, "count": 3, "is_parallel": true, "self": 2.819999999981171e-05, "children": { "process_pixels": { "total": 0.006049500000000041, "count": 3, "is_parallel": true, "self": 0.0002406999999999826, "children": { "image_decompress": { "total": 0.005808800000000058, "count": 3, "is_parallel": true, "self": 0.005808800000000058 } } } } } } }, "_process_rank_one_or_two_observation": { "total": 4.669999999995511e-05, "count": 2, "is_parallel": true, "self": 4.669999999995511e-05 } } }, "UnityEnvironment.step": { "total": 1444.3598650001122, "count": 401212, "is_parallel": true, "self": 22.982416200152784, "children": { "UnityEnvironment._generate_step_input": { "total": 24.666361000051545, "count": 401212, "is_parallel": true, "self": 24.666361000051545 }, "communicator.exchange": { "total": 1209.1215095999087, "count": 401212, "is_parallel": true, "self": 1209.1215095999087 }, "steps_from_proto": { "total": 187.58957819999944, "count": 401212, "is_parallel": true, "self": 37.552167899814066, "children": { "_process_maybe_compressed_observation": { "total": 134.0370428000622, "count": 802424, "is_parallel": true, "self": 10.410967999887362, "children": { "_observation_to_np_array": { "total": 123.62607480017485, "count": 1204305, "is_parallel": true, "self": 10.863587100228912, "children": { "process_pixels": { "total": 112.76248769994594, "count": 1204305, "is_parallel": true, "self": 53.50085809998973, "children": { "image_decompress": { "total": 59.26162959995621, "count": 1204305, "is_parallel": 
true, "self": 59.26162959995621 } } } } } } }, "_process_rank_one_or_two_observation": { "total": 16.000367500123158, "count": 802424, "is_parallel": true, "self": 16.000367500123158 } } } } } } } } } } } } }, "trainer_threads": { "total": 3.2500000088475645e-05, "count": 1, "self": 3.2500000088475645e-05, "children": { "thread_root": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "trainer_advance": { "total": 4020.277517000015, "count": 191258, "is_parallel": true, "self": 5.217701400116312, "children": { "process_trajectory": { "total": 3147.2481361998975, "count": 191258, "is_parallel": true, "self": 3146.6857697998976, "children": { "RLTrainer._checkpoint": { "total": 0.5623664000002009, "count": 2, "is_parallel": true, "self": 0.5623664000002009 } } }, "_update_policy": { "total": 867.8116794000011, "count": 140, "is_parallel": true, "self": 579.4782453999968, "children": { "TorchPPOOptimizer.update": { "total": 288.33343400000433, "count": 3384, "is_parallel": true, "self": 288.33343400000433 } } } } } } } } }, "TrainerController._save_models": { "total": 0.22544209999978193, "count": 1, "self": 0.007709299999532959, "children": { "RLTrainer._checkpoint": { "total": 0.21773280000024897, "count": 1, "self": 0.21773280000024897 } } } } } } }