{
"name": "root",
"gauges": {
"Agent.Policy.Entropy.mean": {
"value": 1.427331566810608,
"min": 1.4189385175704956,
"max": 1.4384431838989258,
"count": 200
},
"Agent.Policy.Entropy.sum": {
"value": 8850.8828125,
"min": 7010.93408203125,
"max": 10145.9560546875,
"count": 200
},
"Agent.DroneBasedReforestation.TreeDropCount.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.TreeDropCount.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.RechargeEnergyCount.mean": {
"value": 193.72222222222223,
"min": 0.0,
"max": 559.6666666666666,
"count": 200
},
"Agent.DroneBasedReforestation.RechargeEnergyCount.sum": {
"value": 3487.0,
"min": 0.0,
"max": 9800.0,
"count": 200
},
"Agent.DroneBasedReforestation.SaveLocationCount.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.SaveLocationCount.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.OutofEnergyCount.mean": {
"value": 0.6111111111111112,
"min": 0.42857142857142855,
"max": 0.7333333333333333,
"count": 200
},
"Agent.DroneBasedReforestation.OutofEnergyCount.sum": {
"value": 11.0,
"min": 7.0,
"max": 15.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeTreeDropReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeTreeDropReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.difficulty.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.difficulty.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.task.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.task.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.EpisodeLength.mean": {
"value": 343.8333333333333,
"min": 258.5,
"max": 399.0,
"count": 200
},
"Agent.Environment.EpisodeLength.sum": {
"value": 6189.0,
"min": 4863.0,
"max": 7074.0,
"count": 200
},
"Agent.Step.mean": {
"value": 1199623.0,
"min": 5600.0,
"max": 1199623.0,
"count": 200
},
"Agent.Step.sum": {
"value": 1199623.0,
"min": 5600.0,
"max": 1199623.0,
"count": 200
},
"Agent.Policy.CuriosityValueEstimate.mean": {
"value": 0.28847405314445496,
"min": 0.029321420937776566,
"max": 0.9179295897483826,
"count": 200
},
"Agent.Policy.CuriosityValueEstimate.sum": {
"value": 4.904058933258057,
"min": 0.4398213028907776,
"max": 14.574878692626953,
"count": 200
},
"Agent.Policy.ExtrinsicValueEstimate.mean": {
"value": 1.6907849311828613,
"min": 0.07653423398733139,
"max": 3.6844630241394043,
"count": 200
},
"Agent.Policy.ExtrinsicValueEstimate.sum": {
"value": 28.743343353271484,
"min": 1.071479320526123,
"max": 67.0732650756836,
"count": 200
},
"Agent.Environment.CumulativeReward.mean": {
"value": 16.70050396814066,
"min": -1.0659666538238526,
"max": 37.93033692571852,
"count": 200
},
"Agent.Environment.CumulativeReward.sum": {
"value": 283.9085674583912,
"min": -15.989499807357788,
"max": 682.7460646629333,
"count": 200
},
"Agent.Policy.CuriosityReward.mean": {
"value": 0.9740008159595377,
"min": 0.0,
"max": 14.208847840627035,
"count": 200
},
"Agent.Policy.CuriosityReward.sum": {
"value": 16.55801387131214,
"min": 0.0,
"max": 255.75926113128662,
"count": 200
},
"Agent.Policy.ExtrinsicReward.mean": {
"value": 15.030446822152419,
"min": -0.95937020778656,
"max": 34.13729618489742,
"count": 200
},
"Agent.Policy.ExtrinsicReward.sum": {
"value": 255.5175959765911,
"min": -14.3905531167984,
"max": 614.4713313281536,
"count": 200
},
"Agent.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"Agent.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"Agent.Losses.PolicyLoss.mean": {
"value": 0.019827006578755874,
"min": 0.011569853018348416,
"max": 0.03528644716910397,
"count": 139
},
"Agent.Losses.PolicyLoss.sum": {
"value": 0.019827006578755874,
"min": 0.011569853018348416,
"max": 0.03528644716910397,
"count": 139
},
"Agent.Losses.ValueLoss.mean": {
"value": 15.16891094048818,
"min": 0.0075566458981484175,
"max": 28.07444941997528,
"count": 139
},
"Agent.Losses.ValueLoss.sum": {
"value": 15.16891094048818,
"min": 0.0075566458981484175,
"max": 28.07444941997528,
"count": 139
},
"Agent.Policy.LearningRate.mean": {
"value": 1.9434993525000693e-07,
"min": 1.9434993525000693e-07,
"max": 0.0002979000007,
"count": 139
},
"Agent.Policy.LearningRate.sum": {
"value": 1.9434993525000693e-07,
"min": 1.9434993525000693e-07,
"max": 0.0002979000007,
"count": 139
},
"Agent.Policy.Epsilon.mean": {
"value": 0.10006475,
"min": 0.10006475,
"max": 0.1993,
"count": 139
},
"Agent.Policy.Epsilon.sum": {
"value": 0.10006475,
"min": 0.10006475,
"max": 0.1993,
"count": 139
},
"Agent.Policy.Beta.mean": {
"value": 1.3231025000000118e-05,
"min": 1.3231025000000118e-05,
"max": 0.00496507,
"count": 139
},
"Agent.Policy.Beta.sum": {
"value": 1.3231025000000118e-05,
"min": 1.3231025000000118e-05,
"max": 0.00496507,
"count": 139
},
"Agent.Losses.CuriosityForwardLoss.mean": {
"value": 0.02689781233978768,
"min": 0.025415692866469424,
"max": 0.5835290277997652,
"count": 139
},
"Agent.Losses.CuriosityForwardLoss.sum": {
"value": 0.02689781233978768,
"min": 0.025415692866469424,
"max": 0.5835290277997652,
"count": 139
},
"Agent.Losses.CuriosityInverseLoss.mean": {
"value": 1.8951698342959087,
"min": 1.7059353639682133,
"max": 3.310828596353531,
"count": 139
},
"Agent.Losses.CuriosityInverseLoss.sum": {
"value": 1.8951698342959087,
"min": 1.7059353639682133,
"max": 3.310828596353531,
"count": 139
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1717420617",
"python_version": "3.9.18 (main, Sep 11 2023, 14:09:26) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\pdsie\\anaconda3\\envs\\mlagents20\\Scripts\\mlagents-learn c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/configs/mlagents/tmp/train/DroneBasedReforestation_difficulty_10_task_2_run_id_1_train.yaml --run-id=DroneBasedReforestation/train/DroneBasedReforestation_difficulty_10_task_2_run_id_1_train --base-port 5007",
"mlagents_version": "0.30.0",
"mlagents_envs_version": "0.30.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.7.1+cu110",
"numpy_version": "1.21.0",
"end_time_seconds": "1717424728"
},
"total": 4110.5160381000005,
"count": 1,
"self": 0.32059840000056283,
"children": {
"run_training.setup": {
"total": 0.052212100000000095,
"count": 1,
"self": 0.052212100000000095
},
"TrainerController.start_learning": {
"total": 4110.1432276,
"count": 1,
"self": 7.037711600030889,
"children": {
"TrainerController._reset_env": {
"total": 2.1466497,
"count": 1,
"self": 2.1466497
},
"TrainerController.advance": {
"total": 4100.747437499969,
"count": 401067,
"self": 6.550736899880121,
"children": {
"env_step": {
"total": 4094.196700600089,
"count": 401067,
"self": 1902.4891477000315,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2187.0464885999386,
"count": 401067,
"self": 12.823303799686528,
"children": {
"TorchPolicy.evaluate": {
"total": 2174.223184800252,
"count": 400144,
"self": 2174.223184800252
}
}
},
"workers": {
"total": 4.661064300119268,
"count": 401067,
"self": 0.0,
"children": {
"worker_root": {
"total": 4100.824766699939,
"count": 401067,
"is_parallel": true,
"self": 2445.3973263000953,
"children": {
"steps_from_proto": {
"total": 0.006438900000000025,
"count": 1,
"is_parallel": true,
"self": 0.00010350000000003412,
"children": {
"_process_maybe_compressed_observation": {
"total": 0.006286900000000095,
"count": 2,
"is_parallel": true,
"self": 3.420000000020629e-05,
"children": {
"_observation_to_np_array": {
"total": 0.006252699999999889,
"count": 3,
"is_parallel": true,
"self": 2.740000000001075e-05,
"children": {
"process_pixels": {
"total": 0.006225299999999878,
"count": 3,
"is_parallel": true,
"self": 0.00022799999999989495,
"children": {
"image_decompress": {
"total": 0.005997299999999983,
"count": 3,
"is_parallel": true,
"self": 0.005997299999999983
}
}
}
}
}
}
},
"_process_rank_one_or_two_observation": {
"total": 4.849999999989585e-05,
"count": 2,
"is_parallel": true,
"self": 4.849999999989585e-05
}
}
},
"UnityEnvironment.step": {
"total": 1655.4210014998437,
"count": 401067,
"is_parallel": true,
"self": 21.693077299662946,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.86204190004806,
"count": 401067,
"is_parallel": true,
"self": 22.86204190004806
},
"communicator.exchange": {
"total": 1431.766514100158,
"count": 401067,
"is_parallel": true,
"self": 1431.766514100158
},
"steps_from_proto": {
"total": 179.09936819997455,
"count": 401067,
"is_parallel": true,
"self": 36.20441809990484,
"children": {
"_process_maybe_compressed_observation": {
"total": 127.44497150016291,
"count": 802134,
"is_parallel": true,
"self": 9.909645700207761,
"children": {
"_observation_to_np_array": {
"total": 117.53532579995515,
"count": 1203756,
"is_parallel": true,
"self": 9.769774000021698,
"children": {
"process_pixels": {
"total": 107.76555179993345,
"count": 1203756,
"is_parallel": true,
"self": 50.2779833998878,
"children": {
"image_decompress": {
"total": 57.487568400045646,
"count": 1203756,
"is_parallel": true,
"self": 57.487568400045646
}
}
}
}
}
}
},
"_process_rank_one_or_two_observation": {
"total": 15.449978599906801,
"count": 802134,
"is_parallel": true,
"self": 15.449978599906801
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.909999915980734e-05,
"count": 1,
"self": 4.909999915980734e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 4105.905633200024,
"count": 199594,
"is_parallel": true,
"self": 5.874801499900059,
"children": {
"process_trajectory": {
"total": 3275.5923437001275,
"count": 199594,
"is_parallel": true,
"self": 3275.0745273001276,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5178164000001289,
"count": 2,
"is_parallel": true,
"self": 0.5178164000001289
}
}
},
"_update_policy": {
"total": 824.4384879999966,
"count": 139,
"is_parallel": true,
"self": 555.6392749999884,
"children": {
"TorchPPOOptimizer.update": {
"total": 268.79921300000825,
"count": 3357,
"is_parallel": true,
"self": 268.79921300000825
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.21137970000017958,
"count": 1,
"self": 0.006418599999960861,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20496110000021872,
"count": 1,
"self": 0.20496110000021872
}
}
}
}
}
}
}