{
"name": "root",
"gauges": {
"Agent.Policy.Entropy.mean": {
"value": 1.4380470514297485,
"min": 1.4210506677627563,
"max": 1.440159797668457,
"count": 200
},
"Agent.Policy.Entropy.sum": {
"value": 8511.80078125,
"min": 7452.4091796875,
"max": 10234.216796875,
"count": 200
},
"Agent.DroneBasedReforestation.TreeDropCount.mean": {
"value": 1.0666666666666667,
"min": 0.6,
"max": 1.3333333333333333,
"count": 200
},
"Agent.DroneBasedReforestation.TreeDropCount.sum": {
"value": 16.0,
"min": 9.0,
"max": 24.0,
"count": 200
},
"Agent.DroneBasedReforestation.RechargeEnergyCount.mean": {
"value": 11.4,
"min": 9.055555555555555,
"max": 56.72222222222222,
"count": 200
},
"Agent.DroneBasedReforestation.RechargeEnergyCount.sum": {
"value": 171.0,
"min": 140.0,
"max": 1021.0,
"count": 200
},
"Agent.DroneBasedReforestation.SaveLocationCount.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.SaveLocationCount.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.OutofEnergyCount.mean": {
"value": 0.9333333333333333,
"min": 0.4666666666666667,
"max": 1.0,
"count": 200
},
"Agent.DroneBasedReforestation.OutofEnergyCount.sum": {
"value": 14.0,
"min": 7.0,
"max": 18.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.mean": {
"value": 55.36326599121094,
"min": 15.930303573608398,
"max": 96.99653180440266,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.sum": {
"value": 830.4489898681641,
"min": 238.95455360412598,
"max": 1546.7922191619873,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeTreeDropReward.mean": {
"value": 9.80856335957845,
"min": 0.7838140626748403,
"max": 10.676346683502198,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeTreeDropReward.sum": {
"value": 147.12845039367676,
"min": 11.757210940122604,
"max": 160.14520025253296,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceReward.mean": {
"value": 1.9161027113596598,
"min": 0.19702432552973428,
"max": 3.0707154936260648,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceReward.sum": {
"value": 28.741540670394897,
"min": 2.9553648829460144,
"max": 55.272878885269165,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.mean": {
"value": 0.19161026726166408,
"min": 0.019702432056268055,
"max": 0.3070715483691957,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.sum": {
"value": 2.874154008924961,
"min": 0.29553648084402084,
"max": 5.527287870645523,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.mean": {
"value": 46.57113973299662,
"min": 28.612841029961903,
"max": 110.09242121378581,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.sum": {
"value": 698.5670959949493,
"min": 459.6895525455475,
"max": 1726.5684146881104,
"count": 200
},
"Agent.Environment.LessonNumber.difficulty.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.difficulty.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.task.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.task.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.EpisodeLength.mean": {
"value": 393.2,
"min": 337.8333333333333,
"max": 399.0,
"count": 200
},
"Agent.Environment.EpisodeLength.sum": {
"value": 5898.0,
"min": 5160.0,
"max": 7173.0,
"count": 200
},
"Agent.Step.mean": {
"value": 1199913.0,
"min": 5987.0,
"max": 1199913.0,
"count": 200
},
"Agent.Step.sum": {
"value": 1199913.0,
"min": 5987.0,
"max": 1199913.0,
"count": 200
},
"Agent.Policy.CuriosityValueEstimate.mean": {
"value": 0.4023384749889374,
"min": 0.02915876917541027,
"max": 0.9292956590652466,
"count": 200
},
"Agent.Policy.CuriosityValueEstimate.sum": {
"value": 6.035077095031738,
"min": 0.4373815357685089,
"max": 15.34562873840332,
"count": 200
},
"Agent.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6397857069969177,
"min": 0.06568823009729385,
"max": 1.130825161933899,
"count": 200
},
"Agent.Policy.ExtrinsicValueEstimate.sum": {
"value": 9.596785545349121,
"min": 1.1166999340057373,
"max": 18.093202590942383,
"count": 200
},
"Agent.Environment.CumulativeReward.mean": {
"value": 12.295849800109863,
"min": -0.4003447631994883,
"max": 16.644680166244505,
"count": 200
},
"Agent.Environment.CumulativeReward.sum": {
"value": 184.43774700164795,
"min": -6.005171447992325,
"max": 253.88925862312317,
"count": 200
},
"Agent.Policy.CuriosityReward.mean": {
"value": 1.5010416428248088,
"min": 0.0,
"max": 13.214636166890463,
"count": 200
},
"Agent.Policy.CuriosityReward.sum": {
"value": 22.51562464237213,
"min": 0.0,
"max": 198.21954250335693,
"count": 200
},
"Agent.Policy.ExtrinsicReward.mean": {
"value": 11.066264327367147,
"min": -0.3603106955687205,
"max": 14.980211734771729,
"count": 200
},
"Agent.Policy.ExtrinsicReward.sum": {
"value": 165.9939649105072,
"min": -5.4046604335308075,
"max": 228.50032329559326,
"count": 200
},
"Agent.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"Agent.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"Agent.Losses.PolicyLoss.mean": {
"value": 0.020519517420325428,
"min": 0.013041299698805367,
"max": 0.033138952873371263,
"count": 137
},
"Agent.Losses.PolicyLoss.sum": {
"value": 0.020519517420325428,
"min": 0.013041299698805367,
"max": 0.033138952873371263,
"count": 137
},
"Agent.Losses.ValueLoss.mean": {
"value": 1.3046319286028545,
"min": 0.08780664003764589,
"max": 1.3675937431829948,
"count": 137
},
"Agent.Losses.ValueLoss.sum": {
"value": 1.3046319286028545,
"min": 0.08780664003764589,
"max": 1.3675937431829948,
"count": 137
},
"Agent.Policy.LearningRate.mean": {
"value": 5.108498297500153e-07,
"min": 5.108498297500153e-07,
"max": 0.00029780325073225,
"count": 137
},
"Agent.Policy.LearningRate.sum": {
"value": 5.108498297500153e-07,
"min": 5.108498297500153e-07,
"max": 0.00029780325073225,
"count": 137
},
"Agent.Policy.Epsilon.mean": {
"value": 0.10017025000000002,
"min": 0.10017025000000002,
"max": 0.19926775000000005,
"count": 137
},
"Agent.Policy.Epsilon.sum": {
"value": 0.10017025000000002,
"min": 0.10017025000000002,
"max": 0.19926775000000005,
"count": 137
},
"Agent.Policy.Beta.mean": {
"value": 1.8495475000000255e-05,
"min": 1.8495475000000255e-05,
"max": 0.004963460725,
"count": 137
},
"Agent.Policy.Beta.sum": {
"value": 1.8495475000000255e-05,
"min": 1.8495475000000255e-05,
"max": 0.004963460725,
"count": 137
},
"Agent.Losses.CuriosityForwardLoss.mean": {
"value": 0.03610416315495968,
"min": 0.030629316137896642,
"max": 0.6027635087569555,
"count": 137
},
"Agent.Losses.CuriosityForwardLoss.sum": {
"value": 0.03610416315495968,
"min": 0.030629316137896642,
"max": 0.6027635087569555,
"count": 137
},
"Agent.Losses.CuriosityInverseLoss.mean": {
"value": 1.896045779188474,
"min": 1.7552975515524547,
"max": 3.315477500359217,
"count": 137
},
"Agent.Losses.CuriosityInverseLoss.sum": {
"value": 1.896045779188474,
"min": 1.7552975515524547,
"max": 3.315477500359217,
"count": 137
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1717239440",
"python_version": "3.9.18 (main, Sep 11 2023, 14:09:26) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\pdsie\\anaconda3\\envs\\mlagents20\\Scripts\\mlagents-learn c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/configs/mlagents/tmp/train/DroneBasedReforestation_difficulty_4_task_0_run_id_1_train.yaml --run-id=DroneBasedReforestation/train/DroneBasedReforestation_difficulty_4_task_0_run_id_1_train --base-port 5007",
"mlagents_version": "0.30.0",
"mlagents_envs_version": "0.30.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.7.1+cu110",
"numpy_version": "1.21.0",
"end_time_seconds": "1717243110"
},
"total": 3669.7381731,
"count": 1,
"self": 0.28814790000024004,
"children": {
"run_training.setup": {
"total": 0.050848799999999916,
"count": 1,
"self": 0.050848799999999916
},
"TrainerController.start_learning": {
"total": 3669.3991763999998,
"count": 1,
"self": 5.227523499976087,
"children": {
"TrainerController._reset_env": {
"total": 3.6653909,
"count": 1,
"self": 3.6653909
},
"TrainerController.advance": {
"total": 3660.3280628000234,
"count": 401040,
"self": 4.8690916999630645,
"children": {
"env_step": {
"total": 3655.4589711000604,
"count": 401040,
"self": 1652.7016028998614,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1999.2849421001135,
"count": 401040,
"self": 10.719409600169229,
"children": {
"TorchPolicy.evaluate": {
"total": 1988.5655324999443,
"count": 400110,
"self": 1988.5655324999443
}
}
},
"workers": {
"total": 3.472426100085382,
"count": 401040,
"self": 0.0,
"children": {
"worker_root": {
"total": 3658.9897427999776,
"count": 401040,
"is_parallel": true,
"self": 2215.9914199000195,
"children": {
"steps_from_proto": {
"total": 0.006121300000000218,
"count": 1,
"is_parallel": true,
"self": 0.00010129999999985984,
"children": {
"_process_maybe_compressed_observation": {
"total": 0.005975900000000145,
"count": 2,
"is_parallel": true,
"self": 2.9300000000898052e-05,
"children": {
"_observation_to_np_array": {
"total": 0.005946599999999247,
"count": 3,
"is_parallel": true,
"self": 2.7499999999403002e-05,
"children": {
"process_pixels": {
"total": 0.005919099999999844,
"count": 3,
"is_parallel": true,
"self": 0.00022789999999961452,
"children": {
"image_decompress": {
"total": 0.005691200000000229,
"count": 3,
"is_parallel": true,
"self": 0.005691200000000229
}
}
}
}
}
}
},
"_process_rank_one_or_two_observation": {
"total": 4.410000000021341e-05,
"count": 2,
"is_parallel": true,
"self": 4.410000000021341e-05
}
}
},
"UnityEnvironment.step": {
"total": 1442.9922015999582,
"count": 401040,
"is_parallel": true,
"self": 18.86580499987258,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 20.762399499892027,
"count": 401040,
"is_parallel": true,
"self": 20.762399499892027
},
"communicator.exchange": {
"total": 1255.3583208000516,
"count": 401040,
"is_parallel": true,
"self": 1255.3583208000516
},
"steps_from_proto": {
"total": 148.0056763001418,
"count": 401040,
"is_parallel": true,
"self": 29.713494699777357,
"children": {
"_process_maybe_compressed_observation": {
"total": 105.30966810025639,
"count": 802080,
"is_parallel": true,
"self": 8.615924700225861,
"children": {
"_observation_to_np_array": {
"total": 96.69374340003053,
"count": 1203444,
"is_parallel": true,
"self": 8.426584100211485,
"children": {
"process_pixels": {
"total": 88.26715929981904,
"count": 1203444,
"is_parallel": true,
"self": 41.55240329984778,
"children": {
"image_decompress": {
"total": 46.71475599997126,
"count": 1203444,
"is_parallel": true,
"self": 46.71475599997126
}
}
}
}
}
}
},
"_process_rank_one_or_two_observation": {
"total": 12.982513500108055,
"count": 802080,
"is_parallel": true,
"self": 12.982513500108055
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.66000001829525e-05,
"count": 1,
"self": 2.66000001829525e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 3664.2653862999946,
"count": 177916,
"is_parallel": true,
"self": 3.890928399915083,
"children": {
"process_trajectory": {
"total": 2912.16683060008,
"count": 177916,
"is_parallel": true,
"self": 2911.73211080008,
"children": {
"RLTrainer._checkpoint": {
"total": 0.43471980000003896,
"count": 2,
"is_parallel": true,
"self": 0.43471980000003896
}
}
},
"_update_policy": {
"total": 748.2076272999991,
"count": 137,
"is_parallel": true,
"self": 502.2388822000035,
"children": {
"TorchPPOOptimizer.update": {
"total": 245.9687450999956,
"count": 3393,
"is_parallel": true,
"self": 245.9687450999956
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1781725999999253,
"count": 1,
"self": 0.012462499999855936,
"children": {
"RLTrainer._checkpoint": {
"total": 0.16571010000006936,
"count": 1,
"self": 0.16571010000006936
}
}
}
}
}
}
}