{
"name": "root",
"gauges": {
"Agent.Policy.Entropy.mean": {
"value": 1.3046125173568726,
"min": 1.3046125173568726,
"max": 1.4200314283370972,
"count": 200
},
"Agent.Policy.Entropy.sum": {
"value": 7921.60693359375,
"min": 6510.97314453125,
"max": 9482.42578125,
"count": 200
},
"Agent.DroneBasedReforestation.TreeDropCount.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.TreeDropCount.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.RechargeEnergyCount.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.RechargeEnergyCount.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.SaveLocationCount.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.SaveLocationCount.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.OutofEnergyCount.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.47368421052631576,
"count": 200
},
"Agent.DroneBasedReforestation.OutofEnergyCount.sum": {
"value": 0.0,
"min": 0.0,
"max": 12.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeTreeDropReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeTreeDropReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.difficulty.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.difficulty.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.task.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.task.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.EpisodeLength.mean": {
"value": 23.11904761904762,
"min": 19.032258064516128,
"max": 337.0,
"count": 200
},
"Agent.Environment.EpisodeLength.sum": {
"value": 5826.0,
"min": 4743.0,
"max": 6885.0,
"count": 200
},
"Agent.Step.mean": {
"value": 1199963.0,
"min": 5913.0,
"max": 1199963.0,
"count": 200
},
"Agent.Step.sum": {
"value": 1199963.0,
"min": 5913.0,
"max": 1199963.0,
"count": 200
},
"Agent.Policy.CuriosityValueEstimate.mean": {
"value": 0.2772812843322754,
"min": -0.0017869045259431005,
"max": 1.6576530933380127,
"count": 200
},
"Agent.Policy.CuriosityValueEstimate.sum": {
"value": 69.59760284423828,
"min": -0.0500333271920681,
"max": 339.894287109375,
"count": 200
},
"Agent.Policy.ExtrinsicValueEstimate.mean": {
"value": 72.91808319091797,
"min": 0.07052389532327652,
"max": 78.70875549316406,
"count": 200
},
"Agent.Policy.ExtrinsicValueEstimate.sum": {
"value": 18302.439453125,
"min": 1.7630974054336548,
"max": 21564.365234375,
"count": 200
},
"Agent.Environment.CumulativeReward.mean": {
"value": 98.3481856198718,
"min": 32.488859413398636,
"max": 101.33825216084902,
"count": 200
},
"Agent.Environment.CumulativeReward.sum": {
"value": 24685.39459058782,
"min": 584.7994694411755,
"max": 28784.73096944578,
"count": 200
},
"Agent.Policy.CuriosityReward.mean": {
"value": 0.044070242953985796,
"min": 0.0,
"max": 6.637133581297738,
"count": 200
},
"Agent.Policy.CuriosityReward.sum": {
"value": 11.061630981450435,
"min": 0.0,
"max": 232.29967534542084,
"count": 200
},
"Agent.Policy.ExtrinsicReward.mean": {
"value": 88.51336466445981,
"min": 29.23997161620193,
"max": 91.20442511557006,
"count": 200
},
"Agent.Policy.ExtrinsicReward.sum": {
"value": 22216.854530779412,
"min": 526.3194890916348,
"max": 25906.257217356935,
"count": 200
},
"Agent.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"Agent.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"Agent.Losses.PolicyLoss.mean": {
"value": 0.026823992472297203,
"min": 0.015120919308780381,
"max": 0.03299241218095025,
"count": 143
},
"Agent.Losses.PolicyLoss.sum": {
"value": 0.026823992472297203,
"min": 0.015120919308780381,
"max": 0.03299241218095025,
"count": 143
},
"Agent.Losses.ValueLoss.mean": {
"value": 1321.5291137695312,
"min": 249.33739598592123,
"max": 2391.9322408040366,
"count": 143
},
"Agent.Losses.ValueLoss.sum": {
"value": 1321.5291137695312,
"min": 249.33739598592123,
"max": 2391.9322408040366,
"count": 143
},
"Agent.Policy.LearningRate.mean": {
"value": 1.4888495037499943e-06,
"min": 1.4888495037499943e-06,
"max": 0.00029789475070175,
"count": 143
},
"Agent.Policy.LearningRate.sum": {
"value": 1.4888495037499943e-06,
"min": 1.4888495037499943e-06,
"max": 0.00029789475070175,
"count": 143
},
"Agent.Policy.Epsilon.mean": {
"value": 0.10049625000000001,
"min": 0.10049625000000001,
"max": 0.19929825,
"count": 143
},
"Agent.Policy.Epsilon.sum": {
"value": 0.10049625000000001,
"min": 0.10049625000000001,
"max": 0.19929825,
"count": 143
},
"Agent.Policy.Beta.mean": {
"value": 3.476287499999991e-05,
"min": 3.476287499999991e-05,
"max": 0.004964982675,
"count": 143
},
"Agent.Policy.Beta.sum": {
"value": 3.476287499999991e-05,
"min": 3.476287499999991e-05,
"max": 0.004964982675,
"count": 143
},
"Agent.Losses.CuriosityForwardLoss.mean": {
"value": 0.02420971527074774,
"min": 0.017945740604773164,
"max": 0.5723420046269894,
"count": 143
},
"Agent.Losses.CuriosityForwardLoss.sum": {
"value": 0.02420971527074774,
"min": 0.017945740604773164,
"max": 0.5723420046269894,
"count": 143
},
"Agent.Losses.CuriosityInverseLoss.mean": {
"value": 1.4218464146057765,
"min": 1.252934291958809,
"max": 3.305937925974528,
"count": 143
},
"Agent.Losses.CuriosityInverseLoss.sum": {
"value": 1.4218464146057765,
"min": 1.252934291958809,
"max": 3.305937925974528,
"count": 143
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1717185844",
"python_version": "3.9.18 (main, Sep 11 2023, 14:09:26) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\pdsie\\anaconda3\\envs\\mlagents20\\Scripts\\mlagents-learn c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/configs/mlagents/tmp/train/DroneBasedReforestation_difficulty_2_task_1_run_id_1_train.yaml --run-id=DroneBasedReforestation/train/DroneBasedReforestation_difficulty_2_task_1_run_id_1_train --base-port 5007",
"mlagents_version": "0.30.0",
"mlagents_envs_version": "0.30.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.7.1+cu110",
"numpy_version": "1.21.0",
"end_time_seconds": "1717192353"
},
"total": 6509.0749714,
"count": 1,
"self": 1.7435722000009264,
"children": {
"run_training.setup": {
"total": 0.0514599,
"count": 1,
"self": 0.0514599
},
"TrainerController.start_learning": {
"total": 6507.2799393,
"count": 1,
"self": 7.823252899979707,
"children": {
"TrainerController._reset_env": {
"total": 2.142326,
"count": 1,
"self": 2.142326
},
"TrainerController.advance": {
"total": 6497.08110360002,
"count": 410214,
"self": 7.467202000159887,
"children": {
"env_step": {
"total": 6489.61390159986,
"count": 410214,
"self": 4349.035375599892,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2135.3472138998422,
"count": 410214,
"self": 13.311706799913281,
"children": {
"TorchPolicy.evaluate": {
"total": 2122.035507099929,
"count": 400044,
"self": 2122.035507099929
}
}
},
"workers": {
"total": 5.231312100125457,
"count": 410214,
"self": 0.0,
"children": {
"worker_root": {
"total": 6498.288177799745,
"count": 410214,
"is_parallel": true,
"self": 2481.615599699838,
"children": {
"steps_from_proto": {
"total": 0.006615100000000096,
"count": 1,
"is_parallel": true,
"self": 0.00010419999999999874,
"children": {
"_process_maybe_compressed_observation": {
"total": 0.0064614000000000615,
"count": 2,
"is_parallel": true,
"self": 3.520000000012402e-05,
"children": {
"_observation_to_np_array": {
"total": 0.0064261999999999375,
"count": 3,
"is_parallel": true,
"self": 3.16000000000205e-05,
"children": {
"process_pixels": {
"total": 0.006394599999999917,
"count": 3,
"is_parallel": true,
"self": 0.00023580000000000823,
"children": {
"image_decompress": {
"total": 0.006158799999999909,
"count": 3,
"is_parallel": true,
"self": 0.006158799999999909
}
}
}
}
}
}
},
"_process_rank_one_or_two_observation": {
"total": 4.950000000003563e-05,
"count": 2,
"is_parallel": true,
"self": 4.950000000003563e-05
}
}
},
"UnityEnvironment.step": {
"total": 4016.665962999907,
"count": 410214,
"is_parallel": true,
"self": 22.521379199707553,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 21.270418399782063,
"count": 410214,
"is_parallel": true,
"self": 21.270418399782063
},
"communicator.exchange": {
"total": 3795.381976300237,
"count": 410214,
"is_parallel": true,
"self": 3795.381976300237
},
"steps_from_proto": {
"total": 177.49218910018064,
"count": 410214,
"is_parallel": true,
"self": 35.128209500233424,
"children": {
"_process_maybe_compressed_observation": {
"total": 127.71082309995005,
"count": 820428,
"is_parallel": true,
"self": 9.771404900107996,
"children": {
"_observation_to_np_array": {
"total": 117.93941819984205,
"count": 1238166,
"is_parallel": true,
"self": 9.615338599845629,
"children": {
"process_pixels": {
"total": 108.32407959999642,
"count": 1238166,
"is_parallel": true,
"self": 50.49505990018025,
"children": {
"image_decompress": {
"total": 57.82901969981617,
"count": 1238166,
"is_parallel": true,
"self": 57.82901969981617
}
}
}
}
}
}
},
"_process_rank_one_or_two_observation": {
"total": 14.65315649999716,
"count": 820428,
"is_parallel": true,
"self": 14.65315649999716
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.7400000362831634e-05,
"count": 1,
"self": 2.7400000362831634e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 6501.3513741001125,
"count": 333399,
"is_parallel": true,
"self": 9.704165000246576,
"children": {
"process_trajectory": {
"total": 5726.129997099867,
"count": 333399,
"is_parallel": true,
"self": 5725.628414399867,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5015826999997444,
"count": 2,
"is_parallel": true,
"self": 0.5015826999997444
}
}
},
"_update_policy": {
"total": 765.5172119999987,
"count": 143,
"is_parallel": true,
"self": 522.424437399994,
"children": {
"TorchPPOOptimizer.update": {
"total": 243.09277460000476,
"count": 3441,
"is_parallel": true,
"self": 243.09277460000476
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.23322939999980008,
"count": 1,
"self": 0.011447399999269692,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2217820000005304,
"count": 1,
"self": 0.2217820000005304
}
}
}
}
}
}
}