{
"name": "root",
"gauges": {
"Agent.Policy.Entropy.mean": {
"value": 0.855984091758728,
"min": 0.505821704864502,
"max": 1.0962650775909424,
"count": 200
},
"Agent.Policy.Entropy.sum": {
"value": 34225.66796875,
"min": 20208.58984375,
"max": 43911.99609375,
"count": 200
},
"Agent.Environment.LessonNumber.pattern.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.pattern.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.task.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.task.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Step.mean": {
"value": 7999096.0,
"min": 39096.0,
"max": 7999096.0,
"count": 200
},
"Agent.Step.sum": {
"value": 7999096.0,
"min": 39096.0,
"max": 7999096.0,
"count": 200
},
"Agent.Policy.ExtrinsicValueEstimate.mean": {
"value": 9.723402976989746,
"min": 2.2312328815460205,
"max": 9.799125671386719,
"count": 200
},
"Agent.Policy.ExtrinsicValueEstimate.sum": {
"value": 233.36167907714844,
"min": 51.318355560302734,
"max": 235.17901611328125,
"count": 200
},
"Agent.Losses.PolicyLoss.mean": {
"value": 0.051323230499364735,
"min": 0.04491637487173272,
"max": 0.054505098355649456,
"count": 200
},
"Agent.Losses.PolicyLoss.sum": {
"value": 0.1539696914980942,
"min": 0.10527651318504164,
"max": 0.16351529506694837,
"count": 200
},
"Agent.Losses.ValueLoss.mean": {
"value": 0.0049588195544192646,
"min": 0.0030816976082167543,
"max": 3.9712093503524857,
"count": 200
},
"Agent.Losses.ValueLoss.sum": {
"value": 0.014876458663257795,
"min": 0.009245092824650263,
"max": 7.942418700704971,
"count": 200
},
"Agent.Policy.LearningRate.mean": {
"value": 8.856997047999973e-07,
"min": 8.856997047999973e-07,
"max": 0.00029907840030719997,
"count": 200
},
"Agent.Policy.LearningRate.sum": {
"value": 2.657099114399992e-06,
"min": 2.657099114399992e-06,
"max": 0.0008936568021144,
"count": 200
},
"Agent.Policy.Epsilon.mean": {
"value": 0.10029520000000001,
"min": 0.10029520000000001,
"max": 0.1996928,
"count": 200
},
"Agent.Policy.Epsilon.sum": {
"value": 0.30088560000000003,
"min": 0.30088560000000003,
"max": 0.5978856,
"count": 200
},
"Agent.Policy.Beta.mean": {
"value": 2.4730479999999958e-05,
"min": 2.4730479999999958e-05,
"max": 0.004984670720000001,
"count": 200
},
"Agent.Policy.Beta.sum": {
"value": 7.419143999999988e-05,
"min": 7.419143999999988e-05,
"max": 0.014894491440000001,
"count": 200
},
"Agent.Environment.EpisodeLength.mean": {
"value": 4999.0,
"min": 4999.0,
"max": 4999.0,
"count": 200
},
"Agent.Environment.EpisodeLength.sum": {
"value": 39992.0,
"min": 39992.0,
"max": 39992.0,
"count": 200
},
"Agent.WindFarmControl.IndividualPerformance.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.WindFarmControl.IndividualPerformance.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.WindFarmControl.AvoidDamageReward.mean": {
"value": 4846.482971191406,
"min": 1364.4507369995117,
"max": 4881.824462890625,
"count": 200
},
"Agent.WindFarmControl.AvoidDamageReward.sum": {
"value": 38771.86376953125,
"min": 10915.605895996094,
"max": 39054.595703125,
"count": 200
},
"Agent.Environment.CumulativeReward.mean": {
"value": 4802.791069030762,
"min": 1410.6444644927979,
"max": 4871.900245666504,
"count": 200
},
"Agent.Environment.CumulativeReward.sum": {
"value": 38422.328552246094,
"min": 11285.155715942383,
"max": 38975.20196533203,
"count": 200
},
"Agent.Policy.ExtrinsicReward.mean": {
"value": 4802.791069030762,
"min": 1410.6444644927979,
"max": 4871.900245666504,
"count": 200
},
"Agent.Policy.ExtrinsicReward.sum": {
"value": 38422.328552246094,
"min": 11285.155715942383,
"max": 38975.20196533203,
"count": 200
},
"Agent.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"Agent.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1715164204",
"python_version": "3.9.18 (main, Sep 11 2023, 14:09:26) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\pdsie\\anaconda3\\envs\\mlagents20\\Scripts\\mlagents-learn c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/configs/mlagents/tmp/train/WindFarmControl_pattern_1_task_1_run_id_0_train.yaml --run-id=WindFarmControl/train/WindFarmControl_pattern_1_task_1_run_id_0_train",
"mlagents_version": "0.30.0",
"mlagents_envs_version": "0.30.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.7.1+cu110",
"numpy_version": "1.21.2",
"end_time_seconds": "1715167810"
},
"total": 3606.8480412000004,
"count": 1,
"self": 0.1417844000002333,
"children": {
"run_training.setup": {
"total": 0.045487500000000014,
"count": 1,
"self": 0.045487500000000014
},
"TrainerController.start_learning": {
"total": 3606.6607693,
"count": 1,
"self": 10.57787299982374,
"children": {
"TrainerController._reset_env": {
"total": 2.9059438999999996,
"count": 1,
"self": 2.9059438999999996
},
"TrainerController.advance": {
"total": 3593.1502613001767,
"count": 1002051,
"self": 10.148106600211577,
"children": {
"env_step": {
"total": 3583.002154699965,
"count": 1002051,
"self": 1495.395013600044,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2081.919388500077,
"count": 1002051,
"self": 24.965887699899213,
"children": {
"TorchPolicy.evaluate": {
"total": 2056.9535008001776,
"count": 1002051,
"self": 2056.9535008001776
}
}
},
"workers": {
"total": 5.687752599844124,
"count": 1002051,
"self": 0.0,
"children": {
"worker_root": {
"total": 3592.1515751002244,
"count": 1002051,
"is_parallel": true,
"self": 2734.081737800304,
"children": {
"steps_from_proto": {
"total": 0.00025630000000020914,
"count": 1,
"is_parallel": true,
"self": 0.00011340000000004125,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0001429000000001679,
"count": 2,
"is_parallel": true,
"self": 0.0001429000000001679
}
}
},
"UnityEnvironment.step": {
"total": 858.0695809999205,
"count": 1002051,
"is_parallel": true,
"self": 41.71356829987474,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 62.90565180009499,
"count": 1002051,
"is_parallel": true,
"self": 62.90565180009499
},
"communicator.exchange": {
"total": 636.1016120999109,
"count": 1002051,
"is_parallel": true,
"self": 636.1016120999109
},
"steps_from_proto": {
"total": 117.34874880003984,
"count": 1002051,
"is_parallel": true,
"self": 66.03595579985648,
"children": {
"_process_rank_one_or_two_observation": {
"total": 51.31279300018337,
"count": 2004102,
"is_parallel": true,
"self": 51.31279300018337
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.099999983329326e-05,
"count": 1,
"self": 2.099999983329326e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 3602.8280018999576,
"count": 107125,
"is_parallel": true,
"self": 4.575515599927712,
"children": {
"process_trajectory": {
"total": 1851.4764810000318,
"count": 107125,
"is_parallel": true,
"self": 1851.063276800032,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4132041999995977,
"count": 16,
"is_parallel": true,
"self": 0.4132041999995977
}
}
},
"_update_policy": {
"total": 1746.776005299998,
"count": 600,
"is_parallel": true,
"self": 578.3676885000043,
"children": {
"TorchPPOOptimizer.update": {
"total": 1168.4083167999936,
"count": 93600,
"is_parallel": true,
"self": 1168.4083167999936
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.02667010000004666,
"count": 1,
"self": 0.005927000000156113,
"children": {
"RLTrainer._checkpoint": {
"total": 0.020743099999890546,
"count": 1,
"self": 0.020743099999890546
}
}
}
}
}
}
}