{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.45144903659820557,
"min": 0.45144903659820557,
"max": 1.4546117782592773,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13745.720703125,
"min": 13745.720703125,
"max": 44127.1015625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989988.0,
"min": 29884.0,
"max": 989988.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989988.0,
"min": 29884.0,
"max": 989988.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6039779782295227,
"min": -0.09628661721944809,
"max": 0.6039779782295227,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 169.71780395507812,
"min": -23.301361083984375,
"max": 169.71780395507812,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.005139841698110104,
"min": -0.08753462880849838,
"max": 0.346064954996109,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.4442955255508423,
"min": -23.021608352661133,
"max": 82.01739501953125,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06536575123793753,
"min": 0.06248163894737706,
"max": 0.07390628284460521,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9151205173311254,
"min": 0.5912502627568417,
"max": 1.0622652414900835,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.016031046216227755,
"min": 0.0009810267452543167,
"max": 0.020399413331820743,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22443464702718854,
"min": 0.012753347688306117,
"max": 0.2855917866454904,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.454433229507141e-06,
"min": 7.454433229507141e-06,
"max": 0.00029484765171744996,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010436206521309998,
"min": 0.00010436206521309998,
"max": 0.0037583152472282994,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10248477857142858,
"min": 0.10248477857142858,
"max": 0.19828255,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4347869000000002,
"min": 1.4347869000000002,
"max": 2.6527716999999997,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025822937928571423,
"min": 0.00025822937928571423,
"max": 0.009828426745,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036152113099999993,
"min": 0.0036152113099999993,
"max": 0.12529189283,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011388683691620827,
"min": 0.011295264586806297,
"max": 0.4017273783683777,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.15944157540798187,
"min": 0.15813370048999786,
"max": 3.2138190269470215,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 328.6875,
"min": 328.6875,
"max": 985.4545454545455,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31554.0,
"min": 16427.0,
"max": 32520.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6296291430480778,
"min": -0.8651455051519654,
"max": 1.6296291430480778,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 156.44439773261547,
"min": -28.54980167001486,
"max": 156.44439773261547,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6296291430480778,
"min": -0.8651455051519654,
"max": 1.6296291430480778,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 156.44439773261547,
"min": -28.54980167001486,
"max": 156.44439773261547,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03866716907001925,
"min": 0.03866716907001925,
"max": 8.364002719521523,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.712048230721848,
"min": 3.37108197924681,
"max": 142.18804623186588,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1723207239",
"python_version": "3.10.12 (main, Jul 29 2024, 16:56:48) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1723210400"
},
"total": 3160.5517131109996,
"count": 1,
"self": 1.2109581269996852,
"children": {
"run_training.setup": {
"total": 0.07258396799988986,
"count": 1,
"self": 0.07258396799988986
},
"TrainerController.start_learning": {
"total": 3159.268171016,
"count": 1,
"self": 2.290939556037756,
"children": {
"TrainerController._reset_env": {
"total": 2.7976361920000272,
"count": 1,
"self": 2.7976361920000272
},
"TrainerController.advance": {
"total": 3154.037110499962,
"count": 63871,
"self": 2.5156189199628898,
"children": {
"env_step": {
"total": 2084.224529412035,
"count": 63871,
"self": 1925.2744642960593,
"children": {
"SubprocessEnvManager._take_step": {
"total": 157.50782159802475,
"count": 63871,
"self": 6.755935075946127,
"children": {
"TorchPolicy.evaluate": {
"total": 150.75188652207862,
"count": 62560,
"self": 150.75188652207862
}
}
},
"workers": {
"total": 1.4422435179508284,
"count": 63871,
"self": 0.0,
"children": {
"worker_root": {
"total": 3152.0467003880594,
"count": 63871,
"is_parallel": true,
"self": 1409.376671536097,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00355952600011733,
"count": 1,
"is_parallel": true,
"self": 0.0012517239997578145,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0023078020003595157,
"count": 8,
"is_parallel": true,
"self": 0.0023078020003595157
}
}
},
"UnityEnvironment.step": {
"total": 0.05978297699994073,
"count": 1,
"is_parallel": true,
"self": 0.0007492470003853668,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005471760000546055,
"count": 1,
"is_parallel": true,
"self": 0.0005471760000546055
},
"communicator.exchange": {
"total": 0.05637916499972562,
"count": 1,
"is_parallel": true,
"self": 0.05637916499972562
},
"steps_from_proto": {
"total": 0.0021073889997751394,
"count": 1,
"is_parallel": true,
"self": 0.0005085769998913747,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015988119998837647,
"count": 8,
"is_parallel": true,
"self": 0.0015988119998837647
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1742.6700288519623,
"count": 63870,
"is_parallel": true,
"self": 49.41027024292953,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 30.63422101601691,
"count": 63870,
"is_parallel": true,
"self": 30.63422101601691
},
"communicator.exchange": {
"total": 1533.6358760640073,
"count": 63870,
"is_parallel": true,
"self": 1533.6358760640073
},
"steps_from_proto": {
"total": 128.98966152900857,
"count": 63870,
"is_parallel": true,
"self": 27.730346447383,
"children": {
"_process_rank_one_or_two_observation": {
"total": 101.25931508162557,
"count": 510960,
"is_parallel": true,
"self": 101.25931508162557
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1067.2969621679645,
"count": 63871,
"self": 4.595349156002612,
"children": {
"process_trajectory": {
"total": 165.17038859495642,
"count": 63871,
"self": 164.81954804395673,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3508405509996919,
"count": 2,
"self": 0.3508405509996919
}
}
},
"_update_policy": {
"total": 897.5312244170054,
"count": 456,
"self": 372.36562696301235,
"children": {
"TorchPPOOptimizer.update": {
"total": 525.1655974539931,
"count": 22827,
"self": 525.1655974539931
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4590004866477102e-06,
"count": 1,
"self": 1.4590004866477102e-06
},
"TrainerController._save_models": {
"total": 0.14248330899954453,
"count": 1,
"self": 0.00548895999963861,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13699434899990592,
"count": 1,
"self": 0.13699434899990592
}
}
}
}
}
}
}