{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.1408933401107788,
"min": 0.1331000030040741,
"max": 1.4451422691345215,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 4211.02001953125,
"min": 3978.0927734375,
"max": 43839.8359375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989895.0,
"min": 29952.0,
"max": 989895.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989895.0,
"min": 29952.0,
"max": 989895.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.3550621569156647,
"min": -0.10232432931661606,
"max": 0.3550621569156647,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 94.446533203125,
"min": -24.66016387939453,
"max": 94.446533203125,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.04747239872813225,
"min": 0.04742880165576935,
"max": 0.3632095456123352,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 12.627657890319824,
"min": 12.568632125854492,
"max": 87.53350067138672,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07250438436816241,
"min": 0.06603680933774757,
"max": 0.07250438436816241,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0150613811542737,
"min": 0.4819999701795422,
"max": 1.0714611336297821,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014409513144648058,
"min": 0.0008280690021465284,
"max": 0.014409513144648058,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.20173318402507281,
"min": 0.011592966030051397,
"max": 0.20173318402507281,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.554676053235717e-06,
"min": 7.554676053235717e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010576546474530004,
"min": 0.00010576546474530004,
"max": 0.0033806834731055996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10251819285714288,
"min": 0.10251819285714288,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4352547000000002,
"min": 1.3886848,
"max": 2.5268944,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026156746642857153,
"min": 0.00026156746642857153,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036619445300000012,
"min": 0.0036619445300000012,
"max": 0.11271675056000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.09160561859607697,
"min": 0.09050997346639633,
"max": 0.42307570576667786,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 1.2824786901474,
"min": 1.2671396732330322,
"max": 2.9615299701690674,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 462.1194029850746,
"min": 462.1194029850746,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30962.0,
"min": 15984.0,
"max": 33024.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.2989999716406437,
"min": -1.0000000521540642,
"max": 1.2989999716406437,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 87.03299809992313,
"min": -30.697001740336418,
"max": 87.03299809992313,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.2989999716406437,
"min": -1.0000000521540642,
"max": 1.2989999716406437,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 87.03299809992313,
"min": -30.697001740336418,
"max": 87.03299809992313,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.4310333515137761,
"min": 0.42429793723960524,
"max": 7.520682496018708,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 28.879234551422996,
"min": 26.73077004609513,
"max": 120.33091993629932,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1685943301",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1685945032"
},
"total": 1731.368692346,
"count": 1,
"self": 0.37283626299995376,
"children": {
"run_training.setup": {
"total": 0.04048994999993738,
"count": 1,
"self": 0.04048994999993738
},
"TrainerController.start_learning": {
"total": 1730.955366133,
"count": 1,
"self": 1.5714975868854708,
"children": {
"TrainerController._reset_env": {
"total": 4.189904978999948,
"count": 1,
"self": 4.189904978999948
},
"TrainerController.advance": {
"total": 1725.0886394181139,
"count": 63602,
"self": 1.4622982369978672,
"children": {
"env_step": {
"total": 1124.504876991035,
"count": 63602,
"self": 1009.1690688160552,
"children": {
"SubprocessEnvManager._take_step": {
"total": 114.419299357015,
"count": 63602,
"self": 4.760665715030427,
"children": {
"TorchPolicy.evaluate": {
"total": 109.65863364198458,
"count": 62568,
"self": 109.65863364198458
}
}
},
"workers": {
"total": 0.9165088179647682,
"count": 63602,
"self": 0.0,
"children": {
"worker_root": {
"total": 1728.189728813009,
"count": 63602,
"is_parallel": true,
"self": 823.4012033581089,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019729819996427977,
"count": 1,
"is_parallel": true,
"self": 0.0006033620002199314,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013696199994228664,
"count": 8,
"is_parallel": true,
"self": 0.0013696199994228664
}
}
},
"UnityEnvironment.step": {
"total": 0.04923758300037662,
"count": 1,
"is_parallel": true,
"self": 0.0004900820003967965,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004927489999317913,
"count": 1,
"is_parallel": true,
"self": 0.0004927489999317913
},
"communicator.exchange": {
"total": 0.046554469000057,
"count": 1,
"is_parallel": true,
"self": 0.046554469000057
},
"steps_from_proto": {
"total": 0.0017002829999910318,
"count": 1,
"is_parallel": true,
"self": 0.0002992080007970799,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001401074999193952,
"count": 8,
"is_parallel": true,
"self": 0.001401074999193952
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 904.7885254549001,
"count": 63601,
"is_parallel": true,
"self": 25.58731569983638,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 17.63401546099567,
"count": 63601,
"is_parallel": true,
"self": 17.63401546099567
},
"communicator.exchange": {
"total": 785.8385756440898,
"count": 63601,
"is_parallel": true,
"self": 785.8385756440898
},
"steps_from_proto": {
"total": 75.72861864997822,
"count": 63601,
"is_parallel": true,
"self": 15.66387929494931,
"children": {
"_process_rank_one_or_two_observation": {
"total": 60.064739355028905,
"count": 508808,
"is_parallel": true,
"self": 60.064739355028905
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 599.121464190081,
"count": 63602,
"self": 2.86412017603152,
"children": {
"process_trajectory": {
"total": 100.37848487504925,
"count": 63602,
"self": 100.16790631404956,
"children": {
"RLTrainer._checkpoint": {
"total": 0.21057856099969285,
"count": 2,
"self": 0.21057856099969285
}
}
},
"_update_policy": {
"total": 495.87885913900027,
"count": 448,
"self": 312.8440424709511,
"children": {
"TorchPPOOptimizer.update": {
"total": 183.0348166680492,
"count": 22824,
"self": 183.0348166680492
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.13300029624952e-06,
"count": 1,
"self": 1.13300029624952e-06
},
"TrainerController._save_models": {
"total": 0.10532301600051142,
"count": 1,
"self": 0.0015248780000547413,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10379813800045667,
"count": 1,
"self": 0.10379813800045667
}
}
}
}
}
}
}