{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5815364122390747,
"min": 0.5815364122390747,
"max": 1.4025776386260986,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 17464.701171875,
"min": 17464.701171875,
"max": 42548.59375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989994.0,
"min": 29952.0,
"max": 989994.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989994.0,
"min": 29952.0,
"max": 989994.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.2527148425579071,
"min": -0.14628981053829193,
"max": 0.27927452325820923,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 66.21128845214844,
"min": -34.670684814453125,
"max": 71.77355194091797,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.025025691837072372,
"min": -0.025025691837072372,
"max": 0.5245475769042969,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -6.556731224060059,
"min": -6.556731224060059,
"max": 124.31777954101562,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06985496494356387,
"min": 0.06517078513518651,
"max": 0.07408976303065125,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9779695092098941,
"min": 0.4934117605423972,
"max": 1.0577347804889237,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01304991841936412,
"min": 8.21431959897004e-05,
"max": 0.013248865455130323,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.1826988578710977,
"min": 0.0010678615478661052,
"max": 0.19873298182695484,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.333640412628574e-06,
"min": 7.333640412628574e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010267096577680004,
"min": 0.00010267096577680004,
"max": 0.0030199700933434,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10244451428571431,
"min": 0.10244451428571431,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4342232000000004,
"min": 1.3691136000000002,
"max": 2.3478712999999996,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025420697714285723,
"min": 0.00025420697714285723,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035588976800000013,
"min": 0.0035588976800000013,
"max": 0.10069499434,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009028260596096516,
"min": 0.009028260596096516,
"max": 0.542335033416748,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.12639564275741577,
"min": 0.12639564275741577,
"max": 3.7963452339172363,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 541.6140350877193,
"min": 541.6140350877193,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30872.0,
"min": 15984.0,
"max": 33559.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.0021262814601262,
"min": -1.0000000521540642,
"max": 1.1202479594945907,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 57.121198043227196,
"min": -32.000001668930054,
"max": 57.121198043227196,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.0021262814601262,
"min": -1.0000000521540642,
"max": 1.1202479594945907,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 57.121198043227196,
"min": -32.000001668930054,
"max": 57.121198043227196,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.05032036487720784,
"min": 0.05032036487720784,
"max": 11.414958842098713,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.868260798000847,
"min": 2.6303195174841676,
"max": 182.6393414735794,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673813644",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673815479"
},
"total": 1835.014315685,
"count": 1,
"self": 0.43310936200009564,
"children": {
"run_training.setup": {
"total": 0.10427694500003781,
"count": 1,
"self": 0.10427694500003781
},
"TrainerController.start_learning": {
"total": 1834.4769293779998,
"count": 1,
"self": 1.120968757935998,
"children": {
"TrainerController._reset_env": {
"total": 6.2133105649998015,
"count": 1,
"self": 6.2133105649998015
},
"TrainerController.advance": {
"total": 1827.0571086500638,
"count": 63281,
"self": 1.1580026420663216,
"children": {
"env_step": {
"total": 1178.2344336689525,
"count": 63281,
"self": 1079.9125038179625,
"children": {
"SubprocessEnvManager._take_step": {
"total": 97.61528270202825,
"count": 63281,
"self": 4.053953072999775,
"children": {
"TorchPolicy.evaluate": {
"total": 93.56132962902848,
"count": 62558,
"self": 31.70782114399549,
"children": {
"TorchPolicy.sample_actions": {
"total": 61.85350848503299,
"count": 62558,
"self": 61.85350848503299
}
}
}
}
},
"workers": {
"total": 0.7066471489617925,
"count": 63281,
"self": 0.0,
"children": {
"worker_root": {
"total": 1830.3580412079577,
"count": 63281,
"is_parallel": true,
"self": 843.8393621989653,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0016293730000143114,
"count": 1,
"is_parallel": true,
"self": 0.0005560369997965608,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010733360002177506,
"count": 8,
"is_parallel": true,
"self": 0.0010733360002177506
}
}
},
"UnityEnvironment.step": {
"total": 0.042445614000143905,
"count": 1,
"is_parallel": true,
"self": 0.0004966250000961736,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00048019299993029563,
"count": 1,
"is_parallel": true,
"self": 0.00048019299993029563
},
"communicator.exchange": {
"total": 0.0398152420000315,
"count": 1,
"is_parallel": true,
"self": 0.0398152420000315
},
"steps_from_proto": {
"total": 0.0016535540000859328,
"count": 1,
"is_parallel": true,
"self": 0.0004189229998701194,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012346310002158134,
"count": 8,
"is_parallel": true,
"self": 0.0012346310002158134
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 986.5186790089924,
"count": 63280,
"is_parallel": true,
"self": 26.941189801981636,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.064157817052546,
"count": 63280,
"is_parallel": true,
"self": 22.064157817052546
},
"communicator.exchange": {
"total": 841.6955784659647,
"count": 63280,
"is_parallel": true,
"self": 841.6955784659647
},
"steps_from_proto": {
"total": 95.81775292399357,
"count": 63280,
"is_parallel": true,
"self": 20.854735723791237,
"children": {
"_process_rank_one_or_two_observation": {
"total": 74.96301720020233,
"count": 506240,
"is_parallel": true,
"self": 74.96301720020233
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 647.664672339045,
"count": 63281,
"self": 2.0742144990774705,
"children": {
"process_trajectory": {
"total": 138.51219087896584,
"count": 63281,
"self": 138.31362962096614,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1985612579996996,
"count": 2,
"self": 0.1985612579996996
}
}
},
"_update_policy": {
"total": 507.07826696100165,
"count": 429,
"self": 196.52816947701672,
"children": {
"TorchPPOOptimizer.update": {
"total": 310.5500974839849,
"count": 22878,
"self": 310.5500974839849
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.240002327715047e-07,
"count": 1,
"self": 9.240002327715047e-07
},
"TrainerController._save_models": {
"total": 0.0855404809999527,
"count": 1,
"self": 0.0013111530001879146,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08422932799976479,
"count": 1,
"self": 0.08422932799976479
}
}
}
}
}
}
}