{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5226655602455139,
"min": 0.5226655602455139,
"max": 1.495460033416748,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 15596.33984375,
"min": 15596.33984375,
"max": 45366.27734375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989974.0,
"min": 29952.0,
"max": 989974.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989974.0,
"min": 29952.0,
"max": 989974.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.4031621515750885,
"min": -0.087301105260849,
"max": 0.4031621515750885,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 108.45062255859375,
"min": -21.039566040039062,
"max": 108.45062255859375,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.03888430818915367,
"min": -0.013721240684390068,
"max": 0.440623939037323,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 10.459878921508789,
"min": -3.5812437534332275,
"max": 104.42787170410156,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06518822666701107,
"min": 0.06400905932592135,
"max": 0.07545935446017188,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.912635173338155,
"min": 0.5047046405133784,
"max": 1.0341924493550323,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01575132667910241,
"min": 0.0004330962430524843,
"max": 0.01575132667910241,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22051857350743373,
"min": 0.005630251159682295,
"max": 0.22051857350743373,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.534254631471427e-06,
"min": 7.534254631471427e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010547956484059998,
"min": 0.00010547956484059998,
"max": 0.0036089247970251,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10251138571428572,
"min": 0.10251138571428572,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4351594,
"min": 1.3886848,
"max": 2.569162500000001,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002608874328571428,
"min": 0.0002608874328571428,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00365242406,
"min": 0.00365242406,
"max": 0.12030719251,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.013385926373302937,
"min": 0.013385926373302937,
"max": 0.5761869549751282,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.18740296363830566,
"min": 0.18740296363830566,
"max": 4.033308506011963,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 434.2857142857143,
"min": 434.2857142857143,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30400.0,
"min": 15984.0,
"max": 33845.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4799799785017966,
"min": -1.0000000521540642,
"max": 1.4799799785017966,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 103.59859849512577,
"min": -30.74380173534155,
"max": 103.59859849512577,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4799799785017966,
"min": -1.0000000521540642,
"max": 1.4799799785017966,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 103.59859849512577,
"min": -30.74380173534155,
"max": 103.59859849512577,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.061112541359034364,
"min": 0.061112541359034364,
"max": 11.821637786924839,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.277877895132406,
"min": 4.277877895132406,
"max": 189.14620459079742,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678730041",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1678732126"
},
"total": 2084.27358121,
"count": 1,
"self": 0.47456795499965665,
"children": {
"run_training.setup": {
"total": 0.10628328000007059,
"count": 1,
"self": 0.10628328000007059
},
"TrainerController.start_learning": {
"total": 2083.6927299750005,
"count": 1,
"self": 1.5025811670475377,
"children": {
"TrainerController._reset_env": {
"total": 8.315415677999908,
"count": 1,
"self": 8.315415677999908
},
"TrainerController.advance": {
"total": 2073.780391438953,
"count": 63492,
"self": 1.535912382988954,
"children": {
"env_step": {
"total": 1440.9111832489484,
"count": 63492,
"self": 1327.0256248498995,
"children": {
"SubprocessEnvManager._take_step": {
"total": 112.98512790304994,
"count": 63492,
"self": 4.741314884040776,
"children": {
"TorchPolicy.evaluate": {
"total": 108.24381301900917,
"count": 62568,
"self": 108.24381301900917
}
}
},
"workers": {
"total": 0.9004304959989895,
"count": 63492,
"self": 0.0,
"children": {
"worker_root": {
"total": 2079.0188786290378,
"count": 63492,
"is_parallel": true,
"self": 870.1520676210703,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002583831000038117,
"count": 1,
"is_parallel": true,
"self": 0.0006897480000134237,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018940830000246933,
"count": 8,
"is_parallel": true,
"self": 0.0018940830000246933
}
}
},
"UnityEnvironment.step": {
"total": 0.045769880000079866,
"count": 1,
"is_parallel": true,
"self": 0.0005115349999869068,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004476740000427526,
"count": 1,
"is_parallel": true,
"self": 0.0004476740000427526
},
"communicator.exchange": {
"total": 0.04323337400001037,
"count": 1,
"is_parallel": true,
"self": 0.04323337400001037
},
"steps_from_proto": {
"total": 0.0015772970000398345,
"count": 1,
"is_parallel": true,
"self": 0.00037516400016102125,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012021329998788133,
"count": 8,
"is_parallel": true,
"self": 0.0012021329998788133
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1208.8668110079675,
"count": 63491,
"is_parallel": true,
"self": 31.070592564035678,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.684635536995074,
"count": 63491,
"is_parallel": true,
"self": 22.684635536995074
},
"communicator.exchange": {
"total": 1063.004125210943,
"count": 63491,
"is_parallel": true,
"self": 1063.004125210943
},
"steps_from_proto": {
"total": 92.10745769599384,
"count": 63491,
"is_parallel": true,
"self": 20.054575236810706,
"children": {
"_process_rank_one_or_two_observation": {
"total": 72.05288245918314,
"count": 507928,
"is_parallel": true,
"self": 72.05288245918314
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 631.3332958070155,
"count": 63492,
"self": 2.871817751034996,
"children": {
"process_trajectory": {
"total": 117.24316144597856,
"count": 63492,
"self": 116.9126582959791,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3305031499994584,
"count": 2,
"self": 0.3305031499994584
}
}
},
"_update_policy": {
"total": 511.2183166100019,
"count": 452,
"self": 323.5419686030625,
"children": {
"TorchPPOOptimizer.update": {
"total": 187.6763480069394,
"count": 22773,
"self": 187.6763480069394
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.910001270123757e-07,
"count": 1,
"self": 8.910001270123757e-07
},
"TrainerController._save_models": {
"total": 0.09434080000028189,
"count": 1,
"self": 0.001544221000131074,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09279657900015081,
"count": 1,
"self": 0.09279657900015081
}
}
}
}
}
}
}