{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.404507040977478,
"min": 1.404507040977478,
"max": 1.4272645711898804,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70395.296875,
"min": 69025.59375,
"max": 75812.015625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 85.82291666666667,
"min": 82.51580698835275,
"max": 403.3709677419355,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49434.0,
"min": 48842.0,
"max": 50103.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999964.0,
"min": 49939.0,
"max": 1999964.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999964.0,
"min": 49939.0,
"max": 1999964.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4476101398468018,
"min": 0.09894672781229019,
"max": 2.504772186279297,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1409.823486328125,
"min": 12.17044734954834,
"max": 1456.391357421875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7210323815751405,
"min": 2.035573485905562,
"max": 4.030502086256941,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2143.314651787281,
"min": 250.37553876638412,
"max": 2260.450822889805,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7210323815751405,
"min": 2.035573485905562,
"max": 4.030502086256941,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2143.314651787281,
"min": 250.37553876638412,
"max": 2260.450822889805,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.018134313680477337,
"min": 0.013595746912809167,
"max": 0.021672544186003506,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05440294104143201,
"min": 0.02845364124320137,
"max": 0.05800077439344023,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.058525007714827854,
"min": 0.020855766938378414,
"max": 0.06078531760722399,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.17557502314448356,
"min": 0.04171153387675683,
"max": 0.17557502314448356,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.457698847466672e-06,
"min": 3.457698847466672e-06,
"max": 0.00029533650155449996,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0373096542400016e-05,
"min": 1.0373096542400016e-05,
"max": 0.0008440995186334999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10115253333333335,
"min": 0.10115253333333335,
"max": 0.19844550000000002,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30345760000000005,
"min": 0.20746494999999998,
"max": 0.5813664999999999,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.751141333333343e-05,
"min": 6.751141333333343e-05,
"max": 0.004922430450000001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0002025342400000003,
"min": 0.0002025342400000003,
"max": 0.014070188350000002,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670888551",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670890711"
},
"total": 2159.908840569,
"count": 1,
"self": 0.3993178420005279,
"children": {
"run_training.setup": {
"total": 0.11041313400005492,
"count": 1,
"self": 0.11041313400005492
},
"TrainerController.start_learning": {
"total": 2159.3991095929996,
"count": 1,
"self": 3.868961786982254,
"children": {
"TrainerController._reset_env": {
"total": 9.978055574999985,
"count": 1,
"self": 9.978055574999985
},
"TrainerController.advance": {
"total": 2145.4316848670173,
"count": 232601,
"self": 3.8476411350029593,
"children": {
"env_step": {
"total": 1676.026553857047,
"count": 232601,
"self": 1400.3822057861014,
"children": {
"SubprocessEnvManager._take_step": {
"total": 273.13624155496495,
"count": 232601,
"self": 14.281760193912987,
"children": {
"TorchPolicy.evaluate": {
"total": 258.85448136105197,
"count": 223000,
"self": 64.75271857994971,
"children": {
"TorchPolicy.sample_actions": {
"total": 194.10176278110225,
"count": 223000,
"self": 194.10176278110225
}
}
}
}
},
"workers": {
"total": 2.5081065159806712,
"count": 232601,
"self": 0.0,
"children": {
"worker_root": {
"total": 2151.80521870301,
"count": 232601,
"is_parallel": true,
"self": 1002.2875016890046,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002487989999963247,
"count": 1,
"is_parallel": true,
"self": 0.0003062010000576265,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0021817889999056206,
"count": 2,
"is_parallel": true,
"self": 0.0021817889999056206
}
}
},
"UnityEnvironment.step": {
"total": 0.027765578999947138,
"count": 1,
"is_parallel": true,
"self": 0.00030920199992579,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00019962700002906786,
"count": 1,
"is_parallel": true,
"self": 0.00019962700002906786
},
"communicator.exchange": {
"total": 0.02644901500002561,
"count": 1,
"is_parallel": true,
"self": 0.02644901500002561
},
"steps_from_proto": {
"total": 0.0008077349999666694,
"count": 1,
"is_parallel": true,
"self": 0.00028254200003630103,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005251929999303684,
"count": 2,
"is_parallel": true,
"self": 0.0005251929999303684
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1149.5177170140053,
"count": 232600,
"is_parallel": true,
"self": 34.15764112676061,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 75.28204828209857,
"count": 232600,
"is_parallel": true,
"self": 75.28204828209857
},
"communicator.exchange": {
"total": 949.3332091720613,
"count": 232600,
"is_parallel": true,
"self": 949.3332091720613
},
"steps_from_proto": {
"total": 90.7448184330849,
"count": 232600,
"is_parallel": true,
"self": 37.397387764879,
"children": {
"_process_rank_one_or_two_observation": {
"total": 53.3474306682059,
"count": 465200,
"is_parallel": true,
"self": 53.3474306682059
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 465.55748987496736,
"count": 232601,
"self": 6.105970451058283,
"children": {
"process_trajectory": {
"total": 145.9155044829082,
"count": 232601,
"self": 145.43516339290807,
"children": {
"RLTrainer._checkpoint": {
"total": 0.48034109000013814,
"count": 4,
"self": 0.48034109000013814
}
}
},
"_update_policy": {
"total": 313.5360149410009,
"count": 97,
"self": 259.8720795850053,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.66393535599559,
"count": 2910,
"self": 53.66393535599559
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.119999049289618e-07,
"count": 1,
"self": 9.119999049289618e-07
},
"TrainerController._save_models": {
"total": 0.12040645200022482,
"count": 1,
"self": 0.0020434510001905437,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11836300100003427,
"count": 1,
"self": 0.11836300100003427
}
}
}
}
}
}
}