{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.407686471939087,
"min": 1.407686471939087,
"max": 1.4281586408615112,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 68703.546875,
"min": 68564.421875,
"max": 77948.359375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 90.86213235294117,
"min": 84.38461538461539,
"max": 396.92913385826773,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49429.0,
"min": 49122.0,
"max": 50410.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999954.0,
"min": 49814.0,
"max": 1999954.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999954.0,
"min": 49814.0,
"max": 1999954.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.388575553894043,
"min": 0.01320489589124918,
"max": 2.4594802856445312,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1299.3851318359375,
"min": 1.6638169288635254,
"max": 1379.4686279296875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7419488443171276,
"min": 1.8689357943478084,
"max": 3.9063646756451855,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2035.6201713085175,
"min": 235.48591008782387,
"max": 2169.014094889164,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7419488443171276,
"min": 1.8689357943478084,
"max": 3.9063646756451855,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2035.6201713085175,
"min": 235.48591008782387,
"max": 2169.014094889164,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017973322317832045,
"min": 0.013226937520327435,
"max": 0.01956920241322627,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.053919966953496136,
"min": 0.02645387504065487,
"max": 0.05870760723967881,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05170359400411447,
"min": 0.022990238149132994,
"max": 0.05489820254345735,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1551107820123434,
"min": 0.04833087362349034,
"max": 0.16291377072532973,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.4516988494666646e-06,
"min": 3.4516988494666646e-06,
"max": 0.000295327276557575,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0355096548399993e-05,
"min": 1.0355096548399993e-05,
"max": 0.0008441484186171999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10115053333333335,
"min": 0.10115053333333335,
"max": 0.19844242500000003,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30345160000000004,
"min": 0.20742670000000002,
"max": 0.5813828000000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.741161333333335e-05,
"min": 6.741161333333335e-05,
"max": 0.0049222770075,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020223484000000003,
"min": 0.00020223484000000003,
"max": 0.01407100172,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670961671",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670963924"
},
"total": 2252.7400932299997,
"count": 1,
"self": 0.3962675759994454,
"children": {
"run_training.setup": {
"total": 0.11476383900003384,
"count": 1,
"self": 0.11476383900003384
},
"TrainerController.start_learning": {
"total": 2252.229061815,
"count": 1,
"self": 3.882184409023921,
"children": {
"TrainerController._reset_env": {
"total": 10.083884673,
"count": 1,
"self": 10.083884673
},
"TrainerController.advance": {
"total": 2238.147155857976,
"count": 231751,
"self": 4.08876219299691,
"children": {
"env_step": {
"total": 1768.7174794140162,
"count": 231751,
"self": 1480.15377697095,
"children": {
"SubprocessEnvManager._take_step": {
"total": 285.90468336006626,
"count": 231751,
"self": 14.900519066079426,
"children": {
"TorchPolicy.evaluate": {
"total": 271.00416429398683,
"count": 222927,
"self": 67.22938684796168,
"children": {
"TorchPolicy.sample_actions": {
"total": 203.77477744602515,
"count": 222927,
"self": 203.77477744602515
}
}
}
}
},
"workers": {
"total": 2.659019082999862,
"count": 231751,
"self": 0.0,
"children": {
"worker_root": {
"total": 2244.200678631045,
"count": 231751,
"is_parallel": true,
"self": 1029.5116031930086,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.003435244000002058,
"count": 1,
"is_parallel": true,
"self": 0.00038348399999676985,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003051760000005288,
"count": 2,
"is_parallel": true,
"self": 0.003051760000005288
}
}
},
"UnityEnvironment.step": {
"total": 0.027502957999956834,
"count": 1,
"is_parallel": true,
"self": 0.00027710699993122034,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00018295500001386245,
"count": 1,
"is_parallel": true,
"self": 0.00018295500001386245
},
"communicator.exchange": {
"total": 0.026328293000005942,
"count": 1,
"is_parallel": true,
"self": 0.026328293000005942
},
"steps_from_proto": {
"total": 0.000714603000005809,
"count": 1,
"is_parallel": true,
"self": 0.00024963999999272346,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00046496300001308555,
"count": 2,
"is_parallel": true,
"self": 0.00046496300001308555
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1214.6890754380363,
"count": 231750,
"is_parallel": true,
"self": 36.02532519519468,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 80.9331709959734,
"count": 231750,
"is_parallel": true,
"self": 80.9331709959734
},
"communicator.exchange": {
"total": 1000.3100786899265,
"count": 231750,
"is_parallel": true,
"self": 1000.3100786899265
},
"steps_from_proto": {
"total": 97.42050055694182,
"count": 231750,
"is_parallel": true,
"self": 42.66117536178251,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.759325195159306,
"count": 463500,
"is_parallel": true,
"self": 54.759325195159306
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 465.34091425096267,
"count": 231751,
"self": 6.214561086949402,
"children": {
"process_trajectory": {
"total": 152.6539491730142,
"count": 231751,
"self": 152.1603290700141,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4936201030001257,
"count": 4,
"self": 0.4936201030001257
}
}
},
"_update_policy": {
"total": 306.47240399099906,
"count": 97,
"self": 253.30546490499722,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.16693908600183,
"count": 2910,
"self": 53.16693908600183
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0140001904801466e-06,
"count": 1,
"self": 1.0140001904801466e-06
},
"TrainerController._save_models": {
"total": 0.1158358610000505,
"count": 1,
"self": 0.002028074000008928,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11380778700004157,
"count": 1,
"self": 0.11380778700004157
}
}
}
}
}
}
}