{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.398861050605774,
"min": 1.398861050605774,
"max": 1.4246208667755127,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69025.3984375,
"min": 68343.65625,
"max": 76680.75,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 85.46113989637306,
"min": 77.68346456692913,
"max": 420.5378151260504,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49482.0,
"min": 49253.0,
"max": 50107.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999991.0,
"min": 49637.0,
"max": 1999991.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999991.0,
"min": 49637.0,
"max": 1999991.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4429776668548584,
"min": 0.1602545529603958,
"max": 2.486964464187622,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1414.484130859375,
"min": 18.910037994384766,
"max": 1543.209716796875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.842637016262621,
"min": 1.9009619905155595,
"max": 3.9931660855287,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2224.8868324160576,
"min": 224.313514880836,
"max": 2429.6563808321953,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.842637016262621,
"min": 1.9009619905155595,
"max": 3.9931660855287,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2224.8868324160576,
"min": 224.313514880836,
"max": 2429.6563808321953,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01584160807542503,
"min": 0.014456782383300986,
"max": 0.020585601924398764,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04752482422627509,
"min": 0.028913564766601972,
"max": 0.0617568057731963,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05738332619269689,
"min": 0.02067596431200703,
"max": 0.07215208320154083,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.17214997857809067,
"min": 0.04135192862401406,
"max": 0.2164562496046225,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.5882988039333414e-06,
"min": 3.5882988039333414e-06,
"max": 0.000295308076563975,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0764896411800024e-05,
"min": 1.0764896411800024e-05,
"max": 0.0008437428187523997,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10119606666666668,
"min": 0.10119606666666668,
"max": 0.19843602500000007,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30358820000000003,
"min": 0.20752170000000003,
"max": 0.5812476,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.968372666666681e-05,
"min": 6.968372666666681e-05,
"max": 0.004921957647500001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020905118000000042,
"min": 0.00020905118000000042,
"max": 0.01406425524,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1684593670",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1684596137"
},
"total": 2466.953112436,
"count": 1,
"self": 0.4457629859998633,
"children": {
"run_training.setup": {
"total": 0.04962743999999475,
"count": 1,
"self": 0.04962743999999475
},
"TrainerController.start_learning": {
"total": 2466.45772201,
"count": 1,
"self": 4.5057493960930515,
"children": {
"TrainerController._reset_env": {
"total": 3.8253772809999873,
"count": 1,
"self": 3.8253772809999873
},
"TrainerController.advance": {
"total": 2458.002786680907,
"count": 232781,
"self": 4.585548909962199,
"children": {
"env_step": {
"total": 1896.625261489014,
"count": 232781,
"self": 1603.2607499780415,
"children": {
"SubprocessEnvManager._take_step": {
"total": 290.4420745088971,
"count": 232781,
"self": 17.472097285950326,
"children": {
"TorchPolicy.evaluate": {
"total": 272.9699772229468,
"count": 222927,
"self": 272.9699772229468
}
}
},
"workers": {
"total": 2.9224370020754122,
"count": 232781,
"self": 0.0,
"children": {
"worker_root": {
"total": 2458.6216004618827,
"count": 232781,
"is_parallel": true,
"self": 1150.66796130411,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.000971378000031109,
"count": 1,
"is_parallel": true,
"self": 0.00029314400001112517,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006782340000199838,
"count": 2,
"is_parallel": true,
"self": 0.0006782340000199838
}
}
},
"UnityEnvironment.step": {
"total": 0.028319354000018393,
"count": 1,
"is_parallel": true,
"self": 0.00030422299994370405,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00022401700005048042,
"count": 1,
"is_parallel": true,
"self": 0.00022401700005048042
},
"communicator.exchange": {
"total": 0.02710767299993222,
"count": 1,
"is_parallel": true,
"self": 0.02710767299993222
},
"steps_from_proto": {
"total": 0.0006834410000919888,
"count": 1,
"is_parallel": true,
"self": 0.00018320300011964719,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005002379999723416,
"count": 2,
"is_parallel": true,
"self": 0.0005002379999723416
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1307.9536391577728,
"count": 232780,
"is_parallel": true,
"self": 39.741582383786636,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 79.99023349598508,
"count": 232780,
"is_parallel": true,
"self": 79.99023349598508
},
"communicator.exchange": {
"total": 1091.843645824018,
"count": 232780,
"is_parallel": true,
"self": 1091.843645824018
},
"steps_from_proto": {
"total": 96.37817745398308,
"count": 232780,
"is_parallel": true,
"self": 34.79058932000635,
"children": {
"_process_rank_one_or_two_observation": {
"total": 61.58758813397674,
"count": 465560,
"is_parallel": true,
"self": 61.58758813397674
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 556.7919762819306,
"count": 232781,
"self": 6.765023584056394,
"children": {
"process_trajectory": {
"total": 138.89849898687555,
"count": 232781,
"self": 137.63724252087525,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2612564660003045,
"count": 10,
"self": 1.2612564660003045
}
}
},
"_update_policy": {
"total": 411.12845371099866,
"count": 97,
"self": 349.6743762949942,
"children": {
"TorchPPOOptimizer.update": {
"total": 61.45407741600445,
"count": 2910,
"self": 61.45407741600445
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.223000253958162e-06,
"count": 1,
"self": 1.223000253958162e-06
},
"TrainerController._save_models": {
"total": 0.12380742899995312,
"count": 1,
"self": 0.0020416050001585972,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12176582399979452,
"count": 1,
"self": 0.12176582399979452
}
}
}
}
}
}
}