{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4064387083053589,
"min": 1.4064387083053589,
"max": 1.429587721824646,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 72445.65625,
"min": 68126.03125,
"max": 77046.453125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 106.41648590021693,
"min": 84.58461538461539,
"max": 398.7936507936508,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49058.0,
"min": 48827.0,
"max": 50248.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999965.0,
"min": 49625.0,
"max": 1999965.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999965.0,
"min": 49625.0,
"max": 1999965.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3671765327453613,
"min": 0.1793675571680069,
"max": 2.4502859115600586,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1091.2684326171875,
"min": 22.420944213867188,
"max": 1400.45166015625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.5907912965451816,
"min": 1.8279388830661774,
"max": 3.946215929592107,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1655.3547877073288,
"min": 228.49236038327217,
"max": 2175.5463359355927,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.5907912965451816,
"min": 1.8279388830661774,
"max": 3.946215929592107,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1655.3547877073288,
"min": 228.49236038327217,
"max": 2175.5463359355927,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016036322519802425,
"min": 0.013656769838902013,
"max": 0.020140285381057763,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04810896755940727,
"min": 0.027313539677804026,
"max": 0.05942108825062556,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.04440565527313286,
"min": 0.022298189656188093,
"max": 0.06216996125876904,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.13321696581939857,
"min": 0.044596379312376186,
"max": 0.1765829092512528,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.346348884583328e-06,
"min": 3.346348884583328e-06,
"max": 0.000295321726559425,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0039046653749984e-05,
"min": 1.0039046653749984e-05,
"max": 0.0008439943686685502,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10111541666666668,
"min": 0.10111541666666668,
"max": 0.19844057500000006,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30334625000000004,
"min": 0.20743849999999997,
"max": 0.58133145,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.565929166666658e-05,
"min": 6.565929166666658e-05,
"max": 0.0049221846925000005,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00019697787499999976,
"min": 0.00019697787499999976,
"max": 0.014068439355,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670739899",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670742274"
},
"total": 2375.4751631080003,
"count": 1,
"self": 0.39424484900018797,
"children": {
"run_training.setup": {
"total": 0.1111939350000739,
"count": 1,
"self": 0.1111939350000739
},
"TrainerController.start_learning": {
"total": 2374.9697243240003,
"count": 1,
"self": 4.273159273860983,
"children": {
"TrainerController._reset_env": {
"total": 10.541931007000017,
"count": 1,
"self": 10.541931007000017
},
"TrainerController.advance": {
"total": 2360.029684916139,
"count": 232166,
"self": 4.321402199149816,
"children": {
"env_step": {
"total": 1868.5360949779615,
"count": 232166,
"self": 1570.1681532807925,
"children": {
"SubprocessEnvManager._take_step": {
"total": 295.5869795110714,
"count": 232166,
"self": 15.390184131043497,
"children": {
"TorchPolicy.evaluate": {
"total": 280.1967953800279,
"count": 223200,
"self": 69.73981056007517,
"children": {
"TorchPolicy.sample_actions": {
"total": 210.4569848199527,
"count": 223200,
"self": 210.4569848199527
}
}
}
}
},
"workers": {
"total": 2.780962186097554,
"count": 232166,
"self": 0.0,
"children": {
"worker_root": {
"total": 2366.5302458031356,
"count": 232166,
"is_parallel": true,
"self": 1078.1913122659453,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020635289999972883,
"count": 1,
"is_parallel": true,
"self": 0.00036013099997944664,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017033980000178417,
"count": 2,
"is_parallel": true,
"self": 0.0017033980000178417
}
}
},
"UnityEnvironment.step": {
"total": 0.037327301999994233,
"count": 1,
"is_parallel": true,
"self": 0.0003027760001259594,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00020080899992080958,
"count": 1,
"is_parallel": true,
"self": 0.00020080899992080958
},
"communicator.exchange": {
"total": 0.036004987999945115,
"count": 1,
"is_parallel": true,
"self": 0.036004987999945115
},
"steps_from_proto": {
"total": 0.0008187290000023495,
"count": 1,
"is_parallel": true,
"self": 0.00027370399993742467,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005450250000649248,
"count": 2,
"is_parallel": true,
"self": 0.0005450250000649248
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1288.3389335371903,
"count": 232165,
"is_parallel": true,
"self": 36.66170837705977,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 84.04903973192506,
"count": 232165,
"is_parallel": true,
"self": 84.04903973192506
},
"communicator.exchange": {
"total": 1067.4658632661476,
"count": 232165,
"is_parallel": true,
"self": 1067.4658632661476
},
"steps_from_proto": {
"total": 100.16232216205799,
"count": 232165,
"is_parallel": true,
"self": 43.79431237811389,
"children": {
"_process_rank_one_or_two_observation": {
"total": 56.3680097839441,
"count": 464330,
"is_parallel": true,
"self": 56.3680097839441
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 487.17218773902766,
"count": 232166,
"self": 6.36224388398341,
"children": {
"process_trajectory": {
"total": 154.61502468604385,
"count": 232166,
"self": 154.13103544604428,
"children": {
"RLTrainer._checkpoint": {
"total": 0.48398923999957333,
"count": 4,
"self": 0.48398923999957333
}
}
},
"_update_policy": {
"total": 326.1949191690004,
"count": 97,
"self": 272.2775672090015,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.91735195999888,
"count": 2910,
"self": 53.91735195999888
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.569998837832827e-07,
"count": 1,
"self": 9.569998837832827e-07
},
"TrainerController._save_models": {
"total": 0.12494817000015246,
"count": 1,
"self": 0.0019769549999182345,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12297121500023422,
"count": 1,
"self": 0.12297121500023422
}
}
}
}
}
}
}