{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4057316780090332,
"min": 1.4057316780090332,
"max": 1.4270449876785278,
"count": 32
},
"Huggy.Policy.Entropy.sum": {
"value": 69523.2734375,
"min": 67456.140625,
"max": 78356.0625,
"count": 32
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 75.03951367781156,
"min": 73.65424739195231,
"max": 371.3897058823529,
"count": 32
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49376.0,
"min": 49065.0,
"max": 50509.0,
"count": 32
},
"Huggy.Step.mean": {
"value": 1599975.0,
"min": 49895.0,
"max": 1599975.0,
"count": 32
},
"Huggy.Step.sum": {
"value": 1599975.0,
"min": 49895.0,
"max": 1599975.0,
"count": 32
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.483839988708496,
"min": -0.04450579360127449,
"max": 2.483839988708496,
"count": 32
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1634.36669921875,
"min": -6.00828218460083,
"max": 1634.36669921875,
"count": 32
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.916504387978725,
"min": 1.7891569998529222,
"max": 4.075430088505453,
"count": 32
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2577.059887290001,
"min": 241.5361949801445,
"max": 2597.2833672761917,
"count": 32
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.916504387978725,
"min": 1.7891569998529222,
"max": 4.075430088505453,
"count": 32
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2577.059887290001,
"min": 241.5361949801445,
"max": 2597.2833672761917,
"count": 32
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016737234519435637,
"min": 0.012369157458200181,
"max": 0.019922754033662688,
"count": 32
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.033474469038871274,
"min": 0.024738314916400363,
"max": 0.05976826210098807,
"count": 32
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05467135310173035,
"min": 0.023668925153712432,
"max": 0.06081185332602925,
"count": 32
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1093427062034607,
"min": 0.047337850307424864,
"max": 0.18243555997808775,
"count": 32
},
"Huggy.Policy.LearningRate.mean": {
"value": 6.3993303668925e-05,
"min": 6.3993303668925e-05,
"max": 0.0002953509015497,
"count": 32
},
"Huggy.Policy.LearningRate.sum": {
"value": 0.00012798660733785,
"min": 0.00012798660733785,
"max": 0.0008442739685753499,
"count": 32
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.12133107500000001,
"min": 0.12133107500000001,
"max": 0.19845029999999997,
"count": 32
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.24266215000000002,
"min": 0.24266215000000002,
"max": 0.58142465,
"count": 32
},
"Huggy.Policy.Beta.mean": {
"value": 0.0010744206424999998,
"min": 0.0010744206424999998,
"max": 0.00492266997,
"count": 32
},
"Huggy.Policy.Beta.sum": {
"value": 0.0021488412849999996,
"min": 0.0021488412849999996,
"max": 0.014073090035000001,
"count": 32
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 32
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 32
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670574078",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670575860"
},
"total": 1782.5666559279998,
"count": 1,
"self": 0.2543382039998505,
"children": {
"run_training.setup": {
"total": 0.10519172900012563,
"count": 1,
"self": 0.10519172900012563
},
"TrainerController.start_learning": {
"total": 1782.2071259949998,
"count": 1,
"self": 3.125387361066487,
"children": {
"TrainerController._reset_env": {
"total": 10.104404549000037,
"count": 1,
"self": 10.104404549000037
},
"TrainerController.advance": {
"total": 1768.7924722629332,
"count": 188170,
"self": 3.113089320037261,
"children": {
"env_step": {
"total": 1388.0308488930132,
"count": 188170,
"self": 1166.0548897439514,
"children": {
"SubprocessEnvManager._take_step": {
"total": 219.8919852440979,
"count": 188170,
"self": 11.237367498229332,
"children": {
"TorchPolicy.evaluate": {
"total": 208.65461774586856,
"count": 180242,
"self": 52.478459624966035,
"children": {
"TorchPolicy.sample_actions": {
"total": 156.17615812090253,
"count": 180242,
"self": 156.17615812090253
}
}
}
}
},
"workers": {
"total": 2.083973904963841,
"count": 188169,
"self": 0.0,
"children": {
"worker_root": {
"total": 1775.9964961410549,
"count": 188169,
"is_parallel": true,
"self": 815.5529754729898,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0024221599999236787,
"count": 1,
"is_parallel": true,
"self": 0.0003945610001210298,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002027598999802649,
"count": 2,
"is_parallel": true,
"self": 0.002027598999802649
}
}
},
"UnityEnvironment.step": {
"total": 0.027145861999997578,
"count": 1,
"is_parallel": true,
"self": 0.00033613999971748854,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00017366700012644287,
"count": 1,
"is_parallel": true,
"self": 0.00017366700012644287
},
"communicator.exchange": {
"total": 0.025853257000107988,
"count": 1,
"is_parallel": true,
"self": 0.025853257000107988
},
"steps_from_proto": {
"total": 0.0007827980000456591,
"count": 1,
"is_parallel": true,
"self": 0.00027631500006464194,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005064829999810172,
"count": 2,
"is_parallel": true,
"self": 0.0005064829999810172
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 960.443520668065,
"count": 188168,
"is_parallel": true,
"self": 27.723698292064228,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 60.80893625805584,
"count": 188168,
"is_parallel": true,
"self": 60.80893625805584
},
"communicator.exchange": {
"total": 797.1764578109444,
"count": 188168,
"is_parallel": true,
"self": 797.1764578109444
},
"steps_from_proto": {
"total": 74.7344283070006,
"count": 188168,
"is_parallel": true,
"self": 30.83328682703427,
"children": {
"_process_rank_one_or_two_observation": {
"total": 43.90114147996633,
"count": 376336,
"is_parallel": true,
"self": 43.90114147996633
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 377.64853404988276,
"count": 188169,
"self": 4.746037505966569,
"children": {
"process_trajectory": {
"total": 119.64486633491629,
"count": 188169,
"self": 119.29549402691669,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3493723079996016,
"count": 3,
"self": 0.3493723079996016
}
}
},
"_update_policy": {
"total": 253.2576302089999,
"count": 78,
"self": 210.08176795701047,
"children": {
"TorchPPOOptimizer.update": {
"total": 43.17586225198943,
"count": 2340,
"self": 43.17586225198943
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3760000001639128e-06,
"count": 1,
"self": 1.3760000001639128e-06
},
"TrainerController._save_models": {
"total": 0.18486044600012974,
"count": 1,
"self": 0.0027408400001149857,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18211960600001476,
"count": 1,
"self": 0.18211960600001476
}
}
}
}
}
}
}