ppo-Huggy / run_logs / timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.3991037607192993,
"min": 1.3991037607192993,
"max": 1.4248517751693726,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70316.15625,
"min": 68539.640625,
"max": 77477.0234375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 82.41068447412354,
"min": 80.78125,
"max": 387.0387596899225,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49364.0,
"min": 49115.0,
"max": 50065.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999968.0,
"min": 49307.0,
"max": 1999968.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999968.0,
"min": 49307.0,
"max": 1999968.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4595658779144287,
"min": 0.061697907745838165,
"max": 2.488400936126709,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1473.280029296875,
"min": 7.897332191467285,
"max": 1497.7586669921875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.83207467501868,
"min": 1.7480135189834982,
"max": 3.9825573247722073,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2295.4127303361893,
"min": 223.74573042988777,
"max": 2370.5127581357956,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.83207467501868,
"min": 1.7480135189834982,
"max": 3.9825573247722073,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2295.4127303361893,
"min": 223.74573042988777,
"max": 2370.5127581357956,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016834978946846806,
"min": 0.013339102023100067,
"max": 0.019045019042581164,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05050493684054042,
"min": 0.027732247298505777,
"max": 0.055426078139862514,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05181484926078055,
"min": 0.023329983527461687,
"max": 0.05849601117273171,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.15544454778234165,
"min": 0.046659967054923375,
"max": 0.17548803351819514,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.4526488491500102e-06,
"min": 3.4526488491500102e-06,
"max": 0.0002953644015452,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.035794654745003e-05,
"min": 1.035794654745003e-05,
"max": 0.0008443363685545498,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10115085000000001,
"min": 0.10115085000000001,
"max": 0.19845480000000001,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30345255000000004,
"min": 0.20746295,
"max": 0.58144545,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.742741500000017e-05,
"min": 6.742741500000017e-05,
"max": 0.00492289452,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020228224500000054,
"min": 0.00020228224500000054,
"max": 0.014074127955000001,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1683827856",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1683830154"
},
"total": 2297.766727694,
"count": 1,
"self": 0.4464219500005129,
"children": {
"run_training.setup": {
"total": 0.03926321599988114,
"count": 1,
"self": 0.03926321599988114
},
"TrainerController.start_learning": {
"total": 2297.281042528,
"count": 1,
"self": 4.157673988919669,
"children": {
"TrainerController._reset_env": {
"total": 3.79730670999993,
"count": 1,
"self": 3.79730670999993
},
"TrainerController.advance": {
"total": 2289.2029428110804,
"count": 232914,
"self": 4.252865141289931,
"children": {
"env_step": {
"total": 1776.0596326778184,
"count": 232914,
"self": 1504.2437005849554,
"children": {
"SubprocessEnvManager._take_step": {
"total": 269.1578821679648,
"count": 232914,
"self": 15.825597694033831,
"children": {
"TorchPolicy.evaluate": {
"total": 253.33228447393094,
"count": 223013,
"self": 253.33228447393094
}
}
},
"workers": {
"total": 2.6580499248982505,
"count": 232914,
"self": 0.0,
"children": {
"worker_root": {
"total": 2289.5739821860047,
"count": 232914,
"is_parallel": true,
"self": 1060.5289981921085,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009504849999757425,
"count": 1,
"is_parallel": true,
"self": 0.0002647179999257787,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006857670000499638,
"count": 2,
"is_parallel": true,
"self": 0.0006857670000499638
}
}
},
"UnityEnvironment.step": {
"total": 0.02881981900009123,
"count": 1,
"is_parallel": true,
"self": 0.00035796800011667074,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00022927600002731197,
"count": 1,
"is_parallel": true,
"self": 0.00022927600002731197
},
"communicator.exchange": {
"total": 0.02750365800011423,
"count": 1,
"is_parallel": true,
"self": 0.02750365800011423
},
"steps_from_proto": {
"total": 0.000728916999833018,
"count": 1,
"is_parallel": true,
"self": 0.00021580599991466443,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005131109999183536,
"count": 2,
"is_parallel": true,
"self": 0.0005131109999183536
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1229.0449839938963,
"count": 232913,
"is_parallel": true,
"self": 37.58620179081163,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 75.14435714698425,
"count": 232913,
"is_parallel": true,
"self": 75.14435714698425
},
"communicator.exchange": {
"total": 1026.5236906420114,
"count": 232913,
"is_parallel": true,
"self": 1026.5236906420114
},
"steps_from_proto": {
"total": 89.790734414089,
"count": 232913,
"is_parallel": true,
"self": 32.96562295792614,
"children": {
"_process_rank_one_or_two_observation": {
"total": 56.82511145616286,
"count": 465826,
"is_parallel": true,
"self": 56.82511145616286
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 508.8904449919719,
"count": 232914,
"self": 6.164135972850545,
"children": {
"process_trajectory": {
"total": 129.6638497031215,
"count": 232914,
"self": 128.39478240612175,
"children": {
"RLTrainer._checkpoint": {
"total": 1.269067296999765,
"count": 10,
"self": 1.269067296999765
}
}
},
"_update_policy": {
"total": 373.06245931599983,
"count": 97,
"self": 314.7668847480063,
"children": {
"TorchPPOOptimizer.update": {
"total": 58.295574567993526,
"count": 2910,
"self": 58.295574567993526
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.749998414714355e-07,
"count": 1,
"self": 8.749998414714355e-07
},
"TrainerController._save_models": {
"total": 0.12311814300028345,
"count": 1,
"self": 0.0025366250001752633,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12058151800010819,
"count": 1,
"self": 0.12058151800010819
}
}
}
}
}
}
}
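
This is the timer/gauge summary that ML-Agents writes at the end of the run: the "gauges" block records value/min/max/count for each tracked metric, "metadata" captures the run environment, and the remaining keys form a nested wall-clock timer tree where every node carries total seconds, call count, self time, and optional children. A minimal sketch for loading and summarizing the file is below; the file path in the snippet is an assumption and should point at wherever run_logs actually lives.

```python
import json

# A minimal sketch for inspecting this dump; the path is an assumption.
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# "gauges" holds one value/min/max/count summary per tracked metric.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:g} "
          f"(min={gauge['min']:g}, max={gauge['max']:g}, count={gauge['count']})")

# The rest of the file is a nested timer tree: each node has total seconds,
# call count, self time, and optional children. Walk it recursively.
def walk(name, node, depth=0):
    print(f"{'  ' * depth}{name}: total={node['total']:.2f}s, "
          f"count={node['count']}, self={node['self']:.2f}s")
    for child_name, child in node.get("children", {}).items():
        walk(child_name, child, depth + 1)

walk(timers.get("name", "root"), timers)
```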