{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4020355939865112,
"min": 1.4020355939865112,
"max": 1.4270888566970825,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70124.2109375,
"min": 69454.7421875,
"max": 76330.015625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 70.9568345323741,
"min": 68.35277777777777,
"max": 412.0327868852459,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49315.0,
"min": 49083.0,
"max": 50268.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999986.0,
"min": 49787.0,
"max": 1999986.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999986.0,
"min": 49787.0,
"max": 1999986.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.5125083923339844,
"min": 0.22056181728839874,
"max": 2.581148386001587,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1746.193359375,
"min": 26.68798065185547,
"max": 1795.1309814453125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.9268506382009107,
"min": 1.8642849151260596,
"max": 4.067575732268483,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2729.161193549633,
"min": 225.57847473025322,
"max": 2756.26691788435,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.9268506382009107,
"min": 1.8642849151260596,
"max": 4.067575732268483,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2729.161193549633,
"min": 225.57847473025322,
"max": 2756.26691788435,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.018502883807135124,
"min": 0.013709100597932573,
"max": 0.02138868095189537,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.055508651421405375,
"min": 0.029186686036215784,
"max": 0.056041625852230936,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05660761665138933,
"min": 0.020171530451625584,
"max": 0.06295947699497143,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.169822849954168,
"min": 0.04034306090325117,
"max": 0.17725707255303857,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.943548685516671e-06,
"min": 3.943548685516671e-06,
"max": 0.0002953647015451,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1830646056550014e-05,
"min": 1.1830646056550014e-05,
"max": 0.0008440152186616,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10131448333333332,
"min": 0.10131448333333332,
"max": 0.1984549,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30394344999999995,
"min": 0.20775480000000002,
"max": 0.5813383999999999,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.559271833333341e-05,
"min": 7.559271833333341e-05,
"max": 0.00492289951,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00022677815500000024,
"min": 0.00022677815500000024,
"max": 0.014068786160000003,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670579108",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670581512"
},
"total": 2404.2552013170002,
"count": 1,
"self": 0.3875552630001948,
"children": {
"run_training.setup": {
"total": 0.10409922700000607,
"count": 1,
"self": 0.10409922700000607
},
"TrainerController.start_learning": {
"total": 2403.763546827,
"count": 1,
"self": 4.142599072139092,
"children": {
"TrainerController._reset_env": {
"total": 9.996327605000033,
"count": 1,
"self": 9.996327605000033
},
"TrainerController.advance": {
"total": 2389.5103552478618,
"count": 233805,
"self": 4.636138827916511,
"children": {
"env_step": {
"total": 1895.7494262560476,
"count": 233805,
"self": 1593.6097360650747,
"children": {
"SubprocessEnvManager._take_step": {
"total": 299.2511979959718,
"count": 233805,
"self": 15.697202677871246,
"children": {
"TorchPolicy.evaluate": {
"total": 283.55399531810053,
"count": 222904,
"self": 71.34602436707041,
"children": {
"TorchPolicy.sample_actions": {
"total": 212.20797095103012,
"count": 222904,
"self": 212.20797095103012
}
}
}
}
},
"workers": {
"total": 2.888492195001163,
"count": 233805,
"self": 0.0,
"children": {
"worker_root": {
"total": 2395.0971207201214,
"count": 233805,
"is_parallel": true,
"self": 1084.3848197600591,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021301010000343013,
"count": 1,
"is_parallel": true,
"self": 0.00035872199998721044,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017713790000470908,
"count": 2,
"is_parallel": true,
"self": 0.0017713790000470908
}
}
},
"UnityEnvironment.step": {
"total": 0.02927759600004265,
"count": 1,
"is_parallel": true,
"self": 0.00031357199998183205,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00018830000010439107,
"count": 1,
"is_parallel": true,
"self": 0.00018830000010439107
},
"communicator.exchange": {
"total": 0.027933210000014697,
"count": 1,
"is_parallel": true,
"self": 0.027933210000014697
},
"steps_from_proto": {
"total": 0.0008425139999417297,
"count": 1,
"is_parallel": true,
"self": 0.00027889299997241324,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005636209999693165,
"count": 2,
"is_parallel": true,
"self": 0.0005636209999693165
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1310.7123009600623,
"count": 233804,
"is_parallel": true,
"self": 36.4343358931153,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 85.24827826696242,
"count": 233804,
"is_parallel": true,
"self": 85.24827826696242
},
"communicator.exchange": {
"total": 1089.026962048973,
"count": 233804,
"is_parallel": true,
"self": 1089.026962048973
},
"steps_from_proto": {
"total": 100.00272475101156,
"count": 233804,
"is_parallel": true,
"self": 43.6986235860461,
"children": {
"_process_rank_one_or_two_observation": {
"total": 56.30410116496546,
"count": 467608,
"is_parallel": true,
"self": 56.30410116496546
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 489.1247901638977,
"count": 233805,
"self": 6.654805618782916,
"children": {
"process_trajectory": {
"total": 166.97446249011398,
"count": 233805,
"self": 166.49288695911457,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4815755309994074,
"count": 4,
"self": 0.4815755309994074
}
}
},
"_update_policy": {
"total": 315.49552205500083,
"count": 97,
"self": 261.8646387079855,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.630883347015356,
"count": 2910,
"self": 53.630883347015356
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.023999629978789e-06,
"count": 1,
"self": 1.023999629978789e-06
},
"TrainerController._save_models": {
"total": 0.11426387799974691,
"count": 1,
"self": 0.0025987859999077045,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1116650919998392,
"count": 1,
"self": 0.1116650919998392
}
}
}
}
}
}
}