{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5609207153320312,
"min": 0.5550739169120789,
"max": 1.395443320274353,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 16710.94921875,
"min": 16710.94921875,
"max": 42332.16796875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989952.0,
"min": 29900.0,
"max": 989952.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989952.0,
"min": 29900.0,
"max": 989952.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5511177182197571,
"min": -0.08345665037631989,
"max": 0.5641608238220215,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 154.86407470703125,
"min": -20.113052368164062,
"max": 155.14422607421875,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.012479162774980068,
"min": -0.009399117901921272,
"max": 0.32309338450431824,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.5066447257995605,
"min": -2.5753583908081055,
"max": 77.54241180419922,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0673223896599727,
"min": 0.06573836891947794,
"max": 0.07376802339085999,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9425134552396177,
"min": 0.571805765652447,
"max": 1.1059773351270112,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01522596007907086,
"min": 0.000799258206186218,
"max": 0.01663943660678342,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21316344110699204,
"min": 0.009591098474234616,
"max": 0.24959154910175127,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.686226009385717e-06,
"min": 7.686226009385717e-06,
"max": 0.00029500035166655,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010760716413140004,
"min": 0.00010760716413140004,
"max": 0.0036322093892635996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10256204285714286,
"min": 0.10256204285714286,
"max": 0.19833345,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4358686,
"min": 1.4358686,
"max": 2.6107364,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002659480814285716,
"min": 0.0002659480814285716,
"max": 0.009833511654999998,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003723273140000002,
"min": 0.003723273140000002,
"max": 0.12109256635999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.013258623890578747,
"min": 0.013111588545143604,
"max": 0.47583431005477905,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1856207400560379,
"min": 0.183562234044075,
"max": 3.8066744804382324,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 333.9512195121951,
"min": 333.9512195121951,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 27384.0,
"min": 17082.0,
"max": 33095.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.64164876174636,
"min": -0.9999419873760592,
"max": 1.64164876174636,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 134.61519846320152,
"min": -30.998201608657837,
"max": 138.3573983311653,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.64164876174636,
"min": -0.9999419873760592,
"max": 1.64164876174636,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 134.61519846320152,
"min": -30.998201608657837,
"max": 138.3573983311653,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04589055652131054,
"min": 0.04589055652131054,
"max": 8.64536305434174,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.763025634747464,
"min": 3.763025634747464,
"max": 155.61653497815132,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1687112111",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1687114573"
},
"total": 2462.021744918,
"count": 1,
"self": 1.1084785479988568,
"children": {
"run_training.setup": {
"total": 0.04174876000070071,
"count": 1,
"self": 0.04174876000070071
},
"TrainerController.start_learning": {
"total": 2460.8715176100004,
"count": 1,
"self": 1.7122806310508167,
"children": {
"TrainerController._reset_env": {
"total": 4.725388283999564,
"count": 1,
"self": 4.725388283999564
},
"TrainerController.advance": {
"total": 2454.317271210949,
"count": 63913,
"self": 1.7505984818926663,
"children": {
"env_step": {
"total": 1784.853047866044,
"count": 63913,
"self": 1662.006885662051,
"children": {
"SubprocessEnvManager._take_step": {
"total": 121.88977028702993,
"count": 63913,
"self": 5.282818576911268,
"children": {
"TorchPolicy.evaluate": {
"total": 116.60695171011866,
"count": 62553,
"self": 116.60695171011866
}
}
},
"workers": {
"total": 0.9563919169631845,
"count": 63913,
"self": 0.0,
"children": {
"worker_root": {
"total": 2454.4542479959346,
"count": 63913,
"is_parallel": true,
"self": 920.7708041019341,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019115130007776315,
"count": 1,
"is_parallel": true,
"self": 0.0005995849996907054,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001311928001086926,
"count": 8,
"is_parallel": true,
"self": 0.001311928001086926
}
}
},
"UnityEnvironment.step": {
"total": 0.1024508220007192,
"count": 1,
"is_parallel": true,
"self": 0.0006324500009213807,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00045807599963154644,
"count": 1,
"is_parallel": true,
"self": 0.00045807599963154644
},
"communicator.exchange": {
"total": 0.09935798200058343,
"count": 1,
"is_parallel": true,
"self": 0.09935798200058343
},
"steps_from_proto": {
"total": 0.0020023139995828387,
"count": 1,
"is_parallel": true,
"self": 0.00035135199959768215,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016509619999851566,
"count": 8,
"is_parallel": true,
"self": 0.0016509619999851566
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1533.6834438940004,
"count": 63912,
"is_parallel": true,
"self": 36.33494097501625,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.477291079922907,
"count": 63912,
"is_parallel": true,
"self": 24.477291079922907
},
"communicator.exchange": {
"total": 1362.447713341011,
"count": 63912,
"is_parallel": true,
"self": 1362.447713341011
},
"steps_from_proto": {
"total": 110.42349849805032,
"count": 63912,
"is_parallel": true,
"self": 22.334162450102667,
"children": {
"_process_rank_one_or_two_observation": {
"total": 88.08933604794765,
"count": 511296,
"is_parallel": true,
"self": 88.08933604794765
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 667.7136248630122,
"count": 63913,
"self": 3.0219805721462762,
"children": {
"process_trajectory": {
"total": 113.15612898786003,
"count": 63913,
"self": 112.93090543186008,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22522355599994626,
"count": 2,
"self": 0.22522355599994626
}
}
},
"_update_policy": {
"total": 551.5355153030059,
"count": 457,
"self": 354.64018996400864,
"children": {
"TorchPPOOptimizer.update": {
"total": 196.89532533899728,
"count": 22806,
"self": 196.89532533899728
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1910005923709832e-06,
"count": 1,
"self": 1.1910005923709832e-06
},
"TrainerController._save_models": {
"total": 0.11657629300043482,
"count": 1,
"self": 0.001025455000672082,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11555083799976273,
"count": 1,
"self": 0.11555083799976273
}
}
}
}
}
}
}