{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.35866230726242065,
"min": 0.35866230726242065,
"max": 1.3993011713027954,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10880.3798828125,
"min": 10880.3798828125,
"max": 42449.19921875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989873.0,
"min": 29952.0,
"max": 989873.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989873.0,
"min": 29952.0,
"max": 989873.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5610277652740479,
"min": -0.07672976702451706,
"max": 0.5867434144020081,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 158.77085876464844,
"min": -18.49187469482422,
"max": 166.6351318359375,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.019127245992422104,
"min": 0.010812677443027496,
"max": 0.6053438186645508,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 5.413010597229004,
"min": 2.9842989444732666,
"max": 143.46649169921875,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06887442944432787,
"min": 0.06548510361157935,
"max": 0.0732635804577977,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9642420122205901,
"min": 0.5059841690382962,
"max": 1.0908435207578198,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015459876919131992,
"min": 0.0008229976322410607,
"max": 0.018043673940659,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2164382768678479,
"min": 0.01152196685137485,
"max": 0.24905205308944764,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.30036185229286e-06,
"min": 7.30036185229286e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010220506593210003,
"min": 0.00010220506593210003,
"max": 0.0036365125878291995,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10243342142857144,
"min": 0.10243342142857144,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4340679,
"min": 1.3886848,
"max": 2.6121708,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025309880071428583,
"min": 0.00025309880071428583,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035433832100000015,
"min": 0.0035433832100000015,
"max": 0.12123586292,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.014259764924645424,
"min": 0.014259764924645424,
"max": 0.6292320489883423,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.19963671267032623,
"min": 0.19963671267032623,
"max": 4.4046244621276855,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 332.8901098901099,
"min": 329.0888888888889,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30293.0,
"min": 15984.0,
"max": 32885.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6011516279899156,
"min": -1.0000000521540642,
"max": 1.6475590704516931,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 145.70479814708233,
"min": -30.565001666545868,
"max": 152.737198241055,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6011516279899156,
"min": -1.0000000521540642,
"max": 1.6475590704516931,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 145.70479814708233,
"min": -30.565001666545868,
"max": 152.737198241055,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04855204433660729,
"min": 0.04855204433660729,
"max": 13.744093339890242,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.4182360346312635,
"min": 4.3243188370834105,
"max": 219.90549343824387,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1684167734",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1684169907"
},
"total": 2173.1350519029993,
"count": 1,
"self": 0.47498634599924117,
"children": {
"run_training.setup": {
"total": 0.06023302500011596,
"count": 1,
"self": 0.06023302500011596
},
"TrainerController.start_learning": {
"total": 2172.5998325319997,
"count": 1,
"self": 1.2422317269874839,
"children": {
"TrainerController._reset_env": {
"total": 4.168205598999975,
"count": 1,
"self": 4.168205598999975
},
"TrainerController.advance": {
"total": 2167.100573657012,
"count": 63973,
"self": 1.2753728590264473,
"children": {
"env_step": {
"total": 1543.7632127849558,
"count": 63973,
"self": 1441.298102238006,
"children": {
"SubprocessEnvManager._take_step": {
"total": 101.73182336900572,
"count": 63973,
"self": 4.568509338993181,
"children": {
"TorchPolicy.evaluate": {
"total": 97.16331403001254,
"count": 62547,
"self": 97.16331403001254
}
}
},
"workers": {
"total": 0.7332871779440211,
"count": 63973,
"self": 0.0,
"children": {
"worker_root": {
"total": 2168.03935150507,
"count": 63973,
"is_parallel": true,
"self": 833.7086661030905,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0026858389999233623,
"count": 1,
"is_parallel": true,
"self": 0.0007584410000163189,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019273979999070434,
"count": 8,
"is_parallel": true,
"self": 0.0019273979999070434
}
}
},
"UnityEnvironment.step": {
"total": 0.04482039200001964,
"count": 1,
"is_parallel": true,
"self": 0.0005461279997689417,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004585670001233666,
"count": 1,
"is_parallel": true,
"self": 0.0004585670001233666
},
"communicator.exchange": {
"total": 0.042049202000043806,
"count": 1,
"is_parallel": true,
"self": 0.042049202000043806
},
"steps_from_proto": {
"total": 0.0017664950000835233,
"count": 1,
"is_parallel": true,
"self": 0.0003616439998950227,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014048510001885006,
"count": 8,
"is_parallel": true,
"self": 0.0014048510001885006
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1334.3306854019795,
"count": 63972,
"is_parallel": true,
"self": 32.41259530891966,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 21.78714580705605,
"count": 63972,
"is_parallel": true,
"self": 21.78714580705605
},
"communicator.exchange": {
"total": 1182.5304673619278,
"count": 63972,
"is_parallel": true,
"self": 1182.5304673619278
},
"steps_from_proto": {
"total": 97.60047692407602,
"count": 63972,
"is_parallel": true,
"self": 18.868905130248777,
"children": {
"_process_rank_one_or_two_observation": {
"total": 78.73157179382724,
"count": 511776,
"is_parallel": true,
"self": 78.73157179382724
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 622.0619880130293,
"count": 63973,
"self": 2.502087075107511,
"children": {
"process_trajectory": {
"total": 105.33646378591925,
"count": 63973,
"self": 105.02895251691939,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3075112689998605,
"count": 2,
"self": 0.3075112689998605
}
}
},
"_update_policy": {
"total": 514.2234371520026,
"count": 456,
"self": 332.8317823289924,
"children": {
"TorchPPOOptimizer.update": {
"total": 181.39165482301019,
"count": 22803,
"self": 181.39165482301019
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.4600000011269e-07,
"count": 1,
"self": 9.4600000011269e-07
},
"TrainerController._save_models": {
"total": 0.08882060300038574,
"count": 1,
"self": 0.001353573999949731,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08746702900043601,
"count": 1,
"self": 0.08746702900043601
}
}
}
}
}
}
}