First Push (f7d6d58)
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.357063889503479,
"min": 0.3229082524776459,
"max": 1.509750247001648,
"count": 36
},
"Pyramids.Policy.Entropy.sum": {
"value": 10780.47265625,
"min": 9552.91796875,
"max": 45799.78515625,
"count": 36
},
"Pyramids.Step.mean": {
"value": 1079968.0,
"min": 29952.0,
"max": 1079968.0,
"count": 36
},
"Pyramids.Step.sum": {
"value": 1079968.0,
"min": 29952.0,
"max": 1079968.0,
"count": 36
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6609543561935425,
"min": -0.10109496861696243,
"max": 0.717953085899353,
"count": 36
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 186.38912963867188,
"min": -24.363887786865234,
"max": 208.20639038085938,
"count": 36
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.010891588404774666,
"min": 0.007197369821369648,
"max": 0.2011968046426773,
"count": 36
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.071427822113037,
"min": 2.029658317565918,
"max": 48.2872314453125,
"count": 36
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07222109096870219,
"min": 0.06603155255869439,
"max": 0.07331102708468237,
"count": 36
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0110952735618306,
"min": 0.48308003711582415,
"max": 1.0731725864998047,
"count": 36
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015217571987462989,
"min": 0.0011763430783597794,
"max": 0.016209592629494937,
"count": 36
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21304600782448185,
"min": 0.008739182107374604,
"max": 0.24314388944242404,
"count": 36
},
"Pyramids.Policy.LearningRate.mean": {
"value": 0.00019346057122744047,
"min": 0.00019346057122744047,
"max": 0.00029838354339596195,
"count": 36
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0027084479971841665,
"min": 0.0020691136102954665,
"max": 0.003968965777011433,
"count": 36
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1644868452380952,
"min": 0.1644868452380952,
"max": 0.19946118095238097,
"count": 36
},
"Pyramids.Policy.Epsilon.sum": {
"value": 2.302815833333333,
"min": 1.3897045333333333,
"max": 2.7974840333333337,
"count": 36
},
"Pyramids.Policy.Beta.mean": {
"value": 0.006452235839285713,
"min": 0.006452235839285713,
"max": 0.009946171977142856,
"count": 36
},
"Pyramids.Policy.Beta.sum": {
"value": 0.09033130174999998,
"min": 0.06897148288,
"max": 0.13230655781,
"count": 36
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009454161860048771,
"min": 0.008867509663105011,
"max": 0.330863893032074,
"count": 36
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.13235826790332794,
"min": 0.12414513528347015,
"max": 2.316047191619873,
"count": 36
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 311.24,
"min": 282.1010101010101,
"max": 999.0,
"count": 36
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31124.0,
"min": 15984.0,
"max": 33824.0,
"count": 36
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6687519839406013,
"min": -1.0000000521540642,
"max": 1.7148846021065345,
"count": 36
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 166.87519839406013,
"min": -32.000001668930054,
"max": 178.62299844622612,
"count": 36
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6687519839406013,
"min": -1.0000000521540642,
"max": 1.7148846021065345,
"count": 36
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 166.87519839406013,
"min": -32.000001668930054,
"max": 178.62299844622612,
"count": 36
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.030253754709265196,
"min": 0.026187766512783546,
"max": 6.780996962450445,
"count": 36
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.0253754709265195,
"min": 2.665645871522429,
"max": 108.49595139920712,
"count": 36
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 36
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 36
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1703352706",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1703355319"
},
"total": 2612.8587016009997,
"count": 1,
"self": 0.4092223479997301,
"children": {
"run_training.setup": {
"total": 0.050281290000043555,
"count": 1,
"self": 0.050281290000043555
},
"TrainerController.start_learning": {
"total": 2612.399197963,
"count": 1,
"self": 1.5695541309796681,
"children": {
"TrainerController._reset_env": {
"total": 2.0303926660000116,
"count": 1,
"self": 2.0303926660000116
},
"TrainerController.advance": {
"total": 2608.6680105990204,
"count": 69543,
"self": 1.6087841420826408,
"children": {
"env_step": {
"total": 1909.9858612029575,
"count": 69543,
"self": 1761.5860086149628,
"children": {
"SubprocessEnvManager._take_step": {
"total": 147.4138060610336,
"count": 69543,
"self": 5.328052064052031,
"children": {
"TorchPolicy.evaluate": {
"total": 142.08575399698157,
"count": 67774,
"self": 142.08575399698157
}
}
},
"workers": {
"total": 0.9860465269611041,
"count": 69543,
"self": 0.0,
"children": {
"worker_root": {
"total": 2606.303721756051,
"count": 69543,
"is_parallel": true,
"self": 985.1580606050661,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0024070580000170594,
"count": 1,
"is_parallel": true,
"self": 0.0006698310002093422,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017372269998077172,
"count": 8,
"is_parallel": true,
"self": 0.0017372269998077172
}
}
},
"UnityEnvironment.step": {
"total": 0.05330422200006524,
"count": 1,
"is_parallel": true,
"self": 0.0005637429999296728,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005488180000838838,
"count": 1,
"is_parallel": true,
"self": 0.0005488180000838838
},
"communicator.exchange": {
"total": 0.050441544000022986,
"count": 1,
"is_parallel": true,
"self": 0.050441544000022986
},
"steps_from_proto": {
"total": 0.0017501170000286947,
"count": 1,
"is_parallel": true,
"self": 0.0003662849999273021,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013838320001013926,
"count": 8,
"is_parallel": true,
"self": 0.0013838320001013926
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1621.1456611509848,
"count": 69542,
"is_parallel": true,
"self": 39.028971284001045,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 28.800357806041347,
"count": 69542,
"is_parallel": true,
"self": 28.800357806041347
},
"communicator.exchange": {
"total": 1437.5871030989492,
"count": 69542,
"is_parallel": true,
"self": 1437.5871030989492
},
"steps_from_proto": {
"total": 115.72922896199316,
"count": 69542,
"is_parallel": true,
"self": 23.7610387710273,
"children": {
"_process_rank_one_or_two_observation": {
"total": 91.96819019096586,
"count": 556336,
"is_parallel": true,
"self": 91.96819019096586
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 697.0733652539805,
"count": 69543,
"self": 3.0122993380165326,
"children": {
"process_trajectory": {
"total": 145.80757599296498,
"count": 69543,
"self": 145.61025211696483,
"children": {
"RLTrainer._checkpoint": {
"total": 0.197323876000155,
"count": 2,
"self": 0.197323876000155
}
}
},
"_update_policy": {
"total": 548.2534899229989,
"count": 486,
"self": 325.14715183296835,
"children": {
"TorchPPOOptimizer.update": {
"total": 223.1063380900306,
"count": 24715,
"self": 223.1063380900306
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.488000179961091e-06,
"count": 1,
"self": 1.488000179961091e-06
},
"TrainerController._save_models": {
"total": 0.13123907899989717,
"count": 1,
"self": 0.002002037999773165,
"children": {
"RLTrainer._checkpoint": {
"total": 0.129237041000124,
"count": 1,
"self": 0.129237041000124
}
}
}
}
}
}
}
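The JSON above is the gauge and timer profile that ML-Agents writes out at the end of a run; the metadata block shows the training was launched with mlagents-learn using the config/ppo/PyramidsRND.yaml PPO+RND configuration on the Pyramids environment. A minimal sketch for reading the headline gauges back out of this log, assuming it has been saved locally as timers.json (the file name and path are assumptions, not part of the repo content above):

import json

# Load the gauge/timer log shown above. The path "timers.json" is an
# assumption; point it at wherever this file is stored.
with open("timers.json") as f:
    log = json.load(f)

# Each gauge records the latest value plus its running min/max and a sample count.
for name, gauge in sorted(log["gauges"].items()):
    print(f"{name}: value={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, count={gauge['count']})")

# Headline metric for this Pyramids run: the mean cumulative reward at the last summary.
reward = log["gauges"]["Pyramids.Environment.CumulativeReward.mean"]
print("Final mean cumulative reward:", reward["value"])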