ppo-Pyramid / run_logs / timers.json
First Training of Pyramid
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4224604368209839,
"min": 0.41015860438346863,
"max": 1.4517440795898438,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 12680.572265625,
"min": 12252.2578125,
"max": 44040.109375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989982.0,
"min": 29955.0,
"max": 989982.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989982.0,
"min": 29955.0,
"max": 989982.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.4153847098350525,
"min": -0.13148026168346405,
"max": 0.42808446288108826,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 112.56925964355469,
"min": -31.292301177978516,
"max": 113.01429748535156,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.031819459050893784,
"min": 0.006717985961586237,
"max": 0.34291377663612366,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 8.62307357788086,
"min": 1.726522445678711,
"max": 81.61347961425781,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06749992663195978,
"min": 0.06606303311354539,
"max": 0.07391170858549416,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0124988994793966,
"min": 0.5603373674374748,
"max": 1.0789463070458643,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.012254997698215043,
"min": 0.00017764149640808082,
"max": 0.01602384718694657,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.18382496547322566,
"min": 0.0023093394533050507,
"max": 0.22433386061725197,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.525697491466665e-06,
"min": 7.525697491466665e-06,
"max": 0.000294764176745275,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011288546237199998,
"min": 0.00011288546237199998,
"max": 0.0034944055351982003,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10250853333333335,
"min": 0.10250853333333335,
"max": 0.198254725,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5376280000000002,
"min": 1.4780484000000003,
"max": 2.4648018,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026060247999999995,
"min": 0.00026060247999999995,
"max": 0.009825647027499999,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003909037199999999,
"min": 0.003909037199999999,
"max": 0.11649369981999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010060147382318974,
"min": 0.010060147382318974,
"max": 0.3849566578865051,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.15090221166610718,
"min": 0.14163489639759064,
"max": 3.079653263092041,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 435.18055555555554,
"min": 425.859375,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31333.0,
"min": 17521.0,
"max": 33067.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4800844907130994,
"min": -0.9999613423020609,
"max": 1.485017626601107,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 105.08599884063005,
"min": -31.99640166759491,
"max": 105.08599884063005,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4800844907130994,
"min": -0.9999613423020609,
"max": 1.485017626601107,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 105.08599884063005,
"min": -31.99640166759491,
"max": 105.08599884063005,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.044431310145202435,
"min": 0.044431310145202435,
"max": 7.232109357913335,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.154623020309373,
"min": 3.0103645500494167,
"max": 130.17796844244003,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1707896033",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1707897725"
},
"total": 1691.6612674570001,
"count": 1,
"self": 0.3727360720001798,
"children": {
"run_training.setup": {
"total": 0.05172446599999603,
"count": 1,
"self": 0.05172446599999603
},
"TrainerController.start_learning": {
"total": 1691.236806919,
"count": 1,
"self": 1.4421276859548016,
"children": {
"TrainerController._reset_env": {
"total": 2.4619480579999617,
"count": 1,
"self": 2.4619480579999617
},
"TrainerController.advance": {
"total": 1687.2471859610453,
"count": 63615,
"self": 1.5490380939809256,
"children": {
"env_step": {
"total": 1104.9288046780762,
"count": 63615,
"self": 979.4269370771351,
"children": {
"SubprocessEnvManager._take_step": {
"total": 124.61222899402514,
"count": 63615,
"self": 4.602445365955418,
"children": {
"TorchPolicy.evaluate": {
"total": 120.00978362806973,
"count": 62567,
"self": 120.00978362806973
}
}
},
"workers": {
"total": 0.8896386069159234,
"count": 63615,
"self": 0.0,
"children": {
"worker_root": {
"total": 1689.060673647962,
"count": 63615,
"is_parallel": true,
"self": 808.2099805659268,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001926987000160807,
"count": 1,
"is_parallel": true,
"self": 0.0006131729996923241,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013138140004684828,
"count": 8,
"is_parallel": true,
"self": 0.0013138140004684828
}
}
},
"UnityEnvironment.step": {
"total": 0.036285045999875365,
"count": 1,
"is_parallel": true,
"self": 0.00035883700024896825,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003639279998424172,
"count": 1,
"is_parallel": true,
"self": 0.0003639279998424172
},
"communicator.exchange": {
"total": 0.03442563299995527,
"count": 1,
"is_parallel": true,
"self": 0.03442563299995527
},
"steps_from_proto": {
"total": 0.0011366479998287105,
"count": 1,
"is_parallel": true,
"self": 0.00026913999977296044,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00086750800005575,
"count": 8,
"is_parallel": true,
"self": 0.00086750800005575
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 880.8506930820351,
"count": 63614,
"is_parallel": true,
"self": 23.016375984991328,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 15.873605871968266,
"count": 63614,
"is_parallel": true,
"self": 15.873605871968266
},
"communicator.exchange": {
"total": 777.3959742330762,
"count": 63614,
"is_parallel": true,
"self": 777.3959742330762
},
"steps_from_proto": {
"total": 64.56473699199933,
"count": 63614,
"is_parallel": true,
"self": 13.956460682834631,
"children": {
"_process_rank_one_or_two_observation": {
"total": 50.608276309164694,
"count": 508912,
"is_parallel": true,
"self": 50.608276309164694
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 580.7693431889882,
"count": 63615,
"self": 2.6468081239672756,
"children": {
"process_trajectory": {
"total": 115.42120229602324,
"count": 63615,
"self": 115.22695688202293,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19424541400030648,
"count": 2,
"self": 0.19424541400030648
}
}
},
"_update_policy": {
"total": 462.70133276899764,
"count": 451,
"self": 268.304917175931,
"children": {
"TorchPPOOptimizer.update": {
"total": 194.39641559306665,
"count": 22776,
"self": 194.39641559306665
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.309997039963491e-07,
"count": 1,
"self": 9.309997039963491e-07
},
"TrainerController._save_models": {
"total": 0.08554428300021755,
"count": 1,
"self": 0.0014424790001612564,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08410180400005629,
"count": 1,
"self": 0.08410180400005629
}
}
}
}
}
}
}