{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.23576901853084564,
"min": 0.23576901853084564,
"max": 0.36750328540802,
"count": 23
},
"Pyramids.Policy.Entropy.sum": {
"value": 7057.9814453125,
"min": 7057.9814453125,
"max": 10233.427734375,
"count": 23
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 329.21505376344084,
"min": 302.7551020408163,
"max": 478.65714285714284,
"count": 23
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30617.0,
"min": 13762.0,
"max": 33506.0,
"count": 23
},
"Pyramids.Step.mean": {
"value": 1679903.0,
"min": 1019959.0,
"max": 1679903.0,
"count": 23
},
"Pyramids.Step.sum": {
"value": 1679903.0,
"min": 1019959.0,
"max": 1679903.0,
"count": 23
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6018615961074829,
"min": 0.3889569044113159,
"max": 0.6184836030006409,
"count": 23
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 167.9193878173828,
"min": 71.98414611816406,
"max": 171.93844604492188,
"count": 23
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.0008180650766007602,
"min": -0.004084211774170399,
"max": 0.028206808492541313,
"count": 23
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -0.22824016213417053,
"min": -1.1272424459457397,
"max": 7.644045352935791,
"count": 23
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6277553023810083,
"min": 1.321277118580682,
"max": 1.6277553023810083,
"count": 23
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 153.00899842381477,
"min": 58.237999230623245,
"max": 159.90779871493578,
"count": 23
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6277553023810083,
"min": 1.321277118580682,
"max": 1.6277553023810083,
"count": 23
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 153.00899842381477,
"min": 58.237999230623245,
"max": 159.90779871493578,
"count": 23
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.031048489362682574,
"min": 0.03094655353232505,
"max": 0.07338713616149367,
"count": 23
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.918558000092162,
"min": 2.089674953633221,
"max": 5.137099531304557,
"count": 23
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06773896340330664,
"min": 0.06412356666405684,
"max": 0.07152340681585925,
"count": 23
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.948345487646293,
"min": 0.6317606997326948,
"max": 1.036901918860773,
"count": 23
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01557055618849561,
"min": 0.011792417253423526,
"max": 0.016712072956752744,
"count": 23
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21798778663893856,
"min": 0.11095894096797565,
"max": 0.2339690213945384,
"count": 23
},
"Pyramids.Policy.LearningRate.mean": {
"value": 5.022549397248215e-05,
"min": 5.022549397248215e-05,
"max": 0.00014842608385798892,
"count": 23
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0007031569156147501,
"min": 0.0007031569156147501,
"max": 0.00202674837441745,
"count": 23
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.11674180357142859,
"min": 0.11674180357142859,
"max": 0.14947534444444446,
"count": 23
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.6343852500000002,
"min": 1.3452781000000003,
"max": 2.1563334,
"count": 23
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0016825061767857144,
"min": 0.0016825061767857144,
"max": 0.00495258691,
"count": 23
},
"Pyramids.Policy.Beta.sum": {
"value": 0.023555086475,
"min": 0.023555086475,
"max": 0.067630696745,
"count": 23
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009054279886186123,
"min": 0.009054279886186123,
"max": 0.014804517850279808,
"count": 23
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.12675991654396057,
"min": 0.12675991654396057,
"max": 0.20726324617862701,
"count": 23
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 23
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 23
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1721993125",
"python_version": "3.10.12 (main, Mar 22 2024, 16:50:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --resume",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1721994768"
},
"total": 1642.6813722149996,
"count": 1,
"self": 0.4982997039987822,
"children": {
"run_training.setup": {
"total": 0.059540038000250206,
"count": 1,
"self": 0.059540038000250206
},
"TrainerController.start_learning": {
"total": 1642.1235324730005,
"count": 1,
"self": 0.8982533830780994,
"children": {
"TrainerController._reset_env": {
"total": 2.051978708000206,
"count": 1,
"self": 2.051978708000206
},
"TrainerController.advance": {
"total": 1638.7901514449227,
"count": 44161,
"self": 0.9263329430473277,
"children": {
"env_step": {
"total": 1199.851026489871,
"count": 44161,
"self": 1109.8131978896654,
"children": {
"SubprocessEnvManager._take_step": {
"total": 89.51381826909846,
"count": 44161,
"self": 3.1261631671336545,
"children": {
"TorchPolicy.evaluate": {
"total": 86.38765510196481,
"count": 42699,
"self": 86.38765510196481
}
}
},
"workers": {
"total": 0.5240103311070925,
"count": 44160,
"self": 0.0,
"children": {
"worker_root": {
"total": 1638.5502897600577,
"count": 44160,
"is_parallel": true,
"self": 610.4764003480341,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002349054000205797,
"count": 1,
"is_parallel": true,
"self": 0.0006989300004534016,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016501239997523953,
"count": 8,
"is_parallel": true,
"self": 0.0016501239997523953
}
}
},
"UnityEnvironment.step": {
"total": 0.049945648000175424,
"count": 1,
"is_parallel": true,
"self": 0.0006210650008142693,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004746249996969709,
"count": 1,
"is_parallel": true,
"self": 0.0004746249996969709
},
"communicator.exchange": {
"total": 0.04719839099971068,
"count": 1,
"is_parallel": true,
"self": 0.04719839099971068
},
"steps_from_proto": {
"total": 0.001651566999953502,
"count": 1,
"is_parallel": true,
"self": 0.0003632790003393893,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012882879996141128,
"count": 8,
"is_parallel": true,
"self": 0.0012882879996141128
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1028.0738894120236,
"count": 44159,
"is_parallel": true,
"self": 22.615243170127542,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 16.16354234092296,
"count": 44159,
"is_parallel": true,
"self": 16.16354234092296
},
"communicator.exchange": {
"total": 922.6371116630103,
"count": 44159,
"is_parallel": true,
"self": 922.6371116630103
},
"steps_from_proto": {
"total": 66.65799223796284,
"count": 44159,
"is_parallel": true,
"self": 13.613427392246194,
"children": {
"_process_rank_one_or_two_observation": {
"total": 53.04456484571665,
"count": 353272,
"is_parallel": true,
"self": 53.04456484571665
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 438.0127920120044,
"count": 44160,
"self": 1.8215301810610072,
"children": {
"process_trajectory": {
"total": 88.54691594793758,
"count": 44160,
"self": 88.35791005293822,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1890058949993545,
"count": 1,
"self": 0.1890058949993545
}
}
},
"_update_policy": {
"total": 347.6443458830058,
"count": 322,
"self": 199.17829740993784,
"children": {
"TorchPPOOptimizer.update": {
"total": 148.46604847306799,
"count": 15504,
"self": 148.46604847306799
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.7639995348872617e-06,
"count": 1,
"self": 1.7639995348872617e-06
},
"TrainerController._save_models": {
"total": 0.38314717299999757,
"count": 1,
"self": 0.014124008999715443,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3690231640002821,
"count": 1,
"self": 0.3690231640002821
}
}
}
}
}
}
}