{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.6451210975646973,
"min": 0.6451210975646973,
"max": 1.6069170236587524,
"count": 20
},
"Pyramids.Policy.Entropy.sum": {
"value": 3014.005859375,
"min": 2957.91796875,
"max": 9872.8984375,
"count": 20
},
"Pyramids.Step.mean": {
"value": 99957.0,
"min": 4992.0,
"max": 99957.0,
"count": 20
},
"Pyramids.Step.sum": {
"value": 99957.0,
"min": 4992.0,
"max": 99957.0,
"count": 20
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.10700247436761856,
"min": -0.11036466807126999,
"max": 0.19310301542282104,
"count": 20
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -4.280098915100098,
"min": -4.524951457977295,
"max": 7.531017780303955,
"count": 20
},
"Pyramids.Policy.CuriosityValueEstimate.mean": {
"value": 0.31208768486976624,
"min": -0.11871524900197983,
"max": 0.5101918578147888,
"count": 20
},
"Pyramids.Policy.CuriosityValueEstimate.sum": {
"value": 12.48350715637207,
"min": -4.629894733428955,
"max": 21.294876098632812,
"count": 20
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06758057010979622,
"min": 0.060139786034900077,
"max": 0.08101109993355028,
"count": 20
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.20274171032938865,
"min": 0.060139786034900077,
"max": 0.20274171032938865,
"count": 20
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.00029973042170878004,
"min": 0.00029973042170878004,
"max": 0.014868051817757078,
"count": 20
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.0008991912651263402,
"min": 0.00041016983324844463,
"max": 0.016111305192002785,
"count": 20
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.109509778100002e-05,
"min": 1.109509778100002e-05,
"max": 0.000479520004096,
"count": 20
},
"Pyramids.Policy.LearningRate.sum": {
"value": 3.328529334300006e-05,
"min": 3.328529334300006e-05,
"max": 0.0007785600442879999,
"count": 20
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10221900000000002,
"min": 0.10221900000000002,
"max": 0.19590400000000005,
"count": 20
},
"Pyramids.Policy.Epsilon.sum": {
"value": 0.30665700000000007,
"min": 0.13778899999999994,
"max": 0.38347300000000006,
"count": 20
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00023167810000000047,
"min": 0.00023167810000000047,
"max": 0.0095908096,
"count": 20
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0006950343000000014,
"min": 0.0006950343000000014,
"max": 0.015575628800000001,
"count": 20
},
"Pyramids.Losses.CuriosityForwardLoss.mean": {
"value": 0.1331191056088549,
"min": 0.13191234056527415,
"max": 1.062101550400257,
"count": 20
},
"Pyramids.Losses.CuriosityForwardLoss.sum": {
"value": 0.3993573168265647,
"min": 0.1845689576960379,
"max": 1.062101550400257,
"count": 20
},
"Pyramids.Losses.CuriosityInverseLoss.mean": {
"value": 0.19341666065103527,
"min": 0.19341666065103527,
"max": 1.0087984967976809,
"count": 20
},
"Pyramids.Losses.CuriosityInverseLoss.sum": {
"value": 0.5802499819531058,
"min": 0.2827631707793923,
"max": 1.0708979216093817,
"count": 20
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 514.0,
"max": 999.0,
"count": 10
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 14985.0,
"min": 514.0,
"max": 15984.0,
"count": 10
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -1.0000000521540642,
"min": -1.0000000521540642,
"max": 1.4860000014305115,
"count": 12
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -6.000000312924385,
"min": -16.000000834465027,
"max": 2.375999867916107,
"count": 12
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -1.0000000521540642,
"min": -1.0000000521540642,
"max": 1.4860000014305115,
"count": 12
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -6.000000312924385,
"min": -16.000000834465027,
"max": 2.375999867916107,
"count": 12
},
"Pyramids.Policy.CuriosityReward.mean": {
"value": 2.8052185798684754,
"min": 2.8052185798684754,
"max": 6.720910437405109,
"count": 12
},
"Pyramids.Policy.CuriosityReward.sum": {
"value": 16.831311479210854,
"min": 3.1243360936641693,
"max": 107.53456699848175,
"count": 12
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1716714195",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Pyramids.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1716714437"
},
"total": 241.32956991600076,
"count": 1,
"self": 0.5013118150000082,
"children": {
"run_training.setup": {
"total": 0.056518850000429666,
"count": 1,
"self": 0.056518850000429666
},
"TrainerController.start_learning": {
"total": 240.77173925100033,
"count": 1,
"self": 0.18175960001099156,
"children": {
"TrainerController._reset_env": {
"total": 2.4149710379997487,
"count": 1,
"self": 2.4149710379997487
},
"TrainerController.advance": {
"total": 238.05032934099017,
"count": 6304,
"self": 0.2095663039754072,
"children": {
"env_step": {
"total": 155.05327726704854,
"count": 6304,
"self": 136.69235677504003,
"children": {
"SubprocessEnvManager._take_step": {
"total": 18.247605549026957,
"count": 6304,
"self": 0.5990804590810512,
"children": {
"TorchPolicy.evaluate": {
"total": 17.648525089945906,
"count": 6292,
"self": 17.648525089945906
}
}
},
"workers": {
"total": 0.11331494298156031,
"count": 6304,
"self": 0.0,
"children": {
"worker_root": {
"total": 240.0926715700234,
"count": 6304,
"is_parallel": true,
"self": 118.93710902202383,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002296657000442792,
"count": 1,
"is_parallel": true,
"self": 0.0006495540001196787,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016471030003231135,
"count": 8,
"is_parallel": true,
"self": 0.0016471030003231135
}
}
},
"UnityEnvironment.step": {
"total": 0.049303064000014274,
"count": 1,
"is_parallel": true,
"self": 0.0006382899991876911,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000485536999804026,
"count": 1,
"is_parallel": true,
"self": 0.000485536999804026
},
"communicator.exchange": {
"total": 0.0463892420002594,
"count": 1,
"is_parallel": true,
"self": 0.0463892420002594
},
"steps_from_proto": {
"total": 0.001789995000763156,
"count": 1,
"is_parallel": true,
"self": 0.0003914800026905141,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001398514998072642,
"count": 8,
"is_parallel": true,
"self": 0.001398514998072642
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 121.15556254799958,
"count": 6303,
"is_parallel": true,
"self": 3.98279508106134,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 2.6460406800015335,
"count": 6303,
"is_parallel": true,
"self": 2.6460406800015335
},
"communicator.exchange": {
"total": 103.0294933479372,
"count": 6303,
"is_parallel": true,
"self": 103.0294933479372
},
"steps_from_proto": {
"total": 11.497233438999501,
"count": 6303,
"is_parallel": true,
"self": 2.431688196011237,
"children": {
"_process_rank_one_or_two_observation": {
"total": 9.065545242988264,
"count": 50424,
"is_parallel": true,
"self": 9.065545242988264
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 82.78748576996622,
"count": 6304,
"self": 0.25155939395790483,
"children": {
"process_trajectory": {
"total": 12.518190540013165,
"count": 6304,
"self": 12.518190540013165
},
"_update_policy": {
"total": 70.01773583599515,
"count": 33,
"self": 46.00202858499506,
"children": {
"TorchPPOOptimizer.update": {
"total": 24.015707251000094,
"count": 2325,
"self": 24.015707251000094
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.669994713272899e-07,
"count": 1,
"self": 8.669994713272899e-07
},
"TrainerController._save_models": {
"total": 0.12467840499994054,
"count": 1,
"self": 0.0019627969995781314,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12271560800036241,
"count": 1,
"self": 0.12271560800036241
}
}
}
}
}
}
}