{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 1.49355149269104,
"min": 1.49355149269104,
"max": 1.49355149269104,
"count": 1
},
"Pyramids.Policy.Entropy.sum": {
"value": 45308.37890625,
"min": 45308.37890625,
"max": 45308.37890625,
"count": 1
},
"Pyramids.Step.mean": {
"value": 29952.0,
"min": 29952.0,
"max": 29952.0,
"count": 1
},
"Pyramids.Step.sum": {
"value": 29952.0,
"min": 29952.0,
"max": 29952.0,
"count": 1
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.10137154906988144,
"min": -0.10137154906988144,
"max": -0.10137154906988144,
"count": 1
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -24.025056838989258,
"min": -24.025056838989258,
"max": -24.025056838989258,
"count": 1
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.2738514244556427,
"min": 0.2738514244556427,
"max": 0.2738514244556427,
"count": 1
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 64.90278625488281,
"min": 64.90278625488281,
"max": 64.90278625488281,
"count": 1
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07013873445180364,
"min": 0.07013873445180364,
"max": 0.07013873445180364,
"count": 1
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.49097114116262547,
"min": 0.49097114116262547,
"max": 0.49097114116262547,
"count": 1
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.005827084696635997,
"min": 0.005827084696635997,
"max": 0.005827084696635997,
"count": 1
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.04078959287645198,
"min": 0.04078959287645198,
"max": 0.04078959287645198,
"count": 1
},
"Pyramids.Policy.LearningRate.mean": {
"value": 0.00020301260375771423,
"min": 0.00020301260375771423,
"max": 0.00020301260375771423,
"count": 1
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0014210882263039997,
"min": 0.0014210882263039997,
"max": 0.0014210882263039997,
"count": 1
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.16767085714285712,
"min": 0.16767085714285712,
"max": 0.16767085714285712,
"count": 1
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.1736959999999999,
"min": 1.1736959999999999,
"max": 1.1736959999999999,
"count": 1
},
"Pyramids.Policy.Beta.mean": {
"value": 0.006770318628571428,
"min": 0.006770318628571428,
"max": 0.006770318628571428,
"count": 1
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0473922304,
"min": 0.0473922304,
"max": 0.0473922304,
"count": 1
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.43251997232437134,
"min": 0.43251997232437134,
"max": 0.43251997232437134,
"count": 1
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 3.027639865875244,
"min": 3.027639865875244,
"max": 3.027639865875244,
"count": 1
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 999.0,
"max": 999.0,
"count": 1
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 15984.0,
"min": 15984.0,
"max": 15984.0,
"count": 1
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -1.0000000521540642,
"min": -1.0000000521540642,
"max": -1.0000000521540642,
"count": 1
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -16.000000834465027,
"min": -16.000000834465027,
"max": -16.000000834465027,
"count": 1
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -1.0000000521540642,
"min": -1.0000000521540642,
"max": -1.0000000521540642,
"count": 1
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -16.000000834465027,
"min": -16.000000834465027,
"max": -16.000000834465027,
"count": 1
},
"Pyramids.Policy.RndReward.mean": {
"value": 8.63835357222706,
"min": 8.63835357222706,
"max": 8.63835357222706,
"count": 1
},
"Pyramids.Policy.RndReward.sum": {
"value": 138.21365715563297,
"min": 138.21365715563297,
"max": 138.21365715563297,
"count": 1
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1685595525",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1685595618"
},
"total": 92.8546537630001,
"count": 1,
"self": 0.474551048000194,
"children": {
"run_training.setup": {
"total": 0.03662407299998449,
"count": 1,
"self": 0.03662407299998449
},
"TrainerController.start_learning": {
"total": 92.34347864199992,
"count": 1,
"self": 0.05902587100194978,
"children": {
"TrainerController._reset_env": {
"total": 3.986774209000032,
"count": 1,
"self": 3.986774209000032
},
"TrainerController.advance": {
"total": 88.18238182099788,
"count": 3129,
"self": 0.061272045992836865,
"children": {
"env_step": {
"total": 58.40099859400459,
"count": 3129,
"self": 53.115689106993045,
"children": {
"SubprocessEnvManager._take_step": {
"total": 5.248908848009023,
"count": 3129,
"self": 0.22159490800186177,
"children": {
"TorchPolicy.evaluate": {
"total": 5.027313940007161,
"count": 3128,
"self": 5.027313940007161
}
}
},
"workers": {
"total": 0.03640063900252244,
"count": 3129,
"self": 0.0,
"children": {
"worker_root": {
"total": 91.88438270100414,
"count": 3129,
"is_parallel": true,
"self": 44.062418983006296,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002037276999999449,
"count": 1,
"is_parallel": true,
"self": 0.0006507810001039616,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013864959998954873,
"count": 8,
"is_parallel": true,
"self": 0.0013864959998954873
}
}
},
"UnityEnvironment.step": {
"total": 0.07310153100002026,
"count": 1,
"is_parallel": true,
"self": 0.0005644390000725252,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004604800000151954,
"count": 1,
"is_parallel": true,
"self": 0.0004604800000151954
},
"communicator.exchange": {
"total": 0.07034748399996715,
"count": 1,
"is_parallel": true,
"self": 0.07034748399996715
},
"steps_from_proto": {
"total": 0.0017291279999653852,
"count": 1,
"is_parallel": true,
"self": 0.00034980900011305494,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013793189998523303,
"count": 8,
"is_parallel": true,
"self": 0.0013793189998523303
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 47.821963717997846,
"count": 3128,
"is_parallel": true,
"self": 1.6035997220022864,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 1.0838460659929297,
"count": 3128,
"is_parallel": true,
"self": 1.0838460659929297
},
"communicator.exchange": {
"total": 40.36302870700388,
"count": 3128,
"is_parallel": true,
"self": 40.36302870700388
},
"steps_from_proto": {
"total": 4.771489222998753,
"count": 3128,
"is_parallel": true,
"self": 0.9373386270056017,
"children": {
"_process_rank_one_or_two_observation": {
"total": 3.834150595993151,
"count": 25024,
"is_parallel": true,
"self": 3.834150595993151
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 29.720111181000448,
"count": 3129,
"self": 0.0753446269949336,
"children": {
"process_trajectory": {
"total": 5.00568300100565,
"count": 3129,
"self": 5.00568300100565
},
"_update_policy": {
"total": 24.639083552999864,
"count": 12,
"self": 15.870051897003464,
"children": {
"TorchPPOOptimizer.update": {
"total": 8.7690316559964,
"count": 1086,
"self": 8.7690316559964
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.96999915514607e-07,
"count": 1,
"self": 9.96999915514607e-07
},
"TrainerController._save_models": {
"total": 0.11529574400015008,
"count": 1,
"self": 0.0013876790001177142,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11390806500003237,
"count": 1,
"self": 0.11390806500003237
}
}
}
}
}
}
}