First Push (commit c8ebbfb, verified)
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 1.495465874671936,
"min": 1.495465874671936,
"max": 1.495465874671936,
"count": 1
},
"Pyramids.Policy.Entropy.sum": {
"value": 45366.453125,
"min": 45366.453125,
"max": 45366.453125,
"count": 1
},
"Pyramids.Step.mean": {
"value": 29952.0,
"min": 29952.0,
"max": 29952.0,
"count": 1
},
"Pyramids.Step.sum": {
"value": 29952.0,
"min": 29952.0,
"max": 29952.0,
"count": 1
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.10833893716335297,
"min": -0.10833893716335297,
"max": -0.10833893716335297,
"count": 1
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -25.676328659057617,
"min": -25.676328659057617,
"max": -25.676328659057617,
"count": 1
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.2971581220626831,
"min": 0.2971581220626831,
"max": 0.2971581220626831,
"count": 1
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 70.42647552490234,
"min": 70.42647552490234,
"max": 70.42647552490234,
"count": 1
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07152492512541835,
"min": 0.07152492512541835,
"max": 0.07152492512541835,
"count": 1
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.5006744758779285,
"min": 0.5006744758779285,
"max": 0.5006744758779285,
"count": 1
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.008161418558412615,
"min": 0.008161418558412615,
"max": 0.008161418558412615,
"count": 1
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.05712992990888831,
"min": 0.05712992990888831,
"max": 0.05712992990888831,
"count": 1
},
"Pyramids.Policy.LearningRate.mean": {
"value": 0.00013835433959619045,
"min": 0.00013835433959619045,
"max": 0.00013835433959619045,
"count": 1
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0009684803771733332,
"min": 0.0009684803771733332,
"max": 0.0009684803771733332,
"count": 1
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1461180952380953,
"min": 0.1461180952380953,
"max": 0.1461180952380953,
"count": 1
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.022826666666667,
"min": 1.022826666666667,
"max": 1.022826666666667,
"count": 1
},
"Pyramids.Policy.Beta.mean": {
"value": 0.004617197714285715,
"min": 0.004617197714285715,
"max": 0.004617197714285715,
"count": 1
},
"Pyramids.Policy.Beta.sum": {
"value": 0.032320384,
"min": 0.032320384,
"max": 0.032320384,
"count": 1
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.5411986708641052,
"min": 0.5411986708641052,
"max": 0.5411986708641052,
"count": 1
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 3.788390636444092,
"min": 3.788390636444092,
"max": 3.788390636444092,
"count": 1
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 999.0,
"max": 999.0,
"count": 1
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 15984.0,
"min": 15984.0,
"max": 15984.0,
"count": 1
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -1.0000000521540642,
"min": -1.0000000521540642,
"max": -1.0000000521540642,
"count": 1
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -16.000000834465027,
"min": -16.000000834465027,
"max": -16.000000834465027,
"count": 1
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -1.0000000521540642,
"min": -1.0000000521540642,
"max": -1.0000000521540642,
"count": 1
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -16.000000834465027,
"min": -16.000000834465027,
"max": -16.000000834465027,
"count": 1
},
"Pyramids.Policy.RndReward.mean": {
"value": 10.943432716652751,
"min": 10.943432716652751,
"max": 10.943432716652751,
"count": 1
},
"Pyramids.Policy.RndReward.sum": {
"value": 175.09492346644402,
"min": 175.09492346644402,
"max": 175.09492346644402,
"count": 1
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1734269318",
"python_version": "3.10.12 (main, Nov 6 2024, 20:22:13) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --force",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1734269409"
},
"total": 91.555367768,
"count": 1,
"self": 0.6308576060000632,
"children": {
"run_training.setup": {
"total": 0.0800285380000787,
"count": 1,
"self": 0.0800285380000787
},
"TrainerController.start_learning": {
"total": 90.84448162399985,
"count": 1,
"self": 0.08061051499362293,
"children": {
"TrainerController._reset_env": {
"total": 2.9266255330003332,
"count": 1,
"self": 2.9266255330003332
},
"TrainerController.advance": {
"total": 87.70845507700596,
"count": 1896,
"self": 0.0835120950118835,
"children": {
"env_step": {
"total": 55.39678463700284,
"count": 1896,
"self": 49.71258725698817,
"children": {
"SubprocessEnvManager._take_step": {
"total": 5.639808088025802,
"count": 1896,
"self": 0.25819770903262906,
"children": {
"TorchPolicy.evaluate": {
"total": 5.381610378993173,
"count": 1896,
"self": 5.381610378993173
}
}
},
"workers": {
"total": 0.04438929198886399,
"count": 1896,
"self": 0.0,
"children": {
"worker_root": {
"total": 90.2754665660018,
"count": 1896,
"is_parallel": true,
"self": 46.58018129399761,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0027813649999188783,
"count": 1,
"is_parallel": true,
"self": 0.000901820999388292,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018795440005305863,
"count": 8,
"is_parallel": true,
"self": 0.0018795440005305863
}
}
},
"UnityEnvironment.step": {
"total": 0.07342804600011732,
"count": 1,
"is_parallel": true,
"self": 0.0008202680005524599,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005021819997637067,
"count": 1,
"is_parallel": true,
"self": 0.0005021819997637067
},
"communicator.exchange": {
"total": 0.06987664999996923,
"count": 1,
"is_parallel": true,
"self": 0.06987664999996923
},
"steps_from_proto": {
"total": 0.0022289459998319217,
"count": 1,
"is_parallel": true,
"self": 0.00044996699989496847,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017789789999369532,
"count": 8,
"is_parallel": true,
"self": 0.0017789789999369532
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 43.695285272004185,
"count": 1895,
"is_parallel": true,
"self": 1.7149194299922783,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 1.0536242959851734,
"count": 1895,
"is_parallel": true,
"self": 1.0536242959851734
},
"communicator.exchange": {
"total": 36.45212385302011,
"count": 1895,
"is_parallel": true,
"self": 36.45212385302011
},
"steps_from_proto": {
"total": 4.474617693006621,
"count": 1895,
"is_parallel": true,
"self": 0.9737946830127839,
"children": {
"_process_rank_one_or_two_observation": {
"total": 3.5008230099938373,
"count": 15160,
"is_parallel": true,
"self": 3.5008230099938373
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 32.22815834499124,
"count": 1896,
"self": 0.09568896500150004,
"children": {
"process_trajectory": {
"total": 5.217748665990257,
"count": 1896,
"self": 5.217748665990257
},
"_update_policy": {
"total": 26.914720713999486,
"count": 7,
"self": 10.796357198994428,
"children": {
"TorchPPOOptimizer.update": {
"total": 16.118363515005058,
"count": 663,
"self": 16.118363515005058
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.619998309062794e-07,
"count": 1,
"self": 9.619998309062794e-07
},
"TrainerController._save_models": {
"total": 0.12878953700010243,
"count": 1,
"self": 0.002580144000148721,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1262093929999537,
"count": 1,
"self": 0.1262093929999537
}
}
}
}
}
}
}
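
This file is the timer and gauge summary that ML-Agents writes at the end of a training run (here, a short PPO + RND run on the Pyramids environment; each gauge has `count` 1 because this is a first push, so `value`, `min`, and `max` coincide). Below is a minimal sketch for inspecting such a file, assuming it is saved locally as `timers.json` (the filename and path are illustrative, not part of this repo's layout):

```python
import json

def print_gauges(root):
    """List each gauge with its recorded value/min/max and sample count."""
    for name, g in root.get("gauges", {}).items():
        print(f"{name}: value={g['value']:.6g} "
              f"(min={g['min']:.6g}, max={g['max']:.6g}, n={g['count']})")

def print_timers(node, name="root", depth=0, root_total=None):
    """Recursively print the timer tree: seconds, call count, share of root time."""
    total = node.get("total", 0.0)
    if root_total is None:
        root_total = total or 1.0
    tag = " [parallel]" if node.get("is_parallel") else ""
    print(f"{'  ' * depth}{name}: {total:.3f}s x{node.get('count', 0)} "
          f"({100 * total / root_total:.1f}%){tag}")
    for child_name, child in node.get("children", {}).items():
        print_timers(child, child_name, depth + 1, root_total)

with open("timers.json") as f:  # illustrative path, not a path recorded in this repo
    root = json.load(f)

print_gauges(root)
print_timers(root)
```

Run against the data above, the walker would show that `TrainerController.advance` accounts for roughly 87.7 of the 91.6 total seconds, most of it split between `env_step` (about 55.4 s, dominated by `communicator.exchange` with Unity) and `trainer_advance` (about 32.2 s, mostly the `_update_policy` / `TorchPPOOptimizer.update` PPO update).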