{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.436242014169693,
"min": 0.436242014169693,
"max": 1.4636121988296509,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13094.240234375,
"min": 13094.240234375,
"max": 44400.140625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989963.0,
"min": 29952.0,
"max": 989963.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989963.0,
"min": 29952.0,
"max": 989963.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5034027099609375,
"min": -0.10487513989210129,
"max": 0.5034027099609375,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 138.93914794921875,
"min": -25.379783630371094,
"max": 138.93914794921875,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.0008503045537509024,
"min": -0.0008503045537509024,
"max": 0.31303316354751587,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -0.2346840500831604,
"min": -0.2346840500831604,
"max": 75.12796020507812,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0694308313035262,
"min": 0.06563874812231266,
"max": 0.07184012622003341,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9720316382493668,
"min": 0.47908612286980434,
"max": 1.0776018933005012,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014048831393232656,
"min": 0.00031272439702015147,
"max": 0.01503226775890826,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.1966836395052572,
"min": 0.004065417161261969,
"max": 0.21045174862471563,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.452826087185715e-06,
"min": 7.452826087185715e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010433956522060001,
"min": 0.00010433956522060001,
"max": 0.0036354349881883995,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10248424285714286,
"min": 0.10248424285714286,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4347794,
"min": 1.3886848,
"max": 2.6118116000000002,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025817586142857146,
"min": 0.00025817586142857146,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036144620600000008,
"min": 0.0036144620600000008,
"max": 0.12119997884,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.007100880146026611,
"min": 0.006741122808307409,
"max": 0.5128947496414185,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.09941232204437256,
"min": 0.09437572211027145,
"max": 3.5902633666992188,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 372.1975308641975,
"min": 372.1975308641975,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30148.0,
"min": 15984.0,
"max": 32906.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5784098636588932,
"min": -1.0000000521540642,
"max": 1.5784098636588932,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 127.85119895637035,
"min": -30.99500161409378,
"max": 127.85119895637035,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5784098636588932,
"min": -1.0000000521540642,
"max": 1.5784098636588932,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 127.85119895637035,
"min": -30.99500161409378,
"max": 127.85119895637035,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.02777904900740171,
"min": 0.02777904900740171,
"max": 9.690085615962744,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.2501029695995385,
"min": 2.0108259644111968,
"max": 155.0413698554039,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1740872337",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1740874766"
},
"total": 2429.604701541,
"count": 1,
"self": 0.47701173200039193,
"children": {
"run_training.setup": {
"total": 0.029845167999610567,
"count": 1,
"self": 0.029845167999610567
},
"TrainerController.start_learning": {
"total": 2429.097844641,
"count": 1,
"self": 1.4309590500388367,
"children": {
"TrainerController._reset_env": {
"total": 2.7619192890001614,
"count": 1,
"self": 2.7619192890001614
},
"TrainerController.advance": {
"total": 2424.805221040961,
"count": 63630,
"self": 1.4655971248262176,
"children": {
"env_step": {
"total": 1675.1972993030654,
"count": 63630,
"self": 1511.4576798019948,
"children": {
"SubprocessEnvManager._take_step": {
"total": 162.88155949501288,
"count": 63630,
"self": 4.883012785038318,
"children": {
"TorchPolicy.evaluate": {
"total": 157.99854670997456,
"count": 62550,
"self": 157.99854670997456
}
}
},
"workers": {
"total": 0.8580600060577126,
"count": 63630,
"self": 0.0,
"children": {
"worker_root": {
"total": 2423.708936611042,
"count": 63630,
"is_parallel": true,
"self": 1030.6948323940537,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0022225370003070566,
"count": 1,
"is_parallel": true,
"self": 0.0007071560012263944,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015153809990806621,
"count": 8,
"is_parallel": true,
"self": 0.0015153809990806621
}
}
},
"UnityEnvironment.step": {
"total": 0.05142218799983311,
"count": 1,
"is_parallel": true,
"self": 0.0005950780000603118,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005439679998744396,
"count": 1,
"is_parallel": true,
"self": 0.0005439679998744396
},
"communicator.exchange": {
"total": 0.0484688679998726,
"count": 1,
"is_parallel": true,
"self": 0.0484688679998726
},
"steps_from_proto": {
"total": 0.0018142740000257618,
"count": 1,
"is_parallel": true,
"self": 0.00041236299966840306,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014019110003573587,
"count": 8,
"is_parallel": true,
"self": 0.0014019110003573587
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1393.0141042169885,
"count": 63629,
"is_parallel": true,
"self": 34.78714936805454,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.298614166058996,
"count": 63629,
"is_parallel": true,
"self": 24.298614166058996
},
"communicator.exchange": {
"total": 1229.7742229069968,
"count": 63629,
"is_parallel": true,
"self": 1229.7742229069968
},
"steps_from_proto": {
"total": 104.15411777587815,
"count": 63629,
"is_parallel": true,
"self": 20.999436375795995,
"children": {
"_process_rank_one_or_two_observation": {
"total": 83.15468140008215,
"count": 509032,
"is_parallel": true,
"self": 83.15468140008215
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 748.1423246130694,
"count": 63630,
"self": 2.732457192998936,
"children": {
"process_trajectory": {
"total": 136.63605194907632,
"count": 63630,
"self": 136.41278076207664,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22327118699968196,
"count": 2,
"self": 0.22327118699968196
}
}
},
"_update_policy": {
"total": 608.7738154709941,
"count": 454,
"self": 336.236192447996,
"children": {
"TorchPPOOptimizer.update": {
"total": 272.5376230229981,
"count": 22794,
"self": 272.5376230229981
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0280000424245372e-06,
"count": 1,
"self": 1.0280000424245372e-06
},
"TrainerController._save_models": {
"total": 0.09974423299991031,
"count": 1,
"self": 0.0018836620001820847,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09786057099972822,
"count": 1,
"self": 0.09786057099972822
}
}
}
}
}
}
}