{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5032317042350769,
"min": 0.48783302307128906,
"max": 1.4052824974060059,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 15024.486328125,
"min": 14642.7958984375,
"max": 32014.6796875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989954.0,
"min": 29952.0,
"max": 989954.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989954.0,
"min": 29952.0,
"max": 989954.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.23423583805561066,
"min": -0.13060688972473145,
"max": 0.29978105425834656,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 60.198612213134766,
"min": -31.476259231567383,
"max": 77.94307708740234,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.014698714949190617,
"min": -0.0001770851667970419,
"max": 0.43297699093818665,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.7775697708129883,
"min": -0.045687973499298096,
"max": 80.90855407714844,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06832533799939632,
"min": 0.06546820892413677,
"max": 0.0705766391779651,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9565547319915484,
"min": 0.26895508741232005,
"max": 1.057345730854043,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.011186378650622306,
"min": 0.00027094015793847366,
"max": 0.011186378650622306,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.1566093011087123,
"min": 0.0035222220532001576,
"max": 0.1626825869328362,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.195254744471428e-06,
"min": 7.195254744471428e-06,
"max": 0.00029388480203839995,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010073356642259998,
"min": 0.00010073356642259998,
"max": 0.003376515174494999,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10239838571428572,
"min": 0.10239838571428572,
"max": 0.1979616,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4335774000000001,
"min": 0.7918464,
"max": 2.4430687000000004,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00024959873285714294,
"min": 0.00024959873285714294,
"max": 0.00979636384,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003494382260000001,
"min": 0.003494382260000001,
"max": 0.1125679495,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.014127911068499088,
"min": 0.014127911068499088,
"max": 0.3064412474632263,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.19779075682163239,
"min": 0.19779075682163239,
"max": 1.7120411396026611,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 600.0784313725491,
"min": 520.3888888888889,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30604.0,
"min": 15984.0,
"max": 33919.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.9684156534426352,
"min": -1.0000000521540642,
"max": 1.183225898830979,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 49.3891983255744,
"min": -30.995201610028744,
"max": 63.894198536872864,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.9684156534426352,
"min": -1.0000000521540642,
"max": 1.183225898830979,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 49.3891983255744,
"min": -30.995201610028744,
"max": 63.894198536872864,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.08710368909055878,
"min": 0.08372828757812924,
"max": 3.996903594583273,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.442288143618498,
"min": 4.3072825799754355,
"max": 63.95045751333237,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1722596668",
"python_version": "3.10.12 (main, Jul 29 2024, 16:56:48) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --resume",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1722598870"
},
"total": 2201.384442583,
"count": 1,
"self": 0.5380541719996472,
"children": {
"run_training.setup": {
"total": 0.05258541700004571,
"count": 1,
"self": 0.05258541700004571
},
"TrainerController.start_learning": {
"total": 2200.793802994,
"count": 1,
"self": 1.3802115891480753,
"children": {
"TrainerController._reset_env": {
"total": 2.1322384960003546,
"count": 1,
"self": 2.1322384960003546
},
"TrainerController.advance": {
"total": 2197.1987525578525,
"count": 62736,
"self": 1.4046175168018635,
"children": {
"env_step": {
"total": 1576.8071024969918,
"count": 62736,
"self": 1443.9154912961862,
"children": {
"SubprocessEnvManager._take_step": {
"total": 132.06496854784518,
"count": 62736,
"self": 4.681367966797097,
"children": {
"TorchPolicy.evaluate": {
"total": 127.38360058104809,
"count": 61944,
"self": 127.38360058104809
}
}
},
"workers": {
"total": 0.826642652960345,
"count": 62736,
"self": 0.0,
"children": {
"worker_root": {
"total": 2195.701025025926,
"count": 62736,
"is_parallel": true,
"self": 873.4038963110197,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002116510999712773,
"count": 1,
"is_parallel": true,
"self": 0.0006839950001449324,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014325159995678405,
"count": 8,
"is_parallel": true,
"self": 0.0014325159995678405
}
}
},
"UnityEnvironment.step": {
"total": 0.04816528399987874,
"count": 1,
"is_parallel": true,
"self": 0.0006433989997276512,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004596919998220983,
"count": 1,
"is_parallel": true,
"self": 0.0004596919998220983
},
"communicator.exchange": {
"total": 0.04531101300017326,
"count": 1,
"is_parallel": true,
"self": 0.04531101300017326
},
"steps_from_proto": {
"total": 0.0017511800001557276,
"count": 1,
"is_parallel": true,
"self": 0.00037308400078472914,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013780959993709985,
"count": 8,
"is_parallel": true,
"self": 0.0013780959993709985
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1322.2971287149062,
"count": 62735,
"is_parallel": true,
"self": 33.34118377760478,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.58642543505266,
"count": 62735,
"is_parallel": true,
"self": 23.58642543505266
},
"communicator.exchange": {
"total": 1166.3172018161508,
"count": 62735,
"is_parallel": true,
"self": 1166.3172018161508
},
"steps_from_proto": {
"total": 99.05231768609792,
"count": 62735,
"is_parallel": true,
"self": 20.347783920181882,
"children": {
"_process_rank_one_or_two_observation": {
"total": 78.70453376591604,
"count": 501880,
"is_parallel": true,
"self": 78.70453376591604
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 618.9870325440588,
"count": 62736,
"self": 2.5270645890882406,
"children": {
"process_trajectory": {
"total": 127.56066954296512,
"count": 62736,
"self": 127.36193948196524,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19873006099987833,
"count": 2,
"self": 0.19873006099987833
}
}
},
"_update_policy": {
"total": 488.89929841200546,
"count": 445,
"self": 289.3877783760113,
"children": {
"TorchPPOOptimizer.update": {
"total": 199.51152003599418,
"count": 22575,
"self": 199.51152003599418
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.549994501867332e-07,
"count": 1,
"self": 9.549994501867332e-07
},
"TrainerController._save_models": {
"total": 0.08259939599975041,
"count": 1,
"self": 0.0018695460003073094,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0807298499994431,
"count": 1,
"self": 0.0807298499994431
}
}
}
}
}
}
}