{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.7485129237174988,
"min": 0.7459189295768738,
"max": 2.8562512397766113,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7113.86669921875,
"min": 7113.86669921875,
"max": 27580.619140625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.931973457336426,
"min": 0.36002692580223083,
"max": 12.931973457336426,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2521.73486328125,
"min": 34.20255661010742,
"max": 2629.231201171875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 4378.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07143005984064932,
"min": 0.0634824107939779,
"max": 0.07450440518886728,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2857202393625973,
"min": 0.1480563891693201,
"max": 0.37252202594433637,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.18356743458585412,
"min": 0.11577610888372303,
"max": 0.29279502110154015,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7342697383434165,
"min": 0.23155221776744606,
"max": 1.4639751055077008,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 7.0260976579999984e-06,
"min": 7.0260976579999984e-06,
"max": 0.000287526004158,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 2.8104390631999994e-05,
"min": 2.8104390631999994e-05,
"max": 0.0013138800620399998,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10234199999999999,
"min": 0.10234199999999999,
"max": 0.19584200000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.40936799999999995,
"min": 0.39168400000000003,
"max": 0.93796,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001268658,
"min": 0.0001268658,
"max": 0.004792515800000001,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005074632,
"min": 0.0005074632,
"max": 0.021904204,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.704545454545453,
"min": 3.1818181818181817,
"max": 25.704545454545453,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1131.0,
"min": 70.0,
"max": 1378.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.704545454545453,
"min": 3.1818181818181817,
"max": 25.704545454545453,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1131.0,
"min": 70.0,
"max": 1378.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679761884",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679762347"
},
"total": 463.17037403099994,
"count": 1,
"self": 0.43105761699985123,
"children": {
"run_training.setup": {
"total": 0.10540425099998174,
"count": 1,
"self": 0.10540425099998174
},
"TrainerController.start_learning": {
"total": 462.6339121630001,
"count": 1,
"self": 0.5867048149959828,
"children": {
"TrainerController._reset_env": {
"total": 5.944527345000097,
"count": 1,
"self": 5.944527345000097
},
"TrainerController.advance": {
"total": 455.96854212000403,
"count": 17737,
"self": 0.3253818390219294,
"children": {
"env_step": {
"total": 455.6431602809821,
"count": 17737,
"self": 328.5171771879816,
"children": {
"SubprocessEnvManager._take_step": {
"total": 126.83280138098235,
"count": 17737,
"self": 2.3277883179878245,
"children": {
"TorchPolicy.evaluate": {
"total": 124.50501306299452,
"count": 17737,
"self": 124.50501306299452
}
}
},
"workers": {
"total": 0.2931817120181677,
"count": 17737,
"self": 0.0,
"children": {
"worker_root": {
"total": 461.0009280790043,
"count": 17737,
"is_parallel": true,
"self": 214.96186385500232,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0027792089999820746,
"count": 1,
"is_parallel": true,
"self": 0.000728120999838211,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0020510880001438636,
"count": 10,
"is_parallel": true,
"self": 0.0020510880001438636
}
}
},
"UnityEnvironment.step": {
"total": 0.04824274600014178,
"count": 1,
"is_parallel": true,
"self": 0.0005894910000279197,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00038546900009350793,
"count": 1,
"is_parallel": true,
"self": 0.00038546900009350793
},
"communicator.exchange": {
"total": 0.04512782699998752,
"count": 1,
"is_parallel": true,
"self": 0.04512782699998752
},
"steps_from_proto": {
"total": 0.0021399590000328317,
"count": 1,
"is_parallel": true,
"self": 0.0005973410004571633,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015426179995756684,
"count": 10,
"is_parallel": true,
"self": 0.0015426179995756684
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 246.039064224002,
"count": 17736,
"is_parallel": true,
"self": 9.403280328034043,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.098721370994781,
"count": 17736,
"is_parallel": true,
"self": 5.098721370994781
},
"communicator.exchange": {
"total": 200.8669338079908,
"count": 17736,
"is_parallel": true,
"self": 200.8669338079908
},
"steps_from_proto": {
"total": 30.67012871698239,
"count": 17736,
"is_parallel": true,
"self": 6.321889531904844,
"children": {
"_process_rank_one_or_two_observation": {
"total": 24.348239185077546,
"count": 177360,
"is_parallel": true,
"self": 24.348239185077546
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 3.6843000088992994e-05,
"count": 1,
"self": 3.6843000088992994e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 452.60676231104935,
"count": 420773,
"is_parallel": true,
"self": 9.976955237974153,
"children": {
"process_trajectory": {
"total": 252.97012494807495,
"count": 420773,
"is_parallel": true,
"self": 252.15448749207508,
"children": {
"RLTrainer._checkpoint": {
"total": 0.8156374559998767,
"count": 4,
"is_parallel": true,
"self": 0.8156374559998767
}
}
},
"_update_policy": {
"total": 189.65968212500024,
"count": 88,
"is_parallel": true,
"self": 68.43871314200942,
"children": {
"TorchPPOOptimizer.update": {
"total": 121.22096898299083,
"count": 4485,
"is_parallel": true,
"self": 121.22096898299083
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.13410103999990497,
"count": 1,
"self": 0.001158266999937041,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13294277299996793,
"count": 1,
"self": 0.13294277299996793
}
}
}
}
}
}
}