{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.5712438821792603,
"min": 1.5587527751922607,
"max": 2.0243117809295654,
"count": 308
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 34894.18359375,
"min": 27434.048828125,
"max": 45251.359375,
"count": 308
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 57.888888888888886,
"min": 42.02654867256637,
"max": 84.96551724137932,
"count": 308
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 20840.0,
"min": 17400.0,
"max": 21824.0,
"count": 308
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1264.454923827002,
"min": 1173.1927203833575,
"max": 1292.2790965101735,
"count": 308
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 227601.88628886035,
"min": 146698.67756814498,
"max": 274048.9838696183,
"count": 308
},
"SoccerTwos.Step.mean": {
"value": 3079958.0,
"min": 9870.0,
"max": 3079958.0,
"count": 308
},
"SoccerTwos.Step.sum": {
"value": 3079958.0,
"min": 9870.0,
"max": 3079958.0,
"count": 308
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.017327221110463142,
"min": -0.11203747242689133,
"max": 0.10505310446023941,
"count": 308
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -3.1188998222351074,
"min": -19.606557846069336,
"max": 20.275249481201172,
"count": 308
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.014008126221597195,
"min": -0.11430858075618744,
"max": 0.11353336274623871,
"count": 308
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -2.5214626789093018,
"min": -20.00400161743164,
"max": 21.91193962097168,
"count": 308
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 308
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 308
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.11526444554328918,
"min": -0.29855857193470003,
"max": 0.315018183655209,
"count": 308
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -20.747600197792053,
"min": -50.76240026950836,
"max": 62.373600363731384,
"count": 308
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.11526444554328918,
"min": -0.29855857193470003,
"max": 0.315018183655209,
"count": 308
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -20.747600197792053,
"min": -50.76240026950836,
"max": 62.373600363731384,
"count": 308
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 308
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 308
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.014509687471824387,
"min": 0.011130414668393011,
"max": 0.021387927453118512,
"count": 149
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.014509687471824387,
"min": 0.011130414668393011,
"max": 0.021387927453118512,
"count": 149
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10417577028274536,
"min": 0.08628185093402863,
"max": 0.12103650743762652,
"count": 149
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10417577028274536,
"min": 0.08628185093402863,
"max": 0.12103650743762652,
"count": 149
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10661319221059481,
"min": 0.08823907102147738,
"max": 0.12421340743700664,
"count": 149
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10661319221059481,
"min": 0.08823907102147738,
"max": 0.12421340743700664,
"count": 149
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 149
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 149
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 149
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 149
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 149
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 149
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1712852912",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos_7 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1712860334"
},
"total": 7422.35113601,
"count": 1,
"self": 4.956130135000421,
"children": {
"run_training.setup": {
"total": 0.06972608000000946,
"count": 1,
"self": 0.06972608000000946
},
"TrainerController.start_learning": {
"total": 7417.325279795,
"count": 1,
"self": 5.4442507271742215,
"children": {
"TrainerController._reset_env": {
"total": 29.171038249998958,
"count": 16,
"self": 29.171038249998958
},
"TrainerController.advance": {
"total": 7382.708459428827,
"count": 214518,
"self": 5.693532849913026,
"children": {
"env_step": {
"total": 5708.206104742876,
"count": 214518,
"self": 4394.0394083652955,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1310.9314957215418,
"count": 214518,
"self": 35.09114510350105,
"children": {
"TorchPolicy.evaluate": {
"total": 1275.8403506180407,
"count": 387544,
"self": 1275.8403506180407
}
}
},
"workers": {
"total": 3.2352006560385576,
"count": 214517,
"self": 0.0,
"children": {
"worker_root": {
"total": 7401.50360033186,
"count": 214517,
"is_parallel": true,
"self": 3711.771301610851,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006357496999953582,
"count": 2,
"is_parallel": true,
"self": 0.003800963999879059,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0025565330000745234,
"count": 8,
"is_parallel": true,
"self": 0.0025565330000745234
}
}
},
"UnityEnvironment.step": {
"total": 0.04533018199998651,
"count": 1,
"is_parallel": true,
"self": 0.0012176590000194665,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0008767979999788622,
"count": 1,
"is_parallel": true,
"self": 0.0008767979999788622
},
"communicator.exchange": {
"total": 0.03945899199999303,
"count": 1,
"is_parallel": true,
"self": 0.03945899199999303
},
"steps_from_proto": {
"total": 0.003776732999995147,
"count": 2,
"is_parallel": true,
"self": 0.0007150210000190782,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003061711999976069,
"count": 8,
"is_parallel": true,
"self": 0.003061711999976069
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 3689.69585746801,
"count": 214516,
"is_parallel": true,
"self": 224.85616908131578,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 154.20964920780523,
"count": 214516,
"is_parallel": true,
"self": 154.20964920780523
},
"communicator.exchange": {
"total": 2578.6190841460702,
"count": 214516,
"is_parallel": true,
"self": 2578.6190841460702
},
"steps_from_proto": {
"total": 732.0109550328189,
"count": 429032,
"is_parallel": true,
"self": 124.626059117973,
"children": {
"_process_rank_one_or_two_observation": {
"total": 607.3848959148459,
"count": 1716128,
"is_parallel": true,
"self": 607.3848959148459
}
}
}
}
},
"steps_from_proto": {
"total": 0.03644125299865664,
"count": 30,
"is_parallel": true,
"self": 0.006973879003908223,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.029467373994748414,
"count": 120,
"is_parallel": true,
"self": 0.029467373994748414
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1668.8088218360379,
"count": 214517,
"self": 40.49114764599585,
"children": {
"process_trajectory": {
"total": 805.2076311340425,
"count": 214517,
"self": 803.2075128010435,
"children": {
"RLTrainer._checkpoint": {
"total": 2.000118332998909,
"count": 6,
"self": 2.000118332998909
}
}
},
"_update_policy": {
"total": 823.1100430559994,
"count": 149,
"self": 477.9641945340094,
"children": {
"TorchPOCAOptimizer.update": {
"total": 345.14584852199005,
"count": 4470,
"self": 345.14584852199005
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.258999873243738e-06,
"count": 1,
"self": 1.258999873243738e-06
},
"TrainerController._save_models": {
"total": 0.0015301299999919138,
"count": 1,
"self": 4.5572000090032816e-05,
"children": {
"RLTrainer._checkpoint": {
"total": 0.001484557999901881,
"count": 1,
"self": 0.001484557999901881
}
}
}
}
}
}
}