First Push
75de035
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.213080406188965,
"min": 3.195675849914551,
"max": 3.2957704067230225,
"count": 50
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 43183.80078125,
"min": 23627.29296875,
"max": 154655.484375,
"count": 50
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 874.2,
"min": 451.6363636363636,
"max": 999.0,
"count": 50
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 17484.0,
"min": 11252.0,
"max": 29516.0,
"count": 50
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1207.818450682942,
"min": 1197.5363095604225,
"max": 1207.818450682942,
"count": 36
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 4831.273802731768,
"min": 2396.5424058931126,
"max": 12053.541169197288,
"count": 36
},
"SoccerTwos.Step.mean": {
"value": 499608.0,
"min": 9576.0,
"max": 499608.0,
"count": 50
},
"SoccerTwos.Step.sum": {
"value": 499608.0,
"min": 9576.0,
"max": 499608.0,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.008370266295969486,
"min": -0.010459543205797672,
"max": 0.03847888484597206,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.09207293391227722,
"min": -0.11505497246980667,
"max": 0.5386500358581543,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.0041692848317325115,
"min": -0.009671417996287346,
"max": 0.03851667791604996,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.04586213454604149,
"min": -0.09671417623758316,
"max": 0.5392143726348877,
"count": 50
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 50
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.11352727088061246,
"min": -0.5454545454545454,
"max": 0.23676923031990343,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 1.248799979686737,
"min": -6.4040000438690186,
"max": 3.077999994158745,
"count": 50
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.11352727088061246,
"min": -0.5454545454545454,
"max": 0.23676923031990343,
"count": 50
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 1.248799979686737,
"min": -6.4040000438690186,
"max": 3.077999994158745,
"count": 50
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.019857598662686844,
"min": 0.012036419286838887,
"max": 0.020377191051375122,
"count": 23
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.019857598662686844,
"min": 0.012036419286838887,
"max": 0.020377191051375122,
"count": 23
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.004033098672516644,
"min": 2.9109245557871568e-05,
"max": 0.005741683052231868,
"count": 23
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.004033098672516644,
"min": 2.9109245557871568e-05,
"max": 0.005741683052231868,
"count": 23
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.004029558249749243,
"min": 2.7684122081457947e-05,
"max": 0.005232056998647749,
"count": 23
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.004029558249749243,
"min": 2.7684122081457947e-05,
"max": 0.005232056998647749,
"count": 23
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 23
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 23
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 23
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 23
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 23
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 23
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677367362",
"python_version": "3.9.16 (main, Jan 11 2023, 10:02:19) \n[Clang 14.0.6 ]",
"command_line_arguments": "/Users/chris/opt/anaconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.app --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0",
"numpy_version": "1.21.2",
"end_time_seconds": "1677369401"
},
"total": 2038.969888917,
"count": 1,
"self": 0.8068985840000096,
"children": {
"run_training.setup": {
"total": 0.02367441700000006,
"count": 1,
"self": 0.02367441700000006
},
"TrainerController.start_learning": {
"total": 2038.139315916,
"count": 1,
"self": 0.4050966300105756,
"children": {
"TrainerController._reset_env": {
"total": 6.344743000999963,
"count": 3,
"self": 6.344743000999963
},
"TrainerController.advance": {
"total": 2031.2847589089895,
"count": 32793,
"self": 0.397566532982637,
"children": {
"env_step": {
"total": 453.2893800969891,
"count": 32793,
"self": 369.1834745589976,
"children": {
"SubprocessEnvManager._take_step": {
"total": 83.83118816099994,
"count": 32793,
"self": 2.219918360023158,
"children": {
"TorchPolicy.evaluate": {
"total": 81.61126980097679,
"count": 65128,
"self": 81.61126980097679
}
}
},
"workers": {
"total": 0.2747173769915179,
"count": 32793,
"self": 0.0,
"children": {
"worker_root": {
"total": 2030.319078550018,
"count": 32793,
"is_parallel": true,
"self": 1712.1780708810202,
"children": {
"steps_from_proto": {
"total": 0.004825625999848704,
"count": 6,
"is_parallel": true,
"self": 0.0008520009996288991,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003973625000219805,
"count": 24,
"is_parallel": true,
"self": 0.003973625000219805
}
}
},
"UnityEnvironment.step": {
"total": 318.1361820429979,
"count": 32793,
"is_parallel": true,
"self": 16.15829219898916,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 9.076947131021361,
"count": 32793,
"is_parallel": true,
"self": 9.076947131021361
},
"communicator.exchange": {
"total": 248.70168471299345,
"count": 32793,
"is_parallel": true,
"self": 248.70168471299345
},
"steps_from_proto": {
"total": 44.199257999993954,
"count": 65586,
"is_parallel": true,
"self": 7.138712469999774,
"children": {
"_process_rank_one_or_two_observation": {
"total": 37.06054552999418,
"count": 262344,
"is_parallel": true,
"self": 37.06054552999418
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1577.5978122790177,
"count": 32793,
"self": 3.5731096260174127,
"children": {
"process_trajectory": {
"total": 178.88359098400025,
"count": 32793,
"self": 178.7066030260001,
"children": {
"RLTrainer._checkpoint": {
"total": 0.17698795800015432,
"count": 1,
"self": 0.17698795800015432
}
}
},
"_update_policy": {
"total": 1395.141111669,
"count": 23,
"self": 57.44646665500068,
"children": {
"TorchPOCAOptimizer.update": {
"total": 1337.6946450139994,
"count": 696,
"self": 1337.6946450139994
}
}
}
}
}
}
},
"trainer_threads": {
"total": 5.420001798484009e-07,
"count": 1,
"self": 5.420001798484009e-07
},
"TrainerController._save_models": {
"total": 0.10471683399987342,
"count": 1,
"self": 0.0006766259998585156,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1040402080000149,
"count": 1,
"self": 0.1040402080000149
}
}
}
}
}
}
}
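The JSON above records per-metric gauges (each with the latest value plus min/max/count over the run) followed by profiler timings. A minimal sketch of how these statistics could be read back, assuming they are saved as run_logs/timers.json in this repository (the path/filename is an assumption, not stated in the file itself):

```python
import json

# Load the training statistics dumped by mlagents-learn
# (path is an assumption for illustration).
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge stores the most recent value plus min/max/count over the run.
elo = timers["gauges"]["SoccerTwos.Self-play.ELO.mean"]
print(
    f"Final self-play ELO: {elo['value']:.1f} "
    f"(min {elo['min']:.1f}, max {elo['max']:.1f}, {elo['count']} samples)"
)

# Total wall-clock time of the run, in seconds, from the top-level timer.
print(f"Training wall-clock time: {timers['total']:.0f} s")
```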