{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.207034111022949,
"min": 3.195733070373535,
"max": 3.237135887145996,
"count": 74
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 63114.4296875,
"min": 24813.296875,
"max": 103490.671875,
"count": 74
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 722.0,
"min": 466.0,
"max": 999.0,
"count": 74
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 17328.0,
"min": 11988.0,
"max": 31448.0,
"count": 74
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1204.9095325253534,
"min": 1197.2513086528097,
"max": 1207.6485228203383,
"count": 51
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 9639.276260202827,
"min": 2394.5026173056194,
"max": 12076.485228203383,
"count": 51
},
"SoccerTwos.Step.mean": {
"value": 999780.0,
"min": 259079.0,
"max": 999780.0,
"count": 75
},
"SoccerTwos.Step.sum": {
"value": 999780.0,
"min": 259079.0,
"max": 999780.0,
"count": 75
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.01595696620643139,
"min": -0.023637687787413597,
"max": 0.008386930450797081,
"count": 75
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.2074405550956726,
"min": -0.28365224599838257,
"max": 0.09589813649654388,
"count": 75
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.01624440588057041,
"min": -0.024123603478074074,
"max": 0.007923462428152561,
"count": 75
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.2111772745847702,
"min": -0.2894832491874695,
"max": 0.0972113162279129,
"count": 75
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 75
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 75
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.42859999950115496,
"min": -0.42859999950115496,
"max": 0.32130000554025173,
"count": 75
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -5.571799993515015,
"min": -5.571799993515015,
"max": 5.140800088644028,
"count": 75
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.42859999950115496,
"min": -0.42859999950115496,
"max": 0.32130000554025173,
"count": 75
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -5.571799993515015,
"min": -5.571799993515015,
"max": 5.140800088644028,
"count": 75
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 75
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 75
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01614117485781511,
"min": 0.012441317781728382,
"max": 0.022755093033225017,
"count": 34
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01614117485781511,
"min": 0.012441317781728382,
"max": 0.022755093033225017,
"count": 34
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.003393122786656022,
"min": 8.370173736693686e-06,
"max": 0.0043563801717634,
"count": 34
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.003393122786656022,
"min": 8.370173736693686e-06,
"max": 0.0043563801717634,
"count": 34
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.003496897577618559,
"min": 8.588571442184426e-06,
"max": 0.0043817395266766345,
"count": 34
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.003496897577618559,
"min": 8.588571442184426e-06,
"max": 0.0043817395266766345,
"count": 34
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 34
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 34
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.19999999999999993,
"max": 0.20000000000000007,
"count": 34
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.19999999999999993,
"max": 0.20000000000000007,
"count": 34
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 34
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 34
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1717146329",
"python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\User\\anaconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --resume",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1717149069"
},
"total": 2739.635941299959,
"count": 1,
"self": 8.018365099909715,
"children": {
"run_training.setup": {
"total": 20.091053000069223,
"count": 1,
"self": 20.091053000069223
},
"TrainerController.start_learning": {
"total": 2711.52652319998,
"count": 1,
"self": 1.5683462915476412,
"children": {
"TrainerController._reset_env": {
"total": 33.86788459983654,
"count": 5,
"self": 33.86788459983654
},
"TrainerController.advance": {
"total": 2675.8766679086257,
"count": 48750,
"self": 1.6080809925915673,
"children": {
"env_step": {
"total": 1078.9752967073582,
"count": 48750,
"self": 794.534522132948,
"children": {
"SubprocessEnvManager._take_step": {
"total": 283.46425178449135,
"count": 48750,
"self": 9.413497863337398,
"children": {
"TorchPolicy.evaluate": {
"total": 274.05075392115396,
"count": 96842,
"self": 274.05075392115396
}
}
},
"workers": {
"total": 0.9765227899188176,
"count": 48750,
"self": 0.0,
"children": {
"worker_root": {
"total": 2680.837356209755,
"count": 48750,
"is_parallel": true,
"self": 2072.1351201153593,
"children": {
"steps_from_proto": {
"total": 0.01413460006006062,
"count": 10,
"is_parallel": true,
"self": 0.0029638997511938214,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.011170700308866799,
"count": 40,
"is_parallel": true,
"self": 0.011170700308866799
}
}
},
"UnityEnvironment.step": {
"total": 608.6881014943356,
"count": 48750,
"is_parallel": true,
"self": 33.7895703825634,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 26.27174610516522,
"count": 48750,
"is_parallel": true,
"self": 26.27174610516522
},
"communicator.exchange": {
"total": 446.7445906009525,
"count": 48750,
"is_parallel": true,
"self": 446.7445906009525
},
"steps_from_proto": {
"total": 101.88219440565445,
"count": 97500,
"is_parallel": true,
"self": 20.896080320933834,
"children": {
"_process_rank_one_or_two_observation": {
"total": 80.98611408472061,
"count": 390000,
"is_parallel": true,
"self": 80.98611408472061
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1595.293290208676,
"count": 48750,
"self": 11.13482260541059,
"children": {
"process_trajectory": {
"total": 231.44681030325592,
"count": 48750,
"self": 230.99146780313458,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4553425001213327,
"count": 2,
"self": 0.4553425001213327
}
}
},
"_update_policy": {
"total": 1352.7116573000094,
"count": 34,
"self": 143.39030400139745,
"children": {
"TorchPOCAOptimizer.update": {
"total": 1209.321353298612,
"count": 1038,
"self": 1209.321353298612
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.200009137392044e-06,
"count": 1,
"self": 1.200009137392044e-06
},
"TrainerController._save_models": {
"total": 0.2136231999611482,
"count": 1,
"self": 0.011383900069631636,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20223929989151657,
"count": 1,
"self": 0.20223929989151657
}
}
}
}
}
}
}