poca-SoccerTwos / run_logs / timers.json
First Push
f679943 verified
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.2696778774261475,
"min": 3.2696778774261475,
"max": 3.295753002166748,
"count": 10
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 82343.5703125,
"min": 44060.8828125,
"max": 105464.09375,
"count": 10
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 727.8333333333334,
"min": 590.5,
"max": 999.0,
"count": 10
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 17468.0,
"min": 4724.0,
"max": 31968.0,
"count": 10
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1198.3192679027218,
"min": 1198.3192679027218,
"max": 1200.3689776363867,
"count": 9
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 9586.554143221774,
"min": 2397.9801952086127,
"max": 9591.965857908112,
"count": 9
},
"SoccerTwos.Step.mean": {
"value": 99381.0,
"min": 9584.0,
"max": 99381.0,
"count": 10
},
"SoccerTwos.Step.sum": {
"value": 99381.0,
"min": 9584.0,
"max": 99381.0,
"count": 10
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.0437549352645874,
"min": -0.07337113469839096,
"max": -0.0437549352645874,
"count": 10
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.5688141584396362,
"min": -0.9163175225257874,
"max": -0.5090833902359009,
"count": 10
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.04437267407774925,
"min": -0.07342542707920074,
"max": -0.04437267407774925,
"count": 10
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.5768447518348694,
"min": -0.8945850133895874,
"max": -0.5112509727478027,
"count": 10
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 10
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 10
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.22533846245362207,
"min": -0.2826857141086033,
"max": 0.07639999958601865,
"count": 10
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -2.929400011897087,
"min": -3.9575999975204468,
"max": 0.8403999954462051,
"count": 10
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.22533846245362207,
"min": -0.2826857141086033,
"max": 0.07639999958601865,
"count": 10
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -2.929400011897087,
"min": -3.9575999975204468,
"max": 0.8403999954462051,
"count": 10
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.015225912048481404,
"min": 0.014602835137685318,
"max": 0.016029355120069037,
"count": 4
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.015225912048481404,
"min": 0.014602835137685318,
"max": 0.016029355120069037,
"count": 4
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.0018669080648881693,
"min": 0.0016783606517405826,
"max": 0.004392762575298548,
"count": 4
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.0018669080648881693,
"min": 0.0016783606517405826,
"max": 0.004392762575298548,
"count": 4
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.0019107467533710102,
"min": 0.0014240537226820986,
"max": 0.004455769644118845,
"count": 4
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.0019107467533710102,
"min": 0.0014240537226820986,
"max": 0.004455769644118845,
"count": 4
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 4
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 4
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 4
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 4
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 4
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 4
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1730912221",
"python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\K.Mikhailov\\AppData\\Local\\anaconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.1+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1730912549"
},
"total": 327.82158280000294,
"count": 1,
"self": 0.9260648000054061,
"children": {
"run_training.setup": {
"total": 0.11618769999768119,
"count": 1,
"self": 0.11618769999768119
},
"TrainerController.start_learning": {
"total": 326.77933029999986,
"count": 1,
"self": 0.20002060011029243,
"children": {
"TrainerController._reset_env": {
"total": 5.063689199996588,
"count": 1,
"self": 5.063689199996588
},
"TrainerController.advance": {
"total": 321.272164999893,
"count": 6824,
"self": 0.1894599995939643,
"children": {
"env_step": {
"total": 157.59721720013476,
"count": 6824,
"self": 123.87716779967741,
"children": {
"SubprocessEnvManager._take_step": {
"total": 33.5906568003993,
"count": 6824,
"self": 1.0147750005999114,
"children": {
"TorchPolicy.evaluate": {
"total": 32.57588179979939,
"count": 13574,
"self": 32.57588179979939
}
}
},
"workers": {
"total": 0.12939260005805409,
"count": 6824,
"self": 0.0,
"children": {
"worker_root": {
"total": 322.2273354001809,
"count": 6824,
"is_parallel": true,
"self": 224.17855320023955,
"children": {
"steps_from_proto": {
"total": 0.001918899994052481,
"count": 2,
"is_parallel": true,
"self": 0.00037459998566191643,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015443000083905645,
"count": 8,
"is_parallel": true,
"self": 0.0015443000083905645
}
}
},
"UnityEnvironment.step": {
"total": 98.04686329994729,
"count": 6824,
"is_parallel": true,
"self": 5.167755800204759,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 4.0627230002428405,
"count": 6824,
"is_parallel": true,
"self": 4.0627230002428405
},
"communicator.exchange": {
"total": 71.62522349983192,
"count": 6824,
"is_parallel": true,
"self": 71.62522349983192
},
"steps_from_proto": {
"total": 17.191160999667773,
"count": 13648,
"is_parallel": true,
"self": 3.3366357003178564,
"children": {
"_process_rank_one_or_two_observation": {
"total": 13.854525299349916,
"count": 54592,
"is_parallel": true,
"self": 13.854525299349916
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 163.48548780016426,
"count": 6824,
"self": 1.197419000280206,
"children": {
"process_trajectory": {
"total": 23.392365599887853,
"count": 6824,
"self": 23.392365599887853
},
"_update_policy": {
"total": 138.8957031999962,
"count": 4,
"self": 15.92536030003248,
"children": {
"TorchPOCAOptimizer.update": {
"total": 122.97034289996373,
"count": 132,
"self": 122.97034289996373
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.500004145782441e-06,
"count": 1,
"self": 1.500004145782441e-06
},
"TrainerController._save_models": {
"total": 0.24345399999583606,
"count": 1,
"self": 0.013574499993410427,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22987950000242563,
"count": 1,
"self": 0.22987950000242563
}
}
}
}
}
}
}
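
Note: the JSON above is the complete timers.json payload. What follows is a minimal, illustrative sketch (not part of the original file) of how such a log could be inspected with the standard library; the field layout (a "gauges" map with value/min/max/count records, and nested timer blocks with total/count/self/children) is taken from the data above, while the file path and helper names are assumptions.

import json

def print_timers(node, name="root", depth=0):
    # Each timer block records total seconds, call count, and self time,
    # with nested sub-timers under "children".
    total = node.get("total", 0.0)
    count = node.get("count", 0)
    print(f"{'  ' * depth}{name}: total={total:.3f}s count={count}")
    for child_name, child in node.get("children", {}).items():
        print_timers(child, child_name, depth + 1)

# Path is an assumption; adjust to wherever run_logs/timers.json lives.
with open("run_logs/timers.json") as f:
    data = json.load(f)

# Gauges: one record per tracked metric (mean/sum variants),
# each with value, min, max, and count fields.
for metric, stats in data["gauges"].items():
    print(f"{metric}: value={stats['value']:.4f} "
          f"(min={stats['min']:.4f}, max={stats['max']:.4f}, n={stats['count']})")

# Timer tree: walk from the root block down through nested children.
print_timers(data)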