poca-SoccerTwos/run_logs/timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.858447551727295,
"min": 0.8562191128730774,
"max": 3.204357385635376,
"count": 1426
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 30686.685546875,
"min": 13434.1328125,
"max": 179370.796875,
"count": 1426
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 276.0,
"max": 999.0,
"count": 1426
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19980.0,
"min": 1104.0,
"max": 28240.0,
"count": 1426
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1192.775622451832,
"min": 1186.0780097703073,
"max": 1206.281809758689,
"count": 80
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 2385.551244903664,
"min": 2372.1560195406146,
"max": 14305.719416506243,
"count": 80
},
"SoccerTwos.Step.mean": {
"value": 21679222.0,
"min": 7429794.0,
"max": 21679222.0,
"count": 1426
},
"SoccerTwos.Step.sum": {
"value": 21679222.0,
"min": 7429794.0,
"max": 21679222.0,
"count": 1426
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.0015860110288485885,
"min": -0.03300626948475838,
"max": 0.002437777118757367,
"count": 1426
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.01586011052131653,
"min": -0.4290814995765686,
"max": 0.024377770721912384,
"count": 1426
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.0015952929388731718,
"min": -0.03303126245737076,
"max": 0.0022892130073159933,
"count": 1426
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.015952929854393005,
"min": -0.42940640449523926,
"max": 0.02289213053882122,
"count": 1426
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1426
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1426
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.0,
"min": -0.6666666666666666,
"max": 0.22958460908669692,
"count": 1426
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 0.0,
"min": -10.0,
"max": 2.98459991812706,
"count": 1426
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.0,
"min": -0.6666666666666666,
"max": 0.22958460908669692,
"count": 1426
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 0.0,
"min": -10.0,
"max": 2.98459991812706,
"count": 1426
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1426
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1426
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01903152364733008,
"min": 0.010264369505845632,
"max": 0.02609913217990349,
"count": 651
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01903152364733008,
"min": 0.010264369505845632,
"max": 0.02609913217990349,
"count": 651
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 2.072296975500857e-08,
"min": 1.4381100274749963e-11,
"max": 0.004640070341217021,
"count": 651
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 2.072296975500857e-08,
"min": 1.4381100274749963e-11,
"max": 0.004640070341217021,
"count": 651
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 2.046845244407791e-08,
"min": 1.869307584880427e-11,
"max": 0.004646581714041531,
"count": 651
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 2.046845244407791e-08,
"min": 1.869307584880427e-11,
"max": 0.004646581714041531,
"count": 651
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 651
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 651
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 651
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 651
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 651
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 651
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1724582597",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/home/montana/miniconda3/envs/huggy/bin/mlagents-learn ./config/poca/SoccerTwos_monti-python.yaml --env=./training-envs-executables/linux/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos1 --no-graphics --resume",
"mlagents_version": "1.0.0",
"mlagents_envs_version": "1.0.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0+cu121",
"numpy_version": "1.21.2",
"end_time_seconds": "1724614691"
},
"total": 32093.904947411982,
"count": 1,
"self": 4.165521894974518,
"children": {
"run_training.setup": {
"total": 0.03884438700333703,
"count": 1,
"self": 0.03884438700333703
},
"TrainerController.start_learning": {
"total": 32089.700581130004,
"count": 1,
"self": 19.33149866956228,
"children": {
"TrainerController._reset_env": {
"total": 14.322139700889238,
"count": 72,
"self": 14.322139700889238
},
"TrainerController.advance": {
"total": 32055.80913507055,
"count": 928021,
"self": 21.733378148725023,
"children": {
"env_step": {
"total": 26282.779153292184,
"count": 928021,
"self": 16354.79527279995,
"children": {
"SubprocessEnvManager._take_step": {
"total": 9914.268397079664,
"count": 928021,
"self": 163.38794265690376,
"children": {
"TorchPolicy.evaluate": {
"total": 9750.88045442276,
"count": 1843301,
"self": 9750.88045442276
}
}
},
"workers": {
"total": 13.715483412568574,
"count": 928020,
"self": 0.0,
"children": {
"worker_root": {
"total": 32041.622939642184,
"count": 928020,
"is_parallel": true,
"self": 17993.497503570892,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005819965997943655,
"count": 2,
"is_parallel": true,
"self": 0.0015694559551775455,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.004250510042766109,
"count": 8,
"is_parallel": true,
"self": 0.004250510042766109
}
}
},
"UnityEnvironment.step": {
"total": 0.10780634600087069,
"count": 1,
"is_parallel": true,
"self": 0.0029583440045826137,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0009076459973584861,
"count": 1,
"is_parallel": true,
"self": 0.0009076459973584861
},
"communicator.exchange": {
"total": 0.10096871899440885,
"count": 1,
"is_parallel": true,
"self": 0.10096871899440885
},
"steps_from_proto": {
"total": 0.002971637004520744,
"count": 2,
"is_parallel": true,
"self": 0.0007164519920479506,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0022551850124727935,
"count": 8,
"is_parallel": true,
"self": 0.0022551850124727935
}
}
}
}
}
}
},
"steps_from_proto": {
"total": 0.10211113910190761,
"count": 142,
"is_parallel": true,
"self": 0.02299245135509409,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.07911868774681352,
"count": 568,
"is_parallel": true,
"self": 0.07911868774681352
}
}
},
"UnityEnvironment.step": {
"total": 14048.02332493219,
"count": 928019,
"is_parallel": true,
"self": 453.432637439284,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 293.7782576743921,
"count": 928019,
"is_parallel": true,
"self": 293.7782576743921
},
"communicator.exchange": {
"total": 11874.136807199946,
"count": 928019,
"is_parallel": true,
"self": 11874.136807199946
},
"steps_from_proto": {
"total": 1426.675622618568,
"count": 1856038,
"is_parallel": true,
"self": 310.5753293937596,
"children": {
"_process_rank_one_or_two_observation": {
"total": 1116.1002932248084,
"count": 7424152,
"is_parallel": true,
"self": 1116.1002932248084
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 5751.296603629642,
"count": 928020,
"self": 175.50496283953544,
"children": {
"process_trajectory": {
"total": 2013.2323555843031,
"count": 928020,
"self": 2004.3198484042514,
"children": {
"RLTrainer._checkpoint": {
"total": 8.912507180051762,
"count": 29,
"self": 8.912507180051762
}
}
},
"_update_policy": {
"total": 3562.5592852058035,
"count": 652,
"self": 1994.2416836644697,
"children": {
"TorchPOCAOptimizer.update": {
"total": 1568.3176015413337,
"count": 19560,
"self": 1568.3176015413337
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2509990483522415e-06,
"count": 1,
"self": 1.2509990483522415e-06
},
"TrainerController._save_models": {
"total": 0.23780643800273538,
"count": 1,
"self": 0.00804893599706702,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22975750200566836,
"count": 1,
"self": 0.22975750200566836
}
}
}
}
}
}
}